From ec8c9e6d6aec8027eee13fa30f264e7c68211c88 Mon Sep 17 00:00:00 2001
From: svcmobrel-release
Date: Thu, 3 Aug 2023 21:11:21 -0700
Subject: [PATCH] Updating prebuilts and/or headers

c2e810fc3453d74ee0493168dbf7981ba482acd3 - NVIDIA-kernel-module-source-TempVersion/SECURITY.md
7d577fdb9594ae572ff38fdda682a4796ab832ca - NVIDIA-kernel-module-source-TempVersion/COPYING
12f1806bdc25917299525e0e48815306159de132 - NVIDIA-kernel-module-source-TempVersion/Makefile
60176067d89204db2a337983144481c56d94baf2 - NVIDIA-kernel-module-source-TempVersion/README.md
4f4410c3c8db46e5a98d7a35f7d909a49de6cb43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile
90d4457b6fec29378645d5932ad82d706942f4a6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh
a0a15eb341be905ced2a09b8c4feb8bb43b4fb39 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild
0b1508742a1c5a04b6c3a4be1b48b506f4180848 - NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf
1d17329caf26cdf931122b3c3b7edf4932f43c38 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h
88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h
60ef64c0f15526ae2d786e5cec07f28570f0663b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h
ea98628370602119afb1a065ff954784757ddb10 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h
c06b2748cd7c8f86b5864d5e9abe6ecf0ab622f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h
423282211355a8cb20bff268166885ac90e2986c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h
c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h
35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h
82940edf4650b9be67275d3a360ef4e63387a0a7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h
4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h
e4a4f57abb8769d204468b2f5000c81f5ea7c92f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h
6337f595602bce9d76559de1be90553b52f405d8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h
e20882a9b14f2bf887e7465d3f238e5ac17bc2f5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h
5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h
b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h
880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h
c45b2faf17ca2a205c56daa11e3cb9d864be2238 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h
349696856890bdbe76f457376648522b35f874ef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h
003b2cbe3d82e467c09371aee86e48d65ae6c29b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h
b642fb649ce2ba17f37c8aa73f61b38f99a74986 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h
b02c378ac0521c380fc2403f0520949f785b1db6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h
3100c536eb4c81ae913b92d4bc5905e752301311 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h
143051f69a53db0e7c5d2f846a9c14d666e264b4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h
7b2e2e6ff278acddc6980b330f68e374f38e0a6c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h
fdbaee144adb26c00776b802560e15f775ed5aef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h
80fcb510fad25cb7a017139f487da1843b7cfcbd - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h
59d537c1d1b284a9d52277aff87c237e3ec2c99d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h
e3362c33fe6c7cdec013eceac31e8f6f38dc465f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h
5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h
4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h
009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h
d721fca5f2317b9b325dedcbfba51c00d0b23648 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h
4b1a6c372a531b0d3e0a4e9815dde74cb222447c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h
94ad0ba9fd6eb21445baec4fddd7c67a30cceefa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h
f3e0f71abf34300d322e313adcd4fcbde9aa6f87 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h
256b5dc6f28738b3ce656c984f01d8f3e13e9faa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h
c57259130166701bf6d5e5bb1968397716d29fc0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h
84e9b6cba7ba26ef4032666f769c5b43fa510aad - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h
df0420a5e3576e5a8b77a7bcefa6888ad62d6fd7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h
910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h
42ece56d0459eb9f27b2497de48f08360c4f7f6b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h
4a8b7f3cc65fa530670f510796bef51cf8c4bb6b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h
906329ae5773732896e6fe94948f7674d0b04c17 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h
2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h
d25291d32caef187daf3589ce4976e4fa6bec70d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h
8c041edbf4ed4fefdfd8006252cf542e34aa617b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h
2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h
17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h
c181ab9960b0c01a7672bc1fe1bc8870f1e8856d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c
0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c
07a2d5fa54ff88a0cb30c0945ef3c33ca630a490 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h
9a0f445fda73c69e1bee7f6b121cbed33fcb01bf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c
c5cfba80ea122c9078f2d44f1538144747d7931b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c
95ae148b016e4111122c2d9f8f004b53e78998f3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c
24fd035338936c76fda8faeb0d8b1cd59875db92 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild
3ee953312a6a246d65520fc4a65407f448d1d2b8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c
cded6e9b6324fd429b865173596c8e549a682bba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c
5f2e279a4abe0dabd478b1589be67df18de4b09d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
c1ebcfec42f7898dd9d909eacd439d288b80523f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c
d11ab03a617b29efcf00f85e24ebce60f91cf82c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c
dc39c4ee87f4dc5f5ccc179a98e07ddb82bb8bce - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c
7b1bd10726481626dd51f4eebb693794561c20f6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c
06e7ec77cd21c43f900984553a4960064753e444 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c
d4f2cac6234e5ad337c254875a26d17372f28162 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c
e903f50b2624f33807214973558b9ff380bd68e0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c
805042e7cdb9663a0d3ca3064baeec8aa8eb3688 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c
c7f1aaa6a5f3a3cdf1e5f80adf40b3c9f185fb94 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c
0b0ec8d75dfece909db55136731196162c4152d5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c
84d84563c003d3f568068e7322ce314387a6f579 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c
94c406f36836c3396b0ca08b4ff71496666b9c43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c
fbae5663e3c278d8206d07ec6446ca4c2781795f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h
2c0d17f9babe897435c7dfa43adb96020f45da2b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c
9b701fe42a0e87d62c58b15c553086a608e89f7b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h
0ce95e5ed52d6d6ca2bb6aac33ca8f197145ec45 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c
cf90d9ea3abced81d182ab3c4161e1b5d3ad280d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h
02b1936dd9a9e30141245209d79b8304b7f12eb9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c
26c3971ea7afb4b7f237db9ab1c321c3de814518 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c
218aac0c408be15523a2d0b70fdbdadd7e1a2e48 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h
5ac10d9b20ccd37e1e24d4a81b8ac8f83db981e4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c
9999872b1513360d8ecf6c0894f81c63e7d435e9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c
fc566df59becef7bc7511ae62a9a97b1532a5af2 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c
b71bf4426322ab59e78e2a1500509a5f4b2b71ab - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h
a3626bf1b80a81c14408c5181e8bd27696df2caf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c
98c1be29932b843453567d4ada2f9912ea4523d7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c
0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c
61eadfa0f5b44a3d95e4d2d42d79321fc909c661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c
4eee7319202366822e17d29ecec9f662c075e7ac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c
495bcdff3847ff67ba4bbf9af23729eb66eed487 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c
64f1c96761f6d9e7e02ab049dd0c810196568036 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c
d844fcaa5b02f1d1a753965a336287148b2ce689 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h
dc165103f9196f5f9e97433ec32ef6dded86d4bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c
68d781e929d103e6fa55fa92b5d4f933fbfb6526 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h
978d00b0d319c5ad5c0d3732b0e44f4ac0ac9a4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h
fbfa2125b2bac1953af6d6fd99352898e516a686 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c
027fd0ab218eb98abe2b66d05f10b14ebb57e7a3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c
07f95171c241880c472a630d1ee38fb222be4d59 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild
a392fa800565c8345b07af5132db7078b914d59f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c
ee894ec530acbd765c04aec93c1c312d42210aeb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c
f179d308e984ff44a82f6e1c6007624f1ac916ba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c
e2b0e4ef01bb28ff6dcc10cb44570e185ce82df0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h
7ac10bc4b3b1c5a261388c3f5f9ce0e9b35d7b44 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c
d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c
8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h
eca70b3b8146903ec678a60eebb0462e6ccf4569 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h
3c9a013abdc787a1022b11099af4277c37cd666b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild
e4bb0073eb9d6f965923bb9874e4714518850a27 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h
99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h
8b2063f0cc2e328f4f986c2ce556cfb626c89810 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c
6528efa1f8061678b8543c5c0be8761cab860858 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h
ab63f2a971db8bf10585b1a05fe0e3ca180ad6c7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h
40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h
2911436a80d67074106c507871f4b480aa307237 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c
8c95aa7ab01dd928974ce7880a532557209bd8e0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h
fa8d8d10ae773bb7db3b3ce1df545de0e04c937e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c
23586447526d9ffedd7878b6cf5ba00139fadb5e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
cbcd6e13d84ea6b52db12eda98be38e321888eb0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h
a7bc26c1078e95f9ff49c164f3652787adf1fef3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c
bb1f2105d19b50634d46a92ade7fc5f709ec25d3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c
c8982ace6fc79f75c092662902c0c61371195f0c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c
66b33e4ac9abe09835635f6776c1222deefad741 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h
6d65ea9f067e09831a8196022bfe00a145bec270 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
45ec9fd1abfe9a0c7f9ffaf665014cec89c9e7e6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h
7129c765da5bfb77788441fed39b46dc7dc0fa8e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
59bb05ef214b5c5f2fe3cf70142dabd47ea70650 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h
ef03d0ae581cc0326abe6054249791f8c0faa9a8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c
044071d60c8cc8ea66c6caaf1b70fe01c4081ad3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h
708d02c8bcdfb12e4d55896e667821357c8251ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h
dc0fe38909e2f38e919495b7b4f21652a035a3ee - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.c
e4efab24f90d397c270568abb337ab815a447fec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
b775af5899366845f9b87393d17a0ab0f1f6a725 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c
1e05d0ff4e51a10fa3fcd6519dc915bf13aa69c0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h
492a1b0b02dcd2d60f05ac670daeeddcaa4b0da5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
892cac6dd51ccfde68b3c29a5676504f93ee8cd7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c
355126d65ea1472ce3b278066811d4fb764354ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
5209eba37913f5d621a13091783622759706e6e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c
e362c64aa67b47becdbf5c8ba2a245e135adeedf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
9a882b31b2acc9e1ad3909c0061eee536e648aae - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h
5008845a531207899830bcf4568c3463ad0ea6bc - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c
97b6c56b1407de976898e0a8b5a8f38a5211f8bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h
d862cc13c29bbce52f6b380b7a0a45a07fe9cbac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c
c294224282118c70cd546ae024a95479ad9b1de4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c
bda08c8398f68ffc2866ebc390dc63a09a16b0b9 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c
e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c
5ef40af650eb65b2c87572a1bbfe655d8821f2d5 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c
26f2a36442266c5d2664d509ecfd31094a83e152 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c
9e008270f277e243f9167ab50401602378a2a6e8 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h
8d9c4d69394b23d689a4aa6727eb3da1d383765a - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h
07c675d22c4f0f4be6647b65b6487e2d6927c347 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h
667b361db93e35d12d979c47e4d7a68be9aa93b6 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h
881cbcc7ed39ea9198279136205dbe40142be35e - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h
1c947cfc8a133b00727104684764e5bb900c9d28 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h
83044eb5259200922f78ad3248fbc1d4de1ec098 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h
2476f128437c0520204e13a4ddd2239ff3f40c21 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h
a8e49041c1b95431e604852ad0fa3612548e3c82 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h
e3be7ba45506c42d2fca87e9da45db75ced750ca - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h
f669280a5e86ba51b691e2609fa7d8c223bd85fc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c
7c2fe72426fa304315e169e91dc6c1c58b5422fd - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c
381e1b8aeaa8bd586c51db1f9b37d3634285c16a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
d2c79c8a4e914519d653d1f14f706ec4a1f787e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c
15d54c86d78404639c7f151adc672e19472dcf4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c
443c0a4b17a0019e4de3032c93c5cac258529f01 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c
90e8ce7638a28cd781b5d30df565116dc1cea9e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h
ba9e382b24f57caa9dcf1c26a60b1f2070b1b9dd - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c
28d7b753825d5f4a9402aff14488c125453e95c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c
b4813a5e854e75fb38f460e0c27dca8e1ce8dc21 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c
1290abde75d218ae24f930c3b011042a3f360c2e - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h
4a2ad30f49ed92694b717a99ce7adeeb565e8a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c
439ef00ffa340bd1b6506970d154a33ca4b64b4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c
cfaa569ac3d63484c86e8a8d7a483dd849f96be8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c
49df9034c1634d0a9588e5588efa832a71750a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h
890d8c2898a3277b0fed360301c2dc2688724f47 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c
3023a58fd19d32280607d4027b09fe51fdb7a096 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h
e66a20fc1579b0dd1392033089f97cf170e8cf10 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h
b5bd3a58b499216e4fe0e0c9c99525b07ac237dc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c
f531475d8b978bca5b79d39d729b0c9986fe7b36 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h
95dae946088f21339299dae48eeafaab31b97b05 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h
0a04709ebdc4acb12038656c433e10c4e7096518 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c
0e9694d551848d88531f5461a9b3b91611652e9a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c
00c612847b3bd227a006a4a2697df85866b80315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c
29321080baa7eab86947ac825561fdcff54a0e43 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f32.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c
9a5b93459ace2da23964da98617d6b18006fab86 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c
729e790328168c64d65a1355e990274c249bbb3a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF64.c
dde685423af544e5359efdb51b4bf9457c67fa3b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c
577821f706c7de4ca327c1e2fcc34161c96c89f3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f64.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/softfloat_state.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c
86fdc2472526375539216461732d1db6a9f85b55 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c
d701741d8d6a92bb890e53deda1b795f5787f465 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c
c29536f617d71fe30accac44b2f1df61c98a97dc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c
21a6232d93734b01692689258a3fdfbbf4ff089d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c
760fd7c257a1f915b61a1089b2acb143c18a082e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c
fe06512577e642b09196d46430d038d027491e9f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c
1484fc96d7731695bda674e99947280a86990997 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c
8e58f0258218475616ff4e6317516d40ad475626 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
e7890082ce426d88b4ec93893da32e306478c0d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c
824383b03952c611154bea0a862da2b9e2a43827 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c
68843a93e1f46195243ef1164f611b759cf19d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c
054b23a974fc8d0bab232be433c4e516e6c1250a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c
0d8e42636a3409a647291fdb388001c2b11bba07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c
1dd1b424087d9c872684df0c1b4063b077992d5f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
86cda6550cb02bbf595d1667573e4be83702a95e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h
21a11759ed2afd746a47c4d78b67640c2d052165 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
252c816378fddab616b1f2a61e9fedd549224483 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
d152bc457b655725185bdff42b36bb96d6e6715e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
1ded4df85ff5fa904fa54c27d681265425be1658 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h
f36c896cfa01f1de9f9420189319e4e00c7fc52a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h
9645e179cf888bcd0e3836e8126b204b4b42b315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h
de09949a0ca5cd2a84b882b5b5c874d01d3ae11a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h
b882497ae393bf66a728dae395b64ac53602a1a5 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h
91e9bc3214d6bb9b20bc8001d85fe8699df5184a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h
88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h
f28f98589e65b71e47dbcb2c4230538ae0545e75 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h
4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h
af0bc90b3ad4767de53b8ff91e246fdab0146e8b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvsecurityinfo.h
a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h
ae60d53603c7ddbbd72d4e16ce2951f3d42aed32 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h
a31b82c454df785a1d7893af38e83443cfe6f2fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h
ffa91e1110a5cc286ec44a7bda5461b2be941ea2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h
9bca638f5832d831880f090c583fac6fc8cf6ee6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h
821a01976045d7c3d2ac35b0f115e90a9e95f8e8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h
5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatus.h
50d31a6d133b0ea9230f9dc1b701ce16a88a7935 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h
eb42327a2b948b79edc04d9145c7aa5b2a2b420e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h
9f2e225f027f5a04d1104d29a0039cd2bb7dd85a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h
a9bf4969ae3e39cc315b6180ee7055e0ad1279c6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h
00e9a0ace4b59958a8b048229fb22b4d9e2f8864 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
3449834cb8b8c630ab1de6df30503c846b26e86b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
f779cd0470e428160fc590b590f2cd4855950058 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
7c4aef225d174ecbe1130d63b8e8ff752bddf48e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
5abe75cf18a2fede23529194b406c3cf742edced - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
c8490da9f200f4dbbac7ebe636f3a83485f3001c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
1022bba330a71b92dcc81f47ba460209fcc70cd0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
b72318d58806bfd25f922107a606b222baa2e28c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
7a0c878431a9b0d9dda117f165946b1cdf8ebbde - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
e2d8133537e2687df022c6a966c55fbfea1974f3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
9c6a4f1d864b5161564869b19f8cb2ce9d629c1d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
0639d6cd553994aff4195e8e7547eebf8e713145 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
79204c26eb58ee812cc2f72ee1f6d4d7d93817c7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
ea9aac6f0e23f0de444ac3919c35e4b78c18c942 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
f7435e356d54d682a949734574388abbe7ffe1d0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
64f849ed19609320461b8938f24f0b40fb1a35b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
d107e41878b5bc50a5c8b29684122c9589625a6f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
f4a4eeb35e15e0642d1bf4e2e5b31394f4cbbfa1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
b7b0360b1a6ca78267fa10f7adcd370da86513c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
862a17958488d69ca3e92c42ee1bed55cb299fa4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
bb4182eeea20779f62165d2d50ed209b6a07e54e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
b7f2957f506dc285acb87d41d34cfd60408b00ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h
c72f147e8fb78126d13567278239acfcd9b9cc1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
8dd5acedc0b1613314eb3fe9130a9c282bd49ca1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h
681c94b982e29049638814f6c1e4eb508f8b0bf3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
3646710984d5c3024d16f9ab346222ad6dfdb4f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
6c34803c213ea0a28114bc921e1867cefebec088 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
76c9f104e04a8fd9e73e03ad59b2e72264c5f169 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
9e61da81ecdff15d63f9ae8a1c2f0960b820c65c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
dac18fcaf5d652b21f84cfba455f4f5972e786c5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
d51e47795dfe1fc0bae31b9379d6a39ac4d3080f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
8a613db1c31724a577c4718752c15d9754882f48 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
3966d65c9701bf97c807cf87838a08cda10f418d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
a1830232f18afe44230d6a8598c50b3fc7656089 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
2dd40e3e41d74de3865bc700acc9ab7e0540c647 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
f97e7f88aa17788bbbebf55807e449c0ee016384 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
b2b6b3b413ae17af1afde2fc8672cd1bf48e7b19 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
3c7130d0613d3c8baef6b23bb63c6ee7a10ed21b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
39f5e838aa6ab007c56e7a59c7d2986d1a7aa34a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
6679d97e3852ed78ee44780408c523b94f426ca4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
090f908931690302e3a2c77f3ce41c4de0c61efc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
7c4e426dee0ae86c00b3bd10873a1a2bd94ed3b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
5bdddb9a949a78443f83a7da81ad5fee8a300c44 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
d084d99035f4cc34cd803ff4a5328b9e10ea77fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
4b8fa2ce546ae3f06b7dc61df3d534449cdb5b2d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
8855ee8bad2f2169ebd147e7ac77d9f1340cbad8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
82a2e7a2fc6501163d07870f3f640a591f4a8996 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
f3a855fe7a91c2acf2be41629ce906996e01a9fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
3d8e37aa8485aadf55335d8f9f913273d90a2442 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
da220a5608a0e4c73fa0315b13e2b29d92b114e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
6834a9c75265c25adfb03f0b2dbfe0559f28cadf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
051dbfd1d5ff02b2771bc9b3fad8aaef29aab9ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
c3a75647f5ca6cd7b456511af36a9de6d90329c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
82364e263f43ea028c2d66db58887958bdef64b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
143c1c24ec926142d1f84dec7a543f2b98541545 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
1684a3a8111fd3d83363cebe68d016a54eaaf686 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
72292c9844eaf24c38967dd4a879c0c0f070a0de - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
091f7bac99f5c786a64b6fa59d9d27af786bab10 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
c0181e959c1ba5ebfc3f130c8764687b58453f9b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
2a11fc0a499f8293b83e08572f5e6be04bd1da61 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
a44d2f1b31b8ec124355018204909df19df09748 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
8ef946f1d7545277ef64891b45a29db44c4e9913 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
774fd1e730d1d853bf97946f7ecd24c6648c7af4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
22d828c87b223f937c589a0e863a25d95b734371 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
7d3819683e9f562a87f36a3e23c043b2b6fd814e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
7d27fafff043d290b2ec1d2dddbecea2f1df4704 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
27ad8b5c2406fcd572cd098dd215e93ae1db99e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
783db6da0b92b6b8ae26b180129beb0bccb13a5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
e6f6beaed64167088608027b442f5449cff027c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h
6b4418e269bb97b9996b05ea153ccd195c661e11 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
0ac7e4eb4d952c84c6f4e697cbfcb355069377c2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
1651ec548a2899391a05bc6463b3f7162c7807ab - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
bc22bf13b7d99ee6f80c30b569e084a2b03e385a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
1ebfe9f0f9a7d2dd2873df82bbc78b1ec982ca93 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
291f91212d5a37aae46a2944cf89f4b74b1d1809 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
82aa4d6108ce6abebcbbc95afcb7a6350e287f5f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
c4474dc1f53661c67d8fce5303dcc636d9ad3b8f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
18814de559257f07bad8a0a9006ac9751fcfa1cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
e9d692b06c70951dbbd0663a89f822153bce1146 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
1248e113751f8ed9e4111e86a7f7fb632b102eca - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
b921747a65c67fa093de08fa782c164d048824b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
7e0773f7bf13350a9fd25b0df4d6c45a55a008df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
8fd661537cc4eb55c167b9daae404bfb82408bfe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
f88f1c519a242dfa71221bdcdafc7deab14d8503 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
ccc48726d7da49cddc4d4f86d8dbd2ad585f7b38 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
3dc187adc0a848e68f62a6a7eb99ac02ee6502cc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
f3b81a241efe1224798b17c062e33936469c3c2b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
09dedebdcff3244ab8f607a7152e9116d821f9c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
440314f66374d35a1628ee8bd61836a80ab421eb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
92be535d68a7f18088921faa3f1742298ad341c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
84fb76f9cff38c797b139cba40175717591d49df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
2f92bebccb9da5246b19bd13ff0e6e79de79bc3b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
aec1b750866e34f9626e48c535336f93c5c246fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h
9031642283b59ee6d52e2e1ca54332df5c2f7acc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
e10cbe4875736ef16072232789dd3f48647c022f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
91cccede5c4f26a6b6ca7ba4bc292f3d908a88d4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
f47136417885a729f9c5dee375ec9dec1bd170e0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
f523fe4a55a6a9d01f41f9f34ff149ed75b2e739 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h
ad7604ced12ee18c569d2a7ebe71e185ebff3fd4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
209ef519cb73395cea7d66016448ebc3c6bf6fe4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
4a3e7d71b9169d703d9373ff80b02a63825a80e4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
4d9116d23d27a3fc39c366f2685243b83ef7d485 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
abe79ad927e7c70b7c1a8eb687052a782efcd5f4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
ef180860a1ccbcb9f5d2f8a6656a345eef76a2a7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
f7e56d494fea02515180f21b0f56ae0aff583be4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
b66a45c83c84f6d458ef19fd7e0f972f2eabd109 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
2518a62952c72ee6f3447bc8dc417129f6ac26a4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
9373c51ca29afec3368fb5b8c2a2f05b0920f291 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
0ee647b929e55cf39da7e26ffc0f027676fa52fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
6e5b278451308efbb6911a8ab03b0feba504d035 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
c905766589d17fcb99a5d73846ed61f7b7db56fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
323fcc6af8c30d5ef292ae90810c5c2fa2009e20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
382dc80790d870047db7cea957ef208d4439801e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h
825f4d976c76d375803e42967fdab53e7814d18d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
8294d43d202a9cd78367f2e69388a6c6f2c369f7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
cf78a847e0882e1d164eccdb86ea033126019599 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
76c31150e2f589fbb96cfc06cdc6c1801e128656 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
7f5548026751a8caaebc245945ccdc4bb037b566 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
7812ba094d95c1b6d65afc6a1d26930400b8b96f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
f1dae17e75a24c28135cf073bf29f9609a2418e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
24782552a13f627e2e94ebb5f7021246a0c0dc53 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
127f78d2bb92ef3f74effd00c2c67cf7db5382fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h
bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h
4a6444c347825e06bdd62401120553469f79c188 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
2f87e87bcf9f38017ad84417d332a6aa7022c88f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h
0d8975eec1e3222694e98eb69ddb2c01accf1ba6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h
c2600834921f8a6aad6a0404076fa76f9bc1c04d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h
861b9d7581eab4a2b8cc7269b5d0e0d1294048d1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h
92c2dab6bc48f32f46c6bbc282c63cb4ec7a50bf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h
0285aed652c6aedd392092cdf2c7b28fde13a263 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h
dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h
a30755b3003023c093f8724cf9a2e0b0c301b586 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h
cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h
bb8d15aee43e1feb76fddf80398e93fd805f1ddb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h
02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h
ccefba28a2c7979701f963f2c358b4414b84ca98 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h
2e3d5c71793820d90973d547d8afdf41ff989f89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h
204feb997ba42deab327d570e5f12235d5160f00 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h
03ab4e08e8685696477b62eb1a825e5198d61b8a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h
545dd1899c6988ffe5f50300232bd862d915cd5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h
022e8405220e482f83629dd482efee81cc49f665 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h
36b0dd6de0d0b49d435a4662c35d1f4ae5b2b1bc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h
02ff42b6686954e4571b8a318575372239db623b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h
82c9df617999f93ebd9362851966f601b8131fdd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h
eac86d7180236683b86f980f89ec7ebfe6c85791 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h
866977d299eac812b41eb702a517e27bdc56e875 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h
78259dc2a70da76ef222ac2dc460fe3caa32457a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h 31939808cd46382b1c63bc1e0bd4af953302773f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h 11fd2de68ab82b81211aa20c66a9a6595199f673 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h 05605d914edda157385e430ccdbeb3fcd8ad3c36 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h 9db39be032023bff165cd9d36bee2466617015a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h 76c430d54887ed14cace9409712259e10f042b4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h e63ed2e1ff3fe2a5b29cfc334d3da611db2aadf6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h ea10b0d938d9314638882fdc20b9158a193f7b08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h f5760f5054538f4ecf04d94fb1582a80a930bc29 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h b1133e9abe15cf7b22c04d9627afa2027e781b81 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h 9bd9f416844d798f352fcc6c8aaf2c251253c068 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h 04ab1761d913030cb7485149ecd365f2f9c0f7da - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h fb5ef3d6734a2ee6baba7981cdf6419d013cee85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h ddbffcce44afa7c07924fd64a608f7f3fe608ccc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h 68c953956a63ef8f7f9bcbe71057af510f4597c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h 38265d86eb7c771d2d3fc5102d53e6a170a7f560 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h 941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h a23967cf3b15eefe0cc37fef5d03dfc716770d85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h 9b2d08d7a37beea802642f807d40413c7f9a8212 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h e0c9a155f829c158c02c21b49c083168f8b00cbe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h 95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h 8b75d2586151302d181f59d314b6b3f9f80b8986 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h 026f66c4cc7baad36f1af740ae885dae58498e07 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h 15136a724baab270914a01a8c0e8f2c2c83675b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h 4bbb861011139be1c76b521eaa7ae10951d5bf9a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h d1a19dee52b3318714026f4fcc748cfa4681cd25 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h 158c98c8721d558ab64a025e6fdd04ce7a16ba9e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h 435a34753d445eb9711c7132d70bd26df2b8bdab - 
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h 326dbbeb275b4fc29f6a7e2e42b32736474fec04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h 1409efc057e4f0d55602f374ec006f9db7ad3926 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h bd27ceb75c4604fef53658f16a5012d97c1534b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9470.h e6818f1728a66a70080e87dac15a6f92dd875b4e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h 11b19cb8d722146044ad5a12ae96c13ed5b122b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h 1efc9d4aa038f208cd19533f6188ac3a629bf31a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h 4b8f95693f79a036317ab2f85e150c102ad782e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h a7c7899429766c092ee3ecf5f672b75bef55216c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h 15d1f928a9b3f36065e377e29367577ae92ab065 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h a26ddc6c62faac1ecd5c5f43499aab32c70f32cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h b29ba657f62f8d8d28a8bdd2976ef3ac8aa6075f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h c5ef1b16b2bd2e33f52b71f2b78db789ebb844f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h ecc56a5803b85187aa95b788aedd4fa2262c1bb6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h dd4f75c438d19c27e52f25b36fc8ded1ce02133c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h 6db83e33cb3432f34d4b55c3de222eaf793a90f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h b29ea3f13f501327c060b9ddfac5834ed396414a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h 4d5ccf08ab73343343e0c804002a621996866161 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h 593384ce8938ceeec46c782d6869eda3c7b8c274 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h 95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h c61f8348c2978eef0a07191aaf92bd73e935f7bd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h 509c56534ed6d48b06494bb22d3cf58d63254a05 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h da8d312d2fdc6012e354df4fa71ed62ae4aac369 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h 5416c871e8d50a4e76cbad446030dbedbe1644fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h b7a5b31a8c3606aa98ba823e37e21520b55ba95c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h 26c3ccc33328a66ad3bcfe999424dffda991264f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h 28867d69a6ceac83da53a11a5e1ef87d9476f0be - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h 053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - 
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h 060722ac6a529a379375bb399785cbf2380db4fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h 13f8e49349460ef0480b74a7043d0591cf3eb68f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h ba76ecbebe0ed71ea861ed7016abbfc16ced2df7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h bae36cac0a8d83003ded2305409192995d264d04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h ab27db8414f1400a3f4d9011e83ac49628b4fe91 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h 70b155b0da07a92ede884a9cec715f67e6b5c3e8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp c70d946adb4029b3476873887488748162b88b0b - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp ac08ccd5c2e3fadf10ae53e46e582489d1579ed0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp 6fd536d1849ea4cce5d9b72d1dcbc1db9c818b4e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp d63fed0074b22584686ad4d0cdaa4388b42194d6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp a5df56b2cf8df9d4d8ab6fa2b3521649ef09384a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp f56f92e32710b0342805b785d34ba1a9f2a54ed3 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp 554e6b7dadbb68ac0f3d2e368ca3fd90832ea254 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp 60994cb1131d4d37b2d3fce6cc59dfea5ebb4129 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp 37eabb1ab51cb38660eb24e294c63c8320750b96 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp a0d24a4bd71f999adbaa876168adef5a7d95f2b8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp fa4f4869d3d63c0180f30ae3736600a6627284c6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp d991afdb694634e9df756184b5951739fc3fd0ab - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp 1543bbaba8f3e149239cf44be3c0d080c624d5ba - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp 56ee9318a7b51a04baa1d25d7d9a798c733dc1bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp 9f31213ab8037d7bb18c96a67d2630d61546544a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp 719d2ddbfb8555636496cb5dd74ee6776059db92 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp f83b3c17e9f26651f12c8835a682abdd66aed3a2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp e874ffeaeb6deec57605bf91eaa2af116a9762bd - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp c62ef84471074a9ed428b4a03e644885989b0b83 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp 38fe8122aba8a1bc5745d81192ec7fc75934dd0d - 
NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp 66e91795dc65e1bc13c545a84556d200c8eb7bd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp 4803cde0fffcf89fed46d6deaeba5c96c669a908 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp fe8007b3d98dad71b17595ecb67af77b198827a0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp 62d03d24af041276ba2abb96fa1634ae4f99ea8a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h aeadcb0bc061b5db0fdf8aa67c1b5703976aa946 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h 01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h 07d22f84e6a386dad251761278a828dab64b6dd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h 11487c992494f502d1c48ff00982998504336800 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h f6e1b0850f5ed0f23f263d4104523d9290bb8669 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h 02b65d96a7a345eaa87042faf6dd94052235009c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h e27519c72e533a69f7433638a1d292fb9df8772e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h 543efa25367763292067245cbc39c1382c35df77 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h 39aece5465100489867001bf57446bcfc4999c24 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h 6e515f398e9ae1b603e49ec32576ccd0ce5d8828 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h 070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h 36e80dd13c5adc64c3adc9a931d5ebbf922e9502 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h 7974abf146f1f14cd3e3854ef63ddf52ebbeb222 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h 0f747fdf03bebdcd86dbdf16d00ee2d044bc906c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h 325818d0a4d1b15447923e2ed92c938d293dc079 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h 2067e2ca3b86014c3e6dfc51d6574d87ae12d907 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h d876d77caef3541ae05f310857f3d32e642fba04 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h 78595e6262d5ab0e6232392dc0852feaf83c7585 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h b4d8c44957efc90ba97092987e6e43c48e85ac86 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h 78c6d7d85b47636fbb21153425ef90c6d0b2d4e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h 3b74682e142e94b1c68bf619169f12e5805044bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h 8f83883126b853c97e5859dafd98847ec54d36ac - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h 7b7d9a137027fbbedfc041465987fa4ed4198ce4 - 
NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h cca426d571c6b01f7953180e2e550e55c629f0f4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h 80380945c76c58648756446435d615f74630f2da - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h e2075486b392d6b231f2f133922ac096ca4bc095 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h 3eea80c74a22de43b6edad21ea5873c791e093e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h d1e8c84f279cb30978d32c784107c0247afa6e66 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h 750ecc85242882a9e428d5a5cf1a64f418d59c5f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h 379d3933c90eaf9c35a0bad2bd6af960a321465f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h e02e5621eaea52a2266a86dcd587f4714680caf4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h 5dff32bd1018e2c5c2540ea7fb571dbea596d5b1 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h 4a098c4d09dedc33b86748d5fe9a30d097675e9f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h 5bd3706ceea585df76a75dda7f9581b91ee8f998 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h 020194b85245bad5de4dfe372a7ccb0c247d6ede - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h 2f60ba753549b232e1b995046a356dbe0eced04a - NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c ebccc5c2af2863509e957fe98b01d9a14d8b0367 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h 48f063f09bd9b0cb6c4f47d8911643790b3ffbc8 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h b85b49fc4ed38a241c79731a02b3b040a654a52a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h 764e5c4364922e3953b4db0411d1d3c3bdac99f4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h 8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h f59a2759281341e56372d3cb37b16715944dd8e1 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h e015e955a05908d4a2202213353eac89f1b80ff6 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h b58ed1b4372a5c84d5f3755b7090b196179a2729 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h 4282574b39d1bcaf394b63aca8769bb52462b89b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h a27eb14c54c6acb647a95c264b90e25f07fc757e - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h 5257e84f2048b01258c78cec70987f158f6b0c44 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h 963aebc9ec7bcb9c445eee419f72289b21680cdd - NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h 62e510fa46465f69e9c55fabf1c8124bee3091c4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h 3bf0416186ee90833c727f01cc891bd568ea9d0f - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h a346380cebac17412b4efc0aef2fad27c33b8fb5 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h d2b4cc6228c4b13ef77e47bf30326826c5662ed4 - 
NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h 06aa739230c00998e039b0104e5d73da85c322fe - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h 86a59440492fd6f869aef3509f0e64a492b4550d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h 38edc89fd4148b5b013b9e07081ba1e9b34516ac - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h f9311a35f375c7453d99fdde3876440b54d4cb5a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h 1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h a26df21c3cc3eeb395428101f11da68386e0d72b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h 8159b4189c577d545c1280d7d905a2dc2ba29fa7 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h 96b9560d322f43a980db5d6cc5072e9e81fdb9d2 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h 249d4f7317ce68c3ceb64e2b1ee257cc75eb002b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h 8c43da4fae8a0aeb374ce46ce19eb8c38b552ae4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile 17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h 7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h 16a2e187afedf93bade7967816b0723708544e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h 20213d53bb52bf9f38400e35d7963d0f4db22f96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h 70d9251f331bbf28f5c5bbdf939ebad94db9362d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h 8a6f26ccf2e563b78f6e189c999ba470ed35271d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h 853d9005ec695cb5a1c7966a1f93fe0c9c8278cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h d4889d903bf4de06d85e55b005206ed57f28af69 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h 6b21a68e254becdd2641bc456f194f54c23abe51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h c1c7047929aafc849a924c7fa9f8bc206b8e7524 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h 71e8c5d3c4dfec6f2261654c3fc91210bff78da9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h 64af1df50d2a5b827c1c829a303844de20527522 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h 260b6ef87c755e55a803adad4ce49f2d57315f9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h 4f5d723c80f607a0e5f797835d561795dbe40ada - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h f5f3b11c78a8b0eef40c09e1751615a47f516edb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h d3f5bc85b538a3a1d4c2389c81001be91205ec9f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h 9c90df1fa1b6dd33a7e330c47e94b5b9194ad419 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h 
be3a1682574426c1bf75fcdf88278c18f2783c3f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h
8f1994f3f8d100ddcf8b23f5b24872bed939d885 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h
75e8a8747795fad89b4d2b662477e5454863dcc7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip.h
d7861e2373ac04ffaf6c15caeba887f727aa41fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h
182a47c12496b8b7da1c4fe7035d6b36d7316322 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h
c8f714e80dd4bb60ceab0c0c7e6a5b3304940946 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h
ef78e73ec9c0b8341bd83306d1f3b2c35e20c43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h
867e3091a945d3d43b2f28393b40edeb9d27597b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h
c1904d38785649d2614563d0cd7de28a15ce4486 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h
118d0ea84ff81de16fbdc2c7daf249ee5c82ed6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h
33dbf734c9757c2c40adb2fb185e964870217743 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h
ebafc51b2b274cd1818e471850a5efa9618eb17d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h
4020b2a0d4f177c143db40b33d122017416dfa2e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h
be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h
9dd131355ed1e25a7cee7bfef00501cf6427ae92 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h
17f6fbbd5e0a75faec21347b691f44dcb65c01aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h
4625828efd425e1b29835ab91fcc3d2d85e92389 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
a8fbb7a071c0e7b326f384fed7547e7b6ec81c3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h
52b6c19cce320677bd3a4dfcf1698b236f29e59e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h
a0cc9f36fdd73c99ad8f264efa58043d42353b0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c
381fba24abae75d98b3ada184ed0cd57335819a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c
281fdc23f82d8bdb94b26d0093b444eb0c056f51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h
445a409950ab8f36cfa24d1dc73e59718d335263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h
2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h
5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h
910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h
e48c2ec8145a6f2099dddb24d2900e3ae94ec02e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h
727bd77cfbc9ac4989c2ab7eec171ceb516510aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h
009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h
fb242aa7a53983118ee019415076033e596374af - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h
f6875ef0da055900ef6ef1da5dc94cba2837e4d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c
01d943d6edb0c647c2b8dbc44460948665b03e7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c
394ea31caa5957cfb2c8bb8c3cc0e4703213fe7f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c
ec97ab37cdf2cec0283657c2c04a139a1a168337 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c
85ddb19f89833ca57fd2deff2e2b4566e162a56c - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c
8415bcd6ab34e356374659e965790a0715ed7971 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c
c98f76bcfc7c654a619762ebc3a2599f9aa89f8d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c
5fb73f35841c41e7376531732cb12303224e61ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c
e9626eee225e58ec2d5be756c5015775ca5e54b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c
86da3c7c09354d2c49d95562aba15cbedb543d9b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c
fc8182cc1f3af77125dbfa328996bcfe0387cc41 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c
05548338a73ade1b3c2ad1cebf1ab5eb16ef6c9b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c
07c2f10473e2fbe921b2781cc107b5e56e6373e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c
21c8184de2c9150c21ac5d6fba24e79e513a0a69 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c
da726d20eea99a96af4c10aace88f419e8ee2a34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c
5c79c271609ebcc739f8d73d7d47f0b376298438 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
b55665d7bceaad04bbf29a68f44536518302c3d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c
f8bdd07a27296ef6aab86cc9dbccf8df811fff24 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c
1918ca3aa611cd9dfc79d46d038ab22706f0b1ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c
24156462f25922c8de5b5d2558db36b2e68b28ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c
c2870190ca4c4d5b3a439386583d0a7c193d6263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c
f27f52dc428a6adeb936c8cf99e1fc2d8b0ad667 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c
5acf19920d56793d96c80e8461b0d0213c871b34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c
c2d0e6bef0c4929a3ca4adfd74bd6168fa4aa000 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c
673ad86616f9863766bfec0e118c918297d32010 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c
c799d52bdc792efc377fb5cd307b0eb445c44d6a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c
94f4736acf7981cebfd74302a21f19cdbafa8d71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c
8f22c278a5839d36f74f85469b2d927d9265cb80 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c
eb09642e8b5d9333699f817caaf20483c840b376 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c
ab17e5b4cafa92aa03691a0c187ef8c9ae53fa59 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c
574b1268ff83e4e5ed4da15609247a5c0ec8f51b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c
45230e56d29c98ea0f10f87c1b16ba70c96f24d5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c
8af6062034d464f778969e26d3bf5a9b4cdaccf0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp
69fed95ab3954dd5cb26590d02cd8ba09cdff1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp
6b985fc50b5040ce1a81418bed73a60edb5d3289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp
f2a05c29383bfc8631ad31909f31a8351501eb27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp
31767fd551f3c89e5b00f54147b6a8e8fa3320e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
110ac212ee8832c3fa3c4f45d6d33eed0301e992 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp
51af3c1ee6b74ee0c9add3fb7d50cbc502980789 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp
f96cd982b4c05351faa31d04ac30d6fa7c866bcb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp
f6c3e8bd4ee13970737e96f9d9a3e4d8afdf9695 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp
893c70c95809f463c7af6dc9c814527804fcdf53 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
aba0bd796d932fa19e8fad55ed683ae57d68bffb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
499e72dad20bcc283ee307471f8539b315211da4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
40cb3c112bbcb6ae83a9186d0c9fa1857cf6a126 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os-interface.h
1b53bbf5f8452b8057ff2dd7828947a047db38d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
e3679844971ecc4447259fb1bdf4fafbbdff2395 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h
4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h
1e89b4a52a5cdc6cac511ff148c7448d53cf5d5c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h
fbcbb81ae14e8bfde0d665ad20f9cab9b0bbd9c3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h
ddfedb3b81feb09ea9daadf1a7f63f6309ee6e3b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
9c7b09c55aabbd670c860bdaf8ec9e8ff254b5e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
cc3b2163238b2a8acb7e3ca213fb1ae6c5f0a409 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h
2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
285ab886f5fad5caf3f6bd0b0c7102bd4c4300bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h
6ebda7ea5b17b7b9bfa9387fc838db9f0c3405a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
9d9035afd7af31f30cdbf2d4c75e5e09180f0981 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c
21ac9d6932199ce0755dbead297eb03c9900f8c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
49dc935d4475b572478c63324f0832c972a4277d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c
532366fd9a288a812eca78b92b304ba3625f8c0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
006e77a594ae98067059ad3d7e93821316859063 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
f134270af5ecd7c5ba91bf5228fe3166b101dd6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c
690927567b5344c8030e2c52d91f824bb94e956c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c
5940d8e83cd0014e3222952eab29eebaaad19b86 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c
54b912b640bdcae42f38c41694eb20abcaad61a7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
fb5272f3d0e465aedbc99ddcabb1c6c428837a6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h
7df66a87c9498ae73c986e60fcb9cb1cbcd19e19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h
28d6a6ae495d9bc032c084980ebf5d94448bcf29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h
31deee778df2651d3d21b4d9c8ab180b8dc1ff14 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h
961ed81de50e67eadf163a3a8008ce1fde1d880c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h
4db7387cc1ce08ccc62404b80b19c7f1b685e746 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h
e4d88af4eb51d32288f913d90e490e329884970b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h
35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h
df4d313c66e75fa9f4a1ff8ea2c389a6ecd6eb3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h
bff92c9767308a13df1d0858d5f9c82af155679a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h
db0dc6915302888de06e3aa094d961cfe25e0059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h
059c1ab76a5f097593f0f8a79203e14a9cec6287 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
d50ff73efaf5bc7e9cb3f67ed07ede01e8fad6f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h
671286de97aa63201a363fd7a22c92ee8afe4c7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c
6fa4ba2da905692cd39ec09054f2bd6621aa2a7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c
5a97d4f8ce101908f1a67ffe9cc8ed00b6bf43b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c
1653c7b99cfc86db6692d9d8d6de19f1b24b9071 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c
caf2b80fa0f01b9a3efcd8326bf6375455f2e1b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c
4e1be780ac696a61f056933e5550040a2d42c6bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c
0824d200569def5bf480f2a5127911ed0ea881e6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c
f6b4e40b638faf9770b632b404170e1ceb949be5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c
db44a803d81d42bfaf84f7ea1e09dc53c662acef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c
9515ea68cdac85989e4d53d4c1251115291708dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c
08be13ced6566aced2f3446bb657dae8efb41fbe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c
77573c8518ac7622211c4bdd16524d369cc14b96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c
fa854efc5cdf4d167dee13302ee8377191624d95 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c
89543f7085fbc2ca01b5a8baae33b5de921c79e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
0e4c2d88b61a0cf63045fe70e5ba2c81c44e37af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
acb2a62fb60e08eb6d16518c43c974783139813b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c
834efbfff64c0d01272e49a08bd6196e341985a8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
dd0bd914c6c7bfeabdd9fe87fb984702e0765624 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c
19447ad30b3fc2ee308bcc45e3409bafa5defe0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
3abbef0a6fc95d6f7c7c5a16cbbbb51aaa457cc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
0918cada217ca1883527fe805fc30babf7b8038d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c
e1a6dfb38025abeb5adfda929f61eb6ee44b5c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
ed25b1e99b860468bbf22c10177e0ba99c73894f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
8cd12c2da71acede5046c772f14aff7cbd88af12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c
01e8b56f7677f5cb7f950d9aa9bd37d04153085b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
629566bf98be863b12e6dc6aab53d8f5ea13988c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
b41502d73d7781496845377cebd0d445b8ca9dc6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
8a418dce9fbeb99d5d6e175ed8c88811866f3450 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
e26ade846573c08f7494f17a233b8a9e14685329 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
d6e1bd038fa0eff5d3684a5a2c766fdac77f1198 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
d4a07d1c6beb7ddb229ed6e5374343b6ce916d84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
bc2b57acc8fa8644615168e3ddbaf7ac161a7a04 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
2bb921b462c4b50d1f42b39b4728374c7433c8cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
086e9a51757c3989dfe0bf89ca6c0b9c7734104a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
56be7a21457145c3c6b2df7beb4c828b7bd1a3b4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c
5be208cc0e1eae1f85f00bb0b502fdba74d6656c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
a64c51c515eb76208a822f1f623d11e2edd8d7ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
a54628e9d2733c6d0470e1e73bca1573e6486ab3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
ef2a3848e0302c09869a34eba1333d19a17acc56 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
2c66e086bb149fb1b9ca8f860566a3f5e391b2f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c
f89e982b0e31a1898e1e4749c9a8ae9f0bb59a0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c
d92267a3394ded5d7d218530fd16ce00a920b1d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c
2279fd14aab9b5f20b8fc21f04dd0fca41e418c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c
11a547cbfdbce000a6e5edf48492f5b930ddbdca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c
81f66675295315cfc52be225c2e9ee912b56fbac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c
569f56831cde7bdc528ac2e543eea485025ec6f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c
05669e008dfd89e5c81381e6c60230c1fe17a876 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c
820b6e63c2b11b0764305c483142f626b6f72038 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c
bc83726df04c30d02a1852a10a22c77fdb3ef7a7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h
5f194ba056b018a8194c16b0bbb6e49c1b80a996 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c
e40f6742084cd04252f3ec8b8499a26547b478bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h
3a0f999e390d93b0db8272f55fbec56f6b055fe4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c
78f1e379c3d1df9e34baba77f78f48b8585bdc74 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c
8e40d2f35828468f34cf6863f9bf99c20dbfc827 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c
b441ee824e9c15c82956254704949317024ceb41 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h
277441b3da96fc01199f1d2f5102490e2e6cd830 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c
38d0205b68ea2c82709b42eb7e8b9cf92cec8828 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c
2f89b9059467e7f67a6a52c46aecae5cb0364ab6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c
46aa43b18480d2eb7519b2dcd0fe6a68c79b8881 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c
f2c7d77e4183994d7ee414e2a87745fcd23d995e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c
6f46dd43e4b3f2ad803a4c9492cb927aebffc1f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c
59d42b6a123b062237b3b6ca382211e35057ef1e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h
ddaf2b8e424df9147a4e2fecf3942b64b1d2b001 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c
68cc7b258f934097e9dc31a38e7e3bf2ce2fe5d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c
c3820fa4bb1192a9317ca834aeee3434c7eb8059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c
ea7be8a55a3310aa1c3926ed69c86a6491925e08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c
70507a8d43797eb3cdc13408ae8635f4a2eebce0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c
af4ffa4b423e07cf40eb863c11dbf515c7104874 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c
1793e056a0afcc5e1f5bb58b207b49c5f1556eca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c
63e5e17280d865ace8cdd8eb8a2598d3d7830ad7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c
8e5af753de1725dd919185c29d03ccb0934fab6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c
fe91b43c37b64472450cc25329d2dea74d2a9fcf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c
c0822891f614e6ec847acb971e68aad8847e0cd7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c
c68f2c96bfc6fce483a332a5824656d72986a145 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/system.c
37000b419d23a8b052fc1218f09815fafb1d89c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c
7b9c95f912b203c68b6ba1f62470dffee4b4efe3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c
677c655b0b8e86bdab13cdd4044de38647b00eec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c
b9eabee9140c62385d070628948af0dcda3b0b1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c
003e3012e87b8f8f655749db88141d74660e8d8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
a5a31b9b62e6d19b934411995c315d4fdac71ca0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
ed24c0406c85dc27f0fca1bac8b0dcb7a60dca2d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c
6aa752ae480e883d077de842f02444151947f82f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
956b7871a267b7d381d1cd7d4689ef1aec1da415 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c
9d9fcd87d784a758659b6cc8a522eaf9beac4b6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c
15f3290908931a9e4d74b0c0ec9e460956e39089 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c
623dad3ec0172ed7b3818caece0db5687d587ff3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
64bd2007101cbf718beb707898e85f40071ae405 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
94acdcebee0cdcbf359b15803ec841e5284e1ff2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c
079893039c2802e1b0e6fcab5d0ee0e4dc608c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
78cbb6428372c25eba0ccf8c08e7d36d18e4bae8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c
6d5915924b4e26a5e7592427e34b77596162d0fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c
cade0f7049cdb2ab423a073887ed20ba1abdb17e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c
8a4e2aec6fc01ce1133cfc7ef80b6363c5394208 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c
8ed5171254e51e59fc5586e729793831165b8c0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c
206dda159ecbc0340ac9329250302c76a504e5a8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
d48d51a880fced52ad6e323d984e872ccf9ef3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c
d0ae6d7a363db3fdf54ae1a760630b52a2019637 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c
883ad1cf4ed1714eb74d44d3b9a41d6a4723b650 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c
0c9581aa68a77cb9977a7fbcfd2077ccb618206e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c
dac54d97b38ad722198ec918668f175dc5122e4e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c
d3e5f13be70c8e458401ec9bdad007dfadedcc11 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c
836ba8b401fb6b6fcf4ccde1b644ebaefc3d8ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c
9c40bfebe2c57b972683e45dc15f358aaa2280f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
8f41e7127a65102f0035c03536c701b7ecdaa909 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c
b528ef8e238dd2c22c6549057b54fe33039c6473 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c
b6d6074ca77856fc5fe4ff1534c08c023ee592a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
caff00b37e7f58fde886abcc2737c08526fa089e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h
66e79047600e0a40c50e709c6c82402d9b205ad0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
da86b765702196eb0011ac9d14873fbc1589d48b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
7cdc50ee31b9cde14c0ce6fcd390c5d4564e433d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h
2fa76d2d5ba7212f826b656aa683223a470e484c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c
6f6c83e9ee6d91fc8700e5015440f2bc72e6600b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h
b55b7b59f35d848d5a3b43d63da4d2f7b0af5d3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
7416712aa964befcf8fede86e5a604871a2d00b8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h
6dd0c5f2384610ea075642d8e403ddd8c8db371a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
87ac95cf569bb550adb3577c6a6658d094c59999 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
a045a19d750d48387640ab659bb30f724c34b8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
595a6238b9f04887dd418be43ff31f3e7ca6b121 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c
4418c0344b64740050ff8ef6ee085f0687a323d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c
057ad074f6252f7809a88f918986d7d5aacff568 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c
2389c9dd3b13fd2ff26d2d1342c515579079bc71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c
2975e5cecee2c1fd5f69a8ffc20a49016e83025c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c
f0ce913eb568f85e6e1c1b8965f2cd2b98e81928 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c
cba2c17804f6f2062dc5d75583e4a03e03016d1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h
133e94f73c781709f407b03d8cdfdd8865c39b4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c
801eb295d07258ad70b99cb0fe85f3421690e0c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h
46c1a2066ead316ea69c60dc323bdb649bc11c0f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c
f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h
958d9a2cddc91edfafb5c2f3d9622443ac49a6ef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c
d405e01478d26ea99cc0012fa2d6e0021bbe6213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c
182602832a033b3e2d5f88d4ba8febe63eeb2f9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c
376572489e0d4211663da22d5b0de7c7e740fb29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h
e3c4822ac998ab5c7946919c85011f6172dc35ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c
fa5e1c6001e60f77415d0a8f87c8b548b12e1217 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c
ddc0ac4e1d8b8aef15e147f1f85f8df37c196763 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h
4fe5357eabd0c5e351fb965ceead308240f68eb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h
4f4acfdefc7b9a0cdfe2d5840cc18c9c33366053 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h
1d66bab50a7d39faa2b0fec469a4512d2c7610d5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c
fbcbeb92e46ba11ac26c04c9688b3ffcf10f5c53 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h
e449382e19e4dcfcf0aec0babe5a1c8ce2f4249b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c
87a5ae8e07103074020ba052ca45ab39e918d3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c
47b7744ddd01b821bf2fd25fdb25c8d6d55ee01d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c
c46cae4a17181c48bafc01237b83537df61c41ae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c
f42bfa3b5a801358d30f852625d8456290550f46 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h
59a87763c6abdc54828f2785a7d90e43e607bc87 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c
da3cc08f12ccee23bcb1c0d0c757b8bbcb81e4fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h
6fd6953e4ae0af707376a40ea0e4f3e70872be7b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h
162777624d03af2f17dfdc28bc35143e2ec6cdee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c
b82e5db65ad41764f456d6f924c89d76c165e48d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h
63e9d0416d5ca1fdf547b5fba9ec76e54690c9dc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h
b5ddae1e6960b13101aa38b2edc0610aed438ede - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c
499a3d9c61a86b667cc77cf8653a71f7fe85078a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h
ac842d9de5eae74ef02b0a75259fb016b80c6eac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c
88d336f88c9b72ec2c1352d4ebe00c0831eafbca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h
fb78615cde6323784f51d33f2acd61fd4030fee0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c
213ebb4fdfa3c2f64b5f998e2ad990e448d4a104 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h
a6174ad345cfdf926cbb4c86c7e8eeadfccb0ddf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c
fa785f8138598af783aefecf10b141d524e6bb42 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c
de97c5afdc34cb9aff23c3ba166e21f660cf1f47 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h
f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h
53b2c39666e1da206d44d69d54009f20440503bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h
93f9738c0e8aa715592306ddf023adf6b548dcc4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h
2b49950ba8f540ed4231c3334810edbb212bb859 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h
d614f90730e2ee78bc3aae47b4e7976500e166e7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h
4302502637f5c4146cb963801258444f2d8173e1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h
7bb406aa863430507bdf07b5f3e519c0d756220a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c
6f3fc9676df77fa24c49140331b87ed5988ed57c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h
cb02e66e5fc06aa340ab460c977961701e9ba295 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c
079ac6d2a90bd2fc9413e092a729202dbc5f724a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h
65d1ace1e68c9b39cce6db61aa8b86ee47a0ae4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c
e0988b45cf712f1a7662b6f822eaed3ffd9938f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
40c937ca657bda9c0b67bd24c5047d39e596c16c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c
f8e842add67dc070cc011ea103fc56cfd81c8b9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c
3a5457a216d197af8f120c660690a55ee44bdd8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c
21e3cf689d84b1a28e11f66cc68a0bc6713108b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c
edead99d125425ddf8f2fa4e4261b8cc3bf566fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c
b07c2c5e8df4de2bb9d242fd1606f1a57b8a742d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c
bfabd5155af3172e1c0a5a0b66721ff830c7b68f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h
cc635daf3d7a9a176580951841b82e9eb0d6f5ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c
757b3ecf94d0c8914a32c4bd302f8ccfa4027856 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c 6263c1ceca0797d34a102f9846acd1fdef06fb60 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h 3b0e038829647cfe0d8807579db33416a420d1d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h abda8536d885be1422810c184b936bbc880972eb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c f6f40d568bcf2ae89547ad054f9b5357bac366ab - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h ceb4dd72148dfe4a0581631147e8d7636abfd61f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h 41784541b2e9ee778b52e686288fe492c0276fec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c d32d0b65f5f76cb56ca7cd83c0adfe5cb5330924 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h d04adc777f547ae6d1369cf4c94963e5abf90b86 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c ac3965eea078f1998c3a3041f14212578682e599 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c 0dae533422e24d91a29c82d7be619160bbb6f6be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h 3f5a391895fc900396bae68761fe9b4dcb382ec0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h c3b4c6a1b90a1547e229bb2973eb19c01e1d0055 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h 285af0d0517cb191387a05ad596f74291ec81737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h 9646d1c4d472ad800c7c93eec15cc03dd9201073 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h c370a103a4c1c9cf2df3763988e77ef8f7bc6afb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h 2239839c8a780a87e786439a49ab63e25d25001a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h 09597f23d6a5440258656be81e7e6709390128f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h 8e0e60f6d30bbed679c43b4997875989314ee88c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c dec0f585ca46dc8e1aae49c8ea58db5a415de65c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h 871fd0260ab9c164b8f6a7d1aba4563af622f1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h 205490d6651110f28009e752fa286f818bed22fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h 07a37ff685e68a703455e0ed7db7940697487ed2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c cc71518b4151dc2ee0592bbd2866d437043d0e1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h 2c28d729456749f16ae03fb48b1e416706762805 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h 59c3612a596ad6b996c9d1506f9893bd1b5effee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c 81a6a28692f50efeebecad125de0585dd711ff36 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h 3f581df19314b273244c4c42ea915ec8ef0d8ce2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h e839f8a5ebef5f28818bb5824bd7c52320db9a74 - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h e0b8f64c042dcbb6340552cb3517dabdeb490f1b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h 7523c2ee9228ad0e2fb3566b23b9720d7896afae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c ad50b3dbe1685eefe51c4fc296f3eade70789dfb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h ca042cfcdfe8cc8a141f8bb5c9e6c05d8a71b707 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h 2ab6933e07a84c64dfcbeef3b3f4e3f14249d8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h ffd4f01212709e321d4097e424fe5d32038f5d8b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c 12776c69191b583ffcf0914697cf41802f52ef01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h 05cb2fed8648f07b54dc2e8bacbafb323ea8262e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h 0b15dd4515c5e436a659883a48e62bf3c68bf439 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h 0269da77a8db8efde1debc8236f2b3de2cd2597e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h 1bdccdbabf5ae52fd65b829c35079bb7a8734939 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c 410a759c949904b7ae1eecafb31143fad579c0a1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c 73c598515eb7985c8f4cace0946ec9613960be6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c 73a37ad59b9b13b61eb944748b6c2ba3cad7b630 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h 8915f69e67e1f3a809a5479e36280df06ce8dd90 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c d792fbb20b6ca5f2d62addf6a94b0c5027ae15fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h 6124890a54e529dff8b9d6ecf8f4bebe1e10a8a2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h cb03502bf603c88b709ec803b60efd1d6f8e5ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h b378d336af4d5cb4b1fb13b85042fad1fe02f4cc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h 7c1b36cca9e8bf1fe18284685a6a80620df348cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h cd833a822c1ce96c79135ba7221d24f347ceadb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h a016a7d8e07389736c388cb973f3b2a177ea917d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c 42d784e8b478bbf48293a805aa227f0abdf1923b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c b29061454e7d8daa0cef0787f12726d105faf5c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c 4b9f2ee66b59181f226e1af5087db6ea80f1ee27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h 23d16b4534103f24fac5bb86eb8bab40e5bcba57 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c e48b8b6ba9da5630a7ade526acbb94e50d9b636d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h b86536778197748c707c3e9e4c73c5fbcb037e32 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h 
07fd5f5534a6d751107f582ba187c7a53a139954 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h f4a5684d5a877b90c7ae7b66436117c6feb65f91 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h ab79a1418b65b9d65081456583169f516dd510c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c bd048add5f0781d90b55a5293881a2f59ace3070 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h e50c91a674508b23b072e0dd2edbf743f24b333d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c df070e15630a11b2f4b64d52228fa5a6e7ab2aa9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h 0f3140b5eae77a6055f32a91cb13b026bbb23905 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h 76b1f545e3712a2f8e7c31b101acd9dd682c52f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c 14450b18d002d4e1786d4630ef4f1994c07ef188 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h 7b0201852361118f277ee7cc6dd16212c0192f71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h 3d3385445934719abda1fefd4eb0762937be0e61 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c c4fde03d5939b0eef108fde9c2f10661568f22a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h 5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h 76b24227c65570898c19e16bf35b2cad143f3d05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h 61c7d3ac2dc61ee81abd743a6536a439592ee162 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h bf894a769c46d5d173e3875cd9667bb3fe82feb9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h f17b704f2489ffedcc057d4a6da77c42ece42923 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h 0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h 426c6ab6cecc3b1ba540b01309d1603301a86db1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h c33ab6494c9423c327707fce2bcb771328984a3c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h 6b27c9edf93f29a31787d9acaaefb2cefc31e7d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h 1938fd2511213c8003864d879cf1c41ae1169a5f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h cf3d1427394c425c543e253adf443192ca613762 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h ce3302c1890e2f7990434f7335cb619b12dee854 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h 97d0a067e89251672f191788abe81cf26dcb335f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h 61711ed293ee6974a6ed9a8a3732ae5fedcdc666 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h b39826404d84e0850aa3385691d8dde6e30d70d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h 51a209575d3e3fe8feb7269ece7df0846e18ca2a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h 277a2719f8c063037c6a9ed55ade2b1cb17f48ae - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h 74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h be7da8d1106ee14ff808d86abffb86794299b2df - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h 576216219d27aa887beeccefc22bcead4d1234d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h 5179f01acf7e9e251552dc17c0dcd84f7d341d82 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h 9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h f758ea5f9cbd23a678290ef0b8d98d470e3499e0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h 6756126ddd616d6393037bebf371fceacaf3a9f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h 20416f7239833dcaa743bbf988702610e9251289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h a29f55d5fbc90dade83df3ef3263018633675284 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h 82abc2458910250c1a912e023f37e87c1c9bbb9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h 889ba18a43cc2b5c5e970a90ddcb770ce873b785 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h b52e6a0499640e651aa4200b2c8a1653df04a420 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h 24d01769b39a6dd62574a95fad64443b05872151 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h efc50bb2ff6ccf1b7715fd413ca680034920758e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h ccca322d29ae171ee81c95d58e31f1c109429ae7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h 1e3bebe46b7f2f542eedace554a4156b3afb51f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h 5f60ac544252b894ac7ecc0c6dc4446e6275eae5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h 2baec15f4c68a9c59dd107a0db288e39914e6737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h 61e3704cd51161c9804cb168d5ce4553b7311973 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h 99a27d87c7f1487f8df5781d284c2e9a83525892 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h 497492340cea19a93b62da69ca2000b811c8f5d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h f3028fbcafe73212a94d295951122b532ff5445b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h b4bae9ea958b4d014908459e08c93319784c47dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h 2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h c9cb08c7c73c0bdd75a320640d16bf4b4defe873 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h f19dad1746e639d866c700c2f871fcc0144f2e5e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h f1713ecc0b3e58e46c346409dbf4630aa6f7f3ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h 255c28b9bd27098382bace05af3ad7f195d12895 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h 4453fe6463e3155063f2bdbf36f44697606a80a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h 7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h b259f23312abe56d34a8f0da36ef549ef60ba5b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/traceable.h 7e75b5d99376fba058b31996d49449f8fe62d3f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h fd780f85cb1cd0fd3914fa31d1bd4933437b791d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/tracer.h 3a28bf1692efb34d2161907c3781401951cc2d4f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h c8496199cd808ed4c79d8e149961e721ad96714e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h e5b881419bc00d925eba9f8493f6b36cf3ce7ca7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h 408c0340350b813c3cba17fd36171075e156df72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h af25180a08db4d5d20afd09f948b15d8c4d2d738 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h 457c02092adfc1587d6e3cd866e28c567acbc43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h cbfff1f06eecc99fb5a1c82d43397043058f02fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h f929d43974893cd155ab2f5f77606f0040fe3e39 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h b5859c7862fb3eeb266f7213845885789801194a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h 37f267155ddfc3db38f110dbb0397f0463d055ff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h ed496ab6e8b64d3398f929146e908c5a453a03d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h b319914c97f9978488e8fb049d39c72ed64fd4d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h 8ef620afdf720259cead00d20fae73d31e59c2f7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h 2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h ea32018e3464bb1ac792e39227badf482fa2dc67 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h 5b151d0d97b83c9fb76b76c476947f9e15e774ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h 0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h 4c386104eaead66c66df11258c3f1182b46e96ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h a5f49a031db4171228a27482d091283e84632ace - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h d15991bc770c5ab41fe746995294c5213efa056b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h 5ae08b2077506cbc41e40e1b3672e615ce9d910f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h 02d6a37ef1bb057604cb98a905fa02429f200c96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h 1a08e83fd6f0a072d6887c60c529e29211bcd007 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h 2d4afabd63699feec3aea5e89601db009fc51a08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h 5e9928552086947b10092792db4a8c4c57a84adf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h 2f05394872ffa95d700b7822489fa59f74ad5819 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h fff3ebc8527b34f8c463daad4d20ee5e33321344 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h 04dba2b7a6a360f3e855a7d6a7484ddcdfb90c19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h f8d9eb5f6a6883de962b63b4b7de35c01b20182f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h 601edb7333b87349d791d430f1cac84fb6fbb919 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h 9255fff39d7422ca4a56ba5ab60866779201d3e8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h 8dd7f2d9956278ed036bbc288bff4dde86a9b509 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h e53d5fc9b66dbec4c947224050866cec30b2f537 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h 398e4cd63852a18da6e42b920eacd927a2c38bc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h ba3c81e9eae32eefbf81818b48fdf6ccd7e73163 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h 18321894aa7631b491ea39edc2d45d1028cdc9c6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h 167f49cccc912430bb6b3cb77395f665a32cc8be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h 1ed5d8ae82f37112b163187fa48d2720957e6bdf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h 62a18f19f79512ebccdf286068e0b557c7926e13 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h 00433b51c4d6254fd4dfc3dcd9b4ad59e485e7c0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h 1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h 
5cadc87ba685991c7d4c6d453dcc9a2cca4398bf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h 664ff0e10e893923b70425fa49c9c48ed0735573 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h bdb558ee8f782e6be06fc262820f6bd9ce75bd51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h 56b8bae7756ed36d0831f76f95033f74eaab01db - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h 7239704e6fe88b9d75984fb5e9f4b5706502d7f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h e08146f5de1596f5337c49cfbe180e30e880dedb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h d2c035e67e295b8f33f0fc52d9c30e43c5d7c2ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h cd033fe116a41285a979e629a2ee7b11ec99369f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h 2dec1c73507f66736674d203cc4a00813ccb11bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h a0d3d164eb92280353cdc4458d2561aae8a68c1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h 89ece4711626bf1e4197c69bd5754e2798214d76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h bacdb2c1a1dbf182a0a3be15efa0a5f83365118f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h df174d6b4f718ef699ca6f38c16aaeffa111ad3c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h 841ddca998b570feb1d59b50d644c8f2b59ae8e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h b795f5cb77ecd2cc407102900b63977cfb34bbfd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h 3dcee4e110f4c571e7f49fae2f2d0630d008a906 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h 46345715dde843be2890b33f191b2f3b69385e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h b93c2532babf176f7b91735682e7d7cdc41f96f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h 147d47ef4bd860394d1d8ae82c68d97887e2898b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h 6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h 3e656d5ed1f5df898ec444921ce77a40ead66b28 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h 3ac7ddf3d402f3fd20cffe9d4e93f457de319605 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h 2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h 22420ad669a9809602f111385b7840556e58ecff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h 6ad1beaa2783a57330240d47b373930cd36ca5d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h 2805fad632acad045044e0b8417de88032177300 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h 23afbd04f4e4b3301edcfdec003c8e936d898e38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h 
eedda5c4b0611c3b95f726b0a2db4b0a23b7b1cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h a8c9b83169aceb5f97d9f7a411db449496dc18f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h aafca30178f49676f640be9c6d34f623a3e3a9a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h 600ad8781585e87df49ab1aaa39a07c8e8de74f5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h 0747ee16c7e6c726f568867d0fbbad411c8795c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h 2a76929dc6b0e8624d02002600bc454cc851dee4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h 1d6a239ed6c8dab1397f056a81ff456141ec7f9c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h 31f2042e852f074970644903335af5ffa2b59c38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h 65a237b66732aafe39bc4a14d87debd2b094fb83 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h c9e75f7b02241ededa5328a4f559e70dec60d159 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h 3924b67e6d63e9a15876331c695daaf679454b05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h a28ab42de95e4878fb46e19d7b965c23f92b3213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h 4cd6b110470da3aee29e999e096ca582104fab21 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h 1dacc1c1efc757c12e4c64eac171474a798b86fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h 969cbac56935a80fafd7cceff157b27e623f9429 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h Change-Id: I0561bddf423eb47180bdf85ccb3d24cafebfb44d --- .../COPYING | 369 + .../Makefile | 76 + .../README.md | 160 + .../SECURITY.md | 16 + .../kernel-open/Kbuild | 250 + .../kernel-open/Makefile | 126 + .../kernel-open/common/inc/conftest.h | 34 + .../kernel-open/common/inc/cpuopsys.h | 453 + .../common/inc/dce_rm_client_ipc.h | 35 + .../kernel-open/common/inc/nv-caps.h | 94 + .../kernel-open/common/inc/nv-dmabuf.h | 31 + .../kernel-open/common/inc/nv-gpu-info.h | 44 + .../kernel-open/common/inc/nv-hash.h | 96 + .../kernel-open/common/inc/nv-hypervisor.h | 125 + .../kernel-open/common/inc/nv-ioctl-numa.h | 84 + .../kernel-open/common/inc/nv-ioctl-numbers.h | 43 + .../kernel-open/common/inc/nv-ioctl.h | 145 + .../common/inc/nv-kernel-interface-api.h | 41 + .../kernel-open/common/inc/nv-kref.h | 61 + .../kernel-open/common/inc/nv-kthread-q.h | 255 + .../kernel-open/common/inc/nv-linux.h | 2058 +++++ .../kernel-open/common/inc/nv-list-helpers.h | 93 + .../kernel-open/common/inc/nv-lock.h | 92 + .../kernel-open/common/inc/nv-memdbg.h | 49 + .../kernel-open/common/inc/nv-mm.h | 264 + .../common/inc/nv-modeset-interface.h | 122 + .../kernel-open/common/inc/nv-msi.h | 115 + .../kernel-open/common/inc/nv-pci-types.h | 36 + .../kernel-open/common/inc/nv-pci.h | 48 + .../kernel-open/common/inc/nv-pgprot.h | 134 + .../kernel-open/common/inc/nv-platform.h | 40 + .../kernel-open/common/inc/nv-procfs-utils.h | 227 + .../kernel-open/common/inc/nv-procfs.h | 28 + .../kernel-open/common/inc/nv-proto.h | 98 + .../common/inc/nv-register-module.h 
| 55 + .../kernel-open/common/inc/nv-retpoline.h | 82 + .../kernel-open/common/inc/nv-time.h | 251 + .../kernel-open/common/inc/nv-timer.h | 66 + .../kernel-open/common/inc/nv.h | 1103 +++ .../kernel-open/common/inc/nvCpuUuid.h | 44 + .../common/inc/nv_firmware_types.h | 34 + .../common/inc/nv_speculation_barrier.h | 227 + .../kernel-open/common/inc/nv_stdarg.h | 39 + .../kernel-open/common/inc/nv_uvm_interface.h | 1520 ++++ .../kernel-open/common/inc/nv_uvm_types.h | 964 +++ .../kernel-open/common/inc/nvgputypes.h | 177 + .../kernel-open/common/inc/nvi2c.h | 37 + .../kernel-open/common/inc/nvimpshared.h | 108 + .../kernel-open/common/inc/nvkms-api-types.h | 607 ++ .../kernel-open/common/inc/nvkms-format.h | 125 + .../kernel-open/common/inc/nvkms-kapi.h | 1081 +++ .../kernel-open/common/inc/nvlimits.h | 59 + .../kernel-open/common/inc/nvmisc.h | 915 ++ .../kernel-open/common/inc/nvstatus.h | 123 + .../kernel-open/common/inc/nvstatuscodes.h | 162 + .../kernel-open/common/inc/nvtypes.h | 662 ++ .../kernel-open/common/inc/os-interface.h | 257 + .../common/inc/os/nv_memory_type.h | 41 + .../common/inc/os_dsi_panel_props.h | 364 + .../kernel-open/common/inc/os_gpio.h | 32 + .../kernel-open/common/inc/rm-gpu-ops.h | 110 + .../kernel-open/conftest.sh | 6067 ++++++++++++++ .../kernel-open/count-lines.mk | 25 + .../kernel-open/dkms.conf | 12 + .../kernel-open/nvidia-drm/nv-pci-table.c | 79 + .../kernel-open/nvidia-drm/nv-pci-table.h | 31 + .../nvidia-drm/nvidia-dma-fence-helper.h | 121 + .../nvidia-drm/nvidia-dma-resv-helper.h | 126 + .../nvidia-drm/nvidia-drm-conftest.h | 64 + .../nvidia-drm/nvidia-drm-connector.c | 472 ++ .../nvidia-drm/nvidia-drm-connector.h | 89 + .../kernel-open/nvidia-drm/nvidia-drm-crtc.c | 1415 ++++ .../kernel-open/nvidia-drm/nvidia-drm-crtc.h | 300 + .../kernel-open/nvidia-drm/nvidia-drm-drv.c | 1063 +++ .../kernel-open/nvidia-drm/nvidia-drm-drv.h | 36 + .../nvidia-drm/nvidia-drm-encoder.c | 352 + .../nvidia-drm/nvidia-drm-encoder.h | 68 + .../kernel-open/nvidia-drm/nvidia-drm-fb.c | 257 + .../kernel-open/nvidia-drm/nvidia-drm-fb.h | 66 + .../nvidia-drm/nvidia-drm-format.c | 163 + .../nvidia-drm/nvidia-drm-format.h | 43 + .../nvidia-drm/nvidia-drm-gem-dma-buf.c | 228 + .../nvidia-drm/nvidia-drm-gem-dma-buf.h | 76 + .../nvidia-drm/nvidia-drm-gem-nvkms-memory.c | 592 ++ .../nvidia-drm/nvidia-drm-gem-nvkms-memory.h | 110 + .../nvidia-drm/nvidia-drm-gem-user-memory.c | 217 + .../nvidia-drm/nvidia-drm-gem-user-memory.h | 72 + .../kernel-open/nvidia-drm/nvidia-drm-gem.c | 399 + .../kernel-open/nvidia-drm/nvidia-drm-gem.h | 211 + .../nvidia-drm/nvidia-drm-helper.c | 204 + .../nvidia-drm/nvidia-drm-helper.h | 584 ++ .../kernel-open/nvidia-drm/nvidia-drm-ioctl.h | 235 + .../kernel-open/nvidia-drm/nvidia-drm-linux.c | 186 + .../nvidia-drm/nvidia-drm-modeset.c | 577 ++ .../nvidia-drm/nvidia-drm-modeset.h | 53 + .../nvidia-drm/nvidia-drm-os-interface.h | 56 + .../nvidia-drm/nvidia-drm-prime-fence.c | 527 ++ .../nvidia-drm/nvidia-drm-prime-fence.h | 48 + .../kernel-open/nvidia-drm/nvidia-drm-priv.h | 144 + .../kernel-open/nvidia-drm/nvidia-drm-utils.c | 231 + .../kernel-open/nvidia-drm/nvidia-drm-utils.h | 54 + .../kernel-open/nvidia-drm/nvidia-drm.Kbuild | 124 + .../kernel-open/nvidia-drm/nvidia-drm.c | 59 + .../kernel-open/nvidia-drm/nvidia-drm.h | 31 + .../kernel-open/nvidia-modeset/nv-kthread-q.c | 335 + .../nvidia-modeset/nvidia-modeset-linux.c | 1851 +++++ .../nvidia-modeset-os-interface.h | 330 + .../nvidia-modeset/nvidia-modeset.Kbuild | 99 + 
.../kernel-open/nvidia-modeset/nvkms-ioctl.h | 73 + .../kernel-open/nvidia-modeset/nvkms.h | 90 + .../kernel-open/nvidia/nv-acpi.c | 1411 ++++ .../kernel-open/nvidia/nv-backlight.c | 81 + .../kernel-open/nvidia/nv-caps.c | 853 ++ .../kernel-open/nvidia/nv-clk.c | 630 ++ .../kernel-open/nvidia/nv-cray.c | 217 + .../kernel-open/nvidia/nv-dma.c | 1251 +++ .../kernel-open/nvidia/nv-dmabuf.c | 896 ++ .../nvidia/nv-dsi-parse-panel-props.c | 1017 +++ .../kernel-open/nvidia/nv-frontend.c | 412 + .../kernel-open/nvidia/nv-frontend.h | 47 + .../kernel-open/nvidia/nv-gpio.c | 264 + .../kernel-open/nvidia/nv-host1x.c | 80 + .../kernel-open/nvidia/nv-i2c.c | 564 ++ .../kernel-open/nvidia/nv-ibmnpu.c | 448 + .../kernel-open/nvidia/nv-ibmnpu.h | 80 + .../kernel-open/nvidia/nv-imp.c | 702 ++ .../kernel-open/nvidia/nv-ipc-soc.c | 158 + .../kernel-open/nvidia/nv-kthread-q.c | 335 + .../kernel-open/nvidia/nv-memdbg.c | 232 + .../kernel-open/nvidia/nv-mmap.c | 781 ++ .../kernel-open/nvidia/nv-modeset-interface.c | 146 + .../kernel-open/nvidia/nv-msi.c | 169 + .../kernel-open/nvidia/nv-nano-timer.c | 176 + .../kernel-open/nvidia/nv-p2p.c | 958 +++ .../kernel-open/nvidia/nv-p2p.h | 427 + .../kernel-open/nvidia/nv-pat.c | 478 ++ .../kernel-open/nvidia/nv-pat.h | 59 + .../kernel-open/nvidia/nv-pci-table.c | 79 + .../kernel-open/nvidia/nv-pci-table.h | 31 + .../kernel-open/nvidia/nv-pci.c | 1099 +++ .../kernel-open/nvidia/nv-platform-pm.c | 122 + .../kernel-open/nvidia/nv-platform.c | 1544 ++++ .../kernel-open/nvidia/nv-procfs-utils.c | 47 + .../kernel-open/nvidia/nv-procfs.c | 1477 ++++ .../kernel-open/nvidia/nv-reg.h | 937 +++ .../kernel-open/nvidia/nv-report-err.c | 89 + .../kernel-open/nvidia/nv-report-err.h | 66 + .../kernel-open/nvidia/nv-rsync.c | 201 + .../kernel-open/nvidia/nv-rsync.h | 57 + .../kernel-open/nvidia/nv-usermap.c | 161 + .../kernel-open/nvidia/nv-vm.c | 736 ++ .../kernel-open/nvidia/nv-vtophys.c | 39 + .../kernel-open/nvidia/nv.c | 5640 +++++++++++++ .../kernel-open/nvidia/nv_gpu_ops.h | 302 + .../kernel-open/nvidia/nv_uvm_interface.c | 1544 ++++ .../kernel-open/nvidia/nvidia-sources.Kbuild | 44 + .../kernel-open/nvidia/nvidia.Kbuild | 265 + .../kernel-open/nvidia/os-interface.c | 2159 +++++ .../kernel-open/nvidia/os-mlock.c | 287 + .../kernel-open/nvidia/os-pci.c | 206 + .../kernel-open/nvidia/os-registry.c | 336 + .../kernel-open/nvidia/os-usermap.c | 78 + .../kernel-open/nvidia/rmp2pdefines.h | 31 + .../src/common/displayport/inc/dp_address.h | 284 + .../src/common/displayport/inc/dp_auxbus.h | 80 + .../src/common/displayport/inc/dp_auxdefs.h | 97 + .../src/common/displayport/inc/dp_auxretry.h | 181 + .../src/common/displayport/inc/dp_bitstream.h | 98 + .../src/common/displayport/inc/dp_buffer.h | 97 + .../common/displayport/inc/dp_configcaps.h | 535 ++ .../src/common/displayport/inc/dp_connector.h | 681 ++ .../common/displayport/inc/dp_connectorimpl.h | 627 ++ .../src/common/displayport/inc/dp_crc.h | 41 + .../common/displayport/inc/dp_deviceimpl.h | 524 ++ .../src/common/displayport/inc/dp_discovery.h | 328 + .../src/common/displayport/inc/dp_edid.h | 323 + .../common/displayport/inc/dp_evoadapter.h | 410 + .../src/common/displayport/inc/dp_groupimpl.h | 122 + .../src/common/displayport/inc/dp_guid.h | 120 + .../src/common/displayport/inc/dp_hostimp.h | 55 + .../src/common/displayport/inc/dp_internal.h | 139 + .../common/displayport/inc/dp_linkconfig.h | 450 + .../common/displayport/inc/dp_linkedlist.h | 143 + .../src/common/displayport/inc/dp_list.h | 84 + 
.../src/common/displayport/inc/dp_mainlink.h | 265 + .../src/common/displayport/inc/dp_merger.h | 148 + .../displayport/inc/dp_messagecodings.h | 559 ++ .../common/displayport/inc/dp_messageheader.h | 94 + .../src/common/displayport/inc/dp_messages.h | 324 + .../src/common/displayport/inc/dp_object.h | 132 + .../displayport/inc/dp_regkeydatabase.h | 102 + .../common/displayport/inc/dp_ringbuffer.h | 33 + .../src/common/displayport/inc/dp_splitter.h | 156 + .../src/common/displayport/inc/dp_timeout.h | 74 + .../src/common/displayport/inc/dp_timer.h | 104 + .../src/common/displayport/inc/dp_tracing.h | 128 + .../src/common/displayport/inc/dp_vrr.h | 95 + .../common/displayport/inc/dp_wardatabase.h | 75 + .../src/common/displayport/inc/dp_watermark.h | 134 + .../inc/dptestutil/dp_testmessage.h | 122 + .../common/displayport/src/dp_auxretry.cpp | 315 + .../common/displayport/src/dp_bitstream.cpp | 204 + .../src/common/displayport/src/dp_buffer.cpp | 267 + .../common/displayport/src/dp_configcaps.cpp | 3170 +++++++ .../displayport/src/dp_connectorimpl.cpp | 6798 +++++++++++++++ .../src/common/displayport/src/dp_crc.cpp | 93 + .../common/displayport/src/dp_deviceimpl.cpp | 2675 ++++++ .../common/displayport/src/dp_discovery.cpp | 938 +++ .../src/common/displayport/src/dp_edid.cpp | 625 ++ .../common/displayport/src/dp_evoadapter.cpp | 1846 +++++ .../common/displayport/src/dp_groupimpl.cpp | 331 + .../src/common/displayport/src/dp_guid.cpp | 81 + .../src/common/displayport/src/dp_list.cpp | 159 + .../src/common/displayport/src/dp_merger.cpp | 310 + .../displayport/src/dp_messagecodings.cpp | 690 ++ .../displayport/src/dp_messageheader.cpp | 85 + .../common/displayport/src/dp_messages.cpp | 606 ++ .../common/displayport/src/dp_mst_edid.cpp | 188 + .../common/displayport/src/dp_splitter.cpp | 314 + .../common/displayport/src/dp_sst_edid.cpp | 342 + .../src/common/displayport/src/dp_timer.cpp | 199 + .../src/common/displayport/src/dp_vrr.cpp | 247 + .../common/displayport/src/dp_wardatabase.cpp | 645 ++ .../common/displayport/src/dp_watermark.cpp | 872 ++ .../src/dptestutil/dp_testmessage.cpp | 94 + .../src/common/inc/displayport/displayport.h | 631 ++ .../src/common/inc/displayport/dpcd.h | 1501 ++++ .../src/common/inc/displayport/dpcd14.h | 790 ++ .../src/common/inc/displayport/dpcd20.h | 48 + .../src/common/inc/hdmi_spec.h | 86 + .../src/common/inc/nvBinSegment.h | 36 + .../src/common/inc/nvBldVer.h | 72 + .../src/common/inc/nvCpuUuid.h | 44 + .../src/common/inc/nvHdmiFrlCommon.h | 134 + .../src/common/inc/nvPNPVendorIds.h | 557 ++ .../src/common/inc/nvSha1.h | 390 + .../src/common/inc/nvUnixVersion.h | 15 + .../src/common/inc/nvVer.h | 17 + .../src/common/inc/nv_list.h | 558 ++ .../src/common/inc/nv_speculation_barrier.h | 219 + .../src/common/inc/nvctassert.h | 189 + .../src/common/inc/nvlog_defs.h | 529 ++ .../src/common/inc/nvlog_inc.h | 39 + .../src/common/inc/nvlog_inc2.h | 46 + .../src/common/inc/rmosxfac.h | 43 + .../swref/published/disp/v03_00/dev_disp.h | 67 + .../swref/published/disp/v04_02/dev_disp.h | 27 + .../src/common/inc/swref/published/nv_arch.h | 114 + .../src/common/inc/swref/published/nv_ref.h | 154 + .../swref/published/turing/tu102/dev_mmu.h | 119 + .../published/turing/tu102/kind_macros.h | 31 + .../modeset/hdmipacket/nvhdmi_frlInterface.h | 268 + .../src/common/modeset/hdmipacket/nvhdmipkt.c | 616 ++ .../src/common/modeset/hdmipacket/nvhdmipkt.h | 317 + .../modeset/hdmipacket/nvhdmipkt_0073.c | 385 + .../modeset/hdmipacket/nvhdmipkt_9171.c | 804 ++ 
.../modeset/hdmipacket/nvhdmipkt_9271.c | 71 + .../modeset/hdmipacket/nvhdmipkt_9471.c | 71 + .../modeset/hdmipacket/nvhdmipkt_9571.c | 71 + .../modeset/hdmipacket/nvhdmipkt_C371.c | 71 + .../modeset/hdmipacket/nvhdmipkt_C671.c | 1389 ++++ .../modeset/hdmipacket/nvhdmipkt_class.h | 179 + .../modeset/hdmipacket/nvhdmipkt_common.h | 114 + .../modeset/hdmipacket/nvhdmipkt_internal.h | 60 + .../src/common/modeset/timing/displayid.h | 776 ++ .../src/common/modeset/timing/displayid20.h | 752 ++ .../src/common/modeset/timing/dpsdp.h | 373 + .../src/common/modeset/timing/edid.h | 352 + .../src/common/modeset/timing/nvt_cvt.c | 627 ++ .../common/modeset/timing/nvt_displayid20.c | 1892 +++++ .../src/common/modeset/timing/nvt_dmt.c | 272 + .../src/common/modeset/timing/nvt_dsc_pps.c | 2303 +++++ .../src/common/modeset/timing/nvt_dsc_pps.h | 324 + .../src/common/modeset/timing/nvt_edid.c | 2662 ++++++ .../common/modeset/timing/nvt_edidext_861.c | 2942 +++++++ .../modeset/timing/nvt_edidext_displayid.c | 1437 ++++ .../modeset/timing/nvt_edidext_displayid20.c | 381 + .../src/common/modeset/timing/nvt_gtf.c | 138 + .../src/common/modeset/timing/nvt_tv.c | 192 + .../src/common/modeset/timing/nvt_util.c | 370 + .../src/common/modeset/timing/nvtiming.h | 5415 ++++++++++++ .../src/common/modeset/timing/nvtiming_pvt.h | 144 + .../src/common/sdk/nvidia/inc/class/cl0000.h | 52 + .../nvidia/inc/class/cl0000_notification.h | 68 + .../src/common/sdk/nvidia/inc/class/cl0001.h | 37 + .../src/common/sdk/nvidia/inc/class/cl0002.h | 51 + .../src/common/sdk/nvidia/inc/class/cl0004.h | 50 + .../src/common/sdk/nvidia/inc/class/cl0005.h | 58 + .../nvidia/inc/class/cl0005_notification.h | 51 + .../src/common/sdk/nvidia/inc/class/cl0020.h | 31 + .../src/common/sdk/nvidia/inc/class/cl003e.h | 51 + .../src/common/sdk/nvidia/inc/class/cl0040.h | 55 + .../src/common/sdk/nvidia/inc/class/cl0041.h | 45 + .../src/common/sdk/nvidia/inc/class/cl0071.h | 38 + .../src/common/sdk/nvidia/inc/class/cl0073.h | 55 + .../src/common/sdk/nvidia/inc/class/cl0076.h | 38 + .../src/common/sdk/nvidia/inc/class/cl0080.h | 64 + .../nvidia/inc/class/cl0080_notification.h | 45 + .../src/common/sdk/nvidia/inc/class/cl0092.h | 68 + .../src/common/sdk/nvidia/inc/class/cl00b1.h | 28 + .../src/common/sdk/nvidia/inc/class/cl00c1.h | 69 + .../src/common/sdk/nvidia/inc/class/cl00c3.h | 43 + .../src/common/sdk/nvidia/inc/class/cl00f2.h | 38 + .../src/common/sdk/nvidia/inc/class/cl00fc.h | 39 + .../src/common/sdk/nvidia/inc/class/cl2080.h | 497 ++ .../src/common/sdk/nvidia/inc/class/cl2081.h | 43 + .../src/common/sdk/nvidia/inc/class/cl2082.h | 43 + .../src/common/sdk/nvidia/inc/class/cl30f1.h | 56 + .../nvidia/inc/class/cl30f1_notification.h | 74 + .../src/common/sdk/nvidia/inc/class/cl402c.h | 47 + .../src/common/sdk/nvidia/inc/class/cl5070.h | 43 + .../nvidia/inc/class/cl5070_notification.h | 44 + .../src/common/sdk/nvidia/inc/class/cl84a0.h | 158 + .../src/common/sdk/nvidia/inc/class/cl900e.h | 39 + .../src/common/sdk/nvidia/inc/class/cl9010.h | 39 + .../sdk/nvidia/inc/class/cl907dswspare.h | 37 + .../src/common/sdk/nvidia/inc/class/cl90cd.h | 244 + .../src/common/sdk/nvidia/inc/class/cl90ec.h | 46 + .../src/common/sdk/nvidia/inc/class/cl90f1.h | 39 + .../src/common/sdk/nvidia/inc/class/cl9170.h | 41 + .../src/common/sdk/nvidia/inc/class/cl9171.h | 295 + .../src/common/sdk/nvidia/inc/class/cl917a.h | 56 + .../src/common/sdk/nvidia/inc/class/cl917b.h | 60 + .../src/common/sdk/nvidia/inc/class/cl917c.h | 298 + .../sdk/nvidia/inc/class/cl917cswspare.h | 37 + 
.../src/common/sdk/nvidia/inc/class/cl917d.h | 1551 ++++ .../sdk/nvidia/inc/class/cl917dcrcnotif.h | 44 + .../src/common/sdk/nvidia/inc/class/cl917e.h | 265 + .../src/common/sdk/nvidia/inc/class/cl9270.h | 41 + .../src/common/sdk/nvidia/inc/class/cl9271.h | 295 + .../src/common/sdk/nvidia/inc/class/cl927c.h | 299 + .../src/common/sdk/nvidia/inc/class/cl927d.h | 1556 ++++ .../src/common/sdk/nvidia/inc/class/cl9470.h | 41 + .../src/common/sdk/nvidia/inc/class/cl9471.h | 295 + .../src/common/sdk/nvidia/inc/class/cl947d.h | 1606 ++++ .../src/common/sdk/nvidia/inc/class/cl9570.h | 41 + .../src/common/sdk/nvidia/inc/class/cl9571.h | 295 + .../src/common/sdk/nvidia/inc/class/cl957d.h | 1602 ++++ .../src/common/sdk/nvidia/inc/class/cl9770.h | 41 + .../src/common/sdk/nvidia/inc/class/cl977d.h | 1587 ++++ .../src/common/sdk/nvidia/inc/class/cl9870.h | 41 + .../src/common/sdk/nvidia/inc/class/cl987d.h | 1590 ++++ .../common/sdk/nvidia/inc/class/clb0b5sw.h | 58 + .../src/common/sdk/nvidia/inc/class/clc370.h | 55 + .../src/common/sdk/nvidia/inc/class/clc371.h | 41 + .../common/sdk/nvidia/inc/class/clc372sw.h | 36 + .../src/common/sdk/nvidia/inc/class/clc373.h | 350 + .../src/common/sdk/nvidia/inc/class/clc37a.h | 213 + .../src/common/sdk/nvidia/inc/class/clc37b.h | 67 + .../src/common/sdk/nvidia/inc/class/clc37d.h | 953 +++ .../sdk/nvidia/inc/class/clc37dcrcnotif.h | 49 + .../sdk/nvidia/inc/class/clc37dswspare.h | 36 + .../src/common/sdk/nvidia/inc/class/clc37e.h | 498 ++ .../src/common/sdk/nvidia/inc/class/clc570.h | 47 + .../src/common/sdk/nvidia/inc/class/clc573.h | 598 ++ .../src/common/sdk/nvidia/inc/class/clc574.h | 45 + .../src/common/sdk/nvidia/inc/class/clc57a.h | 179 + .../src/common/sdk/nvidia/inc/class/clc57b.h | 64 + .../src/common/sdk/nvidia/inc/class/clc57d.h | 1277 +++ .../src/common/sdk/nvidia/inc/class/clc57e.h | 657 ++ .../common/sdk/nvidia/inc/class/clc57esw.h | 45 + .../src/common/sdk/nvidia/inc/class/clc670.h | 45 + .../src/common/sdk/nvidia/inc/class/clc671.h | 47 + .../src/common/sdk/nvidia/inc/class/clc673.h | 399 + .../src/common/sdk/nvidia/inc/class/clc67a.h | 181 + .../src/common/sdk/nvidia/inc/class/clc67b.h | 66 + .../src/common/sdk/nvidia/inc/class/clc67d.h | 1339 +++ .../src/common/sdk/nvidia/inc/class/clc67e.h | 700 ++ .../src/common/sdk/nvidia/inc/class/clc770.h | 45 + .../src/common/sdk/nvidia/inc/class/clc77f.h | 34 + .../src/common/sdk/nvidia/inc/cpuopsys.h | 419 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000base.h | 69 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000client.h | 166 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h | 326 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000event.h | 113 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h | 847 ++ .../inc/ctrl/ctrl0000/ctrl0000gpuacct.h | 255 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h | 32 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h | 101 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h | 636 ++ .../nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h | 98 + .../inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h | 112 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000system.h | 1276 +++ .../nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h | 433 + .../nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h | 32 + .../src/common/sdk/nvidia/inc/ctrl/ctrl0002.h | 178 + .../src/common/sdk/nvidia/inc/ctrl/ctrl0004.h | 93 + .../src/common/sdk/nvidia/inc/ctrl/ctrl0020.h | 80 + .../src/common/sdk/nvidia/inc/ctrl/ctrl003e.h | 191 + .../src/common/sdk/nvidia/inc/ctrl/ctrl0041.h | 472 ++ .../src/common/sdk/nvidia/inc/ctrl/ctrl0073.h | 45 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073base.h | 60 + 
.../nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h | 1261 +++ .../sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h | 2752 ++++++ .../nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h | 32 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073event.h | 32 + .../inc/ctrl/ctrl0073/ctrl0073internal.h | 47 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h | 32 + .../inc/ctrl/ctrl0073/ctrl0073specific.h | 1841 ++++ .../nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h | 166 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h | 32 + .../nvidia/inc/ctrl/ctrl0073/ctrl0073system.h | 1072 +++ .../src/common/sdk/nvidia/inc/ctrl/ctrl0080.h | 51 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080base.h | 73 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h | 138 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h | 112 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h | 32 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h | 32 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h | 908 ++ .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h | 232 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h | 642 ++ .../nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h | 585 ++ .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h | 274 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080host.h | 112 + .../inc/ctrl/ctrl0080/ctrl0080internal.h | 103 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h | 73 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h | 75 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h | 48 + .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h | 57 + .../nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h | 93 + .../src/common/sdk/nvidia/inc/ctrl/ctrl2080.h | 83 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080base.h | 112 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h | 240 + .../inc/ctrl/ctrl2080/ctrl2080boardobj.h | 35 + .../ctrl2080/ctrl2080boardobjgrpclasses.h | 34 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h | 1493 ++++ .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h | 314 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h | 41 + .../inc/ctrl/ctrl2080/ctrl2080clkavfs.h | 36 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080common.h | 34 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h | 185 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h | 105 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h | 63 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080event.h | 372 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h | 32 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h | 2907 +++++++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h | 759 ++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h | 210 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h | 410 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h | 34 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h | 36 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h | 3781 +++++++++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h | 96 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h | 1789 ++++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h | 263 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h | 82 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h | 55 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h | 365 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h | 30 + .../inc/ctrl/ctrl2080/ctrl2080internal.h | 2291 +++++ .../nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h | 34 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h | 323 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h | 338 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h | 505 ++ .../inc/ctrl/ctrl2080/ctrl2080perf_cf.h | 33 + .../ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h | 32 + 
.../nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h | 34 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080power.h | 32 + .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h | 368 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h | 32 + .../inc/ctrl/ctrl2080/ctrl2080thermal.h | 30 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h | 233 + .../inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h | 32 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h | 190 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h | 42 + .../nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h | 38 + .../src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h | 1493 ++++ .../src/common/sdk/nvidia/inc/ctrl/ctrl402c.h | 971 +++ .../nvidia/inc/ctrl/ctrl5070/ctrl5070base.h | 67 + .../nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h | 1181 +++ .../nvidia/inc/ctrl/ctrl5070/ctrl5070common.h | 79 + .../nvidia/inc/ctrl/ctrl5070/ctrl5070event.h | 143 + .../inc/ctrl/ctrl5070/ctrl5070impoverrides.h | 33 + .../sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h | 936 +++ .../sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h | 578 ++ .../nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h | 521 ++ .../nvidia/inc/ctrl/ctrl5070/ctrl5070system.h | 81 + .../nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h | 32 + .../src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h | 174 + .../src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h | 124 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370base.h | 67 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h | 300 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370event.h | 46 + .../sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h | 123 + .../nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h | 133 + .../nvidia/inc/ctrl/ctrlc372/ctrlc372base.h | 61 + .../nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h | 680 ++ .../src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h | 67 + .../common/sdk/nvidia/inc/dpringbuffertypes.h | 59 + .../src/common/sdk/nvidia/inc/nv-hypervisor.h | 125 + .../sdk/nvidia/inc/nv-kernel-interface-api.h | 41 + .../src/common/sdk/nvidia/inc/nv_stdarg.h | 39 + .../src/common/sdk/nvidia/inc/nv_vgpu_types.h | 61 + .../src/common/sdk/nvidia/inc/nvcfg_sdk.h | 29 + .../src/common/sdk/nvidia/inc/nvdisptypes.h | 92 + .../src/common/sdk/nvidia/inc/nverror.h | 281 + .../src/common/sdk/nvidia/inc/nvfixedtypes.h | 379 + .../src/common/sdk/nvidia/inc/nvgputypes.h | 177 + .../src/common/sdk/nvidia/inc/nvi2c.h | 37 + .../src/common/sdk/nvidia/inc/nvimpshared.h | 98 + .../src/common/sdk/nvidia/inc/nvlimits.h | 52 + .../src/common/sdk/nvidia/inc/nvmisc.h | 915 ++ .../src/common/sdk/nvidia/inc/nvos.h | 3163 +++++++ .../common/sdk/nvidia/inc/nvsecurityinfo.h | 71 + .../src/common/sdk/nvidia/inc/nvstatus.h | 123 + .../src/common/sdk/nvidia/inc/nvstatuscodes.h | 162 + .../src/common/sdk/nvidia/inc/nvtypes.h | 625 ++ .../src/common/sdk/nvidia/inc/rs_access.h | 272 + .../src/common/shared/nvstatus/nvstatus.c | 82 + .../src/common/softfloat/COPYING.txt | 37 + .../common/softfloat/nvidia/nv-softfloat.h | 163 + .../src/common/softfloat/nvidia/platform.h | 56 + .../source/8086-SSE/s_commonNaNToF16UI.c | 51 + .../source/8086-SSE/s_commonNaNToF32UI.c | 51 + .../source/8086-SSE/s_commonNaNToF64UI.c | 53 + .../source/8086-SSE/s_f32UIToCommonNaN.c | 59 + .../source/8086-SSE/s_f64UIToCommonNaN.c | 59 + .../source/8086-SSE/s_propagateNaNF32UI.c | 63 + .../source/8086-SSE/s_propagateNaNF64UI.c | 63 + .../source/8086-SSE/softfloat_raiseFlags.c | 52 + .../softfloat/source/8086-SSE/specialize.h | 208 + .../src/common/softfloat/source/f32_add.c | 61 + .../src/common/softfloat/source/f32_div.c | 176 + .../src/common/softfloat/source/f32_eq.c | 66 + .../softfloat/source/f32_eq_signaling.c | 61 + 
.../softfloat/source/f32_isSignalingNaN.c | 51 + .../src/common/softfloat/source/f32_le.c | 66 + .../common/softfloat/source/f32_le_quiet.c | 71 + .../src/common/softfloat/source/f32_lt.c | 66 + .../common/softfloat/source/f32_lt_quiet.c | 71 + .../src/common/softfloat/source/f32_mul.c | 137 + .../src/common/softfloat/source/f32_mulAdd.c | 60 + .../src/common/softfloat/source/f32_rem.c | 168 + .../common/softfloat/source/f32_roundToInt.c | 113 + .../src/common/softfloat/source/f32_sqrt.c | 121 + .../src/common/softfloat/source/f32_sub.c | 61 + .../src/common/softfloat/source/f32_to_f16.c | 88 + .../src/common/softfloat/source/f32_to_f64.c | 93 + .../src/common/softfloat/source/f32_to_i32.c | 84 + .../softfloat/source/f32_to_i32_r_minMag.c | 89 + .../src/common/softfloat/source/f32_to_i64.c | 84 + .../softfloat/source/f32_to_i64_r_minMag.c | 94 + .../src/common/softfloat/source/f32_to_ui32.c | 84 + .../softfloat/source/f32_to_ui32_r_minMag.c | 88 + .../src/common/softfloat/source/f32_to_ui64.c | 84 + .../softfloat/source/f32_to_ui64_r_minMag.c | 90 + .../src/common/softfloat/source/f64_add.c | 65 + .../src/common/softfloat/source/f64_div.c | 172 + .../src/common/softfloat/source/f64_eq.c | 66 + .../softfloat/source/f64_eq_signaling.c | 61 + .../softfloat/source/f64_isSignalingNaN.c | 51 + .../src/common/softfloat/source/f64_le.c | 67 + .../common/softfloat/source/f64_le_quiet.c | 72 + .../src/common/softfloat/source/f64_lt.c | 67 + .../common/softfloat/source/f64_lt_quiet.c | 72 + .../src/common/softfloat/source/f64_mul.c | 139 + .../src/common/softfloat/source/f64_mulAdd.c | 60 + .../src/common/softfloat/source/f64_rem.c | 185 + .../common/softfloat/source/f64_roundToInt.c | 113 + .../src/common/softfloat/source/f64_sqrt.c | 133 + .../src/common/softfloat/source/f64_sub.c | 65 + .../src/common/softfloat/source/f64_to_f32.c | 88 + .../src/common/softfloat/source/f64_to_i32.c | 82 + .../softfloat/source/f64_to_i32_r_minMag.c | 96 + .../src/common/softfloat/source/f64_to_i64.c | 84 + .../softfloat/source/f64_to_i64_r_minMag.c | 100 + .../src/common/softfloat/source/f64_to_ui32.c | 82 + .../softfloat/source/f64_to_ui32_r_minMag.c | 88 + .../src/common/softfloat/source/f64_to_ui64.c | 84 + .../softfloat/source/f64_to_ui64_r_minMag.c | 93 + .../src/common/softfloat/source/i32_to_f32.c | 58 + .../src/common/softfloat/source/i32_to_f64.c | 65 + .../src/common/softfloat/source/i64_to_f32.c | 70 + .../src/common/softfloat/source/i64_to_f64.c | 58 + .../softfloat/source/include/internals.h | 144 + .../softfloat/source/include/primitiveTypes.h | 83 + .../softfloat/source/include/primitives.h | 282 + .../softfloat/source/include/softfloat.h | 167 + .../source/include/softfloat_types.h | 81 + .../common/softfloat/source/s_addMagsF32.c | 126 + .../common/softfloat/source/s_addMagsF64.c | 128 + .../softfloat/source/s_approxRecipSqrt32_1.c | 74 + .../softfloat/source/s_approxRecipSqrt_1Ks.c | 49 + .../softfloat/source/s_countLeadingZeros64.c | 73 + .../softfloat/source/s_countLeadingZeros8.c | 59 + .../common/softfloat/source/s_mul64To128.c | 67 + .../src/common/softfloat/source/s_mulAddF32.c | 224 + .../src/common/softfloat/source/s_mulAddF64.c | 243 + .../softfloat/source/s_normRoundPackToF32.c | 58 + .../softfloat/source/s_normRoundPackToF64.c | 58 + .../softfloat/source/s_normSubnormalF32Sig.c | 52 + .../softfloat/source/s_normSubnormalF64Sig.c | 52 + .../softfloat/source/s_roundPackToF16.c | 113 + .../softfloat/source/s_roundPackToF32.c | 113 + .../softfloat/source/s_roundPackToF64.c | 117 + 
.../common/softfloat/source/s_roundToI32.c | 84 + .../common/softfloat/source/s_roundToI64.c | 89 + .../common/softfloat/source/s_roundToUI32.c | 80 + .../common/softfloat/source/s_roundToUI64.c | 85 + .../softfloat/source/s_shiftRightJam128.c | 70 + .../common/softfloat/source/s_subMagsF32.c | 143 + .../common/softfloat/source/s_subMagsF64.c | 141 + .../common/softfloat/source/softfloat_state.c | 49 + .../src/common/softfloat/source/ui32_to_f32.c | 57 + .../src/common/softfloat/source/ui32_to_f64.c | 59 + .../src/common/softfloat/source/ui64_to_f32.c | 64 + .../src/common/softfloat/source/ui64_to_f64.c | 59 + .../src/common/unix/common/inc/nv-float.h | 40 + .../src/common/unix/common/inc/nv_assert.h | 82 + .../common/unix/common/inc/nv_common_utils.h | 98 + .../src/common/unix/common/inc/nv_dpy_id.h | 369 + .../common/unix/common/inc/nv_mode_timings.h | 163 + .../utils/interface/nv_memory_tracker.h | 62 + .../utils/interface/nv_mode_timings_utils.h | 135 + .../common/utils/interface/nv_vasprintf.h | 65 + .../common/utils/interface/unix_rm_handle.h | 122 + .../unix/common/utils/nv_memory_tracker.c | 230 + .../unix/common/utils/nv_mode_timings_utils.c | 159 + .../common/unix/common/utils/nv_vasprintf.c | 74 + .../common/unix/common/utils/unix_rm_handle.c | 385 + .../src/nvidia-modeset/Makefile | 144 + .../include/dp/nvdp-connector-event-sink.h | 43 + .../include/dp/nvdp-connector.h | 100 + .../nvidia-modeset/include/dp/nvdp-device.h | 43 + .../nvidia-modeset/include/dp/nvdp-timer.h | 42 + .../include/g_nvkms-evo-states.h | 41 + .../nvidia-modeset/include/nvkms-3dvision.h | 39 + .../nvidia-modeset/include/nvkms-attributes.h | 51 + .../include/nvkms-console-restore.h | 31 + .../src/nvidia-modeset/include/nvkms-cursor.h | 53 + .../src/nvidia-modeset/include/nvkms-dma.h | 286 + .../src/nvidia-modeset/include/nvkms-dpy.h | 85 + .../src/nvidia-modeset/include/nvkms-event.h | 32 + .../nvidia-modeset/include/nvkms-evo-states.h | 107 + .../src/nvidia-modeset/include/nvkms-evo.h | 299 + .../src/nvidia-modeset/include/nvkms-evo1.h | 59 + .../include/nvkms-flip-workarea.h | 51 + .../src/nvidia-modeset/include/nvkms-flip.h | 92 + .../nvidia-modeset/include/nvkms-framelock.h | 79 + .../src/nvidia-modeset/include/nvkms-hal.h | 31 + .../src/nvidia-modeset/include/nvkms-hdmi.h | 77 + .../src/nvidia-modeset/include/nvkms-lut.h | 54 + .../nvidia-modeset/include/nvkms-modepool.h | 64 + .../include/nvkms-modeset-types.h | 74 + .../include/nvkms-modeset-workarea.h | 61 + .../nvidia-modeset/include/nvkms-modeset.h | 58 + .../include/nvkms-prealloc-types.h | 46 + .../nvidia-modeset/include/nvkms-prealloc.h | 36 + .../nvidia-modeset/include/nvkms-private.h | 81 + .../src/nvidia-modeset/include/nvkms-rm.h | 152 + .../src/nvidia-modeset/include/nvkms-rmapi.h | 111 + .../nvidia-modeset/include/nvkms-softfloat.h | 90 + .../nvidia-modeset/include/nvkms-surface.h | 96 + .../src/nvidia-modeset/include/nvkms-types.h | 2737 ++++++ .../src/nvidia-modeset/include/nvkms-utils.h | 273 + .../src/nvidia-modeset/include/nvkms-vrr.h | 64 + .../interface/nvkms-api-types.h | 607 ++ .../src/nvidia-modeset/interface/nvkms-api.h | 4003 +++++++++ .../nvidia-modeset/interface/nvkms-format.h | 125 + .../nvidia-modeset/interface/nvkms-ioctl.h | 73 + .../src/nvidia-modeset/interface/nvkms-sync.h | 97 + .../kapi/include/nvkms-kapi-internal.h | 176 + .../kapi/include/nvkms-kapi-notifiers.h | 85 + .../kapi/interface/nvkms-kapi-private.h | 59 + .../kapi/interface/nvkms-kapi.h | 1081 +++ .../kapi/src/nvkms-kapi-channelevent.c | 150 + 
.../kapi/src/nvkms-kapi-notifiers.c | 227 +
.../src/nvidia-modeset/kapi/src/nvkms-kapi.c | 3188 +++++++
.../src/nvidia-modeset/lib/nvkms-format.c | 132 +
.../src/nvidia-modeset/lib/nvkms-sync.c | 377 +
.../include/nvidia-modeset-os-interface.h | 330 +
.../os-interface/include/nvkms.h | 90 +
.../src/dp/nvdp-connector-event-sink.cpp | 546 ++
.../src/dp/nvdp-connector-event-sink.hpp | 98 +
.../nvidia-modeset/src/dp/nvdp-connector.cpp | 1008 +++
.../src/nvidia-modeset/src/dp/nvdp-device.cpp | 148 +
.../src/dp/nvdp-evo-interface.cpp | 149 +
.../src/dp/nvdp-evo-interface.hpp | 68 +
.../src/nvidia-modeset/src/dp/nvdp-host.cpp | 68 +
.../src/nvidia-modeset/src/dp/nvdp-timer.cpp | 146 +
.../src/nvidia-modeset/src/dp/nvdp-timer.hpp | 93 +
.../nvidia-modeset/src/g_nvkms-evo-states.c | 2818 +++++++
.../src/nvidia-modeset/src/nvkms-3dvision.c | 54 +
.../src/nvidia-modeset/src/nvkms-attributes.c | 1354 +++
.../src/nvkms-console-restore.c | 876 ++
.../src/nvidia-modeset/src/nvkms-cursor.c | 399 +
.../src/nvidia-modeset/src/nvkms-cursor2.c | 50 +
.../src/nvidia-modeset/src/nvkms-cursor3.c | 114 +
.../src/nvidia-modeset/src/nvkms-dma.c | 484 ++
.../src/nvidia-modeset/src/nvkms-dpy.c | 2846 +++++++
.../src/nvidia-modeset/src/nvkms-event.c | 207 +
.../src/nvidia-modeset/src/nvkms-evo.c | 7370 +++++++++++++++++
.../src/nvidia-modeset/src/nvkms-evo1.c | 539 ++
.../src/nvidia-modeset/src/nvkms-evo2.c | 3850 +++++++++
.../src/nvidia-modeset/src/nvkms-evo3.c | 6965 ++++++++++++++++
.../src/nvidia-modeset/src/nvkms-flip.c | 2888 +++++++
.../src/nvidia-modeset/src/nvkms-framelock.c | 2217 +++++
.../src/nvidia-modeset/src/nvkms-hal.c | 214 +
.../src/nvidia-modeset/src/nvkms-hdmi.c | 2047 +++++
.../src/nvidia-modeset/src/nvkms-hw-states.c | 1125 +++
.../src/nvidia-modeset/src/nvkms-lut.c | 391 +
.../src/nvidia-modeset/src/nvkms-modepool.c | 1986 +++++
.../src/nvidia-modeset/src/nvkms-modeset.c | 2864 +++++++
.../src/nvidia-modeset/src/nvkms-prealloc.c | 146 +
.../src/nvidia-modeset/src/nvkms-rm.c | 5426 ++++++++++++
.../src/nvidia-modeset/src/nvkms-rmapi-dgpu.c | 260 +
.../src/nvidia-modeset/src/nvkms-surface.c | 1259 +++
.../src/nvidia-modeset/src/nvkms-utils.c | 796 ++
.../src/nvidia-modeset/src/nvkms-vrr.c | 177 +
.../src/nvidia-modeset/src/nvkms.c | 5036 +++++++++++
.../src/nvidia-modeset/srcs.mk | 181 +
.../src/nvidia/Makefile | 179 +
.../arch/nvalloc/common/inc/nvrangetypes.h | 162 +
.../arch/nvalloc/unix/include/nv-caps.h | 94 +
.../arch/nvalloc/unix/include/nv-gpu-info.h | 44 +
.../nvalloc/unix/include/nv-ioctl-numbers.h | 43 +
.../arch/nvalloc/unix/include/nv-ioctl.h | 145 +
.../unix/include/nv-kernel-rmapi-ops.h | 61 +
.../arch/nvalloc/unix/include/nv-priv.h | 367 +
.../nvidia/arch/nvalloc/unix/include/nv-reg.h | 927 +++
.../include/nv-unix-nvos-params-wrappers.h | 49 +
.../src/nvidia/arch/nvalloc/unix/include/nv.h | 1091 +++
.../arch/nvalloc/unix/include/nv_escape.h | 54 +
.../arch/nvalloc/unix/include/os-interface.h | 241 +
.../arch/nvalloc/unix/include/os_custom.h | 61 +
.../nvidia/arch/nvalloc/unix/include/osapi.h | 192 +
.../arch/nvalloc/unix/include/osfuncs.h | 55 +
.../nvalloc/unix/include/rmobjexportimport.h | 42 +
.../src/nvidia/arch/nvalloc/unix/src/escape.c | 859 ++
.../arch/nvalloc/unix/src/exports-stubs.c | 299 +
.../nvidia/arch/nvalloc/unix/src/gcc_helper.c | 35 +
.../nvalloc/unix/src/os-hypervisor-stubs.c | 150 +
.../src/nvidia/arch/nvalloc/unix/src/os.c | 4908 +++++++++++
.../src/nvidia/arch/nvalloc/unix/src/osapi.c | 4442 ++++++++++
.../src/nvidia/arch/nvalloc/unix/src/osinit.c | 1633 ++++
.../nvidia/arch/nvalloc/unix/src/osmemdesc.c | 1016 +++
.../src/nvidia/arch/nvalloc/unix/src/osunix.c | 88 +
.../nvalloc/unix/src/power-management-tegra.c | 145 +
.../nvidia/arch/nvalloc/unix/src/registry.c | 524 ++
.../arch/nvalloc/unix/src/rmobjexportimport.c | 628 ++
.../src/nvidia/exports_link_command.txt | 103 +
.../src/nvidia/generated/g_allclasses.h | 215 +
.../src/nvidia/generated/g_binary_api_nvoc.c | 659 ++
.../src/nvidia/generated/g_binary_api_nvoc.h | 416 +
.../src/nvidia/generated/g_chips2halspec.h | 3 +
.../nvidia/generated/g_chips2halspec_nvoc.c | 45 +
.../nvidia/generated/g_chips2halspec_nvoc.h | 118 +
.../src/nvidia/generated/g_client_nvoc.c | 385 +
.../src/nvidia/generated/g_client_nvoc.h | 323 +
.../nvidia/generated/g_client_resource_nvoc.c | 1269 +++
.../nvidia/generated/g_client_resource_nvoc.h | 635 ++
.../src/nvidia/generated/g_context_dma_nvoc.c | 427 +
.../src/nvidia/generated/g_context_dma_nvoc.h | 356 +
.../src/nvidia/generated/g_dce_client_nvoc.c | 286 +
.../src/nvidia/generated/g_dce_client_nvoc.h | 377 +
.../src/nvidia/generated/g_device_nvoc.c | 550 ++
.../src/nvidia/generated/g_device_nvoc.h | 466 ++
.../generated/g_disp_capabilities_nvoc.c | 329 +
.../generated/g_disp_capabilities_nvoc.h | 239 +
.../nvidia/generated/g_disp_channel_nvoc.c | 1146 +++
.../nvidia/generated/g_disp_channel_nvoc.h | 776 ++
.../nvidia/generated/g_disp_inst_mem_nvoc.c | 169 +
.../nvidia/generated/g_disp_inst_mem_nvoc.h | 358 +
.../src/nvidia/generated/g_disp_objs_nvoc.c | 4087 +++++++++
.../src/nvidia/generated/g_disp_objs_nvoc.h | 2140 +++++
.../nvidia/generated/g_disp_sf_user_nvoc.c | 329 +
.../nvidia/generated/g_disp_sf_user_nvoc.h | 239 +
.../src/nvidia/generated/g_eng_desc_nvoc.h | 1518 ++++
.../src/nvidia/generated/g_eng_state_nvoc.c | 189 +
.../src/nvidia/generated/g_eng_state_nvoc.h | 385 +
.../nvidia/generated/g_event_buffer_nvoc.c | 379 +
.../nvidia/generated/g_event_buffer_nvoc.h | 288 +
.../src/nvidia/generated/g_event_nvoc.c | 692 ++
.../src/nvidia/generated/g_event_nvoc.h | 529 ++
.../nvidia/generated/g_generic_engine_nvoc.c | 334 +
.../nvidia/generated/g_generic_engine_nvoc.h | 237 +
.../src/nvidia/generated/g_gpu_class_list.c | 59 +
.../src/nvidia/generated/g_gpu_db_nvoc.c | 154 +
.../src/nvidia/generated/g_gpu_db_nvoc.h | 154 +
.../src/nvidia/generated/g_gpu_group_nvoc.c | 148 +
.../src/nvidia/generated/g_gpu_group_nvoc.h | 308 +
.../src/nvidia/generated/g_gpu_halspec_nvoc.c | 97 +
.../src/nvidia/generated/g_gpu_halspec_nvoc.h | 91 +
.../nvidia/generated/g_gpu_mgmt_api_nvoc.c | 322 +
.../nvidia/generated/g_gpu_mgmt_api_nvoc.h | 221 +
.../src/nvidia/generated/g_gpu_mgr_nvoc.c | 154 +
.../src/nvidia/generated/g_gpu_mgr_nvoc.h | 425 +
.../src/nvidia/generated/g_gpu_nvoc.c | 433 +
.../src/nvidia/generated/g_gpu_nvoc.h | 3188 +++++++
.../nvidia/generated/g_gpu_resource_nvoc.c | 309 +
.../nvidia/generated/g_gpu_resource_nvoc.h | 329 +
.../src/nvidia/generated/g_hal.h | 142 +
.../src/nvidia/generated/g_hal_archimpl.h | 94 +
.../src/nvidia/generated/g_hal_mgr_nvoc.c | 154 +
.../src/nvidia/generated/g_hal_mgr_nvoc.h | 139 +
.../src/nvidia/generated/g_hal_nvoc.c | 148 +
.../src/nvidia/generated/g_hal_nvoc.h | 146 +
.../src/nvidia/generated/g_hal_private.h | 66 +
.../src/nvidia/generated/g_hal_register.h | 51 +
.../nvidia/generated/g_hda_codec_api_nvoc.c | 327 +
.../nvidia/generated/g_hda_codec_api_nvoc.h | 229 +
.../src/nvidia/generated/g_hypervisor_nvoc.h | 151 +
.../src/nvidia/generated/g_io_vaspace_nvoc.c | 235 +
.../src/nvidia/generated/g_io_vaspace_nvoc.h | 303 +
.../src/nvidia/generated/g_journal_nvoc.h | 47 +
.../src/nvidia/generated/g_kern_disp_nvoc.c | 346 +
.../src/nvidia/generated/g_kern_disp_nvoc.h | 642 ++
.../src/nvidia/generated/g_kernel_head_nvoc.c | 176 +
.../src/nvidia/generated/g_kernel_head_nvoc.h | 354 +
.../src/nvidia/generated/g_mem_desc_nvoc.h | 1075 +++
.../src/nvidia/generated/g_mem_mgr_nvoc.c | 428 +
.../src/nvidia/generated/g_mem_mgr_nvoc.h | 2241 +++++
.../src/nvidia/generated/g_mem_nvoc.c | 312 +
.../src/nvidia/generated/g_mem_nvoc.h | 417 +
.../nvidia/generated/g_nv_debug_dump_nvoc.h | 402 +
.../src/nvidia/generated/g_nv_name_released.h | 1505 ++++
.../src/nvidia/generated/g_nvh_state.h | 28 +
.../src/nvidia/generated/g_object_nvoc.c | 130 +
.../src/nvidia/generated/g_object_nvoc.h | 187 +
.../src/nvidia/generated/g_objtmr_nvoc.c | 357 +
.../src/nvidia/generated/g_objtmr_nvoc.h | 1088 +++
.../src/nvidia/generated/g_odb.h | 86 +
.../src/nvidia/generated/g_os_desc_mem_nvoc.c | 323 +
.../src/nvidia/generated/g_os_desc_mem_nvoc.h | 224 +
.../src/nvidia/generated/g_os_hal.h | 10 +
.../src/nvidia/generated/g_os_nvoc.c | 149 +
.../src/nvidia/generated/g_os_nvoc.h | 1472 ++++
.../src/nvidia/generated/g_os_private.h | 10 +
.../nvidia/generated/g_prereq_tracker_nvoc.c | 155 +
.../nvidia/generated/g_prereq_tracker_nvoc.h | 254 +
.../src/nvidia/generated/g_ref_count_nvoc.h | 183 +
.../generated/g_resource_fwd_decls_nvoc.h | 1252 +++
.../src/nvidia/generated/g_resource_nvoc.c | 371 +
.../src/nvidia/generated/g_resource_nvoc.h | 355 +
.../src/nvidia/generated/g_resserv_nvoc.h | 418 +
.../src/nvidia/generated/g_rmconfig_private.h | 695 ++
.../src/nvidia/generated/g_rmconfig_util.c | 32 +
.../src/nvidia/generated/g_rmconfig_util.h | 23 +
.../nvidia/generated/g_rpc-message-header.h | 68 +
.../src/nvidia/generated/g_rpc-structures.h | 216 +
.../src/nvidia/generated/g_rs_client_nvoc.c | 421 +
.../src/nvidia/generated/g_rs_client_nvoc.h | 601 ++
.../src/nvidia/generated/g_rs_resource_nvoc.c | 186 +
.../src/nvidia/generated/g_rs_resource_nvoc.h | 860 ++
.../src/nvidia/generated/g_rs_server_nvoc.c | 313 +
.../src/nvidia/generated/g_rs_server_nvoc.h | 1062 +++
.../src/nvidia/generated/g_sdk-structures.h | 62 +
.../nvidia/generated/g_standard_mem_nvoc.c | 323 +
.../nvidia/generated/g_standard_mem_nvoc.h | 261 +
.../src/nvidia/generated/g_subdevice_nvoc.c | 1504 ++++
.../src/nvidia/generated/g_subdevice_nvoc.h | 929 +++
.../nvidia/generated/g_syncpoint_mem_nvoc.c | 323 +
.../nvidia/generated/g_syncpoint_mem_nvoc.h | 224 +
.../src/nvidia/generated/g_system_mem_nvoc.c | 378 +
.../src/nvidia/generated/g_system_mem_nvoc.h | 254 +
.../src/nvidia/generated/g_system_nvoc.c | 182 +
.../src/nvidia/generated/g_system_nvoc.h | 603 ++
.../src/nvidia/generated/g_tmr_nvoc.c | 417 +
.../src/nvidia/generated/g_tmr_nvoc.h | 332 +
.../src/nvidia/generated/g_traceable_nvoc.c | 88 +
.../src/nvidia/generated/g_traceable_nvoc.h | 87 +
.../src/nvidia/generated/g_vaspace_nvoc.c | 131 +
.../src/nvidia/generated/g_vaspace_nvoc.h | 389 +
.../nvidia/generated/g_virt_mem_mgr_nvoc.c | 148 +
.../nvidia/generated/g_virt_mem_mgr_nvoc.h | 133 +
.../src/nvidia/generated/rmconfig.h | 709 ++
.../src/nvidia/inc/kernel/core/core.h | 50 +
.../src/nvidia/inc/kernel/core/hal.h | 3 +
.../src/nvidia/inc/kernel/core/hal_mgr.h | 3 +
.../src/nvidia/inc/kernel/core/info_block.h | 59 +
.../src/nvidia/inc/kernel/core/locks.h | 205 +
.../src/nvidia/inc/kernel/core/prelude.h | 119 +
.../src/nvidia/inc/kernel/core/printf.h | 315 +
.../src/nvidia/inc/kernel/core/strict.h | 99 +
.../src/nvidia/inc/kernel/core/system.h | 3 +
.../src/nvidia/inc/kernel/core/thread_state.h | 217 +
.../nvidia/inc/kernel/diagnostics/journal.h | 3 +
.../inc/kernel/diagnostics/journal_structs.h | 53 +
.../inc/kernel/diagnostics/nv_debug_dump.h | 3 +
.../nvidia/inc/kernel/diagnostics/profiler.h | 119 +
.../nvidia/inc/kernel/diagnostics/traceable.h | 3 +
.../nvidia/inc/kernel/diagnostics/tracer.h | 188 +
.../inc/kernel/gpu/audio/hda_codec_api.h | 3 +
.../inc/kernel/gpu/dce_client/dce_client.h | 3 +
.../src/nvidia/inc/kernel/gpu/device/device.h | 3 +
.../inc/kernel/gpu/disp/disp_capabilities.h | 3 +
.../nvidia/inc/kernel/gpu/disp/disp_channel.h | 3 +
.../nvidia/inc/kernel/gpu/disp/disp_objs.h | 3 +
.../nvidia/inc/kernel/gpu/disp/disp_sf_user.h | 3 +
.../inc/kernel/gpu/disp/head/kernel_head.h | 3 +
.../kernel/gpu/disp/inst_mem/disp_inst_mem.h | 3 +
.../nvidia/inc/kernel/gpu/disp/kern_disp.h | 3 +
.../inc/kernel/gpu/disp/kern_disp_max.h | 36 +
.../inc/kernel/gpu/disp/kern_disp_type.h | 68 +
.../kernel/gpu/disp/vblank_callback/vblank.h | 112 +
.../src/nvidia/inc/kernel/gpu/eng_desc.h | 3 +
.../src/nvidia/inc/kernel/gpu/eng_state.h | 3 +
.../src/nvidia/inc/kernel/gpu/gpu.h | 3 +
.../src/nvidia/inc/kernel/gpu/gpu_access.h | 381 +
.../nvidia/inc/kernel/gpu/gpu_child_list.h | 306 +
.../inc/kernel/gpu/gpu_device_mapping.h | 62 +
.../src/nvidia/inc/kernel/gpu/gpu_halspec.h | 3 +
.../src/nvidia/inc/kernel/gpu/gpu_resource.h | 3 +
.../nvidia/inc/kernel/gpu/gpu_resource_desc.h | 37 +
.../src/nvidia/inc/kernel/gpu/gpu_timeout.h | 144 +
.../src/nvidia/inc/kernel/gpu/gpu_uuid.h | 52 +
.../nvidia/inc/kernel/gpu/gsp/message_queue.h | 30 +
.../inc/kernel/gpu/mem_mgr/context_dma.h | 3 +
.../nvidia/inc/kernel/gpu/mem_mgr/heap_base.h | 162 +
.../nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h | 3 +
.../nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h | 3 +
.../nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h | 47 +
.../gpu/mem_mgr/virt_mem_allocator_common.h | 151 +
.../inc/kernel/gpu/subdevice/generic_engine.h | 3 +
.../inc/kernel/gpu/subdevice/subdevice.h | 3 +
.../src/nvidia/inc/kernel/gpu_mgr/gpu_db.h | 3 +
.../src/nvidia/inc/kernel/gpu_mgr/gpu_group.h | 3 +
.../nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h | 3 +
.../src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h | 3 +
.../nvidia/inc/kernel/mem_mgr/io_vaspace.h | 3 +
.../src/nvidia/inc/kernel/mem_mgr/mem.h | 3 +
.../nvidia/inc/kernel/mem_mgr/os_desc_mem.h | 3 +
.../nvidia/inc/kernel/mem_mgr/standard_mem.h | 3 +
.../nvidia/inc/kernel/mem_mgr/syncpoint_mem.h | 3 +
.../nvidia/inc/kernel/mem_mgr/system_mem.h | 3 +
.../src/nvidia/inc/kernel/mem_mgr/vaspace.h | 3 +
.../nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h | 3 +
.../src/nvidia/inc/kernel/os/capability.h | 46 +
.../src/nvidia/inc/kernel/os/nv_memory_type.h | 41 +
.../src/nvidia/inc/kernel/os/os.h | 3 +
.../kernel/os/os_fixed_mode_timings_props.h | 52 +
.../src/nvidia/inc/kernel/os/os_stub.h | 87 +
.../nvidia/inc/kernel/platform/acpi_common.h | 113 +
.../src/nvidia/inc/kernel/platform/sli/sli.h | 64 +
.../src/nvidia/inc/kernel/rmapi/alloc_size.h | 38 +
.../src/nvidia/inc/kernel/rmapi/binary_api.h | 61 +
.../src/nvidia/inc/kernel/rmapi/client.h | 3 +
.../nvidia/inc/kernel/rmapi/client_resource.h | 3 +
.../src/nvidia/inc/kernel/rmapi/control.h | 272 +
.../src/nvidia/inc/kernel/rmapi/event.h | 3 +
.../nvidia/inc/kernel/rmapi/event_buffer.h | 3 +
.../src/nvidia/inc/kernel/rmapi/exports.h | 127 +
.../nvidia/inc/kernel/rmapi/mapping_list.h | 159 +
.../src/nvidia/inc/kernel/rmapi/param_copy.h | 99 +
.../src/nvidia/inc/kernel/rmapi/resource.h | 3 +
.../inc/kernel/rmapi/resource_fwd_decls.h | 3 +
.../src/nvidia/inc/kernel/rmapi/rmapi.h | 410 +
.../src/nvidia/inc/kernel/rmapi/rmapi_utils.h | 58 +
.../src/nvidia/inc/kernel/rmapi/rs_utils.h | 188 +
.../virtualization/hypervisor/hypervisor.h | 3 +
.../src/nvidia/inc/lib/base_utils.h | 76 +
.../src/nvidia/inc/lib/protobuf/prb.h | 299 +
.../src/nvidia/inc/lib/ref_count.h | 3 +
.../src/nvidia/inc/lib/zlib/inflate.h | 134 +
.../nvidia/inc/libraries/containers/btree.h | 68 +
.../inc/libraries/containers/eheap_old.h | 116 +
.../nvidia/inc/libraries/containers/list.h | 331 +
.../src/nvidia/inc/libraries/containers/map.h | 300 +
.../inc/libraries/containers/multimap.h | 296 +
.../nvidia/inc/libraries/containers/queue.h | 143 +
.../inc/libraries/containers/type_safety.h | 254 +
.../inc/libraries/eventbufferproducer.h | 177 +
.../nvidia/inc/libraries/ioaccess/ioaccess.h | 125 +
.../nvlog/internal/nvlog_printf_internal.h | 149 +
.../src/nvidia/inc/libraries/nvlog/nvlog.h | 334 +
.../nvidia/inc/libraries/nvlog/nvlog_printf.h | 91 +
.../src/nvidia/inc/libraries/nvoc/object.h | 126 +
.../src/nvidia/inc/libraries/nvoc/prelude.h | 255 +
.../src/nvidia/inc/libraries/nvoc/rtti.h | 77 +
.../src/nvidia/inc/libraries/nvoc/runtime.h | 116 +
.../src/nvidia/inc/libraries/nvoc/utility.h | 28 +
.../src/nvidia/inc/libraries/nvport/atomic.h | 418 +
.../src/nvidia/inc/libraries/nvport/core.h | 64 +
.../src/nvidia/inc/libraries/nvport/cpu.h | 637 ++
.../src/nvidia/inc/libraries/nvport/crypto.h | 346 +
.../src/nvidia/inc/libraries/nvport/debug.h | 314 +
.../libraries/nvport/inline/atomic_clang.h | 472 ++
.../inc/libraries/nvport/inline/atomic_gcc.h | 460 +
.../nvport/inline/debug_unix_kernel_os.h | 74 +
.../libraries/nvport/inline/memory_tracking.h | 323 +
.../libraries/nvport/inline/safe_generic.h | 311 +
.../libraries/nvport/inline/sync_tracking.h | 211 +
.../libraries/nvport/inline/util_gcc_clang.h | 188 +
.../libraries/nvport/inline/util_generic.h | 267 +
.../inc/libraries/nvport/inline/util_valist.h | 30 +
.../src/nvidia/inc/libraries/nvport/memory.h | 962 +++
.../src/nvidia/inc/libraries/nvport/nvport.h | 262 +
.../src/nvidia/inc/libraries/nvport/safe.h | 621 ++
.../src/nvidia/inc/libraries/nvport/string.h | 162 +
.../src/nvidia/inc/libraries/nvport/sync.h | 829 ++
.../src/nvidia/inc/libraries/nvport/thread.h | 318 +
.../src/nvidia/inc/libraries/nvport/util.h | 254 +
.../src/nvidia/inc/libraries/poolalloc.h | 289 +
.../libraries/prereq_tracker/prereq_tracker.h | 3 +
.../nvidia/inc/libraries/resserv/resserv.h | 372 +
.../inc/libraries/resserv/rs_access_map.h | 234 +
.../inc/libraries/resserv/rs_access_rights.h | 167 +
.../nvidia/inc/libraries/resserv/rs_client.h | 509 ++
.../nvidia/inc/libraries/resserv/rs_domain.h | 80 +
.../inc/libraries/resserv/rs_resource.h | 829 ++
.../nvidia/inc/libraries/resserv/rs_server.h | 928 +++
.../src/nvidia/inc/libraries/tls/tls.h | 345 +
.../src/nvidia/inc/libraries/utils/nv_enum.h | 684 ++
.../src/nvidia/inc/libraries/utils/nvassert.h | 970 +++
.../nvidia/inc/libraries/utils/nvbitvector.h | 476 ++
.../src/nvidia/inc/libraries/utils/nvmacro.h | 251 +
.../src/nvidia/inc/libraries/utils/nvprintf.h | 453 +
.../src/nvidia/inc/libraries/utils/nvrange.h | 282 +
.../src/nvidia/inc/os/dce_rm_client_ipc.h | 35 +
.../src/nvidia/interface/acpigenfuncs.h | 35 +
.../interface/deprecated/rmapi_deprecated.h | 120 +
.../deprecated/rmapi_deprecated_utils.c | 421 +
.../src/nvidia/interface/nv_firmware_types.h | 34 +
.../src/nvidia/interface/nvacpitypes.h | 47 +
.../src/nvidia/interface/nvrm_registry.h | 1605 ++++
.../src/nvidia/kernel/inc/objrpc.h | 114 +
.../src/nvidia/kernel/inc/objtmr.h | 3 +
.../src/nvidia/kernel/inc/tmr.h | 3 +
.../src/nvidia/kernel/inc/vgpu/rpc.h | 707 ++
.../nvidia/kernel/inc/vgpu/rpc_global_enums.h | 238 +
.../nvidia/kernel/inc/vgpu/rpc_hal_stubs.h | 66 +
.../src/nvidia/kernel/inc/vgpu/rpc_headers.h | 230 +
.../src/nvidia/kernel/inc/vgpu/rpc_vgpu.h | 64 +
.../src/nvidia/nv-kernel.ld | 35 +
.../src/nvidia/src/kernel/core/hal/hal.c | 119 +
.../src/nvidia/src/kernel/core/hal/hals_all.c | 58 +
.../nvidia/src/kernel/core/hal/info_block.c | 171 +
.../src/nvidia/src/kernel/core/hal_mgr.c | 229 +
.../src/nvidia/src/kernel/core/locks_common.c | 307 +
.../nvidia/src/kernel/core/locks_minimal.c | 270 +
.../src/nvidia/src/kernel/core/system.c | 657 ++
.../src/nvidia/src/kernel/core/thread_state.c | 1247 +++
.../src/nvidia/src/kernel/diagnostics/nvlog.c | 727 ++
.../src/kernel/diagnostics/nvlog_printf.c | 1503 ++++
.../nvidia/src/kernel/diagnostics/profiler.c | 227 +
.../src/kernel/gpu/arch/t23x/kern_gpu_t234d.c | 83 +
.../src/kernel/gpu/audio/hda_codec_api.c | 34 +
.../src/kernel/gpu/dce_client/dce_client.c | 259 +
.../kernel/gpu/dce_client/dce_client_rpc.c | 859 ++
.../src/nvidia/src/kernel/gpu/device.c | 582 ++
.../src/nvidia/src/kernel/gpu/device_ctrl.c | 271 +
.../src/nvidia/src/kernel/gpu/device_share.c | 310 +
.../kernel/gpu/disp/arch/v03/kern_disp_0300.c | 279 +
.../kernel/gpu/disp/arch/v04/kern_disp_0402.c | 148 +
.../src/kernel/gpu/disp/disp_capabilities.c | 85 +
.../nvidia/src/kernel/gpu/disp/disp_channel.c | 781 ++
.../gpu/disp/disp_common_kern_ctrl_minimal.c | 210 +
.../gpu/disp/disp_object_kern_ctrl_minimal.c | 98 +
.../nvidia/src/kernel/gpu/disp/disp_objs.c | 742 ++
.../nvidia/src/kernel/gpu/disp/disp_sf_user.c | 89 +
.../src/kernel/gpu/disp/head/kernel_head.c | 419 +
.../inst_mem/arch/v03/disp_inst_mem_0300.c | 344 +
.../kernel/gpu/disp/inst_mem/disp_inst_mem.c | 1003 +++
.../nvidia/src/kernel/gpu/disp/kern_disp.c | 933 +++
.../src/nvidia/src/kernel/gpu/eng_state.c | 543 ++
.../src/nvidia/src/kernel/gpu/gpu.c | 3725 +++++++++
.../src/nvidia/src/kernel/gpu/gpu_access.c | 1748 ++++
.../src/kernel/gpu/gpu_device_mapping.c | 329 +
.../src/nvidia/src/kernel/gpu/gpu_gspclient.c | 146 +
.../src/nvidia/src/kernel/gpu/gpu_resource.c | 413 +
.../nvidia/src/kernel/gpu/gpu_resource_desc.c | 512 ++
.../src/nvidia/src/kernel/gpu/gpu_rmapi.c | 635 ++
.../nvidia/src/kernel/gpu/gpu_t234d_kernel.c | 76 +
.../src/nvidia/src/kernel/gpu/gpu_timeout.c | 541 ++
.../src/nvidia/src/kernel/gpu/gpu_uuid.c | 317 +
.../mem_mgr/arch/turing/mem_mgr_tu102_base.c | 97 +
.../src/kernel/gpu/mem_mgr/context_dma.c | 683 ++
.../nvidia/src/kernel/gpu/mem_mgr/mem_desc.c | 3699 +++++++++
.../nvidia/src/kernel/gpu/mem_mgr/mem_utils.c | 830 ++
.../src/kernel/gpu/subdevice/generic_engine.c | 161 +
.../src/kernel/gpu/subdevice/subdevice.c | 453 +
.../subdevice/subdevice_ctrl_event_kernel.c | 279 +
.../gpu/subdevice/subdevice_ctrl_gpu_kernel.c | 792 ++
.../subdevice/subdevice_ctrl_timer_kernel.c | 410 +
.../src/nvidia/src/kernel/gpu/timer/timer.c | 1632 ++++
.../src/kernel/gpu/timer/timer_ostimer.c | 325 +
.../src/nvidia/src/kernel/gpu_mgr/gpu_db.c | 370 +
.../src/nvidia/src/kernel/gpu_mgr/gpu_group.c | 329 +
.../nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c | 65 +
.../src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c | 2554 ++++++
.../nvidia/src/kernel/mem_mgr/io_vaspace.c | 561 ++
.../src/nvidia/src/kernel/mem_mgr/mem.c | 962 +++
.../src/kernel/mem_mgr/mem_mgr_internal.h | 38 +
.../nvidia/src/kernel/mem_mgr/os_desc_mem.c | 199 +
.../nvidia/src/kernel/mem_mgr/standard_mem.c | 240 +
.../nvidia/src/kernel/mem_mgr/syncpoint_mem.c | 121 +
.../nvidia/src/kernel/mem_mgr/system_mem.c | 570 ++
.../src/nvidia/src/kernel/mem_mgr/vaspace.c | 283 +
.../nvidia/src/kernel/mem_mgr/virt_mem_mgr.c | 185 +
.../src/nvidia/src/kernel/os/os_init.c | 582 ++
.../src/nvidia/src/kernel/os/os_sanity.c | 60 +
.../src/nvidia/src/kernel/os/os_stubs.c | 854 ++
.../src/nvidia/src/kernel/os/os_timer.c | 423 +
.../src/nvidia/src/kernel/rmapi/alloc_free.c | 1453 ++++
.../src/nvidia/src/kernel/rmapi/binary_api.c | 120 +
.../src/nvidia/src/kernel/rmapi/client.c | 832 ++
.../nvidia/src/kernel/rmapi/client_resource.c | 1537 ++++
.../src/nvidia/src/kernel/rmapi/control.c | 839 ++
.../src/kernel/rmapi/deprecated_context.c | 205 +
.../src/kernel/rmapi/deprecated_context.h | 42 +
.../nvidia/src/kernel/rmapi/entry_points.c | 581 ++
.../nvidia/src/kernel/rmapi/entry_points.h | 428 +
.../src/nvidia/src/kernel/rmapi/event.c | 633 ++
.../nvidia/src/kernel/rmapi/event_buffer.c | 696 ++
.../src/kernel/rmapi/event_notification.c | 864 ++
.../src/nvidia/src/kernel/rmapi/mapping.c | 555 ++
.../src/nvidia/src/kernel/rmapi/mapping_cpu.c | 987 +++
.../src/nvidia/src/kernel/rmapi/param_copy.c | 341 +
.../src/nvidia/src/kernel/rmapi/resource.c | 286 +
.../nvidia/src/kernel/rmapi/resource_desc.c | 219 +
.../nvidia/src/kernel/rmapi/resource_desc.h | 88 +
.../nvidia/src/kernel/rmapi/resource_list.h | 336 +
.../src/nvidia/src/kernel/rmapi/rmapi.c | 694 +
.../src/nvidia/src/kernel/rmapi/rmapi_cache.c | 277 +
.../src/nvidia/src/kernel/rmapi/rmapi_stubs.c | 183 +
.../src/nvidia/src/kernel/rmapi/rmapi_utils.c | 147 +
.../src/nvidia/src/kernel/rmapi/rpc_common.c | 113 +
.../src/nvidia/src/kernel/rmapi/rs_utils.c | 383 +
.../src/nvidia/src/kernel/rmapi/sharing.c | 412 +
.../src/nvidia/src/lib/base_utils.c | 358 +
.../src/nvidia/src/lib/zlib/inflate.c | 1157 +++
.../src/libraries/containers/btree/btree.c | 841 ++
.../libraries/containers/eheap/eheap_old.c | 1418 ++++
.../nvidia/src/libraries/containers/list.c | 409 +
.../src/nvidia/src/libraries/containers/map.c | 898 ++
.../src/libraries/containers/multimap.c | 380 +
.../nvidia/src/libraries/containers/queue.c | 299 +
.../eventbuffer/eventbufferproducer.c | 308 +
.../nvidia/src/libraries/ioaccess/ioaccess.c | 146 +
.../src/libraries/nvbitvector/nvbitvector.c | 864 ++
.../nvidia/src/libraries/nvoc/src/runtime.c | 311 +
.../nvidia/src/libraries/nvport/core/core.c | 94 +
.../src/libraries/nvport/cpu/cpu_common.c | 61 +
.../src/libraries/nvport/cpu/cpu_common.h | 54 +
.../nvport/crypto/crypto_random_xorshift.c | 190 +
.../libraries/nvport/memory/memory_generic.h | 222 +
.../libraries/nvport/memory/memory_tracking.c | 1340 +++
.../nvport/memory/memory_unix_kernel_os.c | 206 +
.../libraries/nvport/string/string_generic.c | 274 +
.../nvport/sync/inc/sync_rwlock_def.h | 41 +
.../nvport/sync/inc/sync_unix_kernel_os_def.h | 54 +
.../src/libraries/nvport/sync/sync_common.h | 158 +
.../src/libraries/nvport/sync/sync_rwlock.c | 178 +
.../nvport/sync/sync_unix_kernel_os.c | 242 +
.../nvport/thread/thread_unix_kernel_os.c | 60 +
.../nvport/util/util_compiler_switch.c | 38 +
.../libraries/nvport/util/util_gcc_clang.c | 80 +
.../nvport/util/util_unix_kernel_os.c | 44 +
.../libraries/prereq_tracker/prereq_tracker.c | 347 +
.../src/libraries/resserv/src/rs_access_map.c | 717 ++
.../libraries/resserv/src/rs_access_rights.c | 119 +
.../src/libraries/resserv/src/rs_client.c | 1741 ++++
.../src/libraries/resserv/src/rs_domain.c | 52 +
.../src/libraries/resserv/src/rs_resource.c | 799 ++
.../src/libraries/resserv/src/rs_server.c | 3602 ++++++++
.../src/nvidia/src/libraries/tls/tls.c | 661 ++
.../src/nvidia/src/libraries/utils/nvassert.c | 422 +
.../src/nvidia/srcs.mk | 190 +
.../utils.mk | 558 ++
.../version.mk | 9 +
commitFile.txt | 1159 +++
push_info.txt | 1 +
1166 files changed, 460174 insertions(+)
create mode 100644 NVIDIA-kernel-module-source-TempVersion/COPYING
create mode 100644 NVIDIA-kernel-module-source-TempVersion/Makefile
create mode 100644 NVIDIA-kernel-module-source-TempVersion/README.md
create mode 100644 NVIDIA-kernel-module-source-TempVersion/SECURITY.md
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h
create mode 100755 NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/count-lines.mk
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9470.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvsecurityinfo.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatus.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/COPYING.txt create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/softfloat_state.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f32.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f64.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h create mode 
100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h create 
mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/srcs.mk create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os-interface.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/exports_link_command.txt create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c 
create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h create 
mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/traceable.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/tracer.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h create mode 
100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/system.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c create mode 100644 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c create mode 100644 NVIDIA-kernel-module-source-TempVersion/src/nvidia/srcs.mk create mode 100644 NVIDIA-kernel-module-source-TempVersion/utils.mk create mode 100644 NVIDIA-kernel-module-source-TempVersion/version.mk create mode 100644 commitFile.txt create mode 100644 push_info.txt diff --git a/NVIDIA-kernel-module-source-TempVersion/COPYING b/NVIDIA-kernel-module-source-TempVersion/COPYING new file mode 100644 index 0000000..84a3c32 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/COPYING @@ -0,0 +1,369 @@ + +Except where noted otherwise, the individual files within this package are +licensed as MIT: + + Copyright (c) 
2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +However, when linked together to form a Linux kernel module, the resulting Linux +kernel module is dual licensed as MIT/GPLv2. + + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. 
Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. 
The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. + diff --git a/NVIDIA-kernel-module-source-TempVersion/Makefile b/NVIDIA-kernel-module-source-TempVersion/Makefile new file mode 100644 index 0000000..0ad5aa9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/Makefile @@ -0,0 +1,76 @@ +########################################################################### +# This is the top level makefile for the NVIDIA Linux kernel module source +# package. 
+# +# To build: run `make modules` +# To install the built kernel modules: run (as root) `make modules_install` +########################################################################### + +include utils.mk + +all: modules + +nv_kernel_o = src/nvidia/$(OUTPUTDIR)/nv-kernel.o +nv_kernel_o_binary = kernel-open/nvidia/nv-kernel.o_binary + +nv_modeset_kernel_o = src/nvidia-modeset/$(OUTPUTDIR)/nv-modeset-kernel.o +nv_modeset_kernel_o_binary = kernel-open/nvidia-modeset/nv-modeset-kernel.o_binary + +.PHONY: $(nv_kernel_o) $(nv_modeset_kernel_o) modules modules_install + + +########################################################################### +# nv-kernel.o is the OS-agnostic portion of nvidia.ko +########################################################################### + +$(nv_kernel_o): + $(MAKE) -C src/nvidia + +$(nv_kernel_o_binary): $(nv_kernel_o) + cd $(dir $@) && ln -sf ../../$^ $(notdir $@) + + +########################################################################### +# nv-modeset-kernel.o is the OS-agnostic portion of nvidia-modeset.ko +########################################################################### + +$(nv_modeset_kernel_o): + $(MAKE) -C src/nvidia-modeset + +$(nv_modeset_kernel_o_binary): $(nv_modeset_kernel_o) + cd $(dir $@) && ln -sf ../../$^ $(notdir $@) + + +########################################################################### +# After the OS-agnostic portions are built, descend into kernel-open/ and build +# the kernel modules with kbuild. +########################################################################### + +modules: $(nv_kernel_o_binary) $(nv_modeset_kernel_o_binary) + $(MAKE) -C kernel-open modules + + +########################################################################### +# Install the built kernel modules using kbuild. +########################################################################### + +modules_install: + $(MAKE) -C kernel-open modules_install + + +########################################################################### +# clean +########################################################################### + +.PHONY: clean nvidia.clean nvidia-modeset.clean kernel-open.clean + +clean: nvidia.clean nvidia-modeset.clean kernel-open.clean + +nvidia.clean: + $(MAKE) -C src/nvidia clean + +nvidia-modeset.clean: + $(MAKE) -C src/nvidia-modeset clean + +kernel-open.clean: + $(MAKE) -C kernel-open clean diff --git a/NVIDIA-kernel-module-source-TempVersion/README.md b/NVIDIA-kernel-module-source-TempVersion/README.md new file mode 100644 index 0000000..bc42049 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/README.md @@ -0,0 +1,160 @@ +# NVIDIA Linux Open GPU Kernel Module Source + +This is the source release of the NVIDIA Linux open GPU kernel modules, +version 35.4.1. + + +## How to Build + +To build: + + make modules -j`nproc` + +To install, first uninstall any existing NVIDIA kernel modules. Then, +as root: + + make modules_install -j`nproc` + +Note that the kernel modules built here must be used with gsp.bin +firmware and user-space NVIDIA GPU driver components from a corresponding +35.4.1 driver release. This can be achieved by installing +the NVIDIA GPU driver from the .run file using the `--no-kernel-modules` +option. E.g., + + sh ./NVIDIA-Linux-[...].run --no-kernel-modules + + +## Supported Target CPU Architectures + +Currently, the kernel modules can be built for x86_64 or aarch64. 
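+
+When building natively, no architecture-related variables should be
+needed. For example (an illustrative check of the host architecture,
+followed by a native build):
+
+    uname -m    # prints the host architecture, e.g. "x86_64" or "aarch64"
+    make modules -j`nproc`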
+If cross-compiling, set these variables on the make command line: + + TARGET_ARCH=aarch64|x86_64 + CC + LD + AR + CXX + OBJCOPY + +E.g., + + # compile on x86_64 for aarch64 + make modules -j`nproc` \ + TARGET_ARCH=aarch64 \ + CC=aarch64-linux-gnu-gcc \ + LD=aarch64-linux-gnu-ld \ + AR=aarch64-linux-gnu-ar \ + CXX=aarch64-linux-gnu-g++ \ + OBJCOPY=aarch64-linux-gnu-objcopy + + +## Other Build Knobs + +NV_VERBOSE - Set this to "1" to print each complete command executed; + otherwise, a succinct "CC" line is printed. + +DEBUG - Set this to "1" to build the kernel modules as debug. By default, the + build compiles without debugging information. This also enables + various debug log messages in the kernel modules. + +These variables can be set on the make command line. E.g., + + make modules -j`nproc` NV_VERBOSE=1 + + +## Supported Toolchains + +Any reasonably modern version of gcc or clang can be used to build the +kernel modules. Note that the kernel interface layers of the kernel +modules must be built with the toolchain that was used to build the +kernel. + + +## Supported Linux Kernel Versions + +The NVIDIA open kernel modules support the same range of Linux kernel +versions that are supported with the proprietary NVIDIA kernel modules. +This is currently Linux kernel 3.10 or newer. + + +## How to Contribute + +Contributions can be made by creating a pull request on +https://github.com/NVIDIA/open-gpu-kernel-modules +We'll respond via GitHub. + +Note that when submitting a pull request, you will be prompted to accept +a Contributor License Agreement. + +This code base is shared with NVIDIA's proprietary drivers, and various +processing is performed on the shared code to produce the source code that is +published here. This has several implications for the foreseeable future: + +* The GitHub repo will function mostly as a snapshot of each driver + release. + +* We do not expect to be able to provide revision history for individual + changes that were made to NVIDIA's shared code base. There will likely + only be one git commit per driver release. + +* We may not be able to reflect individual contributions as separate + git commits in the GitHub repo. + +* Because the code undergoes various processing prior to publishing here, + contributions made here require manual merging to be applied to the shared + code base. Therefore, large refactoring changes made here may be difficult to + merge and accept back into the shared code base. If you have large + refactoring to suggest, please contact us in advance, so we can coordinate. + + +## How to Report Issues + +Any of the existing bug reporting venues can be used to communicate +problems to NVIDIA, such as our forum: + +https://forums.developer.nvidia.com/c/gpu-graphics/linux/148 + +or linux-bugs@nvidia.com. + +Please see the 'NVIDIA Contact Info and Additional Resources' section +of the NVIDIA GPU Driver README for details. + +Please see the separate [SECURITY.md](SECURITY.md) document if you +believe you have discovered a security vulnerability in this software. + + +## Kernel Interface and OS-Agnostic Components of Kernel Modules + +Most of NVIDIA's kernel modules are split into two components: + +* An "OS-agnostic" component: this is the component of each kernel module + that is independent of the operating system. + +* A "kernel interface layer": this is the component of each kernel module + that is specific to the Linux kernel version and configuration. 
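+
+When building from this source package, the top-level Makefile first
+builds the OS-agnostic component, then symlinks it into the kernel
+interface layer tree before invoking Kbuild. For nvidia.ko, this is
+roughly equivalent to the following sketch (based on the Makefile in
+this package; OUTPUTDIR stands for the per-build output directory
+chosen by the makefiles):
+
+    make -C src/nvidia
+    (cd kernel-open/nvidia && \
+        ln -sf ../../src/nvidia/OUTPUTDIR/nv-kernel.o nv-kernel.o_binary)
+    make -C kernel-open modules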
+ +When packaged in the NVIDIA .run installation package, the OS-agnostic +component is provided as a binary: it is large and time-consuming to +compile, so pre-built versions are provided so that the user does +not have to compile it during every driver installation. For the +nvidia.ko kernel module, this component is named "nv-kernel.o_binary". +For the nvidia-modeset.ko kernel module, this component is named +"nv-modeset-kernel.o_binary". Neither nvidia-drm.ko nor nvidia-uvm.ko +have OS-agnostic components. + +The kernel interface layer component for each kernel module must be built +for the target kernel. + + +## Directory Structure Layout + +- `kernel-open/` The kernel interface layer +- `kernel-open/nvidia/` The kernel interface layer for nvidia.ko +- `kernel-open/nvidia-drm/` The kernel interface layer for nvidia-drm.ko +- `kernel-open/nvidia-modeset/` The kernel interface layer for nvidia-modeset.ko +- `kernel-open/nvidia-uvm/` The kernel interface layer for nvidia-uvm.ko + +- `src/` The OS-agnostic code +- `src/nvidia/` The OS-agnostic code for nvidia.ko +- `src/nvidia-modeset/` The OS-agnostic code for nvidia-modeset.ko +- `src/common/` Utility code used by one or more of nvidia.ko and nvidia-modeset.ko diff --git a/NVIDIA-kernel-module-source-TempVersion/SECURITY.md b/NVIDIA-kernel-module-source-TempVersion/SECURITY.md new file mode 100644 index 0000000..9926a4c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/SECURITY.md @@ -0,0 +1,16 @@ +# Report a Security Vulnerability + +To report a potential security vulnerability in any NVIDIA product, please use either: +* this web form: [Security Vulnerability Submission Form](https://www.nvidia.com/object/submit-security-vulnerability.html), or +* send email to: [NVIDIA PSIRT](mailto:psirt@nvidia.com) + +**OEM Partners should contact their NVIDIA Customer Program Manager** + +If reporting a potential vulnerability via email, please encrypt it using NVIDIA’s public PGP key ([see PGP Key page](https://www.nvidia.com/en-us/security/pgp-key/)) and include the following information: +* Product/Driver name and version/branch that contains the vulnerability +* Type of vulnerability (code execution, denial of service, buffer overflow, etc.) +* Instructions to reproduce the vulnerability +* Proof-of-concept or exploit code +* Potential impact of the vulnerability, including how an attacker could exploit the vulnerability + +See https://www.nvidia.com/en-us/security/ for past NVIDIA Security Bulletins and Notices. diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild b/NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild new file mode 100644 index 0000000..834eb7c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild @@ -0,0 +1,250 @@ +########################################################################### +# Kbuild file for NVIDIA Linux GPU driver kernel modules +########################################################################### + +# +# The parent makefile is expected to define: +# +# NV_KERNEL_SOURCES : The root of the kernel source tree. +# NV_KERNEL_OUTPUT : The kernel's output tree. +# NV_KERNEL_MODULES : A whitespace-separated list of modules to build. +# ARCH : The target CPU architecture: x86_64|arm64|powerpc +# +# Kbuild provides the variables: +# +# $(src) : The directory containing this Kbuild file. +# $(obj) : The directory where the output from this build is written. 
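+#
+# For example, a parent makefile might pass values like the following
+# (hypothetical values; the real ones are computed by the makefile in
+# this directory at build time):
+#
+#   NV_KERNEL_SOURCES=/lib/modules/$(uname -r)/build
+#   NV_KERNEL_OUTPUT=$(NV_KERNEL_SOURCES)
+#   NV_KERNEL_MODULES="nvidia nvidia-modeset nvidia-drm nvidia-uvm"
+#   ARCH=x86_64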
+# + +NV_BUILD_TYPE ?= release + +# +# Utility macro ASSIGN_PER_OBJ_CFLAGS: to control CFLAGS on a +# per-object basis, Kbuild honors the 'CFLAGS_$(object)' variable. +# E.g., "CFLAGS_nv.o" for CFLAGS that are specific to nv.o. Use this +# macro to assign 'CFLAGS_$(object)' variables for multiple object +# files. +# +# $(1): The object files. +# $(2): The CFLAGS to add for those object files. +# +# With kernel git commit 54b8ae66ae1a3454a7645d159a482c31cd89ab33, the +# handling of object-specific CFLAGS, CFLAGS_$(object), has changed. Prior to +# this commit, the CFLAGS_$(object) variable was required to be defined with +# only the object name (CFLAGS_somefile.o). With the aforementioned git +# commit, it is now required to give Kbuild the relative path along with the +# object name (CFLAGS_<relative-path>/somefile.o). As a result, CFLAGS_$(object) +# is set twice, once with a relative path to the object files and once with +# just the object files. +# +ASSIGN_PER_OBJ_CFLAGS = \ + $(foreach _cflags_variable, \ + $(notdir $(1)) $(1), \ + $(eval $(addprefix CFLAGS_,$(_cflags_variable)) += $(2))) + + +# +# Include the specifics of the individual NVIDIA kernel modules. +# +# Each of these should: +# - Append to 'obj-m', to indicate the kernel module that should be built. +# - Define the object files that should get built to produce the kernel module. +# - Tie into conftest (see the description below). +# + +NV_UNDEF_BEHAVIOR_SANITIZER ?= +ifeq ($(NV_UNDEF_BEHAVIOR_SANITIZER),1) + UBSAN_SANITIZE := y +endif + +$(foreach _module, $(NV_KERNEL_MODULES), \ + $(eval include $(src)/$(_module)/$(_module).Kbuild)) + + +# +# Define CFLAGS that apply to all the NVIDIA kernel modules. EXTRA_CFLAGS +# is deprecated since 2.6.24 in favor of ccflags-y, but we need to support +# older kernels which do not have ccflags-y. Newer kernels append +# $(EXTRA_CFLAGS) to ccflags-y for compatibility. +# + +EXTRA_CFLAGS += -I$(src)/common/inc +EXTRA_CFLAGS += -I$(src) +EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args +EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM +EXTRA_CFLAGS += -DNV_VERSION_STRING=\"35.4.1\" + +ifneq ($(SYSSRCHOST1X),) + EXTRA_CFLAGS += -I$(SYSSRCHOST1X) +endif + +EXTRA_CFLAGS += -Wno-unused-function + +ifneq ($(NV_BUILD_TYPE),debug) + EXTRA_CFLAGS += -Wuninitialized +endif + +EXTRA_CFLAGS += -fno-strict-aliasing + +ifeq ($(ARCH),arm64) + EXTRA_CFLAGS += -mstrict-align +endif + +ifeq ($(NV_BUILD_TYPE),debug) + EXTRA_CFLAGS += -g -gsplit-dwarf +endif + +EXTRA_CFLAGS += -ffreestanding + +ifeq ($(ARCH),arm64) + EXTRA_CFLAGS += -mgeneral-regs-only -march=armv8-a + EXTRA_CFLAGS += $(call cc-option,-mno-outline-atomics,) +endif + +ifeq ($(ARCH),x86_64) + EXTRA_CFLAGS += -mno-red-zone -mcmodel=kernel +endif + +ifeq ($(ARCH),powerpc) + EXTRA_CFLAGS += -mlittle-endian -mno-strict-align -mno-altivec +endif + +EXTRA_CFLAGS += +EXTRA_CFLAGS += $(call cc-option,-Werror=undef,) +EXTRA_CFLAGS += -DNV_SPECTRE_V2=$(NV_SPECTRE_V2) +EXTRA_CFLAGS += -DNV_KERNEL_INTERFACE_LAYER + +# +# Detect SGI UV systems and apply system-specific optimizations. +# + +ifneq ($(wildcard /proc/sgi_uv),) + EXTRA_CFLAGS += -DNV_CONFIG_X86_UV +endif + + +# +# The conftest.sh script tests various aspects of the target kernel. +# The per-module Kbuild files included above should: +# +# - Append to the NV_CONFTEST_*_COMPILE_TESTS variables to indicate +# which conftests they require. +# - Append to the NV_OBJECTS_DEPEND_ON_CONFTEST variable any object files +# that depend on conftest. 
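+#
+# As a hypothetical illustration (the module and conftest names below
+# are made up, not part of this package), a module-specific Kbuild file
+# might contain:
+#
+#   NVIDIA_FOO_OBJECTS = nvidia-foo/nv-foo.o
+#   obj-m += nvidia-foo.o
+#   nvidia-foo-y := $(NVIDIA_FOO_OBJECTS)
+#   NV_CONFTEST_FUNCTION_COMPILE_TESTS += some_function_test
+#   NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_FOO_OBJECTS)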
+# +# The conftest machinery below will run the requested tests and +# generate the appropriate header files. +# + +CC ?= cc +LD ?= ld + +NV_CONFTEST_SCRIPT := $(src)/conftest.sh +NV_CONFTEST_HEADER := $(obj)/conftest/headers.h + +NV_CONFTEST_CMD := /bin/sh $(NV_CONFTEST_SCRIPT) \ + "$(CC)" $(ARCH) $(NV_KERNEL_SOURCES) $(NV_KERNEL_OUTPUT) + +NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags) + +NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(EXTRA_CFLAGS) -fno-pie + +NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/functions.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/symbols.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/types.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/generic.h + +NV_CONFTEST_HEADERS := $(obj)/conftest/patches.h +NV_CONFTEST_HEADERS += $(obj)/conftest/headers.h +NV_CONFTEST_HEADERS += $(NV_CONFTEST_COMPILE_TEST_HEADERS) + + +# +# Generate a header file for a single conftest compile test. Each compile test +# header depends on conftest.sh, as well as the generated conftest/headers.h +# file, which is included in the compile test preamble. +# + +$(obj)/conftest/compile-tests/%.h: $(NV_CONFTEST_SCRIPT) $(NV_CONFTEST_HEADER) + @mkdir -p $(obj)/conftest/compile-tests + @echo " CONFTEST: $(notdir $*)" + @$(NV_CONFTEST_CMD) compile_tests '$(NV_CONFTEST_CFLAGS)' \ + $(notdir $*) > $@ + +# +# Concatenate a conftest/*.h header from its constituent compile test headers +# +# $(1): The name of the concatenated header +# $(2): The list of compile tests that make up the header +# + +define NV_GENERATE_COMPILE_TEST_HEADER + $(obj)/conftest/$(1).h: $(addprefix $(obj)/conftest/compile-tests/,$(addsuffix .h,$(2))) + @mkdir -p $(obj)/conftest + @# concatenate /dev/null to prevent cat from hanging when $$^ is empty + @cat $$^ /dev/null > $$@ +endef + +# +# Generate the conftest compile test headers from the lists of compile tests +# provided by the module-specific Kbuild files. +# + +NV_CONFTEST_FUNCTION_COMPILE_TESTS ?= +NV_CONFTEST_GENERIC_COMPILE_TESTS ?= +NV_CONFTEST_MACRO_COMPILE_TESTS ?= +NV_CONFTEST_SYMBOL_COMPILE_TESTS ?= +NV_CONFTEST_TYPE_COMPILE_TESTS ?= + +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,functions,$(NV_CONFTEST_FUNCTION_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,generic,$(NV_CONFTEST_GENERIC_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,macros,$(NV_CONFTEST_MACRO_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,symbols,$(NV_CONFTEST_SYMBOL_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,types,$(NV_CONFTEST_TYPE_COMPILE_TESTS))) + +$(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT) + @mkdir -p $(obj)/conftest + @$(NV_CONFTEST_CMD) patch_check > $@ + +$(obj)/conftest/headers.h: $(NV_CONFTEST_SCRIPT) + @mkdir -p $(obj)/conftest + @$(NV_CONFTEST_CMD) test_kernel_headers '$(NV_CONFTEST_CFLAGS)' > $@ + +clean-dirs := $(obj)/conftest + + +# For any object files that depend on conftest, declare the dependency here. 
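+# The '|' in the rule below makes the generated conftest headers an
+# order-only prerequisite: they must exist before these objects are
+# compiled, but a change to the headers does not by itself force the
+# objects to be rebuilt.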
+$(addprefix $(obj)/,$(NV_OBJECTS_DEPEND_ON_CONFTEST)): | $(NV_CONFTEST_HEADERS) + +# Sanity checks of the build environment and target system/kernel + +BUILD_SANITY_CHECKS = \ + cc_sanity_check \ + cc_version_check \ + dom0_sanity_check \ + xen_sanity_check \ + preempt_rt_sanity_check \ + vgpu_kvm_sanity_check \ + module_symvers_sanity_check + +.PHONY: $(BUILD_SANITY_CHECKS) + +$(BUILD_SANITY_CHECKS): + @$(NV_CONFTEST_CMD) $@ full_output + +# Perform all sanity checks before generating the conftest headers + +$(NV_CONFTEST_HEADERS): | $(BUILD_SANITY_CHECKS) + +# Make the conftest headers depend on the kernel version string + +$(obj)/conftest/uts_release: NV_GENERATE_UTS_RELEASE + @mkdir -p $(dir $@) + @NV_UTS_RELEASE="// Kernel version: `$(NV_CONFTEST_CMD) compile_tests '$(NV_CONFTEST_CFLAGS)' uts_release`"; \ + if ! [ -f "$@" ] || [ "$$NV_UTS_RELEASE" != "`cat $@`" ]; \ + then echo "$$NV_UTS_RELEASE" > $@; fi + +.PHONY: NV_GENERATE_UTS_RELEASE + +$(NV_CONFTEST_HEADERS): $(obj)/conftest/uts_release diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile b/NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile new file mode 100644 index 0000000..d00c14b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile @@ -0,0 +1,126 @@ +# +# This Makefile was automatically generated; do not edit. +# + +########################################################################### +# Makefile for NVIDIA Linux GPU driver kernel modules +########################################################################### + +# This makefile is read twice: when a user or nvidia-installer invokes +# 'make', this file is read. It then invokes the Linux kernel's +# Kbuild. Modern versions of Kbuild will then read the Kbuild file in +# this directory. However, old versions of Kbuild will instead read +# this Makefile. For backwards compatibility, when read by Kbuild +# (recognized by KERNELRELEASE not being empty), do nothing but +# include the Kbuild file in this directory. + +ifneq ($(KERNELRELEASE),) + include $(src)/Kbuild +else + + # Determine the location of the Linux kernel source tree, and of the + # kernel's output tree. Use this to invoke Kbuild, and pass the paths + # to the source and output trees to NVIDIA's Kbuild file via + # NV_KERNEL_{SOURCES,OUTPUT}. 
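+  #
+  # For example (the paths are hypothetical), a user building against an
+  # explicit kernel source tree with a separate output tree might run:
+  #
+  #   make SYSSRC=/usr/src/linux-5.15.0 SYSOUT=/usr/src/linux-5.15.0-obj modules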
+
+  ifdef SYSSRC
+    KERNEL_SOURCES := $(SYSSRC)
+  else
+    KERNEL_UNAME ?= $(shell uname -r)
+    KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
+    KERNEL_SOURCES := $(shell test -d $(KERNEL_MODLIB)/source && echo $(KERNEL_MODLIB)/source || echo $(KERNEL_MODLIB)/build)
+  endif
+
+  KERNEL_OUTPUT := $(KERNEL_SOURCES)
+  KBUILD_PARAMS :=
+
+  ifdef SYSOUT
+    ifneq ($(SYSOUT), $(KERNEL_SOURCES))
+      KERNEL_OUTPUT := $(SYSOUT)
+      KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT)
+    endif
+  else
+    KERNEL_UNAME ?= $(shell uname -r)
+    KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
+    ifeq ($(KERNEL_SOURCES), $(KERNEL_MODLIB)/source)
+      KERNEL_OUTPUT := $(KERNEL_MODLIB)/build
+      KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT)
+    endif
+  endif
+
+  CC ?= cc
+  LD ?= ld
+  OBJDUMP ?= objdump
+
+  ifndef ARCH
+    ARCH := $(shell uname -m | sed -e 's/i.86/i386/' \
+      -e 's/armv[0-7]\w\+/arm/' \
+      -e 's/aarch64/arm64/' \
+      -e 's/ppc64le/powerpc/' \
+    )
+  endif
+
+  NV_KERNEL_MODULES ?= $(wildcard nvidia nvidia-modeset nvidia-drm)
+  NV_KERNEL_MODULES := $(filter-out $(NV_EXCLUDE_KERNEL_MODULES), \
+                                    $(NV_KERNEL_MODULES))
+  NV_VERBOSE ?=
+  SPECTRE_V2_RETPOLINE ?= 0
+
+  ifeq ($(NV_VERBOSE),1)
+    KBUILD_PARAMS += V=1
+  endif
+  KBUILD_PARAMS += -C $(KERNEL_SOURCES) M=$(CURDIR)
+  KBUILD_PARAMS += ARCH=$(ARCH)
+  KBUILD_PARAMS += NV_KERNEL_SOURCES=$(KERNEL_SOURCES)
+  KBUILD_PARAMS += NV_KERNEL_OUTPUT=$(KERNEL_OUTPUT)
+  KBUILD_PARAMS += NV_KERNEL_MODULES="$(NV_KERNEL_MODULES)"
+  KBUILD_PARAMS += INSTALL_MOD_DIR=kernel/drivers/video
+  KBUILD_PARAMS += NV_SPECTRE_V2=$(SPECTRE_V2_RETPOLINE)
+
+  .PHONY: modules module clean clean_conftest modules_install
+  modules clean modules_install:
+	@$(MAKE) "LD=$(LD)" "CC=$(CC)" "OBJDUMP=$(OBJDUMP)" $(KBUILD_PARAMS) $@
+	@if [ "$@" = "modules" ]; then \
+	    for module in $(NV_KERNEL_MODULES); do \
+	        if [ -x split-object-file.sh ]; then \
+	            ./split-object-file.sh $$module.ko; \
+	        fi; \
+	    done; \
+	fi
+
+  # Compatibility target for scripts that may be directly calling the
+  # "module" target from the old build system.
+
+  module: modules
+
+  # Check if any of the kernel module linker scripts exist. If they do, pass
+  # them as linker options (via the variable NV_MODULE_COMMON_SCRIPTS) while
+  # building the kernel interface object files. These scripts do some
+  # processing on the module symbols, on which the Linux kernel's module
+  # resolution depends, and hence must be used whenever present.
+
+  LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
+               $(KERNEL_SOURCES)/arch/$(ARCH)/kernel/module.lds \
+               $(KERNEL_OUTPUT)/scripts/module.lds
+  NV_MODULE_COMMON_SCRIPTS := $(foreach s, $(wildcard $(LD_SCRIPT)), -T $(s))
+
+  # Use $* to match the stem % in the kernel interface file %-linux.o. Replace
+  # "nv" with "nvidia" in $* as appropriate: e.g. nv-modeset-linux.o links
+  # nvidia-modeset.mod.o and nvidia-modeset/nv-modeset-interface.o. The kernel
+  # interface file must have the .mod.o object linked into it: otherwise, the
+  # kernel module produced by linking the interface against its corresponding
+  # core object file will not be loadable. The .mod.o file is built as part of
+  # the MODPOST process (stage 2), so the rule to build the kernel interface
+  # cannot be defined in the *Kbuild files, which are only used during stage 1.
+
+  %-linux.o: modules
+	$(LD) $(NV_MODULE_COMMON_SCRIPTS) -r -o $@ \
+	  $(subst nv,nvidia,$*).mod.o $(subst nv,nvidia,$*)/$*-interface.o
+
+  # Kbuild's "clean" rule won't clean up the conftest headers on its own, and
+  # clean-dirs doesn't appear to work as advertised.
+ clean_conftest: + $(RM) -r conftest + clean: clean_conftest + +endif # KERNELRELEASE diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h new file mode 100644 index 0000000..dd05144 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CONFTEST_H +#define _CONFTEST_H + +#include "conftest/headers.h" +#include "conftest/functions.h" +#include "conftest/generic.h" +#include "conftest/macros.h" +#include "conftest/symbols.h" +#include "conftest/types.h" + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h new file mode 100644 index 0000000..5227798 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h @@ -0,0 +1,453 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! \brief + * Define compile time symbols for CPU type and operating system type. 
+ * This file should only contain preprocessor commands so that + * there are no dependencies on other files. + * + * cpuopsys.h + * + * Copyright (c) 2001, Nvidia Corporation. All rights reserved. + */ + +/*! + * Uniform names are defined for compile time options to distinguish + * CPU types and Operating systems. + * Distinctions between CPU and OpSys should be orthogonal. + * + * These uniform names have initially been defined by keying off the + * makefile/build names defined for builds in the OpenGL group. + * Getting the uniform names defined for other builds may require + * different qualifications. + * + * The file is placed here to allow for the possibility of all driver + * components using the same naming convention for conditional compilation. + */ + +#ifndef CPUOPSYS_H +#define CPUOPSYS_H + +/*****************************************************************************/ +/* Define all OS/CPU-Chip related symbols */ + +/* ***** WINDOWS variations */ +#if defined(_WIN32) || defined(_WIN16) +# define NV_WINDOWS + +# if defined(_WIN32_WINNT) +# define NV_WINDOWS_NT +# elif defined(_WIN32_WCE) +# define NV_WINDOWS_CE +# elif !defined(NV_MODS) +# define NV_WINDOWS_9X +# endif +#endif /* _WIN32 || defined(_WIN16) */ + +/* ***** Unix variations */ +#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE) +# define NV_LINUX +#endif /* defined(__linux__) */ + +#if defined(__VMWARE__) && !defined(NV_VMWARE) +# define NV_VMWARE +#endif /* defined(__VMWARE__) */ + +/* SunOS + gcc */ +#if defined(__sun__) && defined(__svr4__) && !defined(NV_SUNOS) +# define NV_SUNOS +#endif /* defined(__sun__) && defined(__svr4__) */ + +/* SunOS + Sun Compiler (named SunPro, Studio or Forte) */ +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +# define NV_SUNPRO_C +# define NV_SUNOS +#endif /* defined(_SUNPRO_C) || defined(__SUNPRO_CC) */ + +#if defined(__FreeBSD__) && !defined(NV_BSD) +# define NV_BSD +#endif /* defined(__FreeBSD__) */ + +/* XXXar don't define NV_UNIX on MacOSX or vxworks or QNX */ +#if (defined(__unix__) || defined(__unix) || defined(__INTEGRITY) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */ +# define NV_UNIX +#endif /* defined(__unix__) */ + +#if (defined(__QNX__) || defined(__QNXNTO__)) && !defined(NV_QNX) +# define NV_QNX +#endif + +#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(NV_ANDROID) +# define NV_ANDROID +#endif + + + + + + + + +#if defined(DceCore) && !defined(NV_DCECORE) +# define NV_DCECORE +#endif + +/* ***** Apple variations */ +#if defined(macintosh) || defined(__APPLE__) +# define NV_MACINTOSH +# if defined(__MACH__) +# define NV_MACINTOSH_OSX +# else +# define NV_MACINTOSH_OS9 +# endif +# if defined(__LP64__) +# define NV_MACINTOSH_64 +# endif +#endif /* defined(macintosh) */ + +/* ***** VxWorks */ +/* Tornado 2.21 is gcc 2.96 and #defines __vxworks. */ +/* Tornado 2.02 is gcc 2.7.2 and doesn't define any OS symbol, so we rely on */ +/* the build system #defining vxworks. 
*/ +#if defined(__vxworks) || defined(vxworks) +# define NV_VXWORKS +#endif + +/* ***** Integrity OS */ +#if defined(__INTEGRITY) +# if !defined(NV_INTEGRITY) +# define NV_INTEGRITY +# endif +#endif + +/* ***** Processor type variations */ +/* Note: The prefix NV_CPU_* is taken by Nvcm.h */ + +#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */ +/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */ +/* __i386 for Studio compiler on Solaris x86 */ +# define NVCPU_X86 /* any IA32 machine (not x86-64) */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(_WIN32) && defined(_M_IA64) +# define NVCPU_IA64_WINDOWS /* any IA64 for Windows opsys */ +#endif +#if defined(NV_LINUX) && defined(__ia64__) +# define NVCPU_IA64_LINUX /* any IA64 for Linux opsys */ +#endif +#if defined(NVCPU_IA64_WINDOWS) || defined(NVCPU_IA64_LINUX) || defined(IA64) +# define NVCPU_IA64 /* any IA64 for any opsys */ +#endif + +#if (defined(NV_MACINTOSH) && !(defined(__i386__) || defined(__x86_64__))) || defined(__PPC__) || defined(__ppc) +# if defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) +# ifndef NVCPU_PPC64LE +# define NVCPU_PPC64LE /* PPC 64-bit little endian */ +# endif +# else +# ifndef NVCPU_PPC +# define NVCPU_PPC /* any non-PPC64LE PowerPC architecture */ +# endif +# ifndef NV_BIG_ENDIAN +# define NV_BIG_ENDIAN +# endif +# endif +# define NVCPU_FAMILY_PPC +#endif + +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +# define NVCPU_X86_64 /* any x86-64 for any opsys */ +#endif + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +# define NVCPU_FAMILY_X86 +#endif + +#if defined(__riscv) && (__riscv_xlen==64) +# define NVCPU_RISCV64 +# if defined(__nvriscv) +# define NVCPU_NVRISCV64 +# endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +/* + * 32-bit instruction set on, e.g., ARMv7 or AArch32 execution state + * on ARMv8 + */ +# define NVCPU_ARM +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(__aarch64__) || defined(__ARM64__) || defined(_M_ARM64) +# define NVCPU_AARCH64 /* 64-bit A64 instruction set on ARMv8 */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NVCPU_ARM) || defined(NVCPU_AARCH64) +# define NVCPU_FAMILY_ARM +#endif + +#if defined(__SH4__) +# ifndef NVCPU_SH4 +# define NVCPU_SH4 /* Renesas (formerly Hitachi) SH4 */ +# endif +# if defined NV_WINDOWS_CE +# define NVCPU_MIN_PAGE_SHIFT 12 +# endif +#endif + +/* For Xtensa processors */ +#if defined(__XTENSA__) +# define NVCPU_XTENSA +# if defined(__XTENSA_EB__) +# define NV_BIG_ENDIAN +# endif +#endif + + +/* + * Other flavors of CPU type should be determined at run-time. + * For example, an x86 architecture with/without SSE. + * If it can compile, then there's no need for a compile time option. + * For some current GCC limitations, these may be fixed by using the Intel + * compiler for certain files in a Linux build. + */ + +/* The minimum page size can be determined from the minimum page shift */ +#if defined(NVCPU_MIN_PAGE_SHIFT) +#define NVCPU_MIN_PAGE_SIZE (1 << NVCPU_MIN_PAGE_SHIFT) +#endif + +#if defined(NVCPU_IA64) || defined(NVCPU_X86_64) || \ + defined(NV_MACINTOSH_64) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64) +# define NV_64_BITS /* all architectures where pointers are 64 bits */ +#else +/* we assume 32 bits. I don't see a need for NV_16_BITS. 
*/ +#endif + +/* For verification-only features not intended to be included in normal drivers */ +#if defined(NV_MODS) && defined(DEBUG) && !defined(DISABLE_VERIF_FEATURES) +#define NV_VERIF_FEATURES +#endif + +/* + * New, safer family of #define's -- these ones use 0 vs. 1 rather than + * defined/!defined. This is advantageous because if you make a typo, + * say misspelled ENDIAN: + * + * #if NVCPU_IS_BIG_ENDAIN + * + * ...some compilers can give you a warning telling you that you screwed up. + * The compiler can also give you a warning if you forget to #include + * "cpuopsys.h" in your code before the point where you try to use these + * conditionals. + * + * Also, the names have been prefixed in more cases with "CPU" or "OS" for + * increased clarity. You can tell the names apart from the old ones because + * they all use "_IS_" in the name. + * + * Finally, these can be used in "if" statements and not just in #if's. For + * example: + * + * if (NVCPU_IS_BIG_ENDIAN) x = Swap32(x); + * + * Maybe some day in the far-off future these can replace the old #define's. + */ + +#if defined(NV_MODS) +#define NV_IS_MODS 1 +#else +#define NV_IS_MODS 0 +#endif + +#if defined(NV_WINDOWS) +#define NVOS_IS_WINDOWS 1 +#else +#define NVOS_IS_WINDOWS 0 +#endif +#if defined(NV_WINDOWS_CE) +#define NVOS_IS_WINDOWS_CE 1 +#else +#define NVOS_IS_WINDOWS_CE 0 +#endif +#if defined(NV_LINUX) +#define NVOS_IS_LINUX 1 +#else +#define NVOS_IS_LINUX 0 +#endif +#if defined(NV_UNIX) +#define NVOS_IS_UNIX 1 +#else +#define NVOS_IS_UNIX 0 +#endif +#if defined(NV_BSD) +#define NVOS_IS_FREEBSD 1 +#else +#define NVOS_IS_FREEBSD 0 +#endif +#if defined(NV_SUNOS) +#define NVOS_IS_SOLARIS 1 +#else +#define NVOS_IS_SOLARIS 0 +#endif +#if defined(NV_VMWARE) +#define NVOS_IS_VMWARE 1 +#else +#define NVOS_IS_VMWARE 0 +#endif +#if defined(NV_QNX) +#define NVOS_IS_QNX 1 +#else +#define NVOS_IS_QNX 0 +#endif +#if defined(NV_ANDROID) +#define NVOS_IS_ANDROID 1 +#else +#define NVOS_IS_ANDROID 0 +#endif +#if defined(NV_MACINTOSH) +#define NVOS_IS_MACINTOSH 1 +#else +#define NVOS_IS_MACINTOSH 0 +#endif +#if defined(NV_VXWORKS) +#define NVOS_IS_VXWORKS 1 +#else +#define NVOS_IS_VXWORKS 0 +#endif +#if defined(NV_LIBOS) +#define NVOS_IS_LIBOS 1 +#else +#define NVOS_IS_LIBOS 0 +#endif +#if defined(NV_INTEGRITY) +#define NVOS_IS_INTEGRITY 1 +#else +#define NVOS_IS_INTEGRITY 0 +#endif + + + + + + + + + + +#if defined(NVCPU_X86) +#define NVCPU_IS_X86 1 +#else +#define NVCPU_IS_X86 0 +#endif +#if defined(NVCPU_RISCV64) +#define NVCPU_IS_RISCV64 1 +#else +#define NVCPU_IS_RISCV64 0 +#endif +#if defined(NVCPU_NVRISCV64) +#define NVCPU_IS_NVRISCV64 1 +#else +#define NVCPU_IS_NVRISCV64 0 +#endif +#if defined(NVCPU_IA64) +#define NVCPU_IS_IA64 1 +#else +#define NVCPU_IS_IA64 0 +#endif +#if defined(NVCPU_X86_64) +#define NVCPU_IS_X86_64 1 +#else +#define NVCPU_IS_X86_64 0 +#endif +#if defined(NVCPU_FAMILY_X86) +#define NVCPU_IS_FAMILY_X86 1 +#else +#define NVCPU_IS_FAMILY_X86 0 +#endif +#if defined(NVCPU_PPC) +#define NVCPU_IS_PPC 1 +#else +#define NVCPU_IS_PPC 0 +#endif +#if defined(NVCPU_PPC64LE) +#define NVCPU_IS_PPC64LE 1 +#else +#define NVCPU_IS_PPC64LE 0 +#endif +#if defined(NVCPU_FAMILY_PPC) +#define NVCPU_IS_FAMILY_PPC 1 +#else +#define NVCPU_IS_FAMILY_PPC 0 +#endif +#if defined(NVCPU_ARM) +#define NVCPU_IS_ARM 1 +#else +#define NVCPU_IS_ARM 0 +#endif +#if defined(NVCPU_AARCH64) +#define NVCPU_IS_AARCH64 1 +#else +#define NVCPU_IS_AARCH64 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_FAMILY_ARM 1 +#else +#define NVCPU_IS_FAMILY_ARM 
0 +#endif +#if defined(NVCPU_SH4) +#define NVCPU_IS_SH4 1 +#else +#define NVCPU_IS_SH4 0 +#endif +#if defined(NVCPU_XTENSA) +#define NVCPU_IS_XTENSA 1 +#else +#define NVCPU_IS_XTENSA 0 +#endif +#if defined(NV_BIG_ENDIAN) +#define NVCPU_IS_BIG_ENDIAN 1 +#else +#define NVCPU_IS_BIG_ENDIAN 0 +#endif +#if defined(NV_64_BITS) +#define NVCPU_IS_64_BITS 1 +#else +#define NVCPU_IS_64_BITS 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_PCIE_CACHE_COHERENT 0 +#else +#define NVCPU_IS_PCIE_CACHE_COHERENT 1 +#endif +#if defined(NV_DCECORE) +#define NVOS_IS_DCECORE 1 +#else +#define NVOS_IS_DCECORE 0 +#endif +/*****************************************************************************/ + +#endif /* CPUOPSYS_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h new file mode 100644 index 0000000..9b1b5d0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _OS_DCE_CLIENT_IPC_H_ +#define _OS_DCE_CLIENT_IPC_H_ + +// RM IPC Client Types + +#define DCE_CLIENT_RM_IPC_TYPE_SYNC 0x0 +#define DCE_CLIENT_RM_IPC_TYPE_EVENT 0x1 +#define DCE_CLIENT_RM_IPC_TYPE_MAX 0x2 + +void dceclientHandleAsyncRpcCallback(NvU32 handle, NvU32 interfaceType, + NvU32 msgLength, void *data, + void *usrCtx); +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h new file mode 100644 index 0000000..35bbf7c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_CAPS_H_
+#define _NV_CAPS_H_
+
+#include <nv-kernel-interface-api.h>
+
+/*
+ * Opaque OS-specific struct; on Linux, this has member
+ * 'struct proc_dir_entry'.
+ */
+typedef struct nv_cap nv_cap_t;
+
+/*
+ * Creates directory named "capabilities" under the provided path.
+ *
+ * @param[in] path Absolute path
+ *
+ * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
+ */
+nv_cap_t* NV_API_CALL nv_cap_init(const char *path);
+
+/*
+ * Creates capability directory entry
+ *
+ * @param[in] parent_cap Parent capability directory
+ * @param[in] name Capability directory's name
+ * @param[in] mode Capability directory's access mode
+ *
+ * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
+ */
+nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode);
+
+/*
+ * Creates capability file entry
+ *
+ * @param[in] parent_cap Parent capability directory
+ * @param[in] name Capability file's name
+ * @param[in] mode Capability file's access mode
+ *
+ * Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
+ */
+nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode);
+
+/*
+ * Destroys capability entry
+ *
+ * @param[in] cap Capability entry
+ */
+void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap);
+
+/*
+ * Validates and duplicates the provided file descriptor
+ *
+ * @param[in] cap Capability entry
+ * @param[in] fd File descriptor to be validated
+ *
+ * Returns duplicate fd upon success. Otherwise, returns -1.
+ */
+int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd);
+
+/*
+ * Closes file descriptor
+ *
+ * This function should be used to close duplicate file descriptors
+ * returned by nv_cap_validate_and_dup_fd.
+ *
+ * @param[in] fd File descriptor to be closed
+ *
+ */
+void NV_API_CALL nv_cap_close_fd(int fd);
+
+#endif /* _NV_CAPS_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h
new file mode 100644
index 0000000..ab794df
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_DMABUF_H_ +#define _NV_DMABUF_H_ + +#include "nv-linux.h" + +NV_STATUS nv_dma_buf_export(nv_state_t *, nv_ioctl_export_to_dma_buf_fd_t *); + +#endif // _NV_DMABUF_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h new file mode 100644 index 0000000..a8c0c0a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_GPU_INFO_H_ +#define _NV_GPU_INFO_H_ + +typedef struct { + NvU32 gpu_id; + + struct { + NvU32 domain; + NvU8 bus, slot, function; + } pci_info; + + /* + * opaque OS-specific pointer; on Linux, this is a pointer to the + * 'struct device' for the GPU. 
+ */
+    void *os_device_ptr;
+} nv_gpu_info_t;
+
+#define NV_MAX_GPUS 32
+
+#endif /* _NV_GPU_INFO_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h
new file mode 100644
index 0000000..97dbb5d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h
@@ -0,0 +1,96 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NV_HASH_H__
+#define __NV_HASH_H__
+
+#include "conftest.h"
+#include "nv-list-helpers.h"
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/string.h>
+
+#if defined(NV_LINUX_STRINGHASH_H_PRESENT)
+#include <linux/stringhash.h> /* full_name_hash() */
+#else
+#include <linux/dcache.h>
+#endif
+
+#if (NV_FULL_NAME_HASH_ARGUMENT_COUNT == 3)
+#define nv_string_hash(_str) full_name_hash(NULL, _str, strlen(_str))
+#else
+#define nv_string_hash(_str) full_name_hash(_str, strlen(_str))
+#endif
+
+/**
+ * This naive hashtable was introduced by commit d9b482c8ba19 (v3.7, 2012-10-31).
+ * To support older kernels, import the necessary functionality from
+ * <linux/hashtable.h>.
+ */
+
+#define NV_HASH_SIZE(name) (ARRAY_SIZE(name))
+#define NV_HASH_BITS(name) ilog2(NV_HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define NV_HASH_MIN(val, bits) \
+    (sizeof(val) <= 4 ?
hash_32(val, bits) : hash_long(val, bits))
+
+#define NV_DECLARE_HASHTABLE(name, bits) \
+    struct hlist_head name[1 << (bits)]
+
+static inline void _nv_hash_init(struct hlist_head *ht, unsigned int sz)
+{
+    unsigned int i;
+
+    for (i = 0; i < sz; i++)
+    {
+        INIT_HLIST_HEAD(&ht[i]);
+    }
+}
+
+/**
+ * nv_hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ */
+#define nv_hash_init(hashtable) _nv_hash_init(hashtable, NV_HASH_SIZE(hashtable))
+
+/**
+ * nv_hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define nv_hash_add(hashtable, node, key) \
+    hlist_add_head(node, &hashtable[NV_HASH_MIN(key, NV_HASH_BITS(hashtable))])
+
+/**
+ * nv_hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define nv_hash_for_each_possible(name, obj, member, key) \
+    nv_hlist_for_each_entry(obj, &name[NV_HASH_MIN(key, NV_HASH_BITS(name))], member)
+
+#endif // __NV_HASH_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h
new file mode 100644
index 0000000..ddc6a91
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h
@@ -0,0 +1,125 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_HYPERVISOR_H_
+#define _NV_HYPERVISOR_H_
+
+#include <nv-kernel-interface-api.h>
+
+// Enums for supported hypervisor types.
+// New hypervisor type should be added before OS_HYPERVISOR_CUSTOM_FORCED +typedef enum _HYPERVISOR_TYPE +{ + OS_HYPERVISOR_XEN = 0, + OS_HYPERVISOR_VMWARE, + OS_HYPERVISOR_HYPERV, + OS_HYPERVISOR_KVM, + OS_HYPERVISOR_PARALLELS, + OS_HYPERVISOR_CUSTOM_FORCED, + OS_HYPERVISOR_UNKNOWN +} HYPERVISOR_TYPE; + +#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0 +#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1 +#define CMD_VGPU_VFIO_REGISTER_MDEV 2 +#define CMD_VGPU_VFIO_PRESENT 3 + +#define MAX_VF_COUNT_PER_GPU 64 + +typedef enum _VGPU_TYPE_INFO +{ + VGPU_TYPE_NAME = 0, + VGPU_TYPE_DESCRIPTION, + VGPU_TYPE_INSTANCES, +} VGPU_TYPE_INFO; + +typedef struct +{ + void *vgpuVfioRef; + void *waitQueue; + void *nv; + NvU32 *vgpuTypeIds; + NvU32 numVgpuTypes; + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool is_virtfn; +} vgpu_vfio_info; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool isNvidiaAttached; + NvBool isMdevAttached; +} vgpu_vf_pci_info; + +typedef enum VGPU_CMD_PROCESS_VF_INFO_E +{ + NV_VGPU_SAVE_VF_INFO = 0, + NV_VGPU_REMOVE_VF_PCI_INFO = 1, + NV_VGPU_REMOVE_VF_MDEV_INFO = 2, + NV_VGPU_GET_VF_INFO = 3 +} VGPU_CMD_PROCESS_VF_INFO; + +typedef enum VGPU_DEVICE_STATE_E +{ + NV_VGPU_DEV_UNUSED = 0, + NV_VGPU_DEV_OPENED = 1, + NV_VGPU_DEV_IN_USE = 2 +} VGPU_DEVICE_STATE; + +typedef enum _VMBUS_CMD_TYPE +{ + VMBUS_CMD_TYPE_INVALID = 0, + VMBUS_CMD_TYPE_SETUP = 1, + VMBUS_CMD_TYPE_SENDPACKET = 2, + VMBUS_CMD_TYPE_CLEANUP = 3, +} VMBUS_CMD_TYPE; + +typedef struct +{ + NvU32 request_id; + NvU32 page_count; + NvU64 *pPfns; + void *buffer; + NvU32 bufferlen; +} vmbus_send_packet_cmd_params; + + +typedef struct +{ + NvU32 override_sint; + NvU8 *nv_guid; +} vmbus_setup_cmd_params; + +/* + * Function prototypes + */ + +HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void); + +#endif // _NV_HYPERVISOR_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h new file mode 100644 index 0000000..3fad820 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+
+#ifndef NV_IOCTL_NUMA_H
+#define NV_IOCTL_NUMA_H
+
+#if defined(NV_LINUX)
+
+#include <nv-ioctl-numbers.h>
+
+#if defined(NV_KERNEL_INTERFACE_LAYER)
+
+#include <linux/types.h>
+
+#else
+
+#include <stdint.h>
+
+#if !defined(__aligned)
+#define __aligned(n) __attribute__((aligned(n)))
+#endif
+
+#endif
+
+#define NV_ESC_NUMA_INFO (NV_IOCTL_BASE + 15)
+#define NV_ESC_SET_NUMA_STATUS (NV_IOCTL_BASE + 16)
+
+#define NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64
+typedef struct offline_addresses
+{
+    uint64_t addresses[NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES] __aligned(8);
+    uint32_t numEntries;
+} nv_offline_addresses_t;
+
+
+/* per-device NUMA memory info as assigned by the system */
+typedef struct nv_ioctl_numa_info
+{
+    int32_t nid;
+    int32_t status;
+    uint64_t memblock_size __aligned(8);
+    uint64_t numa_mem_addr __aligned(8);
+    uint64_t numa_mem_size __aligned(8);
+    nv_offline_addresses_t offline_addresses __aligned(8);
+} nv_ioctl_numa_info_t;
+
+/* set the status of the device NUMA memory */
+typedef struct nv_ioctl_set_numa_status
+{
+    int32_t status;
+} nv_ioctl_set_numa_status_t;
+
+#define NV_IOCTL_NUMA_STATUS_DISABLED 0
+#define NV_IOCTL_NUMA_STATUS_OFFLINE 1
+#define NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS 2
+#define NV_IOCTL_NUMA_STATUS_ONLINE 3
+#define NV_IOCTL_NUMA_STATUS_ONLINE_FAILED 4
+#define NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS 5
+#define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6
+
+#endif
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h
new file mode 100644
index 0000000..cb0b6a2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef NV_IOCTL_NUMBERS_H
+#define NV_IOCTL_NUMBERS_H
+
+/* NOTE: using an ioctl() number > 55 will overflow!
*/
+#define NV_IOCTL_MAGIC 'F'
+#define NV_IOCTL_BASE 200
+#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0)
+#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1)
+#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6)
+#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7)
+#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9)
+#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10)
+#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11)
+#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12)
+#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
+#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
+#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h
new file mode 100644
index 0000000..ffd1dee
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h
@@ -0,0 +1,145 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef NV_IOCTL_H
+#define NV_IOCTL_H
+
+#include <nv-ioctl-numbers.h>
+#include <nvtypes.h>
+
+typedef struct {
+    NvU32 domain;       /* PCI domain number   */
+    NvU8  bus;          /* PCI bus number      */
+    NvU8  slot;         /* PCI slot number     */
+    NvU8  function;     /* PCI function number */
+    NvU16 vendor_id;    /* PCI vendor ID       */
+    NvU16 device_id;    /* PCI device ID       */
+} nv_pci_info_t;
+
+/*
+ * ioctl()'s with parameter structures too large for the
+ * _IOC cmd layout use the nv_ioctl_xfer_t structure
+ * and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual
+ * size and user argument pointer into the RM, which
+ * will then copy it to/from kernel space in separate steps.
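+ *
+ * An illustrative sketch of the user-space side (the names and the exact
+ * command encoding here are hypothetical):
+ *
+ *     nv_ioctl_xfer_t xfer;
+ *     xfer.cmd  = real_cmd;            -- the oversized ioctl's command
+ *     xfer.size = sizeof(params);
+ *     xfer.ptr  = (NvP64)(NvUPtr)&params;
+ *     ioctl(fd, _IOWR(NV_IOCTL_MAGIC, NV_ESC_IOCTL_XFER_CMD, nv_ioctl_xfer_t), &xfer);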
+ */ +typedef struct nv_ioctl_xfer +{ + NvU32 cmd; + NvU32 size; + NvP64 ptr NV_ALIGN_BYTES(8); +} nv_ioctl_xfer_t; + +typedef struct nv_ioctl_card_info +{ + NvBool valid; + nv_pci_info_t pci_info; /* PCI config information */ + NvU32 gpu_id; + NvU16 interrupt_line; + NvU64 reg_address NV_ALIGN_BYTES(8); + NvU64 reg_size NV_ALIGN_BYTES(8); + NvU64 fb_address NV_ALIGN_BYTES(8); + NvU64 fb_size NV_ALIGN_BYTES(8); + NvU32 minor_number; + NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */ +} nv_ioctl_card_info_t; + +/* alloc event */ +typedef struct nv_ioctl_alloc_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_alloc_os_event_t; + +/* free event */ +typedef struct nv_ioctl_free_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_free_os_event_t; + +/* status code */ +typedef struct nv_ioctl_status_code +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU32 status; +} nv_ioctl_status_code_t; + +/* check version string */ +#define NV_RM_API_VERSION_STRING_LENGTH 64 + +typedef struct nv_ioctl_rm_api_version +{ + NvU32 cmd; + NvU32 reply; + char versionString[NV_RM_API_VERSION_STRING_LENGTH]; +} nv_ioctl_rm_api_version_t; + +#define NV_RM_API_VERSION_CMD_STRICT 0 +#define NV_RM_API_VERSION_CMD_RELAXED '1' +#define NV_RM_API_VERSION_CMD_OVERRIDE '2' + +#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0 +#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1 + +typedef struct nv_ioctl_query_device_intr +{ + NvU32 intrStatus NV_ALIGN_BYTES(4); + NvU32 status; +} nv_ioctl_query_device_intr; + +/* system parameters that the kernel driver may use for configuration */ +typedef struct nv_ioctl_sys_params +{ + NvU64 memblock_size NV_ALIGN_BYTES(8); +} nv_ioctl_sys_params_t; + +typedef struct nv_ioctl_register_fd +{ + int ctl_fd; +} nv_ioctl_register_fd_t; + +#define NV_DMABUF_EXPORT_MAX_HANDLES 128 + +typedef struct nv_ioctl_export_to_dma_buf_fd +{ + int fd; + NvHandle hClient; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NvU64 totalSize NV_ALIGN_BYTES(8); + NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES]; + NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU32 status; +} nv_ioctl_export_to_dma_buf_fd_t; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h new file mode 100644 index 0000000..183f9b4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_INTERFACE_API_H +#define _NV_KERNEL_INTERFACE_API_H +/************************************************************************************************************** +* +* File: nv-kernel-interface-api.h +* +* Description: +* Defines the NV API related macros. +* +**************************************************************************************************************/ + +#if NVOS_IS_UNIX && NVCPU_IS_X86_64 && defined(__use_altstack__) +#define NV_API_CALL __attribute__((altstack(0))) +#else +#define NV_API_CALL +#endif + +#endif /* _NV_KERNEL_INTERFACE_API_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h new file mode 100644 index 0000000..7e28ce2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NV_KREF_H__
+#define __NV_KREF_H__
+
+#include <linux/atomic.h>
+
+typedef struct nv_kref
+{
+    atomic_t refcount;
+} nv_kref_t;
+
+static inline void nv_kref_init(nv_kref_t *nv_kref)
+{
+    atomic_set(&nv_kref->refcount, 1);
+}
+
+static inline void nv_kref_get(nv_kref_t *nv_kref)
+{
+    atomic_inc(&nv_kref->refcount);
+}
+
+static inline int nv_kref_put(nv_kref_t *nv_kref,
+                              void (*release)(nv_kref_t *nv_kref))
+{
+    if (atomic_dec_and_test(&nv_kref->refcount))
+    {
+        release(nv_kref);
+        return 1;
+    }
+
+    return 0;
+}
+
+static inline unsigned int nv_kref_read(const nv_kref_t *nv_kref)
+{
+    return atomic_read(&nv_kref->refcount);
+}
+
+#endif // __NV_KREF_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h
new file mode 100644
index 0000000..82a8a6b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h
@@ -0,0 +1,255 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_KTHREAD_QUEUE_H__
+#define __NV_KTHREAD_QUEUE_H__
+
+#include <linux/types.h>            // atomic_t
+#include <linux/list.h>             // list
+#include <linux/sched.h>            // task_struct
+#include <linux/numa.h>             // NUMA_NO_NODE
+
+#include "conftest.h"
+
+#if defined(NV_LINUX_SEMAPHORE_H_PRESENT)
+    #include <linux/semaphore.h>
+#else
+    #include <asm/semaphore.h>
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+// nv_kthread_q:
+//
+// 1. API and overview
+//
+// This "nv_kthread_q" system implements a simple queuing system for deferred
+// work. The nv_kthread_q system has goals and use cases that are similar to
+// the named workqueues in the Linux kernel, but nv_kthread_q is much (10x or
+// so) smaller, simpler--and correspondingly less general. Deferred work
+// items are put into a queue, and run within the context of a dedicated set
+// of kernel threads (kthread).
+//
+// In order to avoid confusion with the Linux workqueue system, I have
+// avoided using the term "work", and instead refer to "queues" (also called
+// "q's") and "queue items" (also called "q_items"), in both variable names
+// and comments.
+//
+// This module depends only upon the Linux kernel.
+//
+// Queue items that are submitted to separate nv_kthread_q instances are
+// guaranteed to be run in different kthreads.
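+//
+// As a minimal usage sketch (the callback and variable names here are
+// hypothetical), a caller typically does:
+//
+//     static nv_kthread_q_t my_q;
+//     static nv_kthread_q_item_t my_item;
+//
+//     nv_kthread_q_init(&my_q, "my_q");
+//     nv_kthread_q_item_init(&my_item, my_callback, my_args);
+//     nv_kthread_q_schedule_q_item(&my_q, &my_item);
+//     // ... and at teardown time:
+//     nv_kthread_q_stop(&my_q);
+//
+// Each of these calls is documented in detail below.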
+//
+// Queue items that are submitted to the same nv_kthread_q are not guaranteed
+// to be serialized, nor are they guaranteed to run concurrently.
+//
+// 2. Allocations
+//
+// The caller allocates queues and queue items. The nv_kthread_q APIs do
+// the initialization (zeroing and setup) of queues and queue items.
+// Allocation is handled that way, because one of the first use cases is a
+// bottom half interrupt handler, and for that, queue items should be
+// pre-allocated (for example, one per GPU), so that no allocation is
+// required in the top-half interrupt handler. Relevant API calls:
+//
+// 3. Queue initialization
+//
+// nv_kthread_q_init() initializes a queue on the current NUMA node.
+//
+// or
+//
+// nv_kthread_q_init_on_node() initializes a queue on a specific NUMA node.
+//
+// 4. Scheduling things for the queue to run
+//
+// The nv_kthread_q_schedule_q_item() routine will schedule a q_item to run.
+//
+// 5. Stopping the queue(s)
+//
+// The nv_kthread_q_stop() routine will flush the queue, and safely stop
+// the kthread, before returning.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+typedef struct nv_kthread_q nv_kthread_q_t;
+typedef struct nv_kthread_q_item nv_kthread_q_item_t;
+
+typedef void (*nv_q_func_t)(void *args);
+
+struct nv_kthread_q
+{
+    struct list_head q_list_head;
+    spinlock_t q_lock;
+
+    // This is a counting semaphore. It gets incremented and decremented
+    // exactly once for each item that is added to the queue.
+    struct semaphore q_sem;
+    atomic_t main_loop_should_exit;
+
+    struct task_struct *q_kthread;
+};
+
+struct nv_kthread_q_item
+{
+    struct list_head q_list_node;
+    nv_q_func_t function_to_run;
+    void *function_args;
+};
+
+#if defined(NV_KTHREAD_CREATE_ON_NODE_PRESENT)
+    #define NV_KTHREAD_Q_SUPPORTS_AFFINITY() 1
+#else
+    #define NV_KTHREAD_Q_SUPPORTS_AFFINITY() 0
+#endif
+
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE (-1)
+#endif
+
+#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
+
+//
+// The queue must not be used before calling this routine.
+//
+// The caller allocates an nv_kthread_q_t item. This routine initializes
+// the queue, and starts up a kernel thread ("kthread") to service the queue.
+// The queue will initially be empty; there is intentionally no way to
+// pre-initialize the queue with items to run.
+//
+// In order to avoid external dependencies (specifically, NV_STATUS codes), this
+// returns a Linux kernel (negative) errno on failure, and zero on success. It
+// is safe to call nv_kthread_q_stop() on a queue that nv_kthread_q_init()
+// failed for.
+//
+// A short prefix of the qname arg will show up in []'s, via the ps(1) utility.
+//
+// The kernel thread stack is preferably allocated on the specified NUMA node
+// if NUMA-affinity (NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1) is supported, but
+// fallback to another node is possible because kernel allocators do not
+// guarantee affinity. Note that NUMA-affinity applies only to
+// the kthread stack. This API does not do anything about limiting the CPU
+// affinity of the kthread. That is left to the caller.
+//
+// On kernels that do not support NUMA-aware kthread stack allocations
+// (NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 0), the API will return -ENOTSUPP
+// if the value supplied for 'preferred_node' is anything other than
+// NV_KTHREAD_NO_NODE.
+//
+// Reusing a queue: once a queue is initialized, it must be safely shut down
+// (see "Stopping the queue(s)", below), before it can be reused.
So, for +// a simple queue use case, the following will work: +// +// nv_kthread_q_init_on_node(&some_q, "display_name", preferred_node); +// nv_kthread_q_stop(&some_q); +// nv_kthread_q_init_on_node(&some_q, "reincarnated", preferred_node); +// nv_kthread_q_stop(&some_q); +// +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, + const char *qname, + int preferred_node); + +// +// This routine is the same as nv_kthread_q_init_on_node() with the exception +// that the queue stack will be allocated on the NUMA node of the caller. +// +static inline int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname) +{ + return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE); +} + +// +// The caller is responsible for stopping all queues, by calling this routine +// before, for example, kernel module unloading. This nv_kthread_q_stop() +// routine will flush the queue, and safely stop the kthread, before returning. +// +// You may ONLY call nv_kthread_q_stop() once, unless you reinitialize the +// queue in between, as shown in the nv_kthread_q_init() documentation, above. +// +// Do not add any more items to the queue after calling nv_kthread_q_stop. +// +// Calling nv_kthread_q_stop() on a queue which has been zero-initialized or +// for which nv_kthread_q_init() failed, is a no-op. +// +void nv_kthread_q_stop(nv_kthread_q_t *q); + +// +// All items that were in the queue before nv_kthread_q_flush was called, and +// all items scheduled by those items, will get run before this function +// returns. +// +// You may NOT call nv_kthread_q_flush() after having called nv_kthread_q_stop. +// +// This actually flushes the queue twice. That ensures that the queue is fully +// flushed, for an important use case: rescheduling from within one's own +// callback. In order to do that safely, you need to: +// +// -- set a flag that tells the callback to stop rescheduling itself. +// +// -- call either nv_kthread_q_flush or nv_kthread_q_stop (which internally +// calls nv_kthread_q_flush). The nv_kthread_q_flush, in turn, actually +// flushes the queue *twice*. The first flush waits for any callbacks +// to finish, that missed seeing the "stop_rescheduling" flag. The +// second flush waits for callbacks that were already scheduled when the +// first flush finished. +// +void nv_kthread_q_flush(nv_kthread_q_t *q); + +// Assigns function_to_run and function_args to the q_item. +// +// This must be called before calling nv_kthread_q_schedule_q_item. +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args); + +// +// The caller must have already set up the queue, via nv_kthread_q_init(). +// The caller owns the lifetime of the q_item. The nv_kthread_q system runs +// q_items, and adds or removes them from the queue. However, due to the first +// law of q-dynamics, it neither creates nor destroys q_items. +// +// When the callback (the function_to_run argument) is actually run, it is OK +// to free the q_item from within that routine. The nv_kthread_q system +// promises to be done with the q_item before that point. +// +// nv_kthread_q_schedule_q_item may be called from multiple threads at once, +// without danger of corrupting anything. This routine may also be safely +// called from interrupt context, including top-half ISRs. +// +// It is OK to reschedule the same q_item from within its own callback function. +// +// It is also OK to attempt to reschedule the same q_item, if that q_item is +// already pending in the queue. 
The q_item will not be rescheduled if it is
+// already pending.
+//
+// Returns true (non-zero) if the item was actually scheduled. Returns false if
+// the item was not scheduled, which can happen if:
+//
+//    -- The q_item was already pending in a queue, or
+//    -- The queue is shutting down (or not yet started up).
+//
+int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
+                                 nv_kthread_q_item_t *q_item);
+
+// Built-in test. Returns -1 if any subtest failed, or 0 upon success.
+int nv_kthread_q_run_self_test(void);
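+
+/*
+ * Illustrative end-to-end sketch of the API above (the names are
+ * hypothetical; error handling and a real payload are omitted):
+ *
+ *     static void my_callback(void *args)
+ *     {
+ *         // Runs in the queue's dedicated kthread context.
+ *     }
+ *
+ *     static nv_kthread_q_t      my_q;
+ *     static nv_kthread_q_item_t my_item;
+ *
+ *     // Setup, e.g. at module load:
+ *     if (nv_kthread_q_init(&my_q, "my_q") != 0)
+ *         return;                          // kthread could not be started
+ *
+ *     nv_kthread_q_item_init(&my_item, my_callback, NULL);
+ *     nv_kthread_q_schedule_q_item(&my_q, &my_item);
+ *
+ *     // Teardown, e.g. at module unload:
+ *     nv_kthread_q_stop(&my_q);
+ */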
+
+#endif // __NV_KTHREAD_QUEUE_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h
new file mode 100644
index 0000000..4ade615
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h
@@ -0,0 +1,2058 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_LINUX_H_
+#define _NV_LINUX_H_
+
+#include "nvstatus.h"
+#include "nv.h"
+#include "nv-ioctl-numa.h"
+#include "conftest.h"
+
+#include "nv-lock.h"
+#include "nv-pgprot.h"
+#include "nv-mm.h"
+#include "os-interface.h"
+#include "nv-timer.h"
+#include "nv-time.h"
+
+#define NV_KERNEL_NAME "Linux"
+
+#ifndef AUTOCONF_INCLUDED
+#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif
+
+#if defined(NV_GENERATED_UTSRELEASE_H_PRESENT)
+  #include <generated/utsrelease.h>
+#endif
+
+#if defined(NV_GENERATED_COMPILE_H_PRESENT)
+  #include <generated/compile.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/utsname.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+#error "This driver does not support kernels older than 2.6.32!"
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0)
+#  define KERNEL_2_6
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+#  define KERNEL_3
+#else
+#error "This driver does not support development kernels!"
+#endif
+
+#if defined (CONFIG_SMP) && !defined (__SMP__)
+#define __SMP__
+#endif
+
+#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)
+#  define MODVERSIONS
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/mm.h>
+
+#include
+
+#if !defined(VM_RESERVED)
+#define VM_RESERVED    0x00000000
+#endif
+#if !defined(VM_DONTEXPAND)
+#define VM_DONTEXPAND  0x00000000
+#endif
+#if !defined(VM_DONTDUMP)
+#define VM_DONTDUMP    0x00000000
+#endif
+
+#include <linux/init.h>             /* module_init, module_exit         */
+#include <linux/types.h>            /* pic_t, size_t, __u32, etc        */
+#include <linux/errno.h>            /* error codes                      */
+#include <linux/list.h>             /* circular linked list             */
+#include <linux/stddef.h>           /* NULL, offsetof                   */
+#include <linux/wait.h>             /* wait queues                      */
+#include <linux/string.h>           /* strchr(), strpbrk()              */
+
+#include <linux/ctype.h>            /* isspace(), etc                   */
+#include <linux/console.h>          /* acquire_console_sem(), etc       */
+#include <linux/cpufreq.h>          /* cpufreq_get                      */
+
+#include <linux/slab.h>             /* kmalloc, kfree, etc              */
+#include <linux/vmalloc.h>          /* vmalloc, vfree, etc              */
+
+#include <linux/poll.h>             /* poll_wait                        */
+#include <linux/delay.h>            /* mdelay, udelay                   */
+
+#include <linux/sched.h>            /* suser(), capable() replacement   */
+
+#include <linux/random.h>           /* get_random_bytes()               */
+
+#if defined(NV_LINUX_DMA_BUF_H_PRESENT)
+#include <linux/dma-buf.h>
+#endif
+
+#if defined(NV_DRM_AVAILABLE)
+#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
+#include <drm/drm_device.h>
+#endif
+
+#if defined(NV_DRM_DRM_DRV_H_PRESENT)
+#include <drm/drm_drv.h>
+#endif
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_GEM_H_PRESENT)
+#include <drm/drm_gem.h>
+#endif
+#endif /* NV_DRM_AVAILABLE */
+
+/*
+ * sched.h was refactored with this commit (as part of Linux 4.11)
+ *   2017-03-03  1827adb11ad26b2290dc9fe2aaf54976b2439865
+ */
+#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT)
+#include <linux/sched/signal.h>     /* task_lock(), task_unlock() */
+#endif
+
+#if defined(NV_LINUX_SCHED_TASK_H_PRESENT)
+#include <linux/sched/task.h>       /* task_lock(), task_unlock() */
+#endif
+
+/* task and signal-related items, for kernels < 4.11: */
+#include <linux/sched.h>            /* task_lock(), task_unlock() */
+
+#include <linux/moduleparam.h>      /* module_param() */
+#include <asm/tlbflush.h>           /* flush_tlb(), flush_tlb_all() */
+
+#include <linux/pci.h>              /* pci_find_class, etc */
+#include <linux/interrupt.h>        /* tasklets, interrupt helpers */
+#include <linux/timer.h>
+#include <linux/file.h>             /* fget(), fput() */
+#include <linux/rbtree.h>
+#include <linux/cpu.h>              /* CPU hotplug support */
+
+#include <linux/pm_runtime.h>       /* pm_runtime_* */
+#include <linux/fdtable.h>          /* files_fdtable, etc */
+
+#include <asm/div64.h>              /* do_div() */
+#if defined(NV_ASM_SYSTEM_H_PRESENT)
+#include <asm/system.h>             /* cli, sli, save_flags */
+#endif
+#include <asm/io.h>                 /* ioremap, virt_to_phys */
+#include <asm/uaccess.h>            /* access_ok */
+#include <asm/page.h>               /* PAGE_OFFSET */
+#include <asm/pgtable.h>            /* pte bit definitions */
+#include <asm/bitops.h>             /* __set_bit() */
+
+#if defined(NV_LINUX_TIME_H_PRESENT)
+#include <linux/time.h>             /* FD_SET() */
+#endif
+
+#include "nv-list-helpers.h"
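+
+/*
+ * A note on the NV_*_PRESENT macros used throughout this header (for
+ * example, NV_LINUX_DMA_BUF_H_PRESENT above): they come from "conftest.h",
+ * which is generated at build time by probing the target kernel, and each
+ * one gates a compatibility path for kernels where the corresponding
+ * header, symbol, or type is not available.
+ */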
+
+/*
+ * Use current->cred->euid, instead of calling current_euid().
+ * The latter can pull in the GPL-only debug_lockdep_rcu_enabled()
+ * symbol when CONFIG_PROVE_RCU. That is only used for debugging.
+ *
+ * The Linux kernel relies on the assumption that only the current process
+ * is permitted to change its cred structure. Therefore, current_euid()
+ * does not require the RCU's read lock on current->cred.
+ */
+#define NV_CURRENT_EUID() (__kuid_val(current->cred->euid))
+
+#if !defined(NV_KUID_T_PRESENT)
+static inline uid_t __kuid_val(uid_t uid)
+{
+    return uid;
+}
+#endif
+
+#if defined(CONFIG_VGA_ARB)
+#include <linux/vgaarb.h>
+#endif
+
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+
+#if defined(NV_LINUX_DMA_MAP_OPS_H_PRESENT)
+#include <linux/dma-map-ops.h>
+#endif
+
+#if defined(CONFIG_SWIOTLB) && defined(NVCPU_AARCH64)
+#include <linux/swiotlb.h>
+#endif
+
+#include <linux/scatterlist.h>
+#include <linux/completion.h>
+#include <linux/highmem.h>
+
+#include <linux/nodemask.h>
+
+#include <linux/workqueue.h>        /* workqueue */
+#include "nv-kthread-q.h"           /* kthread based queue */
+
+#if defined(NV_LINUX_EFI_H_PRESENT)
+#include <linux/efi.h>              /* efi_enabled */
+#endif
+
+#include <linux/fb.h>               /* fb_info struct */
+#include <linux/screen_info.h>      /* screen_info */
+
+#if !defined(CONFIG_PCI)
+#warning "Attempting to build driver for a platform with no PCI support!"
+#include <asm-generic/pci-dma-compat.h>
+#endif
+
+#if defined(NV_EFI_ENABLED_PRESENT) && defined(NV_EFI_ENABLED_ARGUMENT_COUNT)
+#if (NV_EFI_ENABLED_ARGUMENT_COUNT == 1)
+#define NV_EFI_ENABLED() efi_enabled(EFI_BOOT)
+#else
+#error "NV_EFI_ENABLED_ARGUMENT_COUNT value unrecognized!"
+#endif
+#elif (defined(NV_EFI_ENABLED_PRESENT) || defined(efi_enabled))
+#define NV_EFI_ENABLED() efi_enabled
+#else
+#define NV_EFI_ENABLED() 0
+#endif
+
+#if defined(CONFIG_CRAY_XT)
+#include <cray/cray_nvidia.h>
+NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
+                                     const char *, va_list);
+#endif
+
+#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH)
+#include <asm/eeh.h>
+#define NV_PCI_ERROR_RECOVERY_ENABLED() eeh_enabled()
+#define NV_PCI_ERROR_RECOVERY
+#endif
+
+#if defined(NV_ASM_SET_MEMORY_H_PRESENT)
+#include <asm/set_memory.h>
+#endif
+
+#if defined(NV_SET_MEMORY_UC_PRESENT)
+#undef NV_SET_PAGES_UC_PRESENT
+#endif
+
+#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE)
+#if !defined(NV_SET_MEMORY_UC_PRESENT) && !defined(NV_SET_PAGES_UC_PRESENT)
+#error "This driver requires the ability to change memory types!"
+#endif
+#endif
+
+/*
+ * Traditionally, CONFIG_XEN indicated that the target kernel was
+ * built exclusively for use under a Xen hypervisor, requiring
+ * modifications to or disabling of a variety of NVIDIA graphics
+ * driver code paths. As of the introduction of CONFIG_PARAVIRT
+ * and support for Xen hypervisors within the CONFIG_PARAVIRT_GUEST
+ * architecture, CONFIG_XEN merely indicates that the target
+ * kernel can run under a Xen hypervisor, but not that it will.
+ *
+ * If CONFIG_XEN and CONFIG_PARAVIRT are defined, the old Xen
+ * specific code paths are disabled. If the target kernel executes
+ * stand-alone, the NVIDIA graphics driver will work fine. If the
+ * kernel executes under a Xen (or other) hypervisor, however, the
+ * NVIDIA graphics driver has no way of knowing and is unlikely
+ * to work correctly.
+ */
+#if defined(CONFIG_XEN) && !defined(CONFIG_PARAVIRT)
+#include <asm/maddr.h>
+#include <xen/interface/memory.h>
+#define NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL
+#endif
+
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#include <asm/kdb.h>
+#endif
+
+#if defined(CONFIG_X86_REMOTE_DEBUG)
+#include
+#endif
+
+#if defined(DEBUG) && defined(CONFIG_KGDB) && \
+    defined(NVCPU_AARCH64)
+#include <asm/kgdb.h>
+#endif
+
+#if defined(NVCPU_X86_64) && !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL)
+#define NV_ENABLE_PAT_SUPPORT
+#endif
+
+#define NV_PAT_MODE_DISABLED    0
+#define NV_PAT_MODE_KERNEL      1
+#define NV_PAT_MODE_BUILTIN     2
+
+extern int nv_pat_mode;
+
+#if defined(CONFIG_HOTPLUG_CPU)
+#define NV_ENABLE_HOTPLUG_CPU
+#include <linux/notifier.h>         /* struct notifier_block, etc */
+#endif
+
+#if (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE))
+#include <linux/i2c.h>
+#endif
+
+#if defined(CONFIG_ACPI)
+#include <linux/acpi.h>
+#define NV_LINUX_ACPI_EVENTS_SUPPORTED 1
+#endif
+
+#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
+#define NV_ACPI_WALK_NAMESPACE(type, start_object, max_depth, \
+        user_function, args...) \
+    acpi_walk_namespace(type, start_object, max_depth, \
+            user_function, NULL, args)
+#endif
+
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
+#define NV_CONFIG_PREEMPT_RT 1
+#endif
+
+#if defined(NV_WRITE_CR4_PRESENT)
+#define NV_READ_CR4()       read_cr4()
+#define NV_WRITE_CR4(cr4)   write_cr4(cr4)
+#else
+#define NV_READ_CR4()       __read_cr4()
+#define NV_WRITE_CR4(cr4)   __write_cr4(cr4)
+#endif
+
+#ifndef get_cpu
+#define get_cpu() smp_processor_id()
+#define put_cpu()
+#endif
+
+#if !defined(unregister_hotcpu_notifier)
+#define unregister_hotcpu_notifier unregister_cpu_notifier
+#endif
+#if !defined(register_hotcpu_notifier)
+#define register_hotcpu_notifier register_cpu_notifier
+#endif
+
+#if defined(NVCPU_X86_64)
+#if !defined(pmd_large)
+#define pmd_large(_pmd) \
+    ((pmd_val(_pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+#endif
+#endif /* defined(NVCPU_X86_64) */
+
+#define NV_PAGE_COUNT(page) \
+  ((unsigned int)page_count(page))
+#define NV_GET_PAGE_COUNT(page_ptr) \
+  (NV_PAGE_COUNT(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)))
+#define NV_GET_PAGE_FLAGS(page_ptr) \
+  (NV_GET_PAGE_STRUCT(page_ptr->phys_addr)->flags)
+
+/*
+ * Before the introduction of VM_PFNMAP, there was a VM_UNPAGED flag.
+ * Drivers which wanted to call remap_pfn_range on normal pages had to use this
+ * VM_UNPAGED flag *and* set PageReserved. With the introduction of VM_PFNMAP,
+ * that restriction went away. This is described in commit
+ *
+ *   2005-10-28  6aab341e0a28aff100a09831c5300a2994b8b986
+ *   ("mm: re-architect the VM_UNPAGED logic")
+ *
+ * , which added VM_PFNMAP and vm_normal_page. Therefore, if VM_PFNMAP is
+ * defined, then we do *not* need to mark a page as reserved, in order to
+ * call remap_pfn_range().
+ */
+#if !defined(VM_PFNMAP)
+#define NV_MAYBE_RESERVE_PAGE(page_ptr) \
+    SetPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr))
+#define NV_MAYBE_UNRESERVE_PAGE(page_ptr) \
+    ClearPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr))
+#else
+#define NV_MAYBE_RESERVE_PAGE(page_ptr)
+#define NV_MAYBE_UNRESERVE_PAGE(page_ptr)
+#endif /* defined(VM_PFNMAP) */
+
+#if !defined(__GFP_COMP)
+#define __GFP_COMP 0
+#endif
+
+#if !defined(DEBUG) && defined(__GFP_NOWARN)
+#define NV_GFP_KERNEL (GFP_KERNEL | __GFP_NOWARN)
+#define NV_GFP_ATOMIC (GFP_ATOMIC | __GFP_NOWARN)
+#else
+#define NV_GFP_KERNEL (GFP_KERNEL)
+#define NV_GFP_ATOMIC (GFP_ATOMIC)
+#endif
+
+#if defined(GFP_DMA32)
+/*
+ * GFP_DMA32 is similar to GFP_DMA, but instructs the Linux zone
+ * allocator to allocate memory from the first 4GB on platforms
+ * such as Linux/x86-64; the alternative is to use an IOMMU such
+ * as the one implemented with the K8 GART, if available.
+ */
+#define NV_GFP_DMA32 (NV_GFP_KERNEL | GFP_DMA32)
+#else
+#define NV_GFP_DMA32 (NV_GFP_KERNEL)
+#endif
+
+extern NvBool nvos_is_chipset_io_coherent(void);
+
+#if defined(NVCPU_X86_64)
+#define CACHE_FLUSH()            asm volatile("wbinvd":::"memory")
+#define WRITE_COMBINE_FLUSH()    asm volatile("sfence":::"memory")
+#elif defined(NVCPU_AARCH64)
+    static inline void nv_flush_cache_cpu(void *info)
+    {
+        if (!nvos_is_chipset_io_coherent())
+        {
+#if defined(NV_FLUSH_CACHE_ALL_PRESENT)
+            flush_cache_all();
+#else
+            WARN_ONCE(0, "NVRM: kernel does not support flush_cache_all()\n");
+#endif
+        }
+    }
+#define CACHE_FLUSH()            nv_flush_cache_cpu(NULL)
+#define CACHE_FLUSH_ALL()        on_each_cpu(nv_flush_cache_cpu, NULL, 1)
+#define WRITE_COMBINE_FLUSH()    mb()
+#elif defined(NVCPU_PPC64LE)
+#define CACHE_FLUSH()            asm volatile("sync;  \n" \
+                                              "isync; \n" ::: "memory")
+#define WRITE_COMBINE_FLUSH()    CACHE_FLUSH()
+#endif
+
+typedef enum
+{
+    NV_MEMORY_TYPE_SYSTEM,      /* Memory mapped for ROM, SBIOS and physical RAM. */
+    NV_MEMORY_TYPE_REGISTERS,
+    NV_MEMORY_TYPE_FRAMEBUFFER,
+    NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g.
BARs and MCFG of device */ +} nv_memory_type_t; + +#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE) +#define NV_ALLOW_WRITE_COMBINING(mt) 1 +#elif defined(NVCPU_X86_64) +#if defined(NV_ENABLE_PAT_SUPPORT) +#define NV_ALLOW_WRITE_COMBINING(mt) \ + ((nv_pat_mode != NV_PAT_MODE_DISABLED) && \ + ((mt) != NV_MEMORY_TYPE_REGISTERS)) +#else +#define NV_ALLOW_WRITE_COMBINING(mt) 0 +#endif +#endif + +#if !defined(IRQF_SHARED) +#define IRQF_SHARED SA_SHIRQ +#endif + +#define NV_MAX_RECURRING_WARNING_MESSAGES 10 + +/* various memory tracking/debugging techniques + * disabled for retail builds, enabled for debug builds + */ + +// allow an easy way to convert all debug printfs related to memory +// management back and forth between 'info' and 'errors' +#if defined(NV_DBG_MEM) +#define NV_DBG_MEMINFO NV_DBG_ERRORS +#else +#define NV_DBG_MEMINFO NV_DBG_INFO +#endif + +#define NV_MEM_TRACKING_PAD_SIZE(size) \ + (size) = NV_ALIGN_UP((size + sizeof(void *)), sizeof(void *)) + +#define NV_MEM_TRACKING_HIDE_SIZE(ptr, size) \ + if ((ptr != NULL) && (*(ptr) != NULL)) \ + { \ + NvU8 *__ptr; \ + *(unsigned long *) *(ptr) = (size); \ + __ptr = *(ptr); __ptr += sizeof(void *); \ + *(ptr) = (void *) __ptr; \ + } +#define NV_MEM_TRACKING_RETRIEVE_SIZE(ptr, size) \ + { \ + NvU8 *__ptr = (ptr); __ptr -= sizeof(void *); \ + (ptr) = (void *) __ptr; \ + (size) = *(unsigned long *) (ptr); \ + } + +/* keep track of memory usage */ +#include "nv-memdbg.h" + +static inline void *nv_vmalloc(unsigned long size) +{ +#if defined(NV_VMALLOC_HAS_PGPROT_T_ARG) + void *ptr = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); +#else + void *ptr = __vmalloc(size, GFP_KERNEL); +#endif + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +} + +static inline void nv_vfree(void *ptr, NvU32 size) +{ + NV_MEMDBG_REMOVE(ptr, size); + vfree(ptr); +} + +static inline void *nv_ioremap(NvU64 phys, NvU64 size) +{ + void *ptr = ioremap(phys, size); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +} + +static inline void *nv_ioremap_nocache(NvU64 phys, NvU64 size) +{ + return nv_ioremap(phys, size); +} + +static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size) +{ +#if defined(NV_IOREMAP_CACHE_PRESENT) + void *ptr = ioremap_cache(phys, size); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +#elif defined(NVCPU_PPC64LE) + // + // ioremap_cache() has been only implemented correctly for ppc64le with + // commit f855b2f544d6 in April 2017 (kernel 4.12+). Internally, the kernel + // does provide a default implementation of ioremap_cache() that would be + // incorrect for our use (creating an uncached mapping) before the + // referenced commit, but that implementation is not exported and the + // NV_IOREMAP_CACHE_PRESENT conftest doesn't pick it up, and we end up in + // this #elif branch. + // + // At the same time, ppc64le have supported ioremap_prot() since May 2011 + // (commit 40f1ce7fb7e8, kernel 3.0+) and that covers all kernels we + // support on power. 
+ // + void *ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL)); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +#else + return nv_ioremap(phys, size); +#endif +} + +static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size) +{ +#if defined(NV_IOREMAP_WC_PRESENT) + void *ptr = ioremap_wc(phys, size); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +#else + return nv_ioremap_nocache(phys, size); +#endif +} + +static inline void nv_iounmap(void *ptr, NvU64 size) +{ + NV_MEMDBG_REMOVE(ptr, size); + iounmap(ptr); +} + +static NvBool nv_numa_node_has_memory(int node_id) +{ + if (node_id < 0 || node_id >= MAX_NUMNODES) + return NV_FALSE; +#if defined(NV_NODE_STATES_N_MEMORY_PRESENT) + return node_state(node_id, N_MEMORY) ? NV_TRUE : NV_FALSE; +#else + return node_state(node_id, N_HIGH_MEMORY) ? NV_TRUE : NV_FALSE; +#endif +} + +#define NV_KMALLOC(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_KERNEL); \ + if (ptr) \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#define NV_KMALLOC_ATOMIC(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_ATOMIC); \ + if (ptr) \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#if defined(__GFP_RETRY_MAYFAIL) +#define NV_GFP_NO_OOM (NV_GFP_KERNEL | __GFP_RETRY_MAYFAIL) +#elif defined(__GFP_NORETRY) +#define NV_GFP_NO_OOM (NV_GFP_KERNEL | __GFP_NORETRY) +#else +#define NV_GFP_NO_OOM (NV_GFP_KERNEL) +#endif + +#define NV_KMALLOC_NO_OOM(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_NO_OOM); \ + if (ptr) \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#define NV_KFREE(ptr, size) \ + { \ + NV_MEMDBG_REMOVE(ptr, size); \ + kfree((void *) (ptr)); \ + } + +#define NV_ALLOC_PAGES_NODE(ptr, nid, order, gfp_mask) \ + { \ + (ptr) = (unsigned long)page_address(alloc_pages_node(nid, gfp_mask, order)); \ + } + +#define NV_GET_FREE_PAGES(ptr, order, gfp_mask) \ + { \ + (ptr) = __get_free_pages(gfp_mask, order); \ + } + +#define NV_FREE_PAGES(ptr, order) \ + { \ + free_pages(ptr, order); \ + } + +#if defined(PAGE_KERNEL_NOENC) +#if defined(__pgprot_mask) +#define NV_PAGE_KERNEL_NOCACHE_NOENC __pgprot_mask(__PAGE_KERNEL_NOCACHE) +#elif defined(default_pgprot) +#define NV_PAGE_KERNEL_NOCACHE_NOENC default_pgprot(__PAGE_KERNEL_NOCACHE) +#elif defined( __pgprot) +#define NV_PAGE_KERNEL_NOCACHE_NOENC __pgprot(__PAGE_KERNEL_NOCACHE) +#else +#error "Unsupported kernel!!!" +#endif +#endif + +static inline NvUPtr nv_vmap(struct page **pages, NvU32 page_count, + NvBool cached, NvBool unencrypted) +{ + void *ptr; + pgprot_t prot = PAGE_KERNEL; +#if defined(NVCPU_X86_64) +#if defined(PAGE_KERNEL_NOENC) + if (unencrypted) + { + prot = cached ? PAGE_KERNEL_NOENC : NV_PAGE_KERNEL_NOCACHE_NOENC; + } + else +#endif + { + prot = cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE; + } +#elif defined(NVCPU_AARCH64) + prot = cached ? PAGE_KERNEL : NV_PGPROT_UNCACHED(PAGE_KERNEL); +#endif + /* All memory cached in PPC64LE; can't honor 'cached' input. 
*/ + ptr = vmap(pages, page_count, VM_MAP, prot); + if (ptr) + NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE); + return (NvUPtr)ptr; +} + +static inline void nv_vunmap(NvUPtr vaddr, NvU32 page_count) +{ + vunmap((void *)vaddr); + NV_MEMDBG_REMOVE((void *)vaddr, page_count * PAGE_SIZE); +} + +#if defined(NV_GET_NUM_PHYSPAGES_PRESENT) +#define NV_NUM_PHYSPAGES get_num_physpages() +#else +#define NV_NUM_PHYSPAGES num_physpages +#endif +#define NV_GET_CURRENT_PROCESS() current->tgid +#define NV_IN_ATOMIC() in_atomic() +#define NV_LOCAL_BH_DISABLE() local_bh_disable() +#define NV_LOCAL_BH_ENABLE() local_bh_enable() +#define NV_COPY_TO_USER(to, from, n) copy_to_user(to, from, n) +#define NV_COPY_FROM_USER(to, from, n) copy_from_user(to, from, n) + +#define NV_IS_SUSER() capable(CAP_SYS_ADMIN) +#define NV_PCI_DEVICE_NAME(pci_dev) ((pci_dev)->pretty_name) +#define NV_CLI() local_irq_disable() +#define NV_SAVE_FLAGS(eflags) local_save_flags(eflags) +#define NV_RESTORE_FLAGS(eflags) local_irq_restore(eflags) +#define NV_MAY_SLEEP() (!irqs_disabled() && !in_interrupt() && !NV_IN_ATOMIC()) +#define NV_MODULE_PARAMETER(x) module_param(x, int, 0) +#define NV_MODULE_STRING_PARAMETER(x) module_param(x, charp, 0) +#undef MODULE_PARM + +#define NV_NUM_CPUS() num_possible_cpus() + +static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa) +{ +#if defined(NV_PHYS_TO_DMA_PRESENT) + return phys_to_dma(dev, pa); +#elif defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + return phys_to_machine(pa); +#else + return (dma_addr_t)pa; +#endif +} + +#define NV_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define NV_VMA_PGOFF(vma) ((vma)->vm_pgoff) +#define NV_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start) +#define NV_VMA_OFFSET(vma) (((NvU64)(vma)->vm_pgoff) << PAGE_SHIFT) +#define NV_VMA_PRIVATE(vma) ((vma)->vm_private_data) +#define NV_VMA_FILE(vma) ((vma)->vm_file) + +#define NV_DEVICE_MINOR_NUMBER(x) minor((x)->i_rdev) +#define NV_CONTROL_DEVICE_MINOR 255 + +#define NV_PCI_DISABLE_DEVICE(pci_dev) \ + { \ + NvU16 __cmd[2]; \ + pci_read_config_word((pci_dev), PCI_COMMAND, &__cmd[0]); \ + pci_disable_device(pci_dev); \ + pci_read_config_word((pci_dev), PCI_COMMAND, &__cmd[1]); \ + __cmd[1] |= PCI_COMMAND_MEMORY; \ + pci_write_config_word((pci_dev), PCI_COMMAND, \ + (__cmd[1] | (__cmd[0] & PCI_COMMAND_IO))); \ + } + +#define NV_PCI_RESOURCE_START(pci_dev, bar) pci_resource_start(pci_dev, (bar)) +#define NV_PCI_RESOURCE_SIZE(pci_dev, bar) pci_resource_len(pci_dev, (bar)) +#define NV_PCI_RESOURCE_FLAGS(pci_dev, bar) pci_resource_flags(pci_dev, (bar)) + +#define NV_PCI_RESOURCE_VALID(pci_dev, bar) \ + ((NV_PCI_RESOURCE_START(pci_dev, bar) != 0) && \ + (NV_PCI_RESOURCE_SIZE(pci_dev, bar) != 0)) + +#define NV_PCI_DOMAIN_NUMBER(pci_dev) (NvU32)pci_domain_nr(pci_dev->bus) +#define NV_PCI_BUS_NUMBER(pci_dev) (pci_dev)->bus->number +#define NV_PCI_DEVFN(pci_dev) (pci_dev)->devfn +#define NV_PCI_SLOT_NUMBER(pci_dev) PCI_SLOT(NV_PCI_DEVFN(pci_dev)) + +#if defined(CONFIG_X86_UV) && defined(NV_CONFIG_X86_UV) +#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus,devfn) \ + ({ \ + struct pci_dev *__dev = NULL; \ + while ((__dev = pci_get_device(PCI_VENDOR_ID_NVIDIA, \ + PCI_ANY_ID, __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_class((PCI_CLASS_BRIDGE_HOST << 8), \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == 
domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_class((PCI_CLASS_BRIDGE_PCI << 8), \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + __dev; \ + }) +#elif defined(NV_PCI_GET_DOMAIN_BUS_AND_SLOT_PRESENT) +#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus, devfn) \ + pci_get_domain_bus_and_slot(domain, bus, devfn) +#else +#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus,devfn) \ + ({ \ + struct pci_dev *__dev = NULL; \ + while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + __dev; \ + }) +#endif + +#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE_PRESENT) // introduced in 3.4.9 +#define NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(pci_dev) pci_stop_and_remove_bus_device(pci_dev) +#elif defined(NV_PCI_REMOVE_BUS_DEVICE_PRESENT) // introduced in 2.6 +#define NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(pci_dev) pci_remove_bus_device(pci_dev) +#endif + +#define NV_PRINT_AT(nv_debug_level,at) \ + { \ + nv_printf(nv_debug_level, \ + "NVRM: VM: %s:%d: 0x%p, %d page(s), count = %d, flags = 0x%08x, " \ + "page_table = 0x%p\n", __FUNCTION__, __LINE__, at, \ + at->num_pages, NV_ATOMIC_READ(at->usage_count), \ + at->flags, at->page_table); \ + } + +#define NV_PRINT_VMA(nv_debug_level,vma) \ + { \ + nv_printf(nv_debug_level, \ + "NVRM: VM: %s:%d: 0x%lx - 0x%lx, 0x%08x bytes @ 0x%016llx, 0x%p, 0x%p\n", \ + __FUNCTION__, __LINE__, vma->vm_start, vma->vm_end, NV_VMA_SIZE(vma), \ + NV_VMA_OFFSET(vma), NV_VMA_PRIVATE(vma), NV_VMA_FILE(vma)); \ + } + +#ifndef minor +# define minor(x) MINOR(x) +#endif + +#if defined(cpu_relax) +#define NV_CPU_RELAX() cpu_relax() +#else +#define NV_CPU_RELAX() barrier() +#endif + +#ifndef IRQ_RETVAL +typedef void irqreturn_t; +#define IRQ_RETVAL(a) +#endif + +#if !defined(PCI_COMMAND_SERR) +#define PCI_COMMAND_SERR 0x100 +#endif +#if !defined(PCI_COMMAND_INTX_DISABLE) +#define PCI_COMMAND_INTX_DISABLE 0x400 +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +/* + * On Linux on PPC64LE enable basic support for Linux PCI error recovery (see + * Documentation/PCI/pci-error-recovery.txt). Currently RM only supports error + * notification and data collection, not actual recovery of the device. + */ +#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH) +#include +#define NV_PCI_ERROR_RECOVERY +#endif + +/* + * If the host OS has page sizes larger than 4KB, we may have a security + * problem. Registers are typically grouped in 4KB pages, but if there are + * larger pages, then the smallest userspace mapping possible (e.g., a page) + * may give more access than intended to the user. 
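+ *
+ * As a concrete illustration: with 64 KB kernel pages, a 4 KB register
+ * block shares its page with fifteen neighboring 4 KB blocks, so the
+ * smallest possible user mapping would expose all sixteen of them.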
+ */ +#define NV_4K_PAGE_ISOLATION_REQUIRED(addr, size) \ + ((PAGE_SIZE > NV_RM_PAGE_SIZE) && \ + ((size) <= NV_RM_PAGE_SIZE) && \ + (((addr) >> NV_RM_PAGE_SHIFT) == \ + (((addr) + (size) - 1) >> NV_RM_PAGE_SHIFT))) + +/* + * The kernel may have a workaround for this, by providing a method to isolate + * a single 4K page in a given mapping. + */ +#if (PAGE_SIZE > NV_RM_PAGE_SIZE) && defined(NVCPU_PPC64LE) && defined(NV_PAGE_4K_PFN) + #define NV_4K_PAGE_ISOLATION_PRESENT + #define NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr) \ + ((NvP64)((void*)(((addr) >> NV_RM_PAGE_SHIFT) << PAGE_SHIFT))) + #define NV_4K_PAGE_ISOLATION_MMAP_LEN(size) PAGE_SIZE + #define NV_4K_PAGE_ISOLATION_ACCESS_START(addr) \ + ((NvP64)((void*)((addr) & ~NV_RM_PAGE_MASK))) + #define NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size) \ + ((((addr) & NV_RM_PAGE_MASK) + size + NV_RM_PAGE_MASK) & \ + ~NV_RM_PAGE_MASK) + #define NV_PROT_4K_PAGE_ISOLATION NV_PAGE_4K_PFN +#endif + +static inline int nv_remap_page_range(struct vm_area_struct *vma, + unsigned long virt_addr, NvU64 phys_addr, NvU64 size, pgprot_t prot) +{ + int ret = -1; + +#if defined(NV_4K_PAGE_ISOLATION_PRESENT) && defined(NV_PROT_4K_PAGE_ISOLATION) + if ((size == PAGE_SIZE) && + ((pgprot_val(prot) & NV_PROT_4K_PAGE_ISOLATION) != 0)) + { + /* + * remap_4k_pfn() hardcodes the length to a single OS page, and checks + * whether applying the page isolation workaround will cause PTE + * corruption (in which case it will fail, and this is an unsupported + * configuration). + */ +#if defined(NV_HASH__REMAP_4K_PFN_PRESENT) + ret = hash__remap_4k_pfn(vma, virt_addr, (phys_addr >> PAGE_SHIFT), prot); +#else + ret = remap_4k_pfn(vma, virt_addr, (phys_addr >> PAGE_SHIFT), prot); +#endif + } + else +#endif + { + ret = remap_pfn_range(vma, virt_addr, (phys_addr >> PAGE_SHIFT), size, + prot); + } + + return ret; +} + +static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra) +{ + pgprot_t prot = __pgprot(pgprot_val(vm_prot) | extra); +#if defined(CONFIG_AMD_MEM_ENCRYPT) && defined(NV_PGPROT_DECRYPTED_PRESENT) + /* + * When AMD memory encryption is enabled, device memory mappings with the + * C-bit set read as 0xFF, so ensure the bit is cleared for user mappings. + * + * If cc_mkdec() is present, then pgprot_decrypted() can't be used. + */ +#if defined(NV_CC_MKDEC_PRESENT) + prot = __pgprot(__sme_clr(pgprot_val(vm_prot))); +#else + prot = pgprot_decrypted(prot); +#endif +#endif + + return prot; +} + +static inline int nv_io_remap_page_range(struct vm_area_struct *vma, + NvU64 phys_addr, NvU64 size, NvU32 extra_prot) +{ + int ret = -1; +#if !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + ret = nv_remap_page_range(vma, vma->vm_start, phys_addr, size, + nv_adjust_pgprot(vma->vm_page_prot, extra_prot)); +#else + ret = io_remap_pfn_range(vma, vma->vm_start, (phys_addr >> PAGE_SHIFT), + size, nv_adjust_pgprot(vma->vm_page_prot, extra_prot)); +#endif + return ret; +} + +static inline vm_fault_t nv_insert_pfn(struct vm_area_struct *vma, + NvU64 virt_addr, NvU64 pfn, NvU32 extra_prot) +{ + /* + * vm_insert_pfn{,_prot} replaced with vmf_insert_pfn{,_prot} in Linux 4.20 + */ +#if defined(NV_VMF_INSERT_PFN_PROT_PRESENT) + return vmf_insert_pfn_prot(vma, virt_addr, pfn, + __pgprot(pgprot_val(vma->vm_page_prot) | extra_prot)); +#else + int ret = -EINVAL; + /* + * Only PPC64LE (NV_4K_PAGE_ISOLATION_PRESENT) requires extra_prot to be + * used when remapping. + * + * vm_insert_pfn_prot() was added in Linux 4.4, whereas POWER9 support + * was added in Linux 4.8. 
+ * + * Rather than tampering with the vma to make use of extra_prot with + * vm_insert_pfn() on older kernels, for now, just fail in this case, as + * it's not expected to be used currently. + */ +#if defined(NV_VM_INSERT_PFN_PROT_PRESENT) + ret = vm_insert_pfn_prot(vma, virt_addr, pfn, + __pgprot(pgprot_val(vma->vm_page_prot) | extra_prot)); +#elif !defined(NV_4K_PAGE_ISOLATION_PRESENT) + ret = vm_insert_pfn(vma, virt_addr, pfn); +#endif + switch (ret) + { + case 0: + case -EBUSY: + /* + * EBUSY indicates that another thread already handled + * the faulted range. + */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + break; + } +#endif /* defined(NV_VMF_INSERT_PFN_PROT_PRESENT) */ + return VM_FAULT_SIGBUS; +} + + +#define NV_PAGE_MASK (NvU64)(long)PAGE_MASK + +extern void *nvidia_stack_t_cache; + +/* + * On Linux, when a kmem cache is created, a new sysfs entry is created for the + * same unless it's merged with an existing cache. Upstream Linux kernel commit + * 3b7b314053d021601940c50b07f5f1423ae67e21 (version 4.12+) made cache + * destruction asynchronous which creates a race between cache destroy and + * create. A new cache created with attributes as a previous cache, which is + * scheduled for destruction, can try to create a sysfs entry with the same + * conflicting name. Upstream Linux kernel commit + * d50d82faa0c964e31f7a946ba8aba7c715ca7ab0 (4.18) fixes this issue by cleaning + * up sysfs entry within slab_mutex, so the entry is deleted before a cache with + * the same attributes could be created. + * + * To workaround this kernel issue, we take two steps: + * - Create unmergeable caches: a kmem_cache with a constructor is unmergeable. + * So, we define an empty contructor for the same. Creating an unmergeable + * cache ensures that the kernel doesn't generate an internal name and always + * uses our name instead. + * + * - Generate a unique cache name by appending the current timestamp (ns). We + * wait for the timestamp to increment by at least one to ensure that we do + * not hit a name conflict in cache create -> destroy (async) -> create cycle. + */ +#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) +static inline void nv_kmem_ctor_dummy(void *arg) +{ + (void)arg; +} +#else +#define nv_kmem_ctor_dummy NULL +#endif + +#define NV_KMEM_CACHE_CREATE(name, type) \ + nv_kmem_cache_create(name, sizeof(type), 0) + +/* The NULL pointer check is required for kernels older than 4.3 */ +#define NV_KMEM_CACHE_DESTROY(kmem_cache) \ + if (kmem_cache != NULL) \ + { \ + kmem_cache_destroy(kmem_cache); \ + } + +#define NV_KMEM_CACHE_ALLOC(kmem_cache) \ + kmem_cache_alloc(kmem_cache, GFP_KERNEL) +#define NV_KMEM_CACHE_FREE(ptr, kmem_cache) \ + kmem_cache_free(kmem_cache, ptr) + +static inline void *nv_kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) +{ +#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) + /* + * We cannot call kmem_cache_zalloc directly as it adds the __GFP_ZERO + * flag. This flag together with the presence of a slab constructor is + * flagged as a potential bug by the Linux kernel since it is the role + * of a constructor to fill an allocated object with the desired + * pattern. In our case, we specified a (dummy) constructor as a + * workaround for a bug and not to zero-initialize objects. So, we take + * the pain here to memset allocated object ourselves. 
+ */ + void *object = kmem_cache_alloc(k, flags); + if (object) + memset(object, 0, kmem_cache_size(k)); + return object; +#else + return kmem_cache_zalloc(k, flags); +#endif +} + +static inline int nv_kmem_cache_alloc_stack(nvidia_stack_t **stack) +{ + nvidia_stack_t *sp = NULL; +#if defined(NVCPU_X86_64) + sp = NV_KMEM_CACHE_ALLOC(nvidia_stack_t_cache); + if (sp == NULL) + return -ENOMEM; + sp->size = sizeof(sp->stack); + sp->top = sp->stack + sp->size; +#endif + *stack = sp; + return 0; +} + +static inline void nv_kmem_cache_free_stack(nvidia_stack_t *stack) +{ +#if defined(NVCPU_X86_64) + if (stack != NULL) + { + NV_KMEM_CACHE_FREE(stack, nvidia_stack_t_cache); + } +#endif +} + +#if defined(NVCPU_X86_64) +/* + * RAM is cached on Linux by default, we can assume there's + * nothing to be done here. This is not the case for the + * other memory spaces: we will have made an attempt to add + * a WC MTRR for the frame buffer. + * + * If a WC MTRR is present, we can't satisfy the WB mapping + * attempt here, since the achievable effective memory + * types in that case are WC and UC, if not it's typically + * UC (MTRRdefType is UC); we could only satisfy WB mapping + * requests with a WB MTRR. + */ +#define NV_ALLOW_CACHING(mt) ((mt) == NV_MEMORY_TYPE_SYSTEM) +#else +#define NV_ALLOW_CACHING(mt) ((mt) != NV_MEMORY_TYPE_REGISTERS) +#endif + +typedef struct nvidia_pte_s { + NvU64 phys_addr; + unsigned long virt_addr; + NvU64 dma_addr; +#ifdef CONFIG_XEN + unsigned int guest_pfn; +#endif + unsigned int page_count; +} nvidia_pte_t; + + +/* Standard dma_buf-related information. */ +struct nv_dma_buf +{ + struct dma_buf *dma_buf; + struct dma_buf_attachment *dma_attach; + struct sg_table *sgt; +}; + + +typedef struct nv_alloc_s { + struct nv_alloc_s *next; + struct device *dev; + atomic_t usage_count; + struct { + NvBool contig : 1; + NvBool guest : 1; + NvBool zeroed : 1; + NvBool aliased : 1; + NvBool user : 1; + NvBool node0 : 1; + NvBool peer_io : 1; + NvBool physical : 1; + NvBool unencrypted : 1; + NvBool coherent : 1; + } flags; + unsigned int cache_type; + unsigned int num_pages; + unsigned int order; + unsigned int size; + nvidia_pte_t **page_table; /* list of physical pages allocated */ + unsigned int pid; + struct page **user_pages; + NvU64 guest_id; /* id of guest VM */ + void *import_priv; + struct sg_table *import_sgt; +} nv_alloc_t; + +/** + * nv_is_dma_direct - return true if direct_dma is enabled + * + * Starting with the 5.0 kernel, SWIOTLB is merged into + * direct_dma, so systems without an IOMMU use direct_dma. We + * need to know if this is the case, so that we can use a + * different check for SWIOTLB enablement. + */ +static inline NvBool nv_is_dma_direct(struct device *dev) +{ + NvBool is_direct = NV_FALSE; + +#if defined(NV_DMA_IS_DIRECT_PRESENT) + if (dma_is_direct(get_dma_ops(dev))) + is_direct = NV_TRUE; +#endif + + return is_direct; +} + +/** + * nv_dma_maps_swiotlb - return NV_TRUE if swiotlb is enabled + * + * SWIOTLB creates bounce buffers for the DMA mapping layer to + * use if a driver asks the kernel to map a DMA buffer that is + * outside of the device's addressable range. The driver does + * not function correctly if bounce buffers are enabled for the + * device. So if SWIOTLB is enabled, we should avoid making + * mapping calls. 
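+ *
+ * As a hypothetical illustration: a device with a 40-bit DMA mask on a
+ * system populated with memory above 1 TB would otherwise be handed
+ * bounce buffers for any pages it cannot address directly.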
+ */ +static inline NvBool +nv_dma_maps_swiotlb(struct device *dev) +{ + NvBool swiotlb_in_use = NV_FALSE; +#if defined(CONFIG_SWIOTLB) + #if defined(NV_DMA_OPS_PRESENT) || defined(NV_GET_DMA_OPS_PRESENT) || \ + defined(NV_SWIOTLB_DMA_OPS_PRESENT) + /* + * We only use the 'dma_ops' symbol on older x86_64 kernels; later kernels, + * including those for other architectures, have converged on the + * get_dma_ops() interface. + */ + #if defined(NV_GET_DMA_OPS_PRESENT) + /* + * The __attribute__ ((unused)) is necessary because in at least one + * case, *none* of the preprocessor branches below are taken, and + * so the ops variable ends up never being referred to at all. This can + * happen with the (NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 1) + * case. + */ + const struct dma_map_ops *ops __attribute__ ((unused)) = get_dma_ops(dev); + #else + const struct dma_mapping_ops *ops __attribute__ ((unused)) = dma_ops; + #endif + + /* + * The switch from dma_mapping_ops -> dma_map_ops coincided with the + * switch from swiotlb_map_sg -> swiotlb_map_sg_attrs. + */ + #if defined(NVCPU_AARCH64) && \ + defined(NV_NONCOHERENT_SWIOTLB_DMA_OPS_PRESENT) + /* AArch64 exports these symbols directly */ + swiotlb_in_use = ((ops == &noncoherent_swiotlb_dma_ops) || + (ops == &coherent_swiotlb_dma_ops)); + #elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs != 0 + swiotlb_in_use = (ops->map_sg == swiotlb_map_sg_attrs); + #elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops != 0 + swiotlb_in_use = (ops == &swiotlb_dma_ops); + #endif + /* + * The "else" case that is not shown + * (for NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 0 || + * NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops == 0) does + * nothing, and ends up dropping us out to the last line of this function, + * effectively returning false. The nearly-human-readable version of that + * case is "struct swiotlb_dma_ops is present (NV_SWIOTLB_DMA_OPS_PRESENT + * is defined) but neither swiotlb_map_sg_attrs nor swiotlb_dma_ops is + * present". + * + * That can happen on kernels that fall within below range: + * + * 2017-12-24 4bd89ed39b2ab8dc4ac4b6c59b07d420b0213bec + * ("swiotlb: remove various exports") + * 2018-06-28 210d0797c97d0e8f3b1a932a0dc143f4c57008a3 + * ("swiotlb: export swiotlb_dma_ops") + * + * Related to this: Between above two commits, this driver has no way of + * detecting whether or not the SWIOTLB is in use. Furthermore, the + * driver cannot support DMA remapping. That leads to the following + * point: "swiotlb=force" is not supported for kernels falling in above + * range. + * + * The other "else" case that is not shown: + * Starting with the 5.0 kernel, swiotlb is integrated into dma_direct, + * which is used when there's no IOMMU. In these kernels, ops == NULL, + * swiotlb_dma_ops no longer exists, and we do not support swiotlb=force + * (doing so would require detecting when swiotlb=force is enabled and + * then returning NV_TRUE even when dma_direct is in use). So for now, + * we just return NV_FALSE and in nv_compute_gfp_mask() we check for + * whether swiotlb could possibly be used (outside of swiotlb=force). + */ + #endif + + /* + * Commit 2017-11-07 d7b417fa08d ("x86/mm: Add DMA support for + * SEV memory encryption") forces SWIOTLB to be enabled when AMD SEV + * is active in all cases. + */ + if (os_sev_enabled) + swiotlb_in_use = NV_TRUE; +#endif + + return swiotlb_in_use; +} + +/* + * TODO: Bug 1522381 will allow us to move these mapping relationships into + * common code. 
+ */ + +/* + * Bug 1606851: the Linux kernel scatterlist code doesn't work for regions + * greater than or equal to 4GB, due to regular use of unsigned int + * throughout. So we need to split our mappings into 4GB-minus-1-page-or-less + * chunks and manage them separately. + */ +typedef struct nv_dma_submap_s { + NvU32 page_count; + NvU32 sg_map_count; + struct sg_table sgt; + NvBool imported; +} nv_dma_submap_t; + +typedef struct nv_dma_map_s { + struct page **pages; + NvU64 page_count; + NvBool contiguous; + NvU32 cache_type; + struct sg_table *import_sgt; + + union + { + struct + { + NvU32 submap_count; + nv_dma_submap_t *submaps; + } discontig; + + struct + { + NvU64 dma_addr; + } contig; + } mapping; + + struct device *dev; +} nv_dma_map_t; + +#define NV_FOR_EACH_DMA_SUBMAP(dm, sm, i) \ + for (i = 0, sm = &dm->mapping.discontig.submaps[0]; \ + i < dm->mapping.discontig.submap_count; \ + i++, sm = &dm->mapping.discontig.submaps[i]) + +#define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_U32_MAX >> PAGE_SHIFT)) +#define NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(s) (s * NV_DMA_SUBMAP_MAX_PAGES) + +/* + * DO NOT use sg_alloc_table_from_pages on Xen Server, even if it's available. + * This will glom multiple pages into a single sg element, which + * xen_swiotlb_map_sg_attrs may try to route to the SWIOTLB. We must only use + * single-page sg elements on Xen Server. + */ +#if defined(NV_SG_ALLOC_TABLE_FROM_PAGES_PRESENT) && \ + !defined(NV_DOM0_KERNEL_PRESENT) + #define NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dm, sm, i) \ + ((sg_alloc_table_from_pages(&sm->sgt, \ + &dm->pages[NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(i)], \ + sm->page_count, 0, \ + sm->page_count * PAGE_SIZE, NV_GFP_KERNEL) == 0) ? NV_OK : \ + NV_ERR_OPERATING_SYSTEM) +#else + #define NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dm, sm, i) \ + ((sg_alloc_table(&sm->sgt, sm->page_count, NV_GFP_KERNEL)) == \ + 0 ? NV_OK : NV_ERR_OPERATING_SYSTEM) +#endif + +typedef struct nv_ibmnpu_info nv_ibmnpu_info_t; + +typedef struct nv_work_s { + struct work_struct task; + void *data; +} nv_work_t; + +#define NV_MAX_REGISTRY_KEYS_LENGTH 512 + +typedef enum +{ + NV_DEV_STACK_TIMER, + NV_DEV_STACK_ISR, + NV_DEV_STACK_ISR_BH, + NV_DEV_STACK_ISR_BH_UNLOCKED, + NV_DEV_STACK_GPU_WAKEUP, + NV_DEV_STACK_COUNT +} nvidia_linux_dev_stack_t; + +/* Linux version of the opaque type used for os_queue_work_item() */ +struct os_work_queue { + nv_kthread_q_t nvk; +}; + +/* Linux version of the opaque type used for os_wait_*() */ +struct os_wait_queue { + struct completion q; +}; + + +#define MAX_CLIENTS_PER_ADAPTER 127 +#define MAX_TEGRA_I2C_PORTS 10 + +typedef struct nv_i2c_client_entry_s +{ + NvU32 port; + void *pOsClient[MAX_CLIENTS_PER_ADAPTER]; +} nv_i2c_client_entry_t; + +/*! + * @brief Mapping between clock names and clock handles. + * + * TEGRA_DISP_WHICH_CLK_MAX: maximum number of clocks + * defined in below enum. 
+ * + * arch/nvalloc/unix/include/nv.h + * enum TEGRASOC_WHICH_CLK_MAX; + * + */ +typedef struct nvdisplay_clks_s { + struct { + struct clk *handles; + const char *clkName; + } clk[TEGRASOC_WHICH_CLK_MAX]; +} nvdisplay_clks_t; + + +/* + * To report error in msi/msix when unhandled count reaches a threshold + */ + +typedef struct nv_irq_count_info_s +{ + int irq; + NvU64 unhandled; + NvU64 total; + NvU64 last_unhandled; +} nv_irq_count_info_t; + +/* Linux-specific version of nv_dma_device_t */ +struct nv_dma_device { + struct { + NvU64 start; + NvU64 limit; + } addressable_range; + + struct device *dev; + NvBool nvlink; +}; + + + + + + + + + + + + + + +/* linux-specific version of old nv_state_t */ +/* this is a general os-specific state structure. the first element *must* be + the general state structure, for the generic unix-based code */ +typedef struct nv_linux_state_s { + nv_state_t nv_state; + + atomic_t usage_count; + NvU32 suspend_count; + + struct device *dev; + struct pci_dev *pci_dev; + + /* IBM-NPU info associated with this GPU */ + nv_ibmnpu_info_t *npu; + + + + + + + /* NUMA node information for the platforms where GPU memory is presented + * as a NUMA node to the kernel */ + struct { + /* NUMA node id >=0 when the platform supports GPU memory as NUMA node + * otherwise it holds the value of NUMA_NO_NODE */ + NvS32 node_id; + + /* NUMA online/offline status for platforms that support GPU memory as + * NUMA node */ + atomic_t status; + } numa_info; + + nvidia_stack_t *sp[NV_DEV_STACK_COUNT]; + + char registry_keys[NV_MAX_REGISTRY_KEYS_LENGTH]; + + nv_work_t work; + + /* get a timer callback every second */ + struct nv_timer rc_timer; + + /* lock for linux-specific data, not used by core rm */ + struct semaphore ldata_lock; + + /* proc directory information */ + struct proc_dir_entry *proc_dir; + + NvU32 minor_num; + struct nv_linux_state_s *next; + + /* DRM private information */ + struct drm_device *drm; + + /* kthread based bottom half servicing queue and elements */ + nv_kthread_q_t bottom_half_q; + nv_kthread_q_item_t bottom_half_q_item; + + /* Lock for unlocked bottom half protecting common allocated stack */ + void *isr_bh_unlocked_mutex; + + NvBool tce_bypass_enabled; + + NvU32 num_intr; + + /* Lock serializing ISRs for different MSI-X vectors */ + nv_spinlock_t msix_isr_lock; + + /* Lock serializing bottom halves for different MSI-X vectors */ + void *msix_bh_mutex; + + struct msix_entry *msix_entries; + + NvU64 numa_memblock_size; + + struct { + struct backlight_device *dev; + NvU32 displayId; + const char *device_name; + } backlight; + + /* + * file handle for pci sysfs config file (/sys/bus/pci/devices/.../config) + * which will be opened during device probe + */ + struct file *sysfs_config_file; + + /* Per-GPU queue */ + struct os_work_queue queue; + + /* GPU user mapping revocation/remapping (only for non-CTL device) */ + struct semaphore mmap_lock; /* Protects all fields in this category */ + struct list_head open_files; + NvBool all_mappings_revoked; + NvBool safe_to_mmap; + NvBool gpu_wakeup_callback_needed; + + /* Per-device notifier block for ACPI events */ + struct notifier_block acpi_nb; + + + nv_i2c_client_entry_t i2c_clients[MAX_TEGRA_I2C_PORTS]; + + nvdisplay_clks_t disp_clk_handles; + + struct reset_control *dpaux0_reset; + struct reset_control *nvdisplay_reset; + struct reset_control *dsi_core_reset; + struct reset_control *mipi_cal_reset; + + /* + * nv_imp_icc_path represents the interconnect path across which display + * data must travel. 
+ */ + struct icc_path *nv_imp_icc_path; + + + /* Lock serializing ISRs for different SOC vectors */ + nv_spinlock_t soc_isr_lock; + + struct nv_timer snapshot_timer; + nv_spinlock_t snapshot_timer_lock; + void (*snapshot_callback)(void *context); + + /* count for unhandled, total and timestamp of irq */ + nv_irq_count_info_t *irq_count; + + /* Max number of irq triggered and are getting tracked */ + NvU16 current_num_irq_tracked; + + NvBool is_forced_shutdown; + + struct nv_dma_device dma_dev; + struct nv_dma_device niso_dma_dev; +} nv_linux_state_t; + +extern nv_linux_state_t *nv_linux_devices; + +/* + * Macros to protect operations on nv_linux_devices list + * Lock acquisition order while using the nv_linux_devices list + * 1. LOCK_NV_LINUX_DEVICES() + * 2. Traverse the list + * If the list is traversed to search for an element say nvl, + * acquire the nvl->ldata_lock before step 3 + * 3. UNLOCK_NV_LINUX_DEVICES() + * 4. Release nvl->ldata_lock after any read/write access to the + * nvl element is complete + */ +extern struct semaphore nv_linux_devices_lock; +#define LOCK_NV_LINUX_DEVICES() down(&nv_linux_devices_lock) +#define UNLOCK_NV_LINUX_DEVICES() up(&nv_linux_devices_lock) + +/* + * Lock to synchronize system power management transitions, + * and to protect the global system PM state. The procfs power + * management interface acquires this lock in write mode for + * the duration of the sleep operation, any other paths accessing + * device state must acquire the lock in read mode. + */ +extern struct rw_semaphore nv_system_pm_lock; + +extern NvBool nv_ats_supported; + +#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED) +/* + * acpi data storage structure + * + * This structure retains the pointer to the device, + * and any other baggage we want to carry along + * + */ +typedef struct +{ + nvidia_stack_t *sp; + struct acpi_device *device; + struct acpi_handle *handle; + int notify_handler_installed; +} nv_acpi_t; + +#endif + +/* + * file-private data + * hide a pointer to our data structures in a file-private ptr + * there are times we need to grab this data back from the file + * data structure.. 
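+ * (For example, the ioctl and mmap paths recover it through the
+ * NV_GET_LINUX_FILE_PRIVATE() and NV_GET_NVL_FROM_FILEP() macros
+ * defined below.)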
+ */ + +typedef struct nvidia_event +{ + struct nvidia_event *next; + nv_event_t event; +} nvidia_event_t; + +typedef enum +{ + NV_FOPS_STACK_INDEX_MMAP, + NV_FOPS_STACK_INDEX_IOCTL, + NV_FOPS_STACK_INDEX_COUNT +} nvidia_entry_point_index_t; + +typedef struct +{ + nv_file_private_t nvfp; + + nvidia_stack_t *sp; + nvidia_stack_t *fops_sp[NV_FOPS_STACK_INDEX_COUNT]; + struct semaphore fops_sp_lock[NV_FOPS_STACK_INDEX_COUNT]; + nv_alloc_t *free_list; + void *nvptr; + nvidia_event_t *event_data_head, *event_data_tail; + NvBool dataless_event_pending; + nv_spinlock_t fp_lock; + wait_queue_head_t waitqueue; + nv_kthread_q_item_t deferred_close_q_item; + NvU32 *attached_gpus; + size_t num_attached_gpus; + nv_alloc_mapping_context_t mmap_context; + struct address_space mapping; + + struct list_head entry; +} nv_linux_file_private_t; + +static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t *nvfp) +{ + return container_of(nvfp, nv_linux_file_private_t, nvfp); +} + +#define NV_SET_FILE_PRIVATE(filep,data) ((filep)->private_data = (data)) +#define NV_GET_LINUX_FILE_PRIVATE(filep) ((nv_linux_file_private_t *)(filep)->private_data) + +/* for the card devices */ +#define NV_GET_NVL_FROM_FILEP(filep) (NV_GET_LINUX_FILE_PRIVATE(filep)->nvptr) +#define NV_GET_NVL_FROM_NV_STATE(nv) ((nv_linux_state_t *)nv->os_state) + +#define NV_STATE_PTR(nvl) &(((nv_linux_state_t *)(nvl))->nv_state) + + +#define NV_ATOMIC_READ(data) atomic_read(&(data)) +#define NV_ATOMIC_SET(data,val) atomic_set(&(data), (val)) +#define NV_ATOMIC_INC(data) atomic_inc(&(data)) +#define NV_ATOMIC_DEC(data) atomic_dec(&(data)) +#define NV_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) + +static inline struct kmem_cache *nv_kmem_cache_create(const char *name, unsigned int size, + unsigned int align) +{ + char *name_unique; + struct kmem_cache *cache; + +#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) + size_t len; + NvU64 tm_ns = nv_ktime_get_raw_ns(); + + /* + * Wait for timer to change at least once. This ensures + * that the name generated below is always unique. + */ + while (tm_ns == nv_ktime_get_raw_ns()); + tm_ns = nv_ktime_get_raw_ns(); + + /* 20 is the max length of a 64-bit integer printed in decimal */ + len = strlen(name) + 20 + 1; + name_unique = kzalloc(len, GFP_KERNEL); + if (!name_unique) + return NULL; + + if (snprintf(name_unique, len, "%s-%llu", name, tm_ns) >= len) + { + WARN(1, "kmem cache name too long: %s\n", name); + kfree(name_unique); + return NULL; + } +#else + name_unique = (char *)name; +#endif + cache = kmem_cache_create(name_unique, size, align, 0, nv_kmem_ctor_dummy); + if (name_unique != name) + kfree(name_unique); + + return cache; +} + + +#if defined(CONFIG_PCI_IOV) +#define NV_PCI_SRIOV_SUPPORT +#endif /* CONFIG_PCI_IOV */ + + +#define NV_PCIE_CFG_MAX_OFFSET 0x1000 + +#include "nv-proto.h" + +/* + * Check if GPU is present on the bus by checking flag + * NV_FLAG_IN_SURPRISE_REMOVAL(set when eGPU is removed from TB3). 
+ */ +static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv) +{ +#if !defined(NVCPU_PPC64LE) + if (NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) + { + return NV_ERR_GPU_IS_LOST; + } +#endif + + return NV_OK; +} + +extern NvU32 NVreg_EnableUserNUMAManagement; +extern NvU32 NVreg_RegisterPCIDriver; + +extern NvU32 num_probed_nv_devices; +extern NvU32 num_nv_devices; + +#define NV_FILE_INODE(file) (file)->f_inode + +#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD) +#define NV_VGX_HYPER +#if defined(NV_XEN_IOEMU_INJECT_MSI) +#include +#endif +#endif + +static inline NvU64 nv_pci_bus_address(struct pci_dev *dev, NvU8 bar_index) +{ + NvU64 bus_addr = 0; +#if defined(NV_PCI_BUS_ADDRESS_PRESENT) + bus_addr = pci_bus_address(dev, bar_index); +#elif defined(CONFIG_PCI) + struct pci_bus_region region; + + pcibios_resource_to_bus(dev, ®ion, &dev->resource[bar_index]); + bus_addr = region.start; +#endif + return bus_addr; +} + +/* + * Decrements the usage count of the allocation, and moves the allocation to + * the given nvlfp's free list if the usage count drops to zero. + * + * Returns NV_TRUE if the allocation is moved to the nvlfp's free list. + */ +static inline NvBool nv_alloc_release(nv_linux_file_private_t *nvlfp, nv_alloc_t *at) +{ + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + if (NV_ATOMIC_DEC_AND_TEST(at->usage_count)) + { + NV_ATOMIC_INC(at->usage_count); + + at->next = nvlfp->free_list; + nvlfp->free_list = at; + return NV_TRUE; + } + + return NV_FALSE; +} + +/* + * RB_EMPTY_ROOT was added in 2.6.18 by this commit: + * 2006-06-21 dd67d051529387f6e44d22d1d5540ef281965fdd + */ +#if !defined(RB_EMPTY_ROOT) +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) +#endif + +/* + * Starting on Power9 systems, DMA addresses for NVLink are no longer + * the same as used over PCIe. + * + * Power9 supports a 56-bit Real Address. This address range is compressed + * when accessed over NVLink to allow the GPU to access all of memory using + * its 47-bit Physical address. + * + * If there is an NPU device present on the system, it implies that NVLink + * sysmem links are present and we need to apply the required address + * conversion for NVLink within the driver. + * + * See Bug 1920398 for further background and details. + * + * Note, a deviation from the documented compression scheme is that the + * upper address bits (i.e. bit 56-63) instead of being set to zero are + * preserved during NVLink address compression so the orignal PCIe DMA + * address can be reconstructed on expansion. These bits can be safely + * ignored on NVLink since they are truncated by the GPU. + * + * Bug 1968345: As a performance enhancement it is the responsibility of + * the caller on PowerPC platforms to check for presence of an NPU device + * before the address transformation is applied. 
+ */
+static inline NvU64 nv_compress_nvlink_addr(NvU64 addr)
+{
+    NvU64 addr47 = addr;
+
+#if defined(NVCPU_PPC64LE)
+    addr47 = addr & ((1ULL << 43) - 1);
+    addr47 |= (addr & (0x3ULL << 45)) >> 2;
+    WARN_ON(addr47 & (1ULL << 44));
+    addr47 |= (addr & (0x3ULL << 49)) >> 4;
+    addr47 |= addr & ~((1ULL << 56) - 1);
+#endif
+
+    return addr47;
+}
+
+static inline NvU64 nv_expand_nvlink_addr(NvU64 addr47)
+{
+    NvU64 addr = addr47;
+
+#if defined(NVCPU_PPC64LE)
+    addr = addr47 & ((1ULL << 43) - 1);
+    addr |= (addr47 & (3ULL << 43)) << 2;
+    addr |= (addr47 & (3ULL << 45)) << 4;
+    addr |= addr47 & ~((1ULL << 56) - 1);
+#endif
+
+    return addr;
+}
+
+// Default flags for ISRs
+static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
+{
+    NvU32 flags = 0;
+
+    /*
+     * Request IRQs to be disabled in our ISRs to keep consistency across the
+     * supported kernel versions.
+     *
+     * IRQF_DISABLED was made the default in 2.6.35 with commit e58aa3d2d0cc
+     * from March 2010, and was later removed entirely in 4.1 with commit
+     * d8bf368d0631 from March 2015. Add it to our flags if it's defined to
+     * get the same behaviour on pre-2.6.35 kernels as on recent ones.
+     */
+#if defined(IRQF_DISABLED)
+    flags |= IRQF_DISABLED;
+#endif
+
+    /*
+     * For legacy interrupts, also allow sharing. Sharing doesn't make sense
+     * for MSI(-X), as on Linux MSI(-X) interrupts are never shared across
+     * different devices and we only register one ISR today.
+     */
+    if ((nv->flags & (NV_FLAG_USES_MSI | NV_FLAG_USES_MSIX)) == 0)
+        flags |= IRQF_SHARED;
+
+    return flags;
+}
+
+/*
+ * Starting with v3.7-rc1, the kernel stopped exporting get_unused_fd() and
+ * started exporting get_unused_fd_flags(), as of this commit:
+ * 2012-09-26 1a7bd2265fc ("make get_unused_fd_flags() a function")
+ */
+#if NV_IS_EXPORT_SYMBOL_PRESENT_get_unused_fd
+    #define NV_GET_UNUSED_FD() get_unused_fd()
+#else
+    #define NV_GET_UNUSED_FD() get_unused_fd_flags(0)
+#endif
+
+#if NV_IS_EXPORT_SYMBOL_PRESENT_get_unused_fd_flags
+    #define NV_GET_UNUSED_FD_FLAGS(flags) get_unused_fd_flags(flags)
+#else
+    #define NV_GET_UNUSED_FD_FLAGS(flags) (-1)
+#endif
+
+#if defined(NV_SET_CLOSE_ON_EXEC_PRESENT)
+    #define NV_SET_CLOSE_ON_EXEC(fd, fdt) __set_close_on_exec(fd, fdt)
+#elif defined(NV_LINUX_TIME_H_PRESENT) && defined(FD_SET)
+    #define NV_SET_CLOSE_ON_EXEC(fd, fdt) FD_SET(fd, fdt->close_on_exec)
+#else
+    #define NV_SET_CLOSE_ON_EXEC(fd, fdt) __set_bit(fd, fdt->close_on_exec)
+#endif
+
+#define MODULE_BASE_NAME "nvidia"
+#define MODULE_INSTANCE_NUMBER 0
+#define MODULE_INSTANCE_STRING ""
+#define MODULE_NAME MODULE_BASE_NAME MODULE_INSTANCE_STRING
+
+NvS32 nv_request_soc_irq(nv_linux_state_t *, NvU32, nv_soc_irq_type_t, NvU32, NvU32);
+
+NV_STATUS nv_imp_get_bpmp_data(nv_linux_state_t *nvl);
+NV_STATUS nv_imp_icc_get(nv_state_t *nv);
+void nv_imp_icc_put(nv_state_t *nv);
+
+static inline void nv_mutex_destroy(struct mutex *lock)
+{
+    mutex_destroy(lock);
+}
+
+static inline NvBool nv_platform_supports_numa(nv_linux_state_t *nvl)
+{
+    return nvl->numa_info.node_id != NUMA_NO_NODE;
+}
+
+static inline int nv_get_numa_status(nv_linux_state_t *nvl)
+{
+    if (!nv_platform_supports_numa(nvl))
+    {
+        return NV_IOCTL_NUMA_STATUS_DISABLED;
+    }
+
+    return NV_ATOMIC_READ(nvl->numa_info.status);
+}
+
+static inline int nv_set_numa_status(nv_linux_state_t *nvl, int status)
+{
+    if (!nv_platform_supports_numa(nvl))
+    {
+        return -EINVAL;
+    }
+
+    NV_ATOMIC_SET(nvl->numa_info.status, status);
+    return 0;
+}
+
+typedef enum
+{
+    NV_NUMA_STATUS_DISABLED = 0,
+    NV_NUMA_STATUS_OFFLINE = 1,
+    NV_NUMA_STATUS_ONLINE_IN_PROGRESS = 2,
+    NV_NUMA_STATUS_ONLINE = 3,
+    NV_NUMA_STATUS_ONLINE_FAILED = 4,
+    NV_NUMA_STATUS_OFFLINE_IN_PROGRESS = 5,
+    NV_NUMA_STATUS_OFFLINE_FAILED = 6,
+    NV_NUMA_STATUS_COUNT
+} nv_numa_status_t;
+
+#if defined(NV_LINUX_PLATFORM_DEVICE_H_PRESENT)
+#include <linux/platform_device.h>
+#endif
+
+#if defined(NV_LINUX_MUTEX_H_PRESENT)
+#include <linux/mutex.h>
+#endif
+
+#if defined(NV_LINUX_RESET_H_PRESENT)
+#include <linux/reset.h>
+#endif
+
+#if defined(NV_LINUX_DMA_BUF_H_PRESENT)
+#include <linux/dma-buf.h>
+#endif
+
+#if defined(NV_LINUX_GPIO_H_PRESENT)
+#include <linux/gpio.h>
+#endif
+
+#if defined(NV_LINUX_OF_GPIO_H_PRESENT)
+#include <linux/of_gpio.h>
+#endif
+
+#if defined(NV_LINUX_OF_DEVICE_H_PRESENT)
+#include <linux/of_device.h>
+#endif
+
+#if defined(NV_LINUX_OF_PLATFORM_H_PRESENT)
+#include <linux/of_platform.h>
+#endif
+
+#if defined(NV_LINUX_INTERCONNECT_H_PRESENT)
+#include <linux/interconnect.h>
+#endif
+
+#if defined(NV_LINUX_PM_RUNTIME_H_PRESENT)
+#include <linux/pm_runtime.h>
+#endif
+
+#if defined(NV_LINUX_CLK_H_PRESENT)
+#include <linux/clk.h>
+#endif
+
+#if defined(NV_LINUX_CLK_PROVIDER_H_PRESENT)
+#include <linux/clk-provider.h>
+#endif
+
+#endif /* _NV_LINUX_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h
new file mode 100644
index 0000000..a241d35
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NV_LIST_HELPERS_H__
+#define __NV_LIST_HELPERS_H__
+
+#include <linux/list.h>
+#include "conftest.h"
+
+/*
+ * list_first_entry_or_null added by commit 6d7581e62f8b ("list: introduce
+ * list_first_entry_or_null") in v3.10 (2013-05-29).
+ */
+#if !defined(list_first_entry_or_null)
+    #define list_first_entry_or_null(ptr, type, member) \
+        (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#endif
+
+/*
+ * Added by commit 93be3c2eb337 ("list: introduce list_last_entry(), use
+ * list_{first,last}_entry()") in v3.13 (2013-11-12).
+ */
+#if !defined(list_last_entry)
+    #define list_last_entry(ptr, type, member) \
+        list_entry((ptr)->prev, type, member)
+#endif
+
+/* list_last_entry_or_null() doesn't actually exist in the kernel */
+#if !defined(list_last_entry_or_null)
+    #define list_last_entry_or_null(ptr, type, member) \
+        (!list_empty(ptr) ? list_last_entry(ptr, type, member) : NULL)
+#endif
+
+/*
+ * list_prev_entry() and list_next_entry() added by commit 008208c6b26f
+ * ("list: introduce list_next_entry() and list_prev_entry()") in
+ * v3.13 (2013-11-12).
+ */
+#if !defined(list_prev_entry)
+    #define list_prev_entry(pos, member) \
+        list_entry((pos)->member.prev, typeof(*(pos)), member)
+#endif
+
+#if !defined(list_next_entry)
+    #define list_next_entry(pos, member) \
+        list_entry((pos)->member.next, typeof(*(pos)), member)
+#endif
+
+#if !defined(NV_LIST_IS_FIRST_PRESENT)
+    static inline int list_is_first(const struct list_head *list,
+                                    const struct list_head *head)
+    {
+        return list->prev == head;
+    }
+#endif
+
+#if defined(NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT)
+#if NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT == 3
+#define nv_hlist_for_each_entry(pos, head, member) \
+    hlist_for_each_entry(pos, head, member)
+#else
+#if !defined(hlist_entry_safe)
+#define hlist_entry_safe(ptr, type, member) \
+    (ptr) ? hlist_entry(ptr, type, member) : NULL
+#endif
+
+#define nv_hlist_for_each_entry(pos, head, member) \
+    for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
+         pos; \
+         pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+#endif
+#endif // NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT
+
+#endif // __NV_LIST_HELPERS_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h
new file mode 100644
index 0000000..34f593d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h
@@ -0,0 +1,92 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_LOCK_H_
+#define _NV_LOCK_H_
+
+#include "conftest.h"
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>         /* signal_pending, cond_resched */
+
+#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT)
+#include <linux/sched/signal.h>  /* signal_pending for kernels >= 4.11 */
+#endif
+
+#if defined(NV_LINUX_SEMAPHORE_H_PRESENT)
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif
+
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
+typedef raw_spinlock_t nv_spinlock_t;
+#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock)
+#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock)
+#define NV_SPIN_UNLOCK_IRQ(lock) raw_spin_unlock_irq(lock)
+#define NV_SPIN_LOCK_IRQSAVE(lock,flags) raw_spin_lock_irqsave(lock,flags)
+#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) raw_spin_unlock_irqrestore(lock,flags)
+#define NV_SPIN_LOCK(lock) raw_spin_lock(lock)
+#define NV_SPIN_UNLOCK(lock) raw_spin_unlock(lock)
+#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock)
+#else
+typedef spinlock_t nv_spinlock_t;
+#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock)
+#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock)
+#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock)
+#define NV_SPIN_LOCK_IRQSAVE(lock,flags) spin_lock_irqsave(lock,flags)
+#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) spin_unlock_irqrestore(lock,flags)
+#define NV_SPIN_LOCK(lock) spin_lock(lock)
+#define NV_SPIN_UNLOCK(lock) spin_unlock(lock)
+#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock)
+#endif
+
+#if defined(NV_CONFIG_PREEMPT_RT)
+#define NV_INIT_SEMA(sema, val) sema_init(sema,val)
+#else
+#if !defined(__SEMAPHORE_INITIALIZER) && defined(__COMPAT_SEMAPHORE_INITIALIZER)
+#define __SEMAPHORE_INITIALIZER __COMPAT_SEMAPHORE_INITIALIZER
+#endif
+#define NV_INIT_SEMA(sema, val)                       \
+    {                                                 \
+        struct semaphore __sema =                     \
+            __SEMAPHORE_INITIALIZER(*(sema), val);    \
+        *(sema) = __sema;                             \
+    }
+#endif
+#define NV_INIT_MUTEX(mutex) NV_INIT_SEMA(mutex, 1)
+
+static inline int nv_down_read_interruptible(struct rw_semaphore *lock)
+{
+    while (!down_read_trylock(lock))
+    {
+        if (signal_pending(current))
+            return -EINTR;
+        cond_resched();
+    }
+    return 0;
+}
+
+#endif /* _NV_LOCK_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h
new file mode 100644
index 0000000..a749571
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h
@@ -0,0 +1,49 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NVMEMDBG_H_
+#define _NVMEMDBG_H_
+
+#include <nvtypes.h>
+
+void nv_memdbg_init(void);
+void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line);
+void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line);
+void nv_memdbg_exit(void);
+
+#if defined(NV_MEM_LOGGER)
+
+#define NV_MEMDBG_ADD(ptr, size) \
+    nv_memdbg_add(ptr, size, __FILE__, __LINE__)
+
+#define NV_MEMDBG_REMOVE(ptr, size) \
+    nv_memdbg_remove(ptr, size, __FILE__, __LINE__)
+
+#else
+
+#define NV_MEMDBG_ADD(ptr, size)
+#define NV_MEMDBG_REMOVE(ptr, size)
+
+#endif /* NV_MEM_LOGGER */
+
+#endif /* _NVMEMDBG_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h
new file mode 100644
index 0000000..44b2bdc
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h
@@ -0,0 +1,264 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NV_MM_H__
+#define __NV_MM_H__
+
+#include "conftest.h"
+
+#if !defined(NV_VM_FAULT_T_IS_PRESENT)
+typedef int vm_fault_t;
+#endif
+
+/* pin_user_pages
+ * Presence of pin_user_pages() also implies the presence of unpin_user_page().
+ * Both were added in v5.6-rc1.
+ *
+ * pin_user_pages() was added by commit eddb1c228f7951d399240
+ * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#if defined(NV_PIN_USER_PAGES_PRESENT)
+    #define NV_PIN_USER_PAGES pin_user_pages
+    #define NV_UNPIN_USER_PAGE unpin_user_page
+#else
+    #define NV_PIN_USER_PAGES NV_GET_USER_PAGES
+    #define NV_UNPIN_USER_PAGE put_page
+#endif // NV_PIN_USER_PAGES_PRESENT
+
+/* get_user_pages
+ *
+ * The 8-argument version of get_user_pages was deprecated by commit
+ * (2016 Feb 12: cde70140fed8429acf7a14e2e2cbd3e329036653) for the non-remote case
+ * (calling get_user_pages with current and current->mm).
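+ *
+ * As an illustrative sketch (variable names are placeholders), a call
+ * through the wrapper defined below, pinning nr_pages of a user buffer
+ * for write with the mmap lock held, looks like:
+ *
+ *     pinned = NV_GET_USER_PAGES(start, nr_pages, FOLL_WRITE, pages, NULL);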
+ *
+ * get_user_pages was completely moved to the 6-argument version by -
+ * 2016 Apr 4: c12d2da56d0e07d230968ee2305aaa86b93a6832
+ *
+ * The write and force parameters were replaced with gup_flags by -
+ * 2016 Oct 12: 768ae309a96103ed02eb1e111e838c87854d8b51
+ *
+ * A 7-argument version of get_user_pages was introduced into linux-4.4.y by
+ * commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40, which cherry-picked the
+ * replacement of the write and force parameters with gup_flags.
+ */
+
+#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
+    #define NV_GET_USER_PAGES get_user_pages
+#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS)
+    #define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
+        get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
+#else
+    static inline long NV_GET_USER_PAGES(unsigned long start,
+                                         unsigned long nr_pages,
+                                         unsigned int flags,
+                                         struct page **pages,
+                                         struct vm_area_struct **vmas)
+    {
+        int write = flags & FOLL_WRITE;
+        int force = flags & FOLL_FORCE;
+
+    #if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE)
+        return get_user_pages(start, nr_pages, write, force, pages, vmas);
+    #else
+        // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
+        return get_user_pages(current, current->mm, start, nr_pages, write,
+                              force, pages, vmas);
+    #endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE
+    }
+#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
+
+/* pin_user_pages_remote
+ *
+ * pin_user_pages_remote() was added by commit eddb1c228f7951d399240
+ * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6 (2020-01-30)
+ *
+ * pin_user_pages_remote() removed the 'tsk' parameter by commit
+ * 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
+ * in v5.9-rc1 (2020-08-11).
+ */
+
+#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
+    #if defined (NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK)
+        #define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
+            pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
+    #else
+        #define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
+    #endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK
+#else
+    #define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
+#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT
+
+/*
+ * get_user_pages_remote() was added by commit 1e9877902dc7
+ * ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12).
+ *
+ * Note that get_user_pages_remote() requires the caller to hold a reference on
+ * the task_struct (if non-NULL and if this API has the tsk argument) and the
+ * mm_struct. This will always be true when using current and current->mm. If
+ * the kernel passes the driver a vma via driver callback, the kernel holds a
+ * reference on vma->vm_mm over that callback.
+ *
+ * get_user_pages_remote() write/force parameters were replaced
+ * with gup_flags by commit 9beae1ea8930 ("mm: replace get_user_pages_remote()
+ * write/force parameters with gup_flags") in v4.9 (2016-10-13).
+ *
+ * get_user_pages_remote() added a 'locked' parameter by commit 5b56d49fc31d
+ * ("mm: add locked parameter to get_user_pages_remote()") in
+ * v4.10 (2016-12-14).
+ *
+ * get_user_pages_remote() removed the 'tsk' parameter by
+ * commit 64019a2e467a ("mm/gup: remove task_struct pointer for
+ * all gup code") in v5.9-rc1 (2020-08-11).
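+ *
+ * As an illustrative sketch (variable names are placeholders), a call
+ * through the wrapper defined below, against a foreign mm whose mmap
+ * lock is held, looks like:
+ *
+ *     pinned = NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, FOLL_WRITE,
+ *                                       pages, NULL, NULL);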
+ * + */ + +#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT) + #if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED) + #define NV_GET_USER_PAGES_REMOTE get_user_pages_remote + + #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED) + #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \ + get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked) + + #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS) + #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \ + get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas) + + #else + // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE + static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) + { + int write = flags & FOLL_WRITE; + int force = flags & FOLL_FORCE; + + return get_user_pages_remote(NULL, mm, start, nr_pages, write, force, + pages, vmas); + } + #endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED +#else + #if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE) + static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) + { + int write = flags & FOLL_WRITE; + int force = flags & FOLL_FORCE; + + return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, vmas); + } + + #else + #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \ + get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas) + #endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE +#endif // NV_GET_USER_PAGES_REMOTE_PRESENT + +/* + * The .virtual_address field was effectively renamed to .address, by these + * two commits: + * + * struct vm_fault: .address was added by: + * 2016-12-14 82b0f8c39a3869b6fd2a10e180a862248736ec6f + * + * struct vm_fault: .virtual_address was removed by: + * 2016-12-14 1a29d85eb0f19b7d8271923d8917d7b4f5540b3e + */ +static inline unsigned long nv_page_fault_va(struct vm_fault *vmf) +{ +#if defined(NV_VM_FAULT_HAS_ADDRESS) + return vmf->address; +#else + return (unsigned long)(vmf->virtual_address); +#endif +} + +static inline void nv_mmap_read_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_read_lock(mm); +#else + down_read(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_read_unlock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_read_unlock(mm); +#else + up_read(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_write_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_write_lock(mm); +#else + down_write(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_write_unlock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_write_unlock(mm); +#else + up_write(&mm->mmap_sem); +#endif +} + +static inline int nv_mm_rwsem_is_locked(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + return rwsem_is_locked(&mm->mmap_lock); +#else + return rwsem_is_locked(&mm->mmap_sem); +#endif +} + +static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + return &mm->mmap_lock; +#else + return &mm->mmap_sem; +#endif +} + +#endif // __NV_MM_H__ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h 
b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h
new file mode 100644
index 0000000..e2e303f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h
@@ -0,0 +1,122 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_MODESET_INTERFACE_H_
+#define _NV_MODESET_INTERFACE_H_
+
+/*
+ * This file defines the interface between the nvidia and
+ * nvidia-modeset UNIX kernel modules.
+ *
+ * The nvidia-modeset kernel module calls the nvidia kernel module's
+ * nvidia_get_rm_ops() function to get the RM API function pointers
+ * which it will need.
+ */
+
+#include "nvstatus.h"
+
+#include "nv-gpu-info.h"
+
+/*
+ * nvidia_stack_s is defined in nv.h, which pulls in a lot of other
+ * dependencies. The nvidia-modeset kernel module doesn't need to
+ * dereference the nvidia_stack_s pointer, so just treat it as an
+ * opaque pointer for the purposes of this API definition.
+ */
+typedef struct nvidia_stack_s *nvidia_modeset_stack_ptr;
+
+/*
+ * Callback functions from the RM OS interface layer into the NVKMS OS
+ * interface layer.
+ *
+ * These functions should be called without the RM lock held, using the
+ * kernel's native calling convention.
+ */
+typedef struct {
+    /*
+     * Suspend & resume callbacks. Note that these are called once per GPU.
+     */
+    void (*suspend)(NvU32 gpu_id);
+    void (*resume)(NvU32 gpu_id);
+} nvidia_modeset_callbacks_t;
+
+/*
+ * The RM API entry points which the nvidia-modeset kernel module should
+ * call in the nvidia kernel module.
+ */
+
+typedef struct {
+    /*
+     * The nvidia-modeset kernel module should assign version_string
+     * before passing the structure to the nvidia kernel module, so
+     * that a version match can be confirmed: it is not supported to
+     * mix nvidia and nvidia-modeset kernel modules from different
+     * releases.
+     */
+    const char *version_string;
+
+    /*
+     * Return system information.
+     */
+    struct {
+        /* Availability of write combining support for video memory */
+        NvBool allow_write_combining;
+    } system_info;
+
+    /*
+     * Allocate and free an nvidia_stack_t to pass into
+     * nvidia_modeset_rm_ops_t::op(). An nvidia_stack_t must only be
+     * used by one thread at a time.
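+     *
+     * As an illustrative sketch (rm_ops and ops_cmd are placeholders;
+     * error handling elided), the three entry points pair up as:
+     *
+     *     nvidia_modeset_stack_ptr sp = NULL;
+     *     if (rm_ops->alloc_stack(&sp) == 0) {
+     *         rm_ops->op(sp, &ops_cmd);
+     *         rm_ops->free_stack(sp);
+     *     }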
+     *
+     * Note that on architectures where an alternate stack is not
+     * used, alloc_stack() will set sp=NULL even when it returns 0
+     * (success). I.e., check the return value, not the sp value.
+     */
+    int (*alloc_stack)(nvidia_modeset_stack_ptr *sp);
+    void (*free_stack)(nvidia_modeset_stack_ptr sp);
+
+    /*
+     * Enumerate the list of GPUs probed by the nvidia driver.
+     *
+     * gpu_info is an array of NVIDIA_MAX_GPUS elements. The number of GPUs
+     * in the system is returned.
+     */
+    NvU32 (*enumerate_gpus)(nv_gpu_info_t *gpu_info);
+
+    /*
+     * {open,close}_gpu() raise and lower the reference count of the
+     * specified GPU. This is equivalent to opening and closing a
+     * /dev/nvidiaN device file from user-space.
+     */
+    int (*open_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp);
+    void (*close_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp);
+
+    void (*op)(nvidia_modeset_stack_ptr sp, void *ops_cmd);
+
+    int (*set_callbacks)(const nvidia_modeset_callbacks_t *cb);
+
+} nvidia_modeset_rm_ops_t;
+
+NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops);
+
+#endif /* _NV_MODESET_INTERFACE_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h
new file mode 100644
index 0000000..55861d5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h
@@ -0,0 +1,115 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _NV_MSI_H_ +#define _NV_MSI_H_ + +#include "nv-linux.h" + +#if (defined(CONFIG_X86_LOCAL_APIC) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE)) && \ + (defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)) +#define NV_LINUX_PCIE_MSI_SUPPORTED +#endif + +#if !defined(NV_LINUX_PCIE_MSI_SUPPORTED) || !defined(CONFIG_PCI_MSI) +#define NV_PCI_DISABLE_MSI(pci_dev) +#else +#define NV_PCI_DISABLE_MSI(pci_dev) pci_disable_msi(pci_dev) +#endif + +irqreturn_t nvidia_isr (int, void *); +irqreturn_t nvidia_isr_msix (int, void *); +irqreturn_t nvidia_isr_kthread_bh (int, void *); +irqreturn_t nvidia_isr_msix_kthread_bh(int, void *); + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) +void NV_API_CALL nv_init_msi (nv_state_t *); +void NV_API_CALL nv_init_msix (nv_state_t *); +NvS32 NV_API_CALL nv_request_msix_irq (nv_linux_state_t *); + +#define NV_PCI_MSIX_FLAGS 2 +#define NV_PCI_MSIX_FLAGS_QSIZE 0x7FF + +static inline void nv_free_msix_irq(nv_linux_state_t *nvl) +{ + int i; + + for (i = 0; i < nvl->num_intr; i++) + { + free_irq(nvl->msix_entries[i].vector, (void *)nvl); + } +} + +static inline int nv_get_max_irq(struct pci_dev *pci_dev) +{ + int nvec; + int cap_ptr; + NvU16 ctrl; + + cap_ptr = pci_find_capability(pci_dev, PCI_CAP_ID_MSIX); + /* + * The 'PCI_MSIX_FLAGS' was added in 2.6.21-rc3 by: + * 2007-03-05 f5f2b13129a6541debf8851bae843cbbf48298b7 + */ +#if defined(PCI_MSIX_FLAGS) + pci_read_config_word(pci_dev, cap_ptr + PCI_MSIX_FLAGS, &ctrl); + nvec = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; +#else + pci_read_config_word(pci_dev, cap_ptr + NV_PCI_MSIX_FLAGS, &ctrl); + nvec = (ctrl & NV_PCI_MSIX_FLAGS_QSIZE) + 1; +#endif + + return nvec; +} + +static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec) +{ + int rc = 0; + + /* + * pci_enable_msix_range() replaced pci_enable_msix() in 3.14-rc1: + * 2014-01-03 302a2523c277bea0bbe8340312b09507905849ed + */ + +#if defined(NV_PCI_ENABLE_MSIX_RANGE_PRESENT) + // We require all the vectors we are requesting so use the same min and max + rc = pci_enable_msix_range(nvl->pci_dev, nvl->msix_entries, nvec, nvec); + if (rc < 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + WARN_ON(nvec != rc); +#else + rc = pci_enable_msix(nvl->pci_dev, nvl->msix_entries, nvec); + if (rc != 0) + { + return NV_ERR_OPERATING_SYSTEM; + } +#endif + + nvl->num_intr = nvec; + return NV_OK; +} +#endif +#endif /* _NV_MSI_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h new file mode 100644 index 0000000..9706d0e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_PCI_TYPES_H_
+#define _NV_PCI_TYPES_H_
+
+#include <linux/pci.h>
+#include "conftest.h"
+
+#if defined(NV_PCI_CHANNEL_STATE_PRESENT)
+typedef enum pci_channel_state nv_pci_channel_state_t;
+#else
+typedef pci_channel_state_t nv_pci_channel_state_t;
+#endif
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h
new file mode 100644
index 0000000..84c0f5d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h
@@ -0,0 +1,48 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_PCI_H_
+#define _NV_PCI_H_
+
+#include <linux/pci.h>
+#include "nv-linux.h"
+
+#if defined(NV_DEV_IS_PCI_PRESENT)
+#define nv_dev_is_pci(dev) dev_is_pci(dev)
+#else
+/*
+ * Non-PCI devices are only supported on kernels which expose the
+ * dev_is_pci() function. For older kernels, we only support PCI
+ * devices, hence returning true to take all the PCI code paths.
+ */
+#define nv_dev_is_pci(dev) (true)
+#endif
+
+int nv_pci_register_driver(void);
+void nv_pci_unregister_driver(void);
+int nv_pci_count_devices(void);
+NvU8 nv_find_pci_capability(struct pci_dev *, NvU8);
+int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
+nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h
new file mode 100644
index 0000000..b56d956
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h
@@ -0,0 +1,134 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_PGPROT_H__
+#define __NV_PGPROT_H__
+
+#include "cpuopsys.h"
+
+#include <linux/mm.h>
+
+#if !defined(NV_VMWARE)
+#if defined(NVCPU_X86_64)
+/* mark memory UC-, rather than UC (don't use _PAGE_PWT) */
+static inline pgprot_t pgprot_noncached_weak(pgprot_t old_prot)
+{
+    pgprot_t new_prot = old_prot;
+    if (boot_cpu_data.x86 > 3)
+        new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
+    return new_prot;
+}
+
+#if !defined (pgprot_noncached)
+static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
+{
+    pgprot_t new_prot = old_prot;
+    if (boot_cpu_data.x86 > 3)
+        new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD | _PAGE_PWT);
+    return new_prot;
+}
+#endif
+static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
+{
+    pgprot_t new_prot = old_prot;
+    pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_PCD | _PAGE_PWT);
+    new_prot = __pgprot(pgprot_val(new_prot) | _PAGE_PWT);
+    return new_prot;
+}
+#endif /* defined(NVCPU_X86_64) */
+#endif /* !defined(NV_VMWARE) */
+
+#if defined(NVCPU_AARCH64)
+/*
+ * Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit
+ * ARM that's not for system memory, but device memory instead. For I/O cache
+ * coherent systems, use cached mappings instead of uncached.
+ */
+#define NV_PGPROT_UNCACHED(old_prot) \
+    ((nvos_is_chipset_io_coherent()) ? \
+     (old_prot) : \
+     __pgprot_modify((old_prot), PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)))
+#elif defined(NVCPU_PPC64LE)
+/* Don't attempt to mark sysmem pages as uncached on ppc64le */
+#define NV_PGPROT_UNCACHED(old_prot) old_prot
+#else
+#define NV_PGPROT_UNCACHED(old_prot) pgprot_noncached(old_prot)
+#endif
+
+#define NV_PGPROT_UNCACHED_DEVICE(old_prot) pgprot_noncached(old_prot)
+#if defined(NVCPU_AARCH64)
+#if defined(NV_MT_DEVICE_GRE_PRESENT)
+#define NV_PROT_WRITE_COMBINED_DEVICE (PROT_DEFAULT | PTE_PXN | PTE_UXN | \
+                                       PTE_ATTRINDX(MT_DEVICE_GRE))
+#else
+#define NV_PROT_WRITE_COMBINED_DEVICE (PROT_DEFAULT | PTE_PXN | PTE_UXN | \
+                                       PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#endif
+#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
+    __pgprot_modify(old_prot, PTE_ATTRINDX_MASK, NV_PROT_WRITE_COMBINED_DEVICE)
+#define NV_PGPROT_WRITE_COMBINED(old_prot) NV_PGPROT_UNCACHED(old_prot)
+#define NV_PGPROT_READ_ONLY(old_prot) \
+    __pgprot_modify(old_prot, 0, PTE_RDONLY)
+#elif defined(NVCPU_X86_64)
+#define NV_PGPROT_UNCACHED_WEAK(old_prot) pgprot_noncached_weak(old_prot)
+#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
+    pgprot_modify_writecombine(old_prot)
+#define NV_PGPROT_WRITE_COMBINED(old_prot) \
+    NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot)
+#define NV_PGPROT_READ_ONLY(old_prot) \
+    __pgprot(pgprot_val((old_prot)) & ~_PAGE_RW)
+#elif defined(NVCPU_PPC64LE)
+/*
+ * Some kernels use H_PAGE instead of _PAGE
+ */
+#if defined(_PAGE_RW)
+#define NV_PAGE_RW _PAGE_RW
+#elif defined(H_PAGE_RW)
+#define NV_PAGE_RW H_PAGE_RW
+#else
+#warning "The kernel does not provide page protection defines!"
+#endif
+
+#if defined(_PAGE_4K_PFN)
+#define NV_PAGE_4K_PFN _PAGE_4K_PFN
+#elif defined(H_PAGE_4K_PFN)
+#define NV_PAGE_4K_PFN H_PAGE_4K_PFN
+#else
+#undef NV_PAGE_4K_PFN
+#endif
+
+#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
+    pgprot_writecombine(old_prot)
+/* Don't attempt to mark sysmem pages as write combined on ppc64le */
+#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
+#define NV_PGPROT_READ_ONLY(old_prot) \
+    __pgprot(pgprot_val((old_prot)) & ~NV_PAGE_RW)
+#else
+/* Writecombine is not supported */
+#undef NV_PGPROT_WRITE_COMBINED_DEVICE
+#undef NV_PGPROT_WRITE_COMBINED
+#define NV_PGPROT_READ_ONLY(old_prot)
+#endif
+
+#endif /* __NV_PGPROT_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h
new file mode 100644
index 0000000..70fb23f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h
@@ -0,0 +1,40 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NV_PLATFORM_H
+#define NV_PLATFORM_H
+
+#include "nv-linux.h"
+
+irqreturn_t nvidia_isr (int, void *);
+irqreturn_t nvidia_isr_kthread_bh (int, void *);
+
+int nv_platform_register_driver(void);
+void nv_platform_unregister_driver(void);
+int nv_platform_count_devices(void);
+int nv_soc_register_irqs(nv_state_t *nv);
+void nv_soc_free_irqs(nv_state_t *nv);
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h
new file mode 100644
index 0000000..5911d2d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h
@@ -0,0 +1,227 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_PROCFS_UTILS_H
+#define _NV_PROCFS_UTILS_H
+
+#include "conftest.h"
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+/*
+ * Allow procfs to create a file to exercise error forwarding.
+ * This is supported by CRAY platforms.
+ */ +#if defined(CONFIG_CRAY_XT) +#define EXERCISE_ERROR_FORWARDING NV_TRUE +#else +#define EXERCISE_ERROR_FORWARDING NV_FALSE +#endif + +#define IS_EXERCISE_ERROR_FORWARDING_ENABLED() (EXERCISE_ERROR_FORWARDING) + +#if defined(NV_PROC_OPS_PRESENT) +typedef struct proc_ops nv_proc_ops_t; + +#define NV_PROC_OPS_SET_OWNER() + +#define NV_PROC_OPS_OPEN proc_open +#define NV_PROC_OPS_READ proc_read +#define NV_PROC_OPS_WRITE proc_write +#define NV_PROC_OPS_LSEEK proc_lseek +#define NV_PROC_OPS_RELEASE proc_release +#else +typedef struct file_operations nv_proc_ops_t; + +#define NV_PROC_OPS_SET_OWNER() .owner = THIS_MODULE, + +#define NV_PROC_OPS_OPEN open +#define NV_PROC_OPS_READ read +#define NV_PROC_OPS_WRITE write +#define NV_PROC_OPS_LSEEK llseek +#define NV_PROC_OPS_RELEASE release +#endif + +#define NV_CREATE_PROC_FILE(filename,parent,__name,__data) \ + ({ \ + struct proc_dir_entry *__entry; \ + int mode = (S_IFREG | S_IRUGO); \ + const nv_proc_ops_t *fops = &nv_procfs_##__name##_fops; \ + if (fops->NV_PROC_OPS_WRITE != 0) \ + mode |= S_IWUSR; \ + __entry = proc_create_data(filename, mode, parent, fops, __data);\ + __entry; \ + }) + +/* + * proc_mkdir_mode exists in Linux 2.6.9, but isn't exported until Linux 3.0. + * Use the older interface instead unless the newer interface is necessary. + */ +#if defined(NV_PROC_REMOVE_PRESENT) +# define NV_PROC_MKDIR_MODE(name, mode, parent) \ + proc_mkdir_mode(name, mode, parent) +#else +# define NV_PROC_MKDIR_MODE(name, mode, parent) \ + ({ \ + struct proc_dir_entry *__entry; \ + __entry = create_proc_entry(name, mode, parent); \ + __entry; \ + }) +#endif + +#define NV_CREATE_PROC_DIR(name,parent) \ + ({ \ + struct proc_dir_entry *__entry; \ + int mode = (S_IFDIR | S_IRUGO | S_IXUGO); \ + __entry = NV_PROC_MKDIR_MODE(name, mode, parent); \ + __entry; \ + }) + +#if defined(NV_PDE_DATA_LOWER_CASE_PRESENT) +#define NV_PDE_DATA(inode) pde_data(inode) +#else +#define NV_PDE_DATA(inode) PDE_DATA(inode) +#endif + +#if defined(NV_PROC_REMOVE_PRESENT) +# define NV_REMOVE_PROC_ENTRY(entry) \ + proc_remove(entry); +#else +# define NV_REMOVE_PROC_ENTRY(entry) \ + remove_proc_entry(entry->name, entry->parent); +#endif + +void nv_procfs_unregister_all(struct proc_dir_entry *entry, + struct proc_dir_entry *delimiter); +#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + static int nv_procfs_open_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + int ret; \ + ret = single_open(filep, nv_procfs_read_##name, \ + NV_PDE_DATA(inode)); \ + if (ret < 0) \ + { \ + return ret; \ + } \ + ret = nv_down_read_interruptible(&lock); \ + if (ret < 0) \ + { \ + single_release(inode, filep); \ + } \ + return ret; \ + } \ + \ + static int nv_procfs_release_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + up_read(&lock); \ + return single_release(inode, filep); \ + } + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, lock) \ + NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = seq_read, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(name, lock, \ +write_callback) \ + NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + \ + static ssize_t nv_procfs_write_##name( \ + struct file *file, \ + const char __user *buf, \ + size_t size, \ + loff_t *ppos \ + ) \ + { \ + ssize_t 
ret; \ + struct seq_file *s; \ + \ + s = file->private_data; \ + if (s == NULL) \ + { \ + return -EIO; \ + } \ + \ + ret = write_callback(s, buf + *ppos, size - *ppos); \ + if (ret == 0) \ + { \ + /* avoid infinite loop */ \ + ret = -EIO; \ + } \ + return ret; \ + } \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = seq_read, \ + .NV_PROC_OPS_WRITE = nv_procfs_write_##name, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY_WITHOUT_LOCK(name) \ + static int nv_procfs_open_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + int ret; \ + ret = single_open(filep, nv_procfs_read_##name, \ + NV_PDE_DATA(inode)); \ + return ret; \ + } \ + \ + static int nv_procfs_release_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + return single_release(inode, filep); \ + } \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = seq_read, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + +#endif /* CONFIG_PROC_FS */ + +#endif /* _NV_PROCFS_UTILS_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h new file mode 100644 index 0000000..11f9585 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_PROCFS_H +#define _NV_PROCFS_H + +#include "nv-procfs-utils.h" + +#endif /* _NV_PROCFS_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h new file mode 100644 index 0000000..2c5e793 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_PROTO_H_ +#define _NV_PROTO_H_ + +#include "nv-pci.h" +#include "nv-register-module.h" + +#include "nv-platform.h" + + +extern const char *nv_device_name; +extern nvidia_module_t nv_fops; + +void nv_acpi_register_notifier (nv_linux_state_t *); +void nv_acpi_unregister_notifier (nv_linux_state_t *); + +NvU8 nv_find_pci_capability (struct pci_dev *, NvU8); + +int nv_procfs_init (void); +void nv_procfs_exit (void); +void nv_procfs_add_warning (const char *, const char *); +int nv_procfs_add_gpu (nv_linux_state_t *); +void nv_procfs_remove_gpu (nv_linux_state_t *); + +int nvidia_mmap (struct file *, struct vm_area_struct *); +int nvidia_mmap_helper (nv_state_t *, nv_linux_file_private_t *, nvidia_stack_t *, struct vm_area_struct *, void *); +int nv_encode_caching (pgprot_t *, NvU32, NvU32); +void nv_revoke_gpu_mappings_locked(nv_state_t *); + +NvUPtr nv_vm_map_pages (struct page **, NvU32, NvBool, NvBool); +void nv_vm_unmap_pages (NvUPtr, NvU32); + +NV_STATUS nv_alloc_contig_pages (nv_state_t *, nv_alloc_t *); +void nv_free_contig_pages (nv_alloc_t *); +NV_STATUS nv_alloc_system_pages (nv_state_t *, nv_alloc_t *); +void nv_free_system_pages (nv_alloc_t *); + +void nv_address_space_init_once (struct address_space *mapping); + +int nv_uvm_init (void); +void nv_uvm_exit (void); +NV_STATUS nv_uvm_suspend (void); +NV_STATUS nv_uvm_resume (void); +void nv_uvm_notify_start_device (const NvU8 *uuid); +void nv_uvm_notify_stop_device (const NvU8 *uuid); +NV_STATUS nv_uvm_event_interrupt (const NvU8 *uuid); + +/* Move these to nv.h once implemented by other UNIX platforms */ +NvBool nvidia_get_gpuid_list (NvU32 *gpu_ids, NvU32 *gpu_count); +int nvidia_dev_get (NvU32, nvidia_stack_t *); +void nvidia_dev_put (NvU32, nvidia_stack_t *); +int nvidia_dev_get_uuid (const NvU8 *, nvidia_stack_t *); +void nvidia_dev_put_uuid (const NvU8 *, nvidia_stack_t *); +int nvidia_dev_block_gc6 (const NvU8 *, nvidia_stack_t *); +int nvidia_dev_unblock_gc6 (const NvU8 *, nvidia_stack_t *); + +#if defined(CONFIG_PM) +NV_STATUS nv_set_system_power_state (nv_power_state_t, nv_pm_action_depth_t); +#endif + +void nvidia_modeset_suspend (NvU32 gpuId); +void nvidia_modeset_resume (NvU32 gpuId); +NvBool nv_is_uuid_in_gpu_exclusion_list (const char *); + +NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp); +nv_linux_state_t * find_uuid(const NvU8 *uuid); +void nv_report_error(struct pci_dev *dev, 
NvU32 error_number, const char *format, va_list ap);
+void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state_t *);
+void nv_dev_free_stacks(nv_linux_state_t *);
+NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *);
+void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *);
+void nv_linux_add_device_locked(nv_linux_state_t *);
+void nv_linux_remove_device_locked(nv_linux_state_t *);
+NvBool nv_acpi_power_resource_method_present(struct pci_dev *);
+
+#endif /* _NV_PROTO_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h
new file mode 100644
index 0000000..bd4545f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h
@@ -0,0 +1,55 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_REGISTER_MODULE_H_
+#define _NV_REGISTER_MODULE_H_
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+
+#include "nvtypes.h"
+
+typedef struct nvidia_module_s {
+    struct module *owner;
+
+    /* nvidia0, nvidia1, ... */
+    const char *module_name;
+
+    /* module instance */
+    NvU32 instance;
+
+    /* file operations */
+    int (*open)(struct inode *, struct file *filp);
+    int (*close)(struct inode *, struct file *filp);
+    int (*mmap)(struct file *filp, struct vm_area_struct *vma);
+    int (*ioctl)(struct inode *, struct file *file, unsigned int cmd, unsigned long arg);
+    unsigned int (*poll)(struct file *file, poll_table *wait);
+
+} nvidia_module_t;
+
+int nvidia_register_module(nvidia_module_t *);
+int nvidia_unregister_module(nvidia_module_t *);
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h
new file mode 100644
index 0000000..2495503
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h
@@ -0,0 +1,82 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_RETPOLINE_H_ +#define _NV_RETPOLINE_H_ + +#include "cpuopsys.h" + +#if (NV_SPECTRE_V2 == 0) +#define NV_RETPOLINE_THUNK NV_SPEC_THUNK +#else +#define NV_RETPOLINE_THUNK NV_NOSPEC_THUNK +#endif + +#if defined(NVCPU_X86_64) +#define NV_SPEC_THUNK(REG) \ + __asm__( \ + ".weak __x86_indirect_thunk_" #REG ";" \ + ".type __x86_indirect_thunk_" #REG ", @function;" \ + "__x86_indirect_thunk_" #REG ":" \ + " .cfi_startproc;" \ + " jmp *%" #REG ";" \ + " .cfi_endproc;" \ + ".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG) + +#define NV_NOSPEC_THUNK(REG) \ + __asm__( \ + ".weak __x86_indirect_thunk_" #REG ";" \ + ".type __x86_indirect_thunk_" #REG ", @function;" \ + "__x86_indirect_thunk_" #REG ":" \ + " .cfi_startproc;" \ + " call .Lnv_no_fence_" #REG ";" \ + ".Lnv_fence_" #REG ":" \ + " pause;" \ + " lfence;" \ + " jmp .Lnv_fence_" #REG ";" \ + ".Lnv_no_fence_" #REG ":" \ + " mov %" #REG ", (%rsp);" \ + " ret;" \ + " .cfi_endproc;" \ + ".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG) + + __asm__(".pushsection .text"); + NV_RETPOLINE_THUNK(rax); + NV_RETPOLINE_THUNK(rbx); + NV_RETPOLINE_THUNK(rcx); + NV_RETPOLINE_THUNK(rdx); + NV_RETPOLINE_THUNK(rsi); + NV_RETPOLINE_THUNK(rdi); + NV_RETPOLINE_THUNK(rbp); + NV_RETPOLINE_THUNK(r8); + NV_RETPOLINE_THUNK(r9); + NV_RETPOLINE_THUNK(r10); + NV_RETPOLINE_THUNK(r11); + NV_RETPOLINE_THUNK(r12); + NV_RETPOLINE_THUNK(r13); + NV_RETPOLINE_THUNK(r14); + NV_RETPOLINE_THUNK(r15); + __asm__(".popsection"); +#endif + +#endif /* _NV_RETPOLINE_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h new file mode 100644 index 0000000..7c3f512 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h @@ -0,0 +1,251 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_TIME_H__
+#define __NV_TIME_H__
+
+#include "conftest.h"
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/jiffies.h>
+
+#include <nvstatus.h>
+
+#define NV_MAX_ISR_DELAY_US       20000
+#define NV_MAX_ISR_DELAY_MS       (NV_MAX_ISR_DELAY_US / 1000)
+#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000)
+
+#if !defined(NV_TIMESPEC64_PRESENT)
+struct timespec64 {
+    __s64 tv_sec;
+    long  tv_nsec;
+};
+#endif
+
+#if !defined(NV_KTIME_GET_RAW_TS64_PRESENT)
+static inline void ktime_get_raw_ts64(struct timespec64 *ts64)
+{
+    struct timespec ts;
+    getrawmonotonic(&ts);
+    ts64->tv_sec = ts.tv_sec;
+    ts64->tv_nsec = ts.tv_nsec;
+}
+#endif
+
+#if !defined(NV_KTIME_GET_REAL_TS64_PRESENT)
+static inline void ktime_get_real_ts64(struct timespec64 *ts64)
+{
+    struct timeval tv;
+    do_gettimeofday(&tv);
+    ts64->tv_sec = tv.tv_sec;
+    ts64->tv_nsec = tv.tv_usec * (NvU64) NSEC_PER_USEC;
+}
+#endif
+
+static inline NvBool nv_timer_less_than
+(
+    const struct timespec64 *a,
+    const struct timespec64 *b
+)
+{
+    return (a->tv_sec == b->tv_sec) ? (a->tv_nsec < b->tv_nsec)
+                                    : (a->tv_sec < b->tv_sec);
+}
+
+#if !defined(NV_TIMESPEC64_PRESENT)
+static inline struct timespec64 timespec64_add
+(
+    const struct timespec64 a,
+    const struct timespec64 b
+)
+{
+    struct timespec64 result;
+
+    result.tv_sec = a.tv_sec + b.tv_sec;
+    result.tv_nsec = a.tv_nsec + b.tv_nsec;
+    while (result.tv_nsec >= NSEC_PER_SEC)
+    {
+        ++result.tv_sec;
+        result.tv_nsec -= NSEC_PER_SEC;
+    }
+    return result;
+}
+
+static inline struct timespec64 timespec64_sub
+(
+    const struct timespec64 a,
+    const struct timespec64 b
+)
+{
+    struct timespec64 result;
+
+    result.tv_sec = a.tv_sec - b.tv_sec;
+    result.tv_nsec = a.tv_nsec - b.tv_nsec;
+    while (result.tv_nsec < 0)
+    {
+        --(result.tv_sec);
+        result.tv_nsec += NSEC_PER_SEC;
+    }
+    return result;
+}
+
+static inline s64 timespec64_to_ns(struct timespec64 *ts)
+{
+    return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+#endif
+
+static inline NvU64 nv_ktime_get_raw_ns(void)
+{
+    struct timespec64 ts;
+    ktime_get_raw_ts64(&ts);
+    return (NvU64)timespec64_to_ns(&ts);
+}
+
+// #define NV_CHECK_DELAY_ACCURACY 1
+
+/*
+ * It is generally a bad idea to use udelay() to wait for more than
+ * a few milliseconds. Since the caller is most likely not aware of
+ * this, we use mdelay() for any full millisecond to be safe.
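+ *
+ * For example, nv_sleep_us(2500) below issues mdelay(2) for the two whole
+ * milliseconds, followed by udelay(500) for the remainder.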
+ */
+static inline NV_STATUS nv_sleep_us(unsigned int us)
+{
+    unsigned long mdelay_safe_msec;
+    unsigned long usec;
+
+#ifdef NV_CHECK_DELAY_ACCURACY
+    struct timespec64 tm1, tm2, tm_diff;
+
+    ktime_get_raw_ts64(&tm1);
+#endif
+
+    if (in_irq() && (us > NV_MAX_ISR_DELAY_US))
+        return NV_ERR_GENERIC;
+
+    mdelay_safe_msec = us / 1000;
+    if (mdelay_safe_msec)
+        mdelay(mdelay_safe_msec);
+
+    usec = us % 1000;
+    if (usec)
+        udelay(usec);
+
+#ifdef NV_CHECK_DELAY_ACCURACY
+    ktime_get_raw_ts64(&tm2);
+    tm_diff = timespec64_sub(tm2, tm1);
+    pr_info("NVRM: delay of %u usec results in actual delay of %llu nsec\n",
+            us, timespec64_to_ns(&tm_diff));
+#endif
+    return NV_OK;
+}
+
+/*
+ * Sleep for the specified number of milliseconds. Yields the CPU to the
+ * scheduler.
+ *
+ * On Linux, a jiffy represents the time that passes between two timer
+ * interrupts. The number of jiffies per second (HZ) varies across the
+ * supported platforms. On i386, where HZ is 100, a timer interrupt is
+ * generated every 10ms. NV_NSECS_TO_JIFFIES should be accurate independent of
+ * the actual value of HZ; any partial jiffies will be 'floor'ed, and the
+ * remainder will be accounted for with mdelay().
+ */
+static inline NV_STATUS nv_sleep_ms(unsigned int ms)
+{
+    NvU64 ns;
+    unsigned long jiffies;
+    unsigned long mdelay_safe_msec;
+    struct timespec64 tm_end, tm_aux;
+#ifdef NV_CHECK_DELAY_ACCURACY
+    struct timespec64 tm_start;
+#endif
+
+    ktime_get_raw_ts64(&tm_aux);
+#ifdef NV_CHECK_DELAY_ACCURACY
+    tm_start = tm_aux;
+#endif
+
+    if (in_irq() && (ms > NV_MAX_ISR_DELAY_MS))
+    {
+        return NV_ERR_GENERIC;
+    }
+
+    if (irqs_disabled() || in_interrupt() || in_atomic())
+    {
+        mdelay(ms);
+        return NV_OK;
+    }
+
+    ns = ms * (NvU64) NSEC_PER_MSEC;
+    tm_end.tv_nsec = ns;
+    tm_end.tv_sec = 0;
+    tm_end = timespec64_add(tm_aux, tm_end);
+
+    /* do we have a full jiffy to wait? */
+    jiffies = NV_NSECS_TO_JIFFIES(ns);
+
+    if (jiffies)
+    {
+        //
+        // If we have at least one full jiffy to wait, give up
+        // the CPU; since we may be rescheduled before
+        // the requested timeout has expired, loop until less
+        // than a jiffy of the desired delay remains.
+        //
+        set_current_state(TASK_INTERRUPTIBLE);
+        do
+        {
+            schedule_timeout(jiffies);
+            ktime_get_raw_ts64(&tm_aux);
+            if (nv_timer_less_than(&tm_aux, &tm_end))
+            {
+                tm_aux = timespec64_sub(tm_end, tm_aux);
+                ns = (NvU64) timespec64_to_ns(&tm_aux);
+            }
+            else
+                ns = 0;
+        } while ((jiffies = NV_NSECS_TO_JIFFIES(ns)) != 0);
+    }
+
+    if (ns > (NvU64) NSEC_PER_MSEC)
+    {
+        mdelay_safe_msec = ns / (NvU64) NSEC_PER_MSEC;
+        mdelay(mdelay_safe_msec);
+        ns %= (NvU64) NSEC_PER_MSEC;
+    }
+    if (ns)
+    {
+        ndelay(ns);
+    }
+#ifdef NV_CHECK_DELAY_ACCURACY
+    ktime_get_raw_ts64(&tm_aux);
+    tm_aux = timespec64_sub(tm_aux, tm_start);
+    pr_info("NVRM: delay of %u msec results in actual delay of %lld.%09ld sec\n",
+            ms, tm_aux.tv_sec, tm_aux.tv_nsec);
+#endif
+    return NV_OK;
+}
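+
+/*
+ * Usage sketch (device_ready() is a hypothetical helper): poll a
+ * device-ready flag, yielding the CPU between reads:
+ *
+ *     while (!device_ready(nv))
+ *     {
+ *         if (nv_sleep_ms(10) != NV_OK)
+ *             break;
+ *     }
+ */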
+
+#endif // __NV_TIME_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h
new file mode 100644
index 0000000..6af49fb
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h
@@ -0,0 +1,66 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NV_TIMER_H__
+#define __NV_TIMER_H__
+
+#include <linux/timer.h>
+#include <linux/kernel.h> // For container_of
+
+#include "conftest.h"
+
+struct nv_timer
+{
+    struct timer_list kernel_timer;
+    void (*nv_timer_callback)(struct nv_timer *nv_timer);
+};
+
+static inline void nv_timer_callback_typed_data(struct timer_list *timer)
+{
+    struct nv_timer *nv_timer =
+        container_of(timer, struct nv_timer, kernel_timer);
+
+    nv_timer->nv_timer_callback(nv_timer);
+}
+
+static inline void nv_timer_callback_anon_data(unsigned long arg)
+{
+    struct nv_timer *nv_timer = (struct nv_timer *)arg;
+
+    nv_timer->nv_timer_callback(nv_timer);
+}
+
+static inline void nv_timer_setup(struct nv_timer *nv_timer,
+                                  void (*callback)(struct nv_timer *nv_timer))
+{
+    nv_timer->nv_timer_callback = callback;
+
+#if defined(NV_TIMER_SETUP_PRESENT)
+    timer_setup(&nv_timer->kernel_timer, nv_timer_callback_typed_data, 0);
+#else
+    init_timer(&nv_timer->kernel_timer);
+    nv_timer->kernel_timer.function = nv_timer_callback_anon_data;
+    nv_timer->kernel_timer.data = (unsigned long)nv_timer;
+#endif
+}
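+
+/*
+ * Usage sketch (my_timeout_fn and the one-second period are hypothetical):
+ *
+ *     static void my_timeout_fn(struct nv_timer *t) { ... }
+ *
+ *     struct nv_timer t;
+ *     nv_timer_setup(&t, my_timeout_fn);
+ *     mod_timer(&t.kernel_timer, jiffies + HZ);
+ */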
+
+#endif // __NV_TIMER_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h
new file mode 100644
index 0000000..08f7676
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h
@@ -0,0 +1,1103 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _NV_H_
+#define _NV_H_
+
+#include <nvlimits.h>
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__)
+  #include <sys/stddef.h>   // NULL
+#elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+  #include <linux/stddef.h> // NULL
+#else
+  #include <stddef.h>       // NULL
+#endif
+
+#include <nvstatus.h>
+#include "nv_stdarg.h"
+#include <nv-caps.h>
+#include <nv-ioctl.h>
+#include <nvmisc.h>
+
+extern nv_cap_t *nvidia_caps_root;
+
+extern const NvBool nv_is_rm_firmware_supported_os;
+
+#include
+#include
+
+#include <nv-kernel-interface-api.h>
+
+/* NVIDIA's reserved major character device number (Linux). */
+#define NV_MAJOR_DEVICE_NUMBER 195
+
+#define GPU_UUID_LEN (16)
+
+/*
+ * Buffer size for an ASCII UUID: We need 2 digits per byte, plus space
+ * for "GPU", 5 dashes, and '\0' termination:
+ */
+#define GPU_UUID_ASCII_LEN (GPU_UUID_LEN * 2 + 9)
+
+/*
+ * #define an absolute maximum used as a sanity check for the
+ * NV_ESC_IOCTL_XFER_CMD ioctl() size argument.
+ */
+#define NV_ABSOLUTE_MAX_IOCTL_SIZE 16384
+
+/*
+ * Solaris provides no more than 8 bits for the argument size in
+ * the ioctl() command encoding; make sure we don't exceed this
+ * limit.
+ */
+#define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 1 : -1)
+#define __NV_IOWR(nr, type) ({                                               \
+    typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)];         \
+    _IOWR(NV_IOCTL_MAGIC, (nr), type);                                       \
+})
+
+#define NV_PCI_DEV_FMT          "%04x:%02x:%02x.%x"
+#define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \
+                                (nv)->pci_info.slot, (nv)->pci_info.function
+
+#define NV_RM_DEVICE_INTR_ADDRESS 0x100
+
+/*!
+ * @brief The order of the display clocks in the enum defined below must be
+ * kept in sync with the mapping array and macro listed here.
+ * All four should be updated simultaneously whenever clocks are
+ * added to or removed from this list.
+ * Also, TEGRASOC_WHICH_CLK_MAX is used in various places
+ * in the files mentioned below.
+ * arch/nvalloc/unix/Linux/nv-linux.h + * + * arch/nvalloc/unix/src/os.c + * dispClkMapRmToOsArr[] = {...}; + * + * arch/nvalloc/unix/Linux/nv-clk.c + * osMapClk[] = {...}; + * + */ +typedef enum _TEGRASOC_WHICH_CLK +{ + TEGRASOC_WHICH_CLK_NVDISPLAYHUB, + TEGRASOC_WHICH_CLK_NVDISPLAY_DISP, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0, + TEGRASOC_WHICH_CLK_NVDISPLAY_P1, + TEGRASOC_WHICH_CLK_DPAUX0, + TEGRASOC_WHICH_CLK_FUSE, + TEGRASOC_WHICH_CLK_DSIPLL_VCO, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_VCO, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB, + TEGRASOC_WHICH_CLK_SPPLL0_DIV10, + TEGRASOC_WHICH_CLK_SPPLL0_DIV25, + TEGRASOC_WHICH_CLK_SPPLL0_DIV27, + TEGRASOC_WHICH_CLK_SPPLL1_VCO, + TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL1_DIV27, + TEGRASOC_WHICH_CLK_VPLL0_REF, + TEGRASOC_WHICH_CLK_VPLL0, + TEGRASOC_WHICH_CLK_VPLL1, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF, + TEGRASOC_WHICH_CLK_RG0, + TEGRASOC_WHICH_CLK_RG1, + TEGRASOC_WHICH_CLK_DISPPLL, + TEGRASOC_WHICH_CLK_DISPHUBPLL, + TEGRASOC_WHICH_CLK_DSI_LP, + TEGRASOC_WHICH_CLK_DSI_CORE, + TEGRASOC_WHICH_CLK_DSI_PIXEL, + TEGRASOC_WHICH_CLK_PRE_SOR0, + TEGRASOC_WHICH_CLK_PRE_SOR1, + TEGRASOC_WHICH_CLK_DP_LINK_REF, + TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M, + TEGRASOC_WHICH_CLK_RG0_M, + TEGRASOC_WHICH_CLK_RG1_M, + TEGRASOC_WHICH_CLK_SOR0_M, + TEGRASOC_WHICH_CLK_SOR1_M, + TEGRASOC_WHICH_CLK_PLLHUB, + TEGRASOC_WHICH_CLK_SOR0, + TEGRASOC_WHICH_CLK_SOR1, + TEGRASOC_WHICH_CLK_SOR_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SF0, + TEGRASOC_WHICH_CLK_SF0, + TEGRASOC_WHICH_CLK_SF1, + TEGRASOC_WHICH_CLK_DSI_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SOR0_REF, + TEGRASOC_WHICH_CLK_PRE_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR0_PLL_REF, + TEGRASOC_WHICH_CLK_SOR1_PLL_REF, + TEGRASOC_WHICH_CLK_SOR0_REF, + TEGRASOC_WHICH_CLK_SOR1_REF, + TEGRASOC_WHICH_CLK_OSC, + TEGRASOC_WHICH_CLK_DSC, + TEGRASOC_WHICH_CLK_MAUD, + TEGRASOC_WHICH_CLK_AZA_2XBIT, + TEGRASOC_WHICH_CLK_AZA_BIT, + TEGRASOC_WHICH_CLK_MIPI_CAL, + TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL, + TEGRASOC_WHICH_CLK_SOR0_DIV, + TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only. 
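+
+    // Illustrative boundary check against TEGRASOC_WHICH_CLK_MAX, as used
+    // with the per-clock interfaces declared later in this header:
+    //   if (clk >= TEGRASOC_WHICH_CLK_MAX)
+    //       return NV_ERR_GENERIC;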
+} TEGRASOC_WHICH_CLK; + +#ifdef NVRM + +extern const char *pNVRM_ID; + +/* + * ptr arithmetic convenience + */ + +typedef union +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +} nv_hwreg_t, * nv_phwreg_t; + + +#define NVRM_PCICFG_NUM_BARS 6 +#define NVRM_PCICFG_BAR_OFFSET(i) (0x10 + (i) * 4) +#define NVRM_PCICFG_BAR_REQTYPE_MASK 0x00000001 +#define NVRM_PCICFG_BAR_REQTYPE_MEMORY 0x00000000 +#define NVRM_PCICFG_BAR_MEMTYPE_MASK 0x00000006 +#define NVRM_PCICFG_BAR_MEMTYPE_64BIT 0x00000004 +#define NVRM_PCICFG_BAR_ADDR_MASK 0xfffffff0 + +#define NVRM_PCICFG_NUM_DWORDS 16 + +#define NV_GPU_NUM_BARS 3 +#define NV_GPU_BAR_INDEX_REGS 0 +#define NV_GPU_BAR_INDEX_FB 1 +#define NV_GPU_BAR_INDEX_IMEM 2 + +typedef struct +{ + NvU64 cpu_address; + NvU64 size; + NvU32 offset; + NvU32 *map; + nv_phwreg_t map_u; +} nv_aperture_t; + +typedef struct +{ + char *name; + NvU32 *data; +} nv_parm_t; + +#define NV_RM_PAGE_SHIFT 12 +#define NV_RM_PAGE_SIZE (1 << NV_RM_PAGE_SHIFT) +#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1) + +#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT) +#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT) +#define NV_RM_PAGES_TO_OS_PAGES(count) \ + ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \ + ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0)) + +#if defined(NVCPU_X86_64) +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3) +#else +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2) +#endif + +typedef struct nvidia_stack_s +{ + NvU32 size; + void *top; + NvU8 stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16))); +} nvidia_stack_t; + +/* + * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t + */ +typedef nvidia_stack_t nv_stack_t; + +typedef struct nv_file_private_t nv_file_private_t; + +/* + * this is a wrapper for unix events + * unlike the events that will be returned to clients, this includes + * kernel-specific data, such as file pointer, etc.. 
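+ *
+ * For example, nv_post_event() (declared later in this header) would queue
+ * one of these on the owning device's event_list, and nv_get_event() would
+ * later return it for the file descriptor recorded in nvfp and fd.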
+ */ +typedef struct nv_event_s +{ + NvHandle hParent; + NvHandle hObject; + NvU32 index; + NvU32 info32; + NvU16 info16; + nv_file_private_t *nvfp; /* per file-descriptor data pointer */ + NvU32 fd; + NvBool active; /* whether the event should be signaled */ + NvU32 refcount; /* count of associated RM events */ + struct nv_event_s *next; +} nv_event_t; + +typedef struct nv_kern_mapping_s +{ + void *addr; + NvU64 size; + NvU32 modeFlag; + struct nv_kern_mapping_s *next; +} nv_kern_mapping_t; + +typedef struct nv_usermap_access_params_s +{ + NvU64 addr; + NvU64 size; + NvU64 offset; + NvU64 *page_array; + NvU64 num_pages; + NvU64 mmap_start; + NvU64 mmap_size; + NvU64 access_start; + NvU64 access_size; + NvU64 remap_prot_extra; + NvBool contig; + NvU32 caching; +} nv_usermap_access_params_t; + +/* + * It stores mapping context per mapping + */ +typedef struct nv_alloc_mapping_context_s { + void *alloc; + NvU64 page_index; + NvU64 *page_array; + NvU64 num_pages; + NvU64 mmap_start; + NvU64 mmap_size; + NvU64 access_start; + NvU64 access_size; + NvU64 remap_prot_extra; + NvU32 prot; + NvBool valid; + NvU32 caching; +} nv_alloc_mapping_context_t; + +typedef enum +{ + NV_SOC_IRQ_DISPLAY_TYPE = 0x1, + NV_SOC_IRQ_DPAUX_TYPE, + NV_SOC_IRQ_GPIO_TYPE, + NV_SOC_IRQ_HDACODEC_TYPE, + + + + + NV_SOC_IRQ_INVALID_TYPE +} nv_soc_irq_type_t; + +/* + * It stores interrupt numbers and interrupt type and private data + */ +typedef struct nv_soc_irq_info_s { + NvU32 irq_num; + nv_soc_irq_type_t irq_type; + NvBool bh_pending; + union { + NvU32 gpio_num; + NvU32 dpaux_instance; + } irq_data; +} nv_soc_irq_info_t; + +#define NV_MAX_SOC_IRQS 6 +#define NV_MAX_DPAUX_NUM_DEVICES 4 +#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING + +#define NV_IGPU_LEGACY_STALL_IRQ 70 +#define NV_IGPU_MAX_STALL_IRQS 3 +#define NV_IGPU_MAX_NONSTALL_IRQS 1 +/* + * per device state + */ + +/* DMA-capable device data, defined by kernel interface layer */ +typedef struct nv_dma_device nv_dma_device_t; + +typedef struct nv_state_t +{ + void *priv; /* private data */ + void *os_state; /* os-specific device state */ + + int flags; + + /* PCI config info */ + nv_pci_info_t pci_info; + NvU16 subsystem_id; + NvU16 subsystem_vendor; + NvU32 gpu_id; + NvU32 iovaspace_id; + struct + { + NvBool valid; + NvU8 uuid[GPU_UUID_LEN]; + } nv_uuid_cache; + void *handle; + + NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS]; + + /* physical characteristics */ + nv_aperture_t bars[NV_GPU_NUM_BARS]; + nv_aperture_t *regs; + nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES]; + nv_aperture_t *hdacodec_regs; + nv_aperture_t *mipical_regs; + nv_aperture_t *fb, ud; + nv_aperture_t *simregs; + nv_aperture_t *emc_regs; + + NvU32 num_dpaux_instance; + NvU32 interrupt_line; + NvU32 dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES]; + nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS]; + NvS32 current_soc_irq; + NvU32 num_soc_irqs; + NvU32 hdacodec_irq; + NvU8 *soc_dcb_blob; + NvU32 soc_dcb_size; + NvU32 disp_sw_soc_chip_id; + + NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS]; + NvU32 igpu_nonstall_irq; + NvU32 num_stall_irqs; + NvU64 dma_mask; + + NvBool primary_vga; + + NvU32 sim_env; + + NvU32 rc_timer_enabled; + + /* list of events allocated for this device */ + nv_event_t *event_list; + + /* lock to protect event_list */ + void *event_spinlock; + + nv_kern_mapping_t *kern_mappings; + + /* Kernel interface DMA device data */ + nv_dma_device_t *dma_dev; + nv_dma_device_t *niso_dma_dev; + + /* + * Per-GPU queue. 
The actual queue object is usually allocated in the
+     * arch-specific parent structure (e.g. nv_linux_state_t), and this
+     * pointer just points to it.
+     */
+    struct os_work_queue *queue;
+
+    /* For loading RM as a firmware (DCE or GSP) client */
+    NvBool request_firmware;        /* request firmware from the OS */
+    NvBool request_fw_client_rm;    /* attempt to init RM as a FW client */
+    NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */
+    NvBool enable_firmware_logs;    /* attempt to enable firmware log decoding/printing */
+
+    /* Tracks whether nvidia_remove has been called */
+    NvBool removed;
+
+    NvBool console_device;
+
+    /* Tracks whether the GPU is an external GPU */
+    NvBool is_external_gpu;
+
+    /* Tracks whether the regkey PreserveVideoMemoryAllocations is set */
+    NvBool preserve_vidmem_allocations;
+
+    /* Variable to force allocation of 32-bit addressable memory */
+    NvBool force_dma32_alloc;
+
+    /* Variable to track if the device has entered a dynamic power state */
+    NvBool dynamic_power_entered;
+
+    /* PCI power state should be D0 during system suspend */
+    NvBool d0_state_in_suspend;
+
+    /* Current cyclestats client and context */
+    NvU32 profiler_owner;
+    void *profiler_context;
+
+    /*
+     * RMAPI objects to use in the OS layer to talk to core RM.
+     *
+     * Note that we only need to store one subdevice handle: in SLI, we will
+     * have a separate nv_state_t per physical GPU.
+     */
+    struct {
+        NvHandle hClient;
+        NvHandle hDevice;
+        NvHandle hSubDevice;
+        NvHandle hI2C;
+        NvHandle hDisp;
+    } rmapi;
+
+    /* Bool to check if the ISO IOMMU is enabled */
+    NvBool iso_iommu_present;
+
+    /* Bool to check if dma-buf is supported */
+    NvBool dma_buf_supported;
+
+    NvBool printed_openrm_enable_unsupported_gpus_error;
+
+    /* Check if the NVPCF DSM function is implemented under NVPCF or GPU device scope */
+    NvBool nvpcf_dsm_in_gpu_scope;
+
+} nv_state_t;
+
+// These defines need to be in sync with the defines in system.h
+#define OS_TYPE_LINUX   0x1
+#define OS_TYPE_FREEBSD 0x2
+#define OS_TYPE_SUNOS   0x3
+#define OS_TYPE_VMWARE  0x4
+
+struct nv_file_private_t
+{
+    NvHandle *handles;
+    NvU16 maxHandles;
+    NvU32 deviceInstance;
+    NvU8 metadata[64];
+
+    nv_file_private_t *ctl_nvfp;
+    void *ctl_nvfp_priv;
+};
+
+// Forward-declare the gpu ops structures
+typedef struct gpuSession *nvgpuSessionHandle_t;
+typedef struct gpuDevice *nvgpuDeviceHandle_t;
+typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t;
+typedef struct gpuChannel *nvgpuChannelHandle_t;
+typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t;
+typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t;
+typedef struct UvmGpuCaps_tag *nvgpuCaps_t;
+typedef struct UvmGpuCopyEnginesCaps_tag *nvgpuCesCaps_t;
+typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t;
+typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t;
+typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t;
+typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t;
+typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t;
+typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t;
+typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t;
+typedef struct UvmGpuAccessCntrConfig_tag *nvgpuAccessCntrConfig_t;
+typedef struct UvmGpuInfo_tag nvgpuInfo_t;
+typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t;
+typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t;
+typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t;
+typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t;
+typedef struct
UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t; +typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t; +typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t; +typedef struct UvmGpuChannelResourceBindParams_tag *nvgpuChannelResourceBindParams_t; +typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocParams_t; +typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t; +typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t; +typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64); +typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64); + +/* + * flags + */ + +#define NV_FLAG_OPEN 0x0001 +#define NV_FLAG_EXCLUDE 0x0002 +#define NV_FLAG_CONTROL 0x0004 +// Unused 0x0008 +#define NV_FLAG_SOC_DISPLAY 0x0010 +#define NV_FLAG_USES_MSI 0x0020 +#define NV_FLAG_USES_MSIX 0x0040 +#define NV_FLAG_PASSTHRU 0x0080 +#define NV_FLAG_SUSPENDED 0x0100 +#define NV_FLAG_SOC_IGPU 0x0200 +// Unused 0x0400 +#define NV_FLAG_PERSISTENT_SW_STATE 0x0800 +#define NV_FLAG_IN_RECOVERY 0x1000 +// Unused 0x2000 +#define NV_FLAG_UNBIND_LOCK 0x4000 +/* To be set when GPU is not present on the bus, to help device teardown */ +#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000 + +typedef enum +{ + NV_PM_ACTION_HIBERNATE, + NV_PM_ACTION_STANDBY, + NV_PM_ACTION_RESUME +} nv_pm_action_t; + +typedef enum +{ + NV_PM_ACTION_DEPTH_DEFAULT, + NV_PM_ACTION_DEPTH_MODESET, + NV_PM_ACTION_DEPTH_UVM +} nv_pm_action_depth_t; + +typedef enum +{ + NV_DYNAMIC_PM_NEVER, + NV_DYNAMIC_PM_COARSE, + NV_DYNAMIC_PM_FINE +} nv_dynamic_power_mode_t; + +typedef enum +{ + NV_POWER_STATE_IN_HIBERNATE, + NV_POWER_STATE_IN_STANDBY, + NV_POWER_STATE_RUNNING +} nv_power_state_t; + +typedef enum +{ + NV_FIRMWARE_GSP, + NV_FIRMWARE_GSP_LOG +} nv_firmware_t; + +#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga) + +#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL) +#define NV_IS_SOC_DISPLAY_DEVICE(nv) \ + ((nv)->flags & NV_FLAG_SOC_DISPLAY) + +#define NV_IS_SOC_IGPU_DEVICE(nv) \ + ((nv)->flags & NV_FLAG_SOC_IGPU) + +#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \ + (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0) + +#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \ + ((nv)->iso_iommu_present) + +/* + * NVIDIA ACPI event ID to be passed into the core NVIDIA driver for + * AC/DC event. + */ +#define NV_SYSTEM_ACPI_BATTERY_POWER_EVENT 0x8002 + +/* + * GPU add/remove events + */ +#define NV_SYSTEM_GPU_ADD_EVENT 0x9001 +#define NV_SYSTEM_GPU_REMOVE_EVENT 0x9002 + +/* + * NVIDIA ACPI sub-event IDs (event types) to be passed into + * to core NVIDIA driver for ACPI events. 
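+ *
+ * For example, a switch from AC to battery power would be reported as
+ * NV_SYSTEM_ACPI_BATTERY_POWER_EVENT (0x8002, defined above) with the
+ * event type NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY defined below.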
+ */ +#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY 1 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED 1 + +#define NV_ACPI_NVIF_HANDLE_PRESENT 0x01 +#define NV_ACPI_DSM_HANDLE_PRESENT 0x02 +#define NV_ACPI_WMMX_HANDLE_PRESENT 0x04 + +#define NV_EVAL_ACPI_METHOD_NVIF 0x01 +#define NV_EVAL_ACPI_METHOD_WMMX 0x02 + +#define NV_I2C_CMD_READ 1 +#define NV_I2C_CMD_WRITE 2 +#define NV_I2C_CMD_SMBUS_READ 3 +#define NV_I2C_CMD_SMBUS_WRITE 4 +#define NV_I2C_CMD_SMBUS_QUICK_WRITE 5 +#define NV_I2C_CMD_SMBUS_QUICK_READ 6 +#define NV_I2C_CMD_SMBUS_BLOCK_READ 7 +#define NV_I2C_CMD_SMBUS_BLOCK_WRITE 8 + +// Flags needed by OSAllocPagesNode +#define NV_ALLOC_PAGES_NODE_NONE 0x0 +#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1 + +/* +** where we hide our nv_state_t * ... +*/ +#define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p)) +#define NV_GET_NV_STATE(pGpu) \ + (nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL) + +#define IS_REG_OFFSET(nv, offset, length) \ + (((offset) >= (nv)->regs->cpu_address) && \ + (((offset) + ((length)-1)) <= \ + (nv)->regs->cpu_address + ((nv)->regs->size-1))) + +#define IS_FB_OFFSET(nv, offset, length) \ + (((nv)->fb) && ((offset) >= (nv)->fb->cpu_address) && \ + (((offset) + ((length)-1)) <= (nv)->fb->cpu_address + ((nv)->fb->size-1))) + +#define IS_UD_OFFSET(nv, offset, length) \ + (((nv)->ud.cpu_address != 0) && ((nv)->ud.size != 0) && \ + ((offset) >= (nv)->ud.cpu_address) && \ + (((offset) + ((length)-1)) <= (nv)->ud.cpu_address + ((nv)->ud.size-1))) + +#define IS_IMEM_OFFSET(nv, offset, length) \ + (((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) && \ + ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) && \ + ((offset) >= (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) && \ + (((offset) + ((length) - 1)) <= \ + (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + \ + ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))) + +#define NV_RM_MAX_MSIX_LINES 8 + +#define NV_MAX_ISR_DELAY_US 20000 +#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000) + +#define NV_TIMERCMP(a, b, CMP) \ + (((a)->tv_sec == (b)->tv_sec) ? \ + ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec)) + +#define NV_TIMERADD(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ + if ((result)->tv_usec >= 1000000) \ + { \ + ++(result)->tv_sec; \ + (result)->tv_usec -= 1000000; \ + } \ + } + +#define NV_TIMERSUB(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) \ + { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ + } + +#define NV_TIMEVAL_TO_US(tv) ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec) + +#ifndef NV_ALIGN_UP +#define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1)) +#endif +#ifndef NV_ALIGN_DOWN +#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1)) +#endif + +/* + * driver internal interfaces + */ + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for UNIX specific OS interface. 
+ * + * --------------------------------------------------------------------------- + */ + +NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *); +void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **); +NV_STATUS NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *); +NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *); +NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32); + +NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64); +NvU64 NV_API_CALL nv_get_user_phys_address (NvU64); +nv_state_t* NV_API_CALL nv_get_adapter_state (NvU32, NvU8, NvU8); +nv_state_t* NV_API_CALL nv_get_ctl_state (void); + +void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 ); + +NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *); + +NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **); +void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **); + +NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **); +void NV_API_CALL nv_unregister_peer_io_mem(nv_state_t *, void *); + +struct sg_table; + +NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *); +void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *); +NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **); +void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *); + +NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **); +NV_STATUS NV_API_CALL nv_dma_map_pages (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_pages (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64); + +NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64); + +void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *); +void NV_API_CALL nv_dma_enable_nvlink (nv_dma_device_t *); + +NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *); +NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *); + +void NV_API_CALL nv_post_event (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool); +NvS32 NV_API_CALL nv_get_event (nv_file_private_t *, nv_event_t *, NvU32 *); + +void* NV_API_CALL nv_i2c_add_adapter (nv_state_t *, NvU32); +void NV_API_CALL nv_i2c_del_adapter (nv_state_t *, void *); + +void NV_API_CALL nv_acpi_methods_init (NvU32 *); +void NV_API_CALL nv_acpi_methods_uninit (void); + +NV_STATUS NV_API_CALL nv_acpi_method (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_dsm_method (nv_state_t *, NvU8 *, 
NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_ddc_method (nv_state_t *, void *, NvU32 *, NvBool); +NV_STATUS NV_API_CALL nv_acpi_dod_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_rom_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_get_powersource (NvU32 *); +NvBool NV_API_CALL nv_acpi_is_battery_present(void); + +NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32, const char *); + +NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list); + +NvU64 NV_API_CALL nv_get_dma_start_address (nv_state_t *); +NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *); +NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *); +NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *); + +NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *); +const void*NV_API_CALL nv_get_firmware(nv_state_t *, nv_firmware_t, const void **, NvU32 *); +void NV_API_CALL nv_put_firmware(const void *); + +nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **); +void NV_API_CALL nv_put_file_private(void *); + +NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *); + +NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**); +NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode); + +void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv); + +void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64); + +void NV_API_CALL nv_p2p_free_platform_data(void *data); + +#if defined(NVCPU_PPC64LE) +NV_STATUS NV_API_CALL nv_get_nvlink_line_rate (nv_state_t *, NvU32 *); +#endif + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *); +void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *); +void NV_API_CALL nv_release_mmap_lock (nv_state_t *); +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *); +void NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool); + +NV_STATUS NV_API_CALL nv_indicate_idle (nv_state_t *); +NV_STATUS NV_API_CALL nv_indicate_not_idle (nv_state_t *); +void NV_API_CALL nv_idle_holdoff (nv_state_t *); + +NvBool NV_API_CALL nv_dynamic_power_available (nv_state_t *); +void NV_API_CALL nv_audio_dynamic_power (nv_state_t *); + +void NV_API_CALL nv_control_soc_irqs (nv_state_t *, NvBool bEnable); +NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *); + +NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*); +int NV_API_CALL nv_cap_drv_init(void); +void NV_API_CALL nv_cap_drv_exit(void); +NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *); +NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *); + +NvU32 NV_API_CALL nv_get_os_type(void); + +void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end); +struct dma_buf; +typedef struct nv_dma_buf nv_dma_buf_t; +struct drm_gem_object; + +NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *); +void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *); +NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvU32 *, struct sg_table **, nv_dma_buf_t **); +NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvU32 *, struct sg_table **, nv_dma_buf_t **); +void NV_API_CALL nv_dma_release_dma_buf (nv_dma_buf_t *); + +void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *); + + +NvBool NV_API_CALL 
nv_platform_supports_s0ix (void); +NvBool NV_API_CALL nv_s2idle_pm_configured (void); + + +NvBool NV_API_CALL nv_is_chassis_notebook (void); +void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv); +void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv); + +typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *); + +NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *); +NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *); + + +NV_STATUS NV_API_CALL nv_i2c_transfer(nv_state_t *, NvU32, NvU8, nv_i2c_msg_t *, int); +void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *); +NV_STATUS NV_API_CALL nv_i2c_bus_status(nv_state_t *, NvU32, NvS32 *, NvS32 *); +NV_STATUS NV_API_CALL nv_clk_get_handles (nv_state_t *); +void NV_API_CALL nv_clk_clear_handles (nv_state_t *); +NV_STATUS NV_API_CALL nv_enable_clk (nv_state_t *, TEGRASOC_WHICH_CLK); +NvBool NV_API_CALL nv_is_clk_enabled (nv_state_t *, TEGRASOC_WHICH_CLK); +void NV_API_CALL nv_disable_clk (nv_state_t *, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_get_curr_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_get_max_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_get_min_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *); +NV_STATUS NV_API_CALL nv_set_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32); +NV_STATUS NV_API_CALL nv_set_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK); +NV_STATUS NV_API_CALL nv_get_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK*); +NV_STATUS NV_API_CALL nv_soc_device_reset (nv_state_t *); +NV_STATUS NV_API_CALL nv_imp_get_import_data (TEGRA_IMP_IMPORT_DATA *); +NV_STATUS NV_API_CALL nv_imp_enable_disable_rfl (nv_state_t *nv, NvBool bEnable); +NV_STATUS NV_API_CALL nv_imp_icc_set_bw (nv_state_t *nv, NvU32 avg_bw_kbps, NvU32 floor_bw_kbps); +NV_STATUS NV_API_CALL nv_soc_pm_powergate (nv_state_t *); +NV_STATUS NV_API_CALL nv_soc_pm_unpowergate (nv_state_t *); +NV_STATUS NV_API_CALL nv_gpio_get_pin_state(nv_state_t *, NvU32, NvU32 *); +void NV_API_CALL nv_gpio_set_pin_state(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_set_pin_direction(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_get_pin_direction(nv_state_t *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL nv_gpio_get_pin_number(nv_state_t *, NvU32, NvU32 *); +NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt(nv_state_t *, NvU32, NvU32); +NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances); +NV_STATUS NV_API_CALL nv_get_tegra_brightness_level(nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL nv_set_tegra_brightness_level(nv_state_t *, NvU32); +NV_STATUS NV_API_CALL nv_get_syncpoint_aperture(NvU32, NvU64 *, NvU64 *, NvU32 *); +NvU32 NV_API_CALL nv_tegra_get_rm_interface_type(NvU32); +NV_STATUS NV_API_CALL nv_tegra_dce_register_ipc_client(NvU32, void *, nvTegraDceClientIpcCallback, NvU32 *); +NV_STATUS NV_API_CALL nv_tegra_dce_client_ipc_send_recv(NvU32, void *, NvU32); +NV_STATUS NV_API_CALL nv_tegra_dce_unregister_ipc_client(NvU32); +NV_STATUS NV_API_CALL nv_dsi_parse_panel_props(nv_state_t *, void *); +NvBool NV_API_CALL nv_dsi_is_panel_connected(nv_state_t *); +NV_STATUS NV_API_CALL nv_dsi_panel_enable(nv_state_t *, void *); +NV_STATUS NV_API_CALL nv_dsi_panel_reset(nv_state_t *, void *); +void NV_API_CALL nv_dsi_panel_disable(nv_state_t *, void *); +void NV_API_CALL nv_dsi_panel_cleanup(nv_state_t *, 
void *); +NV_STATUS NV_API_CALL nv_soc_mipi_cal_reset(nv_state_t *); +NvU32 NV_API_CALL nv_soc_fuse_register_read (NvU32 addr); + + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for Resource Manager interface. + * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_rm (nvidia_stack_t *); +void NV_API_CALL rm_shutdown_rm (nvidia_stack_t *); +NvBool NV_API_CALL rm_init_private_state (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_free_private_state (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_init_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_disable_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_shutdown_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_exclude_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_acquire_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_release_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); +NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *); +void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t); +NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_save_low_res_mode (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_vbios_version (nvidia_stack_t *, nv_state_t *, char *); +char* NV_API_CALL rm_get_gpu_uuid (nvidia_stack_t *, nv_state_t *); +const NvU8* NV_API_CALL rm_get_gpu_uuid_raw (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_firmware_version (nvidia_stack_t *, nv_state_t *, char *, NvLength); +void NV_API_CALL rm_cleanup_file_private (nvidia_stack_t *, nv_state_t *, nv_file_private_t *); +void NV_API_CALL rm_unbind_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_read_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *); +NV_STATUS NV_API_CALL rm_write_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32); +void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *); +char* NV_API_CALL rm_remove_spaces (const char *); +char* NV_API_CALL rm_string_token (char **, const char); + +NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *); +const char* NV_API_CALL rm_get_device_name (NvU16, NvU16, NvU16); + +NV_STATUS NV_API_CALL rm_is_supported_device (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_supported_pci_device(NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + 
NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning); + +void NV_API_CALL rm_i2c_remove_adapters (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_i2c_is_smbus_capable (nvidia_stack_t *, nv_state_t *, void *); +NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, NvU8, NvU8, NvU8, NvU32, NvU8 *); + +NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU32); + +NV_STATUS NV_API_CALL rm_system_event (nvidia_stack_t *, NvU32, NvU32); + +void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *); +NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64); +NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *); +NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **); +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU32, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *); +void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle); +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, NvU64 *); +NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64); +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **); +void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *); +NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *); + +void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd); +NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(nvidia_stack_t *, nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *); +NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *); +NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *); +void 
NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool); +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_disable_iomap_wc(void); + +void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool); +void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool); +const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *); +const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *); +const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool); + +void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32); +NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *); + +/* vGPU VFIO specific functions */ +NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16); +NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 **, NvBool); +NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8); +NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *); +NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *); +NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *); +NV_STATUS NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *); +NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *); + +NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*); +nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*); +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size); + +/* Callbacks should occur roughly every 10ms. */ +#define NV_SNAPSHOT_TIMER_HZ 100 +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)); +void NV_API_CALL nv_flush_snapshot_timer(void); +void NV_API_CALL nv_stop_snapshot_timer(void); + +static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv) +{ + return nv->nv_uuid_cache.valid ? 
nv->nv_uuid_cache.uuid : NULL; +} + + +/* nano second resolution timer callback structure */ +typedef struct nv_nano_timer nv_nano_timer_t; + +/* nano timer functions */ +void NV_API_CALL nv_create_nano_timer(nv_state_t *, void *pTmrEvent, nv_nano_timer_t **); +void NV_API_CALL nv_start_nano_timer(nv_state_t *nv, nv_nano_timer_t *, NvU64 timens); +NV_STATUS NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t *, void *pTmrEvent); +void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *); +void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *); + + +#if defined(NVCPU_X86_64) + +static inline NvU64 nv_rdtsc(void) +{ + NvU64 val; + __asm__ __volatile__ ("rdtsc \t\n" + "shlq $0x20,%%rdx \t\n" + "orq %%rdx,%%rax \t\n" + : "=A" (val)); + return val; +} + +#endif + +#endif /* NVRM */ + +static inline int nv_count_bits(NvU64 word) +{ + NvU64 bits; + + bits = (word & 0x5555555555555555ULL) + ((word >> 1) & 0x5555555555555555ULL); + bits = (bits & 0x3333333333333333ULL) + ((bits >> 2) & 0x3333333333333333ULL); + bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >> 4) & 0x0f0f0f0f0f0f0f0fULL); + bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >> 8) & 0x00ff00ff00ff00ffULL); + bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL); + bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL); + + return (int)(bits); +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h new file mode 100644 index 0000000..0ab546b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_CPU_UUID_H_ +#define _NV_CPU_UUID_H_ + +#define NV_UUID_LEN 16 + +typedef struct nv_uuid +{ + NvU8 uuid[NV_UUID_LEN]; + +} NvUuid; + +#define NV_UUID_HI(pUuid) (*((NvU64*)((pUuid)->uuid + (NV_UUID_LEN >> 1)))) +#define NV_UUID_LO(pUuid) (*((NvU64*)((pUuid)->uuid + 0))) + +typedef NvUuid NvSystemUuid; + +typedef NvUuid NvProcessorUuid; + +extern const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT; + +#endif // _NV_CPU_UUID_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h new file mode 100644 index 0000000..90dd93f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_FIRMWARE_TYPES_H +#define NV_FIRMWARE_TYPES_H + +typedef enum { + NV_FIRMWARE_MODE_DISABLED = 0, + NV_FIRMWARE_MODE_ENABLED = 1, + NV_FIRMWARE_MODE_DEFAULT = 2, + NV_FIRMWARE_MODE_INVALID = 0xFF +} NvFirmwareMode; + +#endif // NV_FIRMWARE_TYPES_H diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h new file mode 100644 index 0000000..20b32bd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * NVIDIA GPZ vulnerability mitigation definitions. + */ + +/* + * There are two copies of this file for legacy reasons: + * + * P4: <$NV_SOURCE/>drivers/common/inc/nv_speculation_barrier.h + * Git: include/nv_speculation_barrier.h + * + * Both files need to be kept in sync if any changes are required. + */ + +#ifndef _NV_SPECULATION_BARRIER_H_ +#define _NV_SPECULATION_BARRIER_H_ + +#define NV_SPECULATION_BARRIER_VERSION 2 + +/* + * GNU-C/MSC/clang - x86/x86_64 : x86_64, __i386, __i386__ + * GNU-C - THUMB mode : __GNUC__, __thumb__ + * GNU-C - ARM modes : __GNUC__, __arm__, __aarch64__ + * armclang - THUMB mode : __ARMCC_VERSION, __thumb__ + * armclang - ARM modes : __ARMCC_VERSION, __arm__, __aarch64__ + * GHS - THUMB mode : __ghs__, __THUMB__ + * GHS - ARM modes : __ghs__, __ARM__, __ARM64__ + */ + +#if defined(_M_IX86) || defined(__i386__) || defined(__i386) \ + || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) + /* All x86 */ + #define NV_SPECULATION_BARRIER_x86 + +#elif defined(macintosh) || defined(__APPLE__) \ + || defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) \ + || defined(__POWERPC__) || defined(__ppc) || defined(__ppc__) \ + || defined(__ppc64__) || defined(__PPC__) \ + || defined(__PPC64__) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) + /* All PowerPC */ + #define NV_SPECULATION_BARRIER_PPC + +#elif (defined(__GNUC__) && defined(__thumb__)) \ + || (defined(__ARMCC_VERSION) && defined(__thumb__)) \ + || (defined(__ghs__) && defined(__THUMB__)) + /* ARM-thumb mode(<=ARMv7)/T32 (ARMv8) */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst.w 0xf3af8014\n" + +#elif (defined(__GNUC__) && defined(__arm__)) \ + || (defined(__ARMCC_VERSION) && defined(__arm__)) \ + || (defined(__ghs__) && defined(__ARM__)) + /* aarch32(ARMv8) / arm(<=ARMv7) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst 0xe320f014\n" + +#elif (defined(__GNUC__) && defined(__aarch64__)) \ + || (defined(__ARMCC_VERSION) && defined(__aarch64__)) \ + || (defined(__ghs__) && defined(__ARM64__)) + /* aarch64(ARMv8) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB "HINT #20\n" +#elif (defined(_MSC_VER) && ( defined(_M_ARM64) || defined(_M_ARM)) ) + /* Not currently implemented for MSVC/ARM64. See bug 3366890. */ +# define nv_speculation_barrier() +# define speculation_barrier() nv_speculation_barrier() +#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS +# define nv_speculation_barrier() +#else + #error "Unknown compiler/chip family" +#endif + +/* + * nv_speculation_barrier -- General-purpose speculation barrier + * + * This approach provides full protection against variant-1 vulnerability. 
+ * However, the recommended approach is detailed below (See: + * nv_array_index_no_speculate) + * + * Semantics: + * Any memory read that is sequenced after a nv_speculation_barrier(), + * and contained directly within the scope of nv_speculation_barrier() or + * directly within a nested scope, will not speculatively execute until all + * conditions for entering that scope have been architecturally resolved. + * + * Example: + * if (untrusted_index_from_user < bound) { + * ... + * nv_speculation_barrier(); + * ... + * x = array1[untrusted_index_from_user]; + * bit = x & 1; + * y = array2[0x100 * bit]; + * } + */ + +#if defined(NV_SPECULATION_BARRIER_x86) +// Delete after all references are changed to nv_speculation_barrier +#define speculation_barrier() nv_speculation_barrier() + +static inline void nv_speculation_barrier(void) +{ + +#if defined(_MSC_VER) && !defined(__clang__) + _mm_lfence(); +#endif + +#if defined(__GNUC__) || defined(__clang__) + __asm__ __volatile__ ("lfence" : : : "memory"); +#endif + +} + +#elif defined(NV_SPECULATION_BARRIER_PPC) + +static inline void nv_speculation_barrier(void) +{ + asm volatile("ori 31,31,0"); +} + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + +/* Note: Cortex-A9 GNU-assembler seems to complain about DSB SY */ + #define nv_speculation_barrier() \ + asm volatile \ + ( \ + "DSB sy\n" \ + "ISB\n" \ + : : : "memory" \ + ) +#endif + +/* + * nv_array_index_no_speculate -- Recommended variant-1 mitigation approach + * + * The array-index-no-speculate approach "de-speculates" an array index that + * has already been bounds-checked. + * + * This approach is preferred over nv_speculation_barrier due to the following + * reasons: + * - It is just as effective as the general-purpose speculation barrier. + * - It clearly identifies what array index is being de-speculated and is thus + * self-commenting, whereas the general-purpose speculation barrier requires + * an explanation of what array index is being de-speculated. + * - It performs substantially better than the general-purpose speculation + * barrier on ARM Cortex-A cores (the difference is expected to be tens of + * cycles per invocation). Within tight loops, this difference may become + * noticeable. + * + * Semantics: + * Provided count is non-zero and the caller has already validated or otherwise + * established that index < count, any speculative use of the return value will + * use a speculative value that is less than count. + * + * Example: + * if (untrusted_index_from_user < bound) { + * untrusted_index_from_user = nv_array_index_no_speculate( + * untrusted_index_from_user, bound); + * ... + * x = array1[untrusted_index_from_user]; + * ... + * } + * + * The use of nv_array_index_no_speculate() in the above example ensures that + * subsequent uses of untrusted_index_from_user will not execute speculatively + * (they will wait for the bounds check to complete). 
+ */
+
+static inline unsigned long nv_array_index_no_speculate(unsigned long index,
+                                                        unsigned long count)
+{
+#if defined(NV_SPECULATION_BARRIER_x86) && (defined(__GNUC__) || defined(__clang__))
+    unsigned long mask;
+
+    __asm__ __volatile__
+    (
+        "CMP %2, %1 \n"
+        "SBB %0, %0 \n"
+        : "=r"(mask) : "r"(index), "r"(count) : "cc"
+    );
+
+    return (index & mask);
+
+#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON)
+    unsigned long mask;
+
+    asm volatile
+    (
+        "CMP %[ind], %[cnt] \n"
+        "SBC %[res], %[cnt], %[cnt] \n"
+        NV_SPEC_BARRIER_CSDB
+        : [res] "=r" (mask) : [ind] "r" (index), [cnt] "r" (count): "cc"
+    );
+
+    return (index & mask);
+
+/* Fall back to the generic speculation barrier for unsupported platforms */
+#else
+    nv_speculation_barrier();
+
+    return index;
+#endif
+}
+
+#endif //_NV_SPECULATION_BARRIER_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h
new file mode 100644
index 0000000..b23f7f7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _NV_STDARG_H_
+#define _NV_STDARG_H_
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+    #include "conftest.h"
+    #if defined(NV_LINUX_STDARG_H_PRESENT)
+        #include <linux/stdarg.h>
+    #else
+        #include <stdarg.h>
+    #endif
+#else
+    #include <stdarg.h>
+#endif
+
+#endif // _NV_STDARG_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h
new file mode 100644
index 0000000..76347ac
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h
@@ -0,0 +1,1520 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file provides the interface that RM exposes to UVM. +// + +#ifndef _NV_UVM_INTERFACE_H_ +#define _NV_UVM_INTERFACE_H_ + +// Forward references, to break circular header file dependencies: +struct UvmOpsUvmEvents; + +#if defined(NVIDIA_UVM_ENABLED) + +// We are in the UVM build system, for a Linux target. +#include "uvm_linux.h" + +#else + +// We are in the RM build system, for a Linux target: +#include "nv-linux.h" + +#endif // NVIDIA_UVM_ENABLED + +#include "nvgputypes.h" +#include "nvstatus.h" +#include "nv_uvm_types.h" + + +// Define the type here as it's Linux specific, used only by the Linux specific +// nvUvmInterfaceRegisterGpu() API. +typedef struct +{ + struct pci_dev *pci_dev; + + // DMA addressable range of the device, mirrors fields in nv_state_t. + NvU64 dma_addressable_start; + NvU64 dma_addressable_limit; +} UvmGpuPlatformInfo; + +/******************************************************************************* + nvUvmInterfaceRegisterGpu + + Registers the GPU with the provided UUID for use. A GPU must be registered + before its UUID can be used with any other API. This call is ref-counted so + every nvUvmInterfaceRegisterGpu must be paired with a corresponding + nvUvmInterfaceUnregisterGpu. + + You don't need to call nvUvmInterfaceSessionCreate before calling this. + + Error codes: + NV_ERR_GPU_UUID_NOT_FOUND + NV_ERR_NO_MEMORY + NV_ERR_GENERIC +*/ +NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo); + +/******************************************************************************* + nvUvmInterfaceUnregisterGpu + + Unregisters the GPU with the provided UUID. This drops the ref count from + nvUvmInterfaceRegisterGpu. Once the reference count goes to 0 the device may + no longer be accessible until the next nvUvmInterfaceRegisterGpu call. No + automatic resource freeing is performed, so only make the last unregister + call after destroying all your allocations associated with that UUID (such + as those from nvUvmInterfaceAddressSpaceCreate). + + If the UUID is not found, no operation is performed. +*/ +void nvUvmInterfaceUnregisterGpu(const NvProcessorUuid *gpuUuid); + +/******************************************************************************* + nvUvmInterfaceSessionCreate + + TODO: Creates session object. All allocations are tied to the session. 
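+
+    Example (an illustrative usage sketch added for this doc, not part of the
+    original header; error handling is elided and the variable names are
+    hypothetical):
+
+        uvmGpuSessionHandle session;
+        UvmPlatformInfo platformInfo;
+
+        if (nvUvmInterfaceSessionCreate(&session, &platformInfo) == NV_OK)
+        {
+            // ... use the session ...
+            nvUvmInterfaceSessionDestroy(session);
+        }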
+
+    The platformInfo parameter is filled by the callee with miscellaneous system
+    information. Refer to the UvmPlatformInfo struct for details.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+*/
+NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
+                                      UvmPlatformInfo *platformInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceSessionDestroy
+
+    Destroys a session object. All allocations tied to the session will
+    be destroyed.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+*/
+NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session);
+
+/*******************************************************************************
+    nvUvmInterfaceDeviceCreate
+
+    Creates a device object under the given session for the GPU with the given
+    UUID. Also creates a partition object for the device iff bCreateSmcPartition
+    is true and pGpuInfo->smcEnabled is true. pGpuInfo->smcUserClientInfo will
+    be used to determine the SMC partition in this case. A device handle is
+    returned in the device output parameter.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+      NV_ERR_INVALID_ARGUMENT
+      NV_ERR_INSUFFICIENT_RESOURCES
+      NV_ERR_OBJECT_NOT_FOUND
+*/
+NV_STATUS nvUvmInterfaceDeviceCreate(uvmGpuSessionHandle session,
+                                     const UvmGpuInfo *pGpuInfo,
+                                     const NvProcessorUuid *gpuUuid,
+                                     uvmGpuDeviceHandle *device,
+                                     NvBool bCreateSmcPartition);
+
+/*******************************************************************************
+    nvUvmInterfaceDeviceDestroy
+
+    Destroys the device object for the given handle. The handle must have been
+    obtained in a prior call to nvUvmInterfaceDeviceCreate.
+*/
+void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device);
+
+/*******************************************************************************
+    nvUvmInterfaceAddressSpaceCreate
+
+    This function creates an address space.
+    This virtual address space is created on the GPU specified
+    by device.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+*/
+NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device,
+                                           unsigned long long vaBase,
+                                           unsigned long long vaSize,
+                                           uvmGpuAddressSpaceHandle *vaSpace,
+                                           UvmGpuAddressSpaceInfo *vaSpaceInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceDupAddressSpace
+
+    This function will dup the given vaspace from the user's client to the
+    kernel client that was created as an ops session.
+
+    By duping the vaspace it is guaranteed that RM will refcount the vaspace object.
+
+    Error codes:
+      NV_ERR_GENERIC
+*/
+NV_STATUS nvUvmInterfaceDupAddressSpace(uvmGpuDeviceHandle device,
+                                        NvHandle hUserClient,
+                                        NvHandle hUserVASpace,
+                                        uvmGpuAddressSpaceHandle *vaSpace,
+                                        UvmGpuAddressSpaceInfo *vaSpaceInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceAddressSpaceDestroy
+
+    Destroys an address space that was previously created via
+    nvUvmInterfaceAddressSpaceCreate.
+*/
+void nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace);
+
+/*******************************************************************************
+    nvUvmInterfaceMemoryAllocFB
+
+    This function will allocate video memory and provide a mapped GPU
+    virtual address to this allocation. It also returns the GPU physical
+    offset if contiguous allocations are requested.
+
+    This function will allocate a minimum page size if the length provided is 0
+    and will return a unique GPU virtual address.
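+
+    Example (an illustrative sketch added for this doc; vaSpace comes from
+    nvUvmInterfaceAddressSpaceCreate, and the allocInfo members used here are
+    described below):
+
+        UvmGpuAllocInfo allocInfo = {0};
+        UvmGpuPointer gpuVa;
+
+        allocInfo.bContiguousPhysAlloc = NV_TRUE;
+        NV_STATUS status = nvUvmInterfaceMemoryAllocFB(vaSpace, length,
+                                                       &gpuVa, &allocInfo);
+        // On success, allocInfo.gpuPhysOffset holds the physical offset.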
+
+    The default page size will be the small page size (as returned by query
+    caps). The physical alignment will also be enforced to small page
+    size (64K/128K).
+
+    Arguments:
+        vaSpace[IN]       - Pointer to vaSpace object
+        length [IN]       - Length of the allocation
+        gpuPointer[OUT]   - GPU VA mapping
+        allocInfo[IN/OUT] - Pointer to allocation info structure which
+                            contains below given fields
+
+        allocInfo Members:
+        gpuPhysOffset[OUT]       - Physical offset of allocation returned only
+                                   if contiguous allocation is requested.
+        pageSize[IN]             - Override the default page size (see above).
+        alignment[IN]            - gpuPointer GPU VA alignment. 0 means 4KB
+                                   alignment.
+        bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default
+                                   will follow the vidHeapControl default policy.
+        bMemGrowsDown[IN]
+        bPersistentVidmem[IN]    - Allocate persistent vidmem.
+        hPhysHandle[IN/OUT]      - The handle will be used in allocation if provided.
+                                   If not provided, the allocator will return the
+                                   handle it eventually used.
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT
+        NV_ERR_NO_MEMORY              - Not enough physical memory to service
+                                        allocation request with provided constraints
+        NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request
+        NV_ERR_INVALID_OWNER          - Target memory not accessible by specified owner
+        NV_ERR_NOT_SUPPORTED          - Operation not supported on broken FB
+
+*/
+NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace,
+                                      NvLength length,
+                                      UvmGpuPointer * gpuPointer,
+                                      UvmGpuAllocInfo * allocInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceMemoryAllocSys
+
+    This function will allocate system memory and provide a mapped GPU
+    virtual address to this allocation.
+
+    This function will allocate a minimum page size if the length provided is 0
+    and will return a unique GPU virtual address.
+
+    The default page size will be the small page size (as returned by query caps).
+
+    Arguments:
+        vaSpace[IN]       - Pointer to vaSpace object
+        length [IN]       - Length of the allocation
+        gpuPointer[OUT]   - GPU VA mapping
+        allocInfo[IN/OUT] - Pointer to allocation info structure which
+                            contains below given fields
+
+        allocInfo Members:
+        gpuPhysOffset[OUT]       - Physical offset of allocation returned only
+                                   if contiguous allocation is requested.
+        pageSize[IN]             - Override the default page size (see above).
+        alignment[IN]            - gpuPointer GPU VA alignment. 0 means 4KB
+                                   alignment.
+        bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default
+                                   will follow the vidHeapControl default policy.
+        bMemGrowsDown[IN]
+        bPersistentVidmem[IN]    - Allocate persistent vidmem.
+        hPhysHandle[IN/OUT]      - The handle will be used in allocation if provided.
+                                   If not provided, the allocator will return the
+                                   handle it eventually used.
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT
+        NV_ERR_NO_MEMORY              - Not enough physical memory to service
+                                        allocation request with provided constraints
+        NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request
+        NV_ERR_INVALID_OWNER          - Target memory not accessible by specified owner
+        NV_ERR_NOT_SUPPORTED          - Operation not supported
+*/
+NV_STATUS nvUvmInterfaceMemoryAllocSys(uvmGpuAddressSpaceHandle vaSpace,
+                                       NvLength length,
+                                       UvmGpuPointer * gpuPointer,
+                                       UvmGpuAllocInfo * allocInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceGetP2PCaps
+
+    Obtain the P2P capabilities between two devices.
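+
+    Example (an illustrative sketch added for this doc; device1 and device2
+    come from prior nvUvmInterfaceDeviceCreate calls):
+
+        UvmGpuP2PCapsParams p2pCaps;
+        NV_STATUS status = nvUvmInterfaceGetP2PCaps(device1, device2, &p2pCaps);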
+
+    Arguments:
+        device1[IN]         - Device handle of the first GPU (required)
+        device2[IN]         - Device handle of the second GPU (required)
+        p2pCapsParams [OUT] - P2P capabilities between the two GPUs
+
+    Error codes:
+      NV_ERR_INVALID_ARGUMENT
+      NV_ERR_GENERIC:
+        Unexpected error. We try hard to avoid returning this error
+        code, because it is not very informative.
+
+*/
+NV_STATUS nvUvmInterfaceGetP2PCaps(uvmGpuDeviceHandle device1,
+                                   uvmGpuDeviceHandle device2,
+                                   UvmGpuP2PCapsParams * p2pCapsParams);
+
+/*******************************************************************************
+    nvUvmInterfaceGetPmaObject
+
+    This function will return a pointer to the PMA object for the given GPU. This
+    PMA object handle is required for page allocation.
+
+    Arguments:
+        device [IN]        - Device handle allocated in
+                             nvUvmInterfaceDeviceCreate
+        pPma [OUT]         - Pointer to PMA object
+        pPmaPubStats [OUT] - Pointer to UvmPmaStatistics object
+
+    Error codes:
+      NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB
+      NV_ERR_GENERIC:
+        Unexpected error. We try hard to avoid returning this error
+        code, because it is not very informative.
+*/
+NV_STATUS nvUvmInterfaceGetPmaObject(uvmGpuDeviceHandle device,
+                                     void **pPma,
+                                     const UvmPmaStatistics **pPmaPubStats);
+
+// Mirrors pmaEvictPagesCb_t, see its documentation in pma.h.
+typedef NV_STATUS (*uvmPmaEvictPagesCallback)(void *callbackData,
+                                              NvU32 pageSize,
+                                              NvU64 *pPages,
+                                              NvU32 count,
+                                              NvU64 physBegin,
+                                              NvU64 physEnd);
+
+// Mirrors pmaEvictRangeCb_t, see its documentation in pma.h.
+typedef NV_STATUS (*uvmPmaEvictRangeCallback)(void *callbackData, NvU64 physBegin, NvU64 physEnd);
+
+/*******************************************************************************
+    nvUvmInterfacePmaRegisterEvictionCallbacks
+
+    Simple wrapper for pmaRegisterEvictionCb(), see its documentation in pma.h.
+*/
+NV_STATUS nvUvmInterfacePmaRegisterEvictionCallbacks(void *pPma,
+                                                     uvmPmaEvictPagesCallback evictPages,
+                                                     uvmPmaEvictRangeCallback evictRange,
+                                                     void *callbackData);
+
+/******************************************************************************
+    nvUvmInterfacePmaUnregisterEvictionCallbacks
+
+    Simple wrapper for pmaUnregisterEvictionCb(), see its documentation in pma.h.
+*/
+void nvUvmInterfacePmaUnregisterEvictionCallbacks(void *pPma);
+
+/*******************************************************************************
+    nvUvmInterfacePmaAllocPages
+
+    @brief Synchronous API for allocating pages from the PMA.
+    PMA will decide which pma regions to allocate from based on the provided
+    flags. PMA will also initiate UVM evictions to make room for this
+    allocation unless prohibited by PMA_FLAGS_DONT_EVICT. UVM callers must pass
+    this flag to avoid deadlock. Only UVM may allocate unpinned memory from
+    this API.
+
+    For broadcast methods, PMA will guarantee the same physical frames are
+    allocated on multiple GPUs, specified by the PMA objects passed in.
+
+    If allocation is contiguous, only one page in pPages will be filled.
+    Also, the contiguous flag must be passed later to nvUvmInterfacePmaFreePages.
+
+    Arguments:
+        pPma[IN]             - Pointer to PMA object
+        pageCount [IN]       - Number of pages required to be allocated.
+        pageSize [IN]        - 64KB, 128KB or 2MB. No other values are permissible.
+        pPmaAllocOptions[IN] - Pointer to PMA allocation info structure.
+        pPages[OUT]          - Array of pointers, containing the PA base
+                               address of each page.
+
+    Error codes:
+      NV_ERR_NO_MEMORY:
+        Internal memory allocation failed.
+      NV_ERR_GENERIC:
+        Unexpected error. We try hard to avoid returning this error
+        code, because it is not very informative.
+*/
+NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma,
+                                      NvLength pageCount,
+                                      NvU32 pageSize,
+                                      UvmPmaAllocationOptions *pPmaAllocOptions,
+                                      NvU64 *pPages);
+
+/*******************************************************************************
+    nvUvmInterfacePmaPinPages
+
+    This function will pin the physical memory allocated using PMA. The pages
+    passed as input must be unpinned, else this function will return an error
+    and roll back any change if any page is not previously marked "unpinned".
+
+    Arguments:
+        pPma[IN]       - Pointer to PMA object.
+        pPages[IN]     - Array of pointers, containing the PA base
+                         address of each page to be pinned.
+        pageCount [IN] - Number of pages required to be pinned.
+        pageSize [IN]  - Page size of each page to be pinned.
+        flags [IN]     - UVM_PMA_CALLED_FROM_PMA_EVICTION if called from
+                         PMA eviction, 0 otherwise.
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - Invalid input arguments.
+        NV_ERR_GENERIC          - Unexpected error. We try hard to avoid
+                                  returning this error code as it is not very
+                                  informative.
+        NV_ERR_NOT_SUPPORTED    - Operation not supported on broken FB
+*/
+NV_STATUS nvUvmInterfacePmaPinPages(void *pPma,
+                                    NvU64 *pPages,
+                                    NvLength pageCount,
+                                    NvU32 pageSize,
+                                    NvU32 flags);
+
+/*******************************************************************************
+    nvUvmInterfacePmaUnpinPages
+
+    This function will unpin the physical memory allocated using PMA. The pages
+    passed as input must be already pinned, else this function will return an
+    error and roll back any change if any page is not previously marked "pinned".
+    Behaviour is undefined if any blacklisted pages are unpinned.
+
+    Arguments:
+        pPma[IN]       - Pointer to PMA object.
+        pPages[IN]     - Array of pointers, containing the PA base
+                         address of each page to be unpinned.
+        pageCount [IN] - Number of pages required to be unpinned.
+        pageSize [IN]  - Page size of each page to be unpinned.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - Invalid input arguments.
+        NV_ERR_GENERIC          - Unexpected error. We try hard to avoid
+                                  returning this error code as it is not very
+                                  informative.
+        NV_ERR_NOT_SUPPORTED    - Operation not supported on broken FB
+*/
+NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma,
+                                      NvU64 *pPages,
+                                      NvLength pageCount,
+                                      NvU32 pageSize);
+
+/*******************************************************************************
+    nvUvmInterfaceMemoryFree
+
+    Free up a GPU allocation
+*/
+void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace,
+                              UvmGpuPointer gpuPointer);
+
+/*******************************************************************************
+    nvUvmInterfacePmaFreePages
+
+    This function will free physical memory allocated using PMA. It marks a list
+    of pages as free. This operation is also used by RM to mark pages as "scrubbed"
+    for the initial ECC sweep. This function does not fail.
+
+    When the allocation was contiguous, an appropriate flag needs to be passed.
+
+    Arguments:
+        pPma[IN]       - Pointer to PMA object
+        pPages[IN]     - Array of pointers, containing the PA base
+                         address of each page.
+        pageCount [IN] - Number of pages required to be allocated.
+        pageSize [IN]  - Page size of each page
+        flags [IN]     - Flags with information about allocation type
+                         with the same meaning as flags in options for
+                         nvUvmInterfacePmaAllocPages. When called from PMA
+                         eviction, UVM_PMA_CALLED_FROM_PMA_EVICTION needs
+                         to be added to flags.
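+
+    Example (an illustrative sketch added for this doc; pPages, pageCount,
+    pageSize and flags mirror the values used with nvUvmInterfacePmaAllocPages):
+
+        // Add UVM_PMA_CALLED_FROM_PMA_EVICTION to flags when called from
+        // PMA eviction.
+        nvUvmInterfacePmaFreePages(pPma, pPages, pageCount, pageSize, flags);
+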
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT
+        NV_ERR_NO_MEMORY              - Not enough physical memory to service
+                                        allocation request with provided constraints
+        NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request
+        NV_ERR_INVALID_OWNER          - Target memory not accessible by specified owner
+        NV_ERR_NOT_SUPPORTED          - Operation not supported on broken FB
+*/
+void nvUvmInterfacePmaFreePages(void *pPma,
+                                NvU64 *pPages,
+                                NvLength pageCount,
+                                NvU32 pageSize,
+                                NvU32 flags);
+
+/*******************************************************************************
+    nvUvmInterfaceMemoryCpuMap
+
+    This function creates a CPU mapping to the provided GPU address.
+    If the address is not the same as what is returned by the Alloc
+    function, then the function will map it from the address provided.
+    This offset will be relative to the GPU offset obtained from the
+    memory alloc functions.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+*/
+NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace,
+                                     UvmGpuPointer gpuPointer,
+                                     NvLength length, void **cpuPtr,
+                                     NvU32 pageSize);
+
+/*******************************************************************************
+    nvUvmInterfaceMemoryCpuUnMap
+
+    Unmaps the cpuPtr provided from the process virtual address space.
+*/
+void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace,
+                                  void *cpuPtr);
+
+/*******************************************************************************
+    nvUvmInterfaceChannelAllocate
+
+    This function will allocate a channel bound to a copy engine.
+
+    allocParams must contain an engineIndex as channels need to be bound to an
+    engine type at allocation time. The possible values are [0,
+    UVM_COPY_ENGINE_COUNT_MAX), but notably only the copy engines that have
+    UvmGpuCopyEngineCaps::supported set to true can be allocated. This struct
+    also contains information relative to GPFIFO and GPPut.
+
+    channel is filled with the address of the corresponding channel handle.
+
+    channelInfo is filled out with channel get/put. The errorNotifier is filled
+    out when the channel hits an RC error. On Volta+ devices, it also computes
+    the work submission token and the work submission offset to be used in the
+    Host channel submission doorbell.
+
+    Arguments:
+        vaSpace[IN]      - VA space linked to a client and a device under which
+                           the channel will be allocated
+        allocParams[IN]  - structure with allocation settings
+        channel[OUT]     - pointer to the new channel handle
+        channelInfo[OUT] - structure filled with channel information
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+*/
+NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace,
+                                        const UvmGpuChannelAllocParams *allocParams,
+                                        uvmGpuChannelHandle *channel,
+                                        UvmGpuChannelInfo *channelInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceChannelDestroy
+
+    This function destroys a given channel.
+
+    Arguments:
+        channel[IN] - channel handle
+*/
+void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel);
+
+/*******************************************************************************
+    nvUvmInterfaceQueryCaps
+
+    Return capabilities for the provided GPU.
+    If the GPU does not exist, an error will be returned.
+
+    If the client is only interested in the capabilities of the Copy Engines of
+    the given GPU, use nvUvmInterfaceQueryCopyEnginesCaps instead.
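+
+    Example (an illustrative sketch added for this doc; device comes from a
+    prior nvUvmInterfaceDeviceCreate call):
+
+        UvmGpuCaps caps;
+        NV_STATUS status = nvUvmInterfaceQueryCaps(device, &caps);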
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+*/
+NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device,
+                                  UvmGpuCaps * caps);
+
+/*******************************************************************************
+    nvUvmInterfaceQueryCopyEnginesCaps
+
+    Return the capabilities of all the Copy Engines for the provided GPU.
+    If the GPU does not exist, an error will be returned.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+*/
+NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
+                                             UvmGpuCopyEnginesCaps *caps);
+
+/*******************************************************************************
+    nvUvmInterfaceGetGpuInfo
+
+    Return various GPU info; refer to the UvmGpuInfo struct for details.
+    If no GPU matching the UUID is found, an error will be returned.
+
+    On Ampere+ GPUs, pGpuClientInfo contains SMC information provided by the
+    client regarding the partition targeted in this operation.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INSUFFICIENT_RESOURCES
+*/
+NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid,
+                                   const UvmGpuClientInfo *pGpuClientInfo,
+                                   UvmGpuInfo *pGpuInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceServiceDeviceInterruptsRM
+
+    Tells RM to service all pending interrupts. This is helpful in ECC error
+    conditions, where the ECC error interrupt is set and the error can be
+    determined only after the ECC notifier has been set or reset.
+
+    Error codes:
+      NV_ERR_GENERIC
+      UVM_INVALID_ARGUMENTS
+*/
+NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device);
+
+/*******************************************************************************
+    nvUvmInterfaceSetPageDirectory
+    Sets pageDirectory in the provided location. Also moves the existing PDE to
+    the provided pageDirectory.
+
+    RM will propagate the update to all channels using the provided VA space.
+    All channels must be idle when this call is made.
+
+    Arguments:
+        vaSpace[IN]         - VASpace Object
+        physAddress[IN]     - Physical address of new page directory
+        numEntries[IN]      - Number of entries including previous PDE which will be copied
+        bVidMemAperture[IN] - If set, pageDirectory will reside in the VidMem
+                              aperture, else sysmem
+        pasid[IN]           - PASID (Process Address Space IDentifier) of the process
+                              corresponding to the VA space. Ignored unless the VA space
+                              object has ATS enabled.
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace,
+                                         NvU64 physAddress, unsigned numEntries,
+                                         NvBool bVidMemAperture, NvU32 pasid);
+
+/*******************************************************************************
+    nvUvmInterfaceUnsetPageDirectory
+    Unsets/Restores pageDirectory to RM's defined location.
+
+    Arguments:
+        vaSpace[IN] - VASpace Object
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace);
+
+/*******************************************************************************
+    nvUvmInterfaceDupAllocation
+
+    Duplicate the given allocation in a different VA space.
+
+    The physical handle backing the source allocation is duplicated in
+    the GPU device associated with the destination VA space, and a new mapping
+    is created in that VA space.
+
+    The input allocation can be located in sysmem (i.e. allocated using
+    nvUvmInterfaceMemoryAllocSys) or vidmem (i.e. allocated using
+    nvUvmInterfaceMemoryAllocFB). If located in vidmem, duplication across
+    GPUs is not supported.
+
+    For duplication of physical memory use nvUvmInterfaceDupMemory.
+
+    Arguments:
+        srcVaSpace[IN]     - Source VA space.
+        srcAddress[IN]     - GPU VA in the source VA space. The provided address
+                             should match one previously returned by
+                             nvUvmInterfaceMemoryAllocFB or
+                             nvUvmInterfaceMemoryAllocSys.
+        dstVaSpace[IN]     - Destination VA space where the new mapping will be
+                             created.
+        dstVaAlignment[IN] - Alignment of the GPU VA in the destination VA
+                             space. 0 means 4KB alignment.
+        dstAddress[OUT]    - Pointer to the GPU VA in the destination VA space.
+
+    Error codes:
+      NV_ERR_INVALID_ARGUMENT - If any of the inputs is invalid, or the source
+                                and destination VA spaces are identical.
+      NV_ERR_OBJECT_NOT_FOUND - If the input allocation is not found under
+                                the provided VA space.
+      NV_ERR_NO_MEMORY        - If there is no memory to back the duplicate,
+                                or the associated metadata.
+      NV_ERR_NOT_SUPPORTED    - If trying to duplicate vidmem across GPUs.
+*/
+NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace,
+                                      NvU64 srcAddress,
+                                      uvmGpuAddressSpaceHandle dstVaSpace,
+                                      NvU64 dstVaAlignment,
+                                      NvU64 *dstAddress);
+
+/*******************************************************************************
+    nvUvmInterfaceDupMemory
+
+    Duplicates a physical memory allocation. If requested, provides information
+    about the allocation.
+
+    Arguments:
+        device[IN]          - Device linked to a client under which
+                              the phys memory needs to be duped.
+        hClient[IN]         - Client owning the memory.
+        hPhysMemory[IN]     - Phys memory which is to be duped.
+        hDupedHandle[OUT]   - Handle of the duped memory object.
+        pGpuMemoryInfo[OUT] - see nv_uvm_types.h for more information.
+                              This parameter can be NULL. (optional)
+    Error codes:
+      NV_ERR_INVALID_ARGUMENT - If any parameter is invalid.
+      NV_ERR_NOT_SUPPORTED    - If the allocation is not a physical allocation.
+      NV_ERR_OBJECT_NOT_FOUND - If the allocation is not found under the provided client.
+*/
+NV_STATUS nvUvmInterfaceDupMemory(uvmGpuDeviceHandle device,
+                                  NvHandle hClient,
+                                  NvHandle hPhysMemory,
+                                  NvHandle *hDupMemory,
+                                  UvmGpuMemoryInfo *pGpuMemoryInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceFreeDupedHandle
+
+    Free the allocation represented by the physical handle used to create the
+    duped allocation.
+
+    Arguments:
+        device[IN]      - Device handle used to dup the memory.
+        hPhysHandle[IN] - Handle representing the phys allocation.
+
+    Error codes:
+      NV_ERROR
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceFreeDupedHandle(uvmGpuDeviceHandle device,
+                                        NvHandle hPhysHandle);
+
+/*******************************************************************************
+    nvUvmInterfaceGetFbInfo
+
+    Gets FB information from RM.
+
+    Arguments:
+        device[IN]   - GPU device handle
+        fbInfo [OUT] - Pointer to FbInfo structure which contains
+                       reservedHeapSize & heapSize
+    Error codes:
+      NV_ERROR
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceGetFbInfo(uvmGpuDeviceHandle device,
+                                  UvmGpuFbInfo * fbInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceGetEccInfo
+
+    Gets ECC information from RM.
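+
+    Example (an illustrative sketch added for this doc; pairs with
+    nvUvmInterfaceServiceDeviceInterruptsRM when handling ECC interrupts):
+
+        UvmGpuEccInfo eccInfo;
+
+        if (nvUvmInterfaceGetEccInfo(device, &eccInfo) == NV_OK)
+        {
+            // ... inspect eccInfo ...
+        }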
+
+    Arguments:
+        device[IN]    - GPU device handle
+        eccInfo [OUT] - Pointer to EccInfo structure
+
+    Error codes:
+      NV_ERROR
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device,
+                                   UvmGpuEccInfo * eccInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceOwnPageFaultIntr
+
+    This function transfers ownership of the replayable page fault interrupt,
+    between RM and UVM, for a particular GPU.
+
+    bOwnInterrupts == NV_TRUE: UVM is taking ownership from the RM. This causes
+    the following: RM will not service, enable or disable this interrupt and it
+    is up to the UVM driver to handle this interrupt. In this case, replayable
+    page fault interrupts are disabled by this function, before it returns.
+
+    bOwnInterrupts == NV_FALSE: UVM is returning ownership to the RM: in this
+    case, replayable page fault interrupts MUST BE DISABLED BEFORE CALLING this
+    function.
+
+    The cases above both result in transferring ownership of a GPU that has its
+    replayable page fault interrupts disabled. Doing otherwise would make it
+    very difficult to control which driver handles any interrupts that build up
+    during the hand-off.
+
+    The calling pattern should look like this:
+
+    UVM setting up a new GPU for operation:
+        UVM GPU LOCK
+           nvUvmInterfaceOwnPageFaultIntr(..., NV_TRUE)
+        UVM GPU UNLOCK
+
+        Enable replayable page faults for that GPU
+
+    UVM tearing down a GPU:
+
+        Disable replayable page faults for that GPU
+
+        UVM GPU LOCK
+           nvUvmInterfaceOwnPageFaultIntr(..., NV_FALSE)
+        UVM GPU UNLOCK
+
+    Arguments:
+        gpuUuid[IN]    - UUID of the GPU to operate on
+        bOwnInterrupts - Set to NV_TRUE for UVM to take ownership of the
+                         replayable page fault interrupts. Set to NV_FALSE
+                         to return ownership of the page fault interrupts
+                         to RM.
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceOwnPageFaultIntr(uvmGpuDeviceHandle device, NvBool bOwnInterrupts);
+
+/*******************************************************************************
+    nvUvmInterfaceInitFaultInfo
+
+    This function obtains the fault buffer address, size and a few register
+    mappings for replayable faults, and creates a shadow buffer to store
+    non-replayable faults if the GPU supports it.
+
+    Arguments:
+        device[IN]      - Device handle associated with the gpu
+        pFaultInfo[OUT] - information provided by RM for fault handling
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_NO_MEMORY
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
+                                      UvmGpuFaultInfo *pFaultInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceDestroyFaultInfo
+
+    This function unmaps and destroys the fault buffer and clears faultInfo
+    for replayable faults, and frees the shadow buffer for non-replayable faults.
+
+    Arguments:
+        device[IN]      - Device handle associated with the gpu
+        pFaultInfo[OUT] - information provided by RM for fault handling
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device,
+                                         UvmGpuFaultInfo *pFaultInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceHasPendingNonReplayableFaults
+
+    This function tells whether there are pending non-replayable faults in the
+    client shadow fault buffer ready to be consumed.
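+
+    Example (an illustrative bottom-half sketch added for this doc; buffer and
+    numFaults are hypothetical, and the caller must provide the locking
+    described in the NOTES below):
+
+        NvBool hasPending = NV_FALSE;
+
+        nvUvmInterfaceHasPendingNonReplayableFaults(pFaultInfo, &hasPending);
+        if (hasPending)
+            nvUvmInterfaceGetNonReplayableFaults(pFaultInfo, buffer, &numFaults);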
+
+    NOTES:
+    - This function uses a pre-allocated stack per GPU (stored in the
+      UvmGpuFaultInfo object) for calls related to non-replayable faults from the
+      top half.
+    - Concurrent calls to this function using the same pFaultInfo are not
+      thread-safe due to the pre-allocated stack. Therefore, locking is the
+      caller's responsibility.
+    - This function DOES NOT acquire the RM API or GPU locks. That is because
+      it is called during fault servicing, which could produce deadlocks.
+
+    Arguments:
+        pFaultInfo[IN]        - information provided by RM for fault handling.
+                                Contains a pointer to the shadow fault buffer
+        hasPendingFaults[OUT] - return value that tells if there are
+                                non-replayable faults ready to be consumed by
+                                the client
+
+    Error codes:
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceHasPendingNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
+                                                      NvBool *hasPendingFaults);
+
+/*******************************************************************************
+    nvUvmInterfaceGetNonReplayableFaults
+
+    This function consumes all the non-replayable fault packets in the client
+    shadow fault buffer and copies them to the given buffer. It also returns the
+    number of faults that have been copied.
+
+    NOTES:
+    - This function uses a pre-allocated stack per GPU (stored in the
+      UvmGpuFaultInfo object) for calls from the bottom half that handles
+      non-replayable faults.
+    - See nvUvmInterfaceHasPendingNonReplayableFaults for the implications of
+      using a shared stack.
+    - This function DOES NOT acquire the RM API or GPU locks. That is because
+      it is called during fault servicing, which could produce deadlocks.
+
+    Arguments:
+        pFaultInfo[IN]    - information provided by RM for fault handling.
+                            Contains a pointer to the shadow fault buffer
+        pFaultBuffer[OUT] - buffer provided by the client where fault buffers
+                            are copied when they are popped out of the shadow
+                            fault buffer (which is a circular queue).
+        numFaults[OUT]    - return value that tells the number of faults copied
+                            to the client's buffer
+
+    Error codes:
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
+                                               void *pFaultBuffer,
+                                               NvU32 *numFaults);
+
+/*******************************************************************************
+    nvUvmInterfaceInitAccessCntrInfo
+
+    This function obtains the access counter buffer address, size and a few
+    register mappings.
+
+    Arguments:
+        device[IN]           - Device handle associated with the gpu
+        pAccessCntrInfo[OUT] - Information provided by RM for access counter handling
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device,
+                                           UvmGpuAccessCntrInfo *pAccessCntrInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceDestroyAccessCntrInfo
+
+    This function unmaps and destroys the access counter buffer and clears
+    accessCntrInfo.
+
+    Arguments:
+        device[IN]          - Device handle associated with the gpu
+        pAccessCntrInfo[IN] - Information provided by RM for access counter handling
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device,
+                                              UvmGpuAccessCntrInfo *pAccessCntrInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceEnableAccessCntr
+
+    This function enables access counters using the given configuration.
+    UVM is also taking ownership from the RM.
+    This causes the following: RM will not service, enable or disable this
+    interrupt and it is up to the UVM driver to handle this interrupt. In
+    this case, access counter notification interrupts are enabled by this
+    function before it returns.
+
+    Arguments:
+        device[IN]            - Device handle associated with the gpu
+        pAccessCntrInfo[IN]   - Pointer to structure filled out by nvUvmInterfaceInitAccessCntrInfo
+        pAccessCntrConfig[IN] - Configuration for access counters
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device,
+                                         UvmGpuAccessCntrInfo *pAccessCntrInfo,
+                                         UvmGpuAccessCntrConfig *pAccessCntrConfig);
+
+/*******************************************************************************
+    nvUvmInterfaceDisableAccessCntr
+
+    This function disables access counters.
+    UVM is also returning ownership to the RM: RM can service, enable or
+    disable this interrupt. In this case, access counter notification interrupts
+    are disabled by this function before it returns.
+
+    Arguments:
+        device[IN]          - Device handle associated with the gpu
+        pAccessCntrInfo[IN] - Pointer to structure filled out by nvUvmInterfaceInitAccessCntrInfo
+
+    Error codes:
+      NV_ERR_GENERIC
+      NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceDisableAccessCntr(uvmGpuDeviceHandle device,
+                                          UvmGpuAccessCntrInfo *pAccessCntrInfo);
+
+//
+// Called by the UVM driver to register operations with RM. Only one set of
+// callbacks can be registered by any driver at a time. If another set of
+// callbacks was already registered, NV_ERR_IN_USE is returned.
+//
+NV_STATUS nvUvmInterfaceRegisterUvmCallbacks(struct UvmOpsUvmEvents *importedUvmOps);
+
+//
+// Counterpart to nvUvmInterfaceRegisterUvmCallbacks. This must only be called
+// if nvUvmInterfaceRegisterUvmCallbacks returned NV_OK.
+//
+// Upon return, the caller is guaranteed that any outstanding callbacks are done
+// and no new ones will be invoked.
+//
+void nvUvmInterfaceDeRegisterUvmOps(void);
+
+/*******************************************************************************
+    nvUvmInterfaceP2pObjectCreate
+
+    This API creates an NV50_P2P object for the GPUs with the given device
+    handles, and returns the handle to the object.
+
+    Arguments:
+        device1[IN]     - first GPU device handle
+        device2[IN]     - second GPU device handle
+        hP2pObject[OUT] - handle to the created P2P object.
+
+    Error codes:
+      NV_ERR_INVALID_ARGUMENT
+      NV_ERR_OBJECT_NOT_FOUND : If the device objects associated with the
+                                handles aren't found.
+*/
+NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1,
+                                        uvmGpuDeviceHandle device2,
+                                        NvHandle *hP2pObject);
+
+/*******************************************************************************
+    nvUvmInterfaceP2pObjectDestroy
+
+    This API destroys the NV50_P2P object associated with the passed handle.
+
+    Arguments:
+        session[IN]    - Session handle.
+        hP2pObject[IN] - handle to a P2P object.
+
+    Error codes: NONE
+*/
+void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session,
+                                    NvHandle hP2pObject);
+
+/*******************************************************************************
+    nvUvmInterfaceGetExternalAllocPtes
+
+    The interface builds the RM PTEs using the provided input parameters.
+
+    Arguments:
+        vaSpace[IN] - vaSpace handle.
+        hMemory[IN] - Memory handle.
+        offset [IN] - Offset from the beginning of the allocation
+                      where PTE mappings should begin.
+                      Should be aligned with pagesize associated
+                      with the allocation.
+        size [IN]   - Length of the allocation for which PTEs
+                      should be built.
+                      Should be aligned with pagesize associated
+                      with the allocation.
+                      size = 0 will be interpreted as the total size
+                      of the allocation.
+        gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT      - Invalid parameter(s) passed.
+        NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed.
+        NV_ERR_NOT_SUPPORTED         - Functionality is not supported (see comments in nv_gpu_ops.c)
+        NV_ERR_INVALID_BASE          - offset is beyond the allocation size
+        NV_ERR_INVALID_LIMIT         - (offset + size) is beyond the allocation size.
+        NV_ERR_BUFFER_TOO_SMALL      - gpuExternalMappingInfo.pteBufferSize is insufficient to
+                                       store a single PTE.
+        NV_ERR_NOT_READY             - Returned when querying the PTEs requires a deferred setup
+                                       which has not yet completed. It is expected that the caller
+                                       will reattempt the call until a different code is returned.
+*/
+NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
+                                             NvHandle hMemory,
+                                             NvU64 offset,
+                                             NvU64 size,
+                                             UvmGpuExternalMappingInfo *gpuExternalMappingInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceRetainChannel
+
+    Validates and returns information about the user's channel and its resources
+    (local CTX buffers + global CTX buffers). The state is refcounted and must be
+    released by calling nvUvmInterfaceReleaseChannel.
+
+    Arguments:
+        vaSpace[IN]              - vaSpace handle.
+        hClient[IN]              - Client handle
+        hChannel[IN]             - Channel handle
+        retainedChannel[OUT]     - Opaque pointer to use to refer to this
+                                   channel in other nvUvmInterface APIs.
+        channelInstanceInfo[OUT] - Channel instance information to be filled out.
+                                   See nv_uvm_types.h for details.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT       : If the parameters are invalid.
+        NV_ERR_OBJECT_NOT_FOUND       : If the object associated with the handle isn't found.
+        NV_ERR_INVALID_CHANNEL        : If the channel verification fails.
+        NV_ERR_INSUFFICIENT_RESOURCES : If no memory is available to store the resource information.
+ */
+NV_STATUS nvUvmInterfaceRetainChannel(uvmGpuAddressSpaceHandle vaSpace,
+                                      NvHandle hClient,
+                                      NvHandle hChannel,
+                                      void **retainedChannel,
+                                      UvmGpuChannelInstanceInfo *channelInstanceInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceBindChannelResources
+
+    Associates the mapping address of the channel resources (VAs) provided by the
+    caller with the channel.
+
+    Arguments:
+        retainedChannel[IN]           - Channel pointer returned by nvUvmInterfaceRetainChannel
+        channelResourceBindParams[IN] - Buffer of initialized UvmGpuChannelInstanceInfo::resourceCount
+                                        entries. See nv_uvm_types.h for details.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT       : If the parameters are invalid.
+        NV_ERR_OBJECT_NOT_FOUND       : If the objects associated with the handles aren't found.
+        NV_ERR_INSUFFICIENT_RESOURCES : If no memory is available to store the resource information.
+ */
+NV_STATUS nvUvmInterfaceBindChannelResources(void *retainedChannel,
+                                             UvmGpuChannelResourceBindParams *channelResourceBindParams);
+
+/*******************************************************************************
+    nvUvmInterfaceReleaseChannel
+
+    Releases state retained by nvUvmInterfaceRetainChannel.
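+
+    Example calling pattern (an illustrative sketch added for this doc; the
+    handles are hypothetical):
+
+        void *retainedChannel;
+        UvmGpuChannelInstanceInfo instanceInfo;
+
+        NV_STATUS status = nvUvmInterfaceRetainChannel(vaSpace, hClient, hChannel,
+                                                       &retainedChannel, &instanceInfo);
+        // ... nvUvmInterfaceBindChannelResources(), use the channel ...
+        nvUvmInterfaceReleaseChannel(retainedChannel);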
+ */
+void nvUvmInterfaceReleaseChannel(void *retainedChannel);
+
+/*******************************************************************************
+    nvUvmInterfaceStopChannel
+
+    Idles the channel and takes it off the runlist.
+
+    Arguments:
+        retainedChannel[IN] - Channel pointer returned by nvUvmInterfaceRetainChannel
+        bImmediate[IN]      - If true, kill the channel without attempting to wait
+                              for it to go idle.
+*/
+void nvUvmInterfaceStopChannel(void *retainedChannel, NvBool bImmediate);
+
+/*******************************************************************************
+    nvUvmInterfaceGetChannelResourcePtes
+
+    The interface builds the RM PTEs using the provided input parameters.
+
+    Arguments:
+        vaSpace[IN]            - vaSpace handle.
+        resourceDescriptor[IN] - The channel resource descriptor returned by
+                                 nvUvmInterfaceRetainChannelResources.
+        offset[IN]             - Offset from the beginning of the allocation
+                                 where PTE mappings should begin.
+                                 Should be aligned with pagesize associated
+                                 with the allocation.
+        size[IN]               - Length of the allocation for which PTEs
+                                 should be built.
+                                 Should be aligned with pagesize associated
+                                 with the allocation.
+                                 size = 0 will be interpreted as the total size
+                                 of the allocation.
+        gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT      - Invalid parameter(s) passed.
+        NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed.
+        NV_ERR_NOT_SUPPORTED         - Functionality is not supported.
+        NV_ERR_INVALID_BASE          - offset is beyond the allocation size
+        NV_ERR_INVALID_LIMIT         - (offset + size) is beyond the allocation size.
+        NV_ERR_BUFFER_TOO_SMALL      - gpuExternalMappingInfo.pteBufferSize is insufficient to
+                                       store a single PTE.
+*/
+NV_STATUS nvUvmInterfaceGetChannelResourcePtes(uvmGpuAddressSpaceHandle vaSpace,
+                                               NvP64 resourceDescriptor,
+                                               NvU64 offset,
+                                               NvU64 size,
+                                               UvmGpuExternalMappingInfo *externalMappingInfo);
+
+/*******************************************************************************
+    nvUvmInterfaceReportNonReplayableFault
+
+    The interface communicates a non-replayable fault packet from UVM to RM, which
+    will log the fault, notify the clients and then trigger RC on the channel.
+
+    Arguments:
+        device[IN]       - The device where the fault happened.
+        pFaultPacket[IN] - The opaque pointer from UVM that will be later
+                           converted to a MMU_FAULT_PACKET type.
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed.
+        NV_ERR_NOT_SUPPORTED    - Functionality is not supported.
+*/
+NV_STATUS nvUvmInterfaceReportNonReplayableFault(uvmGpuDeviceHandle device,
+                                                 const void *pFaultPacket);
+
+/*******************************************************************************
+    nvUvmInterfacePagingChannelAllocate
+
+    In SR-IOV heavy, this function requests the allocation of a paging channel
+    (i.e. a privileged CE channel) bound to a specified copy engine. Unlike
+    channels allocated via nvUvmInterfaceChannelAllocate, the caller cannot push
+    methods to a paging channel directly, but instead relies on the
+    nvUvmInterfacePagingChannelPushStream API to do so.
+
+    SR-IOV heavy only. The implementation of this interface can acquire
+    RM or GPU locks.
+
+    Arguments:
+        device[IN]       - device under which the paging channel will be allocated
+        allocParams[IN]  - structure with allocation settings
+        channel[OUT]     - pointer to the allocated paging channel handle
+        channelInfo[OUT] - structure filled with channel information
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed.
+        NV_ERR_NO_MEMORY        - Not enough memory to allocate
+                                  paging channel/shadow notifier.
+        NV_ERR_NOT_SUPPORTED    - SR-IOV heavy mode is disabled.
+
+ */
+NV_STATUS nvUvmInterfacePagingChannelAllocate(uvmGpuDeviceHandle device,
+                                              const UvmGpuPagingChannelAllocParams *allocParams,
+                                              UvmGpuPagingChannelHandle *channel,
+                                              UvmGpuPagingChannelInfo *channelInfo);
+
+/*******************************************************************************
+    nvUvmInterfacePagingChannelDestroy
+
+    This function destroys a given paging channel.
+
+    SR-IOV heavy only. The implementation of this interface can acquire
+    RM or GPU locks.
+
+    Arguments:
+        channel[IN] - paging channel handle. If the passed handle is
+                      the NULL pointer, the function returns immediately.
+*/
+void nvUvmInterfacePagingChannelDestroy(UvmGpuPagingChannelHandle channel);
+
+/*******************************************************************************
+
+    nvUvmInterfacePagingChannelsMap
+
+    Map a guest allocation in the address space associated with all the paging
+    channels allocated under the given device.
+
+    SR-IOV heavy only. The implementation of this interface can acquire
+    RM or GPU locks.
+
+    Arguments:
+        srcVaSpace[IN]  - VA space handle used to allocate the input pointer
+                          srcAddress.
+        srcAddress[IN]  - virtual address returned by nvUvmInterfaceMemoryAllocFB
+                          or nvUvmInterfaceMemoryAllocSys. The entire allocation
+                          backing this guest VA is mapped.
+        device[IN]      - device under which paging channels were allocated
+        dstAddress[OUT] - a virtual address that is valid (i.e. is mapped) in
+                          all the paging channels allocated under the given device.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - Invalid parameter(s) passed.
+        NV_ERR_NOT_SUPPORTED    - SR-IOV heavy mode is disabled.
+*/
+NV_STATUS nvUvmInterfacePagingChannelsMap(uvmGpuAddressSpaceHandle srcVaSpace,
+                                          UvmGpuPointer srcAddress,
+                                          uvmGpuDeviceHandle device,
+                                          NvU64 *dstAddress);
+
+/*******************************************************************************
+
+    nvUvmInterfacePagingChannelsUnmap
+
+    Unmap a VA returned by nvUvmInterfacePagingChannelsMap.
+
+    SR-IOV heavy only. The implementation of this interface can acquire
+    RM or GPU locks.
+
+    Arguments:
+        srcVaSpace[IN] - VA space handle that was passed to the previous mapping.
+        srcAddress[IN] - virtual address that was passed to the previous mapping.
+        device[IN]     - device under which paging channels were allocated.
+ */
+void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace,
+                                       UvmGpuPointer srcAddress,
+                                       uvmGpuDeviceHandle device);
+
+
+/*******************************************************************************
+    nvUvmInterfacePagingChannelPushStream
+
+    Used for remote execution of the passed methods; the UVM driver uses this
+    interface to ask the vGPU plugin to execute certain HW methods on its
+    behalf. The callee should push the methods in the specified order, i.e. it
+    is not allowed to do any reordering.
+
+    The API is asynchronous. The UVM driver can wait on the remote execution by
+    inserting a semaphore release method at the end of the method stream, and
+    then loop until the semaphore value reaches the completion value indicated
+    in the release method.
+
+    The valid HW methods that can be passed by the UVM driver follow; the source
+    functions listed contain the exact formatting (encoding) of the HW method
+    used by the UVM driver for Ampere.
+
+    - TLB invalidation targeting a VA range. See
+      uvm_hal_volta_host_tlb_invalidate_va.
+
+      - TLB invalidation targeting certain levels in the page tree (including
+        the possibility of invalidating everything).
+        See uvm_hal_pascal_host_tlb_invalidate_all.
+
+      - Replayable fault replay. See uvm_hal_volta_replay_faults.
+
+      - Replayable fault cancellation targeting a guest virtual address. See
+        uvm_hal_volta_cancel_faults_va.
+
+      - Membar, scoped to the device or to the entire system. See
+        uvm_hal_pascal_host_membar_gpu and uvm_hal_pascal_host_membar_sys.
+
+      - Host semaphore acquire, see uvm_hal_turing_host_semaphore_acquire. The
+        virtual address specified in the semaphore operation must lie within a
+        buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+      - CE semaphore release, see uvm_hal_pascal_ce_semaphore_release. The
+        virtual address specified in the semaphore operation must lie within a
+        buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+      - 64-bit-wide memset, see uvm_hal_kepler_ce_memset_8. The destination
+        address is a physical address in vidmem.
+
+      - No-op, see uvm_hal_kepler_host_noop. Used to store the source buffer
+        of a memcopy method within the input stream itself.
+
+      - Memcopy, see uvm_hal_kepler_ce_memcopy. The destination address is a
+        physical address in vidmem. The source address is an offset within
+        methodStream, in bytes, indicating the location of the (inlined) source
+        buffer. The copy size does not exceed 4KB.
+
+      - CE semaphore release with timestamp, see
+        uvm_hal_kepler_ce_semaphore_timestamp. The virtual address specified in
+        the semaphore operation must lie within a buffer previously mapped by
+        nvUvmInterfacePagingChannelsMap.
+
+      - CE semaphore reduction, see uvm_hal_kepler_ce_semaphore_reduction_inc.
+        The virtual address specified in the semaphore operation must lie within
+        a buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+    Only invoked in SR-IOV heavy mode.
+
+    NOTES:
+      - This function uses a pre-allocated stack per paging channel
+        (stored in the UvmGpuPagingChannel object).
+      - This function DOES NOT acquire the RM API or GPU locks. That is because
+        it is called during fault servicing, which could produce deadlocks.
+      - Concurrent calls to this function using channels under the same device
+        are not allowed due to:
+          a. the pre-allocated stack
+          b. the fact that the internal RPC infrastructure doesn't acquire the
+             GPU lock.
+        Therefore, locking is the caller's responsibility.
+      - This function DOES NOT sleep (it does not allocate memory or acquire
+        locks), so it can be invoked while holding a spinlock.
+
+    Arguments:
+        channel[IN]          - paging channel handle obtained via
+                               nvUvmInterfacePagingChannelAllocate
+
+        methodStream[IN]     - HW methods to be pushed to the paging channel.
+
+        methodStreamSize[IN] - Size of methodStream, in bytes. The maximum push
+                               size is 128KB.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - An invalid parameter was passed.
+        NV_ERR_NOT_SUPPORTED    - SR-IOV heavy mode is disabled.
+*/
+NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channel,
+                                                char *methodStream,
+                                                NvU32 methodStreamSize);
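A minimal sketch of the completion-wait pattern described above: the caller appends a CE semaphore release to its method stream, pushes it, and polls the semaphore location, which must lie in a buffer previously mapped with nvUvmInterfacePagingChannelsMap. The helper name and the idea that the caller already encoded the methods are assumptions for illustration; the real encodings live in the uvm_hal_* functions listed above.

    // Illustrative only: push a method stream and spin-wait for its completion.
    static NV_STATUS pushAndWait(UvmGpuPagingChannelHandle channel,
                                 char *methodStream, NvU32 streamSize,
                                 volatile NvU32 *semaphoreCpuVa, // in a PagingChannelsMap'd buffer
                                 NvU32 completionValue)          // value encoded in the release method
    {
        NV_STATUS status = nvUvmInterfacePagingChannelPushStream(channel,
                                                                 methodStream,
                                                                 streamSize);
        if (status != NV_OK)
            return status;

        // The push is asynchronous; wait for the trailing semaphore release.
        while (*semaphoreCpuVa != completionValue)
            ;
        return NV_OK;
    }

Since nvUvmInterfacePagingChannelPushStream never sleeps, a wrapper like this is callable while holding a spinlock, but per the NOTES above the caller must serialize pushes to channels under the same device itself.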
+
+/*******************************************************************************
+    nvUvmInterfaceInitCslContext
+
+    Allocates and initializes a CSL context for a given secure channel.
+
+    The lifetime of the context is the same as the lifetime of the secure
+    channel it is paired with.
+
+    Arguments:
+        ctx[OUT]    - The CSL context.
+        channel[IN] - Handle to a secure channel.
+
+    Error codes:
+        NV_ERR_INVALID_STATE   - The system is not operating in Confidential
+                                 Compute mode.
+        NV_ERR_INVALID_CHANNEL - The associated channel is not a secure channel.
+        NV_ERR_IN_USE          - The context has already been initialized.
+*/
+NV_STATUS nvUvmInterfaceInitCslContext(UvmCslContext **ctx,
+                                       uvmGpuChannelHandle channel);
+
+/*******************************************************************************
+    nvUvmInterfaceDeinitCslContext
+
+    Securely deinitializes and clears the contents of a context.
+
+    If the context is already deinitialized, the function returns immediately.
+
+    Arguments:
+        ctx[IN] - The CSL context.
+*/
+void nvUvmInterfaceDeinitCslContext(UvmCslContext *ctx);
+
+/*******************************************************************************
+    nvUvmInterfaceLogDeviceEncryption
+
+    Logs and checks information about device encryption.
+
+    This function DOES NOT acquire the RM API or GPU locks.
+    nvUvmInterfaceLogDeviceEncryption, nvUvmInterfaceCslEncrypt, and
+    nvUvmInterfaceCslDecrypt must not be called concurrently with the same
+    UvmCslContext parameter in different threads. The caller must guarantee
+    this exclusion.
+
+    Arguments:
+        ctx[IN]        - The CSL context.
+        decryptIv[OUT] - Parameter that is stored before a successful device
+                         encryption. It is used as an input to
+                         nvUvmInterfaceCslDecrypt.
+
+    Error codes:
+        NV_ERR_INSUFFICIENT_RESOURCES - The device encryption would cause a
+                                        counter overflow to occur.
+*/
+NV_STATUS nvUvmInterfaceLogDeviceEncryption(UvmCslContext *ctx,
+                                            UvmCslIv *decryptIv);
+
+/*******************************************************************************
+    nvUvmInterfaceCslEncrypt
+
+    Encrypts data and produces an authentication tag.
+
+    The auth, input, and output buffers must not overlap; if they do, calling
+    this function produces undefined behavior. Performance is typically
+    maximized when the input and output buffers are 16-byte aligned, which is
+    the natural alignment of an AES block.
+
+    This function DOES NOT acquire the RM API or GPU locks.
+    nvUvmInterfaceLogDeviceEncryption, nvUvmInterfaceCslEncrypt, and
+    nvUvmInterfaceCslDecrypt must not be called concurrently with the same
+    UvmCslContext parameter in different threads. The caller must guarantee
+    this exclusion.
+
+    Arguments:
+        ctx[IN]            - The CSL context.
+        bufferSize[IN]     - Size of the input and output buffers in bytes.
+                             The value can range from 1 byte to (2^32) - 1 bytes.
+        inputBuffer[IN]    - Address of the plaintext input buffer.
+        outputBuffer[OUT]  - Address of the ciphertext output buffer.
+        authTagBuffer[OUT] - Address of the authentication tag buffer.
+
+    Error codes:
+        NV_ERR_INSUFFICIENT_RESOURCES - The encryption operation would cause a
+                                        counter to overflow.
+        NV_ERR_INVALID_ARGUMENT       - The size of the data is 0 bytes.
+*/
+NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *ctx,
+                                   NvU32 bufferSize,
+                                   NvU8 const *inputBuffer,
+                                   NvU8 *outputBuffer,
+                                   NvU8 *authTagBuffer);
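A short sketch of CPU-side encryption with this API, assuming an already-initialized context. The 32-byte tag size mirrors UVM_APM_CSL_AUTHTAG_SIZE defined in nv_uvm_types.h later in this patch; the buffer names and helper are illustrative only.

    // Illustrative only: encrypt one payload under a CSL context.
    static NV_STATUS encryptPayload(UvmCslContext *ctx,
                                    NvU8 const *plaintext, NvU32 size,
                                    NvU8 *ciphertext)
    {
        NvU8 authTag[32]; // UVM_APM_CSL_AUTHTAG_SIZE bytes

        if (size == 0)
            return NV_ERR_INVALID_ARGUMENT; // the API rejects empty buffers

        // The tag authenticates the ciphertext; in real use it travels with
        // the ciphertext so nvUvmInterfaceCslDecrypt can verify it.
        return nvUvmInterfaceCslEncrypt(ctx, size, plaintext, ciphertext, authTag);
    }

Note that the exclusion rule documented above applies here too: callers must not invoke this concurrently with the other CSL entry points on the same UvmCslContext.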
+
+/*******************************************************************************
+    nvUvmInterfaceCslDecrypt
+
+    Verifies the authentication tag and decrypts data.
+
+    The auth, input, and output buffers must not overlap; if they do, calling
+    this function produces undefined behavior. Performance is typically
+    maximized when the input and output buffers are 16-byte aligned, which is
+    the natural alignment of an AES block.
+
+    This function DOES NOT acquire the RM API or GPU locks.
+    nvUvmInterfaceLogDeviceEncryption, nvUvmInterfaceCslEncrypt, and
+    nvUvmInterfaceCslDecrypt must not be called concurrently with the same
+    UvmCslContext parameter in different threads. The caller must guarantee
+    this exclusion.
+
+    Arguments:
+        ctx[IN]           - The CSL context.
+        bufferSize[IN]    - Size of the input and output buffers in bytes.
+                            The value can range from 1 byte to (2^32) - 1 bytes.
+        decryptIv[IN]     - Parameter given by nvUvmInterfaceLogDeviceEncryption.
+        inputBuffer[IN]   - Address of the ciphertext input buffer.
+        outputBuffer[OUT] - Address of the plaintext output buffer.
+        authTagBuffer[IN] - Address of the authentication tag buffer.
+
+    Error codes:
+        NV_ERR_INSUFFICIENT_RESOURCES - The decryption operation would cause a
+                                        counter overflow to occur.
+        NV_ERR_INVALID_ARGUMENT       - The size of the data is 0 bytes.
+        NV_ERR_INVALID_DATA           - Verification of the authentication tag
+                                        fails.
+*/
+NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *ctx,
+                                   NvU32 bufferSize,
+                                   NvU8 const *inputBuffer,
+                                   UvmCslIv const *decryptIv,
+                                   NvU8 *outputBuffer,
+                                   NvU8 const *authTagBuffer);
+
+
+#endif // _NV_UVM_INTERFACE_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h
new file mode 100644
index 0000000..e9171ff
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h
@@ -0,0 +1,964 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// This file provides common types for both the UVM driver and RM's UVM
+// interface.
+//
+
+#ifndef _NV_UVM_TYPES_H_
+#define _NV_UVM_TYPES_H_
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#include "nvgputypes.h"
+#include "nvCpuUuid.h"
+
+
+//
+// The default page size is left at "0" because the BIG page size is the
+// default in RM, and there are multiple BIG page sizes in RM. Since these
+// defines are used as flags, "0" is fine when the user is not sure which
+// page size it wants for the allocation.
+//
+#define UVM_PAGE_SIZE_DEFAULT    0x0
+#define UVM_PAGE_SIZE_4K         0x1000
+#define UVM_PAGE_SIZE_64K        0x10000
+#define UVM_PAGE_SIZE_128K       0x20000
+#define UVM_PAGE_SIZE_2M         0x200000
+#define UVM_PAGE_SIZE_512M       0x20000000
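As a small illustration of how these flag-style defines are meant to be used (editorial, not part of the header): callers that know their desired page size pass the matching define, and pass UVM_PAGE_SIZE_DEFAULT otherwise; a hypothetical selection helper might look like this.

    // Illustrative only: map a byte count to the smallest UVM page size define
    // that covers it, falling back to the RM default.
    static NvU32 pickUvmPageSize(NvU64 size)
    {
        if (size <= UVM_PAGE_SIZE_4K)   return UVM_PAGE_SIZE_4K;
        if (size <= UVM_PAGE_SIZE_64K)  return UVM_PAGE_SIZE_64K;
        if (size <= UVM_PAGE_SIZE_2M)   return UVM_PAGE_SIZE_2M;
        if (size <= UVM_PAGE_SIZE_512M) return UVM_PAGE_SIZE_512M;
        return UVM_PAGE_SIZE_DEFAULT;   // let RM choose its default BIG page size
    }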
+//
+// When modifying flags, make sure they are compatible with the mirrored
+// PMA_* flags in phys_mem_allocator.h.
+//
+// Input flags
+#define UVM_PMA_ALLOCATE_DONT_EVICT             NVBIT(0)
+#define UVM_PMA_ALLOCATE_PINNED                 NVBIT(1)
+#define UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED  NVBIT(2)
+#define UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE  NVBIT(3)
+#define UVM_PMA_ALLOCATE_SPECIFY_REGION_ID      NVBIT(4)
+#define UVM_PMA_ALLOCATE_PREFER_SLOWEST         NVBIT(5)
+#define UVM_PMA_ALLOCATE_CONTIGUOUS             NVBIT(6)
+#define UVM_PMA_ALLOCATE_PERSISTENT             NVBIT(7)
+#define UVM_PMA_ALLOCATE_PROTECTED_REGION       NVBIT(8)
+#define UVM_PMA_ALLOCATE_FORCE_ALIGNMENT        NVBIT(9)
+#define UVM_PMA_ALLOCATE_NO_ZERO                NVBIT(10)
+#define UVM_PMA_ALLOCATE_TURN_BLACKLIST_OFF     NVBIT(11)
+#define UVM_PMA_ALLOCATE_ALLOW_PARTIAL          NVBIT(12)
+
+// Output flags
+#define UVM_PMA_ALLOCATE_RESULT_IS_ZERO         NVBIT(0)
+
+// Input flags to pmaFree
+#define UVM_PMA_FREE_IS_ZERO                    NVBIT(0)
+
+//
+// Indicates that the PMA operation is being done from one of the PMA eviction
+// callbacks.
+//
+// Notably this flag is currently used only by the UVM/RM interface and is not
+// mirrored in PMA.
+//
+#define UVM_PMA_CALLED_FROM_PMA_EVICTION        16384
+
+#define UVM_UUID_LEN 16
+#define UVM_SW_OBJ_SUBCHANNEL 5
+
+typedef unsigned long long UvmGpuPointer;
+
+//
+// The following typedefs serve to explain the resources they point to.
+// The actual resources remain RM-internal and are not exposed.
+//
+typedef struct uvmGpuSession_tag      *uvmGpuSessionHandle;      // gpuSessionHandle
+typedef struct uvmGpuDevice_tag       *uvmGpuDeviceHandle;       // gpuDeviceHandle
+typedef struct uvmGpuAddressSpace_tag *uvmGpuAddressSpaceHandle; // gpuAddressSpaceHandle
+typedef struct uvmGpuChannel_tag      *uvmGpuChannelHandle;      // gpuChannelHandle
+typedef struct uvmGpuCopyEngine_tag   *uvmGpuCopyEngineHandle;   // gpuObjectHandle
+
+typedef struct UvmGpuMemoryInfo_tag
+{
+    // Out: Memory layout.
+    NvU32 kind;
+
+    // Out: Set to TRUE if the allocation is in sysmem.
+    NvBool sysmem;
+
+    // Out: Set to TRUE if the allocation is constructed under a
+    // Device or Subdevice.
+    // All permutations of sysmem and deviceDescendant are valid.
+    // !sysmem && !deviceDescendant implies a fabric allocation.
+    NvBool deviceDescendant;
+
+    // Out: Page size associated with the phys alloc.
+    NvU32 pageSize;
+
+    // Out: Set to TRUE if the allocation is contiguous.
+    NvBool contig;
+
+    // Out: Starting address if the allocation is contiguous.
+    // This is only valid if contig is NV_TRUE.
+    NvU64 physAddr;
+
+    // Out: Total size of the allocation.
+    NvU64 size;
+
+    // Out: UUID of the GPU to which the allocation belongs.
+    // This is only valid if deviceDescendant is NV_TRUE.
+    // Note: If the allocation is owned by a device in
+    // an SLI group and the allocation is broadcast
+    // across the SLI group, this UUID will be any one
+    // of the subdevices in the SLI group.
+    NvProcessorUuid uuid;
+} UvmGpuMemoryInfo;
+
+// Some resources must share the same virtual mappings across channels. A
+// mapped resource must be shared by a channel iff:
+//
+// 1) The channel belongs to a TSG (UvmGpuChannelInstanceInfo::bTsgChannel is
+//    NV_TRUE).
+//
+// 2) The channel is in the same TSG as all other channels sharing that mapping
+//    (UvmGpuChannelInstanceInfo::tsgId matches among channels).
+//
+// 3) The channel is in the same GPU address space as the other channels
+//    sharing that mapping.
+//
+// 4) The resource handle(s) match those of the shared mapping
+//    (UvmGpuChannelResourceInfo::resourceDescriptor and
+//    UvmGpuChannelResourceInfo::resourceId).
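The four conditions above translate directly into a predicate. A sketch (editorial, not part of the header) over the UvmGpuChannelResourceInfo and UvmGpuChannelInstanceInfo types defined just below; condition 3 (same GPU address space) is assumed to be tracked by the caller:

    // Illustrative only: may channel 'b' reuse the mapping channel 'a' created
    // for the resource at index 'r'?
    static NvBool canShareResourceMapping(const UvmGpuChannelInstanceInfo *a,
                                          const UvmGpuChannelInstanceInfo *b,
                                          NvU32 r)
    {
        return a->bTsgChannel && b->bTsgChannel &&          // condition 1
               a->tsgId == b->tsgId &&                      // condition 2
               a->resourceInfo[r].resourceDescriptor ==
                   b->resourceInfo[r].resourceDescriptor && // condition 4
               a->resourceInfo[r].resourceId ==
                   b->resourceInfo[r].resourceId;
    }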
+typedef struct UvmGpuChannelResourceInfo_tag
+{
+    // Out: Ptr to the RM memDesc of the channel resource.
+    NvP64 resourceDescriptor;
+
+    // Out: RM ID of the channel resource.
+    NvU32 resourceId;
+
+    // Out: Alignment needed for the resource allocation.
+    NvU64 alignment;
+
+    // Out: Info about the resource allocation.
+    UvmGpuMemoryInfo resourceInfo;
+} UvmGpuChannelResourceInfo;
+
+typedef struct UvmGpuPagingChannelInfo_tag
+{
+    // Pointer to a shadow buffer mirroring the contents of the error notifier
+    // for the paging channel.
+    NvNotification *shadowErrorNotifier;
+} UvmGpuPagingChannelInfo;
+
+typedef enum
+{
+    UVM_GPU_CHANNEL_ENGINE_TYPE_GR = 1,
+    UVM_GPU_CHANNEL_ENGINE_TYPE_CE = 2,
+    UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2 = 3,
+} UVM_GPU_CHANNEL_ENGINE_TYPE;
+
+#define UVM_GPU_CHANNEL_MAX_RESOURCES 13
+
+typedef struct UvmGpuChannelInstanceInfo_tag
+{
+    // Out: Starting address of the channel instance.
+    NvU64 base;
+
+    // Out: Set to NV_TRUE if the instance is in sysmem.
+    //      Set to NV_FALSE if the instance is in vidmem.
+    NvBool sysmem;
+
+    // Out: Hardware runlist ID.
+    NvU32 runlistId;
+
+    // Out: Hardware channel ID.
+    NvU32 chId;
+
+    // Out: NV_TRUE if the channel belongs to a subcontext or NV_FALSE if it
+    // belongs to a regular context.
+    NvBool bInSubctx;
+
+    // Out: ID of the subcontext to which the channel belongs.
+    NvU32 subctxId;
+
+    // Out: Whether the channel belongs to a TSG or not.
+    NvBool bTsgChannel;
+
+    // Out: ID of the TSG to which the channel belongs.
+    NvU32 tsgId;
+
+    // Out: Maximum number of subcontexts in the TSG to which the channel belongs.
+    NvU32 tsgMaxSubctxCount;
+
+    // Out: Info of the channel resources associated with the channel.
+    UvmGpuChannelResourceInfo resourceInfo[UVM_GPU_CHANNEL_MAX_RESOURCES];
+
+    // Out: Number of valid entries in the resourceInfo array.
+    NvU32 resourceCount;
+
+    // Out: Type of the engine the channel is bound to.
+    NvU32 channelEngineType;
+
+    // Out: Channel handle to be used in the CLEAR_FAULTED method.
+    NvU32 clearFaultedToken;
+
+    // Out: Address of the NV_CHRAM_CHANNEL register required to clear the
+    // ENG_FAULTED/PBDMA_FAULTED bits after servicing non-replayable faults on
+    // Ampere+ GPUs.
+    volatile NvU32 *pChramChannelRegister;
+
+    // Out: Address of the Runlist PRI Base Register required to ring the
+    // doorbell after clearing the faulted bit.
+    volatile NvU32 *pRunlistPRIBaseRegister;
+
+    // Out: SMC engine ID to which the GR channel is bound, or zero if the GPU
+    // does not support SMC or it is a CE channel.
+    NvU32 smcEngineId;
+
+    // Out: Start of the VEID range assigned to the SMC engine the GR channel
+    // is bound to, or zero if the GPU does not support SMC or it is a CE
+    // channel.
+    NvU32 smcEngineVeIdOffset;
+} UvmGpuChannelInstanceInfo;
+
+typedef struct UvmGpuChannelResourceBindParams_tag
+{
+    // In: RM ID of the channel resource.
+    NvU32 resourceId;
+
+    // In: Starting VA at which the channel resource is mapped.
+    NvU64 resourceVa;
+} UvmGpuChannelResourceBindParams;
+
+typedef struct UvmGpuChannelInfo_tag
+{
+    volatile unsigned *gpGet;
+    volatile unsigned *gpPut;
+    UvmGpuPointer     *gpFifoEntries;
+    unsigned           numGpFifoEntries;
+    unsigned           channelClassNum;
+
+    // The errorNotifier is filled out when the channel hits an RC error.
+    NvNotification    *errorNotifier;
+
+    NvU32              hwRunlistId;
+    NvU32              hwChannelId;
+
+    volatile unsigned *dummyBar1Mapping;
+
+    // These values are filled by nvUvmInterfaceCopyEngineAlloc. The work
+    // submission token requires the channel to be bound to a runlist, and
+    // that happens after CE allocation.
+    volatile NvU32    *workSubmissionOffset;
+
+    // To be deprecated.
See pWorkSubmissionToken below. + NvU32 workSubmissionToken; + + // + // This is the memory location where the most recently updated work + // submission token for this channel will be written to. After submitting + // new work and updating GP_PUT with the appropriate fence, the token must + // be read from this location before writing it to the workSubmissionOffset + // to kick off the new work. + // + volatile NvU32 *pWorkSubmissionToken; +} UvmGpuChannelInfo; + +typedef enum +{ + // This value must be passed by Pascal and pre-Pascal GPUs for those + // allocations for which a specific location cannot be enforced. + UVM_BUFFER_LOCATION_DEFAULT = 0, + + UVM_BUFFER_LOCATION_SYS = 1, + UVM_BUFFER_LOCATION_VID = 2, +} UVM_BUFFER_LOCATION; + +typedef struct UvmGpuChannelAllocParams_tag +{ + NvU32 numGpFifoEntries; + + // The next two fields store UVM_BUFFER_LOCATION values + NvU32 gpFifoLoc; + NvU32 gpPutLoc; + + // Index of the engine the channel will be bound to + // ignored if engineType is anything other than UVM_GPU_CHANNEL_ENGINE_TYPE_CE + NvU32 engineIndex; + + // interpreted as UVM_GPU_CHANNEL_ENGINE_TYPE + NvU32 engineType; +} UvmGpuChannelAllocParams; + +typedef struct UvmGpuPagingChannelAllocParams_tag +{ + // Index of the LCE engine the channel will be bound to, a zero-based offset + // from NV2080_ENGINE_TYPE_COPY0. + NvU32 engineIndex; +} UvmGpuPagingChannelAllocParams; + +// The max number of Copy Engines supported by a GPU. +// The gpu ops build has a static assert that this is the correct number. +#define UVM_COPY_ENGINE_COUNT_MAX 10 + +typedef struct +{ + // True if the CE is supported at all + NvBool supported:1; + + // True if the CE is synchronous with GR + NvBool grce:1; + + // True if the CE shares physical CEs with any other CE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. + NvBool shared:1; + + // True if the CE can give enhanced performance for SYSMEM reads over other CEs + NvBool sysmemRead:1; + + // True if the CE can give enhanced performance for SYSMEM writes over other CEs + NvBool sysmemWrite:1; + + // True if the CE can be used for SYSMEM transactions + NvBool sysmem:1; + + // True if the CE can be used for P2P transactions using NVLINK + NvBool nvlinkP2p:1; + + // True if the CE can be used for P2P transactions + NvBool p2p:1; + + // Mask of physical CEs assigned to this LCE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. + NvU32 cePceMask; +} UvmGpuCopyEngineCaps; + +typedef struct UvmGpuCopyEnginesCaps_tag +{ + // Supported CEs may not be contiguous + UvmGpuCopyEngineCaps copyEngineCaps[UVM_COPY_ENGINE_COUNT_MAX]; +} UvmGpuCopyEnginesCaps; + +typedef enum +{ + UVM_LINK_TYPE_NONE, + UVM_LINK_TYPE_PCIE, + UVM_LINK_TYPE_NVLINK_1, + UVM_LINK_TYPE_NVLINK_2, + UVM_LINK_TYPE_NVLINK_3, + + + + +} UVM_LINK_TYPE; + +typedef struct UvmGpuCaps_tag +{ + NvU32 sysmemLink; // UVM_LINK_TYPE + NvU32 sysmemLinkRateMBps; // See UvmGpuP2PCapsParams::totalLinkLineRateMBps + NvBool numaEnabled; + NvU32 numaNodeId; + + // On ATS systems, GPUs connected to different CPU sockets can have peer + // traffic. They are called indirect peers. However, indirect peers are + // mapped using sysmem aperture. 
In order to disambiguate the location of a
+    // specific memory address, each GPU maps its memory to a different window
+    // in the System Physical Address (SPA) space. The following fields contain
+    // the base + size of such a window for the GPU. A systemMemoryWindowSize
+    // different from 0 indicates that the window is valid.
+    //
+    // - If the window is valid, then we can map GPU memory to the CPU as
+    //   cache-coherent by adding the GPU address to the window start.
+    // - If numaEnabled is NV_TRUE, then we can also convert the system
+    //   addresses of allocated GPU memory to struct pages.
+    //
+    // TODO: Bug 1986868: fix window start computation for SIMICS
+    NvU64 systemMemoryWindowStart;
+    NvU64 systemMemoryWindowSize;
+
+    // This tells whether the GPU is connected to NVSwitch. On systems with
+    // NVSwitch all GPUs are connected to it. If connectedToSwitch is NV_TRUE,
+    // nvswitchMemoryWindowStart tells the base address for the GPU in the
+    // NVSwitch address space. It is used when creating PTEs of memory mappings
+    // to NVSwitch peers.
+    NvBool connectedToSwitch;
+    NvU64 nvswitchMemoryWindowStart;
+} UvmGpuCaps;
+
+typedef struct UvmGpuAddressSpaceInfo_tag
+{
+    NvU32           bigPageSize;
+
+    NvBool          atsEnabled;
+
+    // Mapped registers that contain the current GPU time
+    volatile NvU32 *time0Offset;
+    volatile NvU32 *time1Offset;
+
+    // Maximum number of subcontexts supported under this GPU address space
+    NvU32           maxSubctxCount;
+
+    NvBool          smcEnabled;
+
+    NvU32           smcSwizzId;
+
+    NvU32           smcGpcCount;
+} UvmGpuAddressSpaceInfo;
+
+typedef struct UvmGpuAllocInfo_tag
+{
+    NvU64    gpuPhysOffset;        // Returns gpuPhysOffset if contiguous requested
+    NvU32    pageSize;             // default is the RM big page size (64K or 128K);
+                                   // otherwise use 4K or 2M
+    NvU64    alignment;            // Virtual alignment
+    NvBool   bContiguousPhysAlloc; // Flag to request a contiguous physical allocation
+    NvBool   bMemGrowsDown;        // Causes RM to reserve the physical heap from the top of FB
+    NvBool   bPersistentVidmem;    // Causes RM to allocate persistent video memory
+    NvHandle hPhysHandle;          // Handle for the phys allocation, either provided or retrieved
+
+    NvBool   bUnprotected;         // Allocation to be made in unprotected memory whenever
+                                   // SEV or GPU CC modes are enabled. Ignored otherwise.
+
+} UvmGpuAllocInfo;
+
+typedef enum
+{
+    UVM_VIRT_MODE_NONE = 0,           // Baremetal or passthrough virtualization
+    UVM_VIRT_MODE_LEGACY = 1,         // Virtualization without SRIOV support
+    UVM_VIRT_MODE_SRIOV_HEAVY = 2,    // Virtualization with SRIOV Heavy configured
+    UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured
+    UVM_VIRT_MODE_COUNT = 4,
+} UVM_VIRT_MODE;
+
+// !!! The following enums (with UvmRm prefix) are defined and documented in
+// mm/uvm/interface/uvm_types.h and must be mirrored. Please refer to that file
+// for more details.
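To make the interplay of the UvmGpuAllocInfo fields concrete, here is a hypothetical request for one contiguous, persistent 2MB vidmem chunk (a sketch only; the consuming call, nvUvmInterfaceMemoryAllocFB, is declared in nv_uvm_interface.h, and the exact parameter set shown is an assumption):

    // Illustrative only: request one contiguous, persistent 2MB vidmem chunk.
    UvmGpuAllocInfo allocInfo = {0};

    allocInfo.pageSize             = UVM_PAGE_SIZE_2M; // instead of the RM default
    allocInfo.alignment            = UVM_PAGE_SIZE_2M; // virtual alignment
    allocInfo.bContiguousPhysAlloc = NV_TRUE;          // gpuPhysOffset is returned
    allocInfo.bPersistentVidmem    = NV_TRUE;          // persistent video memory

    // ... pass &allocInfo to nvUvmInterfaceMemoryAllocFB(); on success,
    // allocInfo.gpuPhysOffset holds the physical base of the allocation.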
+
+// UVM GPU mapping types
+typedef enum
+{
+    UvmRmGpuMappingTypeDefault = 0,
+    UvmRmGpuMappingTypeReadWriteAtomic = 1,
+    UvmRmGpuMappingTypeReadWrite = 2,
+    UvmRmGpuMappingTypeReadOnly = 3,
+    UvmRmGpuMappingTypeCount = 4
+} UvmRmGpuMappingType;
+
+// UVM GPU caching types
+typedef enum
+{
+    UvmRmGpuCachingTypeDefault = 0,
+    UvmRmGpuCachingTypeForceUncached = 1,
+    UvmRmGpuCachingTypeForceCached = 2,
+    UvmRmGpuCachingTypeCount = 3
+} UvmRmGpuCachingType;
+
+// UVM GPU format types
+typedef enum {
+    UvmRmGpuFormatTypeDefault = 0,
+    UvmRmGpuFormatTypeBlockLinear = 1,
+    UvmRmGpuFormatTypeCount = 2
+} UvmRmGpuFormatType;
+
+// UVM GPU element bits types
+typedef enum {
+    UvmRmGpuFormatElementBitsDefault = 0,
+    UvmRmGpuFormatElementBits8 = 1,
+    UvmRmGpuFormatElementBits16 = 2,
+    // CUDA does not support 24-bit width
+    UvmRmGpuFormatElementBits32 = 4,
+    UvmRmGpuFormatElementBits64 = 5,
+    UvmRmGpuFormatElementBits128 = 6,
+    UvmRmGpuFormatElementBitsCount = 7
+} UvmRmGpuFormatElementBits;
+
+// UVM GPU compression types
+typedef enum {
+    UvmRmGpuCompressionTypeDefault = 0,
+    UvmRmGpuCompressionTypeEnabledNoPlc = 1,
+    UvmRmGpuCompressionTypeCount = 2
+} UvmRmGpuCompressionType;
+
+typedef struct UvmGpuExternalMappingInfo_tag
+{
+    // In: GPU caching ability.
+    UvmRmGpuCachingType cachingType;
+
+    // In: Virtual permissions.
+    UvmRmGpuMappingType mappingType;
+
+    // In: RM virtual mapping memory format
+    UvmRmGpuFormatType formatType;
+
+    // In: RM virtual mapping element bits
+    UvmRmGpuFormatElementBits elementBits;
+
+    // In: RM virtual compression type
+    UvmRmGpuCompressionType compressionType;
+
+    // In: Size of the buffer to store PTEs (in bytes).
+    NvU64 pteBufferSize;
+
+    // In: Pointer to a buffer to store PTEs.
+    // Out: The interface will fill the buffer with PTEs.
+    NvU64 *pteBuffer;
+
+    // Out: Number of PTEs filled into the buffer.
+    NvU64 numWrittenPtes;
+
+    // Out: Number of PTEs remaining to be filled
+    // if the buffer is not sufficient to accommodate
+    // the requested PTEs.
+    NvU64 numRemainingPtes;
+
+    // Out: PTE size (in bytes)
+    NvU32 pteSize;
+} UvmGpuExternalMappingInfo;
+
+typedef struct UvmGpuP2PCapsParams_tag
+{
+    // Out: peerId[i] contains gpu[i]'s peer id of gpu[1 - i]. Only defined if
+    // the GPUs are direct peers.
+    NvU32 peerIds[2];
+
+    // Out: UVM_LINK_TYPE
+    NvU32 p2pLink;
+
+    // Out: optimalNvlinkWriteCEs[i] contains gpu[i]'s optimal CE for writing to
+    // gpu[1 - i]. The CE indexes are valid only if the GPUs are NVLink peers.
+    //
+    // The value returned by RM for this field may change when a GPU is
+    // registered with RM for the first time, so UVM needs to query it again
+    // each time a GPU is registered.
+    NvU32 optimalNvlinkWriteCEs[2];
+
+    // Out: Maximum unidirectional bandwidth between the peers in megabytes per
+    // second, not taking the protocol overhead into account. The reported
+    // bandwidth for indirect peers is zero.
+    NvU32 totalLinkLineRateMBps;
+
+    // Out: True if the peers have an indirect link to communicate. On P9
+    // systems, this is true if peers are connected to different NPUs that
+    // forward the requests between them.
+    NvU32 indirectAccess : 1;
+} UvmGpuP2PCapsParams;
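A small sketch of how these outputs might be consumed when setting up a peer-to-peer copy path (editorial only; the query that fills this struct lives in nv_uvm_interface.h, and the helper name is hypothetical):

    // Illustrative only: pick the CE for writes from gpu[0] to gpu[1].
    static NvBool chooseWriteCe(const UvmGpuP2PCapsParams *caps, NvU32 *ce)
    {
        // Indirect peers route through the sysmem aperture; no NVLink write CE.
        if (caps->indirectAccess)
            return NV_FALSE;

        // Optimal CEs are only reported for NVLink peers (see comment above).
        if (caps->p2pLink < UVM_LINK_TYPE_NVLINK_1)
            return NV_FALSE;

        *ce = caps->optimalNvlinkWriteCEs[0];
        return NV_TRUE;
    }

Because RM may report different optimalNvlinkWriteCEs values after a GPU is re-registered, a consumer would re-run such a selection each time, per the comment in the struct.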
+
+// Platform-wide information
+typedef struct UvmPlatformInfo_tag
+{
+    // Out: ATS (Address Translation Services) is supported
+    NvBool atsSupported;
+
+    // Out: AMD SEV (Secure Encrypted Virtualization) is enabled
+    NvBool sevEnabled;
+} UvmPlatformInfo;
+
+typedef struct UvmGpuClientInfo_tag
+{
+    NvHandle hClient;
+
+    NvHandle hSmcPartRef;
+} UvmGpuClientInfo;
+
+
+typedef enum
+{
+    UVM_GPU_CONF_COMPUTE_MODE_NONE,
+    UVM_GPU_CONF_COMPUTE_MODE_APM,
+
+    UVM_GPU_CONF_COMPUTE_MODE_HCC,
+
+    UVM_GPU_CONF_COMPUTE_MODE_COUNT
+} UvmGpuConfComputeMode;
+
+typedef struct UvmGpuConfComputeCaps_tag
+{
+    // Out: The GPU's Confidential Compute mode
+    UvmGpuConfComputeMode mode;
+} UvmGpuConfComputeCaps;
+
+
+#define UVM_GPU_NAME_LENGTH 0x40
+
+typedef struct UvmGpuInfo_tag
+{
+    // Printable GPU name
+    char name[UVM_GPU_NAME_LENGTH];
+
+    // UUID of this GPU
+    NvProcessorUuid uuid;
+
+    // GPU architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_*
+    NvU32 gpuArch;
+
+    // GPU implementation; NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_*
+    NvU32 gpuImplementation;
+
+    // Host (gpfifo) class; *_CHANNEL_GPFIFO_*, e.g. KEPLER_CHANNEL_GPFIFO_A
+    NvU32 hostClass;
+
+    // Copy engine (dma) class; *_DMA_COPY_*, e.g. KEPLER_DMA_COPY_A
+    NvU32 ceClass;
+
+    // Compute class; *_COMPUTE_*, e.g. KEPLER_COMPUTE_A
+    NvU32 computeClass;
+
+    // Set if the GPU supports TCC mode and is currently in TCC mode.
+    NvBool gpuInTcc;
+
+    // Number of subdevices in the SLI group.
+    NvU32 subdeviceCount;
+
+    // Virtualization mode of this GPU.
+    NvU32 virtMode; // UVM_VIRT_MODE
+
+    // NV_TRUE if this is a simulated/emulated GPU. NV_FALSE otherwise.
+    NvBool isSimulated;
+
+    // Number of GPCs
+    // If SMC is enabled, this is the currently configured number of GPCs for
+    // the given partition (also see the smcSwizzId field below).
+    NvU32 gpcCount;
+
+    // Maximum number of GPCs; NV_SCAL_LITTER_NUM_GPCS
+    // This number is independent of the partition configuration, and can be
+    // used to conservatively size GPU-global constructs.
+    NvU32 maxGpcCount;
+
+    // Number of TPCs
+    NvU32 tpcCount;
+
+    // Maximum number of TPCs per GPC
+    NvU32 maxTpcPerGpcCount;
+
+    // NV_TRUE if SMC is enabled on this GPU.
+    NvBool smcEnabled;
+
+    // SMC partition ID (unique per GPU); note: valid when first looked up in
+    // nvUvmInterfaceGetGpuInfo(), but not guaranteed to remain valid.
+    // nvUvmInterfaceDeviceCreate() re-verifies the swizzId and fails if it is
+    // no longer valid.
+    NvU32 smcSwizzId;
+
+    UvmGpuClientInfo smcUserClientInfo;
+
+
+    // Confidential Compute capabilities of this GPU
+    UvmGpuConfComputeCaps gpuConfComputeCaps;
+
+} UvmGpuInfo;
+
+typedef struct UvmGpuFbInfo_tag
+{
+    // Max physical address that can be allocated by UVM. This excludes
+    // internal RM regions that are not registered with PMA either.
+    NvU64 maxAllocatableAddress;
+
+    NvU32  heapSize;         // RAM in KB available for user allocations
+    NvU32  reservedHeapSize; // RAM in KB reserved for internal RM allocation
+    NvBool bZeroFb;          // Zero FB mode enabled.
+} UvmGpuFbInfo;
+
+typedef struct UvmGpuEccInfo_tag
+{
+    unsigned eccMask;
+    unsigned eccOffset;
+    void    *eccReadLocation;
+    NvBool  *eccErrorNotifier;
+    NvBool   bEccEnabled;
+} UvmGpuEccInfo;
+
+typedef struct UvmPmaAllocationOptions_tag
+{
+    NvU32 flags;
+    NvU32 minimumSpeed;         // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED
+    NvU64 physBegin, physEnd;   // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE
+    NvU32 regionId;             // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_REGION_ID
+    NvU64 alignment;            // valid if flags & UVM_PMA_ALLOCATE_FORCE_ALIGNMENT
+    NvLength numPagesAllocated; // valid if flags & UVM_PMA_ALLOCATE_ALLOW_PARTIAL
+
+    NvU32 resultFlags;          // valid if the allocation function returns NV_OK
+} UvmPmaAllocationOptions;
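Putting the UVM_PMA_ALLOCATE_* input flags defined earlier together with this struct, a hypothetical pinned, range-restricted request could be seeded as below. This is a sketch only; the allocation function that consumes these options is RM-internal and not declared in this header, and the range values are placeholders.

    // Illustrative only: pinned, eviction-free allocation from a physical range.
    UvmPmaAllocationOptions options = {0};

    options.flags     = UVM_PMA_ALLOCATE_PINNED |
                        UVM_PMA_ALLOCATE_DONT_EVICT |
                        UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE |
                        UVM_PMA_ALLOCATE_ALLOW_PARTIAL;
    options.physBegin = 0;                    // assumed range; only honored
    options.physEnd   = 512ULL * 1024 * 1024; // because SPECIFY_ADDRESS_RANGE is set

    // On success, numPagesAllocated reports how many pages were actually
    // obtained (ALLOW_PARTIAL), and resultFlags may carry
    // UVM_PMA_ALLOCATE_RESULT_IS_ZERO if the pages came back zeroed.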
+
+//
+// Mirrored in PMA (PMA_STATS)
+//
+typedef struct UvmPmaStatistics_tag
+{
+    volatile NvU64 numPages2m;               // PMA-wide 2MB page count across all regions
+    volatile NvU64 numFreePages64k;          // PMA-wide free 64KB page count across all regions
+    volatile NvU64 numFreePages2m;           // PMA-wide free 2MB page count across all regions
+
+    volatile NvU64 numPages2mProtected;      // PMA-wide 2MB page count in protected memory
+    volatile NvU64 numFreePages64kProtected; // PMA-wide free 64KB page count in protected memory
+    volatile NvU64 numFreePages2mProtected;  // PMA-wide free 2MB page count in protected memory
+
+} UvmPmaStatistics;
+
+/*******************************************************************************
+    uvmEventSuspend
+    This function will be called by the GPU driver to signal to UVM that the
+    system is about to enter a sleep state. When it is called, the
+    following assumptions/guarantees are valid/made:
+
+      * User channels have been preempted and disabled
+      * UVM channels are still running normally and will continue to do
+        so until after this function returns control
+      * User threads are still running, but can no longer issue system
+        calls to the GPU driver
+      * Until exit from this function, UVM is allowed to make full use of
+        the GPUs under its control, as well as of the GPU driver
+
+    Upon return from this function, UVM may not access GPUs under its control
+    until the GPU driver calls uvmEventResume(). It may still receive
+    calls to uvmEventIsrTopHalf() during this time, from which it should
+    return NV_ERR_NO_INTR_PENDING. It will not receive any other calls.
+*/
+typedef NV_STATUS (*uvmEventSuspend_t) (void);
+
+/*******************************************************************************
+    uvmEventResume
+    This function will be called by the GPU driver to signal to UVM that the
+    system has exited a previously entered sleep state. When it is called,
+    the following assumptions/guarantees are valid/made:
+
+      * UVM is again allowed to make full use of the GPUs under its
+        control, as well as of the GPU driver
+      * UVM channels are running normally
+      * User channels are still preempted and disabled
+      * User threads are again running, but still cannot issue system
+        calls to the GPU driver, nor submit new work
+
+    Upon return from this function, UVM is expected to be fully functional.
+*/
+typedef NV_STATUS (*uvmEventResume_t) (void);
+
+/*******************************************************************************
+    uvmEventStartDevice
+    This function will be called by the GPU driver once it has finished its
+    initialization to tell the UVM driver that this GPU has come up.
+*/
+typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventStopDevice
+    This function will be called by the GPU driver to let UVM know that a GPU
+    is going down.
+*/
+typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+#if defined (_WIN32)
+/*******************************************************************************
+    uvmEventWddmResetDuringTimeout
+    This function will be called by KMD in a TDR servicing path to unmap channel
+    resources and to destroy channels. This is a Windows-specific event.
+*/
+typedef NV_STATUS (*uvmEventWddmResetDuringTimeout_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventWddmRestartAfterTimeout
+    This function will be called by KMD in a TDR servicing path to map channel
+    resources and to create channels. This is a Windows-specific event.
+*/
+typedef NV_STATUS (*uvmEventWddmRestartAfterTimeout_t) (const NvProcessorUuid *pGpuUuidStruct);
+
+/*******************************************************************************
+    uvmEventServiceInterrupt
+    This function gets called from RM's interrupt service routine when an
+    interrupt to service a page fault is triggered.
+*/
+typedef NV_STATUS (*uvmEventServiceInterrupt_t) (void *pDeviceObject,
+                                                 NvU32 deviceId, NvU32 subdeviceId);
+#endif
+
+/*******************************************************************************
+    uvmEventIsrTopHalf_t
+    This function will be called by the GPU driver to let UVM know
+    that an interrupt has occurred.
+
+    Returns:
+        NV_OK if the UVM driver handled the interrupt
+        NV_ERR_NO_INTR_PENDING if the interrupt is not for the UVM driver
+*/
+#if defined (__linux__)
+typedef NV_STATUS (*uvmEventIsrTopHalf_t) (const NvProcessorUuid *pGpuUuidStruct);
+#else
+typedef void (*uvmEventIsrTopHalf_t) (void);
+#endif
+
+struct UvmOpsUvmEvents
+{
+    uvmEventSuspend_t     suspend;
+    uvmEventResume_t      resume;
+    uvmEventStartDevice_t startDevice;
+    uvmEventStopDevice_t  stopDevice;
+    uvmEventIsrTopHalf_t  isrTopHalf;
+#if defined (_WIN32)
+    uvmEventWddmResetDuringTimeout_t  wddmResetDuringTimeout;
+    uvmEventWddmRestartAfterTimeout_t wddmRestartAfterTimeout;
+    uvmEventServiceInterrupt_t        serviceInterrupt;
+#endif
+};
+
+typedef struct UvmGpuFaultInfo_tag
+{
+    struct
+    {
+        // Register mappings obtained from RM
+        volatile NvU32* pFaultBufferGet;
+        volatile NvU32* pFaultBufferPut;
+        // Note: this variable is deprecated, because buffer overflow is no
+        // longer a separate register on newer chips.
+        volatile NvU32* pFaultBufferInfo;
+        volatile NvU32* pPmcIntr;
+        volatile NvU32* pPmcIntrEnSet;
+        volatile NvU32* pPmcIntrEnClear;
+        volatile NvU32* pPrefetchCtrl;
+        NvU32 replayableFaultMask;
+        // Fault buffer CPU mapping and size
+        void*  bufferAddress;
+        NvU32  bufferSize;
+    } replayable;
+    struct
+    {
+        // Shadow buffer in CPU memory for non-replayable faults. Resman
+        // copies the non-replayable faults that need to be handled by UVM
+        // into this buffer.
+        void* shadowBufferAddress;
+
+        // Execution context for the queue associated with the fault buffer
+        void* shadowBufferContext;
+
+        // Fault buffer size
+        NvU32 bufferSize;
+
+        // Preallocated stack for functions called from the UVM ISR top half
+        void *isr_sp;
+
+        // Preallocated stack for functions called from the UVM ISR bottom half
+        void *isr_bh_sp;
+    } nonReplayable;
+    NvHandle faultBufferHandle;
+} UvmGpuFaultInfo;
+
+typedef struct UvmGpuPagingChannel_tag
+{
+    struct gpuDevice *device;
+    NvNotification   *errorNotifier;
+    NvHandle          channelHandle;
+    NvHandle          errorNotifierHandle;
+    void             *pushStreamSp;
+} UvmGpuPagingChannel, *UvmGpuPagingChannelHandle;
+
+typedef struct UvmGpuAccessCntrInfo_tag
+{
+    // Register mappings obtained from RM
+    // Pointer to the Get register for the access counter buffer
+    volatile NvU32* pAccessCntrBufferGet;
+    // Pointer to the Put register for the access counter buffer
+    volatile NvU32* pAccessCntrBufferPut;
+    // Pointer to the Full register for the access counter buffer
+    volatile NvU32* pAccessCntrBufferFull;
+    // Pointer to the hub interrupt
+    volatile NvU32* pHubIntr;
+    // Pointer to the interrupt enable register
+    volatile NvU32* pHubIntrEnSet;
+    // Pointer to the interrupt disable register
+    volatile NvU32* pHubIntrEnClear;
+    // Mask for the access counter buffer
+    NvU32 accessCounterMask;
+    // Access counter buffer CPU mapping and size
+    void* bufferAddress;
+    NvU32 bufferSize;
+    NvHandle accessCntrBufferHandle;
+
+    // The notification address in the access counter notification msg does
+    // not contain the correct upper bits 63-47 for GPA-based notifications.
+    // RM provides us with the correct offset to be added.
+    // See Bug 1803015
+    NvU64 baseDmaSysmemAddr;
+} UvmGpuAccessCntrInfo;
+
+typedef enum
+{
+    UVM_ACCESS_COUNTER_GRANULARITY_64K = 1,
+    UVM_ACCESS_COUNTER_GRANULARITY_2M  = 2,
+    UVM_ACCESS_COUNTER_GRANULARITY_16M = 3,
+    UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
+} UVM_ACCESS_COUNTER_GRANULARITY;
+
+typedef enum
+{
+    UVM_ACCESS_COUNTER_USE_LIMIT_NONE = 1,
+    UVM_ACCESS_COUNTER_USE_LIMIT_QTR  = 2,
+    UVM_ACCESS_COUNTER_USE_LIMIT_HALF = 3,
+    UVM_ACCESS_COUNTER_USE_LIMIT_FULL = 4,
+} UVM_ACCESS_COUNTER_USE_LIMIT;
+
+typedef struct UvmGpuAccessCntrConfig_tag
+{
+    NvU32 mimcGranularity;
+
+    NvU32 momcGranularity;
+
+    NvU32 mimcUseLimit;
+
+    NvU32 momcUseLimit;
+
+    NvU32 threshold;
+} UvmGpuAccessCntrConfig;
+
+typedef UvmGpuChannelInfo gpuChannelInfo;
+typedef UvmGpuChannelAllocParams gpuChannelAllocParams;
+typedef UvmGpuCaps gpuCaps;
+typedef UvmGpuCopyEngineCaps gpuCeCaps;
+typedef UvmGpuCopyEnginesCaps gpuCesCaps;
+typedef UvmGpuP2PCapsParams getP2PCapsParams;
+typedef UvmGpuAddressSpaceInfo gpuAddressSpaceInfo;
+typedef UvmGpuAllocInfo gpuAllocInfo;
+typedef UvmGpuInfo gpuInfo;
+typedef UvmGpuClientInfo gpuClientInfo;
+typedef UvmGpuAccessCntrInfo gpuAccessCntrInfo;
+typedef UvmGpuAccessCntrConfig gpuAccessCntrConfig;
+typedef UvmGpuFaultInfo gpuFaultInfo;
+typedef UvmGpuMemoryInfo gpuMemoryInfo;
+typedef UvmGpuExternalMappingInfo gpuExternalMappingInfo;
+typedef UvmGpuChannelResourceInfo gpuChannelResourceInfo;
+typedef UvmGpuChannelInstanceInfo gpuChannelInstanceInfo;
+typedef UvmGpuChannelResourceBindParams gpuChannelResourceBindParams;
+typedef UvmGpuFbInfo gpuFbInfo;
+typedef UvmGpuEccInfo gpuEccInfo;
+typedef UvmGpuPagingChannel *gpuPagingChannelHandle;
+typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
+typedef UvmGpuPagingChannelAllocParams
gpuPagingChannelAllocParams; +typedef UvmPmaAllocationOptions gpuPmaAllocationOptions; + + +typedef struct ccslContext_t UvmCslContext; +typedef NvU64 UvmCslIv; +#define UVM_APM_CSL_AUTHTAG_SIZE 32 + + +#endif // _NV_UVM_TYPES_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h new file mode 100644 index 0000000..d018414 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + /***************************************************************************\ +|* *| +|* NV GPU Types *| +|* *| +|* This header contains definitions describing NVIDIA's GPU hardware state. *| +|* *| + \***************************************************************************/ + + +#ifndef NVGPUTYPES_INCLUDED +#define NVGPUTYPES_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + + /***************************************************************************\ +|* NvNotification *| + \***************************************************************************/ + +/***** NvNotification Structure *****/ +/* + * NV objects return information about method completion to clients via an + * array of notification structures in main memory. + * + * The client sets the status field to NV???_NOTIFICATION_STATUS_IN_PROGRESS. + * NV fills in the NvNotification[] data structure in the following order: + * timeStamp, otherInfo32, otherInfo16, and then status. + */ + +/* memory data structures */ +typedef volatile struct NvNotificationRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvV32 info32; /* info returned depends on method 0008-000b*/ + NvV16 info16; /* info returned depends on method 000c-000d*/ + NvV16 status; /* user sets bit 15, NV sets status 000e-000f*/ +} NvNotification; + + /***************************************************************************\ +|* NvGpuSemaphore *| + \***************************************************************************/ + +/***** NvGpuSemaphore Structure *****/ +/* + * NvGpuSemaphore objects are used by the GPU to synchronize multiple + * command-streams. 
+ *
+ * Please refer to the class documentation for details regarding the content
+ * of the data[] field.
+ */
+
+/* memory data structures */
+typedef volatile struct NvGpuSemaphoreRec {
+    NvV32 data[2];               /* Payload/Report data               0000-0007*/
+    struct {                     /*                                   0008-    */
+        NvV32 nanoseconds[2];    /* nanoseconds since Jan. 1, 1970       8-   f*/
+    } timeStamp;                 /*                                       -000f*/
+} NvGpuSemaphore;
+
+ /***************************************************************************\
+|*                                NvGetReport                                *|
+ \***************************************************************************/
+
+/*
+ * NV objects, starting with Kelvin, return information such as pixel counts
+ * to the user via the NV*_GET_REPORT method.
+ *
+ * The client sets the "zero" field to any nonzero value and waits until it
+ * becomes zero. NV fills in the timeStamp, value, and zero fields.
+ */
+typedef volatile struct NVGetReportRec {
+    struct {                     /*                                   0000-    */
+        NvU32 nanoseconds[2];    /* nanoseconds since Jan. 1, 1970       0-   7*/
+    } timeStamp;                 /*                                       -0007*/
+    NvU32 value;                 /* info returned depends on method   0008-000b*/
+    NvU32 zero;                  /* always written to zero            000c-000f*/
+} NvGetReport;
+
+ /***************************************************************************\
+|*                             NvRcNotification                              *|
+ \***************************************************************************/
+
+/*
+ * NV robust channel notification information is reported to clients via
+ * standard NV01_EVENT objects bound to instances of the NV*_CHANNEL_DMA and
+ * NV*_CHANNEL_GPFIFO objects.
+ */
+typedef struct NvRcNotificationRec {
+    struct {
+        NvU32 nanoseconds[2];    /* nanoseconds since Jan. 1, 1970       0-   7*/
+    } timeStamp;                 /*                                       -0007*/
+    NvU32 exceptLevel;           /* exception level                   000c-000f*/
+    NvU32 exceptType;            /* exception type                    0010-0013*/
+} NvRcNotification;
+
+ /***************************************************************************\
+|*                             NvSyncPointFence                              *|
+ \***************************************************************************/
+
+/***** NvSyncPointFence Structure *****/
+/*
+ * NvSyncPointFence objects represent a syncpoint event. The syncPointID
+ * identifies the syncpoint register and the value is the value that the
+ * register will contain right after the event occurs.
+ *
+ * If syncPointID contains NV_INVALID_SYNCPOINT_ID then this is an invalid
+ * event. This is often used to indicate an event in the past (i.e. no need
+ * to wait).
+ *
+ * For more info on syncpoints refer to the Mobile channel and syncpoint
+ * documentation.
+ */
+typedef struct NvSyncPointFenceRec {
+    NvU32 syncPointID;
+    NvU32 value;
+} NvSyncPointFence;
+
+#define NV_INVALID_SYNCPOINT_ID ((NvU32)-1)
+
+ /***************************************************************************\
+|*                                                                           *|
+|*  64 bit type definitions for use in interface structures.                 *|
+|*                                                                           *|
+ \***************************************************************************/
+
+typedef NvU64 NvOffset; /* GPU address */
+
+#define NvOffset_HI32(n)  ((NvU32)(((NvU64)(n)) >> 32))
+#define NvOffset_LO32(n)  ((NvU32)((NvU64)(n)))
+
+/*
+* There are two types of GPU-UUIDs available:
+*
+*  (1) a SHA-256 based 32 byte ID, formatted as a 64 character
+*      hexadecimal string as "GPU-%16x-%08x-%08x-%08x-%024x"; this is
+*      deprecated.
+*
+*  (2) a SHA-1 based 16 byte ID, formatted as a 32 character
+*      hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the
+*      canonical format of a UUID); this is the default.
+*/ +#define NV_GPU_UUID_SHA1_LEN (16) +#define NV_GPU_UUID_SHA256_LEN (32) +#define NV_GPU_UUID_LEN NV_GPU_UUID_SHA1_LEN + +#ifdef __cplusplus +}; +#endif + +#endif /* NVGPUTYPES_INCLUDED */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h new file mode 100644 index 0000000..28c1ba5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_I2C_H_ +#define _NV_I2C_H_ + +#define NV_I2C_MSG_WR 0x0000 +#define NV_I2C_MSG_RD 0x0001 + +typedef struct nv_i2c_msg_s +{ + NvU16 addr; + NvU16 flags; + NvU16 len; + NvU8* buf; +} nv_i2c_msg_t; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h new file mode 100644 index 0000000..bcaf304 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h @@ -0,0 +1,108 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************************************************************\
+*                                                                              *
+* Description:                                                                 *
+*     Accommodates sharing of IMP-related structures between kernel interface  *
+*     files and core RM.                                                       *
+*                                                                              *
+\******************************************************************************/
+
+#pragma once
+
+#include <nvtypes.h>
+#if defined(_MSC_VER)
+#pragma warning(disable:4324)
+#endif
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nvimpshared.finn
+//
+
+//
+// There are only a small number of discrete dramclk frequencies available on
+// the system. This structure contains IMP-relevant information associated
+// with a specific dramclk frequency.
+//
+typedef struct DRAM_CLK_INSTANCE {
+    NvU32 dram_clk_freq_khz;
+
+    NvU32 mchub_clk_khz;
+
+    NvU32 mc_clk_khz;
+
+    NvU32 max_iso_bw_kbps;
+
+    //
+    // switch_latency_ns is the maximum time required to switch the dramclk
+    // frequency to the frequency specified in dram_clk_freq_khz.
+    //
+    NvU32 switch_latency_ns;
+} DRAM_CLK_INSTANCE;
+
+//
+// This table is used to collect information from other modules that is needed
+// for RM IMP calculations. (Used on Tegra only.)
+//
+#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_UNKNOWN 0U
+#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR4  1U
+#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR5  2U
+
+typedef struct TEGRA_IMP_IMPORT_DATA {
+    //
+    // max_iso_bw_kbps stores the maximum possible ISO bandwidth available to
+    // display, assuming display is the only active ISO client. (Note that ISO
+    // bandwidth will typically be allocated to multiple clients, so display
+    // will generally not have access to the maximum possible bandwidth.)
+    //
+    NvU32 max_iso_bw_kbps;
+
+    NvU32 dram_type;
+    // On Orin, each dram channel is 16 bits wide.
+    NvU32 num_dram_channels;
+
+    //
+    // dram_clk_instance stores entries for all possible dramclk frequencies,
+    // sorted by dramclk frequency in increasing order.
+    //
+    // "24" is expected to be larger than the actual number of required entries
+    // (which is provided by a BPMP API), but it can be increased if necessary.
+    //
+    // num_dram_clk_entries is filled in with the actual number of distinct
+    // dramclk entries.
+    //
+    NvU32 num_dram_clk_entries;
+    DRAM_CLK_INSTANCE dram_clk_instance[24];
+} TEGRA_IMP_IMPORT_DATA;
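Because dram_clk_instance is sorted by frequency in increasing order, a consumer can find the lowest dramclk that still satisfies an ISO bandwidth requirement with a single forward scan. A hypothetical sketch (not part of this header):

    // Illustrative only: find the lowest dramclk meeting 'requiredIsoBwKbps'.
    static const DRAM_CLK_INSTANCE *
    pickMinDramClk(const TEGRA_IMP_IMPORT_DATA *imp, NvU32 requiredIsoBwKbps)
    {
        NvU32 i;

        // Entries are sorted by dram_clk_freq_khz in increasing order, so the
        // first match is the lowest adequate frequency.
        for (i = 0; i < imp->num_dram_clk_entries; i++) {
            if (imp->dram_clk_instance[i].max_iso_bw_kbps >= requiredIsoBwKbps)
                return &imp->dram_clk_instance[i];
        }
        return NULL; // even the highest dramclk cannot supply the requested BW
    }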
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h
new file mode 100644
index 0000000..7bf6cf3
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h
@@ -0,0 +1,607 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(NVKMS_API_TYPES_H)
+#define NVKMS_API_TYPES_H
+
+#include <nvtypes.h>
+#include <nvmisc.h>
+#include <nvlimits.h>
+
+#define NVKMS_MAX_SUBDEVICES NV_MAX_SUBDEVICES
+
+#define NVKMS_LEFT  0
+#define NVKMS_RIGHT 1
+#define NVKMS_MAX_EYES 2
+
+#define NVKMS_MAIN_LAYER 0
+#define NVKMS_OVERLAY_LAYER 1
+#define NVKMS_MAX_LAYERS_PER_HEAD 8
+
+#define NVKMS_MAX_PLANES_PER_SURFACE 3
+
+#define NVKMS_DP_ADDRESS_STRING_LENGTH 64
+
+#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff
+
+typedef NvU32 NvKmsDeviceHandle;
+typedef NvU32 NvKmsDispHandle;
+typedef NvU32 NvKmsConnectorHandle;
+typedef NvU32 NvKmsSurfaceHandle;
+typedef NvU32 NvKmsFrameLockHandle;
+typedef NvU32 NvKmsDeferredRequestFifoHandle;
+typedef NvU32 NvKmsSwapGroupHandle;
+typedef NvU32 NvKmsVblankSyncObjectHandle;
+
+struct NvKmsSize {
+    NvU16 width;
+    NvU16 height;
+};
+
+struct NvKmsPoint {
+    NvU16 x;
+    NvU16 y;
+};
+
+struct NvKmsSignedPoint {
+    NvS16 x;
+    NvS16 y;
+};
+
+struct NvKmsRect {
+    NvU16 x;
+    NvU16 y;
+    NvU16 width;
+    NvU16 height;
+};
+
+/*
+ * A 3x3 row-major matrix.
+ *
+ * The elements are 32-bit single-precision IEEE floating point values. The
+ * floating point bit pattern should be stored in NvU32s to be passed into the
+ * kernel.
+ */
+struct NvKmsMatrix {
+    NvU32 m[3][3];
+};
+
+typedef enum {
+    NVKMS_CONNECTOR_TYPE_DP            = 0,
+    NVKMS_CONNECTOR_TYPE_VGA           = 1,
+    NVKMS_CONNECTOR_TYPE_DVI_I         = 2,
+    NVKMS_CONNECTOR_TYPE_DVI_D         = 3,
+    NVKMS_CONNECTOR_TYPE_ADC           = 4,
+    NVKMS_CONNECTOR_TYPE_LVDS          = 5,
+    NVKMS_CONNECTOR_TYPE_HDMI          = 6,
+    NVKMS_CONNECTOR_TYPE_USBC          = 7,
+    NVKMS_CONNECTOR_TYPE_DSI           = 8,
+    NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9,
+    NVKMS_CONNECTOR_TYPE_UNKNOWN       = 10,
+    NVKMS_CONNECTOR_TYPE_MAX           = NVKMS_CONNECTOR_TYPE_UNKNOWN,
+} NvKmsConnectorType;
+
+static inline
+const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType)
+{
+    switch (connectorType) {
+    case NVKMS_CONNECTOR_TYPE_DP:            return "DP";
+    case NVKMS_CONNECTOR_TYPE_VGA:           return "VGA";
+    case NVKMS_CONNECTOR_TYPE_DVI_I:         return "DVI-I";
+    case NVKMS_CONNECTOR_TYPE_DVI_D:         return "DVI-D";
+    case NVKMS_CONNECTOR_TYPE_ADC:           return "ADC";
+    case NVKMS_CONNECTOR_TYPE_LVDS:          return "LVDS";
+    case NVKMS_CONNECTOR_TYPE_HDMI:          return "HDMI";
+    case NVKMS_CONNECTOR_TYPE_USBC:          return "USB-C";
+    case NVKMS_CONNECTOR_TYPE_DSI:           return "DSI";
+    case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER";
+    default: break;
+    }
+    return "Unknown";
+}
+
+typedef enum {
+    NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA     = 0,
+    NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS    = 1,
+    NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS    = 2,
+    NVKMS_CONNECTOR_SIGNAL_FORMAT_DP      = 3,
+    NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI     = 4,
+    NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5,
+    NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX     =
+        NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN,
+} NvKmsConnectorSignalFormat;
+
+/*!
+ * Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces).
+ *
+ * When flipping, the client can optionally specify a notifier and/or
+ * a semaphore to use with the flip. The surfaces used for these
The surfaces used for these
+ * should be registered with NVKMS to get an NvKmsSurfaceHandle.
+ *
+ * NvKmsNIsoSurface::offsetInWords indicates the starting location, in
+ * 32-bit words, within the surface where EVO should write the
+ * notifier or semaphore. Note that only the first 4096 bytes of a
+ * surface can be used by semaphores or notifiers; offsetInWords must
+ * allow for the semaphore or notifier to be written within the first
+ * 4096 bytes of the surface. I.e., this must be satisfied:
+ *
+ *   ((offsetInWords * 4) + elementSizeInBytes) <= 4096
+ *
+ * Where elementSizeInBytes is:
+ *
+ *  if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16
+ *  if NISO_FORMAT_LEGACY,
+ *    if overlay && notifier, elementSizeInBytes = 16
+ *    else, elementSizeInBytes = 4
+ *
+ * Note that different GPUs support different semaphore and notifier formats.
+ * Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are
+ * valid for the given device.
+ *
+ * Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but
+ * FOUR_WORD uses a format compatible with display class 907[ce], and
+ * FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by
+ * the NV_DISP_NOTIFIER definition in clc37d.h).
+ */
+enum NvKmsNIsoFormat {
+    NVKMS_NISO_FORMAT_LEGACY,
+    NVKMS_NISO_FORMAT_FOUR_WORD,
+    NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY,
+};
+
+enum NvKmsEventType {
+    NVKMS_EVENT_TYPE_DPY_CHANGED,
+    NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED,
+    NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED,
+    NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED,
+    NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED,
+    NVKMS_EVENT_TYPE_FLIP_OCCURRED,
+};
+
+typedef enum {
+    NV_EVO_SCALER_1TAP = 0,
+    NV_EVO_SCALER_2TAPS = 1,
+    NV_EVO_SCALER_3TAPS = 2,
+    NV_EVO_SCALER_5TAPS = 3,
+    NV_EVO_SCALER_8TAPS = 4,
+    NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP,
+    NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS,
+} NVEvoScalerTaps;
+
+/* This structure describes the scaling bounds for a given layer. */
+struct NvKmsScalingUsageBounds {
+    /*
+     * Maximum vertical downscale factor (scaled by 1024)
+     *
+     * For example, if the downscale factor is 1.5, then maxVDownscaleFactor
+     * would be 1.5 x 1024 = 1536.
+     */
+    NvU16 maxVDownscaleFactor;
+
+    /*
+     * Maximum horizontal downscale factor (scaled by 1024)
+     *
+     * See the example above for maxVDownscaleFactor.
+     */
+    NvU16 maxHDownscaleFactor;
+
+    /* Maximum vertical taps allowed */
+    NVEvoScalerTaps vTaps;
+
+    /* Whether vertical upscaling is allowed */
+    NvBool vUpscalingAllowed;
+};
+
+struct NvKmsUsageBounds {
+    struct {
+        NvBool usable;
+        struct NvKmsScalingUsageBounds scaling;
+        NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
+    } layer[NVKMS_MAX_LAYERS_PER_HEAD];
+};
+
+/*
+ * A 3x4 row-major colorspace conversion matrix.
+ *
+ * The output color C' is the CSC matrix M times the column vector
+ * [ R, G, B, 1 ].
+ *
+ * Each entry in the matrix is a signed 2's-complement fixed-point number with
+ * 3 integer bits and 16 fractional bits.
+ */
+struct NvKmsCscMatrix {
+    NvS32 m[3][4];
+};
+
+#define NVKMS_IDENTITY_CSC_MATRIX   \
+    (struct NvKmsCscMatrix){{       \
+        { 0x10000, 0, 0, 0 },       \
+        { 0, 0x10000, 0, 0 },       \
+        { 0, 0, 0x10000, 0 }        \
+    }}
+
+/*!
+ * A color key match bit is used in the blend equations; one can select either
+ * the src or the dst Color Key when blending. An asserted key bit means
+ * "match"; a de-asserted key bit means "nomatch".
+ *
+ * The src Color Key means using the key bit from the current layer; the dst
+ * Color Key means using the key bit from the previous layer composition stage.
The
+ * src or dst key bit is inherited by the blended pixel, and serves as the
+ * dst Color Key for the next blending stage.
+ *
+ * src: Forward the color key match bit from the current layer pixel to the
+ * next layer composition stage.
+ *
+ * dst: Forward the color key match bit from the previous composition stage
+ * pixel to the next layer composition stage.
+ *
+ * disable: Forward "1" to the next layer composition stage as the color key.
+ */
+enum NvKmsCompositionColorKeySelect {
+    NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0,
+    NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC,
+    NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST,
+};
+
+#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3
+
+/*!
+ * Composition modes used for surfaces in general.
+ * The various types of composition are:
+ *
+ * Opaque: source pixels are opaque regardless of alpha,
+ * and will occlude the destination pixel.
+ *
+ * Alpha blending: aka opacity, which can be specified
+ * for a surface in its entirety, or on a per-pixel basis.
+ *
+ * Non-premultiplied: the alpha value applies to the source pixel,
+ * and also counter-weighs the destination pixel.
+ * Premultiplied: alpha is already applied to the source pixel,
+ * so it only counter-weighs the destination pixel.
+ *
+ * Color keying: use a color key structure to decide
+ * the criteria for matching and compositing.
+ * (See NVColorKey below.)
+ */
+enum NvKmsCompositionBlendingMode {
+    /*!
+     * Modes that use no other parameters.
+     */
+    NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE,
+
+    /*!
+     * Mode that ignores both the per-pixel alpha provided by the client and
+     * the surfaceAlpha, making the source pixel totally transparent.
+     */
+    NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT,
+
+    /*!
+     * Modes that use the per-pixel alpha provided by the client;
+     * the surfaceAlpha must be set to 0.
+     */
+    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA,
+    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA,
+
+    /*!
+     * These use both the surface-wide and per-pixel alpha values.
+     * surfaceAlpha is treated as the numerator, ranging from 0 to 255,
+     * of a fraction whose denominator is 255.
+     */
+    NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA,
+    NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA,
+};
+
+static inline NvBool
+NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode)
+{
+    return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA ||
+           mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA ||
+           mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA ||
+           mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA;
+}
+
+/*!
+ * Abstract description of a color key.
+ *
+ * a, r, g, and b are component values in the same width as the framebuffer
+ * values being scanned out.
+ *
+ * match[ARGB] defines whether that component is considered when matching the
+ * color key -- TRUE means that the value of the corresponding component must
+ * match the given value for the given pixel to be considered a 'key match';
+ * FALSE means that the value of that component is not a key match criterion.
+ */
+typedef struct {
+    NvU16 a, r, g, b;
+    NvBool matchA, matchR, matchG, matchB;
+} NVColorKey;
+
+/*!
+ * Describes the composition parameters for a single layer.
+ */
+struct NvKmsCompositionParams {
+    enum NvKmsCompositionColorKeySelect colorKeySelect;
+    NVColorKey colorKey;
+    /*
+     * It is possible to assign a different blending mode to match pixels and
+     * to nomatch pixels.
blendingMode[0] is used to blend a pixel whose color
+     * key match bit is "0", and blendingMode[1] is used to blend a pixel
+     * whose color key match bit is "1".
+     *
+     * However, because of hardware restrictions, match and nomatch pixels
+     * cannot both use the blending modes PREMULT_ALPHA, NON_PREMULT_ALPHA,
+     * PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at once.
+     */
+    enum NvKmsCompositionBlendingMode blendingMode[2];
+    NvU8 surfaceAlpha;  /* Applies to all pixels of the entire surface */
+    /*
+     * Defines the composition order. A smaller value moves the layer closer to
+     * the top (away from the background). The values need not be consecutive;
+     * the requirements are that the value must be different for each of the
+     * layers owned by the head, and the value for the main layer must be
+     * the greatest one.
+     *
+     * The cursor always remains on top of all other layers; this parameter
+     * has no effect on the cursor. NVKMS assigns a default depth to each of
+     * the supported layers; by default, the depth of a layer is calculated as
+     * (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to
+     * '0' then the default depth value will be used.
+     */
+    NvU8 depth;
+};
+
+/*!
+ * Describes the composition capabilities supported by the hardware for
+ * the cursor or a layer. It describes the supported color key selects, and,
+ * for each supported color key select, the supported blending modes for
+ * match and nomatch pixels.
+ */
+struct NvKmsCompositionCapabilities {
+
+    struct {
+        /*
+         * A bitmask of the supported blending modes for match and nomatch
+         * pixels. It should be the bitwise 'or' of one or more
+         * NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values.
+         */
+        NvU32 supportedBlendModes[2];
+    } colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS];
+
+    /*
+     * A bitmask of the supported color key selects.
+     *
+     * It should be the bitwise 'or' of one or more
+     * NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*)
+     * values.
+     */
+    NvU32 supportedColorKeySelects;
+};
+
+struct NvKmsLayerCapabilities {
+    /*!
+     * Whether the layer supports window mode. If window mode is supported,
+     * then clients can set the layer's dimensions so that they're smaller than
+     * the viewport, and can also change the output position of the layer to a
+     * non-(0, 0) position.
+     *
+     * NOTE: Dimension changes are currently unsupported for the main layer,
+     * and output position changes for the main layer are currently only
+     * supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for
+     * these is coming soon, via changes to flip code.
+     */
+    NvBool supportsWindowMode :1;
+
+    /*!
+     * Whether the layer supports the HDR pipe.
+     */
+    NvBool supportsHDR :1;
+
+
+    /*!
+     * Describes the supported color key selects and blending modes for
+     * match and nomatch layer pixels.
+     */
+    struct NvKmsCompositionCapabilities composition;
+
+    /*!
+     * Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS
+     * device on the given scanout surface layer.
+     *
+     * Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported,
+     * then (1 << value) will be set in the appropriate bitmask.
+     *
+     * Note that these bitmasks just report the static SW/HW capabilities,
+     * and are a superset of the formats that IMP may allow. Clients are
+     * still expected to honor the NvKmsUsageBounds for each head.
+     */
+    NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
+};
+
+/*!
+ * Surface layouts.
+ * + * BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into + * blocks or tiles for better locality during common GPU operations. + * + * Pitch is the naive "linear" surface layout with pixels laid out sequentially + * in memory line-by-line, optionally with some padding at the end of each line + * for alignment purposes. + */ +enum NvKmsSurfaceMemoryLayout { + NvKmsSurfaceMemoryLayoutBlockLinear = 0, + NvKmsSurfaceMemoryLayoutPitch = 1, +}; + +static inline const char *NvKmsSurfaceMemoryLayoutToString( + enum NvKmsSurfaceMemoryLayout layout) +{ + switch (layout) { + default: + return "Unknown"; + case NvKmsSurfaceMemoryLayoutBlockLinear: + return "BlockLinear"; + case NvKmsSurfaceMemoryLayoutPitch: + return "Pitch"; + } +} + +typedef enum { + MUX_STATE_GET = 0, + MUX_STATE_INTEGRATED = 1, + MUX_STATE_DISCRETE = 2, + MUX_STATE_UNKNOWN = 3, +} NvMuxState; + +enum NvKmsRotation { + NVKMS_ROTATION_0 = 0, + NVKMS_ROTATION_90 = 1, + NVKMS_ROTATION_180 = 2, + NVKMS_ROTATION_270 = 3, + NVKMS_ROTATION_MIN = NVKMS_ROTATION_0, + NVKMS_ROTATION_MAX = NVKMS_ROTATION_270, +}; + +struct NvKmsRRParams { + enum NvKmsRotation rotation; + NvBool reflectionX; + NvBool reflectionY; +}; + +/*! + * Convert each possible NvKmsRRParams to a unique integer [0..15], + * so that we can describe possible NvKmsRRParams with an NvU16 bitmask. + * + * E.g. + * rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0 + * ... + * rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15 + */ +static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams) +{ + NvU8 bitPosition = (NvU8)rrParams->rotation; + if (rrParams->reflectionX) { + bitPosition |= NVBIT(2); + } + if (rrParams->reflectionY) { + bitPosition |= NVBIT(3); + } + return bitPosition; +} + +/* + * NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via + * display's isochronous interface. Examples of this type of memory are pixel + * data and LUT entries. + * + * NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via + * display's non-isochronous interface. Examples of this type of memory are + * semaphores and notifiers. + */ +typedef enum { + NVKMS_MEMORY_ISO = 0, + NVKMS_MEMORY_NISO = 1, +} NvKmsMemoryIsoType; + +typedef struct { + NvBool coherent; + NvBool noncoherent; +} NvKmsDispIOCoherencyModes; + +enum NvKmsInputColorSpace { + /* Unknown colorspace; no de-gamma will be applied */ + NVKMS_INPUT_COLORSPACE_NONE = 0, + + /* Linear, Rec.709 [-0.5, 7.5) */ + NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR = 1, + + /* PQ, Rec.2020 unity */ + NVKMS_INPUT_COLORSPACE_BT2100_PQ = 2, +}; + +enum NvKmsOutputTf { + /* + * NVKMS itself won't apply any OETF (clients are still + * free to provide a custom OLUT) + */ + NVKMS_OUTPUT_TF_NONE = 0, + NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR = 1, + NVKMS_OUTPUT_TF_PQ = 2, +}; + +/*! + * HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec. + * This is expected to match exactly with the spec. + */ +struct NvKmsHDRStaticMetadata { + /*! + * Color primaries of the data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } displayPrimaries[3]; + + /*! + * White point of colorspace data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } whitePoint; + + /** + * Maximum mastering display luminance. 
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxDisplayMasteringLuminance; + + /*! + * Minimum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of + * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF + * represents 6.5535 cd/m2. + */ + NvU16 minDisplayMasteringLuminance; + + /*! + * Maximum content light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxCLL; + + /*! + * Maximum frame-average light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxFALL; +}; + +#endif /* NVKMS_API_TYPES_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h new file mode 100644 index 0000000..d1483f8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_FORMAT_H) +#define NVKMS_FORMAT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* + * In order to interpret these pixel format namings, please take note of these + * conventions: + * - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats + * that have an interleaved chroma component across every two pixels. The + * double-underscore is a separator between these two pixel groups. + * - The triple-underscore is a separator between planes. + * - The 'N' suffix is a delimiter for the chroma decimation factor. + * + * As examples of the above rules: + * - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one + * 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8) + * and one 8-bit chroma component (V8) in pixel (N + 1). This format is + * 422-decimated since the U and V chroma samples are shared between each + * pair of adjacent pixels per line. 
+ * - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components, + * and another plane of 10-bit chroma components (U10V10). This format has no + * chroma decimation since the luma and chroma components are sampled at the + * same rate. + */ +enum NvKmsSurfaceMemoryFormat { + NvKmsSurfaceMemoryFormatI8 = 0, + NvKmsSurfaceMemoryFormatA1R5G5B5 = 1, + NvKmsSurfaceMemoryFormatX1R5G5B5 = 2, + NvKmsSurfaceMemoryFormatR5G6B5 = 3, + NvKmsSurfaceMemoryFormatA8R8G8B8 = 4, + NvKmsSurfaceMemoryFormatX8R8G8B8 = 5, + NvKmsSurfaceMemoryFormatA2B10G10R10 = 6, + NvKmsSurfaceMemoryFormatX2B10G10R10 = 7, + NvKmsSurfaceMemoryFormatA8B8G8R8 = 8, + NvKmsSurfaceMemoryFormatX8B8G8R8 = 9, + NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10, + NvKmsSurfaceMemoryFormatR16G16B16A16 = 11, + NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12, + NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13, + NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14, + NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15, + NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16, + NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17, + NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18, + NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19, + NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20, + NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21, + NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22, + NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23, + NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24, + NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25, + NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26, + NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27, + NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28, + NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29, + NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30, + NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31, + NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32, + NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33, + NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34, + NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8, + NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatY8___U8___V8_N420, +}; + +typedef struct NvKmsSurfaceMemoryFormatInfo { + enum NvKmsSurfaceMemoryFormat format; + const char *name; + NvU8 depth; + NvBool isYUV; + NvU8 numPlanes; + + union { + struct { + NvU8 bytesPerPixel; + NvU8 bitsPerPixel; + } rgb; + + struct { + NvU8 depthPerComponent; + NvU8 storageBitsPerComponent; + NvU8 horizChromaDecimationFactor; + NvU8 vertChromaDecimationFactor; + } yuv; + }; +} NvKmsSurfaceMemoryFormatInfo; + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format); + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_FORMAT_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h new file mode 100644 index 0000000..e85351c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h @@ -0,0 +1,1081 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__NVKMS_KAPI_H__)
+
+#include "nvtypes.h"
+
+#include "nv-gpu-info.h"
+#include "nvkms-api-types.h"
+#include "nvkms-format.h"
+
+#define __NVKMS_KAPI_H__
+
+#define NVKMS_KAPI_MAX_HEADS 4
+
+#define NVKMS_KAPI_MAX_CONNECTORS 16
+#define NVKMS_KAPI_MAX_CLONE_DISPLAYS 16
+
+#define NVKMS_KAPI_EDID_BUFFER_SIZE 2048
+
+#define NVKMS_KAPI_MODE_NAME_LEN 32
+
+/**
+ * \defgroup Objects
+ * @{
+ */
+
+struct NvKmsKapiDevice;
+struct NvKmsKapiMemory;
+struct NvKmsKapiSurface;
+struct NvKmsKapiChannelEvent;
+
+typedef NvU32 NvKmsKapiConnector;
+typedef NvU32 NvKmsKapiDisplay;
+
+/** @} */
+
+/**
+ * \defgroup FuncPtrs
+ * @{
+ */
+
+/*
+ * Note: The channel event proc should not call back into the NVKMS-KAPI
+ * driver; a callback into NVKMS-KAPI from the channel event proc may cause
+ * a deadlock.
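+ *
+ * As a non-normative sketch (myEventProc and myState are illustrative
+ * names, not part of this header), a conforming event proc records the
+ * event and defers any real work:
+ *
+ *     static void myEventProc(void *dataPtr, NvU32 dataU32)
+ *     {
+ *         struct myState *state = dataPtr; // hypothetical client state
+ *         state->lastEventData = dataU32;  // no calls back into NVKMS-KAPI
+ *     }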
+ */
+typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32);
+
+/** @} */
+
+/**
+ * \defgroup Structs
+ * @{
+ */
+
+struct NvKmsKapiDisplayModeTimings {
+
+    NvU32 refreshRate;
+    NvU32 pixelClockHz;
+    NvU32 hVisible;
+    NvU32 hSyncStart;
+    NvU32 hSyncEnd;
+    NvU32 hTotal;
+    NvU32 hSkew;
+    NvU32 vVisible;
+    NvU32 vSyncStart;
+    NvU32 vSyncEnd;
+    NvU32 vTotal;
+
+    struct {
+
+        NvU32 interlaced : 1;
+        NvU32 doubleScan : 1;
+        NvU32 hSyncPos : 1;
+        NvU32 hSyncNeg : 1;
+        NvU32 vSyncPos : 1;
+        NvU32 vSyncNeg : 1;
+
+    } flags;
+
+    NvU32 widthMM;
+    NvU32 heightMM;
+
+};
+
+struct NvKmsKapiDisplayMode {
+    struct NvKmsKapiDisplayModeTimings timings;
+    char name[NVKMS_KAPI_MODE_NAME_LEN];
+};
+
+#define NVKMS_KAPI_LAYER_MAX 8
+
+#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff
+#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0
+
+struct NvKmsKapiDeviceResourcesInfo {
+
+    NvU32 numHeads;
+    NvU32 numLayers[NVKMS_KAPI_MAX_HEADS];
+
+    NvU32 numConnectors;
+    NvKmsKapiConnector connectorHandles[NVKMS_KAPI_MAX_CONNECTORS];
+
+    struct {
+        NvU32 validCursorCompositionModes;
+        NvU64 supportedCursorSurfaceMemoryFormats;
+
+        struct {
+            NvU16 validRRTransforms;
+            NvU32 validCompositionModes;
+        } layer[NVKMS_KAPI_LAYER_MAX];
+
+        NvU32 minWidthInPixels;
+        NvU32 maxWidthInPixels;
+
+        NvU32 minHeightInPixels;
+        NvU32 maxHeightInPixels;
+
+        NvU32 maxCursorSizeInPixels;
+
+        NvU32 pitchAlignment;
+
+        NvU32 hasVideoMemory;
+
+        NvU8 genericPageKind;
+
+        NvBool supportsSyncpts;
+    } caps;
+
+    NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX];
+    NvBool supportsHDR[NVKMS_KAPI_LAYER_MAX];
+};
+
+#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType))
+
+typedef enum NvKmsKapiMappingTypeRec {
+    NVKMS_KAPI_MAPPING_TYPE_USER = 1,
+    NVKMS_KAPI_MAPPING_TYPE_KERNEL = 2,
+} NvKmsKapiMappingType;
+
+struct NvKmsKapiConnectorInfo {
+
+    NvKmsKapiConnector handle;
+
+    NvU32 physicalIndex;
+
+    NvU32 headMask;
+
+    NvKmsConnectorSignalFormat signalFormat;
+    NvKmsConnectorType type;
+
+    /*
+     * List of connectors that cannot be served together with this connector,
+     * because they compete for the same resources.
+     */
+    NvU32 numIncompatibleConnectors;
+    NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS];
+
+};
+
+struct NvKmsKapiStaticDisplayInfo {
+
+    NvKmsKapiDisplay handle;
+
+    NvKmsKapiConnector connectorHandle;
+
+    /* Set for DisplayPort MST displays (dynamic displays) */
+    char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH];
+
+    NvBool internal;
+
+    /* List of potential sibling displays for cloning */
+    NvU32 numPossibleClones;
+    NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS];
+
+};
+
+struct NvKmsKapiSyncpt {
+
+    /*!
+     * Possible syncpt use cases in KAPI:
+     * for a pre-syncpt, use only id and value;
+     * for a post-syncpt, use only the returned fd.
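+     *
+     * For example (a non-normative sketch; 'id' and 'threshold' are
+     * illustrative values obtained from the client's syncpt allocator):
+     *
+     *     struct NvKmsKapiSyncpt syncpt = { };
+     *     syncpt.preSyncptSpecified = NV_TRUE;
+     *     syncpt.preSyncptId = id;           // syncpt to wait on pre-flip
+     *     syncpt.preSyncptValue = threshold; // fence value to wait for
+     *     syncpt.postSyncptRequested = NV_FALSE;
+     *
+     * When postSyncptRequested is NV_TRUE instead, the post-syncpt fd is
+     * returned via NvKmsKapiLayerReplyConfig::postSyncptFd.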
+ */ + NvBool preSyncptSpecified; + NvU32 preSyncptId; + NvU32 preSyncptValue; + + NvBool postSyncptRequested; +}; + +struct NvKmsKapiLayerConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + struct NvKmsRRParams rrParams; + struct NvKmsKapiSyncpt syncptParams; + + struct NvKmsHDRStaticMetadata hdrMetadata; + NvBool hdrMetadataSpecified; + + enum NvKmsOutputTf tf; + + NvU8 minPresentInterval; + NvBool tearing; + + NvU16 srcX, srcY; + NvU16 srcWidth, srcHeight; + + NvS16 dstX, dstY; + NvU16 dstWidth, dstHeight; + + enum NvKmsInputColorSpace inputColorSpace; +}; + +struct NvKmsKapiLayerRequestedConfig { + struct NvKmsKapiLayerConfig config; + struct { + NvBool surfaceChanged : 1; + NvBool srcXYChanged : 1; + NvBool srcWHChanged : 1; + NvBool dstXYChanged : 1; + NvBool dstWHChanged : 1; + } flags; +}; + +struct NvKmsKapiCursorRequestedConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + + NvS16 dstX, dstY; + + struct { + NvBool surfaceChanged : 1; + NvBool dstXYChanged : 1; + } flags; +}; + +struct NvKmsKapiHeadModeSetConfig { + /* + * DRM distinguishes between the head state "enabled" (the specified + * configuration for the head is valid, its resources are allocated, + * etc, but the head may not necessarily be currently driving pixels + * to its output resource) and the head state "active" (the head is + * "enabled" _and_ the head is actively driving pixels to its output + * resource). + * + * This distinction is for DPMS: + * + * DPMS On : enabled=true, active=true + * DPMS Off : enabled=true, active=false + * + * "Enabled" state is indicated by numDisplays != 0. + * "Active" state is indicated by bActive == true. 
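+     *
+     * For example (illustrative, not normative; 'config' is a client-side
+     * variable of this struct type), DPMS Off on a head driving one display
+     * keeps the head enabled but inactive:
+     *
+     *     config.numDisplays = 1;          // still "enabled"
+     *     config.displays[0] = dpyHandle;  // from getDisplays()
+     *     config.bActive = NV_FALSE;       // not driving pixels
+     *
+     * Fully disabling the head would instead set numDisplays = 0.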
+ */ + NvBool bActive; + + NvU32 numDisplays; + NvKmsKapiDisplay displays[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + + struct NvKmsKapiDisplayMode mode; +}; + +struct NvKmsKapiHeadRequestedConfig { + struct NvKmsKapiHeadModeSetConfig modeSetConfig; + struct { + NvBool activeChanged : 1; + NvBool displaysChanged : 1; + NvBool modeChanged : 1; + } flags; + + struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig; + + struct NvKmsKapiLayerRequestedConfig + layerRequestedConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiRequestedModeSetConfig { + NvU32 headsMask; + struct NvKmsKapiHeadRequestedConfig + headRequestedConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiLayerReplyConfig { + int postSyncptFd; +}; + +struct NvKmsKapiHeadReplyConfig { + struct NvKmsKapiLayerReplyConfig + layerReplyConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiModeSetReplyConfig { + struct NvKmsKapiHeadReplyConfig + headReplyConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiEventDisplayChanged { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventDynamicDisplayConnected { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventFlipOccurred { + NvU32 head; + NvU32 layer; +}; + +struct NvKmsKapiDpyCRC32 { + NvU32 value; + NvBool supported; +}; + +struct NvKmsKapiCrcs { + struct NvKmsKapiDpyCRC32 compositorCrc32; + struct NvKmsKapiDpyCRC32 rasterGeneratorCrc32; + struct NvKmsKapiDpyCRC32 outputCrc32; +}; + +struct NvKmsKapiEvent { + enum NvKmsEventType type; + + struct NvKmsKapiDevice *device; + + void *privateData; + + union { + struct NvKmsKapiEventDisplayChanged displayChanged; + struct NvKmsKapiEventDynamicDisplayConnected dynamicDisplayConnected; + struct NvKmsKapiEventFlipOccurred flipOccurred; + } u; +}; + +struct NvKmsKapiAllocateDeviceParams { + /* [IN] GPU ID obtained from enumerateGpus() */ + NvU32 gpuId; + + /* [IN] Private data of device allocator */ + void *privateData; + /* [IN] Event callback */ + void (*eventCallback)(const struct NvKmsKapiEvent *event); +}; + +struct NvKmsKapiDynamicDisplayParams { + /* [IN] Display Handle returned by getDisplays() */ + NvKmsKapiDisplay handle; + + /* [OUT] Connection status */ + NvU32 connected; + + /* [IN/OUT] EDID of connected monitor/ Input to override EDID */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_KAPI_EDID_BUFFER_SIZE]; + } edid; + + /* [IN] Set true to override EDID */ + NvBool overrideEdid; + + /* [IN] Set true to force connected status */ + NvBool forceConnected; + + /* [IN] Set true to force disconnect status */ + NvBool forceDisconnected; +}; + +struct NvKmsKapiCreateSurfaceParams { + + /* [IN] Parameter of each plane */ + struct { + /* [IN] Memory allocated for plane, using allocateMemory() */ + struct NvKmsKapiMemory *memory; + /* [IN] Offsets within the memory object */ + NvU32 offset; + /* [IN] Byte pitch of plane */ + NvU32 pitch; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + /* [IN] Width of the surface, in pixels */ + NvU32 width; + /* [IN] Height of the surface, in pixels */ + NvU32 height; + + /* [IN] The format describing number of planes and their content */ + enum NvKmsSurfaceMemoryFormat format; + + /* [IN] Whether to override the surface objects memory layout parameters + * with those provided here. */ + NvBool explicit_layout; + /* [IN] Whether the surface layout is block-linear or pitch. Used only + * if explicit_layout is NV_TRUE */ + enum NvKmsSurfaceMemoryLayout layout; + /* [IN] block-linear block height of surface. 
Used only when
+     * explicit_layout is NV_TRUE and layout is
+     * NvKmsSurfaceMemoryLayoutBlockLinear */
+    NvU8 log2GobsPerBlockY;
+};
+
+enum NvKmsKapiAllocationType {
+    NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT = 0,
+    NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER = 1,
+    NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
+};
+
+struct NvKmsKapiFunctionsTable {
+
+    /*!
+     * NVIDIA Driver version string.
+     */
+    const char *versionString;
+
+    /*!
+     * System Information.
+     */
+    struct {
+        /* Availability of write combining support for video memory */
+        NvBool bAllowWriteCombining;
+    } systemInfo;
+
+    /*!
+     * Enumerate the available physical GPUs that can be used with NVKMS.
+     *
+     * \param [out] gpuInfo The information of the enumerated GPUs.
+     *                      It is an array of NVIDIA_MAX_GPUS elements.
+     *
+     * \return Count of enumerated gpus.
+     */
+    NvU32 (*enumerateGpus)(nv_gpu_info_t *gpuInfo);
+
+    /*!
+     * Allocate an NVKMS device, which can be used to query and allocate
+     * resources on the GPU and to perform a modeset.
+     *
+     * \param [in] params Parameters required for device allocation.
+     *
+     * \return A valid device handle on success, NULL on failure.
+     */
+    struct NvKmsKapiDevice* (*allocateDevice)
+    (
+        const struct NvKmsKapiAllocateDeviceParams *params
+    );
+
+    /*!
+     * Frees a device allocated by allocateDevice() and all its resources.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *                    This function is a no-op if device is not valid.
+     */
+    void (*freeDevice)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Grab ownership of the device; ownership is required to do a modeset.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*grabOwnership)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Release ownership of the device.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     */
+    void (*releaseOwnership)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Registers for notification, via
+     * NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified
+     * in interestMask.
+     *
+     * This call does nothing if eventCallback is NULL when NvKmsKapiDevice
+     * is allocated.
+     *
+     * Supported events are DPY_CHANGED and DYNAMIC_DPY_CONNECTED.
+     *
+     * \param [in] device A device returned by allocateDevice().
+     *
+     * \param [in] interestMask A mask of events requested to listen.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*declareEventInterest)
+    (
+        const struct NvKmsKapiDevice *device,
+        const NvU32 interestMask
+    );
+
+    /*!
+     * Retrieve the various static resources (connectors, heads, etc.)
+     * present on the device, and their capabilities.
+     *
+     * \param [in]     device A device allocated using allocateDevice().
+     *
+     * \param [in/out] info A pointer to an NvKmsKapiDeviceResourcesInfo
+     *                      struct that the call will fill out with the number
+     *                      of resources and their handles.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDeviceResourcesInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiDeviceResourcesInfo *info
+    );
+
+    /*!
+     * Retrieve the number of displays on a device and an array of handles to
+     * those displays.
+     *
+     * \param [in]     device A device allocated using
+     *                        allocateDevice().
+     *
+     * \param [in/out] displayCount The caller should set this to the size
+     *                              of the displayHandles array it passed in.
The function will set it to the
+     *                              number of displays returned, or to the
+     *                              total number of displays on the device
+     *                              if displayHandles is NULL or the array
+     *                              is smaller than the number of displays.
+     *
+     * \param [out] displayHandles An array of display handles with
+     *                             displayCount entries.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDisplays)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles
+    );
+
+    /*!
+     * Retrieve information about a specified connector.
+     *
+     * \param [in]  device A device allocated using allocateDevice().
+     *
+     * \param [in]  connector Which connector to query; a handle returned by
+     *                        getDeviceResourcesInfo().
+     *
+     * \param [out] info A pointer to an NvKmsKapiConnectorInfo struct
+     *                   that the call will fill out with information
+     *                   about the connector.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getConnectorInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info
+    );
+
+    /*!
+     * Retrieve information about a specified display.
+     *
+     * \param [in]  device A device allocated using allocateDevice().
+     *
+     * \param [in]  display Which display to query; a handle returned by
+     *                      getDisplays().
+     *
+     * \param [out] info A pointer to an NvKmsKapiStaticDisplayInfo struct
+     *                   that the call will fill out with information
+     *                   about the display.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getStaticDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info
+    );
+
+    /*!
+     * Detect/force the connection status/EDID of a display.
+     *
+     * \param [in/out] params Parameters containing the display
+     *                        handle, EDID, and flags to force connection
+     *                        status.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDynamicDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiDynamicDisplayParams *params
+    );
+
+    /*!
+     * Allocate some unformatted video memory of the specified size.
+     *
+     * This function allocates video memory on the specified GPU.
+     * It should be suitable for mapping on the CPU as a pitch
+     * linear or block-linear surface.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] layout BlockLinear or Pitch.
+     *
+     * \param [in] type Allocation type.
+     *
+     * \param [in] size Size, in bytes, of the memory to allocate.
+     *
+     * \param [in/out] compressible For input, non-zero if a compression
+     *                              backing store should be allocated for
+     *                              the memory; for output, non-zero if a
+     *                              compression backing store was
+     *                              allocated for the memory.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*allocateVideoMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        enum NvKmsSurfaceMemoryLayout layout,
+        enum NvKmsKapiAllocationType type,
+        NvU64 size,
+        NvU8 *compressible
+    );
+
+    /*!
+     * Allocate some unformatted system memory of the specified size.
+     *
+     * This function allocates system memory. It should be suitable
+     * for mapping on the CPU as a pitch linear or block-linear surface.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] layout BlockLinear or Pitch.
+     *
+     * \param [in] type Allocation type.
+     *
+     * \param [in] size Size, in bytes, of the memory to allocate.
+     *
+     * \param [in/out] compressible For input, non-zero if a compression
+     *                              backing store should be allocated for
+     *                              the memory; for output, non-zero if a
+     *                              compression backing store was
+     *                              allocated for the memory.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*allocateSystemMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        enum NvKmsSurfaceMemoryLayout layout,
+        enum NvKmsKapiAllocationType type,
+        NvU64 size,
+        NvU8 *compressible
+    );
+
+    /*!
+     * Import some unformatted memory of the specified size.
+     *
+     * This function accepts a driver-specific parameter structure representing
+     * memory allocated elsewhere and imports it to a NVKMS KAPI memory object
+     * of the specified size.
+     *
+     * \param [in] device A device allocated using allocateDevice(). The
+     *                    memory being imported must have been allocated
+     *                    against the same physical device this device object
+     *                    represents.
+     *
+     * \param [in] size Size, in bytes, of the memory being imported.
+     *
+     * \param [in] nvKmsParamsUser Userspace pointer to driver-specific
+     *                             parameters describing the memory object being
+     *                             imported.
+     *
+     * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*importMemory)
+    (
+        struct NvKmsKapiDevice *device, NvU64 size,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Duplicate an existing NVKMS KAPI memory object, taking a reference on the
+     * underlying memory.
+     *
+     * \param [in] device A device allocated using allocateDevice(). The
+     *                    memory being imported need not have been allocated
+     *                    against the same physical device this device object
+     *                    represents.
+     *
+     * \param [in] srcDevice The device associated with srcMemory.
+     *
+     * \param [in] srcMemory The memory object to duplicate.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*dupMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiDevice *srcDevice,
+        const struct NvKmsKapiMemory *srcMemory
+    );
+
+    /*!
+     * Export the specified memory object to a userspace object handle.
+     *
+     * This function accepts a driver-specific parameter structure representing
+     * a new handle to be assigned to an existing NVKMS KAPI memory object.
+     *
+     * \param [in] device A device allocated using allocateDevice(). The
+     *                    memory being exported must have been created against
+     *                    or imported to the same device object, and the
+     *                    destination object handle must be valid for this
+     *                    device as well.
+     *
+     * \param [in] memory The memory object to export.
+     *
+     * \param [in] nvKmsParamsUser Userspace pointer to driver-specific
+     *                             parameters specifying a handle to add to the
+     *                             memory object being exported.
+     *
+     * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*exportMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Free memory allocated using allocateMemory().
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] memory Memory allocated using allocateMemory().
+     *
+     * \return NV_TRUE on success, NV_FALSE if memory is in use.
+     */
+    void (*freeMemory)
+    (
+        struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory
+    );
+
+    /*!
+     * Create MMIO mappings for a memory object allocated using
+     * allocateMemory().
+     *
+     * \param [in]  device A device allocated using allocateDevice().
+     *
+     * \param [in]  memory Memory allocated using allocateMemory().
+     *
+     * \param [in]  type Userspace or kernelspace mapping.
+     *
+     * \param [out] ppLinearAddress The MMIO address where the memory object is
+     *                              mapped.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*mapMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+        void **ppLinearAddress
+    );
+
+    /*!
+     * Destroy MMIO mappings created for a memory object allocated using
+     * allocateMemory().
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] memory Memory allocated using allocateMemory().
+     *
+     * \param [in] type Userspace or kernelspace mapping.
+     *
+     * \param [in] pLinearAddress The MMIO address returned by mapMemory().
+     */
+    void (*unmapMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+        const void *pLinearAddress
+    );
+
+    /*!
+     * Create a formatted surface from an NvKmsKapiMemory object.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] params Parameters for the surface creation.
+     *
+     * \return A valid surface handle on success, NULL on failure.
+     */
+    struct NvKmsKapiSurface* (*createSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiCreateSurfaceParams *params
+    );
+
+    /*!
+     * Destroy a surface created by createSurface().
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] surface A surface created using createSurface().
+     */
+    void (*destroySurface)
+    (
+        struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface
+    );
+
+    /*!
+     * Enumerate the mode timings available on a given display.
+     *
+     * \param [in]  device A device allocated using allocateDevice().
+     *
+     * \param [in]  display A display handle returned by getDisplays().
+     *
+     * \param [in]  modeIndex A mode index (any integer >= 0).
+     *
+     * \param [out] mode A pointer to an NvKmsKapiDisplayMode struct that
+     *                   the call will fill out with the mode-timings of the
+     *                   mode at index modeIndex.
+     *
+     * \param [out] valid Returns TRUE in this param if the mode-timings of
+     *                    the mode at index modeIndex are valid on the display.
+     *
+     * \param [out] preferredMode Returns TRUE if this mode is marked as
+     *                            "preferred" by the EDID.
+     *
+     * \return Value >= 1 if more modes are available, 0 if no more modes are
+     *         available, and value < 0 on failure.
+     */
+    int (*getDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, NvU32 modeIndex,
+        struct NvKmsKapiDisplayMode *mode, NvBool *valid,
+        NvBool *preferredMode
+    );
+
+    /*!
+     * Validate the given mode timings for a given display.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] display A display handle returned by getDisplays().
+     *
+     * \param [in] mode A pointer to an NvKmsKapiDisplayMode struct
+     *                  filled with the mode-timings to validate.
+     *
+     * \return NV_TRUE if the mode-timings are valid, NV_FALSE on failure.
+     */
+    NvBool (*validateDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode
+    );
+
+    /*!
+     * Apply a mode configuration to the device.
+     *
+     * A client may flag only the damaged parts of the configuration, but it
+     * must still describe the entire configuration.
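+     *
+     * A minimal sketch (illustrative; 'funcs', 'dev', and 'config' are
+     * client-side names, and error handling is elided):
+     *
+     *     struct NvKmsKapiModeSetReplyConfig reply = { };
+     *     // Validate first, without committing ...
+     *     if (funcs->applyModeSetConfig(dev, &config, &reply, NV_FALSE)) {
+     *         // ... then commit the same configuration.
+     *         funcs->applyModeSetConfig(dev, &config, &reply, NV_TRUE);
+     *     }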
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] requestedConfig Parameters describing a device-wide
+     *                             display configuration.
+     *
+     * \param [in] commit If set to 0, the call will only validate the
+     *                    mode configuration; it will not apply it.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*applyModeSetConfig)
+    (
+        struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
+        struct NvKmsKapiModeSetReplyConfig *replyConfig,
+        const NvBool commit
+    );
+
+    /*!
+     * Return the status of a flip.
+     *
+     * \param [in]  device A device allocated using allocateDevice().
+     *
+     * \param [in]  head A head returned by getDeviceResourcesInfo().
+     *
+     * \param [in]  layer A layer index.
+     *
+     * \param [out] pending Returns TRUE if the head has a pending flip for
+     *                      the given layer.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getFlipPendingStatus)
+    (
+        const struct NvKmsKapiDevice *device,
+        const NvU32 head,
+        const NvU32 layer,
+        NvBool *pending
+    );
+
+    /*!
+     * Allocate an event callback.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] proc Function pointer to call when triggered.
+     *
+     * \param [in] data Argument to pass into the function.
+     *
+     * \param [in] nvKmsParamsUser Userspace pointer to driver-specific
+     *                             parameters describing the event callback
+     *                             being created.
+     *
+     * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct.
+     *
+     * \return struct NvKmsKapiChannelEvent* on success, NULL on failure.
+     */
+    struct NvKmsKapiChannelEvent* (*allocateChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsChannelEventProc *proc,
+        void *data,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Free an event callback.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] cb struct NvKmsKapiChannelEvent* returned from
+     *                allocateChannelEvent().
+     */
+    void (*freeChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiChannelEvent *cb
+    );
+
+    /*!
+     * Get the 32-bit CRC value for the last contents presented on the
+     * specified head.
+     *
+     * \param [in]  device A device allocated using allocateDevice().
+     *
+     * \param [in]  head A head returned by getDeviceResourcesInfo().
+     *
+     * \param [out] crc32 The CRC32 generated from the content currently
+     *                    presented onto the given head.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getCRC32)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 head,
+        struct NvKmsKapiCrcs *crc32
+    );
+
+    /*!
+     * Get the list of allocation pages corresponding to the specified memory
+     * object.
+     *
+     * \param [in]  device A device allocated using allocateDevice().
+     *
+     * \param [in]  memory The memory object for which we need to find the
+     *                     list of allocation pages and the number of pages.
+     *
+     * \param [out] pPages A pointer to the list of NvU64 pointers. The caller
+     *                     should free pPages on success using
+     *                     freeMemoryPages().
+     *
+     * \param [out] pNumPages Returns the total number of NvU64 pointers
+     *                        returned in pPages.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getMemoryPages)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory,
+        NvU64 **pPages,
+        NvU32 *pNumPages
+    );
+
+    /*!
+     * Free the list of allocation pages returned by getMemoryPages().
+     *
+     * \param [in] pPages A list of NvU64 pointers allocated by getMemoryPages().
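+     *
+     * A minimal pairing sketch (illustrative names, error handling elided):
+     *
+     *     NvU64 *pages = NULL;
+     *     NvU32 numPages = 0;
+     *     if (funcs->getMemoryPages(dev, mem, &pages, &numPages)) {
+     *         // ... consume the numPages page addresses ...
+     *         funcs->freeMemoryPages(pages);
+     *     }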
+     *
+     */
+    void (*freeMemoryPages)
+    (
+        NvU64 *pPages
+    );
+
+    /*
+     * Import an SGT as a memory handle.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] sgt SGT pointer.
+     * \param [in] gem GEM pointer that pinned the SGT, to be refcounted.
+     *
+     * \param [in] limit Size, in bytes, of the memory backed by the SGT.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromSgt)(struct NvKmsKapiDevice *device,
+                                    NvP64 sgt,
+                                    NvP64 gem,
+                                    NvU32 limit);
+
+    /*
+     * Import a dma-buf into a memory handle.
+     *
+     * \param [in] device A device allocated using allocateDevice().
+     *
+     * \param [in] dmaBuf DMA-BUF pointer.
+     *
+     * \param [in] limit Size, in bytes, of the dma-buf.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromDmaBuf)(struct NvKmsKapiDevice *device,
+                                       NvP64 dmaBuf,
+                                       NvU32 limit);
+
+};
+
+/** @} */
+
+/**
+ * \defgroup Functions
+ * @{
+ */
+
+NvBool nvKmsKapiGetFunctionsTable
+(
+    struct NvKmsKapiFunctionsTable *funcsTable
+);
+
+/** @} */
+
+#endif /* defined(__NVKMS_KAPI_H__) */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h
new file mode 100644
index 0000000..e119f67
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h
@@ -0,0 +1,59 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nvlimits.finn
+//
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * This is the maximum number of GPUs supported in a single system.
+ */
+#define NV_MAX_DEVICES 32
+
+/*
+ * This is the maximum number of subdevices within a single device.
+ */
+#define NV_MAX_SUBDEVICES 8
+
+/*
+ * This is the maximum length of the process name string.
+ */
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+/*
+ * This is the maximum number of heads per GPU.
+ */
+#define NV_MAX_HEADS 4
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h
new file mode 100644
index 0000000..210e237
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h
@@ -0,0 +1,915 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * nvmisc.h
+ */
+#ifndef __NV_MISC_H
+#define __NV_MISC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif //__cplusplus
+
+#include "nvtypes.h"
+
+#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
+//
+// Miscellaneous macros useful for bit field manipulations
+//
+// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a.
+#ifndef BIT
+#define BIT(b) (1U<<(b))
+#endif
+#ifndef BIT32
+#define BIT32(b) ((NvU32)1U<<(b))
+#endif
+#ifndef BIT64
+#define BIT64(b) ((NvU64)1U<<(b))
+#endif
+
+#endif
+
+//
+// It is recommended to use the following bit macros to avoid macro name
+// collisions with other src code bases.
+//
+#ifndef NVBIT
+#define NVBIT(b) (1U<<(b))
+#endif
+#ifndef NVBIT_TYPE
+#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
+#endif
+#ifndef NVBIT32
+#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
+#endif
+#ifndef NVBIT64
+#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
+#endif
+
+// Helper macros for 32 bit bitmasks
+#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
+#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
+#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
+#define NV_BITMASK32_SET(pChannelMask, chId) \
+    (pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
+#define NV_BITMASK32_GET(pChannelMask, chId) \
+    ((pChannelMask)[NV_BITMASK32_IDX(chId)] & NVBIT(NV_BITMASK32_OFFSET(chId)))
+
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', result is in range of 0-31.
+#define BIT_IDX_32(n)                            \
+    (((((n) & 0xFFFF0000U) != 0U) ? 0x10U: 0U) | \
+     ((((n) & 0xFF00FF00U) != 0U) ? 0x08U: 0U) | \
+     ((((n) & 0xF0F0F0F0U) != 0U) ? 0x04U: 0U) | \
+     ((((n) & 0xCCCCCCCCU) != 0U) ? 0x02U: 0U) | \
+     ((((n) & 0xAAAAAAAAU) != 0U) ? 0x01U: 0U) )
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', result is in range of 0-63.
+#define BIT_IDX_64(n)                                        \
+    (((((n) & 0xFFFFFFFF00000000ULL) != 0U) ? 0x20U: 0U) | \
+     ((((n) & 0xFFFF0000FFFF0000ULL) != 0U) ? 0x10U: 0U) | \
+     ((((n) & 0xFF00FF00FF00FF00ULL) != 0U) ? 0x08U: 0U) | \
+     ((((n) & 0xF0F0F0F0F0F0F0F0ULL) != 0U) ? 0x04U: 0U) | \
+     ((((n) & 0xCCCCCCCCCCCCCCCCULL) != 0U) ? 0x02U: 0U) | \
+     ((((n) & 0xAAAAAAAAAAAAAAAAULL) != 0U) ? 0x01U: 0U) )
+
+/*!
+ * DRF MACRO README:
+ *
+ * Glossary:
+ *     DRF: Device, Register, Field
+ *     FLD: Field
+ *     REF: Reference
+ *
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA                   0xDEADBEEF
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_GAMMA             27:0
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA             31:28
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ZERO   0x00000000
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ONE    0x00000001
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_TWO    0x00000002
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE  0x00000003
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FOUR   0x00000004
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FIVE   0x00000005
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SIX    0x00000006
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SEVEN  0x00000007
+ *
+ *
+ * Device = _DEVICE_OMEGA
+ *     This is the common "base" that a group of registers in a manual share
+ *
+ * Register = _REGISTER_ALPHA
+ *     Register for a given block of defines is the common root for one or more fields and constants
+ *
+ * Field(s) = _FIELD_GAMMA, _FIELD_ZETA
+ *     These are the bit ranges for a given field within the register
+ *     Fields are not required to have defined constant values (enumerations)
+ *
+ * Constant(s) = _ZERO, _ONE, _TWO, ...
+ *     These are named values (enums) a field can contain; the width of the constants should not be larger than the field width
+ *
+ * MACROS:
+ *
+ * DRF_SHIFT:
+ *     Bit index of the lower bound of a field
+ *     DRF_SHIFT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 28
+ *
+ * DRF_SHIFT_RT:
+ *     Bit index of the higher bound of a field
+ *     DRF_SHIFT_RT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 31
+ *
+ * DRF_MASK:
+ *     Produces a mask of 1-s equal to the width of a field
+ *     DRF_MASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF (four 1s starting at bit 0)
+ *
+ * DRF_SHIFTMASK:
+ *     Produces a mask of 1s equal to the width of a field at the location of the field
+ *     DRF_SHIFTMASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF0000000
+ *
+ * DRF_DEF:
+ *     Shifts a field constant's value to the correct field offset
+ *     DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE) == 0x30000000
+ *
+ * DRF_NUM:
+ *     Shifts a number to the location of a particular field
+ *     DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3) == 0x30000000
+ *     NOTE: If the value passed in is wider than the field, the value's high bits will be truncated
+ *
+ * DRF_SIZE:
+ *     Provides the width of the field in bits
+ *     DRF_SIZE(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 4
+ *
+ * DRF_VAL:
+ *     Provides the value of an input within the field specified
+ *     DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234) == 0xA
+ *     This is sort of like the inverse of DRF_NUM
+ *
+ * DRF_IDX...:
+ *     These macros are similar to the above but for fields that accept an index argument
+ *
+ * FLD_SET_DRF:
+ *     Set the field bits in a given value with the given field constant
+ *     NvU32 x = 0x00001234;
+ *     x = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, x);
+ *     x == 0x30001234;
+ *
+ * FLD_SET_DRF_NUM:
+ *     Same as FLD_SET_DRF but instead of using a field constant a literal/variable is passed in
* NvU32 x = 0x00001234; + * x = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xF, x); + * x == 0xF0001234; + * + * FLD_IDX...: + * These macros are similar to the above but for fields that accept an index argument + * + * FLD_TEST_DRF: + * Test if location specified by drf in 'v' has the same value as NV_drfc + * FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE + * + * FLD_TEST_DRF_NUM: + * Test if locations specified by drf in 'v' have the same value as n + * FLD_TEST_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, 0x3000ABCD) == NV_TRUE + * + * REF_DEF: + * Like DRF_DEF but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_DEF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE) == 0x30000000 + * + * REF_VAL: + * Like DRF_VAL but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xABCD1234) == 0xA + * + * REF_NUM: + * Like DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA) == 0xA0000000 + * + * FLD_SET_REF_NUM: + * Like FLD_SET_DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * NvU32 x = 0x00001234; + * x = FLD_SET_REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xF, x); + * x == 0xF0001234; + * + * FLD_TEST_REF: + * Like FLD_TEST_DRF but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * FLD_TEST_REF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE + * + * Other macros: + * There is a plethora of other macros below that extend the above (notably Multi-Word (MW), 64-bit, and some + * reg read/write variations). I hope these are self-explanatory. If you have a need to use them, you + * probably have some knowledge of how they work. + */ + +// tegra mobile uses nvmisc_macros.h and can't access nvmisc.h... and sometimes both get included. +#ifndef _NVMISC_MACROS_H +// Use Coverity Annotation to mark issues as false positives/ignore when using single bit defines. +#define DRF_ISBIT(bitval,drf) \ + ( /* coverity[identical_branches] */ \ + (bitval != 0) ? drf ) +#define DEVICE_BASE(d) (0?d) // what's up with this name? totally non-parallel to the macros below +#define DEVICE_EXTENT(d) (1?d) // what's up with this name? totally non-parallel to the macros below +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#ifdef MISRA_14_3 +#define DRF_BASE(drf) (drf##_LOW_FIELD) +#define DRF_EXTENT(drf) (drf##_HIGH_FIELD) +#define DRF_SHIFT(drf) ((drf##_LOW_FIELD) % 32U) +#define DRF_SHIFT_RT(drf) ((drf##_HIGH_FIELD) % 32U) +#define DRF_MASK(drf) (0xFFFFFFFFU >> (31U - ((drf##_HIGH_FIELD) % 32U) + ((drf##_LOW_FIELD) % 32U))) +#else +#define DRF_BASE(drf) (NV_FALSE?drf) // much better +#define DRF_EXTENT(drf) (NV_TRUE?drf) // much better +#define DRF_SHIFT(drf) (((NvU32)DRF_BASE(drf)) % 32U) +#define DRF_SHIFT_RT(drf) (((NvU32)DRF_EXTENT(drf)) % 32U) +#define DRF_MASK(drf) (0xFFFFFFFFU>>(31U - DRF_SHIFT_RT(drf) + DRF_SHIFT(drf))) +#endif +#define DRF_DEF(d,r,f,c) (((NvU32)(NV ## d ## r ## f ## c))<<DRF_SHIFT(NV ## d ## r ## f)) +#define DRF_NUM(d,r,f,n) ((((NvU32)(n))&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f)) +#define DRF_VAL(d,r,f,v) ((((NvU32)(v))>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f)) +#else +#define DRF_BASE(drf) (0?drf) // much better +#define DRF_EXTENT(drf) (1?drf) // much better +#define DRF_SHIFT(drf) ((DRF_ISBIT(0,drf)) % 32) +#define DRF_SHIFT_RT(drf) ((DRF_ISBIT(1,drf)) % 32) +#define DRF_MASK(drf) (0xFFFFFFFFU>>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32))) +#define DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV ## d ## r ## f)) +#define DRF_NUM(d,r,f,n) (((n)&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f)) +#define DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f)) +#endif +#define DRF_SHIFTMASK(drf) (DRF_MASK(drf)<<(DRF_SHIFT(drf))) +#define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1U)
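+/*
+ * Worked usage sketch (reuses the hypothetical NV_DEVICE_OMEGA defines from
+ * the README above; illustration only, not a real manual register):
+ *
+ *     NvU32 reg = 0;
+ *     reg = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, reg);
+ *     reg = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_GAMMA, 0x1234, reg);
+ *     // reg == 0x30001234
+ *     if (FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, reg))
+ *     {
+ *         NvU32 gamma = DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_GAMMA, reg); // 0x1234
+ *     }
+ */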
+ +// Signed version of DRF_VAL, which takes care of extending sign bit. +#define DRF_VAL_SIGNED(d,r,f,v) (((DRF_VAL(d,r,f,(v)) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U))) +#define DRF_IDX_DEF(d,r,f,i,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_DEF(d,r,f,i,o,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i,o))) +#define DRF_IDX_NUM(d,r,f,i,n) (((n)&DRF_MASK(NV##d##r##f(i)))<<DRF_SHIFT(NV##d##r##f(i))) +#define DRF_IDX_VAL(d,r,f,i,v) (((v)>>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o))) +// Fractional version of DRF_VAL which reads Fx.y fixed point number (x.y)*z +#define DRF_VAL_FRAC(d,r,x,y,v,z) ((DRF_VAL(d,r,x,(v))*z) + ((DRF_VAL(d,r,y,v)*z) / (1<<DRF_SIZE(NV##d##r##y)))) + +// +// 64 Bit Versions +// +#define DRF_SHIFT64(drf) ((DRF_ISBIT(0,drf)) % 64) +#define DRF_MASK64(drf) (NV_U64_MAX>>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64))) +#define DRF_SHIFTMASK64(drf) (DRF_MASK64(drf)<<(DRF_SHIFT64(drf))) + +#define DRF_DEF64(d,r,f,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV ## d ## r ## f)) +#define DRF_NUM64(d,r,f,n) ((((NvU64)(n))&DRF_MASK64(NV ## d ## r ## f))<<DRF_SHIFT64(NV ## d ## r ## f)) +#define DRF_VAL64(d,r,f,v) ((((NvU64)(v))>>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f)) + +#define DRF_VAL_SIGNED64(d,r,f,v) (((DRF_VAL64(d,r,f,(v)) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1))) +#define DRF_IDX_DEF64(d,r,f,i,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i,o))) +#define DRF_IDX_NUM64(d,r,f,i,n) ((((NvU64)(n))&DRF_MASK64(NV##d##r##f(i)))<<DRF_SHIFT64(NV##d##r##f(i))) +#define DRF_IDX_VAL64(d,r,f,i,v) ((((NvU64)(v))>>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o))) + +#define FLD_SET_DRF64(d,r,f,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c)) +#define FLD_SET_DRF_NUM64(d,r,f,n,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n)) +#define FLD_IDX_SET_DRF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c)) +#define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c)) +#define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c)) +#define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n)) +#define FLD_SET_DRF_IDX64(d,r,f,c,i,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i))) + +#define FLD_TEST_DRF64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) == NV##d##r##f##c) +#define FLD_TEST_DRF_AND64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) & NV##d##r##f##c) +#define FLD_TEST_DRF_NUM64(d,r,f,n,v) (DRF_VAL64(d, r, f, (v)) == (n)) +#define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, (v)) == NV##d##r##f##c) +#define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, (v)) == NV##d##r##f##c) + +#define REF_DEF64(drf,d) (((drf ## d)&DRF_MASK64(drf))<<DRF_SHIFT64(drf)) +#define REF_VAL64(drf,v) (((v)>>DRF_SHIFT64(drf))&DRF_MASK64(drf)) +#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3) +#define REF_NUM64(drf,n) (((NvU64)(n)&(0xFFFFFFFFFFFFFFFFU>>(63U-((drf##_HIGH_FIELD) % 63U)+((drf##_LOW_FIELD) % 63U)))) << ((drf##_LOW_FIELD) % 63U)) +#else +#define REF_NUM64(drf,n) (((NvU64)(n)&DRF_MASK64(drf))<<DRF_SHIFT64(drf)) +#endif + +#define REF_DEF(drf,d) (((drf ## d)&DRF_MASK(drf))<<DRF_SHIFT(drf)) +#define REF_VAL(drf,v) (((v)>>DRF_SHIFT(drf))&DRF_MASK(drf)) +#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3) +#define REF_NUM(drf,n) (((n)&(0xFFFFFFFFU>>(31U-((drf##_HIGH_FIELD) % 32U)+((drf##_LOW_FIELD) % 32U)))) << ((drf##_LOW_FIELD) % 32U)) +#else +#define REF_NUM(drf,n) (((n)&DRF_MASK(drf))<<DRF_SHIFT(drf)) +#endif + +#define FLD_SET_DRF(d,r,f,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c)) +#define FLD_SET_DRF_NUM(d,r,f,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_NUM(d,r,f,n)) +#define FLD_IDX_SET_DRF(d,r,f,i,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c)) +#define FLD_IDX_OFFSET_SET_DRF(d,r,f,i,o,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF(d,r,f,i,o,c)) +#define FLD_IDX_SET_DRF_NUM(d,r,f,i,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_NUM(d,r,f,i,n)) +#define FLD_SET_DRF_IDX(d,r,f,c,i,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c(i))) + +#define FLD_TEST_DRF(d,r,f,c,v) (DRF_VAL(d, r, f, (v)) == NV##d##r##f##c) +#define FLD_TEST_DRF_AND(d,r,f,c,v) (DRF_VAL(d, r, f, (v)) & NV##d##r##f##c) +#define FLD_TEST_DRF_NUM(d,r,f,n,v) (DRF_VAL(d, r, f, (v)) == (n)) +#define FLD_IDX_TEST_DRF(d,r,f,i,c,v) (DRF_IDX_VAL(d, r, f, i, (v)) == NV##d##r##f##c) +#define FLD_IDX_OFFSET_TEST_DRF(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL(d, r, f, i, o, (v)) == NV##d##r##f##c) + +#define FLD_SET_REF_NUM(drf,n,v) (((v) & ~DRF_SHIFTMASK(drf)) | REF_NUM(drf,n)) +#define FLD_TEST_REF(drf,c,v) (REF_VAL(drf, (v)) == drf##c) +#define FLD_TEST_REF_NUM(drf,n,v) (REF_VAL(drf, (v)) == (n)) + +#define CR_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c) << DRF_SHIFT(CR ## d ## r ## f)) +#define CR_DRF_NUM(d,r,f,n) (((n)&DRF_MASK(CR ## d ## r ## f)) << DRF_SHIFT(CR ## d ## r ## f)) +#define CR_DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f))
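+/*
+ * 64-bit sketch (assumes a hypothetical field
+ * #define NV_DEVICE_OMEGA_REGISTER_BRAVO_FIELD_HI 63:32; illustration only):
+ *
+ *     NvU64 v = 0;
+ *     v = FLD_SET_DRF_NUM64(_DEVICE_OMEGA, _REGISTER_BRAVO, _FIELD_HI, 0xDEADBEEFULL, v);
+ *     // v == 0xDEADBEEF00000000
+ *     NvU32 hi = (NvU32)DRF_VAL64(_DEVICE_OMEGA, _REGISTER_BRAVO, _FIELD_HI, v); // 0xDEADBEEF
+ */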
+ +// Multi-word (MW) field manipulations. For multi-word structures (e.g., Fermi SPH), +// fields may have bit numbers beyond 32. To avoid errors using "classic" multi-word macros, +// all the field extents are defined as "MW(X)". For example, MW(127:96) means +// the field is in bits 0-31 of word number 3 of the structure. +// +// DRF_VAL_MW() macro is meant to be used for native endian 32-bit aligned 32-bit word data, +// not for byte stream data. +// +// DRF_VAL_BS() macro is for byte stream data used in fbQueryBIOS_XXX(). +// +#define DRF_EXPAND_MW(drf) drf // used to turn "MW(a:b)" into "a:b" +#define DRF_PICK_MW(drf,v) ((v)? DRF_EXPAND_##drf) // picks low or high bits +#define DRF_WORD_MW(drf) (DRF_PICK_MW(drf,0)/32) // which word in a multi-word array +#define DRF_BASE_MW(drf) (DRF_PICK_MW(drf,0)%32) // which start bit in the selected word? +#define DRF_EXTENT_MW(drf) (DRF_PICK_MW(drf,1)%32) // which end bit in the selected word +#define DRF_SHIFT_MW(drf) (DRF_PICK_MW(drf,0)%32) +#define DRF_MASK_MW(drf) (0xFFFFFFFFU>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32)) +#define DRF_SHIFTMASK_MW(drf) ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf))) +#define DRF_SIZE_MW(drf) (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1) + +#define DRF_DEF_MW(d,r,f,c) ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f)) +#define DRF_NUM_MW(d,r,f,n) (((n)&DRF_MASK_MW(NV##d##r##f))<<DRF_SHIFT_MW(NV##d##r##f)) +#define DRF_VAL_MW_1WORD(d,r,f,v) ((((v)[DRF_WORD_MW(NV##d##r##f)])>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f)) +#define DRF_SPANS(drf) ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32)) +#define DRF_WORD_MW_LOW(drf) (DRF_PICK_MW(drf,0)/32) +#define DRF_WORD_MW_HIGH(drf) (DRF_PICK_MW(drf,1)/32) +#define DRF_MASK_MW_LOW(drf) (0xFFFFFFFFU) +#define DRF_MASK_MW_HIGH(drf) (0xFFFFFFFFU>>(31-(DRF_EXTENT_MW(drf)))) +#define DRF_SHIFT_MW_LOW(drf) (DRF_PICK_MW(drf,0)%32) +#define DRF_SHIFT_MW_HIGH(drf) (0) +#define DRF_MERGE_SHIFT(drf) ((32-((DRF_PICK_MW(drf,0)%32)))%32) +#define DRF_VAL_MW_2WORD(d,r,f,v) (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \ + (((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f))) +#define DRF_VAL_MW(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) ) + +#define DRF_IDX_DEF_MW(d,r,f,i,c) ((NV##d##r##f##c)<<DRF_SHIFT_MW(NV##d##r##f(i))) +#define DRF_IDX_NUM_MW(d,r,f,i,n) (((n)&DRF_MASK_MW(NV##d##r##f(i)))<<DRF_SHIFT_MW(NV##d##r##f(i))) +#define DRF_IDX_VAL_MW(d,r,f,i,v) (((v)[DRF_WORD_MW(NV##d##r##f(i))]>>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i)))
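+/*
+ * Multi-word sketch (assumes a hypothetical 256-bit structure with
+ * #define NV_SPH_A_FIELD_X MW(127:96); illustration only):
+ *
+ *     NvU32 sph[8];
+ *     // MW(127:96) lives entirely in word 3, so this reads all of sph[3]
+ *     NvU32 x = DRF_VAL_MW(_SPH, _A, _FIELD_X, sph);
+ */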
+ +// +// Logically OR all DRF_DEF constants indexed from zero to s (semi-inclusive). +// Caution: Target variable v must be pre-initialized. +// +#define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v) \ +do \ +{ NvU32 idx; \ + for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx)\ + { \ + v |= DRF_IDX_DEF(d,r,f,idx,c); \ + } \ +} while(0) + + +#define FLD_MERGE_MW(drf,n,v) (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n) +#define FLD_ASSIGN_MW(drf,n,v) ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v)) +#define FLD_IDX_MERGE_MW(drf,i,n,v) (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n) +#define FLD_IDX_ASSIGN_MW(drf,i,n,v) ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v)) + +#define FLD_SET_DRF_MW(d,r,f,c,v) FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v) +#define FLD_SET_DRF_NUM_MW(d,r,f,n,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v) +#define FLD_SET_DRF_DEF_MW(d,r,f,c,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v) +#define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v) +#define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v) +#define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v) FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v) + +#define FLD_TEST_DRF_MW(d,r,f,c,v) ((DRF_VAL_MW(d, r, f, (v)) == NV##d##r##f##c)) +#define FLD_TEST_DRF_NUM_MW(d,r,f,n,v) ((DRF_VAL_MW(d, r, f, (v)) == n)) +#define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v) ((DRF_IDX_VAL_MW(d, r, f, i, (v)) == NV##d##r##f##c)) + +#define DRF_VAL_BS(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,(v)) : DRF_VAL_BS_1WORD(d,r,f,(v)) ) + +//------------------------------------------------------------------------// +// // +// Common defines for engine register reference wrappers // +// // +// New engine addressing can be created like: // +// #define ENG_REG_PMC(o,d,r) NV##d##r // +// #define ENG_IDX_REG_CE(o,d,i,r) CE_MAP(o,r,i) // +// // +// See FB_FBPA* for more examples // +//------------------------------------------------------------------------// + +#define ENG_RD_REG(g,o,d,r) GPU_REG_RD32(g, ENG_REG##d(o,d,r)) +#define ENG_WR_REG(g,o,d,r,v) GPU_REG_WR32(g, ENG_REG##d(o,d,r), (v)) +#define ENG_RD_DRF(g,o,d,r,f) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c)) +#define ENG_TEST_DRF_DEF(g,o,d,r,f,c) (ENG_RD_DRF(g, o, d, r, f) == NV##d##r##f##c) + +#define ENG_RD_IDX_DRF(g,o,d,r,f,i) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, (i)) == NV##d##r##f##c) + +#define ENG_IDX_RD_REG(g,o,d,i,r) GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r)) +#define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), (v)) + +#define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) + +// +// DRF_READ_1WORD_BS() and DRF_READ_1WORD_BS_HIGH() do not read beyond the bytes that contain +// the requested value. Reading beyond the actual data causes a page fault panic when the +// immediately following page happened to be protected or not mapped.
+// +#define DRF_VAL_BS_1WORD(d,r,f,v) ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f)) +#define DRF_VAL_BS_2WORD(d,r,f,v) (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \ + (((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f))) + +#define DRF_READ_1BYTE_BS(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4])) +#define DRF_READ_2BYTE_BS(drf,v) (DRF_READ_1BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS(drf,v) (DRF_READ_2BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS(drf,v) (DRF_READ_3BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24)) + +#define DRF_READ_1BYTE_BS_HIGH(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4])) +#define DRF_READ_2BYTE_BS_HIGH(drf,v) (DRF_READ_1BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS_HIGH(drf,v) (DRF_READ_2BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS_HIGH(drf,v) (DRF_READ_3BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24)) + +// Calculate 2^n - 1 and avoid shift counter overflow +// +// On Windows amd64, 64 << 64 => 1 +// +#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1) + +#define DRF_READ_1WORD_BS(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS(NV##d##r##f,(v))))) + +#define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v))))) + +#define LOWESTBIT(x) ( (x) & (((x) - 1U) ^ (x)) ) +// Destructive operation on n32 +#define HIGHESTBIT(n32) \ +{ \ + HIGHESTBITIDX_32(n32); \ + n32 = NVBIT(n32); \ +} +#define ONEBITSET(x) ( ((x) != 0U) && (((x) & ((x) - 1U)) == 0U) ) + +// Destructive operation on n32 +#define NUMSETBITS_32(n32) \ +{ \ + n32 = n32 - ((n32 >> 1) & 0x55555555); \ + n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333); \ + n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \ +} + +/*! + * Calculate number of bits set in a 32-bit unsigned integer. + * Pure typesafe alternative to @ref NUMSETBITS_32. + */ +static NV_FORCEINLINE NvU32 +nvPopCount32(const NvU32 x) +{ + NvU32 temp = x; + temp = temp - ((temp >> 1) & 0x55555555U); + temp = (temp & 0x33333333U) + ((temp >> 2) & 0x33333333U); + temp = (((temp + (temp >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24; + return temp; +} + +/*! + * Calculate number of bits set in a 64-bit unsigned integer. + */ +static NV_FORCEINLINE NvU32 +nvPopCount64(const NvU64 x) +{ + NvU64 temp = x; + temp = temp - ((temp >> 1) & 0x5555555555555555ULL); + temp = (temp & 0x3333333333333333ULL) + ((temp >> 2) & 0x3333333333333333ULL); + temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + temp = (temp * 0x0101010101010101ULL) >> 56; + return (NvU32)temp; +} + +/*! + * Determine how many bits are set below a bit index within a mask. 
+ * This assigns a dense ordering to the set bits in the mask. + * + * For example the mask 0xCD contains 5 set bits: + * nvMaskPos32(0xCD, 0) == 0 + * nvMaskPos32(0xCD, 2) == 1 + * nvMaskPos32(0xCD, 3) == 2 + * nvMaskPos32(0xCD, 6) == 3 + * nvMaskPos32(0xCD, 7) == 4 + */ +static NV_FORCEINLINE NvU32 +nvMaskPos32(const NvU32 mask, const NvU32 bitIdx) +{ + return nvPopCount32(mask & (NVBIT32(bitIdx) - 1U)); +} + +// Destructive operation on n32 +#define LOWESTBITIDX_32(n32) \ +{ \ + n32 = BIT_IDX_32(LOWESTBIT(n32));\ +} + +// Destructive operation on n32 +#define HIGHESTBITIDX_32(n32) \ +{ \ + NvU32 count = 0; \ + while (n32 >>= 1) \ + { \ + count++; \ + } \ + n32 = count; \ +} + +// Destructive operation on n32 +#define ROUNDUP_POW2(n32) \ +{ \ + n32--; \ + n32 |= n32 >> 1; \ + n32 |= n32 >> 2; \ + n32 |= n32 >> 4; \ + n32 |= n32 >> 8; \ + n32 |= n32 >> 16; \ + n32++; \ +} + +/*! + * Round up a 32-bit unsigned integer to the next power of 2. + * Pure typesafe alternative to @ref ROUNDUP_POW2. + * + * param[in] x must be in range [0, 2^31] to avoid overflow. + */ +static NV_FORCEINLINE NvU32 +nvNextPow2_U32(const NvU32 x) +{ + NvU32 y = x; + y--; + y |= y >> 1; + y |= y >> 2; + y |= y >> 4; + y |= y >> 8; + y |= y >> 16; + y++; + return y; +} + + +static NV_FORCEINLINE NvU32 +nvPrevPow2_U32(const NvU32 x ) +{ + NvU32 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + return y - (y >> 1); +} + +static NV_FORCEINLINE NvU64 +nvPrevPow2_U64(const NvU64 x ) +{ + NvU64 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + y |= (y >> 32); + return y - (y >> 1); +} + +// Destructive operation on n64 +#define ROUNDUP_POW2_U64(n64) \ +{ \ + n64--; \ + n64 |= n64 >> 1; \ + n64 |= n64 >> 2; \ + n64 |= n64 >> 4; \ + n64 |= n64 >> 8; \ + n64 |= n64 >> 16; \ + n64 |= n64 >> 32; \ + n64++; \ +} + +#define NV_SWAP_U8(a,b) \ +{ \ + NvU8 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +#define NV_SWAP_U32(a,b) \ +{ \ + NvU32 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +/*! + * @brief Macros allowing simple iteration over bits set in a given mask. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * + * @param[in,out] index lvalue that is used as a bit index in the loop + * (can be declared as any NvU* or NvS* variable) + * @param[in] mask expression, loop will iterate over set bits only + */ +#define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \ +{ \ + NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \ + for ((index) = 0U; lclMsk != 0U; (index)++, lclMsk >>= 1U)\ + { \ + if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0U) \ + { \ + continue; \ + } +#define FOR_EACH_INDEX_IN_MASK_END \ + } \ +} + +// +// Size to use when declaring variable-sized arrays +// +#define NV_ANYSIZE_ARRAY 1 + +// +// Returns ceil(a/b) +// +#define NV_CEIL(a,b) (((a)+(b)-1)/(b)) + +// Clearer name for NV_CEIL +#ifndef NV_DIV_AND_CEIL +#define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b) +#endif + +#ifndef NV_MIN +#define NV_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifndef NV_MAX +#define NV_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + +// +// Returns absolute value of provided integer expression +// +#define NV_ABS(a) ((a)>=0?(a):(-(a))) + +// +// Returns 1 if input number is positive, 0 if 0 and -1 if negative. Avoid +// macro parameter as function call which will have side effects. +// +#define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0))) + +// +// Returns 1 if input number is >= 0 or -1 otherwise. 
This assumes 0 has a +// positive sign. +// +#define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1)) + +// Returns the offset (in bytes) of 'member' in struct 'type'. +#ifndef NV_OFFSETOF + #if defined(__GNUC__) && (__GNUC__ > 3) + #define NV_OFFSETOF(type, member) ((NvU32)__builtin_offsetof(type, member)) + #else + #define NV_OFFSETOF(type, member) ((NvU32)(NvU64)&(((type *)0)->member)) // shouldn't we use PtrToUlong? But will need to include windows header. + #endif +#endif + +// +// Performs a rounded division of b into a (unsigned). For SIGNED version of +// NV_ROUNDED_DIV() macro check the comments in bug 769777. +// +#define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2U)) / (b)) + +/*! + * Performs a ceiling division of b into a (unsigned). A "ceiling" division is + * one that rounds the result up if a % b != 0. + * + * @param[in] a Numerator + * @param[in] b Denominator + * + * @return a / b + a % b != 0 ? 1 : 0. + */ +#define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + ((b) - 1)) / (b)) + +/*! + * Performs subtraction where a negative difference is raised to zero. + * Can be used to avoid underflowing an unsigned subtraction. + * + * @param[in] a Minuend + * @param[in] b Subtrahend + * + * @return a > b ? a - b : 0. + */ +#define NV_SUBTRACT_NO_UNDERFLOW(a, b) ((a)>(b) ? (a)-(b) : 0) + +/*! + * Performs a rounded right-shift of 32-bit unsigned value "a" by "shift" bits. + * Will round result away from zero. + * + * @param[in] a 32-bit unsigned value to shift. + * @param[in] shift Number of bits by which to shift. + * + * @return Resulting shifted value rounded away from zero. + */ +#define NV_RIGHT_SHIFT_ROUNDED(a, shift) \ + (((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1))) + +// +// Power of 2 alignment. +// (Will give unexpected results if 'gran' is not a power of 2.) +// +#ifndef NV_ALIGN_DOWN +// +// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type. +// Otherwise, if aligning an NvU64 with NvU32 granularity, the top 4 bytes get zeroed. +// +#define NV_ALIGN_DOWN(v, gran) ((v) & ~((v) - (v) + (gran) - 1)) +#endif + +#ifndef NV_ALIGN_UP +// +// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type. +// Otherwise, if aligning an NvU64 with NvU32 granularity, the top 4 bytes get zeroed.
+// +#define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((v) - (v) + (gran) - 1)) +#endif + +#ifndef NV_ALIGN_DOWN64 +#define NV_ALIGN_DOWN64(v, gran) ((v) & ~(((NvU64)gran) - 1)) +#endif + +#ifndef NV_ALIGN_UP64 +#define NV_ALIGN_UP64(v, gran) (((v) + ((gran) - 1)) & ~(((NvU64)gran)-1)) +#endif + +#ifndef NV_IS_ALIGNED +#define NV_IS_ALIGNED(v, gran) (0U == ((v) & ((gran) - 1U))) +#endif + +#ifndef NV_IS_ALIGNED64 +#define NV_IS_ALIGNED64(v, gran) (0U == ((v) & (((NvU64)gran) - 1U))) +#endif + +#ifndef NVMISC_MEMSET +static NV_FORCEINLINE void *NVMISC_MEMSET(void *s, NvU8 c, NvLength n) +{ + NvU8 *b = (NvU8 *) s; + NvLength i; + + for (i = 0; i < n; i++) + { + b[i] = c; + } + + return s; +} +#endif + +#ifndef NVMISC_MEMCPY +static NV_FORCEINLINE void *NVMISC_MEMCPY(void *dest, const void *src, NvLength n) +{ + NvU8 *destByte = (NvU8 *) dest; + const NvU8 *srcByte = (const NvU8 *) src; + NvLength i; + + for (i = 0; i < n; i++) + { + destByte[i] = srcByte[i]; + } + + return dest; +} +#endif + +static NV_FORCEINLINE char *NVMISC_STRNCPY(char *dest, const char *src, NvLength n) +{ + NvLength i; + + for (i = 0; i < n; i++) + { + dest[i] = src[i]; + if (src[i] == '\0') + { + break; + } + } + + for (; i < n; i++) + { + dest[i] = '\0'; + } + + return dest; +} + +/*! + * Convert a void* to an NvUPtr. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] ptr Pointer to be converted + * + * @return Resulting NvUPtr + */ +static NV_FORCEINLINE NvUPtr NV_PTR_TO_NVUPTR(void *ptr) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.p = ptr; + return uAddr.v; +} + +/*! + * Convert an NvUPtr to a void*. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] address Address to be converted + * + * @return Resulting void * + */ +static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.v = address; + return uAddr.p; +} + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif // __NV_MISC_H
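As a quick end-to-end sketch of the helpers defined in this header (variable names are illustrative only; the expected values follow directly from the macro definitions above):

    NvU32 mask = 0xCD;
    NvU32 bit;
    NvU32 count = nvPopCount32(mask);           // 5
    FOR_EACH_INDEX_IN_MASK(32, bit, mask)
    {
        // visits bit = 0, 2, 3, 6, 7
    }
    FOR_EACH_INDEX_IN_MASK_END;

    NvU64 addr = 0x123456789ULL;
    NvU64 down = NV_ALIGN_DOWN(addr, 0x1000U);  // 0x123456000 (high 32 bits preserved)
    NvU64 up   = NV_ALIGN_UP(addr, 0x1000U);    // 0x123457000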
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h new file mode 100644 index 0000000..4f5284d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h @@ -0,0 +1,123 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef SDK_NVSTATUS_H +#define SDK_NVSTATUS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +typedef NvU32 NV_STATUS; + +#define NV_STATUS_CODE( name, code, string ) name = (code), + +enum +{ + #include "nvstatuscodes.h" +}; + +#undef NV_STATUS_CODE + +/*! + * @def NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL + * @brief Success: No error or special condition + */ +#define NV_STATUS_LEVEL_OK 0 + +/*! + * @def NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL + * @brief Success, but there is a special condition + * + * @details In general, NV_STATUS_LEVEL_WARN status codes are handled the + * same as NV_STATUS_LEVEL_OK, but are useful to indicate that + * there is a condition that may be specially handled. + * + * Therefore, in most cases, client functions should test for + * status <= NV_STATUS_LEVEL_WARN or status > NV_STATUS_LEVEL_WARN + * to determine success vs. failure of a call. + */ +#define NV_STATUS_LEVEL_WARN 1 + +/*! + * @def NV_STATUS_LEVEL_ERR + * @see NV_STATUS_LEVEL + * @brief Unrecoverable error condition + */ +#define NV_STATUS_LEVEL_ERR 3 + +/*! + * @def NV_STATUS_LEVEL + * @see NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL_ERR + * @brief Level of the status code + * + * @warning IMPORTANT: When comparing NV_STATUS_LEVEL(_S) against one of + * these constants, it is important to use '<=' or '>' (rather + * than '<' or '>='). + * + * For example, do: + * if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN) + * rather than: + * if (NV_STATUS_LEVEL(status) < NV_STATUS_LEVEL_ERR) + * + * By being consistent in this manner, it is easier to systematically + * add additional level constants. New levels are likely to lower + * (rather than raise) the severity of _ERR codes. For example, + * if we were to add NV_STATUS_LEVEL_RETRY to indicate hardware + * failures that may be recoverable (e.g. RM_ERR_TIMEOUT_RETRY + * or RM_ERR_BUSY_RETRY), it would be less severe than + * NV_STATUS_LEVEL_ERR, the level to which these status codes now + * belong. Using '<=' and '>' ensures your code is not broken in + * cases like this. + */ +#define NV_STATUS_LEVEL(_S) \ + ((_S) == NV_OK? NV_STATUS_LEVEL_OK: \ + ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? NV_STATUS_LEVEL_WARN: \ + NV_STATUS_LEVEL_ERR)) + +/*! + * @def NV_STATUS_LEVEL_CHAR + * @see NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL_ERR + * @brief Character representing status code level + */ +#define NV_STATUS_LEVEL_CHAR(_S) \ + ((_S) == NV_OK? '0': \ + ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? 'W': \ + 'E')) + +// Function definitions +const char *nvstatusToString(NV_STATUS nvStatusIn); + +#ifdef __cplusplus +} +#endif + +#endif /* SDK_NVSTATUS_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h new file mode 100644 index 0000000..4d8af82 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef SDK_NVSTATUSCODES_H +#define SDK_NVSTATUSCODES_H + +NV_STATUS_CODE(NV_OK, 0x00000000, "Success") +NV_STATUS_CODE(NV_ERR_GENERIC, 0x0000FFFF, "Failure: Generic Error") + +NV_STATUS_CODE(NV_ERR_BROKEN_FB, 0x00000001, "Frame-Buffer broken") +NV_STATUS_CODE(NV_ERR_BUFFER_TOO_SMALL, 0x00000002, "Buffer passed in is too small") +NV_STATUS_CODE(NV_ERR_BUSY_RETRY, 0x00000003, "System is busy, retry later") +NV_STATUS_CODE(NV_ERR_CALLBACK_NOT_SCHEDULED, 0x00000004, "The requested callback API not scheduled") +NV_STATUS_CODE(NV_ERR_CARD_NOT_PRESENT, 0x00000005, "Card not detected") +NV_STATUS_CODE(NV_ERR_CYCLE_DETECTED, 0x00000006, "Call cycle detected") +NV_STATUS_CODE(NV_ERR_DMA_IN_USE, 0x00000007, "Requested DMA is in use") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_LOCKED, 0x00000008, "Requested DMA memory is not locked") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_UNLOCKED, 0x00000009, "Requested DMA memory is not unlocked") +NV_STATUS_CODE(NV_ERR_DUAL_LINK_INUSE, 0x0000000A, "Dual-Link is in use") +NV_STATUS_CODE(NV_ERR_ECC_ERROR, 0x0000000B, "Generic ECC error") +NV_STATUS_CODE(NV_ERR_FIFO_BAD_ACCESS, 0x0000000C, "FIFO: Invalid access") +NV_STATUS_CODE(NV_ERR_FREQ_NOT_SUPPORTED, 0x0000000D, "Requested frequency is not supported") +NV_STATUS_CODE(NV_ERR_GPU_DMA_NOT_INITIALIZED, 0x0000000E, "Requested DMA not initialized") +NV_STATUS_CODE(NV_ERR_GPU_IS_LOST, 0x0000000F, "GPU lost from the bus") +NV_STATUS_CODE(NV_ERR_GPU_IN_FULLCHIP_RESET, 0x00000010, "GPU currently in full-chip reset") +NV_STATUS_CODE(NV_ERR_GPU_NOT_FULL_POWER, 0x00000011, "GPU not in full power") +NV_STATUS_CODE(NV_ERR_GPU_UUID_NOT_FOUND, 0x00000012, "GPU UUID not found") +NV_STATUS_CODE(NV_ERR_HOT_SWITCH, 0x00000013, "System in hot switch") +NV_STATUS_CODE(NV_ERR_I2C_ERROR, 0x00000014, "I2C Error") +NV_STATUS_CODE(NV_ERR_I2C_SPEED_TOO_HIGH, 0x00000015, "I2C Error: Speed too high") +NV_STATUS_CODE(NV_ERR_ILLEGAL_ACTION, 0x00000016, "Current action is not allowed") +NV_STATUS_CODE(NV_ERR_IN_USE, 0x00000017, "Generic busy error") +NV_STATUS_CODE(NV_ERR_INFLATE_COMPRESSED_DATA_FAILED, 0x00000018, "Failed to inflate compressed data") +NV_STATUS_CODE(NV_ERR_INSERT_DUPLICATE_NAME, 0x00000019, "Found a duplicate entry in the requested btree") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_RESOURCES, 0x0000001A, "Ran out of a critical resource, other than memory") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_PERMISSIONS, 0x0000001B, "The requester 
does not have sufficient permissions") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_POWER, 0x0000001C, "Generic Error: Low power") +NV_STATUS_CODE(NV_ERR_INVALID_ACCESS_TYPE, 0x0000001D, "This type of access is not allowed") +NV_STATUS_CODE(NV_ERR_INVALID_ADDRESS, 0x0000001E, "Address not valid") +NV_STATUS_CODE(NV_ERR_INVALID_ARGUMENT, 0x0000001F, "Invalid argument to call") +NV_STATUS_CODE(NV_ERR_INVALID_BASE, 0x00000020, "Invalid base") +NV_STATUS_CODE(NV_ERR_INVALID_CHANNEL, 0x00000021, "Given channel-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLASS, 0x00000022, "Given class-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLIENT, 0x00000023, "Given client not valid") +NV_STATUS_CODE(NV_ERR_INVALID_COMMAND, 0x00000024, "Command passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DATA, 0x00000025, "Invalid data passed") +NV_STATUS_CODE(NV_ERR_INVALID_DEVICE, 0x00000026, "Current device is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DMA_SPECIFIER, 0x00000027, "The requested DMA specifier is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_EVENT, 0x00000028, "Invalid event occurred") +NV_STATUS_CODE(NV_ERR_INVALID_FLAGS, 0x00000029, "Invalid flags passed") +NV_STATUS_CODE(NV_ERR_INVALID_FUNCTION, 0x0000002A, "Called function is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_HEAP, 0x0000002B, "Heap corrupted") +NV_STATUS_CODE(NV_ERR_INVALID_INDEX, 0x0000002C, "Index invalid") +NV_STATUS_CODE(NV_ERR_INVALID_IRQ_LEVEL, 0x0000002D, "Requested IRQ level is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_LIMIT, 0x0000002E, "Generic Error: Invalid limit") +NV_STATUS_CODE(NV_ERR_INVALID_LOCK_STATE, 0x0000002F, "Requested lock state not valid") +NV_STATUS_CODE(NV_ERR_INVALID_METHOD, 0x00000030, "Requested method not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT, 0x00000031, "Object not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_BUFFER, 0x00000032, "Object buffer passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_HANDLE, 0x00000033, "Object handle is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_NEW, 0x00000034, "New object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_OLD, 0x00000035, "Old object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_PARENT, 0x00000036, "Object parent is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OFFSET, 0x00000037, "The offset passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OPERATION, 0x00000038, "Requested operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OWNER, 0x00000039, "Owner not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PARAM_STRUCT, 0x0000003A, "Invalid structure parameter") +NV_STATUS_CODE(NV_ERR_INVALID_PARAMETER, 0x0000003B, "At least one of the parameters passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PATH, 0x0000003C, "The requested path is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_POINTER, 0x0000003D, "Pointer not valid") +NV_STATUS_CODE(NV_ERR_INVALID_REGISTRY_KEY, 0x0000003E, "Found an invalid registry key") +NV_STATUS_CODE(NV_ERR_INVALID_REQUEST, 0x0000003F, "Generic Error: Invalid request") +NV_STATUS_CODE(NV_ERR_INVALID_STATE, 0x00000040, "Generic Error: Invalid state") +NV_STATUS_CODE(NV_ERR_INVALID_STRING_LENGTH, 0x00000041, "The string length is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_READ, 0x00000042, "The requested read operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_WRITE, 0x00000043, "The requested write operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_XLATE, 0x00000044, "The requested translate operation is not valid") +NV_STATUS_CODE(NV_ERR_IRQ_NOT_FIRING, 0x00000045, "Requested IRQ is not firing") 
+NV_STATUS_CODE(NV_ERR_IRQ_EDGE_TRIGGERED, 0x00000046, "IRQ is edge triggered") +NV_STATUS_CODE(NV_ERR_MEMORY_TRAINING_FAILED, 0x00000047, "Failed memory training sequence") +NV_STATUS_CODE(NV_ERR_MISMATCHED_SLAVE, 0x00000048, "Slave mismatch") +NV_STATUS_CODE(NV_ERR_MISMATCHED_TARGET, 0x00000049, "Target mismatch") +NV_STATUS_CODE(NV_ERR_MISSING_TABLE_ENTRY, 0x0000004A, "Requested entry not found in the table") +NV_STATUS_CODE(NV_ERR_MODULE_LOAD_FAILED, 0x0000004B, "Failed to load the requested module") +NV_STATUS_CODE(NV_ERR_MORE_DATA_AVAILABLE, 0x0000004C, "There is more data available") +NV_STATUS_CODE(NV_ERR_MORE_PROCESSING_REQUIRED, 0x0000004D, "More processing required for the given call") +NV_STATUS_CODE(NV_ERR_MULTIPLE_MEMORY_TYPES, 0x0000004E, "Multiple memory types found") +NV_STATUS_CODE(NV_ERR_NO_FREE_FIFOS, 0x0000004F, "No more free FIFOs found") +NV_STATUS_CODE(NV_ERR_NO_INTR_PENDING, 0x00000050, "No interrupt pending") +NV_STATUS_CODE(NV_ERR_NO_MEMORY, 0x00000051, "Out of memory") +NV_STATUS_CODE(NV_ERR_NO_SUCH_DOMAIN, 0x00000052, "Requested domain does not exist") +NV_STATUS_CODE(NV_ERR_NO_VALID_PATH, 0x00000053, "Caller did not specify a valid path") +NV_STATUS_CODE(NV_ERR_NOT_COMPATIBLE, 0x00000054, "Generic Error: Incompatible types") +NV_STATUS_CODE(NV_ERR_NOT_READY, 0x00000055, "Generic Error: Not ready") +NV_STATUS_CODE(NV_ERR_NOT_SUPPORTED, 0x00000056, "Call not supported") +NV_STATUS_CODE(NV_ERR_OBJECT_NOT_FOUND, 0x00000057, "Requested object not found") +NV_STATUS_CODE(NV_ERR_OBJECT_TYPE_MISMATCH, 0x00000058, "Specified objects do not match") +NV_STATUS_CODE(NV_ERR_OPERATING_SYSTEM, 0x00000059, "Generic operating system error") +NV_STATUS_CODE(NV_ERR_OTHER_DEVICE_FOUND, 0x0000005A, "Found other device instead of the requested one") +NV_STATUS_CODE(NV_ERR_OUT_OF_RANGE, 0x0000005B, "The specified value is out of bounds") +NV_STATUS_CODE(NV_ERR_OVERLAPPING_UVM_COMMIT, 0x0000005C, "Overlapping unified virtual memory commit") +NV_STATUS_CODE(NV_ERR_PAGE_TABLE_NOT_AVAIL, 0x0000005D, "Requested page table not available") +NV_STATUS_CODE(NV_ERR_PID_NOT_FOUND, 0x0000005E, "Process-Id not found") +NV_STATUS_CODE(NV_ERR_PROTECTION_FAULT, 0x0000005F, "Protection fault") +NV_STATUS_CODE(NV_ERR_RC_ERROR, 0x00000060, "Generic RC error") +NV_STATUS_CODE(NV_ERR_REJECTED_VBIOS, 0x00000061, "Given Video BIOS rejected/invalid") +NV_STATUS_CODE(NV_ERR_RESET_REQUIRED, 0x00000062, "Reset required") +NV_STATUS_CODE(NV_ERR_STATE_IN_USE, 0x00000063, "State in use") +NV_STATUS_CODE(NV_ERR_SIGNAL_PENDING, 0x00000064, "Signal pending") +NV_STATUS_CODE(NV_ERR_TIMEOUT, 0x00000065, "Call timed out") +NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY, 0x00000066, "Call timed out, please retry later") +NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES, 0x00000067, "Too many primaries") +NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE, 0x00000068, "Unified virtual memory requested address already in use") +NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED, 0x00000069, "Maximum number of sessions reached") +NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") //Contained within the RMAPI library +NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION, 0x0000006B, "Priv security violation") +NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE, 0x0000006C, "GPU currently in debug mode") +NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED, 0x0000006D, "Requested Feature functionality is not enabled") +NV_STATUS_CODE(NV_ERR_RESOURCE_LOST, 0x0000006E, "Requested resource has been destroyed")
+NV_STATUS_CODE(NV_ERR_PMU_NOT_READY, 0x0000006F, "PMU is not ready or has not yet been initialized") +NV_STATUS_CODE(NV_ERR_FLCN_ERROR, 0x00000070, "Generic falcon assert or halt") +NV_STATUS_CODE(NV_ERR_FATAL_ERROR, 0x00000071, "Fatal/unrecoverable error") +NV_STATUS_CODE(NV_ERR_MEMORY_ERROR, 0x00000072, "Generic memory error") +NV_STATUS_CODE(NV_ERR_INVALID_LICENSE, 0x00000073, "License provided is rejected or invalid") +NV_STATUS_CODE(NV_ERR_NVLINK_INIT_ERROR, 0x00000074, "Nvlink Init Error") +NV_STATUS_CODE(NV_ERR_NVLINK_MINION_ERROR, 0x00000075, "Nvlink Minion Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CLOCK_ERROR, 0x00000076, "Nvlink Clock Error") +NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR, 0x00000077, "Nvlink Training Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Configuration Error") +NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt") + +// Warnings: +NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch") +NV_STATUS_CODE(NV_WARN_INCORRECT_PERFMON_DATA, 0x00010002, "WARNING Incorrect performance monitor data") +NV_STATUS_CODE(NV_WARN_MISMATCHED_SLAVE, 0x00010003, "WARNING Slave mismatch") +NV_STATUS_CODE(NV_WARN_MISMATCHED_TARGET, 0x00010004, "WARNING Target mismatch") +NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More processing required for the call") +NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do") +NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found") +NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range") + +#endif /* SDK_NVSTATUSCODES_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h new file mode 100644 index 0000000..c349199 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h @@ -0,0 +1,662 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NVTYPES_INCLUDED +#define NVTYPES_INCLUDED + +#ifdef __cplusplus +extern "C" { +#endif + +#include "cpuopsys.h" + +#ifndef NVTYPES_USE_STDINT +#define NVTYPES_USE_STDINT 0 +#endif + +#if NVTYPES_USE_STDINT +#ifdef __cplusplus +#include <cstdint> +#include <cinttypes> +#else +#include <stdint.h> +#include <inttypes.h> +#endif // __cplusplus +#endif // NVTYPES_USE_STDINT + +#ifndef __cplusplus +// Header includes to make sure wchar_t is defined for C-file compilation +// (C++ is not affected as it is a fundamental type there) +// _MSC_VER is a hack to avoid failures for old setup of UEFI builds which are +// currently set to msvc100 but do not properly set the include paths +#if defined(NV_WINDOWS) && (!defined(_MSC_VER) || (_MSC_VER > 1600)) +#include <stddef.h> +#define NV_HAS_WCHAR_T_TYPEDEF 1 +#endif +#endif // __cplusplus + +#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__) +// ensure or force 8-byte alignment of NV 64-bit types +#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8))) +#else +// nothing needed +#define OPTIONAL_ALIGN8_ATTR +#endif // MAKE_NV64TYPES_8BYTES_ALIGNED && i386 + /***************************************************************************\ |* Typedefs *| \***************************************************************************/ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +//Typedefs for MISRA COMPLIANCE +typedef unsigned long long UInt64; +typedef signed long long Int64; +typedef unsigned int UInt32; +typedef signed int Int32; +typedef unsigned short UInt16; +typedef signed short Int16; +typedef unsigned char UInt8 ; +typedef signed char Int8 ; + +typedef void Void; +typedef float float32_t; +typedef double float64_t; +#endif + + +// Floating point types +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef float32_t NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef float64_t NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#else +typedef float NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef double NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#endif + + +// 8-bit: 'char' is the only 8-bit in the C89 standard and after. +#if NVTYPES_USE_STDINT +typedef uint8_t NvV8; /* "void": enumerated or multiple fields */ +typedef uint8_t NvU8; /* 0 to 255 */ +typedef int8_t NvS8; /* -128 to 127 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt8 NvV8; /* "void": enumerated or multiple fields */ +typedef UInt8 NvU8; /* 0 to 255 */ +typedef Int8 NvS8; /* -128 to 127 */ +#else +typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ +typedef unsigned char NvU8; /* 0 to 255 */ +typedef signed char NvS8; /* -128 to 127 */ +#endif +#endif // NVTYPES_USE_STDINT + + +#if NVTYPES_USE_STDINT +typedef uint16_t NvV16; /* "void": enumerated or multiple fields */ +typedef uint16_t NvU16; /* 0 to 65535 */ +typedef int16_t NvS16; /* -32768 to 32767 */ +#else +// 16-bit: If the compiler tells us what we can use, then use it.
+#ifdef __INT16_TYPE__ +typedef unsigned __INT16_TYPE__ NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned __INT16_TYPE__ NvU16; /* 0 to 65535 */ +typedef signed __INT16_TYPE__ NvS16; /* -32768 to 32767 */ + +// The minimal standard for C89 and after +#else // __INT16_TYPE__ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt16 NvV16; /* "void": enumerated or multiple fields */ +typedef UInt16 NvU16; /* 0 to 65535 */ +typedef Int16 NvS16; /* -32768 to 32767 */ +#else +typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned short NvU16; /* 0 to 65535 */ +typedef signed short NvS16; /* -32768 to 32767 */ +#endif +#endif // __INT16_TYPE__ +#endif // NVTYPES_USE_STDINT + +// wchar type (fixed size types consistent across Linux/Windows boundaries) +#if defined(NV_HAS_WCHAR_T_TYPEDEF) + typedef wchar_t NvWchar; +#else + typedef NvV16 NvWchar; +#endif + +// Macro to build an NvU32 from four bytes, listed from msb to lsb +#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) + +#if NVTYPES_USE_STDINT +typedef uint32_t NvV32; /* "void": enumerated or multiple fields */ +typedef uint32_t NvU32; /* 0 to 4294967295 */ +typedef int32_t NvS32; /* -2147483648 to 2147483647 */ +#else +// 32-bit: If the compiler tells us what we can use, then use it. +#ifdef __INT32_TYPE__ +typedef unsigned __INT32_TYPE__ NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned __INT32_TYPE__ NvU32; /* 0 to 4294967295 */ +typedef signed __INT32_TYPE__ NvS32; /* -2147483648 to 2147483647 */ + +// Older compilers +#else // __INT32_TYPE__ + +// For historical reasons, NvU32/NvV32 are defined to different base intrinsic +// types than NvS32 on some platforms. +// Mainly for 64-bit linux, where long is 64 bits and win9x, where int is 16 bit. +#if (defined(NV_UNIX) || defined(vxworks) || defined(NV_WINDOWS_CE) || \ + defined(__arm) || defined(__IAR_SYSTEMS_ICC__) || defined(NV_QNX) || \ + defined(NV_INTEGRITY) || defined(NV_MODS) || \ + defined(__GNUC__) || defined(__clang__) || defined(NV_MACINTOSH_64)) && \ + (!defined(NV_MACINTOSH) || defined(NV_MACINTOSH_64)) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt32 NvV32; /* "void": enumerated or multiple fields */ +typedef UInt32 NvU32; /* 0 to 4294967295 */ +#else +typedef unsigned int NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned int NvU32; /* 0 to 4294967295 */ +#endif + +// The minimal standard for C89 and after +#else // (defined(NV_UNIX) || defined(vxworks) || ... +typedef unsigned long NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned long NvU32; /* 0 to 4294967295 */ +#endif // (defined(NV_UNIX) || defined(vxworks) || ... 
+ +// Mac OS 32-bit still needs this +#if defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +typedef signed long NvS32; /* -2147483648 to 2147483647 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef Int32 NvS32; /* -2147483648 to 2147483647 */ +#else +typedef signed int NvS32; /* -2147483648 to 2147483647 */ +#endif +#endif // defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +#endif // __INT32_TYPE__ +#endif // NVTYPES_USE_STDINT + + + +#if NVTYPES_USE_STDINT +typedef uint64_t NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef int64_t NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX PRIX64 +#define NvU64_fmtx PRIx64 +#define NvU64_fmtu PRIu64 +#define NvU64_fmto PRIo64 +#define NvS64_fmtd PRId64 +#define NvS64_fmti PRIi64 +#else +// 64-bit types for compilers that support them, plus some obsolete variants +#if defined(__GNUC__) || defined(__clang__) || defined(__arm) || \ + defined(__IAR_SYSTEMS_ICC__) || defined(__ghs__) || defined(_WIN64) || \ + defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined (__xlC__) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef Int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#else +typedef unsigned long long NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef long long NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#endif + +#define NvU64_fmtX "llX" +#define NvU64_fmtx "llx" +#define NvU64_fmtu "llu" +#define NvU64_fmto "llo" +#define NvS64_fmtd "lld" +#define NvS64_fmti "lli" + +// Microsoft since 2003 -- https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx +#else +typedef unsigned __int64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef __int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX "I64X" +#define NvU64_fmtx "I64x" +#define NvU64_fmtu "I64u" +#define NvU64_fmto "I64o" +#define NvS64_fmtd "I64d" +#define NvS64_fmti "I64i" + +#endif +#endif // NVTYPES_USE_STDINT + +#ifdef NV_TYPESAFE_HANDLES +/* + * Can't use opaque pointer as clients might be compiled with mismatched + * pointer sizes. TYPESAFE check will eventually be removed once all clients + * have transitioned safely to NvHandle. + * The plan is to then eventually scale up the handle to be 64-bits. + */ +typedef struct +{ + NvU32 val; +} NvHandle; +#else +/* + * For compatibility with modules that haven't moved to typesafe handles.
+ */ +typedef NvU32 NvHandle; +#endif // NV_TYPESAFE_HANDLES + +/* Boolean type */ +typedef NvU8 NvBool; +#define NV_TRUE ((NvBool)(0 == 0)) +#define NV_FALSE ((NvBool)(0 != 0)) + +/* Tristate type: NV_TRISTATE_FALSE, NV_TRISTATE_TRUE, NV_TRISTATE_INDETERMINATE */ +typedef NvU8 NvTristate; +#define NV_TRISTATE_FALSE ((NvTristate) 0) +#define NV_TRISTATE_TRUE ((NvTristate) 1) +#define NV_TRISTATE_INDETERMINATE ((NvTristate) 2) + +/* Macros to extract the low and high parts of a 64-bit unsigned integer */ +/* Also designed to work if someone happens to pass in a 32-bit integer */ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffffU)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffffU)) +#else +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffff)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffff)) +#endif +#define NvU40_HI32(n) ((NvU32)((((NvU64)(n)) >> 8) & 0xffffffffU)) +#define NvU40_HI24of32(n) ((NvU32)( (NvU64)(n) & 0xffffff00U)) + +/* Macros to get the MSB and LSB of a 32 bit unsigned number */ +#define NvU32_HI16(n) ((NvU16)((((NvU32)(n)) >> 16) & 0xffffU)) +#define NvU32_LO16(n) ((NvU16)(( (NvU32)(n)) & 0xffffU)) + /***************************************************************************\ |* *| |* 64 bit type definitions for use in interface structures. *| |* *| \***************************************************************************/ + +#if defined(NV_64_BITS) + +typedef void* NvP64; /* 64 bit void pointer */ +typedef NvU64 NvUPtr; /* pointer sized unsigned int */ +typedef NvS64 NvSPtr; /* pointer sized signed int */ +typedef NvU64 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) (n) +#define NvP64_fmt "%p" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(v)) +#define NvP64_PLUS_OFFSET(p,o) (NvP64)((NvU64)(p) + (NvU64)(o)) + +#define NvUPtr_fmtX NvU64_fmtX +#define NvUPtr_fmtx NvU64_fmtx +#define NvUPtr_fmtu NvU64_fmtu +#define NvUPtr_fmto NvU64_fmto +#define NvSPtr_fmtd NvS64_fmtd +#define NvSPtr_fmti NvS64_fmti + +#else + +typedef NvU64 NvP64; /* 64 bit void pointer */ +typedef NvU32 NvUPtr; /* pointer sized unsigned int */ +typedef NvS32 NvSPtr; /* pointer sized signed int */ +typedef NvU32 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) ((void *)(NvUPtr)(n)) +#define NvP64_fmt "0x%llx" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(NvUPtr)(v)) +#define NvP64_PLUS_OFFSET(p,o) ((p) + (NvU64)(o)) + +#define NvUPtr_fmtX "X" +#define NvUPtr_fmtx "x" +#define NvUPtr_fmtu "u" +#define NvUPtr_fmto "o" +#define NvSPtr_fmtd "d" +#define NvSPtr_fmti "i" + +#endif + +#define NvP64_NULL (NvP64)0 + +/*! + * Helper macro to pack an @ref NvU64_ALIGN32 structure from a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64_ALIGN32 structure to pack + * @param[in] pSrc Pointer to NvU64 with which to pack + */ +#define NvU64_ALIGN32_PACK(pDst, pSrc) \ +do { \ + (pDst)->lo = NvU64_LO32(*(pSrc)); \ + (pDst)->hi = NvU64_HI32(*(pSrc)); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure into a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64 in which to unpack + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure from which to unpack + */ +#define NvU64_ALIGN32_UNPACK(pDst, pSrc) \ +do { \ + (*(pDst)) = NvU64_ALIGN32_VAL(pSrc); \ +} while (NV_FALSE)
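+/*
+ * Usage sketch (illustrative only): round-tripping a 64-bit value through
+ * the 32-bit-aligned representation:
+ *
+ *     NvU64 in = 0x123456789ABCDEF0ULL, out;
+ *     NvU64_ALIGN32 packed;
+ *     NvU64_ALIGN32_PACK(&packed, &in);    // packed.lo == 0x9ABCDEF0, packed.hi == 0x12345678
+ *     NvU64_ALIGN32_UNPACK(&out, &packed); // out == in
+ */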
+/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure as a @ref NvU64. + * + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure to unpack + */ +#define NvU64_ALIGN32_VAL(pSrc) \ + ((NvU64) ((NvU64)((pSrc)->lo) | (((NvU64)(pSrc)->hi) << 32U))) + +/*! + * Helper macro to check whether the 32 bit aligned 64 bit number is zero. + * + * @param[in] _pU64 Pointer to NvU64_ALIGN32 structure. + * + * @return + * NV_TRUE _pU64 is zero. + * NV_FALSE otherwise. + */ +#define NvU64_ALIGN32_IS_ZERO(_pU64) \ + (((_pU64)->lo == 0U) && ((_pU64)->hi == 0U)) + +/*! + * Helper macro to add two 32 bit aligned 64 bit numbers on a 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_ADD(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __src2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__src2, (pSrc2)); \ + __dst = __src1 + __src2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Helper macro to subtract two 32 bit aligned 64 bit numbers on a 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_SUB(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __src2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__src2, (pSrc2)); \ + __dst = __src1 - __src2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Structure for representing 32 bit aligned NvU64 (64-bit unsigned integer) + * structures. This structure must be used because the 32 bit processor and + * 64 bit processor compilers will pack/align NvU64 differently. + * + * One use case is RM being 64 bit proc whereas PMU being 32 bit proc, this + * alignment difference will result in corrupted transactions between the RM + * and PMU. + * + * See the @ref NvU64_ALIGN32_PACK and @ref NvU64_ALIGN32_UNPACK macros for + * packing and unpacking these structures. + * + * @note The intention of this structure is to provide a datatype which will + * be packed/aligned consistently and efficiently across all platforms. + * We don't want to use "NV_DECLARE_ALIGNED(NvU64, 8)" because that + * leads to memory waste on our 32-bit uprocessors (e.g. FALCONs) where + * DMEM efficiency is vital. + */ +typedef struct +{ + /*! + * Low 32 bits. + */ + NvU32 lo; + /*! + * High 32 bits. + */ + NvU32 hi; +} NvU64_ALIGN32; + +/* Useful macro to hide required double cast */ +#define NV_PTR_TO_NvP64(n) (NvP64)(NvUPtr)(n) +#define NV_SIGN_EXT_PTR_TO_NvP64(p) ((NvP64)(NvS64)(NvSPtr)(p)) +#define KERNEL_POINTER_TO_NvP64(p) ((NvP64)(uintptr_t)(p)) + /***************************************************************************\ |* *| |* Limits for common types. *| |* *| \***************************************************************************/ + +/* Explanation of the current form of these limits: + * + * - Decimal is used, as hex values are by default positive. + * - Casts are not used, as usage in the preprocessor itself (#if) ends poorly. + * - The subtraction of 1 for some MIN values is used to get around the fact + * that the C syntax actually treats -x as NEGATE(x) instead of a distinct + * number. Since 2147483648 isn't a valid positive 32-bit signed value, we + * take the largest valid positive signed number, negate it, and subtract 1.
+ */
+#define NV_S8_MIN       (-128)
+#define NV_S8_MAX       (+127)
+#define NV_U8_MIN       (0U)
+#define NV_U8_MAX       (+255U)
+#define NV_S16_MIN      (-32768)
+#define NV_S16_MAX      (+32767)
+#define NV_U16_MIN      (0U)
+#define NV_U16_MAX      (+65535U)
+#define NV_S32_MIN      (-2147483647 - 1)
+#define NV_S32_MAX      (+2147483647)
+#define NV_U32_MIN      (0U)
+#define NV_U32_MAX      (+4294967295U)
+#define NV_S64_MIN      (-9223372036854775807LL - 1LL)
+#define NV_S64_MAX      (+9223372036854775807LL)
+#define NV_U64_MIN      (0ULL)
+#define NV_U64_MAX      (+18446744073709551615ULL)
+
+/* Aligns fields in structs so they match up between 32 and 64 bit builds */
+#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
+#define NV_ALIGN_BYTES(size) __attribute__ ((aligned (size)))
+#elif defined(__arm)
+#define NV_ALIGN_BYTES(size) __align(size)
+#else
+// XXX This is dangerously nonportable!  We really shouldn't provide a default
+// version of this that doesn't do anything.
+#define NV_ALIGN_BYTES(size)
+#endif
+
+// NV_DECLARE_ALIGNED() can be used on all platforms.
+// This macro form accounts for the fact that __declspec on Windows is required
+// before the variable type,
+// and NV_ALIGN_BYTES is required after the variable name.
+// For example, NV_DECLARE_ALIGNED(NvU64 value, 8) expands to an 8-byte-aligned
+// NvU64 declaration with the alignment keyword in the right position for each
+// compiler.
+#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) TYPE_VAR __attribute__ ((aligned (ALIGN)))
+#elif defined(_MSC_VER)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __declspec(align(ALIGN)) TYPE_VAR
+#elif defined(__arm)
+#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __align(ALIGN) TYPE_VAR
+#endif
+
+ /***************************************************************************\
+|*                       Function Declaration Types                          *|
+ \***************************************************************************/
+
+// stretching the meaning of "nvtypes", but this seems to be the least
+// offensive place to re-locate these from nvos.h which cannot be included by
+// a number of builds that need them
+
+#if defined(_MSC_VER)
+
+    #if _MSC_VER >= 1310
+    #define NV_NOINLINE __declspec(noinline)
+    #else
+    #define NV_NOINLINE
+    #endif
+
+    #define NV_INLINE __inline
+
+    #if _MSC_VER >= 1200
+    #define NV_FORCEINLINE __forceinline
+    #else
+    #define NV_FORCEINLINE __inline
+    #endif
+
+    #define NV_APIENTRY  __stdcall
+    #define NV_FASTCALL  __fastcall
+    #define NV_CDECLCALL __cdecl
+    #define NV_STDCALL   __stdcall
+
+    #define NV_FORCERESULTCHECK
+
+    #define NV_ATTRIBUTE_UNUSED
+
+    #define NV_FORMAT_PRINTF(_f, _a)
+
+#else // ! defined(_MSC_VER)
+
+    #if defined(__GNUC__)
+        #if (__GNUC__ > 3) || \
+            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) && (__GNUC_PATCHLEVEL__ >= 1))
+            #define NV_NOINLINE __attribute__((__noinline__))
+        #endif
+    #elif defined(__clang__)
+        #if __has_attribute(noinline)
+            #define NV_NOINLINE __attribute__((__noinline__))
+        #endif
+    #elif defined(__arm) && (__ARMCC_VERSION >= 300000)
+        #define NV_NOINLINE __attribute__((__noinline__))
+    #elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) ||\
+          (defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x590))
+        #define NV_NOINLINE __attribute__((__noinline__))
+    #elif defined (__INTEL_COMPILER)
+        #define NV_NOINLINE __attribute__((__noinline__))
+    #endif
+
+    #if !defined(NV_NOINLINE)
+    #define NV_NOINLINE
+    #endif
+
+    /* GreenHills compiler defines __GNUC__, but doesn't support
+     * __inline__ keyword.
*/ + #if defined(__ghs__) + #define NV_INLINE inline + #elif defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_INLINE __inline__ + #elif defined (macintosh) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) + #define NV_INLINE inline + #elif defined(__arm) + #define NV_INLINE __inline + #else + #define NV_INLINE + #endif + + /* Don't force inline on DEBUG builds -- it's annoying for debuggers. */ + #if !defined(DEBUG) + /* GreenHills compiler defines __GNUC__, but doesn't support + * __attribute__ or __inline__ keyword. */ + #if defined(__ghs__) + #define NV_FORCEINLINE inline + #elif defined(__GNUC__) + // GCC 3.1 and beyond support the always_inline function attribute. + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__clang__) + #if __has_attribute(always_inline) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 220000) + // RVDS 2.2 also supports forceinline, but ADS 1.2 does not + #define NV_FORCEINLINE __forceinline + #else /* defined(__GNUC__) */ + #define NV_FORCEINLINE NV_INLINE + #endif + #else + #define NV_FORCEINLINE NV_INLINE + #endif + + #define NV_APIENTRY + #define NV_FASTCALL + #define NV_CDECLCALL + #define NV_STDCALL + + /* + * The 'warn_unused_result' function attribute prompts GCC to issue a + * warning if the result of a function tagged with this attribute + * is ignored by a caller. In combination with '-Werror', it can be + * used to enforce result checking in RM code; at this point, this + * is only done on UNIX. + */ + #if defined(__GNUC__) && defined(NV_UNIX) + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #elif defined(__clang__) + #if __has_attribute(warn_unused_result) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #else /* defined(__GNUC__) */ + #define NV_FORCERESULTCHECK + #endif + + #if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_ATTRIBUTE_UNUSED __attribute__((__unused__)) + #else + #define NV_ATTRIBUTE_UNUSED + #endif + + /* + * Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at + * parameter number 'f' and variadic arguments start at parameter number 'a'. + * (Note that for C++ methods, there is an implicit 'this' parameter so + * explicit parameters are numbered from 2.) + */ + #if defined(__GNUC__) + #define NV_FORMAT_PRINTF(_f, _a) __attribute__((format(printf, _f, _a))) + #else + #define NV_FORMAT_PRINTF(_f, _a) + #endif + +#endif // defined(_MSC_VER) + +#ifdef __cplusplus +} +#endif + +#endif /* NVTYPES_INCLUDED */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h new file mode 100644 index 0000000..a3d850a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h @@ -0,0 +1,257 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*
+ * OS interface definitions needed by os-interface.c
+ */
+
+#ifndef OS_INTERFACE_H
+#define OS_INTERFACE_H
+
+/******************* Operating System Interface Routines *******************\
+*                                                                           *
+*        Operating system wrapper functions used to abstract the OS.        *
+*                                                                           *
+\***************************************************************************/
+
+#include <nvtypes.h>
+#include <nvstatus.h>
+#include "nv_stdarg.h"
+#include <nv-kernel-interface-api.h>
+#include <os/nv_memory_type.h>
+#include <nv-caps.h>
+
+
+
+typedef struct
+{
+    NvU32  os_major_version;
+    NvU32  os_minor_version;
+    NvU32  os_build_number;
+    const char * os_build_version_str;
+    const char * os_build_date_plus_str;
+} os_version_info;
+
+/* Each OS defines its own version of this opaque type */
+struct os_work_queue;
+
+/* Each OS defines its own version of this opaque type */
+typedef struct os_wait_queue os_wait_queue;
+
+/*
+ * ---------------------------------------------------------------------------
+ *
+ * Function prototypes for OS interface.
+ * + * --------------------------------------------------------------------------- + */ + +NvU64 NV_API_CALL os_get_num_phys_pages (void); +NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64); +void NV_API_CALL os_free_mem (void *); +NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *); +NvU64 NV_API_CALL os_get_current_tick (void); +NvU64 NV_API_CALL os_get_current_tick_hr (void); +NvU64 NV_API_CALL os_get_tick_resolution (void); +NV_STATUS NV_API_CALL os_delay (NvU32); +NV_STATUS NV_API_CALL os_delay_us (NvU32); +NvU64 NV_API_CALL os_get_cpu_frequency (void); +NvU32 NV_API_CALL os_get_current_process (void); +void NV_API_CALL os_get_current_process_name (char *, NvU32); +NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *); +char* NV_API_CALL os_string_copy (char *, const char *); +NvU32 NV_API_CALL os_string_length (const char *); +NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32); +NvS32 NV_API_CALL os_string_compare (const char *, const char *); +NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...); +NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list); +void NV_API_CALL os_log_error (const char *, va_list); +void* NV_API_CALL os_mem_copy (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32); +void* NV_API_CALL os_mem_set (void *, NvU8, NvU32); +NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32); +void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *); +NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8); +NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16); +NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32); +NvBool NV_API_CALL os_pci_remove_supported (void); +void NV_API_CALL os_pci_remove (void *); +void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32); +void NV_API_CALL os_unmap_kernel_space (void *, NvU64); +void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **); +void NV_API_CALL os_unmap_user_space (void *, NvU64, void *); +NV_STATUS NV_API_CALL os_flush_cpu_cache (void); +NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void); +NV_STATUS NV_API_CALL os_flush_user_cache (void); +void NV_API_CALL os_flush_cpu_write_combine_buffer(void); +NvU8 NV_API_CALL os_io_read_byte (NvU32); +NvU16 NV_API_CALL os_io_read_word (NvU32); +NvU32 NV_API_CALL os_io_read_dword (NvU32); +void NV_API_CALL os_io_write_byte (NvU32, NvU8); +void NV_API_CALL os_io_write_word (NvU32, NvU16); +void NV_API_CALL os_io_write_dword (NvU32, NvU32); +NvBool NV_API_CALL os_is_administrator (void); +NvBool NV_API_CALL os_allow_priority_override (void); +void NV_API_CALL os_dbg_init (void); +void NV_API_CALL os_dbg_breakpoint (void); +void NV_API_CALL os_dbg_set_level (NvU32); +NvU32 NV_API_CALL os_get_cpu_count (void); +NvU32 NV_API_CALL os_get_cpu_number (void); +void NV_API_CALL os_disable_console_access (void); +void NV_API_CALL os_enable_console_access (void); +NV_STATUS NV_API_CALL os_registry_init (void); +NV_STATUS NV_API_CALL os_schedule (void); +NV_STATUS NV_API_CALL os_alloc_spinlock (void **); +void NV_API_CALL os_free_spinlock (void *); +NvU64 NV_API_CALL os_acquire_spinlock (void *); +void NV_API_CALL os_release_spinlock (void *, NvU64); 
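+
+/*
+ * Illustrative sketch only (not part of this interface): the spinlock
+ * helpers above form a matched pair, with the value returned by
+ * os_acquire_spinlock() handed back to os_release_spinlock(); NV_OK is
+ * assumed to be the usual NV_STATUS success code.
+ *
+ *     void *lock;
+ *     if (os_alloc_spinlock(&lock) == NV_OK)
+ *     {
+ *         NvU64 flags = os_acquire_spinlock(lock);
+ *         // ... critical section ...
+ *         os_release_spinlock(lock, flags);
+ *         os_free_spinlock(lock);
+ *     }
+ */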
+NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *); +NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *); +NV_STATUS NV_API_CALL os_alloc_mutex (void **); +void NV_API_CALL os_free_mutex (void *); +NV_STATUS NV_API_CALL os_acquire_mutex (void *); +NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *); +void NV_API_CALL os_release_mutex (void *); +void* NV_API_CALL os_alloc_semaphore (NvU32); +void NV_API_CALL os_free_semaphore (void *); +NV_STATUS NV_API_CALL os_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_release_semaphore (void *); +NvBool NV_API_CALL os_semaphore_may_sleep (void); +NV_STATUS NV_API_CALL os_get_version_info (os_version_info*); +NvBool NV_API_CALL os_is_isr (void); +NvBool NV_API_CALL os_pat_supported (void); +void NV_API_CALL os_dump_stack (void); +NvBool NV_API_CALL os_is_efi_enabled (void); +NvBool NV_API_CALL os_is_xen_dom0 (void); +NvBool NV_API_CALL os_is_vgx_hyper (void); +NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32); +NvBool NV_API_CALL os_is_grid_supported (void); +NvU32 NV_API_CALL os_get_grid_csp_support (void); +void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64); +void NV_API_CALL os_bug_check (NvU32, const char *); +NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32); +NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**); +NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *); +NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *); +NV_STATUS NV_API_CALL os_get_euid (NvU32 *); +NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr); +NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *); +void NV_API_CALL os_add_record_for_crashLog (void *, NvU32); +void NV_API_CALL os_delete_record_for_crashLog (void *); +NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32); +NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *); +NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *); +NV_STATUS NV_API_CALL os_get_page (NvU64 address); +NV_STATUS NV_API_CALL os_put_page (NvU64 address); +NvU32 NV_API_CALL os_get_page_refcount (NvU64 address); +NvU32 NV_API_CALL os_count_tail_pages (NvU64 address); +void NV_API_CALL os_free_pages_phys (NvU64, NvU32); +NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *); +NV_STATUS NV_API_CALL os_open_temporary_file (void **); +void NV_API_CALL os_close_file (void *); +NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **); +NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64); +NvBool NV_API_CALL os_is_nvswitch_present (void); +void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16); +NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **); +void NV_API_CALL os_free_wait_queue (os_wait_queue *); +void NV_API_CALL os_wait_uninterruptible (os_wait_queue *); +void NV_API_CALL os_wait_interruptible (os_wait_queue *); +void NV_API_CALL os_wake_up (os_wait_queue *); +nv_cap_t* NV_API_CALL os_nv_cap_init (const char *); +nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int); +nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int); +void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *); +int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int); 
+void NV_API_CALL os_nv_cap_close_fd (int); + + +NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *); + + + + + + + + + + + + + + +extern NvU32 os_page_size; +extern NvU64 os_page_mask; +extern NvU8 os_page_shift; +extern NvU32 os_sev_status; +extern NvBool os_sev_enabled; +extern NvBool os_dma_buf_enabled; + +/* + * --------------------------------------------------------------------------- + * + * Debug macros. + * + * --------------------------------------------------------------------------- + */ + +#define NV_DBG_INFO 0x0 +#define NV_DBG_SETUP 0x1 +#define NV_DBG_USERERRORS 0x2 +#define NV_DBG_WARNINGS 0x3 +#define NV_DBG_ERRORS 0x4 + + +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...); + +#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__) + +#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status) + +/* + * Fields for os_lock_user_pages flags parameter + */ +#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001 + + +// NV OS Tegra platform type defines +#define NV_OS_TEGRA_PLATFORM_SIM 0 +#define NV_OS_TEGRA_PLATFORM_FPGA 1 +#define NV_OS_TEGRA_PLATFORM_SILICON 2 + + +#endif /* OS_INTERFACE_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h new file mode 100644 index 0000000..34255c7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NV_MEMORY_TYPE_H +#define NV_MEMORY_TYPE_H + +#define NV_MEMORY_NONCONTIGUOUS 0 +#define NV_MEMORY_CONTIGUOUS 1 + +#define NV_MEMORY_CACHED 0 +#define NV_MEMORY_UNCACHED 1 +#define NV_MEMORY_WRITECOMBINED 2 +#define NV_MEMORY_WRITEBACK 5 +#define NV_MEMORY_DEFAULT 6 +#define NV_MEMORY_UNCACHED_WEAK 7 + +#define NV_PROTECT_READABLE 1 +#define NV_PROTECT_WRITEABLE 2 +#define NV_PROTECT_READ_WRITE (NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE) + +#endif /* NV_MEMORY_TYPE_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h new file mode 100644 index 0000000..ad66cc0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h @@ -0,0 +1,364 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#ifndef _OS_DSI_PANEL_PARAMS_H_
+#define _OS_DSI_PANEL_PARAMS_H_
+
+#define DSI_GENERIC_LONG_WRITE            0x29
+#define DSI_DCS_LONG_WRITE                0x39
+#define DSI_GENERIC_SHORT_WRITE_1_PARAMS  0x13
+#define DSI_GENERIC_SHORT_WRITE_2_PARAMS  0x23
+#define DSI_DCS_WRITE_0_PARAM             0x05
+#define DSI_DCS_WRITE_1_PARAM             0x15
+#define DSI_DCS_READ_PARAM                0x06
+#define DSI_DCS_COMPRESSION_MODE          0x07
+#define DSI_DCS_PPS_LONG_WRITE            0x0A
+
+#define DSI_DCS_SET_ADDR_MODE             0x36
+#define DSI_DCS_EXIT_SLEEP_MODE           0x11
+#define DSI_DCS_ENTER_SLEEP_MODE          0x10
+#define DSI_DCS_SET_DISPLAY_ON            0x29
+#define DSI_DCS_SET_DISPLAY_OFF           0x28
+#define DSI_DCS_SET_TEARING_EFFECT_OFF    0x34
+#define DSI_DCS_SET_TEARING_EFFECT_ON     0x35
+#define DSI_DCS_NO_OP                     0x0
+#define DSI_NULL_PKT_NO_DATA              0x9
+#define DSI_BLANKING_PKT_NO_DATA          0x19
+#define DSI_DCS_SET_COMPRESSION_METHOD    0xC0
+
+/* DCS commands for command mode */
+#define DSI_ENTER_PARTIAL_MODE    0x12
+#define DSI_SET_PIXEL_FORMAT      0x3A
+#define DSI_AREA_COLOR_MODE       0x4C
+#define DSI_SET_PARTIAL_AREA      0x30
+#define DSI_SET_PAGE_ADDRESS      0x2B
+#define DSI_SET_ADDRESS_MODE      0x36
+#define DSI_SET_COLUMN_ADDRESS    0x2A
+#define DSI_WRITE_MEMORY_START    0x2C
+#define DSI_WRITE_MEMORY_CONTINUE 0x3C
+
+#define PKT_ID0(id)     ((((id) & 0x3f) << 3) | \
+                        (((DSI_ENABLE) & 0x1) << 9))
+#define PKT_LEN0(len)   (((len) & 0x7) << 0)
+#define PKT_ID1(id)     ((((id) & 0x3f) << 13) | \
+                        (((DSI_ENABLE) & 0x1) << 19))
+#define PKT_LEN1(len)   (((len) & 0x7) << 10)
+#define PKT_ID2(id)     ((((id) & 0x3f) << 23) | \
+                        (((DSI_ENABLE) & 0x1) << 29))
+#define PKT_LEN2(len)   (((len) & 0x7) << 20)
+#define PKT_ID3(id)     ((((id) & 0x3f) << 3) | \
+                        (((DSI_ENABLE) & 0x1) << 9))
+#define PKT_LEN3(len)   (((len) & 0x7) << 0)
+#define PKT_ID4(id)     ((((id) & 0x3f) << 13) | \
+                        (((DSI_ENABLE) & 0x1) << 19))
+#define PKT_LEN4(len)   (((len) & 0x7) << 10)
+#define PKT_ID5(id)     ((((id) & 0x3f) << 23) | \
+                        (((DSI_ENABLE) & 0x1) << 29))
+#define PKT_LEN5(len)   (((len) & 0x7) << 20)
+#define PKT_LP          (((DSI_ENABLE) & 0x1) << 30)
+#define NUMOF_PKT_SEQ   12
+
+/* DSI pixel data format, enum values should match the dt-bindings in tegra-panel.h */
+typedef enum
+{
+    DSI_PIXEL_FORMAT_16BIT_P,
+    DSI_PIXEL_FORMAT_18BIT_P,
+    DSI_PIXEL_FORMAT_18BIT_NP,
+    DSI_PIXEL_FORMAT_24BIT_P,
+    DSI_PIXEL_FORMAT_8BIT_DSC,
+    DSI_PIXEL_FORMAT_12BIT_DSC,
+    DSI_PIXEL_FORMAT_16BIT_DSC,
+    DSI_PIXEL_FORMAT_10BIT_DSC,
+    DSI_PIXEL_FORMAT_30BIT_P,
+    DSI_PIXEL_FORMAT_36BIT_P,
+} DSIPIXELFORMAT;
+
+/* DSI virtual channel number */
+typedef enum
+{
+    DSI_VIRTUAL_CHANNEL_0,
+    DSI_VIRTUAL_CHANNEL_1,
+    DSI_VIRTUAL_CHANNEL_2,
+    DSI_VIRTUAL_CHANNEL_3,
+} DSIVIRTUALCHANNEL;
+
+/* DSI transmit method for video data */
+typedef enum
+{
+    DSI_VIDEO_TYPE_VIDEO_MODE,
+    DSI_VIDEO_TYPE_COMMAND_MODE,
+} DSIVIDEODATAMODE;
+
+/* DSI HS clock mode */
+typedef enum
+{
+    DSI_VIDEO_CLOCK_CONTINUOUS,
+    DSI_VIDEO_CLOCK_TX_ONLY,
+} DSICLOCKMODE;
+
+/* DSI burst mode setting in video mode. Each mode is assigned a
+ * fixed value. The rationale behind this is to avoid changing these
+ * values, since the calculation of the DSI clock depends on them.
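+ *
+ * For example (an illustrative, hypothetical use): a clock table indexed by
+ * these values, such as burst_mode_freq[DSI_VIDEO_BURST_MODE_LOW_SPEED],
+ * would silently break if DSI_VIDEO_BURST_MODE_LOW_SPEED stopped being 3.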
*/ +typedef enum +{ + DSI_VIDEO_NON_BURST_MODE = 0, + DSI_VIDEO_NON_BURST_MODE_WITH_SYNC_END = 1, + DSI_VIDEO_BURST_MODE_LOWEST_SPEED = 2, + DSI_VIDEO_BURST_MODE_LOW_SPEED = 3, + DSI_VIDEO_BURST_MODE_MEDIUM_SPEED = 4, + DSI_VIDEO_BURST_MODE_FAST_SPEED = 5, + DSI_VIDEO_BURST_MODE_FASTEST_SPEED = 6, +} DSIVIDEOBURSTMODE; + +/* DSI Ganged Mode */ +typedef enum +{ + DSI_GANGED_SYMMETRIC_LEFT_RIGHT = 1, + DSI_GANGED_SYMMETRIC_EVEN_ODD = 2, + DSI_GANGED_SYMMETRIC_LEFT_RIGHT_OVERLAP = 3, +} DSIGANGEDTYPE; + +typedef enum +{ + DSI_LINK0, + DSI_LINK1, +} DSILINKNUM; + +/* DSI Command Packet type */ +typedef enum +{ + DSI_PACKET_CMD, + DSI_DELAY_MS, + DSI_GPIO_SET, + DSI_SEND_FRAME, + DSI_PACKET_VIDEO_VBLANK_CMD, + DSI_DELAY_US, +} DSICMDPKTTYPE; + +/* DSI Phy type */ +typedef enum +{ + DSI_DPHY, + DSI_CPHY, +} DSIPHYTYPE; + +enum { + DSI_GPIO_LCD_RESET, + DSI_GPIO_PANEL_EN, + DSI_GPIO_PANEL_EN_1, + DSI_GPIO_BL_ENABLE, + DSI_GPIO_BL_PWM, + DSI_GPIO_AVDD_AVEE_EN, + DSI_GPIO_VDD_1V8_LCD_EN, + DSI_GPIO_TE, + DSI_GPIO_BRIDGE_EN_0, + DSI_GPIO_BRIDGE_EN_1, + DSI_GPIO_BRIDGE_REFCLK_EN, + DSI_N_GPIO_PANEL, /* add new gpio above this entry */ +}; + +enum +{ + DSI_DISABLE, + DSI_ENABLE, +}; + + +typedef struct +{ + NvU8 cmd_type; + NvU8 data_id; + union + { + NvU16 data_len; + NvU16 delay_ms; + NvU16 delay_us; + NvU32 gpio; + NvU16 frame_cnt; + struct + { + NvU8 data0; + NvU8 data1; + } sp; + } sp_len_dly; + NvU32 *pdata; + NvU8 link_id; + NvBool club_cmd; +} DSI_CMD, *PDSICMD; + +typedef struct +{ + NvU16 t_hsdexit_ns; + NvU16 t_hstrail_ns; + NvU16 t_datzero_ns; + NvU16 t_hsprepare_ns; + NvU16 t_hsprebegin_ns; + NvU16 t_hspost_ns; + + NvU16 t_clktrail_ns; + NvU16 t_clkpost_ns; + NvU16 t_clkzero_ns; + NvU16 t_tlpx_ns; + + NvU16 t_clkprepare_ns; + NvU16 t_clkpre_ns; + NvU16 t_wakeup_ns; + + NvU16 t_taget_ns; + NvU16 t_tasure_ns; + NvU16 t_tago_ns; +} DSI_PHY_TIMING_IN_NS; + +typedef struct +{ + NvU32 hActive; + NvU32 vActive; + NvU32 hFrontPorch; + NvU32 vFrontPorch; + NvU32 hBackPorch; + NvU32 vBackPorch; + NvU32 hSyncWidth; + NvU32 vSyncWidth; + NvU32 hPulsePolarity; + NvU32 vPulsePolarity; + NvU32 pixelClkRate; +} DSITIMINGS, *PDSITIMINGS; + +typedef struct +{ + NvU8 n_data_lanes; /* required */ + NvU8 pixel_format; /* required */ + NvU8 refresh_rate; /* required */ + NvU8 rated_refresh_rate; + NvU8 panel_reset; /* required */ + NvU8 virtual_channel; /* required */ + NvU8 dsi_instance; + NvU16 dsi_panel_rst_gpio; + NvU16 dsi_panel_bl_en_gpio; + NvU16 dsi_panel_bl_pwm_gpio; + NvU16 even_odd_split_width; + NvU8 controller_vs; + + NvBool panel_has_frame_buffer; /* required*/ + + /* Deprecated. Use DSI_SEND_FRAME panel command instead. 
*/ + NvBool panel_send_dc_frames; + + DSI_CMD *dsi_init_cmd; /* required */ + NvU16 n_init_cmd; /* required */ + NvBool sendInitCmdsEarly; + + DSI_CMD *dsi_early_suspend_cmd; + NvU16 n_early_suspend_cmd; + + DSI_CMD *dsi_late_resume_cmd; + NvU16 n_late_resume_cmd; + + DSI_CMD *dsi_postvideo_cmd; + NvU16 n_postvideo_cmd; + + DSI_CMD *dsi_suspend_cmd; /* required */ + NvU16 n_suspend_cmd; /* required */ + + NvU8 video_data_type; /* required */ + NvU8 video_clock_mode; + NvU8 video_burst_mode; + NvU8 ganged_type; + NvU16 ganged_overlap; + NvBool ganged_swap_links; + NvBool ganged_write_to_all_links; + NvU8 split_link_type; + + NvU8 suspend_aggr; + + NvU16 panel_buffer_size_byte; + NvU16 panel_reset_timeout_msec; + + NvBool hs_cmd_mode_supported; + NvBool hs_cmd_mode_on_blank_supported; + NvBool enable_hs_clock_on_lp_cmd_mode; + NvBool no_pkt_seq_eot; /* 1st generation panel may not + * support eot. Don't set it for + * most panels.*/ + const NvU32 *pktSeq; + NvBool skip_dsi_pkt_header; + NvBool power_saving_suspend; + NvBool suspend_stop_stream_late; + NvBool dsi2lvds_bridge_enable; + NvBool dsi2edp_bridge_enable; + + NvU32 max_panel_freq_khz; + NvU32 lp_cmd_mode_freq_khz; + NvU32 lp_read_cmd_mode_freq_khz; + NvU32 hs_clk_in_lp_cmd_mode_freq_khz; + NvU32 burst_mode_freq_khz; + NvU32 fpga_freq_khz; + + NvU32 te_gpio; + NvBool te_polarity_low; + NvBool dsiEnVRR; + NvBool dsiVrrPanelSupportsTe; + NvBool dsiForceSetTePin; + + int panel_gpio[DSI_N_GPIO_PANEL]; + NvBool panel_gpio_populated; + + NvU32 dpd_dsi_pads; + + DSI_PHY_TIMING_IN_NS phyTimingNs; + + NvU8 *bl_name; + + NvBool lp00_pre_panel_wakeup; + NvBool ulpm_not_supported; + NvBool use_video_host_fifo_for_cmd; + NvBool dsi_csi_loopback; + NvBool set_max_timeout; + NvBool use_legacy_dphy_core; + // Swap P/N pins polarity of all data lanes + NvBool swap_data_lane_polarity; + // Swap P/N pins polarity of clock lane + NvBool swap_clock_lane_polarity; + // Reverse clock polarity for partition A/B. 1st SOT bit goes on negedge of Clock lane + NvBool reverse_clock_polarity; + // DSI Lane Crossbar. Allocating xbar array for max number of lanes + NvBool lane_xbar_exists; + NvU32 lane_xbar_ctrl[8]; + NvU32 refresh_rate_adj; + + NvU8 dsiPhyType; + + DSITIMINGS dsiTimings; + + // DSC Parameters + NvBool dsiDscEnable; + NvU32 dsiDscBpp; + NvU32 dsiDscNumSlices; + NvU32 dsiDscSliceWidth; + NvU32 dsiDscSliceHeight; + NvBool dsiDscEnBlockPrediction; + NvBool dsiDscEnDualDsc; + NvU32 dsiDscDecoderMajorVersion; + NvU32 dsiDscDecoderMinorVersion; +} DSI_PANEL_INFO; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h new file mode 100644 index 0000000..2fb5aad --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _OS_GPIO_H_ +#define _OS_GPIO_H_ + +typedef enum +{ + NV_OS_GPIO_FUNC_HOTPLUG_A, + NV_OS_GPIO_FUNC_HOTPLUG_B, +} NV_OS_GPIO_FUNC_NAMES; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h new file mode 100644 index 0000000..d2839ae --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h @@ -0,0 +1,110 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _RM_GPU_OPS_H_ +#define _RM_GPU_OPS_H_ + + + +#include +#include +#include "nv_stdarg.h" +#include +#include + +NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessionHandle_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32 , nvgpuPmaAllocationOptions_t, NvU64 *); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *); +void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *); + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_sys(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, nvgpuP2PCapsParams_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, NvLength, void **, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, void*); +NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, const nvgpuChannelAllocParams_t *, nvgpuChannelHandle_t *, nvgpuChannelInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t *, nvgpuChannelHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_free(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64); +NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuCaps_t); +NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp, nvgpuDeviceHandle_t, nvgpuCesCaps_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *, const NvProcessorUuid *pUuid, const nvgpuClientInfo_t *, nvgpuInfo_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuAddressSpaceHandle_t, NvU64, NvU64 *); + +NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, NvHandle *, 
nvgpuMemoryInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_free_duped_handle(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle); +NV_STATUS NV_API_CALL rm_gpu_ops_get_fb_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFbInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_ecc_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuEccInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *, nvgpuDeviceHandle_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *); +NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, nvgpuAccessCntrConfig_t); +NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_p2p_object_create(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, NvHandle *); +void NV_API_CALL rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *, nvgpuSessionHandle_t, NvHandle); +NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalMappingInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_retain_channel(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvHandle, NvHandle, void **, nvgpuChannelInstanceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_bind_channel_resources(nvidia_stack_t *, void *, nvgpuChannelResourceBindParams_t); +void NV_API_CALL rm_gpu_ops_release_channel(nvidia_stack_t *, void *); +void NV_API_CALL rm_gpu_ops_stop_channel(nvidia_stack_t *, void *, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvP64, NvU64, NvU64, nvgpuExternalMappingInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *, nvgpuDeviceHandle_t, const void *); + +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *, nvgpuDeviceHandle_t, const nvgpuPagingChannelAllocParams_t *, nvgpuPagingChannelHandle_t *, nvgpuPagingChannelInfo_t); +void NV_API_CALL rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *, nvgpuPagingChannelHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t, NvU64 *); +void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32); + + +NV_STATUS NV_API_CALL 
rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
+NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
+NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *, struct ccslContext_t *);
+NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
+NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 const *);
+NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
+
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh b/NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh
new file mode 100755
index 0000000..5c00771
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh
@@ -0,0 +1,6067 @@
+#!/bin/sh
+
+PATH="${PATH}:/bin:/sbin:/usr/bin"
+
+# make sure we are in the directory containing this script
+SCRIPTDIR=`dirname $0`
+cd $SCRIPTDIR
+
+CC="$1"
+ARCH=$2
+ISYSTEM=`$CC -print-file-name=include 2> /dev/null`
+SOURCES=$3
+HEADERS=$SOURCES/include
+OUTPUT=$4
+XEN_PRESENT=1
+PREEMPT_RT_PRESENT=0
+KERNEL_ARCH="$ARCH"
+
+if [ "$ARCH" = "i386" -o "$ARCH" = "x86_64" ]; then
+    if [ -d "$SOURCES/arch/x86" ]; then
+        KERNEL_ARCH="x86"
+    fi
+fi
+
+# VGX_BUILD parameter defined only for VGX builds (vGPU Host driver)
+# VGX_KVM_BUILD parameter defined only for vGPU builds on KVM hypervisor
+# GRID_BUILD parameter defined only for GRID builds (GRID Guest driver)
+# GRID_BUILD_CSP parameter defined only for GRID CSP builds (GRID Guest driver for CSPs)
+
+test_xen() {
+    #
+    # Determine if the target kernel is a Xen kernel. It used to be
+    # sufficient to check for CONFIG_XEN, but with the introduction of
+    # modular para-virtualization (CONFIG_PARAVIRT, etc.) and
+    # Xen guest support, it is no longer possible to determine the
+    # target environment at build time. Therefore, if both
+    # CONFIG_XEN and CONFIG_PARAVIRT are present, test_xen() treats
+    # the kernel as a stand-alone kernel.
+    #
+    if ! test_configuration_option CONFIG_XEN ||
+         test_configuration_option CONFIG_PARAVIRT; then
+        XEN_PRESENT=0
+    fi
+}
+
+append_conftest() {
+    #
+    # Echo data from stdin: this is a transitional function to make it easier
+    # to port conftests from drivers with parallel conftest generation to
+    # older driver versions
+    #
+
+    while read LINE; do
+        echo ${LINE}
+    done
+}
+
+translate_and_preprocess_header_files() {
+    # Inputs:
+    #   $1: list of relative file paths
+    #
+    # This routine creates an upper case, underscore version of each of the
+    # relative file paths, and uses that as the token to either define or
+    # undefine in a C header file. For example, linux/fence.h becomes
+    # NV_LINUX_FENCE_H_PRESENT, and that is either defined or undefined, in the
+    # output (which goes to stdout, just like the rest of this file).
+
+    # -MG or -MD can interfere with the use of -M and -M -MG for testing file
+    # existence; filter out any occurrences from CFLAGS. CFLAGS is intentionally
+    # wrapped with whitespace in the input to sed(1) so the regex can match zero
+    # or more occurrences of "-MD" or "-MG", surrounded by whitespace to avoid
+    # accidental matches with tokens that happen to contain either of those
+    # strings, without special handling of the beginning or the end of the line.
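+    #
+    # For example (illustrative): with CFLAGS='-MD -O2 -MG', the echoed
+    # string '-E -M -MD -O2 -MG ' below is reduced by the sed expression
+    # to '-E -M -O2 '.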
+ TEST_CFLAGS=`echo "-E -M $CFLAGS " | sed -e 's/\( -M[DG]\)* / /g'` + + for file in $@; do + local file_define=NV_`echo $file | tr '/.' '_' | tr '-' '_' | tr 'a-z' 'A-Z'`_PRESENT + + CODE="#include <$file>" + + if echo "$CODE" | $CC $TEST_CFLAGS - > /dev/null 2>&1; then + echo "#define $file_define" + else + # If preprocessing failed, it could have been because the header + # file under test is not present, or because it is present but + # depends upon the inclusion of other header files. Attempting + # preprocessing again with -MG will ignore a missing header file + # but will still fail if the header file is present. + if echo "$CODE" | $CC $TEST_CFLAGS -MG - > /dev/null 2>&1; then + echo "#undef $file_define" + else + echo "#define $file_define" + fi + fi + done +} + +test_headers() { + # + # Determine which header files (of a set that may or may not be + # present) are provided by the target kernel. + # + FILES="asm/system.h" + FILES="$FILES drm/drmP.h" + FILES="$FILES drm/drm_auth.h" + FILES="$FILES drm/drm_gem.h" + FILES="$FILES drm/drm_crtc.h" + FILES="$FILES drm/drm_atomic.h" + FILES="$FILES drm/drm_atomic_helper.h" + FILES="$FILES drm/drm_encoder.h" + FILES="$FILES drm/drm_atomic_uapi.h" + FILES="$FILES drm/drm_drv.h" + FILES="$FILES drm/drm_framebuffer.h" + FILES="$FILES drm/drm_connector.h" + FILES="$FILES drm/drm_probe_helper.h" + FILES="$FILES drm/drm_blend.h" + FILES="$FILES drm/drm_fourcc.h" + FILES="$FILES drm/drm_prime.h" + FILES="$FILES drm/drm_plane.h" + FILES="$FILES drm/drm_vblank.h" + FILES="$FILES drm/drm_file.h" + FILES="$FILES drm/drm_ioctl.h" + FILES="$FILES drm/drm_device.h" + FILES="$FILES drm/drm_mode_config.h" + FILES="$FILES dt-bindings/interconnect/tegra_icc_id.h" + FILES="$FILES generated/autoconf.h" + FILES="$FILES generated/compile.h" + FILES="$FILES generated/utsrelease.h" + FILES="$FILES linux/efi.h" + FILES="$FILES linux/kconfig.h" + FILES="$FILES linux/platform/tegra/mc_utils.h" + FILES="$FILES linux/semaphore.h" + FILES="$FILES linux/printk.h" + FILES="$FILES linux/ratelimit.h" + FILES="$FILES linux/prio_tree.h" + FILES="$FILES linux/log2.h" + FILES="$FILES linux/of.h" + FILES="$FILES linux/bug.h" + FILES="$FILES linux/sched.h" + FILES="$FILES linux/sched/mm.h" + FILES="$FILES linux/sched/signal.h" + FILES="$FILES linux/sched/task.h" + FILES="$FILES linux/sched/task_stack.h" + FILES="$FILES xen/ioemu.h" + FILES="$FILES linux/fence.h" + FILES="$FILES linux/dma-resv.h" + FILES="$FILES soc/tegra/chip-id.h" + FILES="$FILES soc/tegra/fuse.h" + FILES="$FILES soc/tegra/tegra_bpmp.h" + FILES="$FILES video/nv_internal.h" + FILES="$FILES linux/platform/tegra/dce/dce-client-ipc.h" + FILES="$FILES linux/nvhost.h" + FILES="$FILES linux/nvhost_t194.h" + FILES="$FILES linux/host1x-next.h" + FILES="$FILES asm/book3s/64/hash-64k.h" + FILES="$FILES asm/set_memory.h" + FILES="$FILES asm/prom.h" + FILES="$FILES asm/powernv.h" + FILES="$FILES linux/atomic.h" + FILES="$FILES asm/barrier.h" + FILES="$FILES asm/opal-api.h" + FILES="$FILES sound/hdaudio.h" + FILES="$FILES asm/pgtable_types.h" + FILES="$FILES linux/stringhash.h" + FILES="$FILES linux/dma-map-ops.h" + FILES="$FILES rdma/peer_mem.h" + FILES="$FILES sound/hda_codec.h" + FILES="$FILES linux/dma-buf.h" + FILES="$FILES linux/time.h" + FILES="$FILES linux/platform_device.h" + FILES="$FILES linux/mutex.h" + FILES="$FILES linux/reset.h" + FILES="$FILES linux/of_platform.h" + FILES="$FILES linux/of_device.h" + FILES="$FILES linux/of_gpio.h" + FILES="$FILES linux/gpio.h" + FILES="$FILES linux/gpio/consumer.h" + 
FILES="$FILES linux/interconnect.h" + FILES="$FILES linux/pm_runtime.h" + FILES="$FILES linux/clk.h" + FILES="$FILES linux/clk-provider.h" + FILES="$FILES linux/ioasid.h" + FILES="$FILES linux/stdarg.h" + FILES="$FILES linux/iosys-map.h" + FILES="$FILES asm/coco.h" + + translate_and_preprocess_header_files $FILES +} + +build_cflags() { + BASE_CFLAGS="-O2 -D__KERNEL__ \ +-DKBUILD_BASENAME=\"#conftest$$\" -DKBUILD_MODNAME=\"#conftest$$\" \ +-nostdinc -isystem $ISYSTEM" + + if [ "$OUTPUT" != "$SOURCES" ]; then + OUTPUT_CFLAGS="-I$OUTPUT/include2 -I$OUTPUT/include" + if [ -f "$OUTPUT/include/generated/autoconf.h" ]; then + AUTOCONF_FILE="$OUTPUT/include/generated/autoconf.h" + else + AUTOCONF_FILE="$OUTPUT/include/linux/autoconf.h" + fi + else + if [ -f "$HEADERS/generated/autoconf.h" ]; then + AUTOCONF_FILE="$HEADERS/generated/autoconf.h" + else + AUTOCONF_FILE="$HEADERS/linux/autoconf.h" + fi + fi + + test_xen + + if [ "$XEN_PRESENT" != "0" ]; then + MACH_CFLAGS="-I$HEADERS/asm/mach-xen" + fi + + SOURCE_HEADERS="$HEADERS" + SOURCE_ARCH_HEADERS="$SOURCES/arch/$KERNEL_ARCH/include" + OUTPUT_HEADERS="$OUTPUT/include" + OUTPUT_ARCH_HEADERS="$OUTPUT/arch/$KERNEL_ARCH/include" + + # Look for mach- directories on this arch, and add it to the list of + # includes if that platform is enabled in the configuration file, which + # may have a definition like this: + # #define CONFIG_ARCH_ 1 + for _mach_dir in `ls -1d $SOURCES/arch/$KERNEL_ARCH/mach-* 2>/dev/null`; do + _mach=`echo $_mach_dir | \ + sed -e "s,$SOURCES/arch/$KERNEL_ARCH/mach-,," | \ + tr 'a-z' 'A-Z'` + grep "CONFIG_ARCH_$_mach \+1" $AUTOCONF_FILE > /dev/null 2>&1 + if [ $? -eq 0 ]; then + MACH_CFLAGS="$MACH_CFLAGS -I$_mach_dir/include" + fi + done + + if [ "$ARCH" = "arm" ]; then + MACH_CFLAGS="$MACH_CFLAGS -D__LINUX_ARM_ARCH__=7" + fi + + # Add the mach-default includes (only found on x86/older kernels) + MACH_CFLAGS="$MACH_CFLAGS -I$SOURCE_HEADERS/asm-$KERNEL_ARCH/mach-default" + MACH_CFLAGS="$MACH_CFLAGS -I$SOURCE_ARCH_HEADERS/asm/mach-default" + + CFLAGS="$BASE_CFLAGS $MACH_CFLAGS $OUTPUT_CFLAGS -include $AUTOCONF_FILE" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS/uapi" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS/xen" + CFLAGS="$CFLAGS -I$OUTPUT_HEADERS/generated/uapi" + CFLAGS="$CFLAGS -I$SOURCE_ARCH_HEADERS" + CFLAGS="$CFLAGS -I$SOURCE_ARCH_HEADERS/uapi" + CFLAGS="$CFLAGS -I$OUTPUT_ARCH_HEADERS/generated" + CFLAGS="$CFLAGS -I$OUTPUT_ARCH_HEADERS/generated/uapi" + + if [ -n "$BUILD_PARAMS" ]; then + CFLAGS="$CFLAGS -D$BUILD_PARAMS" + fi + + # Check if gcc supports asm goto and set CC_HAVE_ASM_GOTO if it does. + # Older kernels perform this check and set this flag in Kbuild, and since + # conftest.sh runs outside of Kbuild it ends up building without this flag. + # Starting with commit e9666d10a5677a494260d60d1fa0b73cc7646eb3 this test + # is done within Kconfig, and the preprocessor flag is no longer needed. + + GCC_GOTO_SH="$SOURCES/build/gcc-goto.sh" + + if [ -f "$GCC_GOTO_SH" ]; then + # Newer versions of gcc-goto.sh don't print anything on success, but + # this is okay, since it's no longer necessary to set CC_HAVE_ASM_GOTO + # based on the output of those versions of gcc-goto.sh. + if [ `/bin/sh "$GCC_GOTO_SH" "$CC"` = "y" ]; then + CFLAGS="$CFLAGS -DCC_HAVE_ASM_GOTO" + fi + fi + + # + # If CONFIG_HAVE_FENTRY is enabled and gcc supports -mfentry flags then set + # CC_USING_FENTRY and add -mfentry into cflags. 
+    #
+    # The linux/ftrace.h file indirectly gets included into the conftest
+    # source and fails to compile: because conftest.sh runs outside of Kbuild,
+    # it ends up building without the -mfentry and CC_USING_FENTRY flags.
+    #
+    grep "CONFIG_HAVE_FENTRY \+1" $AUTOCONF_FILE > /dev/null 2>&1
+    if [ $? -eq 0 ]; then
+        echo "" > conftest$$.c
+
+        $CC -mfentry -c -x c conftest$$.c > /dev/null 2>&1
+        rm -f conftest$$.c
+
+        if [ -f conftest$$.o ]; then
+            rm -f conftest$$.o
+
+            CFLAGS="$CFLAGS -mfentry -DCC_USING_FENTRY"
+        fi
+    fi
+}
+
+CONFTEST_PREAMBLE="#include \"conftest/headers.h\"
+    #if defined(NV_LINUX_KCONFIG_H_PRESENT)
+    #include <linux/kconfig.h>
+    #endif
+    #if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
+    #include <generated/autoconf.h>
+    #else
+    #include <linux/autoconf.h>
+    #endif
+    #if defined(CONFIG_XEN) && \
+        defined(CONFIG_XEN_INTERFACE_VERSION) && !defined(__XEN_INTERFACE_VERSION__)
+    #define __XEN_INTERFACE_VERSION__ CONFIG_XEN_INTERFACE_VERSION
+    #endif
+    #if defined(CONFIG_KASAN) && defined(CONFIG_ARM64)
+    #if defined(CONFIG_KASAN_SW_TAGS)
+    #define KASAN_SHADOW_SCALE_SHIFT 4
+    #else
+    #define KASAN_SHADOW_SCALE_SHIFT 3
+    #endif
+    #endif"
+
+test_configuration_option() {
+    #
+    # Check to see if the given configuration option is defined
+    #
+
+    get_configuration_option $1 >/dev/null 2>&1
+
+    return $?
+
+}
+
+set_configuration() {
+    #
+    # Set a specific configuration option. This function is called to always
+    # enable a configuration, in order to verify whether the test code for that
+    # configuration is no longer required and the corresponding
+    # conditionally-compiled code in the driver can be removed.
+    #
+    DEF="$1"
+
+    if [ "$3" = "" ]
+    then
+        VAL=""
+        CAT="$2"
+    else
+        VAL="$2"
+        CAT="$3"
+    fi
+
+    echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}"
+}
+
+unset_configuration() {
+    #
+    # Un-set a specific configuration option. This function is called to
+    # always disable a configuration, in order to verify whether the test
+    # code for that configuration is no longer required and the corresponding
+    # conditionally-compiled code in the driver can be removed.
+    #
+    DEF="$1"
+    CAT="$2"
+
+    echo "#undef ${DEF}" | append_conftest "${CAT}"
+}
+
+compile_check_conftest() {
+    #
+    # Compile the current conftest C file and check+output the result
+    #
+    CODE="$1"
+    DEF="$2"
+    VAL="$3"
+    CAT="$4"
+
+    echo "$CONFTEST_PREAMBLE
+    $CODE" > conftest$$.c
+
+    $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+    rm -f conftest$$.c
+
+    if [ -f conftest$$.o ]; then
+        rm -f conftest$$.o
+        if [ "${CAT}" = "functions" ]; then
+            #
+            # The logic for "functions" compilation tests is inverted compared to
+            # other compilation steps: if the function is present, the code
+            # snippet will fail to compile because the function call won't match
+            # the prototype. If the function is not present, the code snippet
+            # will produce an object file with the function as an unresolved
+            # symbol.
+            #
+            echo "#undef ${DEF}" | append_conftest "${CAT}"
+        else
+            echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}"
+        fi
+        return
+    else
+        if [ "${CAT}" = "functions" ]; then
+            echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}"
+        else
+            echo "#undef ${DEF}" | append_conftest "${CAT}"
+        fi
+        return
+    fi
+}
+
+export_symbol_present_conftest() {
+    #
+    # Check Module.symvers to see whether the given symbol is present.
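+    #
+    # A matching line consists of tab-separated fields; for example
+    # (illustrative, with <TAB> standing for a literal tab character):
+    #
+    #   0x12345678<TAB>printk<TAB>vmlinux<TAB>EXPORT_SYMBOL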
+ # + + SYMBOL="$1" + TAB=' ' + + if grep -e "${TAB}${SYMBOL}${TAB}.*${TAB}EXPORT_SYMBOL.*\$" \ + "$OUTPUT/Module.symvers" >/dev/null 2>&1; then + echo "#define NV_IS_EXPORT_SYMBOL_PRESENT_$SYMBOL 1" | + append_conftest "symbols" + else + # May be a false negative if Module.symvers is absent or incomplete, + # or if the Module.symvers format changes. + echo "#define NV_IS_EXPORT_SYMBOL_PRESENT_$SYMBOL 0" | + append_conftest "symbols" + fi +} + +export_symbol_gpl_conftest() { + # + # Check Module.symvers to see whether the given symbol is present and its + # export type is GPL-only (including deprecated GPL-only symbols). + # + + SYMBOL="$1" + TAB=' ' + + if grep -e "${TAB}${SYMBOL}${TAB}.*${TAB}EXPORT_\(UNUSED_\)*SYMBOL_GPL\$" \ + "$OUTPUT/Module.symvers" >/dev/null 2>&1; then + echo "#define NV_IS_EXPORT_SYMBOL_GPL_$SYMBOL 1" | + append_conftest "symbols" + else + # May be a false negative if Module.symvers is absent or incomplete, + # or if the Module.symvers format changes. + echo "#define NV_IS_EXPORT_SYMBOL_GPL_$SYMBOL 0" | + append_conftest "symbols" + fi +} + +get_configuration_option() { + # + # Print the value of given configuration option, if defined + # + RET=1 + OPTION=$1 + + OLD_FILE="linux/autoconf.h" + NEW_FILE="generated/autoconf.h" + FILE="" + + if [ -f $HEADERS/$NEW_FILE -o -f $OUTPUT/include/$NEW_FILE ]; then + FILE=$NEW_FILE + elif [ -f $HEADERS/$OLD_FILE -o -f $OUTPUT/include/$OLD_FILE ]; then + FILE=$OLD_FILE + fi + + if [ -n "$FILE" ]; then + # + # We are looking at a configured source tree; verify + # that its configuration includes the given option + # via a compile check, and print the option's value. + # + + if [ -f $HEADERS/$FILE ]; then + INCLUDE_DIRECTORY=$HEADERS + elif [ -f $OUTPUT/include/$FILE ]; then + INCLUDE_DIRECTORY=$OUTPUT/include + else + return 1 + fi + + echo "#include <$FILE> + #ifndef $OPTION + #error $OPTION not defined! + #endif + + $OPTION + " > conftest$$.c + + $CC -E -P -I$INCLUDE_DIRECTORY -o conftest$$ conftest$$.c > /dev/null 2>&1 + + if [ -e conftest$$ ]; then + tr -d '\r\n\t ' < conftest$$ + RET=$? + fi + + rm -f conftest$$.c conftest$$ + else + CONFIG=$OUTPUT/.config + if [ -f $CONFIG ] && grep "^$OPTION=" $CONFIG; then + grep "^$OPTION=" $CONFIG | cut -f 2- -d "=" + RET=$? + fi + fi + + return $RET + +} + +check_for_ib_peer_memory_symbols() { + local kernel_dir="$1" + local module_symvers="${kernel_dir}/Module.symvers" + + local sym_ib_register="ib_register_peer_memory_client" + local sym_ib_unregister="ib_unregister_peer_memory_client" + local tab=' ' + + # Return 0 for true(no errors), 1 for false + if [ ! -f "${module_symvers}" ]; then + return 1 + fi + + if grep -e "${tab}${sym_ib_register}${tab}.*${tab}EXPORT_SYMBOL.*\$" \ + "${module_symvers}" > /dev/null 2>&1 && + grep -e "${tab}${sym_ib_unregister}${tab}.*${tab}EXPORT_SYMBOL.*\$" \ + "${module_symvers}" > /dev/null 2>&1; then + return 0 + else + return 1 + fi +} + +compile_test() { + case "$1" in + set_memory_uc) + # + # Determine if the set_memory_uc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #else + #include + #endif + void conftest_set_memory_uc(void) { + set_memory_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_MEMORY_UC_PRESENT" "" "functions" + ;; + + set_memory_array_uc) + # + # Determine if the set_memory_array_uc() function is present. + # It does not exist on all architectures. 
+            #
+            CODE="
+            #include <linux/types.h>
+            #if defined(NV_ASM_SET_MEMORY_H_PRESENT)
+            #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT)
+            #include <asm/pgtable_types.h>
+            #endif
+            #include <asm/set_memory.h>
+            #else
+            #include <asm/cacheflush.h>
+            #endif
+            void conftest_set_memory_array_uc(void) {
+                set_memory_array_uc();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SET_MEMORY_ARRAY_UC_PRESENT" "" "functions"
+        ;;
+
+        sysfs_slab_unlink)
+            #
+            # Determine if the sysfs_slab_unlink() function is present.
+            #
+            # This test is useful to check for the presence of a fix for the
+            # deferred kmem_cache destroy feature (see nvbug: 2543505).
+            #
+            # Added by commit d50d82faa0c9 ("slub: fix failure when we delete and
+            # create a slab cache") in 4.18 (2018-06-27).
+            #
+            CODE="
+            #include <linux/slub_def.h>
+            void conftest_sysfs_slab_unlink(void) {
+                sysfs_slab_unlink();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SYSFS_SLAB_UNLINK_PRESENT" "" "functions"
+        ;;
+
+        list_is_first)
+            #
+            # Determine if the list_is_first() function is present.
+            #
+            # Added by commit 70b44595eafe ("mm, compaction: use free lists
+            # to quickly locate a migration source") in 5.1 (2019-03-05)
+            #
+            CODE="
+            #include <linux/list.h>
+            void conftest_list_is_first(void) {
+                list_is_first();
+            }"
+
+            compile_check_conftest "$CODE" "NV_LIST_IS_FIRST_PRESENT" "" "functions"
+        ;;
+
+        set_pages_uc)
+            #
+            # Determine if the set_pages_uc() function is present.
+            # It does not exist on all architectures.
+            #
+            CODE="
+            #include <linux/types.h>
+            #if defined(NV_ASM_SET_MEMORY_H_PRESENT)
+            #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT)
+            #include <asm/pgtable_types.h>
+            #endif
+            #include <asm/set_memory.h>
+            #else
+            #include <asm/cacheflush.h>
+            #endif
+            void conftest_set_pages_uc(void) {
+                set_pages_uc();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SET_PAGES_UC_PRESENT" "" "functions"
+        ;;
+
+        set_pages_array_uc)
+            #
+            # Determine if the set_pages_array_uc() function is present.
+            # It does not exist on all architectures.
+            #
+            # set_pages_array_uc() was added by commit
+            # 0f3507555f6fa4acbc85a646d6e8766230db38fc ("x86, CPA: Add
+            # set_pages_arrayuc and set_pages_array_wb") in v2.6.30-rc1 (Thu Mar
+            # 19 14:51:15 2009)
+            #
+            CODE="
+            #include <linux/types.h>
+            #if defined(NV_ASM_SET_MEMORY_H_PRESENT)
+            #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT)
+            #include <asm/pgtable_types.h>
+            #endif
+            #include <asm/set_memory.h>
+            #else
+            #include <asm/cacheflush.h>
+            #endif
+            void conftest_set_pages_array_uc(void) {
+                set_pages_array_uc();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SET_PAGES_ARRAY_UC_PRESENT" "" "functions"
+        ;;
+
+        flush_cache_all)
+            #
+            # Determine if the flush_cache_all() function is present.
+            #
+            # flush_cache_all() was removed for aarch64 by commit
+            # 68234df4ea79 ("arm64: kill flush_cache_all()") in 4.2
+            # (2015-04-20).
+            #
+            CODE="
+            #include <asm/cacheflush.h>
+            int conftest_flush_cache_all(void) {
+                return flush_cache_all();
+            }"
+            compile_check_conftest "$CODE" "NV_FLUSH_CACHE_ALL_PRESENT" "" "functions"
+        ;;
+
+        pci_get_domain_bus_and_slot)
+            #
+            # Determine if the pci_get_domain_bus_and_slot() function
+            # is present.
+            #
+            # Added by commit 3c299dc22635 ("PCI: add
+            # pci_get_domain_bus_and_slot function") in 2.6.33.
+            #
+            CODE="
+            #include <linux/pci.h>
+            void conftest_pci_get_domain_bus_and_slot(void) {
+                pci_get_domain_bus_and_slot();
+            }"
+
+            compile_check_conftest "$CODE" "NV_PCI_GET_DOMAIN_BUS_AND_SLOT_PRESENT" "" "functions"
+        ;;
+
+        pci_bus_address)
+            #
+            # Determine if the pci_bus_address() function is
+            # present.
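+            #
+            # Hypothetical usage sketch (not part of the conftest): a
+            # driver might branch on the result along the lines of
+            #
+            #   #if defined(NV_PCI_BUS_ADDRESS_PRESENT)
+            #       bus_addr = pci_bus_address(pdev, bar);
+            #   #else
+            #       bus_addr = pci_resource_start(pdev, bar);
+            #   #endif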
+            #
+            # Added by commit 06cf56e497c8 ("PCI: Add pci_bus_address() to
+            # get bus address of a BAR") in v3.14
+            #
+            CODE="
+            #include <linux/pci.h>
+            void conftest_pci_bus_address(void) {
+                pci_bus_address();
+            }"
+
+            compile_check_conftest "$CODE" "NV_PCI_BUS_ADDRESS_PRESENT" "" "functions"
+        ;;
+
+        hash__remap_4k_pfn)
+            #
+            # Determine if the hash__remap_4k_pfn() function is
+            # present.
+            #
+            # Added by commit 6cc1a0ee4ce2 ("powerpc/mm/radix: Add radix
+            # callback for pmd accessors") in v4.7 (committed 2016-04-29).
+            # Present only in arch/powerpc.
+            #
+            CODE="
+            #if defined(NV_ASM_BOOK3S_64_HASH_64K_H_PRESENT)
+            #include <linux/mm.h>
+            #include <asm/book3s/64/hash-64k.h>
+            #endif
+            void conftest_hash__remap_4k_pfn(void) {
+                hash__remap_4k_pfn();
+            }"
+
+            compile_check_conftest "$CODE" "NV_HASH__REMAP_4K_PFN_PRESENT" "" "functions"
+        ;;
+
+        register_cpu_notifier)
+            #
+            # Determine if register_cpu_notifier() is present.
+            #
+            # Removed by commit 530e9b76ae8f ("cpu/hotplug: Remove obsolete
+            # cpu hotplug register/unregister functions") in v4.10
+            # (2016-12-21)
+            #
+            CODE="
+            #include <linux/cpu.h>
+            void conftest_register_cpu_notifier(void) {
+                register_cpu_notifier();
+            }"
+            compile_check_conftest "$CODE" "NV_REGISTER_CPU_NOTIFIER_PRESENT" "" "functions"
+        ;;
+
+        cpuhp_setup_state)
+            #
+            # Determine if cpuhp_setup_state() is present.
+            #
+            # Added by commit 5b7aa87e0482 ("cpu/hotplug: Implement
+            # setup/removal interface") in v4.6 (committed 2016-02-26)
+            #
+            # It is used as a replacement for register_cpu_notifier.
+            CODE="
+            #include <linux/cpu.h>
+            void conftest_cpuhp_setup_state(void) {
+                cpuhp_setup_state();
+            }"
+            compile_check_conftest "$CODE" "NV_CPUHP_SETUP_STATE_PRESENT" "" "functions"
+        ;;
+
+        ioremap_cache)
+            #
+            # Determine if the ioremap_cache() function is present.
+            # It does not exist on all architectures.
+            #
+            CODE="
+            #include <asm/io.h>
+            void conftest_ioremap_cache(void) {
+                ioremap_cache();
+            }"
+
+            compile_check_conftest "$CODE" "NV_IOREMAP_CACHE_PRESENT" "" "functions"
+        ;;
+
+        ioremap_wc)
+            #
+            # Determine if the ioremap_wc() function is present.
+            # It does not exist on all architectures.
+            #
+            CODE="
+            #include <asm/io.h>
+            void conftest_ioremap_wc(void) {
+                ioremap_wc();
+            }"
+
+            compile_check_conftest "$CODE" "NV_IOREMAP_WC_PRESENT" "" "functions"
+        ;;
+
+        file_operations)
+            # The 'ioctl' field was removed by commit b19dd42faf41
+            # ("bkl: Remove locked .ioctl file operation") in v2.6.36.
+            CODE="
+            #include <linux/fs.h>
+            int conftest_file_operations(void) {
+                return offsetof(struct file_operations, ioctl);
+            }"
+
+            compile_check_conftest "$CODE" "NV_FILE_OPERATIONS_HAS_IOCTL" "" "types"
+        ;;
+
+        sg_alloc_table)
+            #
+            # sg_alloc_table_from_pages() was added by commit efc42bc98058
+            # ("scatterlist: add sg_alloc_table_from_pages function") in v3.6.
+            #
+            CODE="
+            #include <linux/scatterlist.h>
+            void conftest_sg_alloc_table_from_pages(void) {
+                sg_alloc_table_from_pages();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SG_ALLOC_TABLE_FROM_PAGES_PRESENT" "" "functions"
+        ;;
+
+        efi_enabled)
+            #
+            # Added in 2.6.12 as a variable.
+            #
+            # Determine if the efi_enabled symbol is present (as a variable),
+            # or if the efi_enabled() function is present and how many
+            # arguments it takes.
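+            #
+            # Illustrative use: on kernels where efi_enabled is a function,
+            # callers pass a facility bit, e.g. efi_enabled(EFI_BOOT); where
+            # it is a variable, it is simply tested for a non-zero value.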
+ # + # Converted from a variable to a function by commit 83e68189745a + # ("efi: Make 'efi_enabled' a function to query EFI facilities") + # in v3.8 + # + echo "$CONFTEST_PREAMBLE + #if defined(NV_LINUX_EFI_H_PRESENT) + #include + #endif + int conftest_efi_enabled(void) { + return efi_enabled(0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_EFI_ENABLED_PRESENT" | append_conftest "functions" + echo "#define NV_EFI_ENABLED_ARGUMENT_COUNT 1" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#define NV_EFI_ENABLED_PRESENT" | append_conftest "symbols" + return + fi + ;; + + dom0_kernel_present) + # Add config parameter if running on DOM0. + if [ -n "$VGX_BUILD" ]; then + echo "#define NV_DOM0_KERNEL_PRESENT" | append_conftest "generic" + else + echo "#undef NV_DOM0_KERNEL_PRESENT" | append_conftest "generic" + fi + return + ;; + + nvidia_vgpu_kvm_build) + # Add config parameter if running on KVM host. + if [ -n "$VGX_KVM_BUILD" ]; then + echo "#define NV_VGPU_KVM_BUILD" | append_conftest "generic" + else + echo "#undef NV_VGPU_KVM_BUILD" | append_conftest "generic" + fi + return + ;; + + vfio_register_notifier) + # + # Check number of arguments required. + # + # New parameters added by commit 22195cbd3451 ("vfio: + # vfio_register_notifier: classify iommu notifier") in v4.10 + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_vfio_register_notifier(void) { + return vfio_register_notifier((struct device *) NULL, (struct notifier_block *) NULL); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_VFIO_NOTIFIER_ARGUMENT_COUNT 2" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#define NV_VFIO_NOTIFIER_ARGUMENT_COUNT 4" | append_conftest "functions" + return + fi + ;; + + vfio_info_add_capability_has_cap_type_id_arg) + # + # Check if vfio_info_add_capability() has cap_type_id parameter. + # + # Removed by commit dda01f787df9 ("vfio: Simplify capability + # helper") in v4.16 (2017-12-12) + # + CODE=" + #include + int vfio_info_add_capability(struct vfio_info_cap *caps, + int cap_type_id, + void *cap_type) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_VFIO_INFO_ADD_CAPABILITY_HAS_CAP_TYPE_ID_ARGS" "" "types" + ;; + + vmbus_channel_has_ringbuffer_page) + # + # Check if ringbuffer_page field exist in vmbus_channel structure + # + # Changed in commit 52a42c2a90226dc61c99bbd0cb096deeb52c334b + # ("vmbus: keep pointer to ring buffer page") in v5.0 (2018-09-14) + # + + CODE=" + #include + + int conftest_vmbus_channel_has_ringbuffer_page(void) { + return offsetof(struct vmbus_channel, ringbuffer_page); + }" + + compile_check_conftest "$CODE" "NV_VMBUS_CHANNEL_HAS_RING_BUFFER_PAGE" "" "types" + ;; + + nvidia_grid_build) + if [ -n "$GRID_BUILD" ]; then + echo "#define NV_GRID_BUILD" | append_conftest "generic" + else + echo "#undef NV_GRID_BUILD" | append_conftest "generic" + fi + return + ;; + + nvidia_grid_csp_build) + if [ -n "$GRID_BUILD_CSP" ]; then + echo "#define NV_GRID_BUILD_CSP $GRID_BUILD_CSP" | append_conftest "generic" + else + echo "#undef NV_GRID_BUILD_CSP" | append_conftest "generic" + fi + return + ;; + + vm_fault_has_address) + # + # Determine if the 'vm_fault' structure has an 'address', or a + # 'virtual_address' field. 
The .virtual_address field was + # effectively renamed to .address: + # + # 'address' added by commit 82b0f8c39a38 ("mm: join + # struct fault_env and vm_fault") in v4.10 (2016-12-14) + # + # 'virtual_address' removed by commit 1a29d85eb0f1 ("mm: use + # vmf->address instead of of vmf->virtual_address") in v4.10 + # (2016-12-14) + # + CODE=" + #include + int conftest_vm_fault_has_address(void) { + return offsetof(struct vm_fault, address); + }" + + compile_check_conftest "$CODE" "NV_VM_FAULT_HAS_ADDRESS" "" "types" + ;; + + kmem_cache_has_kobj_remove_work) + # + # Determine if the 'kmem_cache' structure has 'kobj_remove_work'. + # + # 'kobj_remove_work' was added by commit 3b7b314053d02 ("slub: make + # sysfs file removal asynchronous") in v4.12 (2017-06-23). This + # commit introduced a race between kmem_cache destroy and create + # which we need to workaround in our driver (see nvbug: 2543505). + # Also see comment for sysfs_slab_unlink conftest. + # + CODE=" + #include + #include + #include + int conftest_kmem_cache_has_kobj_remove_work(void) { + return offsetof(struct kmem_cache, kobj_remove_work); + }" + + compile_check_conftest "$CODE" "NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK" "" "types" + ;; + + mdev_uuid) + # + # Determine if mdev_uuid() function is present or not + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 + # + CODE=" + #include + #include + void conftest_mdev_uuid() { + mdev_uuid(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_UUID_PRESENT" "" "functions" + + # + # Determine if mdev_uuid() returns 'const guid_t *'. + # + # mdev_uuid() function prototype updated to return 'const guid_t *' + # by commit 278bca7f318e ("vfio-mdev: Switch to use new generic UUID + # API") in v5.1 (2019-01-10). + # + CODE=" + #include + #include + const guid_t *conftest_mdev_uuid_return_guid_ptr(struct mdev_device *mdev) { + return mdev_uuid(mdev); + }" + + compile_check_conftest "$CODE" "NV_MDEV_UUID_RETURN_GUID_PTR" "" "types" + ;; + + mdev_dev) + # + # Determine if mdev_dev() function is present or not + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 + # + CODE=" + #include + #include + void conftest_mdev_dev() { + mdev_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_DEV_PRESENT" "" "functions" + ;; + + mdev_get_type_group_id) + # + # Determine if mdev_get_type_group_id() function is present or not + # + # Added by commit 15fcc44be0c7a ("vfio/mdev: Add + # mdev/mtype_get_type_group_id()") in v5.13 + # + CODE=" + #include + #include + void conftest_mdev_get_type_group_id() { + mdev_get_type_group_id(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_GET_TYPE_GROUP_ID_PRESENT" "" "functions" + ;; + + mdev_parent) + # + # Determine if the struct mdev_parent type is present. 
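+ #
+ # Note that the snippet below instantiates a 'struct mdev_parent_ops'
+ # object rather than calling a function: this is a "types"-category
+ # test, so NV_MDEV_PARENT_OPS_STRUCT_PRESENT is defined only when the
+ # structure is visible in the headers.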
+ # + # Added by commit 42930553a7c1 ("vfio-mdev: de-polute the + # namespace, rename parent_device & parent_ops") in v4.10 + # + CODE=" + #include + #include + struct mdev_parent_ops conftest_mdev_parent; + " + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_OPS_STRUCT_PRESENT" "" "types" + ;; + + mdev_parent_dev) + # + # Determine if mdev_parent_dev() function is present or not + # + # Added by commit 9372e6feaafb ("vfio-mdev: Make mdev_parent + # private") in v4.10 + # + CODE=" + #include + #include + void conftest_mdev_parent_dev() { + mdev_parent_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_DEV_PRESENT" "" "functions" + ;; + + mdev_from_dev) + # + # Determine if mdev_from_dev() function is present or not. + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 (2016-12-30) + # + CODE=" + #include + #include + void conftest_mdev_from_dev() { + mdev_from_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_FROM_DEV_PRESENT" "" "functions" + ;; + + mdev_set_iommu_device) + # + # Determine if mdev_set_iommu_device() function is present or not. + # + # Added by commit 8ac13175cbe9 ("vfio/mdev: Add iommu related member + # in mdev_device) in v5.1 (2019-04-12) + # + CODE=" + #include + #include + void conftest_mdev_set_iommu_device() { + mdev_set_iommu_device(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_SET_IOMMU_DEVICE_PRESENT" "" "functions" + ;; + + pci_irq_vector_helpers) + # + # Determine if pci_alloc_irq_vectors(), pci_free_irq_vectors() + # functions are present or not. + # + # Added by commit aff171641d181ea573 (PCI: Provide sensible IRQ + # vector alloc/free routines) (2016-07-12) + # + CODE=" + #include + #include + void conftest_pci_irq_vector_helpers() { + pci_alloc_irq_vectors(); + pci_free_irq_vectors (); + }" + + compile_check_conftest "$CODE" "NV_PCI_IRQ_VECTOR_HELPERS_PRESENT" "" "functions" + ;; + + + vfio_device_gfx_plane_info) + # + # determine if the 'struct vfio_device_gfx_plane_info' type is present. + # + # Added by commit e20eaa2382e7 ("vfio: ABI for mdev display + # dma-buf operation") in v4.16 (2017-11-23) + # + CODE=" + #include + struct vfio_device_gfx_plane_info info;" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_GFX_PLANE_INFO_PRESENT" "" "types" + ;; + + vfio_device_migration_info) + # + # determine if the 'struct vfio_device_migration_info' type is present. + # + # Proposed interface for vGPU Migration + # ("[PATCH v3 0/5] Add migration support for VFIO device ") + # https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05176.html + # Upstreamed commit a8a24f3f6e38 (vfio: UAPI for migration interface + # for device state) in v5.8 (2020-05-29) + # + CODE=" + #include + struct vfio_device_migration_info info;" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_MIGRATION_INFO_PRESENT" "" "types" + ;; + + vfio_device_migration_has_start_pfn) + # + # Determine if the 'vfio_device_migration_info' structure has + # a 'start_pfn' field. 
+ # + # This member was present in proposed interface for vGPU Migration + # ("[PATCH v3 0/5] Add migration support for VFIO device ") + # https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05176.html + # which is not present in upstreamed commit a8a24f3f6e38 (vfio: UAPI + # for migration interface for device state) in v5.8 (2020-05-29) + # + CODE=" + #include + int conftest_vfio_device_migration_has_start_pfn(void) { + return offsetof(struct vfio_device_migration_info, start_pfn); + }" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_MIGRATION_HAS_START_PFN" "" "types" + ;; + + drm_available) + # Determine if the DRM subsystem is usable + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + #if !defined(CONFIG_DRM) && !defined(CONFIG_DRM_MODULE) + #error DRM not enabled + #endif + + void conftest_drm_available(void) { + struct drm_driver drv; + + /* 2013-10-02 1bb72532ac260a2d3982b40bdd4c936d779d0d16 */ + (void)drm_dev_alloc; + + /* 2013-10-02 c22f0ace1926da399d9a16dfaf09174c1b03594c */ + (void)drm_dev_register; + + /* 2013-10-02 c3a49737ef7db0bdd4fcf6cf0b7140a883e32b2a */ + (void)drm_dev_unregister; + }" + + compile_check_conftest "$CODE" "NV_DRM_AVAILABLE" "" "generic" + ;; + + drm_dev_unref) + # + # Determine if drm_dev_unref() is present. + # If it isn't, we use drm_dev_free() instead. + # + # drm_dev_free was added by commit 0dc8fe5985e0 ("drm: introduce + # drm_dev_free() to fix error paths") in v3.13 (2013-10-02) + # + # Renamed to drm_dev_unref by commit 099d1c290e2e + # ("drm: provide device-refcount") in v3.15 (2014-01-29) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + void conftest_drm_dev_unref(void) { + drm_dev_unref(); + }" + + compile_check_conftest "$CODE" "NV_DRM_DEV_UNREF_PRESENT" "" "functions" + ;; + + pde_data) + # + # Determine if the pde_data() function is present. + # + # The commit c28198889c15 removed the function + # 'PDE_DATA()', and replaced it with 'pde_data()' + # ("proc: remove PDE_DATA() completely") in v5.17-rc1. + # + CODE=" + #include + void conftest_pde_data(void) { + pde_data(); + }" + + compile_check_conftest "$CODE" "NV_PDE_DATA_LOWER_CASE_PRESENT" "" "functions" + ;; + + get_num_physpages) + # + # Determine if the get_num_physpages() function is + # present. + # + # Added by commit 7ee3d4e8cd56 ("mm: introduce helper function + # mem_init_print_info() to simplify mem_init()") in v3.11 + # + CODE=" + #include + void conftest_get_num_physpages(void) { + get_num_physpages(NULL); + }" + + compile_check_conftest "$CODE" "NV_GET_NUM_PHYSPAGES_PRESENT" "" "functions" + ;; + + proc_remove) + # + # Determine if the proc_remove() function is present. + # + # Added by commit a8ca16ea7b0a ("proc: Supply a function to + # remove a proc entry by PDE") in v3.10 + # + CODE=" + #include + void conftest_proc_remove(void) { + proc_remove(); + }" + + compile_check_conftest "$CODE" "NV_PROC_REMOVE_PRESENT" "" "functions" + ;; + + backing_dev_info) + # + # Determine if the 'address_space' structure has + # a 'backing_dev_info' field. + # + # Removed by commit b83ae6d42143 ("fs: remove + # mapping->backing_dev_info") in v4.0 + # + CODE=" + #include + int conftest_backing_dev_info(void) { + return offsetof(struct address_space, backing_dev_info); + }" + + compile_check_conftest "$CODE" "NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO" "" "types" + ;; + + address_space) + # + # Determine if the 'address_space' structure has + # a 'tree_lock' field of type rwlock_t. 
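+            #
+            # The snippet below checks the field's type, not merely its
+            # presence: rwlock_init() only accepts an rwlock_t, so the test
+            # stops compiling once tree_lock became a spinlock_t, and again
+            # once the field was removed entirely.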
+            #
+            # 'tree_lock' was changed to spinlock_t by commit 19fd6231279b
+            # ("mm: spinlock tree_lock") in v2.6.27
+            #
+            # It was removed altogether by commit b93b016313b3 ("page cache:
+            # use xa_lock") in v4.17
+            #
+            CODE="
+            #include <linux/fs.h>
+            int conftest_address_space(void) {
+                struct address_space as;
+                rwlock_init(&as.tree_lock);
+                return offsetof(struct address_space, tree_lock);
+            }"
+
+            compile_check_conftest "$CODE" "NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK" "" "types"
+        ;;
+
+        address_space_init_once)
+            #
+            # Determine if address_space_init_once() is present.
+            #
+            # Added by commit 2aa15890f3c1 ("mm: prevent concurrent
+            # unmap_mapping_range() on the same inode") in v2.6.38
+            #
+            # If not present, it will be defined in uvm-linux.h.
+            #
+            CODE="
+            #include <linux/fs.h>
+            void conftest_address_space_init_once(void) {
+                address_space_init_once();
+            }"
+
+            compile_check_conftest "$CODE" "NV_ADDRESS_SPACE_INIT_ONCE_PRESENT" "" "functions"
+        ;;
+
+        kuid_t)
+            #
+            # Determine if the 'kuid_t' type is present.
+            #
+            # Added by commit 7a4e7408c5ca ("userns: Add kuid_t and kgid_t
+            # and associated infrastructure in uidgid.h") in v3.5
+            #
+            CODE="
+            #include <linux/uidgid.h>
+            kuid_t conftest_kuid_t;
+            "
+
+            compile_check_conftest "$CODE" "NV_KUID_T_PRESENT" "" "types"
+        ;;
+
+        pm_vt_switch_required)
+            #
+            # Determine if the pm_vt_switch_required() function is present.
+            #
+            # Added by commit f43f627d2f17 ("PM: make VT switching to the
+            # suspend console optional v3") in v3.10
+            #
+            CODE="
+            #include <linux/pm.h>
+            void conftest_pm_vt_switch_required(void) {
+                pm_vt_switch_required();
+            }"
+
+            compile_check_conftest "$CODE" "NV_PM_VT_SWITCH_REQUIRED_PRESENT" "" "functions"
+        ;;
+
+        xen_ioemu_inject_msi)
+            # Determine if the xen_ioemu_inject_msi() function is present.
+            CODE="
+            #if defined(NV_XEN_IOEMU_H_PRESENT)
+            #include <linux/kernel.h>
+            #include <xen/interface/xen.h>
+            #include <xen/hvm.h>
+            #include <xen/ioemu.h>
+            #endif
+            void conftest_xen_ioemu_inject_msi(void) {
+                xen_ioemu_inject_msi();
+            }"
+
+            compile_check_conftest "$CODE" "NV_XEN_IOEMU_INJECT_MSI" "" "functions"
+        ;;
+
+        phys_to_dma)
+            #
+            # Determine if the phys_to_dma() function is present.
+            # It does not exist on all architectures.
+            #
+            CODE="
+            #include <linux/dma-mapping.h>
+            void conftest_phys_to_dma(void) {
+                phys_to_dma();
+            }"
+
+            compile_check_conftest "$CODE" "NV_PHYS_TO_DMA_PRESENT" "" "functions"
+        ;;
+
+        dma_attr_macros)
+            #
+            # Determine if the DMA_ATTR_SKIP_CPU_SYNC macro is present.
+            # It does not exist on all architectures.
+            #
+            CODE="
+            #include <linux/dma-mapping.h>
+            void conftest_dma_attr_macros(void) {
+                int ret;
+                ret = DMA_ATTR_SKIP_CPU_SYNC();
+            }"
+            compile_check_conftest "$CODE" "NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT" "" "functions"
+        ;;
+
+        dma_map_page_attrs)
+            #
+            # Determine if the dma_map_page_attrs() function is present.
+            # It does not exist on all architectures.
+            #
+            CODE="
+            #include <linux/dma-mapping.h>
+            void conftest_dma_map_page_attrs(void) {
+                dma_map_page_attrs();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DMA_MAP_PAGE_ATTRS_PRESENT" "" "functions"
+        ;;
+
+        dma_ops)
+            #
+            # Determine if the 'dma_ops' structure is present.
+            # It does not exist on all architectures.
+            #
+            CODE="
+            #include <linux/dma-mapping.h>
+            void conftest_dma_ops(void) {
+                (void)dma_ops;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DMA_OPS_PRESENT" "" "symbols"
+        ;;
+
+        swiotlb_dma_ops)
+            #
+            # Determine if the 'swiotlb_dma_ops' structure is present.
+            # It does not exist on all architectures.
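+            #
+            # As with 'dma_ops' above, referencing the global via
+            # '(void)swiotlb_dma_ops;' makes this a "symbols"-category
+            # test: it compiles, and the macro is defined, only when the
+            # declaration is visible in the kernel headers.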
+            #
+            CODE="
+            #include <linux/swiotlb.h>
+            void conftest_swiotlb_dma_ops(void) {
+                (void)swiotlb_dma_ops;
+            }"
+
+            compile_check_conftest "$CODE" "NV_SWIOTLB_DMA_OPS_PRESENT" "" "symbols"
+        ;;
+
+        get_dma_ops)
+            #
+            # Determine if the get_dma_ops() function is present.
+            #
+            # The dma_map_ops structure was made available to all
+            # architectures by commit e1c7e324539a ("dma-mapping: always
+            # provide the dma_map_ops based implementation") in v4.5
+            #
+            # Commit 0a0f0d8be76d ("dma-mapping: split <linux/dma-mapping.h>")
+            # in v5.10-rc1 (2020-09-22) moved the get_dma_ops() function
+            # prototype from <linux/dma-mapping.h> to <linux/dma-map-ops.h>.
+            #
+            CODE="
+            #if defined(NV_LINUX_DMA_MAP_OPS_H_PRESENT)
+            #include <linux/dma-map-ops.h>
+            #else
+            #include <linux/dma-mapping.h>
+            #endif
+            void conftest_get_dma_ops(void) {
+                get_dma_ops();
+            }"
+
+            compile_check_conftest "$CODE" "NV_GET_DMA_OPS_PRESENT" "" "functions"
+        ;;
+
+        noncoherent_swiotlb_dma_ops)
+            #
+            # Determine if the 'noncoherent_swiotlb_dma_ops' symbol is present.
+            # This API only exists on ARM64.
+            #
+            # Added by commit 7363590d2c46 ("arm64: Implement coherent DMA API
+            # based on swiotlb") in v3.15
+            #
+            # Removed by commit 9d3bfbb4df58 ("arm64: Combine coherent and
+            # non-coherent swiotlb dma_ops") in v4.0
+            #
+            CODE="
+            #include <linux/swiotlb.h>
+            void conftest_noncoherent_swiotlb_dma_ops(void) {
+                (void)noncoherent_swiotlb_dma_ops;
+            }"
+
+            compile_check_conftest "$CODE" "NV_NONCOHERENT_SWIOTLB_DMA_OPS_PRESENT" "" "symbols"
+        ;;
+
+        dma_map_resource)
+            #
+            # Determine if the dma_map_resource() function is present.
+            #
+            # Added by commit 6f3d87968f9c ("dma-mapping: add
+            # dma_{map,unmap}_resource") in v4.9 (2016-08-10)
+            #
+            CODE="
+            #include <linux/dma-mapping.h>
+            void conftest_dma_map_resource(void) {
+                dma_map_resource();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DMA_MAP_RESOURCE_PRESENT" "" "functions"
+        ;;
+
+        write_cr4)
+            #
+            # Determine if the write_cr4() function is present.
+            #
+            CODE="
+            #include <asm/processor.h>
+            void conftest_write_cr4(void) {
+                write_cr4();
+            }"
+
+            compile_check_conftest "$CODE" "NV_WRITE_CR4_PRESENT" "" "functions"
+        ;;
+
+        nvhost_dma_fence_unpack)
+            #
+            # Determine if the nvhost_dma_fence_unpack() function is present.
+            # This is only present in NVIDIA Tegra downstream kernels.
+            #
+            CODE="
+            #if defined(NV_LINUX_NVHOST_H_PRESENT)
+            #include <linux/nvhost.h>
+            #endif
+            void conftest_nvhost_dma_fence_unpack(void) {
+                nvhost_dma_fence_unpack();
+            }"
+
+            compile_check_conftest "$CODE" "NV_NVHOST_DMA_FENCE_UNPACK_PRESENT" "" "functions"
+        ;;
+
+        of_get_property)
+            #
+            # Determine if the of_get_property() function is present.
+            #
+            # Support for kernels without CONFIG_OF defined was added by commit
+            # 89272b8c0d42 ("dt: add empty of_get_property for non-dt") in v3.1.
+            #
+            # Test whether the linux/of.h header can be included, and
+            # define/undefine NV_LINUX_OF_H_USABLE accordingly.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/of.h>
+            " > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic"
+                CODE="
+                #include <linux/of.h>
+                void conftest_of_get_property() {
+                    of_get_property();
+                }"
+
+                compile_check_conftest "$CODE" "NV_OF_GET_PROPERTY_PRESENT" "" "functions"
+            else
+                echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic"
+                echo "#undef NV_OF_GET_PROPERTY_PRESENT" | append_conftest "functions"
+            fi
+        ;;
+
+        of_find_node_by_phandle)
+            #
+            # Determine if the of_find_node_by_phandle() function is present.
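+            #
+            # Like of_get_property above, this is a two-stage probe: first
+            # verify that linux/of.h is usable at all, then apply the usual
+            # "functions" check to of_find_node_by_phandle() itself.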
+ # + # Support for kernels without CONFIG_OF defined added by commit + # ce16b9d23561 ("of: define of_find_node_by_phandle for + # !CONFIG_OF") in v4.2 + # + # Test if linux/of.h header file inclusion is successful or not and + # define/undefine NV_LINUX_OF_H_USABLE depending upon status of inclusion. + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic" + CODE=" + #include + void conftest_of_find_node_by_phandle() { + of_find_node_by_phandle(); + }" + + compile_check_conftest "$CODE" "NV_OF_FIND_NODE_BY_PHANDLE_PRESENT" "" "functions" + else + echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic" + echo "#undef NV_OF_FIND_NODE_BY_PHANDLE_PRESENT" | append_conftest "functions" + fi + ;; + + of_node_to_nid) + # + # Determine if of_node_to_nid is present + # + # Dummy implementation added by commit 559e2b7ee7a1 + # ("of: Provide default of_node_to_nid() implementation.") in v2.6.36 + # + # Real implementation added by commit 298535c00a2c + # ("of, numa: Add NUMA of binding implementation.") in v4.7 + # + # Test if linux/of.h header file inclusion is successful or not and + # define/undefine NV_LINUX_OF_H_USABLE depending upon status of inclusion. + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic" + CODE=" + #include + #include + #include + void conftest_of_node_to_nid() { + of_node_to_nid(); + }" + + compile_check_conftest "$CODE" "NV_OF_NODE_TO_NID_PRESENT" "" "functions" + else + echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic" + echo "#undef NV_OF_NODE_TO_NID_PRESENT" | append_conftest "functions" + fi + ;; + + pnv_pci_get_npu_dev) + # + # Determine if the pnv_pci_get_npu_dev function is present. + # + # Added by commit 5d2aa710e697 ("powerpc/powernv: Add support + # for Nvlink NPUs") in v4.5 + # + CODE=" + #include + void conftest_pnv_pci_get_npu_dev() { + pnv_pci_get_npu_dev(); + }" + + compile_check_conftest "$CODE" "NV_PNV_PCI_GET_NPU_DEV_PRESENT" "" "functions" + ;; + + kernel_write) + # + # Determine if the function kernel_write() is present. + # + # First exported by commit 7bb307e894d5 ("export kernel_write(), + # convert open-coded instances") in v3.9 + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_kernel_write(void) { + kernel_write(); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#undef NV_KERNEL_WRITE_PRESENT" | append_conftest "function" + rm -f conftest$$.o + else + echo "#define NV_KERNEL_WRITE_PRESENT" | append_conftest "function" + + # + # Determine the pos argument type, which was changed by + # commit e13ec939e96b1 (fs: fix kernel_write prototype) on + # 9/1/2017. 
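+ #
+ # For comparison, before that commit the prototype was (sketch):
+ #
+ #   ssize_t kernel_write(struct file *file, const char *buf,
+ #                        size_t count, loff_t pos);
+ #
+ # i.e. 'pos' was passed by value rather than through a pointer, which
+ # is what the prototype-redefinition test below distinguishes.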
+ # + echo "$CONFTEST_PREAMBLE + #include + ssize_t kernel_write(struct file *file, const void *buf, + size_t count, loff_t *pos) + { + return 0; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_KERNEL_WRITE_HAS_POINTER_POS_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_KERNEL_WRITE_HAS_POINTER_POS_ARG" | append_conftest "function" + fi + fi + ;; + + kernel_read_has_pointer_pos_arg) + # + # Determine the pos argument type, which was changed by + # commit bdd1d2d3d251c (fs: fix kernel_read prototype) on + # 9/1/2017. + # + echo "$CONFTEST_PREAMBLE + #include + ssize_t kernel_read(struct file *file, void *buf, size_t count, + loff_t *pos) + { + return 0; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_KERNEL_READ_HAS_POINTER_POS_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_KERNEL_READ_HAS_POINTER_POS_ARG" | append_conftest "function" + fi + ;; + + vm_insert_pfn_prot) + # + # Determine if vm_insert_pfn_prot function is present + # + # Added by commit 1745cbc5d0de ("mm: Add vm_insert_pfn_prot()") in + # v3.16.59 + # + # Removed by commit f5e6d1d5f8f3 ("mm: introduce + # vmf_insert_pfn_prot()") in v4.20. + # + CODE=" + #include + void conftest_vm_insert_pfn_prot() { + vm_insert_pfn_prot(); + }" + + compile_check_conftest "$CODE" "NV_VM_INSERT_PFN_PROT_PRESENT" "" "functions" + ;; + + vmf_insert_pfn_prot) + # + # Determine if vmf_insert_pfn_prot function is present + # + # Added by commit f5e6d1d5f8f3 ("mm: introduce + # vmf_insert_pfn_prot()") in v4.20. + # + CODE=" + #include + void conftest_vmf_insert_pfn_prot() { + vmf_insert_pfn_prot(); + }" + + compile_check_conftest "$CODE" "NV_VMF_INSERT_PFN_PROT_PRESENT" "" "functions" + ;; + + drm_atomic_available) + # + # Determine if the DRM atomic modesetting subsystem is usable + # + # Added by commit 036ef5733ba4 + # ("drm/atomic: Allow drivers to subclass drm_atomic_state, v3") in + # v4.2 (2018-05-18). + # + # Make conftest more robust by adding test for + # drm_atomic_set_mode_prop_for_crtc(), this function added by + # commit 955f3c334f0f ("drm/atomic: Add MODE_ID property") in v4.2 + # (2015-05-25). If the DRM atomic modesetting subsystem is + # back ported to Linux kernel older than v4.2, then commit + # 955f3c334f0f must be back ported in order to get NVIDIA-DRM KMS + # support. + # Commit 72fdb40c1a4b ("drm: extract drm_atomic_uapi.c") in v4.20 + # (2018-09-05), moved drm_atomic_set_mode_prop_for_crtc() function + # prototype from drm/drm_atomic.h to drm/drm_atomic_uapi.h. 
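+ #
+ # The probe below therefore runs in two stages: first confirm that
+ # struct drm_mode_config_funcs has the atomic_state_alloc hook, then
+ # apply the inverted "functions" logic to
+ # drm_atomic_set_mode_prop_for_crtc(); NV_DRM_ATOMIC_MODESET_AVAILABLE
+ # is defined only when both are present.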
+ # + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + #if !defined(CONFIG_DRM) && !defined(CONFIG_DRM_MODULE) + #error DRM not enabled + #endif + void conftest_drm_atomic_modeset_available(void) { + size_t a; + + a = offsetof(struct drm_mode_config_funcs, atomic_state_alloc); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + #if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT) + #include + #endif + void conftest_drm_atomic_set_mode_prop_for_crtc(void) { + drm_atomic_set_mode_prop_for_crtc(); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#undef NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + else + echo "#define NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + fi + else + echo "#undef NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + fi + ;; + + drm_bus_present) + # + # Determine if the 'struct drm_bus' type is present. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform driver + # interface.") in v2.6.39 (2010-12-15) + # + # Removed by commit c5786fe5f1c5 ("drm: Goody bye, drm_bus!") + # in v3.18 (2014-08-29) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + void conftest_drm_bus_present(void) { + struct drm_bus bus; + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_PRESENT" "" "types" + ;; + + drm_bus_has_bus_type) + # + # Determine if the 'drm_bus' structure has a 'bus_type' field. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform driver + # interface.") in v2.6.39 (2010-12-15) + # + # Removed by commit 42b21049fc26 ("drm: kill drm_bus->bus_type") + # in v3.16 (2013-11-03) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_bus_has_bus_type(void) { + return offsetof(struct drm_bus, bus_type); + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_HAS_BUS_TYPE" "" "types" + ;; + + drm_bus_has_get_irq) + # + # Determine if the 'drm_bus' structure has a 'get_irq' field. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform + # driver interface.") in v2.6.39 (2010-12-15) + # + # Removed by commit b2a21aa25a39 ("drm: remove bus->get_irq + # implementations") in v3.16 (2013-11-03) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_bus_has_get_irq(void) { + return offsetof(struct drm_bus, get_irq); + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_HAS_GET_IRQ" "" "types" + ;; + + drm_bus_has_get_name) + # + # Determine if the 'drm_bus' structure has a 'get_name' field. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform driver + # interface.") in v2.6.39 (2010-12-15) + # + # removed by commit 9de1b51f1fae ("drm: remove drm_bus->get_name") + # in v3.16 (2013-11-03) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_bus_has_get_name(void) { + return offsetof(struct drm_bus, get_name); + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_HAS_GET_NAME" "" "types" + ;; + + drm_driver_has_device_list) + # + # Determine if the 'drm_driver' structure has a 'device_list' field. 
+ # + # Renamed from device_list to legacy_device_list by commit + # b3f2333de8e8 ("drm: restrict the device list for shadow + # attached drivers") in v3.14 (2013-12-11) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_device_list(void) { + return offsetof(struct drm_driver, device_list); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DEVICE_LIST" "" "types" + ;; + + + drm_driver_has_legacy_dev_list) + # + # Determine if the 'drm_driver' structure has a 'legacy_dev_list' field. + # + # Renamed from device_list to legacy_device_list by commit + # b3f2333de8e8 ("drm: restrict the device list for shadow + # attached drivers") in v3.14 (2013-12-11) + # + # The commit 57bb1ee60340 ("drm: Compile out legacy chunks from + # struct drm_device") compiles out the legacy chunks like + # drm_driver::legacy_dev_list. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_legacy_dev_list(void) { + return offsetof(struct drm_driver, legacy_dev_list); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST" "" "types" + ;; + + jiffies_to_timespec) + # + # Determine if jiffies_to_timespec() is present + # + # removed by commit 751addac78b6 + # ("y2038: remove obsolete jiffies conversion functions") + # in v5.6-rc1 (2019-12-13). + CODE=" + #include + void conftest_jiffies_to_timespec(void){ + jiffies_to_timespec(); + }" + compile_check_conftest "$CODE" "NV_JIFFIES_TO_TIMESPEC_PRESENT" "" "functions" + ;; + + drm_init_function_args) + # + # Determine if these functions: + # drm_universal_plane_init() + # drm_crtc_init_with_planes() + # drm_encoder_init() + # have a 'name' argument, which was added by these commits: + # drm_universal_plane_init: 2015-12-09 b0b3b7951114315d65398c27648705ca1c322faa + # drm_crtc_init_with_planes: 2015-12-09 f98828769c8838f526703ef180b3088a714af2f9 + # drm_encoder_init: 2015-12-09 13a3d91f17a5f7ed2acd275d18b6acfdb131fb15 + # + # Additionally determine whether drm_universal_plane_init() has a + # 'format_modifiers' argument, which was added by: + # 2017-07-23 e6fc3b68558e4c6d8d160b5daf2511b99afa8814 + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + + int conftest_drm_crtc_init_with_planes_has_name_arg(void) { + return + drm_crtc_init_with_planes( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_crtc *crtc */ + NULL, /* struct drm_plane *primary */ + NULL, /* struct drm_plane *cursor */ + NULL, /* const struct drm_crtc_funcs *funcs */ + NULL); /* const char *name */ + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_INIT_WITH_PLANES_HAS_NAME_ARG" "" "types" + + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_ENCODER_H_PRESENT) + #include + #endif + + int conftest_drm_encoder_init_has_name_arg(void) { + return + drm_encoder_init( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_encoder *encoder */ + NULL, /* const struct drm_encoder_funcs *funcs */ + DRM_MODE_ENCODER_NONE, /* int encoder_type */ + NULL); /* const char *name */ + }" + + compile_check_conftest "$CODE" "NV_DRM_ENCODER_INIT_HAS_NAME_ARG" "" "types" + + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_PLANE_H_PRESENT) + #include + #endif + + int 
conftest_drm_universal_plane_init_has_format_modifiers_arg(void) { + return + drm_universal_plane_init( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_plane *plane */ + 0, /* unsigned long possible_crtcs */ + NULL, /* const struct drm_plane_funcs *funcs */ + NULL, /* const uint32_t *formats */ + 0, /* unsigned int format_count */ + NULL, /* const uint64_t *format_modifiers */ + DRM_PLANE_TYPE_PRIMARY, + NULL); /* const char *name */ + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + echo "#define NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG" | append_conftest "types" + echo "#define NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG" | append_conftest "types" + else + echo "#undef NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG" | append_conftest "types" + + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_PLANE_H_PRESENT) + #include + #endif + + int conftest_drm_universal_plane_init_has_name_arg(void) { + return + drm_universal_plane_init( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_plane *plane */ + 0, /* unsigned long possible_crtcs */ + NULL, /* const struct drm_plane_funcs *funcs */ + NULL, /* const uint32_t *formats */ + 0, /* unsigned int format_count */ + DRM_PLANE_TYPE_PRIMARY, + NULL); /* const char *name */ + }" + + compile_check_conftest "$CODE" "NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG" "" "types" + fi + ;; + + vzalloc) + # + # Determine if the vzalloc function is present + # + # Added by commit e1ca7788dec6 ("mm: add vzalloc() and + # vzalloc_node() helpers") in v2.6.37 (2010-10-26) + # + CODE=" + #include + void conftest_vzalloc() { + vzalloc(); + }" + + compile_check_conftest "$CODE" "NV_VZALLOC_PRESENT" "" "functions" + ;; + + drm_driver_has_set_busid) + # + # Determine if the drm_driver structure has a 'set_busid' callback + # field. + # + # Added by commit 915b4d11b8b9 ("drm: add driver->set_busid() + # callback") in v3.18 (2014-08-29) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_set_busid(void) { + return offsetof(struct drm_driver, set_busid); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_SET_BUSID" "" "types" + ;; + + drm_driver_has_gem_prime_res_obj) + # + # Determine if the drm_driver structure has a 'gem_prime_res_obj' + # callback field. + # + # Added by commit 3aac4502fd3f ("dma-buf: use reservation + # objects") in v3.17 (2014-07-01). + # + # Removed by commit 51c98747113e (drm/prime: Ditch + # gem_prime_res_obj hook) in v5.4. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_gem_prime_res_obj(void) { + return offsetof(struct drm_driver, gem_prime_res_obj); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ" "" "types" + ;; + + drm_crtc_state_has_connectors_changed) + # + # Determine if the crtc_state has a 'connectors_changed' field. + # + # Added by commit fc596660dd4e ("drm/atomic: add + # connectors_changed to separate it from mode_changed, v2") + # in v4.3 (2015-07-21) + # + CODE=" + #include + void conftest_drm_crtc_state_has_connectors_changed(void) { + struct drm_crtc_state foo; + (void)foo.connectors_changed; + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_CONNECTORS_CHANGED" "" "types" + ;; + + drm_reinit_primary_mode_group) + # + # Determine if the function drm_reinit_primary_mode_group() is + # present. 
+ # + # Added by commit 2390cd11bfbe ("drm/crtc: add interface to + # reinitialise the legacy mode group") in v3.17 (2014-06-05) + # + # Removed by commit 3fdefa399e46 ("drm: gc now dead + # mode_group code") in v4.3 (2015-07-09) + # + CODE=" + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + void conftest_drm_reinit_primary_mode_group(void) { + drm_reinit_primary_mode_group(); + }" + + compile_check_conftest "$CODE" "NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT" "" "functions" + ;; + + wait_on_bit_lock_argument_count) + # + # Determine how many arguments wait_on_bit_lock takes. + # + # Changed by commit 743162013d40 ("sched: Remove proliferation + # of wait_on_bit() action functions") in v3.17 (2014-07-07) + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_wait_on_bit_lock(void) { + wait_on_bit_lock(NULL, 0, 0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT 3" | append_conftest "functions" + return + fi + + echo "$CONFTEST_PREAMBLE + #include + void conftest_wait_on_bit_lock(void) { + wait_on_bit_lock(NULL, 0, NULL, 0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT 4" | append_conftest "functions" + return + fi + echo "#error wait_on_bit_lock() conftest failed!" | append_conftest "functions" + ;; + + bitmap_clear) + # + # Determine if the bitmap_clear function is present + # + # Added by commit c1a2a962a2ad ("bitmap: introduce bitmap_set, + # bitmap_clear, bitmap_find_next_zero_area") in v2.6.33 + # (2009-12-15) + # + CODE=" + #include + void conftest_bitmap_clear() { + bitmap_clear(); + }" + + compile_check_conftest "$CODE" "NV_BITMAP_CLEAR_PRESENT" "" "functions" + ;; + + pci_stop_and_remove_bus_device) + # + # Determine if the pci_stop_and_remove_bus_device() function is present. + # + # Added by commit 210647af897a ("PCI: Rename pci_remove_bus_device + # to pci_stop_and_remove_bus_device") in v3.4 (2012-02-25) + # + CODE=" + #include + #include + void conftest_pci_stop_and_remove_bus_device() { + pci_stop_and_remove_bus_device(); + }" + + compile_check_conftest "$CODE" "NV_PCI_STOP_AND_REMOVE_BUS_DEVICE_PRESENT" "" "functions" + ;; + + pci_remove_bus_device) + # + # Determine if the pci_remove_bus_device() function is present. + # Added before Linux-2.6.12-rc2 2005-04-16 + # Because we support builds on non-PCI platforms, we still need + # to check for this function's presence. + # + CODE=" + #include + #include + void conftest_pci_remove_bus_device() { + pci_remove_bus_device(); + }" + + compile_check_conftest "$CODE" "NV_PCI_REMOVE_BUS_DEVICE_PRESENT" "" "functions" + ;; + + drm_helper_mode_fill_fb_struct | drm_helper_mode_fill_fb_struct_has_const_mode_cmd_arg) + # + # Determine if the drm_helper_mode_fill_fb_struct function takes + # 'dev' argument. 
+ # + # The drm_helper_mode_fill_fb_struct() has been updated to + # take 'dev' parameter by commit a3f913ca9892 ("drm: Pass 'dev' + # to drm_helper_mode_fill_fb_struct()") in v4.11 (2016-12-14) + # + echo "$CONFTEST_PREAMBLE + #include + void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd) + { + return; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG" | append_conftest "function" + echo "#define NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG" | append_conftest "function" + + # + # Determine if the drm_mode_fb_cmd2 pointer argument is const in + # drm_mode_config_funcs::fb_create and drm_helper_mode_fill_fb_struct(). + # + # The drm_mode_fb_cmd2 pointer through this call chain was made + # const by commit 1eb83451ba55 ("drm: Pass the user drm_mode_fb_cmd2 + # as const to .fb_create()") in v4.5 (2015-11-11) + # + echo "$CONFTEST_PREAMBLE + #include + void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd) + { + return; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG" | append_conftest "function" + fi + fi + ;; + + mm_context_t) + # + # Determine if the 'mm_context_t' data type is present + # and if it has an 'id' member. + # It does not exist on all architectures. + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_mm_context_t(void) { + return offsetof(mm_context_t, id); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_MM_CONTEXT_T_HAS_ID" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_MM_CONTEXT_T_HAS_ID" | append_conftest "types" + return + fi + ;; + + pci_dev_has_ats_enabled) + # + # Determine if the 'pci_dev' data type has a 'ats_enabled' member. + # + # Added by commit d544d75ac96aa ("PCI: Embed ATS info directly + # into struct pci_dev") in v4.3-rc1 (2015-08-14) + # + CODE=" + #include + int conftest_pci_dev_ats_enabled_t(void) { + return ((struct pci_dev *)0)->ats_enabled; + }" + + compile_check_conftest "$CODE" "NV_PCI_DEV_HAS_ATS_ENABLED" "" "types" + ;; + + mt_device_gre) + # + # Determine if MT_DEVICE_GRE flag is present. + # + # MT_DEVICE_GRE flag is removed by commit 58cc6b72a21274 + # ("arm64: mm: Remove unused support for Device-GRE memory type") in v5.14-rc1 + # (2021-06-01). 
+ # + CODE=" + #include + unsigned int conftest_mt_device_gre(void) { + return MT_DEVICE_GRE; + }" + + compile_check_conftest "$CODE" "NV_MT_DEVICE_GRE_PRESENT" "" "types" + ;; + + get_user_pages) + # + # Conftest for get_user_pages() + # + # Use long type for get_user_pages and unsigned long for nr_pages + # by commit 28a35716d317 ("mm: use long type for page counts + # in mm_populate() and get_user_pages()") in v3.9 (2013-02-22) + # + # Removed struct task_struct *tsk & struct mm_struct *mm from + # get_user_pages by commit cde70140fed8 ("mm/gup: Overload + # get_user_pages() functions") in v4.6 (2016-02-12) + # + # Replaced get_user_pages6 with get_user_pages by commit + # c12d2da56d0e ("mm/gup: Remove the macro overload API migration + # helpers from the get_user*() APIs") in v4.6 (2016-04-04) + # + # Replaced write and force parameters with gup_flags by + # commit 768ae309a961 ("mm: replace get_user_pages() write/force + # parameters with gup_flags") in v4.9 (2016-10-13) + # + # linux-4.4.168 cherry-picked commit 768ae309a961 without + # c12d2da56d0e which is covered in Conftest #3. + # + + # + # This function sets the NV_GET_USER_PAGES_* macros as per the below + # passing conftest's + # + set_get_user_pages_defines () { + if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" ]; then + echo "#define NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" ]; then + echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" ]; then + echo "#define NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_HAS_ARGS_FLAGS" ]; then + echo "#define NV_GET_USER_PAGES_HAS_ARGS_FLAGS" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_HAS_ARGS_FLAGS" | append_conftest "functions" + fi + } + + # Conftest #1: Check if get_user_pages accepts 6 arguments. + # Return if true. + # Fall through to conftest #2 on failure. + + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages(unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + if [ -f conftest$$.o ]; then + set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE" + rm -f conftest$$.o + return + fi + + # Conftest #2: Check if get_user_pages has gup_flags instead of + # write and force parameters. And that gup doesn't accept a + # task_struct and mm_struct as its first arguments. + # Return if available. + # Fall through to conftest #3 on failure. 
+ + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages(unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_FLAGS" + rm -f conftest$$.o + return + fi + + # Conftest #3: Check if get_user_pages has gup_flags instead of + # write and force parameters AND that gup has task_struct and + # mm_struct as its first arguments. + # Return if available. + # Fall through to default case if absent. + + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS" + rm -f conftest$$.o + return + fi + + set_get_user_pages_defines "NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE" + + return + ;; + + get_user_pages_remote) + # + # Determine if the function get_user_pages_remote() is + # present and has write/force/locked/tsk parameters. + # + # get_user_pages_remote() was added by commit 1e9877902dc7 + # ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12) + # + # get_user_pages[_remote]() write/force parameters + # replaced with gup_flags by commits 768ae309a961 ("mm: replace + # get_user_pages() write/force parameters with gup_flags") and + # commit 9beae1ea8930 ("mm: replace get_user_pages_remote() + # write/force parameters with gup_flags") in v4.9 (2016-10-13) + # + # get_user_pages_remote() added 'locked' parameter by + # commit 5b56d49fc31d ("mm: add locked parameter to + # get_user_pages_remote()") in v4.10 (2016-12-14) + # + # get_user_pages_remote() removed 'tsk' parameter by + # commit 64019a2e467a ("mm/gup: remove task_struct pointer for + # all gup code") in v5.9-rc1 (2020-08-11). 
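+ #
+ # Signature summary for the probes below (sketch; gup_remote stands
+ # in for get_user_pages_remote):
+ #
+ #   v4.6:  gup_remote(tsk, mm, start, nr_pages, write, force, pages, vmas)
+ #   v4.9:  gup_remote(tsk, mm, start, nr_pages, gup_flags, pages, vmas)
+ #   v4.10: gup_remote(tsk, mm, start, nr_pages, gup_flags, pages, vmas, locked)
+ #   v5.9:  gup_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked)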
+ # + + # + # This function sets the NV_GET_USER_PAGES_REMOTE_* macros as per + # the below passing conftest's + # + set_get_user_pages_remote_defines () { + if [ "$1" = "" ]; then + echo "#undef NV_GET_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + else + echo "#define NV_GET_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" | append_conftest "functions" + fi + + if [ "$1" = "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" | append_conftest "functions" + else + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" | append_conftest "functions" + fi + } + + # conftest #1: check if get_user_pages_remote() is available + # return if not available. + # Fall through to conftest #2 if it is present + + echo "$CONFTEST_PREAMBLE + #include + void conftest_get_user_pages_remote(void) { + get_user_pages_remote(); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "" + rm -f conftest$$.o + return + fi + + # + # conftest #2: check if get_user_pages_remote() has write and + # force arguments. Return if these arguments are present + # Fall through to conftest #3 if these args are absent. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE" + rm -f conftest$$.o + return + fi + + # + # conftest #3: check if get_user_pages_remote() has gpu_flags + # arguments. Return if these arguments are present + # Fall through to conftest #4 if these args are absent. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gpu_flags, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS" + rm -f conftest$$.o + return + fi + + # + # conftest #4: check if get_user_pages_remote() has locked argument + # Return if these arguments are present. Fall through to conftest #5 + # if these args are absent. 
+ # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED" + rm -f conftest$$.o + return + fi + + # + # conftest #5: check if get_user_pages_remote() does not take + # tsk argument. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + set_get_user_pages_remote_defines "NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED" + rm -f conftest$$.o + fi + ;; + + pin_user_pages) + # + # Determine if the function pin_user_pages() is present. + # Presence of pin_user_pages() also implies the presence of + # unpin-user_page(). Both were added in the v5.6-rc1 + # + # pin_user_pages() was added by commit eddb1c228f7951d399240 + # ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in + # v5.6-rc1 (2020-01-30) + + # conftest #1: check if pin_user_pages() is available + # return if not available. + # + CODE=" + #include + void conftest_pin_user_pages(void) { + pin_user_pages(); + }" + + compile_check_conftest "$CODE" "NV_PIN_USER_PAGES_PRESENT" "" "functions" + ;; + + pin_user_pages_remote) + # Determine if the function pin_user_pages_remote() is present + # + # pin_user_pages_remote() was added by commit eddb1c228f7951d399240 + # ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") + # in v5.6 (2020-01-30) + + # pin_user_pages_remote() removed 'tsk' parameter by + # commit 64019a2e467a ("mm/gup: remove task_struct pointer for + # all gup code") in v5.9-rc1 (2020-08-11). + + # + # This function sets the NV_PIN_USER_PAGES_REMOTE_* macros as per + # the below passing conftest's + # + set_pin_user_pages_remote_defines () { + if [ "$1" = "" ]; then + echo "#undef NV_PIN_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + else + echo "#define NV_PIN_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + fi + + if [ "$1" = "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" ]; then + echo "#define NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions" + else + echo "#undef NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK" | append_conftest "functions" + fi + } + + # conftest #1: check if pin_user_pages_remote() is available + # return if not available. 
+            #
+            # conftest #1: check if pin_user_pages_remote() is available.
+            # Return if it is not available; fall through to conftest #2
+            # if it is present.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/mm.h>
+            void conftest_pin_user_pages_remote(void) {
+                pin_user_pages_remote();
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                set_pin_user_pages_remote_defines ""
+                rm -f conftest$$.o
+                return
+            fi
+
+            # conftest #2: check if pin_user_pages_remote() has a tsk argument
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/mm.h>
+            long pin_user_pages_remote(struct task_struct *tsk,
+                                       struct mm_struct *mm,
+                                       unsigned long start,
+                                       unsigned long nr_pages,
+                                       unsigned int gup_flags,
+                                       struct page **pages,
+                                       struct vm_area_struct **vmas,
+                                       int *locked) {
+                return 0;
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK"
+                rm -f conftest$$.o
+            else
+                set_pin_user_pages_remote_defines "NV_PIN_USER_PAGES_REMOTE_PRESENT"
+            fi
+        ;;
+
+        usleep_range)
+            #
+            # Determine if the function usleep_range() is present.
+            #
+            # Added by commit 5e7f5a178bba ("timer: Added usleep_range timer")
+            # in v2.6.36 (2010-08-04)
+            #
+            CODE="
+            #include <linux/delay.h>
+            void conftest_usleep_range(void) {
+                usleep_range();
+            }"
+
+            compile_check_conftest "$CODE" "NV_USLEEP_RANGE_PRESENT" "" "functions"
+        ;;
+
+        radix_tree_empty)
+            #
+            # Determine if the function radix_tree_empty() is present.
+            #
+            # Added by commit e9256efcc8e3 ("radix-tree: introduce
+            # radix_tree_empty") in v4.7 (2016-05-20)
+            #
+            CODE="
+            #include <linux/radix-tree.h>
+            int conftest_radix_tree_empty(void) {
+                radix_tree_empty();
+            }"
+
+            compile_check_conftest "$CODE" "NV_RADIX_TREE_EMPTY_PRESENT" "" "functions"
+        ;;
+
+        drm_gem_object_lookup)
+            #
+            # Determine the number of arguments of drm_gem_object_lookup().
+            #
+            # First argument of type drm_device removed by commit
+            # a8ad0bd84f98 ("drm: Remove unused drm_device from
+            # drm_gem_object_lookup()") in v4.7 (2016-05-09)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+            #if defined(NV_DRM_DRM_GEM_H_PRESENT)
+            #include <drm/drm_gem.h>
+            #endif
+            void conftest_drm_gem_object_lookup(void) {
+                drm_gem_object_lookup(NULL, NULL, 0);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT 3" | append_conftest "functions"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#define NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT 2" | append_conftest "functions"
+            fi
+        ;;
+
+        drm_master_drop_has_from_release_arg)
+            #
+            # Determine if drm_driver::master_drop() has a 'from_release'
+            # argument.
+            #
+            # The last argument, 'bool from_release', was removed by commit
+            # d6ed682eba54 ("drm: Refactor drop/set master code a bit")
+            # in v4.8 (2016-06-21)
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            void conftest_drm_master_drop_has_from_release_arg(struct drm_driver *drv) {
+                drv->master_drop(NULL, NULL, false);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG" "" "types"
+        ;;
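+        # compile_check_conftest is defined earlier in this script. As a
+        # rough sketch of its assumed behavior: note that for the "functions"
+        # category the result is inverted, because calling a function that is
+        # declared with a conflicting prototype fails to compile, while an
+        # implicit declaration of a missing one does not:
+        #
+        #     compile_check_conftest() {
+        #         # $1: test code, $2: macro name, $3: macro value, $4: category
+        #         echo "$CONFTEST_PREAMBLE $1" > conftest$$.c
+        #         $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+        #         if [ -f conftest$$.o ]; then
+        #             # compiled: for "functions" this means the symbol is absent
+        #             if [ "$4" = "functions" ]; then
+        #                 echo "#undef $2" | append_conftest "$4"
+        #             else
+        #                 echo "#define $2 $3" | append_conftest "$4"
+        #             fi
+        #         else
+        #             if [ "$4" = "functions" ]; then
+        #                 echo "#define $2 $3" | append_conftest "$4"
+        #             else
+        #                 echo "#undef $2" | append_conftest "$4"
+        #             fi
+        #         fi
+        #         rm -f conftest$$.c conftest$$.o
+        #     }
+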
+        drm_atomic_state_ref_counting)
+            #
+            # Determine if the functions drm_atomic_state_get/put() are
+            # present.
+            #
+            # Added by commit 0853695c3ba4 ("drm: Add reference counting to
+            # drm_atomic_state") in v4.10 (2016-10-14)
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_ATOMIC_H_PRESENT)
+            #include <drm/drm_atomic.h>
+            #endif
+            void conftest_drm_atomic_state_get(void) {
+                drm_atomic_state_get();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT" "" "functions"
+        ;;
+
+        vm_ops_fault_removed_vma_arg)
+            #
+            # Determine if vma.vm_ops.fault takes (vma, vmf) or just (vmf)
+            # arguments. Acronym key:
+            #   vma:    struct vm_area_struct
+            #   vm_ops: struct vm_operations_struct
+            #   vmf:    struct vm_fault
+            #
+            # The redundant vma arg was removed from BOTH vma.vm_ops.fault and
+            # vma.vm_ops.page_mkwrite by commit 11bac8000449 ("mm, fs: reduce
+            # fault, page_mkwrite, and pfn_mkwrite to take only vmf") in
+            # v4.11 (2017-02-24)
+            #
+            CODE="
+            #include <linux/mm.h>
+            void conftest_vm_ops_fault_removed_vma_arg(void) {
+                struct vm_operations_struct vm_ops;
+                struct vm_fault *vmf;
+                (void)vm_ops.fault(vmf);
+            }"
+
+            compile_check_conftest "$CODE" "NV_VM_OPS_FAULT_REMOVED_VMA_ARG" "" "types"
+        ;;
+
+        pnv_npu2_init_context)
+            #
+            # Determine if the pnv_npu2_init_context() function is
+            # present and the signature of its callback.
+            #
+            # Added by commit 1ab66d1fbada ("powerpc/powernv: Introduce
+            # address translation services for Nvlink2") in v4.12
+            # (2017-04-03).
+            #
+            echo "$CONFTEST_PREAMBLE
+            #if defined(NV_ASM_POWERNV_H_PRESENT)
+            #include <linux/pci.h>
+            #include <asm/powernv.h>
+            #endif
+            void conftest_pnv_npu2_init_context(void) {
+                pnv_npu2_init_context();
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+            if [ -f conftest$$.o ]; then
+                echo "#undef NV_PNV_NPU2_INIT_CONTEXT_PRESENT" | append_conftest "functions"
+                echo "#undef NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions"
+                rm -f conftest$$.o
+                return
+            fi
+
+            echo "#define NV_PNV_NPU2_INIT_CONTEXT_PRESENT" | append_conftest "functions"
+
+            # Check the callback signature
+            echo "$CONFTEST_PREAMBLE
+            #if defined(NV_ASM_POWERNV_H_PRESENT)
+            #include <linux/pci.h>
+            #include <asm/powernv.h>
+            #endif
+
+            struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
+                                                      unsigned long flags,
+                                                      void (*cb)(struct npu_context *, void *),
+                                                      void *priv) {
+                return NULL;
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions"
+                rm -f conftest$$.o
+                return
+            fi
+
+            echo "#undef NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions"
+        ;;
+        of_get_ibm_chip_id)
+            #
+            # Determine if the of_get_ibm_chip_id() function is present.
+            #
+            # Added by commit b130e7c04f11 ("powerpc: export
+            # of_get_ibm_chip_id function") in v4.2 (2015-05-07)
+            #
+            CODE="
+            #include <linux/version.h>
+            #if defined(NV_ASM_PROM_H_PRESENT)
+            #include <asm/prom.h>
+            #endif
+            void conftest_of_get_ibm_chip_id(void) {
+            #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
+                of_get_ibm_chip_id();
+            #endif
+            }"
+
+            compile_check_conftest "$CODE" "NV_OF_GET_IBM_CHIP_ID_PRESENT" "" "functions"
+        ;;
+
+        drm_driver_unload_has_int_return_type)
+            #
+            # Determine if drm_driver::unload() returns an integer value.
+            #
+            # Changed to void by commit 11b3c20bdd15 ("drm: Change the return
+            # type of the unload hook to void") in v4.11 (2017-01-06)
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            int conftest_drm_driver_unload_has_int_return_type(struct drm_driver *drv) {
+                return drv->unload(NULL /* dev */);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DRIVER_UNLOAD_HAS_INT_RETURN_TYPE" "" "types"
+        ;;
+
+        is_export_symbol_present_*)
+            export_symbol_present_conftest $(echo $1 | cut -f5- -d_)
+        ;;
+
+        is_export_symbol_gpl_*)
+            export_symbol_gpl_conftest $(echo $1 | cut -f5- -d_)
+        ;;
+
+        drm_atomic_helper_crtc_destroy_state_has_crtc_arg)
+            #
+            # Determine if __drm_atomic_helper_crtc_destroy_state() has a
+            # 'crtc' argument.
+            #
+            # The 'crtc' argument was removed by commit ec2dc6a0fe38 ("drm:
+            # Drop crtc argument from __drm_atomic_helper_crtc_destroy_state")
+            # in v4.7 (2016-05-09)
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT)
+            #include <drm/drm_atomic_helper.h>
+            #endif
+            void conftest_drm_atomic_helper_crtc_destroy_state_has_crtc_arg(void) {
+                __drm_atomic_helper_crtc_destroy_state(NULL, NULL);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_CRTC_DESTROY_STATE_HAS_CRTC_ARG" "" "types"
+        ;;
+
+        drm_atomic_helper_plane_destroy_state_has_plane_arg)
+            #
+            # Determine if __drm_atomic_helper_plane_destroy_state() has a
+            # 'plane' argument.
+            #
+            # The 'plane' argument was removed by commit 2f701695fd3a ("drm:
+            # Drop plane argument from __drm_atomic_helper_plane_destroy_state")
+            # in v4.7 (2016-05-09)
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT)
+            #include <drm/drm_atomic_helper.h>
+            #endif
+            void conftest_drm_atomic_helper_plane_destroy_state_has_plane_arg(void) {
+                __drm_atomic_helper_plane_destroy_state(NULL, NULL);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_PLANE_DESTROY_STATE_HAS_PLANE_ARG" "" "types"
+        ;;
+
+        drm_atomic_helper_connector_dpms)
+            #
+            # Determine if the function drm_atomic_helper_connector_dpms() is
+            # present.
+            #
+            # Removed by commit 7d902c05b480 ("drm: Nuke
+            # drm_atomic_helper_connector_dpms") in v4.14 (2017-07-25)
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT)
+            #include <drm/drm_atomic_helper.h>
+            #endif
+            void conftest_drm_atomic_helper_connector_dpms(void) {
+                drm_atomic_helper_connector_dpms();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT" "" "functions"
+        ;;
+
+        get_backlight_device_by_name)
+            #
+            # Determine if the get_backlight_device_by_name() function is
+            # present.
+            #
+            CODE="
+            #include <linux/backlight.h>
+            int conftest_get_backlight_device_by_name(void) {
+                return get_backlight_device_by_name();
+            }"
+            compile_check_conftest "$CODE" "NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT" "" "functions"
+        ;;
+        timer_setup)
+            #
+            # Determine if the function timer_setup() is present.
+            #
+            # Added by commit 686fef928bba ("timer: Prepare to change timer
+            # callback argument type") in v4.14 (2017-09-28)
+            #
+            CODE="
+            #include <linux/timer.h>
+            int conftest_timer_setup(void) {
+                return timer_setup();
+            }"
+            compile_check_conftest "$CODE" "NV_TIMER_SETUP_PRESENT" "" "functions"
+        ;;
+
+        radix_tree_replace_slot)
+            #
+            # Determine if the radix_tree_replace_slot() function is
+            # present and how many arguments it takes.
+            #
+            # The root parameter was added to radix_tree_replace_slot (but the
+            # symbol was not exported) by commit 6d75f366b924 ("lib:
+            # radix-tree: check accounting of existing slot replacement
+            # users") in v4.10 (2016-12-12)
+            #
+            # The radix_tree_replace_slot symbol export was added by commit
+            # 10257d719686 ("EXPORT_SYMBOL radix_tree_replace_slot") in v4.11
+            # (2017-01-11)
+            #
+            CODE="
+            #include <linux/version.h>
+            #include <linux/radix-tree.h>
+            void conftest_radix_tree_replace_slot(void) {
+            #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+                radix_tree_replace_slot();
+            #endif
+            }"
+            compile_check_conftest "$CODE" "NV_RADIX_TREE_REPLACE_SLOT_PRESENT" "" "functions"
+
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/radix-tree.h>
+            void conftest_radix_tree_replace_slot(void) {
+                radix_tree_replace_slot(NULL, NULL);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_RADIX_TREE_REPLACE_SLOT_ARGUMENT_COUNT 2" | append_conftest "functions"
+                return
+            fi
+
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/radix-tree.h>
+            void conftest_radix_tree_replace_slot(void) {
+                radix_tree_replace_slot(NULL, NULL, NULL);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_RADIX_TREE_REPLACE_SLOT_ARGUMENT_COUNT 3" | append_conftest "functions"
+                return
+            else
+                echo "#error radix_tree_replace_slot() conftest failed!" | append_conftest "functions"
+            fi
+        ;;
+
+        kthread_create_on_node)
+            #
+            # Determine if kthread_create_on_node is available.
+            #
+            # kthread_create_on_node was added by commit 207205a2ba26
+            # ("kthread: NUMA aware kthread_create_on_node()") in v2.6.39
+            # (2011-03-22).
+            #
+            CODE="
+            #include <linux/kthread.h>
+            void kthread_create_on_node_conftest(void) {
+                (void)kthread_create_on_node();
+            }"
+
+            compile_check_conftest "$CODE" "NV_KTHREAD_CREATE_ON_NODE_PRESENT" "" "functions"
+        ;;
+
+        cpumask_of_node)
+            #
+            # Determine whether cpumask_of_node is available.
+            #
+            # ARM support for cpumask_of_node() lagged until commit 1a2db300348b
+            # ("arm64, numa: Add NUMA support for arm64 platforms.") in v4.7
+            # (2016-04-08)
+            #
+            CODE="
+            #include <linux/topology.h>
+            void conftest_cpumask_of_node(void) {
+                (void)cpumask_of_node();
+            }"
+
+            compile_check_conftest "$CODE" "NV_CPUMASK_OF_NODE_PRESENT" "" "functions"
+        ;;
+
+        drm_mode_object_find_has_file_priv_arg)
+            #
+            # Determine if drm_mode_object_find() has a 'file_priv' argument.
+            #
+            # Updated to take a 'file_priv' argument by commit 418da17214ac
+            # ("drm: Pass struct drm_file * to __drm_mode_object_find [v2]")
+            # in v4.15 (2017-03-14)
+            #
+            CODE="
+            #include <drm/drm_mode_object.h>
+            void conftest_drm_mode_object_find_has_file_priv_arg(
+                    struct drm_device *dev,
+                    struct drm_file *file_priv,
+                    uint32_t id,
+                    uint32_t type) {
+                (void)drm_mode_object_find(dev, file_priv, id, type);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG" "" "types"
+        ;;
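+        # Sketch of how an ARGUMENT_COUNT define produced above is typically
+        # consumed by driver code (illustrative only; the surrounding
+        # variables 'tree', 'slot' and 'new_page' are hypothetical):
+        #
+        #     #if NV_RADIX_TREE_REPLACE_SLOT_ARGUMENT_COUNT == 2
+        #         radix_tree_replace_slot(slot, new_page);
+        #     #else
+        #         radix_tree_replace_slot(&tree, slot, new_page);
+        #     #endif
+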
+        pci_enable_msix_range)
+            #
+            # Determine if the pci_enable_msix_range() function is present.
+            #
+            # Added by commit 302a2523c277 ("PCI/MSI: Add
+            # pci_enable_msi_range() and pci_enable_msix_range()") in v3.14
+            # (2013-12-30)
+            #
+            CODE="
+            #include <linux/pci.h>
+            void conftest_pci_enable_msix_range(void) {
+                pci_enable_msix_range();
+            }"
+
+            compile_check_conftest "$CODE" "NV_PCI_ENABLE_MSIX_RANGE_PRESENT" "" "functions"
+        ;;
+
+        dma_buf_owner)
+            #
+            # Determine if the dma_buf struct has an owner member.
+            #
+            # Added by commit 9abdffe286c1 ("dma-buf: add ref counting for
+            # module as exporter") in v4.2 (2015-05-05)
+            #
+            CODE="
+            #include <linux/dma-buf.h>
+            int conftest_dma_buf_owner(void) {
+                return offsetof(struct dma_buf, owner);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DMA_BUF_OWNER_PRESENT" "" "types"
+        ;;
+
+        dma_buf_export_args)
+            #
+            # Determine the argument count for dma_buf_export().
+            #
+            # The 4-argument version was added by commit d15bd7ee445d
+            # ("dma-buf: Introduce dma buffer sharing mechanism")
+            # in v3.3 (2011-12-26)
+            #
+            # An additional argument was added by commit 3aac4502fd3f
+            # ("dma-buf: use reservation objects") in v3.17 (2014-07-01).
+            #
+            # The parameters were wrapped in a single struct
+            # dma_buf_export_info by commit d8fbe341beb6 ("dma-buf: cleanup
+            # dma_buf_export() to make it easily extensible") in v4.1
+            # (2015-01-23).
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            struct dma_buf* conftest_dma_buf_export(void) {
+                return dma_buf_export(NULL);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_DMA_BUF_EXPORT_ARGUMENT_COUNT 1" | append_conftest "functions"
+                return
+            fi
+
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            struct dma_buf* conftest_dma_buf_export(void) {
+                return dma_buf_export(NULL, NULL, 0, 0);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_DMA_BUF_EXPORT_ARGUMENT_COUNT 4" | append_conftest "functions"
+                return
+            fi
+
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            struct dma_buf* conftest_dma_buf_export(void) {
+                return dma_buf_export(NULL, NULL, 0, 0, NULL);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_DMA_BUF_EXPORT_ARGUMENT_COUNT 5" | append_conftest "functions"
+                return
+            fi
+            echo "#error dma_buf_export() conftest failed!" | append_conftest "functions"
+        ;;
+
+        dma_buf_ops_has_kmap)
+            #
+            # Determine if .kmap exists in dma_buf_ops.
+            # In some kernels, this is a mandatory callback.
+            #
+            # Added by commit fc13020e086b
+            # ("dma-buf: add support for kernel cpu access") in v3.4 (2012-03-20)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            int conftest_dma_buf_ops_has_kmap(void) {
+                return offsetof(struct dma_buf_ops, kmap);
+            }
+            int conftest_dma_buf_ops_has_kunmap(void) {
+                return offsetof(struct dma_buf_ops, kunmap);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DMA_BUF_OPS_HAS_KMAP" | append_conftest "types"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#undef NV_DMA_BUF_OPS_HAS_KMAP" | append_conftest "types"
+                return
+            fi
+        ;;
+        dma_buf_ops_has_kmap_atomic)
+            #
+            # Determine if .kmap_atomic exists in dma_buf_ops.
+            # In some kernels, this is a mandatory callback.
+            #
+            # Added by commit fc13020e086b
+            # ("dma-buf: add support for kernel cpu access") in v3.4 (2012-03-20)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            int conftest_dma_buf_ops_has_kmap_atomic(void) {
+                return offsetof(struct dma_buf_ops, kmap_atomic);
+            }
+            int conftest_dma_buf_ops_has_kunmap_atomic(void) {
+                return offsetof(struct dma_buf_ops, kunmap_atomic);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC" | append_conftest "types"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#undef NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC" | append_conftest "types"
+                return
+            fi
+        ;;
+
+        dma_buf_ops_has_map)
+            #
+            # Determine if .map exists in dma_buf_ops.
+            # In some kernels, this is a mandatory callback.
+            #
+            # Added by commit f9b67f0014cb
+            # ("dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro")
+            # in v4.12 (2017-04-19)
+            #
+            # Removed as a mandatory callback by commit f82aab2d521e
+            # ("dma-buf: Remove requirement for ops->map() from dma_buf_export")
+            # in v4.20 (2018-08-07)
+            #
+            # Completely removed from dma-buf by commit 4337ebbbbda3
+            # ("dma-buf: Remove kernel map/unmap hooks") in v5.6 (2019-11-18)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            int conftest_dma_buf_ops_has_map(void) {
+                return offsetof(struct dma_buf_ops, map);
+            }
+            int conftest_dma_buf_ops_has_unmap(void) {
+                return offsetof(struct dma_buf_ops, unmap);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DMA_BUF_OPS_HAS_MAP" | append_conftest "types"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#undef NV_DMA_BUF_OPS_HAS_MAP" | append_conftest "types"
+                return
+            fi
+        ;;
+
+        dma_buf_ops_has_map_atomic)
+            #
+            # Determine if map_atomic/unmap_atomic exists in dma_buf_ops.
+            # In some kernels, this is a mandatory callback.
+            #
+            # Added by commit f9b67f0014cb
+            # ("dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro")
+            # in v4.12 (2017-04-19)
+            #
+            # Removed by commit f664a5269542
+            # ("dma-buf: remove kmap_atomic interface") in v4.19 (2018-05-28)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            int conftest_dma_buf_ops_has_map_atomic(void) {
+                return offsetof(struct dma_buf_ops, map_atomic);
+            }
+            int conftest_dma_buf_ops_has_unmap_atomic(void) {
+                return offsetof(struct dma_buf_ops, unmap_atomic);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DMA_BUF_OPS_HAS_MAP_ATOMIC" | append_conftest "types"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#undef NV_DMA_BUF_OPS_HAS_MAP_ATOMIC" | append_conftest "types"
+                return
+            fi
+        ;;
+        dma_buf_has_dynamic_attachment)
+            #
+            # Determine if the function dma_buf_attachment_is_dynamic()
+            # is present.
+            #
+            # Added by commit 15fd552d186c ("dma-buf: change DMA-buf locking
+            # convention v3") in v5.5 (2018-07-03)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            bool conftest_dma_buf_attachment_is_dynamic(void) {
+                return dma_buf_attachment_is_dynamic(NULL);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT" | append_conftest "functions"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#undef NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT" | append_conftest "functions"
+                return
+            fi
+        ;;
+
+        dma_buf_attachment_has_peer2peer)
+            #
+            # Determine if peer2peer is present in struct dma_buf_attachment.
+            # peer2peer being true indicates that a dma-buf importer is able
+            # to handle peer resources not backed by struct page.
+            #
+            # Added by commit 09606b5446c2 ("dma-buf: add peer2peer flag")
+            # in v5.8 (2018-03-22)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/dma-buf.h>
+            int conftest_dma_buf_peer2peer(void) {
+                return offsetof(struct dma_buf_attachment, peer2peer);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER" | append_conftest "types"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#undef NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER" | append_conftest "types"
+                return
+            fi
+        ;;
+
+        drm_connector_funcs_have_mode_in_name)
+            #
+            # Determine if _mode_ is present in connector function names. We
+            # only test drm_mode_connector_attach_encoder() and assume the
+            # other functions were changed in sync.
+            #
+            # drm_mode_connector_attach_encoder() was renamed to
+            # drm_connector_attach_encoder() by commit cde4c44d8769 ("drm:
+            # drop _mode_ from drm_mode_connector_attach_encoder") in v4.19
+            # (2018-07-09)
+            #
+            # drm_mode_connector_update_edid_property() was renamed by commit
+            # c555f02371c3 ("drm: drop _mode_ from update_edit_property()")
+            # in v4.19 (2018-07-09).
+            #
+            # The other DRM functions were renamed by commit 97e14fbeb53f
+            # ("drm: drop _mode_ from remaining connector functions") in v4.19
+            # (2018-07-09)
+            #
+            # Note that drm_connector.h was introduced by commit 522171951761
+            # ("drm: Extract drm_connector.[hc]") in v4.9 (2016-08-12)
+            #
+            CODE="
+            #include <drm/drm_connector.h>
+            void conftest_drm_connector_funcs_have_mode_in_name(void) {
+                drm_mode_connector_attach_encoder();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME" "" "functions"
+        ;;
+
+        node_states_n_memory)
+            #
+            # Determine if the N_MEMORY constant exists.
+            #
+            # Added by commit 8219fc48adb3 ("mm: node_states: introduce
+            # N_MEMORY") in v3.8 (2012-12-12).
+            #
+            CODE="
+            #include <linux/nodemask.h>
+            int conftest_node_states_n_memory(void) {
+                return N_MEMORY;
+            }"
+
+            compile_check_conftest "$CODE" "NV_NODE_STATES_N_MEMORY_PRESENT" "" "types"
+        ;;
+
+        vm_fault_t)
+            #
+            # Determine if vm_fault_t is present.
+            #
+            # Added by commit 1c8f422059ae5da07db7406ab916203f9417e396 ("mm:
+            # change return type to vm_fault_t") in v4.17 (2018-04-05)
+            #
+            CODE="
+            #include <linux/mm.h>
+            vm_fault_t conftest_vm_fault_t;
+            "
+            compile_check_conftest "$CODE" "NV_VM_FAULT_T_IS_PRESENT" "" "types"
+        ;;
+        vmf_insert_pfn)
+            #
+            # Determine if the function vmf_insert_pfn() is present.
+            #
+            # Added by commit 1c8f422059ae5da07db7406ab916203f9417e396 ("mm:
+            # change return type to vm_fault_t") in v4.17 (2018-04-05)
+            #
+            CODE="
+            #include <linux/mm.h>
+            void conftest_vmf_insert_pfn(void) {
+                vmf_insert_pfn();
+            }"
+
+            compile_check_conftest "$CODE" "NV_VMF_INSERT_PFN_PRESENT" "" "functions"
+        ;;
+
+        drm_framebuffer_get)
+            #
+            # Determine if the function drm_framebuffer_get() is present.
+            #
+            # Added by commit a4a69da06bc1 ("drm: Introduce
+            # drm_framebuffer_{get,put}()") in v4.12 (2017-02-28).
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT)
+            #include <drm/drm_framebuffer.h>
+            #endif
+
+            void conftest_drm_framebuffer_get(void) {
+                drm_framebuffer_get();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_FRAMEBUFFER_GET_PRESENT" "" "functions"
+        ;;
+
+        drm_gem_object_get)
+            #
+            # Determine if the function drm_gem_object_get() is present.
+            #
+            # Added by commit e6b62714e87c ("drm: Introduce
+            # drm_gem_object_{get,put}()") in v4.12 (2017-02-28).
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_GEM_H_PRESENT)
+            #include <drm/drm_gem.h>
+            #endif
+            void conftest_drm_gem_object_get(void) {
+                drm_gem_object_get();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_GET_PRESENT" "" "functions"
+        ;;
+
+        drm_dev_put)
+            #
+            # Determine if the function drm_dev_put() is present.
+            #
+            # Added by commit 9a96f55034e4 ("drm: introduce drm_dev_{get/put}
+            # functions") in v4.15 (2017-09-26).
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_DRV_H_PRESENT)
+            #include <drm/drm_drv.h>
+            #endif
+            void conftest_drm_dev_put(void) {
+                drm_dev_put();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DEV_PUT_PRESENT" "" "functions"
+        ;;
+
+        drm_connector_list_iter)
+            #
+            # Determine if the drm_connector_list_iter struct is present.
+            #
+            # Added by commit 613051dac40da1751ab269572766d3348d45a197 ("drm:
+            # locking&new iterators for connector_list") in v4.11 (2016-12-14).
+            #
+            CODE="
+            #include <drm/drm_connector.h>
+            int conftest_drm_connector_list_iter(void) {
+                struct drm_connector_list_iter conn_iter;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_LIST_ITER_PRESENT" "" "types"
+
+            #
+            # Determine if the function drm_connector_list_iter_get() was
+            # renamed to drm_connector_list_iter_begin().
+            #
+            # Renamed by commit b982dab1e66d2b998e80a97acb6eaf56518988d3
+            # ("drm: Rename connector list iterator API") in v4.12
+            # (2017-02-28).
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
+            #include <drm/drm_connector.h>
+            #endif
+            void conftest_drm_connector_list_iter_begin(void) {
+                drm_connector_list_iter_begin();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT" "" "functions"
+        ;;
+
+        drm_atomic_helper_swap_state_has_stall_arg)
+            #
+            # Determine if drm_atomic_helper_swap_state() has a 'stall'
+            # argument.
+            #
+            # The drm_atomic_helper_swap_state() function prototype was
+            # updated to take 'state' and 'stall' arguments by commit
+            # 5e84c2690b805caeff3b4c6c9564c7b8de54742d ("drm/atomic-helper:
+            # Massage swap_state signature somewhat") in v4.8 (2016-06-10).
+            #
+            CODE="
+            #include <drm/drm_atomic_helper.h>
+            void conftest_drm_atomic_helper_swap_state_has_stall_arg(
+                    struct drm_atomic_state *state,
+                    bool stall) {
+                (void)drm_atomic_helper_swap_state(state, stall);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG" "" "types"
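+            # Illustrative sketch (not from the original source) of how the
+            # two swap_state defines combine at a call site:
+            #
+            #     #if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT)
+            #         ret = drm_atomic_helper_swap_state(state, true);
+            #     #elif defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG)
+            #         drm_atomic_helper_swap_state(state, true);
+            #     #endif
+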
+            #
+            # Determine if drm_atomic_helper_swap_state() returns int.
+            #
+            # The drm_atomic_helper_swap_state() function prototype was
+            # updated to return int by commit
+            # c066d2310ae9bbc695c06e9237f6ea741ec35e43 ("drm/atomic: Change
+            # drm_atomic_helper_swap_state to return an error.") in v4.14
+            # (2017-07-11).
+            #
+            CODE="
+            #include <drm/drm_atomic_helper.h>
+            int conftest_drm_atomic_helper_swap_state_return_int(
+                    struct drm_atomic_state *state,
+                    bool stall) {
+                return drm_atomic_helper_swap_state(state, stall);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT" "" "types"
+        ;;
+
+        pm_runtime_available)
+            #
+            # Determine if struct dev_pm_info has the 'usage_count' field.
+            #
+            # This was added to the kernel in commit 5e928f77a09a0 in v2.6.32
+            # (2008-08-18), but was originally dependent on CONFIG_PM_RUNTIME,
+            # which was folded into the more generic CONFIG_PM in commit
+            # d30d819dc8310 in v3.19 (2014-11-27).
+            # Rather than attempt to select the appropriate CONFIG option,
+            # simply check if this member is present.
+            #
+            CODE="
+            #include <linux/pm.h>
+            void pm_runtime_conftest(void) {
+                struct dev_pm_info dpmi;
+                atomic_set(&dpmi.usage_count, 1);
+            }"
+
+            compile_check_conftest "$CODE" "NV_PM_RUNTIME_AVAILABLE" "" "generic"
+        ;;
+
+        device_driver_of_match_table)
+            #
+            # Determine if the device_driver struct has an of_match_table
+            # member.
+            #
+            # of_match_table was added by commit 597b9d1e44e9 ("drivercore:
+            # Add of_match_table to the common device drivers") in v2.6.35
+            # (2010-04-13).
+            #
+            CODE="
+            #include <linux/device.h>
+            int conftest_device_driver_of_match_table(void) {
+                return offsetof(struct device_driver, of_match_table);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DEVICE_DRIVER_OF_MATCH_TABLE_PRESENT" "" "types"
+        ;;
+
+        device_of_node)
+            #
+            # Determine if the device struct has an of_node member.
+            #
+            # The of_node member was added by commit d706c1b05027
+            # ("driver-core: Add device node pointer to struct device") in
+            # v2.6.35 (2010-04-13).
+            #
+            CODE="
+            #include <linux/device.h>
+            int conftest_device_of_node(void) {
+                return offsetof(struct device, of_node);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DEVICE_OF_NODE_PRESENT" "" "types"
+        ;;
+
+        dev_is_pci)
+            #
+            # Determine if the dev_is_pci() macro is present.
+            #
+            # The dev_is_pci() macro was added by commit fb8a0d9d1bfd ("pci:
+            # Add SR-IOV convenience functions and macros") in v2.6.34
+            # (2010-02-10).
+            #
+            CODE="
+            #include <linux/pci.h>
+            void conftest_dev_is_pci(void) {
+                if(dev_is_pci()) {}
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_DEV_IS_PCI_PRESENT" "" "functions"
+        ;;
+
+        of_find_matching_node)
+            #
+            # Determine if the of_find_matching_node() function is present.
+            #
+            # Test if inclusion of the linux/of.h header file succeeds, and
+            # define/undefine NV_LINUX_OF_H_USABLE depending on the result.
+            #
+            # of_find_matching_node was added by commit 283029d16a88
+            # ("[POWERPC] Add of_find_matching_node() helper function") in
+            # v2.6.25 (2008-01-09).
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/of.h>
+            " > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic"
+                CODE="
+                #include <linux/of.h>
+                void conftest_of_find_matching_node() {
+                    of_find_matching_node();
+                }"
+
+                compile_check_conftest "$CODE" "NV_OF_FIND_MATCHING_NODE_PRESENT" "" "functions"
+            else
+                echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic"
+                echo "#undef NV_OF_FIND_MATCHING_NODE_PRESENT" | append_conftest "functions"
+            fi
+        ;;
+
+        dma_direct_map_resource)
+            #
+            # Determine whether dma_is_direct() exists.
+            #
+            # dma_is_direct() was added by commit 356da6d0cde3 ("dma-mapping:
+            # bypass indirect calls for dma-direct") in v5.1 (2018-12-06).
+            #
+            # If dma_is_direct() does exist, then we assume that
+            # dma_direct_map_resource() exists. Both functions were added
+            # as part of the same patchset.
+            #
+            # The presence of dma_is_direct() and dma_direct_map_resource()
+            # means that dma_direct can perform DMA mappings itself.
+            #
+            CODE="
+            #include <linux/dma-mapping.h>
+            void conftest_dma_is_direct(void) {
+                dma_is_direct();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DMA_IS_DIRECT_PRESENT" "" "functions"
+        ;;
+
+        tegra_get_platform)
+            #
+            # Determine if the tegra_get_platform() function is present.
+            #
+            CODE="
+            #if defined(NV_SOC_TEGRA_CHIP_ID_H_PRESENT)
+            #include <soc/tegra/chip-id.h>
+            #elif defined(NV_SOC_TEGRA_FUSE_H_PRESENT)
+            #include <soc/tegra/fuse.h>
+            #endif
+            void conftest_tegra_get_platform(void) {
+                tegra_get_platform(0);
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_TEGRA_GET_PLATFORM_PRESENT" "" "functions"
+        ;;
+
+        tegra_bpmp_send_receive)
+            #
+            # Determine if the tegra_bpmp_send_receive() function is present.
+            #
+            CODE="
+            #if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
+            #include <soc/tegra/tegra_bpmp.h>
+            #endif
+            int conftest_tegra_bpmp_send_receive(
+                    int mrq,
+                    void *ob_data,
+                    int ob_sz,
+                    void *ib_data,
+                    int ib_sz) {
+                return tegra_bpmp_send_receive(mrq, ob_data, ob_sz, ib_data, ib_sz);
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_TEGRA_BPMP_SEND_RECEIVE" "" "functions"
+        ;;
+
+        drm_alpha_blending_available)
+            #
+            # Determine if the DRM subsystem supports alpha blending.
+            #
+            # This conftest uses "generic" rather than "functions" because
+            # with the logic of "functions" the presence of either
+            # drm_plane_create_alpha_property or
+            # drm_plane_create_blend_mode_property alone would be enough to
+            # cause NV_DRM_ALPHA_BLENDING_AVAILABLE to be defined.
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_BLEND_H_PRESENT)
+            #include <drm/drm_blend.h>
+            #endif
+            void conftest_drm_alpha_blending_available(void) {
+                /* 2018-04-11 ae0e28265e216dad11d4cbde42fc15e92919af78 */
+                (void)drm_plane_create_alpha_property;
+
+                /* 2018-08-23 a5ec8332d4280500544e316f76c04a7adc02ce03 */
+                (void)drm_plane_create_blend_mode_property;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ALPHA_BLENDING_AVAILABLE" "" "generic"
+        ;;
+
+        drm_rotation_available)
+            #
+            # Determine if the DRM subsystem supports rotation.
+            #
+            # drm_plane_create_rotation_property() was added on 2016-09-26 by
+            # commit d138dd3c0c70979215f3184cf36f95875e37932e ("drm: Add
+            # support for optional per-plane rotation property") in the linux
+            # kernel. Its presence is sufficient to say that the DRM
+            # subsystem supports rotation.
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_BLEND_H_PRESENT)
+            #include <drm/drm_blend.h>
+            #endif
+            void conftest_drm_rotation_available(void) {
+                drm_plane_create_rotation_property();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_ROTATION_AVAILABLE" "" "functions"
+        ;;
+
+        drm_driver_prime_flag_present)
+            #
+            # Determine whether the driver feature flag DRIVER_PRIME is
+            # present.
+            #
+            # The DRIVER_PRIME flag was added by commit 3248877ea179 ("drm:
+            # base prime/dma-buf support (v5)") in v3.4 (2011-11-25) and was
+            # removed by commit 0424fdaf883a ("drm/prime: Actually remove
+            # DRIVER_PRIME everywhere") on 2019-06-17.
+            #
+            # The DRIVER_PRIME definition moved from drmP.h to drm_drv.h by
+            # commit 85e634bce01a ("drm: Extract drm_drv.h") in v4.10
+            # (2016-11-14).
+            #
+            # The DRIVER_PRIME define was changed to an enum value by commit
+            # 0e2a933b02c9 ("drm: Switch DRIVER_ flags to an enum") in v5.1
+            # (2019-01-29).
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_DRV_H_PRESENT)
+            #include <drm/drm_drv.h>
+            #endif
+
+            unsigned int drm_driver_prime_flag_present_conftest(void) {
+                return DRIVER_PRIME;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DRIVER_PRIME_FLAG_PRESENT" "" "types"
+        ;;
+
+        drm_connector_for_each_possible_encoder)
+            #
+            # Determine the number of arguments of the
+            # drm_connector_for_each_possible_encoder() macro.
+            #
+            # drm_connector_for_each_possible_encoder() was added by commit
+            # 83aefbb887b5 ("drm: Add drm_connector_for_each_possible_encoder()")
+            # in v4.19. The definition and prototype were changed to take only
+            # two arguments, connector and encoder, by commit 62afb4ad425a
+            # ("drm/connector: Allow max possible encoders to attach to a
+            # connector") in v5.5-rc1.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
+            #include <drm/drm_connector.h>
+            #endif
+
+            void conftest_drm_connector_for_each_possible_encoder(
+                    struct drm_connector *connector,
+                    struct drm_encoder *encoder,
+                    int i) {
+
+                drm_connector_for_each_possible_encoder(connector, encoder, i) {
+                }
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT 3" | append_conftest "functions"
+                rm -f conftest$$.o
+                return
+            else
+                echo "#define NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT 2" | append_conftest "functions"
+            fi
+        ;;
+
+        mmu_notifier_ops_invalidate_range)
+            #
+            # Determine if the mmu_notifier_ops struct has the
+            # 'invalidate_range' member.
+            #
+            # struct mmu_notifier_ops.invalidate_range was added by commit
+            # 0f0a327fa12cd55de5e7f8c05a70ac3d047f405e ("mmu_notifier: add the
+            # callback for mmu_notifier_invalidate_range()") in v3.19
+            # (2014-11-13).
+            #
+            CODE="
+            #include <linux/mmu_notifier.h>
+            int conftest_mmu_notifier_ops_invalidate_range(void) {
+                return offsetof(struct mmu_notifier_ops, invalidate_range);
+            }"
+
+            compile_check_conftest "$CODE" "NV_MMU_NOTIFIER_OPS_HAS_INVALIDATE_RANGE" "" "types"
+        ;;
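+        # A member-presence define such as the one above is typically
+        # consumed in a conditional struct initializer (sketch only; the
+        # callback name nv_invalidate_range is hypothetical):
+        #
+        #     static const struct mmu_notifier_ops nv_notifier_ops = {
+        #     #if defined(NV_MMU_NOTIFIER_OPS_HAS_INVALIDATE_RANGE)
+        #         .invalidate_range = nv_invalidate_range,
+        #     #endif
+        #     };
+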
+        drm_format_num_planes)
+            #
+            # Determine if the drm_format_num_planes() function is present.
+            #
+            # The drm_format_num_planes() function was added by commit
+            # d0d110e09629 ("drm: Add drm_format_num_planes() utility
+            # function") in v3.3 (2011-12-20). The prototype was moved from
+            # drm_crtc.h to drm_fourcc.h by commit ae4df11a0f53 ("drm: Move
+            # format-related helpers to drm_fourcc.c") in v4.8 (2016-06-09).
+            # drm_format_num_planes() was removed by commit 05c452c115bf
+            # ("drm: Remove users of drm_format_num_planes") in v5.3
+            # (2019-05-16).
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_CRTC_H_PRESENT)
+            #include <drm/drm_crtc.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_FOURCC_H_PRESENT)
+            #include <drm/drm_fourcc.h>
+            #endif
+
+            void conftest_drm_format_num_planes(void) {
+                drm_format_num_planes();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_DRM_FORMAT_NUM_PLANES_PRESENT" "" "functions"
+        ;;
+
+        drm_gem_object_has_resv)
+            #
+            # Determine if the 'drm_gem_object' structure has a 'resv' field.
+            #
+            # A 'resv' field was added to the 'drm_gem_object' structure by
+            # commit 1ba627148ef5 ("drm: Add reservation_object to
+            # drm_gem_object") in v5.2.
+            #
+            CODE="$CONFTEST_PREAMBLE
+            #if defined(NV_DRM_DRM_GEM_H_PRESENT)
+            #include <drm/drm_gem.h>
+            #endif
+
+            int conftest_drm_gem_object_has_resv(void) {
+                return offsetof(struct drm_gem_object, resv);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_HAS_RESV" "" "types"
+        ;;
+
+        proc_ops)
+            #
+            # Determine if the 'struct proc_ops' type is present.
+            #
+            # Added by commit d56c0d45f0e2 ("proc: decouple proc from VFS with
+            # "struct proc_ops"") in v5.6-rc1
+            #
+            CODE="
+            #include <linux/proc_fs.h>
+
+            struct proc_ops p_ops;
+            "
+
+            compile_check_conftest "$CODE" "NV_PROC_OPS_PRESENT" "" "types"
+        ;;
+
+        drm_crtc_state_has_async_flip)
+            #
+            # Determine if the 'drm_crtc_state' structure has an 'async_flip'
+            # field.
+            #
+            # Commit 4d85f45c73a2 ("drm/atomic: Rename
+            # crtc_state->pageflip_flags to async_flip") replaced
+            # 'pageflip_flags' with 'async_flip' in v5.4.
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_CRTC_H_PRESENT)
+            #include <drm/drm_crtc.h>
+            #endif
+
+            int conftest_drm_crtc_state_has_async_flip(void) {
+                return offsetof(struct drm_crtc_state, async_flip);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP" "" "types"
+        ;;
+
+        drm_crtc_state_has_pageflip_flags)
+            #
+            # Determine if the 'drm_crtc_state' structure has a
+            # 'pageflip_flags' field.
+            #
+            # 'pageflip_flags' was added by commit 6cbe5c466d73 ("drm/atomic:
+            # Save flip flags in drm_crtc_state") in v4.12. Commit
+            # 4d85f45c73a2 ("drm/atomic: Rename crtc_state->pageflip_flags to
+            # async_flip") replaced 'pageflip_flags' with 'async_flip' in
+            # v5.4.
+            #
+            CODE="
+            #if defined(NV_DRM_DRM_CRTC_H_PRESENT)
+            #include <drm/drm_crtc.h>
+            #endif
+
+            int conftest_drm_crtc_state_has_pageflip_flags(void) {
+                return offsetof(struct drm_crtc_state, pageflip_flags);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS" "" "types"
+        ;;
+
+        ktime_get_raw_ts64)
+            #
+            # Determine if ktime_get_raw_ts64() is present.
+            #
+            # Added by commit fb7fcc96a86cf ("timekeeping: Standardize on
+            # ktime_get_*() naming") in v4.18 (2018-04-27)
+            #
+            CODE="
+            #include <linux/ktime.h>
+            void conftest_ktime_get_raw_ts64(void){
+                ktime_get_raw_ts64();
+            }"
+            compile_check_conftest "$CODE" "NV_KTIME_GET_RAW_TS64_PRESENT" "" "functions"
+        ;;
+
+        ktime_get_real_ts64)
+            #
+            # Determine if ktime_get_real_ts64() is present.
+            #
+            # Added by commit d6d29896c665d ("timekeeping: Provide timespec64
+            # based interfaces") in v3.17 (2014-07-16)
+            #
+            CODE="
+            #include <linux/ktime.h>
+            void conftest_ktime_get_real_ts64(void){
+                ktime_get_real_ts64();
+            }"
+            compile_check_conftest "$CODE" "NV_KTIME_GET_REAL_TS64_PRESENT" "" "functions"
+        ;;
+
+        drm_format_modifiers_present)
+            #
+            # Determine whether the base DRM format modifier support is
+            # present.
+            #
+            # This will show up in a few places:
+            #
+            #  - The definition of the format modifier constructor macro,
+            #    which we can use to reconstruct our bleeding-edge format
+            #    modifiers when the local kernel headers don't include them.
+            #
+            #  - The first set of format modifier vendor macros, including
+            #    the poorly named "NV" vendor, which was later renamed
+            #    "NVIDIA".
+            #
+            #  - The "modifier[]" member of the AddFB2 ioctl's parameter
+            #    structure.
+            #
+            # All of these were added by commit e3eb3250d84e ("drm: add
+            # support for tiled/compressed/etc modifier in addfb2") in
+            # v4.1-rc1 (2015-02-05).
+            #
+            CODE="
+            #include <drm/drm_mode.h>
+            #include <drm/drm_fourcc.h>
+            int conftest_fourcc_fb_modifiers(void) {
+                u64 my_fake_mod = fourcc_mod_code(INTEL, 0);
+                (void)my_fake_mod;
+                return offsetof(struct drm_mode_fb_cmd2, modifier);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_FORMAT_MODIFIERS_PRESENT" "" "types"
+        ;;
+
+        timespec64)
+            #
+            # Determine if struct timespec64 is present.
+            #
+            # Added by commit 361a3bf00582 ("time64: Add time64.h header and
+            # define struct timespec64") in v3.17 (2014-07-16)
+            #
+            CODE="
+            #include <linux/time.h>
+
+            struct timespec64 ts64;
+            "
+            compile_check_conftest "$CODE" "NV_TIMESPEC64_PRESENT" "" "types"
+        ;;
+
+        vmalloc_has_pgprot_t_arg)
+            #
+            # Determine if __vmalloc has the 'pgprot' argument.
+            #
+            # The third argument to __vmalloc, page protection
+            # 'pgprot_t prot', was removed by commit 88dca4ca5a93
+            # ("mm: remove the pgprot argument to __vmalloc")
+            # in v5.8-rc1 (2020-06-01).
+            #
+            CODE="
+            #include <linux/vmalloc.h>
+
+            void conftest_vmalloc_has_pgprot_t_arg(void) {
+                pgprot_t prot;
+                (void)__vmalloc(0, 0, prot);
+            }"
+
+            compile_check_conftest "$CODE" "NV_VMALLOC_HAS_PGPROT_T_ARG" "" "types"
+        ;;
+
+        mm_has_mmap_lock)
+            #
+            # Determine if the 'mm_struct' structure has a 'mmap_lock' field.
+            #
+            # Kernel commit da1c55f1b272 ("mmap locking API: rename mmap_sem
+            # to mmap_lock") replaced the field 'mmap_sem' with 'mmap_lock'
+            # in v5.8-rc1 (2020-06-08).
+            #
+            CODE="
+            #include <linux/mm_types.h>
+
+            int conftest_mm_has_mmap_lock(void) {
+                return offsetof(struct mm_struct, mmap_lock);
+            }"
+
+            compile_check_conftest "$CODE" "NV_MM_HAS_MMAP_LOCK" "" "types"
+        ;;
+
+        full_name_hash)
+            #
+            # Determine how many arguments full_name_hash takes.
+            #
+            # Changed by commit 8387ff2577e ("vfs: make the string hashes salt
+            # the hash") in v4.8 (2016-06-10)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/stringhash.h>
+            void conftest_full_name_hash(void) {
+                full_name_hash(NULL, NULL, 0);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_FULL_NAME_HASH_ARGUMENT_COUNT 3" | append_conftest "functions"
+            else
+                echo "#define NV_FULL_NAME_HASH_ARGUMENT_COUNT 2" | append_conftest "functions"
+            fi
+        ;;
+        hlist_for_each_entry)
+            #
+            # Determine how many arguments hlist_for_each_entry takes.
+            #
+            # Changed by commit b67bfe0d42c ("hlist: drop the node parameter
+            # from iterators") in v3.9 (2013-02-28)
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/list.h>
+            void conftest_hlist_for_each_entry(void) {
+                struct hlist_head *head;
+                struct dummy
+                {
+                    struct hlist_node hlist;
+                };
+                struct dummy *pos;
+                hlist_for_each_entry(pos, head, hlist) {}
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT 3" | append_conftest "functions"
+            else
+                echo "#define NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT 4" | append_conftest "functions"
+            fi
+        ;;
+
+        drm_vma_offset_exact_lookup_locked)
+            #
+            # Determine if the drm_vma_offset_exact_lookup_locked() function
+            # is present.
+            #
+            # Added by commit 2225cfe46bcc ("drm/gem: Use kref_get_unless_zero
+            # for the weak mmap references") in v4.4
+            #
+            CODE="
+            #include <drm/drm_vma_manager.h>
+            void conftest_drm_vma_offset_exact_lookup_locked(void) {
+                drm_vma_offset_exact_lookup_locked();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT" "" "functions"
+        ;;
+
+        drm_vma_node_is_allowed_has_tag_arg)
+            #
+            # Determine if drm_vma_node_is_allowed() has a 'tag' argument of
+            # 'struct drm_file *' type.
+            #
+            # Updated to take a 'tag' argument by commit d9a1f0b4eb60 ("drm:
+            # use drm_file to tag vm-bos") in v4.9
+            #
+            CODE="
+            #include <drm/drm_vma_manager.h>
+            bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+                                         struct drm_file *tag) {
+                return true;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG" "" "types"
+        ;;
+
+        drm_vma_offset_node_has_readonly)
+            #
+            # Determine if the 'drm_vma_offset_node' structure has a
+            # 'readonly' field.
+            #
+            # Added by commit 3e977ac6179b ("drm/i915: Prevent writing into a
+            # read-only object via a GGTT mmap") in v4.19.
+            #
+            CODE="
+            #include <drm/drm_vma_manager.h>
+
+            int conftest_drm_vma_offset_node_has_readonly(void) {
+                return offsetof(struct drm_vma_offset_node, readonly);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_VMA_OFFSET_NODE_HAS_READONLY" "" "types"
+        ;;
+
+        pci_enable_atomic_ops_to_root)
+            #
+            # pci_enable_atomic_ops_to_root was added by commit 430a23689dea
+            # ("PCI: Add pci_enable_atomic_ops_to_root()") in v4.16-rc1
+            # (2018-01-05)
+            #
+            CODE="
+            #include <linux/pci.h>
+            void conftest_pci_enable_atomic_ops_to_root(void) {
+                pci_enable_atomic_ops_to_root();
+            }"
+            compile_check_conftest "$CODE" "NV_PCI_ENABLE_ATOMIC_OPS_TO_ROOT_PRESENT" "" "functions"
+        ;;
+
+        kvmalloc)
+            #
+            # Determine if kvmalloc() is present.
+            #
+            # Added by commit a7c3e901a46ff54c016d040847eda598a9e3e653 ("mm:
+            # introduce kv[mz]alloc helpers") in v4.12 (2017-05-08).
+            #
+            CODE="
+            #include <linux/mm.h>
+            void conftest_kvmalloc(void){
+                kvmalloc();
+            }"
+            compile_check_conftest "$CODE" "NV_KVMALLOC_PRESENT" "" "functions"
+        ;;
+
+        drm_gem_object_put_unlocked)
+            #
+            # Determine if the function drm_gem_object_put_unlocked() is
+            # present.
+            #
+            # In v5.9-rc1, commit 2f4dd13d4bb8 ("drm/gem: add
+            # drm_gem_object_put helper") removed the
+            # drm_gem_object_put_unlocked() function and replaced its
+            # definition with a transient macro. Commit ab15d56e27be ("drm:
+            # remove transient drm_gem_object_put_unlocked()") finally
+            # removed the drm_gem_object_put_unlocked() macro.
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_GEM_H_PRESENT)
+            #include <drm/drm_gem.h>
+            #endif
+            void conftest_drm_gem_object_put_unlocked(void) {
+                drm_gem_object_put_unlocked();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT" "" "functions"
+        ;;
+
+        drm_display_mode_has_vrefresh)
+            #
+            # Determine if the 'drm_display_mode' structure has a 'vrefresh'
+            # field.
+            #
+            # Removed by commit 0425662fdf05 ("drm: Nuke mode->vrefresh") in
+            # v5.9-rc1.
+            #
+            CODE="
+            #include <drm/drm_modes.h>
+
+            int conftest_drm_display_mode_has_vrefresh(void) {
+                return offsetof(struct drm_display_mode, vrefresh);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DISPLAY_MODE_HAS_VREFRESH" "" "types"
+        ;;
+
+        drm_driver_master_set_has_int_return_type)
+            #
+            # Determine if drm_driver::master_set() returns an integer value.
+            #
+            # Changed to void by commit 907f53200f98 ("drm: vmwgfx: remove
+            # drm_driver::master_set() return type") in v5.9-rc1.
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_DRV_H_PRESENT)
+            #include <drm/drm_drv.h>
+            #endif
+
+            int conftest_drm_driver_master_set_has_int_return_type(struct drm_driver *drv,
+                    struct drm_device *dev, struct drm_file *file_priv, bool from_open) {
+
+                return drv->master_set(dev, file_priv, from_open);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE" "" "types"
+        ;;
+
+        drm_driver_has_gem_free_object)
+            #
+            # Determine if the 'drm_driver' structure has a 'gem_free_object'
+            # function pointer.
+            #
+            # drm_driver::gem_free_object was removed by commit 1a9458aeb8eb
+            # ("drm: remove drm_driver::gem_free_object") in v5.9-rc1.
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_DRV_H_PRESENT)
+            #include <drm/drm_drv.h>
+            #endif
+
+            int conftest_drm_driver_has_gem_free_object(void) {
+                return offsetof(struct drm_driver, gem_free_object);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT" "" "types"
+        ;;
+
+        vga_tryget)
+            #
+            # Determine if vga_tryget() is present.
+            #
+            # vga_tryget() was removed by commit f369bc3f9096 ("vgaarb: mark
+            # vga_tryget static") in v5.9-rc1 (2020-08-01).
+            #
+            CODE="
+            #include <linux/vgaarb.h>
+            void conftest_vga_tryget(void) {
+                vga_tryget();
+            }"
+
+            compile_check_conftest "$CODE" "NV_VGA_TRYGET_PRESENT" "" "functions"
+        ;;
+
+        pci_channel_state)
+            #
+            # Determine if the pci_channel_state enum type is present.
+            #
+            # pci_channel_state was removed by commit 16d79cd4e23b ("PCI: Use
+            # 'pci_channel_state_t' instead of 'enum pci_channel_state'") in
+            # v5.9-rc1 (2020-07-02).
+            #
+            CODE="
+            #include <linux/pci.h>
+
+            enum pci_channel_state state;
+            "
+
+            compile_check_conftest "$CODE" "NV_PCI_CHANNEL_STATE_PRESENT" "" "types"
+        ;;
+
+        pgprot_decrypted)
+            #
+            # Determine if the macro 'pgprot_decrypted()' is present.
+            #
+            # Added by commit 21729f81ce8a ("x86/mm: Provide general kernel
+            # support for memory encryption") in v4.14 (2017-07-18)
+            #
+            CODE="
+            #include <asm/pgtable.h>
+
+            void conftest_pgprot_decrypted(void) {
+                if(pgprot_decrypted()) {}
+            }"
+
+            compile_check_conftest "$CODE" "NV_PGPROT_DECRYPTED_PRESENT" "" "functions"
+        ;;
+
+        cc_mkdec)
+            #
+            # Determine if cc_mkdec() is present.
+            #
+            # cc_mkdec() was added by commit b577f542f93c ("x86/coco: Add API
+            # to handle encryption mask") in v5.18-rc1 (2022-02-22).
+            #
+            CODE="
+            #if defined(NV_ASM_COCO_H_PRESENT)
+            #include <asm/coco.h>
+            #endif
+
+            void conftest_cc_mkdec(void) {
+                cc_mkdec();
+            }"
+
+            compile_check_conftest "$CODE" "NV_CC_MKDEC_PRESENT" "" "functions"
+        ;;
+
+        drm_prime_pages_to_sg_has_drm_device_arg)
+            #
+            # Determine if drm_prime_pages_to_sg() has a 'dev' argument.
+            #
+            # drm_prime_pages_to_sg() was updated to take a 'dev' argument by
+            # commit 707d561f77b5 ("drm: allow limiting the scatter list
+            # size.").
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+            #if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+            #include <drm/drm_prime.h>
+            #endif
+
+            struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+                                                   struct page **pages,
+                                                   unsigned int nr_pages) {
+                return 0;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG" "" "types"
+        ;;
+
+        drm_driver_has_gem_prime_callbacks)
+            #
+            # Determine if the drm_driver structure has the GEM and PRIME
+            # callback function pointers.
+            #
+            # The GEM and PRIME callbacks were removed from the drm_driver
+            # structure by commit d693def4fd1c ("drm: Remove obsolete GEM and
+            # PRIME callbacks from struct drm_driver").
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_DRV_H_PRESENT)
+            #include <drm/drm_drv.h>
+            #endif
+
+            void conftest_drm_driver_has_gem_and_prime_callbacks(void) {
+                struct drm_driver drv;
+
+                drv.gem_prime_pin = 0;
+                drv.gem_prime_get_sg_table = 0;
+                drv.gem_prime_vmap = 0;
+                drv.gem_prime_vunmap = 0;
+                drv.gem_vm_ops = 0;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS" "" "types"
+        ;;
+
+        drm_crtc_atomic_check_has_atomic_state_arg)
+            #
+            # Determine if drm_crtc_helper_funcs::atomic_check takes a 'state'
+            # argument of 'struct drm_atomic_state' type.
+            #
+            # Commit 29b77ad7b9ca ("drm/atomic: Pass the full state to CRTC
+            # atomic_check") passed the full atomic state to
+            # drm_crtc_helper_funcs::atomic_check().
+            #
+            # To test the signature of drm_crtc_helper_funcs::atomic_check(),
+            # declare a function prototype with typeof ::atomic_check(), and
+            # then define the corresponding function implementation with the
+            # expected signature. Successful compilation indicates that
+            # ::atomic_check() has the expected signature.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <drm/drm_modeset_helper_vtables.h>
+
+            static const struct drm_crtc_helper_funcs *funcs;
+            typeof(*funcs->atomic_check) conftest_drm_crtc_atomic_check_has_atomic_state_arg;
+
+            int conftest_drm_crtc_atomic_check_has_atomic_state_arg(
+                    struct drm_crtc *crtc, struct drm_atomic_state *state) {
+                return 0;
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types"
+            else
+                echo "#undef NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types"
+            fi
+        ;;
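+        # The typeof trick above generalizes to any callback whose signature
+        # changed over time: redeclare the callback with the candidate
+        # signature and let the compiler compare it against the real
+        # prototype (illustrative sketch; 'struct some_ops' and its 'cb'
+        # member are hypothetical):
+        #
+        #     static const struct some_ops *ops;
+        #     typeof(*ops->cb) conftest_cb;     /* redeclaration of the callback */
+        #     int conftest_cb(struct foo *f) {  /* candidate signature */
+        #         return 0;
+        #     }
+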
+        drm_gem_object_vmap_has_map_arg)
+            #
+            # Determine if drm_gem_object_funcs::vmap takes a 'map' argument
+            # of 'struct dma_buf_map' type.
+            #
+            # Commit 49a3f51dfeee ("drm/gem: Use struct dma_buf_map in GEM
+            # vmap ops and convert GEM backends") updated
+            # drm_gem_object_funcs::vmap to take a 'map' argument.
+            #
+            CODE="
+            #include <drm/drm_gem.h>
+            int conftest_drm_gem_object_vmap_has_map_arg(
+                    struct drm_gem_object *obj, struct dma_buf_map *map) {
+                return obj->funcs->vmap(obj, map);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG" "" "types"
+        ;;
+
+        set_close_on_exec)
+            #
+            # __set_close_on_exec() was added by
+            # commit 1dce27c5aa67 ("Wrap accesses to the fd_sets")
+            # in v3.4-rc1 (2012-02-19)
+            #
+            CODE="
+            #include <linux/types.h>
+            #include <linux/fdtable.h>
+            void conftest_set_close_on_exec(void) {
+                __set_close_on_exec();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SET_CLOSE_ON_EXEC_PRESENT" "" "functions"
+        ;;
+
+        iterate_fd)
+            #
+            # iterate_fd() was added by
+            # commit c3c073f808b2 ("new helper: iterate_fd()")
+            # in v3.7-rc1 (2012-09-26)
+            #
+            CODE="
+            #include <linux/types.h>
+            #include <linux/fdtable.h>
+            void conftest_iterate_fd(void) {
+                iterate_fd();
+            }"
+
+            compile_check_conftest "$CODE" "NV_ITERATE_FD_PRESENT" "" "functions"
+        ;;
+
+        seq_read_iter)
+            #
+            # Determine if seq_read_iter() is present.
+            #
+            # seq_read_iter() was added by commit d4d50710a8b4 ("seq_file:
+            # add seq_read_iter") in v5.10-rc1 (2020-11-04).
+            #
+            CODE="
+            #include <linux/seq_file.h>
+            void conftest_seq_read_iter(void) {
+                seq_read_iter();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SEQ_READ_ITER_PRESENT" "" "functions"
+        ;;
+
+        pci_class_multimedia_hd_audio)
+            #
+            # Determine if the 'PCI_CLASS_MULTIMEDIA_HD_AUDIO' macro is
+            # present in <linux/pci_ids.h>.
+            #
+            # Commit 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA
+            # controller") moved the 'PCI_CLASS_MULTIMEDIA_HD_AUDIO' macro
+            # from <sound/hdaudio.h> to <linux/pci_ids.h> in v4.17-rc1
+            # (2018-03-03).
+            #
+            CODE="
+            #include <linux/pci_ids.h>
+            unsigned int conftest_pci_class_multimedia_hd_audio(void) {
+                return PCI_CLASS_MULTIMEDIA_HD_AUDIO;
+            }"
+
+            compile_check_conftest "$CODE" "NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT" "" "generic"
+        ;;
+
+        sg_page_iter_page)
+            #
+            # Determine if sg_page_iter_page() is present.
+            #
+            # sg_page_iter_page() was added by commit 2db76d7c3c6db
+            # ("lib/scatterlist: sg_page_iter: support sg lists w/o backing
+            # pages") in v3.10-rc1 (2013-05-11).
+            #
+            CODE="
+            #include <linux/scatterlist.h>
+            void conftest_sg_page_iter_page(void) {
+                sg_page_iter_page();
+            }"
+
+            compile_check_conftest "$CODE" "NV_SG_PAGE_ITER_PAGE_PRESENT" "" "functions"
+        ;;
+
+        unsafe_follow_pfn)
+            #
+            # Determine if unsafe_follow_pfn() is present.
+            #
+            # unsafe_follow_pfn() was added by commit 69bacee7f9ad
+            # ("mm: Add unsafe_follow_pfn") in v5.13-rc1.
+            #
+            CODE="
+            #include <linux/mm.h>
+            void conftest_unsafe_follow_pfn(void) {
+                unsafe_follow_pfn();
+            }"
+
+            compile_check_conftest "$CODE" "NV_UNSAFE_FOLLOW_PFN_PRESENT" "" "functions"
+        ;;
+
+        drm_plane_atomic_check_has_atomic_state_arg)
+            #
+            # Determine if drm_plane_helper_funcs::atomic_check takes a
+            # 'state' argument of 'struct drm_atomic_state' type.
+            #
+            # Commit 7c11b99a8e58 ("drm/atomic: Pass the full state to
+            # planes atomic_check") passed the full atomic state to
+            # drm_plane_helper_funcs::atomic_check().
+            #
+            # To test the signature of drm_plane_helper_funcs::atomic_check(),
+            # declare a function prototype with typeof ::atomic_check(), and
+            # then define the corresponding function implementation with the
+            # expected signature. Successful compilation indicates that
+            # ::atomic_check() has the expected signature.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <drm/drm_modeset_helper_vtables.h>
+
+            static const struct drm_plane_helper_funcs *funcs;
+            typeof(*funcs->atomic_check) conftest_drm_plane_atomic_check_has_atomic_state_arg;
+
+            int conftest_drm_plane_atomic_check_has_atomic_state_arg(
+                    struct drm_plane *plane, struct drm_atomic_state *state) {
+                return 0;
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types"
+            else
+                echo "#undef NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types"
+            fi
+        ;;
+
+        ib_peer_memory_symbols)
+            #
+            # Determine if the following symbols exist in Module.symvers:
+            #   1. ib_register_peer_memory_client
+            #   2. ib_unregister_peer_memory_client
+            # The conftest first checks the kernel's own Module.symvers in
+            # the regular path. If the symbols are not found there, it is
+            # possible that MOFED is installed, so also check for these
+            # symbols in MOFED's Module.symvers, whose path differs from the
+            # kernel's symvers.
+            #
+            # Note: KERNELRELEASE and ARCH are defined by Kbuild and
+            # automatically passed down to conftest.sh as env vars.
+            #
+            MLNX_OFED_KERNEL_DIR=/usr/src/ofa_kernel
+            VAR_DKMS_SOURCES_DIR=$(test -d /var/lib/dkms/mlnx-ofed-kernel &&
+                                   ls -d /var/lib/dkms/mlnx-ofed-kernel/*/build 2>/dev/null)
+
+            if check_for_ib_peer_memory_symbols "$OUTPUT" || \
+               check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/$ARCH/$KERNELRELEASE" || \
+               check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/$KERNELRELEASE" || \
+               check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/default" || \
+               check_for_ib_peer_memory_symbols "$VAR_DKMS_SOURCES_DIR"; then
+                echo "#define NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT" | append_conftest "symbols"
+            else
+                echo "#undef NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT" | append_conftest "symbols"
+            fi
+        ;;
+
+        add_memory_driver_managed)
+            #
+            # Determine if the add_memory_driver_managed function is present.
+            #
+            # Added by commit 7b7b27214bba ("mm/memory_hotplug: introduce
+            # add_memory_driver_managed()") in v5.8-rc1 (2020-06-05)
+            #
+            CODE="
+            #include <linux/memory_hotplug.h>
+            void conftest_add_memory_driver_managed() {
+                add_memory_driver_managed();
+            }"
+
+            compile_check_conftest "$CODE" "NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT" "" "functions"
+        ;;
+
+        add_memory_driver_managed_has_mhp_flags_arg)
+            #
+            # Check if add_memory_driver_managed() has an mhp_flags arg.
+            #
+            # Added by commit b6117199787c ("mm/memory_hotplug: prepare
+            # passing flags to add_memory() and friends") in v5.10-rc1
+            # (2020-10-16)
+            #
+            CODE="
+            #include <linux/memory_hotplug.h>
+            int add_memory_driver_managed(int nid, u64 start, u64 size,
+                                          const char *resource_name,
+                                          mhp_t mhp_flags) {
+                return 0;
+            }"
+
+            compile_check_conftest "$CODE" "NV_ADD_MEMORY_DRIVER_MANAGED_HAS_MHP_FLAGS_ARG" "" "types"
+        ;;
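+        # check_for_ib_peer_memory_symbols is defined elsewhere in this
+        # script; a rough sketch of the assumed behavior (it succeeds only
+        # when both symbols appear in the given build tree's Module.symvers):
+        #
+        #     check_for_ib_peer_memory_symbols() {
+        #         symvers="$1/Module.symvers"
+        #         grep -q ib_register_peer_memory_client   "$symvers" 2>/dev/null &&
+        #         grep -q ib_unregister_peer_memory_client "$symvers" 2>/dev/null
+        #     }
+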
+            #
+            # Removed by commit e1c158e4956612e7 ("mm/memory_hotplug: remove nid
+            # parameter from remove_memory() and friends") in v5.15-rc1 (2021-09-09)
+            #
+            CODE="
+            #include <linux/memory_hotplug.h>
+            int remove_memory(int nid, u64 start, u64 size) {
+                return 0;
+            }"
+
+            compile_check_conftest "$CODE" "NV_REMOVE_MEMORY_HAS_NID_ARG" "" "types"
+        ;;
+
+        device_property_read_u64)
+            #
+            # Determine if the device_property_read_u64 function is present
+            #
+            # Added by commit b31384fa5de37a1 ("Driver core: Unified device
+            # properties interface for platform firmware") in v3.19-rc1 (2014-11-05)
+            #
+            CODE="
+            #include <linux/property.h>
+            void conftest_device_property_read_u64() {
+                device_property_read_u64();
+            }"
+
+            compile_check_conftest "$CODE" "NV_DEVICE_PROPERTY_READ_U64_PRESENT" "" "functions"
+        ;;
+
+        of_property_count_elems_of_size)
+            #
+            # Determine if of_property_count_elems_of_size is present
+            #
+            # Added by commit 1df09bc ("of: Move OF property and graph API from
+            # base.c to property.c")
+            #
+            # Test if linux/of.h header file inclusion is successful and,
+            # depending on that check, test for of_property_count_elems_of_size
+            # presence
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/of.h>
+            " > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                CODE="
+                #include <linux/of.h>
+                void conftest_of_property_count_elems_of_size() {
+                    of_property_count_elems_of_size();
+                }"
+
+                compile_check_conftest "$CODE" "NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT" "" "functions"
+            else
+                echo "#undef NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT" | append_conftest "functions"
+            fi
+        ;;
+
+        of_property_read_variable_u8_array)
+            #
+            # Determine if of_property_read_variable_u8_array is present
+            #
+            # Added by commit 1df09bc ("of: Move OF property and graph API from
+            # base.c to property.c")
+            #
+            # Test if linux/of.h header file inclusion is successful and,
+            # depending on that, check for of_property_read_variable_u8_array
+            # presence
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/of.h>
+            " > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                CODE="
+                #include <linux/of.h>
+                void conftest_of_property_read_variable_u8_array() {
+                    of_property_read_variable_u8_array();
+                }"
+
+                compile_check_conftest "$CODE" "NV_OF_PROPERTY_READ_VARIABLE_U8_ARRAY_PRESENT" "" "functions"
+            else
+                echo "#undef NV_OF_PROPERTY_READ_VARIABLE_U8_ARRAY_PRESENT" | append_conftest "functions"
+            fi
+        ;;
+
+        devm_of_platform_populate)
+            #
+            # Determine if devm_of_platform_populate() function is present
+            #
+            # Added by commit 38b0b21 ("of: add devm_ functions for populate and
+            # depopulate")
+            #
+            CODE="
+            #if defined(NV_LINUX_OF_PLATFORM_H_PRESENT)
+            #include <linux/of_platform.h>
+            #endif
+            void conftest_devm_of_platform_populate(void)
+            {
+                devm_of_platform_populate(NULL, NULL);
+            }
+            "
+            compile_check_conftest "$CODE" "NV_DEVM_OF_PLATFORM_POPULATE_PRESENT" "" "functions"
+        ;;
+
+        of_dma_configure)
+            #
+            # Determine if of_dma_configure() function is present
+            #
+            # Added by commit 591c1ee ("of: configure the platform device
+            # dma parameters")
+            #
+            CODE="
+            #if defined(NV_LINUX_OF_DEVICE_H_PRESENT)
+            #include <linux/of_device.h>
+            #endif
+            void conftest_of_dma_configure(void)
+            {
+                of_dma_configure();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_OF_DMA_CONFIGURE_PRESENT" "" "functions"
+        ;;
+
+        icc_get)
+            #
+            # Determine if icc_get() function is present
+            #
+            # Added by commit 11f1cec ("interconnect: Add generic on-chip
+            # interconnect API")
+            #
+            CODE="
+            #if defined(NV_LINUX_INTERCONNECT_H_PRESENT)
+            #include <linux/interconnect.h>
+            #endif
+            void conftest_icc_get(void)
+            {
+                icc_get();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_ICC_GET_PRESENT" "" "functions"
+        ;;
+
+        icc_set_bw)
+            #
+            # Determine if icc_set_bw() function is present
+            #
+            # Added by commit 11f1cec ("interconnect: Add generic on-chip
+            # interconnect API")
+            #
+            CODE="
+            #if defined(NV_LINUX_INTERCONNECT_H_PRESENT)
+            #include <linux/interconnect.h>
+            #endif
+            void conftest_icc_set_bw(void)
+            {
+                icc_set_bw();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_ICC_SET_BW_PRESENT" "" "functions"
+        ;;
+
+        icc_put)
+            #
+            # Determine if icc_put() function is present
+            #
+            # Added by commit 11f1cec ("interconnect: Add generic on-chip
+            # interconnect API")
+            #
+            CODE="
+            #if defined(NV_LINUX_INTERCONNECT_H_PRESENT)
+            #include <linux/interconnect.h>
+            #endif
+            void conftest_icc_put(void)
+            {
+                icc_put();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_ICC_PUT_PRESENT" "" "functions"
+        ;;
+
+        i2c_new_client_device)
+            #
+            # Determine if i2c_new_client_device() function is present
+            #
+            # Added by commit 390fd04 ("i2c: remove deprecated i2c_new_device API")
+            #
+            CODE="
+            #include <linux/i2c.h>
+            void conftest_i2c_new_client_device(void)
+            {
+                i2c_new_client_device();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_I2C_NEW_CLIENT_DEVICE_PRESENT" "" "functions"
+        ;;
+
+        i2c_unregister_device)
+            #
+            # Determine if i2c_unregister_device() function is present
+            #
+            # Added by commit 9c1600e ("i2c: Add i2c_board_info and i2c_new_device()")
+            #
+            CODE="
+            #include <linux/i2c.h>
+            void conftest_i2c_unregister_device(void)
+            {
+                i2c_unregister_device();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_I2C_UNREGISTER_DEVICE_PRESENT" "" "functions"
+        ;;
+
+        of_get_named_gpio)
+            #
+            # Determine if of_get_named_gpio() function is present
+            #
+            # Added by commit a6b0919 ("of/gpio: Add new method for getting gpios
+            # under different property names")
+            #
+            CODE="
+            #if defined(NV_LINUX_OF_GPIO_H_PRESENT)
+            #include <linux/of_gpio.h>
+            #endif
+            void conftest_of_get_named_gpio(void)
+            {
+                of_get_named_gpio();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_OF_GET_NAME_GPIO_PRESENT" "" "functions"
+        ;;
+
+        devm_gpio_request_one)
+            #
+            # Determine if devm_gpio_request_one() function is present
+            #
+            # Added by commit 09d71ff ("gpiolib: Implement devm_gpio_request_one()")
+            #
+            CODE="
+            #if defined(NV_LINUX_GPIO_H_PRESENT)
+            #include <linux/gpio.h>
+            #endif
+            void conftest_devm_gpio_request_one(void)
+            {
+                devm_gpio_request_one();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_DEVM_GPIO_REQUEST_ONE_PRESENT" "" "functions"
+        ;;
+
+        gpio_direction_input)
+            #
+            # Determine if gpio_direction_input() function is present
+            #
+            # Added by commit c7caf86 ("gpio: remove gpio_ensure_requested()")
+            #
+            CODE="
+            #if defined(NV_LINUX_GPIO_H_PRESENT)
+            #include <linux/gpio.h>
+            #endif
+            void conftest_gpio_direction_input(void)
+            {
+                gpio_direction_input();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_GPIO_DIRECTION_INPUT_PRESENT" "" "functions"
+        ;;
+
+        gpio_direction_output)
+            #
+            # Determine if gpio_direction_output() function is present
+            #
+            # Added by commit c7caf86 ("gpio: remove gpio_ensure_requested()")
+            #
+            CODE="
+            #if defined(NV_LINUX_GPIO_H_PRESENT)
+            #include <linux/gpio.h>
+            #endif
+            void conftest_gpio_direction_output(void)
+            {
+                gpio_direction_output();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_GPIO_DIRECTION_OUTPUT_PRESENT" "" "functions"
+        ;;
+
+        gpio_get_value)
+            #
+            # Determine if gpio_get_value() function is present
+            #
+            # Added by commit 7563bbf ("gpiolib/arches: Centralise boilerplate
+            # asm/gpio.h")
+            #
+            CODE="
+            #if defined(NV_LINUX_GPIO_H_PRESENT)
+            #include <linux/gpio.h>
+            #endif
+            void conftest_gpio_get_value(void)
+            {
+                gpio_get_value();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_GPIO_GET_VALUE_PRESENT" "" "functions"
+        ;;
+
+        gpio_set_value)
+            #
+            # Determine if gpio_set_value() function is present
+            #
+            # Added by commit 7563bbf ("gpiolib/arches: Centralise boilerplate
+            # asm/gpio.h")
+            #
+            CODE="
+            #if defined(NV_LINUX_GPIO_H_PRESENT)
+            #include <linux/gpio.h>
+            #endif
+            void conftest_gpio_set_value(void)
+            {
+                gpio_set_value();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_GPIO_SET_VALUE_PRESENT" "" "functions"
+        ;;
+
+        gpio_to_irq)
+            #
+            # Determine if gpio_to_irq() function is present
+            #
+            # Added by commit 7563bbf ("gpiolib/arches: Centralise boilerplate
+            # asm/gpio.h")
+            #
+            CODE="
+            #if defined(NV_LINUX_GPIO_H_PRESENT)
+            #include <linux/gpio.h>
+            #endif
+            void conftest_gpio_to_irq(void)
+            {
+                gpio_to_irq();
+            }
+            "
+
+            compile_check_conftest "$CODE" "NV_GPIO_TO_IRQ_PRESENT" "" "functions"
+        ;;
+
+        migrate_vma_setup)
+            #
+            # Determine if migrate_vma_setup() function is present
+            #
+            # migrate_vma_setup() function was added by commit
+            # a7d1f22bb74f32cf3cd93f52776007e161f1a738 ("mm: turn migrate_vma
+            # upside down") in v5.4 (2019-08-20).
+            #
+            CODE="
+            #include <linux/migrate.h>
+            int conftest_migrate_vma_setup(void) {
+                migrate_vma_setup();
+            }"
+
+            compile_check_conftest "$CODE" "NV_MIGRATE_VMA_SETUP_PRESENT" "" "functions"
+        ;;
+
+        migrate_vma_added_flags)
+            #
+            # Determine if migrate_vma structure has flags
+            #
+            # flags were added to struct migrate_vma by commit
+            # 5143192cd410c4fc83be09a2e73423765aee072b ("mm/migrate: add a flags
+            # parameter to migrate_vma") in v5.9 (2020-07-28).
+            #
+            CODE="
+            #include <linux/migrate.h>
+            int conftest_migrate_vma_added_flags(void) {
+                return offsetof(struct migrate_vma, flags);
+            }"
+
+            compile_check_conftest "$CODE" "NV_MIGRATE_VMA_FLAGS_PRESENT" "" "types"
+        ;;
+
+        drm_device_has_pdev)
+            #
+            # Determine if the 'drm_device' structure has a 'pdev' field.
+            #
+            # Removed by commit b347e04452ff ("drm: Remove pdev field from
+            # struct drm_device") in v5.14-rc1.
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+
+            #if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
+            #include <drm/drm_device.h>
+            #endif
+
+            int conftest_drm_device_has_pdev(void) {
+                return offsetof(struct drm_device, pdev);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DEVICE_HAS_PDEV" "" "types"
+        ;;
+
+        make_device_exclusive_range)
+            #
+            # Determine if the make_device_exclusive_range() function is present
+            #
+            # make_device_exclusive_range() function was added by commit
+            # b756a3b5e7ead ("mm: device exclusive memory access") in v5.14
+            # (2021-06-30).
+            #
+            CODE="
+            #include <linux/rmap.h>
+            int conftest_make_device_exclusive_range(void) {
+                make_device_exclusive_range();
+            }"
+
+            compile_check_conftest "$CODE" "NV_MAKE_DEVICE_EXCLUSIVE_RANGE_PRESENT" "" "functions"
+        ;;
+
+        ioasid_get)
+            #
+            # Determine if ioasid_get() function is present
+            #
+            # ioasid_get() function was added by commit
+            # cb4789b0d19ff231ce9f73376a023341300aed96 ("iommu/ioasid: Add ioasid
+            # references") in v5.11 (2020-11-23).
+            #
+            CODE="
+            #if defined(NV_LINUX_IOASID_H_PRESENT)
+            #include <linux/ioasid.h>
+            #endif
+            void conftest_ioasid_get(void) {
+                ioasid_get();
+            }"
+
+            compile_check_conftest "$CODE" "NV_IOASID_GET_PRESENT" "" "functions"
+        ;;
+
+        drm_crtc_state_has_no_vblank)
+            #
+            # Determine if the 'drm_crtc_state' structure has 'no_vblank'.
+            #
+            # drm_crtc_state::no_vblank was added by commit b25c60af7a877
+            # ("drm/crtc: Add a generic infrastructure to fake VBLANK events")
+            # in 4.18.0-rc3 (2018-07-03).
+ # + CODE=" + #include + void conftest_drm_crtc_state_has_no_vblank(void) { + struct drm_crtc_state foo; + (void)foo.no_vblank; + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_NO_VBLANK" "" "types" + ;; + + drm_mode_config_has_allow_fb_modifiers) + # + # Determine if the 'drm_mode_config' structure has + # an 'allow_fb_modifiers' field. + # + # an 'allow_fb_modifiers' field in the 'drm_mode_config' structure, + # is added by commit e3eb3250d84e ("drm: add support for + # tiled/compressed/etc modifier in addfb2") in v4.1, and removed by + # commit 3d082157a242 ("drm: remove allow_fb_modifiers") in v5.18-rc1. + # + # The 'struct drm_mode_config' definition, is moved to + # drm_mode_config.h file by commit 28575f165d36 ("drm: Extract + # drm_mode_config.[hc]") in v4.10. + # + CODE="$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRM_MODE_CONFIG_H_PRESENT) + #include + #else + #include + #endif + int conftest_drm_mode_config_has_allow_fb_modifiers(void) { + return offsetof(struct drm_mode_config, allow_fb_modifiers); + }" + + compile_check_conftest "$CODE" "NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS" "" "types" + ;; + + dma_set_mask_and_coherent) + # + # Determine if dma_set_mask_and_coherent function is present. + # Added by commit 4aa806b771d1 ("DMA-API: provide a helper to set both DMA + # and coherent DMA masks") in v3.13 (2013-06-26). + # + CODE=" + #include + void conftest_dma_set_mask_and_coherent(void) { + dma_set_mask_and_coherent(); + }" + + compile_check_conftest "$CODE" "NV_DMA_SET_MASK_AND_COHERENT_PRESENT" "" "functions" + ;; + + drm_has_hdr_output_metadata) + # + # Determine if drm_mode.h has 'hdr_output_metadata' structure. + # + # struct hdr_output_metadata was added by commit fbb5d0353c62d + # ("drm: Add HDR source metadata property") in 5.1.0-rc5 + # (2019-05-16) + # + CODE=" + #include + void conftest_drm_has_hdr_output_metadata(void) { + struct hdr_output_metadata foo; + (void)foo; + }" + + compile_check_conftest "$CODE" "NV_DRM_HAS_HDR_OUTPUT_METADATA" "" "types" + ;; + + uts_release) + # + # print the kernel's UTS_RELEASE string. + # + echo "#include + UTS_RELEASE" > conftest$$.c + + $CC $CFLAGS -E -P conftest$$.c + rm -f conftest$$.c + ;; + + platform_irq_count) + # + # Determine if the platform_irq_count() function is present + # + # platform_irq_count was added by commit + # 4b83555d5098e73cf2c5ca7f86c17ca0ba3b968e ("driver-core: platform: Add platform_irq_count()") + # in 4.5-rc1 (2016-01-07) + # + CODE=" + #include + int conftest_platform_irq_count(void) { + return platform_irq_count(); + }" + compile_check_conftest "$CODE" "NV_PLATFORM_IRQ_COUNT_PRESENT" "" "functions" + ;; + + devm_clk_bulk_get_all) + # + # Determine if devm_clk_bulk_get_all() function is present + # + # Added by commit f08c2e286 ("clk: add managed version of clk_bulk_get_all") + # + CODE=" + #if defined(NV_LINUX_CLK_H_PRESENT) + #include + #endif + void conftest_devm_clk_bulk_get_all(void) + { + devm_clk_bulk_get_all(); + } + " + compile_check_conftest "$CODE" "NV_DEVM_CLK_BULK_GET_ALL_PRESENT" "" "functions" + ;; + + mmget_not_zero) + # + # Determine if mmget_not_zero() function is present + # + # mmget_not_zero() function was added by commit + # d2005e3f41d4f9299e2df6a967c8beb5086967a9 ("userfaultfd: don't pin + # the user memory in userfaultfd_file_create()") in v4.7 + # (2016-05-20) in linux/sched.h but then moved to linux/sched/mm.h + # by commit 68e21be2916b359fd8afb536c1911dc014cfd03e + # ("sched/headers: Move task->mm handling methods to + # ") in v4.11 (2017-02-01). 
+ CODE=" + #if defined(NV_LINUX_SCHED_MM_H_PRESENT) + #include + #elif defined(NV_LINUX_SCHED_H_PRESENT) + #include + #endif + void conftest_mmget_not_zero(void) { + mmget_not_zero(); + }" + + compile_check_conftest "$CODE" "NV_MMGET_NOT_ZERO_PRESENT" "" "functions" + ;; + + dma_resv_add_fence) + # + # Determine if the dma_resv_add_fence() function is present. + # + # dma_resv_add_excl_fence() and dma_resv_add_shared_fence() were + # removed and replaced with dma_resv_add_fence() by commit + # 73511edf8b19 ("dma-buf: specify usage while adding fences to + # dma_resv obj v7") in linux-next, expected in v5.19-rc1. + # + CODE=" + #if defined(NV_LINUX_DMA_RESV_H_PRESENT) + #include + #endif + void conftest_dma_resv_add_fence(void) { + dma_resv_add_fence(); + }" + + compile_check_conftest "$CODE" "NV_DMA_RESV_ADD_FENCE_PRESENT" "" "functions" + ;; + + dma_resv_reserve_fences) + # + # Determine if the dma_resv_reserve_fences() function is present. + # + # dma_resv_reserve_shared() was removed and replaced with + # dma_resv_reserve_fences() by commit c8d4c18bfbc4 + # ("dma-buf/drivers: make reserving a shared slot mandatory v4") in + # linux-next, expected in v5.19-rc1. + # + CODE=" + #if defined(NV_LINUX_DMA_RESV_H_PRESENT) + #include + #endif + void conftest_dma_resv_reserve_fences(void) { + dma_resv_reserve_fences(); + }" + + compile_check_conftest "$CODE" "NV_DMA_RESV_RESERVE_FENCES_PRESENT" "" "functions" + ;; + + reservation_object_reserve_shared_has_num_fences_arg) + # + # Determine if reservation_object_reserve_shared() has 'num_fences' + # argument. + # + # reservation_object_reserve_shared() function prototype was updated + # to take 'num_fences' argument by commit ca05359f1e64 ("dma-buf: + # allow reserving more than one shared fence slot") in v4.21-rc1 + # (2018-12-14). + # + CODE=" + #include + void conftest_reservation_object_reserve_shared_has_num_fences_arg( + struct reservation_object *obj, + unsigned int num_fences) { + (void) reservation_object_reserve_shared(obj, num_fences); + }" + + compile_check_conftest "$CODE" "NV_RESERVATION_OBJECT_RESERVE_SHARED_HAS_NUM_FENCES_ARG" "" "types" + ;; + + num_registered_fb) + # + # Determine if 'num_registered_fb' variable is present. + # + # 'num_registered_fb' was removed by commit 5727dcfd8486 + # ("fbdev: Make registered_fb[] private to fbmem.c) for + # v5.20 linux-next (2022-07-27). + # + CODE=" + #include + int conftest_num_registered_fb(void) { + return num_registered_fb; + }" + + compile_check_conftest "$CODE" "NV_NUM_REGISTERED_FB_PRESENT" "" "types" + ;; + + drm_connector_has_override_edid) + # + # Determine if 'struct drm_connector' has an 'override_edid' member. + # + # Removed by commit 90b575f52c6ab ("drm/edid: detach debugfs EDID + # override from EDID property update") in linux-next, expected in + # v6.2-rc1. + # + CODE=" + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + #if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT) + #include + #endif + int conftest_drm_connector_has_override_edid(void) { + return offsetof(struct drm_connector, override_edid); + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID" "" "types" + ;; + + # When adding a new conftest entry, please use the correct format for + # specifying the relevant upstream Linux kernel commit. + # + # was added|removed|etc by commit (" (). 
+ + *) + # Unknown test name given + echo "Error: unknown conftest '$1' requested" >&2 + exit 1 + ;; + esac +} + +case "$5" in + cc_sanity_check) + # + # Check if the selected compiler can create object files + # in the current environment. + # + VERBOSE=$6 + + echo "int cc_sanity_check(void) { + return 0; + }" > conftest$$.c + + $CC -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ ! -f conftest$$.o ]; then + if [ "$VERBOSE" = "full_output" ]; then + echo ""; + fi + if [ "$CC" != "cc" ]; then + echo "The C compiler '$CC' does not appear to be able to" + echo "create object files. Please make sure you have " + echo "your Linux distribution's libc development package" + echo "installed and that '$CC' is a valid C compiler"; + echo "name." + else + echo "The C compiler '$CC' does not appear to be able to" + echo "create executables. Please make sure you have " + echo "your Linux distribution's gcc and libc development" + echo "packages installed." + fi + if [ "$VERBOSE" = "full_output" ]; then + echo ""; + echo "*** Failed CC sanity check. Bailing out! ***"; + echo ""; + fi + exit 1 + else + rm -f conftest$$.o + exit 0 + fi + ;; + + cc_version_check) + # + # Verify that the same compiler major and minor version is + # used for the kernel and kernel module. A mismatch condition is + # not considered fatal, so this conftest returns a success status + # code, even if it fails. Failure of the test can be distinguished + # by testing for empty (success) versus non-empty (failure) output. + # + # Some gcc version strings that have proven problematic for parsing + # in the past: + # + # gcc.real (GCC) 3.3 (Debian) + # gcc-Version 3.3 (Debian) + # gcc (GCC) 3.1.1 20020606 (Debian prerelease) + # version gcc 3.2.3 + # + # As of this writing, GCC uses a version number as x.y.z and below + # are the typical version strings seen with various distributions. + # gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-23) + # gcc version 4.8.5 20150623 (Red Hat 4.8.5-39) (GCC) + # gcc (GCC) 8.3.1 20190507 (Red Hat 8.3.1-4) + # gcc (GCC) 10.2.1 20200723 (Red Hat 10.2.1-1) + # gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 + # gcc (Ubuntu 7.5.0-3ubuntu1~16.04) 7.5.0 + # gcc (Debian 8.3.0-6) 8.3.0 + # aarch64-linux-gcc.br_real (Buildroot 2020.08-14-ge5a2a90) 9.3.0, GNU ld (GNU Binutils) 2.33.1 + # + # In order to extract GCC version correctly for version strings + # like the last one above, we first check for x.y.z and if that + # fails, we fallback to x.y format. + VERBOSE=$6 + + kernel_compile_h=$OUTPUT/include/generated/compile.h + + if [ ! -f ${kernel_compile_h} ]; then + # The kernel's compile.h file is not present, so there + # isn't a convenient way to identify the compiler version + # used to build the kernel. + IGNORE_CC_MISMATCH=1 + fi + + if [ -n "$IGNORE_CC_MISMATCH" ]; then + exit 0 + fi + + kernel_cc_string=`cat ${kernel_compile_h} | \ + grep LINUX_COMPILER | cut -f 2 -d '"'` + + kernel_cc_version=`echo ${kernel_cc_string} | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+' | head -n 1` + if [ -z "${kernel_cc_version}" ]; then + kernel_cc_version=`echo ${kernel_cc_string} | grep -o '[0-9]\+\.[0-9]\+' | head -n 1` + fi + kernel_cc_major=`echo ${kernel_cc_version} | cut -d '.' -f 1` + kernel_cc_minor=`echo ${kernel_cc_version} | cut -d '.' 
-f 2`
+
+        echo "
+        #if (__GNUC__ != ${kernel_cc_major}) || (__GNUC_MINOR__ != ${kernel_cc_minor})
+        #error \"cc version mismatch\"
+        #endif
+        " > conftest$$.c
+
+        $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+        rm -f conftest$$.c
+
+        if [ -f conftest$$.o ]; then
+            rm -f conftest$$.o
+            exit 0;
+        else
+            #
+            # The gcc version check failed
+            #
+
+            if [ "$VERBOSE" = "full_output" ]; then
+                echo "";
+                echo "Warning: Compiler version check failed:";
+                echo "";
+                echo "The major and minor number of the compiler used to";
+                echo "compile the kernel:";
+                echo "";
+                echo "${kernel_cc_string}";
+                echo "";
+                echo "does not match the compiler used here:";
+                echo "";
+                $CC --version
+                echo "";
+                echo "It is recommended to set the CC environment variable";
+                echo "to the compiler that was used to compile the kernel.";
+                echo ""
+                echo "To skip the test and silence this warning message, set";
+                echo "the IGNORE_CC_MISMATCH environment variable to \"1\".";
+                echo "However, mixing compiler versions between the kernel";
+                echo "and kernel modules can result in subtle bugs that are";
+                echo "difficult to diagnose.";
+                echo "";
+                echo "*** Failed CC version check. ***";
+                echo "";
+            elif [ "$VERBOSE" = "just_msg" ]; then
+                echo "Warning: The kernel was built with ${kernel_cc_string}, but the" \
+                     "current compiler version is `$CC --version | head -n 1`.";
+            fi
+            exit 0;
+        fi
+    ;;
+
+    xen_sanity_check)
+        #
+        # Check if the target kernel is a Xen kernel. If so, exit, since
+        # the RM doesn't currently support Xen.
+        #
+        VERBOSE=$6
+
+        if [ -n "$IGNORE_XEN_PRESENCE" -o -n "$VGX_BUILD" ]; then
+            exit 0
+        fi
+
+        test_xen
+
+        if [ "$XEN_PRESENT" != "0" ]; then
+            echo "The kernel you are installing for is a Xen kernel!";
+            echo "";
+            echo "The NVIDIA driver does not currently support Xen kernels. If ";
+            echo "you are using a stock distribution kernel, please install ";
+            echo "a variant of this kernel without Xen support; if this is a ";
+            echo "custom kernel, please install a standard Linux kernel. Then ";
+            echo "try installing the NVIDIA kernel module again.";
+            echo "";
+            if [ "$VERBOSE" = "full_output" ]; then
+                echo "*** Failed Xen sanity check. Bailing out! ***";
+                echo "";
+            fi
+            exit 1
+        else
+            exit 0
+        fi
+    ;;
+
+    preempt_rt_sanity_check)
+        #
+        # Check if the target kernel has the PREEMPT_RT patch set applied. If
+        # so, exit, since the RM doesn't support this configuration.
+        #
+        VERBOSE=$6
+        PREEMPT_RT_PRESENT=0
+
+        if [ -n "$IGNORE_PREEMPT_RT_PRESENCE" ]; then
+            exit 0
+        fi
+
+        if test_configuration_option CONFIG_PREEMPT_RT; then
+            PREEMPT_RT_PRESENT=1
+        elif test_configuration_option CONFIG_PREEMPT_RT_FULL; then
+            PREEMPT_RT_PRESENT=1
+        fi
+
+        if [ "$PREEMPT_RT_PRESENT" != "0" ]; then
+            echo "The kernel you are installing for is a PREEMPT_RT kernel!";
+            echo "";
+            echo "The NVIDIA driver does not support real-time kernels. If you ";
+            echo "are using a stock distribution kernel, please install ";
+            echo "a variant of this kernel that does not have the PREEMPT_RT ";
+            echo "patch set applied; if this is a custom kernel, please ";
+            echo "install a standard Linux kernel. Then try installing the ";
+            echo "NVIDIA kernel module again.";
+            echo "";
+            if [ "$VERBOSE" = "full_output" ]; then
+                echo "*** Failed PREEMPT_RT sanity check. Bailing out! ***";
+                echo "";
+            fi
+            exit 1
+        else
+            exit 0
+        fi
+    ;;
+
+    patch_check)
+        #
+        # Check for any "official" patches that may have been applied and
+        # construct a description table for reporting purposes.
+ # + PATCHES="" + + for PATCH in patch-*.h; do + if [ -f $PATCH ]; then + echo "#include \"$PATCH\"" + PATCHES="$PATCHES "`echo $PATCH | sed -s 's/patch-\(.*\)\.h/\1/'` + fi + done + + echo "static struct { + const char *short_description; + const char *description; + } __nv_patches[] = {" + for i in $PATCHES; do + echo "{ \"$i\", NV_PATCH_${i}_DESCRIPTION }," + done + echo "{ NULL, NULL } };" + + exit 0 + ;; + + compile_tests) + # + # Run a series of compile tests to determine the set of interfaces + # and features available in the target kernel. + # + shift 5 + + CFLAGS=$1 + shift + + for i in $*; do compile_test $i; done + + for file in conftest*.d; do + rm -f $file > /dev/null 2>&1 + done + + exit 0 + ;; + + dom0_sanity_check) + # + # Determine whether running in DOM0. + # + VERBOSE=$6 + + if [ -n "$VGX_BUILD" ]; then + if [ -f /proc/xen/capabilities ]; then + if [ "`cat /proc/xen/capabilities`" == "control_d" ]; then + exit 0 + fi + else + echo "The kernel is not running in DOM0."; + echo ""; + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed DOM0 sanity check. Bailing out! ***"; + echo ""; + fi + fi + exit 1 + fi + ;; + vgpu_kvm_sanity_check) + # + # Determine whether we are running a vGPU on KVM host. + # + VERBOSE=$6 + iommu=CONFIG_VFIO_IOMMU_TYPE1 + mdev=CONFIG_VFIO_MDEV + kvm=CONFIG_KVM_VFIO + VFIO_IOMMU_PRESENT=0 + VFIO_MDEV_PRESENT=0 + KVM_PRESENT=0 + + if [ -n "$VGX_KVM_BUILD" ]; then + if (test_configuration_option ${iommu} || test_configuration_option ${iommu}_MODULE); then + VFIO_IOMMU_PRESENT=1 + fi + + if (test_configuration_option ${mdev} || test_configuration_option ${mdev}_MODULE); then + VFIO_MDEV_PRESENT=1 + fi + + if (test_configuration_option ${kvm} || test_configuration_option ${kvm}_MODULE); then + KVM_PRESENT=1 + fi + + if [ "$VFIO_IOMMU_PRESENT" != "0" ] && + [ "$VFIO_MDEV_PRESENT" != "0" ] && + [ "$KVM_PRESENT" != "0" ] ; then + exit 0 + else + echo "Below CONFIG options are missing on the kernel for installing"; + echo "NVIDIA vGPU driver on KVM host"; + if [ "$VFIO_IOMMU_PRESENT" = "0" ]; then + echo "CONFIG_VFIO_IOMMU_TYPE1"; + fi + + if [ "$VFIO_MDEV_PRESENT" = "0" ]; then + echo "CONFIG_VFIO_MDEV"; + fi + + if [ "$KVM_PRESENT" = "0" ]; then + echo "CONFIG_KVM"; + fi + echo "Please install the kernel with above CONFIG options set, then"; + echo "try installing again"; + echo ""; + + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed vGPU on KVM sanity check. Bailing out! ***"; + echo ""; + fi + fi + exit 1 + else + exit 0 + fi + ;; + test_configuration_option) + # + # Check to see if the given config option is set. + # + OPTION=$6 + + test_configuration_option $OPTION + exit $? + ;; + + get_configuration_option) + # + # Get the value of the given config option. + # + OPTION=$6 + + get_configuration_option $OPTION + exit $? + ;; + + + guess_module_signing_hash) + # + # Determine the best cryptographic hash to use for module signing, + # to the extent that is possible. + # + + HASH=$(get_configuration_option CONFIG_MODULE_SIG_HASH) + + if [ $? -eq 0 ] && [ -n $HASH ]; then + echo $HASH + exit 0 + else + for SHA in 512 384 256 224 1; do + if test_configuration_option CONFIG_MODULE_SIG_SHA$SHA; then + echo sha$SHA + exit 0 + fi + done + fi + exit 1 + ;; + + + test_kernel_headers) + # + # Check for the availability of certain kernel headers + # + + CFLAGS=$6 + + test_headers + + for file in conftest*.d; do + rm -f $file > /dev/null 2>&1 + done + + exit $? 
+ ;; + + + build_cflags) + # + # Generate CFLAGS for use in the compile tests + # + + build_cflags + echo $CFLAGS + exit 0 + ;; + + module_symvers_sanity_check) + # + # Check whether Module.symvers exists and contains at least one + # EXPORT_SYMBOL* symbol from vmlinux + # + + if [ -n "$IGNORE_MISSING_MODULE_SYMVERS" ]; then + exit 0 + fi + + TAB=' ' + + if [ -f "$OUTPUT/Module.symvers" ] && \ + grep -e "^[^${TAB}]*${TAB}[^${TAB}]*${TAB}\+vmlinux" \ + "$OUTPUT/Module.symvers" >/dev/null 2>&1; then + exit 0 + fi + + echo "The Module.symvers file is missing, or does not contain any" + echo "symbols exported from the kernel. This could cause the NVIDIA" + echo "kernel modules to be built against a configuration that does" + echo "not accurately reflect the actual target kernel." + echo "The Module.symvers file check can be disabled by setting the" + echo "environment variable IGNORE_MISSING_MODULE_SYMVERS to 1." + + exit 1 + ;; +esac diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/count-lines.mk b/NVIDIA-kernel-module-source-TempVersion/kernel-open/count-lines.mk new file mode 100644 index 0000000..397db0a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/count-lines.mk @@ -0,0 +1,25 @@ +count: + @echo "conftests:$(words $(ALL_CONFTESTS))" \ + "objects:$(words $(NV_OBJECTS_DEPEND_ON_CONFTEST))" \ + "modules:$(words $(NV_KERNEL_MODULES))" + +.PHONY: count + +# Include the top-level makefile to get $(NV_KERNEL_MODULES) +include Makefile + +# Set $(src) for the to-be-included nvidia*.Kbuild files +src := $(CURDIR) + +# Include nvidia*.Kbuild and append the nvidia*-y objects to ALL_OBJECTS +$(foreach _module, $(NV_KERNEL_MODULES), \ + $(eval include $(_module)/$(_module).Kbuild) \ + ) + +# Concatenate all of the conftest lists; use $(sort ) to remove duplicates +ALL_CONFTESTS := $(sort $(NV_CONFTEST_FUNCTION_COMPILE_TESTS) \ + $(NV_CONFTEST_GENERIC_COMPILE_TESTS) \ + $(NV_CONFTEST_MACRO_COMPILE_TESTS) \ + $(NV_CONFTEST_SYMBOL_COMPILE_TESTS) \ + $(NV_CONFTEST_TYPE_COMPILE_TESTS) \ + ) diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf b/NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf new file mode 100644 index 0000000..aef54d3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf @@ -0,0 +1,12 @@ +PACKAGE_NAME="nvidia" +PACKAGE_VERSION="__VERSION_STRING" +AUTOINSTALL="yes" + +# By default, DKMS will add KERNELRELEASE to the make command line; however, +# this will cause the kernel module build to infer that it was invoked via +# Kbuild directly instead of DKMS. The dkms(8) manual page recommends quoting +# the 'make' command name to suppress this behavior. +MAKE[0]="'make' -j__JOBS NV_EXCLUDE_BUILD_MODULES='__EXCLUDE_MODULES' KERNEL_UNAME=${kernelver} modules" + +# The list of kernel modules will be generated by nvidia-installer at runtime. +__DKMS_MODULES diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c new file mode 100644 index 0000000..e349473 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#include "nv-pci-table.h"
+
+/* Devices supported by RM */
+struct pci_device_id nv_pci_table[] = {
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask  = ~0
+    },
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask  = ~0
+    },
+    { }
+};
+
+/* Devices supported by all drivers in nvidia.ko */
+struct pci_device_id nv_module_device_table[] = {
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask  = ~0
+    },
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask  = ~0
+    },
+    {
+        .vendor      = PCI_VENDOR_ID_NVIDIA,
+        .device      = PCI_ANY_ID,
+        .subvendor   = PCI_ANY_ID,
+        .subdevice   = PCI_ANY_ID,
+        .class       = (PCI_CLASS_BRIDGE_OTHER << 8),
+        .class_mask  = ~0
+    },
+    { }
+};
+
+MODULE_DEVICE_TABLE(pci, nv_module_device_table);
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h
new file mode 100644
index 0000000..b28483b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_PCI_TABLE_H_
+#define _NV_PCI_TABLE_H_
+
+#include <linux/pci.h>
+
+extern struct pci_device_id nv_pci_table[];
+
+#endif /* _NV_PCI_TABLE_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
new file mode 100644
index 0000000..a09ab76
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DMA_FENCE_HELPER_H__
+#define __NVIDIA_DMA_FENCE_HELPER_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_FENCE_AVAILABLE)
+
+/*
+ * Fence headers were moved to the file dma-fence.h, and struct fence was
+ * renamed to dma_fence, by commit -
+ *
+ * 2016-10-25 : f54d1867005c3323f5d8ad83eed823e84226c429
+ */
+
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+#include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
+
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+typedef struct fence nv_dma_fence_t;
+typedef struct fence_ops nv_dma_fence_ops_t;
+#else
+typedef struct dma_fence nv_dma_fence_t;
+typedef struct dma_fence_ops nv_dma_fence_ops_t;
+#endif
+
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
+#else
+#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
+#endif
+
+static inline bool nv_dma_fence_is_signaled(nv_dma_fence_t *fence) {
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+    return fence_is_signaled(fence);
+#else
+    return dma_fence_is_signaled(fence);
+#endif
+}
+
+static inline nv_dma_fence_t *nv_dma_fence_get(nv_dma_fence_t *fence)
+{
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+    return fence_get(fence);
+#else
+    return dma_fence_get(fence);
+#endif
+}
+
+static inline void nv_dma_fence_put(nv_dma_fence_t *fence) {
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+    fence_put(fence);
+#else
+    dma_fence_put(fence);
+#endif
+}
+
+static inline signed long
+nv_dma_fence_default_wait(nv_dma_fence_t *fence,
+                          bool intr, signed long timeout) {
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+    return fence_default_wait(fence, intr, timeout);
+#else
+    return dma_fence_default_wait(fence, intr, timeout);
+#endif
+}
+
+static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+    return fence_signal(fence);
+#else
+    return dma_fence_signal(fence);
+#endif
+}
+
+static inline u64 nv_dma_fence_context_alloc(unsigned num) {
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+    return fence_context_alloc(num);
+#else
+    return dma_fence_context_alloc(num);
+#endif
+}
+
+static inline void
+nv_dma_fence_init(nv_dma_fence_t *fence,
+                  const nv_dma_fence_ops_t *ops,
+                  spinlock_t *lock, u64 context, unsigned seqno) {
+#if defined(NV_LINUX_FENCE_H_PRESENT)
+    fence_init(fence, ops, lock, context, seqno);
+#else
+    dma_fence_init(fence, ops, lock, context, seqno);
+#endif
+}
+
+#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
+
+#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
new file mode 100644
index 0000000..b520b26
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DMA_RESV_HELPER_H__
+#define __NVIDIA_DMA_RESV_HELPER_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_FENCE_AVAILABLE)
+
+/*
+ * linux/reservation.h was renamed to linux/dma-resv.h by commit
+ * 52791eeec1d9 ("dma-buf: rename reservation_object to dma_resv")
+ * in v5.4.
+ */
+
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+#include <linux/dma-resv.h>
+#else
+#include <linux/reservation.h>
+#endif
+
+#include "nvidia-dma-fence-helper.h"
+
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+typedef struct dma_resv nv_dma_resv_t;
+#else
+typedef struct reservation_object nv_dma_resv_t;
+#endif
+
+static inline void nv_dma_resv_init(nv_dma_resv_t *obj)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_init(obj);
+#else
+    reservation_object_init(obj);
+#endif
+}
+
+static inline void nv_dma_resv_fini(nv_dma_resv_t *obj)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_fini(obj);
+#else
+    reservation_object_fini(obj);
+#endif
+}
+
+static inline void nv_dma_resv_lock(nv_dma_resv_t *obj,
+                                    struct ww_acquire_ctx *ctx)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_lock(obj, ctx);
+#else
+    ww_mutex_lock(&obj->lock, ctx);
+#endif
+}
+
+static inline void nv_dma_resv_unlock(nv_dma_resv_t *obj)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+    dma_resv_unlock(obj);
+#else
+    ww_mutex_unlock(&obj->lock);
+#endif
+}
+
+static inline int nv_dma_resv_reserve_fences(nv_dma_resv_t *obj,
+                                             unsigned int num_fences,
+                                             NvBool shared)
+{
+#if defined(NV_DMA_RESV_RESERVE_FENCES_PRESENT)
+    return dma_resv_reserve_fences(obj, num_fences);
+#else
+    if (shared) {
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+        return dma_resv_reserve_shared(obj, num_fences);
+#elif defined(NV_RESERVATION_OBJECT_RESERVE_SHARED_HAS_NUM_FENCES_ARG)
+        return reservation_object_reserve_shared(obj, num_fences);
+#else
+        unsigned int i;
+        for (i = 0; i < num_fences; i++) {
+            reservation_object_reserve_shared(obj);
+        }
+#endif
+    }
+    return 0;
+#endif
+}
+
+static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
+                                              nv_dma_fence_t *fence)
+{
+#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
+#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
+    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
+#else
+    dma_resv_add_excl_fence(obj, fence);
+#endif
+#else
+    reservation_object_add_excl_fence(obj, fence);
+#endif
+}
+
+#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
+
+#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h
new file mode 100644
index 0000000..bed8d81
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_CONFTEST_H__
+#define __NVIDIA_DRM_CONFTEST_H__
+
+#include "conftest.h"
+
+/*
+ * NOTE: This file is expected to be included at the top, before any of the
+ * linux/drm headers.
+ *
+ * The goal is to redefine refcount_dec_and_test and refcount_inc before
+ * including the drm header files, so that the drm macro/inline calls to
+ * refcount_dec_and_test* and refcount_inc get redirected to the
+ * alternate implementations in this file.
+ */
+
+#if NV_IS_EXPORT_SYMBOL_GPL_refcount_inc
+
+#include <linux/refcount.h>
+
+#define refcount_inc(__ptr)         \
+    do {                            \
+        atomic_inc(&(__ptr)->refs); \
+    } while(0)
+
+#endif
+
+#if NV_IS_EXPORT_SYMBOL_GPL_refcount_dec_and_test
+
+#include <linux/refcount.h>
+
+#define refcount_dec_and_test(__ptr) atomic_dec_and_test(&(__ptr)->refs)
+
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) || \
+    defined(NV_DRM_GEM_OBJECT_HAS_RESV)
+#define NV_DRM_FENCE_AVAILABLE
+#else
+#undef NV_DRM_FENCE_AVAILABLE
+#endif
+
+#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c
new file mode 100644
index 0000000..fe838ef
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-connector.h"
+#include "nvidia-drm-utils.h"
+#include "nvidia-drm-encoder.h"
+
+/*
+ * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
+ * moves a number of helper function definitions from
+ * drm/drm_crtc_helper.h to a new drm_probe_helper.h.
+ */
+#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
+#include <drm/drm_probe_helper.h>
+#endif
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+
+static void nv_drm_connector_destroy(struct drm_connector *connector)
+{
+    struct nv_drm_connector *nv_connector = to_nv_connector(connector);
+
+    drm_connector_unregister(connector);
+
+    drm_connector_cleanup(connector);
+
+    if (nv_connector->edid != NULL) {
+        nv_drm_free(nv_connector->edid);
+    }
+
+    nv_drm_free(nv_connector);
+}
+
+static bool
+__nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams,
+                        struct drm_connector *connector,
+                        struct drm_encoder *encoder)
+{
+    struct nv_drm_connector *nv_connector = to_nv_connector(connector);
+    struct drm_device *dev = connector->dev;
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct nv_drm_encoder *nv_encoder;
+
+    /*
+     * DVI-I connectors can drive both digital and analog
+     * encoders. If a digital connection has been forced then
+     * skip analog encoders.
+     */
+
+    if (connector->connector_type == DRM_MODE_CONNECTOR_DVII &&
+        connector->force == DRM_FORCE_ON_DIGITAL &&
+        encoder->encoder_type == DRM_MODE_ENCODER_DAC) {
+        return false;
+    }
+
+    nv_encoder = to_nv_encoder(encoder);
+
+    memset(pDetectParams, 0, sizeof(*pDetectParams));
+
+    pDetectParams->handle = nv_encoder->hDisplay;
+
+    switch (connector->force) {
+        case DRM_FORCE_ON:
+        case DRM_FORCE_ON_DIGITAL:
+            pDetectParams->forceConnected = NV_TRUE;
+            break;
+        case DRM_FORCE_OFF:
+            pDetectParams->forceDisconnected = NV_TRUE;
+            break;
+        case DRM_FORCE_UNSPECIFIED:
+            break;
+    }
+
+#if defined(NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID)
+    if (connector->override_edid) {
+#else
+    if (drm_edid_override_connector_update(connector) > 0) {
+#endif
+        const struct drm_property_blob *edid = connector->edid_blob_ptr;
+
+        if (edid->length <= sizeof(pDetectParams->edid.buffer)) {
+            memcpy(pDetectParams->edid.buffer, edid->data, edid->length);
+            pDetectParams->edid.bufferSize = edid->length;
+            pDetectParams->overrideEdid = NV_TRUE;
+        } else {
+            WARN_ON(edid->length >
+                    sizeof(pDetectParams->edid.buffer));
+        }
+    }
+
+    if (!nvKms->getDynamicDisplayInfo(nv_dev->pDevice, pDetectParams)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to detect display state");
+        return false;
+    }
+
+    if (pDetectParams->connected) {
+        if (!pDetectParams->overrideEdid && pDetectParams->edid.bufferSize) {
+
+            if ((nv_connector->edid = nv_drm_calloc(
+                    1,
+                    pDetectParams->edid.bufferSize)) != NULL) {
+
+                memcpy(nv_connector->edid,
+                       pDetectParams->edid.buffer,
+                       pDetectParams->edid.bufferSize);
+            } else {
+                NV_DRM_LOG_ERR("Out of Memory");
+            }
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
+static enum drm_connector_status __nv_drm_connector_detect_internal(
+    struct drm_connector *connector)
+{
+    struct drm_device *dev = connector->dev;
+    struct nv_drm_connector
*nv_connector = to_nv_connector(connector); + + enum drm_connector_status status = connector_status_disconnected; + + struct drm_encoder *detected_encoder = NULL; + struct nv_drm_encoder *nv_detected_encoder = NULL; + struct drm_encoder *encoder; + + struct NvKmsKapiDynamicDisplayParams *pDetectParams = NULL; + + BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + if (nv_connector->edid != NULL) { + nv_drm_free(nv_connector->edid); + nv_connector->edid = NULL; + } + + if ((pDetectParams = nv_drm_calloc( + 1, + sizeof(*pDetectParams))) == NULL) { + WARN_ON(pDetectParams == NULL); + goto done; + } + + nv_drm_connector_for_each_possible_encoder(connector, encoder) { + if (__nv_drm_detect_encoder(pDetectParams, connector, encoder)) { + detected_encoder = encoder; + break; + } + } nv_drm_connector_for_each_possible_encoder_end; + + if (detected_encoder == NULL) { + goto done; + } + + nv_detected_encoder = to_nv_encoder(detected_encoder); + + status = connector_status_connected; + + nv_connector->nv_detected_encoder = nv_detected_encoder; + + if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DVI_I) { + drm_object_property_set_value( + &connector->base, + dev->mode_config.dvi_i_subconnector_property, + detected_encoder->encoder_type == DRM_MODE_ENCODER_DAC ? + DRM_MODE_SUBCONNECTOR_DVIA : + DRM_MODE_SUBCONNECTOR_DVID); + } + +done: + + nv_drm_free(pDetectParams); + + return status; +} + +static void __nv_drm_connector_force(struct drm_connector *connector) +{ + __nv_drm_connector_detect_internal(connector); +} + +static enum drm_connector_status +nv_drm_connector_detect(struct drm_connector *connector, bool force) +{ + return __nv_drm_connector_detect_internal(connector); +} + +static struct drm_connector_funcs nv_connector_funcs = { +#if defined NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT + .dpms = drm_atomic_helper_connector_dpms, +#endif + .destroy = nv_drm_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .force = __nv_drm_connector_force, + .detect = nv_drm_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int nv_drm_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + struct nv_drm_encoder *nv_detected_encoder = + nv_connector->nv_detected_encoder; + NvU32 modeIndex = 0; + int count = 0; + + + if (nv_connector->edid != NULL) { + nv_drm_connector_update_edid_property(connector, nv_connector->edid); + } + + while (1) { + struct drm_display_mode *mode; + struct NvKmsKapiDisplayMode displayMode; + NvBool valid = 0; + NvBool preferredMode = NV_FALSE; + int ret; + + ret = nvKms->getDisplayMode(nv_dev->pDevice, + nv_detected_encoder->hDisplay, + modeIndex++, &displayMode, &valid, + &preferredMode); + + if (ret < 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to get mode at modeIndex %d of NvKmsKapiDisplay 0x%08x", + modeIndex, nv_detected_encoder->hDisplay); + break; + } + + /* Is end of mode-list */ + + if (ret == 0) { + break; + } + + /* Ignore invalid modes */ + + if (!valid) { + continue; + } + + mode = drm_mode_create(connector->dev); + + if (mode == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create mode for NvKmsKapiDisplay 0x%08x", + nv_detected_encoder->hDisplay); + continue; + } + + 
nvkms_display_mode_to_drm_mode(&displayMode, mode); + + if (preferredMode) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + } + + /* Add a mode to a connector's probed_mode list */ + + drm_mode_probed_add(connector, mode); + + count++; + } + + return count; +} + +static int nv_drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *dev = connector->dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_encoder *nv_detected_encoder = + to_nv_connector(connector)->nv_detected_encoder; + struct NvKmsKapiDisplayMode displayMode; + + if (nv_detected_encoder == NULL) { + return MODE_BAD; + } + + drm_mode_to_nvkms_display_mode(mode, &displayMode); + + if (!nvKms->validateDisplayMode(nv_dev->pDevice, + nv_detected_encoder->hDisplay, + &displayMode)) { + return MODE_BAD; + } + + return MODE_OK; +} + +static struct drm_encoder* +nv_drm_connector_best_encoder(struct drm_connector *connector) +{ + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (nv_connector->nv_detected_encoder != NULL) { + return &nv_connector->nv_detected_encoder->base; + } + + return NULL; +} + +static const struct drm_connector_helper_funcs nv_connector_helper_funcs = { + .get_modes = nv_drm_connector_get_modes, + .mode_valid = nv_drm_connector_mode_valid, + .best_encoder = nv_drm_connector_best_encoder, +}; + +static struct drm_connector* +nv_drm_connector_new(struct drm_device *dev, + NvU32 physicalIndex, NvKmsConnectorType type, + NvBool internal, + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_connector *nv_connector = NULL; + int ret = -ENOMEM; + + if ((nv_connector = nv_drm_calloc(1, sizeof(*nv_connector))) == NULL) { + goto failed; + } + + if ((nv_connector->base.state = + nv_drm_calloc(1, sizeof(*nv_connector->base.state))) == NULL) { + goto failed_state_alloc; + } + nv_connector->base.state->connector = &nv_connector->base; + + nv_connector->physicalIndex = physicalIndex; + nv_connector->type = type; + nv_connector->internal = internal; + + strcpy(nv_connector->dpAddress, dpAddress); + + ret = drm_connector_init( + dev, + &nv_connector->base, &nv_connector_funcs, + nvkms_connector_type_to_drm_connector_type(type, internal)); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to initialize connector created from physical index %u", + nv_connector->physicalIndex); + goto failed_connector_init; + } + + drm_connector_helper_add(&nv_connector->base, &nv_connector_helper_funcs); + + nv_connector->base.polled = DRM_CONNECTOR_POLL_HPD; + + if (nv_connector->type == NVKMS_CONNECTOR_TYPE_VGA) { + nv_connector->base.polled = + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + } + + /* Register connector with DRM subsystem */ + + ret = drm_connector_register(&nv_connector->base); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to register connector created from physical index %u", + nv_connector->physicalIndex); + goto failed_connector_register; + } + + return &nv_connector->base; + +failed_connector_register: + drm_connector_cleanup(&nv_connector->base); + +failed_connector_init: + nv_drm_free(nv_connector->base.state); + +failed_state_alloc: + nv_drm_free(nv_connector); + +failed: + return ERR_PTR(ret); +} + +/* + * Get connector with given physical index one exists. Otherwise, create and + * return a new connector. 
+ */ +struct drm_connector* +nv_drm_get_connector(struct drm_device *dev, + NvU32 physicalIndex, NvKmsConnectorType type, + NvBool internal, + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]) +{ + struct drm_connector *connector = NULL; +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + struct drm_connector_list_iter conn_iter; + nv_drm_connector_list_iter_begin(dev, &conn_iter); +#else + struct drm_mode_config *config = &dev->mode_config; + mutex_lock(&config->mutex); +#endif + + /* Lookup for existing connector with same physical index */ + nv_drm_for_each_connector(connector, &conn_iter, dev) { + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (nv_connector->physicalIndex == physicalIndex) { + BUG_ON(nv_connector->type != type || + nv_connector->internal != internal); + + if (strcmp(nv_connector->dpAddress, dpAddress) == 0) { + goto done; + } + } + } + connector = NULL; + +done: +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + nv_drm_connector_list_iter_end(&conn_iter); +#else + mutex_unlock(&config->mutex); +#endif + + if (!connector) { + connector = nv_drm_connector_new(dev, + physicalIndex, type, internal, + dpAddress); + } + + return connector; +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h new file mode 100644 index 0000000..fd83d7a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_CONNECTOR_H__ +#define __NVIDIA_DRM_CONNECTOR_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include <drm/drmP.h> +#endif + +#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT) +#include <drm/drm_connector.h> +#endif + +#include "nvtypes.h" +#include "nvkms-api-types.h" + +struct nv_drm_connector { + NvU32 physicalIndex; + + NvBool internal; + NvKmsConnectorType type; + + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + + struct nv_drm_encoder *nv_detected_encoder; + struct edid *edid; + + atomic_t connection_status_dirty; + + struct drm_connector base; +}; + +static inline struct nv_drm_connector *to_nv_connector( + struct drm_connector *connector) +{ + if (connector == NULL) { + return NULL; + } + return container_of(connector, struct nv_drm_connector, base); +} + +static inline void nv_drm_connector_mark_connection_status_dirty( + struct nv_drm_connector *nv_connector) +{ + atomic_cmpxchg(&nv_connector->connection_status_dirty, false, true); +} + +static inline bool nv_drm_connector_check_connection_status_dirty_and_clear( + struct nv_drm_connector *nv_connector) +{ + return atomic_cmpxchg( + &nv_connector->connection_status_dirty, + true, + false) == true; +} + +struct drm_connector* +nv_drm_get_connector(struct drm_device *dev, + NvU32 physicalIndex, NvKmsConnectorType type, + NvBool internal, + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_CONNECTOR_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c new file mode 100644 index 0000000..e8719f5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c @@ -0,0 +1,1415 @@ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-helper.h" +#include "nvidia-drm-priv.h" +#include "nvidia-drm-crtc.h" +#include "nvidia-drm-connector.h" +#include "nvidia-drm-encoder.h" +#include "nvidia-drm-utils.h" +#include "nvidia-drm-fb.h" +#include "nvidia-drm-ioctl.h" +#include "nvidia-drm-format.h" + +#include "nvmisc.h" + +#include +#include + +#include +#include + +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) +#include +#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) +static int +nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev, + struct drm_property_blob **blob, + uint64_t blob_id, + ssize_t expected_size) +{ + struct drm_property_blob *new_blob = NULL; + + if (blob_id != 0) { + new_blob = drm_property_lookup_blob(dev, blob_id); + if (new_blob == NULL) { + return -EINVAL; + } + + if ((expected_size > 0) && + (new_blob->length != expected_size)) { + drm_property_blob_put(new_blob); + return -EINVAL; + } + } + + drm_property_replace_blob(blob, new_blob); + drm_property_blob_put(new_blob); + + return 0; +} + +static bool nv_drm_plane_atomic_hdr_metadata_equal(struct nv_drm_plane_state *old_state, + struct nv_drm_plane_state *new_state) +{ + struct drm_property_blob *old_blob = old_state->hdr_output_metadata; + struct drm_property_blob *new_blob = new_state->hdr_output_metadata; + + if (!old_blob || !new_blob) { + return old_blob == new_blob; + } + + if (old_blob->length != new_blob->length) { + return false; + } + + return !memcmp(old_blob->data, new_blob->data, old_blob->length); +} +#endif + +static void nv_drm_plane_destroy(struct drm_plane *plane) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + + /* plane->state gets freed here */ + drm_plane_cleanup(plane); + + nv_drm_free(nv_plane); +} + +static inline void +plane_req_config_disable(struct NvKmsKapiLayerRequestedConfig *req_config) +{ + /* Clear layer config */ + memset(&req_config->config, 0, sizeof(req_config->config)); + + /* Set flags to get cleared layer config applied */ + req_config->flags.surfaceChanged = NV_TRUE; + req_config->flags.srcXYChanged = NV_TRUE; + req_config->flags.srcWHChanged = NV_TRUE; + req_config->flags.dstXYChanged = NV_TRUE; + req_config->flags.dstWHChanged = NV_TRUE; +} + +static inline void +cursor_req_config_disable(struct NvKmsKapiCursorRequestedConfig *req_config) +{ + req_config->surface = NULL; + req_config->flags.surfaceChanged = NV_TRUE; +} + +static void +cursor_plane_req_config_update(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct NvKmsKapiCursorRequestedConfig *req_config) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + struct NvKmsKapiCursorRequestedConfig old_config = *req_config; + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(plane_state); + + if (plane_state->fb == NULL) { + cursor_req_config_disable(req_config); + return; + } + + *req_config = (struct NvKmsKapiCursorRequestedConfig) { + .surface = to_nv_framebuffer(plane_state->fb)->pSurface, + + .dstX = plane_state->crtc_x, + .dstY = plane_state->crtc_y, + }; + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->compParams.compMode = + 
NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + req_config->compParams.surfaceAlpha = + plane_state->alpha >> 8; + + } else if (plane->blend_mode_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + } else { + req_config->compParams.compMode = + nv_plane->defaultCompositionMode; + } +#else + req_config->compParams.compMode = nv_plane->defaultCompositionMode; +#endif + + /* + * Unconditionally mark the surface as changed, even if nothing changed, + * so that we always get a flip event: a DRM client may flip with + * the same surface and wait for a flip event. + */ + req_config->flags.surfaceChanged = NV_TRUE; + + if (old_config.surface == NULL && + old_config.surface != req_config->surface) { + req_config->flags.dstXYChanged = NV_TRUE; + return; + } + + req_config->flags.dstXYChanged = + old_config.dstX != req_config->dstX || + old_config.dstY != req_config->dstY; +} + +static int +plane_req_config_update(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct NvKmsKapiLayerRequestedConfig *req_config) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + struct NvKmsKapiLayerConfig old_config = req_config->config; + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(plane_state); + int ret = 0; + + if (plane_state->fb == NULL) { + plane_req_config_disable(req_config); + return 0; + } + + *req_config = (struct NvKmsKapiLayerRequestedConfig) { + .config = { + .surface = to_nv_framebuffer(plane_state->fb)->pSurface, + + /* Source values are 16.16 fixed point */ + .srcX = plane_state->src_x >> 16, + .srcY = plane_state->src_y >> 16, + .srcWidth = plane_state->src_w >> 16, + .srcHeight = plane_state->src_h >> 16, + + .dstX = plane_state->crtc_x, + .dstY = plane_state->crtc_y, + .dstWidth = plane_state->crtc_w, + .dstHeight = plane_state->crtc_h, + }, + }; + +#if defined(NV_DRM_ROTATION_AVAILABLE) + /* + * plane_state->rotation is only valid when plane->rotation_property + * is non-NULL. 
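+ * The rotation property itself is created by + * __nv_drm_plane_create_rotation_property() only when NvKms reports at + * least one supported rotation or reflection for this layer, so this check + * doubles as a capability check.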
+ */ + if (plane->rotation_property != NULL) { + if (plane_state->rotation & DRM_MODE_REFLECT_X) { + req_config->config.rrParams.reflectionX = true; + } + + if (plane_state->rotation & DRM_MODE_REFLECT_Y) { + req_config->config.rrParams.reflectionY = true; + } + + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_0: + req_config->config.rrParams.rotation = NVKMS_ROTATION_0; + break; + case DRM_MODE_ROTATE_90: + req_config->config.rrParams.rotation = NVKMS_ROTATION_90; + break; + case DRM_MODE_ROTATE_180: + req_config->config.rrParams.rotation = NVKMS_ROTATION_180; + break; + case DRM_MODE_ROTATE_270: + req_config->config.rrParams.rotation = NVKMS_ROTATION_270; + break; + default: + /* + * We should not hit this, because + * plane_state->rotation should only have values + * registered in + * __nv_drm_plane_create_rotation_property(). + */ + WARN_ON("Unsupported rotation"); + break; + } + } +#endif + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + req_config->config.compParams.surfaceAlpha = + plane_state->alpha >> 8; + + } else if (plane->blend_mode_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). 
+ */ + WARN_ON("Unsupported blending mode"); + break; + + } + + } else { + req_config->config.compParams.compMode = + nv_plane->defaultCompositionMode; + } +#else + req_config->config.compParams.compMode = + nv_plane->defaultCompositionMode; +#endif + + req_config->config.inputColorSpace = + nv_drm_plane_state->input_colorspace; + + req_config->config.syncptParams.preSyncptSpecified = false; + req_config->config.syncptParams.postSyncptRequested = false; + + if (plane_state->fence != NULL || nv_drm_plane_state->fd_user_ptr) { + if (!nv_dev->supportsSyncpts) { + return -1; + } + +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) +#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT) + if (plane_state->fence != NULL) { + ret = nvhost_dma_fence_unpack( + plane_state->fence, + &req_config->config.syncptParams.preSyncptId, + &req_config->config.syncptParams.preSyncptValue); + if (ret != 0) { + return ret; + } + req_config->config.syncptParams.preSyncptSpecified = true; + } +#endif + + if (nv_drm_plane_state->fd_user_ptr) { + req_config->config.syncptParams.postSyncptRequested = true; + } +#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT) + if (plane_state->fence != NULL) { + ret = host1x_fence_extract( + plane_state->fence, + &req_config->config.syncptParams.preSyncptId, + &req_config->config.syncptParams.preSyncptValue); + if (ret != 0) { + return ret; + } + req_config->config.syncptParams.preSyncptSpecified = true; + } + + if (nv_drm_plane_state->fd_user_ptr) { + req_config->config.syncptParams.postSyncptRequested = true; + } +#else + return -1; +#endif + } + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + if (!nv_drm_plane_atomic_hdr_metadata_equal( + to_nv_drm_plane_state(plane->state), + nv_drm_plane_state)) { + if (nv_drm_plane_state->hdr_output_metadata) { + struct hdr_output_metadata *hdr_metadata = + nv_drm_plane_state->hdr_output_metadata->data; + struct hdr_metadata_infoframe *info_frame = + &hdr_metadata->hdmi_metadata_type1; + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + uint32_t i; + + if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported Metadata Type"); + return -1; + } + + for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i ++) { + req_config->config.hdrMetadata.displayPrimaries[i].x = + info_frame->display_primaries[i].x; + req_config->config.hdrMetadata.displayPrimaries[i].y = + info_frame->display_primaries[i].y; + } + + req_config->config.hdrMetadata.whitePoint.x = + info_frame->white_point.x; + req_config->config.hdrMetadata.whitePoint.y = + info_frame->white_point.y; + req_config->config.hdrMetadata.maxDisplayMasteringLuminance = + info_frame->max_display_mastering_luminance; + req_config->config.hdrMetadata.minDisplayMasteringLuminance = + info_frame->min_display_mastering_luminance; + req_config->config.hdrMetadata.maxCLL = + info_frame->max_cll; + req_config->config.hdrMetadata.maxFALL = + info_frame->max_fall; + + req_config->config.hdrMetadataSpecified = true; + + switch (info_frame->eotf) { + case HDMI_EOTF_SMPTE_ST2084: + req_config->config.tf = NVKMS_OUTPUT_TF_PQ; + break; + case HDMI_EOTF_TRADITIONAL_GAMMA_SDR: + req_config->config.tf = + NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR; + break; + default: + NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported EOTF"); + return -1; + } + } else { + req_config->config.hdrMetadataSpecified = false; + req_config->config.tf = NVKMS_OUTPUT_TF_NONE; + } + } +#endif + + /* + * Unconditionally mark the surface as changed, even if nothing changed, + * so that we 
always get a flip event: a DRM client may flip with + * the same surface and wait for a flip event. + */ + req_config->flags.surfaceChanged = NV_TRUE; + + if (old_config.surface == NULL && + old_config.surface != req_config->config.surface) { + req_config->flags.srcXYChanged = NV_TRUE; + req_config->flags.srcWHChanged = NV_TRUE; + req_config->flags.dstXYChanged = NV_TRUE; + req_config->flags.dstWHChanged = NV_TRUE; + return 0; + } + + req_config->flags.srcXYChanged = + old_config.srcX != req_config->config.srcX || + old_config.srcY != req_config->config.srcY; + + req_config->flags.srcWHChanged = + old_config.srcWidth != req_config->config.srcWidth || + old_config.srcHeight != req_config->config.srcHeight; + + req_config->flags.dstXYChanged = + old_config.dstX != req_config->config.dstX || + old_config.dstY != req_config->config.dstY; + + req_config->flags.dstWHChanged = + old_config.dstWidth != req_config->config.dstWidth || + old_config.dstHeight != req_config->config.dstHeight; + + return 0; +} + +static bool __is_async_flip_requested(const struct drm_plane *plane, + const struct drm_crtc_state *crtc_state) +{ + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { +#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) + return crtc_state->async_flip; +#elif defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS) + return !!(crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC); +#endif + } + + return false; +} + +static int __nv_drm_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + WARN_ON(nv_plane->layer_idx != NVKMS_KAPI_LAYER_INVALID_IDX); + + nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) { + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *head_req_config = + &nv_crtc_state->req_config; + struct NvKmsKapiCursorRequestedConfig *cursor_req_config = + &head_req_config->cursorRequestedConfig; + + if (plane->state->crtc == crtc && + plane->state->crtc != plane_state->crtc) { + cursor_req_config_disable(cursor_req_config); + continue; + } + + if (plane_state->crtc == crtc) { + cursor_plane_req_config_update(plane, plane_state, + cursor_req_config); + } + } + + return 0; +} + +#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) +static int nv_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +#else +static int nv_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +#endif +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); +#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); +#endif + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int ret; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + return __nv_drm_cursor_atomic_check(plane, plane_state); + } + + WARN_ON(nv_plane->layer_idx == NVKMS_KAPI_LAYER_INVALID_IDX); + + nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) { + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *head_req_config = + &nv_crtc_state->req_config; + struct NvKmsKapiLayerRequestedConfig *plane_requested_config = + &head_req_config->layerRequestedConfig[nv_plane->layer_idx]; + + if (plane->state->crtc == crtc && + plane->state->crtc != plane_state->crtc) { + 
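/* This plane is moving off of this CRTC: clear its layer configuration so that the old head stops scanning it out. */ +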
plane_req_config_disable(plane_requested_config); + continue; + } + + if (plane_state->crtc == crtc) { + ret = plane_req_config_update(plane, + plane_state, + plane_requested_config); + if (ret != 0) { + return ret; + } + + if (__is_async_flip_requested(plane, crtc_state)) { + /* + * Async flip requests that the flip happen 'as soon as + * possible', meaning that it should not be delayed waiting + * for vblank. This may cause tearing on the screen. + */ + plane_requested_config->config.minPresentInterval = 0; + plane_requested_config->config.tearing = NV_TRUE; + } else { + plane_requested_config->config.minPresentInterval = 1; + plane_requested_config->config.tearing = NV_FALSE; + } + } + } + + return 0; +} + +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) +static bool nv_drm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + /* All supported modifiers are compatible with all supported formats */ + return true; +} +#endif + + +static int nv_drm_plane_atomic_set_property( + struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(state); + + if (property == nv_dev->nv_out_fence_property) { +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) + nv_drm_plane_state->fd_user_ptr = u64_to_user_ptr(val); +#endif + return 0; + } else if (property == nv_dev->nv_input_colorspace_property) { + nv_drm_plane_state->input_colorspace = val; + return 0; + } +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + else if (property == nv_dev->nv_hdr_output_metadata_property) { + return nv_drm_atomic_replace_property_blob_from_id( + nv_dev->dev, + &nv_drm_plane_state->hdr_output_metadata, + val, + sizeof(struct hdr_output_metadata)); + } +#endif + + return -EINVAL; +} + +static int nv_drm_plane_atomic_get_property( + struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(state); + + if (property == nv_dev->nv_out_fence_property) { + return 0; + } else if (property == nv_dev->nv_input_colorspace_property) { + *val = nv_drm_plane_state->input_colorspace; + return 0; + } +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + else if (property == nv_dev->nv_hdr_output_metadata_property) { + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(state); + *val = nv_drm_plane_state->hdr_output_metadata ?
+ nv_drm_plane_state->hdr_output_metadata->base.id : 0; + return 0; + } +#endif + + return -EINVAL; +} + +static struct drm_plane_state * +nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct nv_drm_plane_state *nv_old_plane_state = + to_nv_drm_plane_state(plane->state); + struct nv_drm_plane_state *nv_plane_state = + nv_drm_calloc(1, sizeof(*nv_plane_state)); + + if (nv_plane_state == NULL) { + return NULL; + } + + __drm_atomic_helper_plane_duplicate_state(plane, &nv_plane_state->base); + + nv_plane_state->fd_user_ptr = nv_old_plane_state->fd_user_ptr; + nv_plane_state->input_colorspace = nv_old_plane_state->input_colorspace; + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + nv_plane_state->hdr_output_metadata = nv_old_plane_state->hdr_output_metadata; + if (nv_plane_state->hdr_output_metadata) { + drm_property_blob_get(nv_plane_state->hdr_output_metadata); + } +#endif + + return &nv_plane_state->base; +} + +static inline void __nv_drm_plane_atomic_destroy_state( + struct drm_plane *plane, + struct drm_plane_state *state) +{ +#if defined(NV_DRM_ATOMIC_HELPER_PLANE_DESTROY_STATE_HAS_PLANE_ARG) + __drm_atomic_helper_plane_destroy_state(plane, state); +#else + __drm_atomic_helper_plane_destroy_state(state); +#endif + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(state); + drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata); +#endif +} + +static void nv_drm_plane_atomic_destroy_state( + struct drm_plane *plane, + struct drm_plane_state *state) +{ + __nv_drm_plane_atomic_destroy_state(plane, state); + + nv_drm_free(to_nv_drm_plane_state(state)); +} + +static const struct drm_plane_funcs nv_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = nv_drm_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_get_property = nv_drm_plane_atomic_get_property, + .atomic_set_property = nv_drm_plane_atomic_set_property, + .atomic_duplicate_state = nv_drm_plane_atomic_duplicate_state, + .atomic_destroy_state = nv_drm_plane_atomic_destroy_state, +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) + .format_mod_supported = nv_drm_plane_format_mod_supported, +#endif +}; + +static const struct drm_plane_helper_funcs nv_plane_helper_funcs = { + .atomic_check = nv_drm_plane_atomic_check, +}; + +static void nv_drm_crtc_destroy(struct drm_crtc *crtc) +{ + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + drm_crtc_cleanup(crtc); + + nv_drm_free(nv_crtc); +} + +static inline void +__nv_drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ +#if defined(NV_DRM_ATOMIC_HELPER_CRTC_DESTROY_STATE_HAS_CRTC_ARG) + __drm_atomic_helper_crtc_destroy_state(crtc, crtc_state); +#else + __drm_atomic_helper_crtc_destroy_state(crtc_state); +#endif +} + +static inline void nv_drm_crtc_duplicate_req_head_modeset_config( + const struct NvKmsKapiHeadRequestedConfig *old, + struct NvKmsKapiHeadRequestedConfig *new) +{ + uint32_t i; + + /* + * Do not duplicate fields like the 'modeChanged' flags, which express the + * delta of a new configuration with respect to the previous one: at this + * point the new configuration has no changes yet with respect to the + * old one!
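+ * The flags are instead recomputed from scratch by the ->atomic_check() + * hooks as the new configuration is built up.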
+ */ + *new = (struct NvKmsKapiHeadRequestedConfig) { + .modeSetConfig = old->modeSetConfig, + }; + + for (i = 0; i < ARRAY_SIZE(old->layerRequestedConfig); i++) { + new->layerRequestedConfig[i] = (struct NvKmsKapiLayerRequestedConfig) { + .config = old->layerRequestedConfig[i].config, + }; + } +} + +/** + * nv_drm_atomic_crtc_duplicate_state - crtc state duplicate hook + * @crtc: DRM crtc + * + * Allocate and associate flip state with the DRM crtc state; this flip state + * will be consumed at the time the atomic update is committed to hardware by + * nv_drm_atomic_helper_commit_tail(). + */ +static struct drm_crtc_state* +nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct nv_drm_crtc_state *nv_state = nv_drm_calloc(1, sizeof(*nv_state)); + + if (nv_state == NULL) { + return NULL; + } + + if ((nv_state->nv_flip = + nv_drm_calloc(1, sizeof(*(nv_state->nv_flip)))) == NULL) { + nv_drm_free(nv_state); + return NULL; + } + + __drm_atomic_helper_crtc_duplicate_state(crtc, &nv_state->base); + + INIT_LIST_HEAD(&nv_state->nv_flip->list_entry); + INIT_LIST_HEAD(&nv_state->nv_flip->deferred_flip_list); + + nv_drm_crtc_duplicate_req_head_modeset_config( + &(to_nv_crtc_state(crtc->state)->req_config), + &nv_state->req_config); + + return &nv_state->base; +} + +/** + * nv_drm_atomic_crtc_destroy_state - crtc state destroy hook + * @crtc: DRM crtc + * @state: DRM crtc state object to destroy + * + * Destroy the flip state associated with the given crtc state if it hasn't + * been consumed because the atomic commit failed. + */ +static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct nv_drm_crtc_state *nv_state = to_nv_crtc_state(state); + + if (nv_state->nv_flip != NULL) { + nv_drm_free(nv_state->nv_flip); + nv_state->nv_flip = NULL; + } + + __nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base); + + nv_drm_free(nv_state); +} + +static struct drm_crtc_funcs nv_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .destroy = nv_drm_crtc_destroy, + .atomic_duplicate_state = nv_drm_atomic_crtc_duplicate_state, + .atomic_destroy_state = nv_drm_atomic_crtc_destroy_state, +}; + +/* + * In kernel versions before the addition of + * drm_crtc_state::connectors_changed, connector changes were + * reflected in drm_crtc_state::mode_changed. + */ +static inline bool +nv_drm_crtc_state_connectors_changed(struct drm_crtc_state *crtc_state) +{ +#if defined(NV_DRM_CRTC_STATE_HAS_CONNECTORS_CHANGED) + return crtc_state->connectors_changed; +#else + return crtc_state->mode_changed; +#endif +} + +static int head_modeset_config_attach_connector( + struct nv_drm_connector *nv_connector, + struct NvKmsKapiHeadModeSetConfig *head_modeset_config) +{ + struct nv_drm_encoder *nv_encoder = nv_connector->nv_detected_encoder; + + if (NV_DRM_WARN(nv_encoder == NULL || + head_modeset_config->numDisplays >= + ARRAY_SIZE(head_modeset_config->displays))) { + return -EINVAL; + } + head_modeset_config->displays[head_modeset_config->numDisplays++] = + nv_encoder->hDisplay; + return 0; +} + +/** + * nv_drm_crtc_atomic_check() can fail after it has modified + * 'nv_drm_crtc_state::req_config'; that is fine, because the + * 'nv_drm_crtc_state' will be discarded if ->atomic_check() fails.
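+ * The DRM core destroys the rejected state via + * nv_drm_atomic_crtc_destroy_state(), so a partially updated req_config + * never reaches the hardware.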
+ */ +#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) +static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +#else +static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +#endif +{ +#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); +#endif + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *req_config = + &nv_crtc_state->req_config; + int ret = 0; + + if (crtc_state->mode_changed) { + drm_mode_to_nvkms_display_mode(&crtc_state->mode, + &req_config->modeSetConfig.mode); + req_config->flags.modeChanged = NV_TRUE; + } + + if (nv_drm_crtc_state_connectors_changed(crtc_state)) { + struct NvKmsKapiHeadModeSetConfig *config = &req_config->modeSetConfig; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int j; + + config->numDisplays = 0; + + memset(config->displays, 0, sizeof(config->displays)); + + req_config->flags.displaysChanged = NV_TRUE; + + nv_drm_for_each_connector_in_state(crtc_state->state, + connector, connector_state, j) { + if (connector_state->crtc != crtc) { + continue; + } + + if ((ret = head_modeset_config_attach_connector( + to_nv_connector(connector), + config)) != 0) { + return ret; + } + } + } + + if (crtc_state->active_changed) { + req_config->modeSetConfig.bActive = crtc_state->active; + req_config->flags.activeChanged = NV_TRUE; + } + + return ret; +} + +static bool +nv_drm_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_crtc_helper_funcs nv_crtc_helper_funcs = { + .atomic_check = nv_drm_crtc_atomic_check, + .mode_fixup = nv_drm_crtc_mode_fixup, +}; + +static void nv_drm_plane_install_properties( + struct drm_plane *plane, + NvBool supportsHDR) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + + if (nv_dev->nv_out_fence_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_out_fence_property, 0); + } + + if (nv_dev->nv_input_colorspace_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_input_colorspace_property, + NVKMS_INPUT_COLORSPACE_NONE); + } + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + if (supportsHDR && nv_dev->nv_hdr_output_metadata_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_hdr_output_metadata_property, 0); + } +#endif +} + +static void +__nv_drm_plane_create_alpha_blending_properties(struct drm_plane *plane, + NvU32 validCompModes) +{ +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if ((validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA)) != 0x0 && + (validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA)) != 0x0) { + + drm_plane_create_alpha_property(plane); + drm_plane_create_blend_mode_property(plane, + NVBIT(DRM_MODE_BLEND_PREMULTI) | + NVBIT(DRM_MODE_BLEND_COVERAGE)); + } else if ((validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0 && + (validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA)) != 0x0) { + + drm_plane_create_blend_mode_property(plane, + NVBIT(DRM_MODE_BLEND_PREMULTI) | + NVBIT(DRM_MODE_BLEND_COVERAGE)); + } +#endif +} + +static void +__nv_drm_plane_create_rotation_property(struct drm_plane *plane, + NvU16 validLayerRRTransforms) +{ +#if 
defined(NV_DRM_ROTATION_AVAILABLE) + enum NvKmsRotation curRotation; + NvU32 supported_rotations = 0; + struct NvKmsRRParams rrParams = { + .rotation = NVKMS_ROTATION_0, + .reflectionX = true, + .reflectionY = true, + }; + + if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) & + validLayerRRTransforms) != 0) { + supported_rotations |= DRM_MODE_REFLECT_X; + supported_rotations |= DRM_MODE_REFLECT_Y; + } + + rrParams.reflectionX = false; + rrParams.reflectionY = false; + + for (curRotation = NVKMS_ROTATION_MIN; + curRotation <= NVKMS_ROTATION_MAX; curRotation++) { + rrParams.rotation = curRotation; + if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) & + validLayerRRTransforms) == 0) { + continue; + } + + switch (curRotation) { + case NVKMS_ROTATION_0: + supported_rotations |= DRM_MODE_ROTATE_0; + break; + case NVKMS_ROTATION_90: + supported_rotations |= DRM_MODE_ROTATE_90; + break; + case NVKMS_ROTATION_180: + supported_rotations |= DRM_MODE_ROTATE_180; + break; + case NVKMS_ROTATION_270: + supported_rotations |= DRM_MODE_ROTATE_270; + break; + default: + break; + } + + } + + if (supported_rotations != 0) { + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + supported_rotations); + } +#endif +} + +static struct drm_plane* +nv_drm_plane_create(struct drm_device *dev, + enum drm_plane_type plane_type, + uint32_t layer_idx, + NvU32 head, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) + struct nv_drm_device *nv_dev = to_nv_device(dev); + const NvU64 linear_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, + }; +#endif + enum NvKmsCompositionBlendingMode defaultCompositionMode; + struct nv_drm_plane *nv_plane = NULL; + struct nv_drm_plane_state *nv_plane_state = NULL; + struct drm_plane *plane = NULL; + int ret = -ENOMEM; + uint32_t *formats = NULL; + unsigned int formats_count = 0; + const NvU32 validCompositionModes = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + pResInfo->caps.validCursorCompositionModes : + pResInfo->caps.layer[layer_idx].validCompositionModes; + const long unsigned int nvkms_formats_mask = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + pResInfo->caps.supportedCursorSurfaceMemoryFormats : + pResInfo->supportedSurfaceMemoryFormats[layer_idx]; + const NvU16 validLayerRRTransforms = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + 0x0 : pResInfo->caps.layer[layer_idx].validRRTransforms; + + if ((validCompositionModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) != 0x0) { + defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE; + } else if ((validCompositionModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0) { + defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + } else { + goto failed; + } + + formats = + nv_drm_format_array_alloc(&formats_count, + nvkms_formats_mask); + if (formats == NULL) { + goto failed; + } + + if ((nv_plane = nv_drm_calloc(1, sizeof(*nv_plane))) == NULL) { + goto failed_plane_alloc; + } + plane = &nv_plane->base; + + nv_plane->defaultCompositionMode = defaultCompositionMode; + nv_plane->layer_idx = layer_idx; + + if ((nv_plane_state = + nv_drm_calloc(1, sizeof(*nv_plane_state))) == NULL) { + goto failed_state_alloc; + } + + plane->state = &nv_plane_state->base; + plane->state->plane = plane; + + /* + * Possible_crtcs for primary and cursor plane is zero because + * drm_crtc_init_with_planes() will assign the plane's possible_crtcs + * after the crtc is successfully initialized. 
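+ * Overlay planes, by contrast, are not passed to + * drm_crtc_init_with_planes(), so their possible_crtcs is set explicitly + * to the head's bit below.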
+ */ + ret = drm_universal_plane_init( + dev, + plane, + (plane_type == DRM_PLANE_TYPE_OVERLAY) ? + (1 << head) : 0, + &nv_plane_funcs, + formats, formats_count, +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + linear_modifiers : nv_dev->modifiers, +#endif + plane_type +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG) + , NULL +#endif + ); + + if (ret != 0) { + goto failed_plane_init; + } + + drm_plane_helper_add(plane, &nv_plane_helper_funcs); + + if (plane_type != DRM_PLANE_TYPE_CURSOR) { + nv_drm_plane_install_properties( + plane, + pResInfo->supportsHDR[layer_idx]); + } + + __nv_drm_plane_create_alpha_blending_properties( + plane, + validCompositionModes); + + __nv_drm_plane_create_rotation_property( + plane, + validLayerRRTransforms); + + return plane; + +failed_plane_init: + nv_drm_free(nv_plane_state); + +failed_state_alloc: + nv_drm_free(nv_plane); + +failed_plane_alloc: + nv_drm_free(formats); + +failed: + return ERR_PTR(ret); +} + +/* + * Add drm crtc for given head and supported enum NvKmsSurfaceMemoryFormats. + */ +static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev, + struct drm_plane *primary_plane, + struct drm_plane *cursor_plane, + unsigned int head) +{ + struct nv_drm_crtc *nv_crtc = NULL; + struct nv_drm_crtc_state *nv_state = NULL; + int ret = -ENOMEM; + + if ((nv_crtc = nv_drm_calloc(1, sizeof(*nv_crtc))) == NULL) { + goto failed; + } + + nv_state = nv_drm_calloc(1, sizeof(*nv_state)); + if (nv_state == NULL) { + goto failed_state_alloc; + } + + nv_crtc->base.state = &nv_state->base; + nv_crtc->base.state->crtc = &nv_crtc->base; + + nv_crtc->head = head; + INIT_LIST_HEAD(&nv_crtc->flip_list); + spin_lock_init(&nv_crtc->flip_list_lock); + + ret = drm_crtc_init_with_planes(nv_dev->dev, + &nv_crtc->base, + primary_plane, cursor_plane, + &nv_crtc_funcs +#if defined(NV_DRM_CRTC_INIT_WITH_PLANES_HAS_NAME_ARG) + , NULL +#endif + ); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to init crtc for head %u with planes", head); + goto failed_init_crtc; + } + + /* Add crtc to drm sub-system */ + + drm_crtc_helper_add(&nv_crtc->base, &nv_crtc_helper_funcs); + + return &nv_crtc->base; + +failed_init_crtc: + nv_drm_free(nv_state); + +failed_state_alloc: + nv_drm_free(nv_crtc); + +failed: + return ERR_PTR(ret); +} + +void nv_drm_enumerate_crtcs_and_planes( + struct nv_drm_device *nv_dev, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ + unsigned int i; + + for (i = 0; i < pResInfo->numHeads; i++) { + struct drm_plane *primary_plane = NULL, *cursor_plane = NULL; + NvU32 layer; + + if (pResInfo->numLayers[i] <= NVKMS_KAPI_LAYER_PRIMARY_IDX) { + continue; + } + + primary_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_PRIMARY, + NVKMS_KAPI_LAYER_PRIMARY_IDX, + i, + pResInfo); + + if (IS_ERR(primary_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create primary plane for head %u, error = %ld", + i, PTR_ERR(primary_plane)); + continue; + } + + cursor_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_CURSOR, + NVKMS_KAPI_LAYER_INVALID_IDX, + i, + pResInfo); + if (IS_ERR(cursor_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create cursor plane for head %u, error = %ld", + i, PTR_ERR(cursor_plane)); + cursor_plane = NULL; + } + + /* Create crtc with the primary and cursor planes */ + { + struct drm_crtc *crtc = + __nv_drm_crtc_create(nv_dev, + primary_plane, cursor_plane, + i); + if (IS_ERR(crtc)) { + nv_drm_plane_destroy(primary_plane); + + if 
(cursor_plane != NULL) { + nv_drm_plane_destroy(cursor_plane); + } + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to add DRM CRTC for head %u, error = %ld", + i, PTR_ERR(crtc)); + continue; + } + } + + for (layer = 0; layer < pResInfo->numLayers[i]; layer++) { + if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) { + continue; + } + + struct drm_plane *overlay_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_OVERLAY, + layer, + i, + pResInfo); + + if (IS_ERR(overlay_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create plane for layer-%u of head %u, error = %ld", + layer, i, PTR_ERR(overlay_plane)); + } + } + + } +} +/* + * Helper function to convert NvKmsKapiCrcs to drm_nvidia_crtc_crc32_out. + */ +static void NvKmsKapiCrcsToDrm(const struct NvKmsKapiCrcs *crcs, + struct drm_nvidia_crtc_crc32_v2_out *drmCrcs) +{ + drmCrcs->outputCrc32.value = crcs->outputCrc32.value; + drmCrcs->outputCrc32.supported = crcs->outputCrc32.supported; + drmCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value; + drmCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported; + drmCrcs->compositorCrc32.value = crcs->compositorCrc32.value; + drmCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported; +} + +int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_crtc_crc32_v2_params *params = data; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_crtc *crtc = NULL; + struct nv_drm_crtc *nv_crtc = NULL; + struct NvKmsKapiCrcs crc32; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -ENOENT; + } + + crtc = nv_drm_crtc_find(dev, params->crtc_id); + if (!crtc) { + return -ENOENT; + } + + nv_crtc = to_nv_crtc(crtc); + + if (!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) { + return -ENODEV; + } + NvKmsKapiCrcsToDrm(&crc32, ¶ms->crc32); + + return 0; +} + +int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_crtc_crc32_params *params = data; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_crtc *crtc = NULL; + struct nv_drm_crtc *nv_crtc = NULL; + struct NvKmsKapiCrcs crc32; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -ENOENT; + } + + crtc = nv_drm_crtc_find(dev, params->crtc_id); + if (!crtc) { + return -ENOENT; + } + + nv_crtc = to_nv_crtc(crtc); + + if (!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) { + return -ENODEV; + } + params->crc32 = crc32.outputCrc32.value; + + return 0; +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h new file mode 100644 index 0000000..532dcd5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_CRTC_H__ +#define __NVIDIA_DRM_CRTC_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-helper.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include <drm/drmP.h> +#endif + +#include <drm/drm_crtc.h> + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE) +/* For DRM_ROTATE_* , DRM_REFLECT_* */ +#include <drm/drm_blend.h> +#endif + +#if defined(NV_DRM_ROTATION_AVAILABLE) +/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */ +#include <uapi/drm/drm_mode.h> +#endif + +#include "nvtypes.h" +#include "nvkms-kapi.h" + +#if defined(NV_DRM_ROTATION_AVAILABLE) +/* + * 19-05-2017 c2c446ad29437bb92b157423c632286608ebd3ec has added + * DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to UAPI and removed + * DRM_ROTATE_* and DRM_REFLECT_* + */ +#if !defined(DRM_MODE_ROTATE_0) +#define DRM_MODE_ROTATE_0 DRM_ROTATE_0 +#define DRM_MODE_ROTATE_90 DRM_ROTATE_90 +#define DRM_MODE_ROTATE_180 DRM_ROTATE_180 +#define DRM_MODE_ROTATE_270 DRM_ROTATE_270 +#define DRM_MODE_REFLECT_X DRM_REFLECT_X +#define DRM_MODE_REFLECT_Y DRM_REFLECT_Y +#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK +#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK +#endif + +#endif //NV_DRM_ROTATION_AVAILABLE + +struct nv_drm_crtc { + NvU32 head; + + /** + * @flip_list: + * + * List of flips pending to be processed by __nv_drm_handle_flip_event(). + * Protected by @flip_list_lock. + */ + struct list_head flip_list; + + /** + * @flip_list_lock: + * + * Spinlock to protect @flip_list. + */ + spinlock_t flip_list_lock; + + struct drm_crtc base; +}; + +/** + * struct nv_drm_flip - flip state + * + * This state is used to consume the DRM completion event associated + * with each crtc state in an atomic commit. + * + * nv_drm_atomic_apply_modeset_config() consumes the DRM completion + * event, saves it into the flip state associated with the crtc, queues the + * flip state onto the crtc's flip list, and commits the atomic update to + * hardware. + */ +struct nv_drm_flip { + /** + * @event: + * + * Optional pointer to a DRM event to signal upon completion of + * the state update. + */ + struct drm_pending_vblank_event *event; + + /** + * @pending_events: + * + * Number of HW events pending to signal completion of the state + * update. + */ + uint32_t pending_events; + + /** + * @list_entry: + * + * Entry on the per-CRTC &nv_drm_crtc.flip_list. Protected by + * &nv_drm_crtc.flip_list_lock.
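+ * Entries are added by nv_drm_crtc_enqueue_flip() and removed by + * nv_drm_crtc_dequeue_flip() once all pending events for the flip have + * fired.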
+ */ + struct list_head list_entry; + + /** + * @deferred_flip_list: + * + * List of flip objects whose processing is deferred until this flip + * object is processed. Protected by &nv_drm_crtc.flip_list_lock. + * nv_drm_atomic_commit() gets the last flip object from + * &nv_drm_crtc.flip_list and adds deferred flip objects onto + * @deferred_flip_list; __nv_drm_handle_flip_event() processes + * @deferred_flip_list. + */ + struct list_head deferred_flip_list; +}; + +struct nv_drm_crtc_state { + /** + * @base: + * + * Base DRM crtc state object. + */ + struct drm_crtc_state base; + + /** + * @req_config: + * + * Requested head modeset configuration corresponding to this crtc state. + */ + struct NvKmsKapiHeadRequestedConfig req_config; + + /** + * @nv_flip: + * + * Flip state associated with this crtc state; it is allocated + * by nv_drm_atomic_crtc_duplicate_state(). On a successful commit it is + * consumed and queued onto the flip list by + * nv_drm_atomic_apply_modeset_config(), and it is finally destroyed + * by __nv_drm_handle_flip_event() after being processed. + * + * If the atomic commit fails, this flip state is destroyed by + * nv_drm_atomic_crtc_destroy_state(). + */ + struct nv_drm_flip *nv_flip; +}; + +static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state) +{ + return container_of(state, struct nv_drm_crtc_state, base); +} + +struct nv_drm_plane { + /** + * @base: + * + * Base DRM plane object for this plane. + */ + struct drm_plane base; + + /** + * @defaultCompositionMode: + * + * Default composition blending mode of this plane. + */ + enum NvKmsCompositionBlendingMode defaultCompositionMode; + + /** + * @layer_idx: + * + * Index of this plane in the per-head array of layers. + */ + uint32_t layer_idx; +}; + +static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane) +{ + if (plane == NULL) { + return NULL; + } + return container_of(plane, struct nv_drm_plane, base); +} + +struct nv_drm_plane_state { + struct drm_plane_state base; + s32 __user *fd_user_ptr; + enum NvKmsInputColorSpace input_colorspace; +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + struct drm_property_blob *hdr_output_metadata; +#endif +}; + +static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct nv_drm_plane_state, base); +} + +static inline struct nv_drm_crtc *to_nv_crtc(struct drm_crtc *crtc) +{ + if (crtc == NULL) { + return NULL; + } + return container_of(crtc, struct nv_drm_crtc, base); +} + +/* + * CRTCs are static objects: the list does not change after initialization and + * before teardown of the device. The initialization/teardown paths are single + * threaded, so no locking is required. + */ +static inline +struct nv_drm_crtc *nv_drm_crtc_lookup(struct nv_drm_device *nv_dev, NvU32 head) +{ + struct drm_crtc *crtc; + nv_drm_for_each_crtc(crtc, nv_dev->dev) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + if (nv_crtc->head == head) { + return nv_crtc; + } + } + return NULL; +} + +/** + * nv_drm_crtc_enqueue_flip - Enqueue an nv_drm_flip object onto the flip_list + * of the crtc. + */ +static inline void nv_drm_crtc_enqueue_flip(struct nv_drm_crtc *nv_crtc, + struct nv_drm_flip *nv_flip) +{ + spin_lock(&nv_crtc->flip_list_lock); + list_add(&nv_flip->list_entry, &nv_crtc->flip_list); + spin_unlock(&nv_crtc->flip_list_lock); +} + +/** + * nv_drm_crtc_dequeue_flip - Dequeue an nv_drm_flip object from the flip_list + * of the crtc.
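+ * + * The flip is removed and returned only once its pending_events count + * drops to zero; until then each call just decrements the count and + * returns NULL.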
+ */ +static inline +struct nv_drm_flip *nv_drm_crtc_dequeue_flip(struct nv_drm_crtc *nv_crtc) +{ + struct nv_drm_flip *nv_flip = NULL; + uint32_t pending_events = 0; + + spin_lock(&nv_crtc->flip_list_lock); + nv_flip = list_first_entry_or_null(&nv_crtc->flip_list, + struct nv_drm_flip, list_entry); + if (likely(nv_flip != NULL)) { + /* + * Decrement pending_event count and dequeue flip object if + * pending_event count becomes 0. + */ + pending_events = --nv_flip->pending_events; + if (!pending_events) { + list_del(&nv_flip->list_entry); + } + } + spin_unlock(&nv_crtc->flip_list_lock); + + if (WARN_ON(nv_flip == NULL) || pending_events) { + return NULL; + } + + return nv_flip; +} + +void nv_drm_enumerate_crtcs_and_planes( + struct nv_drm_device *nv_dev, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo); + +int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_CRTC_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c new file mode 100644 index 0000000..cc6626a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c @@ -0,0 +1,1063 @@ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE and NV_DRM_DRM_GEM_H_PRESENT */ + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-drv.h" +#include "nvidia-drm-fb.h" +#include "nvidia-drm-modeset.h" +#include "nvidia-drm-encoder.h" +#include "nvidia-drm-connector.h" +#include "nvidia-drm-gem.h" +#include "nvidia-drm-crtc.h" +#include "nvidia-drm-prime-fence.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-gem-nvkms-memory.h" +#include "nvidia-drm-gem-user-memory.h" +#include "nvidia-drm-gem-dma-buf.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-ioctl.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include <drm/drmP.h> +#endif + +#if defined(NV_DRM_DRM_VBLANK_H_PRESENT) +#include <drm/drm_vblank.h> +#endif + +#if defined(NV_DRM_DRM_FILE_H_PRESENT) +#include <drm/drm_file.h> +#endif + +#if defined(NV_DRM_DRM_PRIME_H_PRESENT) +#include <drm/drm_prime.h> +#endif + +#if defined(NV_DRM_DRM_IOCTL_H_PRESENT) +#include <drm/drm_ioctl.h> +#endif + +#include + +/* + * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h") + * moves a number of helper function definitions from + * drm/drm_crtc_helper.h to a new drm_probe_helper.h. + */ +#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT) +#include <drm/drm_probe_helper.h> +#endif +#include <drm/drm_crtc_helper.h> + +#if defined(NV_DRM_DRM_GEM_H_PRESENT) +#include <drm/drm_gem.h> +#endif + +#if defined(NV_DRM_DRM_AUTH_H_PRESENT) +#include <drm/drm_auth.h> +#endif + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +#include <drm/drm_atomic_helper.h> +#endif + +static struct nv_drm_device *dev_list = NULL; + +static const char* nv_get_input_colorspace_name( + enum NvKmsInputColorSpace colorSpace) +{ + switch (colorSpace) { + case NVKMS_INPUT_COLORSPACE_NONE: + return "None"; + case NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR: + return "IEC 61966-2-2 linear FP"; + case NVKMS_INPUT_COLORSPACE_BT2100_PQ: + return "ITU-R BT.2100-PQ YCbCr"; + default: + /* We shouldn't hit this */ + WARN_ON("Unsupported input colorspace"); + return "None"; + } +}; + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +static void nv_drm_output_poll_changed(struct drm_device *dev) +{ + struct drm_connector *connector = NULL; + struct drm_mode_config *config = &dev->mode_config; +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + struct drm_connector_list_iter conn_iter; + nv_drm_connector_list_iter_begin(dev, &conn_iter); +#endif + /* + * Here drm_mode_config::mutex has been acquired unconditionally: + * + * - In the non-NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must + * be held for the duration of walking over the connectors. + * + * - In the NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must be + * held for the duration of a fill_modes() call chain: + * connector->funcs->fill_modes() + * |-> drm_helper_probe_single_connector_modes() + * + * It is easiest to always acquire the mutex + * for the entire connector + * loop.
+ */ + mutex_lock(&config->mutex); + + nv_drm_for_each_connector(connector, &conn_iter, dev) { + + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (!nv_drm_connector_check_connection_status_dirty_and_clear( + nv_connector)) { + continue; + } + + connector->funcs->fill_modes( + connector, + dev->mode_config.max_width, dev->mode_config.max_height); + } + + mutex_unlock(&config->mutex); +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + nv_drm_connector_list_iter_end(&conn_iter); +#endif +} + +static struct drm_framebuffer *nv_drm_framebuffer_create( + struct drm_device *dev, + struct drm_file *file, + #if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG) + const struct drm_mode_fb_cmd2 *cmd + #else + struct drm_mode_fb_cmd2 *cmd + #endif +) +{ + struct drm_mode_fb_cmd2 local_cmd; + struct drm_framebuffer *fb; + + local_cmd = *cmd; + + fb = nv_drm_internal_framebuffer_create( + dev, + file, + &local_cmd); + + #if !defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG) + *cmd = local_cmd; + #endif + + return fb; +} + +static const struct drm_mode_config_funcs nv_mode_config_funcs = { + .fb_create = nv_drm_framebuffer_create, + + .atomic_state_alloc = nv_drm_atomic_state_alloc, + .atomic_state_clear = nv_drm_atomic_state_clear, + .atomic_state_free = nv_drm_atomic_state_free, + .atomic_check = nv_drm_atomic_check, + .atomic_commit = nv_drm_atomic_commit, + + .output_poll_changed = nv_drm_output_poll_changed, +}; + +static void nv_drm_event_callback(const struct NvKmsKapiEvent *event) +{ + struct nv_drm_device *nv_dev = event->privateData; + + mutex_lock(&nv_dev->lock); + + if (!atomic_read(&nv_dev->enable_event_handling)) { + goto done; + } + + switch (event->type) { + case NVKMS_EVENT_TYPE_DPY_CHANGED: + nv_drm_handle_display_change( + nv_dev, + event->u.displayChanged.display); + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + nv_drm_handle_dynamic_display_connected( + nv_dev, + event->u.dynamicDisplayConnected.display); + break; + case NVKMS_EVENT_TYPE_FLIP_OCCURRED: + nv_drm_handle_flip_occurred( + nv_dev, + event->u.flipOccurred.head, + event->u.flipOccurred.layer); + break; + default: + break; + } + +done: + + mutex_unlock(&nv_dev->lock); +} + +/* + * Helper function to initialize drm_device::mode_config from + * NvKmsKapiDevice's resource information. + */ +static void +nv_drm_init_mode_config(struct nv_drm_device *nv_dev, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ + struct drm_device *dev = nv_dev->dev; + + drm_mode_config_init(dev); + drm_mode_create_dvi_i_properties(dev); + + dev->mode_config.funcs = &nv_mode_config_funcs; + + dev->mode_config.min_width = pResInfo->caps.minWidthInPixels; + dev->mode_config.min_height = pResInfo->caps.minHeightInPixels; + + dev->mode_config.max_width = pResInfo->caps.maxWidthInPixels; + dev->mode_config.max_height = pResInfo->caps.maxHeightInPixels; + + dev->mode_config.cursor_width = pResInfo->caps.maxCursorSizeInPixels; + dev->mode_config.cursor_height = pResInfo->caps.maxCursorSizeInPixels; + + /* + * NVIDIA GPUs have no preferred depth. Arbitrarily report 24, to be + * consistent with other DRM drivers. 
+ */
+
+    dev->mode_config.preferred_depth = 24;
+    dev->mode_config.prefer_shadow = 1;
+
+#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) || \
+    defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
+    dev->mode_config.async_page_flip = true;
+#else
+    dev->mode_config.async_page_flip = false;
+#endif
+
+#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) && \
+    defined(NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS)
+    /* Allow clients to define framebuffer layouts using DRM format modifiers */
+    dev->mode_config.allow_fb_modifiers = true;
+#endif
+
+    /* Initialize output polling support */
+
+    drm_kms_helper_poll_init(dev);
+
+    /* Disable output polling, because we don't support it yet */
+
+    drm_kms_helper_poll_disable(dev);
+}
+
+/*
+ * Helper function to enumerate encoders/connectors from NvKmsKapiDevice.
+ */
+static void nv_drm_enumerate_encoders_and_connectors
+(
+    struct nv_drm_device *nv_dev
+)
+{
+    struct drm_device *dev = nv_dev->dev;
+    NvU32 nDisplays = 0;
+
+    if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, NULL)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to enumerate NvKmsKapiDisplay count");
+    }
+
+    if (nDisplays != 0) {
+        NvKmsKapiDisplay *hDisplays =
+            nv_drm_calloc(nDisplays, sizeof(*hDisplays));
+
+        if (hDisplays != NULL) {
+            if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, hDisplays)) {
+                NV_DRM_DEV_LOG_ERR(
+                    nv_dev,
+                    "Failed to enumerate NvKmsKapiDisplay handles");
+            } else {
+                NvU32 i;
+
+                for (i = 0; i < nDisplays; i++) {
+                    struct drm_encoder *encoder =
+                        nv_drm_add_encoder(dev, hDisplays[i]);
+
+                    if (IS_ERR(encoder)) {
+                        NV_DRM_DEV_LOG_ERR(
+                            nv_dev,
+                            "Failed to add encoder for NvKmsKapiDisplay 0x%08x",
+                            hDisplays[i]);
+                    }
+                }
+            }
+
+            nv_drm_free(hDisplays);
+        } else {
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Failed to allocate memory for NvKmsKapiDisplay array");
+        }
+    }
+}
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+/*!
+ * 'NV_DRM_OUT_FENCE_PTR' is an atomic per-plane property that clients can use
+ * to request an out-fence fd for a particular plane that's being flipped.
+ * 'NV_DRM_OUT_FENCE_PTR' does NOT have the same behavior as the standard
+ * 'OUT_FENCE_PTR' property - the fd that's returned via 'NV_DRM_OUT_FENCE_PTR'
+ * will only be signaled once the buffers in the corresponding flip are flipped
+ * away from.
+ * In order to use this property, the client needs to call the set-property
+ * function with a user-mode pointer as the value. Once the driver has the
+ * post-syncpt fd from the flip reply, it will copy that fd to the location
+ * pointed to by the user-mode pointer.
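+ *
+ * A minimal, hypothetical libdrm client sketch (the property and plane IDs
+ * are placeholders the client would look up; illustrative only):
+ *
+ *     int fd = -1;  /* driver writes the post-syncpt fd here on flip */
+ *     drmModeAtomicAddProperty(req, plane_id, nv_out_fence_ptr_prop_id,
+ *                              (uint64_t)(uintptr_t)&fd);
+ *     drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);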
+ */ +static int nv_drm_create_properties(struct nv_drm_device *nv_dev) +{ + struct drm_prop_enum_list enum_list[3] = { }; + int i, len = 0; + + for (i = 0; i < 3; i++) { + enum_list[len].type = i; + enum_list[len].name = nv_get_input_colorspace_name(i); + len++; + } + +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) + if (!nv_dev->supportsSyncpts) { + return 0; + } + + nv_dev->nv_out_fence_property = + drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_ATOMIC, + "NV_DRM_OUT_FENCE_PTR", 0, U64_MAX); + if (nv_dev->nv_out_fence_property == NULL) { + return -ENOMEM; + } +#endif + + nv_dev->nv_input_colorspace_property = + drm_property_create_enum(nv_dev->dev, 0, "NV_INPUT_COLORSPACE", + enum_list, len); + if (nv_dev->nv_input_colorspace_property == NULL) { + NV_DRM_LOG_ERR("Failed to create NV_INPUT_COLORSPACE property"); + return -ENOMEM; + } + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + nv_dev->nv_hdr_output_metadata_property = + drm_property_create(nv_dev->dev, DRM_MODE_PROP_BLOB, + "NV_HDR_STATIC_METADATA", 0); + if (nv_dev->nv_hdr_output_metadata_property == NULL) { + return -ENOMEM; + } +#endif + + return 0; +} + +static int nv_drm_load(struct drm_device *dev, unsigned long flags) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct NvKmsKapiDevice *pDevice; + + struct NvKmsKapiAllocateDeviceParams allocateDeviceParams; + struct NvKmsKapiDeviceResourcesInfo resInfo; +#endif +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) + NvU64 kind; + NvU64 gen; + int i; +#endif + int ret; + + struct nv_drm_device *nv_dev = to_nv_device(dev); + + NV_DRM_DEV_LOG_INFO(nv_dev, "Loading driver"); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return 0; + } + + /* Allocate NvKmsKapiDevice from GPU ID */ + + memset(&allocateDeviceParams, 0, sizeof(allocateDeviceParams)); + + allocateDeviceParams.gpuId = nv_dev->gpu_info.gpu_id; + + allocateDeviceParams.privateData = nv_dev; + allocateDeviceParams.eventCallback = nv_drm_event_callback; + + pDevice = nvKms->allocateDevice(&allocateDeviceParams); + + if (pDevice == NULL) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate NvKmsKapiDevice"); + return -ENODEV; + } + + /* Query information of resources available on device */ + + if (!nvKms->getDeviceResourcesInfo(pDevice, &resInfo)) { + + nvKms->freeDevice(pDevice); + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to query NvKmsKapiDevice resources info"); + return -ENODEV; + } + + mutex_lock(&nv_dev->lock); + + /* Set NvKmsKapiDevice */ + + nv_dev->pDevice = pDevice; + + nv_dev->pitchAlignment = resInfo.caps.pitchAlignment; + + nv_dev->hasVideoMemory = resInfo.caps.hasVideoMemory; + + nv_dev->genericPageKind = resInfo.caps.genericPageKind; + + // Fermi-Volta use generation 0, Turing+ uses generation 2. + nv_dev->pageKindGeneration = (nv_dev->genericPageKind == 0x06) ? 
2 : 0; + + // Desktop GPUs and mobile GPUs Xavier and later use the same sector layout + nv_dev->sectorLayout = 1; + + nv_dev->supportsSyncpts = resInfo.caps.supportsSyncpts; + +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) + gen = nv_dev->pageKindGeneration; + kind = nv_dev->genericPageKind; + + for (i = 0; i <= 5; i++) { + nv_dev->modifiers[i] = + /* Log2(block height) ----------------------------------+ * + * Page Kind ------------------------------------+ | * + * Gob Height/Page Kind Generation --------+ | | * + * Sector layout ---------------------+ | | | * + * Compression --------------------+ | | | | * + * | | | | | */ + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, gen, kind, 5 - i); + } + + nv_dev->modifiers[i++] = DRM_FORMAT_MOD_LINEAR; + nv_dev->modifiers[i++] = DRM_FORMAT_MOD_INVALID; +#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */ + + /* Initialize drm_device::mode_config */ + + nv_drm_init_mode_config(nv_dev, &resInfo); + + ret = nv_drm_create_properties(nv_dev); + if (ret < 0) { + return -ENODEV; + } + + if (!nvKms->declareEventInterest( + nv_dev->pDevice, + ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) | + (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | + (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED)))) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register event mask"); + } + + /* Add crtcs */ + + nv_drm_enumerate_crtcs_and_planes(nv_dev, &resInfo); + + /* Add connectors and encoders */ + + nv_drm_enumerate_encoders_and_connectors(nv_dev); + +#if !defined(NV_DRM_CRTC_STATE_HAS_NO_VBLANK) + drm_vblank_init(dev, dev->mode_config.num_crtc); +#endif + + /* + * Trigger hot-plug processing, to update connection status of + * all HPD supported connectors. + */ + + drm_helper_hpd_irq_event(dev); + + /* Enable event handling */ + + atomic_set(&nv_dev->enable_event_handling, true); + + init_waitqueue_head(&nv_dev->flip_event_wq); + + mutex_unlock(&nv_dev->lock); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + + return 0; +} + +static void __nv_drm_unload(struct drm_device *dev) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct NvKmsKapiDevice *pDevice = NULL; +#endif + + struct nv_drm_device *nv_dev = to_nv_device(dev); + + NV_DRM_DEV_LOG_INFO(nv_dev, "Unloading driver"); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return; + } + + mutex_lock(&nv_dev->lock); + + /* Disable event handling */ + + atomic_set(&nv_dev->enable_event_handling, false); + + /* Clean up output polling */ + + drm_kms_helper_poll_fini(dev); + + /* Clean up mode configuration */ + + drm_mode_config_cleanup(dev); + + if (!nvKms->declareEventInterest(nv_dev->pDevice, 0x0)) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to stop event listening"); + } + + /* Unset NvKmsKapiDevice */ + + pDevice = nv_dev->pDevice; + nv_dev->pDevice = NULL; + + mutex_unlock(&nv_dev->lock); + + nvKms->freeDevice(pDevice); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ +} + +#if defined(NV_DRM_DRIVER_UNLOAD_HAS_INT_RETURN_TYPE) +static int nv_drm_unload(struct drm_device *dev) +{ + __nv_drm_unload(dev); + + return 0; +} +#else +static void nv_drm_unload(struct drm_device *dev) +{ + __nv_drm_unload(dev); +} +#endif + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +static int __nv_drm_master_set(struct drm_device *dev, + struct drm_file *file_priv, bool from_open) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + if (!nvKms->grabOwnership(nv_dev->pDevice)) { + return -EINVAL; + } + + return 0; +} + +#if defined(NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE) +static int 
nv_drm_master_set(struct drm_device *dev,
+                  struct drm_file *file_priv, bool from_open)
+{
+    return __nv_drm_master_set(dev, file_priv, from_open);
+}
+#else
+static void nv_drm_master_set(struct drm_device *dev,
+                              struct drm_file *file_priv, bool from_open)
+{
+    if (__nv_drm_master_set(dev, file_priv, from_open) != 0) {
+        NV_DRM_DEV_LOG_ERR(to_nv_device(dev), "Failed to grab modeset ownership");
+    }
+}
+#endif
+
+
+#if defined(NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG)
+static
+void nv_drm_master_drop(struct drm_device *dev,
+                        struct drm_file *file_priv, bool from_release)
+#else
+static
+void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+#endif
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    int err;
+
+    /*
+     * After dropping nvkms modeset ownership, it is not guaranteed that
+     * drm and nvkms modeset state will remain in sync. Therefore, disable
+     * all outputs and crtcs before dropping nvkms modeset ownership.
+     *
+     * First disable all active outputs atomically, then disable each crtc
+     * one by one; there is no helper function available to disable all
+     * crtcs atomically.
+     */
+
+    drm_modeset_lock_all(dev);
+
+    if ((err = nv_drm_atomic_helper_disable_all(
+            dev,
+            dev->mode_config.acquire_ctx)) != 0) {
+
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "nv_drm_atomic_helper_disable_all failed with error code %d!",
+            err);
+    }
+
+    drm_modeset_unlock_all(dev);
+
+    nvKms->releaseOwnership(nv_dev->pDevice);
+}
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_BUS_PRESENT) || defined(NV_DRM_DRIVER_HAS_SET_BUSID)
+static int nv_drm_pci_set_busid(struct drm_device *dev,
+                                struct drm_master *master)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    master->unique = nv_drm_asprintf("pci:%04x:%02x:%02x.%d",
+                                     nv_dev->gpu_info.pci_info.domain,
+                                     nv_dev->gpu_info.pci_info.bus,
+                                     nv_dev->gpu_info.pci_info.slot,
+                                     nv_dev->gpu_info.pci_info.function);
+
+    if (master->unique == NULL) {
+        return -ENOMEM;
+    }
+
+    master->unique_len = strlen(master->unique);
+
+    return 0;
+}
+#endif
+
+static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
+                                     void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_get_dev_info_params *params = data;
+
+    if (dev->primary == NULL) {
+        return -ENOENT;
+    }
+
+    params->gpu_id = nv_dev->gpu_info.gpu_id;
+    params->primary_index = dev->primary->index;
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    params->generic_page_kind = nv_dev->genericPageKind;
+    params->page_kind_generation = nv_dev->pageKindGeneration;
+    params->sector_layout = nv_dev->sectorLayout;
+#else
+    params->generic_page_kind = 0;
+    params->page_kind_generation = 0;
+    params->sector_layout = 0;
+#endif
+
+    return 0;
+}
+
+static
+int nv_drm_get_client_capability_ioctl(struct drm_device *dev,
+                                       void *data, struct drm_file *filep)
+{
+    struct drm_nvidia_get_client_capability_params *params = data;
+
+    switch (params->capability) {
+#if defined(DRM_CLIENT_CAP_STEREO_3D)
+        case DRM_CLIENT_CAP_STEREO_3D:
+            params->value = filep->stereo_allowed;
+            break;
+#endif
+#if defined(DRM_CLIENT_CAP_UNIVERSAL_PLANES)
+        case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
+            params->value = filep->universal_planes;
+            break;
+#endif
+#if defined(DRM_CLIENT_CAP_ATOMIC)
+        case DRM_CLIENT_CAP_ATOMIC:
+            params->value = filep->atomic;
+            break;
+#endif
+        default:
+            return -EINVAL;
+    }
+
+    return 0;
+}
+
+#if defined(NV_DRM_BUS_PRESENT)
+
+#if defined(NV_DRM_BUS_HAS_GET_IRQ)
+static int nv_drm_bus_get_irq(struct drm_device *dev)
+{
+    return 0;
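+
+    /*
+     * No device interrupt is routed through DRM by this driver, so the
+     * legacy bus code is simply told there is no IRQ.
+     */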
+} +#endif + +#if defined(NV_DRM_BUS_HAS_GET_NAME) +static const char *nv_drm_bus_get_name(struct drm_device *dev) +{ + return "nvidia-drm"; +} +#endif + +static struct drm_bus nv_drm_bus = { +#if defined(NV_DRM_BUS_HAS_BUS_TYPE) + .bus_type = DRIVER_BUS_PCI, +#endif +#if defined(NV_DRM_BUS_HAS_GET_IRQ) + .get_irq = nv_drm_bus_get_irq, +#endif +#if defined(NV_DRM_BUS_HAS_GET_NAME) + .get_name = nv_drm_bus_get_name, +#endif + .set_busid = nv_drm_pci_set_busid, +}; + +#endif /* NV_DRM_BUS_PRESENT */ + +static const struct file_operations nv_drm_fops = { + .owner = THIS_MODULE, + + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = drm_compat_ioctl, +#endif + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + .mmap = nv_drm_mmap, +#endif + + .poll = drm_poll, + .read = drm_read, + + .llseek = noop_llseek, +}; + +static const struct drm_ioctl_desc nv_drm_ioctls[] = { +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_NVKMS_MEMORY, + nv_drm_gem_import_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_USERSPACE_MEMORY, + nv_drm_gem_import_userspace_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_MAP_OFFSET, + nv_drm_gem_map_offset_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_DEV_INFO, + nv_drm_get_dev_info_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + +#if defined(NV_DRM_FENCE_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED, + nv_drm_fence_supported_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_CONTEXT_CREATE, + nv_drm_fence_context_create_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_FENCE_ATTACH, + nv_drm_gem_fence_attach_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), +#endif + + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY, + nv_drm_get_client_capability_ioctl, + 0), +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32, + nv_drm_get_crtc_crc32_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32_V2, + nv_drm_get_crtc_crc32_v2_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_NVKMS_MEMORY, + nv_drm_gem_export_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_ALLOC_NVKMS_MEMORY, + nv_drm_gem_alloc_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_DMABUF_MEMORY, + nv_drm_gem_export_dmabuf_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IDENTIFY_OBJECT, + nv_drm_gem_identify_object_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ +}; + +static struct drm_driver nv_drm_driver = { + + .driver_features = +#if defined(NV_DRM_DRIVER_PRIME_FLAG_PRESENT) + DRIVER_PRIME | +#endif + DRIVER_GEM | DRIVER_RENDER, + +#if defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) + .gem_free_object = nv_drm_gem_free, +#endif + + .ioctls = nv_drm_ioctls, + .num_ioctls = ARRAY_SIZE(nv_drm_ioctls), + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = nv_drm_gem_prime_import, + .gem_prime_import_sg_table = nv_drm_gem_prime_import_sg_table, + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = nv_drm_gem_prime_get_sg_table, + .gem_prime_vmap = 
nv_drm_gem_prime_vmap,
+    .gem_prime_vunmap = nv_drm_gem_prime_vunmap,
+
+    .gem_vm_ops = &nv_drm_gem_vma_ops,
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+    .gem_prime_res_obj = nv_drm_gem_prime_res_obj,
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_SET_BUSID)
+    .set_busid = nv_drm_pci_set_busid,
+#endif
+
+    .load = nv_drm_load,
+    .unload = nv_drm_unload,
+
+    .fops = &nv_drm_fops,
+
+#if defined(NV_DRM_BUS_PRESENT)
+    .bus = &nv_drm_bus,
+#endif
+
+    .name = "nvidia-drm",
+
+    .desc = "NVIDIA DRM driver",
+    .date = "20160202",
+
+#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
+    .device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
+#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
+    .legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
+#endif
+};
+
+
+/*
+ * Update the global nv_drm_driver for the intended features.
+ *
+ * It defaults to PRIME-only, but is upgraded to atomic modeset if the
+ * kernel supports atomic modeset and the 'modeset' kernel module
+ * parameter is true.
+ */
+static void nv_drm_update_drm_driver_features(void)
+{
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+    if (!nv_drm_modeset_module_param) {
+        return;
+    }
+
+    nv_drm_driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
+
+    nv_drm_driver.master_set = nv_drm_master_set;
+    nv_drm_driver.master_drop = nv_drm_master_drop;
+
+    nv_drm_driver.dumb_create = nv_drm_dumb_create;
+    nv_drm_driver.dumb_map_offset = nv_drm_dumb_map_offset;
+    nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+}
+
+
+
+/*
+ * Helper function to allocate and register a DRM device for a given
+ * NVIDIA GPU ID.
+ */
+static void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
+{
+    struct nv_drm_device *nv_dev = NULL;
+    struct drm_device *dev = NULL;
+    struct device *device = gpu_info->os_device_ptr;
+
+    DRM_DEBUG(
+        "Registering device for NVIDIA GPU ID 0x%08x",
+        gpu_info->gpu_id);
+
+    /* Allocate NVIDIA-DRM device */
+
+    nv_dev = nv_drm_calloc(1, sizeof(*nv_dev));
+
+    if (nv_dev == NULL) {
+        NV_DRM_LOG_ERR(
+            "Failed to allocate memory for NVIDIA-DRM device object");
+        return;
+    }
+
+    nv_dev->gpu_info = *gpu_info;
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    mutex_init(&nv_dev->lock);
+#endif
+
+    /* Allocate DRM device */
+
+    dev = drm_dev_alloc(&nv_drm_driver, device);
+
+    if (dev == NULL) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate device");
+        goto failed_drm_alloc;
+    }
+
+    dev->dev_private = nv_dev;
+    nv_dev->dev = dev;
+
+#if defined(NV_DRM_DEVICE_HAS_PDEV)
+    if (device->bus == &pci_bus_type) {
+        dev->pdev = to_pci_dev(device);
+    }
+#endif
+
+    /* Register DRM device to DRM sub-system */
+
+    if (drm_dev_register(dev, 0) != 0) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register device");
+        goto failed_drm_register;
+    }
+
+    /* Add NVIDIA-DRM device into list */
+
+    nv_dev->next = dev_list;
+    dev_list = nv_dev;
+
+    return; /* Success */
+
+failed_drm_register:
+
+    nv_drm_dev_free(dev);
+
+failed_drm_alloc:
+
+    nv_drm_free(nv_dev);
+}
+
+/*
+ * Enumerate NVIDIA GPUs and allocate/register a DRM device for each of them.
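+ *
+ * (A registration failure for an individual GPU is logged and skipped, so
+ * one failing device does not prevent the remaining GPUs from registering.)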
+ */
+int nv_drm_probe_devices(void)
+{
+    nv_gpu_info_t *gpu_info = NULL;
+    NvU32 gpu_count = 0;
+    NvU32 i;
+
+    int ret = 0;
+
+    nv_drm_update_drm_driver_features();
+
+    /* Enumerate NVIDIA GPUs */
+
+    gpu_info = nv_drm_calloc(NV_MAX_GPUS, sizeof(*gpu_info));
+
+    if (gpu_info == NULL) {
+        ret = -ENOMEM;
+
+        NV_DRM_LOG_ERR("Failed to allocate GPU info array");
+        goto done;
+    }
+
+    gpu_count = nvKms->enumerateGpus(gpu_info);
+
+    if (gpu_count == 0) {
+        NV_DRM_LOG_INFO("No NVIDIA GPUs found");
+        goto done;
+    }
+
+    WARN_ON(gpu_count > NV_MAX_GPUS);
+
+    /* Register DRM device for each NVIDIA GPU */
+
+    for (i = 0; i < gpu_count; i++) {
+        nv_drm_register_drm_device(&gpu_info[i]);
+    }
+
+done:
+
+    nv_drm_free(gpu_info);
+
+    return ret;
+}
+
+/*
+ * Unregister all NVIDIA DRM devices.
+ */
+void nv_drm_remove_devices(void)
+{
+    while (dev_list != NULL) {
+        struct nv_drm_device *next = dev_list->next;
+
+        drm_dev_unregister(dev_list->dev);
+        nv_drm_dev_free(dev_list->dev);
+
+        nv_drm_free(dev_list);
+
+        dev_list = next;
+    }
+}
+
+#endif /* NV_DRM_AVAILABLE */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h
new file mode 100644
index 0000000..cd20ec9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_DRV_H__
+#define __NVIDIA_DRM_DRV_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+int nv_drm_probe_devices(void);
+
+void nv_drm_remove_devices(void);
+
+#endif /* defined(NV_DRM_AVAILABLE) */
+
+#endif /* __NVIDIA_DRM_DRV_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c
new file mode 100644
index 0000000..653b432
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-encoder.h"
+#include "nvidia-drm-utils.h"
+#include "nvidia-drm-connector.h"
+#include "nvidia-drm-crtc.h"
+#include "nvidia-drm-helper.h"
+
+#include "nvmisc.h"
+
+/*
+ * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
+ * moves a number of helper function definitions from
+ * drm/drm_crtc_helper.h to a new drm_probe_helper.h.
+ */
+#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
+#include <drm/drm_probe_helper.h>
+#endif
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+
+static void nv_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+    struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
+
+    drm_encoder_cleanup(encoder);
+
+    nv_drm_free(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv_encoder_funcs = {
+    .destroy = nv_drm_encoder_destroy,
+};
+
+static bool nv_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+                                      const struct drm_display_mode *mode,
+                                      struct drm_display_mode *adjusted_mode)
+{
+    return true;
+}
+
+static void nv_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void nv_drm_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void nv_drm_encoder_mode_set(struct drm_encoder *encoder,
+                                    struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static const struct drm_encoder_helper_funcs nv_encoder_helper_funcs = {
+    .mode_fixup = nv_drm_encoder_mode_fixup,
+    .prepare = nv_drm_encoder_prepare,
+    .commit = nv_drm_encoder_commit,
+    .mode_set = nv_drm_encoder_mode_set,
+};
+
+static uint32_t get_crtc_mask(struct drm_device *dev, uint32_t headMask)
+{
+    struct drm_crtc *crtc = NULL;
+    uint32_t crtc_mask = 0x0;
+
+    nv_drm_for_each_crtc(crtc, dev) {
+        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+
+        if (headMask & NVBIT(nv_crtc->head)) {
+            crtc_mask |= drm_crtc_mask(crtc);
+        }
+    }
+
+    return crtc_mask;
+}
+
+/*
+ * Helper function to create a new encoder for a given NvKmsKapiDisplay
+ * with a given signal format.
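+ *
+ * (The encoder's possible_crtcs bitmask is derived from the NVKMS head
+ * mask via get_crtc_mask() above.)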
+ */
+static struct drm_encoder*
+nv_drm_encoder_new(struct drm_device *dev,
+                   NvKmsKapiDisplay hDisplay,
+                   NvKmsConnectorSignalFormat format,
+                   unsigned int crtc_mask)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    int ret = 0;
+
+    /* Allocate an NVIDIA encoder object */
+
+    nv_encoder = nv_drm_calloc(1, sizeof(*nv_encoder));
+
+    if (nv_encoder == NULL) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to allocate memory for NVIDIA-DRM encoder object");
+        return ERR_PTR(-ENOMEM);
+    }
+
+    nv_encoder->hDisplay = hDisplay;
+
+    /* Initialize the base encoder object and add it to the drm subsystem */
+
+    ret = drm_encoder_init(dev,
+                           &nv_encoder->base, &nv_encoder_funcs,
+                           nvkms_connector_signal_to_drm_encoder_signal(format)
+#if defined(NV_DRM_ENCODER_INIT_HAS_NAME_ARG)
+                           , NULL
+#endif
+                           );
+
+    if (ret != 0) {
+        nv_drm_free(nv_encoder);
+
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to initialize encoder created from NvKmsKapiDisplay 0x%08x",
+            hDisplay);
+        return ERR_PTR(ret);
+    }
+
+    nv_encoder->base.possible_crtcs = crtc_mask;
+
+    drm_encoder_helper_add(&nv_encoder->base, &nv_encoder_helper_funcs);
+
+    return &nv_encoder->base;
+}
+
+/*
+ * Add encoder for given NvKmsKapiDisplay
+ */
+struct drm_encoder*
+nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
+    struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
+
+    struct drm_encoder *encoder = NULL;
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    struct drm_connector *connector = NULL;
+
+    int ret = 0;
+
+    /* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */
+
+    if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
+        ret = -ENOMEM;
+        goto done;
+    }
+
+    if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
+        ret = -EINVAL;
+        goto done;
+    }
+
+    connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
+                                             displayInfo->connectorHandle);
+
+    if (IS_ERR(connectorInfo)) {
+        ret = PTR_ERR(connectorInfo);
+        goto done;
+    }
+
+    /* Create and add drm encoder */
+
+    encoder = nv_drm_encoder_new(dev,
+                                 displayInfo->handle,
+                                 connectorInfo->signalFormat,
+                                 get_crtc_mask(dev, connectorInfo->headMask));
+
+    if (IS_ERR(encoder)) {
+        ret = PTR_ERR(encoder);
+        goto done;
+    }
+
+    /* Get connector from respective physical index */
+
+    connector =
+        nv_drm_get_connector(dev,
+                             connectorInfo->physicalIndex,
+                             connectorInfo->type,
+                             displayInfo->internal, displayInfo->dpAddress);
+
+    if (IS_ERR(connector)) {
+        ret = PTR_ERR(connector);
+        goto failed_connector_encoder_attach;
+    }
+
+    /* Attach encoder and connector */
+
+    ret = nv_drm_connector_attach_encoder(connector, encoder);
+
+    if (ret != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to attach encoder created from NvKmsKapiDisplay 0x%08x "
+            "to connector",
+            hDisplay);
+        goto failed_connector_encoder_attach;
+    }
+
+    nv_encoder = to_nv_encoder(encoder);
+
+    mutex_lock(&dev->mode_config.mutex);
+
+    nv_encoder->nv_connector = to_nv_connector(connector);
+
+    nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
+
+    mutex_unlock(&dev->mode_config.mutex);
+
+    goto done;
+
+failed_connector_encoder_attach:
+
+    drm_encoder_cleanup(encoder);
+
+    /* Free the containing nv_drm_encoder, not the embedded drm_encoder */
+    nv_drm_free(to_nv_encoder(encoder));
+
+done:
+
+    nv_drm_free(displayInfo);
+
+    nv_drm_free(connectorInfo);
+
+    return ret != 0 ? ERR_PTR(ret) : encoder;
+}
+
+static inline struct nv_drm_encoder*
+get_nv_encoder_from_nvkms_display(struct drm_device *dev,
+                                  NvKmsKapiDisplay hDisplay)
+{
+    struct drm_encoder *encoder;
+
+    nv_drm_for_each_encoder(encoder, dev) {
+        struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
+
+        if (nv_encoder->hDisplay == hDisplay) {
+            return nv_encoder;
+        }
+    }
+
+    return NULL;
+}
+
+void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
+                                  NvKmsKapiDisplay hDisplay)
+{
+    struct drm_device *dev = nv_dev->dev;
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    mutex_lock(&dev->mode_config.mutex);
+
+    nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
+
+    mutex_unlock(&dev->mode_config.mutex);
+
+    if (nv_encoder == NULL) {
+        return;
+    }
+
+    nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
+
+    drm_kms_helper_hotplug_event(dev);
+}
+
+void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
+                                             NvKmsKapiDisplay hDisplay)
+{
+    struct drm_device *dev = nv_dev->dev;
+
+    struct drm_encoder *encoder = NULL;
+    struct nv_drm_encoder *nv_encoder = NULL;
+
+    /*
+     * Check whether an encoder already exists for this hDisplay; a
+     * dynamic display must not be connected twice.
+     */
+
+    nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
+
+    if (nv_encoder != NULL) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Encoder with NvKmsKapiDisplay 0x%08x already exists.",
+            hDisplay);
+        return;
+    }
+
+    encoder = nv_drm_add_encoder(dev, hDisplay);
+
+    if (IS_ERR(encoder)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to add encoder for NvKmsKapiDisplay 0x%08x",
+            hDisplay);
+        return;
+    }
+
+    /*
+     * On some kernels, DRM has the notion of a "primary group" that
+     * tracks the global mode setting state for the device.
+     *
+     * On kernels where DRM has a primary group, we need to reinitialize
+     * after adding encoders and connectors.
+     */
+#if defined(NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT)
+    drm_reinit_primary_mode_group(dev);
+#endif
+
+    drm_kms_helper_hotplug_event(dev);
+}
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h
new file mode 100644
index 0000000..bbaf986
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_ENCODER_H__
+#define __NVIDIA_DRM_ENCODER_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+
+#if defined(NV_DRM_DRM_ENCODER_H_PRESENT)
+#include <drm/drm_encoder.h>
+#else
+#include <drm/drm_crtc.h>
+#endif
+
+#include "nvkms-kapi.h"
+
+struct nv_drm_encoder {
+    NvKmsKapiDisplay hDisplay;
+
+    struct nv_drm_connector *nv_connector;
+
+    struct drm_encoder base;
+};
+
+static inline struct nv_drm_encoder *to_nv_encoder(
+    struct drm_encoder *encoder)
+{
+    if (encoder == NULL) {
+        return NULL;
+    }
+    return container_of(encoder, struct nv_drm_encoder, base);
+}
+
+struct drm_encoder*
+nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay);
+
+void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
+                                  NvKmsKapiDisplay hDisplay);
+
+void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
+                                             NvKmsKapiDisplay hDisplay);
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#endif /* __NVIDIA_DRM_ENCODER_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c
new file mode 100644
index 0000000..d119e7c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-ioctl.h"
+#include "nvidia-drm-fb.h"
+#include "nvidia-drm-utils.h"
+#include "nvidia-drm-gem.h"
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-format.h"
+
+#include <drm/drm_crtc_helper.h>
+
+static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
+{
+    uint32_t i;
+
+    /* Unreference gem object */
+    for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
+        if (nv_fb->nv_gem[i] != NULL) {
+            nv_drm_gem_object_unreference_unlocked(nv_fb->nv_gem[i]);
+        }
+    }
+
+    /* Free framebuffer */
+    nv_drm_free(nv_fb);
+}
+
+static void nv_drm_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(fb->dev);
+    struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
+
+    /* Clean up core framebuffer object */
+
+    drm_framebuffer_cleanup(fb);
+
+    /* Free NvKmsKapiSurface associated with this framebuffer object */
+
+    nvKms->destroySurface(nv_dev->pDevice, nv_fb->pSurface);
+
+    __nv_drm_framebuffer_free(nv_fb);
+}
+
+static int
+nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                 struct drm_file *file, unsigned int *handle)
+{
+    struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
+
+    return nv_drm_gem_handle_create(file,
+                                    nv_fb->nv_gem[0],
+                                    handle);
+}
+
+static struct drm_framebuffer_funcs nv_framebuffer_funcs = {
+    .destroy = nv_drm_framebuffer_destroy,
+    .create_handle = nv_drm_framebuffer_create_handle,
+};
+
+static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
+    struct drm_device *dev,
+    struct drm_file *file,
+    struct drm_mode_fb_cmd2 *cmd)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct nv_drm_framebuffer *nv_fb;
+    const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
+    uint32_t i;
+
+    /* Allocate memory for the framebuffer object */
+    nv_fb = nv_drm_calloc(1, sizeof(*nv_fb));
+
+    if (nv_fb == NULL) {
+        NV_DRM_DEV_DEBUG_DRIVER(
+            nv_dev,
+            "Failed to allocate memory for framebuffer object");
+        return ERR_PTR(-ENOMEM);
+    }
+
+    if (num_planes > ARRAY_SIZE(nv_fb->nv_gem)) {
+        NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes");
+        goto failed;
+    }
+
+    for (i = 0; i < num_planes; i++) {
+        if ((nv_fb->nv_gem[i] = nv_drm_gem_object_lookup(
+                dev,
+                file,
+                cmd->handles[i])) == NULL) {
+            NV_DRM_DEV_DEBUG_DRIVER(
+                nv_dev,
+                "Failed to find gem object of type nvkms memory");
+            goto failed;
+        }
+    }
+
+    return nv_fb;
+
+failed:
+
+    __nv_drm_framebuffer_free(nv_fb);
+
+    return ERR_PTR(-ENOENT);
+}
+
+static int nv_drm_framebuffer_init(struct drm_device *dev,
+                                   struct nv_drm_framebuffer *nv_fb,
+                                   enum NvKmsSurfaceMemoryFormat format,
+                                   bool have_modifier,
+                                   uint64_t modifier)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct NvKmsKapiCreateSurfaceParams params = { };
+    uint32_t i;
+    int ret;
+
+    /* Initialize the base framebuffer object and add it to drm subsystem */
+
+    ret = drm_framebuffer_init(dev, &nv_fb->base, &nv_framebuffer_funcs);
+    if (ret != 0) {
+        NV_DRM_DEV_DEBUG_DRIVER(
+            nv_dev,
+            "Failed to initialize framebuffer object");
+        return ret;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
+        if (nv_fb->nv_gem[i] != NULL) {
+            params.planes[i].memory = nv_fb->nv_gem[i]->pMemory;
+            params.planes[i].offset = nv_fb->base.offsets[i];
+            params.planes[i].pitch = nv_fb->base.pitches[i];
+        }
+    }
+    params.height = nv_fb->base.height;
+    params.width = nv_fb->base.width;
+    params.format = format;
+
+    if
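+    /*
+     * In the NVIDIA block-linear modifier encoding
+     * (DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D), bit 4 is always set and
+     * bits [3:0] hold log2 of the block height in GOBs; the two
+     * expressions below decode exactly those fields.
+     */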
(have_modifier) { + params.explicit_layout = true; + params.layout = (modifier & 0x10) ? + NvKmsSurfaceMemoryLayoutBlockLinear : + NvKmsSurfaceMemoryLayoutPitch; + params.log2GobsPerBlockY = modifier & 0xf; + } else { + params.explicit_layout = false; + } + + /* Create NvKmsKapiSurface */ + + nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, ¶ms); + if (nv_fb->pSurface == NULL) { + NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface"); + drm_framebuffer_cleanup(&nv_fb->base); + return -EINVAL; + } + + return 0; +} + +struct drm_framebuffer *nv_drm_internal_framebuffer_create( + struct drm_device *dev, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_framebuffer *nv_fb; + uint64_t modifier = 0; + int ret; + enum NvKmsSurfaceMemoryFormat format; +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) + int i; +#endif + bool have_modifier = false; + + /* Check whether NvKms supports the given pixel format */ + if (!nv_drm_format_to_nvkms_format(cmd->pixel_format, &format)) { + NV_DRM_DEV_DEBUG_DRIVER( + nv_dev, + "Unsupported drm pixel format 0x%08x", cmd->pixel_format); + return ERR_PTR(-EINVAL); + } + +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) + if (cmd->flags & DRM_MODE_FB_MODIFIERS) { + have_modifier = true; + modifier = cmd->modifier[0]; + + for (i = 0; nv_dev->modifiers[i] != DRM_FORMAT_MOD_INVALID; i++) { + if (nv_dev->modifiers[i] == modifier) { + break; + } + } + + if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) { + NV_DRM_DEV_DEBUG_DRIVER( + nv_dev, + "Invalid format modifier for framebuffer object: 0x%016llx", + modifier); + return ERR_PTR(-EINVAL); + } + } +#endif + + nv_fb = nv_drm_framebuffer_alloc(dev, file, cmd); + if (IS_ERR(nv_fb)) { + return (struct drm_framebuffer *)nv_fb; + } + + /* Fill out framebuffer metadata from the userspace fb creation request */ + + drm_helper_mode_fill_fb_struct( + #if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG) + dev, + #endif + &nv_fb->base, + cmd); + + /* + * Finish up FB initialization by creating the backing NVKMS surface and + * publishing the DRM fb + */ + + ret = nv_drm_framebuffer_init(dev, nv_fb, format, have_modifier, modifier); + + if (ret != 0) { + __nv_drm_framebuffer_free(nv_fb); + return ERR_PTR(ret); + } + + return &nv_fb->base; +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h new file mode 100644 index 0000000..cf477cc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_FB_H__
+#define __NVIDIA_DRM_FB_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT)
+#include <drm/drm_framebuffer.h>
+#endif
+
+#include "nvidia-drm-gem-nvkms-memory.h"
+#include "nvkms-kapi.h"
+
+struct nv_drm_framebuffer {
+    struct NvKmsKapiSurface *pSurface;
+
+    struct nv_drm_gem_object*
+        nv_gem[NVKMS_MAX_PLANES_PER_SURFACE];
+
+    struct drm_framebuffer base;
+};
+
+static inline struct nv_drm_framebuffer *to_nv_framebuffer(
+    struct drm_framebuffer *fb)
+{
+    if (fb == NULL) {
+        return NULL;
+    }
+    return container_of(fb, struct nv_drm_framebuffer, base);
+}
+
+struct drm_framebuffer *nv_drm_internal_framebuffer_create(
+    struct drm_device *dev,
+    struct drm_file *file,
+    struct drm_mode_fb_cmd2 *cmd);
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#endif /* __NVIDIA_DRM_FB_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c
new file mode 100644
index 0000000..b1831e3
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+#include <drm/drm_fourcc.h>
+#include <linux/bitops.h>
+
+#include "nvidia-drm-format.h"
+#include "nvidia-drm-os-interface.h"
+
+static const u32 nvkms_to_drm_format[] = {
+    /* RGB formats */
+    [NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555,
+    [NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555,
+    [NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565,
+    [NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888,
+    [NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888,
+    [NvKmsSurfaceMemoryFormatX8B8G8R8] = DRM_FORMAT_XBGR8888,
+    [NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010,
+    [NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010,
+    [NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888,
+
+    [NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422] = DRM_FORMAT_YUYV,
+    [NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422] = DRM_FORMAT_UYVY,
+
+    /* YUV semi-planar formats
+     *
+     * NVKMS YUV semi-planar formats are MSB aligned. Yx__UxVx means
+     * that the UV components are packed like UUUUUVVVVV (MSB to LSB)
+     * and Yx__VxUx means VVVVVUUUUU (MSB to LSB).
+     */
+
+    /*
+     * 2 plane YCbCr
+     * index 0 = Y plane, [7:0] Y
+     * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+     * or
+     * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+     */
+    [NvKmsSurfaceMemoryFormatY8___V8U8_N444] = DRM_FORMAT_NV24, /* non-subsampled Cr:Cb plane */
+    [NvKmsSurfaceMemoryFormatY8___U8V8_N444] = DRM_FORMAT_NV42, /* non-subsampled Cb:Cr plane */
+    [NvKmsSurfaceMemoryFormatY8___V8U8_N422] = DRM_FORMAT_NV16, /* 2x1 subsampled Cr:Cb plane */
+    [NvKmsSurfaceMemoryFormatY8___U8V8_N422] = DRM_FORMAT_NV61, /* 2x1 subsampled Cb:Cr plane */
+    [NvKmsSurfaceMemoryFormatY8___V8U8_N420] = DRM_FORMAT_NV12, /* 2x2 subsampled Cr:Cb plane */
+    [NvKmsSurfaceMemoryFormatY8___U8V8_N420] = DRM_FORMAT_NV21, /* 2x2 subsampled Cb:Cr plane */
+
+#if defined(DRM_FORMAT_P210)
+    /*
+     * 2 plane YCbCr MSB aligned
+     * index 0 = Y plane, [15:0] Y:x [10:6] little endian
+     * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
+     *
+     * 2x1 subsampled Cr:Cb plane, 10 bit per channel
+     */
+    [NvKmsSurfaceMemoryFormatY10___V10U10_N422] = DRM_FORMAT_P210,
+#endif
+
+#if defined(DRM_FORMAT_P010)
+    /*
+     * 2 plane YCbCr MSB aligned
+     * index 0 = Y plane, [15:0] Y:x [10:6] little endian
+     * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
+     *
+     * 2x2 subsampled Cr:Cb plane 10 bits per channel
+     */
+    [NvKmsSurfaceMemoryFormatY10___V10U10_N420] = DRM_FORMAT_P010,
+#endif
+
+#if defined(DRM_FORMAT_P012)
+    /*
+     * 2 plane YCbCr MSB aligned
+     * index 0 = Y plane, [15:0] Y:x [12:4] little endian
+     * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
+     *
+     * 2x2 subsampled Cr:Cb plane 12 bits per channel
+     */
+    [NvKmsSurfaceMemoryFormatY12___V12U12_N420] = DRM_FORMAT_P012,
+#endif
+};
+
+bool nv_drm_format_to_nvkms_format(u32 format,
+                                   enum NvKmsSurfaceMemoryFormat *nvkms_format)
+{
+    enum NvKmsSurfaceMemoryFormat i;
+    for (i = 0; i < ARRAY_SIZE(nvkms_to_drm_format); i++) {
+        /*
+         * Note nvkms_to_drm_format[] is sparsely populated: it doesn't
+         * handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0
+         * entries when iterating through it.
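+         * (Unpopulated entries are zero because the array uses designated
+         * initializers, and no valid DRM fourcc code is zero, so a zero
+         * entry reliably marks a hole.)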
+ */ + if (nvkms_to_drm_format[i] != 0 && nvkms_to_drm_format[i] == format) { + *nvkms_format = i; + return true; + } + } + return false; +} + +uint32_t *nv_drm_format_array_alloc( + unsigned int *count, + const long unsigned int nvkms_format_mask) +{ + enum NvKmsSurfaceMemoryFormat i; + unsigned int max_count = hweight64(nvkms_format_mask); + uint32_t *array = nv_drm_calloc(1, sizeof(uint32_t) * max_count); + + if (array == NULL) { + return NULL; + } + + *count = 0; + for_each_set_bit(i, &nvkms_format_mask, + sizeof(nvkms_format_mask) * BITS_PER_BYTE) { + + if (i >= ARRAY_SIZE(nvkms_to_drm_format)) { + break; + } + + /* + * Note nvkms_to_drm_format[] is sparsely populated: it doesn't + * handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0 + * entries when iterating through it. + */ + if (nvkms_to_drm_format[i] == 0) { + continue; + } + array[(*count)++] = nvkms_to_drm_format[i]; + } + + if (*count == 0) { + nv_drm_free(array); + return NULL; + } + + return array; +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h new file mode 100644 index 0000000..d165096 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_FORMAT_H__ +#define __NVIDIA_DRM_FORMAT_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include + +#include "nvkms-format.h" + +bool nv_drm_format_to_nvkms_format(u32 format, + enum NvKmsSurfaceMemoryFormat *nvkms_format); + +uint32_t *nv_drm_format_array_alloc( + unsigned int *count, + const long unsigned int nvkms_format_mask); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_FORMAT_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c new file mode 100644 index 0000000..fccde05 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+#include <drm/drm_prime.h>
+#endif
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_DRV_H_PRESENT)
+#include <drm/drm_drv.h>
+#endif
+
+#include "nvidia-drm-gem-dma-buf.h"
+#include "nvidia-drm-ioctl.h"
+
+#include "linux/dma-buf.h"
+
+static inline
+void __nv_drm_gem_dma_buf_free(struct nv_drm_gem_object *nv_gem)
+{
+    struct nv_drm_device *nv_dev = nv_gem->nv_dev;
+    struct nv_drm_gem_dma_buf *nv_dma_buf = to_nv_dma_buf(nv_gem);
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    if (nv_dma_buf->base.pMemory) {
+        /* Free NvKmsKapiMemory handle associated with this gem object */
+        nvKms->freeMemory(nv_dev->pDevice, nv_dma_buf->base.pMemory);
+    }
+#endif
+
+    drm_prime_gem_destroy(&nv_gem->base, nv_dma_buf->sgt);
+
+    nv_drm_free(nv_dma_buf);
+}
+
+static int __nv_drm_gem_dma_buf_create_mmap_offset(
+    struct nv_drm_device *nv_dev,
+    struct nv_drm_gem_object *nv_gem,
+    uint64_t *offset)
+{
+    (void)nv_dev;
+    return nv_drm_gem_create_mmap_offset(nv_gem, offset);
+}
+
+static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
+                                     struct vm_area_struct *vma)
+{
+    struct dma_buf_attachment *attach = nv_gem->base.import_attach;
+    struct dma_buf *dma_buf = attach->dmabuf;
+    struct file *old_file;
+    int ret;
+
+    /* check if buffer supports mmap */
+    if (!dma_buf->file->f_op->mmap)
+        return -EINVAL;
+
+    /* readjust the vma */
+    get_file(dma_buf->file);
+    old_file = vma->vm_file;
+    vma->vm_file = dma_buf->file;
+    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
+
+    ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);
+
+    if (ret) {
+        /* restore old parameters on failure */
+        vma->vm_file = old_file;
+        fput(dma_buf->file);
+    } else {
+        if (old_file)
+            fput(old_file);
+    }
+
+    return ret;
+}
+
+const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops = {
+    .free = __nv_drm_gem_dma_buf_free,
+    .create_mmap_offset = __nv_drm_gem_dma_buf_create_mmap_offset,
+    .mmap = __nv_drm_gem_dma_buf_mmap,
+};
+
+struct drm_gem_object*
+nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
+                                 struct dma_buf_attachment *attach,
+                                 struct sg_table *sgt)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct dma_buf *dma_buf = attach->dmabuf;
+    struct nv_drm_gem_dma_buf *nv_dma_buf;
+    struct NvKmsKapiMemory *pMemory;
+
+    if ((nv_dma_buf =
+            nv_drm_calloc(1, sizeof(*nv_dma_buf))) == NULL) {
+        return NULL;
} + + // dma_buf->size must be a multiple of PAGE_SIZE + BUG_ON(dma_buf->size % PAGE_SIZE); + + pMemory = NULL; +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice, + (NvP64)(NvUPtr)dma_buf, + dma_buf->size - 1); + } +#endif + + nv_drm_gem_object_init(nv_dev, &nv_dma_buf->base, + &__nv_gem_dma_buf_ops, dma_buf->size, pMemory); + + nv_dma_buf->sgt = sgt; + + return &nv_dma_buf->base.base; +} + +int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_export_dmabuf_memory_params *p = data; + struct nv_drm_gem_dma_buf *nv_dma_buf = NULL; + int ret = 0; + struct NvKmsKapiMemory *pTmpMemory = NULL; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EINVAL; + goto done; + } + + if (p->__pad != 0) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed"); + goto done; + } + + if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup( + dev, filep, p->handle)) == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup DMA-BUF GEM object for export: 0x%08x", + p->handle); + goto done; + } + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (!nv_dma_buf->base.pMemory) { + /* + * Get RM system memory handle from SGT - RM will take a reference + * on this GEM object to prevent the DMA-BUF from being unpinned + * prematurely. + */ + pTmpMemory = nvKms->getSystemMemoryHandleFromSgt( + nv_dev->pDevice, + (NvP64)(NvUPtr)nv_dma_buf->sgt, + (NvP64)(NvUPtr)&nv_dma_buf->base.base, + nv_dma_buf->base.base.size - 1); + } + } +#endif + + if (!nv_dma_buf->base.pMemory && !pTmpMemory) { + ret = -ENOMEM; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to get memory to export from DMA-BUF GEM object: 0x%08x", + p->handle); + goto done; + } + + if (!nvKms->exportMemory(nv_dev->pDevice, + nv_dma_buf->base.pMemory ? + nv_dma_buf->base.pMemory : pTmpMemory, + p->nvkms_params_ptr, + p->nvkms_params_size)) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to export memory from DMA-BUF GEM object: 0x%08x", + p->handle); + goto done; + } + +done: + if (pTmpMemory) { + /* + * Release reference on RM system memory to prevent circular + * refcounting. Another refcount will still be held by RM FD. + */ + nvKms->freeMemory(nv_dev->pDevice, pTmpMemory); + } + + if (nv_dma_buf != NULL) { + nv_drm_gem_object_unreference_unlocked(&nv_dma_buf->base); + } + + return ret; +} +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h new file mode 100644 index 0000000..05b16fc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_DMA_BUF_H__ +#define __NVIDIA_DRM_GEM_DMA_BUF_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_dma_buf { + struct nv_drm_gem_object base; + struct sg_table *sgt; +}; + +extern const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops; + +static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_dma_buf, base); + } + + return NULL; +} + +static inline +struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(dev, filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &__nv_gem_dma_buf_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_dma_buf(nv_gem); +} + +struct drm_gem_object* +nv_drm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif + +#endif /* __NVIDIA_DRM_GEM_DMA_BUF_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c new file mode 100644 index 0000000..c1dc6d3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-gem-nvkms-memory.h"
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-ioctl.h"
+
+#if defined(NV_DRM_DRM_DRV_H_PRESENT)
+#include <drm/drm_drv.h>
+#endif
+
+#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+#include <drm/drm_prime.h>
+#endif
+
+#include <linux/io.h> /* ioremap_wc(), iounmap() */
+
+#include "nv-mm.h"
+
+static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
+{
+    struct nv_drm_device *nv_dev = nv_gem->nv_dev;
+    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
+        to_nv_nvkms_memory(nv_gem);
+
+    if (nv_nvkms_memory->physically_mapped) {
+        if (nv_nvkms_memory->pWriteCombinedIORemapAddress != NULL) {
+            iounmap(nv_nvkms_memory->pWriteCombinedIORemapAddress);
+        }
+
+        nvKms->unmapMemory(nv_dev->pDevice,
+                           nv_nvkms_memory->base.pMemory,
+                           NVKMS_KAPI_MAPPING_TYPE_USER,
+                           nv_nvkms_memory->pPhysicalAddress);
+    }
+
+    if (nv_nvkms_memory->pages_count != 0) {
+        nvKms->freeMemoryPages((NvU64 *)nv_nvkms_memory->pages);
+    }
+
+    /* Free NvKmsKapiMemory handle associated with this gem object */
+
+    nvKms->freeMemory(nv_dev->pDevice, nv_nvkms_memory->base.pMemory);
+
+    nv_drm_free(nv_nvkms_memory);
+}
+
+static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem,
+                                   struct vm_area_struct *vma)
+{
+    return drm_gem_mmap_obj(&nv_gem->base,
+            drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
+}
+
+static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
+    struct nv_drm_gem_object *nv_gem,
+    struct vm_area_struct *vma,
+    struct vm_fault *vmf)
+{
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
+        to_nv_nvkms_memory(nv_gem);
+    unsigned long address = nv_page_fault_va(vmf);
+    struct drm_gem_object *gem = vma->vm_private_data;
+    unsigned long page_offset, pfn;
+    vm_fault_t ret;
+
+    page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
+
+    if (nv_nvkms_memory->pages_count == 0) {
+        pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
+        pfn >>= PAGE_SHIFT;
+        pfn += page_offset;
+    } else {
+        BUG_ON(page_offset > nv_nvkms_memory->pages_count);
+        pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
+    }
+
+#if defined(NV_VMF_INSERT_PFN_PRESENT)
+    ret = vmf_insert_pfn(vma, address, pfn);
+#else
+    ret = vm_insert_pfn(vma, address, pfn);
+    switch (ret) {
+        case 0:
+        case -EBUSY:
+            /*
+             * EBUSY indicates that another thread already handled
+             * the faulted range.
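+             *
+             * The translation below mirrors, roughly, what vmf_insert_pfn()
+             * does internally on kernels that provide it:
+             *
+             *   0       -> VM_FAULT_NOPAGE (PTE installed)
+             *   -EBUSY  -> VM_FAULT_NOPAGE (another thread won the race)
+             *   -ENOMEM -> VM_FAULT_OOM
+             *   other   -> VM_FAULT_SIGBUS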
+ */ + ret = VM_FAULT_NOPAGE; + break; + case -ENOMEM: + ret = VM_FAULT_OOM; + break; + default: + WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret); + ret = VM_FAULT_SIGBUS; + break; + } +#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */ + return ret; +#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */ + return VM_FAULT_SIGBUS; +} + +static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup( + struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src); + +static int __nv_drm_gem_nvkms_map( + struct nv_drm_device *nv_dev, + struct NvKmsKapiMemory *pMemory, + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory, + uint64_t size) +{ + if (!nv_dev->hasVideoMemory) { + return 0; + } + + if (!nvKms->mapMemory(nv_dev->pDevice, + pMemory, + NVKMS_KAPI_MAPPING_TYPE_USER, + &nv_nvkms_memory->pPhysicalAddress)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to map NvKmsKapiMemory 0x%p", + pMemory); + return -ENOMEM; + } + + nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc( + (uintptr_t)nv_nvkms_memory->pPhysicalAddress, + size); + + if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) { + NV_DRM_DEV_LOG_INFO( + nv_dev, + "Failed to ioremap_wc NvKmsKapiMemory 0x%p", + pMemory); + } + + nv_nvkms_memory->physically_mapped = true; + + return 0; +} + +static int __nv_drm_gem_map_nvkms_memory_offset( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + + if (!nv_nvkms_memory->physically_mapped) { + int ret = __nv_drm_gem_nvkms_map(nv_dev, + nv_nvkms_memory->base.pMemory, + nv_nvkms_memory, + nv_nvkms_memory->base.base.size); + if (ret) { + return ret; + } + } + + return nv_drm_gem_create_mmap_offset(&nv_nvkms_memory->base, offset); +} + +static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_device *nv_dev = nv_gem->nv_dev; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + struct sg_table *sg_table; + + if (nv_nvkms_memory->pages_count == 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Cannot create sg_table for NvKmsKapiMemory 0x%p", + nv_gem->pMemory); + return NULL; + } + + sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev, + nv_nvkms_memory->pages, + nv_nvkms_memory->pages_count); + + return sg_table; +} + +const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = { + .free = __nv_drm_gem_nvkms_memory_free, + .prime_dup = __nv_drm_gem_nvkms_prime_dup, + .mmap = __nv_drm_gem_nvkms_mmap, + .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault, + .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset, + .prime_get_sg_table = __nv_drm_gem_nvkms_memory_prime_get_sg_table, +}; + +static int __nv_drm_nvkms_gem_obj_init( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory, + struct NvKmsKapiMemory *pMemory, + uint64_t size) +{ + NvU64 *pages = NULL; + NvU32 numPages = 0; + + nv_nvkms_memory->pPhysicalAddress = NULL; + nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL; + nv_nvkms_memory->physically_mapped = false; + + if (!nvKms->getMemoryPages(nv_dev->pDevice, + pMemory, + &pages, + &numPages) && + !nv_dev->hasVideoMemory) { + /* GetMemoryPages may fail for vidmem allocations, + * but it should not fail for sysmem allocations. 
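+         * In other words, a failure here is fatal only on sysmem-only
+         * devices; on devices with video memory the pages array simply
+         * stays empty and CPU faults are served from pPhysicalAddress
+         * instead (see __nv_drm_gem_nvkms_handle_vma_fault() above).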
*/ + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to get memory pages for NvKmsKapiMemory 0x%p", + pMemory); + return -ENOMEM; + } + nv_nvkms_memory->pages_count = numPages; + nv_nvkms_memory->pages = (struct page **)pages; + + nv_drm_gem_object_init(nv_dev, + &nv_nvkms_memory->base, + &nv_gem_nvkms_memory_ops, + size, + pMemory); + + return 0; +} + +int nv_drm_dumb_create( + struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + uint8_t compressible = 0; + struct NvKmsKapiMemory *pMemory; + int ret = 0; + + args->pitch = roundup(args->width * ((args->bpp + 7) >> 3), + nv_dev->pitchAlignment); + + args->size = args->height * args->pitch; + + /* Core DRM requires gem object size to be aligned with PAGE_SIZE */ + + args->size = roundup(args->size, PAGE_SIZE); + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + ret = -ENOMEM; + goto fail; + } + + if (nv_dev->hasVideoMemory) { + pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice, + NvKmsSurfaceMemoryLayoutPitch, + NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT, + args->size, + &compressible); + } else { + pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice, + NvKmsSurfaceMemoryLayoutPitch, + NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT, + args->size, + &compressible); + } + + if (pMemory == NULL) { + ret = -ENOMEM; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to allocate NvKmsKapiMemory for dumb object of size %llu", + args->size); + goto nvkms_alloc_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, args->size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + /* Always map dumb buffer memory up front. Clients are only expected + * to use dumb buffers for software rendering, so they're not much use + * without a CPU mapping. 
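+     *
+     * For context, a software-rendering client typically consumes a dumb
+     * buffer through the generic DRM UAPI, roughly (illustrative userspace
+     * sketch, not part of this module):
+     *
+     *   struct drm_mode_create_dumb c = { .width = w, .height = h, .bpp = 32 };
+     *   ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &c);
+     *   struct drm_mode_map_dumb m = { .handle = c.handle };
+     *   ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &m);
+     *   void *ptr = mmap(NULL, c.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+     *                    fd, m.offset);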
+     */
+    ret = __nv_drm_gem_nvkms_map(nv_dev, pMemory, nv_nvkms_memory, args->size);
+    if (ret) {
+        nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
+        goto fail;
+    }
+
+    return nv_drm_gem_handle_create_drop_reference(file_priv,
+                                                   &nv_nvkms_memory->base,
+                                                   &args->handle);
+
+nvkms_gem_obj_init_failed:
+    nvKms->freeMemory(nv_dev->pDevice, pMemory);
+
+nvkms_alloc_memory_failed:
+    nv_drm_free(nv_nvkms_memory);
+
+fail:
+    return ret;
+}
+
+int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
+                                         void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_gem_import_nvkms_memory_params *p = data;
+    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
+    struct NvKmsKapiMemory *pMemory;
+    int ret;
+
+    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+        ret = -EINVAL;
+        goto failed;
+    }
+
+    if ((nv_nvkms_memory =
+            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
+        ret = -ENOMEM;
+        goto failed;
+    }
+
+    pMemory = nvKms->importMemory(nv_dev->pDevice,
+                                  p->mem_size,
+                                  p->nvkms_params_ptr,
+                                  p->nvkms_params_size);
+
+    if (pMemory == NULL) {
+        ret = -EINVAL;
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to import NVKMS memory to GEM object");
+        goto nvkms_import_memory_failed;
+    }
+
+    ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, p->mem_size);
+    if (ret) {
+        goto nvkms_gem_obj_init_failed;
+    }
+
+    return nv_drm_gem_handle_create_drop_reference(filep,
+                                                   &nv_nvkms_memory->base,
+                                                   &p->handle);
+nvkms_gem_obj_init_failed:
+    nvKms->freeMemory(nv_dev->pDevice, pMemory);
+
+nvkms_import_memory_failed:
+    nv_drm_free(nv_nvkms_memory);
+
+failed:
+    return ret;
+}
+
+int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
+                                         void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_gem_export_nvkms_memory_params *p = data;
+    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
+    int ret = 0;
+
+    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+        ret = -EINVAL;
+        goto done;
+    }
+
+    if (p->__pad != 0) {
+        ret = -EINVAL;
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
+        goto done;
+    }
+
+    if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
+                dev,
+                filep,
+                p->handle)) == NULL) {
+        ret = -EINVAL;
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to lookup NVKMS gem object for export: 0x%08x",
+            p->handle);
+        goto done;
+    }
+
+    if (!nvKms->exportMemory(nv_dev->pDevice,
+                             nv_nvkms_memory->base.pMemory,
+                             p->nvkms_params_ptr,
+                             p->nvkms_params_size)) {
+        ret = -EINVAL;
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to export memory from NVKMS GEM object: 0x%08x", p->handle);
+        goto done;
+    }
+
+done:
+    if (nv_nvkms_memory != NULL) {
+        nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
+    }
+
+    return ret;
+}
+
+int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
+                                        void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data;
+    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
+    struct NvKmsKapiMemory *pMemory;
+    enum NvKmsSurfaceMemoryLayout layout;
+    enum NvKmsKapiAllocationType type;
+    int ret = 0;
+
+    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+        ret = -EINVAL;
+        goto failed;
+    }
+
+    if (p->__pad != 0) {
+        ret = -EINVAL;
+        NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
+        goto failed;
+    }
+
+    if ((nv_nvkms_memory =
+            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
+        ret = -ENOMEM;
+        goto failed;
+    }
+
+    layout =
p->block_linear ? + NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch; + type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ? + NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN : NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT; + + if (nv_dev->hasVideoMemory) { + pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice, + layout, + type, + p->memory_size, + &p->compressible); + } else { + pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice, + layout, + type, + p->memory_size, + &p->compressible); + } + + if (pMemory == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to allocate NVKMS memory for GEM object"); + goto nvkms_alloc_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, + p->memory_size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_nvkms_memory->base, + &p->handle); + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_alloc_memory_failed: + nv_drm_free(nv_nvkms_memory); + +failed: + return ret; +} + +static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup( + struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + const struct nv_drm_device *nv_dev_src; + const struct nv_drm_gem_nvkms_memory *nv_nvkms_memory_src; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct NvKmsKapiMemory *pMemory; + + BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops); + + nv_dev_src = to_nv_device(nv_gem_src->base.dev); + nv_nvkms_memory_src = to_nv_nvkms_memory_const(nv_gem_src); + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + return NULL; + } + + pMemory = nvKms->dupMemory(nv_dev->pDevice, + nv_dev_src->pDevice, nv_gem_src->pMemory); + if (pMemory == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to import NVKMS memory to GEM object"); + goto nvkms_dup_memory_failed; + } + + if (__nv_drm_nvkms_gem_obj_init(nv_dev, + nv_nvkms_memory, + pMemory, + nv_gem_src->base.size)) { + goto nvkms_gem_obj_init_failed; + } + + return &nv_nvkms_memory->base.base; + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_dup_memory_failed: + nv_drm_free(nv_nvkms_memory); + + return NULL; +} + +int nv_drm_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, uint32_t handle, + uint64_t *offset) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + int ret = -EINVAL; + + if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup( + dev, + file, + handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for mapping: 0x%08x", + handle); + return ret; + } + + ret = __nv_drm_gem_map_nvkms_memory_offset(nv_dev, + &nv_nvkms_memory->base, offset); + + nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base); + + return ret; +} + +int nv_drm_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file, handle); +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h new file mode 100644 index 0000000..7ecbb94 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ +#define __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_nvkms_memory { + struct nv_drm_gem_object base; + + bool physically_mapped; + + void *pPhysicalAddress; + void *pWriteCombinedIORemapAddress; + + struct page **pages; + unsigned long pages_count; +}; + +extern const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops; + +static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base); + } + + return NULL; +} + +static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const( + const struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base); + } + + return NULL; +} + +static inline +struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(dev, filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &nv_gem_nvkms_memory_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_nvkms_memory(nv_gem); +} + +int nv_drm_dumb_create( + struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args); + +int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); + +int nv_drm_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle); + +struct drm_gem_object *nv_drm_gem_nvkms_prime_import( + struct drm_device *dev, + struct drm_gem_object *gem); + +#endif + +#endif /* __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c new file mode 100644 index 0000000..e554adc --- 
/dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+#include <drm/drm_prime.h>
+#endif
+
+#include "nvidia-drm-gem-user-memory.h"
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-ioctl.h"
+
+#include "linux/dma-buf.h"
+#include "linux/mm.h"
+#include "nv-mm.h"
+
+static inline
+void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
+{
+    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
+
+    nv_drm_unlock_user_pages(nv_user_memory->pages_count,
+                             nv_user_memory->pages);
+
+    nv_drm_free(nv_user_memory);
+}
+
+static struct sg_table *__nv_drm_gem_user_memory_prime_get_sg_table(
+    struct nv_drm_gem_object *nv_gem)
+{
+    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
+    struct drm_gem_object *gem = &nv_gem->base;
+
+    return nv_drm_prime_pages_to_sg(gem->dev,
+                                    nv_user_memory->pages,
+                                    nv_user_memory->pages_count);
+}
+
+static void *__nv_drm_gem_user_memory_prime_vmap(
+    struct nv_drm_gem_object *nv_gem)
+{
+    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
+
+    return nv_drm_vmap(nv_user_memory->pages,
+                       nv_user_memory->pages_count);
+}
+
+static void __nv_drm_gem_user_memory_prime_vunmap(
+    struct nv_drm_gem_object *gem,
+    void *address)
+{
+    nv_drm_vunmap(address);
+}
+
+static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
+                                         struct vm_area_struct *vma)
+{
+    int ret = drm_gem_mmap_obj(&nv_gem->base,
+            drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    /*
+     * Enforce that user-memory GEM mappings are MAP_SHARED, to prevent COW
+     * with MAP_PRIVATE and VM_MIXEDMAP
+     */
+    if (!(vma->vm_flags & VM_SHARED)) {
+        return -EINVAL;
+    }
+
+    vma->vm_flags &= ~VM_PFNMAP;
+    vma->vm_flags &= ~VM_IO;
+    vma->vm_flags |= VM_MIXEDMAP;
+
+    return 0;
+}
+
+static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
+    struct nv_drm_gem_object *nv_gem,
+    struct vm_area_struct *vma,
+    struct vm_fault *vmf)
+{
+    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
+    unsigned long address = nv_page_fault_va(vmf);
+    struct drm_gem_object *gem = vma->vm_private_data;
+    unsigned long page_offset;
+    vm_fault_t ret;
+
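+    /*
+     * vm_insert_page() is used (rather than pfn insertion) because these
+     * are pinned user pages: the mmap hook above forces MAP_SHARED and
+     * marks the VMA VM_MIXEDMAP, so the pages keep normal refcounting and
+     * are never subject to COW.
+     */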
+    page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
+
+    BUG_ON(page_offset > nv_user_memory->pages_count);
+
+    ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
+    switch (ret) {
+        case 0:
+        case -EBUSY:
+            /*
+             * EBUSY indicates that another thread already handled
+             * the faulted range.
+             */
+            ret = VM_FAULT_NOPAGE;
+            break;
+        case -ENOMEM:
+            ret = VM_FAULT_OOM;
+            break;
+        default:
+            WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
+            ret = VM_FAULT_SIGBUS;
+            break;
+    }
+
+    return ret;
+}
+
+static int __nv_drm_gem_user_create_mmap_offset(
+    struct nv_drm_device *nv_dev,
+    struct nv_drm_gem_object *nv_gem,
+    uint64_t *offset)
+{
+    (void)nv_dev;
+    return nv_drm_gem_create_mmap_offset(nv_gem, offset);
+}
+
+const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops = {
+    .free = __nv_drm_gem_user_memory_free,
+    .prime_get_sg_table = __nv_drm_gem_user_memory_prime_get_sg_table,
+    .prime_vmap = __nv_drm_gem_user_memory_prime_vmap,
+    .prime_vunmap = __nv_drm_gem_user_memory_prime_vunmap,
+    .mmap = __nv_drm_gem_user_memory_mmap,
+    .handle_vma_fault = __nv_drm_gem_user_memory_handle_vma_fault,
+    .create_mmap_offset = __nv_drm_gem_user_create_mmap_offset,
+};
+
+int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
+                                             void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+    struct drm_nvidia_gem_import_userspace_memory_params *params = data;
+    struct nv_drm_gem_user_memory *nv_user_memory;
+
+    struct page **pages = NULL;
+    unsigned long pages_count = 0;
+
+    int ret = 0;
+
+    if ((params->size % PAGE_SIZE) != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Userspace memory 0x%llx size must be a multiple of page "
+            "size to create a gem object",
+            params->address);
+        return -EINVAL;
+    }
+
+    pages_count = params->size / PAGE_SIZE;
+
+    ret = nv_drm_lock_user_pages(params->address, pages_count, &pages);
+
+    if (ret != 0) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to lock user pages for address 0x%llx: %d",
+            params->address, ret);
+        return ret;
+    }
+
+    if ((nv_user_memory =
+            nv_drm_calloc(1, sizeof(*nv_user_memory))) == NULL) {
+        ret = -ENOMEM;
+        goto failed;
+    }
+
+    nv_user_memory->pages = pages;
+    nv_user_memory->pages_count = pages_count;
+
+    nv_drm_gem_object_init(nv_dev,
+                           &nv_user_memory->base,
+                           &__nv_gem_user_memory_ops,
+                           params->size,
+                           NULL /* pMemory */);
+
+    return nv_drm_gem_handle_create_drop_reference(filep,
+                                                   &nv_user_memory->base,
+                                                   &params->handle);
+
+failed:
+    nv_drm_unlock_user_pages(pages_count, pages);
+
+    return ret;
+}
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
new file mode 100644
index 0000000..275c083
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_USER_MEMORY_H__ +#define __NVIDIA_DRM_GEM_USER_MEMORY_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_user_memory { + struct nv_drm_gem_object base; + struct page **pages; + unsigned long pages_count; +}; + +extern const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops; + +static inline struct nv_drm_gem_user_memory *to_nv_user_memory( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_user_memory, base); + } + + return NULL; +} + +int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +static inline +struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(dev, filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &__nv_gem_user_memory_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_user_memory(nv_gem); +} + +#endif + +#endif /* __NVIDIA_DRM_GEM_USER_MEMORY_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c new file mode 100644 index 0000000..92d61a6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-ioctl.h"
+#include "nvidia-drm-prime-fence.h"
+#include "nvidia-drm-gem.h"
+#include "nvidia-drm-gem-nvkms-memory.h"
+#include "nvidia-drm-gem-user-memory.h"
+#include "nvidia-dma-resv-helper.h"
+#include "nvidia-drm-helper.h"
+#include "nvidia-drm-gem-dma-buf.h"
+
+#if defined(NV_DRM_DRM_DRV_H_PRESENT)
+#include <drm/drm_drv.h>
+#endif
+
+#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+#include <drm/drm_prime.h>
+#endif
+
+#if defined(NV_DRM_DRM_FILE_H_PRESENT)
+#include <drm/drm_file.h>
+#endif
+
+#include "linux/dma-buf.h"
+
+#include "nv-mm.h"
+
+void nv_drm_gem_free(struct drm_gem_object *gem)
+{
+    struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
+
+    /* Cleanup core gem object */
+    drm_gem_object_release(&nv_gem->base);
+
+#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
+    nv_dma_resv_fini(&nv_gem->resv);
+#endif
+
+    nv_gem->ops->free(nv_gem);
+}
+
+#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) && \
+    defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
+
+/*
+ * The 'dma_buf_map' structure is renamed to 'iosys_map' by the commit
+ * 7938f4218168 ("dma-buf-map: Rename to iosys-map").
+ */
+#if defined(NV_LINUX_IOSYS_MAP_H_PRESENT)
+typedef struct iosys_map nv_sysio_map_t;
+#else
+typedef struct dma_buf_map nv_sysio_map_t;
+#endif
+
+static int nv_drm_gem_vmap(struct drm_gem_object *gem,
+                           nv_sysio_map_t *map)
+{
+    map->vaddr = nv_drm_gem_prime_vmap(gem);
+    if (map->vaddr == NULL) {
+        return -ENOMEM;
+    }
+    map->is_iomem = true;
+    return 0;
+}
+
+static void nv_drm_gem_vunmap(struct drm_gem_object *gem,
+                              nv_sysio_map_t *map)
+{
+    nv_drm_gem_prime_vunmap(gem, map->vaddr);
+    map->vaddr = NULL;
+}
+#endif
+
+#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) || \
+    !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
+static struct drm_gem_object_funcs nv_drm_gem_funcs = {
+    .free = nv_drm_gem_free,
+    .get_sg_table = nv_drm_gem_prime_get_sg_table,
+
+#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
+    .export = drm_gem_prime_export,
+#if defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
+    .vmap = nv_drm_gem_vmap,
+    .vunmap = nv_drm_gem_vunmap,
+#else
+    .vmap = nv_drm_gem_prime_vmap,
+    .vunmap = nv_drm_gem_prime_vunmap,
+#endif
+    .vm_ops = &nv_drm_gem_vma_ops,
+#endif
+};
+#endif
+
+void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
+                            struct nv_drm_gem_object *nv_gem,
+                            const struct nv_drm_gem_object_funcs * const ops,
+                            size_t size,
+                            struct NvKmsKapiMemory *pMemory)
+{
+    struct drm_device *dev = nv_dev->dev;
+
+    nv_gem->nv_dev = nv_dev;
+    nv_gem->ops = ops;
+
+    nv_gem->pMemory = pMemory;
+
+    /* Initialize the gem object */
+
+#if defined(NV_DRM_FENCE_AVAILABLE)
+    nv_dma_resv_init(&nv_gem->resv);
+
+#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
+    nv_gem->base.resv = &nv_gem->resv;
+#endif
+
+#endif
+
+#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
+    nv_gem->base.funcs = &nv_drm_gem_funcs;
+#endif
+
+    drm_gem_private_object_init(dev, &nv_gem->base, size);
+}
+
+struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
+                                               struct dma_buf *dma_buf)
+{
+#if defined(NV_DMA_BUF_OWNER_PRESENT)
+    struct drm_gem_object *gem_dst;
+    struct nv_drm_gem_object *nv_gem_src;
+
+    if
(dma_buf->owner == dev->driver->fops->owner) { + nv_gem_src = to_nv_gem_object(dma_buf->priv); + + if (nv_gem_src->base.dev != dev && + nv_gem_src->ops->prime_dup != NULL) { + /* + * If we're importing from another NV device, try to handle the + * import internally rather than attaching through the dma-buf + * mechanisms. Importing from the same device is even easier, + * and drm_gem_prime_import() handles that just fine. + */ + gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src); + + if (gem_dst) + return gem_dst; + } + } +#endif /* NV_DMA_BUF_OWNER_PRESENT */ + + return drm_gem_prime_import(dev, dma_buf); +} + +struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_get_sg_table != NULL) { + return nv_gem->ops->prime_get_sg_table(nv_gem); + } + + return ERR_PTR(-ENOTSUPP); +} + +void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_vmap != NULL) { + return nv_gem->ops->prime_vmap(nv_gem); + } + + return ERR_PTR(-ENOTSUPP); +} + +void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_vunmap != NULL) { + nv_gem->ops->prime_vunmap(nv_gem, address); + } +} + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) +nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj); + + return &nv_gem->resv; +} +#endif + +int nv_drm_gem_map_offset_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_map_offset_params *params = data; + struct nv_drm_gem_object *nv_gem; + int ret; + + if ((nv_gem = nv_drm_gem_object_lookup(dev, + filep, + params->handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for map: 0x%08x", + params->handle); + return -EINVAL; + } + + if (nv_gem->ops->create_mmap_offset) { + ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, ¶ms->offset); + } else { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Gem object type does not support mapping: 0x%08x", + params->handle); + ret = -EINVAL; + } + + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +int nv_drm_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct drm_file *priv = file->private_data; + struct drm_device *dev = priv->minor->dev; + struct drm_gem_object *obj = NULL; + struct drm_vma_offset_node *node; + int ret = 0; + struct nv_drm_gem_object *nv_gem; + + drm_vma_offset_lock_lookup(dev->vma_offset_manager); + node = nv_drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, vma_pages(vma)); + if (likely(node)) { + obj = container_of(node, struct drm_gem_object, vma_node); + /* + * When the object is being freed, after it hits 0-refcnt it proceeds + * to tear down the object. In the process it will attempt to remove + * the VMA offset and so acquire this mgr->vm_lock. Therefore if we + * find an object with a 0-refcnt that matches our range, we know it is + * in the process of being destroyed and will be freed as soon as we + * release the lock - so we have to check for the 0-refcnted object and + * treat it as invalid. 
+ */ + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + + if (!obj) + return -EINVAL; + + nv_gem = to_nv_gem_object(obj); + if (nv_gem->ops->mmap == NULL) { + ret = -EINVAL; + goto done; + } + + if (!nv_drm_vma_node_is_allowed(node, file)) { + ret = -EACCES; + goto done; + } + +#if defined(NV_DRM_VMA_OFFSET_NODE_HAS_READONLY) + if (node->readonly) { + if (vma->vm_flags & VM_WRITE) { + ret = -EINVAL; + goto done; + } + vma->vm_flags &= ~VM_MAYWRITE; + } +#endif + + ret = nv_gem->ops->mmap(nv_gem, vma); + +done: + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} +#endif + +int nv_drm_gem_identify_object_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_gem_identify_object_params *p = data; + struct nv_drm_gem_dma_buf *nv_dma_buf; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct nv_drm_gem_user_memory *nv_user_memory; + struct nv_drm_gem_object *nv_gem = NULL; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EINVAL; + } + + nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle); + if (nv_dma_buf) { + p->object_type = NV_GEM_OBJECT_DMABUF; + nv_gem = &nv_dma_buf->base; + goto done; + } + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(dev, filep, p->handle); + if (nv_nvkms_memory) { + p->object_type = NV_GEM_OBJECT_NVKMS; + nv_gem = &nv_nvkms_memory->base; + goto done; + } +#endif + + nv_user_memory = nv_drm_gem_object_user_memory_lookup(dev, filep, p->handle); + if (nv_user_memory) { + p->object_type = NV_GEM_OBJECT_USERMEMORY; + nv_gem = &nv_user_memory->base; + goto done; + } + + p->object_type = NV_GEM_OBJECT_UNKNOWN; + +done: + if (nv_gem) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + } + return 0; +} + +/* XXX Move these vma operations to os layer */ + +static vm_fault_t __nv_drm_vma_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct drm_gem_object *gem = vma->vm_private_data; + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (!nv_gem) { + return VM_FAULT_SIGBUS; + } + + return nv_gem->ops->handle_vma_fault(nv_gem, vma, vmf); +} + +/* + * Note that nv_drm_vma_fault() can be called for different or same + * ranges of the same drm_gem_object simultaneously. + */ + +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) +static vm_fault_t nv_drm_vma_fault(struct vm_fault *vmf) +{ + return __nv_drm_vma_fault(vmf->vma, vmf); +} +#else +static vm_fault_t nv_drm_vma_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + return __nv_drm_vma_fault(vma, vmf); +} +#endif + +const struct vm_operations_struct nv_drm_gem_vma_ops = { + .open = drm_gem_vm_open, + .fault = nv_drm_vma_fault, + .close = drm_gem_vm_close, +}; + +#endif /* NV_DRM_AVAILABLE */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h new file mode 100644 index 0000000..a27c2e9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_GEM_H__
+#define __NVIDIA_DRM_GEM_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_GEM_H_PRESENT)
+#include <drm/drm_gem.h>
+#endif
+
+#include "nvkms-kapi.h"
+#include "nv-mm.h"
+
+#if defined(NV_DRM_FENCE_AVAILABLE)
+#include "nvidia-dma-fence-helper.h"
+#include "nvidia-dma-resv-helper.h"
+#endif
+
+struct nv_drm_gem_object;
+
+struct nv_drm_gem_object_funcs {
+    void (*free)(struct nv_drm_gem_object *nv_gem);
+    struct sg_table *(*prime_get_sg_table)(struct nv_drm_gem_object *nv_gem);
+    void *(*prime_vmap)(struct nv_drm_gem_object *nv_gem);
+    void (*prime_vunmap)(struct nv_drm_gem_object *nv_gem, void *address);
+    struct drm_gem_object *(*prime_dup)(struct drm_device *dev,
+                                        const struct nv_drm_gem_object *nv_gem_src);
+    int (*mmap)(struct nv_drm_gem_object *nv_gem, struct vm_area_struct *vma);
+    vm_fault_t (*handle_vma_fault)(struct nv_drm_gem_object *nv_gem,
+                                   struct vm_area_struct *vma,
+                                   struct vm_fault *vmf);
+    int (*create_mmap_offset)(struct nv_drm_device *nv_dev,
+                              struct nv_drm_gem_object *nv_gem,
+                              uint64_t *offset);
+};
+
+struct nv_drm_gem_object {
+    struct drm_gem_object base;
+
+    struct nv_drm_device *nv_dev;
+    const struct nv_drm_gem_object_funcs *ops;
+
+    struct NvKmsKapiMemory *pMemory;
+
+#if defined(NV_DRM_FENCE_AVAILABLE)
+    nv_dma_resv_t resv;
+#endif
+};
+
+static inline struct nv_drm_gem_object *to_nv_gem_object(
+    struct drm_gem_object *gem)
+{
+    if (gem != NULL) {
+        return container_of(gem, struct nv_drm_gem_object, base);
+    }
+
+    return NULL;
+}
+
+/*
+ * drm_gem_object_{get/put}() added by commit
+ * e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and
+ * drm_gem_object_{reference/unreference}() removed by commit
+ * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
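+ *
+ * The helpers below therefore resolve, in order of availability, to
+ * drm_gem_object_put_unlocked(), then drm_gem_object_put(), then the
+ * legacy drm_gem_object_unreference_unlocked().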
+ */ + +static inline void +nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + +#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT) + drm_gem_object_put_unlocked(&nv_gem->base); +#else + drm_gem_object_put(&nv_gem->base); +#endif + +#else + drm_gem_object_unreference_unlocked(&nv_gem->base); +#endif +} + +static inline void +nv_drm_gem_object_unreference(struct nv_drm_gem_object *nv_gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + drm_gem_object_put(&nv_gem->base); +#else + drm_gem_object_unreference(&nv_gem->base); +#endif +} + +static inline int nv_drm_gem_handle_create_drop_reference( + struct drm_file *file_priv, + struct nv_drm_gem_object *nv_gem, + uint32_t *handle) +{ + int ret = drm_gem_handle_create(file_priv, &nv_gem->base, handle); + + /* drop reference from allocate - handle holds it now */ + + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} + +static inline int nv_drm_gem_create_mmap_offset( + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + int ret; + + if ((ret = drm_gem_create_mmap_offset(&nv_gem->base)) < 0) { + NV_DRM_DEV_LOG_ERR( + nv_gem->nv_dev, + "drm_gem_create_mmap_offset failed with error code %d", + ret); + goto done; + } + + *offset = drm_vma_node_offset_addr(&nv_gem->base.vma_node); + +done: + + return ret; +} + +void nv_drm_gem_free(struct drm_gem_object *gem); + +static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ +#if (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 3) + return to_nv_gem_object(drm_gem_object_lookup(dev, filp, handle)); +#elif (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 2) + return to_nv_gem_object(drm_gem_object_lookup(filp, handle)); +#else + #error "Unknown argument count of drm_gem_object_lookup()" +#endif +} + +static inline int nv_drm_gem_handle_create(struct drm_file *filp, + struct nv_drm_gem_object *nv_gem, + uint32_t *handle) +{ + return drm_gem_handle_create(filp, &nv_gem->base, handle); +} + +void nv_drm_gem_object_init(struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + const struct nv_drm_gem_object_funcs * const ops, + size_t size, + struct NvKmsKapiMemory *pMemory); + +struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem); + +void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem); + +void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address); + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) +nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj); +#endif + +extern const struct vm_operations_struct nv_drm_gem_vma_ops; + +int nv_drm_gem_map_offset_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_mmap(struct file *file, struct vm_area_struct *vma); + +int nv_drm_gem_identify_object_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif /* NV_DRM_AVAILABLE */ + +#endif /* __NVIDIA_DRM_GEM_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c new file mode 100644 index 0000000..8fc8620 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file contains snapshots of DRM helper functions from the
+ * Linux kernel which are used by nvidia-drm.ko if the target kernel
+ * predates the helper function. Having these functions consistently
+ * present simplifies nvidia-drm.ko source.
+ */
+
+#include "nvidia-drm-helper.h"
+
+#include "nvmisc.h"
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
+#include <drm/drm_atomic_uapi.h>
+#endif
+
+/*
+ * The inclusion of drm_framebuffer.h was removed from drm_crtc.h by commit
+ * 720cf96d8fecde29b72e1101f8a567a0ce99594f ("drm: Drop drm_framebuffer.h from
+ * drm_crtc.h") in linux-next, expected in v5.19-rc7.
+ *
+ * We only need drm_framebuffer.h for drm_framebuffer_put(), and it is always
+ * present (v4.9+) when drm_framebuffer_{put,get}() is present (v4.12+), so it
+ * is safe to unconditionally include it when drm_framebuffer_get() is present.
+ */
+#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
+#include <drm/drm_framebuffer.h>
+#endif
+
+static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb)
+{
+#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
+    drm_framebuffer_put(fb);
+#else
+    drm_framebuffer_unreference(fb);
+#endif
+
+}
+
+/*
+ * drm_atomic_helper_disable_all() has been added by commit
+ * 1494276000db789c6d2acd85747be4707051c801, which is Signed-off-by:
+ *     Thierry Reding
+ *     Daniel Vetter
+ *
+ * drm_atomic_helper_disable_all() is copied from
+ * linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use
+ * nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs,
+ * use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to
+ * loop over all modeset object states, and use drm_atomic_state_free() if
+ * drm_atomic_state_put() is not available.
+ *
+ * drm_atomic_helper_disable_all() is copied from
+ * linux/drivers/gpu/drm/drm_atomic_helper.c @
+ * 49d70aeaeca8f62b72b7712ecd1e29619a445866, which has the following
+ * copyright and license information:
+ *
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark + * Daniel Vetter + */ +int nv_drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_connector_state *conn_state; + struct drm_connector *conn; + struct drm_plane_state *plane_state; + struct drm_plane *plane; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned plane_mask = 0; + int ret, i; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = ctx; + + nv_drm_for_each_crtc(crtc, dev) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto free; + } + + crtc_state->active = false; + + ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); + if (ret < 0) + goto free; + + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret < 0) + goto free; + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret < 0) + goto free; + } + + nv_drm_for_each_connector_in_state(state, conn, conn_state, i) { + ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); + if (ret < 0) + goto free; + } + + nv_drm_for_each_plane_in_state(state, plane, plane_state, i) { + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret < 0) + goto free; + + drm_atomic_set_fb_for_plane(plane_state, NULL); + plane_mask |= NVBIT(drm_plane_index(plane)); + plane->old_fb = plane->fb; + } + + ret = drm_atomic_commit(state); +free: + if (plane_mask) { + drm_for_each_plane_mask(plane, dev, plane_mask) { + if (ret == 0) { + plane->fb = NULL; + plane->crtc = NULL; + + WARN_ON(plane->state->fb); + WARN_ON(plane->state->crtc); + + if (plane->old_fb) + __nv_drm_framebuffer_put(plane->old_fb); + } + plane->old_fb = NULL; + } + } + +#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT) + drm_atomic_state_put(state); +#else + if (ret != 0) { + drm_atomic_state_free(state); + } else { + /* + * In case of success, drm_atomic_commit() takes care to cleanup and + * free @state. + * + * Comment placed above drm_atomic_commit() says: The caller must not + * free or in any other way access @state. If the function fails then + * the caller must clean up @state itself. 
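+         *
+         * For context, a caller typically drives this helper while holding
+         * the modeset locks; on kernels that provide the convenience
+         * macros this looks roughly like (illustrative sketch, the macro
+         * argument lists vary slightly across kernel versions):
+         *
+         *   struct drm_modeset_acquire_ctx ctx;
+         *   int err;
+         *   DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
+         *   err = nv_drm_atomic_helper_disable_all(dev, &ctx);
+         *   DRM_MODESET_LOCK_ALL_END(dev, ctx, err);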
+         */
+    }
+#endif
+    return ret;
+}
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h
new file mode 100644
index 0000000..ecc5ecf
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h
@@ -0,0 +1,584 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_HELPER_H__
+#define __NVIDIA_DRM_HELPER_H__
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_DRV_H_PRESENT)
+#include <drm/drm_drv.h>
+#endif
+
+/*
+ * drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
+ * (2017-09-26) and drm_dev_unref() is removed by
+ * ba1d345401476a5f7fbad622607c5a1f95e59b31 (2018-11-15).
+#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+#include <drm/drm_prime.h>
+#endif
+
+static inline struct sg_table*
+nv_drm_prime_pages_to_sg(struct drm_device *dev,
+                         struct page **pages, unsigned int nr_pages)
+{
+#if defined(NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG)
+    return drm_prime_pages_to_sg(dev, pages, nr_pages);
+#else
+    return drm_prime_pages_to_sg(pages, nr_pages);
+#endif
+}
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+/*
+ * drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
+ * drm_for_each_encoder() and drm_for_each_plane() were added by kernel
+ * commit 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 which was
+ * Signed-off-by:
+ *     Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * They are copied here from include/drm/drm_crtc.h @
+ * 6295d607ad34ee4e43aab3f20714c2ef7a6adea1, which has the following
+ * copyright and license information:
+ *
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <drm/drm_crtc.h>
+
+#if defined(drm_for_each_plane)
+#define nv_drm_for_each_plane(plane, dev) \
+    drm_for_each_plane(plane, dev)
+#else
+#define nv_drm_for_each_plane(plane, dev) \
+    list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+#endif
+
+#if defined(drm_for_each_crtc)
+#define nv_drm_for_each_crtc(crtc, dev) \
+    drm_for_each_crtc(crtc, dev)
+#else
+#define nv_drm_for_each_crtc(crtc, dev) \
+    list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+#endif
+
+#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
+#define nv_drm_for_each_connector(connector, conn_iter, dev) \
+    drm_for_each_connector_iter(connector, conn_iter)
+#elif defined(drm_for_each_connector)
+#define nv_drm_for_each_connector(connector, conn_iter, dev) \
+    drm_for_each_connector(connector, dev)
+#else
+#define nv_drm_for_each_connector(connector, conn_iter, dev)  \
+    WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));       \
+    list_for_each_entry(connector, &(dev)->mode_config.connector_list, head)
+#endif
+
+#if defined(drm_for_each_encoder)
+#define nv_drm_for_each_encoder(encoder, dev) \
+    drm_for_each_encoder(encoder, dev)
+#else
+#define nv_drm_for_each_encoder(encoder, dev) \
+    list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+#endif
+
+#if defined(drm_for_each_fb)
+#define nv_drm_for_each_fb(fb, dev) \
+    drm_for_each_fb(fb, dev)
+#else
+#define nv_drm_for_each_fb(fb, dev) \
+    list_for_each_entry(fb, &(dev)->mode_config.fb_list, head)
+#endif
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+
+int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
+                                     struct drm_modeset_acquire_ctx *ctx);
+
+/*
+ * for_each_connector_in_state(), for_each_crtc_in_state() and
+ * for_each_plane_in_state() were added by kernel commit
+ * df63b9994eaf942afcdb946d27a28661d7dfbf2a which was Signed-off-by:
+ *     Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
+ *     Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * for_each_connector_in_state(), for_each_crtc_in_state() and
+ * for_each_plane_in_state() were copied from
+ * include/drm/drm_atomic.h @
+ * 21a01abbe32a3cbeb903378a24e504bfd9fe0648
+ * which has the following copyright and license information:
+ *
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
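+/*
+ * Illustrative only: the *_in_state wrappers defined below are used exactly
+ * like the upstream macros they stand in for. A hypothetical sketch that
+ * walks the planes tracked by an atomic update:
+ *
+ *     struct drm_plane *plane;
+ *     struct drm_plane_state *plane_state;
+ *     int i;
+ *
+ *     nv_drm_for_each_plane_in_state(state, plane, plane_state, i) {
+ *         // Inspect or modify each plane's state in the update.
+ *     }
+ */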
+/**
+ * nv_drm_for_each_connector_in_state - iterate over all connectors in an
+ * atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @connector: &struct drm_connector iteration cursor
+ * @connector_state: &struct drm_connector_state iteration cursor
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all connectors in an atomic update. Note that before
+ * the software state is committed (by calling drm_atomic_helper_swap_state())
+ * this points to the new state, while afterwards it points to the old state.
+ * Due to this tricky confusion this macro is deprecated.
+ */
+#if !defined(for_each_connector_in_state)
+#define nv_drm_for_each_connector_in_state(__state,                    \
+                                           connector,                  \
+                                           connector_state, __i)       \
+    for ((__i) = 0;                                                    \
+         (__i) < (__state)->num_connector &&                           \
+         ((connector) = (__state)->connectors[__i].ptr,                \
+         (connector_state) = (__state)->connectors[__i].state, 1);     \
+         (__i)++)                                                      \
+        for_each_if (connector)
+#else
+#define nv_drm_for_each_connector_in_state(__state,                    \
+                                           connector,                  \
+                                           connector_state, __i)       \
+    for_each_connector_in_state(__state, connector, connector_state, __i)
+#endif
+
+
+/**
+ * nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @crtc: &struct drm_crtc iteration cursor
+ * @crtc_state: &struct drm_crtc_state iteration cursor
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all CRTCs in an atomic update. Note that before the
+ * software state is committed (by calling drm_atomic_helper_swap_state())
+ * this points to the new state, while afterwards it points to the old state.
+ * Due to this tricky confusion this macro is deprecated.
+ */
+#if !defined(for_each_crtc_in_state)
+#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i)  \
+    for ((__i) = 0;                                                    \
+         (__i) < (__state)->dev->mode_config.num_crtc &&               \
+         ((crtc) = (__state)->crtcs[__i].ptr,                          \
+         (crtc_state) = (__state)->crtcs[__i].state, 1);               \
+         (__i)++)                                                      \
+        for_each_if (crtc_state)
+#else
+#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i)  \
+    for_each_crtc_in_state(__state, crtc, crtc_state, __i)
+#endif
+
+/**
+ * nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @plane_state: &struct drm_plane_state iteration cursor
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all planes in an atomic update. Note that before the
+ * software state is committed (by calling drm_atomic_helper_swap_state())
+ * this points to the new state, while afterwards it points to the old state.
+ * Due to this tricky confusion this macro is deprecated.
+ */ +#if !defined(for_each_plane_in_state) +#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane && \ + ((plane) = (__state)->planes[__i].ptr, \ + (plane_state) = (__state)->planes[__i].state, 1); \ + (__i)++) \ + for_each_if (plane_state) +#else +#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \ + for_each_plane_in_state(__state, plane, plane_state, __i) +#endif + +static inline struct drm_crtc *nv_drm_crtc_find(struct drm_device *dev, + uint32_t id) +{ +#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG) + return drm_crtc_find(dev, NULL /* file_priv */, id); +#else + return drm_crtc_find(dev, id); +#endif +} + +static inline struct drm_encoder *nv_drm_encoder_find(struct drm_device *dev, + uint32_t id) +{ +#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG) + return drm_encoder_find(dev, NULL /* file_priv */, id); +#else + return drm_encoder_find(dev, id); +#endif +} + +/* + * drm_connector_for_each_possible_encoder() is added by commit + * 83aefbb887b59df0b3520965c3701e01deacfc52 which was Signed-off-by: + * Ville Syrjälä + * + * drm_connector_for_each_possible_encoder() is copied from + * include/drm/drm_connector.h and modified to use nv_drm_encoder_find() + * instead of drm_encoder_find(). + * + * drm_connector_for_each_possible_encoder() is copied from + * include/drm/drm_connector.h @ + * 83aefbb887b59df0b3520965c3701e01deacfc52 + * which has the following copyright and license information: + * + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */
+
+#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
+#include <drm/drm_connector.h>
+#endif
+
+/**
+ * nv_drm_connector_for_each_possible_encoder - iterate connector's possible
+ * encoders
+ * @connector: &struct drm_connector pointer
+ * @encoder: &struct drm_encoder pointer used as cursor
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#if !defined(drm_connector_for_each_possible_encoder)
+
+#if !defined(for_each_if)
+#define for_each_if(condition) if (!(condition)) {} else
+#endif
+
+#define __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) \
+    for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) &&           \
+         (connector)->encoder_ids[(__i)] != 0; (__i)++)                       \
+        for_each_if((encoder) =                                               \
+                    nv_drm_encoder_find((connector)->dev,                     \
+                                        (connector)->encoder_ids[(__i)]))
+
+#define nv_drm_connector_for_each_possible_encoder(connector, encoder)        \
+    {                                                                         \
+        unsigned int __i;                                                     \
+        __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i)
+
+#define nv_drm_connector_for_each_possible_encoder_end \
+    }
+
+#else
+
+#if NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT == 3
+
+#define nv_drm_connector_for_each_possible_encoder(connector, encoder)  \
+    {                                                                    \
+        unsigned int __i;                                                \
+        drm_connector_for_each_possible_encoder(connector, encoder, __i)
+
+#define nv_drm_connector_for_each_possible_encoder_end \
+    }
+
+#else
+
+#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
+    drm_connector_for_each_possible_encoder(connector, encoder)
+
+#define nv_drm_connector_for_each_possible_encoder_end
+
+#endif
+
+#endif
+
+static inline int
+nv_drm_connector_attach_encoder(struct drm_connector *connector,
+                                struct drm_encoder *encoder)
+{
+#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
+    return drm_mode_connector_attach_encoder(connector, encoder);
+#else
+    return drm_connector_attach_encoder(connector, encoder);
+#endif
+}
+
+static inline int
+nv_drm_connector_update_edid_property(struct drm_connector *connector,
+                                      const struct edid *edid)
+{
+#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
+    return drm_mode_connector_update_edid_property(connector, edid);
+#else
+    return drm_connector_update_edid_property(connector, edid);
+#endif
+}
+
+#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
+#include <drm/drm_connector.h>
+
+static inline
+void nv_drm_connector_list_iter_begin(struct drm_device *dev,
+                                      struct drm_connector_list_iter *iter)
+{
+#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
+    drm_connector_list_iter_begin(dev, iter);
+#else
+    drm_connector_list_iter_get(dev, iter);
+#endif
+}
+
+static inline
+void nv_drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
+{
+#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
+    drm_connector_list_iter_end(iter);
+#else
+    drm_connector_list_iter_put(iter);
+#endif
+}
+#endif
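+/*
+ * Illustrative only: a hypothetical walk over connectors using the iterator
+ * wrappers above, which map to the hotplug-safe drm_connector_list_iter API
+ * on kernels that provide it:
+ *
+ *     static void example_log_connectors(struct drm_device *dev)
+ *     {
+ *         struct drm_connector *connector;
+ *         struct drm_connector_list_iter conn_iter;
+ *
+ *         nv_drm_connector_list_iter_begin(dev, &conn_iter);
+ *         nv_drm_for_each_connector(connector, &conn_iter, dev) {
+ *             DRM_INFO("connector: %s\n", connector->name);
+ *         }
+ *         nv_drm_connector_list_iter_end(&conn_iter);
+ *     }
+ */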
+/*
+ * The drm_format_num_planes() function was added by commit d0d110e09629
+ * ("drm: Add drm_format_num_planes() utility function") in v3.3 (2011-12-20).
+ * The prototype was moved from drm_crtc.h to drm_fourcc.h by commit
+ * ae4df11a0f53 ("drm: Move format-related helpers to drm_fourcc.c") in v4.8
+ * (2016-06-09). drm_format_num_planes() was removed by commit 05c452c115bf
+ * ("drm: Remove users of drm_format_num_planes") in v5.3 (2019-05-16).
+ *
+ * drm_format_info() is available only from v4.10 (2016-10-18), added by
+ * commit 84770cc24f3a ("drm: Centralize format information").
+ */
+#include <drm/drm_fourcc.h>
+#include <uapi/drm/drm_fourcc.h>
+
+static inline int nv_drm_format_num_planes(uint32_t format)
+{
+#if defined(NV_DRM_FORMAT_NUM_PLANES_PRESENT)
+    return drm_format_num_planes(format);
+#else
+    const struct drm_format_info *info = drm_format_info(format);
+    return info != NULL ? info->num_planes : 1;
+#endif
+}
+
+#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
+/*
+ * DRM_FORMAT_MOD_LINEAR was also defined after the original modifier support
+ * was added to the kernel, as a more explicit alias of DRM_FORMAT_MOD_NONE.
+ */
+#if !defined(DRM_FORMAT_MOD_VENDOR_NONE)
+#define DRM_FORMAT_MOD_VENDOR_NONE 0
+#endif
+
+#if !defined(DRM_FORMAT_MOD_LINEAR)
+#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+#endif
+
+/*
+ * DRM_FORMAT_MOD_INVALID was defined after the original modifier support was
+ * added to the kernel, for use as a sentinel value.
+ */
+#if !defined(DRM_FORMAT_RESERVED)
+#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#endif
+
+#if !defined(DRM_FORMAT_MOD_INVALID)
+#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
+#endif
+
+/*
+ * DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called
+ * DRM_FORMAT_MOD_VENDOR_NV.
+ */
+#if !defined(DRM_FORMAT_MOD_VENDOR_NVIDIA)
+#define DRM_FORMAT_MOD_VENDOR_NVIDIA DRM_FORMAT_MOD_VENDOR_NV
+#endif
+
+/*
+ * DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D is a relatively new addition to the
+ * upstream kernel headers compared to the other format modifiers.
+ */
+#if !defined(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D)
+#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
+    fourcc_mod_code(NVIDIA, (0x10 |                          \
+                             ((h) & 0xf) |                   \
+                             (((k) & 0xff) << 12) |          \
+                             (((g) & 0x3) << 20) |           \
+                             (((s) & 0x1) << 22) |           \
+                             (((c) & 0x7) << 23)))
+#endif
+
+#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */
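+/*
+ * Illustrative only: the macro above packs five fields (height-in-gobs,
+ * page kind, page kind generation, sector layout, compression) into the
+ * vendor modifier bits. A hypothetical sketch of advertising one
+ * block-linear layout, with field values chosen arbitrarily:
+ *
+ *     static const uint64_t example_modifiers[] = {
+ *         // c = 0, s = 1, g = 2, k = 0x06, h = 4
+ *         DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+ *         DRM_FORMAT_MOD_LINEAR,
+ *         DRM_FORMAT_MOD_INVALID,  // sentinel terminating the list
+ *     };
+ */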
+/*
+ * drm_vma_offset_exact_lookup_locked() was added
+ * by kernel commit 2225cfe46bcc which was Signed-off-by:
+ *     Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * drm_vma_offset_exact_lookup_locked() was copied from
+ * include/drm/drm_vma_manager.h @ 2225cfe46bcc
+ * which has the following copyright and license information:
+ *
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_vma_manager.h>
+
+/**
+ * nv_drm_vma_offset_exact_lookup_locked() - Look up node by exact address
+ * @mgr: Manager object
+ * @start: Start address (page-based, not byte-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup_locked() but does not allow any offset into
+ * the node. It only returns the exact object with the given start address.
+ *
+ * RETURNS:
+ * Node at exact start address @start.
+ */
+static inline struct drm_vma_offset_node *
+nv_drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
+                                      unsigned long start,
+                                      unsigned long pages)
+{
+#if defined(NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT)
+    return drm_vma_offset_exact_lookup_locked(mgr, start, pages);
+#else
+    struct drm_vma_offset_node *node;
+
+    node = drm_vma_offset_lookup_locked(mgr, start, pages);
+    return (node && node->vm_node.start == start) ? node : NULL;
+#endif
+}
+
+static inline bool
+nv_drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+                           struct file *filp)
+{
+#if defined(NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG)
+    return drm_vma_node_is_allowed(node, filp->private_data);
+#else
+    return drm_vma_node_is_allowed(node, filp);
+#endif
+}
+
+#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
+
+#endif /* defined(NV_DRM_AVAILABLE) */
+
+#endif /* __NVIDIA_DRM_HELPER_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h
new file mode 100644
index 0000000..9cfca16
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _UAPI_NVIDIA_DRM_IOCTL_H_
+#define _UAPI_NVIDIA_DRM_IOCTL_H_
+
+#include <drm/drm.h>
+
+/*
+ * We should do our best to keep these values constant. Any change to these
+ * will be backwards incompatible with client applications that might be
+ * using them.
+ */
+#define DRM_NVIDIA_GET_CRTC_CRC32               0x00
+#define DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY      0x01
+#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY  0x02
+#define DRM_NVIDIA_GET_DEV_INFO                 0x03
+#define DRM_NVIDIA_FENCE_SUPPORTED              0x04
+#define DRM_NVIDIA_FENCE_CONTEXT_CREATE         0x05
+#define DRM_NVIDIA_GEM_FENCE_ATTACH             0x06
+#define DRM_NVIDIA_GET_CLIENT_CAPABILITY        0x08
+#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY      0x09
+#define DRM_NVIDIA_GEM_MAP_OFFSET               0x0a
+#define DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY       0x0b
+#define DRM_NVIDIA_GET_CRTC_CRC32_V2            0x0c
+#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY     0x0d
+#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT          0x0e
+
+#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY                           \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY),      \
+             struct drm_nvidia_gem_import_nvkms_memory_params)
+
+#define DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY                       \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY),  \
+             struct drm_nvidia_gem_import_userspace_memory_params)
+
+#define DRM_IOCTL_NVIDIA_GET_DEV_INFO                                      \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DEV_INFO),                 \
+             struct drm_nvidia_get_dev_info_params)
+
+/*
+ * XXX Solaris compiler has issues with DRM_IO. None of this is supported on
+ * Solaris anyway, so just skip it.
+ *
+ * 'warning: suggest parentheses around arithmetic in operand of |'
+ */
+#if defined(NV_LINUX)
+#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED                                   \
+    DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
+#else
+#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0
+#endif
+
+#define DRM_IOCTL_NVIDIA_FENCE_CONTEXT_CREATE                              \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_CONTEXT_CREATE),         \
+             struct drm_nvidia_fence_context_create_params)
+
+#define DRM_IOCTL_NVIDIA_GEM_FENCE_ATTACH                                  \
+    DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_FENCE_ATTACH),              \
+            struct drm_nvidia_gem_fence_attach_params)
+
+#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY                             \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY),        \
+             struct drm_nvidia_get_client_capability_params)
+
+#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32                                    \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32),               \
+             struct drm_nvidia_get_crtc_crc32_params)
+
+#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2                                 \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2),            \
+             struct drm_nvidia_get_crtc_crc32_v2_params)
+
+#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY                           \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY),      \
+             struct drm_nvidia_gem_export_nvkms_memory_params)
+
+#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET                                    \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET),               \
+             struct drm_nvidia_gem_map_offset_params)
+
+#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY                            \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY),       \
+             struct drm_nvidia_gem_alloc_nvkms_memory_params)
+
+#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY                          \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY),     \
+             struct drm_nvidia_gem_export_dmabuf_memory_params)
+
+#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT                               \
+    DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT),          \
+             struct drm_nvidia_gem_identify_object_params)
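+/*
+ * Illustrative only: a hypothetical userspace query through one of the
+ * ioctls above, using the libdrm drmIoctl() wrapper (which retries on
+ * EINTR). Error handling is elided.
+ *
+ *     struct drm_nvidia_get_dev_info_params params = { 0 };
+ *
+ *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GET_DEV_INFO, &params) == 0) {
+ *         printf("gpu_id: 0x%x, card%u\n",
+ *                params.gpu_id, params.primary_index);
+ *     }
+ */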
+struct drm_nvidia_gem_import_nvkms_memory_params {
+    uint64_t mem_size;           /* IN */
+
+    uint64_t nvkms_params_ptr;   /* IN */
+    uint64_t nvkms_params_size;  /* IN */
+
+    uint32_t handle;             /* OUT */
+
+    uint32_t __pad;
+};
+
+struct drm_nvidia_gem_import_userspace_memory_params {
+    uint64_t size;               /* IN Size of memory in bytes */
+    uint64_t address;            /* IN Virtual address of userspace memory */
+    uint32_t handle;             /* OUT Handle to gem object */
+};
+
+struct drm_nvidia_get_dev_info_params {
+    uint32_t gpu_id;             /* OUT */
+    uint32_t primary_index;      /* OUT; the "card%d" value */
+
+    /* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these */
+    uint32_t generic_page_kind;    /* OUT */
+    uint32_t page_kind_generation; /* OUT */
+    uint32_t sector_layout;        /* OUT */
+};
+
+struct drm_nvidia_fence_context_create_params {
+    uint32_t handle;             /* OUT GEM handle to fence context */
+
+    uint32_t index;              /* IN Index of semaphore to use for fencing */
+    uint64_t size;               /* IN Size of semaphore surface in bytes */
+
+    /* Params for importing userspace semaphore surface */
+    uint64_t import_mem_nvkms_params_ptr;  /* IN */
+    uint64_t import_mem_nvkms_params_size; /* IN */
+
+    /* Params for creating software signaling event */
+    uint64_t event_nvkms_params_ptr;       /* IN */
+    uint64_t event_nvkms_params_size;      /* IN */
+};
+
+struct drm_nvidia_gem_fence_attach_params {
+    uint32_t handle;               /* IN GEM handle to attach fence to */
+    uint32_t fence_context_handle; /* IN GEM handle to fence context on which the fence runs */
+    uint32_t sem_thresh;           /* IN Semaphore value to reach before signal */
+};
+
+struct drm_nvidia_get_client_capability_params {
+    uint64_t capability;         /* IN Client capability enum */
+    uint64_t value;              /* OUT Client capability value */
+};
+
+/* Struct that stores a CRC32 value and whether hardware supports reading it */
+struct drm_nvidia_crtc_crc32 {
+    uint32_t value;              /* Read value, undefined if supported is false */
+    uint8_t supported;           /* Supported boolean, true if readable by hardware */
+};
+
+struct drm_nvidia_crtc_crc32_v2_out {
+    struct drm_nvidia_crtc_crc32 compositorCrc32;      /* OUT compositor hardware CRC32 value */
+    struct drm_nvidia_crtc_crc32 rasterGeneratorCrc32; /* OUT raster generator CRC32 value */
+    struct drm_nvidia_crtc_crc32 outputCrc32;          /* OUT SF/SOR CRC32 value */
+};
+
+struct drm_nvidia_get_crtc_crc32_v2_params {
+    uint32_t crtc_id;                          /* IN CRTC identifier */
+    struct drm_nvidia_crtc_crc32_v2_out crc32; /* OUT CRC32 output structure */
+};
+
+struct drm_nvidia_get_crtc_crc32_params {
+    uint32_t crtc_id;            /* IN CRTC identifier */
+    uint32_t crc32;              /* OUT CRC32 value */
+};
+
+struct drm_nvidia_gem_export_nvkms_memory_params {
+    uint32_t handle;             /* IN */
+    uint32_t __pad;
+
+    uint64_t nvkms_params_ptr;   /* IN */
+    uint64_t nvkms_params_size;  /* IN */
+};
+
+struct drm_nvidia_gem_map_offset_params {
+    uint32_t handle;             /* IN Handle to gem object */
+    uint32_t __pad;
+
+    uint64_t offset;             /* OUT Fake offset */
+};
+
+#define NV_GEM_ALLOC_NO_SCANOUT (1 << 0)
+
+struct drm_nvidia_gem_alloc_nvkms_memory_params {
+    uint32_t handle;             /* OUT */
+    uint8_t  block_linear;       /* IN */
+    uint8_t  compressible;       /* IN/OUT */
+    uint16_t __pad;
+
+    uint64_t memory_size;        /* IN */
+    uint32_t flags;              /* IN */
+};
+
+struct drm_nvidia_gem_export_dmabuf_memory_params {
+    uint32_t handle;             /* IN GEM handle */
+    uint32_t __pad;
+
+    uint64_t nvkms_params_ptr;   /* IN */
+    uint64_t nvkms_params_size;  /* IN */
+};
+
+typedef enum {
+    NV_GEM_OBJECT_NVKMS,
+    NV_GEM_OBJECT_DMABUF,
+    NV_GEM_OBJECT_USERMEMORY,
+
+    NV_GEM_OBJECT_UNKNOWN = 0x7fffffff /* Force size of 32 bits. */
+} drm_nvidia_gem_object_type;
+
+struct drm_nvidia_gem_identify_object_params {
+    uint32_t handle;                         /* IN GEM handle */
+    drm_nvidia_gem_object_type object_type;  /* OUT GEM object type */
+};
+
+#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */
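Most of the structures above place an explicit __pad member after a lone
uint32_t so that every uint64_t field lands on a natural 8-byte boundary;
this keeps the ioctl layout identical for 32-bit and 64-bit userspace, where
implicit compiler padding could otherwise differ. A minimal sketch of the
idiom (hypothetical struct, not part of the header above):

    struct example_params {
        uint32_t handle;   /* 4 bytes */
        uint32_t __pad;    /* explicit filler up to an 8-byte boundary */
        uint64_t size;     /* now naturally aligned on both ABIs */
    };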
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c
new file mode 100644
index 0000000..97bc920
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "nvidia-drm-os-interface.h"
+#include "nvidia-drm.h"
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include <linux/vmalloc.h>
+
+#include "nv-mm.h"
+
+MODULE_PARM_DESC(
+    modeset,
+    "Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
+bool nv_drm_modeset_module_param = false;
+module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
+
+void *nv_drm_calloc(size_t nmemb, size_t size)
+{
+    return kzalloc(nmemb * size, GFP_KERNEL);
+}
+
+void nv_drm_free(void *ptr)
+{
+    if (IS_ERR(ptr)) {
+        return;
+    }
+
+    kfree(ptr);
+}
+char *nv_drm_asprintf(const char *fmt, ...)
+{
+    va_list ap;
+    char *p;
+
+    va_start(ap, fmt);
+    p = kvasprintf(GFP_KERNEL, fmt, ap);
+    va_end(ap);
+
+    return p;
+}
+
+#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+  #define WRITE_COMBINE_FLUSH()    asm volatile("sfence":::"memory")
+#elif defined(NVCPU_FAMILY_ARM)
+  #if defined(NVCPU_ARM)
+    #define WRITE_COMBINE_FLUSH()  { dsb(); outer_sync(); }
+  #elif defined(NVCPU_AARCH64)
+    #define WRITE_COMBINE_FLUSH()  mb()
+  #endif
+#elif defined(NVCPU_PPC64LE)
+  #define WRITE_COMBINE_FLUSH()    asm volatile("sync":::"memory")
+#endif
+
+void nv_drm_write_combine_flush(void)
+{
+    WRITE_COMBINE_FLUSH();
+}
+
+int nv_drm_lock_user_pages(unsigned long address,
+                           unsigned long pages_count, struct page ***pages)
+{
+    struct mm_struct *mm = current->mm;
+    struct page **user_pages;
+    int pages_pinned;
+
+    user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
+
+    if (user_pages == NULL) {
+        return -ENOMEM;
+    }
+
+    nv_mmap_read_lock(mm);
+
+    pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
+                                     user_pages, NULL);
+    nv_mmap_read_unlock(mm);
+
+    if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
+        goto failed;
+    }
+
+    *pages = user_pages;
+
+    return 0;
+
+failed:
+
+    if (pages_pinned > 0) {
+        int i;
+
+        for (i = 0; i < pages_pinned; i++) {
+            NV_UNPIN_USER_PAGE(user_pages[i]);
+        }
+    }
+
+    nv_drm_free(user_pages);
+
+    return (pages_pinned < 0) ? pages_pinned : -EINVAL;
+}
+
+void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
+{
+    unsigned long i;
+
+    for (i = 0; i < pages_count; i++) {
+        set_page_dirty_lock(pages[i]);
+        NV_UNPIN_USER_PAGE(pages[i]);
+    }
+
+    nv_drm_free(pages);
+}
+
+void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
+{
+    return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
+}
+
+void nv_drm_vunmap(void *address)
+{
+    vunmap(address);
+}
+
+#endif /* NV_DRM_AVAILABLE */
+
+/*************************************************************************
+ * Linux loading support code.
+ *************************************************************************/
+
+static int __init nv_linux_drm_init(void)
+{
+    return nv_drm_init();
+}
+
+static void __exit nv_linux_drm_exit(void)
+{
+    nv_drm_exit();
+}
+
+module_init(nv_linux_drm_init);
+module_exit(nv_linux_drm_exit);
+
+#if defined(MODULE_LICENSE)
+MODULE_LICENSE("Dual MIT/GPL");
+#endif
+#if defined(MODULE_INFO)
+MODULE_INFO(supported, "external");
+#endif
+#if defined(MODULE_VERSION)
+MODULE_VERSION(NV_VERSION_STRING);
+#endif
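The modeset parameter declared above is the switch that gates the atomic
modeset feature, and it defaults to off. Assuming the module is installed
under its usual name nvidia-drm, the conventional way to enable it is a
modprobe option (standard modprobe.d syntax, shown for illustration):

    # /etc/modprobe.d/nvidia-drm.conf
    options nvidia-drm modeset=1

or, equivalently, nvidia-drm.modeset=1 on the kernel command line.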
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c
new file mode 100644
index 0000000..9132af9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-modeset.h"
+#include "nvidia-drm-crtc.h"
+#include "nvidia-drm-os-interface.h"
+#include "nvidia-drm-helper.h"
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
+#include <drm/drm_vblank.h>
+#endif
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+
+struct nv_drm_atomic_state {
+    struct NvKmsKapiRequestedModeSetConfig config;
+    struct drm_atomic_state base;
+};
+
+static inline struct nv_drm_atomic_state *to_nv_atomic_state(
+    struct drm_atomic_state *state)
+{
+    return container_of(state, struct nv_drm_atomic_state, base);
+}
+
+struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev)
+{
+    struct nv_drm_atomic_state *nv_state =
+        nv_drm_calloc(1, sizeof(*nv_state));
+
+    if (nv_state == NULL || drm_atomic_state_init(dev, &nv_state->base) < 0) {
+        nv_drm_free(nv_state);
+        return NULL;
+    }
+
+    return &nv_state->base;
+}
+
+void nv_drm_atomic_state_clear(struct drm_atomic_state *state)
+{
+    drm_atomic_state_default_clear(state);
+}
+
+void nv_drm_atomic_state_free(struct drm_atomic_state *state)
+{
+    struct nv_drm_atomic_state *nv_state =
+        to_nv_atomic_state(state);
+    drm_atomic_state_default_release(state);
+    nv_drm_free(nv_state);
+}
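+/*
+ * Illustrative only: the subclassing pattern used above. Embedding the base
+ * struct and recovering the wrapper with container_of() is the usual way DRM
+ * drivers attach driver-private data to core objects; the same idiom,
+ * reduced to a hypothetical minimum:
+ *
+ *     struct example_state {
+ *         int private_data;              // driver-private payload
+ *         struct drm_atomic_state base;  // embedded core object
+ *     };
+ *
+ *     // Given a pointer to 'base', recover the containing wrapper.
+ *     struct example_state *wrapper =
+ *         container_of(state, struct example_state, base);
+ */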
+/**
+ * __will_generate_flip_event - Check whether an event is going to be
+ * generated by hardware when it flips from the old crtc/plane state to the
+ * current one. This function is called after drm_atomic_helper_swap_state(),
+ * therefore the new state has already been swapped into the current state.
+ */
+static bool __will_generate_flip_event(struct drm_crtc *crtc,
+                                       struct drm_crtc_state *old_crtc_state)
+{
+    struct drm_crtc_state *new_crtc_state = crtc->state;
+    struct nv_drm_crtc_state *nv_new_crtc_state =
+        to_nv_crtc_state(new_crtc_state);
+    struct drm_plane_state *old_plane_state = NULL;
+    struct drm_plane *plane = NULL;
+    struct drm_plane *primary_plane = crtc->primary;
+    bool primary_event = false;
+    bool overlay_event = false;
+    int i;
+
+    if (!old_crtc_state->active && !new_crtc_state->active) {
+        /*
+         * The crtc is not active in the old and new states, therefore all
+         * planes are disabled; hardware can not generate flip events.
+         */
+        return false;
+    }
+
+    /* Find out whether primary & overlay flip done events will be generated. */
+    nv_drm_for_each_plane_in_state(old_crtc_state->state,
+                                   plane, old_plane_state, i) {
+        if (old_plane_state->crtc != crtc) {
+            continue;
+        }
+
+        if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+            continue;
+        }
+
+        /*
+         * Hardware generates flip events only for those planes which were
+         * previously active.
+         */
+        if (old_crtc_state->active && old_plane_state->fb != NULL) {
+            nv_new_crtc_state->nv_flip->pending_events++;
+        }
+    }
+
+    return nv_new_crtc_state->nv_flip->pending_events != 0;
+}
+
+static int __nv_drm_put_back_post_fence_fd(
+    struct nv_drm_plane_state *plane_state,
+    const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
+{
+    int fd = layer_reply_config->postSyncptFd;
+
+    if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) {
+        if (put_user(fd, plane_state->fd_user_ptr)) {
+            return -EFAULT;
+        }
+
+        /*! Set back to NULL and let set_property specify it again. */
+        plane_state->fd_user_ptr = NULL;
+    }
+    return 0;
+}
+
+static int __nv_drm_get_syncpt_data(
+    struct nv_drm_device *nv_dev,
+    struct drm_crtc *crtc,
+    struct drm_crtc_state *old_crtc_state,
+    struct NvKmsKapiRequestedModeSetConfig *requested_config,
+    struct NvKmsKapiModeSetReplyConfig *reply_config)
+{
+    struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+    struct NvKmsKapiHeadReplyConfig *head_reply_config;
+    struct nv_drm_plane_state *plane_state;
+    struct drm_crtc_state *new_crtc_state = crtc->state;
+    struct drm_plane_state *old_plane_state = NULL;
+    struct drm_plane_state *new_plane_state = NULL;
+    struct drm_plane *plane = NULL;
+    int i, ret;
+
+    if (!old_crtc_state->active && !new_crtc_state->active) {
+        /*
+         * The crtc is not active in the old and new states, therefore all
+         * planes are disabled; exit early.
+         */
+        return 0;
+    }
+
+    head_reply_config = &reply_config->headReplyConfig[nv_crtc->head];
+
+    nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
+        struct nv_drm_plane *nv_plane = to_nv_plane(plane);
+
+        if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) {
+            continue;
+        }
+
+        new_plane_state = plane->state;
+
+        if (new_plane_state->crtc != crtc) {
+            continue;
+        }
+
+        plane_state = to_nv_drm_plane_state(new_plane_state);
+
+        ret = __nv_drm_put_back_post_fence_fd(
+            plane_state,
+            &head_reply_config->layerReplyConfig[nv_plane->layer_idx]);
+
+        if (ret != 0) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * nv_drm_atomic_apply_modeset_config - validate/commit modeset config
+ * @dev: DRM device
+ * @state: atomic state tracking the atomic update
+ * @commit: commit/check modeset config associated with the atomic update
+ *
+ * @state tracks the atomic update and the modeset objects affected by it,
+ * but the state of the modeset objects it contains depends on the current
+ * stage of the update.
+ * At the commit stage, the proposed state is already stored in the current
+ * state, and @state contains the old state for all affected modeset objects.
+ * At the check/validation stage, @state contains the proposed state for all
+ * affected objects.
+ *
+ * Sequence of an atomic update -
+ *   1. Check/validate the proposed atomic state,
+ *   2. Do any other steps that might fail,
+ *   3. Put the proposed state into the current state pointers,
+ *   4. Actually commit the hardware state,
+ *   5. Clean up the old state.
+ *
+ * This function is called at stages (1) and (4); at stage (4) it runs after
+ * drm_atomic_helper_swap_state().
+ */ +static int +nv_drm_atomic_apply_modeset_config(struct drm_device *dev, + struct drm_atomic_state *state, + bool commit) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct NvKmsKapiRequestedModeSetConfig *requested_config = + &(to_nv_atomic_state(state)->config); + struct NvKmsKapiModeSetReplyConfig reply_config = { }; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i; + int ret; + + memset(requested_config, 0, sizeof(*requested_config)); + + /* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */ + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + /* + * When committing a state, the new state is already stored in + * crtc->state. When checking a proposed state, the proposed state is + * stored in crtc_state. + */ + struct drm_crtc_state *new_crtc_state = + commit ? crtc->state : crtc_state; + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + requested_config->headRequestedConfig[nv_crtc->head] = + to_nv_crtc_state(new_crtc_state)->req_config; + + requested_config->headsMask |= 1 << nv_crtc->head; + + if (commit) { + struct drm_crtc_state *old_crtc_state = crtc_state; + struct nv_drm_crtc_state *nv_new_crtc_state = + to_nv_crtc_state(new_crtc_state); + + nv_new_crtc_state->nv_flip->event = new_crtc_state->event; + nv_new_crtc_state->nv_flip->pending_events = 0; + new_crtc_state->event = NULL; + + /* + * If flip event will be generated by hardware + * then defer flip object processing to flip event from hardware. + */ + if (__will_generate_flip_event(crtc, old_crtc_state)) { + nv_drm_crtc_enqueue_flip(nv_crtc, + nv_new_crtc_state->nv_flip); + + nv_new_crtc_state->nv_flip = NULL; + } + } + } + + if (commit && nvKms->systemInfo.bAllowWriteCombining) { + /* + * XXX This call is required only if dumb buffer is going + * to be presented. + */ + nv_drm_write_combine_flush(); + } + + if (!nvKms->applyModeSetConfig(nv_dev->pDevice, + requested_config, + &reply_config, + commit)) { + return -EINVAL; + } + + if (commit && nv_dev->supportsSyncpts) { + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + /*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */ + ret = __nv_drm_get_syncpt_data( + nv_dev, crtc, crtc_state, requested_config, &reply_config); + if (ret != 0) { + return ret; + } + } + } + + return 0; +} + +int nv_drm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ret = 0; + + if ((ret = drm_atomic_helper_check(dev, state)) != 0) { + goto done; + } + + ret = nv_drm_atomic_apply_modeset_config(dev, + state, false /* commit */); + +done: + return ret; +} + +/** + * __nv_drm_handle_flip_event - handle flip occurred event + * @nv_crtc: crtc on which flip has been occurred + * + * This handler dequeues the first nv_drm_flip from the crtc's flip_list, + * generates an event if requested at flip time, and frees the nv_drm_flip. + */ +static void __nv_drm_handle_flip_event(struct nv_drm_crtc *nv_crtc) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_flip *nv_flip; + + /* + * Acquire event_lock before nv_flip object dequeue, otherwise immediate + * flip event delivery from nv_drm_atomic_commit() races ahead and + * messes up with event delivery order. 
+ */ + spin_lock(&dev->event_lock); + nv_flip = nv_drm_crtc_dequeue_flip(nv_crtc); + if (likely(nv_flip != NULL)) { + struct nv_drm_flip *nv_deferred_flip, *nv_next_deferred_flip; + + if (nv_flip->event != NULL) { + drm_crtc_send_vblank_event(&nv_crtc->base, nv_flip->event); + } + + /* + * Process flips that were deferred until processing of this nv_flip + * object. + */ + list_for_each_entry_safe(nv_deferred_flip, + nv_next_deferred_flip, + &nv_flip->deferred_flip_list, list_entry) { + + if (nv_deferred_flip->event != NULL) { + drm_crtc_send_vblank_event(&nv_crtc->base, + nv_deferred_flip->event); + } + list_del(&nv_deferred_flip->list_entry); + + nv_drm_free(nv_deferred_flip); + } + } + spin_unlock(&dev->event_lock); + + wake_up_all(&nv_dev->flip_event_wq); + + nv_drm_free(nv_flip); +} + +int nv_drm_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock) +{ + int ret = -EBUSY; + + int i; + struct drm_crtc *crtc = NULL; + struct drm_crtc_state *crtc_state = NULL; + struct nv_drm_device *nv_dev = to_nv_device(dev); + + /* + * drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY + * for nonblocking commit if previous updates (commit tasks/flip event) are + * pending. In case of blocking commits it mandates to wait for previous + * updates to complete. + */ + if (nonblock) { + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + /* + * Here you aren't required to hold nv_drm_crtc::flip_list_lock + * because: + * + * The core DRM driver acquires lock for all affected crtcs before + * calling into ->commit() hook, therefore it is not possible for + * other threads to call into ->commit() hook affecting same crtcs + * and enqueue flip objects into flip_list - + * + * nv_drm_atomic_commit_internal() + * |-> nv_drm_atomic_apply_modeset_config(commit=true) + * |-> nv_drm_crtc_enqueue_flip() + * + * Only possibility is list_empty check races with code path + * dequeuing flip object - + * + * __nv_drm_handle_flip_event() + * |-> nv_drm_crtc_dequeue_flip() + * + * But this race condition can't lead list_empty() to return + * incorrect result. nv_drm_crtc_dequeue_flip() in the middle of + * updating the list could not trick us into thinking the list is + * empty when it isn't. + */ + if (!list_empty(&nv_crtc->flip_list)) { + return -EBUSY; + } + } + } + +#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG) + + /* + * nv_drm_atomic_commit_internal() + * implements blocking/non-blocking atomic commit using + * nv_drm_crtc::flip_list, it does not require any help from core DRM + * helper functions to stall commit processing. Therefore passing false to + * 'stall' parameter. + * In this context, failure from drm_atomic_helper_swap_state() is not + * expected. + */ + +#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT) + ret = drm_atomic_helper_swap_state(state, false /* stall */); + if (WARN_ON(ret != 0)) { + return ret; + } +#else + drm_atomic_helper_swap_state(state, false /* stall */); +#endif + +#else + drm_atomic_helper_swap_state(dev, state); +#endif + + /* + * nv_drm_atomic_commit_internal() must not return failure after + * calling drm_atomic_helper_swap_state(). + */ + + if ((ret = nv_drm_atomic_apply_modeset_config( + dev, + state, true /* commit */)) != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to apply atomic modeset. 
Error code: %d", + ret); + + goto done; + } + + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + struct nv_drm_crtc_state *nv_new_crtc_state = + to_nv_crtc_state(crtc->state); + + /* + * If nv_drm_atomic_apply_modeset_config() hasn't consumed the flip + * object, no event will be generated for this flip, and we need process + * it: + */ + + if (nv_new_crtc_state->nv_flip != NULL) { + /* + * First, defer processing of all pending flips for this crtc until + * last flip in the queue has been processed. This is to ensure a + * correct order in event delivery. + */ + spin_lock(&nv_crtc->flip_list_lock); + if (!list_empty(&nv_crtc->flip_list)) { + struct nv_drm_flip *nv_last_flip = + list_last_entry(&nv_crtc->flip_list, + struct nv_drm_flip, list_entry); + + list_add(&nv_new_crtc_state->nv_flip->list_entry, + &nv_last_flip->deferred_flip_list); + + nv_new_crtc_state->nv_flip = NULL; + } + spin_unlock(&nv_crtc->flip_list_lock); + } + + if (nv_new_crtc_state->nv_flip != NULL) { + /* + * Then, if no more pending flips for this crtc, deliver event for the + * current flip. + */ + if (nv_new_crtc_state->nv_flip->event != NULL) { + spin_lock(&dev->event_lock); + drm_crtc_send_vblank_event(crtc, + nv_new_crtc_state->nv_flip->event); + spin_unlock(&dev->event_lock); + } + + nv_drm_free(nv_new_crtc_state->nv_flip); + nv_new_crtc_state->nv_flip = NULL; + } + + if (!nonblock) { + /* + * Here you aren't required to hold nv_drm_crtc::flip_list_lock + * because: + * + * The core DRM driver acquires lock for all affected crtcs before + * calling into ->commit() hook, therefore it is not possible for + * other threads to call into ->commit() hook affecting same crtcs + * and enqueue flip objects into flip_list - + * + * nv_drm_atomic_commit_internal() + * |-> nv_drm_atomic_apply_modeset_config(commit=true) + * |-> nv_drm_crtc_enqueue_flip() + * + * Only possibility is list_empty check races with code path + * dequeuing flip object - + * + * __nv_drm_handle_flip_event() + * |-> nv_drm_crtc_dequeue_flip() + * + * But this race condition can't lead list_empty() to return + * incorrect result. nv_drm_crtc_dequeue_flip() in the middle of + * updating the list could not trick us into thinking the list is + * empty when it isn't. + */ + if (wait_event_timeout( + nv_dev->flip_event_wq, + list_empty(&nv_crtc->flip_list), + 3 * HZ /* 3 second */) == 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Flip event timeout on head %u", nv_crtc->head); + } + } + } + +done: + +#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT) + /* + * If ref counting is present, state will be freed when the caller + * drops its reference after we return. + */ +#else + drm_atomic_state_free(state); +#endif + + return 0; +} + +void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev, + NvU32 head, NvU32 plane) +{ + struct nv_drm_crtc *nv_crtc = nv_drm_crtc_lookup(nv_dev, head); + + if (NV_DRM_WARN(nv_crtc == NULL)) { + return; + } + + __nv_drm_handle_flip_event(nv_crtc); +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h new file mode 100644 index 0000000..40df631 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_MODESET_H__ +#define __NVIDIA_DRM_MODESET_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvkms-kapi.h" + +struct drm_device; +struct drm_atomic_state; + +struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev); +void nv_drm_atomic_state_clear(struct drm_atomic_state *state); +void nv_drm_atomic_state_free(struct drm_atomic_state *state); + +int nv_drm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state); + +int nv_drm_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, bool nonblock); + + +void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev, + NvU32 head, NvU32 plane); + +int nv_drm_shut_down_all_crtcs(struct drm_device *dev); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_MODESET_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h new file mode 100644 index 0000000..ac52752 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NVIDIA_DRM_OS_INTERFACE_H__
+#define __NVIDIA_DRM_OS_INTERFACE_H__
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
+
+#include "nvtypes.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+struct page;
+
+/* Set to true when the atomic modeset feature is enabled. */
+extern bool nv_drm_modeset_module_param;
+
+void *nv_drm_calloc(size_t nmemb, size_t size);
+
+void nv_drm_free(void *ptr);
+
+char *nv_drm_asprintf(const char *fmt, ...);
+
+void nv_drm_write_combine_flush(void);
+
+int nv_drm_lock_user_pages(unsigned long address,
+                           unsigned long pages_count, struct page ***pages);
+
+void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
+
+void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
+
+void nv_drm_vunmap(void *address);
+
+#endif
+
+#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c
new file mode 100644
index 0000000..c1257f3
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvidia-drm-conftest.h"
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include "nvidia-drm-priv.h"
+#include "nvidia-drm-ioctl.h"
+#include "nvidia-drm-gem.h"
+#include "nvidia-drm-prime-fence.h"
+#include "nvidia-dma-resv-helper.h"
+
+#if defined(NV_DRM_FENCE_AVAILABLE)
+
+#include "nvidia-dma-fence-helper.h"
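+/*
+ * Illustrative only: background on the kernel fence primitive wrapped by the
+ * nv_dma_fence_* helpers used below. On recent kernels these map to the
+ * dma_fence API; a hypothetical minimal producer looks like:
+ *
+ *     static DEFINE_SPINLOCK(example_lock);
+ *
+ *     // A fence context plus a sequence number identify a timeline.
+ *     u64 ctx = dma_fence_context_alloc(1);
+ *
+ *     dma_fence_init(fence, &example_fence_ops, &example_lock, ctx, 1);
+ *     ...
+ *     dma_fence_signal(fence);  // wakes waiters, marks fence signaled
+ *     dma_fence_put(fence);     // drops the producer's reference
+ */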
+
+struct nv_drm_prime_fence {
+    struct list_head list_entry;
+    nv_dma_fence_t base;
+    spinlock_t lock;
+};
+
+static inline
+struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence)
+{
+    return container_of(fence, struct nv_drm_prime_fence, base);
+}
+
+static const char*
+nv_drm_gem_prime_fence_op_get_driver_name(nv_dma_fence_t *fence)
+{
+    return "NVIDIA";
+}
+
+static const char*
+nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence)
+{
+    return "nvidia.prime";
+}
+
+static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence)
+{
+    // DO NOTHING
+    return true;
+}
+
+static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence)
+{
+    struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence);
+    nv_drm_free(nv_fence);
+}
+
+static signed long
+nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
+                               bool intr, signed long timeout)
+{
+    /*
+     * If the waiter requests to wait with no timeout, force a timeout to
+     * ensure that it won't get stuck forever in the kernel if something
+     * were to go wrong with signaling, such as a malicious userspace not
+     * releasing the semaphore.
+     *
+     * 96 ms (roughly 6 frames @ 60 Hz) is arbitrarily chosen to be long
+     * enough that it should never get hit during normal operation, but not
+     * so long that the system becomes unresponsive.
+     */
+    return nv_dma_fence_default_wait(fence, intr,
+                              (timeout == MAX_SCHEDULE_TIMEOUT) ?
+                                  msecs_to_jiffies(96) : timeout);
+}
+
+static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = {
+    .get_driver_name = nv_drm_gem_prime_fence_op_get_driver_name,
+    .get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
+    .enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
+    .release = nv_drm_gem_prime_fence_op_release,
+    .wait = nv_drm_gem_prime_fence_op_wait,
+};
+
+static inline void
+__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
+{
+    list_del(&nv_fence->list_entry);
+    nv_dma_fence_signal(&nv_fence->base);
+    nv_dma_fence_put(&nv_fence->base);
+}
+
+static void nv_drm_gem_prime_force_fence_signal(
+    struct nv_drm_fence_context *nv_fence_context)
+{
+    WARN_ON(!spin_is_locked(&nv_fence_context->lock));
+
+    while (!list_empty(&nv_fence_context->pending)) {
+        struct nv_drm_prime_fence *nv_fence = list_first_entry(
+            &nv_fence_context->pending,
+            typeof(*nv_fence),
+            list_entry);
+
+        __nv_drm_prime_fence_signal(nv_fence);
+    }
+}
+
+static void nv_drm_gem_prime_fence_event
+(
+    void *dataPtr,
+    NvU32 dataU32
+)
+{
+    struct nv_drm_fence_context *nv_fence_context = dataPtr;
+
+    spin_lock(&nv_fence_context->lock);
+
+    while (!list_empty(&nv_fence_context->pending)) {
+        struct nv_drm_prime_fence *nv_fence = list_first_entry(
+            &nv_fence_context->pending,
+            typeof(*nv_fence),
+            list_entry);
+
+        /* Index into surface with 16 byte stride */
+        unsigned int seqno = *((nv_fence_context->pLinearAddress) +
+                               (nv_fence_context->fenceSemIndex * 4));
+
+        if (nv_fence->base.seqno > seqno) {
+            /*
+             * Fences in the list are placed in increasing order of
+             * sequence number; break out of the loop at the first fence
+             * that is not yet ready to signal.
+             */
+            break;
+        }
+
+        __nv_drm_prime_fence_signal(nv_fence);
+    }
+
+    spin_unlock(&nv_fence_context->lock);
+}
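
    /*
     * Editor's sketch (hypothetical helper, not in the patch): the event
     * callback above reads the semaphore payload with hand-computed
     * arithmetic -- pLinearAddress is an NvU32 pointer and the slots are 16
     * bytes apart, hence the '* 4'. Making that stride explicit:
     */
    #include <linux/types.h>

    #define EXAMPLE_SEM_SLOT_BYTE_STRIDE 16u

    static inline u32 example_read_sem_payload(const volatile u32 *base,
                                               u64 slot_index)
    {
        /* An element stride of 16 / sizeof(u32) == 4 u32s per slot. */
        return base[slot_index * (EXAMPLE_SEM_SLOT_BYTE_STRIDE / sizeof(u32))];
    }
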
+
+static inline struct nv_drm_fence_context *__nv_drm_fence_context_new(
+    struct nv_drm_device *nv_dev,
+    struct drm_nvidia_fence_context_create_params *p)
+{
+    struct nv_drm_fence_context *nv_fence_context;
+    struct NvKmsKapiMemory *pSemSurface;
+    NvU32 *pLinearAddress;
+
+    /* Allocate backup nvkms resources */
+
+    pSemSurface = nvKms->importMemory(nv_dev->pDevice,
+                                      p->size,
+                                      p->import_mem_nvkms_params_ptr,
+                                      p->import_mem_nvkms_params_size);
+    if (!pSemSurface) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to import fence semaphore surface");
+
+        goto failed;
+    }
+
+    if (!nvKms->mapMemory(nv_dev->pDevice,
+                          pSemSurface,
+                          NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                          (void **) &pLinearAddress)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to map fence semaphore surface");
+
+        goto failed_to_map_memory;
+    }
+
+    /*
+     * Allocate a fence context object, initialize it and allocate channel
+     * event for it.
+     */
+
+    if ((nv_fence_context = nv_drm_calloc(
+                    1,
+                    sizeof(*nv_fence_context))) == NULL) {
+        goto failed_alloc_fence_context;
+    }
+
+    /*
+     * nv_dma_fence_context_alloc() cannot fail, so we do not need
+     * to check a return value.
+     */
+
+    *nv_fence_context = (struct nv_drm_fence_context) {
+        .nv_dev = nv_dev,
+        .context = nv_dma_fence_context_alloc(1),
+        .pSemSurface = pSemSurface,
+        .pLinearAddress = pLinearAddress,
+        .fenceSemIndex = p->index,
+    };
+
+    INIT_LIST_HEAD(&nv_fence_context->pending);
+
+    spin_lock_init(&nv_fence_context->lock);
+
+    /*
+     * Except for 'cb', the fence context should be completely initialized
+     * before channel event allocation, because the fence context may start
+     * receiving events immediately after allocation.
+     *
+     * There is no simultaneous read/write access to 'cb', so it does not
+     * require spin-lock protection.
+     */
+    nv_fence_context->cb =
+        nvKms->allocateChannelEvent(nv_dev->pDevice,
+                                    nv_drm_gem_prime_fence_event,
+                                    nv_fence_context,
+                                    p->event_nvkms_params_ptr,
+                                    p->event_nvkms_params_size);
+    if (!nv_fence_context->cb) {
+        NV_DRM_DEV_LOG_ERR(nv_dev,
+                           "Failed to allocate fence signaling event");
+        goto failed_to_allocate_channel_event;
+    }
+
+    return nv_fence_context;
+
+failed_to_allocate_channel_event:
+    nv_drm_free(nv_fence_context);
+
+failed_alloc_fence_context:
+
+    nvKms->unmapMemory(nv_dev->pDevice,
+                       pSemSurface,
+                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                       (void *) pLinearAddress);
+
+failed_to_map_memory:
+    nvKms->freeMemory(nv_dev->pDevice, pSemSurface);
+
+failed:
+    return NULL;
+}
+
+static void __nv_drm_fence_context_destroy(
+    struct nv_drm_fence_context *nv_fence_context)
+{
+    struct nv_drm_device *nv_dev = nv_fence_context->nv_dev;
+
+    /*
+     * Free the channel event before destroying the fence context;
+     * otherwise the event callback could continue to be invoked.
+     */
+    nvKms->freeChannelEvent(nv_dev->pDevice, nv_fence_context->cb);
+
+    /* Force signal all pending fences and empty pending list */
+    spin_lock(&nv_fence_context->lock);
+
+    nv_drm_gem_prime_force_fence_signal(nv_fence_context);
+
+    spin_unlock(&nv_fence_context->lock);
+
+    /* Free nvkms resources */
+
+    nvKms->unmapMemory(nv_dev->pDevice,
+                       nv_fence_context->pSemSurface,
+                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
+                       (void *) nv_fence_context->pLinearAddress);
+
+    nvKms->freeMemory(nv_dev->pDevice, nv_fence_context->pSemSurface);
+
+    nv_drm_free(nv_fence_context);
+}
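
    /*
     * Editor's sketch of the cleanup-label idiom used by
     * __nv_drm_fence_context_new() above: acquire resources in order, and
     * on failure jump to the label that releases everything acquired so
     * far, in reverse order. All example_* names are hypothetical.
     */
    struct example_ctx;

    int example_acquire_a(struct example_ctx *ctx);
    int example_acquire_b(struct example_ctx *ctx);
    int example_acquire_c(struct example_ctx *ctx);
    void example_release_a(struct example_ctx *ctx);
    void example_release_b(struct example_ctx *ctx);

    static int example_setup(struct example_ctx *ctx)
    {
        int ret;

        ret = example_acquire_a(ctx);
        if (ret != 0)
            goto failed;

        ret = example_acquire_b(ctx);
        if (ret != 0)
            goto failed_release_a;

        ret = example_acquire_c(ctx);
        if (ret != 0)
            goto failed_release_b;

        return 0;

    failed_release_b:
        example_release_b(ctx);
    failed_release_a:
        example_release_a(ctx);
    failed:
        return ret;
    }
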
+
+static nv_dma_fence_t *__nv_drm_fence_context_create_fence(
+    struct nv_drm_fence_context *nv_fence_context,
+    unsigned int seqno)
+{
+    struct nv_drm_prime_fence *nv_fence;
+    int ret = 0;
+
+    if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) {
+        ret = -ENOMEM;
+        goto out;
+    }
+
+    spin_lock(&nv_fence_context->lock);
+
+    /*
+     * If seqno wrapped, force signal fences to make sure none of them
+     * get stuck.
+     */
+    if (seqno < nv_fence_context->last_seqno) {
+        nv_drm_gem_prime_force_fence_signal(nv_fence_context);
+    }
+
+    INIT_LIST_HEAD(&nv_fence->list_entry);
+
+    spin_lock_init(&nv_fence->lock);
+
+    nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
+                      &nv_fence->lock, nv_fence_context->context,
+                      seqno);
+
+    list_add_tail(&nv_fence->list_entry, &nv_fence_context->pending);
+
+    nv_fence_context->last_seqno = seqno;
+
+    spin_unlock(&nv_fence_context->lock);
+
+out:
+    return ret != 0 ? ERR_PTR(ret) : &nv_fence->base;
+}
+
+int nv_drm_fence_supported_ioctl(struct drm_device *dev,
+                                 void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    return nv_dev->pDevice ? 0 : -EINVAL;
+}
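
    /*
     * Editor's sketch: the three ioctl handlers in this file are exported
     * through the driver's ioctl table in nvidia-drm-drv.c, added elsewhere
     * in this patch. For orientation, such a table typically looks like the
     * following; the flags are illustrative, and the NVIDIA_* ioctl numbers
     * are assumed to come from nvidia-drm-ioctl.h.
     */
    #include <drm/drm_ioctl.h>

    static const struct drm_ioctl_desc example_nv_drm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED,
                          nv_drm_fence_supported_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_CONTEXT_CREATE,
                          nv_drm_fence_context_create_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(NVIDIA_GEM_FENCE_ATTACH,
                          nv_drm_gem_fence_attach_ioctl,
                          DRM_RENDER_ALLOW),
    };
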
+
+struct nv_drm_gem_fence_context {
+    struct nv_drm_gem_object base;
+    struct nv_drm_fence_context *nv_fence_context;
+};
+
+static inline struct nv_drm_gem_fence_context *to_gem_fence_context(
+    struct nv_drm_gem_object *nv_gem)
+{
+    if (nv_gem != NULL) {
+        return container_of(nv_gem, struct nv_drm_gem_fence_context, base);
+    }
+
+    return NULL;
+}
+
+/*
+ * Tearing down a 'struct nv_drm_gem_fence_context' object is not expected
+ * to happen from a worker thread; if it did, it would deadlock, because
+ * the tear-down sequence flushes all existing worker threads.
+ */
+static void __nv_drm_gem_fence_context_free(struct nv_drm_gem_object *nv_gem)
+{
+    struct nv_drm_gem_fence_context *nv_gem_fence_context =
+        to_gem_fence_context(nv_gem);
+
+    __nv_drm_fence_context_destroy(nv_gem_fence_context->nv_fence_context);
+
+    nv_drm_free(nv_gem_fence_context);
+}
+
+const struct nv_drm_gem_object_funcs nv_gem_fence_context_ops = {
+    .free = __nv_drm_gem_fence_context_free,
+};
+
+static inline
+struct nv_drm_gem_fence_context *__nv_drm_gem_object_fence_context_lookup(
+    struct drm_device *dev,
+    struct drm_file *filp,
+    u32 handle)
+{
+    struct nv_drm_gem_object *nv_gem =
+        nv_drm_gem_object_lookup(dev, filp, handle);
+
+    if (nv_gem != NULL && nv_gem->ops != &nv_gem_fence_context_ops) {
+        nv_drm_gem_object_unreference_unlocked(nv_gem);
+        return NULL;
+    }
+
+    return to_gem_fence_context(nv_gem);
+}
+
+int nv_drm_fence_context_create_ioctl(struct drm_device *dev,
+                                      void *data, struct drm_file *filep)
+{
+    struct nv_drm_device *nv_dev = to_nv_device(dev);
+    struct drm_nvidia_fence_context_create_params *p = data;
+    struct nv_drm_gem_fence_context *nv_gem_fence_context = NULL;
+
+    if ((nv_gem_fence_context = nv_drm_calloc(
+                1,
+                sizeof(struct nv_drm_gem_fence_context))) == NULL) {
+        goto done;
+    }
+
+    if ((nv_gem_fence_context->nv_fence_context =
+            __nv_drm_fence_context_new(nv_dev, p)) == NULL) {
+        goto fence_context_new_failed;
+    }
+
+    nv_drm_gem_object_init(nv_dev,
+                           &nv_gem_fence_context->base,
+                           &nv_gem_fence_context_ops,
+                           0 /* size */,
+                           NULL /* pMemory */);
+
+    return nv_drm_gem_handle_create_drop_reference(filep,
+                                                   &nv_gem_fence_context->base,
+                                                   &p->handle);
+
+fence_context_new_failed:
+    nv_drm_free(nv_gem_fence_context);
+
+done:
+    return -ENOMEM;
+}
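
    /*
     * Editor's sketch: nv_drm_calloc()/nv_drm_free(), used throughout this
     * file, are declared in nvidia-drm-os-interface.h; the real
     * implementation lives in nvidia-drm-linux.c, outside this excerpt. One
     * plausible shape is an overflow-checked zeroing allocator -- a sketch,
     * not the actual implementation.
     */
    #include <linux/overflow.h>
    #include <linux/slab.h>

    static void *example_calloc(size_t nmemb, size_t size)
    {
        size_t total;

        /* Refuse allocations whose total size would overflow size_t. */
        if (check_mul_overflow(nmemb, size, &total))
            return NULL;

        return kzalloc(total, GFP_KERNEL);
    }

    static void example_free(void *ptr)
    {
        kfree(ptr);
    }
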
Error code: %d", ret); + } + + nv_dma_resv_unlock(&nv_gem->resv); + +fence_context_create_fence_failed: + nv_drm_gem_object_unreference_unlocked(&nv_gem_fence_context->base); + +fence_context_lookup_failed: + nv_drm_gem_object_unreference_unlocked(nv_gem); + +done: + return ret; +} + +#endif /* NV_DRM_FENCE_AVAILABLE */ + +#endif /* NV_DRM_AVAILABLE */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h new file mode 100644 index 0000000..5afa2ae --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_PRIME_FENCE_H__ +#define __NVIDIA_DRM_PRIME_FENCE_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +struct drm_file; +struct drm_device; + +#if defined(NV_DRM_FENCE_AVAILABLE) + +int nv_drm_fence_supported_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_fence_context_create_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif /* NV_DRM_FENCE_AVAILABLE */ + +#endif /* NV_DRM_AVAILABLE */ + +#endif /* __NVIDIA_DRM_PRIME_FENCE_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h new file mode 100644 index 0000000..91b7b4f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h
new file mode 100644
index 0000000..91b7b4f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVIDIA_DRM_PRIV_H__
+#define __NVIDIA_DRM_PRIV_H__
+
+#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
+
+#if defined(NV_DRM_AVAILABLE)
+
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
+#include <drm/drm_device.h>
+#endif
+
+#if defined(NV_DRM_DRM_GEM_H_PRESENT)
+#include <drm/drm_gem.h>
+#endif
+
+#include "nvidia-drm-os-interface.h"
+
+#include "nvkms-kapi.h"
+
+#define NV_DRM_LOG_ERR(__fmt, ...) \
+    DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+
+#define NV_DRM_LOG_INFO(__fmt, ...) \
+    DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+
+#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
+    NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
+
+#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
+    NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
+
+#define NV_DRM_WARN(__condition) WARN_ON((__condition))
+
+#define NV_DRM_DEBUG_DRIVER(__fmt, ...) \
+    DRM_DEBUG_DRIVER("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
+
+#define NV_DRM_DEV_DEBUG_DRIVER(__dev, __fmt, ...) \
+    DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
+                     __dev->gpu_info.gpu_id, ##__VA_ARGS__)
+
+struct nv_drm_device {
+    nv_gpu_info_t gpu_info;
+
+    struct drm_device *dev;
+
+    struct NvKmsKapiDevice *pDevice;
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    /*
+     * Lock to protect the drm subsystem and fields of this structure
+     * from concurrent access.
+     *
+     * Do not take this lock while some lock from the core drm subsystem
+     * is already held; the locking order must be as follows -
+     *
+     * mutex_lock(nv_drm_device::lock);
+     * ....
+     * mutex_lock(drm_device::mode_config::lock);
+     * ....
+     * .......
+     * mutex_unlock(drm_device::mode_config::lock);
+     * ........
+     * ..
+     * mutex_lock(drm_device::struct_mutex);
+     * ....
+     * ........
+     * mutex_unlock(drm_device::struct_mutex);
+     * ..
+     * mutex_unlock(nv_drm_device::lock);
+     */
+    struct mutex lock;
+
+    NvU32 pitchAlignment;
+
+    NvU8 genericPageKind;
+    NvU8 pageKindGeneration;
+    NvU8 sectorLayout;
+#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
+    NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
+#endif
+
+    atomic_t enable_event_handling;
+
+    /**
+     * @flip_event_wq:
+     *
+     * The wait queue on which nv_drm_atomic_commit_internal() sleeps until
+     * the next flip event occurs.
+ */ + wait_queue_head_t flip_event_wq; + +#endif + + NvBool hasVideoMemory; + + NvBool supportsSyncpts; + + struct drm_property *nv_out_fence_property; + struct drm_property *nv_input_colorspace_property; + +#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA) + struct drm_property *nv_hdr_output_metadata_property; +#endif + + struct nv_drm_device *next; +}; + +static inline struct nv_drm_device *to_nv_device( + struct drm_device *dev) +{ + return dev->dev_private; +} + +extern const struct NvKmsKapiFunctionsTable* const nvKms; + +#endif /* defined(NV_DRM_AVAILABLE) */ + +#endif /* __NVIDIA_DRM_PRIV_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c new file mode 100644 index 0000000..42fb0cd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_PLANE_H_PRESENT) +#include +#endif + +#include +#include + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-utils.h" + +struct NvKmsKapiConnectorInfo* +nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice, + NvKmsKapiConnector hConnector) +{ + struct NvKmsKapiConnectorInfo *connectorInfo = + nv_drm_calloc(1, sizeof(*connectorInfo)); + + if (connectorInfo == NULL) { + return ERR_PTR(-ENOMEM); + } + + if (!nvKms->getConnectorInfo(pDevice, hConnector, connectorInfo)) { + nv_drm_free(connectorInfo); + + return ERR_PTR(-EINVAL); + } + + return connectorInfo; +} + +int +nvkms_connector_signal_to_drm_encoder_signal(NvKmsConnectorSignalFormat format) +{ + switch (format) { + default: + case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN: + return DRM_MODE_ENCODER_NONE; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS: + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP: + return DRM_MODE_ENCODER_TMDS; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS: + return DRM_MODE_ENCODER_LVDS; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA: + return DRM_MODE_ENCODER_DAC; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI: + return DRM_MODE_ENCODER_DSI; + } +} + +int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type, + NvBool internal) +{ + switch (type) { + default: + case NVKMS_CONNECTOR_TYPE_UNKNOWN: + return DRM_MODE_CONNECTOR_Unknown; + case NVKMS_CONNECTOR_TYPE_DP: + return + internal ? + DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort; + case NVKMS_CONNECTOR_TYPE_HDMI: + return DRM_MODE_CONNECTOR_HDMIA; + case NVKMS_CONNECTOR_TYPE_DVI_D: + return DRM_MODE_CONNECTOR_DVID; + case NVKMS_CONNECTOR_TYPE_DVI_I: + return DRM_MODE_CONNECTOR_DVII; + case NVKMS_CONNECTOR_TYPE_LVDS: + return DRM_MODE_CONNECTOR_LVDS; + case NVKMS_CONNECTOR_TYPE_VGA: + return DRM_MODE_CONNECTOR_VGA; + case NVKMS_CONNECTOR_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: + return DRM_MODE_CONNECTOR_DisplayPort; + } +} + +void +nvkms_display_mode_to_drm_mode(const struct NvKmsKapiDisplayMode *displayMode, + struct drm_display_mode *mode) +{ +#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH) + mode->vrefresh = (displayMode->timings.refreshRate + 500) / 1000; /* In Hz */ +#endif + + mode->clock = (displayMode->timings.pixelClockHz + 500) / 1000; /* In Hz */ + + mode->hdisplay = displayMode->timings.hVisible; + mode->hsync_start = displayMode->timings.hSyncStart; + mode->hsync_end = displayMode->timings.hSyncEnd; + mode->htotal = displayMode->timings.hTotal; + mode->hskew = displayMode->timings.hSkew; + + mode->vdisplay = displayMode->timings.vVisible; + mode->vsync_start = displayMode->timings.vSyncStart; + mode->vsync_end = displayMode->timings.vSyncEnd; + mode->vtotal = displayMode->timings.vTotal; + + if (displayMode->timings.flags.interlaced) { + mode->flags |= DRM_MODE_FLAG_INTERLACE; + } + + if (displayMode->timings.flags.doubleScan) { + mode->flags |= DRM_MODE_FLAG_DBLSCAN; + } + + if (displayMode->timings.flags.hSyncPos) { + mode->flags |= DRM_MODE_FLAG_PHSYNC; + } + + if (displayMode->timings.flags.hSyncNeg) { + mode->flags |= DRM_MODE_FLAG_NHSYNC; + } + + if (displayMode->timings.flags.vSyncPos) { + mode->flags |= DRM_MODE_FLAG_PVSYNC; + } + + if (displayMode->timings.flags.vSyncNeg) { + mode->flags |= DRM_MODE_FLAG_NVSYNC; + } + + mode->width_mm = displayMode->timings.widthMM; + mode->height_mm = 
displayMode->timings.heightMM; + + if (strlen(displayMode->name) != 0) { + memcpy( + mode->name, displayMode->name, + min(sizeof(mode->name), sizeof(displayMode->name))); + + mode->name[sizeof(mode->name) - 1] = '\0'; + } else { + drm_mode_set_name(mode); + } +} + +void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src, + struct NvKmsKapiDisplayMode *dst) +{ +#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH) + dst->timings.refreshRate = src->vrefresh * 1000; +#else + dst->timings.refreshRate = drm_mode_vrefresh(src) * 1000; +#endif + + dst->timings.pixelClockHz = src->clock * 1000; /* In Hz */ + + dst->timings.hVisible = src->hdisplay; + dst->timings.hSyncStart = src->hsync_start; + dst->timings.hSyncEnd = src->hsync_end; + dst->timings.hTotal = src->htotal; + dst->timings.hSkew = src->hskew; + + dst->timings.vVisible = src->vdisplay; + dst->timings.vSyncStart = src->vsync_start; + dst->timings.vSyncEnd = src->vsync_end; + dst->timings.vTotal = src->vtotal; + + if (src->flags & DRM_MODE_FLAG_INTERLACE) { + dst->timings.flags.interlaced = NV_TRUE; + } else { + dst->timings.flags.interlaced = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_DBLSCAN) { + dst->timings.flags.doubleScan = NV_TRUE; + } else { + dst->timings.flags.doubleScan = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_PHSYNC) { + dst->timings.flags.hSyncPos = NV_TRUE; + } else { + dst->timings.flags.hSyncPos = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_NHSYNC) { + dst->timings.flags.hSyncNeg = NV_TRUE; + } else { + dst->timings.flags.hSyncNeg = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_PVSYNC) { + dst->timings.flags.vSyncPos = NV_TRUE; + } else { + dst->timings.flags.vSyncPos = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_NVSYNC) { + dst->timings.flags.vSyncNeg = NV_TRUE; + } else { + dst->timings.flags.vSyncNeg = NV_FALSE; + } + + dst->timings.widthMM = src->width_mm; + dst->timings.heightMM = src->height_mm; + + memcpy(dst->name, src->name, min(sizeof(dst->name), sizeof(src->name))); +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h new file mode 100644 index 0000000..2c0588a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_UTILS_H__ +#define __NVIDIA_DRM_UTILS_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvkms-kapi.h" + +enum drm_plane_type; +struct drm_display_mode; + +struct NvKmsKapiConnectorInfo* +nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice, + NvKmsKapiConnector hConnector); + +int nvkms_connector_signal_to_drm_encoder_signal( + NvKmsConnectorSignalFormat format); + +int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type, + NvBool internal); + +void nvkms_display_mode_to_drm_mode( + const struct NvKmsKapiDisplayMode *displayMode, + struct drm_display_mode *mode); + +void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src, + struct NvKmsKapiDisplayMode *dst); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_UTILS_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild new file mode 100644 index 0000000..ca32b76 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild @@ -0,0 +1,124 @@ +########################################################################### +# Kbuild fragment for nvidia-drm.ko +########################################################################### + +# +# Define NVIDIA_DRM_{SOURCES,OBJECTS} +# + +NVIDIA_DRM_SOURCES = +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-prime-fence.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c +NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c + +NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES)) + +obj-m += nvidia-drm.o +nvidia-drm-y := $(NVIDIA_DRM_OBJECTS) + +NVIDIA_DRM_KO = nvidia-drm/nvidia-drm.ko + +NV_KERNEL_MODULE_TARGETS += $(NVIDIA_DRM_KO) + +# +# Define nvidia-drm.ko-specific CFLAGS. 
+#
+
+NVIDIA_DRM_CFLAGS += -I$(src)/nvidia-drm
+NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
+
+$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS))
+
+#
+# Register the conftests needed by nvidia-drm.ko
+#
+
+NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS)
+
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
+
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
+
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
+NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
+NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
+NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
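
Each name registered above corresponds to a small compile test that conftest.sh (in kernel-open/) generates and builds against the target kernel, defining (or leaving undefined) an NV_*_PRESENT style macro that the sources then check with #if defined(...). Schematically, a function probe boils down to a translation unit like this; the exact generated code is an assumption:

    /* If this compiles against the target kernel headers, conftest would
     * define something like NV_DRM_GEM_OBJECT_GET_PRESENT (the macro name
     * here is an assumption) for the driver sources. */
    #include <drm/drm_gem.h>

    void conftest_drm_gem_object_get(struct drm_gem_object *obj)
    {
        drm_gem_object_get(obj);
    }
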
+ */ + +#include "nvidia-drm.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-drv.h" + +static struct NvKmsKapiFunctionsTable nvKmsFuncsTable = { + .versionString = NV_VERSION_STRING, +}; + +const struct NvKmsKapiFunctionsTable* const nvKms = &nvKmsFuncsTable; + +#endif + +int nv_drm_init(void) +{ +#if defined(NV_DRM_AVAILABLE) + if (!nvKmsKapiGetFunctionsTable(&nvKmsFuncsTable)) { + NV_DRM_LOG_ERR( + "Version mismatch: nvidia-modeset.ko(%s) nvidia-drm.ko(%s)", + nvKmsFuncsTable.versionString, NV_VERSION_STRING); + return -EINVAL; + } + + return nv_drm_probe_devices(); +#else + return 0; +#endif +} + +void nv_drm_exit(void) +{ +#if defined(NV_DRM_AVAILABLE) + nv_drm_remove_devices(); +#endif +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h new file mode 100644 index 0000000..9f1c31c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_H__ +#define __NVIDIA_DRM_H__ + +#include "nvidia-drm-conftest.h" + +int nv_drm_init(void); +void nv_drm_exit(void); + +#endif /* __NVIDIA_DRM_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c new file mode 100644 index 0000000..5a95f4a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c @@ -0,0 +1,335 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c
new file mode 100644
index 0000000..5a95f4a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c
@@ -0,0 +1,335 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv-kthread-q.h"
+#include "nv-list-helpers.h"
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#if defined(NV_LINUX_BUG_H_PRESENT)
+    #include <linux/bug.h>
+#else
+    #include <asm/bug.h>
+#endif
+
+// Today's implementation is a little simpler and more limited than the
+// API description allows for in nv-kthread-q.h. Details include:
+//
+// 1. Each nv_kthread_q instance is a first-in, first-out queue.
+//
+// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
+//
+// You can create any number of queues, each of which gets its own
+// named kernel thread (kthread). You can then insert arbitrary functions
+// into the queue, and those functions will be run in the context of the
+// queue's kthread.
+
+#ifndef WARN
+    // Only *really* old kernels (2.6.9) end up here. Just use a simple printk
+    // to implement this, because such kernels won't be supported much longer.
+    #define WARN(condition, format...) ({                    \
+        int __ret_warn_on = !!(condition);                   \
+        if (unlikely(__ret_warn_on))                         \
+            printk(KERN_ERR format);                         \
+        unlikely(__ret_warn_on);                             \
+    })
+#endif
+
+#define NVQ_WARN(fmt, ...)                                   \
+    do {                                                     \
+        if (in_interrupt()) {                                \
+            WARN(1, "nv_kthread_q: [in interrupt]: " fmt,    \
+                 ##__VA_ARGS__);                             \
+        }                                                    \
+        else {                                               \
+            WARN(1, "nv_kthread_q: task: %s: " fmt,          \
+                 current->comm,                              \
+                 ##__VA_ARGS__);                             \
+        }                                                    \
+    } while (0)
+
+static int _main_loop(void *args)
+{
+    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
+    nv_kthread_q_item_t *q_item = NULL;
+    unsigned long flags;
+
+    while (1) {
+        // Normally this thread is never interrupted. However,
+        // down_interruptible (instead of down) is called here in order to
+        // avoid being classified as a potentially hung task by the kernel
+        // watchdog.
+        while (down_interruptible(&q->q_sem))
+            NVQ_WARN("Interrupted during semaphore wait\n");
+
+        if (atomic_read(&q->main_loop_should_exit))
+            break;
+
+        spin_lock_irqsave(&q->q_lock, flags);
+
+        // The q_sem semaphore prevents us from getting here unless there is
+        // at least one item in the list, so an empty list indicates a bug.
+        if (unlikely(list_empty(&q->q_list_head))) {
+            spin_unlock_irqrestore(&q->q_lock, flags);
+            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
+            continue;
+        }
+
+        // Consume one item from the queue
+        q_item = list_first_entry(&q->q_list_head,
+                                  nv_kthread_q_item_t,
+                                  q_list_node);
+
+        list_del_init(&q_item->q_list_node);
+
+        spin_unlock_irqrestore(&q->q_lock, flags);
+
+        // Run the item
+        q_item->function_to_run(q_item->function_args);
+
+        // Make debugging a little simpler by clearing this between runs:
+        q_item = NULL;
+    }
+
+    while (!kthread_should_stop())
+        schedule();
+
+    return 0;
+}
+
+void nv_kthread_q_stop(nv_kthread_q_t *q)
+{
+    // Check whether the queue has been properly initialized.
+    if (unlikely(!q->q_kthread))
+        return;
+
+    nv_kthread_q_flush(q);
+
+    // If this assertion fires, the caller likely either broke the API rules
+    // (by adding items after calling nv_kthread_q_stop) or failed to
+    // adequately flush self-rescheduling q_items.
+ if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. +// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + const static unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. + if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if ((i == (attempts - 1))) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. 
+ stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} +#endif + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); +#else + return -ENOTSUPP; +#endif + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. + q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. +int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. The queue really does have to be + // flushed twice, in order to take care of the case of a q_item that + // reschedules itself. 
+ _raw_q_flush(q); + _raw_q_flush(q); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c new file mode 100644 index 0000000..83b69af --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c @@ -0,0 +1,1851 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-21 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include /* do_div() */ +#include +#include +#include +#include +#include +#include + +#include "nvstatus.h" + +#include "nv-register-module.h" +#include "nv-modeset-interface.h" +#include "nv-kref.h" + +#include "nvidia-modeset-os-interface.h" +#include "nvkms.h" +#include "nvkms-ioctl.h" + +#include "conftest.h" +#include "nv-procfs.h" +#include "nv-kthread-q.h" +#include "nv-time.h" +#include "nv-lock.h" + +#if !defined(CONFIG_RETPOLINE) +#include "nv-retpoline.h" +#endif + +#include + +#define NVKMS_LOG_PREFIX "nvidia-modeset: " + +/* These parameters are used for fault injection tests. Normally the defaults + * should be used. */ +MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc"); +static int fail_malloc_num = -1; +module_param_named(fail_malloc, fail_malloc_num, int, 0400); + +MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on module unload"); +static bool malloc_verbose = false; +module_param_named(malloc_verbose, malloc_verbose, bool, 0400); + +static atomic_t nvkms_alloc_called_count; + + +#define NVKMS_SYNCPT_STUBS_NEEDED + +/************************************************************************* + * NVKMS interface for nvhost unit for sync point APIs. 
+ *************************************************************************/ + +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) + +#undef NVKMS_SYNCPT_STUBS_NEEDED + +#include + +NvBool nvkms_syncpt_op( + enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params) +{ + struct platform_device *pdev = nvhost_get_default_device(); + + switch (op) { + + case NVKMS_SYNCPT_OP_ALLOC: + params->alloc.id = nvhost_get_syncpt_client_managed( + pdev, params->alloc.syncpt_name); + break; + + case NVKMS_SYNCPT_OP_PUT: + nvhost_syncpt_put_ref_ext(pdev, params->put.id); + break; + + case NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH: { + + struct nvhost_fence *fence; + NvU32 id, thresh; + + fence = nvhost_fence_get(params->fd_to_id_and_thresh.fd); + if (fence == NULL) { + return NV_FALSE; + } + + if (nvhost_fence_num_pts(fence) > 1) { + /*! Syncpoint fence fd contains more than one syncpoint */ + nvhost_fence_put(fence); + return NV_FALSE; + } + + if (nvhost_fence_get_pt(fence, 0, &id, &thresh) != 0) { + nvhost_fence_put(fence); + return NV_FALSE; + } + + params->fd_to_id_and_thresh.id = id; + params->fd_to_id_and_thresh.thresh = thresh; + + nvhost_fence_put(fence); + + break; + } + + case NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD: + nvhost_syncpt_create_fence_single_ext( + pdev, + params->id_and_thresh_to_fd.id, + params->id_and_thresh_to_fd.thresh, + "nvkms-fence", + ¶ms->id_and_thresh_to_fd.fd); + break; + + case NVKMS_SYNCPT_OP_READ_MINVAL: + params->read_minval.minval = + nvhost_syncpt_read_minval(pdev, params->read_minval.id); + break; + + } + + return NV_TRUE; +} + +#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT) && defined(NV_LINUX_NVHOST_H_PRESENT) + +#include +#include +#include +#include + +/* + * If the host1x.h header is present, then we are using the upstream + * host1x driver and so make sure CONFIG_TEGRA_HOST1X is defined to pick + * up the correct prototypes/definitions in nvhost.h. 
+ */ +#define CONFIG_TEGRA_HOST1X + +#include + +#undef NVKMS_SYNCPT_STUBS_NEEDED + +NvBool nvkms_syncpt_op( + enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params) +{ + struct host1x_syncpt *host1x_sp; + struct platform_device *pdev; + struct host1x *host1x; + + pdev = nvhost_get_default_device(); + if (pdev == NULL) { + nvkms_log(NVKMS_LOG_LEVEL_ERROR, NVKMS_LOG_PREFIX, + "Failed to get nvhost default pdev"); + return NV_FALSE; + } + + host1x = nvhost_get_host1x(pdev); + if (host1x == NULL) { + nvkms_log(NVKMS_LOG_LEVEL_ERROR, NVKMS_LOG_PREFIX, + "Failed to get host1x"); + return NV_FALSE; + } + + switch (op) { + + case NVKMS_SYNCPT_OP_ALLOC: + host1x_sp = host1x_syncpt_alloc(host1x, + HOST1X_SYNCPT_CLIENT_MANAGED, + params->alloc.syncpt_name); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + params->alloc.id = host1x_syncpt_id(host1x_sp); + break; + + case NVKMS_SYNCPT_OP_PUT: + host1x_sp = host1x_syncpt_get_by_id_noref(host1x, params->put.id); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + host1x_syncpt_put(host1x_sp); + break; + + case NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH: { + + struct dma_fence *f; + NvU32 id, thresh; + int err; + + f = sync_file_get_fence(params->fd_to_id_and_thresh.fd); + if (f == NULL) { + return NV_FALSE; + } + + if (dma_fence_is_array(f)) { + struct dma_fence_array *array = to_dma_fence_array(f); + + if (array->num_fences > 1) { + /* Syncpoint fence fd contains more than one syncpoint */ + dma_fence_put(f); + return NV_FALSE; + } + + f = array->fences[0]; + } + + err = host1x_fence_extract(f, &id, &thresh); + dma_fence_put(f); + + if (err < 0) { + return NV_FALSE; + } + + params->fd_to_id_and_thresh.id = id; + params->fd_to_id_and_thresh.thresh = thresh; + + break; + } + + case NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD: { + + struct sync_file *file; + struct dma_fence *f; + int fd; + + host1x_sp = host1x_syncpt_get_by_id_noref(host1x, + params->id_and_thresh_to_fd.id); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + f = host1x_fence_create(host1x_sp, + params->id_and_thresh_to_fd.thresh, true); + if (IS_ERR(f)) { + return NV_FALSE; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + dma_fence_put(f); + return NV_FALSE; + } + + file = sync_file_create(f); + dma_fence_put(f); + + if (!file) { + return NV_FALSE; + } + + fd_install(fd, file->file); + + params->id_and_thresh_to_fd.fd = fd; + break; + } + + case NVKMS_SYNCPT_OP_READ_MINVAL: + host1x_sp = host1x_syncpt_get_by_id_noref(host1x, params->read_minval.id); + if (host1x_sp == NULL) { + return NV_FALSE; + } + + params->read_minval.minval = host1x_syncpt_read(host1x_sp); + break; + } + + return NV_TRUE; +} +#endif + + +#ifdef NVKMS_SYNCPT_STUBS_NEEDED +/* Unsupported STUB for nvkms_syncpt APIs */ +NvBool nvkms_syncpt_op( + enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params) +{ + return NV_FALSE; +} +#endif + +#define NVKMS_MAJOR_DEVICE_NUMBER 195 +#define NVKMS_MINOR_DEVICE_NUMBER 254 + +/* + * Convert from microseconds to jiffies. The conversion is: + * ((usec) * HZ / 1000000) + * + * Use do_div() to avoid gcc-generated references to __udivdi3(). + * Note that the do_div() macro divides the first argument in place. + */ +static inline unsigned long NVKMS_USECS_TO_JIFFIES(NvU64 usec) +{ + unsigned long result = usec * HZ; + do_div(result, 1000000); + return result; +} + + +/************************************************************************* + * NVKMS uses a global lock, nvkms_lock. The lock is taken in the + * file operation callback functions when calling into core NVKMS. 
+ *************************************************************************/
+
+static struct semaphore nvkms_lock;
+
+/*************************************************************************
+ * User clients of NVKMS may need to be synchronized with suspend/resume
+ * operations.  This depends on the state of the system when the NVKMS
+ * suspend/resume callbacks are invoked.  NVKMS uses a single
+ * RW lock, nvkms_pm_lock, for this synchronization.
+ *************************************************************************/
+
+static struct rw_semaphore nvkms_pm_lock;
+
+/*************************************************************************
+ * NVKMS executes almost all of its queued work items on a single
+ * kthread.  The exception is deferred close() handlers, which can
+ * block for long periods of time and would stall the shared queue, so
+ * they get a queue of their own.
+ *************************************************************************/
+
+static struct nv_kthread_q nvkms_kthread_q;
+static struct nv_kthread_q nvkms_deferred_close_kthread_q;
+
+/*************************************************************************
+ * The nvkms_per_open structure tracks data that is specific to a
+ * single open.
+ *************************************************************************/
+
+struct nvkms_per_open {
+    void *data;
+
+    enum NvKmsClientType type;
+
+    union {
+        struct {
+            struct {
+                atomic_t available;
+                wait_queue_head_t wait_queue;
+            } events;
+        } user;
+
+        struct {
+            struct {
+                nv_kthread_q_item_t nv_kthread_q_item;
+            } events;
+        } kernel;
+    } u;
+
+    nv_kthread_q_item_t deferred_close_q_item;
+};
+
+/*************************************************************************
+ * nvkms_pm_lock helper functions.  Since no down_read_interruptible()
+ * or equivalent interface is available, it needs to be approximated with
+ * down_read_trylock() to enable the kernel's freezer to round up user
+ * threads going into suspend.
+ *************************************************************************/
+
+static inline int nvkms_read_trylock_pm_lock(void)
+{
+    return !down_read_trylock(&nvkms_pm_lock);
+}
+
+static inline void nvkms_read_lock_pm_lock(void)
+{
+    down_read(&nvkms_pm_lock);
+}
+
+static inline void nvkms_read_unlock_pm_lock(void)
+{
+    up_read(&nvkms_pm_lock);
+}
+
+static inline void nvkms_write_lock_pm_lock(void)
+{
+    down_write(&nvkms_pm_lock);
+}
+
+static inline void nvkms_write_unlock_pm_lock(void)
+{
+    up_write(&nvkms_pm_lock);
+}
+
+/*************************************************************************
+ * nvidia-modeset-os-interface.h functions.  It is assumed that these
+ * are called while nvkms_lock is held.
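+ *
+ * Core NVKMS is expected to pair these helpers the way it would pair
+ * libc calls (sketch):
+ *
+ *     void *p = nvkms_alloc(n, NV_TRUE);
+ *     if (p != NULL) {
+ *         nvkms_memcpy(p, src, n);
+ *         nvkms_free(p, n);
+ *     }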
+ *************************************************************************/ + +/* Don't use kmalloc for allocations larger than one page */ +#define KMALLOC_LIMIT PAGE_SIZE + +void* nvkms_alloc(size_t size, NvBool zero) +{ + void *p; + + if (malloc_verbose || fail_malloc_num >= 0) { + int this_alloc = atomic_inc_return(&nvkms_alloc_called_count) - 1; + if (fail_malloc_num >= 0 && fail_malloc_num == this_alloc) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "Failing alloc %d\n", + fail_malloc_num); + return NULL; + } + } + + if (size <= KMALLOC_LIMIT) { + p = kmalloc(size, GFP_KERNEL); + } else { + p = vmalloc(size); + } + + if (zero && (p != NULL)) { + memset(p, 0, size); + } + + return p; +} + +void nvkms_free(void *ptr, size_t size) +{ + if (size <= KMALLOC_LIMIT) { + kfree(ptr); + } else { + vfree(ptr); + } +} + +void* nvkms_memset(void *ptr, NvU8 c, size_t size) +{ + return memset(ptr, c, size); +} + +void* nvkms_memcpy(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +void* nvkms_memmove(void *dest, const void *src, size_t n) +{ + return memmove(dest, src, n); +} + +int nvkms_memcmp(const void *s1, const void *s2, size_t n) +{ + return memcmp(s1, s2, n); +} + +size_t nvkms_strlen(const char *s) +{ + return strlen(s); +} + +int nvkms_strcmp(const char *s1, const char *s2) +{ + return strcmp(s1, s2); +} + +char* nvkms_strncpy(char *dest, const char *src, size_t n) +{ + return strncpy(dest, src, n); +} + +void nvkms_usleep(NvU64 usec) +{ + if (usec < 1000) { + /* + * If the period to wait is less than one millisecond, sleep + * using udelay(); note this is a busy wait. + */ + udelay(usec); + } else { + /* + * Otherwise, sleep with millisecond precision. Clamp the + * time to ~4 seconds (0xFFF/1000 => 4.09 seconds). + * + * Note that the do_div() macro divides the first argument in + * place. + */ + + int msec; + NvU64 tmp = usec + 500; + do_div(tmp, 1000); + msec = (int) (tmp & 0xFFF); + + /* + * XXX NVKMS TODO: this may need to be msleep_interruptible(), + * though the callers would need to be made to handle + * returning early. + */ + msleep(msec); + } +} + +NvU64 nvkms_get_usec(void) +{ + struct timespec64 ts; + NvU64 ns; + + ktime_get_raw_ts64(&ts); + + ns = timespec64_to_ns(&ts); + return ns / 1000; +} + +int nvkms_copyin(void *kptr, NvU64 uaddr, size_t n) +{ + if (!nvKmsNvU64AddressIsSafe(uaddr)) { + return -EINVAL; + } + + if (copy_from_user(kptr, nvKmsNvU64ToPointer(uaddr), n) != 0) { + return -EFAULT; + } + + return 0; +} + +int nvkms_copyout(NvU64 uaddr, const void *kptr, size_t n) +{ + if (!nvKmsNvU64AddressIsSafe(uaddr)) { + return -EINVAL; + } + + if (copy_to_user(nvKmsNvU64ToPointer(uaddr), kptr, n) != 0) { + return -EFAULT; + } + + return 0; +} + +void nvkms_yield(void) +{ + schedule(); +} + +void nvkms_dump_stack(void) +{ + dump_stack(); +} + +int nvkms_snprintf(char *str, size_t size, const char *format, ...) 
+{ + int ret; + va_list ap; + + va_start(ap, format); + ret = vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +int nvkms_vsnprintf(char *str, size_t size, const char *format, va_list ap) +{ + return vsnprintf(str, size, format, ap); +} + +void nvkms_log(const int level, const char *gpuPrefix, const char *msg) +{ + const char *levelString; + const char *levelPrefix; + + switch (level) { + default: + case NVKMS_LOG_LEVEL_INFO: + levelPrefix = ""; + levelString = KERN_INFO; + break; + case NVKMS_LOG_LEVEL_WARN: + levelPrefix = "WARNING: "; + levelString = KERN_WARNING; + break; + case NVKMS_LOG_LEVEL_ERROR: + levelPrefix = "ERROR: "; + levelString = KERN_ERR; + break; + } + + printk("%s%s%s%s%s\n", + levelString, NVKMS_LOG_PREFIX, levelPrefix, gpuPrefix, msg); +} + +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable) +{ + struct nvkms_per_open *popen = pOpenKernel; + + switch (popen->type) { + case NVKMS_CLIENT_USER_SPACE: + /* + * Write popen->events.available atomically, to avoid any races or + * memory barrier issues interacting with nvkms_poll(). + */ + atomic_set(&popen->u.user.events.available, eventsAvailable); + + wake_up_interruptible(&popen->u.user.events.wait_queue); + + break; + case NVKMS_CLIENT_KERNEL_SPACE: + if (eventsAvailable) { + nv_kthread_q_schedule_q_item( + &nvkms_kthread_q, + &popen->u.kernel.events.nv_kthread_q_item); + } + + break; + } +} + +static void nvkms_suspend(NvU32 gpuId) +{ + if (gpuId == 0) { + nvkms_write_lock_pm_lock(); + } + + down(&nvkms_lock); + nvKmsSuspend(gpuId); + up(&nvkms_lock); +} + +static void nvkms_resume(NvU32 gpuId) +{ + down(&nvkms_lock); + nvKmsResume(gpuId); + up(&nvkms_lock); + + if (gpuId == 0) { + nvkms_write_unlock_pm_lock(); + } +} + + +/************************************************************************* + * Interface with resman. + *************************************************************************/ + +static nvidia_modeset_rm_ops_t __rm_ops = { 0 }; +static nvidia_modeset_callbacks_t nvkms_rm_callbacks = { + .suspend = nvkms_suspend, + .resume = nvkms_resume +}; + +static int nvkms_alloc_rm(void) +{ + NV_STATUS nvstatus; + int ret; + + __rm_ops.version_string = NV_VERSION_STRING; + + nvstatus = nvidia_get_rm_ops(&__rm_ops); + + if (nvstatus != NV_OK) { + printk(KERN_ERR NVKMS_LOG_PREFIX "Version mismatch: " + "nvidia.ko(%s) nvidia-modeset.ko(%s)\n", + __rm_ops.version_string, NV_VERSION_STRING); + return -EINVAL; + } + + ret = __rm_ops.set_callbacks(&nvkms_rm_callbacks); + if (ret < 0) { + printk(KERN_ERR NVKMS_LOG_PREFIX "Failed to register callbacks\n"); + return ret; + } + + return 0; +} + +static void nvkms_free_rm(void) +{ + __rm_ops.set_callbacks(NULL); +} + +void nvkms_call_rm(void *ops) +{ + nvidia_modeset_stack_ptr stack = NULL; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return; + } + + __rm_ops.op(stack, ops); + + __rm_ops.free_stack(stack); +} + +/************************************************************************* + * ref_ptr implementation. + *************************************************************************/ + +struct nvkms_ref_ptr { + nv_kref_t refcnt; + // Access to ptr is guarded by the nvkms_lock. + void *ptr; +}; + +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr) +{ + struct nvkms_ref_ptr *ref_ptr = nvkms_alloc(sizeof(*ref_ptr), NV_FALSE); + if (ref_ptr) { + // The ref_ptr owner counts as a reference on the ref_ptr itself. 
+        nv_kref_init(&ref_ptr->refcnt);
+        ref_ptr->ptr = ptr;
+    }
+    return ref_ptr;
+}
+
+void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr)
+{
+    if (ref_ptr) {
+        ref_ptr->ptr = NULL;
+        // Release the owner's reference of the ref_ptr.
+        nvkms_dec_ref(ref_ptr);
+    }
+}
+
+void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr)
+{
+    nv_kref_get(&ref_ptr->refcnt);
+}
+
+static void ref_ptr_free(nv_kref_t *ref)
+{
+    struct nvkms_ref_ptr *ref_ptr = container_of(ref, struct nvkms_ref_ptr,
+                                                 refcnt);
+    nvkms_free(ref_ptr, sizeof(*ref_ptr));
+}
+
+void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr)
+{
+    void *ptr = ref_ptr->ptr;
+    nv_kref_put(&ref_ptr->refcnt, ref_ptr_free);
+    return ptr;
+}
+
+/*************************************************************************
+ * Timer support
+ *
+ * Core NVKMS needs to be able to schedule work to execute in the
+ * future, within process context.
+ *
+ * To achieve this, use struct timer_list to schedule a timer
+ * callback, nvkms_timer_callback().  This will execute in softirq
+ * context, so from there schedule an nv_kthread_q item,
+ * nvkms_kthread_q_callback(), which will execute in process context.
+ *************************************************************************/
+
+struct nvkms_timer_t {
+    nv_kthread_q_item_t nv_kthread_q_item;
+    struct timer_list kernel_timer;
+    NvBool cancel;
+    NvBool complete;
+    NvBool isRefPtr;
+    NvBool kernel_timer_created;
+    nvkms_timer_proc_t *proc;
+    void *dataPtr;
+    NvU32 dataU32;
+    struct list_head timers_list;
+};
+
+/*
+ * Global list of pending timers; any change requires holding the lock.
+ */
+static struct {
+    spinlock_t lock;
+    struct list_head list;
+} nvkms_timers;
+
+static void nvkms_kthread_q_callback(void *arg)
+{
+    struct nvkms_timer_t *timer = arg;
+    void *dataPtr;
+    unsigned long flags = 0;
+
+    /*
+     * We can delete this timer from the pending timers list - it's being
+     * processed now.
+     */
+    spin_lock_irqsave(&nvkms_timers.lock, flags);
+    list_del(&timer->timers_list);
+    spin_unlock_irqrestore(&nvkms_timers.lock, flags);
+
+    /*
+     * After the kthread_q callback we want to be sure that the timer
+     * callback for this timer has also finished.  This is important
+     * during module unload: it lets us safely unload the module by first
+     * deleting pending timers and then waiting for the workqueue
+     * callbacks.
+     */
+    if (timer->kernel_timer_created) {
+        del_timer_sync(&timer->kernel_timer);
+    }
+
+    /*
+     * Block the kthread during system suspend & resume in order to defer
+     * handling of events such as DP_IRQ and hotplugs until after resume.
+     */
+    nvkms_read_lock_pm_lock();
+
+    down(&nvkms_lock);
+
+    if (timer->isRefPtr) {
+        // If the object this timer refers to was destroyed, treat the timer as
+        // canceled.
+        dataPtr = nvkms_dec_ref(timer->dataPtr);
+        if (!dataPtr) {
+            timer->cancel = NV_TRUE;
+        }
+    } else {
+        dataPtr = timer->dataPtr;
+    }
+
+    if (!timer->cancel) {
+        timer->proc(dataPtr, timer->dataU32);
+        timer->complete = NV_TRUE;
+    }
+
+    if (timer->isRefPtr) {
+        // ref_ptr-based timers are allocated with kmalloc(GFP_ATOMIC).
+        kfree(timer);
+    } else if (timer->cancel) {
+        nvkms_free(timer, sizeof(*timer));
+    }
+
+    up(&nvkms_lock);
+
+    nvkms_read_unlock_pm_lock();
+}
+
+static void nvkms_queue_work(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
+{
+    int ret = nv_kthread_q_schedule_q_item(q, q_item);
+    /*
+     * nv_kthread_q_schedule_q_item should only fail (which it indicates by
+     * returning false) if the item is already scheduled or the queue is
+     * stopped.  Neither of those should happen in NVKMS.
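+     *
+     * A minimal use of the shared queue looks like (sketch, with a
+     * hypothetical callback and argument):
+     *
+     *     nv_kthread_q_item_init(&item, my_callback, my_arg);
+     *     nvkms_queue_work(&nvkms_kthread_q, &item);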
+ */ + WARN_ON(!ret); +} + +static void _nvkms_timer_callback_internal(struct nvkms_timer_t *nvkms_timer) +{ + /* In softirq context, so schedule nvkms_kthread_q_callback(). */ + nvkms_queue_work(&nvkms_kthread_q, &nvkms_timer->nv_kthread_q_item); +} + +/* + * Why the "inline" keyword? Because only one of these next two functions will + * be used, thus leading to a "defined but not used function" warning. The + * "inline" keyword is redefined in the Kbuild system + * (see: /include/linux/compiler-gcc.h) so as to suppress that warning. + */ +inline static void nvkms_timer_callback_typed_data(struct timer_list *timer) +{ + struct nvkms_timer_t *nvkms_timer = + container_of(timer, struct nvkms_timer_t, kernel_timer); + + _nvkms_timer_callback_internal(nvkms_timer); +} + +inline static void nvkms_timer_callback_anon_data(unsigned long arg) +{ + struct nvkms_timer_t *nvkms_timer = (struct nvkms_timer_t *) arg; + _nvkms_timer_callback_internal(nvkms_timer); +} + +static void +nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec) +{ + unsigned long flags = 0; + + memset(timer, 0, sizeof(*timer)); + timer->cancel = NV_FALSE; + timer->complete = NV_FALSE; + timer->isRefPtr = isRefPtr; + + timer->proc = proc; + timer->dataPtr = dataPtr; + timer->dataU32 = dataU32; + + nv_kthread_q_item_init(&timer->nv_kthread_q_item, nvkms_kthread_q_callback, + timer); + + /* + * After adding timer to timers_list we need to finish referencing it + * (calling nvkms_queue_work() or mod_timer()) before releasing the lock. + * Otherwise, if the code to free the timer were ever updated to + * run in parallel with this, it could race against nvkms_init_timer() + * and free the timer before its initialization is complete. + */ + spin_lock_irqsave(&nvkms_timers.lock, flags); + list_add(&timer->timers_list, &nvkms_timers.list); + + if (usec == 0) { + timer->kernel_timer_created = NV_FALSE; + nvkms_queue_work(&nvkms_kthread_q, &timer->nv_kthread_q_item); + } else { +#if defined(NV_TIMER_SETUP_PRESENT) + timer_setup(&timer->kernel_timer, nvkms_timer_callback_typed_data, 0); +#else + init_timer(&timer->kernel_timer); + timer->kernel_timer.function = nvkms_timer_callback_anon_data; + timer->kernel_timer.data = (unsigned long) timer; +#endif + + timer->kernel_timer_created = NV_TRUE; + mod_timer(&timer->kernel_timer, jiffies + NVKMS_USECS_TO_JIFFIES(usec)); + } + spin_unlock_irqrestore(&nvkms_timers.lock, flags); +} + +nvkms_timer_handle_t* +nvkms_alloc_timer(nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec) +{ + // nvkms_alloc_timer cannot be called from an interrupt context. + struct nvkms_timer_t *timer = nvkms_alloc(sizeof(*timer), NV_FALSE); + if (timer) { + nvkms_init_timer(timer, proc, dataPtr, dataU32, NV_FALSE, usec); + } + return timer; +} + +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec) +{ + // nvkms_alloc_timer_with_ref_ptr is called from an interrupt bottom half + // handler, which runs in a tasklet (i.e. atomic) context. + struct nvkms_timer_t *timer = kmalloc(sizeof(*timer), GFP_ATOMIC); + if (timer) { + // Reference the ref_ptr to make sure that it doesn't get freed before + // the timer fires. 
+ nvkms_inc_ref(ref_ptr); + nvkms_init_timer(timer, proc, ref_ptr, dataU32, NV_TRUE, usec); + } + + return timer != NULL; +} + +void nvkms_free_timer(nvkms_timer_handle_t *handle) +{ + struct nvkms_timer_t *timer = handle; + + if (timer == NULL) { + return; + } + + if (timer->complete) { + nvkms_free(timer, sizeof(*timer)); + return; + } + + timer->cancel = NV_TRUE; +} + +void* nvkms_get_per_open_data(int fd) +{ + struct file *filp = fget(fd); + struct nvkms_per_open *popen = NULL; + dev_t rdev = 0; + void *data = NULL; + + if (filp == NULL) { + return NULL; + } + + if (filp->f_inode == NULL) { + goto done; + } + rdev = filp->f_inode->i_rdev; + + if ((MAJOR(rdev) != NVKMS_MAJOR_DEVICE_NUMBER) || + (MINOR(rdev) != NVKMS_MINOR_DEVICE_NUMBER)) { + goto done; + } + + popen = filp->private_data; + if (popen == NULL) { + goto done; + } + + data = popen->data; + +done: + /* + * fget() incremented the struct file's reference count, which + * needs to be balanced with a call to fput(). It is safe to + * decrement the reference count before returning + * filp->private_data because core NVKMS is currently holding the + * nvkms_lock, which prevents the nvkms_close() => nvKmsClose() + * call chain from freeing the file out from under the caller of + * nvkms_get_per_open_data(). + */ + fput(filp); + + return data; +} + +NvBool nvkms_fd_is_nvidia_chardev(int fd) +{ + struct file *filp = fget(fd); + dev_t rdev = 0; + NvBool ret = NV_FALSE; + + if (filp == NULL) { + return ret; + } + + if (filp->f_inode == NULL) { + goto done; + } + rdev = filp->f_inode->i_rdev; + + if (MAJOR(rdev) == NVKMS_MAJOR_DEVICE_NUMBER) { + ret = NV_TRUE; + } + +done: + fput(filp); + + return ret; +} + +NvBool nvkms_open_gpu(NvU32 gpuId) +{ + nvidia_modeset_stack_ptr stack = NULL; + NvBool ret; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return NV_FALSE; + } + + ret = __rm_ops.open_gpu(gpuId, stack) == 0; + + __rm_ops.free_stack(stack); + + return ret; +} + +void nvkms_close_gpu(NvU32 gpuId) +{ + nvidia_modeset_stack_ptr stack = NULL; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return; + } + + __rm_ops.close_gpu(gpuId, stack); + + __rm_ops.free_stack(stack); +} + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info) +{ + return __rm_ops.enumerate_gpus(gpu_info); +} + +NvBool nvkms_allow_write_combining(void) +{ + return __rm_ops.system_info.allow_write_combining; +} + +/************************************************************************* + * Implementation of sysfs interface to control backlight + *************************************************************************/ + +struct nvkms_backlight_device { + NvU32 gpu_id; + NvU32 display_id; + + void *drv_priv; + + struct backlight_device * dev; +}; + +static int nvkms_update_backlight_status(struct backlight_device *bd) +{ + struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd); + NvBool status; + int ret; + + ret = down_interruptible(&nvkms_lock); + + if (ret != 0) { + return ret; + } + + status = nvKmsSetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv, + bd->props.brightness); + + up(&nvkms_lock); + + return status ? 0 : -EINVAL; +} + +static int nvkms_get_backlight_brightness(struct backlight_device *bd) +{ + struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd); + NvU32 brightness = 0; + NvBool status; + int ret; + + ret = down_interruptible(&nvkms_lock); + + if (ret != 0) { + return ret; + } + + status = nvKmsGetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv, + &brightness); + + up(&nvkms_lock); + + return status ? 
brightness : -1; +} + +static const struct backlight_ops nvkms_backlight_ops = { + .update_status = nvkms_update_backlight_status, + .get_brightness = nvkms_get_backlight_brightness, +}; + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness) +{ + char name[18]; + struct backlight_properties props = { + .brightness = current_brightness, + .max_brightness = 100, + .type = BACKLIGHT_RAW, + }; + nv_gpu_info_t *gpu_info = NULL; + NvU32 gpu_count = 0; + struct nvkms_backlight_device *nvkms_bd = NULL; + int i; + + gpu_info = nvkms_alloc(NV_MAX_GPUS * sizeof(*gpu_info), NV_TRUE); + if (gpu_info == NULL) { + return NULL; + } + + gpu_count = __rm_ops.enumerate_gpus(gpu_info); + if (gpu_count == 0) { + goto done; + } + + for (i = 0; i < gpu_count; i++) { + if (gpu_info[i].gpu_id == gpu_id) { + break; + } + } + + if (i == gpu_count) { + goto done; + } + + nvkms_bd = nvkms_alloc(sizeof(*nvkms_bd), NV_TRUE); + if (nvkms_bd == NULL) { + goto done; + } + + snprintf(name, sizeof(name), "nvidia_%d", i); + name[sizeof(name) - 1] = '\0'; + + nvkms_bd->gpu_id = gpu_id; + nvkms_bd->display_id = display_id; + nvkms_bd->drv_priv = drv_priv; + + nvkms_bd->dev = + backlight_device_register(name, + gpu_info[i].os_device_ptr, + nvkms_bd, + &nvkms_backlight_ops, + &props); + +done: + nvkms_free(gpu_info, NV_MAX_GPUS * sizeof(*gpu_info)); + + return nvkms_bd; +} + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd) +{ + if (nvkms_bd->dev) { + backlight_device_unregister(nvkms_bd->dev); + } + + nvkms_free(nvkms_bd, sizeof(*nvkms_bd)); +} + +/************************************************************************* + * Common to both user-space and kapi NVKMS interfaces + *************************************************************************/ + +static void nvkms_kapi_event_kthread_q_callback(void *arg) +{ + struct NvKmsKapiDevice *device = arg; + + nvKmsKapiHandleEventQueueChange(device); +} + +struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type, + struct NvKmsKapiDevice *device, + int *status) +{ + struct nvkms_per_open *popen = NULL; + + popen = nvkms_alloc(sizeof(*popen), NV_TRUE); + + if (popen == NULL) { + *status = -ENOMEM; + goto failed; + } + + popen->type = type; + + *status = down_interruptible(&nvkms_lock); + + if (*status != 0) { + goto failed; + } + + popen->data = nvKmsOpen(current->tgid, type, popen); + + up(&nvkms_lock); + + if (popen->data == NULL) { + *status = -EPERM; + goto failed; + } + + switch (popen->type) { + case NVKMS_CLIENT_USER_SPACE: + init_waitqueue_head(&popen->u.user.events.wait_queue); + break; + case NVKMS_CLIENT_KERNEL_SPACE: + nv_kthread_q_item_init(&popen->u.kernel.events.nv_kthread_q_item, + nvkms_kapi_event_kthread_q_callback, + device); + break; + } + + *status = 0; + + return popen; + +failed: + + nvkms_free(popen, sizeof(*popen)); + + return NULL; +} + +void nvkms_close_common(struct nvkms_per_open *popen) +{ + /* + * Don't use down_interruptible(): we need to free resources + * during close, so we have no choice but to wait to take the + * mutex. + */ + + down(&nvkms_lock); + + nvKmsClose(popen->data); + + popen->data = NULL; + + up(&nvkms_lock); + + if (popen->type == NVKMS_CLIENT_KERNEL_SPACE) { + /* + * Flush any outstanding nvkms_kapi_event_kthread_q_callback() work + * items before freeing popen. 
+ * + * Note that this must be done after the above nvKmsClose() call, to + * guarantee that no more nvkms_kapi_event_kthread_q_callback() work + * items get scheduled. + * + * Also, note that though popen->data is freed above, any subsequent + * nvkms_kapi_event_kthread_q_callback()'s for this popen should be + * safe: if any nvkms_kapi_event_kthread_q_callback()-initiated work + * attempts to call back into NVKMS, the popen->data==NULL check in + * nvkms_ioctl_common() should reject the request. + */ + + nv_kthread_q_flush(&nvkms_kthread_q); + } + + nvkms_free(popen, sizeof(*popen)); +} + +static void nvkms_close_deferred(void *data) +{ + struct nvkms_per_open *popen = data; + + nvkms_read_lock_pm_lock(); + + nvkms_close_common(popen); + + nvkms_read_unlock_pm_lock(); +} + +static void nvkms_close_popen(struct nvkms_per_open *popen) +{ + if (nvkms_read_trylock_pm_lock() == 0) { + nvkms_close_common(popen); + nvkms_read_unlock_pm_lock(); + } else { + nv_kthread_q_item_init(&popen->deferred_close_q_item, + nvkms_close_deferred, + popen); + nvkms_queue_work(&nvkms_deferred_close_kthread_q, + &popen->deferred_close_q_item); + } +} + +int nvkms_ioctl_common +( + struct nvkms_per_open *popen, + NvU32 cmd, NvU64 address, const size_t size +) +{ + int status; + NvBool ret; + + status = down_interruptible(&nvkms_lock); + if (status != 0) { + return status; + } + + if (popen->data != NULL) { + ret = nvKmsIoctl(popen->data, cmd, address, size); + } else { + ret = NV_FALSE; + } + + up(&nvkms_lock); + + return ret ? 0 : -EPERM; +} + +/************************************************************************* + * NVKMS interface for kernel space NVKMS clients like KAPI + *************************************************************************/ + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +) +{ + int status = 0; + struct nvkms_per_open *ret; + + nvkms_read_lock_pm_lock(); + ret = nvkms_open_common(NVKMS_CLIENT_KERNEL_SPACE, device, &status); + nvkms_read_unlock_pm_lock(); + + return ret; +} + +void nvkms_close_from_kapi(struct nvkms_per_open *popen) +{ + nvkms_close_popen(popen); +} + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t param_size +) +{ + NvBool ret; + + nvkms_read_lock_pm_lock(); + ret = nvkms_ioctl_common(popen, + cmd, + (NvU64)(NvUPtr)params_address, param_size) == 0; + nvkms_read_unlock_pm_lock(); + + return ret; +} + +/************************************************************************* + * APIs for locking. + *************************************************************************/ + +struct nvkms_sema_t { + struct semaphore os_sema; +}; + +nvkms_sema_handle_t* nvkms_sema_alloc(void) +{ + nvkms_sema_handle_t *sema = nvkms_alloc(sizeof(*sema), NV_TRUE); + + if (sema != NULL) { + sema_init(&sema->os_sema, 1); + } + + return sema; +} + +void nvkms_sema_free(nvkms_sema_handle_t *sema) +{ + nvkms_free(sema, sizeof(*sema)); +} + +void nvkms_sema_down(nvkms_sema_handle_t *sema) +{ + down(&sema->os_sema); +} + +void nvkms_sema_up(nvkms_sema_handle_t *sema) +{ + up(&sema->os_sema); +} + +/************************************************************************* + * Procfs files support code. 
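+ *
+ * Core NVKMS supplies the file list as a name/function table terminated
+ * by a NULL name (sketch, with a hypothetical entry):
+ *
+ *     static const nvkms_procfs_file_t files[] = {
+ *         { "state", procFsStateFunc },
+ *         { NULL, NULL },
+ *     };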
+ *************************************************************************/ + +#if defined(CONFIG_PROC_FS) + +#define NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(name) \ + NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nvkms_pm_lock) + +#define NVKMS_PROCFS_FOLDER "driver/nvidia-modeset" + +struct proc_dir_entry *nvkms_proc_dir; + +static void nv_procfs_out_string(void *data, const char *str) +{ + struct seq_file *s = data; + + seq_puts(s, str); +} + +static int nv_procfs_read_nvkms_proc(struct seq_file *s, void *arg) +{ + char *buffer; + nvkms_procfs_proc_t *func; + +#define NVKMS_PROCFS_STRING_SIZE 8192 + + func = s->private; + if (func == NULL) { + return 0; + } + + buffer = nvkms_alloc(NVKMS_PROCFS_STRING_SIZE, NV_TRUE); + + if (buffer != NULL) { + int status = down_interruptible(&nvkms_lock); + + if (status != 0) { + nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE); + return status; + } + + func(s, buffer, NVKMS_PROCFS_STRING_SIZE, &nv_procfs_out_string); + + up(&nvkms_lock); + + nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE); + } + + return 0; +} + +NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(nvkms_proc); + +static NvBool +nvkms_add_proc_file(const nvkms_procfs_file_t *file) +{ + struct proc_dir_entry *new_proc_dir; + + if (nvkms_proc_dir == NULL) { + return NV_FALSE; + } + + new_proc_dir = proc_create_data(file->name, 0, nvkms_proc_dir, + &nv_procfs_nvkms_proc_fops, file->func); + return (new_proc_dir != NULL); +} + +#endif /* defined(CONFIG_PROC_FS) */ + +static void nvkms_proc_init(void) +{ +#if defined(CONFIG_PROC_FS) + const nvkms_procfs_file_t *file; + + nvkms_proc_dir = NULL; + nvKmsGetProcFiles(&file); + + if (file == NULL || file->name == NULL) { + return; + } + + nvkms_proc_dir = NV_CREATE_PROC_DIR(NVKMS_PROCFS_FOLDER, NULL); + if (nvkms_proc_dir == NULL) { + return; + } + + while (file->name != NULL) { + if (!nvkms_add_proc_file(file)) { + nvkms_log(NVKMS_LOG_LEVEL_WARN, NVKMS_LOG_PREFIX, + "Failed to create proc file"); + break; + } + file++; + } +#endif +} + +static void nvkms_proc_exit(void) +{ +#if defined(CONFIG_PROC_FS) + if (nvkms_proc_dir == NULL) { + return; + } + +#if defined(NV_PROC_REMOVE_PRESENT) + proc_remove(nvkms_proc_dir); +#else + /* + * On kernel versions without proc_remove(), we need to explicitly + * remove each proc file beneath nvkms_proc_dir. + * nvkms_proc_init() only creates files directly under + * nvkms_proc_dir, so those are the only files we need to remove + * here: warn if there is any deeper directory nesting. + */ + { + struct proc_dir_entry *entry = nvkms_proc_dir->subdir; + + while (entry != NULL) { + struct proc_dir_entry *next = entry->next; + WARN_ON(entry->subdir != NULL); + remove_proc_entry(entry->name, entry->parent); + entry = next; + } + } + + remove_proc_entry(nvkms_proc_dir->name, nvkms_proc_dir->parent); +#endif /* NV_PROC_REMOVE_PRESENT */ +#endif /* CONFIG_PROC_FS */ +} + +/************************************************************************* + * NVKMS KAPI functions + ************************************************************************/ + +NvBool nvKmsKapiGetFunctionsTable +( + struct NvKmsKapiFunctionsTable *funcsTable +) +{ + return nvKmsKapiGetFunctionsTableInternal(funcsTable); +} +EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable); + +/************************************************************************* + * File operation callback functions. 
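+ *
+ * User space reaches nvkms_ioctl() through the single indirected command
+ * defined in nvkms-ioctl.h (sketch, hypothetical client code):
+ *
+ *     struct NvKmsIoctlParams p = {
+ *         .cmd = cmd, .size = size, .address = (NvU64)(NvUPtr)buf
+ *     };
+ *     ioctl(fd, NVKMS_IOCTL_IOWR, &p);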
+ *************************************************************************/ + +static int nvkms_open(struct inode *inode, struct file *filp) +{ + int status; + + status = nv_down_read_interruptible(&nvkms_pm_lock); + if (status != 0) { + return status; + } + + filp->private_data = + nvkms_open_common(NVKMS_CLIENT_USER_SPACE, NULL, &status); + + nvkms_read_unlock_pm_lock(); + + return status; +} + +static int nvkms_close(struct inode *inode, struct file *filp) +{ + struct nvkms_per_open *popen = filp->private_data; + + if (popen == NULL) { + return -EINVAL; + } + + nvkms_close_popen(popen); + return 0; +} + +static int nvkms_mmap(struct file *filp, struct vm_area_struct *vma) +{ + return -EPERM; +} + +static int nvkms_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + size_t size; + unsigned int nr; + int status; + struct NvKmsIoctlParams params; + struct nvkms_per_open *popen = filp->private_data; + + if ((popen == NULL) || (popen->data == NULL)) { + return -EINVAL; + } + + size = _IOC_SIZE(cmd); + nr = _IOC_NR(cmd); + + /* The only supported ioctl is NVKMS_IOCTL_CMD. */ + + if ((nr != NVKMS_IOCTL_CMD) || (size != sizeof(struct NvKmsIoctlParams))) { + return -ENOTTY; + } + + status = copy_from_user(¶ms, (void *) arg, size); + if (status != 0) { + return -EFAULT; + } + + status = nv_down_read_interruptible(&nvkms_pm_lock); + if (status != 0) { + return status; + } + + status = nvkms_ioctl_common(popen, + params.cmd, + params.address, + params.size); + + nvkms_read_unlock_pm_lock(); + + return status; +} + +static unsigned int nvkms_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + struct nvkms_per_open *popen = filp->private_data; + + if ((popen == NULL) || (popen->data == NULL)) { + return mask; + } + + BUG_ON(popen->type != NVKMS_CLIENT_USER_SPACE); + + if ((filp->f_flags & O_NONBLOCK) == 0) { + poll_wait(filp, &popen->u.user.events.wait_queue, wait); + } + + if (atomic_read(&popen->u.user.events.available)) { + mask = POLLPRI | POLLIN; + } + + return mask; +} + + +/************************************************************************* + * Module loading support code. 
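+ *
+ * Note: nvidia_register_module() below registers instance 1, which the
+ * struct's own comment documents as minor number 255 - 1 = 254,
+ * consistent with the NVKMS_MAJOR_DEVICE_NUMBER (195) and
+ * NVKMS_MINOR_DEVICE_NUMBER (254) constants checked by
+ * nvkms_get_per_open_data().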
+ *************************************************************************/
+
+static nvidia_module_t nvidia_modeset_module = {
+    .owner       = THIS_MODULE,
+    .module_name = "nvidia-modeset",
+    .instance    = 1, /* minor number: 255-1=254 */
+    .open        = nvkms_open,
+    .close       = nvkms_close,
+    .mmap        = nvkms_mmap,
+    .ioctl       = nvkms_ioctl,
+    .poll        = nvkms_poll,
+};
+
+static int __init nvkms_init(void)
+{
+    int ret;
+
+    atomic_set(&nvkms_alloc_called_count, 0);
+
+    ret = nvkms_alloc_rm();
+
+    if (ret != 0) {
+        return ret;
+    }
+
+    sema_init(&nvkms_lock, 1);
+    init_rwsem(&nvkms_pm_lock);
+
+    ret = nv_kthread_q_init(&nvkms_kthread_q,
+                            "nvidia-modeset/kthread_q");
+    if (ret != 0) {
+        goto fail_kthread;
+    }
+
+    ret = nv_kthread_q_init(&nvkms_deferred_close_kthread_q,
+                            "nvidia-modeset/deferred_close_kthread_q");
+    if (ret != 0) {
+        goto fail_deferred_close_kthread;
+    }
+
+    INIT_LIST_HEAD(&nvkms_timers.list);
+    spin_lock_init(&nvkms_timers.lock);
+
+    ret = nvidia_register_module(&nvidia_modeset_module);
+
+    if (ret != 0) {
+        goto fail_register_module;
+    }
+
+    down(&nvkms_lock);
+    if (!nvKmsModuleLoad()) {
+        ret = -ENOMEM;
+    }
+    up(&nvkms_lock);
+    if (ret != 0) {
+        goto fail_module_load;
+    }
+
+    nvkms_proc_init();
+
+    return 0;
+
+fail_module_load:
+    nvidia_unregister_module(&nvidia_modeset_module);
+fail_register_module:
+    nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
+fail_deferred_close_kthread:
+    nv_kthread_q_stop(&nvkms_kthread_q);
+fail_kthread:
+    nvkms_free_rm();
+
+    return ret;
+}
+
+static void __exit nvkms_exit(void)
+{
+    struct nvkms_timer_t *timer, *tmp_timer;
+    unsigned long flags = 0;
+
+    nvkms_proc_exit();
+
+    down(&nvkms_lock);
+    nvKmsModuleUnload();
+    up(&nvkms_lock);
+
+    /*
+     * At this point, any pending tasks should be marked canceled, but
+     * we still need to drain them, so that nvkms_kthread_q_callback() doesn't
+     * get called after the module is unloaded.
+     */
+restart:
+    spin_lock_irqsave(&nvkms_timers.lock, flags);
+
+    list_for_each_entry_safe(timer, tmp_timer, &nvkms_timers.list, timers_list) {
+        if (timer->kernel_timer_created) {
+            /*
+             * Delete each pending timer and check whether it was already
+             * executing (del_timer_sync() returns 0) or was deactivated
+             * before execution (returns 1).  If it began executing, the
+             * kthread_q callback will wait for timer completion, and we
+             * wait for queue completion with nv_kthread_q_stop below.
+             */
+            if (del_timer_sync(&timer->kernel_timer) == 1) {
+                /* We deactivated the timer, so clean up after it. */
+                list_del(&timer->timers_list);
+
+                /*
+                 * Drop the spinlock before freeing the timer, because
+                 * freeing the memory may sleep.
+                 */
+                spin_unlock_irqrestore(&nvkms_timers.lock, flags);
+
+                if (timer->isRefPtr) {
+                    nvkms_dec_ref(timer->dataPtr);
+                    kfree(timer);
+                } else {
+                    nvkms_free(timer, sizeof(*timer));
+                }
+
+                /*
+                 * The list may have changed while the lock was dropped
+                 * to free memory.
+                 */
+                goto restart;
+            }
+        }
+    }
+
+    spin_unlock_irqrestore(&nvkms_timers.lock, flags);
+
+    nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
+    nv_kthread_q_stop(&nvkms_kthread_q);
+
+    nvidia_unregister_module(&nvidia_modeset_module);
+    nvkms_free_rm();
+
+    if (malloc_verbose) {
+        printk(KERN_INFO NVKMS_LOG_PREFIX "Total allocations: %d\n",
+               atomic_read(&nvkms_alloc_called_count));
+    }
+}
+
+module_init(nvkms_init);
+module_exit(nvkms_exit);
+
+#if defined(MODULE_LICENSE)
+MODULE_LICENSE("Dual MIT/GPL");
+#endif
+#if defined(MODULE_INFO)
+MODULE_INFO(supported, "external");
+#endif
+#if defined(MODULE_VERSION)
+MODULE_VERSION(NV_VERSION_STRING);
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
new file mode 100644
index 0000000..91a9a85
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
@@ -0,0 +1,330 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * Define the entry points which the NVKMS kernel interface layer
+ * provides to core NVKMS.
+ */
+
+#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_)
+#define _NVIDIA_MODESET_OS_INTERFACE_H_
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/stddef.h>  /* size_t */
+#else
+#include <stddef.h>        /* size_t */
+#endif
+#include "nvtypes.h"       /* NvU8 */
+
+#include "nvkms.h"
+#include "nv_stdarg.h"
+
+enum NvKmsSyncPtOp {
+    NVKMS_SYNCPT_OP_ALLOC,
+    NVKMS_SYNCPT_OP_GET,
+    NVKMS_SYNCPT_OP_PUT,
+    NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
+    NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD,
+    NVKMS_SYNCPT_OP_READ_MINVAL,
+};
+
+typedef struct {
+
+    struct {
+        const char *syncpt_name; /* in */
+        NvU32 id;                /* out */
+    } alloc;
+
+    struct {
+        NvU32 id;                /* in */
+    } put;
+
+    struct {
+        NvS32 fd;                /* in */
+        NvU32 id;                /* out */
+        NvU32 thresh;            /* out */
+    } fd_to_id_and_thresh;
+
+    struct {
+        NvU32 id;                /* in */
+        NvU32 thresh;            /* in */
+        NvS32 fd;                /* out */
+    } id_and_thresh_to_fd;
+
+    struct {
+        NvU32 id;                /* in */
+        NvU32 minval;            /* out */
+    } read_minval;
+} NvKmsSyncPtOpParams;
+
+
+void   nvkms_call_rm    (void *ops);
+void*  nvkms_alloc      (size_t size,
+                         NvBool zero);
+void   nvkms_free       (void *ptr,
+                         size_t size);
+void*  nvkms_memset     (void *ptr,
+                         NvU8 c,
+                         size_t size);
+void*  nvkms_memcpy     (void *dest,
+                         const void *src,
+                         size_t n);
+void*  nvkms_memmove    (void *dest,
+                         const void *src,
+                         size_t n);
+int    nvkms_memcmp     (const void *s1,
+                         const void *s2,
+                         size_t n);
+size_t nvkms_strlen     (const char *s);
+int    nvkms_strcmp     (const char *s1,
+                         const char *s2);
+char*  nvkms_strncpy    (char *dest,
+                         const char *src,
+                         size_t n);
+void   nvkms_usleep     (NvU64 usec);
+NvU64  nvkms_get_usec   (void);
+int    nvkms_copyin     (void *kptr,
+                         NvU64 uaddr,
+                         size_t n);
+int    nvkms_copyout    (NvU64 uaddr,
+                         const void *kptr,
+                         size_t n);
+void   nvkms_yield      (void);
+void   nvkms_dump_stack (void);
+NvBool nvkms_syncpt_op  (enum NvKmsSyncPtOp op,
+                         NvKmsSyncPtOpParams *params);
+int    nvkms_snprintf   (char *str,
+                         size_t size,
+                         const char *format, ...)
+    __attribute__((format (printf, 3, 4)));
+
+int    nvkms_vsnprintf  (char *str,
+                         size_t size,
+                         const char *format,
+                         va_list ap);
+
+#define NVKMS_LOG_LEVEL_INFO  0
+#define NVKMS_LOG_LEVEL_WARN  1
+#define NVKMS_LOG_LEVEL_ERROR 2
+
+void   nvkms_log        (const int level,
+                         const char *gpuPrefix,
+                         const char *msg);
+
+/*!
+ * Refcounted pointer to an object that may be freed while references still
+ * exist.
+ *
+ * This structure is intended to be used for nvkms timers to refer to objects
+ * that may be freed while timers with references to the object are still
+ * pending.
+ *
+ * When the owner of an nvkms_ref_ptr is freed, the teardown code should call
+ * nvkms_free_ref_ptr().  That marks the pointer as invalid so that later calls
+ * to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than
+ * the pointer originally passed to nvkms_alloc_ref_ptr().
+ */
+struct nvkms_ref_ptr;
+
+/*!
+ * Allocate and initialize a ref_ptr.
+ *
+ * The pointer stored in the ref_ptr is initialized to ptr, and its refcount is
+ * initialized to 1.
+ */
+struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr);
+
+/*!
+ * Clear a ref_ptr.
+ *
+ * This function sets the pointer stored in the ref_ptr to NULL and drops the
+ * reference created by nvkms_alloc_ref_ptr().  This function should be called
+ * when the object pointed to by the ref_ptr is freed.
+ *
+ * A caller should make sure that no code that can call nvkms_inc_ref() can
+ * execute after nvkms_free_ref_ptr() is called.
+ */
+void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr);
+
+/*!
+ * Increment the refcount of a ref_ptr.
+ * + * This function should be used when a pointer to the ref_ptr is stored + * somewhere. For example, when the ref_ptr is used as the argument to + * nvkms_alloc_timer. + * + * This may be called outside of the nvkms_lock, for example by an RM callback. + */ +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Decrement the refcount of a ref_ptr and extract the embedded pointer. + * + * This should be used by code that needs to atomically determine whether the + * object pointed to by the ref_ptr still exists. To prevent the object from + * being destroyed while the current thread is executing, this should be called + * from inside the nvkms_lock. + */ +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr); + +typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32); +typedef struct nvkms_timer_t nvkms_timer_handle_t; + +/*! + * Schedule a callback function to be called in the future. + * + * The callback function 'proc' will be called with the arguments + * 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now. + * If usec==0, the callback will be scheduled to be called as soon as + * possible. + * + * The callback function is guaranteed to be called back with the + * nvkms_lock held, and in process context. + * + * Returns an opaque handle, nvkms_timer_handle_t*, or NULL on + * failure. If non-NULL, the caller is responsible for caching the + * handle and eventually calling nvkms_free_timer() to free the + * memory. + * + * The nvkms_lock may be held when nvkms_alloc_timer() is called, but + * the nvkms_lock is not required. + */ +nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec); + +/*! + * Schedule a callback function to be called in the future. + * + * This function is like nvkms_alloc_timer() except that instead of returning a + * pointer to a structure that the caller should free later, the timer will free + * itself after executing the callback function. This is only intended for + * cases where the caller cannot cache the nvkms_alloc_timer() return value. + */ +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec); + +/*! + * Free the nvkms_timer_t object. If the callback function has not + * yet been called, freeing the nvkms_timer_handle_t will guarantee + * that it is not called. + * + * The nvkms_lock must be held when calling nvkms_free_timer(). + */ +void nvkms_free_timer (nvkms_timer_handle_t *handle); + + + +/*! + * Notify the NVKMS kernel interface that the event queue has changed. + * + * \param[in] pOpenKernel This indicates the file descriptor + * ("per-open") of the client whose event queue + * has been updated. This is the pointer + * passed by the kernel interface to nvKmsOpen(). + * \param[in] eventsAvailable If TRUE, a new event has been added to the + * event queue. If FALSE, the last event has + * been removed from the event queue. + */ +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable); + + +/*! + * Get the "per-open" data (the pointer returned by nvKmsOpen()) + * associated with this fd. + */ +void* nvkms_get_per_open_data(int fd); + + +/*! + * Raise and lower the reference count of the specified GPU. + */ +NvBool nvkms_open_gpu(NvU32 gpuId); +void nvkms_close_gpu(NvU32 gpuId); + + +/*! + * Enumerate nvidia gpus. + */ + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info); + +/*! + * Availability of write combining support for video memory. 
+ */ + +NvBool nvkms_allow_write_combining(void); + +/*! + * Checks whether the fd is associated with an nvidia character device. + */ +NvBool nvkms_fd_is_nvidia_chardev(int fd); + +/*! + * NVKMS interface for kernel space NVKMS clients like KAPI + */ + +struct nvkms_per_open; + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +); + +void nvkms_close_from_kapi(struct nvkms_per_open *popen); + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * APIs for locking. + */ + +typedef struct nvkms_sema_t nvkms_sema_handle_t; + +nvkms_sema_handle_t* + nvkms_sema_alloc (void); +void nvkms_sema_free (nvkms_sema_handle_t *sema); +void nvkms_sema_down (nvkms_sema_handle_t *sema); +void nvkms_sema_up (nvkms_sema_handle_t *sema); + +/*! + * APIs to register/unregister backlight device. + */ +struct nvkms_backlight_device; + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness); + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd); + +#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild new file mode 100644 index 0000000..0475f26 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild @@ -0,0 +1,99 @@ +########################################################################### +# Kbuild fragment for nvidia-modeset.ko +########################################################################### + +# +# Define NVIDIA_MODESET_{SOURCES,OBJECTS} +# + +NVIDIA_MODESET_SOURCES = nvidia-modeset/nvidia-modeset-linux.c +NVIDIA_MODESET_SOURCES += nvidia-modeset/nv-kthread-q.c + +NVIDIA_MODESET_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_MODESET_SOURCES)) + +obj-m += nvidia-modeset.o +nvidia-modeset-y := $(NVIDIA_MODESET_OBJECTS) + +NVIDIA_MODESET_KO = nvidia-modeset/nvidia-modeset.ko + +NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO) + + +# +# nv-modeset-kernel.o_binary is the core binary component of nvidia-modeset.ko, +# shared across all UNIX platforms. Create a symlink, "nv-modeset-kernel.o" +# that points to nv-modeset-kernel.o_binary, and add nv-modeset-kernel.o to the +# list of objects to link into nvidia-modeset.ko. +# +# Note that: +# - The kbuild "clean" rule will delete all objects in nvidia-modeset-y (which +# is why we use a symlink instead of just adding nv-modeset-kernel.o_binary +# to nvidia-modeset-y). +# - kbuild normally uses the naming convention of ".o_shipped" for +# binary files. That is not used here, because the kbuild rule to +# create the "normal" object file from ".o_shipped" does a copy, not +# a symlink. This file is quite large, so a symlink is preferred. +# - The file added to nvidia-modeset-y should be relative to gmake's cwd. +# But, the target for the symlink rule should be prepended with $(obj). 
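+#
+# The effect is, e.g. (sketch):
+#
+#   nvidia-modeset/nv-modeset-kernel.o -> $(src)/nvidia-modeset/nv-modeset-kernel.o_binary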
+#
+
+NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary
+NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o
+
+quiet_cmd_symlink = SYMLINK $@
+ cmd_symlink = ln -sf $< $@
+
+targets += $(NVIDIA_MODESET_BINARY_OBJECT_O)
+
+$(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE
+	$(call if_changed,symlink)
+
+nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O)
+
+
+#
+# Define nvidia-modeset.ko-specific CFLAGS.
+#
+
+NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset
+NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
+
+$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS))
+
+
+#
+# Build nv-modeset-interface.o from the kernel interface layer
+# objects, suitable for further processing by the installer and
+# inclusion as a precompiled kernel interface file.
+#
+
+NVIDIA_MODESET_INTERFACE := nvidia-modeset/nv-modeset-interface.o
+
+# Linux kernel v5.12 and later look at "always-y", kernel versions
+# before v5.6 look at "always", and kernel versions between v5.6 and
+# v5.12 look at both.
+
+always += $(NVIDIA_MODESET_INTERFACE)
+always-y += $(NVIDIA_MODESET_INTERFACE)
+
+$(obj)/$(NVIDIA_MODESET_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_MODESET_OBJECTS))
+	$(LD) -r -o $@ $^
+
+#
+# Register the conftests needed by nvidia-modeset.ko
+#
+
+NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_MODESET_OBJECTS)
+
+NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations
+NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory
+NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
+NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
+NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_kthread_create_on_node
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h
new file mode 100644
index 0000000..cb27573
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_IOCTL_H) +#define NVKMS_IOCTL_H + +#include "nvtypes.h" + +/*! + * Some of the NVKMS ioctl parameter data structures are quite large + * and would exceed the parameter size constraints on at least SunOS. + * + * Redirect ioctls through a level of indirection: user-space assigns + * NvKmsIoctlParams with the real command, size, and pointer, and + * passes the NvKmsIoctlParams through the ioctl. + */ + +struct NvKmsIoctlParams { + NvU32 cmd; + NvU32 size; + NvU64 address NV_ALIGN_BYTES(8); +}; + +#define NVKMS_IOCTL_MAGIC 'm' +#define NVKMS_IOCTL_CMD 0 + +#define NVKMS_IOCTL_IOWR \ + _IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams) + +/*! + * User-space pointers are always passed to NVKMS in an NvU64. + * This user-space address is eventually passed into the platform's + * copyin/copyout functions, in a void* argument. + * + * This utility function converts from an NvU64 to a pointer. + */ + +static inline void *nvKmsNvU64ToPointer(NvU64 value) +{ + return (void *)(NvUPtr)value; +} + +/*! + * Before casting the NvU64 to a void*, check that casting to a pointer + * size within the kernel does not lose any precision in the current + * environment. + */ +static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address) +{ + return address == (NvU64)(NvUPtr)address; +} + +#endif /* NVKMS_IOCTL_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h new file mode 100644 index 0000000..1276186 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NV_KMS_H__
+#define __NV_KMS_H__
+
+#include "nvtypes.h"
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/stddef.h>  /* size_t */
+#else
+#include <stddef.h>        /* size_t */
+#endif
+
+#include "nvkms-kapi.h"
+
+typedef struct nvkms_per_open nvkms_per_open_handle_t;
+
+typedef void nvkms_procfs_out_string_func_t(void *data,
+                                            const char *str);
+
+typedef void nvkms_procfs_proc_t(void *data,
+                                 char *buffer, size_t size,
+                                 nvkms_procfs_out_string_func_t *outString);
+
+typedef struct {
+    const char *name;
+    nvkms_procfs_proc_t *func;
+} nvkms_procfs_file_t;
+
+enum NvKmsClientType {
+    NVKMS_CLIENT_USER_SPACE,
+    NVKMS_CLIENT_KERNEL_SPACE,
+};
+
+NvBool nvKmsIoctl(
+    void *pOpenVoid,
+    NvU32 cmd,
+    NvU64 paramsAddress,
+    const size_t paramSize);
+
+void nvKmsClose(void *pOpenVoid);
+
+void* nvKmsOpen(
+    NvU32 pid,
+    enum NvKmsClientType clientType,
+    nvkms_per_open_handle_t *pOpenKernel);
+
+NvBool nvKmsModuleLoad(void);
+
+void nvKmsModuleUnload(void);
+
+void nvKmsSuspend(NvU32 gpuId);
+void nvKmsResume(NvU32 gpuId);
+
+void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles);
+
+void nvKmsKapiHandleEventQueueChange
+(
+    struct NvKmsKapiDevice *device
+);
+
+NvBool nvKmsKapiGetFunctionsTableInternal
+(
+    struct NvKmsKapiFunctionsTable *funcsTable
+);
+
+NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
+NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
+
+#endif /* __NV_KMS_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c
new file mode 100644
index 0000000..96e31fa
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c
@@ -0,0 +1,1411 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+
+#include <linux/acpi.h>
+
+#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED)
+static NV_STATUS   nv_acpi_extract_integer (const union acpi_object *, void *, NvU32, NvU32 *);
+static NV_STATUS   nv_acpi_extract_buffer  (const union acpi_object *, void *, NvU32, NvU32 *);
+static NV_STATUS   nv_acpi_extract_package (const union acpi_object *, void *, NvU32, NvU32 *);
+static NV_STATUS   nv_acpi_extract_object  (const union acpi_object *, void *, NvU32, NvU32 *);
+
+static void        nv_acpi_powersource_hotplug_event(acpi_handle, u32, void *);
+static acpi_status nv_acpi_find_methods    (acpi_handle, u32, void *, void **);
+static NV_STATUS   nv_acpi_nvif_method     (NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
+
+static NV_STATUS   nv_acpi_wmmx_method     (NvU32, NvU8 *, NvU16 *);
+
+static acpi_handle nvif_handle = NULL;
+static acpi_handle wmmx_handle = NULL;
+
+// Used for AC Power Source Hotplug Handling
+static acpi_handle psr_handle = NULL;
+static acpi_handle psr_device_handle = NULL;
+static nv_acpi_t  *psr_nv_acpi_object = NULL;
+
+static NvBool battery_present = NV_FALSE;
+
+#define BIX_BATTERY_TECHNOLOGY_OFFSET 0x4
+#define BIF_BATTERY_TECHNOLOGY_OFFSET 0x3
+#define BATTERY_RECHARGABLE           0x1
+
+/* Moved into acpi/video.h in Linux 4.10 */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#endif
+
+/* Added to acpi/video.h in Linux 3.1 */
+#ifndef ACPI_VIDEO_CLASS
+#define ACPI_VIDEO_CLASS "video"
+#endif
+
+static int nv_acpi_get_device_handle(nv_state_t *nv, acpi_handle *dev_handle)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+#if defined(DEVICE_ACPI_HANDLE)
+    *dev_handle = DEVICE_ACPI_HANDLE(nvl->dev);
+    return NV_TRUE;
+#elif defined (ACPI_HANDLE)
+    *dev_handle = ACPI_HANDLE(nvl->dev);
+    return NV_TRUE;
+#else
+    return NV_FALSE;
+#endif
+}
+
+static int nv_acpi_notify(struct notifier_block *nb,
+                          unsigned long val, void *data)
+{
+    struct acpi_bus_event *info = data;
+    nv_stack_t *sp = NULL;
+    nv_linux_state_t *nvl = container_of(nb, nv_linux_state_t, acpi_nb);
+    nv_state_t *nv = NV_STATE_PTR(nvl);
+
+    if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+        if (nv_kmem_cache_alloc_stack(&sp) == 0) {
+            /*
+             * Function to handle device specific ACPI events
+             * such as display hotplug and D-notifier events.
+             */
+            rm_acpi_notify(sp, nv, info->type);
+            nv_kmem_cache_free_stack(sp);
+        }
+        else
+            nv_printf(NV_DBG_ERRORS,
+                      "NVRM: nv_acpi_notify: failed to allocate stack\n");
+
+        /*
+         * Special case for the ACPI_VIDEO_NOTIFY_PROBE event: intentionally
+         * return NOTIFY_BAD to inform acpi-video to stop generating
+         * keypresses for this event.
+ */ + if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { + return NOTIFY_BAD; + } + } + + return NOTIFY_DONE; +} + +void nv_acpi_register_notifier(nv_linux_state_t *nvl) +{ + nvl->acpi_nb.notifier_call = nv_acpi_notify; + register_acpi_notifier(&nvl->acpi_nb); +} + +void nv_acpi_unregister_notifier(nv_linux_state_t *nvl) +{ + unregister_acpi_notifier(&nvl->acpi_nb); +} + +NV_STATUS NV_API_CALL nv_acpi_get_powersource(NvU32 *ac_plugged) +{ + unsigned long long val; + int status = 0; + + if (!ac_plugged) + return NV_ERR_INVALID_ARGUMENT; + + if (!psr_device_handle) + return NV_ERR_INVALID_ARGUMENT; + + // Check whether or not AC power is plugged in + status = acpi_evaluate_integer(psr_device_handle, "_PSR", NULL, &val); + if (ACPI_FAILURE(status)) + return NV_ERR_GENERIC; + + // AC Power Source Plug State + // - 0x0 unplugged + // - 0x1 plugged + *ac_plugged = (val == 0x1); + + return NV_OK; +} + +#define ACPI_POWER_SOURCE_CHANGE_EVENT 0x80 +static void nv_acpi_powersource_hotplug_event(acpi_handle handle, u32 event_type, void *data) +{ + /* + * This function will handle acpi events from the linux kernel, used + * to detect notifications from Power Source device + */ + nv_acpi_t *pNvAcpiObject = data; + u32 ac_plugged = 0; + + if (event_type == ACPI_POWER_SOURCE_CHANGE_EVENT) + { + if (nv_acpi_get_powersource(&ac_plugged) != NV_OK) + return; + + rm_system_event(pNvAcpiObject->sp, NV_SYSTEM_ACPI_BATTERY_POWER_EVENT, !ac_plugged); + } +} +/* + * End of ACPI event handler functions + */ + +/* Do the necessary allocations and install notifier "handler" on the device-node "device" */ +static nv_acpi_t* nv_install_notifier(struct acpi_handle *handle, acpi_notify_handler handler) +{ + nvidia_stack_t *sp = NULL; + nv_acpi_t *pNvAcpiObject = NULL; + NV_STATUS rmStatus = NV_ERR_GENERIC; + acpi_status status = -1; + + if (!handle) + return NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NULL; + } + + rmStatus = os_alloc_mem((void **) &pNvAcpiObject, sizeof(nv_acpi_t)); + if (rmStatus != NV_OK) + goto return_error; + + os_mem_set((void *)pNvAcpiObject, 0, sizeof(nv_acpi_t)); + + // store a handle reference in our object + pNvAcpiObject->handle = handle; + pNvAcpiObject->sp = sp; + + status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, + handler, pNvAcpiObject); + if (!ACPI_FAILURE(status)) + { + pNvAcpiObject->notify_handler_installed = 1; + + return pNvAcpiObject; + } + +return_error: + nv_kmem_cache_free_stack(sp); + if (pNvAcpiObject) + os_free_mem((void *)pNvAcpiObject); + + return NULL; +} + +/* Tear-down and remove whatever nv_install_notifier did */ +static void nv_uninstall_notifier(nv_acpi_t *pNvAcpiObject, acpi_notify_handler handler) +{ + acpi_status status; + + if (pNvAcpiObject && pNvAcpiObject->notify_handler_installed) + { + status = acpi_remove_notify_handler(pNvAcpiObject->handle, ACPI_DEVICE_NOTIFY, handler); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_methods_uninit: failed to remove event notification handler (%d)!\n", status); + } + else + { + nv_kmem_cache_free_stack(pNvAcpiObject->sp); + os_free_mem((void *)pNvAcpiObject); + } + } + + return; +} + +/* + * acpi methods init function. + * check if the NVIF, _DSM and WMMX methods are present in the acpi namespace. + * store NVIF, _DSM and WMMX handle if found. + */ + +void NV_API_CALL nv_acpi_methods_init(NvU32 *handlesPresent) +{ + if (!handlesPresent) // Caller passed us invalid pointer. 
+ return; + + *handlesPresent = 0; + + NV_ACPI_WALK_NAMESPACE(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, nv_acpi_find_methods, NULL, NULL); + + if (nvif_handle) + { + *handlesPresent = NV_ACPI_NVIF_HANDLE_PRESENT; + } + + if (wmmx_handle) + *handlesPresent = *handlesPresent | NV_ACPI_WMMX_HANDLE_PRESENT; + + if (psr_handle) + { + // Since _PSR is not a per-GPU construct we only need to register a + // single notifier for the _PSR event. Skip registration for subsequent + // devices + if (psr_nv_acpi_object == NULL) + { + psr_nv_acpi_object = nv_install_notifier(psr_device_handle, nv_acpi_powersource_hotplug_event); + } + } + + return; +} + +acpi_status nv_acpi_find_methods( + acpi_handle handle, + u32 nest_level, + void *dummy1, + void **dummy2 +) +{ + acpi_handle method_handle; + + if (!acpi_get_handle(handle, "NVIF", &method_handle)) + { + nvif_handle = method_handle; + } + + if (!acpi_get_handle(handle, "WMMX", &method_handle)) + { + wmmx_handle = method_handle; + } + + if (!acpi_get_handle(handle, "_PSR", &method_handle)) + { + psr_handle = method_handle; + psr_device_handle = handle; + } + + return 0; +} + +void NV_API_CALL nv_acpi_methods_uninit(void) +{ + nvif_handle = NULL; + wmmx_handle = NULL; + + if (psr_nv_acpi_object != NULL) + { + nv_uninstall_notifier(psr_nv_acpi_object, nv_acpi_powersource_hotplug_event); + + psr_handle = NULL; + psr_device_handle = NULL; + psr_nv_acpi_object = NULL; + } +} + +static NV_STATUS nv_acpi_extract_integer( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + if (acpi_object->type != ACPI_TYPE_INTEGER) + return NV_ERR_INVALID_ARGUMENT; + + if (acpi_object->integer.value & ~0xffffffffULL) + *data_size = sizeof(acpi_object->integer.value); + else + *data_size = sizeof(NvU32); + + if ((buffer_size < sizeof(NvU32)) || + ((buffer_size < sizeof(acpi_object->integer.value)) && + (acpi_object->integer.value & ~0xffffffffULL))) + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + memcpy(buffer, &acpi_object->integer.value, *data_size); + + return NV_OK; +} + +static NV_STATUS nv_acpi_extract_buffer( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + if (acpi_object->type != ACPI_TYPE_BUFFER) + return NV_ERR_INVALID_ARGUMENT; + + *data_size = acpi_object->buffer.length; + + if (buffer_size < acpi_object->buffer.length) + return NV_ERR_BUFFER_TOO_SMALL; + + memcpy(buffer, acpi_object->buffer.pointer, *data_size); + + return NV_OK; +} + +static NV_STATUS nv_acpi_extract_package( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + NV_STATUS status = NV_OK; + NvU32 i, element_size = 0; + + if (acpi_object->type != ACPI_TYPE_PACKAGE) + return NV_ERR_INVALID_ARGUMENT; + + *data_size = 0; + for (i = 0; i < acpi_object->package.count; i++) + { + buffer = ((char *)buffer + element_size); + buffer_size -= element_size; + + status = nv_acpi_extract_object(&acpi_object->package.elements[i], + buffer, buffer_size, &element_size); + if (status != NV_OK) + break; + + *data_size += element_size; + } + + return status; +} + +static NV_STATUS nv_acpi_extract_object( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + NV_STATUS status; + + switch (acpi_object->type) + { + case ACPI_TYPE_INTEGER: + status = nv_acpi_extract_integer(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_BUFFER: + status = nv_acpi_extract_buffer(acpi_object, buffer, 
+ buffer_size, data_size); + break; + + case ACPI_TYPE_PACKAGE: + status = nv_acpi_extract_package(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_ANY: + /* + * ACPI_TYPE_ANY is used to represent a NULL/Uninitialized object which is objectType 0 + * in the ACPI SPEC. This should not be treated as error. + */ + status = NV_OK; + break; + + default: + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} + +NV_STATUS NV_API_CALL nv_acpi_method( + NvU32 acpi_method, + NvU32 function, + NvU32 subFunction, + void *inParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *outData, + NvU16 *outDataSize +) +{ + NV_STATUS status; + + switch (acpi_method) + { + case NV_EVAL_ACPI_METHOD_NVIF: + status = nv_acpi_nvif_method(function, + subFunction, + inParams, + inParamSize, + outStatus, + outData, + outDataSize); + break; + + case NV_EVAL_ACPI_METHOD_WMMX: + status = nv_acpi_wmmx_method(function, outData, outDataSize); + break; + + default: + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} + +/* + * This function executes an NVIF ACPI method. + */ +static NV_STATUS nv_acpi_nvif_method( + NvU32 function, + NvU32 subFunction, + void *inParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *outData, + NvU16 *outDataSize +) +{ + acpi_status status; + struct acpi_object_list input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *nvif = NULL; + union acpi_object nvif_params[3]; + NvU16 localOutDataSize; + NvU8 localInParams[8]; + + if (!nvif_handle) + return NV_ERR_NOT_SUPPORTED; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_nvif_method: invalid context!\n"); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + nvif_params[0].integer.type = ACPI_TYPE_INTEGER; + nvif_params[0].integer.value = function; + + nvif_params[1].integer.type = ACPI_TYPE_INTEGER; + nvif_params[1].integer.value = subFunction; + + nvif_params[2].buffer.type = ACPI_TYPE_BUFFER; + + if (inParams && (inParamSize > 0)) + { + nvif_params[2].buffer.length = inParamSize; + nvif_params[2].buffer.pointer = inParams; + } + else + { + memset(localInParams, 0, 8); + nvif_params[2].buffer.length = 8; + nvif_params[2].buffer.pointer = localInParams; + } + + input.count = 3; + input.pointer = nvif_params; + + status = acpi_evaluate_object(nvif_handle, NULL, &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_nvif_method: failed to get NVIF data, " + "status 0x%x, function 0x%x, subFunction 0x%x!\n", + status, function, subFunction); + return NV_ERR_GENERIC; + } + + nvif = output.pointer; + if (nvif && (nvif->type == ACPI_TYPE_BUFFER) && (nvif->buffer.length >= 4)) + { + if (outStatus) + { + *outStatus = nvif->buffer.pointer[3] << 24 | + nvif->buffer.pointer[2] << 16 | + nvif->buffer.pointer[1] << 8 | + nvif->buffer.pointer[0]; + } + + if (outData && outDataSize) + { + localOutDataSize = nvif->buffer.length - 4; + if (localOutDataSize <= *outDataSize) + { + *outDataSize = NV_MIN(*outDataSize, localOutDataSize); + memcpy(outData, &nvif->buffer.pointer[4], *outDataSize); + } + else + { + *outDataSize = localOutDataSize; + kfree(output.pointer); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_nvif_method: NVIF data invalid, function 0x%x, " + "subFunction 0x%x!\n", function, subFunction); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + kfree(output.pointer); + return NV_OK; +} + +#define MAX_INPUT_PARAM_SIZE 1024 +/* + * This function 
executes a _DSM ACPI method.
+ */
+NV_STATUS NV_API_CALL nv_acpi_dsm_method(
+    nv_state_t *nv,
+    NvU8 *pAcpiDsmGuid,
+    NvU32 acpiDsmRev,
+    NvBool acpiNvpcfDsmFunction,
+    NvU32 acpiDsmSubFunction,
+    void *pInParams,
+    NvU16 inParamSize,
+    NvU32 *outStatus,
+    void *pOutData,
+    NvU16 *pSize
+)
+{
+    NV_STATUS status = NV_ERR_OPERATING_SYSTEM;
+    acpi_status acpi_status;
+    struct acpi_object_list input;
+    union acpi_object *dsm = NULL;
+    struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+    union acpi_object dsm_params[4];
+    NvU8 *argument3 = NULL;
+    NvU32 data_size;
+    acpi_handle dev_handle = NULL;
+
+    if (!nv_acpi_get_device_handle(nv, &dev_handle))
+        return NV_ERR_NOT_SUPPORTED;
+
+    if (!dev_handle)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    if ((!pInParams) || (inParamSize > MAX_INPUT_PARAM_SIZE) || (!pOutData) || (!pSize))
+    {
+        nv_printf(NV_DBG_INFO,
+                  "NVRM: %s: invalid argument(s)!\n", __FUNCTION__);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (!NV_MAY_SLEEP())
+    {
+#if defined(DEBUG)
+        nv_printf(NV_DBG_INFO,
+                  "NVRM: %s: invalid context!\n", __FUNCTION__);
+#endif
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    status = os_alloc_mem((void **)&argument3, inParamSize);
+    if (status != NV_OK)
+        return status;
+
+    //
+    // dsm_params[0] carries the 16-byte _DSM GUID and dsm_params[1] the
+    // revision id, both taken from the caller's arguments.
+    //
+
+    dsm_params[0].buffer.type = ACPI_TYPE_BUFFER;
+    dsm_params[0].buffer.length = 0x10;
+    dsm_params[0].buffer.pointer = pAcpiDsmGuid;
+
+    dsm_params[1].integer.type = ACPI_TYPE_INTEGER;
+    dsm_params[1].integer.value = acpiDsmRev;
+
+    dsm_params[2].integer.type = ACPI_TYPE_INTEGER;
+    dsm_params[2].integer.value = acpiDsmSubFunction;
+
+    dsm_params[3].buffer.type = ACPI_TYPE_BUFFER;
+    dsm_params[3].buffer.length = inParamSize;
+    memcpy(argument3, pInParams, dsm_params[3].buffer.length);
+    dsm_params[3].buffer.pointer = argument3;
+
+    // parameters for dsm calls (GUID, rev, subfunction, data)
+    input.count = 4;
+    input.pointer = dsm_params;
+
+    if (acpiNvpcfDsmFunction)
+    {
+        //
+        // acpi_evaluate_object() can operate with either valid object pathname or
+        // valid object handle. For NVPCF DSM function, use valid pathname as we do
+        // not have device handle for NVPCF device
+        //
+        dev_handle = NULL;
+        acpi_status = acpi_evaluate_object(dev_handle, "\\_SB.NPCF._DSM", &input, &output);
+    }
+    else
+    {
+        acpi_status = acpi_evaluate_object(dev_handle, "_DSM", &input, &output);
+    }
+
+    if (ACPI_FAILURE(acpi_status))
+    {
+        nv_printf(NV_DBG_INFO,
+                  "NVRM: %s: failed to evaluate _DSM method!\n", __FUNCTION__);
+        goto exit;
+    }
+
+    dsm = output.pointer;
+    if (dsm != NULL)
+    {
+        if (outStatus)
+        {
+            *outStatus = dsm->buffer.pointer[3] << 24 |
+                         dsm->buffer.pointer[2] << 16 |
+                         dsm->buffer.pointer[1] << 8  |
+                         dsm->buffer.pointer[0];
+        }
+
+        status = nv_acpi_extract_object(dsm, pOutData, *pSize, &data_size);
+        *pSize = data_size;
+
+        kfree(output.pointer);
+    }
+    if (status != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: %s: DSM data invalid!\n", __FUNCTION__);
+    }
+
+exit:
+    os_free_mem(argument3);
+    return status;
+}
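+
+/*
+ * Usage sketch (illustrative only, not part of this file): a resource-manager
+ * caller might invoke the _DSM helper above along these lines. The GUID
+ * bytes, revision, subfunction number, and buffer sizes are hypothetical
+ * placeholders.
+ *
+ *   NvU8  guid[0x10] = { 0 };
+ *   NvU32 in = 0, dsmStatus = 0;
+ *   NvU8  out[64];
+ *   NvU16 outSize = sizeof(out);
+ *
+ *   if (nv_acpi_dsm_method(nv, guid, 0x100, NV_FALSE, 0x1,
+ *                          &in, sizeof(in), &dsmStatus,
+ *                          out, &outSize) == NV_OK)
+ *   {
+ *       // outSize now holds the number of bytes extracted into out[]
+ *   }
+ */
+
+/*
+ * This function executes a _DDC ACPI method.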
+ */
+NV_STATUS NV_API_CALL nv_acpi_ddc_method(
+    nv_state_t *nv,
+    void *pEdidBuffer,
+    NvU32 *pSize,
+    NvBool bReadMultiBlock
+)
+{
+    acpi_status status;
+    union acpi_object *ddc = NULL;
+    NvU32 i, largestEdidSize;
+    acpi_handle dev_handle = NULL;
+    acpi_handle lcd_dev_handle = NULL;
+    acpi_handle handle = NULL;
+
+    if (!nv_acpi_get_device_handle(nv, &dev_handle))
+        return NV_ERR_NOT_SUPPORTED;
+
+    if (!dev_handle)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    if (!NV_MAY_SLEEP())
+    {
+#if defined(DEBUG)
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: %s: invalid context!\n",
+                  __FUNCTION__);
+#endif
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    while (lcd_dev_handle == NULL)
+    {
+        unsigned long long device_id = 0;
+
+        status = acpi_get_next_object(ACPI_TYPE_DEVICE, dev_handle,
+                                      handle, &handle);
+        if (ACPI_FAILURE(status) || (handle == NULL))
+            break;
+
+        status = acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
+        if (ACPI_FAILURE(status))
+            /* Couldn't query device_id for this device */
+            continue;
+
+        switch (device_id & 0xffff) {
+            case 0x0110:
+            case 0x0118:
+            case 0x0400:
+            case 0xA420:
+                lcd_dev_handle = handle;
+                nv_printf(NV_DBG_INFO, "NVRM: %s Found LCD: %llx\n",
+                          __FUNCTION__, device_id);
+                break;
+            default:
+                break;
+        }
+    }
+
+    if (lcd_dev_handle == NULL)
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: %s LCD not found\n", __FUNCTION__);
+        return NV_ERR_GENERIC;
+    }
+
+    //
+    // As per ACPI Spec 3.0:
+    // ARG0 = 0x1 for 128 bytes edid buffer
+    // ARG0 = 0x2 for 256 bytes edid buffer
+    //
+
+    largestEdidSize = bReadMultiBlock ? 2 : 1;
+
+    for (i = largestEdidSize; i >= 1; i--)
+    {
+        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+        union acpi_object ddc_arg0 = { ACPI_TYPE_INTEGER };
+        struct acpi_object_list input = { 1, &ddc_arg0 };
+
+        ddc_arg0.integer.value = i;
+        status = acpi_evaluate_object(lcd_dev_handle, "_DDC", &input, &output);
+        if (ACPI_SUCCESS(status)) {
+            ddc = output.pointer;
+            break;
+        }
+    }
+
+    if (ACPI_FAILURE(status))
+    {
+        nv_printf(NV_DBG_INFO,
+                  "NVRM: %s: failed status: %08x \n",
+                  __FUNCTION__,
+                  status);
+        return NV_ERR_GENERIC;
+    }
+    else
+    {
+        if (ddc && (ddc->type == ACPI_TYPE_BUFFER) && (ddc->buffer.length > 0))
+        {
+            if (ddc->buffer.length <= *pSize)
+            {
+                *pSize = NV_MIN(*pSize, ddc->buffer.length);
+                memcpy(pEdidBuffer, ddc->buffer.pointer, *pSize);
+            }
+            else
+            {
+                kfree(ddc);
+                return NV_ERR_BUFFER_TOO_SMALL;
+            }
+        }
+    }
+
+    kfree(ddc);
+    return NV_OK;
+}
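+
+/*
+ * Usage sketch (illustrative only, not part of this file): reading an EDID
+ * over ACPI _DDC with the helper above. The 256-byte buffer assumes the
+ * caller wants multi-block reads.
+ *
+ *   NvU8  edid[256];
+ *   NvU32 size = sizeof(edid);
+ *
+ *   if (nv_acpi_ddc_method(nv, edid, &size, NV_TRUE) == NV_OK)
+ *   {
+ *       // size now holds the number of EDID bytes copied
+ *   }
+ */
+
+/*
+ * This function executes a _ROM ACPI method.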
+ */ +NV_STATUS NV_API_CALL nv_acpi_rom_method( + nv_state_t *nv, + NvU32 *pInData, + NvU32 *pOutData +) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *rom; + union acpi_object rom_arg[2]; + struct acpi_object_list input = { 2, rom_arg }; + acpi_handle dev_handle = NULL; + uint32_t offset, length; + + if (!nv_acpi_get_device_handle(nv, &dev_handle)) + return NV_ERR_NOT_SUPPORTED; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + offset = pInData[0]; + length = pInData[1]; + + rom_arg[0].type = ACPI_TYPE_INTEGER; + rom_arg[0].integer.value = offset; + rom_arg[1].type = ACPI_TYPE_INTEGER; + rom_arg[1].integer.value = length; + + status = acpi_evaluate_object(dev_handle, "_ROM", &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _ROM method!\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + else + { + rom = output.pointer; + + if ((rom != NULL) && (rom->type == ACPI_TYPE_BUFFER) && + (rom->buffer.length >= length)) + { + memcpy(pOutData, rom->buffer.pointer, length); + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: Invalid _ROM data\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +/* + * This function executes a _DOD ACPI method. + */ +NV_STATUS NV_API_CALL nv_acpi_dod_method( + nv_state_t *nv, + NvU32 *pOutData, + NvU32 *pSize +) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *dod; + acpi_handle dev_handle = NULL; + NvU32 i, count = (*pSize / sizeof(NvU32)); + + if (!nv_acpi_get_device_handle(nv, &dev_handle)) + return NV_ERR_NOT_SUPPORTED; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + status = acpi_evaluate_object(dev_handle, "_DOD", NULL, &output); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _DOD method!\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + else + { + dod = output.pointer; + *pSize = 0; + + if ((dod != NULL) && (dod->type == ACPI_TYPE_PACKAGE) && + (dod->package.count <= count)) + { + for (i = 0; i < dod->package.count; i++) + { + if (dod->package.elements[i].type != ACPI_TYPE_INTEGER) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: _DOD entry invalid!\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + pOutData[i] = dod->package.elements[i].integer.value; + *pSize += sizeof(NvU32); + } + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: _DOD data too large!\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +/* + * This function executes a WMMX ACPI method. 
+ */ +static NV_STATUS nv_acpi_wmmx_method( + NvU32 arg2, + NvU8 *outData, + NvU16 *outDataSize +) +{ + acpi_status status; + struct acpi_object_list input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *mmx = NULL; + union acpi_object mmx_params[3]; + + if (!wmmx_handle) + return NV_ERR_NOT_SUPPORTED; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_wmmx_method: invalid context!\n"); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + /* argument 0 and argument 1 are not used in WMMX method, passing 0 */ + + mmx_params[0].integer.type = ACPI_TYPE_INTEGER; + mmx_params[0].integer.value = 0; + + mmx_params[1].integer.type = ACPI_TYPE_INTEGER; + mmx_params[1].integer.value = 0; + + mmx_params[2].integer.type = ACPI_TYPE_INTEGER; + mmx_params[2].integer.value = arg2; + + input.count = 3; + input.pointer = mmx_params; + + status = acpi_evaluate_object(wmmx_handle, NULL, &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_wmmx_method: failed to get WMMX data, " + "status 0x%x!\n", status); + return NV_ERR_GENERIC; + } + + mmx = output.pointer; + if (mmx && (mmx->type == ACPI_TYPE_BUFFER) && (mmx->buffer.length > 0)) + { + if (outData && outDataSize) + { + if (mmx->buffer.length <= *outDataSize) + { + *outDataSize = NV_MIN(*outDataSize, mmx->buffer.length); + memcpy(outData, mmx->buffer.pointer, *outDataSize); + } + else + { + kfree(output.pointer); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_wmmx_method: WMMX data invalid.\n"); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + kfree(output.pointer); + return NV_OK; +} + +NvBool nv_acpi_power_resource_method_present( + struct pci_dev *pdev +) +{ + acpi_handle handle = NULL; + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *object_package, *object_reference; + acpi_status status; + +#if defined(DEVICE_ACPI_HANDLE) + handle = DEVICE_ACPI_HANDLE(&pdev->dev); +#elif defined (ACPI_HANDLE) + handle = ACPI_HANDLE(&pdev->dev); +#endif + + if (!handle) + return NV_FALSE; + + status = acpi_evaluate_object(handle, "_PR3", NULL, &buf); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO,"NVRM: Failed to evaluate _PR3 object\n"); + return NV_FALSE; + } + + if (!buf.pointer) + { + nv_printf(NV_DBG_INFO, "NVRM: output buffer pointer is null" + " for _PR3 method\n"); + return NV_FALSE; + } + + object_package = buf.pointer; + + /* + * _PR3 object should be of type package and + * it should contain only one reference + */ + if ((object_package->type != ACPI_TYPE_PACKAGE) && + (object_package->package.count != 0x1)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: _PR3 object is not a type 'package'\n"); + return NV_FALSE; + } + + object_reference = object_package->package.elements; + + /* Check for the reference and the actual type of the reference. */ + if ((object_reference->reference.actual_type != ACPI_TYPE_POWER) && + (object_reference->type != ACPI_TYPE_LOCAL_REFERENCE)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: _PR3 object does not contain POWER Reference\n"); + return NV_FALSE; + } + return NV_TRUE; +} + +/* + * This function executes MUX ACPI methods. 
+ */
+NV_STATUS NV_API_CALL nv_acpi_mux_method(
+    nv_state_t *nv,
+    NvU32 *pInOut,
+    NvU32 muxAcpiId,
+    const char *pMethodName
+)
+{
+    acpi_status status;
+    struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+    union acpi_object *mux = NULL;
+    union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
+    struct acpi_object_list input = { 1, &mux_arg };
+    acpi_handle dev_handle = NULL;
+    acpi_handle mux_dev_handle = NULL;
+    acpi_handle handle = NULL;
+    unsigned long long device_id = 0;
+
+    if ((strcmp(pMethodName, "MXDS") != 0)
+        && (strcmp(pMethodName, "MXDM") != 0))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: %s: Unsupported ACPI method %s\n",
+                  __FUNCTION__, pMethodName);
+        return NV_ERR_NOT_SUPPORTED;
+    }
+    else
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: %s: Call for %s ACPI method \n",
+                  __FUNCTION__, pMethodName);
+    }
+
+    if (!nv_acpi_get_device_handle(nv, &dev_handle))
+        return NV_ERR_NOT_SUPPORTED;
+
+    if (!dev_handle)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    if (!NV_MAY_SLEEP())
+    {
+#if defined(DEBUG)
+        nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid context!\n", __FUNCTION__);
+#endif
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    while (mux_dev_handle == NULL)
+    {
+        status = acpi_get_next_object(ACPI_TYPE_DEVICE, dev_handle,
+                                      handle, &handle);
+        if (ACPI_FAILURE(status) || (handle == NULL))
+            break;
+
+        status = acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
+        if (ACPI_SUCCESS(status) && (device_id == muxAcpiId))
+            mux_dev_handle = handle;
+    }
+
+    if (mux_dev_handle == NULL)
+    {
+        nv_printf(NV_DBG_INFO,
+                  "NVRM: %s Mux device handle not found\n", __FUNCTION__);
+        return NV_ERR_GENERIC;
+    }
+
+    mux_arg.integer.type = ACPI_TYPE_INTEGER;
+    mux_arg.integer.value = (NvU64) *pInOut;
+
+    status = acpi_evaluate_object(mux_dev_handle, (acpi_string)pMethodName,
+                                  &input, &output);
+
+    if (ACPI_FAILURE(status))
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: %s: Failed to evaluate %s method!\n",
+                  __FUNCTION__, pMethodName);
+        return NV_ERR_GENERIC;
+    }
+    else
+    {
+        mux = output.pointer;
+
+        if (mux && (mux->type == ACPI_TYPE_INTEGER))
+        {
+            *pInOut = mux->integer.value;
+        }
+        else
+        {
+            nv_printf(NV_DBG_INFO,
+                      "NVRM: %s: Invalid MUX data\n", __FUNCTION__);
+            kfree(output.pointer);
+            return NV_ERR_GENERIC;
+        }
+    }
+
+    kfree(output.pointer);
+    return NV_OK;
+}
+
+static acpi_status nv_acpi_find_battery_info(
+    acpi_handle handle,
+    NvBool bUseBix
+)
+{
+    acpi_status status = AE_OK;
+    struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+    union acpi_object *object_package;
+    NvU32 battery_technology_offset;
+
+    status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+
+    if (ACPI_FAILURE(status))
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: Failed to evaluate battery's object\n");
+        return AE_OK;
+    }
+
+    if (!buf.pointer)
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: Battery object output buffer is null\n");
+        return AE_OK;
+    }
+
+    object_package = buf.pointer;
+
+    if (object_package->type != ACPI_TYPE_PACKAGE)
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: Battery method output is not package\n");
+        return AE_OK;
+    }
+
+    if (bUseBix)
+    {
+        battery_technology_offset = BIX_BATTERY_TECHNOLOGY_OFFSET;
+    }
+    else
+    {
+        battery_technology_offset = BIF_BATTERY_TECHNOLOGY_OFFSET;
+    }
+
+    /*
+     * Only the battery technology type is checked here.
+     * Other fields, such as the battery model/serial number, could also be
+     * checked, but the driver needs to support the case where the user has
+     * removed the battery from the system.
+     * The _STA method on the battery device handle cannot be used for the
+     * same reason.
+     * Hence we just check whether the slot's battery technology is
+     * rechargeable.
+     */
+
+    if ((object_package->package.elements[battery_technology_offset].type != ACPI_TYPE_INTEGER) ||
+        (object_package->package.elements[battery_technology_offset].integer.value != BATTERY_RECHARGABLE))
+    {
+        return AE_OK;
+    }
+
+    battery_present = NV_TRUE;
+
+    /* Stop traversing the acpi tree. */
+    return AE_CTRL_TERMINATE;
+}
+
+static acpi_status nv_acpi_find_battery_device(
+    acpi_handle handle,
+    u32 nest_level,
+    void *dummy1,
+    void **dummy2
+)
+{
+    acpi_handle bif_method_handle;
+    acpi_handle bix_method_handle;
+    acpi_status status = AE_OK;
+
+    // Find the Battery Information (Extended) method (_BIX or _BIF), then the battery type.
+    if (!acpi_get_handle(handle, "_BIX", &bix_method_handle))
+    {
+        status = nv_acpi_find_battery_info(bix_method_handle, NV_TRUE/*bUseBix*/);
+    }
+
+    if ((battery_present == NV_FALSE) &&
+        !acpi_get_handle(handle, "_BIF", &bif_method_handle))
+    {
+        status = nv_acpi_find_battery_info(bif_method_handle, NV_FALSE/*bUseBix*/);
+    }
+
+    return status;
+}
+
+NvBool NV_API_CALL nv_acpi_is_battery_present(void)
+{
+    NV_ACPI_WALK_NAMESPACE(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+                           nv_acpi_find_battery_device, NULL, NULL);
+
+    if (battery_present == NV_TRUE)
+    {
+        return NV_TRUE;
+    }
+
+    return NV_FALSE;
+}
+
+#else // NV_LINUX_ACPI_EVENTS_SUPPORTED
+
+void NV_API_CALL nv_acpi_methods_init(NvU32 *handlePresent)
+{
+    *handlePresent = 0;
+}
+
+void NV_API_CALL nv_acpi_methods_uninit(void)
+{
+    return;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_method(
+    NvU32 acpi_method,
+    NvU32 function,
+    NvU32 subFunction,
+    void *inParams,
+    NvU16 inParamSize,
+    NvU32 *outStatus,
+    void *outData,
+    NvU16 *outDataSize
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_dsm_method(
+    nv_state_t *nv,
+    NvU8 *pAcpiDsmGuid,
+    NvU32 acpiDsmRev,
+    NvBool acpiNvpcfDsmFunction,
+    NvU32 acpiDsmSubFunction,
+    void *pInParams,
+    NvU16 inParamSize,
+    NvU32 *outStatus,
+    void *pOutData,
+    NvU16 *pSize
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_ddc_method(
+    nv_state_t *nv,
+    void *pEdidBuffer,
+    NvU32 *pSize,
+    NvBool bReadMultiBlock
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_rom_method(
+    nv_state_t *nv,
+    NvU32 *pInData,
+    NvU32 *pOutData
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_dod_method(
+    nv_state_t *nv,
+    NvU32 *pOutData,
+    NvU32 *pSize
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NvBool nv_acpi_power_resource_method_present(
+    struct pci_dev *pdev
+)
+{
+    return NV_FALSE;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_get_powersource(NvU32 *ac_plugged)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+void nv_acpi_register_notifier(nv_linux_state_t *nvl)
+{
+    return;
+}
+
+void nv_acpi_unregister_notifier(nv_linux_state_t *nvl)
+{
+    return;
+}
+
+NV_STATUS NV_API_CALL nv_acpi_mux_method(
+    nv_state_t *nv,
+    NvU32 *pInOut,
+    NvU32 muxAcpiId,
+    const char *pMethodName
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NvBool NV_API_CALL nv_acpi_is_battery_present(void)
+{
+    return NV_FALSE;
+}
+#endif
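+
+/*
+ * Usage sketch (illustrative only, not part of this file): callers toggle a
+ * display mux by handing nv_acpi_mux_method() the mux device's ACPI id and
+ * either "MXDS" or "MXDM". The id value below is a hypothetical placeholder.
+ *
+ *   NvU32 state = 1;
+ *
+ *   if (nv_acpi_mux_method(nv, &state, 0xA0, "MXDS") == NV_OK)
+ *   {
+ *       // state now holds the integer returned by the method
+ *   }
+ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c
new file mode 100644
index 0000000..a4f2d04
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c
@@ -0,0 +1,81 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.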
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include <linux/backlight.h>
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+NV_STATUS NV_API_CALL nv_get_tegra_brightness_level
+(
+    nv_state_t *nv,
+    NvU32 *brightness
+)
+{
+#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct backlight_device *bd;
+
+    bd = get_backlight_device_by_name(nvl->backlight.device_name);
+    if (bd == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
+        return NV_ERR_GENERIC;
+    }
+
+    *brightness = bd->props.brightness;
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NV_STATUS NV_API_CALL nv_set_tegra_brightness_level
+(
+    nv_state_t *nv,
+    NvU32 brightness
+)
+{
+#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct backlight_device *bd;
+
+    bd = get_backlight_device_by_name(nvl->backlight.device_name);
+    if (bd == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
+        return NV_ERR_GENERIC;
+    }
+
+    bd->props.brightness = brightness;
+
+    backlight_update_status(bd);
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
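+
+/*
+ * Usage sketch (illustrative only, not part of this file): the two Tegra
+ * backlight helpers above are symmetric; a caller holding the per-GPU state
+ * might dim the panel by one step like so.
+ *
+ *   NvU32 level;
+ *
+ *   if (nv_get_tegra_brightness_level(nv, &level) == NV_OK && level > 0)
+ *   {
+ *       (void)nv_set_tegra_brightness_level(nv, level - 1);
+ *   }
+ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c
new file mode 100644
index 0000000..19745d7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c
@@ -0,0 +1,853 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.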
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv-linux.h"
+#include "nv-caps.h"
+#include "nv-procfs.h"
+#include "nv-hash.h"
+
+extern int NVreg_ModifyDeviceFiles;
+
+/* sys_close() or __close_fd() */
+#include <linux/syscalls.h>
+
+#define NV_CAP_DRV_MINOR_COUNT 8192
+
+/* Hash table with 512 buckets */
+#define NV_CAP_HASH_BITS 9
+NV_DECLARE_HASHTABLE(g_nv_cap_hash_table, NV_CAP_HASH_BITS);
+
+#define NV_CAP_HASH_SIZE NV_HASH_SIZE(g_nv_cap_hash_table)
+
+#define nv_cap_hash_key(path) (nv_string_hash(path) % NV_CAP_HASH_SIZE)
+
+typedef struct nv_cap_table_entry
+{
+    /* name must be the first element */
+    const char *name;
+    int minor;
+    struct hlist_node hlist;
+} nv_cap_table_entry_t;
+
+#define NV_CAP_NUM_ENTRIES(_table) (sizeof(_table) / sizeof(_table[0]))
+
+static nv_cap_table_entry_t g_nv_cap_nvlink_table[] =
+{
+    {"/driver/nvidia-nvlink/capabilities/fabric-mgmt"}
+};
+
+static nv_cap_table_entry_t g_nv_cap_mig_table[] =
+{
+    {"/driver/nvidia/capabilities/mig/config"},
+    {"/driver/nvidia/capabilities/mig/monitor"}
+};
+
+static nv_cap_table_entry_t g_nv_cap_sys_table[] =
+{
+
+
+
+};
+
+#define NV_CAP_MIG_CI_ENTRIES(_gi) \
+    {_gi "/ci0/access"}, \
+    {_gi "/ci1/access"}, \
+    {_gi "/ci2/access"}, \
+    {_gi "/ci3/access"}, \
+    {_gi "/ci4/access"}, \
+    {_gi "/ci5/access"}, \
+    {_gi "/ci6/access"}, \
+    {_gi "/ci7/access"}
+
+#define NV_CAP_MIG_GI_ENTRIES(_gpu) \
+    {_gpu "/gi0/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi0"), \
+    {_gpu "/gi1/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi1"), \
+    {_gpu "/gi2/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi2"), \
+    {_gpu "/gi3/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi3"), \
+    {_gpu "/gi4/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi4"), \
+    {_gpu "/gi5/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi5"), \
+    {_gpu "/gi6/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi6"), \
+    {_gpu "/gi7/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi7"), \
+    {_gpu "/gi8/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi8"), \
+    {_gpu "/gi9/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi9"), \
+    {_gpu "/gi10/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi10"), \
+    {_gpu "/gi11/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi11"), \
+    {_gpu "/gi12/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi12"), \
+    {_gpu "/gi13/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi13"), \
+    {_gpu "/gi14/access"}, \
+    NV_CAP_MIG_CI_ENTRIES(_gpu "/gi14")
+
+static nv_cap_table_entry_t g_nv_cap_mig_gpu_table[] =
+{
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu0/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu1/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu2/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu3/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu4/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu5/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu6/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu7/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu8/mig"),
+    NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu9/mig"),
+
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu10/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu11/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu12/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu13/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu14/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu15/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu16/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu17/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu18/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu19/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu20/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu21/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu22/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu23/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu24/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu25/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu26/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu27/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu28/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu29/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu30/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu31/mig") +}; + +struct nv_cap +{ + char *path; + char *name; + int minor; + int permissions; + int modify; + struct proc_dir_entry *parent; + struct proc_dir_entry *entry; +}; + +#define NV_CAP_PROCFS_WRITE_BUF_SIZE 128 + +typedef struct nv_cap_file_private +{ + int minor; + int permissions; + int modify; + char buffer[NV_CAP_PROCFS_WRITE_BUF_SIZE]; + off_t offset; +} nv_cap_file_private_t; + +struct +{ + NvBool initialized; + struct cdev cdev; + dev_t devno; +} g_nv_cap_drv; + +#define NV_CAP_PROCFS_DIR "driver/nvidia-caps" +#define NV_CAP_NAME_BUF_SIZE 128 + +static struct proc_dir_entry *nv_cap_procfs_dir; + +static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v) +{ + int i, count; + char name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_nvlink_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_nvlink_table[i].name, + "/driver/nvidia-nvlink/capabilities/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_nvlink_table[i].minor); + } + } + + return 0; +} + +static int nv_procfs_read_sys_minors(struct seq_file *s, void *v) +{ + int i, count; + char name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_sys_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_sys_table[i].name, + "/driver/nvidia/capabilities/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_sys_table[i].minor); + } + } + + return 0; +} + +static int nv_procfs_read_mig_minors(struct seq_file *s, void *v) +{ + int i, count, gpu; + char name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_mig_table[i].name, + "/driver/nvidia/capabilities/mig/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_mig_table[i].minor); + } + } + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_gpu_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_mig_gpu_table[i].name, + "/driver/nvidia/capabilities/gpu%d/mig/%s", &gpu, name) == 2) + { + 
name[sizeof(name) - 1] = '\0'; + seq_printf(s, "gpu%d/%s %d\n", + gpu, name, g_nv_cap_mig_gpu_table[i].minor); + } + } + + return 0; +} + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock); + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock); + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(sys_minors, nv_system_pm_lock); + +static void nv_cap_procfs_exit(void) +{ + if (!nv_cap_procfs_dir) + { + return; + } + + nv_procfs_unregister_all(nv_cap_procfs_dir, nv_cap_procfs_dir); + nv_cap_procfs_dir = NULL; +} + +int nv_cap_procfs_init(void) +{ + static struct proc_dir_entry *file_entry; + + nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL); + if (nv_cap_procfs_dir == NULL) + { + return -EACCES; + } + + file_entry = NV_CREATE_PROC_FILE("mig-minors", nv_cap_procfs_dir, + mig_minors, NULL); + if (file_entry == NULL) + { + goto cleanup; + } + + file_entry = NV_CREATE_PROC_FILE("nvlink-minors", nv_cap_procfs_dir, + nvlink_minors, NULL); + if (file_entry == NULL) + { + goto cleanup; + } + + file_entry = NV_CREATE_PROC_FILE("sys-minors", nv_cap_procfs_dir, + sys_minors, NULL); + if (file_entry == NULL) + { + goto cleanup; + } + + return 0; + +cleanup: + nv_cap_procfs_exit(); + + return -EACCES; +} + +static int nv_cap_find_minor(char *path) +{ + unsigned int key = nv_cap_hash_key(path); + nv_cap_table_entry_t *entry; + + nv_hash_for_each_possible(g_nv_cap_hash_table, entry, hlist, key) + { + if (strcmp(path, entry->name) == 0) + { + return entry->minor; + } + } + + return -1; +} + +static void _nv_cap_table_init(nv_cap_table_entry_t *table, int count) +{ + int i; + unsigned int key; + static int minor = 0; + + for (i = 0; i < count; i++) + { + table[i].minor = minor++; + INIT_HLIST_NODE(&table[i].hlist); + key = nv_cap_hash_key(table[i].name); + nv_hash_add(g_nv_cap_hash_table, &table[i].hlist, key); + } + + WARN_ON(minor > NV_CAP_DRV_MINOR_COUNT); +} + +#define nv_cap_table_init(table) \ + _nv_cap_table_init(table, NV_CAP_NUM_ENTRIES(table)) + +static void nv_cap_tables_init(void) +{ + BUILD_BUG_ON(offsetof(nv_cap_table_entry_t, name) != 0); + + nv_hash_init(g_nv_cap_hash_table); + + nv_cap_table_init(g_nv_cap_nvlink_table); + nv_cap_table_init(g_nv_cap_mig_table); + nv_cap_table_init(g_nv_cap_mig_gpu_table); + nv_cap_table_init(g_nv_cap_sys_table); +} + +static ssize_t nv_cap_procfs_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *pos) +{ + nv_cap_file_private_t *private = NULL; + unsigned long bytes_left; + char *proc_buffer; + + private = ((struct seq_file *)file->private_data)->private; + bytes_left = (sizeof(private->buffer) - private->offset - 1); + + if (count == 0) + { + return -EINVAL; + } + + if ((bytes_left == 0) || (count > bytes_left)) + { + return -ENOSPC; + } + + proc_buffer = &private->buffer[private->offset]; + + if (copy_from_user(proc_buffer, buffer, count)) + { + nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n"); + return -EFAULT; + } + + private->offset += count; + proc_buffer[count] = '\0'; + + *pos = private->offset; + + return count; +} + +static int nv_cap_procfs_read(struct seq_file *s, void *v) +{ + nv_cap_file_private_t *private = s->private; + + seq_printf(s, "%s: %d\n", "DeviceFileMinor", private->minor); + seq_printf(s, "%s: %d\n", "DeviceFileMode", private->permissions); + seq_printf(s, "%s: %d\n", "DeviceFileModify", private->modify); + + return 0; +} + +static int nv_cap_procfs_open(struct inode *inode, struct file *file) +{ + nv_cap_file_private_t *private = NULL; + int 
rc; + nv_cap_t *cap = NV_PDE_DATA(inode); + + NV_KMALLOC(private, sizeof(nv_cap_file_private_t)); + if (private == NULL) + { + return -ENOMEM; + } + + private->minor = cap->minor; + private->permissions = cap->permissions; + private->offset = 0; + private->modify = cap->modify; + + rc = single_open(file, nv_cap_procfs_read, private); + if (rc < 0) + { + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + return rc; + } + + rc = nv_down_read_interruptible(&nv_system_pm_lock); + if (rc < 0) + { + single_release(inode, file); + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + } + + return rc; +} + +static int nv_cap_procfs_release(struct inode *inode, struct file *file) +{ + struct seq_file *s = file->private_data; + nv_cap_file_private_t *private = NULL; + char *buffer; + int modify; + nv_cap_t *cap = NV_PDE_DATA(inode); + + if (s != NULL) + { + private = s->private; + } + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + if (private != NULL) + { + buffer = private->buffer; + + if (private->offset != 0) + { + if (sscanf(buffer, "DeviceFileModify: %d", &modify) == 1) + { + cap->modify = modify; + } + } + + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + } + + /* + * All open files using the proc entry will be invalidated + * if the entry is removed. + */ + file->private_data = NULL; + + return 0; +} + +static nv_proc_ops_t g_nv_cap_procfs_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_cap_procfs_open, + .NV_PROC_OPS_RELEASE = nv_cap_procfs_release, + .NV_PROC_OPS_WRITE = nv_cap_procfs_write, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_LSEEK = seq_lseek, +}; + +/* forward declaration of g_nv_cap_drv_fops */ +static struct file_operations g_nv_cap_drv_fops; + +int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd) +{ + struct file *file; + int dup_fd; + struct inode *inode = NULL; + dev_t rdev = 0; + struct files_struct *files = current->files; + struct fdtable *fdt; + + if (cap == NULL) + { + return -1; + } + + file = fget(fd); + if (file == NULL) + { + return -1; + } + + inode = NV_FILE_INODE(file); + if (inode == NULL) + { + goto err; + } + + /* Make sure the fd belongs to the nv-cap-drv */ + if (file->f_op != &g_nv_cap_drv_fops) + { + goto err; + } + + /* Make sure the fd has the expected capability */ + rdev = inode->i_rdev; + if (MINOR(rdev) != cap->minor) + { + goto err; + } + + dup_fd = NV_GET_UNUSED_FD_FLAGS(O_CLOEXEC); + if (dup_fd < 0) + { + dup_fd = NV_GET_UNUSED_FD(); + if (dup_fd < 0) + { + goto err; + } + + /* + * Set CLOEXEC before installing the FD. + * + * If fork() happens in between, the opened unused FD will have + * a NULL struct file associated with it, which is okay. + * + * The only well known bug here is the race with dup(2), which is + * already documented in the kernel, see fd_install()'s description. + */ + + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + NV_SET_CLOSE_ON_EXEC(dup_fd, fdt); + spin_unlock(&files->file_lock); + } + + fd_install(dup_fd, file); + return dup_fd; + +err: + fput(file); + return -1; +} + +void NV_API_CALL nv_cap_close_fd(int fd) +{ + if (fd == -1) + { + return; + } + + /* + * Acquire task_lock as we access current->files explicitly (__close_fd) + * and implicitly (sys_close), and it will race with the exit path. 
+ */ + task_lock(current); + + /* Nothing to do, we are in exit path */ + if (current->files == NULL) + { + task_unlock(current); + return; + } + +/* + * From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd) + * and started exporting __close_fd, as of this commit: + * 2018-04-02 2ca2a09d6215 ("fs: add ksys_close() wrapper; remove in-kernel + * calls to sys_close()") + * Kernels v5.11-rc1 onwards have stopped exporting __close_fd, and started + * exporting close_fd, as of this commit: + * 2020-12-20 8760c909f54a ("file: Rename __close_fd to close_fd and remove + * the files parameter") + */ +#if NV_IS_EXPORT_SYMBOL_PRESENT_close_fd + close_fd(fd); +#elif NV_IS_EXPORT_SYMBOL_PRESENT___close_fd + __close_fd(current->files, fd); +#else + sys_close(fd); +#endif + + task_unlock(current); +} + +static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name) +{ + nv_cap_t *cap; + int len; + + if (parent_cap == NULL || name == NULL) + { + return NULL; + } + + NV_KMALLOC(cap, sizeof(nv_cap_t)); + if (cap == NULL) + { + return NULL; + } + + len = strlen(name) + strlen(parent_cap->path) + 2; + NV_KMALLOC(cap->path, len); + if (cap->path == NULL) + { + NV_KFREE(cap, sizeof(nv_cap_t)); + return NULL; + } + + strcpy(cap->path, parent_cap->path); + strcat(cap->path, "/"); + strcat(cap->path, name); + + len = strlen(name) + 1; + NV_KMALLOC(cap->name, len); + if (cap->name == NULL) + { + NV_KFREE(cap->path, strlen(cap->path) + 1); + NV_KFREE(cap, sizeof(nv_cap_t)); + return NULL; + } + + strcpy(cap->name, name); + + cap->minor = -1; + cap->modify = NVreg_ModifyDeviceFiles; + + return cap; +} + +static void nv_cap_free(nv_cap_t *cap) +{ + if (cap == NULL) + { + return; + } + + NV_KFREE(cap->path, strlen(cap->path) + 1); + NV_KFREE(cap->name, strlen(cap->name) + 1); + NV_KFREE(cap, sizeof(nv_cap_t)); +} + +nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, + const char *name, int mode) +{ + nv_cap_t *cap = NULL; + int minor; + + cap = nv_cap_alloc(parent_cap, name); + if (cap == NULL) + { + return NULL; + } + + cap->parent = parent_cap->entry; + cap->permissions = mode; + + mode = (S_IFREG | S_IRUGO); + + minor = nv_cap_find_minor(cap->path); + if (minor < 0) + { + nv_cap_free(cap); + return NULL; + } + + cap->minor = minor; + + cap->entry = proc_create_data(name, mode, parent_cap->entry, + &g_nv_cap_procfs_fops, (void*)cap); + if (cap->entry == NULL) + { + nv_cap_free(cap); + return NULL; + } + + return cap; +} + +nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, + const char *name, int mode) +{ + nv_cap_t *cap = NULL; + + cap = nv_cap_alloc(parent_cap, name); + if (cap == NULL) + { + return NULL; + } + + cap->parent = parent_cap->entry; + cap->permissions = mode; + cap->minor = -1; + + mode = (S_IFDIR | S_IRUGO | S_IXUGO); + + cap->entry = NV_PROC_MKDIR_MODE(name, mode, parent_cap->entry); + if (cap->entry == NULL) + { + nv_cap_free(cap); + return NULL; + } + + return cap; +} + +nv_cap_t* NV_API_CALL nv_cap_init(const char *path) +{ + nv_cap_t parent_cap; + nv_cap_t *cap; + int mode; + char *name = NULL; + char dir[] = "/capabilities"; + + if (path == NULL) + { + return NULL; + } + + NV_KMALLOC(name, (strlen(path) + strlen(dir)) + 1); + if (name == NULL) + { + return NULL; + } + + strcpy(name, path); + strcat(name, dir); + parent_cap.entry = NULL; + parent_cap.path = ""; + parent_cap.name = ""; + mode = S_IRUGO | S_IXUGO; + cap = nv_cap_create_dir_entry(&parent_cap, name, mode); + + NV_KFREE(name, strlen(name) + 1); + return cap; +} + +void 
NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap) +{ + if (WARN_ON(cap == NULL)) + { + return; + } + + remove_proc_entry(cap->name, cap->parent); + nv_cap_free(cap); +} + +static int nv_cap_drv_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int nv_cap_drv_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static struct file_operations g_nv_cap_drv_fops = +{ + .owner = THIS_MODULE, + .open = nv_cap_drv_open, + .release = nv_cap_drv_release +}; + +int NV_API_CALL nv_cap_drv_init(void) +{ + int rc; + + nv_cap_tables_init(); + + if (g_nv_cap_drv.initialized) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv is already initialized.\n"); + return -EBUSY; + } + + rc = alloc_chrdev_region(&g_nv_cap_drv.devno, + 0, + NV_CAP_DRV_MINOR_COUNT, + "nvidia-caps"); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev region.\n"); + return rc; + } + + cdev_init(&g_nv_cap_drv.cdev, &g_nv_cap_drv_fops); + + g_nv_cap_drv.cdev.owner = THIS_MODULE; + + rc = cdev_add(&g_nv_cap_drv.cdev, g_nv_cap_drv.devno, + NV_CAP_DRV_MINOR_COUNT); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev.\n"); + goto cdev_add_fail; + } + + rc = nv_cap_procfs_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv: unable to init proc\n"); + goto proc_init_fail; + } + + g_nv_cap_drv.initialized = NV_TRUE; + + return 0; + +proc_init_fail: + cdev_del(&g_nv_cap_drv.cdev); + +cdev_add_fail: + unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT); + + return rc; +} + +void NV_API_CALL nv_cap_drv_exit(void) +{ + if (!g_nv_cap_drv.initialized) + { + return; + } + + nv_cap_procfs_exit(); + + cdev_del(&g_nv_cap_drv.cdev); + + unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT); + + g_nv_cap_drv.initialized = NV_FALSE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c new file mode 100644 index 0000000..a06af35 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c @@ -0,0 +1,630 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +/*! 
+ * @brief The static const array below maps each clock in the
+ * TEGRASOC_WHICH_CLK enum to its clock name. The enum is defined in:
+ *
+ * arch/nvalloc/unix/include/nv.h
+ * enum TEGRASOC_WHICH_CLK
+ *
+ * The order of the enum and of this array must be maintained/updated
+ * together.
+ */
+static const char *osMapClk[] = {
+    [TEGRASOC_WHICH_CLK_NVDISPLAYHUB] = "nvdisplayhub_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_DISP] = "nvdisplay_disp_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P0] = "nvdisplay_p0_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P1] = "nvdisplay_p1_clk",
+    [TEGRASOC_WHICH_CLK_DPAUX0] = "dpaux0_clk",
+    [TEGRASOC_WHICH_CLK_FUSE] = "fuse_clk",
+    [TEGRASOC_WHICH_CLK_DSIPLL_VCO] = "dsipll_vco_clk",
+    [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN] = "dsipll_clkoutpn_clk",
+    [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA] = "dsipll_clkouta_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL0_VCO] = "sppll0_vco_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN] = "sppll0_clkoutpn_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA] = "sppll0_clkouta_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB] = "sppll0_clkoutb_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL0_DIV10] = "sppll0_div10_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL0_DIV25] = "sppll0_div25_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL0_DIV27] = "sppll0_div27_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL1_VCO] = "sppll1_vco_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN] = "sppll1_clkoutpn_clk",
+    [TEGRASOC_WHICH_CLK_SPPLL1_DIV27] = "sppll1_div27_clk",
+    [TEGRASOC_WHICH_CLK_VPLL0_REF] = "vpll0_ref_clk",
+    [TEGRASOC_WHICH_CLK_VPLL0] = "vpll0_clk",
+    [TEGRASOC_WHICH_CLK_VPLL1] = "vpll1_clk",
+    [TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF] = "nvdisplay_p0_ref_clk",
+    [TEGRASOC_WHICH_CLK_RG0] = "rg0_clk",
+    [TEGRASOC_WHICH_CLK_RG1] = "rg1_clk",
+    [TEGRASOC_WHICH_CLK_DISPPLL] = "disppll_clk",
+    [TEGRASOC_WHICH_CLK_DISPHUBPLL] = "disphubpll_clk",
+    [TEGRASOC_WHICH_CLK_DSI_LP] = "dsi_lp_clk",
+    [TEGRASOC_WHICH_CLK_DSI_CORE] = "dsi_core_clk",
+    [TEGRASOC_WHICH_CLK_DSI_PIXEL] = "dsi_pixel_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SOR0] = "pre_sor0_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SOR1] = "pre_sor1_clk",
+    [TEGRASOC_WHICH_CLK_DP_LINK_REF] = "dp_link_ref_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT] = "sor_linka_input_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO] = "sor_linka_afifo_clk",
+    [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M] = "sor_linka_afifo_m_clk",
+    [TEGRASOC_WHICH_CLK_RG0_M] = "rg0_m_clk",
+    [TEGRASOC_WHICH_CLK_RG1_M] = "rg1_m_clk",
+    [TEGRASOC_WHICH_CLK_SOR0_M] = "sor0_m_clk",
+    [TEGRASOC_WHICH_CLK_SOR1_M] = "sor1_m_clk",
+    [TEGRASOC_WHICH_CLK_PLLHUB] = "pllhub_clk",
+    [TEGRASOC_WHICH_CLK_SOR0] = "sor0_clk",
+    [TEGRASOC_WHICH_CLK_SOR1] = "sor1_clk",
+    [TEGRASOC_WHICH_CLK_SOR_PAD_INPUT] = "sor_pad_input_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SF0] = "pre_sf0_clk",
+    [TEGRASOC_WHICH_CLK_SF0] = "sf0_clk",
+    [TEGRASOC_WHICH_CLK_SF1] = "sf1_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SOR0_REF] = "pre_sor0_ref_clk",
+    [TEGRASOC_WHICH_CLK_PRE_SOR1_REF] = "pre_sor1_ref_clk",
+    [TEGRASOC_WHICH_CLK_SOR0_PLL_REF] = "sor0_ref_pll_clk",
+    [TEGRASOC_WHICH_CLK_SOR1_PLL_REF] = "sor1_ref_pll_clk",
+    [TEGRASOC_WHICH_CLK_SOR0_REF] = "sor0_ref_clk",
+    [TEGRASOC_WHICH_CLK_SOR1_REF] = "sor1_ref_clk",
+    [TEGRASOC_WHICH_CLK_DSI_PAD_INPUT] = "dsi_pad_input_clk",
+    [TEGRASOC_WHICH_CLK_OSC] = "osc_clk",
+    [TEGRASOC_WHICH_CLK_DSC] = "dsc_clk",
+    [TEGRASOC_WHICH_CLK_MAUD] = "maud_clk",
+    [TEGRASOC_WHICH_CLK_AZA_2XBIT] = "aza_2xbit_clk",
+    [TEGRASOC_WHICH_CLK_AZA_BIT] = "aza_bit_clk",
+    [TEGRASOC_WHICH_CLK_MIPI_CAL] = "mipi_cal_clk",
+    [TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL] = "uart_fst_mipi_cal_clk",
+    [TEGRASOC_WHICH_CLK_SOR0_DIV] = "sor0_div_clk",
+};
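+
+/*
+ * Usage sketch (illustrative only, not part of this file): because the array
+ * is indexed by TEGRASOC_WHICH_CLK, translating an enum value to its clock
+ * name is a bounds-checked array lookup.
+ *
+ *   const char *name = (whichClk < TEGRASOC_WHICH_CLK_MAX) ?
+ *                      osMapClk[whichClk] : NULL;
+ */
+
+/*!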
+ * @brief Get the clock handles.
+ *
+ * Look up and obtain the clock handle for each display
+ * clock at boot time; those handles are then used for all
+ * subsequent operations, for example enabling/disabling a
+ * clock or querying its current/maximum frequency.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS NV_API_CALL nv_clk_get_handles(
+    nv_state_t *nv)
+{
+    NV_STATUS status = NV_OK;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NvU32 i, j, clk_count;
+#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
+    struct clk_bulk_data *clks;
+
+    clk_count = devm_clk_bulk_get_all(nvl->dev, &clks);
+
+    if (clk_count == 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: nv_clk_get_handles, failed to get clk handles from devm_clk_bulk_get_all\n");
+        return NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    //
+    // TEGRASOC_WHICH_CLK_MAX is the maximum clock defined in the below enum
+    // arch/nvalloc/unix/include/nv.h
+    // enum TEGRASOC_WHICH_CLK
+    //
+    for (i = 0U; i < clk_count; i++)
+    {
+        for (j = 0U; j < TEGRASOC_WHICH_CLK_MAX; j++)
+        {
+            if (!strcmp(osMapClk[j], clks[i].id))
+            {
+                nvl->disp_clk_handles.clk[j].handles = clks[i].clk;
+                nvl->disp_clk_handles.clk[j].clkName = __clk_get_name(clks[i].clk);
+                break;
+            }
+        }
+        if (j == TEGRASOC_WHICH_CLK_MAX)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: nv_clk_get_handles, failed to find TEGRASOC_WHICH_CLK for %s\n", clks[i].id);
+            return NV_ERR_OBJECT_NOT_FOUND;
+        }
+    }
+#else
+    nv_printf(NV_DBG_ERRORS, "NVRM: devm_clk_bulk_get_all API is not present\n");
+    status = NV_ERR_OBJECT_NOT_FOUND;
+#endif
+
+    return status;
+}
+
+/*!
+ * @brief Clear the clock handles assigned by nv_clk_get_handles()
+ *
+ * Clear the clock handle of each display clock at shutdown time.
+ * Since the clock handles are obtained through the devm-managed
+ * devm_clk_bulk_get_all() API, devm_clk_bulk_release_all() is called
+ * on all the enumerated clk handles automatically when the module is
+ * unloaded. Hence, there is no need to explicitly free those handles.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ */
+void NV_API_CALL nv_clk_clear_handles(
+    nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NvU32 i;
+
+    //
+    // TEGRASOC_WHICH_CLK_MAX is the maximum clock defined in the below enum
+    // arch/nvalloc/unix/include/nv.h
+    // enum TEGRASOC_WHICH_CLK
+    //
+    for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
+    {
+        if (nvl->disp_clk_handles.clk[i].handles != NULL)
+        {
+            nvl->disp_clk_handles.clk[i].handles = NULL;
+        }
+    }
+}
+
+/*!
+ * @brief Enable the clock.
+ *
+ * Enable the clock before performing any operation
+ * on it. The function below prepares the clock for use
+ * and enables it.
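+ *
+ * Under the hood this wraps the CCF call pair (a sketch, assuming a
+ * valid handle obtained via nv_clk_get_handles()):
+ *
+ *     ret = clk_prepare_enable(clk);   // prepare + enable in one call
+ *     ...
+ *     clk_disable_unprepare(clk);      // matching disable + unprepare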
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOS Enum value of the target clock
+ *
+ * @returns NV_STATUS
+ */
+NV_STATUS NV_API_CALL nv_enable_clk(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status = NV_ERR_GENERIC;
+    int ret;
+
+    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        ret = clk_prepare_enable(nvl->disp_clk_handles.clk[whichClkOS].handles);
+
+        if (ret == 0)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_prepare_enable failed with error: %d\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Check whether the clock is enabled.
+ *
+ * Check the clock's status, i.e. whether it is enabled or not,
+ * before enabling or disabling it.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOS Enum value of the target clock
+ *
+ * @returns clock status.
+ */
+NvBool NV_API_CALL nv_is_clk_enabled(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    bool ret;
+
+    if (nvl->disp_clk_handles.clk[whichClkOS].handles == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: clock handle requested not found.\n");
+        return NV_FALSE;
+    }
+
+    ret = __clk_is_enabled(nvl->disp_clk_handles.clk[whichClkOS].handles);
+    return ret == true;
+}
+
+/*!
+ * @brief Disable the clock.
+ *
+ * Disable the clock once the operation or required work with
+ * that particular clock is done. The function below disables
+ * the clock and unprepares it for further use.
+ *
+ * Note: make sure the clock is disabled before clk_put() is called.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOS Enum value of the target clock
+ */
+void NV_API_CALL nv_disable_clk(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    clk_disable_unprepare(nvl->disp_clk_handles.clk[whichClkOS].handles);
+}
+
+/*!
+ * @brief Get current clock frequency.
+ *
+ * Obtain the current clock rate for a clock source.
+ * This is only valid once the clock source has been enabled.
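+ *
+ * A minimal usage sketch (hypothetical caller; assumes the handles
+ * were populated by nv_clk_get_handles() at boot):
+ *
+ *     NvU32 khz;
+ *     if (nv_enable_clk(nv, TEGRASOC_WHICH_CLK_SOR0) == NV_OK)
+ *     {
+ *         if (nv_get_curr_freq(nv, TEGRASOC_WHICH_CLK_SOR0, &khz) == NV_OK)
+ *             nv_printf(NV_DBG_INFO, "NVRM: sor0_clk at %u KHz\n", khz);
+ *         nv_disable_clk(nv, TEGRASOC_WHICH_CLK_SOR0);
+ *     }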
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOS Enum value of the target clock
+ * @param[out] pCurrFreqKHz Current clock frequency in KHz
+ */
+NV_STATUS NV_API_CALL nv_get_curr_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pCurrFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status = NV_ERR_GENERIC;
+    unsigned long currFreqHz;
+
+    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        currFreqHz = clk_get_rate(nvl->disp_clk_handles.clk[whichClkOS].handles);
+        *pCurrFreqKHz = currFreqHz / 1000U;
+
+        if (*pCurrFreqKHz > 0U)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Get maximum clock frequency.
+ *
+ * Obtain the maximum clock rate a clock source can provide.
+ * This is only valid once the clock source has been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOS Enum value of the target clock
+ * @param[out] pMaxFreqKHz Maximum clock frequency in KHz
+ */
+NV_STATUS NV_API_CALL nv_get_max_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMaxFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status = NV_ERR_GENERIC;
+    long ret;
+
+    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        //
+        // clk_round_rate(struct clk *clk, rate) takes a requested
+        // rate and returns the rounded clock rate in Hz, i.e. the
+        // nearest frequency the source clock can actually provide,
+        // or a negative errno.
+        // Here, rate = NV_U32_MAX and
+        // 0 < currFreq < maxFreq < NV_U32_MAX,
+        // so requesting NV_U32_MAX makes clk_round_rate() return
+        // maxFreq, the highest rate the clock supports.
+        //
+        ret = clk_round_rate(nvl->disp_clk_handles.clk[whichClkOS].handles, NV_U32_MAX);
+
+        if (ret >= 0)
+        {
+            *pMaxFreqKHz = (NvU32) (ret / 1000);
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Get minimum clock frequency.
+ *
+ * Obtain the minimum clock rate a clock source can provide.
+ * This is only valid once the clock source has been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOS Enum value of the target clock
+ * @param[out] pMinFreqKHz Minimum clock frequency in KHz
+ */
+NV_STATUS NV_API_CALL nv_get_min_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMinFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status = NV_ERR_GENERIC;
+    long ret;
+
+    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        //
+        // clk_round_rate(struct clk *clk, rate) takes a requested
+        // rate and returns the rounded clock rate in Hz, i.e. the
+        // nearest frequency the source clock can actually provide,
+        // or a negative errno.
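+        // For example, with a hypothetical clock that supports
+        // 100/200/400 MHz:
+        //     clk_round_rate(clk, 0)          returns 100000000
+        //     clk_round_rate(clk, NV_U32_MAX) returns 400000000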
+        // Here, rate = 0 and
+        // 0 < minFreq < currFreq < maxFreq,
+        // so requesting 0 makes clk_round_rate() return minFreq,
+        // the lowest rate the clock supports.
+        //
+        ret = clk_round_rate(nvl->disp_clk_handles.clk[whichClkOS].handles, 0);
+
+        if (ret >= 0)
+        {
+            *pMinFreqKHz = (NvU32) (ret / 1000);
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_FEATURE_NOT_ENABLED;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+
+/*!
+ * @brief Set the clock frequency.
+ *
+ * Set the frequency of a clock source.
+ * This is only valid once the clock source has been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOS Enum value of the target clock
+ * @param[in] reqFreqKHz Requested frequency in KHz
+ */
+NV_STATUS NV_API_CALL nv_set_freq(
+    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 reqFreqKHz)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status = NV_ERR_GENERIC;
+    int ret;
+
+    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
+    {
+        ret = clk_set_rate(nvl->disp_clk_handles.clk[whichClkOS].handles,
+                           reqFreqKHz * 1000U);
+        if (ret == 0)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_INVALID_REQUEST;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_rate failed with error: %d\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Set the parent clock.
+ *
+ * Set the parent clock of a clock source.
+ * This is only valid once the clock source and the parent
+ * clock have been enabled.
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOSsource Enum value of the source clock
+ * @param[in] whichClkOSparent Enum value of the parent clock
+ */
+NV_STATUS NV_API_CALL nv_set_parent
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOSsource,
+    TEGRASOC_WHICH_CLK whichClkOSparent
+)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status = NV_ERR_GENERIC;
+    int ret;
+
+    if ((nvl->disp_clk_handles.clk[whichClkOSsource].handles != NULL) &&
+        (nvl->disp_clk_handles.clk[whichClkOSparent].handles != NULL))
+    {
+        ret = clk_set_parent(nvl->disp_clk_handles.clk[whichClkOSsource].handles,
+                             nvl->disp_clk_handles.clk[whichClkOSparent].handles);
+        if (ret == 0)
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = NV_ERR_INVALID_REQUEST;
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_parent failed with error: %d\n", ret);
+        }
+    }
+    else
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Get the parent clock.
+ *
+ * Get the parent clock of a clock source.
+ * This is only valid once the clock source and the parent
+ * clock have been enabled.
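+ *
+ * A minimal sketch (hypothetical caller; the clock pairing is
+ * illustrative only) of reparenting and verifying the result:
+ *
+ *     TEGRASOC_WHICH_CLK parent;
+ *     if (nv_set_parent(nv, TEGRASOC_WHICH_CLK_SOR0,
+ *                       TEGRASOC_WHICH_CLK_PLLHUB) == NV_OK &&
+ *         nv_get_parent(nv, TEGRASOC_WHICH_CLK_SOR0, &parent) == NV_OK)
+ *     {
+ *         WARN_ON(parent != TEGRASOC_WHICH_CLK_PLLHUB);
+ *     }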
+ *
+ * For more details on CCF functions, please check below file:
+ *
+ * In the Linux kernel: include/linux/clk.h
+ * or
+ * https://www.kernel.org/doc/htmldocs/kernel-api/
+ *
+ * @param[in] nv Per gpu linux state
+ * @param[in] whichClkOSsource Enum value of the source clock
+ * @param[out] pWhichClkOSparent Enum value of the parent clock
+ */
+NV_STATUS NV_API_CALL nv_get_parent
+(
+    nv_state_t *nv,
+    TEGRASOC_WHICH_CLK whichClkOSsource,
+    TEGRASOC_WHICH_CLK *pWhichClkOSparent
+)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct clk *ret;
+    NvU32 i;
+
+    if (nvl->disp_clk_handles.clk[whichClkOSsource].handles != NULL)
+    {
+        ret = clk_get_parent(nvl->disp_clk_handles.clk[whichClkOSsource].handles);
+        if (!IS_ERR(ret))
+        {
+            const char *parentClkName = __clk_get_name(ret);
+            //
+            // TEGRASOC_WHICH_CLK_MAX is the maximum clock defined in the below enum
+            // arch/nvalloc/unix/include/nv.h
+            // enum TEGRASOC_WHICH_CLK
+            //
+            for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
+            {
+                if (!strcmp(nvl->disp_clk_handles.clk[i].clkName, parentClkName))
+                {
+                    *pWhichClkOSparent = i;
+                    return NV_OK;
+                }
+            }
+            nv_printf(NV_DBG_ERRORS, "NVRM: unexpected parent clock ref addr: %p\n", ret);
+            return NV_ERR_INVALID_OBJECT_PARENT;
+        }
+        else
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: clk_get_parent failed with error: %ld\n", PTR_ERR(ret));
+            return NV_ERR_INVALID_POINTER;
+        }
+    }
+
+    nv_printf(NV_DBG_ERRORS, "NVRM: invalid source clock requested\n");
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c
new file mode 100644
index 0000000..ad7f1f5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c
@@ -0,0 +1,217 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(CONFIG_CRAY_XT) +enum { + NV_FORMAT_STATE_ORDINARY, + NV_FORMAT_STATE_INTRODUCTION, + NV_FORMAT_STATE_FLAGS, + NV_FORMAT_STATE_FIELD_WIDTH, + NV_FORMAT_STATE_PRECISION, + NV_FORMAT_STATE_LENGTH_MODIFIER, + NV_FORMAT_STATE_CONVERSION_SPECIFIER +}; + +enum { + NV_LENGTH_MODIFIER_NONE, + NV_LENGTH_MODIFIER_CHAR, + NV_LENGTH_MODIFIER_SHORT_INT, + NV_LENGTH_MODIFIER_LONG_INT, + NV_LENGTH_MODIFIER_LONG_LONG_INT +}; + +#define NV_IS_FLAG(c) \ + ((c) == '#' || (c) == '0' || (c) == '-' || (c) == ' ' || (c) == '+') +#define NV_IS_LENGTH_MODIFIER(c) \ + ((c) == 'h' || (c) == 'l' || (c) == 'L' || (c) == 'q' || (c) == 'j' || \ + (c) == 'z' || (c) == 't') +#define NV_IS_CONVERSION_SPECIFIER(c) \ + ((c) == 'd' || (c) == 'i' || (c) == 'o' || (c) == 'u' || (c) == 'x' || \ + (c) == 'X' || (c) == 'e' || (c) == 'E' || (c) == 'f' || (c) == 'F' || \ + (c) == 'g' || (c) == 'G' || (c) == 'a' || (c) == 'A' || (c) == 'c' || \ + (c) == 's' || (c) == 'p') + +#define NV_MAX_NUM_INFO_MMRS 6 + +NV_STATUS nvos_forward_error_to_cray( + struct pci_dev *dev, + NvU32 error_number, + const char *format, + va_list ap +) +{ + NvU32 num_info_mmrs; + NvU64 x = 0, info_mmrs[NV_MAX_NUM_INFO_MMRS]; + int state = NV_FORMAT_STATE_ORDINARY; + int modifier = NV_LENGTH_MODIFIER_NONE; + NvU32 i, n = 0, m = 0; + + memset(info_mmrs, 0, sizeof(info_mmrs)); + while (*format != '\0') + { + switch (state) + { + case NV_FORMAT_STATE_ORDINARY: + if (*format == '%') + state = NV_FORMAT_STATE_INTRODUCTION; + break; + case NV_FORMAT_STATE_INTRODUCTION: + if (*format == '%') + { + state = NV_FORMAT_STATE_ORDINARY; + break; + } + case NV_FORMAT_STATE_FLAGS: + if (NV_IS_FLAG(*format)) + { + state = NV_FORMAT_STATE_FLAGS; + break; + } + else if (*format == '*') + { + state = NV_FORMAT_STATE_FIELD_WIDTH; + break; + } + case NV_FORMAT_STATE_FIELD_WIDTH: + if ((*format >= '0') && (*format <= '9')) + { + state = NV_FORMAT_STATE_FIELD_WIDTH; + break; + } + else if (*format == '.') + { + state = NV_FORMAT_STATE_PRECISION; + break; + } + case NV_FORMAT_STATE_PRECISION: + if ((*format >= '0') && (*format <= '9')) + { + state = NV_FORMAT_STATE_PRECISION; + break; + } + else if (NV_IS_LENGTH_MODIFIER(*format)) + { + state = NV_FORMAT_STATE_LENGTH_MODIFIER; + break; + } + else if (NV_IS_CONVERSION_SPECIFIER(*format)) + { + state = NV_FORMAT_STATE_CONVERSION_SPECIFIER; + break; + } + case NV_FORMAT_STATE_LENGTH_MODIFIER: + if ((*format == 'h') || (*format == 'l')) + { + state = NV_FORMAT_STATE_LENGTH_MODIFIER; + break; + } + else if (NV_IS_CONVERSION_SPECIFIER(*format)) + { + state = NV_FORMAT_STATE_CONVERSION_SPECIFIER; + break; + } + } + switch (state) + { + case NV_FORMAT_STATE_INTRODUCTION: + modifier = NV_LENGTH_MODIFIER_NONE; + break; + case NV_FORMAT_STATE_LENGTH_MODIFIER: + switch (*format) + { + case 'h': + modifier = (modifier == NV_LENGTH_MODIFIER_NONE) + ? NV_LENGTH_MODIFIER_SHORT_INT + : NV_LENGTH_MODIFIER_CHAR; + break; + case 'l': + modifier = (modifier == NV_LENGTH_MODIFIER_NONE) + ? 
NV_LENGTH_MODIFIER_LONG_INT
+                                   : NV_LENGTH_MODIFIER_LONG_LONG_INT;
+                        break;
+                    case 'q':
+                        modifier = NV_LENGTH_MODIFIER_LONG_LONG_INT;
+                        break;
+                    default:
+                        return NV_ERR_INVALID_ARGUMENT;
+                }
+                break;
+            case NV_FORMAT_STATE_CONVERSION_SPECIFIER:
+                switch (*format)
+                {
+                    case 'c':
+                    case 'd':
+                    case 'i':
+                        x = (unsigned int)va_arg(ap, int);
+                        break;
+                    case 'o':
+                    case 'u':
+                    case 'x':
+                    case 'X':
+                        switch (modifier)
+                        {
+                            case NV_LENGTH_MODIFIER_LONG_LONG_INT:
+                                x = va_arg(ap, unsigned long long int);
+                                break;
+                            case NV_LENGTH_MODIFIER_LONG_INT:
+                                x = va_arg(ap, unsigned long int);
+                                break;
+                            case NV_LENGTH_MODIFIER_CHAR:
+                            case NV_LENGTH_MODIFIER_SHORT_INT:
+                            case NV_LENGTH_MODIFIER_NONE:
+                                x = va_arg(ap, unsigned int);
+                                break;
+                        }
+                        break;
+                    default:
+                        return NV_ERR_INVALID_ARGUMENT;
+                }
+                state = NV_FORMAT_STATE_ORDINARY;
+                for (i = 0; i < ((modifier == NV_LENGTH_MODIFIER_LONG_LONG_INT)
+                                 ? 2 : 1); i++)
+                {
+                    if (m == NV_MAX_NUM_INFO_MMRS)
+                        return NV_ERR_INSUFFICIENT_RESOURCES;
+                    info_mmrs[m] = ((info_mmrs[m] << 32) | (x & 0xffffffff));
+                    x >>= 32;
+                    if (++n == 2)
+                    {
+                        m++;
+                        n = 0;
+                    }
+                }
+        }
+        format++;
+    }
+
+    num_info_mmrs = (m + (n != 0));
+    if (num_info_mmrs > 0)
+        cray_nvidia_report_error(dev, error_number, num_info_mmrs, info_mmrs);
+
+    return NV_OK;
+}
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c
new file mode 100644
index 0000000..1afd03f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c
@@ -0,0 +1,1251 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+
+#define NV_DMA_DEV_PRINTF(debuglevel, dma_dev, format, ... ) \
+    nv_printf(debuglevel, "NVRM: %s: " format, \
+              (((dma_dev) && ((dma_dev)->dev)) ?
dev_name((dma_dev)->dev) : \ + NULL), \ + ## __VA_ARGS__) + +NvU32 nv_dma_remap_peer_mmio = NV_DMA_REMAP_PEER_MMIO_ENABLE; + +NV_STATUS nv_create_dma_map_scatterlist (nv_dma_map_t *dma_map); +void nv_destroy_dma_map_scatterlist(nv_dma_map_t *dma_map); +NV_STATUS nv_map_dma_map_scatterlist (nv_dma_map_t *dma_map); +void nv_unmap_dma_map_scatterlist (nv_dma_map_t *dma_map); +static void nv_dma_unmap_contig (nv_dma_map_t *dma_map); +static void nv_dma_unmap_scatterlist (nv_dma_map_t *dma_map); + +static inline NvBool nv_dma_is_addressable( + nv_dma_device_t *dma_dev, + NvU64 start, + NvU64 size +) +{ + NvU64 limit = start + size - 1; + + return (start >= dma_dev->addressable_range.start) && + (limit <= dma_dev->addressable_range.limit) && + (limit >= start); +} + +static NV_STATUS nv_dma_map_contig( + nv_dma_device_t *dma_dev, + nv_dma_map_t *dma_map, + NvU64 *va +) +{ +#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + *va = dma_map_page_attrs(dma_map->dev, dma_map->pages[0], 0, + dma_map->page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, + (dma_map->cache_type == NV_MEMORY_UNCACHED) ? + DMA_ATTR_SKIP_CPU_SYNC : 0); +#else + *va = dma_map_page(dma_map->dev, dma_map->pages[0], 0, + dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL); +#endif + if (dma_mapping_error(dma_map->dev, *va)) + { + return NV_ERR_OPERATING_SYSTEM; + } + + dma_map->mapping.contig.dma_addr = *va; + + if (!nv_dma_is_addressable(dma_dev, *va, dma_map->page_count * PAGE_SIZE)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA address not in addressable range of device " + "(0x%llx-0x%llx, 0x%llx-0x%llx)\n", + *va, *va + (dma_map->page_count * PAGE_SIZE - 1), + dma_dev->addressable_range.start, + dma_dev->addressable_range.limit); + nv_dma_unmap_contig(dma_map); + return NV_ERR_INVALID_ADDRESS; + } + + return NV_OK; +} + +static void nv_dma_unmap_contig(nv_dma_map_t *dma_map) +{ +#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + dma_unmap_page_attrs(dma_map->dev, dma_map->mapping.contig.dma_addr, + dma_map->page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, + (dma_map->cache_type == NV_MEMORY_UNCACHED) ? + DMA_ATTR_SKIP_CPU_SYNC : 0); +#else + dma_unmap_page(dma_map->dev, dma_map->mapping.contig.dma_addr, + dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL); +#endif +} + +static void nv_fill_scatterlist +( + struct scatterlist *sgl, + struct page **pages, + unsigned int page_count +) +{ + unsigned int i; + struct scatterlist *sg; +#if defined(for_each_sg) + for_each_sg(sgl, sg, page_count, i) + { + sg_set_page(sg, pages[i], PAGE_SIZE, 0); + } +#else + for (i = 0; i < page_count; i++) + { + sg = &(sgl)[i]; + sg->page = pages[i]; + sg->length = PAGE_SIZE; + sg->offset = 0; + } +#endif +} + +NV_STATUS nv_create_dma_map_scatterlist(nv_dma_map_t *dma_map) +{ + /* + * We need to split our mapping into at most 4GB - PAGE_SIZE chunks. + * The Linux kernel stores the length (and offset) of a scatter-gather + * segment as an unsigned int, so it will overflow if we try to do + * anything larger. + */ + NV_STATUS status; + nv_dma_submap_t *submap; + NvU32 i; + NvU64 allocated_size = 0; + NvU64 num_submaps = dma_map->page_count + NV_DMA_SUBMAP_MAX_PAGES - 1; + NvU64 total_size = dma_map->page_count << PAGE_SHIFT; + + /* + * This turns into 64-bit division, which the ARMv7 kernel doesn't provide + * implicitly. Instead, we need to use the platform's do_div() to perform + * the division. 
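+     *
+     * For example, do_div() divides its 64-bit dividend in place and
+     * returns the remainder (illustrative values only):
+     *
+     *     NvU64 n = 10;
+     *     NvU32 rem = do_div(n, 4);    // now n == 2 and rem == 2
+     *
+     * which is why num_submaps is pre-biased by
+     * NV_DMA_SUBMAP_MAX_PAGES - 1 above, yielding a round-up division.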
+     */
+    do_div(num_submaps, NV_DMA_SUBMAP_MAX_PAGES);
+
+    WARN_ON(NvU64_HI32(num_submaps) != 0);
+
+    if (dma_map->import_sgt && (num_submaps != 1))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    dma_map->mapping.discontig.submap_count = NvU64_LO32(num_submaps);
+
+    status = os_alloc_mem((void **)&dma_map->mapping.discontig.submaps,
+        sizeof(nv_dma_submap_t) * dma_map->mapping.discontig.submap_count);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    os_mem_set((void *)dma_map->mapping.discontig.submaps, 0,
+        sizeof(nv_dma_submap_t) * dma_map->mapping.discontig.submap_count);
+
+    /* If we have an imported SGT, just use that directly. */
+    if (dma_map->import_sgt)
+    {
+        dma_map->mapping.discontig.submaps[0].page_count = dma_map->page_count;
+        dma_map->mapping.discontig.submaps[0].sgt = *dma_map->import_sgt;
+        dma_map->mapping.discontig.submaps[0].imported = NV_TRUE;
+
+        return status;
+    }
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        NvU64 submap_size = NV_MIN(NV_DMA_SUBMAP_MAX_PAGES << PAGE_SHIFT,
+                                   total_size - allocated_size);
+
+        submap->page_count = (NvU32)(submap_size >> PAGE_SHIFT);
+
+        status = NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dma_map, submap, i);
+        if (status != NV_OK)
+        {
+            submap->page_count = 0;
+            break;
+        }
+
+#if !defined(NV_SG_ALLOC_TABLE_FROM_PAGES_PRESENT) || \
+    defined(NV_DOM0_KERNEL_PRESENT)
+        {
+            NvU64 page_idx = NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(i);
+            nv_fill_scatterlist(submap->sgt.sgl,
+                &dma_map->pages[page_idx], submap->page_count);
+        }
+#endif
+
+        allocated_size += submap_size;
+    }
+
+    WARN_ON(allocated_size != total_size);
+
+    if (status != NV_OK)
+    {
+        nv_destroy_dma_map_scatterlist(dma_map);
+    }
+
+    return status;
+}
+
+NV_STATUS nv_map_dma_map_scatterlist(nv_dma_map_t *dma_map)
+{
+    NV_STATUS status = NV_OK;
+    nv_dma_submap_t *submap;
+    NvU64 i;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        /* Imported SGTs will have already been mapped by the exporter. */
+        submap->sg_map_count = submap->imported ?
+            submap->sgt.orig_nents :
+            dma_map_sg(dma_map->dev,
+                       submap->sgt.sgl,
+                       submap->sgt.orig_nents,
+                       DMA_BIDIRECTIONAL);
+        if (submap->sg_map_count == 0)
+        {
+            status = NV_ERR_OPERATING_SYSTEM;
+            break;
+        }
+    }
+
+    if (status != NV_OK)
+    {
+        nv_unmap_dma_map_scatterlist(dma_map);
+    }
+
+    return status;
+}
+
+void nv_unmap_dma_map_scatterlist(nv_dma_map_t *dma_map)
+{
+    nv_dma_submap_t *submap;
+    NvU64 i;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        if (submap->sg_map_count == 0)
+        {
+            break;
+        }
+
+        if (submap->imported)
+        {
+            /* Imported SGTs will be unmapped by the exporter. */
+            continue;
+        }
+
+        dma_unmap_sg(dma_map->dev, submap->sgt.sgl,
+                     submap->sgt.orig_nents,
+                     DMA_BIDIRECTIONAL);
+    }
+}
+
+void nv_destroy_dma_map_scatterlist(nv_dma_map_t *dma_map)
+{
+    nv_dma_submap_t *submap;
+    NvU64 i;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        if ((submap->page_count == 0) || submap->imported)
+        {
+            break;
+        }
+
+        sg_free_table(&submap->sgt);
+    }
+
+    os_free_mem(dma_map->mapping.discontig.submaps);
+}
+
+void nv_load_dma_map_scatterlist(
+    nv_dma_map_t *dma_map,
+    NvU64 *va_array
+)
+{
+    unsigned int i, j;
+    struct scatterlist *sg;
+    nv_dma_submap_t *submap;
+    NvU64 sg_addr, sg_off, sg_len, k, l = 0;
+
+    NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i)
+    {
+        for_each_sg(submap->sgt.sgl, sg, submap->sg_map_count, j)
+        {
+            /*
+             * It is possible for pci_map_sg() to merge scatterlist entries, so
+             * make sure we account for that here.
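+             * For example (hypothetical layout), two physically adjacent
+             * 4K pages may come back as one entry with
+             * sg_dma_len(sg) == 8192; the loop below walks that entry in
+             * PAGE_SIZE steps and emits one per-page DMA address into
+             * va_array[].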
+ */ + for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), + sg_off = 0, k = 0; + (sg_off < sg_len) && (k < submap->page_count); + sg_off += PAGE_SIZE, l++, k++) + { + va_array[l] = sg_addr + sg_off; + } + } + } +} + +static NV_STATUS nv_dma_map_scatterlist( + nv_dma_device_t *dma_dev, + nv_dma_map_t *dma_map, + NvU64 *va_array +) +{ + NV_STATUS status; + NvU64 i; + + status = nv_create_dma_map_scatterlist(dma_map); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate DMA mapping scatterlist!\n"); + return status; + } + + status = nv_map_dma_map_scatterlist(dma_map); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to create a DMA mapping!\n"); + nv_destroy_dma_map_scatterlist(dma_map); + return status; + } + + nv_load_dma_map_scatterlist(dma_map, va_array); + + for (i = 0; i < dma_map->page_count; i++) + { + if (!nv_dma_is_addressable(dma_dev, va_array[i], PAGE_SIZE)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA address not in addressable range of device " + "(0x%llx, 0x%llx-0x%llx)\n", + va_array[i], dma_dev->addressable_range.start, + dma_dev->addressable_range.limit); + nv_dma_unmap_scatterlist(dma_map); + return NV_ERR_INVALID_ADDRESS; + } + } + + return NV_OK; +} + +static void nv_dma_unmap_scatterlist(nv_dma_map_t *dma_map) +{ + nv_unmap_dma_map_scatterlist(dma_map); + nv_destroy_dma_map_scatterlist(dma_map); +} + +static void nv_dma_nvlink_addr_compress +( + nv_dma_device_t *dma_dev, + NvU64 *va_array, + NvU64 page_count, + NvBool contig +) +{ +#if defined(NVCPU_PPC64LE) + NvU64 addr = 0; + NvU64 i; + + /* + * On systems that support NVLink sysmem links, apply the required address + * compression scheme when links are trained. Otherwise check that PCIe and + * NVLink DMA mappings are equivalent as per requirements of Bug 1920398. + */ + if (dma_dev->nvlink) + { + for (i = 0; i < (contig ? 1 : page_count); i++) + { + va_array[i] = nv_compress_nvlink_addr(va_array[i]); + } + + return; + } + + for (i = 0; i < (contig ? 1 : page_count); i++) + { + addr = nv_compress_nvlink_addr(va_array[i]); + if (WARN_ONCE(va_array[i] != addr, + "unexpected DMA address compression (0x%llx, 0x%llx)\n", + va_array[i], addr)) + { + break; + } + } +#endif +} + +static void nv_dma_nvlink_addr_decompress +( + nv_dma_device_t *dma_dev, + NvU64 *va_array, + NvU64 page_count, + NvBool contig +) +{ +#if defined(NVCPU_PPC64LE) + NvU64 i; + + if (dma_dev->nvlink) + { + for (i = 0; i < (contig ? 
1 : page_count); i++) + { + va_array[i] = nv_expand_nvlink_addr(va_array[i]); + } + } +#endif +} + +NV_STATUS NV_API_CALL nv_dma_map_sgt( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvU32 cache_type, + void **priv +) +{ + NV_STATUS status; + nv_dma_map_t *dma_map = NULL; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (page_count > os_get_num_phys_pages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA mapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = os_alloc_mem((void **)&dma_map, sizeof(nv_dma_map_t)); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate nv_dma_map_t!\n"); + return status; + } + + dma_map->dev = dma_dev->dev; + dma_map->pages = NULL; + dma_map->import_sgt = (struct sg_table *) *priv; + dma_map->page_count = page_count; + dma_map->contiguous = NV_FALSE; + dma_map->cache_type = cache_type; + + dma_map->mapping.discontig.submap_count = 0; + status = nv_dma_map_scatterlist(dma_dev, dma_map, va_array); + + if (status != NV_OK) + { + os_free_mem(dma_map); + } + else + { + *priv = dma_map; + nv_dma_nvlink_addr_compress(dma_dev, va_array, dma_map->page_count, + dma_map->contiguous); + } + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_unmap_sgt( + nv_dma_device_t *dma_dev, + void **priv +) +{ + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + *priv = NULL; + + nv_dma_unmap_scatterlist(dma_map); + + os_free_mem(dma_map); + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_dma_map_pages( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvBool contig, + NvU32 cache_type, + void **priv +) +{ + NV_STATUS status; + nv_dma_map_t *dma_map = NULL; + + if (priv == NULL) + { + /* + * IOMMU path has not been implemented yet to handle + * anything except a nv_dma_map_t as the priv argument. + */ + return NV_ERR_NOT_SUPPORTED; + } + + if (page_count > os_get_num_phys_pages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA mapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = os_alloc_mem((void **)&dma_map, sizeof(nv_dma_map_t)); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate nv_dma_map_t!\n"); + return status; + } + + dma_map->dev = dma_dev->dev; + dma_map->pages = *priv; + dma_map->import_sgt = NULL; + dma_map->page_count = page_count; + dma_map->contiguous = contig; + dma_map->cache_type = cache_type; + + if (dma_map->page_count > 1 && !dma_map->contiguous) + { + dma_map->mapping.discontig.submap_count = 0; + status = nv_dma_map_scatterlist(dma_dev, dma_map, va_array); + } + else + { + /* + * Force single-page mappings to be contiguous to avoid scatterlist + * overhead. + */ + dma_map->contiguous = NV_TRUE; + + status = nv_dma_map_contig(dma_dev, dma_map, va_array); + } + + if (status != NV_OK) + { + os_free_mem(dma_map); + } + else + { + *priv = dma_map; + nv_dma_nvlink_addr_compress(dma_dev, va_array, dma_map->page_count, + dma_map->contiguous); + } + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_unmap_pages( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + void **priv +) +{ + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + /* + * IOMMU path has not been implemented yet to handle + * anything except a nv_dma_map_t as the priv argument. 
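+         * On input, *priv holds the struct page ** array backing the
+         * allocation; on success it is replaced with the nv_dma_map_t
+         * so it can be handed back to nv_dma_unmap_pages() later.
+         * A hypothetical caller:
+         *
+         *     void *priv = pages;
+         *     status = nv_dma_map_pages(dma_dev, count, va_array,
+         *                               NV_FALSE, NV_MEMORY_CACHED, &priv);
+         *     ...
+         *     status = nv_dma_unmap_pages(dma_dev, count, va_array, &priv);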
+ */ + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + if (page_count > os_get_num_phys_pages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA unmapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + if (page_count != dma_map->page_count) + { + NV_DMA_DEV_PRINTF(NV_DBG_WARNINGS, dma_dev, + "Requested to DMA unmap %llu pages, but there are %llu " + "in the mapping\n", page_count, dma_map->page_count); + return NV_ERR_INVALID_REQUEST; + } + + *priv = dma_map->pages; + + if (dma_map->contiguous) + { + nv_dma_unmap_contig(dma_map); + } + else + { + nv_dma_unmap_scatterlist(dma_map); + } + + os_free_mem(dma_map); + + return NV_OK; +} + +/* + * Wrappers used for DMA-remapping an nv_alloc_t during transition to more + * generic interfaces. + */ +NV_STATUS NV_API_CALL nv_dma_map_alloc +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvBool contig, + void **priv +) +{ + NV_STATUS status; + NvU64 i; + nv_alloc_t *at = *priv; + struct page **pages = NULL; + NvU32 cache_type = NV_MEMORY_CACHED; + NvU64 pages_size = sizeof(struct page *) * (contig ? 1 : page_count); + + /* If we have an imported SGT, just use that directly. */ + if (at && at->import_sgt) + { + *priv = at->import_sgt; + status = nv_dma_map_sgt(dma_dev, page_count, va_array, at->cache_type, + priv); + if (status != NV_OK) + { + *priv = at; + } + return status; + } + + /* + * Convert the nv_alloc_t into a struct page * array for + * nv_dma_map_pages(). + */ + status = os_alloc_mem((void **)&pages, pages_size); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate page array for DMA mapping!\n"); + return status; + } + + os_mem_set(pages, 0, pages_size); + + if (at != NULL) + { + WARN_ON(page_count != at->num_pages); + + if (at->flags.user) + { + pages[0] = at->user_pages[0]; + if (!contig) + { + for (i = 1; i < page_count; i++) + { + pages[i] = at->user_pages[i]; + } + } + } + else if (at->flags.physical && contig) + { + /* Supplied pages hold physical address */ + pages[0] = pfn_to_page(PFN_DOWN(va_array[0])); + } + cache_type = at->cache_type; + } + + if (pages[0] == NULL) + { + pages[0] = NV_GET_PAGE_STRUCT(va_array[0]); + if (!contig) + { + for (i = 1; i < page_count; i++) + { + pages[i] = NV_GET_PAGE_STRUCT(va_array[i]); + } + } + } + + *priv = pages; + status = nv_dma_map_pages(dma_dev, page_count, va_array, contig, cache_type, + priv); + if (status != NV_OK) + { + *priv = at; + os_free_mem(pages); + } + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_unmap_alloc +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + void **priv +) +{ + NV_STATUS status = NV_OK; + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + if (!dma_map->import_sgt) + { + status = nv_dma_unmap_pages(dma_dev, page_count, va_array, priv); + if (status != NV_OK) + { + /* + * If nv_dma_unmap_pages() fails, we hit an assert condition and the + * priv argument won't be the page array we allocated in + * nv_dma_map_alloc(), so we skip the free here. But note that since + * this is an assert condition it really should never happen. 
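+             * In the normal path the sequence is symmetric with
+             * nv_dma_map_alloc(): *priv goes in as the nv_dma_map_t,
+             * nv_dma_unmap_pages() hands back the page array through it,
+             * and that array is then freed just below.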
+ */ + return status; + } + + /* Free the struct page * array allocated by nv_dma_map_alloc() */ + os_free_mem(*priv); + } else { + status = nv_dma_unmap_sgt(dma_dev, priv); + } + + return status; +} + +static NvBool nv_dma_use_map_resource +( + nv_dma_device_t *dma_dev +) +{ + if (nv_dma_remap_peer_mmio == NV_DMA_REMAP_PEER_MMIO_DISABLE) + { + return NV_FALSE; + } + +#if defined(NV_DMA_MAP_RESOURCE_PRESENT) + const struct dma_map_ops *ops = get_dma_ops(dma_dev->dev); + + if (ops == NULL) + { + /* On pre-5.0 kernels, if dma_map_resource() is present, then we + * assume that ops != NULL. With direct_dma handling swiotlb on 5.0+ + * kernels, ops == NULL. + */ +#if defined(NV_DMA_IS_DIRECT_PRESENT) + return NV_TRUE; +#else + return NV_FALSE; +#endif + } + + return (ops->map_resource != NULL); +#else + return NV_FALSE; +#endif +} + +/* DMA-map a peer PCI device's BAR for peer access. */ +NV_STATUS NV_API_CALL nv_dma_map_peer +( + nv_dma_device_t *dma_dev, + nv_dma_device_t *peer_dma_dev, + NvU8 bar_index, + NvU64 page_count, + NvU64 *va +) +{ + struct pci_dev *peer_pci_dev = to_pci_dev(peer_dma_dev->dev); + struct resource *res; + NV_STATUS status; + + if (peer_pci_dev == NULL) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Not a PCI device"); + return NV_ERR_INVALID_REQUEST; + } + + BUG_ON(bar_index >= NV_GPU_NUM_BARS); + res = &peer_pci_dev->resource[bar_index]; + if (res->start == 0) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Resource %u not valid", + bar_index); + return NV_ERR_INVALID_REQUEST; + } + + if ((*va < res->start) || ((*va + (page_count * PAGE_SIZE)) > res->end)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Mapping requested (start = 0x%llx, page_count = 0x%llx)" + " outside of resource bounds (start = 0x%llx, end = 0x%llx)\n", + *va, page_count, res->start, res->end); + return NV_ERR_INVALID_REQUEST; + } + + if (nv_dma_use_map_resource(dma_dev)) + { + status = nv_dma_map_mmio(dma_dev, page_count, va); + } + else + { + /* + * Best effort - can't map through the iommu but at least try to + * convert to a bus address. + */ + NvU64 offset = *va - res->start; + *va = nv_pci_bus_address(peer_pci_dev, bar_index) + offset; + status = NV_OK; + } + + return status; +} + +void NV_API_CALL nv_dma_unmap_peer +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 va +) +{ + if (nv_dma_use_map_resource(dma_dev)) + { + nv_dma_unmap_mmio(dma_dev, page_count, va); + } +} + +/* DMA-map another anonymous device's MMIO region for peer access. */ +NV_STATUS NV_API_CALL nv_dma_map_mmio +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va +) +{ +#if defined(NV_DMA_MAP_RESOURCE_PRESENT) + BUG_ON(!va); + + if (nv_dma_use_map_resource(dma_dev)) + { + NvU64 mmio_addr = *va; + *va = dma_map_resource(dma_dev->dev, mmio_addr, page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dma_dev->dev, *va)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to DMA map MMIO range [0x%llx-0x%llx]\n", + mmio_addr, mmio_addr + page_count * PAGE_SIZE - 1); + return NV_ERR_OPERATING_SYSTEM; + } + } + else + { + /* + * If dma_map_resource is not available, pass through the source address + * without failing. Further, adjust it using the DMA start address to + * keep RM's validation schemes happy. 
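+         * For example (illustrative addresses only): with an MMIO
+         * address of 0x90000000 and an addressable_range.start of
+         * 0x20000000, the value returned through *va is 0xB0000000.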
+ */ + *va = *va + dma_dev->addressable_range.start; + } + + nv_dma_nvlink_addr_compress(dma_dev, va, page_count, NV_TRUE); + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL nv_dma_unmap_mmio +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 va +) +{ +#if defined(NV_DMA_MAP_RESOURCE_PRESENT) + nv_dma_nvlink_addr_decompress(dma_dev, &va, page_count, NV_TRUE); + + if (nv_dma_use_map_resource(dma_dev)) + { + dma_unmap_resource(dma_dev->dev, va, page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, 0); + } +#endif +} + +/* + * Invalidate DMA mapping in CPU caches by "syncing" to the device. + * + * This is only implemented for ARM platforms, since other supported + * platforms are cache coherent and have not required this (we + * explicitly haven't supported SWIOTLB bounce buffering either where + * this would be needed). + */ +void NV_API_CALL nv_dma_cache_invalidate +( + nv_dma_device_t *dma_dev, + void *priv +) +{ +#if defined(NVCPU_AARCH64) + nv_dma_map_t *dma_map = priv; + + if (dma_map->contiguous) + { + dma_sync_single_for_device(dma_dev->dev, + dma_map->mapping.contig.dma_addr, + (size_t) PAGE_SIZE * dma_map->page_count, + DMA_FROM_DEVICE); + } + else + { + nv_dma_submap_t *submap; + NvU64 i; + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + dma_sync_sg_for_device(dma_dev->dev, + submap->sgt.sgl, + submap->sgt.orig_nents, + DMA_FROM_DEVICE); + } + } +#endif +} + +/* Enable DMA-mapping over NVLink */ +void NV_API_CALL nv_dma_enable_nvlink +( + nv_dma_device_t *dma_dev +) +{ + dma_dev->nvlink = NV_TRUE; +} + +#if defined(NV_LINUX_DMA_BUF_H_PRESENT) && \ + defined(NV_DRM_AVAILABLE) && defined(NV_DRM_DRM_GEM_H_PRESENT) + +/* + * drm_gem_object_{get/put}() added by commit + * e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and + * drm_gem_object_{reference/unreference}() removed by commit + * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15). 
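+ *
+ * The wrappers below pick whichever reference/unreference flavor the
+ * running kernel provides, so the SGT import and release paths can
+ * take and drop a GEM object reference without repeating version
+ * checks at every call site.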
+ */ + +static inline void +nv_dma_gem_object_unreference_unlocked(struct drm_gem_object *gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + +#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT) + drm_gem_object_put_unlocked(gem); +#else + drm_gem_object_put(gem); +#endif + +#else + drm_gem_object_unreference_unlocked(gem); +#endif +} + +static inline void +nv_dma_gem_object_reference(struct drm_gem_object *gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + drm_gem_object_get(gem); +#else + drm_gem_object_reference(gem); +#endif +} + +NV_STATUS NV_API_CALL nv_dma_import_sgt +( + nv_dma_device_t *dma_dev, + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + if ((dma_dev == NULL) || + (sgt == NULL) || + (gem == NULL)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Import arguments are NULL!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // Prevent the kernel module controlling GEM from being unloaded + if (!try_module_get(gem->dev->driver->fops->owner)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Couldn't reference the GEM object's owner!\n"); + return NV_ERR_INVALID_DEVICE; + } + + // Do nothing with SGT, it is already mapped and pinned by the exporter + + nv_dma_gem_object_reference(gem); + + return NV_OK; +} + +void NV_API_CALL nv_dma_release_sgt +( + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + if (gem == NULL) + { + return; + } + + // Do nothing with SGT, it will be unmapped and unpinned by the exporter + WARN_ON(sgt == NULL); + + nv_dma_gem_object_unreference_unlocked(gem); + + module_put(gem->dev->driver->fops->owner); +} + +#else + +NV_STATUS NV_API_CALL nv_dma_import_sgt +( + nv_dma_device_t *dma_dev, + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_dma_release_sgt +( + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ +} +#endif /* NV_LINUX_DMA_BUF_H_PRESENT && NV_DRM_AVAILABLE && NV_DRM_DRM_GEM_H_PRESENT */ + +#if defined(NV_LINUX_DMA_BUF_H_PRESENT) + + +#define IMPORT_DMABUF_FUNCTIONS_DEFINED + +NV_STATUS NV_API_CALL nv_dma_import_dma_buf +( + nv_dma_device_t *dma_dev, + struct dma_buf *dma_buf, + NvU32 *size, + struct sg_table **sgt, + nv_dma_buf_t **import_priv +) +{ + nv_dma_buf_t *nv_dma_buf = NULL; + struct dma_buf_attachment *dma_attach = NULL; + struct sg_table *map_sgt = NULL; + NV_STATUS status = NV_OK; + + if ((dma_dev == NULL) || + (dma_buf == NULL) || + (size == NULL) || + (sgt == NULL) || + (import_priv == NULL)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Import arguments are NULL!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + status = os_alloc_mem((void **)&nv_dma_buf, sizeof(*nv_dma_buf)); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Can't allocate mem for nv_buf!\n"); + return status; + } + + get_dma_buf(dma_buf); + + dma_attach = dma_buf_attach(dma_buf, dma_dev->dev); + if (IS_ERR_OR_NULL(dma_attach)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Can't attach dma_buf!\n"); + status = NV_ERR_OPERATING_SYSTEM; + + goto dma_buf_attach_fail; + } + + map_sgt = dma_buf_map_attachment(dma_attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(map_sgt)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Can't map dma attachment!\n"); + status = NV_ERR_OPERATING_SYSTEM; + + goto dma_buf_map_fail; + } + + nv_dma_buf->dma_buf = dma_buf; + nv_dma_buf->dma_attach = dma_attach; + nv_dma_buf->sgt = map_sgt; + + *size = dma_buf->size; + *import_priv = nv_dma_buf; + *sgt = map_sgt; + + return NV_OK; + +dma_buf_map_fail: + 
dma_buf_detach(dma_buf, dma_attach); +dma_buf_attach_fail: + os_free_mem(nv_dma_buf); + dma_buf_put(dma_buf); + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_import_from_fd +( + nv_dma_device_t *dma_dev, + NvS32 fd, + NvU32 *size, + struct sg_table **sgt, + nv_dma_buf_t **import_priv +) +{ + struct dma_buf *dma_buf = dma_buf_get(fd); + NV_STATUS status; + + if (IS_ERR_OR_NULL(dma_buf)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Can't get dma_buf from fd!\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + status = nv_dma_import_dma_buf(dma_dev, + dma_buf, size, sgt, import_priv); + dma_buf_put(dma_buf); + + return status; +} + +void NV_API_CALL nv_dma_release_dma_buf +( + nv_dma_buf_t *import_priv +) +{ + nv_dma_buf_t *nv_dma_buf = NULL; + + if (import_priv == NULL) + { + return; + } + + nv_dma_buf = (nv_dma_buf_t *)import_priv; + dma_buf_unmap_attachment(nv_dma_buf->dma_attach, nv_dma_buf->sgt, + DMA_BIDIRECTIONAL); + dma_buf_detach(nv_dma_buf->dma_buf, nv_dma_buf->dma_attach); + dma_buf_put(nv_dma_buf->dma_buf); + + os_free_mem(nv_dma_buf); +} + +#endif /* NV_LINUX_DMA_BUF_H_PRESENT */ + +#ifndef IMPORT_DMABUF_FUNCTIONS_DEFINED + +NV_STATUS NV_API_CALL nv_dma_import_dma_buf +( + nv_dma_device_t *dma_dev, + struct dma_buf *dma_buf, + NvU32 *size, + struct sg_table **sgt, + nv_dma_buf_t **import_priv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_dma_import_from_fd +( + nv_dma_device_t *dma_dev, + NvS32 fd, + NvU32 *size, + struct sg_table **sgt, + nv_dma_buf_t **import_priv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_dma_release_dma_buf +( + nv_dma_buf_t *import_priv +) +{ +} +#endif /* !IMPORT_DMABUF_FUNCTIONS_DEFINED */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c new file mode 100644 index 0000000..1d2073b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c @@ -0,0 +1,896 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include +#include "nv-dmabuf.h" + + + +#if defined(CONFIG_DMA_SHARED_BUFFER) +typedef struct nv_dma_buf_mem_handle +{ + NvHandle h_memory; + NvU64 offset; + NvU64 size; + NvU64 bar1_va; +} nv_dma_buf_mem_handle_t; + +typedef struct nv_dma_buf_file_private +{ + nv_state_t *nv; + NvHandle h_client; + NvHandle h_device; + NvHandle h_subdevice; + NvU32 total_objects; + NvU32 num_objects; + NvU64 total_size; + NvU64 attached_size; + struct mutex lock; + nv_dma_buf_mem_handle_t *handles; + NvU64 bar1_va_ref_count; + void *mig_info; +} nv_dma_buf_file_private_t; + +static void +nv_dma_buf_free_file_private( + nv_dma_buf_file_private_t *priv +) +{ + if (priv == NULL) + { + return; + } + + if (priv->handles != NULL) + { + NV_KFREE(priv->handles, priv->total_objects * sizeof(priv->handles[0])); + priv->handles = NULL; + } + + mutex_destroy(&priv->lock); + + NV_KFREE(priv, sizeof(nv_dma_buf_file_private_t)); +} + +static nv_dma_buf_file_private_t* +nv_dma_buf_alloc_file_private( + NvU32 num_handles +) +{ + nv_dma_buf_file_private_t *priv = NULL; + + NV_KMALLOC(priv, sizeof(nv_dma_buf_file_private_t)); + if (priv == NULL) + { + return NULL; + } + + memset(priv, 0, sizeof(nv_dma_buf_file_private_t)); + + mutex_init(&priv->lock); + + NV_KMALLOC(priv->handles, num_handles * sizeof(priv->handles[0])); + if (priv->handles == NULL) + { + goto failed; + } + + memset(priv->handles, 0, num_handles * sizeof(priv->handles[0])); + + return priv; + +failed: + nv_dma_buf_free_file_private(priv); + + return NULL; +} + +// Must be called with RMAPI lock and GPU lock taken +static void +nv_dma_buf_undup_mem_handles_unlocked( + nvidia_stack_t *sp, + NvU32 index, + NvU32 num_objects, + nv_dma_buf_file_private_t *priv +) +{ + NvU32 i = 0; + + for (i = index; i < num_objects; i++) + { + if (priv->handles[i].h_memory == 0) + { + continue; + } + + rm_dma_buf_undup_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory); + + priv->attached_size -= priv->handles[i].size; + priv->handles[i].h_memory = 0; + priv->handles[i].offset = 0; + priv->handles[i].size = 0; + priv->num_objects--; + } +} + +static void +nv_dma_buf_undup_mem_handles( + nvidia_stack_t *sp, + NvU32 index, + NvU32 num_objects, + nv_dma_buf_file_private_t *priv +) +{ + NV_STATUS status; + + status = rm_acquire_api_lock(sp); + if (WARN_ON(status != NV_OK)) + { + return; + } + + status = rm_acquire_all_gpus_lock(sp); + if (WARN_ON(status != NV_OK)) + { + goto unlock_api_lock; + } + + nv_dma_buf_undup_mem_handles_unlocked(sp, index, num_objects, priv); + + rm_release_all_gpus_lock(sp); + +unlock_api_lock: + rm_release_api_lock(sp); +} + +static NV_STATUS +nv_dma_buf_dup_mem_handles( + nvidia_stack_t *sp, + nv_dma_buf_file_private_t *priv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ + NV_STATUS status = NV_OK; + NvU32 index = params->index; + NvU32 count = 0; + NvU32 i = 0; + + status = rm_acquire_api_lock(sp); + if (status != NV_OK) + { + return status; + } + + status = rm_acquire_gpu_lock(sp, priv->nv); + if (status != NV_OK) + { + goto unlock_api_lock; + } + + for (i = 0; i < params->numObjects; i++) + { + NvHandle h_memory_duped = 0; + + if (priv->handles[index].h_memory != 0) + { + status = NV_ERR_IN_USE; + goto failed; + } + + if (params->sizes[i] > priv->total_size - priv->attached_size) + { + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + status = rm_dma_buf_dup_mem_handle(sp, priv->nv, + params->hClient, + priv->h_client, + priv->h_device, + priv->h_subdevice, + priv->mig_info, + params->handles[i], + params->offsets[i], 
+ params->sizes[i], + &h_memory_duped); + if (status != NV_OK) + { + goto failed; + } + + priv->attached_size += params->sizes[i]; + priv->handles[index].h_memory = h_memory_duped; + priv->handles[index].offset = params->offsets[i]; + priv->handles[index].size = params->sizes[i]; + priv->num_objects++; + index++; + count++; + } + + if ((priv->num_objects == priv->total_objects) && + (priv->attached_size != priv->total_size)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + rm_release_gpu_lock(sp, priv->nv); + + rm_release_api_lock(sp); + + return NV_OK; + +failed: + nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv); + + rm_release_gpu_lock(sp, priv->nv); + +unlock_api_lock: + rm_release_api_lock(sp); + + return status; +} + +// Must be called with RMAPI lock and GPU lock taken +static void +nv_dma_buf_unmap_unlocked( + nvidia_stack_t *sp, + nv_dma_device_t *peer_dma_dev, + nv_dma_buf_file_private_t *priv, + struct sg_table *sgt, + NvU32 count +) +{ + NV_STATUS status; + NvU32 i; + NvU64 dma_len; + NvU64 dma_addr; + NvU64 bar1_va; + NvBool bar1_unmap_needed; + struct scatterlist *sg = NULL; + + bar1_unmap_needed = (priv->bar1_va_ref_count == 0); + + for_each_sg(sgt->sgl, sg, count, i) + { + dma_addr = sg_dma_address(sg); + dma_len = priv->handles[i].size; + bar1_va = priv->handles[i].bar1_va; + + WARN_ON(sg_dma_len(sg) != priv->handles[i].size); + + nv_dma_unmap_peer(peer_dma_dev, (dma_len / os_page_size), dma_addr); + + if (bar1_unmap_needed) + { + status = rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory, + priv->handles[i].size, + priv->handles[i].bar1_va); + WARN_ON(status != NV_OK); + } + } +} + +static struct sg_table* +nv_dma_buf_map( + struct dma_buf_attachment *attachment, + enum dma_data_direction direction +) +{ + NV_STATUS status; + nvidia_stack_t *sp = NULL; + struct scatterlist *sg = NULL; + struct sg_table *sgt = NULL; + struct dma_buf *buf = attachment->dmabuf; + struct device *dev = attachment->dev; + nv_dma_buf_file_private_t *priv = buf->priv; + nv_dma_device_t peer_dma_dev = {{ 0 }}; + NvBool bar1_map_needed; + NvBool bar1_unmap_needed; + NvU32 count = 0; + NvU32 i = 0; + int rc = 0; + + // + // We support importers that are able to handle MMIO resources + // not backed by struct page. This will need to be revisited + // when dma-buf support for P9 will be added. + // +#if defined(NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT) && \ + defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER) + if (dma_buf_attachment_is_dynamic(attachment) && + !attachment->peer2peer) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to map dynamic attachment with no P2P support\n"); + return NULL; + } +#endif + + mutex_lock(&priv->lock); + + if (priv->num_objects != priv->total_objects) + { + goto unlock_priv; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + goto unlock_priv; + } + + status = rm_acquire_api_lock(sp); + if (status != NV_OK) + { + goto free_sp; + } + + status = rm_acquire_gpu_lock(sp, priv->nv); + if (status != NV_OK) + { + goto unlock_api_lock; + } + + NV_KMALLOC(sgt, sizeof(struct sg_table)); + if (sgt == NULL) + { + goto unlock_gpu_lock; + } + + memset(sgt, 0, sizeof(struct sg_table)); + + // + // RM currently returns contiguous BAR1, so we create as many + // sg entries as the number of handles being mapped. + // When RM can alloc discontiguous BAR1, this code will need to be revisited. 
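+    // For example (hypothetical sizes), exporting two handles of 2 MB
+    // and 4 MB yields an sg_table with two entries whose sg_dma_len
+    // values are 0x200000 and 0x400000, each backed by its own
+    // contiguous BAR1 mapping.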
+ // + rc = sg_alloc_table(sgt, priv->num_objects, GFP_KERNEL); + if (rc != 0) + { + goto free_sgt; + } + + peer_dma_dev.dev = dev; + peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask; + bar1_map_needed = bar1_unmap_needed = (priv->bar1_va_ref_count == 0); + + for_each_sg(sgt->sgl, sg, priv->num_objects, i) + { + NvU64 dma_addr; + NvU64 dma_len; + + if (bar1_map_needed) + { + status = rm_dma_buf_map_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory, + priv->handles[i].offset, + priv->handles[i].size, + &priv->handles[i].bar1_va); + if (status != NV_OK) + { + goto unmap_handles; + } + } + + dma_addr = priv->handles[i].bar1_va; + dma_len = priv->handles[i].size; + + status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev, + 0x1, (dma_len / os_page_size), &dma_addr); + if (status != NV_OK) + { + if (bar1_unmap_needed) + { + // Unmap the recently mapped memory handle + (void) rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory, + priv->handles[i].size, + priv->handles[i].bar1_va); + } + + // Unmap remaining memory handles + goto unmap_handles; + } + + sg_set_page(sg, NULL, dma_len, 0); + sg_dma_address(sg) = (dma_addr_t)dma_addr; + sg_dma_len(sg) = dma_len; + count++; + } + + priv->bar1_va_ref_count++; + + rm_release_gpu_lock(sp, priv->nv); + + rm_release_api_lock(sp); + + nv_kmem_cache_free_stack(sp); + + mutex_unlock(&priv->lock); + + return sgt; + +unmap_handles: + nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, count); + + sg_free_table(sgt); + +free_sgt: + NV_KFREE(sgt, sizeof(struct sg_table)); + +unlock_gpu_lock: + rm_release_gpu_lock(sp, priv->nv); + +unlock_api_lock: + rm_release_api_lock(sp); + +free_sp: + nv_kmem_cache_free_stack(sp); + +unlock_priv: + mutex_unlock(&priv->lock); + + return NULL; +} + +static void +nv_dma_buf_unmap( + struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction +) +{ + NV_STATUS status; + struct dma_buf *buf = attachment->dmabuf; + struct device *dev = attachment->dev; + nvidia_stack_t *sp = NULL; + nv_dma_buf_file_private_t *priv = buf->priv; + nv_dma_device_t peer_dma_dev = {{ 0 }}; + int rc = 0; + + mutex_lock(&priv->lock); + + if (priv->num_objects != priv->total_objects) + { + goto unlock_priv; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (WARN_ON(rc != 0)) + { + goto unlock_priv; + } + + status = rm_acquire_api_lock(sp); + if (WARN_ON(status != NV_OK)) + { + goto free_sp; + } + + status = rm_acquire_gpu_lock(sp, priv->nv); + if (WARN_ON(status != NV_OK)) + { + goto unlock_api_lock; + } + + peer_dma_dev.dev = dev; + peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask; + + priv->bar1_va_ref_count--; + + nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, priv->num_objects); + + sg_free_table(sgt); + + NV_KFREE(sgt, sizeof(struct sg_table)); + + rm_release_gpu_lock(sp, priv->nv); + +unlock_api_lock: + rm_release_api_lock(sp); + +free_sp: + nv_kmem_cache_free_stack(sp); + +unlock_priv: + mutex_unlock(&priv->lock); +} + +static void +nv_dma_buf_release( + struct dma_buf *buf +) +{ + int rc = 0; + nvidia_stack_t *sp = NULL; + nv_dma_buf_file_private_t *priv = buf->priv; + nv_state_t *nv; + + if (priv == NULL) + { + return; + } + + nv = priv->nv; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (WARN_ON(rc != 0)) + { + return; + } + + nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv); + + rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device, + priv->h_subdevice, priv->mig_info); + + 
+    nv_dma_buf_free_file_private(priv);
+    buf->priv = NULL;
+
+    nvidia_dev_put(nv->gpu_id, sp);
+
+    nv_kmem_cache_free_stack(sp);
+
+    return;
+}
+
+static int
+nv_dma_buf_mmap(
+    struct dma_buf *buf,
+    struct vm_area_struct *vma
+)
+{
+    return -ENOTSUPP;
+}
+
+#if defined(NV_DMA_BUF_OPS_HAS_KMAP) || \
+    defined(NV_DMA_BUF_OPS_HAS_MAP)
+static void*
+nv_dma_buf_kmap_stub(
+    struct dma_buf *buf,
+    unsigned long page_num
+)
+{
+    return NULL;
+}
+
+static void
+nv_dma_buf_kunmap_stub(
+    struct dma_buf *buf,
+    unsigned long page_num,
+    void *addr
+)
+{
+    return;
+}
+#endif
+
+#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC) || \
+    defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
+static void*
+nv_dma_buf_kmap_atomic_stub(
+    struct dma_buf *buf,
+    unsigned long page_num
+)
+{
+    return NULL;
+}
+
+static void
+nv_dma_buf_kunmap_atomic_stub(
+    struct dma_buf *buf,
+    unsigned long page_num,
+    void *addr
+)
+{
+    return;
+}
+#endif
+
+//
+// Note: Some of the dma-buf operations are mandatory in some kernels,
+// so stubs are added to prevent dma_buf_export() failure. The actual
+// implementations of these interfaces are not really required for the
+// export operation to work.
+//
+// The same functions are used for kmap*/map* because of this commit:
+// f9b67f0014cb: dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic
+//
+static const struct dma_buf_ops nv_dma_buf_ops = {
+    .map_dma_buf = nv_dma_buf_map,
+    .unmap_dma_buf = nv_dma_buf_unmap,
+    .release = nv_dma_buf_release,
+    .mmap = nv_dma_buf_mmap,
+#if defined(NV_DMA_BUF_OPS_HAS_KMAP)
+    .kmap = nv_dma_buf_kmap_stub,
+    .kunmap = nv_dma_buf_kunmap_stub,
+#endif
+#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC)
+    .kmap_atomic = nv_dma_buf_kmap_atomic_stub,
+    .kunmap_atomic = nv_dma_buf_kunmap_atomic_stub,
+#endif
+#if defined(NV_DMA_BUF_OPS_HAS_MAP)
+    .map = nv_dma_buf_kmap_stub,
+    .unmap = nv_dma_buf_kunmap_stub,
+#endif
+#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
+    .map_atomic = nv_dma_buf_kmap_atomic_stub,
+    .unmap_atomic = nv_dma_buf_kunmap_atomic_stub,
+#endif
+};
+
+static NV_STATUS
+nv_dma_buf_create(
+    nv_state_t *nv,
+    nv_ioctl_export_to_dma_buf_fd_t *params
+)
+{
+    int rc = 0;
+    NV_STATUS status;
+    nvidia_stack_t *sp = NULL;
+    struct dma_buf *buf = NULL;
+    nv_dma_buf_file_private_t *priv = NULL;
+    NvU32 gpu_id = nv->gpu_id;
+
+    if (!nv->dma_buf_supported)
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    if (params->index > (params->totalObjects - params->numObjects))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    priv = nv_dma_buf_alloc_file_private(params->totalObjects);
+    if (priv == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate dma-buf private\n");
+        return NV_ERR_NO_MEMORY;
+    }
+
+    priv->total_objects = params->totalObjects;
+    priv->total_size = params->totalSize;
+    priv->nv = nv;
+
+    rc = nv_kmem_cache_alloc_stack(&sp);
+    if (rc != 0)
+    {
+        status = NV_ERR_NO_MEMORY;
+        goto cleanup_priv;
+    }
+
+    rc = nvidia_dev_get(gpu_id, sp);
+    if (rc != 0)
+    {
+        status = NV_ERR_OPERATING_SYSTEM;
+        goto cleanup_sp;
+    }
+
+    status = rm_dma_buf_get_client_and_device(sp, priv->nv,
+                                              params->hClient,
+                                              &priv->h_client,
+                                              &priv->h_device,
+                                              &priv->h_subdevice,
+                                              &priv->mig_info);
+    if (status != NV_OK)
+    {
+        goto cleanup_device;
+    }
+
+    status = nv_dma_buf_dup_mem_handles(sp, priv, params);
+    if (status != NV_OK)
+    {
+        goto cleanup_client_and_device;
+    }
+
+#if (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 1)
+    {
+        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+        exp_info.ops = &nv_dma_buf_ops;
+        exp_info.size = params->totalSize;
+        exp_info.flags = O_RDWR | O_CLOEXEC;
+
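+        //
+        // The dma-buf core stores exp_info.priv as buf->priv, so every
+        // dma_buf_ops callback above can recover this file's private state:
+        //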
exp_info.priv = priv; + + buf = dma_buf_export(&exp_info); + } +#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 4) + buf = dma_buf_export(priv, &nv_dma_buf_ops, + params->totalSize, O_RDWR | O_CLOEXEC); +#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 5) + buf = dma_buf_export(priv, &nv_dma_buf_ops, + params->totalSize, O_RDWR | O_CLOEXEC, NULL); +#endif + + if (IS_ERR(buf)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to create dma-buf\n"); + + status = NV_ERR_OPERATING_SYSTEM; + + goto cleanup_handles; + } + + nv_kmem_cache_free_stack(sp); + + rc = dma_buf_fd(buf, O_RDWR | O_CLOEXEC); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf file descriptor\n"); + + // + // If dma-buf is successfully created, the dup'd handles + // clean-up should be done by the release callback. + // + dma_buf_put(buf); + + return NV_ERR_OPERATING_SYSTEM; + } + + params->fd = rc; + + return NV_OK; + +cleanup_handles: + nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv); + +cleanup_client_and_device: + rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device, + priv->h_subdevice, priv->mig_info); + +cleanup_device: + nvidia_dev_put(gpu_id, sp); + +cleanup_sp: + nv_kmem_cache_free_stack(sp); + +cleanup_priv: + nv_dma_buf_free_file_private(priv); + + return status; +} + +static NV_STATUS +nv_dma_buf_reuse( + nv_state_t *nv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ + int rc = 0; + NV_STATUS status = NV_OK; + nvidia_stack_t *sp = NULL; + struct dma_buf *buf = NULL; + nv_dma_buf_file_private_t *priv = NULL; + + buf = dma_buf_get(params->fd); + if (IS_ERR(buf)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + priv = buf->priv; + + if (priv == NULL) + { + status = NV_ERR_OPERATING_SYSTEM; + goto cleanup_dmabuf; + } + + rc = mutex_lock_interruptible(&priv->lock); + if (rc != 0) + { + status = NV_ERR_OPERATING_SYSTEM; + goto cleanup_dmabuf; + } + + if (params->index > (priv->total_objects - params->numObjects)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto unlock_priv; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + status = NV_ERR_NO_MEMORY; + goto unlock_priv; + } + + status = nv_dma_buf_dup_mem_handles(sp, priv, params); + if (status != NV_OK) + { + goto cleanup_sp; + } + +cleanup_sp: + nv_kmem_cache_free_stack(sp); + +unlock_priv: + mutex_unlock(&priv->lock); + +cleanup_dmabuf: + dma_buf_put(buf); + + return status; +} +#endif // CONFIG_DMA_SHARED_BUFFER + +NV_STATUS +nv_dma_buf_export( + nv_state_t *nv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ +#if defined(CONFIG_DMA_SHARED_BUFFER) + NV_STATUS status; + + if ((params == NULL) || + (params->totalSize == 0) || + (params->numObjects == 0) || + (params->totalObjects == 0) || + (params->numObjects > NV_DMABUF_EXPORT_MAX_HANDLES) || + (params->numObjects > params->totalObjects)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // If fd >= 0, dma-buf already exists with this fd, so get dma-buf from fd. + // If fd == -1, dma-buf is not created yet, so create it and then store + // additional handles. 
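+    //
+    // Illustrative only: a caller exporting ten objects in two batches of
+    // five would first pass fd = -1, index = 0, numObjects = 5 (creating
+    // the dma-buf and returning its fd in params->fd), then call again
+    // with that fd, index = 5, numObjects = 5 to attach the rest.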
+ // + if (params->fd == -1) + { + status = nv_dma_buf_create(nv, params); + } + else if (params->fd >= 0) + { + status = nv_dma_buf_reuse(nv, params); + } + else + { + status = NV_ERR_INVALID_ARGUMENT; + } + + return status; +#else + return NV_ERR_NOT_SUPPORTED; +#endif // CONFIG_DMA_SHARED_BUFFER +} + + + + + + + + + + + + + diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c new file mode 100644 index 0000000..b2c3ade --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c @@ -0,0 +1,1017 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#include "os_dsi_panel_props.h" + +int bl_name_len; + +static DSI_CMD *dsi_parse_command +( + const struct device_node *node, + struct property *prop, + u32 n_cmd +) +{ + DSI_CMD *dsi_cmd = NULL; + DSI_CMD *temp; + __be32 *prop_val_ptr; + u32 count = 0, i = 0; + u8 arg1 = 0, arg2 = 0, arg3 = 0; + bool long_pkt = false; + + if (n_cmd == 0) + return NULL; + + if (!prop) + return NULL; + + prop_val_ptr = prop->value; + + NV_KMALLOC(dsi_cmd, sizeof(DSI_CMD) * n_cmd); + if (dsi_cmd == NULL) { + nv_printf(NV_DBG_ERRORS, "NVRM: DSI cmd memory allocation failed\n"); + return ERR_PTR(-ENOMEM); + } + + temp = dsi_cmd; + + for (count = 0; count < n_cmd; count++, temp++) { + temp->cmd_type = be32_to_cpu(*prop_val_ptr++); + temp->pdata = NULL; + if ((temp->cmd_type == DSI_PACKET_CMD) || + (temp->cmd_type == DSI_PACKET_VIDEO_VBLANK_CMD)) { + temp->data_id = be32_to_cpu(*prop_val_ptr++); + arg1 = be32_to_cpu(*prop_val_ptr++); + arg2 = be32_to_cpu(*prop_val_ptr++); + prop_val_ptr++; /* skip ecc */ + long_pkt = (temp->data_id == DSI_GENERIC_LONG_WRITE || + temp->data_id == DSI_DCS_LONG_WRITE || + temp->data_id == DSI_NULL_PKT_NO_DATA || + temp->data_id == DSI_BLANKING_PKT_NO_DATA) ? 
+ true : false;
+            if (!long_pkt && (temp->cmd_type == DSI_PACKET_VIDEO_VBLANK_CMD))
+                arg3 = be32_to_cpu(*prop_val_ptr++);
+            if (long_pkt) {
+                temp->sp_len_dly.data_len = (arg2 << BITS_PER_BYTE) | arg1;
+                NV_KMALLOC(temp->pdata, temp->sp_len_dly.data_len);
+                if (temp->pdata == NULL) {
+                    nv_printf(NV_DBG_ERRORS, "NVRM: DSI cmd payload memory allocation failed\n");
+                    /* free the payloads of the commands parsed so far */
+                    for (temp = dsi_cmd; count > 0; count--, temp++) {
+                        if (temp->pdata != NULL)
+                            NV_KFREE(temp->pdata, temp->sp_len_dly.data_len);
+                    }
+                    NV_KFREE(dsi_cmd, sizeof(DSI_CMD) * n_cmd);
+                    return ERR_PTR(-ENOMEM);
+                }
+                for (i = 0; i < temp->sp_len_dly.data_len; i++)
+                    (temp->pdata)[i] = be32_to_cpu(*prop_val_ptr++);
+                prop_val_ptr += 2; /* skip checksum */
+            } else {
+                temp->sp_len_dly.sp.data0 = arg1;
+                temp->sp_len_dly.sp.data1 = arg2;
+                if (temp->cmd_type == DSI_PACKET_VIDEO_VBLANK_CMD)
+                    temp->club_cmd = (bool)arg3;
+            }
+        } else if (temp->cmd_type == DSI_DELAY_MS) {
+            temp->sp_len_dly.delay_ms = be32_to_cpu(*prop_val_ptr++);
+        } else if (temp->cmd_type == DSI_DELAY_US) {
+            temp->sp_len_dly.delay_us = be32_to_cpu(*prop_val_ptr++);
+        } else if (temp->cmd_type == DSI_SEND_FRAME) {
+            temp->sp_len_dly.frame_cnt = be32_to_cpu(*prop_val_ptr++);
+        } else if (temp->cmd_type == DSI_GPIO_SET) {
+            temp->sp_len_dly.gpio = be32_to_cpu(*prop_val_ptr++);
+            temp->data_id = be32_to_cpu(*prop_val_ptr++);
+        }
+    }
+
+    return dsi_cmd;
+}
+
+static const u32 *dsi_parse_pkt_seq
+(
+    struct device_node *node,
+    struct property *prop
+)
+{
+    __be32 *prop_val_ptr;
+    u32 *pkt_seq;
+    int line, i;
+
+#define LINE_STOP 0xff
+
+    if (!prop)
+        return NULL;
+
+    NV_KMALLOC(pkt_seq, (sizeof(u32) * NUMOF_PKT_SEQ));
+    if (pkt_seq == NULL) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Memory allocation for DSI pkt sequence failed\n");
+        return ERR_PTR(-ENOMEM);
+    }
+    /*
+     * NV_KMALLOC() does not zero the buffer; the loop below only ORs
+     * bits into it, so it must start out cleared.
+     */
+    memset(pkt_seq, 0, sizeof(u32) * NUMOF_PKT_SEQ);
+
+    prop_val_ptr = prop->value;
+
+    for (line = 0; line < NUMOF_PKT_SEQ; line += 2) {
+        /* compute line value from dt line */
+        for (i = 0;; i += 2) {
+            u32 cmd = be32_to_cpu(*prop_val_ptr++);
+            if (cmd == LINE_STOP)
+                break;
+            else if (cmd == PKT_LP)
+                pkt_seq[line] |= PKT_LP;
+            else {
+                u32 len = be32_to_cpu(*prop_val_ptr++);
+                if (i == 0) /* PKT_ID0 */
+                    pkt_seq[line] |= PKT_ID0(cmd) | PKT_LEN0(len);
+                if (i == 2) /* PKT_ID1 */
+                    pkt_seq[line] |= PKT_ID1(cmd) | PKT_LEN1(len);
+                if (i == 4) /* PKT_ID2 */
+                    pkt_seq[line] |= PKT_ID2(cmd) | PKT_LEN2(len);
+                if (i == 6) /* PKT_ID3 */
+                    pkt_seq[line + 1] |= PKT_ID3(cmd) | PKT_LEN3(len);
+                if (i == 8) /* PKT_ID4 */
+                    pkt_seq[line + 1] |= PKT_ID4(cmd) | PKT_LEN4(len);
+                if (i == 10) /* PKT_ID5 */
+                    pkt_seq[line + 1] |= PKT_ID5(cmd) | PKT_LEN5(len);
+            }
+        }
+    }
+
+#undef LINE_STOP
+
+    return pkt_seq;
+}
+
+static int dsi_get_panel_timings(struct device_node *np_panel, DSI_PANEL_INFO *panelInfo)
+{
+    struct device_node *np = NULL;
+    NvU32 temp;
+    DSITIMINGS *modes = &panelInfo->dsiTimings;
+
+    // Get the timings node from the nvidia,panel-timings phandle
+    np = of_parse_phandle(np_panel, "nvidia,panel-timings", 0);
+    if (!np) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: could not find panel timings node for DSI Panel\n");
+        return -ENOENT;
+    }
+
+    if (!of_property_read_u32(np, "clock-frequency", &temp)) {
+        modes->pixelClkRate = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if (!of_property_read_u32(np, "hsync-len", &temp)) {
+        modes->hSyncWidth = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if (!of_property_read_u32(np, "vsync-len", &temp)) {
+        modes->vSyncWidth = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if (!of_property_read_u32(np, "hback-porch", &temp)) {
+        modes->hBackPorch = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if (!of_property_read_u32(np, "vback-porch", &temp)) {
+        modes->vBackPorch = temp;
+    } else {
+        goto parse_mode_timings_fail;
+    }
+    if (!of_property_read_u32(np, "hactive", &temp)) {
+        modes->hActive = temp;
+    } else {
+        goto
parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "vactive", &temp)) { + modes->vActive = temp; + } else { + goto parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "hfront-porch", &temp)) { + modes->hFrontPorch = temp; + } else { + goto parse_mode_timings_fail; + } + if (!of_property_read_u32(np, "vfront-porch", &temp)) { + modes->vFrontPorch = temp; + } else { + goto parse_mode_timings_fail; + } + + of_node_put(np); + return 0U; + +parse_mode_timings_fail: + nv_printf(NV_DBG_ERRORS, "NVRM: One of the mode timings is missing in DSI Panel mode-timings!\n"); + of_node_put(np); + return -ENOENT; +} + +static int dsi_get_panel_gpio(struct device_node *node, DSI_PANEL_INFO *panel) +{ + int count; + char *label = NULL; + + // If gpios are already populated, just return + if (panel->panel_gpio_populated) + return 0; + + if (!node) { + nv_printf(NV_DBG_ERRORS, "NVRM: DSI Panel node not available\n"); + return -ENOENT; + } + +#if defined(NV_OF_GET_NAME_GPIO_PRESENT) + panel->panel_gpio[DSI_GPIO_LCD_RESET] = + of_get_named_gpio(node, "nvidia,panel-rst-gpio", 0); + + panel->panel_gpio[DSI_GPIO_PANEL_EN] = + of_get_named_gpio(node, "nvidia,panel-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_PANEL_EN_1] = + of_get_named_gpio(node, "nvidia,panel-en-1-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BL_ENABLE] = + of_get_named_gpio(node, "nvidia,panel-bl-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BL_PWM] = + of_get_named_gpio(node, "nvidia,panel-bl-pwm-gpio", 0); + + panel->panel_gpio[DSI_GPIO_TE] = + of_get_named_gpio(node, "nvidia,te-gpio", 0); + + panel->panel_gpio[DSI_GPIO_AVDD_AVEE_EN] = + of_get_named_gpio(node, "nvidia,avdd-avee-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN] = + of_get_named_gpio(node, "nvidia,vdd-1v8-lcd-en-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BRIDGE_EN_0] = + of_get_named_gpio(node, "nvidia,panel-bridge-en-0-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BRIDGE_EN_1] = + of_get_named_gpio(node, "nvidia,panel-bridge-en-1-gpio", 0); + + panel->panel_gpio[DSI_GPIO_BRIDGE_REFCLK_EN] = + of_get_named_gpio(node, "nvidia,panel-bridge-refclk-en-gpio", 0); + + + for (count = 0; count < DSI_N_GPIO_PANEL; count++) { + if (gpio_is_valid(panel->panel_gpio[count])) { + switch (count) { + case DSI_GPIO_LCD_RESET: + label = "dsi-panel-reset"; + break; + case DSI_GPIO_PANEL_EN: + label = "dsi-panel-en"; + break; + case DSI_GPIO_PANEL_EN_1: + label = "dsi-panel-en-1"; + break; + case DSI_GPIO_BL_ENABLE: + label = "dsi-panel-bl-enable"; + break; + case DSI_GPIO_BL_PWM: + label = "dsi-panel-pwm"; + break; + case DSI_GPIO_TE: + if (panel->dsiEnVRR != NV_TRUE) { + panel->panel_gpio[count] = -1; + } else { + label = "dsi-panel-te"; + panel->dsiVrrPanelSupportsTe = NV_TRUE; + } + break; + case DSI_GPIO_AVDD_AVEE_EN: + label = "dsi-panel-avdd-avee-en"; + break; + case DSI_GPIO_VDD_1V8_LCD_EN: + label = "dsi-panel-vdd-1v8-lcd-en"; + break; + case DSI_GPIO_BRIDGE_EN_0: + label = "dsi-panel-bridge-en-0"; + break; + case DSI_GPIO_BRIDGE_EN_1: + label = "dsi-panel-bridge-en-1"; + break; + case DSI_GPIO_BRIDGE_REFCLK_EN: + label = "dsi-panel-bridge-refclk-en"; + break; + default: + nv_printf(NV_DBG_INFO, "NVRM: DSI Panel invalid gpio entry at index %d\n", count); + } + if (label) { + gpio_request(panel->panel_gpio[count], label); + label = NULL; + } + } + } + + panel->panel_gpio_populated = true; + return 0U; +#else + return -EINVAL; +#endif +} + +static int parse_dsi_properties(const struct device_node *np_dsi, DSI_PANEL_INFO *dsi) +{ + u32 temp; + int ret = 0; + const __be32 
*p; + struct property *prop; + struct device_node *np_dsi_panel; + + // Get Panel Node from active-panel phandle + np_dsi_panel = of_parse_phandle(np_dsi, "nvidia,active-panel", 0); + if (np_dsi_panel == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: None of the dsi panel nodes enabled in DT!\n"); + return -EINVAL; + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,enable-hs-clk-in-lp-mode", &temp)) + dsi->enable_hs_clock_on_lp_cmd_mode = (u8)temp; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,set-max-dsi-timeout")) + dsi->set_max_timeout = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,use-legacy-dphy-core")) + dsi->use_legacy_dphy_core = true; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-refresh-rate-adj", &temp)) + dsi->refresh_rate_adj = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-n-data-lanes", &temp)) + dsi->n_data_lanes = (u8)temp; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,swap-data-lane-polarity")) + dsi->swap_data_lane_polarity = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,swap-clock-lane-polarity")) + dsi->swap_clock_lane_polarity = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,reverse-clock-polarity")) + dsi->reverse_clock_polarity = true; + + if (!of_property_read_u32_array(np_dsi_panel, + "nvidia,lane-xbar-ctrl", + dsi->lane_xbar_ctrl, dsi->n_data_lanes)) + dsi->lane_xbar_exists = true; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-type", &temp)) + { + dsi->dsiPhyType = (u8)temp; + if ((temp != DSI_DPHY) && + (temp != DSI_CPHY)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi phy type 0x%x\n", temp); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + } + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-video-burst-mode", &temp)) + { + dsi->video_burst_mode = (u8)temp; + if ((temp != DSI_VIDEO_NON_BURST_MODE) && + (temp != DSI_VIDEO_NON_BURST_MODE_WITH_SYNC_END) && + (temp != DSI_VIDEO_BURST_MODE_LOWEST_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_LOW_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_MEDIUM_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_FAST_SPEED) && + (temp != DSI_VIDEO_BURST_MODE_FASTEST_SPEED)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi video burst mode\n"); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + } + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-pixel-format", &temp)) + { + dsi->pixel_format = (u8)temp; + if ((temp != DSI_PIXEL_FORMAT_16BIT_P) && + (temp != DSI_PIXEL_FORMAT_18BIT_P) && + (temp != DSI_PIXEL_FORMAT_18BIT_NP) && + (temp != DSI_PIXEL_FORMAT_24BIT_P) && + (temp != DSI_PIXEL_FORMAT_30BIT_P) && + (temp != DSI_PIXEL_FORMAT_36BIT_P) && + (temp != DSI_PIXEL_FORMAT_8BIT_DSC) && + (temp != DSI_PIXEL_FORMAT_10BIT_DSC) && + (temp != DSI_PIXEL_FORMAT_12BIT_DSC) && + (temp != DSI_PIXEL_FORMAT_16BIT_DSC)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi pixel format\n"); + ret = -EINVAL; + goto parse_dsi_settings_fail; + } + } + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-refresh-rate", &temp)) + dsi->refresh_rate = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-rated-refresh-rate", &temp)) + dsi->rated_refresh_rate = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-virtual-channel", &temp)) + { + dsi->virtual_channel = (u8)temp; + if ((temp != DSI_VIRTUAL_CHANNEL_0) && + (temp != DSI_VIRTUAL_CHANNEL_1) && + (temp != DSI_VIRTUAL_CHANNEL_2) && + (temp != DSI_VIRTUAL_CHANNEL_3)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: invalid dsi virtual channel\n"); + ret = -EINVAL; + 
goto parse_dsi_settings_fail;
+        }
+    }
+
+    if (!of_property_read_u32(np_dsi_panel, "nvidia,dsi-instance", &temp))
+        dsi->dsi_instance = (u8)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-panel-reset", &temp))
+        dsi->panel_reset = (u8)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-te-polarity-low", &temp))
+        dsi->te_polarity_low = (u8)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-lp00-pre-panel-wakeup", &temp))
+        dsi->lp00_pre_panel_wakeup = (u8)temp;
+
+    if (of_find_property(np_dsi_panel,
+        "nvidia,dsi-bl-name", &bl_name_len))
+    {
+        const char *bl_name_str = NULL;
+
+        /*
+         * of_property_read_string() points at the string inside the device
+         * tree blob rather than copying it, so copy it into the allocated
+         * buffer; freeing a DT-internal pointer in cleanup would corrupt
+         * memory.
+         */
+        NV_KMALLOC(dsi->bl_name, sizeof(u8) * bl_name_len);
+        if ((dsi->bl_name != NULL) &&
+            !of_property_read_string(np_dsi_panel,
+                                     "nvidia,dsi-bl-name",
+                                     &bl_name_str)) {
+            memcpy(dsi->bl_name, bl_name_str, bl_name_len);
+        } else {
+            nv_printf(NV_DBG_ERRORS, "NVRM: dsi error parsing bl name\n");
+            if (dsi->bl_name != NULL)
+                NV_KFREE(dsi->bl_name, sizeof(u8) * bl_name_len);
+            dsi->bl_name = NULL;
+        }
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-ganged-type", &temp)) {
+        dsi->ganged_type = (u8)temp;
+        /* Set pixel width to 1 by default for even-odd split */
+        if (dsi->ganged_type == DSI_GANGED_SYMMETRIC_EVEN_ODD)
+            dsi->even_odd_split_width = 1;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-even-odd-pixel-width", &temp))
+        dsi->even_odd_split_width = temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-ganged-overlap", &temp)) {
+        dsi->ganged_overlap = (u16)temp;
+        if (!dsi->ganged_type)
+            nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged overlap, but no ganged type\n");
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-ganged-swap-links", &temp)) {
+        dsi->ganged_swap_links = (bool)temp;
+        if (!dsi->ganged_type)
+            nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged swapped links, but no ganged type\n");
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-ganged-write-to-all-links", &temp)) {
+        dsi->ganged_write_to_all_links = (bool)temp;
+        if (!dsi->ganged_type)
+            nv_printf(NV_DBG_ERRORS, "NVRM: specified ganged write to all links, but no ganged type\n");
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-split-link-type", &temp))
+        dsi->split_link_type = (u8)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-suspend-aggr", &temp))
+        dsi->suspend_aggr = (u8)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-edp-bridge", &temp))
+        dsi->dsi2edp_bridge_enable = (bool)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-lvds-bridge", &temp))
+        dsi->dsi2lvds_bridge_enable = (bool)temp;
+
+    of_property_for_each_u32(np_dsi_panel, "nvidia,dsi-dpd-pads", prop, p, temp)
+        dsi->dpd_dsi_pads |= (u32)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-power-saving-suspend", &temp))
+        dsi->power_saving_suspend = (bool)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-ulpm-not-support", &temp))
+        dsi->ulpm_not_supported = (bool)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-video-data-type", &temp)) {
+        dsi->video_data_type = (u8)temp;
+        if ((temp != DSI_VIDEO_TYPE_VIDEO_MODE) &&
+            (temp != DSI_VIDEO_TYPE_COMMAND_MODE))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: invalid dsi video data type\n");
+            ret = -EINVAL;
+            goto parse_dsi_settings_fail;
+        }
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-video-clock-mode", &temp)) {
+        dsi->video_clock_mode = (u8)temp;
+        if ((temp != DSI_VIDEO_CLOCK_CONTINUOUS) &&
+            (temp != DSI_VIDEO_CLOCK_TX_ONLY))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: invalid dsi video clk mode\n");
+            ret = -EINVAL;
+            goto parse_dsi_settings_fail;
+        }
+    }
+
+    if
(!of_property_read_u32(np_dsi_panel,
+        "nvidia,enable-vrr", &temp))
+        dsi->dsiEnVRR = (u8)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,vrr-force-set-te-pin", &temp))
+        dsi->dsiForceSetTePin = (u8)temp;
+
+    if (of_property_read_bool(np_dsi_panel,
+        "nvidia,send-init-cmds-early"))
+        dsi->sendInitCmdsEarly = true;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-init-cmd", &temp)) {
+        dsi->n_init_cmd = (u16)temp;
+    }
+    dsi->dsi_init_cmd =
+        dsi_parse_command(np_dsi_panel,
+                          of_find_property(np_dsi_panel,
+                                           "nvidia,dsi-init-cmd", NULL),
+                          dsi->n_init_cmd);
+    if (dsi->n_init_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_init_cmd)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI init cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-postvideo-cmd", &temp)) {
+        dsi->n_postvideo_cmd = (u16)temp;
+    }
+    dsi->dsi_postvideo_cmd =
+        dsi_parse_command(np_dsi_panel,
+                          of_find_property(np_dsi_panel,
+                                           "nvidia,dsi-postvideo-cmd", NULL),
+                          dsi->n_postvideo_cmd);
+    if (dsi->n_postvideo_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_postvideo_cmd)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI postvideo cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-suspend-cmd", &temp)) {
+        dsi->n_suspend_cmd = (u16)temp;
+    }
+    dsi->dsi_suspend_cmd =
+        dsi_parse_command(np_dsi_panel,
+                          of_find_property(np_dsi_panel,
+                                           "nvidia,dsi-suspend-cmd", NULL),
+                          dsi->n_suspend_cmd);
+    if (dsi->n_suspend_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_suspend_cmd)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI suspend cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-early-suspend-cmd", &temp)) {
+        dsi->n_early_suspend_cmd = (u16)temp;
+    }
+    dsi->dsi_early_suspend_cmd =
+        dsi_parse_command(np_dsi_panel,
+                          of_find_property(np_dsi_panel,
+                                           "nvidia,dsi-early-suspend-cmd", NULL),
+                          dsi->n_early_suspend_cmd);
+    if (dsi->n_early_suspend_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_early_suspend_cmd)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI early suspend cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-suspend-stop-stream-late", &temp)) {
+        dsi->suspend_stop_stream_late = (bool)temp;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-n-late-resume-cmd", &temp)) {
+        dsi->n_late_resume_cmd = (u16)temp;
+    }
+    dsi->dsi_late_resume_cmd =
+        dsi_parse_command(np_dsi_panel,
+                          of_find_property(np_dsi_panel,
+                                           "nvidia,dsi-late-resume-cmd", NULL),
+                          dsi->n_late_resume_cmd);
+    if (dsi->n_late_resume_cmd &&
+        IS_ERR_OR_NULL(dsi->dsi_late_resume_cmd)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI late resume cmd parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    dsi->pktSeq = dsi_parse_pkt_seq(np_dsi_panel,
+                                    of_find_property(np_dsi_panel,
+                                                     "nvidia,dsi-pkt-seq", NULL));
+    if (IS_ERR(dsi->pktSeq)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI packet seq parsing from DT failed\n");
+        ret = -EINVAL;
+        goto parse_dsi_settings_fail;
+    }
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-phy-hsdexit", &temp))
+        dsi->phyTimingNs.t_hsdexit_ns = (u16)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-phy-hstrail", &temp))
+        dsi->phyTimingNs.t_hstrail_ns = (u16)temp;
+
+    if (!of_property_read_u32(np_dsi_panel,
+        "nvidia,dsi-phy-datzero", &temp))
+        dsi->phyTimingNs.t_datzero_ns = (u16)temp;
+
+    if
(!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hsprepare", &temp)) + dsi->phyTimingNs.t_hsprepare_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hsprebegin", &temp)) + dsi->phyTimingNs.t_hsprebegin_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-hspost", &temp)) + dsi->phyTimingNs.t_hspost_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clktrail", &temp)) + dsi->phyTimingNs.t_clktrail_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkpost", &temp)) + dsi->phyTimingNs.t_clkpost_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkzero", &temp)) + dsi->phyTimingNs.t_clkzero_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-tlpx", &temp)) + dsi->phyTimingNs.t_tlpx_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkprepare", &temp)) + dsi->phyTimingNs.t_clkprepare_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-clkpre", &temp)) + dsi->phyTimingNs.t_clkpre_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-wakeup", &temp)) + dsi->phyTimingNs.t_wakeup_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-taget", &temp)) + dsi->phyTimingNs.t_taget_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-tasure", &temp)) + dsi->phyTimingNs.t_tasure_ns = (u16)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,dsi-phy-tago", &temp)) + dsi->phyTimingNs.t_tago_ns = (u16)temp; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,enable-link-compression")) + dsi->dsiDscEnable = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,enable-dual-dsc")) + dsi->dsiDscEnDualDsc = true; + + if (of_property_read_bool(np_dsi_panel, + "nvidia,enable-block-pred")) + dsi->dsiDscEnBlockPrediction = true; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,slice-height", &temp)) + dsi->dsiDscSliceHeight = (u32)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,num-of-slices", &temp)) + dsi->dsiDscNumSlices = (u8)temp; + + if (!of_property_read_u32(np_dsi_panel, + "nvidia,comp-rate", &temp)) + dsi->dsiDscBpp = (u8)temp; + + if (of_property_read_bool(np_dsi, "nvidia,dsi-csi-loopback")) + dsi->dsi_csi_loopback = 1; + + ret = dsi_get_panel_timings(np_dsi_panel, dsi); + if (ret != NV_OK) { + nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel Timings failed\n"); + goto parse_dsi_settings_fail; + } + + ret = dsi_get_panel_gpio(np_dsi_panel, dsi); + if (ret != NV_OK) { + nv_printf(NV_DBG_ERRORS, "NVRM: Parsing DSI Panel GPIOs failed\n"); + goto parse_dsi_settings_fail; + } + +parse_dsi_settings_fail: + return ret; +} + +NvBool +nv_dsi_is_panel_connected +( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device_node *np_dsi = NULL; + struct device_node *np_dsi_panel = NULL; + NvBool ret = NV_TRUE; + + np_dsi = of_get_child_by_name(nvl->dev->of_node, "dsi"); + + if (np_dsi && !of_device_is_available(np_dsi)) { + ret = NV_FALSE; + goto fail; + } + + np_dsi_panel = of_parse_phandle(np_dsi, "nvidia,active-panel", 0); + if (np_dsi_panel == NULL) + { + ret = NV_FALSE; + } + +fail: + of_node_put(np_dsi_panel); + of_node_put(np_dsi); + return ret; +} + +NV_STATUS +nv_dsi_parse_panel_props +( + nv_state_t *nv, + void *dsiPanelInfo +) +{ + int ret = NV_OK; + struct device_node *np_dsi = NULL; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + np_dsi 
= of_get_child_by_name(nvl->dev->of_node, "dsi");
+
+    if (np_dsi && !of_device_is_available(np_dsi)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: dsi node not enabled in DT\n");
+        of_node_put(np_dsi);
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    ret = parse_dsi_properties(np_dsi, (DSI_PANEL_INFO *)dsiPanelInfo);
+
+    // Drop the reference taken by of_get_child_by_name() above.
+    of_node_put(np_dsi);
+
+    return ret;
+}
+
+NV_STATUS
+nv_dsi_panel_enable
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+    int ret = NV_OK;
+    DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN], 1);
+    }
+
+    mdelay(10); // required delay: at least 1ms
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN], 1);
+    }
+
+    mdelay(20); // required delay: at least 10ms
+
+    // If backlight enable gpio is specified, set it to output direction and pull high
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE], 1);
+    }
+
+    mdelay(10);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN], 1);
+    }
+
+    mdelay(20); // required delay: at least 10ms
+
+    return ret;
+}
+
+NV_STATUS
+nv_dsi_panel_reset
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+    int ret = NV_OK;
+    int en_panel_rst = -1;
+    DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
+
+    // Assert and deassert Panel reset GPIO
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET])) {
+        en_panel_rst = panelInfo->panel_gpio[DSI_GPIO_LCD_RESET];
+    } else {
+        nv_printf(NV_DBG_ERRORS, "NVRM: DSI Panel reset gpio invalid\n");
+        goto fail;
+    }
+
+    ret = gpio_direction_output(en_panel_rst, 1);
+    if (ret < 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Deasserting DSI panel reset gpio failed\n");
+        goto fail;
+    }
+
+    mdelay(10);
+
+    ret = gpio_direction_output(en_panel_rst, 0);
+    if (ret < 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Asserting DSI panel reset gpio failed\n");
+        goto fail;
+    }
+
+    mdelay(10);
+
+    ret = gpio_direction_output(en_panel_rst, 1);
+    if (ret < 0) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Deasserting DSI panel reset gpio after asserting failed\n");
+        goto fail;
+    }
+
+fail:
+    return ret;
+}
+
+void nv_dsi_panel_disable
+(
+    nv_state_t *nv,
+    void *dsiPanelInfo
+)
+{
+    DSI_PANEL_INFO *panelInfo = dsiPanelInfo;
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_BL_ENABLE], 0);
+    }
+
+    mdelay(10);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_PANEL_EN], 0);
+    }
+
+    // Assert Panel reset GPIO
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_LCD_RESET], 0);
+    }
+
+    mdelay(20);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_AVDD_AVEE_EN], 0);
+    }
+
+    mdelay(10);
+
+    if (gpio_is_valid(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN])) {
+        gpio_direction_output(panelInfo->panel_gpio[DSI_GPIO_VDD_1V8_LCD_EN], 0);
+    }
+}
+
+static void dsi_panel_command_cleanup
+(
+    DSI_CMD *dsi_cmd,
+    u32 n_cmd
+)
+{
+    int i;
+    DSI_CMD *temp = dsi_cmd;
+
+    if (dsi_cmd == NULL)
+        return;
+
+    for (i = 0; i < n_cmd; i++, temp++) {
+        if (temp->pdata != NULL)
+            NV_KFREE(temp->pdata, temp->sp_len_dly.data_len);
+    }
+
+    NV_KFREE(dsi_cmd, sizeof(DSI_CMD) * n_cmd);
+}
+
+void nv_dsi_panel_cleanup
+(
+    nv_state_t
*nv, + void *dsiPanelInfo +) +{ + int count; + DSI_PANEL_INFO *panelInfo = dsiPanelInfo; + + dsi_panel_command_cleanup(panelInfo->dsi_init_cmd, panelInfo->n_init_cmd); + + dsi_panel_command_cleanup(panelInfo->dsi_postvideo_cmd, panelInfo->n_postvideo_cmd); + + dsi_panel_command_cleanup(panelInfo->dsi_suspend_cmd, panelInfo->n_suspend_cmd); + + dsi_panel_command_cleanup(panelInfo->dsi_early_suspend_cmd, panelInfo->n_early_suspend_cmd); + + dsi_panel_command_cleanup(panelInfo->dsi_late_resume_cmd, panelInfo->n_late_resume_cmd); + + if (panelInfo->pktSeq != NULL) { + NV_KFREE((u32 *)panelInfo->pktSeq, sizeof(u32) * NUMOF_PKT_SEQ); + } + + if (panelInfo->bl_name != NULL) { + NV_KFREE(panelInfo->bl_name, sizeof(u8) * bl_name_len); + } + + for (count = 0; count < DSI_N_GPIO_PANEL; count++) { + if (gpio_is_valid(panelInfo->panel_gpio[count])) { + gpio_free(panelInfo->panel_gpio[count]); + } + } + panelInfo->panel_gpio_populated = false; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c new file mode 100644 index 0000000..3aa684e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c @@ -0,0 +1,412 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-reg.h" +#include "nv-frontend.h" + +#if defined(MODULE_LICENSE) + +MODULE_LICENSE("Dual MIT/GPL"); + + + +#endif +#if defined(MODULE_INFO) +MODULE_INFO(supported, "external"); +#endif +#if defined(MODULE_VERSION) +MODULE_VERSION(NV_VERSION_STRING); +#endif + +#ifdef MODULE_ALIAS_CHARDEV_MAJOR +MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER); +#endif + +/* + * MODULE_IMPORT_NS() is added by commit id 8651ec01daeda + * ("module: add support for symbol namespaces") in 5.4 + */ +#if defined(MODULE_IMPORT_NS) + + +/* + * DMA_BUF namespace is added by commit id 16b0314aa746 + * ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16 + */ +MODULE_IMPORT_NS(DMA_BUF); + + +#endif + +static NvU32 nv_num_instances; + +// lock required to protect table. 
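+// nv_module_table_lock serializes module registration/unregistration and
+// device add/remove against nvidia_frontend_open(), all of which read or
+// update nv_minor_num_table and nv_num_instances.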
+struct semaphore nv_module_table_lock;
+
+// minor number table
+nvidia_module_t *nv_minor_num_table[NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX + 1];
+
+int nvidia_init_module(void);
+void nvidia_exit_module(void);
+
+/* EXPORTS to Linux Kernel */
+
+int nvidia_frontend_open(struct inode *, struct file *);
+int nvidia_frontend_close(struct inode *, struct file *);
+unsigned int nvidia_frontend_poll(struct file *, poll_table *);
+int nvidia_frontend_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+long nvidia_frontend_unlocked_ioctl(struct file *, unsigned int, unsigned long);
+long nvidia_frontend_compat_ioctl(struct file *, unsigned int, unsigned long);
+int nvidia_frontend_mmap(struct file *, struct vm_area_struct *);
+
+/* character driver entry points */
+static struct file_operations nv_frontend_fops = {
+    .owner = THIS_MODULE,
+    .poll = nvidia_frontend_poll,
+#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
+    .ioctl = nvidia_frontend_ioctl,
+#endif
+    .unlocked_ioctl = nvidia_frontend_unlocked_ioctl,
+#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
+    .compat_ioctl = nvidia_frontend_compat_ioctl,
+#endif
+    .mmap = nvidia_frontend_mmap,
+    .open = nvidia_frontend_open,
+    .release = nvidia_frontend_close,
+};
+
+/* Helper functions */
+
+static int add_device(nvidia_module_t *module, nv_linux_state_t *device, NvBool all)
+{
+    NvU32 i;
+    int rc = -1;
+
+    // look for a free minor number and assign a unique minor number to this device
+    for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
+    {
+        if (nv_minor_num_table[i] == NULL)
+        {
+            nv_minor_num_table[i] = module;
+            device->minor_num = i;
+            if (all == NV_TRUE)
+            {
+                device = device->next;
+                if (device == NULL)
+                {
+                    rc = 0;
+                    break;
+                }
+            }
+            else
+            {
+                rc = 0;
+                break;
+            }
+        }
+    }
+    return rc;
+}
+
+static int remove_device(nvidia_module_t *module, nv_linux_state_t *device)
+{
+    int rc = -1;
+
+    // remove this device from minor_number table
+    if ((device != NULL) && (nv_minor_num_table[device->minor_num] != NULL))
+    {
+        nv_minor_num_table[device->minor_num] = NULL;
+        device->minor_num = 0;
+        rc = 0;
+    }
+    return rc;
+}
+
+/* Export functions */
+
+int nvidia_register_module(nvidia_module_t *module)
+{
+    int rc = 0;
+    NvU32 ctrl_minor_num;
+
+    down(&nv_module_table_lock);
+    if (module->instance >= NV_MAX_MODULE_INSTANCES)
+    {
+        printk("NVRM: NVIDIA module instance %d registration failed.\n",
+               module->instance);
+        rc = -EINVAL;
+        goto done;
+    }
+
+    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
+    nv_minor_num_table[ctrl_minor_num] = module;
+    nv_num_instances++;
+done:
+    up(&nv_module_table_lock);
+
+    return rc;
+}
+EXPORT_SYMBOL(nvidia_register_module);
+
+int nvidia_unregister_module(nvidia_module_t *module)
+{
+    int rc = 0;
+    NvU32 ctrl_minor_num;
+
+    down(&nv_module_table_lock);
+
+    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
+    if (nv_minor_num_table[ctrl_minor_num] == NULL)
+    {
+        printk("NVRM: NVIDIA module for %d instance does not exist\n",
+               module->instance);
+        rc = -1;
+    }
+    else
+    {
+        nv_minor_num_table[ctrl_minor_num] = NULL;
+        nv_num_instances--;
+    }
+
+    up(&nv_module_table_lock);
+
+    return rc;
+}
+EXPORT_SYMBOL(nvidia_unregister_module);
+
+int nvidia_frontend_add_device(nvidia_module_t *module, nv_linux_state_t * device)
+{
+    int rc = -1;
+    NvU32 ctrl_minor_num;
+
+    down(&nv_module_table_lock);
+    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
+    if (nv_minor_num_table[ctrl_minor_num] == NULL)
+    {
+        printk("NVRM: NVIDIA module for %d
instance does not exist\n", + module->instance); + rc = -1; + } + else + { + rc = add_device(module, device, NV_FALSE); + } + up(&nv_module_table_lock); + + return rc; +} +EXPORT_SYMBOL(nvidia_frontend_add_device); + +int nvidia_frontend_remove_device(nvidia_module_t *module, nv_linux_state_t * device) +{ + int rc = 0; + NvU32 ctrl_minor_num; + + down(&nv_module_table_lock); + ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance; + if (nv_minor_num_table[ctrl_minor_num] == NULL) + { + printk("NVRM: NVIDIA module for %d instance does not exist\n", + module->instance); + rc = -1; + } + else + { + rc = remove_device(module, device); + } + up(&nv_module_table_lock); + + return rc; +} +EXPORT_SYMBOL(nvidia_frontend_remove_device); + +int nvidia_frontend_open( + struct inode *inode, + struct file *file +) +{ + int rc = -ENODEV; + nvidia_module_t *module = NULL; + + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + + down(&nv_module_table_lock); + module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->open != NULL)) + { + // Increment the reference count of module to ensure that module does + // not get unloaded if its corresponding device file is open, for + // example nvidiaN.ko should not get unloaded if /dev/nvidiaN is open. + if (!try_module_get(module->owner)) + { + up(&nv_module_table_lock); + return -ENODEV; + } + rc = module->open(inode, file); + if (rc < 0) + { + module_put(module->owner); + } + } + + up(&nv_module_table_lock); + return rc; +} + +int nvidia_frontend_close( + struct inode *inode, + struct file *file +) +{ + int rc = -ENODEV; + nvidia_module_t *module = NULL; + + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + + module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->close != NULL)) + { + rc = module->close(inode, file); + + // Decrement the reference count of module. 
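+        // This balances the try_module_get() taken in nvidia_frontend_open(),
+        // so nvidiaN.ko can be unloaded once its device file is closed.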
+ module_put(module->owner); + } + + return rc; +} + +unsigned int nvidia_frontend_poll( + struct file *file, + poll_table *wait +) +{ + unsigned int mask = 0; + struct inode *inode = NV_FILE_INODE(file); + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + nvidia_module_t *module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->poll != NULL)) + mask = module->poll(file, wait); + + return mask; +} + +int nvidia_frontend_ioctl( + struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long i_arg) +{ + int rc = -ENODEV; + nvidia_module_t *module = NULL; + + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->ioctl != NULL)) + rc = module->ioctl(inode, file, cmd, i_arg); + + return rc; +} + +long nvidia_frontend_unlocked_ioctl( + struct file *file, + unsigned int cmd, + unsigned long i_arg +) +{ + return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg); +} + +long nvidia_frontend_compat_ioctl( + struct file *file, + unsigned int cmd, + unsigned long i_arg +) +{ + return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg); +} + +int nvidia_frontend_mmap( + struct file *file, + struct vm_area_struct *vma +) +{ + int rc = -ENODEV; + struct inode *inode = NV_FILE_INODE(file); + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + nvidia_module_t *module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->mmap != NULL)) + rc = module->mmap(file, vma); + + return rc; +} + +static int __init nvidia_frontend_init_module(void) +{ + int status = 0; + + // initialise nvidia module table; + nv_num_instances = 0; + memset(nv_minor_num_table, 0, sizeof(nv_minor_num_table)); + NV_INIT_MUTEX(&nv_module_table_lock); + + status = nvidia_init_module(); + if (status < 0) + { + return status; + } + + // register char device + status = register_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend", &nv_frontend_fops); + if (status < 0) + { + printk("NVRM: register_chrdev() failed!\n"); + nvidia_exit_module(); + } + + return status; +} + +static void __exit nvidia_frontend_exit_module(void) +{ + /* + * If this is the last nvidia_module to be unregistered, cleanup and + * unregister char dev + */ + if (nv_num_instances == 1) + { + unregister_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend"); + } + + nvidia_exit_module(); +} + +module_init(nvidia_frontend_init_module); +module_exit(nvidia_frontend_exit_module); + diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h new file mode 100644 index 0000000..1ce72a0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_FRONTEND_H_ +#define _NV_FRONTEND_H_ + +#include "nvtypes.h" +#include "nv-linux.h" +#include "nv-register-module.h" + +#define NV_MAX_MODULE_INSTANCES 8 + +#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev) + +#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255 +#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \ + NV_MAX_MODULE_INSTANCES) + +#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \ + (x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN)) + +int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *); +int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *); + +extern nvidia_module_t *nv_minor_num_table[]; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c new file mode 100644 index 0000000..83f5955 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c @@ -0,0 +1,264 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#include "os_gpio.h" + +#define NV_GPIOF_DIR_IN (1 << 0) + +/*! + * @brief Mapping array of OS GPIO function ID to OS function name, + * this name is used to get GPIO number from Device Tree. 
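+ *
+ * Note: the array is indexed by the NV_OS_GPIO_FUNC_* IDs, so every new
+ * function ID needs a matching device-tree property name entry here.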
+ */ +static const char *osMapGpioFunc[] = { + [NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a", + [NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b", +}; + +NV_STATUS NV_API_CALL nv_gpio_get_pin_state +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 *pinValue +) +{ + int ret; + +#if defined(NV_GPIO_GET_VALUE_PRESENT) + ret = gpio_get_value(pinNum); +#else + nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n"); + return NV_ERR_GENERIC; +#endif + if (ret < 0) + { + nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n", + __func__, ret); + return NV_ERR_GENERIC; + } + + *pinValue = ret; + + return NV_OK; +} + +void NV_API_CALL nv_gpio_set_pin_state +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 pinValue +) +{ +#if defined(NV_GPIO_SET_VALUE_PRESENT) + gpio_set_value(pinNum, pinValue); +#else + nv_printf(NV_DBG_ERRORS, "gpio_set_value not present\n"); +#endif +} + +NV_STATUS NV_API_CALL nv_gpio_set_pin_direction +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 direction +) +{ + int ret; + + if (direction) + { +#if defined(NV_GPIO_DIRECTION_INPUT_PRESENT) + ret = gpio_direction_input(pinNum); +#else + nv_printf(NV_DBG_ERRORS, "gpio_direction_input not present\n"); + return NV_ERR_GENERIC; +#endif + } + else + { +#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT) + ret = gpio_direction_output(pinNum, 0); +#else + nv_printf(NV_DBG_ERRORS, "gpio_direction_output not present\n"); + return NV_ERR_GENERIC; +#endif + } + + if (ret) + { + nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n", + __func__, ret); + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_gpio_get_pin_direction +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 *direction +) +{ +/*! + * TODO: Commenting out until gpio_get_direction wrapper + * support is added in kernel. + */ +#if 0 + int ret; + + ret = nv_gpio_get_direction(pinNum); + if (ret) + { + nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n", + __func__, ret); + return NV_ERR_GENERIC; + } + *direction = ret; +#endif + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_gpio_get_pin_number +( + nv_state_t *nv, + NvU32 function, + NvU32 *pinNum +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc; + +#if defined(NV_OF_GET_NAME_GPIO_PRESENT) + rc = of_get_named_gpio(nvl->dev->of_node, osMapGpioFunc[function], 0); +#else + nv_printf(NV_DBG_ERRORS, "of_get_named_gpio not present\n"); + return NV_ERR_GENERIC; +#endif + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "of_get_name_gpio failed for gpio - %s, rc - %d\n", + osMapGpioFunc[function], rc); + return NV_ERR_GENERIC; + } + *pinNum = rc; + +#if defined(NV_DEVM_GPIO_REQUEST_ONE_PRESENT) + rc = devm_gpio_request_one(nvl->dev, *pinNum, NV_GPIOF_DIR_IN, + osMapGpioFunc[function]); +#else + nv_printf(NV_DBG_ERRORS, "devm_gpio_request_one not present\n"); + return NV_ERR_GENERIC; +#endif + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "request gpio failed for gpio - %s, rc - %d\n", + osMapGpioFunc[function], rc); + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status +( + nv_state_t *nv, + NvU32 pinNum, + NvU32 direction +) +{ + NvU32 irqGpioPin; + NvU32 pinValue; + + if (nv_get_current_irq_type(nv) != NV_SOC_IRQ_GPIO_TYPE) + { + return NV_FALSE; + } + + nv_get_current_irq_priv_data(nv, &irqGpioPin); + if (pinNum != irqGpioPin) + { + return NV_FALSE; + } + +#if defined(NV_GPIO_GET_VALUE_PRESENT) + pinValue = gpio_get_value(pinNum); +#else + nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n"); + return NV_FALSE; +#endif + if (pinValue != direction) + { + return 
NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
+NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
+(
+    nv_state_t * nv,
+    NvU32 pinNum,
+    NvU32 trigger_level
+)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int rc;
+    int irq_num;
+
+#if defined(NV_GPIO_TO_IRQ_PRESENT)
+    irq_num = gpio_to_irq(pinNum);
+#else
+    nv_printf(NV_DBG_ERRORS, "gpio_to_irq not present\n");
+    return NV_ERR_GENERIC;
+#endif
+
+    /*
+     * TODO: trigger_level is currently ignored. RM calls this function
+     * twice for the same GPIO pin, once each for the rising and the
+     * falling level, so the trigger is hardcoded to both rising and
+     * falling at the first registration; the second registration then
+     * fails because the interrupt is already registered. For initial
+     * GPIO support that failure is tolerated; a follow-up patch will
+     * check whether the interrupt is already registered and skip the
+     * second registration.
+     */
+
+    rc = nv_request_soc_irq(nvl, irq_num, NV_SOC_IRQ_GPIO_TYPE,
+                            (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                            IRQF_ONESHOT), pinNum);
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "IRQ registration failed for gpio - %d, rc - %d\n",
+                  pinNum, rc);
+        return NV_ERR_GENERIC;
+    }
+
+    return NV_OK;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c
new file mode 100644
index 0000000..f05e2a6
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(NV_LINUX_NVHOST_T194_H_PRESENT)
+#include <linux/nvhost.h>
+#include <linux/nvhost_t194.h>
+
+NV_STATUS nv_get_syncpoint_aperture
+(
+    NvU32 syncpointId,
+    NvU64 *physAddr,
+    NvU64 *limit,
+    NvU32 *offset
+)
+{
+    struct platform_device *host1x_pdev = NULL;
+    phys_addr_t base;
+    size_t size;
+
+#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_get_default_device
+    host1x_pdev = nvhost_get_default_device();
+    if (host1x_pdev == NULL)
+    {
+        return NV_ERR_INVALID_DEVICE;
+    }
+#endif
+
+#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_aperture && \
+    NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_byte_offset
+    nvhost_syncpt_unit_interface_get_aperture(
+        host1x_pdev, &base, &size);
+
+    *physAddr = base;
+    *limit = nvhost_syncpt_unit_interface_get_byte_offset(1);
+    *offset = nvhost_syncpt_unit_interface_get_byte_offset(syncpointId);
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+
+    return NV_OK;
+}
+#else
+
+NV_STATUS nv_get_syncpoint_aperture
+(
+    NvU32 syncpointId,
+    NvU64 *physAddr,
+    NvU64 *limit,
+    NvU32 *offset
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+#endif
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
new file mode 100644
index 0000000..5ca11c9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
@@ -0,0 +1,564 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
new file mode 100644
index 0000000..5ca11c9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
@@ -0,0 +1,564 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include <linux/i2c.h>
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+
+static int nv_i2c_algo_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num)
+{
+    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
+    unsigned int i = 0;
+    int rc;
+    NV_STATUS rmStatus = NV_OK;
+    nvidia_stack_t *sp = NULL;
+    const unsigned int supported_i2c_flags = I2C_M_RD
+#if defined(I2C_M_DMA_SAFE)
+        | I2C_M_DMA_SAFE
+#endif
+    ;
+
+    rc = nv_kmem_cache_alloc_stack(&sp);
+    if (rc != 0)
+    {
+        return rc;
+    }
+
+    rc = -EIO;
+
+    for (i = 0; ((i < (unsigned int)num) && (rmStatus == NV_OK)); i++)
+    {
+        if (msgs[i].flags & ~supported_i2c_flags)
+        {
+            /* we only support basic I2C reads/writes, reject any other commands */
+            rc = -EINVAL;
+            nv_printf(NV_DBG_ERRORS, "NVRM: Unsupported I2C flags used. (flags:0x%08x)\n",
+                      msgs[i].flags);
+            rmStatus = NV_ERR_INVALID_ARGUMENT;
+        }
+        else
+        {
+            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
+                                       (msgs[i].flags & I2C_M_RD) ?
+                                           NV_I2C_CMD_READ : NV_I2C_CMD_WRITE,
+                                       (NvU8)(msgs[i].addr & 0x7f), 0,
+                                       (NvU32)(msgs[i].len & 0xffffUL),
+                                       (NvU8 *)msgs[i].buf);
+        }
+    }
+
+    nv_kmem_cache_free_stack(sp);
+
+    return (rmStatus != NV_OK) ? rc : num;
+}
+
+static int nv_i2c_algo_smbus_xfer(
+    struct i2c_adapter *adapter,
+    u16 addr,
+    unsigned short flags,
+    char read_write,
+    u8 command,
+    int size,
+    union i2c_smbus_data *data
+)
+{
+    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
+    int rc;
+    NV_STATUS rmStatus = NV_OK;
+    nvidia_stack_t *sp = NULL;
+
+    rc = nv_kmem_cache_alloc_stack(&sp);
+    if (rc != 0)
+    {
+        return rc;
+    }
+
+    rc = -EIO;
+
+    switch (size)
+    {
+        case I2C_SMBUS_QUICK:
+            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
+                                       (read_write == I2C_SMBUS_READ) ?
+                                           NV_I2C_CMD_SMBUS_QUICK_READ :
+                                           NV_I2C_CMD_SMBUS_QUICK_WRITE,
+                                       (NvU8)(addr & 0x7f), 0, 0, NULL);
+            break;
+
+        case I2C_SMBUS_BYTE:
+            if (read_write == I2C_SMBUS_READ)
+            {
+                rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
+                                           NV_I2C_CMD_READ,
+                                           (NvU8)(addr & 0x7f), 0, 1,
+                                           (NvU8 *)&data->byte);
+            }
+            else
+            {
+                u8 data = command;
+                rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
+                                           NV_I2C_CMD_WRITE,
+                                           (NvU8)(addr & 0x7f), 0, 1,
+                                           (NvU8 *)&data);
+            }
+            break;
+
+        case I2C_SMBUS_BYTE_DATA:
+            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
+                                       (read_write == I2C_SMBUS_READ) ?
+                                           NV_I2C_CMD_SMBUS_READ :
+                                           NV_I2C_CMD_SMBUS_WRITE,
+                                       (NvU8)(addr & 0x7f), (NvU8)command, 1,
+                                       (NvU8 *)&data->byte);
+            break;
+
+        case I2C_SMBUS_WORD_DATA:
+            if (read_write != I2C_SMBUS_READ)
+            {
+                data->block[1] = (data->word & 0xff);
+                data->block[2] = (data->word >> 8);
+            }
+
+            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
+                                       (read_write == I2C_SMBUS_READ) ?
+                                           NV_I2C_CMD_SMBUS_READ :
+                                           NV_I2C_CMD_SMBUS_WRITE,
+                                       (NvU8)(addr & 0x7f), (NvU8)command, 2,
+                                       (NvU8 *)&data->block[1]);
+
+            if (read_write == I2C_SMBUS_READ)
+            {
+                data->word = ((NvU16)data->block[1]) |
+                             ((NvU16)data->block[2] << 8);
+            }
+            break;
+
+        case I2C_SMBUS_BLOCK_DATA:
+            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
+                                       (read_write == I2C_SMBUS_READ) ?
+                                           NV_I2C_CMD_SMBUS_BLOCK_READ :
+                                           NV_I2C_CMD_SMBUS_BLOCK_WRITE,
+                                       (NvU8)(addr & 0x7f), (NvU8)command,
+                                       sizeof(data->block),
+                                       (NvU8 *)data->block);
+            break;
+        default:
+            rc = -EINVAL;
+            rmStatus = NV_ERR_INVALID_ARGUMENT;
+    }
+
+    nv_kmem_cache_free_stack(sp);
+
+    return (rmStatus != NV_OK) ? rc : 0;
+}
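The I2C_SMBUS_WORD_DATA case above packs the 16-bit payload into data->block[1..2] by hand because SMBus word transactions are little-endian on the wire. A self-contained sketch of just that packing logic (helper names are illustrative, not part of this patch):

/* Pack/unpack a 16-bit SMBus word in wire order (low byte first). */
static inline void smbus_word_pack(NvU16 word, NvU8 buf[2])
{
    buf[0] = (NvU8)(word & 0xff);   /* low byte travels first */
    buf[1] = (NvU8)(word >> 8);
}

static inline NvU16 smbus_word_unpack(const NvU8 buf[2])
{
    return (NvU16)buf[0] | ((NvU16)buf[1] << 8);
}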
+
+static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter)
+{
+    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
+    u32 ret = I2C_FUNC_I2C;
+    nvidia_stack_t *sp = NULL;
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return 0;
+    }
+
+    if (rm_i2c_is_smbus_capable(sp, nv, adapter))
+    {
+        ret |= (I2C_FUNC_SMBUS_QUICK |
+                I2C_FUNC_SMBUS_BYTE |
+                I2C_FUNC_SMBUS_BYTE_DATA |
+                I2C_FUNC_SMBUS_WORD_DATA |
+                I2C_FUNC_SMBUS_BLOCK_DATA);
+    }
+
+    nv_kmem_cache_free_stack(sp);
+
+    return ret;
+}
+
+static struct i2c_algorithm nv_i2c_algo = {
+    .master_xfer   = nv_i2c_algo_master_xfer,
+    .smbus_xfer    = nv_i2c_algo_smbus_xfer,
+    .functionality = nv_i2c_algo_functionality,
+};
+
+struct i2c_adapter nv_i2c_adapter_prototype = {
+    .owner     = THIS_MODULE,
+    .algo      = &nv_i2c_algo,
+    .algo_data = NULL,
+};
+
+void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
+{
+    NV_STATUS rmStatus;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct i2c_adapter *pI2cAdapter = NULL;
+    int osstatus = 0;
+
+    // get an i2c adapter
+    rmStatus = os_alloc_mem((void **)&pI2cAdapter, sizeof(struct i2c_adapter));
+
+    if (rmStatus != NV_OK)
+        return NULL;
+
+    // fill it in with the default structure
+    os_mem_copy(pI2cAdapter, &nv_i2c_adapter_prototype, sizeof(struct i2c_adapter));
+
+    pI2cAdapter->dev.parent = nvl->dev;
+
+    if (nvl->pci_dev != NULL)
+    {
+        snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
+                 "NVIDIA i2c adapter %u at %x:%02x.%u", port, nv->pci_info.bus,
+                 nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn));
+    }
+    else
+    {
+        snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name),
+                 "NVIDIA SOC i2c adapter %u", port);
+    }
+
+    // add our data to the structure
+    pI2cAdapter->algo_data = (void *)nv;
+
+    // attempt to register with the kernel
+    osstatus = i2c_add_adapter(pI2cAdapter);
+
+    if (osstatus)
+    {
+        // free the memory and NULL the ptr
+        os_free_mem(pI2cAdapter);
+
+        pI2cAdapter = NULL;
+    }
+
+    return ((void *)pI2cAdapter);
+}
+
+void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
+{
+    struct i2c_adapter *pI2cAdapter = (struct i2c_adapter *)data;
+
+    if (pI2cAdapter)
+    {
+        // unregister with the OS and release the memory
+        i2c_del_adapter(pI2cAdapter);
+        os_free_mem(pI2cAdapter);
+    }
+}
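Taken together, the add/del entry points give RM a simple adapter lifecycle. A minimal usage sketch, assuming an illustrative caller (error handling elided; not part of this patch):

/* Illustration only: how RM-side code might drive the adapter lifecycle. */
static void i2c_adapter_example_lifecycle(nv_state_t *nv)
{
    void *adapter = nv_i2c_add_adapter(nv, 0 /* port */);

    if (adapter == NULL)
        return;

    /* ... kernel i2c clients can now reach the GPU's I2C bus ... */

    nv_i2c_del_adapter(nv, adapter);
}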
+
+static struct i2c_client * nv_i2c_register_client(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvU8 address)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct i2c_adapter *i2c_adapter;
+    struct i2c_client *client;
+    int c_index;
+    struct i2c_board_info i2c_dev_info = {
+        .type = "tegra_display",
+        .addr = address,
+    };
+
+    /* Get the adapter using the i2c port */
+    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
+    if (i2c_adapter == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)\n",
+                  linuxI2CSwPort);
+        return NULL;
+    }
+
+#if defined(NV_I2C_NEW_CLIENT_DEVICE_PRESENT)
+    client = i2c_new_client_device(i2c_adapter, &i2c_dev_info);
+#else
+    nv_printf(NV_DBG_ERRORS, "i2c_new_client_device not present\n");
+    client = NULL;
+#endif
+    if (client == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "Unable to register client for address(0x%x)\n",
+                  address);
+        i2c_put_adapter(i2c_adapter);
+        return NULL;
+    }
+    i2c_put_adapter(i2c_adapter);
+
+    /* Save the port and i2c client */
+    nvl->i2c_clients[linuxI2CSwPort].port = linuxI2CSwPort;
+    for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
+    {
+        if (nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] == NULL)
+        {
+            nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index] = client;
+            break;
+        }
+    }
+
+    return client;
+}
+
+static struct i2c_client *nv_i2c_get_registered_client(
+    nv_state_t *nv,
+    NvU32 linuxI2CSwPort,
+    NvU8 address)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int c_index;
+
+    for (c_index = 0; c_index < MAX_CLIENTS_PER_ADAPTER; c_index++)
+    {
+        struct i2c_client *client;
+
+        client = (struct i2c_client *)nvl->i2c_clients[linuxI2CSwPort].pOsClient[c_index];
+        if (client)
+        {
+            if (address == (NvU8)client->addr)
+            {
+                return client;
+            }
+        }
+        else
+        {
+            break;
+        }
+    }
+
+    return NULL;
+}
+
+NV_STATUS NV_API_CALL nv_i2c_transfer(
+    nv_state_t *nv,
+    NvU32 physicalI2CPort,
+    NvU8 address,
+    nv_i2c_msg_t *nv_msgs,
+    int num_msgs
+)
+{
+    struct i2c_client *client;
+    struct i2c_msg *msgs;
+    int count;
+    int rc;
+    NV_STATUS status = NV_OK;
+    NvU32 linuxI2CSwPort;
+
+    //
+    // RM-style slave addresses use 8-bit addressing, but Linux uses 7-bit
+    // addressing, so convert to the 7-bit format.
+    //
+    address = address >> 1;
+
+    //
+    // The Linux Tegra I2C controller driver uses logical port (controller)
+    // numbers, where the logical port number of the I2C1 (Gen1) controller
+    // is 0, the logical port number of the I2C2 (Gen2) controller is 1, and
+    // so on. But RM passes the physical I2C port (controller) number, i.e.,
+    // RM passes 1 for I2C1 (Gen1), 2 for I2C2 (Gen2), etc. So convert the
+    // physical port number to a logical port number (linuxI2CSwPort).
+    //
+    linuxI2CSwPort = physicalI2CPort - 1;
+
+    //
+    // Check that the port is valid. (linuxI2CSwPort is unsigned, so a
+    // physicalI2CPort of 0 wraps around and is also rejected here.)
+    //
+    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
+    {
+        nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    for (count = 0; count < num_msgs; count++) {
+        //
+        // RM-style slave addresses use 8-bit addressing, but Linux uses
+        // 7-bit addressing, so convert to the 7-bit format.
+        //
+        nv_msgs[count].addr = nv_msgs[count].addr >> 1;
+
+        client = nv_i2c_get_registered_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
+        if (client == NULL)
+        {
+            client = nv_i2c_register_client(nv, linuxI2CSwPort, nv_msgs[count].addr);
+            if (client == NULL)
+            {
+                nv_printf(NV_DBG_ERRORS, "i2c client register failed for addr:0x%x\n",
+                          nv_msgs[count].addr);
+                return NV_ERR_GENERIC;
+            }
+        }
+    }
+
+    msgs = kzalloc((num_msgs * sizeof(*msgs)), GFP_KERNEL);
+    if (msgs == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "i2c message allocation failed\n");
+        return NV_ERR_NO_MEMORY;
+    }
+
+    for (count = 0; count < num_msgs; count++) {
+        msgs[count].addr  = nv_msgs[count].addr;
+        msgs[count].flags = nv_msgs[count].flags;
+        msgs[count].len   = nv_msgs[count].len;
+        msgs[count].buf   = nv_msgs[count].buf;
+    }
+
+    rc = i2c_transfer(client->adapter, msgs, num_msgs);
+    if (rc != num_msgs)
+    {
+        nv_printf(NV_DBG_ERRORS, "i2c transfer failed for addr:0x%x\n",
+                  address);
+        status = NV_ERR_GENERIC;
+    }
+
+    kfree(msgs);
+
+    return status;
+}
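A hedged sketch of a combined write-then-read transaction through nv_i2c_transfer(); the caller name, register index, and port are illustrative. The addresses are given in RM's 8-bit form, since nv_i2c_transfer() shifts them down to 7-bit itself, and the nv_i2c_msg_t field names follow the copy loop above:

/* Illustration only: read one register via a write/read message pair. */
static NV_STATUS i2c_example_read_reg(nv_state_t *nv, NvU8 addr8bit,
                                      NvU8 reg, NvU8 *val)
{
    nv_i2c_msg_t msgs[2];

    /* Message 0: write the register index. */
    msgs[0].addr  = addr8bit;
    msgs[0].flags = 0;
    msgs[0].len   = 1;
    msgs[0].buf   = &reg;

    /* Message 1: read one byte back (I2C_M_RD marks a read). */
    msgs[1].addr  = addr8bit;
    msgs[1].flags = I2C_M_RD;
    msgs[1].len   = 1;
    msgs[1].buf   = val;

    return nv_i2c_transfer(nv, 1 /* physical port I2C1 */, addr8bit, msgs, 2);
}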
+
+void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int p_index, c_index;
+
+    for (p_index = 0; p_index < MAX_TEGRA_I2C_PORTS; p_index++)
+    {
+        for (c_index = 0;
+             c_index < MAX_CLIENTS_PER_ADAPTER;
+             c_index++)
+        {
+            struct i2c_client *client;
+
+            client = (struct i2c_client *)nvl->i2c_clients[p_index].pOsClient[c_index];
+            if (client)
+            {
+#if defined(NV_I2C_UNREGISTER_DEVICE_PRESENT)
+                i2c_unregister_device(client);
+#else
+                nv_printf(NV_DBG_ERRORS, "i2c_unregister_device not present\n");
+#endif
+                nvl->i2c_clients[p_index].pOsClient[c_index] = NULL;
+            }
+        }
+    }
+}
+
+NV_STATUS NV_API_CALL nv_i2c_bus_status(
+    nv_state_t *nv,
+    NvU32 physicalI2CPort,
+    NvS32 *scl,
+    NvS32 *sda)
+{
+#if NV_IS_EXPORT_SYMBOL_PRESENT_i2c_bus_status
+    NvU32 linuxI2CSwPort;
+    struct i2c_adapter *i2c_adapter;
+    int ret;
+
+    //
+    // The Linux Tegra I2C controller driver uses logical port (controller)
+    // numbers, where the logical port number of the I2C1 (Gen1) controller
+    // is 0, the logical port number of the I2C2 (Gen2) controller is 1, and
+    // so on. But RM passes the physical I2C port (controller) number, i.e.,
+    // RM passes 1 for I2C1 (Gen1), 2 for I2C2 (Gen2), etc. So convert the
+    // physical port number to a logical port number (linuxI2CSwPort).
+    //
+    linuxI2CSwPort = physicalI2CPort - 1;
+
+    //
+    // Check that the port is valid.
+    //
+    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
+    {
+        nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    /* Get the adapter using the i2c port */
+    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
+    if (i2c_adapter == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)\n",
+                  linuxI2CSwPort);
+        // This function returns NV_STATUS, so do not return NULL here.
+        return NV_ERR_GENERIC;
+    }
+
+    ret = i2c_bus_status(i2c_adapter, scl, sda);
+    // Drop the adapter reference on both the error and the success path.
+    i2c_put_adapter(i2c_adapter);
+    if (ret < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "i2c_bus_status failed:%d\n", ret);
+        return NV_ERR_GENERIC;
+    }
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+
+#else // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+
+void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
+{
+}
+
+void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
+{
+    return NULL;
+}
+
+
+NV_STATUS NV_API_CALL nv_i2c_transfer(
+    nv_state_t *nv,
+    NvU32 physicalI2CPort,
+    NvU8 address,
+    nv_i2c_msg_t *nv_msgs,
+    int num_msgs
+)
+{
+    return NV_OK;
+}
+
+void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
+{
+}
+
+NV_STATUS NV_API_CALL nv_i2c_bus_status(
+    nv_state_t *nv,
+    NvU32 physicalI2CPort,
+    NvS32 *scl,
+    NvS32 *sda)
+{
+    return NV_ERR_GENERIC;
+}
+
+#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
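A hedged sketch of using nv_i2c_bus_status() to detect a wedged bus before attempting recovery; the helper name is illustrative, and the reading of scl/sda as line levels (0 = held low) follows the function's out-parameters:

/* Illustration only: an idle I2C bus floats both lines high; a line
 * stuck at 0 suggests a wedged device that may need clock-pulse recovery. */
static NvBool i2c_example_bus_is_stuck(nv_state_t *nv, NvU32 physicalPort)
{
    NvS32 scl = -1, sda = -1;

    if (nv_i2c_bus_status(nv, physicalPort, &scl, &sda) != NV_OK)
        return NV_FALSE;  /* status unavailable; assume healthy */

    return (scl == 0) || (sda == 0);
}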
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c
new file mode 100644
index 0000000..fcf5dc4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c
@@ -0,0 +1,448 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * nv-ibmnpu.c - interface with the ibmnpu (IBM NVLink Processing Unit) "module"
+ */
+#include "nv-linux.h"
+
+#if defined(NVCPU_PPC64LE)
+#include "nv-ibmnpu.h"
+#include "nv-rsync.h"
+
+/*
+ * Temporary query to get the L1D cache block size directly from the device
+ * tree for the offline cache flush workaround, since the ppc64_caches symbol
+ * is unavailable to us.
+ */
+const NvU32 P9_L1D_CACHE_DEFAULT_BLOCK_SIZE = 0x80;
+
+#if defined(NV_OF_GET_PROPERTY_PRESENT)
+static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
+{
+    const __be32 *block_size_prop;
+    NvU32 block_size;
+
+    /*
+     * Attempt to look up the block size from the device tree. If it is
+     * unavailable, just return the default that we see on these systems.
+     * of_find_node_by_type() takes a reference on the returned node, so
+     * drop it with of_node_put() once the property has been read.
+     */
+    struct device_node *cpu = of_find_node_by_type(NULL, "cpu");
+    if (!cpu)
+    {
+        return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
+    }
+
+    block_size_prop = of_get_property(cpu, "d-cache-block-size", NULL);
+    if (!block_size_prop)
+    {
+        of_node_put(cpu);
+        return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
+    }
+
+    block_size = be32_to_cpu(*block_size_prop);
+    of_node_put(cpu);
+
+    return block_size;
+}
+#else
+static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
+{
+    return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
+}
+#endif
+
+/*
+ * GPU device memory can be exposed to the kernel as NUMA node memory via the
+ * IBMNPU devices associated with the GPU. The platform firmware will specify
+ * the parameters of where the memory lives in the system address space via
+ * firmware properties on the IBMNPU devices. These properties specify what
+ * memory can be accessed through the IBMNPU device, and the driver can online
+ * a GPU device's memory into the range accessible by its associated IBMNPU
+ * devices.
+ *
+ * This function calls over to the IBMNPU driver to query the parameters from
+ * firmware, and validates that the resulting parameters are acceptable.
+ */
+static void nv_init_ibmnpu_numa_info(nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    nv_npu_numa_info_t *npu_numa_info = &nvl->npu->numa_info;
+    struct pci_dev *npu_dev = nvl->npu->devs[0];
+    NvU64 spa, gpa, aper_size;
+
+    /*
+     * Terminology:
+     * - system physical address (spa): 47-bit NVIDIA physical address, which
+     *   is the CPU real address with the NVLink address compression scheme
+     *   already applied in firmware.
+     * - guest physical address (gpa): 56-bit physical address as seen by the
+     *   operating system. This is the base address that we should use for
+     *   onlining device memory.
+     */
+    nvl->numa_info.node_id = ibmnpu_device_get_memory_config(npu_dev, &spa, &gpa,
+                                                             &aper_size);
+    if (nvl->numa_info.node_id == NUMA_NO_NODE)
+    {
+        NV_DEV_PRINTF(NV_DBG_SETUP, nv, "No NUMA memory aperture found\n");
+        return;
+    }
+
+    /* Validate that the compressed system physical address is not too wide */
+    if (spa & (~(BIT_ULL(nv_volta_dma_addr_size) - 1)))
+    {
+        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
+            "Invalid NUMA memory system pa 0x%llx"
+            " on IBM-NPU device %04x:%02x:%02x.%u\n",
+            spa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
+            NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
+        goto invalid_numa_config;
+    }
+
+    /*
+     * Validate that the guest physical address is aligned to 128GB.
+     * This alignment requirement comes from the Volta address space
+     * size on POWER9.
+ */ + if (!IS_ALIGNED(gpa, BIT_ULL(nv_volta_addr_space_width))) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Invalid alignment in NUMA memory guest pa 0x%llx" + " on IBM-NPU device %04x:%02x:%02x.%u\n", + gpa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev), + NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn)); + goto invalid_numa_config; + } + + /* Validate that the aperture can map all of the device's framebuffer */ + if (aper_size < nv->fb->size) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Insufficient NUMA memory aperture size 0x%llx" + " on IBM-NPU device %04x:%02x:%02x.%u (0x%llx required)\n", + aper_size, NV_PCI_DOMAIN_NUMBER(npu_dev), + NV_PCI_BUS_NUMBER(npu_dev), NV_PCI_SLOT_NUMBER(npu_dev), + PCI_FUNC(npu_dev->devfn), nv->fb->size); + goto invalid_numa_config; + } + + npu_numa_info->compr_sys_phys_addr = spa; + npu_numa_info->guest_phys_addr = gpa; + + if (NVreg_EnableUserNUMAManagement) + { + NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE); + } + else + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "User-mode NUMA onlining disabled.\n"); + nvl->numa_info.node_id = NUMA_NO_NODE; + } + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "NUMA memory aperture: " + "[spa = 0x%llx, gpa = 0x%llx, aper_size = 0x%llx]\n", + spa, gpa, aper_size); + + /* Get the CPU's L1D cache block size for offlining cache flush */ + npu_numa_info->l1d_cache_block_size = nv_ibm_get_cpu_l1d_cache_block_size(); + + return; + +invalid_numa_config: + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "NUMA memory aperture disabled due to invalid firmware configuration\n"); + nvl->numa_info.node_id = NUMA_NO_NODE; +} + +void nv_init_ibmnpu_info(nv_state_t *nv) +{ +#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct pci_dev *npu_dev = pnv_pci_get_npu_dev(nvl->pci_dev, 0); + NvU8 dev_count; + + if (!npu_dev) + { + return; + } + + if (os_alloc_mem((void **)&nvl->npu, sizeof(nv_ibmnpu_info_t)) != NV_OK) + { + return; + } + + os_mem_set(nvl->npu, 0, sizeof(nv_ibmnpu_info_t)); + + /* Find any other IBMNPU devices attached to this GPU */ + for (nvl->npu->devs[0] = npu_dev, dev_count = 1; + dev_count < NV_MAX_ATTACHED_IBMNPUS; dev_count++) + { + nvl->npu->devs[dev_count] = pnv_pci_get_npu_dev(nvl->pci_dev, dev_count); + if (!nvl->npu->devs[dev_count]) + { + break; + } + } + + nvl->npu->dev_count = dev_count; + + /* + * If we run out of space for IBMNPU devices, NV_MAX_ATTACHED_IBMNPUS will + * need to be bumped. 
+ */ + WARN_ON((dev_count == NV_MAX_ATTACHED_IBMNPUS) && + pnv_pci_get_npu_dev(nvl->pci_dev, dev_count)); + + ibmnpu_device_get_genregs_info(npu_dev, &nvl->npu->genregs); + + if (nvl->npu->genregs.size > 0) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "IBM-NPU device %04x:%02x:%02x.%u associated with GPU " + " has a generation register space 0x%llx-0x%llx\n", + NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev), + NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn), + nvl->npu->genregs.start_addr, + nvl->npu->genregs.start_addr + nvl->npu->genregs.size - 1); + } + else + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "IBM-NPU device %04x:%02x:%02x.%u associated with GPU " + "does not support generation registers\n", + NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev), + NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn)); + } + + nv_init_ibmnpu_numa_info(nv); +#endif +} + +void nv_destroy_ibmnpu_info(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu != NULL) + { + os_free_mem(nvl->npu); + nvl->npu = NULL; + } +} + +int nv_init_ibmnpu_devices(nv_state_t *nv) +{ + NvU8 i; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (!nvl->npu) + { + return 0; + } + + for (i = 0; i < nvl->npu->dev_count; i++) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "Initializing IBM-NPU device %04x:%02x:%02x.%u\n", + NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]), + NV_PCI_BUS_NUMBER(nvl->npu->devs[i]), + NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]), + PCI_FUNC(nvl->npu->devs[i]->devfn)); + + if (ibmnpu_init_device(nvl->npu->devs[i]) != NVL_SUCCESS) + { + nv_unregister_ibmnpu_devices(nv); + return -EIO; + } + + nvl->npu->initialized_dev_count++; + } + + return 0; +} + +void nv_unregister_ibmnpu_devices(nv_state_t *nv) +{ + NvU8 i; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (!nvl->npu) + { + return; + } + + for (i = 0; i < nvl->npu->initialized_dev_count; i++) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "Unregistering IBM-NPU device %04x:%02x:%02x.%u\n", + NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]), + NV_PCI_BUS_NUMBER(nvl->npu->devs[i]), + NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]), + PCI_FUNC(nvl->npu->devs[i]->devfn)); + + ibmnpu_unregister_device(nvl->npu->devs[i]); + } + + nvl->npu->initialized_dev_count = 0; +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr, + NvU64 *size, void **device) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL || nvl->npu->genregs.size == 0) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (addr) + { + *addr = nvl->npu->genregs.start_addr; + } + + if (size) + { + *size = nvl->npu->genregs.size; + } + + if (device) + { + *device = (void*)nvl->npu->devs[0]; + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, + NvBool *mode) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL || nvl->npu->genregs.size == 0) + { + return NV_ERR_NOT_SUPPORTED; + } + + *mode = nv_get_rsync_relaxed_ordering_mode(nv); + + return NV_OK; +} + +void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL || nvl->npu->genregs.size == 0) + { + return; + } + + nv_wait_for_rsync(nv); +} + +int nv_get_ibmnpu_chip_id(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL) + { + return -1; + } + + return ibmnpu_device_get_chip_id(nvl->npu->devs[0]); +} + +void NV_API_CALL 
nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 offset, cbsize; + + /* + * The range is commonly an ioremap()ed mapping of the GPU's ATS range and + * needs to be compared against the created mappings. Alternatively, kernel + * page tables can be dumped through sysfs if CONFIG_PPC_PTDUMP is enabled. + */ + NV_DEV_PRINTF(NV_DBG_INFO, nv, + "Flushing CPU virtual range [0x%llx, 0x%llx)\n", + cpu_virtual, cpu_virtual + size); + + cbsize = nvl->npu->numa_info.l1d_cache_block_size; + + CACHE_FLUSH(); + + /* Force eviction of any cache lines from the NUMA-onlined region. */ + for (offset = 0; offset < size; offset += cbsize) + { + asm volatile("dcbf %0,%1" :: "r" (cpu_virtual), "r" (offset) : "memory"); + + /* Reschedule if necessary to avoid lockup warnings */ + cond_resched(); + } + + CACHE_FLUSH(); +} + +#else + +void nv_init_ibmnpu_info(nv_state_t *nv) +{ +} + +void nv_destroy_ibmnpu_info(nv_state_t *nv) +{ +} + +int nv_init_ibmnpu_devices(nv_state_t *nv) +{ + return 0; +} + +void nv_unregister_ibmnpu_devices(nv_state_t *nv) +{ +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr, + NvU64 *size, void **device) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, + NvBool *mode) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv) +{ +} + +int nv_get_ibmnpu_chip_id(nv_state_t *nv) +{ + return -1; +} + +void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 virtual, NvU64 size) +{ +} + +void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv) +{ +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h new file mode 100644 index 0000000..413b16b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_IBMNPU_H_ +#define _NV_IBMNPU_H_ + +#if defined(NVCPU_PPC64LE) + +#include "ibmnpu_linux.h" + +#define NV_MAX_ATTACHED_IBMNPUS 6 + +typedef struct nv_npu_numa_info +{ + /* + * 47-bit NVIDIA 'system physical address': the hypervisor real 56-bit + * address with NVLink address compression scheme applied. + */ + NvU64 compr_sys_phys_addr; + + /* + * 56-bit NVIDIA 'guest physical address'/host virtual address. On + * unvirtualized systems, applying the NVLink address compression scheme + * to this address should be the same as compr_sys_phys_addr. + */ + NvU64 guest_phys_addr; + + /* + * L1 data cache block size on P9 - needed to manually flush/invalidate the + * NUMA region from the CPU caches after offlining. + */ + NvU32 l1d_cache_block_size; +} nv_npu_numa_info_t; + +struct nv_ibmnpu_info +{ + NvU8 dev_count; + NvU8 initialized_dev_count; + struct pci_dev *devs[NV_MAX_ATTACHED_IBMNPUS]; + ibmnpu_genregs_info_t genregs; + nv_npu_numa_info_t numa_info; +}; + +/* + * TODO: These parameters are specific to Volta/P9 configurations, and may + * need to be determined dynamically in the future. + */ +static const NvU32 nv_volta_addr_space_width = 37; +static const NvU32 nv_volta_dma_addr_size = 47; + +#endif + +void nv_init_ibmnpu_info(nv_state_t *nv); +void nv_destroy_ibmnpu_info(nv_state_t *nv); +int nv_init_ibmnpu_devices(nv_state_t *nv); +void nv_unregister_ibmnpu_devices(nv_state_t *nv); +int nv_get_ibmnpu_chip_id(nv_state_t *nv); +void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv); + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c new file mode 100644 index 0000000..44d420d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c @@ -0,0 +1,702 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || IS_ENABLED(CONFIG_TEGRA_BPMP)
+#include <soc/tegra/bpmp-abi.h>
+#endif
+
+#if IS_ENABLED(CONFIG_TEGRA_BPMP)
+#include <soc/tegra/bpmp.h>
+#elif defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
+#include <soc/tegra/tegra_bpmp.h>
+#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
+
+#if defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
+#include <dt-bindings/interconnect/tegra_icc_id.h>
+#endif
+
+#ifdef NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT
+#include <linux/platform/tegra/mc_utils.h>
+#endif
+
+//
+// IMP requires information from various BPMP and MC driver functions. The
+// macro below checks that all of the required functions are present.
+//
+#define IMP_SUPPORT_FUNCTIONS_PRESENT \
+    NV_IS_EXPORT_SYMBOL_PRESENT_dram_clk_to_mc_clk && \
+    NV_IS_EXPORT_SYMBOL_PRESENT_get_dram_num_channels && \
+    NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dram_types && \
+    (defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || \
+     IS_ENABLED(CONFIG_TEGRA_BPMP)) && \
+    defined(NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT)
+
+//
+// Also create a macro to check if all the required ICC symbols are present.
+// DT endpoints are defined in dt-bindings/interconnect/tegra_icc_id.h.
+//
+#define ICC_SUPPORT_FUNCTIONS_PRESENT \
+    defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
+
+#if IMP_SUPPORT_FUNCTIONS_PRESENT
+static struct mrq_emc_dvfs_latency_response latency_table;
+static struct mrq_emc_dvfs_emchub_response emchub_table;
+static struct cmd_iso_client_get_max_bw_response max_bw_table;
+
+/*!
+ * @brief Converts the MC driver dram type to RM format
+ *
+ * The MC driver's tegra_dram_types() function returns the dram type as an
+ * enum. We convert it to an NvU32 for better ABI compatibility when stored in
+ * the TEGRA_IMP_IMPORT_DATA structure, which is shared between various
+ * software components.
+ *
+ * @param[in] dram_type Dram type (DRAM_TYPE_LPDDRxxx format).
+ *
+ * @returns dram type (TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDRxxx format).
+ */
+static inline NvU32
+nv_imp_convert_dram_type_to_rm_format
+(
+    enum dram_types dram_type
+)
+{
+    NvU32 rm_dram_type;
+
+    switch (dram_type)
+    {
+        case DRAM_TYPE_LPDDR4_16CH_ECC_1RANK:
+        case DRAM_TYPE_LPDDR4_16CH_ECC_2RANK:
+        case DRAM_TYPE_LPDDR4_8CH_ECC_1RANK:
+        case DRAM_TYPE_LPDDR4_8CH_ECC_2RANK:
+        case DRAM_TYPE_LPDDR4_4CH_ECC_1RANK:
+        case DRAM_TYPE_LPDDR4_4CH_ECC_2RANK:
+        case DRAM_TYPE_LPDDR4_16CH_1RANK:
+        case DRAM_TYPE_LPDDR4_16CH_2RANK:
+        case DRAM_TYPE_LPDDR4_8CH_1RANK:
+        case DRAM_TYPE_LPDDR4_8CH_2RANK:
+        case DRAM_TYPE_LPDDR4_4CH_1RANK:
+        case DRAM_TYPE_LPDDR4_4CH_2RANK:
+            rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR4;
+            break;
+        case DRAM_TYPE_LPDDR5_16CH_ECC_1RANK:
+        case DRAM_TYPE_LPDDR5_16CH_ECC_2RANK:
+        case DRAM_TYPE_LPDDR5_8CH_ECC_1RANK:
+        case DRAM_TYPE_LPDDR5_8CH_ECC_2RANK:
+        case DRAM_TYPE_LPDDR5_4CH_ECC_1RANK:
+        case DRAM_TYPE_LPDDR5_4CH_ECC_2RANK:
+        case DRAM_TYPE_LPDDR5_16CH_1RANK:
+        case DRAM_TYPE_LPDDR5_16CH_2RANK:
+        case DRAM_TYPE_LPDDR5_8CH_1RANK:
+        case DRAM_TYPE_LPDDR5_8CH_2RANK:
+        case DRAM_TYPE_LPDDR5_4CH_1RANK:
+        case DRAM_TYPE_LPDDR5_4CH_2RANK:
+            rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR5;
+            break;
+        default:
+            rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_UNKNOWN;
+            break;
+    }
+
+    return rm_dram_type;
+}
+#endif // IMP_SUPPORT_FUNCTIONS_PRESENT
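The three BPMP queries in the next function all follow the same tegra_bpmp_transfer() request/response pattern; a condensed, hedged sketch of that round trip under the CONFIG_TEGRA_BPMP path (MRQ number and payload types vary per query; the helper name is illustrative):

#if IS_ENABLED(CONFIG_TEGRA_BPMP)
/* Generic MRQ round trip, assuming 'dev' is this device's struct device. */
static int bpmp_example_query(struct device *dev, unsigned int mrq,
                              void *resp, size_t resp_size)
{
    struct tegra_bpmp *bpmp = tegra_bpmp_get(dev);
    struct tegra_bpmp_message msg;
    int rc;

    if (IS_ERR(bpmp))
        return PTR_ERR(bpmp);

    memset(&msg, 0, sizeof(msg));
    msg.mrq = mrq;            /* e.g. MRQ_EMC_DVFS_LATENCY */
    msg.rx.data = resp;       /* the response lands here */
    msg.rx.size = resp_size;

    rc = tegra_bpmp_transfer(bpmp, &msg);
    tegra_bpmp_put(bpmp);     /* drop the reference taken above */
    return rc;
}
#endif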
+
+/*!
+ * @brief Collects IMP-relevant BPMP data and saves it for later
+ *
+ * @param[in] nvl OS-specific device state
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_GENERIC if the BPMP API returns an error,
+ *          NV_ERR_MISSING_TABLE_ENTRY if the latency table has no entries,
+ *          NV_ERR_INVALID_DATA if the number of clock entries in the latency
+ *          table does not match the number of entries in the emchub table, or
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available.
+ */
+NV_STATUS
+nv_imp_get_bpmp_data
+(
+    nv_linux_state_t *nvl
+)
+{
+#if IMP_SUPPORT_FUNCTIONS_PRESENT
+    NV_STATUS status = NV_OK;
+    int rc;
+    int i;
+    NvBool bApiTableInvalid = NV_FALSE;
+    static const struct iso_max_bw dummy_iso_bw_pairs[] =
+    { {  204000U,  1472000U },
+      {  533000U,  3520000U },
+      {  665000U,  4352000U },
+      {  800000U,  5184000U },
+      { 1066000U,  6784000U },
+      { 1375000U,  8704000U },
+      { 1600000U, 10112000U },
+      { 1866000U, 11712000U },
+      { 2133000U, 13376000U },
+      { 2400000U, 15040000U },
+      { 2750000U, 17152000U },
+      { 3000000U, 18688000U },
+      { 3200000U, 20800000U }
+    };
+#if IS_ENABLED(CONFIG_TEGRA_BPMP)
+    struct tegra_bpmp *bpmp;
+    struct tegra_bpmp_message msg;
+    struct mrq_iso_client_request iso_client_request;
+
+    bpmp = tegra_bpmp_get(nvl->dev);
+    if (IS_ERR(bpmp))
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Error getting bpmp struct: %ld\n",
+                  PTR_ERR(bpmp));
+        return NV_ERR_GENERIC;
+    }
+    // Get the table of dramclk / DVFS latency pairs.
+    memset(&msg, 0, sizeof(msg));
+    msg.mrq = MRQ_EMC_DVFS_LATENCY;
+    msg.tx.data = NULL;
+    msg.tx.size = 0;
+    msg.rx.data = &latency_table;
+    msg.rx.size = sizeof(latency_table);
+
+    rc = tegra_bpmp_transfer(bpmp, &msg);
+#else
+    // Get the table of dramclk / DVFS latency pairs.
+    rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_LATENCY,
+                                 NULL,
+                                 0,
+                                 &latency_table,
+                                 sizeof(latency_table));
+#endif
+    if (rc != 0)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "MRQ_EMC_DVFS_LATENCY returns error code %d\n", rc);
+        status = NV_ERR_GENERIC;
+        goto Cleanup;
+    }
+
+    nv_printf(NV_DBG_INFO,
+              "MRQ_EMC_DVFS_LATENCY table size = %u\n",
+              latency_table.num_pairs);
+
+    if (latency_table.num_pairs == 0U)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "MRQ_EMC_DVFS_LATENCY table has no entries\n");
+        status = NV_ERR_MISSING_TABLE_ENTRY;
+        goto Cleanup;
+    }
+
+    // Get the table of dramclk / emchubclk pairs.
+#if IS_ENABLED(CONFIG_TEGRA_BPMP)
+    memset(&msg, 0, sizeof(msg));
+    msg.mrq = MRQ_EMC_DVFS_EMCHUB;
+    msg.tx.data = NULL;
+    msg.tx.size = 0;
+    msg.rx.data = &emchub_table;
+    msg.rx.size = sizeof(emchub_table);
+
+    rc = tegra_bpmp_transfer(bpmp, &msg);
+#else
+    rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_EMCHUB,
+                                 NULL,
+                                 0,
+                                 &emchub_table,
+                                 sizeof(emchub_table));
+#endif
+    if (rc != 0)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "MRQ_EMC_DVFS_EMCHUB returns error code %d\n", rc);
+        status = NV_ERR_GENERIC;
+        goto Cleanup;
+    }
+
+    nv_printf(NV_DBG_INFO,
+              "MRQ_EMC_DVFS_EMCHUB table size = %u\n",
+              emchub_table.num_pairs);
+
+    if (latency_table.num_pairs != emchub_table.num_pairs)
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "MRQ_EMC_DVFS_LATENCY table size (%u) does not match MRQ_EMC_DVFS_EMCHUB table size (%u)\n",
+                  latency_table.num_pairs,
+                  emchub_table.num_pairs);
+        status = NV_ERR_INVALID_DATA;
+        goto Cleanup;
+    }
+
+    // Get the table of dramclk / max ISO BW pairs.
+#if IS_ENABLED(CONFIG_TEGRA_BPMP) + memset(&iso_client_request, 0, sizeof(iso_client_request)); + iso_client_request.cmd = CMD_ISO_CLIENT_GET_MAX_BW; + iso_client_request.max_isobw_req.id = TEGRA_ICC_DISPLAY; + msg.mrq = MRQ_ISO_CLIENT; + msg.tx.data = &iso_client_request; + msg.tx.size = sizeof(iso_client_request); + msg.rx.data = &max_bw_table; + msg.rx.size = sizeof(max_bw_table); + + rc = tegra_bpmp_transfer(bpmp, &msg); +#else + // Maybe we don't need the old implementation "else" clause cases anymore. + NV_ASSERT(NV_FALSE); +#endif + if ((rc != 0) || (max_bw_table.num_pairs == 0U)) + { + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, + "MRQ_ISO_CLIENT returns error code %d\n", rc); + } + else + { + nv_printf(NV_DBG_ERRORS, + "CMD_ISO_CLIENT_GET_MAX_BW table does not contain any entries\n"); + } + bApiTableInvalid = NV_TRUE; + } + else + { + // + // Check for entries with ISO BW = 0. It's possible that one entry may + // be zero, but they should not all be zero. (On simulation, due to bug + // 3379796, the API is currently not working; it returns 13 entries, + // each with ISO BW = 0.) + // + bApiTableInvalid = NV_TRUE; + for (i = 0; i < max_bw_table.num_pairs; i++) + { + if (max_bw_table.pairs[i].iso_bw != 0U) + { + bApiTableInvalid = NV_FALSE; + break; + } + } + } + if (bApiTableInvalid) + { + // + // If the table is not returned correctly, for now, fill in a dummy + // table. + // + nv_printf(NV_DBG_ERRORS, + "Creating dummy CMD_ISO_CLIENT_GET_MAX_BW table\n"); + max_bw_table.num_pairs = sizeof(dummy_iso_bw_pairs) / + sizeof(dummy_iso_bw_pairs[0]); + for (i = 0; i < max_bw_table.num_pairs; i++) + { + max_bw_table.pairs[i].freq = dummy_iso_bw_pairs[i].freq; + max_bw_table.pairs[i].iso_bw = dummy_iso_bw_pairs[i].iso_bw; + } + } + nv_printf(NV_DBG_INFO, + "CMD_ISO_CLIENT_GET_MAX_BW table size = %u\n", + max_bw_table.num_pairs); + +Cleanup: +#if IS_ENABLED(CONFIG_TEGRA_BPMP) + tegra_bpmp_put(bpmp); +#endif + return status; +#else // IMP_SUPPORT_FUNCTIONS_PRESENT + return NV_ERR_NOT_SUPPORTED; +#endif +} + +/*! + * @brief Returns IMP-relevant data collected from other modules + * + * @param[out] tegra_imp_import_data Structure to receive the data + * + * @returns NV_OK if successful, + * NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is + * too small, + * NV_ERR_INVALID_DATA if the latency table has different mclk + * frequencies, compared with the emchub table, or + * NV_ERR_NOT_SUPPORTED if the functionality is not available. + */ +NV_STATUS NV_API_CALL +nv_imp_get_import_data +( + TEGRA_IMP_IMPORT_DATA *tegra_imp_import_data +) +{ +#if IMP_SUPPORT_FUNCTIONS_PRESENT + NvU32 i; + NvU32 bwTableIndex = 0U; + NvU32 dram_clk_freq_khz; + enum dram_types dram_type; + + tegra_imp_import_data->num_dram_clk_entries = latency_table.num_pairs; + if (ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance) < + latency_table.num_pairs) + { + nv_printf(NV_DBG_ERRORS, + "ERROR: TEGRA_IMP_IMPORT_DATA struct needs to have at least " + "%d dram_clk_instance entries, but only %d are allocated\n", + latency_table.num_pairs, + ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance)); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // + // Copy data that we collected earlier in the BPMP tables into the caller's + // IMP import structure. + // + for (i = 0U; i < latency_table.num_pairs; i++) + { + dram_clk_freq_khz = latency_table.pairs[i].freq; + // + // For each dramclk frequency, we get some information from the EMCHUB + // table and some information from the LATENCY table. 
We expect both
+        // tables to have entries for the same dramclk frequencies.
+        //
+        if (dram_clk_freq_khz != emchub_table.pairs[i].freq)
+        {
+            nv_printf(NV_DBG_ERRORS,
+                      "MRQ_EMC_DVFS_LATENCY index #%d dramclk freq (%d KHz) does not match "
+                      "MRQ_EMC_DVFS_EMCHUB index #%d dramclk freq (%d KHz)\n",
+                      i, latency_table.pairs[i].freq,
+                      i, emchub_table.pairs[i].freq);
+            return NV_ERR_INVALID_DATA;
+        }
+
+        // Copy a few values to the caller's table.
+        tegra_imp_import_data->dram_clk_instance[i].dram_clk_freq_khz =
+            dram_clk_freq_khz;
+        tegra_imp_import_data->dram_clk_instance[i].switch_latency_ns =
+            latency_table.pairs[i].latency;
+        tegra_imp_import_data->dram_clk_instance[i].mc_clk_khz =
+            dram_clk_to_mc_clk(dram_clk_freq_khz / 1000U) * 1000U;
+
+        // MC hubclk is 1/2 of scf clk, which is the same as EMCHUB clk.
+        tegra_imp_import_data->dram_clk_instance[i].mchub_clk_khz =
+            emchub_table.pairs[i].hub_freq / 2U;
+
+        //
+        // The ISO BW table may have more entries than the number of dramclk
+        // frequencies supported on the current chip (i.e., more entries than
+        // we have in the EMCHUB and LATENCY tables). For each dramclk entry
+        // that we are filling out, search through the ISO BW table to find
+        // the largest dramclk less than or equal to the dramclk frequency for
+        // index "i", and use that ISO BW entry. (We assume all tables have
+        // their entries in order of increasing dramclk frequency.)
+        //
+        // Note: Some of the dramclk frequencies in the ISO BW table have been
+        // observed to be "rounded down" (e.g., 665000 KHz instead of 665600
+        // KHz).
+        //
+        while ((bwTableIndex + 1U < max_bw_table.num_pairs) &&
+               (dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex + 1U].freq))
+        {
+            nv_printf(NV_DBG_INFO,
+                      "Max ISO BW table: index %u, dramclk = %u KHz, max ISO BW = %u KB/sec\n",
+                      bwTableIndex,
+                      max_bw_table.pairs[bwTableIndex].freq,
+                      max_bw_table.pairs[bwTableIndex].iso_bw);
+            bwTableIndex++;
+        }
+        if (dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex].freq)
+        {
+            nv_printf(NV_DBG_INFO,
+                      "For dramclk = %u KHz, setting max ISO BW = %u KB/sec\n",
+                      dram_clk_freq_khz,
+                      max_bw_table.pairs[bwTableIndex].iso_bw);
+            tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps =
+                max_bw_table.pairs[bwTableIndex].iso_bw;
+        }
+        else
+        {
+            //
+            // Something went wrong. Maybe the ISO BW table doesn't have any
+            // entries with a dramclk frequency as small as the frequency in
+            // the EMCHUB and LATENCY tables, or maybe the entries are out of
+            // order.
+            //
+            nv_printf(NV_DBG_ERRORS,
+                      "Couldn't get max ISO BW for dramclk = %u KHz\n",
+                      dram_clk_freq_khz);
+            return NV_ERR_INVALID_DATA;
+        }
+    }
+
+    dram_type = tegra_dram_types();
+
+    tegra_imp_import_data->dram_type =
+        nv_imp_convert_dram_type_to_rm_format(dram_type);
+
+    tegra_imp_import_data->num_dram_channels = get_dram_num_channels();
+
+    // Record the overall maximum possible ISO BW.
+    i = latency_table.num_pairs - 1U;
+    tegra_imp_import_data->max_iso_bw_kbps =
+        tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps;
+
+    return NV_OK;
+#else // IMP_SUPPORT_FUNCTIONS_PRESENT
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+/*!
+ * @brief Tells BPMP whether or not RFL is valid
+ *
+ * Display HW generates an ok_to_switch signal which asserts when mempool
+ * occupancy is high enough to be able to turn off memory long enough to
+ * execute a dramclk frequency switch without underflowing display output.
+ * ok_to_switch drives the RFL ("request for latency") signal in the memory
+ * unit, and the switch sequencer waits for this signal to go active before
+ * starting a dramclk switch. However, if the signal is not valid (e.g., if
+ * display HW or SW has not been initialized yet), the switch sequencer ignores
+ * the signal. This API tells BPMP whether or not the signal is valid.
+ *
+ * @param[in] nv Per GPU Linux state
+ * @param[in] bEnable True if RFL will be valid; false if invalid
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
+ *          NV_ERR_GENERIC if some other kind of error occurred.
+ */
+NV_STATUS NV_API_CALL
+nv_imp_enable_disable_rfl
+(
+    nv_state_t *nv,
+    NvBool bEnable
+)
+{
+    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
+#if IMP_SUPPORT_FUNCTIONS_PRESENT
+#if IS_ENABLED(CONFIG_TEGRA_BPMP)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct tegra_bpmp *bpmp = tegra_bpmp_get(nvl->dev);
+    struct tegra_bpmp_message msg;
+    struct mrq_emc_disp_rfl_request emc_disp_rfl_request;
+    int rc;
+
+    // Guard against a failed lookup before using the bpmp handle.
+    if (IS_ERR(bpmp))
+    {
+        return NV_ERR_GENERIC;
+    }
+
+    memset(&emc_disp_rfl_request, 0, sizeof(emc_disp_rfl_request));
+    emc_disp_rfl_request.mode = bEnable ? EMC_DISP_RFL_MODE_ENABLED :
+                                          EMC_DISP_RFL_MODE_DISABLED;
+    msg.mrq = MRQ_EMC_DISP_RFL;
+    msg.tx.data = &emc_disp_rfl_request;
+    msg.tx.size = sizeof(emc_disp_rfl_request);
+    msg.rx.data = NULL;
+    msg.rx.size = 0;
+
+    rc = tegra_bpmp_transfer(bpmp, &msg);
+    // Drop the reference taken by tegra_bpmp_get().
+    tegra_bpmp_put(bpmp);
+    if (rc == 0)
+    {
+        nv_printf(NV_DBG_INFO,
+                  "\"Wait for RFL\" is %s via MRQ_EMC_DISP_RFL\n",
+                  bEnable ? "enabled" : "disabled");
+        status = NV_OK;
+    }
+    else
+    {
+        nv_printf(NV_DBG_ERRORS,
+                  "MRQ_EMC_DISP_RFL failed to %s \"Wait for RFL\" (error code = %d)\n",
+                  bEnable ? "enable" : "disable",
+                  rc);
+        status = NV_ERR_GENERIC;
+    }
+#else
+    // Maybe we don't need the old implementation "else" clause cases anymore.
+    NV_ASSERT(NV_FALSE);
+#endif
+#endif
+    return status;
+}
+
+/*!
+ * @brief Obtains a handle for the display data path
+ *
+ * If a handle is obtained successfully, it is not returned to the caller; it
+ * is saved for later use by subsequent nv_imp_icc_set_bw calls.
+ * nv_imp_icc_get must be called prior to calling nv_imp_icc_set_bw.
+ *
+ * @param[in] nv Per GPU Linux state
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
+ *          NV_ERR_GENERIC if some other error occurred.
+ */
+NV_STATUS NV_API_CALL
+nv_imp_icc_get
+(
+    nv_state_t *nv
+)
+{
+#if ICC_SUPPORT_FUNCTIONS_PRESENT
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    NV_STATUS status = NV_OK;
+
+#if defined(NV_ICC_GET_PRESENT)
+    struct device_node *np;
+    nvl->nv_imp_icc_path = NULL;
+    // Check if ICC is present in the device tree, and enabled.
+    np = of_find_node_by_path("/icc");
+    if (np != NULL)
+    {
+        if (of_device_is_available(np))
+        {
+            // Get the ICC data path.
+            nvl->nv_imp_icc_path =
+                icc_get(nvl->dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY);
+        }
+        of_node_put(np);
+    }
+#else
+    nv_printf(NV_DBG_ERRORS, "NVRM: icc_get() not present\n");
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+
+    if (nvl->nv_imp_icc_path == NULL)
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: icc_get disabled\n");
+        status = NV_ERR_NOT_SUPPORTED;
+    }
+    else if (IS_ERR(nvl->nv_imp_icc_path))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: invalid path = %ld\n",
+                  PTR_ERR(nvl->nv_imp_icc_path));
+        nvl->nv_imp_icc_path = NULL;
+        status = NV_ERR_GENERIC;
+    }
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+/*!
+ * @brief Releases the handle obtained by nv_imp_icc_get
+ *
+ * @param[in] nv Per GPU Linux state
+ */
+void
+nv_imp_icc_put
+(
+    nv_state_t *nv
+)
+{
+#if ICC_SUPPORT_FUNCTIONS_PRESENT
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+#if defined(NV_ICC_PUT_PRESENT)
+    if (nvl->nv_imp_icc_path != NULL)
+    {
+        icc_put(nvl->nv_imp_icc_path);
+    }
+#else
+    nv_printf(NV_DBG_ERRORS, "icc_put() not present\n");
+#endif
+    nvl->nv_imp_icc_path = NULL;
+#endif
+}
+
+/*!
+ * @brief Allocates a specified amount of ISO memory bandwidth for display
+ *
+ * floor_bw_kbps is the minimum required (i.e., floor) dramclk frequency
+ * multiplied by the width of the pipe over which the display data will travel.
+ * (It is understood that the bandwidth calculated by multiplying the clock
+ * frequency by the pipe width will not be realistically achievable, due to
+ * overhead in the memory subsystem. ICC will not actually use the bandwidth
+ * value, except to reverse the calculation to get the required dramclk
+ * frequency.)
+ *
+ * nv_imp_icc_get must be called prior to calling this function.
+ *
+ * @param[in] nv Per GPU Linux state
+ * @param[in] avg_bw_kbps Amount of ISO memory bandwidth requested
+ * @param[in] floor_bw_kbps Min required dramclk freq * pipe width
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too
+ *          high, and bandwidth cannot be allocated,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
+ *          NV_ERR_GENERIC if some other kind of error occurred.
+ */
+NV_STATUS NV_API_CALL
+nv_imp_icc_set_bw
+(
+    nv_state_t *nv,
+    NvU32 avg_bw_kbps,
+    NvU32 floor_bw_kbps
+)
+{
+#if ICC_SUPPORT_FUNCTIONS_PRESENT
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int rc;
+    NV_STATUS status = NV_OK;
+
+    //
+    // avg_bw_kbps can be either an ISO bw request or a NISO bw request.
+    // Use floor_bw_kbps to make floor requests.
+    //
+#if defined(NV_ICC_SET_BW_PRESENT)
+    //
+    // nv_imp_icc_path will be NULL on AV + L systems because ICC is disabled.
+    // In this case, skip the allocation call, and just return a success
+    // status.
+    //
+    if (nvl->nv_imp_icc_path == NULL)
+    {
+        return NV_OK;
+    }
+    rc = icc_set_bw(nvl->nv_imp_icc_path, avg_bw_kbps, floor_bw_kbps);
+#else
+    nv_printf(NV_DBG_ERRORS, "icc_set_bw() not present\n");
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+
+    if (rc < 0)
+    {
+        // A negative return value indicates an error.
+        if (rc == -ENOMEM)
+        {
+            status = NV_ERR_INSUFFICIENT_RESOURCES;
+        }
+        else
+        {
+            status = NV_ERR_GENERIC;
+        }
+    }
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
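For context, a hedged sketch of the bare Linux interconnect (ICC) calls these wrappers map onto, using the same Tegra display/primary endpoint pair as above; the helper name is illustrative:

#include <linux/interconnect.h>

/* Illustration only: acquire a display->memory path and request bandwidth. */
static int icc_example_request(struct device *dev, u32 avg_kbps, u32 peak_kbps)
{
    struct icc_path *path = icc_get(dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY);
    int rc;

    if (IS_ERR_OR_NULL(path))
        return path ? PTR_ERR(path) : -ENODEV;

    /* icc_set_bw() takes an average and a peak value; the driver above
     * passes its floor request through the peak argument. */
    rc = icc_set_bw(path, avg_kbps, peak_kbps);
    icc_put(path);
    return rc;
}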
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c
new file mode 100644
index 0000000..23ce581
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c
@@ -0,0 +1,158 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#include "dce_rm_client_ipc.h"
+
+#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
+#include <linux/platform/tegra/dce/dce_client_ipc.h>
+
+#if (NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_register_ipc_client && \
+     NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_client_ipc_send_recv && \
+     NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_unregister_ipc_client)
+#define NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT 1
+#else
+#define NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT 0
+#endif
+
+#endif
+
+#if (defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT) && \
+     NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT)
+static const NvU32 dceClientRmIpcTypeMap[DCE_CLIENT_RM_IPC_TYPE_MAX] = {
+    [DCE_CLIENT_RM_IPC_TYPE_SYNC]  = DCE_CLIENT_IPC_TYPE_CPU_RM,
+    [DCE_CLIENT_RM_IPC_TYPE_EVENT] = DCE_CLIENT_IPC_TYPE_RM_EVENT,
+};
+
+static NV_STATUS validate_dce_client_ipc_interface_type(NvU32 interfaceType)
+{
+    if (interfaceType >= DCE_CLIENT_RM_IPC_TYPE_MAX)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (dceClientRmIpcTypeMap[interfaceType] >= DCE_CLIENT_IPC_TYPE_MAX)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    return NV_OK;
+}
+
+NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
+{
+    NvU32 interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
+
+    for (interfaceType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
+         interfaceType < DCE_CLIENT_RM_IPC_TYPE_MAX;
+         interfaceType++)
+    {
+        if (dceClientRmIpcTypeMap[interfaceType] == clientIpcType)
+            return interfaceType;
+    }
+
+    return NV_ERR_INVALID_DATA;
+}
+
+NV_STATUS nv_tegra_dce_register_ipc_client
+(
+    NvU32 interfaceType,
+    void *usrCtx,
+    nvTegraDceClientIpcCallback callbackFn,
+    NvU32 *handle
+)
+{
+    NvU32 dceClientInterfaceType = DCE_CLIENT_IPC_TYPE_MAX;
+
+    if (validate_dce_client_ipc_interface_type(interfaceType) != NV_OK)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    dceClientInterfaceType = dceClientRmIpcTypeMap[interfaceType];
+
+    return tegra_dce_register_ipc_client(dceClientInterfaceType, callbackFn, usrCtx, handle);
+}
+
+NV_STATUS nv_tegra_dce_client_ipc_send_recv
+(
+    NvU32 clientId,
+    void *msg,
+    NvU32 msgLength
+)
+{
+    struct dce_ipc_message dce_ipc_msg;
+
+    memset(&dce_ipc_msg, 0, sizeof(struct dce_ipc_message));
+    dce_ipc_msg.tx.data = msg;
+    dce_ipc_msg.rx.data = msg;
+    dce_ipc_msg.tx.size = msgLength;
+    dce_ipc_msg.rx.size = msgLength;
+
+    return tegra_dce_client_ipc_send_recv(clientId, &dce_ipc_msg);
+}
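A hedged sketch of issuing a synchronous RM-to-DCE message through the wrapper above; the caller name and command buffer are hypothetical, and the handle is presumed to come from a prior nv_tegra_dce_register_ipc_client() call:

/* Illustration only: one synchronous round trip on a registered client.
 * The same buffer is used for tx and rx, since the wrapper above wires
 * dce_ipc_msg.tx.data and rx.data to the single 'msg' pointer. */
static NV_STATUS dce_example_sync_call(NvU32 handle, void *cmd_buf, NvU32 len)
{
    return nv_tegra_dce_client_ipc_send_recv(handle, cmd_buf, len);
}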
+
+NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
+{
+    return tegra_dce_unregister_ipc_client(clientId);
+}
+#else
+NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS nv_tegra_dce_register_ipc_client
+(
+    NvU32 interfaceType,
+    void *usrCtx,
+    nvTegraDceClientIpcCallback callbackFn,
+    NvU32 *handle
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS nv_tegra_dce_client_ipc_send_recv
+(
+    NvU32 clientId,
+    void *msg,
+    NvU32 msgLength
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+#endif
+
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c
new file mode 100644
index 0000000..5a95f4a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c
@@ -0,0 +1,335 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv-kthread-q.h"
+#include "nv-list-helpers.h"
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#if defined(NV_LINUX_BUG_H_PRESENT)
+    #include <linux/bug.h>
+#else
+    #include <asm/bug.h>
+#endif
+
+// Today's implementation is a little simpler and more limited than the
+// API description allows for in nv-kthread-q.h. Details include:
+//
+//   1. Each nv_kthread_q instance is a first-in, first-out queue.
+//
+//   2. Each nv_kthread_q instance is serviced by exactly one kthread.
+//
+// You can create any number of queues, each of which gets its own
+// named kernel thread (kthread). You can then insert arbitrary functions
+// into the queue, and those functions will be run in the context of the
+// queue's kthread.
+
+#ifndef WARN
+    // Only *really* old kernels (2.6.9) end up here. Just use a simple printk
+    // to implement this, because such kernels won't be supported much longer.
+    #define WARN(condition, format...) ({         \
+        int __ret_warn_on = !!(condition);        \
+        if (unlikely(__ret_warn_on))              \
+            printk(KERN_ERR format);              \
+        unlikely(__ret_warn_on);                  \
+    })
+#endif
+
+#define NVQ_WARN(fmt, ...)
\ + do { \ + if (in_interrupt()) { \ + WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \ + ##__VA_ARGS__); \ + } \ + else { \ + WARN(1, "nv_kthread_q: task: %s: " fmt, \ + current->comm, \ + ##__VA_ARGS__); \ + } \ + } while (0) + +static int _main_loop(void *args) +{ + nv_kthread_q_t *q = (nv_kthread_q_t *)args; + nv_kthread_q_item_t *q_item = NULL; + unsigned long flags; + + while (1) { + // Normally this thread is never interrupted. However, + // down_interruptible (instead of down) is called here, + // in order to avoid being classified as a potentially + // hung task, by the kernel watchdog. + while (down_interruptible(&q->q_sem)) + NVQ_WARN("Interrupted during semaphore wait\n"); + + if (atomic_read(&q->main_loop_should_exit)) + break; + + spin_lock_irqsave(&q->q_lock, flags); + + // The q_sem semaphore prevents us from getting here unless there is + // at least one item in the list, so an empty list indicates a bug. + if (unlikely(list_empty(&q->q_list_head))) { + spin_unlock_irqrestore(&q->q_lock, flags); + NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q); + continue; + } + + // Consume one item from the queue + q_item = list_first_entry(&q->q_list_head, + nv_kthread_q_item_t, + q_list_node); + + list_del_init(&q_item->q_list_node); + + spin_unlock_irqrestore(&q->q_lock, flags); + + // Run the item + q_item->function_to_run(q_item->function_args); + + // Make debugging a little simpler by clearing this between runs: + q_item = NULL; + } + + while (!kthread_should_stop()) + schedule(); + + return 0; +} + +void nv_kthread_q_stop(nv_kthread_q_t *q) +{ + // check if queue has been properly initialized + if (unlikely(!q->q_kthread)) + return; + + nv_kthread_q_flush(q); + + // If this assertion fires, then a caller likely either broke the API rules, + // by adding items after calling nv_kthread_q_stop, or possibly messed up + // with inadequate flushing of self-rescheduling q_items. + if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. 
+// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + const static unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. + if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if ((i == (attempts - 1))) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. + stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} +#endif + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); +#else + return -ENOTSUPP; +#endif + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. + q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. 
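+//
+// Illustrative usage, as a sketch only (my_work_fn and my_args are
+// hypothetical caller-supplied names):
+//
+//     nv_kthread_q_item_init(&item, my_work_fn, my_args);
+//     if (!nv_kthread_q_schedule_q_item(&q, &item))
+//         ;  // item was already pending, or the queue is being stopped
+//
+// Items may re-schedule themselves from within their callback; the double
+// flush in nv_kthread_q_flush() below exists to drain exactly that case.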
+int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. The queue really does have to be + // flushed twice, in order to take care of the case of a q_item that + // reschedules itself. + _raw_q_flush(q); + _raw_q_flush(q); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c new file mode 100644 index 0000000..033a421 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c @@ -0,0 +1,232 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-memdbg.h" +#include "nv-linux.h" + +/* track who's allocating memory and print out a list of leaked allocations at + * teardown. 
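+ *
+ * Illustrative flow, as a sketch (the wrapper macros the rest of the driver
+ * uses to call these hooks live in nv-memdbg.h and are not reproduced here):
+ *
+ *     alloc: ptr = kmalloc(size, ...);
+ *            nv_memdbg_add(ptr, size, __FILE__, __LINE__);
+ *     free:  nv_memdbg_remove(ptr, size, __FILE__, __LINE__);
+ *            kfree(ptr);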
+ */
+
+typedef struct {
+    struct rb_node rb_node;
+    void *addr;
+    NvU64 size;
+    NvU32 line;
+    const char *file;
+} nv_memdbg_node_t;
+
+struct
+{
+    struct rb_root rb_root;
+    NvU64 untracked_bytes;
+    NvU64 num_untracked_allocs;
+    nv_spinlock_t lock;
+} g_nv_memdbg;
+
+void nv_memdbg_init(void)
+{
+    NV_SPIN_LOCK_INIT(&g_nv_memdbg.lock);
+    g_nv_memdbg.rb_root = RB_ROOT;
+}
+
+static nv_memdbg_node_t *nv_memdbg_node_entry(struct rb_node *rb_node)
+{
+    return rb_entry(rb_node, nv_memdbg_node_t, rb_node);
+}
+
+static void nv_memdbg_insert_node(nv_memdbg_node_t *new)
+{
+    nv_memdbg_node_t *node;
+    struct rb_node **rb_node = &g_nv_memdbg.rb_root.rb_node;
+    struct rb_node *rb_parent = NULL;
+
+    while (*rb_node)
+    {
+        node = nv_memdbg_node_entry(*rb_node);
+
+        WARN_ON(new->addr == node->addr);
+
+        rb_parent = *rb_node;
+
+        if (new->addr < node->addr)
+            rb_node = &(*rb_node)->rb_left;
+        else
+            rb_node = &(*rb_node)->rb_right;
+    }
+
+    rb_link_node(&new->rb_node, rb_parent, rb_node);
+    rb_insert_color(&new->rb_node, &g_nv_memdbg.rb_root);
+}
+
+static nv_memdbg_node_t *nv_memdbg_remove_node(void *addr)
+{
+    nv_memdbg_node_t *node = NULL;
+    struct rb_node *rb_node = g_nv_memdbg.rb_root.rb_node;
+
+    while (rb_node)
+    {
+        node = nv_memdbg_node_entry(rb_node);
+        if (addr == node->addr)
+            break;
+        else if (addr < node->addr)
+            rb_node = rb_node->rb_left;
+        else
+            rb_node = rb_node->rb_right;
+    }
+
+    /* The allocation may be untracked (its node allocation failed), so a
+     * miss is possible here; the caller adjusts the untracked counters. */
+    if (rb_node == NULL)
+        return NULL;
+
+    rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
+    return node;
+}
+
+void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
+{
+    nv_memdbg_node_t *node;
+    unsigned long flags;
+
+    WARN_ON(addr == NULL);
+
+    /* If node allocation fails, we can still update the untracked counters */
+    node = kmalloc(sizeof(*node),
+                   NV_MAY_SLEEP() ?
NV_GFP_KERNEL : NV_GFP_ATOMIC); + if (node) + { + node->addr = addr; + node->size = size; + node->file = file; + node->line = line; + } + + NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags); + + if (node) + { + nv_memdbg_insert_node(node); + } + else + { + ++g_nv_memdbg.num_untracked_allocs; + g_nv_memdbg.untracked_bytes += size; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags); +} + +void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line) +{ + nv_memdbg_node_t *node; + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags); + + node = nv_memdbg_remove_node(addr); + if (!node) + { + WARN_ON(g_nv_memdbg.num_untracked_allocs == 0); + WARN_ON(g_nv_memdbg.untracked_bytes < size); + --g_nv_memdbg.num_untracked_allocs; + g_nv_memdbg.untracked_bytes -= size; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags); + + if (node) + { + if ((size != 0) && (node->size != size)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: size mismatch on free: %llu != %llu\n", + size, node->size); + if (node->file) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: allocation: 0x%p @ %s:%d\n", + node->addr, node->file, node->line); + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: allocation: 0x%p\n", + node->addr); + } + os_dbg_breakpoint(); + } + + kfree(node); + } +} + +void nv_memdbg_exit(void) +{ + nv_memdbg_node_t *node; + NvU64 leaked_bytes = 0, num_leaked_allocs = 0; + + if (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: list of leaked memory allocations:\n"); + } + + while (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root)) + { + node = nv_memdbg_node_entry(rb_first(&g_nv_memdbg.rb_root)); + + leaked_bytes += node->size; + ++num_leaked_allocs; + + if (node->file) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes, 0x%p @ %s:%d\n", + node->size, node->addr, node->file, node->line); + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes, 0x%p\n", + node->size, node->addr); + } + + rb_erase(&node->rb_node, &g_nv_memdbg.rb_root); + kfree(node); + } + + /* If we failed to allocate a node at some point, we may have leaked memory + * even if the tree is empty */ + if (num_leaked_allocs > 0 || g_nv_memdbg.num_untracked_allocs > 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: total leaked memory: %llu bytes in %llu allocations\n", + leaked_bytes + g_nv_memdbg.untracked_bytes, + num_leaked_allocs + g_nv_memdbg.num_untracked_allocs); + + if (g_nv_memdbg.num_untracked_allocs > 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes in %llu allocations untracked\n", + g_nv_memdbg.untracked_bytes, g_nv_memdbg.num_untracked_allocs); + } + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c new file mode 100644 index 0000000..07a02e6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c @@ -0,0 +1,781 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv_speculation_barrier.h" + +/* + * The 'struct vm_operations' open() callback is called by the Linux + * kernel when the parent VMA is split or copied, close() when the + * current VMA is about to be deleted. + * + * We implement these callbacks to keep track of the number of user + * mappings of system memory allocations. This was motivated by a + * subtle interaction problem between the driver and the kernel with + * respect to the bookkeeping of pages marked reserved and later + * mapped with mmap(). + * + * Traditionally, the Linux kernel ignored reserved pages, such that + * when they were mapped via mmap(), the integrity of their usage + * counts depended on the reserved bit being set for as long as user + * mappings existed. + * + * Since we mark system memory pages allocated for DMA reserved and + * typically map them with mmap(), we need to ensure they remain + * reserved until the last mapping has been torn down. This worked + * correctly in most cases, but in a few, the RM API called into the + * RM to free memory before calling munmap() to unmap it. + * + * In the past, we allowed nv_free_pages() to remove the 'at' from + * the parent device's allocation list in this case, but didn't + * release the underlying pages until the last user mapping had been + * destroyed: + * + * In nvidia_vma_release(), we freed any resources associated with + * the allocation (IOMMU mappings, etc.) and cleared the + * underlying pages' reserved bits, but didn't free them. The kernel + * was expected to do this. + * + * This worked in practise, but made dangerous assumptions about the + * kernel's behavior and could fail in some cases. We now handle + * this case differently (see below). + */ +static void +nvidia_vma_open(struct vm_area_struct *vma) +{ + nv_alloc_t *at = NV_VMA_PRIVATE(vma); + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + if (at != NULL) + { + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + } +} + +/* + * (see above for additional information) + * + * If the 'at' usage count drops to zero with the updated logic, the + * the allocation is recorded in the free list of the private + * data associated with the file pointer; nvidia_close() uses this + * list to perform deferred free operations when the parent file + * descriptor is closed. 
This will typically happen when the process + * exits. + * + * Since this is technically a workaround to handle possible fallout + * from misbehaving clients, we additionally print a warning. + */ +static void +nvidia_vma_release(struct vm_area_struct *vma) +{ + nv_alloc_t *at = NV_VMA_PRIVATE(vma); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + static int count = 0; + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + if (at != NULL && nv_alloc_release(nvlfp, at)) + { + if ((at->pid == os_get_current_process()) && + (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: late unmap, comm: %s, 0x%p\n", + __FUNCTION__, current->comm, at); + } + } +} + +static int +nvidia_vma_access( + struct vm_area_struct *vma, + unsigned long addr, + void *buffer, + int length, + int write +) +{ + nv_alloc_t *at = NULL; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr); + NvU32 pageIndex, pageOffset; + void *kernel_mapping; + const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context; + NvU64 offset; + + pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT); + pageOffset = (addr & ~PAGE_MASK); + + if (!mmap_context->valid) + { + nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n"); + return -EINVAL; + } + + offset = mmap_context->mmap_start; + + if (nv->flags & NV_FLAG_CONTROL) + { + at = NV_VMA_PRIVATE(vma); + + /* + * at can be NULL for peer IO mem. + */ + if (!at) + return -EINVAL; + + if (pageIndex >= at->num_pages) + return -EINVAL; + + /* + * For PPC64LE build, nv_array_index_no_speculate() is not defined + * therefore call nv_speculation_barrier(). + * When this definition is added, this platform check should be removed. + */ +#if !defined(NVCPU_PPC64LE) + pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages); +#else + nv_speculation_barrier(); +#endif + kernel_mapping = (void *)(at->page_table[pageIndex]->virt_addr + pageOffset); + } + else if (IS_FB_OFFSET(nv, offset, length)) + { + addr = (offset & PAGE_MASK); + kernel_mapping = os_map_kernel_space(addr, PAGE_SIZE, NV_MEMORY_UNCACHED); + if (kernel_mapping == NULL) + return -ENOMEM; + + kernel_mapping = ((char *)kernel_mapping + pageOffset); + } + else + return -EINVAL; + + length = NV_MIN(length, (int)(PAGE_SIZE - pageOffset)); + + if (write) + memcpy(kernel_mapping, buffer, length); + else + memcpy(buffer, kernel_mapping, length); + + if (at == NULL) + { + kernel_mapping = ((char *)kernel_mapping - pageOffset); + os_unmap_kernel_space(kernel_mapping, PAGE_SIZE); + } + + return length; +} + +static vm_fault_t nvidia_fault( +#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + struct vm_area_struct *vma, +#endif + struct vm_fault *vmf +) +{ +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + struct vm_area_struct *vma = vmf->vma; +#endif + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + nv_linux_state_t *nvl = nvlfp->nvptr; + nv_state_t *nv = NV_STATE_PTR(nvl); + vm_fault_t ret = VM_FAULT_NOPAGE; + + NvU64 page; + NvU64 num_pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT; + NvU64 pfn_start = + (nvlfp->mmap_context.mmap_start >> PAGE_SHIFT) + vma->vm_pgoff; + + // Mapping revocation is only supported for GPU mappings. 
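+    //
+    // Sketch of the revoke/fault cycle this handler participates in, as an
+    // illustrative summary of the code below and nv_revoke_gpu_mappings():
+    //   1. RM revokes:  nv_revoke_gpu_mappings() -> unmap_mapping_range()
+    //   2. A user-space touch of a revoked page faults into this handler.
+    //   3. If mapping is not yet safe, rm_schedule_gpu_wakeup() is queued
+    //      and VM_FAULT_NOPAGE makes the access retry later.
+    //   4. Once safe, nv_insert_pfn() repopulates every page of the VMA.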
+ if (NV_IS_CTL_DEVICE(nv)) + { + return VM_FAULT_SIGBUS; + } + + // Wake up GPU and reinstate mappings only if we are not in S3/S4 entry + if (!down_read_trylock(&nv_system_pm_lock)) + { + return VM_FAULT_NOPAGE; + } + + down(&nvl->mmap_lock); + + // Wake up the GPU if it is not currently safe to mmap. + if (!nvl->safe_to_mmap) + { + NV_STATUS status; + + if (!nvl->gpu_wakeup_callback_needed) + { + // GPU wakeup callback already scheduled. + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_NOPAGE; + } + + /* + * GPU wakeup cannot be completed directly in the fault handler due to the + * inability to take the GPU lock while mmap_lock is held. + */ + status = rm_schedule_gpu_wakeup(nvl->sp[NV_DEV_STACK_GPU_WAKEUP], nv); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: rm_schedule_gpu_wakeup failed: %x\n", status); + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_SIGBUS; + } + // Ensure that we do not schedule duplicate GPU wakeup callbacks. + nvl->gpu_wakeup_callback_needed = NV_FALSE; + + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_NOPAGE; + } + + // Safe to mmap, map all pages in this VMA. + for (page = 0; page < num_pages; page++) + { + NvU64 virt_addr = vma->vm_start + (page << PAGE_SHIFT); + NvU64 pfn = pfn_start + page; + + ret = nv_insert_pfn(vma, virt_addr, pfn, + nvlfp->mmap_context.remap_prot_extra); + if (ret != VM_FAULT_NOPAGE) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: nv_insert_pfn failed: %x\n", ret); + break; + } + + nvl->all_mappings_revoked = NV_FALSE; + } + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + + return ret; +} + +static struct vm_operations_struct nv_vm_ops = { + .open = nvidia_vma_open, + .close = nvidia_vma_release, + .fault = nvidia_fault, + .access = nvidia_vma_access, +}; + +int nv_encode_caching( + pgprot_t *prot, + NvU32 cache_type, + nv_memory_type_t memory_type +) +{ + pgprot_t tmp; + + if (prot == NULL) + { + tmp = __pgprot(0); + prot = &tmp; + } + + switch (cache_type) + { + case NV_MEMORY_UNCACHED_WEAK: +#if defined(NV_PGPROT_UNCACHED_WEAK) + *prot = NV_PGPROT_UNCACHED_WEAK(*prot); + break; +#endif + case NV_MEMORY_UNCACHED: + *prot = (memory_type == NV_MEMORY_TYPE_SYSTEM) ? + NV_PGPROT_UNCACHED(*prot) : + NV_PGPROT_UNCACHED_DEVICE(*prot); + break; +#if defined(NV_PGPROT_WRITE_COMBINED) && \ + defined(NV_PGPROT_WRITE_COMBINED_DEVICE) + case NV_MEMORY_DEFAULT: + case NV_MEMORY_WRITECOMBINED: + if (NV_ALLOW_WRITE_COMBINING(memory_type)) + { + *prot = (memory_type == NV_MEMORY_TYPE_FRAMEBUFFER) ? + NV_PGPROT_WRITE_COMBINED_DEVICE(*prot) : + NV_PGPROT_WRITE_COMBINED(*prot); + break; + } + + /* + * If WC support is unavailable, we need to return an error + * code to the caller, but need not print a warning. + * + * For frame buffer memory, callers are expected to use the + * UC- memory type if we report WC as unsupported, which + * translates to the effective memory type WC if a WC MTRR + * exists or else UC. + */ + return 1; +#endif + case NV_MEMORY_CACHED: + if (NV_ALLOW_CACHING(memory_type)) + break; + // Intentional fallthrough. 
+ default: + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: cache type %d not supported for memory type %d!\n", + cache_type, memory_type); + return 1; + } + return 0; +} + +int static nvidia_mmap_peer_io( + struct vm_area_struct *vma, + nv_alloc_t *at, + NvU64 page_index, + NvU64 pages +) +{ + int ret; + NvU64 start; + NvU64 size; + + BUG_ON(!at->flags.contig); + + start = at->page_table[page_index]->phys_addr; + size = pages * PAGE_SIZE; + + ret = nv_io_remap_page_range(vma, start, size, 0); + + return ret; +} + +int static nvidia_mmap_sysmem( + struct vm_area_struct *vma, + nv_alloc_t *at, + NvU64 page_index, + NvU64 pages +) +{ + NvU64 j; + int ret = 0; + unsigned long start = 0; + + NV_ATOMIC_INC(at->usage_count); + + start = vma->vm_start; + for (j = page_index; j < (page_index + pages); j++) + { + /* + * For PPC64LE build, nv_array_index_no_speculate() is not defined + * therefore call nv_speculation_barrier(). + * When this definition is added, this platform check should be removed. + */ +#if !defined(NVCPU_PPC64LE) + j = nv_array_index_no_speculate(j, (page_index + pages)); +#else + nv_speculation_barrier(); +#endif + +#if defined(NV_VGPU_KVM_BUILD) + if (at->flags.guest) + { + ret = nv_remap_page_range(vma, start, at->page_table[j]->phys_addr, + PAGE_SIZE, vma->vm_page_prot); + } + else +#endif + { + vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0); + ret = vm_insert_page(vma, start, + NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr)); + } + + if (ret) + { + NV_ATOMIC_DEC(at->usage_count); + return -EAGAIN; + } + start += PAGE_SIZE; + } + + return ret; +} + +static int nvidia_mmap_numa( + struct vm_area_struct *vma, + const nv_alloc_mapping_context_t *mmap_context) +{ + NvU64 start, addr; + unsigned int pages; + NvU64 i; + + pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT; + start = vma->vm_start; + + if (mmap_context->num_pages < pages) + { + return -EINVAL; + } + + // Needed for the linux kernel for mapping compound pages + vma->vm_flags |= VM_MIXEDMAP; + + for (i = 0, addr = mmap_context->page_array[0]; i < pages; + addr = mmap_context->page_array[++i], start += PAGE_SIZE) + { + if (vm_insert_page(vma, start, NV_GET_PAGE_STRUCT(addr)) != 0) + { + return -EAGAIN; + } + } + + return 0; +} + +int nvidia_mmap_helper( + nv_state_t *nv, + nv_linux_file_private_t *nvlfp, + nvidia_stack_t *sp, + struct vm_area_struct *vma, + void *vm_priv +) +{ + NvU32 prot = 0; + int ret; + const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NV_STATUS status; + + if (nvlfp == NULL) + return NV_ERR_INVALID_ARGUMENT; + + /* + * If mmap context is not valid on this file descriptor, this mapping wasn't + * previously validated with the RM so it must be rejected. + */ + if (!mmap_context->valid) + { + nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n"); + return -EINVAL; + } + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + status = nv_check_gpu_state(nv); + if (status != NV_OK) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, + "GPU is lost, skipping nvidia_mmap_helper\n"); + return status; + } + + NV_VMA_PRIVATE(vma) = vm_priv; + + prot = mmap_context->prot; + + /* + * Nvidia device node(nvidia#) maps device's BAR memory, + * Nvidia control node(nvidiactrl) maps system memory. 
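+ *
+ * Illustrative routing of the two cases handled below (sketch):
+ *
+ *     /dev/nvidia<N> (GPU node)  -> BAR register/framebuffer offsets,
+ *                                   mapped via PFN/IO remapping
+ *     /dev/nvidiactl (control)   -> nv_alloc_t system memory, mapped via
+ *                                   vm_insert_page()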
+ */ + if (!NV_IS_CTL_DEVICE(nv)) + { + NvU32 remap_prot_extra = mmap_context->remap_prot_extra; + NvU64 mmap_start = mmap_context->mmap_start; + NvU64 mmap_length = mmap_context->mmap_size; + NvU64 access_start = mmap_context->access_start; + NvU64 access_len = mmap_context->access_size; + + if (IS_REG_OFFSET(nv, access_start, access_len)) + { + if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED, + NV_MEMORY_TYPE_REGISTERS)) + { + return -ENXIO; + } + } + else if (IS_FB_OFFSET(nv, access_start, access_len)) + { + if (IS_UD_OFFSET(nv, access_start, access_len)) + { + if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED, + NV_MEMORY_TYPE_FRAMEBUFFER)) + { + return -ENXIO; + } + } + else + { + if (nv_encode_caching(&vma->vm_page_prot, + rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : mmap_context->caching, + NV_MEMORY_TYPE_FRAMEBUFFER)) + { + if (nv_encode_caching(&vma->vm_page_prot, + NV_MEMORY_UNCACHED_WEAK, NV_MEMORY_TYPE_FRAMEBUFFER)) + { + return -ENXIO; + } + } + } + } + + down(&nvl->mmap_lock); + if (nvl->safe_to_mmap) + { + nvl->all_mappings_revoked = NV_FALSE; + + // + // This path is similar to the sysmem mapping code. + // TODO: Refactor is needed as part of bug#2001704. + // Use pfn_valid to determine whether the physical address has + // backing struct page. This is used to isolate P8 from P9. + // + if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) && + !IS_REG_OFFSET(nv, access_start, access_len) && + (pfn_valid(PFN_DOWN(mmap_start)))) + { + ret = nvidia_mmap_numa(vma, mmap_context); + if (ret) + { + up(&nvl->mmap_lock); + return ret; + } + } + else + { + if (nv_io_remap_page_range(vma, mmap_start, mmap_length, + remap_prot_extra) != 0) + { + up(&nvl->mmap_lock); + return -EAGAIN; + } + } + } + up(&nvl->mmap_lock); + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND; + } + else + { + nv_alloc_t *at; + NvU64 page_index; + NvU64 pages; + NvU64 mmap_size; + + at = (nv_alloc_t *)mmap_context->alloc; + page_index = mmap_context->page_index; + mmap_size = NV_VMA_SIZE(vma); + pages = mmap_size >> PAGE_SHIFT; + + if ((page_index + pages) > at->num_pages) + { + return -ERANGE; + } + + /* + * Callers that pass in non-NULL VMA private data must never reach this + * code. They should be mapping on a non-control node. + */ + BUG_ON(NV_VMA_PRIVATE(vma)); + + if (at->flags.peer_io) + { + if (nv_encode_caching(&vma->vm_page_prot, + at->cache_type, + NV_MEMORY_TYPE_DEVICE_MMIO)) + { + return -ENXIO; + } + + /* + * There is no need to keep 'peer IO at' alive till vma_release like + * 'sysmem at' because there are no security concerns where a client + * could free RM allocated sysmem before unmapping it. Hence, vm_ops + * are NOP, and at->usage_count is never being used. 
+ */ + NV_VMA_PRIVATE(vma) = NULL; + + ret = nvidia_mmap_peer_io(vma, at, page_index, pages); + + BUG_ON(NV_VMA_PRIVATE(vma)); + } + else + { + if (nv_encode_caching(&vma->vm_page_prot, + at->cache_type, + NV_MEMORY_TYPE_SYSTEM)) + { + return -ENXIO; + } + + NV_VMA_PRIVATE(vma) = at; + + ret = nvidia_mmap_sysmem(vma, at, page_index, pages); + } + + if (ret) + { + return ret; + } + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED); + vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP); + } + + if ((prot & NV_PROTECT_WRITEABLE) == 0) + { + vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot); + vma->vm_flags &= ~VM_WRITE; + vma->vm_flags &= ~VM_MAYWRITE; + } + + vma->vm_ops = &nv_vm_ops; + + return 0; +} + +int nvidia_mmap( + struct file *file, + struct vm_area_struct *vma +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nvidia_stack_t *sp = NULL; + int status; + + // + // Do not allow mmap operation if this is a fd into + // which rm objects have been exported. + // + if (nvlfp->nvfp.handles != NULL) + { + return -EINVAL; + } + + down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]); + + sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_MMAP]; + + status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL); + + up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]); + + return status; +} + +void +nv_revoke_gpu_mappings_locked( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_linux_file_private_t *nvlfp; + + /* Revoke all mappings for every open file */ + list_for_each_entry (nvlfp, &nvl->open_files, entry) + { + unmap_mapping_range(&nvlfp->mapping, 0, ~0, 1); + } + + nvl->all_mappings_revoked = NV_TRUE; +} + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Mapping revocation is only supported for GPU mappings. + if (NV_IS_CTL_DEVICE(nv)) + { + return NV_ERR_NOT_SUPPORTED; + } + + down(&nvl->mmap_lock); + + nv_revoke_gpu_mappings_locked(nv); + + up(&nvl->mmap_lock); + + return NV_OK; +} + +void NV_API_CALL nv_acquire_mmap_lock( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + down(&nvl->mmap_lock); +} + +void NV_API_CALL nv_release_mmap_lock( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + up(&nvl->mmap_lock); +} + +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Caller must hold nvl->mmap_lock for all decisions based on this + return nvl->all_mappings_revoked; +} + +void NV_API_CALL nv_set_safe_to_mmap_locked( + nv_state_t *nv, + NvBool safe_to_mmap +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Caller must hold nvl->mmap_lock + + /* + * If nvl->safe_to_mmap is transitioning from TRUE to FALSE, we expect to + * need to schedule a GPU wakeup callback when we fault. + * + * nvl->gpu_wakeup_callback_needed will be set to FALSE in nvidia_fault() + * after scheduling the GPU wakeup callback, preventing us from scheduling + * duplicates. 
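+ *
+ * An illustrative suspend-side sequence (sketch; the actual RM call sites
+ * live outside this file):
+ *
+ *     nv_acquire_mmap_lock(nv);
+ *     nv_revoke_gpu_mappings_locked(nv);
+ *     nv_set_safe_to_mmap_locked(nv, NV_FALSE);  // later faults defer
+ *     nv_release_mmap_lock(nv);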
+ */ + if (!safe_to_mmap && nvl->safe_to_mmap) + { + nvl->gpu_wakeup_callback_needed = NV_TRUE; + } + + nvl->safe_to_mmap = safe_to_mmap; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c new file mode 100644 index 0000000..5a8911c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-modeset-interface.h" + +#include "os-interface.h" +#include "nv-linux.h" +#include "nvstatus.h" +#include "nv.h" + +static const nvidia_modeset_callbacks_t *nv_modeset_callbacks; + +static int nvidia_modeset_rm_ops_alloc_stack(nvidia_stack_t **sp) +{ + return nv_kmem_cache_alloc_stack(sp); +} + +static void nvidia_modeset_rm_ops_free_stack(nvidia_stack_t *sp) +{ + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } +} + +static int nvidia_modeset_set_callbacks(const nvidia_modeset_callbacks_t *cb) +{ + if ((nv_modeset_callbacks != NULL && cb != NULL) || + (nv_modeset_callbacks == NULL && cb == NULL)) + { + return -EINVAL; + } + + nv_modeset_callbacks = cb; + return 0; +} + +void nvidia_modeset_suspend(NvU32 gpuId) +{ + if (nv_modeset_callbacks) + { + nv_modeset_callbacks->suspend(gpuId); + } +} + +void nvidia_modeset_resume(NvU32 gpuId) +{ + if (nv_modeset_callbacks) + { + nv_modeset_callbacks->resume(gpuId); + } +} + +static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info) +{ + nv_linux_state_t *nvl; + unsigned int count; + + LOCK_NV_LINUX_DEVICES(); + + count = 0; + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + + /* + * The gpu_info[] array has NV_MAX_GPUS elements. Fail if there + * are more GPUs than that. 
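+ *
+ * Since count is reset to 0 below on overflow, a caller cannot distinguish
+ * "too many GPUs" from "no GPUs". An illustrative caller, going through the
+ * ops table returned by nvidia_get_rm_ops() (storage is caller-owned):
+ *
+ *     nv_gpu_info_t infos[NV_MAX_GPUS];
+ *     NvU32 n = rm_ops->enumerate_gpus(infos);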
+ */ + if (count >= NV_MAX_GPUS) { + nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.", + NV_MAX_GPUS); + count = 0; + break; + } + + gpu_info[count].gpu_id = nv->gpu_id; + + gpu_info[count].pci_info.domain = nv->pci_info.domain; + gpu_info[count].pci_info.bus = nv->pci_info.bus; + gpu_info[count].pci_info.slot = nv->pci_info.slot; + gpu_info[count].pci_info.function = nv->pci_info.function; + + gpu_info[count].os_device_ptr = nvl->dev; + + count++; + } + + UNLOCK_NV_LINUX_DEVICES(); + + return count; +} + +NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops) +{ + const nvidia_modeset_rm_ops_t local_rm_ops = { + .version_string = NV_VERSION_STRING, + .system_info = { + .allow_write_combining = NV_FALSE, + }, + .alloc_stack = nvidia_modeset_rm_ops_alloc_stack, + .free_stack = nvidia_modeset_rm_ops_free_stack, + .enumerate_gpus = nvidia_modeset_enumerate_gpus, + .open_gpu = nvidia_dev_get, + .close_gpu = nvidia_dev_put, + .op = rm_kernel_rmapi_op, /* provided by nv-kernel.o */ + .set_callbacks = nvidia_modeset_set_callbacks, + }; + + if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0) + { + rm_ops->version_string = NV_VERSION_STRING; + return NV_ERR_GENERIC; + } + + *rm_ops = local_rm_ops; + + if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER)) { + rm_ops->system_info.allow_write_combining = NV_TRUE; + } + + return NV_OK; +} + +EXPORT_SYMBOL(nvidia_get_rm_ops); diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c new file mode 100644 index 0000000..7efaeb4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "nv-msi.h"
+#include "nv-proto.h"
+
+#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
+void NV_API_CALL nv_init_msi(nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int rc = 0;
+
+    rc = pci_enable_msi(nvl->pci_dev);
+    if (rc == 0)
+    {
+        nv->interrupt_line = nvl->pci_dev->irq;
+        nv->flags |= NV_FLAG_USES_MSI;
+        nvl->num_intr = 1;
+        NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr);
+
+        if (nvl->irq_count == NULL)
+        {
+            nv->flags &= ~NV_FLAG_USES_MSI;
+            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
+                          "Failed to allocate counter for MSI entry; "
+                          "falling back to PCIe virtual-wire interrupts.\n");
+        }
+        else
+        {
+            memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * nvl->num_intr);
+            nvl->current_num_irq_tracked = 0;
+        }
+    }
+    else
+    {
+        nv->flags &= ~NV_FLAG_USES_MSI;
+        if (nvl->pci_dev->irq != 0)
+        {
+            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
+                          "Failed to enable MSI; "
+                          "falling back to PCIe virtual-wire interrupts.\n");
+        }
+    }
+
+    return;
+}
+
+void NV_API_CALL nv_init_msix(nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    int num_intr = 0;
+    struct msix_entry *msix_entries;
+    int rc = 0;
+    int i;
+
+    NV_SPIN_LOCK_INIT(&nvl->msix_isr_lock);
+
+    rc = os_alloc_mutex(&nvl->msix_bh_mutex);
+    if (rc != 0)
+        goto failed;
+
+    num_intr = nv_get_max_irq(nvl->pci_dev);
+
+    if (num_intr > NV_RM_MAX_MSIX_LINES)
+    {
+        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Reducing MSI-X count from %d to the "
+                      "driver-supported maximum %d.\n", num_intr, NV_RM_MAX_MSIX_LINES);
+        num_intr = NV_RM_MAX_MSIX_LINES;
+    }
+
+    NV_KMALLOC(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
+    if (nvl->msix_entries == NULL)
+    {
+        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate MSI-X entries.\n");
+        goto failed;
+    }
+
+    for (i = 0, msix_entries = nvl->msix_entries; i < num_intr; i++, msix_entries++)
+    {
+        msix_entries->entry = i;
+    }
+
+    NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
+
+    if (nvl->irq_count == NULL)
+    {
+        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate counter for MSI-X entries.\n");
+        goto failed;
+    }
+    else
+    {
+        memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * num_intr);
+        nvl->current_num_irq_tracked = 0;
+    }
+    rc = nv_pci_enable_msix(nvl, num_intr);
+    if (rc != NV_OK)
+        goto failed;
+
+    nv->flags |= NV_FLAG_USES_MSIX;
+    return;
+
+failed:
+    nv->flags &= ~NV_FLAG_USES_MSIX;
+
+    if (nvl->msix_entries)
+    {
+        NV_KFREE(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
+    }
+
+    if (nvl->irq_count)
+    {
+        NV_KFREE(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
+    }
+
+    if (nvl->msix_bh_mutex)
+    {
+        os_free_mutex(nvl->msix_bh_mutex);
+        nvl->msix_bh_mutex = NULL;
+    }
+    NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to enable MSI-X.\n");
+}
+
+NvS32 NV_API_CALL nv_request_msix_irq(nv_linux_state_t *nvl)
+{
+    int i;
+    int j;
+    struct msix_entry *msix_entries;
+    int rc = NV_ERR_INVALID_ARGUMENT;
+    nv_state_t *nv = NV_STATE_PTR(nvl);
+
+    for (i = 0, msix_entries = nvl->msix_entries; i < nvl->num_intr;
+         i++, msix_entries++)
+    {
+        rc = request_threaded_irq(msix_entries->vector, nvidia_isr_msix,
+                                  nvidia_isr_msix_kthread_bh, nv_default_irq_flags(nv),
+                                  nv_device_name, (void *)nvl);
+        if (rc)
+        {
+            // Unwind the vectors requested so far; index with j, not i.
+            for (j = 0; j < i; j++)
+            {
+                free_irq(nvl->msix_entries[j].vector, (void *)nvl);
+            }
+            break;
+        }
+    }
+
+    return rc;
+}
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c
new file mode 100644
index 0000000..13f57db
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c
@@ -0,0 +1,176 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include <linux/kernel.h>   // For container_of
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include "os-interface.h"
+#include "nv-linux.h"
+
+struct nv_nano_timer
+{
+    struct hrtimer hr_timer;    // Holds the Linux high-resolution timer object;
+                                // can be replaced with a platform-specific timer object
+    nv_linux_state_t *nv_linux_state;
+    void (*nv_nano_timer_callback)(struct nv_nano_timer *nv_nstimer);
+    void *pTmrEvent;
+};
+
+/*!
+ * @brief Runs the nanosecond-resolution timer callback
+ *
+ * @param[in] nv_nstimer Pointer to nv_nano_timer_t object
+ */
+static void
+nvidia_nano_timer_callback(
+    nv_nano_timer_t *nv_nstimer)
+{
+    nv_state_t *nv = NULL;
+    nv_linux_state_t *nvl = nv_nstimer->nv_linux_state;
+    unsigned long flags;
+    nvidia_stack_t *sp = NULL;
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: no cache memory\n");
+        return;
+    }
+
+    nv = NV_STATE_PTR(nvl);
+
+    if (rm_run_nano_timer_callback(sp, nv, nv_nstimer->pTmrEvent) != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: error in service of callback\n");
+    }
+
+    nv_kmem_cache_free_stack(sp);
+}
+
+/*!
+ * @brief Allocates a nanosecond-resolution timer object
+ *
+ * @returns nv_nano_timer_t allocated pointer
+ */
+static nv_nano_timer_t *nv_alloc_nano_timer(void)
+{
+    nv_nano_timer_t *nv_nstimer;
+
+    NV_KMALLOC(nv_nstimer, sizeof(nv_nano_timer_t));
+
+    if (nv_nstimer == NULL)
+    {
+        return NULL;
+    }
+
+    memset(nv_nstimer, 0, sizeof(nv_nano_timer_t));
+
+    return nv_nstimer;
+}
+
+static enum hrtimer_restart nv_nano_timer_callback_typed_data(struct hrtimer *hrtmr)
+{
+    struct nv_nano_timer *nv_nstimer =
+        container_of(hrtmr, struct nv_nano_timer, hr_timer);
+
+    nv_nstimer->nv_nano_timer_callback(nv_nstimer);
+
+    return HRTIMER_NORESTART;
+}
+
+/*!
+ * @brief Creates & initializes nano second resolution timer object + * + * @param[in] nv Per gpu linux state + * @param[in] tmrEvent pointer to TMR_EVENT + * @param[in] nv_nstimer Pointer to nv_nano_timer_t object + */ +void NV_API_CALL nv_create_nano_timer( + nv_state_t *nv, + void *pTmrEvent, + nv_nano_timer_t **pnv_nstimer) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_nano_timer_t *nv_nstimer = nv_alloc_nano_timer(); + + if (nv_nstimer == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Not able to create timer object \n"); + *pnv_nstimer = NULL; + return; + } + + nv_nstimer->nv_linux_state = nvl; + nv_nstimer->pTmrEvent = pTmrEvent; + + nv_nstimer->nv_nano_timer_callback = nvidia_nano_timer_callback; + hrtimer_init(&nv_nstimer->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + nv_nstimer->hr_timer.function = nv_nano_timer_callback_typed_data; + + *pnv_nstimer = nv_nstimer; +} + +/*! + * @brief Starts nano second resolution timer + * + * @param[in] nv Per gpu linux state + * @param[in] nv_nstimer Pointer to nv_nano_timer_t object + * @param[in] timens time in nano seconds + */ +void NV_API_CALL nv_start_nano_timer( + nv_state_t *nv, + nv_nano_timer_t *nv_nstimer, + NvU64 time_ns) +{ + ktime_t ktime = ktime_set(0, time_ns); + hrtimer_start(&nv_nstimer->hr_timer, ktime, HRTIMER_MODE_REL); +} + +/*! + * @brief Cancels nano second resolution timer + * + * @param[in] nv Per gpu linux state + * @param[in] nv_nstimer Pointer to nv_nano_timer_t object + */ +void NV_API_CALL nv_cancel_nano_timer( + nv_state_t *nv, + nv_nano_timer_t *nv_nstimer) +{ + hrtimer_cancel(&nv_nstimer->hr_timer); + +} + +/*! + * @brief Cancels & deletes nano second resolution timer object + * + * @param[in] nv Per gpu linux state + * @param[in] nv_nstimer Pointer to nv_nano_timer_t object + */ +void NV_API_CALL nv_destroy_nano_timer( + nv_state_t *nv, + nv_nano_timer_t *nv_nstimer) +{ + nv_cancel_nano_timer(nv, nv_nstimer); + NV_KFREE(nv_nstimer, sizeof(nv_nano_timer_t)); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c new file mode 100644 index 0000000..af7049e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c @@ -0,0 +1,958 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-ibmnpu.h" +#include "nv-rsync.h" + +#include "nv-p2p.h" +#include "rmp2pdefines.h" + +typedef struct nv_p2p_dma_mapping { + struct list_head list_node; + struct nvidia_p2p_dma_mapping *dma_mapping; +} nv_p2p_dma_mapping_t; + +typedef struct nv_p2p_mem_info { + void (*free_callback)(void *data); + void *data; + struct nvidia_p2p_page_table page_table; + struct { + struct list_head list_head; + struct semaphore lock; + } dma_mapping_list; + NvBool bPersistent; + void *private; +} nv_p2p_mem_info_t; + +int nvidia_p2p_cap_persistent_pages = 1; +EXPORT_SYMBOL(nvidia_p2p_cap_persistent_pages); + +// declared and created in nv.c +extern void *nvidia_p2p_page_t_cache; + +static struct nvidia_status_mapping { + NV_STATUS status; + int error; +} nvidia_status_mappings[] = { + { NV_ERR_GENERIC, -EIO }, + { NV_ERR_INSUFFICIENT_RESOURCES, -ENOMEM }, + { NV_ERR_NO_MEMORY, -ENOMEM }, + { NV_ERR_INVALID_ARGUMENT, -EINVAL }, + { NV_ERR_INVALID_OBJECT_HANDLE, -EINVAL }, + { NV_ERR_INVALID_STATE, -EIO }, + { NV_ERR_NOT_SUPPORTED, -ENOTSUPP }, + { NV_ERR_OBJECT_NOT_FOUND, -EINVAL }, + { NV_ERR_STATE_IN_USE, -EBUSY }, + { NV_ERR_GPU_UUID_NOT_FOUND, -ENODEV }, + { NV_OK, 0 }, +}; + +#define NVIDIA_STATUS_MAPPINGS \ + (sizeof(nvidia_status_mappings) / sizeof(struct nvidia_status_mapping)) + +static int nvidia_p2p_map_status(NV_STATUS status) +{ + int error = -EIO; + uint8_t i; + + for (i = 0; i < NVIDIA_STATUS_MAPPINGS; i++) + { + if (nvidia_status_mappings[i].status == status) + { + error = nvidia_status_mappings[i].error; + break; + } + } + return error; +} + +static NvU32 nvidia_p2p_page_size_mappings[NVIDIA_P2P_PAGE_SIZE_COUNT] = { + NVRM_P2P_PAGESIZE_SMALL_4K, NVRM_P2P_PAGESIZE_BIG_64K, NVRM_P2P_PAGESIZE_BIG_128K +}; + +static NV_STATUS nvidia_p2p_map_page_size(NvU32 page_size, NvU32 *page_size_index) +{ + NvU32 i; + + for (i = 0; i < NVIDIA_P2P_PAGE_SIZE_COUNT; i++) + { + if (nvidia_p2p_page_size_mappings[i] == page_size) + { + *page_size_index = i; + break; + } + } + + if (i == NVIDIA_P2P_PAGE_SIZE_COUNT) + return NV_ERR_GENERIC; + + return NV_OK; +} + +static NV_STATUS nv_p2p_insert_dma_mapping( + struct nv_p2p_mem_info *mem_info, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + NV_STATUS status; + struct nv_p2p_dma_mapping *node; + + status = os_alloc_mem((void**)&node, sizeof(*node)); + if (status != NV_OK) + { + return status; + } + + down(&mem_info->dma_mapping_list.lock); + + node->dma_mapping = dma_mapping; + list_add_tail(&node->list_node, &mem_info->dma_mapping_list.list_head); + + up(&mem_info->dma_mapping_list.lock); + + return NV_OK; +} + +static struct nvidia_p2p_dma_mapping* nv_p2p_remove_dma_mapping( + struct nv_p2p_mem_info *mem_info, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + struct nv_p2p_dma_mapping *cur; + struct nvidia_p2p_dma_mapping *ret_dma_mapping = NULL; + + down(&mem_info->dma_mapping_list.lock); + + list_for_each_entry(cur, &mem_info->dma_mapping_list.list_head, list_node) + { + if (dma_mapping == NULL || dma_mapping == cur->dma_mapping) + { + ret_dma_mapping = cur->dma_mapping; + list_del(&cur->list_node); + os_free_mem(cur); + break; + } + } + + up(&mem_info->dma_mapping_list.lock); + + return ret_dma_mapping; +} + +static void nv_p2p_free_dma_mapping( + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + nv_dma_device_t peer_dma_dev = {{ 0 }}; + NvU32 page_size; + NV_STATUS status; + NvU32 i; + + peer_dma_dev.dev = &dma_mapping->pci_dev->dev; + 
peer_dma_dev.addressable_range.limit = dma_mapping->pci_dev->dma_mask; + + page_size = nvidia_p2p_page_size_mappings[dma_mapping->page_size_type]; + + if (dma_mapping->private != NULL) + { + WARN_ON(page_size != PAGE_SIZE); + + status = nv_dma_unmap_alloc(&peer_dma_dev, + dma_mapping->entries, + dma_mapping->dma_addresses, + &dma_mapping->private); + WARN_ON(status != NV_OK); + } + else + { + for (i = 0; i < dma_mapping->entries; i++) + { + nv_dma_unmap_peer(&peer_dma_dev, page_size / PAGE_SIZE, + dma_mapping->dma_addresses[i]); + } + } + + os_free_mem(dma_mapping->dma_addresses); + + os_free_mem(dma_mapping); +} + +static void nv_p2p_free_page_table( + struct nvidia_p2p_page_table *page_table +) +{ + NvU32 i; + struct nvidia_p2p_dma_mapping *dma_mapping; + struct nv_p2p_mem_info *mem_info = NULL; + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL); + while (dma_mapping != NULL) + { + nv_p2p_free_dma_mapping(dma_mapping); + + dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL); + } + + for (i = 0; i < page_table->entries; i++) + { + NV_KMEM_CACHE_FREE(page_table->pages[i], nvidia_p2p_page_t_cache); + } + + if (page_table->gpu_uuid != NULL) + { + os_free_mem(page_table->gpu_uuid); + } + + if (page_table->pages != NULL) + { + os_free_mem(page_table->pages); + } + + os_free_mem(mem_info); +} + +static NV_STATUS nv_p2p_put_pages( + nvidia_stack_t * sp, + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table **page_table +) +{ + NV_STATUS status; + struct nv_p2p_mem_info *mem_info = NULL; + + mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table); + + /* + * rm_p2p_put_pages returns NV_OK if the page_table was found and + * got unlinked from the RM's tracker (atomically). This ensures that + * RM's tear-down path does not race with this path. + * + * rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table + * was already unlinked. 
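+ *
+ * The outcome handling below can be summarized as (sketch):
+ *     NV_OK                   -> free the page table, NULL the pointer
+ *     NV_ERR_OBJECT_NOT_FOUND -> non-persistent only: RM already ran the
+ *                                free callback path, so report success
+ *     anything else           -> WARN and propagate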
+ */
+    if (mem_info->bPersistent)
+    {
+        status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table);
+    }
+    else
+    {
+        status = rm_p2p_put_pages(sp, p2p_token, va_space,
+                                  virtual_address, *page_table);
+    }
+
+    if (status == NV_OK)
+    {
+        nv_p2p_free_page_table(*page_table);
+        *page_table = NULL;
+    }
+    else if (!mem_info->bPersistent && (status == NV_ERR_OBJECT_NOT_FOUND))
+    {
+        status = NV_OK;
+        *page_table = NULL;
+    }
+    else
+    {
+        WARN_ON(status != NV_OK);
+    }
+
+    return status;
+}
+
+void NV_API_CALL nv_p2p_free_platform_data(
+    void *data
+)
+{
+    if (data == NULL)
+    {
+        WARN_ON(data == NULL);
+        return;
+    }
+
+    nv_p2p_free_page_table((struct nvidia_p2p_page_table*)data);
+}
+
+int nvidia_p2p_init_mapping(
+    uint64_t p2p_token,
+    struct nvidia_p2p_params *params,
+    void (*destroy_callback)(void *data),
+    void *data
+)
+{
+    return -ENOTSUPP;
+}
+
+EXPORT_SYMBOL(nvidia_p2p_init_mapping);
+
+int nvidia_p2p_destroy_mapping(uint64_t p2p_token)
+{
+    return -ENOTSUPP;
+}
+
+EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
+
+static void nv_p2p_mem_info_free_callback(void *data)
+{
+    nv_p2p_mem_info_t *mem_info = (nv_p2p_mem_info_t*) data;
+
+    mem_info->free_callback(mem_info->data);
+
+    nv_p2p_free_platform_data(&mem_info->page_table);
+}
+
+int nvidia_p2p_register_rsync_driver(
+    nvidia_p2p_rsync_driver_t *driver,
+    void *data
+)
+{
+    if (driver == NULL)
+    {
+        return -EINVAL;
+    }
+
+    if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver))
+    {
+        return -EINVAL;
+    }
+
+    if (driver->get_relaxed_ordering_mode == NULL ||
+        driver->put_relaxed_ordering_mode == NULL ||
+        driver->wait_for_rsync == NULL)
+    {
+        return -EINVAL;
+    }
+
+    return nv_register_rsync_driver(driver->get_relaxed_ordering_mode,
+                                    driver->put_relaxed_ordering_mode,
+                                    driver->wait_for_rsync, data);
+}
+
+EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
+
+void nvidia_p2p_unregister_rsync_driver(
+    nvidia_p2p_rsync_driver_t *driver,
+    void *data
+)
+{
+    if (driver == NULL)
+    {
+        WARN_ON(1);
+        return;
+    }
+
+    if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver))
+    {
+        WARN_ON(1);
+        return;
+    }
+
+    if (driver->get_relaxed_ordering_mode == NULL ||
+        driver->put_relaxed_ordering_mode == NULL ||
+        driver->wait_for_rsync == NULL)
+    {
+        WARN_ON(1);
+        return;
+    }
+
+    nv_unregister_rsync_driver(driver->get_relaxed_ordering_mode,
+                               driver->put_relaxed_ordering_mode,
+                               driver->wait_for_rsync, data);
+}
+
+EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
+
+int nvidia_p2p_get_rsync_registers(
+    nvidia_p2p_rsync_reg_info_t **reg_info
+)
+{
+    nv_linux_state_t *nvl;
+    nv_state_t *nv;
+    NV_STATUS status;
+    void *ptr = NULL;
+    NvU64 addr;
+    NvU64 size;
+    struct pci_dev
*ibmnpu = NULL; + NvU32 index = 0; + NvU32 count = 0; + nvidia_p2p_rsync_reg_info_t *info = NULL; + nvidia_p2p_rsync_reg_t *regs = NULL; + + if (reg_info == NULL) + { + return -EINVAL; + } + + status = os_alloc_mem((void**)&info, sizeof(*info)); + if (status != NV_OK) + { + return -ENOMEM; + } + + memset(info, 0, sizeof(*info)); + + info->version = NVIDIA_P2P_RSYNC_REG_INFO_VERSION; + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + count++; + } + + status = os_alloc_mem((void**)®s, (count * sizeof(*regs))); + if (status != NV_OK) + { + nvidia_p2p_put_rsync_registers(info); + UNLOCK_NV_LINUX_DEVICES(); + return -ENOMEM; + } + + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + + addr = 0; + size = 0; + + status = nv_get_ibmnpu_genreg_info(nv, &addr, &size, (void**)&ibmnpu); + if (status != NV_OK) + { + continue; + } + + ptr = nv_ioremap_nocache(addr, size); + if (ptr == NULL) + { + continue; + } + + regs[index].ptr = ptr; + regs[index].size = size; + regs[index].gpu = nvl->pci_dev; + regs[index].ibmnpu = ibmnpu; + regs[index].cluster_id = 0; + regs[index].socket_id = nv_get_ibmnpu_chip_id(nv); + + index++; + } + + UNLOCK_NV_LINUX_DEVICES(); + + info->regs = regs; + info->entries = index; + + if (info->entries == 0) + { + nvidia_p2p_put_rsync_registers(info); + return -ENODEV; + } + + *reg_info = info; + + return 0; +} + +EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers); + +void nvidia_p2p_put_rsync_registers( + nvidia_p2p_rsync_reg_info_t *reg_info +) +{ + NvU32 i; + nvidia_p2p_rsync_reg_t *regs = NULL; + + if (reg_info == NULL) + { + return; + } + + if (reg_info->regs) + { + for (i = 0; i < reg_info->entries; i++) + { + regs = ®_info->regs[i]; + + if (regs->ptr) + { + nv_iounmap(regs->ptr, regs->size); + } + } + + os_free_mem(reg_info->regs); + } + + os_free_mem(reg_info); +} + +EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers); diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h new file mode 100644 index 0000000..60bc859 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h @@ -0,0 +1,427 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_P2P_H_ +#define _NV_P2P_H_ + +/* + * NVIDIA P2P Structure Versioning + * + * For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will + * set the version field of the structure according to the definition used by + * the NVIDIA driver. The "major" field of the version is defined as the upper + * 16 bits, and the "minor" field of the version is defined as the lower 16 + * bits. The version field will always be the first 4 bytes of the structure, + * and third-party drivers should check the value of this field in structures + * allocated by the NVIDIA driver to ensure runtime compatibility. + * + * In general, version numbers will be incremented as follows: + * - When a backwards-compatible change is made to the structure layout, the + * minor version for that structure will be incremented. Third-party drivers + * built against an older minor version will continue to work with the newer + * minor version used by the NVIDIA driver, without recompilation. + * - When a breaking change is made to the structure layout, the major version + * will be incremented. Third-party drivers built against an older major + * version require at least recompilation and potentially additional updates + * to use the new API. + */ +#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000 +#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff + +#define NVIDIA_P2P_MAJOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16) + +#define NVIDIA_P2P_MINOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MINOR_VERSION_MASK)) + +#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v)) + +#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \ + (NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v)))) + +enum { + NVIDIA_P2P_ARCHITECTURE_TESLA = 0, + NVIDIA_P2P_ARCHITECTURE_FERMI, + NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI +}; + +#define NVIDIA_P2P_PARAMS_VERSION 0x00010001 + +enum { + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \ + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE +}; + +#define NVIDIA_P2P_GPU_UUID_LEN 16 + +typedef +struct nvidia_p2p_params { + uint32_t version; + uint32_t architecture; + union nvidia_p2p_mailbox_addresses { + struct { + uint64_t wmb_addr; + uint64_t wmb_data; + uint64_t rreq_addr; + uint64_t rcomp_addr; + uint64_t reserved[2]; + } fermi; + } addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1]; +} nvidia_p2p_params_t; + +/* + * Capability flag for users to detect + * driver support for persistent pages. + */ +extern int nvidia_p2p_cap_persistent_pages; +#define NVIDIA_P2P_CAP_PERSISTENT_PAGES + +/* + * This API is not supported. + */ +int nvidia_p2p_init_mapping(uint64_t p2p_token, + struct nvidia_p2p_params *params, + void (*destroy_callback)(void *data), + void *data); + +/* + * This API is not supported. 
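+ * (As the stubs in nv-p2p.c above show, both nvidia_p2p_init_mapping() and
+ * nvidia_p2p_destroy_mapping() unconditionally return -ENOTSUPP; they are
+ * presumably retained only so that existing third-party callers continue
+ * to compile and link.)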
+ */
+int nvidia_p2p_destroy_mapping(uint64_t p2p_token);
+
+enum nvidia_p2p_page_size_type {
+    NVIDIA_P2P_PAGE_SIZE_4KB = 0,
+    NVIDIA_P2P_PAGE_SIZE_64KB,
+    NVIDIA_P2P_PAGE_SIZE_128KB,
+    NVIDIA_P2P_PAGE_SIZE_COUNT
+};
+
+typedef
+struct nvidia_p2p_page {
+    uint64_t physical_address;
+    union nvidia_p2p_request_registers {
+        struct {
+            uint32_t wreqmb_h;
+            uint32_t rreqmb_h;
+            uint32_t rreqmb_0;
+            uint32_t reserved[3];
+        } fermi;
+    } registers;
+} nvidia_p2p_page_t;
+
+#define NVIDIA_P2P_PAGE_TABLE_VERSION   0x00010002
+
+#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
+    NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)
+
+typedef
+struct nvidia_p2p_page_table {
+    uint32_t version;
+    uint32_t page_size; /* enum nvidia_p2p_page_size_type */
+    struct nvidia_p2p_page **pages;
+    uint32_t entries;
+    uint8_t *gpu_uuid;
+} nvidia_p2p_page_table_t;
+
+/*
+ * @brief
+ *   Make the pages underlying a range of GPU virtual memory
+ *   accessible to a third-party device.
+ *
+ *   This API only supports pinned, GPU-resident memory, such as that provided
+ *   by cudaMalloc().
+ *
+ *   This API may sleep.
+ *
+ * @param[in]  p2p_token
+ *   A token that uniquely identifies the P2P mapping.
+ * @param[in]  va_space
+ *   A GPU virtual address space qualifier.
+ * @param[in]  virtual_address
+ *   The start address in the specified virtual address space.
+ *   Address must be aligned to the 64KB boundary.
+ * @param[in]  length
+ *   The length of the requested P2P mapping.
+ *   Length must be a multiple of 64KB.
+ * @param[out] page_table
+ *   A pointer to an array of structures with P2P PTEs.
+ * @param[in]  free_callback
+ *   A pointer to the function to be invoked when the pages
+ *   underlying the virtual address range are freed
+ *   implicitly.
+ *   If NULL, persistent pages will be returned.
+ *   This means the pages underlying the range of GPU virtual memory
+ *   will persist until explicitly freed by nvidia_p2p_put_pages().
+ *   Persistent GPU memory mappings are not supported on PowerPC,
+ *   MIG-enabled devices, APM-enabled devices and vGPU.
+ * @param[in]  data
+ *   A non-NULL opaque pointer to private data to be passed to the
+ *   callback function.
+ *
+ * @return
+ *   0           upon successful completion.
+ *   -EINVAL     if an invalid argument was supplied.
+ *   -ENOTSUPP   if the requested operation is not supported.
+ *   -ENOMEM     if the driver failed to allocate memory or if
+ *               insufficient resources were available to complete the operation.
+ *   -EIO        if an unknown error occurred.
+ */
+int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space,
+        uint64_t virtual_address,
+        uint64_t length,
+        struct nvidia_p2p_page_table **page_table,
+        void (*free_callback)(void *data),
+        void *data);
+
+#define NVIDIA_P2P_DMA_MAPPING_VERSION   0x00020003
+
+#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \
+    NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION)
+
+struct pci_dev;
+
+typedef
+struct nvidia_p2p_dma_mapping {
+    uint32_t version;
+    enum nvidia_p2p_page_size_type page_size_type;
+    uint32_t entries;
+    uint64_t *dma_addresses;
+    void *private;
+    struct pci_dev *pci_dev;
+} nvidia_p2p_dma_mapping_t;
+
+/*
+ * @brief
+ *   Make the physical pages retrieved using nvidia_p2p_get_pages accessible to
+ *   a third-party device.
+ *
+ * @param[in]  peer
+ *   The struct pci_dev * of the peer device that needs to DMA to/from the
+ *   mapping.
+ * @param[in]  page_table
+ *   The page table outlining the physical pages underlying the mapping, as
+ *   retrieved with nvidia_p2p_get_pages().
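+ *   The page table should remain valid for the lifetime of the DMA mapping.
+ *   A typical peer-driver sequence (illustrative only, not normative) is:
+ *   nvidia_p2p_get_pages() -> nvidia_p2p_dma_map_pages() -> DMA through
+ *   dma_mapping->dma_addresses[] -> nvidia_p2p_dma_unmap_pages() ->
+ *   nvidia_p2p_put_pages().
+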
+ * @param[out] dma_mapping + * The DMA mapping containing the DMA addresses to use on the third-party + * device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_map_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping **dma_mapping); + +/* + * @brief + * Unmap the physical pages previously mapped to the third-party device by + * nvidia_p2p_dma_map_pages(). + * + * @param[in] peer + * The struct pci_dev * of the peer device that the DMA mapping belongs to. + * @param[in] page_table + * The page table backing the DMA mapping to be unmapped. + * @param[in] dma_mapping + * The DMA mapping containing the DMA addresses used by the third-party + * device, as retrieved with nvidia_p2p_dma_map_pages(). After this call + * returns, neither this struct nor the addresses contained within will be + * valid for use by the third-party device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping *dma_mapping); + +/* + * @brief + * Release a set of pages previously made accessible to + * a third-party device. + * + * @param[in] p2p_token + * A token that uniquely identifies the P2P mapping. + * @param[in] va_space + * A GPU virtual address space qualifier. + * @param[in] virtual_address + * The start address in the specified virtual address space. + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Free a third-party P2P page table. (This function is a no-op.) + * + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Free a third-party P2P DMA mapping. (This function is a no-op.) + * + * @param[in] dma_mapping + * A pointer to the DMA mapping structure. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping); + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001 + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION) + +typedef +struct nvidia_p2p_rsync_driver { + uint32_t version; + int (*get_relaxed_ordering_mode)(int *mode, void *data); + void (*put_relaxed_ordering_mode)(int mode, void *data); + void (*wait_for_rsync)(struct pci_dev *gpu, void *data); +} nvidia_p2p_rsync_driver_t; + +/* + * @brief + * Registers the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. The NVIDIA driver would use, + * + * get_relaxed_ordering_mode to obtain a reference to the current relaxed + * ordering mode (treated as a boolean) from the rsync driver. 
+ * + * put_relaxed_ordering_mode to release a reference to the current relaxed + * ordering mode back to the rsync driver. The NVIDIA driver will call this + * function once for each successful call to get_relaxed_ordering_mode, and + * the relaxed ordering mode must not change until the last reference is + * released. + * + * wait_for_rsync to call into the rsync module to issue RSYNC. This callback + * can't sleep or re-schedule as it may arrive under spinlocks. + * @param[in] data + * A pointer to the rsync driver's private data. + * + * @Returns + * 0 upon successful completion. + * -EINVAL parameters are incorrect. + * -EBUSY if a module is already registered or GPU devices are in use. + */ +int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +/* + * @brief + * Unregisters the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. + * @param[in] data + * A pointer to the rsync driver's private data. + */ +void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001 + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION) + +typedef struct nvidia_p2p_rsync_reg { + void *ptr; + size_t size; + struct pci_dev *ibmnpu; + struct pci_dev *gpu; + uint32_t cluster_id; + uint32_t socket_id; +} nvidia_p2p_rsync_reg_t; + +typedef struct nvidia_p2p_rsync_reg_info { + uint32_t version; + nvidia_p2p_rsync_reg_t *regs; + size_t entries; +} nvidia_p2p_rsync_reg_info_t; + +/* + * @brief + * Gets rsync (GEN-ID) register information associated with the supported + * NPUs. + * + * The caller would use the returned information {GPU device, NPU device, + * socket-id, cluster-id} to pick the optimal generation registers to issue + * RSYNC (NVLink HW flush). + * + * The interface allocates structures to return the information, hence + * nvidia_p2p_put_rsync_registers() must be called to free the structures. + * + * Note, cluster-id is hardcoded to zero as early system configurations would + * only support cluster mode i.e. all devices would share the same cluster-id + * (0). In the future, appropriate kernel support would be needed to query + * cluster-ids. + * + * @param[out] reg_info + * A pointer to the rsync reg info structure. + * + * @Returns + * 0 Upon successful completion. Otherwise, returns negative value. + */ +int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info); + +/* + * @brief + * Frees the structures allocated by nvidia_p2p_get_rsync_registers(). + * + * @param[in] reg_info + * A pointer to the rsync reg info structure. + */ +void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info); + +#endif /* _NV_P2P_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c new file mode 100644 index 0000000..1fa530d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c @@ -0,0 +1,478 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+#include "nv-pat.h"
+
+int nv_pat_mode = NV_PAT_MODE_DISABLED;
+
+#if defined(NV_ENABLE_PAT_SUPPORT)
+/*
+ * Private PAT support for use by the NVIDIA driver. This is used on
+ * kernels that do not modify the PAT to include a write-combining
+ * entry.
+ *
+ * On kernels that have CONFIG_X86_PAT, the NVIDIA driver still checks that the
+ * WC entry is as expected before using PAT.
+ */
+
+#if defined(CONFIG_X86_PAT)
+#define NV_ENABLE_BUILTIN_PAT_SUPPORT 0
+#else
+#define NV_ENABLE_BUILTIN_PAT_SUPPORT 1
+#endif
+
+
+#define NV_READ_PAT_ENTRIES(pat1, pat2)   rdmsr(0x277, (pat1), (pat2))
+#define NV_WRITE_PAT_ENTRIES(pat1, pat2)  wrmsr(0x277, (pat1), (pat2))
+#define NV_PAT_ENTRY(pat, index) \
+    (((pat) & (0xff << ((index)*8))) >> ((index)*8))
+
+#if NV_ENABLE_BUILTIN_PAT_SUPPORT
+
+static unsigned long orig_pat1, orig_pat2;
+
+static inline void nv_disable_caches(unsigned long *cr4)
+{
+    unsigned long cr0 = read_cr0();
+    write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
+    wbinvd();
+    *cr4 = NV_READ_CR4();
+    if (*cr4 & 0x80) NV_WRITE_CR4(*cr4 & ~0x80);
+    __flush_tlb();
+}
+
+static inline void nv_enable_caches(unsigned long cr4)
+{
+    unsigned long cr0 = read_cr0();
+    wbinvd();
+    __flush_tlb();
+    write_cr0((cr0 & 0x9fffffff));
+    if (cr4 & 0x80) NV_WRITE_CR4(cr4);
+}
+
+static void nv_setup_pat_entries(void *info)
+{
+    unsigned long pat1, pat2, cr4;
+    unsigned long eflags;
+
+#if defined(NV_ENABLE_HOTPLUG_CPU)
+    int cpu = (NvUPtr)info;
+    if ((cpu != 0) && (cpu != (int)smp_processor_id()))
+        return;
+#endif
+
+    NV_SAVE_FLAGS(eflags);
+    NV_CLI();
+    nv_disable_caches(&cr4);
+
+    NV_READ_PAT_ENTRIES(pat1, pat2);
+
+    pat1 &= 0xffff00ff;
+    pat1 |= 0x00000100;
+
+    NV_WRITE_PAT_ENTRIES(pat1, pat2);
+
+    nv_enable_caches(cr4);
+    NV_RESTORE_FLAGS(eflags);
+}
+
+static void nv_restore_pat_entries(void *info)
+{
+    unsigned long cr4;
+    unsigned long eflags;
+
+#if defined(NV_ENABLE_HOTPLUG_CPU)
+    int cpu = (NvUPtr)info;
+    if ((cpu != 0) && (cpu != (int)smp_processor_id()))
+        return;
+#endif
+
+    NV_SAVE_FLAGS(eflags);
+    NV_CLI();
+    nv_disable_caches(&cr4);
+
+    NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2);
+
+    nv_enable_caches(cr4);
+    NV_RESTORE_FLAGS(eflags);
+}
+
+/*
+ * NOTE 1:
+ * Functions register_cpu_notifier(), unregister_cpu_notifier(),
+ * macros register_hotcpu_notifier, unregister_hotcpu_notifier,
+ * and CPU states CPU_DOWN_FAILED, CPU_DOWN_PREPARE
+ * were removed by the following commit:
+ *   2016 Dec 25: b272f732f888d4cf43c943a40c9aaa836f9b7431
+ *
+ * NV_REGISTER_CPU_NOTIFIER_PRESENT is true when
+ * register_cpu_notifier() is present.
+ *
+ * The functions cpuhp_setup_state() and cpuhp_remove_state() should be
+ * used as an alternative to the register_cpu_notifier() and
+ * unregister_cpu_notifier() functions. The following
+ * commit introduced these functions as well as the enum cpuhp_state.
+ *   2016 Feb 26: 5b7aa87e0482be768486e0c2277aa4122487eb9d
+ *
+ * NV_CPUHP_SETUP_STATE_PRESENT is true when cpuhp_setup_state() is present.
+ *
+ * For kernels where both cpuhp_setup_state() and register_cpu_notifier()
+ * are present, we still use register_cpu_notifier().
+ */
+
+static int
+nvidia_cpu_teardown(unsigned int cpu)
+{
+#if defined(NV_ENABLE_HOTPLUG_CPU)
+    unsigned int this_cpu = get_cpu();
+
+    if (this_cpu == cpu)
+        nv_restore_pat_entries(NULL);
+    else
+        smp_call_function(nv_restore_pat_entries, &cpu, 1);
+
+    put_cpu();
+#endif
+    return 0;
+}
+
+static int
+nvidia_cpu_online(unsigned int cpu)
+{
+#if defined(NV_ENABLE_HOTPLUG_CPU)
+    unsigned int this_cpu = get_cpu();
+
+    if (this_cpu == cpu)
+        nv_setup_pat_entries(NULL);
+    else
+        smp_call_function(nv_setup_pat_entries, &cpu, 1);
+
+    put_cpu();
+#endif
+    return 0;
+}
+
+static int nv_enable_builtin_pat_support(void)
+{
+    unsigned long pat1, pat2;
+
+    NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2);
+    nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2);
+
+    on_each_cpu(nv_setup_pat_entries, NULL, 1);
+
+    NV_READ_PAT_ENTRIES(pat1, pat2);
+    nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2);
+    return 1;
+}
+
+static void nv_disable_builtin_pat_support(void)
+{
+    unsigned long pat1, pat2;
+
+    on_each_cpu(nv_restore_pat_entries, NULL, 1);
+
+    nv_pat_mode = NV_PAT_MODE_DISABLED;
+
+    NV_READ_PAT_ENTRIES(pat1, pat2);
+    nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2);
+}
+
+static int
+nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+/* CPU_DOWN_FAILED was added by the following commit
+ *   2004 Oct 18: 71da3667be80d30121df3972caa0bf5684228379
+ *
+ * CPU_DOWN_PREPARE was added by the following commit
+ *   2004 Oct 18: d13d28de21d913aacd3c91e76e307fa2eb7835d8
+ *
+ * We use one ifdef for both macros since they were added on the same day.
+ */
+#if defined(CPU_DOWN_FAILED)
+    switch (action)
+    {
+        case CPU_DOWN_FAILED:
+        case CPU_ONLINE:
+            nvidia_cpu_online((NvUPtr)hcpu);
+            break;
+        case CPU_DOWN_PREPARE:
+            nvidia_cpu_teardown((NvUPtr)hcpu);
+            break;
+    }
+#endif
+    return NOTIFY_OK;
+}
+
+/*
+ * See NOTE 1.
+ * In order to avoid unused-variable warnings when compiling against
+ * kernel versions which include the changes of commit id
+ * b272f732f888d4cf43c943a40c9aaa836f9b7431, we have to protect the
+ * declaration of nv_hotcpu_nfb with #if.
+ *
+ * NV_REGISTER_CPU_NOTIFIER_PRESENT is checked before
+ * NV_CPUHP_SETUP_STATE_PRESENT to avoid compilation warnings for the
+ * unused variable nvidia_pat_online on kernels where both
+ * NV_REGISTER_CPU_NOTIFIER_PRESENT and NV_CPUHP_SETUP_STATE_PRESENT
+ * are true.
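+ *
+ * In sketch form (restating the conftest-driven selection used just below):
+ *
+ *   #if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
+ *       // legacy path: a struct notifier_block is (un)registered via
+ *       // register_hotcpu_notifier() / unregister_hotcpu_notifier()
+ *   #elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
+ *       // cpuhp path: the dynamic state id returned by
+ *       // cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...) is saved so it can
+ *       // later be passed to cpuhp_remove_state()
+ *   #endif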
+ */
+#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
+static struct notifier_block nv_hotcpu_nfb = {
+    .notifier_call = nvidia_cpu_callback,
+    .priority = 0
+};
+#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
+static enum cpuhp_state nvidia_pat_online;
+#endif
+
+static int
+nvidia_register_cpu_hotplug_notifier(void)
+{
+    int ret;
+/* See NOTE 1 */
+#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
+    /* register_hotcpu_notifier() returns 0 on success or -ENOENT on failure */
+    ret = register_hotcpu_notifier(&nv_hotcpu_nfb);
+#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
+    /*
+     * cpuhp_setup_state() returns a positive number on success when the
+     * state is CPUHP_AP_ONLINE_DYN. On failure, it returns a negative number.
+     */
+    ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                            "nvidia/pat:online",
+                            nvidia_cpu_online,
+                            nvidia_cpu_teardown);
+    if (ret < 0)
+    {
+        /*
+         * If cpuhp_setup_state() fails, cpuhp_remove_state()
+         * should never be called. If it gets called, we might remove
+         * some other state. Hence, explicitly set
+         * nvidia_pat_online to zero. This will trigger a BUG()
+         * in cpuhp_remove_state().
+         */
+        nvidia_pat_online = 0;
+    }
+    else
+    {
+        nvidia_pat_online = ret;
+    }
#else
+
+    /*
+     * This function should be a no-op for kernels which
+     * - do not have CONFIG_HOTPLUG_CPU enabled,
+     * - do not have PAT support,
+     * - do not have the cpuhp_setup_state() function.
+     *
+     * On such kernels, returning an error here would result in module init
+     * failure. Hence, return 0 here.
+     */
+    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
+    {
+        ret = 0;
+    }
+    else
+    {
+        ret = -EIO;
+    }
+#endif
+
+    if (ret < 0)
+    {
+        nv_disable_pat_support();
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: CPU hotplug notifier registration failed!\n");
+        return -EIO;
+    }
+    return 0;
+}
+
+static void
+nvidia_unregister_cpu_hotplug_notifier(void)
+{
+/* See NOTE 1 */
+#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
+    unregister_hotcpu_notifier(&nv_hotcpu_nfb);
+#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
+    cpuhp_remove_state(nvidia_pat_online);
+#endif
+    return;
+}
+
+
+#else /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
+
+static int nv_enable_builtin_pat_support(void)
+{
+    return 0;
+}
+static void nv_disable_builtin_pat_support(void)
+{
+}
+static int nvidia_register_cpu_hotplug_notifier(void)
+{
+    return -EIO;
+}
+static void nvidia_unregister_cpu_hotplug_notifier(void)
+{
+}
+
+#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
+
+static int nv_determine_pat_mode(void)
+{
+    unsigned int pat1, pat2, i;
+    NvU8 PAT_WC_index;
+
+    if (!test_bit(X86_FEATURE_PAT,
+                  (volatile unsigned long *)&boot_cpu_data.x86_capability))
+    {
+        if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
+            (boot_cpu_data.cpuid_level < 1) ||
+            ((cpuid_edx(1) & (1 << 16)) == 0) ||
+            (boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15))
+        {
+            nv_printf(NV_DBG_ERRORS,
+                "NVRM: CPU does not support the PAT.\n");
+            return NV_PAT_MODE_DISABLED;
+        }
+    }
+
+    NV_READ_PAT_ENTRIES(pat1, pat2);
+    PAT_WC_index = 0xf;
+
+    for (i = 0; i < 4; i++)
+    {
+        if (NV_PAT_ENTRY(pat1, i) == 0x01)
+        {
+            PAT_WC_index = i;
+            break;
+        }
+
+        if (NV_PAT_ENTRY(pat2, i) == 0x01)
+        {
+            PAT_WC_index = (i + 4);
+            break;
+        }
+    }
+
+    if (PAT_WC_index == 1)
+    {
+        return NV_PAT_MODE_KERNEL;
+    }
+    else if (PAT_WC_index != 0xf)
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: PAT configuration unsupported.\n");
+        return NV_PAT_MODE_DISABLED;
+    }
+    else
+    {
+#if NV_ENABLE_BUILTIN_PAT_SUPPORT
+        return NV_PAT_MODE_BUILTIN;
+#else
+        return
NV_PAT_MODE_DISABLED; +#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */ + } +} + + +int nv_enable_pat_support(void) +{ + if (nv_pat_mode != NV_PAT_MODE_DISABLED) + return 1; + + nv_pat_mode = nv_determine_pat_mode(); + + switch (nv_pat_mode) + { + case NV_PAT_MODE_DISABLED: + /* avoid the PAT if unavailable/unusable */ + return 0; + case NV_PAT_MODE_KERNEL: + /* inherit the kernel's PAT layout */ + return 1; + case NV_PAT_MODE_BUILTIN: + /* use builtin code to modify the PAT layout */ + break; + } + + return nv_enable_builtin_pat_support(); +} + +void nv_disable_pat_support(void) +{ + if (nv_pat_mode != NV_PAT_MODE_BUILTIN) + return; + + nv_disable_builtin_pat_support(); +} + +int nv_init_pat_support(nvidia_stack_t *sp) +{ + NV_STATUS status; + NvU32 data; + int disable_pat = 0; + int ret = 0; + + status = rm_read_registry_dword(sp, NULL, + NV_USE_PAGE_ATTRIBUTE_TABLE, &data); + if ((status == NV_OK) && ((int)data != ~0)) + { + disable_pat = (data == 0); + } + + if (!disable_pat) + { + nv_enable_pat_support(); + if (nv_pat_mode == NV_PAT_MODE_BUILTIN) + { + ret = nvidia_register_cpu_hotplug_notifier(); + return ret; + } + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: builtin PAT support disabled.\n"); + } + + return 0; +} + +void nv_teardown_pat_support(void) +{ + if (nv_pat_mode == NV_PAT_MODE_BUILTIN) + { + nv_disable_pat_support(); + nvidia_unregister_cpu_hotplug_notifier(); + } +} +#endif /* defined(NV_ENABLE_PAT_SUPPORT) */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h new file mode 100644 index 0000000..0d26a84 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _NV_PAT_H_ +#define _NV_PAT_H_ + +#include "nv-linux.h" + + +#if defined(NV_ENABLE_PAT_SUPPORT) +extern int nv_init_pat_support(nvidia_stack_t *sp); +extern void nv_teardown_pat_support(void); +extern int nv_enable_pat_support(void); +extern void nv_disable_pat_support(void); +#else +static inline int nv_init_pat_support(nvidia_stack_t *sp) +{ + (void)sp; + return 0; +} + +static inline void nv_teardown_pat_support(void) +{ + return; +} + +static inline int nv_enable_pat_support(void) +{ + return 1; +} + +static inline void nv_disable_pat_support(void) +{ + return; +} +#endif + +#endif /* _NV_PAT_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c new file mode 100644 index 0000000..e349473 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#include "nv-pci-table.h"
+
+/* Devices supported by RM */
+struct pci_device_id nv_pci_table[] = {
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask = ~0
+    },
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask = ~0
+    },
+    { }
+};
+
+/* Devices supported by all drivers in nvidia.ko */
+struct pci_device_id nv_module_device_table[] = {
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_VGA << 8),
+        .class_mask = ~0
+    },
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_DISPLAY_3D << 8),
+        .class_mask = ~0
+    },
+    {
+        .vendor = PCI_VENDOR_ID_NVIDIA,
+        .device = PCI_ANY_ID,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_BRIDGE_OTHER << 8),
+        .class_mask = ~0
+    },
+    { }
+};
+
+MODULE_DEVICE_TABLE(pci, nv_module_device_table);
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h
new file mode 100644
index 0000000..b28483b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_PCI_TABLE_H_
+#define _NV_PCI_TABLE_H_
+
+#include <linux/pci.h>
+
+extern struct pci_device_id nv_pci_table[];
+
+#endif /* _NV_PCI_TABLE_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c
new file mode 100644
index 0000000..a8320bd
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c
@@ -0,0 +1,1099 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv-pci-table.h"
+#include "nv-pci-types.h"
+#include "nv-pci.h"
+#include "nv-ibmnpu.h"
+#include "nv-frontend.h"
+#include "nv-msi.h"
+#include "nv-hypervisor.h"
+
+#if defined(NV_VGPU_KVM_BUILD)
+#include "nv-vgpu-vfio-interface.h"
+#endif
+
+#if defined(NV_SEQ_READ_ITER_PRESENT)
+#include <linux/seq_file.h>
+#include <linux/kernfs.h>
+#endif
+
+static void
+nv_check_and_exclude_gpu(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    char *uuid_str;
+
+    uuid_str = rm_get_gpu_uuid(sp, nv);
+    if (uuid_str == NULL)
+    {
+        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Unable to read UUID");
+        return;
+    }
+
+    if (nv_is_uuid_in_gpu_exclusion_list(uuid_str))
+    {
+        NV_STATUS rm_status = rm_exclude_adapter(sp, nv);
+        if (rm_status != NV_OK)
+        {
+            NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, rm_status,
+                                 "Failed to exclude GPU %s", uuid_str);
+            goto done;
+        }
+        nv->flags |= NV_FLAG_EXCLUDE;
+        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Excluded GPU %s successfully\n",
+                      uuid_str);
+    }
+
+done:
+    os_free_mem(uuid_str);
+}
+
+static NvBool nv_treat_missing_irq_as_error(void)
+{
+#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
+    return (nv_get_hypervisor_type() != OS_HYPERVISOR_HYPERV);
+#else
+    return NV_TRUE;
+#endif
+}
+
+static void nv_init_dynamic_power_management
+(
+    nvidia_stack_t *sp,
+    struct pci_dev *pci_dev
+)
+{
+    nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
+    nv_state_t *nv = NV_STATE_PTR(nvl);
+    char filename[50];
+    int ret;
+    NvBool pr3_acpi_method_present = NV_FALSE;
+
+    nvl->sysfs_config_file = NULL;
+
+    ret = snprintf(filename, sizeof(filename),
+                   "/sys/bus/pci/devices/%04x:%02x:%02x.0/config",
+                   NV_PCI_DOMAIN_NUMBER(pci_dev),
+                   NV_PCI_BUS_NUMBER(pci_dev),
+                   NV_PCI_SLOT_NUMBER(pci_dev));
+    if (ret > 0 && ret < sizeof(filename))
+    {
+        struct file *file = filp_open(filename, O_RDONLY, 0);
+        if (!IS_ERR(file))
+        {
+#if defined(NV_SEQ_READ_ITER_PRESENT)
+            /*
+             * Sanity check to confirm that the file path is mounted over
+             * the sysfs file system.
+             */
+            if ((file->f_inode != NULL) && (file->f_inode->i_sb != NULL) &&
+                (strcmp(file->f_inode->i_sb->s_id, "sysfs") == 0))
+            {
+                struct seq_file *sf = file->private_data;
+
+                /*
+                 * Sanity check to confirm that 'file->private_data'
+                 * actually points to a 'struct seq_file'.
+                 */
+                if ((sf != NULL) && (sf->file == file) && (sf->op == NULL))
+                {
+                    struct kernfs_open_file *of = sf->private;
+
+                    /*
+                     * Sanity check to confirm that 'sf->private'
+                     * actually points to a 'struct kernfs_open_file'.
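+                     * The full chain being validated is file->private_data
+                     * -> struct seq_file -> sf->private -> struct
+                     * kernfs_open_file, which is how kernfs wires up sysfs
+                     * files when reads go through seq_read_iter(). If any
+                     * link fails these checks, the file is simply closed
+                     * below instead of being cached.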
+                     */
+                    if ((of != NULL) && (of->file == file) &&
+                        (of->seq_file == sf))
+                    {
+                        nvl->sysfs_config_file = file;
+                    }
+                }
+            }
+
+            if (nvl->sysfs_config_file == NULL)
+            {
+                filp_close(file, NULL);
+            }
+#else
+            nvl->sysfs_config_file = file;
+#endif
+        }
+    }
+
+    if (nv_get_hypervisor_type() != OS_HYPERVISOR_UNKNOWN)
+    {
+        pr3_acpi_method_present = nv_acpi_power_resource_method_present(pci_dev);
+    }
+    else if (pci_dev->bus && pci_dev->bus->self)
+    {
+        pr3_acpi_method_present = nv_acpi_power_resource_method_present(pci_dev->bus->self);
+    }
+
+    rm_init_dynamic_power_management(sp, nv, pr3_acpi_method_present);
+}
+
+/* find nvidia devices and set initial state */
+static int
+nv_pci_probe
+(
+    struct pci_dev *pci_dev,
+    const struct pci_device_id *id_table
+)
+{
+    nv_state_t *nv = NULL;
+    nv_linux_state_t *nvl = NULL;
+    unsigned int i, j;
+    int flags = 0;
+    nvidia_stack_t *sp = NULL;
+    NvBool prev_nv_ats_supported = nv_ats_supported;
+    NV_STATUS status;
+    NvBool last_bar_64bit = NV_FALSE;
+
+    nv_printf(NV_DBG_SETUP, "NVRM: probing 0x%x 0x%x, class 0x%x\n",
+              pci_dev->vendor, pci_dev->device, pci_dev->class);
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return -1;
+    }
+
+
+#ifdef NV_PCI_SRIOV_SUPPORT
+    if (pci_dev->is_virtfn)
+    {
+#if defined(NV_VGPU_KVM_BUILD)
+        nvl = pci_get_drvdata(pci_dev->physfn);
+        if (!nvl)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x "
+                      "since PF is not bound to nvidia driver.\n",
+                      NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                      NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+            goto failed;
+        }
+
+        if (pci_dev->dev.bus->iommu_ops == NULL)
+        {
+            nv = NV_STATE_PTR(nvl);
+            if (rm_is_iommu_needed_for_sriov(sp, nv))
+            {
+                nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x "
+                          "since IOMMU is not present on the system.\n",
+                          NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                          NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+                goto failed;
+            }
+        }
+
+        if (nvidia_vgpu_vfio_probe(pci_dev) != NV_OK)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: Failed to register device to vGPU VFIO module");
+            goto failed;
+        }
+
+        nv_kmem_cache_free_stack(sp);
+        return 0;
+#else
+        nv_printf(NV_DBG_ERRORS, "NVRM: Ignoring probe for VF %04x:%02x:%02x.%x ",
+                  NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                  NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+
+        goto failed;
+#endif /* NV_VGPU_KVM_BUILD */
+    }
+#endif /* NV_PCI_SRIOV_SUPPORT */
+
+
+    if (!rm_is_supported_pci_device(
+                (pci_dev->class >> 16) & 0xFF,
+                (pci_dev->class >> 8) & 0xFF,
+                pci_dev->vendor,
+                pci_dev->device,
+                pci_dev->subsystem_vendor,
+                pci_dev->subsystem_device,
+                NV_FALSE /* print_legacy_warning */))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: ignoring the legacy GPU %04x:%02x:%02x.%x\n",
+                  NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
+                  NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
+        goto failed;
+    }
+
+    num_probed_nv_devices++;
+
+    if (pci_enable_device(pci_dev) != 0)
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: pci_enable_device failed, aborting\n");
+        goto failed;
+    }
+
+    if ((pci_dev->irq == 0 && !pci_find_capability(pci_dev, PCI_CAP_ID_MSIX))
+        && nv_treat_missing_irq_as_error())
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Can't find an IRQ for your NVIDIA card!\n");
+        nv_printf(NV_DBG_ERRORS, "NVRM: Please check your BIOS settings.\n");
+        nv_printf(NV_DBG_ERRORS, "NVRM: 
[Plug & Play OS] should be set to NO\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: [Assign IRQ to VGA] should be set to YES \n"); + goto failed; + } + + for (i = 0, j = 0; i < NVRM_PCICFG_NUM_BARS && j < NV_GPU_NUM_BARS; i++) + { + if (NV_PCI_RESOURCE_VALID(pci_dev, i)) + { +#if defined(NV_PCI_MAX_MMIO_BITS_SUPPORTED) + if ((NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64) && + ((NV_PCI_RESOURCE_START(pci_dev, i) >> NV_PCI_MAX_MMIO_BITS_SUPPORTED))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: This is a 64-bit BAR mapped above %dGB by the system\n" + "NVRM: BIOS or the %s kernel. This PCI I/O region assigned\n" + "NVRM: to your NVIDIA device is not supported by the kernel.\n" + "NVRM: BAR%d is %dM @ 0x%llx (PCI:%04x:%02x:%02x.%x)\n", + (1 << (NV_PCI_MAX_MMIO_BITS_SUPPORTED - 30)), + NV_KERNEL_NAME, i, + (NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, i), + NV_PCI_DOMAIN_NUMBER(pci_dev), + NV_PCI_BUS_NUMBER(pci_dev), NV_PCI_SLOT_NUMBER(pci_dev), + PCI_FUNC(pci_dev->devfn)); + goto failed; + } +#endif + if ((NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64) && + (NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_PREFETCH)) + { + struct pci_dev *bridge = pci_dev->bus->self; + NvU32 base_upper, limit_upper; + + last_bar_64bit = NV_TRUE; + + if (bridge == NULL) + goto next_bar; + + pci_read_config_dword(pci_dev, NVRM_PCICFG_BAR_OFFSET(i) + 4, + &base_upper); + if (base_upper == 0) + goto next_bar; + + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, + &base_upper); + pci_read_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, + &limit_upper); + + if ((base_upper != 0) && (limit_upper != 0)) + goto next_bar; + + nv_printf(NV_DBG_ERRORS, + "NVRM: This is a 64-bit BAR mapped above 4GB by the system\n" + "NVRM: BIOS or the %s kernel, but the PCI bridge\n" + "NVRM: immediately upstream of this GPU does not define\n" + "NVRM: a matching prefetchable memory window.\n", + NV_KERNEL_NAME); + nv_printf(NV_DBG_ERRORS, + "NVRM: This may be due to a known Linux kernel bug. Please\n" + "NVRM: see the README section on 64-bit BARs for additional\n" + "NVRM: information.\n"); + goto failed; + } + +next_bar: + // + // If we are here, then we have found a valid BAR -- 32 or 64-bit. + // + j++; + continue; + } + + // + // If last_bar_64bit is "true" then, we are looking at the 2nd (upper) + // half of the 64-bit BAR. This is typically all 0s which looks invalid + // but it's normal and not a problem and we can ignore it and continue. + // + if (last_bar_64bit) + { + last_bar_64bit = NV_FALSE; + continue; + } + + // Invalid 32 or 64-bit BAR. + nv_printf(NV_DBG_ERRORS, + "NVRM: This PCI I/O region assigned to your NVIDIA device is invalid:\n" + "NVRM: BAR%d is %dM @ 0x%llx (PCI:%04x:%02x:%02x.%x)\n", i, + (NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, i), + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + goto failed; + } + + if (!request_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS), + NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS), + nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: request_mem_region failed for %dM @ 0x%llx. 
This can\n" + "NVRM: occur when a driver such as rivatv is loaded and claims\n" + "NVRM: ownership of the device's registers.\n", + (NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS)); + goto failed; + } + + NV_KMALLOC(nvl, sizeof(nv_linux_state_t)); + if (nvl == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate memory\n"); + goto err_not_supported; + } + + os_mem_set(nvl, 0, sizeof(nv_linux_state_t)); + + nv = NV_STATE_PTR(nvl); + + pci_set_drvdata(pci_dev, (void *)nvl); + + /* default to 32-bit PCI bus address space */ + pci_dev->dma_mask = 0xffffffffULL; + + nvl->dev = &pci_dev->dev; + nvl->pci_dev = pci_dev; + nvl->dma_dev.dev = nvl->dev; + + nv->pci_info.vendor_id = pci_dev->vendor; + nv->pci_info.device_id = pci_dev->device; + nv->subsystem_id = pci_dev->subsystem_device; + nv->subsystem_vendor = pci_dev->subsystem_vendor; + nv->os_state = (void *) nvl; + nv->dma_dev = &nvl->dma_dev; + nv->pci_info.domain = NV_PCI_DOMAIN_NUMBER(pci_dev); + nv->pci_info.bus = NV_PCI_BUS_NUMBER(pci_dev); + nv->pci_info.slot = NV_PCI_SLOT_NUMBER(pci_dev); + nv->handle = pci_dev; + nv->flags |= flags; + + if (!nv_lock_init_locks(sp, nv)) + { + goto err_not_supported; + } + + nvl->all_mappings_revoked = NV_TRUE; + nvl->safe_to_mmap = NV_TRUE; + nvl->gpu_wakeup_callback_needed = NV_TRUE; + INIT_LIST_HEAD(&nvl->open_files); + + for (i = 0, j = 0; i < NVRM_PCICFG_NUM_BARS && j < NV_GPU_NUM_BARS; i++) + { + if ((NV_PCI_RESOURCE_VALID(pci_dev, i)) && + (NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_SPACE) + == PCI_BASE_ADDRESS_SPACE_MEMORY) + { + nv->bars[j].offset = NVRM_PCICFG_BAR_OFFSET(i); + nv->bars[j].cpu_address = NV_PCI_RESOURCE_START(pci_dev, i); + nv->bars[j].size = NV_PCI_RESOURCE_SIZE(pci_dev, i); + j++; + } + } + nv->regs = &nv->bars[NV_GPU_BAR_INDEX_REGS]; + nv->fb = &nv->bars[NV_GPU_BAR_INDEX_FB]; + + nv->interrupt_line = pci_dev->irq; + + NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_DISABLED); + nvl->numa_info.node_id = NUMA_NO_NODE; + + nv_init_ibmnpu_info(nv); + + + + + +#if defined(NVCPU_PPC64LE) + // Use HW NUMA support as a proxy for ATS support. This is true in the only + // PPC64LE platform where ATS is currently supported (IBM P9). + nv_ats_supported &= nv_platform_supports_numa(nvl); +#else + + + + + +#endif + if (nv_ats_supported) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "ATS supported by this GPU!\n"); + } + else + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "ATS not supported by this GPU. 
" + "Disabling ATS support for all the GPUs in the system!\n"); + } + + pci_set_master(pci_dev); + +#if defined(CONFIG_VGA_ARB) && !defined(NVCPU_PPC64LE) +#if defined(VGA_DEFAULT_DEVICE) +#if defined(NV_VGA_TRYGET_PRESENT) + vga_tryget(VGA_DEFAULT_DEVICE, VGA_RSRC_LEGACY_MASK); +#endif +#endif + vga_set_legacy_decoding(pci_dev, VGA_RSRC_NONE); +#endif + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping nv_pci_probe\n"); + goto err_not_supported; + } + + if ((rm_is_supported_device(sp, nv)) != NV_OK) + goto err_not_supported; + + if (!rm_init_private_state(sp, nv)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "rm_init_private_state() failed!\n"); + goto err_zero_dev; + } + + nv_printf(NV_DBG_INFO, + "NVRM: PCI:%04x:%02x:%02x.%x (%04x:%04x): BAR0 @ 0x%llx (%lluMB)\n", + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot, + PCI_FUNC(pci_dev->devfn), nv->pci_info.vendor_id, nv->pci_info.device_id, + nv->regs->cpu_address, (nv->regs->size >> 20)); + nv_printf(NV_DBG_INFO, + "NVRM: PCI:%04x:%02x:%02x.%x (%04x:%04x): BAR1 @ 0x%llx (%lluMB)\n", + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot, + PCI_FUNC(pci_dev->devfn), nv->pci_info.vendor_id, nv->pci_info.device_id, + nv->fb->cpu_address, (nv->fb->size >> 20)); + + num_nv_devices++; + + /* + * The newly created nvl object is added to the nv_linux_devices global list + * only after all the initialization operations for that nvl object are + * completed, so as to protect against simultaneous lookup operations which + * may discover a partially initialized nvl object in the list + */ + LOCK_NV_LINUX_DEVICES(); + + nv_linux_add_device_locked(nvl); + + UNLOCK_NV_LINUX_DEVICES(); + + if (nvidia_frontend_add_device((void *)&nv_fops, nvl) != 0) + goto err_remove_device; + +#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT) + pm_vt_switch_required(nvl->dev, NV_TRUE); +#endif + + nv_init_dynamic_power_management(sp, pci_dev); + + nv_procfs_add_gpu(nvl); + + /* Parse and set any per-GPU registry keys specified. 
*/ + nv_parse_per_device_option_string(sp); + + rm_set_rm_firmware_requested(sp, nv); + +#if defined(NV_VGPU_KVM_BUILD) + if (nvidia_vgpu_vfio_probe(nvl->pci_dev) != NV_OK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to register device to vGPU VFIO module"); + nvidia_frontend_remove_device((void *)&nv_fops, nvl); + goto err_vgpu_kvm; + } +#endif + + nv_check_and_exclude_gpu(sp, nv); + +#if defined(DPM_FLAG_NO_DIRECT_COMPLETE) + dev_pm_set_driver_flags(nvl->dev, DPM_FLAG_NO_DIRECT_COMPLETE); +#elif defined(DPM_FLAG_NEVER_SKIP) + dev_pm_set_driver_flags(nvl->dev, DPM_FLAG_NEVER_SKIP); +#endif + + nv_kmem_cache_free_stack(sp); + + return 0; + +#if defined(NV_VGPU_KVM_BUILD) +err_vgpu_kvm: +#endif + nv_procfs_remove_gpu(nvl); + rm_cleanup_dynamic_power_management(sp, nv); +#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT) + pm_vt_switch_unregister(nvl->dev); +#endif +err_remove_device: + LOCK_NV_LINUX_DEVICES(); + nv_linux_remove_device_locked(nvl); + UNLOCK_NV_LINUX_DEVICES(); +err_zero_dev: + rm_free_private_state(sp, nv); +err_not_supported: + nv_ats_supported = prev_nv_ats_supported; + nv_destroy_ibmnpu_info(nv); + nv_lock_destroy_locks(sp, nv); + if (nvl != NULL) + { + NV_KFREE(nvl, sizeof(nv_linux_state_t)); + } + release_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS), + NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS)); + NV_PCI_DISABLE_DEVICE(pci_dev); + pci_set_drvdata(pci_dev, NULL); +failed: + nv_kmem_cache_free_stack(sp); + return -1; +} + +static void +nv_pci_remove(struct pci_dev *pci_dev) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + nvidia_stack_t *sp = NULL; + + nv_printf(NV_DBG_SETUP, "NVRM: removing GPU %04x:%02x:%02x.%x\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + + +#ifdef NV_PCI_SRIOV_SUPPORT + if (pci_dev->is_virtfn) + { +#if defined(NV_VGPU_KVM_BUILD) + /* Arg 2 == NV_TRUE means that the PCI device should be removed */ + nvidia_vgpu_vfio_remove(pci_dev, NV_TRUE); +#endif /* NV_VGPU_KVM_BUILD */ + return; + } +#endif /* NV_PCI_SRIOV_SUPPORT */ + + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return; + } + + LOCK_NV_LINUX_DEVICES(); + nvl = pci_get_drvdata(pci_dev); + if (!nvl || (nvl->pci_dev != pci_dev)) + { + goto done; + } + + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + + /* + * Sanity check: A removed device shouldn't have a non-zero usage_count. + * For eGPU, fall off the bus along with clients active is a valid scenario. + * Hence skipping the sanity check for eGPU. + */ + if ((NV_ATOMIC_READ(nvl->usage_count) != 0) && !(nv->is_external_gpu)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Attempting to remove minor device %u with non-zero usage count!\n", + nvl->minor_num); + + /* + * We can't return from this function without corrupting state, so we wait for + * the usage count to go to zero. + */ + while (NV_ATOMIC_READ(nvl->usage_count) != 0) + { + + /* + * While waiting, release the locks so that other threads can make + * forward progress. 
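+             * Both locks are taken in the same order here as elsewhere in
+             * this file (the nv_linux_devices list lock first, then
+             * ldata_lock), so dropping and re-taking them in this loop
+             * cannot introduce a lock-order deadlock.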
+             */
+            up(&nvl->ldata_lock);
+            UNLOCK_NV_LINUX_DEVICES();
+
+            os_delay(500);
+
+            /* Re-acquire the locks before checking again */
+            LOCK_NV_LINUX_DEVICES();
+            nvl = pci_get_drvdata(pci_dev);
+            if (!nvl)
+            {
+                /* The device was not found, which should not happen */
+                nv_printf(NV_DBG_ERRORS,
+                          "NVRM: Failed removal of minor device!\n");
+                WARN_ON(1);
+                goto done;
+            }
+            nv = NV_STATE_PTR(nvl);
+            down(&nvl->ldata_lock);
+        }
+
+        nv_printf(NV_DBG_ERRORS,
+                  "NVRM: Continuing with GPU removal for minor device %u\n",
+                  nvl->minor_num);
+    }
+
+    rm_check_for_gpu_surprise_removal(sp, nv);
+
+    nv_linux_remove_device_locked(nvl);
+
+    /* Remove proc entry for this GPU */
+    nv_procfs_remove_gpu(nvl);
+
+    rm_cleanup_dynamic_power_management(sp, nv);
+
+    nv->removed = NV_TRUE;
+
+    UNLOCK_NV_LINUX_DEVICES();
+
+#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT)
+    pm_vt_switch_unregister(&pci_dev->dev);
+#endif
+
+#if defined(NV_VGPU_KVM_BUILD)
+    /* Arg 2 == NV_TRUE means that the PCI device should be removed */
+    nvidia_vgpu_vfio_remove(pci_dev, NV_TRUE);
+#endif
+
+    /* Update the frontend data structures */
+    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    {
+        nvidia_frontend_remove_device((void *)&nv_fops, nvl);
+    }
+
+    if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) || (nv->flags & NV_FLAG_OPEN))
+    {
+        nv_acpi_unregister_notifier(nvl);
+        if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE)
+        {
+            rm_disable_gpu_state_persistence(sp, nv);
+        }
+        nv_shutdown_adapter(sp, nv, nvl);
+        nv_dev_free_stacks(nvl);
+    }
+
+    if (nvl->sysfs_config_file != NULL)
+    {
+        filp_close(nvl->sysfs_config_file, NULL);
+        nvl->sysfs_config_file = NULL;
+    }
+
+    nv_unregister_ibmnpu_devices(nv);
+    nv_destroy_ibmnpu_info(nv);
+
+    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    {
+        nv_lock_destroy_locks(sp, nv);
+    }
+
+    num_probed_nv_devices--;
+
+    pci_set_drvdata(pci_dev, NULL);
+
+    rm_i2c_remove_adapters(sp, nv);
+    rm_free_private_state(sp, nv);
+    release_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS),
+                       NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS));
+
+    num_nv_devices--;
+
+    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    {
+        NV_PCI_DISABLE_DEVICE(pci_dev);
+        NV_KFREE(nvl, sizeof(nv_linux_state_t));
+    }
+    else
+    {
+        up(&nvl->ldata_lock);
+    }
+
+    nv_kmem_cache_free_stack(sp);
+    return;
+
+done:
+    UNLOCK_NV_LINUX_DEVICES();
+    nv_kmem_cache_free_stack(sp);
+}
+
+static void
+nv_pci_shutdown(struct pci_dev *pci_dev)
+{
+    nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
+
+    if ((nvl != NULL) && nvl->is_forced_shutdown)
+    {
+        nvl->is_forced_shutdown = NV_FALSE;
+        return;
+    }
+
+    /* pci_clear_master is not defined for !CONFIG_PCI */
+#ifdef CONFIG_PCI
+    pci_clear_master(pci_dev);
+#endif
+
+    /* SHH HW mandates a 1us delay for the effects of disabling Bus Master
+     * Enable (BME) to take hold. We add the 1us delay for all chips, since
+     * it is not in the data path and is cheap; creating a HAL for this
+     * would be overkill.
+     */
+    udelay(1);
+}
+
+/*!
+ * @brief This function accepts PCI information corresponding to a GPU
+ * and returns a reference to the nv_linux_state_t corresponding to that GPU.
+ *
+ * @param[in] domain   PCI domain number for the GPU to be found.
+ * @param[in] bus      PCI bus number for the GPU to be found.
+ * @param[in] slot     PCI slot number for the GPU to be found.
+ * @param[in] function PCI function number for the GPU to be found.
+ *
+ * @return Pointer to nv_linux_state_t for the GPU if it is found, or NULL otherwise.
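+ *
+ * Unlike find_uuid(), this returns the pointer after LOCK_NV_LINUX_DEVICES()
+ * has been dropped and without holding nvl->ldata_lock, so the caller must
+ * tolerate the device disappearing underneath it.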
+ */ +nv_linux_state_t * find_pci(NvU32 domain, NvU8 bus, NvU8 slot, NvU8 function) +{ + nv_linux_state_t *nvl = NULL; + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + + if (nv->pci_info.domain == domain && + nv->pci_info.bus == bus && + nv->pci_info.slot == slot && + nv->pci_info.function == function) + { + break; + } + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +int nvidia_dev_get_pci_info(const NvU8 *uuid, struct pci_dev **pci_dev_out, + NvU64 *dma_start, NvU64 *dma_limit) +{ + nv_linux_state_t *nvl; + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + *pci_dev_out = nvl->pci_dev; + *dma_start = nvl->dma_dev.addressable_range.start; + *dma_limit = nvl->dma_dev.addressable_range.limit; + + up(&nvl->ldata_lock); + + return 0; +} + +NvU8 nv_find_pci_capability(struct pci_dev *pci_dev, NvU8 capability) +{ + u16 status = 0; + u8 cap_ptr = 0, cap_id = 0xff; + + pci_read_config_word(pci_dev, PCI_STATUS, &status); + status &= PCI_STATUS_CAP_LIST; + if (!status) + return 0; + + switch (pci_dev->hdr_type) { + case PCI_HEADER_TYPE_NORMAL: + case PCI_HEADER_TYPE_BRIDGE: + pci_read_config_byte(pci_dev, PCI_CAPABILITY_LIST, &cap_ptr); + break; + default: + return 0; + } + + do { + cap_ptr &= 0xfc; + pci_read_config_byte(pci_dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); + if (cap_id == capability) + return cap_ptr; + pci_read_config_byte(pci_dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr); + } while (cap_ptr && cap_id != 0xff); + + return 0; +} + +/* make sure the pci_driver called probe for all of our devices. + * we've seen cases where rivafb claims the device first and our driver + * doesn't get called. + */ +int +nv_pci_count_devices(void) +{ + struct pci_dev *pci_dev; + int count = 0; + + if (NVreg_RegisterPCIDriver == 0) + { + return 0; + } + + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); + while (pci_dev) + { + if (rm_is_supported_pci_device( + PCI_BASE_CLASS_DISPLAY, + PCI_CLASS_DISPLAY_VGA & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_TRUE /* print_legacy_warning */)) + { + count++; + } + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pci_dev); + } + + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, NULL); + while (pci_dev) + { + if (rm_is_supported_pci_device( + (pci_dev->class >> 16) & 0xFF, + (pci_dev->class >> 8) & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_TRUE /* print_legacy_warning */)) + { + count++; + } + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pci_dev); + } + + return count; +} + +#if defined(NV_PCI_ERROR_RECOVERY) +static pci_ers_result_t +nv_pci_error_detected( + struct pci_dev *pci_dev, + nv_pci_channel_state_t error +) +{ + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + + if ((nvl == NULL) || (nvl->pci_dev != pci_dev)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid device!\n", __FUNCTION__); + return PCI_ERS_RESULT_NONE; + } + + /* + * Tell Linux to continue recovery of the device. The kernel will enable + * MMIO for the GPU and call the mmio_enabled callback. 
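+     * (PCI_ERS_RESULT_CAN_RECOVER keeps the link up and lets recovery
+     * proceed to nv_pci_mmio_enabled() below, which always ends the
+     * sequence with PCI_ERS_RESULT_DISCONNECT, since RM and clients do not
+     * yet support full recovery.)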
+ */ + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static pci_ers_result_t +nv_pci_mmio_enabled( + struct pci_dev *pci_dev +) +{ + NV_STATUS status = NV_OK; + nv_stack_t *sp = NULL; + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NULL; + + if ((nvl == NULL) || (nvl->pci_dev != pci_dev)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid device!\n", __FUNCTION__); + goto done; + } + + nv = NV_STATE_PTR(nvl); + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed to allocate stack!\n", + __FUNCTION__); + goto done; + } + + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "A fatal error was detected.\n"); + + /* + * MMIO should be re-enabled now. If we still get bad reads, there's + * likely something wrong with the adapter itself that will require a + * reset. This should let us know whether the GPU has completely fallen + * off the bus or just did something the host didn't like. + */ + status = rm_is_supported_device(sp, nv); + if (status != NV_OK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "The kernel has enabled MMIO for the device,\n" + "NVRM: but it still appears unreachable. The device\n" + "NVRM: will not function properly until it is reset.\n"); + } + + status = rm_log_gpu_crash(sp, nv); + if (status != NV_OK) + { + NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, status, + "Failed to log crash data\n"); + goto done; + } + +done: + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } + + /* + * Tell Linux to abandon recovery of the device. The kernel might be able + * to recover the device, but RM and clients don't yet support that. + */ + return PCI_ERS_RESULT_DISCONNECT; +} + +struct pci_error_handlers nv_pci_error_handlers = { + .error_detected = nv_pci_error_detected, + .mmio_enabled = nv_pci_mmio_enabled, +}; +#endif + +#if defined(CONFIG_PM) +extern struct dev_pm_ops nv_pm_ops; +#endif + +struct pci_driver nv_pci_driver = { + .name = MODULE_NAME, + .id_table = nv_pci_table, + .probe = nv_pci_probe, + .remove = nv_pci_remove, + .shutdown = nv_pci_shutdown, +#if defined(CONFIG_PM) + .driver.pm = &nv_pm_ops, +#endif +#if defined(NV_PCI_ERROR_RECOVERY) + .err_handler = &nv_pci_error_handlers, +#endif +}; + +void nv_pci_unregister_driver(void) +{ + if (NVreg_RegisterPCIDriver == 0) + { + return; + } + return pci_unregister_driver(&nv_pci_driver); +} + +int nv_pci_register_driver(void) +{ + if (NVreg_RegisterPCIDriver == 0) + { + return 0; + } + return pci_register_driver(&nv_pci_driver); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c new file mode 100644 index 0000000..51be49f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +/*! + * @brief Unpowergate the display. + * + * Increment the device's usage counter, run pm_request_resume(dev) + * and return its result. + * + * For more details on runtime pm functions, please check the below + * files in the Linux kernel: + * + * include/linux/pm_runtime.h + * include/linux/pm.h + * or + * https://www.kernel.org/doc/Documentation/power/runtime_pm.txt + * + * pm_request_resume() submits a request to execute the subsystem-level + * resume callback for the device (the request is represented by a work + * item in pm_wq); returns 0 on success, 1 if the device's runtime PM + * status was already 'active', or error code if the request hasn't + * been queued up. + * + * @param[in] nv Per gpu linux state + * + * @returns NV_STATUS + */ +NV_STATUS NV_API_CALL nv_soc_pm_unpowergate( + nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvS32 ret = -EBUSY; + + ret = pm_runtime_get(nvl->dev); + + if (ret == 1) + { + nv_printf(NV_DBG_INFO, "NVRM: device was already unpowergated\n"); + } + else if (ret == -EINPROGRESS) + { + /* + * pm_runtime_get() internally calls __pm_runtime_resume(...RPM_ASYNC) + * which internally calls rpm_resume() and this function will throw + * "-EINPROGRESS" if it is being called when device state is + * RPM_RESUMING and RPM_ASYNC or RPM_NOWAIT is set. + */ + nv_printf(NV_DBG_INFO, "NVRM: device is already unpowergating\n"); + } + else if (ret < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: unpowergate unsuccessful. ret: %d\n", ret); + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +/*! + * @brief Powergate the display. + * + * Decrement the device's usage counter; if the result is 0 then run + * pm_request_idle(dev) and return its result. + * + * For more details on runtime pm functions, please check the below + * files in the Linux kernel: + * + * include/linux/pm_runtime.h + * include/linux/pm.h + * or + * https://www.kernel.org/doc/Documentation/power/runtime_pm.txt + * + * @param[in] nv Per gpu linux state + * + * @returns NV_STATUS + */ +NV_STATUS NV_API_CALL nv_soc_pm_powergate( + nv_state_t *nv) +{ + NV_STATUS status = NV_ERR_GENERIC; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvS32 ret = -EBUSY; + + ret = pm_runtime_put(nvl->dev); + + if (ret == 0) + { + status = NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: powergate unsuccessful. ret: %d\n", ret); + } + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c new file mode 100644 index 0000000..1f23e8f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c @@ -0,0 +1,1544 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
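Before moving on to nv-platform.c: the two runtime-PM helpers just defined, nv_soc_pm_unpowergate() and nv_soc_pm_powergate(), wrap pm_runtime_get()/pm_runtime_put(). A minimal consumer-side sketch of the same machinery, assuming a kernel new enough (v5.10+) to provide pm_runtime_resume_and_get(); the example_ name is hypothetical:

    #include <linux/pm_runtime.h>

    /* Hypothetical consumer: resume the device synchronously, touch
     * the hardware, then drop the usage count so the device may
     * powergate again once idle. */
    static int example_touch_hw(struct device *dev)
    {
        int ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
            return ret;  /* usage count already dropped on failure */

        /* ... access registers while the device is powered ... */

        pm_runtime_put(dev);
        return 0;
    }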
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#include "nv-platform.h" +#include "nv-frontend.h" +#include "nv-linux.h" +#include + +static irqreturn_t nvidia_soc_isr(int irq, void *arg) +{ + irqreturn_t ret; + nv_linux_state_t *nvl = (void *) arg; + nv_state_t *nv = NV_STATE_PTR(nvl); + NvU32 irq_count; + + NV_SPIN_LOCK(&nvl->soc_isr_lock); + + /* + * > Only 1 interrupt at a time is allowed to be serviced. + * > So when bh_pending is true, bottom half is scheduled/active + * and serving previous interrupt by disabling all interrupts + * at interrupt controller level, also here GPU lock is already + * taken so this interrupt will anyways be blocked until bottom + * half releases GPU lock, so return early for now. + * > Once bottom half processed earlier interrupt, it will release + * GPU lock and re-enable all interrupts and set bh_pending to + * false. Upon re-enabling, this interrupt will be serviced + * again because all interrupts that we care are level triggered. 
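The comment above describes the top-half/bottom-half split that request_threaded_irq() (used by nv_request_soc_irq() below) implements; a generic sketch of that pattern, with hypothetical example_ names:

    #include <linux/interrupt.h>

    /* Hypothetical top half: runs in hard-IRQ context; returning
     * IRQ_WAKE_THREAD wakes the threaded bottom half. */
    static irqreturn_t example_hardirq(int irq, void *data)
    {
        return IRQ_WAKE_THREAD;
    }

    /* Hypothetical bottom half: runs in a kernel thread, where
     * sleeping locks (like the GPU lock mentioned above) are legal. */
    static irqreturn_t example_thread_fn(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    /* Registration:
     *   request_threaded_irq(irq, example_hardirq, example_thread_fn,
     *                        IRQF_SHARED, "example", data);
     */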
+ */ + for (irq_count = 0; irq_count < nv->num_soc_irqs; irq_count++) + { + if (nv->soc_irq_info[irq_count].bh_pending == NV_TRUE) + { + NV_SPIN_UNLOCK(&nvl->soc_isr_lock); + return IRQ_HANDLED; + } + } + nv->current_soc_irq = irq; + + ret = nvidia_isr(irq, arg); + if (ret == IRQ_WAKE_THREAD) + { + for (irq_count = 0; irq_count < nv->num_soc_irqs; irq_count++) + { + if (nv->soc_irq_info[irq_count].irq_num == irq) + { + nv->soc_irq_info[irq_count].bh_pending = NV_TRUE; + } + } + } + else + { + nv->current_soc_irq = -1; + } + + NV_SPIN_UNLOCK(&nvl->soc_isr_lock); + + return ret; +} + +NvS32 nv_request_soc_irq( + nv_linux_state_t *nvl, + NvU32 irq, + nv_soc_irq_type_t type, + NvU32 flags, + NvU32 priv_data) +{ + nv_state_t *nv = NV_STATE_PTR(nvl); + NvS32 ret; + NvU32 irq_index; + + if (nv->num_soc_irqs >= NV_MAX_SOC_IRQS) + { + nv_printf(NV_DBG_ERRORS, "Exceeds Maximum SOC interrupts\n"); + return -EINVAL; + } + + ret = request_threaded_irq(irq, nvidia_soc_isr, nvidia_isr_kthread_bh, + flags, dev_name(nvl->dev), (void *)nvl); + if (ret != 0) + { + nv_printf(NV_DBG_ERRORS, "nv_request_soc_irq for irq %d failed\n", irq); + return ret; + } + + if (nv->flags & NV_FLAG_SOC_IGPU) + { + disable_irq_nosync(irq); + } + + irq_index = nv->num_soc_irqs; + nv->soc_irq_info[irq_index].irq_num = irq; + nv->soc_irq_info[irq_index].irq_type = type; + if (type == NV_SOC_IRQ_GPIO_TYPE) + { + nv->soc_irq_info[irq_index].irq_data.gpio_num = priv_data; + } + else if (type == NV_SOC_IRQ_DPAUX_TYPE) + { + nv->soc_irq_info[irq_index].irq_data.dpaux_instance = priv_data; + } + nv->num_soc_irqs++; + + return ret; +} + +nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t *nv) +{ + int count; + + for (count = 0; count < nv->num_soc_irqs; count++) + { + if (nv->soc_irq_info[count].irq_num == nv->current_soc_irq) + { + return nv->soc_irq_info[count].irq_type; + } + } + + return NV_SOC_IRQ_INVALID_TYPE; +} + +NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *nv, NvU32 *priv_data) +{ + int count; + + if (nv->current_soc_irq == -1) + { + nv_printf(NV_DBG_ERRORS, "%s:No SOC interrupt in progress\n", __func__); + return NV_ERR_GENERIC; + } + + for (count = 0; count < nv->num_soc_irqs; count++) + { + if (nv->soc_irq_info[count].irq_num == nv->current_soc_irq) + { + if (nv->soc_irq_info[count].irq_type == NV_SOC_IRQ_GPIO_TYPE) + { + *priv_data = nv->soc_irq_info[count].irq_data.gpio_num; + } + else if (nv->soc_irq_info[count].irq_type == NV_SOC_IRQ_DPAUX_TYPE) + { + *priv_data = nv->soc_irq_info[count].irq_data.dpaux_instance; + } + } + } + + return NV_OK; +} + +static void nv_soc_free_irq_by_type(nv_state_t *nv, nv_soc_irq_type_t type) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int count; + + if ((nv->num_soc_irqs == 0) || (type == 0)) + { + return; + } + + for (count = 0; count < NV_MAX_SOC_IRQS; count++) + { + if (type == nv->soc_irq_info[count].irq_type) + { + free_irq(nv->soc_irq_info[count].irq_num, (void *)nvl); + nv->soc_irq_info[count].irq_type = 0; + nv->soc_irq_info[count].irq_num = 0; + nv->soc_irq_info[count].bh_pending = NV_FALSE; + nv->num_soc_irqs--; + } + } +} + +int nv_soc_register_irqs(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc; + int dpauxindex; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /* Skip registering interrupts for OpenRM */ + if (nv_is_rm_firmware_active(nv)) + return 0; + + rc = nv_request_soc_irq(nvl, nv->interrupt_line, + NV_SOC_IRQ_DISPLAY_TYPE, + nv_default_irq_flags(nv), 0); + if (rc 
!= 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "failed to request display irq (%d)\n", rc);
+        return rc;
+    }
+
+    rc = nv_request_soc_irq(nvl, nv->hdacodec_irq, NV_SOC_IRQ_HDACODEC_TYPE,
+                            nv_default_irq_flags(nv), 0);
+    if (rc != 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "failed to request hdacodec irq (%d)\n", rc);
+        free_irq(nv->interrupt_line, (void *) nvl);
+        return rc;
+    }
+
+    for (dpauxindex = 0; dpauxindex < nv->num_dpaux_instance; dpauxindex++)
+    {
+        rc = nv_request_soc_irq(nvl, nv->dpaux_irqs[dpauxindex],
+                                NV_SOC_IRQ_DPAUX_TYPE,
+                                nv_default_irq_flags(nv), dpauxindex);
+        if (rc != 0)
+        {
+            nv_printf(NV_DBG_ERRORS, "failed to request dpaux irq (%d)\n", rc);
+            free_irq(nv->interrupt_line, (void *)nvl);
+            free_irq(nv->hdacodec_irq, (void *)nvl);
+            return rc;
+        }
+    }
+
+    return 0;
+}
+
+void nv_soc_free_irqs(nv_state_t *nv)
+{
+    nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_DISPLAY_TYPE);
+    nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_HDACODEC_TYPE);
+    nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_DPAUX_TYPE);
+    nv_soc_free_irq_by_type(nv, NV_SOC_IRQ_GPIO_TYPE);
+
+
+
+
+
+
+
+
+}
+
+static void nv_platform_free_device_dpaux(nv_state_t *nv)
+{
+    int dpauxindex;
+
+    for (dpauxindex = 0; dpauxindex < nv->num_dpaux_instance; dpauxindex++)
+    {
+        if (nv->dpaux[dpauxindex] != NULL &&
+            nv->dpaux[dpauxindex]->size != 0 &&
+            nv->dpaux[dpauxindex]->cpu_address != 0)
+        {
+            release_mem_region(nv->dpaux[dpauxindex]->cpu_address,
+                               nv->dpaux[dpauxindex]->size);
+        }
+
+        if (nv->dpaux[dpauxindex] != NULL)
+        {
+            NV_KFREE(nv->dpaux[dpauxindex], sizeof(*(nv->dpaux[dpauxindex])));
+        }
+    }
+}
+
+static int nv_platform_alloc_device_dpaux(struct platform_device *plat_dev, nv_state_t *nv)
+{
+    static const size_t MAX_LENGTH = 10;
+    const char *sdpaux = "dpaux";
+    int dpauxindex = 0;
+    int irq = 0;
+    int rc = 0;
+    int num_dpaux_instance = 0;
+    const struct resource *res;
+    phys_addr_t res_addr = 0;
+    resource_size_t res_size = 0;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    nv->num_dpaux_instance = 0;
+    if (!of_property_read_u32(nvl->dev->of_node, "nvidia,num-dpaux-instance", &num_dpaux_instance))
+    {
+        nv->num_dpaux_instance = (unsigned) num_dpaux_instance;
+        nv_printf(NV_DBG_INFO, "NVRM: Found %d dpAux instances in device tree.\n",
+                  num_dpaux_instance);
+    }
+
+    if (nv->num_dpaux_instance > NV_MAX_DPAUX_NUM_DEVICES)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: Number of dpAux instances [%d] in the device tree exceeds"
+                  " the allowed maximum [%d]. Initializing %d dpAux instances.\n", nv->num_dpaux_instance,
+                  NV_MAX_DPAUX_NUM_DEVICES, NV_MAX_DPAUX_NUM_DEVICES);
+        nv->num_dpaux_instance = NV_MAX_DPAUX_NUM_DEVICES;
+    }
+
+    for (dpauxindex = 0; dpauxindex < nv->num_dpaux_instance; dpauxindex++)
+    {
+        char sdpaux_device[MAX_LENGTH];
+        snprintf(sdpaux_device, sizeof(sdpaux_device), "%s%d", sdpaux, dpauxindex);
+
+        NV_KMALLOC(nv->dpaux[dpauxindex], sizeof(*(nv->dpaux[dpauxindex])));
+        if (nv->dpaux[dpauxindex] == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate nv->dpaux[%d] memory\n", dpauxindex);
+            rc = -ENOMEM;
+            goto err_free_dpaux_dev;
+        }
+
+        os_mem_set(nv->dpaux[dpauxindex], 0, sizeof(*(nv->dpaux[dpauxindex])));
+
+        res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, sdpaux_device);
+        if (!res)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO memory resource\n");
+            rc = -ENXIO;
+            goto err_free_dpaux_dev;
+        }
+        res_addr = res->start;
+        res_size = res->end - res->start;
+
+        irq = platform_get_irq_byname(plat_dev, sdpaux_device);
+        if (irq < 0)
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO irq resource\n");
+            rc = irq;
+            goto err_free_dpaux_dev;
+        }
+
+        if (!request_mem_region(res_addr, res_size, nv_device_name))
+        {
+            nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region failed for %pa\n",
+                      res_addr);
+            rc = -ENXIO;
+            goto err_free_dpaux_dev;
+        }
+
+        nv->dpaux[dpauxindex]->cpu_address = res_addr;
+        nv->dpaux[dpauxindex]->size = res_size;
+        nv->dpaux_irqs[dpauxindex] = irq;
+    }
+
+    return rc;
+
+err_free_dpaux_dev:
+    nv_platform_free_device_dpaux(nv);
+
+    return rc;
+}
+
+NV_STATUS NV_API_CALL nv_soc_device_reset(nv_state_t *nv)
+{
+    NV_STATUS status = NV_OK;
+
+    int rc = 0;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    /*
+     * Skip all reset functions if the 'nvidia,skip-clk-rsts' DT property
+     * is present. This property is currently present in the System FPGA device
+     * tree because the BPMP firmware isn't available on FPGA yet.
+ */ + bool skip_clk_rsts = of_property_read_bool(nvl->dev->of_node, "nvidia,skip-clk-rsts"); + if (!skip_clk_rsts) + { + // Resetting the Display + if (nvl->nvdisplay_reset != NULL) + { + rc = reset_control_reset(nvl->nvdisplay_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: reset_control_reset failed, rc: %d\n", rc); + goto out; + } + } + + // Resetting the dpaux + if (nvl->dpaux0_reset != NULL) + { + rc = reset_control_reset(nvl->dpaux0_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: reset_control_reset failed, rc: %d\n", rc); + goto out; + } + } + + // Resetting the DSI + if (nvl->dsi_core_reset != NULL) + { + rc = reset_control_reset(nvl->dsi_core_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: reset_control_reset failed, rc: %d\n", rc); + goto out; + } + } + } + +out: + return status; +} + +NV_STATUS NV_API_CALL nv_soc_mipi_cal_reset(nv_state_t *nv) +{ + NV_STATUS status = NV_OK; + + int rc = 0; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + bool skip_clk_rsts = of_property_read_bool(nvl->dev->of_node, "nvidia,skip-clk-rsts"); + + if (skip_clk_rsts) + return NV_OK; + + if (nvl->mipi_cal_reset != NULL) + { + rc = reset_control_reset(nvl->mipi_cal_reset); + if (rc != 0) + { + status = NV_ERR_GENERIC; + nv_printf(NV_DBG_ERRORS, "NVRM: mipi_cal reset_control_reset failed, rc: %d\n", rc); + } + } + else + { + status = NV_ERR_GENERIC; + } + + return status; +} + +// This function gets called only for Tegra +static void nv_platform_get_iommu_availability(struct platform_device *plat_dev, + nv_state_t *nv) +{ + struct device_node *np = plat_dev->dev.of_node; + struct device_node *iso_np = NULL; + + // Assume ISO iommu is present + nv->iso_iommu_present = NV_TRUE; + + iso_np = of_parse_phandle(np, "iommus", 0); + if (iso_np) { + if (!of_device_is_available(iso_np)) { + nv->iso_iommu_present = NV_FALSE; + nv_printf(NV_DBG_INFO, "NVRM: ISO iommu device is NOT available\n"); + } + of_node_put(iso_np); + } else { + nv_printf(NV_DBG_INFO, "NVRM: unable to parse ISO DT phandle\n"); + } +} + +static int nv_platform_register_mapping_devs(struct platform_device *plat_dev, + nv_state_t *nv) +{ + struct device_node *np = plat_dev->dev.of_node; + struct device_node *niso_np = NULL; + struct platform_device *niso_plat_dev = NULL; + int rc = 0; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + nv->niso_dma_dev = NULL; + + niso_np = of_get_child_by_name(np, "nvdisplay-niso"); + if (niso_np == NULL) + { + nv_printf(NV_DBG_INFO, "NVRM: no nvdisplay-niso child node\n"); + goto register_mapping_devs_end; + } + +#if defined(NV_DEVM_OF_PLATFORM_POPULATE_PRESENT) + rc = devm_of_platform_populate(&plat_dev->dev); +#else + nv_printf(NV_DBG_ERRORS, "NVRM: devm_of_platform_populate not present\n"); + rc = -ENOSYS; +#endif + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: devm_of_platform_populate failed\n"); + goto register_mapping_devs_end; + } + + niso_plat_dev = of_find_device_by_node(niso_np); + if (niso_plat_dev == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: no nvdisplay-niso platform devices\n"); + rc = -ENODEV; + goto register_mapping_devs_end; + } + +#if defined(NV_OF_DMA_CONFIGURE_PRESENT) + rc = of_dma_configure(&niso_plat_dev->dev, niso_np, true); +#else + nv_printf(NV_DBG_ERRORS, "NVRM: of_dma_configure not present\n"); + rc = -ENOSYS; +#endif + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv_of_dma_configure failed for niso\n"); + goto 
register_mapping_devs_end;
+    }
+
+    nvl->niso_dma_dev.dev = &niso_plat_dev->dev;
+    nvl->niso_dma_dev.addressable_range.start = 0;
+    nvl->niso_dma_dev.addressable_range.limit = NV_U64_MAX;
+    nv->niso_dma_dev = &nvl->niso_dma_dev;
+
+register_mapping_devs_end:
+    of_node_put(niso_np);
+    return rc;
+}
+
+static int nv_platform_parse_dcb(struct platform_device *plat_dev,
+                                 nv_state_t *nv)
+{
+    int ret;
+
+#if defined(NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT)
+    struct device_node *np = plat_dev->dev.of_node;
+    ret = of_property_count_elems_of_size(np, "nvidia,dcb-image", sizeof(u8));
+#else
+    nv_printf(NV_DBG_ERRORS, "of_property_count_elems_of_size not present\n");
+    return -ENOSYS;
+#endif
+    if (ret > 0)
+    {
+        nv->soc_dcb_size = ret;
+        /* Allocate dcb array */
+        NV_KMALLOC(nv->soc_dcb_blob, nv->soc_dcb_size);
+        if (nv->soc_dcb_blob == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS, "failed to allocate dcb array");
+            return -ENOMEM;
+        }
+    }
+
+#if defined(NV_OF_PROPERTY_READ_VARIABLE_U8_ARRAY_PRESENT)
+    /* 'np' is only in scope when the count-elems conftest passed, so
+     * use the platform device's of_node directly here. */
+    ret = of_property_read_variable_u8_array(plat_dev->dev.of_node,
+                                             "nvidia,dcb-image",
+                                             nv->soc_dcb_blob, 0,
+                                             nv->soc_dcb_size);
+#else
+    nv_printf(NV_DBG_ERRORS, "of_property_read_variable_u8_array not present\n");
+    ret = -ENOSYS;
+#endif
+    /*
+     * of_property_read_variable_u8_array() returns the number of
+     * elements read on success or a negative errno on failure;
+     * IS_ERR() does not apply to a plain integer return value.
+     */
+    if (ret < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "failed to read dcb blob");
+        NV_KFREE(nv->soc_dcb_blob, nv->soc_dcb_size);
+        nv->soc_dcb_blob = NULL;
+        nv->soc_dcb_size = 0;
+        return ret;
+    }
+
+    return 0;
+}
+
+
+static int nv_platform_device_display_probe(struct platform_device *plat_dev)
+{
+    nv_state_t *nv = NULL;
+    nv_linux_state_t *nvl = NULL;
+    nvidia_stack_t *sp = NULL;
+    phys_addr_t res_addr = 0;
+    resource_size_t res_size = 0;
+    int irq = 0;
+    int rc = 0;
+    const struct resource *res;
+    bool skip_clk_rsts;
+    NV_STATUS status;
+
+    rc = nv_kmem_cache_alloc_stack(&sp);
+    if (rc < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed to allocate stack!\n",
+                  __FUNCTION__);
+        return rc;
+    }
+
+    NV_KMALLOC(nvl, sizeof(*nvl));
+    if (nvl == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate nvl memory\n");
+        rc = -ENOMEM;
+        goto err_free_stack;
+    }
+    os_mem_set(nvl, 0, sizeof(*nvl));
+
+    nv = NV_STATE_PTR(nvl);
+
+    platform_set_drvdata(plat_dev, (void *)nvl);
+
+    nvl->dev = &plat_dev->dev;
+
+    /*
+     * fill SOC dma device information
+     */
+    nvl->dma_dev.dev = nvl->dev;
+    nvl->dma_dev.addressable_range.start = 0;
+    nvl->dma_dev.addressable_range.limit = NV_U64_MAX;
+    nv->dma_dev = &nvl->dma_dev;
+
+    nvl->tce_bypass_enabled = NV_TRUE;
+
+    NV_KMALLOC(nv->regs, sizeof(*(nv->regs)));
+    if (nv->regs == NULL)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate nv->regs memory\n");
+        rc = -ENOMEM;
+        goto err_free_nvl;
+    }
+    os_mem_set(nv->regs, 0, sizeof(*(nv->regs)));
+
+    res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "nvdisplay");
+    if (!res)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO memory resource\n");
+        rc = -ENODEV;
+        goto err_free_nv_regs;
+    }
+    res_addr = res->start;
+    res_size = res->end - res->start;
+
+    irq = platform_get_irq_byname(plat_dev, "nvdisplay");
+    if (irq < 0)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get IO irq resource\n");
+        rc = -ENODEV;
+        goto err_free_nv_regs;
+    }
+
+    if (!request_mem_region(res_addr, res_size, nv_device_name))
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region failed for %pa\n",
+                  res_addr);
+        rc = -ENOMEM;
+        goto err_free_nv_regs;
+    }
+
+    nv->regs->cpu_address = res_addr;
+    nv->regs->size = res_size;
+    nv->interrupt_line = irq;
+    nv->flags = NV_FLAG_SOC_DISPLAY;
+
+    nv->os_state = (void *) nvl;
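A note on the resource handling in the probe path above: every platform_get_resource_byname()/request_mem_region() pair must be unwound by hand on each error path. On kernels that provide it (v5.7+), the managed helper below folds lookup, reservation, and mapping into one call; it is shown for comparison only, since this driver deliberately reserves without ioremapping (RM maps the registers later). The example_ name is hypothetical.

    #include <linux/platform_device.h>

    /* Hypothetical sketch: devm_platform_ioremap_resource_byname()
     * looks up the named MEM resource, request()s and ioremap()s it,
     * and releases both automatically when the driver detaches. */
    static void __iomem *example_map_named(struct platform_device *pdev,
                                           const char *name)
    {
        return devm_platform_ioremap_resource_byname(pdev, name);
    }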
+ + // Check if ISO SMMU status + nv_platform_get_iommu_availability(plat_dev, nv); + + rc = nv_platform_register_mapping_devs(plat_dev, nv); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate niso platform device\n"); + goto err_release_mem_region_regs; + } + + rc = nv_platform_alloc_device_dpaux(plat_dev, nv); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to alloc DpAux device\n"); + goto err_release_mem_region_regs; + } + + NV_KMALLOC(nv->hdacodec_regs, sizeof(*(nv->hdacodec_regs))); + if (nv->hdacodec_regs == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate hdacodecregs memory\n"); + rc = -ENOMEM; + goto err_release_mem_region_regs; + } + os_mem_set(nv->hdacodec_regs, 0, sizeof(*(nv->hdacodec_regs))); + + res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "hdacodec"); + if (!res) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get hdacodec IO memory resource\n"); + rc = -ENODEV; + goto err_free_nv_codec_regs; + } + res_addr = res->start; + res_size = res->end - res->start; + + if (!request_mem_region(res_addr, res_size, nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region of hdacodec failed for %pa\n", + res_addr); + rc = -ENOMEM; + goto err_free_nv_codec_regs; + } + + nv->hdacodec_regs->cpu_address = res_addr; + nv->hdacodec_regs->size = res_size; + + nv->hdacodec_irq = platform_get_irq_byname(plat_dev, "hdacodec"); + if (nv->hdacodec_irq < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get HDACODEC IO irq resource\n"); + rc = -ENODEV; + goto err_release_mem_hdacodec_region_regs; + } + + + NV_KMALLOC(nv->mipical_regs, sizeof(*(nv->mipical_regs))); + if (nv->mipical_regs == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate mipical registers memory\n"); + rc = -ENOMEM; + goto err_release_mem_hdacodec_region_regs; + } + os_mem_set(nv->mipical_regs, 0, sizeof(*(nv->mipical_regs))); + + res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, "mipical"); + if (!res) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get mipical IO memory resource\n"); + rc = -ENODEV; + goto err_free_mipical_regs; + } + res_addr = res->start; + res_size = res->end - res->start; + + if (!request_mem_region(res_addr, res_size, nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: request_mem_region of mipical failed for %pa\n", + res_addr); + rc = -ENOMEM; + goto err_free_mipical_regs; + } + + nv->mipical_regs->cpu_address = res_addr; + nv->mipical_regs->size = res_size; + + // Enabling power management for the device. + pm_runtime_enable(&plat_dev->dev); + + /* + * Skip all clock/reset functions if the 'nvidia,skip-clk-rsts' DT property + * is present. This property is currently present in the System FPGA device + * tree because the BPMP firmware isn't available on FPGA yet. + */ + skip_clk_rsts = of_property_read_bool(nvl->dev->of_node, "nvidia,skip-clk-rsts"); + if (!skip_clk_rsts) + { + /* + * Getting all the display-clock handles + * from BPMP FW at the time of probe. + */ + status = nv_clk_get_handles(nv); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get clock handles\n"); + rc = -EPERM; + goto err_release_mem_mipical_region_regs; + } + + /* + * Getting dpaux-reset handles + * from device tree at the time of probe. 
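The blocks just below map IS_ERR() results to NULL so that a missing reset entry simply disables the corresponding reset_control_reset() call in nv_soc_device_reset(). The reset framework can express the same intent directly; a minimal sketch, assuming a kernel that provides devm_reset_control_get_optional() (the example_ name is hypothetical):

    #include <linux/reset.h>

    /* Hypothetical alternative: the _optional getter returns NULL
     * (not an ERR_PTR) when the device tree simply lacks the entry,
     * so "absent" and "broken" stay distinguishable, and
     * reset_control_reset(NULL) is a no-op. */
    static struct reset_control *example_get_reset(struct device *dev,
                                                   const char *id)
    {
        return devm_reset_control_get_optional(dev, id);
    }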
+ */ + nvl->dpaux0_reset = devm_reset_control_get(nvl->dev, "dpaux0_reset"); + if (IS_ERR(nvl->dpaux0_reset)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->dpaux0_reset)); + nvl->dpaux0_reset = NULL; + } + + /* + * Getting display-reset handles + * from device tree at the time of probe. + */ + nvl->nvdisplay_reset = devm_reset_control_get(nvl->dev, "nvdisplay_reset"); + if (IS_ERR(nvl->nvdisplay_reset)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->nvdisplay_reset)); + nvl->nvdisplay_reset = NULL; + } + + /* + * Getting dsi-core reset handles + * from device tree at the time of probe. + */ + nvl->dsi_core_reset = devm_reset_control_get(nvl->dev, "dsi_core_reset"); + if (IS_ERR(nvl->dsi_core_reset)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->dsi_core_reset)); + nvl->dsi_core_reset = NULL; + } + + /* + * Getting mipi_cal reset handle + * from device tree at the time of probe. + */ + nvl->mipi_cal_reset = devm_reset_control_get(nvl->dev, "mipi_cal_reset"); + if (IS_ERR(nvl->mipi_cal_reset)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: mipi_cal devm_reset_control_get failed, err: %ld\n", PTR_ERR(nvl->mipi_cal_reset)); + nvl->mipi_cal_reset = NULL; + } + } + + status = nv_imp_get_bpmp_data(nvl); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get bpmp data\n"); + rc = -EPERM; + goto err_destroy_clk_handles; + } + + status = nv_imp_icc_get(nv); + if (status != NV_OK) + { + // + // nv_imp_icc_get errors are normally treated as fatal, but ICC is + // expected to be disabled on AV + L (causing NV_ERR_NOT_SUPPORTED to + // be returned), so this is not treated as fatal. + // + if (status != NV_ERR_NOT_SUPPORTED) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get icc handle\n"); + rc = -EPERM; + goto err_destroy_clk_handles; + } + } + /* + * Get the backlight device name + */ + of_property_read_string(nvl->dev->of_node, "nvidia,backlight-name", + &nvl->backlight.device_name); + + /* + * TODO bug 2100708: the fake domain is used to opt out of some RM paths + * that cause issues otherwise, see the bug for details. 
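For context on nv_imp_icc_get() above: IMP (isochronous memory provisioning) sits on top of the kernel interconnect framework, whose consumer API looks roughly like the sketch below. The path name "read-1", the example_ name, and the helper shape are all hypothetical; bandwidth values are in kBps.

    #include <linux/interconnect.h>

    /* Hypothetical bandwidth vote: acquire a DT-described path, vote
     * average/peak bandwidth, then release the path. */
    static int example_vote_display_bw(struct device *dev, u32 kbps)
    {
        struct icc_path *path = of_icc_get(dev, "read-1");
        int ret;

        if (IS_ERR(path))
            return PTR_ERR(path);

        ret = icc_set_bw(path, kbps, kbps);
        icc_put(path);
        return ret;
    }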
+ */ + nv->pci_info.domain = 2; + nv->pci_info.bus = 0; + nv->pci_info.slot = 0; + + num_probed_nv_devices++; + + if (!nv_lock_init_locks(sp, nv)) + { + rc = -EPERM; + goto err_put_icc_handle; + } + + nvl->safe_to_mmap = NV_TRUE; + INIT_LIST_HEAD(&nvl->open_files); + NV_SPIN_LOCK_INIT(&nvl->soc_isr_lock); + + if (!rm_init_private_state(sp, nv)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_private_state() failed!\n"); + rc = -EPERM; + goto err_destroy_lock; + } + + num_nv_devices++; + + /* + * The newly created nvl object is added to the nv_linux_devices global list + * only after all the initialization operations for that nvl object are + * completed, so as to protect against simultaneous lookup operations which + * may discover a partially initialized nvl object in the list + */ + LOCK_NV_LINUX_DEVICES(); + + nv_linux_add_device_locked(nvl); + + UNLOCK_NV_LINUX_DEVICES(); + + if (nvidia_frontend_add_device((void *)&nv_fops, nvl) != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to add device\n"); + rc = -ENODEV; + goto err_remove_device; + } + + rm_set_rm_firmware_requested(sp, nv); + + /* + * Parse DCB blob + */ + rc = nv_platform_parse_dcb(plat_dev, nv); + if (rc != 0) + { + goto err_remove_device; + } + + /* + * Parse display rm sw-soc-chip-id + */ + rc = of_property_read_u32(nvl->dev->of_node, "nvidia,disp-sw-soc-chip-id", + &nv->disp_sw_soc_chip_id); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Unable to read disp_sw_soc_chip_id\n"); + goto err_remove_device; + } + + /* + * TODO: procfs, vt_switch, dynamic_power_management + */ + + nv_kmem_cache_free_stack(sp); + + dma_set_mask(nv->dma_dev->dev, DMA_BIT_MASK(39)); +#if defined(NV_DMA_SET_MASK_AND_COHERENT_PRESENT) + dma_set_mask_and_coherent(nv->niso_dma_dev->dev, DMA_BIT_MASK(39)); +#else + nv_printf(NV_DBG_INFO, "NVRM: Using default 32-bit DMA mask\n"); +#endif + + return rc; + +err_remove_device: + LOCK_NV_LINUX_DEVICES(); + nv_linux_remove_device_locked(nvl); + UNLOCK_NV_LINUX_DEVICES(); + rm_free_private_state(sp, nv); +err_destroy_lock: + nv_lock_destroy_locks(sp, nv); +err_put_icc_handle: + nv_imp_icc_put(nv); +err_destroy_clk_handles: + nv_clk_clear_handles(nv); +err_remove_dpaux_device: + nv_platform_free_device_dpaux(nv); +err_release_mem_mipical_region_regs: + release_mem_region(nv->mipical_regs->cpu_address, nv->mipical_regs->size); +err_free_mipical_regs: + NV_KFREE(nv->mipical_regs, sizeof(*(nv->mipical_regs))); +err_release_mem_hdacodec_region_regs: + release_mem_region(nv->hdacodec_regs->cpu_address, nv->hdacodec_regs->size); +err_release_mem_region_regs: + release_mem_region(nv->regs->cpu_address, nv->regs->size); +err_free_nv_codec_regs: + NV_KFREE(nv->hdacodec_regs, sizeof(*(nv->hdacodec_regs))); +err_free_nv_regs: + NV_KFREE(nv->regs, sizeof(*(nv->regs))); +err_free_nvl: + NV_KFREE(nvl, sizeof(*nvl)); + platform_set_drvdata(plat_dev, NULL); +err_free_stack: + nv_kmem_cache_free_stack(sp); + + return rc; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static int nv_platform_device_display_remove(struct platform_device *plat_dev) +{ + nv_linux_state_t *nvl = 
NULL; + nv_state_t *nv; + nvidia_stack_t *sp = NULL; + int rc; + + nv_printf(NV_DBG_SETUP, "NVRM: removing SOC Display device\n"); + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc < 0) + return rc; + + LOCK_NV_LINUX_DEVICES(); + nvl = platform_get_drvdata(plat_dev); + if (!nvl || (nvl->dev != &plat_dev->dev)) + { + goto done; + } + + nv_linux_remove_device_locked(nvl); + + /* + * TODO: procfs + */ + + down(&nvl->ldata_lock); + UNLOCK_NV_LINUX_DEVICES(); + + /* + * TODO: vt_switch, dynamic_power_management + */ + + nvidia_frontend_remove_device((void *)&nv_fops, nvl); + + nv = NV_STATE_PTR(nvl); + + if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) || (nv->flags & NV_FLAG_OPEN)) + { + nv_acpi_unregister_notifier(nvl); + if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + rm_disable_gpu_state_persistence(sp, nv); + } + nv_shutdown_adapter(sp, nv, nvl); + nv_dev_free_stacks(nvl); + } + + nv_lock_destroy_locks(sp, nv); + + num_probed_nv_devices--; + + rm_free_private_state(sp, nv); + + release_mem_region(nv->mipical_regs->cpu_address, nv->mipical_regs->size); + + NV_KFREE(nv->mipical_regs, sizeof(*(nv->mipical_regs))); + + release_mem_region(nv->hdacodec_regs->cpu_address, nv->hdacodec_regs->size); + + NV_KFREE(nv->hdacodec_regs, sizeof(*(nv->hdacodec_regs))); + + release_mem_region(nv->regs->cpu_address, nv->regs->size); + + NV_KFREE(nv->regs, sizeof(*(nv->regs))); + + nv_imp_icc_put(nv); + + nv_platform_free_device_dpaux(nv); + + /* + * Clearing all the display-clock handles + * at the time of device remove. + */ + nv_clk_clear_handles(nv); + + // Disabling power management for the device. + pm_runtime_disable(&plat_dev->dev); + + num_nv_devices--; + + NV_KFREE(nv->soc_dcb_blob, nv->soc_dcb_size); + + NV_KFREE(nvl, sizeof(*nvl)); + + nv_kmem_cache_free_stack(sp); + + return 0; + +done: + UNLOCK_NV_LINUX_DEVICES(); + nv_kmem_cache_free_stack(sp); + + return 0; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static int nv_platform_device_probe(struct platform_device *plat_dev) +{ + int rc = 0; + + if (plat_dev->dev.of_node) + { + + + + + + + + { + + rc = nv_platform_device_display_probe(plat_dev); + + } + } + else + { + + rc = nv_platform_device_display_probe(plat_dev); + + } + + return rc; +} + +static int nv_platform_device_remove(struct platform_device *plat_dev) +{ + int rc = 0; + + if (plat_dev->dev.of_node) + { + + + + + + + + { + + rc = nv_platform_device_display_remove(plat_dev); + + } + } + else + { + + rc = nv_platform_device_display_remove(plat_dev); + + } + + return rc; +} + +const struct of_device_id nv_platform_device_table[] = +{ + { .compatible = "nvidia,tegra234-display",}, + + + + {}, +}; +MODULE_DEVICE_TABLE(of, nv_platform_device_table); + +#if defined(CONFIG_PM) +extern struct dev_pm_ops nv_pm_ops; +#endif + +struct platform_driver nv_platform_driver = { + .driver = { + .name = "nv_platform", + .of_match_table = nv_platform_device_table, + .owner = THIS_MODULE, +#if defined(CONFIG_PM) + .pm = &nv_pm_ops, +#endif + }, + .probe = nv_platform_device_probe, + .remove = nv_platform_device_remove, +}; + +int nv_platform_count_devices(void) +{ + int count = 0; + struct device_node *np = NULL; + + while ((np = of_find_matching_node(np, nv_platform_device_table))) + { + count++; + } + + return count; +} + +int nv_platform_register_driver(void) +{ + return platform_driver_register(&nv_platform_driver); +} + +void nv_platform_unregister_driver(void) +{ + 
platform_driver_unregister(&nv_platform_driver); +} + +extern int tegra_fuse_control_read(unsigned long addr, unsigned int *data); + +unsigned int NV_API_CALL nv_soc_fuse_register_read (unsigned int addr) +{ + unsigned int data = 0; + +#if NV_IS_EXPORT_SYMBOL_PRESENT_tegra_fuse_control_read + tegra_fuse_control_read ((unsigned long)(addr), &data); +#endif + + return data; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c new file mode 100644 index 0000000..b9d8524 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if defined(CONFIG_PROC_FS) + +#include "nv-procfs-utils.h" + +void +nv_procfs_unregister_all(struct proc_dir_entry *entry, struct proc_dir_entry *delimiter) +{ +#if defined(NV_PROC_REMOVE_PRESENT) + proc_remove(entry); +#else + while (entry) + { + struct proc_dir_entry *next = entry->next; + if (entry->subdir) + nv_procfs_unregister_all(entry->subdir, delimiter); + remove_proc_entry(entry->name, entry->parent); + if (entry == delimiter) + break; + entry = next; + } +#endif +} +#endif + diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c new file mode 100644 index 0000000..0100990 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c @@ -0,0 +1,1477 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
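A note on nv_procfs_unregister_all() in nv-procfs-utils.c above: the #else branch re-implements, for kernels predating proc_remove(), what that helper does natively. A minimal sketch of the modern call, with a hypothetical example_ name:

    #include <linux/proc_fs.h>

    /* Hypothetical teardown: proc_remove() removes the entry and all
     * of its children in one call, mirroring the recursive fallback
     * loop in nv_procfs_unregister_all(). */
    static void example_procfs_teardown(struct proc_dir_entry *root)
    {
        proc_remove(root);
    }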
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if defined(CONFIG_PROC_FS)
+
+#include "nv-procfs.h"
+#include "nv_compiler.h"
+#include "nv-reg.h"
+#include "conftest/patches.h"
+#include "nv-ibmnpu.h"
+
+#define NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(name) \
+    NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nv_system_pm_lock)
+
+static const char *__README_warning = \
+    "The NVIDIA graphics driver tries to detect potential problems\n"
+    "with the host system and warns about them using the system's\n"
+    "logging mechanisms. Important warning messages are also logged\n"
+    "to dedicated text files in this directory.\n";
+
+static const char *__README_patches = \
+    "The NVIDIA graphics driver's kernel interface files can be\n"
+    "patched to improve compatibility with new Linux kernels or to\n"
+    "fix bugs in these files. When applied, each official patch\n"
+    "provides a short text file with a brief description of itself\n"
+    "in this directory.\n";
+
+static struct proc_dir_entry *proc_nvidia;
+static struct proc_dir_entry *proc_nvidia_warnings;
+static struct proc_dir_entry *proc_nvidia_patches;
+static struct proc_dir_entry *proc_nvidia_gpus;
+
+extern char *NVreg_RegistryDwords;
+extern char *NVreg_RegistryDwordsPerDevice;
+extern char *NVreg_RmMsg;
+extern char *NVreg_GpuBlacklist;
+extern char *NVreg_TemporaryFilePath;
+extern char *NVreg_ExcludedGpus;
+
+static char nv_registry_keys[NV_MAX_REGISTRY_KEYS_LENGTH];
+
+#if defined(CONFIG_PM)
+static nv_pm_action_depth_t nv_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT;
+#endif
+
+static int nv_procfs_read_registry(struct seq_file *s, void *v);
+
+#define NV_NUMA_STATUS_MSG_LEN (32)
+#define NV_PROC_WRITE_BUFFER_SIZE (512 * PAGE_SIZE)
+
+typedef struct
+{
+    nvidia_stack_t *sp;
+    struct semaphore sp_lock;
+
+    nv_state_t *nv;
+
+    void *data;
+    off_t off;
+} nv_procfs_private_t;
+
+/*
+ * Status messages directly corresponding to states in nv_numa_states_t.
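The NV_DEFINE_SINGLE_NVRM_PROCFS_FILE() wrapper above builds on the seq_file "single" idiom used throughout this file; a stripped-down sketch of that idiom on kernels with proc_create_single() (v4.18+), with hypothetical example_ names:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* Hypothetical read-only node: one show() callback, registered
     * with proc_create_single(); the core supplies the open, read,
     * and release file operations. */
    static int example_show(struct seq_file *s, void *v)
    {
        seq_printf(s, "example: %d\n", 42);
        return 0;
    }

    /* At init time:
     *   proc_create_single("example", 0444, parent_dir, example_show);
     */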
+ */ +static const char *nv_numa_status_messages[] = +{ + "disabled", + "offline", + "online_in_progress", + "online", + "online_failed", + "offline_in_progress", + "offline_failed", +}; + +static int +nv_procfs_read_gpu_info( + struct seq_file *s, + void *v +) +{ + nv_state_t *nv = s->private; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct pci_dev *pci_dev = nvl->pci_dev; + char *type; + const char *name; + char *uuid; + char vbios_version[15]; + nvidia_stack_t *sp = NULL; + char firmware_version[64] = { 0 }; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE) != NV_OK) + { + nv_kmem_cache_free_stack(sp); + return 0; + } + + name = rm_get_device_name(pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device); + + seq_printf(s, "Model: \t\t %s\n", name); + seq_printf(s, "IRQ: \t\t %d\n", nv->interrupt_line); + + uuid = rm_get_gpu_uuid(sp, nv); + + if (uuid != NULL) + { + seq_printf(s, "GPU UUID: \t %s\n", uuid); + os_free_mem(uuid); + uuid = NULL; + } + + rm_get_vbios_version(sp, nv, vbios_version); + + seq_printf(s, "Video BIOS: \t %s\n", vbios_version); + + if (nv_find_pci_capability(pci_dev, PCI_CAP_ID_EXP)) + type = "PCIe"; + else + type = "PCI"; + seq_printf(s, "Bus Type: \t %s\n", type); + + seq_printf(s, "DMA Size: \t %d bits\n", + nv_count_bits(pci_dev->dma_mask)); + seq_printf(s, "DMA Mask: \t 0x%llx\n", pci_dev->dma_mask); + seq_printf(s, "Bus Location: \t %04x:%02x:%02x.%x\n", + nv->pci_info.domain, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(pci_dev->devfn)); + seq_printf(s, "Device Minor: \t %u\n", nvl->minor_num); + + rm_get_firmware_version(sp, nv, firmware_version, sizeof(firmware_version)); + if (firmware_version[0] != '\0') + { + seq_printf(s, "GPU Firmware: \t %s\n", firmware_version); + } + +#if defined(DEBUG) + do + { + int j; + for (j = 0; j < NV_GPU_NUM_BARS; j++) + { + seq_printf(s, "BAR%u: \t\t 0x%llx (%lluMB)\n", + j, nv->bars[j].cpu_address, (nv->bars[j].size >> 20)); + } + } while (0); +#endif + + seq_printf(s, "GPU Excluded:\t %s\n", + ((nv->flags & NV_FLAG_EXCLUDE) != 0) ? 
"Yes" : "No"); + + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + + nv_kmem_cache_free_stack(sp); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(gpu_info); + +static int +nv_procfs_read_power( + struct seq_file *s, + void *v +) +{ + nv_state_t *nv = s->private; + nvidia_stack_t *sp = NULL; + const char *vidmem_power_status; + const char *dynamic_power_status; + const char *gc6_support; + const char *gcoff_support; + NvU32 limitRated, limitCurr; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + dynamic_power_status = rm_get_dynamic_power_management_status(sp, nv); + seq_printf(s, "Runtime D3 status: %s\n", dynamic_power_status); + + vidmem_power_status = rm_get_vidmem_power_status(sp, nv); + seq_printf(s, "Video Memory: %s\n\n", vidmem_power_status); + + seq_printf(s, "GPU Hardware Support:\n"); + gc6_support = rm_get_gpu_gcx_support(sp, nv, NV_TRUE); + seq_printf(s, " Video Memory Self Refresh: %s\n", gc6_support); + + gcoff_support = rm_get_gpu_gcx_support(sp, nv, NV_FALSE); + seq_printf(s, " Video Memory Off: %s\n\n", gcoff_support); + + seq_printf(s, "Power Limits:\n"); + status = rm_get_clientnvpcf_power_limits(sp, nv, &limitRated, &limitCurr); + if (status != NV_OK) + { + seq_printf(s, " Default: N/A milliwatts\n"); + seq_printf(s, " GPU Boost: N/A milliwatts\n"); + } + else + { + seq_printf(s, " Default: %u milliwatts\n", limitRated); + seq_printf(s, " GPU Boost: %u milliwatts\n", limitCurr); + } + + nv_kmem_cache_free_stack(sp); + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(power); + +static int +nv_procfs_read_version( + struct seq_file *s, + void *v +) +{ + seq_printf(s, "NVRM version: %s\n", pNVRM_ID); + seq_printf(s, "GCC version: %s\n", NV_COMPILER); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(version); + +static void +nv_procfs_close_file( + nv_procfs_private_t *nvpp +) +{ + nvidia_stack_t *sp; + + if (nvpp->data != NULL) + { + os_free_mem(nvpp->data); + } + + sp = nvpp->sp; + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } + + NV_KFREE(nvpp, sizeof(*nvpp)); +} + +static int +nv_procfs_open_file( + struct inode *inode, + struct file *file, + nv_procfs_private_t **pnvpp +) +{ + int retval = 0; + NV_STATUS status; + nv_procfs_private_t *nvpp = NULL; + nvidia_stack_t *sp = NULL; + + NV_KMALLOC(nvpp, sizeof(nv_procfs_private_t)); + if (nvpp == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate procfs private!\n"); + return -ENOMEM; + } + memset(nvpp, 0, sizeof(*nvpp)); + + NV_INIT_MUTEX(&nvpp->sp_lock); + + nvpp->nv = NV_PDE_DATA(inode); + + if (0 == (file->f_mode & FMODE_WRITE)) + goto done; + + retval = nv_kmem_cache_alloc_stack(&sp); + if (retval != 0) + { + goto done; + } + + status = os_alloc_mem((void **)&nvpp->data, NV_PROC_WRITE_BUFFER_SIZE); + if (status != NV_OK) + { + retval = -ENOMEM; + goto done; + } + + os_mem_set((void *)nvpp->data, 0, NV_PROC_WRITE_BUFFER_SIZE); + nvpp->sp = sp; + +done: + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + *pnvpp = nvpp; + + return 0; +} + +static int +nv_procfs_open_registry( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_registry, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + 
nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_registry( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv; + nv_linux_state_t *nvl = NULL; + nvidia_stack_t *sp = nvpp->sp; + char *key_name, *key_value, *registry_keys; + size_t key_len, len; + long count; + NV_STATUS rm_status; + int rc = 0; + + if (0 != nvpp->off) + { + nv = nvpp->nv; + if (nv != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + key_value = (char *)nvpp->data; + + key_name = strsep(&key_value, "="); + + if (NULL == key_name || NULL == key_value) + { + rc = -EINVAL; + goto done; + } + + key_len = (strlen(key_name) + 1); + count = (nvpp->off - key_len); + + if (count <= 0) + { + rc = -EINVAL; + goto done; + } + + rm_status = rm_write_registry_binary(sp, nv, key_name, + key_value, count); + if (rm_status != NV_OK) + { + rc = -EFAULT; + goto done; + } + + registry_keys = ((nvl != NULL) ? + nvl->registry_keys : nv_registry_keys); + if (strstr(registry_keys, key_name) != NULL) + goto done; + len = strlen(registry_keys); + + if ((len + key_len + 2) <= NV_MAX_REGISTRY_KEYS_LENGTH) + { + if (len != 0) + strcat(registry_keys, ", "); + strcat(registry_keys, key_name); + } + } + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return rc; +} + +static int +nv_procfs_read_params( + struct seq_file *s, + void *v +) +{ + unsigned int i; + nv_parm_t *entry; + + for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) + seq_printf(s, "%s: %u\n", entry->name, *entry->data); + + seq_printf(s, "RegistryDwords: \"%s\"\n", + (NVreg_RegistryDwords != NULL) ? NVreg_RegistryDwords : ""); + seq_printf(s, "RegistryDwordsPerDevice: \"%s\"\n", + (NVreg_RegistryDwordsPerDevice != NULL) ? NVreg_RegistryDwordsPerDevice : ""); + seq_printf(s, "RmMsg: \"%s\"\n", + (NVreg_RmMsg != NULL) ? NVreg_RmMsg : ""); + seq_printf(s, "GpuBlacklist: \"%s\"\n", + (NVreg_GpuBlacklist != NULL) ? NVreg_GpuBlacklist : ""); + seq_printf(s, "TemporaryFilePath: \"%s\"\n", + (NVreg_TemporaryFilePath != NULL) ? NVreg_TemporaryFilePath : ""); + seq_printf(s, "ExcludedGpus: \"%s\"\n", + (NVreg_ExcludedGpus != NULL) ? NVreg_ExcludedGpus : ""); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(params); + +static int +nv_procfs_read_registry( + struct seq_file *s, + void *v +) +{ + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NULL; + char *registry_keys; + + if (nv != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + registry_keys = ((nvl != NULL) ? 
+ nvl->registry_keys : nv_registry_keys); + + seq_printf(s, "Binary: \"%s\"\n", registry_keys); + return 0; +} + +static ssize_t +nv_procfs_write_file( + struct file *file, + const char __user *buffer, + size_t count, + loff_t *pos +) +{ + int status = 0; + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + char *proc_buffer; + unsigned long bytes_left; + + down(&nvpp->sp_lock); + + bytes_left = (NV_PROC_WRITE_BUFFER_SIZE - nvpp->off - 1); + + if (count == 0) + { + status = -EINVAL; + goto done; + } + else if ((bytes_left == 0) || (count > bytes_left)) + { + status = -ENOSPC; + goto done; + } + + proc_buffer = &((char *)nvpp->data)[nvpp->off]; + + if (copy_from_user(proc_buffer, buffer, count)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in proc data!\n"); + status = -EFAULT; + } + else + { + nvpp->off += count; + } + + *pos = nvpp->off; + +done: + up(&nvpp->sp_lock); + + return ((status < 0) ? status : (int)count); +} + +static nv_proc_ops_t nv_procfs_registry_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_registry, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_registry, +}; + +#if defined(CONFIG_PM) +static int +nv_procfs_show_suspend_depth( + struct seq_file *m, + void *v +) +{ + seq_printf(m, "default modeset uvm\n"); + return 0; +} + +static ssize_t +nv_procfs_write_suspend_depth( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos +) +{ + char kbuf[sizeof("modeset\n")]; + unsigned i; + + if (!NV_IS_SUSER()) + { + return -EPERM; + } + + if (count < strlen("uvm") || count > sizeof(kbuf)) + { + return -EINVAL; + } + + if (copy_from_user(kbuf, buf, count)) + { + return -EFAULT; + } + + count = min(count, sizeof(kbuf) - 1); + for (i = 0; i < count && isalpha(kbuf[i]); i++); + kbuf[i] = '\0'; + + if (strcasecmp(kbuf, "uvm") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_UVM; + } + else if (strcasecmp(kbuf, "modeset") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_MODESET; + } + else if (strcasecmp(kbuf, "default") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT; + } + else + { + return -EINVAL; + } + + return count; +} + +static int +nv_procfs_open_suspend_depth( + struct inode *inode, + struct file *file +) +{ + return single_open(file, nv_procfs_show_suspend_depth, NULL); +} + +static nv_proc_ops_t nv_procfs_suspend_depth_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_suspend_depth, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_suspend_depth, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = single_release +}; + +static int +nv_procfs_show_suspend( + struct seq_file *m, + void *v +) +{ + seq_printf(m, "suspend hibernate resume\n"); + return 0; +} + +static ssize_t +nv_procfs_write_suspend( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos +) +{ + NV_STATUS status; + char kbuf[sizeof("hibernate\n")]; + nv_power_state_t power_state; + unsigned i; + + if (!NV_IS_SUSER()) + { + return -EPERM; + } + + if (count < strlen("resume") || count > sizeof(kbuf)) + { + return -EINVAL; + } + + if (copy_from_user(kbuf, buf, count)) + { + return -EFAULT; + } + + count = min(count, sizeof(kbuf) - 1); + for (i = 0; i < count && isalpha(kbuf[i]); i++); + kbuf[i] = '\0'; + + if (strcasecmp(kbuf, "suspend") == 0) + { + power_state = NV_POWER_STATE_IN_STANDBY; + } + else if (strcasecmp(kbuf, "hibernate") == 
0) + { + power_state = NV_POWER_STATE_IN_HIBERNATE; + } + else if (strcasecmp(kbuf, "resume") == 0) + { + power_state = NV_POWER_STATE_RUNNING; + } + else + { + return -EINVAL; + } + + status = nv_set_system_power_state(power_state, nv_pm_action_depth); + + return (status != NV_OK) ? -EIO : count; +} + +static int +nv_procfs_open_suspend( + struct inode *inode, + struct file *file +) +{ + return single_open(file, nv_procfs_show_suspend, NULL); +} + +static nv_proc_ops_t nv_procfs_suspend_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_suspend, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_suspend, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = single_release +}; + +#endif + +/* + * Forwards error to nv_log_error which exposes data to vendor callback + */ +void +exercise_error_forwarding_va( + nv_state_t *nv, + NvU32 err, + const char *fmt, + ... +) +{ + va_list arguments; + + va_start(arguments, fmt); + nv_log_error(nv, err, fmt, arguments); + va_end(arguments); +} + +static int +nv_procfs_show_exercise_error_forwarding( + struct seq_file *m, + void *v +) +{ + return 0; +} + +static int +nv_procfs_open_exercise_error_forwarding( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_show_exercise_error_forwarding, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_exercise_error_forwarding( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + char *proc_buffer = &((char *)nvpp->data)[0]; + size_t count = nvpp->off; + int i = 0, status = 0; + NvU32 xid = 0; + const NvU8 MAX_XID_DIGITS = 3; + + while (i < count && i <= MAX_XID_DIGITS && proc_buffer[i] != ',') + { + if (proc_buffer[i] < '0' || proc_buffer[i] > '9') + { + status = -EINVAL; + goto done; + } + + xid = xid * 10 + (proc_buffer[i++] - '0'); + } + + if (count > (i + 1) && proc_buffer[i] == ',') + exercise_error_forwarding_va(nv, xid, &proc_buffer[i + 1], 0xdeadbee0, + 0xdeadbee1, 0xdeadbee2, 0xdeadbee3, 0xdeadbee4, 0xdeadbee5); + else + status = -EINVAL; + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return status; +} + +static nv_proc_ops_t nv_procfs_exercise_error_forwarding_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_exercise_error_forwarding, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_RELEASE = nv_procfs_close_exercise_error_forwarding, +}; + +static int +nv_procfs_read_unbind_lock( + struct seq_file *s, + void *v +) +{ + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + down(&nvl->ldata_lock); + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + seq_printf(s, "1\n"); + } + else + { + seq_printf(s, "0\n"); + } + up(&nvl->ldata_lock); + + return 0; +} + +static int +nv_procfs_open_unbind_lock( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; 
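The XID parser in nv_procfs_close_exercise_error_forwarding() above converts the leading decimal field by hand; the kernel's kstrto* family does the same conversion with overflow checking, provided the token is first split out of the comma-delimited buffer (e.g. with strsep(), as the registry writer in this file already does). A minimal sketch with a hypothetical example_ name:

    #include <linux/kernel.h>

    /* Hypothetical parse: 'buf' must be a NUL-terminated decimal
     * token; returns 0 on success or a negative errno. */
    static int example_parse_xid(const char *buf, unsigned int *xid)
    {
        return kstrtouint(buf, 10, xid);
    }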
+ } + + retval = single_open(file, nv_procfs_read_unbind_lock, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_unbind_lock( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv; + nvidia_stack_t *sp = nvpp->sp; + int rc = 0; + nv_linux_state_t * nvl; + int value; + + if (0 != nvpp->off) + { + nv = nvpp->nv; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (NULL == nvpp->data || NULL == nv) + { + rc = -EINVAL; + goto done; + } + + if (sscanf((char *)nvpp->data, "%u\n", &value) != 1) + { + rc = -EINVAL; + goto done; + } + + down(&nvl->ldata_lock); + if ((value == 1) && !(nv->flags & NV_FLAG_UNBIND_LOCK)) + { + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + rm_unbind_lock(sp, nv); + + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "UnbindLock acquired\n"); + } + else + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Could not acquire UnbindLock\n"); + } + } + else if ((value == 0) && (nv->flags & NV_FLAG_UNBIND_LOCK)) + { + nv->flags &= ~NV_FLAG_UNBIND_LOCK; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "UnbindLock released\n"); + } + up(&nvl->ldata_lock); + } + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return rc; +} + +static nv_proc_ops_t nv_procfs_unbind_lock_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_unbind_lock, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_unbind_lock, +}; + +static const char* +numa_status_describe(nv_numa_status_t state) +{ + if (state < 0 || state >= NV_NUMA_STATUS_COUNT) + return "invalid"; + + return nv_numa_status_messages[state]; +} + +static NvBool +numa_is_change_allowed(nv_numa_status_t current_state, nv_numa_status_t requested) +{ + NvBool allowed = NV_TRUE; + + switch (requested) { + case NV_NUMA_STATUS_OFFLINE: + case NV_NUMA_STATUS_OFFLINE_FAILED: + allowed = (current_state == NV_NUMA_STATUS_OFFLINE_IN_PROGRESS); + break; + + /* All except Offline. */ + case NV_NUMA_STATUS_OFFLINE_IN_PROGRESS: + allowed = (current_state != NV_NUMA_STATUS_OFFLINE); + break; + + case NV_NUMA_STATUS_ONLINE: + allowed = (current_state == NV_NUMA_STATUS_ONLINE_IN_PROGRESS); + break; + + case NV_NUMA_STATUS_ONLINE_FAILED: + allowed = (current_state == NV_NUMA_STATUS_ONLINE_IN_PROGRESS) || + (current_state == NV_NUMA_STATUS_ONLINE); + break; + + case NV_NUMA_STATUS_ONLINE_IN_PROGRESS: + allowed = (current_state == NV_NUMA_STATUS_OFFLINE); + break; + + /* Fallthrough. */ + case NV_NUMA_STATUS_DISABLED: + default: + return NV_FALSE; + } + + return allowed; +} + +static NV_STATUS +numa_status_read( + nv_state_t *nv, + nv_stack_t *sp, + NvS32 *nid, + NvS32 *status, + NvU64 *numa_mem_addr, + NvU64 *numa_mem_size, + nv_offline_addresses_t *list +) +{ + NV_STATUS rm_status; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + down(&nvl->ldata_lock); + + /* + * If GPU has not been initialized but NUMA info is valid, populate + * NUMA node ID and status. Memory range and offline addresses cannot + * be read at this point so fill in dummy values. 
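On keyword parsing in this file: the isalpha()-trimming loops in nv_procfs_write_suspend() and nv_procfs_write_suspend_depth() above, and the strncmp() matching against state names in nv_procfs_close_numa_status() below, emulate what sysfs_streq() provides directly. A one-line sketch with a hypothetical example_ name:

    #include <linux/string.h>

    /* Hypothetical command match: sysfs_streq() compares a user
     * buffer to a keyword while ignoring a single trailing newline. */
    static bool example_cmd_is(const char *kbuf, const char *keyword)
    {
        return sysfs_streq(kbuf, keyword);
    }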
+ */ + if (!(nv->flags & NV_FLAG_OPEN)) + { + if (nv_platform_supports_numa(nvl)) + { + *nid = nvl->numa_info.node_id; + *status = nv_get_numa_status(nvl); + *numa_mem_addr = 0; + *numa_mem_size = 0; + memset(list, 0x0, sizeof(*list)); + } + + rm_status = NV_ERR_NOT_READY; + goto done; + } + + list->numEntries = ARRAY_SIZE(list->addresses); + + rm_status = rm_get_gpu_numa_info(sp, nv, + nid, numa_mem_addr, numa_mem_size, + list->addresses, &list->numEntries); + *status = nv_get_numa_status(nvl); + +done: + up(&nvl->ldata_lock); + return rm_status; +} + +static int +nv_procfs_read_offline_pages( + struct seq_file *s, + void *v +) +{ + NvU32 i; + int retval = 0; + NV_STATUS rm_status; + nv_ioctl_numa_info_t numa_info; + nv_procfs_private_t *nvpp = s->private; + nv_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + + rm_status = numa_status_read(nv, sp, + &numa_info.nid, + &numa_info.status, + &numa_info.numa_mem_addr, + &numa_info.numa_mem_size, + &numa_info.offline_addresses); + + if (rm_status != NV_OK) + return -EIO; + + for (i = 0; i < numa_info.offline_addresses.numEntries; ++i) + { + seq_printf(s, "%p\n", + (void*) numa_info.offline_addresses.addresses[i]); + } + + return retval; +} + +static int +nv_procfs_open_offline_pages( + struct inode *inode, + struct file *file +) +{ + int retval; + nv_procfs_private_t *nvpp = NULL; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_offline_pages, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_offline_pages( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return 0; +} + +static int +nv_procfs_read_numa_status( + struct seq_file *s, + void *v +) +{ + int retval = 0; + NV_STATUS rm_status; + nv_ioctl_numa_info_t numa_info; + nv_procfs_private_t *nvpp = s->private; + nv_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + + rm_status = numa_status_read(nv, sp, + &numa_info.nid, + &numa_info.status, + &numa_info.numa_mem_addr, + &numa_info.numa_mem_size, + &numa_info.offline_addresses); + + if ((rm_status != NV_OK) && (rm_status != NV_ERR_NOT_READY)) + return -EIO; + + /* Note: RM clients need to read block size from sysfs. 
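+ * (On kernels with memory hotplug support, the block size is typically
+ * exposed at /sys/devices/system/memory/block_size_bytes.)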
*/ + seq_printf(s, "Node: %d\n", numa_info.nid); + seq_printf(s, "Status: %s\n", numa_status_describe(numa_info.status)); + + if (rm_status == NV_OK) + { + seq_printf(s, "Address: %llx\n", numa_info.numa_mem_addr); + seq_printf(s, "Size: %llx\n", numa_info.numa_mem_size); + } + + return retval; +} + +static int +nv_procfs_open_numa_status( + struct inode *inode, + struct file *file +) +{ + int retval; + nv_procfs_private_t *nvpp = NULL; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_numa_status, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_numa_status( + struct inode *inode, + struct file *file +) +{ + int retval = 0; + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nvidia_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + const size_t MAX_STATES = ARRAY_SIZE(nv_numa_status_messages); + nv_numa_status_t current_state = nv_get_numa_status(nvl); + char *cmd = nvpp->data; + + down(&nvl->ldata_lock); + + if (nvpp->off != 0) + { + NvU32 state; + nv_numa_status_t requested = NV_NUMA_STATUS_DISABLED; + NV_STATUS rm_status = NV_OK; + + for (state = 0; state < MAX_STATES; ++state) + { + if (strncmp(nv_numa_status_messages[state], + cmd, + NV_NUMA_STATUS_MSG_LEN) == 0) + { + requested = state; + break; + } + } + + if (requested != current_state) + { + /* Validate state transition. */ + if (!numa_is_change_allowed(current_state, requested)) + { + retval = -EINVAL; + goto done; + } + + if (requested == NV_NUMA_STATUS_OFFLINE_IN_PROGRESS) + { + /* + * If this call fails, RM is not ready to offline + * memory => retain status. + */ + rm_status = rm_gpu_numa_offline(sp, nv); + } + + if (rm_status == NV_OK) + { + retval = nv_set_numa_status(nvl, requested); + if (retval < 0) + goto done; + + if (requested == NV_NUMA_STATUS_ONLINE) + { + rm_status = rm_gpu_numa_online(sp, nv); + } + } + + retval = (rm_status == NV_OK) ? 
retval: -EBUSY; + } + } + +done: + up(&nvl->ldata_lock); + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return retval; +} + +static const nv_proc_ops_t nv_procfs_numa_status_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_numa_status, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_numa_status, +}; + +static const nv_proc_ops_t nv_procfs_offline_pages_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_offline_pages, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_offline_pages, +}; + +static int +nv_procfs_read_text_file( + struct seq_file *s, + void *v +) +{ + seq_puts(s, s->private); + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(text_file); + +static void +nv_procfs_add_text_file( + struct proc_dir_entry *parent, + const char *filename, + const char *text +) +{ + NV_CREATE_PROC_FILE(filename, parent, text_file, (void *)text); +} +#endif + +void nv_procfs_add_warning( + const char *filename, + const char *text +) +{ +#if defined(CONFIG_PROC_FS) + nv_procfs_add_text_file(proc_nvidia_warnings, filename, text); +#endif +} + +int nv_procfs_init(void) +{ +#if defined(CONFIG_PROC_FS) + NvU32 i = 0; + char nv_dir_name[20]; + struct proc_dir_entry *entry; + + snprintf(nv_dir_name, sizeof(nv_dir_name), "driver/%s", nv_device_name); + + nv_dir_name[sizeof(nv_dir_name) - 1] = '\0'; + + proc_nvidia = NV_CREATE_PROC_DIR(nv_dir_name, NULL); + + if (!proc_nvidia) + goto failed; + + entry = NV_CREATE_PROC_FILE("params", proc_nvidia, params, NULL); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("registry", proc_nvidia, registry, NULL); + if (!entry) + goto failed; + +#if defined(CONFIG_PM) + entry = NV_CREATE_PROC_FILE("suspend_depth", proc_nvidia, suspend_depth, NULL); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("suspend", proc_nvidia, suspend, NULL); + if (!entry) + goto failed; +#endif + + proc_nvidia_warnings = NV_CREATE_PROC_DIR("warnings", proc_nvidia); + if (!proc_nvidia_warnings) + goto failed; + nv_procfs_add_text_file(proc_nvidia_warnings, "README", __README_warning); + + proc_nvidia_patches = NV_CREATE_PROC_DIR("patches", proc_nvidia); + if (!proc_nvidia_patches) + goto failed; + + for (i = 0; __nv_patches[i].short_description; i++) + { + nv_procfs_add_text_file(proc_nvidia_patches, + __nv_patches[i].short_description, __nv_patches[i].description); + } + + nv_procfs_add_text_file(proc_nvidia_patches, "README", __README_patches); + + entry = NV_CREATE_PROC_FILE("version", proc_nvidia, version, NULL); + if (!entry) + goto failed; + + proc_nvidia_gpus = NV_CREATE_PROC_DIR("gpus", proc_nvidia); + if (!proc_nvidia_gpus) + goto failed; +#endif + return 0; +#if defined(CONFIG_PROC_FS) +failed: + nv_procfs_unregister_all(proc_nvidia, proc_nvidia); + return -ENOMEM; +#endif +} + +void nv_procfs_exit(void) +{ +#if defined(CONFIG_PROC_FS) + nv_procfs_unregister_all(proc_nvidia, proc_nvidia); +#endif +} + +int nv_procfs_add_gpu(nv_linux_state_t *nvl) +{ +#if defined(CONFIG_PROC_FS) + nv_state_t *nv; + + /* Buffer size is 32 in order to fit the full name when PCI domain is 32 bit. 
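+ * The format used below is "%04x:%02x:%02x.%1x"; a 32-bit domain can widen
+ * the first field to 8 hex digits, so the worst case is 8+1+2+1+2+1+1
+ * characters plus the terminating NUL, i.e. 17 bytes, well within 32.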
*/ + char name[32]; + struct proc_dir_entry *proc_nvidia_gpu, *entry; + + nv = NV_STATE_PTR(nvl); + + snprintf(name, sizeof(name), "%04x:%02x:%02x.%1x", + nv->pci_info.domain, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn)); + + proc_nvidia_gpu = NV_CREATE_PROC_DIR(name, proc_nvidia_gpus); + if (!proc_nvidia_gpu) + goto failed; + + entry = NV_CREATE_PROC_FILE("information", proc_nvidia_gpu, gpu_info, + nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("registry", proc_nvidia_gpu, registry, nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("power", proc_nvidia_gpu, power, nv); + if (!entry) + goto failed; + + if (IS_EXERCISE_ERROR_FORWARDING_ENABLED()) + { + entry = NV_CREATE_PROC_FILE("exercise_error_forwarding", proc_nvidia_gpu, + exercise_error_forwarding, nv); + if (!entry) + goto failed; + } + + if (os_is_vgx_hyper()) + { + entry = NV_CREATE_PROC_FILE("unbindLock", proc_nvidia_gpu, unbind_lock, nv); + if (!entry) + goto failed; + } + + if (nv_get_numa_status(nvl) != NV_IOCTL_NUMA_STATUS_DISABLED) + { + entry = NV_CREATE_PROC_FILE("numa_status", proc_nvidia_gpu, numa_status, + nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("offline_pages", proc_nvidia_gpu, offline_pages, + nv); + if (!entry) + goto failed; + } + + nvl->proc_dir = proc_nvidia_gpu; +#endif + return 0; +#if defined(CONFIG_PROC_FS) +failed: + if (proc_nvidia_gpu) + { + nv_procfs_unregister_all(proc_nvidia_gpu, proc_nvidia_gpu); + } + return -1; +#endif +} + +void nv_procfs_remove_gpu(nv_linux_state_t *nvl) +{ +#if defined(CONFIG_PROC_FS) + nv_procfs_unregister_all(nvl->proc_dir, nvl->proc_dir); +#endif +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h new file mode 100644 index 0000000..f257e0f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h @@ -0,0 +1,937 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef _RM_REG_H_
+#define _RM_REG_H_
+
+#include "nvtypes.h"
+
+/*
+ * Use NV_REG_STRING to stringify a registry key when using that registry key.
+ */
+
+#define __NV_REG_STRING(regkey) #regkey
+#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
+
+/*
+ * Use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify the
+ * definition of registry keys in the kernel module source code.
+ */
+
+#define __NV_REG_VAR(regkey) NVreg_##regkey
+
+#if defined(NV_MODULE_PARAMETER)
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value)
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#if defined(NV_MODULE_STRING_PARAMETER)
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
+
+/*
+ * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
+ * the regkey and the name of the module parameter. When using this macro, the
+ * name of the parameter is passed to the extra "parameter" argument, and it is
+ * this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
+ */
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)}
+
+/*
+ *----------------- registry key definitions --------------------------
+ */
+
+/*
+ * Option: ModifyDeviceFiles
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will verify the validity
+ * of the NVIDIA device files in /dev and attempt to dynamically modify
+ * and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
+ * driver to touch the device files, you can use this registry key.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
+ * capability driver. Furthermore, the NVIDIA capability driver provides a
+ * modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
+ * this module parameter per device file.
+ *
+ * Possible Values:
+ * 0 = disable dynamic device file management
+ * 1 = enable dynamic device file management (default)
+ */
+
+#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
+#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
+
+/*
+ * Option: DeviceFileUID
+ *
+ * Description:
+ *
+ * This registry key specifies the UID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default UID is 0 ('root').
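+ *
+ * As a purely illustrative (hypothetical) example, loading the module with
+ * NVreg_DeviceFileUID=0 NVreg_DeviceFileGID=44 NVreg_DeviceFileMode=0660
+ * would restrict the device files to root and GID 44 (the "video" group on
+ * many distributions), assuming dynamic device file management is enabled.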
+ */
+
+#define __NV_DEVICE_FILE_UID DeviceFileUID
+#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
+
+/*
+ * Option: DeviceFileGID
+ *
+ * Description:
+ *
+ * This registry key specifies the GID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default GID is 0 ('root').
+ */
+
+#define __NV_DEVICE_FILE_GID DeviceFileGID
+#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
+
+/*
+ * Option: DeviceFileMode
+ *
+ * Description:
+ *
+ * This registry key specifies the device file mode assigned to the NVIDIA
+ * device files created and/or modified by the NVIDIA driver when dynamic
+ * device file management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default mode is 0666 (octal, rw-rw-rw-).
+ */
+
+#define __NV_DEVICE_FILE_MODE DeviceFileMode
+#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
+
+/*
+ * Option: ResmanDebugLevel
+ *
+ * Default value: ~0
+ */
+
+#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
+#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
+
+/*
+ * Option: RmLogonRC
+ *
+ * Default value: 1
+ */
+
+#define __NV_RM_LOGON_RC RmLogonRC
+#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
+
+/*
+ * Option: InitializeSystemMemoryAllocations
+ *
+ * Description:
+ *
+ * The NVIDIA Linux driver normally clears system memory it allocates
+ * for use with GPUs or within the driver stack. This is to ensure
+ * that potentially sensitive data is not rendered accessible by
+ * arbitrary user applications.
+ *
+ * Owners of single-user systems or similar trusted configurations may
+ * choose to disable the aforementioned clears using this option and
+ * potentially improve performance.
+ *
+ * Possible values:
+ *
+ * 1 = zero out system memory allocations (default)
+ * 0 = do not perform memory clears
+ */
+
+#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    InitializeSystemMemoryAllocations
+#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
+
+/*
+ * Option: RegistryDwords
+ *
+ * Description:
+ *
+ * This option accepts a semicolon-separated list of key=value pairs. Each
+ * key name is checked against the table of static options; if a match is
+ * found, the static option value is overridden, but invalid options remain
+ * invalid. Pairs that do not match an entry in the static option table
+ * are passed on to the RM directly.
+ *
+ * Format:
+ *
+ * NVreg_RegistryDwords="<key=value>;<key=value>;..."
+ */
+
+#define __NV_REGISTRY_DWORDS RegistryDwords
+#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
+
+/*
+ * Option: RegistryDwordsPerDevice
+ *
+ * Description:
+ *
+ * This option allows registry keys to be specified per GPU device, providing
+ * registry control at GPU-level granularity. It accepts a semicolon-separated
+ * list of key=value pairs. The first key=value pair MUST be
+ * "pci=DDDD:BB:DD.F;", where DDDD is the domain, BB is the bus ID, DD is the
+ * device slot number and F is the function. This PCI BDF is used to identify
+ * the GPU to which the registry keys that follow are assigned.
+ * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
+ * found, then all the registry keys that follow are skipped, until the next
+ * valid PCI identifier "pci=DDDD:BB:DD.F;" is found.
+ * Following are the valid formats for the value of the "pci" string:
+ * 1) bus:slot             : Domain and function default to 0.
+ * 2) domain:bus:slot      : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI dev id string.
+ *
+ * For each of the registry keys that follow, the key name is checked against
+ * the table of static options; if a match is found, the static option value
+ * is overridden, but invalid options remain invalid. Pairs that do not match
+ * an entry in the static option table are passed on to the RM directly.
+ *
+ * Format:
+ *
+ * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
+ *                                pci=DDDD:BB:DD.F;<key=value>;..;"
+ */
+
+#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
+#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
+
+#define __NV_RM_MSG RmMsg
+#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
+
+/*
+ * Option: UsePageAttributeTable
+ *
+ * Description:
+ *
+ * Enable/disable use of the page attribute table (PAT) available in
+ * modern x86/x86-64 processors to set the effective memory type of memory
+ * mappings to write-combining (WC).
+ *
+ * If this option is enabled, an x86 processor with PAT support is present,
+ * and the host system's Linux kernel did not configure one of the PAT
+ * entries to indicate the WC memory type, the driver will change the second
+ * entry in the PAT from its default (write-through (WT)) to WC at module
+ * load time. If the kernel did update one of the PAT entries, the driver
+ * will not modify the PAT.
+ *
+ * In both cases, the driver will honor attempts to map memory with the WC
+ * memory type by selecting the appropriate PAT entry using the correct
+ * set of PTE flags.
+ *
+ * Possible values:
+ *
+ * ~0 = use the NVIDIA driver's default logic (default)
+ *  1 = enable use of the PAT for WC mappings.
+ *  0 = disable use of the PAT for WC mappings.
+ */
+
+#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
+#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
+
+/*
+ * Option: EnableMSI
+ *
+ * Description:
+ *
+ * When this option is enabled and the host kernel supports the MSI feature,
+ * the NVIDIA driver will enable the PCI-E MSI capability of GPUs that
+ * support this feature, instead of using the PCI-E wired interrupt.
+ *
+ * Possible Values:
+ *
+ * 0 = disable MSI interrupt
+ * 1 = enable MSI interrupt (default)
+ *
+ */
+
+#define __NV_ENABLE_MSI EnableMSI
+#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
+
+/*
+ * Option: EnablePCIeGen3
+ *
+ * Description:
+ *
+ * Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
+ * when configured on SandyBridge E desktop platforms, NVIDIA feels that
+ * delivering a reliable, high-quality experience is not currently possible in
+ * PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
+ * NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
+ * option to enable PCIe Gen3 support.
+ *
+ * This is completely unsupported!
+ *
+ * Possible Values:
+ *
+ * 0: disable PCIe Gen3 support (default)
+ * 1: enable PCIe Gen3 support
+ */
+
+#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
+#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
+
+/*
+ * Option: MemoryPoolSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the size of the
+ * memory pool, given as a multiple of 1 GB, created on VMware ESXi to
+ * satisfy any system memory allocations requested by the NVIDIA kernel
+ * module.
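+ *
+ * For example, a (hypothetical) setting of NVreg_MemoryPoolSize=4 would
+ * request a 4 GB pool, since the value is interpreted as a multiple of 1 GB.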
+ */
+
+#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
+#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
+
+/*
+ * Option: KMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size,
+ * given as a multiple of 1 MB, of the heap memory space reserved for
+ * kmalloc operations on VMware ESXi to satisfy any system memory
+ * allocations requested by the NVIDIA kernel module.
+ */
+
+#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
+#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: VMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size,
+ * given as a multiple of 1 MB, of the heap memory space reserved for
+ * vmalloc operations on VMware ESXi to satisfy any system memory
+ * allocations requested by the NVIDIA kernel module.
+ */
+
+#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
+#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: IgnoreMMIOCheck
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will ignore the
+ * MMIO limit check during device probe on the VMware ESXi kernel. This is
+ * typically necessary when the VMware ESXi MMIO limit differs between a
+ * base version and its updates. Customers using updates can set this regkey
+ * to avoid probe failures.
+ */
+
+#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
+#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
+
+/*
+ * Option: TCEBypassMode
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will attempt to setup
+ * all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
+ * the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
+ * necessary for CUDA applications in which large system memory mappings may
+ * exceed the default TCE remapping capacity when operated in non-bypass mode.
+ *
+ * This option has no effect on non-POWER platforms.
+ *
+ * Possible Values:
+ *
+ * 0: system default TCE mode on all GPUs
+ * 1: enable TCE bypass mode on all GPUs
+ * 2: disable TCE bypass mode on all GPUs
+ */
+#define __NV_TCE_BYPASS_MODE TCEBypassMode
+#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
+
+#define NV_TCE_BYPASS_MODE_DEFAULT 0
+#define NV_TCE_BYPASS_MODE_ENABLE 1
+#define NV_TCE_BYPASS_MODE_DISABLE 2
+
+/*
+ * Option: pci
+ *
+ * Description:
+ *
+ * On Unix platforms, a per-GPU registry key can be specified as:
+ * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,<registry keys>",
+ * where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
+ * The "pci" key indicates that what follows is a PCI BDF identifier,
+ * to which the subsequent registry keys are to be applied.
+ *
+ * This define is not used on non-UNIX platforms.
+ *
+ * Possible Formats for value:
+ *
+ * 1) bus:slot             : Domain and function default to 0.
+ * 2) domain:bus:slot      : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI BDF identifier string.
+ */
+#define __NV_PCI_DEVICE_BDF pci
+#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
+
+/*
+ * Option: EnableStreamMemOPs
+ *
+ * Description:
+ *
+ * When this option is enabled, the CUDA driver will enable support for
+ * CUDA Stream Memory Operations in user-mode applications, which are so
+ * far required to be disabled by default due to limited support in
+ * devtools.
+ *
+ * Note: this is treated as a hint.
+ * MemOPs may still be left disabled by the CUDA driver for other reasons.
+ *
+ * Possible Values:
+ *
+ * 0 = disable feature (default)
+ * 1 = enable feature
+ */
+#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
+#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
+
+/*
+ * Option: EnableUserNUMAManagement
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will require the
+ * user-mode NVIDIA Persistence daemon to manage the onlining and offlining
+ * of its NUMA device memory.
+ *
+ * This option has no effect on platforms that do not support onlining
+ * device memory to a NUMA node (this feature is only supported on certain
+ * POWER9 systems).
+ *
+ * Possible Values:
+ *
+ * 0: disable user-mode NUMA management
+ * 1: enable user-mode NUMA management (default)
+ */
+#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
+#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
+
+/*
+ * Option: GpuBlacklist
+ *
+ * Description:
+ *
+ * This option accepts a list of blacklisted GPUs, separated by commas, that
+ * cannot be attached or used. Each blacklisted GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs. This regkey is deprecated and will be removed in the future. Use
+ * NV_REG_EXCLUDED_GPUS instead.
+ */
+#define __NV_GPU_BLACKLIST GpuBlacklist
+#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
+
+/*
+ * Option: ExcludedGpus
+ *
+ * Description:
+ *
+ * This option accepts a list of excluded GPUs, separated by commas, that
+ * cannot be attached or used. Each excluded GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs.
+ */
+#define __NV_EXCLUDED_GPUS ExcludedGpus
+#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
+
+/*
+ * Option: NvLinkDisable
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will not attempt to
+ * initialize or train NVLink connections for any GPUs. A system reboot is
+ * required for changes to take effect.
+ *
+ * This option has no effect if no GPUs support NVLink.
+ *
+ * Possible Values:
+ *
+ * 0: Do not disable NVLink (default)
+ * 1: Disable NVLink
+ */
+#define __NV_NVLINK_DISABLE NvLinkDisable
+#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
+
+/*
+ * Option: RestrictProfilingToAdminUsers
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will prevent users
+ * without administrative access (i.e., the CAP_SYS_ADMIN capability) from
+ * using GPU performance counters.
+ *
+ * Possible Values:
+ *
+ * 0: Do not restrict GPU counters (default)
+ * 1: Restrict GPU counters to system administrators only
+ */
+
+#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
+#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
+#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
+
+/*
+ * Option: TemporaryFilePath
+ *
+ * Description:
+ *
+ * When specified, this option changes the location in which the
+ * NVIDIA kernel module will create unnamed temporary files (e.g. to
+ * save the contents of video memory in). The indicated path must
+ * be a directory. By default, temporary files are created in /tmp.
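+ *
+ * For example, a (hypothetical) setting of NVreg_TemporaryFilePath=/var/tmp
+ * would place these unnamed temporary files under /var/tmp instead, which
+ * may be preferable when /tmp is a small tmpfs.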
+ */
+#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
+#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
+
+/*
+ * Option: PreserveVideoMemoryAllocations
+ *
+ * If enabled, this option prompts the NVIDIA kernel module to save and
+ * restore all video memory allocations across system power management
+ * cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
+ * only select allocations are preserved.
+ *
+ * Possible Values:
+ *
+ * 0: Preserve only select video memory allocations (default)
+ * 1: Preserve all video memory allocations
+ */
+#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
+#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
+
+
+/*
+ * Option: EnableS0ixPowerManagement
+ *
+ * When this option is enabled, the NVIDIA driver will use S0ix-based
+ * power management for system suspend/resume, if both the platform and
+ * the GPU support S0ix.
+ *
+ * During system suspend, if S0ix is enabled and
+ * video memory usage is above the threshold configured by
+ * 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
+ * in self-refresh mode while the rest of the GPU is powered down.
+ *
+ * Otherwise, the driver will copy video memory contents to system memory
+ * and power off the video memory along with the GPU.
+ *
+ * Possible Values:
+ *
+ * 0: Disable S0ix based power management (default)
+ * 1: Enable S0ix based power management
+ */
+
+#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
+#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
+    NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
+
+/*
+ * Option: S0ixPowerManagementVideoMemoryThreshold
+ *
+ * This option controls the threshold that the NVIDIA driver will use during
+ * S0ix-based system power management.
+ *
+ * When S0ix is enabled and the system is suspended, the driver will
+ * compare the amount of video memory in use with this threshold,
+ * to decide whether to keep video memory in self-refresh or copy video
+ * memory content to system memory.
+ *
+ * See the 'EnableS0ixPowerManagement' option.
+ *
+ * Values are expressed in Megabytes (1048576 bytes).
+ *
+ * The default value for this option is 256MB.
+ *
+ */
+#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    S0ixPowerManagementVideoMemoryThreshold
+#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
+
+
+/*
+ * Option: DynamicPowerManagement
+ *
+ * This option controls how aggressively the NVIDIA kernel module will manage
+ * GPU power through kernel interfaces.
+ *
+ * Possible Values:
+ *
+ * 0: Never allow the GPU to be powered down.
+ * 1: Power down the GPU when it is not initialized.
+ * 2: Power down the GPU after it has been inactive for some time.
+ * 3: (Default) Power down the GPU after a period of inactivity (i.e.,
+ *    mode 2) on Ampere or later notebooks. Otherwise, do not power down
+ *    the GPU.
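+ *
+ * For example, setting NVreg_DynamicPowerManagement=2 requests the
+ * inactivity-based scheme on any configuration that supports it, rather
+ * than only on Ampere or later notebooks.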
+ */
+#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
+    NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
+
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
+
+/*
+ * Option: DynamicPowerManagementVideoMemoryThreshold
+ *
+ * This option controls the threshold that the NVIDIA driver will use
+ * when selecting the dynamic power management scheme.
+ *
+ * When the driver detects that the GPU is idle, it will compare the amount
+ * of video memory in use with this threshold.
+ *
+ * If the current video memory usage is less than the threshold, the
+ * driver may preserve video memory contents in system memory and power off
+ * the video memory along with the GPU itself, if supported. Otherwise,
+ * the video memory will be kept in self-refresh mode while powering down
+ * the rest of the GPU, if supported.
+ *
+ * Values are expressed in Megabytes (1048576 bytes).
+ *
+ * If the requested value is greater than 200MB (the default), then it
+ * will be capped to 200MB.
+ */
+#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    DynamicPowerManagementVideoMemoryThreshold
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
+
+/*
+ * Option: RegisterPCIDriver
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will register with the
+ * PCI subsystem.
+ *
+ * Possible values:
+ *
+ * 1 - register as PCI driver (default)
+ * 0 - do not register as PCI driver
+ */
+
+#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
+#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
+
+/*
+ * Option: EnablePCIERelaxedOrderingMode
+ *
+ * Description:
+ *
+ * When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
+ * be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
+ * every device to set the relaxed ordering bit to 1 in all outbound MWr
+ * transaction-layer packets. This is equivalent to setting the regkey to
+ * FORCE_ENABLE as a non-per-device registry key.
+ *
+ * Possible values:
+ * 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
+ * 1 - Enable PCIe TLP relaxed ordering bit-setting
+ */
+#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
+#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
+    NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
+
+/*
+ * Option: EnableGpuFirmware
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will enable use of GPU
+ * firmware.
+ *
+ * Possible mode values:
+ * 0 - Do not enable GPU firmware
+ * 1 - Enable GPU firmware
+ * 2 - (Default) Use the default enablement policy for GPU firmware
+ *
+ * Setting this to anything other than 2 will alter driver firmware-
+ * enablement policies, possibly disabling GPU firmware where it would
+ * have otherwise been enabled by default.
+ *
+ * If this key is set globally for the system, the driver may still attempt
+ * to apply some policies to maintain uniform firmware modes across all
+ * GPUs. This may result in the driver failing initialization on some GPUs
+ * to maintain such a policy.
+ *
+ * If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
+ * will attempt to honor whatever configuration is specified without applying
+ * additional policies.
+ * This may also result in failed GPU initializations if the configuration
+ * is not possible (for example, if the firmware is missing from the
+ * filesystem, or the GPU is not capable).
+ *
+ * Policy bits:
+ *
+ * POLICY_ALLOW_FALLBACK:
+ *   The normal behavior is to fail GPU initialization if this registry
+ *   entry is set in such a way that results in an invalid configuration. If
+ *   instead the user would like the driver to automatically fall back to
+ *   initializing the failing GPU with firmware disabled, then this bit can
+ *   be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
+ *   Note that this can result in a mixed mode configuration (ex: GPU0 has
+ *   firmware enabled, but GPU1 does not).
+ *
+ */
+
+#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
+#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
+#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
+
+/*
+ * Option: EnableGpuFirmwareLogs
+ *
+ * When this option is enabled, the NVIDIA driver will send GPU firmware logs
+ * to the system log, when possible.
+ *
+ * Possible values:
+ * 0 - Do not send GPU firmware logs to the system log
+ * 1 - Enable sending of GPU firmware logs to the system log
+ * 2 - (Default) Enable sending of GPU firmware logs to the system log for
+ *     the debug kernel driver build only
+ */
+#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
+
+/*
+ * Option: EnableDbgBreakpoint
+ *
+ * When this option is set to a non-zero value, and the kernel is configured
+ * appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
+ * INT3 on x86_64), assumed to be caught by an attached debugger.
+ *
+ * When this option is set to the value zero (the default), assertions within
+ * resman will print to the system log, but no CPU breakpoint will be triggered.
+ */
+#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
+
+
+/*
+ * Option: OpenRmEnableUnsupportedGpus
+ *
+ * Open nvidia.ko support for features beyond what is used on Data Center GPUs
+ * is still fairly immature, so for now users must opt into use of open
+ * nvidia.ko with a special registry key if not on a Data Center GPU.
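+ *
+ * For example, such a GPU can be opted in (at the user's own risk) by
+ * loading the module with NVreg_OpenRmEnableUnsupportedGpus=1.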
+ */ + +#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS) +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE + +/* + * Option: NVreg_DmaRemapPeerMmio + * + * Description: + * + * When this option is enabled, the NVIDIA driver will use device driver + * APIs provided by the Linux kernel for DMA-remapping part of a device's + * MMIO region to another device, creating e.g., IOMMU mappings as necessary. + * When this option is disabled, the NVIDIA driver will instead only apply a + * fixed offset, which may be zero, to CPU physical addresses to produce the + * DMA address for the peer's MMIO region, and no IOMMU mappings will be + * created. + * + * This option only affects peer MMIO DMA mappings, and not system memory + * mappings. + * + * Possible Values: + * 0 = disable dynamic DMA remapping of peer MMIO regions + * 1 = enable dynamic DMA remapping of peer MMIO regions (default) + */ +#define __NV_DMA_REMAP_PEER_MMIO DmaRemapPeerMmio +#define NV_DMA_REMAP_PEER_MMIO NV_REG_STRING(__NV_DMA_REMAP_PEER_MMIO) +#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000 +#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001 + +#if defined(NV_DEFINE_REGISTRY_KEY_TABLE) + +/* + *---------registry key parameter declarations-------------- + */ + +NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0); +NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666); +NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1); +NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1); +NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0); +NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1); +NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0); + +NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0); +NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256); + +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG); +NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT); + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0); + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 0); + + + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0); + +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, 
NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL); +NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE); + +/* + *----------------registry database definition---------------------- + */ + +/* + * You can enable any of the registry options disabled by default by + * editing their respective entries in the table below. The last field + * determines if the option is considered valid - in order for the + * changes to take effect, you need to recompile and reload the NVIDIA + * kernel module. + */ +nv_parm_t nv_parms[] = { + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY, + __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS), + + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO), + {NULL, NULL} +}; + +#elif defined(NVRM) + +extern nv_parm_t nv_parms[]; + +#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */ + +#endif /* _RM_REG_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c new file mode 100644 index 0000000..eec5af3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+#include "nv-linux.h"
+#include "os-interface.h"
+
+#include "nv-report-err.h"
+
+nv_report_error_cb_t nv_error_cb_handle = NULL;
+
+int nv_register_error_cb(nv_report_error_cb_t report_error_cb)
+{
+    if (report_error_cb == NULL)
+        return -EINVAL;
+
+    if (nv_error_cb_handle != NULL)
+        return -EBUSY;
+
+    nv_error_cb_handle = report_error_cb;
+    return 0;
+}
+
+EXPORT_SYMBOL(nv_register_error_cb);
+
+int nv_unregister_error_cb(void)
+{
+    if (nv_error_cb_handle == NULL)
+        return -EPERM;
+
+    nv_error_cb_handle = NULL;
+    return 0;
+}
+
+EXPORT_SYMBOL(nv_unregister_error_cb);
+
+struct pci_dev;
+
+void nv_report_error(
+    struct pci_dev *dev,
+    NvU32 error_number,
+    const char *format,
+    va_list ap
+)
+{
+    va_list ap_copy;
+    char *buffer;
+    int length = 0;
+    NV_STATUS status = NV_OK;
+
+    if (nv_error_cb_handle != NULL)
+    {
+        /*
+         * Size the message using the copy of the va_list: a va_list may
+         * only be traversed once, so the original 'ap' must be kept
+         * intact for the formatting pass below.
+         */
+        va_copy(ap_copy, ap);
+        length = vsnprintf(NULL, 0, format, ap_copy);
+        va_end(ap_copy);
+
+        if (length > 0)
+        {
+            status = os_alloc_mem((void *)&buffer, (length + 1) * sizeof(char));
+
+            if (status == NV_OK)
+            {
+                /* Pass length + 1 so vsnprintf() does not truncate the last byte. */
+                vsnprintf(buffer, length + 1, format, ap);
+                nv_error_cb_handle(dev, error_number, buffer, length + 1);
+                os_free_mem(buffer);
+            }
+        }
+    }
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h
new file mode 100644
index 0000000..d488709
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h
@@ -0,0 +1,66 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_REPORT_ERR_H_
+#define _NV_REPORT_ERR_H_
+
+/*
+ * @brief
+ *   Callback definition for obtaining XID error string and data.
+ *
+ * @param[in] pci_dev *
+ *   Structure describing the GPU PCI device.
+ * @param[in] uint32_t
+ *   XID number.
+ * @param[in] char *
+ *   Error string with HWERR info.
+ * @param[in] int
+ *   Length of the error string.
+ */
+typedef void (*nv_report_error_cb_t)(struct pci_dev *, uint32_t, char *, int);
+
+/*
+ * @brief
+ *   Register callback function to obtain XID error string and data.
+ *
+ * @param[in] report_error_cb
+ *   A function pointer to receive the callback.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   -EINVAL callback handle is NULL.
+ *   -EBUSY  callback handle is already registered.
+ */
+int nv_register_error_cb(nv_report_error_cb_t report_error_cb);
+
+/*
+ * @brief
+ *   Unregisters the callback function handle.
+ *
+ * @return
+ *   0 upon successful completion.
+ *   -EPERM unregister not permitted on NULL callback handle.
+ */
+int nv_unregister_error_cb(void);
+
+#endif /* _NV_REPORT_ERR_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c
new file mode 100644
index 0000000..57860ba
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c
@@ -0,0 +1,201 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nv-linux.h" +#include "nv-rsync.h" + +nv_rsync_info_t g_rsync_info; + +void nv_init_rsync_info( + void +) +{ + g_rsync_info.relaxed_ordering_mode = NV_FALSE; + g_rsync_info.usage_count = 0; + g_rsync_info.data = NULL; + NV_INIT_MUTEX(&g_rsync_info.lock); +} + +void nv_destroy_rsync_info( + void +) +{ + WARN_ON(g_rsync_info.data); + WARN_ON(g_rsync_info.usage_count); + WARN_ON(g_rsync_info.relaxed_ordering_mode); +} + +int nv_get_rsync_info( + void +) +{ + int mode; + int rc = 0; + + down(&g_rsync_info.lock); + + if (g_rsync_info.usage_count == 0) + { + if (g_rsync_info.get_relaxed_ordering_mode) + { + rc = g_rsync_info.get_relaxed_ordering_mode(&mode, + g_rsync_info.data); + if (rc != 0) + { + goto done; + } + + g_rsync_info.relaxed_ordering_mode = !!mode; + } + } + + g_rsync_info.usage_count++; + +done: + up(&g_rsync_info.lock); + + return rc; +} + +void nv_put_rsync_info( + void +) +{ + int mode; + + down(&g_rsync_info.lock); + + g_rsync_info.usage_count--; + + if (g_rsync_info.usage_count == 0) + { + if (g_rsync_info.put_relaxed_ordering_mode) + { + mode = g_rsync_info.relaxed_ordering_mode; + g_rsync_info.put_relaxed_ordering_mode(mode, g_rsync_info.data); + g_rsync_info.relaxed_ordering_mode = NV_FALSE; + } + } + + up(&g_rsync_info.lock); +} + +int nv_register_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data +) +{ + int rc = 0; + + down(&g_rsync_info.lock); + + if (g_rsync_info.get_relaxed_ordering_mode != NULL) + { + rc = -EBUSY; + goto done; + } + + if (g_rsync_info.usage_count != 0) + { + rc = -EBUSY; + goto done; + } + + g_rsync_info.get_relaxed_ordering_mode = get_relaxed_ordering_mode; + g_rsync_info.put_relaxed_ordering_mode = put_relaxed_ordering_mode; + g_rsync_info.wait_for_rsync = wait_for_rsync; + g_rsync_info.data = data; + +done: + up(&g_rsync_info.lock); + + return rc; +} + +void nv_unregister_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data +) +{ + down(&g_rsync_info.lock); + + WARN_ON(g_rsync_info.usage_count != 0); + + WARN_ON(g_rsync_info.get_relaxed_ordering_mode != + get_relaxed_ordering_mode); + WARN_ON(g_rsync_info.put_relaxed_ordering_mode != + put_relaxed_ordering_mode); + WARN_ON(g_rsync_info.wait_for_rsync != wait_for_rsync); + WARN_ON(g_rsync_info.data != data); + + g_rsync_info.get_relaxed_ordering_mode = NULL; + g_rsync_info.put_relaxed_ordering_mode = NULL; + g_rsync_info.wait_for_rsync = NULL; + g_rsync_info.data = NULL; + + up(&g_rsync_info.lock); +} + +NvBool nv_get_rsync_relaxed_ordering_mode( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* shouldn't be called without opening a device */ + WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0); + + /* + * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of + * g_rsync_info.lock once a device is opened. During nvidia_open(), we + * lock the relaxed ordering state by ref-counting the rsync module + * through get_relaxed_ordering_mode. 
+ */ + return g_rsync_info.relaxed_ordering_mode; +} + +void nv_wait_for_rsync( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* shouldn't be called without opening a device */ + WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0); + + /* + * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of + * g_rsync_info.lock once a device is opened. During nvidia_open(), we + * block unregistration of the rsync driver by ref-counting the module + * through get_relaxed_ordering_mode. + */ + if (g_rsync_info.relaxed_ordering_mode) + { + WARN_ON(g_rsync_info.wait_for_rsync == NULL); + g_rsync_info.wait_for_rsync(nvl->pci_dev, g_rsync_info.data); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h new file mode 100644 index 0000000..6e262e6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_RSYNC_H_ +#define _NV_RSYNC_H_ + +#include "nv-linux.h" + +typedef struct nv_rsync_info +{ + struct semaphore lock; + uint32_t usage_count; + NvBool relaxed_ordering_mode; + int (*get_relaxed_ordering_mode)(int *mode, void *data); + void (*put_relaxed_ordering_mode)(int mode, void *data); + void (*wait_for_rsync)(struct pci_dev *gpu, void *data); + void *data; +} nv_rsync_info_t; + +void nv_init_rsync_info(void); +void nv_destroy_rsync_info(void); +int nv_get_rsync_info(void); +void nv_put_rsync_info(void); +int nv_register_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data); +void nv_unregister_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data); +NvBool nv_get_rsync_relaxed_ordering_mode(nv_state_t *nv); +void nv_wait_for_rsync(nv_state_t *nv); + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c new file mode 100644 index 0000000..47361f7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c @@ -0,0 +1,161 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-frontend.h"
+
+NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
+ nv_state_t *nv,
+ nv_usermap_access_params_t *nvuap,
+ NvU32 prot,
+ void *pAllocPriv,
+ NvU64 pageIndex,
+ NvU32 fd
+)
+{
+ NV_STATUS status = NV_OK;
+ nv_alloc_mapping_context_t *nvamc = NULL;
+ nv_file_private_t *nvfp = NULL;
+ nv_linux_file_private_t *nvlfp = NULL;
+ nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+ void *priv = NULL;
+
+ nvfp = nv_get_file_private(fd, NV_IS_CTL_DEVICE(nv), &priv);
+ if (nvfp == NULL)
+ return NV_ERR_INVALID_ARGUMENT;
+
+ nvlfp = nv_get_nvlfp_from_nvfp(nvfp);
+
+ nvamc = &nvlfp->mmap_context;
+
+ if (nvamc->valid)
+ {
+ status = NV_ERR_STATE_IN_USE;
+ goto done;
+ }
+
+ if (NV_IS_CTL_DEVICE(nv))
+ {
+ nvamc->alloc = pAllocPriv;
+ nvamc->page_index = pageIndex;
+ }
+ else
+ {
+ if (NV_STATE_PTR(nvlfp->nvptr) != nv)
+ {
+ status = NV_ERR_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ nvamc->mmap_start = nvuap->mmap_start;
+ nvamc->mmap_size = nvuap->mmap_size;
+ if (nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE)
+ {
+ nvamc->page_array = nvuap->page_array;
+ nvamc->num_pages = nvuap->num_pages;
+ }
+ nvamc->access_start = nvuap->access_start;
+ nvamc->access_size = nvuap->access_size;
+ nvamc->remap_prot_extra = nvuap->remap_prot_extra;
+ }
+
+ nvamc->prot = prot;
+ nvamc->valid = NV_TRUE;
+ nvamc->caching = nvuap->caching;
+
+done:
+ nv_put_file_private(priv);
+
+ return status;
+}
+
+NV_STATUS NV_API_CALL nv_alloc_user_mapping(
+ nv_state_t *nv,
+ void *pAllocPrivate,
+ NvU64 pageIndex,
+ NvU32 pageOffset,
+ NvU64 size,
+ NvU32 protect,
+ NvU64 *pUserAddress,
+ void **ppPrivate
+)
+{
+ nv_alloc_t *at = pAllocPrivate;
+
+ if (at->flags.contig)
+ *pUserAddress = (at->page_table[0]->phys_addr + (pageIndex * PAGE_SIZE) + pageOffset);
+ else
+ *pUserAddress = (at->page_table[pageIndex]->phys_addr + pageOffset);
+
+ return NV_OK;
+}
+
+NV_STATUS NV_API_CALL nv_free_user_mapping(
+ nv_state_t *nv,
+ void *pAllocPrivate,
+ NvU64 userAddress,
+ void *pPrivate
+)
+{
+ return NV_OK;
+}
+
+/*
+ * This function adjusts the {mmap,access}_{start,size} to reflect platform-specific
+ * mechanisms for isolating mappings at a finer granularity than the os_page_size.
+ */
+NV_STATUS NV_API_CALL nv_get_usermap_access_params(
+ nv_state_t *nv,
+ nv_usermap_access_params_t *nvuap
+)
+{
+ NvU64 addr = nvuap->addr;
+ NvU64 size = nvuap->size;
+
+ nvuap->remap_prot_extra = 0;
+
+ /*
+ * Do verification and cache encoding based on the original
+ * (ostensibly smaller) mmap request, since accesses should be
+ * restricted to that range.
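+ * When 4K page isolation applies, mmap_start/mmap_size below are
+ * widened to the larger isolation region, while access_start/access_size
+ * continue to describe the range the client actually requested.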
+ */ + if (rm_gpu_need_4k_page_isolation(nv) && + NV_4K_PAGE_ISOLATION_REQUIRED(addr, size)) + { +#if defined(NV_4K_PAGE_ISOLATION_PRESENT) + nvuap->remap_prot_extra = NV_PROT_4K_PAGE_ISOLATION; + nvuap->access_start = (NvU64)NV_4K_PAGE_ISOLATION_ACCESS_START(addr); + nvuap->access_size = NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size); + nvuap->mmap_start = (NvU64)NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr); + nvuap->mmap_size = NV_4K_PAGE_ISOLATION_MMAP_LEN(size); +#else + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n"); + return NV_ERR_OPERATING_SYSTEM; +#endif + } + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c new file mode 100644 index 0000000..fed769e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c @@ -0,0 +1,736 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os-interface.h" +#include "nv.h" +#include "nv-linux.h" + +static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages) +{ +#if defined(NV_SET_MEMORY_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + unsigned long addr = (unsigned long)page_address(page); + set_memory_uc(addr, num_pages); +#elif defined(NV_SET_PAGES_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + set_pages_uc(page, num_pages); +#endif +} + +static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages) +{ +#if defined(NV_SET_MEMORY_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + unsigned long addr = (unsigned long)page_address(page); + set_memory_wb(addr, num_pages); +#elif defined(NV_SET_PAGES_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + set_pages_wb(page, num_pages); +#endif +} + +static inline int nv_set_memory_array_type_present(NvU32 type) +{ + switch (type) + { +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + return 1; + case NV_MEMORY_WRITEBACK: + return 1; +#endif + default: + return 0; + } +} + +static inline int nv_set_pages_array_type_present(NvU32 type) +{ + switch (type) + { +#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + return 1; + case NV_MEMORY_WRITEBACK: + return 1; +#endif + default: + return 0; + } +} + +static inline void nv_set_memory_array_type( + unsigned long *pages, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + set_memory_array_uc(pages, num_pages); + break; + case NV_MEMORY_WRITEBACK: + set_memory_array_wb(pages, num_pages); + break; +#endif + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + break; + } +} + +static inline void nv_set_pages_array_type( + struct page **pages, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { +#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + set_pages_array_uc(pages, num_pages); + break; + case NV_MEMORY_WRITEBACK: + set_pages_array_wb(pages, num_pages); + break; +#endif + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + break; + } +} + +static inline void nv_set_contig_memory_type( + nvidia_pte_t *page_ptr, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { + case NV_MEMORY_UNCACHED: + nv_set_contig_memory_uc(page_ptr, num_pages); + break; + case NV_MEMORY_WRITEBACK: + nv_set_contig_memory_wb(page_ptr, num_pages); + break; + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + } +} + +static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type) +{ + NvU32 i; + NV_STATUS status = NV_OK; +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + unsigned long *pages = NULL; +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + struct page **pages = NULL; +#else + unsigned long *pages = NULL; +#endif + + nvidia_pte_t *page_ptr; + struct page *page; + + if (nv_set_memory_array_type_present(type)) + { + status = os_alloc_mem((void **)&pages, + at->num_pages * sizeof(unsigned long)); + + } + else if (nv_set_pages_array_type_present(type)) + { + status = os_alloc_mem((void **)&pages, + at->num_pages * sizeof(struct page*)); + } + + if (status != NV_OK) + pages = NULL; + + // + // If the set_{memory,page}_array_* functions are in the kernel interface, + // it's faster to use them since 
they work on non-contiguous memory, + // whereas the set_{memory,page}_* functions do not. + // + if (pages) + { + for (i = 0; i < at->num_pages; i++) + { + page_ptr = at->page_table[i]; + page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + pages[i] = (unsigned long)page_address(page); +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + pages[i] = page; +#endif + } +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + nv_set_memory_array_type(pages, at->num_pages, type); +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + nv_set_pages_array_type(pages, at->num_pages, type); +#endif + os_free_mem(pages); + } + + // + // If the set_{memory,page}_array_* functions aren't present in the kernel + // interface, each page has to be set individually, which has been measured + // to be ~10x slower than using the set_{memory,page}_array_* functions. + // + else + { + for (i = 0; i < at->num_pages; i++) + nv_set_contig_memory_type(at->page_table[i], 1, type); + } +} + +static NvU64 nv_get_max_sysmem_address(void) +{ + NvU64 global_max_pfn = 0ULL; + int node_id; + + for_each_online_node(node_id) + { + global_max_pfn = max(global_max_pfn, (NvU64)node_end_pfn(node_id)); + } + + return ((global_max_pfn + 1) << PAGE_SHIFT) - 1; +} + +static unsigned int nv_compute_gfp_mask( + nv_state_t *nv, + nv_alloc_t *at +) +{ + unsigned int gfp_mask = NV_GFP_KERNEL; + struct device *dev = at->dev; + + /* + * If we know that SWIOTLB is enabled (and therefore we avoid calling the + * kernel to DMA-remap the pages), or if we are using dma_direct (which may + * transparently use the SWIOTLB for pages that are unaddressable by the + * device, in kernel versions 5.0 and later), limit our allocation pool + * to the first 4GB to avoid allocating pages outside of our device's + * addressable limit. + * Also, limit the allocation to the first 4GB if explicitly requested by + * setting the "nv->force_dma32_alloc" variable. + */ + if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc) + { + NvU64 max_sysmem_address = nv_get_max_sysmem_address(); + if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) || + (nv && nv->force_dma32_alloc)) + { + gfp_mask = NV_GFP_DMA32; + } + } +#if defined(__GFP_RETRY_MAYFAIL) + gfp_mask |= __GFP_RETRY_MAYFAIL; +#elif defined(__GFP_NORETRY) + gfp_mask |= __GFP_NORETRY; +#endif +#if defined(__GFP_ZERO) + if (at->flags.zeroed) + gfp_mask |= __GFP_ZERO; +#endif +#if defined(__GFP_THISNODE) + if (at->flags.node0) + gfp_mask |= __GFP_THISNODE; +#endif + // Compound pages are required by vm_insert_page for high-order page + // allocations + if (at->order > 0) + gfp_mask |= __GFP_COMP; + + return gfp_mask; +} + +/* + * This function is needed for allocating contiguous physical memory in xen + * dom0. Because of the use of xen sw iotlb in xen dom0, memory allocated by + * NV_GET_FREE_PAGES may not be machine contiguous when size is more than + * 1 page. nv_alloc_coherent_pages() will give us machine contiguous memory. + * Even though we get dma_address directly in this function, we will + * still call pci_map_page() later to get dma address. This is fine as it + * will return the same machine address. 
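+ * dma_alloc_coherent() also respects the device's DMA mask, so the
+ * memory it hands back needs no further bounce buffering.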
+ */ +static NV_STATUS nv_alloc_coherent_pages( + nv_state_t *nv, + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + NvU32 i; + unsigned int gfp_mask; + unsigned long virt_addr = 0; + dma_addr_t bus_addr; + nv_linux_state_t *nvl; + struct device *dev; + + if (!nv) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: coherent page alloc on nvidiactl not supported\n", __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + dev = nvl->dev; + + gfp_mask = nv_compute_gfp_mask(nv, at); + + virt_addr = (unsigned long)dma_alloc_coherent(dev, + at->num_pages * PAGE_SIZE, + &bus_addr, + gfp_mask); + if (!virt_addr) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__); + return NV_ERR_NO_MEMORY; + } + + for (i = 0; i < at->num_pages; i++) + { + page_ptr = at->page_table[i]; + + page_ptr->virt_addr = virt_addr + i * PAGE_SIZE; + page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr); + page_ptr->dma_addr = bus_addr + i * PAGE_SIZE; + } + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_contig_memory_type(at->page_table[0], + at->num_pages, + NV_MEMORY_UNCACHED); + } + + at->flags.coherent = NV_TRUE; + return NV_OK; +} + +static void nv_free_coherent_pages( + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + struct device *dev = at->dev; + + page_ptr = at->page_table[0]; + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_contig_memory_type(at->page_table[0], + at->num_pages, + NV_MEMORY_WRITEBACK); + } + + dma_free_coherent(dev, at->num_pages * PAGE_SIZE, + (void *)page_ptr->virt_addr, page_ptr->dma_addr); +} + +NV_STATUS nv_alloc_contig_pages( + nv_state_t *nv, + nv_alloc_t *at +) +{ + NV_STATUS status; + nvidia_pte_t *page_ptr; + NvU32 i, j; + unsigned int gfp_mask; + unsigned long virt_addr = 0; + NvU64 phys_addr; + struct device *dev = at->dev; + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages); + + // TODO: This is a temporary WAR, and will be removed after fixing bug 200732409. 
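+ // Xen dom0 needs machine-contiguous memory (see the comment above
+ // nv_alloc_coherent_pages()), and unencrypted allocations rely on
+ // dma_alloc_coherent() returning shared memory when AMD SEV is
+ // enabled, so both cases take the coherent path.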
+ if (os_is_xen_dom0() || at->flags.unencrypted)
+ return nv_alloc_coherent_pages(nv, at);
+
+
+ if (!NV_SOC_IS_ISO_IOMMU_PRESENT(nv))
+ {
+ return nv_alloc_coherent_pages(nv, at);
+ }
+
+
+ at->order = get_order(at->num_pages * PAGE_SIZE);
+ gfp_mask = nv_compute_gfp_mask(nv, at);
+
+ if (at->flags.node0)
+ {
+ NV_ALLOC_PAGES_NODE(virt_addr, 0, at->order, gfp_mask);
+ }
+ else
+ {
+ NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
+ }
+ if (virt_addr == 0)
+ {
+ if (os_is_vgx_hyper())
+ {
+ nv_printf(NV_DBG_MEMINFO,
+ "NVRM: VM: %s: failed to allocate memory, trying coherent memory\n", __FUNCTION__);
+
+ status = nv_alloc_coherent_pages(nv, at);
+ return status;
+ }
+
+ nv_printf(NV_DBG_MEMINFO,
+ "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
+ return NV_ERR_NO_MEMORY;
+ }
+#if !defined(__GFP_ZERO)
+ if (at->flags.zeroed)
+ memset((void *)virt_addr, 0, (at->num_pages * PAGE_SIZE));
+#endif
+
+ for (i = 0; i < at->num_pages; i++, virt_addr += PAGE_SIZE)
+ {
+ phys_addr = nv_get_kern_phys_address(virt_addr);
+ if (phys_addr == 0)
+ {
+ nv_printf(NV_DBG_ERRORS,
+ "NVRM: VM: %s: failed to look up physical address\n",
+ __FUNCTION__);
+ status = NV_ERR_OPERATING_SYSTEM;
+ goto failed;
+ }
+
+ page_ptr = at->page_table[i];
+ page_ptr->phys_addr = phys_addr;
+ page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
+ page_ptr->virt_addr = virt_addr;
+ page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
+
+ NV_MAYBE_RESERVE_PAGE(page_ptr);
+ }
+
+ if (at->cache_type != NV_MEMORY_CACHED)
+ {
+ nv_set_contig_memory_type(at->page_table[0],
+ at->num_pages,
+ NV_MEMORY_UNCACHED);
+ }
+
+ at->flags.coherent = NV_FALSE;
+
+ return NV_OK;
+
+failed:
+ if (i > 0)
+ {
+ for (j = 0; j < i; j++)
+ NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]);
+ }
+
+ page_ptr = at->page_table[0];
+ NV_FREE_PAGES(page_ptr->virt_addr, at->order);
+
+ return status;
+}
+
+void nv_free_contig_pages(
+ nv_alloc_t *at
+)
+{
+ nvidia_pte_t *page_ptr;
+ unsigned int i;
+
+ nv_printf(NV_DBG_MEMINFO,
+ "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
+
+ if (at->flags.coherent)
+ return nv_free_coherent_pages(at);
+
+ if (at->cache_type != NV_MEMORY_CACHED)
+ {
+ nv_set_contig_memory_type(at->page_table[0],
+ at->num_pages,
+ NV_MEMORY_WRITEBACK);
+ }
+
+ for (i = 0; i < at->num_pages; i++)
+ {
+ page_ptr = at->page_table[i];
+
+ if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
+ {
+ static int count = 0;
+ if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
+ {
+ nv_printf(NV_DBG_ERRORS,
+ "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
+ __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
+ page_ptr->page_count);
+ }
+ }
+ NV_MAYBE_UNRESERVE_PAGE(page_ptr);
+ }
+
+ page_ptr = at->page_table[0];
+
+ NV_FREE_PAGES(page_ptr->virt_addr, at->order);
+}
+
+NV_STATUS nv_alloc_system_pages(
+ nv_state_t *nv,
+ nv_alloc_t *at
+)
+{
+ NV_STATUS status;
+ nvidia_pte_t *page_ptr;
+ NvU32 i, j;
+ unsigned int gfp_mask;
+ unsigned long virt_addr = 0;
+ NvU64 phys_addr;
+ struct device *dev = at->dev;
+ dma_addr_t bus_addr;
+
+ nv_printf(NV_DBG_MEMINFO,
+ "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
+
+ gfp_mask = nv_compute_gfp_mask(nv, at);
+
+ for (i = 0; i < at->num_pages; i++)
+ {
+ if (at->flags.unencrypted && (dev != NULL))
+ {
+ virt_addr = (unsigned long)dma_alloc_coherent(dev,
+ PAGE_SIZE,
+ &bus_addr,
+ gfp_mask);
+ at->flags.coherent = NV_TRUE;
+ }
+ else if (at->flags.node0)
+ {
+ NV_ALLOC_PAGES_NODE(virt_addr, 0, 0, gfp_mask);
+ }
+ else
+ {
+ NV_GET_FREE_PAGES(virt_addr, 0,
gfp_mask); + } + + if (virt_addr == 0) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__); + status = NV_ERR_NO_MEMORY; + goto failed; + } +#if !defined(__GFP_ZERO) + if (at->flags.zeroed) + memset((void *)virt_addr, 0, PAGE_SIZE); +#endif + + phys_addr = nv_get_kern_phys_address(virt_addr); + if (phys_addr == 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: %s: failed to look up physical address\n", + __FUNCTION__); + NV_FREE_PAGES(virt_addr, 0); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + +#if defined(_PAGE_NX) + if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) && + (phys_addr < 0x400000)) + { + nv_printf(NV_DBG_SETUP, + "NVRM: VM: %s: discarding page @ 0x%llx\n", + __FUNCTION__, phys_addr); + --i; + continue; + } +#endif + + page_ptr = at->page_table[i]; + page_ptr->phys_addr = phys_addr; + page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr); + page_ptr->virt_addr = virt_addr; + + // + // Use unencrypted dma_addr returned by dma_alloc_coherent() as + // nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled. + // + if (at->flags.coherent) + page_ptr->dma_addr = bus_addr; + else if (dev) + page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr); + else + page_ptr->dma_addr = page_ptr->phys_addr; + + NV_MAYBE_RESERVE_PAGE(page_ptr); + } + + if (at->cache_type != NV_MEMORY_CACHED) + nv_set_memory_type(at, NV_MEMORY_UNCACHED); + + return NV_OK; + +failed: + if (i > 0) + { + for (j = 0; j < i; j++) + { + page_ptr = at->page_table[j]; + NV_MAYBE_UNRESERVE_PAGE(page_ptr); + if (at->flags.coherent) + { + dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr, + page_ptr->dma_addr); + } + else + { + NV_FREE_PAGES(page_ptr->virt_addr, 0); + } + } + } + + return status; +} + +void nv_free_system_pages( + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + unsigned int i; + struct device *dev = at->dev; + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages); + + if (at->cache_type != NV_MEMORY_CACHED) + nv_set_memory_type(at, NV_MEMORY_WRITEBACK); + + for (i = 0; i < at->num_pages; i++) + { + page_ptr = at->page_table[i]; + + if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count) + { + static int count = 0; + if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: %s: page count != initial page count (%u,%u)\n", + __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr), + page_ptr->page_count); + } + } + + NV_MAYBE_UNRESERVE_PAGE(page_ptr); + if (at->flags.coherent) + { + dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr, + page_ptr->dma_addr); + } + else + { + NV_FREE_PAGES(page_ptr->virt_addr, 0); + } + } +} + +NvUPtr nv_vm_map_pages( + struct page **pages, + NvU32 count, + NvBool cached, + NvBool unencrypted +) +{ + NvUPtr virt_addr = 0; + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't map %d pages, invalid context!\n", + __FUNCTION__, count); + os_dbg_breakpoint(); + return virt_addr; + } + + virt_addr = nv_vmap(pages, count, cached, unencrypted); + return virt_addr; +} + +void nv_vm_unmap_pages( + NvUPtr virt_addr, + NvU32 count +) +{ + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't unmap %d pages at 0x%0llx, " + "invalid context!\n", __FUNCTION__, count, virt_addr); + os_dbg_breakpoint(); + return; + } + + nv_vunmap(virt_addr, count); +} + +void nv_address_space_init_once(struct address_space *mapping) +{ +#if defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT) + address_space_init_once(mapping); +#else + 
memset(mapping, 0, sizeof(*mapping)); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + +#if defined(NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK) + // + // The .tree_lock member variable was changed from type rwlock_t, to + // spinlock_t, on 25 July 2008, by mainline commit + // 19fd6231279be3c3bdd02ed99f9b0eb195978064. + // + rwlock_init(&mapping->tree_lock); +#else + spin_lock_init(&mapping->tree_lock); +#endif + + spin_lock_init(&mapping->i_mmap_lock); + INIT_LIST_HEAD(&mapping->private_list); + spin_lock_init(&mapping->private_lock); + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); +#endif /* !NV_ADDRESS_SPACE_INIT_ONCE_PRESENT */ +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c new file mode 100644 index 0000000..df2a01e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address) +{ + /* direct-mapped kernel address */ + if (virt_addr_valid(address)) + return __pa(address); + + nv_printf(NV_DBG_ERRORS, + "NVRM: can't translate address in %s()!\n", __FUNCTION__); + return 0; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c new file mode 100644 index 0000000..efec78e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c @@ -0,0 +1,5640 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvmisc.h"
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-p2p.h"
+#include "nv-reg.h"
+#include "nv-msi.h"
+#include "nv-pci-table.h"
+
+#if defined(NV_UVM_ENABLE)
+#include "nv_uvm_interface.h"
+#endif
+
+#if defined(NV_VGPU_KVM_BUILD)
+#include "nv-vgpu-vfio-interface.h"
+#endif
+
+
+
+
+
+
+#include "nv-frontend.h"
+#include "nv-hypervisor.h"
+#include "nv-ibmnpu.h"
+#include "nv-rsync.h"
+#include "nv-kthread-q.h"
+#include "nv-pat.h"
+#include "nv-dmabuf.h"
+
+#if !defined(CONFIG_RETPOLINE)
+#include "nv-retpoline.h"
+#endif
+
+#include <linux/firmware.h>
+
+#include <sound/core.h> /* HDA struct snd_card */
+
+#include 
+
+#if defined(NV_SOUND_HDAUDIO_H_PRESENT)
+#include "sound/hdaudio.h"
+#endif
+
+#if defined(NV_SOUND_HDA_CODEC_H_PRESENT)
+#include <sound/hda_codec.h>
+#include <sound/hda_verbs.h>
+#include <sound/hda_regmap.h>
+#endif
+
+#if defined(NV_SEQ_READ_ITER_PRESENT)
+#include <linux/uio.h>
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#endif
+
+#include <linux/dmi.h> /* System DMI info */
+
+#include 
+
+#include "conftest/patches.h"
+
+
+
+
+
+#define RM_THRESHOLD_TOTAL_IRQ_COUNT 100000
+#define RM_THRESHOLD_UNAHNDLED_IRQ_COUNT 99900
+#define RM_UNHANDLED_TIMEOUT_US 100000
+
+const NvBool nv_is_rm_firmware_supported_os = NV_TRUE;
+
+// Deprecated, use NV_REG_ENABLE_GPU_FIRMWARE instead
+char *rm_firmware_active = NULL;
+NV_MODULE_STRING_PARAMETER(rm_firmware_active);
+
+#define NV_FIRMWARE_GSP_FILENAME "nvidia/" NV_VERSION_STRING "/gsp.bin"
+#define NV_FIRMWARE_GSP_LOG_FILENAME "nvidia/" NV_VERSION_STRING "/gsp_log.bin"
+
+MODULE_FIRMWARE(NV_FIRMWARE_GSP_FILENAME);
+
+/*
+ * Global NVIDIA capability state, for GPU driver
+ */
+nv_cap_t *nvidia_caps_root = NULL;
+
+/*
+ * our global state; one per device
+ */
+NvU32 num_nv_devices = 0;
+NvU32 num_probed_nv_devices = 0;
+
+nv_linux_state_t *nv_linux_devices;
+
+/*
+ * And one for the control device
+ */
+nv_linux_state_t nv_ctl_device = { { 0 } };
+extern NvU32 nv_dma_remap_peer_mmio;
+
+nv_kthread_q_t nv_kthread_q;
+nv_kthread_q_t nv_deferred_close_kthread_q;
+
+struct rw_semaphore nv_system_pm_lock;
+
+#if defined(CONFIG_PM)
+static nv_power_state_t nv_system_power_state;
+static nv_pm_action_depth_t nv_system_pm_action_depth;
+struct semaphore nv_system_power_state_lock;
+#endif
+
+void *nvidia_p2p_page_t_cache;
+static void *nvidia_pte_t_cache;
+void *nvidia_stack_t_cache;
+static nvidia_stack_t *__nv_init_sp;
+
+static int nv_tce_bypass_mode = NV_TCE_BYPASS_MODE_DEFAULT;
+
+struct semaphore nv_linux_devices_lock;
+
+static
NvTristate nv_chipset_is_io_coherent = NV_TRISTATE_INDETERMINATE; + +// True if all the successfully probed devices support ATS +// Assigned at device probe (module init) time +NvBool nv_ats_supported = NVCPU_IS_PPC64LE + + + + + +; + +// allow an easy way to convert all debug printfs related to events +// back and forth between 'info' and 'errors' +#if defined(NV_DBG_EVENTS) +#define NV_DBG_EVENTINFO NV_DBG_ERRORS +#else +#define NV_DBG_EVENTINFO NV_DBG_INFO +#endif + +#if defined(HDA_MAX_CODECS) +#define NV_HDA_MAX_CODECS HDA_MAX_CODECS +#else +#define NV_HDA_MAX_CODECS 8 +#endif + +/*** + *** STATIC functions, only in this file + ***/ + +/* nvos_ functions.. do not take a state device parameter */ +static int nvos_count_devices(void); + +static nv_alloc_t *nvos_create_alloc(struct device *, int); +static int nvos_free_alloc(nv_alloc_t *); + +/*** + *** EXPORTS to Linux Kernel + ***/ + +static irqreturn_t nvidia_isr_common_bh (void *); +static void nvidia_isr_bh_unlocked (void *); +static int nvidia_ctl_open (struct inode *, struct file *); +static int nvidia_ctl_close (struct inode *, struct file *); + +const char *nv_device_name = MODULE_NAME; +static const char *nvidia_stack_cache_name = MODULE_NAME "_stack_cache"; +static const char *nvidia_pte_cache_name = MODULE_NAME "_pte_cache"; +static const char *nvidia_p2p_page_cache_name = MODULE_NAME "_p2p_page_cache"; + +static int nvidia_open (struct inode *, struct file *); +static int nvidia_close (struct inode *, struct file *); +static unsigned int nvidia_poll (struct file *, poll_table *); +static int nvidia_ioctl (struct inode *, struct file *, unsigned int, unsigned long); + +/* character device entry points*/ +nvidia_module_t nv_fops = { + .owner = THIS_MODULE, + .module_name = MODULE_NAME, + .instance = MODULE_INSTANCE_NUMBER, + .open = nvidia_open, + .close = nvidia_close, + .ioctl = nvidia_ioctl, + .mmap = nvidia_mmap, + .poll = nvidia_poll, +}; + +#if defined(CONFIG_PM) +static int nv_pmops_suspend (struct device *dev); +static int nv_pmops_resume (struct device *dev); +static int nv_pmops_freeze (struct device *dev); +static int nv_pmops_thaw (struct device *dev); +static int nv_pmops_restore (struct device *dev); +static int nv_pmops_poweroff (struct device *dev); +static int nv_pmops_runtime_suspend (struct device *dev); +static int nv_pmops_runtime_resume (struct device *dev); + +struct dev_pm_ops nv_pm_ops = { + .suspend = nv_pmops_suspend, + .resume = nv_pmops_resume, + .freeze = nv_pmops_freeze, + .thaw = nv_pmops_thaw, + .poweroff = nv_pmops_poweroff, + .restore = nv_pmops_restore, + .runtime_suspend = nv_pmops_runtime_suspend, + .runtime_resume = nv_pmops_runtime_resume, +}; +#endif + +/*** + *** see nv.h for functions exported to other parts of resman + ***/ + +/*** + *** STATIC functions + ***/ + +#if defined(NVCPU_X86_64) +#define NV_AMD_SEV_BIT BIT(1) + +static +NvBool nv_is_sev_supported( + void +) +{ + unsigned int eax, ebx, ecx, edx; + + /* Check for the SME/SEV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return NV_FALSE; + + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + /* Check whether SEV is supported */ + if (!(eax & NV_AMD_SEV_BIT)) + return NV_FALSE; + + return NV_TRUE; +} +#endif + +static +void nv_sev_init( + void +) +{ +#if defined(MSR_AMD64_SEV) && defined(NVCPU_X86_64) + NvU32 lo_val, hi_val; + + if (!nv_is_sev_supported()) + return; + + rdmsr(MSR_AMD64_SEV, lo_val, hi_val); + + os_sev_status = lo_val; +#if 
defined(MSR_AMD64_SEV_ENABLED) + os_sev_enabled = (os_sev_status & MSR_AMD64_SEV_ENABLED); +#endif +#endif +} + +static +nv_alloc_t *nvos_create_alloc( + struct device *dev, + int num_pages +) +{ + nv_alloc_t *at; + unsigned int pt_size, i; + + NV_KMALLOC(at, sizeof(nv_alloc_t)); + if (at == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n"); + return NULL; + } + + memset(at, 0, sizeof(nv_alloc_t)); + + at->dev = dev; + pt_size = num_pages * sizeof(nvidia_pte_t *); + if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n"); + NV_KFREE(at, sizeof(nv_alloc_t)); + return NULL; + } + + memset(at->page_table, 0, pt_size); + at->num_pages = num_pages; + NV_ATOMIC_SET(at->usage_count, 0); + + for (i = 0; i < at->num_pages; i++) + { + at->page_table[i] = NV_KMEM_CACHE_ALLOC(nvidia_pte_t_cache); + if (at->page_table[i] == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate page table entry\n"); + nvos_free_alloc(at); + return NULL; + } + memset(at->page_table[i], 0, sizeof(nvidia_pte_t)); + } + + at->pid = os_get_current_process(); + + return at; +} + +static +int nvos_free_alloc( + nv_alloc_t *at +) +{ + unsigned int i; + + if (at == NULL) + return -1; + + if (NV_ATOMIC_READ(at->usage_count)) + return 1; + + for (i = 0; i < at->num_pages; i++) + { + if (at->page_table[i] != NULL) + NV_KMEM_CACHE_FREE(at->page_table[i], nvidia_pte_t_cache); + } + os_free_mem(at->page_table); + + NV_KFREE(at, sizeof(nv_alloc_t)); + + return 0; +} + +static void +nv_module_resources_exit(nv_stack_t *sp) +{ + nv_kmem_cache_free_stack(sp); + + NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache); +} + +static int __init +nv_module_resources_init(nv_stack_t **sp) +{ + int rc = -ENOMEM; + + nvidia_stack_t_cache = NV_KMEM_CACHE_CREATE(nvidia_stack_cache_name, + nvidia_stack_t); + if (nvidia_stack_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_stack_t cache allocation failed.\n"); + goto exit; + } + + nvidia_pte_t_cache = NV_KMEM_CACHE_CREATE(nvidia_pte_cache_name, + nvidia_pte_t); + if (nvidia_pte_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_pte_t cache allocation failed.\n"); + goto exit; + } + + nvidia_p2p_page_t_cache = NV_KMEM_CACHE_CREATE(nvidia_p2p_page_cache_name, + nvidia_p2p_page_t); + if (nvidia_p2p_page_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_p2p_page_t cache allocation failed.\n"); + goto exit; + } + + rc = nv_kmem_cache_alloc_stack(sp); + if (rc < 0) + { + goto exit; + } + +exit: + if (rc < 0) + { + nv_kmem_cache_free_stack(*sp); + + NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache); + } + + return rc; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static void +nv_module_state_exit(nv_stack_t *sp) +{ + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + + nv_teardown_pat_support(); + + nv_kthread_q_stop(&nv_deferred_close_kthread_q); + nv_kthread_q_stop(&nv_kthread_q); + + nv_lock_destroy_locks(sp, nv); +} + +static int +nv_module_state_init(nv_stack_t *sp) +{ + int rc; + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + + nv->os_state = (void *)&nv_ctl_device; + + if (!nv_lock_init_locks(sp, nv)) + { + return -ENOMEM; + } + + rc = nv_kthread_q_init(&nv_kthread_q, "nv_queue"); + if (rc != 0) 
+ { + goto exit; + } + + rc = nv_kthread_q_init(&nv_deferred_close_kthread_q, "nv_queue"); + if (rc != 0) + { + nv_kthread_q_stop(&nv_kthread_q); + goto exit; + } + + rc = nv_init_pat_support(sp); + if (rc < 0) + { + nv_kthread_q_stop(&nv_deferred_close_kthread_q); + nv_kthread_q_stop(&nv_kthread_q); + goto exit; + } + + nv_linux_devices = NULL; + NV_INIT_MUTEX(&nv_linux_devices_lock); + init_rwsem(&nv_system_pm_lock); + +#if defined(CONFIG_PM) + NV_INIT_MUTEX(&nv_system_power_state_lock); + nv_system_power_state = NV_POWER_STATE_RUNNING; + nv_system_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT; +#endif + + NV_SPIN_LOCK_INIT(&nv_ctl_device.snapshot_timer_lock); + +exit: + if (rc < 0) + { + nv_lock_destroy_locks(sp, nv); + } + + return rc; +} + +static void __init +nv_registry_keys_init(nv_stack_t *sp) +{ + NV_STATUS status; + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + NvU32 data; + + /* + * Determine the TCE bypass mode here so it can be used during + * device probe. Also determine whether we should allow + * user-mode NUMA onlining of device memory. + */ + if (NVCPU_IS_PPC64LE) + { + status = rm_read_registry_dword(sp, nv, + NV_REG_TCE_BYPASS_MODE, + &data); + if ((status == NV_OK) && ((int)data != NV_TCE_BYPASS_MODE_DEFAULT)) + { + nv_tce_bypass_mode = data; + } + + if (NVreg_EnableUserNUMAManagement) + { + /* Force on the core RM registry key to match. */ + status = rm_write_registry_dword(sp, nv, "RMNumaOnlining", 1); + WARN_ON(status != NV_OK); + } + } + + status = rm_read_registry_dword(sp, nv, NV_DMA_REMAP_PEER_MMIO, &data); + if (status == NV_OK) + { + nv_dma_remap_peer_mmio = data; + } +} + +static void __init +nv_report_applied_patches(void) +{ + unsigned i; + + for (i = 0; __nv_patches[i].short_description; i++) + { + if (i == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Applied patches:\n"); + } + + nv_printf(NV_DBG_ERRORS, + "NVRM: Patch #%d: %s\n", i + 1, __nv_patches[i].short_description); + } +} + +static void +nv_drivers_exit(void) +{ + + nv_platform_unregister_driver(); + + nv_pci_unregister_driver(); + + nvidia_unregister_module(&nv_fops); +} + +static int __init +nv_drivers_init(void) +{ + int rc; + + rc = nvidia_register_module(&nv_fops); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to register character device.\n"); + return rc; + } + + rc = nv_pci_register_driver(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA PCI devices found.\n"); + rc = -ENODEV; + goto exit; + } + + + rc = nv_platform_register_driver(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: SOC driver registration failed!\n"); + nv_pci_unregister_driver(); + rc = -ENODEV; + } + + +exit: + if (rc < 0) + { + nvidia_unregister_module(&nv_fops); + } + + return rc; +} + +static void +nv_module_exit(nv_stack_t *sp) +{ + nv_module_state_exit(sp); + + rm_shutdown_rm(sp); + + nv_destroy_rsync_info(); + + + + + nv_cap_drv_exit(); + + nv_module_resources_exit(sp); +} + +static int __init +nv_module_init(nv_stack_t **sp) +{ + int rc; + + rc = nv_module_resources_init(sp); + if (rc < 0) + { + return rc; + } + + rc = nv_cap_drv_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv-cap-drv init failed.\n"); + goto cap_drv_exit; + } + + + + + + + + + + nv_init_rsync_info(); + nv_sev_init(); + + if (!rm_init_rm(*sp)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed!\n"); + rc = -EIO; + goto nvlink_exit; + } + + rc = nv_module_state_init(*sp); + if (rc < 0) + { + goto init_rm_exit; + } + + return rc; + +init_rm_exit: + rm_shutdown_rm(*sp); + +nvlink_exit: + 
nv_destroy_rsync_info(); + + + + +cap_drv_exit: + nv_cap_drv_exit(); + nv_module_resources_exit(*sp); + + return rc; +} + +/* + * In this function we check for the cases where GPU exclusion is not + * honored, and issue a warning. + * + * Only GPUs that support a mechanism to query UUID prior to + * initializing the GPU can be excluded, so that we can detect and + * exclude them during device probe. This function checks that an + * initialized GPU was not specified in the exclusion list, and issues a + * warning if so. + */ +static void +nv_assert_not_in_gpu_exclusion_list( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + char *uuid = rm_get_gpu_uuid(sp, nv); + + if (uuid == NULL) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Unable to read UUID"); + return; + } + + if (nv_is_uuid_in_gpu_exclusion_list(uuid)) + { + NV_DEV_PRINTF(NV_DBG_WARNINGS, nv, + "Could not exclude GPU %s because PBI is not supported\n", + uuid); + WARN_ON(1); + } + + os_free_mem(uuid); + + return; +} + +static int __init nv_caps_root_init(void) +{ + nvidia_caps_root = os_nv_cap_init("driver/" MODULE_NAME); + + return (nvidia_caps_root == NULL) ? -ENOENT : 0; +} + +static void nv_caps_root_exit(void) +{ + os_nv_cap_destroy_entry(nvidia_caps_root); + nvidia_caps_root = NULL; +} + +int __init nvidia_init_module(void) +{ + int rc; + NvU32 count; + nvidia_stack_t *sp = NULL; + const NvBool is_nvswitch_present = os_is_nvswitch_present(); + + nv_memdbg_init(); + + rc = nv_procfs_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize procfs.\n"); + return rc; + } + + rc = nv_caps_root_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize capabilities.\n"); + goto procfs_exit; + } + + rc = nv_module_init(&sp); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize module.\n"); + goto caps_root_exit; + } + + count = nvos_count_devices(); + if ((count == 0) && (!is_nvswitch_present)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA GPU found.\n"); + rc = -ENODEV; + goto module_exit; + } + + rc = nv_drivers_init(); + if (rc < 0) + { + goto module_exit; + } + + if (num_probed_nv_devices != count) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA probe routine was not called for %d device(s).\n", + count - num_probed_nv_devices); + nv_printf(NV_DBG_ERRORS, + "NVRM: This can occur when a driver such as: \n" + "NVRM: nouveau, rivafb, nvidiafb or rivatv " + "\nNVRM: was loaded and obtained ownership of the NVIDIA device(s).\n"); + nv_printf(NV_DBG_ERRORS, + "NVRM: Try unloading the conflicting kernel module (and/or\n" + "NVRM: reconfigure your kernel without the conflicting\n" + "NVRM: driver(s)), then try loading the NVIDIA kernel module\n" + "NVRM: again.\n"); + } + + if ((num_probed_nv_devices == 0) && (!is_nvswitch_present)) + { + rc = -ENODEV; + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA devices probed.\n"); + goto drivers_exit; + } + + if (num_probed_nv_devices != num_nv_devices) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA probe routine failed for %d device(s).\n", + num_probed_nv_devices - num_nv_devices); + } + + if ((num_nv_devices == 0) && (!is_nvswitch_present)) + { + rc = -ENODEV; + nv_printf(NV_DBG_ERRORS, + "NVRM: None of the NVIDIA devices were initialized.\n"); + goto drivers_exit; + } + + /* + * Initialize registry keys after PCI driver registration has + * completed successfully to support per-device module + * parameters. 
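+ * The keys are read against the control device state (nv_ctl_device)
+ * rather than any individual GPU.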
+ */ + nv_registry_keys_init(sp); + + nv_report_applied_patches(); + + nv_printf(NV_DBG_ERRORS, "NVRM: loading %s\n", pNVRM_ID); + +#if defined(NV_UVM_ENABLE) + rc = nv_uvm_init(); + if (rc != 0) + { + goto drivers_exit; + } +#endif + + __nv_init_sp = sp; + + return 0; + +drivers_exit: + nv_drivers_exit(); + +module_exit: + nv_module_exit(sp); + +caps_root_exit: + nv_caps_root_exit(); + +procfs_exit: + nv_procfs_exit(); + + return rc; +} + +void nvidia_exit_module(void) +{ + nvidia_stack_t *sp = __nv_init_sp; + +#if defined(NV_UVM_ENABLE) + nv_uvm_exit(); +#endif + + nv_drivers_exit(); + + nv_module_exit(sp); + + nv_caps_root_exit(); + + nv_procfs_exit(); + + nv_memdbg_exit(); +} + +static void *nv_alloc_file_private(void) +{ + nv_linux_file_private_t *nvlfp; + unsigned int i; + + NV_KMALLOC(nvlfp, sizeof(nv_linux_file_private_t)); + if (!nvlfp) + return NULL; + + memset(nvlfp, 0, sizeof(nv_linux_file_private_t)); + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + NV_INIT_MUTEX(&nvlfp->fops_sp_lock[i]); + } + init_waitqueue_head(&nvlfp->waitqueue); + NV_SPIN_LOCK_INIT(&nvlfp->fp_lock); + + return nvlfp; +} + +static void nv_free_file_private(nv_linux_file_private_t *nvlfp) +{ + nvidia_event_t *nvet; + + if (nvlfp == NULL) + return; + + for (nvet = nvlfp->event_data_head; nvet != NULL; nvet = nvlfp->event_data_head) + { + nvlfp->event_data_head = nvlfp->event_data_head->next; + NV_KFREE(nvet, sizeof(nvidia_event_t)); + } + + if (nvlfp->mmap_context.page_array != NULL) + { + os_free_mem(nvlfp->mmap_context.page_array); + } + + NV_KFREE(nvlfp, sizeof(nv_linux_file_private_t)); +} + + +static int nv_is_control_device( + struct inode *inode +) +{ + return (minor((inode)->i_rdev) == NV_CONTROL_DEVICE_MINOR); +} + +/* + * Search the global list of nv devices for the one with the given minor device + * number. If found, nvl is returned with nvl->ldata_lock taken. + */ +static nv_linux_state_t *find_minor(NvU32 minor) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + nvl = nv_linux_devices; + while (nvl != NULL) + { + if (nvl->minor_num == minor) + { + down(&nvl->ldata_lock); + break; + } + nvl = nvl->next; + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices for the one with the given gpu_id. + * If found, nvl is returned with nvl->ldata_lock taken. + */ +static nv_linux_state_t *find_gpu_id(NvU32 gpu_id) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + nvl = nv_linux_devices; + while (nvl != NULL) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + if (nv->gpu_id == gpu_id) + { + down(&nvl->ldata_lock); + break; + } + nvl = nvl->next; + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices for the one with the given UUID. Devices + * with missing UUID information are ignored. If found, nvl is returned with + * nvl->ldata_lock taken. + */ +nv_linux_state_t *find_uuid(const NvU8 *uuid) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + const NvU8 *dev_uuid; + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + dev_uuid = nv_get_cached_uuid(nv); + if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + goto out; + up(&nvl->ldata_lock); + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices. 
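+ * Candidates are scanned in list order under LOCK_NV_LINUX_DEVICES().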
The search logic is: + * + * 1) If any device has the given UUID, return it + * + * 2) If no device has the given UUID but at least one device is missing + * its UUID (for example because rm_init_adapter has not run on it yet), + * return that device. + * + * 3) If no device has the given UUID and all UUIDs are present, return NULL. + * + * In cases 1 and 2, nvl is returned with nvl->ldata_lock taken. + * + * The reason for this weird logic is because UUIDs aren't always available. See + * bug 1642200. + */ +static nv_linux_state_t *find_uuid_candidate(const NvU8 *uuid) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + const NvU8 *dev_uuid; + int use_missing; + int has_missing = 0; + + LOCK_NV_LINUX_DEVICES(); + + /* + * Take two passes through the list. The first pass just looks for the UUID. + * The second looks for the target or missing UUIDs. It would be nice if + * this could be done in a single pass by remembering which nvls are missing + * UUIDs, but we have to hold the nvl lock after we check for the UUID. + */ + for (use_missing = 0; use_missing <= 1; use_missing++) + { + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + dev_uuid = nv_get_cached_uuid(nv); + if (dev_uuid) + { + /* Case 1: If a device has the given UUID, return it */ + if (memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + goto out; + } + else + { + /* Case 2: If no device has the given UUID but at least one + * device is missing its UUID, return that device. */ + if (use_missing) + goto out; + has_missing = 1; + } + up(&nvl->ldata_lock); + } + + /* Case 3: If no device has the given UUID and all UUIDs are present, + * return NULL. */ + if (!has_missing) + break; + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +void nv_dev_free_stacks(nv_linux_state_t *nvl) +{ + NvU32 i; + for (i = 0; i < NV_DEV_STACK_COUNT; i++) + { + if (nvl->sp[i]) + { + nv_kmem_cache_free_stack(nvl->sp[i]); + nvl->sp[i] = NULL; + } + } +} + +static int nv_dev_alloc_stacks(nv_linux_state_t *nvl) +{ + NvU32 i; + int rc; + + for (i = 0; i < NV_DEV_STACK_COUNT; i++) + { + rc = nv_kmem_cache_alloc_stack(&nvl->sp[i]); + if (rc != 0) + { + nv_dev_free_stacks(nvl); + return rc; + } + } + + return 0; +} + +static int validate_numa_start_state(nv_linux_state_t *nvl) +{ + int rc = 0; + int numa_status = nv_get_numa_status(nvl); + + if (numa_status != NV_IOCTL_NUMA_STATUS_DISABLED) + { + if (nv_ctl_device.numa_memblock_size == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: numa memblock size of zero " + "found during device start"); + rc = -EINVAL; + } + else + { + /* Keep the individual devices consistent with the control device */ + nvl->numa_memblock_size = nv_ctl_device.numa_memblock_size; + } + } + + return rc; +} + +NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances) +{ + *num_instances = nv->num_dpaux_instance; + return NV_OK; +} + +void NV_API_CALL +nv_schedule_uvm_isr(nv_state_t *nv) +{ +#if defined(NV_UVM_ENABLE) + nv_uvm_event_interrupt(nv_get_cached_uuid(nv)); +#endif +} + +/* + * Brings up the device on the first file open. Assumes nvl->ldata_lock is held. 
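+ * The sequence below is: take an rsync reference, validate the NUMA
+ * start state, take a dynamic power reference, set up any ibmnpu
+ * devices, configure MSI/MSI-X interrupts, request the IRQ, and
+ * finally run rm_init_adapter().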
+ */ +static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + NvU32 msi_config = 0; +#endif + int rc = 0; + NvBool kthread_init = NV_FALSE; + NvBool power_ref = NV_FALSE; + + rc = nv_get_rsync_info(); + if (rc != 0) + { + return rc; + } + + rc = validate_numa_start_state(nvl); + if (rc != 0) + { + goto failed; + } + + if (nv_dev_is_pci(nvl->dev) && (nv->pci_info.device_id == 0)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: open of non-existent GPU with minor number %d\n", nvl->minor_num); + rc = -ENXIO; + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE) != NV_OK) + { + rc = -EINVAL; + goto failed; + } + power_ref = NV_TRUE; + } + else + { + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE) != NV_OK) + { + rc = -EINVAL; + goto failed; + } + power_ref = NV_TRUE; + } + + rc = nv_init_ibmnpu_devices(nv); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to initialize ibmnpu devices attached to GPU with minor number %d\n", + nvl->minor_num); + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rc = nv_dev_alloc_stacks(nvl); + if (rc != 0) + goto failed; + } + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + if (nv_dev_is_pci(nvl->dev)) + { + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rm_read_registry_dword(sp, nv, NV_REG_ENABLE_MSI, &msi_config); + if (msi_config == 1) + { + if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSIX)) + { + nv_init_msix(nv); + } + if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSI) && + !(nv->flags & NV_FLAG_USES_MSIX)) + { + nv_init_msi(nv); + } + } + } + } +#endif + + if (((!(nv->flags & NV_FLAG_USES_MSI)) && (!(nv->flags & NV_FLAG_USES_MSIX))) + && (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY) + && !(nv->flags & NV_FLAG_SOC_IGPU)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "No interrupts of any type are available. 
Cannot use this GPU.\n"); + rc = -EIO; + goto failed; + } + + rc = 0; + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + + rc = nv_soc_register_irqs(nv); + + } + + + + + + + else if (!(nv->flags & NV_FLAG_USES_MSIX)) + { + rc = request_threaded_irq(nv->interrupt_line, nvidia_isr, + nvidia_isr_kthread_bh, nv_default_irq_flags(nv), + nv_device_name, (void *)nvl); + } +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + rc = nv_request_msix_irq(nvl); + } +#endif + } + if (rc != 0) + { + if ((nv->interrupt_line != 0) && (rc == -EBUSY)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Tried to get IRQ %d, but another driver\n", + (unsigned int) nv->interrupt_line); + nv_printf(NV_DBG_ERRORS, "NVRM: has it and is not sharing it.\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: You may want to verify that no audio driver"); + nv_printf(NV_DBG_ERRORS, " is using the IRQ.\n"); + } + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "request_irq() failed (%d)\n", rc); + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rc = os_alloc_mutex(&nvl->isr_bh_unlocked_mutex); + if (rc != 0) + goto failed; + nv_kthread_q_item_init(&nvl->bottom_half_q_item, nvidia_isr_bh_unlocked, (void *)nv); + rc = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name); + if (rc != 0) + goto failed; + kthread_init = NV_TRUE; + + rc = nv_kthread_q_init(&nvl->queue.nvk, "nv_queue"); + if (rc) + goto failed; + nv->queue = &nvl->queue; + } + + if (!rm_init_adapter(sp, nv)) + { + if (!(nv->flags & NV_FLAG_USES_MSIX) && + !(nv->flags & NV_FLAG_SOC_DISPLAY) && + !(nv->flags & NV_FLAG_SOC_IGPU)) + { + free_irq(nv->interrupt_line, (void *) nvl); + } + else if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + + nv_soc_free_irqs(nv); + + } + + + + + + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + nv_free_msix_irq(nvl); + } +#endif + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "rm_init_adapter failed, device minor number %d\n", + nvl->minor_num); + rc = -EIO; + goto failed; + } + + { + const NvU8 *uuid = rm_get_gpu_uuid_raw(sp, nv); + + if (uuid != NULL) + { +#if defined(NV_UVM_ENABLE) + nv_uvm_notify_start_device(uuid); +#endif + } + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_acpi_register_notifier(nvl); + } + + nv->flags |= NV_FLAG_OPEN; + + /* + * Now that RM init is done, allow dynamic power to control the GPU in FINE + * mode, if enabled. (If the mode is COARSE, this unref will do nothing + * which will cause the GPU to remain powered up.) + * This is balanced by a FINE ref increment at the beginning of + * nv_stop_device(). 
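+ * The COARSE reference taken in nv_start_device() is only dropped in
+ * nv_stop_device(), once the last client has closed the device.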
+ */ + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + + return 0; + +failed: +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + if (nv->flags & NV_FLAG_USES_MSI) + { + nv->flags &= ~NV_FLAG_USES_MSI; + NV_PCI_DISABLE_MSI(nvl->pci_dev); + if(nvl->irq_count) + NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t)); + } + if (nv->flags & NV_FLAG_USES_MSIX) + { + nv->flags &= ~NV_FLAG_USES_MSIX; + pci_disable_msix(nvl->pci_dev); + NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t)); + NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct msix_entry)); + } + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } +#endif + + if (nv->queue && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv->queue = NULL; + nv_kthread_q_stop(&nvl->queue.nvk); + } + + if (kthread_init && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + nv_kthread_q_stop(&nvl->bottom_half_q); + + if (nvl->isr_bh_unlocked_mutex) + { + os_free_mutex(nvl->isr_bh_unlocked_mutex); + nvl->isr_bh_unlocked_mutex = NULL; + } + + nv_dev_free_stacks(nvl); + + nv_unregister_ibmnpu_devices(nv); + + if (power_ref) + { + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + } + + nv_put_rsync_info(); + + return rc; +} + +/* + * Makes sure the device is ready for operations and increases nvl->usage_count. + * Assumes nvl->ldata_lock is held. + */ +static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc; + NV_STATUS status; + + if (os_is_vgx_hyper()) + { + /* fail open if GPU is being unbound */ + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Open failed as GPU is locked for unbind operation\n"); + return -ENODEV; + } + } + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Opening GPU with minor number %d\n", + nvl->minor_num); + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Device in removal process\n"); + return -ENODEV; + } + + if ( ! (nv->flags & NV_FLAG_OPEN)) + { + /* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */ + if (NV_ATOMIC_READ(nvl->usage_count) != 0) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Minor device %u is referenced without being open!\n", + nvl->minor_num); + WARN_ON(1); + return -EBUSY; + } + + rc = nv_start_device(nv, sp); + if (rc != 0) + return rc; + } + else if (rm_is_device_sequestered(sp, nv)) + { + /* Do not increment the usage count of sequestered devices. */ + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Device is currently unavailable\n"); + return -EBUSY; + } + + NV_ATOMIC_INC(nvl->usage_count); + return 0; +} + +static void nv_init_mapping_revocation(nv_linux_state_t *nvl, + struct file *file, + nv_linux_file_private_t *nvlfp, + struct inode *inode) +{ + down(&nvl->mmap_lock); + + /* Set up struct address_space for use with unmap_mapping_range() */ + nv_address_space_init_once(&nvlfp->mapping); + nvlfp->mapping.host = inode; + nvlfp->mapping.a_ops = inode->i_mapping->a_ops; +#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO) + nvlfp->mapping.backing_dev_info = inode->i_mapping->backing_dev_info; +#endif + file->f_mapping = &nvlfp->mapping; + + /* Add nvlfp to list of open files in nvl for mapping revocation */ + list_add(&nvlfp->entry, &nvl->open_files); + + up(&nvl->mmap_lock); +} + +/* +** nvidia_open +** +** nv driver open entry point. Sessions are created here. 
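+** For the control device this defers to nvidia_ctl_open(); for GPU
+** devices it looks up the device by minor number and brings it up via
+** nv_open_device() while holding nvl->ldata_lock.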
+*/ +int +nvidia_open( + struct inode *inode, + struct file *file +) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + int rc = 0; + nv_linux_file_private_t *nvlfp = NULL; + nvidia_stack_t *sp = NULL; + unsigned int i; + unsigned int k; + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_open...\n"); + + nvlfp = nv_alloc_file_private(); + if (nvlfp == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate file private!\n"); + return -ENOMEM; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + nv_free_file_private(nvlfp); + return rc; + } + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + rc = nv_kmem_cache_alloc_stack(&nvlfp->fops_sp[i]); + if (rc != 0) + { + nv_kmem_cache_free_stack(sp); + for (k = 0; k < i; ++k) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[k]); + } + nv_free_file_private(nvlfp); + return rc; + } + } + + NV_SET_FILE_PRIVATE(file, nvlfp); + nvlfp->sp = sp; + + /* for control device, just jump to its open routine */ + /* after setting up the private data */ + if (nv_is_control_device(inode)) + { + rc = nvidia_ctl_open(inode, file); + if (rc != 0) + goto failed; + return rc; + } + + rc = nv_down_read_interruptible(&nv_system_pm_lock); + if (rc < 0) + goto failed; + + /* Takes nvl->ldata_lock */ + nvl = find_minor(NV_DEVICE_MINOR_NUMBER(inode)); + if (!nvl) + { + rc = -ENODEV; + up_read(&nv_system_pm_lock); + goto failed; + } + + nvlfp->nvptr = nvl; + nv = NV_STATE_PTR(nvl); + + if ((nv->flags & NV_FLAG_EXCLUDE) != 0) + { + char *uuid = rm_get_gpu_uuid(sp, nv); + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "open() not permitted for excluded %s\n", + (uuid != NULL) ? uuid : "GPU"); + if (uuid != NULL) + os_free_mem(uuid); + rc = -EPERM; + goto failed1; + } + + rc = nv_open_device(nv, sp); + /* Fall-through on error */ + + nv_assert_not_in_gpu_exclusion_list(sp, nv); + +failed1: + up(&nvl->ldata_lock); + + up_read(&nv_system_pm_lock); +failed: + if (rc != 0) + { + if (nvlfp != NULL) + { + nv_kmem_cache_free_stack(sp); + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[i]); + } + nv_free_file_private(nvlfp); + NV_SET_FILE_PRIVATE(file, NULL); + } + } + else + { + nv_init_mapping_revocation(nvl, file, nvlfp, inode); + } + + return rc; +} + +static void validate_numa_shutdown_state(nv_linux_state_t *nvl) +{ + int numa_status = nv_get_numa_status(nvl); + WARN_ON((numa_status != NV_IOCTL_NUMA_STATUS_OFFLINE) && + (numa_status != NV_IOCTL_NUMA_STATUS_DISABLED)); +} + +void nv_shutdown_adapter(nvidia_stack_t *sp, + nv_state_t *nv, + nv_linux_state_t *nvl) +{ + validate_numa_shutdown_state(nvl); + + rm_disable_adapter(sp, nv); + + // It's safe to call nv_kthread_q_stop even if queue is not initialized + nv_kthread_q_stop(&nvl->bottom_half_q); + + if (nv->queue != NULL) + { + nv->queue = NULL; + nv_kthread_q_stop(&nvl->queue.nvk); + } + + if (nvl->isr_bh_unlocked_mutex) + { + os_free_mutex(nvl->isr_bh_unlocked_mutex); + nvl->isr_bh_unlocked_mutex = NULL; + } + + if (!(nv->flags & NV_FLAG_USES_MSIX) && + !(nv->flags & NV_FLAG_SOC_DISPLAY) && + !(nv->flags & NV_FLAG_SOC_IGPU)) + { + free_irq(nv->interrupt_line, (void *)nvl); + if (nv->flags & NV_FLAG_USES_MSI) + { + NV_PCI_DISABLE_MSI(nvl->pci_dev); + if(nvl->irq_count) + NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t)); + } + } + else if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + + nv_soc_free_irqs(nv); + + } + + + + + + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + nv_free_msix_irq(nvl); + pci_disable_msix(nvl->pci_dev); + nv->flags &= 
~NV_FLAG_USES_MSIX; + NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct msix_entry)); + NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t)); + } +#endif + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } + + rm_shutdown_adapter(sp, nv); +} + +/* + * Tears down the device on the last file close. Assumes nvl->ldata_lock is + * held. + */ +static void nv_stop_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + static int persistence_mode_notice_logged; + + /* + * The GPU needs to be powered on to go through the teardown sequence. + * This balances the FINE unref at the end of nv_start_device(). + */ + rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + +#if defined(NV_UVM_ENABLE) + { + const NvU8* uuid; + // Inform UVM before disabling adapter. Use cached copy + uuid = nv_get_cached_uuid(nv); + if (uuid != NULL) + { + // this function cannot fail + nv_uvm_notify_stop_device(uuid); + } + } +#endif + /* Adapter is already shutdown as part of nvidia_pci_remove */ + if (!nv->removed) + { + if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + rm_disable_adapter(sp, nv); + } + else + { + nv_acpi_unregister_notifier(nvl); + nv_shutdown_adapter(sp, nv, nvl); + } + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_dev_free_stacks(nvl); + } + + if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) && + (!persistence_mode_notice_logged) && (!os_is_vgx_hyper())) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Persistence mode is deprecated and" + " will be removed in a future release. Please use" + " nvidia-persistenced instead.\n"); + persistence_mode_notice_logged = 1; + } + + /* leave INIT flag alone so we don't reinit every time */ + nv->flags &= ~NV_FLAG_OPEN; + + nv_unregister_ibmnpu_devices(nv); + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + } + else + { + /* If in legacy persistence mode, only unref FINE refcount. */ + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + } + + nv_put_rsync_info(); +} + +/* + * Decreases nvl->usage_count, stopping the device when it reaches 0. Assumes + * nvl->ldata_lock is held. + */ +static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Attempting to close unopened minor device %u!\n", + nvl->minor_num); + WARN_ON(1); + return; + } + + if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) + nv_stop_device(nv, sp); +} + +/* +** nvidia_close +** +** Primary driver close entry point. +*/ + +static void +nvidia_close_callback( + nv_linux_file_private_t *nvlfp +) +{ + nv_linux_state_t *nvl = nvlfp->nvptr; + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = nvlfp->sp; + unsigned int i; + NvBool bRemove = NV_FALSE; + + rm_cleanup_file_private(sp, nv, &nvlfp->nvfp); + + down(&nvl->mmap_lock); + list_del(&nvlfp->entry); + up(&nvl->mmap_lock); + + down(&nvl->ldata_lock); + nv_close_device(nv, sp); + + bRemove = (!NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) && + (NV_ATOMIC_READ(nvl->usage_count) == 0) && + rm_get_device_remove_flag(sp, nv->gpu_id); + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[i]); + } + + nv_free_file_private(nvlfp); + + /* + * In case of surprise removal of device, we have 2 cases as below: + * + * 1> When nvidia_pci_remove is scheduled prior to nvidia_close. 
+ * nvidia_pci_remove will not destroy linux layer locks & nv linux state + * struct but will set variable nv->removed for nvidia_close. + * Once all the clients are closed, last nvidia_close will clean up linux + * layer locks and nv linux state struct. + * + * 2> When nvidia_close is scheduled prior to nvidia_pci_remove. + * This will be treated as normal working case. nvidia_close will not do + * any cleanup related to linux layer locks and nv linux state struct. + * nvidia_pci_remove when scheduled will do necessary cleanup. + */ + if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed) + { + nvidia_frontend_remove_device((void *)&nv_fops, nvl); + nv_lock_destroy_locks(sp, nv); + NV_KFREE(nvl, sizeof(nv_linux_state_t)); + } + else + { + up(&nvl->ldata_lock); + +#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE) + if (bRemove) + { + NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(nvl->pci_dev); + } +#endif + } + + nv_kmem_cache_free_stack(sp); +} + +static void nvidia_close_deferred(void *data) +{ + nv_linux_file_private_t *nvlfp = data; + + down_read(&nv_system_pm_lock); + + nvidia_close_callback(nvlfp); + + up_read(&nv_system_pm_lock); +} + +int +nvidia_close( + struct inode *inode, + struct file *file +) +{ + int rc; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nv_linux_state_t *nvl = nvlfp->nvptr; + nv_state_t *nv = NV_STATE_PTR(nvl); + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "nvidia_close on GPU with minor number %d\n", NV_DEVICE_MINOR_NUMBER(inode)); + + if (nv_is_control_device(inode)) + { + return nvidia_ctl_close(inode, file); + } + + NV_SET_FILE_PRIVATE(file, NULL); + + rc = nv_down_read_interruptible(&nv_system_pm_lock); + if (rc == 0) + { + nvidia_close_callback(nvlfp); + up_read(&nv_system_pm_lock); + } + else + { + nv_kthread_q_item_init(&nvlfp->deferred_close_q_item, + nvidia_close_deferred, + nvlfp); + rc = nv_kthread_q_schedule_q_item(&nv_deferred_close_kthread_q, + &nvlfp->deferred_close_q_item); + WARN_ON(rc == 0); + } + + return 0; +} + +unsigned int +nvidia_poll( + struct file *file, + poll_table *wait +) +{ + unsigned int mask = 0; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + unsigned long eflags; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping nvidia_poll\n"); + return POLLHUP; + } + + if ((file->f_flags & O_NONBLOCK) == 0) + poll_wait(file, &nvlfp->waitqueue, wait); + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + if ((nvlfp->event_data_head != NULL) || nvlfp->dataless_event_pending) + { + mask = (POLLPRI | POLLIN); + nvlfp->dataless_event_pending = NV_FALSE; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + return mask; +} + +#define NV_CTL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) == 0) \ + { \ + status = -EINVAL; \ + goto done; \ + } \ +} + +#define NV_ACTUAL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) != 0) \ + { \ + status = -EINVAL; \ + goto done; \ + } \ +} + +/* + * Fills the ci array with the state of num_entries devices. Returns -EINVAL if + * num_entries isn't big enough to hold all available devices. 
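+ * (The NV_ESC_CARD_INFO handler below sizes this as + * arg_size / sizeof(nv_ioctl_card_info_t).)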
+ */ +static int nvidia_read_card_info(nv_ioctl_card_info_t *ci, size_t num_entries) +{ + nv_state_t *nv; + nv_linux_state_t *nvl; + size_t i = 0; + int rc = 0; + + /* Clear each card's flags field the lazy way */ + memset(ci, 0, num_entries * sizeof(ci[0])); + + LOCK_NV_LINUX_DEVICES(); + + if (num_entries < num_nv_devices) + { + rc = -EINVAL; + goto out; + } + + for (nvl = nv_linux_devices; nvl && i < num_entries; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + + /* We do not include excluded GPUs in the list... */ + if ((nv->flags & NV_FLAG_EXCLUDE) != 0) + continue; + + ci[i].valid = NV_TRUE; + ci[i].pci_info.domain = nv->pci_info.domain; + ci[i].pci_info.bus = nv->pci_info.bus; + ci[i].pci_info.slot = nv->pci_info.slot; + ci[i].pci_info.vendor_id = nv->pci_info.vendor_id; + ci[i].pci_info.device_id = nv->pci_info.device_id; + ci[i].gpu_id = nv->gpu_id; + ci[i].interrupt_line = nv->interrupt_line; + ci[i].reg_address = nv->regs->cpu_address; + ci[i].reg_size = nv->regs->size; + ci[i].minor_number = nvl->minor_num; + if (nv_dev_is_pci(nvl->dev)) + { + ci[i].fb_address = nv->fb->cpu_address; + ci[i].fb_size = nv->fb->size; + } + i++; + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return rc; +} + +int +nvidia_ioctl( + struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long i_arg) +{ + NV_STATUS rmStatus; + int status = 0; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nvidia_stack_t *sp = NULL; + nv_ioctl_xfer_t ioc_xfer; + void *arg_ptr = (void *) i_arg; + void *arg_copy = NULL; + size_t arg_size = 0; + int arg_cmd; + + nv_printf(NV_DBG_INFO, "NVRM: ioctl(0x%x, 0x%x, 0x%x)\n", + _IOC_NR(cmd), (unsigned int) i_arg, _IOC_SIZE(cmd)); + + status = nv_down_read_interruptible(&nv_system_pm_lock); + if (status < 0) + return status; + + down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]); + sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_IOCTL]; + + rmStatus = nv_check_gpu_state(nv); + if (rmStatus == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping nvidia_ioctl\n"); + status = -EINVAL; + goto done; + } + + arg_size = _IOC_SIZE(cmd); + arg_cmd = _IOC_NR(cmd); + + if (arg_cmd == NV_ESC_IOCTL_XFER_CMD) + { + if (arg_size != sizeof(nv_ioctl_xfer_t)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: invalid ioctl XFER structure size!\n"); + status = -EINVAL; + goto done; + } + + if (NV_COPY_FROM_USER(&ioc_xfer, arg_ptr, sizeof(ioc_xfer))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to copy in ioctl XFER data!\n"); + status = -EFAULT; + goto done; + } + + arg_cmd = ioc_xfer.cmd; + arg_size = ioc_xfer.size; + arg_ptr = NvP64_VALUE(ioc_xfer.ptr); + + if (arg_size > NV_ABSOLUTE_MAX_IOCTL_SIZE) + { + nv_printf(NV_DBG_ERRORS, "NVRM: invalid ioctl XFER size!\n"); + status = -EINVAL; + goto done; + } + } + + NV_KMALLOC(arg_copy, arg_size); + if (arg_copy == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate ioctl memory\n"); + status = -ENOMEM; + goto done; + } + + if (NV_COPY_FROM_USER(arg_copy, arg_ptr, arg_size)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in ioctl data!\n"); + status = -EFAULT; + goto done; + } + + switch (arg_cmd) + { + case NV_ESC_QUERY_DEVICE_INTR: + { + nv_ioctl_query_device_intr *query_intr = arg_copy; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if ((arg_size < sizeof(*query_intr)) || + (!nv->regs->map)) + { + status = -EINVAL; + goto done; + } + + query_intr->intrStatus = + *(nv->regs->map + (NV_RM_DEVICE_INTR_ADDRESS >> 
2)); + query_intr->status = NV_OK; + break; + } + + /* pass out info about the card */ + case NV_ESC_CARD_INFO: + { + size_t num_arg_devices = arg_size / sizeof(nv_ioctl_card_info_t); + + NV_CTL_DEVICE_ONLY(nv); + + status = nvidia_read_card_info(arg_copy, num_arg_devices); + break; + } + + case NV_ESC_ATTACH_GPUS_TO_FD: + { + size_t num_arg_gpus = arg_size / sizeof(NvU32); + size_t i; + + NV_CTL_DEVICE_ONLY(nv); + + if (num_arg_gpus == 0 || nvlfp->num_attached_gpus != 0 || + arg_size % sizeof(NvU32) != 0) + { + status = -EINVAL; + goto done; + } + + NV_KMALLOC(nvlfp->attached_gpus, arg_size); + if (nvlfp->attached_gpus == NULL) + { + status = -ENOMEM; + goto done; + } + memcpy(nvlfp->attached_gpus, arg_copy, arg_size); + nvlfp->num_attached_gpus = num_arg_gpus; + + for (i = 0; i < nvlfp->num_attached_gpus; i++) + { + if (nvlfp->attached_gpus[i] == 0) + { + continue; + } + + if (nvidia_dev_get(nvlfp->attached_gpus[i], sp)) + { + while (i--) + { + if (nvlfp->attached_gpus[i] != 0) + nvidia_dev_put(nvlfp->attached_gpus[i], sp); + } + NV_KFREE(nvlfp->attached_gpus, arg_size); + nvlfp->num_attached_gpus = 0; + + status = -EINVAL; + break; + } + } + + break; + } + + case NV_ESC_CHECK_VERSION_STR: + { + NV_CTL_DEVICE_ONLY(nv); + + rmStatus = rm_perform_version_check(sp, arg_copy, arg_size); + status = ((rmStatus == NV_OK) ? 0 : -EINVAL); + break; + } + + case NV_ESC_SYS_PARAMS: + { + nv_ioctl_sys_params_t *api = arg_copy; + + NV_CTL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_sys_params_t)) + { + status = -EINVAL; + goto done; + } + + /* numa_memblock_size should only be set once */ + if (nvl->numa_memblock_size == 0) + { + nvl->numa_memblock_size = api->memblock_size; + } + else + { + status = (nvl->numa_memblock_size == api->memblock_size) ? + 0 : -EBUSY; + goto done; + } + break; + } + + case NV_ESC_NUMA_INFO: + { + nv_ioctl_numa_info_t *api = arg_copy; + rmStatus = NV_OK; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_numa_info_t)) + { + status = -EINVAL; + goto done; + } + + api->offline_addresses.numEntries = + ARRAY_SIZE(api->offline_addresses.addresses), + + rmStatus = rm_get_gpu_numa_info(sp, nv, + &(api->nid), + &(api->numa_mem_addr), + &(api->numa_mem_size), + (api->offline_addresses.addresses), + &(api->offline_addresses.numEntries)); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto done; + } + + api->status = nv_get_numa_status(nvl); + api->memblock_size = nv_ctl_device.numa_memblock_size; + break; + } + + case NV_ESC_SET_NUMA_STATUS: + { + nv_ioctl_set_numa_status_t *api = arg_copy; + rmStatus = NV_OK; + + if (!NV_IS_SUSER()) + { + status = -EACCES; + goto done; + } + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_set_numa_status_t)) + { + status = -EINVAL; + goto done; + } + + /* + * The nv_linux_state_t for the device needs to be locked + * in order to prevent additional open()/close() calls from + * manipulating the usage count for the device while we + * determine if NUMA state can be changed. + */ + down(&nvl->ldata_lock); + + if (nv_get_numa_status(nvl) != api->status) + { + if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS) + { + /* + * Only the current client should have an open file + * descriptor for the device, to allow safe offlining. + */ + if (NV_ATOMIC_READ(nvl->usage_count) > 1) + { + status = -EBUSY; + goto unlock; + } + else + { + /* + * If this call fails, it indicates that RM + * is not ready to offline memory, and we should keep + * the current NUMA status of ONLINE. 
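+ * Conversely, if nv_set_numa_status() fails after a successful + * offline, rm_gpu_numa_online() is called below to undo it.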
+ */ + rmStatus = rm_gpu_numa_offline(sp, nv); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto unlock; + } + } + } + + status = nv_set_numa_status(nvl, api->status); + if (status < 0) + { + if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS) + (void) rm_gpu_numa_online(sp, nv); + goto unlock; + } + + if (api->status == NV_IOCTL_NUMA_STATUS_ONLINE) + { + rmStatus = rm_gpu_numa_online(sp, nv); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto unlock; + } + } + } + +unlock: + up(&nvl->ldata_lock); + + break; + } + + case NV_ESC_EXPORT_TO_DMABUF_FD: + { + nv_ioctl_export_to_dma_buf_fd_t *params = arg_copy; + + if (arg_size != sizeof(nv_ioctl_export_to_dma_buf_fd_t)) + { + status = -EINVAL; + goto done; + } + + NV_ACTUAL_DEVICE_ONLY(nv); + + params->status = nv_dma_buf_export(nv, params); + + break; + } + + default: + rmStatus = rm_ioctl(sp, nv, &nvlfp->nvfp, arg_cmd, arg_copy, arg_size); + status = ((rmStatus == NV_OK) ? 0 : -EINVAL); + break; + } + +done: + up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]); + + up_read(&nv_system_pm_lock); + + if (arg_copy != NULL) + { + if (status != -EFAULT) + { + if (NV_COPY_TO_USER(arg_ptr, arg_copy, arg_size)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy out ioctl data\n"); + status = -EFAULT; + } + } + NV_KFREE(arg_copy, arg_size); + } + + return status; +} + +irqreturn_t +nvidia_isr_msix( + int irq, + void *arg +) +{ + irqreturn_t ret; + nv_linux_state_t *nvl = (void *) arg; + + // nvidia_isr_msix() is called for each of the MSI-X vectors and they can + // run in parallel on different CPUs (cores), but this is not currently + // supported by nvidia_isr() and its children. As a big hammer fix just + // spinlock around the nvidia_isr() call to serialize them. + // + // At this point interrupts are disabled on the CPU running our ISR (see + // comments for nv_default_irq_flags()) so a plain spinlock is enough. + NV_SPIN_LOCK(&nvl->msix_isr_lock); + + ret = nvidia_isr(irq, arg); + + NV_SPIN_UNLOCK(&nvl->msix_isr_lock); + + return ret; +} + +/* + * driver receives an interrupt + * if someone waiting, then hand it off. + */ +irqreturn_t +nvidia_isr( + int irq, + void *arg +) +{ + nv_linux_state_t *nvl = (void *) arg; + nv_state_t *nv = NV_STATE_PTR(nvl); + NvU32 need_to_run_bottom_half_gpu_lock_held = 0; + NvBool rm_handled = NV_FALSE, uvm_handled = NV_FALSE, rm_fault_handling_needed = NV_FALSE; + NvU32 rm_serviceable_fault_cnt = 0; + NvU32 sec, usec; + NvU16 index = 0; + NvU64 currentTime = 0; + NvBool found_irq = NV_FALSE; + + rm_gpu_handle_mmu_faults(nvl->sp[NV_DEV_STACK_ISR], nv, &rm_serviceable_fault_cnt); + rm_fault_handling_needed = (rm_serviceable_fault_cnt != 0); + +#if defined (NV_UVM_ENABLE) + // + // Returns NV_OK if the UVM driver handled the interrupt + // + // Returns NV_ERR_NO_INTR_PENDING if the interrupt is not for + // the UVM driver. + // + // Returns NV_WARN_MORE_PROCESSING_REQUIRED if the UVM top-half ISR was + // unable to get its lock(s), due to other (UVM) threads holding them. + // + // RM can normally treat NV_WARN_MORE_PROCESSING_REQUIRED the same as + // NV_ERR_NO_INTR_PENDING, but in some cases the extra information may + // be helpful. 
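+ // In all three cases rm_isr() is still invoked below; uvm_handled only + // feeds into the final IRQ_RETVAL().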
+ // + if (nv_uvm_event_interrupt(nv_get_cached_uuid(nv)) == NV_OK) + uvm_handled = NV_TRUE; +#endif + + rm_handled = rm_isr(nvl->sp[NV_DEV_STACK_ISR], nv, + &need_to_run_bottom_half_gpu_lock_held); + + /* Replicating the logic in linux kernel to track unhandled interrupt crossing a threshold */ + if ((nv->flags & NV_FLAG_USES_MSI) || (nv->flags & NV_FLAG_USES_MSIX)) + { + if (nvl->irq_count != NULL) + { + for (index = 0; index < nvl->current_num_irq_tracked; index++) + { + if (nvl->irq_count[index].irq == irq) + { + found_irq = NV_TRUE; + break; + } + + found_irq = NV_FALSE; + } + + if (!found_irq && nvl->current_num_irq_tracked < nvl->num_intr) + { + index = nvl->current_num_irq_tracked; + nvl->irq_count[index].irq = irq; + nvl->current_num_irq_tracked++; + found_irq = NV_TRUE; + } + + if (found_irq) + { + nvl->irq_count[index].total++; + + if(rm_handled == NV_FALSE) + { + os_get_current_time(&sec, &usec); + currentTime = ((NvU64)sec) * 1000000 + (NvU64)usec; + + /* Reset unhandled count if it's been more than 0.1 seconds since the last unhandled IRQ */ + if ((currentTime - nvl->irq_count[index].last_unhandled) > RM_UNHANDLED_TIMEOUT_US) + nvl->irq_count[index].unhandled = 1; + else + nvl->irq_count[index].unhandled++; + + nvl->irq_count[index].last_unhandled = currentTime; + rm_handled = NV_TRUE; + } + + if (nvl->irq_count[index].total >= RM_THRESHOLD_TOTAL_IRQ_COUNT) + { + if (nvl->irq_count[index].unhandled > RM_THRESHOLD_UNAHNDLED_IRQ_COUNT) + nv_printf(NV_DBG_ERRORS,"NVRM: Going over RM unhandled interrupt threshold for irq %d\n", irq); + + nvl->irq_count[index].total = 0; + nvl->irq_count[index].unhandled = 0; + nvl->irq_count[index].last_unhandled = 0; + } + } + else + nv_printf(NV_DBG_ERRORS,"NVRM: IRQ number out of valid range\n"); + } + } + + if (need_to_run_bottom_half_gpu_lock_held) + { + return IRQ_WAKE_THREAD; + } + else + { + // + // If rm_isr does not need to run a bottom half and mmu_faults_copied + // indicates that bottom half is needed, then we enqueue a kthread based + // bottom half, as this specific bottom_half will acquire the GPU lock + // + if (rm_fault_handling_needed) + nv_kthread_q_schedule_q_item(&nvl->bottom_half_q, &nvl->bottom_half_q_item); + } + + return IRQ_RETVAL(rm_handled || uvm_handled || rm_fault_handling_needed); +} + +irqreturn_t +nvidia_isr_kthread_bh( + int irq, + void *data +) +{ + return nvidia_isr_common_bh(data); +} + +irqreturn_t +nvidia_isr_msix_kthread_bh( + int irq, + void *data +) +{ + NV_STATUS status; + irqreturn_t ret; + nv_state_t *nv = (nv_state_t *) data; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // + // Synchronize kthreads servicing bottom halves for different MSI-X vectors + // as they share same pre-allocated alt-stack. 
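+ // (The shared alt-stack is nvl->sp[NV_DEV_STACK_ISR_BH], picked up in + // nvidia_isr_common_bh().)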
+ // + status = os_acquire_mutex(nvl->msix_bh_mutex); + // os_acquire_mutex can only fail if we cannot sleep and we can + WARN_ON(status != NV_OK); + + ret = nvidia_isr_common_bh(data); + + os_release_mutex(nvl->msix_bh_mutex); + + return ret; +} + +static irqreturn_t +nvidia_isr_common_bh( + void *data +) +{ + nv_state_t *nv = (nv_state_t *) data; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_ISR_BH]; + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping ISR bottom half\n"); + } + else + { + rm_isr_bh(sp, nv); + } + + return IRQ_HANDLED; +} + +static void +nvidia_isr_bh_unlocked( + void * args +) +{ + nv_state_t *nv = (nv_state_t *) args; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nvidia_stack_t *sp; + NV_STATUS status; + + // + // Synchronize kthreads servicing unlocked bottom half as they + // share same pre-allocated stack for alt-stack + // + status = os_acquire_mutex(nvl->isr_bh_unlocked_mutex); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: Unable to take bottom_half mutex!\n", + __FUNCTION__); + WARN_ON(1); + } + + sp = nvl->sp[NV_DEV_STACK_ISR_BH_UNLOCKED]; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, + "NVRM: GPU is lost, skipping unlocked ISR bottom half\n"); + } + else + { + rm_isr_bh_unlocked(sp, nv); + } + + os_release_mutex(nvl->isr_bh_unlocked_mutex); +} + +static void +nvidia_rc_timer_callback( + struct nv_timer *nv_timer +) +{ + nv_linux_state_t *nvl = container_of(nv_timer, nv_linux_state_t, rc_timer); + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_TIMER]; + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, + "NVRM: GPU is lost, skipping device timer callbacks\n"); + return; + } + + if (rm_run_rc_callback(sp, nv) == NV_OK) + { + // set another timeout 1 sec in the future: + mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ); + } +} + +/* +** nvidia_ctl_open +** +** nv control driver open entry point. Sessions are created here. 
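+** The control node has no GPU hardware behind it; open simply records the +** session by bumping usage_count under ldata_lock (the NV_FLAG_OPEN and +** NV_FLAG_CONTROL flags are set on first open).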
+*/ +static int +nvidia_ctl_open( + struct inode *inode, + struct file *file +) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_open\n"); + + down(&nvl->ldata_lock); + + /* save the nv away in file->private_data */ + nvlfp->nvptr = nvl; + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL); + } + + NV_ATOMIC_INC(nvl->usage_count); + up(&nvl->ldata_lock); + + return 0; +} + + +/* +** nvidia_ctl_close +*/ +static int +nvidia_ctl_close( + struct inode *inode, + struct file *file +) +{ + nv_alloc_t *at, *next; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nvidia_stack_t *sp = nvlfp->sp; + unsigned int i; + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n"); + + down(&nvl->ldata_lock); + if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) + { + nv->flags &= ~NV_FLAG_OPEN; + } + up(&nvl->ldata_lock); + + rm_cleanup_file_private(sp, nv, &nvlfp->nvfp); + + if (nvlfp->free_list != NULL) + { + at = nvlfp->free_list; + while (at != NULL) + { + next = at->next; + if (at->pid == os_get_current_process()) + NV_PRINT_AT(NV_DBG_MEMINFO, at); + nv_free_pages(nv, at->num_pages, + at->flags.contig, + at->cache_type, + (void *)at); + at = next; + } + } + + if (nvlfp->num_attached_gpus != 0) + { + size_t i; + + for (i = 0; i < nvlfp->num_attached_gpus; i++) + { + if (nvlfp->attached_gpus[i] != 0) + nvidia_dev_put(nvlfp->attached_gpus[i], sp); + } + + NV_KFREE(nvlfp->attached_gpus, sizeof(NvU32) * nvlfp->num_attached_gpus); + nvlfp->num_attached_gpus = 0; + } + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[i]); + } + + nv_free_file_private(nvlfp); + NV_SET_FILE_PRIVATE(file, NULL); + + nv_kmem_cache_free_stack(sp); + + return 0; +} + + +void NV_API_CALL +nv_set_dma_address_size( + nv_state_t *nv, + NvU32 phys_addr_bits +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 start_addr = nv_get_dma_start_address(nv); + NvU64 new_mask = (((NvU64)1) << phys_addr_bits) - 1; + + nvl->dma_dev.addressable_range.limit = start_addr + new_mask; + + /* + * The only scenario in which we definitely should not update the DMA mask + * is on POWER, when using TCE bypass mode (see nv_get_dma_start_address() + * for details), since the meaning of the DMA mask is overloaded in that + * case. 
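+ * Note that the addressable_range.limit update above (start_addr + + * (1ULL << phys_addr_bits) - 1) happens regardless; only the + * dma_set_mask()/dma_set_coherent_mask() calls are skipped.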
+ */ + if (!nvl->tce_bypass_enabled) + { + dma_set_mask(&nvl->pci_dev->dev, new_mask); + /* Certain kernels have a bug which causes pci_set_consistent_dma_mask + * to call GPL sme_active symbol, this bug has already been fixed in a + * minor release update but detect the failure scenario here to prevent + * an installation regression */ +#if !NV_IS_EXPORT_SYMBOL_GPL_sme_active + dma_set_coherent_mask(&nvl->pci_dev->dev, new_mask); +#endif + } +} + +static NvUPtr +nv_map_guest_pages(nv_alloc_t *at, + NvU64 address, + NvU32 page_count, + NvU32 page_idx) +{ + struct page **pages; + NvU32 j; + NvUPtr virt_addr; + + NV_KMALLOC(pages, sizeof(struct page *) * page_count); + if (pages == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate vmap() page descriptor table!\n"); + return 0; + } + + for (j = 0; j < page_count; j++) + { + pages[j] = NV_GET_PAGE_STRUCT(at->page_table[page_idx+j]->phys_addr); + } + + virt_addr = nv_vm_map_pages(pages, page_count, + at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted); + NV_KFREE(pages, sizeof(struct page *) * page_count); + + return virt_addr; +} + +NV_STATUS NV_API_CALL +nv_alias_pages( + nv_state_t *nv, + NvU32 page_cnt, + NvU32 contiguous, + NvU32 cache_type, + NvU64 guest_id, + NvU64 *pte_array, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU32 i=0; + nvidia_pte_t *page_ptr = NULL; + + at = nvos_create_alloc(nvl->dev, page_cnt); + + if (at == NULL) + { + return NV_ERR_NO_MEMORY; + } + + at->cache_type = cache_type; + if (contiguous) + at->flags.contig = NV_TRUE; +#if defined(NVCPU_AARCH64) + if (at->cache_type != NV_MEMORY_CACHED) + at->flags.aliased = NV_TRUE; +#endif + + at->flags.guest = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i=0; i < at->num_pages; ++i) + { + page_ptr = at->page_table[i]; + + if (contiguous && i>0) + { + page_ptr->dma_addr = pte_array[0] + (i << PAGE_SHIFT); + } + else + { + page_ptr->dma_addr = pte_array[i]; + } + + page_ptr->phys_addr = page_ptr->dma_addr; + + /* aliased pages will be mapped on demand. */ + page_ptr->virt_addr = 0x0; + } + + at->guest_id = guest_id; + *priv_data = at; + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +/* + * This creates a dummy nv_alloc_t for peer IO mem, so that it can + * be mapped using NvRmMapMemory. + */ +NV_STATUS NV_API_CALL nv_register_peer_io_mem( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 i; + NvU64 addr; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + + // IO regions should be uncached and contiguous + at->cache_type = NV_MEMORY_UNCACHED; + at->flags.contig = NV_TRUE; +#if defined(NVCPU_AARCH64) + at->flags.aliased = NV_TRUE; +#endif + at->flags.peer_io = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + addr = phys_addr[0]; + + for (i = 0; i < page_count; i++) + { + at->page_table[i]->phys_addr = addr; + addr += PAGE_SIZE; + } + + // No struct page array exists for this memory. 
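+ // Only page_table[i]->phys_addr was populated above, so user_pages is + // left NULL for this allocation.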
+ at->user_pages = NULL; + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_peer_io_mem( + nv_state_t *nv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + nvos_free_alloc(at); +} + +/* + * By registering user pages, we create a dummy nv_alloc_t for it, so that the + * rest of the RM can treat it like any other alloc. + * + * This also converts the page array to an array of physical addresses. + */ +NV_STATUS NV_API_CALL nv_register_user_pages( + nv_state_t *nv, + NvU64 page_count, + NvU64 *phys_addr, + void *import_priv, + void **priv_data +) +{ + nv_alloc_t *at; + NvU64 i; + struct page **user_pages; + nv_linux_state_t *nvl; + nvidia_pte_t *page_ptr; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_register_user_pages: 0x%x\n", page_count); + user_pages = *priv_data; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + { + return NV_ERR_NO_MEMORY; + } + + /* + * Anonymous memory currently must be write-back cacheable, and we can't + * enforce contiguity. + */ + at->cache_type = NV_MEMORY_UNCACHED; +#if defined(NVCPU_AARCH64) + at->flags.aliased = NV_TRUE; +#endif + + at->flags.user = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i = 0; i < page_count; i++) + { + /* + * We only assign the physical address and not the DMA address, since + * this allocation hasn't been DMA-mapped yet. + */ + page_ptr = at->page_table[i]; + page_ptr->phys_addr = page_to_phys(user_pages[i]); + + phys_addr[i] = page_ptr->phys_addr; + } + + /* Save off the user pages array to be restored later */ + at->user_pages = user_pages; + + /* Save off the import private data to be returned later */ + if (import_priv != NULL) + { + at->import_priv = import_priv; + } + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_user_pages( + nv_state_t *nv, + NvU64 page_count, + void **import_priv, + void **priv_data +) +{ + nv_alloc_t *at = *priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_user_pages: 0x%x\n", page_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + WARN_ON(!at->flags.user); + + /* Restore the user pages array for the caller to handle */ + *priv_data = at->user_pages; + + /* Return the import private data for the caller to handle */ + if (import_priv != NULL) + { + *import_priv = at->import_priv; + } + + nvos_free_alloc(at); +} + +/* + * This creates a dummy nv_alloc_t for existing physical allocations, so + * that it can be mapped using NvRmMapMemory and BAR2 code path. + */ +NV_STATUS NV_API_CALL nv_register_phys_pages( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + NvU32 cache_type, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 i; + NvU64 addr; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + /* + * Setting memory flags to cacheable and discontiguous. 
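+ * (The cache attribute is actually whatever the caller passed in + * cache_type; it is not forced to cacheable.)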
+ */ + at->cache_type = cache_type; + + /* + * Only physical address is available so we don't try to reuse existing + * mappings + */ + at->flags.physical = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i = 0, addr = phys_addr[0]; i < page_count; addr = phys_addr[++i]) + { + at->page_table[i]->phys_addr = addr; + } + + at->user_pages = NULL; + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_register_sgt( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + NvU32 cache_type, + void **priv_data, + struct sg_table *import_sgt, + void *import_priv +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + unsigned int i, j = 0; + NvU64 sg_addr, sg_off, sg_len; + struct scatterlist *sg; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + + /* Populate phys addrs with DMA addrs from SGT */ + for_each_sg(import_sgt->sgl, sg, import_sgt->nents, i) + { + /* + * It is possible for dma_map_sg() to merge scatterlist entries, so + * make sure we account for that here. + */ + for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), sg_off = 0; + (sg_off < sg_len) && (j < page_count); + sg_off += PAGE_SIZE, j++) + { + phys_addr[j] = sg_addr + sg_off; + } + } + + /* + * Setting memory flags to cacheable and discontiguous. + */ + at->cache_type = cache_type; + + at->import_sgt = import_sgt; + + /* Save off the import private data to be returned later */ + if (import_priv != NULL) + { + at->import_priv = import_priv; + } + + at->order = get_order(at->num_pages * PAGE_SIZE); + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_sgt( + nv_state_t *nv, + struct sg_table **import_sgt, + void **import_priv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_sgt\n"); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + /* Restore the imported SGT for the caller to handle */ + *import_sgt = at->import_sgt; + + /* Return the import private data for the caller to handle */ + if (import_priv != NULL) + { + *import_priv = at->import_priv; + } + + nvos_free_alloc(at); +} + +void NV_API_CALL nv_unregister_phys_pages( + nv_state_t *nv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + nvos_free_alloc(at); +} + +NV_STATUS NV_API_CALL nv_get_num_phys_pages( + void *pAllocPrivate, + NvU32 *pNumPages +) +{ + nv_alloc_t *at = pAllocPrivate; + + if (!pNumPages) { + return NV_ERR_INVALID_ARGUMENT; + } + + *pNumPages = at->num_pages; + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_get_phys_pages( + void *pAllocPrivate, + void *pPages, + NvU32 *pNumPages +) +{ + nv_alloc_t *at = pAllocPrivate; + struct page **pages = (struct page **)pPages; + NvU32 page_count; + int i; + + if (!pNumPages || !pPages) { + return NV_ERR_INVALID_ARGUMENT; + } + + page_count = NV_MIN(*pNumPages, at->num_pages); + + for (i = 0; i < page_count; i++) { + pages[i] = NV_GET_PAGE_STRUCT(at->page_table[i]->phys_addr); + } + + *pNumPages = page_count; + + return NV_OK; +} + +void* NV_API_CALL nv_alloc_kernel_mapping( + nv_state_t *nv, + void *pAllocPrivate, + NvU64 pageIndex, + NvU32 pageOffset, + NvU64 size, + void **pPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + NvU32 j, page_count; + NvUPtr virt_addr; + struct page **pages; + NvBool isUserAllocatedMem; + + // + // For User allocated memory (like ErrorNotifier's) which is NOT allocated + // nor 
owned by RM, the RM driver just stores the physical address + // corresponding to that memory and does not map it until required. + // In that case, in page tables the virt_addr == 0, so first we need to map + // those pages to obtain virtual address. + // + isUserAllocatedMem = at->flags.user && + !at->page_table[pageIndex]->virt_addr && + at->page_table[pageIndex]->phys_addr; + + // + // User memory may NOT have kernel VA. So check this and fallback to else + // case to create one. + // + if (((size + pageOffset) <= PAGE_SIZE) && + !at->flags.guest && !at->flags.aliased && + !isUserAllocatedMem && !at->flags.physical) + { + *pPrivate = NULL; + return (void *)(at->page_table[pageIndex]->virt_addr + pageOffset); + } + else + { + size += pageOffset; + page_count = (size >> PAGE_SHIFT) + ((size & ~NV_PAGE_MASK) ? 1 : 0); + + if (at->flags.guest) + { + virt_addr = nv_map_guest_pages(at, + nv->bars[NV_GPU_BAR_INDEX_REGS].cpu_address, + page_count, pageIndex); + } + else + { + NV_KMALLOC(pages, sizeof(struct page *) * page_count); + if (pages == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate vmap() page descriptor table!\n"); + return NULL; + } + + for (j = 0; j < page_count; j++) + pages[j] = NV_GET_PAGE_STRUCT(at->page_table[pageIndex+j]->phys_addr); + + virt_addr = nv_vm_map_pages(pages, page_count, + at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted); + NV_KFREE(pages, sizeof(struct page *) * page_count); + } + + if (virt_addr == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to map pages!\n"); + return NULL; + } + + *pPrivate = (void *)(NvUPtr)page_count; + return (void *)(virt_addr + pageOffset); + } + + return NULL; +} + +NV_STATUS NV_API_CALL nv_free_kernel_mapping( + nv_state_t *nv, + void *pAllocPrivate, + void *address, + void *pPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + NvUPtr virt_addr; + NvU32 page_count; + + virt_addr = ((NvUPtr)address & NV_PAGE_MASK); + page_count = (NvUPtr)pPrivate; + + if (at->flags.guest) + { + nv_iounmap((void *)virt_addr, (page_count * PAGE_SIZE)); + } + else if (pPrivate != NULL) + { + nv_vm_unmap_pages(virt_addr, page_count); + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_alloc_pages( + nv_state_t *nv, + NvU32 page_count, + NvBool contiguous, + NvU32 cache_type, + NvBool zeroed, + NvBool unencrypted, + NvU64 *pte_array, + void **priv_data +) +{ + nv_alloc_t *at; + NV_STATUS status = NV_ERR_NO_MEMORY; + nv_linux_state_t *nvl = NULL; + NvBool will_remap = NV_FALSE; + NvU32 i; + struct device *dev = NULL; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages\n", page_count); + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: contig %d cache_type %d\n", + contiguous, cache_type); + + // + // system memory allocation can be associated with a client instead of a gpu + // handle the case where per device state is NULL + // + if(nv) + { + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + will_remap = nv_requires_dma_remap(nv); + dev = nvl->dev; + } + + if (nv_encode_caching(NULL, cache_type, NV_MEMORY_TYPE_SYSTEM)) + return NV_ERR_NOT_SUPPORTED; + + at = nvos_create_alloc(dev, page_count); + if (at == NULL) + return NV_ERR_NO_MEMORY; + + at->cache_type = cache_type; + + if (contiguous) + at->flags.contig = NV_TRUE; + if (zeroed) + at->flags.zeroed = NV_TRUE; +#if defined(NVCPU_AARCH64) + if (at->cache_type != NV_MEMORY_CACHED) + at->flags.aliased = NV_TRUE; +#endif + if (unencrypted) + at->flags.unencrypted = NV_TRUE; + +#if defined(NVCPU_PPC64LE) + /* + * Starting on Power9 systems, DMA addresses for NVLink are no longer the + * same 
as used over PCIe. There is an address compression scheme required + * for NVLink ONLY which impacts the upper address bits of the DMA address. + * + * This divergence between PCIe and NVLink DMA mappings breaks assumptions + * in the driver where during initialization we allocate system memory + * for the GPU to access over PCIe before NVLink is trained -- and some of + * these mappings persist on the GPU. If these persistent mappings are not + * equivalent they will cause invalid DMA accesses from the GPU once we + * switch to NVLink. + * + * To work around this we limit all system memory allocations from the driver + * during the period before NVLink is enabled to be from NUMA node 0 (CPU 0) + * which has a CPU real address with the upper address bits (above bit 42) + * set to 0. Effectively making the PCIe and NVLink DMA mappings equivalent + * allowing persistent system memory mappings already programmed on the GPU + * to remain valid after NVLink is enabled. + * + * See Bug 1920398 for more details. + */ + if (nv && nvl->npu && !nvl->dma_dev.nvlink) + at->flags.node0 = NV_TRUE; +#endif + + if (at->flags.contig) + status = nv_alloc_contig_pages(nv, at); + else + status = nv_alloc_system_pages(nv, at); + + if (status != NV_OK) + goto failed; + + for (i = 0; i < ((contiguous) ? 1 : page_count); i++) + { + /* + * The contents of the pte_array[] depend on whether or not this device + * requires DMA-remapping. If it does, it should be the phys addresses + * used by the DMA-remapping paths, otherwise it should be the actual + * address that the device should use for DMA (which, confusingly, may + * be different than the CPU physical address, due to a static DMA + * offset). + */ + if ((nv == NULL) || will_remap) + { + pte_array[i] = at->page_table[i]->phys_addr; + } + else + { + pte_array[i] = nv_phys_to_dma(dev, + at->page_table[i]->phys_addr); + } + } + + *priv_data = at; + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; + +failed: + nvos_free_alloc(at); + + return status; +} + +NV_STATUS NV_API_CALL nv_free_pages( + nv_state_t *nv, + NvU32 page_count, + NvBool contiguous, + NvU32 cache_type, + void *priv_data +) +{ + NV_STATUS rmStatus = NV_OK; + nv_alloc_t *at = priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_free_pages: 0x%x\n", page_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + /* + * If the 'at' usage count doesn't drop to zero here, not all of + * the user mappings have been torn down in time - we can't + * safely free the memory. We report success back to the RM, but + * defer the actual free operation until later. + * + * This is described in greater detail in the comments above the + * nvidia_vma_(open|release)() callbacks in nv-mmap.c. 
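+ * The allocation is then actually freed when the last of those mappings + * is released, rather than here.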
+ */ + if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count)) + return NV_OK; + + if (!at->flags.guest) + { + if (at->flags.contig) + nv_free_contig_pages(at); + else + nv_free_system_pages(at); + } + + nvos_free_alloc(at); + + return rmStatus; +} + +NvBool nv_lock_init_locks +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + nv_linux_state_t *nvl; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + NV_INIT_MUTEX(&nvl->ldata_lock); + NV_INIT_MUTEX(&nvl->mmap_lock); + + NV_ATOMIC_SET(nvl->usage_count, 0); + + if (!rm_init_event_locks(sp, nv)) + return NV_FALSE; + + return NV_TRUE; +} + +void nv_lock_destroy_locks +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + rm_destroy_event_locks(sp, nv); +} + +void NV_API_CALL nv_post_event( + nv_event_t *event, + NvHandle handle, + NvU32 index, + NvU32 info32, + NvU16 info16, + NvBool data_valid +) +{ + nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(event->nvfp); + unsigned long eflags; + nvidia_event_t *nvet; + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + if (data_valid) + { + NV_KMALLOC_ATOMIC(nvet, sizeof(nvidia_event_t)); + if (nvet == NULL) + { + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + return; + } + + if (nvlfp->event_data_tail != NULL) + nvlfp->event_data_tail->next = nvet; + if (nvlfp->event_data_head == NULL) + nvlfp->event_data_head = nvet; + nvlfp->event_data_tail = nvet; + nvet->next = NULL; + + nvet->event = *event; + nvet->event.hObject = handle; + nvet->event.index = index; + nvet->event.info32 = info32; + nvet->event.info16 = info16; + } + // + // 'event_pending' is interpreted by nvidia_poll() and nv_get_event() to + // mean that an event without data is pending. Therefore, only set it to + // true here if newly posted event is dataless. + // + else + { + nvlfp->dataless_event_pending = NV_TRUE; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + wake_up_interruptible(&nvlfp->waitqueue); +} + +NvBool NV_API_CALL nv_is_rm_firmware_active( + nv_state_t *nv +) +{ + if (rm_firmware_active) + { + // "all" here means all GPUs + if (strcmp(rm_firmware_active, "all") == 0) + return NV_TRUE; + } + return NV_FALSE; +} + +const char *nv_firmware_path( + nv_firmware_t fw_type +) +{ + switch (fw_type) + { + case NV_FIRMWARE_GSP: + return NV_FIRMWARE_GSP_FILENAME; + case NV_FIRMWARE_GSP_LOG: + return NV_FIRMWARE_GSP_LOG_FILENAME; + } + return ""; +} + +const void* NV_API_CALL nv_get_firmware( + nv_state_t *nv, + nv_firmware_t fw_type, + const void **fw_buf, + NvU32 *fw_size +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + const struct firmware *fw; + + // path is relative to /lib/firmware + // if this fails it will print an error to dmesg + if (request_firmware(&fw, nv_firmware_path(fw_type), nvl->dev) != 0) + return NULL; + + *fw_size = fw->size; + *fw_buf = fw->data; + + return fw; +} + +void NV_API_CALL nv_put_firmware( + const void *fw_handle +) +{ + release_firmware(fw_handle); +} + +nv_file_private_t* NV_API_CALL nv_get_file_private( + NvS32 fd, + NvBool ctl, + void **os_private +) +{ + struct file *filp = NULL; + nv_linux_file_private_t *nvlfp = NULL; + dev_t rdev = 0; + + filp = fget(fd); + + if (filp == NULL || !NV_FILE_INODE(filp)) + { + goto fail; + } + + rdev = (NV_FILE_INODE(filp))->i_rdev; + + if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER) + { + goto fail; + } + + if (ctl) + { + if (MINOR(rdev) != NV_CONTROL_DEVICE_MINOR) + goto fail; + } + else + { + NvBool found = NV_FALSE; + int i; + + for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++) + { + if ((nv_minor_num_table[i] != NULL) && 
(MINOR(rdev) == i)) + { + found = NV_TRUE; + break; + } + } + + if (!found) + goto fail; + } + + nvlfp = NV_GET_LINUX_FILE_PRIVATE(filp); + + *os_private = filp; + + return &nvlfp->nvfp; + +fail: + + if (filp != NULL) + { + fput(filp); + } + + return NULL; +} + +void NV_API_CALL nv_put_file_private( + void *os_private +) +{ + struct file *filp = os_private; + fput(filp); +} + +int NV_API_CALL nv_get_event( + nv_file_private_t *nvfp, + nv_event_t *event, + NvU32 *pending +) +{ + nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(nvfp); + nvidia_event_t *nvet; + unsigned long eflags; + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + nvet = nvlfp->event_data_head; + if (nvet == NULL) + { + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + return NV_ERR_GENERIC; + } + + *event = nvet->event; + + if (nvlfp->event_data_tail == nvet) + nvlfp->event_data_tail = NULL; + nvlfp->event_data_head = nvet->next; + + *pending = (nvlfp->event_data_head != NULL); + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + NV_KFREE(nvet, sizeof(nvidia_event_t)); + + return NV_OK; +} + +int NV_API_CALL nv_start_rc_timer( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nv->rc_timer_enabled) + return -1; + + nv_printf(NV_DBG_INFO, "NVRM: initializing rc timer\n"); + + nv_timer_setup(&nvl->rc_timer, nvidia_rc_timer_callback); + + nv->rc_timer_enabled = 1; + + // set the timeout for 1 second in the future: + mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ); + + nv_printf(NV_DBG_INFO, "NVRM: rc timer initialized\n"); + + return 0; +} + +int NV_API_CALL nv_stop_rc_timer( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (!nv->rc_timer_enabled) + return -1; + + nv_printf(NV_DBG_INFO, "NVRM: stopping rc timer\n"); + nv->rc_timer_enabled = 0; + del_timer_sync(&nvl->rc_timer.kernel_timer); + nv_printf(NV_DBG_INFO, "NVRM: rc timer stopped\n"); + + return 0; +} + +#define SNAPSHOT_TIMER_FREQ (jiffies + HZ / NV_SNAPSHOT_TIMER_HZ) + +static void snapshot_timer_callback(struct nv_timer *timer) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + if (nvl->snapshot_callback != NULL) + { + nvl->snapshot_callback(nv->profiler_context); + mod_timer(&timer->kernel_timer, SNAPSHOT_TIMER_FREQ); + } + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); +} + +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + + nvl->snapshot_callback = snapshot_callback; + nv_timer_setup(&nvl->snapshot_timer, snapshot_timer_callback); + mod_timer(&nvl->snapshot_timer.kernel_timer, SNAPSHOT_TIMER_FREQ); +} + +void NV_API_CALL nv_stop_snapshot_timer(void) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + NvBool timer_active; + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + timer_active = nvl->snapshot_callback != NULL; + nvl->snapshot_callback = NULL; + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); + + if (timer_active) + del_timer_sync(&nvl->snapshot_timer.kernel_timer); +} + +void NV_API_CALL nv_flush_snapshot_timer(void) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + if (nvl->snapshot_callback != NULL) + nvl->snapshot_callback(nv->profiler_context); + 
NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); +} + +static int __init +nvos_count_devices(void) +{ + int count; + + count = nv_pci_count_devices(); + + count += nv_platform_count_devices(); + + + return count; +} + +NvBool nvos_is_chipset_io_coherent(void) +{ + if (nv_chipset_is_io_coherent == NV_TRISTATE_INDETERMINATE) + { + nvidia_stack_t *sp = NULL; + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: cannot allocate stack for platform coherence check callback \n"); + WARN_ON(1); + return NV_FALSE; + } + + nv_chipset_is_io_coherent = rm_is_chipset_io_coherent(sp); + + nv_kmem_cache_free_stack(sp); + } + + return nv_chipset_is_io_coherent; +} + +#if defined(CONFIG_PM) +static NV_STATUS +nv_power_management( + nv_state_t *nv, + nv_pm_action_t pm_action +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int status = NV_OK; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping PM event\n"); + goto failure; + } + + switch (pm_action) + { + case NV_PM_ACTION_STANDBY: + /* fall through */ + case NV_PM_ACTION_HIBERNATE: + { + status = rm_power_management(sp, nv, pm_action); + + nv_kthread_q_stop(&nvl->bottom_half_q); + + nv_disable_pat_support(); + break; + } + case NV_PM_ACTION_RESUME: + { + nv_enable_pat_support(); + + nv_kthread_q_item_init(&nvl->bottom_half_q_item, + nvidia_isr_bh_unlocked, (void *)nv); + + status = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name); + if (status != NV_OK) + break; + + status = rm_power_management(sp, nv, pm_action); + break; + } + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + +failure: + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nv_restore_user_channels( + nv_state_t *nv +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_OPEN) == 0) + { + goto done; + } + + status = rm_restart_user_channels(sp, nv); + WARN_ON(status != NV_OK); + + down(&nvl->mmap_lock); + + nv_set_safe_to_mmap_locked(nv, NV_TRUE); + + up(&nvl->mmap_lock); + + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + +done: + up(&nvl->ldata_lock); + + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nv_preempt_user_channels( + nv_state_t *nv +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_OPEN) == 0) + { + goto done; + } + + status = rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + WARN_ON(status != NV_OK); + + down(&nvl->mmap_lock); + + nv_set_safe_to_mmap_locked(nv, NV_FALSE); + nv_revoke_gpu_mappings_locked(nv); + + up(&nvl->mmap_lock); + + status = rm_stop_user_channels(sp, nv); + WARN_ON(status != NV_OK); + +done: + up(&nvl->ldata_lock); + + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nvidia_suspend( + struct device *dev, + nv_pm_action_t pm_action, + NvBool is_procfs_suspend +) +{ + NV_STATUS status = NV_OK; + struct pci_dev *pci_dev = NULL; + nv_linux_state_t *nvl; + nv_state_t *nv; + + if (nv_dev_is_pci(dev)) + { + pci_dev = to_pci_dev(dev); + 
nvl = pci_get_drvdata(pci_dev); + } + else + { + nvl = dev_get_drvdata(dev); + } + nv = NV_STATE_PTR(nvl); + + down(&nvl->ldata_lock); + + if (((nv->flags & NV_FLAG_OPEN) == 0) && + ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) == 0)) + { + goto done; + } + + if ((nv->flags & NV_FLAG_SUSPENDED) != 0) + { + nvl->suspend_count++; + goto pci_pm; + } + + if (nv->preserve_vidmem_allocations && !is_procfs_suspend) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "PreserveVideoMemoryAllocations module parameter is set. " + "System Power Management attempted without driver procfs suspend interface. " + "Please refer to the 'Configuring Power Management Support' section in the driver README.\n"); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + nvidia_modeset_suspend(nv->gpu_id); + + status = nv_power_management(nv, pm_action); + + if (status != NV_OK) + { + nvidia_modeset_resume(nv->gpu_id); + goto done; + } + else + { + nv->flags |= NV_FLAG_SUSPENDED; + } + +pci_pm: + /* + * Check if PCI power state should be D0 during system suspend. The PCI PM + * core will change the power state only if the driver has not saved the + * state in it's suspend callback. + */ + if ((nv->d0_state_in_suspend) && (pci_dev != NULL) && + !is_procfs_suspend && (pm_action == NV_PM_ACTION_STANDBY)) + { + pci_save_state(pci_dev); + } + +done: + up(&nvl->ldata_lock); + + return status; +} + +static NV_STATUS +nvidia_resume( + struct device *dev, + nv_pm_action_t pm_action +) +{ + NV_STATUS status = NV_OK; + struct pci_dev *pci_dev; + nv_linux_state_t *nvl; + nv_state_t *nv; + + if (nv_dev_is_pci(dev)) + { + pci_dev = to_pci_dev(dev); + nvl = pci_get_drvdata(pci_dev); + } + else + { + nvl = dev_get_drvdata(dev); + } + nv = NV_STATE_PTR(nvl); + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_SUSPENDED) == 0) + { + goto done; + } + + if (nvl->suspend_count != 0) + { + nvl->suspend_count--; + } + else + { + status = nv_power_management(nv, pm_action); + + if (status == NV_OK) + { + nvidia_modeset_resume(nv->gpu_id); + nv->flags &= ~NV_FLAG_SUSPENDED; + } + } + +done: + up(&nvl->ldata_lock); + + return status; +} + +static NV_STATUS +nv_resume_devices( + nv_pm_action_t pm_action, + nv_pm_action_depth_t pm_action_depth +) +{ + nv_linux_state_t *nvl; + NvBool resume_devices = NV_TRUE; + NV_STATUS status; + + if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET) + { + goto resume_modeset; + } + + if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM) + { + resume_devices = NV_FALSE; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + if (resume_devices) + { + status = nvidia_resume(nvl->dev, pm_action); + WARN_ON(status != NV_OK); + } + } + + UNLOCK_NV_LINUX_DEVICES(); + + status = nv_uvm_resume(); + WARN_ON(status != NV_OK); + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + status = nv_restore_user_channels(NV_STATE_PTR(nvl)); + WARN_ON(status != NV_OK); + } + + UNLOCK_NV_LINUX_DEVICES(); + +resume_modeset: + nvidia_modeset_resume(0); + + return NV_OK; +} + +static NV_STATUS +nv_suspend_devices( + nv_pm_action_t pm_action, + nv_pm_action_depth_t pm_action_depth +) +{ + nv_linux_state_t *nvl; + NvBool resume_devices = NV_FALSE; + NV_STATUS status = NV_OK; + + nvidia_modeset_suspend(0); + + if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET) + { + return NV_OK; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next) + { + status = nv_preempt_user_channels(NV_STATE_PTR(nvl)); + 
WARN_ON(status != NV_OK); + } + + UNLOCK_NV_LINUX_DEVICES(); + + if (status == NV_OK) + { + status = nv_uvm_suspend(); + WARN_ON(status != NV_OK); + } + if (status != NV_OK) + { + goto done; + } + + if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM) + { + return NV_OK; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next) + { + status = nvidia_suspend(nvl->dev, pm_action, NV_TRUE); + WARN_ON(status != NV_OK); + } + if (status != NV_OK) + { + resume_devices = NV_TRUE; + } + + UNLOCK_NV_LINUX_DEVICES(); + +done: + if (status != NV_OK) + { + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + if (resume_devices) + { + nvidia_resume(nvl->dev, pm_action); + } + + nv_restore_user_channels(NV_STATE_PTR(nvl)); + } + + UNLOCK_NV_LINUX_DEVICES(); + } + + return status; +} + +NV_STATUS +nv_set_system_power_state( + nv_power_state_t power_state, + nv_pm_action_depth_t pm_action_depth +) +{ + NV_STATUS status; + nv_pm_action_t pm_action; + + switch (power_state) + { + case NV_POWER_STATE_IN_HIBERNATE: + pm_action = NV_PM_ACTION_HIBERNATE; + break; + case NV_POWER_STATE_IN_STANDBY: + pm_action = NV_PM_ACTION_STANDBY; + break; + case NV_POWER_STATE_RUNNING: + pm_action = NV_PM_ACTION_RESUME; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + down(&nv_system_power_state_lock); + + if (nv_system_power_state == power_state) + { + status = NV_OK; + goto done; + } + + if (power_state == NV_POWER_STATE_RUNNING) + { + status = nv_resume_devices(pm_action, nv_system_pm_action_depth); + up_write(&nv_system_pm_lock); + } + else + { + if (nv_system_power_state != NV_POWER_STATE_RUNNING) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + nv_system_pm_action_depth = pm_action_depth; + + down_write(&nv_system_pm_lock); + status = nv_suspend_devices(pm_action, nv_system_pm_action_depth); + if (status != NV_OK) + { + up_write(&nv_system_pm_lock); + goto done; + } + } + + nv_system_power_state = power_state; + +done: + up(&nv_system_power_state_lock); + + return status; +} + +int nv_pmops_suspend( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_suspend(dev, NV_PM_ACTION_STANDBY, NV_FALSE); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_resume( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_resume(dev, NV_PM_ACTION_RESUME); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_freeze( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_suspend(dev, NV_PM_ACTION_HIBERNATE, NV_FALSE); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_thaw( + struct device *dev +) +{ + return 0; +} + +int nv_pmops_restore( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_resume(dev, NV_PM_ACTION_RESUME); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_poweroff( + struct device *dev +) +{ + return 0; +} + +static int +nvidia_transition_dynamic_power( + struct device *dev, + NvBool enter +) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if ((nv->flags & (NV_FLAG_OPEN | NV_FLAG_PERSISTENT_SW_STATE)) == 0) + { + return 0; + } + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return -ENOMEM; + } + + status = rm_transition_dynamic_power(sp, nv, enter); + + nv_kmem_cache_free_stack(sp); + + return (status == NV_OK) ? 
0 : -EIO; +} + +int nv_pmops_runtime_suspend( + struct device *dev +) +{ + return nvidia_transition_dynamic_power(dev, NV_TRUE); +} + +int nv_pmops_runtime_resume( + struct device *dev +) +{ + return nvidia_transition_dynamic_power(dev, NV_FALSE); +} +#endif /* defined(CONFIG_PM) */ + +nv_state_t* NV_API_CALL nv_get_adapter_state( + NvU32 domain, + NvU8 bus, + NvU8 slot +) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + if (nv->pci_info.domain == domain && nv->pci_info.bus == bus + && nv->pci_info.slot == slot) + { + UNLOCK_NV_LINUX_DEVICES(); + return nv; + } + } + UNLOCK_NV_LINUX_DEVICES(); + + return NULL; +} + +nv_state_t* NV_API_CALL nv_get_ctl_state(void) +{ + return NV_STATE_PTR(&nv_ctl_device); +} + +NV_STATUS NV_API_CALL nv_log_error( + nv_state_t *nv, + NvU32 error_number, + const char *format, + va_list ap +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + nv_report_error(nvl->pci_dev, error_number, format, ap); +#if defined(CONFIG_CRAY_XT) + status = nvos_forward_error_to_cray(nvl->pci_dev, error_number, + format, ap); +#endif + + return status; +} + +NvU64 NV_API_CALL nv_get_dma_start_address( + nv_state_t *nv +) +{ +#if defined(NVCPU_PPC64LE) + struct pci_dev *pci_dev; + dma_addr_t dma_addr; + NvU64 saved_dma_mask; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* + * If TCE bypass is disabled via a module parameter, then just return + * the default (which is 0). + * + * Otherwise, the DMA start address only needs to be set once, and it + * won't change afterward. Just return the cached value if asked again, + * to avoid the kernel printing redundant messages to the kernel + * log when we call pci_set_dma_mask(). + */ + if ((nv_tce_bypass_mode == NV_TCE_BYPASS_MODE_DISABLE) || + (nvl->tce_bypass_enabled)) + { + return nvl->dma_dev.addressable_range.start; + } + + pci_dev = nvl->pci_dev; + + /* + * Linux on IBM POWER8 offers 2 different DMA set-ups, sometimes + * referred to as "windows". + * + * The "default window" provides a 2GB region of PCI address space + * located below the 32-bit line. The IOMMU is used to provide a + * "rich" mapping--any page in system memory can be mapped at an + * arbitrary address within this window. The mappings are dynamic + * and pass in and out of being as pci_map*()/pci_unmap*() calls + * are made. + * + * Dynamic DMA Windows (sometimes "Huge DDW") provides a linear + * mapping of the system's entire physical address space at some + * fixed offset above the 59-bit line. IOMMU is still used, and + * pci_map*()/pci_unmap*() are still required, but mappings are + * static. They're effectively set up in advance, and any given + * system page will always map to the same PCI bus address. I.e. + * physical 0x00000000xxxxxxxx => PCI 0x08000000xxxxxxxx + * + * This driver does not support the 2G default window because + * of its limited size, and for reasons having to do with UVM. + * + * Linux on POWER8 will only provide the DDW-style full linear + * mapping when the driver claims support for 64-bit DMA addressing + * (a pre-requisite because the PCI addresses used in this case will + * be near the top of the 64-bit range). The linear mapping + * is not available in all system configurations. + * + * Detect whether the linear mapping is present by claiming + * 64-bit support and then mapping physical page 0. 
For historical
+     * reasons, Linux on POWER8 will never map a page to PCI address 0x0.
+     * In the "default window" case page 0 will be mapped to some
+     * non-zero address below the 32-bit line. In the
+     * DDW/linear-mapping case, it will be mapped to address 0 plus
+     * some high-order offset.
+     *
+     * If the linear mapping is present and sane then return the offset
+     * as the starting address for all DMA mappings.
+     */
+    saved_dma_mask = pci_dev->dma_mask;
+    if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64)) != 0)
+    {
+        goto done;
+    }
+
+    dma_addr = pci_map_single(pci_dev, NULL, 1, DMA_BIDIRECTIONAL);
+    if (pci_dma_mapping_error(pci_dev, dma_addr))
+    {
+        pci_set_dma_mask(pci_dev, saved_dma_mask);
+        goto done;
+    }
+
+    pci_unmap_single(pci_dev, dma_addr, 1, DMA_BIDIRECTIONAL);
+
+    /*
+     * From IBM: "For IODA2, native DMA bypass or KVM TCE-based implementation
+     * of full 64-bit DMA support will establish a window in address-space
+     * with the high 14 bits being constant and the bottom up-to-50 bits
+     * varying with the mapping."
+     *
+     * Unfortunately, we don't have any good interfaces or definitions from
+     * the kernel to get information about the DMA offset assigned by the OS.
+     * However, we have been told that the offset will be defined by the top
+     * 14 bits of the address, and bits 40-49 will not vary for any DMA
+     * mappings until 1TB of system memory is surpassed; this limitation is
+     * essential for us to function properly since our current GPUs only
+     * support 40 physical address bits. We are in a fragile place where we
+     * need to tell the OS that we're capable of 64-bit addressing, while
+     * relying on the assumption that the top 24 bits will not vary in this
+     * case.
+     *
+     * The way we try to compute the window, then, is to mask the trial
+     * mapping against the DMA capabilities of the device. That way, devices
+     * with greater addressing capabilities will only take the bits they need
+     * to define the window.
+     */
+    if ((dma_addr & DMA_BIT_MASK(32)) != 0)
+    {
+        /*
+         * Huge DDW not available - page 0 mapped to non-zero address below
+         * the 32-bit line.
+         */
+        nv_printf(NV_DBG_WARNINGS,
+            "NVRM: DMA window limited by platform\n");
+        pci_set_dma_mask(pci_dev, saved_dma_mask);
+        goto done;
+    }
+    else if ((dma_addr & saved_dma_mask) != 0)
+    {
+        NvU64 memory_size = os_get_num_phys_pages() * PAGE_SIZE;
+        if ((dma_addr & ~saved_dma_mask) !=
+            ((dma_addr + memory_size) & ~saved_dma_mask))
+        {
+            /*
+             * The physical window straddles our addressing limit boundary,
+             * e.g., for an adapter that can address up to 1TB, the window
+             * crosses the 40-bit limit so that the lower end of the range
+             * has different bits 63:40 than the higher end of the range.
+             * We can only handle a single, static value for bits 63:40, so
+             * we must fall back here.
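The window arithmetic in this comment reduces to a single mask comparison. A minimal, hypothetical helper (not part of the patch; all names are illustrative) restating the check: the bits of the trial mapping above the device's DMA mask form the fixed window base, and the window is only usable if all of system memory fits without disturbing those bits.

```c
#include <linux/types.h>

/*
 * Hypothetical restatement of the straddle check: trial_addr is the PCI
 * address returned for the trial mapping of page 0, dev_dma_mask is the
 * device's native DMA mask (e.g. DMA_BIT_MASK(40)), and memory_size is
 * the amount of system memory that must fit inside the window.
 */
static inline bool dma_window_usable(u64 trial_addr, u64 dev_dma_mask,
                                     u64 memory_size)
{
    u64 window_base = trial_addr & ~dev_dma_mask; /* fixed high-order bits */

    /* Both ends of the range must share the same high-order bits. */
    return window_base == ((trial_addr + memory_size) & ~dev_dma_mask);
}
```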
+ */ + nv_printf(NV_DBG_WARNINGS, + "NVRM: DMA window limited by memory size\n"); + pci_set_dma_mask(pci_dev, saved_dma_mask); + goto done; + } + } + + nvl->tce_bypass_enabled = NV_TRUE; + nvl->dma_dev.addressable_range.start = dma_addr & ~(saved_dma_mask); + + /* Update the coherent mask to match */ + dma_set_coherent_mask(&pci_dev->dev, pci_dev->dma_mask); + +done: + return nvl->dma_dev.addressable_range.start; +#else + return 0; +#endif +} + +NV_STATUS NV_API_CALL nv_set_primary_vga_status( + nv_state_t *nv +) +{ + /* IORESOURCE_ROM_SHADOW wasn't added until 2.6.10 */ +#if defined(IORESOURCE_ROM_SHADOW) + nv_linux_state_t *nvl; + struct pci_dev *pci_dev; + + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + pci_dev = nvl->pci_dev; + + nv->primary_vga = ((NV_PCI_RESOURCE_FLAGS(pci_dev, PCI_ROM_RESOURCE) & + IORESOURCE_ROM_SHADOW) == IORESOURCE_ROM_SHADOW); + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL nv_pci_trigger_recovery( + nv_state_t *nv +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; +#if defined(NV_PCI_ERROR_RECOVERY) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* + * Calling readl() on PPC64LE will allow the kernel to check its state for + * the device and update it accordingly. This needs to be done before + * checking if the PCI channel is offline, so that we don't check stale + * state. + * + * This will also kick off the recovery process for the device. + */ + if (NV_PCI_ERROR_RECOVERY_ENABLED()) + { + if (readl(nv->regs->map) == 0xFFFFFFFF) + { + if (pci_channel_offline(nvl->pci_dev)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "PCI channel for the device is offline\n"); + status = NV_OK; + } + } + } +#endif + return status; +} + +NvBool NV_API_CALL nv_requires_dma_remap( + nv_state_t *nv +) +{ + NvBool dma_remap = NV_FALSE; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + dma_remap = !nv_dma_maps_swiotlb(nvl->dev); + return dma_remap; +} + +/* + * Intended for use by external kernel modules to list nvidia gpu ids. + */ +NvBool nvidia_get_gpuid_list(NvU32 *gpu_ids, NvU32 *gpu_count) +{ + nv_linux_state_t *nvl; + unsigned int count; + NvBool ret = NV_TRUE; + + LOCK_NV_LINUX_DEVICES(); + + count = 0; + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + count++; + + if (*gpu_count == 0) + { + goto done; + } + else if ((*gpu_count) < count) + { + ret = NV_FALSE; + goto done; + } + + count = 0; + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + gpu_ids[count++] = nv->gpu_id; + } + + +done: + + *gpu_count = count; + + UNLOCK_NV_LINUX_DEVICES(); + + return ret; +} + +/* + * Kernel-level analog to nvidia_open, intended for use by external + * kernel modules. This increments the ref count of the device with + * the given gpu_id and makes sure the device has been initialized. + * + * Clients of this interface are counted by the RM reset path, to ensure a + * GPU is not reset while the GPU is active. + * + * Returns -ENODEV if the given gpu_id does not exist. + */ +int nvidia_dev_get(NvU32 gpu_id, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + int rc; + + /* Takes nvl->ldata_lock */ + nvl = find_gpu_id(gpu_id); + if (!nvl) + return -ENODEV; + + rc = nv_open_device(NV_STATE_PTR(nvl), sp); + + if (rc == 0) + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK); + + up(&nvl->ldata_lock); + return rc; +} + +/* + * Kernel-level analog to nvidia_close, intended for use by external + * kernel modules. 
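For external kernel modules, nvidia_dev_get()/nvidia_dev_put() compose with the driver's stack-allocation helpers shown throughout this file. A minimal, hypothetical caller (only the wrapper name is invented; the called functions are the ones defined above):

```c
/* Hypothetical external-module usage of the retain/release interface. */
static int example_with_gpu(NvU32 gpu_id)
{
    nvidia_stack_t *sp = NULL;
    int rc;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
        return -ENOMEM;

    rc = nvidia_dev_get(gpu_id, sp);    /* take a ref, ensure initialized */
    if (rc == 0)
    {
        /* ... use the device; RM will not reset it while it is held ... */
        nvidia_dev_put(gpu_id, sp);     /* drop ref; may tear device down */
    }

    nv_kmem_cache_free_stack(sp);
    return rc;
}
```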
This decrements the ref count of the device with + * the given gpu_id, potentially tearing it down. + */ +void nvidia_dev_put(NvU32 gpu_id, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + + /* Takes nvl->ldata_lock */ + nvl = find_gpu_id(gpu_id); + if (!nvl) + return; + + nv_close_device(NV_STATE_PTR(nvl), sp); + + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK); + + up(&nvl->ldata_lock); +} + +/* + * Like nvidia_dev_get but uses UUID instead of gpu_id. Note that this may + * trigger initialization and teardown of unrelated devices to look up their + * UUIDs. + * + * Clients of this interface are counted by the RM reset path, to ensure a + * GPU is not reset while the GPU is active. + */ +int nvidia_dev_get_uuid(const NvU8 *uuid, nvidia_stack_t *sp) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + const NvU8 *dev_uuid; + int rc = 0; + + /* Takes nvl->ldata_lock */ + nvl = find_uuid_candidate(uuid); + while (nvl) + { + nv = NV_STATE_PTR(nvl); + + /* + * If the device is missing its UUID, this call exists solely so + * rm_get_gpu_uuid_raw will be called and we can inspect the UUID. + */ + rc = nv_open_device(nv, sp); + if (rc != 0) + goto out; + + /* The UUID should always be present following nv_open_device */ + dev_uuid = nv_get_cached_uuid(nv); + WARN_ON(!dev_uuid); + if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + break; + + /* No match, try again. */ + nv_close_device(nv, sp); + up(&nvl->ldata_lock); + nvl = find_uuid_candidate(uuid); + } + + if (nvl) + { + rc = 0; + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK); + } + else + rc = -ENODEV; + +out: + if (nvl) + up(&nvl->ldata_lock); + return rc; +} + +/* + * Like nvidia_dev_put but uses UUID instead of gpu_id. 
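The UUID-based variants pair the same way. A hedged sketch that also blocks GC6 while the GPU is held, mirroring what the UVM registration path later in this patch does (nvidia_dev_block_gc6()/nvidia_dev_unblock_gc6() are defined just below; the wrapper name is illustrative):

```c
/* Hypothetical caller pairing UUID-based retain with GC6 blocking. */
static int example_retain_gpu_by_uuid(const NvU8 *uuid, nvidia_stack_t *sp)
{
    int rc = nvidia_dev_get_uuid(uuid, sp);
    if (rc != 0)
        return rc;

    rc = nvidia_dev_block_gc6(uuid, sp);
    if (rc != 0)
        nvidia_dev_put_uuid(uuid, sp); /* don't leak the device reference */

    return rc;
}
```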
+ */ +void nvidia_dev_put_uuid(const NvU8 *uuid, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return; + + nv_close_device(NV_STATE_PTR(nvl), sp); + + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK); + + up(&nvl->ldata_lock); +} + +int nvidia_dev_block_gc6(const NvU8 *uuid, nvidia_stack_t *sp) + +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + if (rm_ref_dynamic_power(sp, NV_STATE_PTR(nvl), NV_DYNAMIC_PM_FINE) != NV_OK) + { + up(&nvl->ldata_lock); + return -EINVAL; + } + + up(&nvl->ldata_lock); + + return 0; +} + +int nvidia_dev_unblock_gc6(const NvU8 *uuid, nvidia_stack_t *sp) + +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + rm_unref_dynamic_power(sp, NV_STATE_PTR(nvl), NV_DYNAMIC_PM_FINE); + + up(&nvl->ldata_lock); + + return 0; +} + +NV_STATUS NV_API_CALL nv_get_device_memory_config( + nv_state_t *nv, + NvU64 *compr_addr_sys_phys, + NvU64 *addr_guest_phys, + NvU32 *addr_width, + NvS32 *node_id +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (!nv_platform_supports_numa(nvl)) + { + return NV_ERR_NOT_SUPPORTED; + } + +#if defined(NVCPU_PPC64LE) + nv_npu_numa_info_t *numa_info; + + numa_info = &nvl->npu->numa_info; + + if (node_id != NULL) + { + *node_id = nvl->numa_info.node_id; + } + + if (compr_addr_sys_phys != NULL) + { + *compr_addr_sys_phys = + numa_info->compr_sys_phys_addr; + } + + if (addr_guest_phys != NULL) + { + *addr_guest_phys = + numa_info->guest_phys_addr; + } + + if (addr_width != NULL) + { + *addr_width = nv_volta_dma_addr_size - nv_volta_addr_space_width; + } + + status = NV_OK; +#endif + + + + + + + + + + + + + + + + + + + + + + + + + + return status; +} + +#if defined(NVCPU_PPC64LE) + +NV_STATUS NV_API_CALL nv_get_nvlink_line_rate( + nv_state_t *nvState, + NvU32 *linerate +) +{ +#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT) && defined(NV_OF_GET_PROPERTY_PRESENT) + + nv_linux_state_t *nvl; + struct pci_dev *npuDev; + NvU32 *pSpeedPtr = NULL; + NvU32 speed; + int len; + + if (nvState != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nvState); + else + return NV_ERR_INVALID_ARGUMENT; + + if (!nvl->npu) + { + return NV_ERR_NOT_SUPPORTED; + } + + npuDev = nvl->npu->devs[0]; + if (!npuDev->dev.of_node) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: OF Node not found in IBM-NPU device node\n", + __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + pSpeedPtr = (NvU32 *) of_get_property(npuDev->dev.of_node, "ibm,nvlink-speed", &len); + + if (pSpeedPtr) + { + speed = (NvU32) be32_to_cpup(pSpeedPtr); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + + if (!speed) + { + return NV_ERR_NOT_SUPPORTED; + } + else + { + *linerate = speed; + } + + return NV_OK; + +#endif + + return NV_ERR_NOT_SUPPORTED; +} + +#endif + +NV_STATUS NV_API_CALL nv_indicate_idle( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + struct file *file = nvl->sysfs_config_file; + loff_t f_pos = 0; + char buf; + + pm_runtime_put_noidle(dev); + +#if defined(NV_SEQ_READ_ITER_PRESENT) + { + struct kernfs_open_file *of = 
((struct seq_file *)file->private_data)->private;
+        struct kernfs_node *kn;
+
+        mutex_lock(&of->mutex);
+        kn = of->kn;
+        if (kn != NULL && atomic_inc_unless_negative(&kn->active))
+        {
+            if ((kn->attr.ops != NULL) && (kn->attr.ops->read != NULL))
+            {
+                kn->attr.ops->read(of, &buf, 1, f_pos);
+            }
+            atomic_dec(&kn->active);
+        }
+        mutex_unlock(&of->mutex);
+    }
+#else
+#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
+    kernel_read(file, &buf, 1, &f_pos);
+#else
+    kernel_read(file, f_pos, &buf, 1);
+#endif
+#endif
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NV_STATUS NV_API_CALL nv_indicate_not_idle(
+    nv_state_t *nv
+)
+{
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct device *dev = nvl->dev;
+
+    pm_runtime_get_noresume(dev);
+
+    nvl->is_forced_shutdown = NV_TRUE;
+    pci_bus_type.shutdown(dev);
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+void NV_API_CALL nv_idle_holdoff(
+    nv_state_t *nv
+)
+{
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct device *dev = nvl->dev;
+
+    pm_runtime_get_noresume(dev);
+#endif
+}
+
+NvBool NV_API_CALL nv_dynamic_power_available(
+    nv_state_t *nv
+)
+{
+#if defined(NV_PM_RUNTIME_AVAILABLE)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    return nvl->sysfs_config_file != NULL;
+#else
+    return NV_FALSE;
+#endif
+}
+
+/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
+void nv_linux_add_device_locked(nv_linux_state_t *nvl)
+{
+    if (nv_linux_devices == NULL) {
+        nv_linux_devices = nvl;
+    }
+    else
+    {
+        nv_linux_state_t *tnvl;
+        for (tnvl = nv_linux_devices; tnvl->next != NULL; tnvl = tnvl->next);
+        tnvl->next = nvl;
+    }
+}
+
+/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
+void nv_linux_remove_device_locked(nv_linux_state_t *nvl)
+{
+    if (nvl == nv_linux_devices) {
+        nv_linux_devices = nvl->next;
+    }
+    else
+    {
+        nv_linux_state_t *tnvl;
+        for (tnvl = nv_linux_devices; tnvl->next != nvl; tnvl = tnvl->next);
+        tnvl->next = nvl->next;
+    }
+}
+
+void NV_API_CALL nv_control_soc_irqs(nv_state_t *nv, NvBool bEnable)
+{
+    int count;
+
+    if (bEnable)
+    {
+        for (count = 0; count < nv->num_soc_irqs; count++)
+        {
+            nv->soc_irq_info[count].bh_pending = NV_FALSE;
+            nv->current_soc_irq = -1;
+            enable_irq(nv->soc_irq_info[count].irq_num);
+        }
+    }
+    else
+    {
+        for (count = 0; count < nv->num_soc_irqs; count++)
+        {
+            disable_irq_nosync(nv->soc_irq_info[count].irq_num);
+        }
+    }
+}
+
+NvU32 NV_API_CALL nv_get_dev_minor(nv_state_t *nv)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    return nvl->minor_num;
+}
+
+NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap(int fd, int *duped_fd)
+{
+
+
+
+
+
+
+
+
+
+    return NV_ERR_NOT_SUPPORTED;
+
+}
+
+/*
+ * Wakes up the NVIDIA GPU HDA codec and controller by reading the
+ * codec proc file.
+ */
+void NV_API_CALL nv_audio_dynamic_power(
+    nv_state_t *nv
+)
+{
+/*
+ * Runtime power management of the NVIDIA HDA controller became possible
+ * after commit 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA
+ * controller"). This commit also moved the 'PCI_CLASS_MULTIMEDIA_HD_AUDIO'
+ * macro into <linux/pci_ids.h>.
+ * If 'NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT' is not defined, then
+ * this function will be a stub function.
+ *
+ * Also, check if runtime PM is enabled in the kernel (with
+ * 'NV_PM_RUNTIME_AVAILABLE') and stub this function if it is disabled. This
+ * function uses kernel fields only present when the kconfig has runtime PM
+ * enabled.
+ */
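nv_indicate_not_idle() and nv_idle_holdoff() above both lean on the kernel's runtime-PM usage counter without forcing an immediate power-state change. A minimal sketch of that pairing, assuming only a runtime-PM-managed struct device (the wrapper names are illustrative; the pm_runtime_* calls are the standard kernel API):

```c
#include <linux/pm_runtime.h>

/* Take a usage reference without waking the device ... */
static void example_hold_off_suspend(struct device *dev)
{
    pm_runtime_get_noresume(dev);  /* usage_count++, no resume triggered */
}

/* ... and drop it later without scheduling an idle re-check. */
static void example_release_hold(struct device *dev)
{
    pm_runtime_put_noidle(dev);    /* usage_count--, no idle callback runs */
}
```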
+#if defined(NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT) && defined(NV_PM_RUNTIME_AVAILABLE)
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct device *dev = nvl->dev;
+    struct pci_dev *audio_pci_dev, *pci_dev;
+    struct snd_card *card;
+
+    if (!nv_dev_is_pci(dev))
+        return;
+
+    pci_dev = to_pci_dev(dev);
+
+    audio_pci_dev = os_pci_init_handle(NV_PCI_DOMAIN_NUMBER(pci_dev),
+                                       NV_PCI_BUS_NUMBER(pci_dev),
+                                       NV_PCI_SLOT_NUMBER(pci_dev),
+                                       1, NULL, NULL);
+
+    if (audio_pci_dev == NULL)
+        return;
+
+    /*
+     * Check if the HDA controller is in a PM-suspended state. The HDA
+     * controller cannot be runtime resumed if this API is called during
+     * system suspend/resume time while the HDA controller is in a
+     * PM-suspended state.
+     */
+    if (audio_pci_dev->dev.power.is_suspended)
+        return;
+
+    card = pci_get_drvdata(audio_pci_dev);
+    if (card == NULL)
+        return;
+
+    /*
+     * Commit be57bfffb7b5 ("ALSA: hda: move hda_codec.h to include/sound")
+     * in v4.20-rc1 moved the "hda_codec.h" header file from the private
+     * sound folder to include/sound.
+     */
+#if defined(NV_SOUND_HDA_CODEC_H_PRESENT)
+    {
+        struct list_head *p;
+        struct hda_codec *codec = NULL;
+        unsigned int cmd, res;
+
+        /*
+         * Traverse the list of devices which the sound card maintains and
+         * search for the HDA codec controller.
+         */
+        list_for_each_prev(p, &card->devices)
+        {
+            struct snd_device *pdev = list_entry(p, struct snd_device, list);
+
+            if (pdev->type == SNDRV_DEV_CODEC)
+            {
+                codec = pdev->device_data;
+
+                /*
+                 * The NVIDIA HDA codec controller uses the Linux kernel HDA
+                 * codec driver. Commit 05852448690d ("ALSA: hda - Support
+                 * indirect execution of verbs") added support for overriding
+                 * exec_verb. codec->core.exec_verb will be codec_exec_verb()
+                 * for the NVIDIA HDA codec driver.
+                 */
+                if (codec->core.exec_verb == NULL)
+                {
+                    return;
+                }
+
+                break;
+            }
+        }
+
+        if (codec == NULL)
+        {
+            return;
+        }
+
+        /* If the HDA codec controller is already runtime active, return */
+        if (snd_hdac_is_power_on(&codec->core))
+        {
+            return;
+        }
+
+        /*
+         * Encode a codec verb for getting the vendor ID from the root node.
+         * Refer to the Intel High Definition Audio Specification for more
+         * details.
+         */
+        cmd = (codec->addr << 28) | (AC_NODE_ROOT << 20) |
+              (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+
+        /*
+         * This internally increments the runtime PM refcount, wakes up the
+         * audio codec controller and sends the HW command for getting the
+         * vendor ID. Once the vendor ID is returned, it decrements the
+         * runtime PM refcount again and runtime suspends the audio codec
+         * controller (if the refcount is zero) once the autosuspend counter
+         * expires.
+         */
+        codec->core.exec_verb(&codec->core, cmd, 0, &res);
+    }
+#else
+    {
+        int codec_addr;
+
+        /*
+         * The filp_open() call below depends on the current task's fs_struct
+         * (current->fs), which may already be NULL if this is called during
+         * process teardown.
+         */
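The cmd value assembled above follows the standard 32-bit HDA codec verb layout from the HD Audio specification; the breakdown below restates those field positions (the field widths come from the spec, not from this patch, and the helper name is illustrative):

```c
/*
 * 32-bit HDA codec verb, as encoded above:
 *
 *   [31:28] codec address   (codec->addr)
 *   [27:20] node ID         (AC_NODE_ROOT, the root node)
 *   [19:8]  verb ID         (AC_VERB_PARAMETERS, "get parameter")
 *   [7:0]   payload         (AC_PAR_VENDOR_ID, which parameter to read)
 */
static inline unsigned int example_encode_hda_verb(unsigned int codec_addr,
                                                   unsigned int node_id,
                                                   unsigned int verb,
                                                   unsigned int payload)
{
    return (codec_addr << 28) | (node_id << 20) | (verb << 8) | payload;
}
```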
+        if (current->fs == NULL)
+            return;
+
+        /* If the device is runtime active, then return */
+        if (audio_pci_dev->dev.power.runtime_status == RPM_ACTIVE)
+            return;
+
+        for (codec_addr = 0; codec_addr < NV_HDA_MAX_CODECS; codec_addr++)
+        {
+            char filename[48];
+            NvU8 buf;
+            int ret;
+
+            ret = snprintf(filename, sizeof(filename),
+                           "/proc/asound/card%d/codec#%d",
+                           card->number, codec_addr);
+
+            if (ret > 0 && ret < sizeof(filename) &&
+                (os_open_and_read_file(filename, &buf, 1) == NV_OK))
+            {
+                break;
+            }
+        }
+    }
+#endif
+#endif
+}
+
+static int nv_match_dev_state(const void *data, struct file *filp, unsigned fd)
+{
+    nv_linux_state_t *nvl = NULL;
+    dev_t rdev = 0;
+
+    if (filp == NULL ||
+        filp->private_data == NULL ||
+        NV_FILE_INODE(filp) == NULL)
+        return 0;
+
+    rdev = (NV_FILE_INODE(filp))->i_rdev;
+    if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER)
+        return 0;
+
+    nvl = NV_GET_NVL_FROM_FILEP(filp);
+    if (nvl == NULL)
+        return 0;
+
+    return (data == nvl);
+}
+
+NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *nv, void *os_info)
+{
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+    return nv_match_dev_state(nvl, os_info, -1);
+}
+
+NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *nv)
+{
+    struct files_struct *files = current->files;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+
+#ifdef NV_ITERATE_FD_PRESENT
+    return !!iterate_fd(files, 0, nv_match_dev_state, nvl);
+#else
+    struct fdtable *fdtable;
+    int ret_val = 0;
+    int fd = 0;
+
+    if (files == NULL)
+        return 0;
+
+    spin_lock(&files->file_lock);
+
+    for (fdtable = files_fdtable(files); fd < fdtable->max_fds; fd++)
+    {
+        struct file *filp;
+
+#ifdef READ_ONCE
+        filp = READ_ONCE(fdtable->fd[fd]);
+#else
+        filp = ACCESS_ONCE(fdtable->fd[fd]);
+        smp_read_barrier_depends();
+#endif
+        if (filp == NULL)
+            continue;
+
+        ret_val = nv_match_dev_state(nvl, filp, fd);
+        if (ret_val)
+            break;
+    }
+
+    spin_unlock(&files->file_lock);
+
+    return !!ret_val;
+#endif
+}
+
+
+NvBool NV_API_CALL nv_platform_supports_s0ix(void)
+{
+#if defined(CONFIG_ACPI)
+    return (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) != 0;
+#else
+    return NV_FALSE;
+#endif
+}
+
+NvBool NV_API_CALL nv_s2idle_pm_configured(void)
+{
+    NvU8 buf[8];
+
+#if defined(NV_SEQ_READ_ITER_PRESENT)
+    struct file *file;
+    ssize_t num_read;
+    struct kiocb kiocb;
+    struct iov_iter iter;
+    struct kvec iov = {
+        .iov_base = &buf,
+        .iov_len = sizeof(buf),
+    };
+
+    if (os_open_readonly_file("/sys/power/mem_sleep", (void **)&file) != NV_OK)
+    {
+        return NV_FALSE;
+    }
+
+    init_sync_kiocb(&kiocb, file);
+    kiocb.ki_pos = 0;
+    iov_iter_kvec(&iter, READ, &iov, 1, sizeof(buf));
+
+    num_read = seq_read_iter(&kiocb, &iter);
+
+    os_close_file((void *)file);
+
+    if (num_read != sizeof(buf))
+    {
+        return NV_FALSE;
+    }
+#else
+    if (os_open_and_read_file("/sys/power/mem_sleep", buf,
+                              sizeof(buf)) != NV_OK)
+    {
+        return NV_FALSE;
+    }
+#endif
+
+    return (memcmp(buf, "[s2idle]", 8) == 0);
+}
+
+
+/*
+ * Queries the system chassis info to figure out whether the platform is a
+ * Laptop or Notebook.
+ * This function should be used when querying GPU form factor information is
+ * not possible via core RM, or when both system and GPU form factor
+ * information is necessary.
+ */ +NvBool NV_API_CALL nv_is_chassis_notebook(void) +{ + const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + + // + // Return true only for Laptop & Notebook + // As per SMBIOS spec Laptop = 9 and Notebook = 10 + // + return (chassis_type && (!strcmp(chassis_type, "9") || !strcmp(chassis_type, "10"))); +} + +void NV_API_CALL nv_allow_runtime_suspend +( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.runtime_auto == false) + { + dev->power.runtime_auto = true; + atomic_add_unless(&dev->power.usage_count, -1, 0); + } + + spin_unlock_irq(&dev->power.lock); +#endif +} + +void NV_API_CALL nv_disallow_runtime_suspend +( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.runtime_auto == true) + { + dev->power.runtime_auto = false; + atomic_inc(&dev->power.usage_count); + } + + spin_unlock_irq(&dev->power.lock); +#endif +} + +NvU32 NV_API_CALL nv_get_os_type(void) +{ + return OS_TYPE_LINUX; +} + +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size) +{ +#if NVCPU_IS_PPC64LE + return nv_ibmnpu_cache_flush_range(nv, cpu_virtual, size); +#elif NVCPU_IS_AARCH64 + + NvU64 va, cbsize; + NvU64 end_cpu_virtual = cpu_virtual + size; + + nv_printf(NV_DBG_INFO, + "Flushing CPU virtual range [0x%llx, 0x%llx)\n", + cpu_virtual, end_cpu_virtual); + + cbsize = cache_line_size(); + // Align address to line size + cpu_virtual = NV_ALIGN_UP(cpu_virtual, cbsize); + + // Force eviction of any cache lines from the NUMA-onlined region. + for (va = cpu_virtual; va < end_cpu_virtual; va += cbsize) + { + asm volatile("dc civac, %0" : : "r" (va): "memory"); + // Reschedule if necessary to avoid lockup warnings + cond_resched(); + } + asm volatile("dsb sy" : : : "memory"); + +#endif +} + +static struct resource *nv_next_resource(struct resource *p) +{ + if (p->child != NULL) + return p->child; + + while ((p->sibling == NULL) && (p->parent != NULL)) + p = p->parent; + + return p->sibling; +} + +/* + * Function to get the correct PCI Bus memory window which can be mapped + * in the real mode emulator (emu). + * The function gets called during the initialization of the emu before + * remapping it to OS. 
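nv_next_resource() above implements a preorder traversal of the kernel's resource tree: children before siblings, climbing back toward the root when a subtree is exhausted. An illustrative, hypothetical walk over iomem_resource using that same helper (the function name and the printk are for demonstration only):

```c
#include <linux/ioport.h>
#include <linux/printk.h>

/* Hypothetical debug walk: print every memory resource in preorder. */
static void example_dump_mem_resources(void)
{
    struct resource *p;

    for (p = iomem_resource.child; p != NULL; p = nv_next_resource(p))
    {
        if ((p->flags & IORESOURCE_MEM) == IORESOURCE_MEM)
            printk(KERN_DEBUG "mem resource: %pR\n", p);
    }
}
```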
+ */ +void NV_API_CALL nv_get_updated_emu_seg( + NvU32 *start, + NvU32 *end +) +{ + struct resource *p; + + if (*start >= *end) + return; + + for (p = iomem_resource.child; (p != NULL); p = nv_next_resource(p)) + { + /* If we passed the resource we are looking for, stop */ + if (p->start > *end) + { + p = NULL; + break; + } + + /* Skip until we find a range that matches what we look for */ + if (p->end < *start) + continue; + + if ((p->end > *end) && (p->child)) + continue; + + if ((p->flags & IORESOURCE_MEM) != IORESOURCE_MEM) + continue; + + /* Found a match, break */ + break; + } + + if (p != NULL) + { + *start = max((resource_size_t)*start, p->start); + *end = min((resource_size_t)*end, p->end); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h new file mode 100644 index 0000000..8af88dc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h @@ -0,0 +1,302 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + * nv_gpu_ops.h + * + * This file defines the interface between the common RM layer + * and the OS specific platform layers. 
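Taken together, the opaque handles and entry points declared in this header imply a create/use/destroy lifecycle. A hedged sketch of one plausible sequence, built only from the signatures declared below (argument values such as vaBase, vaSize, and length are illustrative, and the error-path ordering is an assumption, not a verified implementation):

```c
/* Hypothetical lifecycle walk-through over the nvGpuOps* entry points. */
static NV_STATUS example_gpu_ops_lifecycle(const gpuInfo *info,
                                           const NvProcessorUuid *uuid)
{
    struct gpuSession *session;
    struct gpuDevice *device;
    gpuAddressSpaceHandle vaSpace;
    UvmGpuAddressSpaceInfo vaSpaceInfo;
    gpuAllocInfo allocInfo = { 0 };   /* zero-init: fields are RM-defined */
    NvU64 gpuOffset;
    NV_STATUS status;

    status = nvGpuOpsCreateSession(&session);
    if (status != NV_OK)
        return status;

    status = nvGpuOpsDeviceCreate(session, info, uuid, &device, NV_FALSE);
    if (status != NV_OK)
        goto out_session;

    status = nvGpuOpsAddressSpaceCreate(device, 0, 1ULL << 37,
                                        &vaSpace, &vaSpaceInfo);
    if (status != NV_OK)
        goto out_device;

    status = nvGpuOpsMemoryAllocFb(vaSpace, 4096, &gpuOffset, &allocInfo);
    if (status == NV_OK)
        nvGpuOpsMemoryFree(vaSpace, gpuOffset);

    nvGpuOpsAddressSpaceDestroy(vaSpace);
out_device:
    nvGpuOpsDeviceDestroy(device);
out_session:
    nvGpuOpsDestroySession(session);
    return status;
}
```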
(Currently supported + * are Linux and KMD) + * + */ + +#ifndef _NV_GPU_OPS_H_ +#define _NV_GPU_OPS_H_ +#include "nvgputypes.h" +#include "nv_uvm_types.h" + +typedef struct gpuSession *gpuSessionHandle; +typedef struct gpuDevice *gpuDeviceHandle; +typedef struct gpuAddressSpace *gpuAddressSpaceHandle; +typedef struct gpuChannel *gpuChannelHandle; +typedef struct gpuObject *gpuObjectHandle; + +typedef struct gpuRetainedChannel_struct gpuRetainedChannel; + +NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session); + +NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session); + +NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session, + const gpuInfo *pGpuInfo, + const NvProcessorUuid *gpuGuid, + struct gpuDevice **device, + NvBool bCreateSmcPartition); + +NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device); + +NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device, + NvU64 vaBase, + NvU64 vaSize, + gpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1, + gpuDeviceHandle device2, + getP2PCapsParams *p2pCaps); + +void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace); + +NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsPmaAllocPages(void *pPma, + NvLength pageCount, + NvU32 pageSize, + gpuPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages); + +void nvGpuOpsPmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsPmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize); + +NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace, + const gpuChannelAllocParams *params, + gpuChannelHandle *channelHandle, + gpuChannelInfo *channelInfo); + +NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace, + NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset); + +void nvGpuOpsChannelDestroy(struct gpuChannel *channel); + +void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace, + NvU64 pointer); + +NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace, + NvU64 memory, NvLength length, + void **cpuPtr, NvU32 pageSize); + +void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace, + void* cpuPtr); + +NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device, + gpuCaps *caps); + +NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device, + gpuCesCaps *caps); + +NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuAddressSpace *dstVaSpace, + NvU64 dstVaAlignment, + NvU64 *dstAddress); + +NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + gpuMemoryInfo *pGpuMemoryInfo); + +NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice, + NvHandle hSubDevice, NvU8 *gpuGuid, + unsigned guidLength); + +NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid, + const NvU8 *gpuUuid, + NvHandle *hClient, + NvHandle *hDevice, + NvHandle *hSubDevice); + +NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device, + NvHandle hPhysHandle); + +NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus); + +NV_STATUS 
nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid, + const gpuClientInfo *pGpuClientInfo, + gpuInfo *pGpuInfo); + +NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId, + NvU32 *pSubdeviceId); + +NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device); + +NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet); + +NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid); + +NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt); + +NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo); + +NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo); + +NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device, + gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults); + +NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults); + +NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device, + NvHandle hUserClient, + NvHandle hUserVASpace, + struct gpuAddressSpace **vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats); + +NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session, + gpuAccessCntrInfo *pAccessCntrInfo, + NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo, + gpuAccessCntrConfig *pAccessCntrConfig); + +NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1, + struct gpuDevice *device2, + NvHandle *hP2pObject); + +NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session, + NvHandle hP2pObject); + +NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace, + NvHandle hClient, + NvHandle hChannel, + gpuRetainedChannel **retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo); + +void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel); + +NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel, + gpuChannelResourceBindParams *channelResourceBindParams); + +void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate); + +NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device, + const void *pFaultPacket); + +// Private interface used for windows only + +#if defined(NV_WINDOWS) 
+NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient); + +NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel); +#endif // WINDOWS + +// Interface used for SR-IOV heavy + +NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device, + const gpuPagingChannelAllocParams *params, + gpuPagingChannelHandle *channelHandle, + gpuPagingChannelInfo *channelinfo); + +void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel); + +NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device, + NvU64 *dstAddress); + +void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device); + +NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel, + char *methodStream, + NvU32 methodStreamSize); + + +// Interface used for CCSL + +NV_STATUS nvGpuOpsCcslContextInit(UvmCslContext **ctx, + gpuChannelHandle channel); +NV_STATUS nvGpuOpsCcslContextClear(UvmCslContext *ctx); +NV_STATUS nvGpuOpsCcslLogDeviceEncryption(UvmCslContext *ctx); +NV_STATUS nvGpuOpsCcslEncrypt(UvmCslContext *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *outputBuffer, + NvU8 *authTagBuffer); +NV_STATUS nvGpuOpsCcslDecrypt(UvmCslContext *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *outputBuffer, + NvU8 const *authTagBuffer); +NV_STATUS nvGpuOpsCcslSign(UvmCslContext *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *authTagBuffer); + + +#endif /* _NV_GPU_OPS_H_*/ diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c new file mode 100644 index 0000000..4363649 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c @@ -0,0 +1,1544 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file sets up the communication between the UVM driver and RM. RM will + * call the UVM driver providing to it the set of OPS it supports. UVM will + * then return by filling out the structure with the callbacks it supports. 
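The handshake described above, seen from the UVM side: UVM fills out a UvmOpsUvmEvents structure and hands it to RM. A hedged sketch, assuming nvUvmInterfaceRegisterUvmCallbacks()/nvUvmInterfaceDeRegisterUvmOps() (declared in nv_uvm_interface.h) are the registration entry points; the callback fields shown in the comment are placeholders, not confirmed member names:

```c
/* Hypothetical UVM-side registration, with placeholder callbacks. */
static struct UvmOpsUvmEvents g_example_events = {
    /* .suspend = example_suspend_cb, .resume = example_resume_cb, ... */
};

static NV_STATUS example_register_with_rm(void)
{
    return nvUvmInterfaceRegisterUvmCallbacks(&g_example_events);
}

static void example_unregister_from_rm(void)
{
    /* Must happen before module unload; see the WARN_ON in nv_uvm_exit(). */
    nvUvmInterfaceDeRegisterUvmOps();
}
```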
+ */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(NV_UVM_ENABLE) + +#include "nv_uvm_interface.h" +#include "nv_gpu_ops.h" +#include "rm-gpu-ops.h" + +// This is really a struct UvmOpsUvmEvents *. It needs to be an atomic because +// it can be read outside of the g_pNvUvmEventsLock. Use getUvmEvents and +// setUvmEvents to access it. +static atomic_long_t g_pNvUvmEvents; +static struct semaphore g_pNvUvmEventsLock; + +static struct UvmOpsUvmEvents *getUvmEvents(void) +{ + return (struct UvmOpsUvmEvents *)atomic_long_read(&g_pNvUvmEvents); +} + +static void setUvmEvents(struct UvmOpsUvmEvents *newEvents) +{ + atomic_long_set(&g_pNvUvmEvents, (long)newEvents); +} + +static nvidia_stack_t *g_sp; +static struct semaphore g_spLock; + +// Use these to test g_sp usage. When DEBUG_GLOBAL_STACK, one out of every +// DEBUG_GLOBAL_STACK_THRESHOLD calls to nvUvmGetSafeStack will use g_sp. +#define DEBUG_GLOBAL_STACK 0 +#define DEBUG_GLOBAL_STACK_THRESHOLD 2 + +static atomic_t g_debugGlobalStackCount = ATOMIC_INIT(0); + +// Called at module load, not by an external client +int nv_uvm_init(void) +{ + int rc = nv_kmem_cache_alloc_stack(&g_sp); + if (rc != 0) + return rc; + + NV_INIT_MUTEX(&g_spLock); + NV_INIT_MUTEX(&g_pNvUvmEventsLock); + return 0; +} + +void nv_uvm_exit(void) +{ + // If this fires, the dependent driver never unregistered its callbacks with + // us before going away, leaving us potentially making callbacks to garbage + // memory. + WARN_ON(getUvmEvents() != NULL); + + nv_kmem_cache_free_stack(g_sp); +} + + +// Testing code to force use of the global stack every now and then +static NvBool forceGlobalStack(void) +{ + // Make sure that we do not try to allocate memory in interrupt or atomic + // context + if (DEBUG_GLOBAL_STACK || !NV_MAY_SLEEP()) + { + if ((atomic_inc_return(&g_debugGlobalStackCount) % + DEBUG_GLOBAL_STACK_THRESHOLD) == 0) + return NV_TRUE; + } + return NV_FALSE; +} + +// Guaranteed to always return a valid stack. It first attempts to allocate one +// from the pool. If that fails, it falls back to the global pre-allocated +// stack. This fallback will serialize. +// +// This is required so paths that free resources do not themselves require +// allocation of resources. +static nvidia_stack_t *nvUvmGetSafeStack(void) +{ + nvidia_stack_t *sp; + if (forceGlobalStack() || nv_kmem_cache_alloc_stack(&sp) != 0) + { + sp = g_sp; + down(&g_spLock); + } + return sp; +} + +static void nvUvmFreeSafeStack(nvidia_stack_t *sp) +{ + if (sp == g_sp) + up(&g_spLock); + else + nv_kmem_cache_free_stack(sp); +} + +NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + int rc; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + rc = nvidia_dev_get_uuid(gpuUuid->uuid, sp); + if (rc == 0) + { + rc = nvidia_dev_get_pci_info(gpuUuid->uuid, + &gpuInfo->pci_dev, + &gpuInfo->dma_addressable_start, + &gpuInfo->dma_addressable_limit); + + // Block GPU from entering GC6 while used by UVM. + if (rc == 0) + rc = nvidia_dev_block_gc6(gpuUuid->uuid, sp); + + // Avoid leaking reference on GPU if we failed. 
+ if (rc != 0) + nvidia_dev_put_uuid(gpuUuid->uuid, sp); + } + + switch (rc) + { + case 0: + status = NV_OK; + break; + case -ENOMEM: + status = NV_ERR_NO_MEMORY; + break; + case -ENODEV: + status = NV_ERR_GPU_UUID_NOT_FOUND; + break; + default: + status = NV_ERR_GENERIC; + break; + } + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRegisterGpu); + +void nvUvmInterfaceUnregisterGpu(const NvProcessorUuid *gpuUuid) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + nvidia_dev_unblock_gc6(gpuUuid->uuid, sp); + nvidia_dev_put_uuid(gpuUuid->uuid, sp); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceUnregisterGpu); + +NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session, + UvmPlatformInfo *platformInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + memset(platformInfo, 0, sizeof(*platformInfo)); + platformInfo->atsSupported = nv_ats_supported; + + platformInfo->sevEnabled = os_sev_enabled; + + status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSessionCreate); + +NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_destroy_session(sp, (gpuSessionHandle)session); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSessionDestroy); + +NV_STATUS nvUvmInterfaceDeviceCreate(uvmGpuSessionHandle session, + const UvmGpuInfo *pGpuInfo, + const NvProcessorUuid *gpuUuid, + uvmGpuDeviceHandle *device, + NvBool bCreateSmcPartition) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_device_create(sp, + (gpuSessionHandle)session, + (const gpuInfo *)pGpuInfo, + gpuUuid, + (gpuDeviceHandle *)device, + bCreateSmcPartition); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDeviceCreate); + +void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_device_destroy(sp, (gpuDeviceHandle)device); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceDeviceDestroy); + +NV_STATUS nvUvmInterfaceDupAddressSpace(uvmGpuDeviceHandle device, + NvHandle hUserClient, + NvHandle hUserVASpace, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_address_space(sp, + (gpuDeviceHandle)device, + hUserClient, + hUserVASpace, + (gpuAddressSpaceHandle *)vaSpace, + vaSpaceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupAddressSpace); + +NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device, + unsigned long long vaBase, + unsigned long long vaSize, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_address_space_create(sp, + (gpuDeviceHandle)device, + vaBase, + vaSize, + (gpuAddressSpaceHandle *)vaSpace, + vaSpaceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceAddressSpaceCreate); + +void 
nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_address_space_destroy( + sp, (gpuAddressSpaceHandle)vaSpace); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceAddressSpaceDestroy); + +NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_alloc_fb( + sp, (gpuAddressSpaceHandle)vaSpace, + length, (NvU64 *) gpuPointer, + allocInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryAllocFB); + +NV_STATUS nvUvmInterfaceMemoryAllocSys(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_alloc_sys( + sp, (gpuAddressSpaceHandle)vaSpace, + length, (NvU64 *) gpuPointer, + allocInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceMemoryAllocSys); + +NV_STATUS nvUvmInterfaceGetP2PCaps(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + UvmGpuP2PCapsParams * p2pCapsParams) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_p2p_caps(sp, + (gpuDeviceHandle)device1, + (gpuDeviceHandle)device2, + p2pCapsParams); + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceGetP2PCaps); + +NV_STATUS nvUvmInterfaceGetPmaObject(uvmGpuDeviceHandle device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_pma_object(sp, (gpuDeviceHandle)device, pPma, (const nvgpuPmaStatistics_t *)pPmaPubStats); + + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceGetPmaObject); + +NV_STATUS nvUvmInterfacePmaRegisterEvictionCallbacks(void *pPma, + uvmPmaEvictPagesCallback evictPages, + uvmPmaEvictRangeCallback evictRange, + void *callbackData) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_register_callbacks(sp, pPma, evictPages, evictRange, callbackData); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaRegisterEvictionCallbacks); + +void nvUvmInterfacePmaUnregisterEvictionCallbacks(void *pPma) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_pma_unregister_callbacks(sp, pPma); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePmaUnregisterEvictionCallbacks); + +NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma, + NvLength pageCount, + NvU32 pageSize, + UvmPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_alloc_pages( + sp, pPma, + pageCount, + pageSize, + (nvgpuPmaAllocationOptions_t)pPmaAllocOptions, + pPages); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaAllocPages); + +NV_STATUS 
nvUvmInterfacePmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_pin_pages(sp, pPma, pPages, pageCount, pageSize, flags); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaPinPages); + +NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_unpin_pages(sp, pPma, pPages, pageCount, pageSize); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaUnpinPages); + +void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_memory_free( + sp, (gpuAddressSpaceHandle)vaSpace, + (NvU64) gpuPointer); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryFree); + +void nvUvmInterfacePmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_pma_free_pages(sp, pPma, pPages, pageCount, pageSize, flags); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePmaFreePages); + +NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer, NvLength length, void **cpuPtr, + NvU32 pageSize) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_cpu_map( + sp, (gpuAddressSpaceHandle)vaSpace, + (NvU64) gpuPointer, length, cpuPtr, pageSize); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryCpuMap); + +void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace, + void *cpuPtr) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_memory_cpu_ummap(sp, (gpuAddressSpaceHandle)vaSpace, cpuPtr); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryCpuUnMap); + +NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace, + const UvmGpuChannelAllocParams *allocParams, + uvmGpuChannelHandle *channel, + UvmGpuChannelInfo *channelInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_channel_allocate(sp, + (gpuAddressSpaceHandle)vaSpace, + allocParams, + (gpuChannelHandle *)channel, + channelInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceChannelAllocate); + +void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_channel_destroy(sp, (gpuChannelHandle)channel); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceChannelDestroy); + +NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device, + UvmGpuCaps * caps) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_query_caps(sp, (gpuDeviceHandle)device, caps); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceQueryCaps); + +NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device, + 
UvmGpuCopyEnginesCaps *caps) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_query_ces_caps(sp, (gpuDeviceHandle)device, caps); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceQueryCopyEnginesCaps); + +NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid, + const UvmGpuClientInfo *pGpuClientInfo, + UvmGpuInfo *pGpuInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_gpu_info(sp, gpuUuid, pGpuClientInfo, pGpuInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetGpuInfo); + +NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_service_device_interrupts_rm(sp, + (gpuDeviceHandle)device); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceServiceDeviceInterruptsRM); + +NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_set_page_directory(sp, (gpuAddressSpaceHandle)vaSpace, + physAddress, numEntries, bVidMemAperture, pasid); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSetPageDirectory); + +NV_STATUS nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = + rm_gpu_ops_unset_page_directory(sp, (gpuAddressSpaceHandle)vaSpace); + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceUnsetPageDirectory); + +NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace, + NvU64 srcAddress, + uvmGpuAddressSpaceHandle dstVaSpace, + NvU64 dstVaAlignment, + NvU64 *dstAddress) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_allocation(sp, + (gpuAddressSpaceHandle)srcVaSpace, + srcAddress, + (gpuAddressSpaceHandle)dstVaSpace, + dstVaAlignment, + dstAddress); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupAllocation); + +NV_STATUS nvUvmInterfaceDupMemory(uvmGpuDeviceHandle device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + UvmGpuMemoryInfo *pGpuMemoryInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_memory(sp, + (gpuDeviceHandle)device, + hClient, + hPhysMemory, + hDupMemory, + pGpuMemoryInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupMemory); + + +NV_STATUS nvUvmInterfaceFreeDupedHandle(uvmGpuDeviceHandle device, + NvHandle hPhysHandle) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_free_duped_handle(sp, + (gpuDeviceHandle)device, + hPhysHandle); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceFreeDupedHandle); + +NV_STATUS nvUvmInterfaceGetFbInfo(uvmGpuDeviceHandle device, + 
UvmGpuFbInfo * fbInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_fb_info(sp, (gpuDeviceHandle)device, fbInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetFbInfo); + +NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device, + UvmGpuEccInfo * eccInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_ecc_info(sp, (gpuDeviceHandle)device, eccInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetEccInfo); + +NV_STATUS nvUvmInterfaceOwnPageFaultIntr(uvmGpuDeviceHandle device, NvBool bOwnInterrupts) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_own_page_fault_intr(sp, (gpuDeviceHandle)device, bOwnInterrupts); + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceOwnPageFaultIntr); + + +NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_init_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); + + // Preallocate a stack for functions called from ISR top half + pFaultInfo->nonReplayable.isr_sp = NULL; + pFaultInfo->nonReplayable.isr_bh_sp = NULL; + if (status == NV_OK) + { + // NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC. + // Therefore, the pointer can be NULL on success. Always use the + // returned error code to determine if the operation was successful. 
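+ //
+ // A minimal sketch of the allocation pattern used throughout this
+ // file, assuming (per the note above) that only the return code of
+ // nv_kmem_cache_alloc_stack() signals success and that the resulting
+ // pointer may legitimately be NULL ("isr_stack" is an illustrative
+ // name, not an existing field):
+ //
+ //     nvidia_stack_t *isr_stack = NULL;
+ //     if (nv_kmem_cache_alloc_stack(&isr_stack) != 0)
+ //         return NV_ERR_NO_MEMORY;    // allocation failed
+ //     /* isr_stack is usable here even if NULL (e.g. on PPC) */
+ //     nv_kmem_cache_free_stack(isr_stack);
+ //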
+ int err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp); + if (!err) + { + err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp); + if (err) + { + nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp); + pFaultInfo->nonReplayable.isr_sp = NULL; + } + } + + if (err) + { + rm_gpu_ops_destroy_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); + + status = NV_ERR_NO_MEMORY; + } + } + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceInitFaultInfo); + +NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_init_access_cntr_info(sp, + (gpuDeviceHandle)device, + pAccessCntrInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceInitAccessCntrInfo); + +NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo, + UvmGpuAccessCntrConfig *pAccessCntrConfig) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_enable_access_cntr (sp, + (gpuDeviceHandle)device, + pAccessCntrInfo, + pAccessCntrConfig); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceEnableAccessCntr); + +NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + // Free the preallocated stack for functions called from ISR + if (pFaultInfo->nonReplayable.isr_sp != NULL) + { + nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_sp); + pFaultInfo->nonReplayable.isr_sp = NULL; + } + + if (pFaultInfo->nonReplayable.isr_bh_sp != NULL) + { + nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_bh_sp); + pFaultInfo->nonReplayable.isr_bh_sp = NULL; + } + + status = rm_gpu_ops_destroy_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDestroyFaultInfo); + +NV_STATUS nvUvmInterfaceHasPendingNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + NvBool *hasPendingFaults) +{ + return rm_gpu_ops_has_pending_non_replayable_faults(pFaultInfo->nonReplayable.isr_sp, + pFaultInfo, + hasPendingFaults); +} +EXPORT_SYMBOL(nvUvmInterfaceHasPendingNonReplayableFaults); + +NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + void *pFaultBuffer, + NvU32 *numFaults) +{ + return rm_gpu_ops_get_non_replayable_faults(pFaultInfo->nonReplayable.isr_bh_sp, + pFaultInfo, + pFaultBuffer, + numFaults); +} +EXPORT_SYMBOL(nvUvmInterfaceGetNonReplayableFaults); + +NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_destroy_access_cntr_info(sp, + (gpuDeviceHandle)device, + pAccessCntrInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDestroyAccessCntrInfo); + +NV_STATUS nvUvmInterfaceDisableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_disable_access_cntr(sp, + 
(gpuDeviceHandle)device, + pAccessCntrInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDisableAccessCntr); + +// this function is called by the UVM driver to register the ops +NV_STATUS nvUvmInterfaceRegisterUvmCallbacks(struct UvmOpsUvmEvents *importedUvmOps) +{ + NV_STATUS status = NV_OK; + + if (!importedUvmOps) + { + return NV_ERR_INVALID_ARGUMENT; + } + + down(&g_pNvUvmEventsLock); + if (getUvmEvents() != NULL) + { + status = NV_ERR_IN_USE; + } + else + { + // Be careful: as soon as the pointer is assigned, top half ISRs can + // start reading it to make callbacks, even before we drop the lock. + setUvmEvents(importedUvmOps); + } + up(&g_pNvUvmEventsLock); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRegisterUvmCallbacks); + +static void flush_top_half(void *info) +{ + // Prior top halves on this core must have completed for this callback to + // run at all, so we're done. + return; +} + +void nvUvmInterfaceDeRegisterUvmOps(void) +{ + // Taking the lock forces us to wait for non-interrupt callbacks to finish + // up. + down(&g_pNvUvmEventsLock); + setUvmEvents(NULL); + up(&g_pNvUvmEventsLock); + + // We cleared the pointer so nv_uvm_event_interrupt can't invoke any new + // top half callbacks, but prior ones could still be executing on other + // cores. We can wait for them to finish by waiting for a context switch to + // happen on every core. + // + // This is slow, but since nvUvmInterfaceDeRegisterUvmOps is very rare + // (module unload) it beats having the top half synchronize with a spin lock + // every time. + // + // Note that since we dropped the lock, another set of callbacks could have + // already been registered. That's ok, since we just need to wait for old + // ones to finish. + on_each_cpu(flush_top_half, NULL, 1); +} +EXPORT_SYMBOL(nvUvmInterfaceDeRegisterUvmOps); + +NV_STATUS nv_uvm_suspend(void) +{ + NV_STATUS status = NV_OK; + struct UvmOpsUvmEvents *events; + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if (events && events->suspend) + { + status = events->suspend(); + } + + up(&g_pNvUvmEventsLock); + + return status; +} + +NV_STATUS nv_uvm_resume(void) +{ + NV_STATUS status = NV_OK; + struct UvmOpsUvmEvents *events; + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if (events && events->resume) + { + status = events->resume(); + } + + up(&g_pNvUvmEventsLock); + + return status; +} + +void nv_uvm_notify_start_device(const NvU8 *pUuid) +{ + NvProcessorUuid uvmUuid; + struct UvmOpsUvmEvents *events; + + memcpy(uvmUuid.uuid, pUuid, UVM_UUID_LEN); + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. 
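+ //
+ // For reference, a sketch of the locking pattern shared by
+ // nv_uvm_suspend(), nv_uvm_resume() and the notify helpers, assuming
+ // g_pNvUvmEventsLock is the only writer-side synchronization for the
+ // events pointer ("callback" stands in for any UvmOpsUvmEvents member):
+ //
+ //     down(&g_pNvUvmEventsLock);   // excludes (de)registration
+ //     events = getUvmEvents();     // stable while the lock is held
+ //     if (events && events->callback)
+ //         events->callback(...);   // cannot be unregistered mid-call
+ //     up(&g_pNvUvmEventsLock);
+ //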
+ events = getUvmEvents(); + if(events && events->startDevice) + { + events->startDevice(&uvmUuid); + } + up(&g_pNvUvmEventsLock); +} + +void nv_uvm_notify_stop_device(const NvU8 *pUuid) +{ + NvProcessorUuid uvmUuid; + struct UvmOpsUvmEvents *events; + + memcpy(uvmUuid.uuid, pUuid, UVM_UUID_LEN); + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if(events && events->stopDevice) + { + events->stopDevice(&uvmUuid); + } + up(&g_pNvUvmEventsLock); +} + +NV_STATUS nv_uvm_event_interrupt(const NvU8 *pUuid) +{ + // + // This is called from interrupt context, so we can't take + // g_pNvUvmEventsLock to prevent the callbacks from being unregistered. Even + // if we could take the lock, we don't want to slow down the ISR more than + // absolutely necessary. + // + // Instead, we allow this function to be called concurrently with + // nvUvmInterfaceDeRegisterUvmOps. That function will clear the events + // pointer, then wait for all top halves to finish out. This means the + // pointer may change out from under us, but the callbacks are still safe to + // invoke while we're in this function. + // + // This requires that we read the pointer exactly once here so neither we + // nor the compiler make assumptions about the pointer remaining valid while + // in this function. + // + struct UvmOpsUvmEvents *events = getUvmEvents(); + + if (events && events->isrTopHalf) + return events->isrTopHalf((const NvProcessorUuid *)pUuid); + + // + // NV_OK means that the interrupt was for the UVM driver, so use + // NV_ERR_NO_INTR_PENDING to tell the caller that we didn't do anything. 
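+ //
+ // To make the single-read rule described earlier in this function
+ // concrete: the unsafe variant would consult the global repeatedly,
+ // and a later read could observe NULL after
+ // nvUvmInterfaceDeRegisterUvmOps() clears the pointer (sketch only,
+ // assuming getUvmEvents() compiles to one load per call):
+ //
+ //     if (getUvmEvents() && getUvmEvents()->isrTopHalf)   // WRONG:
+ //         return getUvmEvents()->isrTopHalf(...);         // 3 reads
+ //
+ // which is why the pointer is copied into a local exactly once above.
+ //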
+ // + return NV_ERR_NO_INTR_PENDING; +} + +NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + NvHandle *hP2pObject) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_p2p_object_create(sp, + (gpuDeviceHandle)device1, + (gpuDeviceHandle)device2, + hP2pObject); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceP2pObjectCreate); + +void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session, + NvHandle hP2pObject) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_p2p_object_destroy(sp, (gpuSessionHandle)session, hP2pObject); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceP2pObjectDestroy); + +NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *gpuExternalMappingInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_external_alloc_ptes(sp, + (gpuAddressSpaceHandle)vaSpace, + hDupedMemory, + offset, + size, + gpuExternalMappingInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetExternalAllocPtes); + +NV_STATUS nvUvmInterfaceRetainChannel(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hClient, + NvHandle hChannel, + void **retainedChannel, + UvmGpuChannelInstanceInfo *channelInstanceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_retain_channel(sp, + (gpuAddressSpaceHandle)vaSpace, + hClient, + hChannel, + retainedChannel, + channelInstanceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRetainChannel); + +NV_STATUS nvUvmInterfaceBindChannelResources(void *retainedChannel, + UvmGpuChannelResourceBindParams *channelResourceBindParams) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_bind_channel_resources(sp, + retainedChannel, + channelResourceBindParams); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceBindChannelResources); + +void nvUvmInterfaceReleaseChannel(void *retainedChannel) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_release_channel(sp, retainedChannel); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceReleaseChannel); + +void nvUvmInterfaceStopChannel(void *retainedChannel, NvBool bImmediate) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_stop_channel(sp, retainedChannel, bImmediate); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceStopChannel); + +NV_STATUS nvUvmInterfaceGetChannelResourcePtes(uvmGpuAddressSpaceHandle vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *externalMappingInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_channel_resource_ptes(sp, + (gpuAddressSpaceHandle)vaSpace, + resourceDescriptor, + offset, + size, + externalMappingInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetChannelResourcePtes); + +NV_STATUS 
nvUvmInterfaceReportNonReplayableFault(uvmGpuDeviceHandle device, + const void *pFaultPacket) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_report_non_replayable_fault(sp, (gpuDeviceHandle)device, pFaultPacket); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceReportNonReplayableFault); + +NV_STATUS nvUvmInterfacePagingChannelAllocate(uvmGpuDeviceHandle device, + const UvmGpuPagingChannelAllocParams *allocParams, + UvmGpuPagingChannelHandle *channel, + UvmGpuPagingChannelInfo *channelInfo) +{ + nvidia_stack_t *sp = NULL; + nvidia_stack_t *pushStreamSp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + if (nv_kmem_cache_alloc_stack(&pushStreamSp) != 0) + { + nv_kmem_cache_free_stack(sp); + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_paging_channel_allocate(sp, + (gpuDeviceHandle)device, + allocParams, + (gpuPagingChannelHandle *)channel, + channelInfo); + + if (status == NV_OK) + (*channel)->pushStreamSp = pushStreamSp; + else + nv_kmem_cache_free_stack(pushStreamSp); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelAllocate); + +void nvUvmInterfacePagingChannelDestroy(UvmGpuPagingChannelHandle channel) +{ + nvidia_stack_t *sp; + + if (channel == NULL) + return; + + sp = nvUvmGetSafeStack(); + nv_kmem_cache_free_stack(channel->pushStreamSp); + rm_gpu_ops_paging_channel_destroy(sp, (gpuPagingChannelHandle)channel); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelDestroy); + +NV_STATUS nvUvmInterfacePagingChannelsMap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device, + NvU64 *dstAddress) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + status = rm_gpu_ops_paging_channels_map(sp, + (gpuAddressSpaceHandle)srcVaSpace, + (NvU64)srcAddress, + (gpuDeviceHandle)device, + dstAddress); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelsMap); + +void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_paging_channels_unmap(sp, + (gpuAddressSpaceHandle)srcVaSpace, + (NvU64)srcAddress, + (gpuDeviceHandle)device); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelsUnmap); + +NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channel, + char *methodStream, + NvU32 methodStreamSize) +{ + return rm_gpu_ops_paging_channel_push_stream(channel->pushStreamSp, + (gpuPagingChannelHandle)channel, + methodStream, + methodStreamSize); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelPushStream); + + +NV_STATUS nvUvmInterfaceInitCslContext(UvmCslContext **ctx, + uvmGpuChannelHandle channel) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_ccsl_context_init(sp, ctx, (gpuChannelHandle) channel); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceInitCslContext); + +void nvUvmInterfaceDeinitCslContext(UvmCslContext *ctx) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_ccsl_context_clear(sp, ctx); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext); + +NV_STATUS 
nvUvmInterfaceLogDeviceEncryption(UvmCslContext *ctx, + UvmCslIv *decryptIv) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_ccsl_log_device_encryption(sp, ctx); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceLogDeviceEncryption); + +NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *outputBuffer, + NvU8 *authTagBuffer) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_ccsl_encrypt(sp, ctx, bufferSize, inputBuffer, outputBuffer, authTagBuffer); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslEncrypt); + +NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + UvmCslIv const *decryptIv, + NvU8 *outputBuffer, + NvU8 const *authTagBuffer) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_ccsl_decrypt(sp, ctx, bufferSize, inputBuffer, outputBuffer, authTagBuffer); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslDecrypt); + +NV_STATUS nvUvmInterfaceCslSign(struct ccslContext_t * ctx, + NvU32 bufferSize, + NvU8 const *inputBuffer, + NvU8 *authTagBuffer) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_ccsl_sign(sp, ctx, bufferSize, inputBuffer, authTagBuffer); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceCslSign); + + +#else // NV_UVM_ENABLE + +NV_STATUS nv_uvm_suspend(void) +{ + return NV_OK; +} + +NV_STATUS nv_uvm_resume(void) +{ + return NV_OK; +} + +#endif // NV_UVM_ENABLE diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild new file mode 100644 index 0000000..6de240f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild @@ -0,0 +1,44 @@ +NVIDIA_SOURCES ?= +NVIDIA_SOURCES_CXX ?= + +NVIDIA_SOURCES += nvidia/nv-platform.c +NVIDIA_SOURCES += nvidia/nv-dsi-parse-panel-props.c +NVIDIA_SOURCES += nvidia/nv-clk.c +NVIDIA_SOURCES += nvidia/nv-gpio.c +NVIDIA_SOURCES += nvidia/nv-nano-timer.c +NVIDIA_SOURCES += nvidia/nv-backlight.c +NVIDIA_SOURCES += nvidia/nv-imp.c +NVIDIA_SOURCES += nvidia/nv-host1x.c +NVIDIA_SOURCES += nvidia/nv-platform-pm.c +NVIDIA_SOURCES += nvidia/nv-ipc-soc.c +NVIDIA_SOURCES += nvidia/nv.c +NVIDIA_SOURCES += nvidia/nv-pci.c +NVIDIA_SOURCES += nvidia/nv-dmabuf.c +NVIDIA_SOURCES += nvidia/nv-acpi.c +NVIDIA_SOURCES += nvidia/nv-cray.c +NVIDIA_SOURCES += nvidia/nv-dma.c +NVIDIA_SOURCES += nvidia/nv-i2c.c +NVIDIA_SOURCES += nvidia/nv-mmap.c +NVIDIA_SOURCES += nvidia/nv-p2p.c +NVIDIA_SOURCES += nvidia/nv-pat.c +NVIDIA_SOURCES += nvidia/nv-procfs.c +NVIDIA_SOURCES += nvidia/nv-procfs-utils.c +NVIDIA_SOURCES += nvidia/nv-usermap.c +NVIDIA_SOURCES += nvidia/nv-vm.c +NVIDIA_SOURCES += nvidia/nv-vtophys.c +NVIDIA_SOURCES += nvidia/os-interface.c +NVIDIA_SOURCES += nvidia/os-mlock.c +NVIDIA_SOURCES += nvidia/os-pci.c +NVIDIA_SOURCES += nvidia/os-registry.c +NVIDIA_SOURCES += nvidia/os-usermap.c +NVIDIA_SOURCES += nvidia/nv-modeset-interface.c 
+NVIDIA_SOURCES += nvidia/nv-pci-table.c +NVIDIA_SOURCES += nvidia/nv-kthread-q.c +NVIDIA_SOURCES += nvidia/nv-memdbg.c +NVIDIA_SOURCES += nvidia/nv-ibmnpu.c +NVIDIA_SOURCES += nvidia/nv-report-err.c +NVIDIA_SOURCES += nvidia/nv-rsync.c +NVIDIA_SOURCES += nvidia/nv-msi.c +NVIDIA_SOURCES += nvidia/nv-caps.c +NVIDIA_SOURCES += nvidia/nv-frontend.c +NVIDIA_SOURCES += nvidia/nv_uvm_interface.c diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild new file mode 100644 index 0000000..2956703 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild @@ -0,0 +1,265 @@ +########################################################################### +# Kbuild fragment for nvidia.ko +########################################################################### + +# +# Define NVIDIA_{SOURCES,OBJECTS} +# + +include $(src)/nvidia/nvidia-sources.Kbuild +NVIDIA_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_SOURCES)) + +obj-m += nvidia.o +nvidia-y := $(NVIDIA_OBJECTS) + +NVIDIA_KO = nvidia/nvidia.ko + + +# +# nv-kernel.o_binary is the core binary component of nvidia.ko, shared +# across all UNIX platforms. Create a symlink, "nv-kernel.o" that +# points to nv-kernel.o_binary, and add nv-kernel.o to the list of +# objects to link into nvidia.ko. +# +# Note that: +# - The kbuild "clean" rule will delete all objects in nvidia-y (which +# is why we use a symlink instead of just adding nv-kernel.o_binary +# to nvidia-y). +# - kbuild normally uses the naming convention of ".o_shipped" for +# binary files. That is not used here, because the kbuild rule to +# create the "normal" object file from ".o_shipped" does a copy, not +# a symlink. This file is quite large, so a symlink is preferred. +# - The file added to nvidia-y should be relative to gmake's cwd. +# But, the target for the symlink rule should be prepended with $(obj). +# - The "symlink" command is called using kbuild's if_changed macro to +# generate an .nv-kernel.o.cmd file which can be used on subsequent +# runs to determine if the command line to create the symlink changed +# and needs to be re-executed. +# + +NVIDIA_BINARY_OBJECT := $(src)/nvidia/nv-kernel.o_binary +NVIDIA_BINARY_OBJECT_O := nvidia/nv-kernel.o + +quiet_cmd_symlink = SYMLINK $@ + cmd_symlink = ln -sf $< $@ + +targets += $(NVIDIA_BINARY_OBJECT_O) + +$(obj)/$(NVIDIA_BINARY_OBJECT_O): $(NVIDIA_BINARY_OBJECT) FORCE + $(call if_changed,symlink) + +nvidia-y += $(NVIDIA_BINARY_OBJECT_O) + + +# +# Define nvidia.ko-specific CFLAGS. +# + +NVIDIA_CFLAGS += -I$(src)/nvidia +NVIDIA_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS + +ifeq ($(NV_BUILD_TYPE),release) + NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG +endif + +ifeq ($(NV_BUILD_TYPE),develop) + NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_MEM_LOGGER +endif + +ifeq ($(NV_BUILD_TYPE),debug) + NVIDIA_CFLAGS += -DDEBUG -D_DEBUG -UNDEBUG -DNV_MEM_LOGGER +endif + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_OBJECTS), $(NVIDIA_CFLAGS)) + + +# +# nv-procfs.c requires nv-compiler.h +# + +NV_COMPILER_VERSION_HEADER = $(obj)/nv_compiler.h + +$(NV_COMPILER_VERSION_HEADER): + @echo \#define NV_COMPILER \"`$(CC) -v 2>&1 | tail -n 1`\" > $@ + +$(obj)/nvidia/nv-procfs.o: $(NV_COMPILER_VERSION_HEADER) + +clean-files += $(NV_COMPILER_VERSION_HEADER) + + +# +# Build nv-interface.o from the kernel interface layer objects, suitable +# for further processing by the top-level makefile to produce a precompiled +# kernel interface file. 
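+#
+# For illustration only: with two interface objects the rule below would
+# expand to roughly the following (object names are examples). The -r
+# flag requests a relocatable (partially linked) object that the
+# top-level makefile can combine with the binary-only nv-kernel.o later:
+#
+#     ld -r -o nvidia/nv-interface.o nvidia/nv.o nvidia/nv-pci.o
+#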
+# + +NVIDIA_INTERFACE := nvidia/nv-interface.o + +# Linux kernel v5.12 and later looks at "always-y", Linux kernel versions +# before v5.6 looks at "always"; kernel versions between v5.12 and v5.6 +# look at both. + +always += $(NVIDIA_INTERFACE) +always-y += $(NVIDIA_INTERFACE) + +$(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS)) + $(LD) -r -o $@ $^ + + +# +# Register the conftests needed by nvidia.ko +# + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS) + +NV_CONFTEST_FUNCTION_COMPILE_TESTS += hash__remap_4k_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_alloc_table +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages +NV_CONFTEST_FUNCTION_COMPILE_TESTS += efi_enabled +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data +NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pm_vt_switch_required +NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi +NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_dma_ops +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs +NV_CONFTEST_FUNCTION_COMPILE_TESTS += write_cr4 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_property +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_node_by_phandle +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_node_to_nid +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_pci_get_npu_dev +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_remove_bus_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier +NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name +NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_msix_range +NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg +NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write +NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_matching_node +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dev_is_pci +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource +NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_get_platform +NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_bpmp_send_receive +NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash +NV_CONFTEST_FUNCTION_COMPILE_TESTS += hlist_for_each_entry +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pgprot_decrypted +NV_CONFTEST_FUNCTION_COMPILE_TESTS += 
cc_mkdec +NV_CONFTEST_FUNCTION_COMPILE_TESTS += iterate_fd +NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter +NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_page_iter_page +NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_close_on_exec +NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed +NV_CONFTEST_FUNCTION_COMPILE_TESTS += device_property_read_u64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_count_elems_of_size +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u8_array +NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_unregister_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_named_gpio +NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_gpio_request_one +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_input +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_output +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_get_value +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_set_value +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_to_irq +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_export_args +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_has_dynamic_attachment +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent +NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all + +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd_flags +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_get_default_device +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_byte_offset +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_aperture +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_register_ipc_client +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_unregister_ipc_client +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_client_ipc_send_recv +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_dram_clk_to_mc_clk +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channels +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node 
+NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_i2c_bus_status +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_fuse_control_read +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_get_platform + +NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations +NV_CONFTEST_TYPE_COMPILE_TESTS += kuid_t +NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += noncoherent_swiotlb_dma_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot +NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot +NV_CONFTEST_TYPE_COMPILE_TESTS += address_space_init_once +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += vmbus_channel_has_ringbuffer_page +NV_CONFTEST_TYPE_COMPILE_TESTS += device_driver_of_match_table +NV_CONFTEST_TYPE_COMPILE_TESTS += device_of_node +NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory +NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work +NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink +NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64 +NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock +NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state +NV_CONFTEST_TYPE_COMPILE_TESTS += pci_dev_has_ats_enabled +NV_CONFTEST_TYPE_COMPILE_TESTS += mt_device_gre +NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb + +NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages +NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote +NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages +NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages_remote +NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available +NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t +NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio +NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c new file mode 100644 index 0000000..520829e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c @@ -0,0 +1,2159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#include "nv-time.h" + + +#if defined(NV_SOC_TEGRA_CHIP_ID_H_PRESENT) +#include <soc/tegra/chip-id.h> +#elif defined(NV_SOC_TEGRA_FUSE_H_PRESENT) +#include <soc/tegra/fuse.h> +#endif + + +extern char *NVreg_TemporaryFilePath; + +#define MAX_ERROR_STRING 512 +static char nv_error_string[MAX_ERROR_STRING]; +nv_spinlock_t nv_error_string_lock; + +extern nv_linux_state_t nv_ctl_device; + +extern nv_kthread_q_t nv_kthread_q; + +NvU32 os_page_size = PAGE_SIZE; +NvU64 os_page_mask = NV_PAGE_MASK; +NvU8 os_page_shift = PAGE_SHIFT; +NvU32 os_sev_status = 0; +NvBool os_sev_enabled = 0; + + +#if defined(CONFIG_DMA_SHARED_BUFFER) +NvBool os_dma_buf_enabled = NV_TRUE; +#else +NvBool os_dma_buf_enabled = NV_FALSE; +#endif // CONFIG_DMA_SHARED_BUFFER + + + + +void NV_API_CALL os_disable_console_access(void) +{ + console_lock(); +} + +void NV_API_CALL os_enable_console_access(void) +{ + console_unlock(); +} + +typedef struct semaphore os_mutex_t; + +// +// os_alloc_mutex - Allocate the RM mutex +// +// ppMutex - filled in with pointer to opaque structure to mutex data type +// +NV_STATUS NV_API_CALL os_alloc_mutex +( + void **ppMutex +) +{ + NV_STATUS rmStatus; + os_mutex_t *os_mutex; + + rmStatus = os_alloc_mem(ppMutex, sizeof(os_mutex_t)); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate mutex!\n"); + return rmStatus; + } + os_mutex = (os_mutex_t *)*ppMutex; + NV_INIT_MUTEX(os_mutex); + + return NV_OK; +} + +// +// os_free_mutex - Free resources associated with mutex allocated +// via os_alloc_mutex above.
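+//
+// A minimal lifecycle sketch for these mutex helpers (the acquire and
+// release pair is defined just below), assuming a sleepable process
+// context, since os_acquire_mutex() rejects atomic callers; "mtx" is
+// an illustrative variable name:
+//
+//     void *mtx;
+//     if (os_alloc_mutex(&mtx) == NV_OK)
+//     {
+//         if (os_acquire_mutex(mtx) == NV_OK)
+//         {
+//             /* ... critical section ... */
+//             os_release_mutex(mtx);
+//         }
+//         os_free_mutex(mtx);
+//     }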
+// +// pMutex - Pointer to opaque structure to mutex data type +// +void NV_API_CALL os_free_mutex +( + void *pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + + if (os_mutex != NULL) + { + os_free_mem(pMutex); + } +} + +// +// pMutex - Pointer to opaque structure to mutex data type +// + +NV_STATUS NV_API_CALL os_acquire_mutex +( + void *pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + + if (!NV_MAY_SLEEP()) + { + return NV_ERR_INVALID_REQUEST; + } + down(os_mutex); + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_cond_acquire_mutex +( + void * pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + if (!NV_MAY_SLEEP()) + { + return NV_ERR_INVALID_REQUEST; + } + + if (down_trylock(os_mutex)) + { + return NV_ERR_TIMEOUT_RETRY; + } + + return NV_OK; +} + + +void NV_API_CALL os_release_mutex +( + void *pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + up(os_mutex); +} + +typedef struct semaphore os_semaphore_t; + + +void* NV_API_CALL os_alloc_semaphore +( + NvU32 initialValue +) +{ + NV_STATUS rmStatus; + os_semaphore_t *os_sema; + + rmStatus = os_alloc_mem((void *)&os_sema, sizeof(os_semaphore_t)); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate semaphore!\n"); + return NULL; + } + + NV_INIT_SEMA(os_sema, initialValue); + + return (void *)os_sema; +} + +void NV_API_CALL os_free_semaphore +( + void *pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + + os_free_mem(os_sema); +} + +NV_STATUS NV_API_CALL os_acquire_semaphore +( + void *pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + + if (!NV_MAY_SLEEP()) + { + return NV_ERR_INVALID_REQUEST; + } + down(os_sema); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_cond_acquire_semaphore +( + void * pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + // + // NOTE: down_trylock() is safe to call from IRQ, so we don't need an + // NV_MAY_SLEEP() check here. We do check it in os_cond_acquire_mutex(), + // even though it is also calling down_trylock(), since that keeps it + // in line with the kernel's 'struct mutex' API.
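+ //
+ // Illustrative use from a context that must not sleep; the
+ // NV_ERR_TIMEOUT_RETRY return is the caller's cue to defer the work
+ // rather than block ("sema" is an illustrative name):
+ //
+ //     if (os_cond_acquire_semaphore(sema) == NV_OK)
+ //     {
+ //         /* ... fast path under the semaphore ... */
+ //         os_release_semaphore(sema);
+ //     }
+ //     else
+ //     {
+ //         /* busy: reschedule or queue the work instead of sleeping */
+ //     }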
+ // + if (down_trylock(os_sema)) + { + return NV_ERR_TIMEOUT_RETRY; + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_release_semaphore +( + void *pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + up(os_sema); + return NV_OK; +} + +NvBool NV_API_CALL os_semaphore_may_sleep(void) +{ + return NV_MAY_SLEEP(); +} + +NvBool NV_API_CALL os_is_isr(void) +{ + return (in_irq()); +} + +// return TRUE if the caller is the super-user +NvBool NV_API_CALL os_is_administrator(void) +{ + return NV_IS_SUSER(); +} + +NvBool NV_API_CALL os_allow_priority_override(void) +{ + return capable(CAP_SYS_NICE); +} + +NvU64 NV_API_CALL os_get_num_phys_pages(void) +{ + return (NvU64)NV_NUM_PHYSPAGES; +} + +char* NV_API_CALL os_string_copy( + char *dst, + const char *src +) +{ + return strcpy(dst, src); +} + +NvU32 NV_API_CALL os_string_length( + const char* str +) +{ + return strlen(str); +} + +NvU32 NV_API_CALL os_strtoul(const char *str, char **endp, NvU32 base) +{ + return (NvU32)simple_strtoul(str, endp, base); +} + +NvS32 NV_API_CALL os_string_compare(const char *str1, const char *str2) +{ + return strcmp(str1, str2); +} + +void *os_mem_copy_custom( + void *dstPtr, + const void *srcPtr, + NvU32 length +) +{ + void *ret = dstPtr; + NvU32 dwords, bytes = length; + NvU8 *dst = dstPtr; + const NvU8 *src = srcPtr; + + if ((length >= 128) && + (((NvUPtr)dst & 3) == 0) && (((NvUPtr)src & 3) == 0)) + { + dwords = (length / sizeof(NvU32)); + bytes = (length % sizeof(NvU32)); + + while (dwords != 0) + { + *(NvU32 *)dst = *(const NvU32 *)src; + dst += sizeof(NvU32); + src += sizeof(NvU32); + dwords--; + } + } + + while (bytes != 0) + { + *dst = *src; + dst++; + src++; + bytes--; + } + + return ret; +} + +void *NV_API_CALL os_mem_copy( + void *dst, + const void *src, + NvU32 length +) +{ +#if defined(NVCPU_AARCH64) + /* + * TODO: Remove once memset/memcpy restructure is complete + * + * When performing memcpy for memory mapped as device, memcpy_[to/from]io + * must be used. WAR to check the source and destination to determine the + * correct memcpy_io to use. + * + * This WAR is limited to just aarch64 for now because the address range used + * to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr() + * does not correctly handle this. is_ioremap_addr() is needed instead. This + * will have to be addressed when reorganizing RM to use the new memset model. + */ + if (is_vmalloc_addr(dst) && !is_vmalloc_addr(src)) + { + memcpy_toio(dst, src, length); + return dst; + } + else if (!is_vmalloc_addr(dst) && is_vmalloc_addr(src)) + { + memcpy_fromio(dst, src, length); + return dst; + } + else if (is_vmalloc_addr(dst) && is_vmalloc_addr(src)) + { + return os_mem_copy_custom(dst, src, length); + } + else +#endif + { +#if defined(CONFIG_CC_OPTIMIZE_FOR_SIZE) + /* + * When the kernel is configured with CC_OPTIMIZE_FOR_SIZE=y, Kbuild uses + * -Os universally. With -Os, GCC will aggressively inline builtins, even + * if -fno-builtin is specified, including memcpy with a tiny byte-copy + * loop on x86 (rep movsb). This is horrible for performance - a strict + * dword copy is much faster - so when we detect this case, just provide + * our own implementation. + */ + return os_mem_copy_custom(dst, src, length); +#else + /* + * Generally speaking, the kernel-provided memcpy will be the fastest, + * (optimized much better for the target architecture than the above + * loop), so we want to use that whenever we can get to it.
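+ *
+ * As a concrete note on the alignment test in os_mem_copy_custom()
+ * above: an address is 4-byte aligned exactly when its two low bits
+ * are clear, so ((NvUPtr)p & 3) == 0 holds for p == 0x1000 but not for
+ * p == 0x1002, and only in the aligned case may the loop move whole
+ * NvU32 words at a time.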
+ */ + return memcpy(dst, src, length); +#endif + } +} + +NV_STATUS NV_API_CALL os_memcpy_from_user( + void *to, + const void *from, + NvU32 n +) +{ + return (NV_COPY_FROM_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK); +} + +NV_STATUS NV_API_CALL os_memcpy_to_user( + void *to, + const void *from, + NvU32 n +) +{ + return (NV_COPY_TO_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK); +} + +void* NV_API_CALL os_mem_set( + void *dst, + NvU8 c, + NvU32 length +) +{ +#if defined(NVCPU_AARCH64) + /* + * TODO: Remove once memset/memcpy restructure is complete + * + * WAR to check the destination to determine if the memory is of type Device + * or Normal, and use the correct memset. + * + * This WAR is limited to just aarch64 for now because the address range used + * to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr() + * does not correctly handle this. is_ioremap_addr() is needed instead. This + * will have to be addressed when reorganizing RM to use the new memset model. + */ + if (is_vmalloc_addr(dst)) + { + memset_io(dst, (int)c, length); + return dst; + } + else +#endif + return memset(dst, (int)c, length); +} + +NvS32 NV_API_CALL os_mem_cmp( + const NvU8 *buf0, + const NvU8* buf1, + NvU32 length +) +{ + return memcmp(buf0, buf1, length); +} + + +/* + * Operating System Memory Functions + * + * There are 2 interesting aspects of resource manager memory allocations + * that need special consideration on Linux: + * + * 1. They are typically very large, (e.g. single allocations of 164KB) + * + * 2. The resource manager assumes that it can safely allocate memory in + * interrupt handlers. + * + * The first requires that we call vmalloc, the second kmalloc. We decide + * which one to use at run time, based on the size of the request and the + * context. Allocations larger than 128KB require vmalloc, in the context + * of an ISR they fail. + */ + +#if defined(NV_VGX_HYPER) +/* + * Citrix Hypervisor-8.0 Dom0 sysmem ends up getting fragmented because + * of which high-order kmalloc allocations fail. We try to avoid it by + * requesting allocations not larger than 8K. + * + * KVM will be affected low memory pressure situation a lot, + * particularly if hugetlbfs hugepages are being used. Hence, 8K applies + * here too. + */ +#define KMALLOC_LIMIT 8192 +#else +#define KMALLOC_LIMIT 131072 +#endif + +#define VMALLOC_ALLOCATION_SIZE_FLAG (1 << 0) + +NV_STATUS NV_API_CALL os_alloc_mem( + void **address, + NvU64 size +) +{ + unsigned long alloc_size; + + if (address == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *address = NULL; + NV_MEM_TRACKING_PAD_SIZE(size); + + // + // NV_KMALLOC, nv_vmalloc take an input of 4 bytes in x86. To avoid + // truncation and wrong allocation, below check is required. + // + alloc_size = size; + + if (alloc_size != size) + return NV_ERR_INVALID_PARAMETER; + + if (!NV_MAY_SLEEP()) + { + if (alloc_size <= KMALLOC_LIMIT) + NV_KMALLOC_ATOMIC(*address, alloc_size); + } + else + { + if (alloc_size <= KMALLOC_LIMIT) + { + NV_KMALLOC_NO_OOM(*address, alloc_size); + } + if (*address == NULL) + { + *address = nv_vmalloc(alloc_size); + alloc_size |= VMALLOC_ALLOCATION_SIZE_FLAG; + } + } + + NV_MEM_TRACKING_HIDE_SIZE(address, alloc_size); + + return ((*address != NULL) ? 
NV_OK : NV_ERR_NO_MEMORY); +} + +void NV_API_CALL os_free_mem(void *address) +{ + NvU32 size; + + NV_MEM_TRACKING_RETRIEVE_SIZE(address, size); + + if (size & VMALLOC_ALLOCATION_SIZE_FLAG) + { + size &= ~VMALLOC_ALLOCATION_SIZE_FLAG; + nv_vfree(address, size); + } + else + NV_KFREE(address, size); +} + + +/***************************************************************************** +* +* Name: osGetCurrentTime +* +*****************************************************************************/ + +NV_STATUS NV_API_CALL os_get_current_time( + NvU32 *seconds, + NvU32 *useconds +) +{ + struct timespec64 tm; + + ktime_get_real_ts64(&tm); + + *seconds = tm.tv_sec; + *useconds = tm.tv_nsec / NSEC_PER_USEC; + + return NV_OK; +} + +// +// Get the High resolution tick count of the system uptime +// +NvU64 NV_API_CALL os_get_current_tick_hr(void) +{ + struct timespec64 tm; + ktime_get_raw_ts64(&tm); + return (NvU64) timespec64_to_ns(&tm); +} + +#if BITS_PER_LONG >= 64 + +NvU64 NV_API_CALL os_get_current_tick(void) +{ +#if defined(NV_JIFFIES_TO_TIMESPEC_PRESENT) + struct timespec ts; + jiffies_to_timespec(jiffies, &ts); + return (NvU64) timespec_to_ns(&ts); +#else + struct timespec64 ts; + jiffies_to_timespec64(jiffies, &ts); + return (NvU64) timespec64_to_ns(&ts); +#endif +} + +NvU64 NV_API_CALL os_get_tick_resolution(void) +{ + return (NvU64)jiffies_to_usecs(1) * NSEC_PER_USEC; +} + +#else + +NvU64 NV_API_CALL os_get_current_tick(void) +{ + /* + * 'jiffies' overflows regularly on 32-bit builds (unsigned long is 4 bytes + * instead of 8 bytes), so it's unwise to build a tick counter on it, since + * the rest of the Resman assumes the 'tick' returned from this function is + * monotonically increasing and never overflows. + * + * Instead, use the previous implementation that we've lived with since the + * beginning, which uses system clock time to calculate the tick. This is + * subject to problems if the system clock time changes dramatically + * (more than a second or so) while the Resman is actively tracking a + * timeout. + */ + NvU32 seconds, useconds; + + (void) os_get_current_time(&seconds, &useconds); + + return ((NvU64)seconds * NSEC_PER_SEC + + (NvU64)useconds * NSEC_PER_USEC); +} + +NvU64 NV_API_CALL os_get_tick_resolution(void) +{ + /* + * os_get_current_tick() uses os_get_current_time(), which has + * microsecond resolution. + */ + return 1000ULL; +} + +#endif + +//--------------------------------------------------------------------------- +// +// Misc services. 
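+//
+// Worked example for the 32-bit tick fallback above: os_get_current_time()
+// has microsecond granularity, and one microsecond is 1000 nanoseconds,
+// hence the 1000ULL resolution reported by os_get_tick_resolution(); a
+// (seconds, useconds) pair of (2, 500000) yields
+// 2 * NSEC_PER_SEC + 500000 * NSEC_PER_USEC == 2500000000 ticks.
+//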
+// +//--------------------------------------------------------------------------- + +NV_STATUS NV_API_CALL os_delay_us(NvU32 MicroSeconds) +{ + return nv_sleep_us(MicroSeconds); +} + +NV_STATUS NV_API_CALL os_delay(NvU32 MilliSeconds) +{ + return nv_sleep_ms(MilliSeconds); +} + +NvU64 NV_API_CALL os_get_cpu_frequency(void) +{ + NvU64 cpu_hz = 0; +#if defined(CONFIG_CPU_FREQ) + cpu_hz = (cpufreq_get(0) * 1000); +#elif defined(NVCPU_X86_64) + NvU64 tsc[2]; + + tsc[0] = nv_rdtsc(); + mdelay(250); + tsc[1] = nv_rdtsc(); + + cpu_hz = ((tsc[1] - tsc[0]) * 4); +#endif + return cpu_hz; +} + +NvU32 NV_API_CALL os_get_current_process(void) +{ + return NV_GET_CURRENT_PROCESS(); +} + +void NV_API_CALL os_get_current_process_name(char *buf, NvU32 len) +{ + task_lock(current); + strncpy(buf, current->comm, len - 1); + buf[len - 1] = '\0'; + task_unlock(current); +} + +NV_STATUS NV_API_CALL os_get_current_thread(NvU64 *threadId) +{ + if (in_interrupt()) + *threadId = 0; + else + *threadId = (NvU64) current->pid; + + return NV_OK; +} + +/*******************************************************************************/ +/* */ +/* Debug and logging utilities follow */ +/* */ +/*******************************************************************************/ + +// The current debug display level (default to maximum debug level) +NvU32 cur_debuglevel = 0xffffffff; + +/* + * The binary core of RM (nv-kernel.o) calls both out_string, and nv_printf. + */ +inline void NV_API_CALL out_string(const char *str) +{ + printk("%s", str); +} + +/* + * nv_printf() prints to the kernel log for the driver. + * Returns the number of characters written. + */ +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...) +{ + va_list arglist; + int chars_written = 0; + + if (debuglevel >= ((cur_debuglevel >> 4) & 0x3)) + { + size_t length; + char *temp; + + // When printk is called to extend the output of the previous line + // (i.e. when the previous line did not end in \n), the printk call + // must contain KERN_CONT. Older kernels still print the line + // correctly, but KERN_CONT was technically always required. + + // This means that every call to printk() needs to have a KERN_xxx + // prefix. The only way to get this is to rebuild the format string + // into a new buffer, with a KERN_xxx prefix prepended. + + // Unfortunately, we can't guarantee that two calls to nv_printf() + // won't be interrupted by a printk from another driver. So to be + // safe, we always append KERN_CONT. It's still technically wrong, + // but it works. + + // The long-term fix is to modify all NV_PRINTF-ish calls so that the + // string always contains only one \n (at the end) and NV_PRINTF_EX + // is deleted. But that is unlikely to ever happen. + + length = strlen(printf_format); + if (length < 1) + return 0; + + temp = kmalloc(length + sizeof(KERN_CONT), GFP_ATOMIC); + if (!temp) + return 0; + + // KERN_CONT changed in the 3.6 kernel, so we can't assume its + // composition or size. + memcpy(temp, KERN_CONT, sizeof(KERN_CONT) - 1); + memcpy(temp + sizeof(KERN_CONT) - 1, printf_format, length + 1); + + va_start(arglist, printf_format); + chars_written = vprintk(temp, arglist); + va_end(arglist); + + kfree(temp); + } + + return chars_written; +} + +NvS32 NV_API_CALL os_snprintf(char *buf, NvU32 size, const char *fmt, ...) 
+{ + va_list arglist; + int chars_written; + + va_start(arglist, fmt); + chars_written = vsnprintf(buf, size, fmt, arglist); + va_end(arglist); + + return chars_written; +} + +NvS32 NV_API_CALL os_vsnprintf(char *buf, NvU32 size, const char *fmt, va_list arglist) +{ + return vsnprintf(buf, size, fmt, arglist); +} + +void NV_API_CALL os_log_error(const char *fmt, va_list ap) +{ + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nv_error_string_lock, flags); + + vsnprintf(nv_error_string, MAX_ERROR_STRING, fmt, ap); + nv_error_string[MAX_ERROR_STRING - 1] = 0; + printk(KERN_ERR "%s", nv_error_string); + + NV_SPIN_UNLOCK_IRQRESTORE(&nv_error_string_lock, flags); +} + +void NV_API_CALL os_io_write_byte( + NvU32 address, + NvU8 value +) +{ + outb(value, address); +} + +void NV_API_CALL os_io_write_word( + NvU32 address, + NvU16 value +) +{ + outw(value, address); +} + +void NV_API_CALL os_io_write_dword( + NvU32 address, + NvU32 value +) +{ + outl(value, address); +} + +NvU8 NV_API_CALL os_io_read_byte( + NvU32 address +) +{ + return inb(address); +} + +NvU16 NV_API_CALL os_io_read_word( + NvU32 address +) +{ + return inw(address); +} + +NvU32 NV_API_CALL os_io_read_dword( + NvU32 address +) +{ + return inl(address); +} + + +static NvBool NV_API_CALL xen_support_fully_virtualized_kernel(void) +{ +#if defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + return (os_is_vgx_hyper()); +#endif + return NV_FALSE; +} + +void* NV_API_CALL os_map_kernel_space( + NvU64 start, + NvU64 size_bytes, + NvU32 mode +) +{ + void *vaddr; + + if (!xen_support_fully_virtualized_kernel() && start == 0) + { + if (mode != NV_MEMORY_CACHED) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: won't map address 0x%0llx UC!\n", start); + return NULL; + } + else + return (void *)PAGE_OFFSET; + } + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: can't map 0x%0llx, invalid context!\n", start); + os_dbg_breakpoint(); + return NULL; + } + + switch (mode) + { + case NV_MEMORY_CACHED: + vaddr = nv_ioremap_cache(start, size_bytes); + break; + case NV_MEMORY_WRITECOMBINED: + vaddr = rm_disable_iomap_wc() ? + nv_ioremap_nocache(start, size_bytes) : + nv_ioremap_wc(start, size_bytes); + break; + case NV_MEMORY_UNCACHED: + case NV_MEMORY_DEFAULT: + vaddr = nv_ioremap_nocache(start, size_bytes); + break; + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: unsupported mode!\n"); + return NULL; + } + + return vaddr; +} + +void NV_API_CALL os_unmap_kernel_space( + void *addr, + NvU64 size_bytes +) +{ + if (addr == (void *)PAGE_OFFSET) + return; + + nv_iounmap(addr, size_bytes); +} + +// flush the cpu's cache, uni-processor version +NV_STATUS NV_API_CALL os_flush_cpu_cache(void) +{ + CACHE_FLUSH(); + return NV_OK; +} + +// flush the cache of all cpus +NV_STATUS NV_API_CALL os_flush_cpu_cache_all(void) +{ +#if defined(NVCPU_AARCH64) + CACHE_FLUSH_ALL(); + return NV_OK; +#endif + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL os_flush_user_cache(void) +{ +#if defined(NVCPU_AARCH64) + if (!NV_MAY_SLEEP()) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // The Linux kernel does not export an interface for flushing a range, + // although it is possible. For now, just flush the entire cache to be + // safe. 
+    //
+    CACHE_FLUSH_ALL();
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+void NV_API_CALL os_flush_cpu_write_combine_buffer(void)
+{
+    WRITE_COMBINE_FLUSH();
+}
+
+// override initial debug level from registry
+void NV_API_CALL os_dbg_init(void)
+{
+    NvU32 new_debuglevel;
+    nvidia_stack_t *sp = NULL;
+
+    NV_SPIN_LOCK_INIT(&nv_error_string_lock);
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return;
+    }
+
+    if (NV_OK == rm_read_registry_dword(sp, NULL,
+                                        "ResmanDebugLevel",
+                                        &new_debuglevel))
+    {
+        if (new_debuglevel != (NvU32)~0)
+            cur_debuglevel = new_debuglevel;
+    }
+
+    nv_kmem_cache_free_stack(sp);
+}
+
+void NV_API_CALL os_dbg_set_level(NvU32 new_debuglevel)
+{
+    nv_printf(NV_DBG_SETUP, "NVRM: Changing debuglevel from 0x%x to 0x%x\n",
+        cur_debuglevel, new_debuglevel);
+    cur_debuglevel = new_debuglevel;
+}
+
+NV_STATUS NV_API_CALL os_schedule(void)
+{
+    if (NV_MAY_SLEEP())
+    {
+        set_current_state(TASK_INTERRUPTIBLE);
+        schedule_timeout(1);
+        return NV_OK;
+    }
+    else
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: os_schedule: Attempted to yield"
+                                 " the CPU while in atomic or interrupt"
+                                 " context\n");
+        return NV_ERR_ILLEGAL_ACTION;
+    }
+}
+
+typedef struct {
+    nv_kthread_q_item_t item;
+    void *data;
+} os_queue_data_t;
+
+static void os_execute_work_item(void *_oqd)
+{
+    os_queue_data_t *oqd = _oqd;
+    nvidia_stack_t *sp = NULL;
+    void *data = oqd->data;
+
+    NV_KFREE(oqd, sizeof(os_queue_data_t));
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return;
+    }
+
+    rm_execute_work_item(sp, data);
+
+    nv_kmem_cache_free_stack(sp);
+}
+
+NV_STATUS NV_API_CALL os_queue_work_item(struct os_work_queue *queue, void *data)
+{
+    os_queue_data_t *oqd;
+    nv_kthread_q_t *kthread;
+
+    /* Use the global queue unless a valid queue was provided */
+    kthread = queue ? &queue->nvk : &nv_kthread_q;
+
+    /* Make sure the kthread is active */
+    if (unlikely(!kthread->q_kthread)) {
+        nv_printf(NV_DBG_ERRORS, "NVRM: queue is not enabled\n");
+        return NV_ERR_NOT_READY;
+    }
+
+    /* Allocate atomically just in case we're called in atomic context. */
+    NV_KMALLOC_ATOMIC(oqd, sizeof(os_queue_data_t));
+    if (!oqd)
+        return NV_ERR_NO_MEMORY;
+
+    nv_kthread_q_item_init(&oqd->item, os_execute_work_item, oqd);
+    oqd->data = data;
+
+    nv_kthread_q_schedule_q_item(kthread, &oqd->item);
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_flush_work_queue(struct os_work_queue *queue)
+{
+    nv_kthread_q_t *kthread;
+
+    /* Use the global queue unless a valid queue was provided */
+    kthread = queue ? &queue->nvk : &nv_kthread_q;
+
+    if (NV_MAY_SLEEP())
+    {
+        if (kthread->q_kthread)
+            nv_kthread_q_flush(kthread);
+
+        return NV_OK;
+    }
+    else
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: os_flush_work_queue: attempted to execute passive"
+            " work from an atomic or interrupt context.\n");
+        return NV_ERR_ILLEGAL_ACTION;
+    }
+}
+
+extern NvU32 NVreg_EnableDbgBreakpoint;
+
+void NV_API_CALL os_dbg_breakpoint(void)
+{
+    if (NVreg_EnableDbgBreakpoint == 0)
+    {
+        return;
+    }
+
+#if defined(CONFIG_X86_REMOTE_DEBUG) || defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+  #if defined(NVCPU_X86_64)
+    __asm__ __volatile__ ("int $3");
+  #elif defined(NVCPU_ARM)
+    __asm__ __volatile__ (".word %c0" :: "i" (KGDB_COMPILED_BREAK));
+  #elif defined(NVCPU_AARCH64)
+    # warning "Need to implement os_dbg_breakpoint() for aarch64"
+  #elif defined(NVCPU_PPC64LE)
+    __asm__ __volatile__ ("trap");
+  #endif // NVCPU_*
+#elif defined(CONFIG_KDB)
+    KDB_ENTER();
+#endif // CONFIG_X86_REMOTE_DEBUG || CONFIG_KGDB || CONFIG_XMON
+}
+
+NvU32 NV_API_CALL os_get_cpu_number(void)
+{
+    NvU32 cpu_id = get_cpu();
+    put_cpu();
+    return cpu_id;
+}
+
+NvU32 NV_API_CALL os_get_cpu_count(void)
+{
+    return NV_NUM_CPUS();
+}
+
+NvBool NV_API_CALL os_pat_supported(void)
+{
+    return (nv_pat_mode != NV_PAT_MODE_DISABLED);
+}
+
+NvBool NV_API_CALL os_is_efi_enabled(void)
+{
+    return NV_EFI_ENABLED();
+}
+
+void NV_API_CALL os_get_screen_info(
+    NvU64 *pPhysicalAddress,
+    NvU16 *pFbWidth,
+    NvU16 *pFbHeight,
+    NvU16 *pFbDepth,
+    NvU16 *pFbPitch,
+    NvU64 consoleBar1Address,
+    NvU64 consoleBar2Address
+)
+{
+#if defined(CONFIG_FB) && defined(NV_NUM_REGISTERED_FB_PRESENT)
+    int i;
+    *pPhysicalAddress = 0;
+    *pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = 0;
+
+    for (i = 0; i < num_registered_fb; i++)
+    {
+        if (!registered_fb[i])
+            continue;
+
+        /* Make sure base address is mapped to GPU BAR */
+        if ((registered_fb[i]->fix.smem_start == consoleBar1Address) ||
+            (registered_fb[i]->fix.smem_start == consoleBar2Address))
+        {
+            *pPhysicalAddress = registered_fb[i]->fix.smem_start;
+            *pFbWidth = registered_fb[i]->var.xres;
+            *pFbHeight = registered_fb[i]->var.yres;
+            *pFbDepth = registered_fb[i]->var.bits_per_pixel;
+            *pFbPitch = registered_fb[i]->fix.line_length;
+            break;
+        }
+    }
+#elif NV_IS_EXPORT_SYMBOL_PRESENT_screen_info
+    /*
+     * If there is not a framebuffer console, return 0 size.
+     *
+     * orig_video_isVGA is set to 1 during early Linux kernel
+     * initialization, and then will be set to a value, such as
+     * VIDEO_TYPE_VLFB or VIDEO_TYPE_EFI, if an fbdev console is used.
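+     *
+     * For example (illustrative address only): an EFI framebuffer at
+     * 0x400000000 is reported with lfb_base = 0 and ext_lfb_base = 4;
+     * the two halves are recombined below when
+     * VIDEO_CAPABILITY_64BIT_BASE is available.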
+     */
+    if (screen_info.orig_video_isVGA <= 1)
+    {
+        *pPhysicalAddress = 0;
+        *pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = 0;
+        return;
+    }
+
+    *pPhysicalAddress = screen_info.lfb_base;
+#if defined(VIDEO_CAPABILITY_64BIT_BASE)
+    *pPhysicalAddress |= (NvU64)screen_info.ext_lfb_base << 32;
+#endif
+    *pFbWidth = screen_info.lfb_width;
+    *pFbHeight = screen_info.lfb_height;
+    *pFbDepth = screen_info.lfb_depth;
+    *pFbPitch = screen_info.lfb_linelength;
+#else
+    *pPhysicalAddress = 0;
+    *pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = 0;
+#endif
+}
+
+void NV_API_CALL os_dump_stack(void)
+{
+    dump_stack();
+}
+
+typedef struct os_spinlock_s
+{
+    nv_spinlock_t lock;
+    unsigned long eflags;
+} os_spinlock_t;
+
+NV_STATUS NV_API_CALL os_alloc_spinlock(void **ppSpinlock)
+{
+    NV_STATUS rmStatus;
+    os_spinlock_t *os_spinlock;
+
+    rmStatus = os_alloc_mem(ppSpinlock, sizeof(os_spinlock_t));
+    if (rmStatus != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate spinlock!\n");
+        return rmStatus;
+    }
+
+    os_spinlock = (os_spinlock_t *)*ppSpinlock;
+    NV_SPIN_LOCK_INIT(&os_spinlock->lock);
+    os_spinlock->eflags = 0;
+    return NV_OK;
+}
+
+void NV_API_CALL os_free_spinlock(void *pSpinlock)
+{
+    os_free_mem(pSpinlock);
+}
+
+NvU64 NV_API_CALL os_acquire_spinlock(void *pSpinlock)
+{
+    os_spinlock_t *os_spinlock = (os_spinlock_t *)pSpinlock;
+    unsigned long eflags;
+
+    NV_SPIN_LOCK_IRQSAVE(&os_spinlock->lock, eflags);
+    os_spinlock->eflags = eflags;
+
+#if defined(NVCPU_X86_64)
+    eflags &= X86_EFLAGS_IF;
+#elif defined(NVCPU_AARCH64)
+    eflags &= PSR_I_BIT;
+#endif
+    return eflags;
+}
+
+void NV_API_CALL os_release_spinlock(void *pSpinlock, NvU64 oldIrql)
+{
+    os_spinlock_t *os_spinlock = (os_spinlock_t *)pSpinlock;
+    unsigned long eflags;
+
+    eflags = os_spinlock->eflags;
+    os_spinlock->eflags = 0;
+    NV_SPIN_UNLOCK_IRQRESTORE(&os_spinlock->lock, eflags);
+}
+
+#define NV_KERNEL_RELEASE    ((LINUX_VERSION_CODE >> 16) & 0x0ff)
+#define NV_KERNEL_VERSION    ((LINUX_VERSION_CODE >> 8) & 0x0ff)
+#define NV_KERNEL_SUBVERSION ((LINUX_VERSION_CODE) & 0x0ff)
+
+NV_STATUS NV_API_CALL os_get_version_info(os_version_info *pOsVersionInfo)
+{
+    NV_STATUS status = NV_OK;
+
+    pOsVersionInfo->os_major_version = NV_KERNEL_RELEASE;
+    pOsVersionInfo->os_minor_version = NV_KERNEL_VERSION;
+    pOsVersionInfo->os_build_number  = NV_KERNEL_SUBVERSION;
+
+#if defined(UTS_RELEASE)
+    pOsVersionInfo->os_build_version_str = UTS_RELEASE;
+#endif
+
+#if defined(UTS_VERSION)
+    pOsVersionInfo->os_build_date_plus_str = UTS_VERSION;
+#endif
+
+    return status;
+}
+
+NvBool NV_API_CALL os_is_xen_dom0(void)
+{
+#if defined(NV_DOM0_KERNEL_PRESENT)
+    return NV_TRUE;
+#else
+    return NV_FALSE;
+#endif
+}
+
+NvBool NV_API_CALL os_is_vgx_hyper(void)
+{
+#if defined(NV_VGX_HYPER)
+    return NV_TRUE;
+#else
+    return NV_FALSE;
+#endif
+}
+
+NV_STATUS NV_API_CALL os_inject_vgx_msi(NvU16 guestID, NvU64 msiAddr, NvU32 msiData)
+{
+#if defined(NV_VGX_HYPER) && defined(NV_DOM0_KERNEL_PRESENT) && \
+    defined(NV_XEN_IOEMU_INJECT_MSI)
+    int rc = 0;
+    rc = xen_ioemu_inject_msi(guestID, msiAddr, msiData);
+    if (rc)
+    {
+        // Note: msiAddr is 64-bit, so it is printed with %llx here.
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: %s: can't inject MSI to guest:%d, addr:0x%llx, data:0x%x, err:%d\n",
+            __FUNCTION__, guestID, msiAddr, msiData, rc);
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NvBool NV_API_CALL os_is_grid_supported(void)
+{
+#if defined(NV_GRID_BUILD)
+    return NV_TRUE;
+#else
+    return NV_FALSE;
+#endif
+}
+
+NvU32 NV_API_CALL os_get_grid_csp_support(void)
+{
+#if defined(NV_GRID_BUILD_CSP)
+    return NV_GRID_BUILD_CSP;
+#else
+    return 0;
+#endif
+}
+
+void NV_API_CALL os_bug_check(NvU32 bugCode, const char *bugCodeStr)
+{
+    panic(bugCodeStr);
+}
+
+NV_STATUS NV_API_CALL os_get_euid(NvU32 *pSecToken)
+{
+    *pSecToken = NV_CURRENT_EUID();
+    return NV_OK;
+}
+
+// These functions are needed only on x86_64 platforms.
+#if defined(NVCPU_X86_64)
+
+static NvBool os_verify_checksum(const NvU8 *pMappedAddr, NvU32 length)
+{
+    NvU8 sum = 0;
+    NvU32 iter = 0;
+
+    for (iter = 0; iter < length; iter++)
+        sum += pMappedAddr[iter];
+
+    return sum == 0;
+}
+
+#define _VERIFY_SMBIOS3(_pMappedAddr)                       \
+    _pMappedAddr &&                                         \
+    (os_mem_cmp(_pMappedAddr, "_SM3_", 5) == 0 &&           \
+     _pMappedAddr[6] < 32 &&                                \
+     _pMappedAddr[6] > 0 &&                                 \
+     os_verify_checksum(_pMappedAddr, _pMappedAddr[6]))
+
+#define OS_VERIFY_SMBIOS3(pMappedAddr) _VERIFY_SMBIOS3((pMappedAddr))
+
+#define _VERIFY_SMBIOS(_pMappedAddr)                        \
+    _pMappedAddr &&                                         \
+    (os_mem_cmp(_pMappedAddr, "_SM_", 4) == 0 &&            \
+     _pMappedAddr[5] < 32 &&                                \
+     _pMappedAddr[5] > 0 &&                                 \
+     os_verify_checksum(_pMappedAddr, _pMappedAddr[5]) &&   \
+     os_mem_cmp((_pMappedAddr + 16), "_DMI_", 5) == 0 &&    \
+     os_verify_checksum((_pMappedAddr + 16), 15))
+
+#define OS_VERIFY_SMBIOS(pMappedAddr) _VERIFY_SMBIOS((pMappedAddr))
+
+#define SMBIOS_LEGACY_BASE 0xF0000
+#define SMBIOS_LEGACY_SIZE 0x10000
+
+static NV_STATUS os_get_smbios_header_legacy(NvU64 *pSmbsAddr)
+{
+    NV_STATUS status = NV_ERR_OPERATING_SYSTEM;
+    NvU8 *pMappedAddr = NULL;
+    NvU8 *pIterAddr = NULL;
+
+    pMappedAddr = (NvU8*)os_map_kernel_space(SMBIOS_LEGACY_BASE,
+                                             SMBIOS_LEGACY_SIZE,
+                                             NV_MEMORY_CACHED);
+    if (pMappedAddr == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    pIterAddr = pMappedAddr;
+
+    for (; pIterAddr < (pMappedAddr + SMBIOS_LEGACY_SIZE); pIterAddr += 16)
+    {
+        if (OS_VERIFY_SMBIOS3(pIterAddr))
+        {
+            *pSmbsAddr = SMBIOS_LEGACY_BASE + (pIterAddr - pMappedAddr);
+            status = NV_OK;
+            break;
+        }
+
+        if (OS_VERIFY_SMBIOS(pIterAddr))
+        {
+            *pSmbsAddr = SMBIOS_LEGACY_BASE + (pIterAddr - pMappedAddr);
+            status = NV_OK;
+            break;
+        }
+    }
+
+    os_unmap_kernel_space(pMappedAddr, SMBIOS_LEGACY_SIZE);
+
+    return status;
+}
+
+// This function is needed only if "efi" is enabled.
+#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI))
+static NV_STATUS os_verify_smbios_header_uefi(NvU64 smbsAddr)
+{
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+    NvU64 start = 0, offset = 0, size = 32;
+    NvU8 *pMappedAddr = NULL, *pBufAddr = NULL;
+
+    start = smbsAddr;
+    offset = (start & ~os_page_mask);
+    start &= os_page_mask;
+    size = ((size + offset + ~os_page_mask) & os_page_mask);
+
+    pBufAddr = (NvU8*)os_map_kernel_space(start,
+                                          size,
+                                          NV_MEMORY_CACHED);
+    if (pBufAddr == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    pMappedAddr = pBufAddr + offset;
+
+    if (OS_VERIFY_SMBIOS3(pMappedAddr))
+    {
+        status = NV_OK;
+        goto done;
+    }
+
+    if (OS_VERIFY_SMBIOS(pMappedAddr))
+    {
+        status = NV_OK;
+    }
+
+done:
+    os_unmap_kernel_space(pBufAddr, size);
+    return status;
+}
+#endif
+
+static NV_STATUS os_get_smbios_header_uefi(NvU64 *pSmbsAddr)
+{
+    NV_STATUS status = NV_ERR_OPERATING_SYSTEM;
+
+// Make sure that efi.h is present before using "struct efi".
+#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI))
+
+// Make sure that efi.h has SMBIOS3_TABLE_GUID present.
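+//
+// Note (added for clarity): the 64-bit SMBIOS 3.0 entry point ("_SM3_") is
+// preferred and probed first; if it is absent or fails verification, the
+// code falls back to the 32-bit entry point ("_SM_") below.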
+#if defined(SMBIOS3_TABLE_GUID)
+    if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
+    {
+        status = os_verify_smbios_header_uefi(efi.smbios3);
+        if (status == NV_OK)
+        {
+            *pSmbsAddr = efi.smbios3;
+            return NV_OK;
+        }
+    }
+#endif
+
+    if (efi.smbios != EFI_INVALID_TABLE_ADDR)
+    {
+        status = os_verify_smbios_header_uefi(efi.smbios);
+        if (status == NV_OK)
+        {
+            *pSmbsAddr = efi.smbios;
+            return NV_OK;
+        }
+    }
+#endif
+
+    return status;
+}
+
+#endif // defined(NVCPU_X86_64)
+
+// This function locates the SMBIOS entry point.
+NV_STATUS NV_API_CALL os_get_smbios_header(NvU64 *pSmbsAddr)
+{
+
+#if !defined(NVCPU_X86_64)
+    return NV_ERR_NOT_SUPPORTED;
+#else
+    NV_STATUS status = NV_OK;
+
+    if (os_is_efi_enabled())
+    {
+        status = os_get_smbios_header_uefi(pSmbsAddr);
+    }
+    else
+    {
+        status = os_get_smbios_header_legacy(pSmbsAddr);
+    }
+
+    return status;
+#endif
+}
+
+NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi
+(
+    NvU32 *pRsdpAddr
+)
+{
+    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
+
+    if (pRsdpAddr == NULL)
+    {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    *pRsdpAddr = 0;
+
+// Make sure that efi.h is present before using "struct efi".
+#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI))
+
+    if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+    {
+        *pRsdpAddr = efi.acpi20;
+        status = NV_OK;
+    }
+    else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+    {
+        *pRsdpAddr = efi.acpi;
+        status = NV_OK;
+    }
+    else
+    {
+        nv_printf(NV_DBG_ERRORS, "NVRM: RSDP Not found!\n");
+        status = NV_ERR_OPERATING_SYSTEM;
+    }
+#endif
+
+    return status;
+}
+
+void NV_API_CALL os_add_record_for_crashLog(void *pbuffer, NvU32 size)
+{
+}
+
+void NV_API_CALL os_delete_record_for_crashLog(void *pbuffer)
+{
+}
+
+#if !defined(NV_VGPU_KVM_BUILD)
+NV_STATUS NV_API_CALL os_call_vgpu_vfio(void *pvgpu_vfio_info, NvU32 cmd_type)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+#endif
+
+NV_STATUS NV_API_CALL os_alloc_pages_node
+(
+    NvS32 nid,
+    NvU32 size,
+    NvU32 flag,
+    NvU64 *pAddress
+)
+{
+    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
+
+#if defined(__GFP_THISNODE) && defined(GFP_HIGHUSER_MOVABLE) && \
+    defined(__GFP_COMP) && defined(__GFP_NORETRY) && defined(__GFP_NOWARN)
+    gfp_t gfp_mask;
+    struct page *alloc_addr;
+    unsigned int order = get_order(size);
+
+    /*
+     * Explanation of flags used:
+     *
+     * 1. __GFP_THISNODE:       This will make sure the allocation happens
+     *                          on the node specified by nid.
+     *
+     * 2. GFP_HIGHUSER_MOVABLE: This makes allocations from ZONE_MOVABLE.
+     *
+     * 3. __GFP_COMP:           This will make allocations with compound
+     *                          pages, which is needed in order to use
+     *                          the vm_insert_page API.
+     *
+     * 4. __GFP_NORETRY:        Used to avoid the Linux kernel OOM killer.
+     *
+     * 5. __GFP_NOWARN:         Used to avoid a WARN_ON in the slowpath if
+     *                          the requested order is too large (just fail
+     *                          instead).
+     *
+     * 6. (Optional) __GFP_RECLAIM: Used to allow/forbid reclaim.
+     *                          This is part of GFP_USER and consequently
+     *                          GFP_HIGHUSER_MOVABLE.
+     *
+     * Some of these flags are relatively more recent, with the last of them
+     * (GFP_HIGHUSER_MOVABLE) having been added with this Linux kernel commit:
+     *
+     * 2007-07-17 769848c03895b63e5662eb7e4ec8c4866f7d0183
+     *
+     * Assume that this feature will only be used on kernels that support all
+     * of the needed GFP flags.
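+     *
+     * Worked example (illustrative): size = 2 MiB on a 4K-page kernel gives
+     * order = get_order(size) = 9, i.e. one 512-page compound allocation
+     * taken strictly from NUMA node nid.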
+     */
+
+    gfp_mask = __GFP_THISNODE | GFP_HIGHUSER_MOVABLE | __GFP_COMP |
+               __GFP_NORETRY | __GFP_NOWARN;
+
+#if defined(__GFP_RECLAIM)
+    if (flag & NV_ALLOC_PAGES_NODE_SKIP_RECLAIM)
+    {
+        gfp_mask &= ~(__GFP_RECLAIM);
+    }
+#endif // defined(__GFP_RECLAIM)
+
+    alloc_addr = alloc_pages_node(nid, gfp_mask, order);
+    if (alloc_addr == NULL)
+    {
+        nv_printf(NV_DBG_INFO,
+            "NVRM: alloc_pages_node(node = %d, order = %u) failed\n",
+            nid, order);
+        status = NV_ERR_NO_MEMORY;
+    }
+    else if (page_to_nid(alloc_addr) != nid)
+    {
+        //
+        // We can hit this case when a Linux kernel bug is not patched.
+        // The needed patch is https://patchwork.kernel.org/patch/10427387/
+        //
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: alloc_pages_node(node = %d, order = %u) wrong node ID.\n",
+            nid, order);
+        __free_pages(alloc_addr, order);
+        status = NV_ERR_NO_MEMORY;
+    }
+    else
+    {
+        *pAddress = (NvU64)page_to_phys(alloc_addr);
+        status = NV_OK;
+    }
+#endif // GFP flags
+
+    return status;
+}
+
+NV_STATUS NV_API_CALL os_get_page
+(
+    NvU64 address
+)
+{
+    get_page(NV_GET_PAGE_STRUCT(address));
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_put_page
+(
+    NvU64 address
+)
+{
+    put_page(NV_GET_PAGE_STRUCT(address));
+    return NV_OK;
+}
+
+NvU32 NV_API_CALL os_get_page_refcount
+(
+    NvU64 address
+)
+{
+    return NV_PAGE_COUNT(NV_GET_PAGE_STRUCT(address));
+}
+
+NvU32 NV_API_CALL os_count_tail_pages
+(
+    NvU64 address
+)
+{
+    NvU32 order = compound_order(compound_head(NV_GET_PAGE_STRUCT(address)));
+
+    return 1 << order;
+}
+
+void NV_API_CALL os_free_pages_phys
+(
+    NvU64 address,
+    NvU32 size
+)
+{
+    __free_pages(NV_GET_PAGE_STRUCT(address), get_order(size));
+}
+
+NV_STATUS NV_API_CALL os_numa_memblock_size
+(
+    NvU64 *memblock_size
+)
+{
+    if (nv_ctl_device.numa_memblock_size == 0)
+        return NV_ERR_INVALID_STATE;
+    *memblock_size = nv_ctl_device.numa_memblock_size;
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_call_nv_vmbus(NvU32 vmbus_cmd, void *input)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS NV_API_CALL os_open_temporary_file
+(
+    void **ppFile
+)
+{
+#if defined(O_TMPFILE)
+    struct file *file;
+    const char *default_path = "/tmp";
+    const int flags = O_TMPFILE | O_LARGEFILE | O_RDWR;
+    const char *path = NVreg_TemporaryFilePath;
+
+    /*
+     * The filp_open() call below depends on the current task's fs_struct
+     * (current->fs), which may already be NULL if this is called during
+     * process teardown.
+     */
+    if (current->fs == NULL)
+    {
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    if (!path)
+    {
+        path = default_path;
+    }
+
+    file = filp_open(path, flags, 0);
+    if (IS_ERR(file))
+    {
+        if ((path != default_path) && (PTR_ERR(file) == -ENOENT))
+        {
+            nv_printf(NV_DBG_ERRORS,
+                "NVRM: The temporary file path specified via the NVreg_TemporaryFilePath\n"
+                "NVRM: module parameter does not exist. Defaulting to /tmp.\n");
+
+            file = filp_open(default_path, flags, 0);
+        }
+    }
+
+    if (IS_ERR(file))
+    {
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    *ppFile = (void *)file;
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+void NV_API_CALL os_close_file
+(
+    void *pFile
+)
+{
+    filp_close(pFile, NULL);
+}
+
+#define NV_MAX_NUM_FILE_IO_RETRIES 10
+
+NV_STATUS NV_API_CALL os_write_file
+(
+    void *pFile,
+    NvU8 *pBuffer,
+    NvU64 size,
+    NvU64 offset
+)
+{
+#if defined(NV_KERNEL_WRITE_PRESENT)
+    loff_t f_pos = offset;
+    ssize_t num_written;
+    int num_retries = NV_MAX_NUM_FILE_IO_RETRIES;
+
+retry:
+#if defined(NV_KERNEL_WRITE_HAS_POINTER_POS_ARG)
+    num_written = kernel_write(pFile, pBuffer, size, &f_pos);
+#else
+    num_written = kernel_write(pFile, pBuffer, size, f_pos);
+#endif
+    if (num_written < 0)
+    {
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+    else if (num_written < size)
+    {
+        if (num_written > 0)
+        {
+            pBuffer += num_written;
+            size -= num_written;
+        }
+        if (--num_retries > 0)
+        {
+            cond_resched();
+            goto retry;
+        }
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+NV_STATUS NV_API_CALL os_read_file
+(
+    void *pFile,
+    NvU8 *pBuffer,
+    NvU64 size,
+    NvU64 offset
+)
+{
+    loff_t f_pos = offset;
+    ssize_t num_read;
+    int num_retries = NV_MAX_NUM_FILE_IO_RETRIES;
+
+retry:
+#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
+    num_read = kernel_read(pFile, pBuffer, size, &f_pos);
+#else
+    num_read = kernel_read(pFile, f_pos, pBuffer, size);
+#endif
+    if (num_read < 0)
+    {
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+    else if (num_read < size)
+    {
+        if (num_read > 0)
+        {
+            pBuffer += num_read;
+            size -= num_read;
+        }
+        if (--num_retries > 0)
+        {
+            cond_resched();
+            goto retry;
+        }
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_open_readonly_file
+(
+    const char *filename,
+    void **ppFile
+)
+{
+    struct file *file;
+
+    /*
+     * The filp_open() call below depends on the current task's fs_struct
+     * (current->fs), which may already be NULL if this is called during
+     * process teardown.
+     */
+    if (current->fs == NULL)
+    {
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    file = filp_open(filename, O_RDONLY, 0);
+    if (IS_ERR(file))
+    {
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    *ppFile = (void *)file;
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_open_and_read_file
+(
+    const char *filename,
+    NvU8 *buf,
+    NvU64 count
+)
+{
+    void *fileHandle;
+    NV_STATUS status;
+
+    status = os_open_readonly_file(filename, &fileHandle);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    status = os_read_file(fileHandle, buf, count, 0);
+
+    os_close_file(fileHandle);
+
+    return status;
+}
+
+NvBool NV_API_CALL os_is_nvswitch_present(void)
+{
+    struct pci_device_id nvswitch_pci_table[] = {
+        {
+            PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+            .class      = PCI_CLASS_BRIDGE_OTHER << 8,
+            .class_mask = PCI_ANY_ID
+        },
+        {0}
+    };
+
+    return !!pci_dev_present(nvswitch_pci_table);
+}
+
+void NV_API_CALL os_get_random_bytes
+(
+    NvU8 *bytes,
+    NvU16 numBytes
+)
+{
+    get_random_bytes(bytes, numBytes);
+}
+
+NV_STATUS NV_API_CALL os_alloc_wait_queue
+(
+    os_wait_queue **wq
+)
+{
+    NV_KMALLOC(*wq, sizeof(os_wait_queue));
+    if (*wq == NULL)
+        return NV_ERR_NO_MEMORY;
+
+    init_completion(&(*wq)->q);
+
+    return NV_OK;
+}
+
+void NV_API_CALL os_free_wait_queue
+(
+    os_wait_queue *wq
+)
+{
+    NV_KFREE(wq, sizeof(os_wait_queue));
+}
+
+void NV_API_CALL os_wait_uninterruptible
+(
+    os_wait_queue *wq
+)
+{
+    wait_for_completion(&wq->q);
+}
+
+void NV_API_CALL os_wait_interruptible
+(
+    os_wait_queue *wq
+)
+{
+    wait_for_completion_interruptible(&wq->q);
+}
+
+void NV_API_CALL os_wake_up
+(
+    os_wait_queue *wq
+)
+{
+    complete_all(&wq->q);
+}
+
+
+
+/* Define Tegra platform constants locally */
+#define NV_TEGRA_PLATFORM_SILICON 0
+#define NV_TEGRA_PLATFORM_FPGA    2
+#define NV_TEGRA_PLATFORM_VDK     8
+
+/* These temporary macros will be deleted once tegra_get_platform()
+ * is aligned with the upstream kernel. */
+#define NV_TEMP_TEGRA_PLATFORM_FPGA 3
+#define NV_TEMP_TEGRA_PLATFORM_VDK  5
+
+NV_STATUS NV_API_CALL os_get_tegra_platform
+(
+    NvU32 *mode
+)
+{
+#if defined(NV_TEGRA_GET_PLATFORM_PRESENT) && NV_IS_EXPORT_SYMBOL_PRESENT_tegra_get_platform
+    NvU8 platform;
+
+    platform = tegra_get_platform();
+    switch (platform)
+    {
+        case NV_TEMP_TEGRA_PLATFORM_VDK:
+        case NV_TEGRA_PLATFORM_VDK:
+            *mode = NV_OS_TEGRA_PLATFORM_SIM;
+            break;
+        case NV_TEGRA_PLATFORM_FPGA:
+        case NV_TEMP_TEGRA_PLATFORM_FPGA:
+            *mode = NV_OS_TEGRA_PLATFORM_FPGA;
+            break;
+        default:
+            nv_printf(NV_DBG_ERRORS, "Invalid Tegra platform(%d)\n",
+                      platform);
+            // Intentional fall-through: treat unknown platforms as silicon.
+        case NV_TEGRA_PLATFORM_SILICON:
+            *mode = NV_OS_TEGRA_PLATFORM_SILICON;
+            break;
+    }
+
+    return NV_OK;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+}
+
+
+nv_cap_t* NV_API_CALL os_nv_cap_init
+(
+    const char *path
+)
+{
+    return nv_cap_init(path);
+}
+
+nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry
+(
+    nv_cap_t *parent_cap,
+    const char *name,
+    int mode
+)
+{
+    return nv_cap_create_dir_entry(parent_cap, name, mode);
+}
+
+nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry
+(
+    nv_cap_t *parent_cap,
+    const char *name,
+    int mode
+)
+{
+    return nv_cap_create_file_entry(parent_cap, name, mode);
+}
+
+void NV_API_CALL os_nv_cap_destroy_entry
+(
+    nv_cap_t *cap
+)
+{
+    nv_cap_destroy_entry(cap);
+}
+
+int NV_API_CALL os_nv_cap_validate_and_dup_fd
+(
+    const nv_cap_t *cap,
+    int fd
+)
+{
+    return nv_cap_validate_and_dup_fd(cap, fd);
+}
+
+void NV_API_CALL os_nv_cap_close_fd
+(
+    int fd
+)
+{
+    nv_cap_close_fd(fd);
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
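+/*
+ * Illustrative sketch (assumed names, paths, and modes; not part of the
+ * driver): the os_nv_cap_*() wrappers above are typically used to build a
+ * small capability tree and tear it down again, e.g.:
+ *
+ *     nv_cap_t *root = os_nv_cap_init("/driver/nvidia/capabilities");
+ *     nv_cap_t *dir  = os_nv_cap_create_dir_entry(root, "mig", 0555);
+ *     nv_cap_t *file = os_nv_cap_create_file_entry(dir, "config", 0444);
+ *     ...
+ *     os_nv_cap_destroy_entry(file);
+ *     os_nv_cap_destroy_entry(dir);
+ */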
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c
new file mode 100644
index 0000000..e378245
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c
@@ -0,0 +1,287 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+static inline int nv_follow_pfn(struct vm_area_struct *vma,
+                                unsigned long address,
+                                unsigned long *pfn)
+{
+#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT)
+    return unsafe_follow_pfn(vma, address, pfn);
+#else
+    return follow_pfn(vma, address, pfn);
+#endif
+}
+
+/*!
+ * @brief Locates the PFNs for a user IO address range, and converts those to
+ *        their associated PTEs.
+ *
+ * @param[in]     vma         VMA that contains the virtual address range given by the
+ *                            start and page count parameters.
+ * @param[in]     start       Beginning of the virtual address range of the IO PTEs.
+ * @param[in]     page_count  Number of pages containing the IO range being
+ *                            mapped.
+ * @param[in,out] pte_array   Storage array for PTE addresses. Must be large
+ *                            enough to contain at least page_count pointers.
+ *
+ * @return NV_OK if the PTEs were identified successfully, error otherwise.
+ */
+static NV_STATUS get_io_ptes(struct vm_area_struct *vma,
+                             NvUPtr start,
+                             NvU64 page_count,
+                             NvU64 **pte_array)
+{
+    NvU64 i;
+    unsigned long pfn;
+
+    for (i = 0; i < page_count; i++)
+    {
+        if (nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0)
+        {
+            return NV_ERR_INVALID_ADDRESS;
+        }
+
+        pte_array[i] = (NvU64 *)(pfn << PAGE_SHIFT);
+
+        if (i == 0)
+            continue;
+
+        //
+        // This interface is to be used for contiguous, uncacheable I/O regions.
+        // Internally, osCreateOsDescriptorFromIoMemory() checks the user-provided
+        // flags against this, and creates a single memory descriptor with the same
+        // attributes. This check ensures the actual mapping supplied matches the
+        // user's declaration. Ensure the PFNs represent a contiguous range,
+        // error if they do not.
+        //
+        if ((NvU64)pte_array[i] != (((NvU64)pte_array[i-1]) + PAGE_SIZE))
+        {
+            return NV_ERR_INVALID_ADDRESS;
+        }
+    }
+    return NV_OK;
+}
+
+/*!
+ * @brief Pins user IO pages that have been mapped to the user process's
+ *        virtual address space with remap_pfn_range.
+ *
+ * @param[in]     vma         VMA that contains the virtual address range given by the
+ *                            start and the page count.
+ * @param[in]     start       Beginning of the virtual address range of the IO pages.
+ * @param[in]     page_count  Number of pages to pin from start.
+ * @param[in,out] page_array  Storage array for pointers to the pinned pages.
+ *                            Must be large enough to contain at least page_count
+ *                            pointers.
+ *
+ * @return NV_OK if the pages were pinned successfully, error otherwise.
+ */
+static NV_STATUS get_io_pages(struct vm_area_struct *vma,
+                              NvUPtr start,
+                              NvU64 page_count,
+                              struct page **page_array)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU64 i, pinned = 0;
+    unsigned long pfn;
+
+    for (i = 0; i < page_count; i++)
+    {
+        if ((nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0) ||
+            (!pfn_valid(pfn)))
+        {
+            rmStatus = NV_ERR_INVALID_ADDRESS;
+            break;
+        }
+
+        // Page-backed memory mapped to userspace with remap_pfn_range
+        page_array[i] = pfn_to_page(pfn);
+        get_page(page_array[i]);
+        pinned++;
+    }
+
+    if (pinned < page_count)
+    {
+        for (i = 0; i < pinned; i++)
+            put_page(page_array[i]);
+        rmStatus = NV_ERR_INVALID_ADDRESS;
+    }
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL os_lookup_user_io_memory(
+    void *address,
+    NvU64 page_count,
+    NvU64 **pte_array,
+    void **page_array
+)
+{
+    NV_STATUS rmStatus;
+    struct mm_struct *mm = current->mm;
+    struct vm_area_struct *vma;
+    unsigned long pfn;
+    NvUPtr start = (NvUPtr)address;
+    void **result_array;
+
+    if (!NV_MAY_SLEEP())
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: %s(): invalid context!\n", __FUNCTION__);
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    rmStatus = os_alloc_mem((void **)&result_array, (page_count * sizeof(NvP64)));
+    if (rmStatus != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: failed to allocate page table!\n");
+        return rmStatus;
+    }
+
+    nv_mmap_read_lock(mm);
+
+    // Find the first VMA which intersects the interval start..start+1.
+    vma = find_vma_intersection(mm, start, start + 1);
+
+    // Verify that the given address range is contained in a single vma
+    if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0) ||
+        !((vma->vm_start <= start) &&
+          ((vma->vm_end - start) >> PAGE_SHIFT >= page_count)))
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "Cannot map memory with base addr 0x%llx and size of 0x%llx pages\n",
+            start, page_count);
+        rmStatus = NV_ERR_INVALID_ADDRESS;
+        goto done;
+    }
+
+    if (nv_follow_pfn(vma, start, &pfn) < 0)
+    {
+        rmStatus = NV_ERR_INVALID_ADDRESS;
+        goto done;
+    }
+
+    if (pfn_valid(pfn))
+    {
+        rmStatus = get_io_pages(vma, start, page_count, (struct page **)result_array);
+        if (rmStatus == NV_OK)
+            *page_array = (void *)result_array;
+    }
+    else
+    {
+        rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
+        if (rmStatus == NV_OK)
+            *pte_array = (NvU64 *)result_array;
+    }
+
+done:
+    nv_mmap_read_unlock(mm);
+
+    if (rmStatus != NV_OK)
+    {
+        os_free_mem(result_array);
+    }
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL os_lock_user_pages(
+    void *address,
+    NvU64 page_count,
+    void **page_array,
+    NvU32 flags
+)
+{
+    NV_STATUS rmStatus;
+    struct mm_struct *mm = current->mm;
+    struct page **user_pages;
+    NvU64 i, pinned;
+    unsigned int gup_flags = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags) ?
+                             FOLL_WRITE : 0;
+    int ret;
+
+    if (!NV_MAY_SLEEP())
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: %s(): invalid context!\n", __FUNCTION__);
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    rmStatus = os_alloc_mem((void **)&user_pages,
+                            (page_count * sizeof(*user_pages)));
+    if (rmStatus != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS,
+            "NVRM: failed to allocate page table!\n");
+        return rmStatus;
+    }
+
+    nv_mmap_read_lock(mm);
+    ret = NV_PIN_USER_PAGES((unsigned long)address,
+                            page_count, gup_flags, user_pages, NULL);
+    nv_mmap_read_unlock(mm);
+    pinned = ret;
+
+    if (ret < 0)
+    {
+        os_free_mem(user_pages);
+        return NV_ERR_INVALID_ADDRESS;
+    }
+    else if (pinned < page_count)
+    {
+        for (i = 0; i < pinned; i++)
+            NV_UNPIN_USER_PAGE(user_pages[i]);
+        os_free_mem(user_pages);
+        return NV_ERR_INVALID_ADDRESS;
+    }
+
+    *page_array = user_pages;
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_unlock_user_pages(
+    NvU64 page_count,
+    void *page_array
+)
+{
+    NvBool write = 1;
+    struct page **user_pages = page_array;
+    NvU32 i;
+
+    for (i = 0; i < page_count; i++)
+    {
+        if (write)
+            set_page_dirty_lock(user_pages[i]);
+        NV_UNPIN_USER_PAGE(user_pages[i]);
+    }
+
+    os_free_mem(user_pages);
+
+    return NV_OK;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c
new file mode 100644
index 0000000..3fdf487
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c
@@ -0,0 +1,206 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+void* NV_API_CALL os_pci_init_handle(
+    NvU32 domain,
+    NvU8 bus,
+    NvU8 slot,
+    NvU8 function,
+    NvU16 *vendor,
+    NvU16 *device
+)
+{
+    struct pci_dev *dev;
+    unsigned int devfn = PCI_DEVFN(slot, function);
+
+    if (!NV_MAY_SLEEP())
+        return NULL;
+
+    dev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus, devfn);
+    if (dev != NULL)
+    {
+        if (vendor) *vendor = dev->vendor;
+        if (device) *device = dev->device;
+        pci_dev_put(dev); /* TODO: Fix me! (hotplug) */
+    }
+    return (void *) dev;
+}
+
+NV_STATUS NV_API_CALL os_pci_read_byte(
+    void *handle,
+    NvU32 offset,
+    NvU8 *pReturnValue
+)
+{
+    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
+    {
+        *pReturnValue = 0xff;
+        return NV_ERR_NOT_SUPPORTED;
+    }
+    pci_read_config_byte((struct pci_dev *)handle, offset, pReturnValue);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_pci_read_word(
+    void *handle,
+    NvU32 offset,
+    NvU16 *pReturnValue
+)
+{
+    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
+    {
+        *pReturnValue = 0xffff;
+        return NV_ERR_NOT_SUPPORTED;
+    }
+    pci_read_config_word((struct pci_dev *)handle, offset, pReturnValue);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_pci_read_dword(
+    void *handle,
+    NvU32 offset,
+    NvU32 *pReturnValue
+)
+{
+    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
+    {
+        *pReturnValue = 0xffffffff;
+        return NV_ERR_NOT_SUPPORTED;
+    }
+    pci_read_config_dword((struct pci_dev *)handle, offset, pReturnValue);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_pci_write_byte(
+    void *handle,
+    NvU32 offset,
+    NvU8 value
+)
+{
+    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
+        return NV_ERR_NOT_SUPPORTED;
+
+    pci_write_config_byte((struct pci_dev *)handle, offset, value);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_pci_write_word(
+    void *handle,
+    NvU32 offset,
+    NvU16 value
+)
+{
+    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
+        return NV_ERR_NOT_SUPPORTED;
+
+    pci_write_config_word((struct pci_dev *)handle, offset, value);
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL os_pci_write_dword(
+    void *handle,
+    NvU32 offset,
+    NvU32 value
+)
+{
+    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
+        return NV_ERR_NOT_SUPPORTED;
+
+    pci_write_config_dword((struct pci_dev *)handle, offset, value);
+    return NV_OK;
+}
+
+NvBool NV_API_CALL os_pci_remove_supported(void)
+{
+#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
+    return NV_TRUE;
+#else
+    return NV_FALSE;
+#endif
+}
+
+void NV_API_CALL os_pci_remove(
+    void *handle
+)
+{
+#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
+    NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(handle);
+#elif defined(DEBUG)
+    nv_printf(NV_DBG_ERRORS,
+        "NVRM: %s() is called even though NV_PCI_STOP_AND_REMOVE_BUS_DEVICE is not defined\n",
+        __FUNCTION__);
+    os_dbg_breakpoint();
+#endif
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c
new file mode 100644
index 0000000..ed0d09e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c
@@ -0,0 +1,336 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2000-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+#define NV_DEFINE_REGISTRY_KEY_TABLE
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+#include "nv-gpu-info.h"
+
+/*!
+ * @brief This function parses a PCI BDF identifier string and returns the
+ * Domain, Bus, Device and Function components of the PCI BDF string.
+ *
+ * The parser accepts the PCI BDF string in the following three formats:
+ *
+ * 1) bus:slot             : Domain and function default to 0.
+ * 2) domain:bus:slot      : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI dev id string.
+ *
+ * @param[in]  pci_dev_str  String containing the BDF to be parsed.
+ * @param[out] pci_domain   Pointer where pci_domain is to be returned.
+ * @param[out] pci_bus      Pointer where pci_bus is to be returned.
+ * @param[out] pci_slot     Pointer where pci_slot is to be returned.
+ * @param[out] pci_func     Pointer where pci_func is to be returned.
+ *
+ * @return NV_OK if it succeeds, or an NV_STATUS error code otherwise.
+ */
+static NV_STATUS pci_str_to_bdf(char *pci_dev_str, NvU32 *pci_domain,
+    NvU32 *pci_bus, NvU32 *pci_slot, NvU32 *pci_func)
+{
+    char *option_string = NULL;
+    char *token, *string;
+    NvU32 domain, bus, slot;
+    NV_STATUS status = NV_OK;
+
+    //
+    // remove_spaces() allocates memory, hence we need to keep a pointer
+    // to the original string for freeing at end of function.
+    //
+    if ((option_string = rm_remove_spaces(pci_dev_str)) == NULL)
+    {
+        // memory allocation failed, returning
+        return NV_ERR_GENERIC;
+    }
+
+    string = option_string;
+
+    if (!strlen(string) || !pci_domain || !pci_bus || !pci_slot || !pci_func)
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto done;
+    }
+
+    if ((token = strsep(&string, ".")) != NULL)
+    {
+        // A PCI device can have at most eight functions (0-7).
+        if ((string != NULL) && (!(*string >= '0' && *string <= '7') ||
+            (strlen(string) > 1)))
+        {
+            nv_printf(NV_DBG_ERRORS,
+                "NVRM: Invalid PCI function in token %s\n",
+                pci_dev_str);
+            status = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+        else if (string == NULL)
+        {
+            *pci_func = 0;
+        }
+        else
+        {
+            *pci_func = (NvU32)(*string - '0');
+        }
+
+        domain = simple_strtoul(token, &string, 16);
+
+        if ((string == NULL) || (*string != ':') || (*(string + 1) == '\0'))
+        {
+            nv_printf(NV_DBG_ERRORS,
+                "NVRM: Invalid PCI domain/bus in token %s\n",
+                pci_dev_str);
+            status = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+
+        token = string;
+        bus = simple_strtoul((token + 1), &string, 16);
+
+        if (string == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS,
+                "NVRM: Invalid PCI bus/slot in token %s\n",
+                pci_dev_str);
+            status = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+
+        if (*string != '\0')
+        {
+            if ((*string != ':') || (*(string + 1) == '\0'))
+            {
+                nv_printf(NV_DBG_ERRORS,
+                    "NVRM: Invalid PCI slot in token %s\n",
+                    pci_dev_str);
+                status = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+
+            token = string;
+            slot = (NvU32)simple_strtoul(token + 1, &string, 16);
+            if ((slot == 0) && ((token + 1) == string))
+            {
+                nv_printf(NV_DBG_ERRORS,
+                    "NVRM: Invalid PCI slot in token %s\n",
+                    pci_dev_str);
+                status = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+            *pci_domain = domain;
+            *pci_bus = bus;
+            *pci_slot = slot;
+        }
+        else
+        {
+            *pci_slot = bus;
+            *pci_bus = domain;
+            *pci_domain = 0;
+        }
+        status = NV_OK;
+    }
+    else
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+    }
+
+done:
+    // Freeing the memory allocated by remove_spaces().
+    os_free_mem(option_string);
+    return status;
+}
+
+/*!
+ * @brief This function parses the registry keys per GPU device. It accepts a
+ * semicolon-separated list of key=value pairs. The first key=value pair MUST be
+ * "pci=DDDD:BB:DD.F;" where DDDD is the domain, BB is the bus, DD is the device
+ * slot number and F is the function. This PCI BDF is used to identify which GPU
+ * to assign the registry keys that follow.
+ * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
+ * found, then all the registry keys that follow are skipped, until the next
+ * valid PCI identifier "pci=DDDD:BB:DD.F;" is found. The following are valid
+ * formats for the value of the "pci" string:
+ * 1) bus:slot             : Domain and function default to 0.
+ * 2) domain:bus:slot      : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI dev id string.
+ *
+ * @param[in] sp  pointer to nvidia_stack_t struct.
+ *
+ * @return NV_OK if it succeeds, or an NV_STATUS error code otherwise.
+ */
+NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp)
+{
+    NV_STATUS status = NV_OK;
+    char *option_string = NULL;
+    char *ptr, *token;
+    char *name, *value;
+    NvU32 data, domain, bus, slot, func;
+    nv_linux_state_t *nvl = NULL;
+    nv_state_t *nv = NULL;
+
+    if (NVreg_RegistryDwordsPerDevice != NULL)
+    {
+        if ((option_string = rm_remove_spaces(NVreg_RegistryDwordsPerDevice)) == NULL)
+        {
+            return NV_ERR_GENERIC;
+        }
+
+        ptr = option_string;
+
+        while ((token = strsep(&ptr, ";")) != NULL)
+        {
+            if (!(name = strsep(&token, "=")) || !strlen(name))
+            {
+                continue;
+            }
+
+            if (!(value = strsep(&token, "=")) || !strlen(value))
+            {
+                continue;
+            }
+
+            if (strsep(&token, "=") != NULL)
+            {
+                continue;
+            }
+
+            // If this key is "pci", then the value is a pci_dev id string
+            // which needs special parsing as it is NOT a dword.
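+            //
+            // For example (illustrative values only): with
+            // NVreg_RegistryDwordsPerDevice="pci=0000:01:00.0;SomeKey=0x1",
+            // the "pci" token below caches the GPU at 0000:01:00.0 and the
+            // subsequent SomeKey=0x1 pair is written to that GPU's registry.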
+            if (strcmp(name, NV_REG_PCI_DEVICE_BDF) == 0)
+            {
+                status = pci_str_to_bdf(value, &domain, &bus, &slot, &func);
+
+                // Check if the PCI_DEV id string was in a valid format or NOT.
+                if (NV_OK != status)
+                {
+                    // Reset the cached PCI device.
+                    nv = NULL;
+                }
+                else
+                {
+                    nvl = find_pci(domain, bus, slot, func);
+                    //
+                    // If no GPU is found corresponding to this BDF, then reset
+                    // the cached state. This helps ignore the following
+                    // registry keys until a valid PCI BDF is found in the
+                    // commandline.
+                    //
+                    if (!nvl)
+                    {
+                        nv = NULL;
+                    }
+                    else
+                    {
+                        nv = NV_STATE_PTR(nvl);
+                    }
+                }
+                continue;
+            }
+
+            //
+            // Check if the cached pci_dev string in the commandline is in a
+            // valid format, else we will skip all the successive registry
+            // entries (key=value pairs) until a valid PCI_DEV string is
+            // encountered in the commandline.
+            //
+            if (!nv)
+                continue;
+
+            data = (NvU32)simple_strtoul(value, NULL, 0);
+
+            rm_write_registry_dword(sp, nv, name, data);
+        }
+
+        os_free_mem(option_string);
+    }
+    return status;
+}
+
+/*
+ * Compare a given string UUID with the GpuBlacklist or ExcludedGpus registry
+ * parameter string and return whether the UUID is in the GPU exclusion list.
+ */
+NvBool nv_is_uuid_in_gpu_exclusion_list(const char *uuid)
+{
+    const char *input;
+    char *list;
+    char *ptr;
+    char *token;
+
+    //
+    // When both NVreg_GpuBlacklist and NVreg_ExcludedGpus are defined
+    // NVreg_ExcludedGpus takes precedence.
+    //
+    if (NVreg_ExcludedGpus != NULL)
+        input = NVreg_ExcludedGpus;
+    else if (NVreg_GpuBlacklist != NULL)
+        input = NVreg_GpuBlacklist;
+    else
+        return NV_FALSE;
+
+    if ((list = rm_remove_spaces(input)) == NULL)
+        return NV_FALSE;
+
+    ptr = list;
+
+    while ((token = strsep(&ptr, ",")) != NULL)
+    {
+        if (strcmp(token, uuid) == 0)
+        {
+            os_free_mem(list);
+            return NV_TRUE;
+        }
+    }
+    os_free_mem(list);
+    return NV_FALSE;
+}
+
+NV_STATUS NV_API_CALL os_registry_init(void)
+{
+    nv_parm_t *entry;
+    unsigned int i;
+    nvidia_stack_t *sp = NULL;
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return NV_ERR_NO_MEMORY;
+    }
+
+    if (NVreg_RmMsg != NULL)
+    {
+        rm_write_registry_string(sp, NULL,
+            "RmMsg", NVreg_RmMsg, strlen(NVreg_RmMsg));
+    }
+
+    rm_parse_option_string(sp, NVreg_RegistryDwords);
+
+    for (i = 0; (entry = &nv_parms[i])->name != NULL; i++)
+    {
+        rm_write_registry_dword(sp, NULL, entry->name, *entry->data);
+    }
+
+    nv_kmem_cache_free_stack(sp);
+
+    return NV_OK;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c
new file mode 100644
index 0000000..2022e0f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c
@@ -0,0 +1,78 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+void* NV_API_CALL os_map_user_space(
+    NvU64 start,
+    NvU64 size_bytes,
+    NvU32 mode,
+    NvU32 protect,
+    void **priv_data
+)
+{
+    return (void *)(NvUPtr)start;
+}
+
+void NV_API_CALL os_unmap_user_space(
+    void *address,
+    NvU64 size,
+    void *priv_data
+)
+{
+}
+
+NV_STATUS NV_API_CALL os_match_mmap_offset(
+    void *pAllocPrivate,
+    NvU64 offset,
+    NvU64 *pPageIndex
+)
+{
+    nv_alloc_t *at = pAllocPrivate;
+    NvU64 i;
+
+    for (i = 0; i < at->num_pages; i++)
+    {
+        if (at->flags.contig)
+        {
+            if (offset == (at->page_table[0]->phys_addr + (i * PAGE_SIZE)))
+            {
+                *pPageIndex = i;
+                return NV_OK;
+            }
+        }
+        else
+        {
+            if (offset == at->page_table[i]->phys_addr)
+            {
+                *pPageIndex = i;
+                return NV_OK;
+            }
+        }
+    }
+
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h
new file mode 100644
index 0000000..2ef8458
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _RMP2PDEFINES_H_
+#define _RMP2PDEFINES_H_
+
+#define NVRM_P2P_PAGESIZE_SMALL_4K (4 << 10)
+#define NVRM_P2P_PAGESIZE_BIG_64K  (64 << 10)
+#define NVRM_P2P_PAGESIZE_BIG_128K (128 << 10)
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h
new file mode 100644
index 0000000..505632d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h
@@ -0,0 +1,284 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_address.h                                                      *
+*         Basic class for AUX Address                                       *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_ADDRESS_H
+#define INCLUDED_DP_ADDRESS_H
+
+#include "dp_internal.h"
+
+namespace DisplayPort
+{
+    class Address
+    {
+    public:
+        enum
+        {
+            maxHops = 15,   // update DP_MAX_ADDRESS_HOPS when changed (in displayportCommon.h)
+            maxHopsHDCP = 7,
+            maxPortCount = 15
+        };
+
+        Address()
+        {
+            clear();
+        }
+
+        Address(unsigned hop0)
+        {
+            clear();
+            hop[hops++] = hop0;
+        }
+
+        Address(unsigned hop0, unsigned hop1)
+        {
+            clear();
+            hop[hops++] = hop0;
+            hop[hops++] = hop1;
+        }
+
+        Address(const Address & other)
+        {
+            clear();
+            for (unsigned i = 0; i < other.size(); i++)
+            {
+                append(other[i]);
+            }
+        }
+
+        void clear()
+        {
+            hops = 0;
+            for (unsigned i = 0; i < maxHops; i++)
+            {
+                hop[i] = 0;
+            }
+        }
+
+        Address parent() const
+        {
+            DP_ASSERT(hops != 0);
+            Address addr = *this;
+            addr.hops--;
+            return addr;
+        }
+
+        unsigned tail() const
+        {
+            if (hops == 0)
+            {
+                DP_ASSERT(hops != 0);
+                return 0;
+            }
+            return hop[hops-1];
+        }
+
+        void append(unsigned port)
+        {
+            if (hops >= maxHops)
+            {
+                DP_ASSERT(0);
+                return;
+            }
+            hop[hops++] = port;
+        }
+
+        void prepend(unsigned port)
+        {
+            if (hops >= maxHops)
+            {
+                DP_ASSERT(0);
+                return;
+            }
+            hops++;
+            for (unsigned i = hops - 1; i > 0; i--)
+                hop[i] = hop[i-1];
+            hop[0] = port;
+        }
+
+        void pop()
+        {
+            if (hops == 0)
+            {
+                DP_ASSERT(0);
+                return;
+            }
+            hops--;
+        }
+
+        // Copy assignment: rebuild the address via clear() and append().
+        Address & operator = (const Address & other)
+        {
+            clear();
+            for (unsigned i = 0; i < other.size(); i++)
+            {
+                append(other[i]);
+            }
+
+            return *this;
+        }
+
+        bool operator == (const Address & other) const
+        {
+            if (other.size() != size())
+                return false;
+
+            for (unsigned i = 0; i < hops; i++)
+                if (other[i] != (*this)[i])
+                    return false;
+
+            return true;
+        }
+
+        //
+        // Sort by size first, then "alphabetically" (i.e. lexicographic order).
+        //
+        bool operator > (const Address & other) const
+        {
+            if (size() > other.size())
+                return true;
+            else if (size() < other.size())
+                return false;
+
+            for (unsigned i = 0; i < hops; i++)
+            {
+                if ((*this)[i] > other[i])
+                    return true;
+                else if ((*this)[i] < other[i])
+                    return false;
+            }
+
+            return false;
+        }
+
+        //
+        // Sort by size first, then "alphabetically" (i.e. lexicographic order).
+        //
+        bool operator < (const Address & other) const
+        {
+            if (size() < other.size())
+                return true;
+            else if (size() > other.size())
+                return false;
+
+            for (unsigned i = 0; i < hops; i++)
+            {
+                if ((*this)[i] < other[i])
+                    return true;
+                else if ((*this)[i] > other[i])
+                    return false;
+            }
+
+            return false;
+        }
+
+        bool operator >= (const Address & other) const
+        {
+            return !((*this) < other);
+        }
+
+        bool operator <= (const Address & other) const
+        {
+            return !((*this) > other);
+        }
+
+        bool operator != (const Address & other) const
+        {
+            return !((*this) == other);
+        }
+
+        unsigned size() const
+        {
+            return hops;
+        }
+
+        unsigned & operator [](unsigned index)
+        {
+            DP_ASSERT(index < hops);
+            return hop[index];
+        }
+
+        const unsigned & operator [](unsigned index) const
+        {
+            DP_ASSERT(index < hops);
+            return hop[index];
+        }
+
+        bool under(const Address & root) const
+        {
+            if (size() < root.size())
+                return false;
+
+            for (unsigned i = 0; i < root.size(); i++)
+                if ((*this)[i] != root[i])
+                    return false;
+
+            return true;
+        }
+
+        typedef char StringBuffer[maxHops*3+1];
+        char * toString(StringBuffer & buffer, bool removeLeadingZero = false) const
+        {
+            char * p = &buffer[0];
+            int hopsWritten = 0;
+            for (unsigned i = 0; i < hops; i++)
+            {
+                if (i == 0 && hop[0] == 0 && removeLeadingZero)
+                    continue;
+                if (hopsWritten > 0)
+                    *p++ = '.';
+                if (hop[i] >= 10)
+                    *p++ = (char)(hop[i] / 10 + '0');
+                *p++ = (char)(hop[i] % 10 + '0');
+                hopsWritten++;
+            }
+
+            *p++ = 0;
+            return (char *)&buffer[0];
+        }
+
+        // Large enough to fit 4 hops into every NvU32
+        typedef NvU32 NvU32Buffer[(maxHops-1)/4+1 < 4 ? 4 : (maxHops-1)/4+1];
+        NvU32 * toNvU32Buffer(NvU32Buffer & buffer) const
+        {
+            for (unsigned i = 0; i < hops; i++)
+            {
+                buffer[i/4] |= ((NvU8) hop[i]) << (i % 4) * 8;
+            }
+
+            return (NvU32 *)&buffer[0];
+        }
+
+    private:
+        unsigned hop[maxHops];
+        unsigned hops;
+    };
+}
+
+#endif //INCLUDED_DP_ADDRESS_H
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h
new file mode 100644
index 0000000..12d0388
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h
@@ -0,0 +1,80 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h new file mode 100644 index 0000000..12d0388 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxbus.h * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_AUXBUS_H +#define INCLUDED_DP_AUXBUS_H + +namespace DisplayPort +{ + class AuxBus : virtual public Object + { + public: + enum status + { + success, + defer, + nack, + unSupported, + }; + + enum Action + { + read, + write, + writeStatusUpdateRequest, // I2C only + }; + + enum Type + { + native, + i2c, + i2cMot + }; + + virtual status transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason = NULL, + NvU8 offset = 0, NvU8 nWriteTransactions = 0) = 0; + + virtual unsigned transactionSize() = 0; + virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) { return nack; } + virtual void setDevicePlugged(bool) {} + virtual ~AuxBus() {} + }; + + // + // Wraps an AuxBus interface with one that logs all input and output + // + AuxBus * CreateAuxLogger(AuxBus * auxBus); +} + +#endif //INCLUDED_DP_AUXBUS_H
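As a sketch of what an AuxBus implementation involves (a hypothetical stub, assuming the Object base from dp_object.h; real implementations in the platform layer translate these calls into DPAUX hardware accesses):

class NackingAuxBus : public DisplayPort::AuxBus
{
public:
    // Answer every request with 'nack' and complete zero bytes.
    virtual status transaction(Action action, Type type, int address,
                               NvU8 * buffer, unsigned sizeRequested,
                               unsigned * sizeCompleted,
                               unsigned * pNakReason,
                               NvU8 offset, NvU8 nWriteTransactions)
    {
        *sizeCompleted = 0;
        return nack;
    }

    // Native AUX transfers carry at most 16 bytes per transaction.
    virtual unsigned transactionSize() { return 16; }
};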
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h new file mode 100644 index 0000000..26f34c9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxdefs.h * +* Definitions for DPCD AUX offsets * +* Should be used sparingly (DPCD HAL preferred) * +* * +\***************************************************************************/ + +#ifndef __DP_AUXDEFS_H__ +#define __DP_AUXDEFS_H__ + +#define DPCD_MESSAGEBOX_SIZE 48 + +// +// These definitions are used for Orin HDCP open-sourcing. Ideally they +// should be replaced with build flags. Bug ID: 200733434 +// +#define DP_OPTION_HDCP_SUPPORT_ENABLE 1 /* HDCP Enable */ + +#define DP_OPTION_HDCP_12_ENABLED 1 /* DP1.2 HDCP ENABLE */ + +#define DP_OPTION_QSE_ENABLED 1 /* Remove here when QSE p4r check-in */ + +// +// If a message is outstanding for at least 4 seconds +// assume no reply is coming through +// +#define DPCD_MESSAGE_REPLY_TIMEOUT 4000 + +#define DPCD_LINK_ADDRESS_MESSAGE_RETRIES 20 // 20 retries +#define DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN 10 // 10ms between attempts + +// pointing to the defaults for LAM settings to start with +#define DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES DPCD_LINK_ADDRESS_MESSAGE_RETRIES +#define DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN + +#define DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES 7 // 7 retries +#define DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN +#define DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV 20 // 20ms between attempts + +#define DPCD_QUERY_STREAM_MESSAGE_RETRIES 7 // 7 retries +#define DPCD_QUERY_STREAM_MESSAGE_COOLDOWN 20 // 20ms between attempts + +#define MST_EDID_RETRIES 20 +#define MST_EDID_COOLDOWN 10 + +#define MST_ALLOCATE_RETRIES 10 +#define MST_ALLOCATE_COOLDOWN 10 + +#define HDCP_AUTHENTICATION_RETRIES 6 // 6 retries +#define HDCP_CPIRQ_RXSTAUS_RETRIES 3 +#define HDCP_AUTHENTICATION_COOLDOWN 1000 // 1 sec between attempts +#define HDCP22_AUTHENTICATION_COOLDOWN 2000 // 2 sec between attempts +#define HDCP_AUTHENTICATION_COOLDOWN_HPD 3000 // 3 sec for first stream Add +#define HDCP_CPIRQ_RXSTATUS_COOLDOWN 20 // 20ms between attempts + +// Re-submit the Stream Validation request to the falcon microcontroller after 1 sec if the current request fails +#define HDCP_STREAM_VALIDATION_RESUBMIT_COOLDOWN 1000 + +// +// Wait up to 8 seconds for the KSV and Stream Validation to complete; +// time out otherwise. +// +#define HDCP_STREAM_VALIDATION_REQUEST_COOLDOWN 8000 + +#define DPCD_OUI_NVIDIA 0x00044B + +// +// Maximum number of times to check that the Payload ID table has been +// updated before triggering the ACT sequence. +// +#define PAYLOADIDTABLE_UPDATED_CHECK_RETRIES 300 + +#endif // __DP_AUXDEFS_H__
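These retry/cooldown pairs bound the total time spent per message: for example, a LINK_ADDRESS message may wait up to DPCD_MESSAGE_REPLY_TIMEOUT (4 s) per attempt, with 20 retries spaced 10 ms apart. A sketch of the consumption pattern (hypothetical helper names; the real sequencing lives elsewhere in the library):

// Hypothetical retry loop pairing a RETRIES count with its COOLDOWN.
static bool sendWithRetry(bool (*trySend)(), unsigned retries, unsigned cooldownMs)
{
    for (unsigned attempt = 0; attempt < retries; attempt++)
    {
        if (trySend())
            return true;          // message acknowledged
        sleepMs(cooldownMs);      // hypothetical sleep; the library uses its Timer
    }
    return false;                 // retries exhausted
}
// e.g. sendWithRetry(sendLinkAddressMessage,            // hypothetical sender
//                    DPCD_LINK_ADDRESS_MESSAGE_RETRIES,
//                    DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN);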
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h new file mode 100644 index 0000000..2f20949 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h @@ -0,0 +1,181 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxretry.h * +* Adapter interface for friendlier AuxBus * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_AUXRETRY_H +#define INCLUDED_DP_AUXRETRY_H + +#include "dp_auxbus.h" +#include "dp_timeout.h" + +namespace DisplayPort +{ + enum + { + minimumRetriesOnDefer = 7 + }; + + class AuxRetry + { + AuxBus * aux; + public: + AuxRetry(AuxBus * aux = 0) + : aux(aux) + { + } + + AuxBus * getDirect() + { + return aux; + } + + enum status + { + ack, + nack, + unsupportedRegister, + defer + }; + + // + // Perform an aux read transaction. + // - Automatically handles defers up to retry limit + // - Retries on partial read + // + virtual status readTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Similar to readTransaction except that it supports reading + // larger spans than AuxBus::transactionSize() + // + virtual status read(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Perform an aux write transaction. + // - Automatically handles defers up to retry limit + // - Retries on partial write + // + virtual status writeTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Similar to writeTransaction except that it supports writing + // larger spans than AuxBus::transactionSize() + // + virtual status write(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + }; + + class AuxLogger : public AuxBus + { + AuxBus * bus; + char hex[256]; + char hex_body[256]; + char hint[128]; + + public: + AuxLogger(AuxBus * bus) : bus(bus) + { + } + + const char * getAction(Action action) + { + if (action == read) + return "rd "; + else if (action == write) + return "wr "; + else if (action == writeStatusUpdateRequest) + return "writeStatusUpdateRequest "; + else + DP_ASSERT(0); + return "???"; + } + + const char * getType(Type typ) + { + if (typ == native) + return ""; + else if (typ == i2c) + return "i2c "; + else if (typ == i2cMot) + return "i2cMot "; + else + DP_ASSERT(0); + return "???"; + } + + const char * getStatus(status stat) + { + if (stat == success) + return ""; + else if (stat == nack) + return "(nack) "; + else if (stat == defer) + return "(defer) "; + else + DP_ASSERT(0); + return "???"; + } + + const char * getRequestId(unsigned requestIdentifier) + { + switch(requestIdentifier) + { + case 0x1: return "LINK_ADDRESS"; + case 0x4: return "CLEAR_PAT"; + case 0x10: return "ENUM_PATH"; + case 0x11: return "ALLOCATE"; + case 0x12: return "QUERY"; + case 0x20: return "DPCD_READ"; + case 0x21: return "DPCD_WRITE"; + case 0x22: return "I2C_READ"; + case 0x23: return "I2C_WRITE"; + case 0x24: return "POWER_UP_PHY"; + case 0x25: return "POWER_DOWN_PHY"; + case 0x38: return "HDCP_STATUS"; + default: return ""; + } + } + + virtual status transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions); + + virtual unsigned transactionSize() + { + return bus->transactionSize(); + } + + virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) + { + return bus->fecTransaction(fecStatus, fecErrorCount, flags); + } + }; +} + +#endif //INCLUDED_DP_AUXRETRY_H
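A short sketch of the AuxRetry wrapper above in use, reading the standard DPCD revision register at offset 0x00000 (DPCD_REV; major version in the upper nibble, minor in the lower):

static bool readDpcdRevision(DisplayPort::AuxRetry & aux, NvU8 & rev)
{
    // One-byte read with automatic retry on defer (default retry count).
    return aux.read(0x00000 /* DPCD_REV */, &rev, 1) == DisplayPort::AuxRetry::ack;
}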
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h new file mode 100644 index 0000000..3d01f74 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_bitstream.h * +* This is an implementation of the big endian bit stream * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_BITSTREAM_H +#define INCLUDED_DP_BITSTREAM_H + +#include "dp_buffer.h" + +namespace DisplayPort +{ + // + // Bitstream reader interface + // - reads a packed stream of bits in Big Endian format + // - handles alignment, buffering, and buffer bounds checking + // + class BitStreamReader + { + Buffer * sourceBuffer; + unsigned bitsOffset; + unsigned bitsEnd; + + public: + // Read 1-32 bits from the stream into *value. Returns true on success + bool read(unsigned * value, unsigned bits); + + // Read 1-32 bits from the stream. Returns 'defaultValue' on failure. + unsigned readOrDefault(unsigned bits, unsigned defaultValue); + + // Skip bits until we're aligned to the power of two alignment + bool align(unsigned align); + + unsigned offset(); + Buffer * buffer(); + BitStreamReader(Buffer * buffer, unsigned bitsOffset, unsigned bitsCount); + }; + + // + // Bitstream writer interface + // + class BitStreamWriter + { + Buffer * targetBuffer; + unsigned bitsOffset; + public: + // + // Create a bitstream writer at a specific bit offset + // into an already existing buffer + // + BitStreamWriter(Buffer * buffer, unsigned bitsOffset = 0); + + // + // Write n bits to the buffer in big endian format. + // No buffering is performed. + // + bool write(unsigned value, unsigned bits); + + // + // Emit zeros until the offset is divisible by align. + // CAVEAT: align must be a power of 2 (e.g. 8) + // + bool align(unsigned align); + + // + // Get current offset and buffer target + // + unsigned offset(); + Buffer * buffer(); + }; +} + +#endif //INCLUDED_DP_BITSTREAM_H
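A minimal round trip through the bitstream classes above, assuming a Buffer from dp_buffer.h (declared next in this patch); the big-endian rule means the first bits written land in the most significant bits of the first byte:

static void bitstreamSketch(DisplayPort::Buffer & buf)
{
    DisplayPort::BitStreamWriter writer(&buf);
    writer.write(0x5, 4);                      // upper nibble of byte 0
    writer.write(0xA, 4);                      // lower nibble -> byte 0 == 0x5A
    writer.align(8);                           // already byte aligned: no-op
    writer.write(0xC3, 8);                     // byte 1

    DisplayPort::BitStreamReader reader(&buf, 0, writer.offset());
    DP_ASSERT(reader.readOrDefault(4, 0) == 0x5);
    DP_ASSERT(reader.readOrDefault(4, 0) == 0xA);
}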
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h new file mode 100644 index 0000000..6d3e6f5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_buffer.h * +* Resizable byte buffer and stream classes * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_BUFFER_H +#define INCLUDED_DP_BUFFER_H + +#include "dp_internal.h" + +namespace DisplayPort +{ + class Buffer + { + public: + NvU8 *data; // Data buffer + unsigned length; // bytes used + unsigned capacity; // size of allocation + bool errorState; // set when an allocation fails + public: + // + // Write will only fail if we're unable to reallocate the buffer. In this case + // the buffer will be reset to its empty state. + // + const NvU8 * getData() const { return data; } + NvU8 * getData() { return data; } + bool resize(unsigned newSize); + void memZero(); + void reset(); + unsigned getLength() const { return length; } + + // Is in error state? This happens if malloc fails. Error state is + // held until reset is called.
+ bool isError() const; + + Buffer(const Buffer & other); + Buffer(NvU8 * data, unsigned size); + Buffer & operator = (const Buffer & other); + Buffer(); + ~Buffer(); + + void swap(Buffer & other) { + swap_args(other.data, data); + swap_args(other.length, length); + swap_args(other.capacity, capacity); + swap_args(other.errorState, errorState); + } + + bool operator== (const Buffer & other) const; + }; + + class Stream + { + protected: + Buffer * parent; + unsigned byteOffset; + public: + Stream(Buffer * buffer); + bool seek(unsigned where); + bool read(NvU8 * buffer, unsigned size); + bool write(NvU8 * buffer, unsigned size); + + // returns error state of buffer + bool isError() const; + unsigned remaining(); + unsigned offset(); + }; + + void swapBuffers(Buffer & left, Buffer & right); +} + +#endif //INCLUDED_DP_BUFFER_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h new file mode 100644 index 0000000..bf563aa --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h @@ -0,0 +1,535 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_configcaps.h * +* Abstraction for basic caps registers * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_CONFIGCAPS_H +#define INCLUDED_DP_CONFIGCAPS_H + +#include "dp_connector.h" +#include "dp_auxretry.h" +#include "dp_linkconfig.h" +#include "dp_regkeydatabase.h" + +namespace DisplayPort +{ + enum PowerState + { + PowerStateD0 = 1, + PowerStateD3 = 2, + PowerStateD3AuxOn = 5 + }; + + // Extended caps = offset 0x80 + enum DwnStreamPortType + { + DISPLAY_PORT = 0, + ANALOG_VGA, + DVI, + HDMI, + WITHOUT_EDID, + DISPLAY_PORT_PLUSPLUS + } ; + + enum DwnStreamPortAttribute + { + RESERVED = 0, + IL_720_480_60HZ, + IL_720_480_50HZ, + IL_1920_1080_60HZ, + IL_1920_1080_50HZ, + PG_1280_720_60HZ, + PG_1280_720_50_HZ, + } ; + + // DPCD Offset 102 enums + enum TrainingPatternSelectType + { + TRAINING_DISABLED, + TRAINING_PAT_ONE, + TRAINING_PAT_TWO, + TRAINING_PAT_THREE, + }; + + enum SymbolErrorSelectType + { + DISPARITY_ILLEGAL_SYMBOL_ERROR, + DISPARITY_ERROR, + ILLEGAL_SYMBOL_ERROR, + }; + + // DPCD Offset 1A1 enums + enum MultistreamHotplugMode + { + HPD_LONG_PULSE, + IRQ_HPD, + }; + + // DPCD Offset 220 + enum TestPatternType + { + NO_PATTERN, + COLOR_RAMPS, + BLACK_WHITE, + COLOR_SQUARE, + } ; + + // DPCD Offset 232, 233 + enum ColorFormatType + { + RGB, + YCbCr_422, + YCbCr_444, + } ; + + enum DynamicRangeType + { + VESA, + CEA, + } ; + + enum YCBCRCoeffType + { + ITU601, + ITU709, + } ; + + #define HDCP_BCAPS_SIZE (0x1) + #define HDCP_VPRIME_SIZE (0x14) + #define HDCP_KSV_FIFO_SIZE (0xF) + #define HDCP_KSV_FIFO_WINDOWS_RETRY (0x3) + #define HDCP22_BCAPS_SIZE (0x1) + + // Bstatus DPCD offset 0x68029 + #define HDCPREADY (0x1) + #define R0PRIME_AVAILABLE (0x2) + #define LINK_INTEGRITY_FAILURE (0x4) + #define REAUTHENTICATION_REQUEST (0x8) + + struct BInfo + { + bool maxCascadeExceeded; + unsigned depth; + bool maxDevsExceeded; + unsigned deviceCount; + }; + + struct BCaps + { + bool repeater; + bool HDCPCapable; + }; + + enum + { + PHYSICAL_PORT_START = 0x0, + PHYSICAL_PORT_END = 0x7, + LOGICAL_PORT_START = 0x8, + LOGICAL_PORT_END = 0xF + }; + + class LaneStatus + { + public: + // + // Lane Status + // CAUTION: Only updated on IRQ/HPD right now + // + virtual bool getLaneStatusClockRecoveryDone(int lane) = 0; // DPCD offset 202, 203 + virtual bool getLaneStatusSymbolLock(int lane)= 0; + virtual bool getInterlaneAlignDone() = 0; + virtual bool getDownStreamPortStatusChange() = 0; + }; + + class TestRequest + { + public: + virtual bool getPendingTestRequestTraining() = 0; // DPCD offset 218 + virtual void getTestRequestTraining(LinkRate & rate, unsigned & lanes) = 0; // DPCD offset 219, 220 + virtual bool getPendingAutomatedTestRequest() = 0; // DPCD offset 218 + virtual bool getPendingTestRequestEdidRead() = 0; // DPCD offset 218 + virtual bool getPendingTestRequestPhyCompliance() = 0; // DPCD offset 218 + virtual LinkQualityPatternType getPhyTestPattern() = 0; // DPCD offset 248 + virtual AuxRetry::status setTestResponse(bool ack, bool edidChecksumWrite = false) = 0; + virtual AuxRetry::status setTestResponseChecksum(NvU8 checksum) = 0; + }; + + class LegacyPort + { + public: + virtual DwnStreamPortType getDownstreamPortType() = 0; + virtual DwnStreamPortAttribute getDownstreamNonEDIDPortAttribute() = 0; + + // For port type = HDMI + virtual NvU64 getMaxTmdsClkRate() = 0; + }; + + class LinkState + { + public: + // + // Link state + 
// + virtual bool isPostLtAdjustRequestSupported() = 0; + virtual void setPostLtAdjustRequestGranted(bool bGrantPostLtRequest) = 0; + virtual bool getIsPostLtAdjRequestInProgress() = 0; // DPCD offset 204 + virtual TrainingPatternSelectType getTrainingPatternSelect() = 0; // DPCD offset 102 + + virtual bool setTrainingMultiLaneSet(NvU8 numLanes, + NvU8 *voltSwingSet, + NvU8 *preEmphasisSet) = 0; + + virtual bool readTraining(NvU8* voltageSwingLane, + NvU8* preemphasisLane = 0, + NvU8* trainingScoreLane = 0, + NvU8* postCursor = 0, + NvU8 activeLaneCount = 0) = 0; + + virtual bool isLaneSettingsChanged(NvU8* oldVoltageSwingLane, + NvU8* newVoltageSwingLane, + NvU8* oldPreemphasisLane, + NvU8* newPreemphasisLane, + NvU8 activeLaneCount) = 0; + + virtual AuxRetry::status setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn) = 0; + virtual AuxRetry::status setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern) = 0; + virtual AuxRetry::status setLinkQualPatternSet(LinkQualityPatternType linkQualPattern, unsigned laneCount = 0) = 0; + }; + + class LinkCapabilities + { + public: + + // + // Physical layer feature set + // + virtual NvU64 getMaxLinkRate() = 0; // Maximum byte-block in Hz + virtual unsigned getMaxLaneCount() = 0; // DPCD offset 2 + virtual unsigned getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate) = 0; + virtual bool getEnhancedFraming() = 0; + virtual bool getSupportsNoHandshakeTraining() = 0; + virtual bool getMsaTimingparIgnored() = 0; + virtual bool getDownstreamPort(NvU8 *portType) = 0; // DPCD offset 5 + virtual bool getSupportsMultistream() = 0; // DPCD offset 21h + virtual bool getNoLinkTraining() = 0; // DPCD offset 330h + virtual unsigned getPhyRepeaterCount() = 0; // DPCD offset F0002h + }; + + class OUI + { + public: + virtual bool getOuiSupported() = 0; + virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) = 0; + virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0; + }; + + class HDCP + { + public: + virtual bool getBKSV(NvU8 *bKSV) = 0; // DPCD offset 0x68000 + virtual bool getBCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0; // DPCD offset 0x68028 + virtual bool getHdcp22BCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0; // DPCD offset 0x6921D + virtual bool getBinfo(BInfo &bInfo) = 0; // DPCD offset 0x6802A + + // Generic interfaces for HDCP 1.x / 2.2 + virtual bool getRxStatus(const HDCPState &hdcpState, NvU8 *data) = 0; + };
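A sketch against the OUI interface above (DPCDHAL below implements it); DPCD_OUI_NVIDIA comes from dp_auxdefs.h earlier in this patch:

static bool sinkIsNvidia(DisplayPort::OUI & oui)
{
    unsigned ouiId = 0;
    char model[16] = {0};
    NvU8 chipRevision = 0;
    if (!oui.getOuiSink(ouiId, model, sizeof(model), chipRevision))
        return false;
    return ouiId == DPCD_OUI_NVIDIA;   // 0x00044B
}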
+ + class DPCDHAL : + virtual public Object, + public TestRequest, + public LaneStatus, + public LinkState, + public LinkCapabilities, + public OUI, + public HDCP + { + public: + // + // Notifications of external events + // We send IRQ/HPD events to the HAL so that it knows + // when to re-read the registers. All the remaining + // calls are either accessors to cached state (caps), + // or DPCD get/setters + // + virtual void notifyIRQ() = 0; + virtual void notifyHPD(bool status, bool bSkipDPCDRead = false) = 0; + + virtual void populateFakeDpcd() = 0; + + // DPCD override routines + virtual void overrideMaxLinkRate(NvU32 overrideMaxLinkRate) = 0; + virtual void overrideMaxLaneCount(NvU32 maxLaneCount) = 0; + virtual void skipCableBWCheck(NvU32 maxLaneAtHighRate, NvU32 maxLaneAtLowRate) = 0; + virtual void overrideOptimalLinkCfg(LinkRate optimalLinkRate, NvU32 optimalLaneCount) = 0; + virtual void overrideOptimalLinkRate(LinkRate optimalLinkRate) = 0; + + virtual bool isDpcdOffline() = 0; + virtual void setAuxBus(AuxBus * bus) = 0; + virtual NvU32 getVideoFallbackSupported() = 0; + // + // Cached CAPS + // These are only re-read when notifyHPD is called + // + virtual unsigned getRevisionMajor() = 0; + virtual unsigned getRevisionMinor() = 0; + + virtual unsigned lttprGetRevisionMajor() = 0; + virtual unsigned lttprGetRevisionMinor() = 0; + + virtual bool getSDPExtnForColorimetry() = 0; + + bool isAtLeastVersion(unsigned major, unsigned minor) + { + if (getRevisionMajor() > major) + return true; + + if (getRevisionMajor() < major) + return false; + + return getRevisionMinor() >= minor; + } + + bool isVersion(unsigned major, unsigned minor) + { + if ((getRevisionMajor() == major) && + (getRevisionMinor() == minor)) + return true; + + return false; + } + + bool lttprIsAtLeastVersion(unsigned major, unsigned minor) + { + if (lttprGetRevisionMajor() > major) + return true; + + if (lttprGetRevisionMajor() < major) + return false; + + return lttprGetRevisionMinor() >= minor; + } + + bool lttprIsVersion(unsigned major, unsigned minor) + { + if ((lttprGetRevisionMajor() == major) && + (lttprGetRevisionMinor() == minor)) + return true; + + return false; + } + + // Convert Link Bandwidth read from DPCD register to Linkrate + NvU64 mapLinkBandiwdthToLinkrate(NvU32 linkBandwidth) + { + if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _1_62_GBPS, linkBandwidth)) + return RBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _2_70_GBPS, linkBandwidth)) + return HBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _5_40_GBPS, linkBandwidth)) + return HBR2; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_BANDWIDTH, _VAL, _8_10_GBPS, linkBandwidth)) + return HBR3; + else + { + DP_ASSERT(0 && "Unknown link bandwidth. 
Assuming HBR"); + return HBR; + } + } + + // + // Native aux transaction size (16 for AUX) + // + virtual size_t getTransactionSize() = 0; + + // + // SST Branching device/dongle/repeater + // - Describes downstream port limitations + // - Not for use with MST + // - Primarily used for dongles (look at port 0 for pclk limits) + // + virtual LegacyPort * getLegacyPort(unsigned index) = 0; + virtual unsigned getLegacyPortCount() = 0; + + virtual PCONCaps * getPCONCaps() = 0; + + // + // Single stream specific caps + // + virtual unsigned getNumberOfAudioEndpoints() = 0; + virtual int getSinkCount() = 0; + virtual void setSinkCount(int sinkCount) = 0; + + // + // MISC + // + virtual bool isPC2Disabled() = 0; + virtual void setPC2Disabled(bool disabled) = 0; + + virtual void setDPCDOffline(bool enable) = 0; + virtual void updateDPCDOffline() = 0; + + virtual void setSupportsESI(bool bIsESISupported) = 0; + virtual void setLttprSupported(bool isLttprSupported) = 0; + + // + // Intermediate Link Rate (eDP ILR) + // + virtual void setIndexedLinkrateEnabled(bool newVal) = 0; + virtual bool isIndexedLinkrateEnabled() = 0; + virtual bool isIndexedLinkrateCapable() = 0; + virtual NvU16 *getLinkRateTable() = 0; + virtual bool getRawLinkRateTable(NvU8 *buffer = NULL) = 0; + + // + // Link power state management + // + virtual bool setPowerState(PowerState newState) = 0; + virtual PowerState getPowerState() = 0; + // + // Multistream + // + virtual bool getGUID(GUID & guid) = 0; // DPCD offset 30 + virtual AuxRetry::status setGUID(GUID & guid) = 0; + virtual AuxRetry::status setMessagingEnable(bool uprequestEnable, bool upstreamIsSource) = 0; + virtual AuxRetry::status setMultistreamLink(bool bMultistream) = 0; + virtual void payloadTableClearACT() = 0; + virtual bool payloadWaitForACTReceived() = 0; + virtual bool payloadAllocate(unsigned streamId, unsigned begin, unsigned count) = 0; + virtual bool clearPendingMsg() = 0; + virtual bool isMessagingEnabled() = 0; + + // + // If set to IRQ we'll receive CSN messages on hotplugs (which are actually easy to miss). + // If set to HPD mode we'll always receive an HPD whenever the topology changes. + // The library supports using both modes. 
+ // + virtual AuxRetry::status setMultistreamHotplugMode(MultistreamHotplugMode notifyType) = 0; + + // + // Interrupts + // + virtual bool interruptContentProtection() = 0; + virtual void clearInterruptContentProtection() = 0; + + virtual bool intteruptMCCS() = 0; + virtual void clearInterruptMCCS() = 0; + + virtual bool interruptDownReplyReady() = 0; + virtual void clearInterruptDownReplyReady() = 0; + + virtual bool interruptUpRequestReady() = 0; + virtual void clearInterruptUpRequestReady() = 0; + + virtual bool interruptCapabilitiesChanged() = 0; + virtual void clearInterruptCapabilitiesChanged() = 0; + + virtual bool getLinkStatusChanged() = 0; + virtual void clearLinkStatusChanged() = 0; + + virtual bool getHdmiLinkStatusChanged() = 0; + virtual void clearHdmiLinkStatusChanged() = 0; + + virtual bool getStreamStatusChanged() = 0; + virtual void clearStreamStatusChanged() =0; + + virtual void setDirtyLinkStatus(bool dirty) = 0; + virtual void refreshLinkStatus() = 0; + virtual bool isLinkStatusValid(unsigned lanes) = 0; + + virtual void getCustomTestPattern(NvU8 *testPattern) = 0; // DPCD offset 250 - 259 + + // + // Message Boxes + // + virtual AuxRetry::status writeDownRequestMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getDownRequestMessageBoxSize() = 0; + + virtual AuxRetry::status writeUpReplyMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getUpReplyMessageBoxSize() = 0; + + virtual AuxRetry::status readDownReplyMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getDownReplyMessageBoxSize() = 0; + + virtual AuxRetry::status readUpRequestMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getUpRequestMessageBoxSize() = 0; + + // MST<->SST override + virtual void overrideMultiStreamCap(bool mstCapable) = 0; + virtual bool getMultiStreamCapOverride() = 0; + + virtual bool getDpcdMultiStreamCap(void) = 0; + + // Set GPU DP support capability + virtual void setGpuDPSupportedVersions(bool supportDp1_2, bool supportDp1_4) = 0; + + // Set GPU FEC support capability + virtual void setGpuFECSupported(bool bSupportFEC) = 0; + + virtual void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) = 0; + + // PCON configuration + + // Reset PCON (to default state) + virtual void resetProtocolConverter() = 0; + // Source control mode and FRL/HDMI mode selection. 
+ virtual bool setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode) = 0; + + virtual bool checkPCONFrlReady(bool *bFrlReady) = 0; + virtual bool setupPCONFrlLinkAssessment(NvU32 linkBw, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) = 0; + + virtual bool checkPCONFrlLinkStatus(NvU32 *frlRate) = 0; + + virtual bool queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady) = 0; + virtual NvU32 restorePCONFrlLink(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) = 0; + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) = 0; + virtual bool updatePsrConfiguration(vesaPsrConfig config) = 0; + virtual bool readPsrConfiguration(vesaPsrConfig *config) = 0; + virtual bool readPsrState(vesaPsrState *psrState) = 0; + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) = 0; + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) = 0; + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) = 0; + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr) = 0; + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr) = 0; + + virtual ~DPCDHAL() {} + + }; + + // + // Implement interface + // + DPCDHAL * MakeDPCDHAL(AuxBus * bus, Timer * timer); +} + +#endif //INCLUDED_DP_CONFIGCAPS_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h new file mode 100644 index 0000000..831bd72 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h @@ -0,0 +1,681 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_connector.h * +* This is the primary client interface. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CONNECTOR_H +#define INCLUDED_DP_CONNECTOR_H + +#include "dp_auxdefs.h" +#include "dp_object.h" +#include "dp_mainlink.h" +#include "dp_auxbus.h" +#include "dp_address.h" +#include "dp_guid.h" +#include "dp_evoadapter.h" +#include "dp_auxbus.h" +#include "dp_auxretry.h" +#include "displayport.h" +#include "dp_vrr.h" +#include "../../modeset/timing/nvt_dsc_pps.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" + +namespace DisplayPort +{ + class EvoInterface; + + typedef enum + { + DP_TESTMESSAGE_STATUS_SUCCESS = 0, + DP_TESTMESSAGE_STATUS_ERROR = 0xDEADBEEF, + DP_TESTMESSAGE_STATUS_ERROR_INSUFFICIENT_INPUT_BUFFER = 0xDEADBEED, + DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM = 0xDEADBEEC + // new error code should be here + } DP_TESTMESSAGE_STATUS; + + typedef enum + { + False = 0, + True = 1, + Indeterminate = 2 + } TriState; + + enum ConnectorType + { + connectorDisplayPort, + connectorHDMI, + connectorDVI, + connectorVGA + }; + + typedef struct portMap + { + NvU16 validMap; // port i is valid = bit i is high + NvU16 inputMap; // port i is input port = bit i is high && validMap bit i is high + NvU16 internalMap; // port i is internal = bit i is high && validMap bit i is high + } PortMap; + + enum ForceDsc + { + DSC_DEFAULT, + DSC_FORCE_ENABLE, + DSC_FORCE_DISABLE + }; + + struct DpModesetParams + { + unsigned headIndex; + ModesetInfo modesetInfo; + DP_COLORFORMAT colorFormat; + NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS msaparams; + + DpModesetParams() : headIndex(0), modesetInfo(), colorFormat(dpColorFormat_Unknown), msaparams() {} + + DpModesetParams(unsigned newHeadIndex, + ModesetInfo newModesetInfo, + DP_COLORFORMAT newColorFormat = dpColorFormat_Unknown) : + headIndex(newHeadIndex), + modesetInfo(newModesetInfo), + colorFormat(newColorFormat), + msaparams() {} + + DpModesetParams(unsigned newHeadIndex, + ModesetInfo *newModesetInfo, + DP_COLORFORMAT newColorFormat = dpColorFormat_Unknown) : + headIndex(newHeadIndex), + modesetInfo(*newModesetInfo), + colorFormat(newColorFormat), + msaparams() {} + + }; + + struct DscOutParams + { + unsigned PPS[DSC_MAX_PPS_SIZE_DWORD]; // Out - PPS SDP data + }; + + struct DscParams + { + bool bCheckWithDsc; // [IN] - Client telling DP Library to check with DSC. + ForceDsc forceDsc; // [IN] - Client telling DP Library to force enable/disable DSC + DSC_INFO::FORCED_DSC_PARAMS* forcedParams; // [IN] - Client telling DP Library to force certain DSC params. + bool bEnableDsc; // [OUT] - DP Library telling client that DSC is needed for this mode. + unsigned bitsPerPixelX16; // [IN/OUT] - Bits per pixel value multiplied by 16 + DscOutParams *pDscOutParams; // [OUT] - DSC parameters + + DscParams() : bCheckWithDsc(false), forceDsc(DSC_DEFAULT), forcedParams(NULL), bEnableDsc(false), bitsPerPixelX16(0), pDscOutParams(NULL) {} + }; + + class Group; + + bool SetConfigSingleHeadMultiStreamMode(Group **targets, // Array of group pointers given for getting configured in single head multistream mode. + NvU32 displayIDs[], // Array of displayIDs given for getting configured in single head multistream mode. + NvU32 numStreams, // Number of streams driven out from single head. + DP_SINGLE_HEAD_MULTI_STREAM_MODE mode, // Configuration mode : SST or MST + bool bSetConfig, // Set or clear the configuration. 
+ NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, // VBIOS primary display ID index in displayIDs[] array + bool bEnableAudioOverRightPanel = false); // Audio MUX config : right or left panel + + // + // Device object + // This object represents a displayport device. Devices are not reported + // to clients until the EDID is already on file. + // + class Device : virtual public Object + { + public: + + virtual bool isPlugged() = 0; + virtual bool isVideoSink() = 0; // Invariant: won't change once reported + virtual bool isAudioSink() = 0; // Invariant + + virtual bool isLoop() = 0; // the address starts and ends at the same device + virtual bool isRedundant() = 0; + virtual bool isMustDisconnect() = 0; // Is this monitor's head being attached preventing + // us from enumerating other panels? + virtual bool isZombie() = 0; // Head is attached but we're not connected + virtual bool isCableOk() = 0; // cable may be bad: we saw HPD and the device is connected, but we can't talk over AUX + + virtual bool isLogical() = 0; // Is device connected to logical port + + virtual Address getTopologyAddress() const = 0; // Invariant + virtual bool isMultistream() = 0; + + virtual ConnectorType getConnectorType() = 0; // Invariant + + virtual unsigned getEDIDSize() const = 0; // Invariant + // Copies EDID into client buffer. Fails if the buffer is too small + virtual bool getEDID(char * buffer, unsigned size) const = 0; + + virtual unsigned getRawEDIDSize() const = 0; + // Copies RawEDID into client buffer. Fails if the buffer is too small + virtual bool getRawEDID(char * buffer, unsigned size) const = 0; + + virtual bool getPCONCaps(PCONCaps *pPCONCaps) = 0; + + virtual bool isFallbackEdid() = 0; // is the device edid a fallback one? + virtual GUID getGUID() const = 0; // Returns the GUID for the device + virtual bool isPowerSuspended() = 0; + virtual bool isActive() = 0; // Whether the device has a head attached to it + virtual TriState hdcpAvailableHop() = 0; // Whether the device supports HDCP, + // regardless of whether the path leading to it supports HDCP. + virtual TriState hdcpAvailable() = 0; // Whether HDCP can be enabled. + // Note this checks that the entire path to the node supports HDCP. + + virtual PortMap getPortMap() const = 0; + + virtual void setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn) = 0; + virtual Group * getOwningGroup() = 0; // Return the group this device is currently a member of + + virtual AuxBus * getRawAuxChannel() = 0; // No automatic retry on DEFER. See limitations in dp_auxbus.h + virtual AuxRetry * getAuxChannel() = 0; // User friendly AUX interface + + virtual Device * getParent() = 0; + virtual Device * getChild(unsigned portNumber) = 0; + + virtual void dpcdOverrides() = 0; // Apply DPCD overrides if required + + virtual bool getDpcdRevision(unsigned * major, unsigned * minor) = 0; // get the dpcd revision (maybe cached) + + virtual bool getSDPExtnForColorimetrySupported() = 0; + + virtual bool getIgnoreMSACap() = 0; + + virtual AuxRetry::status setIgnoreMSAEnable(bool msaTimingParamIgnoreEn) = 0; + + virtual NvBool isDSCPossible() = 0; + + virtual NvBool isDSCSupported() = 0; + + virtual DscCaps getDscCaps() = 0;
+ + // + // This function returns the device itself or its parent device that is doing + // DSC decompression for it. + // + virtual Device* getDevDoingDscDecompression() = 0; + virtual void markDeviceForDeletion() = 0; + + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) = 0; + + // This interface is still nascent. Please don't use it. Read size limit is 16 bytes. + virtual AuxBus::status getDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason = NULL) = 0; + + virtual AuxBus::status setDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason = NULL) = 0; + + virtual AuxBus::status dscCrcControl(NvBool bEnable, gpuDscCrc *gpuData, sinkDscCrc *sinkData) = 0; + virtual AuxBus::status queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) = 0; + + // + // The address sent here will be right-shifted by the library. DD should + // send the DDC address without the shift. + // Parameter bForceMot in both getI2cData and setI2cData is used to forcefully set + // the MOT bit. It is needed for some special cases where the MOT bit shouldn't + // be set but some customers need it to please their monitors. + // + virtual bool getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false) = 0; + virtual bool setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false) = 0; + + // + // Calls VRR enablement implementation in dp_vrr.cpp. + // The enablement steps include interaction over DPAux in the vendor specific + // DPCD space. + // + virtual bool startVrrEnablement() = 0; // VF: calls actual enablement code. + virtual void resetVrrEnablement() = 0; // VF: resets enablement state. + virtual bool isVrrMonitorEnabled() = 0; // VF: gets monitor enablement state. + virtual bool isVrrDriverEnabled() = 0; // VF: gets driver enablement state. + + // If the sink supports MSA override in MST environment. + virtual bool isMSAOverMSTCapable() = 0; + virtual bool isFakedMuxDevice() = 0; + + virtual bool setPanelReplayConfig(panelReplayConfig prcfg) = 0; + virtual bool isPanelReplaySupported() = 0; + + protected: + virtual ~Device() {} + + }; + + class Group : virtual public Object + { + public: + + // + // Routines for changing which panels are in a group. To move a stream to a new + // monitor without a modeset: + // remove(old_panel) + // insert(new_panel) + // The library will automatically switch over to the new configuration + // + virtual void insert(Device * dev) = 0; + virtual void remove(Device * dev) = 0; + + // + // group->enumDevices(0) - Get first element + // group->enumDevices(i) - Get next element + // + // for (Device * i = group->enumDevices(0); i; i = group->enumDevices(i)) + // + virtual Device * enumDevices(Device * previousDevice) = 0; + + virtual void destroy() = 0; // Destroy the group object + + // Queries the encryption status for the stream. + // Returns whether encryption is currently enabled. + virtual bool hdcpGetEncrypted() = 0; + + protected: + virtual ~Group() {} + + };
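A sketch combining the Device and Group interfaces above: walk a group and read one DPCD byte from each member (0x200 is the standard SINK_COUNT offset; error handling elided):

static void readSinkCounts(DisplayPort::Group * group)
{
    for (DisplayPort::Device * dev = group->enumDevices(0);
         dev; dev = group->enumDevices(dev))
    {
        NvU8 sinkCount = 0;
        unsigned completed = 0;
        if (dev->getDpcdData(0x200 /* SINK_COUNT */, &sinkCount, 1, &completed) ==
            DisplayPort::AuxBus::success)
        {
            // sinkCount now holds the raw SINK_COUNT field for this device
        }
    }
}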
+ + class Connector : virtual public Object + { + public: + // + // Normally the Connector::EventSink callbacks can occur in response to the following: + // 1. Timer callbacks + // 2. notifyLongPulse/notifyShortPulse + // + class EventSink + { + public: + virtual void newDevice(Device * dev) = 0; // New device appears in topology + virtual void lostDevice(Device * dev) = 0; // Lost device from topology + // Device object ceases to exist after this call + + virtual void notifyMustDisconnect(Group * grp) = 0; // Notification that an attached head is preventing + // us from completing detection of a newly connected device + + virtual void notifyDetectComplete() = 0; // Rolling call. Happens every time we've done another full + // detect on the topology + + virtual void bandwidthChangeNotification(Device * dev, bool isComplianceMode) = 0; // Available bandwidth to panel has changed, or panel has + // become a zombie + + virtual void notifyZombieStateChange(Device * dev, bool zombied) = 0; // Notification that a zombie device was attached or detached + virtual void notifyCableOkStateChange(Device * dev, bool cableOk) = 0; // Notification that the device's cable state changed (true - cable is good, false - cable is bad) + virtual void notifyHDCPCapDone(Device * dev, bool hdcpCap) = 0; // Notification that the device's HDCP cap detection is done and the state changed + virtual void notifyMCCSEvent(Device * dev) = 0; // Notification that an MCCS event is coming + }; + + // Query current Device topology + virtual Device * enumDevices(Device * previousDevice) = 0; + + // Called before system enters an S3 state + virtual void pause() = 0; + + // Get maximum link configuration + virtual LinkConfiguration getMaxLinkConfig() = 0; + + // Get currently active link configuration + virtual LinkConfiguration getActiveLinkConfig() = 0; + + // Get Current link configuration + virtual void getCurrentLinkConfig(unsigned & laneCount, NvU64 & linkRate) = 0; + + // Get the clock calculation supported by the panel + virtual unsigned getPanelDataClockMultiplier() = 0; + + // Get the clock calculation supported by the GPU + virtual unsigned getGpuDataClockMultiplier() = 0; + + // Resume from standby/initial boot notification + // The library is considered to start up in the suspended state. You must make this + // API call to enable the library. None of the library APIs are functional before + // this call. + // + // Returns the group representing the firmware panel if any is active. + // + // plugged Does RM report the root-port DisplayId in + // its plugged connector mask + // + // firmwareLinkHandsOff RM does NOT report the rootport displayId as active, + // but one of the active panels shares the same SOR. + // + // firmwareDPActive RM reports the rootport displayId in the active device list + // but display-driver hasn't yet performed its first modeset. + // + // isUefiSystem DD tells the library whether this system is a UEFI based + // one so that the library can get the current and max link config + // from RM/UEFI instead of trying to determine them on its own. + // + // firmwareHead Head being used to drive the firmware + // display, if firmwareDPActive is true. + // + // bFirmwareLinkUseMultistream + // Specifies whether the firmware connector is being driven in SST + // (false) or MST (true) mode. + // + // bDisableVbiosScratchRegisterUpdate + // Disables update of + // NV_PDISP_SOR_DP_SCRATCH_RAD/MISC scratch + // pad registers with last lit up display + // address. This address is used by VBIOS in + // case of driver unload or BSOD.
+ // + // bAllowMST Allow/Disallow Multi-streaming + // + virtual Group * resume(bool firmwareLinkHandsOff, + bool firmwareDPActive, + bool plugged, + bool isUefiSystem = false, + unsigned firmwareHead = 0, + bool bFirmwareLinkUseMultistream = false, + bool bDisableVbiosScratchRegisterUpdate = false, + bool bAllowMST = true) = 0; + + // The display-driver should enable hands off mode when attempting + // to use a shared resource (such as the SOR) in a non-DP configuration. + virtual void enableLinkHandsOff() = 0; + virtual void releaseLinkHandsOff() = 0; + + // Usage scenario: + // beginCompoundQuery() + // compoundQueryAttach(1280x1024) + // compoundQueryAttach(1920x1080) + // endCompoundQuery() + // Will tell you if you have sufficient bandwidth to operate + // two panels at 1920x1080 and 1280x1024 assuming all currently + // attached panels are detached. + virtual void beginCompoundQuery() = 0; + + // + // twoChannelAudioHz + // If you need 192khz stereo specify 192000 here. + // + // eightChannelAudioHz + // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + // + // pixelClockHz + // Requested pixel clock for the mode + // + // depth + // Requested color depth + // + virtual bool compoundQueryAttach(Group * target, + unsigned twoChannelAudioHz, + unsigned eightChannelAudioHz, + NvU64 pixelClockHz, + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) = 0; + + virtual bool compoundQueryAttach(Group * target, + const DpModesetParams &modesetParams, // Modeset info + DscParams *pDscParams) = 0; // DSC parameters + + virtual bool endCompoundQuery() = 0; + + // Interface to indicate if clients need to perform a head shutdown before a modeset + virtual bool isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) = 0; + + // Interface to indicate if clients need to perform a head shutdown before a modeset + virtual bool isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + ModesetInfo modesetInfo) = 0; // Modeset info relevant DSC data + + // + // Interface for clients to query the library whether the link is going to be trained during notifyAttachBegin(modeset). + // Note: This API is not intended to know if a link training will be performed during assessment of the link. + // This API is added to see if the library can avoid link training during modeset, so that the client can take the necessary decision + // to avoid a destructive modeset from the UEFI mode at POST to a GPU-driver-detected mode + // (thus preventing a visible glitch, i.e. Smooth Transition)
+ // + // How the isLinkTrainingNeededForModeset API differs from the isHeadShutDownNeeded API: + // In case of MST: we always shut down the head and link train if the link is inactive, so both APIs return TRUE + // In case of SST: + // - If requested link config < active link config, we shut down the head to prevent overflow, + // as the head will still be driving at the higher mode during link training to the lower mode. + // So both APIs return TRUE + // - If requested link config >= active link config, we don't need a head shutdown since + // SOR clocks can be changed by entering flush mode, but we will need to link train for the mode change. + // So isHeadShutDownNeeded returns FALSE and isLinkTrainingNeededForModeset returns TRUE + // + virtual bool isLinkTrainingNeededForModeset(ModesetInfo modesetInfo) = 0; + + // Notify library before/after modeset (update) + virtual bool notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) = 0; + + // Group of panels we're attaching to this head + virtual bool notifyAttachBegin(Group * target, const DpModesetParams &modesetParams) = 0; + + virtual void readRemoteHdcpCaps() = 0; + + // modeset might be cancelled if NAB failed + virtual void notifyAttachEnd(bool modesetCancelled) = 0; + + // + // Client needs to be notified about the SST<->MST transition, + // based on which null modeset will be sent. + // + virtual bool isLinkAwaitingTransition() = 0; + + virtual void resetLinkTrainingCounter() = 0; + + // Notify library before/after shutdown (update) + virtual void notifyDetachBegin(Group * target) = 0; + virtual void notifyDetachEnd(bool bKeepOdAlive = false) = 0;
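Putting the query/notify calls above together, the intended client sequence looks roughly like this (a sketch under the declared API; head programming and error paths are elided):

static bool modesetSketch(DisplayPort::Connector * conn,
                          DisplayPort::Group * group,
                          const DisplayPort::DpModesetParams & params)
{
    conn->beginCompoundQuery();
    bool fits = conn->compoundQueryAttach(group, params, NULL /* no DSC query */);
    fits = conn->endCompoundQuery() && fits;
    if (!fits)
        return false;                         // insufficient bandwidth for this mode

    conn->notifyAttachBegin(group, params);   // the library may link train here
    // ... program the head/raster through the display driver ...
    conn->notifyAttachEnd(false /* modeset completed, not cancelled */);
    return true;
}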
+ + // Notify library to assess PCON link capability + virtual bool assessPCONLinkCapability(PCONLinkControl *params) = 0; + + // Notify library of hotplug/IRQ + virtual void notifyLongPulse(bool statusConnected) = 0; + virtual void notifyShortPulse() = 0; + + // Notify Library when ACPI initialization is done + virtual void notifyAcpiInitDone() = 0; + + // Notify Library when GPU capability changes. Usually because power event. + virtual void notifyGPUCapabilityChange() = 0; + virtual void notifyHBR2WAREngage() = 0; + + // Create a new Group. Note that if you wish to do a modeset but send the + // stream nowhere, you may do a modeset with an EMPTY group. This is expected + // to be the mechanism by which monitor faking is implemented. + virtual Group * newGroup() = 0; + + // Shut down and then destroy the connector manager + virtual void destroy() = 0; + + virtual void createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize) = 0; + virtual void deleteFakeMuxDevice() = 0; + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) = 0; + + // + // OS Modeset Order mitigation causes the library to delay the reporting + // of new devices until they can be safely turned on. + // When enabled the library client will not see connection events until + // MustDisconnect messages are processed. + // + // Policy state should be set before the library is brought out of + // the suspended state. + // + // Important Note: This option changes the definition of QueryMode. + // Without OS order mitigation query mode assumes that you will + // detach all of the heads from any zombied monitors *before* + // activating the new panel. If your driver cannot guarantee + // this invariant, then it must enable order mitigation. + // + virtual void setPolicyModesetOrderMitigation(bool enabled) = 0; + + // + // force LT at NAB for compliance test (Power Management) in Win10 RS2+ (WDDM 2.2) + // + // RS2 no longer sends an explicit call for setPanelPowerParams during the Resume. + // It does that by specifying an additional flag during the call to SetTimings. Due to + // this the DP lib doesn't get a chance to perform this transition from setPanelPowerParams, + // and since it was already skipping LT in NAB/modeswitch, LT gets missed on the + // compliance device during resume from S3/S4. + // + virtual void setPolicyForceLTAtNAB(bool enabled) = 0; + + // + // There are cases where the OS does not detach heads from the connector immediately after hot-unplug; + // on the next hot-plug there is no guarantee that the newly connected sink is capable of driving the existing + // raster timings. Flush mode has the following restriction: + // when exiting flush mode, S/W should ensure that the final + // link clock & lane count are able to support the existing raster. + // If we run into this situation and use flush mode anyway, the display engine will hang. + // This policy ensures the link is assessed safely in this situation: instead of using flush mode, ask + // DD to detach/reattach heads for link training. + // + virtual void setPolicyAssessLinkSafely(bool enabled) = 0; + + // + // These interfaces are meant to be used *ONLY* for tool purposes. + // Clients should *NOT* use them for their own implementation. + // + // Sets the preferred link config which the tool has requested to train to. + // Each set call should be paired with a reset call. Also, preferred link configs won't persist across HPDs. + // It is advisable to do compound queries before setting a mode on a preferred config. + // Compound queries and notify attaches(link train) would use the preferred link config unless it is reset again. + // (not advisable to leave a preferred link config always ON). + // + virtual bool setPreferredLinkConfig(LinkConfiguration & lc, bool commit, + bool force = false, + LinkTrainingType forceTrainType = NORMAL_LINK_TRAINING) = 0; + + // + // Resets the preferred link config and lets the library go back to default LT policy. + // Should follow a previous set call. + // + virtual bool resetPreferredLinkConfig(bool force = false) = 0; + + // + // These interfaces are used by client to allow/disallow + // Multi-streaming. + // + // If connected sink is MST capable then: + // Client should detach all active MST video/audio streams before + // disallowing MST; vice versa, client should detach active SST + // stream before allowing MST. + // + virtual void setAllowMultiStreaming(bool bAllowMST) = 0; + virtual bool getAllowMultiStreaming(void) = 0; + + // This function reads sink MST capability from DPCD register(s).
+ virtual bool getSinkMultiStreamCap(void) = 0; + + // These interfaces are Deprecated, use setAllowMultiStreaming() + virtual void setDp11ProtocolForced() = 0; + virtual void resetDp11ProtocolForced() = 0; + virtual bool isDp11ProtocolForced() = 0; + + virtual bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12) = 0; + + virtual bool getOuiSink(unsigned &ouiId, char * modelName, + size_t modelNameBufferSize, NvU8 & chipRevision) = 0; + + virtual bool getIgnoreSourceOuiHandshake() = 0; + virtual void setIgnoreSourceOuiHandshake(bool bIgnore) = 0; + + // + // The following function is to be used to get the capability bit that tells the client whether the connector + // can do multistream. + // + virtual bool isMultiStreamCapable() = 0; + virtual bool isFlushSupported() = 0; + virtual bool isStreamCloningEnabled() = 0; + virtual NvU32 maxLinkRateSupported() = 0; + virtual bool isFECSupported() = 0; + virtual bool isFECCapable() = 0; + + // Following APIs are for link test/config for DP Test Utility + virtual bool getTestPattern(NV0073_CTRL_DP_TESTPATTERN *pTestPattern) = 0; + virtual bool setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride) = 0; + // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + virtual bool getLaneConfig(NvU32 *numLanes, NvU32 *data) = 0; + // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + virtual bool setLaneConfig(NvU32 numLanes, NvU32 *data) = 0; + + virtual DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus) = 0; + + virtual DP_TESTMESSAGE_STATUS getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount) = 0; + // Function to configure power up/down for DP Main Link + virtual void configurePowerState(bool bPowerUp) = 0; + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) = 0; + virtual bool updatePsrConfiguration(vesaPsrConfig config) = 0; + virtual bool readPsrConfiguration(vesaPsrConfig *config) = 0; + virtual bool readPsrState(vesaPsrState *psrState) = 0; + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) = 0; + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) = 0; + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) = 0; + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr) = 0; + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr) = 0; + virtual bool updatePsrLinkState(bool bTrainLink) = 0; + + protected: + virtual ~Connector() {} + }; + + // + // Library routine to create primary port interface + // (Not intended to be used by display driver) + Connector * createConnector(MainLink * mainInterface, // DisplayDriver implemented MainLink object + AuxBus * auxInterface, // DisplayDriver implemented AuxRetry wrapper + Timer * timerInterface, // DisplayDriver provided Timer services + Connector::EventSink * sink); // Interface to notify DisplayDriver of events +} +#endif //INCLUDED_DP_CONNECTOR_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h new file mode 100644 index 0000000..41ba9fd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h @@ -0,0 +1,627 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_connectorimpl.cpp * +* DP connector implementation * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CONNECTORIMPL_H +#define INCLUDED_DP_CONNECTORIMPL_H + +#include "dp_internal.h" +#include "dp_guid.h" +#include "dp_connector.h" +#include "dp_configcaps.h" +#include "dp_list.h" +#include "dp_buffer.h" +#include "dp_auxdefs.h" +#include "dp_watermark.h" +#include "dp_edid.h" +#include "dp_discovery.h" +#include "dp_groupimpl.h" +#include "dp_deviceimpl.h" +#include "./dptestutil/dp_testmessage.h" + +// HDCP abort codes +#define HDCP_FLAGS_ABORT_DEVICE_REVOKED 0x00000800 // Abort due to a revoked device in DP1.2 topology +#define HDCP_FLAGS_ABORT_DEVICE_INVALID 0x00080000 // Abort due to an invalid device in DP1.2 topology +#define HDCP_FLAGS_ABORT_HOP_LIMIT_EXCEEDED 0x80000000 // Abort, number of devices in DP1.2 topology exceeds supported limit + +static inline unsigned getDataClockMultiplier(NvU64 linkRate, NvU64 laneCount) +{ + // + // To get the clock multiplier: + // - Convert the linkRate from Hz to 10kHz by dividing it by 10000. + // - Multiply the 10kHz linkRate by the laneCount. + // - Multiply by 10.0/8, to account for the 8b/10b encoding overhead in the DP protocol layer. 
+    //
+    // Avoid floating-point arithmetic by rewriting the calculation
+    // through the following equivalent conversions:
+    //     linkRate/10000.0 * laneCount * 10.0/8
+    //     (linkRate * laneCount * 10) / (10000 * 8)
+    //     (linkRate * laneCount) / (1000 * 8)
+    //
+    return (unsigned) DisplayPort::axb_div_c_64(linkRate, laneCount, 8000);
+}
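+
+// Worked example (illustrative, not from the original source): for an HBR2
+// link at 5.4 Gbps per lane across 4 lanes, linkRate = 5400000000 and
+// laneCount = 4, so the multiplier is (5400000000 * 4) / 8000 = 2700000,
+// i.e. the data rate in 10 kHz units after the 8b/10b encoding overhead.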
+
+namespace DisplayPort
+{
+    typedef enum
+    {
+        DP_TRANSPORT_MODE_INIT          = 0,
+        DP_TRANSPORT_MODE_SINGLE_STREAM = 1,
+        DP_TRANSPORT_MODE_MULTI_STREAM  = 2,
+    } DP_TRANSPORT_MODE;
+
+    struct ConnectorImpl : public Connector, DiscoveryManager::DiscoveryManagerEventSink, Timer::TimerCallback, MessageManager::MessageReceiver::MessageReceiverEventSink
+    {
+        // DPCD HAL Layer - We should use this in place of direct register accesses
+        DPCDHAL * hal;
+
+        MainLink * main;                // Main link controls
+        AuxBus * auxBus;
+
+        TestMessage testMessage;        // TestMessage instance
+
+        Timer * timer;                  // OS provided timer services
+        Connector::EventSink * sink;    // Event Sink
+
+        unsigned ouiId;                                          // Sink ouiId
+        char modelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1];  // Device Model-name
+        bool bIgnoreSrcOuiHandshake;                             // Skip writing source OUI
+
+        LinkPolicy linkPolicy;
+
+        bool linkGuessed;               // True when link was "guessed" during HPD in TMDS mode
+        bool isLinkQuiesced;            // True when link was set to quiet mode by TMDS modeset
+
+        bool bNoLtDoneAfterHeadDetach;  // True when head is disconnected in NDE
+
+        bool isDP12AuthCap;             // Whether this DP1.2 connector / topmost device has the authentication cap.
+        bool isHDCPAuthOn;              // Whether this connector has authentication on.
+        bool isHDCPReAuthPending;       // Whether HDCP auth is pending (set at every stream addition and cleared at the handler).
+        bool isHDCPAuthTriggered;       // Whether HDCP auth is triggered; only cleared at unplug/device detach for MST.
+        bool isHopLimitExceeded;        // Whether the current topology exceeds the supported limit.
+        bool bIsDiscoveryDetectActive;  // Whether device discovery is active (isDiscoveryDetectComplete is also used as a DD notify, and we do not want to impact that).
+        bool isDiscoveryDetectComplete; // Whether device discovery is finished.
+        bool bDeferNotifyLostDevice;    // Whether we should defer the notify-lost-device event to the client.
+
+        HDCPValidateData hdcpValidateData;  // Cache the HDCP ValidateData.
+        unsigned authRetries;               // Retry counter for the authentication.
+        unsigned retryLT;                   // Retry counter for link training in case of link lost in PostLQA
+        unsigned hdcpCapsRetries;           // Retry counter for HDCP caps read.
+        unsigned hdcpCpIrqRxStatusRetries;  // Retry counter for CP_IRQ RxStatus read.
+        bool bLTPhyRepeater;                // Link Train PHY Repeaters between Source and Sink
+        bool bFromResumeToNAB;              // True if from resume to NAB; WAR flag for unblocking GA1.5
+        bool bAttachOnResume;               // True if notifyLongPulse is called for resume (reboot/S3/S4)
+        bool bSkipAssessLinkForEDP;         // Skip assessLink() for eDP, assuming max is reachable.
+        bool bPConConnected;                // HDMI 2.1 Protocol Converter (supporting SRC control mode) connected.
+        bool bSkipAssessLinkForPCon;        // Skip assessLink() for PCON. DD will call assessFRLLink later.
+        bool bHdcpAuthOnlyOnDemand;         // True if HDCP authentication is only initiated on demand, and MST won't auto-trigger authentication at device attach.
+
+        bool constructorFailed;
+
+        //
+        // OS Modeset Order mitigation causes the library to delay the reporting
+        // of new devices until they can be safely turned on.
+        // When enabled the library client will not see connection events until
+        // MustDisconnect messages are processed.
+        //
+        // Policy state should be set before the library is brought out of
+        // the suspended state.
+        //
+        bool policyModesetOrderMitigation;
+
+        //
+        // Force LT at NAB for compliance test (Power Management) in Win10 RS2+ (WDDM 2.2)
+        //
+        // RS2 no longer sends an explicit call for setPanelPowerParams during the Resume.
+        // It does that by specifying an additional flag during the call to SetTimings. Due to
+        // this, the DP lib doesn't get a chance to perform this transition from setPanelPowerParams,
+        // and since it was already skipping LT in NAB/modeswitch, LT gets missed on the
+        // compliance device during resume from S3/S4.
+        //
+        bool policyForceLTAtNAB;
+
+        //
+        // There are cases where the OS does not detach heads from a connector immediately after hot-unplug;
+        // on the next hot-plug there is no guarantee that the newly connected sink is capable of driving the existing
+        // raster timings. Flush mode has the following restriction:
+        //     When exiting flush mode, S/W should ensure that the final
+        //     link clock & lane count are able to support the existing raster.
+        // If we run into this situation and use flush mode, that will cause the display engine to hang.
+        // This variable ensures the link is assessed safely in this situation: if the newly connected sink is
+        // not capable of driving the existing raster, then just restore the link configuration which was there
+        // before enabling flush mode, through fake link training.
+        //
+        bool policyAssessLinkSafely;
+
+        bool bDisableVbiosScratchRegisterUpdate;
+
+        // Only works when policyModesetOrderMitigation is true.
+        // Records whether we should report newDevice.
+        bool modesetOrderMitigation;
+
+        List deviceList;
+        List activeGroups;
+        LinkedList intransitionGroups;
+        LinkedList addStreamMSTIntransitionGroups;
+        List inactiveGroups;
+
+        // Compound query
+        bool compoundQueryActive;
+        bool compoundQueryResult;
+        unsigned compoundQueryCount;
+        unsigned compoundQueryLocalLinkPBN;
+
+        unsigned freeSlots, maximumSlots;
+
+        // Multistream messaging
+        MessageManager * messageManager;
+        DiscoveryManager * discoveryManager;
+
+        // Multistream timeslot management (on local link)
+        LinkConfiguration highestAssessedLC;    // As of last assess, the highest possible link configuration
+
+        LinkConfiguration activeLinkConfig;     // Current link config.
+
+        // This is the link config requested by a client.
+        // It can be set and reset by the client for a given operation.
+        LinkConfiguration preferredLinkConfig;
+
+        //
+        // Desired link configuration of a single-head multiple-SST secondary connector.
+        //
+        LinkConfiguration oneHeadSSTSecPrefLnkCfg;
+
+        // All possible link configs
+        LinkConfiguration * allPossibleLinkCfgs;
+        unsigned numPossibleLnkCfg;
+
+        PCONLinkControl activePConLinkControl;
+
+        //
+        // We're waiting for an MST<->SST transition.
+        // The transition cannot be made without the DD
+        // disconnecting all heads. All devices are reported
+        // as must_disconnect. Once the last device blocking
+        // the transition is detached from a head - we transition.
+        //
+        bool linkAwaitingTransition;
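+
+        // Illustrative client-side handling of the transition (sketch; names
+        // like pConnector/pGroup/head are placeholders, not part of this header):
+        //
+        //     if (pConnector->isLinkAwaitingTransition())
+        //     {
+        //         // DD detaches the must-disconnect panels from their heads,
+        //         // after which the library can switch transport mode.
+        //         pConnector->notifyDetachBegin(pGroup);
+        //         tearDownHead(head);                    // driver-side
+        //         pConnector->notifyDetachEnd();
+        //     }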
+
+        // Unless we're awaiting transition, this is identical to hal->getSupportsMultistream()
+        DP_TRANSPORT_MODE linkState;
+
+        bool bAudioOverRightPanel;
+
+        bool previousPlugged;
+        bool connectorActive;           // Keep track of whether the connector is active to serve any IRQ
+
+        Group * firmwareGroup;          // The group used for book-keeping when we're in firmware mode
+
+        List pendingEdidReads;          // List of DevicePendingEDIDRead structures.
+                                        // This list tracks the currently in-progress MST EDID reads.
+
+        Device * lastDeviceSetForVbios;
+
+        // Flag which gets set when ACPI init is done. DD calls notifyAcpiInitDone to tell the client that ACPI init is completed
+        // & the client can now initiate a DDC EDID read for a device which supports EDID through SBIOS.
+        bool bAcpiInitDone;
+
+        // Flag to check if the system is UEFI.
+        bool bIsUefiSystem;
+
+        // Flag to check if LT should be skipped.
+        bool bSkipLt;
+
+        // Flag to make sure that zombie gets triggered when a powerChange event happens
+        bool bMitigateZombie;
+
+        //
+        // HP Valor QHD+ N15P-Q3 EDP needs 50ms delay after D3
+        // during trainLinkOptimized to come up on S4
+        //
+        bool bDelayAfterD3;
+
+        //
+        // ASUS and Samsung monitors have inconsistent behavior when
+        // DPCD 0x600 is updated to D3. Skip D3 only in case these monitors
+        // are driven in SST config.
+        //
+        bool bKeepLinkAlive;
+
+        //
+        // HP Trump dock link training is unstable during S4 resume, which causes
+        // the system to hang. Keep the link alive to increase stability.
+        // See Bug 2109823.
+        //
+        bool bKeepLinkAliveMST;
+
+        // Keep the link alive when the connector is in SST
+        bool bKeepLinkAliveSST;
+
+        //
+        // HTC Vive Link box is not happy when we power down the link
+        // during link training when there is no stream present. It requests
+        // a link retraining pulse which is not required.
+        // WAR to address this - NV Bug# 1793084
+        //
+        bool bKeepOptLinkAlive;
+
+        // Keep both DP and FRL link alive to save time.
+        bool bKeepLinkAliveForPCON;
+
+        //
+        // Remote HDCP DPCD access should be D0 but won't introduce an extra Dx
+        // state toggle. Use the counter to avoid powerdownLink() during HDCP probe.
+        //
+        unsigned pendingRemoteHdcpDetections;
+
+        //
+        // ASUS PQ 321 tiled monitor sometimes loses the link while assessing the link
+        // or link training. So if we lower the config from HBR2 to HBR and then
+        // retrain the link, we see a black screen.
+        // So the WAR is to retry link training with the same config 3 times before
+        // lowering the link config. NV Bug #1846925
+        //
+        bool bNoFallbackInPostLQA;
+
+        bool bReportDeviceLostBeforeNew;
+        bool bEnableAudioBeyond48K;
+        bool bDisableSSC;
+        bool bEnableFastLT;
+        NvU32 maxLinkRateFromRegkey;
+
+        //
+        // Latency (ms) to apply between link-train and FEC enable for bug
+        // 2561206.
+        //
+        NvU32 LT2FecLatencyMs;
+
+        //
+        // Dual SST Partner connector object pointer
+        ConnectorImpl *pCoupledConnector;
+
+        // Set to true when a DSC mode is requested.
+        bool bFECEnable;
+
+        // Save link config before entering PSR.
+        LinkConfiguration psrLinkConfig;
+
+        //
+        // Apply MST DSC caps WAR based on OUI ID of sink
+        //
+        bool bDscMstCapBug3143315;
+
+        //
+        // Synaptics branch device doesn't support Virtual Peer Devices, so the DSC
+        // capability of a downstream device should be decided based on the device's own
+        // and its parent's DSC capability.
+        //
+        bool bDscCapBasedOnParent;
+
+        void sharedInit();
+        ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink);
+
+        void setPolicyModesetOrderMitigation(bool enabled);
+        void setPolicyForceLTAtNAB(bool enabled);
+        void setPolicyAssessLinkSafely(bool enabled);
+
+        void discoveryDetectComplete();
+        void discoveryNewDevice(const DiscoveryManager::Device & device);
+        void discoveryLostDevice(const Address & address);
+        void processNewDevice(const DiscoveryManager::Device & device,
+                              const Edid & edid,
+                              bool isMultistream,
+                              DwnStreamPortType portType,
+                              DwnStreamPortAttribute portAttribute,
+                              bool isCompliance = false);
+
+        void applyEdidWARs(Edid & edid, DiscoveryManager::Device device);
+        void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase);
+
+        ResStatusNotifyMessage ResStatus;
+
+        void messageProcessed(MessageManager::MessageReceiver * from);
+
+        ~ConnectorImpl();
+
+        //
+        // Utility functions
+        //
+        virtual void hardwareWasReset();
+        virtual LinkConfiguration getMaxLinkConfig();
+        virtual LinkConfiguration getActiveLinkConfig();
+        virtual void powerdownLink(bool bPowerdownPanel = false);
+
+        GroupImpl * getActiveGroupForSST();
+        bool detectSinkCountChange();
+        bool handlePhyPatternRequest();
+        void applyOuiWARs();
+        bool linkUseMultistream()
+        {
+            return (linkState == DP_TRANSPORT_MODE_MULTI_STREAM);
+        }
+
+        void populateAllDpConfigs();
+
+        //
+        // Suspend/resume API
+        //
+        virtual Group * resume(bool firmwareLinkHandsOff,
+                               bool firmwareDPActive,
+                               bool plugged,
+                               bool isUefiSystem = false,
+                               unsigned firmwareHead = 0,
+                               bool bFirmwareLinkUseMultistream = false,
+                               bool bDisableVbiosScratchRegisterUpdate = false,
+                               bool bAllowMST = true);
+        virtual void pause();
+
+        virtual Device * enumDevices(Device * previousDevice);
+
+        virtual void beginCompoundQuery();
+        virtual bool compoundQueryAttach(Group * target,
+                                         unsigned twoChannelAudioHz,    // If you need 192 kHz stereo, specify 192000 here
+                                         unsigned eightChannelAudioHz,  // Same setting for multi-channel audio.
+                                                                        // DisplayPort encodes 3-8 channel streams as 8 channel
+                                         NvU64 pixelClockHz,            // Requested pixel clock for the mode
+                                         unsigned rasterWidth,
+                                         unsigned rasterHeight,
+                                         unsigned rasterBlankStartX,
+                                         unsigned rasterBlankEndX,
+                                         unsigned depth);
+
+        virtual bool compoundQueryAttach(Group * target,
+                                         const DpModesetParams &modesetParams,  // Modeset info
+                                         DscParams *pDscParams = NULL);         // DSC parameters
+
+        virtual bool endCompoundQuery();
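+
+        // Typical calling pattern (sketch inferred from the API shape; pConnector
+        // and the per-head groups/params are placeholders):
+        //
+        //     pConnector->beginCompoundQuery();
+        //     bool ok = true;
+        //     ok &= pConnector->compoundQueryAttach(pGroupHead0, modesetParams0);
+        //     ok &= pConnector->compoundQueryAttach(pGroupHead1, modesetParams1);
+        //     ok &= pConnector->endCompoundQuery();
+        //     // 'ok' presumably indicates whether the combined configuration fits
+        //     // the available link bandwidth/timeslots.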
+
+        //
+        // Timer callback tags.
+        // (We pass the address of these variables as context to ::expired.)
+        //
+        char tagFireEvents;
+        char tagDelayedLinkTrain;
+        char tagHDCPReauthentication;
+        char tagDelayedHdcpCapRead;
+        char tagDelayedHDCPCPIrqHandling;
+
+        //
+        // Enable/disable TMDS mode
+        //
+        virtual void enableLinkHandsOff();
+        virtual void releaseLinkHandsOff();
+
+        //
+        // Timer callback for event management
+        // Uses: fireEvents()
+        //
+        virtual void expired(const void * tag);
+
+        // Generate events.
+        // useTimer specifies whether we fire the events on the timer
+        // context, or this context.
+        void fireEvents();
+
+        // Returns the number of pending notifications.
+        void fireEventsInternal();
+
+        virtual bool isHeadShutDownNeeded(Group * target,       // Group of panels we're attaching to this head
+                                          unsigned headIndex,
+                                          ModesetInfo modesetInfo);
+
+        virtual bool isLinkTrainingNeededForModeset(ModesetInfo modesetInfo);
+
+        virtual bool notifyAttachBegin(Group * target,          // Group of panels we're attaching to this head
+                                       const DpModesetParams &modesetParams);
+
+        virtual bool isHeadShutDownNeeded(Group * target,       // Group of panels we're attaching to this head
+                                          unsigned headIndex,
+                                          unsigned twoChannelAudioHz,    // If you need 192 kHz stereo, specify 192000 here
+                                          unsigned eightChannelAudioHz,  // Same setting for multi-channel audio. DisplayPort encodes 3-8 channel streams as 8 channel
+                                          NvU64 pixelClockHz,            // Requested pixel clock for the mode
+                                          unsigned rasterWidth,
+                                          unsigned rasterHeight,
+                                          unsigned rasterBlankStartX,
+                                          unsigned rasterBlankEndX,
+                                          unsigned depth);
+
+        virtual bool notifyAttachBegin(Group * target,          // Group of panels we're attaching to this head
+                                       unsigned headIndex,
+                                       unsigned twoChannelAudioHz,    // If you need 192 kHz stereo, specify 192000 here
+                                       unsigned eightChannelAudioHz,  // Same setting for multi-channel audio.
+                                                                      // DisplayPort encodes 3-8 channel streams as 8 channel
+                                       NvU64 pixelClockHz,            // Requested pixel clock for the mode
+                                       unsigned rasterWidth,
+                                       unsigned rasterHeight,
+                                       unsigned rasterBlankStartX,
+                                       unsigned rasterBlankEndX,
+                                       unsigned depth);
+
+        virtual void readRemoteHdcpCaps();
+        virtual void notifyAttachEnd(bool modesetCancelled);
+        virtual void notifyDetachBegin(Group * target);
+        virtual void notifyDetachEnd(bool bKeepOdAlive = false);
+
+        bool performIeeeOuiHandshake();
+        void setIgnoreSourceOuiHandshake(bool bIgnore);
+        bool getIgnoreSourceOuiHandshake();
+        bool willLinkSupportModeSST(const LinkConfiguration & linkConfig, const ModesetInfo & modesetInfo);
+        void forceLinkTraining();
+
+        void assessLink(LinkTrainingType trainType = NORMAL_LINK_TRAINING);
+
+        bool isLinkInD3();
+        bool isLinkActive();
+        bool isLinkLost();
+        bool trainSingleHeadMultipleSSTLinkNotAlive(GroupImpl *pGroupAttached);
+        bool isLinkAwaitingTransition();
+        bool isNoActiveStreamAndPowerdown();
+        void incPendingRemoteHdcpDetection()
+        {
+            pendingRemoteHdcpDetections++;
+        }
+        void decPendingRemoteHdcpDetection()
+        {
+            if (pendingRemoteHdcpDetections > 0)
+            {
+                pendingRemoteHdcpDetections--;
+            }
+        }
+        bool trainLinkOptimized(LinkConfiguration lConfig);
+        bool trainLinkOptimizedSingleHeadMultipleSST(GroupImpl * group);
+        bool getValidLowestLinkConfig(LinkConfiguration & lConfig, LinkConfiguration & lowestSelected, ModesetInfo queryModesetInfo);
+        bool postLTAdjustment(const LinkConfiguration &, bool force);
+        void populateUpdatedLaneSettings(NvU8* voltageSwingLane, NvU8* preemphasisLane, NvU32 *data);
+        void populateDscCaps(DSC_INFO* dscInfo, DeviceImpl * dev, DSC_INFO::FORCED_DSC_PARAMS* forcedParams);
+        void populateDscGpuCaps(DSC_INFO* dscInfo);
+        void populateForcedDscParams(DSC_INFO* dscInfo, DSC_INFO::FORCED_DSC_PARAMS* forcedParams);
+        void populateDscSinkCaps(DSC_INFO* dscInfo, DeviceImpl * dev);
+        void populateDscModesetInfo(MODESET_INFO * pModesetInfo, const DpModesetParams * pModesetParams);
+
+        bool train(const LinkConfiguration & lConfig, bool force, LinkTrainingType trainType = NORMAL_LINK_TRAINING);
+        bool validateLinkConfiguration(const LinkConfiguration & lConfig);
+
+        virtual bool assessPCONLinkCapability(PCONLinkControl *params);
+        bool trainPCONFrlLink(PCONLinkControl *pConControl);
+
+        // Set Device DSC state based on current DSC
state of all active devices on this connector + bool setDeviceDscState(Device * dev, bool bEnableDsc); + + // the lowest level function(nearest to the hal) for the connector. + bool rawTrain(const LinkConfiguration & lConfig, bool force, LinkTrainingType linkTrainingType); + + bool enableFlush(); + bool beforeAddStream(GroupImpl * group, bool force=false, bool forFlushMode = false); + void afterAddStream(GroupImpl * group); + void beforeDeleteStream(GroupImpl * group, bool forFlushMode = false); + void afterDeleteStream(GroupImpl * group); + void disableFlush(bool test=false); + + bool beforeAddStreamMST(GroupImpl * group, bool force = false, bool forFlushMode = false); + + bool deleteAllVirtualChannels(); + void clearTimeslices(); + bool allocateTimeslice(GroupImpl * targetGroup); + void freeTimeslice(GroupImpl * targetGroup); + void flushTimeslotsToHardware(); + bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12); + bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision); + bool hdcpValidateKsv(const NvU8 *ksv, NvU32 Size); + void cancelHdcpCallbacks(); + bool handleCPIRQ(); + void handleSSC(); + void handleMCCSIRQ(); + void handleHdmiLinkStatusChanged(); + void sortActiveGroups(bool ascending); + void configInit(); + + virtual DeviceImpl* findDeviceInList(const Address & address); + virtual void disconnectDeviceList(); + void notifyLongPulseInternal(bool statusConnected); + virtual void notifyLongPulse(bool status); + virtual void notifyShortPulse(); + virtual Group * newGroup() ; + virtual void destroy(); + virtual void createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize); + virtual void deleteFakeMuxDevice(); + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize); + virtual bool isMultiStreamCapable(); + virtual bool isFlushSupported(); + virtual bool isStreamCloningEnabled(); + virtual bool isFECSupported(); + virtual bool isFECCapable(); + virtual NvU32 maxLinkRateSupported(); + virtual bool setPreferredLinkConfig(LinkConfiguration & lc, bool commit, bool force = false, LinkTrainingType trainType = NORMAL_LINK_TRAINING); + virtual bool resetPreferredLinkConfig(bool force = false); + virtual void setAllowMultiStreaming(bool bAllowMST); + virtual bool getAllowMultiStreaming(void); + virtual bool getSinkMultiStreamCap(void); + virtual void setDp11ProtocolForced(); + virtual void resetDp11ProtocolForced(); + virtual bool isDp11ProtocolForced(); + bool isAcpiInitDone(); + virtual void notifyAcpiInitDone(); + Group * createFirmwareGroup(); + virtual void notifyGPUCapabilityChange(); + virtual void notifyHBR2WAREngage(); + + bool getTestPattern(NV0073_CTRL_DP_TESTPATTERN *testPattern); + bool setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride = false); + bool getLaneConfig(NvU32 *numLanes, NvU32 *data); // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + bool setLaneConfig(NvU32 numLanes, NvU32 *data); // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + void getCurrentLinkConfig(unsigned & laneCount, NvU64 & linkRate); // CurrentLink Configuration + unsigned getPanelDataClockMultiplier(); + unsigned getGpuDataClockMultiplier(); + void configurePowerState(bool bPowerUp); + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps); + virtual bool updatePsrConfiguration(vesaPsrConfig config); + virtual bool readPsrConfiguration(vesaPsrConfig *config); + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState); + virtual 
bool writePsrErrorStatus(vesaPsrErrorStatus psrErr); + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr); + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr); + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr); + virtual bool readPsrState(vesaPsrState *psrState); + virtual bool updatePsrLinkState(bool bTrainLink); + + // for dp test utility. pBuffer is the request buffer of type DP_STATUS_REQUEST_xxxx + DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus); + + DP_TESTMESSAGE_STATUS getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount); // for dp test utility, called by DD + + // Reset link training counter for the active link configuration. + virtual void resetLinkTrainingCounter() + { + activeLinkConfig.setLTCounter(0); + } + }; + + // + // New devices do not get a DeviceImpl created until after + // the EDID read has completed. This object is used + // to track the necessary state. + // + struct DevicePendingEDIDRead : protected EdidReadMultistream::EdidReadMultistreamEventSink, public ListElement + { + EdidReadMultistream reader; + DiscoveryManager::Device device; + ConnectorImpl * parent; + + void mstEdidCompleted(EdidReadMultistream * from); + void mstEdidReadFailed(EdidReadMultistream * from); + + public: + DevicePendingEDIDRead(ConnectorImpl * _parent, MessageManager * manager, DiscoveryManager::Device dev) + : reader(_parent->timer, manager, this, dev.address), device(dev), parent(_parent) + { + } + }; +} + +#endif //INCLUDED_DP_CONNECTORIMPL_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h new file mode 100644 index 0000000..27d5341 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_crc.h * +* CRC Algorithms for the messaging subsystem. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CRC_H +#define INCLUDED_DP_CRC_H + +#include "dp_bitstream.h" + +namespace DisplayPort +{ + unsigned dpCalculateHeaderCRC(BitStreamReader * reader); + unsigned dpCalculateBodyCRC(BitStreamReader * writer); +} + +#endif //INCLUDED_DP_CRC_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h new file mode 100644 index 0000000..67349fd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h @@ -0,0 +1,524 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort*********************************\ +* * +* Module: dp_connector.cpp * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_DEVICEIMPL_H +#define INCLUDED_DP_DEVICEIMPL_H + +#include "dp_connector.h" +#include "dp_internal.h" +#include "dp_edid.h" +#include "dp_list.h" +#include "dp_auxdefs.h" +#include "dp_vrr.h" + +namespace DisplayPort +{ + #define PREDEFINED_DSC_MST_BPPX16 160; + #define HDCP_BCAPS_DDC_OFFSET 0x40 + #define HDCP_BCAPS_DDC_EN_BIT 0x80 + #define HDCP_BCAPS_DP_EN_BIT 0x01 + #define HDCP_I2C_CLIENT_ADDR 0x74 + + struct GroupImpl; + struct ConnectorImpl; + class DeviceHDCPDetection; + class VrrEnablement; + + struct DeviceImpl : public Device, + public AuxBus, + public ListElement + { + // + // Shadow state: This is the last state delivered to DD. + // see the ConnectorImpl::fireEvents() function for handling. + // + // State is double buffered to allow for allow announces + // to happen at the end of the state updates. We assume + // the DD can call any Connector API in response to the + // event. + // + struct Shadow + { + bool plugged; + bool zombie; + bool cableOk; + bool mustDisconnect; + bool hdcpCapDone; + LinkConfiguration highestAssessedLC; + } shadow; + + struct BandWidth + { + struct _Enum_Path + { + unsigned total, free; + bool bPathFECCapable; + bool dataValid; // Is the cache valid? 
+            } enum_path;
+
+            struct Compound_Query_State
+            {
+                unsigned totalTimeSlots;              // Total timeslots available for allocation across this node
+
+                unsigned timeslots_used_by_query;     // Timeslots accounted for.
+
+                unsigned bandwidthAllocatedForIndex;  // Compound query is comprised of several
+                                                      //   query attaches. These query attaches
+                                                      //   may have more than one device associated.
+                                                      //   This mask keeps track of which query attaches
+                                                      //   have already had the stream "routed" past
+                                                      //   this node.
+            } compound_query_state;
+
+            LinkConfiguration lastHopLinkConfig;      // Inferred from enum_path.total
+
+        } bandwidth;
+
+        enum rawEprState
+        {
+            software,
+            hardware
+        };
+
+        void resetCacheInferredLink();
+        LinkConfiguration * inferLeafLink(unsigned * totalLinkSlots);
+
+        DeviceImpl * parent;            // Upstream parent device
+        DeviceImpl * children[16];
+        PortMap portMap;
+
+        Edid rawEDID;
+        Edid processedEdid;
+        Edid ddcEdid;
+        DPCDHAL * hal;
+        GroupImpl * activeGroup;
+        ConnectorImpl * connector;
+        ConnectorType connectorType;
+        Address address;
+        GUID guid;
+        GUID guid2;
+        bool bVirtualPeerDevice;
+        NvU8 peerDevice;
+        NvU8 dpcdRevisionMajor;
+        NvU8 dpcdRevisionMinor;
+        bool multistream;
+        bool videoSink, audioSink;
+        bool plugged;
+
+        AuxRetry friendlyAux;
+        bool payloadAllocated;          // Did the allocate-payload go through?
+
+        unsigned char BCAPS[HDCP_BCAPS_SIZE];    // HDCP 1.x bCaps raw data
+        unsigned char BKSV[HDCP_KSV_SIZE];       // HDCP 1.x bKsv raw data
+        unsigned char nvBCaps[HDCP_BCAPS_SIZE];  // NV generic HDCP BCAPS including 1.x, 2.2, ...
+        NvU64 maxTmdsClkRate;
+
+        bool isPendingNewDevice();
+        bool isPendingLostDevice();
+        bool isPendingZombie();
+        bool isPendingCableOk();
+        bool isPendingBandwidthChange();
+        bool isPendingHDCPCapDone();
+
+        TriState isHDCPCap;
+        bool isDeviceHDCPDetectionAlive;
+        DeviceHDCPDetection * deviceHDCPDetection;
+
+        PCONCaps pconCaps;
+
+        // This flag signifies that the compliance device has requested an EDID read test and may follow
+        // the hidden and lazy zombie policy.
+        bool complianceDeviceEdidReadTest;
+
+        bool lazyExitNow;
+
+        // VRR Enablement structure
+        VrrEnablement *vrrEnablement;
+
+        // DSC fields
+        NvU8 rawDscCaps[16];
+        DscCaps dscCaps;
+
+        // Panel replay caps
+        PanelReplayCaps prCaps;
+
+        bool bIsFakedMuxDevice;
+        bool bIsPreviouslyFakedMuxDevice;
+        bool bisMarkedForDeletion;
+
+        //
+        // Device doing the DSC decompression for this device. This could be the device itself
+        // or its parent.
+        //
+        DeviceImpl* devDoingDscDecompression;
+        //
+        // If DSC stream can be sent to this device or not.
Either device itself or it's + // parent can do DSC decompression + // + bool bDSCPossible; + + bool bFECSupported; + bool bFECUncorrectedSupported; + bool bFECCorrectedSupported; + bool bFECBitSupported; + bool bFECParityBlockSupported; + bool bFECParitySupported; + + TriState bSdpExtCapable; + bool bMSAOverMSTCapable; + + DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent); + ~DeviceImpl(); + + virtual bool isCableOk(); + virtual bool isLogical(); + virtual bool isZombie(); + + virtual unsigned getEDIDSize() const; + virtual bool getEDID(char * buffer, unsigned size) const; + virtual unsigned getRawEDIDSize() const; + virtual bool getRawEDID(char * buffer, unsigned size) const; + + virtual bool getPCONCaps(PCONCaps *pPCONCaps); + + virtual Group * getOwningGroup() + { + return (Group *)activeGroup; + } + + bool isActive(); + + void applyOUIOverrides(); + + virtual Device * getParent() + { + return parent; + } + + virtual Device * getChild(unsigned portNumber) + { + return children[portNumber]; + } + + virtual bool isMultistream() // Sink supports multistream, remember we can have 1.1 targets + { + return address.size() != 0; + } + + virtual bool isNativeDPCD() + { + return (address.size() < 2); + } + + virtual bool isVideoSink() + { + return videoSink; + } + + virtual bool isAudioSink() + { + return audioSink; + } + + virtual bool isLoop() + { + DP_LOG(("isLoop implementation is pending (bug 791059)")); + return false; + } + + virtual bool isRedundant() + { + DP_LOG(("isRedundant implementation is pending (bug 791059)")); + return false; + } + + virtual bool isMustDisconnect(); + + virtual bool isPlugged() + { + return plugged; + } + + virtual Address getTopologyAddress() const + { + return address; + } + + virtual ConnectorType getConnectorType() + { + return connectorType; + } + + virtual bool isFallbackEdid() + { + return this->processedEdid.isFallbackEdid(); + } + + virtual GUID getGUID() const + { + return guid; + } + + virtual PortMap getPortMap() const + { + return portMap; + } + + virtual TriState hdcpAvailableHop(); + virtual TriState hdcpAvailable(); + + virtual bool isMSAOverMSTCapable() + { + return bMSAOverMSTCapable; + } + + virtual bool isFakedMuxDevice(); + virtual bool isPreviouslyFakedMuxDevice(); + + bool bypassDpcdPowerOff() + { + return processedEdid.WARFlags.disableDpcdPowerOff; + } + + bool powerOnMonitorBeforeLt() + { + return processedEdid.WARFlags.powerOnBeforeLt; + } + + bool forceMaxLinkConfig() + { + return processedEdid.WARFlags.forceMaxLinkConfig; + } + + bool skipRedundantLt() + { + return processedEdid.WARFlags.skipRedundantLt; + } + + bool ignoreRedundantHotplug() + { + return processedEdid.WARFlags.ignoreRedundantHotplug; + } + + bool isOptimalLinkConfigOverridden() + { + return processedEdid.WARFlags.overrideOptimalLinkCfg; + } + + // Apply DPCD overrides if required + void dpcdOverrides(); + + bool getDpcdRevision(unsigned * major, unsigned * minor) + { + if (!major || !minor) + { + DP_ASSERT(0 && "Null pointers passed in."); + return false; + } + + *major = this->dpcdRevisionMajor; + *minor = this->dpcdRevisionMinor; + return true; + } + + bool getIgnoreMSACap() + { + return hal->getMsaTimingparIgnored(); + } + + AuxRetry::status setIgnoreMSAEnable(bool msaTimingParamIgnoreEn) + { + return hal->setIgnoreMSATimingParamters(msaTimingParamIgnoreEn); + } + + bool isVirtualPeerDevice() + { + return bVirtualPeerDevice; + } + + bool isBranchDevice() + { + return !isVideoSink() && !isAudioSink(); + } + + bool isAtLeastVersion(unsigned 
major, unsigned minor) + { + if (dpcdRevisionMajor > major) + return true; + + if (dpcdRevisionMajor < major) + return false; + + return dpcdRevisionMinor >= minor; + } + + virtual void queryGUID2(); + + virtual bool getSDPExtnForColorimetrySupported(); + + virtual bool isPowerSuspended(); + + virtual void setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn); + + virtual status transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned *pNakReason= NULL, + NvU8 offset= 0, NvU8 nWriteTransactions= 0); + virtual unsigned transactionSize(); + // default behaviour is querying first three registers for every lane --> flags = 0x7 + virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags = NV_DP_FEC_FLAGS_SELECT_ALL); + virtual AuxBus * getRawAuxChannel() { return this; } + virtual AuxRetry * getAuxChannel() { return &friendlyAux; } + virtual AuxBus::status getDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason=NULL); + virtual AuxBus::status setDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason=NULL); + virtual AuxBus::status queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags); + + virtual DscCaps getDscCaps(); + + // + // This function returns the device itself or its parent device that is doing + // DSC decompression for it. + // + virtual Device* getDevDoingDscDecompression(); + virtual void markDeviceForDeletion() {bisMarkedForDeletion = true;}; + virtual bool isMarkedForDeletion() {return bisMarkedForDeletion;}; + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize); + + virtual AuxBus::status dscCrcControl(NvBool bEnable, gpuDscCrc *dataGpu, sinkDscCrc *dataSink); + + // + // Parameter bForceMot in both getI2cData and setI2cData is used to forfully set + // the MOT bit. It is needed for some special cases where the MOT bit shouldn't + // be set but some customers need it to please their monitors. 
+ // + virtual bool getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false); + virtual bool setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false); + virtual bool getRawEpr(unsigned * totalEpr, unsigned * freeEpr, rawEprState eprState); + + void switchToComplianceFallback(); + + // VRR Display Enablement Functions + bool startVrrEnablement(void); + void resetVrrEnablement(void); + bool isVrrMonitorEnabled(void); + bool isVrrDriverEnabled(void); + + // Panel replay related functions + bool isPanelReplaySupported(void); + void getPanelReplayCaps(void); + bool setPanelReplayConfig(panelReplayConfig prcfg); + + NvBool getDSCSupport(); + bool getFECSupport(); + NvBool isDSCPassThroughSupported(); + NvBool isDSCSupported(); + NvBool isDSCPossible(); + bool isFECSupported(); + bool readAndParseDSCCaps(); + bool parseDscCaps(const NvU8 *buffer, NvU32 bufferSize); + bool setDscEnable(bool enable); + bool getDscEnable(bool *pEnable); + unsigned getDscVersionMajor(); + unsigned getDscVersionMinor(); + unsigned getDscRcBufferSize(); + unsigned getDscRcBufferBlockSize(); + unsigned getDscMaxSlicesPerSink(); + unsigned getDscLineBufferBitDepth(); + NvBool isDscBlockPredictionSupported(); + unsigned getDscMaxBitsPerPixel(); + NvBool isDscRgbSupported(); + NvBool isDscYCbCr444Supported(); + NvBool isDscYCbCrSimple422Supported(); + NvBool isDscYCbCr422NativeSupported(); + NvBool isDscYCbCr420NativeSupported(); + unsigned getDscPeakThroughputMode0(); + unsigned getDscPeakThroughputModel(); + unsigned getDscMaxSliceWidth(); + unsigned getDscDecoderColorDepthSupportMask(); + void setDscDecompressionDevice(bool bDscCapBasedOnParent); + }; + class DeviceHDCPDetection : public Object, MessageManager::Message::MessageEventSink, Timer::TimerCallback + { + DeviceImpl* parent; + RemoteDpcdReadMessage remoteBKSVReadMessage; + RemoteDpcdReadMessage remoteBCapsReadMessage; + RemoteDpcdReadMessage remote22BCapsReadMessage; + MessageManager * messageManager; // For transmit and receive + Timer * timer; + bool bksvReadCompleted; + bool bCapsReadCompleted; + bool isValidBKSV; + bool isBCapsHDCP; + unsigned retriesRemoteBKSVReadMessage; + unsigned retriesRemoteBCapsReadMessage; + unsigned retriesRemote22BCapsReadMessage; + bool retryRemoteBKSVReadMessage; + bool retryRemoteBCapsReadMessage; + bool retryRemote22BCapsReadMessage; + bool bBKSVReadMessagePending; + bool bBCapsReadMessagePending; + + public: + + DeviceHDCPDetection(DeviceImpl * parent, MessageManager * messageManager, Timer * timer) + : bksvReadCompleted(false),bCapsReadCompleted(false),isValidBKSV(false), + isBCapsHDCP(false), retriesRemoteBKSVReadMessage(0), retriesRemoteBCapsReadMessage(0), + retriesRemote22BCapsReadMessage(0), retryRemoteBKSVReadMessage(false), + retryRemoteBCapsReadMessage(false), retryRemote22BCapsReadMessage(false), + bBKSVReadMessagePending(false), bBCapsReadMessagePending(false) + + { + this->parent = parent; + this->messageManager = messageManager; + this->timer = timer; + } + + ~DeviceHDCPDetection(); + void expired(const void * tag); + void start(); + void waivePendingHDCPCapDoneNotification(); + + bool hdcpValidateKsv(const NvU8 *ksv, NvU32 Size); + void handleRemoteDpcdReadDownReply(MessageManager::Message * from); + void messageFailed(MessageManager::Message * from, NakData * nakData); + void messageCompleted(MessageManager::Message * from); + }; +} + +#endif //INCLUDED_DP_DEVICEIMPL_H + diff --git 
a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h new file mode 100644 index 0000000..b0aa170 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h @@ -0,0 +1,328 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_discovery.h * +* Class definition for discovery manager. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_DISCOVERY_H +#define INCLUDED_DP_DISCOVERY_H + +#include "dp_address.h" +#include "dp_list.h" +#include "dp_messages.h" +#include "dp_messagecodings.h" + +namespace DisplayPort +{ + class DiscoveryManager : virtual public Object + { + public: + struct Device + { + Address address; // direct topology address + + bool legacy; // legacy (NON DP) device emulated on this port + bool branch; // DP 1.2 style branching device + PeerDevice peerDevice; // connector type of the device on this port + unsigned dpcdRevisionMajor; + unsigned dpcdRevisionMinor; + GUID peerGuid; // device guid + unsigned SDPStreams; // maximum number of audio streams supported + unsigned SDPStreamSinks; // number of outputs to select from + bool dirty; // got updates for the same device + PortMap portMap; + bool videoSink; // Should be true when a video sink is supported + NvU64 maxTmdsClkRate; + + Device():peerDevice(None),SDPStreams(0),SDPStreamSinks(0),dirty(false),videoSink(false) + { + portMap.validMap = portMap.inputMap = portMap.internalMap = 0; + } + + ~Device(){} + + }; + + struct ReceiverSink : + virtual public Object, + public MessageManager::MessageReceiver::MessageReceiverEventSink + { + DiscoveryManager * parent; + + // will handle CSN (up_req) and generate a up_reply for it. + virtual void messageProcessed(MessageManager::MessageReceiver * from); + void handleCSN(MessageManager::MessageReceiver * from); + + ReceiverSink(DiscoveryManager * parent) + :parent(parent) + {} + + virtual ~ReceiverSink() + {} + }; + + // This will account for upreplies and their failures/retries. 
+ struct CsnUpReplyContainer : ListElement, Timer::TimerCallback, MessageManager::Message::MessageEventSink + { + struct CsnUpReply: public GenericUpReplyMessage + { + CsnUpReplyContainer * container; + + CsnUpReply(CsnUpReplyContainer * container, const Address & target) + : GenericUpReplyMessage(target, 0x2), container(container) + {} + + ~CsnUpReply() + {} + + }; + + DiscoveryManager * parent; + CsnUpReply upReplyMessage; + unsigned delayInUsec; + unsigned retries; + Address target; + + virtual void messageFailed(MessageManager::Message * from, NakData * nakData) + { + // if reason of failure is not timeout or defer; just forget trying again. + if (!(nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + messageCompleted(from); + return; + } + + // queue a callback to reset and send again + queueUpReply(); + return; + } + + virtual void messageCompleted(MessageManager::Message * from) + { + // don't delete now. Queue callback to delete later + retries = 0; + parent->timer->queueCallback(this, "CSNF", 5000); + } + + void queueUpReply() + { + parent->timer->queueCallback(this, "CSNF", delayInUsec/1000); + } + + void postUpReply() + { + upReplyMessage.set(target); + parent->messageManager->postReply(&this->upReplyMessage, this); + } + + virtual void expired(const void * tag) + { + if (retries) + retries--; + + if (retries) + postUpReply(); + else + { + // enough retries. wrap up. + delete this; + } + } + + CsnUpReplyContainer(DiscoveryManager * parent) + :parent(parent), upReplyMessage(this, target), delayInUsec(200000), retries(4), target(Address(0)) + {} + + virtual ~CsnUpReplyContainer() + { + // remove self from queue and delete + // cancel all pending callbacks + parent->timer->cancelCallbacks(this); + parent->pendingCsnUpReplies.remove(this); + } + + }; + + ReceiverSink receiverSink; + + ConnStatusNotifyMessage connectionStatusNotifyProcessor; + + GUIDBuilder guidBuilder; + + List pendingCsnUpReplies; + + public: + + struct DiscoveryManagerEventSink + { + virtual void discoveryDetectComplete() = 0; // reply to processDetect + virtual void discoveryNewDevice(const DiscoveryManager::Device & device) = 0; // these can go out anytime + virtual void discoveryLostDevice(const Address & address) = 0; + }; + + enum { + maximumTopologyNodes = 128 + }; + + Device currentDevices[maximumTopologyNodes]; + unsigned currentDevicesCount; + + Device * findDevice(const Address & address); + Device * findDevice(GUID & guid); + void addDevice(const Device & device); + void removeDevice(Device * device); + void removeDeviceTree(const Address & prefix); + Device * findChildDeviceForBranchWithGuid(GUID guid, unsigned port, Address & childAddr); + + // + // This is responsible for a "complete" detection of a sink. 
Specifically using remote dpcd reads and writes + // + struct SinkDetection : MessageManager::Message::MessageEventSink, ListElement, Timer::TimerCallback + { + Device device; + Address address; + RemoteDpcdWriteMessage remoteDpcdWriteMessage; + RemoteDpcdReadMessage remoteDpcdReadMessage; + PowerUpPhyMessage powerUpPhyMessage; + LinkAddressMessage linkAddressMessage; + DiscoveryManager * parent; + bool completed; + unsigned retriesRemoteDpcdWriteMessage; + bool retryRemoteDpcdWriteMessage; + unsigned retriesRemoteDpcdReadMessage; + bool retryRemoteDpcdReadMessage; + unsigned retriesLinkAddressMessage; + bool retryLinkAddressMessage; + + bool bFromCSN; + + SinkDetection(DiscoveryManager * parent, const Device & device, bool bFromCSN) + : device(device), address(device.address), parent(parent), completed(false), + retriesRemoteDpcdWriteMessage(0), retryRemoteDpcdWriteMessage(false), + retriesRemoteDpcdReadMessage(0), retryRemoteDpcdReadMessage(false), + bFromCSN(bFromCSN) + {} + + ~SinkDetection(); + void expired(const void * tag); + void start(); + + void detectCompleted(bool passed); + void messageFailed(MessageManager::Message * from, NakData * nakData); + void handleRemoteDpcdReadDownReply(); + void handleRemoteDpcdWriteDownReply(); + void handleLinkAddressDownReply(); + + void messageCompleted(MessageManager::Message * from); + + }; + + // + // This object represents an address in some stage of detection + // + struct BranchDetection : MessageManager::Message::MessageEventSink, ListElement, Timer::TimerCallback + { + Device parentDevice; + Address address; + LinkAddressMessage::Result child[16]; + unsigned childCount; + + LinkAddressMessage linkAddressMessage; + RemoteDpcdWriteMessage remoteDpcdWriteMessage; + + DiscoveryManager * parent; + bool completed; + bool retryLinkAddressMessage; + unsigned retriesLinkAddressMessage; + unsigned retriesRemoteDpcdWriteMessage; + bool retryRemoteDpcdWriteMessage; + + BranchDetection(DiscoveryManager * parent, const Device & device) + : parentDevice(device), address(parentDevice.address), + parent(parent), completed(false), + retryLinkAddressMessage(false), retriesLinkAddressMessage(0), + retriesRemoteDpcdWriteMessage(0), retryRemoteDpcdWriteMessage(false) + {} + + void expired(const void * tag); + void start(); + ~BranchDetection(); + + void detectCompleted(bool present); + void messageFailed(MessageManager::Message * from, NakData * nakData) ; + void handleLinkAddressDownReply(); + void handleRemoteDpcdReadDownReply(); + void messageCompleted(MessageManager::Message * from); + }; + + void detect(const Address & address); + void detectBranch(Device device); + void detectSink(Device newDevice, bool bFromCSN); + +public: + + List outstandingBranchDetections; + List outstandingSinkDetections; + DiscoveryManagerEventSink * sink; // To call NotifyDetectComplete() + MessageManager * messageManager; // For transmit and receive + Timer * timer; + DPCDHAL * hal; + + DiscoveryManager(MessageManager * messageManager, DiscoveryManagerEventSink * sink, Timer * timer, DPCDHAL * hal) + : receiverSink(this), + connectionStatusNotifyProcessor(&receiverSink), + guidBuilder(timer, 0x10DE9070), + currentDevicesCount(0), + sink(sink), + messageManager(messageManager), + timer(timer), + hal(hal) + { + + // + // Register to filter all the upmessages. We want to know when + // connection status notify events are on their way. 
+ // + messageManager->registerReceiver(&connectionStatusNotifyProcessor); + } + + ~DiscoveryManager() + { + while (!this->outstandingBranchDetections.isEmpty()) + delete this->outstandingBranchDetections.front(); + + while (!this->outstandingSinkDetections.isEmpty()) + delete this->outstandingSinkDetections.front(); + + while (!this->pendingCsnUpReplies.isEmpty()) + delete this->pendingCsnUpReplies.front(); + } + + void notifyLongPulse(bool status); + + }; +} +#endif //INCLUDED_DP_DISCOVERY_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h new file mode 100644 index 0000000..95eb53f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h @@ -0,0 +1,323 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_edid.h * +* reading EDID from SST/MST Device * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_EDID_H +#define INCLUDED_DP_EDID_H + +#include "dp_buffer.h" +#include "dp_auxbus.h" +#include "dp_address.h" +#include "dp_messages.h" +#include "dp_messagecodings.h" +#include "dp_timer.h" + +namespace DisplayPort +{ + class Edid; + + // + // Shared utility object for MST/SST edid reading. + // This object handles the retry, CRC validating, + // identification of EDID length, DDC ping, etc. + // + // It's designed as an asynchronous state machine + // because of the way MST EDID reads are built. 
+    //
+    class EdidAssembler
+    {
+    public:
+        EdidAssembler(Edid * const edid, bool bPatchCrc = false);
+
+        //
+        // Returns false when the EDID data read so far is invalid.
+        // On success, seg is the segment from which to read the next block and
+        // offset is the offset within that block at which to start reading.
+        //
+        bool readNextRequest(NvU8 & seg, NvU8 & offset);
+
+        // Post the data (or the failure) for the last requested block.
+        void postReply(const Buffer & buffer, unsigned sizeCompleted, bool success);
+        void postReply(unsigned char * data, unsigned sizeCompleted, bool success);
+
+        // Returns true once all the required blocks have been read.
+        bool readIsComplete();
+        void reset();
+    private:
+        Edid * edid;
+        Stream stream;
+
+        NvU8 oldBlockChecksum;
+        unsigned blocksRead;
+        unsigned totalBlockCnt;
+        unsigned retriesCount;
+        bool bPatchCrc;
+    };
+
+    //
+    // EDID
+    //
+    class Edid
+    {
+    public:
+        Edid();
+        ~Edid();
+
+        Buffer * getBuffer() const { return &buffer; }
+        NvU8 getFirstPageChecksum();    // Get checksum byte
+        NvU8 getLastPageChecksum();     // Get checksum byte for last block
+
+        bool verifyCRC();
+        unsigned getEdidVersion();
+        unsigned getBlockCount();
+        const char * getName() const;
+        unsigned getEdidSize() const;
+        bool isChecksumValid() const;
+        bool isJunkEdid() const;
+        bool isFallbackEdid() const;
+        void swap(Edid & right);
+        void applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDenylistData);
+        void patchCrc();
+        void setForcedEdidChecksum(bool set)
+        {
+            this->forcedCheckSum = set;
+        }
+
+        void setFallbackFlag(bool set)
+        {
+            this->fallbackEdid = set;
+        }
+
+        void setPatchedChecksum(bool set)
+        {
+            this->patchedChecksum = set;
+        }
+
+        bool isPatchedChecksum() const
+        {
+            return this->patchedChecksum;
+        }
+
+        bool isValidHeader() const
+        {
+            NvU8 validHeaderData[8] = {
+                0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+                0xFF, 0xFF, 0x00};
+
+            if (buffer.getLength() < 0x8)
+                return false;
+
+            for (unsigned i = 0; i < 8; i++)
+            {
+                if (buffer.data[i] != validHeaderData[i])
+                {
+                    DP_LOG(("DP-EDID> Invalid EDID Header"));
+                    return false;
+                }
+            }
+
+            return true;
+        }
+
+        unsigned getManufId() const
+        {
+            if (buffer.getLength() < 0xa)
+                return 0;
+
+            return ((buffer.data[0x9] << 8) | (buffer.data[0x8]));
+        }
+
+        unsigned getProductId() const
+        {
+            if (buffer.getLength() < 0xc)
+                return 0;
+
+            return ((buffer.data[0xb] << 8) | (buffer.data[0xa]));
+        }
+
+        unsigned getYearWeek() const
+        {
+            if (buffer.getLength() < 0x12)
+                return 0;
+
+            return ((buffer.data[0x11] << 8) | (buffer.data[0x10]));
+        }
+
+        typedef struct
+        {
+            bool extensionCountDisabled;
+            bool dataForced;
+            bool disableDpcdPowerOff;
+            bool forceMaxLinkConfig;
+            bool powerOnBeforeLt;
+            bool skipRedundantLt;
+            bool skipCableBWCheck;
+            bool overrideOptimalLinkCfg;
+            bool overrideMaxLaneCount;
+            bool ignoreRedundantHotplug;
+            bool delayAfterD3;
+            bool keepLinkAlive;
+            bool useLegacyAddress;
+            bool reassessMaxLink;
+            bool bIgnoreDscCap;          // Ignore DSC even if sink reports DSC capability
+        } _WARFlags;
+
+        _WARFlags WARFlags;
+
+        typedef struct
+        {
+            unsigned maxLaneCount;       // Max lane count value to override
+            unsigned maxLaneAtHighRate;  // Max lane count supported at HBR
+            unsigned maxLaneAtLowRate;   // Max lane count supported at RBR
+            unsigned optimalLinkRate;    // Optimal link rate value to override
+            unsigned optimalLaneCount;   // Optimal lane count value to override
+        } _WARData;
+
+        _WARData WARData;
+
+        void resetData()
+        {
+            buffer.reset();
+            checkSumValid = false;
+            forcedCheckSum = false;
+            fallbackEdid = false;
+            // clear the WARFlags
+            _WARFlags temp = {0};
+            WARFlags = temp;
+        }
+
+        bool operator== (const Edid & other)
+        {
+            return (buffer == other.buffer);
+        }
+
+        bool operator!= (const Edid & other)
+        {
+            return !(buffer == other.buffer);
+        }
+
+    private:
+        void validateCheckSum();
+
+        mutable Buffer buffer;
+        bool checkSumValid;
+        bool forcedCheckSum;
+        bool fallbackEdid;
+        bool patchedChecksum;
+    };
+
+    //
+    // SST EDID Read API
+    //
+    bool EdidReadSST(Edid & edid, AuxBus * aux, Timer * timer, bool pendingTestRequestEdidRead = false, bool bBypassAssembler = false, MainLink *main = NULL);
+
+    enum EDID_DDC
+    {
+        EDID_DDC_NONE = 0x00,
+        EDID_DDC_ADR0 = 0xA0,
+        EDID_DDC_ADR1 = 0xA2,
+        EDID_DDC_ADR2 = 0xA6,
+        EDID_SEG_SELECTOR_OFFSET = 0x60,
+    };
+    EDID_DDC sstDDCPing(AuxBus & dpAux);
+
+    //
+    // MST EDID Read API
+    //
+
+    class EdidReadMultistream : public Object, protected MessageManager::Message::MessageEventSink, Timer::TimerCallback
+    {
+    public:
+        class EdidReadMultistreamEventSink    // Connector will inherit from this
+        {
+        public:
+            virtual void mstEdidCompleted(EdidReadMultistream * from) = 0;
+            virtual void mstEdidReadFailed(EdidReadMultistream * from) = 0;
+        };
+
+        EdidReadMultistream(Timer * timer, MessageManager * manager, EdidReadMultistream::EdidReadMultistreamEventSink * sink, Address topologyAddress)
+            : topologyAddress(topologyAddress), manager(manager), edidReaderManager(&edid), ddcIndex(0),
+              retries(0), timer(timer), sink(sink)
+        {
+            startReadingEdid();
+        }
+
+        Edid edid;
+        Address topologyAddress;
+        ~EdidReadMultistream();
+
+    private:
+        void startReadingEdid();
+
+        MessageManager * manager;
+        RemoteI2cReadMessage remoteI2cRead;
+        EdidAssembler edidReaderManager;    // TODO: find a clearer name for edidReaderManager, e.g. edidAssembler
+        NvU8 DDCAddress;
+        NvU8 ddcIndex;
+        unsigned retries;
+        Timer * timer;
+
+        void readNextBlock(NvU8 seg, NvU8 offset);
+        void failedToReadEdid();
+        void expired(const void * tag);
+
+        EdidReadMultistreamEventSink * sink;
+
+        virtual void messageFailed(MessageManager::Message * from, NakData * nakData);
+        virtual void messageCompleted(MessageManager::Message * from);
+        void edidAttemptDone(bool succeeded);
+    };
+
+    //
+    // Useful defines
+    //
+    enum
+    {
+        EDID_BLOCK_SIZE = 0x80,
+        EDID_SEGMENT_SIZE = 2*EDID_BLOCK_SIZE,
+        EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT = 3,
+        // DID EDID CTS v1.3 d12 currently outlines that Source shall support up to 16 blocks of EDID data.
+        EDID_MAX_BLOCK_COUNT = 16,
+    };
+
+    static const NvU8 ddcAddrList[] = {EDID_DDC_ADR0, EDID_DDC_ADR1, EDID_DDC_ADR2};
+    const NvU8 ddcAddrListSize = sizeof(ddcAddrList)/sizeof(NvU8);
+
+    // HDMI 1.4 Section 8.5: HDMI Sink can have up to 100ms to get EDID ready.
+    const NvU8 EDID_READ_RETRY_TIMEOUT_MS = 100;
+    const NvU8 EDID_MAX_AUX_RETRIES = 10;
+    const NvU8 EDID_AUX_WAIT_TIME = 1;
+    NvU8 getEDIDBlockChecksum(const Buffer &);
+
+    void makeEdidFallback(Edid & edid, NvU32 fallbackFormatSupported = 0);
+    void makeEdidFallbackVGA(Edid & edid);
+
+}
+
+#endif //INCLUDED_DP_EDID_H
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h
new file mode 100644
index 0000000..5ff03df
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h
@@ -0,0 +1,410 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_evoadapter.h                                                   *
+*    EVO adapter: binds the library's MainLink/AuxBus abstractions to the   *
+*    resource manager (RM) control interface.                               *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_EVOADAPTER_H
+#define INCLUDED_DP_EVOADAPTER_H
+
+#include "dp_timer.h"
+#include "dp_auxbus.h"
+#include "dp_mainlink.h"
+#include "dp_wardatabase.h"
+#include "dp_auxdefs.h"
+#include "dp_regkeydatabase.h"
+
+#include
+
+#define HDCP_DUMMY_CN (0x1)
+#define HDCP_DUMMY_CKSV (0xFFFFF)
+
+
+namespace DisplayPort
+{
+    class EvoInterface
+    {
+    public:
+        //
+        // IOCTL access to RM class DISPLAY_COMMON and NV50_DISPLAY
+        //
+        virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize) = 0;
+        virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize) = 0;
+
+        virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount)
+        {
+            linkRate = 0;
+            laneCount = 0;
+            return true;
+        }
+
+        //
+        // Call to tell DD that linkTraining will be performed.
+        // Required when a head is attached and we enter flush mode (on GPUs
+        // that need it).  Required to enable/disable audio.
+        //
+        // Derived classes that override these functions must call down to
+        // DisplayPort::EvoInterface::pre/postLinkTraining() to inherit this
+        // implementation.
+        //
+        virtual void preLinkTraining(NvU32 head)
+        {
+        }
+        virtual void postLinkTraining(NvU32 head)
+        {
+        }
+
+        virtual NvU32 getSubdeviceIndex() = 0;
+        virtual NvU32 getDisplayId() = 0;
+        virtual NvU32 getSorIndex() = 0;
+        virtual NvU32 getLinkIndex() = 0;    // Link A = 0, Link B = 1
+        //
+        // Query the value of a registry key.  Implementations should return 0
+        // if the regkey is not set.
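+        // (For example, a client might back this with its OS registry service --
+        //  a sketch, with osReadRegistryDword() as a hypothetical host call:
+        //
+        //      virtual NvU32 getRegkeyValue(const char *key)
+        //      {
+        //          NvU32 value = 0;
+        //          return osReadRegistryDword(key, &value) ? value : 0;
+        //      }
+        //  )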
+        //
+        virtual NvU32 getRegkeyValue(const char *key)
+        {
+            return 0;
+        }
+        virtual NvU32 monitorDenylistInfo(NvU32 manufId, NvU32 productId, DpMonitorDenylistData *pDenylistData)
+        {
+            return 0;
+        }
+
+        virtual bool isInbandStereoSignalingSupported()
+        {
+            return false;
+        }
+    };
+
+    MainLink * MakeEvoMainLink(EvoInterface * provider, Timer * timer);
+    AuxBus * MakeEvoAuxBus(EvoInterface * provider, Timer * timer);
+
+    class EvoAuxBus : public AuxBus
+    {
+    public:
+        EvoAuxBus(EvoInterface * provider, Timer * timer)
+            : provider(provider),
+              timer(timer),
+              displayId(provider->getDisplayId()),
+              subdeviceIndex(provider->getSubdeviceIndex()),
+              devicePlugged(false)
+        {
+        }
+
+        virtual status transaction(Action action, Type type, int address, NvU8 * buffer,
+                                   unsigned sizeRequested, unsigned * sizeCompleted,
+                                   unsigned * pNakReason = NULL,
+                                   NvU8 offset = 0, NvU8 nWriteTransactions = 0);
+        virtual unsigned transactionSize();
+        virtual void setDevicePlugged(bool);
+
+    private:
+        EvoInterface * provider;
+        Timer * timer;
+        NvU32 displayId;
+        NvU32 subdeviceIndex;
+        bool devicePlugged;
+    };
+
+    class EvoMainLink : public MainLink
+    {
+        EvoInterface * provider;
+        Timer * timer;
+        NvU32 displayId;
+        NvU32 subdeviceIndex;
+        NvU32 _maxLinkRateSupportedGpu;
+        NvU32 _maxLinkRateSupportedDfp;
+        unsigned allHeadMask;
+        bool _hasIncreasedWatermarkLimits;
+        bool _hasMultistream;
+        bool _isPC2Disabled;
+        bool _isEDP;
+        bool _isDP1_2Supported;
+        bool _isDP1_4Supported;
+        bool _isStreamCloningEnabled;
+        bool _needForceRmEdid;
+        bool _skipPowerdownEDPPanelWhenHeadDetach;
+        bool _isDscDisabledByRegkey;
+        bool _isMstDisabledByRegkey;
+        bool _isFECSupported;
+        bool _useDfpMaxLinkRateCaps;
+        bool _applyLinkBwOverrideWarRegVal;
+        bool _isDynamicMuxCapable;
+        bool _enableMSAOverrideOverMST;
+
+        bool _isLTPhyRepeaterSupported;
+        //
+        // LTTPR count reported by RM; it might not match what the DPLib probe
+        // finds.  For example, some Intel LTTPRs might not be ready to respond
+        // to the 0xF0000 probe done by RM, but when DPLib checks the same DPCD
+        // offsets they respond properly.  This can cause serious LT problems.
+        //
+        unsigned _rmPhyRepeaterCount;
+
+        struct DSC
+        {
+            bool isDscSupported;
+            unsigned encoderColorFormatMask;
+            unsigned lineBufferSizeKB;
+            unsigned rateBufferSizeKB;
+            unsigned bitsPerPixelPrecision;
+            unsigned maxNumHztSlices;
+            unsigned lineBufferBitDepth;
+        } _DSC;
+
+    private:
+        virtual void initializeRegkeyDatabase();
+        virtual void applyRegkeyOverrides();
+
+    public:
+        EvoMainLink(EvoInterface * provider, Timer * timer);
+
+        virtual bool hasIncreasedWatermarkLimits()
+        {
+            return _hasIncreasedWatermarkLimits;
+        }
+
+        virtual bool hasMultistream()
+        {
+            return _hasMultistream;
+        }
+
+        virtual bool isPC2Disabled()
+        {
+            return _isPC2Disabled;
+        }
+
+        virtual bool isDP1_2Supported()
+        {
+            return _isDP1_2Supported;
+        }
+        virtual bool isDP1_4Supported()
+        {
+            return _isDP1_4Supported;
+        }
+        virtual bool isFECSupported()
+        {
+            return _isFECSupported;
+        }
+
+        virtual bool isStreamCloningEnabled()
+        {
+            return _isStreamCloningEnabled;
+        }
+
+        virtual NvU32 maxLinkRateSupported()
+        {
+            //
+            // For cases where RM asks DPLib to honor the maxLinkRate limit defined
+            // in the DCB, always use that as the limit; the regkey has no meaning
+            // in this case.  Otherwise, based on the regkey, honor either the DCB
+            // limit or the max link rate for the specific GPU architecture.  This
+            // is needed to avoid regressions on existing chips.
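+            // (Illustrative numbers: if the DCB caps the DFP at HBR2 -- 540000000
+            //  bytes/sec per lane in the units used here -- while the GPU
+            //  architecture supports HBR3 (810000000), the override returns the
+            //  smaller DFP limit; otherwise the GPU limit stands.)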
+ // + if ((_applyLinkBwOverrideWarRegVal || _useDfpMaxLinkRateCaps) && + (_maxLinkRateSupportedDfp < _maxLinkRateSupportedGpu)) + { + return _maxLinkRateSupportedDfp; + } + + return _maxLinkRateSupportedGpu; + } + + virtual bool isForceRmEdidRequired() + { + return _needForceRmEdid; + } + + virtual bool fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize); + virtual bool applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize); + + virtual bool isDynamicMuxCapable() + { + return _isDynamicMuxCapable; + } + + virtual bool isInternalPanelDynamicMuxCapable() + { + return (_isDynamicMuxCapable && _isEDP); + } + + // Get GPU DSC capabilities + virtual void getDscCaps(bool *pbDscSupported, + unsigned *pEncoderColorFormatMask, + unsigned *pLineBufferSizeKB, + unsigned *pRateBufferSizeKB, + unsigned *pBitsPerPixelPrecision, + unsigned *pMaxNumHztSlices, + unsigned *pLineBufferBitDepth) + { + if (pbDscSupported) + { + *pbDscSupported = _DSC.isDscSupported; + } + + if (pEncoderColorFormatMask) + { + *pEncoderColorFormatMask = _DSC.encoderColorFormatMask; + } + + if (pLineBufferSizeKB) + { + *pLineBufferSizeKB = _DSC.lineBufferSizeKB; + } + + if (pRateBufferSizeKB) + { + *pRateBufferSizeKB = _DSC.rateBufferSizeKB; + } + + if (pBitsPerPixelPrecision) + { + *pBitsPerPixelPrecision = _DSC.bitsPerPixelPrecision; + } + + if (pMaxNumHztSlices) + { + *pMaxNumHztSlices = _DSC.maxNumHztSlices; + } + + if (pLineBufferBitDepth) + { + *pLineBufferBitDepth = _DSC.lineBufferBitDepth; + } + } + + virtual NvU32 getRootDisplayId() + { + return this->displayId; + } + + virtual bool isLttprSupported() + { + return this->_isLTPhyRepeaterSupported; + } + + // Return the current mux state. Returns false if device is not mux capable + bool getDynamicMuxState(NvU32 *muxState); + + virtual bool aquireSema(); + virtual void releaseSema(); + virtual bool physicalLayerSetTestPattern(PatternInfo * patternInfo); + + virtual void preLinkTraining(NvU32 head); + virtual void postLinkTraining(NvU32 head); + virtual NvU32 getRegkeyValue(const char *key); + virtual const DP_REGKEY_DATABASE& getRegkeyDatabase(); + virtual NvU32 getSorIndex(); + virtual bool isInbandStereoSignalingSupported(); + virtual bool train(const LinkConfiguration & link, bool force, LinkTrainingType linkTrainingType, + LinkConfiguration *retLink, bool bSkipLt = false, bool isPostLtAdjRequestGranted = false, + unsigned phyRepeaterCount = 0); + virtual bool retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords); + virtual void getLinkConfig(unsigned & laneCount, NvU64 & linkRate); + virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount); + virtual bool setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams); + virtual bool setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams); + virtual bool setFlushMode(); + virtual void clearFlushMode(unsigned headMask, bool testMode=false); + + virtual bool dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex); + + void triggerACT(); + void configureHDCPRenegotiate(NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV, bool bForceReAuth = false, + bool bRxIDMsgPending = false); + void configureHDCPGetHDCPState(HDCPState &hdcpState); + bool rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache * dfpCache, NvBool bResetDfp); + + virtual NvU32 streamToHead(NvU32 streamId, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + virtual 
NvU32 headToStream(NvU32 head, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + void configureSingleStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + bool bEnhancedFraming, + NvU32 tuSize, + NvU32 waterMark, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false); + + void configureMultiStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + NvU32 slotStart, + NvU32 slotEnd, + NvU32 PBN, + NvU32 Timeslice, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false); + + void configureSingleHeadMultiStreamMode(NvU32 displayIDs[], + NvU32 numStreams, + NvU32 mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + void configureMsScratchRegisters(NvU32 address, + NvU32 hopCount, + NvU32 driverState); + + bool isActive(); + bool isEDP(); + bool skipPowerdownEdpPanelWhenHeadDetach(); + bool supportMSAOverMST(); + bool queryAndUpdateDfpParams(); + bool controlRateGoverning(NvU32 head, bool enable, bool updateNow); + + bool getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN *testPattern); + bool setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride); + bool getDpLaneData(NvU32 *numLanes, NvU32 *data); + bool setDpLaneData(NvU32 numLanes, NvU32 *data); + void configurePowerState(bool bPowerUp); + NvU32 monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData); + NvU32 allocDisplayId(); + bool freeDisplayId(NvU32 displayId); + void queryGPUCapability(); + bool getEdpPowerData(bool *panelPowerOn, bool *dpcdPowerStateD0); + virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status); + + void configureTriggerSelect(NvU32 head, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + void configureTriggerAll(NvU32 head, bool enable); + bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates); + bool configureFec(const bool bEnableFec); + }; + +} + +#endif //INCLUDED_DP_EVOADAPTER_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h new file mode 100644 index 0000000..d421e7d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_groupimpl.h * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_GROUPIMPL_H +#define INCLUDED_DP_GROUPIMPL_H + +#include "dp_connector.h" +#include "dp_deviceimpl.h" +#include "dp_linkedlist.h" +#include "dp_watermark.h" +#include "dp_auxdefs.h" + +namespace DisplayPort +{ + + struct GroupImpl : public Group, ListElement, Timer::TimerCallback + { + ConnectorImpl * parent; + LinkedList members; + List elements; + unsigned headIndex; + unsigned streamIndex; + bool streamValidationDone; + bool headInFirmware; // Set if this is a firmware run mode. If set lastModesetInfo is NOT valid + bool bIsHeadShutdownNeeded; // Set if head shutdown is requested during modeset + bool hdcpEnabled; + bool hdcpPreviousStatus; + bool bWaitForDeAllocACT; + bool bDeferredPayloadAlloc; + ModesetInfo lastModesetInfo; + DSC_MODE dscModeRequest; // DSC mode requested during NAB + DSC_MODE dscModeActive; // DSC mode currently active, set in NAE + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID singleHeadMultiStreamID; + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultiStreamMode; + DP_COLORFORMAT colorFormat; + + struct + { + unsigned PBN; + int count; + int begin; + bool hardwareDirty; // Does the configureStream need to be called again? 
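+            // (begin and count presumably give this stream's slot range within
+            //  the 64-slot MTP payload table -- see totalTimeslots in
+            //  dp_linkconfig.h; PBN is the bandwidth allocated to the stream.)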
+ Watermark watermarks; // Cached watermark calculations + } timeslot; + + GroupImpl(ConnectorImpl * parent, bool isFirmwareGroup = false) + : parent(parent), + streamValidationDone(true), + headInFirmware(false), + bIsHeadShutdownNeeded(true), + hdcpEnabled(false), + hdcpPreviousStatus(false), + bWaitForDeAllocACT(false), + dscModeRequest(DSC_MODE_NONE), + dscModeActive(DSC_MODE_NONE), + singleHeadMultiStreamID(DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY), + singleHeadMultiStreamMode(DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE), + headAttached(false) + { + timeslot.count = 0; + } + + ~GroupImpl() + { + } + + virtual void insert(Device * dev); + virtual void remove(Device * dev); + void update(Device * dev, bool allocationState); // send the allocatepayload/deallocatepayload message + bool contains(Device * dev) { return members.contains(dev); } + virtual Device * enumDevices(Device * previousDevice); + + void updateVbiosScratchRegister(Device * lastDevice); // Update the VBIOS scratch register with last lit display + + // + // Timer callback tags. + // (we pass the address of these variables as context to ::expired) + // + char tagHDCPReauthentication; + char tagStreamValidation; + + unsigned authRetries; // Retry counter for the authentication. + + virtual void expired(const void * tag); + virtual bool hdcpGetEncrypted(); + virtual void destroy(); + void cancelHdcpCallbacks(); + + bool isHeadAttached() { return headAttached; } + void setHeadAttached(bool attached); + + private: + bool headAttached; // True if modeset started (during NAB). Sets back to False during NDE + }; +} + +#endif //INCLUDED_DP_GROUPIMPL_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h new file mode 100644 index 0000000..2a1318d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_guid.h * +* GUID struct and builder class * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_GUID_H +#define INCLUDED_DP_GUID_H + +#include "dp_internal.h" +#include "dp_timer.h" + +namespace DisplayPort +{ +#define DPCD_GUID_SIZE 16 + + struct GUID + { + NvU8 data[DPCD_GUID_SIZE]; + + GUID() + { + dpMemZero(&data, sizeof(data)); + } + + bool isGuidZero() + { + for (unsigned i = 0 ; i < DPCD_GUID_SIZE; i++) + if (data[i]) + return false; + + return true; + } + + bool operator == (const GUID & other) const + { + for (unsigned i = 0 ; i < DPCD_GUID_SIZE; i++) + if (data[i] != other.data[i]) + return false; + + return true; + } + + bool operator != (const GUID & other) const + { + return !((*this) == other); + } + + void copyFrom(const NvU8 * buffer) + { + dpMemCopy(&this->data[0], buffer, sizeof data); + } + + // XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + // Two Xs per byte, plus four dashes and a NUL byte. + typedef char StringBuffer[DPCD_GUID_SIZE*2 + 5]; + char * toString(StringBuffer & buffer) const + { + char *p = &buffer[0]; + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) { + dpByteToHexChar(p, data[i]); + p += 2; + if (i == 3 || i == 5 || i == 7 || i == 9) + *p++ = '-'; + } + + *p++ = '\0'; + + DP_ASSERT(p == buffer + sizeof(buffer)); + + return buffer; + } + }; + + class GUIDBuilder + { + NvU32 salt; + NvU32 previousRandom; + Timer * source; + + + // + // Linear congruential random number generator + // Seed values chosen from numerical methods + // + NvU32 random(); + + public: + GUIDBuilder(Timer * source, NvU32 salt); + + void makeGuid(GUID & guid); + }; +} + +#endif //INCLUDED_DP_GUID_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h new file mode 100644 index 0000000..aa96fab --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_hostimp.h * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_HOSTIMP_H +#define INCLUDED_DP_HOSTIMP_H + +#include "nvtypes.h" +#include "dp_tracing.h" + +extern "C" void * dpMalloc(NvLength size); +extern "C" void dpFree(void * ptr); +extern "C" void dpDebugBreakpoint(); +// Note: dpPrint() implementations are expected to append a newline themselves. +extern "C" void dpPrint(const char * formatter, ...); +extern "C" void dpTraceEvent(NV_DP_TRACING_EVENT event, + NV_DP_TRACING_PRIORITY priority, NvU32 numArgs, ...); + +#if defined(_DEBUG) || defined(DEBUG) + #define NV_DP_ASSERT_ENABLED 1 +#else + #define NV_DP_ASSERT_ENABLED 0 +#endif + +#if NV_DP_ASSERT_ENABLED +extern "C" void dpAssert(const char *expression, const char *file, + const char *function, int line); +#endif + +#endif // INCLUDED_DP_HOSTIMP_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h new file mode 100644 index 0000000..e233a75 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h @@ -0,0 +1,139 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_internal.h * +* RM stubs to allow unit testing. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_INTERNAL_H +#define INCLUDED_DP_INTERNAL_H + +// +// Clients should not include this file +// This file provides the private malloc implementation. +// + +#include +#include // size_t + +#include "dp_object.h" +#include "dp_ringbuffer.h" + +static inline void dpByteToHexChar(char *output, NvU8 c) +{ + char dig = (c>>4) & 0xF; + output[0] = dig < 10 ? dig + '0' : dig + 'A' - 10; + dig = c & 0xF; + output[1] = dig < 10 ? 
dig + '0' : dig + 'A' - 10; +} + +static inline void dpHexDump(char * output, unsigned outSize, NvU8 * buffer, unsigned size) +{ + char * tail = output; + if (outSize < size * 3 + 1) + return; + + for (unsigned i = 0; i < size; i++) + { + dpByteToHexChar(tail, buffer[i]); + tail += 2; + *tail++ = ' '; + } + *tail = 0; +} + +namespace DisplayPort +{ + template + inline void swap_args(T & left, T & right) + { + T temp = left; + left = right; + right = temp; + } + + inline NvU64 divide_ceil(NvU64 a, NvU64 b) + { + return (a + b - 1) / b; + } + + inline NvU64 divide_floor(NvU64 a, NvU64 b) + { + return a / b; + } + + inline NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c) + { + // NvU64 arithmetic to keep precision and avoid floats + // a*b/c = (a/c)*b + ((a%c)*b + c/2)/c + return ((a/c)*b + ((a%c)*b + c/2)/c); + } +} + +#define DP_MIN(x,y) ((x)<(y)?(x):(y)) +#define DP_MAX(x,y) ((x)<(y)?(y):(x)) + +// +// Macro to suppress unused local variable +// +template void dp_used(const T & /*x*/) {} +#define DP_USED(x) dp_used(x) + + +// +// Basic debug logging facility +// + +#if NV_DP_ASSERT_ENABLED +#define DP_LOG(x) \ + do \ + { \ + dpPrint x; \ + addDpLogRecord x; \ + }while (false) + +#define DP_ASSERT(x) \ + if (!(x)) \ + { \ + addDpAssertRecord(); \ + dpAssert(#x, __FILE__, __FUNCTION__, __LINE__); \ + dpDebugBreakpoint(); \ + } +#else + +#define DP_LOG(x) + +#define DP_ASSERT(x) \ + { \ + DP_USED(x); \ + if (!(x)) \ + { \ + addDpAssertRecord(); \ + } \ + } +#endif + +#endif //INCLUDED_DP_INTERNAL_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h new file mode 100644 index 0000000..311b054 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h @@ -0,0 +1,450 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_linkconfig.h                                                   *
+*    Link Configuration object implementation                               *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_LINKCONFIG_H
+#define INCLUDED_DP_LINKCONFIG_H
+
+#include "dp_auxdefs.h"
+#include "dp_internal.h"
+#include "dp_watermark.h"
+#include "ctrl/ctrl0073/ctrl0073specific.h" // NV0073_CTRL_HDCP_VPRIME_SIZE
+#include "displayport.h"
+
+namespace DisplayPort
+{
+    typedef NvU64 LinkRate;
+
+    class LinkRates : virtual public Object
+    {
+    public:
+        // Store each link rate as a multiple of 270 Mbps (the DPCD LINK_BW unit) to save space
+        NvU8 element[NV_DPCD_SUPPORTED_LINK_RATES__SIZE];
+        NvU8 entries;
+
+        LinkRates() : entries(0) {}
+
+        void clear()
+        {
+            entries = 0;
+            for (int i = 0; i < NV_DPCD_SUPPORTED_LINK_RATES__SIZE; i++)
+            {
+                element[i] = 0;
+            }
+        }
+
+        bool import(NvU8 linkBw)
+        {
+            if (entries < NV_DPCD_SUPPORTED_LINK_RATES__SIZE)
+            {
+                element[entries] = linkBw;
+                entries++;
+                return true;
+            }
+            else
+                return false;
+        }
+
+        NvU8 getNumLinkRates()
+        {
+            return entries;
+        }
+
+        LinkRate getLowerRate(LinkRate rate)
+        {
+            int i;
+            NvU8 linkBw = (NvU8)(rate / DP_LINK_BW_FREQ_MULTI_MBPS);
+
+            if ((entries == 0) || (linkBw <= element[0]))
+                return 0;
+
+            for (i = entries - 1; i > 0; i--)
+            {
+                if (linkBw > element[i])
+                    break;
+            }
+
+            rate = (LinkRate)element[i] * DP_LINK_BW_FREQ_MULTI_MBPS;
+            return rate;
+        }
+
+        LinkRate getMaxRate()
+        {
+            LinkRate rate = 0;
+            if ((entries > 0) &&
+                (entries <= NV_DPCD_SUPPORTED_LINK_RATES__SIZE))
+            {
+                rate = (LinkRate)element[entries - 1] * DP_LINK_BW_FREQ_MULTI_MBPS;
+            }
+
+            return rate;
+        }
+    };
+
+    class LinkPolicy : virtual public Object
+    {
+        bool bNoFallback;    // No fallback when LT fails
+        LinkRates linkRates;
+
+    public:
+        LinkPolicy() : bNoFallback(false)
+        {
+        }
+        bool skipFallback()
+        {
+            return bNoFallback;
+        }
+        void setSkipFallBack(bool bSkipFallback)
+        {
+            bNoFallback = bSkipFallback;
+        }
+
+        LinkRates *getLinkRates()
+        {
+            return &linkRates;
+        }
+    };
+    enum
+    {
+        totalTimeslots = 64,
+        totalUsableTimeslots = totalTimeslots - 1
+    };
+
+    // Effective per-lane data rates, in bytes per second (after 8b/10b coding)
+    enum
+    {
+        RBR          = 162000000,
+        EDP_2_16GHZ  = 216000000,
+        EDP_2_43GHZ  = 243000000,
+        HBR          = 270000000,
+        EDP_3_24GHZ  = 324000000,
+        EDP_4_32GHZ  = 432000000,
+        HBR2         = 540000000,
+        EDP_6_75GHZ  = 675000000,
+        HBR3         = 810000000
+    };
+
+    struct HDCPState
+    {
+        bool HDCP_State_Encryption;
+        bool HDCP_State_1X_Capable;
+        bool HDCP_State_22_Capable;
+        bool HDCP_State_Authenticated;
+        bool HDCP_State_Repeater_Capable;
+    };
+
+    struct HDCPValidateData
+    {
+    };
+
+    typedef enum
+    {
+        DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE,
+        DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST,
+        DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST,
+    } DP_SINGLE_HEAD_MULTI_STREAM_MODE;
+
+#define HEAD_INVALID_STREAMS 0
+#define HEAD_DEFAULT_STREAMS 1
+
+    typedef enum
+    {
+        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY   = 0,
+        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY = 1,
+        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_MAX       = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY,
+    } DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID;
+
+#define DP_INVALID_SOR_INDEX 0xFFFFFFFF
+#define DSC_DEPTH_FACTOR     16
+
+
+    class LinkConfiguration : virtual public Object
+    {
+    public:
+        LinkPolicy policy;
+        unsigned lanes;
+        LinkRate peakRatePossible;
+        LinkRate peakRate;
+        LinkRate minRate;
+        bool enhancedFraming;
+        bool multistream;
+        bool disablePostLTRequest;
+        bool bEnableFEC;
+        bool bDisableLTTPR;
+
+        //
+        // The counter to record how many times link training happens.
+        // Clients can reset the counter by calling setLTCounter(0).
+        //
+        unsigned linkTrainCounter;
+
+        LinkConfiguration() :
+            lanes(0), peakRatePossible(0), peakRate(0), minRate(0),
+            enhancedFraming(false), multistream(false), disablePostLTRequest(false),
+            bEnableFEC(false), bDisableLTTPR(false), linkTrainCounter(0) {};
+
+        LinkConfiguration(LinkPolicy * p, unsigned lanes, LinkRate peakRate,
+                          bool enhancedFraming, bool MST, bool disablePostLTRequest = false,
+                          bool bEnableFEC = false, bool bDisableLTTPR = false) :
+            lanes(lanes), peakRatePossible(peakRate), peakRate(peakRate),
+            enhancedFraming(enhancedFraming), multistream(MST),
+            disablePostLTRequest(disablePostLTRequest),
+            bEnableFEC(bEnableFEC), bDisableLTTPR(bDisableLTTPR),
+            linkTrainCounter(0)
+        {
+            // downrate for spread and FEC
+            minRate = linkOverhead(peakRate);
+            if (p)
+            {
+                policy = *p;
+            }
+        }
+
+        void setLTCounter(unsigned counter)
+        {
+            linkTrainCounter = counter;
+        }
+
+        unsigned getLTCounter()
+        {
+            return linkTrainCounter;
+        }
+
+        NvU64 linkOverhead(NvU64 rate)
+        {
+            if (bEnableFEC)
+            {
+                // If FEC is enabled, we have to account for a 3% overhead
+                // for FEC+downspread, according to the DP 1.4 spec.
+                return rate - 3 * rate / 100;
+            }
+            else
+            {
+                // If FEC is not enabled, the link overhead comprises only the
+                // 0.5% downspread.
+                return rate - 5 * rate / 1000;
+            }
+        }
+
+        void enableFEC(bool setFEC)
+        {
+            bEnableFEC = setFEC;
+
+            // If FEC is enabled, update minRate with FEC+downspread overhead.
+            minRate = linkOverhead(peakRate);
+        }
+
+        LinkConfiguration(unsigned long TotalLinkPBN)
+            : enhancedFraming(true),
+              multistream(true),
+              disablePostLTRequest(false),
+              bEnableFEC(false),
+              bDisableLTTPR(false),
+              linkTrainCounter(0)
+        {
+            // Reverse engineer a link configuration from TotalLinkPBN.
+            // Note that HBR2 is twice HBR, so PBN alone cannot tell such
+            // configurations apart: the table below treats HBR2 x1 the same
+            // as HBR x2, and so on.
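+            // (1 PBN corresponds to 54/64 MByte/s of payload bandwidth, so e.g.
+            //  HBR x1 = 270 MByte/s maps to 270 * 64/54 = 320 PBN, matching the
+            //  table below.)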
+
+            //
+            //  BW Effective   Lanes   TotalLinkPBN
+            //  165            1       195.5555556
+            //  165            2       391.1111111
+            //  165            4       782.2222222
+            //  270            1       320
+            //  270            2       640
+            //  270            4       1280
+            //  270            8       2560
+            //
+
+            if (TotalLinkPBN <= 90)
+                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 0;    // FAIL
+            else if (TotalLinkPBN <= 195)
+                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 1;
+            else if (TotalLinkPBN <= 320)
+                peakRatePossible = peakRate = HBR, minRate = linkOverhead(HBR), lanes = 1;
+            else if (TotalLinkPBN <= 391)
+                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 2;
+            else if (TotalLinkPBN <= 640)
+                peakRatePossible = peakRate = HBR, minRate = linkOverhead(HBR), lanes = 2;    // could be HBR2x1, but TotalLinkPBN works out same
+            else if (TotalLinkPBN <= 782)
+                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 4;
+            else if (TotalLinkPBN <= 960)
+                peakRatePossible = peakRate = HBR3, minRate = linkOverhead(HBR3), lanes = 1;
+            else if (TotalLinkPBN <= 1280)
+                peakRatePossible = peakRate = HBR, minRate = linkOverhead(HBR), lanes = 4;    // could be HBR2x2
+            else if (TotalLinkPBN <= 1920)
+                peakRatePossible = peakRate = HBR3, minRate = linkOverhead(HBR3), lanes = 2;  // could be HBR2x
+            else if (TotalLinkPBN <= 2560)
+                peakRatePossible = peakRate = HBR2, minRate = linkOverhead(HBR2), lanes = 4;
+            else if (TotalLinkPBN <= 3840)
+                peakRatePossible = peakRate = HBR3, minRate = linkOverhead(HBR3), lanes = 4;
+            else {
+                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 0;    // FAIL
+                DP_ASSERT(0 && "Unknown configuration");
+            }
+        }
+
+        void setEnhancedFraming(bool newEnhancedFraming)
+        {
+            enhancedFraming = newEnhancedFraming;
+        }
+
+        bool isValid()
+        {
+            return lanes != laneCount_0;
+        }
+
+        bool lowerConfig(bool bReduceLaneCnt = false)
+        {
+            //
+            // TODO: bReduceLaneCnt is set to fall back to 4 lanes with a lower
+            //       valid link rate.  But we should reset to the max lane count
+            //       the sink supports instead.
+            //
+
+            LinkRate lowerRate = policy.getLinkRates()->getLowerRate(peakRate);
+
+            if (bReduceLaneCnt)
+            {
+                // Reduce laneCount before reducing linkRate
+                if (lanes == laneCount_1)
+                {
+                    if (lowerRate)
+                    {
+                        lanes = laneCount_4;
+                        peakRate = lowerRate;
+                    }
+                    else
+                    {
+                        lanes = laneCount_0;
+                    }
+                }
+                else
+                {
+                    lanes /= 2;
+                }
+            }
+            else
+            {
+                // Reduce the link rate instead of the lane count
+                if (lowerRate)
+                {
+                    peakRate = lowerRate;
+                }
+                else
+                {
+                    lanes /= 2;
+                }
+            }
+
+            minRate = linkOverhead(peakRate);
+
+            return lanes != laneCount_0;
+        }
+
+        void setLaneRate(LinkRate newRate, unsigned newLanes)
+        {
+            peakRate = newRate;
+            lanes = newLanes;
+            minRate = linkOverhead(peakRate);
+        }
+
+        unsigned pbnTotal()
+        {
+            return PBNForSlots(totalUsableTimeslots);
+        }
+
+        void pbnRequired(const ModesetInfo & modesetInfo, unsigned & base_pbn, unsigned & slots, unsigned & slots_pbn)
+        {
+            base_pbn = pbnForMode(modesetInfo);
+            slots = slotsForPBN(base_pbn);
+            slots_pbn = PBNForSlots(slots);
+        }
+
+        NvU32 slotsForPBN(NvU32 allocatedPBN, bool usable = false)
+        {
+            NvU64 bytes_per_pbn      = 54 * 1000000 / 64;     // this comes out exact
+            NvU64 bytes_per_timeslot = peakRate * lanes / 64;
+
+            if (bytes_per_timeslot == 0)
+                return (NvU32)-1;
+
+            if (usable)
+            {
+                // round down to find the usable integral slots for a given value of PBN
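+                // (Worked example: at HBR2 x4, bytes_per_timeslot =
+                //  540000000 * 4 / 64 = 33750000; an allocation of 640 PBN is
+                //  640 * 843750 = 540000000 bytes/sec, i.e. exactly 16 of the
+                //  64 timeslots.)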
+ NvU32 slots = (NvU32)divide_floor(allocatedPBN * bytes_per_pbn, bytes_per_timeslot); + DP_ASSERT(slots <= 64); + + return slots; + } + else + return (NvU32)divide_ceil(allocatedPBN * bytes_per_pbn, bytes_per_timeslot); + } + + NvU32 PBNForSlots(NvU32 slots) // Rounded down + { + NvU64 bytes_per_pbn = 54 * 1000000 / 64; // this comes out exact + NvU64 bytes_per_timeslot = peakRate * lanes / 64; + + return (NvU32)(bytes_per_timeslot * slots/ bytes_per_pbn); + } + + bool operator!= (const LinkConfiguration & right) const + { + return !(*this == right); + } + + bool operator== (const LinkConfiguration & right) const + { + return (this->lanes == right.lanes && + this->peakRate == right.peakRate && + this->enhancedFraming == right.enhancedFraming && + this->multistream == right.multistream && + this->bEnableFEC == right.bEnableFEC); + } + + bool operator< (const LinkConfiguration & right) const + { + NvU64 leftMKBps = peakRate * lanes; + NvU64 rightMKBps = right.peakRate * right.lanes; + + if (leftMKBps == rightMKBps) + { + return (lanes < right.lanes); + } + else + { + return (leftMKBps < rightMKBps); + } + } + }; +} +#endif //INCLUDED_DP_LINKCONFIG_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h new file mode 100644 index 0000000..cb0b6f2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h @@ -0,0 +1,143 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/******************************* DisplayPort *******************************\ +* * +* Module: dp_linkedlist.h * +* A linked list that uses DislayPort::List as a backend, but which * +* allocates the list backbone dynamically. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_LINKEDLIST_H +#define INCLUDED_DP_LINKEDLIST_H + +#include "dp_list.h" + +namespace DisplayPort +{ + template + class LinkedList : public Object + { + // The Element class forms the list backbone and contains pointers to + // each item in the list. + class Element : public ListElement + { + public: + Element(T *item) : item(item) { } + T *item; + }; + + List list; + + // No public copy constructor. 
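+        // (Declared private so a LinkedList cannot be copied from outside the
+        //  class -- the pre-C++11 equivalent of '= delete'; copying would
+        //  confuse ownership of the Element backbone.)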
+ LinkedList(LinkedList &other) { } + + // Find the Element containing an item. + Element *containing(T *item) + { + for (ListElement *le = list.begin(); le != list.end(); le = le->next) + { + Element *e = static_cast(le); + if (e->item == item) + return e; + } + return NULL; + } + + public: + // The list starts out empty. + LinkedList() { } + + // Insert an item at the front of the list. + void insertFront(T *item) + { + // Construct an element and add it to the list. + Element *e = new Element(item); + DP_ASSERT(e); + if (e) + { + list.insertFront(e); + } + } + + // Remove an item from the list. + // O(n) to find the item to remove. + // It is an error to try to remove an item that is not in the list. + void remove(T *item) + { + Element *e = containing(item); + DP_ASSERT(e && "Item was not a member of the list"); + delete e; + } + + // Find the next item in the list after the specified item. If item is + // NULL, this returns the first item. + T *next(T *prev) + { + if (list.isEmpty()) + return NULL; + + // If prev is NULL or not in the list, return the first item. + Element *e = containing(prev); + if (!e) + { + e = static_cast(list.begin()); + return e->item; + } + else if (e->next != list.end()) + { + e = static_cast(e->next); + return e->item; + } + else + { + // prev was the last element in the list. + return NULL; + } + } + + // Query whether an item is a member of the list. + // O(n) + bool contains(T *item) + { + Element *e = containing(item); + return e != NULL; + } + + bool isEmpty() + { + return list.isEmpty(); + } + + T *pop() + { + DP_ASSERT(!list.isEmpty()); + Element *e = static_cast(list.last()); + T *item = e->item; + delete e; + return item; + } + }; +} + +#endif // INCLUDED_DP_LINKEDLIST_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h new file mode 100644 index 0000000..77fd759 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_list.h * +* Simple doubly linked list queue * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_LIST_H +#define INCLUDED_DP_LIST_H + +#include "dp_object.h" + +namespace DisplayPort +{ + // + // List is an intrusive container, it may + // only contain elements that derive from ListElement + // + // NOTE! Deleting an element automatically unlinks it + // from the enclosing container. + // + struct ListElement : virtual public Object + { + ListElement * next, * prev; + + ListElement(); + virtual ~ListElement(); + }; + + + class List : public ListElement + { + public: + bool isEmpty(); + void insertFront(ListElement * item); + void insertBack(ListElement * item); + void insertBefore(ListElement * insertBeforeThis, ListElement * item); + void clear(); + ListElement* front(); + ListElement* last(); + + ListElement* begin() { return this->next; } + ListElement* end() { return this; } + + static ListElement * remove(ListElement * item); // Removes but does not delete + bool contains(ListElement * item); + ListElement * replace(ListElement * replacement, ListElement * replacee); + List(); + ~List(); + + unsigned size() + { + unsigned count = 0; + for (ListElement * i = begin(); i!=end(); i = i->next) + count++; + return count; + } + }; +} + +#endif //INCLUDED_DP_LIST_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h new file mode 100644 index 0000000..0744f8b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h @@ -0,0 +1,265 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* List **************************************\ +* * +* Module: dp_mainlink.h * +* Mainlink interface implemented by client. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_MAINLINK_H +#define INCLUDED_DP_MAINLINK_H + +#include "dp_linkconfig.h" +#include "dp_vrr.h" +#include "dp_wardatabase.h" +#include "dp_auxdefs.h" +#include "displayport.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "dp_regkeydatabase.h" + +#define HDCP_DUMMY_CN (0x1) +#define HDCP_DUMMY_CKSV (0xFFFFF) + +namespace DisplayPort +{ + typedef enum + { + NONE, //Abort it manually + UNTRUST, //Abort due to Kp mismatch + UNRELBL, //Abort due to repeated link failure + KSV_LEN, //Abort due to KSV length + KSV_SIG, //Abort due to KSV signature + SRM_SIG, //Abort due to SRM signature + SRM_REV, //Abort due to SRM revocation + NORDY, //Abort due to repeater not ready + KSVTOP, //Abort due to KSV topology error + BADBKSV //Abort due to invalid Bksv + }AbortAuthReason; + + // This is also used for DPCD offset 10B. 249 + enum LinkQualityPatternType + { + LINK_QUAL_DISABLED, + LINK_QUAL_D10_2, + LINK_QUAL_SYM_ERROR, + LINK_QUAL_PRBS7, + LINK_QUAL_80BIT_CUST, + LINK_QUAL_HBR2_COMPLIANCE_EYE, + LINK_QUAL_CP2520PAT3, + }; + + typedef struct + { + LinkQualityPatternType lqsPattern; + + // + // 80 bits DP CSTM Test Pattern data; + // ctsmLower takes bits 31:0 (lowest 32 bits) + // ctsmMiddle takes bits 63:32 (middle 32 bits) + // ctsmUpper takes bits 79:64 (highest 16 bits) + // + int ctsmLower; + int ctsmMiddle; + int ctsmUpper; + } PatternInfo; + + typedef struct + { + unsigned char bcaps; + unsigned char bksv[5]; + bool hdcpCapable; + unsigned char updMask; + }RmDfpCache; + + typedef enum + { + NORMAL_LINK_TRAINING, // full LT + NO_LINK_TRAINING, + FAST_LINK_TRAINING, + }LinkTrainingType; + + class MainLink : virtual public Object + { + private: + virtual void initializeRegkeyDatabase() = 0; + virtual void applyRegkeyOverrides() = 0; + + public: + virtual bool physicalLayerSetTestPattern(PatternInfo * patternInfo) = 0; + + // + // Wrappers for existing link training RM control calls + // + virtual bool train(const LinkConfiguration & link, bool force, LinkTrainingType linkTrainingType, + LinkConfiguration *retLink, bool bSkipLt = false, bool isPostLtAdjRequestGranted = false, + unsigned phyRepeaterCount = 0) = 0; + + // RM control call to retrieve buffer from RM for DP Library to dump logs + virtual bool retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords) = 0; + + // + // Requests to DD to perform pre & post link training steps + // which may disconnect and later reconnect the head (For Pre-gf119 GPUs) + // + virtual void preLinkTraining(NvU32 head) = 0; + virtual void postLinkTraining(NvU32 head) = 0; + virtual NvU32 getRegkeyValue(const char *key) = 0; + virtual const DP_REGKEY_DATABASE& getRegkeyDatabase() = 0; + virtual NvU32 getSorIndex() = 0; + virtual bool isInbandStereoSignalingSupported() = 0; + + + virtual bool isEDP() = 0; + virtual bool supportMSAOverMST() = 0; + virtual bool isForceRmEdidRequired() = 0; + virtual bool fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) = 0; + virtual bool applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) = 0; + + // Return if Panel is Dynamic MUX capable + virtual bool isDynamicMuxCapable() = 0; + + // Return the current mux state. Returns false if not mux capable + virtual bool getDynamicMuxState(NvU32 *muxState) = 0; + + // Return if Internal panel is Dynamic Mux capable + virtual bool isInternalPanelDynamicMuxCapable() = 0; + + // Check if we should skip power down eDP when head detached. 
+ virtual bool skipPowerdownEdpPanelWhenHeadDetach() = 0; + + // Get GPU DSC capabilities + virtual void getDscCaps(bool *pbDscSupported = NULL, + unsigned *pEncoderColorFormatMask = NULL, + unsigned *pLineBufferSizeKB = NULL, + unsigned *pRateBufferSizeKB = NULL, + unsigned *pBitsPerPixelPrecision = NULL, + unsigned *pMaxNumHztSlices = NULL, + unsigned *pLineBufferBitDepth = NULL) = 0; + + // + // Get the current link config. + // (Used for the boot case where EFI/VBIOS may have already trained + // the link. We need this to confirm the programming since + // we cannot rely on the DPCD registers being correct or sane) + // + virtual void getLinkConfig(unsigned &laneCount, NvU64 & linkRate) = 0; + + // Get the max link config from UEFI. + virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount) = 0; + // + // Query if a head is attached to this DisplayId + // + virtual bool isActive() = 0; + + virtual bool hasIncreasedWatermarkLimits() = 0; + virtual bool hasMultistream() = 0; + virtual bool isPC2Disabled() = 0; + virtual bool isDP1_2Supported() = 0; + virtual bool isDP1_4Supported() = 0; + virtual bool isStreamCloningEnabled() = 0; + virtual NvU32 maxLinkRateSupported() = 0; + virtual bool isLttprSupported() = 0; + virtual bool isFECSupported() = 0; + + virtual bool setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) = 0; + virtual bool setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) = 0; + virtual bool setFlushMode() = 0; + virtual void clearFlushMode(unsigned headMask, bool testMode=false) = 0; + + // + // HDCP Renegotiate and trigger ACT. + // + virtual void configureHDCPRenegotiate(NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV, bool bForceReAuth = false, bool bRxIDMsgPending = false) = 0; + virtual void triggerACT() = 0; + virtual void configureHDCPGetHDCPState(HDCPState &hdcpState) = 0; + + virtual NvU32 streamToHead(NvU32 streamId, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0; + virtual NvU32 headToStream(NvU32 head, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0; + + virtual void configureSingleStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + bool bEnhancedFraming, + NvU32 tuSize, + NvU32 waterMark, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false)= 0; + + virtual void configureMultiStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + NvU32 slotStart, + NvU32 slotEnd, + NvU32 PBN, + NvU32 Timeslice, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false)= 0; + + virtual void configureSingleHeadMultiStreamMode(NvU32 displayIDs[], + NvU32 numStreams, + NvU32 mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)= 0; + + virtual void configureMsScratchRegisters(NvU32 address, + NvU32 hopCount, + NvU32 driverState) = 0; + + virtual bool 
controlRateGoverning(NvU32 head, bool enable, bool updateNow = true) = 0; + virtual bool getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern) = 0; + virtual bool setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride = false) = 0; + virtual bool getDpLaneData(NvU32 *numLanes, NvU32 *data) = 0; + virtual bool setDpLaneData(NvU32 numLanes, NvU32 *data) = 0; + virtual bool rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache * dfpCache, NvBool bResetDfp) = 0; + virtual void configurePowerState(bool bPowerUp) = 0; + virtual NvU32 monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData) = 0; + virtual NvU32 getRootDisplayId() = 0; + virtual NvU32 allocDisplayId() = 0; + virtual bool freeDisplayId(NvU32 displayId) = 0; + virtual void queryGPUCapability() = 0; + virtual bool queryAndUpdateDfpParams() = 0; + virtual bool getEdpPowerData(bool *panelPowerOn, bool *bDPCDPowerStateD0) = 0; + virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status) = 0; + + virtual void configureTriggerSelect(NvU32 head, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0; + + virtual void configureTriggerAll(NvU32 head, bool enable) = 0; + virtual bool dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex){ return false; } + virtual bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates) = 0; + virtual bool configureFec(const bool bEnableFec) = 0; + }; +} + +#endif //INCLUDED_DP_MAINLINK_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h new file mode 100644 index 0000000..ff57de7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h @@ -0,0 +1,148 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_merger.h * +* Asynchronous Message merger * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_MERGER_H +#define INCLUDED_DP_MERGER_H + +#include "dp_list.h" +#include "dp_auxretry.h" +#include "dp_timer.h" +#include "dp_bitstream.h" +#include "dp_address.h" +#include "dp_messageheader.h" +#include "dp_configcaps.h" + +namespace DisplayPort +{ + // after 4 secs delete dead transactions + #define DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC 4000000 + struct EncodedMessage; + + class MessageTransactionMerger : virtual public Object + { + class IncompleteMessage : public ListElement + { + public: + EncodedMessage message; + NvU64 lastUpdated; + + }; + + List incompleteMessages; + Timer * timer; + NvU64 incompleteMessageTimeoutMs; + IncompleteMessage * freeOnNextCall; // we don't need to delete it on destruct + // since this is ALSO a member of the list we own + + IncompleteMessage * getTransactionRecord(const Address & address, unsigned messageNumber); + public: + MessageTransactionMerger(Timer * timer, unsigned incompleteMessageTimeoutMs) + : timer(timer), incompleteMessageTimeoutMs(incompleteMessageTimeoutMs), freeOnNextCall(0) + { + } + + // + // Pushes data into the queue and returns an encoded + // message if an entire message is assembled. + // + EncodedMessage * pushTransaction(MessageHeader * header, Buffer * data); + }; + + class IncomingTransactionManager : virtual public Object + { + public: + class IncomingTransactionManagerEventSink + { + public: + virtual void messagedReceived(IncomingTransactionManager * from, EncodedMessage * message) = 0; + }; + + void mailboxInterrupt(); + + // + // Create a message merger object + // - sink is called whenever a new message is received + // Callback::fired is passed an IncompleteMessage as the data arg. + // + IncomingTransactionManager(Timer * timerInterface, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink); + virtual ~IncomingTransactionManager(); + + protected: + virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getMessageBoxSize() = 0; + virtual size_t getTransactionSize() = 0; + virtual void clearMessageBoxInterrupt() = 0; + private: + MessageTransactionMerger incompleteMessages; // List + + Buffer localWindow; + Timer * timer; + IncomingTransactionManagerEventSink * sink; + Address addressPrefix; // This is the aux address of the downstream port + // This field will be prepended to the address decoded. 
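+                                    // (Illustrative reading, not taken from
+                                    //  dp_address.h: with an addressPrefix of
+                                    //  1.2, a header that decodes to relative
+                                    //  address 3.4 would be delivered to the
+                                    //  sink as 1.2.3.4.)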
+ }; + + class DownReplyManager : public IncomingTransactionManager + { + public: + DownReplyManager(DPCDHAL * hal, Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink) + : IncomingTransactionManager(timer, addressPrefix, sink), hal(hal) + { + } + virtual ~DownReplyManager() {} + + protected: + DPCDHAL * hal; + + virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + virtual size_t getTransactionSize(); + virtual void clearMessageBoxInterrupt(); + }; + + class UpRequestManager : public IncomingTransactionManager + { + public: + UpRequestManager(DPCDHAL * hal, Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink) + : IncomingTransactionManager(timer, addressPrefix, sink), hal(hal) + { + } + virtual ~UpRequestManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + virtual size_t getTransactionSize(); + virtual void clearMessageBoxInterrupt(); + }; +} + +#endif //INCLUDED_DP_MERGER_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h new file mode 100644 index 0000000..0ae4d88 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h @@ -0,0 +1,559 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_messagecodings.h * +* Encoding routines for various messages. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_MESSAGECODINGS_H +#define INCLUDED_DP_MESSAGECODINGS_H + +#include "dp_messages.h" +#include "displayport.h" +#include "dp_auxdefs.h" + +/* Fields for the HDCP stream status */ +#define NV_DP_HDCP_STREAM_STATE 1:0 +#define NV_DP_HDCP_STREAM_STATE_NO_EXIST (0x00000000) +#define NV_DP_HDCP_STREAM_STATE_NOT_ACTIVE (0x00000001) +#define NV_DP_HDCP_STREAM_STATE_ACTIVE (0x00000002) +#define NV_DP_HDCP_STREAM_STATE_ERROR (0x00000003) +#define NV_DP_HDCP_STREAM_REPEATER 2:2 +#define NV_DP_HDCP_STREAM_REPEATER_SIMPLE (0x00000000) +#define NV_DP_HDCP_STREAM_REPEATER_REPEATER (0x00000001) +#define NV_DP_HDCP_STREAM_ENCRYPTION 3:3 +#define NV_DP_HDCP_STREAM_ENCRYPTION_OFF (0x00000000) +#define NV_DP_HDCP_STREAM_ENCRYPTION_ON (0x00000001) +#define NV_DP_HDCP_STREAM_AUTHENTICATION 4:4 +#define NV_DP_HDCP_STREAM_AUTHENTICATION_OFF (0x00000000) +#define NV_DP_HDCP_STREAM_AUTHENTICATION_IP (0x00000000) +#define NV_DP_HDCP_STREAM_AUTHENTICATION_ON (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY 8:8 +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP 9:9 +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI 10:10 +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X 11:11 +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X 12:12 +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X_YES (0x00000001) + +namespace DisplayPort +{ + typedef NakData Message_NakData; + + enum + { + REMOTE_READ_BUFFER_SIZE = 128, + }; + + typedef enum + { + None, + UpstreamSourceOrSSTBranch, + DownstreamBranch, + DownstreamSink, + Dongle + }PeerDevice; + + struct I2cWriteTransaction + { + I2cWriteTransaction(unsigned WriteI2cDeviceId, unsigned NumBytes, + unsigned char * buffer, bool NoStopBit = false, + unsigned I2cTransactionDelay = 0); + I2cWriteTransaction(); + unsigned WriteI2cDeviceId; + unsigned NumBytes; + unsigned char *I2cData; + bool NoStopBit; + unsigned I2cTransactionDelay; + }; + + typedef enum + { + DoesNotExist = 0, + NotActive = 1, + Active = 2, + }StreamState; + + typedef enum + { + CP_IRQ_ON = 0, + No_EVENT = 1 + }StreamEvent; + + typedef enum + { + STREAM_BEHAVIOUR_MASK_OFF = 0, + STREAM_BEHAVIOUR_MASK_ON = 1 + }StreamBehaviorMask; + + typedef enum + { + STREAM_EVENT_MASK_OFF = 0, + STREAM_EVENT_MASK_ON = 1 + }StreamEventMask; + + typedef enum + { + Force_Reauth = 0, + BlockFlow = 1 + }StreamBehavior; + + + typedef enum + { + StreamUnconnected = 0, + NonAuthLegacyDevice = 1, // TV or CRT + DP_MST = 4 + }OutputSinkType; + + typedef enum + { + HDCP1x = 1, + HDCP2x = 2 + }OutputCPType; + + typedef enum + { + SinkEvent0, + SinkEvent255 = 0xFF + }SinkEvent; + + // + // LINK_ADDRESS 0x1 + // + class LinkAddressMessage : public MessageManager::Message + { + public: + struct Result + { + bool isInputPort; + PeerDevice peerDeviceType; + unsigned portNumber; + bool hasMessaging; + bool dpPlugged; + + bool legacyPlugged; + unsigned dpcdRevisionMajor; + 
unsigned dpcdRevisionMinor; + GUID peerGUID; + unsigned SDPStreams; + unsigned SDPStreamSinks; + }; + + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + GUID guid; // originating branch device + unsigned numberOfPorts; + Result res[16]; + } reply; + + public: + LinkAddressMessage() : Message(NV_DP_SBMSG_REQUEST_ID_LINK_ADDRESS, + NV_DP_SBMSG_PRIORITY_LEVEL_2) + { + dpMemZero(&reply, sizeof(reply)); + } + + // Second stage init kept separate from constructor (reusable message) + void set(const Address & target); + + void getGUID(GUID & guid){guid = reply.guid;} + + // Number of ports described + unsigned resultCount(){return reply.numberOfPorts;} + const Result * result(unsigned index) + { + return &reply.res[index]; + } + }; + + + // + // CONNECTION_STATUS_NOTIFY 0x2 + // + class ConnStatusNotifyMessage : public MessageManager::MessageReceiver + { + public: + typedef struct + { + GUID guid; + unsigned port; + bool legacyPlugged; + bool devicePlugged; + bool messagingCapability; + bool isInputPort; + PeerDevice peerDeviceType; + }Request; + + protected: + Request request; + + public: + Request * getUpRequestData(){ return &request; } + virtual bool processByType(EncodedMessage * message, BitStreamReader * reader); + ConnStatusNotifyMessage(MessageReceiverEventSink * sink); + }; + + // + // GENERIC_UP_REPLY 0xnn + // + class GenericUpReplyMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + virtual void expired(const void * tag) + { } + + public: + GenericUpReplyMessage(const Address & target, unsigned requestId, + bool bReplyIsNack = false, bool bBroadcast = true, + bool bPath = false); + GenericUpReplyMessage(unsigned requestId, bool bReplyIsNack, + bool bBroadcast, bool bPath); + void set(const Address & target, bool bReplyIsNack = false, + bool bBroadcast = true, bool bPath = false); + + }; + + // + // CLEAR_PAYLOAD_ID_TABLE 0x14 + // + class ClearPayloadIdTableMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + virtual ParseResponseStatus parseResponse(EncodedMessage * message); + public: + ClearPayloadIdTableMessage(); + }; + + // + // ENUM_PATH_RESOURCES 0x10 + // + class EnumPathResMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + public: + struct + { + unsigned portNumber; + bool bFECCapability; + unsigned TotalPBN; + unsigned FreePBN; + } reply; + EnumPathResMessage(const Address & target, unsigned port, bool point); + }; + + // + // ALLOCATE_PAYLOAD 0x11 + // + class AllocatePayloadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + unsigned PBN; + unsigned virtualChannelPayloadId; + }reply; + + public: + + AllocatePayloadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_ALLOCATE_PAYLOAD, + NV_DP_SBMSG_PRIORITY_LEVEL_4) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + unsigned nSDPStreams, + unsigned vcPayloadId, + unsigned PBN, + unsigned* SDPStreamSink, + bool entirePath); + + unsigned replyPortNumber(){return reply.portNumber;} + unsigned replyPBN(){return reply.PBN;} + unsigned replyVirtualChannelPayloadId(){return 
reply.virtualChannelPayloadId;} + + }; + + // + // QUERY_PAYLOAD 0x12 + // + class QueryPayloadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + unsigned allocatedPBN; + } reply; + + public: + QueryPayloadMessage(const Address & target, + unsigned port, + unsigned vcPayloadId); + + unsigned replyPortNumber() {return reply.portNumber;} + unsigned replyAllocatedPBN() {return reply.allocatedPBN;} + }; + + // + // RESOURCE_STATUS_NOTIFY 0x13 + // + class ResStatusNotifyMessage : public MessageManager::MessageReceiver + { + virtual bool processByType(EncodedMessage * message, + BitStreamReader * reader); + public: + struct + { + unsigned port; + GUID guid; + unsigned PBN; + } request; + + public: + ResStatusNotifyMessage(MessageReceiverEventSink * sink); + }; + + // + // REMOTE_DPCD_READ 0x20 + // + class RemoteDpcdReadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + unsigned numBytesReadDPCD; + unsigned char readData[REMOTE_READ_BUFFER_SIZE]; // Buffer + } reply; + + public: + void set(const Address & target, + unsigned port, + unsigned dpcdAddress, + unsigned nBytesToRead); + + RemoteDpcdReadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_READ, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + + unsigned replyPortNumber(){return reply.portNumber;} + unsigned replyNumOfBytesReadDPCD(){return reply.numBytesReadDPCD;} + + const NvU8 * replyGetData() + { + return reply.readData; + } + }; + + // + // REMOTE_DPCD_WRITE 0x21 + // + class RemoteDpcdWriteMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + public: + void set(const Address & target, + unsigned port, + unsigned dpcdAddress, + unsigned nBytesToWrite, + const NvU8 * writeData); + + RemoteDpcdWriteMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_WRITE, + NV_DP_SBMSG_PRIORITY_LEVEL_3) {} + }; + + // + // REMOTE_I2C_READ 0x22 + // + class RemoteI2cReadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + private: + struct + { + unsigned portNumber; + unsigned numBytesReadI2C; + unsigned char readData[REMOTE_READ_BUFFER_SIZE]; + } reply; + + public: + + RemoteI2cReadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_READ, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned nWriteTransactions, + unsigned port, + I2cWriteTransaction* transactions, + unsigned readI2cDeviceId, + unsigned nBytesToRead); + + unsigned replyPortNumber(){return reply.portNumber;} + unsigned replyNumOfBytesReadI2C(){return reply.numBytesReadI2C;} + unsigned char* replyGetI2CData(unsigned* numBytes) + { + *numBytes = this->replyNumOfBytesReadI2C(); + return reply.readData; + } + }; + + // + // REMOTE_I2C_WRITE 0x23 + // + class RemoteI2cWriteMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + private: + struct + { + unsigned portNumber; + } reply; + + public: + + RemoteI2cWriteMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_WRITE, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + 
dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + unsigned writeI2cDeviceId, + unsigned nBytesToWrite, + unsigned char* writeData); + + unsigned replyPortNumber() {return reply.portNumber;} + }; + + // + // POWER_UP_PHY 0x24 + // + class PowerUpPhyMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + } reply; + + public: + PowerUpPhyMessage() : Message(NV_DP_SBMSG_REQUEST_ID_POWER_UP_PHY, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + bool entirePath); + + unsigned replyPortNumber(){return reply.portNumber;} + }; + + // + // POWER_DOWN_PHY 0x25 + // + class PowerDownPhyMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + } reply; + + public: + PowerDownPhyMessage() : Message(NV_DP_SBMSG_REQUEST_ID_POWER_DOWN_PHY, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + bool entirePath); + + unsigned replyPortNumber(){return reply.portNumber;} + }; + + // + // SINK_EVENT_NOTIFY 0x30 + // + class SinkEventNotifyMessage : public MessageManager::MessageReceiver + { + virtual bool processByType(EncodedMessage * message, BitStreamReader * reader); + + public: + SinkEventNotifyMessage(MessageReceiverEventSink * sink, unsigned requestId); + }; + +} + +#endif //INCLUDED_DP_MESSAGECODINGS_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h new file mode 100644 index 0000000..3d09e6d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_messageheader.h * +* DP message header parser * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_MESSAGEHEADER_H +#define INCLUDED_DP_MESSAGEHEADER_H + +#include "dp_internal.h" +#include "dp_list.h" +#include "dp_auxretry.h" +#include "dp_timer.h" +#include "dp_bitstream.h" +#include "dp_address.h" + +namespace DisplayPort +{ + // + // User filled message structure + // + #define MAX_MESSAGE_SIZE 64 + struct EncodedMessage : public Object + { + unsigned messageNumber; // 0 or 1 + Address address; // target device for message (source for reply) + Buffer buffer; + bool isBroadcast; + bool isPathMessage; + + EncodedMessage() + : messageNumber(0), isBroadcast(false), isPathMessage(false) + {} + + void swap(EncodedMessage & other) + { + swap_args(messageNumber, other.messageNumber); + swap_args(address, other.address); + swap_args(isBroadcast, other.isBroadcast); + swap_args(isPathMessage, other.isPathMessage); + buffer.swap(other.buffer); + } + }; + + // + // Decoded message header + // + struct MessageHeader + { + Address address; + unsigned messageNumber; + unsigned payloadBytes; + bool isBroadcast; + bool isPathMessage; + bool isTransactionStart; + bool isTransactionEnd; + unsigned headerSizeBits; + }; + + bool decodeHeader(BitStreamReader * reader, MessageHeader * header, const Address & address); + + // + // Routines for maintaining a list of partially complete messages + // + + // after 4 secs delete dead transactions + #define DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC 4000000 + +} +#endif //INCLUDED_DP_MESSAGEHEADER_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h new file mode 100644 index 0000000..496b18a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h @@ -0,0 +1,324 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_messages.h * +* Encoding routines for aux common messages. 
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_MESSAGES_H
+#define INCLUDED_DP_MESSAGES_H
+
+#include "dp_address.h"
+#include "dp_bitstream.h"
+#include "dp_splitter.h"
+#include "dp_merger.h"
+#include "dp_crc.h"
+#include "dp_list.h"
+#include "dp_connector.h"
+#include "dp_messageheader.h"
+#include "dp_auxdefs.h"
+
+namespace DisplayPort
+{
+    bool extractGUID(BitStreamReader * reader, GUID * guid);
+
+    typedef enum
+    {
+        NakUndefined,
+        NakWriteFailure,
+        NakInvalidRAD,
+        NakCrcFailure,
+        NakBadParam,
+        NakDefer,
+        NakLinkFailure,
+        NakNoResources,
+        NakDpcdFail,
+        NakI2cNak,
+        NakAllocateFail,
+
+        // Extensions
+        NakTimeout = 0x100      // Message was unable to be transmitted
+
+    } NakReason;
+
+    typedef struct
+    {
+        GUID guid;
+        NakReason reason;
+        unsigned nak_data;
+    } NakData;
+
+    typedef enum
+    {
+        ParseResponseSuccess,
+        ParseResponseFailed,
+        ParseResponseWrong
+    } ParseResponseStatus;
+
+    //
+    // Priority levels are defined to prioritize SBMs for DP1.4 (Highest Priority - LEVEL1, Lowest Priority - DEFAULT)
+    // Current implementation has the following priority levels
+    //      CLEAR_PAYLOAD_ID_TABLE                = NV_DP_SBMSG_PRIORITY_LEVEL_1
+    //      LINK_ADDRESS                          = NV_DP_SBMSG_PRIORITY_LEVEL_2
+    //      REMOTE_DPCD_READ, REMOTE_DPCD_WRITE   = NV_DP_SBMSG_PRIORITY_LEVEL_3
+    //      REMOTE_I2C_READ, REMOTE_I2C_WRITE     = NV_DP_SBMSG_PRIORITY_LEVEL_3
+    //      POWER_UP_PHY, POWER_DOWN_PHY          = NV_DP_SBMSG_PRIORITY_LEVEL_3
+    //      ENUM_PATH_RESOURCES, ALLOCATE_PAYLOAD = NV_DP_SBMSG_PRIORITY_LEVEL_4
+    //      All other messages                    = NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT
+    //
+    // However, Message::setMessagePriority can be used to override these priority levels, if required.
+    //
+    typedef enum
+    {
+        NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT,
+        NV_DP_SBMSG_PRIORITY_LEVEL_4,
+        NV_DP_SBMSG_PRIORITY_LEVEL_3,
+        NV_DP_SBMSG_PRIORITY_LEVEL_2,
+        NV_DP_SBMSG_PRIORITY_LEVEL_1,
+    } DPSideBandMessagePriority;
+
+    //
+    // CLASS: MessageManager
+    //
+    class MessageManager :
+        virtual public Object,
+        IncomingTransactionManager::IncomingTransactionManagerEventSink
+    {
+
+        Timer              * timer;
+        DPCDHAL            * hal;
+        DownRequestManager   splitterDownRequest;
+        UpReplyManager       splitterUpReply;
+        UpRequestManager     mergerUpRequest;
+        DownReplyManager     mergerDownReply;
+        bool                 isBeingDestroyed;
+        bool                 isPaused;
+
+        List messageReceivers;
+        List notYetSentDownRequest;     // Down Messages yet to be processed
+        List notYetSentUpReply;         // Up Reply Messages yet to be processed
+        List awaitingReplyDownRequest;  // Transmitted, Split, but not yet replied to
+
+        void onUpRequestReceived(bool status, EncodedMessage * message);
+        void onDownReplyReceived(bool status, EncodedMessage * message);
+        void transmitAwaitingDownRequests();
+        void transmitAwaitingUpReplies();
+
+        // IncomingTransactionManager
+        void messagedReceived(IncomingTransactionManager * from, EncodedMessage * message);
+
+    public:
+        class Message;
+        void cancelAllByType(unsigned type);
+        void cancelAll(Message * message);
+
+        void pause()
+        {
+            isPaused = true;
+        }
+
+        void clearPendingMsg()
+        {
+            hal->clearPendingMsg();
+        }
+        void IRQUpReqest()
+        {
+            mergerUpRequest.mailboxInterrupt();
+        }
+
+        void IRQDownReply()
+        {
+            mergerDownReply.mailboxInterrupt();
+        }
+
+        MessageManager(DPCDHAL * hal, Timer * timer)
+            : timer(timer), hal(hal),
+              splitterDownRequest(hal, timer),
+              splitterUpReply(hal, timer),
+              mergerUpRequest(hal, timer, Address(0), this),
+              mergerDownReply(hal, timer, Address(0), this),
+              isBeingDestroyed(false)
+        {
+        }
+
+        //
+        // CLASS: MessageReceiver
+        //
+        class MessageReceiver : public ListElement, OutgoingTransactionManager::OutgoingTransactionManagerEventSink
+        {
+        public:
+            class MessageReceiverEventSink
+            {
+            public:
+                virtual void messageProcessed(MessageReceiver * from) = 0;
+            };
+
+            // Returns false if the message should be passed to the next receiver
+            virtual bool process(EncodedMessage * message);
+
+            // per message type should implement this
+            virtual bool processByType(EncodedMessage * message, BitStreamReader * reader) = 0;
+
+            unsigned  getRequestId() {return requestId;}
+            Address & getAddress()   {return address;}
+
+            MessageReceiver(MessageReceiverEventSink* sink, unsigned requestId)
+                : sink(sink),
+                  requestId(requestId),
+                  bProcessed(true),
+                  address(0)    // 0 to start with
+            {}
+
+            virtual void splitterFailed(OutgoingTransactionManager * from)
+            {
+                DP_ASSERT(0 && "why did we send a reply");
+            }
+
+            virtual void splitterTransmitted(OutgoingTransactionManager * from)
+            {
+                DP_ASSERT(0 && "why did we send a reply");
+            }
+
+        protected:
+            MessageReceiverEventSink * sink;
+            unsigned                   requestId;
+            bool                       bProcessed;
+            Address                    address;
+            MessageManager           * parent;
+
+        };
+
+        //
+        // CLASS: Message
+        //
+        class Message : public ListElement,
+                        OutgoingTransactionManager::OutgoingTransactionManagerEventSink,
+                        Timer::TimerCallback /* countdown timer for reply */
+        {
+        public:
+            class MessageEventSink
+            {
+            public:
+                virtual void messageFailed(Message * from, NakData * nakData) = 0;
+                virtual void messageCompleted(Message * from) = 0;
+            };
+            unsigned getMsgType() {return requestIdentifier;}
+            unsigned getSinkPort() {return sinkPort;}
+        protected:
+            // Encoded message body (set in dp_messagecodings)
+            //   this data structure is invalidated on post
+            //   as the data gets swapped into the transmit buffer.
+            EncodedMessage     encodedMessage;
+            MessageEventSink * sink;
+
+            MessageManager   * parent;
+            bool               transmitReply;
+            bool               bTransmitted;
+            bool               bBusyWaiting;
+            unsigned           requestIdentifier;
+            unsigned           messagePriority;
+            unsigned           sinkPort;
+
+            // State updated by post operation
+            struct {
+                unsigned messageNumber;
+                Address  target;
+            } state;
+
+            virtual ParseResponseStatus parseResponseAck(
+                EncodedMessage * message, BitStreamReader * reader) = 0;
+            virtual ParseResponseStatus parseResponse(EncodedMessage * message);
+            virtual void splitterFailed(OutgoingTransactionManager * from);
+            virtual void expired(const void * tag);
+            virtual void splitterTransmitted(OutgoingTransactionManager * from);
+
+        public:
+            friend class MessageManager;
+
+            Message(int requestIdentifier, int messagePriority)
+                : sink(0),
+                  parent(0),
+                  transmitReply(false),
+                  bTransmitted(false),
+                  bBusyWaiting(false),
+                  requestIdentifier(requestIdentifier),
+                  messagePriority(messagePriority),
+                  sinkPort(0xFF)
+            {
+            }
+
+            void clear()
+            {
+                if (parent) {
+                    parent->timer->cancelCallbacks(this);
+                    parent->splitterDownRequest.cancel(this);
+                }
+
+                parent = 0;
+                List::remove(this);
+                encodedMessage.buffer.reset();
+            }
+
+            // This function can be used to override the already set priority of the message from its constructor.
+            void setMessagePriority(DPSideBandMessagePriority priorityLevel)
+            {
+                this->messagePriority = priorityLevel;
+                return;
+            }
+
+        protected:
+            ~Message()
+            {
+                clear();
+            }
+        };
+
+        //
+        // Register new receiver for unpaired messages
+        // (e.g. 
broadcast messages or sink->source messages) + // + void registerReceiver(MessageReceiver * receiver); + + // Post a message to be asynchronously transmitted + void post(Message * message, Message::MessageEventSink * sink, bool isReply = false); + void postReply(Message * message, Message::MessageEventSink * sink); + void cancel(Message * message); + + bool send(Message * message, NakData & nakData); + friend class Message; + ~MessageManager(); + }; + struct GenericMessageCompletion : public MessageManager::Message::MessageEventSink + { + bool failed; + bool completed; + NakData nakData; + GenericMessageCompletion(); + void messageFailed(MessageManager::Message * from, NakData * data); + void messageCompleted(MessageManager::Message * from); + }; +} + +#endif //INCLUDED_DP_MESSAGES_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h new file mode 100644 index 0000000..9bb02e8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h @@ -0,0 +1,132 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_object.h * +* This is the object from which all other dynamically-allocated objects * +* must inherit. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_OBJECT_H +#define INCLUDED_DP_OBJECT_H + +#include "nvtypes.h" +#include "stddef.h" +#include "dp_hostimp.h" + +static inline void dpMemCopy(void * target, const void * source, size_t len) +{ + NvU8 * t = (NvU8 *)target; + const NvU8 * s = (const NvU8 *)source; + + while (len--) + *t++=*s++; +} + +static inline void dpMemZero(void * target, size_t len) +{ + NvU8 * t = (NvU8 *)target; + + while (len--) + *t++=0; +} + +static inline bool dpMemCmp(void *pvBuf1, void *pvBuf2, size_t size) +{ + NvU8 *pBuf1 = (NvU8 *)pvBuf1; + NvU8 *pBuf2 = (NvU8 *)pvBuf2; + + if(!pBuf1 || !pBuf2 || !size) + return false; + + do + { + if(*pBuf1++ == *pBuf2++) + continue; + else + break; + }while(--size); + + if(!size) + return true; + else + return false; +} + +namespace DisplayPort +{ + // + // Any object allocated through "new" must virtually inherit from this type. 
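+    // (For illustration only -- "MyDevice" below is a made-up client type:
+    //      struct MyDevice : virtual public Object { ... };
+    //  note the virtual keyword, whose purpose is explained below.)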
+ // This guarantees that the memory allocation goes through dpMalloc/dpFree. + // Leak detection is implemented only on allocations of this type. Data + // structures may assume 0 initialization if allocated off the heap. + // + // You must use virtual inheritance because objects that inherit from + // multiple Object-derived classes would otherwise cause ambiguity when + // someone tries to use new or delete on them. + // + struct Object + { + virtual ~Object() {} + + void *operator new(size_t sz) + { + void * block = dpMalloc(sz); + if (block) + { + dpMemZero(block, sz); + } + return block; + } + + void *operator new[](size_t sz) + { + void * block = dpMalloc(sz); + if (block) + { + dpMemZero(block, sz); + } + return block; + } + + void operator delete(void * ptr) + { + if (ptr) + { + dpFree(ptr); + } + } + + void operator delete[](void * ptr) + { + if (ptr) + { + dpFree(ptr); + } + } + }; +} + +#endif // INCLUDED_DP_OBJECT_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h new file mode 100644 index 0000000..6e49227 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h @@ -0,0 +1,102 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_regkeydatabase.h                                               *
+*    Definition of the DP_REGKEY_DATABASE                                   *
+*                                                                           *
+\***************************************************************************/
+
+#ifndef INCLUDED_DP_REGKEYDATABASE_H
+#define INCLUDED_DP_REGKEYDATABASE_H
+
+#include "dp_auxdefs.h"
+
+// Regkey Names
+#define NV_DP_REGKEY_ENABLE_AUDIO_BEYOND_48K          "ENABLE_AUDIO_BEYOND48K"
+#define NV_DP_REGKEY_OVERRIDE_DPCD_REV                "OVERRIDE_DPCD_REV"
+#define NV_DP_REGKEY_DISABLE_SSC                      "DISABLE_SSC"
+#define NV_DP_REGKEY_ENABLE_FAST_LINK_TRAINING        "ENABLE_FAST_LINK_TRAINING"
+#define NV_DP_REGKEY_DISABLE_MST                      "DISABLE_MST"
+#define NV_DP_REGKEY_ENABLE_INBAND_STEREO_SIGNALING   "ENABLE_INBAND_STEREO_SIGNALING"
+#define NV_DP_REGKEY_SKIP_POWEROFF_EDP_IN_HEAD_DETACH "SKIP_POWEROFF_EDP_IN_HEAD_DETACH"
+#define NV_DP_REGKEY_ENABLE_OCA_LOGGING               "ENABLE_OCA_LOGGING"
+#define NV_DP_REGKEY_REPORT_DEVICE_LOST_BEFORE_NEW    "HP_WAR_1707690"
+#define NV_DP_REGKEY_APPLY_LINK_BW_OVERRIDE_WAR       "APPLY_LINK_BW_OVERRIDE_WAR"
+#define NV_DP_REGKEY_APPLY_MAX_LINK_RATE_OVERRIDES    "APPLY_OVERRIDES_FOR_BUG_2489143"
+#define NV_DP_REGKEY_DISABLE_DSC                      "DISABLE_DSC"
+#define NV_DP_REGKEY_SKIP_ASSESSLINK_FOR_EDP          "HP_WAR_2189772"
+#define NV_DP_REGKEY_HDCP_AUTH_ONLY_ON_DEMAND         "DP_HDCP_AUTH_ONLY_ON_DEMAND"
+#define NV_DP_REGKEY_ENABLE_MSA_OVER_MST              "ENABLE_MSA_OVER_MST"
+
+// Keep link alive for SST and MST
+#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE              "DP_KEEP_OPT_LINK_ALIVE"
+// Keep link alive when connector is in MST
+#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_MST          "DP_KEEP_OPT_LINK_ALIVE_MST"
+// Keep link alive when connector is in SST
+#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_SST          "DP_KEEP_OPT_LINK_ALIVE_SST"
+
+#define NV_DP_REGKEY_FORCE_EDP_ILR                    "DP_BYPASS_EDP_ILR_REV_CHECK"
+
+//
+// DSC capability of downstream device should be decided based on device's own
+// and its parent's DSC capability.
+//
+#define NV_DP_DSC_MST_CAP_BUG_3143315                 "DP_DSC_MST_CAP_BUG_3143315"
+
+//
+// Database used to store all the regkey values.
+// The actual database is declared statically in dp_evoadapter.cpp.
+// All entries are set to 0 before the first EvoMainLink constructor runs;
+// that first constructor populates the database, and later EvoMainLink
+// instances read their values from it.
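+// (Stated here as a reading of the bInitialized flag below, not a documented
+//  guarantee: a lookup such as getRegkeyDatabase().bMstDisabled is therefore
+//  only meaningful once that first constructor has run.)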
+// +struct DP_REGKEY_DATABASE +{ + bool bInitialized; // set to true after the first EvoMainLink instance is constructed + // Below are regkey values + bool bAudioBeyond48kEnabled; + NvU32 dpcdRevOveride; + bool bSscDisabled; + bool bFastLinkTrainingEnabled; + bool bMstDisabled; + bool bInbandStereoSignalingEnabled; + bool bPoweroffEdpInHeadDetachSkipped; + bool bOcaLoggingEnabled; + bool bReportDeviceLostBeforeNew; + bool bLinkBwOverrideWarApplied; + NvU32 applyMaxLinkRateOverrides; + bool bDscDisabled; + bool bAssesslinkForEdpSkipped; + bool bHdcpAuthOnlyOnDemand; + bool bMsaOverMstEnabled; + bool bOptLinkKeptAlive; + bool bOptLinkKeptAliveMst; + bool bOptLinkKeptAliveSst; + bool bBypassEDPRevCheck; + bool bDscMstCapBug3143315; +}; + +#endif //INCLUDED_DP_REGKEYDATABASE_H + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h new file mode 100644 index 0000000..67fa9e0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include "dp_object.h" + +#define addToRingBufferCollection(x) {} +#define addDpLogRecord(x, ...) {} +#define addDpAssertRecord() {} +#define queryDpLogRecords(a, b, c) {} +#define resetDpAssertRingBuffer() {} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h new file mode 100644 index 0000000..827ae1b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h @@ -0,0 +1,156 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_splitter.h                                                     *
+*    Asynchronous Message splitter                                          *
+*                                                                           *
+\***************************************************************************/
+#ifndef INCLUDED_DP_SPLITTER_H
+#define INCLUDED_DP_SPLITTER_H
+
+#include "dp_list.h"
+#include "dp_auxretry.h"
+#include "dp_timer.h"
+#include "dp_auxdefs.h"
+#include "dp_messageheader.h"
+
+namespace DisplayPort
+{
+
+    struct EncodedMessage;
+    class DPCDHAL;
+
+    class MessageTransactionSplitter
+    {
+        EncodedMessage * messageOutstanding;    // If set, we've pulled an item out of the down queue.
+                                                // One or more transactions have been sent as a result;
+                                                // messageOutstanding->messageOffset shows how far into
+                                                // the message we are.
+        unsigned assemblyTransmitted;
+    public:
+        void set(EncodedMessage * messageOutstanding)
+        {
+            this->messageOutstanding = messageOutstanding;
+            assemblyTransmitted = 0;
+        }
+
+        //
+        // Encode the next transaction.
+        //  returns false if there are no more transactions
+        //
+        bool get(Buffer & assemblyBuffer);
+
+        MessageTransactionSplitter()
+        {}
+    };
+
+    class OutgoingTransactionManager:
+        virtual public Object,
+        private Timer::TimerCallback
+    {
+    public:
+        class OutgoingTransactionManagerEventSink
+        {
+        public:
+            virtual void splitterFailed(OutgoingTransactionManager * from) = 0;      // Sink DEFER the writes
+            virtual void splitterTransmitted(OutgoingTransactionManager * from) = 0; // message was sent (may NACK later)
+        };
+
+        // Send the encoded message. 
This call is destructive to the EncodedMessage + // passed in + bool send( EncodedMessage & payload, OutgoingTransactionManagerEventSink * sink); + + OutgoingTransactionManager(Timer * timer); + virtual ~OutgoingTransactionManager() { timer->cancelCallbacks(this); } + + // Do not make any calls to the event sink + void cancel(OutgoingTransactionManagerEventSink * sink); + + protected: + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getMessageBoxSize() = 0; + private: + void writeToWindow( bool firstAttempt); + void split(); + void expired(const void * tag); // timer callback + + unsigned retriesLeft; + + Buffer assemblyBuffer; + MessageTransactionSplitter transactionSplitter; + + // + // List of outgoing messages + // + struct OutgoingMessage : ListElement + { + OutgoingTransactionManagerEventSink* eventSink; + EncodedMessage message; + }; + + List queuedMessages; + + // + // Message currently assembled in transactionSplitter + // (if any) + // + OutgoingMessage * activeMessage; + Timer * timer; + }; + + + class DownRequestManager : public OutgoingTransactionManager + { + public: + DownRequestManager(DPCDHAL * hal, Timer * timer) + : OutgoingTransactionManager(timer), hal(hal) + { + } + + virtual ~DownRequestManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + }; + + class UpReplyManager : public OutgoingTransactionManager + { + public: + UpReplyManager(DPCDHAL * hal, Timer * timer) + : OutgoingTransactionManager(timer), hal(hal) + { + } + virtual ~UpReplyManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + }; +} + +#endif //INCLUDED_DP_SPLITTER_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h new file mode 100644 index 0000000..35f07ab --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_timeout.h * +* Local timeout management * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TIMEOUT_H +#define INCLUDED_DP_TIMEOUT_H + +#include "dp_timer.h" + +namespace DisplayPort +{ + // + // Timeout management + // + class Timeout : virtual public Object + { + Timer * timer; + NvU64 timeoutTime; // What time to trigger the timeout at + + public: + + Timeout(Timer * _timer, int timeoutMilliseconds) + : timer(_timer), timeoutTime(_timer->getTimeUs() + timeoutMilliseconds*1000 + 1 /* counter could be about to roll */) + { + } + + NvS64 remainingUs() + { + NvS64 remaining = (NvS64)(timeoutTime - timer->getTimeUs()); + + // Rollover check + if (remaining < 0) + { + remaining = 0; + } + + DP_ASSERT(remaining < ((NvS64)1000000*3600) && "Timeout remaining over an hour"); + + return remaining; + } + + bool valid() + { + return remainingUs() > 0; + } + }; +} + +#endif //INCLUDED_DP_TIMEOUT_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h new file mode 100644 index 0000000..bf8c3f6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h @@ -0,0 +1,104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_timer.h * +* Local timer interface * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TIMER_H +#define INCLUDED_DP_TIMER_H + +#include "dp_list.h" + +namespace DisplayPort +{ + // + // RawTimer + // This API is expected to be implemented by the + // library client. 
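+    //   (One plausible host mapping, offered as an assumption rather than a
+    //    requirement: queueCallback() arms a one-shot platform timer that
+    //    later invokes callback->expired(); getTimeUs() returns a monotonic
+    //    microsecond clock; sleep() simply blocks the calling thread.)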
+ // + class RawTimer : virtual public Object + { + public: + struct Callback : virtual public Object + { + virtual void expired() = 0; + }; + virtual void queueCallback(Callback * callback, int milliseconds) = 0; + virtual NvU64 getTimeUs() = 0; + virtual void sleep(int milliseconds) = 0; + }; + + + // + // Timer + // + class Timer : public RawTimer::Callback + { + public: + struct TimerCallback + { + virtual void expired(const void * context) = 0; + }; + + private: + RawTimer * raw; + NvU64 nextTimestamp; + List pending; + struct PendingCallback : ListElement + { + TimerCallback * target; + const void * context; + NvU64 timestamp; // in usec + bool executeInSleep; + }; + + virtual void expired(); + unsigned fire(bool fromSleep); + + void _pump(unsigned milliseconds, bool fromSleep); + public: + Timer(RawTimer * raw) : raw(raw) {} + virtual ~Timer() {} + + // + // Queue a timer callback. + // Unless the dont-execute-in-sleep flag is used (executeInSleep = false), + // the callback may also be fired from within sleep(). + // + void queueCallback(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep = true); + NvU64 getTimeUs(); + void sleep(unsigned milliseconds); + void cancelCallbacks(Timer::TimerCallback * to); + + void cancelCallback(Timer::TimerCallback * to, const void * context); + void queueCallbackInOrder(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep); + void cancelCallbacksWithoutContext(const void * context); + void cancelAllCallbacks(); + bool checkCallbacksOfSameContext(const void * context); + }; +} + +#endif //INCLUDED_DP_TIMER_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h new file mode 100644 index 0000000..993320f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h @@ -0,0 +1,128 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + /******************************* DisplayPort ******************************\ +* * +* Module: dp_tracing.h * +* Header file for support of tracing, implemented by a host provider * +* Because this is platform-agnostic, the tracing API * +* is left up to the host interface.
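A short usage sketch tying the Timer and Timeout helpers above together ('hostRawTimer' is a client RawTimer implementation and pollOnce() a hypothetical polling step; neither is part of this patch):

    // Bound a polling loop to 500 ms using Timeout from dp_timeout.h;
    // remainingUs() clamps at zero on expiry or counter rollover, so the
    // loop cannot overrun its deadline.
    DisplayPort::Timer timer(&hostRawTimer);
    DisplayPort::Timeout timeout(&timer, 500);
    while (timeout.valid())
    {
        if (pollOnce())          // hypothetical client polling step
            break;
        timer.sleep(1);          // back off 1 ms between polls
    }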
* +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_TRACING_H +#define INCLUDED_DP_TRACING_H + +#include "nvtypes.h" + +typedef enum NV_DP_TRACING_EVENT +{ + TRACE_DP_ID_HOTPLUG, + TRACE_DP_ID_NEW_SINK_DETECTED, + TRACE_DP_ID_NEW_SINK_REPORTED, + TRACE_DP_ID_NEW_MST_DEVICE, + TRACE_DP_ID_LOST_DEVICE, + TRACE_DP_ID_LINK_ASSESSMENT, + TRACE_DP_ID_LINK_TRAINING_START, + TRACE_DP_ID_LINK_TRAINING_DONE, + TRACE_DP_ID_NOTIFY_ATTACH_BEGIN, + TRACE_DP_ID_NOTIFY_ATTACH_BEGIN_STATUS, + TRACE_DP_ID_NOTIFY_ATTACH_END, + TRACE_DP_ID_NOTIFY_DETACH_BEGIN, + TRACE_DP_ID_NOTIFY_DETACH_END, + TRACE_DP_ID_MESSAGE_EXPIRED +} NV_DP_TRACING_EVENT; + +typedef enum NV_DP_TRACING_PRIORITY +{ + TRACE_DP_PRIORITY_ERROR, + TRACE_DP_PRIORITY_WARNING, + TRACE_DP_PRIORITY_INFO +} NV_DP_TRACING_PRIORITY; + +#define NV_DPTRACE_MAX_PARAMS 8 + +#define _NV_DPTRACE_EXPAND_HELPER(x) x +#define _NV_DPTRACE_EXPAND(x) _NV_DPTRACE_EXPAND_HELPER(x) + +// +// _COUNT_ARGS: Counts the size of an argument list. +// +// For example, if the argument list is two-arguments "A, B", then call it like this: +// _COUNT_ARGS(_placeholder, A, B, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +// +// which maps to the argument names like this: +// _COUNT_ARGS(_0=_placeholder, _1=A, _2=B, _3=9, _4=8, _5=7, _6=6, _7=5, _8=4,, _9=3, _10=2, ...) +// +// and thus _COUNT_ARGS will return 2, the correct size of the argument list. +// +#define _NV_DPTRACE_COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, ...) _10 + +#define NV_DPTRACE_ERROR(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_ERROR, __VA_ARGS__) +#define NV_DPTRACE_WARNING(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_WARNING, __VA_ARGS__) +#define NV_DPTRACE_INFO(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_INFO, __VA_ARGS__) + +// +// When ##__VA_ARGS__ is used, it will delete a preceding comma (',') when +// __VA_ARGS__ is blank (i.e. zero-length argument list). This allows +// the zero-argument case to work without resulting in a syntax error. +// +// We have a placeholder argument as the first parameter to _COUNT_ARGS +// so that we can take advantage of this comma-deleting behavior. +// +// However, there shouldn't be a zero-arg case as of now, because the first arg is the event. +// +#define NV_DPTRACE_EVENT(priority, ...) \ + _NV_DPTRACE_SEND(priority, _NV_DPTRACE_EXPAND(_NV_DPTRACE_COUNT_ARGS(_0, ##__VA_ARGS__, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)), __VA_ARGS__) + +#define _NV_DPTRACE_SEND(priority, argc, ...) _NV_DPTRACE_EXPAND(_NV_DPTRACE_SEND_N(priority, argc, __VA_ARGS__)) +#define _NV_DPTRACE_SEND_N(priority, argc, ...) 
_NV_DPTRACE_EXPAND(_NV_DPTRACE_##argc(priority, __VA_ARGS__)) + +// The first argument is the event - macro number is one higher than num args passed to dpTraceEvent +#define _NV_DPTRACE_1(priority, event) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 0); + +#define _NV_DPTRACE_2(priority, event, p1) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 1, p1); + +#define _NV_DPTRACE_3(priority, event, p1, p2) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 2, p1, p2); + +#define _NV_DPTRACE_4(priority, event, p1, p2, p3) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 3, p1, p2, p3); + +#define _NV_DPTRACE_5(priority, event, p1, p2, p3, p4) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 4, p1, p2, p3, p4); + +#define _NV_DPTRACE_6(priority, event, p1, p2, p3, p4, p5) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 5, p1, p2, p3, p4, p5); + +#define _NV_DPTRACE_7(priority, event, p1, p2, p3, p4, p5, p6) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 6, p1, p2, p3, p4, p5, p6); + +#define _NV_DPTRACE_8(priority, event, p1, p2, p3, p4, p5, p6, p7) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 7, p1, p2, p3, p4, p5, p6, p7); + +#define _NV_DPTRACE_9(priority, event, p1, p2, p3, p4, p5, p6, p7, p8) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 8, p1, p2, p3, p4, p5, p6, p7, p8); + +#endif // INCLUDED_DP_TRACING_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h new file mode 100644 index 0000000..4fa73aa --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
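To make the argument-counting trick above concrete, here is one expansion traced by hand:

    // NV_DPTRACE_INFO(HOTPLUG, connectorId)
    //  -> NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_INFO, HOTPLUG, connectorId)
    //  -> _NV_DPTRACE_COUNT_ARGS(_0, HOTPLUG, connectorId, 9, 8, ..., 0) == 2
    //  -> _NV_DPTRACE_2(TRACE_DP_PRIORITY_INFO, HOTPLUG, connectorId)
    //  -> dpTraceEvent(TRACE_DP_ID_HOTPLUG, TRACE_DP_PRIORITY_INFO, 1, connectorId);
    //
    // The event name folds into the TRACE_DP_ID_ enum; the remaining argument
    // count (1) and the arguments themselves are forwarded to the host-provided
    // dpTraceEvent entry point that these macros target.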
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_vrr.h * +* Prototypes and definitions related to VRR enablement * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_VRR_H +#define INCLUDED_DP_VRR_H + +#include "dp_object.h" + +// Worstcase VRR enablement handshake timeout of 600ms (40x15ms) +#define VRR_ENABLE_STATUS_TIMEOUT_THRESHOLD 40 +#define VRR_ENABLE_STATUS_TIMEOUT_INTERVAL_MS 15 + +// Retry enablement threshold in notifyShortPulse() +#define VRR_MAX_RETRIES 3 + +namespace DisplayPort +{ + enum VrrEnableStage + { + VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN, + VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE, + VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK, + VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN, + VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE, + VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, + VRR_ENABLE_STAGE_RESET_MONITOR, + VRR_ENABLE_STAGE_INIT_PUBLIC_INFO, + VRR_ENABLE_STAGE_GET_PUBLIC_INFO, + VRR_ENABLE_STAGE_STATUS_CHECK, + }; + + struct DeviceImpl; + + class VrrEnablement : virtual public Object + { + private: + DeviceImpl *parent; + bool bMonitorEnabled; + + bool vrrGetPublicInfo(void); + bool vrrWaitOnEnableStatus(void); + bool vrrEnableMonitor(void); + bool vrrEnableDriver(void); + + public: + + VrrEnablement(DeviceImpl *parent) + : parent(parent) + { + reset(); + } + + ~VrrEnablement() + { + parent = NULL; + reset(); + } + + bool start(void); + void reset(void) + { + bMonitorEnabled = false; + } + bool isMonitorEnabled(void); + bool isDriverEnabled(void); + }; +} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h new file mode 100644 index 0000000..9b54f95 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h @@ -0,0 +1,75 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
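A hedged sketch of how connector code might drive the VrrEnablement helper above ('dev' stands in for the owning DeviceImpl; error handling elided):

    DisplayPort::VrrEnablement * vrr = new DisplayPort::VrrEnablement(dev);
    if (!vrr->start())
    {
        // The handshake can take up to 600 ms worst case (40 polls x 15 ms,
        // per the constants above); per the comment above, callers may retry
        // up to VRR_MAX_RETRIES times from notifyShortPulse().
    }
    bool enabled = vrr->isMonitorEnabled();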
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_wardatabase.h * +* EDID and OUI based workarounds for panel/TCON issues * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_WARDATABASE_H +#define INCLUDED_DP_WARDATABASE_H + +#include "dp_object.h" + +namespace DisplayPort +{ + #define WAR_MAX_REASSESS_ATTEMPT 3 + #define WAR_MAX_RETRAIN_ATTEMPT 3 + + typedef enum + { + DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT = (1 << 0), // Do not train if the link B/W and lane count are already set to the desired quantities + DP_MONITOR_CAPABILITY_DP_SKIP_CABLE_BW_CHECK = (1 << 1), // Skip the link training attempts to test cable bandwidth in CheckDpLink + DP_MONITOR_CAPABILITY_DP_MULTI_WRITE_DPCD_0x600 = (1 << 2), // Repeatedly write 0x1 to 0x600 with extra delays until the read verifies the write + DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT = (1 << 3), // Power on a monitor before every link training + DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG = (1 << 4), // Override optimal link config + DP_MONITOR_CAPABILITY_DP_OVERRIDE_MAX_LANE_COUNT = (1 << 5), // WAR for some DP monitors which claims more lane count than it really supports. It may generate interrupt storm if unsupported lane count is applied + DP_MONITOR_CAPABILITY_DP_AVOID_UPDATE_POWER_STATE = (1 << 6), // Don't update panel power state when head detach or lid closed + } DP_MONITOR_CAPABILITY; + + struct DpMonitorDenylistData: virtual public Object + { + // Max lane count supported override value + unsigned int dpMaxLaneCountOverride; + + // Link rate and Lane count value overrides + // when we need to skip BW check + struct + { + unsigned int maxLaneAtHighRate; + unsigned int maxLaneAtLowRate; + } dpSkipCheckLink; + + // Link rate and Lane count value overrides + // when we need to force optimal link config + struct + { + unsigned int linkRate; + unsigned int laneCount; + } dpOverrideOptimalLinkConfig; + }; +} + +#endif // INCLUDED_DP_WARDATABASE_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h new file mode 100644 index 0000000..b9f05d0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_watermark.h * +* DP watermark IsModePossible calculations. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_WATERMARK_H +#define INCLUDED_DP_WATERMARK_H + +#include "displayport.h" + +#define WAR_AUDIOCLAMPING_FREQ 48000 // Audio freq. more than 48KHz are currently clamped due to bug 925211 + +namespace DisplayPort +{ + class LinkConfiguration; + + struct ModesetInfo + { + unsigned twoChannelAudioHz; // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz; // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz; // Requested pixel clock for the mode + unsigned rasterWidth; + unsigned rasterHeight; + unsigned surfaceWidth; // RasterBlankStartX - newRasterBlankEndX + unsigned surfaceHeight; // Active region height + unsigned depth; + unsigned rasterBlankStartX; + unsigned rasterBlankEndX; + unsigned bitsPerComponent; // Bits per component + bool bEnableDsc; // bEnableDsc=1 indicates DSC would be enabled for the mode + DSC_MODE mode; // DSC Mode + + ModesetInfo(): twoChannelAudioHz(0), + eightChannelAudioHz(0), + pixelClockHz(0), + rasterWidth(0), + rasterHeight(0), + surfaceWidth(0), + surfaceHeight(0), + depth(0), + rasterBlankStartX(0), + rasterBlankEndX(0), + bitsPerComponent(0), + bEnableDsc(false), + mode(DSC_SINGLE) {} + + ModesetInfo(unsigned newTwoChannelAudioHz, unsigned newEightChannelAudioHz, NvU64 newPixelClockHz, + unsigned newRasterWidth, unsigned newRasterHeight, + unsigned newSurfaceWidth, unsigned newSurfaceHeight, unsigned newDepth, + unsigned newRasterBlankStartX=0, unsigned newRasterBlankEndX=0, bool newBEnableDsc = false, + DSC_MODE newMode = DSC_SINGLE): + twoChannelAudioHz(newTwoChannelAudioHz), + eightChannelAudioHz(newEightChannelAudioHz), + pixelClockHz(newPixelClockHz), + rasterWidth(newRasterWidth), + rasterHeight(newRasterHeight), + surfaceWidth(newSurfaceWidth), + surfaceHeight(newSurfaceHeight), + depth(newDepth), + rasterBlankStartX(newRasterBlankStartX), + rasterBlankEndX(newRasterBlankEndX), + bitsPerComponent(0), + bEnableDsc(newBEnableDsc), + mode(newMode){} + }; + + struct Watermark + { + unsigned waterMark; + unsigned tuSize; + unsigned hBlankSym; + unsigned vBlankSym; + }; + + bool isModePossibleSST + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits = false + ); + + bool isModePossibleMST + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo + ); + + bool isModePossibleSSTWithFEC + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits = false + ); + + bool isModePossibleMSTWithFEC + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo + ); + + // Return Payload Bandwidth Number(PBN)for requested mode + unsigned pbnForMode(const ModesetInfo & modesetInfo); +} + +#endif //INCLUDED_DP_WATERMARK_H diff --git 
a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h new file mode 100644 index 0000000..b841dc0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort*********************************\ +* * +* Module: dp_testmessage.h * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TESTMESSAGE_H +#define INCLUDED_DP_TESTMESSAGE_H + +#include "dp_auxdefs.h" + +#include "dp_connector.h" + +#define DP_LPRIME_SIZE 20 +namespace DisplayPort +{ + // test request status, for DP lib and NvAPI + typedef enum + { + DP_TESTMESSAGE_REQUEST_STATUS_PENDING = 0, // the request is still being processed + DP_TESTMESSAGE_REQUEST_STATUS_DONE = 1, // request has been processed + DP_TESTMESSAGE_REQUEST_STATUS_ERROR = 2, // error, DP lib busy with another request + DP_TESTMESSAGE_REQUEST_STATUS_NEWREQUEST = 3, // new request for user + } DP_TESTMESSAGE_REQUEST_STATUS; + + // Request type enum. + typedef enum + { + } DP_TESTMESSAGE_REQUEST_TYPE; + + class TestMessage; + struct ConnectorImpl; + + struct DPTestMessageCompletion : public MessageManager::Message::MessageEventSink + { + TestMessage *parent; + + public: + void setParent(TestMessage *parent) + { + this->parent = parent; + } + // callback invoked if the message fails; the DP lib status (testMessageStatus) + // needs to be set to DONE + void messageFailed(MessageManager::Message * from, NakData * data); + + // callback invoked when the message completes; the DP lib status (testMessageStatus) + // needs to be set to DONE.
+ // If a message has a reply, it is necessary to record the reply in the dp lib to + // send back to user later + void messageCompleted(MessageManager::Message * from); + + }; + + class TestMessage : virtual public Object + { + private: + ConnectorImpl *pConnector; + // check if the user provided request struct is of valid size + inline bool isValidStruct(DP_TESTMESSAGE_REQUEST_TYPE requestType, NvU32 structSize) + { + switch (requestType) + { + default: + return false; + } + } + MessageManager *pMsgManager; + DPTestMessageCompletion diagCompl; + + // Data Structure for Generic Message. + NvU32 replyBytes; + + public: + + DP_TESTMESSAGE_REQUEST_STATUS testMessageStatus; + + TestMessage() : testMessageStatus(DP_TESTMESSAGE_REQUEST_STATUS_DONE) + { + diagCompl.setParent(this); + pConnector = 0; + pMsgManager = 0; + replyBytes = 0; + } + DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus); + MessageManager * getMessageManager(); + void setupTestMessage(MessageManager *msgManager, ConnectorImpl *connector) + { + pMsgManager = msgManager; + pConnector = connector; + } + + }; +} + + +#endif + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp new file mode 100644 index 0000000..bcc2143 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp @@ -0,0 +1,315 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* Module: dp_auxretry.cpp * +* Interface implemented by library client. * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_auxbus.h" +#include "dp_auxretry.h" +#include "dp_messageheader.h" + +#include "displayport.h" + +using namespace DisplayPort; + +// +// Read a DPCD address. 
+// - limited to a single transaction/burst size (see read() below for larger spans) +// - handles defer retries +// - handles nacks with incomplete data +// +AuxRetry::status AuxRetry::readTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + unsigned completed; + AuxBus::status s; + + DP_ASSERT( size <= aux->transactionSize() ); + + do + { + s = aux->transaction(AuxBus::read, AuxBus::native, address, buffer, size, &completed); + + // + // Got success & requested data. Also size of returned data is + // expected & non zero. + // + if ((s == AuxBus::success) && (completed == size) && (completed != 0)) + { + return ack; + } + else + { + // + // Handle defer case with a simple retry + // + if (s == AuxBus::defer) + { + if (retries) + { + --retries; + continue; + } + + return defer; + } + + // + // Nack shouldn't happen in general. Unsupported registers + // are supposed to ACK with size of 0. + // + if ( s == AuxBus::nack ) + { + return nack; + } + + if ( completed == 0 ) + { + return unsupportedRegister; + } + + // + // We got less data back than we requested... + // It's unclear when this might happen in the spec. + // We can either + // 1. Split the read into multiple pieces + // (Dangerous since we may receive non-atomic updates) + // 2. Retry + // + if ( completed < size ) + { + // + // Retry + // + if (retries) + { + --retries; + continue; + } + else + { + // Closest approximation is a defer + return defer; + } + } + } + } while(retries); + + if ((s == AuxBus::defer) || (completed < size)) + { + return defer; + } + + return ack; +} + +// +// Write a DPCD address. +// - limited to a single transaction/burst size (see write() below for larger spans) +// - handles defer retries +// - handles nacks with incomplete data +// +AuxRetry::status AuxRetry::writeTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + unsigned completed; + AuxBus::status s; + + DP_ASSERT( size <= aux->transactionSize() ); + + do + { + s = aux->transaction(AuxBus::write, AuxBus::native, address, buffer, size, &completed); + + // + // Got success & requested data. Also size of returned data is + // expected & non zero. + // + if ((s == AuxBus::success) && (completed == size) && (completed != 0)) + { + return ack; + } + else + { + // + // Handle defer case with a simple retry + // + if (s == AuxBus::defer) + { + if (retries) + { + --retries; + continue; + } + + return defer; + } + + // + // Nack shouldn't happen in general. Unsupported registers + // are supposed to ACK with size of 0. + // + if ( s == AuxBus::nack ) + { + return nack; + } + + DP_ASSERT( s == AuxBus::success); + + if ( completed == 0 ) + { + return unsupportedRegister; + } + + // + // Incomplete write? + // Shouldn't happen.
Just retry if it does + // + if ( completed < size ) + { + // + // Retry + // + if (retries) + { + --retries; + continue; + } + else + { + // Closest approximation is a defer + return defer; + } + } + } + } while(retries); + + if ((s == AuxBus::defer) || (completed < size)) + { + return defer; + } + + return ack; +} + +// +// Similar to readTransaction except that it supports reading +// larger spans than AuxBus::transactionSize() +// +AuxRetry::status AuxRetry::read(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + for (unsigned i = 0 ; i < size; ) + { + int todo = DP_MIN(size - i, aux->transactionSize()); + status s = readTransaction(address+i, buffer+i, todo, retries); + + if (s != ack) + { + return s; + } + + i += todo; + } + + return ack; +} + +// +// Similar to writeTransaction except that it supports writing +// larger spans than AuxBus::transactionSize() +// +AuxRetry::status AuxRetry::write(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + for (unsigned i = 0 ; i < size; ) + { + int todo = DP_MIN(size - i, aux->transactionSize()); + status s = writeTransaction(address+i, buffer+i, todo, retries); + + if (s != ack) + { + return s; + } + + i += todo; + } + + return ack; +} + +AuxBus::status AuxLogger::transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions) +{ + AuxBus::status result = bus->transaction(action, type, address, buffer, sizeRequested, sizeCompleted); + hint[0] = 0; + // + // Do the hex dump. + // - We can't make library calls + // - We need to do this in one printf + if (result == success) + { + if (type == native) + if (address == NV_DPCD_MBOX_DOWN_REQ || address == NV_DPCD_MBOX_UP_REP || + address == NV_DPCD_MBOX_DOWN_REP || address == NV_DPCD_MBOX_UP_REQ) + { + unsigned len = *sizeCompleted; + Buffer storage(buffer, len); + BitStreamReader reader(&storage, 0, len*8); + MessageHeader header; + DisplayPort::decodeHeader(&reader, &header, Address(1)); + Address::StringBuffer sb; + DP_USED(sb); + dpHexDump(&hex[0], sizeof(hex), buffer, header.headerSizeBits/8); + dpHexDump(&hex_body[0], sizeof(hex), buffer + header.headerSizeBits/8, len - header.headerSizeBits/8); +#if defined(_DEBUG) || defined(DEBUG) + const char * name = ""; + if (header.isTransactionStart && action==write && len > header.headerSizeBits/8) + name = getRequestId(buffer[header.headerSizeBits/8]); + + DP_LOG(("DP-AUX> %s%s%s%s%04Xh hint(to:%s %s%s %s #%d) { %s| %s}", + sizeRequested == *sizeCompleted ? "" : "INCOMPLETE ", getStatus(result), + getAction(action), getType(type), address, + header.address.toString(sb), header.isTransactionStart ? "S" : "", + header.isTransactionEnd ? "E" : "", name, header.messageNumber, + hex, hex_body)); +#endif + return result; + } + } + else + hex[0] = 0; + + dpHexDump(&hex[0], sizeof(hex), buffer, *sizeCompleted); + DP_LOG(("DP-AUX> %s%s%s%s%04Xh { %s }", sizeRequested == *sizeCompleted ? 
"" : "INCOMPLETE ", + getStatus(result), getAction(action), getType(type), address, hex)); + + return result; +} + +AuxBus * DisplayPort::CreateAuxLogger(AuxBus * auxBus) +{ + return new AuxLogger(auxBus); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp new file mode 100644 index 0000000..39117de --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp @@ -0,0 +1,204 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_bitstream.c * +* Implementation of Big Endian bit streams. * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" + +using namespace DisplayPort; +bool BitStreamReader::read(unsigned * value, unsigned bits) +{ + unsigned topbit = (7- (this->bitsOffset & 7)); + + if (this->bitsOffset + bits > this->bitsEnd) + { + return false; + } + + // + // We're filling the byte down from 'topbit' towards 0. + // Can we fit all of the bits starting at topbit before + // overflowing to the next byte? + // + if (bits <= (topbit+1)) + { + int bottombit = topbit - (bits-1); + *value = (this->buffer()->data[this->bitsOffset / 8] >> bottombit) & ((1 << bits)-1); + + this->bitsOffset+=bits; + return true; + } + + // + // We're either reading too many bits or we're straddling + // a byte boundary. Serialize bit by bit. + // NOTE: This scenario is entire unlikely. Don't optimize. 
+ // + + *value = 0; + while (bits) + { + unsigned bit; + if (!read(&bit, 1)) + { + return false; + } + *value = *value * 2 + bit; + bits--; + } + + return true; +} + +unsigned BitStreamReader::readOrDefault(unsigned bits, unsigned defaultValue) +{ + unsigned value; + + if (read(&value, bits)) + { + return value; + } + else + { + return defaultValue; + } +} + + +bool BitStreamReader::align(unsigned align) +{ + // Verify alignment is a power of two + if (!(align && ((align & (align - 1)) == 0))) + { + DP_ASSERT(0); + } + else + { + if (this->bitsOffset & (align - 1)) + { + this->bitsOffset = (this->bitsOffset + align) &~ (align - 1); + } + } + return this->bitsOffset <= this->bitsEnd; +} + +bool BitStreamWriter::write(unsigned value, unsigned bits) +{ + DP_ASSERT((value < (1ULL << bits)) && "Value out of range"); + unsigned topbit = (7- (this->bitsOffset & 7)); + + if (this->bitsOffset + bits > this->buffer()->length * 8) + { + this->buffer()->resize((this->bitsOffset + bits+7)/8); + } + + // + // We're filling the byte down from 'topbit' towards 0. + // Can we fit all of the bits starting at topbit before + // overflowing to the next byte? + // + if (bits <= (topbit+1)) + { + int bottombit = topbit - (bits-1); + NvU8 clearmask = ((1 << bits)-1) << bottombit; + + this->buffer()->data[this->bitsOffset / 8] = (NvU8)((this->buffer()->data[this->bitsOffset / 8] &~ clearmask) | (value << bottombit)); + + this->bitsOffset+=bits; + return true; + } + + // + // We're either writing too many bits or we're straddling + // a byte boundary. Serialize bit by bit. + // NOTE: This scenario is entirely unlikely. Don't optimize. + // + + while (bits) + { + bits --; + if (!write( (value >> bits) & 1, 1)) + { + return false; + } + } + + return true; +} + +bool BitStreamWriter::align(unsigned align) +{ + // Verify alignment is a power of two + if (!(align && ((align & (align - 1)) == 0))) + { + DP_ASSERT(0); + } + else + { + if (this->bitsOffset & (align - 1)) + return this->write(0, align - (this->bitsOffset & (align - 1))); + } + + return true; +} + +unsigned BitStreamReader::offset() +{ + return this->bitsOffset; +} + +unsigned BitStreamWriter::offset() +{ + return this->bitsOffset; +} + +Buffer * BitStreamWriter::buffer() +{ + return this->targetBuffer; +} + +Buffer * BitStreamReader::buffer() +{ + return this->sourceBuffer; +} + + +BitStreamWriter::BitStreamWriter(Buffer * buffer, unsigned bitsOffset) +{ + this->targetBuffer = buffer; + this->bitsOffset = bitsOffset; +} + + +BitStreamReader::BitStreamReader(Buffer * buffer, unsigned bitsOffset, unsigned bitsCount) +{ + this->sourceBuffer = buffer; + this->bitsOffset = bitsOffset; + this->bitsEnd = bitsCount + bitsOffset; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp new file mode 100644 index 0000000..abc0d0f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp @@ -0,0 +1,267 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
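A small round-trip sketch for the big-endian bit streams implemented above (Buffer comes from dp_buffer.h):

    // Pack a 3-bit field and a 5-bit field into one byte, MSB first.
    DisplayPort::Buffer buf;
    DisplayPort::BitStreamWriter writer(&buf, 0);
    writer.write(0x5, 3);                      // bits 7..5 = 101
    writer.write(0x0A, 5);                     // bits 4..0 = 01010
    // buf.data[0] now reads 0xAA (10101010b).

    DisplayPort::BitStreamReader reader(&buf, 0, 8);
    unsigned hi = reader.readOrDefault(3, 0);  // hi == 0x5
    unsigned lo = reader.readOrDefault(5, 0);  // lo == 0x0A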
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_buffer.cpp * +* Resizable byte buffer and stream operations * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_buffer.h" + +using namespace DisplayPort; + +void DisplayPort::swapBuffers(Buffer & left, Buffer & right) +{ + NvU8 *tmpData = left.data; + unsigned tmpLength = left.length; + unsigned tmpCapacity = left.capacity; + bool tmpErrorState = left.errorState; + + left.data = right.data; + left.length = right.length; + left.capacity = right.capacity; + left.errorState = right.errorState; + + right.data = tmpData; + right.length = tmpLength; + right.capacity = tmpCapacity; + right.errorState= tmpErrorState; +} + + +bool Stream::seek(unsigned where) +{ + // + // Allow seek to any position in the file INCLUDING + // the first byte past the end of the file. 
+ // + if (where > this->parent->length) + { + return false; + } + + this->byteOffset = where; + + return true; +} + +bool Stream::read(NvU8 * buffer, unsigned size) +{ + unsigned stopReadAt = this->byteOffset + size; + + if (stopReadAt > this->parent->length) + { + return false; + } + + dpMemCopy(buffer, this->parent->data + this->byteOffset, size); + this->byteOffset = stopReadAt; + + return true; +} + +bool Buffer::resize(unsigned stopWriteAt) +{ + bool mustIncrease = stopWriteAt > this->capacity; + + if (mustIncrease || (stopWriteAt * 4 < this->capacity) ) + { + unsigned newCapacity; + NvU8 * newBuffer; + + newCapacity = 32; + + while (newCapacity <= stopWriteAt) + { + newCapacity *= 2; + } + + if (newCapacity == this->capacity) { + this->length = stopWriteAt; + return true; + } + + newBuffer = (NvU8 *)dpMalloc(sizeof(NvU8) * newCapacity); + + if (!newBuffer) + { + if (mustIncrease) + { + if (this->data) + { + dpFree(this->data); + } + + this->errorState = true; + this->data = 0; + this->capacity = 0; + this->length = 0; + } + else + newCapacity = this->capacity; + + return false; + } + + if (this->data) + { + dpMemCopy(newBuffer, this->data, DP_MIN(newCapacity, this->length)); + dpFree(this->data); + } + + this->data = newBuffer; + this->capacity = newCapacity; + + } + + this->length = stopWriteAt; + return true; +} + +void Buffer::memZero() +{ + if (this->data) + dpMemZero(this->data, this->length); +} + +bool Stream::write(NvU8 * buffer, unsigned size) +{ + unsigned stopWriteAt = this->byteOffset + size; + + if (stopWriteAt > this->parent->length) + { + this->parent->resize(stopWriteAt); + } + + if (isError()) + return false; + + dpMemCopy( this->parent->data + this->byteOffset, buffer, size); + this->byteOffset = stopWriteAt; + this->parent->length = DP_MAX(this->parent->length, stopWriteAt); + + return true; +} + +unsigned Stream::remaining() +{ + return this->parent->length - this->byteOffset; +} + +unsigned Stream::offset() +{ + return this->byteOffset; +} + +Buffer::~Buffer() +{ + reset(); +} + +void Buffer::reset() +{ + if (this->data) + { + dpFree(this->data); + } + + length = 0; + capacity = 0; + data = 0; + errorState = false; +} + +bool Buffer::isError() const +{ + return this->errorState; +} + + +Stream::Stream(Buffer * buffer) + : parent(buffer), byteOffset(0) +{ +} + +bool Stream::isError() const +{ + return this->parent->errorState; +} + +Buffer::Buffer() + : data(0), length(0), capacity(0), errorState(false) +{ +} + +Buffer::Buffer(NvU8 * src, unsigned size) + : data(0), length(0), capacity(0), errorState(false) +{ + if (src && size && resize(size) && data) + dpMemCopy(data, src, size); +} + +Buffer::Buffer(const Buffer & other) + : data(0), length(0), capacity(0), errorState(false) +{ + if (other.isError()) + { + errorState = true; + } + else + { + if (resize(other.getLength()) && other.getData()) + dpMemCopy(getData(), other.getData(), getLength()); + } +} + +Buffer & Buffer::operator = (const Buffer & other) +{ + if (other.isError()) + { + errorState = true; + } + else + { + if (resize(other.getLength())) + dpMemCopy(getData(), other.getData(), getLength()); + } + return *this; +} + + +bool Buffer::operator== (const Buffer & other) const +{ + if (length != other.length) + return false; + + for (unsigned i = 0; i < length; i++) + { + if (data[i] != other.data[i]) + return false; + + } + + return true; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp 
b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp new file mode 100644 index 0000000..a3e583e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp @@ -0,0 +1,3170 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_configcaps.cpp * +* Abstraction for basic caps registers * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_auxbus.h" +#include "dp_configcaps.h" +#include "dp_auxdefs.h" +#include "displayport.h" + +using namespace DisplayPort; + +struct DPCDHALImpl : DPCDHAL +{ + AuxRetry bus; + Timer * timer; + bool dpcdOffline; + bool gpuDP1_2Supported; + bool gpuDP1_4Supported; + bool bGrantsPostLtRequest; + bool pc2Disabled; + bool uprequestEnable; + bool upstreamIsSource; + bool bMultistream; + bool bGpuFECSupported; + bool bLttprSupported; + bool bBypassILREdpRevCheck; + NvU32 overrideDpcdMaxLinkRate; + NvU32 overrideDpcdRev; + NvU32 overrideDpcdMaxLaneCount; + + struct _LegacyPort: public LegacyPort + { + DwnStreamPortType type; + DwnStreamPortAttribute nonEDID; + + NvU64 maxTmdsClkRate; + + DwnStreamPortType getDownstreamPortType() + { + return type; + } + + DwnStreamPortAttribute getDownstreamNonEDIDPortAttribute() + { + return nonEDID; + } + + NvU64 getMaxTmdsClkRate() + { + return maxTmdsClkRate; + } + + } legacyPort[16]; + + struct + { + unsigned revisionMajor, revisionMinor; // DPCD offset 0 + bool supportsESI; + LinkRate maxLinkRate; // DPCD offset 1 + unsigned maxLaneCount; // DPCD offset 2 + unsigned maxLanesAtHBR; + unsigned maxLanesAtRBR; + bool enhancedFraming; + bool bPostLtAdjustmentSupport; + + bool supportsNoHandshakeTraining; + bool bSupportsTPS4; + unsigned NORP; // DPCD offset 4 + + bool detailedCapInfo; // DPCD offset 5 + bool downStreamPortPresent; + NvU8 downStreamPortType; + + unsigned downStreamPortCount; // DPCD offset 7 + bool ouiSupported; + bool msaTimingParIgnored; + + NvU16 linkRateTable[NV_DPCD_SUPPORTED_LINK_RATES__SIZE]; // DPCD offset 10 ~ 1F + + bool supportsMultistream; // DPCD offset 21 + unsigned numberAudioEndpoints; // DPCD offset 22 + bool overrideToSST; // force to SST even if MST capable + bool noLinkTraining; // 
DPCD offset 330h + + bool extendedRxCapsPresent; // DPCD offset 000Eh [7] - Extended Receiver Capability present + + // DPCD Offset 2211h; + unsigned extendedSleepWakeTimeoutRequestMs; + // DPCD Offset 0119h [0] - If we grant the extendedSleepWakeTimeoutRequest + bool bExtendedSleepWakeTimeoutGranted; + + // 0x2206, if the sink supports 128b/132b + bool bDP20ChannelCodingSupported; + // 0x2215 + bool bUHBR_10GSupported; + bool bUHBR_13_5GSupported; + bool bUHBR_20GSupported; + + + // DPCD Offset F0002h - Number of Physical Repeaters present (after mapping) between Source and Sink + unsigned phyRepeaterCount; + // DPCD offset 700 - EDP_DPCD_REV + unsigned eDpRevision; + + struct + { + unsigned revisionMajor, revisionMinor; // DPCD offset F0000h + LinkRate maxLinkRate; // DPCD offset F0001h + unsigned maxLaneCount; // DPCD offset F0004h + unsigned phyRepeaterExtendedWakeTimeoutMs; // DPCD offset F0005h + + // 0xF0006, if the PHY Repeater supports 128b/132b + bool bDP20ChannelCodingSupported; + // 0xF0007 + bool UHBR_10GSupported; + bool UHBR_13_5GSupported; + bool UHBR_20GSupported; + } repeaterCaps; + + PCONCaps pconCaps; + vesaPsrSinkCaps psrCaps; + NvU32 videoFallbackFormats; // DPCD offset 0200h + + } caps; + + struct + { + unsigned sinkCount; // DPCD offset 200 + bool automatedTestRequest; + bool cpIRQ; + bool mccsIRQ; + bool downRepMsgRdy; + bool upReqMsgRdy; + bool rxCapChanged; // DPCD offset 2005 + bool linkStatusChanged; // DPCD offset 2005 + bool streamStatusChanged; // DPCD offset 2005 + bool hdmiLinkStatusChanged; // DPCD offset 2005 + NvU8 eightyBitCustomPat[10]; // DPCD offset 250 - 259 + + struct + { + struct + { + bool clockRecoveryDone; + bool channelEqualizationDone; + bool symbolLocked; + } laneStatus[4]; // DPCD offset 202, 203 + + bool interlaneAlignDone; // DPCD offset 204 + bool downstmPortChng; + bool linkStatusUpdated; + + // + // (ESI specific) signifies that we have link trained and should + // update the link status in the next query to isLinkLost. Keep in + // mind that linkStatusChanged might still be zero. 
+ // + bool linkStatusDirtied; + } laneStatusIntr; + + struct + { + bool testRequestTraining; // DPCD offset 218 + LinkRate testRequestLinkRate; // DPCD offset 219 + unsigned testRequestLaneCount; // DPCD offset 220 + } testTraining; + + struct + { + bool testRequestEdidRead; // DPCD offset 218 + } testEdid; + + struct + { + bool testRequestPattern; // DPCD offset 218 + TestPatternType testPatRequested; // DPCD offset 221 + NvU16 testHorTotalPixels; // DPCD offset 222, 223 + NvU16 testVerTotalLines; // DPCD offset 224, 225 + NvU16 testHorStartPixels; // DPCD offset 226, 227 + NvU16 testVerStartLines; // DPCD offset 228, 229 + NvU16 testHsyncWidthPixels; // DPCD offset 22A, 22B + bool testHsyncPolarity; + NvU16 testVsyncWidthLines; // DPCD offset 22C, 22D + bool testVsyncPolarity; + NvU16 testActiveWidthPixels; // DPCD offset 22E, 22F + NvU16 testActiveHeightLines; // DPCD offset 230, 231 + } testPattern; + + struct + { + bool testRequestPhyCompliance; // DPCD offset 218 + LinkQualityPatternType phyTestPattern; // DPCD offset 248 + } testPhyCompliance; + + } interrupts; + + bool bIndexedLinkrateCapable, bIndexedLinkrateEnabled; + + public: + DPCDHALImpl(AuxBus * bus, Timer * timer) + : bus(bus), + timer(timer), + gpuDP1_2Supported(false), + gpuDP1_4Supported(false), + bGrantsPostLtRequest(false), + uprequestEnable(false), + upstreamIsSource(false), + bMultistream(false), + bGpuFECSupported(false), + bBypassILREdpRevCheck(false), + overrideDpcdMaxLinkRate(0), + overrideDpcdRev(0) + { + // start with default caps. + populateFakeDpcd(); + } + + ~DPCDHALImpl() + { + } + + virtual void setAuxBus(AuxBus * bus) + { + this->bus = bus; + } + + bool isDpcdOffline() + { + return dpcdOffline; + } + + void setDPCDOffline(bool bOffline) + { + dpcdOffline = bOffline; + } + + void updateDPCDOffline() + { + NvU8 buffer[16]; + unsigned retries = 16; + // Burst read from 0x00 to 0x0F. + if (AuxRetry::ack != bus.read(NV_DPCD_REV, &buffer[0], sizeof buffer, retries)) + { + dpcdOffline = true; + } + else + { + dpcdOffline = false; + } + } + + void setPC2Disabled(bool disabled) + { + pc2Disabled = disabled; + } + + void setLttprSupported(bool isLttprSupported) + { + bLttprSupported = isLttprSupported; + } + + bool isPC2Disabled() + { + return pc2Disabled; + } + void parseAndReadCaps() + { + NvU8 buffer[16]; + NvU8 byte = 0; + AuxRetry::status status; + unsigned retries = 16; + // Burst read from 0x00 to 0x0F. + + // + // The Extended Receiver Capability field at DPCD Addresses 02200h through 022FFh is valid + // with DPCD Rev. 1.4 (and higher). + // + // A DPRX that supports the Extended Receiver Capability field must set the + // EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT bit in the TRAINING_AUX_RD_INTERVAL + // register (DPCD Address 0000Eh, bit 7) to 1 + // + caps.extendedRxCapsPresent = false; + if (AuxRetry::ack == bus.read(NV_DPCD_TRAINING_AUX_RD_INTERVAL, &byte, sizeof byte)) + { + caps.extendedRxCapsPresent = DRF_VAL(_DPCD14, _TRAINING_AUX_RD_INTERVAL, _EXTENDED_RX_CAP, byte); + } + + if (caps.extendedRxCapsPresent) + { + status = bus.read(NV_DPCD14_EXTENDED_REV, &buffer[0], sizeof buffer, retries); + } + else + { + status = bus.read(NV_DPCD_REV, &buffer[0], sizeof buffer, retries); + } + + if (AuxRetry::ack != status) + { + // Failed to read caps. + // Set an invalid state here and make sure we REMEMBER we couldn't get the caps + caps.revisionMajor = 0; + dpcdOffline = true; + return; + } + + // reset the faked dpcd flag since real LT should be possible now. 
+ dpcdOffline = false; + + // reset edp revision to 0 + caps.eDpRevision = 0; + + if (overrideDpcdRev) + { + // Override the revision no. as DPCD override regkey is set + caps.revisionMajor = DRF_VAL(_DPCD, _REV, _MAJOR, overrideDpcdRev); + caps.revisionMinor = DRF_VAL(_DPCD, _REV, _MINOR, overrideDpcdRev); + } + else + { + caps.revisionMajor = DRF_VAL(_DPCD, _REV, _MAJOR, buffer[0]); + caps.revisionMinor = DRF_VAL(_DPCD, _REV, _MINOR, buffer[0]); + if (isAtLeastVersion(1, 2)) + { + // + // WAR required for panels with MSTAR chip as they report themselves as + // DP1.2 but they don't support DP1.2. Check OUI & ESI sinkCount. if OUI + // is not supported & sinkCount is "0", downgrade the revision to 1.1. + // + if (FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _OUI_SUPPORT, _NO, buffer[7])) + { + // Read the ESI sinkCount & overwrite revision no. if ESI not supported + NvU8 esiBuffer[1] = {0}; + NvU32 sinkCount; + AuxRetry::status status; + // + // Don't just check the transaction status as not-supporting ESI means it may + // NACK a transaction to ESI space or may return "0" as sinkCount. We need + // to override the revision Minor in both cases. + // + status = bus.read(NV_DPCD_SINK_COUNT_ESI, &esiBuffer[0], sizeof esiBuffer); + sinkCount = DRF_VAL(_DPCD, _SINK_COUNT_ESI, _SINK_COUNT, esiBuffer[0]); + + if ((sinkCount == 0) || (status != AuxRetry::ack)) + { + // If ESI not supported then overwrite the revision + caps.revisionMajor = 1; + caps.revisionMinor = 1; + } + } + + // Check if DPCD_DISPLAY_CONTROL_CAPABLE = 1 + if (FLD_TEST_DRF(_DPCD, _EDP_CONFIG_CAP, _DISPLAY_CONTROL_CAPABLE, _YES, buffer[0x0D])) + { + NvU8 edpBuffer[1] = {0}; + status = bus.read(NV_DPCD_EDP_REV, &edpBuffer[0], sizeof edpBuffer); + caps.eDpRevision = DRF_VAL(_DPCD, _EDP, _REV_VAL, edpBuffer[0]); + } + } + } + + bIndexedLinkrateCapable = false; + + if (isAtLeastVersion(1,4) && caps.extendedRxCapsPresent == false) + { + DP_ASSERT(0 && "A DPRX with DPCD Rev. 1.4 (or higher) must have Extended Receiver Capability field."); + } + + caps.supportsESI = (isAtLeastVersion(1,2) && gpuDP1_2Supported); // Support ESI register space only when GPU support DP1.2MST + + if (caps.eDpRevision >= NV_DPCD_EDP_REV_VAL_1_4 || this->bBypassILREdpRevCheck) + { + NvU16 linkRate = 0; + if (getRawLinkRateTable((NvU8*)&caps.linkRateTable[0])) + { + // First entry must be non-zero for validation + if (caps.linkRateTable[0] != 0) + { + bIndexedLinkrateCapable = true; + for (int i = 0; caps.linkRateTable[i] && (i < NV_DPCD_SUPPORTED_LINK_RATES__SIZE); i++) + { + if (linkRate < caps.linkRateTable[i]) + linkRate = caps.linkRateTable[i]; + } + if (linkRate) + caps.maxLinkRate = LINK_RATE_KHZ_TO_MBPS((NvU64)linkRate * DP_LINK_RATE_TABLE_MULTIPLIER_KHZ); + } + } + } + if (!bIndexedLinkrateCapable) + { + if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _1_62_GBPS, buffer[1])) + caps.maxLinkRate = RBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _2_70_GBPS, buffer[1])) + caps.maxLinkRate = HBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _5_40_GBPS, buffer[1])) + caps.maxLinkRate = HBR2; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_BANDWIDTH, _VAL, _8_10_GBPS, buffer[1])) + caps.maxLinkRate = HBR3; + else + { + DP_ASSERT(0 && "Unknown max link rate. Assuming DP 1.1 defaults"); + caps.maxLinkRate = HBR; + } + } + + // + // To prevent WAR being overridden. 
+ // + if (overrideDpcdMaxLaneCount) + { + caps.maxLaneCount = overrideDpcdMaxLaneCount; + } + else + { + caps.maxLaneCount = DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, buffer[0x2]); + } + + if (!IS_VALID_LANECOUNT(caps.maxLaneCount)) + { + DP_ASSERT(0 && "Invalid lane count. Assuming 1"); + caps.maxLaneCount = 1; + } + + caps.bPostLtAdjustmentSupport = FLD_TEST_DRF(_DPCD, _MAX_LANE_COUNT, _POST_LT_ADJ_REQ_SUPPORT, _YES, buffer[0x2]); + caps.enhancedFraming = FLD_TEST_DRF(_DPCD, _MAX_LANE_COUNT, _ENHANCED_FRAMING, _YES, buffer[0x2]); + if (isAtLeastVersion(1,1) && (!caps.enhancedFraming)) + { + DP_ASSERT(0 && "A DPRX with DPCD Rev. 1.1 (or higher) must have enhanced framing capability."); + } + + if (isAtLeastVersion(1,2) && gpuDP1_2Supported && caps.bPostLtAdjustmentSupport) + { + // Source grants post Link training adjustment support + bGrantsPostLtRequest = true; + } + else + { + // Disable post Link training adjustment support whenever sink does not report capability + // This covers the case of MST to SST transition during which initially this flag is set, we need to explicitly reset this + // in order to avoid PostLTAdjustment during LT. + bGrantsPostLtRequest = false; + } + + caps.supportsNoHandshakeTraining = FLD_TEST_DRF(_DPCD, _MAX_DOWNSPREAD, _NO_AUX_HANDSHAKE_LT, _TRUE, buffer[0x3]); + caps.bSupportsTPS4 = FLD_TEST_DRF(_DPCD14, _MAX_DOWNSPREAD, _TPS4_SUPPORTED, _YES, buffer[0x3]); + + caps.NORP = DRF_VAL(_DPCD, _NORP, _VAL, buffer[0x4]) + 1; + + caps.downStreamPortPresent = FLD_TEST_DRF(_DPCD, _DOWNSTREAMPORT, _PRESENT, _YES, buffer[0x5]); + caps.detailedCapInfo = FLD_TEST_DRF(_DPCD, _DOWNSTREAMPORT, _DETAILED_CAP_INFO_AVAILABLE, _YES, buffer[0x5]); + caps.downStreamPortType = DRF_VAL(_DPCD, _DOWNSTREAMPORT, _TYPE, buffer[0x5]); + + switch (DRF_VAL(_DPCD, _DOWNSTREAMPORT, _TYPE, buffer[0x5])) + { + case 0: legacyPort[0].type = DISPLAY_PORT; break; + case 1: legacyPort[0].type = ANALOG_VGA; break; + case 2: legacyPort[0].type = DVI; break; + case 3: legacyPort[0].type = WITHOUT_EDID; break; + default: DP_ASSERT(0 && "Unknown port type"); break; + } + + caps.downStreamPortCount = DRF_VAL(_DPCD, _DOWN_STREAM_PORT, _COUNT, buffer[0x7]); + caps.msaTimingParIgnored = FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _MSA_TIMING_PAR_IGNORED, _YES, buffer[0x7]); + caps.ouiSupported = FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _OUI_SUPPORT, _YES, buffer[0x7]); + + if (caps.downStreamPortPresent && !caps.downStreamPortCount) + { + DP_LOG(("DPHAL> Non-compliant device, reporting downstream port present, but no downstream ports. Overriding port count to 1.")); + caps.downStreamPortCount = 1; + } + + // Burst read from 0x20 to 0x22. 
+            bus.read(NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS, &buffer[0], 0x22 - 0x20 + 1);
+
+            caps.videoFallbackFormats = buffer[0];
+
+            caps.supportsMultistream = FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, buffer[0x1]);
+
+            caps.numberAudioEndpoints = (unsigned)(DRF_VAL(_DPCD, _NUMBER_OF_AUDIO_ENDPOINTS, _VALUE, buffer[0x2]));
+
+            // 02206h
+            if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING, &buffer[0], 1))
+            {
+                caps.bDP20ChannelCodingSupported =
+                    FLD_TEST_DRF(_DPCD14,
+                                 _EXTENDED_MAIN_LINK_CHANNEL_CODING,
+                                 _ANSI_128B_132B,
+                                 _YES,
+                                 buffer[0]);
+                if (caps.bDP20ChannelCodingSupported == true)
+                {
+                    // 0x2215
+                    if (AuxRetry::ack == bus.read(NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES, &buffer[0], 1))
+                    {
+                        caps.bUHBR_10GSupported =
+                            FLD_TEST_DRF(_DPCD20,
+                                         _128B_132B_SUPPORTED_LINK_RATES,
+                                         _UHBR10,
+                                         _YES,
+                                         buffer[0]);
+
+                        caps.bUHBR_13_5GSupported =
+                            FLD_TEST_DRF(_DPCD20,
+                                         _128B_132B_SUPPORTED_LINK_RATES,
+                                         _UHBR13_5,
+                                         _YES,
+                                         buffer[0]);
+
+                        caps.bUHBR_20GSupported =
+                            FLD_TEST_DRF(_DPCD20,
+                                         _128B_132B_SUPPORTED_LINK_RATES,
+                                         _UHBR20,
+                                         _YES,
+                                         buffer[0]);
+                    }
+                    DP_ASSERT(caps.bUHBR_10GSupported && "A DPRX that supports 128b/132b channel coding must support UHBR10.");
+                }
+            }
+
+            if (bLttprSupported)
+            {
+                // Burst read from 0xF0000 to 0xF0007
+                if (AuxRetry::ack == bus.read(NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV, &buffer[0], 0x8, retries))
+                {
+                    caps.repeaterCaps.revisionMinor = DRF_VAL(_DPCD14, _LT_TUNABLE_PHY_REPEATER_REV, _MINOR, buffer[0x0]);
+                    caps.repeaterCaps.revisionMajor = DRF_VAL(_DPCD14, _LT_TUNABLE_PHY_REPEATER_REV, _MAJOR, buffer[0x0]);
+
+                    if (lttprIsAtLeastVersion(1, 4))
+                    {
+                        caps.phyRepeaterCount = mapPhyRepeaterVal(DRF_VAL(_DPCD14, _PHY_REPEATER_CNT, _VAL, buffer[0x2]));
+
+                        if (caps.phyRepeaterCount != 0)
+                        {
+                            if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _1_62_GBPS, buffer[1]))
+                                caps.repeaterCaps.maxLinkRate = RBR;
+                            else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _2_70_GBPS, buffer[1]))
+                                caps.repeaterCaps.maxLinkRate = HBR;
+                            else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _5_40_GBPS, buffer[1]))
+                                caps.repeaterCaps.maxLinkRate = HBR2;
+                            else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _8_10_GBPS, buffer[1]))
+                                caps.repeaterCaps.maxLinkRate = HBR3;
+                            else
+                            {
+                                DP_ASSERT(0 && "Unknown LTTPR max link rate. Assuming DP 1.1 defaults");
+                                caps.repeaterCaps.maxLinkRate = HBR;
+                            }
+
+                            caps.repeaterCaps.maxLaneCount =
+                                DRF_VAL(_DPCD14, _MAX_LANE_COUNT_PHY_REPEATER,
+                                        _VAL, buffer[0x4]);
+
+                            // The extended wake timeout request, in units of 10 ms.
+                            caps.repeaterCaps.phyRepeaterExtendedWakeTimeoutMs =
+                                DRF_VAL(_DPCD14,
+                                        _PHY_REPEATER_EXTENDED_WAKE_TIMEOUT,
+                                        _REQ, buffer[0x5]) * 10;
+
+                            // An LTTPR that supports 128b/132b channel coding shall program this register to 20h.
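+                            // (lttprIsAtLeastVersion(2, 0) checks the rev byte read from 0xF0000 above; rev 2.0 is encoded as 20h.)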
+ if (lttprIsAtLeastVersion(2, 0)) + { + caps.repeaterCaps.bDP20ChannelCodingSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_MAIN_LINK_CHANNEL_CODING, + _128B_132B_SUPPORTED, + _YES, + buffer[6]); + + caps.repeaterCaps.UHBR_10GSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_128B_132B_RATES, + _10G_SUPPORTED, + _YES, + buffer[7]); + + caps.repeaterCaps.UHBR_13_5GSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_128B_132B_RATES, + _13_5G_SUPPORTED, + _YES, + buffer[7]); + + caps.repeaterCaps.UHBR_20GSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_128B_132B_RATES, + _20G_SUPPORTED, + _YES, + buffer[7]); + + if (buffer[7] && !caps.repeaterCaps.bDP20ChannelCodingSupported) + { + DP_ASSERT(0 && "UHBR is supported without 128b/132b Channel Encoding Supported!"); + } + } + } + else + { + caps.repeaterCaps.maxLinkRate = 0; + } + } + else + { + // not supported DP revision, we should not be doing LTTPR training + caps.phyRepeaterCount = 0; + caps.repeaterCaps.maxLinkRate = 0; + } + } + } + + // Check if the device requests extended sleep wake timeout + if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST, &buffer[0], 1)) + { + if (buffer[0] == NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_1MS) + { + caps.extendedSleepWakeTimeoutRequestMs = DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_DEFAULT_MS; + } + else + { + caps.extendedSleepWakeTimeoutRequestMs = buffer[0] * 20; + } + } + else + { + caps.extendedSleepWakeTimeoutRequestMs = 0; + } + + byte = 0U; + dpMemZero(&caps.psrCaps, sizeof(vesaPsrSinkCaps)); + + status = bus.read(NV_DPCD_EDP_PSR_VERSION, &byte, sizeof byte); + if (status == AuxRetry::ack && byte > 0U) + { + caps.psrCaps.psrVersion = byte; + } + + if (caps.psrCaps.psrVersion) + { + unsigned psrSetupTimeMap[8] = { 330U, 275U, 220U, 165U, 110U, 55U, 0U }; + byte = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR_CAP, &byte, sizeof byte)) + { + caps.psrCaps.linkTrainingRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP, _LT_NEEDED, _YES, byte); + caps.psrCaps.psrSetupTime = + psrSetupTimeMap[DRF_VAL(_DPCD_EDP, _PSR_CAP,_SETUP_TIME, byte)]; + caps.psrCaps.yCoordinateRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP,_Y_COORD_NEEDED, _YES, byte); + caps.psrCaps.psr2UpdateGranularityRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP,_GRAN_REQUIRED, _YES, byte); + } + + // Version 2 supports PSR2 and SU + if (caps.psrCaps.psrVersion == 2U) + { + NvU16 xGranular = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_X_GRANULARITY_H, &byte, sizeof byte)) + { + xGranular = byte; + } + + byte = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_X_GRANULARITY_L, &byte, sizeof byte)) + { + xGranular = (xGranular << 8U) | byte; + } + + caps.psrCaps.suXGranularity = xGranular; + } + + // version 3 supports Y coordinate + if (caps.psrCaps.psrVersion > 2U) + { + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_Y_GRANULARITY, &byte, sizeof byte)) + { + caps.psrCaps.suYGranularity = byte; + } + } + } + + parsePortDescriptors(); + } + + virtual PCONCaps * getPCONCaps() + { + return &(caps.pconCaps); + } + + virtual unsigned getRevisionMajor() // DPCD offset 0 + { + return caps.revisionMajor; + } + + virtual unsigned getRevisionMinor() + { + return caps.revisionMinor; + } + + virtual unsigned lttprGetRevisionMajor() // DPCD offset F0000h + { + return caps.repeaterCaps.revisionMajor; + } + + virtual unsigned lttprGetRevisionMinor() + { + return caps.repeaterCaps.revisionMinor; + } + + virtual LinkRate getMaxLinkRate() // DPCD offset 1 * 27000000 + { + if (caps.phyRepeaterCount 
== 0) + return caps.maxLinkRate; + else + return DP_MIN(caps.maxLinkRate, caps.repeaterCaps.maxLinkRate); + } + + virtual unsigned getMaxLaneCount() // DPCD offset 2 + { + if (caps.phyRepeaterCount == 0) + return caps.maxLaneCount; + else + return DP_MIN(caps.maxLaneCount, caps.repeaterCaps.maxLaneCount); + } + + virtual bool getNoLinkTraining() + { + return caps.noLinkTraining; + } + + virtual unsigned getPhyRepeaterCount() + { + return caps.phyRepeaterCount; + } + + // Max lanes supported at the desired link rate. + virtual unsigned getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate) + { + if (linkRate == HBR) + { + if (caps.maxLanesAtHBR) + { + return DP_MIN(caps.maxLanesAtHBR, getMaxLaneCount()); + } + } + else if (linkRate == RBR) + { + if (caps.maxLanesAtRBR) + { + return DP_MIN(caps.maxLanesAtRBR, getMaxLaneCount()); + } + } + // None of the above cases got hit, simply return the max lane count + return getMaxLaneCount(); + } + + virtual bool getEnhancedFraming() + { + return caps.enhancedFraming; + } + + virtual bool getDownstreamPort(NvU8 *portType) // DPCD offset 5 + { + *portType = caps.downStreamPortType; + return caps.downStreamPortPresent; + } + + virtual bool getSupportsNoHandshakeTraining() + { + return caps.supportsNoHandshakeTraining; + } + + virtual unsigned getLegacyPortCount() // DPCD offset 7 + { + return caps.downStreamPortCount; + } + + virtual LegacyPort * getLegacyPort(unsigned index) + { + return &legacyPort[index]; + } + + virtual bool getMsaTimingparIgnored() + { + return caps.msaTimingParIgnored; + } + + virtual bool getOuiSupported() + { + return caps.ouiSupported; + } + + virtual bool getSDPExtnForColorimetry() + { + bool bSDPExtnForColorimetry = false; + NvU8 byte = 0; + if (caps.extendedRxCapsPresent) + { + if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST, &byte, sizeof byte)) + { + bSDPExtnForColorimetry = FLD_TEST_DRF(_DPCD14, _EXTENDED_DPRX_FEATURE_ENUM_LIST, + _VSC_SDP_EXT_FOR_COLORIMETRY, _YES, byte); + } + } + return bSDPExtnForColorimetry; + } + + virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) + { + NvU8 ouiBuffer[16]; + + // The first 3 bytes are IEEE_OUI. 2 hex digits per register. + ouiBuffer[0] = (ouiId >> 16) & 0xFF; + ouiBuffer[1] = (ouiId >> 8) & 0xFF; + ouiBuffer[2] = ouiId & 0xFF; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (modelNameLength > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE) + { + DP_LOG(("DPHAL> setOuiSource(): modelNameLength should not be greater than 6")); + modelNameLength = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; + } + + // Next 6 bytes are Device Identification String. + for (unsigned int i = 0; i < modelNameLength; i++) + { + ouiBuffer[3+i] = *model; + if (*model) + model++; + } + ouiBuffer[9] = chipRevision; + + for (int i = 0xA; i<=0xF; ++i) + ouiBuffer[i] = 0; + + return bus.write(NV_DPCD_SOURCE_IEEE_OUI, &ouiBuffer[0], sizeof ouiBuffer); + } + + virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) + { + NvU8 ouiBuffer[16]; + int address = NV_DPCD_SINK_IEEE_OUI; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // If buffer size is larger than dev_id size, the extras are not used. + // If buffer size is smaller, than we can only get certain bytes. 
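+            // Clamp to the 6-byte Device Identification String field (NV_DPCD_SOURCE_DEV_ID_STRING__SIZE).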
+ if (modelNameBufferSize > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE) + { + modelNameBufferSize = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; + } + + // + // Check if there is a downstream facing port (DFP) + // If DFP is present, device is a branch device - use branch offset + // Else device is a sink device - use sink offset + // + if(caps.downStreamPortPresent) + { + address = NV_DPCD_BRANCH_IEEE_OUI; + } + + if (AuxRetry::ack != bus.read(address, &ouiBuffer[0], sizeof ouiBuffer)) + { + *modelName = 0; + ouiId = 0; + chipRevision = 0; + return false; + } + // The first 3 bytes are IEEE_OUI. 2 hex digits per register. + ouiId = ouiBuffer[0] | (ouiBuffer[1] << 8) | (ouiBuffer[2] << 16); + + // Next 6 bytes are Device Identification String, copy as much as we can (limited buffer case). + unsigned int i; + for (i = 0; i < modelNameBufferSize; i++) + modelName[i] = ouiBuffer[3+i]; + + chipRevision = ouiBuffer[9]; + + return true; + } + + virtual bool getSupportsMultistream() // DPCD offset 21h + { + return caps.supportsMultistream && (!caps.overrideToSST); + } + + virtual void setSupportsESI(bool bIsESISupported) + { + caps.supportsESI = bIsESISupported; + } + + // + // Single stream specific caps + // + virtual unsigned getNumberOfAudioEndpoints() // DPCD offset 22h + { + if (caps.numberAudioEndpoints) + return caps.numberAudioEndpoints; + else + return caps.NORP > 1; + } + + virtual bool getGUID(GUID & guid) // DPCD offset 30h + { + NvU8 buffer[DPCD_GUID_SIZE]; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_GUID, &buffer[0], sizeof(buffer)); + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) + { + guid.data[i] = buffer[i]; + } + return true; + } + + virtual AuxRetry::status setGUID(GUID & guid) + { + NvU8 buffer[DPCD_GUID_SIZE]; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) + { + buffer[i] = guid.data[i]; + } + + return bus.write(NV_DPCD_GUID, buffer, sizeof(buffer)); + } + + void parsePortDescriptors() + { + NvU8 basicCaps[128]; + unsigned bytesPerPort = caps.detailedCapInfo ? 4 : 1; + // When Detailed_cap_info_available bit is set to 1, the max number + // of downstream port is limited to 32. Otherwise it supports up to 127 + unsigned maxPorts = caps.detailedCapInfo ? 
32 : 127; + unsigned infoByte0; + if (caps.downStreamPortCount > maxPorts) + caps.downStreamPortCount = 1; + unsigned size = (bytesPerPort * caps.downStreamPortCount); + + if (AuxRetry::ack != bus.read(NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT(0), &basicCaps[0], size)) + { + DP_LOG(("DPHAL> Unable to read detailed caps!")); + caps.downStreamPortCount = 0; + return; + } + + if (!((isVersion(1,0)) || + (isVersion(1,1) && basicCaps[0] == 0 && + legacyPort[0].type == ANALOG_VGA))) + { + for (unsigned port = 0; port < caps.downStreamPortCount; port++) + { + // The index to access detailed info byte 0 + infoByte0 = port * bytesPerPort; + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO_DWNSTRM_PORT, _TX_TYPE, basicCaps[infoByte0])) + { + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DISPLAYPORT: + { + legacyPort[port].type = DISPLAY_PORT; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_ANALOG: + { + legacyPort[port].type = ANALOG_VGA; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DVI: + { + legacyPort[port].type = DVI; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_HDMI: + { + NvU8 pConCaps = basicCaps[infoByte0+2]; + + legacyPort[port].type = HDMI; + + caps.pconCaps.maxTmdsClkRate = basicCaps[infoByte0+1]; + + caps.pconCaps.bSourceControlModeSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _SRC_CONTROL_MODE_SUPPORT, _YES, pConCaps); + caps.pconCaps.bConcurrentLTSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _CONCURRENT_LT_SUPPORT, _YES, pConCaps); + caps.pconCaps.maxHdmiLinkBandwidthGbps = + DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, pConCaps); + + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_BITS_PER_COMPONENT_DEF, pConCaps)) + { + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC: + caps.pconCaps.maxBpc = 10; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC: + caps.pconCaps.maxBpc = 12; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC: + caps.pconCaps.maxBpc = 16; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC: + default: + caps.pconCaps.maxBpc = 8; + break; + } + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_OTHERS_NO_EDID: + { + legacyPort[port].type = WITHOUT_EDID; + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO_DWNSTRM_PORT, _NON_EDID_ATTR, basicCaps[infoByte0])) + { + default: + { + DP_ASSERT(0 && "Unknown non-edid type, assume Reserved"); + legacyPort[port].nonEDID = RESERVED; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_60HZ: + { + legacyPort[port].nonEDID = IL_720_480_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_50HZ: + { + legacyPort[port].nonEDID = IL_720_480_50HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_60HZ: + { + legacyPort[port].nonEDID = IL_1920_1080_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_50HZ: + { + legacyPort[port].nonEDID = IL_1920_1080_50HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_60HZ: + { + legacyPort[port].nonEDID = PG_1280_720_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_50HZ: + { + legacyPort[port].nonEDID = PG_1280_720_50_HZ; + break; + } + } + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DP_PLUSPLUS: + { + legacyPort[port].type = DISPLAY_PORT_PLUSPLUS; + break; + } + default: + { + DP_ASSERT(0 && "Unknown port type"); + break; + } + } + + // Set the Init 
value to zero
+                    legacyPort[port].maxTmdsClkRate = 0;
+
+                    if (legacyPort[port].type == DVI ||
+                        legacyPort[port].type == HDMI ||
+                        legacyPort[port].type == DISPLAY_PORT_PLUSPLUS)
+                    {
+                        legacyPort[port].maxTmdsClkRate = ((NvU64)basicCaps[infoByte0 + 1]) * 2500000;
+                        if (legacyPort[port].maxTmdsClkRate == 0)
+                        {
+                            DP_ASSERT(legacyPort[port].maxTmdsClkRate && "No Max TMDS clock rate limits.");
+                        }
+
+                        /*
+                            Bug 3202060:
+                            Parse byte 2 as well to check whether the dongle supports HDMI FRL output.
+                            If HDMI FRL is supported, the maxTmdsClkRate limit should be removed.
+                        */
+
+                        if (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, basicCaps[infoByte0 + 2]))
+                        {
+                            // Disable the TMDS clock limit
+                            legacyPort[port].maxTmdsClkRate = 0;
+                        }
+                    }
+                }
+            }
+        }
+
+        //
+        // Notifications of external events
+        //
+        virtual void notifyIRQ()
+        {
+            parseAndReadInterrupts();
+        }
+
+        virtual void populateFakeDpcd()
+        {
+            dpcdOffline = true;
+            // Fill out the bare minimum caps required; this should be extended to more DPCD offsets in the future.
+            caps.revisionMajor = 0x1;
+            caps.revisionMinor = 0x1;
+            caps.supportsESI = false;
+            caps.maxLinkRate = HBR3;
+            caps.maxLaneCount = 4;
+            caps.enhancedFraming = true;
+            caps.downStreamPortPresent = true;
+            caps.downStreamPortCount = 1;
+
+            // Populate the sink count interrupt
+            interrupts.sinkCount = 1;
+        }
+
+        // DPCD override routine: Max link rate override.
+        void overrideMaxLinkRate(NvU32 overrideMaxLinkRate)
+        {
+            if (overrideMaxLinkRate)
+            {
+                caps.maxLinkRate = mapLinkBandiwdthToLinkrate(overrideMaxLinkRate);
+            }
+        }
+
+        // DPCD override routine: Max lane count override.
+        void overrideMaxLaneCount(NvU32 maxLaneCount)
+        {
+            caps.maxLaneCount = maxLaneCount;
+            overrideDpcdMaxLaneCount = maxLaneCount;
+        }
+
+        // DPCD override routine: Max lane count override at a given link rate.
+        void skipCableBWCheck(NvU32 maxLaneAtHighRate, NvU32 maxLaneAtLowRate)
+        {
+            caps.maxLanesAtHBR = maxLaneAtHighRate;
+            caps.maxLanesAtRBR = maxLaneAtLowRate;
+        }
+
+        // DPCD override routine: Optimal link config (link rate and lane count) override.
+        void overrideOptimalLinkCfg(LinkRate optimalLinkRate,
+                                    NvU32 optimalLaneCount)
+        {
+            caps.maxLinkRate = optimalLinkRate;
+            caps.maxLaneCount = optimalLaneCount;
+        }
+
+        // DPCD override routine: Optimal link rate override.
+        void overrideOptimalLinkRate(LinkRate optimalLinkRate)
+        {
+            caps.maxLinkRate = optimalLinkRate;
+        }
+
+        virtual void notifyHPD(bool status, bool bSkipDPCDRead)
+        {
+            if (!status)
+            {
+                // Check if the DPCD is alive
+                NvU8 buffer;
+                unsigned retries = 16;
+                if (AuxRetry::ack == bus.read(NV_DPCD_REV, &buffer, sizeof buffer, retries))
+                    return;
+
+                // Support for EDID locking:
+                // Refill the cache with "default" DPCD data on an unplug event, as later on
+                // the client may send a hot-plug event for an EDID-locked fake device (no real DPCD).
+                // Also raise the "dpcdOffline" flag so that DPCD accesses may be optimized.
+                populateFakeDpcd();
+                return;
+            }
+
+            // Skip the DPCD read if requested.
+            if (!bSkipDPCDRead)
+            {
+                parseAndReadCaps();
+            }
+
+            //
+            // For the Alienware eDP panel, more time is required to assert HPD and
+            // power on the AUX link; retry if the first attempt failed. This is a
+            // crude approach, but no EDID is available to differentiate here, since
+            // this is the first access and it happens before the EDID read.
+            // We also found that some LG panels on HP notebooks go into a bad state
+            // after a factory reset; retrying 3 times works for them, so faultyRetries is 3.
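+            // (Each retry below re-runs parseAndReadCaps() until dpcdOffline clears or the retries run out.)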
+ // + NvU32 faultyRetries = 3; + while ((dpcdOffline) && (faultyRetries > 0)) + { + // Read the caps again + parseAndReadCaps(); + --faultyRetries; + } + + parseAndReadInterrupts(); + } + + virtual bool isPostLtAdjustRequestSupported() + { + // + // If the upstream DPTX and downstream DPRX both support TPS4, + // TPS4 shall be used instead of POST_LT_ADJ_REQ. + // + NvBool bTps4Supported = gpuDP1_4Supported && caps.bSupportsTPS4; + return bGrantsPostLtRequest && !bTps4Supported; + } + + virtual void setPostLtAdjustRequestGranted(bool bGrantPostLtRequest) + { + NvU8 data = 0; + + bus.read(NV_DPCD_LANE_COUNT_SET, &data, sizeof data); + + if (bGrantPostLtRequest) + { + data = FLD_SET_DRF(_DPCD, _LANE_COUNT_SET, _POST_LT_ADJ_REQ_GRANTED, _YES, data); + } + + else + { + data = FLD_SET_DRF(_DPCD, _LANE_COUNT_SET, _POST_LT_ADJ_REQ_GRANTED, _NO, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD_LANE_COUNT_SET, &data, sizeof data)) + { + DP_LOG(("DPCONN> Failed to set POST_LT_ADJ_REQ_GRANTED bit.")); + } + } + + virtual bool getIsPostLtAdjRequestInProgress() // DPCD offset 204 + { + NvU8 buffer; + + if (AuxRetry::ack != bus.read(NV_DPCD_LANE_ALIGN_STATUS_UPDATED, &buffer, 1)) + { + DP_LOG(("DPCONN> Post Link Training : Failed to read POST_LT_ADJ_REQ_IN_PROGRESS")); + return false; + } + + return FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, + _POST_LT_ADJ_REQ_IN_PROGRESS, _YES, buffer); + } + + virtual TrainingPatternSelectType getTrainingPatternSelect() + { + NvU8 trainingPat = 0; + TrainingPatternSelectType pattern = TRAINING_DISABLED; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_TRAINING_PATTERN_SET, &trainingPat, sizeof trainingPat); + + trainingPat = DRF_VAL(_DPCD, _TRAINING_PATTERN_SET, _TPS, trainingPat); + + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_NONE) + pattern = TRAINING_DISABLED; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1) + pattern = TRAINING_PAT_ONE; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP2) + pattern = TRAINING_PAT_TWO; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP3) + pattern = TRAINING_PAT_THREE; + + return pattern; + } + + virtual bool setTrainingMultiLaneSet(NvU8 numLanes, + NvU8 *voltSwingSet, + NvU8 *preEmphasisSet) + { + NvU8 trainingCtrl[DP_MAX_LANES] = {0}; + unsigned writeAddress = NV_DPCD_TRAINING_LANE_SET(0); + NvU8 laneIndex; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + for (laneIndex = 0; laneIndex < numLanes; laneIndex++) + { + if (voltSwingSet[laneIndex] <= NV_DPCD_MAX_VOLTAGE_SWING) + { + trainingCtrl[laneIndex] = FLD_SET_DRF_NUM(_DPCD, _TRAINING_LANE_SET, + _VOLTAGE_SWING, voltSwingSet[laneIndex], + trainingCtrl[laneIndex]); + } + else + { + DP_ASSERT(0 && "Out of bounds voltage swing. Assuming 0"); + } + + if (voltSwingSet[laneIndex] == NV_DPCD_MAX_VOLTAGE_SWING) + { + trainingCtrl[laneIndex] = FLD_SET_DRF(_DPCD, _TRAINING_LANE_SET, + _VOLTAGE_SWING_MAX_REACHED, + _TRUE, trainingCtrl[laneIndex]); + } + + if (preEmphasisSet[laneIndex] <= NV_DPCD_MAX_VOLTAGE_PREEMPHASIS) + { + trainingCtrl[laneIndex] = FLD_SET_DRF_NUM(_DPCD, _TRAINING_LANE_SET, + _PREEMPHASIS, preEmphasisSet[laneIndex], + trainingCtrl[laneIndex]); + } + else + { + DP_ASSERT(0 && "Out of bounds preemphasis. 
Assuming 0"); + } + + if (preEmphasisSet[laneIndex] == NV_DPCD_MAX_VOLTAGE_PREEMPHASIS) + { + trainingCtrl[laneIndex] = FLD_SET_DRF(_DPCD, _TRAINING_LANE_SET, + _PREEMPHASIS_MAX_REACHED, _TRUE, + trainingCtrl[laneIndex]); + } + } + + return(AuxRetry::ack == bus.write(writeAddress, trainingCtrl, (unsigned)numLanes)); + } + + virtual AuxRetry::status setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn) + { + + NvU8 downspreadCtrl = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_DOWNSPREAD_CTRL, &downspreadCtrl, sizeof downspreadCtrl); + + if (msaTimingParamIgnoreEn) + downspreadCtrl = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _TRUE, downspreadCtrl); + else + downspreadCtrl = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _FALSE, downspreadCtrl); + + return bus.write(NV_DPCD_DOWNSPREAD_CTRL, &downspreadCtrl, sizeof downspreadCtrl); + } + + virtual AuxRetry::status setLinkQualPatternSet(LinkQualityPatternType linkQualPattern, unsigned laneCount) + { + if (caps.revisionMajor <= 0) + { + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + return AuxRetry::nack; + } + + if (this->isVersion(1, 1) == true) + { + NvU8 buffer = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_TRAINING_PATTERN_SET, &buffer, 1)) + { + DP_ASSERT(0 && "Can't read from NV_DPCD_TRAINING_PATTERN_SET."); + return AuxRetry::nack; + } + + // write on bits 3:2 + NvU8 value = ((linkQualPattern << 2) & 0xc) | (buffer & (~0xc)); + return bus.write(NV_DPCD_TRAINING_PATTERN_SET, &value, sizeof value); + } + else if (isAtLeastVersion(1,2) == true) + { + AuxRetry::status requestStatus = AuxRetry::nack ; + + // Set test patterns for all requested lanes + for (unsigned i = 0; i < laneCount; i++) + { + requestStatus = setLinkQualLaneSet(i, linkQualPattern); + if (requestStatus != AuxRetry::ack) + break; + } + + return requestStatus; + } + else + { + DP_ASSERT(0 && "Regs only supported for DP1.2"); + return AuxRetry::unsupportedRegister; + } + } + + virtual AuxRetry::status setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern) + { + NvU8 linkQuality = 0; + unsigned writeAddress = NV_DPCD_LINK_QUAL_LANE_SET(lane); + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (isAtLeastVersion(1,2) == false) + { + DP_ASSERT(0 && "Regs only supported for DP1.2"); + return AuxRetry::unsupportedRegister; + } + + // check if parameter is valid + if (lane >= displayPort_LaneSupported) + { + DP_ASSERT(0 && "Unknown lane selected. 
Assuming Lane 0"); + writeAddress = NV_DPCD_LINK_QUAL_LANE_SET(0); + } + + if (linkQualPattern == LINK_QUAL_DISABLED) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _NO, linkQuality); + if (linkQualPattern == LINK_QUAL_D10_2) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _D10_2, linkQuality); + if (linkQualPattern == LINK_QUAL_SYM_ERROR) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _SYM_ERR_MEASUREMENT_CNT, linkQuality); + if (linkQualPattern == LINK_QUAL_PRBS7) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _PRBS7, linkQuality); + if (linkQualPattern == LINK_QUAL_80BIT_CUST) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _80_BIT_CUSTOM, linkQuality); + if (linkQualPattern == LINK_QUAL_HBR2_COMPLIANCE_EYE) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _HBR2, linkQuality); + if (linkQualPattern == LINK_QUAL_CP2520PAT3) + linkQuality = FLD_SET_DRF(_DPCD14, _LINK_QUAL_LANE_SET, _LQS, _CP2520PAT3, linkQuality); + + return bus.write(writeAddress, &linkQuality, sizeof linkQuality); + } + + virtual AuxRetry::status setMessagingEnable(bool _uprequestEnable, bool _upstreamIsSource) + { + NvU8 mstmCtrl = 0; + + if (!this->isAtLeastVersion(1, 2)) + { + DP_ASSERT(!_uprequestEnable && "Can't enable multistream on DP 1.1"); + return AuxRetry::nack; + } + + uprequestEnable = _uprequestEnable; + upstreamIsSource = _upstreamIsSource; + + // + // Lets not touch the MST enable bit here. + // Branch might be getting driven in MST mode and we do not want to + // change that unless we are sure there are no more streams being driven. + // + if (AuxRetry::ack != bus.read(NV_DPCD_MSTM_CTRL, &mstmCtrl, 1)) + { + DP_LOG(("DPHAL> ERROR! Unable to read 00111h MSTM_CTRL.")); + } + + if (_uprequestEnable) + { + bMultistream = FLD_TEST_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + } + else + { + bMultistream = false; + } + mstmCtrl = 0; + if (bMultistream) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + if (uprequestEnable) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UP_REQ_EN, _YES, mstmCtrl); + if (upstreamIsSource) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, mstmCtrl); + + return bus.write(NV_DPCD_MSTM_CTRL, &mstmCtrl, sizeof mstmCtrl); + } + + virtual AuxRetry::status setMultistreamLink(bool enable) + { + NvU8 mstmCtrl = 0; + + if (!this->isAtLeastVersion(1, 2)) + { + DP_ASSERT(!enable && "Can't enable multistream on DP 1.1"); + return AuxRetry::nack; + } + + bMultistream = enable; + + if (bMultistream) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + if (uprequestEnable) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UP_REQ_EN, _YES, mstmCtrl); + if (upstreamIsSource) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, mstmCtrl); + + return bus.write(NV_DPCD_MSTM_CTRL, &mstmCtrl, sizeof mstmCtrl); + } + + virtual AuxRetry::status setMultistreamHotplugMode(MultistreamHotplugMode notifyType) + { + NvU8 deviceCtrl = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // notifytype == HPD_LONG_PULSE, adapter 0 + if (notifyType == IRQ_HPD) + deviceCtrl = FLD_SET_DRF(_DPCD, _BRANCH_DEV_CTRL, _HOTPLUG_EVENT_TYPE, _IRQ_HPD, deviceCtrl); + + return bus.write(NV_DPCD_BRANCH_DEV_CTRL, &deviceCtrl, sizeof deviceCtrl); + } + + + bool parseTestRequestTraining(NvU8 * buffer /* 0x18-0x28 valid */) + { + if (buffer[1] == 0x6) + interrupts.testTraining.testRequestLinkRate = RBR; + else if (buffer[1] == 0xa) 
+ interrupts.testTraining.testRequestLinkRate = HBR; + else if (buffer[1] == 0x14) + interrupts.testTraining.testRequestLinkRate = HBR2; + else if (buffer[1] == 0x1E) + interrupts.testTraining.testRequestLinkRate = HBR3; + else + { + DP_ASSERT(0 && "Unknown max link rate. Assuming RBR"); + interrupts.testTraining.testRequestLinkRate = RBR; + } + + interrupts.testTraining.testRequestLaneCount = buffer[(0x220 - 0x218)] & 0xf; + + return true; + } + + void parseAutomatedTestRequest(bool testRequestPending) + { + NvU8 buffer[16]; + + interrupts.automatedTestRequest = false; + interrupts.testEdid.testRequestEdidRead = false; + interrupts.testTraining.testRequestTraining = false; + interrupts.testPhyCompliance.testRequestPhyCompliance = false; + + if (!testRequestPending) + { + return; + } + interrupts.automatedTestRequest = true; + + if (AuxRetry::ack != bus.read(NV_DPCD_TEST_REQUEST, &buffer[0], 16)) + { + DP_LOG(("DPHAL> ERROR! Automated test request found. Unable to read 0x218 register.")); + return; + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_LINK_TRAINING, _YES, buffer[0])) + { + interrupts.testTraining.testRequestTraining = parseTestRequestTraining(&buffer[0]); + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_EDID_READ, _YES, buffer[0])) + { + interrupts.testEdid.testRequestEdidRead = true; + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_PHY_TEST_PATTERN, _YES, buffer[0])) + { + interrupts.testPhyCompliance.testRequestPhyCompliance = parseTestRequestPhy(); + } + } + + virtual bool parseTestRequestPhy() + { + NvU8 buffer = 0; + NvU8 bits = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_PHY_TEST_PATTERN, &buffer, 1)) + { + DP_LOG(("DPHAL> ERROR! Test pattern request found but unable to read NV_DPCD_PHY_TEST_PATTERN register.")); + return false; + } + + if (isVersion(1,0)) + bits = 0; + else + bits = DRF_VAL(_DPCD, _PHY_TEST_PATTERN_SEL, _DP12, buffer); + + if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_NO) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_DISABLED; + else if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_D10_2) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_D10_2; + else if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_SYM_ERR_MEASUREMENT_CNT) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_SYM_ERROR; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_PRBS7) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_PRBS7; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_80_BIT_CUSTOM) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_80BIT_CUST; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_HBR2) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_HBR2_COMPLIANCE_EYE; + else if (bits == NV_DPCD14_PHY_TEST_PATTERN_SEL_CP2520PAT3) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_CP2520PAT3; + else + { + DP_ASSERT(0 && "Unknown pattern type, assuming none"); + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_DISABLED; + return false; + } + + if (interrupts.testPhyCompliance.phyTestPattern == LINK_QUAL_80BIT_CUST) + { + NvU8 buffer[NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE] = {0}; + if (AuxRetry::ack != bus.read(NV_DPCD_TEST_80BIT_CUSTOM_PATTERN(0), &buffer[0], + NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE)) + { + DP_LOG(("DPHAL> ERROR! Request for 80 bit custom pattern. 
Can't read from 250h.")); + return false; + } + + for (unsigned i = 0; i < NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE; i++) + { + interrupts.eightyBitCustomPat[i] = buffer[i]; + } + } + + return true; + } + + virtual bool interruptCapabilitiesChanged() + { + return interrupts.rxCapChanged; + } + + virtual void clearInterruptCapabilitiesChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _RX_CAP_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getLinkStatusChanged() + { + return interrupts.linkStatusChanged; + } + + virtual void clearLinkStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _LINK_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getHdmiLinkStatusChanged() + { + return interrupts.hdmiLinkStatusChanged; + } + + virtual void clearHdmiLinkStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getStreamStatusChanged() + { + return interrupts.streamStatusChanged; + } + + virtual void clearStreamStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _STREAM_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool isLinkStatusValid(unsigned lanes) + { + bool linkStatus = true; + + this->setDirtyLinkStatus(true); + this->refreshLinkStatus(); + + for (unsigned lane = 0; lane < lanes ; lane++) + { + linkStatus = linkStatus && interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone && + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone && + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked; + } + + linkStatus = linkStatus && interrupts.laneStatusIntr.interlaneAlignDone; + + return linkStatus; + } + + virtual void refreshLinkStatus() + { + if (interrupts.laneStatusIntr.linkStatusDirtied) + { + if (caps.supportsESI && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + this->fetchLinkStatusESI(); + } + else + { + this->fetchLinkStatusLegacy(); + } + } + } + + virtual void setDirtyLinkStatus(bool dirty) + { + interrupts.laneStatusIntr.linkStatusDirtied = dirty; + } + + void parseAndReadInterruptsESI() + { + NvU8 buffer[16] = {0}; + bool automatedTestRequest; + + if (AuxRetry::ack != bus.read(NV_DPCD_SINK_COUNT_ESI, &buffer[2], 0x2005 - 0x2002 + 1)) + return; + + interrupts.sinkCount = DRF_VAL(_DPCD, _SINK_COUNT_ESI, _SINK_COUNT, buffer[2]); + + // check if edp revision is v1.4 or v1.4a + if ((caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _AUTO_TEST, _YES, buffer[3]); + } + else + { + // if edp rev is v1.4 or v1.4a, then use legacy address for auto test. 
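+                // (DEVICE_SERVICE_IRQ_VECTOR at DPCD 0x201, rather than its ESI copy at 0x2003.)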
+ NvU8 legacy = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &legacy, 1)) + return; + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _AUTO_TEST, _YES, legacy); + } + + interrupts.cpIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _CP, _YES, buffer[3]); + interrupts.mccsIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _MCCS_IRQ, _YES, buffer[3]); + interrupts.downRepMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, buffer[3]); + interrupts.upReqMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, buffer[3]); + + interrupts.rxCapChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _RX_CAP_CHANGED, _YES, buffer[5]); + interrupts.linkStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _LINK_STATUS_CHANGED, _YES, buffer[5]); + interrupts.streamStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _STREAM_STATUS_CHANGED, _YES, buffer[5]); + interrupts.hdmiLinkStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _YES, buffer[5]); + // + // Link status changed bit is not necessarily set at all times when the sink + // loses the lane status. Refresh the lane status in any case on an IRQ + // + if ((caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + fetchLinkStatusESI(); + } + else + { + fetchLinkStatusLegacy(); + } + + if (interrupts.linkStatusChanged) + { + this->clearLinkStatusChanged(); + } + + if (interrupts.rxCapChanged) + { + + DP_LOG(("DPHAL> RX Capabilities have changed!")); + parseAndReadCaps(); + this->clearInterruptCapabilitiesChanged(); + } + + if (interrupts.hdmiLinkStatusChanged) + { + this->clearHdmiLinkStatusChanged(); + } + + parseAutomatedTestRequest(automatedTestRequest); + } + + void readLTTPRLinkStatus(NvS32 rxIndex, NvU8 *buffer) + { + int addrLane01Status; + // LINK_STATUS for LTTPR is 3 bytes. (NV_DPCD14_PHY_REPEATER_START(i) + 0x20 ~ 0x22) + int bytesToRead = 3; + + DP_ASSERT((rxIndex > 0 && rxIndex <= 8) && "Invalid rxIndex"); + // + // NV_DPCD14_PHY_REPEATER_START is 0-based. + // rxIndex is 1-based. + // + addrLane01Status = NV_DPCD14_PHY_REPEATER_START(rxIndex - 1) + + NV_DPCD14_LANE0_1_STATUS_PHY_REPEATER; + bus.read(addrLane01Status, buffer, bytesToRead); + } + + void resetIntrLaneStatus() + { + // + // Reset all laneStatus to true. + // These bits can only set to true when all DPRX (including sink and LTTPRs) set + // the corresponding bit to true. Set to true as init value, and later will do &= + // through all the lanes. + // + for (int lane = 0; lane < 4; lane++) + { + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone = true; + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone = true; + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked = true; + } + interrupts.laneStatusIntr.interlaneAlignDone = true; + interrupts.laneStatusIntr.downstmPortChng = true; + interrupts.laneStatusIntr.linkStatusUpdated = true; + } + + void fetchLinkStatusESI() + { + NvU8 buffer[16] = {0}; + NvS32 rxIndex; + + // LINK_STATUS_ESI from 0x200C to 0x200E + int bytesToRead = 3; + + // Reset all laneStatus to true. 
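+            // Each hop (every LTTPR, then the sink) ANDs its lane status into the aggregate in the loop below.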
+ resetIntrLaneStatus(); + + for (rxIndex = caps.phyRepeaterCount; rxIndex >= (NvS32) NV0073_CTRL_DP_DATA_TARGET_SINK; rxIndex--) + { + if (rxIndex != NV0073_CTRL_DP_DATA_TARGET_SINK) + { + readLTTPRLinkStatus(rxIndex, &buffer[0xC]); + } + else + { + bus.read(NV_DPCD_LANE0_1_STATUS_ESI, &buffer[0xC], bytesToRead); + } + + for (int lane = 0; lane < 4; lane++) + { + unsigned laneBits = buffer[0xC+lane/2] >> (4*(lane & 1)); + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone &= !!(laneBits & 1); + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone &= !!(laneBits & 2); + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked &= !!(laneBits & 4); + } + + interrupts.laneStatusIntr.interlaneAlignDone &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _INTERLANE_ALIGN_DONE, _YES, buffer[0xE]); + interrupts.laneStatusIntr.downstmPortChng &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _DOWNSTRM_PORT_STATUS_DONE, _YES, buffer[0xE]); + interrupts.laneStatusIntr.linkStatusUpdated &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _LINK_STATUS_UPDATED, _YES, buffer[0xE]); + } + this->setDirtyLinkStatus(false); + } + + void fetchLinkStatusLegacy() + { + NvU8 buffer[16] = {0}; + NvS32 rxIndex; + // LINK_STATUS from 0x202 to 0x204 + int bytesToRead = 3; + + // Reset all laneStatus to true. + resetIntrLaneStatus(); + + for (rxIndex = caps.phyRepeaterCount; rxIndex >= (NvS32) NV0073_CTRL_DP_DATA_TARGET_SINK; rxIndex--) + { + if (rxIndex != NV0073_CTRL_DP_DATA_TARGET_SINK) + { + readLTTPRLinkStatus(rxIndex, &buffer[2]); + } + else + { + bus.read(NV_DPCD_LANE0_1_STATUS, &buffer[2], bytesToRead); + } + + for (int lane = 0; lane < 4; lane++) + { + unsigned laneBits = buffer[2+lane/2] >> (4*(lane & 1)); + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone &= !!(laneBits & 1); + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone &= !!(laneBits & 2); + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked &= !!(laneBits & 4); + } + + interrupts.laneStatusIntr.interlaneAlignDone &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _INTERLANE_ALIGN_DONE, _YES, buffer[4]); + interrupts.laneStatusIntr.downstmPortChng &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _D0WNSTRM_PORT_STATUS_DONE, _YES, buffer[4]); + interrupts.laneStatusIntr.linkStatusUpdated &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _LINK_STATUS_UPDATED, _YES, buffer[4]); + } + this->setDirtyLinkStatus(false); + } + + virtual bool readTraining(NvU8* voltageSwingLane, NvU8* preemphasisLane, + NvU8* trainingScoreLane, NvU8* postCursor, + NvU8 activeLaneCount) + { + NvU8 buffer[0xd] = {0}; + if (voltageSwingLane && preemphasisLane) + { + if (AuxRetry::ack != bus.read(NV_DPCD_LANE0_1_ADJUST_REQ, &buffer[0x6], 2)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_LANE0_1_ADJUST_REQ."); + return false; + } + voltageSwingLane[0] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_DRIVE_CURRENT, buffer[6]); + voltageSwingLane[1] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_DRIVE_CURRENT, buffer[6]); + voltageSwingLane[2] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_DRIVE_CURRENT, buffer[7]); + voltageSwingLane[3] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_DRIVE_CURRENT, buffer[7]); + + preemphasisLane[0] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_PREEMPHASIS, buffer[6]); + preemphasisLane[1] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_PREEMPHASIS, buffer[6]); + preemphasisLane[2] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, 
_LANEX_PREEMPHASIS, buffer[7]); + preemphasisLane[3] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_PREEMPHASIS, buffer[7]); + + } + if (trainingScoreLane) + { + if (AuxRetry::ack != bus.read(NV_DPCD_TRAINING_SCORE_LANE(0), &buffer[0x8], 4)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_TRAINING_SCORE_LANE(0)."); + return false; + } + trainingScoreLane[0] = buffer[0x8]; + trainingScoreLane[1] = buffer[0x9]; + trainingScoreLane[2] = buffer[0xa]; + trainingScoreLane[3] = buffer[0xb]; + } + if (postCursor) + { + if (AuxRetry::ack != bus.read(NV_DPCD_ADJUST_REQ_POST_CURSOR2, &buffer[0xc], 1)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_ADJUST_REQ_POST_CURSOR2."); + return false; + } + postCursor[0] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 0, buffer[0xc]); + postCursor[1] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 1, buffer[0xc]); + postCursor[2] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 2, buffer[0xc]); + postCursor[3] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 3, buffer[0xc]); + } + return true; + } + + virtual bool isLaneSettingsChanged(NvU8* oldVoltageSwingLane, + NvU8* newVoltageSwingLane, + NvU8* oldPreemphasisLane, + NvU8* newPreemphasisLane, + NvU8 activeLaneCount) + { + for (unsigned i = 0; i < activeLaneCount; i++) + { + if (oldVoltageSwingLane[i] != newVoltageSwingLane[i] || + oldPreemphasisLane[i] != newPreemphasisLane[i] ) + { + return true; + } + } + return false; + } + + void parseAndReadInterruptsLegacy() + { + bool automatedTestRequest = false; + NvU8 buffer[16] = {0}; + + if (AuxRetry::ack != bus.read(NV_DPCD_SINK_COUNT, &buffer[0], 2)) + return; + + interrupts.sinkCount = NV_DPCD_SINK_COUNT_VAL(buffer[0]); + + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _AUTO_TEST, _YES, buffer[1]); + interrupts.cpIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _CP, _YES, buffer[1]); + interrupts.mccsIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _MCCS_IRQ, _YES, buffer[1]); + interrupts.downRepMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _DOWN_REP_MSG_RDY, _YES, buffer[1]); + interrupts.upReqMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _UP_REQ_MSG_RDY, _YES, buffer[1]); + + fetchLinkStatusLegacy(); + this->setDirtyLinkStatus(false); + + parseAutomatedTestRequest(automatedTestRequest); + } + + void parseAndReadInterrupts() + { + if (caps.supportsESI) + parseAndReadInterruptsESI(); // DP 1.2 should use the new ESI region + else + parseAndReadInterruptsLegacy(); + + } + + virtual int getSinkCount() // DPCD offset 200 + { + return interrupts.sinkCount; + } + + // + // This was introduced as part of WAR for HP SDC Panel since their + // TCON sets DPCD 0x200 SINK_COUNT=0. It should never be called to + // set the SinkCount in other cases since SinkCount comes from DPCD. 
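+        // (It only seeds the cached interrupts.sinkCount; nothing is written back to DPCD.)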
+ // + virtual void setSinkCount(int sinkCount) + { + interrupts.sinkCount = sinkCount; + } + + virtual bool interruptContentProtection() + { + return interrupts.cpIRQ; + } + + virtual void clearInterruptContentProtection() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _CP, _YES, irqVector); + + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _CP, _YES, irqVector); + + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual bool intteruptMCCS() + { + return interrupts.mccsIRQ; + } + + virtual void clearInterruptMCCS() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _MCCS_IRQ, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _MCCS_IRQ, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual bool interruptDownReplyReady() + { + return interrupts.downRepMsgRdy; + } + + virtual bool interruptUpRequestReady() + { + return interrupts.upReqMsgRdy; + } + + virtual void clearInterruptDownReplyReady() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _DOWN_REP_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual void clearInterruptUpRequestReady() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _UP_REQ_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual bool getLaneStatusSymbolLock(int lane) + { + return interrupts.laneStatusIntr.laneStatus[lane].symbolLocked; + } + + virtual bool getLaneStatusClockRecoveryDone(int lane) + { + return interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone; + } + + virtual bool getInterlaneAlignDone() // DPCD offset 204 + { + return interrupts.laneStatusIntr.interlaneAlignDone; + } + + virtual bool getDownStreamPortStatusChange() + { + return interrupts.laneStatusIntr.downstmPortChng; + } + + virtual bool getPendingTestRequestTraining() // DPCD offset 218 + { + return interrupts.testTraining.testRequestTraining; + } + + virtual bool getPendingAutomatedTestRequest() + { + return interrupts.automatedTestRequest; + } + + virtual bool getPendingTestRequestEdidRead() + { + return interrupts.testEdid.testRequestEdidRead; + } + + virtual bool getPendingTestRequestPhyCompliance() + { + return interrupts.testPhyCompliance.testRequestPhyCompliance; + } + + virtual void getTestRequestTraining(LinkRate & rate, unsigned & lanes) // DPCD offset 219, 220 + { + rate = interrupts.testTraining.testRequestLinkRate; + lanes = 
interrupts.testTraining.testRequestLaneCount; + } + + virtual LinkQualityPatternType getPhyTestPattern() // DPCD offset 248 + { + return interrupts.testPhyCompliance.phyTestPattern; + } + + virtual void getCustomTestPattern(NvU8 *testPattern) // DPCD offset 250 - 259 + { + int i; + + for (i = 0; i < 10; i++) + { + testPattern[i] = interrupts.eightyBitCustomPat[i]; + } + } + + virtual bool getBKSV(NvU8 *bKSV) //DPCD offset 0x68000 + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BKSV_OFFSET, &bKSV[0], HDCP_KSV_SIZE)) + { + DP_LOG(("Found HDCP Bksv= %02x %02x %02x %02x %02x", + bKSV[4], bKSV[3], bKSV[2], bKSV[1], bKSV[0])); + return true; + } + return false; + } + + virtual bool getBCaps(BCaps &bCaps, NvU8 * rawByte) //DPCD offset 0x68028 + { + NvU8 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BCAPS_OFFSET, &buffer, sizeof buffer)) + { + bCaps.HDCPCapable = FLD_TEST_DRF(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_CAPABLE, _YES, buffer); + bCaps.repeater = FLD_TEST_DRF(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER, _YES, buffer); + if (rawByte) + *rawByte = buffer; + return true; + } + + DP_ASSERT(!"Unable to get BCaps"); + return false; + } + + virtual bool getHdcp22BCaps(BCaps &bCaps, NvU8 *rawByte) //DPCD offset 0x6921D + { + NvU8 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP22_BCAPS_OFFSET, &buffer, sizeof buffer)) + { + bCaps.HDCPCapable = FLD_TEST_DRF(_DPCD, _HDCP22_BCAPS_OFFSET, _HDCP_CAPABLE, _YES, buffer); + bCaps.repeater = FLD_TEST_DRF(_DPCD, _HDCP22_BCAPS_OFFSET, _HDCP_REPEATER, _YES, buffer); + if (rawByte) + *rawByte = buffer; + return true; + } + + DP_ASSERT(!"Unable to get 22BCaps"); + return false; + } + + virtual bool getBinfo(BInfo &bInfo) //DPCD offset 0x6802A + { + NvU16 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BINFO_OFFSET, (NvU8*)&buffer, sizeof buffer)) + { + bInfo.maxCascadeExceeded = FLD_TEST_DRF(_DPCD_HDCP, _BINFO_OFFSET, _MAX_CASCADE_EXCEEDED, _TRUE, buffer); + bInfo.depth = DRF_VAL(_DPCD_HDCP, _BINFO_OFFSET, _DEPTH, buffer); + bInfo.maxDevsExceeded = FLD_TEST_DRF(_DPCD_HDCP, _BINFO_OFFSET, _MAX_DEVS_EXCEEDED, _TRUE, buffer); + bInfo.deviceCount = DRF_VAL(_DPCD_HDCP, _BINFO_OFFSET, _DEVICE_COUNT, buffer); + return true; + } + + DP_ASSERT(!"Unable to get Binfo"); + return false; + } + + // Get RxStatus per provided HDCP cap + virtual bool getRxStatus(const HDCPState &hdcpState, NvU8 *data) + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + NvU32 addr = hdcpState.HDCP_State_22_Capable ? 
+ NV_DPCD_HDCP22_RX_STATUS : NV_DPCD_HDCP_BSTATUS_OFFSET; + + if (AuxRetry::ack == bus.read(addr, data, sizeof(NvU8))) + { + return true; + } + + DP_ASSERT(!"Unable to get RxStatus//Bstatus"); + return false; + } + + virtual AuxRetry::status setTestResponseChecksum(NvU8 checksum) + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + return bus.write(NV_DPCD_TEST_EDID_CHKSUM, &checksum, sizeof checksum); + } + + virtual AuxRetry::status setTestResponse(bool ack, bool edidChecksumWrite) + { + NvU8 testResponse = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (ack) + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_ACK, _YES, testResponse); + else + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_NACK, _YES, testResponse); + + if (edidChecksumWrite) + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_EDID_CHKSUM_WRITE, _YES, testResponse); + + return bus.write(NV_DPCD_TEST_RESPONSE, &testResponse, sizeof testResponse); + } + + // Message box encoding + virtual AuxRetry::status writeDownRequestMessageBox(NvU8 * data, size_t length) + { + // + // We can assume no message was sent if this fails. + // Reasoning: + // Sinks are not allowed to DEFER except on the first 16 byte write. + // If there isn't enough room for the 48 byte packet, that write + // will defer. + // + return bus.write(NV_DPCD_MBOX_DOWN_REQ, data, (unsigned)length); + } + + virtual size_t getDownRequestMessageBoxSize() + { + return DP_MESSAGEBOX_SIZE; + } + + virtual AuxRetry::status writeUpReplyMessageBox(NvU8 * data, size_t length) + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // + // We can assume no message was sent if this fails. + // Reasoning: + // Sinks are not allowed to DEFER except on the first 16 byte write. + // If there isn't enough room for the 48 byte packet, that write + // will defer. 
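+            // (Same reasoning as writeDownRequestMessageBox() above: a failure implies no partial message was accepted.)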
+            //
+            return bus.write(NV_DPCD_MBOX_UP_REP, data, (unsigned)length);
+        }
+
+        virtual size_t getUpReplyMessageBoxSize()
+        {
+            return 48;
+        }
+
+        virtual AuxRetry::status readDownReplyMessageBox(NvU32 offset, NvU8 * data, size_t length)
+        {
+            // if (caps.revisionMajor <= 0)
+            //     DP_ASSERT(0 && "Something is wrong, revision major should be > 0");
+
+            DP_ASSERT(offset + length <= DP_MESSAGEBOX_SIZE);
+
+            return bus.read(NV_DPCD_MBOX_DOWN_REP + offset, data, (unsigned)length);
+        }
+
+        virtual size_t getDownReplyMessageBoxSize()
+        {
+            return DP_MESSAGEBOX_SIZE;
+        }
+
+        virtual AuxRetry::status readUpRequestMessageBox(NvU32 offset, NvU8 * data, size_t length)
+        {
+            if (caps.revisionMajor <= 0)
+                DP_ASSERT(0 && "Something is wrong, revision major should be > 0");
+
+            DP_ASSERT(offset + length <= DP_MESSAGEBOX_SIZE);
+
+            return bus.read(NV_DPCD_MBOX_UP_REQ + offset, data, (unsigned)length);
+        }
+
+        virtual size_t getUpRequestMessageBoxSize()
+        {
+            return DP_MESSAGEBOX_SIZE;
+        }
+
+        virtual size_t getTransactionSize()
+        {
+            return bus.getDirect()->transactionSize();
+        }
+
+        virtual PowerState getPowerState()
+        {
+            NvU8 data;
+            if (AuxRetry::ack != bus.read(NV_DPCD_SET_POWER, &data, sizeof data, 0))
+            {
+                // Assume the powered-down state
+                return PowerStateD3;
+            }
+
+            switch (DRF_VAL(_DPCD, _SET_POWER, _VAL, data))
+            {
+                case NV_DPCD_SET_POWER_VAL_D3_PWRDWN:
+                    return PowerStateD3;
+
+                case NV_DPCD_SET_POWER_VAL_D0_NORMAL:
+                    return PowerStateD0;
+
+                case NV_DPCD_SET_POWER_VAL_D3_AUX_ON:
+                {
+                    DP_ASSERT(isAtLeastVersion(1, 2) && "DP 1.2 specific power state to be set on a non-DP1.2 system!?");
+                    return PowerStateD3AuxOn;
+                }
+                default:
+                    DP_ASSERT(0 && "Unknown power state! Assuming device is asleep");
+                    return PowerStateD3;
+            }
+        }
+
+        virtual bool setPowerState(PowerState newState)
+        {
+            NvU8 timeoutMs = 0;
+
+            if (newState == PowerStateD0)
+                timeoutMs = caps.extendedSleepWakeTimeoutRequestMs;
+
+            // Default behavior is 2 ms for better tolerance.
+            if (timeoutMs < 2)
+                timeoutMs = 2;
+
+            //
+            // A Branch Device must forward this value to its downstream devices.
+            // When set to the D3 state, a Sink Device may put its AUX CH circuit in a "power
+            // saving" state. In this mode the AUX CH circuit may only detect the presence of a
+            // differential signal input without replying to an AUX CH request transaction. Upon
+            // detecting the presence of a differential signal input, the Sink Device must exit the
+            // "power saving" state within 1 ms.
+            //
+            if (isAtLeastVersion(1, 1))
+            {
+                NvU8 data = 0;
+                if (newState == PowerStateD0)
+                    data |= NV_DPCD_SET_POWER_VAL_D0_NORMAL;
+                else if (newState == PowerStateD3)
+                {
+                    if (caps.extendedSleepWakeTimeoutRequestMs > 1)
+                    {
+                        NvU8 grant = 0;
+                        // Grant the extended sleep wake timeout before going to D3.
+                        grant = FLD_SET_DRF(_DPCD, _EXTENDED_DPRX_WAKE_TIMEOUT, _PERIOD_GRANTED, _YES, grant);
+                        // Log only when the grant write actually failed.
+                        if (AuxRetry::ack != bus.write(NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT, &grant, sizeof(grant)))
+                        {
+                            DP_LOG(("DisplayPort: Failed to grant extended sleep wake timeout before D3\n"));
+                        }
+                    }
+                    data = NV_DPCD_SET_POWER_VAL_D3_PWRDWN;
+                }
+                else
+                {
+                    DP_ASSERT(0 && "Unknown power state");
+                }
+
+                //
+                // If we're powering on, we need to allow up to 1 ms for the power
+                // to come online. Ideally we'd handle this with a callback,
+                // but for now we're going to do a wait here.
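+                // (timeoutMs is 2 ms by default, or the sink's granted extended wake timeout when one was requested.)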
+                //
+                Timeout timeout(timer, timeoutMs);
+                unsigned retries = 0;
+
+                do
+                {
+                    if (AuxRetry::ack == bus.write(NV_DPCD_SET_POWER, &data, sizeof(data)))
+                    {
+                        return true;
+                    }
+                    retries++;
+                }
+                while (timeout.valid() || (retries < 40) /* some panels need up to 40 retries */);
+
+                DP_LOG(("DisplayPort: Failed to bring panel back to wake state"));
+            }
+            else
+            {
+                // DP 1.0 devices cannot be put to sleep
+                if (newState == PowerStateD0)
+                    return true;
+            }
+
+            return false;
+        }
+
+        virtual void payloadTableClearACT()
+        {
+            NvU8 byte = NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_YES;
+            bus.write(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &byte, sizeof byte);
+        }
+
+        virtual bool payloadWaitForACTReceived()
+        {
+            NvU8 byte = 0;
+            int retries = 0;
+
+            while (true)
+            {
+                if (++retries > 40)
+                {
+                    DP_LOG(("DPHAL> ACT Not received by sink device!"));
+                    return false;
+                }
+
+                if (AuxRetry::ack == bus.read(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &byte, sizeof byte))
+                {
+                    if (FLD_TEST_DRF(_DPCD, _PAYLOAD_TABLE_UPDATE_STATUS, _ACT_HANDLED, _YES, byte))
+                    {
+                        DP_LOG(("DPHAL> ACT Received"));
+                        return true;
+                    }
+                }
+            }
+        }
+
+        virtual bool payloadAllocate(unsigned streamId, unsigned begin, unsigned count)
+        {
+            bool bResult = false;
+            NvU8 payloadAllocate[3];
+            DP_ASSERT(streamId < 64 && "Invalid stream location");
+            payloadAllocate[0] = (NvU8)streamId;
+            payloadAllocate[1] = (NvU8)begin;
+            payloadAllocate[2] = (NvU8)count;
+
+            AuxRetry::status status = bus.write(NV_DPCD_PAYLOAD_ALLOC_SET, (NvU8*)&payloadAllocate, sizeof payloadAllocate);
+
+            if (status == AuxRetry::ack)
+            {
+                //
+                // Bit 0 = VC Payload Table Updated (Change/Read only)
+                //         1 = Updated, cleared to zero when the Source writes 1
+                //         0 = Not updated since the last time this bit was cleared
+                //
+                NvU8 payloadStatus;
+                int retries = 0;
+
+                //
+                // Bug 1385165: a Synaptics branch device (revision 1.0) was found to take more
+                // than 200 ms before the table updated. Retrying without a delay does not give
+                // the device enough time to complete the table update; that would hit bug 1334070
+                // and trigger a monitor unplug/hotplug on the early return.
+                //
+                do
+                {
+                    if ((bus.read(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &payloadStatus, sizeof(payloadStatus)) == AuxRetry::ack))
+                    {
+                        if (FLD_TEST_DRF(_DPCD, _PAYLOAD_TABLE_UPDATE_STATUS, _UPDATED, _YES, payloadStatus))
+                        {
+                            bResult = true;
+                            break;
+                        }
+                    }
+                    else
+                    {
+                        DP_LOG(("DPHAL> Read NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS failed."));
+                    }
+
+                    timer->sleep(1);
+                } while (++retries < PAYLOADIDTABLE_UPDATED_CHECK_RETRIES);
+            }
+            else
+            {
+                DP_LOG(("DPHAL> Send NV_DPCD_PAYLOAD_ALLOC_SET failed."));
+            }
+
+            DP_LOG(("DPHAL> Requesting allocation Stream:%d | First Slot:%d | Count:%d (%s)", streamId, begin, count, bResult ?
"OK" : "FAILED")); + return bResult; + } + + void overrideMultiStreamCap(bool mstCapable) + { + caps.overrideToSST = !mstCapable; + } + + bool getMultiStreamCapOverride() + { + return caps.overrideToSST; + } + + bool getDpcdMultiStreamCap(void) + { + return caps.supportsMultistream; + } + + void setGpuDPSupportedVersions(bool supportDp1_2, bool supportDp1_4) + { + if (supportDp1_4) + DP_ASSERT(supportDp1_2 && "GPU supports DP1.4 should also support DP1.2!"); + + gpuDP1_2Supported = supportDp1_2; + gpuDP1_4Supported = supportDp1_4; + } + + void setGpuFECSupported(bool bSupportFEC) + { + bGpuFECSupported = bSupportFEC; + } + + void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) + { + DP_ASSERT(dpRegkeyDatabase.bInitialized && + "All regkeys are invalid because dpRegkeyDatabase is not initialized!"); + overrideDpcdRev = dpRegkeyDatabase.dpcdRevOveride; + bBypassILREdpRevCheck = dpRegkeyDatabase.bBypassEDPRevCheck; + } + + // To clear pending message {DOWN_REP/UP_REQ} and reply true if existed. + virtual bool clearPendingMsg() + { + NvU8 irqVector, data = 0; + if (AuxRetry::ack == bus.read(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, + &irqVector, sizeof(irqVector))) + { + if (FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, irqVector)) + { + // Clear pending DOWN_REP. + data = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, 0); + } + if (FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, irqVector)) + { + // Clear pending UP_REQ + data = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, data); + } + if (!data || + (AuxRetry::ack != bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, + &data, sizeof(data)))) + { + DP_LOG(("DPCONN> %s(): No Pending Message or " + "Failed to clear pending message: irqVector/data = 0x%08x/0x%08x", + __FUNCTION__, irqVector, data)); + return false; + } + + return true; + } + else + { + DP_LOG(("DPCONN> Clear Pending MSG: Failed to read ESI0")); + } + + return false; + } + + virtual bool isMessagingEnabled() + { + NvU8 mstmCtrl; + + if ((AuxRetry::ack == bus.read(NV_DPCD_MSTM_CTRL, &mstmCtrl, 1)) && + (FLD_TEST_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl))) + { + return true; + } + return false; + } + + virtual void setIndexedLinkrateEnabled(bool val) + { + bIndexedLinkrateEnabled = val; + } + + virtual bool isIndexedLinkrateEnabled() + { + return bIndexedLinkrateEnabled; + } + + virtual bool isIndexedLinkrateCapable() + { + return bIndexedLinkrateCapable; + } + + virtual NvU16 *getLinkRateTable() + { + if (!bIndexedLinkrateCapable) + { + DP_LOG(("DPCONN> link rate table should be invalid")); + } + return &caps.linkRateTable[0]; + } + + virtual NvU32 getVideoFallbackSupported() + { + return caps.videoFallbackFormats; + } + + virtual bool getRawLinkRateTable(NvU8 *buffer) + { + NvU16 temp[NV_DPCD_SUPPORTED_LINK_RATES__SIZE]; + NvU8 *data = (buffer == NULL) ? 
(NvU8*)&temp[0] : buffer; + + if (AuxRetry::ack != bus.read(NV_DPCD_SUPPORTED_LINK_RATES(0), data, + NV_DPCD_SUPPORTED_LINK_RATES__SIZE * sizeof(NvU16))) + { + return false; + } + return true; + } + + virtual void resetProtocolConverter() + { + NvU8 data = 0; + bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)); + bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_2, &data, sizeof(data)); + + } + + virtual bool setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode) + { + NvU8 data = 0; + + if (bEnableSourceControlMode) + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _SRC_CONTROL_MODE, _ENABLE, data); + if (bEnableFRLMode) + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _ENABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _ENABLE, data); + } + else + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _DISABLE, data); + } + } + else + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _SRC_CONTROL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _DISABLE, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + return true; + } + + virtual bool checkPCONFrlReady(bool *bFrlReady) + { + NvU8 data = 0; + + if (bFrlReady == NULL) + { + DP_ASSERT(0); + return true; + } + + *bFrlReady = false; + + if (AuxRetry::ack != bus.read(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &data, sizeof(data))) + { + return false; + } + + if (data == 0) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _NO, data)) + { + parseAndReadInterruptsESI(); + return false; + } + + // Clear only this interrupt bit. + this->clearHdmiLinkStatusChanged(); + + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data))) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _YES, data)) + { + *bFrlReady = true; + } + return true; + } + + virtual bool setupPCONFrlLinkAssessment(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) + { + NvU8 data = 0; + + NvU32 requestedMaxBw = (NvU32)(getMaxFrlBwFromMask(linkBwMask)) + 1; // +1 to convert PCONHdmiLinkBw enum to DPCD FRL BW cap definition + NvU32 targetBw = NV_MIN(caps.pconCaps.maxHdmiLinkBandwidthGbps, + requestedMaxBw); + + // Step 1: Configure FRL Link (FRL BW, BW mask / Concurrent) + if (bEnableExtendLTMode) + { + // + // Set FRL_LT_CONTROL to Extended mode: + // PCON FW trains for all Link BW selected in Link BW Mask (Bit 0~5) + // + data = linkBwMask; + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_2, _FRL_LT_CONTROL, + _EXTENDED, data); + } + else + { + // Set FRL_LT_CONTROL to Normal mode, so PCON stops when first FRL LT succeed. + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_2, _FRL_LT_CONTROL, + _NORMAL, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_2, &data, sizeof(data))) + { + return false; + } + + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + + if (bEnableConcurrentMode && caps.pconCaps.bConcurrentLTSupported) + { + // Client selects concurrent. 
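+            // (FLD_SET_DRF(d, r, f, c, v) is the NVIDIA register-field helper: it
+            // returns v with the field NV<d><r>_<f> replaced by the named constant
+            // <c>. The calls below merge the _CONCURRENT_LT_MODE, _HDMI_LINK and
+            // _MAX_LINK_BW settings into one byte before it is written over AUX.)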
+            data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _CONCURRENT_LT_MODE,
+                               _ENABLE, data);
+        }
+        else
+        {
+            //
+            // Don't do concurrent LT for now.
+            //
+            data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _CONCURRENT_LT_MODE,
+                               _DISABLE, data);
+        }
+        data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK,
+                           _ENABLE, data);
+        data = FLD_SET_DRF_NUM(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _MAX_LINK_BW,
+                               targetBw, data);
+
+        if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)))
+        {
+            return false;
+        }
+
+        return true;
+    }
+
+    virtual bool checkPCONFrlLinkStatus(NvU32 *frlRateMask)
+    {
+        NvU8 data = 0;
+
+        if (frlRateMask == NULL)
+        {
+            DP_ASSERT(0);
+            return true;
+        }
+
+        *frlRateMask = 0;
+        // Check if IRQ happens.
+        if (AuxRetry::ack != bus.read(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &data, sizeof(data)))
+        {
+            return false;
+        }
+
+        if (FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _NO, data))
+        {
+            return false;
+        }
+        // Check HDMI Link Active status (0x303B Bit 0) and Link Config (0x3036)
+        if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data)))
+        {
+            return false;
+        }
+
+        if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_ACTIVE, _YES, data))
+        {
+            if (AuxRetry::ack == bus.read(NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS, &data, sizeof(data)))
+            {
+                *frlRateMask = DRF_VAL(_DPCD20, _PCON_HDMI_LINK_CONFIG_STATUS, _LT_RESULT, data);
+            }
+        }
+
+        return true;
+    }
+
+    virtual bool queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady)
+    {
+        NvU8 data = 0;
+
+        if (bLinkActive == NULL && bLinkReady == NULL)
+            return false;
+
+        if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data)))
+        {
+            return false;
+        }
+        if (bLinkReady != NULL)
+        {
+            *bLinkReady = (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS,
+                                        _LINK_READY, _YES, data));
+        }
+        if (bLinkActive != NULL)
+        {
+            *bLinkActive = (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS,
+                                         _LINK_ACTIVE, _YES, data));
+        }
+        return true;
+    }
+
+    virtual NvU32 restorePCONFrlLink(NvU32 linkBwMask,
+                                     bool bEnableExtendLTMode     = false,
+                                     bool bEnableConcurrentMode   = false)
+    {
+        // Restore HDMI Link.
+        // 1. Clear HDMI link enable bit (305A bit 7)
+        NvU8 data = 0;
+        NvU32 loopCount;
+        NvU32 frlRate;
+        if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)))
+        {
+            return false;
+        }
+        data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, _DISABLE, data);
+        if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)))
+        {
+            return false;
+        }
+        // 2. Set FRL or TMDS (Optional if not changed) (305A bit 5)
+        // 3. Read FRL Ready Bit (303B bit 1)
+
+        Timeout timeout(timer, 500 /* 500ms */);
+        data = 0;
+        do
+        {
+            if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS,
+                                          &data, sizeof(data)))
+                continue;
+            if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _YES, data))
+                break;
+        } while (timeout.valid());
+
+        if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _NO, data))
+        {
+            return false;
+        }
+
+        // 4. Configure FRL Link (Optional if not changed)
+        // 5. Set HDMI Enable Bit.
+        data = 0;
+
+        if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)))
+        {
+            return false;
+        }
+        data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, _ENABLE, data);
+        if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)))
+        {
+            return false;
+        }
+
+        // 6.
Read HDMI Link Status link active bit (2005 bit 3) + // 7. Read HDMI link active status bit and link config status (303b bit0 / 3036) + loopCount = NV_PCON_FRL_LT_TIMEOUT_THRESHOLD; + do + { + if (checkPCONFrlLinkStatus(&frlRate) == true) + { + break; + } + Timeout timeout(this->timer, NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } while (--loopCount); + + return frlRate; + } + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) + { + dpMemCopy(caps, &this->caps.psrCaps, sizeof(vesaPsrSinkCaps)); + } + + virtual bool updatePsrConfiguration(vesaPsrConfig psrcfg) + { + NvU8 config = 0U; + + if (psrcfg.psrCfgEnable) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SINK_ENABLE, _YES, config); + } + if (psrcfg.srcTxEnabledInPsrActive) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SOURCE_LINK_ACTIVE, _YES, config); + } + if (psrcfg.crcVerifEnabledInPsrActive) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _CRC_VERIFICATION_ACTIVE, _YES, config); + } + if (psrcfg.frameCaptureSecondActiveFrame) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _FRAME_CAPTURE_INDICATION, _SECOND, config); + } + if (psrcfg.selectiveUpdateOnSecondActiveline) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SU_LINE_CAPTURE_INDICATION, _SECOND, config); + } + if (psrcfg.enableHpdIrqOnCrcMismatch) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _HPD_IRQ_ON_CRC_ERROR, _YES, config); + } + if (psrcfg.enablePsr2) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _ENABLE_PSR2, _YES, config); + } + + return AuxRetry::ack == + bus.write(NV_DPCD_EDP_PSR_CONFIG, &config, 1); + } + + virtual bool readPsrConfiguration(vesaPsrConfig *psrcfg) + { + NvU8 config = 0U; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_EDP_PSR_CONFIG, &config, 1); + + psrcfg->psrCfgEnable = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _SINK_ENABLE, _YES, config); + psrcfg->srcTxEnabledInPsrActive = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _SOURCE_LINK_ACTIVE, _YES, config); + psrcfg->crcVerifEnabledInPsrActive = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _CRC_VERIFICATION_ACTIVE, + _YES, config); + psrcfg->frameCaptureSecondActiveFrame = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _FRAME_CAPTURE_INDICATION, + _SECOND, config); + psrcfg->selectiveUpdateOnSecondActiveline = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, + _SU_LINE_CAPTURE_INDICATION, _SECOND, config); + psrcfg->enableHpdIrqOnCrcMismatch = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _HPD_IRQ_ON_CRC_ERROR, _YES, config); + psrcfg->enablePsr2 = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _ENABLE_PSR2, _YES, config); + + return retVal; + } + + virtual bool readPsrState(vesaPsrState *psrState) + { + NvU8 config = 0U; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_PANEL_SELF_REFRESH_STATUS, &config, 1); + + if (retVal) + { + *psrState = + (vesaPsrState)DRF_VAL(_DPCD, _PANEL_SELF_REFRESH_STATUS, + _VAL, config); + } + return retVal; + } + + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) + { + NvU8 config[2] = { 0U , 0U }; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_PANEL_SELF_REFRESH_DEBUG0, + &config[0], sizeof(config)); + + if (retVal) + { + psrDbgState->maxResyncFrames = + DRF_VAL(_DPCD_PANEL_SELF_REFRESH, + _DEBUG0, _MAX_RESYNC_FRAME_CNT, config[0]); + psrDbgState->actualResyncFrames = + DRF_VAL(_DPCD_PANEL_SELF_REFRESH, + _DEBUG0, _LAST_RESYNC_FRAME_CNT, config[0]); + + psrDbgState->lastSdpPsrState = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _PSR_STATE_BIT, config[1]); + psrDbgState->lastSdpUpdateRfb = + 
!!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                          _RFB_BIT, config[1]);
+            psrDbgState->lastSdpCrcValid =
+                !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                          _CRC_VALID_BIT, config[1]);
+            psrDbgState->lastSdpSuValid =
+                !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                          _SU_VALID_BIT, config[1]);
+            psrDbgState->lastSdpFirstSURcvd =
+                !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                          _SU_FIRST_LINE_RCVD, config[1]);
+            psrDbgState->lastSdpLastSURcvd =
+                !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                          _SU_LAST_LINE_RCVD, config[1]);
+            psrDbgState->lastSdpYCoordValid =
+                !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP,
+                          _Y_CORD_VALID, config[1]);
+        }
+        return retVal;
+    }
+
+    virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr)
+    {
+        NvU8 config = 0U;
+        config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH,
+                                 _ERR_STATUS,
+                                 _LINK_CRC_ERR,
+                                 psrErr.linkCrcError,
+                                 config);
+        config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH,
+                                 _ERR_STATUS,
+                                 _RFB_ERR,
+                                 psrErr.rfbStoreError,
+                                 config);
+        config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH,
+                                 _ERR_STATUS,
+                                 _VSC_SDP_ERR,
+                                 psrErr.vscSdpError,
+                                 config);
+
+        return AuxRetry::ack == bus.write(
+            NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS, &config, 1);
+    }
+
+    virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr)
+    {
+        NvU8 config = 0U;
+        bool retVal;
+        retVal = AuxRetry::ack == bus.read(
+            NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS,
+            &config, sizeof(config));
+
+        if (retVal)
+        {
+            // Mirror the field-to-bit mapping used by writePsrErrorStatus() above.
+            psrErr->linkCrcError = FLD_TEST_DRF(_DPCD,
+                                                _PANEL_SELF_REFRESH_ERR_STATUS,
+                                                _LINK_CRC_ERR, _YES, config);
+            psrErr->rfbStoreError = FLD_TEST_DRF(_DPCD,
+                                                 _PANEL_SELF_REFRESH_ERR_STATUS,
+                                                 _RFB_ERR, _YES, config);
+            psrErr->vscSdpError = FLD_TEST_DRF(_DPCD,
+                                               _PANEL_SELF_REFRESH_ERR_STATUS,
+                                               _VSC_SDP_ERR,
+                                               _YES, config);
+        }
+        return retVal;
+    }
+
+    virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrEvt)
+    {
+        NvU8 config = 0U;
+
+        if (psrEvt.sinkCapChange)
+        {
+            config = FLD_SET_DRF(_DPCD,
+                                 _PANEL_SELF_REFRESH_EVENT_STATUS,
+                                 _CAP_CHANGE,
+                                 _YES, config);
+        }
+        return AuxRetry::ack == bus.write(
+            NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS, &config, 1);
+    }
+
+    virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrEvt)
+    {
+        NvU8 config = 0U;
+        bool retVal;
+        retVal = AuxRetry::ack == bus.read(
+            NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS,
+            &config, sizeof(config));
+
+        if (retVal)
+        {
+            psrEvt->sinkCapChange = DRF_VAL(_DPCD,
+                                            _PANEL_SELF_REFRESH_EVENT_STATUS,
+                                            _CAP_CHANGE,
+                                            config);
+        }
+        return retVal;
+    }
+};
+
+DPCDHAL * DisplayPort::MakeDPCDHAL(AuxBus * bus, Timer * timer)
+{
+    return new DPCDHALImpl(bus, timer);
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp
new file mode 100644
index 0000000..f4c1dde
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp
@@ -0,0 +1,6798 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_connectorimpl.cpp * +* DP connector implementation * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_guid.h" +#include "dp_configcaps.h" +#include "dp_list.h" +#include "dp_buffer.h" +#include "dp_auxdefs.h" +#include "dp_watermark.h" +#include "dp_edid.h" +#include "dp_discovery.h" +#include "dp_groupimpl.h" +#include "dp_deviceimpl.h" +#include "dp_connectorimpl.h" + +#include "dp_auxbus.h" +#include "dpringbuffertypes.h" + +#include "ctrl/ctrl0073/ctrl0073dfp.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "dp_tracing.h" + +using namespace DisplayPort; + +// These wrappers are specifically for DSC PPS library malloc and free callbacks +// Pointer to these functions are populated to dscMalloc/dscFree in DSC_InitializeCallBack and it is initialized from both DPLib and HDMiPacketLib. +// In HDMI case, callback function for malloc/free needs client handle so to match function prototype, in DP case, adding these wrappers. 
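+// The DP-side wrappers below simply ignore clientHandle and forward to the
+// library allocator, which keeps them signature-compatible with the
+// DSC_CALLBACK function-pointer types that the HDMI path also populates.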
+extern "C" void * dpMallocCb(const void *clientHandle, NvLength size); +extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr); + +extern "C" void * dpMallocCb(const void *clientHandle, NvLength size) +{ + return dpMalloc(size); +} + +extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr) +{ + dpFree(pMemPtr); +} + +ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink) + : main(main), + auxBus(auxBus), + timer(timer), + sink(sink), + bIgnoreSrcOuiHandshake(false), + linkPolicy(), + linkGuessed(false), + isLinkQuiesced(false), + bNoLtDoneAfterHeadDetach(false), + isDP12AuthCap(false), + isHDCPAuthOn(false), + isHDCPReAuthPending(false), + isHDCPAuthTriggered(false), + isHopLimitExceeded(false), + isDiscoveryDetectComplete(false), + bDeferNotifyLostDevice(false), + hdcpValidateData(), + authRetries(0), + retryLT(0), + hdcpCapsRetries(0), + hdcpCpIrqRxStatusRetries(0), + bFromResumeToNAB(false), + bAttachOnResume(false), + bHdcpAuthOnlyOnDemand(false), + constructorFailed(false), + policyModesetOrderMitigation(false), + policyForceLTAtNAB(false), + policyAssessLinkSafely(false), + bDisableVbiosScratchRegisterUpdate(false), + modesetOrderMitigation(false), + compoundQueryActive(false), + compoundQueryResult(false), + compoundQueryCount(0), + messageManager(0), + discoveryManager(0), + numPossibleLnkCfg(0), + linkAwaitingTransition(false), + linkState(DP_TRANSPORT_MODE_INIT), + bAudioOverRightPanel(false), + connectorActive(false), + firmwareGroup(0), + bAcpiInitDone(false), + bIsUefiSystem(false), + bSkipLt(false), + bMitigateZombie(false), + bDelayAfterD3(false), + bKeepOptLinkAlive(false), + bNoFallbackInPostLQA(false), + LT2FecLatencyMs(0), + bDscCapBasedOnParent(false), + ResStatus(this) +{ + clearTimeslices(); + hal = MakeDPCDHAL(auxBus, timer); + if (hal == NULL) + { + constructorFailed = true; + return; + } + highestAssessedLC = getMaxLinkConfig(); + firmwareGroup = createFirmwareGroup(); + + if (firmwareGroup == NULL) + { + constructorFailed = true; + return; + } + + hal->setPC2Disabled(main->isPC2Disabled()); + + // + // If a GPU is DP1.2 or DP1.4 supported then set these capalibilities. + // This is used for accessing DP1.2/DP1.4 specific register space & features + // + hal->setGpuDPSupportedVersions(main->isDP1_2Supported(), main->isDP1_4Supported()); + + // Set if GPU supports FEC. Check panel FEC caps only if GPU supports it. + hal->setGpuFECSupported(main->isFECSupported()); + + // Set if LTTPR training is supported per regKey + hal->setLttprSupported(main->isLttprSupported()); + + const DP_REGKEY_DATABASE& dpRegkeyDatabase = main->getRegkeyDatabase(); + this->applyRegkeyOverrides(dpRegkeyDatabase); + hal->applyRegkeyOverrides(dpRegkeyDatabase); + + // Initialize DSC callbacks + DSC_CALLBACK callback; + callback.clientHandle = NULL; + callback.dscPrint = NULL; + callback.dscMalloc = dpMallocCb; + callback.dscFree = dpFreeCb; + DSC_InitializeCallback(callback); +} + +void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) +{ + DP_ASSERT(dpRegkeyDatabase.bInitialized && + "All regkeys are invalid because dpRegkeyDatabase is not initialized!"); + + this->bSkipAssessLinkForEDP = dpRegkeyDatabase.bAssesslinkForEdpSkipped; + + // If Hdcp authenticatoin on demand regkey is set, override to the provided value. 
+ this->bHdcpAuthOnlyOnDemand = dpRegkeyDatabase.bHdcpAuthOnlyOnDemand; + + if (dpRegkeyDatabase.bOptLinkKeptAlive) + { + this->bKeepLinkAliveMST = true; + this->bKeepLinkAliveSST = true; + } + else + { + this->bKeepLinkAliveMST = dpRegkeyDatabase.bOptLinkKeptAliveMst; + this->bKeepLinkAliveSST = dpRegkeyDatabase.bOptLinkKeptAliveSst; + } + this->bReportDeviceLostBeforeNew = dpRegkeyDatabase.bReportDeviceLostBeforeNew; + this->maxLinkRateFromRegkey = dpRegkeyDatabase.applyMaxLinkRateOverrides; + this->bEnableAudioBeyond48K = dpRegkeyDatabase.bAudioBeyond48kEnabled; + this->bDisableSSC = dpRegkeyDatabase.bSscDisabled; + this->bEnableFastLT = dpRegkeyDatabase.bFastLinkTrainingEnabled; + this->bDscMstCapBug3143315 = dpRegkeyDatabase.bDscMstCapBug3143315; +} + +void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled) +{ + policyModesetOrderMitigation = enabled; +} + +void ConnectorImpl::setPolicyForceLTAtNAB(bool enabled) +{ + policyForceLTAtNAB = enabled; +} + +void ConnectorImpl::setPolicyAssessLinkSafely(bool enabled) +{ + policyAssessLinkSafely = enabled; +} + +// +// This function is to re-read remote HDCP BKSV and BCAPS. +// +// Function is added for DP1.2 devices which don't have valid BKSV at HPD and +// make BKSV available after Payload Ack. +// +void ConnectorImpl::readRemoteHdcpCaps() +{ + if (hdcpCapsRetries) + { + fireEvents(); + return; + } + +} + +void ConnectorImpl::discoveryDetectComplete() +{ + fireEvents(); + // no outstanding EDID reads and branch/sink detections for MST + if (pendingEdidReads.isEmpty() && + (!discoveryManager || + (discoveryManager->outstandingBranchDetections.isEmpty() && + discoveryManager->outstandingSinkDetections.isEmpty()))) + { + bDeferNotifyLostDevice = false; + isDiscoveryDetectComplete = true; + bIsDiscoveryDetectActive = false; + + // Complete detection and see if can enter power saving state. 
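+        // isNoActiveStreamAndPowerdown() is expected to drop the link into its
+        // power-saving state only when no stream on this connector remains active.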
isNoActiveStreamAndPowerdown();
+
+        fireEvents();
+    }
+}
+
+void ConnectorImpl::applyEdidWARs(Edid & edid, DiscoveryManager::Device device)
+{
+    DpMonitorDenylistData *pDenylistData = new DpMonitorDenylistData();
+    NvU32 warFlag = 0;
+    warFlag = main->monitorDenylistInfo(edid.getManufId(), edid.getProductId(), pDenylistData);
+
+    // Apply any edid overrides if required
+    edid.applyEdidWorkArounds(warFlag, pDenylistData);
+
+    delete pDenylistData;
+}
+
+void DisplayPort::DevicePendingEDIDRead::mstEdidCompleted(EdidReadMultistream * from)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_LOG(("DP-CONN> Edid read complete: %s %s",
+            from->topologyAddress.toString(sb),
+            from->edid.getName()));
+    ConnectorImpl * connector = parent;
+    parent->applyEdidWARs(from->edid, device);
+    parent->processNewDevice(device, from->edid, true, DISPLAY_PORT, RESERVED);
+    delete this;
+    connector->discoveryDetectComplete();
+}
+
+void DisplayPort::DevicePendingEDIDRead::mstEdidReadFailed(EdidReadMultistream * from)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_LOG(("DP-CONN> Edid read failed: %s (using fallback)",
+            from->topologyAddress.toString(sb)));
+    ConnectorImpl * connector = parent;
+    parent->processNewDevice(device, Edid(), true, DISPLAY_PORT, RESERVED);
+    delete this;
+    connector->discoveryDetectComplete();
+}
+
+void ConnectorImpl::messageProcessed(MessageManager::MessageReceiver * from)
+{
+    if (from == &ResStatus)
+    {
+        for (Device * i = enumDevices(0); i; i = enumDevices(i))
+            if (i->getGUID() == ResStatus.request.guid)
+            {
+                DeviceImpl * child = ((DeviceImpl *)i)->children[ResStatus.request.port];
+                if (child)
+                {
+                    child->resetCacheInferredLink();
+                    sink->bandwidthChangeNotification((DisplayPort::Device*)child, false);
+                    return;
+                }
+
+                break;
+            }
+
+        // Child wasn't found... Invalidate all bandwidths on topology
+        for (Device * i = enumDevices(0); i; i = enumDevices(i)) {
+            ((DeviceImpl *)i)->resetCacheInferredLink();
+        }
+    }
+    else
+        DP_ASSERT(0 && "Received unexpected upstream message that we AREN'T registered for");
+}
+
+void ConnectorImpl::discoveryNewDevice(const DiscoveryManager::Device & device)
+{
+    //
+    // We're guaranteed that there isn't already a device on the list with the same
+    // address. If we receive the same device announce again - it is considered
+    // a notification that the underlying device may have seen an HPD.
+    //
+    // We're going to queue an EDID read, and remember which device we did it on.
+    // If the EDID comes back different we'll have to mark the old device object
+    // as disconnected - and create a new one. This is required because
+    // EDID is one of the fields considered to be immutable.
+    //
+
+    if (!device.branch)
+    {
+        if (!device.videoSink)
+        {
+            // Don't read EDID on a device having no videoSink
+            processNewDevice(device, Edid(), false, DISPLAY_PORT, RESERVED);
+            return;
+        }
+        pendingEdidReads.insertBack(new DevicePendingEDIDRead(this, messageManager, device));
+    }
+    else
+    {
+        // Don't try to read the EDID on a branch device
+        processNewDevice(device, Edid(), true, DISPLAY_PORT, RESERVED);
+    }
+}
+
+void ConnectorImpl::processNewDevice(const DiscoveryManager::Device & device,
+                                     const Edid & edid,
+                                     bool isMultistream,
+                                     DwnStreamPortType portType,
+                                     DwnStreamPortAttribute portAttribute,
+                                     bool isCompliance)
+{
+    //
+    // Ideally we should read the EDID here, but instead we just report the device
+    // and try to find it in the list of known devices.
+    //
+    DeviceImpl * existingDev = findDeviceInList(device.address);
+    if (existingDev)
+        existingDev->resetCacheInferredLink();
+
+    //
+    // Process fallback EDID
+    //
+    Edid processedEdid = edid;
+
+    if (!edid.getEdidSize() || !edid.isChecksumValid() || !edid.isValidHeader() ||
+        edid.isPatchedChecksum())
+    {
+        if (portType == WITHOUT_EDID)
+        {
+            switch(portAttribute)
+            {
+            case RESERVED:
+            case IL_720_480_60HZ:
+            case IL_720_480_50HZ:
+            case IL_1920_1080_60HZ:
+            case IL_1920_1080_50HZ:
+            case PG_1280_720_60HZ:
+            case PG_1280_720_50_HZ:
+                DP_ASSERT(0 && "Default EDID feature not supported!");
+                break;
+            }
+
+        }
+        if (portType == ANALOG_VGA)
+            makeEdidFallbackVGA(processedEdid);
+        else
+        {
+            makeEdidFallback(processedEdid, hal->getVideoFallbackSupported());
+        }
+    }
+
+    //
+    // Process caps
+    //
+    bool hasAudio = device.SDPStreams && device.SDPStreamSinks;
+    bool hasVideo = device.videoSink;
+    NvU64 maxTmdsClkRate = 0U;
+    ConnectorType connector = connectorDisplayPort;
+
+    if (portType == DISPLAY_PORT_PLUSPLUS || portType == DVI || portType == HDMI)
+    {
+        maxTmdsClkRate = device.maxTmdsClkRate;
+    }
+
+    switch(portType)
+    {
+    case DISPLAY_PORT:
+    case DISPLAY_PORT_PLUSPLUS:  // DP port that supports DP and TMDS
+        connector = connectorDisplayPort;
+        break;
+
+    case ANALOG_VGA:
+        connector = connectorVGA;
+        break;
+
+    case DVI:
+        connector = connectorDVI;
+        break;
+
+    case HDMI:
+        connector = connectorHDMI;
+        break;
+
+    case WITHOUT_EDID:
+        connector = connectorDisplayPort;
+        break;
+    }
+
+    // Dongle in SST mode.
+    if ((device.peerDevice == Dongle) && (device.address.size() == 0))
+        hasAudio = hasVideo = false;
+
+    if (device.branch)
+        hasAudio = hasVideo = false;
+
+    if (!existingDev)
+        goto create;
+
+    if (isCompliance && (existingDev->processedEdid == processedEdid))
+    {
+        // unzombie the old device
+    }
+    else if (existingDev->audioSink != hasAudio ||
+             existingDev->videoSink != hasVideo ||
+             existingDev->rawEDID != edid ||
+             existingDev->processedEdid != processedEdid ||
+             existingDev->connectorType != connector ||
+             existingDev->multistream != isMultistream ||
+             existingDev->complianceDeviceEdidReadTest != isCompliance ||
+             existingDev->maxTmdsClkRate != maxTmdsClkRate ||
+             (existingDev->address.size() > 1 && !existingDev->getParent()) ||
+             // If it is an Uninitialized Mux device, goto create so that we can properly
+             // initialize the device and all its caps
+             existingDev->isFakedMuxDevice())
+        goto create;
+
+    // Complete match, make sure it's marked as plugged
+    existingDev->plugged = true;
+    if (existingDev->isActive())
+        existingDev->activeGroup->update(existingDev, true);
+
+
+    fireEvents();
+    return;
+create:
+    // If there is an existing device, mark it as no longer available.
+    if (existingDev)
+        existingDev->plugged = false;
+
+    // Find parent
+    DeviceImpl * parent = 0;
+    if (device.address.size() != 0)
+    {
+        for (Device * i = enumDevices(0); i; i = enumDevices(i))
+        {
+            if ((i->getTopologyAddress() == device.address.parent()) &&
+                (((DeviceImpl *)i)->plugged))
+            {
+                parent = (DeviceImpl*)i;
+                break;
+            }
+        }
+    }
+
+    DP_ASSERT((parent || device.address.size() <= 1) && "Device was registered before parent");
+
+    DeviceImpl * newDev;
+    //
+    // If it is a faked Mux device, we have already notified DD of a few of its caps.
+    // Reuse the same device to make sure that DD updates the same device's parameters,
+    // otherwise create a new device.
+    //
+    if (existingDev && existingDev->isFakedMuxDevice())
+    {
+        newDev = existingDev;
+        existingDev = NULL;
+    }
+    else
+    {
+        newDev = new DeviceImpl(hal, this, parent);
+    }
+
+    if (parent)
+        parent->children[device.address.tail()] = newDev;
+
+    if (!newDev)
+    {
+        DP_ASSERT(0 && "new failed");
+        return;
+    }
+
+    // Fill out the new device
+    newDev->address = device.address;
+    newDev->multistream = isMultistream;
+    newDev->videoSink = hasVideo;
+    newDev->audioSink = hasAudio;
+    newDev->plugged = true;
+    newDev->rawEDID = edid;
+    newDev->processedEdid = processedEdid;
+    newDev->connectorType = connector;
+    newDev->guid = device.peerGuid;
+    newDev->peerDevice = device.peerDevice;
+    newDev->portMap = device.portMap;
+    newDev->dpcdRevisionMajor = device.dpcdRevisionMajor;
+    newDev->dpcdRevisionMinor = device.dpcdRevisionMinor;
+    newDev->complianceDeviceEdidReadTest = isCompliance;
+    newDev->maxTmdsClkRate = maxTmdsClkRate;
+
+    Address::NvU32Buffer addrBuffer;
+    dpMemZero(addrBuffer, sizeof(addrBuffer));
+    newDev->address.toNvU32Buffer(addrBuffer);
+    NV_DPTRACE_INFO(NEW_SINK_DETECTED, newDev->address.size(), addrBuffer[0], addrBuffer[1], addrBuffer[2], addrBuffer[3],
+                    newDev->multistream, newDev->rawEDID.getManufId(), newDev->rawEDID.getProductId());
+
+    // Apply any DPCD overrides if required
+    newDev->dpcdOverrides();
+
+    //
+    // Some 4K eDP panels need HBR2 to support higher modes. The highest assessed LC
+    // remains in a stale state after applying DPCD overrides here, so we need to
+    // assess the link again.
+    //
+    if (newDev->isOptimalLinkConfigOverridden())
+    {
+        this->assessLink();
+    }
+
+    // Panel has issues with LQA, reassess link
+    if (processedEdid.WARFlags.reassessMaxLink)
+    {
+        //
+        // If the highest assessed LC is not equal to the max possible link config and
+        // the panel is a branch device that the GPU is link training, re-assess the link.
+        //
+        int retries = 0;
+
+        while ((retries < WAR_MAX_REASSESS_ATTEMPT) && (highestAssessedLC != getMaxLinkConfig()))
+        {
+            DP_LOG(("DP> Assessed link is not equal to highest possible config. Reassess link."));
+            this->assessLink();
+            retries++;
+        }
+    }
+
+    // Postpone the remote HDCPCap read for Dongles
+    DP_ASSERT(!isLinkInD3() && "Hdcp probe at D3");
+    if (device.peerDevice != Dongle)
+    {
+        DP_ASSERT(newDev->isDeviceHDCPDetectionAlive == false);
+        if ((newDev->deviceHDCPDetection = new DeviceHDCPDetection(newDev, messageManager, timer)))
+        {
+            //
+            // We cannot move the hdcpDetection after the stream is added, because DD
+            // needs the hdcp Cap before the stream is added.
+            //
+            newDev->isDeviceHDCPDetectionAlive = true;
+            newDev->deviceHDCPDetection->start();
+        }
+        else
+        {
+            // As risk control, mark the device as not HDCP capable.
+            DP_ASSERT(0 && "new failed");
+            newDev->isDeviceHDCPDetectionAlive = false;
+            newDev->isHDCPCap = False;
+
+            if (!newDev->isMultistream())
+                newDev->shadow.hdcpCapDone = true;
+        }
+    }
+
+    newDev->vrrEnablement = new VrrEnablement(newDev);
+    if (!newDev->vrrEnablement)
+    {
+        DP_ASSERT(0 && "new VrrEnablement failed");
+    }
+
+    BInfo bInfo;
+    if ((!isHopLimitExceeded) && (hal->getBinfo(bInfo)))
+    {
+        if (bInfo.maxCascadeExceeded || bInfo.maxDevsExceeded)
+        {
+            if (isHDCPAuthOn)
+            {
+                isHDCPAuthOn = false;
+            }
+            isHopLimitExceeded = true;
+        }
+        else
+            isHopLimitExceeded = false;
+    }
+
+    //
+    // If the device is a faked Mux device, then we just initialized it.
+ // Reset its faked status and skip adding it to the deviceList + // + if (newDev->isFakedMuxDevice()) + { + newDev->bIsFakedMuxDevice = false; + newDev->bIsPreviouslyFakedMuxDevice = true; + } + else + { + deviceList.insertBack(newDev); + } + + // if a new device has replaced a previous compliance device; let this event be exposed to DD now. + // ie : the old device will be zombied/lost now ... lazily(instead of at an unplug which happened a while back.) + if (existingDev && existingDev->complianceDeviceEdidReadTest) + existingDev->lazyExitNow = true; + + if(newDev->isBranchDevice() && newDev->isAtLeastVersion(1,4)) + { + // + // GUID_2 will be non-zero for a virtual peer device and 0 for others. + // This will help identify if a device is virtual peer device or not. + // + newDev->queryGUID2(); + } + + // Read panel DSC support only if GPU supports DSC + bool bGpuDscSupported; + main->getDscCaps(&bGpuDscSupported); + if (bGpuDscSupported) + { + if (newDev->getDSCSupport()) + { + // Read and parse DSC caps only if panel supports DSC + newDev->readAndParseDSCCaps(); + } + + if (!processedEdid.WARFlags.bIgnoreDscCap) + { + // Check if DSC is possible for the device and if so, set DSC Decompression device. + newDev->setDscDecompressionDevice(this->bDscCapBasedOnParent); + } + } + + // Read panel replay capabilities + newDev->getPanelReplayCaps(); + + // Get Panel FEC support only if GPU supports FEC + if (this->isFECSupported()) + { + newDev->getFECSupport(); + } + + if (main->supportMSAOverMST()) + { + newDev->bMSAOverMSTCapable = newDev->getSDPExtnForColorimetrySupported(); + } + else + { + newDev->bMSAOverMSTCapable = false; + } + + fireEvents(); +} + +void ConnectorImpl::populateAllDpConfigs() +{ + LinkRate linkRate; + LinkRates *pConnLinkRates; + unsigned laneCounts[] = {laneCount_1, laneCount_2, laneCount_4}; + unsigned laneSets = sizeof(laneCounts) / sizeof(laneCounts[0]); + + // + // Following sequence is to be followed for saving power by default; + // It may vary with sinks which support link rate table. + // + // Link Config MBPS + // 1*RBR 162 + // 1*HBR 270 + // 2*RBR 324 + // 1*HBR2 540 + // 2*HBR 540 + // 4*RBR 648 + // 1*HBR3 810 + // ... 
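+    //
+    // (The MBPS column is the payload bandwidth of each configuration: with
+    // 8b/10b channel coding a lane carries linkRate/10 bytes per second, so
+    // 1*RBR = 1.62 Gbps / 10 = 162 MBps and 2*HBR = 2 x 270 MBps = 540 MBps.)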
+    //
+    if (numPossibleLnkCfg)
+    {
+        DP_LOG(("DP> DPCONN> Rebuild possible link rate configurations"));
+        delete[] allPossibleLinkCfgs;
+        numPossibleLnkCfg = 0;
+    }
+
+    // Attempt to configure link rate table mode if supported
+    pConnLinkRates = linkPolicy.getLinkRates();
+    if (hal->isIndexedLinkrateCapable() &&
+        main->configureLinkRateTable(hal->getLinkRateTable(), pConnLinkRates))
+    {
+        // Maximal link rate is limited with link rate table
+        hal->overrideOptimalLinkRate(pConnLinkRates->getMaxRate());
+        hal->setIndexedLinkrateEnabled(true);
+    }
+    else
+    {
+        // Reset the configured link rate table, if it was ever enabled, so that RM behaves correctly
+        if (hal->isIndexedLinkrateEnabled())
+        {
+            main->configureLinkRateTable(NULL, NULL);
+            hal->setIndexedLinkrateEnabled(false);
+        }
+
+        // Get maximal link rate supported by GPU
+        linkRate = main->maxLinkRateSupported();
+
+        // Insert by order
+        pConnLinkRates->clear();
+        if (linkRate >= RBR)
+            pConnLinkRates->import(linkBW_1_62Gbps);
+
+        if (linkRate >= HBR)
+            pConnLinkRates->import(linkBW_2_70Gbps);
+
+        if (linkRate >= HBR2)
+            pConnLinkRates->import(linkBW_5_40Gbps);
+
+        if (linkRate >= HBR3)
+            pConnLinkRates->import(linkBW_8_10Gbps);
+    }
+
+    numPossibleLnkCfg = laneSets * pConnLinkRates->getNumLinkRates();
+    if (numPossibleLnkCfg == 0)
+    {
+        DP_LOG(("DPCONN> %s: Invalid lane count %d or link rates %d!",
+                __FUNCTION__, laneSets, pConnLinkRates->getNumLinkRates()));
+        DP_ASSERT(0 && "Invalid lane count or link rates!");
+        return;
+    }
+
+    allPossibleLinkCfgs = new LinkConfiguration[numPossibleLnkCfg]();
+
+    if (allPossibleLinkCfgs == NULL)
+    {
+        DP_LOG(("DPCONN> %s: Failed to allocate allPossibleLinkCfgs array",
+                __FUNCTION__));
+        numPossibleLnkCfg = 0;
+        return;
+    }
+
+    // Populate all possible link configurations
+    linkRate = pConnLinkRates->getMaxRate();
+    for (unsigned i = 0; i < pConnLinkRates->getNumLinkRates(); i++)
+    {
+        for (unsigned j = 0; j < laneSets; j++)
+        {
+            allPossibleLinkCfgs[i * laneSets + j].setLaneRate(linkRate, laneCounts[j]);
+        }
+        linkRate = pConnLinkRates->getLowerRate(linkRate);
+    }
+
+    // Sort link configurations by bandwidth from low to high
+    for (unsigned i = 0; i < numPossibleLnkCfg - 1; i++)
+    {
+        LinkConfiguration *pLowCfg = &allPossibleLinkCfgs[i];
+        for (unsigned j = i + 1; j < numPossibleLnkCfg; j++)
+        {
+            if (allPossibleLinkCfgs[j] < *pLowCfg)
+                pLowCfg = &allPossibleLinkCfgs[j];
+        }
+        // Swap
+        if (pLowCfg != &allPossibleLinkCfgs[i])
+        {
+            LinkRate swapRate  = pLowCfg->peakRate;
+            unsigned swapLanes = pLowCfg->lanes;
+            pLowCfg->setLaneRate(allPossibleLinkCfgs[i].peakRate,
+                                 allPossibleLinkCfgs[i].lanes);
+            allPossibleLinkCfgs[i].setLaneRate(swapRate, swapLanes);
+        }
+    }
+}
+
+void ConnectorImpl::discoveryLostDevice(const Address & address)
+{
+    DeviceImpl * existingDev = findDeviceInList(address);
+
+    if (!existingDev)
+    {
+        DP_ASSERT(0 && "Device lost on device not in database?!");
+        return;
+    }
+
+    existingDev->plugged = false;
+    existingDev->devDoingDscDecompression = NULL;
+    fireEvents();
+}
+
+ConnectorImpl::~ConnectorImpl()
+{
+    if (numPossibleLnkCfg)
+        delete[] allPossibleLinkCfgs;
+
+    timer->cancelCallbacks(this);
+    delete discoveryManager;
+    pendingEdidReads.clear();
+    delete messageManager;
+    delete hal;
+}
+
+//
+// Clear all the state associated with the head attachment
+//
+void ConnectorImpl::hardwareWasReset()
+{
+    activeLinkConfig.lanes = 0;
+
+    while (!activeGroups.isEmpty())
+    {
+        GroupImpl * g = (GroupImpl *)activeGroups.front();
+        activeGroups.remove(g);
+        inactiveGroups.insertBack(g);
+
+
g->setHeadAttached(false); + } +} + +Group * ConnectorImpl::resume(bool firmwareLinkHandsOff, + bool firmwareDPActive, + bool plugged, + bool isUefiSystem, + unsigned firmwareHead, + bool bFirmwareLinkUseMultistream, + bool bDisableVbiosScratchRegisterUpdate, + bool bAllowMST) +{ + Group * result = 0; + hardwareWasReset(); + previousPlugged = false; + connectorActive = true; + bIsUefiSystem = isUefiSystem; + + this->bDisableVbiosScratchRegisterUpdate = bDisableVbiosScratchRegisterUpdate; + + bFromResumeToNAB = true; + + if (firmwareLinkHandsOff) + { + isLinkQuiesced = true; + } + else if (firmwareDPActive) + { + DP_LOG(("CONN> Detected firmware panel is active on head %d.", firmwareHead)); + ((GroupImpl *)firmwareGroup)->setHeadAttached(true); + ((GroupImpl *)firmwareGroup)->headIndex = firmwareHead; + ((GroupImpl *)firmwareGroup)->streamIndex = 1; + ((GroupImpl *)firmwareGroup)->headInFirmware = true; + + this->linkState = bFirmwareLinkUseMultistream ? DP_TRANSPORT_MODE_MULTI_STREAM : DP_TRANSPORT_MODE_SINGLE_STREAM; + + inactiveGroups.remove((GroupImpl *)firmwareGroup); + activeGroups.remove((GroupImpl *)firmwareGroup); + activeGroups.insertBack((GroupImpl *)firmwareGroup); + + result = firmwareGroup; + } + + hal->overrideMultiStreamCap(bAllowMST); + + // + // In resume code path, all devices on this connector gets lost and deleted on first fireEvents() + // and that could generate unnecessary new/lost device events. Therefore defer to lost devices + // until discovery detect gets completed, this allows processNewDevice() function to look + // at matching existing devices and optimize creation of new devices. We only have to set the flag + // to true when plugged = true, since if disconnected, we are not going to defer anything. + // + bDeferNotifyLostDevice = plugged; + bAttachOnResume = true; + notifyLongPulse(plugged); + bAttachOnResume = false; + + return result; +} + + +void ConnectorImpl::pause() +{ + connectorActive = false; + if (messageManager) + { + messageManager->pause(); + } +} + +// Query current Device topology +Device * ConnectorImpl::enumDevices(Device * previousDevice) +{ + if (previousDevice) + previousDevice = (DeviceImpl *)((DeviceImpl*)previousDevice)->next; + else + previousDevice = (DeviceImpl *)deviceList.begin(); + + if ((DeviceImpl*)previousDevice == deviceList.end()) + return 0; + else + return (DeviceImpl *)previousDevice; +} + +LinkConfiguration ConnectorImpl::getMaxLinkConfig() +{ + NvU64 maxLinkRate; + + DP_ASSERT(hal); + + if (main->isEDP()) + { + // Regkey is supported on eDP panels only + maxLinkRate = maxLinkRateFromRegkey; + // Check if valid value is present in regkey + if (maxLinkRate && (IS_VALID_LINKBW(maxLinkRate))) + { + maxLinkRate = maxLinkRate * DP_LINK_BW_FREQ_MULTI_MBPS; + } + else + { + maxLinkRate = hal->getMaxLinkRate(); + } + } + else + { + maxLinkRate = hal->getMaxLinkRate(); + } + + LinkRate linkRate = maxLinkRate ? + DP_MIN(maxLinkRate, main->maxLinkRateSupported()) : + main->maxLinkRateSupported(); + + unsigned laneCount = hal->getMaxLaneCount() ? 
+ DP_MIN(hal->getMaxLaneCountSupportedAtLinkRate(linkRate), hal->getMaxLaneCount()) : + 4; + + return LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream(), + false, /* disablePostLTRequest */ + this->bFECEnable); +} + +LinkConfiguration ConnectorImpl::getActiveLinkConfig() +{ + DP_ASSERT(hal); + + return activeLinkConfig; +} + +void ConnectorImpl::beginCompoundQuery() +{ + if (linkGuessed && (main->getSorIndex() != DP_INVALID_SOR_INDEX)) + { + assessLink(); + } + + DP_ASSERT( !compoundQueryActive && "Previous compoundQuery was not ended."); + compoundQueryActive = true; + compoundQueryCount = 0; + compoundQueryResult = true; + compoundQueryLocalLinkPBN = 0; + + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + + if (i->getTopologyAddress().size() <= 1) + { + dev->bandwidth.lastHopLinkConfig = highestAssessedLC; + dev->bandwidth.compound_query_state.totalTimeSlots = 63; + dev->bandwidth.compound_query_state.timeslots_used_by_query = 0; + continue; + } + + if (!this->linkUseMultistream()) + continue; + + // Initialize starting conditions + // + // Note: this compound query code assumes that the total bandwidth is + // available for the configuration being queried. This ignores the + // concentrator case where some bandwidth may be in use by streams not + // controlled by this driver instance. Concentrators are currently not + // supported. + dev->bandwidth.compound_query_state.timeslots_used_by_query = 0; + dev->inferLeafLink(&dev->bandwidth.compound_query_state.totalTimeSlots); + + // + // Some VBIOS leave the branch in stale state and allocatePayload request queued + // at branch end gets processed much later causing the FreePBN returned to be stale. + // Clear the PBN in case EPR reports 0 free PBN when we have not explicitly requested + // for it, to clear up any previous stale allocations + // + if (dev->bandwidth.compound_query_state.totalTimeSlots == 0 && + !dev->payloadAllocated && dev->plugged) + { + GroupImpl *group = dev->activeGroup; + if (group != NULL) + { + NakData nakData; + Address devAddress = dev->getTopologyAddress(); + + AllocatePayloadMessage allocate; + unsigned sink = 0; // hardcode the audio sink to 0th in the device. + allocate.set(devAddress.parent(), devAddress.tail(), + dev->isAudioSink() ? 1 : 0, group->streamIndex, 0, &sink, true); + + ((DeviceImpl *)dev)->bandwidth.enum_path.dataValid = false; + + if (group->parent->messageManager->send(&allocate, nakData)) + dev->inferLeafLink(&dev->bandwidth.compound_query_state.totalTimeSlots); + } + } + + // Clear assement state + dev->bandwidth.compound_query_state.bandwidthAllocatedForIndex = 0; + } +} + +// +// This call will be deprecated as soon as all clients move to the new API +// +bool ConnectorImpl::compoundQueryAttach(Group * target, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. 
+                                       // DisplayPort encodes 3-8 channel streams as 8 channel
+                                       NvU64 pixelClockHz,              // Requested pixel clock for the mode
+                                       unsigned rasterWidth,
+                                       unsigned rasterHeight,
+                                       unsigned rasterBlankStartX,
+                                       unsigned rasterBlankEndX,
+                                       unsigned depth)
+{
+    ModesetInfo modesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz,
+                            rasterWidth, rasterHeight, (rasterBlankStartX - rasterBlankEndX),
+                            0/*surfaceHeight*/, depth, rasterBlankStartX, rasterBlankEndX);
+
+    DpModesetParams modesetParams(0, modesetInfo);
+    return compoundQueryAttach(target, modesetParams);
+}
+
+bool ConnectorImpl::compoundQueryAttach(Group * target,
+                                        const DpModesetParams &modesetParams,  // Modeset info
+                                        DscParams *pDscParams)                 // DSC parameters
+{
+    DP_ASSERT( compoundQueryActive );
+    ModesetInfo localModesetInfo = modesetParams.modesetInfo;
+
+    compoundQueryCount++;
+
+    if (!modesetParams.modesetInfo.depth || !modesetParams.modesetInfo.pixelClockHz)
+    {
+        DP_ASSERT(!"DP-CONN> Params with zero value passed to query!");
+        compoundQueryResult = false;
+        return false;
+    }
+
+    //
+    // Bug 925211: In some cases we need to clamp the supported frequencies to <= 48KHz.
+    // If the audio frequency is greater than 48KHz and is not overridden by the regkey
+    // "ENABLE_AUDIO_BEYOND48K", simply return false.
+    //
+    if (((modesetParams.modesetInfo.twoChannelAudioHz > WAR_AUDIOCLAMPING_FREQ)
+        || (modesetParams.modesetInfo.eightChannelAudioHz > WAR_AUDIOCLAMPING_FREQ))
+        && !(bEnableAudioBeyond48K))
+    {
+        compoundQueryResult = false;
+        return false;
+    }
+
+    bool bGpuDscSupported;
+    main->getDscCaps(&bGpuDscSupported);
+
+    if (linkUseMultistream())
+    {
+        LinkConfiguration lc;
+        if (this->preferredLinkConfig.isValid())
+            lc = preferredLinkConfig;
+        else
+            lc = highestAssessedLC;
+
+        if (pDscParams && (pDscParams->forceDsc != DSC_FORCE_DISABLE))
+        {
+            bool bFecCapable = false;
+
+            Device * newDev = target->enumDevices(0);
+            DeviceImpl * dev = (DeviceImpl *)newDev;
+
+            if (dev && dev->isDSCPossible())
+            {
+                if (dev->devDoingDscDecompression != dev)
+                {
+                    //
+                    // If DSC decoding is going to happen at the sink's parent then
+                    // we have to make sure the path from the source to the sink's
+                    // parent is FEC capable.
+                    // Refer DP 1.4 Spec 5.4.5
+                    //
+                    if (dev->address.size() == 2)
+                    {
+                        //
+                        // If there is only one branch between source and sink then the branch
+                        // should be directly connected to the source (SST case) and the DPCD cap
+                        // should already be available.
+                        //
+                        bFecCapable = dev->parent->isFECSupported();
+                    }
+                    else
+                    {
+                        //
+                        // If there are multiple branches in the path, we have to check the
+                        // fecCapability field in the EPR reply to the sink's parent's parent.
+                        // The EPR reply for each branch should already be updated with inferLeafLink.
+                        // The fecCapability field being true here means the path up to the sink's parent,
+                        // which is the "downstream end of path" for the sink's parent's parent,
+                        // is FEC capable.
+ // Refer DP 1.4 Spec 2.11.9.4.1 + // + bFecCapable = dev->parent->parent->isFECSupported(); + } + } + else + { + bFecCapable = dev->isFECSupported(); + } + } + + // Make sure panel/it's parent & GPU supports DSC and the whole path supports FEC + if (bGpuDscSupported && // If GPU supports DSC + this->isFECSupported() && // If GPU supports FEC + pDscParams && // If client sent DSC info + pDscParams->bCheckWithDsc && // If client wants to check with DSC + (dev && dev->isDSCPossible()) && // Either device or it's parent supports DSC + bFecCapable && // If path up to dsc decoding device supports FEC + (modesetParams.modesetInfo.bitsPerComponent != 6)) // DSC doesn't support bpc = 6 + { + DSC_INFO dscInfo; + MODESET_INFO modesetInfoDSC; + WAR_DATA warData; + NvU64 availableBandwidthBitsPerSecond = 0; + unsigned PPS[DSC_MAX_PPS_SIZE_DWORD]; + unsigned bitsPerPixelX16 = 0; + + if (!pDscParams->bitsPerPixelX16) + { + // + // For now, we will keep a pre defined value for bitsPerPixel for MST = 10 + // bitsPerPixelX16 = 160 + // + pDscParams->bitsPerPixelX16 = PREDEFINED_DSC_MST_BPPX16; + } + + bitsPerPixelX16 = pDscParams->bitsPerPixelX16; + + if (!this->preferredLinkConfig.isValid()) + { + lc.enableFEC(true); + } + + dpMemZero(PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); + dpMemZero(&dscInfo, sizeof(DSC_INFO)); + + // Populate DSC related info for PPS calculations + populateDscCaps(&dscInfo, dev->devDoingDscDecompression, pDscParams->forcedParams); + + // populate modeset related info for PPS calculations + populateDscModesetInfo(&modesetInfoDSC, &modesetParams); + + // checking for DSC v1.1 and YUV combination + if ( (dscInfo.sinkCaps.algorithmRevision.versionMajor == 1) && + (dscInfo.sinkCaps.algorithmRevision.versionMinor == 1) && + (modesetParams.colorFormat == dpColorFormat_YCbCr444 )) + { + DP_LOG(("WARNING: DSC v1.2 or higher is recommended for using YUV444")); + DP_LOG(("Current version is 1.1")); + } + + availableBandwidthBitsPerSecond = lc.minRate * 8 * lc.lanes; + + warData.dpData.linkRateHz = lc.peakRate; + warData.dpData.laneCount = lc.lanes; + warData.dpData.dpMode = DSC_DP_MST; + warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth; + warData.connectorType = DSC_DP; + + if ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC, + &warData, availableBandwidthBitsPerSecond, + (NvU32*)(PPS), + (NvU32*)(&bitsPerPixelX16))) != NVT_STATUS_SUCCESS) + { + if (pDscParams->forceDsc == DSC_FORCE_ENABLE) + { + // If DSC is force enabled then return failure here + compoundQueryResult = false; + pDscParams->bEnableDsc = false; + return false; + } + else + { + // If PPS calculation failed then try without DSC + pDscParams->bEnableDsc = false; + lc.enableFEC(false); + goto nonDscDpIMP; + } + } + else + { + pDscParams->bEnableDsc = true; + compoundQueryResult = true; + localModesetInfo.bEnableDsc = true; + localModesetInfo.depth = bitsPerPixelX16; + + if (dev->devDoingDscDecompression != dev) + { + // + // Device's parent is doing DSC decompression so we need to check + // if device's parent can send uncompressed stream to Sink. + // + unsigned mode_pbn; + + mode_pbn = pbnForMode(modesetParams.modesetInfo); + + // + // As Device's Parent is doing DSC decompression, this is leaf device and + // complete available bandwidth at this node is available for requested mode. 
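+                        // (For scale: pbnForMode() is expected to follow the MST
+                        // accounting rule of 1 PBN = 54/64 MBps plus the spec's
+                        // 0.6% margin, so a 1920x1080@60 stream at 24 bpp, i.e.
+                        // 148.5 MHz * 3 B/px = 445.5 MBps, needs roughly
+                        // 445.5 * 64/54 * 1.006 = 531 PBN.)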
+                        //
+                        if (mode_pbn > dev->bandwidth.enum_path.total)
+                        {
+                            compoundQueryResult = false;
+                            pDscParams->bEnableDsc = false;
+                            return false;
+                        }
+                    }
+
+                    if (pDscParams->pDscOutParams != NULL)
+                    {
+                        //
+                        // If requested then DP Library is supposed to return if mode is
+                        // possible with DSC and calculated PPS and bits per pixel.
+                        //
+                        dpMemCopy(pDscParams->pDscOutParams->PPS, PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD);
+                        pDscParams->bitsPerPixelX16 = bitsPerPixelX16;
+                    }
+                    else
+                    {
+                        //
+                        // Client only wants to know if mode is possible or not but doesn't
+                        // need all calculated PPS parameters in case DSC is required. Do nothing.
+                        //
+                    }
+                }
+            }
+        }
+
+nonDscDpIMP:
+        // I. Evaluate use of local link bandwidth
+
+        // Calculate the PBN required
+        unsigned base_pbn, slots, slots_pbn;
+        lc.pbnRequired(localModesetInfo, base_pbn, slots, slots_pbn);
+
+        // Accumulate the amount of PBN rounded up to nearest timeslot
+        compoundQueryLocalLinkPBN += slots_pbn;
+        if (compoundQueryLocalLinkPBN > lc.pbnTotal())
+            compoundQueryResult = false;
+
+        // Verify the min blanking, etc
+        Watermark dpinfo;
+
+        if (this->isFECSupported())
+        {
+            if (!isModePossibleMSTWithFEC(lc, localModesetInfo, &dpinfo))
+            {
+                compoundQueryResult = false;
+            }
+        }
+        else
+        {
+            if (!isModePossibleMST(lc, localModesetInfo, &dpinfo))
+            {
+                compoundQueryResult = false;
+            }
+        }
+
+        for (Device * d = target->enumDevices(0); d; d = target->enumDevices(d))
+        {
+            DeviceImpl * i = (DeviceImpl *)d;
+
+            // Allocate bandwidth for the entire path to the root
+            // NOTE: the local link was already handled above
+            DeviceImpl * tail = i;
+            while (tail && tail->getParent())
+            {
+                // Have we already accounted for this stream?
+                if (!(tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex & (1 << compoundQueryCount)))
+                {
+                    tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex |= (1 << compoundQueryCount);
+
+                    LinkConfiguration * linkConfig = tail->inferLeafLink(NULL);
+                    tail->bandwidth.compound_query_state.timeslots_used_by_query += linkConfig->slotsForPBN(base_pbn);
+
+                    if (tail->bandwidth.compound_query_state.timeslots_used_by_query > tail->bandwidth.compound_query_state.totalTimeSlots)
+                        compoundQueryResult = false;
+                }
+                tail = (DeviceImpl*)tail->getParent();
+            }
+        }
+    }
+    else // SingleStream case
+    {
+        DeviceImpl * nativeDev = findDeviceInList(Address());
+
+        if (compoundQueryCount != 1)
+        {
+            compoundQueryResult = false;
+            return false;
+        }
+
+        if (nativeDev && (nativeDev->connectorType == connectorHDMI))
+        {
+            if (modesetParams.colorFormat == dpColorFormat_YCbCr420)
+            {
+                if ((nativeDev->maxTmdsClkRate) &&
+                    (nativeDev->maxTmdsClkRate <
+                     ((modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth / 24) / 2)))
+                {
+                    compoundQueryResult = false;
+                    return false;
+                }
+            }
+            else
+            {
+                if ((nativeDev->maxTmdsClkRate) &&
+                    (nativeDev->maxTmdsClkRate <
+                     (modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth / 24)))
+                {
+                    compoundQueryResult = false;
+                    return false;
+                }
+            }
+        }
+
+        LinkConfiguration lc = highestAssessedLC;
+
+        // Check if there is a special request from the client
+        if (this->preferredLinkConfig.isValid())
+        {
+            lc = preferredLinkConfig;
+        }
+        else
+        {
+            //
+            // Always check for DP IMP without FEC overhead first before
+            // trying with DSC/FEC
+            //
+            lc.enableFEC(false);
+        }
+
+        // If no valid native device was found, force legacy DP IMP
+        if (!nativeDev)
+        {
+            compoundQueryResult = this->willLinkSupportModeSST(lc, modesetParams.modesetInfo);
+        }
+        else if ((pDscParams &&
(pDscParams->forceDsc == DSC_FORCE_ENABLE)) || // DD has forced DSC Enable + (modesetParams.modesetInfo.mode == DSC_DUAL) || // DD decided to use 2 Head 1 OR mode + (!this->willLinkSupportModeSST(lc, modesetParams.modesetInfo))) // Mode is not possible without DSC + { + // If DP IMP fails without DSC or client requested to force DSC + if (pDscParams && pDscParams->forceDsc != DSC_FORCE_DISABLE) + { + // Check if panel and GPU both supports DSC or not. Also check if panel supports FEC + if (bGpuDscSupported && // if GPU supports DSC + this->isFECSupported() && // If GPU supports FEC + pDscParams && // if client sent DSC info + pDscParams->bCheckWithDsc && // if client wants to check with DSC + nativeDev->isDSCPossible() && // if device supports DSC decompression + (nativeDev->isFECSupported() || main->isEDP()) && // if device supports FEC decoding or is an DSC capable eDP panel which doesn't support FEC + (modesetParams.modesetInfo.bitsPerComponent != 6)) // DSC doesn't support bpc = 6 + { + DSC_INFO dscInfo; + MODESET_INFO modesetInfoDSC; + WAR_DATA warData; + NvU64 availableBandwidthBitsPerSecond = 0; + unsigned PPS[DSC_MAX_PPS_SIZE_DWORD]; + unsigned bitsPerPixelX16 = pDscParams->bitsPerPixelX16; + + if (!this->preferredLinkConfig.isValid() && nativeDev->isFECSupported()) + { + lc.enableFEC(true); + } + + dpMemZero(PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); + dpMemZero(&dscInfo, sizeof(DSC_INFO)); + + // Populate DSC related info for PPS calculations + populateDscCaps(&dscInfo, nativeDev->devDoingDscDecompression, pDscParams->forcedParams); + + // Populate modeset related info for PPS calculations + populateDscModesetInfo(&modesetInfoDSC, &modesetParams); + + // checking for DSC v1.1 and YUV combination + if ( (dscInfo.sinkCaps.algorithmRevision.versionMajor == 1) && + (dscInfo.sinkCaps.algorithmRevision.versionMinor == 1) && + (modesetParams.colorFormat == dpColorFormat_YCbCr444 )) + { + DP_LOG(("WARNING: DSC v1.2 or higher is recommended for using YUV444")); + DP_LOG(("Current version is 1.1")); + } + + availableBandwidthBitsPerSecond = lc.minRate * 8 * lc.lanes; + + warData.dpData.linkRateHz = lc.peakRate; + warData.dpData.laneCount = lc.lanes; + warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth; + warData.dpData.dpMode = DSC_DP_SST; + warData.connectorType = DSC_DP; + + if ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC, + &warData, availableBandwidthBitsPerSecond, + (NvU32*)(PPS), + (NvU32*)(&bitsPerPixelX16))) != NVT_STATUS_SUCCESS) + { + compoundQueryResult = false; + pDscParams->bEnableDsc = false; + } + else + { + localModesetInfo.bEnableDsc = true; + localModesetInfo.depth = bitsPerPixelX16; + LinkConfiguration lowestSelected; + bool bIsModeSupported = false; + + + if (this->preferredLinkConfig.isValid()) + { + // Check if mode is possible with preferred link config + bIsModeSupported = willLinkSupportModeSST(lc, localModesetInfo); + } + else + { + // + // Check if mode is possible with calculated bits_per_pixel. + // Check with all possible link configs and not just highest + // assessed because with DSC, mode can fail with higher + // link config and pass for lower one. This is because + // if raster parameters are really small and DP bandwidth is + // very high then we may end up with some TU with 0 active + // symbols in SST. This may cause HW hang and so DP IMP rejects + // this mode. Refer Bug 200379426. 
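+                            // (Background: in SST the link packs each stream into
+                            // 64-symbol transfer units; the active-symbol count per
+                            // TU scales with streamBandwidth / linkBandwidth, so a
+                            // small raster on a very fast link can round down to 0.)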
+                            //
+                            bIsModeSupported = getValidLowestLinkConfig(lc, lowestSelected, localModesetInfo);
+                        }
+
+                        if (!bIsModeSupported)
+                        {
+                            pDscParams->bEnableDsc = false;
+                            compoundQueryResult = false;
+                        }
+                        else
+                        {
+                            pDscParams->bEnableDsc = true;
+                            compoundQueryResult = true;
+
+                            if (pDscParams->pDscOutParams != NULL)
+                            {
+                                //
+                                // If requested then DP Library is supposed to return if mode is
+                                // possible with DSC and calculated PPS and bits per pixel.
+                                //
+                                dpMemCopy(pDscParams->pDscOutParams->PPS, PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD);
+                                pDscParams->bitsPerPixelX16 = bitsPerPixelX16;
+                            }
+                            else
+                            {
+                                //
+                                // Client only wants to know if mode is possible or not, but doesn't
+                                // need the calculated PPS parameters in case DSC is required. Do nothing.
+                                //
+                            }
+                        }
+                    }
+                }
+                else
+                {
+                    // Either GPU or Sink doesn't support DSC
+                    compoundQueryResult = false;
+                }
+            }
+            else
+            {
+                // Client hasn't sent DSC params info or has asked to force disable DSC.
+                compoundQueryResult = false;
+            }
+        }
+        else
+        {
+            // Mode was successful
+            compoundQueryResult = true;
+        }
+    }
+
+    return compoundQueryResult;
+}
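For reference, the PBN bookkeeping in the MST branch of compoundQueryAttach() above works in units of 54/64 MB/s, as defined by the DisplayPort MST specification. The sketch below is a standalone illustration of the spec formula (including its 0.6% downspread margin); it is not the library's pbnRequired() implementation.

    #include <cstdint>
    #include <cstdio>

    // One PBN unit is 54/64 MB/s; the spec adds a 0.6% margin for SSC downspread.
    // Equivalent to ceil(pixelClockKHz * bpp/8 * (64/54) * 1.006 / 1e6).
    static uint32_t pbnForMode(uint64_t pixelClockKHz, uint32_t bitsPerPixel)
    {
        uint64_t num = pixelClockKHz * bitsPerPixel * 64 * 1006;
        uint64_t den = 54ULL * 8 * 1000 * 1000;
        return (uint32_t)((num + den - 1) / den);   // round up
    }

    int main()
    {
        // 3840x2160@60 (pixel clock ~533.25 MHz) at 24 bpp needs ~1908 PBN.
        std::printf("PBN = %u\n", pbnForMode(533250, 24));
        return 0;
    }

The per-branch timeslot accounting above then converts this PBN into timeslots on each link of the path (slotsForPBN) and fails the query once any link runs out of its 64 timeslots.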
+void ConnectorImpl::populateDscModesetInfo(MODESET_INFO* pModesetInfo, const DpModesetParams* pModesetParams)
+{
+    pModesetInfo->pixelClockHz = pModesetParams->modesetInfo.pixelClockHz;
+    pModesetInfo->activeWidth = pModesetParams->modesetInfo.surfaceWidth;
+    pModesetInfo->activeHeight = pModesetParams->modesetInfo.surfaceHeight;
+    pModesetInfo->bitsPerComponent = pModesetParams->modesetInfo.bitsPerComponent;
+
+    if (pModesetParams->colorFormat == dpColorFormat_RGB)
+    {
+        pModesetInfo->colorFormat = NVT_COLOR_FORMAT_RGB;
+    }
+    else if (pModesetParams->colorFormat == dpColorFormat_YCbCr444)
+    {
+        pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr444;
+    }
+    else if (pModesetParams->colorFormat == dpColorFormat_YCbCr422)
+    {
+        pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr422;
+    }
+    else if (pModesetParams->colorFormat == dpColorFormat_YCbCr420)
+    {
+        pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr420;
+    }
+    else
+    {
+        pModesetInfo->colorFormat = NVT_COLOR_FORMAT_RGB;
+    }
+
+    if (pModesetParams->modesetInfo.mode == DSC_DUAL)
+    {
+        pModesetInfo->bDualMode = true;
+    }
+    else
+    {
+        pModesetInfo->bDualMode = false;
+    }
+
+    if (pModesetParams->modesetInfo.mode == DSC_DROP)
+    {
+        pModesetInfo->bDropMode = true;
+    }
+    else
+    {
+        pModesetInfo->bDropMode = false;
+    }
+}
+
+void ConnectorImpl::populateDscGpuCaps(DSC_INFO* dscInfo)
+{
+    unsigned encoderColorFormatMask;
+    unsigned lineBufferSizeKB;
+    unsigned rateBufferSizeKB;
+    unsigned bitsPerPixelPrecision;
+    unsigned maxNumHztSlices;
+    unsigned lineBufferBitDepth;
+
+    // Get GPU DSC capabilities
+    main->getDscCaps(NULL,
+                     &encoderColorFormatMask,
+                     &lineBufferSizeKB,
+                     &rateBufferSizeKB,
+                     &bitsPerPixelPrecision,
+                     &maxNumHztSlices,
+                     &lineBufferBitDepth);
+
+    if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB)
+    {
+        dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_RGB;
+    }
+
+    if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444)
+    {
+        dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444;
+    }
+
+    if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)
+    {
+        dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422;
+    }
+
+    if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420)
+    {
+        dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420;
+    }
+
+    dscInfo->gpuCaps.lineBufferSize = lineBufferSizeKB;
+
+    if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16)
+    {
+        dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16;
+    }
+
+    if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8)
+    {
+        dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8;
+    }
+
+    if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4)
+    {
+        dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4;
+    }
+
+    if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2)
+    {
+        dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2;
+    }
+
+    if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1)
+    {
+        dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1;
+    }
+
+    dscInfo->gpuCaps.maxNumHztSlices = maxNumHztSlices;
+
+    dscInfo->gpuCaps.lineBufferBitDepth = lineBufferBitDepth;
+}
+
+void ConnectorImpl::populateDscSinkCaps(DSC_INFO* dscInfo, DeviceImpl * dev)
+{
+    // Early return if dscInfo or dev is NULL
+    if ((dscInfo == NULL) || (dev == NULL))
+    {
+        return;
+    }
+
+    if (dev->dscCaps.dscDecoderColorFormatCaps.bRgb)
+    {
+        dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_RGB;
+    }
+
+    if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCr444)
+    {
+        dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444;
+    }
+    if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422)
+    {
+        dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422;
+    }
+    if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422)
+    {
+        dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422;
+    }
+    if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420)
+    {
+        dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420;
+    }
+
+    if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_16)
+    {
+        dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16;
+    }
+
+    if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_8)
+    {
+        dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8;
+    }
+
+    if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_4)
+    {
+        dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4;
+    }
+
+    if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_2)
+    {
+        dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2;
+    }
+
+    if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1)
+    {
+        dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1;
+    }
+
+    // Decoder color depth mask
+    if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_12)
+    {
+        dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS;
+    }
+
+    if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_10)
+    {
+        dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS;
+    }
+
+    if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_8)
+    {
+        dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS;
+    }
+
+    dscInfo->sinkCaps.maxSliceWidth = dev->dscCaps.dscMaxSliceWidth;
+    dscInfo->sinkCaps.sliceCountSupportedMask = dev->dscCaps.sliceCountSupportedMask;
+    dscInfo->sinkCaps.maxNumHztSlices = dev->dscCaps.maxSlicesPerSink;
+    dscInfo->sinkCaps.lineBufferBitDepth = dev->dscCaps.lineBufferBitDepth;
+    dscInfo->sinkCaps.bBlockPrediction = dev->dscCaps.bDscBlockPredictionSupport;
+    dscInfo->sinkCaps.algorithmRevision.versionMajor = dev->dscCaps.versionMajor;
+    dscInfo->sinkCaps.algorithmRevision.versionMinor = dev->dscCaps.versionMinor;
+    dscInfo->sinkCaps.peakThroughputMode0 = dev->dscCaps.dscPeakThroughputMode0;
+    dscInfo->sinkCaps.peakThroughputMode1 = dev->dscCaps.dscPeakThroughputMode1;
+    dscInfo->sinkCaps.maxBitsPerPixelX16 = dev->dscCaps.maxBitsPerPixelX16;
+
+    if (main->isEDP())
+    {
+        // If eDP panel does not populate peak DSC throughput, use _MODE0_340.
+        if (!dscInfo->sinkCaps.peakThroughputMode0)
+        {
+            dscInfo->sinkCaps.peakThroughputMode0 = NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_340;
+        }
+
+        // If eDP panel does not populate max slice width, use 2560.
+        if (!dscInfo->sinkCaps.maxSliceWidth)
+        {
+            dscInfo->sinkCaps.maxSliceWidth = 2560;
+        }
+    }
+}
+
+void ConnectorImpl::populateForcedDscParams(DSC_INFO* dscInfo, DSC_INFO::FORCED_DSC_PARAMS * forcedParams)
+{
+    if (forcedParams)
+    {
+        dscInfo->forcedDscParams.sliceWidth = forcedParams->sliceWidth;
+        dscInfo->forcedDscParams.sliceHeight = forcedParams->sliceHeight;
+        dscInfo->forcedDscParams.sliceCount = forcedParams->sliceCount;
+        dscInfo->forcedDscParams.dscRevision = forcedParams->dscRevision;
+    }
+}
+
+void ConnectorImpl::populateDscCaps(DSC_INFO* dscInfo, DeviceImpl * dev, DSC_INFO::FORCED_DSC_PARAMS * forcedParams)
+{
+    // Sink DSC capabilities
+    populateDscSinkCaps(dscInfo, dev);
+
+    // GPU DSC capabilities
+    populateDscGpuCaps(dscInfo);
+
+    // Forced DSC params
+    populateForcedDscParams(dscInfo, forcedParams);
+}
+
+bool ConnectorImpl::endCompoundQuery()
+{
+    DP_ASSERT(compoundQueryActive && "Spurious compoundQuery end.");
+    compoundQueryActive = false;
+    return compoundQueryResult;
+}
+
+//
+// Set link to HDMI mode
+//
+void ConnectorImpl::enableLinkHandsOff()
+{
+    if (isLinkQuiesced)
+    {
+        DP_ASSERT(0 && "Link is already quiesced.");
+        return;
+    }
+
+    isLinkQuiesced = true;
+
+    // Set the Lane Count to 0 to shut down the link.
+    powerdownLink();
+}
+
+//
+// Restore from HDMI mode
+//
+void ConnectorImpl::releaseLinkHandsOff()
+{
+    if (!isLinkQuiesced)
+    {
+        DP_ASSERT(0 && "Link is already in use.");
+        return;
+    }
+
+    isLinkQuiesced = false;
+    assessLink();
+}
+
+//
+// Timer callback for event management
+// Uses: fireEvents()
+void ConnectorImpl::expired(const void * tag)
+{
+    if (tag == &tagFireEvents)
+        fireEventsInternal();
+    else
+        DP_ASSERT(0);
+}
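enableLinkHandsOff()/releaseLinkHandsOff() above form a bracket that a client holds while the shared connector is driven in TMDS/HDMI mode. A minimal usage sketch follows; the DpHdmiBracket wrapper is hypothetical, for illustration only, and only relies on the two calls shown above.

    // Hypothetical RAII bracket around the hands-off calls above, so the
    // DP link is always re-assessed even if the TMDS path exits early.
    class DpHdmiBracket
    {
    public:
        explicit DpHdmiBracket(ConnectorImpl *conn) : conn_(conn)
        {
            conn_->enableLinkHandsOff();    // quiesce: lane count forced to 0
        }
        ~DpHdmiBracket()
        {
            conn_->releaseLinkHandsOff();   // back to DP: triggers assessLink()
        }
    private:
        ConnectorImpl *conn_;
    };

A caller would construct the bracket, program the SOR for TMDS inside the scope, and let the destructor restore DP.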
+
+// Generate Events.
+// useTimer specifies whether we fire the events on the timer
+// context, or this context.
+void ConnectorImpl::fireEvents()
+{
+    bool eventsPending = false;
+
+    // Don't fire any events if we're not done with the modeset
+    if (!intransitionGroups.isEmpty())
+    {
+        return;
+    }
+
+    // Walk through the devices looking for state changes
+    for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next)
+    {
+        DeviceImpl * dev = (DeviceImpl *)e;
+
+        if (dev->isPendingNewDevice() ||
+            dev->isPendingLostDevice() ||
+            dev->isPendingCableOk() ||
+            dev->isPendingZombie() ||
+            dev->isPendingHDCPCapDone())
+            eventsPending = true;
+    }
+
+    // If there were any, queue an immediate callback to handle them
+    if (eventsPending || isDiscoveryDetectComplete)
+    {
+        // Queue the fireEventsInternal.
+        // It's critical we don't allow this to be processed in a sleep
+        // since DD may do a modeset in response
+        timer->queueCallback(this, &tagFireEvents, 0, false /* not allowed in sleep */);
+    }
+}
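fireEvents() defers its work by queueing a callback keyed on the address of tagFireEvents; expired() above then compares the tag pointer to decide what to run. A generic, standalone sketch of this tag-pointer dispatch pattern (not the DP library's Timer implementation):

    #include <cstdio>

    struct TimerClient
    {
        virtual void expired(const void *tag) = 0;
        virtual ~TimerClient() = default;
    };

    class Connector : public TimerClient
    {
        int tagFireEvents;   // only the address is used as a key, never the value
    public:
        const void *fireEventsTag() const { return &tagFireEvents; }
        void expired(const void *tag) override
        {
            if (tag == &tagFireEvents)
                std::puts("fireEventsInternal()");
            // an unknown tag would be a programming error (DP_ASSERT(0) above)
        }
    };

    int main()
    {
        Connector c;
        // A real timer stores (client, tag, delay) and calls back later;
        // here we invoke the expiry directly to show the dispatch.
        c.expired(c.fireEventsTag());
        return 0;
    }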
+
+void ConnectorImpl::fireEventsInternal()
+{
+    ListElement * next;
+    Address::StringBuffer sb, sb1;
+    DP_USED(sb);
+    DP_USED(sb1);
+    for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next)
+    {
+        next = e->next;
+        DeviceImpl * dev = (DeviceImpl *)e;
+
+        if (dev->isPendingLostDevice())
+        {
+            //
+            // For bug 2335599, where the connected monitor is switched to MST
+            // from SST after S3 resume, we need to disconnect the SST monitor
+            // early, before adding MST monitors. This prevents the client from
+            // later mistaking the disconnection of the SST monitor for that of
+            // the parent of the MST monitors, which would wrongly disconnect
+            // the MST monitors too.
+            //
+            if (!(!dev->multistream && linkUseMultistream()) &&
+                bDeferNotifyLostDevice)
+            {
+                continue;
+            }
+            dev->shadow.plugged = false;
+            DP_LOG(("DPCONN> Lost device %s", dev->address.toString(sb)));
+            Address::NvU32Buffer addrBuffer;
+            dpMemZero(addrBuffer, sizeof(addrBuffer));
+            dev->address.toNvU32Buffer(addrBuffer);
+            NV_DPTRACE_WARNING(LOST_DEVICE, dev->address.size(), addrBuffer[0], addrBuffer[1],
+                               addrBuffer[2], addrBuffer[3]);
+            sink->lostDevice(dev);
+#if defined(DEBUG)
+            // Assert that this device is not contained in any groups.
+            List* groupLists[] = {
+                &activeGroups,
+                &inactiveGroups
+            };
+
+            for (unsigned i = 0; i < sizeof(groupLists) / sizeof(groupLists[0]); i++)
+            {
+                List *groupList = groupLists[i];
+                for (ListElement *e = groupList->begin(); e != groupList->end(); e = e->next)
+                {
+                    GroupImpl *g = (GroupImpl *)e;
+                    DP_ASSERT(!g->contains(dev));
+                }
+            }
+#endif
+            delete dev;
+            continue;
+        }
+
+        if (dev->isPendingCableOk())
+        {
+            dev->shadow.cableOk = dev->isCableOk();
+            sink->notifyCableOkStateChange(dev, dev->shadow.cableOk);
+        }
+
+        if (dev->isPendingZombie())
+        {
+            dev->shadow.zombie = dev->isZombie();
+            if (dev->complianceDeviceEdidReadTest)
+            {
+                // the zombie event will be hidden for DD/OS
+                DP_LOG(("DPCONN> Compliance: Device Internal Zombie? : %d 0x%x", dev->shadow.zombie ? 1 : 0, dev));
+                return;
+            }
+            bMitigateZombie = false;
+            DP_LOG(("DPCONN> Zombie? : %d 0x%x", dev->shadow.zombie ? 1 : 0, dev));
+            sink->notifyZombieStateChange(dev, dev->shadow.zombie);
+        }
+
+        if (dev->isPendingHDCPCapDone())
+        {
+            DP_ASSERT(dev->isHDCPCap != Indeterminate && "HDCPCap reading is not done!!");
+            if (dev->isHDCPCap != Indeterminate)
+            {
+                // Notify RM about the new Bcaps.
+                if (dev->isActive())
+                {
+                    RmDfpCache dfpCache = {0};
+                    dfpCache.updMask = 0;
+                    dfpCache.bcaps = *dev->BCAPS;
+                    for (unsigned i = 0; i < HDCP_KSV_SIZE; i++)
+                        dfpCache.bksv[i] = dev->BKSV[i];
+
+                    dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS);
+                    dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV);
+                    dev->connector->main->rmUpdateDynamicDfpCache(dev->activeGroup->headIndex, &dfpCache, False);
+                }
+
+                sink->notifyHDCPCapDone(dev, !!dev->isHDCPCap);
+                DP_LOG(("DPCONN> Notify HDCP cap Done : %x", !!dev->isHDCPCap));
+            }
+            else
+            {
+                sink->notifyHDCPCapDone(dev, false);
+            }
+
+            dev->shadow.hdcpCapDone = true;
+        }
+
+        bool mustDisconnect = dev->isMustDisconnect();
+        if (dev->shadow.mustDisconnect != mustDisconnect && mustDisconnect)
+        {
+            dev->shadow.mustDisconnect = mustDisconnect;
+            sink->notifyMustDisconnect(dev->activeGroup);
+        }
+    }
+
+    for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next)
+    {
+        next = e->next;
+        DeviceImpl * dev = (DeviceImpl *)e;
+
+        if (dev->isPendingNewDevice())
+        {
+            if (bReportDeviceLostBeforeNew && bDeferNotifyLostDevice)
+            {
+                // Try to find a device pending lost on the same address
+                DeviceImpl* _device = NULL;
+                for (ListElement * le = deviceList.begin(); le != deviceList.end(); le = le->next)
+                {
+                    _device = (DeviceImpl*)le;
+                    if ((_device->address == dev->address) && (_device->plugged != dev->plugged))
+                        break;
+                }
+                if (_device &&
+                    (_device->address == dev->address) &&
+                    (_device->plugged != dev->plugged))
+                {
+                    // If yes, then we need to report this lost device first.
+                    _device->shadow.plugged = false;
+                    DP_LOG(("DPCONN> Lost device 0x%x", _device));
+                    sink->lostDevice(_device);
+                    DP_ASSERT(!_device->activeGroup && "DD didn't remove panel from group");
+                    delete _device;
+                }
+            }
+            dev->shadow.plugged = true;
+            if (dev->isDSCPossible())
+            {
+                DP_LOG(("DPCONN> New device %s | Native DSC Capability - %s | DSC Decompression Device - %s",
+                        dev->address.toString(sb),
+                        (dev->isDSCSupported() ? "Capable" : "Not Capable"),
+                        (dev->devDoingDscDecompression) ? dev->devDoingDscDecompression->address.toString(sb1) : "NA"));
+            }
+            else
+            {
+                DP_LOG(("DPCONN> New device %s", dev->address.toString(sb)));
+            }
+
+            Address::NvU32Buffer addrBuffer;
+            dpMemZero(addrBuffer, sizeof(addrBuffer));
+            dev->address.toNvU32Buffer(addrBuffer);
+            NV_DPTRACE_INFO(NEW_SINK_REPORTED, dev->address.size(), addrBuffer[0], addrBuffer[1],
+                            addrBuffer[2], addrBuffer[3]);
+
+            sink->newDevice(dev);
+        }
+    }
+
+    if (isDiscoveryDetectComplete)
+    {
+        //
+        // Bug 200236666 :
+        // isDiscoveryDetectComplete can be set when we process a new device after
+        // completing the last EDID read. In such a scenario we will send notifyDetectComplete
+        // before newDevice for that sink has been sent to DD:
+        //  a. sink->newDevice(dev) above can trigger the pending EDID read
+        //  b. after the last EDID read completes (::mstEdidCompleted), ::processNewDevice
+        //     will set the plugged flag for the new device
+        //  c. this will queue a pendingNewDevice event callback for the last device pending discovery
+        //  d. the isDiscoveryDetectComplete flag set during b. will trigger a
+        //     premature notifyDetectComplete to DD before the pendingNewDevice callback
+        // To fix the above scenario: check if there is any newly pending new/lost
+        // device; if yes, defer sending notifyDetectComplete till the next callback.
+        //
+        bool bDeferNotifyDetectComplete = false;
+        for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next)
+        {
+            next = e->next;
+            DeviceImpl * dev = (DeviceImpl *)e;
+
+            if (dev->isPendingNewDevice() || dev->isPendingLostDevice())
+            {
+                bDeferNotifyDetectComplete = true;
+                DP_ASSERT(0 && "DP-CONN> Defer notifyDetectComplete as a new/lost device is pending!");
+                break;
+            }
+        }
+
+        if (!bDeferNotifyDetectComplete)
+        {
+            isDiscoveryDetectComplete = false;
+            DP_LOG(("DP-CONN> NotifyDetectComplete"));
+            sink->notifyDetectComplete();
+        }
+    }
+}
+
+//
+// This call will be deprecated as soon as all clients move to the new API
+//
+bool ConnectorImpl::isHeadShutDownNeeded(Group * target,               // Group of panels we're attaching to this head
+                                         unsigned headIndex,
+                                         unsigned twoChannelAudioHz,   // if you need 192kHz stereo specify 192000 here
+                                         unsigned eightChannelAudioHz, // same setting for multi channel audio.
+                                                                       // DisplayPort encodes 3-8 channel streams as 8 channel
+                                         NvU64 pixelClockHz,           // requested pixel clock for the mode
+                                         unsigned rasterWidth,
+                                         unsigned rasterHeight,
+                                         unsigned rasterBlankStartX,
+                                         unsigned rasterBlankEndX,
+                                         unsigned depth)
+{
+    ModesetInfo modesetInfo = ModesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz,
+                                          rasterWidth, rasterHeight, (rasterBlankStartX - rasterBlankEndX),
+                                          0 /*surfaceHeight*/, depth, rasterBlankStartX, rasterBlankEndX);
+    return isHeadShutDownNeeded(target, headIndex, modesetInfo);
+}
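Note how the deprecated wrapper above derives the active width as (rasterBlankStartX - rasterBlankEndX) and passes 0 for surfaceHeight. A worked example with CEA-861 1080p60 timing; the blanking positions are illustrative, and the derived hBlank matches the warData.dpData.hBlank computation used earlier:

    #include <cstdio>

    int main()
    {
        // CEA-861 1920x1080@60: total raster 2200x1125, pixel clock 148.5 MHz.
        unsigned rasterWidth       = 2200;
        unsigned rasterBlankStartX = 2008;  // illustrative: where blanking begins
        unsigned rasterBlankEndX   = 88;    // illustrative: where active pixels begin

        unsigned activeWidth = rasterBlankStartX - rasterBlankEndX;  // 1920
        unsigned hBlank      = rasterWidth - activeWidth;            // 280

        std::printf("active %u, hBlank %u\n", activeWidth, hBlank);
        return 0;
    }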
+
+//
+// Head shutdown will be needed if any of the following conditions are true:
+// a. Link rate is going lower than current
+// b. Head is activated as MST
+//
+bool ConnectorImpl::isHeadShutDownNeeded(Group * target,   // Group of panels we're attaching to this head
+                                         unsigned headIndex,
+                                         ModesetInfo modesetInfo)
+{
+    if (linkUseMultistream())
+    {
+        return true;
+    }
+    if (activeGroups.isEmpty())
+    {
+        return false;
+    }
+
+    bool bHeadShutdownNeeded = true;
+    LinkConfiguration lowestSelected;
+
+    // Force highest link config in SST
+    bool bSkipLowestConfigCheck = false;
+    bool bIsModeSupported = false;
+    LinkConfiguration maxLc = getMaxLinkConfig();
+    lowestSelected = maxLc;
+    GroupImpl* targetImpl = (GroupImpl*)target;
+
+    // Certain panels only work when link trained to the highest linkConfig in SST mode.
+    for (Device * i = enumDevices(0); i; i = enumDevices(i))
+    {
+        DeviceImpl * dev = (DeviceImpl *)i;
+        if (dev->forceMaxLinkConfig())
+        {
+            bSkipLowestConfigCheck = true;
+        }
+    }
+
+    //
+    // Check if there is a special request from the client.
+    // If so, skip lowering down the link config.
+    //
+    if (this->preferredLinkConfig.isValid())
+    {
+        lowestSelected = preferredLinkConfig;
+        bSkipLowestConfigCheck = true;
+    }
+
+    // If the flag is set, simply skip downgrading to the lowest possible linkConfig.
+    if (!bSkipLowestConfigCheck)
+    {
+        LinkConfiguration lConfig = lowestSelected;
+
+        bIsModeSupported = getValidLowestLinkConfig(lConfig, lowestSelected, modesetInfo);
+    }
+    else
+    {
+        if (this->willLinkSupportModeSST(lowestSelected, modesetInfo))
+        {
+            bIsModeSupported = true;
+        }
+    }
+
+    if (bIsModeSupported)
+    {
+        //
+        // This is to handle a case where we query the current link config
+        // from UEFI during boot time and it fails to return. Currently
+        // we do not handle this scenario and the head is not shut down
+        // though it's actually required. This is to allow head shutdown
+        // in such cases.
+        //
+        if (!isLinkActive())
+        {
+            return true;
+        }
+
+        // For dual DP while changing link config, we need to shut
+        // down the head
+        if (lowestSelected.lanes == 8)
+        {
+            // If link config is changing, head shutdown will be needed.
+            if ((activeLinkConfig.lanes == lowestSelected.lanes) &&
+                (activeLinkConfig.peakRate == lowestSelected.peakRate))
+            {
+                bHeadShutdownNeeded = false;
+            }
+        }
+        //
+        // If the link config is going lower, we need to shut down the
+        // head. If we link train to a lower config before reducing the
+        // mode, we will hang the HW since the head would still be driving
+        // the higher mode at the time of link train.
+        //
+        else if ((lowestSelected.peakRate * lowestSelected.lanes) >= (activeLinkConfig.peakRate * activeLinkConfig.lanes))
+        {
+            bHeadShutdownNeeded = false;
+        }
+    }
+    else
+    {
+        DP_ASSERT(0 && "DP-CONN> This mode is not possible at any link configuration!");
+    }
+
+    if (targetImpl)
+    {
+        targetImpl->bIsHeadShutdownNeeded = bHeadShutdownNeeded;
+    }
+
+    return bHeadShutdownNeeded;
+}
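getValidLowestLinkConfig(), used above, walks candidate link configurations from lowest to highest bandwidth and picks the first one that supports the mode. A simplified sketch of that search, based on how the function is called here; it is not the actual implementation:

    #include <vector>
    #include <algorithm>
    #include <cstdint>

    struct LinkCfg { unsigned lanes; uint64_t peakRatePerLane; };

    // Predicate standing in for willLinkSupportModeSST(...)
    using ModeFits = bool (*)(const LinkCfg &);

    static bool pickLowestValid(std::vector<LinkCfg> cfgs, ModeFits fits, LinkCfg &out)
    {
        // Sort by total link bandwidth, ascending.
        std::sort(cfgs.begin(), cfgs.end(), [](const LinkCfg &a, const LinkCfg &b) {
            return a.lanes * a.peakRatePerLane < b.lanes * b.peakRatePerLane;
        });
        for (const LinkCfg &c : cfgs)
        {
            if (fits(c)) { out = c; return true; }  // first (lowest) config that fits
        }
        return false;  // mode not possible at any configuration
    }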
+
+bool ConnectorImpl::isLinkTrainingNeededForModeset(ModesetInfo modesetInfo)
+{
+    // Force highest link config in SST
+    bool bSkipLowestConfigCheck = false;
+    bool bIsModeSupported = false;
+    LinkConfiguration lowestSelected = getMaxLinkConfig();
+
+    if (linkUseMultistream())
+    {
+        if (!isLinkActive())
+        {
+            // If MST, we always need to link train if the link is not active
+            return true;
+        }
+        else if (getMaxLinkConfig() != activeLinkConfig)
+        {
+            //
+            // If the link is active, we have to retrain if the active Link Config
+            // is not the highest possible Link Config.
+            //
+            return true;
+        }
+        else
+        {
+            //
+            // We don't have to retrain if the link is active and at the highest
+            // possible config, since for MST we should always link train to the
+            // highest possible Link Config.
+            //
+            return false;
+        }
+    }
+
+    //
+    // Link training is needed if the link is not alive, OR alive but inactive,
+    // i.e., lane status reports symbol lock/interlane align/CR failures
+    //
+    if (isLinkLost() || !isLinkActive())
+    {
+        return true;
+    }
+
+    //
+    // Link training is needed if the link config was previously guessed (not assessed by the driver).
+    // The link config is marked as guessed in the below cases -
+    //   a. Invalid link rate returned by UEFI
+    //   b. When max link config is HBR3 and currently assessed by UEFI != HBR3
+    //   c. If a SOR is not assigned to the display during link assessment
+    //
+    if (this->linkGuessed)
+    {
+        return true;
+    }
+
+    // Certain panels only work when link trained to the highest linkConfig in SST mode.
+    for (Device * i = enumDevices(0); i; i = enumDevices(i))
+    {
+        DeviceImpl * dev = (DeviceImpl *)i;
+        if (dev->forceMaxLinkConfig())
+        {
+            bSkipLowestConfigCheck = true;
+        }
+    }
+
+    //
+    // Check if there is a special request from the client.
+    // If so, skip lowering down the link config.
+    //
+    if (this->preferredLinkConfig.isValid())
+    {
+        lowestSelected = preferredLinkConfig;
+        bSkipLowestConfigCheck = true;
+    }
+
+    // If the flag is set, simply skip downgrading to the lowest possible linkConfig.
+    if (!bSkipLowestConfigCheck)
+    {
+        LinkConfiguration lConfig = lowestSelected;
+
+        bIsModeSupported = getValidLowestLinkConfig(lConfig, lowestSelected, modesetInfo);
+    }
+    else
+    {
+        if (this->willLinkSupportModeSST(lowestSelected, modesetInfo))
+        {
+            bIsModeSupported = true;
+        }
+    }
+
+    //
+    // Link training is needed if the requested mode/link config is
+    // different from the active mode/link config
+    //
+    if (bIsModeSupported)
+    {
+        if ((activeLinkConfig.lanes != lowestSelected.lanes) ||
+            (activeLinkConfig.peakRate != lowestSelected.peakRate))
+        {
+            return true;
+        }
+    }
+    else
+    {
+        DP_ASSERT(0 && "DP-CONN> This mode is not possible at any link configuration!");
+    }
+
+    return false;
+}
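The two queries above apply related but different tests: link training is needed whenever the selected lane count or peak rate differs from the active one, while head shutdown is needed only when total link bandwidth decreases (training lower while the head still drives the higher mode could hang the HW). A compact restatement of those two rules, as a sketch only:

    #include <cstdint>

    struct Cfg { unsigned lanes; uint64_t peakRate; };

    static inline uint64_t bw(const Cfg &c) { return c.lanes * c.peakRate; }

    // Mirrors isLinkTrainingNeededForModeset(): any difference forces a retrain.
    static bool retrainNeeded(const Cfg &active, const Cfg &selected)
    {
        return active.lanes != selected.lanes || active.peakRate != selected.peakRate;
    }

    // Mirrors the isHeadShutDownNeeded() bandwidth rule for the SST case.
    static bool headShutdownNeeded(const Cfg &active, const Cfg &selected)
    {
        return bw(selected) < bw(active);   // only a downgrade needs the head down
    }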
+
+bool DisplayPort::SetConfigSingleHeadMultiStreamMode(Group **targets,
+                                                     NvU32 displayIDs[],
+                                                     NvU32 numStreams,
+                                                     DP_SINGLE_HEAD_MULTI_STREAM_MODE mode,
+                                                     bool bSetConfig,
+                                                     NvU8 vbiosPrimaryDispIdIndex,
+                                                     bool bEnableAudioOverRightPanel)
+{
+    GroupImpl *pTargetImpl = NULL;
+    ConnectorImpl *pConnectorImpl = NULL;
+    ConnectorImpl *pPrevConnectorImpl = NULL;
+
+    if (numStreams > NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS || numStreams <= 0)
+    {
+        DP_LOG(("DP-CONN> ERROR: in configuring single head multistream mode "
+                "invalid number of streams"));
+        return false;
+    }
+
+    for (NvU32 iter = 0; iter < numStreams; iter++)
+    {
+        pTargetImpl = (GroupImpl*)targets[iter];
+
+        if (pTargetImpl == NULL)
+        {
+            DP_LOG(("DP-CONN> ERROR: in configuring single head multistream mode: "
+                    "invalid target passed by client"));
+            return false;
+        }
+
+        pConnectorImpl = (ConnectorImpl*)(pTargetImpl->parent);
+
+        if (bSetConfig)
+        {
+            if (DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST == mode)
+            {
+                //
+                // Detach any active firmware groups before configuring singleHead dual SST
+                //
+                if (pTargetImpl->isHeadAttached() && pTargetImpl->headInFirmware)
+                {
+                    pConnectorImpl->notifyDetachBegin(NULL);
+                    pConnectorImpl->notifyDetachEnd();
+                }
+
+                if (displayIDs[iter] != pConnectorImpl->main->getRootDisplayId())
+                {
+                    DP_ASSERT(0 && "DP-CONN> invalid single head multistream SST configuration !");
+                    return false;
+                }
+
+                // 0th index is the primary connector index,
+                // 1st is the secondary connector index, and so on
+                if (iter > DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)
+                {
+                    pPrevConnectorImpl->pCoupledConnector = pConnectorImpl;
+                    if (iter == (numStreams - 1))
+                    {
+                        pConnectorImpl->pCoupledConnector =
+                            (ConnectorImpl*)((GroupImpl*)targets[DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY])->parent;
+                    }
+                    // Clear the secondary connector's link guessed state
+                    pConnectorImpl->linkGuessed = false;
+                }
+
+                pPrevConnectorImpl = pConnectorImpl;
+            }
+
+            pTargetImpl->singleHeadMultiStreamMode = mode;
+            pTargetImpl->singleHeadMultiStreamID = (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID)iter;
+
+            // Save the 'Audio over Right Panel' configuration in the Connector Impl.
+            // Use this configuration when the SF gets programmed.
+            if (bEnableAudioOverRightPanel)
+            {
+                pConnectorImpl->bAudioOverRightPanel = true;
+            }
+        }
+        else
+        {
+            pTargetImpl->singleHeadMultiStreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE;
+            pTargetImpl->singleHeadMultiStreamID = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY;
+            pConnectorImpl->pCoupledConnector = NULL;
+            pConnectorImpl->bAudioOverRightPanel = false;
+        }
+    }
+
+    pConnectorImpl->main->configureSingleHeadMultiStreamMode(displayIDs,
+                                                             numStreams,
+                                                             (NvU32)mode,
+                                                             bSetConfig,
+                                                             vbiosPrimaryDispIdIndex);
+
+    return true;
+}
+
+//
+// This call will be deprecated as soon as all clients move to the new API
+//
+bool ConnectorImpl::notifyAttachBegin(Group * target,                  // Group of panels we're attaching to this head
+                                      unsigned headIndex,
+                                      unsigned twoChannelAudioHz,      // if you need 192kHz stereo specify 192000 here
+                                      unsigned eightChannelAudioHz,    // same setting for multi channel audio.
+                                                                       // DisplayPort encodes 3-8 channel streams as 8 channel
+                                      NvU64 pixelClockHz,              // requested pixel clock for the mode
+                                      unsigned rasterWidth,
+                                      unsigned rasterHeight,
+                                      unsigned rasterBlankStartX,
+                                      unsigned rasterBlankEndX,
+                                      unsigned depth)
+{
+    ModesetInfo modesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz, rasterWidth,
+                            rasterHeight, (rasterBlankStartX - rasterBlankEndX), 0 /*surfaceHeight*/,
+                            depth, rasterBlankStartX, rasterBlankEndX);
+
+    DpModesetParams modesetParams(headIndex, modesetInfo);
+
+    return notifyAttachBegin(target, modesetParams);
+}
+
+bool ConnectorImpl::setDeviceDscState(Device * dev, bool bEnableDsc)
+{
+    if (!((DeviceImpl *)dev)->isDSCPossible())
+    {
+        return true;
+    }
+
+    if (bEnableDsc)
+    {
+        if (!(((DeviceImpl *)dev)->setDscEnable(true /*bEnableDsc*/)))
+        {
+            DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!");
+            return false;
+        }
+    }
+    else
+    {
+        bool bCurrDscEnable = false;
+        // Get the current DSC enable state
+        if (!((DeviceImpl *)dev)->getDscEnable(&bCurrDscEnable))
+        {
+            DP_LOG(("DP> Not able to get DSC Enable State!"));
+        }
+
+        if (bCurrDscEnable)
+        {
+            // Before disabling DSC, check whether any active device with the same parent has DSC enabled.
+            bool bDisableDsc = true;
+            for (Device * i = enumDevices(0); i; i = enumDevices(i))
+            {
+                if ((dev != i) && (((DeviceImpl *)i)->parent == ((DeviceImpl *)dev)->parent) &&
+                    (((DeviceImpl *)i)->activeGroup) &&
+                    (((DeviceImpl *)i)->activeGroup->isHeadAttached()) &&
+                    (((DeviceImpl *)i)->activeGroup->lastModesetInfo.bEnableDsc))
+                {
+                    DP_LOG(("Parent is shared among devices and another device is active, so we can't disable DSC"));
+                    bDisableDsc = false;
+                    break;
+                }
+            }
+
+            if (bDisableDsc && !((DeviceImpl *)dev)->setDscEnable(false /*bEnableDsc*/))
+            {
+                DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!");
+                return false;
+            }
+        }
+    }
+    return true;
+}
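A sketch of how a client might drive SetConfigSingleHeadMultiStreamMode() above for single-head dual-SST. The groups, display IDs and index values are hypothetical placeholders; only the parameter order comes from the signature above:

    // Hypothetical call site: one head scanning out across two SST connectors.
    // primaryGroup/secondaryGroup would come from the two connectors' clients.
    bool configureDualSst(DisplayPort::Group *primaryGroup,
                          DisplayPort::Group *secondaryGroup,
                          NvU32 primaryDisplayId, NvU32 secondaryDisplayId)
    {
        DisplayPort::Group *targets[]    = { primaryGroup, secondaryGroup };
        NvU32               displayIds[] = { primaryDisplayId, secondaryDisplayId };

        return DisplayPort::SetConfigSingleHeadMultiStreamMode(
            targets,
            displayIds,
            2,                                    // numStreams: primary + secondary
            DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST,
            true,                                 // bSetConfig: apply (false tears down)
            0,                                    // vbiosPrimaryDispIdIndex (assumed)
            false);                               // no audio over the right panel
    }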
+
+//
+// Notify library before/after modeset (update)
+// Here is what NAB essentially does:
+//   0. Makes sure TMDS is not attached
+//   1. Trains link to optimized link config ("optimized" depends on DP1.1, DP1.2)
+//   2. Performs quick watermark check for IMP. If IMP is not possible, forces link, zombies devices
+//   3. If anything of the above fails, marks devices in the given group as zombies
+//
+// Return : true  - NAB passed
+//          false - NAB failed due to invalid params or link training failure
+//                  Link configs are forced in case of link training failure
+//
+bool ConnectorImpl::notifyAttachBegin(Group * target,   // Group of panels we're attaching to this head
+                                      const DpModesetParams &modesetParams)
+{
+    unsigned twoChannelAudioHz = modesetParams.modesetInfo.twoChannelAudioHz;
+    unsigned eightChannelAudioHz = modesetParams.modesetInfo.eightChannelAudioHz;
+    NvU64 pixelClockHz = modesetParams.modesetInfo.pixelClockHz;
+    unsigned rasterWidth = modesetParams.modesetInfo.rasterWidth;
+    unsigned rasterHeight = modesetParams.modesetInfo.rasterHeight;
+    unsigned rasterBlankStartX = modesetParams.modesetInfo.rasterBlankStartX;
+    unsigned rasterBlankEndX = modesetParams.modesetInfo.rasterBlankEndX;
+    unsigned depth = modesetParams.modesetInfo.depth;
+    bool bLinkTrainingStatus = true;
+    bool bEnableDsc = modesetParams.modesetInfo.bEnableDsc;
+    bool bEnableFEC;
+
+    if (preferredLinkConfig.isValid())
+    {
+        bEnableFEC = preferredLinkConfig.bEnableFEC;
+    }
+    else
+    {
+        DeviceImpl * nativeDev = findDeviceInList(Address());
+        if (main->isEDP() && nativeDev)
+        {
+            // eDP can support DSC with and without FEC
+            bEnableFEC = bEnableDsc && nativeDev->isFECSupported();
+        }
+        else
+        {
+            bEnableFEC = bEnableDsc;
+        }
+    }
+
+    DP_LOG(("DPCONN> Notify Attach Begin (Head %d, pclk %d raster %d x %d %d bpp",
+            modesetParams.headIndex, pixelClockHz, rasterWidth, rasterHeight, depth));
+    NV_DPTRACE_INFO(NOTIFY_ATTACH_BEGIN, modesetParams.headIndex, pixelClockHz, rasterWidth, rasterHeight,
+                    depth, bEnableDsc, bEnableFEC);
+
+    if (!depth || !pixelClockHz)
+    {
+        DP_ASSERT(!"DP-CONN> Params with zero value passed to query!");
+        return false;
+    }
+
+    if ((modesetParams.modesetInfo.mode == DSC_DUAL) ||
+        (modesetParams.modesetInfo.mode == DSC_DROP))
+    {
+        if ((modesetParams.headIndex == NV_SECONDARY_HEAD_INDEX_1) ||
+            (modesetParams.headIndex == NV_SECONDARY_HEAD_INDEX_3))
+        {
+            DP_ASSERT(!"DP-CONN> For Two Head One OR, client should send Primary Head index!");
+            return false;
+        }
+    }
+
+    for (Device * dev = target->enumDevices(0); dev; dev = target->enumDevices(dev))
+    {
+        Address::StringBuffer buffer;
+        DP_USED(buffer);
+        DP_LOG(("DPCONN> | %s (%s) |", dev->getTopologyAddress().toString(buffer), dev->isVideoSink() ? "VIDEO" : "BRANCH"));
+    }
+
+    if (firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware)
+    {
+        DP_ASSERT(bIsUefiSystem || (0 && "DPCONN> Firmware still active on head. De-activating"));
+    }
+
+    GroupImpl* targetImpl = (GroupImpl*)target;
+
+    if (bEnableDsc)
+    {
+        DP_LOG(("DPCONN> DSC Mode = %s", (modesetParams.modesetInfo.mode == DSC_SINGLE) ?
"SINGLE" : "DUAL")); + targetImpl->dscModeRequest = modesetParams.modesetInfo.mode; + } + + DP_ASSERT(!(targetImpl->isHeadAttached() && targetImpl->bIsHeadShutdownNeeded) && "Head should have been shut down but it is still active!"); + + targetImpl->headInFirmware = false; + if (firmwareGroup) + { + ((GroupImpl *)firmwareGroup)->headInFirmware = false; + } + + if (firmwareGroup && activeGroups.contains((GroupImpl*)firmwareGroup)) + { + if (((GroupImpl *)firmwareGroup)->isHeadAttached()) + { + targetImpl->setHeadAttached(true); + } + activeGroups.remove((GroupImpl*)firmwareGroup); + inactiveGroups.insertBack((GroupImpl*)firmwareGroup); + } + + if (this->linkGuessed && (targetImpl->singleHeadMultiStreamMode != DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) + { + DP_ASSERT(!(this->linkGuessed) && "Link was not assessed previously. Probable reason: system was not in driver mode. Assessing now."); + this->assessLink(); + } + + DP_ASSERT(this->isLinkQuiesced == 0 && "According to bracketting calls TMDS/alternate DP still active!"); + + // Transfer the group to active list + inactiveGroups.remove(targetImpl); + activeGroups.insertBack(targetImpl); + intransitionGroups.insertFront(targetImpl); + + targetImpl->lastModesetInfo = ModesetInfo(twoChannelAudioHz, eightChannelAudioHz, + pixelClockHz, rasterWidth, rasterHeight, + (rasterBlankStartX - rasterBlankEndX), modesetParams.modesetInfo.surfaceHeight, + depth, rasterBlankStartX, rasterBlankEndX, bEnableDsc, modesetParams.modesetInfo.mode); + + targetImpl->headIndex = modesetParams.headIndex; + targetImpl->streamIndex = main->headToStream(modesetParams.headIndex, targetImpl->singleHeadMultiStreamID); + targetImpl->colorFormat = modesetParams.colorFormat; + + DP_ASSERT(!this->isLinkQuiesced && "TMDS is attached, NABegin is impossible!"); + + // Update the FEC enabled flag according to the mode requested. + this->bFECEnable |= bEnableFEC; + highestAssessedLC.enableFEC(this->bFECEnable); + + // if failed, we're guaranteed that assessed link rate didn't meet the mode requirements + // isZombie() will catch this + bLinkTrainingStatus = trainLinkOptimized(getMaxLinkConfig()); + + // if LT is successful, see if panel supports DSC and if so, set DSC enabled/disabled + // according to the mode requested. + if(bLinkTrainingStatus) + { + for (Device * dev = target->enumDevices(0); dev; dev = target->enumDevices(dev)) + { + if(!setDeviceDscState(dev, bEnableDsc)) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + } + } + } + +// TODO: Need to check if we can completely remove DP_OPTION_HDCP_12_ENABLED and remove it + + beforeAddStream(targetImpl); + + if (linkUseMultistream()) + { + // Which pipeline to take the affect out of trigger ACT + if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != targetImpl->singleHeadMultiStreamMode) || + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == targetImpl->singleHeadMultiStreamID)) + { + main->configureTriggerSelect(targetImpl->headIndex, targetImpl->singleHeadMultiStreamID); + } + } + + if (!linkUseMultistream() || main->supportMSAOverMST()) + { + bool enableInbandStereoSignaling = false; + + DP_ASSERT(activeGroups.isEmpty() == false); + + if (main->isInbandStereoSignalingSupported()) + { + enableInbandStereoSignaling = true; + } + + // + // Bug 200362535 + // setDpStereoMSAParameters does not cache the msa params. It will immediately + // apply just the stereo specific parameters. 
+        // can toggle the msa params using the NVIDIA control panel, and in that scenario
+        // we do not get supervisor interrupts. Since SV interrupts do not occur, the
+        // msa parameters do not get applied. So, to avoid having to reboot to apply the
+        // stereo msa params, setDpStereoMSAParameters is called.
+        //
+        // setDpMSAParameters will contain all msa params, including stereo, cached.
+        // These will be applied during a supervisor interrupt. So if we do get
+        // SV interrupts later, the same stereo settings will be applied twice:
+        // first by setDpStereoMSAParameters and later by setDpMSAParameters.
+        //
+        main->setDpStereoMSAParameters(!enableInbandStereoSignaling, modesetParams.msaparams);
+        main->setDpMSAParameters(!enableInbandStereoSignaling, modesetParams.msaparams);
+    }
+
+    NV_DPTRACE_INFO(NOTIFY_ATTACH_BEGIN_STATUS, bLinkTrainingStatus);
+
+    bFromResumeToNAB = false;
+    return bLinkTrainingStatus;
+}
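notifyAttachBegin()/notifyAttachEnd() above bracket the client's modeset. A sketch of the expected calling sequence, as hypothetical client code with error handling condensed:

    // Hypothetical modeset path in a DD-style client.
    bool doModeset(ConnectorImpl *conn, DisplayPort::Group *group,
                   const DpModesetParams &params)
    {
        bool ok = conn->notifyAttachBegin(group, params);  // trains the link, IMP check

        // Program the head/SOR for the mode here. Even on NAB failure, DD may
        // proceed; zombied devices are then reported through fireEvents() later.
        bool modesetDone = ok /* && rmModeset(...) */;

        // modesetCancelled == true tells NAE that DD honored the NAB failure.
        conn->notifyAttachEnd(!modesetDone);
        return modesetDone;
    }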
+
+//
+// modesetCancelled True,  when DD respected the NAB failure and cancelled the modeset.
+//                  False, when NAB succeeded, or DD didn't honor the NAB failure
+//
+// Here is what NAE is supposed to do:
+// 1. modesetCancelled == TRUE, NAB failed:
+//    unzombie all devices and set linkForced to false; we have status quo for the next modeset
+// 2. modesetCancelled == False, NAB failed:
+//    if NAB failed, linkForced is TRUE. NAE finds zombied devices and notifies DD about them.
+// 3. modesetCancelled == False, NAB succeeded:
+//    NAE is a no-op. (but we have some special sanity code)
+//
+void ConnectorImpl::notifyAttachEnd(bool modesetCancelled)
+{
+    GroupImpl* currentModesetDeviceGroup = NULL;
+    DP_LOG(("DPCONN> Notify Attach End"));
+    NV_DPTRACE_INFO(NOTIFY_ATTACH_END);
+
+    bFromResumeToNAB = false;
+
+    if (intransitionGroups.isEmpty())
+    {
+        DP_ASSERT(0 && "INVALID STATE: Modeset Group is NULL");
+        return;
+    }
+
+    currentModesetDeviceGroup = intransitionGroups.pop();
+
+    if (modesetCancelled)
+    {
+        currentModesetDeviceGroup->setHeadAttached(false);
+    }
+
+    // Set dscModeActive to what was requested in NAB and clear dscModeRequest
+    currentModesetDeviceGroup->dscModeActive = currentModesetDeviceGroup->dscModeRequest;
+    currentModesetDeviceGroup->dscModeRequest = DSC_MODE_NONE;
+
+    currentModesetDeviceGroup->setHeadAttached(true);
+    RmDfpCache dfpCache = {0};
+    dfpCache.updMask = 0;
+    if (currentModesetDeviceGroup->isHeadAttached())
+    {
+        for (DeviceImpl * dev = (DeviceImpl *)currentModesetDeviceGroup->enumDevices(0);
+             dev; dev = (DeviceImpl *)currentModesetDeviceGroup->enumDevices(dev))
+        {
+            dfpCache.bcaps = *dev->BCAPS;
+            for (unsigned i = 0; i < HDCP_KSV_SIZE; i++)
+                dfpCache.bksv[i] = dev->BKSV[i];
+
+            dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS);
+            dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV);
+            main->rmUpdateDynamicDfpCache(dev->activeGroup->headIndex, &dfpCache, True);
+
+            // Remove this while enabling HDCP for MSC
+            break;
+        }
+    }
+
+    //
+    // Add the rest of the streams (other than primary) in notifyAE, since this can't be done
+    // unless a SOR is attached to a Head (part of modeset), and trigger ACT immediate
+    //
+    if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == currentModesetDeviceGroup->singleHeadMultiStreamMode) &&
+        (currentModesetDeviceGroup->singleHeadMultiStreamID > DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY))
+    {
+        DP_ASSERT(linkUseMultistream() && "it should be multistream link to configure single head MST");
+        hal->payloadTableClearACT();
+        hal->payloadAllocate(currentModesetDeviceGroup->streamIndex,
+                             currentModesetDeviceGroup->timeslot.begin, currentModesetDeviceGroup->timeslot.count);
+        main->configureTriggerSelect(currentModesetDeviceGroup->headIndex, currentModesetDeviceGroup->singleHeadMultiStreamID);
+        main->triggerACT();
+    }
+
+    afterAddStream(currentModesetDeviceGroup);
+
+    //
+    // Turn the Authentication/Encryption back on if it was previously on.
+    // For DP1.1, let the upstream turn it back on.
+    // For DP1.2, we should turn it back on after the modeset if it was on.
+    // The authentication will be called off during the modeset.
+    //
+    HDCPState hdcpState = {0};
+    main->configureHDCPGetHDCPState(hdcpState);
+    if ((!hdcpState.HDCP_State_Authenticated) && (isHDCPAuthOn == true)
+        && (currentModesetDeviceGroup->hdcpEnabled))
+    {
+        if (!this->linkUseMultistream())
+        {
+            currentModesetDeviceGroup->hdcpEnabled = isHDCPAuthOn = false;
+        }
+    }
+
+    fireEvents();
+}
+
+// Notify library before/after shutdown (update)
+void ConnectorImpl::notifyDetachBegin(Group * target)
+{
+    if (!target)
+        target = firmwareGroup;
+
+    NV_DPTRACE_INFO(NOTIFY_DETACH_BEGIN);
+
+    GroupImpl * group = (GroupImpl*)target;
+
+    DP_LOG(("DPCONN> Notify detach begin"));
+    DP_ASSERT((group->headInFirmware || group->isHeadAttached()) && "Disconnecting an inactive device");
+
+    // Check to see if a pattern request was on. If yes, clear the pattern.
+    PatternInfo pattern_info;
+    pattern_info.lqsPattern = hal->getPhyTestPattern();
+    // Send a control call to RM for the pattern
+    if (pattern_info.lqsPattern != LINK_QUAL_DISABLED)
+    {
+        pattern_info.lqsPattern = LINK_QUAL_DISABLED;
+        if (!main->physicalLayerSetTestPattern(&pattern_info))
+            DP_ASSERT(0 && "Could not set the PHY_TEST_PATTERN");
+    }
+
+    beforeDeleteStream(group);
+
+    //
+    // Set the trigger select so that the frontend corresponding to the stream
+    // takes the effect
+    //
+    if (linkUseMultistream())
+    {
+        main->configureTriggerSelect(group->headIndex, group->singleHeadMultiStreamID);
+
+        // Clear the payload of non-primary streams and trigger ACT immediate
+        if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == group->singleHeadMultiStreamMode) &&
+            (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY != group->singleHeadMultiStreamID))
+        {
+            main->triggerACT();
+            if (!hal->payloadWaitForACTReceived())
+            {
+                DP_LOG(("DP-TS> Downstream device did not receive ACT during stream clear"));
+                DP_ASSERT(0);
+            }
+        }
+    }
+
+    intransitionGroups.insertFront(group);
+}
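Both the attach and detach paths above use the same payload-table protocol: update the payload table, select the trigger for the affected stream, trigger ACT (allocation change trigger), and on clears wait for the downstream acknowledgement. A sketch of that ordering as used in this file; THal/TMain are template stand-ins for the library's hal/main interfaces, and the combined sequence is a synthesis, not a function that exists in the library:

    // Ordering sketch for reprogramming a single-head-MST secondary stream.
    template <typename THal, typename TMain>
    bool reprogramStream(THal *hal, TMain *main,
                         unsigned head, unsigned streamId,
                         unsigned streamIndex, unsigned slotBegin, unsigned slotCount)
    {
        hal->payloadTableClearACT();                    // drop any stale ACT state
        hal->payloadAllocate(streamIndex, slotBegin, slotCount);
        main->configureTriggerSelect(head, streamId);   // pick the frontend to latch
        main->triggerACT();                             // apply the allocation change
        return hal->payloadWaitForACTReceived();        // downstream must ack the ACT
    }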
+
+//
+// Here is what NDE does:
+// 1. deletes unplugged devices (they were zombies, if they're on this list)
+// 2. unmarks zombies (they were plugged zombies; they might want to get link trained next time)
+// 3. marks the head as detached (so that we can delete any HPD unplugged devices)
+//
+void ConnectorImpl::notifyDetachEnd(bool bKeepOdAlive)
+{
+    GroupImpl* currentModesetDeviceGroup = NULL;
+    DP_LOG(("DPCONN> Notify detach end"));
+    NV_DPTRACE_INFO(NOTIFY_DETACH_END);
+
+    if (intransitionGroups.isEmpty())
+    {
+        DP_ASSERT(0 && "INVALID STATE: Modeset Group is NULL");
+        return;
+    }
+
+    currentModesetDeviceGroup = intransitionGroups.pop();
+
+    afterDeleteStream(currentModesetDeviceGroup);
+
+    if (!linkUseMultistream())
+    {
+        Device * d = 0;
+        for (d = currentModesetDeviceGroup->enumDevices(0);
+             currentModesetDeviceGroup->enumDevices(d) != 0;
+             d = currentModesetDeviceGroup->enumDevices(d))
+        {
+            // There should be only one device in the group
+            DP_ASSERT(d && (((DeviceImpl*)d)->activeGroup == currentModesetDeviceGroup));
+        }
+    }
+
+    // Nullify the last modeset info
+    dpMemZero(&currentModesetDeviceGroup->lastModesetInfo, sizeof(ModesetInfo));
+    currentModesetDeviceGroup->setHeadAttached(false);
+    currentModesetDeviceGroup->headInFirmware = false;
+    currentModesetDeviceGroup->dscModeActive = DSC_MODE_NONE;
+
+    // Mark the head as disconnected
+    bNoLtDoneAfterHeadDetach = true;
+
+    //
+    // Update the last modeset HDCP status here. HDCP got disabled after the modeset,
+    // thus hdcpPreviousStatus would be false for SST after the device is inserted.
+    //
+    HDCPState hdcpState = {0};
+    main->configureHDCPGetHDCPState(hdcpState);
+    if (!(isHDCPAuthOn = hdcpState.HDCP_State_Authenticated))
+    {
+        currentModesetDeviceGroup->hdcpEnabled = false;
+    }
+
+    // Update the VBIOS scratch register
+    for (Device * d = currentModesetDeviceGroup->enumDevices(0); d;
+         d = currentModesetDeviceGroup->enumDevices(d))
+    {
+        currentModesetDeviceGroup->updateVbiosScratchRegister(d);
+    }
+
+    // Reset the value of bIsHeadShutdownNeeded to get rid of false asserts
+    currentModesetDeviceGroup->bIsHeadShutdownNeeded = false;
+
+    // If this is eDP and the LCD power is not ON, we don't need to disable DSC here
+    bool bPanelPwrSts = true;
+    if ((!main->isEDP()) || (main->getEdpPowerData(&bPanelPwrSts, NULL) && bPanelPwrSts))
+    {
+        // Disable DSC decompression on the panel if the panel supports DSC, and reset the bFECEnable flag
+        for (Device * dev = currentModesetDeviceGroup->enumDevices(0); dev; dev = currentModesetDeviceGroup->enumDevices(dev))
+        {
+            if (!(setDeviceDscState(dev, false /*bEnableDsc*/)))
+            {
+                DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!");
+            }
+        }
+    }
+
+    // Transfer to the inactive group and cancel pending callbacks for that group.
+    currentModesetDeviceGroup->cancelHdcpCallbacks();
+    activeGroups.remove(currentModesetDeviceGroup);
+    inactiveGroups.insertBack(currentModesetDeviceGroup);
+
+    if (activeGroups.isEmpty())
+    {
+        cancelHdcpCallbacks();
+
+        // We disconnected a panel; try to clear the transition
+        if (linkAwaitingTransition)
+        {
+            assessLink();
+        }
+        //
+        // Power down the links as we have switched away from the monitor.
+        // Only power down if we are in single stream
+        //
+        else
+        {
+            //
+            // Power down the links as we have switched away from the monitor.
+            // For the shared SOR case, we need this to keep SW stats in DP instances in sync.
+            // Only power down the link when it's not a compliance test device.
+            //
+            // Some eDP panels are known to have problems when powered down.
+            // See bugs 1425706, 1376753, 1347872, 1355592.
+            //
+            // Hotplug may trigger a detach before processNewDevice if the previous state
+            // has a lost device not yet detached. Avoid powering down in that case, for the
+            // following device discovery HDCP probe.
+            //
+            if (!bIsDiscoveryDetectActive)
+                powerdownLink(!main->skipPowerdownEdpPanelWhenHeadDetach() && !bKeepOdAlive);
+        }
+        if (this->policyModesetOrderMitigation && this->modesetOrderMitigation)
+            this->modesetOrderMitigation = false;
+    }
+    fireEvents();
+}
+
+bool ConnectorImpl::trainPCONFrlLink(PCONLinkControl *pconControl)
+{
+    NvU32 loopCount = NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_THRESHOLD;
+    NvU32 frlRateMask = 0;
+    bool bFrlReady = false;
+    bool result = false;
+
+    // Initial return values.
+    pconControl->result.trainedFrlBwMask = 0;
+    pconControl->result.maxFrlBwTrained = PCON_HDMI_LINK_BW_FRL_INVALID;
+
+    // Step 1: Set up the PCON for later operation
+
+    // Step 1.1: Set D0 power
+    hal->setPowerState(PowerStateD0);
+
+    hal->resetProtocolConverter();
+
+    // Step 1.2: Enable Source Control Mode and FRL mode, enable FRL-Ready IRQ
+    hal->setSourceControlMode(true, true);
+
+    do
+    {
+        //
+        // Step 1.3: Poll for HDMI-Link-Status Change (0x2005 Bit 3)
+        //           Get FRL Ready Bit (0x303B Bit 1)
+        //
+        hal->checkPCONFrlReady(&bFrlReady);
+        if (bFrlReady == true)
+        {
+            break;
+        }
+        Timeout timeout(this->timer, NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_INTERVAL_MS);
+        while (timeout.valid());
+        continue;
+    } while (--loopCount);
+
+    if (bFrlReady == false)
+    {
+        pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_TIMEOUT;
+        return false;
+    }
+
+    // Step 2: Assess FRL Link capability.
+
+    //
+    // Step 2.1: Configure FRL Link (FRL BW, BW mask / Concurrent)
+    // Start with the mask for all bandwidths. Please refer to the definition of DPCD 0x305B.
+    //
+    result = hal->setupPCONFrlLinkAssessment(pconControl->frlHdmiBwMask,
+                                             pconControl->flags.bExtendedLTMode,
+                                             pconControl->flags.bConcurrentMode);
+    if (result == false)
+    {
+        pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_GENERIC;
+        return false;
+    }
+
+    // Step 2.2: Poll for HDMI-Link-Status Change (0x2005 Bit 3)
+    loopCount = NV_PCON_FRL_LT_TIMEOUT_THRESHOLD;
+    do
+    {
+        result = hal->checkPCONFrlLinkStatus(&frlRateMask);
+        if (result == true)
+        {
+            break;
+        }
+        Timeout timeout(this->timer, NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS);
+        while (timeout.valid());
+        continue;
+    } while (--loopCount);
+
+    if (result == true)
+    {
+        //
+        // frlRateMask is the result from checkPCONFrlLinkStatus (0x3036) Bit 1~6.
+        //
+        pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_SUCCESS;
+        pconControl->result.trainedFrlBwMask = frlRateMask;
+        pconControl->result.maxFrlBwTrained = getMaxFrlBwFromMask(frlRateMask);
+    }
+    else
+    {
+        pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_LT_FAILURE;
+    }
+    return result;
+}
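getMaxFrlBwFromMask(), used above, reduces the trained-bandwidth mask to the highest trained FRL rate. A standalone sketch assuming one bit per FRL rate with the lowest bit meaning the lowest rate; the enum ordering below is a placeholder, not the library's actual values:

    #include <cstdint>

    // Placeholder ordering: bit0=9G, bit1=18G, bit2=24G, bit3=32G, bit4=40G, bit5=48G.
    enum FrlBw { FRL_INVALID = -1, FRL_9G, FRL_18G, FRL_24G, FRL_32G, FRL_40G, FRL_48G };

    static FrlBw maxFrlBwFromMask(uint32_t mask)
    {
        FrlBw best = FRL_INVALID;
        for (int bit = 0; bit < 6; bit++)
        {
            if (mask & (1u << bit))
                best = (FrlBw)bit;      // keep the highest set bit seen
        }
        return best;
    }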
+
+bool ConnectorImpl::assessPCONLinkCapability(PCONLinkControl *pConControl)
+{
+    NvU32 status;
+
+    if (pConControl == NULL || !this->previousPlugged)
+        return false;
+
+    bool bIsFlushModeEnabled = enableFlush();
+
+    if (!bIsFlushModeEnabled)
+    {
+        return false;
+    }
+
+    if (pConControl->flags.bSourceControlMode)
+    {
+        status = trainPCONFrlLink(pConControl);
+        if (status == false)
+        {
+            // Restore Autonomous mode and treat this as an active DP dongle.
+            hal->resetProtocolConverter();
+            // Exit flush mode
+            disableFlush();
+            if (!pConControl->flags.bSkipFallback)
+            {
+                bSkipAssessLinkForPCon = false;
+                assessLink();
+            }
+            return status;
+        }
+        activePConLinkControl.flags = pConControl->flags;
+        activePConLinkControl.frlHdmiBwMask = pConControl->frlHdmiBwMask;
+        activePConLinkControl.result = pConControl->result;
+    }
+
+    // Step 3: Assess DP Link capability.
+    LinkConfiguration lConfig = getMaxLinkConfig();
+    highestAssessedLC = getMaxLinkConfig();
+
+    hal->updateDPCDOffline();
+    if (hal->isDpcdOffline())
+    {
+        disableFlush();
+        return false;
+    }
+    if (!train(lConfig, false /* do not force LT */))
+    {
+        //
+        // Note that train() now handles fallback; activeLinkConfig
+        // has the max link config that was assessed.
+        //
+        lConfig = activeLinkConfig;
+    }
+
+    highestAssessedLC = lConfig;
+    linkGuessed = false;
+    disableFlush();
+
+    this->bKeepLinkAliveForPCON = pConControl->flags.bKeepPCONLinkAlive;
+    return status;
+}
+
+bool ConnectorImpl::getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision)
+{
+    if (!previousPlugged || !hal->getOuiSupported())
+        return false;
+
+    return hal->getOuiSink(ouiId, modelName, modelNameBufferSize, chipRevision);
+}
+
+void ConnectorImpl::setIgnoreSourceOuiHandshake(bool bIgnoreOuiHandShake)
+{
+    bIgnoreSrcOuiHandshake = bIgnoreOuiHandShake;
+}
+
+bool ConnectorImpl::getIgnoreSourceOuiHandshake()
+{
+    return bIgnoreSrcOuiHandshake;
+}
+
+bool ConnectorImpl::performIeeeOuiHandshake()
+{
+    const char *ieeeOuiDevId = "NVIDIA";
+
+    if (!hal->getOuiSupported() || getIgnoreSourceOuiHandshake())
+        return false;
+
+    if (hal->setOuiSource(DPCD_OUI_NVIDIA, ieeeOuiDevId, 6 /* string length of ieeeOuiDevId */, 0) == AuxRetry::ack)
+    {
+        NvU8 chipRevision = 0;
+
+        // Parse the client OUI.
+        if (hal->getOuiSink(ouiId, &modelName[0], sizeof(modelName), chipRevision))
+        {
+            DP_LOG(("DP> SINK-OUI id(0x%08x) %s: rev:%d.%d", ouiId,
+                    (NvU8*)modelName,
+                    (unsigned)DRF_VAL(_DPCD, _SINK_HARDWARE_REV, _MAJOR, chipRevision),
+                    (unsigned)DRF_VAL(_DPCD, _SINK_HARDWARE_REV, _MINOR, chipRevision)));
+            return true;
+        }
+    }
+    return false;
+}
+
+bool ConnectorImpl::willLinkSupportModeSST(const LinkConfiguration & linkConfig, const ModesetInfo & modesetInfo)
+{
+    DP_ASSERT(!linkUseMultistream() && "IMP for SST only");
+
+    //
+    // The mode is not known yet; we have to report it as possible.
+    // Otherwise we're going to mark all devices as zombies on the first HPD(c),
+    // since modeset info is not available.
+    //
+    if (modesetInfo.pixelClockHz == 0)
+        return true;
+
+    if (linkConfig.lanes == 0 || linkConfig.peakRate == 0)
+        return false;
+
+    Watermark water;
+
+    if (this->isFECSupported())
+    {
+        if (!isModePossibleSSTWithFEC(linkConfig, modesetInfo, &water, main->hasIncreasedWatermarkLimits()))
+        {
+            // Verify audio
+            return false;
+        }
+    }
+    else
+    {
+        if (!isModePossibleSST(linkConfig, modesetInfo, &water, main->hasIncreasedWatermarkLimits()))
+        {
+            // Verify audio
+            return false;
+        }
+    }
+    return true;
+}
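willLinkSupportModeSST() above delegates to the watermark calculators; at its core, the check is whether the mode's data rate fits the link's post-8b/10b data rate, minus FEC overhead when enabled. A first-order sketch of that arithmetic, ignoring watermark and blanking details; this is an illustration only, not the isModePossibleSST* implementation, and the FEC overhead figure is an assumption:

    #include <cstdint>

    // linkDataRateBytesPerSec: per-lane data rate after 8b/10b (e.g. HBR2 = 540 MB/s).
    static bool firstOrderSstCheck(uint64_t pixelClockHz, unsigned bppX16,
                                   unsigned lanes, uint64_t linkDataRateBytesPerSec,
                                   bool fecEnabled)
    {
        // Mode demand in bits/sec; bppX16 is bits-per-pixel times 16, as used above.
        uint64_t needBps  = pixelClockHz * bppX16 / 16;
        uint64_t availBps = linkDataRateBytesPerSec * 8 * lanes;
        if (fecEnabled)
            availBps = availBps * 97 / 100;   // assumed ~3% FEC overhead
        return needBps <= availBps;
    }

Note the available-bandwidth expression matches the availableBandwidthBitsPerSecond = lc.minRate * 8 * lc.lanes computation used in the DSC path earlier.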
+
+// Gets max values from the DPCD HAL and forces link training with that config
+void ConnectorImpl::forceLinkTraining()
+{
+    LinkConfiguration forcedMaxConfig(getMaxLinkConfig());
+    train(forcedMaxConfig, true);
+}
+
+void ConnectorImpl::powerdownLink(bool bPowerdownPanel)
+{
+    LinkConfiguration powerOff = getMaxLinkConfig();
+    bool bPanelPwrSts = true;
+    powerOff.lanes = 0;
+    // Inform the Sink about Main Link Power Down.
+
+    //
+    // 1> If it is eDP and the power is not on, we don't need to put it into D3 here.
+    // 2> If FEC is enabled, we have to put the panel in D3 after powering down the
+    //    mainlink, as the FEC disable has to be detected by the panel, which happens
+    //    as part of link power down; we need to keep the panel in D0 for this.
+    //
+    if (!this->bFECEnable &&
+        ((!main->isEDP()) || (main->getEdpPowerData(&bPanelPwrSts, NULL) && bPanelPwrSts)))
+    {
+        hal->setPowerState(PowerStateD3);
+    }
+
+    train(powerOff, !bPowerdownPanel);  // Train to 0 links 0 BW
+
+    //
+    // If FEC is enabled, put the panel into D3 here for non-eDP.
+    // For eDP with FEC support, the FEC state will be cleared as part of panel
+    // power down.
+    //
+    if (this->bFECEnable && (!main->isEDP()))
+    {
+        hal->setPowerState(PowerStateD3);
+    }
+
+    // Set the FEC state to false on link power down
+    this->bFECEnable = false;
+    highestAssessedLC.enableFEC(false);
+}
+
+GroupImpl * ConnectorImpl::getActiveGroupForSST()
+{
+    if (this->linkUseMultistream())
+        return 0;
+    GroupImpl * groupAttached = 0;
+    for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+    {
+        // There should only be one group for the connector.
+        if (groupAttached)
+        {
+            DP_ASSERT(0 && "Multiple attached heads");
+            return 0;
+        }
+        groupAttached = (GroupImpl *)e;
+    }
+    return groupAttached;
+}
+
+bool ConnectorImpl::trainSingleHeadMultipleSSTLinkNotAlive(GroupImpl *pGroupAttached)
+{
+    GroupImpl *pPriGrpAttached = NULL;
+    GroupImpl *pSecGrpAttached = NULL;
+    ConnectorImpl *pPriConnImpl = NULL;
+    ConnectorImpl *pSecConnImpl = NULL;
+
+    if ((pGroupAttached == NULL) ||
+        (pCoupledConnector == NULL) ||
+        (pGroupAttached->singleHeadMultiStreamMode != DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST))
+    {
+        return false;
+    }
+    if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)
+    {
+        pSecGrpAttached = pCoupledConnector->getActiveGroupForSST();
+        pPriGrpAttached = pGroupAttached;
+        pSecConnImpl = pCoupledConnector;
+        pPriConnImpl = this;
+    }
+    else if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)
+    {
+        pPriGrpAttached = pCoupledConnector->getActiveGroupForSST();
+        pSecGrpAttached = pGroupAttached;
+        pPriConnImpl = pCoupledConnector;
+        pSecConnImpl = this;
+    }
+    else
+    {
+        DP_ASSERT(0 && "Invalid 2-SST configuration");
+        return false;
+    }
+
+    if (!pPriGrpAttached || !pSecGrpAttached || !pPriConnImpl || !pSecConnImpl)
+    {
+        DP_ASSERT(0 && "Invalid 2-SST configuration");
+        return false;
+    }
+
+    if (!pPriConnImpl->trainLinkOptimizedSingleHeadMultipleSST(pPriGrpAttached))
+    {
+        DP_ASSERT(0 && "Not able to configure 2-SST mode on the primary link");
+        return false;
+    }
+
+    if (!pSecConnImpl->trainLinkOptimizedSingleHeadMultipleSST(pSecGrpAttached))
+    {
+        DP_ASSERT(0 && "Not able to configure 2-SST mode on the secondary link");
+        return false;
+    }
+
+    return true;
+}
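The UEFI path in assessLink() below maps the raw link-rate codes 0x6, 0xA, 0x14 and 0x1E to RBR/HBR/HBR2/HBR3. These follow the DPCD LINK_BW encoding, in units of 0.27 Gbps per lane, so the same mapping can be computed directly (a minimal standalone sketch):

    #include <cstdint>

    // DPCD LINK_BW codes are in units of 0.27 Gbps: 0x06=1.62, 0x0A=2.7,
    // 0x14=5.4, 0x1E=8.1 Gbps, i.e. RBR, HBR, HBR2, HBR3.
    static uint64_t linkRateBpsFromDpcdCode(uint8_t bwCode)
    {
        return (uint64_t)bwCode * 270000000ULL;   // 0.27 Gbps granularity
    }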
+
+void ConnectorImpl::assessLink(LinkTrainingType trainType)
+{
+    this->bSkipLt = false;  // assessLink() should never skip LT, so reset it in case it was set.
+
+    if (bSkipAssessLinkForPCon)
+    {
+        // Skip assessLink() for PCON. The client should call assessPCONLinkCapability later.
+        return;
+    }
+
+    if (trainType == NO_LINK_TRAINING)
+    {
+        train(preferredLinkConfig, false, trainType);
+        return;
+    }
+
+    if (isLinkQuiesced ||
+        (firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware))
+    {
+        highestAssessedLC = getMaxLinkConfig();
+
+        if (bIsUefiSystem && !hal->getSupportsMultistream())
+        {
+            //
+            // Since this is a UEFI based system, it can provide the max link config
+            // supported on this panel. So try to get the max supported link config
+            // and update highestAssessedLC. Once done, set linkGuessed to false.
+            //
+            unsigned laneCount = 0;
+            NvU64 linkRate = 0;
+            NvU8 linkRateFromUefi, laneCountFromUefi;
+
+            // Query the max link config if provided by UEFI.
+            if ((!linkGuessed) && (main->getMaxLinkConfigFromUefi(linkRateFromUefi, laneCountFromUefi)))
+            {
+                laneCount = laneCountFromUefi;
+
+                if (linkRateFromUefi == 0x6)
+                {
+                    linkRate = RBR;
+                }
+                else if (linkRateFromUefi == 0xA)
+                {
+                    linkRate = HBR;
+                }
+                else if (linkRateFromUefi == 0x14)
+                {
+                    linkRate = HBR2;
+                }
+                else if (linkRateFromUefi == 0x1E)
+                {
+                    linkRate = HBR3;
+                }
+                else
+                {
+                    DP_ASSERT(0 && "DP> Invalid link rate returned from UEFI!");
+                    linkGuessed = true;
+                }
+
+                if ((getMaxLinkConfig().peakRate == HBR3) &&
+                    (linkRate != HBR3))
+                {
+                    //
+                    // UEFI does not support HBR3 yet (the support will be added in Volta).
+                    // Mark the link as guessed when the max supported link config is HBR3 and
+                    // the link config currently assessed by UEFI is not the highest, to
+                    // force the link assessment by the driver.
+                    //
+                    linkGuessed = true;
+                }
+                else
+                {
+                    //
+                    // SW policy change: if the BIOS max link config isn't the same as the max
+                    // of the panel, mark DPLib for link re-assessment by setting linkGuessed to true.
+                    // Re-link training is preferable over a glitchless boot at low resolutions.
+                    //
+                    if (laneCount != highestAssessedLC.lanes || linkRate != highestAssessedLC.peakRate)
+                    {
+                        linkGuessed = true;
+                    }
+                    else
+                    {
+                        linkGuessed = false;
+                        // Update software state with the latest link status info
+                        hal->setDirtyLinkStatus(true);
+                        hal->refreshLinkStatus();
+                    }
+                }
+            }
+            else if (!linkGuessed)
+            {
+                // We failed to query the max link config from UEFI. Mark the link as guessed.
+                DP_LOG(("DP CONN> Failed to query max link config from UEFI."));
+                linkGuessed = true;
+            }
+
+            if (!linkGuessed)
+            {
+                // Update SW state with the UEFI provided max link config
+                highestAssessedLC = LinkConfiguration(&this->linkPolicy,
+                                                      laneCount, linkRate,
+                                                      this->hal->getEnhancedFraming(),
+                                                      linkUseMultistream());
+
+                // Get the currently applied link config and update SW state
+                getCurrentLinkConfig(laneCount, linkRate);
+
+                activeLinkConfig = LinkConfiguration(&this->linkPolicy,
+                                                     laneCount, linkRate,
+                                                     this->hal->getEnhancedFraming(),
+                                                     linkUseMultistream());
+            }
+        }
+        else
+        {
+            linkGuessed = true;
+        }
+
+        return;
+    }
+
+    if (linkAwaitingTransition)
+    {
+        if (activeGroups.isEmpty())
+        {
+            linkState = hal->getSupportsMultistream() ?
+                DP_TRANSPORT_MODE_MULTI_STREAM : DP_TRANSPORT_MODE_SINGLE_STREAM;
+            linkAwaitingTransition = false;
+        }
+        else
+        {
+            //
+            // If modesetOrderMitigation isn't on, we need to reassess
+            // immediately. This is because we will report the connects at the
+            // same time as the disconnects. IMP queries can be done immediately
+            // on connects. On the other hand, if modeset order mitigation is
+            // off, all attached devices are going to be reported as
+            // disconnected, and we might as well use the old configuration.
+            //
+            if (this->policyModesetOrderMitigation && this->modesetOrderMitigation)
+                return;
+        }
+    }
+    else
+    {
+        if (hal->isDpcdOffline())
+            linkState = DP_TRANSPORT_MODE_INIT;
+    }
+
+    //
+    // Bug 1545352: This is done to avoid shutting down a display to free up a SOR for LT,
+    // when no SOR is assigned properly to the connector. It can happen when more
+    // than the max supported number of displays are connected.
+    // It came as a requirement from some clients to avoid glitches when shutting
+    // down a display to make a SOR available for those monitors.
+ //
+ if (main->getSorIndex() == DP_INVALID_SOR_INDEX)
+ {
+ highestAssessedLC = getMaxLinkConfig();
+ linkGuessed = true;
+ return;
+ }
+
+ LinkConfiguration lConfig = getMaxLinkConfig();
+
+ LinkConfiguration preFlushModeActiveLinkConfig = activeLinkConfig;
+
+ if (main->isInternalPanelDynamicMuxCapable())
+ {
+ // Skip link assessment for Dynamic MUX capable internal panel
+ if ((activeLinkConfig.lanes == lConfig.lanes) &&
+ (activeLinkConfig.peakRate == lConfig.peakRate) &&
+ (!isLinkInD3()) && (!isLinkLost()))
+ {
+ linkGuessed = false;
+ return;
+ }
+ }
+
+ //
+ // Disconnect heads
+ //
+ bool bIsFlushModeEnabled = enableFlush();
+
+ if (!bIsFlushModeEnabled)
+ {
+ goto done;
+ }
+
+ //
+ // If DPCD is offline, avoid assessing; just consider max.
+ // Keep lowering the lane/rate config till training succeeds.
+ //
+ hal->updateDPCDOffline();
+ if (!hal->isDpcdOffline())
+ {
+ if (!train(lConfig, false /* do not force LT */))
+ {
+ //
+ // Note that now train() handles fallback, activeLinkConfig
+ // has the max link config that was assessed.
+ //
+ lConfig = activeLinkConfig;
+ }
+
+ if (!this->linkUseMultistream() && this->policyAssessLinkSafely)
+ {
+ GroupImpl * groupAttached = this->getActiveGroupForSST();
+
+ if (groupAttached && groupAttached->isHeadAttached() &&
+ !willLinkSupportModeSST(lConfig, groupAttached->lastModesetInfo))
+ {
+ DP_ASSERT(0 && "DP> Maximum assessed link configuration is not capable of driving the existing raster!");
+
+ train(preFlushModeActiveLinkConfig, true);
+ linkGuessed = true;
+ goto done;
+ }
+ }
+ }
+
+ highestAssessedLC = lConfig;
+
+ // It is critical that this restores the original (desired) configuration
+ trainLinkOptimized(lConfig);
+
+ linkGuessed = false;
+
+done:
+
+ NV_DPTRACE_INFO(LINK_ASSESSMENT, highestAssessedLC.peakRate, highestAssessedLC.lanes);
+
+ if (bIsFlushModeEnabled)
+ {
+ disableFlush();
+ }
+}
+
+bool ConnectorImpl::handleCPIRQ()
+{
+ NvU8 bStatus;
+ HDCPState hdcpState = {0};
+
+ if (!isLinkActive())
+ {
+ DP_LOG(("DP> CP_IRQ: Ignored with link down"));
+ return true;
+ }
+
+ main->configureHDCPGetHDCPState(hdcpState);
+ if (hal->getRxStatus(hdcpState, &bStatus))
+ {
+ NvBool bReAuthReq = NV_FALSE;
+ NvBool bRxIDMsgPending = NV_FALSE;
+ DP_LOG(("DP> CP_IRQ HDCP ver:%s RxStatus:0x%2x HDCP Authenticated:%s Encryption:%s",
+ hdcpState.HDCP_State_22_Capable ? "2.2" : "1.x",
+ bStatus,
+ hdcpState.HDCP_State_Authenticated ? "YES" : "NO",
+ hdcpState.HDCP_State_Encryption ? "ON" : "OFF"));
+
+ // Check if the device is HDCP 2.2 capable instead of the actual encryption status.
+ if (hdcpState.HDCP_State_22_Capable)
+ {
+ if (FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _REAUTH_REQUEST, _YES, bStatus) ||
+ FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _LINK_INTEGRITY_FAILURE, _YES, bStatus))
+ {
+ if (this->linkUseMultistream())
+ {
+ //
+ // Bug 2860192: Some MST hubs throw an integrity failure before the source
+ // triggers authentication. This may be stale data, since the branch is
+ // doing protocol translation (DP to HDMI), and cannot be treated
+ // as the sink's fault.
+ // For MST, we would not lose anything here by ignoring either
+ // CP_Irq event since Auth never started after HPD high or
+ // LinkTraining start.
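+ // In other words (editor's note): the re-auth request below is honored
+ // only once isHDCPAuthTriggered shows we actually kicked off
+ // authentication; otherwise the event is logged and dropped.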
+ // + if (isHDCPAuthTriggered) + { + bReAuthReq = NV_TRUE; + } + else + { + DP_LOG(("DP>Ignore integrity failure or ReAuth in transition or before AKE_INIT.")); + } + } + else + { + bReAuthReq = NV_TRUE; + } + } + + if (FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _READY, _YES, bStatus)) + { + bRxIDMsgPending = NV_TRUE; + } + } + else + { + if (FLD_TEST_DRF(_DPCD, _HDCP_BSTATUS, _REAUTHENTICATION_REQUESET, _TRUE, bStatus) || + FLD_TEST_DRF(_DPCD, _HDCP_BSTATUS, _LINK_INTEGRITY_FAILURE, _TRUE, bStatus)) + { + bReAuthReq = NV_TRUE; + } + } + + if (bReAuthReq || bRxIDMsgPending) + { + DP_LOG(("DP> CP_IRQ: REAUTHENTICATION/RXIDPENDING REQUEST")); + + if (bReAuthReq) + { + authRetries = 0; + } + + if (!this->linkUseMultistream()) + { + // Get primary connector when multi-stream SST deployed. + GroupImpl *pGroupAttached = getActiveGroupForSST(); + ConnectorImpl *sstPrim = this; + + if (pGroupAttached && + (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) && + (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)) + { + DP_ASSERT(this->pCoupledConnector); + sstPrim = this->pCoupledConnector; + } + + sstPrim->main->configureHDCPRenegotiate(HDCP_DUMMY_CN, + HDCP_DUMMY_CKSV, + !!bReAuthReq, + !!bRxIDMsgPending); + sstPrim->main->configureHDCPGetHDCPState(hdcpState); + isHDCPAuthOn = hdcpState.HDCP_State_Authenticated; + } + } + + return true; + } + else + { + DP_LOG(("DP> CP_IRQ: RxStatus Read failed.")); + return false; + } +} + +void ConnectorImpl::handleSSC() +{ +} + +void ConnectorImpl::handleHdmiLinkStatusChanged() +{ + bool bLinkActive; + NvU32 newFrlRate; + // Check Link status + if (!hal->queryHdmiLinkStatus(&bLinkActive, NULL)) + { + return; + } + if (!bLinkActive) + { + newFrlRate = hal->restorePCONFrlLink(activePConLinkControl.frlHdmiBwMask, + activePConLinkControl.flags.bExtendedLTMode, + activePConLinkControl.flags.bConcurrentMode); + + if (newFrlRate != activePConLinkControl.result.trainedFrlBwMask) + { + activePConLinkControl.result.trainedFrlBwMask = newFrlRate; + activePConLinkControl.result.maxFrlBwTrained = getMaxFrlBwFromMask(newFrlRate); + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + if ((dev->activeGroup != NULL) && (dev->plugged)) + { + sink->bandwidthChangeNotification(dev, false); + } + } + } + } +} + +void ConnectorImpl::handleMCCSIRQ() +{ + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + if ((dev->activeGroup != NULL) && (dev->plugged)) + { + sink->notifyMCCSEvent(dev); + } + } +} + +// +// Checks if the link is still trained. +// Note that these hal registers are ONLY re-read in response to an IRQ. +// Calling this function returns the information from the last interrupt. +// +bool ConnectorImpl::isLinkLost() +{ + if (isLinkActive()) + { + // Bug 200320196: Add DPCD offline check to avoid link-train in unplugged state. + if (!hal->isDpcdOffline()) + { + unsigned laneCount; + NvU64 linkRate; + getCurrentLinkConfig(laneCount, linkRate); + // + // Check SW lane count in RM in case it's disabled beyond DPLib. 
+ // Bug 1933751/2897747
+ //
+ if (laneCount == laneCount_0)
+ return true;
+ }
+
+ // Update the SW cache if required
+ hal->refreshLinkStatus();
+ if (!hal->getInterlaneAlignDone())
+ return true;
+
+ for (unsigned i = 0; i < activeLinkConfig.lanes; i++)
+ {
+ if (!hal->getLaneStatusSymbolLock(i))
+ return true;
+ if (!hal->getLaneStatusClockRecoveryDone(i))
+ return true;
+ }
+
+ if (!hal->getInterlaneAlignDone())
+ return true;
+ }
+ return false;
+}
+
+bool ConnectorImpl::isLinkActive()
+{
+ return (activeLinkConfig.isValid());
+}
+
+bool ConnectorImpl::isLinkInD3()
+{
+ return (hal->getPowerState() == PowerStateD3);
+}
+
+bool ConnectorImpl::trainLinkOptimizedSingleHeadMultipleSST(GroupImpl *pGroupAttached)
+{
+ if (!pGroupAttached)
+ {
+ DP_LOG(("DP-CONN> 2-SST group not valid"));
+ return false;
+ }
+
+ if (preferredLinkConfig.isValid())
+ {
+ ConnectorImpl *pSecConImpl = this->pCoupledConnector;
+ if (pSecConImpl->preferredLinkConfig.isValid() &&
+ (preferredLinkConfig.lanes == laneCount_4) && (pSecConImpl->preferredLinkConfig.lanes == laneCount_4) &&
+ (preferredLinkConfig.peakRate == pSecConImpl->preferredLinkConfig.peakRate))
+ {
+ if (willLinkSupportModeSST(preferredLinkConfig, pGroupAttached->lastModesetInfo))
+ {
+ if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)
+ {
+ if (!this->enableFlush())
+ return false;
+ }
+ preferredLinkConfig.policy.setSkipFallBack(true);
+ if (!train(preferredLinkConfig, false))
+ {
+ DP_LOG(("DP-CONN> Unable to set preferred linkconfig on 2-SST display"));
+ return false;
+ }
+ if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)
+ {
+ this->disableFlush();
+ }
+ return true;
+ }
+ else
+ {
+ DP_LOG(("DP-CONN> Invalid 2-SST Preferred link configuration"));
+ return false;
+ }
+ }
+ }
+
+ if (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)
+ {
+ if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)
+ {
+ if (this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg.isValid())
+ {
+ bool trainDone = false;
+ this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg.policy.setSkipFallBack(true);
+ if (!train(this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg, false))
+ {
+ DP_LOG(("DP-CONN> Unable to set the primary configuration on secondary display"));
+ trainDone = false;
+ }
+ else
+ {
+ trainDone = true;
+ }
+ this->disableFlush();
+ return trainDone;
+ }
+ }
+
+ }
+
+ // Link rate order for 2-SST link training; must be with 4 lanes
+ unsigned linkRateList[] = {RBR, HBR, HBR2, HBR3};
+ NvU8 linkRateCount = sizeof(linkRateList) / sizeof(unsigned);
+
+ for (NvU8 i = 0; i < linkRateCount; i++)
+ {
+ LinkConfiguration linkCfg = LinkConfiguration(&this->linkPolicy,
+ laneCount_4, linkRateList[i],
+ hal->getEnhancedFraming(), false);
+ linkCfg.policy.setSkipFallBack(true);
+ if (willLinkSupportModeSST(linkCfg, pGroupAttached->lastModesetInfo))
+ {
+ if (!this->enableFlush())
+ return false;
+ if (!train(linkCfg, false))
+ {
+ if (i == linkRateCount - 1)
+ {
+ // Re-train max link config
+ linkCfg = getMaxLinkConfig();
+ linkCfg.policy.setSkipFallBack(true);
+ if (!train(linkCfg, false))
+ {
+ DP_ASSERT(0 && "DPCONN> 2-SST setting max link configuration failed ");
+ break;
+ }
+ }
+ }
+ else
+ {
+ oneHeadSSTSecPrefLnkCfg = linkCfg;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool ConnectorImpl::isNoActiveStreamAndPowerdown()
+{
+ if (activeGroups.isEmpty())
+ {
+ bool bKeepMSTLinkAlive = (this->bKeepLinkAliveMST
&& activeLinkConfig.multistream);
+ bool bKeepSSTLinkAlive = (this->bKeepLinkAliveSST && !activeLinkConfig.multistream);
+ //
+ // Power saving unless:
+ // - The fake flag is set to true to prevent panel power down here.
+ // - Regkey sets to keep link alive for MST and it's in MST.
+ // - Regkey sets to keep link alive for SST and it's in SST.
+ // - bKeepOptLinkAlive is set to true - to avoid link retraining.
+ // - Device discovery processing, since processNewDevice does an HDCP probe.
+ // - Pending remote HDCP detection messages - prevent power down so HDCP DPCD regs stay accessible.
+ // - Keep link active with compliance device as we always do
+ //
+ if ((!bKeepMSTLinkAlive) &&
+ (!bKeepSSTLinkAlive) &&
+ (!bKeepOptLinkAlive) &&
+ (!bKeepLinkAliveForPCON) &&
+ (!bIsDiscoveryDetectActive) &&
+ (pendingRemoteHdcpDetections == 0) &&
+ (!main->isInternalPanelDynamicMuxCapable()))
+ {
+ powerdownLink();
+
+ // Sharp panel for HP Valor QHD+ needs 50 ms after D3
+ if (bDelayAfterD3)
+ {
+ timer->sleep(50);
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig)
+{
+ LinkConfiguration lowestSelected; // initializes to 0
+ bool bSkipLowestConfigCheck = false; // Force highestLink config in SST
+ bool bSkipRedundantLt = false; // Skip redundant LT
+ bool bEnteredFlushMode = false;
+ bool bLinkTrainingSuccessful = true; // status indicating if link training actually succeeded
+ // forced link training is considered a failure
+ bool bTwoHeadOneOrLinkRetrain = false; // force link re-train if any attached
+ // groups are in 2Head1OR mode.
+
+ // Power off the link if no streams are active
+ if (isNoActiveStreamAndPowerdown())
+ {
+ return true;
+ }
+
+ //
+ // Split policy.
+ // If we're multistream we always pick the highest link configuration available
+ // - we don't want to interrupt existing panels to light up new ones
+ // If we're singlestream we always pick the lowest power configurations
+ // - there can't be multiple streams, so the previous limitation doesn't apply
+ //
+
+ //
+ // Find the active group(s)
+ //
+ GroupImpl * groupAttached = 0;
+ for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+ {
+ DP_ASSERT(bIsUefiSystem || (!groupAttached && "Multiple attached heads"));
+ groupAttached = (GroupImpl * )e;
+
+ if ((groupAttached->dscModeRequest == DSC_DUAL) && (groupAttached->dscModeActive != DSC_DUAL))
+ {
+ //
+ // If current modeset group requires 2Head1OR and
+ // - group is not active yet (first modeset on the group)
+ // - group is active but not in 2Head1OR mode (last modeset on the group did not require 2Head1OR)
+ // then re-train the link
+ // This is because for 2Head1OR mode, we need to set some LT parameters for the slave SOR after
+ // successful LT on the primary SOR, without which a 2Head1OR modeset will lead to a HW hang.
+ //
+ bTwoHeadOneOrLinkRetrain = true;
+ break;
+ }
+ }
+
+ lowestSelected = getMaxLinkConfig();
+
+ if (!activeLinkConfig.multistream)
+ {
+ if (groupAttached &&
+ groupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)
+ {
+ return trainLinkOptimizedSingleHeadMultipleSST(groupAttached);
+ }
+
+ if (preferredLinkConfig.isValid())
+ {
+ if (activeLinkConfig != preferredLinkConfig)
+ {
+ // If a tool has requested a preferred link config, check if it's possible and train to it;
+ // else choose the normal path
+ if (groupAttached &&
+ willLinkSupportModeSST(preferredLinkConfig, groupAttached->lastModesetInfo))
+ {
+ if (!this->enableFlush())
+ return false;
+ if (!train(preferredLinkConfig, false))
+ {
+ DP_LOG(("DP-CONN> Preferred linkconfig could not be applied. Forcing on gpu side."));
+ train(preferredLinkConfig, true);
+ }
+ this->disableFlush();
+ return true;
+ }
+ else
+ {
+ DP_LOG(("DP-CONN> Preferred linkconfig does not support the mode"));
+ return false;
+ }
+ }
+ else
+ {
+ // We are already at preferred. Nothing to do here. Return.
+ return true;
+ }
+ }
+
+ //
+ // This is required to make certain panels work, by training them at the
+ // highest linkConfig in SST mode.
+ //
+ for (Device * i = enumDevices(0); i; i=enumDevices(i))
+ {
+ DeviceImpl * dev = (DeviceImpl *)i;
+ if (dev->forceMaxLinkConfig())
+ {
+ bSkipLowestConfigCheck = true;
+ }
+ if (dev->skipRedundantLt())
+ {
+ bSkipRedundantLt = true;
+ }
+ }
+
+ if (bPConConnected)
+ {
+ // When PCON is connected, always LT to max to avoid LT.
+ bSkipLowestConfigCheck = true;
+ }
+
+ // If the flag is set, skip downgrading to the lowest possible linkConfig
+ if (!bSkipLowestConfigCheck)
+ {
+ lConfig = lowestSelected;
+
+ if (groupAttached)
+ {
+ lConfig.enableFEC(this->bFECEnable);
+ // Find lowest link configuration supporting the mode
+ getValidLowestLinkConfig(lConfig, lowestSelected, groupAttached->lastModesetInfo);
+ }
+ }
+
+ if (lowestSelected.isValid())
+ {
+ //
+ // Check if we are already trained to the desired link config.
+ // Make sure requested FEC state matches with the current FEC state of link.
+ // If 2Head1OR mode is requested, retrain if group is not active or
+ // last modeset on active group was not in 2Head1OR mode.
+ // bTwoHeadOneOrLinkRetrain tracks this requirement.
+ //
+
+ //
+ // Set linkStatus to be dirty so that when isLinkLost() calls
+ // refreshLinkStatus() it will get real time status. This is to
+ // fix an issue where, during UEFI-to-Driver transition, the LTTPR is not
+ // link trained but will be link trained by RM.
+ //
+ hal->setDirtyLinkStatus(true);
+ if ((activeLinkConfig == lowestSelected) &&
+ (!isLinkInD3()) &&
+ (!isLinkLost()) &&
+ (this->bFECEnable == activeLinkConfig.bEnableFEC) &&
+ !bTwoHeadOneOrLinkRetrain)
+ {
+ if (bSkipRedundantLt || main->isInternalPanelDynamicMuxCapable())
+ {
+ // Skip LT if the links are already trained to desired config.
+ DP_LOG(("DP-CONN> Skipping redundant LT."));
+ return true;
+ }
+ else
+ {
+ // Make sure link status is still good.
+ if (activeLinkConfig.lanes && hal->isLinkStatusValid(activeLinkConfig.lanes))
+ {
+ // Pass on a flag to RM ctrl call to skip LT at RM level.
+ DP_LOG(("DP-CONN> Skipping redundant LT from RM."));
+ bSkipLt = true;
+ }
+ }
+ }
+ else
+ {
+ bSkipLt = false;
+ }
+
+ if (groupAttached && groupAttached->isHeadAttached())
+ {
+ // Enter flush mode/detach head before LT
+ if (!bSkipLt)
+ {
+ if (!(bEnteredFlushMode = this->enableFlush()))
+ return false;
+ }
+ }
+
+ bLinkTrainingSuccessful = train(lowestSelected, false);
+ //
+ // If LT failed, check if skipLT was marked. If so, clear the flag and
+ // enable flush mode if required (head attached) and try real LT once.
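+ // Editor's sketch of the retry flow implemented below:
+ // train(bSkipLt == true) fails -> clear bSkipLt -> enableFlush() if a
+ // head is attached -> train() once more without the skip.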
+ //
+ if (!bLinkTrainingSuccessful && bSkipLt)
+ {
+ bSkipLt = false;
+ if (groupAttached && groupAttached->isHeadAttached())
+ {
+ if (!(bEnteredFlushMode = this->enableFlush()))
+ return false;
+ }
+ bLinkTrainingSuccessful = train(lowestSelected, false);
+ }
+ if (!bLinkTrainingSuccessful)
+ {
+ // Try falling back to the max link config; if that fails, try the originally assessed link configuration
+ if (!train(getMaxLinkConfig(), false))
+ {
+ if (!willLinkSupportModeSST(activeLinkConfig, groupAttached->lastModesetInfo))
+ {
+ train(lowestSelected, true);
+
+ // Mark link training as failed since we forced it
+ bLinkTrainingSuccessful = false;
+ }
+ }
+ }
+ }
+ else
+ {
+ if (groupAttached && groupAttached->isHeadAttached())
+ {
+ if (!(bEnteredFlushMode = this->enableFlush()))
+ return false;
+ }
+
+ // Mode wasn't possible at any assessed configuration.
+ train(getMaxLinkConfig(), true);
+
+ // Mark link training as failed since we forced it
+ bLinkTrainingSuccessful = false;
+ }
+
+ lConfig = activeLinkConfig;
+
+ if (bEnteredFlushMode)
+ {
+ this->disableFlush();
+ }
+
+ // In case this was set, we should reset it to prevent skipping LT next time.
+ bSkipLt = false;
+ }
+ else
+ {
+ bool bRetrainToEnsureLinkStatus;
+
+ //
+ // Multistream:
+ // If we can't restore all streams after a link train - we need to make sure that
+ // we set RG_DIV to "slow down" the effective pclk for that head. RG_DIV does give
+ // us enough room to account for both the HBR2->RBR drop and the 4->1 drop.
+ // This should allow us to keep the link up and operating at a sane frequency.
+ // .. thus we'll allow training at any frequency ..
+ //
+
+ // For MST, setPreferred calls assessLink directly.
+ if (preferredLinkConfig.isValid() && (activeLinkConfig != preferredLinkConfig))
+ {
+ if (!train(preferredLinkConfig, false))
+ {
+ DP_LOG(("DP-CONN> Preferred linkconfig could not be applied. Forcing on gpu side."));
+ train(preferredLinkConfig, true);
+ }
+ return true;
+ }
+
+ //
+ // Make sure link is physically active and healthy, otherwise re-train.
+ // Make sure requested FEC state matches with the current FEC state of link.
+ // If 2Head1OR mode is requested, retrain if group is not active or last modeset on active group
+ // was not in 2Head1OR mode. bTwoHeadOneOrLinkRetrain tracks this requirement.
+ //
+ bRetrainToEnsureLinkStatus = (isLinkActive() && isLinkInD3()) ||
+ isLinkLost() ||
+ (activeLinkConfig.bEnableFEC != this->bFECEnable) ||
+ bTwoHeadOneOrLinkRetrain;
+
+ if (bRetrainToEnsureLinkStatus || (!isLinkActive()))
+ {
+ //
+ // Train to the highestAssessed link config for MST cases to avoid redundant
+ // fallback. There is no point in trying to link train at the highest link config
+ // when it failed during the assessment.
+ // train() handles fallback now. So we don't need to step down when LT fails.
+ //
+ LinkConfiguration desired = highestAssessedLC;
+
+ NvU8 retries = DP_LT_MAX_FOR_MST_MAX_RETRIES;
+
+ desired.enableFEC(this->bFECEnable);
+
+ if (bRetrainToEnsureLinkStatus)
+ {
+ bEnteredFlushMode = enableFlush();
+ }
+
+ //
+ // In some cases, the FEC isn't enabled and link is not lost (e.g. DP_KEEP_OPT_LINK_ALIVE = 1),
+ // but we're going to enable DSC. We need to update bSkipLt for retraining the link with FEC.
+ // bSkipLt may have been set to true previously while the link was not lost.
+ //
+ if (activeLinkConfig.bEnableFEC != this->bFECEnable)
+ {
+ bSkipLt = false;
+ }
+
+ train(desired, false);
+ if (!activeLinkConfig.isValid())
+ {
+ DP_LOG(("DPCONN> Unable to train link (at all).
Forcing training (picture won't show up)")); + train(getMaxLinkConfig(), true); + + // Mark link training as failed since we forced it + bLinkTrainingSuccessful = false; + } + + // + // Bug 2354318: On some MST branches, we might see a problem that LT failed during + // assessLink(), but somehow works later. In this case, we should not + // retry since highestAssessedLC is not a valid comparison now. + // + if (highestAssessedLC.isValid()) + { + while ((highestAssessedLC != activeLinkConfig) && retries > 0) + { + // Give it a few more chances. + train(desired, false); + retries--; + }; + } + + lConfig = activeLinkConfig; + + if (bEnteredFlushMode) + { + disableFlush(); + } + } + } + + return (bLinkTrainingSuccessful && lConfig.isValid()); +} + +bool ConnectorImpl::getValidLowestLinkConfig +( + LinkConfiguration &lConfig, + LinkConfiguration &lowestSelected, + ModesetInfo modesetInfo +) +{ + bool bIsModeSupported = false; + unsigned i; + LinkConfiguration selectedConfig; + + for (i = 0; i < numPossibleLnkCfg; i++) + { + if ((this->allPossibleLinkCfgs[i].lanes > lConfig.lanes) || (this->allPossibleLinkCfgs[i].peakRate > lConfig.peakRate)) + { + continue; + } + + // Update enhancedFraming for target config + this->allPossibleLinkCfgs[i].enhancedFraming = lConfig.enhancedFraming; + + selectedConfig = this->allPossibleLinkCfgs[i]; + + selectedConfig.enableFEC(lConfig.bEnableFEC); + + if (willLinkSupportModeSST(selectedConfig, modesetInfo)) + { + bIsModeSupported = true; + break; + } + } + + if (bIsModeSupported) + { + lowestSelected = selectedConfig; + } + else + { + // Invalidate link config if mode is not possible at all + lowestSelected.lanes = 0; + } + + return bIsModeSupported; +} + +bool ConnectorImpl::postLTAdjustment(const LinkConfiguration & lConfig, bool force) +{ + NvU8 lastVoltageSwingLane[DP_MAX_LANES] = {0}; + NvU8 lastPreemphasisLane[DP_MAX_LANES] = {0}; + NvU8 lastTrainingScoreLane[DP_MAX_LANES] = {0}; + NvU8 lastPostCursor[DP_MAX_LANES] = {0}; + NvU8 currVoltageSwingLane[DP_MAX_LANES] = {0}; + NvU8 currPreemphasisLane[DP_MAX_LANES] = {0}; + NvU8 currTrainingScoreLane[DP_MAX_LANES] = {0}; + NvU8 currPostCursor[DP_MAX_LANES] = {0}; + NvU32 updatedLaneSettings[DP_MAX_LANES] = {0}; + NvU8 adjReqCount = 0; + NvU64 startTime; + LinkConfiguration linkConfig = lConfig; + + // Cache Voltage Swing and Preemphasis value just after Link training + if (!hal->readTraining(lastVoltageSwingLane, + lastPreemphasisLane, + lastTrainingScoreLane, + lastPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_LOG(("DPCONN> Post Link Training : Unable to read current training values")); + } + + if (hal->getTrainingPatternSelect() != TRAINING_DISABLED) + { + DP_LOG(("DPCONN> Post Link Training : Training pattern is not disabled.")); + } + + // + // We have cleared DPCD 102h + // Now hardware will automatically send the idle pattern + // + startTime = timer->getTimeUs(); + + do + { + if (!hal->getIsPostLtAdjRequestInProgress()) + { + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + + // Wait for 2ms + Timeout timeout(timer, 2); + + // check if DPCD 00206h~00207h change has reached to ADJ_REQ_LIMIT + if (adjReqCount > DP_POST_LT_ADJ_REQ_LIMIT) + { + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + + if (!hal->readTraining(currVoltageSwingLane, + currPreemphasisLane, + currTrainingScoreLane, + currPostCursor, + 
(NvU8)activeLinkConfig.lanes)) + { + DP_LOG(("DPCONN> Post Link Training : Unable to read current training values")); + } + else + { + if (!hal->isLaneSettingsChanged(lastVoltageSwingLane, + currVoltageSwingLane, + lastPreemphasisLane, + currPreemphasisLane, + (NvU8)activeLinkConfig.lanes)) + { + // Check if we have exceeded DP_POST_LT_ADJ_REQ_TIMER (200 ms) + if ((timer->getTimeUs() - startTime) > DP_POST_LT_ADJ_REQ_TIMER) + { + DP_LOG(("DPCONN> Post Link Training : DP_POST_LT_ADJ_REQ_TIMER is timed out.")); + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + } + else + { + adjReqCount++; + + // Clear ADJ_REQ_TIMER + startTime = timer->getTimeUs(); + + // Change RX drive settings according to DPCD 00206h & 00207h + if (!hal->setTrainingMultiLaneSet((NvU8)activeLinkConfig.lanes, + currVoltageSwingLane, + currPreemphasisLane)) + { + DP_LOG(("DPCONN> Post Link Training : Failed to set RX drive setting according to DPCD 00206h & 00207h.")); + } + + // Populate updated lane settings for currently active lanes + populateUpdatedLaneSettings(currVoltageSwingLane, currPreemphasisLane, updatedLaneSettings); + + // Change TX drive settings according to DPCD 00206h & 00207h + if (!setLaneConfig(activeLinkConfig.lanes, updatedLaneSettings)) + { + DP_LOG(("DPCONN> Post Link Training : Failed to set TX drive setting according to DPCD 00206h & 00207h.")); + } + + // Update last Voltage Swing and Preemphasis values + if (!hal->readTraining(lastVoltageSwingLane, + lastPreemphasisLane, + lastTrainingScoreLane, + lastPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_LOG(("DPCONN> Post Link Training : Unable to read current training values")); + } + } + } + + // Mark the linkStatus as dirty since we need to retrain in case Rx has lost sync + hal->setDirtyLinkStatus(true); + }while (!isLinkLost()); + + // Clear POST_LT_ADJ_REQ_GRANTED bit + hal->setPostLtAdjustRequestGranted(false); + + if (isLinkLost()) + { + if (bNoFallbackInPostLQA && (retryLT < WAR_MAX_RETRAIN_ATTEMPT)) + { + // + // A monitor may lose link sometimes during assess link or link training. + // So retry for 3 times before fallback to lower config + // + retryLT++; + train(lConfig, force); + return true; + } + // + // If the link is not alive, then we need to retrain at a lower config + // There is no reason to try at the same link configuration. Follow the + // fallback policy that is followed for CR phase of LT + // + if (!linkConfig.lowerConfig()) + { + DP_LOG(("DPCONN> Post Link Training : Already at the lowest link rate. 
Cannot reduce further")); + return false; + } + train(linkConfig, force); + } + else if (bNoFallbackInPostLQA && (retryLT != 0)) + { + retryLT = 0; + } + + return true; +} + +void ConnectorImpl::populateUpdatedLaneSettings(NvU8* voltageSwingLane, NvU8* preemphasisLane, NvU32 *data) +{ + NvU32 laneIndex; + + for (laneIndex = 0; laneIndex < activeLinkConfig.lanes; laneIndex++) + { + switch (voltageSwingLane[laneIndex]) + { + case driveCurrent_Level0: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL0, data[laneIndex]); + break; + + case driveCurrent_Level1: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL1, data[laneIndex]); + break; + + case driveCurrent_Level2: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL2, data[laneIndex]); + break; + + case driveCurrent_Level3: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL3, data[laneIndex]); + break; + } + + switch (preemphasisLane[laneIndex]) + { + case preEmphasis_Level1: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL1, data[laneIndex]); + break; + + case preEmphasis_Level2: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL2, data[laneIndex]); + break; + + case preEmphasis_Level3: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL3, data[laneIndex]); + break; + } + } +} + +bool ConnectorImpl::validateLinkConfiguration(const LinkConfiguration & lConfig) +{ + if (!IS_VALID_LANECOUNT(lConfig.lanes)) + return false; + + if (lConfig.lanes > hal->getMaxLaneCount()) + return false; + + if (lConfig.lanes != 0) + { + if (!IS_VALID_LINKBW(lConfig.peakRate/DP_LINK_BW_FREQ_MULTI_MBPS)) + return false; + + if (lConfig.peakRate > hal->getMaxLinkRate()) + return false; + + if (IS_INTERMEDIATE_LINKBW(lConfig.peakRate/DP_LINK_BW_FREQ_MULTI_MBPS)) + { + NvU16 *ilrTable; + NvU32 i; + if (!hal->isIndexedLinkrateEnabled()) + return false; + + ilrTable = hal->getLinkRateTable(); + for (i = 0; i < NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES; i++) + { + // + // lConfig.peakRate is in MBPS and ilrTable entries are the values read from DPCD + // Convert the ilrTable value to MBPS before the comparison + // + if (LINK_RATE_KHZ_TO_MBPS(ilrTable[i] * DP_LINK_RATE_TABLE_MULTIPLIER_KHZ) == lConfig.peakRate) + break; + if (ilrTable[i] == 0) + return false; + } + if (i == NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES) + return false; + } + } + + return true; +} + +bool ConnectorImpl::train(const LinkConfiguration & lConfig, bool force, + LinkTrainingType trainType) +{ + LinkTrainingType preferredTrainingType = trainType; + bool result; + // + // Validate link config against caps + // + if (!force) + { + if (!validateLinkConfiguration(lConfig)) + return false; + } + + if (!lConfig.multistream) + { + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->powerOnMonitorBeforeLt() && lConfig.lanes != 0) + { + // + // Some panels expose that they are in D0 even when they are not. + // Explicit write to DPCD 0x600 is required to wake up such panel before LT. + // + hal->setPowerState(PowerStateD0); + } + } + // + // Enable special LT only when regkey 'ENABLE_FAST_LINK_TRAINING' set + // to 1 in DD's path. 
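+ // When enabled, no-link-training (NLT) is preferred over fast link
+ // training (FLT), which is the ordering the checks below implement
+ // (editor's note).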
+ // + if (bEnableFastLT) + { + // If the panel can support NLT or FLT, then let's try it first + if (hal->getNoLinkTraining()) + preferredTrainingType = NO_LINK_TRAINING; + else if (hal->getSupportsNoHandshakeTraining()) + preferredTrainingType = FAST_LINK_TRAINING; + } + + } + + // + // Don't set the stream if we're shutting off the link + // or forcing the config + // + if (!force && lConfig.lanes != 0) + { + if (isLinkActive()) + { + if (activeLinkConfig.multistream != lConfig.multistream) + { + activeLinkConfig.lanes = 0; + rawTrain(activeLinkConfig, true, NORMAL_LINK_TRAINING); + } + } + + if (AuxRetry::ack != hal->setMultistreamLink(lConfig.multistream)) + { + DP_LOG(("DP> Failed to enable multistream mode on current link")); + } + } + + // + // Read link rate table before link-train to assure on-board re-driver + // knows link rate going to be set in link rate table. + // If eDP's power has been shutdown here, don't query Link rate table, + // else it will cause panel wake up. + // + if (hal->isIndexedLinkrateEnabled() && (lConfig.lanes != 0)) + { + hal->getRawLinkRateTable(); + } + + activeLinkConfig = lConfig; + result = rawTrain(lConfig, force, preferredTrainingType); + + // If NLT or FLT failed, then fallback to normal LT again + if (!result && (preferredTrainingType != NORMAL_LINK_TRAINING)) + result = rawTrain(lConfig, force, NORMAL_LINK_TRAINING); + + if (!result) + activeLinkConfig.lanes = 0; + else + bNoLtDoneAfterHeadDetach = false; + + if (!force && result) + this->hal->setDirtyLinkStatus(true); + + // We don't need post LQA while powering down the lanes. + if ((lConfig.lanes != 0) && + hal->isPostLtAdjustRequestSupported() && + result) + { + result = postLTAdjustment(activeLinkConfig, force); + } + + if((lConfig.lanes != 0) && result && lConfig.bEnableFEC) + { + // + // Extended latency from link-train end to FEC enable pattern + // to avoid link lost or blank screen with Synaptics branch. + // (Bug 2561206) + // + if (LT2FecLatencyMs) + { + timer->sleep(LT2FecLatencyMs); + } + + result = main->configureFec(true /*bEnableFec*/); + DP_ASSERT(result); + } + + if (lConfig != activeLinkConfig) + { + // fallback happens, returns fail to make sure clients notice it. 
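+ // (Editor's note: lConfig is the caller-requested configuration while
+ // activeLinkConfig is what rawTrain() actually achieved, so any mismatch
+ // here means a fallback occurred.)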
+ result = false; + } + return result; +} + +void ConnectorImpl::sortActiveGroups(bool ascending) +{ + List activeSortedGroups; + + while(!activeGroups.isEmpty()) + { + ListElement * e = activeGroups.begin(); + GroupImpl * g = (GroupImpl *)e; + + GroupImpl * groupToInsertBefore = NULL; + + // Remove from active group for sorting + activeGroups.remove(g); + + for (ListElement *e1 = activeSortedGroups.begin(); e1 != activeSortedGroups.end(); e1 = e1->next) + { + GroupImpl * g1 = (GroupImpl *)e1; + if ((g->headIndex < g1->headIndex) || + ((g->headIndex == g1->headIndex) && + ((ascending && (g->singleHeadMultiStreamID < g1->singleHeadMultiStreamID)) || + (!ascending && (g->singleHeadMultiStreamID > g1->singleHeadMultiStreamID))) + )) + { + groupToInsertBefore = g1; + break; + } + } + + if (NULL == groupToInsertBefore) + { + activeSortedGroups.insertBack(g); + } + else + { + activeSortedGroups.insertBefore(groupToInsertBefore, g); + } + } + + // Repopulate active group list + while (!activeSortedGroups.isEmpty()) + { + ListElement * e = activeSortedGroups.begin(); + + // Remove from sorted list + activeSortedGroups.remove(e); + // Insert back to active group list + activeGroups.insertBack(e); + } +} + +bool ConnectorImpl::enableFlush() +{ + bool bHeadAttached = false; + + if (activeGroups.isEmpty()) + return true; + + // + // If SST check that head should be attached with single group else if MST at least + // 1 group should have headAttached before calling flush on SOR + // + if (!this->linkUseMultistream()) + { + GroupImpl * activeGroup = this->getActiveGroupForSST(); + + if (activeGroup && !activeGroup->isHeadAttached() && intransitionGroups.isEmpty()) + { + DP_LOG(("DPCONN> SST-Flush mode should not be called when head is not attached. Returning early without enabling flush")); + return true; + } + } + else + { + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * group = (GroupImpl *)e; + if (group->isHeadAttached()) + { + bHeadAttached = true; + break; + } + } + + if (!bHeadAttached) + { + DP_LOG(("DPCONN> MST-Flush mode should not be called when head is not attached. Returning early without enabling flush")); + return true; + } + } + + if (!main->setFlushMode()) + return false; + + // + // Enabling flush mode shuts down the link, so the next link training + // call must not skip programming the hardware. Otherwise, EVO will + // hang if the head is still active when flush mode is disabled. + // + bSkipLt = false; + + sortActiveGroups(false); + + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * g = (GroupImpl *)e; + + if (!this->linkUseMultistream()) + { + GroupImpl * activeGroup = this->getActiveGroupForSST(); + DP_ASSERT(g == activeGroup); + } + + bool skipPreLinkTraining = (((g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) || + (g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) && + (g->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)); + if (!skipPreLinkTraining) + main->preLinkTraining(g->headIndex); + + beforeDeleteStream(g, true); + if (this->linkUseMultistream()) + { + main->configureTriggerSelect(g->headIndex, g->singleHeadMultiStreamID); + main->triggerACT(); + } + afterDeleteStream(g); + } + + return true; +} + +// +// This is a wrapper for call to mainlink::train(). 
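+ // Editor's note: besides the requested config and the force flag, it
+ // forwards the LT type, a pointer that receives the post-fallback config
+ // (activeLinkConfig), the bSkipLt hint, whether post-LT adjustment may
+ // be requested, and the LTTPR count from hal->getPhyRepeaterCount().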
+bool ConnectorImpl::rawTrain(const LinkConfiguration & lConfig, bool force, LinkTrainingType linkTrainingType)
+{
+ {
+ //
+ // This is the common path.
+ // activeLinkConfig will be updated in main->train() in case fallback happens.
+ // If the link config sent has the disable-post-LT-request flag set, we send false for the corresponding flag.
+ //
+ if (lConfig.disablePostLTRequest)
+ {
+ return (main->train(lConfig, force, linkTrainingType, &activeLinkConfig, bSkipLt, false,
+ hal->getPhyRepeaterCount()));
+ }
+ return (main->train(lConfig, force, linkTrainingType, &activeLinkConfig, bSkipLt, hal->isPostLtAdjustRequestSupported(),
+ hal->getPhyRepeaterCount()));
+ }
+}
+
+//
+// Timeslot management
+//
+
+bool ConnectorImpl::deleteAllVirtualChannels()
+{
+ // Clear the payload table
+ hal->payloadTableClearACT();
+ if (!hal->payloadAllocate(0, 0, 63))
+ {
+ DP_LOG(("DPCONN> Payload table could not be cleared"));
+ }
+
+ // send clear_payload_id_table
+ DP_LOG(("DPCONN> Sending CLEAR_PAYLOAD_ID_TABLE broadcast"));
+
+ for (unsigned retries = 0 ; retries < 7; retries++)
+ {
+ ClearPayloadIdTableMessage clearPayload;
+ NakData nack;
+
+ if (this->messageManager->send(&clearPayload, nack))
+ return true;
+ }
+
+ // We should not have reached here.
+ DP_ASSERT(0 && "DPCONN> CLEAR_PAYLOAD_ID failed!");
+ return false;
+}
+
+void ConnectorImpl::clearTimeslices()
+{
+ for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+ {
+ GroupImpl * group = (GroupImpl *)((Group *)i);
+ group->timeslot.PBN = 0;
+ group->timeslot.count = 0;
+ group->timeslot.begin = 1;
+ group->timeslot.hardwareDirty = false;
+ }
+
+ maximumSlots = 63;
+ freeSlots = maximumSlots;
+}
+
+
+void ConnectorImpl::freeTimeslice(GroupImpl * targetGroup)
+{
+ // compact timeslot allocation
+ for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+ {
+ GroupImpl * group = (GroupImpl *)e;
+
+ if (group->timeslot.begin > targetGroup->timeslot.begin) {
+ group->timeslot.begin -= targetGroup->timeslot.count;
+ group->timeslot.hardwareDirty = true;
+
+ //
+ // Enable TRIGGER_ALL on SFs corresponding to the single head MST driving heads,
+ // as both pipelines need to take the effect of the shift happening due to deactivating
+ // an MST display being driven through the same SOR
+ //
+ if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == group->singleHeadMultiStreamMode) &&
+ (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == group->singleHeadMultiStreamID))
+ {
+ main->configureTriggerAll(group->headIndex, true);
+ }
+ }
+ }
+
+ // mark stream as free
+ freeSlots += targetGroup->timeslot.count;
+ targetGroup->timeslot.PBN = 0;
+ targetGroup->timeslot.count = 0;
+ targetGroup->timeslot.hardwareDirty = true;
+}
+
+bool ConnectorImpl::allocateTimeslice(GroupImpl * targetGroup)
+{
+ unsigned base_pbn, slot_count, slots_pbn;
+
+ DP_ASSERT(isLinkActive());
+ if (this->isFECSupported())
+ {
+ if (!isModePossibleMSTWithFEC(activeLinkConfig,
+ targetGroup->lastModesetInfo,
+ &targetGroup->timeslot.watermarks))
+ {
+ DP_ASSERT(0 && "DisplayDriver bug! This mode is not possible at any "
+ "link configuration. It would have been rejected at mode filtering time!");
+ return false;
+ }
+ }
+ else
+ {
+ if (!isModePossibleMST(activeLinkConfig,
+ targetGroup->lastModesetInfo,
+ &targetGroup->timeslot.watermarks))
+ {
+ DP_ASSERT(0 && "DisplayDriver bug! This mode is not possible at any "
+ "link configuration.
It would have been rejected at mode filtering time!");
+ return false;
+ }
+ }
+
+ activeLinkConfig.pbnRequired(targetGroup->lastModesetInfo, base_pbn, slot_count, slots_pbn);
+
+ // Check for available timeslots
+ if (slot_count > freeSlots)
+ return false;
+
+ int firstFreeSlot = 1;
+
+ for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+ {
+ GroupImpl * group = (GroupImpl *)i;
+
+ if (group->timeslot.count != 0 &&
+ (group->timeslot.begin + group->timeslot.count) >= firstFreeSlot)
+ {
+ firstFreeSlot = group->timeslot.begin + group->timeslot.count;
+ }
+ }
+
+ DP_ASSERT((maximumSlots - firstFreeSlot + 1) == freeSlots && "Timeslot allocation table corrupted");
+
+ // Already allocated?
+ DP_ASSERT(!targetGroup->timeslot.count && "Reallocation of stream that is already present");
+
+ targetGroup->timeslot.count = slot_count;
+ targetGroup->timeslot.begin = firstFreeSlot;
+ targetGroup->timeslot.PBN = base_pbn;
+ targetGroup->timeslot.hardwareDirty = true;
+ freeSlots -= slot_count;
+
+ return true;
+}
+
+
+void ConnectorImpl::flushTimeslotsToHardware()
+{
+ for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+ {
+ GroupImpl * group = (GroupImpl *)i;
+
+ if (group->timeslot.hardwareDirty)
+ {
+ group->timeslot.hardwareDirty = false;
+ bool bEnable2Head1Or = false;
+
+ if ((group->lastModesetInfo.mode == DSC_DUAL) ||
+ (group->lastModesetInfo.mode == DSC_DROP))
+ {
+ bEnable2Head1Or = true;
+ }
+
+ main->configureMultiStream(group->headIndex,
+ group->timeslot.watermarks.hBlankSym,
+ group->timeslot.watermarks.vBlankSym,
+ group->timeslot.begin,
+ group->timeslot.begin+group->timeslot.count-1,
+ group->timeslot.PBN,
+ activeLinkConfig.PBNForSlots(group->timeslot.count),
+ group->colorFormat,
+ group->singleHeadMultiStreamID,
+ group->singleHeadMultiStreamMode,
+ bAudioOverRightPanel,
+ bEnable2Head1Or);
+ }
+ }
+}
+
+void ConnectorImpl::beforeDeleteStream(GroupImpl * group, bool forFlushMode)
+{
+
+ //
+ // During flush entry, if the link is not trained, retrain
+ // the link so that ACT can be ack'd by the sink.
+ // (ACK is only for the multistream case)
+ //
+ // Note: A re-training might be required even in cases where the link is not
+ // alive in the non-flush-mode case (e.g. beforeDeleteStream called from NAB).
+ // However, we cannot simply re-train in such cases without ensuring that the
+ // head is not actively driving pixels; this needs to be handled
+ // differently.
+ //
+ if(forFlushMode && linkUseMultistream())
+ {
+ if(isLinkLost())
+ {
+ train(activeLinkConfig, false);
+ }
+ }
+
+ // Check if this is a firmware group
+ if (group && group->isHeadAttached() && group->headInFirmware)
+ {
+ // Check if MST is enabled and we have initialized the messageManager
+ if (hal->getSupportsMultistream() && messageManager)
+ {
+ // Firmware group can be assumed to be taking up all 63 slots.
+ group->timeslot.begin = 1;
+ group->timeslot.count = 63;
+ this->freeSlots = 0;
+
+ // 1. clear the timeslots using CLEAR_PAYLOAD_TABLE
+ // 2. clear gpu timeslots.
+ if (!deleteAllVirtualChannels())
+ DP_ASSERT(0 && "Failed to delete VCs.
Vbios state in branch could not be cleaned.");
+
+ freeTimeslice(group);
+ flushTimeslotsToHardware();
+ group->bWaitForDeAllocACT = false;
+
+ return;
+ }
+ }
+
+ if (linkUseMultistream() && group && group->isHeadAttached() && group->timeslot.count)
+ {
+ // Detach all the panels from payload
+ for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d))
+ {
+ group->update(d, false);
+ }
+
+ freeTimeslice(group);
+ flushTimeslotsToHardware();
+ group->bWaitForDeAllocACT = true;
+
+ // Delete the stream
+ hal->payloadTableClearACT();
+ hal->payloadAllocate(group->streamIndex, group->timeslot.begin, 0);
+
+ //
+ // If entering flush mode, enable RG (with immediate effect). Otherwise, for detaching
+ // a display, if not single-head MST, enabling RG is not required. For single-head MST
+ // stream deletion, enable RG at loadv.
+ //
+ if (forFlushMode ||
+ ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == group->singleHeadMultiStreamMode) &&
+ (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY != group->singleHeadMultiStreamID)))
+ {
+ main->controlRateGoverning(group->headIndex, true/*enable*/, forFlushMode /*Immediate/loadv*/);
+ }
+ }
+}
+
+void ConnectorImpl::afterDeleteStream(GroupImpl * group)
+{
+ if (linkUseMultistream() && group->isHeadAttached() && group->bWaitForDeAllocACT)
+ {
+ if (!hal->payloadWaitForACTReceived())
+ {
+ DP_LOG(("DP> Delete stream failed. Device did not acknowledge stream deletion ACT!"));
+ DP_ASSERT(0);
+ }
+ }
+}
+
+void ConnectorImpl::afterAddStream(GroupImpl * group)
+{
+ // Skip this as there is no timeslot allocation
+ if (!linkUseMultistream() || !group->timeslot.count)
+ return;
+
+ if (group->bDeferredPayloadAlloc)
+ {
+ DP_ASSERT(addStreamMSTIntransitionGroups.contains(group));
+ hal->payloadTableClearACT();
+ hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count);
+ main->triggerACT();
+ }
+ group->bDeferredPayloadAlloc = false;
+
+ if (addStreamMSTIntransitionGroups.contains(group)) {
+ addStreamMSTIntransitionGroups.remove(group);
+ }
+
+ if (!hal->payloadWaitForACTReceived())
+ {
+ DP_LOG(("ACT has not been received. Triggering ACT once more"));
+ DP_ASSERT(0);
+
+ //
+ // Bug 1334070: During modeset for cloned displays on certain GPU family,
+ // ACT triggered during SOR attach is not being received due to timing issues.
+ // Also DP1.2 spec mentions that there is no harm in sending the ACT
+ // again if there is no change in payload table.
Hence triggering ACT once more here + // + main->triggerACT(); + if (!hal->payloadWaitForACTReceived()) + { + DP_LOG(("DP-TS> Downstream device did not receive ACT during stream re-add.")); + return; + } + } + + for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d)) + { + group->update((DeviceImpl *)d, true); + + lastDeviceSetForVbios = d; + } + + // Disable rate gov at the end of adding all streams + if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != group->singleHeadMultiStreamMode) || + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_MAX == group->singleHeadMultiStreamID)) + { + main->controlRateGoverning(group->headIndex, false/*disable*/, false/*loadv*/); + } + + group->updateVbiosScratchRegister(lastDeviceSetForVbios); +} + +bool ConnectorImpl::beforeAddStream(GroupImpl * group, bool test, bool forFlushMode) +{ + bool res = false; + if (linkUseMultistream()) + { + res = beforeAddStreamMST(group, test, forFlushMode); + } + else + { + // SST + Watermark water; + bool bEnable2Head1Or = false; + bool bIsModePossible = false; + + if ((group->lastModesetInfo.mode == DSC_DUAL) || + (group->lastModesetInfo.mode == DSC_DROP)) + { + bEnable2Head1Or = true; + } + + if (this->isFECSupported()) + { + bIsModePossible = isModePossibleSSTWithFEC(activeLinkConfig, + group->lastModesetInfo, + &water, + main->hasIncreasedWatermarkLimits()); + } + else + { + bIsModePossible = isModePossibleSST(activeLinkConfig, + group->lastModesetInfo, + &water, + main->hasIncreasedWatermarkLimits()); + } + + if (bIsModePossible) + { + if (group->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + if (group->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY) + { + // + // configure sf parameters after secondary linktraining on primary link. + // + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + group->singleHeadMultiStreamMode, + bAudioOverRightPanel); + } + } + else + { + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + false /*bEnableAudioOverRightPanel*/, + bEnable2Head1Or); + } + } + else + { + if (test) + { + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + false /*bEnableAudioOverRightPanel*/, + bEnable2Head1Or); + DP_LOG(("DP-TS> Unable to allocate stream. 
Setting RG_DIV mode"));
+ res = true;
+ }
+ else
+ DP_ASSERT(0);
+ }
+ }
+ return res;
+}
+
+bool ConnectorImpl::beforeAddStreamMST(GroupImpl * group, bool test, bool forFlushMode)
+{
+ bool res = false;
+ bool isPrimaryStream = (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == group->singleHeadMultiStreamID);
+ if (allocateTimeslice(group))
+ {
+ flushTimeslotsToHardware();
+ if (!forFlushMode && isPrimaryStream)
+ {
+ main->controlRateGoverning(group->headIndex, true /*enable*/);
+ }
+
+ // If not single-head MST mode, or if this is the primary stream, program here;
+ // other streams are programmed in NAE
+ if (forFlushMode ||
+ (isPrimaryStream &&
+ addStreamMSTIntransitionGroups.isEmpty()))
+ {
+ hal->payloadTableClearACT();
+ hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count);
+ }
+ else if (isPrimaryStream &&
+ !addStreamMSTIntransitionGroups.isEmpty())
+ {
+
+ group->bDeferredPayloadAlloc = true;
+ }
+
+ addStreamMSTIntransitionGroups.insertFront(group);
+ }
+ else
+ {
+ if (!test)
+ {
+ DP_LOG(("DP-TS> Unable to allocate stream. Should call mainLink->configureStream to trigger RG_DIV mode"));
+ main->configureMultiStream(group->headIndex,
+ group->timeslot.watermarks.hBlankSym, group->timeslot.watermarks.vBlankSym,
+ 1, 0, 0, 0, group->colorFormat, group->singleHeadMultiStreamID, group->singleHeadMultiStreamMode, bAudioOverRightPanel);
+ }
+ else
+ {
+ flushTimeslotsToHardware();
+
+ if (forFlushMode ||
+ (DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != group->singleHeadMultiStreamMode) || isPrimaryStream)
+ {
+ main->configureTriggerSelect(group->headIndex, group->singleHeadMultiStreamID);
+ hal->payloadTableClearACT();
+ hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count);
+ }
+
+ DP_LOG(("DP-TS> Unable to allocate stream. Setting RG_DIV mode"));
+ res = true;
+ }
+ }
+
+ return res;
+}
+
+void ConnectorImpl::disableFlush( bool test)
+{
+ bool bHeadAttached = false;
+
+ if (activeGroups.isEmpty())
+ return;
+
+ sortActiveGroups(true);
+
+ //
+ // For SST, check that a head is attached to the single group; for MST, at least
+ // one group should have a head attached before calling disable flush on the SOR.
+ //
+ if (!this->linkUseMultistream())
+ {
+ GroupImpl * activeGroup = this->getActiveGroupForSST();
+
+ if (activeGroup && !activeGroup->isHeadAttached() && intransitionGroups.isEmpty())
+ {
+ DP_LOG(("DPCONN> SST-Flush mode disable should not be called when head is not attached. Returning early without disabling flush\n"));
+ return;
+ }
+ }
+ else
+ {
+ for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next)
+ {
+ GroupImpl * group = (GroupImpl *)e;
+ if (group->isHeadAttached())
+ {
+ bHeadAttached = true;
+ break;
+ }
+ }
+
+ if (!bHeadAttached)
+ {
+ DP_LOG(("DPCONN> MST-Flush mode disable should not be called when head is not attached. Returning early without disabling flush\n"));
+ return;
+ }
+ }
+
+ //
+ // We need to rebuild the timeslot configuration when exiting flush mode
+ // Bug 1550750: Change the order to proceed from last to front as they were added.
+ // Some tiled monitors are happy with this.
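+ // Hence the loop below walks activeGroups from last() backwards via
+ // e->prev, re-adding streams in the reverse of their sorted order
+ // (editor's note).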
+ //
+ for (ListElement * e = activeGroups.last(); e != activeGroups.end(); e = e->prev)
+ {
+ GroupImpl * g = (GroupImpl *)e;
+ bool force = false;
+ NvU32 headMask = 0;
+
+ if (!g->isHeadAttached() && this->linkUseMultistream())
+ continue;
+
+ bool skipPostLinkTraining = (((g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) ||
+ (g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) &&
+ (g->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY));
+
+ //
+ // Allocate the timeslot configuration
+ //
+ force = beforeAddStream(g, test, true);
+ if (this->linkUseMultistream())
+ {
+ main->configureTriggerSelect(g->headIndex, g->singleHeadMultiStreamID);
+ }
+
+ if (g->lastModesetInfo.mode == DSC_DUAL)
+ {
+ // For 2 Head 1 OR - Legal combinations are Head0 and Head1, Head2 and Head3
+ headMask = (1 << g->headIndex) | (1 << (g->headIndex + 1));
+ }
+ else
+ {
+ headMask = (1 << g->headIndex);
+ }
+
+ main->clearFlushMode(headMask, force); // ACT is triggered here
+ if (!skipPostLinkTraining)
+ main->postLinkTraining(g->headIndex);
+ afterAddStream(g);
+ }
+}
+
+DeviceImpl* ConnectorImpl::findDeviceInList(const Address & address)
+{
+ for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next)
+ {
+ DeviceImpl* device = (DeviceImpl*)e;
+
+ //
+ // There may be multiple hits with the same address. This can
+ // happen when the head is still attached to the old device/branch.
+ // We never need to resurrect old unplugged devices - and their
+ // object will be destroyed as soon as the DD handles the
+ // notifyZombie message.
+ //
+ if ((device->address == address) && device->plugged)
+ return device;
+ }
+
+ //
+ // If no plugged devices are found, we should search back through zombied devices.
+ // This is purely an optimization to allow the automatic restoration of a
+ // panel if it 'reappears' while it's still being driven.
+ //
+ for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next)
+ {
+ DeviceImpl* device = (DeviceImpl*)e;
+
+ if (device->address == address)
+ return device;
+ }
+
+ return 0;
+}
+
+void ConnectorImpl::disconnectDeviceList()
+{
+ for (Device * d = enumDevices(0); d; d = enumDevices(d))
+ {
+ ((DeviceImpl*)d)->plugged = false;
+ // Clear the active bit (payload_allocate)
+ ((DeviceImpl*)d)->payloadAllocated = false;
+
+ // Deallocate object which may go stale after long pulse handling.
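+ // (Editor's note: deleting and NULLing deviceHDCPDetection below avoids
+ // a dangling pointer if the device object is looked at again before the
+ // DD destroys it via zombie handling.)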
+ if (((DeviceImpl*)d)->isDeviceHDCPDetectionAlive)
+ {
+ delete ((DeviceImpl*)d)->deviceHDCPDetection;
+ ((DeviceImpl*)d)->deviceHDCPDetection = NULL;
+ ((DeviceImpl*)d)->isHDCPCap = False;
+ }
+ }
+}
+
+// status == true: attach, == false: detach
+void ConnectorImpl::notifyLongPulse(bool statusConnected)
+{
+ NvU32 muxState = 0;
+ NV_DPTRACE_INFO(HOTPLUG, statusConnected, connectorActive, previousPlugged);
+
+ if (!connectorActive)
+ {
+ DP_LOG(("DP> Got a long pulse before any connector is active!!"));
+ return;
+ }
+
+ if (main->getDynamicMuxState(&muxState))
+ {
+ DeviceImpl * existingDev = findDeviceInList(Address());
+ bool bIsMuxOnDgpu = DRF_VAL(0073, _CTRL_DFP_DISP_MUX, _STATE, muxState) == NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU;
+
+ if (existingDev && existingDev->isFakedMuxDevice() && !bIsMuxOnDgpu)
+ {
+ DP_LOG((" NotifyLongPulse ignored as mux is not pointing to dGPU and there is a faked device"));
+ return;
+ }
+
+ if (existingDev && existingDev->isPreviouslyFakedMuxDevice() && !existingDev->isMarkedForDeletion())
+ {
+ DP_LOG((" NotifyLongPulse ignored as there is a previously faked device but it is not marked for deletion"));
+ if (!statusConnected)
+ {
+ DP_LOG((" Calling notifyDetectComplete"));
+ sink->notifyDetectComplete();
+ }
+ return;
+ }
+ }
+
+ if (previousPlugged && statusConnected)
+ {
+ if (main->isInternalPanelDynamicMuxCapable())
+ return;
+
+ DP_LOG(("DP> Redundant plug"));
+ for (Device * i = enumDevices(0); i; i=enumDevices(i))
+ {
+ DeviceImpl * dev = (DeviceImpl *)i;
+ if (dev->ignoreRedundantHotplug())
+ {
+ DP_LOG(("DP> Skipping link assessment"));
+ return;
+ }
+ }
+
+ //
+ // Exit early to avoid connector re-initialization from breaking MST
+ // branch state when streams are allocated.
+ // Additional exceptions:
+ // - UEFI post (firmwareGroup->headInFirmware) for fresh init.
+ // - MST to SST transition where the unplug event may be filtered by RM.
+ // Messaging will be disabled in this case.
+ //
+ if (linkUseMultistream() && (!activeGroups.isEmpty()) &&
+ (!(firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware)) &&
+ (hal->isMessagingEnabled()))
+ {
+ DP_LOG(("DP> Bail out early on redundant hotplug with active "
+ "MST stream"));
+ return;
+ }
+ }
+
+ this->notifyLongPulseInternal(statusConnected);
+}
+
+//
+// notifyLongPulse() filters redundant hotplug notifications and calls into
+// notifyLongPulseInternal().
+//
+// setAllowMultiStreaming() calls into notifyLongPulseInternal() in order to
+// re-detect an already connected sink after enabling/disabling
+// MST support.
+//
+void ConnectorImpl::notifyLongPulseInternal(bool statusConnected)
+{
+ // start from scratch
+ preferredLinkConfig = LinkConfiguration();
+
+ bPConConnected = false;
+ bSkipAssessLinkForPCon = false;
+
+ //
+ // Check if the panel is eDP and DPCD data for that is already parsed.
+ // Passing this as a parameter inside notifyHPD to skip reading of DPCD
+ // data in case of eDP after sleep/hibernate resume.
+ //
+ hal->notifyHPD(statusConnected, (!hal->isDpcdOffline() && main->isEDP()));
+ if (main->isLttprSupported())
+ {
+ //
+ // Update LTTPR counts since they are only correct after HPD.
+ // If other DFP parameters might change during the HPD cycle as well,
+ // then we can remove the isLttprSupported() check.
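+ // (LTTPR = Link-Training Tunable PHY Repeater, introduced in DP 1.4a;
+ // the count is refreshed from DPCD by queryAndUpdateDfpParams() below.
+ // Editor's note.)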
+ //
+ main->queryAndUpdateDfpParams();
+ }
+
+ // For bug 2489143, max link rate needs to be forced on eDP through regkey
+ if (main->isEDP())
+ {
+ hal->overrideMaxLinkRate(maxLinkRateFromRegkey);
+ }
+
+ // Some panels' TCONs erroneously set DPCD 0x200 SINK_COUNT=0.
+ if (main->isEDP() && hal->getSinkCount() == 0)
+ hal->setSinkCount(1);
+
+ // disconnect all devices
+ for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) {
+ GroupImpl * g = (GroupImpl *)i;
+
+ // Clear the timeslot table
+ freeTimeslice(g);
+ }
+
+ disconnectDeviceList();
+
+ auxBus->setDevicePlugged(statusConnected);
+
+ if (statusConnected)
+ {
+ // Reset all settings for previous downstream device
+ configInit();
+
+ if (! hal->isAtLeastVersion(1, 0 ) )
+ goto completed;
+
+ DP_LOG(("DP> HPD v%d.%d", hal->getRevisionMajor(), hal->getRevisionMinor()));
+
+ //
+ // Clear a pending CP_IRQ that threw a short pulse before the long HPD. There's no
+ // more short pulse corresponding to the CP_IRQ after HPD, but the IRQ vector needs
+ // to be cleared or it will block the following CP_IRQ.
+ //
+ if (hal->interruptContentProtection())
+ {
+ DP_LOG(("DP>clear pending CP interrupt at hpd"));
+ hal->clearInterruptContentProtection();
+ }
+
+ populateAllDpConfigs();
+
+ //
+ // Perform OUI authentication
+ //
+ if (!performIeeeOuiHandshake() && hal->isAtLeastVersion(1, 2))
+ {
+ DP_LOG(("DP> OUI Noncompliance! Sink is DP 1.2 and is required to implement"));
+ }
+
+ // Apply Oui WARs here
+ applyOuiWARs();
+
+ // Tear down old message manager
+ DP_ASSERT( !hal->getSupportsMultistream() || (hal->isAtLeastVersion(1, 2) && " Device supports multistream but not DP 1.2 !?!? "));
+
+ // Check if we should be attempting a transition between MST<->SST
+ if (main->hasMultistream())
+ {
+ if (linkState == DP_TRANSPORT_MODE_INIT)
+ {
+ linkState = hal->getSupportsMultistream() ?
+ DP_TRANSPORT_MODE_MULTI_STREAM :
+ DP_TRANSPORT_MODE_SINGLE_STREAM;
+ linkAwaitingTransition = false;
+ }
+ else
+ {
+ if (linkUseMultistream() != hal->getSupportsMultistream())
+ {
+ linkAwaitingTransition = true;
+ DP_LOG(("CONN> Link Awaiting Transition."));
+ }
+ else
+ {
+ linkAwaitingTransition = false;
+ }
+ }
+ }
+
+ //
+ // Only transition between multiStream and single stream when there
+ // are no active panels. Note that if we're unable to transition
+ // we will mark all of the displays as MUST_DISCONNECT.
+ //
+
+ //
+ // Shutdown the old message manager if there was one
+ //
+ delete discoveryManager;
+ isDiscoveryDetectComplete = false;
+ bIsDiscoveryDetectActive = true;
+
+ pendingEdidReads.clear(); // destroy any half completed requests
+ delete messageManager;
+ messageManager = 0;
+ discoveryManager = 0;
+
+ cancelHdcpCallbacks();
+ if (hal->getSupportsMultistream() && main->hasMultistream())
+ {
+ bool bDeleteFirmwareVC = false;
+
+ DP_LOG(("DP> Multistream panel detected, building message manager"));
+
+ //
+ // Rebuild the message manager to reset any half-received messages
+ // that may be in the pipe.
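+ // A freshly constructed MessageManager simply drops any partially
+ // reassembled sideband messages rather than trying to resynchronize
+ // them (editor's note).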
+ //
+ messageManager = new MessageManager(hal, timer);
+ messageManager->registerReceiver(&ResStatus);
+
+ //
+ // Create a discovery manager to initiate detection
+ //
+ if (AuxRetry::ack != hal->setMessagingEnable(true, true))
+ {
+ DP_LOG(("DP> Failed to enable messaging for multistream panel"));
+ }
+
+ if (AuxRetry::ack != hal->setMultistreamHotplugMode(IRQ_HPD))
+ {
+ DP_LOG(("DP> Failed to enable hotplug mode for multistream panel"));
+ }
+
+ discoveryManager = new DiscoveryManager(messageManager, this, timer, hal);
+
+ // Check for and clear any pending message here
+ if (hal->clearPendingMsg())
+ {
+ DP_LOG(("DP> Stale MSG found: set branch to D3 and back to D0..."));
+ if (hal->isAtLeastVersion(1, 4))
+ {
+ hal->setMessagingEnable(false, true);
+ }
+ hal->setPowerState(PowerStateD3);
+ hal->setPowerState(PowerStateD0);
+ if (hal->isAtLeastVersion(1, 4))
+ {
+ hal->setMessagingEnable(true, true);
+ }
+ }
+ pendingRemoteHdcpDetections = 0;
+
+ //
+ // We need to clear the payload table and payload ID table during a hotplug in
+ // cases where DD does not send a null modeset for a device that was plugged.
+ // Otherwise this leads to issues where the branch does not clear the PBN and
+ // sends stale available PBN values. One such scenario is a BSOD in SLI mode,
+ // where the secondary GPUs are not used for primary boot by the VBIOS.
+ //
+ bDeleteFirmwareVC = ((GroupImpl *)firmwareGroup &&
+ !((GroupImpl *)firmwareGroup)->isHeadAttached() &&
+ !bIsUefiSystem);
+
+ if (bDeleteFirmwareVC || !bAttachOnResume)
+ {
+ deleteAllVirtualChannels();
+ }
+
+ assessLink(); // Link assessment may re-add a stream
+ // and must be done AFTER the messaging system
+ // is restored.
+ discoveryManager->notifyLongPulse(true);
+ }
+ else // SST case
+ {
+ DiscoveryManager::Device dev;
+ Edid tmpEdid;
+ bool isComplianceForEdidTest = false;
+ dev.address = Address();
+
+
+ // Report a dongle as a new device with the videoSink flag set to false.
+ if (hal->getSinkCount() == 0)
+ {
+ dev.peerDevice = Dongle;
+ }
+ else
+ {
+ dev.peerDevice = DownstreamSink;
+
+ // Handle fallback EDID
+ if (!EdidReadSST(tmpEdid, auxBus, timer,
+ hal->getPendingTestRequestEdidRead(),
+ main->isForceRmEdidRequired(),
+ main->isForceRmEdidRequired() ? main : 0))
+ {
+ bool status = false;
+ //
+ // Some DP2VGA dongles cannot return the right EDID even after several
+ // retries. Before this library, 26 retries were given for DP2VGA dongle
+ // EDID reads; give at most 24 retries here for another restart in the
+ // library. Bug 996248.
+ //
+ if (hal->getLegacyPortCount())
+ {
+ LegacyPort * port = hal->getLegacyPort(0);
+ if (port->getDownstreamPortType() == ANALOG_VGA)
+ {
+ NvU8 retries = DP_READ_EDID_MAX_RETRIES;
+ for (NvU8 i = 0; i < retries; i++)
+ {
+ status = EdidReadSST(tmpEdid, auxBus, timer,
+ hal->getPendingTestRequestEdidRead(),
+ main->isForceRmEdidRequired(),
+ main->isForceRmEdidRequired() ? main : 0);
+ if (status)
+ break;
+ }
+ }
+ }
+ if (!status)
+ {
+ // corrupt EDID
+ DP_LOG(("DP-CONN> Corrupt Edid!"));
+
+ // Reading the EDID can fail if AUX is dead,
+ // so update the DPCD state after the max number of retries.
+ hal->updateDPCDOffline();
+ }
+ }
+
+ DP_LOG(("DP-CONN> Edid read complete: Manuf Id: 0x%x, Name: %s", tmpEdid.getManufId(), tmpEdid.getName()));
+ dev.branch = false;
+ dev.dpcdRevisionMajor = hal->getRevisionMajor();
+ dev.dpcdRevisionMinor = hal->getRevisionMinor();
+ dev.legacy = false;
+ dev.SDPStreams = hal->getNumberOfAudioEndpoints() ? 
1 : 0; + dev.SDPStreamSinks = hal->getNumberOfAudioEndpoints(); + dev.videoSink = true; + dev.maxTmdsClkRate = 0U; + + // Apply EDID based WARs and update the WAR flags if needed + applyEdidWARs(tmpEdid, dev); + + // + // HP Valor QHD+ needs 50ms delay after D3 + // to prevent black screen + // + if (tmpEdid.WARFlags.delayAfterD3) + { + bDelayAfterD3 = true; + } + + // Panels use Legacy address range for interrupt reporting + if (tmpEdid.WARFlags.useLegacyAddress) + { + hal->setSupportsESI(false); + } + + // + // For some devices short pulse comes in after we disconnect the + // link, so DPLib ignores the request and link trains after modeset + // happens. When modeset happens the link configuration picked may + // be different than what we assessed before. So we skip the link + // power down in assessLink() in such cases + // + if (tmpEdid.WARFlags.keepLinkAlive) + { + DP_LOG(("tmpEdid.WARFlags.keepLinkAlive = true, set bKeepOptLinkAlive to true. (keep link alive after assessLink())\n")); + bKeepOptLinkAlive = true; + } + // Ack the test response, no matter it is a ref sink or not + if (hal->getPendingTestRequestEdidRead()) + { + isComplianceForEdidTest = true; + hal->setTestResponseChecksum(tmpEdid.getLastPageChecksum()); + hal->setTestResponse(true, true); + } + } + + // + // If this is a zombie VRR device that was previously enabled, + // re-enable it now. This must happen before link training if + // VRR was enabled before the device became a zombie or else the + // monitor will report that it's in normal mode even if the GPU is + // driving it in VRR mode. + // + { + DeviceImpl * existingDev = findDeviceInList(dev.address); + if (existingDev && existingDev->isVrrMonitorEnabled() && + !existingDev->isVrrDriverEnabled()) + { + DP_LOG(("DP> Re-enabling previously enabled zombie VRR monitor")); + existingDev->resetVrrEnablement(); + existingDev->startVrrEnablement(); + } + } + + if ((hal->getPCONCaps())->bSourceControlModeSupported) + { + bPConConnected = true; + } + + if (bPConConnected || + (main->isEDP() && this->bSkipAssessLinkForEDP) || + (main->isInternalPanelDynamicMuxCapable())) + { + this->highestAssessedLC = getMaxLinkConfig(); + this->linkGuessed = bPConConnected; + this->bSkipAssessLinkForPCon = bPConConnected; + } + else + { + if (tmpEdid.WARFlags.powerOnBeforeLt) + { + // + // Some panels expose that they are in D0 even when they are not. + // Explicit write to DPCD 0x600 is required to wake up such panel before LT. + // + hal->setPowerState(PowerStateD0); + } + this->assessLink(); + } + + if (hal->getLegacyPortCount() != 0) + { + LegacyPort * port = hal->getLegacyPort(0); + DwnStreamPortType portType = port->getDownstreamPortType(); + dev.maxTmdsClkRate = port->getMaxTmdsClkRate(); + processNewDevice(dev, tmpEdid, false, portType, port->getDownstreamNonEDIDPortAttribute()); + } + else + { + processNewDevice(dev, tmpEdid, false, DISPLAY_PORT, RESERVED, isComplianceForEdidTest); + } + + // After processNewDevice, we should not defer any lost device. 
+ bDeferNotifyLostDevice = false; + } + } + else // HPD unplug + { + // + // Shutdown the old message manager if there was one + // + delete discoveryManager; + isDiscoveryDetectComplete = false; + pendingEdidReads.clear(); // destroy any half completed requests + bDeferNotifyLostDevice = false; + + delete messageManager; + messageManager = 0; + discoveryManager = 0; + bAcpiInitDone = false; + bKeepOptLinkAlive = false; + bNoFallbackInPostLQA = false; + bDscCapBasedOnParent = false; + + } +completed: + previousPlugged = statusConnected; + fireEvents(); + + if (!statusConnected) + { + sink->notifyDetectComplete(); + return; + } + if (!(hal->getSupportsMultistream() && main->hasMultistream())) + { + // Ensure NewDev will be processed before notifyDetectComplete on SST + discoveryDetectComplete(); + } +} + +void ConnectorImpl::notifyShortPulse() +{ + // + // Do nothing if device is not plugged or + // resume has not been called after hibernate + // to activate the connector + // + if (!connectorActive || !previousPlugged) + { + DP_LOG(("DP> Got a short pulse after an unplug or before any connector is active!!")); + return; + } + DP_LOG(("DP> IRQ")); + hal->notifyIRQ(); + + // Handle CP_IRQ + if (hal->interruptContentProtection()) + { + // Cancel previous queued delay handling and reset retry counter. + hdcpCpIrqRxStatusRetries = 0; + timer->cancelCallback(this, &tagDelayedHDCPCPIrqHandling); + + if (handleCPIRQ()) + { + hal->clearInterruptContentProtection(); + } + else + { + timer->queueCallback(this, &tagDelayedHDCPCPIrqHandling, HDCP_CPIRQ_RXSTATUS_COOLDOWN); + } + } + + if (hal->getStreamStatusChanged()) + { + if (!messageManager) + { + DP_LOG(("DP> Received Stream status changed Interrupt, but not in multistream mode. Ignoring.")); + } + else + { + handleSSC(); + hal->clearStreamStatusChanged(); + + // + // Handling of SSC takes longer time during which time we miss IRQs. + // Populate interrupts again. + // + hal->notifyIRQ(); + } + } + + if (hal->interruptCapabilitiesChanged()) + { + DP_LOG(("DP> Sink capabilities changed, re-reading caps and reinitializing the link.")); + // We need to set dpcdOffline to re-read the caps + hal->setDPCDOffline(true); + hal->clearInterruptCapabilitiesChanged(); + notifyLongPulse(true); + return; + } + + if (detectSinkCountChange()) + { + DP_LOG(("DP> Change in downstream sink count. Re-analysing link.")); + // We need to set dpcdOffline to re-read the caps + hal->setDPCDOffline(true); + notifyLongPulse(true); + return; + } + + if (hal->interruptDownReplyReady()) + { + if (!messageManager) + { + DP_LOG(("DP> Received DownReply Interrupt, but not in multistream mode. Ignoring.")); + } + else + { + messageManager->IRQDownReply(); + } + } + + if (hal->interruptUpRequestReady()) + { + if (!messageManager) + { + DP_LOG(("DP> Received UpRequest Interrupt, but not in multistream mode. 
Ignoring.")); + } + else + { + messageManager->IRQUpReqest(); + } + } + + if (hal->getDownStreamPortStatusChange() && hal->getSinkCount()) + { + Edid target; + if (!EdidReadSST(target, auxBus, timer, hal->getPendingTestRequestEdidRead())) + { + DP_LOG(("DP> Failed to read EDID.")); + } + + return; + } + + if (hal->getPendingAutomatedTestRequest()) + { + if (hal->getPendingTestRequestEdidRead()) + { + Edid target; + if (EdidReadSST(target, auxBus, timer, true)) + { + hal->setTestResponseChecksum(target.getLastPageChecksum()); + hal->setTestResponse(true, true); + } + else + hal->setTestResponse(false); + } + else if (hal->getPendingTestRequestTraining()) + { + if (activeLinkConfig.multistream) + { + hal->setTestResponse(false); + } + else + { + LinkRate requestedRate; + unsigned requestedLanes; + + hal->getTestRequestTraining(requestedRate, requestedLanes); + // if one of them is illegal; don't ack. let the box try again. + if (requestedRate == 0 || requestedLanes == 0) + { + DP_ASSERT(0 && "illegal requestedRate/Lane, retry.."); + hal->setTestResponse(false); + } + else + { + // Compliance shouldn't ask us to train above its caps + if (requestedRate == 0 || requestedRate > hal->getMaxLinkRate()) + { + DP_ASSERT(0 && "illegal requestedRate"); + requestedRate = hal->getMaxLinkRate(); + } + + if (requestedLanes == 0 || requestedLanes > hal->getMaxLaneCount()) + { + DP_ASSERT(0 && "illegal requestedLanes"); + requestedLanes = hal->getMaxLaneCount(); + } + + DeviceImpl * dev = findDeviceInList(Address()); + if (!dev || !dev->plugged || dev->multistream) + { + hal->setTestResponse(false); + } + else + { + GroupImpl * groupAttached = this->getActiveGroupForSST(); + DP_ASSERT(groupAttached && groupAttached->isHeadAttached()); + + if (!dev->activeGroup || (dev->activeGroup != groupAttached)) + { + DP_ASSERT(0 && "Compliance: no group attached"); + } + + DP_LOG(("DP> Compliance: LT on IRQ request: 0x%x, %d.", requestedRate, requestedLanes)); + // now see whether the current resolution is supported on the requested link config + LinkConfiguration lc(&linkPolicy, requestedLanes, requestedRate, hal->getEnhancedFraming(), false); + + if (groupAttached && groupAttached->isHeadAttached()) + { + if (willLinkSupportModeSST(lc, groupAttached->lastModesetInfo)) + { + DP_LOG(("DP> Compliance: Executing LT on IRQ: 0x%x, %d.", requestedRate, requestedLanes)); + // we need to force the requirement irrespective of whether is supported or not. + if (!enableFlush()) + { + hal->setTestResponse(false); + } + else + { + // + // Check if linkTraining fails, perform fake linktraining. This is required because + // if we simply fail linkTraining we will not configure the head which results in + // TDRs if any modset happens after this. + // + hal->setTestResponse(true); + if (!train(lc, false)) + train(lc, true); + disableFlush(); + // Don't force/commit. Only keep the request. + setPreferredLinkConfig(lc, false, false); + } + } + else // linkconfig is not supporting bandwidth. Fallback to default edid and notify DD. + { + // override the device with fallback edid and notify a bw change to DD. 
+ DP_LOG(("DP> Compliance: Switching to compliance fallback EDID after IMP failure.")); + dev->switchToComplianceFallback(); + + DP_LOG(("DP> Compliance: Notifying bandwidth change to DD after IMP failure.")); + // notify a bandwidth change to DD + sink->bandwidthChangeNotification(dev, true); + } + } + else + { + hal->setTestResponse(true); + DP_LOG(("DP> Compliance: Link Training when the head is not attached.")); + if (!train(lc, false)) + train(lc, true); + } + } + } + } + } + + else if (hal->getPendingTestRequestPhyCompliance()) + { + hal->setTestResponse(handlePhyPatternRequest()); + } + } + + // Handle MCCS_IRQ + if (hal->intteruptMCCS()) + { + DP_LOG(("DP> MCCS_IRQ")); + handleMCCSIRQ(); + hal->clearInterruptMCCS(); + } + + if (hal->getHdmiLinkStatusChanged()) + { + DP_LOG(("DP> HDMI Link Status Changed")); + handleHdmiLinkStatusChanged(); + } + + // + // Check to make sure sink is not in D3 low power mode + // and interlane alignment is good, etc + // if not - trigger training + // + if (!isLinkInD3() && isLinkLost()) + { + // If the link status of a VRR monitor has changed, we need to check the enablement again. + if (hal->getLinkStatusChanged()) + { + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + + if ((dev->plugged) && (dev->activeGroup != NULL) && (dev->isVrrMonitorEnabled())) + { + // Trigger the full enablement, if the monitor is in locked state. + NvU8 retries = VRR_MAX_RETRIES; + if (!dev->isVrrDriverEnabled()) + { + DP_LOG(("DP> VRR enablement state is not synced. Re-enable it.")); + do + { + if (!dev->startVrrEnablement()) + { + continue; + } + else + break; + }while(--retries); + + if (!retries) + { + DP_LOG(("DP> VRR enablement failed on multiple retries.")); + } + } + } + } + } + + DP_LOG(("DP> Link not alive, Try to restore link configuration")); + + if (trainSingleHeadMultipleSSTLinkNotAlive(getActiveGroupForSST())) + { + return; + } + //save the previous highest assessed LC + LinkConfiguration previousAssessedLC = highestAssessedLC; + + assessLink(); + + //If the highest assessed LC has changed, send notification + if(highestAssessedLC != previousAssessedLC) + { + DeviceImpl * dev = findDeviceInList(Address()); + if (dev) + { + sink->bandwidthChangeNotification(dev, false); + } + } + } +} + +bool ConnectorImpl::detectSinkCountChange() +{ + if (this->linkUseMultistream()) + return false; + + DeviceImpl * existingDev = findDeviceInList(Address()); + if (!existingDev) + return false; + + // detect a zero to non-zero sink count change or vice versa + bool hasSink = !!(hal->getSinkCount()); + return ((existingDev->videoSink || existingDev->audioSink) != hasSink); +} + +bool ConnectorImpl::setPreferredLinkConfig(LinkConfiguration & lc, bool commit, + bool force, LinkTrainingType trainType) +{ + bool bEnteredFlushMode; + Device *dev; + + dev = enumDevices(0); + DeviceImpl * nativeDev = (DeviceImpl *)dev; + if (preferredLinkConfig.lanes || preferredLinkConfig.peakRate || preferredLinkConfig.minRate) + DP_ASSERT(0 && "Missing reset call for a preveious set preferred call"); + + if (lc.bEnableFEC && + ((nativeDev && !nativeDev->isFECSupported()) || (!this->isFECSupported()))) + { + DP_ASSERT(0 && "Client requested to enable FEC but either panel or GPU doesn't support FEC"); + return false; + } + + if (!validateLinkConfiguration(lc)) + { + DP_LOG(("Client requested bad LinkConfiguration.")); + return false; + } + + preferredLinkConfig = lc; + preferredLinkConfig.enhancedFraming = hal->getEnhancedFraming(); + 
preferredLinkConfig.multistream = this->linkUseMultistream(); + preferredLinkConfig.policy = this->linkPolicy; + if (force) + { + // Do flushmode + if (!(bEnteredFlushMode = this->enableFlush())) + DP_ASSERT(0 && "Flush fails"); + if (this->train(preferredLinkConfig, false)) + activeLinkConfig = preferredLinkConfig; + if (bEnteredFlushMode) + this->disableFlush(true); + } + else + { + if (commit) + { + assessLink(trainType); + } + } + return true; +} + +bool ConnectorImpl::resetPreferredLinkConfig(bool force) +{ + preferredLinkConfig = LinkConfiguration(); + if (force) + assessLink(); + return true; +} + +bool ConnectorImpl::isAcpiInitDone() +{ + return (hal->getSupportsMultistream() ? false : bAcpiInitDone); +} + +void ConnectorImpl::notifyAcpiInitDone() +{ + Edid ddcReadEdid; + + // Initiate the EDID Read mechanism only if it is in SST mode & plugged + if (!hal->getSupportsMultistream() && previousPlugged) + { + // Read EDID using RM Control call - NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 + if (EdidReadSST(ddcReadEdid, auxBus, timer, false, true, main)) + { + // Fill the data in device's ddcEdid & mark ACPI Init done + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DP_LOG(("DPCONN> ACPI Init Done. DDC EDID Read completed!!")); + + DeviceImpl * dev = (DeviceImpl*)i; + dev->ddcEdid = ddcReadEdid; + + this->bAcpiInitDone = true; + break; + } + } + } + + return; +} + +bool ConnectorImpl::getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12) +{ + hdcpAbortCodesDP12 = 0; + + return false; +} + +bool ConnectorImpl::hdcpValidateKsv(const NvU8 *ksv, NvU32 Size) +{ + + if (HDCP_KSV_SIZE <= Size) + { + NvU32 i, j; + NvU32 count_ones = 0; + for (i=0; i < HDCP_KSV_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (ksv[i] & (1 <<(j))) + { + count_ones++; + } + } + } + + if (count_ones == 20) + { + return true; + } + } + return false; +} + +void ConnectorImpl::cancelHdcpCallbacks() +{ + this->isHDCPReAuthPending = false; + this->isHDCPAuthTriggered = false; + this->authRetries = 0; + + timer->cancelCallback(this, &tagHDCPReauthentication); // Cancel any queue the auth callback. + timer->cancelCallback(this, &tagDelayedHdcpCapRead); // Cancel any HDCP cap callbacks. 
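+ // Also cancel any HDCP callbacks still pending on the individual groups.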
+
+
+ for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next)
+ {
+ GroupImpl * group = (GroupImpl *)i;
+ group->cancelHdcpCallbacks();
+ }
+}
+
+// Create a new Group
+Group * ConnectorImpl::newGroup()
+{
+ Group * g = new GroupImpl(this);
+ if (g)
+ {
+ inactiveGroups.insertBack((GroupImpl*)g);
+ }
+ return g;
+}
+
+// Create a new firmware Group
+Group * ConnectorImpl::createFirmwareGroup()
+{
+ Group * g = new GroupImpl(this, true);
+ if (g)
+ {
+ inactiveGroups.insertBack((GroupImpl*)g);
+ }
+ return g;
+}
+
+// Shut down and destroy the connector manager
+void ConnectorImpl::destroy()
+{
+ delete this;
+}
+
+void ConnectorImpl::createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize)
+{
+ if (!buffer)
+ return;
+
+ // Return immediately if DSC is not supported
+ if (FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DSC_SUPPORT, _YES, buffer[0]) != 1)
+ return;
+
+ DeviceImpl * existingDev = findDeviceInList(Address());
+
+ // Return immediately if we already have a device
+ if (existingDev)
+ {
+ return;
+ }
+
+ DeviceImpl *newDev = new DeviceImpl(hal, this, NULL);
+ if (!newDev)
+ {
+ return;
+ }
+
+ newDev->connectorType = connectorDisplayPort;
+ newDev->plugged = true;
+ newDev->videoSink = true;
+ newDev->bIsFakedMuxDevice = true;
+ newDev->bIsPreviouslyFakedMuxDevice = false;
+
+ // Initialize DSC state
+ newDev->dscCaps.bDSCSupported = true;
+ newDev->parseDscCaps(buffer, bufferSize);
+ dpMemCopy(newDev->rawDscCaps, buffer, DP_MIN(bufferSize, 16));
+ newDev->bDSCPossible = true;
+ newDev->devDoingDscDecompression = newDev;
+
+ populateAllDpConfigs();
+ deviceList.insertBack(newDev);
+ sink->newDevice(newDev);
+ sink->notifyDetectComplete();
+}
+
+void ConnectorImpl::deleteFakeMuxDevice()
+{
+ DeviceImpl * existingDev = findDeviceInList(Address());
+ if (!existingDev)
+ return;
+
+ // If this is not a fake device then don't delete it
+ if (!existingDev->isPreviouslyFakedMuxDevice())
+ return;
+
+ existingDev->markDeviceForDeletion();
+ notifyLongPulse(false);
+
+ return;
+}
+
+bool ConnectorImpl::getRawDscCaps(NvU8 *buffer, NvU32 bufferSize)
+{
+ DeviceImpl * existingDev = findDeviceInList(Address());
+ if (!existingDev)
+ return false;
+
+ return existingDev->getRawDscCaps(buffer, bufferSize);
+}
+
+bool ConnectorImpl::isMultiStreamCapable()
+{
+ return main->hasMultistream();
+}
+
+bool ConnectorImpl::isFlushSupported()
+{
+ return true;
+}
+
+bool ConnectorImpl::isStreamCloningEnabled()
+{
+ return main->isStreamCloningEnabled();
+}
+
+bool ConnectorImpl::isFECSupported()
+{
+ return main->isFECSupported();
+}
+
+bool ConnectorImpl::isFECCapable()
+{
+ DeviceImpl *dev;
+
+ for (Device * i = enumDevices(0); i; i = enumDevices(i))
+ {
+ dev = (DeviceImpl *)i;
+ // If it's SST, or if it's the first connected branch.
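+ // (An address of size 1 denotes the device attached directly to this connector.)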
+ if (!this->linkUseMultistream() || dev->address.size() == 1)
+ {
+ return (dev->getFECSupport() && this->isFECSupported());
+ }
+ }
+ return false;
+}
+
+NvU32 ConnectorImpl::maxLinkRateSupported()
+{
+ return main->maxLinkRateSupported();
+}
+
+Connector * DisplayPort::createConnector
+(
+ MainLink * main,
+ AuxBus * aux,
+ Timer * timer,
+ Connector::EventSink * sink
+)
+{
+ ConnectorImpl *connector = new ConnectorImpl(main, aux, timer, sink);
+
+ if (connector == NULL || connector->constructorFailed) {
+ delete connector;
+ return NULL;
+ }
+
+ if (main->getRegkeyValue(NV_DP_REGKEY_ENABLE_OCA_LOGGING))
+ {
+ main->retrieveRingBuffer(LOG_CALL, MAX_RECORD_COUNT);
+ main->retrieveRingBuffer(ASSERT_HIT, MAX_RECORD_COUNT);
+ }
+
+ return connector;
+}
+
+void ConnectorImpl::setAllowMultiStreaming(bool bAllowMST)
+{
+ //
+ // hal->getMultiStreamCapOverride() returns true if the MST cap has been
+ // overridden to SST.
+ //
+ if (!hal->getMultiStreamCapOverride() == bAllowMST)
+ return;
+
+ if (previousPlugged &&
+ getSinkMultiStreamCap() &&
+ !activeGroups.isEmpty() && linkUseMultistream() != bAllowMST)
+ {
+ DP_ASSERT(!"If the connected sink is MST capable: "
+ "the client should detach all active MST video/audio streams "
+ "before disallowing MST; vice versa, the client should detach "
+ "the active SST stream before allowing MST.");
+ }
+
+ //
+ // Disable MST messaging if the client has disallowed MST;
+ // notifyLongPulseInternal() re-enables MST messaging when the client
+ // allows MST.
+ //
+ if (previousPlugged && linkUseMultistream() && !bAllowMST)
+ hal->setMessagingEnable(
+ false /* _uprequestEnable */, true /* _upstreamIsSource */);
+
+ hal->overrideMultiStreamCap(bAllowMST /* mstCapable */ );
+
+ // Re-detect the already connected sink to keep software state in sync
+ if (previousPlugged && getSinkMultiStreamCap())
+ {
+ isHDCPAuthOn = isDP12AuthCap = false;
+ notifyLongPulseInternal(true);
+ }
+}
+
+bool ConnectorImpl::getAllowMultiStreaming(void)
+{
+ //
+ // hal->getMultiStreamCapOverride() returns true if the MST cap has been
+ // overridden to SST.
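+ // Allowing MST is therefore simply the absence of that override.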
+ // + return !hal->getMultiStreamCapOverride(); +} + +bool ConnectorImpl::getSinkMultiStreamCap(void) +{ + return hal->getDpcdMultiStreamCap(); +} + +void ConnectorImpl::setDp11ProtocolForced() +{ + if (!this->linkUseMultistream()) + { + return; + } + + this->notifyLongPulse(false); + hal->setMessagingEnable(false, true); + hal->setMultistreamLink(false); + hal->overrideMultiStreamCap(false /*no mst*/); + this->notifyLongPulse(true); +} + +void ConnectorImpl::resetDp11ProtocolForced() +{ + if (this->linkUseMultistream()) + { + return; + } + + this->notifyLongPulse(false); + hal->overrideMultiStreamCap(true /*mst capable*/); + this->notifyLongPulse(true); +} + +bool ConnectorImpl::isDp11ProtocolForced() +{ + return hal->getMultiStreamCapOverride(); +} + +bool ConnectorImpl::getTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern) +{ + return (main->getDpTestPattern(testPattern)); +} + +bool ConnectorImpl::setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride) +{ + return (main->setDpTestPattern(testPattern, laneMask, cstm, bIsHBR2, bSkipLaneDataOverride)); +} + +bool ConnectorImpl::getLaneConfig(NvU32 *numLanes, NvU32 *data) +{ + return (main->getDpLaneData(numLanes, data)); +} + +bool ConnectorImpl::setLaneConfig(NvU32 numLanes, NvU32 *data) +{ + return (main->setDpLaneData(numLanes, data)); +} + +void ConnectorImpl::getCurrentLinkConfig(unsigned & laneCount, NvU64 & linkRate) +{ + main->getLinkConfig(laneCount, linkRate); +} + +unsigned ConnectorImpl::getPanelDataClockMultiplier() +{ + LinkConfiguration linkConfig = getMaxLinkConfig(); + return getDataClockMultiplier(linkConfig.peakRatePossible, linkConfig.lanes); +} + +unsigned ConnectorImpl::getGpuDataClockMultiplier() +{ + unsigned laneCount; + NvU64 linkRate; + // Need to get the GPU caps, not monitor caps. 
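+ // (laneCount_4, the GPU-side maximum lane count, is assumed below.)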
+ linkRate = maxLinkRateSupported();
+
+ laneCount = laneCount_4;
+
+ return getDataClockMultiplier(linkRate, laneCount);
+}
+
+void ConnectorImpl::configurePowerState(bool bPowerUp)
+{
+ main->configurePowerState(bPowerUp);
+}
+
+bool ConnectorImpl::readPsrState(vesaPsrState *psrState)
+{
+ return hal->readPsrState(psrState);
+}
+
+void ConnectorImpl::readPsrCapabilities(vesaPsrSinkCaps *caps)
+{
+ hal->readPsrCapabilities(caps);
+}
+
+bool ConnectorImpl::readPsrConfiguration(vesaPsrConfig *psrConfig)
+{
+ return hal->readPsrConfiguration(psrConfig);
+}
+
+bool ConnectorImpl::updatePsrConfiguration(vesaPsrConfig config)
+{
+ return hal->updatePsrConfiguration(config);
+}
+
+bool ConnectorImpl::readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState)
+{
+ return hal->readPsrDebugInfo(psrDbgState);
+}
+
+bool ConnectorImpl::writePsrErrorStatus(vesaPsrErrorStatus psrErr)
+{
+ return hal->writePsrErrorStatus(psrErr);
+}
+
+bool ConnectorImpl::readPsrErrorStatus(vesaPsrErrorStatus *psrErr)
+{
+ return hal->readPsrErrorStatus(psrErr);
+}
+
+bool ConnectorImpl::writePsrEvtIndicator(vesaPsrEventIndicator psrEvt)
+{
+ return hal->writePsrEvtIndicator(psrEvt);
+}
+
+bool ConnectorImpl::readPsrEvtIndicator(vesaPsrEventIndicator *psrEvt)
+{
+ return hal->readPsrEvtIndicator(psrEvt);
+}
+
+bool ConnectorImpl::updatePsrLinkState(bool bTrainLink)
+{
+ bool bRet = true;
+ if (bTrainLink)
+ {
+ // Bug 3438892: if the panel has turned off the receiver on its side,
+ // force the panel link on by writing 600h = 1.
+ if (this->isLinkLost())
+ {
+ hal->setPowerState(PowerStateD0);
+ return false;
+ }
+
+ // Check if the link config is valid
+ if (!this->psrLinkConfig.isValid())
+ {
+ return false;
+ }
+ // Restore the link config / do link training
+ bRet = setPreferredLinkConfig(this->psrLinkConfig, false, true, NORMAL_LINK_TRAINING);
+ }
+ else
+ {
+ // Save the link config
+ this->psrLinkConfig = getActiveLinkConfig();
+ }
+ return bRet;
+}
+
+bool ConnectorImpl::handlePhyPatternRequest()
+{
+
+ bool status = true;
+ PatternInfo pattern_info;
+
+ pattern_info.lqsPattern = hal->getPhyTestPattern();
+
+ // Get the lane count from the most recent link training
+ unsigned requestedLanes = this->activeLinkConfig.lanes;
+
+ if (pattern_info.lqsPattern == LINK_QUAL_80BIT_CUST)
+ {
+ hal->getCustomTestPattern((NvU8 *)&pattern_info.ctsmLower);
+ }
+
+ // Send the control call to RM for the pattern
+ if (!main->physicalLayerSetTestPattern(&pattern_info))
+ {
+ DP_ASSERT(0 && "Could not set the PHY_TEST_PATTERN");
+ status = false;
+ }
+ else
+ {
+ if (AuxRetry::ack != hal->setLinkQualPatternSet(pattern_info.lqsPattern, requestedLanes))
+ {
+ DP_ASSERT(0 && "Could not set the LINK_QUAL_PATTERN");
+ status = false;
+ }
+ }
+ return status;
+}
+
+//
+// This function is used to send a DP test message.
+// requestSize indicates the size of the buffer pointed to by pBuffer.
+//
+DP_TESTMESSAGE_STATUS ConnectorImpl::sendDPTestMessage
+(
+ void *pBuffer,
+ NvU32 requestSize,
+ NvU32 *pDpStatus
+)
+{
+ if (messageManager)
+ {
+ testMessage.setupTestMessage(messageManager, this);
+ return testMessage.sendDPTestMessage(pBuffer, requestSize, pDpStatus);
+ }
+ else
+ {
+ return DP_TESTMESSAGE_STATUS_ERROR;
+ }
+}
+
+//
+// This function is designed to be called twice: the first time with
+// pStreamIDs NULL to get the number of streams.
+// The second time, the user calls the function with an allocated buffer.
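+ // A hypothetical caller sketch (variable names assumed):
+ //   NvU32 count = 0;
+ //   connector->getStreamIDs(NULL, &count);  // first call: query the stream count
+ //   NvU32 *ids = new NvU32[count];
+ //   connector->getStreamIDs(ids, &count);   // second call: fill the buffer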
+// +DP_TESTMESSAGE_STATUS ConnectorImpl::getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount) +{ + DP_TESTMESSAGE_STATUS ret; + + NvU32 streamCnt = activeGroups.size(); + if (NULL == pStreamIDs) + { + ret = DP_TESTMESSAGE_STATUS_SUCCESS; + } + else if (*pCount >= streamCnt) + { + NvU32 n = 0; + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)i; + pStreamIDs[n++] = group->streamIndex; + } + ret = DP_TESTMESSAGE_STATUS_SUCCESS; + } + else + { + //buffer size not enough, the return value will be mapped and returned to nvapi + ret = DP_TESTMESSAGE_STATUS_ERROR_INSUFFICIENT_INPUT_BUFFER; + } + + *pCount = streamCnt; + + return ret; +} + +void ConnectorImpl::notifyGPUCapabilityChange() +{ + // Query current GPU capabilities. + main->queryGPUCapability(); + +} + +void ConnectorImpl::notifyHBR2WAREngage() +{ + bool peakBwChanged = false; + LinkConfiguration preLc = getMaxLinkConfig(); + // Update GPU capabilities + this->notifyGPUCapabilityChange(); + LinkConfiguration postLc = getMaxLinkConfig(); + + peakBwChanged = (preLc.peakRatePossible != postLc.peakRatePossible); + + if (this->previousPlugged && peakBwChanged) + { + // Set caps change status to make sure device becomes zombie + this->bMitigateZombie = true; + + if (this->policyModesetOrderMitigation) + { + this->modesetOrderMitigation = true; + } + // NEED TO CHECK. MAY GO AFTER LONGPULSE TRUE ???? + // If multistream, delete the MST slots allocation in Branch device + if (this->linkUseMultistream()) + this->deleteAllVirtualChannels(); + + // Disconnect the device + this->notifyLongPulse(false); + + // Connect the device again + this->notifyLongPulse(true); + } + +} + +bool ConnectorImpl::isLinkAwaitingTransition() +{ + return this->linkAwaitingTransition; +} + +void ConnectorImpl::configInit() +{ + // Reset branch specific flags + bKeepOptLinkAlive = 0; + bNoFallbackInPostLQA = 0; + LT2FecLatencyMs = 0; + bDscCapBasedOnParent = false; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp new file mode 100644 index 0000000..32e26f5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort*********************************\ +* * +* Module: dp_crc.cpp * +* CRC Algorithms for the messaging subsystem. * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_crc.h" +using namespace DisplayPort; + +// +// DP CRC for transactions headers +// +unsigned DisplayPort::dpCalculateHeaderCRC(BitStreamReader * reader) +{ + unsigned remainder = 0; + unsigned bit, i; + + while (reader->read(&bit, 1)) + { + remainder <<= 1; + remainder |= bit; + if ((remainder & 0x10) == 0x10) + { + remainder ^= 0x13; + } + } + + for (i = 4; i != 0; i--) + { + remainder <<= 1; + if ((remainder & 0x10) != 0) + { + remainder ^= 0x13; + } + } + + return remainder & 0xF; +} + +// +// DP CRC for body +// +unsigned DisplayPort::dpCalculateBodyCRC(BitStreamReader * reader) +{ + unsigned remainder = 0; + unsigned bit, i; + + while (reader->read(&bit, 1)) + { + remainder <<= 1; + remainder |= bit; + if ((remainder & 0x100) == 0x100) + { + remainder ^= 0xD5; + } + } + + for (i = 8; i != 0; i--) + { + remainder <<= 1; + if ((remainder & 0x100) != 0) + { + remainder ^= 0xD5; + } + } + + return remainder & 0xFF; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp new file mode 100644 index 0000000..bf509c0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp @@ -0,0 +1,2675 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_deviceimpl.cpp * +* DP device implementation * +* * +\***************************************************************************/ + +#include "dp_connectorimpl.h" +#include "dp_deviceimpl.h" +#include "dp_auxdefs.h" +#include "dp_groupimpl.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +using namespace DisplayPort; + +bool DeviceImpl::isMustDisconnect() +{ + // + // Device is must disconnect if we're trying to make an SST<->MST transition + // + if ((this->isActive()) && connector->linkAwaitingTransition) + { + return true; + } + + return false; +} + +DeviceImpl::~DeviceImpl() +{ + if (isDeviceHDCPDetectionAlive && deviceHDCPDetection) + { + delete deviceHDCPDetection; + deviceHDCPDetection = nullptr; + } + + if (vrrEnablement) + { + delete vrrEnablement; + vrrEnablement = NULL; + } + + // Unlink this node from its children + for (unsigned int i = 0; i < sizeof(children)/sizeof(*children); i++) + if (children[i]) + children[i]->parent = 0; + + // Unlink this node from its parent when it's there + if (parent && (parent->children[this->address.tail()] == this)) + parent->children[this->address.tail()] = 0; + + devDoingDscDecompression = NULL; +} + + +DeviceImpl::DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent) + : parent(parent), + hal(hal), + activeGroup(0), + connector(connector), + address(), + bVirtualPeerDevice(false), + plugged(false), + friendlyAux(this), + isHDCPCap(False), + isDeviceHDCPDetectionAlive(false), + deviceHDCPDetection(0), + vrrEnablement(0), + bIsFakedMuxDevice(false), + bIsPreviouslyFakedMuxDevice(false), + bisMarkedForDeletion(false), + bSdpExtCapable(Indeterminate) +{ + bandwidth.enum_path.dataValid = false; + shadow.plugged = false; + shadow.zombie = false; + shadow.cableOk = true; + shadow.hdcpCapDone = false; + shadow.highestAssessedLC = connector->highestAssessedLC; + dpMemZero(rawDscCaps, sizeof(rawDscCaps)); +} + +bool DeviceImpl::isZombie() +{ + // You can't be a zombie if nothing is attached + if (!(this->isActive())) + return false; + + if (!plugged) + return true; + + if (isMustDisconnect()) + return true; + + if (!isMultistream()) + { + if (connector->bMitigateZombie) + return true; + + return !connector->willLinkSupportModeSST(connector->highestAssessedLC, + ((GroupImpl*)activeGroup)->lastModesetInfo); + } + else + { + return !this->payloadAllocated; + } +} + +bool DeviceImpl::isCableOk() +{ + if (hal->isDpcdOffline()) + { + // Just say that the cable is ok since we do not have anything connected + return true; + } + else + { + return ! (connector->highestAssessedLC.peakRate < connector->getMaxLinkConfig().peakRate && + connector->highestAssessedLC.lanes < connector->getMaxLinkConfig().lanes); + } +} + +bool DeviceImpl::isLogical() +{ + if (this->address.size() == 0) + return false; + + DP_ASSERT((this->address.tail() <= LOGICAL_PORT_END) && "Invalid port number"); + + // Logical port numbers of a branching unit are from Port 0x08 up to Port 0xF + if (this->address.tail() >= LOGICAL_PORT_START) + return true; + + return false; +} + +bool DeviceImpl::isPendingNewDevice() +{ + if (shadow.plugged == plugged) + return false; + + if (!plugged) + return false; + + // Delay the newDevice event till all enabled heads are not detached. 
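+ // (With modeset-order mitigation active, wait until the client has detached the enabled heads.)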
+ if (connector->policyModesetOrderMitigation && connector->modesetOrderMitigation) + return false; + + return !connector->linkAwaitingTransition; +} + +bool DeviceImpl::isPendingLostDevice() +{ + // marked for lazy exit..to be done now. + if (complianceDeviceEdidReadTest && lazyExitNow) + return true; + + if (isZombie()) + return false; + + if (shadow.plugged == plugged) + return false; + + return !plugged; +} + +bool DeviceImpl::isPendingZombie() +{ + if (isZombie() && !shadow.zombie) + return true; + else if (!isZombie() && shadow.zombie && plugged) + return (connector->policyModesetOrderMitigation ? false : true); + return false; +} + +bool DeviceImpl::isPendingHDCPCapDone() +{ + if ((isHDCPCap != Indeterminate) && !shadow.hdcpCapDone) + return true; + else + return false; +} + +bool DeviceImpl::isPendingCableOk() +{ + return isCableOk() != shadow.cableOk; +} + +bool DeviceImpl::isPendingBandwidthChange() +{ + return shadow.highestAssessedLC != connector->highestAssessedLC; +} + +bool DeviceImpl::getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot) +{ + unsigned dataCompleted, sizeRemaining; + DisplayPort::AuxBus::status status; + Type transactionType; + + if (!buffer || !sizeCompleted) + return false; + + dataCompleted = 0; + *sizeCompleted = 0; + do + { + sizeRemaining = (sizeRequested - *sizeCompleted); + if ((this->address.size() < 2) && (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE)) + { + + // + // SST case + // if the transaction buffer is a multiple of 16 bytes (NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE). + // Break it to 16 bytes boundary (HW default) and the first transaction sets the middle of + // transaction bit (MOT). This will mark all the subsequent reads are all of a part of the + // same transaction (I2C restart). + // + status = transaction(AuxBus::read, AuxBus::i2cMot, offset, buffer + *sizeCompleted, + NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE, &dataCompleted); + } + else if (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE) + { + + // + // MST case + // For i2c transactions over MST devices, if the transaction buffer is divided into + // 16 bytes chunks, then read index keeps getting reset for subsequent 16B fetch. + // Refer Bug: 1233042. + // + status = transaction(AuxBus::read, AuxBus::i2cMot, offset, buffer + *sizeCompleted, + sizeRemaining, &dataCompleted); + } + else + { + // + // clear the MOT if it is a single transaction or the last bytes of + // a large, multiple of 16 bytes buffer (end of transaction). + // Note that for some customer specific needs they might force MOT bit + // when it shouldn't be set. So check if client forced the MOT bit and honour that. + // + transactionType = bForceMot ? AuxBus::i2cMot : AuxBus::i2c; + status = transaction(AuxBus::read, transactionType, offset, buffer + *sizeCompleted, + sizeRemaining, &dataCompleted); + } + + if (status != AuxBus::success) + { + DP_LOG(("DPDEV> %s: Failed read transaction", __FUNCTION__)); + break; + } + + if (dataCompleted == 0) + { + // Successfully read 0 bytes? 
Break out + break; + } + *sizeCompleted += dataCompleted; + } + while (*sizeCompleted < sizeRequested); + + return (status == AuxBus::success); +} + +bool DeviceImpl::setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot) +{ + unsigned dataCompleted, sizeRemaining; + DisplayPort::AuxBus::status status; + Type transactionType; + + if (!buffer || !sizeCompleted) + return false; + + dataCompleted = 0; + *sizeCompleted = 0; + + // + // If the hop count is one, we're asking for DPCD to the root node. + // If hop count is zero, this is a DP 1.1 target. + // Hop Count Greater than or equal 2 is when we have a single or multiple branch + // device/s. This signifies REMOTE_I2C_WRITE transaction case. + // Here we should not divide the data to 16 byte boundary as if we + // do, the branch device will not know that it needs to set MOT=1. + // So we send the entire data up to a max payload of 255 Bytes. + // Please refer Bug 1964453 for more information. + // + if ((this->address.size() >= 2) && + (sizeRequested > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE)) + { + status = transaction(AuxBus::write, AuxBus::i2cMot, offset, buffer, + sizeRequested, &dataCompleted); + + if (status != AuxBus::success) + { + DP_LOG(("DPDEV> %s: Failed write transaction", __FUNCTION__)); + return false; + } + *sizeCompleted = dataCompleted; + DP_ASSERT(*sizeCompleted >= sizeRequested); + return (status == AuxBus::success); + } + + do + { + sizeRemaining = (sizeRequested - *sizeCompleted); + if (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE) + { + + // + // if the transaction buffer is a multiple of 16 bytes (NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE). + // Break it to 16 bytes boundary (HW default) and the first transaction sets the middle of + // transaction bit (MOT). This will mark all the subsequent writes are all of a part of the + // same transaction (I2C restart). + // + status = transaction(AuxBus::write, AuxBus::i2cMot, offset, buffer + *sizeCompleted, + NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE, &dataCompleted); + } + else + { + // + // clear the MOT if it is a single transaction or the last bytes of + // a large, multiple of 16 bytes buffer (end of transaction). + // Note that for some customer specific needs they might force MOT bit + // when it shouldn't be set. So check if client forced the MOT bit and honour that. + // + transactionType = bForceMot ? AuxBus::i2cMot : AuxBus::i2c; + status = transaction(AuxBus::write, transactionType, offset, buffer + *sizeCompleted, + sizeRemaining, &dataCompleted); + } + + if (status != AuxBus::success) + { + DP_LOG(("DPDEV> %s: Failed write transaction", __FUNCTION__)); + break; + } + + if (dataCompleted == 0) + { + // Successfully read 0 bytes? Break out + break; + } + *sizeCompleted += dataCompleted; + } while (*sizeCompleted < sizeRequested); + + return (status == AuxBus::success); +} + +AuxBus::status DeviceImpl::getDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason) +{ + if (!buffer || !sizeCompleted) + { + // default param may be NULL + if (pNakReason) *pNakReason = NakUndefined; + return AuxBus::nack; + } + + // + // Remote DPCD doesn't work for Peer Device 4 i.e. DP-to-Legacy Dongle. 
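+ // (a plain legacy dongle exposes no DPCD of its own through sideband messages)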
+ // But if a virtual DP peer device with Protocol Converter functionality + // populates the DPCD_Revision field of the LINK_ADDRESS Message reply + // then allow DPCD transaction + // + if ((this->peerDevice == Dongle) && (this->dpcdRevisionMajor == 0)) + { + if (pNakReason) *pNakReason = NakBadParam; + return AuxBus::nack; + } + + return (transaction(AuxBus::read, AuxBus::native, offset, buffer, + sizeRequested, sizeCompleted, pNakReason)); +} + +AuxBus::status DeviceImpl::setDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason) +{ + if (!buffer || !sizeCompleted) + { + // default param may be NULL + if (pNakReason) *pNakReason = NakUndefined; + return AuxBus::nack; + } + + // + // Remote DPCD doesn't work for Peer Device 4 i.e. DP-to-Legacy Dongle + // But if a virtual DP peer device with Protocol Converter functionality + // populates the DPCD_Revision field of the LINK_ADDRESS Message reply + // then allow DPCD transaction + // + if ((this->peerDevice == Dongle) && (this->dpcdRevisionMajor == 0)) + { + if (pNakReason) *pNakReason = NakBadParam; + return AuxBus::nack; + } + + return (transaction(AuxBus::write, AuxBus::native, offset, buffer, + sizeRequested, sizeCompleted, pNakReason)); +} + +AuxBus::status DeviceImpl::queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) +{ + if (!fecStatus || !fecErrorCount) + { + return AuxBus::nack; + } + + return (fecTransaction(fecStatus, fecErrorCount, flags)); +} + +DscCaps DeviceImpl::getDscCaps() +{ + return dscCaps; +} + +// +// This function returns the device itself or its parent device that is doing +// DSC decompression for it. +// +Device* DeviceImpl::getDevDoingDscDecompression() +{ + return devDoingDscDecompression; +} + +bool DeviceImpl::getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) +{ + if (bufferSize < sizeof(rawDscCaps)) + return false; + + dpMemCopy(buffer, &rawDscCaps, sizeof(rawDscCaps)); + return true; +} + +AuxBus::status DeviceImpl::transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions) +{ + // In case of default implementation, the reason for transaction failure + // must be stored somewhere + unsigned defaultReason; + if (!pNakReason) pNakReason = &defaultReason; + // default failure reason is undefined + *pNakReason = NakUndefined; + + if (type == AuxBus::i2c || type == AuxBus::i2cMot) + { + address >>= 1; // right shifted DDC Address (request identifier in spec) + } + + // If the hop count is one, we're asking for DPCD to the root node. + // If hop count is zero, this is a DP 1.1 target. + if (this->address.size() >= 2) + { + NakData nak; + + if (connector == NULL || connector->messageManager == NULL) + { + return AuxBus::nack; + } + + if (action == AuxBus::read && type == AuxBus::native) + { + RemoteDpcdReadMessage read; + read.set(this->address.parent(), this->address.tail(), address, sizeRequested); + if (!connector->messageManager->send(&read, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakDefer) + return AuxBus::defer; + if (nak.reason == NakDpcdFail) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. 
This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = read.replyNumOfBytesReadDPCD(); + + if (*sizeCompleted > sizeRequested) { + DP_LOG(("DPDEV> DPCD Read return more data than requested. Clamping buffer to requested size!")); + *sizeCompleted = sizeRequested; + } + + dpMemCopy(buffer, read.replyGetData(), *sizeCompleted); + + return AuxBus::success; + } + else if ((action == AuxBus::read) && ((type == AuxBus::i2c) || (type == AuxBus::i2cMot))) + { + bool isNoStopBit = (type == AuxBus::i2cMot) ? 1:0; + RemoteI2cReadMessage remoteI2cRead; + I2cWriteTransaction i2cWriteTransactions[1]; + i2cWriteTransactions[0] = I2cWriteTransaction(address, + 0, + &offset, + isNoStopBit); + + if (nWriteTransactions > 1) + { + DP_LOG(("DPDEV> Set function will fail for transactions > 1, please incraease the array size!")); + return AuxBus::nack; + } + + remoteI2cRead.set(this->address.parent(), // topology Address + nWriteTransactions, // number of write transactions + this->address.tail(), // port of Device + i2cWriteTransactions, // list of write transactions + address, // right shifted DDC Address (request identifier in spec) + sizeRequested); // requested size + + if (!connector->messageManager->send(&remoteI2cRead, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakI2cNak) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = remoteI2cRead.replyNumOfBytesReadI2C(); + + if (*sizeCompleted > sizeRequested) { + DP_LOG(("DPDEV> I2C Read return more data than requested. Clamping buffer to requested size!")); + *sizeCompleted = sizeRequested; + } + + dpMemCopy(buffer, remoteI2cRead.replyGetI2CData(sizeCompleted), *sizeCompleted); + + return AuxBus::success; + } + else if (action == AuxBus::write && type == AuxBus::native) + { + RemoteDpcdWriteMessage write; + write.set(this->address.parent(), this->address.tail(), address, sizeRequested, buffer); + + if (!connector->messageManager->send(&write, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakDefer) + return AuxBus::defer; + if (nak.reason == NakDpcdFail) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = sizeRequested; + + return AuxBus::success; + } + else if ((action == AuxBus::write) && ((type == AuxBus::i2c) || (type == AuxBus::i2cMot))) + { + RemoteI2cWriteMessage remoteI2cWrite; + + remoteI2cWrite.set(this->address.parent(), // topology Address + this->address.tail(), // port of Device + address, // right shifted DDC Address (request identifier in spec) + sizeRequested, + buffer); + + if (!connector->messageManager->send(&remoteI2cWrite, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakI2cNak) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. 
This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = sizeRequested; + + return AuxBus::success; + } + else + { + DP_ASSERT(0 && "Only aux native and i2c reads and writes supported"); + return AuxBus::nack; + } + } + else + { + return this->connector->auxBus->transaction(action, type, address, buffer, + sizeRequested, sizeCompleted, pNakReason); + } +} + +unsigned DeviceImpl::transactionSize() +{ + // + // Remote (DP 1.2) sinks can read much larger chunks at once due to messaging. + // + if (this->address.size() >= 2) + return 255; + else + return this->connector->auxBus->transactionSize(); +} + +static AuxBus::status _QueryFecStatus +( + DeviceImpl *bus, + NvU8 *pStatus +) +{ + AuxBus::status status = AuxBus::success; + + NvU32 addr = NV_DPCD14_FEC_STATUS; + unsigned size = 1; + + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + + status = bus->getDpcdData(addr, pStatus, size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_LOG(("DP> Error querying FEC status!")); + return AuxBus::nack; + } + return AuxBus::success; +} + +static AuxBus::status _QueryFecErrorCount +( + DeviceImpl *bus, + NvU16 *pErrorCount +) +{ + AuxBus::status status = AuxBus::success; + NvU32 addr = NV_DPCD14_FEC_ERROR_COUNT; + unsigned size = 2; + + unsigned sizeCompleted = 0; + NvU8 cnt[2] = {0, 0}; + unsigned pNakReason = 0; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_LOG(("DP> Error querying FEC error count!")); + return AuxBus::nack; + } + else + { + *pErrorCount = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + } + return AuxBus::success; +} + +static AuxBus::status _WriteFecConfiguration +( + DeviceImpl *bus, + NvU8 configuration +) +{ + AuxBus::status status = AuxBus::success; + + NvU32 addr = NV_DPCD14_FEC_CONFIGURATION; + unsigned size = 1; + + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + + status = bus->setDpcdData(addr, &configuration, size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_LOG(("DP> Error setting FEC configuration!")); + return AuxBus::nack; + } + return AuxBus::success; +} + +AuxBus::status DeviceImpl::fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) +{ + AuxBus::status status; + // the capability needs to be checked first (bits 5:0 and 7 need to be set) + NvU8 data, lane, counter, laneData, offset; + if (!bFECSupported) + { + DP_LOG(("DP> FEC capability not correct!")); + return nack; + } + + if (!bFECUncorrectedSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _UNCORRECTED, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECCorrectedSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _CORRECTED, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECBitSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _BIT, 
_ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECParityBlockSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _PARITY_BLOCK, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECParitySupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _PARITY_BIT, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + + status = _QueryFecStatus(this, fecStatus); + if(status != AuxBus::success) + { + return status; + } + // setting configuration for querying error counters for every lane + for (lane = NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_0; lane < connector->activeLinkConfig.lanes; lane++) + { + // keeping FEC ready bit + laneData = DRF_DEF(_DPCD14, _FEC_CONFIGURATION, _FEC_READY, _YES); + // selecting specific lane + laneData |= DRF_NUM(_DPCD14, _FEC_CONFIGURATION, _LANE_SELECT, lane); + // setting configuration for querying all the error counters for a specific lane + for (counter = NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_UNCORRECTED_BLOCK_ERROR_COUNT; + counter <= NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BIT_ERROR_COUNT; counter++) + { + // address function for the current register (in the matrix registers start from 0 and in the bit mask from 1) + offset = counter - 1; + // if flag for corresponding register is not set skip querying + if ((flags & NVBIT(offset)) == 0) continue; + // selecting specific counter + data = laneData | DRF_NUM(_DPCD14, _FEC_CONFIGURATION, _FEC_ERROR_COUNT_SEL, counter) ; + status = _WriteFecConfiguration(this, data); + if (status != AuxBus::success) + { + return status; + } + // reading specific error counter register based on address function + status = _QueryFecErrorCount(this, fecErrorCount[lane] + offset); + if (status != AuxBus::success) + { + return status; + } + } + } + return AuxBus::success; +} + +// Apply DPCD overrides if required +void DeviceImpl::dpcdOverrides() +{ + if (this->parent) + { + // + // Device is behind a branch. SW can't perform overrides as branch will + // handle link training the device not source. Also hal can only override + // capability of sink, not the individual device behind the branch. 
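+ // Hence bail out here and leave devices behind a branch untouched.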
+        //
+        return;
+    }
+    if (processedEdid.WARFlags.overrideMaxLaneCount)
+    {
+        hal->overrideMaxLaneCount(processedEdid.WARData.maxLaneCount);
+    }
+    if (processedEdid.WARFlags.skipCableBWCheck)
+    {
+        hal->skipCableBWCheck(processedEdid.WARData.maxLaneAtHighRate,
+                              processedEdid.WARData.maxLaneAtLowRate);
+    }
+    if (processedEdid.WARFlags.overrideOptimalLinkCfg)
+    {
+        LinkRate optimalLinkRate = 0;
+
+        switch (processedEdid.WARData.optimalLinkRate)
+        {
+            case 0x6:
+                optimalLinkRate = RBR;
+                break;
+            case 0xa:
+                optimalLinkRate = HBR;
+                break;
+            case 0x14:
+                optimalLinkRate = HBR2;
+                break;
+            case 0x1E:
+                optimalLinkRate = HBR3;
+                break;
+            default:
+                optimalLinkRate = RBR;
+                DP_LOG(("DP-DEV> Invalid link rate supplied. Falling back to RBR"));
+                break;
+        }
+        hal->overrideOptimalLinkCfg(optimalLinkRate, processedEdid.WARData.optimalLaneCount);
+    }
+}
+
+void DeviceImpl::applyOUIOverrides()
+{
+    // For now we only need this for Synaptics branch devices.
+    if ((this->peerDevice == DownstreamBranch) ||
+        (this->peerDevice == UpstreamSourceOrSSTBranch))
+    {
+        NvU8 buffer[16] = {0};
+        unsigned size = 13;  // Read 0x500 ~ 0x50C
+        unsigned sizeCompleted = 0;
+        unsigned nakReason = NakUndefined;
+
+        //
+        // Synaptics branches claim to support MSA override, but some older
+        // firmware has problems on their decoder. We need to disable the
+        // feature in that case.
+        //
+        if (AuxBus::success != this->getDpcdData(NV_DPCD_BRANCH_IEEE_OUI, &buffer[0],
+                                                 size, &sizeCompleted, &nakReason))
+            return;
+
+        // Check whether the branch IEEE_OUI (0x500~0x502) is the Synaptics IEEE_OUI (0x90, 0xCC, 0x24).
+        if ((buffer[0] == 0x90) && (buffer[1] == 0xCC) && (buffer[2] == 0x24))
+        {
+            // Check if the Device Identification String (0x503~0x506) is "SYNA".
+            if ((buffer[3] == 0x53) && (buffer[4] == 0x59) && (buffer[5] == 0x4E) && (buffer[6] == 0x41))
+            {
+                // Synaptics VMM5331 and VMM5320 only support MSA-over-MST for DP on firmware 5.4.5 and later.
+                if (buffer[7] == 0x53 &&
+                    (buffer[8] == 0x31 || buffer[8] == 0x20))
+                {
+                    this->bSdpExtCapable = False;
+
+                    //
+                    // Check firmware version
+                    // 0x50A: FW/SW Major Revision.
+                    // 0x50B: FW/SW Minor Revision.
+                    // 0x50C: Build Number.
+                    //
+                    if ((buffer[10] >= 0x06) ||
+                        ((buffer[10] == 0x05) && (buffer[11] >= 0x05)) ||
+                        ((buffer[10] == 0x05) && (buffer[11] == 0x04) && (buffer[12] >= 0x05)))
+                    {
+                        this->bSdpExtCapable = True;
+                    }
+                }
+            }
+        }
+
+    }
+}
+
+bool DeviceImpl::getSDPExtnForColorimetrySupported()
+{
+    DeviceImpl *targetDevice = NULL;
+    DeviceImpl *parentDevice = NULL;
+
+    NvU8 byte = 0;
+    unsigned size = 0;
+    unsigned nakReason = NakUndefined;
+
+    //
+    // On faked mux devices, we cannot check if the device has
+    // the capability as we don't have access to the aux channel.
+    //
+    if (this->isFakedMuxDevice())
+    {
+        return false;
+    }
+
+    // If the capability has already been queried or set, return the cached value.
+    if (this->bSdpExtCapable != Indeterminate)
+    {
+        return (this->bSdpExtCapable == True);
+    }
+
+    if (!this->isMultistream())
+    {
+        // If the device is directly connected to the source, read the DPCD directly.
+        this->bSdpExtCapable = hal->getSDPExtnForColorimetry() ?
True : False; + return (this->bSdpExtCapable == True); + } + + // For MST devices + switch (this->peerDevice) + { + case DownstreamBranch: + case UpstreamSourceOrSSTBranch: + { + targetDevice = this; + break; + } + case DownstreamSink: + { + // + // When the device is type of DownstreamSink and with branch(es) + // between GPU and it, query goes to the device and its parent + // + targetDevice = this; + parentDevice = (DeviceImpl *)this->getParent(); + break; + } + case Dongle: + { + // + // Bug 2527026: When the device is type of dongle and with branch(es) + // between GPU and it, query goes to its parent. + // + targetDevice = (DeviceImpl *)this->getParent(); + break; + } + default: + { + DP_ASSERT(0 && "Unsupported Peer Type for SDP_EXT COLORIMETRY"); + return false; + break; + } + } + + // Send remote DPCD for devices behind the branch + if ((AuxBus::success == targetDevice->getDpcdData(NV_DPCD_TRAINING_AUX_RD_INTERVAL, + &byte, sizeof byte, &size, &nakReason)) && + (FLD_TEST_DRF(_DPCD14, _TRAINING_AUX_RD_INTERVAL, _EXTENDED_RX_CAP, _YES, byte))) + { + byte = 0; + size = 0; + nakReason = NakUndefined; + + if (AuxBus::success == targetDevice->getDpcdData(NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST, + &byte, sizeof byte, &size, &nakReason)) + { + this->bSdpExtCapable = FLD_TEST_DRF(_DPCD14, + _EXTENDED_DPRX_FEATURE_ENUM_LIST, + _VSC_SDP_EXT_FOR_COLORIMETRY, + _YES, byte) ? True : False; + } + } + this->applyOUIOverrides(); + if (parentDevice && (this->bSdpExtCapable == True)) + { + // + // Do not override bSdpExtCapable for the sink. Although result won't + // change but we can keep the value for debug purpose. + // + return parentDevice->getSDPExtnForColorimetrySupported(); + } + + return (this->bSdpExtCapable == True); +} + +bool DeviceImpl::isPowerSuspended() +{ + bool bPanelPowerOn, bDPCDPowerStateD0; + if (connector->main->isEDP()) + { + connector->main->getEdpPowerData(&bPanelPowerOn, &bDPCDPowerStateD0); + return !bDPCDPowerStateD0; + } + return (connector->hal->getPowerState() == PowerStateD3); +} + +void DeviceImpl::setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn) +{ + bool bPanelPowerOn, bDPCDPowerStateD0; + GroupImpl * pGroupAttached = connector->getActiveGroupForSST(); + + // + // For single head dual SST mode, set the panel power params for the + // secondary connector while updating the primary connector. + // + if (pGroupAttached && + connector->pCoupledConnector && + (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) && + (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)) + { + return; + } + + if (connector->main->isEDP()) + { + connector->main->getEdpPowerData(&bPanelPowerOn, &bDPCDPowerStateD0); + } + else + { + bDPCDPowerStateD0 = (connector->hal->getPowerState() == PowerStateD0)? + true : false; + } + + // Going to Suspend (D3) + if (!bSinkPowerStateD0) + { + if (this->bypassDpcdPowerOff()) + { + DP_LOG(("DP-DEV> Bypassing 600h write for this display")); + return; + } + + if (connector->main->isEDP()) + { + /* + * If it's an eDP panel, the setPowerState call below will turn on LCD_POWER + * if it's already off. So only call the function when panel power is on + * and DPCD_SET_POWER is set to _D0. 
+            */
+            if (bPanelPowerOn && bDPCDPowerStateD0)
+            {
+                // The monitor is to be put to sleep.
+                if (connector->hal->setPowerState(PowerStateD3))
+                    shadow.highestAssessedLC = connector->highestAssessedLC;
+            }
+        }
+        else
+        {
+            if (connector->pCoupledConnector)
+            {
+                // Put the secondary connector to sleep.
+                connector->pCoupledConnector->hal->setPowerState(PowerStateD3);
+            }
+
+            // The monitor is to be put to sleep.
+            if (connector->hal->setPowerState(PowerStateD3))
+            {
+                shadow.highestAssessedLC = connector->highestAssessedLC;
+            }
+        }
+        //
+        // If bPanelPowerStateOn is false and this
+        // is not a multistream device, then shut down the main link. Some eDP
+        // panels are known to need this in order to actually shut down.
+        //
+        if (!isMultistream() && !bPanelPowerStateOn)
+        {
+            if (connector->pCoupledConnector)
+            {
+                // Configure the power state on the secondary.
+                connector->pCoupledConnector->main->configurePowerState(false);
+            }
+            connector->main->configurePowerState(false);
+        }
+    }
+    else
+    {
+        if (connector->main->isEDP() && !bPanelPowerOn)
+        {
+            // Turn on the eDP panel if required.
+            connector->main->configurePowerState(true);
+        }
+        // The monitor is to be brought out of sleep.
+        if (connector->hal->setPowerState(PowerStateD0))
+        {
+            if (connector->pCoupledConnector)
+            {
+                // Power up the main link on the secondary.
+                connector->pCoupledConnector->hal->setPowerState(PowerStateD0);
+            }
+
+            // Mark linkStatus as dirty: we are resuming to power state D0 and the
+            // link might have been lost, so linkStatus must be read again.
+            connector->hal->setDirtyLinkStatus(true);
+            if (connector->pCoupledConnector)
+            {
+                connector->pCoupledConnector->hal->setDirtyLinkStatus(true);
+            }
+
+            if (connector->activeGroups.isEmpty())
+            {
+                return;
+            }
+            if ((!connector->isLinkActive()) ||
+                (connector->main->isEDP() && !bPanelPowerOn) ||
+                (connector->isLinkLost()) ||
+                (!bDPCDPowerStateD0))
+            {
+                //
+                // If link is inactive, lost, or the panel was off before, then
+                // assess Link. Note that this'll detach head if required.
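+                // For example (illustrative scenario, not from this change):
+                // resuming from suspend with an eDP panel that was powered off
+                // hits the (isEDP() && !bPanelPowerOn) term above, so we fall
+                // through to assessLink() and retrain before the head is reused.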
+ // + if (pGroupAttached && + pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + // Special handling for single head dual SST cases + connector->trainSingleHeadMultipleSSTLinkNotAlive(pGroupAttached); + } + else + { + connector->assessLink(); + } + } + } + else + DP_ASSERT(0 && "Could not bring the monitor back from sleep."); + } +} + +void DeviceImpl::switchToComplianceFallback() +{ + Edid fallbackEdid; + makeEdidFallback(fallbackEdid); + this->processedEdid.resetData(); + this->processedEdid = fallbackEdid; +} + +TriState DeviceImpl::hdcpAvailableHop() +{ + return this->isHDCPCap; +} + +TriState DeviceImpl::hdcpAvailable() +{ + if (isNativeDPCD()) + { + return this->hdcpAvailableHop(); + } + return False; +} + +void DeviceImpl::resetCacheInferredLink() +{ + this->bandwidth.enum_path.dataValid = false; +} + +LinkConfiguration * DeviceImpl::inferLeafLink(unsigned * totalLinkSlots) +{ + // update the EPR data + if (!bandwidth.enum_path.dataValid) + { + if (plugged) + { + NakData nack; + for (unsigned retries = 0; retries < 7; retries++) + { + EnumPathResMessage epr(getTopologyAddress().parent(), getTopologyAddress().tail(), true); + bool sendStatus = connector->messageManager->send(&epr, nack); + if (!sendStatus) + { + if (nack.reason == NakDefer || nack.reason == NakTimeout) + continue; + + bandwidth.enum_path.total = bandwidth.enum_path.free = 0; + break; + } + else + { + bandwidth.enum_path.total = epr.reply.TotalPBN; + bandwidth.enum_path.free = epr.reply.FreePBN; + bandwidth.enum_path.bPathFECCapable = epr.reply.bFECCapability; + break; + } + } + } + else + { + bandwidth.enum_path.total = bandwidth.enum_path.free = 0; + } + + bandwidth.enum_path.dataValid = true; + bandwidth.lastHopLinkConfig = LinkConfiguration(bandwidth.enum_path.total); + // Update FEC support of the device after EPR + this->getFECSupport(); + } + + if (totalLinkSlots) + { + *totalLinkSlots = bandwidth.lastHopLinkConfig.slotsForPBN(bandwidth.enum_path.total, true /*epr aware*/); + + // + // Override the totalLinkSlots returned to 63 only if peer device is + // 2 (branch), since TS-0 will be used for MTP header. + // Branch may return the total pbn corresponding to 64 timeslots. + // + if (*totalLinkSlots == 64 && peerDevice == DownstreamBranch) + { + *totalLinkSlots = 63; + } + } + + return &bandwidth.lastHopLinkConfig; +} + +bool DeviceImpl::isActive() +{ + DP_ASSERT(!activeGroup || activeGroup->isHeadAttached()); + return activeGroup != NULL; +} + +bool DeviceImpl::getRawEpr(unsigned * totalEpr, unsigned * freeEpr, rawEprState eprState) +{ + DP_ASSERT((totalEpr && freeEpr) && "Invalid arguments passed to function getRawEpr()"); + bool status = true; + *totalEpr = 0; + *freeEpr = 0; + + // If request has come for main link/Native branch device + // return main link PBNs as "0" & return + if (isNativeDPCD()) + return status; + + // Cached/Software state is queried + if (eprState == software) + { + *totalEpr = bandwidth.enum_path.total; + *freeEpr = bandwidth.enum_path.free; + + return status; + } + + // Hardware state is queried. 
Send a new EPR message to get the current state + EnumPathResMessage rawEpr(getTopologyAddress().parent(), getTopologyAddress().tail(), true); + NakData nack; + for (unsigned retries = 0; retries < 7; retries++) + { + bool sendStatus = connector->messageManager->send(&rawEpr, nack); + if (!sendStatus) + { + status = false; + if (nack.reason == NakDefer) + continue; + + DP_LOG(("DP-DEV> EPR message failed while getting RAW EPR")); + + break; + } + else + { + *totalEpr = rawEpr.reply.TotalPBN; + *freeEpr = rawEpr.reply.FreePBN; + status = true; + + break; + } + } + + return status; +} + +unsigned DeviceImpl::getEDIDSize() const +{ + // Return DDC EDID size only if we got a valid EDID there + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + return ddcEdid.getEdidSize(); + } + else + { + return processedEdid.getEdidSize(); + } +} + +bool DeviceImpl::getEDID(char * buffer, unsigned size) const +{ + // + // Return DDC EDID only if we got a valid EDID there + // This has priority on regular EDID read from panel + // + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + if (size < ddcEdid.getEdidSize()) + goto panelEdid; + + dpMemCopy(buffer, ddcEdid.getBuffer()->getData(), ddcEdid.getEdidSize()); + return true; + } + +panelEdid: + // No EDID read from SBIOS. Return panel EDID now. + if (size < processedEdid.getEdidSize()) + return false; + + dpMemCopy(buffer, processedEdid.getBuffer()->getData(), processedEdid.getEdidSize()); + return true; +} + +unsigned DeviceImpl::getRawEDIDSize() const +{ + // Return DDC EDID size only if we got a valid EDID there + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + return ddcEdid.getEdidSize(); + } + else + { + return rawEDID.getEdidSize(); + } +} + +bool DeviceImpl::getRawEDID(char * buffer, unsigned size) const +{ + // + // Return DDC EDID only if we got a valid EDID there + // This has priority on regular EDID read from panel + // + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + if (size >= ddcEdid.getEdidSize()) + { + dpMemCopy(buffer, ddcEdid.getBuffer()->getData(), ddcEdid.getEdidSize()); + return true; + } + } + + // No EDID read from SBIOS. Return panel EDID now. 
+ if (size < rawEDID.getEdidSize()) + return false; + + dpMemCopy(buffer, rawEDID.getBuffer()->getData(), rawEDID.getEdidSize()); + return true; +} + +bool DeviceImpl::startVrrEnablement() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->start(); + } + + return ret; +} + +void DeviceImpl::resetVrrEnablement() +{ + if (vrrEnablement) + { + vrrEnablement->reset(); + } +} + +bool DeviceImpl::isVrrMonitorEnabled() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->isMonitorEnabled(); + } + + return ret; +} + +bool DeviceImpl::isVrrDriverEnabled() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->isDriverEnabled(); + } + + return ret; +} + +NvBool DeviceImpl::getDSCSupport() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + Address::StringBuffer sb; + DP_USED(sb); + + dscCaps.bDSCSupported = false; + + if(AuxBus::success == this->getDpcdData(NV_DPCD14_DSC_SUPPORT, + &byte, sizeof(byte), &size, &nakReason)) + { + if (FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DSC_SUPPORT, _YES, byte)) + { + dscCaps.bDSCSupported = true; + } + + if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _PASS_THROUGH_SUPPORT, _YES, byte)) + { + dscCaps.bDSCPassThroughSupported = true; + } + } + + else + { + DP_LOG(("DP-DEV> DSC Support AUX READ failed for %s!", address.toString(sb))); + } + + return dscCaps.bDSCSupported; +} + +bool DeviceImpl::isPanelReplaySupported() +{ + return prCaps.panelReplaySupported; +} + +void DeviceImpl::getPanelReplayCaps() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + if (AuxBus::success == this->getDpcdData(NV_DPCD20_PANEL_REPLAY_CAPABILITY, + &byte, sizeof(byte), &size, &nakReason)) + { + prCaps.panelReplaySupported = + FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CAPABILITY, _SUPPORTED, _YES, byte); + } +} + +bool DeviceImpl::setPanelReplayConfig(panelReplayConfig prcfg) +{ + NvU8 config = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + if (prcfg.enablePanelReplay) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_PR_MODE, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_PR_MODE, _NO, config); + } + + if (AuxBus::success == this->setDpcdData(NV_DPCD20_PANEL_REPLAY_CONFIGURATION, + &config, sizeof(config), &size, &nakReason)) + { + return true; + } + + return false; +} + +bool DeviceImpl::getFECSupport() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + if(this->address.size() > 1) + { + bFECSupported = this->bandwidth.enum_path.bPathFECCapable; + } + + else if (AuxBus::success == this->getDpcdData(NV_DPCD14_FEC_CAPABILITY, + &byte, sizeof(byte), &size, &nakReason)) + { + bFECSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _FEC_CAPABLE, _YES, byte); + bFECUncorrectedSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECCorrectedSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _CORRECTED_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECBitSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _BIT_ERROR_COUNT_CAPABLE, _YES, byte); + bFECParityBlockSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _PARITY_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECParitySupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _PARITY_ERROR_COUNT_CAPABLE, _YES, byte); + } + + return bFECSupported; +} + +NvBool DeviceImpl::isDSCSupported() +{ + return dscCaps.bDSCSupported; +} + +NvBool DeviceImpl::isDSCPassThroughSupported() +{ 
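+    //
+    // Background note (summarizing the DSC comments later in this file, not
+    // normative): pass-through is a branch-device capability. A pass-through
+    // capable branch forwards the compressed stream unmodified so that a
+    // DSC-capable sink behind it can do its own decompression; see
+    // setDscDecompressionDevice() for how this is used.
+    //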
+ return dscCaps.bDSCPassThroughSupported; +} + +NvBool DeviceImpl::isDSCPossible() +{ + return this->bDSCPossible; +} + +bool DeviceImpl::isFECSupported() +{ + return bFECSupported; +} + +bool DeviceImpl::parseDscCaps(const NvU8 *buffer, NvU32 bufferSize) +{ + + if (bufferSize < 16) + { + DP_LOG((" DSC caps buffer must be greater than or equal to 16")); + return false; + } + + dscCaps.versionMajor = DRF_VAL(_DPCD14, _DSC_ALGORITHM_REVISION, _MAJOR, buffer[0x1]); + dscCaps.versionMinor = DRF_VAL(_DPCD14, _DSC_ALGORITHM_REVISION, _MINOR, buffer[0x1]); + + dscCaps.rcBufferBlockSize = DRF_VAL(_DPCD14, _DSC_RC_BUFFER_BLOCK, _SIZE, buffer[0x2]); + + dscCaps.rcBuffersize = DRF_VAL(_DPCD14, _DSC_RC_BUFFER, _SIZE, buffer[0x3]); + + dscCaps.sliceCountSupportedMask = (((buffer[0xD]) << 8) | buffer[0x4]); + if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_24, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_24; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_20, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_20; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_16, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_16; + + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_12, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_12; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_10, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_10; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_8, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_8; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_6, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_6; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_4, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_4; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_2, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_2; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_1, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_1; + + if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _8, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 8; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _9, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 9; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _10, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 10; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _11, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 11; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _12, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 12; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _13, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 13; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _14, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 14; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _15, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 15; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _16, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 16; + } + + if(FLD_TEST_DRF(_DPCD14, _DSC_BLOCK_PREDICTION, _SUPPORT, _YES, buffer[0x6])) + 
dscCaps.bDscBlockPredictionSupport = true; + + unsigned maxBitsPerPixelLSB = DRF_VAL(_DPCD14, _DSC_MAXIMUM_BITS_PER_PIXEL_1, _LSB, buffer[0x7]); + unsigned maxBitsPerPixelMSB = DRF_VAL(_DPCD14, _DSC_MAXIMUM_BITS_PER_PIXEL_2, _MSB, buffer[0x8]); + + dscCaps.maxBitsPerPixelX16 = (maxBitsPerPixelMSB << 8) | maxBitsPerPixelLSB; + + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _RGB, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bRgb = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_444, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCr444 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_SIMPLE_422, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_NATIVE_422, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_NATIVE_420, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420 = true; + + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _12_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_12; + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _10_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_10; + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _8_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_8; + + dscCaps.dscPeakThroughputMode0 = DRF_VAL(_DPCD14, _DSC_PEAK_THROUGHPUT, _MODE0, buffer[0xb]); + dscCaps.dscPeakThroughputMode1 = DRF_VAL(_DPCD14, _DSC_PEAK_THROUGHPUT, _MODE1, buffer[0xb]); + + unsigned numOfPixels = DRF_VAL(_DPCD14, _DSC_MAXIMUM_SLICE_WIDTH, _MAX, buffer[0xc]); + dscCaps.dscMaxSliceWidth = numOfPixels * 320; + + if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_16, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_16; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_8, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_8; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_4, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_4; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_2, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_2; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1; + } + + return true; +} + +bool DeviceImpl::readAndParseDSCCaps() +{ + // Allocate a buffer of 16 bytes to read DSC caps + + unsigned sizeCompleted = 0; + unsigned nakReason = NakUndefined; + Address::StringBuffer sb; + DP_USED(sb); + + if(AuxBus::success != this->getDpcdData(NV_DPCD14_DSC_SUPPORT, + &rawDscCaps[0], sizeof(rawDscCaps), &sizeCompleted, &nakReason)) + { + DP_LOG(("DP-DEV> Error querying DSC Caps on %s!", this->address.toString(sb))); + return false; + } + + return parseDscCaps(&rawDscCaps[0], sizeof(rawDscCaps)); +} + +void DeviceImpl::queryGUID2() +{ + unsigned sizeCompleted = 0; + unsigned nakReason = NakUndefined; + Address::StringBuffer sb; + DP_USED(sb); + + if(AuxBus::success == 
this->getDpcdData(NV_DPCD20_GUID_2,
+                          &this->guid2.data[0], DPCD_GUID_SIZE, &sizeCompleted, &nakReason))
+    {
+        if (!(this->guid2.isGuidZero()))
+        {
+            this->bVirtualPeerDevice = true;
+        }
+    }
+    else
+    {
+        DP_LOG(("DP-DEV> Error querying GUID2 on %s!", this->address.toString(sb)));
+    }
+}
+
+bool DeviceImpl::getDscEnable(bool *pEnable)
+{
+    AuxBus::status status = AuxBus::success;
+    unsigned sizeCompleted = 0;
+    unsigned nakReason = 0;
+    NvU8 byte = 0;
+
+    if (!pEnable ||
+        !this->isDSCPossible() ||
+        !this->devDoingDscDecompression ||
+        !this->devDoingDscDecompression->plugged)
+    {
+        return false;
+    }
+
+    status = this->devDoingDscDecompression->getDpcdData(NV_DPCD14_DSC_ENABLE,
+                                                         &byte,
+                                                         sizeof byte,
+                                                         &sizeCompleted,
+                                                         &nakReason);
+
+    if (status != AuxBus::success)
+    {
+        DP_LOG(("DP-DEV> Error querying DSC Enable State!"));
+        return false;
+    }
+
+    *pEnable = FLD_TEST_DRF(_DPCD14, _DSC_ENABLE, _SINK, _YES, byte);
+    return true;
+}
+
+void DeviceImpl::setDscDecompressionDevice(bool bDscCapBasedOnParent)
+{
+    // Decide whether a DSC stream can be sent to the new device.
+    this->bDSCPossible = false;
+    this->devDoingDscDecompression = NULL;
+
+    if (this->multistream)
+    {
+        if ((this->peerDevice == Dongle) &&
+            (this->dpcdRevisionMajor != 0) &&
+            !bDscCapBasedOnParent)
+        {
+            // For a Peer Type 4 device with LAM DPCD rev != 0.0, check only the device's own DSC capability.
+            if (this->isDSCSupported())
+            {
+                this->bDSCPossible = true;
+                this->devDoingDscDecompression = this;
+            }
+        }
+        else
+        {
+            //
+            // Check the device's own and its parent's DSC capability.
+            // - The sink device will do DSC decompression when
+            //   1. The sink device is capable of DSC decompression
+            //   2. The sink is on a logical port (8-15)
+            //
+            //   OR
+            //
+            //   1. The sink device is capable of DSC decompression
+            //   2. The parent of the sink is a Virtual Peer device
+            //   3. The parent of the sink supports DSC Pass through
+            //
+            // - The sink device's parent will do DSC decompression when
+            //   1. The above conditions are not true.
+            //   2. The parent of the sink supports DSC decompression.
+            //
+            if (this->isDSCSupported())
+            {
+                if (this->isVideoSink() && this->getParent() != NULL)
+                {
+                    if (this->isLogical())
+                    {
+                        this->devDoingDscDecompression = this;
+                        this->bDSCPossible = true;
+                    }
+                    else if (this->parent->isVirtualPeerDevice() &&
+                             this->parent->isDSCPassThroughSupported())
+                    {
+                        //
+                        // This condition takes care of DSC capable sink devices
+                        // connected behind a DSC Pass through capable branch.
+                        //
+                        this->devDoingDscDecompression = this;
+                        this->bDSCPossible = true;
+                    }
+                    else if (this->parent->isDSCSupported())
+                    {
+                        //
+                        // This condition takes care of DSC capable sink devices
+                        // connected behind a branch device that is not capable
+                        // of DSC pass through but can do DSC decompression.
+                        //
+                        this->bDSCPossible = true;
+                        this->devDoingDscDecompression = this->parent;
+                    }
+                }
+                else
+                {
+                    // This condition takes care of a branch device capable of DSC.
+                    this->devDoingDscDecompression = this;
+                    this->bDSCPossible = true;
+                }
+            }
+            else if (this->parent && this->parent->isDSCSupported())
+            {
+                //
+                // This condition takes care of sink devices not capable of DSC
+                // but whose parent is capable of DSC decompression.
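+                //
+                // Recap of the whole decision chain above (illustrative only,
+                // derived from the branches in this function; the pass-through
+                // case also requires the parent to be a virtual peer device):
+                //
+                //   sink DSC | logical port | parent pass-through | parent DSC | decoder
+                //   ---------+--------------+---------------------+------------+--------
+                //     yes    |     yes      |          -          |     -      |  sink
+                //     yes    |     no       |         yes         |     -      |  sink
+                //     yes    |     no       |         no          |    yes     |  parent
+                //     no     |      -       |          -          |    yes     |  parent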
+                //
+                this->bDSCPossible = true;
+                this->devDoingDscDecompression = this->parent;
+            }
+        }
+    }
+    else
+    {
+        if (this->isDSCSupported())
+        {
+            this->bDSCPossible = true;
+            this->devDoingDscDecompression = this;
+        }
+    }
+}
+
+bool DeviceImpl::setDscEnable(bool enable)
+{
+    NvU8 dscEnableByte = 0;
+    NvU8 dscPassthroughByte = 0;
+    unsigned size = 0;
+    unsigned nakReason = NakUndefined;
+    bool bCurrDscEnable = false;
+    bool bDscPassThrough = false;
+    bool bDscPassThroughUpdated = true;
+    Address::StringBuffer buffer;
+    DP_USED(buffer);
+
+    if (!this->isDSCPossible() || !this->devDoingDscDecompression ||
+        !this->devDoingDscDecompression->plugged)
+    {
+        return false;
+    }
+
+    if ((this->devDoingDscDecompression == this) && !this->isLogical() && this->parent != NULL)
+    {
+        //
+        // If the device has a parent, the sink is on an MST link. On an MST
+        // link, if DSC is possible on the path, devDoingDscDecompression is
+        // the sink itself, and the sink is not on a logical port, then the
+        // parent must be DSC Pass through capable.
+        //
+        bDscPassThrough = true;
+    }
+    else
+    {
+        //
+        // Get the current DSC enable state.
+        // Ideally we wouldn't need to check the current state, but the
+        // Synaptics DSC device that was used for initial DSC code development
+        // did not follow the spec, so we have added this code. Overwriting the
+        // same value should not have any impact as per the spec. This check
+        // will be removed once all DSC devices follow the spec.
+        //
+        if (!getDscEnable(&bCurrDscEnable))
+        {
+            DP_LOG(("DP-DEV> Not able to get DSC Enable State!"));
+            return false;
+        }
+    }
+
+    if (enable)
+    {
+        if (bDscPassThrough)
+        {
+            dscPassthroughByte = FLD_SET_DRF(_DPCD20, _DSC_PASS_THROUGH, _ENABLE, _YES, dscPassthroughByte);
+            DP_LOG(("DP-DEV> Enabling DSC Pass through on branch device - %s",
+                    this->parent->getTopologyAddress().toString(buffer)));
+        }
+
+        if (!bCurrDscEnable)
+        {
+            dscEnableByte = FLD_SET_DRF(_DPCD14, _DSC_ENABLE, _SINK, _YES, dscEnableByte);
+            DP_LOG(("DP-DEV> Enabling DSC decompression on device - %s",
+                    this->devDoingDscDecompression->getTopologyAddress().toString(buffer)));
+        }
+        else
+        {
+            DP_LOG(("DP-DEV> DSC decompression is already enabled on device - %s",
+                    this->devDoingDscDecompression->getTopologyAddress().toString(buffer)));
+            return true;
+        }
+    }
+    else
+    {
+        if (bDscPassThrough)
+        {
+            dscPassthroughByte = FLD_SET_DRF(_DPCD20, _DSC_PASS_THROUGH, _ENABLE, _NO, dscPassthroughByte);
+            DP_LOG(("DP-DEV> Disabling DSC Pass through on branch device - %s",
+                    this->parent->getTopologyAddress().toString(buffer)));
+        }
+
+        if (bCurrDscEnable)
+        {
+            dscEnableByte = FLD_SET_DRF(_DPCD14, _DSC_ENABLE, _SINK, _NO, dscEnableByte);
+            DP_LOG(("DP-DEV> Disabling DSC decompression on device - %s",
+                    this->devDoingDscDecompression->getTopologyAddress().toString(buffer)));
+        }
+        else
+        {
+            DP_LOG(("DP-DEV> DSC decompression is already disabled on device - %s",
+                    this->devDoingDscDecompression->getTopologyAddress().toString(buffer)));
+            return true;
+        }
+    }
+
+    if (bDscPassThrough)
+    {
+        if (this->parent->setDpcdData(NV_DPCD20_DSC_PASS_THROUGH,
+                                      &dscPassthroughByte, sizeof dscPassthroughByte, &size, &nakReason))
+        {
+            DP_LOG(("DP-DEV> Setting DSC Passthrough state on parent branch failed"));
+            bDscPassThroughUpdated = false;
+        }
+    }
+
+    return (!this->devDoingDscDecompression->setDpcdData(NV_DPCD14_DSC_ENABLE,
+                                                         &dscEnableByte, sizeof dscEnableByte, &size, &nakReason)) && bDscPassThroughUpdated;
+}
+
+unsigned DeviceImpl::getDscVersionMajor()
+{
+    return dscCaps.versionMajor;
+}
+
+unsigned
DeviceImpl::getDscVersionMinor() +{ + return dscCaps.versionMinor; +} + +unsigned DeviceImpl::getDscRcBufferSize() +{ + return dscCaps.rcBuffersize; +} + +unsigned DeviceImpl::getDscRcBufferBlockSize() +{ + return dscCaps.rcBufferBlockSize; +} + +unsigned DeviceImpl::getDscMaxSlicesPerSink() +{ + return dscCaps.maxSlicesPerSink; +} + +unsigned DeviceImpl::getDscLineBufferBitDepth() +{ + return dscCaps.lineBufferBitDepth; +} + +NvBool DeviceImpl::isDscBlockPredictionSupported() +{ + return dscCaps.bDscBlockPredictionSupport; +} + +unsigned DeviceImpl::getDscMaxBitsPerPixel() +{ + return dscCaps.maxBitsPerPixelX16; +} + +NvBool DeviceImpl::isDscRgbSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bRgb; +} + +NvBool DeviceImpl::isDscYCbCr444Supported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCr444; +} + +NvBool DeviceImpl::isDscYCbCrSimple422Supported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422; +} + +NvBool DeviceImpl::isDscYCbCr422NativeSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422; +} + +NvBool DeviceImpl::isDscYCbCr420NativeSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420; +} + +unsigned DeviceImpl::getDscPeakThroughputMode0() +{ + return dscCaps.dscPeakThroughputMode0; +} + +unsigned DeviceImpl::getDscPeakThroughputModel() +{ + return dscCaps.dscPeakThroughputMode1; +} + +unsigned DeviceImpl::getDscMaxSliceWidth() +{ + return dscCaps.dscMaxSliceWidth; +} + +unsigned DeviceImpl::getDscDecoderColorDepthSupportMask() +{ + return dscCaps.dscDecoderColorDepthMask; +} + +bool DeviceImpl::isFakedMuxDevice() +{ + return connector->main->isDynamicMuxCapable() && bIsFakedMuxDevice; +} + +bool DeviceImpl::isPreviouslyFakedMuxDevice() +{ + return connector->main->isDynamicMuxCapable() && bIsPreviouslyFakedMuxDevice; +} + +static AuxBus::status _QueryCrcSink +( + DeviceImpl *bus, + NvU16 *sinkCrc0, + NvU16 *sinkCrc1, + NvU16 *sinkCrc2 +) +{ + AuxBus::status status = AuxBus::success; + // no sink op needs to be done if registers are NULL + if (sinkCrc0 == NULL) return status; + NvU32 addr = NV_DPCD14_DSC_CRC_0; + unsigned size = 2; + NvU8 cnt[2] = {0, 0}; + + unsigned sizeCompleted = 0; + unsigned nakReason = 0; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc0 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + + addr = NV_DPCD14_DSC_CRC_1; + size = 2; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc1 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + + addr = NV_DPCD14_DSC_CRC_2; + size = 2; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc2 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + return status; +} + +AuxBus::status DeviceImpl::dscCrcControl(NvBool bEnable, gpuDscCrc *gpuData, sinkDscCrc *sinkData) +{ + // GPU part + if (this->connector->main->dscCrcTransaction(bEnable, gpuData, (NvU16*) &(activeGroup->headIndex)) != true) + { + return AuxBus::nack; + } + + // sink part + if (!sinkData) + { + return AuxBus::success; + } + return _QueryCrcSink(this, &(sinkData->sinkCrc0), &(sinkData->sinkCrc1), &(sinkData->sinkCrc2)); +} + +bool DeviceImpl::getPCONCaps(PCONCaps *pPCONCaps) +{ + AuxBus::status status = AuxBus::success; + NvU32 addr = NV_DPCD_DETAILED_CAP_INFO_ONE(0); + NvU8 data = 0; + 
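+    //
+    // Unit note (an assumption from the usual DP-to-HDMI PCON convention, not
+    // taken from this change): the max TMDS clock field read below is commonly
+    // encoded in units of 2.5 MHz, e.g. a raw value of 120 (0x78) would
+    // correspond to a 300 MHz TMDS character clock.
+    //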
unsigned size = 1;
+    unsigned sizeCompleted = 0;
+    unsigned nakReason = 0;
+
+    if (isMultistream())
+        return false;
+
+    status = getDpcdData(addr, &data, size, &sizeCompleted, &nakReason);
+    if (status != AuxBus::success)
+    {
+        return false;
+    }
+    pPCONCaps->maxTmdsClkRate = data;
+
+    addr = NV_DPCD_DETAILED_CAP_INFO_TWO(0);
+    status = getDpcdData(addr, &data, size, &sizeCompleted, &nakReason);
+    if (status != AuxBus::success)
+    {
+        return false;
+    }
+
+    pPCONCaps->bSourceControlModeSupported =
+        FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _SRC_CONTROL_MODE_SUPPORT, _YES, data);
+    pPCONCaps->bConcurrentLTSupported =
+        FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _CONCURRENT_LT_SUPPORT, _YES, data);
+    pPCONCaps->maxHdmiLinkBandwidthGbps =
+        DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, data);
+
+    switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_BITS_PER_COMPONENT_DEF, data))
+    {
+        case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC:
+            pPCONCaps->maxBpc = 10;
+            break;
+        case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC:
+            pPCONCaps->maxBpc = 12;
+            break;
+        case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC:
+            pPCONCaps->maxBpc = 16;
+            break;
+        case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC:
+        default:
+            pPCONCaps->maxBpc = 8;
+            break;
+    }
+    return true;
+}
+
+
+void
+DeviceHDCPDetection::start()
+{
+    if (parent->isNativeDPCD())
+    {
+        if (!parent->isMultistream())
+        {
+            goto NativeDPCDHDCPCAPRead;
+        }
+        else
+        {
+            parent->isHDCPCap = False;
+            waivePendingHDCPCapDoneNotification();
+            return;
+        }
+
+NativeDPCDHDCPCAPRead:
+
+        BCaps bCaps = {0};
+
+        parent->hal->getBCaps(bCaps, parent->BCAPS);
+        *(parent->nvBCaps) = *(parent->BCAPS);
+
+        if (bCaps.HDCPCapable)
+        {
+            NvU8 tempBKSV[HDCP_KSV_SIZE] = {0};
+            if (parent->hal->getBKSV(tempBKSV))
+            {
+                if (hdcpValidateKsv(tempBKSV, HDCP_KSV_SIZE))
+                {
+                    for (unsigned i = 0; i < HDCP_KSV_SIZE; i++)
+                        parent->BKSV[i] = tempBKSV[i];
+                }
+            }
+            parent->isHDCPCap = True;
+            waivePendingHDCPCapDoneNotification();
+            return;
+        }
+        else
+        {
+            unsigned char hdcp22BCAPS[HDCP22_BCAPS_SIZE];
+
+            // Check if this is an HDCP 2.x-only device and probe its 22BCaps.
+            parent->hal->getHdcp22BCaps(bCaps, hdcp22BCAPS);
+            if (bCaps.HDCPCapable)
+            {
+                parent->nvBCaps[0] = FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET,
+                                                     _HDCP_CAPABLE, bCaps.HDCPCapable,
+                                                     parent->nvBCaps[0]) |
+                                     FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER,
+                                                     bCaps.repeater, parent->nvBCaps[0]);
+
+                //
+                // No need to validate the 1.x BKSV here; hdcp22 authentication
+                // will validate the certificate against the BKSV in the uproc.
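+                // (Bit-layout note, assumed from the HDCP-over-DP specs, not
+                // from this change: HDCP 1.x Bcaps uses bit0 = HDCP_CAPABLE and
+                // bit1 = REPEATER, while HDCP 2.2 Bcaps uses bit1 = HDCP_CAPABLE
+                // and bit0 = REPEATER; the remap above folds the 2.2 bits into
+                // the 1.x-style nvBCaps layout, matching the 0x2/0x1 masks used
+                // in handleRemoteDpcdReadDownReply() below.)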
+                //
+                parent->isHDCPCap = True;
+                waivePendingHDCPCapDoneNotification();
+                return;
+            }
+        }
+
+        parent->isHDCPCap = False;
+        waivePendingHDCPCapDoneNotification();
+    }
+    else
+    {
+        parent->isHDCPCap = False;
+        waivePendingHDCPCapDoneNotification();
+    }
+}
+
+void
+DeviceHDCPDetection::messageCompleted
+(
+    MessageManager::Message *from
+)
+{
+    if ((from == &remoteBKSVReadMessage) ||
+        (from == &remoteBCapsReadMessage) ||
+        (from == &remote22BCapsReadMessage))
+    {
+        handleRemoteDpcdReadDownReply(from);
+    }
+}
+
+void
+DeviceHDCPDetection::handleRemoteDpcdReadDownReply
+(
+    MessageManager::Message *from
+)
+{
+    NvU8 i2cBcaps;
+    unsigned dataCompleted;
+    unsigned defaultReason;
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    if (from == &remoteBKSVReadMessage)
+    {
+        bksvReadCompleted = true;
+        bBKSVReadMessagePending = false;
+        DP_LOG(("DP-QM> REMOTE_DPCD_READ(BKSV) {%p} at '%s' completed",
+                (MessageManager::Message *)&remoteBKSVReadMessage,
+                parent->address.toString(sb)));
+
+        if (remoteBKSVReadMessage.replyNumOfBytesReadDPCD() != HDCP_KSV_SIZE)
+        {
+            DP_ASSERT(0 && "Incomplete BKSV in remote DPCD read message");
+            parent->isHDCPCap = False;
+
+            // Destruct only when no message is pending
+            if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+            {
+                parent->isDeviceHDCPDetectionAlive = false;
+                delete this;
+            }
+            return;
+        }
+
+        DP_ASSERT(remoteBKSVReadMessage.replyPortNumber() == parent->address.tail());
+        if (hdcpValidateKsv(remoteBKSVReadMessage.replyGetData(), HDCP_KSV_SIZE))
+        {
+            isValidBKSV = true;
+            for (unsigned i = 0; i < HDCP_KSV_SIZE; i++)
+                parent->BKSV[i] = (remoteBKSVReadMessage.replyGetData())[i];
+
+            DP_LOG(("DP-QM> Device at '%s' has a valid BKSV.",
+                    parent->address.toString(sb)));
+        }
+    }
+    else if (from == &remoteBCapsReadMessage)
+    {
+        bCapsReadCompleted = true;
+        bBCapsReadMessagePending = false;
+        DP_LOG(("DP-QM> REMOTE_DPCD_READ(BCaps) {%p} at '%s' completed",
+                (MessageManager::Message *)&remoteBCapsReadMessage,
+                parent->address.toString(sb)));
+
+        if (remoteBCapsReadMessage.replyNumOfBytesReadDPCD() != HDCP_BCAPS_SIZE)
+        {
+            DP_ASSERT(0 && "Incomplete BCaps in remote DPCD read message");
+            parent->isHDCPCap = False;
+
+            // Destruct only when no message is pending
+            if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+            {
+                parent->isDeviceHDCPDetectionAlive = false;
+                delete this;
+            }
+            return;
+        }
+
+        DP_ASSERT(remoteBCapsReadMessage.replyPortNumber() == parent->address.tail());
+        if (!!(*remoteBCapsReadMessage.replyGetData() & 0x1))
+        {
+            *(parent->nvBCaps) = *(parent->BCAPS) = *remoteBCapsReadMessage.replyGetData();
+            isBCapsHDCP = true;
+
+            DP_LOG(("DP-QM> Device at '%s' has valid BCAPS : %x",
+                    parent->address.toString(sb), *remoteBCapsReadMessage.replyGetData()));
+        }
+        else
+        {
+            if (isValidBKSV)
+            {
+                DP_LOG(("DP-QM> Device at '%s' has a valid BKSV but invalid BCAPS : %x",
+                        parent->address.toString(sb), *remoteBCapsReadMessage.replyGetData()));
+
+                // Read the BCAPS DDC offset
+                parent->transaction(AuxBus::read, AuxBus::i2cMot, HDCP_I2C_CLIENT_ADDR, &i2cBcaps,
+                                    1, &dataCompleted, &defaultReason, HDCP_BCAPS_DDC_OFFSET, 1);
+
+                DP_LOG(("DP-QM> Device at '%s' has DDC BCAPS: %x",
+                        parent->address.toString(sb), i2cBcaps));
+
+                // If the Reserved Bit is SET, the device supports HDCP.
+                if (i2cBcaps & HDCP_BCAPS_DDC_EN_BIT)
+                {
+                    isBCapsHDCP = true;
+                    // Set the HDCP cap BCAPS according to the DP protocol.
+                    *(parent->BCAPS) |= HDCP_BCAPS_DP_EN_BIT;
+                    *(parent->nvBCaps) = *(parent->BCAPS);
+                }
+            }
+            else
+            {
+                DP_LOG(("DP-QM> Device at '%s' has no valid BKSV or BCAPS; trying 22BCAPS",
+                        parent->address.toString(sb)));
+
+                Address parentAddress = parent->address.parent();
+                remote22BCapsReadMessage.setMessagePriority(NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT);
+                remote22BCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP22_BCAPS_OFFSET, HDCP22_BCAPS_SIZE);
+                bCapsReadCompleted = false;
+                bBCapsReadMessagePending = true;
+                messageManager->post(&remote22BCapsReadMessage, this);
+            }
+        }
+    }
+    else if (from == &remote22BCapsReadMessage)
+    {
+        bCapsReadCompleted = true;
+        bBCapsReadMessagePending = false;
+        DP_LOG(("DP-QM> REMOTE_DPCD_READ(22BCaps) {%p} at '%s' completed",
+                (MessageManager::Message *)&remote22BCapsReadMessage,
+                parent->address.toString(sb)));
+
+        if (remote22BCapsReadMessage.replyNumOfBytesReadDPCD() != HDCP22_BCAPS_SIZE)
+        {
+            DP_ASSERT(0 && "Incomplete 22BCaps in remote DPCD read message");
+            parent->isHDCPCap = False;
+
+            // Destruct only when no message is pending
+            if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+            {
+                parent->isDeviceHDCPDetectionAlive = false;
+                delete this;
+            }
+            return;
+        }
+
+        DP_ASSERT(remote22BCapsReadMessage.replyPortNumber() == parent->address.tail());
+        if (!!(*remote22BCapsReadMessage.replyGetData() & 0x2))
+        {
+            unsigned char hdcp22BCAPS;
+
+            hdcp22BCAPS = *remote22BCapsReadMessage.replyGetData();
+
+            parent->nvBCaps[0] = FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET,
+                                                 _HDCP_CAPABLE, (hdcp22BCAPS & 0x2) ? 1 : 0,
+                                                 parent->nvBCaps[0]) |
+                                 FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER,
+                                                 (hdcp22BCAPS & 0x1) ? 1 : 0, parent->nvBCaps[0]);
+
+            // hdcp22 will validate the certificate's bksv directly.
+            isBCapsHDCP = isValidBKSV = true;
+
+            DP_LOG(("DP-QM> Device at '%s' has valid 22BCAPS : %x",
+                    parent->address.toString(sb), *remote22BCapsReadMessage.replyGetData()));
+        }
+    }
+
+    if (bCapsReadCompleted && bksvReadCompleted)
+    {
+        // Remote HDCP probe complete; check whether we can power down again.
+        if (parent->connector)
+        {
+            parent->connector->decPendingRemoteHdcpDetection();
+            parent->connector->isNoActiveStreamAndPowerdown();
+        }
+
+        if (isValidBKSV && isBCapsHDCP)
+        {
+            parent->isHDCPCap = True;
+        }
+        else
+        {
+            parent->isHDCPCap = False;
+        }
+
+        // Destruct only when no message is pending
+        if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+        {
+            parent->isDeviceHDCPDetectionAlive = false;
+            delete this;
+        }
+    }
+    else
+    {
+        parent->isHDCPCap = Indeterminate;
+    }
+}
+
+bool
+DeviceHDCPDetection::hdcpValidateKsv
+(
+    const NvU8 *ksv,
+    NvU32 Size
+)
+{
+    if (HDCP_KSV_SIZE <= Size)
+    {
+        // A valid 40-bit HDCP KSV must contain exactly twenty 1s and twenty 0s.
+        NvU32 i, j;
+        NvU32 count_ones = 0;
+        for (i = 0; i < HDCP_KSV_SIZE; i++)
+        {
+            for (j = 0; j < 8; j++)
+            {
+                if (ksv[i] & (1 << j))
+                {
+                    count_ones++;
+                }
+            }
+        }
+
+        if (count_ones == 20)
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+void
+DeviceHDCPDetection::messageFailed
+(
+    MessageManager::Message *from,
+    NakData *nakData
+)
+{
+    if (from == &remoteBKSVReadMessage)
+    {
+        if ((retriesRemoteBKSVReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) &&
+            (nakData->reason == NakDefer || nakData->reason == NakTimeout))
+        {
+            retriesRemoteBKSVReadMessage++;
+            retryRemoteBKSVReadMessage = bBKSVReadMessagePending = true;
+            timer->queueCallback(this, "BKSV", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV);
+            return;
+        }
+        //
+        // If messageFailed() is called after all retries have expired, or for
+        // any other reason, reset the bBKSVReadMessagePending flag.
+        //
+        bBKSVReadMessagePending = false;
+    }
+
+    if (from == &remoteBCapsReadMessage)
+    {
+        if ((retriesRemoteBCapsReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) &&
+            (nakData->reason == NakDefer || nakData->reason == NakTimeout))
+        {
+            retriesRemoteBCapsReadMessage++;
+            retryRemoteBCapsReadMessage = bBCapsReadMessagePending = true;
+            timer->queueCallback(this, "BCaps", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV);
+            return;
+        }
+        //
+        // If messageFailed() is called after all retries have expired, or for
+        // any other reason, reset the bBCapsReadMessagePending flag.
+        //
+        bBCapsReadMessagePending = false;
+    }
+
+    if (from == &remote22BCapsReadMessage)
+    {
+        if ((retriesRemote22BCapsReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) &&
+            (nakData->reason == NakDefer || nakData->reason == NakTimeout))
+        {
+            retriesRemote22BCapsReadMessage++;
+            retryRemote22BCapsReadMessage = bBCapsReadMessagePending = true;
+            timer->queueCallback(this, "22BCaps", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV);
+            return;
+        }
+        //
+        // If messageFailed() is called after all retries have expired, or for
+        // any other reason, reset the bBCapsReadMessagePending flag.
+        //
+        bBCapsReadMessagePending = false;
+    }
+
+    parent->isHDCPCap = False;
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_LOG(("DP-QM> Message %s {%p} at '%s' failed. Device marked as not HDCP capable.",
+            from == &remoteBKSVReadMessage ? "REMOTE_DPCD_READ(BKSV)" :
+            from == &remoteBCapsReadMessage ? "REMOTE_DPCD_READ(BCaps)" :
+            from == &remote22BCapsReadMessage ? "REMOTE_DPCD_READ(22BCaps)" : "???",
+            from, parent->address.toString(sb)));
+
+    // Destruct only when no message is pending
+    if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+    {
+        parent->isDeviceHDCPDetectionAlive = false;
+
+        // Remote HDCP probe complete; check whether we can power down again.
+        if (parent->connector)
+        {
+            parent->connector->decPendingRemoteHdcpDetection();
+            parent->connector->isNoActiveStreamAndPowerdown();
+        }
+
+        delete this;
+    }
+}
+
+void
+DeviceHDCPDetection::expired
+(
+    const void *tag
+)
+{
+    // Clear stale HDCP state when the monitor instance has already been destroyed.
+    if (!parent->plugged)
+    {
+        if (retryRemoteBKSVReadMessage)
+        {
+            retryRemoteBKSVReadMessage = false;
+            bBKSVReadMessagePending = false;
+        }
+        else if (retryRemoteBCapsReadMessage)
+        {
+            retryRemoteBCapsReadMessage = false;
+            bBCapsReadMessagePending = false;
+        }
+        else if (retryRemote22BCapsReadMessage)
+        {
+            retryRemote22BCapsReadMessage = false;
+            bBCapsReadMessagePending = false;
+        }
+
+        if (!(bBKSVReadMessagePending || bBCapsReadMessagePending))
+        {
+            parent->isDeviceHDCPDetectionAlive = false;
+            delete this;
+        }
+        return;
+    }
+
+    if (retryRemoteBKSVReadMessage)
+    {
+        Address parentAddress = parent->address.parent();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-QM> Requeueing REMOTE_DPCD_READ_MESSAGE(BKSV) to %s", parentAddress.toString(sb)));
+
+        retryRemoteBKSVReadMessage = false;
+        remoteBKSVReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BKSV_OFFSET, HDCP_KSV_SIZE);
+        DP_LOG(("DP-QM> Get BKSV (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remoteBKSVReadMessage));
+
+        bBKSVReadMessagePending = true;
+        messageManager->post(&remoteBKSVReadMessage, this);
+    }
+
+    if (retryRemoteBCapsReadMessage)
+    {
+        Address parentAddress = parent->address.parent();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-QM> Requeueing REMOTE_DPCD_READ_MESSAGE(BCAPS) to %s", parentAddress.toString(sb)));
+
+        retryRemoteBCapsReadMessage = false;
+        remoteBCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BCAPS_OFFSET, HDCP_BCAPS_SIZE);
+        DP_LOG(("DP-QM> Get BCaps (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remoteBCapsReadMessage));
+
+        bBCapsReadMessagePending = true;
+        messageManager->post(&remoteBCapsReadMessage, this);
+    }
+
+    if (retryRemote22BCapsReadMessage)
+    {
+        Address parentAddress = parent->address.parent();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-QM> Requeueing REMOTE_DPCD_READ_MESSAGE(22BCAPS) to %s", parentAddress.toString(sb)));
+
+        retryRemote22BCapsReadMessage = false;
+        remote22BCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP22_BCAPS_OFFSET, HDCP22_BCAPS_SIZE);
+        DP_LOG(("DP-QM> Get 22BCaps (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remote22BCapsReadMessage));
+
+        bBCapsReadMessagePending = true;
+        messageManager->post(&remote22BCapsReadMessage, this);
+    }
+}
+
+DeviceHDCPDetection::~DeviceHDCPDetection()
+{
+    parent->isDeviceHDCPDetectionAlive = false;
+
+    // Clear all pending callbacks/messages
+    if (this->timer)
+    {
+        this->timer->cancelCallbacks(this);
+    }
+
+    if (this->messageManager)
+    {
+        this->messageManager->cancelAll(&remoteBKSVReadMessage);
+        this->messageManager->cancelAll(&remoteBCapsReadMessage);
+        this->messageManager->cancelAll(&remote22BCapsReadMessage);
+    }
+}
+
+void
+DeviceHDCPDetection::waivePendingHDCPCapDoneNotification()
+{
+    // Waive the pendingHDCPCapDone notification
+    parent->shadow.hdcpCapDone = true;
+    parent->isDeviceHDCPDetectionAlive = false;
+    delete this;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp
new file mode
100644 index 0000000..59c7207 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp @@ -0,0 +1,938 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_discovery.cpp * +* The DP MST discovery manager. * +* * +\***************************************************************************/ + +#include "dp_discovery.h" +#include "dp_messages.h" +#include "dp_tracing.h" + +using namespace DisplayPort; + +void DiscoveryManager::notifyLongPulse(bool status) +{ + if (status) + { + Device device; + device.address = Address(0); + device.branch = hal->getSupportsMultistream(); + device.legacy = false; + + detectBranch(device); + } + else if (!status) + { + removeDeviceTree(Address()); + } +} + +void DiscoveryManager::detectBranch(Device device) +{ + Address::StringBuffer sb; + DP_USED(sb); + + // + // 1. Create a LINK_ADDRESS_MESSAGE to send to this target so that we can find who he is + // 2. Create a REMOTE_DPCD_WRITE to set the GUID for this target + // *alternatively* we may have to use the local DPCD HAL to write this + // 3. Enumerate any children that we may wish to queue detect on. 
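+    // Illustrative walk (hypothetical topology source -> branch -> sink):
+    //   a. LINK_ADDRESS to the branch returns its ports, peer types and GUIDs;
+    //   b. REMOTE_DPCD_WRITE (or the local DPCD HAL) programs a GUID if the
+    //      branch reported none;
+    //   c. each plugged, non-input child is then queued for detectBranch()
+    //      or detectSink() (see BranchDetection::detectCompleted below).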
+    //
+    DP_LOG(("%s(): target = %s", __FUNCTION__, device.address.toString(sb)));
+
+    BranchDetection * branchDetection = new BranchDetection(this, device);
+    outstandingBranchDetections.insertBack(branchDetection);
+    branchDetection->start();
+}
+
+void DiscoveryManager::detectSink(DiscoveryManager::Device device, bool bFromCSN)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    DP_LOG(("%s(): target = %s", __FUNCTION__, device.address.toString(sb)));
+    SinkDetection * sinkDetection = new SinkDetection(this, device, bFromCSN);
+    sinkDetection->start();
+}
+
+DiscoveryManager::Device * DiscoveryManager::findDevice(const Address & address)
+{
+    for (unsigned i = 0; i < currentDevicesCount; i++)
+        if (currentDevices[i].address == address)
+        {
+            if (currentDevices[i].peerGuid.isGuidZero() && currentDevices[i].peerDevice != Dongle &&
+                (currentDevices[i].dpcdRevisionMajor >= 1 && currentDevices[i].dpcdRevisionMinor >= 2))
+            {
+                DP_ASSERT(0 && "Zero guid for device even though it's not a dongle type.");
+            }
+            return &currentDevices[i];
+        }
+
+    return 0;
+}
+
+DiscoveryManager::Device * DiscoveryManager::findDevice(GUID & guid)
+{
+    if (guid.isGuidZero())
+    {
+        DP_ASSERT(0 && "zero guid search");
+        return 0;
+    }
+
+    for (unsigned i = 0; i < currentDevicesCount; i++)
+    {
+        if (currentDevices[i].dpcdRevisionMajor <= 1 && currentDevices[i].dpcdRevisionMinor < 2)
+            continue;
+
+        if (currentDevices[i].peerGuid == guid)
+            return &currentDevices[i];
+    }
+
+    return 0;
+}
+
+void DiscoveryManager::addDevice(const DiscoveryManager::Device & device)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    GUID guid = device.peerGuid;
+    if (guid.isGuidZero() &&
+        (device.peerDevice != Dongle) &&
+        (device.dpcdRevisionMajor >= 1 && device.dpcdRevisionMinor >= 2))
+    {
+        DP_ASSERT(0 && "GUID missing for the device");
+    }
+    DP_ASSERT(!findDevice(device.address) && "Redundant add");
+    sink->discoveryNewDevice(device);
+
+    DP_LOG(("DP-DM> New device '%s' %s %s %s", device.address.toString(sb),
+            device.branch ? "Branch" : "", device.legacy ? "Legacy" : "",
+            device.peerDevice == Dongle ? "Dongle" :
+            device.peerDevice == DownstreamSink ? "DownstreamSink" : ""));
+
+    Address::NvU32Buffer addrBuffer;
+    dpMemZero(addrBuffer, sizeof(addrBuffer));
+    device.address.toNvU32Buffer(addrBuffer);
+    NV_DPTRACE_INFO(NEW_MST_DEVICE, device.address.size(), addrBuffer[0], addrBuffer[1],
+                    addrBuffer[2], addrBuffer[3], device.branch, device.legacy, device.peerDevice);
+
+    if (currentDevicesCount < maximumTopologyNodes)
+    {
+        currentDevices[currentDevicesCount++] = device;
+    }
+}
+
+void DiscoveryManager::removeDevice(Device * device)
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    DP_LOG(("DP-DM> Lost device '%s' %s %s %s", device->address.toString(sb),
+            device->branch ? "Branch" : "", device->legacy ? "Legacy" : "",
+            device->peerDevice == Dongle ? "Dongle" :
"DownstreamSink" : "")); + + sink->discoveryLostDevice(device->address); + + for (unsigned i = (unsigned)(device-¤tDevices[0]); i < currentDevicesCount - 1; i++) + currentDevices[i] = currentDevices[i+1]; + currentDevicesCount--; +} + +void DiscoveryManager::removeDeviceTree(const Address & prefix) +{ + for (unsigned i = 0; i < currentDevicesCount;) + if (currentDevices[i].address.under(prefix)) + removeDevice(¤tDevices[i]); + else + i++; +} + +DiscoveryManager::Device * DiscoveryManager::findChildDeviceForBranchWithGuid +( + GUID guid, + unsigned port, + Address & childAddr +) +{ + // Find it in relevant parent's device list + DiscoveryManager::Device * parentDevice = findDevice(guid); + if (!parentDevice) + { + DP_LOG(("DM> No Parent present for the device in DB.")); + return 0; + } + + childAddr = parentDevice->address; + childAddr.append(port); + return (findDevice(childAddr)); +} + +void DiscoveryManager::SinkDetection::detectCompleted(bool passed) +{ + // we could not read or write the guid + if (!passed) + { + // + // DP1.2 monitors that do not support GUID get filtered and dropped as 'not present'. + // Instead we demote such monitors to DP1.1 and continue sink detection so that end + // user at least gets active display scanout on such monitors (albeit reduced to DP1.1). + // + if (device.dpcdRevisionMajor > 1 || device.dpcdRevisionMinor >= 2) + { + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> sink at '%s' failed GUID identification, demote to 1.1 sink.", + address.toString(sb))); + device.dpcdRevisionMajor = 1; + device.dpcdRevisionMinor = 1; + } + else + { + // Had it previously been reported as present? + if (Device * device = parent->findDevice(address)) + parent->removeDevice(device); + + delete this; + return; + } + } + + // at this point we are sure that we have a device GUID. + // We need to check whether the device is new to the DB. + // Had we previously reported the device? + + Device * oldDevice = parent->findDevice(device.address); + + if (!oldDevice) + { + // completely new device + parent->addDevice(device); + } + // If it was a branch and now isn't.. delete the tree of devices under it + else if (oldDevice && oldDevice->branch && !device.branch) + { + parent->removeDeviceTree(device.address); + } + // It changed, delete the previously reported + else if (oldDevice && (oldDevice->legacy != device.legacy || + oldDevice->dpcdRevisionMajor!= device.dpcdRevisionMajor || + oldDevice->dpcdRevisionMinor!= device.dpcdRevisionMinor || + oldDevice->peerDevice != device.peerDevice|| + oldDevice->peerGuid != device.peerGuid || + oldDevice->SDPStreams != device.SDPStreams|| + oldDevice->SDPStreamSinks != device.SDPStreamSinks || + oldDevice->videoSink != device.videoSink)) + { + parent->removeDevice(oldDevice); + } + + // otherwise.. it already existed, and still does + + // We're done + completed = true; + delete this; +} + +void DiscoveryManager::BranchDetection::detectCompleted(bool present) +{ + // + // Handle device not present + // + if (!present) + { + // Had it previously been reported as present? + if (Device * device = parent->findDevice(address)) + parent->removeDevice(device); + + delete this; + return; + } + + // + // We've got a linkAddressMessage and we were able to program the GUID! 
+void DiscoveryManager::BranchDetection::detectCompleted(bool present)
+{
+    //
+    // Handle device not present
+    //
+    if (!present)
+    {
+        // Had it previously been reported as present?
+        if (Device * device = parent->findDevice(address))
+            parent->removeDevice(device);
+
+        delete this;
+        return;
+    }
+
+    //
+    // We've got a linkAddressMessage and we were able to program the GUID!
+    // Report the branch and queue any children that were enumerated for detection
+    //
+    parent->addDevice(parentDevice);
+
+    unsigned portsToDelete = (1 << (Address::maxPortCount+1)) - 1;  // 16 ports
+    for (unsigned i = 0; i < childCount; i++)
+    {
+        Device newDevice;
+        newDevice.address = address;
+        newDevice.address.append(child[i].portNumber);
+
+        //
+        // Input port? Nothing plugged in? Delete the tree of all devices under this one
+        // DP 1.2 Spec : 2.11.9.5.x
+        //
+        if (child[i].isInputPort || !child[i].dpPlugged) {
+            continue;
+        }
+
+        portsToDelete &= ~(1 << child[i].portNumber);
+
+        newDevice.peerDevice = child[i].peerDeviceType;
+        newDevice.legacy = child[i].legacyPlugged && (newDevice.peerDevice == Dongle);
+        newDevice.dpcdRevisionMajor = child[i].dpcdRevisionMajor;
+        newDevice.dpcdRevisionMinor = child[i].dpcdRevisionMinor;
+        // If internal device, use the parent's GUID, which we ourselves generated or got from the LAM.
+        if (child[i].portNumber > PHYSICAL_PORT_END)
+            newDevice.peerGuid = parentDevice.peerGuid;
+        else
+            newDevice.peerGuid = child[i].peerGUID;
+
+        newDevice.SDPStreams = child[i].SDPStreams;
+        newDevice.SDPStreamSinks = child[i].SDPStreamSinks;
+
+        if (child[i].peerDeviceType == DownstreamBranch &&
+            child[i].hasMessaging)
+        {
+            newDevice.branch = true;
+            newDevice.videoSink = false;
+        }
+        else
+        {
+            newDevice.branch = false;
+            newDevice.videoSink = ((child[i].peerDeviceType == Dongle) ?
+                                    child[i].legacyPlugged : true);
+        }
+
+        //
+        // Had we previously reported the device?
+        //
+        Device * oldDevice = parent->findDevice(newDevice.address);
+
+        // If it was a branch and now isn't, delete the tree of devices under it
+        if (oldDevice && oldDevice->branch && !newDevice.branch)
+        {
+            parent->removeDeviceTree(newDevice.address);
+        }
+        // It changed, delete
+        else if (oldDevice && (oldDevice->legacy != newDevice.legacy ||
+                               oldDevice->dpcdRevisionMajor != newDevice.dpcdRevisionMajor ||
+                               oldDevice->dpcdRevisionMinor != newDevice.dpcdRevisionMinor ||
+                               oldDevice->peerDevice != newDevice.peerDevice ||
+                               oldDevice->peerGuid != newDevice.peerGuid ||
+                               oldDevice->SDPStreams != newDevice.SDPStreams ||
+                               oldDevice->SDPStreamSinks != newDevice.SDPStreamSinks ||
+                               oldDevice->videoSink != newDevice.videoSink))
+        {
+            parent->removeDevice(oldDevice);
+        }
+
+        // otherwise it already existed, and still does
+        if (newDevice.branch)
+        {
+            parent->detectBranch(newDevice);
+        }
+        else
+        {
+            // The new device is a sink. It may or may not have a guid;
+            // write the guid if needed.
+            parent->detectSink(newDevice, false);
+        }
+    }
+
+    for (unsigned i = 0; i <= Address::maxPortCount; i++)
+        if ((portsToDelete >> i) & 1)
+        {
+            Address a = address;
+            a.append(i);
+            parent->removeDeviceTree(a);
+        }
+
+    // We're done
+    completed = true;
+    delete this;
+}
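// Illustrative sketch (not from the original source): the portsToDelete bookkeeping in
// detectCompleted() above is a bitmask sweep -- mark every port stale, clear the bit
// for each port the LINK_ADDRESS reply reports live, then prune the survivors. The
// port count here is an assumption for illustration; the driver derives it from
// Address::maxPortCount.
enum { MaxPorts = 16 };

static unsigned stalePortMask(unsigned liveCount, const unsigned livePorts[])
{
    unsigned portsToDelete = (1u << MaxPorts) - 1;   // assume everything is stale
    for (unsigned i = 0; i < liveCount; i++)
        portsToDelete &= ~(1u << livePorts[i]);      // keep ports seen in the reply
    return portsToDelete;   // caller prunes the subtree under each bit still set
}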
+void DiscoveryManager::BranchDetection::expired(const void * tag)
+{
+    if (retryLinkAddressMessage)
+    {
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-DM> Requeueing LINK_ADDRESS_MESSAGE to %s", address.toString(sb)));
+
+        retryLinkAddressMessage = false;
+        linkAddressMessage.set(address);
+        parent->messageManager->post(&linkAddressMessage, this);
+    }
+    else if (retryRemoteDpcdWriteMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-DM> Requeueing REMOTE_DPCD_WRITE_MESSAGE to %s", parentAddress.toString(sb)));
+
+        retryRemoteDpcdWriteMessage = false;
+        remoteDpcdWriteMessage.set(parentAddress, parentAddress.tail(), NV_DPCD_GUID, sizeof(GUID), (NvU8 *)&parentDevice.peerGuid);
+        DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", address.toString(sb), &remoteDpcdWriteMessage));
+
+        parent->messageManager->post(&remoteDpcdWriteMessage, this);
+    }
+}
+
+void DiscoveryManager::SinkDetection::expired(const void * tag)
+{
+    if (retryLinkAddressMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-DM> Requeueing LAM message to %s", parentAddress.toString(sb)));
+
+        retryLinkAddressMessage = false;
+        linkAddressMessage.set(parentAddress);
+
+        parent->messageManager->post(&linkAddressMessage, this);
+    }
+    else if (retryRemoteDpcdReadMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-DM> Requeueing REMOTE_DPCD_READ_MESSAGE to %s", parentAddress.toString(sb)));
+
+        retryRemoteDpcdReadMessage = false;
+        remoteDpcdReadMessage.set(parentAddress, parentAddress.tail(), NV_DPCD_GUID, sizeof(GUID));
+        DP_LOG(("DP-DM> Reading GUID (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", address.toString(sb), &remoteDpcdReadMessage));
+
+        parent->messageManager->post(&remoteDpcdReadMessage, this);
+    }
+    else if (retryRemoteDpcdWriteMessage)
+    {
+        Address parentAddress = address;
+        parentAddress.pop();
+
+        Address::StringBuffer sb;
+        DP_USED(sb);
+        DP_LOG(("DP-DM> Requeueing REMOTE_DPCD_WRITE_MESSAGE to %s", parentAddress.toString(sb)));
+
+        retryRemoteDpcdWriteMessage = false;
+        remoteDpcdWriteMessage.set(parentAddress,
+                                   parentAddress.tail(),
+                                   NV_DPCD_GUID, sizeof(GUID),
+                                   (NvU8 *)&device.peerGuid);
+        DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", address.toString(sb), &remoteDpcdWriteMessage));
+
+        parent->messageManager->post(&remoteDpcdWriteMessage, this);
+    }
+}
+
+void DiscoveryManager::BranchDetection::messageFailed(MessageManager::Message * from, NakData * nakData)
+{
+    //
+    // If any of our messages fail, we've completed detection on this buzzard.
+ // The only exception is if we get a DEFER - then we retry indefinitely + // + if (from == &linkAddressMessage) + { + if (retriesLinkAddressMessage < DPCD_LINK_ADDRESS_MESSAGE_RETRIES && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesLinkAddressMessage++; + retryLinkAddressMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &remoteDpcdWriteMessage) + { + if ((retriesRemoteDpcdWriteMessage < DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdWriteMessage++; + retryRemoteDpcdWriteMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN); + return; + } + } + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Message %s {%p} at '%s' failed. Device marked not present.", + from == &linkAddressMessage ? "LINK_ADDRESS_MESSAGE" : + from == &remoteDpcdWriteMessage ? "REMOTE_DPCD_WRITE(GUID)" : "???", + from, address.toString(sb))); + + + // + // Detection is done and branch doesn't exist. + // (Note this automatically removes self from any list we're in) + // + detectCompleted(false); +} + +void DiscoveryManager::SinkDetection::messageFailed(MessageManager::Message * from, NakData * nakData) +{ + if (from == &remoteDpcdReadMessage) + { + if ((retriesRemoteDpcdReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdReadMessage++; + retryRemoteDpcdReadMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &remoteDpcdWriteMessage) + { + if ((retriesRemoteDpcdWriteMessage < DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdWriteMessage++; + retryRemoteDpcdWriteMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &linkAddressMessage) + { + if ((retriesLinkAddressMessage < DPCD_LINK_ADDRESS_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesLinkAddressMessage++; + retryLinkAddressMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN); + return; + } + } + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Message %s {%p} at '%s' failed.", + from == &remoteDpcdWriteMessage ? "REMOTE_DPCD_WRITE(GUID)" : + from == &remoteDpcdReadMessage ? "REMOTE_DPCD_READ(GUID)" : + from == &linkAddressMessage ? 
"LINK_ADDRESS_MESSAGE" : "???", + from, address.toString(sb))); + + detectCompleted(false); +} + +void DiscoveryManager::SinkDetection::handleLinkAddressDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + LinkAddressMessage::Result child; + child = *linkAddressMessage.result(address.tail()); + + device.peerDevice = child.peerDeviceType; + device.dpcdRevisionMajor = child.dpcdRevisionMajor; + device.dpcdRevisionMinor = child.dpcdRevisionMinor; + + if (device.dpcdRevisionMajor == 0) + { + device.dpcdRevisionMajor = 1; + device.dpcdRevisionMinor = 1; + } + device.portMap.inputMap |= (1 << child.portNumber); + + DP_LOG(("DP-DM> handleLinkAddressDownReply for sink device on '%s': DPCD Rev = %d.%d", + address.toString(sb), device.dpcdRevisionMajor, device.dpcdRevisionMinor)); + + // Check if the device already has a GUID + // or it is a dongle or on a logical port ; in which case no GUID is required. + if ((!device.peerGuid.isGuidZero()) || + (device.peerDevice == Dongle) || + (device.dpcdRevisionMajor <= 1 && device.dpcdRevisionMinor < 2) || + (device.address.tail() > PHYSICAL_PORT_END)) + { + parent->addDevice(device); + delete this; + return; + } + + Address parentAddress = address.parent(); + remoteDpcdReadMessage.set(parentAddress, address.tail(), NV_DPCD_GUID, sizeof(GUID)); + + parent->messageManager->post(&remoteDpcdReadMessage, this); + +} + +void DiscoveryManager::SinkDetection::handleRemoteDpcdReadDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> REMOTE_DPCD_READ {%p} at '%s' completed", + (MessageManager::Message *)&remoteDpcdReadMessage, + address.toString(sb))); + if (remoteDpcdReadMessage.replyNumOfBytesReadDPCD() != sizeof(GUID)) + { + DP_ASSERT(0 && "Incomplete GUID in remote DPCD read message"); + detectCompleted(false); + return; + } + + DP_ASSERT(remoteDpcdReadMessage.replyPortNumber() == address.tail()); + device.peerGuid.copyFrom(remoteDpcdReadMessage.replyGetData()); + + if (!device.peerGuid.isGuidZero()) + { + // we got the GUID ... handle device add/remove + detectCompleted(true); + } + else + { + // + // We need to give ourselves a non-zero GUID! + // + parent->guidBuilder.makeGuid(device.peerGuid); + + Address parentAddress = address.parent(); + remoteDpcdWriteMessage.set(parentAddress, + address.tail(), + NV_DPCD_GUID, sizeof(GUID), + (NvU8 *)&device.peerGuid); + + DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", + address.toString(sb), &remoteDpcdWriteMessage)); + + parent->messageManager->post(&remoteDpcdWriteMessage, this); + } +} + +void DiscoveryManager::BranchDetection::handleLinkAddressDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + + // + // Copy link address results out of the structure + // - We cannot process the contents until after + // we've programmed the GUID. The reasoning is + // that we need to make sure we do not enumerate + // devices not yet in a usable state. + // + childCount = linkAddressMessage.resultCount(); + for (unsigned i = 0; i < childCount; i++) + { + child[i] = *linkAddressMessage.result(i); + + // also update the portmap + parentDevice.portMap.internalMap = 0xFF00; // ports 0x8 to 0xF are internal + parentDevice.portMap.validMap |= (1 << child[i].portNumber); + if (child[i].isInputPort) + { + parentDevice.peerDevice = child[i].peerDeviceType; + parentDevice.portMap.inputMap |= (1 << child[i].portNumber); + if (address == Address(0)) + { + // + // For immediate branch device, we will have already read DPCD version + // in notifyHPD. 
+void DiscoveryManager::BranchDetection::handleLinkAddressDownReply()
+{
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    //
+    // Copy link address results out of the structure
+    //   - We cannot process the contents until after
+    //     we've programmed the GUID. The reasoning is
+    //     that we need to make sure we do not enumerate
+    //     devices not yet in a usable state.
+    //
+    childCount = linkAddressMessage.resultCount();
+    for (unsigned i = 0; i < childCount; i++)
+    {
+        child[i] = *linkAddressMessage.result(i);
+
+        // also update the portmap
+        parentDevice.portMap.internalMap = 0xFF00;  // ports 0x8 to 0xF are internal
+        parentDevice.portMap.validMap |= (1 << child[i].portNumber);
+        if (child[i].isInputPort)
+        {
+            parentDevice.peerDevice = child[i].peerDeviceType;
+            parentDevice.portMap.inputMap |= (1 << child[i].portNumber);
+            if (address == Address(0))
+            {
+                //
+                // For the immediate branch device, we will have already read the DPCD
+                // version in notifyHPD, so we can just use that to populate here.
+                // For the remaining devices, the LAM to the parent branch reports the
+                // child DPCD version in its reply and we populate it in
+                // BranchDetection::detectCompleted.
+                //
+                parentDevice.dpcdRevisionMajor = parent->hal->getRevisionMajor();
+                parentDevice.dpcdRevisionMinor = parent->hal->getRevisionMinor();
+            }
+        }
+    }
+
+    linkAddressMessage.getGUID(parentDevice.peerGuid);
+    if (parentDevice.peerGuid.isGuidZero())
+    {
+        //
+        // We need to give ourselves a non-zero GUID!
+        //
+        parent->guidBuilder.makeGuid(parentDevice.peerGuid);
+
+        if (address == Address(0))
+        {
+            DP_LOG(("DP-DM> Setting GUID (locally) for '%s'", address.toString(sb)));
+            //
+            // We're locally connected, use the DPCD HAL to write the new GUID
+            //
+            if (AuxRetry::ack != parent->hal->setGUID(parentDevice.peerGuid))
+            {
+                detectCompleted(false);
+                return;
+            }
+
+            detectCompleted(true);
+        }
+        else
+        {
+            //
+            // Let's build a remote DPCD request. Remember the target is the *parent*
+            // of the device we want to talk to
+            //
+            Address parentAddress = address;
+            parentAddress.pop();
+            remoteDpcdWriteMessage.set(parentAddress, address.tail(),
+                                       NV_DPCD_GUID, sizeof(GUID),
+                                       (NvU8 *)&parentDevice.peerGuid);
+
+            DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}",
+                    address.toString(sb), &remoteDpcdWriteMessage));
+
+            parent->messageManager->post(&remoteDpcdWriteMessage, this);
+        }
+    }
+    else
+    {
+        //
+        // Already had a GUID
+        //
+        detectCompleted(true);
+    }
+}
+
+void DiscoveryManager::BranchDetection::messageCompleted(MessageManager::Message * from)
+{
+    if (from == &linkAddressMessage)
+        handleLinkAddressDownReply();
+    else if (from == &remoteDpcdWriteMessage)
+        detectCompleted(true);
+}
+
+void DiscoveryManager::SinkDetection::messageCompleted(MessageManager::Message * from)
+{
+    if (from == &remoteDpcdReadMessage)
+        handleRemoteDpcdReadDownReply();
+    else if (from == &linkAddressMessage)
+        handleLinkAddressDownReply();
+    else if (from == &remoteDpcdWriteMessage)
+        detectCompleted(true);
+}
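// Illustrative sketch (not from the original source): messageCompleted() above routes
// replies by comparing the incoming pointer against member message objects instead of
// tagging replies with an enum. The idiom in isolation (names hypothetical):
struct Message {};

struct Detector
{
    Message linkAddress;
    Message remoteRead;

    void onCompleted(Message * from)
    {
        if (from == &linkAddress)        // identity, not payload, selects the handler
            handleLinkAddress();
        else if (from == &remoteRead)
            handleRemoteRead();
    }

    void handleLinkAddress() {}
    void handleRemoteRead()  {}
};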
+void DiscoveryManager::BranchDetection::start()
+{
+    //
+    // 1. Create a LINK_ADDRESS_MESSAGE to send to this target so that we can find who it is
+    // 2. Create a REMOTE_DPCD_WRITE to set the GUID for this target
+    //    *alternatively* we may have to use the local DPCD HAL to write this
+    // 3. Enumerate any children that we may wish to queue detect on.
+    //
+    linkAddressMessage.set(address);
+
+    Address::StringBuffer sb;
+    DP_USED(sb);
+    DP_LOG(("DP-DM> Detecting '%s' (sending LINK_ADDRESS_MESSAGE {%p})",
+            address.toString(sb),
+            (MessageManager::Message *)&linkAddressMessage));
+
+    parent->messageManager->post(&linkAddressMessage, this);
+}
+
+void DiscoveryManager::SinkDetection::start()
+{
+    //
+    // Per DP1.4 requirement:
+    // Send a PowerUpPhy message first, to make sure the device is ready to work
+    //
+    NakData nakData;
+    powerUpPhyMessage.set(address.parent(), address.tail(), NV_TRUE);
+    parent->messageManager->send(&powerUpPhyMessage, nakData);
+
+    Address::StringBuffer sb;
+    DP_USED(sb);
+
+    // The sink was found in a CSN, so the DPCD revision is missing
+    if (bFromCSN)
+    {
+        parent->outstandingSinkDetections.insertBack(this);
+        // Create a LINK_ADDRESS_MESSAGE to send to the parent of this target
+        linkAddressMessage.set(address.parent());
+
+        DP_LOG(("DP-DM> Detecting '%s' (sending LINK_ADDRESS_MESSAGE {%p})",
+                address.toString(sb),
+                (MessageManager::Message *)&linkAddressMessage));
+        parent->messageManager->post(&linkAddressMessage, this);
+    }
+    else // The sink was found in a LAM sent for the branch, and came with its DPCD rev.
+    {
+        // Check if the device already has a GUID, or is a dongle or on a logical port,
+        // in which case no GUID is required.
+        if ((!device.peerGuid.isGuidZero()) ||
+            (device.peerDevice == Dongle) ||
+            (device.dpcdRevisionMajor <= 1 && device.dpcdRevisionMinor < 2) ||
+            (device.address.tail() > PHYSICAL_PORT_END))
+        {
+            parent->addDevice(device);
+            delete this;
+            return;
+        }
+
+        parent->outstandingSinkDetections.insertBack(this);
+        Address parentAddress = address.parent();
+        remoteDpcdReadMessage.set(parentAddress, address.tail(), NV_DPCD_GUID, sizeof(GUID));
+
+        parent->messageManager->post(&remoteDpcdReadMessage, this);
+    }
+}
+
+DiscoveryManager::BranchDetection::~BranchDetection()
+{
+    List::remove(this);
+
+    if (parent->outstandingSinkDetections.isEmpty() &&
+        parent->outstandingBranchDetections.isEmpty())
+        parent->sink->discoveryDetectComplete();
+
+    parent->timer->cancelCallbacks(this);
+}
+
+DiscoveryManager::SinkDetection::~SinkDetection()
+{
+    List::remove(this);
+
+    if (parent->outstandingSinkDetections.isEmpty() &&
+        parent->outstandingBranchDetections.isEmpty())
+        parent->sink->discoveryDetectComplete();
+
+    parent->timer->cancelCallbacks(this);
+}
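// Illustrative sketch (not from the original source): the two destructors above double
// as completion bookkeeping -- each detection removes itself from its outstanding list,
// and the last one out fires discoveryDetectComplete(). The same "last worker reports
// done" shape with a plain counter instead of intrusive lists:
struct DetectTracker
{
    unsigned outstanding = 0;
    void (*onAllDone)() = 0;             // discoveryDetectComplete() stand-in

    void begin()  { outstanding++; }
    void finish()
    {
        if (--outstanding == 0 && onAllDone)
            onAllDone();                 // fired exactly once, by the last detection
    }
};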
+void DiscoveryManager::ReceiverSink::messageProcessed(MessageManager::MessageReceiver * from)
+{
+    DP_ASSERT((from->getRequestId() == 0x2) && "This receiver is only meant for CSNs");
+
+    // CSNs are broadcast messages, so replies always go to the immediate downstream branch
+    CsnUpReplyContainer * csnReplyContainer = new CsnUpReplyContainer(parent);
+    parent->pendingCsnUpReplies.insertBack(csnReplyContainer);
+
+    // Send acknowledgement to the CSN sender.
+    csnReplyContainer->postUpReply();
+
+    ConnStatusNotifyMessage* csnMessage = static_cast<ConnStatusNotifyMessage*>(from);
+
+    if (csnMessage->getUpRequestData()->isInputPort)
+    {
+        DP_LOG(("Concentrator?? Got CSN for an upstream port!"));
+        return;
+    }
+
+    Address childAddr;
+    DiscoveryManager::Device * oldDevice = parent->findChildDeviceForBranchWithGuid(csnMessage->getUpRequestData()->guid,
+                                                                                    csnMessage->getUpRequestData()->port, childAddr);
+    if (!csnMessage->getUpRequestData()->devicePlugged)  // some device was unplugged or powered off
+    {
+        if (oldDevice)
+            parent->removeDeviceTree(childAddr);
+        return;
+    }
+
+    handleCSN(from);
+}
+
+void DiscoveryManager::ReceiverSink::handleCSN(MessageManager::MessageReceiver * from)
+{
+    ConnStatusNotifyMessage* csnMessage = static_cast<ConnStatusNotifyMessage*>(from);
+
+    // There is no point in serving an upRequest when no device is present.
+    if (parent->currentDevicesCount == 0)
+    {
+        DP_ASSERT(0 && "DM> No Device in the Topology");
+        return;
+    }
+
+    //
+    // Check for a non-zero GUID in the CSN message. It is mandatory to find the respective parent.
+    // A branch should not send a CSN with a zero GUID, as a unique GUID is set before the CSN.
+    //
+    if ((csnMessage->getUpRequestData()->guid).isGuidZero())
+    {
+        DP_ASSERT(0 && "Ignoring CSN. Invalid parent device due to zero-GUID.");
+        return;
+    }
+
+    Address childAddr;
+    unsigned port = csnMessage->getUpRequestData()->port;
+    DiscoveryManager::Device * oldDevice =
+        parent->findChildDeviceForBranchWithGuid(csnMessage->getUpRequestData()->guid,
+                                                 port,
+                                                 childAddr);
+
+    // Check if we already have a device
+    if (oldDevice)
+    {
+        oldDevice->dirty = true;
+
+        // Set the videoSink status of oldDevice again, as the old device might be a legacy dongle
+        // and a video sink is now added with it
+        oldDevice->videoSink = ((csnMessage->getUpRequestData()->peerDeviceType == Dongle) ?
+                                csnMessage->getUpRequestData()->legacyPlugged : true);
+
+        parent->sink->discoveryNewDevice(*oldDevice);
+        return;
+    }
+
+    // Exit if no valid address matched for further detection.
+    if ((childAddr.size() == 0) ||
+        (childAddr.size() > Address::maxHops))
+    {
+        DP_ASSERT(0 && "Ignoring CSN. Invalid parent device due to GUID not found in discovered topology");
+        return;
+    }
+
+    DiscoveryManager::Device newDevice;
+    newDevice.address = childAddr;
+    newDevice.branch = (csnMessage->getUpRequestData()->messagingCapability == true) &&
+                       (csnMessage->getUpRequestData()->peerDeviceType == DownstreamBranch);
+
+    newDevice.peerDevice = csnMessage->getUpRequestData()->peerDeviceType;
+    newDevice.legacy = csnMessage->getUpRequestData()->legacyPlugged == true;
+    newDevice.SDPStreams = newDevice.SDPStreamSinks = 0;
+
+    if (csnMessage->getUpRequestData()->devicePlugged)  // Check for a new device only if it's plugged
+    {
+        if (newDevice.branch)
+        {
+            newDevice.videoSink = false;
+            // send a LAM and the whole nine yards
+            DP_ASSERT(newDevice.legacy == false);
+            parent->detectBranch(newDevice);
+            return;
+        }
+        else
+        {
+            newDevice.SDPStreams = newDevice.SDPStreamSinks = 1;
+            newDevice.videoSink = ((csnMessage->getUpRequestData()->peerDeviceType == Dongle) ?
+                                   csnMessage->getUpRequestData()->legacyPlugged : true);
+
+            parent->detectSink(newDevice, true);
+            return;
+        }
+    }
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp
new file mode 100644
index 0000000..6a21db0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp
@@ -0,0 +1,625 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_edid.c * +* Implementation of SST/MST EDID reader * +* * +\***************************************************************************/ + +#include "dp_buffer.h" +#include "dp_internal.h" +#include "dp_edid.h" + +using namespace DisplayPort; + +EdidAssembler::EdidAssembler(Edid * const edid, bool bPatchCrc): + edid(edid), stream(edid->getBuffer()), oldBlockChecksum(0x00), + blocksRead(0), totalBlockCnt(0), retriesCount(0), + bPatchCrc(bPatchCrc) {} + + +bool EdidAssembler::readIsComplete() +{ + return (blocksRead > 0 && blocksRead == totalBlockCnt); +} + +void EdidAssembler::reset() +{ + oldBlockChecksum = 0x00; + blocksRead = 0; + totalBlockCnt = 0; + retriesCount = 0; + stream.seek(0); +} + +void EdidAssembler::postReply(const Buffer & buffer, unsigned sizeCompleted, bool success) +{ + if (!success || buffer.isError()) + { + retriesCount++; + return; + } + + // + // For SST: + // Check the Checksum Error Per Block reading, mark the EDID as "patched" if + // CRC is wrong. DPLib will return fallback EDID. + // + blocksRead++; + stream.write(buffer.data, sizeCompleted); + if (getEDIDBlockChecksum(buffer)) + { + if (bPatchCrc) + edid->patchCrc(); + edid->setPatchedChecksum(true); + } + return; +} + +void EdidAssembler::postReply(unsigned char * data, unsigned sizeCompleted, bool success) +{ + // + // For MST: When read of edid block failed, library will attempt to read + // same block again, but not more than EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT times + // + if (!success) + { + retriesCount++; + return; + } + + // + // Check the Checksum Error Per Block reading, + // library will attempt to read same block again, + // but not more than EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT times. + // + Buffer buffer(data, EDID_BLOCK_SIZE); + if (buffer.isError()) + { + retriesCount++; + return; + } + + NvU8 newBlockChecksum = getEDIDBlockChecksum(buffer); + if (newBlockChecksum) + { + if (this->oldBlockChecksum != newBlockChecksum) //First failure? 
+    {
+        this->oldBlockChecksum = newBlockChecksum;
+        retriesCount++;
+        return;
+    }
+    }
+
+    this->oldBlockChecksum = 0;
+    retriesCount = 0;
+    blocksRead++;
+    stream.write(data, sizeCompleted);
+}
+
+bool EdidAssembler::readNextRequest(NvU8 & seg, NvU8 & offset)
+{
+    //
+    // Cache totalBlockCnt:
+    // with EDID 1.3 HF-EEODB, it might change after the first extension block is read.
+    //
+    if ((blocksRead == 1) || (blocksRead == 2))
+        totalBlockCnt = edid->getBlockCount();
+
+    //
+    // Will return false in two scenarios:
+    // 1. EDID read is complete, all extension blocks were read
+    // 2. First EDID block was corrupted, then totalBlockCnt = 0
+    //
+    if (blocksRead >= totalBlockCnt)
+        return false;
+
+    // Retry count exceeded for a particular block?
+    if (retriesCount > EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT)
+        return false;
+
+    seg    = NvU8(blocksRead >> 1);
+    offset = NvU8((blocksRead & 0x1) * EDID_BLOCK_SIZE);
+    return true;
+}
+
+
+enum
+{
+    EDID_V1_IDX_EXTENSION = 0x7E,
+    EDID_V1_IDX_HEADER0   = 0x00,
+    EDID_V1_HEADER0       = 0x00,
+
+    EDID_V1_IDX_HEADER1   = 0x01,
+    EDID_V1_HEADER1       = 0xFF,
+
+    EDID_V1_IDX_VERSION   = 0x12,
+    EDID_V1_VERSION_1     = 0x01,
+    EDID_V2_IDX_VERREV    = 0x00,
+
+    //
+    // From od_edid.h: RM identifies VER 2 using bits 7:4.
+    //   #define EDID_V2_VERREV_VERSION    7:4   /* RW--F */
+    //   #define EDID_V2_VERREV_VERSION_2  0x02  /* RWI-V */
+    //
+    // Avoiding FLD_* macros, thus shift the VER2 value 4 bits to the left
+    //
+    EDID_V2_VERREV_VERSION_2 = 0x02 << 4,
+    EDID_FLAGS_CHKSUM_ATTEMPTS_DP = 0x5,
+};
+
+enum
+{
+    // EDID CTA-EXT (CTA 861 Extension) block defines
+    EDID_CTA_EXT_HEADER_OFFSET               = 0x00,
+    EDID_CTA_EXT_HEADER                      = 0x02,
+    EDID_CTA_EXT_VERSION_OFFSET              = 0x01,
+    EDID_CTA_EXT_VERSION_3                   = 0x03,
+    EDID_CTA_EXT_DATA_BLOCK_HEADER_OFFSET    = 0x04,
+    EDID_CTA_EXT_DATA_BLOCK_HEADER_HF_EEODB  = 0xE2,
+    EDID_CTA_EXT_DATA_BLOCK_TAG_OFFSET       = 0x05,
+    EDID_CTA_EXT_DATA_BLOCK_TAG_HF_EEODB     = 0x78,
+    EDID_CTA_EXT_DATA_BLOCK_EXT_COUNT_OFFSET = 0x06,
+};
+
+Edid::Edid(): buffer()
+{
+    // fill EDID buffer with zeroes
+    this->buffer.memZero();
+    checkSumValid = false;
+    forcedCheckSum = false;
+    fallbackEdid = false;
+    patchedChecksum = false;
+
+    // clear the WARFlags
+    _WARFlags temp = {0};
+    WARFlags = temp;
+}
+
+Edid::~Edid()
+{
+}
+
+bool Edid::verifyCRC()
+{
+    if (getEdidSize() > 0)
+    {
+        this->validateCheckSum();
+        return this->checkSumValid;
+    }
+    else
+        return false;
+}
+
+// This routine patches the EDID CRC after it has been overridden for WARs.
+void Edid::patchCrc()
+{
+    // We always override some bytes within the first 128;
+    // recalculate and fix the checksum for the first page only.
+    unsigned chksum = 0;
+    for (unsigned i = 0; i < 128; i++)
+    {
+        chksum += buffer.data[i];
+    }
+    chksum = chksum & 0xFF;
+
+    if (chksum)
+        buffer.data[127] = 0xFF & (buffer.data[127] + (0x100 - chksum));
+}
+
+bool Edid::isChecksumValid() const
+{
+    // Return true if the checksum is valid, or if the checksum wasn't valid
+    // but we were told to assume it is (forced).
+ return (checkSumValid || forcedCheckSum); +} + +bool Edid::isFallbackEdid() const +{ + return fallbackEdid; +} + +NvU8 Edid::getFirstPageChecksum() +{ + DP_ASSERT(buffer.getLength() >= 128); + if (buffer.getLength() < 128) + return 0; + else + return buffer.data[127]; +} + +NvU8 Edid::getLastPageChecksum() +{ + NvU32 bufferSize = buffer.getLength(); + NvU32 checksumLocation = this->getBlockCount() * 128 - 1; + + if (bufferSize == 0 || bufferSize < (this->getBlockCount() * 128)) + { + DP_LOG(("DP-EDID> Edid length is 0 or less than required")); + return 0; + } + + if (bufferSize % 128 != 0) + { + DP_LOG(("DP-EDID> Edid length is not a multiple of 128")); + return 0; + } + + return buffer.data[checksumLocation]; + +} + +void Edid::validateCheckSum() +{ + // Each page has its own checksum + checkSumValid = false; + for (unsigned chunk = 0; chunk < this->buffer.length; chunk += 128) + { + unsigned chksum = 0; + for (unsigned i = 0; i < 128; i++) + { + chksum += buffer.data[i+chunk]; + } + + if ((chksum & 0xFF) != 0) + return; + } + checkSumValid = true; +} + +unsigned Edid::getEdidVersion() +{ + if (buffer.isError() || buffer.length < EDID_BLOCK_SIZE) + { + return 0; + } + + // 0 version is "unknown" + unsigned version = 0; + + // Check for Version 1 EDID + if (this->buffer.data[EDID_V1_IDX_VERSION] == EDID_V1_VERSION_1) + { + version = 1; + } + // Check for version 2 EDID + else if (this->buffer.data[EDID_V2_IDX_VERREV] & EDID_V2_VERREV_VERSION_2) + { + // + // Version 2 has 256 bytes by default. + // There is a note about an extra 256 byte block if byte 0x7E + // bit 7 is set but there's no definition for it listed in + // the EDID Version 3 (971113). So, let's just skip it for now. + // + version = 2; + } + else + { + DP_ASSERT(version && "Unknown EDID version"); + } + + return version; +} + +const char * Edid::getName() const +{ + static char decodedName[16] = {0}; + int tail = 0; + if (buffer.length < 128) + return "?"; + + for (int i = 0; i < 4; i++) + if (buffer.data[0x39 + i * 18 + 0] == 0xFC) + { + for (int j = 0; j < 13; j++) + decodedName[tail++] = buffer.data[0x39 + i*18 + 2 + j]; + break; + } + decodedName[tail++] = 0; + return decodedName; +} + +unsigned Edid::getBlockCount() +{ + if (buffer.isError() || buffer.length < EDID_BLOCK_SIZE) + { + return 0; + } + + unsigned version = getEdidVersion(); + + if (version == 1) + { + NvU32 blockCount = (unsigned) this->buffer.data[EDID_V1_IDX_EXTENSION]+1; + + if (blockCount > EDID_MAX_BLOCK_COUNT) + { + DP_LOG(("DPEDID> %s: DDC read returned questionable results: " + "Total block Count too high: %d", + __FUNCTION__, blockCount)); + return 1; + } + // + // Check for the HF-EEODB defined in HDMI 2.1 specification. + // 1. It is EDID version 1.3 and the extension block count is 1 (total block count = 2) + // 2. The 1st EDID extension block is already read. (buffer.length > block size) + // 3. The 1st EDID extension block is CTA extension block. + // 4. It has HF-EEODB (1st extension block: byte4 == 0xE2 and byte5 == 0x78) + // + if ((blockCount == 2) && (buffer.length >= EDID_BLOCK_SIZE * 2)) + { + NvU8 *pExt = &(this->buffer.data[EDID_BLOCK_SIZE]); + + // + // If it's a CTA-EXT block version 3 and has HF-EEODB + // defined, update the total block count. 
+ // + if ((pExt[EDID_CTA_EXT_HEADER_OFFSET] == EDID_CTA_EXT_HEADER) && + (pExt[EDID_CTA_EXT_VERSION_OFFSET] == EDID_CTA_EXT_VERSION_3) && + (pExt[EDID_CTA_EXT_DATA_BLOCK_HEADER_OFFSET] == EDID_CTA_EXT_DATA_BLOCK_HEADER_HF_EEODB) && + (pExt[EDID_CTA_EXT_DATA_BLOCK_TAG_OFFSET] == EDID_CTA_EXT_DATA_BLOCK_TAG_HF_EEODB)) + { + blockCount = pExt[EDID_CTA_EXT_DATA_BLOCK_EXT_COUNT_OFFSET] + 1; + } + + } + return blockCount; + } + else if (version == 2) + { + // + // Version 2 has 256 bytes by default. + // There is a note about an extra 256 byte block + // if byte 0x7E bit 7 is set, but there's no + // definition for it listed in the + // EDID Version 3 (971113) So, let's just skip + // it for now. + // + return 2; + } + else + { + // Unknown EDID version. Skip it. + DP_LOG(("DPEDID> %s: Unknown EDID Version!",__FUNCTION__)); + DP_ASSERT(0 && "Unknown EDID version!"); + return 1; + } +} + +unsigned Edid::getEdidSize() const +{ + return this->buffer.length; +} + +void DisplayPort::Edid::swap(Edid & right) +{ + swapBuffers(buffer, right.buffer); + validateCheckSum(); +} + +const NvU8 fallbackEdidModes[5][EDID_BLOCK_SIZE] = { + // ID Manufacturer Name: NVD + // VIDEO INPUT DEFINITION: + // Digital Signal + // VESA DFP 1.x Compatible + + // + // The first 4 entries are for NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS (DPCD 0x20) + // 1024x768x60Hz: defined in bit 0. + // 1280x720x60Hz: defined in bit 1. + // 1920x1080x60Hz: defined in bit 2. [Mandatory] + // + { + // Bit 2: 1920x1080x60 only + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB + }, + { + // bit 2 + bit 0: 1920x1080x60 + 1024x768x60 + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x08, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3 + }, + { + // bit 2 + bit 1: 1920x1080x60 + 1280x720x60 + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x81, 0xC0, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 
0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C + }, + { + // bit2 + bit 1 + bit 0: All 3 modes. + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x08, 0x00, 0x81, 0xC0, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94 + }, + { + // ESTABLISHED TIMING I: + // 640 X 480 @ 60Hz (IBM,VGA) + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0x95, 0x00, 0x00, 0x78, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x20, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92 + } +}; + +// +// Definition of DPCD 0x20: +// 1024x768x60Hz: defined in bit 0. +// 1280x720x60Hz: defined in bit 1. +// 1920x1080x60Hz: defined in bit 2. [Mandatory] +// MIN value is 4 (only 1920x1080 supported) +// MAX value is 7 (supports all 3 modes) +// +#define SINK_VIDEO_FALLBACK_FORMATS_MIN_VALUE (0x00000004) +#define SINK_VIDEO_FALLBACK_FORMATS_MAX_VALUE (0x00000007) + +void DisplayPort::makeEdidFallback(Edid & edid, NvU32 fallbackFormatSupported) +{ + const NvU8 *data; + + // fallbackFormatSupported valid values = 4~7 + if (fallbackFormatSupported > SINK_VIDEO_FALLBACK_FORMATS_MAX_VALUE || + fallbackFormatSupported < SINK_VIDEO_FALLBACK_FORMATS_MIN_VALUE) + { + // 4 is default fallback mode. 
(only 640x480) + data = fallbackEdidModes[4]; + } + else + { + data = fallbackEdidModes[fallbackFormatSupported-4]; + } + if (!edid.getBuffer()->resize(EDID_BLOCK_SIZE)) + return; + + dpMemCopy(edid.getBuffer()->getData(), (const NvU8*)data, EDID_BLOCK_SIZE); + DP_ASSERT(edid.verifyCRC()); + edid.setFallbackFlag(true); +} + +/* +Fake EDID for DP2VGA dongle when the EDID of the real monitor is not available + +Established Timings [20 CE 00] + 640 x 480 @ 60Hz + 800 x 600 @ 72Hz + 800 x 600 @ 75Hz + 1024 x 768 @ 60Hz + 1024 x 768 @ 70Hz + 1024 x 768 @ 75Hz + +Standard Timings + Timing [3159] : 640 x 480 @ 85Hz (4:3) + Timing [4559] : 800 x 600 @ 85Hz (4:3) + Timing [6159] : 1024 x 768 @ 85Hz (4:3) + Timing [714F] : 1152 x 864 @ 75Hz (4:3) + +Detailed Timing [DTD] 1280 x 1024 @ 60.02Hz + Pixel Clock : 108.00Mhz + HBlank, HBorder : 408, 0 + HSyncStart, HSyncWidth : 48, 112 + VBlank, VBorder : 42, 0 + VSyncStart, VSyncWidth : 1, 3 + Image size : 376mm x 301mm + DigitalSeparate +/+ +*/ + +void DisplayPort::makeEdidFallbackVGA(Edid & edid) +{ + const NvU8 data[] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x13, 0x01, 0x03, 0x80, 0x26, 0x1E, 0x78, 0xEE, 0xCB, 0x05, 0xA3, 0x58, 0x4C, 0x9B, 0x25, + 0x13, 0x50, 0x54, 0x20, 0xCE, 0x00, 0x31, 0x59, 0x45, 0x59, 0x61, 0x59, 0x71, 0x4F, 0x81, 0x40, + 0x81, 0x80, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2A, 0x00, 0x98, 0x51, 0x00, 0x2A, 0x40, 0x30, 0x70, + 0x13, 0x00, 0x78, 0x2D, 0x11, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x30, 0x55, 0x1F, + 0x52, 0x0E, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x4C, + 0x43, 0x44, 0x5F, 0x56, 0x47, 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8 + }; + + if (!edid.getBuffer()->resize(sizeof(data))) + return; + + dpMemCopy(edid.getBuffer()->getData(), (const NvU8*)data, sizeof data); + DP_ASSERT(edid.verifyCRC()); + edid.setFallbackFlag(true); +} + +NvU8 DisplayPort::getEDIDBlockChecksum(const Buffer & buffer) +{ + DP_ASSERT(buffer.getLength() == 128); + + unsigned chksum = 0; + for (unsigned i = 0; i < buffer.getLength(); i++) + { + chksum += buffer.data[i]; + } + chksum = chksum & 0xFF; + return (NvU8)chksum; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp new file mode 100644 index 0000000..7cbd4c1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp @@ -0,0 +1,1846 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_evoadapter.cpp * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_evoadapter.h" +#include "dp_auxdefs.h" +#include "dp_tracing.h" +#include "dp_vrr.h" +#include + +#include +#include +#include +#include +#include + +using namespace DisplayPort; + +// +// Evo hardcodes the relationship between stream and head # +// Head#x is always stream x+1 +// +#define STREAM_TO_HEAD_ID(s) ((s) - 1) +#define HEAD_TO_STREAM_ID(s) ((s) + 1) + +// +// Data Base used to store all the regkey values. +// The type is defined in dp_regkeydatabase.h. +// All entries set to 0 before initialized by the first EvoMainLink constructor. +// The first EvoMainLink constructor will populate that data base. +// Later EvoMainLink will use values from that data base. +// +static struct DP_REGKEY_DATABASE dpRegkeyDatabase = {0}; + +enum DP_REG_VAL_TYPE +{ + DP_REG_VAL_BOOL = 0, + DP_REG_VAL_U32 = 1, + DP_REG_VAL_U16 = 2, + DP_REG_VAL_U8 = 3 +}; + +const struct +{ + const char* pName; + void* pValue; + DP_REG_VAL_TYPE valueType; +} DP_REGKEY_TABLE [] = +{ + {NV_DP_REGKEY_ENABLE_AUDIO_BEYOND_48K, &dpRegkeyDatabase.bAudioBeyond48kEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_OVERRIDE_DPCD_REV, &dpRegkeyDatabase.dpcdRevOveride, DP_REG_VAL_U32}, + {NV_DP_REGKEY_DISABLE_SSC, &dpRegkeyDatabase.bSscDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_FAST_LINK_TRAINING, &dpRegkeyDatabase.bFastLinkTrainingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_DISABLE_MST, &dpRegkeyDatabase.bMstDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_INBAND_STEREO_SIGNALING, &dpRegkeyDatabase.bInbandStereoSignalingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_SKIP_POWEROFF_EDP_IN_HEAD_DETACH, &dpRegkeyDatabase.bPoweroffEdpInHeadDetachSkipped, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_OCA_LOGGING, &dpRegkeyDatabase.bOcaLoggingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_REPORT_DEVICE_LOST_BEFORE_NEW, &dpRegkeyDatabase.bReportDeviceLostBeforeNew, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_APPLY_LINK_BW_OVERRIDE_WAR, &dpRegkeyDatabase.bLinkBwOverrideWarApplied, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_APPLY_MAX_LINK_RATE_OVERRIDES, &dpRegkeyDatabase.applyMaxLinkRateOverrides, DP_REG_VAL_U32}, + {NV_DP_REGKEY_DISABLE_DSC, &dpRegkeyDatabase.bDscDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_SKIP_ASSESSLINK_FOR_EDP, &dpRegkeyDatabase.bAssesslinkForEdpSkipped, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_HDCP_AUTH_ONLY_ON_DEMAND, &dpRegkeyDatabase.bHdcpAuthOnlyOnDemand, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_MSA_OVER_MST, &dpRegkeyDatabase.bMsaOverMstEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE, &dpRegkeyDatabase.bOptLinkKeptAlive, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_MST, &dpRegkeyDatabase.bOptLinkKeptAliveMst, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_SST, 
&dpRegkeyDatabase.bOptLinkKeptAliveSst,        DP_REG_VAL_BOOL},
+    {NV_DP_REGKEY_FORCE_EDP_ILR,                    &dpRegkeyDatabase.bBypassEDPRevCheck,          DP_REG_VAL_BOOL},
+    {NV_DP_DSC_MST_CAP_BUG_3143315,                 &dpRegkeyDatabase.bDscMstCapBug3143315,        DP_REG_VAL_BOOL}
+};
+
+EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) :
+    provider(provider),
+    timer(timer),
+    displayId(provider->getDisplayId()),
+    subdeviceIndex(provider->getSubdeviceIndex())
+{
+    //
+    // Process GPU caps (This needs to be replaced with a control call caps interface)
+    //
+    NvU32 code;
+
+    // Initialize the shared regkey database, and apply the overrides
+    this->initializeRegkeyDatabase();
+    this->applyRegkeyOverrides();
+
+    _isDynamicMuxCapable = false;
+    _isLTPhyRepeaterSupported = true;
+    _rmPhyRepeaterCount = 0;
+    dpMemZero(&_DSC, sizeof(_DSC));
+    queryGPUCapability();
+
+    queryAndUpdateDfpParams();
+
+    //
+    // Tell RM to keep its hands off the DisplayPort hardware
+    //
+    NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS setManualParams = {0};
+    setManualParams.subDeviceInstance = subdeviceIndex;
+    code = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT, &setManualParams, sizeof setManualParams);
+    DP_ASSERT(code == NVOS_STATUS_SUCCESS && "Unable to enable library mode");
+
+    //
+    // Get the mask of valid heads
+    //
+    NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS allHeadMaskParams;
+    dpMemZero(&allHeadMaskParams, sizeof allHeadMaskParams);
+    allHeadMaskParams.subDeviceInstance = subdeviceIndex;
+    code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, &allHeadMaskParams, sizeof(allHeadMaskParams));
+
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to get head mask");
+        allHeadMask = 3;
+    }
+    else
+    {
+        allHeadMask = allHeadMaskParams.headMask;
+    }
+}
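// Illustrative sketch (not from the original source): DP_REGKEY_TABLE above pairs each
// key name with a destination pointer and a type tag so one loop can fill the whole
// database. initializeRegkeyDatabase() plausibly walks it like this; dpReadRegkey() is
// a stand-in, not the driver's actual query primitive.
enum ValType { ValBool, ValU32 };

struct RegEntry
{
    const char * name;
    void       * dest;
    ValType      type;
};

static void loadRegkeys(const RegEntry * table, unsigned n,
                        unsigned (*dpReadRegkey)(const char *))
{
    for (unsigned i = 0; i < n; i++)
    {
        unsigned v = dpReadRegkey(table[i].name);
        switch (table[i].type)
        {
            case ValBool: *(bool *)table[i].dest     = (v != 0); break;
            case ValU32:  *(unsigned *)table[i].dest = v;        break;
        }
    }
}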
+
+bool EvoMainLink::vrrRunEnablementStage(unsigned stage, NvU32 *status)
+{
+    NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS params = {0};
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = this->displayId;
+
+    switch (stage)
+    {
+        case VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_BEGIN);
+            break;
+        case VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_CHALLENGE);
+            break;
+        case VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_CHECK);
+            break;
+        case VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_BEGIN);
+            break;
+        case VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_CHALLENGE);
+            break;
+        case VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_CHECK);
+            break;
+        case VRR_ENABLE_STAGE_RESET_MONITOR:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _RESET_MONITOR);
+            break;
+        case VRR_ENABLE_STAGE_INIT_PUBLIC_INFO:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _INIT_PUBLIC_INFO);
+            break;
+        case VRR_ENABLE_STAGE_GET_PUBLIC_INFO:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _GET_PUBLIC_INFO);
+            break;
+        case VRR_ENABLE_STAGE_STATUS_CHECK:
+            params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _STATUS_CHECK);
+            break;
+        default:
+            DP_ASSERT(0 && "Undefined VRR Enablement Stage.");
+            return false;
+    }
+    NvU32 retVal = provider->rmControl0073(NV0073_CTRL_CMD_DP_ENABLE_VRR, &params, sizeof(params));
+    if (status)
+    {
+        *status = params.result;
+    }
+    if (retVal != NVOS_STATUS_SUCCESS)
+    {
+        return false;
+    }
+    return true;
+}
+
+bool EvoMainLink::getEdpPowerData(bool *panelPowerOn, bool *dpcdPowerStateD0)
+{
+    NV0073_CTRL_DP_GET_EDP_DATA_PARAMS params;
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = this->displayId;
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_EDP_DATA, &params, sizeof(params));
+
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to get eDP power data, assuming panel off.");
+        if (panelPowerOn)
+        {
+            *panelPowerOn = false;
+        }
+        if (dpcdPowerStateD0)
+        {
+            *dpcdPowerStateD0 = false;
+        }
+        return false;
+    }
+    else
+    {
+        if (panelPowerOn)
+        {
+            *panelPowerOn = FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _PANEL_POWER, _ON,
+                                         params.data);
+        }
+        if (dpcdPowerStateD0)
+        {
+            *dpcdPowerStateD0 = FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _DPCD_POWER_STATE, _D0,
+                                             params.data);
+        }
+        return true;
+    }
+}
+
+NvU32 EvoMainLink::streamToHead(NvU32 streamId, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier)
+{
+    NvU32 headIndex = 0;
+    NvU32 maxHeads = allHeadMask;
+    NUMSETBITS_32(maxHeads);
+    headIndex = DP_MST_STREAMID_TO_HEAD(streamId, streamIdentifier, maxHeads);
+
+    return headIndex;
+}
+
+NvU32 EvoMainLink::headToStream(NvU32 head, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier)
+{
+    NvU32 streamIndex = 0;
+
+    NvU32 maxHeads = allHeadMask;
+    NUMSETBITS_32(maxHeads);
+    streamIndex = DP_MST_HEAD_TO_STREAMID(head, streamIdentifier, maxHeads);
+
+    return streamIndex;
+}
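// Illustrative sketch (not from the original source): the helpers above lean on the Evo
// convention noted at the top of this file -- head #x always carries stream x+1. The
// single-stream arithmetic in isolation (the DP_MST_* macros additionally fold in the
// pipeline index and the head count derived from allHeadMask):
static unsigned streamToHeadId(unsigned streamId) { return streamId - 1; }
static unsigned headToStreamId(unsigned head)     { return head + 1; }

// E.g. head 0 drives stream 1, and the mapping round-trips:
//   headToStreamId(0) == 1, streamToHeadId(headToStreamId(2)) == 2.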
+void EvoMainLink::queryGPUCapability()
+{
+    NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS params;
+
+    dpMemZero(&params, sizeof(params));
+    params.subDeviceInstance = subdeviceIndex;
+    params.sorIndex = provider->getSorIndex();
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_CAPS, &params, sizeof(params));
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to process GPU caps");
+    }
+    else
+    {
+        //
+        // Check if the MST feature needs to be disabled by regkey. A few OEMs require this:
+        // they don't want to support MST on particular SKUs, and request it through the INF.
+        //
+        _hasMultistream = (params.bIsMultistreamSupported == NV_TRUE) && !_isMstDisabledByRegkey;
+
+        _isDP1_2Supported = (params.bIsDp12Supported == NV_TRUE) ? true : false;
+        _isDP1_4Supported = (params.bIsDp14Supported == NV_TRUE) ? true : false;
+
+        _isStreamCloningEnabled = (params.bIsSCEnabled == NV_TRUE) ? true : false;
+        _hasIncreasedWatermarkLimits = (params.bHasIncreasedWatermarkLimits == NV_TRUE) ? true : false;
+
+        _isFECSupported = (params.bFECSupported == NV_TRUE) ? true : false;
+
+        _useDfpMaxLinkRateCaps = (params.bOverrideLinkBw == NV_TRUE) ? true : false;
+
+        _isLTPhyRepeaterSupported = (params.bIsTrainPhyRepeater == NV_TRUE) ? true : false;
+
+        if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _1_62, params.maxLinkRate))
+            _maxLinkRateSupportedGpu = RBR;   // in Hz
+        else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _2_70, params.maxLinkRate))
+            _maxLinkRateSupportedGpu = HBR;   // in Hz
+        else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _5_40, params.maxLinkRate))
+            _maxLinkRateSupportedGpu = HBR2;  // in Hz
+        else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _8_10, params.maxLinkRate))
+            _maxLinkRateSupportedGpu = HBR3;  // in Hz
+        else
+        {
+            DP_ASSERT(0 && "Unable to get max link rate");
+            // Assume that we can at least support RBR.
+            _maxLinkRateSupportedGpu = RBR;
+        }
+
+        if (!_isDscDisabledByRegkey)
+        {
+            _DSC.isDscSupported = params.DSC.bDscSupported ? true : false;
+            _DSC.encoderColorFormatMask = params.DSC.encoderColorFormatMask;
+            _DSC.lineBufferSizeKB = params.DSC.lineBufferSizeKB;
+            _DSC.rateBufferSizeKB = params.DSC.rateBufferSizeKB;
+            _DSC.bitsPerPixelPrecision = params.DSC.bitsPerPixelPrecision;
+            _DSC.maxNumHztSlices = params.DSC.maxNumHztSlices;
+            _DSC.lineBufferBitDepth = params.DSC.lineBufferBitDepth;
+        }
+    }
+}
+
+void EvoMainLink::triggerACT()
+{
+    NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_SEND_ACT, &params, sizeof params);
+}
+
+void EvoMainLink::configureHDCPRenegotiate(NvU64 cN, NvU64 cKSV, bool bForceReAuth, bool bRxIDMsgPending){}
+void EvoMainLink::configureHDCPGetHDCPState(HDCPState &hdcpState)
+{
+    // HDCP Not Supported
+    hdcpState.HDCP_State_Repeater_Capable = false;
+    hdcpState.HDCP_State_22_Capable = false;
+    hdcpState.HDCP_State_Encryption = false;
+    hdcpState.HDCP_State_Authenticated = false;
+}
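// Illustrative sketch (not from the original source): queryGPUCapability() above reduces
// the RM caps reply to booleans plus one max-link-rate bucket. The bucketing restated
// with plain values in place of the NV0073 DRF field tests (the constants here are
// per-lane rates in units of 10 Mbps, chosen only for illustration):
enum LinkRate { RBR_, HBR_, HBR2_, HBR3_ };

static LinkRate decodeMaxLinkRate(unsigned reportedRate)
{
    switch (reportedRate)
    {
        case 162: return RBR_;    // 1.62 Gbps per lane
        case 270: return HBR_;    // 2.70 Gbps per lane
        case 540: return HBR2_;   // 5.40 Gbps per lane
        case 810: return HBR3_;   // 8.10 Gbps per lane
        default:  return RBR_;    // unknown: assume the floor, as the code above does
    }
}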
+void EvoMainLink::configureSingleStream(NvU32 head,
+                                        NvU32 hBlankSym,
+                                        NvU32 vBlankSym,
+                                        bool  bEnhancedFraming,
+                                        NvU32 tuSize,
+                                        NvU32 waterMark,
+                                        DP_COLORFORMAT colorFormat,
+                                        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId,
+                                        DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultiStreamMode,
+                                        bool bAudioOverRightPanel,
+                                        bool bEnable2Head1Or)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.head = head;
+    params.sorIndex = provider->getSorIndex();
+    params.bEnableTwoHeadOneOr = bEnable2Head1Or;
+
+    if (singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)
+    {
+        // In 2-SST mode configure Head-SF on the primary link, so the primary link
+        // configuration gets copied to the secondary link.
+        params.dpLink = streamId;
+    }
+    else
+    {
+        params.dpLink = provider->getLinkIndex();
+    }
+
+    params.bEnableOverride = NV_TRUE;
+    params.bMST = NV_FALSE;
+    params.hBlankSym = hBlankSym;
+    params.vBlankSym = vBlankSym;
+    params.colorFormat = colorFormat;
+
+    params.SST.bEnhancedFraming = bEnhancedFraming;
+    params.SST.tuSize = tuSize;
+    params.SST.waterMark = waterMark;
+    params.SST.bEnableAudioOverRightPanel = bAudioOverRightPanel;
+
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_STREAM, &params, sizeof params);
+}
+
+void EvoMainLink::configureSingleHeadMultiStreamMode(NvU32 displayIDs[],
+                                                     NvU32 numStreams,
+                                                     NvU32 mode,
+                                                     bool  bSetConfig,
+                                                     NvU8  vbiosPrimaryDispIdIndex)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+
+    for (NvU32 pipelineID = 0; pipelineID < numStreams; pipelineID++)
+    {
+        params.displayIDs[pipelineID] = displayIDs[pipelineID];
+    }
+    params.mode = mode;
+    params.bSetConfig = bSetConfig;
+    params.numStreams = numStreams;
+    params.vbiosPrimaryDispIdIndex = vbiosPrimaryDispIdIndex;
+
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM,
+                            &params,
+                            sizeof params);
+}
+
+void EvoMainLink::configureMultiStream(NvU32 head,
+                                       NvU32 hBlankSym,
+                                       NvU32 vBlankSym,
+                                       NvU32 slotStart,
+                                       NvU32 slotEnd,
+                                       NvU32 PBN,
+                                       NvU32 Timeslice,
+                                       DP_COLORFORMAT colorFormat,
+                                       DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier,
+                                       DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode,
+                                       bool bAudioOverRightPanel,
+                                       bool bEnable2Head1Or)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS params = {0};
+    params.head = head;
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.sorIndex = provider->getSorIndex();
+    params.dpLink = provider->getLinkIndex();
+    params.bEnableOverride = NV_TRUE;
+    params.bMST = NV_TRUE;
+    params.hBlankSym = hBlankSym;
+    params.vBlankSym = vBlankSym;
+    params.colorFormat = colorFormat;
+    params.bEnableTwoHeadOneOr = bEnable2Head1Or;
+    params.singleHeadMultistreamMode = singleHeadMultistreamMode;
+
+    params.MST.slotStart = slotStart;
+    params.MST.slotEnd = slotEnd;
+    params.MST.PBN = PBN;
+    params.MST.Timeslice = Timeslice;
+    params.MST.singleHeadMSTPipeline = streamIdentifier;
+    params.MST.bEnableAudioOverRightPanel = bAudioOverRightPanel;
+
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_STREAM, &params, sizeof params);
+}
+
+void EvoMainLink::configureMsScratchRegisters(NvU32 address,
+                                              NvU32 hopCount,
+                                              NvU32 dpMsDevAddrState)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+    params.activeDevAddr = address;
+    params.sorIndex = provider->getSorIndex();
+    params.dpLink = provider->getLinkIndex();
+    params.hopCount = hopCount;
+    params.dpMsDevAddrState = dpMsDevAddrState;
+
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG, &params, sizeof params);
+}
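// Illustrative sketch (not from the original source): configureMultiStream() above hands
// the hardware one contiguous [slotStart, slotEnd) range per stream. A hypothetical bump
// allocator for such ranges; the real slot counts come from PBN math not shown here, and
// slot 0 is treated as reserved for the MTP header.
struct SlotAllocator
{
    static const unsigned totalSlots = 64;
    unsigned next = 1;                    // slot 0 reserved

    bool allocate(unsigned slotsNeeded, unsigned & slotStart, unsigned & slotEnd)
    {
        if (next + slotsNeeded > totalSlots)
            return false;                 // link is out of timeslots
        slotStart = next;
        slotEnd   = next + slotsNeeded;   // half-open range, handed to MST.slotStart/slotEnd
        next      = slotEnd;
        return true;
    }
};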
+//
+// EvoMainLink::setDpStereoMSAParameters does the DP library stereo override for
+// in-band signaling through the MSA MISC1 field and keeps the rest of the MSA
+// params the same.
+//
+// On GK110 and later, when stereo is enabled, we send the stereo eye
+// information to the sink device through the MSA MISC1 bits 2:1. Certain
+// DP 1.2 non-compliant DP->VGA dongles cannot handle this information, and
+// lose all signal when these bits are non-zero. This WAR uses an RM control
+// to override those MSA bits to zero. It should be called whenever a DP->VGA
+// dongle is in use.
+//
+bool EvoMainLink::setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams)
+{
+    NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = msaparams.displayId;
+    // Clubbing the MSA params passed by DD with the DP library stereo override
+    params.bStereoPhaseInverse = msaparams.bStereoPhaseInverse;
+    params.featureValues.misc[1] = msaparams.featureValues.misc[1];
+
+    if (bStereoEnable) {
+        params.bEnableMSA = NV_TRUE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] = DRF_SHIFTMASK(NV_DP_MSA_PROPERTIES_MISC1_STEREO) | msaparams.featureMask.miscMask[1];
+    } else {
+        params.bEnableMSA = NV_FALSE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] |= msaparams.featureMask.miscMask[1];
+    }
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES, &params, sizeof params);
+
+    //
+    // NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES is only implemented on GK110 and
+    // later, but this WAR is unnecessary on other GPUs, so ignore
+    // ERROR_NOT_SUPPORTED.
+    //
+    // XXX This may fail if a future GPU requires this WAR but does not
+    // implement this rmcontrol. To avoid that, this class would need to be
+    // aware of which evo display HAL is in use.
+    //
+    if (ret != NVOS_STATUS_SUCCESS && ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) {
+        DP_ASSERT(!"Enabling MSA stereo override failed!");
+        return false;
+    }
+
+    return true;
+}
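// Illustrative sketch (not from the original source): the WAR above works by OR-ing the
// MISC1 stereo field into featureMask so RM forces those bits. Underneath, a masked
// override is the usual read-modify-write; e.g. forcing MISC1 bits 2:1 (the stereo eye
// field named in the comment) to zero uses mask 0x06, value 0x00.
static unsigned char applyOverride(unsigned char misc1,
                                   unsigned char mask,
                                   unsigned char value)
{
    // Bits selected by 'mask' are forced to 'value'; everything else is preserved.
    return (unsigned char)((misc1 & ~mask) | (value & mask));
}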
+//
+// EvoMainLink::setDpMSAParameters combines the MSA parameters passed by DD
+// for format YCbCr 4:2:0 with the DP library stereo override for in-band
+// signaling through the MSA MISC1 field.
+//
+// On GK110 and later, when stereo is enabled, we send the stereo eye
+// information to the sink device through the MSA MISC1 bits 2:1. Certain
+// DP 1.2 non-compliant DP->VGA dongles cannot handle this information, and
+// lose all signal when these bits are non-zero. This WAR uses an RM control
+// to override those MSA bits to zero. It should be called whenever a DP->VGA
+// dongle is in use.
+//
+bool EvoMainLink::setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams)
+{
+    NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = msaparams.displayId;
+    // Combine the MSA params passed by DD with the DP library stereo override.
+    params.bStereoPhaseInverse = msaparams.bStereoPhaseInverse;
+    params.bCacheMsaOverrideForNextModeset = true;
+    params.featureValues.misc[0] = msaparams.featureValues.misc[0];
+    params.featureValues.misc[1] = msaparams.featureValues.misc[1];
+    params.featureMask.miscMask[0] = msaparams.featureMask.miscMask[0];
+
+    params.featureValues.rasterTotalHorizontal = msaparams.featureValues.rasterTotalHorizontal;
+    params.featureValues.rasterTotalVertical = msaparams.featureValues.rasterTotalVertical;
+    params.featureValues.activeStartHorizontal = msaparams.featureValues.activeStartHorizontal;
+    params.featureValues.activeStartVertical = msaparams.featureValues.activeStartVertical;
+    params.featureValues.surfaceTotalHorizontal = msaparams.featureValues.surfaceTotalHorizontal;
+    params.featureValues.surfaceTotalVertical = msaparams.featureValues.surfaceTotalVertical;
+    params.featureValues.syncWidthHorizontal = msaparams.featureValues.syncWidthHorizontal;
+    params.featureValues.syncPolarityHorizontal = msaparams.featureValues.syncPolarityHorizontal;
+    params.featureValues.syncHeightVertical = msaparams.featureValues.syncHeightVertical;
+    params.featureValues.syncPolarityVertical = msaparams.featureValues.syncPolarityVertical;
+
+    params.featureMask.bRasterTotalHorizontal = msaparams.featureMask.bRasterTotalHorizontal;
+    params.featureMask.bRasterTotalVertical = msaparams.featureMask.bRasterTotalVertical;
+    params.featureMask.bActiveStartHorizontal = msaparams.featureMask.bActiveStartHorizontal;
+    params.featureMask.bActiveStartVertical = msaparams.featureMask.bActiveStartVertical;
+    params.featureMask.bSurfaceTotalHorizontal = msaparams.featureMask.bSurfaceTotalHorizontal;
+    params.featureMask.bSurfaceTotalVertical = msaparams.featureMask.bSurfaceTotalVertical;
+    params.featureMask.bSyncWidthHorizontal = msaparams.featureMask.bSyncWidthHorizontal;
+    params.featureMask.bSyncPolarityHorizontal = msaparams.featureMask.bSyncPolarityHorizontal;
+    params.featureMask.bSyncHeightVertical = msaparams.featureMask.bSyncHeightVertical;
+    params.featureMask.bSyncPolarityVertical = msaparams.featureMask.bSyncPolarityVertical;
+
+    params.featureValues.reserved[0] = msaparams.featureValues.reserved[0];
+    params.featureValues.reserved[1] = msaparams.featureValues.reserved[1];
+    params.featureValues.reserved[2] = msaparams.featureValues.reserved[2];
+
+    params.pFeatureDebugValues = msaparams.pFeatureDebugValues;
+
+    if (bStereoEnable) {
+        params.bEnableMSA = NV_TRUE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] = DRF_SHIFTMASK(NV_DP_MSA_PROPERTIES_MISC1_STEREO) | msaparams.featureMask.miscMask[1];
+    } else {
+        params.bEnableMSA = NV_FALSE | msaparams.bEnableMSA;
+        params.featureMask.miscMask[1] |= msaparams.featureMask.miscMask[1];
+    }
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES, &params, sizeof params);
+
+    //
+    // NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES is only implemented on GK110 and
+    // later, but this WAR is unnecessary on other GPUs, so ignore
+    // ERROR_NOT_SUPPORTED.
+    //
+    // XXX This may fail if a future GPU requires this WAR but does not
+    // implement this rmcontrol.
+    // To avoid that, this class would need to be aware of which evo display
+    // HAL is in use.
+    //
+    if (ret != NVOS_STATUS_SUCCESS && ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) {
+        DP_ASSERT(!"Enabling MSA stereo override failed!");
+        return false;
+    }
+
+    return true;
+}
+
+bool EvoMainLink::setFlushMode()
+{
+    NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.bFireAndForget = NV_FALSE;
+
+    params.base.subdeviceIndex = subdeviceIndex;
+    params.sorNumber = provider->getSorIndex();
+    params.bEnable = NV_TRUE;
+    params.bForceRgDiv = NV_FALSE;
+    params.bImmediate = NV_FALSE;
+    params.headMask = 0;
+
+    NvU32 ret = provider->rmControl5070(NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, &params, sizeof params);
+
+    DP_ASSERT((ret == NVOS_STATUS_SUCCESS) && "Enabling flush mode failed!");
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+void EvoMainLink::clearFlushMode(unsigned headMask, bool testMode)
+{
+    NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.bFireAndForget = NV_FALSE;
+    params.base.subdeviceIndex = subdeviceIndex;
+    params.sorNumber = provider->getSorIndex();
+    params.bEnable = NV_FALSE;
+    params.bImmediate = NV_FALSE;
+    params.headMask = headMask;
+    params.bForceRgDiv = testMode;
+
+    NvU32 ret = provider->rmControl5070(NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, &params, sizeof params);
+    if (ret != NVOS_STATUS_SUCCESS)
+    {
+        DP_LOG(("DP_EVO> Disabling flush mode failed!"));
+    }
+}
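+//
+// Editor's note: an illustrative sketch, not part of this change, of the
+// intended set/clear pairing for flush mode. The `mainLink` and
+// `activeHeadMask` names are hypothetical.
+//
+//     if (mainLink->setFlushMode())
+//     {
+//         // ... retrain or reconfigure the link while the SOR is flushed ...
+//         mainLink->clearFlushMode(activeHeadMask, false /* testMode */);
+//     }
+//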
+bool EvoMainLink::physicalLayerSetTestPattern(PatternInfo * patternInfo)
+{
+    // Main parameter
+    NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS params;
+
+    // Identifies which test pattern to transmit.
+    NV0073_CTRL_DP_TESTPATTERN ctrlPattern;
+
+    dpMemZero(&params, sizeof(params));
+    dpMemZero(&ctrlPattern, sizeof(ctrlPattern));
+
+    switch (patternInfo->lqsPattern)
+    {
+        case LINK_QUAL_DISABLED:   ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_NONE; break;
+        case LINK_QUAL_D10_2:      ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_D10_2; break;
+        case LINK_QUAL_SYM_ERROR:  ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_SERMP; break;
+        case LINK_QUAL_PRBS7:      ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7; break;
+        case LINK_QUAL_CP2520PAT3: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT3; break;
+        case LINK_QUAL_80BIT_CUST:
+        {
+            ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM;
+
+            params.cstm.lower = patternInfo->ctsmLower;
+            params.cstm.middle = patternInfo->ctsmMiddle;
+            params.cstm.upper = patternInfo->ctsmUpper;
+            break;
+        }
+#ifdef NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE
+        case LINK_QUAL_HBR2_COMPLIANCE_EYE:
+        {
+            ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE;
+            params.cstm.lower = 0;
+            params.cstm.middle = 0;
+            params.cstm.upper = 0;
+            break;
+        }
+#endif
+        default:
+            DP_ASSERT(0 && "Unknown Phy Pattern");
+            return false;
+    }
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.testPattern = ctrlPattern;
+
+    //
+    // Set the appropriate laneMask based on the current lane count. The
+    // laneMask is used only on GF119+ chips, so it doesn't matter if we
+    // populate it for all chips. It is set to all lanes, since setting the
+    // test pattern on a lane that is off is effectively a nop.
+    // The laneMask allows setting the pattern on specific lanes to check for
+    // cross-talk, the phenomenon of a signal crossing over to a different
+    // lane where it's not set.
+    //
+    params.laneMask = 0xf;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TESTPATTERN, &params, sizeof(params));
+
+    return code == NVOS_STATUS_SUCCESS;
+}
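+//
+// Editor's note: an illustrative sketch, not part of this change, of
+// requesting a PRBS7 link-quality pattern, e.g. for PHY compliance testing.
+// The `info` and `mainLink` names are hypothetical.
+//
+//     PatternInfo info;
+//     dpMemZero(&info, sizeof(info));
+//     info.lqsPattern = LINK_QUAL_PRBS7;   // pseudo-random bit sequence
+//     bool ok = mainLink->physicalLayerSetTestPattern(&info);
+//     DP_ASSERT(ok && "Failed to set PRBS7 pattern");
+//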
+AuxBus::status EvoAuxBus::transaction(Action action, Type type, int address,
+                                      NvU8 * buffer, unsigned sizeRequested,
+                                      unsigned * sizeCompleted,
+                                      unsigned * pNakReason,
+                                      NvU8 offset, NvU8 nWriteTransactions)
+{
+    NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params;
+
+    DP_ASSERT(sizeRequested <= NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE);
+
+    dpMemZero(&params, sizeof(params));
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    params.cmd = 0;
+
+    if (type == native)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX);
+    else
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _I2C);
+
+    if (type == i2cMot)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_I2C_MOT, _TRUE);
+    else
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_I2C_MOT, _FALSE);
+
+    if (action == read)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ);
+    else if (action == write)
+    {
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE);
+        dpMemCopy(params.data, buffer, sizeRequested);
+    }
+    else if (action == writeStatusUpdateRequest)
+        params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE_STATUS);
+    else
+        DP_ASSERT(0 && "Unknown action");
+
+    params.addr = address;
+
+    //
+    // By definition, an I2C-write-over-AUX request with
+    // zero bytes of data is an "address-only" transaction.
+    //
+    if ((sizeRequested == 0) && (type & (i2cMot | i2c)) && (action == write))
+    {
+        DP_LOG(("DP> Client requested address-only transaction"));
+        params.bAddrOnly = NV_TRUE;
+    }
+    else if ((sizeRequested == 0) && (type == native))
+    {
+        // Native aux transactions with zero requested size are not allowed.
+        DP_ASSERT(0 && "Native Aux transactions shouldn't have zero size requested");
+        return nack;
+    }
+
+    // The control call takes the size as 0-based.
+    if (sizeRequested == 0)
+    {
+        //
+        // I2C transactions may request zero bytes. Decrementing by 1 would
+        // wrap around (RM_INVALID_DATA), so keep the size as zero.
+        //
+        params.size = 0;
+    }
+    else
+    {
+        params.size = sizeRequested - 1;
+    }
+
+    NvU32 code = 0;
+    NvU8 retries = 0;
+    do
+    {
+        retries++;
+        params.retryTimeMs = 0;
+        code = provider->rmControl0073(NV0073_CTRL_CMD_DP_AUXCH_CTRL, &params, sizeof(params));
+        // eDP may not be fully powered up yet; do not access the panel too early.
+        if (params.retryTimeMs > 0)
+        {
+            timer->sleep(params.retryTimeMs);
+        }
+    } while (NVOS_STATUS_SUCCESS != code && params.retryTimeMs && retries < 3);
+
+    if (pNakReason != NULL)
+    {
+        *pNakReason = params.replyType;
+    }
+
+    if (action == writeStatusUpdateRequest && code == NVOS_STATUS_ERROR_NOT_SUPPORTED)
+    {
+        //
+        // On some chips, write status requests are generated implicitly by the
+        // hardware. So while the RmControl() fails with a "not supported"
+        // error, the request still went out on the DPAUX channel as part of
+        // the last I2C-over-AUX write transaction, and the error should be
+        // ignored.
+        //
+        DP_LOG(("DP> %s: Ignore ERROR_NOT_SUPPORTED for writeStatusUpdateRequest. Returning Success", __FUNCTION__));
+        return AuxBus::success;
+    }
+
+    // In case of a timeout, the caller is expected to retry a bounded number of times.
+    if (code != NVOS_STATUS_SUCCESS && code != NVOS_STATUS_ERROR_TIMEOUT)
+    {
+        if (devicePlugged)
+        {
+            DP_LOG(("DP> AuxChCtl failing; if a device is connected you shouldn't be seeing this"));
+        }
+        return nack;
+    }
+    else if (code == NVOS_STATUS_ERROR_TIMEOUT)
+    {
+        return AuxBus::defer;
+    }
+
+    *sizeCompleted = params.size;
+
+    // Reset sizeCompleted if the transaction was deferred.
+    if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER ||
+        params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER)
+        *sizeCompleted = 0;
+
+    if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK)
+    {
+        // If it was a read operation, copy the read data to the buffer.
+        if (action == read)
+        {
+            // Clamp the copy size so it does not exceed the caller's buffer.
+            if (params.size > sizeRequested)
+            {
+                params.size = sizeRequested;
+            }
+            dpMemCopy(buffer, params.data, params.size);
+        }
+
+        return AuxBus::success;
+    }
+
+    if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK ||
+        params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK ||
+        params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT)
+        return AuxBus::nack;
+
+    if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER ||
+        params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER)
+        return AuxBus::defer;
+
+    DP_ASSERT(0 && "Unknown reply type");
+    return AuxBus::nack;
+}
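+//
+// Editor's note: an illustrative sketch, not part of this change, of a
+// native AUX read of the sink's DPCD_REV register (DPCD address 0x0).
+// The `auxBus` name is hypothetical.
+//
+//     NvU8 dpcdRev = 0;
+//     unsigned completed = 0, nakReason = 0;
+//     AuxBus::status s = auxBus->transaction(AuxBus::read, AuxBus::native,
+//                                            0x0, &dpcdRev, 1,
+//                                            &completed, &nakReason, 0, 0);
+//     // s == AuxBus::defer means the sink asked us to retry later.
+//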
+unsigned EvoAuxBus::transactionSize()
+{
+    return NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE;
+}
+
+void EvoAuxBus::setDevicePlugged(bool plugged)
+{
+    devicePlugged = plugged;
+}
+
+void EvoMainLink::preLinkTraining(NvU32 head)
+{
+    provider->preLinkTraining(head);
+}
+
+void EvoMainLink::postLinkTraining(NvU32 head)
+{
+    provider->postLinkTraining(head);
+}
+
+void EvoMainLink::initializeRegkeyDatabase()
+{
+    NvU32 i;
+    if (dpRegkeyDatabase.bInitialized)
+        return;
+    for (i = 0; i < sizeof(DP_REGKEY_TABLE)/sizeof(DP_REGKEY_TABLE[0]); i++)
+    {
+        NvU32 tempValue = 0;
+        tempValue = provider->getRegkeyValue(DP_REGKEY_TABLE[i].pName);
+        switch (DP_REGKEY_TABLE[i].valueType)
+        {
+            case DP_REG_VAL_U32:
+                *(NvU32*)(DP_REGKEY_TABLE[i].pValue) = tempValue;
+                break;
+            case DP_REG_VAL_U16:
+                *(NvU16*)(DP_REGKEY_TABLE[i].pValue) = tempValue & 0xFFFF;
+                break;
+            case DP_REG_VAL_U8:
+                *(NvU8*)(DP_REGKEY_TABLE[i].pValue) = tempValue & 0xFF;
+                break;
+            case DP_REG_VAL_BOOL:
+                *(bool*)(DP_REGKEY_TABLE[i].pValue) = !!tempValue;
+                break;
+        }
+    }
+    dpRegkeyDatabase.bInitialized = true;
+}
+
+void EvoMainLink::applyRegkeyOverrides()
+{
+    if (!dpRegkeyDatabase.bInitialized)
+    {
+        DP_ASSERT(0 && "dpRegkeyDatabase is not initialized before calling applyRegkeyOverrides.");
+        this->initializeRegkeyDatabase();
+    }
+    _isMstDisabledByRegkey = dpRegkeyDatabase.bMstDisabled;
+    _isDscDisabledByRegkey = dpRegkeyDatabase.bDscDisabled;
+    _skipPowerdownEDPPanelWhenHeadDetach = dpRegkeyDatabase.bPoweroffEdpInHeadDetachSkipped;
+    _applyLinkBwOverrideWarRegVal = dpRegkeyDatabase.bLinkBwOverrideWarApplied;
+    _enableMSAOverrideOverMST = dpRegkeyDatabase.bMsaOverMstEnabled;
+}
+
+NvU32 EvoMainLink::getRegkeyValue(const char *key)
+{
+    NvU32 i;
+    if (!dpRegkeyDatabase.bInitialized)
+    {
+        DP_ASSERT(0 && "dpRegkeyDatabase is not initialized before calling getRegkeyValue.");
+        initializeRegkeyDatabase();
+    }
+    if (key == NULL || key[0] == '\0')
+        return 0;
+
+    for (i = 0; i < sizeof(DP_REGKEY_TABLE)/sizeof(DP_REGKEY_TABLE[0]); i++)
+    {
+        NvU32 j = 0;
+        bool strSame = true;
+        while (key[j] != '\0' && DP_REGKEY_TABLE[i].pName[j] != '\0')
+        {
+            if (key[j] != DP_REGKEY_TABLE[i].pName[j])
+            {
+                strSame = false;
+                break;
+            }
+            ++j;
+        }
+        if (strSame && key[j] == '\0' && DP_REGKEY_TABLE[i].pName[j] == '\0')
+        {
+            switch (DP_REGKEY_TABLE[i].valueType)
+            {
+                case DP_REG_VAL_U32:
+                    return *(NvU32*)(DP_REGKEY_TABLE[i].pValue);
+                case DP_REG_VAL_U16:
+                    return (NvU32)*(NvU16*)(DP_REGKEY_TABLE[i].pValue);
+                case DP_REG_VAL_U8:
+                    return (NvU32)*(NvU8*)(DP_REGKEY_TABLE[i].pValue);
+                case DP_REG_VAL_BOOL:
+                    return (NvU32)*(bool*)(DP_REGKEY_TABLE[i].pValue);
+            }
+        }
+    }
+    DP_ASSERT(0 && "Requested regkey not found in dpRegkeyDatabase.");
+    return 0;
+}
+
+const DP_REGKEY_DATABASE& EvoMainLink::getRegkeyDatabase()
+{
+    return dpRegkeyDatabase;
+}
+
+NvU32 EvoMainLink::getSorIndex()
+{
+    return provider->getSorIndex();
+}
+
+bool EvoMainLink::isInbandStereoSignalingSupported()
+{
+    return provider->isInbandStereoSignalingSupported();
+}
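+//
+// Editor's note: an illustrative sketch, not part of this change. The lookup
+// above walks DP_REGKEY_TABLE character by character; a caller simply passes
+// the registered name. The regkey name below is hypothetical.
+//
+//     // Returns the cached value if "DP_DISABLE_MST" has an entry in
+//     // DP_REGKEY_TABLE; asserts and returns 0 otherwise.
+//     NvU32 mstDisabled = mainLink->getRegkeyValue("DP_DISABLE_MST");
+//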
+bool EvoMainLink::train(const LinkConfiguration & link, bool force,
+                        LinkTrainingType linkTrainingType,
+                        LinkConfiguration *retLink, bool bSkipLt,
+                        bool isPostLtAdjRequestGranted, unsigned phyRepeaterCount)
+{
+    NvU32 targetIndex;
+    NvU32 ltCounter = retLink->getLTCounter();
+    bool bTrainPhyRepeater =
+        (!link.bDisableLTTPR) && (_isLTPhyRepeaterSupported);
+
+    if (provider->getSorIndex() == DP_INVALID_SOR_INDEX)
+    {
+        // Bail out and skip LT, since no SOR is allocated for this displayId.
+        return false;
+    }
+    NvU32 err = 0;
+
+    NvU32 dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) |
+                      DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE);
+
+    if (link.multistream)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM);
+
+    if (link.bEnableFEC)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _ENABLE_FEC, _TRUE);
+
+    if (isPostLtAdjRequestGranted)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _POST_LT_ADJ_REQ_GRANTED, _YES);
+
+    if (link.enhancedFraming)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE);
+    if (bSkipLt)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SKIP_HW_PROGRAMMING, _YES);
+    if (force)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION);
+
+    if (linkTrainingType == NO_LINK_TRAINING)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _NO_LINK_TRAINING, _YES);
+    else if (linkTrainingType == FAST_LINK_TRAINING)
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAST_LINK_TRAINING, _YES);
+
+    targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK;
+    if (bTrainPhyRepeater && (_rmPhyRepeaterCount != phyRepeaterCount))
+    {
+        // If the LTTPR count is out of sync between DPLib and RM, do not link train LTTPRs.
+        bTrainPhyRepeater = false;
+    }
+
+    if (bTrainPhyRepeater)
+    {
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _TRAIN_PHY_REPEATER, _YES);
+        //
+        // Start from the one closest to the GPU. Note this is a 1-based index.
+        //
+        targetIndex = phyRepeaterCount;
+    }
+
+    NV_DPTRACE_INFO(LINK_TRAINING_START, link.multistream, link.peakRate, link.lanes,
+                    phyRepeaterCount, _rmPhyRepeaterCount, bTrainPhyRepeater, targetIndex);
+
+    NvU32 status = 0;
+    NvU8 retries = 0;
+    bool fallback = false;
+
+    //
+    // Limit the attempts, to unblock an infinite LT loop in which a CR
+    // failure keeps restoring the higher rate and lane count chosen after an
+    // EQ failure.
+    //
+    NvU32 crHighRateFallbackCount = 0;
+
+    //
+    // The rate and lane count we send to RM might differ from what the
+    // client sent to us, since fallback might happen.
+    //
+    LinkConfiguration requestRmLC = link;
+    do
+    {
+        NvU32 dpCtrlData = 0;
+        NvU64 linkrate = requestRmLC.peakRate;
+        NvU64 linkBw = 0;
+
+        switch (linkrate)
+        {
+            case RBR:
+            case EDP_2_16GHZ:
+            case EDP_2_43GHZ:
+            case HBR:
+            case EDP_3_24GHZ:
+            case EDP_4_32GHZ:
+            case HBR2:
+            case EDP_6_75GHZ:
+            case HBR3:
+                linkBw = linkrate / DP_LINK_BW_FREQ_MULTI_MBPS;
+                dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW,
+                                             linkBw, dpCtrlData);
+                break;
+            default:
+                if (requestRmLC.lanes != 0)
+                {
+                    DP_ASSERT(0 && "Unknown rate");
+                    return false;
+                }
+                break;
+        }
+
+        dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT,
+                                     requestRmLC.lanes, dpCtrlData);
+
+        if (requestRmLC.lanes == 0)
+        {
+            // Only need to target the sink when powering down the link.
+            targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK;
+        }
+
+        dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET,
+                                     targetIndex, dpCtrlData);
+
+        // Wait properly for eDP to power up before link training.
+        status = 0;
+        retries = 0;
+        fallback = false;
+        dpCtrlCmd = FLD_SET_DRF(0073_CTRL, _DP_CMD, _FALLBACK_CONFIG, _FALSE, dpCtrlCmd);
+        do
+        {
+            NV0073_CTRL_DP_CTRL_PARAMS params;
+
+            dpMemZero(&params, sizeof(params));
+            params.subDeviceInstance = subdeviceIndex;
+            params.displayId = displayId;
+            params.cmd = dpCtrlCmd;
+            params.data = dpCtrlData;
+
+            retries++;
+            params.retryTimeMs = 0;
+            status = provider->rmControl0073(NV0073_CTRL_CMD_DP_CTRL, &params, sizeof(params));
+            ltCounter++;
+            err = params.err;
+
+            if (params.retryTimeMs > 0)
+            {
+                timer->sleep(params.retryTimeMs);
+            }
+
+            if (status == NVOS_STATUS_SUCCESS || bSkipLt)
+            {
+                // If LT failed while bSkipLt was set, there is no point in attempting LT again.
+                break;
+            }
+
+            if (!params.retryTimeMs || retries >= 3)
+            {
+                break;
+            }
+
+        } while (true);
+
+        if (NVOS_STATUS_SUCCESS == status)
+        {
+            if (targetIndex != NV0073_CTRL_DP_DATA_TARGET_SINK)
+            {
+                targetIndex -= 1;
+                continue;
+            }
+            else
+            {
+                // All done; leave the loop.
+                break;
+            }
+        }
+
+        if (requestRmLC.policy.skipFallback() || bSkipLt)
+        {
+            //
+            // If LT failed while bSkipLt was set, there is no point in falling
+            // back, as the issue is not with the LinkConfig.
+            //
+            break;
+        }
+
+        if (FLD_TEST_DRF(0073_CTRL_DP, _CMD, _TRAIN_PHY_REPEATER, _YES, dpCtrlCmd) &&
+            FLD_TEST_DRF(0073_CTRL_DP, _ERR, _INVALID_PARAMETER, _ERR, err) &&
+            FLD_TEST_DRF(0073_CTRL_DP, _ERR, _TRAIN_PHY_REPEATER, _ERR, err))
+        {
+            //
+            // RM sees fewer LTTPRs than DPLib expected.
+            // - Force transparent mode.
+            //
+            targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK;
+            dpCtrlCmd = FLD_SET_DRF(0073_CTRL, _DP_CMD, _TRAIN_PHY_REPEATER,
+                                    _NO, dpCtrlCmd);
+            continue;
+        }
+
+        dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FALLBACK_CONFIG, _TRUE);
+
+        if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CLOCK_RECOVERY, _ERR, err))
+        {
+            // If CR failed, check whether we need to fall back.
+            if (requestRmLC.peakRate != RBR)
+            {
+                //
+                // We need to fall back on link rate if the following conditions are met:
+                // 1. The CR or EQ phase failed.
+                // 2. The requested link bandwidth is NOT RBR.
+                //
+                fallback = true;
+                requestRmLC.lowerConfig();
+            }
+            else
+            {
+                // Already RBR.
+                // Check how many lanes completed CR.
+                requestRmLC.lanes = DRF_VAL(0073_CTRL_DP, _ERR, _CR_DONE_LANE, err);
+
+                while (!IS_VALID_LANECOUNT(requestRmLC.lanes))
+                {
+                    requestRmLC.lanes--;
+                }
+
+                if (requestRmLC.lanes == 0)
+                {
+                    //
+                    // This WARs systems that do not set CR_DONE or EQ_DONE at
+                    // all. In this case, simply try half of the lanes.
+                    //
+                    requestRmLC.lanes = DRF_VAL(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, dpCtrlData) / 2;
+                    if (requestRmLC.lanes == 0)
+                    {
+                        // Nothing left to try; bail out.
+                        break;
+                    }
+                }
+                // Set back to the original desired rate.
+                requestRmLC.peakRate = link.peakRate;
+                fallback = true;
+                crHighRateFallbackCount++;
+            }
+        }
+        if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CHANNEL_EQUALIZATION, _ERR, err))
+        {
+            //
+            // If channel equalization fails, we use the fallback policy of
+            // reducing the lane count rather than the link rate; but in the
+            // special case where all lanes have failed CR, we resort to
+            // lowering the link rate instead (this addresses the new
+            // Fallback SCR v2.0).
+            //
+            if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CR_DONE_LANE, _0_LANE, err))
+            {
+                // Per spec, if the link rate has already been reduced to RBR, exit fallback.
+                if (requestRmLC.peakRate == RBR || !requestRmLC.lowerConfig())
+                    break;
+            }
+            else
+            {
+                if (!requestRmLC.lowerConfig(true)) // bReduceLaneCnt = true
+                    break;
+            }
+            fallback = true;
+        }
+        if (fallback == false)
+        {
+            // Nothing to fall back to; give up.
+            break;
+        }
+        if ((phyRepeaterCount > 0) && (bTrainPhyRepeater))
+        {
+            // On fallback, start again from the beginning.
+            targetIndex = phyRepeaterCount;
+        }
+    } while (crHighRateFallbackCount < NV_DP_RBR_FALLBACK_MAX_TRIES);
+
+    //
+    // Only the control-call status should be checked for the result. 'err'
+    // does not represent an LT failure - some compliance tests, such as
+    // 700.1.1.2, intentionally test against unexpected sink caps.
+    //
+    bool result = (status == NVOS_STATUS_SUCCESS);
+    retLink->setLaneRate(requestRmLC.peakRate, result ? requestRmLC.lanes : 0);
+    retLink->setLTCounter(ltCounter);
+
+    NV_DPTRACE_INFO(LINK_TRAINING_DONE, status, requestRmLC.peakRate, requestRmLC.lanes);
+
+    return result;
+}
+
+bool EvoMainLink::retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords)
+{
+    return false;
+}
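+//
+// Editor's note: a worked example, not part of this change, of the fallback
+// ordering implemented in train() above, assuming lowerConfig() steps the
+// rate down and lowerConfig(true) steps the lane count down:
+//
+//     HBR3 x4 --CR fail--> HBR2 x4 --CR fail--> HBR x4 --CR fail--> RBR x4
+//     RBR  x4 --CR fail on all lanes--> fewer lanes, retried at link.peakRate
+//     EQ fail (some lanes passed CR)  --> reduce lane count instead of rate
+//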
+// Return the current mux state. Returns false if the device is not mux capable.
+bool EvoMainLink::getDynamicMuxState(NvU32 *muxState)
+{
+    bool bIsMuxCapable = false;
+    NvU32 ret = 0;
+    NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS muxStatusParams;
+
+    if (!muxState)
+        return false;
+
+    *muxState = 0;
+
+    if (!isDynamicMuxCapable())
+        return false;
+
+    dpMemZero(&muxStatusParams, sizeof(muxStatusParams));
+    muxStatusParams.subDeviceInstance = subdeviceIndex;
+    muxStatusParams.displayId = displayId;
+    muxStatusParams.muxStatus = 0;
+
+    ret = provider->rmControl0073(NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS,
+                                  &muxStatusParams, sizeof(muxStatusParams));
+    if (ret == NV_OK &&
+        DRF_VAL(0073, _CTRL_DFP_DISP_MUX, _STATE, muxStatusParams.muxStatus) != NV0073_CTRL_DFP_DISP_MUX_STATE_INVALID)
+    {
+        bIsMuxCapable = true;
+        *muxState = muxStatusParams.muxStatus;
+    }
+
+    return bIsMuxCapable;
+}
+
+bool EvoMainLink::aquireSema()
+{
+    NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS params;
+
+    dpMemZero(&params, sizeof(params));
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.owner = NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RM;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA, &params, sizeof(params));
+
+    return code == NVOS_STATUS_SUCCESS;
+}
+
+void EvoMainLink::releaseSema()
+{
+    NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS params;
+
+    dpMemZero(&params, sizeof(params));
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.owner = NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RELEASE;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA, &params, sizeof(params));
+
+    DP_USED(code);
+    DP_ASSERT(code == NVOS_STATUS_SUCCESS);
+}
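+//
+// Editor's note: an illustrative sketch, not part of this change. The
+// semaphore grants the AUX channel to RM for a critical section; the pairing
+// below is an assumption about intended usage (the identifier really is
+// spelled "aquireSema" in this API).
+//
+//     if (mainLink->aquireSema())
+//     {
+//         // ... AUX accesses that must not interleave with other owners ...
+//         mainLink->releaseSema();
+//     }
+//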
+void EvoMainLink::configurePowerState(bool bPowerUp)
+{
+    NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS params;
+
+    dpMemZero(&params, sizeof(params));
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.ctrl = bPowerUp ? NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP :
+                             NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL, &params, sizeof(params));
+
+    DP_ASSERT(code == NVOS_STATUS_SUCCESS);
+}
+
+void EvoMainLink::getLinkConfig(unsigned &laneCount, NvU64 & linkRate)
+{
+    NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_LINK_CONFIG, &params, sizeof(params));
+
+    if (code == NVOS_STATUS_SUCCESS)
+    {
+        laneCount = params.laneCount;
+        // BUG: Beware, turbo mode needs to be taken into account here.
+        linkRate = (NvU64)27000000 * params.linkBW;
+    }
+    else
+    {
+        laneCount = 0;
+        linkRate = 0;
+    }
+}
+
+bool EvoMainLink::getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount)
+{
+    if (provider->getMaxLinkConfigFromUefi(linkRate, laneCount))
+    {
+        if (IS_VALID_LANECOUNT(laneCount) && IS_VALID_LINKBW(linkRate))
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool EvoMainLink::queryAndUpdateDfpParams()
+{
+    NV0073_CTRL_DFP_GET_INFO_PARAMS dfpParams;
+    NvU32 dfpFlags;
+    dpMemZero(&dfpParams, sizeof(dfpParams));
+    dfpParams.subDeviceInstance = subdeviceIndex;
+    dfpParams.displayId = displayId;
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DFP_GET_INFO, &dfpParams, sizeof(dfpParams));
+
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to query DFP params.");
+        return false;
+    }
+
+    dfpFlags = dfpParams.flags;
+    _isEDP = DRF_VAL(0073, _CTRL_DFP_FLAGS, _EMBEDDED_DISPLAYPORT, dfpFlags) ==
+                 NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE;
+
+    if (_isLTPhyRepeaterSupported)
+    {
+        _rmPhyRepeaterCount = DRF_VAL(0073_CTRL_DFP, _FLAGS,
+                                      _DP_PHY_REPEATER_COUNT, dfpFlags);
+    }
+
+    _needForceRmEdid = DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_FORCE_RM_EDID, dfpFlags) ==
+                           NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE;
+
+    _isPC2Disabled = DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_POST_CURSOR2_DISABLED, dfpFlags) ==
+                         NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE;
+
+    switch (DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_LINK_BW, dfpFlags))
+    {
+        default:
+            DP_ASSERT(0 && "maxLinkRate is set improperly in the dfp object.");
+            // Intentional fall-through.
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS:
+            _maxLinkRateSupportedDfp = RBR;
+            break;
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS:
+            _maxLinkRateSupportedDfp = HBR;
+            break;
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS:
+            _maxLinkRateSupportedDfp = HBR2;
+            break;
+        case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS:
+            _maxLinkRateSupportedDfp = HBR3;
+            break;
+    }
+
+    _isDynamicMuxCapable = FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _DYNAMIC_MUX_CAPABLE, _TRUE, dfpFlags);
+
+    return true;
+}
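+//
+// Editor's note: a worked example, not part of this change, of the unit
+// conversion in getLinkConfig() above, assuming params.linkBW uses the DPCD
+// LINK_BW_SET encoding (0.27 Gbps per step, i.e. 27 MHz of link symbol
+// clock per step):
+//
+//     linkBW = 0x06 ->  6 * 27 MHz = 162 MHz symbol clock (RBR,  1.62 Gbps)
+//     linkBW = 0x0A -> 10 * 27 MHz = 270 MHz symbol clock (HBR,  2.70 Gbps)
+//     linkBW = 0x14 -> 20 * 27 MHz = 540 MHz symbol clock (HBR2, 5.40 Gbps)
+//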
+bool EvoMainLink::fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize)
+{
+    NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams;
+    pEdidParams = (NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS*) dpMalloc(sizeof(*pEdidParams));
+
+    if (pEdidParams == NULL) {
+        return false;
+    }
+
+    dpMemZero(pEdidParams, sizeof(*pEdidParams));
+    pEdidParams->subDeviceInstance = subdeviceIndex;
+    pEdidParams->displayId = displayId;
+    pEdidParams->flags = 0; // use default settings.
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, pEdidParams, sizeof(*pEdidParams));
+
+    if (code == NVOS_STATUS_SUCCESS)
+    {
+        //
+        // Silently dropping part of a too-large output buffer matches the
+        // behavior of the "V1" of this control, but it may make sense to
+        // revisit this behavior now that it's under the control of this
+        // client.
+        //
+        NvU32 copySize = NV_MIN(pEdidParams->bufferSize, bufferSize);
+        dpMemCopy(edidBuffer, pEdidParams->edidBuffer, copySize);
+    } else {
+        DP_ASSERT(0 && "Unable to read EDID.");
+    }
+
+    dpFree(pEdidParams);
+    return code == NVOS_STATUS_SUCCESS;
+}
+
+bool EvoMainLink::applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize)
+{
+    NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams =
+        (NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *)
+        dpMalloc(sizeof(*pEdidOverrideParams));
+
+    if (pEdidOverrideParams == NULL) {
+        return false;
+    }
+
+    dpMemZero(pEdidOverrideParams, sizeof(*pEdidOverrideParams));
+    pEdidOverrideParams->subDeviceInstance = subdeviceIndex;
+    pEdidOverrideParams->displayId = displayId;
+    if (bufferSize > sizeof(pEdidOverrideParams->edidBuffer)) {
+        DP_ASSERT(0 && "EDID override too large for edidBuffer");
+        dpFree(pEdidOverrideParams);
+        return false;
+    }
+    pEdidOverrideParams->bufferSize = bufferSize;
+    dpMemCopy(&pEdidOverrideParams->edidBuffer, edidBuffer, bufferSize);
+
+    NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2,
+                                         pEdidOverrideParams,
+                                         sizeof(*pEdidOverrideParams));
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_ASSERT(0 && "Unable to apply EDID override.");
+        dpFree(pEdidOverrideParams);
+        return false;
+    }
+
+    DP_ASSERT(pEdidOverrideParams->bufferSize == bufferSize);
+    dpMemCopy(edidBuffer, &pEdidOverrideParams->edidBuffer, bufferSize);
+
+    dpFree(pEdidOverrideParams);
+
+    return true;
+}
+
+bool EvoMainLink::isEDP()
+{
+    return _isEDP;
+}
+
+bool EvoMainLink::supportMSAOverMST()
+{
+    return _enableMSAOverrideOverMST;
+}
+
+bool EvoMainLink::skipPowerdownEdpPanelWhenHeadDetach()
+{
+    return _skipPowerdownEDPPanelWhenHeadDetach;
+}
+
+bool EvoMainLink::isActive()
+{
+    NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS params;
+
+    for (int i = 0; i < 32; i++)
+    {
+        // Skip floorswept heads.
+        if (!(allHeadMask & (1 << i)))
+        {
+            continue;
+        }
+
+        dpMemZero(&params, sizeof params);
+        params.subDeviceInstance = 0;
+        params.head = i;
+
+        NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, &params, sizeof(params));
+
+        if (code != NVOS_STATUS_SUCCESS)
+        {
+            DP_ASSERT(0 && "We can't get active displays, RM bug!");
+        }
+        else if (params.displayId & displayId)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool EvoMainLink::controlRateGoverning(NvU32 head, bool enable, bool updateNow)
+{
+    NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS params = {0};
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.head = head;
+    params.sorIndex = provider->getSorIndex();
+
+    if (enable)
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _ENABLE_RG, _ON);
+    }
+    else
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _ENABLE_RG, _OFF);
+    }
+    if (updateNow)
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _TRIGGER_MODE, _IMMEDIATE);
+    }
+    else
+    {
+        params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _TRIGGER_MODE, _LOADV);
+    }
+
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_RATE_GOV, &params, sizeof params);
+
+    return true;
+}
+bool EvoMainLink::getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern)
+{
+    NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_TESTPATTERN, &params, sizeof params)))
+    {
+        testPattern->testPattern = params.testPattern.testPattern;
+        return true;
+    }
+    else
+        return false;
+}
+
+bool EvoMainLink::setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride)
+{
+    NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+    params.testPattern = testPattern;
+    params.laneMask = laneMask;
+    params.cstm = cstm;
+    params.bIsHBR2 = bIsHBR2;
+    params.bSkipLaneDataOverride = bSkipLaneDataOverride;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TESTPATTERN, &params, sizeof params)))
+        return true;
+    else
+        return false;
+}
+
+bool EvoMainLink::getDpLaneData(NvU32 *numLanes, NvU32 *data)
+{
+    NV0073_CTRL_DP_LANE_DATA_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_LANE_DATA, &params, sizeof params)))
+    {
+        *numLanes = params.numLanes;
+        dpMemCopy(data, params.data, NV0073_CTRL_MAX_LANES*4);
+        return true;
+    }
+    else
+        return false;
+}
+
+bool EvoMainLink::setDpLaneData(NvU32 numLanes, NvU32 *data)
+{
+    NV0073_CTRL_DP_LANE_DATA_PARAMS params = {0};
+
+    params.subDeviceInstance = this->subdeviceIndex;
+    params.displayId = this->displayId;
+    params.numLanes = numLanes;
+    dpMemCopy(params.data, data, NV0073_CTRL_MAX_LANES*4);
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_LANE_DATA, &params, sizeof params)))
+        return true;
+    else
+        return false;
+}
+
+NvU32 EvoMainLink::monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData)
+{
+    return provider->monitorDenylistInfo(ManufacturerID, ProductID, pDenylistData);
+}
+
+bool EvoMainLink::rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache* dfpCache, NvBool bResetDfp)
+{
+    NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS params = {0};
+    params.headIndex = headIndex;
+    params.bcaps = dfpCache->bcaps;
+    for (unsigned i = 0; i < 5; i++)
+        params.bksv[i] = dfpCache->bksv[i];
+
+    params.bHdcpCapable = dfpCache->hdcpCapable;
+    params.subDeviceInstance = subdeviceIndex;
+    params.updateMask = dfpCache->updMask;
+    if (bResetDfp)
+        params.bResetDfp = NV_TRUE;
+
+    if (!(provider->rmControl0073(NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE, &params, sizeof params)))
+        return true;
+    else
+        return false;
+}
+
+NvU32 EvoMainLink::allocDisplayId()
+{
+    NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = {0};
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, &params, sizeof(params));
+    if (ret == NVOS_STATUS_SUCCESS)
+    {
+        return params.displayIdAssigned;
+    }
+
+    return 0;
+}
+
+bool EvoMainLink::freeDisplayId(NvU32 displayId)
+{
+    NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = {0};
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, &params, sizeof(params));
+    return ret == NVOS_STATUS_SUCCESS;
+}
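+//
+// Editor's note: an illustrative sketch, not part of this change, of the
+// expected pairing for topology display IDs on an MST branch.
+//
+//     NvU32 newId = mainLink->allocDisplayId();   // 0 means allocation failed
+//     if (newId != 0)
+//     {
+//         // ... bind the ID to a stream/device and perform the modeset ...
+//         mainLink->freeDisplayId(newId);
+//     }
+//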
+void EvoMainLink::configureTriggerSelect(NvU32 head, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier)
+{
+    NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS params = {0};
+    params.head = head;
+    params.subDeviceInstance = subdeviceIndex;
+    params.sorIndex = provider->getSorIndex();
+    params.singleHeadMSTPipeline = streamIdentifier;
+
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT, &params, sizeof params);
+}
+
+void EvoMainLink::configureTriggerAll(NvU32 head, bool enable)
+{
+    NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS params = {0};
+    params.head = head;
+    params.subDeviceInstance = subdeviceIndex;
+    params.enable = enable;
+    provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL, &params, sizeof params);
+}
+
+MainLink * DisplayPort::MakeEvoMainLink(EvoInterface * provider, Timer * timer)
+{
+    return new EvoMainLink(provider, timer);
+}
+
+AuxBus * DisplayPort::MakeEvoAuxBus(EvoInterface * provider, Timer * timer)
+{
+    return new EvoAuxBus(provider, timer);
+}
+
+bool EvoMainLink::dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex)
+{
+    NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS params;
+    NvU32 code;
+
+    dpMemZero(&params, sizeof(params));
+    params.bEnable = bEnable ? NV_TRUE : NV_FALSE;
+    params.subDeviceInstance = subdeviceIndex;
+    params.headIndex = *headIndex;
+
+    // Determine whether this is a setup or a query request.
+    if (data == NULL)
+    {
+        params.cmd = DRF_DEF(0073_CTRL, _DP_CRC_CONTROL, _CMD, _SETUP);
+    }
+    else
+    {
+        params.cmd = DRF_DEF(0073_CTRL, _DP_CRC_CONTROL, _CMD, _QUERY);
+    }
+
+    // GPU part of the call.
+    code = provider->rmControl0073(NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL, &params, sizeof(params));
+    if (code != NVOS_STATUS_SUCCESS)
+    {
+        DP_LOG(("DP> Crc control failed."));
+        return false;
+    }
+
+    // For a query, copy back the CRC values; a setup request has nothing to return.
+    if (data != NULL)
+    {
+        data->gpuCrc0 = params.gpuCrc0;
+        data->gpuCrc1 = params.gpuCrc1;
+        data->gpuCrc2 = params.gpuCrc2;
+    }
+
+    return true;
+}
+
+//
+// @brief Request RM to set up or reset the link rate table, and save the
+//        valid link rates for use.
+//
+// @param pLinkRateTable    Pointer to the link rate table to configure
+// @param pLinkRates        Pointer to LinkRates to keep the valid link rates
+// @return
+//   true     Link rate table configured with at least one valid link rate
+//   false    Otherwise
+//
+bool EvoMainLink::configureLinkRateTable
+(
+    const NvU16 *pLinkRateTable,
+    LinkRates *pLinkRates
+)
+{
+    NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+
+    // Set up the provided link rate table; otherwise it will be reset.
+    if (pLinkRateTable)
+    {
+        for (NvU32 i = 0; i < NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES; i++)
+        {
+            params.linkRateTbl[i] = pLinkRateTable[i];
+        }
+    }
+
+    NvU32 code = provider->rmControl0073(
+        NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES,
+        &params, sizeof(params));
+
+    if ((pLinkRates != NULL) && (code == NVOS_STATUS_SUCCESS) &&
+        (params.linkBwCount <= NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES))
+    {
+        pLinkRates->clear();
+        for (int i = 0; i < params.linkBwCount; i++)
+        {
+            switch (params.linkBwTbl[i])
+            {
+                case linkBW_1_62Gbps:
+                case linkBW_2_16Gbps:
+                case linkBW_2_43Gbps:
+                case linkBW_2_70Gbps:
+                case linkBW_3_24Gbps:
+                case linkBW_4_32Gbps:
+                case linkBW_5_40Gbps:
+                case linkBW_6_75Gbps:
+                case linkBW_8_10Gbps:
+                    pLinkRates->import(params.linkBwTbl[i]);
+                    break;
+
+                default:
+                    DP_LOG(("DP_EVO> %s: Unsupported link rate received",
+                            __FUNCTION__));
+                    DP_ASSERT(0);
+                    break;
+            }
+        }
+        return true;
+    }
+    return false;
+}
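+//
+// Editor's note: a worked example, not part of this change, for
+// configureLinkRateTable(). Assuming the NvU16 entries follow the eDP
+// DPCD SUPPORTED_LINK_RATES convention of 200 kHz units:
+//
+//     NvU16 tbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES] = {0};
+//     tbl[0] = 8100;    //  8100 * 200 kHz = 1.62 Gbps (RBR)
+//     tbl[1] = 13500;   // 13500 * 200 kHz = 2.70 Gbps (HBR)
+//     LinkRates rates;
+//     bool ok = mainLink->configureLinkRateTable(tbl, &rates);
+//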
+//
+// @brief Request RM to enable or disable FEC.
+//
+// @param bEnableFec    Indicates whether enabling or disabling is requested
+// @return
+//   true     If FEC was configured successfully
+//   false    Otherwise
+//
+bool EvoMainLink::configureFec
+(
+    const bool bEnableFec
+)
+{
+    NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS params;
+    dpMemZero(&params, sizeof(params));
+
+    params.subDeviceInstance = subdeviceIndex;
+    params.displayId = displayId;
+    params.bEnableFec = bEnableFec;
+
+    NvU32 code = provider->rmControl0073(
+        NV0073_CTRL_CMD_DP_CONFIGURE_FEC,
+        &params, sizeof(params));
+
+    if (code == NVOS_STATUS_SUCCESS)
+    {
+        return true;
+    }
+
+    return false;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp
new file mode 100644
index 0000000..80a182d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp
@@ -0,0 +1,331 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_groupimpl.cpp                                                  *
+*    DP device group implementation                                         *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_internal.h"
+#include "dp_connector.h"
+#include "dp_list.h"
+#include "dp_auxdefs.h"
+#include "dp_deviceimpl.h"
+#include "dp_groupimpl.h"
+#include "dp_connectorimpl.h"
+
+using namespace DisplayPort;
+
+void GroupImpl::update(Device * dev, bool allocationState)
+{
+    Address::StringBuffer sb;
+    Address devAddress = dev->getTopologyAddress();
+    DP_USED(sb);
+
+    //
+    // Do not map a stream that is not yet turned on in the GPU; an update
+    // will be sent later during NAE.
+    //
+    if (allocationState && !this->isHeadAttached())
+        return;
+
+    //
+    // Do not enable the stream on an unplugged device, but take care of
+    // detached devices. We need to clear PBNs allocated by such devices.
+    //
+    if (allocationState && !((DeviceImpl *)dev)->plugged)
+        return;
+
+    //
+    // Check whether the parent's messageManager exists. This is required for
+    // cases where the parent branch itself has been unplugged. No message can
+    // be sent in this case.
+    //
+    if (!parent->messageManager)
+        return;
+
+    if (timeslot.count == 0 ||
+        ((DeviceImpl *)dev)->payloadAllocated == allocationState)
+        return;
+
+    if (!dev->getParent() || !((dev->getParent())->isPlugged()))
+    {
+        DeviceImpl * parentDev = NULL;
+
+        //
+        // Send ALLOCATE_PAYLOAD with PBN 0 to the parent port of the previous
+        // branch: find the first plugged parent branch and send the message to it.
+        //
+        while (devAddress.size() > 2)
+        {
+            devAddress.pop();
+            parentDev = parent->findDeviceInList(devAddress.parent());
+
+            if (parentDev && parentDev->isPlugged())
+                break;
+        }
+
+        // If no parent is found, simply return: we have no valid address to send the message to.
+        if (!parentDev)
+            return;
+    }
+
+    NakData nakData;
+    for (int retries = 0; retries < 7; retries++)
+    {
+        AllocatePayloadMessage allocate;
+        unsigned sink = 0;    // Hard-code the audio sink to the 0th sink in the device.
+        allocate.set(devAddress.parent(), devAddress.tail(),
+                     dev->isAudioSink() ? 1 : 0, streamIndex, allocationState ? timeslot.PBN : 0,
+                     &sink, true);
+
+        // Trigger a refetch of the EPR data.
+        ((DeviceImpl *)dev)->bandwidth.enum_path.dataValid = false;
+        DeviceImpl * tail = (DeviceImpl *) dev;
+        while (tail && tail->getParent())
+        {
+            tail->bandwidth.enum_path.dataValid = false;
+            tail = (DeviceImpl *)tail->getParent();
+        }
+
+        if (parent->messageManager->send(&allocate, nakData))
+        {
+            if (allocationState)
+            {
+                DP_LOG(("DP-TM> Attached stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb)));
+            }
+            else
+            {
+                DP_LOG(("DP-TM> Detached stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb)));
+            }
+
+            ((DeviceImpl *)dev)->payloadAllocated = allocationState;
+
+            return;
+        }
+    }
+
+    // Ideally we should never reach this point; the allocate payload message failed.
+    if (allocationState)
+    {
+        DP_LOG(("DP-TM> Allocate_payload: Failed to ATTACH stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb)));
+        DP_ASSERT(0);
+    }
+    else
+    {
+        DP_LOG(("DP-TM> Allocate_payload: Failed to DETACH stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb)));
+        DP_ASSERT(0);
+    }
+}
+
+void GroupImpl::insert(Device * dev)
+{
+    DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device");
+    DeviceImpl * di = (DeviceImpl *)dev;
+
+    if (isHeadAttached())
+    {
+        if (di->activeGroup && di->activeGroup != this)
+        {
+            DP_ASSERT(0 && "Device already in active group, cannot add to another active group!");
+            return;
+        }
+        di->activeGroup = this;
+    }
+
+    members.insertFront(di);
+
+    update(dev, true);
+}
+
+void GroupImpl::remove(Device * dev)
+{
+    DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device");
+
+    DeviceImpl * di = (DeviceImpl *)dev;
+
+    if (isHeadAttached())
+    {
+        di->activeGroup = 0;
+    }
+    members.remove(di);
+
+    update(dev, false);
+
+    updateVbiosScratchRegister(dev);
+}
+
+void GroupImpl::destroy()
+{
+    for (Device * i = enumDevices(0); i; i = enumDevices(i))
+        remove(i);
+
+    // Cancel any queued auth callbacks.
+    cancelHdcpCallbacks();
+
+    delete this;
+}
+
+void GroupImpl::cancelHdcpCallbacks()
+{
+    authRetries = 0;
+
+    parent->timer->cancelCallback(this, &tagHDCPReauthentication);
+    parent->timer->cancelCallback(this, &tagStreamValidation);
+}
+
+Device * GroupImpl::enumDevices(Device * previousDevice)
+{
+    return members.next(previousDevice);
+}
+
+void GroupImpl::expired(const void * tag)
+{
+    if (tag == &tagHDCPReauthentication)
+    {
+        HDCPState hdcpState = {0};
+        parent->main->configureHDCPGetHDCPState(hdcpState);
+
+        if (authRetries < HDCP_AUTHENTICATION_RETRIES)
+        {
+            this->hdcpEnabled = hdcpState.HDCP_State_Encryption;
+            if (hdcpState.HDCP_State_Authenticated)
+            {
+                parent->isHDCPAuthOn = true;
+                authRetries = 0;
+            }
+            else
+            {
+                unsigned authDelay = (hdcpState.HDCP_State_22_Capable ?
+                                      HDCP22_AUTHENTICATION_COOLDOWN : HDCP_AUTHENTICATION_COOLDOWN);
+
+                authRetries++;
+                parent->main->configureHDCPRenegotiate();
+                parent->isHDCPAuthOn = false;
+                parent->timer->queueCallback(this, &tagHDCPReauthentication,
+                                             authDelay);
+            }
+        }
+        else
+        {
+            parent->isHDCPAuthOn = this->hdcpEnabled = false;
+        }
+    }
+    else if (tag == &tagStreamValidation)
+    {
+        if (!(this->streamValidationDone))
+        {
+            // If we get here, debug why the final notification from DD never arrived.
+            DP_ASSERT(0 && "DP> Didn't get final notification.");
+        }
+    }
+}
+
+bool GroupImpl::hdcpGetEncrypted()
+{
+    // Returns whether encryption is currently enabled.
+    if (parent->isHDCPAuthOn)
+    {
+        return this->hdcpEnabled;
+    }
+    else
+    {
+        return false;
+    }
+}
+
+void GroupImpl::updateVbiosScratchRegister(Device * lastDev)
+{
+    if (!parent->bDisableVbiosScratchRegisterUpdate &&
+        parent->lastDeviceSetForVbios == lastDev)
+    {
+        // Take a device which is part of a group.
+        for (ListElement * e = parent->deviceList.begin();
+             e != parent->deviceList.end(); e = e->next)
+        {
+            DeviceImpl * dev = (DeviceImpl *)e;
+
+            if (dev->activeGroup && dev->activeGroup->isHeadAttached())
+            {
+                NvU32 address = 0;
+                NvU32 addrSize = dev->getTopologyAddress().size();
+
+                // Set the MS scratch register for the lit-up display.
+                for (NvU32 i = addrSize; i; --i)
+                {
+                    address |= ((dev->address[i-1] & 0xF) << ((addrSize - i)*4));
+                }
+
+                parent->main->configureMsScratchRegisters(address, addrSize, 3);
+
+                parent->lastDeviceSetForVbios = (Device *)dev;
+
+                return;
+            }
+        }
+    }
+}
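+//
+// Editor's note: a worked example, not part of this change, of the nibble
+// packing in updateVbiosScratchRegister() above. For a device at topology
+// address 1.2.3 (addrSize == 3), each 4-bit hop is packed with the first
+// hop in the most significant nibble:
+//
+//     address = (1 << 8) | (2 << 4) | (3 << 0) = 0x123
+//     parent->main->configureMsScratchRegisters(0x123, 3, 3);
+//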
+//
+// Helper function for attaching and detaching heads.
+//
+// For attach, we assert if the group already has a head attached but, for
+// some device in the group, the active group does not point to the current
+// group. For detach, we assert if the group does not have a head attached
+// but some device in the group has an active group, OR if the head is marked
+// attached but not all devices in the group have the current group as their
+// active group.
+// This also sets or clears dev->activeGroup for each contained device.
+//
+void GroupImpl::setHeadAttached(bool attached)
+{
+    for (Device * i = enumDevices(0); i; i = enumDevices(i))
+    {
+        DeviceImpl *di = (DeviceImpl *)i;
+
+        if (attached)
+        {
+            if (headAttached)
+            {
+                DP_ASSERT(di->activeGroup == this);
+            }
+            di->activeGroup = this;
+        }
+        else
+        {
+            if (!headAttached)
+            {
+                DP_ASSERT(di->activeGroup == NULL);
+            }
+            else
+            {
+                DP_ASSERT(di->activeGroup == this);
+            }
+            di->activeGroup = NULL;
+        }
+    }
+    headAttached = attached;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp
new file mode 100644
index 0000000..271aada
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp
@@ -0,0 +1,81 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort********************************\
+*                                                                           *
+* Module: dp_guid.cpp                                                       *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_guid.h"
+#include "dp_buffer.h"
+
+using namespace DisplayPort;
+
+//
+// Linear congruential random number generator.
+// Constants chosen from Numerical Recipes.
+//
+NvU32 GUIDBuilder::random()
+{
+    previousRandom = static_cast<NvU32>(((NvU64)1664525 * previousRandom + 1013904223) & 0xFFFFFFFF);
+    return previousRandom;
+}
+
+GUIDBuilder::GUIDBuilder(Timer * source, NvU32 salt)
+    : salt(salt), source(source)
+{
+    previousRandom = static_cast<NvU32>(source->getTimeUs() & 0xFFFFFFFF);
+}
+
+void GUIDBuilder::makeGuid(GUID & guid)
+{
+    NvU64 currentTimer = source->getTimeUs();
+    guid.data[0] = static_cast<NvU8>((salt >> 24) & 0xFF);
+    guid.data[1] = static_cast<NvU8>((salt >> 16) & 0xFF);
+    guid.data[2] = static_cast<NvU8>((salt >> 8) & 0xFF);
+    guid.data[3] = static_cast<NvU8>(salt & 0xFF);
+
+    guid.data[4] = static_cast<NvU8>((currentTimer >> 56) & 0xFF);
+    guid.data[5] = static_cast<NvU8>((currentTimer >> 48) & 0xFF);
+    guid.data[6] = static_cast<NvU8>((currentTimer >> 40) & 0xFF);
+    guid.data[7] = static_cast<NvU8>((currentTimer >> 32) & 0xFF);
+    guid.data[8] = static_cast<NvU8>((currentTimer >> 24) & 0xFF);
+    guid.data[9] = static_cast<NvU8>((currentTimer >> 16) & 0xFF);
+    guid.data[10] = static_cast<NvU8>((currentTimer >> 8) & 0xFF);
+    guid.data[11] = static_cast<NvU8>(currentTimer & 0xFF);
+
+    unsigned rnd = random();
+    guid.data[12] = static_cast<NvU8>((rnd >> 24) & 0xFF);
+    guid.data[13] = static_cast<NvU8>((rnd >> 16) & 0xFF);
+    guid.data[14] = static_cast<NvU8>((rnd >> 8) & 0xFF);
+    guid.data[15] = static_cast<NvU8>(rnd & 0xFF);
+
+    //
+    // Spin until we get a new timer counter.
+    // This guarantees a monotonically increasing counter.
+    //
+    while (source->getTimeUs() == currentTimer)
+        ;
+}
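+//
+// Editor's note: the resulting 16-byte GUID layout, summarized from the code
+// above (not part of this change):
+//
+//     bytes  0-3    salt (big-endian)
+//     bytes  4-11   64-bit microsecond timestamp (big-endian)
+//     bytes 12-15   32-bit LCG output (big-endian)
+//
+// The trailing spin on getTimeUs() ensures that two back-to-back calls never
+// observe the same timestamp, so GUIDs from one builder cannot collide.
+//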
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp
new file mode 100644
index 0000000..d8b3b86
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp
@@ -0,0 +1,159 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* List **************************************\
+*                                                                           *
+* Module: dp_list.cpp                                                       *
+*    Simple doubly linked list                                              *
+*                                                                           *
+\***************************************************************************/
+
+#include "dp_internal.h"
+#include "dp_list.h"
+
+using namespace DisplayPort;
+
+ListElement::ListElement()
+    : next(0),
+      prev(0)
+{
+}
+
+ListElement::~ListElement()
+{
+    if (this->next)
+    {
+        this->prev->next = this->next;
+        this->next->prev = this->prev;
+        this->next = 0;
+    }
+}
+
+List::List()
+{
+    this->next = this;
+    this->prev = this;
+}
+
+void List::clear()
+{
+    while (!isEmpty())
+        delete begin();
+}
+
+List::~List()
+{
+    clear();
+    this->next = this;
+    this->prev = this;
+}
+
+bool List::isEmpty()
+{
+    return this->next == this;
+}
+
+void List::insertFront(ListElement * item)
+{
+    DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list");
+    item->prev = this;
+    item->next = this->next;
+    item->prev->next = item;
+    item->next->prev = item;
+}
+
+void List::insertBack(ListElement * item)
+{
+    DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list");
+    item->prev = this->prev;
+    item->next = this;
+    item->prev->next = item;
+    item->next->prev = item;
+}
+
+void List::insertBefore(ListElement * insertBeforeThis, ListElement * item)
+{
+    DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list");
+    item->next = insertBeforeThis;
+    item->prev = insertBeforeThis->prev;
+    insertBeforeThis->prev->next = item;
+    insertBeforeThis->prev = item;
+}
+
+ListElement* List::front()
+{
+    DP_ASSERT(!isEmpty());
+    return this->next;
+}
+
+ListElement* List::last()
+{
+    DP_ASSERT(!isEmpty());
+    return this->prev;
+}
+
+ListElement * List::remove(ListElement * item)
+{
+    // Skip if it's not in a list.
+    if (!item->next)
+        return item;
+
+    item->prev->next = item->next;
+    item->next->prev = item->prev;
+    item->next = 0;
+    item->prev = 0;
+
+    return item;
+}
+
+bool List::contains(ListElement * item)
+{
+    for (ListElement * i = begin(); i != end(); i = i->next)
+    {
+        if (i == item)
+            return true;
+    }
+    return false;
+}
+
+ListElement * List::replace(ListElement * replacement, ListElement * replacee)
+{
+    if (!(replacement && replacee))
+    {
+        DP_ASSERT(0 && "replacement or replacee is a NULL pointer");
+        return 0;
+    }
+
+    DP_ASSERT(replacee->next && replacee->prev);
+
+    // We assume the replacee exists in the list.
+    replacement->next = replacee->next;
+    replacement->prev = replacee->prev;
+
+    if (replacement->next)
+        replacement->next->prev = replacement;
+
+    if (replacement->prev)
+        replacement->prev->next = replacement;
+
+    return replacee;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp
new file mode 100644
index 0000000..362fc03
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp
@@ -0,0 +1,310 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_merger.cpp * +* Asynchronous Message merger * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_merger.h" +#include "dp_auxdefs.h" +#include "dp_crc.h" +#include "dp_messageheader.h" + +using namespace DisplayPort; + + +EncodedMessage * MessageTransactionMerger::pushTransaction(MessageHeader * header, Buffer * data) +{ + if (freeOnNextCall) + { + delete freeOnNextCall; + freeOnNextCall = 0; + } + + IncompleteMessage * imsg = getTransactionRecord(header->address, header->messageNumber); + + if (!imsg) + { + DP_LOG(("DP-MM> Ignore message due to OOM")); + return 0; + } + + if (header->isTransactionStart) + { + imsg->message.isPathMessage = header->isPathMessage; + imsg->message.isBroadcast = header->isBroadcast; + } + else + { + if (imsg->message.buffer.length == 0) + { + DP_LOG(("DP-MM> Expected transaction-start, ignoring message transaction")); + return 0; + } + + if (imsg->message.isPathMessage != header->isPathMessage || + imsg->message.isBroadcast != header->isBroadcast) + { + DP_ASSERT(0 && "Message type changed during transmission"); + } + } + + // + // Check for redundant start + // + if (header->isTransactionStart && imsg->message.buffer.length) + { + DP_LOG(("DP-MM> Unexpected repeated transaction-start, resetting message state.")); + + // We must have seen a previous incomplete transaction from this device + // they've begun a new packet. 
Forget the old, partial data.
+        imsg->message.buffer.reset();
+    }
+
+    //
+    // Drop the buffer if we received less payload than advertised
+    //
+    if (header->payloadBytes > data->length)
+    {
+        freeOnNextCall = imsg;
+        imsg->message.buffer.reset();
+        DP_LOG(("DP-MM> Received truncated or corrupted message transaction"));
+        return 0;
+    }
+
+    //
+    // Verify transaction CRC
+    //
+    BitStreamReader bsr(data, header->headerSizeBits, (header->payloadBytes-1)*8);
+    NvU8 dataCrc = (NvU8)dpCalculateBodyCRC(&bsr);
+
+    DP_ASSERT(header->headerSizeBits % 8 == 0 && "Header must be byte aligned");
+
+    if (dataCrc != data->data[header->headerSizeBits/8 + header->payloadBytes - 1] ||
+        header->payloadBytes == 0)
+    {
+        DP_LOG(("DP-MM> Received corrupted message transaction"));
+        freeOnNextCall = imsg;
+        imsg->message.buffer.reset();
+        return 0;
+    }
+
+    // Exclude the CRC byte we just verified from the payload count
+    header->payloadBytes--;
+
+    //
+    // Append the payload to the active buffer
+    //
+    unsigned i = imsg->message.buffer.length;
+    imsg->message.buffer.resize(i + header->payloadBytes);
+    dpMemCopy(&imsg->message.buffer.data[i], &data->data[header->headerSizeBits/8], header->payloadBytes);
+
+    //
+    // Check for end of message transaction
+    //
+    if (header->isTransactionEnd)
+    {
+        freeOnNextCall = imsg;
+
+        return &imsg->message;
+    }
+
+    return 0;
+}
+
+MessageTransactionMerger::IncompleteMessage * MessageTransactionMerger::getTransactionRecord(const Address & address, unsigned messageNumber)
+{
+    IncompleteMessage * msg;
+    NvU64 currentTime = this->timer->getTimeUs();
+
+    //
+    // Search for an existing record
+    //
+    for (ListElement * i = incompleteMessages.begin(); i != incompleteMessages.end();)
+    {
+        msg = (IncompleteMessage *)i;
+        i = i->next;
+        if (msg->message.address == address && msg->message.messageNumber == messageNumber)
+        {
+            goto found;
+        }
+
+        //
+        // Found a stale message in the list
+        //
+        if (msg->lastUpdated + incompleteMessageTimeoutMs < currentTime)
+            delete msg;
+    }
+
+    //
+    // None exists?  Add a new one
+    //
+    msg = new IncompleteMessage();
+    msg->message.address = address;
+    msg->message.messageNumber = messageNumber;
+    this->incompleteMessages.insertFront(msg);
+
+found:
+    //
+    // Update the timestamp
+    //
+    msg->lastUpdated = currentTime;
+
+    return msg;
+}
+
+void IncomingTransactionManager::mailboxInterrupt()
+{
+    MessageHeader msg;
+    unsigned totalSize;
+    AuxRetry::status result;
+    unsigned txSize = (unsigned)getTransactionSize();
+
+    //
+    // Size the static aux window
+    //
+    this->localWindow.resize(DP_MAX((unsigned)getTransactionSize(), (unsigned)getMessageBoxSize()));
+    if (this->localWindow.isError())
+        return;
+
+    //
+    // Read one aux-transaction worth of data
+    //
+    result = readMessageBox(0, &this->localWindow.data[0], txSize);
+
+    DP_ASSERT(result != AuxRetry::defer && "Unexpected defer on message box read");
+
+    if (result != AuxRetry::ack)
+        return;
+
+    BitStreamReader reader(&this->localWindow, 0, 8*txSize);
+
+    //
+    // Before decoding the header, start with the downstream
+    // port's address prefix
+    //
+    if (!decodeHeader(&reader, &msg, addressPrefix))
+    {
+        //
+        // It's possible we should be NACKing here.  Ignoring for now
+        // to allow the message originator to time out (can take seconds).
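+        // (A NACK path here would mean synthesizing a NAK_REPLY addressed back
+        // to the message originator and queueing it on the up-reply splitter;
+        // that plumbing does not exist yet, hence the assert below.)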
+ // + DP_ASSERT(0 && "Not yet implemented"); + + return; + } + + // + // Let's get the entire sideband message in the localWindow + // + + totalSize = (msg.headerSizeBits / 8) + msg.payloadBytes; + + if (totalSize > txSize) + { + if (totalSize > DPCD_MESSAGEBOX_SIZE) + { + // + // Corrupt packet - total packet can't be larger than the window + // + return; + } + if (AuxRetry::ack!=readMessageBox(txSize, &this->localWindow.data[txSize], totalSize - txSize)) + { + // + // Failed to read second half of message + // + return; + } + } + + clearMessageBoxInterrupt(); + + EncodedMessage * em = incompleteMessages.pushTransaction(&msg, &this->localWindow); + + if (em) + { + this->sink->messagedReceived(this, em); + } +} + +IncomingTransactionManager::~IncomingTransactionManager() +{ +} + + +IncomingTransactionManager::IncomingTransactionManager(Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink) + : incompleteMessages(timer, DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC), addressPrefix(addressPrefix) +{ + this->sink = sink; + this->timer = timer; +} + + + +AuxRetry::status DownReplyManager::readMessageBox(NvU32 offset, NvU8 * data, size_t length) +{ + return hal->readDownReplyMessageBox(offset, data, length); +} + +size_t DownReplyManager::getMessageBoxSize() +{ + return hal->getDownReplyMessageBoxSize(); +} + +size_t DownReplyManager::getTransactionSize() +{ + return hal->getTransactionSize(); +} + +void DownReplyManager::clearMessageBoxInterrupt() +{ + hal->clearInterruptDownReplyReady(); +} + +AuxRetry::status UpRequestManager::readMessageBox(NvU32 offset, NvU8 * data, size_t length) +{ + return hal->readUpRequestMessageBox(offset, data, length); +} + +size_t UpRequestManager::getMessageBoxSize() +{ + return hal->getUpRequestMessageBoxSize(); +} + +size_t UpRequestManager::getTransactionSize() +{ + return hal->getTransactionSize(); +} + +void UpRequestManager::clearMessageBoxInterrupt() +{ + hal->clearInterruptUpRequestReady(); +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp new file mode 100644 index 0000000..ac5ad7a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp @@ -0,0 +1,690 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+*   Module: dp_messagecodings.cpp                                           *
+*       Encoding routines for various messages                              *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_messagecodings.h"
+#include "dp_auxdefs.h"
+
+using namespace DisplayPort;
+
+//
+// LINK_ADDRESS    0x1
+//
+void LinkAddressMessage::set(const Address & target)
+{
+    clear();
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    //
+    // Write request identifier
+    //
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+}
+
+ParseResponseStatus LinkAddressMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    DisplayPort::extractGUID(reader, &reply.guid);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    reply.numberOfPorts = reader->readOrDefault(4 /*Number_Of_Ports*/, 0xF);
+
+    for (unsigned i = 0; i < reply.numberOfPorts; i++)
+    {
+        reply.res[i].isInputPort = !!reader->readOrDefault(1 /*Input_Port*/, 1);
+        reply.res[i].peerDeviceType = (PeerDevice) reader->readOrDefault(3 /*Peer_Device_Type*/, 0x0);
+        reply.res[i].portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+        reply.res[i].hasMessaging = !!reader->readOrDefault(1 /*Messaging_Capability_Status*/, 0x1);
+        reply.res[i].dpPlugged = !!reader->readOrDefault(1 /*DisplayPort_Device_Plug_Status*/, 0x1);
+
+        if (reply.res[i].isInputPort == false)
+        {
+            reply.res[i].legacyPlugged = !!reader->readOrDefault(1 /*Legacy_Device_Plug_Status*/, 0x1);
+
+            reader->readOrDefault(5 /*zeroes*/, 0x0);
+
+            unsigned ver = reader->readOrDefault(8 /*DPCD_Revision*/, 0);
+            reply.res[i].dpcdRevisionMajor = ver >> 4;
+            reply.res[i].dpcdRevisionMinor = ver & 0xF;
+            DisplayPort::extractGUID(reader, &reply.res[i].peerGUID);
+            reply.res[i].SDPStreams = reader->readOrDefault(4 /*Number_SDP_Streams*/, 0xF);
+            reply.res[i].SDPStreamSinks = reader->readOrDefault(4 /*Number_SDP_Stream_Sinks*/, 0xF);
+        }
+        else
+        {
+            reader->readOrDefault(6 /*zeroes*/, 0x0);
+        }
+    }
+
+    return ParseResponseSuccess;
+}
+
+//
+// CONNECTION_STATUS_NOTIFY 0x2
+//
+ConnStatusNotifyMessage::ConnStatusNotifyMessage(MessageReceiverEventSink * sink)
+: MessageReceiver(sink, NV_DP_SBMSG_REQUEST_ID_CONNECTION_STATUS_NOTIFY /*request id*/)
+{
+}
+
+bool ConnStatusNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader)
+{
+    // read the request body
+    request.port = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    bool status = DisplayPort::extractGUID(reader /*GUID of the originating branch device*/, &request.guid);
+    reader->readOrDefault(1 /*zero*/, 0);
+    request.legacyPlugged = !!reader->readOrDefault(1 /*Legacy_Device_Plug_Status*/, 0);
+    request.devicePlugged = !!reader->readOrDefault(1 /*DisplayPort_Device_Plug_Status*/, 0);
+    request.messagingCapability = !!reader->readOrDefault(1 /*Messaging_Capability_Status*/, 0);
+    request.isInputPort = !!reader->readOrDefault(1 /*Input_Port*/, 0);
+    request.peerDeviceType = (PeerDevice) reader->readOrDefault(3 /*Peer_Device_Type*/, 0);
+
+    // the action itself is implemented by the event sink
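+    //
+    // For illustration only: a client sink receives this notification through
+    // MessageReceiverEventSink::messageProcessed() and typically downcasts to
+    // recover the parsed request (hypothetical handler, not part of this file):
+    //
+    //     void HotplugSink::messageProcessed(MessageReceiver * from)
+    //     {
+    //         ConnStatusNotifyMessage * csn = (ConnStatusNotifyMessage *)from;
+    //         // re-enumerate the branch identified by the request's GUID/port
+    //     }
+    //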
+    this->sink->messageProcessed(this);
+    return status;
+}
+
+//
+// GENERIC_UP_REPLY 0xnn
+//
+void GenericUpReplyMessage::set(const Address & target,
+                                bool bReplyIsNack,
+                                bool bBroadcast,
+                                bool bPath)
+{
+    clear();
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    writer.write(bReplyIsNack ? 1 : 0, 1);
+    writer.write(requestIdentifier, 7);
+
+    encodedMessage.isPathMessage = bPath;
+    encodedMessage.isBroadcast = bBroadcast;
+    encodedMessage.address = target;
+}
+
+GenericUpReplyMessage::GenericUpReplyMessage(unsigned requestId, bool bReplyIsNack, bool bBroadcast, bool bPath)
+: Message(requestId, NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    //
+    // Write request identifier
+    //
+    writer.write(bReplyIsNack ? 1 : 0, 1);
+    writer.write(requestId, 7);
+
+    encodedMessage.isPathMessage = bPath;
+    encodedMessage.isBroadcast = bBroadcast;
+}
+
+GenericUpReplyMessage::GenericUpReplyMessage(const Address & target, unsigned requestId, bool bReplyIsNack, bool bBroadcast, bool bPath)
+: Message(requestId, NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    //
+    // Write request identifier
+    //
+    writer.write(bReplyIsNack ? 1 : 0, 1);
+    writer.write(requestId, 7);
+
+    encodedMessage.isPathMessage = bPath;
+    encodedMessage.isBroadcast = bBroadcast;
+    encodedMessage.address = target;
+}
+
+ParseResponseStatus GenericUpReplyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    //
+    // We are not expecting any replies here: the corresponding post for this
+    // kind of message is itself a reply, so the message manager won't queue
+    // an awaiting down reply for it.
+    //
+    DP_ASSERT(0 && "We shouldn't be here!!");
+    return ParseResponseSuccess;
+}
+
+//
+// CLEAR_PAYLOAD_ID_TABLE 0x14
+//
+ClearPayloadIdTableMessage::ClearPayloadIdTableMessage()
+: Message(NV_DP_SBMSG_REQUEST_ID_CLEAR_PAYLOAD_ID_TABLE /* request id */, NV_DP_SBMSG_PRIORITY_LEVEL_1)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    encodedMessage.isPathMessage = true;
+    encodedMessage.isBroadcast = true;
+    encodedMessage.address = Address();
+}
+
+ParseResponseStatus ClearPayloadIdTableMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    return ParseResponseSuccess;
+}
+
+ParseResponseStatus ClearPayloadIdTableMessage::parseResponse(EncodedMessage * message)
+{
+    sink->messageCompleted(this);
+    return ParseResponseSuccess;
+}
+
+//
+// ENUM_PATH_RESOURCES 0x10
+//
+EnumPathResMessage::EnumPathResMessage(const Address & target, unsigned port, bool point)
+: Message(NV_DP_SBMSG_REQUEST_ID_ENUM_PATH_RESOURCES /* request identifier */,
+          NV_DP_SBMSG_PRIORITY_LEVEL_4)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+    writer.write(port, 4);
+    writer.write(0/*zeroes*/, 4);
+
+    encodedMessage.isPathMessage = !point;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+    dpMemZero(&reply, sizeof(reply));
+}
+
+ParseResponseStatus EnumPathResMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(3 /*zeroes*/, 0);
+    reply.bFECCapability = (reader->readOrDefault(1 /*FEC*/, 0x0) == 1) ?
true : false;
+    reply.TotalPBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
+    reply.FreePBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// ALLOCATE_PAYLOAD 0x11
+//
+void AllocatePayloadMessage::set
+(
+    const Address & target,
+    unsigned port,
+    unsigned nSDPStreams,
+    unsigned vcPayloadId,
+    unsigned PBN,
+    unsigned* SDPStreamSink,
+    bool entirePath
+)
+{
+    clear();
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    DP_ASSERT(SDPStreamSink || (!nSDPStreams));
+
+    // Write message request body
+    writer.write(port, 4);
+    writer.write(nSDPStreams, 4);
+    writer.write(0/*zero*/, 1);
+    writer.write(vcPayloadId, 7);
+    writer.write(PBN, 16);
+    for (unsigned i = 0; i < nSDPStreams; i++)
+        writer.write(SDPStreamSink[i], 4);
+
+    encodedMessage.isPathMessage = entirePath;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+    dpMemZero(&reply, sizeof(reply));
+}
+
+ParseResponseStatus AllocatePayloadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(5 /*zeroes*/, 0);
+    reply.virtualChannelPayloadId = reader->readOrDefault(7 /*Virtual_Channel_Payload_Identifier*/, 0x0);
+    reply.PBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// QUERY_PAYLOAD 0x12
+//
+QueryPayloadMessage::QueryPayloadMessage
+(
+    const Address & target,
+    unsigned port,
+    unsigned vcPayloadId
+)
+    : Message(NV_DP_SBMSG_REQUEST_ID_QUERY_PAYLOAD /* request identifier */,
+              NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT)
+{
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0 /*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // Write message request
+    writer.write(port, 4);
+    writer.write(0 /*zeroes*/, 5);
+    writer.write(vcPayloadId, 7);
+
+    encodedMessage.isPathMessage = false;
+    encodedMessage.isBroadcast = false;
+    encodedMessage.address = target;
+    sinkPort = port;
+    dpMemZero(&reply, sizeof(reply));
+}
+
+ParseResponseStatus QueryPayloadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader)
+{
+    reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    reply.allocatedPBN = reader->readOrDefault(16 /*Allocated_PBN*/, 0xFFFF);
+
+    if (this->getSinkPort() != reply.portNumber)
+        return ParseResponseWrong;
+
+    return ParseResponseSuccess;
+}
+
+//
+// RESOURCE_STATUS_NOTIFY 0x13
+//
+ResStatusNotifyMessage::ResStatusNotifyMessage(MessageReceiverEventSink * sink)
+: MessageReceiver(sink, NV_DP_SBMSG_REQUEST_ID_RESOURCE_STATUS_NOTIFY /*request id*/)
+{
+    dpMemZero(&request, sizeof(request));
+}
+
+bool ResStatusNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader)
+{
+    bool status;
+
+    // read the request body
+    request.port = reader->readOrDefault(4 /*Port_Number*/, 0xF);
+    reader->readOrDefault(4 /*zeroes*/, 0);
+    status = DisplayPort::extractGUID(reader, &request.guid);
+    request.PBN = reader->readOrDefault(16 /*Available_PBN*/, 0);
+
+    // the action itself is implemented by the event sink
+    this->sink->messageProcessed(this);
+    return status;
+}
+
+//
+// REMOTE_DPCD_READ 0x20
+//
+void RemoteDpcdReadMessage::set
+(
+    const Address & target,
+    unsigned port,
+    unsigned dpcdAddress,
+    unsigned nBytesToRead
+)
+{
+    clear();
+
+    BitStreamWriter writer(&encodedMessage.buffer, 0);
+
+    // Write request identifier
+    writer.write(0/*zero*/, 1);
+    writer.write(requestIdentifier, 7);
+
+    // write request data
+    writer.write(port, 4);
+    writer.write(dpcdAddress, 20);
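+    // (dpcdAddress occupies a 20-bit field in the request, per the
+    // serialization above.)
+    //
+    // Illustrative usage only, with made-up address/port/length values:
+    //
+    //     RemoteDpcdReadMessage msg;
+    //     Address addr;
+    //     addr.append(0);                        // immediate branch device
+    //     msg.set(addr, 1 /*port*/, 0x00000 /*DPCD offset*/, 16 /*bytes*/);
+    //     messageManager->post(&msg, &myEventSink);
+    //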
writer.write(nBytesToRead, 8); + + encodedMessage.isPathMessage = false; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; +} + +ParseResponseStatus RemoteDpcdReadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reader->readOrDefault(4 /*zeroes*/, 0); + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reply.numBytesReadDPCD = reader->readOrDefault(8 /*Num_Of_Bytes_Read*/, 0x0); + for (unsigned i=0; ireadOrDefault(8 /*data*/, 0x0); + } + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// REMOTE_DPCD_WRITE 0x21 +// +void RemoteDpcdWriteMessage::set +( + const Address & target, + unsigned port, + unsigned dpcdAddress, + unsigned nBytesToWrite, + const NvU8 * writeData +) +{ + clear(); + BitStreamWriter writer(&encodedMessage.buffer, 0); + + DP_ASSERT(writeData || (!nBytesToWrite)); + + // Write request identifier + writer.write(0/*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request data + writer.write(port, 4); + writer.write(dpcdAddress, 20); + writer.write(nBytesToWrite, 8); + + for (unsigned i=0; ireadOrDefault(4 /*zeroes*/, 0); + unsigned portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + + DP_ASSERT(portNumber == this->sinkPort); + DP_USED(portNumber); + + if (this->getSinkPort() != portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// REMOTE_I2C_READ 0x22 +// +void RemoteI2cReadMessage::set +( + const Address & target, + unsigned nWriteTransactions, + unsigned port, + I2cWriteTransaction* transactions, + unsigned readI2cDeviceId, + unsigned nBytesToRead +) +{ + clear(); + + BitStreamWriter writer(&encodedMessage.buffer, 0); + + DP_ASSERT(transactions || (!nWriteTransactions)); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request specific data + writer.write(port, 4); + writer.write(0/*zeroes*/, 2); + writer.write(nWriteTransactions, 2); + + for (unsigned i=0; ireadOrDefault(4 /*zeroes*/, 0); + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reply.numBytesReadI2C = reader->readOrDefault(8 /*Num_Of_Bytes_Read*/, 0x0); + for (unsigned i=0; ireadOrDefault(8 /*data*/, 0x0); + } + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// REMOTE_I2C_WRITE 0x23 +// +void RemoteI2cWriteMessage::set +( + const Address & target, + unsigned port, + unsigned writeI2cDeviceId, + unsigned nBytesToWrite, + unsigned char* writeData +) +{ + clear(); + + BitStreamWriter writer(&encodedMessage.buffer, 0); + + DP_ASSERT(writeData || (!nBytesToWrite)); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request data + writer.write(port, 4); + writer.write(0/*zero*/, 5); + writer.write(writeI2cDeviceId, 7); + writer.write(nBytesToWrite, 8); + + for (unsigned i=0; ireadOrDefault(4 /*zeroes*/, 0); + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} +// +// POWER_UP_PHY 0x24 +// +void PowerUpPhyMessage::set +( + const Address & target, + unsigned port, + bool entirePath +) +{ + clear(); + + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // 
write request specific data + writer.write(port, 4); + writer.write(0 /*zero*/, 4); + + encodedMessage.isPathMessage = entirePath; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; +} + +// +// POWER_DOWN_PHY 0x25 +// +ParseResponseStatus PowerUpPhyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reader->readOrDefault(4 /*zeroes*/, 0); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +void PowerDownPhyMessage::set +( + const Address & target, + unsigned port, + bool entirePath +) +{ + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request specific data + writer.write(port, 4); + writer.write(0/*zeros*/, 4); + + encodedMessage.isPathMessage = entirePath; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; +} + +ParseResponseStatus PowerDownPhyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reader->readOrDefault(4 /*zeroes*/, 0); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// SINK_EVENT_NOTIFY 0x30 +// + +SinkEventNotifyMessage::SinkEventNotifyMessage(MessageReceiverEventSink * sink, unsigned requestId) +: MessageReceiver(sink, 0x30 /*request id*/) +{ +} + +bool SinkEventNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader) +{ + return true; +} + + +I2cWriteTransaction::I2cWriteTransaction +( + unsigned WriteI2cDeviceId, + unsigned NumBytes, + unsigned char * buffer, + bool NoStopBit, + unsigned I2cTransactionDelay +) +{ + this->WriteI2cDeviceId = WriteI2cDeviceId; + this->NumBytes = NumBytes; + this->NoStopBit = NoStopBit; + this->I2cTransactionDelay = I2cTransactionDelay; + this->I2cData = buffer; +} + +I2cWriteTransaction::I2cWriteTransaction(): +WriteI2cDeviceId(0), NumBytes(0), I2cData(0), NoStopBit(0), I2cTransactionDelay(0) +{ +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp new file mode 100644 index 0000000..c453724 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_messageheader.cpp * +* DP message header parser * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_crc.h" +#include "dp_messageheader.h" + + +bool DisplayPort::decodeHeader(BitStreamReader * reader, MessageHeader * header, const Address & address) +{ + unsigned startOffset = reader->offset(); + int LCT, i; + + // + // Read the RAD + // + LCT = reader->readOrDefault( 4, 0); + reader->readOrDefault( 4, 0); + + header->address = address; + + for (i = 0; i < LCT - 1; i++) + { + header->address.append(reader->readOrDefault( 4, 0)); + } + + reader->align( 8); + + // + // Read flags + // + header->isBroadcast = !!reader->readOrDefault( 1, 0); + header->isPathMessage = !!reader->readOrDefault( 1, 0); + header->payloadBytes = reader->readOrDefault( 6, 0) ; + + header->isTransactionStart = !!reader->readOrDefault( 1, 0); + header->isTransactionEnd = !!reader->readOrDefault( 1, 0); + reader->readOrDefault( 1, 0); + header->messageNumber = reader->readOrDefault( 1, 0); + + + // Build a bit reader for the slice of header we just processed + BitStreamReader crcReader(reader->buffer(), startOffset, reader->offset()); + + if (reader->readOrDefault( 4, (NvU32)~0) != dpCalculateHeaderCRC(&crcReader)) + { + // Corrupt packet received + char buffer[48*3+1]; + dpHexDump(&buffer[0], sizeof(buffer), (NvU8*)reader->buffer() + startOffset, reader->offset() - startOffset); + DP_LOG(("DP-MM> Corrupt message transaction. Expected CRC %d. Message = {%s}", dpCalculateHeaderCRC(&crcReader), buffer)); + + return false; + } + + header->headerSizeBits = reader->offset() - startOffset; + return true; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp new file mode 100644 index 0000000..dded60f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp @@ -0,0 +1,606 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_messages.cpp * +* Encoding for aux common messages. * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_splitter.h" +#include "dp_messages.h" +#include "dp_merger.h" +#include "dp_list.h" +#include "dp_tracing.h" + +using namespace DisplayPort; +namespace DisplayPort +{ + GenericMessageCompletion::GenericMessageCompletion() : + failed(false), completed(false) {} + void GenericMessageCompletion::messageFailed(MessageManager::Message * from, NakData * data) + { + nakData = *data; + failed = true; + completed = true; + } + void GenericMessageCompletion::messageCompleted(MessageManager::Message * from) + { + failed = false; + completed = true; + } +}; + +// +// Transmit a message and wait for the response in place. +// +bool MessageManager::send(MessageManager::Message * message, NakData & nakData) +{ + GenericMessageCompletion completion; + Address::StringBuffer sb; + DP_USED(sb); + + NvU64 startTime, elapsedTime; + message->bBusyWaiting = true; + post(message, &completion); + startTime = timer->getTimeUs(); + do + { + hal->notifyIRQ(); + if (hal->interruptDownReplyReady()) + IRQDownReply(); + + if (completion.completed) + { + nakData = completion.nakData; + break; + } + elapsedTime = timer->getTimeUs() - startTime; + + if (elapsedTime > (DPCD_MESSAGE_REPLY_TIMEOUT * 1000)) + { + message->expired(NULL); + nakData.reason = NakTimeout; + break; + } + + // Sleep while processing timer callbacks + timer->sleep(1); + } while(true); + + return !completion.failed; +} + +bool DisplayPort::extractGUID(BitStreamReader * reader, GUID * guid) +{ + for (unsigned i=0; i < 128; i += 8) + { + unsigned data; + if (!reader->read(&data, 8)) + { + return false; + } + + guid->data[i/8] = (NvU8)data; + } + + return true; +} + +void MessageManager::messagedReceived(IncomingTransactionManager * from, EncodedMessage * message) +{ + if (from == &mergerUpRequest) + { + onUpRequestReceived(true, message); + } + else + { + onDownReplyReceived(true, message); + } +} + +void MessageManager::Message::splitterFailed(OutgoingTransactionManager * from) +{ + // + // Message failed + // + NakData nakData; + nakData.reason = NakTimeout; + MessageManager * parent = this->parent; + + if (sink) + sink->messageFailed(this, &nakData); + + if (from == &parent->splitterDownRequest) + { + // + // Tell the message manager he may begin sending the next message + // + parent->transmitAwaitingDownRequests(); + } + else + { + parent->transmitAwaitingUpReplies(); + } +} + +void MessageManager::Message::splitterTransmitted(OutgoingTransactionManager * from) +{ + bTransmitted = true; + MessageManager * parent = this->parent; + + if (from == &parent->splitterDownRequest) + { + // Client will busy-waiting for the message to complete, we don't need the countdown timer. 
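+        // Such synchronous clients drive MessageManager::send() (defined
+        // above), which polls interruptDownReplyReady() itself. Roughly, and
+        // for illustration only (target address and manager/sink are made up):
+        //
+        //     NakData nak;
+        //     LinkAddressMessage la;
+        //     la.set(targetAddress);
+        //     if (!manager->send(&la, nak))
+        //         DP_LOG(("DP-MM> LINK_ADDRESS failed, NAK reason %d", nak.reason));
+        //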
+ if (!bBusyWaiting) + { + // Start the countdown timer for the reply + parent->timer->queueCallback(this, "SPLI", DPCD_MESSAGE_REPLY_TIMEOUT); + } + // Tell the message manager he may begin sending the next message + parent->transmitAwaitingDownRequests(); + } + else // UpReply + { + if (sink) + sink->messageCompleted(this); // This is the end for an up reply + + parent->transmitAwaitingUpReplies(); + } + +} + +// Since transmit DPCD_MESSAGE_REPLY_TIMEOUT time has elapsed. +// - Let's assume the message was not replied to +void MessageManager::Message::expired(const void * tag) +{ + Address::StringBuffer sb; + DP_USED(sb); + + DP_LOG(("DP-MM> Message transmit time expired on message %p (ID = %02X, target = %s)", + (Message*)this, ((Message*)this)->requestIdentifier, (((Message*)this)->state.target).toString(sb))); + + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + (((Message*)this)->state.target).toNvU32Buffer(addrBuffer); + NV_DPTRACE_WARNING(MESSAGE_EXPIRED, ((Message*)this)->requestIdentifier, (((Message*)this)->state.target).size(), + addrBuffer[0], addrBuffer[1], addrBuffer[2], addrBuffer[3]); + + NakData nakData; + nakData.reason = NakTimeout; + + MessageManager * parent = this->parent; + + DP_ASSERT(parent); + if (parent && !parent->isBeingDestroyed) + { + parent->awaitingReplyDownRequest.remove(this); + parent->clearPendingMsg(); + parent->transmitAwaitingDownRequests(); + parent->transmitAwaitingUpReplies(); + } + + if (sink) + sink->messageFailed(this, &nakData); +} + +// +// Enqueue the next message to the splitterDownRequest +// +void MessageManager::transmitAwaitingDownRequests() +{ + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; // Do this first since we may unlink the current node + + if (awaitingReplyDownRequest.isEmpty()) + { + // + // Set the message number, and unlink from the outgoing queue + // + m->encodedMessage.messageNumber = 0; + m->state.messageNumber = 0; + + notYetSentDownRequest.remove(m); + awaitingReplyDownRequest.insertBack(m); + + // + // This call can cause transmitAwaitingDownRequests to be called again + // + bool sent = splitterDownRequest.send(m->encodedMessage, m); + DP_ASSERT(sent); + + return; + } + } +} + +// +// Enqueue the next message to the splitterUpReply +// +void MessageManager::transmitAwaitingUpReplies() +{ + for (ListElement * i = notYetSentUpReply.begin(); i!=notYetSentUpReply.end(); ) + { + Message * m = (Message *)i; + i = i->next; // Do this first since we may unlink the current node + + notYetSentUpReply.remove(m); + + // + // This call can cause transmitAwaitingUpReplies to be called again + // + bool sent = splitterUpReply.send(m->encodedMessage, m); + DP_ASSERT(sent); + } +} + +void MessageManager::postReply(Message * message, Message::MessageEventSink * sink) +{ + post(message, sink, true); +} + +void MessageManager::cancelAllByType(unsigned type) +{ + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m->requestIdentifier == type) + notYetSentDownRequest.remove(m); + } + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m->requestIdentifier == type) + awaitingReplyDownRequest.remove(m); + } +} + +void MessageManager::cancelAll(Message * message) +{ + for (ListElement * i = notYetSentDownRequest.begin(); 
i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m == message && m->requestIdentifier == message->requestIdentifier) + notYetSentDownRequest.remove(m); + } + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m == message && m->requestIdentifier == message->requestIdentifier) + awaitingReplyDownRequest.remove(m); + } +} + +void MessageManager::post(Message * message, Message::MessageEventSink * sink, bool transmitReply) +{ + DP_ASSERT(!isBeingDestroyed && "You may not post messages in response to a shutdown"); + + if (isPaused) + return; + + // + // Initialize the fields + // + message->sink = sink; + message->bTransmitted = false; + + // + // Queue the message for the outgoing queue. + // Later on we'll walk to the queue and make sure + // we have at most two outstanding messages PER + // target address. This is how the message + // number is decided. + // + + message->parent = this; + message->transmitReply = transmitReply; + if (message->encodedMessage.isBroadcast) + { + // if its a broadcast message; the target would be the immediate branch. + Address addr; + addr.clear(); + addr.append(0); + message->state.target = addr; + } + else + message->state.target = message->encodedMessage.address; + + if ( transmitReply ) + { + notYetSentUpReply.insertBack(message); + transmitAwaitingUpReplies(); + } + else + { + // + // If the list is empty or the incoming message has the least priority possible (DEFAULT priority), + // then just add the incoming message to the back of the list. + // Otherwise, find the right location by traversing the list. + // + if(message->messagePriority == NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT || notYetSentDownRequest.isEmpty()) + { + notYetSentDownRequest.insertBack(message); + } + else + { + ListElement *tmp = notYetSentDownRequest.last(); + Message *msg = (Message*) notYetSentDownRequest.last(); + while((msg->prev != tmp) && (msg->messagePriority < message->messagePriority)) + { + msg = (Message*)msg->prev; + } + notYetSentDownRequest.insertBefore(msg->next, message); + } + transmitAwaitingDownRequests(); + } +} + +void MessageManager::onUpRequestReceived(bool status, EncodedMessage * message) +{ + if (!status) + { + return; + } + + // + // Broadcast the up-request message to all + // the receivers on messageReceivers + // + for (ListElement * i = messageReceivers.begin(); i!=messageReceivers.end(); i=i->next) + { + MessageReceiver * rcr = (MessageReceiver *)i; + if (rcr->process((EncodedMessage *)message)) + { + return; + } + } + + DP_ASSERT(0 && "Warning: Unknown upstream UP_REQ message"); +} + + +void MessageManager::onDownReplyReceived(bool status, EncodedMessage * message) +{ + if (!status) + { + return; + } + + // + // Broadcast the down-request message to all + // the receivers on awaitingReplyDownRequest + // + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); i=i->next) + { + Message * messageAwaitingReply = (Message *)i; + + if( messageAwaitingReply->state.target == message->address && + messageAwaitingReply->state.messageNumber == message->messageNumber) + { + awaitingReplyDownRequest.remove(messageAwaitingReply); + if (messageAwaitingReply->parseResponse(message) == ParseResponseWrong) + { + // + // parseResponse() returns ParseResposeWrong when 'Request_Identifier' of down request + // message and down reply message are mis-matched. 
So insert message in waiting queue + // and wait for correct down reply message. + // + awaitingReplyDownRequest.insertBack(messageAwaitingReply); + } + + goto nextMessage; + } + } + + DP_LOG(("DPMM> Warning: Unmatched reply message")); +nextMessage: + transmitAwaitingUpReplies(); + transmitAwaitingDownRequests(); +} + +MessageManager::~MessageManager() +{ + // This causes any posts they may attempt to do to fail + isBeingDestroyed = true; + + // + // The message manager should not be shut down until + // all outgoing messages are in the cancelled state + // + NakData nakUndef; + nakUndef.reason = NakUndefined; + + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + ListElement * next = i->next; + if (((Message *)i)->sink) + ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef); + i = next; + } + if (!notYetSentDownRequest.isEmpty()) + { + + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + ListElement * next = i->next; + DP_LOG(("Down request message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier)); + i = next; + } + } + + for (ListElement * i = notYetSentUpReply.begin(); i!=notYetSentUpReply.end();) + { + ListElement * next = i->next; + if (((Message *)i)->sink) + ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef); + i = next; + } + if (!notYetSentUpReply.isEmpty()) + { + + for (ListElement * i = notYetSentUpReply.begin(); i!=notYetSentUpReply.end(); ) + { + ListElement * next = i->next; + DP_LOG(("Up reply message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier)); + i = next; + } + } + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + ListElement * next = i->next; + if (((Message *)i)->sink) + ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef); + i = next; + } + if (!awaitingReplyDownRequest.isEmpty()) + { + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + ListElement * next = i->next; + DP_LOG(("Down request message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier)); + i = next; + } + } + + // Do not reclaim the memory of our registered receivers + while (!messageReceivers.isEmpty()) + messageReceivers.remove(messageReceivers.front()); +} + +ParseResponseStatus MessageManager::Message::parseResponse(EncodedMessage * message) +{ + BitStreamReader reader(&message->buffer, 0, message->buffer.length*8); + + // Read ReplyType + bool replyNacked = !!reader.readOrDefault(1, true); + + // Read RequestIdentifier + unsigned requestId = reader.readOrDefault(7, 0); + if (requestId != requestIdentifier) + { + DP_LOG(("DP-MM> Requested = %x Received = %x", requestId, requestIdentifier)); + DP_ASSERT(0 && "Reply type doesn't match"); + return ParseResponseWrong; + } + + if (replyNacked) + { + NakData nakData; + + // failure handler will parse the NAK response and do the required action + if (DisplayPort::extractGUID(&reader, &nakData.guid) == false) + { + DP_ASSERT(0 && "Invalid GUID in NAK"); + } + + nakData.reason = (NakReason)reader.readOrDefault(8, 0); + nakData.nak_data = reader.readOrDefault(8, 0); + + // call specific handler after parsing. 
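+        // nakData now carries the NAK'ing device's GUID, an 8-bit NakReason
+        // and one byte of reason-specific data, exactly as parsed above.
+        // Failure handlers typically branch on the reason;
+        // EdidReadMultistream::messageFailed() in dp_mst_edid.cpp, for
+        // example, retries only on NakDefer/NakTimeout.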
+ parent->timer->cancelCallbacks(this); + + MessageManager * parent = this->parent; + + if (sink) + sink->messageFailed(this, &nakData); + + parent->transmitAwaitingDownRequests(); + + return ParseResponseSuccess; + } + + ParseResponseStatus parseResult = parseResponseAck(message, &reader); + + if (parseResult == ParseResponseSuccess) + { + parent->timer->cancelCallbacks(this); + + if (this->sink) + { + MessageEventSink * msgSink = this->sink; + msgSink->messageCompleted(this); + } + } + + return parseResult; +} + +void MessageManager::Message::MessageEventSink::messageFailed(Message * from, NakData * nakData) +{ + +} + +void MessageManager::registerReceiver(MessageReceiver * receiver) +{ + messageReceivers.insertBack(receiver); +} + + +bool MessageManager::MessageReceiver::process(EncodedMessage * message) +{ + BitStreamReader reader(&message->buffer, 0, message->buffer.length*8); + + // Read RequestIdentifier + reader.readOrDefault(1, 0); + unsigned reqId = reader.readOrDefault(7, 0); + + if (reqId != this->getRequestId()) + { + // + // This receiver is not meant for this message; + // let the next in the queue handle it. + // + return false; + } + + this->address = message->address; + + // processByType should parse the request, create a response and queue it if needed + bool status = processByType(message, &reader); + if (!status) + { + // + // if we are here; we could get a receiver to handle the request + // but something else went wrong. + // + DP_ASSERT(0); + } + + return true; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp new file mode 100644 index 0000000..547316a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_mst_edid.c * +* Implementation Multi Stream EDID reads * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_edid.h" +#include "dp_address.h" +#include "dp_messagecodings.h" +#include "dp_messages.h" + +using namespace DisplayPort; + +EdidReadMultistream::~EdidReadMultistream() +{ + timer->cancelCallbacks(this); +} + +void EdidReadMultistream::startReadingEdid() +{ + NvU8 offset = 0; + I2cWriteTransaction i2cWriteTransactions[1]; + Address::StringBuffer buffer; + DP_USED(buffer); + DP_LOG(("%s(): start for %s", __FUNCTION__, + topologyAddress.toString(buffer))); + + edidReaderManager.reset(); + edid.resetData(); + + DDCAddress = ddcAddrList[ddcIndex]; + + // set offset within segment 0, no need to set segment, because we're starting reading EDID + i2cWriteTransactions[0] = I2cWriteTransaction(DDCAddress >> 1, + sizeof(offset), + &offset, + true); + NvU8 nWriteTransactions = 1; + + remoteI2cRead.set(topologyAddress.parent(), // topology Address + nWriteTransactions, // number of write transactions + topologyAddress.tail(), // port of Device + i2cWriteTransactions, // list of write transactions + DDCAddress >> 1, // right shifted DDC Address (request identifier in spec) + EDID_BLOCK_SIZE); // requested size + + manager->post(&remoteI2cRead, this); +} + +void EdidReadMultistream::messageCompleted(MessageManager::Message * from) +{ + RemoteI2cReadMessage* I2CReadMessage = (RemoteI2cReadMessage*)from; + unsigned char * data = 0; + unsigned numBytesRead; + Address::StringBuffer buffer; + DP_USED(buffer); + + NvU8 seg; + NvU8 offset; + DP_LOG(("%s for %s", __FUNCTION__, topologyAddress.toString(buffer))); + + DP_ASSERT(DDCAddress && "DDCAddress is 0, it is wrong"); + + data = I2CReadMessage->replyGetI2CData(&numBytesRead); + DP_ASSERT(data); + + // this is not required, but I'd like to keep things simple at first submission + DP_ASSERT(numBytesRead == EDID_BLOCK_SIZE); + edidReaderManager.postReply(data, numBytesRead, true); + + if (edidReaderManager.readNextRequest(seg, offset)) + { + readNextBlock(seg, offset); + } + else // EDID read is finished or failed. 
+ { + edidAttemptDone(edidReaderManager.readIsComplete() && edid.verifyCRC()); + } +} + +void EdidReadMultistream::edidAttemptDone(bool succeeded) +{ + if (succeeded) + sink->mstEdidCompleted(this); + else if (ddcIndex + 1 < ddcAddrListSize) + { + ddcIndex++; + startReadingEdid(); + } + else + sink->mstEdidReadFailed(this); +} + +void EdidReadMultistream::readNextBlock(NvU8 seg, NvU8 offset) +{ + I2cWriteTransaction i2cWriteTransactions[2]; + Address::StringBuffer buffer; + DP_USED(buffer); + + // ensure that init function for i2cWriteTranscation for segment and offset won't break + DP_ASSERT(sizeof(seg) == 1); + DP_ASSERT(sizeof(offset) == 1); + + DP_LOG(("%s(): for %s (seg/offset) = %d/%d", __FUNCTION__, + topologyAddress.toString(buffer), + seg, offset)); + + unsigned nWriteTransactions = 2; + if (seg) + { + // select segment + i2cWriteTransactions[0] = I2cWriteTransaction(EDID_SEG_SELECTOR_OFFSET >> 1, + 1, &seg, true); + // set offset within segment + i2cWriteTransactions[1] = I2cWriteTransaction(DDCAddress >> 1, + 1, &offset, true); + } + else + { + // set offset within segment 0 + i2cWriteTransactions[0] = I2cWriteTransaction(DDCAddress >> 1, 1, &offset, true); + nWriteTransactions = 1; + } + + remoteI2cRead.set(topologyAddress.parent(), // topology Address + nWriteTransactions, // number of write transactions + topologyAddress.tail(), // port of Device + i2cWriteTransactions, // list of write transactions + DDCAddress >> 1, // right shifted DDC Address (request identifier in spec) + EDID_BLOCK_SIZE); // requested size + + manager->post(&remoteI2cRead, this, false); +} + +void EdidReadMultistream::expired(const void * tag) +{ + Address::StringBuffer buffer; + DP_USED(buffer); + DP_LOG(("%s on %s", __FUNCTION__, topologyAddress.toString(buffer))); + startReadingEdid(); +} + +void EdidReadMultistream::messageFailed(MessageManager::Message * from, NakData * nakData) +{ + Address::StringBuffer buffer; + DP_USED(buffer); + DP_LOG(("%s on %s", __FUNCTION__, topologyAddress.toString(buffer))); + + if (nakData->reason == NakDefer || nakData->reason == NakTimeout) + { + if (retries < MST_EDID_RETRIES) + { + ++retries; + timer->queueCallback(this, "EDID", MST_EDID_COOLDOWN); + } + else + edidAttemptDone(false /* failed */); + } + else + { + edidAttemptDone(false /* failed */); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp new file mode 100644 index 0000000..8b5e40d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp @@ -0,0 +1,314 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_splitter.cpp * +* Asynchronous Message Splitter * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_splitter.h" +#include "dp_auxdefs.h" +#include "dp_crc.h" +#include "dp_configcaps.h" + +using namespace DisplayPort; + +#define DP_MAX_HEADER_SIZE 16 +// timeout after 110ms with a retry recurring every 5ms for 10 times +#define DOWNSTREAM_RETRY_ON_DEFER_TIMEOUT 110 +#define DOWNSTREAM_RETRY_ON_DEFER_PERIOD 5 +#define DOWNSTREAM_RETRY_ON_DEFER_COUNT 10 + +bool MessageTransactionSplitter::get(Buffer & assemblyBuffer) +{ + unsigned i; + unsigned payloadSize; + bool isTransactionStart, isTransactionEnd; + Address address; + unsigned LCT; + unsigned LCR; + unsigned headerSizeBits; + + assemblyBuffer.reset(); + + // + // Done? + // + if (this->messageOutstanding->buffer.length == this->assemblyTransmitted) + { + return false; + } + + address = this->messageOutstanding->address; + if (this->messageOutstanding->isBroadcast) + { + // no RAD + address.clear(); + LCT = 1; + } + else + { + LCT = address.size(); + } + + // Calculate header size + headerSizeBits = 8 + // LCT/LCR + (((4 * (LCT -1)) + 4) &~ 7) + // byte aligned RAD + 16; + + // + // Pick how much data to send. Header+payloadSize <= 48 bytes. + // + payloadSize = DP_MIN(DPCD_MESSAGEBOX_SIZE - (headerSizeBits+7)/8, /*crc*/1 + this->messageOutstanding->buffer.length - this->assemblyTransmitted); + + // + // Is the first or last transaction in the sequence? + // + isTransactionStart = assemblyTransmitted == 0; + isTransactionEnd = (assemblyTransmitted + payloadSize - 1) == messageOutstanding->buffer.length; + + BitStreamWriter writer(&assemblyBuffer, 0); + + // + // Write the header + // + writer.write(LCT, 4); + + LCR = this->messageOutstanding->isBroadcast ? 6 : LCT > 1 ? LCT - 1 : 0; + + writer.write(LCR, 4); + + // port at i=0 is the outport of source/gpu which should not be included in the RAD in outgoing message header + // if this is a broadcast message; LCT would be 1; hence no RAD. + for (i = 1; i < LCT; i++) + writer.write(address[i], 4); + writer.align(8); + + writer.write(this->messageOutstanding->isBroadcast, 1); + writer.write(this->messageOutstanding->isPathMessage, 1); + writer.write(payloadSize, 6); + + writer.write(isTransactionStart, 1); + writer.write(isTransactionEnd, 1); + writer.write(0, 1); + + DP_ASSERT(messageOutstanding->messageNumber == 0 || messageOutstanding->messageNumber == 1); + writer.write(messageOutstanding->messageNumber, 1); + + // + // Generate 4 bit CRC. 
(Nibble-wise CRC of previous values) + // + BitStreamReader reader(&assemblyBuffer, 0, writer.offset()); + writer.write(dpCalculateHeaderCRC(&reader), 4); + + DP_ASSERT(writer.offset() == headerSizeBits && "Header size mismatch"); + DP_ASSERT((writer.offset() & 7) == 0 && "Packet header must end byte aligned"); + + // + // Generate body CRC + // + BitStreamReader bodyReader(&this->messageOutstanding->buffer, this->assemblyTransmitted * 8, (payloadSize - 1) * 8); + NvU8 bodyCrc = (NvU8)dpCalculateBodyCRC(&bodyReader); + + // Copy in remaining buffer (leaving room for the CRC) + for (i = 0; i < payloadSize - 1; ++i) + writer.write(this->messageOutstanding->buffer.data[i + this->assemblyTransmitted], 8); + writer.write(bodyCrc, 8); + + this->assemblyTransmitted += payloadSize - 1; + + return true; +} + +void OutgoingTransactionManager::expired(const void * tag) +{ + writeToWindow(false); +} + +void OutgoingTransactionManager::cancel(OutgoingTransactionManagerEventSink * sink) +{ + if (activeMessage && activeMessage->eventSink == sink) + activeMessage->eventSink = 0; + + for (ListElement * el = queuedMessages.begin(); el && el!=queuedMessages.end(); el = el->next) + if (((OutgoingMessage *)el)->eventSink == sink) + ((OutgoingMessage *)el)->eventSink = 0; +} + +bool OutgoingTransactionManager::send( EncodedMessage & payload, OutgoingTransactionManagerEventSink * sink) +{ + OutgoingMessage * om = new OutgoingMessage(); + + if (!om) + { + return false; + } + + om->eventSink = sink; + om->message.swap(payload); + + if (!activeMessage) + { + activeMessage = om; + transactionSplitter.set(&om->message); + transactionSplitter.get(this->assemblyBuffer); + writeToWindow(true); + } + else + { + queuedMessages.insertBack(om); + } + + return true; +} + +void OutgoingTransactionManager::writeToWindow( bool firstAttempt) +{ + AuxRetry::status result; + + if (!activeMessage || !activeMessage->eventSink) + goto findNextMessage; + + result = this->writeMessageBox(assemblyBuffer.data, assemblyBuffer.length); + + if (result == AuxRetry::defer) + { + + // + // if retries left; queue one. + // + if (firstAttempt || retriesLeft ) + { + if (firstAttempt) + { + // initialize retriesLeft + retriesLeft = DOWNSTREAM_RETRY_ON_DEFER_COUNT; + } + + retriesLeft--; + DP_LOG(("DP-MM> Messagebox write defer-ed. Q-ing retry.")); + this->timer->queueCallback(this, "SPDE", DOWNSTREAM_RETRY_ON_DEFER_PERIOD); + + return; + } + + // + // Notify message sender of failure. Keep in mind sender + // might turn around immediately with a queue'd send. + // + if (activeMessage) + { + activeMessage->eventSink->splitterFailed(this); + } + + goto findNextMessage; + } + else if (result == AuxRetry::ack) + { + // + // Split off another chunk and transmit + // + if (transactionSplitter.get(assemblyBuffer)) + { + writeToWindow(true); + } + else + { + // + // Notify message sender of success. Keep in mind sender + // might turn around immediately with a queue'd send. + // + if (activeMessage) + { + activeMessage->eventSink->splitterTransmitted(this); + } + + goto findNextMessage; + } + + return; + } + + // + // Notify message sender of failure. Keep in mind sender + // might turn around immediately with a queued send. + // + if (activeMessage) + { + activeMessage->eventSink->splitterFailed(this); + } + +findNextMessage: + // + // The old transaction is complete. 
Free the memory + // + delete activeMessage; + activeMessage = 0; + + // + // Look for the next transaction + // + if (queuedMessages.isEmpty()) + { + return; + } + else + { + activeMessage = (OutgoingMessage *)queuedMessages.begin(); + queuedMessages.remove(activeMessage); + + transactionSplitter.set(&activeMessage->message); + transactionSplitter.get(this->assemblyBuffer); + writeToWindow(true); + } +} + +OutgoingTransactionManager::OutgoingTransactionManager(Timer * timer) + : timer(timer) +{ + this->activeMessage = 0; +} + +AuxRetry::status DownRequestManager::writeMessageBox(NvU8 * data, size_t length) +{ + return hal->writeDownRequestMessageBox(data, length); +} + +size_t DownRequestManager::getMessageBoxSize() +{ + return hal->getDownRequestMessageBoxSize(); +} + +AuxRetry::status UpReplyManager::writeMessageBox(NvU8 * data, size_t length) +{ + return hal->writeUpReplyMessageBox(data, length); +} + +size_t UpReplyManager::getMessageBoxSize() +{ + return hal->getUpReplyMessageBoxSize(); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp new file mode 100644 index 0000000..76ff92a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp @@ -0,0 +1,342 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_sst_edid.c * +* Implementation Single Stream EDID reads * +* * +\***************************************************************************/ + +#include "dp_buffer.h" +#include "dp_auxbus.h" +#include "dp_internal.h" +#include "dp_edid.h" + +using namespace DisplayPort; + +/* +* seg -> 256 segment of EDID +* offset -> offset within segment +*/ +static bool readNextBlock(AuxBus * auxBus, NvU8 seg, NvU8 offset, Buffer & buffer, unsigned & totalRead, unsigned DDCAddress, Timer * timer) +{ + AuxBus::Type type = AuxBus::i2cMot; + AuxBus::status auxStatus; + + unsigned retries = 0; + unsigned sizeRequested; + unsigned sizeCompleted; + unsigned transactionSize = auxBus->transactionSize(); + totalRead = 0; + + DP_ASSERT(auxBus); + DP_ASSERT(transactionSize > 0); + + // ASSERT if edidOffset offset wasn't increased in block len sizes + DP_ASSERT(offset == 0 || offset == EDID_BLOCK_SIZE); + + sizeRequested = transactionSize; + if (!buffer.resize(EDID_BLOCK_SIZE)) + { + return false; + } + + DP_ASSERT(sizeof(seg) == 1); + DP_ASSERT(sizeof(offset) == 1); + + // only set segment if it is required + if (seg) + { + // start EDID read by specifying appropriate Edid segment id + for (unsigned retry = 0; retry < EDID_MAX_AUX_RETRIES; retry++) + { + auxStatus = auxBus->transaction(AuxBus::write, AuxBus::i2cMot, EDID_SEG_SELECTOR_OFFSET >> 1, + &seg, sizeof(seg), &sizeCompleted); + if (auxStatus == AuxBus::success) + break; + + // If deferred due to timeout + if (auxStatus == AuxBus::defer) + { + // Wait for sometime between retries + timer->sleep(EDID_AUX_WAIT_TIME); + continue; + } + + return false; + } + } + + auxStatus = AuxBus::nack; + + for (retries = 0; totalRead < EDID_BLOCK_SIZE;) + { + // + // For retry, rewrite the Offset for the internal read pointer + // except when the previous Read auxstatus was an Aux::defer + // since in that case, the offset was never incremented by sink + // + if ((auxStatus != AuxBus::success) && (auxStatus != AuxBus::defer)) + { + // start from this offset, need to verify with display with multiple edid blocks + for (unsigned retry = 0; retry < EDID_MAX_AUX_RETRIES; retry++) + { + auxStatus = auxBus->transaction(AuxBus::write, AuxBus::i2cMot, DDCAddress >> 1, + (NvU8*)(&offset), sizeof(offset), &sizeCompleted); + if (auxStatus == AuxBus::success) + break; + // If deferred due to timeout + if (auxStatus == AuxBus::defer) + { + // Wait for sometime between retries + timer->sleep(EDID_AUX_WAIT_TIME); + continue; + } + + return false; + } + // if retries exceed EDID_MAX_AUX_RETRIES, give up + if (auxStatus != AuxBus::success) + { + return false; + } + } + // need to change to I2C (not MOT) to read just one last part of EDID block + if (totalRead + transactionSize >= EDID_BLOCK_SIZE) + type = AuxBus::i2c; + + sizeRequested = DP_MIN(transactionSize, EDID_BLOCK_SIZE - totalRead); + auxStatus = auxBus->transaction(AuxBus::read, type, DDCAddress >> 1, + &(buffer.data[totalRead]), sizeRequested, &sizeCompleted); + + if (AuxBus::success != auxStatus || (sizeRequested && (sizeCompleted == 0))) + { + if (retries >= EDID_MAX_AUX_RETRIES) + return false; + + DP_LOG(("DisplayPort: %s: Retrying at totalRead 0x%08x (replyType %x, size %x)", + __FUNCTION__, totalRead, auxStatus, sizeRequested)); + + // Wait for sometime between retries + timer->sleep(EDID_AUX_WAIT_TIME); + retries++; + + continue; + } + + // Assert when size mismatches and it is not last block + if ((sizeRequested != 
sizeCompleted) && + (totalRead + transactionSize < EDID_BLOCK_SIZE)) + { + DP_LOG(("DisplayPort: %s: dpAux returned edid block smaller than expected. Read from totalRead 0x%08x (replyType %x, size %x)", + __FUNCTION__, totalRead, auxStatus, sizeRequested)); + DP_ASSERT(0); + } + + retries = 0; // reset the number of retries + totalRead += sizeCompleted; + offset += (NvU8)sizeCompleted; + } + + return true; +} + +/*! +* @return: true => EDID read is success, false => read is failure +*/ +static bool sstReadEdid(AuxBus * auxBus, Edid & edid, unsigned DDCAddr, Timer * timer, bool pendingTestRequestEdidRead) +{ + // + // If there is pending test request for edid read, + // ask edidReaderManager to take whatever posted, + // instead of discarding bytes read by a failed read. + // Because cert devices may need to see the checksum of these bytes, + // even if they seem corrupted. + // + EdidAssembler edidReaderManager(&edid, pendingTestRequestEdidRead); + NvU32 retryCount = 0; + Buffer buffer; + if (!buffer.resize(EDID_BLOCK_SIZE)) + { + return false; + } + + DP_ASSERT(auxBus); + + do + { + NvU8 seg = 0; + NvU8 offset = 0; + unsigned totalRead = 0; + edidReaderManager.reset(); + + // start by reading first EDID block, posting it and analyzing for next request + do + { + bool success = readNextBlock(auxBus, seg, offset, buffer, totalRead, DDCAddr, timer); + edidReaderManager.postReply(buffer, totalRead, success); + } + while (edidReaderManager.readNextRequest(seg, offset)); + if (!edid.isPatchedChecksum()) + break; + } while (retryCount++ < EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT); + + // + // EDID read is successful when + // 1. read was done to the end (i.e. no corruption, no blocks exceeding retry count) + // 2. EDID CRC is correct + // + return edidReaderManager.readIsComplete(); +} + +EDID_DDC DisplayPort::sstDDCPing(AuxBus & dpAux) +{ + unsigned sizeRequested = 0, sizeCompleted; + AuxBus::status auxStatus = AuxBus::nack; + NvU8 offset = 0; + unsigned ddcAddrIdx; + + for (ddcAddrIdx = 0; ddcAddrIdx < ddcAddrListSize; ddcAddrIdx++) + { + // + // Don't use an I2C write. Some devices erroneously ACK on the write + // + auxStatus = dpAux.transaction(AuxBus::read, AuxBus::i2c, ddcAddrList[ddcAddrIdx] >> 1, + &offset, sizeRequested, &sizeCompleted); + + if (AuxBus::success == auxStatus) + return (EDID_DDC)ddcAddrList[ddcAddrIdx]; + } + + return EDID_DDC_NONE; + +} + +bool DisplayPort::EdidReadSST(Edid & edid, AuxBus * auxBus, Timer* timer, + bool pendingTestRequestEdidRead, bool bBypassAssembler, + MainLink * main) +{ + Edid previousEdid; + Buffer *buffer; + bool status; + bool firstTrial = true; + NvU64 startTime, elapsedTime; + for (unsigned i = 0; i < ddcAddrListSize; i++) + { + startTime = timer->getTimeUs(); + elapsedTime = 0; + do + { + // + // Client asks to use RM control code to fetch EDID. + // + if (bBypassAssembler && main) + { + unsigned blockCnt; + buffer = edid.getBuffer(); + if (!buffer->resize(EDID_BLOCK_SIZE)) + { + return false; + } + status = main->fetchEdidByRmCtrl(buffer->getData(), buffer->getLength()); + + if (status) + { + blockCnt = edid.getBlockCount(); + + // If read successfully, check if there are two or more blocks. + if (blockCnt != 1) + { + if (!buffer->resize(EDID_BLOCK_SIZE * blockCnt)) + { + return false; + } + status = main->fetchEdidByRmCtrl(buffer->getData(), buffer->getLength()); + } + } + if (!status) + { + // + // If fetchEdidByRmCtrl fails for some reasons: + // Try to read again using DPLib read function. 
+ // One reason client to request read from RM is to making sure + // the EDID is overridden (regkey or others). So call the RM + // control call to apply the EDID overrides. + // + status = sstReadEdid(auxBus, edid, ddcAddrList[i], timer, + pendingTestRequestEdidRead); + if (status) + { + main->applyEdidOverrideByRmCtrl(buffer->getData(), + buffer->getLength()); + } + else + { + DP_LOG(("EDID> Failed to read EDID from RM and DPLib")); + } + } + } + else + { + // + // If there is pending test request for edid read, make sure we get the raw bytes without check. + // Because cert devices may need to see the checksum of whatever is read for edid, even if they seem corrupted. + // + status = sstReadEdid(auxBus, edid, ddcAddrList[i], timer, pendingTestRequestEdidRead); + + } + + if (status) + { + if (edid.verifyCRC()) + { + return true; + } + else + { + if (firstTrial) // first failure? + { + previousEdid.swap(edid); + firstTrial = false; + } + else + { + if (previousEdid == edid) + { + // we got the same invalid checksum again; we will assume it is valid. + edid.setForcedEdidChecksum(true); + return true; + } + } + } + } + elapsedTime = timer->getTimeUs() - startTime; + timer->sleep(1); + } while (elapsedTime < (EDID_READ_RETRY_TIMEOUT_MS * 1000)); + } + + DP_LOG(("EDID> Failed to ping sst DDC addresses")); + + return false; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp new file mode 100644 index 0000000..f34431b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp @@ -0,0 +1,199 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_timer.cpp * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_timer.h" +using namespace DisplayPort; + +void Timer::expired() +{ + fire(false); +} + +// Take care, this function is re-entrant. +// Consider that sleep() is effectively a call to fire(). +// Clients may sleep in response to a timer callback. 
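+// Illustrative re-entrancy scenario (client behavior assumed for the example):
+//   fire() -> target->expired(context) -> client calls timer->sleep(...)
+//          -> sleep() -> _pump() -> fire()   // nested invocation
+// The nested fire() may free or add PendingCallback entries, which is why the
+// dispatch loop below restarts from the head of the list after every callback
+// instead of trusting a possibly stale iterator.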
+unsigned Timer::fire(bool fromSleep) // returns min time to next item to be fired +{ + restart: + + NvU64 now = getTimeUs(); + NvU64 nearest = (NvU64)-1; + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); ) + { + if (fromSleep && !i->executeInSleep) { + i = (PendingCallback*)i->next; + continue; + } + + if (now >= i->timestamp) + { + const void * context = i->context; + TimerCallback * target = i->target; + delete i; + if (target) + target->expired(context); // Take care, the client may have made + // a recursive call to fire in here. + // Easy solution: Restart at front of list. + // current time may have also changed + // drastically from a nested sleep + goto restart; + } + else + { + if (i->timestamp < nearest) + nearest = i->timestamp; + i = (PendingCallback*)i->next; + } + } + unsigned minleft = (unsigned)((nearest - now + 999)/ 1000); + return minleft; +} + +void Timer::_pump(unsigned milliseconds, bool fromSleep) +{ + do + { + unsigned amt = fire(fromSleep); + if (amt >= milliseconds) { + raw->sleep(milliseconds); + return; + } + raw->sleep(amt); + milliseconds-=amt; + } while(milliseconds); +} + +// +// Queue a timer callback. +// Unless the dont-execute-in-sleep flag is set +// +void Timer::queueCallback(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep) +{ + NvU64 now = getTimeUs(); + PendingCallback * callback = new PendingCallback(); + if (callback == NULL) + { + DP_LOG(("DP> %s: Failed to allocate callback", + __FUNCTION__)); + return; + } + callback->target = target; + callback->context = context; + callback->timestamp = now + milliseconds * 1000; + callback->executeInSleep = executeInSleep; + pending.insertBack(callback); + raw->queueCallback(this, milliseconds); +} + +NvU64 Timer::getTimeUs() +{ + return raw->getTimeUs(); +} + +// Sleep a number of milliseconds. +// timer callbacks will be serviced! +void Timer::sleep(unsigned milliseconds) +{ + _pump(milliseconds, true); +} + +void Timer::cancelCallbacks(Timer::TimerCallback * to) +{ + if (!to) + return; + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if (i->target == to) + i->target = 0; +} + +void Timer::cancelCallback(Timer::TimerCallback * to, const void * context) +{ + if (!to) + return; + for (PendingCallback * i = (PendingCallback *)pending.begin(); i!=pending.end(); i = (PendingCallback*)i->next) + if (i->target == to && i->context == context) + i->target = 0; +} + +// Queue callbacks in order. +void Timer::queueCallbackInOrder(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep) +{ + NvU64 now = getTimeUs(); + PendingCallback * callback = new PendingCallback(); + callback->target = target; + callback->context = context; + callback->timestamp = now + milliseconds * 1000; + callback->executeInSleep = executeInSleep; + + //Figure out where to insert the current callback + Timer::PendingCallback* i; + + for (i = (PendingCallback*)pending.begin(); i != pending.end();) + { + // only for the given context. 
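+        // Ordering sketch (illustrative values): if this context already has
+        // entries at t=5 and t=9 and the new callback lands at t=7, the loop
+        // breaks at the t=9 entry and insertBefore() yields {5, 7, 9}, so
+        // same-context callbacks fire in timestamp order; entries for other
+        // contexts are skipped and keep their positions.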
+ if(i->context == context) + { + if(i->timestamp > callback->timestamp) + break; + } + i = (PendingCallback*) i->next; + } + if (i == pending.end()) + { + pending.insertBack(callback); + } + else + { + pending.insertBefore(i, callback); + } + + raw->queueCallback(this, milliseconds); +} + +void Timer::cancelAllCallbacks() +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + i->target = 0; +} + +void Timer::cancelCallbacksWithoutContext(const void * context) +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if(i->context != context) + i->target = 0; +} + +bool Timer::checkCallbacksOfSameContext(const void * context) +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if(i->context == context) + return true; + + return false; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp new file mode 100644 index 0000000..f2253f8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp @@ -0,0 +1,247 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_vrr.cpp * +* Implementation of VRR enablement * +* * +\***************************************************************************/ +#include "dp_connectorimpl.h" +#include "dp_vrr.h" + +using namespace DisplayPort; + +bool VrrEnablement::start() +{ + bool rc; + + DP_LOG(("DPHAL_VRR_ENABLE> **** VRR Enablement Started ****")); + rc = vrrGetPublicInfo(); + if(rc) + { + rc = vrrEnableMonitor(); + if(rc != true) + { + return false; + } + rc = vrrEnableDriver(); + if(rc != true) + { + return false; + } + } + else + { + return false; + } + + DP_LOG(("DPHAL_VRR_ENABLE> **** VRR Enablement Ends ****")); + + return true; +} + +bool VrrEnablement::vrrGetPublicInfo() +{ + MainLink *main = this->parent->connector->main; + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_INIT_PUBLIC_INFO, NULL) != true) + { + return false; + } + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + else + { + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_GET_PUBLIC_INFO, NULL) != true) + { + return false; + } + + return vrrWaitOnEnableStatus(); +} + +bool VrrEnablement::vrrEnableMonitor() +{ + MainLink *main = this->parent->connector->main; + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_MON_ENABLE starts **")); + + // Always set the enable F/W state m/c to a known state. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE, NULL); + + // Wait for VRR to be ready. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + // Compare and enable on successful comparison. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK, NULL) == true) + { + this->bMonitorEnabled = true; + } + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_MON_ENABLE ends **")); + + return this->bMonitorEnabled; +} + +bool VrrEnablement::vrrEnableDriver() +{ + NvU32 enableResult; + + MainLink *main = this->parent->connector->main; + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_DRV_ENABLE starts **")); + + // Always set the enable F/W state m/c to a known state. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN, &enableResult) != true) + { + return false; + } + + if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING) + { + // Wait for VRR to be ready. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + } + else if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK) + { + return true; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. 
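+    //
+    // Stage sequence used by the enablement helpers in this file (summary):
+    //   RESET_MONITOR -> poll -> *_ENABLE_BEGIN -> poll ->
+    //   *_ENABLE_CHALLENGE -> poll -> *_ENABLE_CHECK
+    // where each poll is a vrrWaitOnEnableStatus() call, like the one below,
+    // retrying VRR_ENABLE_STAGE_STATUS_CHECK until ready or timed out.
+    //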
+ if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, NULL) != true) + { + return false; + } + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_DRV_ENABLE ends **")); + + return true; +} + +bool VrrEnablement::vrrWaitOnEnableStatus(void) +{ + NvU32 timeout = VRR_ENABLE_STATUS_TIMEOUT_THRESHOLD; + NvU32 enableResult; + + MainLink *main = this->parent->connector->main; + ConnectorImpl *connector = this->parent->connector; + do + { + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_STATUS_CHECK, &enableResult) == true) + { + return true; + } + else + { + if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR) + { + return false; + } + else if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING) + { + Timeout timeout(connector->timer, VRR_ENABLE_STATUS_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } + else + { + return false; + } + } + }while(--timeout); + + return false; +} + +bool VrrEnablement::isMonitorEnabled(void) +{ + return (this->bMonitorEnabled); +} + +bool VrrEnablement::isDriverEnabled(void) +{ + NvU32 enableResult; + MainLink *main = this->parent->connector->main; + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, + &enableResult) == true) + { + return true; + } + return false; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp new file mode 100644 index 0000000..7e4ee9a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp @@ -0,0 +1,645 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_wardatabase.cpp * +* EDID and OUI based workarounds for panel/TCON issues * +* * +\***************************************************************************/ +#include "dp_wardatabase.h" +#include "dp_edid.h" +#include "dp_connectorimpl.h" + +using namespace DisplayPort; + +void ConnectorImpl::applyOuiWARs() +{ + switch (ouiId) + { + // Megachips Mystique + case 0xE18000: + if (((modelName[0] == 'D') && (modelName[1] == 'p') && (modelName[2] == '1') && + (modelName[3] == '.') && (modelName[4] == '1'))) + { + // + // Mystique based link box for HTC Vive has a peculiar behaviour + // of sending a link retraining pulse if the link is powered down in the absence + // of an active stream. Bug# 1793084. Set the flag so that link is not powered down. + // + bKeepOptLinkAlive = true; + } + + if (((modelName[0] == 'D') && (modelName[1] == 'p') && (modelName[2] == '1') && + (modelName[3] == '.') && (modelName[4] == '2'))) + { + // + // ASUS monitor loses link sometimes during assessing link or link training. + // So if we retrain link by lowering config from HBR2 to HBR we see black screen + // Set the flag so that we first retry link training with same link config + // before following link training fallback. Bug #1846925 + // + bNoFallbackInPostLQA = true; + } + break; + + // Synaptics + case 0x24CC90: + if ((modelName[0] == 'S') && (modelName[1] == 'Y') && (modelName[2] == 'N') && + (modelName[3] == 'A') && (modelName[4] == 'S') && + ((modelName[5] == '1') || (modelName[5] == '2') || + (modelName[5] == '3') || (modelName[5] == '#') || + (modelName[5] == '\"'))) + { + // + // Extended latency from link-train end to FEC enable pattern + // to avoid link lost or blank screen with Synaptics branch. + // (Bug 2561206) + // + // Dock SKU ID: + // Dell Salomon-WD19TB SYNAS1 + // HP Hook SYNAS3 + // HP Adira-A SYNAS# + // Lenovo SYNAS" / SYNAS2 + // + LT2FecLatencyMs = 57; + + if (bDscMstCapBug3143315) + { + // + // Synaptics branch device doesn't support Virtual Peer Devices so DSC + // capability of downstream device should be decided based on device's own + // and its parent's DSC capability + // + bDscCapBasedOnParent = true; + } + } + break; + } +} + +void Edid::applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDenylistData) +{ + + unsigned ManufacturerID = this->getManufId(); + unsigned ProductID = this->getProductId(); + unsigned YearWeek = this->getYearWeek(); + + // + // Work around EDID problems, using manufacturer, product ID, and date of manufacture, + // to identify each case. + // + switch (ManufacturerID) + { + // Apple + case 0x1006: + if (0x9227 == ProductID) + { + this->WARFlags.powerOnBeforeLt = true; + DP_LOG(("DP-WAR> WAR for Apple thunderbolt J29 panel")); + DP_LOG(("DP-WAR> - Monitor needs to be powered up before LT. Bug 933051")); + } + break; + + // Acer + case 0x7204: + // Bug 451868: Acer AL1512 monitor has a wrong extension count: + if(0xad15 == ProductID && YearWeek <= 0x0d01) + { + // clear the extension count + buffer.data[0x7E] = 0; + this->WARFlags.extensionCountDisabled = true; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid override on Acer AL1512")); + DP_LOG(("DP-WAR> - Disabling extension count.Bug 451868")); + } + break; + + // Westinghouse + case 0x855C: + + // Westinghouse 37" 1080p TV. LVM-37w3 (Port DVI1 EDID). + // Westinghouse 42" 1080p TV. LVM-42w2 (Port DVI1 EDID). 
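+        // EDID/CEA-861 offsets tested below (standard layout, for reference):
+        //   0x7E = extension block count, 0x80 = extension tag (0x02 = CEA),
+        //   0x81 = CEA revision, 0x83 bit 6 (0x40) = basic audio support.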
if (ProductID == 0x3703 || ProductID == 0x4202)
+        {
+            // Claims HDMI support, but audio causes picture corruption.
+            // Removing HDMI extension block
+
+            if (buffer.getLength() > 0x80 &&
+                buffer.data[0x7E] == 1 &&    // extension block present
+                buffer.data[0x80] == 0x02 && // CEA block
+                buffer.data[0x81] == 0x03 && // revision 3
+                !(buffer.data[0x83] & 0x40)) // No basic audio, must not be the HDMI port
+            {
+                // clear the extension count
+                buffer.data[0x7E] = 0;
+                this->WARFlags.extensionCountDisabled = true;
+                this->WARFlags.dataForced = true;
+                DP_LOG(("DP-WAR> Edid override on Westinghouse LVM- <37/42> w <2/3>"));
+                DP_LOG(("DP-WAR> - Disabling extension count."));
+            }
+        }
+        break;
+
+    // IBM
+    case 0x4D24:
+        if (ProductID == 0x1A03)
+        {
+            // 2001 Week 50
+            if (YearWeek == 0x0B32)
+            {
+                // Override IBM T210. IBM T210 reports 2048x1536x60Hz in the edid but it's
+                // actually 2048x1536x40Hz. See bug 76347. This hack was, earlier, in the
+                // disp driver. Now it's being moved down to keep all overrides in the same
+                // place. This hack was also preventing the disp driver from comparing the
+                // entire edid when trying to figure out whether or not the edid for some
+                // device has changed.
+                buffer.data[0x36] = 0x32;
+                buffer.data[0x37] = 0x3E;
+                this->WARFlags.dataForced = true;
+                DP_LOG(("DP-WAR> Edid override on IBM T210"));
+                DP_LOG(("DP-WAR> 2048x1536x60Hz(misreported) -> 2048x1536x40Hz. Bug 76347"));
+            }
+        }
+        break;
+    // GWY (Gateway) or EMA (eMachines)
+    case 0xF91E: // GWY
+    case 0xA115: // EMA
+        // Some Gateway monitors present the eMachines mfg code, so these two cases are combined.
+        // Future fixes may require the two cases to be separated.
+        // Fix for Bug 343870. NOTE: Problem found on G80; fix applied to all GPUs.
+        if ((ProductID >= 0x0776) && (ProductID <= 0x0779)) // Product IDs range from decimal 1910 to 1913
+        {
+            // if detailed pixel clock frequency = 106.50MHz
+            if ((buffer.data[0x36] == 0x9A) &&
+                (buffer.data[0x37] == 0x29))
+            {
+                // then change detailed pixel clock frequency to 106.54MHz to fix bug 343870
+                buffer.data[0x36] = 0x9E;
+                buffer.data[0x37] = 0x29;
+                this->WARFlags.dataForced = true;
+                DP_LOG(("DP-WAR> Edid override on GWY/EMA"));
+                DP_LOG(("DP-WAR> 106.50MHz(misreported) -> 106.54MHz. Bug 343870"));
+            }
+        }
+        break;
+
+    // INX
+    case 0x2C0C:
+        // INX L15CX monitor has an invalid detailed timing 10x311 @ 78Hz.
+        if (ProductID == 0x1502)
+        {
+            // remove detailed timing #4: zero out the first 3 bytes of DTD#4 block
+            buffer.data[0x6c] = 0x0;
+            buffer.data[0x6d] = 0x0;
+            buffer.data[0x6e] = 0x0;
+            this->WARFlags.dataForced = true;
+            DP_LOG(("DP-WAR> Edid override on INX L15CX"));
+            DP_LOG(("DP-WAR> Removing invalid detailed timing 10x311 @ 78Hz"));
+        }
+        break;
+
+    // AUO
+    case 0xAF06:
+        if ((ProductID == 0x103C) || (ProductID == 0x113C))
+        {
+            //
+            // Acer has faulty AUO eDP panels which have a
+            // wrong HBlank in the EDID. Correcting it here.
+            //
+            buffer.data[0x39] = 0x4B; // new hblank width: 75
+            buffer.data[0x3F] = 0x1B; // new hsync pulse width: 27
+            this->WARFlags.dataForced = true;
+            DP_LOG(("DP-WAR> Edid override on AUO eDP panel"));
+            DP_LOG(("DP-WAR> Modifying HBlank and HSync pulse width."));
+            DP_LOG(("DP-WAR> Bugs 907998, 1001160"));
+        }
+        else if (ProductID == 0x109B || ProductID == 0x119B)
+        {
+            this->WARFlags.useLegacyAddress = true;
+            DP_LOG(("DP-WAR> AUO eDP"));
+            DP_LOG(("implements only Legacy interrupt address range"));
+
+            // Bug 1792962 - Panel got glitch on D3 write, apply this WAR.
+ this->WARFlags.disableDpcdPowerOff = true; + DP_LOG(("DP-WAR> Disable DPCD Power Off")); + } + break; + + // LPL + case 0x0C32: + if (ProductID == 0x0000) + { + // + // Patch EDID for Quanta - Toshiba LG 1440x900 panel. See Bug 201428 + // Must 1st verify that we have that panel. It has MFG id 32, 0C + // BUT product ID for this (and other different LG panels) are 0000. + // So verify that the last "Custom Timing" area of the EDID has + // a "Monitor Description" of type FE = "ASCII Data String" which + // has this panel's name = "LP171WX2-A4K5". + // + if ( (buffer.data[0x71] == 0x4C) && + (buffer.data[0x72] == 0x50) && + (buffer.data[0x73] == 0x31) && + (buffer.data[0x74] == 0x37) && + (buffer.data[0x75] == 0x31) && + (buffer.data[0x76] == 0x57) && + (buffer.data[0x77] == 0x58) && + (buffer.data[0x78] == 0x32) && + (buffer.data[0x79] == 0x2D) && + (buffer.data[0x7A] == 0x41) && + (buffer.data[0x7B] == 0x34) && + (buffer.data[0x7C] == 0x4B) && + (buffer.data[0x7D] == 0x35) ) + { + // + // Was 0x95, 0x25 = -> 0x2595 = 9621 or 96.21 Mhz. + // 96,210,000 / 1760 / 912 = 59.939 Hz + // Want 60 * 1760 * 912 ~= 9631 or 96.31 MHz + // 9631 = 0x259F -> 0x9F 0x25. + // So, change byte 36 from 0x95 to 0x9F. + // + buffer.data[0x36] = 0x9F; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on Quanta - Toshiba LG 1440x900")); + DP_LOG(("DP-WAR> Correcting pclk. Bug 201428")); + } + } + else + if (ProductID == 0xE300) + { + // + // Patch EDID for MSI - LG LPL 1280x800 panel. See Bug 359313 + // Must 1st verify that we have that panel. It has MFG id 32, 0C + // BUT product ID for this (and other different LG panels) are E300. + // So verify that the last "Custom Timing" area of the EDID has + // a "Monitor Description" of type FE = "ASCII Data String" which + // has this panel's name = "LP154WX4-TLC3". + // + if ( (buffer.data[0x71] == 0x4C) && + (buffer.data[0x72] == 0x50) && + (buffer.data[0x73] == 0x31) && + (buffer.data[0x74] == 0x35) && + (buffer.data[0x75] == 0x34) && + (buffer.data[0x76] == 0x57) && + (buffer.data[0x77] == 0x58) && + (buffer.data[0x78] == 0x34) && + (buffer.data[0x79] == 0x2D) && + (buffer.data[0x7A] == 0x54) && + (buffer.data[0x7B] == 0x4C) && + (buffer.data[0x7C] == 0x43) && + (buffer.data[0x7D] == 0x33) ) + { + // + // Was 0xBC, 0x1B = -> 0x1BBC = 7100 or 71.00 Mhz. + // 71,000,000 / 1488 / 826 = 59.939 Hz + // Want 60 * 1488 * 826 ~= 7111 or 71.11 MHz + // 7111 = 0x1BC7 -> 0xC7 0x1B. + // So, change byte 36 from 0xBC to 0xC7. + // + buffer.data[0x36] = 0xC7; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on MSI - LG LPL 1280x800")); + DP_LOG(("DP-WAR> Correcting pclk. Bug 359313")); + } + } + break; + + // SKY + case 0x794D: + if (ProductID == 0x9880) + { + // + // Override for Haier TV to remove resolution + // 1366x768 from EDID data. Refer bug 351680 & 327891 + // Overriding 18 bytes from offset 0x36. + // + buffer.data[0x36] = 0x01; + buffer.data[0x37] = 0x1D; + buffer.data[0x38] = 0x00; + buffer.data[0x39] = 0x72; + buffer.data[0x3A] = 0x51; + buffer.data[0x3B] = 0xD0; + buffer.data[0x3C] = 0x1E; + buffer.data[0x3D] = 0x20; + buffer.data[0x3E] = 0x6E; + buffer.data[0x3F] = 0x28; + buffer.data[0x40] = 0x55; + buffer.data[0x41] = 0x00; + buffer.data[0x42] = 0xC4; + buffer.data[0x43] = 0x8E; + buffer.data[0x44] = 0x21; + buffer.data[0x45] = 0x00; + buffer.data[0x46] = 0x00; + buffer.data[0x47] = 0x1E; + + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on Haier TV.")); + DP_LOG(("DP-WAR> Removing 1366x768. 
bug 351680 & 327891")); + + } + break; + // HP + case 0xF022: + switch (ProductID) + { + case 0x192F: + // + // WAR for bug 1643712 - Issue specific to HP Z1 G2 (Zeus) All-In-One + // Putting the Rx in power save mode before BL_EN is deasserted, makes this specific sink unhappy + // Bug 1559465 will address the right power down sequence. We need to revisit this WAR once Bug 1559465 is fixed. + // + this->WARFlags.disableDpcdPowerOff = true; + DP_LOG(("DP-WAR> Disable DPCD Power Off")); + DP_LOG(("DP-WAR> HP Z1 G2 (Zeus) AIO Bug 1643712")); + break; + } + break; + + // Sharp + case 0x104d: + switch (ProductID) + { + case 0x141c: // HP Valor QHD+ N15P-Q3 Sharp EDP + // + // HP Valor QHD+ N15P-Q3 EDP needs 50 ms delay + // after D3 to avoid black screen issues. + // + this->WARFlags.delayAfterD3 = true; + DP_LOG(("DP-WAR> HP Valor QHD+ N15P-Q3 Sharp EDP needs 50 ms after D3")); + DP_LOG(("DP-WAR> bug 1520011")); + break; + + //Sharp EDPs that declares DP1.2 but doesn't implement ESI address space + case 0x1414: + case 0x1430: + case 0x1445: + case 0x1446: + case 0x144C: + case 0x1450: + case 0x1467: + case 0x145e: + // + // Use Legacy address space for DP1.2 panel + // + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> Sharp EDP implements only Legacy interrupt address range")); + break; + + case 0x143B: + // + // Bug 200113041 + // Need to be unique to identify this Sharp panel. Besides + // manufacturer ID and ProductID, we have to add the mode + // name to make this happen as LQ156D1JW05 in ASCII. + // + if ((buffer.data[0x71] == 0x4C) && + (buffer.data[0x72] == 0x51) && + (buffer.data[0x73] == 0x31) && + (buffer.data[0x74] == 0x35) && + (buffer.data[0x75] == 0x36) && + (buffer.data[0x76] == 0x44) && + (buffer.data[0x77] == 0x31) && + (buffer.data[0x78] == 0x4A) && + (buffer.data[0x79] == 0x57) && + (buffer.data[0x7A] == 0x30) && + (buffer.data[0x7B] == 0x35) && + (buffer.data[0x7C] == 0x0A) && + (buffer.data[0x7D] == 0x20)) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> Sharp EDP implements only Legacy interrupt address range")); + } + break; + } + break; + + // EIZO + case 0xc315: + if (ProductID == 0x2227) + { + // + // The EIZO FlexScan SX2762W generates a redundant long HPD + // pulse after a modeset, which triggers another modeset on GPUs + // without flush mode, triggering an infinite link training + // loop. + // + this->WARFlags.ignoreRedundantHotplug = true; + DP_LOG(("DP-WAR> EIZO FlexScan SX2762W generates redundant")); + DP_LOG(("DP-WAR> hotplugs (bug 1048796)")); + break; + } + break; + + // MEI-Panasonic + case 0xa934: + if (ProductID == 0x96a2) + { + // + // Bug 200113041 + // Need to be unique to identify this MEI-Panasonic panel. + // Besides manufacturer ID and ProductID, we have to add the + // model name to make this happen as VVX17P051J00^ in ASCII. 
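+            // (Display-descriptor bytes 0x71..0x7D hold that ASCII string;
+            // the comparison below matches it byte by byte, 0x0A being the
+            // trailing newline.)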
+ // + if ((buffer.data[0x71] == 0x56) && + (buffer.data[0x72] == 0x56) && + (buffer.data[0x73] == 0x58) && + (buffer.data[0x74] == 0x31) && + (buffer.data[0x75] == 0x37) && + (buffer.data[0x76] == 0x50) && + (buffer.data[0x77] == 0x30) && + (buffer.data[0x78] == 0x35) && + (buffer.data[0x79] == 0x31) && + (buffer.data[0x7A] == 0x4A) && + (buffer.data[0x7B] == 0x30) && + (buffer.data[0x7C] == 0x30) && + (buffer.data[0x7D] == 0x0A)) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> MEI-Panasonic EDP")); + DP_LOG(("implements only Legacy interrupt address range")); + } + } + break; + + // LG + case 0xE430: + if (ProductID == 0x0469) + { + // + // The LG display can't be driven at FHD with 2*RBR. + // Force max link config + // + this->WARFlags.forceMaxLinkConfig = true; + DP_LOG(("DP-WAR> Force maximum link config WAR required on LG panel.")); + DP_LOG(("DP-WAR> bug 1649626")); + break; + } + break; + case 0x8F34: + if (ProductID == 0xAA55) + { + this->WARFlags.forceMaxLinkConfig = true; + DP_LOG(("DP-WAR> Force maximum link config WAR required on Sharp-CerebrEx panel.")); + } + break; + + // Dell + case 0xAC10: + // Dell U2713H has problem with LQA. Disable it. + if ((ProductID == 0xA092) || (ProductID == 0xF046)) + { + this->WARFlags.reassessMaxLink = true; + } + break; + + // CMN + case 0xAE0D: + if (ProductID == 0x1747) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> CMN eDP")); + DP_LOG(("implements only Legacy interrupt address range")); + } + break; + + // BenQ + case 0xD109: + if ((ProductID == 0x7F2B) || (ProductID == 0x7F2F)) + { + this->WARFlags.ignoreRedundantHotplug = true; + DP_LOG(("DP-WAR> BenQ GSync power on/off redundant hotplug")); + } + break; + + // MSI + case 0x834C: + if (ProductID == 0x4C48) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> MSI eDP\n")); + DP_LOG(("implements only Legacy interrupt address range\n")); + } + break; + + // Unigraf + case 0xC754: + case 0x1863: + { + DP_LOG(("DP-WAR> Unigraf device, keep link alive during detection\n")); + this->WARFlags.keepLinkAlive = true; + } + break; + + // BOE + case 0xE509: + if ((ProductID == 0x977) || (ProductID == 0x974) || (ProductID == 0x9D9)) + { + this->WARFlags.bIgnoreDscCap = true; + DP_LOG(("DP-WAR> BOE panels incorrectly exposing DSC capability. Ignoring it.")); + } + break; + + // NCP + case 0x7038: + if ((ProductID == 0x005F)) + { + this->WARFlags.bIgnoreDscCap = true; + DP_LOG(("DP-WAR> NCP panels incorrectly exposing DSC capability. Ignoring it.")); + } + break; + + // + // This panel advertise DSC capabilities, but panel doesn't support DSC + // So ignoring DSC capability on this panel + // + case 0x6F0E: + if (ProductID == 0x1609) + { + this->WARFlags.bIgnoreDscCap = true; + DP_LOG(("DP-WAR> Ignoring DSC capability on Lenovo CSOT 1609 Panel.")); + DP_LOG(("DP-WAR> Bug 3444252")); + } + break; + + default: + break; + } + + // Find out if the monitor needs a WAR to applied. 
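+    // Illustrative only: warFlag is a bitmask supplied with the denylist data,
+    // so a single monitor entry may request several workarounds at once, e.g.
+    //   warFlag = DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT |
+    //             DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT;
+    // Each bit is tested independently below.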
+ if (warFlag) + { + if (warFlag & DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT) + { + this->WARFlags.skipRedundantLt = true; + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_SKIP_CABLE_BW_CHECK) + { + this->WARFlags.skipCableBWCheck = true; + this->WARData.maxLaneAtHighRate = pDenylistData->dpSkipCheckLink.maxLaneAtHighRate; + this->WARData.maxLaneAtLowRate = pDenylistData->dpSkipCheckLink.maxLaneAtLowRate; + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT) + { + // all HP monitors need to be powered up before link training + this->WARFlags.powerOnBeforeLt = true; + DP_LOG(("DP-WAR> HP monitors need to be powered up before LT")); + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG) + { + // + // Instead of calculating the optimum link config + // based on timing, bpc etc. just used a default + // fixed link config for the monitor for all modes + // + this->WARFlags.overrideOptimalLinkCfg = true; + // Force the fix max LT + this->WARFlags.forceMaxLinkConfig = true; + this->WARData.optimalLinkRate = pDenylistData->dpOverrideOptimalLinkConfig.linkRate; + this->WARData.optimalLaneCount = pDenylistData->dpOverrideOptimalLinkConfig.laneCount; + DP_LOG(("DP-WAR> Overriding optimal link config on Dell U2410.")); + DP_LOG(("DP-WAR> bug 632801")); + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_OVERRIDE_MAX_LANE_COUNT) + { + // + // Some monitors claim more lanes than they actually support. + // This particular Lenovo monitos has just 2 lanes, but its DPCD says 4. + // This WAR is to override the max lane count read from DPCD. + // + this->WARFlags.overrideMaxLaneCount = true; + this->WARData.maxLaneCount = pDenylistData->dpMaxLaneCountOverride; + DP_LOG(("DP-WAR> Overriding max lane count on Lenovo L2440x.")); + DP_LOG(("DP-WAR> bug 687952")); + } + } + + if (this->WARFlags.dataForced) + { + DP_LOG(("DP-WAR> EDID was overridden for some data. Patching CRC.")); + this->patchCrc(); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp new file mode 100644 index 0000000..1dd4363 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp @@ -0,0 +1,872 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_watermark.cpp * +* DP watermark IsModePossible calculations * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_watermark.h" +#include "dp_linkconfig.h" +#include "displayport.h" + +#define FEC_TOTAL_SYMBOLS_PER_BLK(lanes) ((NvU32)((lanes == 1) ? 512U : 256U)) +#define FEC_PARITY_SYMBOLS_PER_BLK(lanes) ((NvU32)((lanes == 1) ? 12U : 6U)) +//return max number of FEC parity symbols in x link clock cycles +#define FEC_PARITY_SYM_SST(lanes, x) (DP_MIN((NvU32)(x) % FEC_TOTAL_SYMBOLS_PER_BLK(lanes), FEC_PARITY_SYMBOLS_PER_BLK(lanes)) + (NvU32)(x) / FEC_TOTAL_SYMBOLS_PER_BLK(lanes) * FEC_PARITY_SYMBOLS_PER_BLK(lanes) + FEC_PARITY_SYMBOLS_PER_BLK(lanes) + 1U) +#define FEC_PARITY_SYM_MST(lanes, x) (DP_MIN((NvU32)(x) % FEC_TOTAL_SYMBOLS_PER_BLK(lanes), FEC_PARITY_SYMBOLS_PER_BLK(lanes)) + (NvU32)(x) / FEC_TOTAL_SYMBOLS_PER_BLK(lanes) * FEC_PARITY_SYMBOLS_PER_BLK(lanes) + 1U) + + +bool DisplayPort::isModePossibleMST +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo +) +{ + // + // For MST, use downspread 0.6% + // + NvU64 linkFreq = linkConfig.peakRate * 994 / 1000; + + // + // This function is for multistream only! + // + DP_ASSERT( linkConfig.multistream ); + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK and DEPTH sent by the client "); + return false; + } + + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + // Extra bits that we need to send + //(hActiveDiv4Remainder > 0 ? (4- hActiveDiv4Remainder) : 0) --> + // Number of extra pixels that we need to insert due to mapping pixels + // to the DP lanes. (4 lanes for MS) + // + // 160 --> Extra bits that we need to send during horizontal blanking + // (BS+VBID+MVID+MAUD+BE) => 5*8*num_lanes + // + // 6 * 4 --> Pixel padding worst case + // + NvU32 minHBlank = ( ((modesetInfo.surfaceWidth % 4) > 0) ? ((4-(modesetInfo.surfaceWidth % 4)) * modesetInfo.depth)/ DSC_FACTOR : 0 ) + (160 + 6 * 4); + + // Rounding to nearest multiple of 32 since we always send 32 bits in one time slice + minHBlank = minHBlank + (32 - minHBlank % 32); + + // bpp - 1 --> Rounding + minHBlank = ((minHBlank * DSC_FACTOR) + modesetInfo.depth - (1 * DSC_FACTOR))/modesetInfo.depth; + + if (minHBlank > modesetInfo.rasterWidth - modesetInfo.surfaceWidth) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value.")); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported.")); + return false; + } + + NvS32 vblank_symbols; + NvS32 hblank_symbols = (NvS32)(((NvU64)(modesetInfo.rasterWidth - modesetInfo.surfaceWidth - minHBlank) * linkFreq) / modesetInfo.pixelClockHz); + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + + hblank_symbols -= linkConfig.lanes == 1 ? 9 : linkConfig.lanes == 2 ? 6 : 3; + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + + // + // Audio IMP calculations + // Perform the related audio calculation to determine the number of extra symbols needed. 
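+    // Worked example (illustrative numbers only): with twoChannelAudioHz = 48000,
+    // rasterWidth = 2200 and pixelClockHz = 148500000, samples per line =
+    // ceil(48000 * 2200 / 148500000) = 1, rounded up to the next even count = 2,
+    // giving twoChannelAudio_symbols = 10 * 2 + 16 = 36 symbols of hblank budget.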
+ // + NvU32 twoChannelAudio_symbols = 0; + + if (modesetInfo.twoChannelAudioHz != 0) + { + // 1-2 channel case + NvU32 samples = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + // Round to the next even sample to account for stuffing (2 ch, 4 lanes) + samples = samples + (2 - samples % 2); + + // Convert sample count to symbols + twoChannelAudio_symbols = 10 * samples + 16; + } + + NvU32 eightChannelAudio_symbols = 0; + if (modesetInfo.eightChannelAudioHz != 0) + { + // 3-8 channel case + NvU32 samples = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + // Convert sample count to symbols + eightChannelAudio_symbols = 40 * samples + 16; + } + + if (dpInfo->hBlankSym < DP_MAX(twoChannelAudio_symbols, eightChannelAudio_symbols)) + { + return false; + } + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 40) * linkFreq) / modesetInfo.pixelClockHz) - 1; + + vblank_symbols -= linkConfig.lanes == 1 ? 39 : linkConfig.lanes == 2 ? 21 : 12; + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols; + + return true; +} + + +bool DisplayPort::isModePossibleSST +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits +) +{ + // + // This function is for single stream only! + // + DP_ASSERT( !linkConfig.multistream ); + + unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST; + unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT; + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + if(bUseIncreasedWatermarkLimits) + { + watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST; + watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT; + } + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK or DEPTH sent by the client "); + return false; + } + // number of link clocks per line. + int vblank_symbols = 0; + NvU64 PrecisionFactor, ratioF, watermarkF; + + NvU32 numLanesPerLink = linkConfig.lanes; + + DP_ASSERT(!linkConfig.multistream && "MST!"); + + // Check if we have a valid laneCount as currently we support only up to 4-lanes + if (!IS_VALID_LANECOUNT(linkConfig.lanes)) + { + // + // Print debug message and Assert. All calculations assume a max of 8 lanes + // & any increase in lanes should cause these calculation to be updated + // + DP_LOG(("NVRM: %s: ERROR: LaneCount - %d is not supported for waterMark calculations.", + __FUNCTION__, linkConfig.lanes)); + DP_LOG(("Current support is only up to 4-Lanes & any change/increase in supported lanes " + "should be reflected in waterMark calculations algorithm. " + "Ex: See calc for minHBlank variable below")); + + DP_ASSERT(0); + return false; + } + + if ((modesetInfo.pixelClockHz * modesetInfo.depth) >= (8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR)) + { + return false; + } + + // + // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with + // 0 active symbols. This may cause HW hang. 
Bug 200379426 + // + if ((modesetInfo.bEnableDsc) && + ((modesetInfo.pixelClockHz * modesetInfo.depth) < ((8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR) / 64))) + { + return false; + } + + // + // Perform the SST calculation. + // For auto mode the watermark calculation does not need to track accumulated error the + // formulas for manual mode will not work. So below calculation was extracted from the DTB. + // + dpInfo->tuSize = 64; + PrecisionFactor = 100000; + ratioF = ((NvU64)modesetInfo.pixelClockHz * modesetInfo.depth * PrecisionFactor) / DSC_FACTOR; + + ratioF /= 8 * (NvU64) linkConfig.minRate * linkConfig.lanes; + + if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below + return false; + + watermarkF = ratioF * dpInfo->tuSize * (PrecisionFactor - ratioF) / PrecisionFactor; + dpInfo->waterMark = (unsigned)(watermarkAdjust + ((2 * (modesetInfo.depth * PrecisionFactor / (8 * numLanesPerLink * DSC_FACTOR)) + watermarkF) / PrecisionFactor)); + + // + // Bounds check the watermark + // + NvU32 numSymbolsPerLine = (modesetInfo.surfaceWidth * modesetInfo.depth) / (8 * linkConfig.lanes * DSC_FACTOR); + + if (dpInfo->waterMark > 39 || dpInfo->waterMark > numSymbolsPerLine) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: watermark should not be greater than 39.")); + return false; + } + + // + // Clamp the low side + // + if (dpInfo->waterMark < watermarkMinimum) + dpInfo->waterMark = watermarkMinimum; + + //Bits to send BS/BE/Extra symbols due to pixel padding + //Also accounts for enhanced framing. + NvU32 BlankingBits = 3*8*numLanesPerLink + (linkConfig.enhancedFraming ? 3*8*numLanesPerLink : 0); + + //VBID/MVID/MAUD sent 4 times all the time + BlankingBits += 3*8*4; + + NvU32 surfaceWidthPerLink = modesetInfo.surfaceWidth; + + //Extra bits sent due to pixel steering + NvU32 PixelSteeringBits = (surfaceWidthPerLink % numLanesPerLink) ? (((numLanesPerLink - surfaceWidthPerLink % numLanesPerLink) * modesetInfo.depth) / DSC_FACTOR) : 0; + + BlankingBits += PixelSteeringBits; + NvU64 NumBlankingLinkClocks = (NvU64)BlankingBits * PrecisionFactor / (8 * numLanesPerLink); + NvU32 MinHBlank = (NvU32)(NumBlankingLinkClocks * modesetInfo.pixelClockHz/ linkConfig.minRate / PrecisionFactor); + MinHBlank += 12; + + if (MinHBlank > modesetInfo.rasterWidth - modesetInfo.surfaceWidth) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value.")); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported.")); + return false; + } + + + NvS32 hblank_symbols = (NvS32)(((NvU64)(modesetInfo.rasterWidth - modesetInfo.surfaceWidth - MinHBlank) * linkConfig.minRate) / modesetInfo.pixelClockHz); + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + + hblank_symbols -= numLanesPerLink == 1 ? 9 : numLanesPerLink == 2 ? 6 : 3; + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + // + // Audio IMP calculations + // + + // From dev_disp.ref: + + // The packet generation logic needs to know the length of the hblank period. If there is no room + // in the current hblank for a new packet, it will be delayed until the next blanking period. 
This + // field should be programmed during the second Supervisor interrupt based on the new raster + // dimensions. + + // ... + + // -------------------------------------- + // The following formulas can be used to calculate the maximum audio sampling rate that can + // be supported by DisplayPort given the current raster dimensions. DisplayPort has much more + // bandwidth during blanking periods than HDMI has, so hblank size is less of an issue. + + // ... + + // Size of a packet for 2ch audio = 20 symbols (up to 2 samples) + // Size of a packet for 8ch audio = 40 symbols + // Size of an audio packet header plus control symbols = 2*#lanes + 8 symbols (assuming < 32 samples per line) + // number of packets/hblank for 2ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 20) + // number of packets/hblank for 8ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 40) + + // Maximum audio sample rate possible: + // number of audio samples/line = SetRasterSize.Width * audio_fs / pclk + // number of audio packets needed for 2ch audio = Ceiling(SetRasterSize.Width * audio_fs / (pclk*2)) + // number of audio packets needed for 3-8ch audio = SetRasterSize.Width * audio_fs / pclk + + // If number of audio packets needed > number of packets/hblank, then you cannot support that audio frequency + + // Note that the hBlankSym calculated is per lane. So the number of symbols available for audio is + // (number of lanes * hBlankSym). + // The calculation of audio packets per Hblank needs to account for the following - + // 2 symbols for SS and SE; 8 symbols for header; and additional 2 symbols to account for actual values used by HW. + // -------------------------------------- + + if (modesetInfo.twoChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 twoChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 20); + + NvU32 twoChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz * 2); + + if (twoChannelAudioPackets > twoChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + if (modesetInfo.eightChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 eightChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 40); + + NvU32 eightChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + if (eightChannelAudioPackets > eightChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 40) * linkConfig.minRate) / modesetInfo.pixelClockHz) - 1; + + vblank_symbols -= numLanesPerLink == 1 ? 39 : numLanesPerLink == 2 ? 
21 : 12; + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols; + + return true; +} + +bool DisplayPort::isModePossibleSSTWithFEC +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits +) +{ + // + // This function is for single stream only! + // Refer to Bug 200406501 and 200401850 for algorithm + // + DP_ASSERT( !linkConfig.multistream ); + + unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST; + unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT; + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + if(bUseIncreasedWatermarkLimits) + { + watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST; + watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT; + } + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK or DEPTH sent by the client "); + return false; + } + // number of link clocks per line. + int vblank_symbols = 0; + NvU64 PrecisionFactor, ratioF, watermarkF; + NvS32 w0, s; + + NvU32 numLanesPerLink = linkConfig.lanes; + + DP_ASSERT(!linkConfig.multistream && "MST!"); + + // Check if we have a valid laneCount as currently we support only up to 4-lanes + if (!IS_VALID_LANECOUNT(linkConfig.lanes)) + { + // + // Print debug message and Assert. All calculations assume a max of 8 lanes + // & any increase in lanes should cause these calculation to be updated + // + DP_LOG(("NVRM: %s: ERROR: LaneCount - %d is not supported for waterMark calculations.", + __FUNCTION__, linkConfig.lanes)); + DP_LOG(("Current support is only up to 4-Lanes & any change/increase in supported lanes " + "should be reflected in waterMark calculations algorithm. " + "Ex: See calc for minHBlank variable below")); + + DP_ASSERT(0); + return false; + } + + if ((modesetInfo.pixelClockHz * modesetInfo.depth) >= (8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR)) + { + return false; + } + + // + // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with + // 0 active symbols. This may cause HW hang. Bug 200379426 + // + if ((modesetInfo.bEnableDsc) && + ((modesetInfo.pixelClockHz * modesetInfo.depth) < ((8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR) / 64))) + { + return false; + } + + // + // Perform the SST calculation. + // For auto mode the watermark calculation does not need to track accumulated error the + // formulas for manual mode will not work. So below calculation was extracted from the DTB. + // + dpInfo->tuSize = 64; + PrecisionFactor = 100000; + ratioF = ((NvU64)modesetInfo.pixelClockHz * modesetInfo.depth * PrecisionFactor) / DSC_FACTOR; + + ratioF /= 8 * (NvU64)linkConfig.minRate * linkConfig.lanes; + + if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below + return false; + + watermarkF = (ratioF * dpInfo->tuSize * (PrecisionFactor - ratioF)) / PrecisionFactor; + + w0 = (8 / linkConfig.lanes); + if (linkConfig.bEnableFEC) + { + s = (linkConfig.lanes == 1) ? 
+
+    w0 = (8 / linkConfig.lanes);
+    if (linkConfig.bEnableFEC)
+    {
+        s = (linkConfig.lanes == 1) ? 15 : 10;
+    }
+    else
+    {
+        s = 3 - w0;
+    }
+
+    dpInfo->waterMark = (unsigned)(watermarkAdjust + ((3 * (modesetInfo.depth * PrecisionFactor / (8 * numLanesPerLink * DSC_FACTOR)) + watermarkF) / PrecisionFactor) + w0 + 3);
+
+    s = ((NvS32)ratioF * s);
+
+    dpInfo->waterMark = (unsigned)((NvS32)dpInfo->waterMark + (s / (NvS32)PrecisionFactor));
+
+    //
+    // Bounds check the watermark
+    //
+    NvU32 numSymbolsPerLine = (modesetInfo.surfaceWidth * modesetInfo.depth) / (8 * linkConfig.lanes * DSC_FACTOR);
+
+    if (dpInfo->waterMark > numSymbolsPerLine)
+    {
+        DP_LOG(("NVRM: %s:", __FUNCTION__));
+        DP_LOG(("\t\tERROR: watermark = %d should not be greater than numSymbolsPerLine = %d.", dpInfo->waterMark, numSymbolsPerLine));
+        return false;
+    }
+
+    //
+    // Clamp the low side
+    //
+    if (dpInfo->waterMark < watermarkMinimum)
+        dpInfo->waterMark = watermarkMinimum;
+
+    unsigned MinHBlank = 0;
+    unsigned MinHBlankFEC = 0;
+    NvU32 BlankingBits = 0;
+    NvU32 BlankingSymbolsPerLane = 0;
+
+    BlankingBits = (3U * 8U * 4U) + (2U * 8U * numLanesPerLink);
+
+    if (modesetInfo.bEnableDsc)
+    {
+        NvU32 sliceCount, sliceWidth, chunkSize;
+
+        sliceCount = (modesetInfo.mode == DSC_DUAL) ? 8U : 4U;
+        sliceWidth = (NvU32)divide_ceil(modesetInfo.surfaceWidth, sliceCount);
+        chunkSize = (NvU32)divide_ceil(modesetInfo.depth * sliceWidth, 8U * DSC_FACTOR);
+
+        if(((NvU64)(chunkSize + 1U) * sliceCount * modesetInfo.pixelClockHz) < (NvU64)(linkConfig.minRate * numLanesPerLink * modesetInfo.surfaceWidth))
+        {
+            // BW is plentiful; this is the common case.
+            // EOC symbols: when BW is sufficient, only the last EOC needs to be considered.
+            BlankingBits += 8U * numLanesPerLink; //+BlankingBits_DSC_EOC
+            BlankingBits += (chunkSize * 8U) - (sliceWidth * modesetInfo.depth / DSC_FACTOR); //+BlankingBits_DSC_bytePadding, only the last slice needs to be considered
+            BlankingBits += (NvU32)(sliceCount * 8U * (divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink - chunkSize)); //+BlankingBits_DSC_lane_padding
+        }
+        else
+        {   // No extra room in link BW.
+            // EOC symbols: EOCs will be accumulated until the hblank period.
+            BlankingBits += (sliceCount * 8U * numLanesPerLink); //+BlankingBits_EOC
+            // Padding; a simplified but pessimistic version is: BlankingBits += SliceNum * (logic_lanes * 8 - 1);
+            BlankingBits += (NvU32)(sliceCount * ((divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink * 8U) - (NvU32)(sliceWidth * modesetInfo.depth / DSC_FACTOR))); //+BlankingBits_DSC_padding
+        }
+    }
+    else
+    {
+        NvU32 surfaceWidthPerLink = modesetInfo.surfaceWidth;
+        NvU32 surfaceWidthPerLane = (NvU32)divide_ceil(surfaceWidthPerLink, numLanesPerLink);
+
+        // Padding
+        BlankingBits += (NvU32)divide_ceil(surfaceWidthPerLane * modesetInfo.depth, 8U) * 8U * numLanesPerLink - (NvU32)(surfaceWidthPerLink * modesetInfo.depth); //+BlankingBits_nonDSC_padding
+    }
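+
+    //
+    // Worked example (illustrative numbers): 1366 active pixels, 30 bpp, 4 lanes,
+    // no DSC. surfaceWidthPerLane = ceil(1366/4) = 342, so the padding term is
+    // ceil(342*30/8)*8*4 - 1366*30 = 41056 - 40980 = 76 bits, giving
+    // BlankingBits = (3*8*4) + (2*8*4) + 76 = 236 bits; the conversion below
+    // rounds that up to ceil(236/32) = 8 symbols per lane.
+    //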
+
+    BlankingSymbolsPerLane = (NvU32)divide_ceil(BlankingBits, (8U * numLanesPerLink)); //in symbols per lane
+    BlankingSymbolsPerLane += (linkConfig.enhancedFraming ? 3U : 0U);
+
+    if (linkConfig.bEnableFEC)
+    {
+        //
+        // In the worst case, FEC symbols fall into a narrow hblank period.
+        // We have to consider this in the HBlank checker (see bug 200496977),
+        // but we don't have to consider it in the calculation of hblank_symbols.
+        //
+
+        MinHBlankFEC = FEC_PARITY_SYM_SST(numLanesPerLink, BlankingSymbolsPerLane); //in symbols
+        BlankingSymbolsPerLane += MinHBlankFEC;
+    }
+
+    // BlankingSymbolsPerLane is the MinHBlank in link clock cycles.
+    MinHBlank = (unsigned)(divide_ceil(BlankingSymbolsPerLane * modesetInfo.pixelClockHz,
+                                       linkConfig.peakRate)); //in pclk cycles
+    MinHBlank += 3U; //add some margin
+
+    NvU32 HBlank = (modesetInfo.rasterWidth - modesetInfo.surfaceWidth);
+
+    if (MinHBlank > HBlank)
+    {
+        DP_LOG(("NVRM: %s:", __FUNCTION__));
+        DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value."));
+        return false;
+    }
+
+    // Bug 702290 - Active Width should be greater than 60
+    if (modesetInfo.surfaceWidth <= 60)
+    {
+        DP_LOG(("NVRM: %s:", __FUNCTION__));
+        DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported."));
+        return false;
+    }
+
+    NvU32 total_hblank_symbols = (NvU32)divide_ceil((HBlank * linkConfig.peakRate), modesetInfo.pixelClockHz);
+    NvS32 hblank_symbols = (NvS32)(((NvU64)(HBlank - MinHBlank) * linkConfig.peakRate) / modesetInfo.pixelClockHz);
+
+    if (linkConfig.bEnableFEC)
+    {
+        hblank_symbols -= (FEC_PARITY_SYM_SST(numLanesPerLink, total_hblank_symbols));
+        hblank_symbols += MinHBlankFEC;
+    }
+
+    // Reduce HBlank symbols to account for the secondary data packet.
+    hblank_symbols -= 1; //Stuffer latency to send BS
+    hblank_symbols -= 3; //SPKT latency to send data to stuffer
+    hblank_symbols -= 3; //add some margin
+
+    dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols;
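+
+    //
+    // Worked example (illustrative numbers): with HBlank = 160 pclk cycles,
+    // MinHBlank = 20, pclk = 241.5 MHz, and assuming peakRate is the per-lane
+    // data rate in bytes/sec (540000000 for HBR2), the raw budget is
+    // (160 - 20) * 540000000 / 241500000 = 313 symbols per lane; the FEC parity
+    // adjustment and the 1 + 3 + 3 symbols above then come out of that budget.
+    //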
+
+    //
+    // Audio IMP calculations
+    //
+
+    // From dev_disp.ref:
+
+    // The packet generation logic needs to know the length of the hblank period. If there is no room
+    // in the current hblank for a new packet, it will be delayed until the next blanking period. This
+    // field should be programmed during the second Supervisor interrupt based on the new raster
+    // dimensions.
+
+    // ...
+
+    // --------------------------------------
+    // The following formulas can be used to calculate the maximum audio sampling rate that can
+    // be supported by DisplayPort given the current raster dimensions. DisplayPort has much more
+    // bandwidth during blanking periods than HDMI has, so hblank size is less of an issue.
+
+    // ...
+
+    // Size of a packet for 2ch audio = 20 symbols (up to 2 samples)
+    // Size of a packet for 8ch audio = 40 symbols
+    // Size of an audio packet header plus control symbols = 2*#lanes + 8 symbols (assuming < 32 samples per line)
+    // number of packets/hblank for 2ch audio = Floor((number of free symbols/hblank - (2*#lanes + 8)) / 20)
+    // number of packets/hblank for 8ch audio = Floor((number of free symbols/hblank - (2*#lanes + 8)) / 40)
+
+    // Maximum audio sample rate possible:
+    // number of audio samples/line = SetRasterSize.Width * audio_fs / pclk
+    // number of audio packets needed for 2ch audio = Ceiling(SetRasterSize.Width * audio_fs / (pclk*2))
+    // number of audio packets needed for 3-8ch audio = Ceiling(SetRasterSize.Width * audio_fs / pclk)
+
+    // If the number of audio packets needed > number of packets/hblank, then that audio frequency cannot be supported.
+
+    // Note that the hBlankSym calculated is per lane. So the number of symbols available for audio is
+    // (number of lanes * hBlankSym).
+    // The calculation of audio packets per Hblank needs to account for the following -
+    // 2 symbols for SS and SE; 8 symbols for header; and 2 additional symbols to account for actual values used by HW.
+    // --------------------------------------
+
+    if (modesetInfo.twoChannelAudioHz != 0)
+    {
+        if ((dpInfo->hBlankSym * numLanesPerLink) < ((2 * numLanesPerLink) + 8))
+        {
+            // There aren't enough symbols/hblank available.
+            return false;
+        }
+
+        NvU32 twoChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 20);
+
+        NvU32 twoChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz * 2);
+
+        if (twoChannelAudioPackets > twoChannelAudioPacketsPerHBlank)
+        {
+            // There aren't enough packets/hblank available.
+            return false;
+        }
+    }
+
+    if (modesetInfo.eightChannelAudioHz != 0)
+    {
+        if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8))
+        {
+            // There aren't enough symbols/hblank available.
+            return false;
+        }
+
+        NvU32 eightChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 40);
+
+        NvU32 eightChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz);
+
+        if (eightChannelAudioPackets > eightChannelAudioPacketsPerHBlank)
+        {
+            // There aren't enough packets/hblank available.
+            return false;
+        }
+    }
+
+    // Refer to dev_disp.ref for more information.
+    // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1;
+    // where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39
+    if (modesetInfo.surfaceWidth < 40)
+    {
+        vblank_symbols = 0;
+    }
+    else
+    {
+        vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 3) * linkConfig.peakRate) / modesetInfo.pixelClockHz);
+
+        //
+        // The active region transmission is delayed because of lane FIFO storage.
+        // Compared to the negedge of hblank, all the BE symbols will be delayed by watermark/ratio cycles.
+        // Compared to the posedge of hblank (i.e. the time of sending out BS symbols in the vblank period),
+        // all the BS after active pixels will be delayed by at most 1.5 TU cycles.
+        // The delay of the BS will make the 1st vblank line shorter than expected,
+        // but it will squeeze the hblank period first;
+        // if hblank is short, the BS will land in the hactive period and impact vblank_symbols.
+        //
+
+        NvS32 squeezed_symbols = (dpInfo->tuSize * 3 / 2) - hblank_symbols;
+        squeezed_symbols = DP_MAX(squeezed_symbols, 0);
+        NvS32 msa_symbols = (36 / numLanesPerLink) + 3;
+
+        //
+        // MSA can't be in the 1st vblank line, except when v_front_porch = 0.
+        // If we knew v_front_porch != 0,
+        // we could use MAX(squeezed_symbols, msa_symbols) instead of squeezed_symbols + msa_symbols.
+        //
+        vblank_symbols -= (squeezed_symbols + msa_symbols);
+
+        if (linkConfig.bEnableFEC)
+        {
+            vblank_symbols -= FEC_PARITY_SYM_SST(numLanesPerLink, vblank_symbols);
+        }
+        vblank_symbols -= 3U; //add some margin
+    }
+
+    dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols;
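+
+    //
+    // Note (illustrative): msa_symbols = (36 / lanes) + 3 evaluates to 39, 21 and
+    // 12 for 1, 2 and 4 lanes respectively, matching the Y values in the
+    // dev_disp.ref formula quoted above. With tuSize = 64, the 1.5 TU bound means
+    // squeezed_symbols only becomes non-zero when hblank_symbols drops below 96.
+    //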
+
+    if (modesetInfo.bEnableDsc)
+    {
+        //
+        // For the DSC enabled case, vblank_symbols must be large enough to accommodate the DSC PPS SDP, see bug 2760673.
+        // For 1 lane, it requires at least 170+13 symbols.
+        // For 2 lanes, it requires at least 86+3 symbols.
+        // For 4 lanes, it requires at least 44+3 symbols.
+        // Normally there is no need to check this, except in some small-resolution test cases.
+        //
+        if ((numLanesPerLink == 1U) && (dpInfo->vBlankSym < 183U))
+        {
+            return false;
+        }
+        else if ((numLanesPerLink == 2U) && (dpInfo->vBlankSym < 89U))
+        {
+            return false;
+        }
+        else if ((numLanesPerLink == 4U) && (dpInfo->vBlankSym < 47U))
+        {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+bool DisplayPort::isModePossibleMSTWithFEC
+(
+    const LinkConfiguration & linkConfig,
+    const ModesetInfo & modesetInfo,
+    Watermark * dpInfo
+)
+{
+    //
+    // This function is for multistream only!
+    // Refer to Bug 200406501 and 200401850 for the algorithm.
+    //
+    DP_ASSERT(linkConfig.multistream);
+
+    if (!modesetInfo.pixelClockHz || !modesetInfo.depth)
+    {
+        DP_ASSERT(0 && "INVALID PIXEL CLOCK or DEPTH sent by the client ");
+        return false;
+    }
+
+    if (linkConfig.lanes == 0)
+    {
+        DP_ASSERT(0 && "No Active link / link train failed ");
+        return false;
+    }
+
+    // depth is multiplied by 16 in case DSC is enabled
+    unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1;
+    dpInfo->tuSize = 64;
+
+    NvU32 BlankingBits, BlankingSymbolsPerLane;
+    NvU32 numLanesPerLink = 4U;
+    NvU32 MinHBlank;
+
+    BlankingBits = (3U * 8U * 4U) + (2U * 8U * numLanesPerLink);
+
+    if(modesetInfo.bEnableDsc)
+    {
+        NvU32 sliceCount, sliceWidth, chunkSize;
+
+        sliceCount = (modesetInfo.mode == DSC_DUAL) ? 8U : 4U;
+        sliceWidth = (NvU32)divide_ceil(modesetInfo.surfaceWidth, sliceCount);
+        chunkSize = (NvU32)divide_ceil(modesetInfo.depth * sliceWidth, 8U * DSC_FACTOR);
+
+        // EOC symbols: EOCs will be accumulated until the hblank period.
+        BlankingBits += (sliceCount * 8U * numLanesPerLink); //+BlankingBits_EOC
+        //+BlankingBits_DSC_padding
+        BlankingBits += (NvU32)(sliceCount * ((divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink * 8U) - (NvU32)(sliceWidth * modesetInfo.depth / DSC_FACTOR)));
+    }
+    else
+    {
+        NvU32 surfaceWidthPerLane = (NvU32)divide_ceil(modesetInfo.surfaceWidth, numLanesPerLink);
+
+        // Extra bits sent due to pixel steering
+        BlankingBits += (NvU32)divide_ceil(surfaceWidthPerLane * modesetInfo.depth, 8U) * 8U * numLanesPerLink - (NvU32)(modesetInfo.surfaceWidth * modesetInfo.depth); //+BlankingBits_nonDSC_padding
+    }
+
+    BlankingSymbolsPerLane = (NvU32)divide_ceil(BlankingBits, (8U * numLanesPerLink)); //in symbols per lane
+
+    MinHBlank = (NvU32)divide_ceil(BlankingSymbolsPerLane * 8U * numLanesPerLink * DSC_FACTOR, modesetInfo.depth);
+    MinHBlank += 3U; //add some margin
+
+    NvU32 HBlank = (modesetInfo.rasterWidth - modesetInfo.surfaceWidth);
+
+    if (MinHBlank > HBlank)
+    {
+        DP_LOG(("NVRM: %s:", __FUNCTION__));
+        DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value."));
+        return false;
+    }
+
+    // Bug 702290 - Active Width should be greater than 60
+    if (modesetInfo.surfaceWidth <= 60)
+    {
+        DP_LOG(("NVRM: %s:", __FUNCTION__));
+        DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported."));
+        return false;
+    }
+
+    // MST can do SDP splitting, so all audio configurations are possible.
+    dpInfo->hBlankSym = 0U;
+    dpInfo->vBlankSym = 0U;
+
+    return true;
+}
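+
+//
+// Illustrative caller-side sketch (assumed usage, not code from this file): a
+// client would pick the SST or MST variant based on the link configuration.
+//
+//     Watermark wm;
+//     bool bPossible = linkConfig.multistream
+//         ? DisplayPort::isModePossibleMSTWithFEC(linkConfig, modesetInfo, &wm)
+//         : DisplayPort::isModePossibleSSTWithFEC(linkConfig, modesetInfo, &wm,
+//                                                 false /* bUseIncreasedWatermarkLimits */);
+//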
+
+unsigned DisplayPort::pbnForMode(const ModesetInfo & modesetInfo)
+{
+    //
+    // Calculate PBN in terms of 54/64 MBytes/sec.
+    // Round up by 0.6% for spread de-rate. Note: even if we're not spreading our link,
+    // this MUST still be counted; it also allows downstream links to be spread.
+    //
+    unsigned pbnForMode = (NvU32)(divide_ceil(modesetInfo.pixelClockHz * modesetInfo.depth * 1006 * 64 / 8,
+                                              (NvU64)54000000 * 1000));
+
+    if(modesetInfo.bEnableDsc)
+    {
+        //
+        // When DSC is enabled, depth has been multiplied by 16 (DSC_DEPTH_FACTOR), and
+        // a 3% FEC overhead must also be accounted for, as per the DP 1.4 spec.
+        //
+        pbnForMode = (NvU32)(divide_ceil(pbnForMode * 100, 97 * DSC_DEPTH_FACTOR));
+    }
+
+    return pbnForMode;
+}
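+
+//
+// Worked example (illustrative numbers): for a mode with pclk = 241.5 MHz at
+// 24 bpp and no DSC, pbnForMode() yields
+// ceil(241500000 * 24 * 1006 * 64 / 8 / 54000000000) = ceil(863.82) = 864 PBN.
+//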
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp
new file mode 100644
index 0000000..5813e3f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp
@@ -0,0 +1,94 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************* DisplayPort *******************************\
+*                                                                           *
+* Module: dp_testmessage.cpp                                                *
+*         Used for DP Test Utility                                          *
+*                                                                           *
+\***************************************************************************/
+#include "dp_internal.h"
+#include "dp_auxdefs.h"
+#include "dp_messages.h"
+#include "dp_testmessage.h"
+#include "dp_connectorimpl.h"
+using namespace DisplayPort;
+
+// The DP lib status must be set to DONE, indicating there is no pending message.
+void DPTestMessageCompletion::messageFailed(MessageManager::Message * from, NakData * data)
+{
+    parent->testMessageStatus = DP_TESTMESSAGE_REQUEST_STATUS_DONE;
+
+    {
+        {
+            DP_ASSERT(0 && "unknown msg type when msg failed");
+        }
+    }
+}
+
+void DPTestMessageCompletion::messageCompleted(MessageManager::Message * from)
+{
+    parent->testMessageStatus = DP_TESTMESSAGE_REQUEST_STATUS_DONE;
+
+    {
+        {
+            DP_ASSERT(0 && "unknown msg type when msg complete");
+        }
+    }
+}
+
+MessageManager * TestMessage::getMessageManager()
+{
+    return pMsgManager;
+}
+
+//
+// This function requires that the request struct size be checked first, to ensure the
+// right structure is used and no BSOD will happen.
+//
+// For each request type, the DP lib status for that type should be checked to guard
+// against request conflicts. At any one time, only ONE instance of each request type
+// can be processed.
+//
+DP_TESTMESSAGE_STATUS TestMessage::sendDPTestMessage
+(
+    void    *pBuffer,
+    NvU32    requestSize,
+    NvU32   *pDpStatus
+)
+{
+    DP_ASSERT(pBuffer);
+    DP_TESTMESSAGE_REQUEST_TYPE type;
+
+    // The buffer must contain at least a requestType field.
+    if (requestSize < sizeof(DP_TESTMESSAGE_REQUEST_TYPE))
+        return DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM;
+
+    type = *(DP_TESTMESSAGE_REQUEST_TYPE *)pBuffer;
+
+    if (!isValidStruct(type, requestSize))
+        return DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM;
+
+    *pDpStatus = DP_TESTMESSAGE_REQUEST_STATUS_ERROR;
+    return DP_TESTMESSAGE_STATUS_ERROR;
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h
new file mode 100644
index 0000000..b85b1a2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h
@@ -0,0 +1,631 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _DISPLAYPORT_H_ +#define _DISPLAYPORT_H_ + +#include "nvmisc.h" +#include "dpcd.h" +#include "dpcd14.h" +#include "dpcd20.h" + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: DISPLAYPORT.H * +* Defines DISPLAYPORT V1.2 * +* * +\***************************************************************************/ + +// Displayport interoperability with HDMI dongle i2c addr +#define DP2HDMI_DONGLE_I2C_ADDR 0x80 +#define DP2HDMI_DONGLE_DDC_BUFFER_ID_LEN 16 +#define DP2HDMI_DONGLE_CAP_BUFFER_LEN 32 + +// Offset to read the dongle identifier +#define NV_DP2HDMI_DONGLE_IDENTIFIER (0x00000010) +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_REV 2:0 +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_REV_TYPE2 (0x00000000) +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_ID 7:4 +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_ID_TYPE2 (0x0000000A) + +// Offset to read the dongle TMDS clock rate +#define NV_DP2HDMI_DONGLE_TMDS_CLOCK_RATE (0x0000001D) + +// HDMI dongle types +#define DP2HDMI_DONGLE_TYPE_1 0x1 +#define DP2HDMI_DONGLE_TYPE_2 0x2 + +// HDMI dongle frequency limits +#define DP2HDMI_DONGLE_TYPE_1_PCLK_LIMIT 165*1000*1000 +#define DP2HDMI_DONGLE_TYPE_2_PCLK_LIMIT 300*1000*1000 + +#define DPCD_VERSION_12 0x12 +#define DPCD_VERSION_13 0x13 +#define DPCD_VERSION_14 0x14 + +#define DP_LINKINDEX_0 0x0 +#define DP_LINKINDEX_1 0x1 + +// Two Head One OR +#define NV_PRIMARY_HEAD_INDEX_0 0 +#define NV_SECONDARY_HEAD_INDEX_1 1 +#define NV_PRIMARY_HEAD_INDEX_2 2 +#define NV_SECONDARY_HEAD_INDEX_3 3 + +typedef enum +{ + displayPort_Lane0 = 0, + displayPort_Lane1 = 1, + displayPort_Lane2 = 2, + displayPort_Lane3 = 3, + displayPort_Lane4 = 4, + displayPort_Lane5 = 5, + displayPort_Lane6 = 6, + displayPort_Lane7 = 7, + displayPort_LaneSupported +} DP_LANE; + +typedef enum +{ + laneCount_0 = 0x0, + laneCount_1 = 0x1, + laneCount_2 = 0x2, + laneCount_4 = 0x4, + laneCount_8 = 0x8, + laneCount_Supported +} DP_LANE_COUNT; + +typedef enum +{ + linkBW_1_62Gbps = 0x06, + linkBW_2_16Gbps = 0x08, + linkBW_2_43Gbps = 0x09, + linkBW_2_70Gbps = 0x0A, + linkBW_3_24Gbps = 0x0C, + linkBW_4_32Gbps = 0x10, + linkBW_5_40Gbps = 0x14, + linkBW_6_75Gbps = 0x19, + linkBW_8_10Gbps = 0x1E, + linkBW_Supported +} DP_LINK_BANDWIDTH; + +typedef enum +{ + linkSpeedId_1_62Gbps = 0x00, + linkSpeedId_2_70Gbps = 0x01, + linkSpeedId_5_40Gbps = 0x02, + linkSpeedId_8_10Gbps = 0x03, + linkSpeedId_2_16Gbps = 0x04, + linkSpeedId_2_43Gbps = 0x05, + linkSpeedId_3_24Gbps = 0x06, + linkSpeedId_4_32Gbps = 0x07, + linkSpeedId_6_75Gbps = 0x08, + linkSpeedId_Supported +} DP_LINK_SPEED_INDEX; + + +typedef enum +{ + postCursor2_Level0 = 0, + postCursor2_Level1 = 1, + postCursor2_Level2 = 2, + postCursor2_Level3 = 3, + postCursor2_Supported +} DP_POSTCURSOR2; + +typedef enum +{ + preEmphasis_Disabled = 0, + preEmphasis_Level1 = 1, + preEmphasis_Level2 = 2, + preEmphasis_Level3 = 3, + preEmphasis_Supported +} DP_PREEMPHASIS; + +typedef enum +{ + driveCurrent_Level0 = 0, + driveCurrent_Level1 = 1, + driveCurrent_Level2 = 2, + driveCurrent_Level3 = 3, + driveCurrent_Supported +} DP_DRIVECURRENT; + +typedef enum +{ + trainingPattern_Disabled = 0x0, + trainingPattern_1 = 0x1, + trainingPattern_2 = 0x2, + trainingPattern_3 = 0x3, + trainingPattern_4 = 0xB +} DP_TRAININGPATTERN; + +typedef enum +{ + dpOverclock_Percentage_0 = 0, + dpOverclock_Percentage_10 = 10, + dpOverclock_Percentage_20 = 20 +}DP_OVERCLOCKPERCENTAGE; + +typedef enum +{ + dpColorFormat_RGB = 0, + dpColorFormat_YCbCr444 = 0x1, + dpColorFormat_YCbCr422 = 
0x2, + dpColorFormat_YCbCr420 = 0x3, + dpColorFormat_Unknown = 0xF +} DP_COLORFORMAT; + +typedef enum +{ + dp_pktType_VideoStreamconfig = 0x7, + dp_pktType_CeaHdrMetaData = 0x21, + dp_pktType_SRInfoFrame = 0x7f, // Self refresh infoframe for eDP enter/exit self refresh, SRS 1698 + dp_pktType_Cea861BInfoFrame = 0x80, + dp_pktType_VendorSpecInfoFrame = 0x81, + dp_pktType_AviInfoFrame = 0x82, + dp_pktType_AudioInfoFrame = 0x84, + dp_pktType_SrcProdDescInfoFrame = 0x83, + dp_pktType_MpegSrcInfoFrame = 0x85, + dp_pktType_DynamicRangeMasteringInfoFrame = 0x87 +} DP_PACKET_TYPE; + +typedef enum +{ + DSC_SLICES_PER_SINK_1 = 1, + DSC_SLICES_PER_SINK_2 = 2, + DSC_SLICES_PER_SINK_4 = 4, + DSC_SLICES_PER_SINK_6 = 6, + DSC_SLICES_PER_SINK_8 = 8, + DSC_SLICES_PER_SINK_10 = 10, + DSC_SLICES_PER_SINK_12 = 12, + DSC_SLICES_PER_SINK_16 = 16, + DSC_SLICES_PER_SINK_20 = 20, + DSC_SLICES_PER_SINK_24 = 24 +} DscSliceCount; + +typedef enum +{ + DSC_BITS_PER_COLOR_MASK_8 = 1, + DSC_BITS_PER_COLOR_MASK_10 = 2, + DSC_BITS_PER_COLOR_MASK_12 = 4 +}DscBitsPerColorMask; + +enum DSC_MODE +{ + DSC_SINGLE, + DSC_DUAL, + DSC_DROP, + DSC_MODE_NONE +}; + +typedef enum +{ + BITS_PER_PIXEL_PRECISION_1_16 = 0, + BITS_PER_PIXEL_PRECISION_1_8 = 1, + BITS_PER_PIXEL_PRECISION_1_4 = 2, + BITS_PER_PIXEL_PRECISION_1_2 = 3, + BITS_PER_PIXEL_PRECISION_1 = 4 +}BITS_PER_PIXEL_INCREMENT; + +typedef enum +{ + NV_DP_FEC_UNCORRECTED = 0, + NV_DP_FEC_CORRECTED = 1, + NV_DP_FEC_BIT = 2, + NV_DP_FEC_PARITY_BLOCK = 3, + NV_DP_FEC_PARITY_BIT = 4 +}FEC_ERROR_COUNTER; + +typedef struct DscCaps +{ + NvBool bDSCSupported; + NvBool bDSCPassThroughSupported; + unsigned versionMajor, versionMinor; + unsigned rcBufferBlockSize; + unsigned rcBuffersize; + unsigned maxSlicesPerSink; + unsigned lineBufferBitDepth; + NvBool bDscBlockPredictionSupport; + unsigned maxBitsPerPixelX16; + unsigned sliceCountSupportedMask; + + struct + { + NvBool bRgb; + NvBool bYCbCr444; + NvBool bYCbCrSimple422; + NvBool bYCbCrNative422; + NvBool bYCbCrNative420; + }dscDecoderColorFormatCaps; + + unsigned dscDecoderColorDepthMask; + unsigned dscPeakThroughputMode0; + unsigned dscPeakThroughputMode1; + unsigned dscMaxSliceWidth; + + BITS_PER_PIXEL_INCREMENT dscBitsPerPixelIncrement; +} DscCaps; + +typedef struct GpuDscCrc +{ + NvU16 gpuCrc0; + NvU16 gpuCrc1; + NvU16 gpuCrc2; +} gpuDscCrc; + +typedef struct SinkDscCrc +{ + NvU16 sinkCrc0; + NvU16 sinkCrc1; + NvU16 sinkCrc2; +} sinkDscCrc; + +typedef struct +{ + NvBool bSourceControlModeSupported; + NvBool bConcurrentLTSupported; + NvU8 maxTmdsClkRate; + NvU8 maxBpc; + NvU8 maxHdmiLinkBandwidthGbps; +} PCONCaps; + +typedef enum +{ + PCON_HDMI_LINK_BW_FRL_9GBPS = 0, + PCON_HDMI_LINK_BW_FRL_18GBPS, + PCON_HDMI_LINK_BW_FRL_24GBPS, + PCON_HDMI_LINK_BW_FRL_32GBPS, + PCON_HDMI_LINK_BW_FRL_40GBPS, + PCON_HDMI_LINK_BW_FRL_48GBPS, + PCON_HDMI_LINK_BW_FRL_INVALID +} PCONHdmiLinkBw; + +typedef enum +{ + NV_DP_PCON_CONTROL_STATUS_SUCCESS = 0, + NV_DP_PCON_CONTROL_STATUS_ERROR_TIMEOUT = 0x80000001, + NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_LT_FAILURE = 0x80000002, + NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_NOT_SUPPORTED = 0x80000003, + NV_DP_PCON_CONTROL_STATUS_ERROR_GENERIC = 0x8000000F +} NV_DP_PCON_CONTROL_STATUS; +// +// Poll HDMI-Link Status change and FRL Ready. +// Spec says it should be done in 500ms, we give it 20% extra time: +// 60 times with interval 10ms. +// +#define NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_THRESHOLD (60) +#define NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_INTERVAL_MS (10) +// +// Poll HDMI-Link Status change IRQ and Link Status. 
+// The spec says it should be done in 250 ms; we give it 20% extra time:
+// 30 times with an interval of 10 ms.
+//
+#define NV_PCON_FRL_LT_TIMEOUT_THRESHOLD        (30)
+#define NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS      (10)
+
+typedef struct _PCONLinkControl
+{
+    struct
+    {
+        // This struct is being passed in for the assessPCONLink I/F
+        NvU32   bAssessLink               : 1;
+
+        // Specify if the client wants to use source control mode - if set to false,
+        // DPLib can just do DP link training alone. By default it should be true.
+        NvU32   bSourceControlMode        : 1;
+
+        // Default is sequential mode; set this to choose concurrent mode.
+        NvU32   bConcurrentMode           : 1;
+
+        // Default is normal link training mode (stop once FRL-LT succeeds).
+        // Set this to link train all requested FRL BWs in allowedFrlBwMask.
+        NvU32   bExtendedLTMode           : 1;
+
+        // Keep PCON links (DP and FRL link) alive.
+        NvU32   bKeepPCONLinkAlive        : 1;
+
+        // By default DPLib will fall back to autonomous mode and perform DP assessLink.
+        NvU32   bSkipFallback             : 1;
+    } flags;
+
+    // Input: Clients use this to specify the FRL BW PCON should try.
+    NvU32   frlHdmiBwMask;
+
+    struct
+    {
+        NV_DP_PCON_CONTROL_STATUS status;
+        PCONHdmiLinkBw            maxFrlBwTrained;
+        NvU32                     trainedFrlBwMask;
+    } result;
+} PCONLinkControl;
+
+static NV_INLINE PCONHdmiLinkBw getMaxFrlBwFromMask(NvU32 frlRateMask)
+{
+    if (frlRateMask == 0)
+    {
+        // Nothing is set. Assume TMDS.
+        return PCON_HDMI_LINK_BW_FRL_INVALID;
+    }
+
+    // Find the highest set bit (destructive operation).
+    HIGHESTBITIDX_32(frlRateMask);
+
+    return (PCONHdmiLinkBw)frlRateMask;
+}
+
+/*
+    EDP VESA PSR defines
+*/
+
+// PSR state transitions
+typedef enum
+{
+    vesaPsrStatus_Inactive             = 0,
+    vesaPsrStatus_Transition2Active    = 1,
+    vesaPsrStatus_DisplayFromRfb       = 2,
+    vesaPsrStatus_CaptureAndDisplay    = 3,
+    vesaPsrStatus_Transition2Inactive  = 4,
+    vesaPsrStatus_Undefined5           = 5,
+    vesaPsrStatus_Undefined6           = 6,
+    vesaPsrStatus_SinkError            = 7
+} vesaPsrState;
+
+typedef struct VesaPsrConfig
+{
+    NvU8 psrCfgEnable                      : 1;
+    NvU8 srcTxEnabledInPsrActive           : 1;
+    NvU8 crcVerifEnabledInPsrActive        : 1;
+    NvU8 frameCaptureSecondActiveFrame     : 1;
+    NvU8 selectiveUpdateOnSecondActiveline : 1;
+    NvU8 enableHpdIrqOnCrcMismatch         : 1;
+    NvU8 enablePsr2                        : 1;
+    NvU8 reserved                          : 1;
+} vesaPsrConfig;
+
+typedef struct VesaPsrDebugStatus
+{
+    NvBool lastSdpPsrState;
+    NvBool lastSdpUpdateRfb;
+    NvBool lastSdpCrcValid;
+    NvBool lastSdpSuValid;
+    NvBool lastSdpFirstSURcvd;
+    NvBool lastSdpLastSURcvd;
+    NvBool lastSdpYCoordValid;
+    NvU8   maxResyncFrames;
+    NvU8   actualResyncFrames;
+} vesaPsrDebugStatus;
+
+typedef struct VesaPsrErrorStatus
+{
+    NvU8 linkCrcError  : 1;
+    NvU8 rfbStoreError : 1;
+    NvU8 vscSdpError   : 1;
+    NvU8 rsvd          : 5;
+} vesaPsrErrorStatus;
+
+typedef struct VesaPsrEventIndicator
+{
+    NvU8 sinkCapChange : 1;
+    NvU8 rsvd          : 7;
+} vesaPsrEventIndicator;
+
+#pragma pack(1)
+typedef struct VesaPsrSinkCaps
+{
+    NvU8  psrVersion;
+    NvU8  linkTrainingRequired          : 1;
+    NvU8  psrSetupTime                  : 3;
+    NvU8  yCoordinateRequired           : 1;
+    NvU8  psr2UpdateGranularityRequired : 1;
+    NvU8  reserved                      : 2;
+    NvU16 suXGranularity;
+    NvU8  suYGranularity;
+} vesaPsrSinkCaps;
+#pragma pack()
+
+typedef struct PanelReplayCaps
+{
+    NvBool panelReplaySupported;
+} panelReplayCaps;
+
+typedef struct PanelReplayConfig
+{
+    NvBool enablePanelReplay;
+} panelReplayConfig;
+
+// Multiplier constant to get link frequency in KHz.
+// Maximum link rate of Main Link lanes = Value x 270M.
+// To get it in KHz units, we need to multiply by 270K.
+#define DP_LINK_BW_FREQUENCY_MULTIPLIER_KHZ (270*1000) + +// Multiplier constant to get link rate table's in KHZ +#define DP_LINK_RATE_TABLE_MULTIPLIER_KHZ 200 + +// +// Multiplier constant to get link frequency (multiplier of 270MHz) in MBps +// a * 270 * 1000 * 1000(270Mhz) * (8 / 10)(8b/10b) / 8(Byte) +// = a * 27000000 +// +#define DP_LINK_BW_FREQ_MULTI_MBPS 27000000 + +// +// Get link rate in multiplier of 270MHz from KHz: +// a * 1000(KHz) / 270 * 1000 * 1000(270Mhz) +// +#define LINK_RATE_KHZ_TO_MULTP(a) ((a) / 270000) + +// +// Get link rate in MBps from KHz: +// a * 1000 * (8 / 10)(8b/10b) / 8(Byte) +// = a * 100 +// +#define LINK_RATE_KHZ_TO_MBPS(a) ((a) * 100) + +#define DP_MAX_LANES 8 // This defines the maximum number of lanes supported on a chip. +#define DP_MAX_LANES_PER_LINK 4 // This defines the maximum number of lanes per link in a chip. +#define DP_AUX_CHANNEL_MAX_BYTES 16 +#define DP_CLOCK_RECOVERY_TOT_TRIES 10 +#define DP_CLOCK_RECOVERY_MAX_TRIES 5 +#define DP_CH_EQ_MAX_RETRIES 5 +#define DP_LT_MAX_FOR_MST_MAX_RETRIES 3 +#define DP_READ_EDID_MAX_RETRIES 7 +#define DP_AUX_CHANNEL_DEFAULT_DEFER_MAX_TRIES 7 +#define DP_AUX_CHANNEL_TIMEOUT_MAX_TRIES 2 +#define DP_SET_POWER_D0_NORMAL_MAX_TRIES 3 +#define DP_SW_AUTO_READ_REQ_SIZE 6 +#define NV_DP_RBR_FALLBACK_MAX_TRIES 3 + +#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_DEFAULT_MS 1 + +#define DP_AUX_CHANNEL_TIMEOUT_WAITIDLE 400 // source is required to wait at least 400us before it considers the AUX transaction to have timed out. +#define DP_AUX_CHANNEL_TIMEOUT_VALUE_DEFAULT 400 +#define DP_AUX_CHANNEL_TIMEOUT_VALUE_MAX 3200 + +#define DP_PHY_REPEATER_INDEX_FOR_SINK 0xFFFFFFFF + +#define DP_MESSAGEBOX_SIZE 48 +#define DP_POST_LT_ADJ_REQ_LIMIT 6 +#define DP_POST_LT_ADJ_REQ_TIMER 200000 + +#define DP_AUX_HYBRID_TIMEOUT 600 +#define DP_AUX_SEMA_ACQUIRE_TIMEOUT 20000 + +#define DP_CONFIG_WATERMARK_ADJUST 2 +#define DP_CONFIG_WATERMARK_LIMIT 20 +#define DP_CONFIG_INCREASED_WATERMARK_ADJUST 8 +#define DP_CONFIG_INCREASED_WATERMARK_LIMIT 22 + +#define NV_DP_MSA_PROPERTIES_MISC1_STEREO 2:1 + +#define DP_LANE_STATUS_ARRAY_SIZE ((displayPort_LaneSupported + 1) / 2) +#define DP_LANE_STATUS_ARRAY_INDEX(lane) ((lane) < displayPort_LaneSupported ? ((lane) / 2) : 0) + +#define IS_VALID_LANECOUNT(val) (((NvU32)(val)==0) || ((NvU32)(val)==1) || \ + ((NvU32)(val)==2) || ((NvU32)(val)==4) || \ + ((NvU32)(val)==8)) + +#define IS_STANDARD_LINKBW(val) (((NvU32)(val)==linkBW_1_62Gbps) || \ + ((NvU32)(val)==linkBW_2_70Gbps) || \ + ((NvU32)(val)==linkBW_5_40Gbps) || \ + ((NvU32)(val)==linkBW_8_10Gbps)) + +#define IS_INTERMEDIATE_LINKBW(val) (((NvU32)(val)==linkBW_2_16Gbps) || \ + ((NvU32)(val)==linkBW_2_43Gbps) || \ + ((NvU32)(val)==linkBW_3_24Gbps) || \ + ((NvU32)(val)==linkBW_4_32Gbps) || \ + ((NvU32)(val)==linkBW_6_75Gbps)) + +#define IS_VALID_LINKBW(val) (IS_STANDARD_LINKBW(val) || \ + IS_INTERMEDIATE_LINKBW(val)) +// +// Phy Repeater count read from DPCD offset F0002h is an +// 8 bit value where each bit represents the total count +// 80h = 1 repeater, 40h = 2 , 20h = 3 ... 
01h = 8 +// This function maps it to decimal system +// +static NV_INLINE NvU32 mapPhyRepeaterVal(NvU32 value) +{ + switch (value) + { + case NV_DPCD14_PHY_REPEATER_CNT_VAL_0: + return 0; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_1: + return 1; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_2: + return 2; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_3: + return 3; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_4: + return 4; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_5: + return 5; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_6: + return 6; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_7: + return 7; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_8: + return 8; + default: + return 0; + } +} + +// HDCP specific definitions + +#define HDCP22_RTX_SIMPLE_PATTERN 0x12345678 +#define HDCP22_TX_CAPS_PATTERN_BIG_ENDIAN {0x02, 0x00, 0x00} + +#define DP_MST_HEAD_TO_STREAMID(head, pipeId, numHeads) ((head) + 1 + (pipeId) * (numHeads)) +#define DP_MST_STREAMID_TO_HEAD(streamid, pipeId, numHeads) ((streamid) - 1 - ((pipeId) * (numHeads))) +#define DP_MST_STREAMID_TO_PIPE(streamid, head, numHeads) (((streamid) - (head) - 1) / (numHeads)) + +typedef enum +{ + NV_DP_SBMSG_REQUEST_ID_GET_MESSAGE_TRANSACTION_VERSION = 0x00, + NV_DP_SBMSG_REQUEST_ID_LINK_ADDRESS = 0x01, + NV_DP_SBMSG_REQUEST_ID_CONNECTION_STATUS_NOTIFY = 0x02, + + NV_DP_SBMSG_REQUEST_ID_ENUM_PATH_RESOURCES = 0x10, + NV_DP_SBMSG_REQUEST_ID_ALLOCATE_PAYLOAD = 0x11, + NV_DP_SBMSG_REQUEST_ID_QUERY_PAYLOAD = 0x12, + NV_DP_SBMSG_REQUEST_ID_RESOURCE_STATUS_NOTIFY = 0x13, + NV_DP_SBMSG_REQUEST_ID_CLEAR_PAYLOAD_ID_TABLE = 0x14, + + NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_READ = 0x20, + NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_WRITE = 0x21, + NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_READ = 0x22, + NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_WRITE = 0x23, + NV_DP_SBMSG_REQUEST_ID_POWER_UP_PHY = 0x24, + NV_DP_SBMSG_REQUEST_ID_POWER_DOWN_PHY = 0x25, + + NV_DP_SBMSG_REQUEST_ID_SINK_EVENT_NOTIFY = 0x30, + NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS = 0x38, + + NV_DP_SBMSG_REQUEST_ID_UNDEFINED = 0xFF, +} NV_DP_SBMSG_REQUEST_ID; + +// FEC + +#define NV_DP_FEC_FLAGS_SELECT_ALL 0x7 +#define NV_DP_ERROR_COUNTERS_PER_LANE 5 +#define NV_DP_MAX_NUM_OF_LANES 4 +#define NV_DP_FEC_ERROR_COUNT_INVALID 0xbadf +#define NV_DP_UNCORRECTED_ERROR NV_DP_FEC_UNCORRECTED : NV_DP_FEC_UNCORRECTED +#define NV_DP_CORRECTED_ERROR NV_DP_FEC_CORRECTED : NV_DP_FEC_CORRECTED +#define NV_DP_BIT_ERROR NV_DP_FEC_BIT : NV_DP_FEC_BIT +#define NV_DP_PARITY_BLOCK_ERROR NV_DP_FEC_PARITY_BLOCK : NV_DP_FEC_PARITY_BLOCK +#define NV_DP_PARITY_BIT_ERROR NV_DP_FEC_PARITY_BIT : NV_DP_FEC_PARITY_BIT +#define NV_DP_UNCORRECTED_ERROR_NO 0 +#define NV_DP_UNCORRECTED_ERROR_YES 1 +#define NV_DP_CORRECTED_ERROR_NO 0 +#define NV_DP_CORRECTED_ERROR_YES 1 +#define NV_DP_BIT_ERROR_NO 0 +#define NV_DP_BIT_ERROR_YES 1 +#define NV_DP_PARITY_BLOCK_ERROR_NO 0 +#define NV_DP_PARITY_BLOCK_ERROR_YES 1 +#define NV_DP_PARITY_BIT_ERROR_NO 0 +#define NV_DP_PARITY_BIT_ERROR_YES 1 + + +#endif // #ifndef _DISPLAYPORT_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h new file mode 100644 index 0000000..cb26349 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h @@ -0,0 +1,1501 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DPCD_H_ +#define _DPCD_H_ + +#define NV_DPCD_CAP_LEGACY_BASE (0x00000000) + +#define NV_DPCD_REV (0x00000000) /* R-XUR */ +#define NV_DPCD_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD_REV_MAJOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD_REV_MINOR_0 (0x00000000) /* R-XUV */ +#define NV_DPCD_REV_MINOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_REV_MINOR_2 (0x00000002) /* R-XUV */ +#define NV_DPCD_REV_MINOR_4 (0x00000004) /* R-XUV */ + +#define NV_DPCD_MAX_LINK_BANDWIDTH (0x00000001) /* R-XUR */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL 4:0 /* R-XUF */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_1_62_GBPS (0x00000006) /* R-XUV */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_2_70_GBPS (0x0000000a) /* R-XUV */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_5_40_GBPS (0x00000014) /* R-XUV */ + +#define NV_DPCD_MAX_LANE_COUNT (0x00000002) /* R-XUR */ +#define NV_DPCD_MAX_LANE_COUNT_LANE 4:0 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_2 (0x00000002) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_4 (0x00000004) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_8 (0x00000008) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING 7:7 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED 6:6 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_MAX_DOWNSPREAD (0x00000003) /* R-XUR */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL 0:0 /* R-XUF */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL_NONE (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL_0_5_PCT (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT 6:6 /* R-XUF */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_TRUE (0x00000001) /* R-XUV */ + +// NORP = Number of Receiver Ports = Value + 1 +#define 
NV_DPCD_NORP (0x00000004) /* R-XUR */ +#define NV_DPCD_NORP_VAL 0:0 /* R-XUF */ +#define NV_DPCD_NORP_VAL_ONE (0x00000000) /* R-XUV */ +#define NV_DPCD_NORP_VAL_TWO (0x00000001) /* R-XUV */ +#define NV_DPCD_NORP_VAL_SST_MAX (0x00000001) /* R-XUV */ +#define NV_DPCD_NORP_DP_PWR_CAP_5V 5:5 /* R-XUF */ +#define NV_DPCD_NORP_DP_PWR_CAP_12V 6:6 /* R-XUF */ +#define NV_DPCD_NORP_DP_PWR_CAP_18V 7:7 /* R-XUF */ + +#define NV_DPCD_DOWNSTREAMPORT (0x00000005) /* R-XUR */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT 0:0 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE 2:1 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_HDMI_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_OTHERS (0x00000003) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION 3:3 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE 4:4 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING (0x00000006) /* R-XUR */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B 0:0 /* R-XUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B 1:1 /* R-XUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DOWN_STREAM_PORT (0x00000007) /* R-XUR */ +#define NV_DPCD_DOWN_STREAM_PORT_COUNT 3:0 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED 6:6 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT 7:7 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RECEIVE_PORT0_CAP_0 (0x00000008) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORT1_CAP_0 (0x0000000A) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID 1:1 /* R-XUF */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT 2:2 /* R-XUF */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RECEIVE_PORT0_CAP_1 (0x00000009) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORT1_CAP_1 (0x0000000B) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORTX_CAP_1_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD_I2C_CTRL_CAP (0x0000000C) /* R-XUR */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED 7:0 /* R-XUF */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_1K (0x00000001) /* R-XUV */ 
+#define NV_DPCD_I2C_CTRL_CAP_SPEED_5K (0x00000002) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_10K (0x00000004) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_100K (0x00000008) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_400K (0x00000010) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_1M (0x00000020) /* R-XUV */ + +#define NV_DPCD_EDP_CONFIG_CAP (0x0000000D) /* R-XUR */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET 0:0 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE 1:1 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT 2:2 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL (0x0000000E) /* R-XUR */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_DEFAULT (0x00000000) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_4MS (0x00000001) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_16MS (0x00000004) /* R-XUV */ + +#define NV_DPCD_ADAPTER_CAP (0x0000000F) /* R-XUR */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE 0:0 /* R-XUF */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SUPPORTED_LINK_RATES(i) (0x00000010+(i)*2) /* R--2A */ +#define NV_DPCD_SUPPORTED_LINK_RATES__SIZE (0x00000008) /* R---S */ + +// 00010h-0001Fh: RESERVED. 
Reads all 0s + +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS (0x00000020) /* R-XUR */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768 0:0 /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768_YES (0X00000001) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720 1:1 /* R-XUV */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720_YES (0X00000001) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080 2:2 /* R-XUV */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080_YES (0X00000001) /* R-XUF */ + +#define NV_DPCD_MSTM (0x00000021) /* R-XUR */ +#define NV_DPCD_MSTM_CAP 0:0 /* R-XUF */ +#define NV_DPCD_MSTM_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MSTM_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_NUMBER_OF_AUDIO_ENDPOINTS (0x00000022) /* R-XUR */ +#define NV_DPCD_NUMBER_OF_AUDIO_ENDPOINTS_VALUE 7:0 /* R-XUF */ + +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY (0x00000023) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR 3:0 /* R-XUF */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_3MS (0x00000000) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_2MS (0x00000001) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_1MS (0x00000002) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_500US (0x00000003) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_200US (0x00000004) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_100US (0x00000005) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_10US (0x00000006) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_1US (0x00000007) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_DEFAULT NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_2MS +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR 7:4 /* R-XUF */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_3MS (0x00000000) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_2MS (0x00000001) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_1MS (0x00000002) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_500US (0x00000003) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_200US (0x00000004) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_100US (0x00000005) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_DEFAULT NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_2MS + +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEC_LAT_0 (0x00000024) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEC_LAT_1 (0x00000025) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_PP_LAT_0 (0x00000026) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_PP_LAT_1 (0x00000027) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_VID_INTER_LAT (0x00000028) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_VID_PROG_LAT (0x00000029) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_REP_LAT (0x0000002A) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_0 (0x0000002B) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_1 (0x0000002C) /* R-XUR */ 
+#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_2 (0x0000002D) /* R-XUR */ + +// 0002Eh - 0002Fh: RESERVED. Reads all 0s + +#define NV_DPCD_GUID (0x00000030) /* R-XUR */ + +// 00040h - 00053h: RESERVED. Reads all 0s + +#define NV_DPCD_RX_GTC_VALUE(i) (0x00000054+(i)) /* R--1A */ +#define NV_DPCD_RX_GTC_VALUE__SIZE 4 /* R---S */ + +#define NV_DPCD_RX_GTC_REQ (0x00000058) /* R-XUR */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ 0:0 /* R-XUF */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN 1:1 /* R-XUF */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RX_GTC_FREQ_LOCK (0x00000059) /* R-XUR */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE 0:0 /* R-XUF */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE_YES (0x00000001) /* R-XUV */ + +// 0005Ah - 0006Fh: RESERVED Read all 0s + +#define NV_DPCD_EDP_PSR_VERSION (0x00000070) /* R-XUR */ + +#define NV_DPCD_EDP_PSR_CAP (0x00000071) /* R-XUR */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED 0:0 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED_YES (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED_NO (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME 3:1 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_330US (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_275US (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_220US (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_165US (0x00000003) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_110US (0x00000004) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_55US (0x00000005) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_0US (0x00000006) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED 4:4 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED_YES (0x00000001) /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED 5:5 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED_YES (0x00000001) /* R-XUF*/ + +#define NV_DPCD_EDP_PSR2_X_GRANULARITY_L (0x00000072) /* R-XUR */ +#define NV_DPCD_EDP_PSR2_X_GRANULARITY_H (0x00000073) /* R-XUR */ +#define NV_DPCD_EDP_PSR2_Y_GRANULARITY (0x00000074) /* R-XUR */ + +// 00072h - 0007Fh: RESERVED Read all 0s + +/* + * When DETAILED_CAP_INFO_AVAILABLE = 0, 1 byte info per port. + * When DETAILED_CAP_INFO_AVAILABLE = 1, 4 bytes info per port. + * DETAILED_CAP_INFO_AVAILABLE located at 0x05h (DOWNSTREAMPORT_PRESENT), bit 5 + * + * Byte 0 definition. 
+*/ + +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT(i) (0x00000080+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT__SIZE 4 /* R---S */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE 2:0 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_HDMI (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_OTHERS_NO_EDID (0x00000004) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DP_PLUSPLUS (0x00000005) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD 3:3 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD_NOT_AWARE (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD_AWARE (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_ATTR 7:4 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_60HZ (0x00000001) /* R-XUV */ // 720x480i +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_50HZ (0x00000002) /* R-XUV */ // 720x480i +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_60HZ (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_50HZ (0x00000004) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_60HZ (0x00000005) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_50HZ (0x00000007) /* R-XUV */ + +/* + * Byte 1, Reserved for DisplayPort. + */ + +#define NV_DPCD_DETAILED_CAP_INFO_ONE(i) (0x00000081+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO__SIZE NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT__SIZE +#define NV_DPCD_DETAILED_CAP_INFO_ONE__SIZE 4 /* R---S */ +// For Analog VGA Donwstream Port. Maximum Pixel Rate in Mpixels per sec divided by 8 +#define NV_DPCD_DETAILED_CAP_INFO_VGA_MAX_PIXEL_RATE 7:0 /* R-XUF */ +/* + * For DVI/HDMI/DP++ Downstream Port, Maximum TMDS clock rate supported in Mbps divided by 2.5 + * e.g. 66 (0x42) for 165 MHz, 90 (0x5a) for 225 MHz + */ +#define NV_DPCD_DETAILED_CAP_INFO_TMDS_MAX_CLOCK_RATE 7:0 /* R-XUF */ + +// Byte 2, for VGA/DVI/HDMI/DP++ Downstream Port, reserved for DisplayPort. 
+#define NV_DPCD_DETAILED_CAP_INFO_TWO(i) (0x00000082+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_TWO__SIZE 4 /* R---S */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF 1:0 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT 4:2 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_ZERO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_9G (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_18G (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_24G (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_32G (0x00000004) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_40G (0x00000005) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_48G (0x00000006) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT 6:6 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_MAX_BPC_8 8 +#define NV_MAX_BPC_10 10 +#define NV_MAX_BPC_12 12 +#define NV_MAX_BPC_16 16 + +// Byte 3, Reserved for DisplayPort and VGA +#define NV_DPCD_DETAILED_CAP_INFO_THREE(i) (0x00000083+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_THREE__SIZE 4 /* R---S */ +// For DVI + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK 1:1 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH 2:2 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH_YES (0x00000001) /* R-XUV */ +// For HDMI and DP++ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK 0:0 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK_YES (0x00000001) /* R-XUV */ +// For HDMI-PCon + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED 1:1 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED 2:2 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED 3:3 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define 
NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED 4:4 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED 5:5 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED 6:6 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED 7:7 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED_YES (0x00000001) /* R-XUV */ + +/* +00090h - 000FFh: RESERVED for supporting up to 127 Downstream devices per Branch device. Read all 0s +Note: When DETAILED_CAP_INFO_AVAILABLE bit is set to 1, the maximum +number of Downstream ports will be limited to 32. +*/ + +#define NV_DPCD_LINK_BANDWIDTH_SET (0x00000100) /* RWXUR */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_1_62_GPBS (0x00000006) /* RWXUV */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_2_70_GPBS (0x0000000a) /* RWXUV */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_5_40_GPBS (0x00000014) /* RWXUV */ + +#define NV_DPCD_LANE_COUNT_SET (0x00000101) /* RWXUR */ +#define NV_DPCD_LANE_COUNT_SET_LANE 4:0 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_LANE_1 (0x00000001) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_LANE_2 (0x00000002) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_LANE_4 (0x00000004) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED 5:5 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING 7:7 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_TRAINING_PATTERN_SET (0x00000102) /* RWXUR */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS 1:0 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_NONE (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1 (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP2 (0x00000002) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP3 (0x00000003) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS 3:2 /* R-XUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_D10_2_TP (0x00000001) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_SYM_ERR_RATE_TP (0x00000002) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_PRBS7 (0x00000003) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES (0x00000001) /* RWXUV */ 
+#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED               5:5 /* RWXUF */
+#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_FALSE        (0x00000000) /* RWXUV */
+#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_TRUE         (0x00000001) /* RWXUV */
+#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL                       7:6 /* RWXUF */
+#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ILLEGAL_SYMBOL_ERROR (0x00000000) /* RWXUV */
+#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ERROR      (0x00000001) /* RWXUV */
+#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_ILLEGAL_SYMBOL_ERROR (0x00000002) /* RWXUV */
+
+#define NV_DPCD_TRAINING_LANE_SET(i)                                   (0x00000103+(i)) /* RW-1A */
+#define NV_DPCD_TRAINING_LANE_SET__SIZE                                4 /* RW--S */
+#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING                        1:0 /* RWXUF */
+#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING_MAX_REACHED            2:2 /* RWXUF */
+#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING_MAX_REACHED_TRUE      (0x00000001) /* RWXUV */
+#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS                          4:3 /* RWXUF */
+#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS_MAX_REACHED              5:5 /* RWXUF */
+#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS_MAX_REACHED_TRUE        (0x00000001) /* RWXUV */
+#define NV_DPCD_TRAINING_LANE0_SET                                    (0x00000103) /* RWXUR */
+
+#define NV_DPCD_MAX_VOLTAGE_SWING                                     (0x00000003) /* RWXUV */
+#define NV_DPCD_MAX_VOLTAGE_PREEMPHASIS                               (0x00000003) /* RWXUV */
+
+#define NV_DPCD_TRAINING_LANE1_SET                                    (0x00000104) /* RWXUR */
+#define NV_DPCD_TRAINING_LANE2_SET                                    (0x00000105) /* RWXUR */
+#define NV_DPCD_TRAINING_LANE3_SET                                    (0x00000106) /* RWXUR */
+#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT                       1:0 /* RWXUF */
+#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT_MAX_REACHED           2:2 /* RWXUF */
+#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT_MAX_REACHED_TRUE     (0x00000001) /* RWXUV */
+#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS                         4:3 /* RWXUF */
+#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS_MAX_REACHED             5:5 /* RWXUF */
+#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS_MAX_REACHED_TRUE       (0x00000001) /* RWXUV */
+
+#define NV_DPCD_DOWNSPREAD_CTRL                                       (0x00000107) /* RWXUR */
+#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP                             4:4 /* RWXUF */
+#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_NONE                       (0x00000000) /* RWXUV */
+#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_LESS_THAN_0_5              (0x00000001) /* RWXUV */
+#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED                 7:7 /* RWXUF */
+#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED_FALSE          (0x00000000) /* RWXUV */
+#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED_TRUE           (0x00000001) /* RWXUV */
+
+#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET                          (0x00000108) /* RWXUR */
+#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_8B_10B               0:0 /* RWXUF */
+#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_8B_10B_FALSE        (0x00000000) /* RWXUV */
+#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_8B_10B_TRUE         (0x00000001) /* RWXUV */
+#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_128B_132B            1:1 /* RWXUF */
+#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_128B_132B_FALSE     (0x00000000) /* RWXUV */
+#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_128B_132B_TRUE      (0x00000001) /* RWXUV */
+
+#define NV_DPCD_I2C_CTRL_SET                                          (0x00000109) /* RWXUR */
+#define NV_DPCD_I2C_CTRL_SET_SPEED                                     7:0 /* RWXUF */
+#define NV_DPCD_I2C_CTRL_SET_SPEED_DEFAULT                            (0x00000000) /* RWXUV */
+#define NV_DPCD_I2C_CTRL_SET_SPEED_1K                                 (0x00000001) /* RWXUV */
+#define NV_DPCD_I2C_CTRL_SET_SPEED_5K                                 (0x00000002) /* RWXUV */
+#define NV_DPCD_I2C_CTRL_SET_SPEED_10K                                (0x00000004) /* RWXUV */
+#define NV_DPCD_I2C_CTRL_SET_SPEED_100K                               (0x00000008) /* RWXUV */
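A note on reading these tables: the "hi:lo" defines are field descriptors, not values. The DRF-style helpers in nvmisc.h take them apart with the conditional-operator trick, (0 ? hi:lo) yielding the low bit and (1 ? hi:lo) the high bit. The sketch below re-creates that trick with local FLD_* stand-ins (illustrative names, not the driver's real API) and uses it to compose the LANE_COUNT_SET byte and a per-lane TRAINING_LANE_SET value; the example rate and level numbers are arbitrary.

    #include <stdio.h>

    /* Minimal stand-ins for the DRF-style helpers in nvmisc.h:
     * a "hi:lo" field define is split with the ?: trick. */
    #define FLD_BASE(f)     (0 ? f)                          /* low bit index  */
    #define FLD_EXTENT(f)   (1 ? f)                          /* high bit index */
    #define FLD_MASK(f)     (0xFFu >> (7 - FLD_EXTENT(f) + FLD_BASE(f)))
    #define FLD_GET(f, v)   (((v) >> FLD_BASE(f)) & FLD_MASK(f))
    #define FLD_PUT(f, n)   (((n) & FLD_MASK(f)) << FLD_BASE(f))

    /* Field layouts copied from the defines above. */
    #define LANE_COUNT_SET_LANE              4:0
    #define LANE_COUNT_SET_ENHANCEDFRAMING   7:7
    #define TRAINING_LANE_SET_VOLTAGE_SWING  1:0
    #define TRAINING_LANE_SET_PREEMPHASIS    4:3

    int main(void)
    {
        /* 00100h/00101h: 2.70 Gbps (code 0x0A) x 4 lanes, enhanced framing. */
        unsigned linkBw    = 0x0A;   /* NV_DPCD_LINK_BANDWIDTH_SET_VAL_2_70_GPBS */
        unsigned laneCount = FLD_PUT(LANE_COUNT_SET_LANE, 4) |
                             FLD_PUT(LANE_COUNT_SET_ENHANCEDFRAMING, 1);

        /* 00103h..00106h: swing level 1, pre-emphasis level 2 for one lane. */
        unsigned laneSet   = FLD_PUT(TRAINING_LANE_SET_VOLTAGE_SWING, 1) |
                             FLD_PUT(TRAINING_LANE_SET_PREEMPHASIS, 2);

        printf("%02X %02X %02X\n", linkBw, laneCount, laneSet); /* 0A 84 11 */
        return 0;
    }

Roughly, a source writes LINK_BANDWIDTH_SET and LANE_COUNT_SET (00100h-00101h) first, then the four TRAINING_LANE_SET bytes at 00103h-00106h while stepping TRAINING_PATTERN_SET through the training patterns.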
+#define NV_DPCD_I2C_CTRL_SET_SPEED_400K                               (0x00000010) /* RWXUV */
+#define NV_DPCD_I2C_CTRL_SET_SPEED_1M                                 (0x00000020) /* RWXUV */
+
+#define NV_DPCD_EDP_CONFIG_SET                                        (0x0000010A) /* RWXUR */
+#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET               0:0 /* RWXUF */
+#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET_DISABLE      (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET_ENABLE       (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE                          1:1 /* RWXUF */
+#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE_DISABLE                 (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE_ENABLE                  (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT                   2:2 /* RWXUF */
+#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT_DISABLE          (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT_ENABLE           (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST                         7:7 /* RWXUF */
+#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST_DISABLE                (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST_ENABLE                 (0x00000001) /* RWXUV */
+
+#define NV_DPCD_LINK_QUAL_LANE_SET(i)                                 (0x0000010B+(i)) /* RW-1A */
+#define NV_DPCD_LINK_QUAL_LANE_SET__SIZE                               4 /* RW--S */
+#define NV_DPCD_LINK_QUAL_LANE_SET_LQS                                 2:0 /* RWXUF */
+#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_NO                             (0x00000000) /* RWXUV */
+#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_D10_2                          (0x00000001) /* RWXUV */
+#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_SYM_ERR_MEASUREMENT_CNT        (0x00000002) /* RWXUV */
+#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_PRBS7                          (0x00000003) /* RWXUV */
+#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_80_BIT_CUSTOM                  (0x00000004) /* RWXUV */
+#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_HBR2                           (0x00000005) /* RWXUV */
+
+#define NV_DPCD_TRAINING_LANE0_1_SET2                                 (0x0000010F) /* RWXUR */
+#define NV_DPCD_TRAINING_LANE2_3_SET2                                 (0x00000110) /* RWXUR */
+#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2          1:0 /* RWXUF */
+#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2_MAX_REACHED 2:2 /* RWXUF */
+#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2_MAX_REACHED_TRUE (0x00000001) /* RWXUV */
+#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2     5:4 /* RWXUF */
+#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2_MAX_REACHED 6:6 /* RWXUF */
+#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2_MAX_REACHED_TRUE (0x00000001) /* RWXUV */
+
+#define NV_DPCD_MSTM_CTRL                                             (0x00000111) /* RWXUR */
+#define NV_DPCD_MSTM_CTRL_EN                                           0:0 /* RWXUF */
+#define NV_DPCD_MSTM_CTRL_EN_NO                                       (0x00000000) /* RWXUV */
+#define NV_DPCD_MSTM_CTRL_EN_YES                                      (0x00000001) /* RWXUV */
+#define NV_DPCD_MSTM_CTRL_UP_REQ_EN                                    1:1 /* RWXUF */
+#define NV_DPCD_MSTM_CTRL_UP_REQ_EN_NO                                (0x00000000) /* RWXUV */
+#define NV_DPCD_MSTM_CTRL_UP_REQ_EN_YES                               (0x00000001) /* RWXUV */
+#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC                              2:2 /* RWXUF */
+#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_NO                          (0x00000000) /* RWXUV */
+#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_YES                         (0x00000001) /* RWXUV */
+
+#define NV_DPCD_AUDIO_DELAY(i)                                        (0x00000112+(i)) /* RW-1A */
+#define NV_DPCD_AUDIO_DELAY__SIZE                                      3 /* NNNNS */
+
+#define NV_DPCD_LINK_RATE_SET                                         (0x00000115) /* RWXUR */
+#define NV_DPCD_LINK_RATE_SET_VAL                                      2:0 /* RWXUF */
+
+// 00116h - 00117h: RESERVED. Reads all 0s
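For MSTM_CTRL (00111h), a minimal sketch of switching a branch device into MST mode; dpcdWrite is a hypothetical AUX-write helper used only for illustration, not this module's interface. The bit positions come from the field defines above.

    #include <stdint.h>

    /* Hypothetical AUX-channel write helper, for illustration only. */
    extern int dpcdWrite(uint32_t addr, const uint8_t *data, uint32_t len);

    /* Put the branch into MST mode and allow sink-originated UP_REQ
     * sideband messages: EN (bit 0) and UP_REQ_EN (bit 1) of 00111h. */
    static int enableMst(void)
    {
        uint8_t mstmCtrl = (1u << 0)    /* NV_DPCD_MSTM_CTRL_EN_YES        */
                         | (1u << 1);   /* NV_DPCD_MSTM_CTRL_UP_REQ_EN_YES */
        return dpcdWrite(0x00000111 /* NV_DPCD_MSTM_CTRL */, &mstmCtrl, 1);
    }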
+
+#define NV_DPCD_UPSTREAM_DEV_DP_PWR                                   (0x00000118) /* RWXUR */
+#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED                         0:0 /* RWXUF */
+#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED_FALSE                  (0x00000000) /* RWXUV */
+#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED_TRUE                   (0x00000001) /* RWXUV */
+
+#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT                            (0x00000119) /* RWXUR */
+#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED              0:0 /* RWXUF */
+#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED_NO          (0x00000000) /* RWXUV */
+#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED_YES         (0x00000001) /* RWXUV */
+
+// 0011Ah - 0011Fh: RESERVED. Reads all 0s
+// 00126h - 00153h: RESERVED. Reads all 0s
+
+#define NV_DPCD_TX_GTC_VALUE(i)                                       (0x00000154+(i)) /* RW-1A */
+#define NV_DPCD_TX_GTC_VALUE__SIZE                                     4 /* R---S */
+
+#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW                               (0x00000158) /* RWXUR */
+#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN                             0:0 /* RWXUF */
+#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN_NO                         (0x00000000) /* RWXUV */
+#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN_YES                        (0x00000001) /* RWXUV */
+
+#define NV_DPCD_TX_GTC_FREQ_LOCK                                      (0x00000159) /* RWXUR */
+#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE                                  0:0 /* RWXUF */
+#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE_NO                              (0x00000000) /* RWXUV */
+#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE_YES                             (0x00000001) /* RWXUV */
+
+// 0015Ah - 0016Fh: RESERVED. Read all 0s
+
+#define NV_DPCD_EDP_PSR_CONFIG                                        (0x00000170) /* RWXUR */
+#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE                             0:0 /* RWXUF */
+#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE_NO                         (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE_YES                        (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE                      1:1 /* RWXUF */
+#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE_NO                  (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE_YES                 (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE                 2:2 /* RWXUF */
+#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE_NO             (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE_YES            (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION                3:3 /* RWXUF */
+#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION_IMM           (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION_SECOND        (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION              4:4 /* RWXUF */
+#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION_IMM         (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION_SECOND      (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR                    5:5 /* RWXUF */
+#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR_NO                (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR_YES               (0x00000001) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2                             6:6 /* RWXUF */
+#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2_NO                         (0x00000000) /* RWXUV */
+#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2_YES                        (0x00000001) /* RWXUV */
+
+// 00171h - 0019Fh: RESERVED. Read all 0s
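The PSR configuration bits above combine in a single byte; a sketch, with the same hypothetical dpcdWrite helper, that enables sink PSR while the source keeps the main link active, with CRC verification and an HPD IRQ on CRC mismatch:

    #include <stdint.h>

    extern int dpcdWrite(uint32_t addr, const uint8_t *data, uint32_t len); /* hypothetical */

    /* Set bits 0, 1, 2 and 5 of 00170h per the field defines above. */
    static int enablePsr(void)
    {
        uint8_t psrCfg = (1u << 0)    /* _SINK_ENABLE_YES             */
                       | (1u << 1)    /* _SOURCE_LINK_ACTIVE_YES      */
                       | (1u << 2)    /* _CRC_VERIFICATION_ACTIVE_YES */
                       | (1u << 5);   /* _HPD_IRQ_ON_CRC_ERROR_YES    */
        return dpcdWrite(0x00000170 /* NV_DPCD_EDP_PSR_CONFIG */, &psrCfg, 1);
    }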
+
+
+#define NV_DPCD_ADAPTER_CTRL                                          (0x000001A0) /* RWXUR */
+#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE                          0:0 /* RWXUF */
+#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE_NO                      (0x00000000) /* RWXUV */
+#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE_YES                     (0x00000001) /* RWXUV */
+
+#define NV_DPCD_BRANCH_DEV_CTRL                                       (0x000001A1) /* RWXUR */
+#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE                     0:0 /* RWXUF */
+#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_LONGPULSE          (0x00000000) /* RWXUV */
+#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_IRQ_HPD            (0x00000001) /* RWXUV */
+#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_DEFAULT            NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_LONGPULSE
+
+// 001A2h - 001BFh: RESERVED. Read all 0s
+
+#define NV_DPCD_PAYLOAD_ALLOC_SET                                     (0x000001C0) /* RWXUR */
+#define NV_DPCD_PAYLOAD_ALLOC_SET_PAYLOAD_ID                           6:0 /* RWXUF */
+
+#define NV_DPCD_PAYLOAD_ALLOC_START_TIME_SLOT                         (0x000001C1) /* RWXUR */
+#define NV_DPCD_PAYLOAD_ALLOC_START_TIME_SLOT_VAL                      5:0 /* RWXUF */
+
+#define NV_DPCD_PAYLOAD_ALLOC_TIME_SLOT_COUNT                         (0x000001C2) /* RWXUR */
+#define NV_DPCD_PAYLOAD_ALLOC_TIME_SLOT_COUNT_VAL                      5:0 /* RWXUF */
+
+// 001C3h - 001FFh: RESERVED. Reads all 0s
+
+#define NV_DPCD_SINK_COUNT                                            (0x00000200) /* R-XUR */
+// Bits 7 and 5:0 = SINK_COUNT
+#define NV_DPCD_SINK_COUNT_VAL_BIT_05_MASK                            (0x3F)
+#define NV_DPCD_SINK_COUNT_VAL_BIT_7                                  (0x80)
+#define NV_DPCD_SINK_COUNT_VAL(x)        ((x & NV_DPCD_SINK_COUNT_VAL_BIT_05_MASK) \
+                                          | ((x & NV_DPCD_SINK_COUNT_VAL_BIT_7) >> 1))
+#define NV_DPCD_SINK_COUNT_CP_READY                                    6:6 /* R-XUF */
+#define NV_DPCD_SINK_COUNT_CP_READY_NO                                (0x00000000) /* R-XUV */
+#define NV_DPCD_SINK_COUNT_CP_READY_YES                               (0x00000001) /* R-XUV */
+
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR                             (0x00000201) /* RWXUR */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL                  0:0 /* RWXUF */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL_NO              (0x00000000) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL_YES             (0x00000001) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST                    1:1 /* RWXUF */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST_NO                (0x00000000) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST_YES               (0x00000001) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP                           2:2 /* RWXUF */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_NO                       (0x00000000) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_YES                      (0x00000001) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ                     3:3 /* RWXUF */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ_NO                 (0x00000000) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ_YES                (0x00000001) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY             4:4 /* RWXUF */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY_NO         (0x00000000) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY_YES        (0x00000001) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY               5:5 /* RWXUF */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY_NO           (0x00000000) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY_YES          (0x00000001) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ            6:6 /* RWXUF */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ_NO        (0x00000000) /* RWXUV */
+#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ_YES       (0x00000001) /* RWXUV */
+
+#define NV_DPCD_LANE0_1_STATUS                                        (0x00000202) /* R-XUR */
+
+#define NV_DPCD_LANE2_3_STATUS                                        (0x00000203) /* R-XUR */
+#define
NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE 1:1 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED 2:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE 4:4 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE 5:5 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED 6:6 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED (0x00000204) /* R-XUR */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS 1:1 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_D0WNSTRM_PORT_STATUS_DONE 6:6 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_D0WNSTRM_PORT_STATUS_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_D0WNSTRM_PORT_STATUS_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED 7:7 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SINK_STATUS (0x00000205) /* R-XUR */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS 0:0 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS 1:1 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE0_1_ADJUST_REQ (0x00000206) /* R-XUR */ +#define NV_DPCD_LANE2_3_ADJUST_REQ (0x00000207) /* R-XUR */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEX_DRIVE_CURRENT 1:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEX_PREEMPHASIS 3:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_DRIVE_CURRENT 5:4 /* R-XUF */ +#define 
NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_PREEMPHASIS 7:6 /* R-XUF */ + +#define NV_DPCD_TRAINING_SCORE_LANE(i) (0x00000208+(i)) /* R--1A */ +#define NV_DPCD_TRAINING_SCORE_LANE__SIZE 4 /* R---S */ + +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2 (0x0000020C) /* R-XUR */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE(i) i%4*2+1:i%4*2 +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE0 1:0 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE1 3:2 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE2 5:4 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE3 7:6 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE4 1:0 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE5 3:2 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE6 5:4 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE7 7:6 /* R-XUF */ + +// 0020Fh: RESERVED. Read all 0s + +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0(i) (0x00000210+(i)*2) /* R--1A */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0__SIZE 4 /* R---S */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0_VALUE 7:0 /* R-XUF */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1(i) (0x00000211+(i)*2) /* R--1A */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1__SIZE 4 /* R---S */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1_VALID 7:7 /* R-XUF */ + +#define NV_DPCD_TEST_REQUEST (0x00000218) /* R-XUR */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING 0:0 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ 2:2 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN 3:3 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_PHY_TEST_CHANNEL_CODING 5:4 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_PHY_TEST_CHANNEL_CODING_8B10B (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_PHY_TEST_CHANNEL_CODING_128B132B (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_PATTERN_REQ 6:6 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_PATTERN_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_PATTERN_REQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_DISABLED_VIDEO 7:7 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_DISABLED_VIDEO_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_DISABLED_VIDEO_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_TEST_LINK_RATE (0x00000219) /* R-XUR */ +#define NV_DPCD_TEST_LINK_RATE_TYPE 7:0 /* R-XUF */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_1_62G (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_2_70G (0x0000000A) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_5_40G (0x00000014) /* R-XUV */ +// +// For PHY Test 128b/132b channel coding (PHY_TEST_CHANNEL_CODING field in +// the TEST_REQUEST register (DPCD Address 00218h, bits 5:4) is programmed to 01b) +// +#define NV_DPCD_TEST_LINK_RATE_TYPE_UHBR10 (0x00000001) 
/* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_UHBR20 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_UHBR135 (0x00000004) /* R-XUV */ + +// 0021Ah - 0021Fh: RESERVED. Read all 0s + +#define NV_DPCD_TEST_LANE_COUNT (0x00000220) /* R-XUR */ +#define NV_DPCD_TEST_LANE_COUNT_VALUE 4:0 /* R-XUF */ + +#define NV_DPCD_TEST_PATTERN (0x00000221) /* R-XUR */ +#define NV_DPCD_TEST_PATTERN_TYPE 1:0 /* R-XUF */ +#define NV_DPCD_TEST_PATTERN_TYPE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_COLOR_RAMPS (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_BW_VERTICAL_LINES (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_COLOR_SQUARES (0x00000003) /* R-XUV */ + +#define NV_DPCD_TEST_H_TOTAL_HIGH_BYTE (0x00000222) /* R-XUR */ +#define NV_DPCD_TEST_H_TOTAL_LOW_BYTE (0x00000223) /* R-XUR */ + +#define NV_DPCD_TEST_V_TOTAL_HIGH_BYTE (0x00000224) /* R-XUR */ +#define NV_DPCD_TEST_V_TOTAL_LOW_BYTE (0x00000225) /* R-XUR */ + +#define NV_DPCD_TEST_H_START_HIGH_BYTE (0x00000226) /* R-XUR */ +#define NV_DPCD_TEST_H_START_LOW_BYTE (0x00000227) /* R-XUR */ + +#define NV_DPCD_TEST_V_START_HIGH_BYTE (0x00000228) /* R-XUR */ +#define NV_DPCD_TEST_V_START_LOW_BYTE (0x00000229) /* R-XUR */ + +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE (0x0000022A) /* R-XUR */ +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE_POLARITY 7:7 /* R-XUF */ +#define NV_DPCD_TEST_HSYNC_LOW_BYTE (0x0000022B) /* R-XUR */ + +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE (0x0000022C) /* R-XUR */ +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE_POLARITY 7:7 /* R-XUF */ +#define NV_DPCD_TEST_VSYNC_LOW_BYTE (0x0000022D) /* R-XUR */ + +#define NV_DPCD_TEST_H_WIDTH_HIGH_BYTE (0x0000022E) /* R-XUR */ +#define NV_DPCD_TEST_H_WIDTH_LOW_BYTE (0x0000022F) /* R-XUR */ + +#define NV_DPCD_TEST_V_HEIGHT_HIGH_BYTE (0x00000230) /* R-XUR */ +#define NV_DPCD_TEST_V_HEIGHT_LOW_BYTE (0x00000231) /* R-XUR */ + +#define NV_DPCD_TEST_MISC0 (0x00000232) /* R-XUR */ +#define NV_DPCD_TEST_MISC0_TEST_SYNC_CLOCK 0:0 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT 2:1 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_RGB (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_4_2_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_4_4_4 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_RESERVED (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_DYNAMIC_RANGE 3:3 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_YCBCR_COEFF 4:4 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH 7:5 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_6BITS (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_8BITS (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_10BITS (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_12BITS (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_16BITS (0x00000004) /* R-XUV */ + +#define NV_DPCD_TEST_MISC1 (0x00000233) /* R-XUR */ +#define NV_DPCD_TEST_MISC1_TEST_REFRESH_DENOMINATOR 0:0 /* R-XUF */ +#define NV_DPCD_TEST_MISC1_TEST_INTERLACED 1:1 /* R-XUF */ + +#define NV_DPCD_TEST_REFRESH_RATE_NUMERATOR (0x00000234) /* R-XUR */ + +// 00235h - 0023Fh: RESERVED for test automation extensions. 
Reads all 0s + +#define NV_DPCD_TEST_CRC_R_Cr_LOW_BYTE (0x00000240) /* R-XUR */ +#define NV_DPCD_TEST_CRC_R_Cr_HIGH_BYTE (0x00000241) /* R-XUR */ + +#define NV_DPCD_TEST_CRC_G_Y_LOW_BYTE (0x00000242) /* R-XUR */ +#define NV_DPCD_TEST_CRC_G_Y_HIGH_BYTE (0x00000243) /* R-XUR */ + +#define NV_DPCD_TEST_CRC_B_Cb_LOW_BYTE (0x00000244) /* R-XUR */ +#define NV_DPCD_TEST_CRC_B_Cb_HIGH_BYTE (0x00000245) /* R-XUR */ + +#define NV_DPCD_TEST_SINK_MISC (0x00000246) /* R-XUR */ +#define NV_DPCD_TEST_SINK_TEST_CRC_COUNT 3:0 /* R-XUF */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED 5:5 /* R-XUF */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED_NO (0X00000000) /* R-XUV */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED_YES (0X00000001) /* R-XUV */ + +//00247h: RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_PHY_TEST_PATTERN (0x00000248) /* R-XUR */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_DP11 1:0 /* R-XUF */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_DP12 2:0 /* R-XUF */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_D10_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_SYM_ERR_MEASUREMENT_CNT (0x00000002) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_PRBS7 (0x00000003) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_80_BIT_CUSTOM (0x00000004) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_HBR2_COMPLIANCE_EYE (0x00000005) /* R-XUV */ + +#define NV_DPCD_HBR2_COMPLIANCE_SCRAMBLER_RESET_LOW_BYTE (0x0000024A) /* R-XUV */ +#define NV_DPCD_HBR2_COMPLIANCE_SCRAMBLER_RESET_HIGH_BYTE (0x0000024B) /* R-XUV */ + +// 0024Ch - 0024Fh RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_TEST_80BIT_CUSTOM_PATTERN(i) (0x00000250+(i)) /* R--1A */ +#define NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE 10 /* R---S */ + +// 0025Ah - 0025Fh: RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_TEST_RESPONSE (0x00000260) /* RWXUR */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK 0:0 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK 1:1 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE 2:2 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_TEST_EDID_CHKSUM (0x00000261) /* RWXUR */ + +// 00263h - 0026Fh: RESERVED for test automation extensions Read all 0s. 
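The TEST_RESPONSE register closes the loop on TEST_REQUEST automation. A sketch of acknowledging a TEST_EDID_READ request, assuming the same hypothetical dpcdWrite helper: the source publishes the EDID block checksum in 00261h, then sets TEST_ACK together with TEST_EDID_CHKSUM_WRITE.

    #include <stdint.h>

    extern int dpcdWrite(uint32_t addr, const uint8_t *data, uint32_t len); /* hypothetical */

    static int ackEdidReadTest(uint8_t edidChecksum)
    {
        uint8_t resp = (1u << 0)    /* NV_DPCD_TEST_RESPONSE_TEST_ACK_YES               */
                     | (1u << 2);   /* NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE_YES */

        if (dpcdWrite(0x00000261 /* NV_DPCD_TEST_EDID_CHKSUM */, &edidChecksum, 1) != 0)
            return -1;
        return dpcdWrite(0x00000260 /* NV_DPCD_TEST_RESPONSE */, &resp, 1);
    }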
+
+#define NV_DPCD_TEST_SINK                                             (0x00000270) /* RWXUR */
+#define NV_DPCD_TEST_SINK_START                                        0:0 /* RWXUF */
+#define NV_DPCD_TEST_SINK_START_NO                                    (0x00000000) /* RWXUV */
+#define NV_DPCD_TEST_SINK_START_YES                                   (0x00000001) /* RWXUV */
+#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_SEL                       5:4 /* RWXUF */
+#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN                        7:7 /* RWXUF */
+#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN_DISABLE               (0x00000000) /* RWXUV */
+#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN_ENABLE                (0x00000001) /* RWXUV */
+
+#define NV_DPCD_TEST_AUDIO_MODE                                       (0x00000271) /* R-XUR */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE                          3:0 /* R-XUF */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_32_0KHZ                 (0x00000000) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_44_1KHZ                 (0x00000001) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_48_0KHZ                 (0x00000002) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_88_2KHZ                 (0x00000003) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_96_0KHZ                 (0x00000004) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_176_4KHZ                (0x00000005) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_192_0KHZ                (0x00000006) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT                          7:4 /* R-XUF */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_1                       (0x00000000) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_2                       (0x00000001) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_3                       (0x00000002) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_4                       (0x00000003) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_5                       (0x00000004) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_6                       (0x00000005) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_7                       (0x00000006) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_8                       (0x00000007) /* R-XUV */
+
+#define NV_DPCD_TEST_AUDIO_PATTERN                                    (0x00000272) /* R-XUR */
+#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE                                7:0 /* R-XUF */
+#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE_OP_DEFINED                    (0x00000000) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE_SAWTOOTH                      (0x00000001) /* R-XUV */
+
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH(i)                               (0x00000273+(i)) /* R--1A */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH__SIZE                             8 /* R---S */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES                           3:0 /* R-XUF */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_UNUSED                   (0x00000000) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_3                        (0x00000001) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_6                        (0x00000002) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_12                       (0x00000003) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_24                       (0x00000004) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_48                       (0x00000005) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_96                       (0x00000006) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_192                      (0x00000007) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_384                      (0x00000008) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_768                      (0x00000009) /* R-XUV */
+#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_1536                     (0x0000000A) /* R-XUV */
+
+// 0027Bh - 0027Fh: RESERVED. Read all 0s
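Decoding TEST_AUDIO_MODE is a direct table lookup: the sampling rate sits in bits 3:0 per the encodings above, and the channel count in bits 7:4 is stored as (channels - 1). A self-contained sketch (the rate strings are just labels for those encodings):

    #include <stdint.h>
    #include <stdio.h>

    static void decodeTestAudioMode(uint8_t mode)
    {
        static const char *rates[] = {
            "32.0", "44.1", "48.0", "88.2", "96.0", "176.4", "192.0"
        };
        unsigned rate     = mode & 0x0F;               /* _SAMPLING_RATE */
        unsigned channels = ((mode >> 4) & 0x0F) + 1;  /* _CHANNEL_COUNT */

        if (rate < sizeof(rates) / sizeof(rates[0]))
            printf("%s kHz, %u channel(s)\n", rates[rate], channels);
        else
            printf("reserved sampling-rate code %u\n", rate);
    }

    int main(void)
    {
        decodeTestAudioMode(0x12);  /* prints: 48.0 kHz, 2 channel(s) */
        return 0;
    }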
+
+// For DP version 1.3 and above
+#define NV_DPCD_FEC_STATUS                                            (0x00000280) /* R-XUR */
+#define NV_DPCD_FEC_STATUS_DECODE_EN                                   0:0 /* R-XUF */
+#define NV_DPCD_FEC_STATUS_DECODE_EN_NOT_DETECTED                     (0x00000000) /* R-XUV */
+#define NV_DPCD_FEC_STATUS_DECODE_EN_DETECTED                         (0x00000001) /* R-XUV */
+#define NV_DPCD_FEC_STATUS_DECODE_DIS                                  1:1 /* R-XUF */
+#define NV_DPCD_FEC_STATUS_DECODE_DIS_NOT_DETECTED                    (0x00000000) /* R-XUV */
+#define NV_DPCD_FEC_STATUS_DECODE_DIS_DETECTED                        (0x00000001) /* R-XUV */
+
+
+// 00283h - 002BFh: RESERVED. Read all 0s.
+
+#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS                           (0x000002C0) /* R-XUR */
+#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED                    0:0 /* R-XUF */
+#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_NO                (0x00000000) /* R-XUV */
+#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_YES               (0x00000001) /* R-XUV */
+#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED                1:1 /* R-XUF */
+#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_NO            (0x00000000) /* R-XUV */
+#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_YES           (0x00000001) /* R-XUV */
+
+#define NV_DPCD_VC_PAYLOAD_ID_SLOT(i)                                 (0x000002C1+(i)) /* R--1A */
+#define NV_DPCD_VC_PAYLOAD_ID_SLOT__SIZE                               63 /* R---S */
+
+// Source Device-Specific Field, Burst write for 00300h-0030Bh
+// 6 hex digits: 0x300~0x302.
+#define NV_DPCD_SOURCE_IEEE_OUI                                       (0x00000300) /* RWXUR */
+#define NV_DPCD_OUI_NVIDIA_LITTLE_ENDIAN                               0x4B0400
+
+// 6 bytes: 0x303~0x308
+#define NV_DPCD_SOURCE_DEV_ID_STRING(i)                               (0x00000303+(i)) /* RW-1A */
+#define NV_DPCD_SOURCE_DEV_ID_STRING__SIZE                             6 /* RW--S */
+
+#define NV_DPCD_SOURCE_HARDWARE_REV                                   (0x00000309) /* RWXUR */
+#define NV_DPCD_SOURCE_HARDWARE_REV_MINOR                              3:0 /* RWXUF */
+#define NV_DPCD_SOURCE_HARDWARE_REV_MAJOR                              7:4 /* RWXUF */
+
+#define NV_DPCD_SOURCE_SOFTWARE_REV_MAJOR                             (0x0000030A) /* RWXUR */
+#define NV_DPCD_SOURCE_SOFTWARE_REV_MINOR                             (0x0000030B) /* RWXUR */
+
+// Sink Device-Specific Field. Read Only
+// 6 hex digits: 0x400~0x402
+#define NV_DPCD_SINK_IEEE_OUI                                         (0x00000400) /* R-XUR */
+
+// 6 bytes: 0x403~0x408
+#define NV_DPCD_SINK_DEV_ID_STRING(i)                                 (0x00000403+(i)) /* R--1A */
+#define NV_DPCD_SINK_DEV_ID_STRING__SIZE                               6 /* R---S */
+
+#define NV_DPCD_SINK_HARDWARE_REV                                     (0x00000409) /* R-XUR */
+#define NV_DPCD_SINK_HARDWARE_REV_MINOR                                3:0 /* R-XUF */
+#define NV_DPCD_SINK_HARDWARE_REV_MAJOR                                7:4 /* R-XUF */
+
+#define NV_DPCD_SINK_SOFTWARE_REV_MAJOR                               (0x0000040A) /* R-XUR */
+#define NV_DPCD_SINK_SOFTWARE_REV_MINOR                               (0x0000040B) /* R-XUR */
+
+// Branch Device-Specific Field
+// 6 hex digits: 0x500~0x502
+
+#define NV_DPCD_BRANCH_IEEE_OUI                                       (0x00000500) /* R-XUR */
+
+// 6 bytes: 0x503~0x508
+#define NV_DPCD_BRANCH_DEV_ID_STRING(i)                               (0x00000503+(i)) /* R--1A */
+#define NV_DPCD_BRANCH_DEV_ID_STRING__SIZE                             6 /* R---S */
+
+#define NV_DPCD_BRANCH_HARDWARE_REV                                   (0x00000509) /* R-XUR */
+#define NV_DPCD_BRANCH_HARDWARE_REV_MINOR                              3:0 /* R-XUF */
+#define NV_DPCD_BRANCH_HARDWARE_REV_MAJOR                              7:4 /* R-XUF */
+
+#define NV_DPCD_BRANCH_SOFTWARE_REV_MAJOR                             (0x0000050A) /* R-XUR */
+#define NV_DPCD_BRANCH_SOFTWARE_REV_MINOR                             (0x0000050B) /* R-XUR */
+
+// Sink Control Field
+#define NV_DPCD_SET_POWER                                             (0x00000600) /* RWXUR */
+#define NV_DPCD_SET_POWER_VAL                                          2:0 /* RWXUF */
+#define NV_DPCD_SET_POWER_VAL_RESERVED                                (0x00000000) /* RWXUV */
+#define NV_DPCD_SET_POWER_VAL_D0_NORMAL                               (0x00000001) /* RWXUV */
+#define NV_DPCD_SET_POWER_VAL_D3_PWRDWN                               (0x00000002) /* RWXUV */
+#define NV_DPCD_SET_POWER_VAL_D3_AUX_ON                               (0x00000005) /* RWXUV */
+
+/*
+ * 00601h - 006FFh: RESERVED.
Read all 0s + */ + +// * 00700h - 007FFh: RESERVED for eDP, see eDP v1.4 and above +#define NV_DPCD_EDP_REV (0x00000700) /* R-XUR */ +#define NV_DPCD_EDP_REV_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_REV_VAL_1_1_OR_LOWER (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_3 (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4 (0x00000003) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4A (0x00000004) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4B (0x00000005) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1 (0x00000701) /* R-XUR */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP 1:1 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP 2:2 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP 4:4 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP 5:5 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP 6:6 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP 7:7 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP (0x00000702) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP 1:1 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT 2:2 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT_2B (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT_1B (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP 4:4 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP 5:5 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP 6:6 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP 7:7 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP_VBL (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP_IMM (0x00000000) /* R-XUV */ +#define NV_DPCP_EDP_GENERAL_CAP2 (0x00000703) /* R-XUR */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT 2:1 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_MSB (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_LSB (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3 (0x00000704) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3_X_REGION_CAP 3:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP3_X_REGION_CAP_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3_Y_REGION_CAP 7:4 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP3_Y_REGION_CAP_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_DISPLAY_CTL (0x00000720) /* RWXUR */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN 0:0 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN 1:1 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_FRC_EN 2:2 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_FRC_EN_2BIT (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN 3:3 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL 5:4 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_AUTONOMOUS (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_DISABLE (0x00000002) 
/* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_ENABLE (0x00000003) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN 7:7 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET (0x00000721) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE 1:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PWM_PIN (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PRESET_LV (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_AUX (0x00000002) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PWM_AND_AUX (0x00000003) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN 2:2 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN 3:3 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN 4:4 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN 5:5 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS 6:6 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB (0x00000722) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB (0x00000723) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT (0x00000724) /* RWXUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN (0x00000725) /* R-XUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN_VAL 4:0 /* R-XUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX (0x00000726) /* R-XUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX_VAL 4:0 /* R-XUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX_VAL_INIT (0x00000000) /* R-XUV */ 
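Brightness is a 16-bit value split across BKLGHT_BRIGHTNESS_MSB/LSB (00722h/00723h). A sketch, again with the hypothetical dpcdWrite helper, writing both bytes in one AUX burst so the panel sees a consistent value; this applies only when BRIGHT_CTL_MODE in 00721h selects _AUX, and a panel reporting _BRIGHT_AUX_BYTE_CNT_1B in its adjust caps ignores the LSB.

    #include <stdint.h>

    extern int dpcdWrite(uint32_t addr, const uint8_t *data, uint32_t len); /* hypothetical */

    static int setAuxBacklight(uint16_t level)
    {
        uint8_t bytes[2];
        bytes[0] = (uint8_t)(level >> 8);   /* NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB */
        bytes[1] = (uint8_t)(level & 0xFF); /* NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB */
        return dpcdWrite(0x00000722, bytes, 2);
    }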
+#define NV_DPCD_EDP_BKLGHT_CTL_STATUS (0x00000727) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION 0:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_FAULT (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_NORMAL (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET (0x00000728) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB (0x0000072A) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID (0x0000072B) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB (0x0000072C) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB_VAL 1:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB (0x0000072D) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID (0x0000072E) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB (0x0000072F) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB_VAL 1:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET (0x00000732) /* RWXUR */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_SET (0x00000733) /* RWXUR */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_CAP_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_CAP_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE (0x00000740) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE_INDEX_OFFSET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE_INDEX_OFFSET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0 (0x00000741) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1 (0x00000742) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2 (0x00000743) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3 (0x00000744) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4 
(0x00000745) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5 (0x00000746) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6 (0x00000747) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7 (0x00000748) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8 (0x00000749) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9 (0x0000074A) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10 (0x0000074B) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11 (0x0000074C) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12 (0x0000074D) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13 (0x0000074E) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14 (0x0000074F) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14_VAL_INIT (0x00000000) /* RWXUV */ + +/* + * 00800h - 00FFFh: RESERVED. 
Read all 0s + */ + +// Sideband MSG Buffers +#define NV_DPCD_MBOX_DOWN_REQ (0x00001000) /* RWXUR */ +#define NV_DPCD_MBOX_UP_REP (0x00001200) /* RWXUR */ +#define NV_DPCD_MBOX_DOWN_REP (0x00001400) /* R-XUR */ +#define NV_DPCD_MBOX_UP_REQ (0x00001600) /* R-XUR */ + +// 0x2000 & 0x2001 : RESERVED for USB-over-AUX + +// ESI (Event Status Indicator) Field +#define NV_DPCD_SINK_COUNT_ESI (0x00002002) /* R-XUR */ +#define NV_DPCD_SINK_COUNT_ESI_SINK_COUNT 5:0 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY 6:6 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0 (0x00002003) /* R-XUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL 0:0 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST 1:1 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST_YES (0x00000001) /* R-XUV */ +// for eDP v1.4 & v1.4a only +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ 1:1 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP 2:2 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ 3:3 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY 4:4 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY 5:5 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ 6:6 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1 (0x00002004) /* R-XUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE 0:0 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0 (0x00002005) /* R-XUR */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED 0:0 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED_YES (0x00000001) /* R-XUV */ +#define 
NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED 1:1 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED 2:2 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED 3:3 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ 4:4 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS (0x00002006) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR 0:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR 1:1 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR 2:2 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS (0x00002007) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE 0:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS (0x00002008) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL 2:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_INACTIVE (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_TRANSITION_TO_ACTIVE (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_ACTIVE_DISP_FROM_RFB (0x00000002) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_ACTIVE_SINK_DEV_TIMING (0x00000003) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_TRANSITION_TO_INACTIVE (0x00000004) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_RESERVED0 (0x00000005) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_RESERVED1 (0x00000006) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_SINK_DEV_INTERNAL_ERR (0x00000007) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0 (0x00002009) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0_MAX_RESYNC_FRAME_CNT 3:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0_LAST_RESYNC_FRAME_CNT 7:4 /* R-XUF */ + +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG1 (0x0000200A) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP (0x0000200A) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_PSR_STATE_BIT 0:0 /* R-XUF */ +#define 
NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_RFB_BIT 1:1 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_CRC_VALID_BIT 2:2 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_VALID_BIT 3:3 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_FIRST_LINE_RCVD 4:4 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_LAST_LINE_RCVD 5:5 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_Y_CORD_VALID 6:6 /* R-XUF */ + +// 0200Bh: RESERVED. Read all 0s + +#define NV_DPCD_LANE0_1_STATUS_ESI (0x0000200C) /* R-XUR */ +#define NV_DPCD_LANE2_3_STATUS_ESI (0x0000200D) /* R-XUR */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE 1:1 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED 2:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE 4:4 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE 5:5 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED 6:6 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI (0x0000200E) /* R-XUR */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE 6:6 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED 7:7 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SINK_STATUS_ESI (0x0000200F) /* R-XUR */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS 0:0 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS 1:1 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */
+
+// 0x00002010-0x00002025: RESERVED. Read all 0s
+
+#define NV_DPCD_OVERDRIVE_STATUS (0x00002026) /* R-XUR */
+#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS 0:0 /* R-XUF */
+#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS_NOT_ACTIVE (0x00000000) /* R-XUV */
+#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS_ACTIVE (0x00000001) /* R-XUV */
+
+// 0x00002027-0x00067FFF: RESERVED. Read all 0s
+
+#define NV_DPCD_HDCP_BKSV_OFFSET (0x00068000) /* R-XUR */
+#define NV_DPCD_HDCP_RPRIME_OFFSET (0x00068005) /* R-XUR */
+#define NV_DPCD_HDCP_AKSV_OFFSET (0x00068007) /* RWXUR */
+#define NV_DPCD_HDCP_AN_OFFSET (0x0006800C) /* RWXUR */
+#define NV_DPCD_HDCP_BKSV_S_OFFSET (0x00000300) /* RWXUV */
+#define NV_DPCD_HDCP_RPRIME_S_OFFSET (0x00000305) /* RWXUV */
+#define NV_DPCD_HDCP_AKSV_S_OFFSET (0x00000307) /* RWXUV */
+#define NV_DPCD_HDCP_AN_S_OFFSET (0x0000030C) /* RWXUV */
+#define NV_DPCD_HDCP_VPRIME_OFFSET (0x00068014) /* R-XUR */
+#define NV_DPCD_HDCP_BCAPS_OFFSET (0x00068028) /* R-XUR */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE 0:0 /* R-XUF */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER 1:1 /* R-XUF */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_NO (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_YES (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_OFFSET (0x00068029) /* R-XUR */
+#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST 3:3 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE 2:2 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE 1:1 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_READY 0:0 /* R-XUF */
+#define NV_DPCD_HDCP_BSTATUS_READY_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BSTATUS_READY_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET (0x0006802A) /* R-XUR */
+#define NV_DPCD_HDCP_BINFO_OFFSET_DEVICE_COUNT 6:0 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED 7:7 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED_TRUE (0x00000001) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET_DEPTH 10:8 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED 11:11 /* R-XUF */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED_FALSE (0x00000000) /* R-XUV */
+#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED_TRUE (0x00000001) /* R-XUV */
+
+#define NV_DPCD_HDCP_KSV_FIFO_OFFSET (0x0006802C) /* R-XUR */
+
+#define NV_DPCD_HDCP_AINFO_OFFSET (0x0006803B) /* RWXUR */
+#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD 0:0 /* RWXUF */
+#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD_NO (0x00000000) /* RWXUV */
+#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD_YES (0x00000001) /* RWXUV */
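Editor's note: the hi:lo tokens in the field defines above encode the msb:lsb positions of a bitfield and are consumed through DRF-style helper macros, where the ternary trick (1 ? msb:lsb) selects the msb and (0 ? msb:lsb) the lsb. A minimal illustrative sketch follows; the FIELD_* macros and hdcpParseBinfo() are hypothetical names, not the driver's actual helpers, and BINFO is assumed to have been read as two bytes from DPCD 0x6802A/0x6802B into one 16-bit value.

    /* Illustrative DRF-style helpers over the msb:lsb field tokens. */
    #define FIELD_LSB(f)    (0 ? f)   /* (0 ? 10:8) evaluates to 8  */
    #define FIELD_MSB(f)    (1 ? f)   /* (1 ? 10:8) evaluates to 10 */
    #define FIELD_MASK(f)   ((0xFFFFFFFFU >> (31 - FIELD_MSB(f) + FIELD_LSB(f))) << FIELD_LSB(f))
    #define FIELD_VAL(f, v) (((v) & FIELD_MASK(f)) >> FIELD_LSB(f))

    /* Decode the two-byte HDCP BINFO value: byte(0x6802A) | byte(0x6802B) << 8. */
    static void hdcpParseBinfo(unsigned int binfo,
                               unsigned int *devCount, unsigned int *depth)
    {
        *devCount = FIELD_VAL(NV_DPCD_HDCP_BINFO_OFFSET_DEVICE_COUNT, binfo); /* bits 6:0  */
        *depth    = FIELD_VAL(NV_DPCD_HDCP_BINFO_OFFSET_DEPTH, binfo);        /* bits 10:8 */
    }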
+
+// Eight-Lane DP Specific DPCD defines
+#define NV_DPCD_SL_TRAINING_LANE0_1_SET2(baseAddr) (baseAddr + 0x0000010E) /* RWXUR */
+#define NV_DPCD_SL_TRAINING_LANE2_3_SET2(baseAddr) (baseAddr + 0x0000010F) /* RWXUR */
+#define NV_DPCD_SL_LANE4_5_STATUS(baseAddr) (baseAddr + 0x00000202) /* R-XUR */
+#define NV_DPCD_SL_LANE6_7_STATUS(baseAddr) (baseAddr + 0x00000203) /* R-XUR */
+#define NV_DPCD_DUAL_DP_CAP (0x000003B0) /* RWXUR */ // Dual DP Capability Register
+#define NV_DPCD_DUAL_DP_CAP_DDC 0:0 /* RWXUF */ // Dual DP Capability
+#define NV_DPCD_DUAL_DP_CAP_DDC_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDC_ENABLE (0x00000001) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDCIC 1:1 /* RWXUF */ // DDCIC : Dual DP Column Interleave Mode Capability
+#define NV_DPCD_DUAL_DP_CAP_DDCIC_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDCIC_ENABLE (0x00000001) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDPSBSC 2:2 /* RWXUF */ // DDPSBSC : Dual DP Pixel Side-by-Side Mode Capability
+#define NV_DPCD_DUAL_DP_CAP_DDPSBSC_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD_DUAL_DP_CAP_DDPSBSC_ENABLE (0x00000001) /* RWXUV */
+
+#define NV_DPCD_DUAL_DP_BASE_ADDRESS 19:0 /* RWXUF */
+#define NV_DPCD_DUAL_DP_COLUMN_WIDTH 15:0 /* RWXUF */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT 4:0 /* RWXUF */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_1H 0x1 /* RWXUV */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_2H 0x2 /* RWXUV */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_4H 0x4 /* RWXUV */
+#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_8H 0x8 /* RWXUV */
+
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL(baseAddr) (baseAddr + 0x00000110) /* RWXUR */ // Dual Link Control Register
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE 1:0 /* RWXUF */ // PIX_MODE : Pixel mode select
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE_SIDE_BY_SIDE (0x00000000) /* RWXUV */ // Side by side Mode enabled
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE_COL_INTERLEAVE (0x00000001) /* RWXUV */ // Column Interleave Mode enabled
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE 7:7 /* RWXUF */ // DD_ENABLE: Enable Dual DP mode.
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE_FALSE (0x00000000) /* RWXUV */ + +#define NV_DPCD_DUAL_DP_PIXEL_OVERLAP(baseAddr) (baseAddr + 0x00000111) /* RWXUR */ // PIXEL_OVERLAP Register +#define NV_DPCD_DUAL_DP_PIXEL_OVERLAP_IGNORE_PIX_COUNT 6:0 /* RWXUF */ // Ignore Pix Count - Number of pixels to ignore + +#define NV_DPCD_HDCP22_BCAPS_OFFSET (0x0006921D) /* R-XUR */ +#define NV_DPCD_HDCP22_BCAPS_SIZE (0x00000003) /* R---S */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER 0:0 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE 1:1 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_RECEIVER_CAPABILITY_MASK 15:2 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_RECEIVER_CAPABILITY_MASK_RESERVED (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_VERSION 23:16 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_VERSION_22 (0x00000002) /* R-XUV */ + +#define NV_DPCD_HDCP22_BINFO_OFFSET (0x00069330) /* R-XUR */ +#define NV_DPCD_HDCP22_BINFO_SIZE (0x00000002) /* R---S */ + +#define NV_DPCD_HDCP22_RX_STATUS (0x00069493) /* R-XUR */ +#define NV_DPCD_HDCP22_RX_STATUS_SIZE (0x00000001) /* R---S */ +#define NV_DPCD_HDCP22_RX_STATUS_READY 0:0 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_READY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE 1:1 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE 2:2 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST 3:3 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE 4:4 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE_NO (0x00000000) /* R-XUV */ + +#define NV_DPCD_HDCP22_RTX_OFFSET (0x00069000) /* RWXUR */ +#define NV_DPCD_HDCP22_RTX_SIZE (0x00000008) /* R---S */ + +#define NV_DPCD_HDCP22_TXCAPS_OFFSET (0x00069008) /* RWXUR */ +#define NV_DPCD_HDCP22_TXCAPS_SIZE (0x00000003) /* R---S */ + +#define NV_DPCD_HDCP22_CERTRX (0x0006900B) /* R-XUR */ +#define NV_DPCD_HDCP22_CERTRX_SIZE (0x0000020A) /* R---S */ + +#define NV_DPCD_HDCP22_RRX (0x00069215) /* R-XUR */ +#define NV_DPCD_HDCP22_RRX_SIZE (0x00000008) /* R---S */ + +#endif // #ifndef _DPCD_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h new file mode 100644 index 0000000..89c9172 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h @@ -0,0 +1,790 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION 
& AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DISPLAYPORT14_H_ +#define _DISPLAYPORT14_H_ + +#define NV_DPCD14_EXTEND_CAP_BASE (0x00002200) + +#define NV_DPCD14_MAX_LINK_BANDWIDTH (0x00000001) /* R-XUR */ +#define NV_DPCD14_MAX_LINK_BANDWIDTH_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_MAX_LINK_BANDWIDTH_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_MAX_DOWNSPREAD (0x00000003) /* R-XUR */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED 7:7 /* R-XUF */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL (0x0000000E) /* R-XUR */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP 7:7 /* R-XUF */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_SUPPORT (0x00000060) /* R-XUR */ +#define NV_DPCD14_DSC_SUPPORT_DSC_SUPPORT 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SUPPORT_DSC_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SUPPORT_DSC_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_ALGORITHM_REVISION (0x00000061) /* R-XUR */ +#define NV_DPCD14_DSC_ALGORITHM_REVISION_MAJOR 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_ALGORITHM_REVISION_MINOR 7:4 /* R-XUF */ + +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK (0x00000062) /* R-XUR */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE 1:0 /* R-XUF */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_1KB (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_4KB (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_16KB (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_64KB (0x00000003) /* R-XUV */ + +#define NV_DPCD14_DSC_RC_BUFFER (0x00000063) /* R-XUR */ +#define NV_DPCD14_DSC_RC_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1 (0x00000064) /* R-XUR */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2_NO (0x00000000) 
/* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6 4:4 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8 5:5 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10 6:6 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12 7:7 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_LINE_BUFFER (0x00000065) /* R-XUR */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_9 (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_10 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_11 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_12 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_13 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_14 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_15 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_16 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_8 (0x00000008) /* R-XUV */ + +#define NV_DPCD14_DSC_BLOCK_PREDICTION (0x00000066) /* R-XUR */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_1 (0x00000067) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_1_LSB 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_2 (0x00000068) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_2_MSB 1:0 /* R-XUF */ + +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES (0x00000069) /* R-XUR */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422 2:2 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422_NO (0x00000000) /* R-XUV 
*/ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420 4:4 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES (0x0000006A) /* R-XUR */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR 2:2 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_PEAK_THROUGHPUT (0x0000006B) /* R-XUR */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_340 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_400 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_450 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_500 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_550 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_600 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_650 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_700 (0x00000008) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_750 (0x00000009) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_800 (0x0000000A) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_850 (0x0000000B) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_900 (0x0000000C) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_950 (0x0000000D) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_1000 (0x0000000E) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1 7:4 /* R-XUF */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_340 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_400 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_450 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_500 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_550 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_600 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_650 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_700 (0x00000008) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_750 (0x00000009) /* R-XUV */ 
+#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_800 (0x0000000A) /* R-XUV */
+#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_850 (0x0000000B) /* R-XUV */
+#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_900 (0x0000000C) /* R-XUV */
+#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_950 (0x0000000D) /* R-XUV */
+#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_1000 (0x0000000E) /* R-XUV */
+
+#define NV_DPCD14_DSC_MAXIMUM_SLICE_WIDTH (0x0000006C) /* R-XUR */
+#define NV_DPCD14_DSC_MAXIMUM_SLICE_WIDTH_MAX 7:0 /* R-XUF */
+
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2 (0x0000006D) /* R-XUR */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16 0:0 /* R-XUF */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20 1:1 /* R-XUF */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24 2:2 /* R-XUF */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT (0x0000006F) /* R-XUR */
+#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED 2:0 /* R-XUF */
+#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_16 (0x00000000) /* R-XUV */
+#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_8 (0x00000001) /* R-XUV */
+#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_4 (0x00000002) /* R-XUV */
+#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_2 (0x00000003) /* R-XUV */
+#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1 (0x00000004) /* R-XUV */
+
+// Field definitions used only with 128b/132b for DP2.0+
+#define NV_DPCD20_TRAINING_LANE_SET(i) (0x00000103+(i)) /* RW-1A */
+#define NV_DPCD20_TRAINING_LANE_SET__SIZE 4 /* RW--S */
+#define NV_DPCD20_TRAINING_LANE_SET_TX_FFE_PRESET_VALUE 3:0 /* RWXUF */
+
+#define NV_DPCD14_DSC_ENABLE (0x00000160) /* R-XUR */
+#define NV_DPCD14_DSC_ENABLE_SINK 0:0 /* R-XUF */
+#define NV_DPCD14_DSC_ENABLE_SINK_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_DSC_ENABLE_SINK_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_FEC_CAPABILITY (0x00000090) /* R-XUR */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE 0:0 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE 1:1 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE 2:2 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE 3:3 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE 4:4 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE 5:5 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */
+// Bit 6 : RESERVED. Read 0
+#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED 7:7 /* R-XUF */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD14_TRAINING_PATTERN_SET (0x00000102) /* RWXUR */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS 3:0 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_NONE (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP1 (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP2 (0x00000002) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP3 (0x00000003) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP4 (0x00000007) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED 5:5 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_FALSE (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_TRUE (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL 7:6 /* RWXUF */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ILLEGAL_SYMBOL_ERROR (0x00000000) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ERROR (0x00000001) /* RWXUV */
+#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_ILLEGAL_SYMBOL_ERROR (0x00000002) /* RWXUV */
+
+// Field definitions used only with 128b/132b for DP2.0+
+#define NV_DPCD20_128B_132B_TRAINING_PATTERN (0x00000102) /* RWXUR */
+#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT 3:0 /* RWXUF */
+#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_NONE (0x00000000) /* RWXUV */
+#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_TPS1 (0x00000001) /* RWXUV */
+#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_TPS2 (0x00000002) /* RWXUV */
+#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_TPS2_CDS (0x00000003) /* RWXUV */
+// Note: Bits 7:4 are reserved for 128b/132b. Driver should keep them 0.
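Editor's note: a worked example of how these write-field defines combine. During 8b/10b link training the source packs TRAINING_PATTERN_SET into a single byte before writing it over AUX. This is a sketch reusing the hypothetical FIELD_LSB() helper from the HDCP note above, plus an assumed one-byte auxWrite() primitive; neither is part of this header.

    /* Request TPS2 with scrambling disabled: writes 0x22 to DPCD 0x00000102. */
    static void setTrainingPattern2(void)
    {
        unsigned int v = 0;
        v |= NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP2
                 << FIELD_LSB(NV_DPCD14_TRAINING_PATTERN_SET_TPS);
        v |= NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_TRUE
                 << FIELD_LSB(NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED);
        auxWrite(NV_DPCD14_TRAINING_PATTERN_SET, v); /* assumed AUX write helper */
    }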
+
+#define NV_DPCD14_LINK_QUAL_LANE_SET(i) (0x0000010B+(i)) /* RW-1A */
+#define NV_DPCD14_LINK_QUAL_LANE_SET__SIZE 4 /* R---S */
+#define NV_DPCD14_LINK_QUAL_LANE_SET_LQS 2:0 /* RWXUF */
+#define NV_DPCD14_LINK_QUAL_LANE_SET_LQS_CP2520PAT3 (0x00000007) /* RWXUV */
+
+#define NV_DPCD14_FEC_CONFIGURATION (0x00000120) /* RWXUR */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY 0:0 /* RWXUF */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY_NO (0x00000000) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY_YES (0x00000001) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL 3:1 /* RWXUF */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_FEC_ERROR_COUNT_DIS (0x00000000) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_UNCORRECTED_BLOCK_ERROR_COUNT (0x00000001) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_CORRECTED_BLOCK_ERROR_COUNT (0x00000002) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_BIT_ERROR_COUNT (0x00000003) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BLOCK_ERROR_COUNT (0x00000004) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BIT_ERROR_COUNT (0x00000005) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT 5:4 /* RWXUF */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_0 (0x00000000) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_1 (0x00000001) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_2 (0x00000002) /* RWXUV */
+#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_3 (0x00000003) /* RWXUV */
+
+// Field definitions used only with 128b/132b for DP2.0+
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED (0x00000204) /* R-XUR */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE 2:2 /* R-XUF */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE 3:3 /* R-XUF */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_LT_FAILED 4:4 /* R-XUF */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_LT_FAILED_NO (0x00000000) /* R-XUV */
+#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_LT_FAILED_YES (0x00000001) /* R-XUV */
+
+// Field definitions for 0x0206/0x0207h (ADJUST_REQUEST_LANEX), used only with 128b/132b for DP2.0+
+#define NV_DPCD20_LANEX_XPLUS1_ADJUST_REQ_LANEX_TX_FFE_PRESET_VALUE 3:0 /* R-XUF */
+#define NV_DPCD20_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_TX_FFE_PRESET_VALUE 7:4 /* R-XUF */
+
+// PANEL REPLAY RELATED DPCD
+#define NV_DPCD20_PANEL_REPLAY_CAPABILITY (0x000000B0)
+#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED 0:0
+#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_NO (0x00000000)
+#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_YES (0x00000001)
+#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE 1:1
+#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_NO (0x00000000)
+#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_YES (0x00000001)
+
+#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION (0x000001B0)
+#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE 0:0 +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_YES (0x00000001) + +#define NV_DPCD14_PHY_TEST_PATTERN (0x00000248) /* R-XUR */ +#define NV_DPCD14_PHY_TEST_PATTERN_SEL_CP2520PAT3 (0x00000007) /* R-XUV */ + +#define NV_DPCD14_DSC_CRC_0 (0x00000262) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_0_LOW_BYTE NV_DPCD14_DSC_CRC_0 +#define NV_DPCD14_DSC_CRC_0_HIGH_BYTE (0x00000263) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_1 (0x00000264) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_1_LOW_BYTE NV_DPCD14_DSC_CRC_1 +#define NV_DPCD14_DSC_CRC_1_HIGH_BYTE (0x00000265) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_2 (0x00000266) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_2_LOW_BYTE NV_DPCD14_DSC_CRC_2 +#define NV_DPCD14_DSC_CRC_2_HIGH_BYTE (0x00000267) /* R-XUR */ + +#define NV_DPCD14_FEC_STATUS (0x00000280) /* R-XUR */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED 0:0 /* R-XUF */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED 1:1 /* R-XUF */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED_YES (0x00000001) /* R-XUV */ +// Bits 7-2: RESERVED. +#define NV_DPCD14_FEC_STATUS_CLEAR (0x00000001) + +#define NV_DPCD14_FEC_ERROR_COUNT (0x00000281) /* R-XUR */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_LOW_BYTE NV_DPCD14_FEC_ERROR_COUNT +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_HIGH_BYTE (0x00000282) /* R-XUR */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID 7:7 /* R-XUF */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_YES (0x00000001) /* R-XUV */ + +// Field definition for 0x0200E (LANE_ALIGN_STATUS_UPDATED_ESI), used only when DP2.0+ 128b/132b is enabled. +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI (0x0000200E) /* R-XUR */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE 2:2 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE 3:3 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_LT_FAILED 4:4 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_LT_FAILED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_LT_FAILED_YES (0x00000001) /* R-XUV */ + +// Field definition for 0x0200F (SINK_STATUS_ESI), used only when DP2.0+ 128b/132b is enabled. 
+#define NV_DPCD20_SINK_STATUS_ESI (0x0000200F) /* R-XUR */ +#define NV_DPCD20_SINK_STATUS_ESI_INTRA_HOP_AUX_REPLY 3:3 /* R-XUF */ +#define NV_DPCD20_SINK_STATUS_ESI_INTRA_HOP_AUX_REPLY_DPRX (0x00000000) /* R-XUV */ +#define NV_DPCD20_SINK_STATUS_ESI_INTRA_HOP_AUX_REPLY_LTTPR (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_REV (0x00002200) /* R-XUR */ +#define NV_DPCD14_EXTENDED_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_REV_MAJOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_REV_MINOR_4 (0x00000004) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH (0x00002201) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT (0x00002202) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE 4:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_1 (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_2 (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_4 (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD (0x00002203) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL_NONE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL_0_5_PCT (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED_YES (0x00000001) /* R-XUV */ + +// NORP = Number of Receiver Ports = Value + 1 +#define NV_DPCD14_EXTENDED_NORP (0x00002204) /* R-XUR */ +#define NV_DPCD14_EXTENDED_NORP_VAL 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_VAL_ONE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_VAL_TWO (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_VAL_SST_MAX (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_5V 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_12V 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_18V 7:7 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT (0x00002205) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE 2:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_HDMI_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_OTHERS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION 3:3 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING (0x00002206) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT (0x00002207) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_COUNT 3:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_RECEIVE_PORT0_CAP_0 (0x00002208) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORT1_CAP_0 (0x0000220A) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT 2:2 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT_PIXEL (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT_BYTE (0x00000001) /* R-XUV */ +#define 
NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_RECEIVE_PORT0_CAP_1 (0x00002209) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORT1_CAP_1 (0x0000220B) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_1_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP (0x0000220C) /* R-XUR */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_1K (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_5K (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_10K (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_100K (0x00000008) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_400K (0x00000010) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_1M (0x00000020) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP (0x0000220D) /* R-XUR */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL (0x0000220E) /* R-XUR */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_DEFAULT (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_4MS (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_16MS (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_ADAPTER_CAP (0x0000220F) /* R-XUR */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST (0x00002210) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP 2:2 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY 3:3 /* R-XUF */ 
+#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST (0x00002211) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_1MS (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_20MS (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_40MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_60MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_80MS (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_100MS (0x00000005) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_VSC_EXT_VESA_SDP_MAX_CHAINING (0x00002212) /* R-XUR */ +#define NV_DPCD14_EXTENDED_VSC_EXT_VESA_SDP_MAX_CHAINING_VAL 7:0 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_VSC_EXT_CTA_SDP_MAX_CHAINING (0x00002213) /* R-XUR */ +#define NV_DPCD14_EXTENDED_VSC_EXT_CTA_SDP_MAX_CHAINING_VAL 7:0 /* R-XUF */ + +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST (0x00002214) /* R-XUR */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED 4:4 /* R-XUF */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES (0x00002215) /* R-XUR */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR10 0:0 /* R-XUF */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR10_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR10_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR20 1:1 /* R-XUF */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR20_NO 
(0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR20_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR13_5 2:2 /* R-XUF */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR13_5_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR13_5_YES (0x00000001) /* R-XUV */ + +// +// The interval is (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * INTERVAL_UNIT. +// The maximum is 256 ms. +// +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL (0x00002216) /* R-XUR */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_UNIT 7:7 /* R-XUF */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_UNIT_2MS (0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_UNIT_1MS (0x00000001) /* R-XUV */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_MAX_MS 256 + +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS (0x00003036) /* R-XUR */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_MODE 0:0 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_MODE_TMDS (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_MODE_FRL (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RESULT 6:1 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G 1:1 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G 2:2 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G 3:3 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G 4:4 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G 5:5 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G 6:6 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE(i) (0x00003037+(i)) /* RW-1A */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE__SIZE 4 /* R---S */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT 3:0 /* R-XUF */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_ZERO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_THREE (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_TEN (0x00000002) /* R-XUV */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_HUNDRED (0x00000004) /* R-XUV */ + +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS (0x0000303B) /* R-XUR */ +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE 0:0 /* R-XUF */ 
+#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE_NO (0x00000000) /* R-XUV */
+#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE_YES (0x00000001) /* R-XUV */
+#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_READY 1:1 /* R-XUF */
+#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_READY_NO (0x00000000) /* R-XUV */
+#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_READY_YES (0x00000001) /* R-XUV */
+
+#define NV_DPCD20_PCON_CONTROL_0 (0x00003050) /* RWXUR */
+#define NV_DPCD20_PCON_CONTROL_0_OUTPUT_CONFIG 0:0 /* RWXUF */
+#define NV_DPCD20_PCON_CONTROL_0_OUTPUT_CONFIG_DVI (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_0_OUTPUT_CONFIG_HDMI (0x00000001) /* RWXUV */
+
+#define NV_DPCD20_PCON_CONTROL_1 (0x00003051) /* RWXUR */
+#define NV_DPCD20_PCON_CONTROL_1_CONVERT_YCBCR420 0:0 /* RWXUF */
+#define NV_DPCD20_PCON_CONTROL_1_CONVERT_YCBCR420_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_1_CONVERT_YCBCR420_ENABLE (0x00000001) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS 1:1 /* RWXUF */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS_NO (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS_YES (0x00000001) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING 2:2 /* RWXUF */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING_NO (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING_YES (0x00000001) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING 3:3 /* RWXUF */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING_NO (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING_YES (0x00000001) /* RWXUV */
+
+#define NV_DPCD20_PCON_CONTROL_2 (0x00003052) /* RWXUR */
+#define NV_DPCD20_PCON_CONTROL_2_CONVERT_YCBCR422 0:0 /* RWXUF */
+#define NV_DPCD20_PCON_CONTROL_2_CONVERT_YCBCR422_DISABLE (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_2_CONVERT_YCBCR422_ENABLE (0x00000001) /* RWXUV */
+
+#define NV_DPCD20_PCON_CONTROL_3 (0x00003053) /* RWXUR */
+#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH 1:0 /* RWXUF */
+#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_SAME_AS_INC (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_8BPC (0x00000001) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_10BPC (0x00000002) /* RWXUV */
+#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_12BPC (0x00000003) /* RWXUV */
+
+#define NV_DPCD14_OUTPUT_HTOTAL_LOW (0x00003054) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HTOTAL_HIGH (0x00003055) /* RWXUR */
+
+#define NV_DPCD14_OUTPUT_HSTART_LOW (0x00003056) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HSTART_HIGH (0x00003057) /* RWXUR */
+
+#define NV_DPCD14_OUTPUT_HSP_HSW_LOW (0x00003058) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH (0x00003059) /* RWXUR */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_VAL 6:0 /* RWXUF */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP 7:7 /* RWXUF */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP_POSITIVE (0x00000000) /* RWXUV */
+#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP_NEGATIVE (0x00000001) /* RWXUV */
+
+#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1 (0x0000305A) /* RWXUR */
+#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW 2:0 /* RWXUF */
+#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_ZERO (0x00000000) /* RWXUV */
+#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_9G (0x00000001) /* RWXUV */
+#define 
NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_18G (0x00000002) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_24G (0x00000003) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_32G (0x00000004) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_40G (0x00000005) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_48G (0x00000006) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE 3:3 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE 4:4 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE 5:5 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE 6:6 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_HDMI_LINK 7:7 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_HDMI_LINK_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_HDMI_LINK_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2 (0x0000305B) /* RWXUR */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK 5:0 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_9G (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_18G (0x00000002) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_24G (0x00000004) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_32G (0x00000008) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_40G (0x00000010) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_48G (0x00000020) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL 6:6 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL_NORMAL (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL_EXTENDED (0x00000001) /* RWXUV */ + +// LT Tunable Repeater Related offsets + +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV (0x000F0000) /* R-XUR */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MINOR_0 (0x00000000) /* R-XUV */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MAJOR_1 (0x00000001) /* R-XUV */ + +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER (0x000F0001) /* R-XUR */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_1_62_GBPS (0x00000006) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_2_70_GBPS (0x0000000A) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_5_40_GBPS (0x00000014) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_PHY_REPEATER_CNT (0x000F0002) /* R-XUR */ +#define 
NV_DPCD14_PHY_REPEATER_CNT_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_0 (0x00000000) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_1 (0x00000080) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_2 (0x00000040) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_3 (0x00000020) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_4 (0x00000010) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_5 (0x00000008) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_6 (0x00000004) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_7 (0x00000002) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_8 (0x00000001) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_MAX 8 + +#define NV_DPCD14_PHY_REPEATER_MODE (0x000F0003) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_MODE_VAL_TRANSPARENT (0x00000055) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_MODE_VAL_NON_TRANSPARENT (0x000000AA) /* R-XUV */ + +#define NV_DPCD14_MAX_LANE_COUNT_PHY_REPEATER (0x000F0004) /* R-XUR */ +#define NV_DPCD14_MAX_LANE_COUNT_PHY_REPEATER_VAL 4:0 /* R-XUF */ + +#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT (0x000F0005) /* RWXUR */ +#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT_REQ 6:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT_GRANT 7:7 /* RWXUF */ + +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING (0x000F0006) /* RWXUR */ +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING_128B_132B_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING_128B_132B_SUPPORTED_NO (0x00000000) /* RWXUF */ +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING_128B_132B_SUPPORTED_YES (0x00000001) /* RWXUF */ + +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES (0x000F0007) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_10G_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_10G_SUPPORTED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_10G_SUPPORTED_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_20G_SUPPORTED 1:1 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_20G_SUPPORTED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_20G_SUPPORTED_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_13_5G_SUPPORTED 2:2 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_13_5G_SUPPORTED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_13_5G_SUPPORTED_YES (0x00000001) /* R-XUF */ + +#define NV_DPCD14_PHY_REPEATER_EQ_DONE (0x000F0008) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR(i) (i):(i) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_0 0:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_0_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_0_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_1 1:1 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_1_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_1_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_2 2:2 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_2_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_2_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_3 3:3 /* R-XUF */ +#define 
NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_3_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_3_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_4 4:4 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_4_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_4_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_5 5:5 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_5_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_5_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_6 6:6 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_6_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_6_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_7 7:7 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_7_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_7_YES (0x00000001) /* R-XUF */ + + +#define NV_DPCD14_PHY_REPEATER_START(i) (0x000F0010+(i)*0x50) /* RW-1A */ +#define NV_DPCD14_PHY_REPEATER_START__SIZE 8 /* R---S */ +// Following defines are offsets +#define NV_DPCD14_TRAINING_PATTERN_SET_PHY_REPEATER (0x00000000) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE0_SET_PHY_REPEATER (0x00000001) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE1_SET_PHY_REPEATER (0x00000002) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE2_SET_PHY_REPEATER (0x00000003) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE3_SET_PHY_REPEATER (0x00000004) /* RWXUV */ + +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER (0x00000010) /* R-XUR */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL 6:0 /* R-XUF */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_4MS (0x00000001) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_16MS (0x00000004) /* R-XUV */ + +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER (0x00000011) /* R-XUR */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3 0:0 /* R-XUF */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3 1:1 /* R-XUF */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_LANE0_1_STATUS_PHY_REPEATER (0x00000020) /* R-XUR */ +#define NV_DPCD14_LANE2_3_STATUS_PHY_REPEATER (0x00000021) /* R-XUR */ +#define NV_DPCD14_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER (0x00000022) /* R-XUR */ +#define NV_DPCD14_ADJUST_REQUEST_LANE0_1_PHY_REPEATER (0x00000023) /* R-XUR */ +#define NV_DPCD14_ADJUST_REQUEST_LANE2_3_PHY_REPEATER (0x00000024) /* R-XUR */ + +#endif // #ifndef _DISPLAYPORT14_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h new file mode 100644 index 0000000..901eca4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define NV_DPCD20_DSC_SUPPORT (0x00000060) /* R-XUR */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT 1:1 /* R-XUF */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_DSC_PASS_THROUGH (0x00000160) /* R-XUR */ +#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE 1:1 /* R-XUF */ +#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_GUID_2 (0x00000040) /* R-XUR */ + +// PANEL REPLAY RELATED DPCD +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY (0x000000B0) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED 0:0 +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_YES (0x00000001) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE 1:1 +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_YES (0x00000001) + +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION (0x000001B0) +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE 0:0 +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_YES (0x00000001) diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h new file mode 100644 index 0000000..9371c1c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h @@ -0,0 +1,86 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _HDMI_SPEC_H_ +#define _HDMI_SPEC_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: HDMI_SPEC.H * +* Defines Common HDMI flags * +* * +\***************************************************************************/ + +/* +* RM will be moving to separate packet types for DP and HDMI +* since the SDP packet type differ between HDMI and DP. Going forward +* clients are expected to use the respective packet type. Once all the +* clients move to the new data types, we can remove the redundant +* PACKET_TYPE definition. +*/ + + +typedef enum +{ + pktType_AudioClkRegeneration = 0x01, + pktType_GeneralControl = 0x03, + pktType_GamutMetadata = 0x0a, + pktType_SRInfoFrame = 0x7f, // Self refresh infoframe for eDP enter/exit self refresh, SRS 1698 + pktType_Cea861BInfoFrame = 0x80, + pktType_VendorSpecInfoFrame = 0x81, + pktType_AviInfoFrame = 0x82, + pktType_AudioInfoFrame = 0x84, + pktType_SrcProdDescInfoFrame = 0x83, + pktType_MpegSrcInfoFrame = 0x85, + pktType_DynamicRangeMasteringInfoFrame = 0x87 +} PACKET_TYPE; + +typedef enum +{ + hdmi_pktType_AudioClkRegeneration = 0x01, + hdmi_pktType_GeneralControl = 0x03, + hdmi_pktType_GamutMetadata = 0x0a, + hdmi_pktType_ExtendedMetadata = 0x7f, + hdmi_pktType_Cea861BInfoFrame = 0x80, + hdmi_pktType_VendorSpecInfoFrame = 0x81, + hdmi_pktType_AviInfoFrame = 0x82, + hdmi_pktType_AudioInfoFrame = 0x84, + hdmi_pktType_SrcProdDescInfoFrame = 0x83, + hdmi_pktType_MpegSrcInfoFrame = 0x85, + hdmi_pktType_DynamicRangeMasteringInfoFrame = 0x87 +} HDMI_PACKET_TYPE; + + +#define HDMI_PKT_HDR_SIZE 3 + +#define HDMI_PKT_AVI_NUM_DBYTES 14 +#define HDMI_PKT_AUDIO_NUM_DBYTES 11 +#define HDMI_PKT_GENCTRL_NUM_DBYTES 7 +#define HDMI_PKT_ACR_NUM_DBYTES 7 +#define HDMI_PKT_GAMUT_METADATA_NUM_DBYTES 28 +#define HDMI_PKT_VS_MAX_NUM_DBYTES 28 + +#define HDMI_GENCTRL_PACKET_MUTE_ENABLE 0x01 +#define HDMI_GENCTRL_PACKET_MUTE_DISABLE 0x10 + +#endif // #ifndef _HDMI_SPEC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h new file mode 100644 index 0000000..1a87551 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVBINSEGMENT_H
+#define NVBINSEGMENT_H
+
+#define PUSH_SEGMENTS
+#define POP_SEGMENTS
+#define CODE_SEGMENT(__seg)
+#define DATA_SEGMENT(__seg)
+#define BSS_SEGMENT(__seg)
+#define CONS_SEGMENT(__seg)
+#define PAGE_SEGMENT
+#define NONPAGE_SEGMENT
+
+#endif // NVBINSEGMENT_H
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h
new file mode 100644
index 0000000..c747ea4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h
@@ -0,0 +1,72 @@
+#ifndef _NVBLDVER_H_
+#define _NVBLDVER_H_
+
+#ifndef _STRINGIZE
+#define _STRINGIZE(t) #t
+#endif
+#ifndef STRINGIZE
+#define STRINGIZE(t) _STRINGIZE(t)
+#endif
+
+// These variables can be overridden using ENV vars, see nvCommon.nvmk.
+// If no env vars are set, then the defaults seen here will be used.
+// In DVS builds, the ENV vars are used to control these values.
+// Note: the values of NV_BUILD_CL and NV_BUILD_TYPE_NON_BM are only used in
+// non-buildmeister builds; see the override section below.
+// DVS_SW_CHANGELIST has been added to ENV vars in bug 1486673
+#ifndef DVS_SW_CHANGELIST
+ #define DVS_SW_CHANGELIST 0
+#endif
+#ifndef NV_BUILD_CL
+ #define NV_BUILD_CL (DVS_SW_CHANGELIST)
+#endif
+#if NV_BUILD_CL == 0
+ #define NV_BUILD_CL (DVS_SW_CHANGELIST)
+#endif
+#ifndef NV_BUILD_TYPE_NON_BM
+ #define NV_BUILD_TYPE_NON_BM Private
+#endif
+#ifndef NV_BUILD_AUTHOR
+ #define NV_BUILD_AUTHOR unknown
+#endif
+// End ENV var section
+
+
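+// Illustrative note (not in the original file): STRINGIZE expands its macro
+// argument before stringizing, so with the defaults above,
+//
+//   STRINGIZE(NV_BUILD_TYPE_NON_BM)  // -> "Private"
+//   STRINGIZE(NV_BUILD_AUTHOR)       // -> "unknown"
+//
+// NV_DISPLAY_DRIVER_TITLE (defined at the end of this file) relies on this,
+// concatenating the resulting adjacent string literals into one title string.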
+// The values of the following strings are set via a buildmeister python script,
+// and then checked back in. You cannot make changes to these sections without
+// corresponding changes to the buildmeister script.
+#ifndef NV_BUILD_BRANCH
+ #define NV_BUILD_BRANCH bugfix_main
+#endif
+#ifndef NV_PUBLIC_BRANCH
+ #define NV_PUBLIC_BRANCH bugfix_main
+#endif
+
+#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
+#define NV_BUILD_BRANCH_VERSION "dev/gpu_drv/bugfix_main-15642"
+#define NV_BUILD_CHANGELIST_NUM (28397778)
+#define NV_BUILD_TYPE "Nightly"
+#define NV_BUILD_NAME "dev/gpu_drv/bugfix_main-15642"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (13979614)
+
+#else /* Windows builds */
+#define NV_BUILD_BRANCH_VERSION "bugfix_main-16408"
+#define NV_BUILD_CHANGELIST_NUM (28392833)
+#define NV_BUILD_TYPE "Nightly"
+#define NV_BUILD_NAME "bugfix_main-200514"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (28376693)
+#endif
+// End buildmeister python edited section
+
+// A few of the values are defined differently for non-buildmeister builds;
+// this section redefines those defines.
+#ifndef NV_BUILDMEISTER_BLD
+ #undef NV_BUILD_TYPE
+ #define NV_BUILD_TYPE STRINGIZE(NV_BUILD_TYPE_NON_BM)
+ #undef NV_BUILD_CHANGELIST_NUM
+ #define NV_BUILD_CHANGELIST_NUM NV_BUILD_CL
+#endif
+
+#define NV_DISPLAY_DRIVER_TITLE NV_BUILD_TYPE " " STRINGIZE(NV_BUILD_BRANCH) " " NV_BUILD_NAME " " STRINGIZE(NV_BUILD_AUTHOR)
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h
new file mode 100644
index 0000000..0ab546b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_CPU_UUID_H_
+#define _NV_CPU_UUID_H_
+
+#define NV_UUID_LEN 16
+
+typedef struct nv_uuid
+{
+ NvU8 uuid[NV_UUID_LEN];
+
+} NvUuid;
+
+#define NV_UUID_HI(pUuid) (*((NvU64*)((pUuid)->uuid + (NV_UUID_LEN >> 1))))
+#define NV_UUID_LO(pUuid) (*((NvU64*)((pUuid)->uuid + 0)))
+
+typedef NvUuid NvSystemUuid;
+
+typedef NvUuid NvProcessorUuid;
+
+extern const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT;
+
+#endif // _NV_CPU_UUID_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h
new file mode 100644
index 0000000..8c4d416
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h
@@ -0,0 +1,134 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/** @file nvHdmiFrlCommon.h
+ * @brief This file defines the data needed for, and returned by, the HDMI 2.1
+ * spec FRL calculations. It is meant to be a spec layer within the HDMI lib,
+ * without carrying any driver/hw related information.
+ */
+
+#ifndef _NVHDMIFRLCOMMON_H_
+#define _NVHDMIFRLCOMMON_H_
+
+#include "nvmisc.h"
+
+//******************************************************************************
+// Constants/Structures
+//******************************************************************************
+#define MAX_RECONSTRUCTED_HACTIVE_PIXELS 2720
+
+// HDMI_BPC: Bits per component enums.
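+// The enumerator values below are the literal bit counts, so they can be used
+// directly in bandwidth arithmetic. For example (illustrative, not from the
+// original header): an RGB pixel at HDMI_BPC10 carries 3 * 10 = 30 bits.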
+typedef enum tagHDMI_BPC +{ + HDMI_BPC8 = 8, + HDMI_BPC10 = 10, + HDMI_BPC12 = 12, + HDMI_BPC16 = 16 +} HDMI_BPC; + +// HDMI_PIXEL_PACKING: Pixel packing type enums +typedef enum tagHDMI_PIXEL_PACKING +{ + HDMI_PIXEL_PACKING_RGB = 0, + HDMI_PIXEL_PACKING_YCbCr444, + HDMI_PIXEL_PACKING_YCbCr422, + HDMI_PIXEL_PACKING_YCbCr420 +} HDMI_PIXEL_PACKING; + +// HDMI_FRL_DATA_RATE: FRL mode enums +typedef enum tagHDMI_FRL_DATA_RATE +{ + HDMI_FRL_DATA_RATE_NONE, + HDMI_FRL_DATA_RATE_3LANES_3GBPS, + HDMI_FRL_DATA_RATE_3LANES_6GBPS, + HDMI_FRL_DATA_RATE_4LANES_6GBPS, + HDMI_FRL_DATA_RATE_4LANES_8GBPS, + HDMI_FRL_DATA_RATE_4LANES_10GBPS, + HDMI_FRL_DATA_RATE_4LANES_12GBPS, + HDMI_FRL_DATA_RATE_UNSPECIFIED +} HDMI_FRL_DATA_RATE; + +typedef enum tagAUDIO_PKTTYPE +{ + AUDIO_PKTTYPE_LPCM_SAMPLE = 0, + AUDIO_PKTTYPE_ONE_BIT_LPCM_SAMPLE, + AUDIO_PKTTYPE_DST_AUDIO, + AUDIO_PKTTYPE_HBR_AUDIO, + AUDIO_PKTTYPE_MULTI_STREAM_AUDIO, + AUDIO_PKTTYPE_ONE_BIT_MULTI_STREAM_AUDIO, + AUDIO_PKTTYPE_3D_AUDIO, + AUDIO_PKTTYPE_ONE_BIT_3D_AUDIO, + NO_AUDIO +} AUDIO_PKTTYPE; + +typedef struct tagFRL_CAPACITY_COMPUTATION_PARAMS +{ + NvU32 numLanes; + NvU32 frlBitRateGbps; + NvU32 pclk10KHz; + NvU32 hTotal; + NvU32 hActive; + NvU32 bpc; + HDMI_PIXEL_PACKING pixelPacking; + AUDIO_PKTTYPE audioType; + NvU32 numAudioChannels; + NvU32 audioFreqKHz; + + struct + { + NvU32 bppTargetx16; + NvU32 hSlices; + NvU32 sliceWidth; + NvU32 dscTotalChunkKBytes; + } compressionInfo; + +} FRL_CAPACITY_COMPUTATION_PARAMS; + +typedef struct tagFRL_COMPUTATION_RESULT +{ + HDMI_FRL_DATA_RATE frlRate; + NvU32 bppTargetx16; + + NvBool engageCompression; + NvBool isAudioSupported; + NvBool dataFlowDisparityReqMet; + NvBool dataFlowMeteringReqMet; + NvBool isVideoTransportSupported; + NvU32 triBytesBorrowed; // uncompressed mode: num of active Tri-bytes to be transmitted at HBlank + NvU32 hcActiveBytes; // compressed mode: num of FRL character bytes in active region + NvU32 hcActiveTriBytes; // compressed mode: num of FRL tri-bytes in active region + NvU32 hcBlankTriBytes; // compressed mode: num of FRL tri-bytes in blanking region + NvU32 tBlankToTTotalX1k; // compressed mode: ratio of time spent on blanking to the total line time +} FRL_COMPUTATION_RESULT; + +typedef struct tagFRL_PRE_CALC_CONFIG +{ + NvU32 vic; + HDMI_PIXEL_PACKING packing; + HDMI_BPC bpc; + HDMI_FRL_DATA_RATE frlRate; + NvU32 bppX16; + NvBool bCompressedMode; +} FRL_PRE_CALC_CONFIG; + +#endif // _NVHDMIFRLCOMMON_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h new file mode 100644 index 0000000..16e72a2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h @@ -0,0 +1,557 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. +*/ +/* + * This header file contains the 3-character Plug and Play Vendor IDs and + * their translation into Vendor names. + * + * If the includer defines NV_PNP_VENDOR_IDS_USE_TCHAR, then + * PNPVendorID::vendorName will have type const TCHAR*; otherwise, it will have + * type const char*. + * + * References: + * http://www.uefi.org/pnp_id_list + * + */ + +#ifndef __NV_PNP_VENDOR_IDS_H__ +#define __NV_PNP_VENDOR_IDS_H__ + +#if defined(NV_PNP_VENDOR_IDS_USE_TCHAR) + #define _VENDOR_NAME_TYPE const TCHAR + #define _VENDOR_NAME_ENTRY(x) _T(x) +#else + #define _VENDOR_NAME_TYPE const char + #define _VENDOR_NAME_ENTRY(x) (x) +#endif + +typedef struct tagPNPVendorID +{ + char vendorId[4]; // PNP Vendor ID (example: "SNY") + _VENDOR_NAME_TYPE* vendorName; // Vendor name for display (example: "Sony") +} PNPVendorId; + + +/* + * The PNPVendorIds[] table maps between the 3-character Plug and + * Play Vendor Identifiers and user-friendly vendor names + */ +static const PNPVendorId PNPVendorIds[] = +{ + { "___", _VENDOR_NAME_ENTRY("Targa") }, + { "@@@", _VENDOR_NAME_ENTRY("Sangyo") }, + + { "AAC", _VENDOR_NAME_ENTRY("Acer") }, + { "ABC", _VENDOR_NAME_ENTRY("AboCom System Inc") }, + { "ABP", _VENDOR_NAME_ENTRY("Advanced System Products") }, + { "ACE", _VENDOR_NAME_ENTRY("ACME") }, + { "ACC", _VENDOR_NAME_ENTRY("ACCTON") }, + { "ACI", _VENDOR_NAME_ENTRY("Ancor Communications Inc") }, + { "ACK", _VENDOR_NAME_ENTRY("ACKSYS") }, + { "ACN", _VENDOR_NAME_ENTRY("ACON") }, + { "ACR", _VENDOR_NAME_ENTRY("Acer") }, + { "ACS", _VENDOR_NAME_ENTRY("Altos/ACS") }, + { "ACT", _VENDOR_NAME_ENTRY("Actebis/Targa") }, + { "ADI", _VENDOR_NAME_ENTRY("ADI") }, + { "ADP", _VENDOR_NAME_ENTRY("Adaptec") }, + { "ADT", _VENDOR_NAME_ENTRY("ADTEK") }, + { "ADV", _VENDOR_NAME_ENTRY("AMD") }, + { "ADX", _VENDOR_NAME_ENTRY("ADAX") }, + { "AEI", _VENDOR_NAME_ENTRY("AIR") }, + { "AEM", _VENDOR_NAME_ENTRY("AEM") }, + { "AEO", _VENDOR_NAME_ENTRY("UHC") }, + { "AGI", _VENDOR_NAME_ENTRY("Artish Graphics") }, + { "AKB", _VENDOR_NAME_ENTRY("Akebia") }, + { "AIC", _VENDOR_NAME_ENTRY("Arnos Instruments") }, + { "AIR", _VENDOR_NAME_ENTRY("Advanced Integrated Research") }, + { "AKB", _VENDOR_NAME_ENTRY("Akebia") }, + { "ALA", _VENDOR_NAME_ENTRY("Alacron") }, + { "ALR", _VENDOR_NAME_ENTRY("Advanced Logic Research") }, + { "AMC", _VENDOR_NAME_ENTRY("Attachmate") }, + { "AMD", _VENDOR_NAME_ENTRY("Amdek") }, + { "AMI", _VENDOR_NAME_ENTRY("American Megatrends") }, + { "AMP", _VENDOR_NAME_ENTRY("Amptron") }, + { "AMT", _VENDOR_NAME_ENTRY("Amtrans") }, + { "ANC", _VENDOR_NAME_ENTRY("Ancot") }, + { "ANI", _VENDOR_NAME_ENTRY("Anigma") }, + { "AOC", _VENDOR_NAME_ENTRY("AOC") }, + { "APD", _VENDOR_NAME_ENTRY("Applidata") }, + { "API", _VENDOR_NAME_ENTRY("AcerView") }, + { "APP", _VENDOR_NAME_ENTRY("Apple") }, + { "APS", _VENDOR_NAME_ENTRY("Autologic") }, + { "ARC", _VENDOR_NAME_ENTRY("Alta Research") }, + { "ART", _VENDOR_NAME_ENTRY("ArtMedia") }, + { "ASE", _VENDOR_NAME_ENTRY("ASEM") }, + { "ASI", _VENDOR_NAME_ENTRY("Ahead Systems") }, + { "AST", _VENDOR_NAME_ENTRY("AST 
Research") }, + { "ASU", _VENDOR_NAME_ENTRY("ASUS") }, + { "ATI", _VENDOR_NAME_ENTRY("Allied Telesis") }, + { "ATO", _VENDOR_NAME_ENTRY("ASTRO DESIGN, INC.") }, + { "ATT", _VENDOR_NAME_ENTRY("AT&T") }, + { "ATX", _VENDOR_NAME_ENTRY("Athenix") }, + { "AUO", _VENDOR_NAME_ENTRY("AU Optronics Corporation") }, + { "AUS", _VENDOR_NAME_ENTRY("Asustek Computer Inc") }, + { "AVI", _VENDOR_NAME_ENTRY("AIR") }, + { "AVO", _VENDOR_NAME_ENTRY("Avocent Corporation") }, + { "AZU", _VENDOR_NAME_ENTRY("Azura") }, + + { "BAN", _VENDOR_NAME_ENTRY("Banyan") }, + { "BCC", _VENDOR_NAME_ENTRY("Beaver Computer Corporation") }, + { "BCD", _VENDOR_NAME_ENTRY("Dr. Seufert GmbH") }, + { "BEO", _VENDOR_NAME_ENTRY("Bang & Olufsen") }, + { "BGT", _VENDOR_NAME_ENTRY("Budzetron") }, + { "BMM", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "BNQ", _VENDOR_NAME_ENTRY("BenQ") }, + { "BOE", _VENDOR_NAME_ENTRY("BOE Technology Group Co., Ltd") }, + { "BRG", _VENDOR_NAME_ENTRY("Bridge") }, + { "BTC", _VENDOR_NAME_ENTRY("Bit 3") }, + { "BTE", _VENDOR_NAME_ENTRY("Brilliant Technology") }, + { "BUS", _VENDOR_NAME_ENTRY("BusTek") }, + + { "CAL", _VENDOR_NAME_ENTRY("Acon") }, + { "CCI", _VENDOR_NAME_ENTRY("Cache") }, + { "CCP", _VENDOR_NAME_ENTRY("Epson") }, + { "CDP", _VENDOR_NAME_ENTRY("CalComp") }, + { "CFG", _VENDOR_NAME_ENTRY("Atlantis") }, + { "CHA", _VENDOR_NAME_ENTRY("Chase Research") }, + { "CIP", _VENDOR_NAME_ENTRY("Ciprico") }, + { "CLO", _VENDOR_NAME_ENTRY("Clone Computers/Analogy") }, + { "CLT", _VENDOR_NAME_ENTRY("automated computer control systems")}, + { "CMD", _VENDOR_NAME_ENTRY("CMD Technology") }, + { "CMO", _VENDOR_NAME_ENTRY("Chi Mei Optoelectronics corp.") }, + { "CNI", _VENDOR_NAME_ENTRY("Connect International") }, + { "CNT", _VENDOR_NAME_ENTRY("CNet Technology") }, + { "COM", _VENDOR_NAME_ENTRY("Comtrol") }, + { "CPC", _VENDOR_NAME_ENTRY("Ciprico") }, + { "CPD", _VENDOR_NAME_ENTRY("CompuAdd") }, + { "CPG", _VENDOR_NAME_ENTRY("DFI") }, + { "CPI", _VENDOR_NAME_ENTRY("Computer Peripherals") }, + { "CPL", _VENDOR_NAME_ENTRY("Compal") }, + { "CPQ", _VENDOR_NAME_ENTRY("Compaq") }, + { "CPT", _VENDOR_NAME_ENTRY("cPATH") }, + { "CPX", _VENDOR_NAME_ENTRY("Powermatic Data Systems") }, + { "CRD", _VENDOR_NAME_ENTRY("Cardinal Technologies") }, + { "CRN", _VENDOR_NAME_ENTRY("Cornerstone") }, + { "CRS", _VENDOR_NAME_ENTRY("Cisco") }, + { "CSE", _VENDOR_NAME_ENTRY("Compu Shack") }, + { "CSI", _VENDOR_NAME_ENTRY("Cabletron") }, + { "CSS", _VENDOR_NAME_ENTRY("CSS Laboratories") }, + { "CTN", _VENDOR_NAME_ENTRY("Computone") }, + { "CTX", _VENDOR_NAME_ENTRY("Chuntex/CTX") }, + { "CUB", _VENDOR_NAME_ENTRY("Cubix") }, + { "CUI", _VENDOR_NAME_ENTRY("CUI") }, + { "CYB", _VENDOR_NAME_ENTRY("CyberVision") }, + + { "DBI", _VENDOR_NAME_ENTRY("DigiBoard") }, + { "DBL", _VENDOR_NAME_ENTRY("Doble Engineering") }, + { "DCC", _VENDOR_NAME_ENTRY("Dale Computer") }, + { "DCE", _VENDOR_NAME_ENTRY("Mylex") }, + { "DCM", _VENDOR_NAME_ENTRY("DCM Data Products") }, + { "DEC", _VENDOR_NAME_ENTRY("DEC") }, + { "DEI", _VENDOR_NAME_ENTRY("Deico Electronics") }, + { "DEL", _VENDOR_NAME_ENTRY("Dell") }, + { "DFI", _VENDOR_NAME_ENTRY("DFI") }, + { "DGC", _VENDOR_NAME_ENTRY("Data General") }, + { "DGS", _VENDOR_NAME_ENTRY("Diagsoft") }, + { "DIA", _VENDOR_NAME_ENTRY("Diadem") }, + { "DIO", _VENDOR_NAME_ENTRY("DIO") }, + { "DIS", _VENDOR_NAME_ENTRY("Diseda") }, + { "DIT", _VENDOR_NAME_ENTRY("Dragon Information Technology") }, + { "DLK", _VENDOR_NAME_ENTRY("D-Link") }, + { "DLO", _VENDOR_NAME_ENTRY("Dlodlo Technologies Co., Ltd") }, + { "DMB", 
_VENDOR_NAME_ENTRY("Digicom Systems") }, + { "DMS", _VENDOR_NAME_ENTRY("DOME imaging systems") }, + { "DNV", _VENDOR_NAME_ENTRY("NexView") }, + { "DOM", _VENDOR_NAME_ENTRY("Dome Imaging Systems") }, + { "DON", _VENDOR_NAME_ENTRY("DENON, Ltd.") }, + { "DPC", _VENDOR_NAME_ENTRY("Delta") }, + { "DPI", _VENDOR_NAME_ENTRY("DocuPoint") }, + { "DPL", _VENDOR_NAME_ENTRY("Digital Projection Limited") }, + { "DPN", _VENDOR_NAME_ENTRY("Shanghai Lexiang Technology Limited") }, + { "DPT", _VENDOR_NAME_ENTRY("DPT") }, + { "DRT", _VENDOR_NAME_ENTRY("Digital Research") }, + { "DSJ", _VENDOR_NAME_ENTRY("VR Technology Holdings Limited") }, + { "DSM", _VENDOR_NAME_ENTRY("DSM Digial Services") }, + { "DTC", _VENDOR_NAME_ENTRY("Data Technology") }, + { "DTI", _VENDOR_NAME_ENTRY("Diversified Technology") }, + { "DTK", _VENDOR_NAME_ENTRY("DTK Computer") }, + { "DTX", _VENDOR_NAME_ENTRY("Data Translation") }, + { "DVC", _VENDOR_NAME_ENTRY("DecaView") }, + { "DWE", _VENDOR_NAME_ENTRY("Daewoo") }, + + { "ECS", _VENDOR_NAME_ENTRY("EliteGroup/ECS") }, + { "ENC", _VENDOR_NAME_ENTRY("Eizo") }, + { "EGO", _VENDOR_NAME_ENTRY("Ergo Electronics") }, + { "EKC", _VENDOR_NAME_ENTRY("Kodak") }, + { "EHJ", _VENDOR_NAME_ENTRY("Epson") }, + { "EIZ", _VENDOR_NAME_ENTRY("Eizo") }, + { "ELI", _VENDOR_NAME_ENTRY("Edsun") }, + { "ELS", _VENDOR_NAME_ENTRY("ELSA") }, + { "ELX", _VENDOR_NAME_ENTRY("Elonex") }, + { "EMC", _VENDOR_NAME_ENTRY("ProView/EMC") }, + { "ENC", _VENDOR_NAME_ENTRY("Eizo") }, + { "EPI", _VENDOR_NAME_ENTRY("Envision") }, + { "EQX", _VENDOR_NAME_ENTRY("Equinox") }, + { "ERG", _VENDOR_NAME_ENTRY("Ergo") }, + { "ERP", _VENDOR_NAME_ENTRY("EURAPLAN") }, + { "ESI", _VENDOR_NAME_ENTRY("Extended Systems") }, + { "ETT", _VENDOR_NAME_ENTRY("E-Tech Research") }, + { "EVX", _VENDOR_NAME_ENTRY("Everex") }, + { "EXP", _VENDOR_NAME_ENTRY("Data Export") }, + + { "FCB", _VENDOR_NAME_ENTRY("Furukawa Electric") }, + { "FCM", _VENDOR_NAME_ENTRY("Funai") }, + { "FCT", _VENDOR_NAME_ENTRY("Free Computer Technology") }, + { "FDC", _VENDOR_NAME_ENTRY("Future Domain") }, + { "FDX", _VENDOR_NAME_ENTRY("Findex, Inc. 
") }, + { "FGL", _VENDOR_NAME_ENTRY("Fujitsu") }, + { "FIC", _VENDOR_NAME_ENTRY("First International") }, + { "FOR", _VENDOR_NAME_ENTRY("Formac") }, + { "FOV", _VENDOR_NAME_ENTRY("FOVE INC") }, + { "FRC", _VENDOR_NAME_ENTRY("FORCE Computers") }, + { "FRI", _VENDOR_NAME_ENTRY("Fibernet Research") }, + { "FTN", _VENDOR_NAME_ENTRY("Fountain Technologies") }, + { "FUJ", _VENDOR_NAME_ENTRY("Fujitsu") }, + + { "GAG", _VENDOR_NAME_ENTRY("Gage Applied Sciences") }, + { "GCI", _VENDOR_NAME_ENTRY("Gateway Communications") }, + { "GEN", _VENDOR_NAME_ENTRY("Genesys") }, + { "GMX", _VENDOR_NAME_ENTRY("GMX") }, + { "GRA", _VENDOR_NAME_ENTRY("Graphica") }, + { "GSM", _VENDOR_NAME_ENTRY("LG Electronics") }, + { "GVC", _VENDOR_NAME_ENTRY("GVC") }, + { "GWY", _VENDOR_NAME_ENTRY("Gateway") }, + + { "HCL", _VENDOR_NAME_ENTRY("HCL") }, + { "HCP", _VENDOR_NAME_ENTRY("Hitachi") }, + { "HCW", _VENDOR_NAME_ENTRY("Hauppauge") }, + { "HDL", _VENDOR_NAME_ENTRY("Headland") }, + { "HEC", _VENDOR_NAME_ENTRY("Hisense") }, + { "HEI", _VENDOR_NAME_ENTRY("Hyundai") }, + { "HIT", _VENDOR_NAME_ENTRY("Hitachi/HINT") }, + { "HMX", _VENDOR_NAME_ENTRY("HUMAX Co., Ltd.") }, + { "HSD", _VENDOR_NAME_ENTRY("HannStar Display Corp") }, + { "HSL", _VENDOR_NAME_ENTRY("Hansol") }, + { "HTC", _VENDOR_NAME_ENTRY("Hitachi") }, + { "HVR", _VENDOR_NAME_ENTRY("HTC Corporation") }, + { "HWD", _VENDOR_NAME_ENTRY("HighWater Designs") }, + { "HWP", _VENDOR_NAME_ENTRY("HP") }, + { "HYL", _VENDOR_NAME_ENTRY("Hypereal") }, + { "HYP", _VENDOR_NAME_ENTRY("Hyphen Limited") }, + { "HWV", _VENDOR_NAME_ENTRY("Huawei Technologies Co., Ltd") }, + + { "IBC", _VENDOR_NAME_ENTRY("IBS") }, + { "IBM", _VENDOR_NAME_ENTRY("IBM") }, + { "ICC", _VENDOR_NAME_ENTRY("BICC Data Networks") }, + { "ICL", _VENDOR_NAME_ENTRY("Fujitsu/ICL") }, + { "ICN", _VENDOR_NAME_ENTRY("Sanyo/Icon") }, + { "ICU", _VENDOR_NAME_ENTRY("Intel") }, + { "IDS", _VENDOR_NAME_ENTRY("Intellistor") }, + { "IFT", _VENDOR_NAME_ENTRY("Informtech") }, + { "IGM", _VENDOR_NAME_ENTRY("IGM Communications") }, + { "III", _VENDOR_NAME_ENTRY("Intelligent Instrumentation") }, + { "IIN", _VENDOR_NAME_ENTRY("Intel") }, + { "IMA", _VENDOR_NAME_ENTRY("Imagraph") }, + { "IMC", _VENDOR_NAME_ENTRY("IMC Networks") }, + { "IMP", _VENDOR_NAME_ENTRY("Impression") }, + { "INF", _VENDOR_NAME_ENTRY("Inframetrics") }, + { "INL", _VENDOR_NAME_ENTRY("InnoLux Display Corporation") }, + { "INP", _VENDOR_NAME_ENTRY("Interphase") }, + { "INS", _VENDOR_NAME_ENTRY("Ines") }, + { "INT", _VENDOR_NAME_ENTRY("Intel") }, + { "IOD", _VENDOR_NAME_ENTRY("IODATA") }, + { "ISA", _VENDOR_NAME_ENTRY("ISA") }, + { "ISI", _VENDOR_NAME_ENTRY("Interface Solutions") }, + { "ISL", _VENDOR_NAME_ENTRY("Isolation Systems") }, + { "ITA", _VENDOR_NAME_ENTRY("Itausa") }, + { "ITC", _VENDOR_NAME_ENTRY("ITK") }, + { "ITN", _VENDOR_NAME_ENTRY("NTI Group/ASUS") }, + { "ITK", _VENDOR_NAME_ENTRY("NTI Group") }, + { "IVK", _VENDOR_NAME_ENTRY("Iiyama") }, + { "IVM", _VENDOR_NAME_ENTRY("Idek Iiyama") }, + { "IVR", _VENDOR_NAME_ENTRY("Inlife-Handnet Co., Ltd.") }, + { "IWR", _VENDOR_NAME_ENTRY("Icuiti Corporation") }, + + { "JDI", _VENDOR_NAME_ENTRY("Japan Display Inc") }, + { "JEN", _VENDOR_NAME_ENTRY("Jean") }, + { "JKC", _VENDOR_NAME_ENTRY("JVC Kenwood Corporation") }, + { "JVC", _VENDOR_NAME_ENTRY("JVC") }, + + { "KDS", _VENDOR_NAME_ENTRY("Korea Data Systems") }, + { "KDK", _VENDOR_NAME_ENTRY("Kodiak") }, + { "KES", _VENDOR_NAME_ENTRY("Kesa Crop") }, + { "KFC", _VENDOR_NAME_ENTRY("KFC Computek") }, + { "KPC", _VENDOR_NAME_ENTRY("King Phoenix") }, + { "KSC", 
_VENDOR_NAME_ENTRY("Kinetic Systems") }, + { "KTC", _VENDOR_NAME_ENTRY("Kingston Technology") }, + { "KTG", _VENDOR_NAME_ENTRY("KayserThrede") }, + { "KTR", _VENDOR_NAME_ENTRY("IMRI") }, + { "KYC", _VENDOR_NAME_ENTRY("Kyocera") }, + + { "LAG", _VENDOR_NAME_ENTRY("Laguna Systems") }, + { "LCD", _VENDOR_NAME_ENTRY("Toshiba Matsushita Display Technology Co., Ltd")}, + { "LCS", _VENDOR_NAME_ENTRY("Longshine Electronics") }, + { "LEF", _VENDOR_NAME_ENTRY("Leaf Systems") }, + { "LEN", _VENDOR_NAME_ENTRY("Lenovo Group Limited") }, + { "LGE", _VENDOR_NAME_ENTRY("LG Electronics") }, + { "LKM", _VENDOR_NAME_ENTRY("Likom/LKM") }, + { "LNK", _VENDOR_NAME_ENTRY("Link Technologies") }, + { "LTI", _VENDOR_NAME_ENTRY("Longshine") }, + { "LTN", _VENDOR_NAME_ENTRY("Lite-On") }, + + { "MAG", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "MAX", _VENDOR_NAME_ENTRY("Maxdata/Belinea") }, + { "MAY", _VENDOR_NAME_ENTRY("Maynard Electronics") }, + { "MBC", _VENDOR_NAME_ENTRY("MBC") }, + { "MCC", _VENDOR_NAME_ENTRY("MCCI") }, + { "MCD", _VENDOR_NAME_ENTRY("McDATA") }, + { "MCI", _VENDOR_NAME_ENTRY("Micronics") }, + { "MCR", _VENDOR_NAME_ENTRY("Marina Communications") }, + { "MCS", _VENDOR_NAME_ENTRY("Micro Computer Systems") }, + { "MCT", _VENDOR_NAME_ENTRY("Microtec") }, + { "MDD", _VENDOR_NAME_ENTRY("Modis") }, + { "MDG", _VENDOR_NAME_ENTRY("Madge Networks") }, + { "MDS", _VENDOR_NAME_ENTRY("Micro Display Systems") }, + { "MDT", _VENDOR_NAME_ENTRY("Magus Data") }, + { "MED", _VENDOR_NAME_ENTRY("Medion") }, + { "MEI", _VENDOR_NAME_ENTRY("Panasonic") }, + { "MEL", _VENDOR_NAME_ENTRY("Mitsubishi") }, + { "MET", _VENDOR_NAME_ENTRY("Metheus") }, + { "MFG", _VENDOR_NAME_ENTRY("Microfield Graphics") }, + { "MGC", _VENDOR_NAME_ENTRY("CompuAdd") }, + { "MGT", _VENDOR_NAME_ENTRY("Megatech") }, + { "MIC", _VENDOR_NAME_ENTRY("Micronics") }, + { "MIR", _VENDOR_NAME_ENTRY("Miro") }, + { "MJI", _VENDOR_NAME_ENTRY("MARANTZ JAPAN, INC.") }, + { "MLX", _VENDOR_NAME_ENTRY("Mylex") }, + { "MMX", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "MOR", _VENDOR_NAME_ENTRY("Morse Technology") }, + { "MSI", _VENDOR_NAME_ENTRY("Microstep") }, + { "MSV", _VENDOR_NAME_ENTRY("Mosgi") }, + { "MTC", _VENDOR_NAME_ENTRY("Mitac") }, + { "MTI", _VENDOR_NAME_ENTRY("Morse Technology") }, + { "MTQ", _VENDOR_NAME_ENTRY("Mountain Computer") }, + { "MTS", _VENDOR_NAME_ENTRY("Multi-Tech Systems") }, + { "MTX", _VENDOR_NAME_ENTRY("Matrox") }, + { "MVD", _VENDOR_NAME_ENTRY("Microvitec PLC") }, + { "MVN", _VENDOR_NAME_ENTRY("META COMPANY") }, + { "MWY", _VENDOR_NAME_ENTRY("Microway") }, + { "MYA", _VENDOR_NAME_ENTRY("Monydata") }, + { "MYL", _VENDOR_NAME_ENTRY("Mylex") }, + { "MYX", _VENDOR_NAME_ENTRY("Micronyx") }, + { "MZI", _VENDOR_NAME_ENTRY("Mozo") }, + + { "NAN", _VENDOR_NAME_ENTRY("Nanao") }, + { "NCA", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "NCD", _VENDOR_NAME_ENTRY("NCD") }, + { "NCS", _VENDOR_NAME_ENTRY("Northgate") }, + { "NDC", _VENDOR_NAME_ENTRY("National DataComm") }, + { "NDS", _VENDOR_NAME_ENTRY("Nokia") }, + { "NEC", _VENDOR_NAME_ENTRY("NEC") }, + { "NIC", _VENDOR_NAME_ENTRY("National Instruments") }, + { "NIT", _VENDOR_NAME_ENTRY("Network Info Technology") }, + { "NOK", _VENDOR_NAME_ENTRY("Nokia") }, + { "NPI", _VENDOR_NAME_ENTRY("Network Peripherals") }, + { "NSC", _VENDOR_NAME_ENTRY("National Semiconductor") }, + { "NSS", _VENDOR_NAME_ENTRY("Newport Systems") }, + { "NTI", _VENDOR_NAME_ENTRY("New Tech") }, + { "NVD", _VENDOR_NAME_ENTRY("NVIDIA") }, + { "NVL", _VENDOR_NAME_ENTRY("Novell") }, + { "NXG", _VENDOR_NAME_ENTRY("Nexgen") }, + + { 
"OAS", _VENDOR_NAME_ENTRY("OAsys") }, + { "OCN", _VENDOR_NAME_ENTRY("Olfan") }, + { "OEC", _VENDOR_NAME_ENTRY("Daytek") }, + { "OLC", _VENDOR_NAME_ENTRY("Olicom") }, + { "OLI", _VENDOR_NAME_ENTRY("Olivetti") }, + { "OKI", _VENDOR_NAME_ENTRY("OKI Electric Industrial Company Ltd") }, + { "ONK", _VENDOR_NAME_ENTRY("ONKYO Corporation") }, + { "OPT", _VENDOR_NAME_ENTRY("OPTi") }, + { "OQI", _VENDOR_NAME_ENTRY("Optiquest") }, + { "OTI", _VENDOR_NAME_ENTRY("Orchid Technology") }, + { "OVR", _VENDOR_NAME_ENTRY("Oculus VR Inc.") }, + { "OZO", _VENDOR_NAME_ENTRY("Zoom Telephonics") }, + + { "PAR", _VENDOR_NAME_ENTRY("Parallan Comp Inc") }, + { "PBE", _VENDOR_NAME_ENTRY("Packard Bell") }, + { "PBI", _VENDOR_NAME_ENTRY("Pitney Bowes") }, + { "PBN", _VENDOR_NAME_ENTRY("Packard Bell") }, + { "PCI", _VENDOR_NAME_ENTRY("Pioneer Computer") }, + { "PCP", _VENDOR_NAME_ENTRY("Procomp") }, + { "PDR", _VENDOR_NAME_ENTRY("Pure Data") }, + { "PEA", _VENDOR_NAME_ENTRY("Peacock") }, + { "PGS", _VENDOR_NAME_ENTRY("Princeton Graphics") }, + { "PHI", _VENDOR_NAME_ENTRY("Phillips") }, + { "PHL", _VENDOR_NAME_ENTRY("Philips") }, + { "PIO", _VENDOR_NAME_ENTRY("Pioneer Electronic Corporation") }, + { "PI0", _VENDOR_NAME_ENTRY("Pioneer") }, + { "PIR", _VENDOR_NAME_ENTRY("Pico Technology Inc") }, + { "PJD", _VENDOR_NAME_ENTRY("Projectiondesign AS") }, + { "PLB", _VENDOR_NAME_ENTRY("PLB") }, + { "PLX", _VENDOR_NAME_ENTRY("Ocean Office Automation") }, + { "PMC", _VENDOR_NAME_ENTRY("PMC Consumer Electronics") }, + { "PMV", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "PNR", _VENDOR_NAME_ENTRY("Planar Systems, Inc.") }, + { "PRO", _VENDOR_NAME_ENTRY("Proteon") }, + { "PSI", _VENDOR_NAME_ENTRY("PSI Perceptive Solutions") }, + { "PTS", _VENDOR_NAME_ENTRY("ProView/EMC/PTS") }, + { "PVR", _VENDOR_NAME_ENTRY("Pimax Tech Co., Ltd") }, + + { "QDI", _VENDOR_NAME_ENTRY("Quantum Data Incorporated") }, + { "QDM", _VENDOR_NAME_ENTRY("Quadram") }, + { "QTD", _VENDOR_NAME_ENTRY("Quantum 3D Inc") }, + { "QTM", _VENDOR_NAME_ENTRY("Quantum") }, + + { "RAC", _VENDOR_NAME_ENTRY("Racore Computer Products") }, + { "RCE", _VENDOR_NAME_ENTRY("RCE") }, + { "RCI", _VENDOR_NAME_ENTRY("RC International") }, + { "REL", _VENDOR_NAME_ENTRY("Relisys") }, + { "REM", _VENDOR_NAME_ENTRY("REM") }, + { "RII", _VENDOR_NAME_ENTRY("Racal Interlan") }, + { "RMP", _VENDOR_NAME_ENTRY("Research Machines") }, + { "ROK", _VENDOR_NAME_ENTRY("Rockwell") }, + { "RTI", _VENDOR_NAME_ENTRY("Rancho Technology") }, + { "RUN", _VENDOR_NAME_ENTRY("RUNCO International") }, + + { "SAM", _VENDOR_NAME_ENTRY("Samsung") }, + { "SAN", _VENDOR_NAME_ENTRY("Sanyo Electric Co.,Ltd.") }, + { "SCC", _VENDOR_NAME_ENTRY("SORD") }, + { "SCD", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SDI", _VENDOR_NAME_ENTRY("Samtron/Sigma Designs") }, + { "SDT", _VENDOR_NAME_ENTRY("Siemens AG") }, + { "SEA", _VENDOR_NAME_ENTRY("Segate") }, + { "SEC", _VENDOR_NAME_ENTRY("Seiko/Epson") }, + { "SEN", _VENDOR_NAME_ENTRY("Sencore") }, + { "SGT", _VENDOR_NAME_ENTRY("Stargate Technology/AT&T") }, + { "SGX", _VENDOR_NAME_ENTRY("SGI") }, + { "SHP", _VENDOR_NAME_ENTRY("Sharp") }, + { "SIB", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SIE", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "SII", _VENDOR_NAME_ENTRY("Silicon Image, Inc.") }, + { "SIS", _VENDOR_NAME_ENTRY("SiS/Modula Tech") }, + { "SIT", _VENDOR_NAME_ENTRY("Sitintel") }, + { "SIX", _VENDOR_NAME_ENTRY("Zuniq Data") }, + { "SKD", _VENDOR_NAME_ENTRY("Schneider & Koch") }, + { "SKW", _VENDOR_NAME_ENTRY("Skyworth") }, + { "SKY", _VENDOR_NAME_ENTRY("SKYDATA S.P.A.") }, + { "SLB", 
_VENDOR_NAME_ENTRY("Shlumberger Ltd") }, + { "SLT", _VENDOR_NAME_ENTRY("Salt Internatioinal Corp.") }, + { "SLX", _VENDOR_NAME_ENTRY("Specialix") }, + { "SMC", _VENDOR_NAME_ENTRY("Standard Microsystems") }, + { "SMI", _VENDOR_NAME_ENTRY("Smile") }, + { "SML", _VENDOR_NAME_ENTRY("Smile") }, + { "SMS", _VENDOR_NAME_ENTRY("Silicon Multimedia Systems") }, + { "SNI", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "SNY", _VENDOR_NAME_ENTRY("Sony") }, + { "SOB", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SPE", _VENDOR_NAME_ENTRY("SPEA") }, + { "SPT", _VENDOR_NAME_ENTRY("Sceptre") }, + { "SRC", _VENDOR_NAME_ENTRY("Shamrock/SunRiver") }, + { "SSS", _VENDOR_NAME_ENTRY("S3") }, + { "STA", _VENDOR_NAME_ENTRY("Stesa") }, + { "STB", _VENDOR_NAME_ENTRY("STB Systems") }, + { "STC", _VENDOR_NAME_ENTRY("Sampo/STAC") }, + { "STP", _VENDOR_NAME_ENTRY("Sceptre") }, + { "STR", _VENDOR_NAME_ENTRY("Starlight Networks") }, + { "SUK", _VENDOR_NAME_ENTRY("Schneider & Koch") }, + { "SUP", _VENDOR_NAME_ENTRY("Supra/Diamond Media") }, + { "SUR", _VENDOR_NAME_ENTRY("Surenam") }, + { "SVR", _VENDOR_NAME_ENTRY("Sensics Inc.") }, + { "SYL", _VENDOR_NAME_ENTRY("Sylvania") }, + { "SYN", _VENDOR_NAME_ENTRY("Synaptics Inc") }, + + { "TAI", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TAT", _VENDOR_NAME_ENTRY("Tatung") }, + { "TAX", _VENDOR_NAME_ENTRY("Taxan") }, + { "TCC", _VENDOR_NAME_ENTRY("Tandon") }, + { "TCI", _VENDOR_NAME_ENTRY("Tulip") }, + { "TCL", _VENDOR_NAME_ENTRY("Tech Concepts") }, + { "TCM", _VENDOR_NAME_ENTRY("Techmedia/3Com") }, + { "TCO", _VENDOR_NAME_ENTRY("Thomas Conrad") }, + { "TCR", _VENDOR_NAME_ENTRY("Thomson Consumer Electronics") }, + { "TCS", _VENDOR_NAME_ENTRY("Tatung") }, + { "TDS", _VENDOR_NAME_ENTRY("Tri Data Systems") }, + { "TDT", _VENDOR_NAME_ENTRY("TDT") }, + { "TDY", _VENDOR_NAME_ENTRY("Tandy") }, + { "TEA", _VENDOR_NAME_ENTRY("Teac") }, + { "TEC", _VENDOR_NAME_ENTRY("Tecmar") }, + { "TEI", _VENDOR_NAME_ENTRY("TECO") }, + { "TGI", _VENDOR_NAME_ENTRY("TriGem") }, + { "TGS", _VENDOR_NAME_ENTRY("Torus") }, + { "TOS", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TRI", _VENDOR_NAME_ENTRY("Tricord") }, + { "TRM", _VENDOR_NAME_ENTRY("Tekram") }, + { "TRL", _VENDOR_NAME_ENTRY("Royal") }, + { "TRS", _VENDOR_NAME_ENTRY("Torus") }, + { "TRU", _VENDOR_NAME_ENTRY("Aashima/Truevision") }, + { "TSB", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TSC", _VENDOR_NAME_ENTRY("Sanyo") }, + { "TSI", _VENDOR_NAME_ENTRY("TeleVideo") }, + { "TST", _VENDOR_NAME_ENTRY("Transtream Inc") }, + { "TTC", _VENDOR_NAME_ENTRY("Telecommunications Techniques") }, + { "TTK", _VENDOR_NAME_ENTRY("Totoku") }, + { "TTX", _VENDOR_NAME_ENTRY("TTX") }, + { "TVI", _VENDOR_NAME_ENTRY("TeleVideo/Truevision") }, + { "TVM", _VENDOR_NAME_ENTRY("TVM") }, + { "TWA", _VENDOR_NAME_ENTRY("Tidewater") }, + { "TWE", _VENDOR_NAME_ENTRY("Kontron") }, + { "TXN", _VENDOR_NAME_ENTRY("Texas Instruments") }, + { "TYN", _VENDOR_NAME_ENTRY("Tyan Computer") }, + + { "UBI", _VENDOR_NAME_ENTRY("Ungermann Bass") }, + { "UFO", _VENDOR_NAME_ENTRY("UFO Systems") }, + { "UNA", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNI", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNM", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNO", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNS", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNT", _VENDOR_NAME_ENTRY("Unisys") }, + { "USC", _VENDOR_NAME_ENTRY("UltraStor") }, + { "USR", _VENDOR_NAME_ENTRY("US Robotics") }, + { "UTB", _VENDOR_NAME_ENTRY("Utobia") }, + + { "VES", _VENDOR_NAME_ENTRY("Vestel") }, + { "VIK", _VENDOR_NAME_ENTRY("Viking") }, + { "VLV", _VENDOR_NAME_ENTRY("Valve Corporation") }, + { 
"VMI", _VENDOR_NAME_ENTRY("Vermont MicroSystems") }, + { "VOB", _VENDOR_NAME_ENTRY("Vobis") }, + { "VRG", _VENDOR_NAME_ENTRY("VRgineers, Inc. ") }, + { "VRT", _VENDOR_NAME_ENTRY("Varjo Technologies") }, + { "VSC", _VENDOR_NAME_ENTRY("ViewSonic") }, + + { "WAC", _VENDOR_NAME_ENTRY("Wacom Tech") }, + { "WDC", _VENDOR_NAME_ENTRY("Western Digital") }, + { "WDE", _VENDOR_NAME_ENTRY("Westinghouse Digital Electronics") }, + { "WIL", _VENDOR_NAME_ENTRY("WIPRO") }, + { "WTC", _VENDOR_NAME_ENTRY("Wen Technology") }, + { "WYS", _VENDOR_NAME_ENTRY("Wyse Technology") }, + + { "YMH", _VENDOR_NAME_ENTRY("Yamaha Corporation") }, + { "YHQ", _VENDOR_NAME_ENTRY("Yokogawa") }, + + { "ZCM", _VENDOR_NAME_ENTRY("Zenith") }, + { "ZDS", _VENDOR_NAME_ENTRY("Zenith") }, + { "ZYT", _VENDOR_NAME_ENTRY("Zytex") }, +}; + +#endif /* __NV_PNP_VENDOR_IDS_H__ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h new file mode 100644 index 0000000..6c9a010 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h @@ -0,0 +1,390 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2012 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Utility header file to generate a one-way hash from an arbitrary + * byte array, using the Secure Hashing Algorithm 1 (SHA-1) as defined + * in FIPS PUB 180-1 published April 17, 1995: + * + * http://www.itl.nist.gov/fipspubs/fip180-1.htm + * + * Some common test cases (see Appendices A and B of the above document): + * + * SHA1("abc") = + * A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D + * + * SHA1("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq") = + * 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1 + */ + +#ifndef __NV_SHA1_H__ +#define __NV_SHA1_H__ + +#include "nvtypes.h" + +/*! + * @brief Structure used by the SHA-1 functions to maintain the state of the + * calculations. + */ + +typedef struct +{ + NvU32 state[5]; + NvU32 count; + NvU8 buffer[128]; +} Sha1Context; + + +/*! + * @brief Pointer to a memory accessor function for use by the SHA-1 hash + * function. + * + * Due to memory constraints in some environments where this code is executed + * (e.g., the PMU/DPU), the data that needs to be processed by the SHA-1 hash + * function may not be readily available. This function is responsible for + * copying the data into a buffer to be used by the SHA-1 function. 
+ *
+ * In addition, the SHA-1 library can be used by many different clients, so
+ * the caller supplies a memory accessor function that works in its own
+ * environment.
+ *
+ * @param[out] pBuff The buffer to copy the new data to.
+ * @param[in]  index The desired offset to begin copying from.
+ * @param[in]  size  The requested number of bytes to be copied.
+ * @param[in]  pInfo Pointer to the data passed into sha1Generate as pData.
+ *
+ * @return The actual number of bytes copied into the buffer.
+ */
+
+typedef NvU32 Sha1CopyFunc(NvU8 *pBuff, NvU32 index, NvU32 size, void *pInfo);
+
+
+/*
+ * The following initial values are defined by the SHA-1 algorithm.
+ */
+#define SHA1_INIT_H0 0x67452301 //!< Initial H0 value
+#define SHA1_INIT_H1 0xEFCDAB89 //!< Initial H1 value
+#define SHA1_INIT_H2 0x98BADCFE //!< Initial H2 value
+#define SHA1_INIT_H3 0x10325476 //!< Initial H3 value
+#define SHA1_INIT_H4 0xC3D2E1F0 //!< Initial H4 value
+
+
+/*!
+ * @brief Reverses the byte order of a word, i.e., switches the endianness
+ * of the word.
+ *
+ * @param[in] a A 32-bit word
+ *
+ * @returns The 32-bit word with its byte order reversed.
+ */
+
+#define REVERSE_BYTE_ORDER(a) \
+    (((a) >> 24) | ((a) << 24) | (((a) >> 8) & 0xFF00) | (((a) << 8) & 0xFF0000))
+
+
+/*!
+ * @brief Computation step as defined by SHA-1.
+ *
+ * Unlike the 64-byte buffer version outlined in the SHA-1 algorithm, this
+ * function uses a 128-byte buffer to minimize the calculation needed to
+ * index the data.
+ *
+ * @param[in,out] pState
+ *          Pointer to the state word array.
+ *
+ * @param[in] pBuffer
+ *          Data to operate on. 128 bytes in length. No length checking is
+ *          done; it is assumed to have been done by the calling function.
+ */
+
+static void
+_sha1Transform
+(
+    NvU32 *pState,
+    NvU8 *pBuffer
+)
+{
+    NvU32 a = pState[0];
+    NvU32 b = pState[1];
+    NvU32 c = pState[2];
+    NvU32 d = pState[3];
+    NvU32 e = pState[4];
+    NvU32 *pBuf = (NvU32 *)pBuffer;
+    NvU32 *p;
+    NvU32 i;
+    NvU32 j;
+    NvU32 k;
+
+    for (i = 0; i < 80; i++)
+    {
+        p = &pBuf[i & 0xf];
+        j = p[0];
+        if (i < 16)
+        {
+            j = REVERSE_BYTE_ORDER(j);
+        }
+        else
+        {
+            j ^= p[2] ^ p[8] ^ p[13];
+            j = (j << 1) + (j >> 31);
+        }
+        p[0] = p[16] = j;
+        if (i < 40)
+        {
+            if (i < 20)
+            {
+                k = 0x5a827999 + ((b & (c ^ d)) ^ d);
+            }
+            else
+            {
+                k = 0x6ed9eba1 + (b ^ c ^ d);
+            }
+        }
+        else
+        {
+            if (i < 60)
+            {
+                k = 0x8f1bbcdc + (((b | c) & d) | (b & c));
+            }
+            else
+            {
+                k = 0xca62c1d6 + (b ^ c ^ d);
+            }
+        }
+        j += (a << 5) + (a >> 27) + e + k;
+        e = d;
+        d = c;
+        c = (b << 30) + (b >> 2);
+        b = a;
+        a = j;
+    }
+    pState[0] += a;
+    pState[1] += b;
+    pState[2] += c;
+    pState[3] += d;
+    pState[4] += e;
+}
+
+
+/*!
+ * Initializes the SHA-1 context.
+ *
+ * @param[out] pContext
+ *          Pointer to the context to initialize.
+ */
+
+static void
+_sha1Initialize
+(
+    Sha1Context *pContext
+)
+{
+    pContext->count = 0;
+    pContext->state[0] = SHA1_INIT_H0;
+    pContext->state[1] = SHA1_INIT_H1;
+    pContext->state[2] = SHA1_INIT_H2;
+    pContext->state[3] = SHA1_INIT_H3;
+    pContext->state[4] = SHA1_INIT_H4;
+}
+
+
+/*!
+ * @brief Divides the input buffer into multiple 64-byte chunks and computes
+ * the message digest for each.
+ *
+ * @param[in] pContext
+ *          Pointer to a Sha1Context.
+ *
+ * @param[in] pData
+ *          Pointer to the data array to compute the message digest of.
+ *
+ * @param[in] len
+ *          Size of the data.
+ *
+ * @param[in] copyFunc
+ *          Copy routine to use.
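+ *
+ * Note: within a single update, copyFunc is called with an 'index' that
+ * advances through the data as each 64-byte block is consumed; bytes that
+ * do not fill a complete block are staged in pContext->buffer for the next
+ * update or the final padding step.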
+ */ + +static void +_sha1Update +( + Sha1Context *pContext, + void *pData, + NvU32 len, + Sha1CopyFunc copyFunc +) +{ + NvU32 buffer_offset = (pContext->count & 63); + NvU32 copy_size; + NvU32 idx = 0; + + pContext->count += len; + while ((buffer_offset + len) > 63) + { + copy_size = 64 - buffer_offset; + copyFunc(&pContext->buffer[buffer_offset], idx, copy_size, pData); + _sha1Transform(pContext->state, pContext->buffer); + buffer_offset = 0; + idx += copy_size; + len -= copy_size; + } + if (len > 0) + { + copyFunc(&pContext->buffer[buffer_offset], idx, len, pData); + } +} + + +/*! + * @brief fill memory with zero; not all environments in which this + * code runs have memset(3). + * + * @param[out] pData + * The memory to be filled with zero + * + * @param[in] nBytes + * The number of bytes of memory to fill with zero + */ + +static NV_INLINE void +_sha1MemZero +( + NvU8 *pData, + NvU32 nBytes +) +{ + NvU32 i; + + for (i = 0; i < nBytes; i++) { + pData[i] = 0; + } +} + + +/*! + * @brief Pads the message as specified by the SHA-1 algorithm and computes + * the message digest on the final message chunk(s). + * + * @param[out] pDigest + * The SHA-1 hash values. + * + * @param[in] pContext + * Pointer to a Sha1Context. + */ + +static void +_sha1Final +( + NvU8 *pDigest, + Sha1Context *pContext +) +{ + NvU32 i; + NvU32 bufferOffset = (pContext->count & 63); + NvU8 *pBuffer = (NvU8*)&pContext->buffer[bufferOffset]; + NvU32 *pCount; + NvU32 *pDig32; + + // append padding pattern to the end of input + *pBuffer++ = 0x80; + if (bufferOffset < 56) + { + _sha1MemZero(pBuffer, 59 - bufferOffset); + } + else + { + // need an extra sha1_transform + if (bufferOffset < 63) + { + _sha1MemZero(pBuffer, 63 - bufferOffset); + } + _sha1Transform(pContext->state, pContext->buffer); + _sha1MemZero(pContext->buffer, 60); + } + + // set final count (this is the number of *bits* not *bytes*) + pCount = (NvU32*)&pContext->buffer[15 << 2]; + *pCount = REVERSE_BYTE_ORDER(pContext->count << 3); + + _sha1Transform(pContext->state, pContext->buffer); + + // output hash with each dword in big endian + if (pDigest) + { + pDig32 = (NvU32*) pDigest; + for (i = 0; i < 5; i++) + { + pDig32[i] = REVERSE_BYTE_ORDER(pContext->state[i]); + } + } +} + + +/*! + * @brief Generates the SHA-1 hash value on the data provided. + * + * The function does not manipulate the source data directly, as it may not + * have direct access to it. Therefore, it relies upon the copy function to + * copy segments of the data into a local buffer before any manipulation takes + * place. + * + * @param[out] pHash + * Pointer to store the hash array. The buffer must be 20 bytes in + * length, and the result is stored in big endian format. + * + * @param[in] pData + * The source data array to transform. The actual values and make-up + * of this parameter are dependent on the copy function. + * + * @param[in] nBytes + * The size, in bytes, of the source data. + * + * @param[in] copyFunc + * The function responsible for copying data from the source + * for use by the sha1 function. It is possible for the data + * to exist outside the current execution environment (e.g., + * the PMU, and the data to hash are in system memory), so + * the function will never directly manipulate the source + * data. 
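+ *
+ * Illustrative usage (assumes the hypothetical myCopyFunc accessor sketched
+ * above for directly addressable data):
+ *
+ *     NvU8 digest[NV_SHA1_DIGEST_LENGTH];
+ *
+ *     sha1Generate(digest, pMyData, myDataSize, myCopyFunc);
+ *     // digest now holds the 20-byte hash, each dword in big endian.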
+ */ + +#define NV_SHA1_BLOCK_LENGTH 64 +#define NV_SHA1_DIGEST_LENGTH 20 + +static void +sha1Generate +( + NvU8 pHash[NV_SHA1_DIGEST_LENGTH], + void *pData, + NvU32 nBytes, + Sha1CopyFunc copyFunc +) +{ + Sha1Context context; + + _sha1Initialize(&context); + _sha1Update(&context, pData, nBytes, copyFunc); + _sha1Final(pHash, &context); +} + + +#endif /* __NV_SHA1_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h new file mode 100644 index 0000000..b358d55 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h @@ -0,0 +1,15 @@ +#ifndef __NV_UNIX_VERSION_H__ +#define __NV_UNIX_VERSION_H__ + +#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) + +#define NV_VERSION_STRING "35.4.1" + +#else + +#error "nvUnixVersion.h should only be included in UNIX builds" + +#endif + +#endif /* __NV_UNIX_VERSION_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h new file mode 100644 index 0000000..135103e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h @@ -0,0 +1,17 @@ +// nvVer.h - Versions of NV drivers + +#define NV_COMPANY_NAME_STRING_SHORT "NVIDIA" +#define NV_COMPANY_NAME_STRING_FULL "NVIDIA Corporation" +#define NV_COMPANY_NAME_STRING NV_COMPANY_NAME_STRING_FULL +#define NV_COPYRIGHT_YEAR "2022" +#define NV_COPYRIGHT "(C) " NV_COPYRIGHT_YEAR " NVIDIA Corporation. All rights reserved." // Please do not use the non-ascii copyright symbol for (C). + +#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) + +// All Version numbering for Unix builds has moved. (Source should be re-directed to directly include that header.) +#include "nvUnixVersion.h" + +#else + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h new file mode 100644 index 0000000..dbb5189 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h @@ -0,0 +1,558 @@ +/* + * Copyright © 2010 Intel Corporation + * Copyright © 2010 Francisco Jerez + * Copyright © 2012 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * This file was copied from the X.Org X server source at commit
+ * 5884e7dedecdd82ddbb037360cf9c85143e094b5 and modified to match NVIDIA's X
+ * driver code style.
+ */
+
+#ifndef _NV_LIST_H_
+#define _NV_LIST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif //__cplusplus
+
+#include "nvmisc.h"
+
+    #define HAVE_TYPEOF 1
+
+/**
+ * @file Classic doubly-linked circular list implementation.
+ * For real usage examples of the linked list, see the file test/list.c
+ *
+ * Example:
+ * We need to keep a list of struct foo in the parent struct bar, i.e. what
+ * we want is something like this.
+ *
+ *     struct bar {
+ *          ...
+ *          struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{}
+ *          ...
+ *     }
+ *
+ * We need one list head in bar and a list element in each of the list_of_foos
+ * (both are of data type 'NVListRec').
+ *
+ *     struct bar {
+ *          ...
+ *          NVListRec list_of_foos;
+ *          ...
+ *     }
+ *
+ *     struct foo {
+ *          ...
+ *          NVListRec entry;
+ *          ...
+ *     }
+ *
+ * Now we initialize the list head:
+ *
+ *     struct bar bar;
+ *     ...
+ *     nvListInit(&bar.list_of_foos);
+ *
+ * Then we create the first element and add it to this list:
+ *
+ *     struct foo *foo = malloc(...);
+ *     ....
+ *     nvListAdd(&foo->entry, &bar.list_of_foos);
+ *
+ * Repeat the above for each element you want to add to the list. Deleting
+ * works with the element itself.
+ *
+ *     nvListDel(&foo->entry);
+ *     free(foo);
+ *
+ * Note: calling nvListDel(&bar.list_of_foos) will set bar.list_of_foos to an
+ * empty list again.
+ *
+ * Looping through the list requires a 'struct foo' as iterator and the
+ * name of the field the subnodes use.
+ *
+ *     struct foo *iterator;
+ *     nvListForEachEntry(iterator, &bar.list_of_foos, entry) {
+ *         if (iterator->something == ...)
+ *             ...
+ *     }
+ *
+ * Note: You must not call nvListDel() on the iterator if you continue the
+ * loop. You need to run the safe for-each loop instead:
+ *
+ *     struct foo *iterator, *next;
+ *     nvListForEachEntry_safe(iterator, next, &bar.list_of_foos, entry) {
+ *         if (...)
+ *             nvListDel(&iterator->entry);
+ *     }
+ *
+ */
+
+/**
+ * The linkage struct for list nodes. This struct must be part of your
+ * to-be-linked struct. NVListRec is required for both the head of the
+ * list and for each list node.
+ *
+ * Position and name of the NVListRec field are irrelevant.
+ * There are no requirements that elements of a list are of the same type.
+ * There are no requirements for a list head; any NVListRec can be a list
+ * head.
+ */
+typedef struct NVList {
+    struct NVList *next, *prev;
+} NVListRec, *NVListPtr;
+
+/**
+ * Initialize the list as an empty list.
+ *
+ * Example:
+ * nvListInit(&bar->list_of_foos);
+ *
+ * @param list The list to initialize.
+ */
+static NV_INLINE void
+nvListInit(NVListPtr list)
+{
+    list->next = list->prev = list;
+}
+
+/**
+ * Initialize the list as an empty list.
+ *
+ * This is functionally the same as nvListInit, but can be used for
+ * initialization of global variables.
+ *
+ * Example:
+ * static NVListRec list_of_foos = NV_LIST_INIT(&list_of_foos);
+ *
+ * @param head The list to initialize.
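+ *
+ * Note: NV_LIST_INIT relies on C99 designated initializers, so it can only
+ * appear in an initializer; use nvListInit() to (re)initialize a list at
+ * runtime.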
+ */
+#define NV_LIST_INIT(head) { .prev = (head), .next = (head) }
+
+static NV_INLINE void
+__nvListAdd(NVListPtr entry, NVListPtr prev, NVListPtr next)
+{
+    next->prev = entry;
+    entry->next = next;
+    entry->prev = prev;
+    prev->next = entry;
+}
+
+/**
+ * Insert a new element after the given list head. The new element does not
+ * need to be initialised as an empty list.
+ * The list changes from:
+ *      head -> some element -> ...
+ * to
+ *      head -> new element -> older element -> ...
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * nvListAdd(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to prepend to the list.
+ * @param head The existing list.
+ */
+static NV_INLINE void
+nvListAdd(NVListPtr entry, NVListPtr head)
+{
+    __nvListAdd(entry, head, head->next);
+}
+
+/**
+ * Append a new element to the end of the list given with this list head.
+ *
+ * The list changes from:
+ *      head -> some element -> ... -> lastelement
+ * to
+ *      head -> some element -> ... -> lastelement -> new element
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * nvListAppend(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to append to the list.
+ * @param head The existing list.
+ */
+static NV_INLINE void
+nvListAppend(NVListPtr entry, NVListPtr head)
+{
+    __nvListAdd(entry, head->prev, head);
+}
+
+static NV_INLINE void
+__nvListDel(NVListPtr prev, NVListPtr next)
+{
+    next->prev = prev;
+    prev->next = next;
+}
+
+/**
+ * Remove the element from the list it is in. Using this function will reset
+ * the pointers to/from this element so it is removed from the list. It does
+ * NOT free the element itself or manipulate it otherwise.
+ *
+ * Using nvListDel on a pure list head (like in the example at the top of
+ * this file) will NOT remove the first element from the list but rather
+ * reset the list to an empty list.
+ *
+ * Example:
+ * nvListDel(&foo->entry);
+ *
+ * @param entry The element to remove.
+ */
+static NV_INLINE void
+nvListDel(NVListPtr entry)
+{
+    __nvListDel(entry->prev, entry->next);
+    nvListInit(entry);
+}
+
+/**
+ * Check if the list is empty.
+ *
+ * Example:
+ * nvListIsEmpty(&bar->list_of_foos);
+ *
+ * @return NV_TRUE if the list contains no elements; NV_FALSE otherwise.
+ */
+static NV_INLINE NvBool
+nvListIsEmpty(const NVListRec *head)
+{
+    return head->next == head;
+}
+
+static NV_INLINE int
+nvListCount(const NVListRec *head)
+{
+    NVListPtr next;
+    int count = 0;
+
+    for (next = head->next; next != head; next = next->next) {
+        count++;
+    }
+
+    return count;
+}
+
+/**
+ * Check if entry is present in the list.
+ *
+ * Example:
+ * nvListPresent(&foo->entry, &bar->list_of_foos);
+ *
+ * @return NV_TRUE if the list contains the specified entry; NV_FALSE
+ * otherwise.
+ */
+static NV_INLINE NvBool
+nvListPresent(const NVListRec *entry, const NVListRec *head)
+{
+    const NVListRec *next;
+
+    for (next = head->next; next != head; next = next->next) {
+        if (next == entry) {
+            return NV_TRUE;
+        }
+    }
+
+    return NV_FALSE;
+}
+
+/**
+ * Returns a pointer to the container of this list element.
+ *
+ * Example:
+ * struct foo* f;
+ * f = nv_container_of(&foo->entry, struct foo, entry);
+ * assert(f == foo);
+ *
+ * @param ptr Pointer to the NVListRec.
+ * @param type Data type of the list element.
+ * @param member Member name of the NVListRec field in the list element.
+ * @return A pointer to the data struct containing the given list node.
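+ *
+ * Note: this simply subtracts the member offset (NV_OFFSETOF) from 'ptr',
+ * so 'ptr' must point at an NVListRec that is actually embedded in an
+ * instance of 'type'.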
+ */
+#ifndef nv_container_of
+#define nv_container_of(ptr, type, member) \
+    (type *)((char *)(ptr) - NV_OFFSETOF(type, member))
+#endif
+
+/**
+ * Alias of nv_container_of
+ */
+#define nvListEntry(ptr, type, member) \
+    nv_container_of(ptr, type, member)
+
+/**
+ * Retrieve the first list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *first;
+ * first = nvListFirstEntry(&bar->list_of_foos, struct foo, entry);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the NVListRec field in the list element.
+ * @return A pointer to the first list element.
+ */
+#define nvListFirstEntry(ptr, type, member) \
+    nvListEntry((ptr)->next, type, member)
+
+/**
+ * Retrieve the last list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *last;
+ * last = nvListLastEntry(&bar->list_of_foos, struct foo, entry);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the NVListRec field in the list element.
+ * @return A pointer to the last list element.
+ */
+#define nvListLastEntry(ptr, type, member) \
+    nvListEntry((ptr)->prev, type, member)
+
+#ifdef HAVE_TYPEOF
+#define __nv_container_of(ptr, sample, member) \
+    nv_container_of(ptr, __typeof__(*sample), member)
+#else
+/* This implementation of __nv_container_of has undefined behavior according
+ * to the C standard, but it works in many cases. If your compiler doesn't
+ * support __typeof__() and fails with this implementation, please try a newer
+ * compiler.
+ */
+#define __nv_container_of(ptr, sample, member) \
+    (void *)((char *)(ptr) \
+             - ((char *)&(sample)->member - (char *)(sample)))
+#endif
+
+/**
+ * Loop through the list given by head and set pos to each struct in the
+ * list.
+ *
+ * Example:
+ * struct foo *iterator;
+ * nvListForEachEntry(iterator, &bar->list_of_foos, entry) {
+ *      [modify iterator]
+ * }
+ *
+ * This macro is not safe for node deletion. Use nvListForEachEntry_safe
+ * instead.
+ *
+ * @param pos Iterator variable of the type of the list elements.
+ * @param head List head
+ * @param member Member name of the NVListRec in the list elements.
+ *
+ */
+#ifdef HAVE_TYPEOF
+#define __NV_LIST_SET(x, y) x = y
+#else
+static NV_INLINE void __nvListSet(void **x, void *y)
+{
+    *x = y;
+}
+
+#define __NV_LIST_SET(x, y) __nvListSet((void **) &x, (void *) (y))
+#endif
+
+#define nvListForEachEntry(pos, head, member) \
+    for (__NV_LIST_SET(pos, __nv_container_of((head)->next, pos, member)); \
+         &pos->member != (head); \
+         __NV_LIST_SET(pos, __nv_container_of(pos->member.next, pos, member)))
+
+/**
+ * Loop through the list, keeping a backup pointer to the element. This
+ * macro allows for the deletion of a list element while looping through the
+ * list.
+ *
+ * See nvListForEachEntry for more details.
+ */
+#define nvListForEachEntry_safe(pos, tmp, head, member) \
+    for (__NV_LIST_SET(pos, __nv_container_of((head)->next, pos, member)), \
+         __NV_LIST_SET(tmp, __nv_container_of(pos->member.next, pos, member)); \
+         &pos->member != (head); \
+         __NV_LIST_SET(pos, tmp), \
+         __NV_LIST_SET(tmp, __nv_container_of(pos->member.next, tmp, member)))
+
+/* NULL-Terminated List Interface
+ *
+ * The interface below does _not_ use the NVListRec as described above.
+ * It is mainly for legacy structures that cannot easily be switched to
+ * NVListRec.
+ *
+ * This interface is for structs like
+ *      struct foo {
+ *          [...]
+ *          struct foo *next;
+ *          [...]
+ *      };
+ *
+ * The position and field name of "next" are arbitrary.
+ */
+
+/**
+ * Init the element as a null-terminated list.
+ *
+ * Example:
+ * struct foo *list = malloc(...);
+ * nvNTListInit(list, next);
+ *
+ * @param list The list element that will be the start of the list
+ * @param member Member name of the field pointing to the next struct
+ */
+#define nvNTListInit(_list, _member) \
+    (_list)->_member = NULL
+
+/**
+ * Returns the next element in the list, or NULL on termination.
+ *
+ * Example:
+ * struct foo *element = list;
+ * while ((element = nvNTListNext(element, next)) != NULL) { }
+ *
+ * This macro is not safe for node deletion. Use nvNTListForEachEntrySafe
+ * instead.
+ *
+ * @param list The list or current element.
+ * @param member Member name of the field pointing to the next struct.
+ */
+#define nvNTListNext(_list, _member) \
+    (_list)->_member
+
+/**
+ * Iterate through each element in the list.
+ *
+ * Example:
+ * struct foo *iterator;
+ * nvNTListForEachEntry(iterator, list, next) {
+ *     [modify iterator]
+ * }
+ *
+ * @param entry Assigned to the current list element
+ * @param list The list to iterate through.
+ * @param member Member name of the field pointing to the next struct.
+ */
+#define nvNTListForEachEntry(_entry, _list, _member) \
+    for (_entry = _list; _entry; _entry = (_entry)->_member)
+
+/**
+ * Iterate through each element in the list, keeping a backup pointer to the
+ * element. This macro allows for the deletion of a list element while
+ * looping through the list.
+ *
+ * See nvNTListForEachEntry for more details.
+ *
+ * @param entry Assigned to the current list element
+ * @param tmp The pointer to the next element
+ * @param list The list to iterate through.
+ * @param member Member name of the field pointing to the next struct.
+ */
+#define nvNTListForEachEntrySafe(_entry, _tmp, _list, _member) \
+    for (_entry = _list, _tmp = (_entry) ? (_entry)->_member : NULL;\
+         _entry; \
+         _entry = _tmp, _tmp = (_tmp) ? (_tmp)->_member : NULL)
+
+/**
+ * Append the element to the end of the list. This macro may be used to
+ * merge two lists.
+ *
+ * Example:
+ * struct foo *elem = malloc(...);
+ * nvNTListInit(elem, next);
+ * nvNTListAppend(elem, list, struct foo, next);
+ *
+ * Resulting list order:
+ * list_item_0 -> list_item_1 -> ... -> elem_item_0 -> elem_item_1 ...
+ *
+ * @param entry An entry (or list) to append to the list
+ * @param list The list to append to. This list must be a valid list, not
+ * NULL.
+ * @param type The list type
+ * @param member Member name of the field pointing to the next struct
+ */
+#define nvNTListAppend(_entry, _list, _type, _member) \
+    do { \
+        _type *__iterator = _list; \
+        while (__iterator->_member) { __iterator = __iterator->_member; }\
+        __iterator->_member = _entry; \
+    } while (0)
+
+/**
+ * Insert the element at the next position in the list. This macro may be
+ * used to insert a list into a list.
+ *
+ * Example:
+ * struct foo *elem = malloc(...);
+ * nvNTListInit(elem, next);
+ * nvNTListInsert(elem, list, struct foo, next);
+ *
+ * Resulting list order:
+ * list_item_0 -> elem_item_0 -> elem_item_1 ... -> list_item_1 -> ...
+ *
+ * @param entry An entry (or list) to append to the list
+ * @param list The list to insert into. This list must be a valid list, not
+ * NULL.
+ * @param type The list type + * @param member Member name of the field pointing to next struct + */ +#define nvNTListInsert(_entry, _list, _type, _member) \ + do { \ + nvNTListAppend((_list)->_member, _entry, _type, _member); \ + (_list)->_member = _entry; \ + } while (0) + +/** + * Delete the entry from the list by iterating through the list and + * removing any reference from the list to the entry. + * + * Example: + * struct foo *elem = + * nvNTListDel(elem, list, struct foo, next); + * + * @param entry The entry to delete from the list. entry is always + * re-initialized as a null-terminated list. + * @param list The list containing the entry, set to the new list without + * the removed entry. + * @param type The list type + * @param member Member name of the field pointing to the next entry + */ +#define nvNTListDel(_entry, _list, _type, _member) \ + do { \ + _type *__e = _entry; \ + if (__e == NULL || _list == NULL) break; \ + if ((_list) == __e) { \ + _list = __e->_member; \ + } else { \ + _type *__prev = _list; \ + while (__prev->_member && __prev->_member != __e) \ + __prev = nvNTListNext(__prev, _member); \ + if (__prev->_member) \ + __prev->_member = __e->_member; \ + } \ + nvNTListInit(__e, _member); \ + } while(0) + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif /* _NV_LIST_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h new file mode 100644 index 0000000..70c9651 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h @@ -0,0 +1,219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * NVIDIA GPZ vulnerability mitigation definitions. + */ + +/* + * There are two copies of this file for legacy reasons: + * + * P4: <$NV_SOURCE/>drivers/common/inc/nv_speculation_barrier.h + * Git: include/nv_speculation_barrier.h + * + * Both files need to be kept in sync if any changes are required. 
+ */ + +#ifndef _NV_SPECULATION_BARRIER_H_ +#define _NV_SPECULATION_BARRIER_H_ + +#define NV_SPECULATION_BARRIER_VERSION 2 + +/* + * GNU-C/MSC/clang - x86/x86_64 : x86_64, __i386, __i386__ + * GNU-C - THUMB mode : __GNUC__, __thumb__ + * GNU-C - ARM modes : __GNUC__, __arm__, __aarch64__ + * armclang - THUMB mode : __ARMCC_VERSION, __thumb__ + * armclang - ARM modes : __ARMCC_VERSION, __arm__, __aarch64__ + * GHS - THUMB mode : __ghs__, __THUMB__ + * GHS - ARM modes : __ghs__, __ARM__, __ARM64__ + */ + +#if defined(_M_IX86) || defined(__i386__) || defined(__i386) \ + || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) + /* All x86 */ + #define NV_SPECULATION_BARRIER_x86 + +#elif defined(macintosh) || defined(__APPLE__) \ + || defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) \ + || defined(__POWERPC__) || defined(__ppc) || defined(__ppc__) \ + || defined(__ppc64__) || defined(__PPC__) \ + || defined(__PPC64__) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) + /* All PowerPC */ + #define NV_SPECULATION_BARRIER_PPC + +#elif (defined(__GNUC__) && defined(__thumb__)) \ + || (defined(__ARMCC_VERSION) && defined(__thumb__)) \ + || (defined(__ghs__) && defined(__THUMB__)) + /* ARM-thumb mode(<=ARMv7)/T32 (ARMv8) */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst.w 0xf3af8014\n" + +#elif (defined(__GNUC__) && defined(__arm__)) \ + || (defined(__ARMCC_VERSION) && defined(__arm__)) \ + || (defined(__ghs__) && defined(__ARM__)) + /* aarch32(ARMv8) / arm(<=ARMv7) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst 0xe320f014\n" + +#elif (defined(__GNUC__) && defined(__aarch64__)) \ + || (defined(__ARMCC_VERSION) && defined(__aarch64__)) \ + || (defined(__ghs__) && defined(__ARM64__)) + /* aarch64(ARMv8) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB "HINT #20\n" +#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS +# define nv_speculation_barrier() +#else + #error "Unknown compiler/chip family" +#endif + +/* + * nv_speculation_barrier -- General-purpose speculation barrier + * + * This approach provides full protection against variant-1 vulnerability. + * However, the recommended approach is detailed below (See: + * nv_array_index_no_speculate) + * + * Semantics: + * Any memory read that is sequenced after a nv_speculation_barrier(), + * and contained directly within the scope of nv_speculation_barrier() or + * directly within a nested scope, will not speculatively execute until all + * conditions for entering that scope have been architecturally resolved. + * + * Example: + * if (untrusted_index_from_user < bound) { + * ... + * nv_speculation_barrier(); + * ... 
+ * x = array1[untrusted_index_from_user]; + * bit = x & 1; + * y = array2[0x100 * bit]; + * } + */ + +#if defined(NV_SPECULATION_BARRIER_x86) +// Delete after all references are changed to nv_speculation_barrier +#define speculation_barrier() nv_speculation_barrier() + +static inline void nv_speculation_barrier(void) +{ + +#if defined(__GNUC__) || defined(__clang__) + __asm__ __volatile__ ("lfence" : : : "memory"); +#endif + +} + +#elif defined(NV_SPECULATION_BARRIER_PPC) + +static inline void nv_speculation_barrier(void) +{ + asm volatile("ori 31,31,0"); +} + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + +/* Note: Cortex-A9 GNU-assembler seems to complain about DSB SY */ + #define nv_speculation_barrier() \ + asm volatile \ + ( \ + "DSB sy\n" \ + "ISB\n" \ + : : : "memory" \ + ) +#endif + +/* + * nv_array_index_no_speculate -- Recommended variant-1 mitigation approach + * + * The array-index-no-speculate approach "de-speculates" an array index that + * has already been bounds-checked. + * + * This approach is preferred over nv_speculation_barrier due to the following + * reasons: + * - It is just as effective as the general-purpose speculation barrier. + * - It clearly identifies what array index is being de-speculated and is thus + * self-commenting, whereas the general-purpose speculation barrier requires + * an explanation of what array index is being de-speculated. + * - It performs substantially better than the general-purpose speculation + * barrier on ARM Cortex-A cores (the difference is expected to be tens of + * cycles per invocation). Within tight loops, this difference may become + * noticeable. + * + * Semantics: + * Provided count is non-zero and the caller has already validated or otherwise + * established that index < count, any speculative use of the return value will + * use a speculative value that is less than count. + * + * Example: + * if (untrusted_index_from_user < bound) { + * untrusted_index_from_user = nv_array_index_no_speculate( + * untrusted_index_from_user, bound); + * ... + * x = array1[untrusted_index_from_user]; + * ... + * } + * + * The use of nv_array_index_no_speculate() in the above example ensures that + * subsequent uses of untrusted_index_from_user will not execute speculatively + * (they will wait for the bounds check to complete). + */ + +static inline unsigned long nv_array_index_no_speculate(unsigned long index, + unsigned long count) +{ +#if defined(NV_SPECULATION_BARRIER_x86) && (defined(__GNUC__) || defined(__clang__)) + unsigned long mask; + + __asm__ __volatile__ + ( + "CMP %2, %1 \n" + "SBB %0, %0 \n" + : "=r"(mask) : "r"(index), "r"(count) : "cc" + ); + + return (index & mask); + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + unsigned long mask; + + asm volatile + ( + "CMP %[ind], %[cnt] \n" + "SBC %[res], %[cnt], %[cnt] \n" + NV_SPEC_BARRIER_CSDB + : [res] "=r" (mask) : [ind] "r" (index), [cnt] "r" (count): "cc" + ); + + return (index & mask); + +/* Fallback to generic speculation barrier for unsupported platforms */ +#else + nv_speculation_barrier(); + + return index; +#endif +} + +#endif //_NV_SPECULATION_BARRIER_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h new file mode 100644 index 0000000..ae3de56 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1997-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_CTASSERT_H +#define __NV_CTASSERT_H + +/*****************************************************************************/ + +/* Compile Time assert + * ------------------- + * Use ct_assert(b) instead of assert(b) whenever the condition 'b' is constant, + * i.e. when 'b' can be determined at compile time. + * + * e.g.: check array size: + * ct_assert(__GL_ARRAYSIZE(arrayName) == constArraySize); + * e.g.: check struct size alignment: + * ct_assert(sizeof(struct xy) % 64 == 0); + * + * When available, standard C or C++ language constructs are used: + * - ISO C++11 defines the static_assert keyword + * - ISO C11 defines the _Static_assert keyword + * + * Note that recent versions of Clang support _Static_assert in all compiler modes + * - not just C11 mode - so we test for that in addition to checking explicitly for + * C11 and C++11 support. + * + * Those new language standards aren't available on all supported platforms; an + * alternate method which involves array declarations is employed in that case, + * described below. + * + * In C, there is a restriction where ct_assert() can be placed: + * It can be placed wherever a variable declaration can be placed, i.e.: + * - either anywhere at file scope + * - or inside a function at the beginning of any {} block; it may be mixed + * with variable declarations. + * e.g.: + * void function() + * { + * ct_assert(...); <-- ok \ + * int a; | + * ct_assert(...); <-- ok | declaration section + * int b; | + * ct_assert(...); <-- ok / + * + * a = 0; -- first statement + * + * int c; <-- error + * ct_assert(...); <-- error + * + * {ct_assert(...);} <-- ok (uses its own block for ct_assert()) + * } + * + * In CPP, there is no such restriction, i.e. it can be placed at file scope + * or anywhere inside a function or namespace or class (i.e., wherever + * a variable declaration may be placed). + * + * For C code, the mechanism of this ct_assert() is to declare a prototype + * of a function (e.g. compile_time_assertion_failed_in_line_555, if current + * line number is 555), which gets an array as argument: + * (1) the size of this array is +1, if b != 0 (ok) + * (2) the size of this array is -1, if b == 0 (error) + * + * In case (2) the compiler throws an error. + * e.g. msvc compiler: + * error C2118: negative subscript or subscript is too large + * e.g. 
gcc 2.95.3: + * size of array `_compile_time_assertion_failed_in_line_555' is negative + * + * In case the condition 'b' is not constant, the msvc compiler throws + * an error: + * error C2057: expected constant expression + * In this case the run time assert() must be used. + * + * For C++ code, we use a different technique because the function prototype + * declaration can have function linkage conflicts. If a single compilation + * unit has ct_assert() statements on the same line number in two different + * files, we would have: + * + * compile_time_assertion_failed_in_line_777(...); from xxx.cpp + * compile_time_assertion_failed_in_line_777(...); from xxx.h + * + * That is valid C++. But if either declaration were in an extern "C" block, + * the same function would be declared with two different linkage types and an + * error would ensue. + * + * Instead, ct_assert() for C++ simply declares an array typedef. As in the C + * version, we will get a compilation error if a typedef with a negative size + * is specified. Line numbers are not needed because C++ allows redundant + * typedefs as long as they are all defined the same way. But we tack them on + * anyway in case the typedef name is reported in compiler errors. C does not + * permit redundant typedefs, so this version should not be used in true C + * code. It can be used in extern "C" blocks of C++ code, however. As with + * the C version, MSVC will throw a "negative subscript" or "expected constant + * expression" error if the expression asserted is false or non-constant. + * + * Notes: + * - This ct_assert() does *not* generate any code or variable. + * Therefore there is no need to define it away for RELEASE builds. + * - The integration of the current source file number (__LINE__) ... + * ... would be required in C++ to allow multiple use inside the same + * class/namespace (if we used the C-style expansion), because the id + * must be unique. + * ... is nice to have in C or C++ if the compiler's error message contains + * the id (this is not the case for msvc) + * - Using three nested macros instead of only one is necessary to get the id + * compile_time_assertion_failed_in_line_555 + * instead of + * compile_time_assertion_failed_in_line___LINE__ + */ + +#if defined(__clang__) +# ifndef __has_extension +# define __has_extension __has_feature // Compatibility with Clang pre-3.0 compilers. 
+# endif +# define CLANG_C_STATIC_ASSERT __has_extension(c_static_assert) +#else +# define CLANG_C_STATIC_ASSERT 0 +#endif + +// Adding this macro to fix MISRA 2012 rule 20.12 +#define NV_CTASSERT_STRINGIFY_MACRO(b) #b + +#if !defined(NVOC) && ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || CLANG_C_STATIC_ASSERT) + // ISO C11 defines the _Static_assert keyword +# define ct_assert(b) _Static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)) +# define ct_assert_i(b,line) _Static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)NV_CTASSERT_STRINGIFY_MACRO(line)) +#elif (defined(__cplusplus) && __cplusplus >= 201103L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) + // ISO C++11 defines the static_assert keyword +# define ct_assert(b) static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)) +# define ct_assert_i(b,line) static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)NV_CTASSERT_STRINGIFY_MACRO(line)) +#else + // For compilers which don't support ISO C11 or C++11, we fall back to an + // array (type) declaration +# define ct_assert(b) ct_assert_i(b,__LINE__) +# define ct_assert_i(b,line) ct_assert_ii(b,line) +# ifdef __cplusplus +# define ct_assert_ii(b,line) typedef char compile_time_assertion_failed_in_line_##line[(b)?1:-1] +# else + /* + * The use of a function prototype "void compile_time_assertion_failed_in_line_##line(..) + * above violates MISRA-C 2012 Rule 8.6 since the rule disallows a function + * declaration without a definition. To fix the MISRA rule, the cplusplus style + * 'typdef char compile_time_assertion_failed_in_line_##line' + * is acceptable, but doesn't work for typical C code since there can be duplicate + * line numbers leading to duplicate typedefs which C doesn't allow. + * + * The following macro uses the predefined macro __COUNTER__ to create unique + * typedefs that fixes the MISRA violations. However, not all C compilers support + * that macro and even for compilers that support it, the underlying code makes + * use of variably modified identifiers in ct_assert that makes the use of this + * unviable. + * + * For now restrict the use of MACRO only on + * i) GCC 4.3.0 and above that supports __COUNTER__ macro + * ii) Specifically the Falcon port of the compiler since the use of variably + * modified identifiers have been removed on those projects + * + * TBD: Enable the macro on MSVC and CLANG pending + */ +# if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40300) && defined(GCC_FALCON) +# define ct_assert_ii(b,line) ct_assert_iii(b,line,__COUNTER__) +# define ct_assert_iii(b,line,cntr) ct_assert_cntr(b,line,cntr) +# define ct_assert_cntr(b,line,cntr) typedef char cnt##cntr##_compile_time_assertion_failed_in_line_##line[(b)?1:-1] __attribute__((unused)) +# else +# define ct_assert_ii(b,line) void compile_time_assertion_failed_in_line_##line(int _compile_time_assertion_failed_in_line_##line[(b) ? 
1 : -1]) +# endif +# endif +#endif + +#endif // __NV_CTASSERT_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h new file mode 100644 index 0000000..e0b3b41 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h @@ -0,0 +1,529 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVLOG_DEFS_H_ +#define _NVLOG_DEFS_H_ + +#include "nvtypes.h" +/******************* Common Debug & Trace Defines ***************************\ +* * +* Module: NVLOG_DEFS.H * +* * +\****************************************************************************/ + +#define NVLOG_MAX_DBG_MODULES 256 + +/********************************/ +/********* Structures *********/ +/********************************/ + +// Forward declaration, so it can be used in the function type definition. + +/** + * @brief Struct representing a buffer in NvLog + * + * All logging (Print, Regtrace, etc) use these buffers. + */ +typedef struct _NVLOG_BUFFER NVLOG_BUFFER; + + +/** + * @brief Type of the 'push' function for NvLog buffers + * + * Function called whenever pushing something to an NvLog buffer + */ +typedef NvBool (*NVLOG_BUFFER_PUSHFUNC) (NVLOG_BUFFER *, NvU8 *, NvU32); + + + +/** + * @brief Fields specific to ring buffers + */ +typedef struct _NVLOG_RING_BUFFER_EXTRA_FIELDS +{ + /** How many times the ring buffer has overflown */ + NvU32 overflow; +} NVLOG_RING_BUFFER_EXTRA_FIELDS; + + +/** + * @brief Struct representing a buffer in NvLog + * + * All logging (Print, Regtrace, etc) use these buffers. + */ +struct _NVLOG_BUFFER +{ + /** Function to call when writing to this buffer */ + union + { + NVLOG_BUFFER_PUSHFUNC fn; + + // Pad this union to prevent struct size from varying between 32/64 bit platforms + NvP64 padding; + } push; + + /** Size of the buffer data section */ + NvU32 size; + /** Buffer tag, for easier identification in a dump */ + NvU32 tag; + /** Flags of the buffer, following NVLOG_BUFFER_FLAGS_* DRF's */ + NvU32 flags; + /** Position of the next available byte in the buffer */ + NvU32 pos; + /** Number of threads currently writing to this buffer */ + volatile NvS32 threadCount; + /** Specific buffer types will define their fields here */ + union + { + NVLOG_RING_BUFFER_EXTRA_FIELDS ring; + } extra; + /** Buffer data. 
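+        Allocated via NVLOG_BUFFER_SIZE(), so the array really extends
+        'size' bytes past the end of the header (pre-C99 flexible-array
+        idiom); data[1] is only a placeholder.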
*/ + NvU8 data[1]; +}; + +#define NVLOG_MAX_BUFFERS_v11 16 +#define NVLOG_MAX_BUFFERS_v12 256 + +#if NVOS_IS_UNIX +#define NVLOG_MAX_BUFFERS NVLOG_MAX_BUFFERS_v12 +#define NVLOG_LOGGER_VERSION 12 // v1.2 +#else +#define NVLOG_MAX_BUFFERS NVLOG_MAX_BUFFERS_v11 +#define NVLOG_LOGGER_VERSION 11 // v1.1 +#endif // NVOS_IS_UNIX + + +// +// Due to this file's peculiar location, NvPort may or may not be includable +// This hack will go away when NvLog is moved into common/shared +// +#if NVOS_IS_MACINTOSH + +#if !PORT_IS_KERNEL_BUILD +typedef struct PORT_SPINLOCK PORT_SPINLOCK; +#else +#include "nvport/nvport.h" +#endif + +#elif !defined(PORT_IS_KERNEL_BUILD) +typedef struct PORT_SPINLOCK PORT_SPINLOCK; +#else +#include "nvport/nvport.h" +#endif + +/** + * @brief Information about the entire NvLog system + */ +typedef struct _NVLOG_LOGGER +{ + /** NvLog logger version */ + NvU32 version; + /** Logging buffers */ + NVLOG_BUFFER * pBuffers[NVLOG_MAX_BUFFERS]; + /** Index of the first unallocated buffer */ + NvU32 nextFree; + /** Total number of free buffer slots */ + NvU32 totalFree; + /** Lock for all buffer oprations */ + PORT_SPINLOCK* mainLock; +} NVLOG_LOGGER; +extern NVLOG_LOGGER NvLogLogger; + +// +// Buffer flags +// + +// Logging to this buffer is disabled +#define NVLOG_BUFFER_FLAGS_DISABLED 0:0 +#define NVLOG_BUFFER_FLAGS_DISABLED_NO 0 +#define NVLOG_BUFFER_FLAGS_DISABLED_YES 1 + +#define NVLOG_BUFFER_FLAGS_TYPE 2:1 +#define NVLOG_BUFFER_FLAGS_TYPE_RING 0 +#define NVLOG_BUFFER_FLAGS_TYPE_NOWRAP 1 +#define NVLOG_BUFFER_FLAGS_TYPE_SYSTEMLOG 2 + +// Expand buffer when full +#define NVLOG_BUFFER_FLAGS_EXPANDABLE 3:3 +#define NVLOG_BUFFER_FLAGS_EXPANDABLE_NO 0 +#define NVLOG_BUFFER_FLAGS_EXPANDABLE_YES 1 + +// Allocate buffer in non paged memory +#define NVLOG_BUFFER_FLAGS_NONPAGED 4:4 +#define NVLOG_BUFFER_FLAGS_NONPAGED_NO 0 +#define NVLOG_BUFFER_FLAGS_NONPAGED_YES 1 + +// +// Type of buffer locking to use +// NONE - No locking performed, for buffers that are inherently single threaded +// STATE - Lock only during state change, do memory copying unlocked +// Don't use with tiny buffers that overflow every write or two. 
+// FULL - Keep everything locked for the full duration of the write +// +#define NVLOG_BUFFER_FLAGS_LOCKING 6:5 +#define NVLOG_BUFFER_FLAGS_LOCKING_NONE 0 +#define NVLOG_BUFFER_FLAGS_LOCKING_STATE 1 +#define NVLOG_BUFFER_FLAGS_LOCKING_FULL 2 + +// Store this buffer in OCA minidumps +#define NVLOG_BUFFER_FLAGS_OCA 7:7 +#define NVLOG_BUFFER_FLAGS_OCA_NO 0 +#define NVLOG_BUFFER_FLAGS_OCA_YES 1 + +// Buffer format (not included in registry key) +#define NVLOG_BUFFER_FLAGS_FORMAT 10:8 +#define NVLOG_BUFFER_FLAGS_FORMAT_PRINTF 0 +#define NVLOG_BUFFER_FLAGS_FORMAT_LIBOS_LOG 1 +#define NVLOG_BUFFER_FLAGS_FORMAT_MEMTRACK 2 + +// Buffer GPU index +#define NVLOG_BUFFER_FLAGS_GPU_INSTANCE 31:24 + +typedef NvU32 NVLOG_BUFFER_HANDLE; + +// +// Utility macros +// +#define NVLOG_IS_RING_BUFFER(pBuffer) \ + FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _RING, pBuffer->flags) +#define NVLOG_IS_NOWRAP_BUFFER(pBuffer) \ + FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, pBuffer->flags) + +#define NVLOG_PRINT_BUFFER_SIZE(pBuffer) ((pBuffer)->size) +#define NVLOG_BUFFER_SIZE(pBuffer) \ + (NV_OFFSETOF(NVLOG_BUFFER, data) + NVLOG_PRINT_BUFFER_SIZE(pBuffer)) + +/********************************/ +/********* Filtering **********/ +/********************************/ +// TODO - Remove all this once tools are updated + +#define NVLOG_FILTER_INVALID (~0) + +#define NVLOG_FILTER_VALUE_SIMPLE_NO 0x0 +#define NVLOG_FILTER_VALUE_SIMPLE_YES 0x1 +#define NVLOG_FILTER_VALUE_EXPLICIT_NO 0x2 +#define NVLOG_FILTER_VALUE_EXPLICIT_YES 0x3 + +#define NVLOG_FILTER_PRINT_LEVEL_REGTRACE 1:0 +#define NVLOG_FILTER_PRINT_LEVEL_INFO 3:2 +#define NVLOG_FILTER_PRINT_LEVEL_NOTICE 5:4 +#define NVLOG_FILTER_PRINT_LEVEL_WARNINGS 7:6 +#define NVLOG_FILTER_PRINT_LEVEL_ERRORS 9:8 +#define NVLOG_FILTER_PRINT_LEVEL_HW_ERROR 11:10 +#define NVLOG_FILTER_PRINT_LEVEL_FATAL 13:12 + +#define NVLOG_FILTER_PRINT_BUFFER 18:14 +#define NVLOG_FILTER_REGTRACE_BUFFER 22:19 + +#define NVLOG_FILTER_REGTRACE_LOG_READ 25:23 +#define NVLOG_FILTER_REGTRACE_LOG_WRITE 27:26 +#define NVLOG_FILTER_REGTRACE_BREAK_READ 29:28 +#define NVLOG_FILTER_REGTRACE_BREAK_WRITE 31:30 + +#define NVLOG_FILTER_VALUE_IS_NO(val) ((val & 0x1) == 0) +#define NVLOG_FILTER_VALUE_IS_YES(val) (val & 0x1) +#define NVLOG_FILTER_PRINT_GET_VALUE(level, num) ((num >> (level*2)) & 0x3) + +/** + * @brief Type representing a value of a given 16bit range. + */ +typedef struct _NVLOG_RANGE_16 +{ + NvU16 low; + NvU16 high; + NvU32 value; +} NVLOG_RANGE_16; + + +/** + * @brief Type representing a value of a given 32bit range. + */ +typedef struct _NVLOG_RANGE_32 +{ + NvU32 low; + NvU32 high; + NvU32 value; +} NVLOG_RANGE_32; + +// +// Maximum number of files that have a filter assigned to them. +// +#define NVLOG_MAX_FILES 1 +// +// Maximum number of line rules (both single line and range) allowed per file +// +#define NVLOG_FILELINE_FILTER_MAX_RANGES 1 + +/** + * @brief Internal type for NVLOG_FILELINE_FILTER. + * + * Contains filtering info for a single file. + */ +typedef struct _NVLOG_FILELINE_FILTER_FILEHASH +{ + /** ID of the file (24bit MD5) */ + NvU32 fileId; + /** Number of elements in the array 'ranges' */ + NvU32 numElems; + /** Value to use if the given value isn't found in the range array */ + NvU32 defaultValue; + /** Array of ranges representing lines in the file */ + NVLOG_RANGE_16 ranges[NVLOG_FILELINE_FILTER_MAX_RANGES]; +} NVLOG_FILELINE_FILTER_FILEHASH; + +/** + * @brief Filter that contains rules that depend on the file and line number. 
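+ *
+ * Shared between the print and regtrace systems. Lookups that miss the
+ * fileHash (or a line range within a file) fall back to the corresponding
+ * 'defaultValue'.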
+ */ +typedef struct _NVLOG_FILELINE_FILTER +{ + /** Number of elements in the fileHash array */ + NvU32 numFiles; + /** Value to use if a given file isn't found */ + NvU32 defaultValue; + /** Array of file entries, ordered as a hash table */ + NVLOG_FILELINE_FILTER_FILEHASH fileHash[NVLOG_MAX_FILES]; +} NVLOG_FILELINE_FILTER; + +/********************************/ +/********* Print Logger *********/ +/********************************/ + +#define NVLOG_PRINT_LOGGER_VERSION 11 // v1.1 +// Max buffers cannot be over 32. +#define NVLOG_PRINT_MAX_BUFFERS 8 + +#define NVLOG_PRINT_BUFFER_PRIMARY 1 +#define NVLOG_PRINT_BUFFER_SECONDARY 2 +#define NVLOG_PRINT_BUFFER_SYSTEMLOG 3 + +#define NVLOG_PRINT_DESC1_FILEID 23:0 +#define NVLOG_PRINT_DESC1_GPUID 28:24 // 2^5 = 32 possible +#define NVLOG_PRINT_DESC1_MAGIC 31:29 +#define NVLOG_PRINT_DESC1_MAGIC_VALUE 5 + +#define NVLOG_PRINT_DESC2_LINEID 15:0 +#define NVLOG_PRINT_DESC2_GROUPID 17:16 +#define NVLOG_PRINT_DESC2_GROUPID_RM 0 +#define NVLOG_PRINT_DESC2_GROUPID_PMU 1 +#define NVLOG_PRINT_DESC2_OPT_DATA_COUNT 24:18 // number of dwords +#define NVLOG_PRINT_DESC2_OPT_DATA_COUNT_MAX 0x7F +#define NVLOG_PRINT_DESC2_RESERVED 28:25 +#define NVLOG_PRINT_DESC2_MAGIC 31:29 +#define NVLOG_PRINT_DESC2_MAGIC_VALUE 6 + +#define NVLOG_UNKNOWN_GPU_INSTANCE 0x1f + +#define NVLOG_PRINT_MODULE_FILTER_VALUE 1:0 +#define NVLOG_PRINT_MODULE_FILTER_BUFFER 6:2 +#define NVLOG_PRINT_MODULE_FILTER_ENABLED 7:7 + +// +// Regkey fields - These are copied directly from nvRmReg.h +// A copy is necessary as these might be needed on systems that don't +// have nvRmReg.h, such as DVS builds for NvWatch +// +#ifndef NV_REG_STR_RM_NVLOG +#define NV_REG_STR_RM_NVLOG "RMNvLog" +#define NV_REG_STR_RM_NVLOG_BUFFER_FLAGS 7:0 +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE 23:8 +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE_DEFAULT ((NVOS_IS_WINDOWS||NVOS_IS_MACINTOSH)?8:250) +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE_DISABLE 0 +#define NV_REG_STR_RM_NVLOG_RUNTIME_LEVEL 28:25 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP 30:29 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_NONE 0 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_32 1 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_64 2 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_32_DIFF 3 +#define NV_REG_STR_RM_NVLOG_INITED 31:31 +#define NV_REG_STR_RM_NVLOG_INITED_NO 0 +#define NV_REG_STR_RM_NVLOG_INITED_YES 1 +#endif // NV_REG_STR_RM_NVLOG + + +// +// Arg types: +// 0: Special meaning. End of argument list. +// 1: d, u, x, X, i, o - Integer type +// 2: lld, llu, llx, llX, lli, llo - Long long integer type +// 3: s - string type (size is 0) +// 4: p - pointer type +// 5: c - char type +// 6: f, g, e, F, G, E - floating point type +// 7-14: Unused at the moment, default value is 0 +// 15: Special meaning. Error value - unsupported type. +// +#define NVLOG_PRINT_MAX_ARG_TYPES 0x10 +#define NVLOG_PRINT_ARG_TYPE_ARGLIST_END 0x0 +#define NVLOG_PRINT_ARG_TYPE_INT 0x1 +#define NVLOG_PRINT_ARG_TYPE_LONGLONG 0x2 +#define NVLOG_PRINT_ARG_TYPE_STRING 0x3 +#define NVLOG_PRINT_ARG_TYPE_POINTER 0x4 +#define NVLOG_PRINT_ARG_TYPE_CHAR 0x5 +#define NVLOG_PRINT_ARG_TYPE_FLOAT 0x6 +#define NVLOG_PRINT_ARG_TYPE_ERROR 0xf + + +/** + * @brief Signature of the database required to decode the print logs + * + * The sig1-sig3 values are generated randomly at compile time. 
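+ *
+ * A decoder is expected to compare these values against its offline
+ * database to confirm that the log and the database come from the same
+ * build.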
+ */
+typedef struct _NVLOG_DB_SIGNATURE
+{
+    NvU32 timestamp;
+    NvU32 sig1;
+    NvU32 sig2;
+    NvU32 sig3;
+} NVLOG_DB_SIGNATURE;
+
+/**
+ * @brief Filter that contains all rules used to filter DBG_PRINTF calls
+ */
+typedef struct _NVLOG_PRINT_FILTER
+{
+    /** Same file:line filter is shared with the Regtrace system */
+    NVLOG_FILELINE_FILTER *pFileLineFilter;
+    /** Filter based on debug levels. Uses NVLOG_FILTER_PRINT_LEVEL_* DRF's */
+    NvU32 runtimePrintLevelFilter;
+    /** Filter based on debug modules. Uses NVLOG_PRINT_MODULE_FILTER_* DRF's */
+    NvU8 runtimePrintModuleFilter[NVLOG_MAX_DBG_MODULES];
+} NVLOG_PRINT_FILTER;
+
+
+/**
+ * @brief Enum representing all possible argument types to DBG_PRINTF
+ */
+typedef enum _NVLOG_ARGTYPE
+{
+    NVLOG_ARGTYPE_NONE,
+    NVLOG_ARGTYPE_INT,
+    NVLOG_ARGTYPE_LONG_LONG_INT,
+    NVLOG_ARGTYPE_STRING,
+    NVLOG_ARGTYPE_POINTER,
+    NVLOG_ARGTYPE_FLOAT,
+    NVLOG_ARGTYPE__COUNT
+} NVLOG_ARGTYPE;
+
+/**
+ * @brief General info about the NvLog Print system
+ */
+typedef struct _NVLOG_PRINT_LOGGER
+{
+    /** NvLog print logger version */
+    NvU32 version;
+    /** Runtime argument sizes (16 different arglist values) */
+    NvU8 runtimeSizes[NVLOG_PRINT_MAX_ARG_TYPES];
+    /** Database signature for decoding */
+    NVLOG_DB_SIGNATURE signature;
+    /** Filter buffer for print statements */
+    NVLOG_PRINT_FILTER filter;
+    /** Flags for all NvLog print buffers */
+    NvU32 flags;
+    /** Buffer indices for all nvlog buffers. buffers[1] is default. */
+    NvU32 buffers[NVLOG_PRINT_MAX_BUFFERS];
+    /** Initialized flag, set to true after nvlogPrintInit has executed */
+    NvBool initialized;
+    /** Paused flag, set to true while print logging is paused */
+    NvBool paused;
+} NVLOG_PRINT_LOGGER;
+extern NVLOG_PRINT_LOGGER NvLogPrintLogger;
+
+#define NVLOG_PRINT_BUFFER_TAG(_i) NvU32_BUILD('t','r','p','0' + (_i))
+
+/********************************/
+/**********  Regtrace  **********/
+/********************************/
+
+#define NVLOG_REGTRACE_LOGGER_VERSION 10 // v1.0
+#define NVLOG_REGTRACE_MAX_BUFFERS 4
+
+#define NVLOG_REGTRACE_READ 0
+#define NVLOG_REGTRACE_WRITE 1
+
+#define NVLOG_REGTRACE_DESC1_FILEID NVLOG_PRINT_DESC1_FILEID
+#define NVLOG_REGTRACE_DESC1_GPUID NVLOG_PRINT_DESC1_GPUID
+#define NVLOG_REGTRACE_DESC1_MAGIC NVLOG_PRINT_DESC1_MAGIC
+#define NVLOG_REGTRACE_DESC1_MAGIC_VALUE (NVLOG_PRINT_DESC1_MAGIC_VALUE-1)
+
+#define NVLOG_REGTRACE_DESC2_LINEID 15:0
+#define NVLOG_REGTRACE_DESC2_READWRITE 16:16
+#define NVLOG_REGTRACE_DESC2_READWRITE_READ NVLOG_REGTRACE_READ
+#define NVLOG_REGTRACE_DESC2_READWRITE_WRITE NVLOG_REGTRACE_WRITE
+#define NVLOG_REGTRACE_DESC2_REGSIZE 18:17
+#define NVLOG_REGTRACE_DESC2_REGSIZE_8 0
+#define NVLOG_REGTRACE_DESC2_REGSIZE_16 1
+#define NVLOG_REGTRACE_DESC2_REGSIZE_32 2
+#define NVLOG_REGTRACE_DESC2_REGSIZE_64 3
+#define NVLOG_REGTRACE_DESC2_THREADID 28:19
+#define NVLOG_REGTRACE_DESC2_MAGIC 31:29
+#define NVLOG_REGTRACE_DESC2_MAGIC_VALUE 3
+
+/**
+ * @brief Single entry in an NvLog Regtrace buffer.
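+ *
+ * One record is written per filtered register access; desc1 and desc2 are
+ * packed with the NVLOG_REGTRACE_DESC1_* / NVLOG_REGTRACE_DESC2_* DRF's
+ * defined above.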
+ */
+typedef struct _NVLOG_REGTRACE_RECORD
+{
+    /** Uses NVLOG_REGTRACE_DESC1_* DRF's */
+    NvU32 desc1;
+    /** Uses NVLOG_REGTRACE_DESC2_* DRF's */
+    NvU32 desc2;
+    /** Address of the register being accessed */
+    NvU32 address;
+    /** Value that was read/written */
+    NvU32 value;
+} NVLOG_REGTRACE_RECORD;
+
+
+
+#define NVLOG_REGTRACE_FILTER_MAX_RANGES 256
+
+// Regtrace shares the file:line filter with print
+
+
+/**
+ * @brief Filter that contains all rules used to filter register access logging
+ */
+typedef struct _NVLOG_REGTRACE_FILTER
+{
+    /** Number of elements in the 'ranges' array */
+    NvU32 numRanges;
+    /** File:line based filter. Shared with NvLog print system */
+    NVLOG_FILELINE_FILTER *pFileLineFilter;
+    /** Range array for filtering based on register addresses */
+    NVLOG_RANGE_32 ranges[NVLOG_REGTRACE_FILTER_MAX_RANGES];
+} NVLOG_REGTRACE_FILTER;
+
+/**
+ * @brief General info about the NvLog Regtrace system
+ */
+typedef struct _NVLOG_REGTRACE_LOGGER
+{
+    /** NvLog regtrace logger version */
+    NvU32 version;
+    /** Filter buffer for regtrace statements */
+    NVLOG_REGTRACE_FILTER filter;
+    /** Buffer indices for all NvLog buffers. First element is default buffer */
+    NvU32 buffers[NVLOG_REGTRACE_MAX_BUFFERS];
+} NVLOG_REGTRACE_LOGGER;
+
+#endif // _NVLOG_DEFS_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h
new file mode 100644
index 0000000..c40c64f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2012-2016 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+//
+// This file must not have include guards; it is supposed to be included
+// multiple times: once in a precompiled header, once through noprecomp.h.
+//
+
+// WAR for a GCC precompiled headers problem
+#if !defined(NV_RM_PRECOMPILED_HEADER)
+#include "nvlog_inc2.h"
+
+//
+// If noprecomp is not included, this will not expand and will result in an
+// undefined identifier. Hopefully, the meaningful name will hint at the
+// underlying problem.
+//
+#define ___please_include_noprecomp_h___
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h
new file mode 100644
index 0000000..7f10150
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h
@@ -0,0 +1,46 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013,2016-2017,2020-2020 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NVLOG_INC2_H_
+#define _NVLOG_INC2_H_
+//
+// Include the auto-generated g_$(filename)-nvlog.h header. The file contains
+// information about the trace statements, extracted by the NvLog preprocessor.
+// NVLOG_INCLUDE is defined by make at compile time, for every source file.
+//
+// The four lines of macros below are some trickiness needed to make this work.
+//
+#if (defined(NVLOG_ENABLED) || defined(NV_MODS)) && defined(NVLOG_INCLUDE) && !defined(NVLOG_PARSING)
+#if NVLOG_ENABLED || defined(NV_MODS)
+
+#ifndef NVLOG_FILEID // Acts as an include guard
+#define NVLOG_INCLUDE3(a) #a
+#define NVLOG_INCLUDE2(a) NVLOG_INCLUDE3 a
+#define NVLOG_INCLUDE1 NVLOG_INCLUDE2((NVLOG_INCLUDE))
+#include NVLOG_INCLUDE1
+#endif // NVLOG_FILEID
+
+#endif // NVLOG_ENABLED || NV_MODS
+#endif // (NVLOG_ENABLED || NV_MODS) && NVLOG_INCLUDE && !NVLOG_PARSING
+
+
+#endif // _NVLOG_INC2_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h
new file mode 100644
index 0000000..5ebfdae
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2003 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMOSXFAC_H_ +#define _RMOSXFAC_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: RMOSXFAC.H * +* Declarations for common OS interface functions. * +* * +\***************************************************************************/ + +#ifdef __cplusplus +extern "C" { +#endif +extern NvS32 RmInitRm(void); +extern NvS32 RmDestroyRm(void); + +#ifdef __cplusplus +} +#endif + +#endif // _RMOSXFAC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h new file mode 100644 index 0000000..138efcb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __v03_00_dev_disp_h__ +#define __v03_00_dev_disp_h__ +#define NV_PDISP_CHN_NUM_CORE 0 /* */ +#define NV_PDISP_CHN_NUM_WIN(i) (1+(i)) /* */ +#define NV_PDISP_CHN_NUM_WIN__SIZE_1 32 /* */ +#define NV_PDISP_CHN_NUM_WINIM(i) (33+(i)) /* */ +#define NV_PDISP_CHN_NUM_WINIM__SIZE_1 32 /* */ +#define NV_PDISP_CHN_NUM_CURS(i) (73+(i)) /* */ +#define NV_PDISP_CHN_NUM_CURS__SIZE_1 8 /* */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* R--VF */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* R---V */ +#define NV_PDISP_FE_SW 0x00640FFF:0x00640000 /* RW--D */ +#define NV_PDISP_SF_USER_0 0x006F03FF:0x006F0000 /* RW--D */ +#define NV_UDISP_HASH_BASE 0x00000000 /* */ +#define NV_UDISP_HASH_LIMIT 0x00001FFF /* */ +#define NV_UDISP_OBJ_MEM_BASE 0x00002000 /* */ +#define NV_UDISP_OBJ_MEM_LIMIT 0x0000FFFF /* */ +#define NV_UDISP_HASH_TBL_CLIENT_ID (1*32+13):(1*32+0) /* RWXVF */ +#define NV_UDISP_HASH_TBL_INSTANCE (1*32+24):(1*32+14) /* RWXUF */ +#define NV_UDISP_HASH_TBL_CHN (1*32+31):(1*32+25) /* RWXUF */ +#define NV_DMA_TARGET_NODE (0*32+1):(0*32+0) /* RWXVF */ +#define NV_DMA_TARGET_NODE_PHYSICAL_NVM 0x00000001 /* RW--V */ +#define NV_DMA_TARGET_NODE_PHYSICAL_PCI 0x00000002 /* RW--V */ +#define NV_DMA_TARGET_NODE_PHYSICAL_PCI_COHERENT 0x00000003 /* RW--V */ +#define NV_DMA_ACCESS (0*32+2):(0*32+2) /* RWXVF */ +#define NV_DMA_ACCESS_READ_ONLY 0x00000000 /* RW--V */ +#define NV_DMA_ACCESS_READ_AND_WRITE 0x00000001 /* RW--V */ +#define NV_DMA_KIND (0*32+20):(0*32+20) /* RWXVF */ +#define NV_DMA_KIND_PITCH 0x00000000 /* RW--V */ +#define NV_DMA_KIND_BLOCKLINEAR 0x00000001 /* RW--V */ +#define NV_DMA_ADDRESS_BASE_LO (1*32+31):(1*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_BASE_HI (2*32+6):(2*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_LIMIT_LO (3*32+31):(3*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_LIMIT_HI (4*32+6):(4*32+0) /* RWXUF */ +#define NV_DMA_SIZE 20 /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE 0x00680000 /* */ +#define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE (0x00680000+32768) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(i) ((0x00690000+(i)*4096)) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(i) ((0x00690000+((i+32)*4096))) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(i) (0x006D8000+(i)*4096) /* RW-4A */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR(i) ((i)>0?(((0x00690000+(i-1)*4096))):0x00680000) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR__SIZE_1 81 /* */ +#endif // __v03_00_dev_disp_h__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h new file mode 100644 index 0000000..1651e22 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice 
shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __v04_02_dev_disp_h__ +#define __v04_02_dev_disp_h__ +#define NV_PDISP 0x006F1FFF:0x00610000 /* RW--D */ +#endif // __v04_02_dev_disp_h__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h new file mode 100644 index 0000000..e4ebcd5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_ARCH_PUBLISHED_H +#define NV_ARCH_PUBLISHED_H + +// high byte indicates GPU-SERIES, as defined in Gpus.pm. +#define NVGPU_ARCHITECTURE_SERIES 31:24 +#define NVGPU_ARCHITECTURE_SERIES_CLASSIC 0x00 +#define NVGPU_ARCHITECTURE_SERIES_SIMULATION 0x00 // XXX - really should be distinct from CLASSIC_GPUS +#define NVGPU_ARCHITECTURE_SERIES_TEGRA 0xE0 +#define NVGPU_ARCHITECTURE_ARCH 23:0 + +#define GPU_ARCHITECTURE(series, arch) (DRF_DEF(GPU, _ARCHITECTURE, _SERIES, series) | \ + DRF_NUM(GPU, _ARCHITECTURE, _ARCH, arch)) + +// +// Architecture constants. 
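+// For example, GPU_ARCHITECTURE(_CLASSIC, 0x0160) places the _CLASSIC series
+// value (0x00) in bits 31:24 and 0x0160 in bits 23:0, yielding 0x00000160
+// (the Turing value below); GPU_ARCHITECTURE(_TEGRA, 0x0023) yields
+// 0xE0000023 (T23X).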
+// +#define GPU_ARCHITECTURE_MAXWELL GPU_ARCHITECTURE(_CLASSIC, 0x0110) +#define GPU_ARCHITECTURE_MAXWELL2 GPU_ARCHITECTURE(_CLASSIC, 0x0120) +#define GPU_ARCHITECTURE_PASCAL GPU_ARCHITECTURE(_CLASSIC, 0x0130) +#define GPU_ARCHITECTURE_VOLTA GPU_ARCHITECTURE(_CLASSIC, 0x0140) +#define GPU_ARCHITECTURE_VOLTA2 GPU_ARCHITECTURE(_CLASSIC, 0x0150) +#define GPU_ARCHITECTURE_TURING GPU_ARCHITECTURE(_CLASSIC, 0x0160) +#define GPU_ARCHITECTURE_AMPERE GPU_ARCHITECTURE(_CLASSIC, 0x0170) +#define GPU_ARCHITECTURE_HOPPER GPU_ARCHITECTURE(_CLASSIC, 0x0180) +#define GPU_ARCHITECTURE_ADA GPU_ARCHITECTURE(_CLASSIC, 0x0190) + +#define GPU_ARCHITECTURE_T12X GPU_ARCHITECTURE(_TEGRA, 0x0040) +#define GPU_ARCHITECTURE_T13X GPU_ARCHITECTURE(_TEGRA, 0x0013) +#define GPU_ARCHITECTURE_T21X GPU_ARCHITECTURE(_TEGRA, 0x0021) +#define GPU_ARCHITECTURE_T18X GPU_ARCHITECTURE(_TEGRA, 0x0018) +#define GPU_ARCHITECTURE_T19X GPU_ARCHITECTURE(_TEGRA, 0x0019) +#define GPU_ARCHITECTURE_T23X GPU_ARCHITECTURE(_TEGRA, 0x0023) + +#define GPU_ARCHITECTURE_SIMS GPU_ARCHITECTURE(_SIMULATION, 0x01f0) // eg: AMODEL + +// +// Implementation constants. +// These must be unique within a single architecture. +// + +#define GPU_IMPLEMENTATION_GM108 0x08 +#define GPU_IMPLEMENTATION_GM107 0x07 +#define GPU_IMPLEMENTATION_GM200 0x00 +#define GPU_IMPLEMENTATION_GM204 0x04 +#define GPU_IMPLEMENTATION_GM206 0x06 + +#define GPU_IMPLEMENTATION_GP100 0x00 +#define GPU_IMPLEMENTATION_GP102 0x02 +#define GPU_IMPLEMENTATION_GP104 0x04 +#define GPU_IMPLEMENTATION_GP106 0x06 +#define GPU_IMPLEMENTATION_GP107 0x07 +#define GPU_IMPLEMENTATION_GP108 0x08 + +#define GPU_IMPLEMENTATION_GV100 0x00 +#define GPU_IMPLEMENTATION_GV11B 0x0B + +#define GPU_IMPLEMENTATION_TU102 0x02 +#define GPU_IMPLEMENTATION_TU104 0x04 +#define GPU_IMPLEMENTATION_TU106 0x06 +#define GPU_IMPLEMENTATION_TU116 0x08 // TU116 has implementation ID 8 in HW +#define GPU_IMPLEMENTATION_TU117 0x07 + +#define GPU_IMPLEMENTATION_GA100 0x00 +#define GPU_IMPLEMENTATION_GA102 0x02 +#define GPU_IMPLEMENTATION_GA103 0x03 +#define GPU_IMPLEMENTATION_GA104 0x04 +#define GPU_IMPLEMENTATION_GA106 0x06 +#define GPU_IMPLEMENTATION_GA107 0x07 +#define GPU_IMPLEMENTATION_GA102F 0x0F + +#define GPU_IMPLEMENTATION_GH100 0x00 + +#define GPU_IMPLEMENTATION_AD102 0x02 +#define GPU_IMPLEMENTATION_AD103 0x03 +#define GPU_IMPLEMENTATION_AD104 0x04 +#define GPU_IMPLEMENTATION_AD106 0x06 +#define GPU_IMPLEMENTATION_AD107 0x07 + +#define GPU_IMPLEMENTATION_T124 0x00 +#define GPU_IMPLEMENTATION_T132 0x00 +#define GPU_IMPLEMENTATION_T210 0x00 +#define GPU_IMPLEMENTATION_T186 0x00 +#define GPU_IMPLEMENTATION_T194 0x00 +#define GPU_IMPLEMENTATION_T232 0x02 +#define GPU_IMPLEMENTATION_T234 0x04 +#define GPU_IMPLEMENTATION_T234D 0x05 + +/* SIMS gpus */ +#define GPU_IMPLEMENTATION_AMODEL 0x00 + +#endif // NV_ARCH_PUBLISHED_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h new file mode 100644 index 0000000..cf05bad --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h @@ -0,0 +1,154 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights 
to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +/***************************************************************************\ +* * +* Hardware Reference Manual extracted defines. * +* - Defines in this file are approved by the HW team for publishing. * +* * +\***************************************************************************/ +#ifndef NV_REF_PUBLISHED_H +#define NV_REF_PUBLISHED_H + + + +// +// These registers can be accessed by chip-independent code as +// well as chip-dependent code. +// +// NOTE: DO NOT ADD TO THIS FILE. CREATE CHIP SPECIFIC HAL ROUTINES INSTEAD. +// + +/* + * Standard PCI config space header defines. + * The defines here cannot change across generations. + */ + +/* dev_nv_xve.ref */ +/* PBUS field defines converted to NV_CONFIG field defines */ +#define NV_CONFIG_PCI_NV_0 0x00000000 /* R--4R */ +#define NV_CONFIG_PCI_NV_0_VENDOR_ID 15:0 /* C--UF */ +#define NV_CONFIG_PCI_NV_0_VENDOR_ID_NVIDIA 0x000010DE /* C---V */ +#define NV_CONFIG_PCI_NV_0_DEVICE_ID 31:16 /* R--UF */ +#define NV_CONFIG_PCI_NV_1 0x00000004 /* RW-4R */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE 0:0 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE 1:1 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER 2:2 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_2 0x00000008 /* R--4R */ +#define NV_CONFIG_PCI_NV_2_REVISION_ID 7:0 /* C--UF */ +#define NV_CONFIG_PCI_NV_2_CLASS_CODE 31:8 /* C--VF */ +#define NV_CONFIG_PCI_NV_3 0x0000000C /* RW-4R */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER 15:11 /* RWIUF */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_0_CLOCKS 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_8_CLOCKS 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_240_CLOCKS 0x0000001E /* RW--V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_248_CLOCKS 0x0000001F /* RW--V */ +#define NV_CONFIG_PCI_NV_4 0x00000010 /* RW-4R */ +#define NV_CONFIG_PCI_NV_5 0x00000014 /* RW-4R */ +#define NV_CONFIG_PCI_NV_5_ADDRESS_TYPE 2:1 /* C--VF */ +#define NV_CONFIG_PCI_NV_5_ADDRESS_TYPE_64_BIT 0x00000002 /* ----V */ +#define NV_CONFIG_PCI_NV_6 0x00000018 /* RW-4R */ +#define NV_CONFIG_PCI_NV_7(i) (0x0000001C+(i)*4) /* R--4A */ +#define NV_CONFIG_PCI_NV_11 0x0000002C /* R--4R */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_VENDOR_ID 15:0 /* R--UF */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_VENDOR_ID_NONE 
0x00000000 /* R---V */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID 31:16 /* R--UF */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID_NONE 0x00000000 /* R---V */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID_TNT2PRO 0x0000001f +#define NV_CONFIG_PCI_NV_12 0x00000030 /* RW-4R */ +#define NV_CONFIG_PCI_NV_13 0x00000034 /* RW-4R */ +#define NV_CONFIG_PCI_NV_13_CAP_PTR 7:0 /* C--VF */ +#define NV_CONFIG_PCI_NV_14 0x00000038 /* R--4R */ +#define NV_CONFIG_PCI_NV_15 0x0000003C /* RW-4R */ +#define NV_CONFIG_PCI_NV_15_INTR_LINE 7:0 /* RWIVF */ +/* + * These defines are the correct fields to be used to extract the + * NEXT_PTR and CAP_ID from any PCI capability structure, + * but they still have NV_24 in the name because they were from the + * first PCI capability structure in the capability list in older GPUs. + */ +#define NV_CONFIG_PCI_NV_24_NEXT_PTR 15:8 /* R--VF */ +#define NV_CONFIG_PCI_NV_24_CAP_ID 7:0 /* C--VF */ + +/* + * Standard registers present on NVIDIA chips used to ID the chip. + * Very stable across generations. + */ + +/* dev_master.ref */ +#define NV_PMC_BOOT_0 0x00000000 /* R--4R */ +#define NV_PMC_BOOT_0_MINOR_REVISION 3:0 /* R--VF */ +#define NV_PMC_BOOT_0_MAJOR_REVISION 7:4 /* R--VF */ +#define NV_PMC_BOOT_0_IMPLEMENTATION 23:20 /* R--VF */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_0 0x00000000 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_1 0x00000001 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_2 0x00000002 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_3 0x00000003 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_4 0x00000004 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_5 0x00000005 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_6 0x00000006 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_7 0x00000007 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_8 0x00000008 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_9 0x00000009 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_A 0x0000000A /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_B 0x0000000B /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_C 0x0000000C /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_D 0x0000000D /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_E 0x0000000E /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_F 0x0000000F /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE 28:24 /* R--VF */ +#define NV_PMC_BOOT_0_ARCHITECTURE_TU100 0x00000016 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_TU110 0x00000016 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_GA100 0x00000017 /* R---V */ + +#define NV_PMC_BOOT_1 0x00000004 /* R--4R */ +#define NV_PMC_BOOT_1_VGPU8 8:8 /* R--VF */ +#define NV_PMC_BOOT_1_VGPU8_REAL 0x00000000 /* R-I-V */ +#define NV_PMC_BOOT_1_VGPU8_VIRTUAL 0x00000001 /* R---V */ +#define NV_PMC_BOOT_1_VGPU16 16:16 /* R--VF */ +#define NV_PMC_BOOT_1_VGPU16_REAL 0x00000000 /* R-I-V */ +#define NV_PMC_BOOT_1_VGPU16_VIRTUAL 0x00000001 /* R---V */ +#define NV_PMC_BOOT_1_VGPU 17:16 /* C--VF */ +#define NV_PMC_BOOT_1_VGPU_REAL 0x00000000 /* C---V */ +#define NV_PMC_BOOT_1_VGPU_PV 0x00000001 /* ----V */ +#define NV_PMC_BOOT_1_VGPU_VF 0x00000002 /* ----V */ +#define NV_PMC_BOOT_42 0x00000A00 /* R--4R */ +#define NV_PMC_BOOT_42_MINOR_EXTENDED_REVISION 11:8 /* R-XVF */ +#define NV_PMC_BOOT_42_MINOR_REVISION 15:12 /* R-XVF */ +#define NV_PMC_BOOT_42_MAJOR_REVISION 19:16 /* R-XVF */ +#define NV_PMC_BOOT_42_IMPLEMENTATION 23:20 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE 28:24 /* */ +#define NV_PMC_BOOT_42_CHIP_ID 28:20 /* R-XVF */ + +/* dev_arapb_misc.h */ +#define NV_PAPB_MISC_GP_HIDREV_CHIPID 15:8 
/* ----F */ +#define NV_PAPB_MISC_GP_HIDREV_MAJORREV 7:4 /* ----F */ + +#endif // NV_REF_PUBLISHED_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h new file mode 100644 index 0000000..0134b3d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_mmu_h__ +#define __tu102_dev_mmu_h__ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* 
RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x06 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x01 /* R---V */ +#define NV_MMU_PTE_KIND_S8 0x02 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x03 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0x04 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x05 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x08 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0x0A /* R---V */ +#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0x0B /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0x0C /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0x0D /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0x0E /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0x0F /* R---V */ +#define NV_MMU_CLIENT_KIND_Z16 0x1 /* R---V */ +#define NV_MMU_CLIENT_KIND_Z24S8 0x5 /* R---V */ +#define 
NV_MMU_CLIENT_KIND_INVALID 0x7 /* R---V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (20+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#endif // __tu102_dev_mmu_h__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h new file mode 100644 index 0000000..4243730 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define _kind_macros_orig_H_ + +#define KIND_INVALID(k) ( ((k) ==NV_MMU_CLIENT_KIND_INVALID)) +#define PTEKIND_COMPRESSIBLE(k) ( ((k) >=NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE && (k) <= NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC)) +#define PTEKIND_DISALLOWS_PLC(k) ( !((k) ==NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE)) +#define PTEKIND_SUPPORTED(k) ( ((k) ==NV_MMU_PTE_KIND_INVALID)|| ((k) ==NV_MMU_PTE_KIND_PITCH)|| ((k) ==NV_MMU_PTE_KIND_GENERIC_MEMORY)|| ((k) >=NV_MMU_PTE_KIND_Z16 && (k) <= NV_MMU_PTE_KIND_Z24S8)|| ((k) >=NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE && (k) <= NV_MMU_PTE_KIND_SMSKED_MESSAGE)) +#define KIND_Z(k) ( ((k) >=NV_MMU_CLIENT_KIND_Z16 && (k) <= NV_MMU_CLIENT_KIND_Z24S8)) + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h new file mode 100644 index 0000000..ed6bba9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h @@ -0,0 +1,268 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/** @file  nvhdmi_frlInterface.h
+ *  @brief This file provides the FRL-related interfaces between the client and the HDMI lib
+ */
+
+
+#ifndef _NVHDMI_FRLINTERFACE_H_
+#define _NVHDMI_FRLINTERFACE_H_
+
+#include "nvhdmipkt.h"
+#include "nvHdmiFrlCommon.h"
+
+#include "../timing/nvtiming.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+// DSC encoder color format bitmasks (these match DSC lib & RM ctrl 0073 fields)
+typedef enum tagHDMI_DSC_ENCODER_COLOR_FORMAT
+{
+    HDMI_DSC_ENCODER_COLOR_FORMAT_RGB            = 1,
+    HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCR444       = 2,
+    HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCRNATIVE422 = 4,
+    HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCRNATIVE420 = 8
+} HDMI_DSC_ENCODER_COLOR_FORMAT;
+
+// Options for QueryFRLConfig interface
+typedef enum tagHDMI_QUERY_FRL_OPTION
+{
+    HDMI_QUERY_FRL_ANY_CONFIG = 0,        // any FRL config that supports the mode
+    HDMI_QUERY_FRL_OPTIMUM_CONFIG,        // find the best-fit config for this mode
+    HDMI_QUERY_FRL_LOWEST_BANDWIDTH,      // minimum bandwidth
+    HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY, // trade off bandwidth for pixel quality
+    HDMI_QUERY_FRL_HIGHEST_BANDWIDTH
+} HDMI_QUERY_FRL_OPTION;
+
+/*************************************************************************************************
+* HDMI_VIDEO_TRANSPORT_INFO:                                                                    *
+* Video transport format - a combination of timing, bpc and packing that represents what goes  *
+* on the link. The client passes this in; the lib uses it for the bandwidth calculations that  *
+* decide the required FRL rate.                                                                 *
+**************************************************************************************************/
+typedef struct tagHDMI_VIDEO_TRANSPORT_INFO
+{
+    const NVT_TIMING   *pTiming;       // backend timing
+    HDMI_BPC            bpc;
+    HDMI_PIXEL_PACKING  packing;
+    NvBool              bDualHeadMode; // 2H1OR
+} HDMI_VIDEO_TRANSPORT_INFO;
+
+/************************************************************************************************
+* HDMI_QUERY_FRL_CLIENT_CONTROL:                                                               *
+* Allows the client to force-request DSC/FRL configurations, for testing purposes or otherwise.*
+* E.g., the client could query for any fitting FRL config instead of the most optimum one. It *
+* could trade off
+* bandwidth for pixel quality.
* +*************************************************************************************************/ +typedef struct tagHDMI_QUERY_FRL_CLIENT_CONTROL +{ + HDMI_QUERY_FRL_OPTION option; + + NvU32 forceFRLRate : 1; + NvU32 forceAudio2Ch48KHz : 1; + NvU32 enableDSC : 1; + NvU32 forceSliceCount : 1; + NvU32 forceSliceWidth : 1; + NvU32 forceBppx16 : 1; + NvU32 skipGeneratePPS : 1; + NvU32 reserved : 25; + + // client can set below params if respective force flag is set + NvU32 sliceCount; + NvU32 sliceWidth; + NvU32 bitsPerPixelX16; + HDMI_FRL_DATA_RATE frlRate; + +} HDMI_QUERY_FRL_CLIENT_CONTROL; + +/************************************************************************************************ +* HDMI_SRC_CAPS: * +* Input to HDMI lib. * +* * +* Client gives info about GPU capabilities - DSC related caps * +*************************************************************************************************/ +typedef struct tagHDMI_SRC_CAPS +{ + struct + { + NvU32 dscCapable : 1; + NvU32 bppPrecision : 8; + NvU32 encoderColorFormatMask : 8; + NvU32 lineBufferSizeKB : 8; + NvU32 rateBufferSizeKB : 8; + NvU32 maxNumHztSlices : 8; + NvU32 lineBufferBitDepth : 8; + NvU32 dualHeadBppTargetMaxX16 : 16; + NvU32 maxWidthPerSlice; + } dscCaps; + + HDMI_FRL_DATA_RATE linkMaxFRLRate; +} HDMI_SRC_CAPS; + +/************************************************************************************************ +* HDMI_SINK_CAPS: * +* Input to HDMI lib. * +* * +* Client gives info from EDID, HDMI lib uses DSC related info to call DSC lib to generate PPS * +* Audio information from CEA861 block is used for bandwidth calculations * +* linkMaxFRLRate and linkMaxFRLRateDSC are max link rates determined from physical link * +* training. * +*************************************************************************************************/ +typedef struct tagHDMI_SINK_CAPS +{ + const NVT_HDMI_FORUM_INFO *pHdmiForumInfo; + NvU32 audioType; + NvU32 maxAudioChannels; + NvU32 maxAudioFreqKHz; + NvBool bHBRAudio; + HDMI_FRL_DATA_RATE linkMaxFRLRate; + HDMI_FRL_DATA_RATE linkMaxFRLRateDSC; +} HDMI_SINK_CAPS; + +/************************************************************************************************ +* HDMI_FRL_CONFIG: * +* Output from HDMI lib. Client uses this info for modeset * +* * +* maxSupportedAudioCh, maxSupportedAudioFreqKHz - max possible audio settings at the chosen * +* FRL rate, though the sink caps may have reported higher caps * +* * +* dscInfo - if current timing requires DSC, lib returns PPS information here * +* * +* bitsPerPixelx16 - optimum bpp value calculated per spec * +* dscHActiveBytes - in compressed video transport mode, number of bytes in 1 line * +* dscHActiveTriBytes - in compressed video transport mode, number of tri-bytes in 1 line * +* dscHBlankTriBytes - in compressed video transport mode, number of tri-bytes to be sent * +* to represent horizontal blanking * +* * +* pps[32] - PPS data. 
HDMI lib calls DSC lib to fill it in * +*************************************************************************************************/ +#define HDMI_DSC_MAX_PPS_SIZE_DWORD 32 +typedef struct tagHDMI_FRL_CONFIG +{ + HDMI_FRL_DATA_RATE frlRate; + NvU32 maxSupportedAudioCh; + NvU32 maxSupportedAudioFreqKHz; + + // DSC info client will use for core channel modeset + struct + { + NvU32 bEnableDSC : 1; + NvU32 reserved : 31; + + NvU32 bitsPerPixelX16; + NvU32 sliceCount; + NvU32 sliceWidth; + NvU32 pps[HDMI_DSC_MAX_PPS_SIZE_DWORD]; + NvU32 dscHActiveBytes; + NvU32 dscHActiveTriBytes; + NvU32 dscHBlankTriBytes; + NvU32 dscTBlankToTTotalRatioX1k; + } dscInfo; + +} HDMI_FRL_CONFIG; + +/************************************************************************************************ +* NvHdmi_AssessLinkCapabilities: * +* * +* Input parameters: * +* subDevice - Sub Device ID. * +* displayId - Display ID. * +* pSinkEdid - EDID of sink * +* * +* Output parameters: * +* pSrcCaps - src capabilities - DSC caps * +* pSinkCaps - sink capabilities - actual caps calculated from link training * +* * +* Calls RM to get DSC related src side caps. Performs physical link training to determine if * +* sink reported max FRL rate can actually be supported on the physical link * +*************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_AssessLinkCapabilities(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); + +/************************************************************************************************ +* NvHdmi_QueryFRLConfig: * +* * +* Input parameters: * +* libHandle - Hdmi library handle, provided on initializing the library. * +* pVidTransInfo - information about timing, bpc and packing * +* pClientCtrl - settings client wants to see set. HDMI lib tries to honor these * +* pSinkCaps - sink capabilities * +* * +* Output parameters: * +* pFRLConfig - chosen FRL rate and DSC configuration * +* * +*************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_QueryFRLConfig(NvHdmiPkt_Handle libHandle, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); + +/************************************************************************************************ +* NvHdmi_SetFRLConfig: * +* * +* Input parameters: * +* libHandle - Hdmi library handle, provided on initializing the library. * +* subDevice - Sub Device ID. * +* displayId - Display ID. * +* bFakeLt - Indicates that the GPU's link configuration should be forced and that * +* configuration of the sink device should be skipped. * +* pFRLConfig - Link configuration to set. * +* * +************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_SetFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); + +/************************************************************************************************ +* NvHdmi_ClearFRLConfig: * +* * +* Input parameters: * +* libHandle - Hdmi library handle, provided on initializing the library. * +* subDevice - Sub Device ID. * +* displayId - Display ID to change the settings on. 
* +* * +************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_ClearFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId); + +#ifdef __cplusplus +} +#endif + +#endif // _NVHDMI_FRLINTERFACE_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c new file mode 100644 index 0000000..0a843bb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c @@ -0,0 +1,616 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt.c + * + * Purpose: Provide initialization functions for HDMI library + */ + +#include "nvlimits.h" +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" +#include "nvhdmipkt_internal.h" + +#include "../timing/nvt_dsc_pps.h" + +#include "class/cl9170.h" +#include "class/cl917d.h" +#include "class/cl9270.h" +#include "class/cl927d.h" +#include "class/cl9470.h" +#include "class/cl947d.h" +#include "class/cl9570.h" +#include "class/cl957d.h" +#include "class/clc370.h" +#include "class/clc37d.h" +#include "class/clc570.h" +#include "class/clc57d.h" +#include "class/clc670.h" +#include "class/clc67d.h" + +// Class hierarchy structure +typedef struct tagNVHDMIPKT_CLASS_HIERARCHY +{ + NVHDMIPKT_CLASS_ID classId; + NVHDMIPKT_CLASS_ID parentClassId; + NvBool isRootClass; + void (*initInterface)(NVHDMIPKT_CLASS*); + NvBool (*constructor) (NVHDMIPKT_CLASS*); + void (*destructor) (NVHDMIPKT_CLASS*); + NvU32 displayClass; + NvU32 coreDmaClass; +} NVHDMIPKT_CLASS_HIERARCHY; + +/************************************************************************************************* + * hierarchy structure establishes the relationship between classes. * + * If isRootClass=NV_TRUE, it is a root class, else it is a child of a class. classId * + * also acts as an index, and hence the order of the structure below should be maintanied. * + * * + * ASSUMPTION: There are two huge assumptions while creating the class relationship and * + * while traversing it. 1. That of the Class ID definitaion (NVHDMIPKT_CLASS_ID), which has * + * to be strictly indexed, that is 0, 1, 2... and so on. And 2. that the structure * + * CLASS_HIERARCHY (above) follow that indexing. 
That is NVHDMIPKT_0073_CLASS is value 0 and * + * the first entry in CLASS_HIERARCHY, NVHDMIPKT_9171_CLASS is value 1 and hence the second * + * entry in CLASS_HIERARCHY, so on and so forth. * + * * + * HOW TO ADD A NEW CLASS? * + * 1. Add an ID in NVHDMIPKT_CLASS_ID. * + * 2. Add a source file nvhdmipkt_XXXX.c, and include it into makefiles. Makefiles of * + * Mods, Windows, and Linux. * + * 3. Provide initializeHdmiPktInterfaceXXXX, hdmiConstructorXXXX, and, hdmiDestructorXXXX. * + * 4. Add functions that needs to be overridden in NVHDMIPKT_CLASS. * + * 5. Add a relationship in hierarchy[] array. The new class can be a subclass or a root. In * + * case of a root all the interfaces needs to be overridden in NVHDMIPKT_CLASS. * + ************************************************************************************************/ +static const NVHDMIPKT_CLASS_HIERARCHY hierarchy[] = +{ + {// Index 0==NVHDMIPKT_0073_CLASS + NVHDMIPKT_0073_CLASS, // classId + NVHDMIPKT_0073_CLASS, // parentClassId + NV_TRUE, // isRootClass + initializeHdmiPktInterface0073, // initInterface + hdmiConstructor0073, // constructor + hdmiDestructor0073, // destructor + 0, // displayClass + 0 // coreDmaClass + }, + {// Index 1==NVHDMIPKT_9171_CLASS + NVHDMIPKT_9171_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_TRUE, // isRootClass + initializeHdmiPktInterface9171, // initInterface + hdmiConstructor9171, // constructor + hdmiDestructor9171, // destructor + NV9170_DISPLAY, // displayClass + NV917D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 2==NVHDMIPKT_9271_CLASS + NVHDMIPKT_9271_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterface9271, // initInterface + hdmiConstructor9271, // constructor + hdmiDestructor9271, // destructor + NV9270_DISPLAY, // displayClass + NV927D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 3==NVHDMIPKT_9471_CLASS + NVHDMIPKT_9471_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterface9471, // initInterface + hdmiConstructor9471, // constructor + hdmiDestructor9471, // destructor + NV9470_DISPLAY, // displayClass + NV947D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 4==NVHDMIPKT_9571_CLASS + NVHDMIPKT_9571_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterface9571, // initInterface + hdmiConstructor9571, // constructor + hdmiDestructor9571, // destructor + NV9570_DISPLAY, // displayClass + NV957D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 5==NVHDMIPKT_C371_CLASS + NVHDMIPKT_C371_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC371, // initInterface + hdmiConstructorC371, // constructor + hdmiDestructorC371, // destructor + NVC370_DISPLAY, // displayClass + NVC37D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 6==NVHDMIPKT_C571_CLASS + // Note that Turing (C57x) has a distinct displayClass and coreDmaClass, + // but it inherits the _DISP_SF_USER class from Volta (C37x). We call this + // NVHDMIPKT_C571_CLASS, but reuse initInterface()/constructor()/destructor() + // from C371. 
+ NVHDMIPKT_C571_CLASS, + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC371, // initInterface + hdmiConstructorC371, // constructor + hdmiDestructorC371, // destructor + NVC570_DISPLAY, // displayClass + NVC57D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 7==NVHDMIPKT_C671_CLASS + NVHDMIPKT_C671_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC671, // initInterface + hdmiConstructorC671, // constructor + hdmiDestructorC671, // destructor + NVC670_DISPLAY, // displayClass + NVC67D_CORE_CHANNEL_DMA // coreDmaClass + }, +}; + +#if defined(DSC_CALLBACK_MODIFIED) +// Callbacks for DSC PPS library +void *hdmipktMallocCb(const void *clientHandle, NvLength size); +void hdmipktFreeCb(const void *clientHandle, void *pMemPtr); + +void *hdmipktMallocCb(const void *clientHandle, NvLength size) +{ + const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle); + return pClass->callback.malloc(pClass->cbHandle, size); +} + +void hdmipktFreeCb(const void *clientHandle, void *pMemPtr) +{ + const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle); + pClass->callback.free(pClass->cbHandle, pMemPtr); +} +#endif // DSC_CALLBACK_MODIFIED + +/********************************** HDMI Library interfaces *************************************/ +/* + * NvHdmiPkt_PacketCtrl + */ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketCtrl(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + return pClass->hdmiPacketCtrl(pClass, + subDevice, + displayId, + head, + packetType, + transmitControl); +} + +/* + * NvHdmiPkt_PacketWrite + */ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketWrite(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + return pClass->hdmiPacketWrite(pClass, + subDevice, + displayId, + head, + packetType, + transmitControl, + packetLen, + pPacket); +} + +NVHDMIPKT_RESULT +NvHdmi_AssessLinkCapabilities(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if (!pSinkEdid || + !pSrcCaps || + !pSinkCaps) + { + return NVHDMIPKT_INVALID_ARG; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiAssessLinkCapabilities(pClass, + subDevice, + displayId, + pSinkEdid, + pSrcCaps, + pSinkCaps); +} +/* + * NvHdmi_QueryFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_QueryFRLConfig(NvHdmiPkt_Handle libHandle, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if (!pVidTransInfo || + !pClientCtrl || + !pSrcCaps || + !pSinkCaps || + !pFRLConfig) + { + return NVHDMIPKT_INVALID_ARG; + 
} + + // if there is no FRL capability reported fail this call + if (pSinkCaps->linkMaxFRLRate == HDMI_FRL_DATA_RATE_NONE) + { + return NVHDMIPKT_FAIL; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiQueryFRLConfig(pClass, + pVidTransInfo, + pClientCtrl, + pSrcCaps, + pSinkCaps, + pFRLConfig); +} + +/* + * NvHdmi_SetFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_SetFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if (!pFRLConfig) + { + return NVHDMIPKT_INVALID_ARG; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiSetFRLConfig(pClass, + subDevice, + displayId, + bFakeLt, + pFRLConfig); + +} + +/* + * NvHdmi_ClearFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_ClearFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiClearFRLConfig(pClass, + subDevice, + displayId); +} + +/*************************** HDMI Library internal helper functions *****************************/ +/* + * NvHdmiPkt_HwClass2HdmiClass + * internal function; translates display/display-dma class to hdmi class + */ +static NVHDMIPKT_CLASS_ID +NvHdmiPkt_HwClass2HdmiClass(NvU32 const hwClass) +{ + NVHDMIPKT_CLASS_ID hdmiClassId = NVHDMIPKT_9571_CLASS; + NvU32 i = 0; + + for (i = 0; i < NVHDMIPKT_INVALID_CLASS; i++) + { + if ((hierarchy[i].displayClass == hwClass) || + (hierarchy[i].coreDmaClass == hwClass)) + { + hdmiClassId = hierarchy[i].classId; + break; + } + } + + // Assign default class 73 to pre-Kepler families + if (hwClass < NV9170_DISPLAY) + { + hdmiClassId = NVHDMIPKT_0073_CLASS; + } + + return hdmiClassId; +} + +/* + * NvHdmiPkt_InitInterfaces + * internal function; calls class init interface functions + */ +static void +NvHdmiPkt_InitInterfaces(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Recurse to the root first, and then call each initInterface() method + // from root to child. + if (!hierarchy[thisClassId].isRootClass) + { + NvHdmiPkt_InitInterfaces(hierarchy[thisClassId].parentClassId, pClass); + } + hierarchy[thisClassId].initInterface(pClass); +} + +static void +NvHdmiPkt_CallDestructors(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Destructor calls are made from this to root class. + hierarchy[thisClassId].destructor(pClass); + if (!hierarchy[thisClassId].isRootClass) + { + NvHdmiPkt_CallDestructors(hierarchy[thisClassId].parentClassId, pClass); + } +} + +/* + * NvHdmiPkt_CallConstructors + * internal function; calls class constructors and returns boolean success/failure + */ +static NvBool +NvHdmiPkt_CallConstructors(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Recurse to the root first, and then call each constructor + // from root to child. 
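+    //
+    // For example, constructing NVHDMIPKT_C671_CLASS runs the root
+    // NVHDMIPKT_9171_CLASS constructor first and then the C671 constructor;
+    // if the C671 constructor fails, the already-constructed parent chain is
+    // torn down again via NvHdmiPkt_CallDestructors().
+    //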
+ if (!hierarchy[thisClassId].isRootClass) + { + if (!NvHdmiPkt_CallConstructors(hierarchy[thisClassId].parentClassId, pClass)) + { + return NV_FALSE; + } + } + + if (!hierarchy[thisClassId].constructor(pClass)) + { + if (!hierarchy[thisClassId].isRootClass) + { + // Backtrack on constructor failure + NvHdmiPkt_CallDestructors(hierarchy[thisClassId].parentClassId, pClass); + } + + return NV_FALSE; + } + + return NV_TRUE; +} + +/******************************** HDMI Library Init functions ***********************************/ +/* + * NvHdmiPkt_InitializeLibrary + */ +NvHdmiPkt_Handle +NvHdmiPkt_InitializeLibrary(NvU32 const hwClass, + NvU32 const numSubDevices, + NvHdmiPkt_CBHandle const cbHandle, + const NVHDMIPKT_CALLBACK* const pCallbacks, + NvU32 const sfUserHandle, + const NVHDMIPKT_RM_CLIENT_HANDLES* const pClientHandles) +{ + NVHDMIPKT_CLASS* pClass = 0; + NvU32 i = 0; + NvBool result = NV_FALSE; + NVHDMIPKT_CLASS_ID thisClassId = NVHDMIPKT_INVALID_CLASS; + + // Argument validations + if (pCallbacks == 0 || numSubDevices == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // Validating RM handles/callbacks +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (sfUserHandle == 0 || pClientHandles == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + if (pCallbacks->rmGetMemoryMap == 0 || + pCallbacks->rmFreeMemoryMap == 0 || + pCallbacks->rmDispControl2 == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + // Mandatory mutex callbacks. + if (pCallbacks->acquireMutex == 0 || pCallbacks->releaseMutex == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // Mandatory memory allocation callbacks. + if (pCallbacks->malloc == 0 || pCallbacks->free == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + pClass = pCallbacks->malloc(cbHandle, sizeof(NVHDMIPKT_CLASS)); + if (!pClass) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // 0. 
Get the hdmi class ID + thisClassId = NvHdmiPkt_HwClass2HdmiClass(hwClass); + + // Init data + NVMISC_MEMSET(pClass, 0, sizeof(NVHDMIPKT_CLASS)); + + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + pClass->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + } + + pClass->numSubDevices = numSubDevices; + pClass->cbHandle = cbHandle; + pClass->thisId = thisClassId; + + // RM handles/callbacks +#if NVHDMIPKT_RM_CALLS_INTERNAL + pClass->isRMCallInternal = NV_TRUE; + pClass->sfUserHandle = sfUserHandle; + pClass->clientHandles.hClient = pClientHandles->hClient; + pClass->clientHandles.hDevice = pClientHandles->hDevice; + pClass->clientHandles.hDisplay = pClientHandles->hDisplay; + + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + pClass->clientHandles.hSubDevices[i] = pClientHandles->hSubDevices[i]; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + pClass->isRMCallInternal = NV_FALSE; + pClass->callback.rmGetMemoryMap = pCallbacks->rmGetMemoryMap; + pClass->callback.rmFreeMemoryMap = pCallbacks->rmFreeMemoryMap; + pClass->callback.rmDispControl2 = pCallbacks->rmDispControl2; +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + pClass->callback.acquireMutex = pCallbacks->acquireMutex; + pClass->callback.releaseMutex = pCallbacks->releaseMutex; + + pClass->callback.malloc = pCallbacks->malloc; + pClass->callback.free = pCallbacks->free; + +#if !defined (NVHDMIPKT_DONT_USE_TIMER) + pClass->callback.setTimeout = pCallbacks->setTimeout; + pClass->callback.checkTimeout = pCallbacks->checkTimeout; +#endif + +#if defined (DEBUG) + pClass->callback.print = pCallbacks->print; + pClass->callback.assert = pCallbacks->assert; +#endif + + // 1. Init interfaces + NvHdmiPkt_InitInterfaces(thisClassId, pClass); + + // 2. Constructor calls + result = NvHdmiPkt_CallConstructors(thisClassId, pClass); + +#if defined(DSC_CALLBACK_MODIFIED) + DSC_CALLBACK callbacks; + NVMISC_MEMSET(&callbacks, 0, sizeof(DSC_CALLBACK)); + callbacks.clientHandle = pClass; + callbacks.dscMalloc = hdmipktMallocCb; + callbacks.dscFree = hdmipktFreeCb; + DSC_InitializeCallback(callbacks); +#endif // DSC_CALLBACK_MODIFIED + +NvHdmiPkt_InitializeLibrary_exit: + if (result) + { + NvHdmiPkt_Print(pClass, "Initialize Success."); + } + else + { + if (pClass) + { + NvHdmiPkt_Print(pClass, "Initialize Failed."); + } + if (pCallbacks && pCallbacks->free) + { + pCallbacks->free(cbHandle, pClass); + } + } + + return (result == NV_TRUE) ? toHdmiPktHandle(pClass) : NVHDMIPKT_INVALID_HANDLE; +} + +/* + * NvHdmiPkt_DestroyLibrary + */ +void +NvHdmiPkt_DestroyLibrary(NvHdmiPkt_Handle libHandle) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + NVHDMIPKT_CLASS_ID currClassId = NVHDMIPKT_0073_CLASS; + + if (pClass != 0) + { + NvHdmiPkt_Print(pClass, "Destroy."); + NvHdmiPkt_CBHandle cbHandle = pClass->cbHandle; + void (*freeCb) (NvHdmiPkt_CBHandle handle, + void *pMem) = pClass->callback.free; + + currClassId = pClass->thisId; + NvHdmiPkt_CallDestructors(currClassId, pClass); + + freeCb(cbHandle, pClass); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h new file mode 100644 index 0000000..35b2fbd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h @@ -0,0 +1,317 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt.h
+ *
+ * Purpose: This file is a common header for all HDMI Library Clients
+ */
+
+#ifndef _NVHDMIPKT_H_
+#define _NVHDMIPKT_H_
+
+#include <nvlimits.h>
+
+#include "nvmisc.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**************************** HDMI Library defines, enums and structs ***************************/
+/************************************************************************************************
+ * NOTE: NVHDMIPKT_RM_CALLS_INTERNAL define tells this library to make RM calls (allocate,     *
+ * free, control, etc.) internally and not through callbacks into the client.                  *
+ ************************************************************************************************/
+#if !defined(NVHDMIPKT_RM_CALLS_INTERNAL)
+# define NVHDMIPKT_RM_CALLS_INTERNAL 1
+#endif
+
+// NVHDMIPKT_RESULT: HDMI library return result enums
+typedef enum
+{
+    NVHDMIPKT_SUCCESS                = 0,
+    NVHDMIPKT_FAIL                   = 1,
+    NVHDMIPKT_LIBRARY_INIT_FAIL      = 2,
+    NVHDMIPKT_INVALID_ARG            = 3,
+    NVHDMIPKT_TIMEOUT                = 4,
+    NVHDMIPKT_ERR_GENERAL            = 5,
+    NVHDMIPKT_INSUFFICIENT_BANDWIDTH = 6,
+    NVHDMIPKT_RETRY                  = 7
+} NVHDMIPKT_RESULT;
+
+// NVHDMIPKT_TYPE: HDMI Packet Enums
+typedef enum _NVHDMIPKT_TYPE
+{
+    NVHDMIPKT_TYPE_UNDEFINED                 = 0,  // Undefined Packet Type
+    NVHDMIPKT_TYPE_GENERIC                   = 1,  // Generic packet, any Generic Packet
+                                                   // (e.g. Gamut Metadata packet)
+    NVHDMIPKT_TYPE_AVI_INFOFRAME             = 2,  // Avi infoframe
+    NVHDMIPKT_TYPE_GENERAL_CONTROL           = 3,  // GCP
+    NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME = 4,  // VSI
+    NVHDMIPKT_TYPE_AUDIO_INFOFRAME           = 5,  // Audio InfoFrame
+    NVHDMIPKT_TYPE_EXTENDED_METADATA_PACKET  = 6,  // Extended Metadata Packet (HDMI 2.1)
+    NVHDMIPKT_INVALID_PKT_TYPE               = 13
+} NVHDMIPKT_TYPE;
+
+// Hdmi packet TransmitControl defines. These definitions reflect the
+// defines from ctrl and class defines for info frames.
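+// (The <high>:<low> values below are DRF-style bit ranges; the DRF_DEF()/FLD_TEST_DRF()
+// macros from nvmisc.h consume them when NVHDMIPKT_TC masks are built and parsed.)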
+#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE                     0:0
+#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE_DIS          0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE_EN           0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER                      1:1
+#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER_DIS           0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER_EN            0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE                     2:2
+#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE_DIS          0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE_EN           0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW                  3:3
+#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_DIS       0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_EN        0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK                     4:4
+#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK_DIS          0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK_EN           0x00000001
+
+#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT                  5:5
+#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT_SW_CTRL   0x00000000
+#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT_HW_CTRL   0x00000001
+
+// NVHDMIPKT_TC: HDMI Packet Transmit Control
+// NOTE: Clients should use the defines below for transmit control, and avoid using the ones
+//       above. Use those only if the client knows it wants fine control; in that case the
+//       value passed has to be explicitly typecast to NVHDMIPKT_TC by the client.
+typedef enum _NVHDMIPKT_TC
+{
+    NVHDMIPKT_TRANSMIT_CONTROL_DISABLE =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_SINGLE_FRAME =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_OTHER_FRAME =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER,     _EN)  |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _DIS) |
+         DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)),
+
+    NVHDMIPKT_TRANSMIT_CONTROL_VIDEO_FMT_HW_CTRL =
+        (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL)),
+
+} NVHDMIPKT_TC;
+
+// RM client handles. Used when the client chooses to have the hdmi library make RM calls.
+// NOTE: The NVHDMIPKT_RM_CALLS_INTERNAL macro should be defined to use it.
+typedef struct tagNVHDMIPKT_RM_CLIENT_HANDLES
+{
+    NvU32 hClient;
+    NvU32 hDevice;
+    NvU32 hSubDevices[NV_MAX_SUBDEVICES];
+    NvU32 hDisplay;
+} NVHDMIPKT_RM_CLIENT_HANDLES;
+
+/****************************** HDMI Library callbacks into client ******************************/
+typedef void* NvHdmiPkt_CBHandle;
+
+/************************************************************************************************
+ * [rmGetMemoryMap, rmFreeMemoryMap, rmDispControl,] acquireMutex and releaseMutex are mandatory*
+ * callbacks, to be implemented by the client. Callbacks in [] above are mandatory only for    *
+ * Windows.                                                                                    *
+ * Linux clients need not implement those if they plan to use NVHDMIPKT_RM_CALLS_INTERNAL.     *
+ *                                                                                             *
+ * rmGetMemoryMap and rmFreeMemoryMap are RM calls to allocate the DISP_SF_USER class.         *
+ * And mutex callbacks keep hdmi packet operations atomic.                                     *
+ ************************************************************************************************/
+typedef struct _tagNVHDMIPKT_CALLBACK
+{
+    // MANDATORY callbacks.
+    NvBool
+    (*rmGetMemoryMap)  (NvHdmiPkt_CBHandle handle,
+                        NvU32              dispSfUserClassId,
+                        NvU32              dispSfUserSize,
+                        NvU32              subDevice,
+                        NvU32*             pMemHandle,
+                        void**             ppBaseMem);
+
+    void
+    (*rmFreeMemoryMap) (NvHdmiPkt_CBHandle handle,
+                        NvU32              subDevice,
+                        NvU32              memHandle,
+                        void*              pMem);
+
+    NvBool
+    (*rmDispControl2)  (NvHdmiPkt_CBHandle handle,
+                        NvU32              subDevice,
+                        NvU32              cmd,
+                        void*              pParams,
+                        NvU32              paramSize);
+
+
+    void
+    (*acquireMutex)    (NvHdmiPkt_CBHandle handle);
+
+    void
+    (*releaseMutex)    (NvHdmiPkt_CBHandle handle);
+
+    // OPTIONAL callbacks
+    /* time in microseconds (us) */
+    NvBool
+    (*setTimeout)      (NvHdmiPkt_CBHandle handle,
+                        NvU32              us_timeout);
+
+    /* checkTimeout returns true when the timer times out */
+    NvBool
+    (*checkTimeout)    (NvHdmiPkt_CBHandle handle);
+
+    // callbacks to allocate memory on heap to reduce stack usage
+    void*
+    (*malloc)          (NvHdmiPkt_CBHandle handle,
+                        NvLength           numBytes);
+
+    void
+    (*free)            (NvHdmiPkt_CBHandle handle,
+                        void              *pMem);
+
+    void
+    (*print)           (NvHdmiPkt_CBHandle handle,
+                        const char*        fmtstring,
+                        ...)
+#if defined(__GNUC__)
+    __attribute__ ((format (printf, 2, 3)))
+#endif
+    ;
+
+    void
+    (*assert)          (NvHdmiPkt_CBHandle handle,
+                        NvBool             expression);
+} NVHDMIPKT_CALLBACK;
+
+/*********************** HDMI Library interface to write hdmi ctrl/packet ***********************/
+typedef void* NvHdmiPkt_Handle;
+#define NVHDMIPKT_INVALID_HANDLE ((NvHdmiPkt_Handle)0)
+
+/************************************************************************************************
+ * NvHdmiPkt_PacketCtrl - Returns HDMI NVHDMIPKT_RESULT.                                        *
+ *                                                                                              *
+ * Parameters:                                                                                  *
+ *  libHandle       - Hdmi library handle, provided on initializing the library.                *
+ *  subDevice       - Sub Device ID.                                                            *
+ *  displayId       - Display ID.                                                               *
+ *  head            - Head number.                                                              *
+ *  packetType      - One of the NVHDMIPKT_TYPE types.                                          *
+ *  transmitControl - Packet transmit control setting.                                          *
+ ************************************************************************************************/
+NVHDMIPKT_RESULT
+NvHdmiPkt_PacketCtrl (NvHdmiPkt_Handle  libHandle,
+                      NvU32             subDevice,
+                      NvU32             displayId,
+                      NvU32             head,
+                      NVHDMIPKT_TYPE    packetType,
+                      NVHDMIPKT_TC      transmitControl);
+
+/************************************************************************************************
+ * NvHdmiPkt_PacketWrite - Returns HDMI NVHDMIPKT_RESULT.                                       *
+ *                                                                                              *
+ * Parameters:                                                                                  *
+ *  libHandle       - Hdmi library handle, provided on initializing the library.                *
+ *  subDevice       - Sub Device ID.                                                            *
+ *  displayId       - Display ID.                                                               *
+ *  head            - Head number.                                                              *
+ *  packetType      - One of the NVHDMIPKT_TYPE types.                                          *
+ *  transmitControl - Packet transmit control setting.                                          *
+ *  packetLen       - Length of the packet in bytes to be transmitted.                          *
+ *  pPacket         - Pointer to packet data.                                                   *
+ ************************************************************************************************/
+NVHDMIPKT_RESULT
+NvHdmiPkt_PacketWrite(NvHdmiPkt_Handle  libHandle,
+                      NvU32             subDevice,
+                      NvU32             displayId,
+                      NvU32             head,
+                      NVHDMIPKT_TYPE    packetType,
+                      NVHDMIPKT_TC      transmitControl,
+                      NvU32             packetLen,
+                      NvU8 const *const pPacket);
+
+/***************************** Interface to initialize HDMI Library *****************************/
+
+/************************************************************************************************
+ * NvHdmiPkt_InitializeLibrary - Returns NvHdmiPkt_Handle. This handle is used to call          *
+ *                               library interfaces.
If handle returned is invalid - * + * NVHDMIPKT_INVALID_HANDLE -, there was a problem in * + * initialization and the library won't work. * + * * + * Parameters: * + * hwClass - Depending on HW, apply display class or display dma class. Either will do.* + * Eg. for GK104- NV9170_DISPLAY or NV917D_CORE_CHANNEL_DMA. * + * numSubDevices - Number of sub devices. * + * * + * cbHandle - Callback handle. Client cookie for callbacks made to client. * + * pCallback - Callbacks. Struct NVHDMIPKT_CALLBACK. * + * * + * Below mentioned sfUserHandle and clientHandles parameters are used only when not providing * + * rmGetMemoryMap, rmFreeMemoryMap and rmDispControl callbacks. This is meant for Linux. * + * And is controlled by NVHDMIPKT_RM_CALLS_INTERNAL macro. * + * NOTE: And Clients not using NVHDMIPKT_RM_CALLS_INTERNAL, need to set both sfUserHandle and * + * clientHandles to 0. * + * * + * sfUserHandle - SF_USER handle; this is the base handle. Subsequent subdevice handles are * + * derived incrementally from this handle. * + * pClientHandles - RM handles for client, device, subdevices and displayCommon. * + * * + ************************************************************************************************/ +NvHdmiPkt_Handle +NvHdmiPkt_InitializeLibrary(NvU32 const hwClass, + NvU32 const numSubDevices, + NvHdmiPkt_CBHandle const cbHandle, + const NVHDMIPKT_CALLBACK* const pCallback, + NvU32 const sfUserHandle, + const NVHDMIPKT_RM_CLIENT_HANDLES* const pClientHandles); + +/************************************************************************************************ + * NvHdmiPkt_DestroyLibrary * + * * + * When done with the HDMI Library call NvHdmiPkt_DestroyLibrary. It is like a destructor. * + * This destructor frees up resources acquired during initialize. * + * * + ************************************************************************************************/ +void +NvHdmiPkt_DestroyLibrary(NvHdmiPkt_Handle libHandle); + +#ifdef __cplusplus +} +#endif +#endif // _NVHDMIPKT_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c new file mode 100644 index 0000000..0e1bae8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c @@ -0,0 +1,385 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_0073.c + * + * Purpose: Provides infoframe write functions for HDMI library for Pre-KEPLER chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "hdmi_spec.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +NVHDMIPKT_RESULT +hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + +NVHDMIPKT_RESULT +hdmiPacketWrite0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +/* + * hdmiPacketCtrl0073 + */ +NVHDMIPKT_RESULT +hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS params = {0}; + + NVMISC_MEMSET(¶ms, 0, sizeof(params)); + + params.subDeviceInstance = subDevice; + params.displayId = displayId; + params.type = pThis->translatePacketType(pThis, packetType); + params.transmitControl = pThis->translateTransmitControl(pThis, transmitControl); + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (NvRmControl(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) + +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL, + ¶ms, sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to hdmiPacketCtrl failed."); + NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + } + + return result; +} + +/* + * hdmiPacketWrite0073 + */ +NVHDMIPKT_RESULT +hdmiPacketWrite0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = {0}; + + NVMISC_MEMSET(¶ms, 0, sizeof(params)); + + params.subDeviceInstance = subDevice; + params.displayId = displayId; + params.packetSize = packetLen; + params.transmitControl = pThis->translateTransmitControl(pThis, transmitControl); + + // init the infoframe packet + NVMISC_MEMSET(params.aPacket, 0, NV0073_CTRL_SET_OD_MAX_PACKET_SIZE); + + // copy the payload + NVMISC_MEMCPY(params.aPacket, pPacket, packetLen); + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (NvRmControl(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) + +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, + ¶ms, + sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to hdmiPacketWrite failed."); + 
NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + } + + return result; +} + +/* + * translatePacketType0073 + */ +static NvU32 +translatePacketType0073(NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TYPE packetType) +{ + NvU32 type0073 = 0; + + switch (packetType) + { + case NVHDMIPKT_TYPE_AVI_INFOFRAME: + type0073 = pktType_AviInfoFrame; + break; + + case NVHDMIPKT_TYPE_GENERIC: + type0073 = pktType_GamutMetadata; + break; + + case NVHDMIPKT_TYPE_GENERAL_CONTROL: + type0073 = pktType_GeneralControl; + break; + + case NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME: + type0073 = pktType_VendorSpecInfoFrame; + break; + + case NVHDMIPKT_TYPE_AUDIO_INFOFRAME: + type0073 = pktType_AudioInfoFrame; + break; + + default: + NvHdmiPkt_Print(pThis, "ERROR - translatePacketType wrong packet type: %0x", + packetType); + NvHdmiPkt_Assert(0); + break; + } + + return type0073; +} + +/* + * translateTransmitControl0073 + */ +static NvU32 +translateTransmitControl0073(NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TC transmitControl) +{ + NvU32 tc = 0; + + // TODO: tc validation + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _ENABLE, _YES, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _OTHER_FRAME, _ENABLE, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _SINGLE_FRAME, _ENABLE, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _HBLANK, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _ON_HBLANK, _ENABLE, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _VIDEO_FMT, _HW_CONTROLLED, tc); + } + + return tc; +} + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor0073 + */ +NvBool +hdmiConstructor0073(NVHDMIPKT_CLASS* pThis) +{ + return NV_TRUE; +} + +/* + * hdmiUnDestructor0073 + */ +void +hdmiDestructor0073(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +// Below are dummy functions for the HW functions not needed for a display class +/* + * hdmiWriteDummyPacket + */ +void +hdmiWriteDummyPacket(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiWriteDummyPacket called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return; +} + +/* + * hdmiReadDummyPacketStatus + */ +static NvBool +hdmiReadDummyPacketStatus(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType0073) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiReadDummyPacketStatus called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NV_TRUE; +} + +/* + * hdmiWriteDummyPacketCtrl + */ +static NVHDMIPKT_RESULT +hdmiWriteDummyPacketCtrl(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType0073, + NvU32 transmitControl, + NvBool bDisable) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiWriteDummyPacketCtrl called. 
" + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiAssessLinkCapabilitiesDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiAssessLinkCapabilitiesDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiQueryFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiQueryFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiSetFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiSetFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiClearFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiClearFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +/* + * initializeHdmiPktInterface0073 + */ +void +initializeHdmiPktInterface0073(NVHDMIPKT_CLASS* pClass) +{ + pClass->hdmiPacketCtrl = hdmiPacketCtrl0073; + pClass->hdmiPacketWrite = hdmiPacketWrite0073; + pClass->translatePacketType = translatePacketType0073; + pClass->translateTransmitControl = translateTransmitControl0073; + + // Functions below are mapped to dummy functions, as not needed for HW before GK104 + pClass->hdmiReadPacketStatus = hdmiReadDummyPacketStatus; + pClass->hdmiWritePacketCtrl = hdmiWriteDummyPacketCtrl; + pClass->hdmiWriteAviPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteAudioPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGenericPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGeneralCtrlPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteVendorPacket = hdmiWriteDummyPacket; + + // Update SF_USER data + pClass->dispSfUserClassId = 0; + pClass->dispSfUserSize = 0; + + // Functions below are used by HDMI FRL and will be available for Ampere+. + pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesDummy; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigDummy; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigDummy; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigDummy; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c new file mode 100644 index 0000000..eb7d399 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c @@ -0,0 +1,804 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9171.c + * + * Purpose: Provides packet write functions for HDMI library for KEPLER + chips + */ + +#include "nvlimits.h" +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "hdmi_spec.h" +#include "class/cl9171.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +#define NVHDMIPKT_9171_INVALID_PKT_TYPE ((NV9171_SF_HDMI_INFO_IDX_VSI) + 1) +NVHDMIPKT_RESULT +hdmiPacketWrite9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +NVHDMIPKT_RESULT +hdmiPacketCtrl9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); +/* + * hdmiReadPacketStatus9171 + */ +static NvBool +hdmiReadPacketStatus9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171) +{ + NvBool bResult = NV_FALSE; + NvU32 regOffset = 0; + NvU32 status = 0; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_INFO_STATUS__SIZE_1) + { + return bResult; + } + + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + case NV9171_SF_HDMI_INFO_IDX_VSI: + regOffset = NV9171_SF_HDMI_INFO_STATUS(head, pktType9171); + status = REG_RD32(pBaseReg, regOffset); + bResult = FLD_TEST_DRF(9171, _SF_HDMI_INFO_STATUS, _SENT, _DONE, status); + break; + + default: + break; + } + + return bResult; +} + +/* + * hdmiWritePacketCtrl9171 + */ +static NVHDMIPKT_RESULT +hdmiWritePacketCtrl9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171, + NvU32 transmitControl, + NvBool bDisable) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_INVALID_ARG; + NvU32 regOffset = 0; + NvU32 hdmiCtrl = 0; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_INFO_CTRL__SIZE_1) + { + return result; + } + + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + case NV9171_SF_HDMI_INFO_IDX_VSI: + regOffset = NV9171_SF_HDMI_INFO_CTRL(head, pktType9171); + hdmiCtrl = REG_RD32(pBaseReg, regOffset); + hdmiCtrl = (bDisable == NV_TRUE) ? 
+ (FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _ENABLE, _DIS, hdmiCtrl)) : + (transmitControl); + REG_WR32(pBaseReg, regOffset, hdmiCtrl); + + result = NVHDMIPKT_SUCCESS; + break; + + default: + break; + } + + return result; +} + +/* + * hdmiWriteAviPacket9171 + */ +static void +hdmiWriteAviPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(head), data); + + return; +} + +/* + * hdmiWriteGenericPacket9171 + */ +static void +hdmiWriteGenericPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = 
FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB14, pPacket[17], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB15, pPacket[18], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB16, pPacket[19], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB17, pPacket[20], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB18, pPacket[21], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB19, pPacket[22], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB20, pPacket[23], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB21, pPacket[24], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB22, pPacket[25], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB23, pPacket[26], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB24, pPacket[27], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB25, pPacket[28], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB26, pPacket[29], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB27, pPacket[30], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(head), data); + + return; +} + +/* + * hdmiWriteGeneralCtrlPacket9171 + */ +static 
void +hdmiWriteGeneralCtrlPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + // orIndexer info is ignored. + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GCP_SUBPACK(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB2, pPacket[5], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GCP_SUBPACK(head), data); + + return; +} + +/* + * hdmiWriteVendorPacket9171 + */ +static void +hdmiWriteVendorPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacketIn) +{ + NvU32 data = 0; + NvU8 pPacket[31] = {0}; + + NVMISC_MEMCPY(pPacket, pPacketIn, packetLen); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB14, pPacket[17], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB15, pPacket[18], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB16, pPacket[19], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB17, pPacket[20], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, 
_PB18, pPacket[21], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, _PB19, pPacket[22], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, _PB20, pPacket[23], data);
+    REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(head), data);
+
+    data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_LOW(head));
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB21, pPacket[24], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB22, pPacket[25], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB23, pPacket[26], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB24, pPacket[27], data);
+    REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_LOW(head), data);
+
+    data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(head));
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB25, pPacket[28], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB26, pPacket[29], data);
+    data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB27, pPacket[30], data);
+    REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(head), data);
+
+    return;
+}
+
+/*
+ * translatePacketType9171
+ */
+static NvU32
+translatePacketType9171(NVHDMIPKT_CLASS*  pThis,
+                        NVHDMIPKT_TYPE    packetType)
+{
+    NvU32 type9171 = NVHDMIPKT_9171_INVALID_PKT_TYPE;
+
+    switch (packetType)
+    {
+    case NVHDMIPKT_TYPE_AVI_INFOFRAME:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME;
+        break;
+
+    case NVHDMIPKT_TYPE_GENERIC:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME;
+        break;
+
+    case NVHDMIPKT_TYPE_GENERAL_CONTROL:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_GCP;
+        break;
+
+    case NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME:
+        type9171 = NV9171_SF_HDMI_INFO_IDX_VSI;
+        break;
+
+    case NVHDMIPKT_TYPE_AUDIO_INFOFRAME:
+    default:
+        NvHdmiPkt_Print(pThis, "ERROR - translatePacketType wrong packet type: %0x.",
+                        packetType);
+        NvHdmiPkt_Assert(0);
+        break;
+    }
+
+    return type9171;
+}
+
+/*
+ * translateTransmitControl9171
+ */
+static NvU32
+translateTransmitControl9171(NVHDMIPKT_CLASS*  pThis,
+                             NVHDMIPKT_TC      transmitControl)
+{
+    NvU32 tc = 0;
+
+    // TODO: tc validation
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _ENABLE, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _OTHER, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _SINGLE, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _CHKSUM_HW, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _HBLANK, _EN, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _HBLANK, _EN, tc);
+    }
+
+    if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL, transmitControl))
+    {
+        tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _VIDEO_FMT, _HW_CONTROLLED, tc);
+    }
+
+    return tc;
+}
+
+/*
+ * hdmiPacketCtrl9171
+ */
+NVHDMIPKT_RESULT
+hdmiPacketCtrl9171(NVHDMIPKT_CLASS*  pThis,
+                   NvU32             subDevice,
+                   NvU32             displayId,
+                   NvU32             head,
+                   NVHDMIPKT_TYPE    packetType,
+                   NVHDMIPKT_TC      transmitControl)
+{
+    NvU32* pBaseReg    = (NvU32*)pThis->memMap[subDevice].pMemBase;
+    NvU32  pktType9171 = pThis->translatePacketType(pThis, packetType);
+    NvU32  tc          = pThis->translateTransmitControl(pThis, transmitControl);
+
+    if (pBaseReg == 0 ||
head >= NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 || + pktType9171 == NVHDMIPKT_9171_INVALID_PKT_TYPE) + { + return NVHDMIPKT_INVALID_ARG; + } + + return pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_FALSE); +} + +/* + * internal utility function + * checkPacketStatus + */ +static NVHDMIPKT_RESULT +checkPacketStatus(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NvBool bCheckPacketStatus = NV_TRUE; + NvU32 regOffset = 0; + NvU32 status = 0; + + // check to see if timer callbacks are provided + if (pThis->callback.setTimeout == 0 || pThis->callback.checkTimeout == 0) + { + goto checkPacketStatus_exit; + } + + // Mark packets that don't need status check + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + regOffset = NV9171_SF_HDMI_INFO_STATUS(head, pktType9171); + status = REG_RD32(pBaseReg, regOffset); + bCheckPacketStatus = FLD_TEST_DRF(9171, _SF_HDMI_INFO_CTRL, _SINGLE, _EN, status); + break; + + default: + bCheckPacketStatus = NV_FALSE; + break; + } + + if (bCheckPacketStatus == NV_TRUE) + { + if (pThis->callback.setTimeout(pThis->cbHandle, NVHDMIPKT_STATUS_READ_TIMEOUT_IN_us) + == NV_FALSE) + { + // Timer set failed + goto checkPacketStatus_exit; + } + + while(pThis->hdmiReadPacketStatus(pThis, pBaseReg, head, pktType9171) == NV_FALSE) + { + if (pThis->callback.checkTimeout(pThis->cbHandle) == NV_TRUE) + { + // status check operation timed out + result = NVHDMIPKT_TIMEOUT; + goto checkPacketStatus_exit; + } + } + } + +checkPacketStatus_exit: + return result; +} + +/* + * hdmiPacketWrite9171 + */ +NVHDMIPKT_RESULT +hdmiPacketWrite9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase; + NvU32 pktType9171 = pThis->translatePacketType(pThis, packetType); + NvU32 tc = pThis->translateTransmitControl(pThis, transmitControl); + NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS params = {0}; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 || + packetLen == 0 || pPacket == 0 || pktType9171 == NVHDMIPKT_9171_INVALID_PKT_TYPE) + { + result = NVHDMIPKT_INVALID_ARG; + goto hdmiPacketWrite9171_exit; + } + + // acquire mutex + pThis->callback.acquireMutex(pThis->cbHandle); + + // Check status if last infoframe was sent out or not + + if ((result = checkPacketStatus(pThis, pBaseReg, head, pktType9171)) == + NVHDMIPKT_TIMEOUT) + { + NvHdmiPkt_Print(pThis, "ERROR - Packet status check timed out."); + NvHdmiPkt_Assert(0); + goto hdmiPacketWrite9171_release_mutex_exit; + } + + // Disable this packet type. + pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_TRUE); + + // write the packet + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + pThis->hdmiWriteAviPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + pThis->hdmiWriteGenericPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + case NV9171_SF_HDMI_INFO_IDX_GCP: + // Check whether the GCP packet is AVMute DISABLE or AvMute ENABLE + // Enable HDMI only on GCP unmute i.e. AVMUTE DISABLE + if (pPacket[HDMI_PKT_HDR_SIZE] == HDMI_GENCTRL_PACKET_MUTE_DISABLE) + { + // Enable HDMI. 
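+                // (pPacket[HDMI_PKT_HDR_SIZE] is the first GCP subpacket byte: a Clear_AVMUTE
+                //  value re-enables HDMI through RM here, while Set_AVMUTE packets are written
+                //  below without touching the HDMI enable.)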
+ NVMISC_MEMSET(¶ms, 0, sizeof(params)); + params.subDeviceInstance = (NvU8)subDevice; + params.displayId = displayId; + params.bEnable = NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) + +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI, + ¶ms, + sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to enable hdmi ctrl failed."); + NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + } + } + pThis->hdmiWriteGeneralCtrlPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + case NV9171_SF_HDMI_INFO_IDX_VSI: + pThis->hdmiWriteVendorPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + default: + result = NVHDMIPKT_INVALID_ARG; + break; + } + + // Enable this infoframe. + pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_FALSE); + +hdmiPacketWrite9171_release_mutex_exit: + // release mutex + pThis->callback.releaseMutex(pThis->cbHandle); +hdmiPacketWrite9171_exit: + return result; +} + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9171 + */ +NvBool +hdmiConstructor9171(NVHDMIPKT_CLASS* pThis) +{ + NvU32 i = 0; + NvBool result = NV_TRUE; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < pThis->numSubDevices; i++) + { + if (CALL_DISP_RM(NvRmAlloc)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->sfUserHandle + i, + pThis->dispSfUserClassId, + (void*)0) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. " + "Failed to alloc SF_USER handle"); + NvHdmiPkt_Assert(0); + break; + } + + pThis->memMap[i].memHandle = pThis->sfUserHandle + i; + + if (CALL_DISP_RM(NvRmMapMemory)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle, + 0, + pThis->dispSfUserSize, + &pThis->memMap[i].pMemBase, + 0) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. " + "Failed to map SF_USER memory."); + NvHdmiPkt_Assert(0); + break; + } + + if (pThis->memMap[i].pMemBase == 0) + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. " + "SF_USER memory returned is NULL."); + NvHdmiPkt_Assert(0); + break; + } + + pThis->memMap[i].subDevice = i; + } + + // coudln't complete the loop above + if (i < pThis->numSubDevices) + { + result = NV_FALSE; + goto hdmiConstructor9171_exit; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < pThis->numSubDevices; i++) + { + result = pThis->callback.rmGetMemoryMap(pThis->cbHandle, + pThis->dispSfUserClassId, + pThis->dispSfUserSize, + i, + &pThis->memMap[i].memHandle, + &pThis->memMap[i].pMemBase); + if (result == NV_TRUE) + { + pThis->memMap[i].subDevice = i; + } + else + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. 
" + "Failed to map SF_USER memory."); + NvHdmiPkt_Assert(0); + result = NV_FALSE; + goto hdmiConstructor9171_exit; + } + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + +hdmiConstructor9171_exit: + return result; +} + +/* + * hdmiDestructor9171 + */ +void +hdmiDestructor9171(NVHDMIPKT_CLASS* pThis) + +{ + NvU32 i = 0; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + // free memory + if (pThis->memMap[i].pMemBase) + { + if (CALL_DISP_RM(NvRmUnmapMemory)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle, + pThis->memMap[i].pMemBase, + 0) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - unInit failed. " + "SF_USER memory unMap failed."); + NvHdmiPkt_Assert(0); + } + } + + // free handle + if (pThis->memMap[i].memHandle) + { + if (CALL_DISP_RM(NvRmFree)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - unInit failed. " + "Freeing SF_USER memory handle failed."); + NvHdmiPkt_Assert(0); + } + } + + pThis->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + pThis->memMap[i].memHandle = 0; + pThis->memMap[i].pMemBase = 0; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + if (pThis->memMap[i].memHandle) + { + pThis->callback.rmFreeMemoryMap(pThis->cbHandle, + i, + pThis->memMap[i].memHandle, + pThis->memMap[i].pMemBase); + + pThis->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + pThis->memMap[i].memHandle = 0; + pThis->memMap[i].pMemBase = 0; + } + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + return; +} + +/* + * initializeHdmiPktInterface9171 + */ +void +initializeHdmiPktInterface9171(NVHDMIPKT_CLASS* pClass) +{ + pClass->hdmiPacketCtrl = hdmiPacketCtrl9171; + pClass->hdmiPacketWrite = hdmiPacketWrite9171; + pClass->translatePacketType = translatePacketType9171; + pClass->translateTransmitControl = translateTransmitControl9171; + + // HW register write functions + pClass->hdmiReadPacketStatus = hdmiReadPacketStatus9171; + pClass->hdmiWritePacketCtrl = hdmiWritePacketCtrl9171; + pClass->hdmiWriteAviPacket = hdmiWriteAviPacket9171; + pClass->hdmiWriteAudioPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGenericPacket = hdmiWriteGenericPacket9171; + pClass->hdmiWriteGeneralCtrlPacket = hdmiWriteGeneralCtrlPacket9171; + pClass->hdmiWriteVendorPacket = hdmiWriteVendorPacket9171; + + // Update SF_USER data + pClass->dispSfUserClassId = NV9171_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9171DispSfUserMap); + + // Functions below are used by HDMI FRL and will be available for Ampere+. + pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesDummy; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigDummy; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigDummy; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigDummy; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c new file mode 100644 index 0000000..eaf65e5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9271.c + * + * Purpose: Provides packet write functions for HDMI library for KEPLER + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9271.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9271. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9271 + */ +NvBool +hdmiConstructor9271(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9271 + */ +void +hdmiDestructor9271(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9271 + */ +void +initializeHdmiPktInterface9271(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9271_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9271DispSfUserMap); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c new file mode 100644 index 0000000..d863c9f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9471.c + * + * Purpose: Provides packet write functions for HDMI library for Maxwell + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9471.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9471. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9471 + */ +NvBool +hdmiConstructor9471(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9471 + */ +void +hdmiDestructor9471(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9471 + */ +void +initializeHdmiPktInterface9471(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9471_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9471DispSfUserMap); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c new file mode 100644 index 0000000..85e6b13 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * File: nvhdmipkt_9571.c + * + * Purpose: Provides packet write functions for HDMI library for Maxwell + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9571.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9571. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9571 + */ +NvBool +hdmiConstructor9571(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9571 + */ +void +hdmiDestructor9571(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9571 + */ +void +initializeHdmiPktInterface9571(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9571_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9571DispSfUserMap); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c new file mode 100644 index 0000000..fd89eac --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_C371.c + * + * Purpose: Provides packet write functions for HDMI library for Volta+ chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/clc371.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to C371. 
* +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructorC371 + */ +NvBool +hdmiConstructorC371(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructorC371 + */ +void +hdmiDestructorC371(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterfaceC371 + */ +void +initializeHdmiPktInterfaceC371(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NVC371_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(NvC371DispSfUserMap); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c new file mode 100644 index 0000000..8385565 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c @@ -0,0 +1,1389 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * File: nvhdmipkt_C671.c + * + * Purpose: Provides packet write functions for HDMI library for Ampere+ chips + */ + +#include +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" +#include "nvHdmiFrlCommon.h" + +#include "../timing/nvt_dsc_pps.h" +#include "ctrl/ctrl0073/ctrl0073system.h" + +#include "class/clc671.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +#define MULTIPLIER_1G 1000000000 +#define PCLK_VARIANCE_10MHZ 1000 + +// In HDMI case, for PPS set, HDMI2.1 spec expects source to set this field to 13, decoder capability is assumed +// Note, in DP case, DSC decoder is allowed to report line buffer depth capability through DPCD registers +#define HDMI_DSC_DECODER_LINE_BUFFER_BIT_DEPTH_CAP 13 +#define NVHDMIPKT_C671_INVALID_PKT_TYPE ((NVC671_SF_HDMI_INFO_IDX_VSI) + 1) + +extern NVHDMIPKT_RESULT hdmiPacketWrite0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +extern NVHDMIPKT_RESULT hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + +extern NVHDMIPKT_RESULT hdmiPacketWrite9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +static NVHDMIPKT_RESULT hdmiClearFRLConfigC671(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId); + + +// translate FRL rate to RM control param +static NvU32 translateFRLRateToNv0073SetHdmiFrlConfig(HDMI_FRL_DATA_RATE frlRate) +{ + switch(frlRate) + { + case HDMI_FRL_DATA_RATE_NONE : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE; + case HDMI_FRL_DATA_RATE_3LANES_3GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G; + case HDMI_FRL_DATA_RATE_3LANES_6GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G; + case HDMI_FRL_DATA_RATE_4LANES_6GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G; + case HDMI_FRL_DATA_RATE_4LANES_8GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G; + case HDMI_FRL_DATA_RATE_4LANES_10GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G; + case HDMI_FRL_DATA_RATE_4LANES_12GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G; + default: + break; + } + return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE; +} + +/* + * Both DSC_Max_FRL_Rate and Max_FRL_Rate have same translation + * + */ +static HDMI_FRL_DATA_RATE translateFRLCapToFRLDataRate(NvU32 sinkFRLcap) +{ + switch(sinkFRLcap) + { + case 6: return HDMI_FRL_DATA_RATE_4LANES_12GBPS; + case 5: return HDMI_FRL_DATA_RATE_4LANES_10GBPS; + case 4: return HDMI_FRL_DATA_RATE_4LANES_8GBPS; + case 3: return HDMI_FRL_DATA_RATE_4LANES_6GBPS; + case 2: return HDMI_FRL_DATA_RATE_3LANES_6GBPS; + case 1: return HDMI_FRL_DATA_RATE_3LANES_3GBPS; + case 0: // fall through + default: break; + } + + if (sinkFRLcap > 6 && sinkFRLcap <= 15) + { + return HDMI_FRL_DATA_RATE_4LANES_12GBPS; + } + + return HDMI_FRL_DATA_RATE_NONE; +} + +// If we want to force 2ch48KHz fill it in as default, if not, +// Lookup sink short audio descriptor blocks to see max supported audio +static void populateAudioCaps(NVT_EDID_CEA861_INFO const * const p861ExtBlock, + HDMI_SINK_CAPS * pSinkCaps) +{ + NvU32 i; + + for (i = 0; i < p861ExtBlock->total_sad; i++) + { + NvU32 data = 
p861ExtBlock->audio[i].byte1; + data = (data & NVT_CEA861_AUDIO_FORMAT_MASK) >> NVT_CEA861_AUDIO_FORMAT_SHIFT; + + // unsupported + if ((data == NVT_CEA861_AUDIO_FORMAT_RSVD) || + (data == NVT_CEA861_AUDIO_FORMAT_RSVD15)) + { + continue; + } + + // check for HBR audio support. We don't support any other packet types + if ((data == NVT_CEA861_AUDIO_FORMAT_DTS_HD) || + (data == NVT_CEA861_AUDIO_FORMAT_MAT)) + { + pSinkCaps->bHBRAudio = NV_TRUE; + } + + // num of channels for this audio format + data = p861ExtBlock->audio[i].byte1; + NvU32 numChannels = ((data & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) >> NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT) + 1; + if (pSinkCaps->maxAudioChannels < numChannels) + { + pSinkCaps->maxAudioChannels = numChannels; + } + + // get max sampling frequency + data = p861ExtBlock->audio[i].byte2; + NvU32 sampleFreq = (data & NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ) ? 192 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ) ? 176 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ) ? 96 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ) ? 88 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ) ? 48 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ) ? 44 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ) ? 32 : 0; + if (pSinkCaps->maxAudioFreqKHz < sampleFreq) + { + pSinkCaps->maxAudioFreqKHz = sampleFreq; + } + } +} + +/* + * hdmiAssessLinkCapabilities + * + * 1. Try physical link training to determine max link capacity + * 2. Calculate max audio capabilities + * 3. Limit connector max to what the source can support + * AssesssLinkCapabilities is expected to be called at hotplug time. Ideally, srcCaps need to be calculated one time, + * but for now, no incentive to do so. In future move it out to better place as need arises + */ +static NVHDMIPKT_RESULT +hdmiAssessLinkCapabilitiesC671(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps) +{ + + // Read DSC caps from RM - gpu caps for DSC are same across DP and HDMI FRL (HDMI 2.1+) + // Hence use same RM control as DP case for reading this cap + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS params; + params.subDeviceInstance = 0; + params.sorIndex = 0; // Passing SOR index as 0 since all SORs have the same capability. + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_DP_GET_CAPS, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_DP_GET_CAPS, + ¶ms, + sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + return NVHDMIPKT_FAIL; + } + + pSrcCaps->dscCaps.dscCapable = params.DSC.bDscSupported; + pSrcCaps->dscCaps.encoderColorFormatMask = params.DSC.encoderColorFormatMask; + pSrcCaps->dscCaps.dualHeadBppTargetMaxX16 = 256; // Tu10x/GA10x HW DSC module allow max 16bpp in 2H1OR mode. 
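+
+    // Next, query the HDMI-specific GPU caps: RM reports the maximum FRL rate
+    // this source can drive in hdmiGpuCapsParams.caps, which is translated via
+    // translateFRLCapToFRLDataRate() below. If the control call fails, the
+    // params stay zeroed, so the translation safely degrades to
+    // HDMI_FRL_DATA_RATE_NONE.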
+ + NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS hdmiGpuCapsParams; + NVMISC_MEMSET(&hdmiGpuCapsParams, 0, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS)); +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS, + &hdmiGpuCapsParams, + sizeof(hdmiGpuCapsParams)) != NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + hdmiGpuCapsParams.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS, + &hdmiGpuCapsParams, + sizeof(hdmiGpuCapsParams)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NVMISC_MEMSET(&hdmiGpuCapsParams, 0, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS)); + } + + pSrcCaps->linkMaxFRLRate = translateFRLCapToFRLDataRate(hdmiGpuCapsParams.caps); + + switch(params.DSC.bitsPerPixelPrecision) + { + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1; break; + default: break; + } + + pSrcCaps->dscCaps.lineBufferSizeKB = params.DSC.lineBufferSizeKB; + pSrcCaps->dscCaps.rateBufferSizeKB = params.DSC.rateBufferSizeKB; + pSrcCaps->dscCaps.maxNumHztSlices = params.DSC.maxNumHztSlices; + pSrcCaps->dscCaps.lineBufferBitDepth = params.DSC.lineBufferBitDepth; + pSrcCaps->dscCaps.maxWidthPerSlice = 5120; // Max DSC buffer width per head is 5120, this can be chunks of 1/2/4 slices, so keep 5120 as the very max. + + pSinkCaps->pHdmiForumInfo = &pSinkEdid->hdmiForumInfo; + populateAudioCaps(&pSinkEdid->ext861, pSinkCaps); + populateAudioCaps(&pSinkEdid->ext861_2, pSinkCaps); + + NvU32 setFRLRate = pSinkEdid->hdmiForumInfo.max_FRL_Rate; + + pSinkCaps->linkMaxFRLRate = translateFRLCapToFRLDataRate(setFRLRate); + pSinkCaps->linkMaxFRLRateDSC = (pSrcCaps->dscCaps.dscCapable && + (pSinkEdid->hdmiForumInfo.dsc_Max_FRL_Rate > setFRLRate)) ? + pSinkCaps->linkMaxFRLRate : + translateFRLCapToFRLDataRate(pSinkEdid->hdmiForumInfo.dsc_Max_FRL_Rate); + + return NVHDMIPKT_SUCCESS; +} + +// Fill in basic params from Timing info etc +static void populateBaseFRLParams(HDMI_VIDEO_TRANSPORT_INFO const *pVidTransInfo, + HDMI_SINK_CAPS const *pSinkCaps, + NvBool bForce2Ch48KHz, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams) +{ + pFRLParams->pclk10KHz = pVidTransInfo->pTiming->pclk; + pFRLParams->hTotal = pVidTransInfo->pTiming->HTotal; + pFRLParams->hActive = pVidTransInfo->pTiming->HVisible; + pFRLParams->bpc = pVidTransInfo->bpc; + pFRLParams->pixelPacking = pVidTransInfo->packing; + + pFRLParams->numAudioChannels = bForce2Ch48KHz ? 2 : pSinkCaps->maxAudioChannels; + pFRLParams->audioFreqKHz = bForce2Ch48KHz ? 48 : pSinkCaps->maxAudioFreqKHz; + pFRLParams->audioType = pSinkCaps->bHBRAudio ? 
AUDIO_PKTTYPE_HBR_AUDIO : + AUDIO_PKTTYPE_LPCM_SAMPLE; + + pFRLParams->compressionInfo.dscTotalChunkKBytes = 1024 * (pSinkCaps->pHdmiForumInfo->dsc_totalChunkKBytes); +} + + +// Get next higher link rate +static HDMI_FRL_DATA_RATE getNextHigherLinkRate(HDMI_FRL_DATA_RATE frlRate) +{ + return (frlRate == HDMI_FRL_DATA_RATE_4LANES_12GBPS) ? HDMI_FRL_DATA_RATE_NONE : (frlRate + 1); +} + +// Fill in GPU and Monitor caps for DSC PPS calculations +static void populateDscCaps(HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + DSC_INFO * pDscInfo) +{ + // populate src caps + pDscInfo->gpuCaps.encoderColorFormatMask = pSrcCaps->dscCaps.encoderColorFormatMask; + pDscInfo->gpuCaps.lineBufferSize = pSrcCaps->dscCaps.lineBufferSizeKB; + pDscInfo->gpuCaps.bitsPerPixelPrecision = pSrcCaps->dscCaps.bppPrecision; + pDscInfo->gpuCaps.maxNumHztSlices = pSrcCaps->dscCaps.maxNumHztSlices; + pDscInfo->gpuCaps.lineBufferBitDepth = pSrcCaps->dscCaps.lineBufferBitDepth; + + // populate sink caps + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_RGB; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422; + + pDscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; + if (pSinkCaps->pHdmiForumInfo->dsc_Native_420) + { + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420; + } + + // limited by spec + pDscInfo->sinkCaps.maxSliceWidth = 2720; + + NvU32 sliceCountMask = 0; + NvU32 maxNumHztSlices = pSinkCaps->pHdmiForumInfo->dsc_MaxSlices; + NvU32 peakThroughput = (pSinkCaps->pHdmiForumInfo->dsc_MaxPclkPerSliceMHz == 400) ? + DSC_DECODER_PEAK_THROUGHPUT_MODE0_400 : + DSC_DECODER_PEAK_THROUGHPUT_MODE0_340; + + switch(pSinkCaps->pHdmiForumInfo->dsc_MaxSlices) + { + case 16: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_16; // fall-through + case 12: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_12; // fall-through + case 8: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_8; // fall-through + case 4: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_4; // fall-through + case 2: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_2; // fall-through + case 1: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_1; break; + default: break; + } + + pDscInfo->sinkCaps.sliceCountSupportedMask = sliceCountMask; + pDscInfo->sinkCaps.maxNumHztSlices = maxNumHztSlices; + pDscInfo->sinkCaps.lineBufferBitDepth = HDMI_DSC_DECODER_LINE_BUFFER_BIT_DEPTH_CAP; + + // Color depth supported by DSC decoder of panel + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_16bpc ? DSC_DECODER_COLOR_DEPTH_CAPS_16_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_12bpc ? DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_10bpc ? 
DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS; + + pDscInfo->sinkCaps.bBlockPrediction = 1; + pDscInfo->sinkCaps.algorithmRevision.versionMajor = 1; + pDscInfo->sinkCaps.algorithmRevision.versionMinor = 2; + pDscInfo->sinkCaps.peakThroughputMode0 = peakThroughput; + pDscInfo->sinkCaps.peakThroughputMode1 = peakThroughput * 2; +} + +// Fill in mode related info for DSC lib +static void populateDscModesetInfo(HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + MODESET_INFO * pDscModesetInfo) +{ + pDscModesetInfo->pixelClockHz = pVidTransInfo->pTiming->pclk * 10000; // Requested pixel clock for the mode + pDscModesetInfo->activeWidth = pVidTransInfo->pTiming->HVisible; // Active Width + pDscModesetInfo->activeHeight = pVidTransInfo->pTiming->VVisible; // Active Height + pDscModesetInfo->bitsPerComponent = (NvU32)pVidTransInfo->bpc; // BPC value to be used + pDscModesetInfo->colorFormat = (pVidTransInfo->packing == HDMI_PIXEL_PACKING_RGB) ? NVT_COLOR_FORMAT_RGB : + (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr444) ? NVT_COLOR_FORMAT_YCbCr444 : + (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr422) ? NVT_COLOR_FORMAT_YCbCr422 : + (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr420) ? NVT_COLOR_FORMAT_YCbCr420 : 0; + pDscModesetInfo->bDualMode = pVidTransInfo->bDualHeadMode; + pDscModesetInfo->bDropMode = NV_FALSE; +} + +// Checks against source and sink caps whether DSC is possible +// Tries to determine slice width and slice count accounting for 2Head1Or, populates this info into FRL calculation structure +// if this calculation fails DSC cannot be enabled +static NvBool evaluateIsDSCPossible(NVHDMIPKT_CLASS *pThis, + HDMI_SRC_CAPS const *pSrcCaps, + HDMI_SINK_CAPS const *pSinkCaps, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams) +{ + const NvU32 numHeadsDrivingSink = pVidTransInfo->bDualHeadMode ? 2 : 1; + + if (!pSrcCaps->dscCaps.dscCapable) + { + return NV_FALSE; + } + + if (!pSinkCaps->pHdmiForumInfo->dsc_1p2 || + !pSinkCaps->linkMaxFRLRateDSC || + (!pSinkCaps->pHdmiForumInfo->dsc_16bpc && (pFRLParams->bpc == HDMI_BPC16)) || + (!pSinkCaps->pHdmiForumInfo->dsc_12bpc && (pFRLParams->bpc == HDMI_BPC12)) || + (!pSinkCaps->pHdmiForumInfo->dsc_10bpc && (pFRLParams->bpc == HDMI_BPC10))) + { + return NV_FALSE; + } + + // Disallow DSC if the source or sink don't support DSC with this mode's colorformat/packing. 
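+    // Note: 4:2:2 is validated against the source's native-422 encoder support
+    // only, while 4:2:0 additionally requires the sink to advertise
+    // dsc_Native_420 in pHdmiForumInfo.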
+ switch (pVidTransInfo->packing) + { + case HDMI_PIXEL_PACKING_RGB: + if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_RGB)) + { + return NV_FALSE; + } + break; + case HDMI_PIXEL_PACKING_YCbCr444: + if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444)) + { + return NV_FALSE; + } + break; + case HDMI_PIXEL_PACKING_YCbCr422: + if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) + { + return NV_FALSE; + } + break; + case HDMI_PIXEL_PACKING_YCbCr420: + if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) || + !pSinkCaps->pHdmiForumInfo->dsc_Native_420) + { + return NV_FALSE; + } + break; + } + + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (pGetHdmiFrlCapacityComputationParams) + { + NvBool bIsDSCPossible = NV_FALSE; + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->dsc.maxSliceCount = NV_MIN(pSrcCaps->dscCaps.maxNumHztSlices * numHeadsDrivingSink, pSinkCaps->pHdmiForumInfo->dsc_MaxSlices); + pGetHdmiFrlCapacityComputationParams->dsc.maxSliceWidth = pSrcCaps->dscCaps.maxWidthPerSlice; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_IS_FRL_DSC_POSSIBLE; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + bIsDSCPossible = pGetHdmiFrlCapacityComputationParams->dsc.bIsDSCPossible; + } + + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + return bIsDSCPossible; + } + return NV_FALSE; + + return NV_TRUE; +} + +static void translateBitRate(HDMI_FRL_DATA_RATE frlRate, NvU32 *pFrlBitRateGbps, NvU32 *pNumLanes) +{ + switch(frlRate) + { + case HDMI_FRL_DATA_RATE_4LANES_12GBPS : { *pFrlBitRateGbps = 12; *pNumLanes = 4; break; } + case HDMI_FRL_DATA_RATE_4LANES_10GBPS : { *pFrlBitRateGbps = 10; *pNumLanes = 4; break; } + case HDMI_FRL_DATA_RATE_4LANES_8GBPS : { *pFrlBitRateGbps = 8; *pNumLanes = 4; break; } + case HDMI_FRL_DATA_RATE_4LANES_6GBPS : { *pFrlBitRateGbps = 6; *pNumLanes = 4; break; } + case HDMI_FRL_DATA_RATE_3LANES_6GBPS : { *pFrlBitRateGbps = 6; *pNumLanes = 3; break; } + case HDMI_FRL_DATA_RATE_3LANES_3GBPS : // fall through + default : { *pFrlBitRateGbps = 3; *pNumLanes = 3; break; } + } +} + +// Determine if video transport is possible at any FRL rate in the specified range +// Iterate from min rate to max rate +static NVHDMIPKT_RESULT +determineUncompressedFRLConfig(NVHDMIPKT_CLASS *pThis, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams, + HDMI_FRL_DATA_RATE 
minFRLRate, + HDMI_FRL_DATA_RATE maxFRLRate, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT *pResults) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + HDMI_FRL_DATA_RATE frlRate = minFRLRate; + NVHDMIPKT_RESULT status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + while (frlRate != HDMI_FRL_DATA_RATE_NONE) + { + translateBitRate(frlRate, &pFRLParams->frlBitRateGbps, &pFRLParams->numLanes); + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto uncompressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if ((status == NVHDMIPKT_SUCCESS) || + (frlRate == maxFRLRate)) + { + break; + } + + // try again at next link rate + frlRate = getNextHigherLinkRate(frlRate); + } + + pResults->frlRate = frlRate; + +uncompressedQuery_exit: + if (pGetHdmiFrlCapacityComputationParams) + { + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + } + return status; +} + +// Determines the absolute min n max Bpp settings we can use with DSC. This is irrespective of FRL rate +static void calcBppMinMax(HDMI_SRC_CAPS const *pSrcCaps, + HDMI_SINK_CAPS const *pSinkCaps, + HDMI_VIDEO_TRANSPORT_INFO const *pVidTransInfo, + NvU32 *pBppMinX16, + NvU32 *pBppMaxX16) +{ + + NvU32 bppMinX16 = 0; + NvU32 bppMaxX16 = 0; + + switch(pVidTransInfo->packing) + { + case HDMI_PIXEL_PACKING_YCbCr420: { bppMinX16 = 6 * 16; bppMaxX16 = (3 * pVidTransInfo->bpc * 8 - 1); break; } + case HDMI_PIXEL_PACKING_YCbCr422: { bppMinX16 = 7 * 16; bppMaxX16 = (2 * pVidTransInfo->bpc * 16 - 1); break; } + case HDMI_PIXEL_PACKING_RGB: + case HDMI_PIXEL_PACKING_YCbCr444: + default: { bppMinX16 = 8 * 16; bppMaxX16 = (3 * pVidTransInfo->bpc * 16 - 1); break; } + } + + // cap to 12 if DSC_All_Bpp is not set + if (!pSinkCaps->pHdmiForumInfo->dsc_All_bpp) + { + bppMaxX16 = (bppMaxX16 > 12*16) ? 
12*16 : bppMaxX16; + } + + if (pVidTransInfo->bDualHeadMode && (bppMaxX16 > pSrcCaps->dscCaps.dualHeadBppTargetMaxX16)) + { + bppMaxX16 = pSrcCaps->dscCaps.dualHeadBppTargetMaxX16; + } + + *pBppMinX16 = bppMinX16; + *pBppMaxX16 = bppMaxX16; +} + + +// Determine minimum FRL rate at which Video Transport is possible at given min bpp +// Once FRL rate is found, determine the max bpp possible at this FRL rate +// To determine Primary Compressed Format using this function caller must pass in the full range of min, max FRL and min, max Bpp +// For any optimizations on top of the Primary Compressed Format, caller must adjust the range of these + +static NVHDMIPKT_RESULT +determineCompressedFRLConfig(NVHDMIPKT_CLASS *pThis, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams, + HDMI_FRL_DATA_RATE minFRLRate, + HDMI_FRL_DATA_RATE maxFRLRate, + NvU32 bppMinX16, + NvU32 bppMaxX16, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT *pResults) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + HDMI_FRL_DATA_RATE frlRate = minFRLRate; + NvU32 bppTargetX16 = bppMinX16; + NVHDMIPKT_RESULT status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + // Set bppTarget to min and iterate over FRL rates + pFRLParams->compressionInfo.bppTargetx16 = bppMinX16; + while (frlRate != HDMI_FRL_DATA_RATE_NONE) + { + translateBitRate(frlRate, &pFRLParams->frlBitRateGbps, &pFRLParams->numLanes); + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto compressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if ((status == NVHDMIPKT_SUCCESS) || + (frlRate == maxFRLRate)) + { + break; + } + + frlRate = getNextHigherLinkRate(frlRate); + } + + if (status != NVHDMIPKT_SUCCESS) + { + goto compressedQuery_exit; + } + + // We now have the base FRL rate. 
Iterate over bppTarget to find the max supported bpp + status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + bppTargetX16 = bppMaxX16; + NvU32 stepSize = 16; + + while (status != NVHDMIPKT_SUCCESS) + { + pFRLParams->compressionInfo.bppTargetx16 = bppTargetX16; + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto compressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if (status == NVHDMIPKT_SUCCESS) + { + // If this is the maxBpp nothing else to try + if (bppTargetX16 == bppMaxX16) + { + break; + } + + // If we detected a successful bppTarget value, go up a step size, + // and iterate by decrementing bppTarget by 1/16 to reach a finer tuned bpp value + if (stepSize == 16) + { + status = NVHDMIPKT_RETRY; + bppTargetX16 = bppTargetX16 + stepSize - 1; + stepSize = 1; + } + } + else + { + bppTargetX16 = bppTargetX16 - stepSize; + // bppTargetX16 is guaranteed to be >= bppMinX16 + } + } + + pResults->frlRate = frlRate; + pResults->bppTargetx16 = bppTargetX16; + +compressedQuery_exit: + if (pGetHdmiFrlCapacityComputationParams) + { + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + } + + return status; +} + +/* + * hdmiQueryFRLConfigC671 + * + * This function uses below logic: + * Verify if force params from client are in expected range + * If client is not asking for optimum config or force enable DSC, try uncompressed first + * For DSC enabled, honor all choices client has made for slice count/width. Determine the primary compressed format (PCF) first. + * For any other items client wants to control do this as optimization on top of the PCF + * Call DSC library for PPS generation unless specified otherwise. 
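+ *
+ * A minimal call-sequence sketch (variable names hypothetical; the function
+ * pointers are installed by initializeHdmiPktInterfaceC671):
+ *
+ *     HDMI_FRL_CONFIG frlConfig;
+ *     if (pClass->hdmiQueryFRLConfig(pClass, &vidTransInfo, &clientCtrl,
+ *                                    &srcCaps, &sinkCaps,
+ *                                    &frlConfig) == NVHDMIPKT_SUCCESS)
+ *     {
+ *         // bFakeLt = NV_FALSE requests real FRL link training
+ *         pClass->hdmiSetFRLConfig(pClass, subDevice, displayId,
+ *                                  NV_FALSE, &frlConfig);
+ *     }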
+ */ +static NVHDMIPKT_RESULT +hdmiQueryFRLConfigC671(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + + NVMISC_MEMSET(pFRLConfig, 0, sizeof(HDMI_FRL_CONFIG)); + + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS frlParams; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT frlComputeResult; + NvU32 bppMinX16, bppMaxX16; + + NVMISC_MEMSET(&frlParams, 0, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS)); + NVMISC_MEMSET(&frlComputeResult, 0, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT)); + + NvU32 vic = NVT_GET_CEA_FORMAT(pVidTransInfo->pTiming->etc.status); + NvBool bTryUncompressedMode, bCanUseDSC; + + populateBaseFRLParams(pVidTransInfo, + pSinkCaps, + pClientCtrl->forceAudio2Ch48KHz ? NV_TRUE : NV_FALSE, + &frlParams); + + calcBppMinMax(pSrcCaps, pSinkCaps, pVidTransInfo, &bppMinX16, &bppMaxX16); + bCanUseDSC = evaluateIsDSCPossible(pThis, pSrcCaps, pSinkCaps, pVidTransInfo, &frlParams); + + // Input validation + if ((pClientCtrl->forceFRLRate && (pClientCtrl->frlRate > pSinkCaps->linkMaxFRLRate)) || + (pClientCtrl->enableDSC && !bCanUseDSC) || + (pClientCtrl->forceSliceCount && (pClientCtrl->sliceCount > (NvU32)(NV_MIN(pSrcCaps->dscCaps.maxNumHztSlices, pSinkCaps->pHdmiForumInfo->dsc_MaxSlices)))) || + (pClientCtrl->forceSliceWidth && (pClientCtrl->sliceWidth > NV_MIN(pSrcCaps->dscCaps.maxWidthPerSlice, MAX_RECONSTRUCTED_HACTIVE_PIXELS))) || + (pClientCtrl->forceBppx16 && ((pClientCtrl->bitsPerPixelX16 < bppMinX16) || (pClientCtrl->bitsPerPixelX16 > bppMaxX16))) || + (pClientCtrl->forceBppx16 && !pSinkCaps->pHdmiForumInfo->dsc_All_bpp)) + { + return NVHDMIPKT_FAIL; + } + + bTryUncompressedMode = (bCanUseDSC && (pClientCtrl->enableDSC || + (pClientCtrl->option == HDMI_QUERY_FRL_LOWEST_BANDWIDTH))) ? 
+ NV_FALSE : NV_TRUE; + + HDMI_FRL_DATA_RATE maxRate = NV_MIN(pSinkCaps->linkMaxFRLRate, pSrcCaps->linkMaxFRLRate); + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + if (bTryUncompressedMode) + { + HDMI_FRL_DATA_RATE minFRLRate = HDMI_FRL_DATA_RATE_NONE, maxFRLRate = HDMI_FRL_DATA_RATE_NONE; + NvBool bHasPreCalcFRLData = NV_FALSE; + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->preCalc.vic = vic; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + bHasPreCalcFRLData = pGetHdmiFrlCapacityComputationParams->preCalc.bHasPreCalcFRLData; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + // We iterate over a range of FRL rates to see if timing is supported in uncompressed manner + // adjust the min and max range here according to what we aim for: if client wants to force a single FRL rate, + // min n max point to just this one rate. If client just wants any config, just try max supported rate. 
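+        // If RM reports pre-calculated FRL data for this VIC, min and max are
+        // both pinned to that single pre-calculated rate.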
+ // For everything else, iterate from lowest to highest FRL rate + if (pClientCtrl->forceFRLRate) + { + minFRLRate = pClientCtrl->frlRate; + maxFRLRate = pClientCtrl->frlRate; + } + else if (pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_BANDWIDTH) + { + minFRLRate = maxRate; + maxFRLRate = maxRate; + } + else if (bHasPreCalcFRLData) + { + HDMI_FRL_DATA_RATE preCalcFrlRate; + pGetHdmiFrlCapacityComputationParams->preCalc.packing = pVidTransInfo->packing; + pGetHdmiFrlCapacityComputationParams->preCalc.bpc = pVidTransInfo->bpc; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_UNCOMPRESSED_FRL_CONFIG; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + preCalcFrlRate = pGetHdmiFrlCapacityComputationParams->preCalc.frlRate; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + if (preCalcFrlRate <= maxRate) + { + minFRLRate = preCalcFrlRate; + maxFRLRate = preCalcFrlRate; + } + else if (!bCanUseDSC) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else if (pClientCtrl->option == HDMI_QUERY_FRL_ANY_CONFIG) + { + minFRLRate = maxRate; + maxFRLRate = maxRate; + } + else // HDMI_QUERY_FRL_OPTIMUM_CONFIG or HDMI_QUERY_FRL_LOWEST_BANDWIDTH + { + minFRLRate = HDMI_FRL_DATA_RATE_3LANES_3GBPS; + maxFRLRate = maxRate; + } + + result = determineUncompressedFRLConfig(pThis, &frlParams, minFRLRate, maxFRLRate, &frlComputeResult); + if (result == NVHDMIPKT_SUCCESS) + { + goto frlQuery_Success; + } + // If we could not find a FRL rate and DSC is not allowed, try using min audio see if it gets us a pass result + else if (!bCanUseDSC) + { + frlParams.numAudioChannels = 2; + frlParams.audioFreqKHz = 48; + frlParams.audioType = AUDIO_PKTTYPE_LPCM_SAMPLE; + result = determineUncompressedFRLConfig(pThis, &frlParams, minFRLRate, maxFRLRate, &frlComputeResult); + // If still not found return failure. 
Nothing more to try + if (result != NVHDMIPKT_SUCCESS) + { + goto frlQuery_fail; + } + } + } + + if (bCanUseDSC) + { + HDMI_FRL_DATA_RATE minFRLRateItr, maxFRLRateItr; + HDMI_FRL_DATA_RATE dscMaxFRLRate = NV_MIN(pSinkCaps->linkMaxFRLRateDSC, pSrcCaps->linkMaxFRLRate); + NvU32 bppMinX16Itr, bppMaxX16Itr; + NvBool bHasPreCalcFRLData = NV_FALSE; + + // DSC_All_bpp = 1: + // Lower the compression ratio better the pixel quality, hence a high bppTarget value will be ideal + // DSC_All_bpp = 1 allows us the flexibility to use a bppTarget setting different from the primary compressed format + // DSC_All_bpp = 0: + // Per spec, this supports only the bppTarget from primary compressed format - {minimum FRL rate, bpp, HCactive, HCblank} + + minFRLRateItr = HDMI_FRL_DATA_RATE_3LANES_3GBPS; + maxFRLRateItr = dscMaxFRLRate; + bppMinX16Itr = bppMinX16; + bppMaxX16Itr = bppMaxX16; + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->preCalc.vic = vic; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + bHasPreCalcFRLData = pGetHdmiFrlCapacityComputationParams->preCalc.bHasPreCalcFRLData; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + if (bHasPreCalcFRLData) + { + HDMI_FRL_DATA_RATE preCalcFrlRate; + NvU32 preCalcBppx16; + + if (pGetHdmiFrlCapacityComputationParams) + { + pGetHdmiFrlCapacityComputationParams->preCalc.packing = pVidTransInfo->packing; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_COMPRESSED_FRL_CONFIG; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + preCalcFrlRate = pGetHdmiFrlCapacityComputationParams->preCalc.frlRate; + preCalcBppx16 = pGetHdmiFrlCapacityComputationParams->preCalc.bppX16; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + + if (preCalcFrlRate != HDMI_FRL_DATA_RATE_UNSPECIFIED) + { + if (preCalcFrlRate > 
dscMaxFRLRate) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + maxFRLRateItr = minFRLRateItr = preCalcFrlRate; + bppMaxX16Itr = bppMinX16Itr = preCalcBppx16; + } + } + + // force SliceWidth and count if requested + if (pClientCtrl->forceSliceCount) + { + frlParams.compressionInfo.hSlices = pClientCtrl->sliceCount; + frlParams.compressionInfo.sliceWidth = NV_UNSIGNED_DIV_CEIL(pVidTransInfo->pTiming->HVisible, pClientCtrl->sliceCount); + } + else if (pClientCtrl->forceSliceWidth) + { + frlParams.compressionInfo.sliceWidth = pClientCtrl->sliceWidth; + frlParams.compressionInfo.hSlices = NV_UNSIGNED_DIV_CEIL(pVidTransInfo->pTiming->HVisible, pClientCtrl->sliceWidth); + } + + if (pClientCtrl->forceFRLRate) + { + if (pClientCtrl->frlRate > dscMaxFRLRate) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + + minFRLRateItr = pClientCtrl->frlRate; + maxFRLRateItr = pClientCtrl->frlRate; + } + + if (pClientCtrl->forceBppx16) + { + bppMinX16Itr = pClientCtrl->bitsPerPixelX16; + bppMaxX16Itr = pClientCtrl->bitsPerPixelX16; + } + + // Determine Primary Compressed Format + // First determine the FRL rate at which video transport is possible even at bppMin + // Then iterate over bppTarget - start at max n decrement until we hit bppMin. The max bpp for which + // video transport is possible together with the FRL rate is the primary compressed format + + result = determineCompressedFRLConfig(pThis, &frlParams, + minFRLRateItr, maxFRLRateItr, + bppMinX16Itr, bppMaxX16Itr, + &frlComputeResult); + + + // there are no FRL rates at which video transport is possible even at min bpp + // Could not even determine PCF. Cannot support this mode + if (result != NVHDMIPKT_SUCCESS) + { + goto frlQuery_fail; + } + + // Any other optimizations we want to do over the Primary Compressed Format? + { + NvBool bRedoDSCCalc = NV_FALSE; + + if (pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_BANDWIDTH) + { + NvBool bHasPreCalcFRLData = NV_TRUE; + + if (bHasPreCalcFRLData) + { + frlComputeResult.frlRate = dscMaxFRLRate; + } + else + { + // Keep bppTgt calculated as Primary Compressed Format and use FRL rate the highest availableLinkBw + // redo DSC calculations to recalculate TBlanktoTTotal ratio and HCblank/active to suit the new rate + // The hw method setting matters and may cause blank screen if not recalculated - see Bug 3458295 #9 + minFRLRateItr = maxFRLRateItr = dscMaxFRLRate; + bppMinX16Itr = bppMaxX16Itr = frlComputeResult.bppTargetx16; + bRedoDSCCalc = NV_TRUE; + } + } + + if (pSinkCaps->pHdmiForumInfo->dsc_All_bpp) + { + if ((pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY) && + (frlComputeResult.frlRate < (NvU32)dscMaxFRLRate)) + { + // Increase FRL rate if possible and iterate over primary compressed format bppTarget to max Bpp + minFRLRateItr = getNextHigherLinkRate(frlComputeResult.frlRate); + bppMinX16Itr = frlComputeResult.bppTargetx16; + bppMaxX16Itr = bppMaxX16; + bRedoDSCCalc = NV_TRUE; + } + + if (pClientCtrl->option == HDMI_QUERY_FRL_LOWEST_BANDWIDTH) + { + // Keep FRL rate as the primary compressed format rate and force Bpp to Min + minFRLRateItr = maxFRLRateItr = frlComputeResult.frlRate; + bppMinX16Itr = bppMaxX16Itr = bppMinX16; + bRedoDSCCalc = NV_TRUE; + } + } + + if (bRedoDSCCalc) + { + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS optQueryParams; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT optQueryResult; + NVMISC_MEMCPY(&optQueryParams, &frlParams, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS)); + + // If optimization is successful copy over new results. 
If not, no need to fail, keep Primary Compressed Format + if(determineCompressedFRLConfig(pThis, &optQueryParams, minFRLRateItr, maxFRLRateItr, + bppMinX16Itr, bppMaxX16Itr, + &optQueryResult) == NVHDMIPKT_SUCCESS) + { + NVMISC_MEMCPY(&frlParams, &optQueryParams, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS)); + NVMISC_MEMCPY(&frlComputeResult, &optQueryResult, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT)); + } + } + } + } + +frlQuery_Success: + pFRLConfig->maxSupportedAudioCh = frlParams.numAudioChannels; + pFRLConfig->maxSupportedAudioFreqKHz = frlParams.audioFreqKHz; + pFRLConfig->dscInfo.sliceCount = frlParams.compressionInfo.hSlices; + pFRLConfig->dscInfo.sliceWidth = frlParams.compressionInfo.sliceWidth; + + pFRLConfig->frlRate = frlComputeResult.frlRate; + pFRLConfig->dscInfo.bEnableDSC = frlComputeResult.engageCompression; + pFRLConfig->dscInfo.bitsPerPixelX16 = frlComputeResult.bppTargetx16; + pFRLConfig->dscInfo.dscHActiveBytes = frlComputeResult.hcActiveBytes; + pFRLConfig->dscInfo.dscHActiveTriBytes = frlComputeResult.hcActiveTriBytes; + pFRLConfig->dscInfo.dscHBlankTriBytes = frlComputeResult.hcBlankTriBytes; + pFRLConfig->dscInfo.dscTBlankToTTotalRatioX1k = frlComputeResult.tBlankToTTotalX1k; + + if (pFRLConfig->dscInfo.bEnableDSC && !pClientCtrl->skipGeneratePPS) + { + DSC_INFO dscInfo; + MODESET_INFO dscModesetInfo; + WAR_DATA warData; + + NVMISC_MEMSET(&dscInfo , 0, sizeof(DSC_INFO)); + NVMISC_MEMSET(&dscModesetInfo, 0, sizeof(MODESET_INFO)); + NVMISC_MEMSET(&warData , 0, sizeof(WAR_DATA)); + + populateDscCaps(pSrcCaps, pSinkCaps, &dscInfo); + populateDscModesetInfo(pVidTransInfo, &dscModesetInfo); + + dscInfo.forcedDscParams.sliceWidth = pFRLConfig->dscInfo.sliceWidth; + dscInfo.forcedDscParams.dscRevision.versionMajor = 1; + dscInfo.forcedDscParams.dscRevision.versionMinor = 2; + + NvU32 bitsPerPixelX16 = pFRLConfig->dscInfo.bitsPerPixelX16; + NvU32 frlBitRateGbps = 0, numLanes = 0; + translateBitRate(pFRLConfig->frlRate, &frlBitRateGbps, &numLanes); + NvU64 availableLinkBw = (NvU64)(frlBitRateGbps) * (NvU64)(numLanes) * MULTIPLIER_1G; + warData.connectorType = DSC_HDMI; + + if ((DSC_GeneratePPS(&dscInfo, + &dscModesetInfo, + &warData, + availableLinkBw, + pFRLConfig->dscInfo.pps, + &bitsPerPixelX16)) != NVT_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - DSC PPS calculation failed."); + NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + } + + // DSC lib should honor the bpp setting passed from client, assert here just in case + NvHdmiPkt_Assert(bitsPerPixelX16 == pFRLConfig->dscInfo.bitsPerPixelX16); + } + +frlQuery_fail: + if (pGetHdmiFrlCapacityComputationParams) + { + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + } + + return result; +} + +/* + * hdmiSetFRLConfigC671 + */ +static NVHDMIPKT_RESULT +hdmiSetFRLConfigC671(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS params = {0}; + NVMISC_MEMSET(¶ms, 0, sizeof(params)); + params.subDeviceInstance = subDevice; + params.displayId = displayId; + params.data = translateFRLRateToNv0073SetHdmiFrlConfig(pFRLConfig->frlRate); + params.bFakeLt = bFakeLt; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) + +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = 
pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG, + ¶ms, + sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to set HDMI FRL failed."); + NvHdmiPkt_Assert(0); + + return NVHDMIPKT_FAIL; + } + + return NVHDMIPKT_SUCCESS; +} + +/* + * hdmiClearFRLConfigC671 + */ +static NVHDMIPKT_RESULT +hdmiClearFRLConfigC671(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + + NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS params = {0}; + NVMISC_MEMSET(¶ms, 0, sizeof(params)); + params.subDeviceInstance = subDevice; + params.displayId = displayId; + params.data = NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG, + ¶ms, + sizeof(params)); + + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "WARNING - RM call to reset HDMI FRL failed."); + result = NVHDMIPKT_FAIL; + } + return result; +} + +static NVHDMIPKT_RESULT +hdmiPacketWriteC671(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NvU32 pktTypeC671 = pThis->translatePacketType(pThis, packetType); + + if (head >= NVC671_SF_HDMI_INFO_CTRL__SIZE_1 || + packetLen == 0 || + pPacket == 0 || + pktTypeC671 == NVHDMIPKT_C671_INVALID_PKT_TYPE) + { + result = NVHDMIPKT_INVALID_ARG; + goto hdmiPacketWriteC671_exit; + } + + if (pktTypeC671 == NVC671_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME) + { + // In GA10X, we use Generic infoframe for ACR WAR. This RM ctrl is used to control if the WAR is enabled/not. + NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS dispCapsParams; + + NVMISC_MEMSET(&dispCapsParams, 0, sizeof(dispCapsParams)); + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (NvRmControl(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &dispCapsParams, + sizeof(dispCapsParams)) != NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + subDevice, + NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &dispCapsParams, sizeof(dispCapsParams)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to get caps failed."); + NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + goto hdmiPacketWriteC671_exit; + } + + NvBool bSwAcr = (NV0073_CTRL_SYSTEM_GET_CAP(dispCapsParams.capsTbl, NV0073_CTRL_SYSTEM_CAPS_HDMI21_SW_ACR_BUG_3275257)) ? 
NV_TRUE: NV_FALSE;
+
+        if (bSwAcr)
+        {
+            // acquire mutex
+            pThis->callback.acquireMutex(pThis->cbHandle);
+
+            result = hdmiPacketWrite0073(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket);
+
+            if (result == NVHDMIPKT_SUCCESS)
+            {
+                result = hdmiPacketCtrl0073(pThis, subDevice, displayId, head, packetType, transmitControl);
+            }
+
+            // release mutex
+            pThis->callback.releaseMutex(pThis->cbHandle);
+        }
+        else
+        {
+            result = hdmiPacketWrite9171(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket);
+        }
+    }
+    else
+    {
+        result = hdmiPacketWrite9171(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket);
+    }
+
+hdmiPacketWriteC671_exit:
+    return result;
+}
+
+// non-HW - class utility/maintenance functions
+/*
+ * hdmiConstructorC671
+ */
+NvBool
+hdmiConstructorC671(NVHDMIPKT_CLASS* pThis)
+{
+    NvBool result = NV_TRUE;
+
+    return result;
+}
+
+/*
+ * hdmiDestructorC671
+ */
+void
+hdmiDestructorC671(NVHDMIPKT_CLASS* pThis)
+{
+    return;
+}
+
+/*
+ * initializeHdmiPktInterfaceC671
+ */
+void
+initializeHdmiPktInterfaceC671(NVHDMIPKT_CLASS* pClass)
+{
+    pClass->dispSfUserClassId          = NVC671_DISP_SF_USER;
+    pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesC671;
+    pClass->hdmiQueryFRLConfig         = hdmiQueryFRLConfigC671;
+    pClass->hdmiSetFRLConfig           = hdmiSetFRLConfigC671;
+    pClass->hdmiClearFRLConfig         = hdmiClearFRLConfigC671;
+    pClass->hdmiPacketWrite            = hdmiPacketWriteC671;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h
new file mode 100644
index 0000000..2889753
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h
@@ -0,0 +1,179 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt_class.h
+ *
+ * Purpose: This file contains the hdmipkt class definition, which defines the class interfaces.
+ */
+
+#ifndef _NVHDMIPKT_CLASS_H_
+#define _NVHDMIPKT_CLASS_H_
+
+#include "nvlimits.h"
+#include "nvhdmi_frlInterface.h"
+
+/*************************************************************************************************
+ * NOTE * This header file to be used only inside this (Hdmi Packet) library.
* + ************************************************************************************************/ +// NVHDMIPKT_CLASS_ID: HDMI packet class version +// NOTE: Anytime a new class comes with upgrades, it needs to be added here. +// Consult resman\kernel\inc\classhal.h, before adding a class. +typedef enum +{ + NVHDMIPKT_0073_CLASS = 0, // pre GK104 + NVHDMIPKT_9171_CLASS = 1, // GK104 + NVHDMIPKT_9271_CLASS = 2, // GK110 + NVHDMIPKT_9471_CLASS = 3, // GM10X + NVHDMIPKT_9571_CLASS = 4, // GM20X + NVHDMIPKT_C371_CLASS = 5, // GV100 + NVHDMIPKT_C571_CLASS = 6, // TU102 + NVHDMIPKT_C671_CLASS = 7, // GA102, T234D + NVHDMIPKT_INVALID_CLASS // Not to be used by client, and always the last entry here. +} NVHDMIPKT_CLASS_ID; + +// Hdmi packet class +struct tagNVHDMIPKT_CLASS +{ + // data + NvU32 dispSfUserClassId; // Id from nvidia/class definition + NvU32 dispSfUserSize; + NvU32 numSubDevices; + NvU32 sfUserHandle; + NVHDMIPKT_RM_CLIENT_HANDLES clientHandles; + NVHDMIPKT_MEM_MAP memMap[NV_MAX_SUBDEVICES]; + NvHdmiPkt_CBHandle cbHandle; + NVHDMIPKT_CALLBACK callback; + NVHDMIPKT_CLASS_ID thisId; + NvBool isRMCallInternal; + + // functions + NVHDMIPKT_RESULT + (*hdmiPacketCtrl) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + + NVHDMIPKT_RESULT + (*hdmiPacketWrite) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + + // HW functions - that read/write registers + NvBool + (*hdmiReadPacketStatus) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktTypeNative); + + NVHDMIPKT_RESULT + (*hdmiWritePacketCtrl) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktTypeNative, + NvU32 transmitControl, + NvBool bDisable); + + void + (*hdmiWriteAviPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteAudioPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteGenericPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteGeneralCtrlPacket)(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteVendorPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + // utility functions to translate the generic packet type and transmit control + // to corresponding rm ctrl or hw define types. + NvU32 + (*translatePacketType) (NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TYPE packetType); + + NvU32 + (*translateTransmitControl) (NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TC transmitControl); + + // + // HDMI FRL functions to enable/disable HDMI FRL and calculate the bandwidth + // capacity required for target timing. 
+ // + NVHDMIPKT_RESULT + (*hdmiAssessLinkCapabilities) (NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); + NVHDMIPKT_RESULT + (*hdmiQueryFRLConfig) (NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); + + NVHDMIPKT_RESULT + (*hdmiSetFRLConfig) (NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); + + NVHDMIPKT_RESULT + (*hdmiClearFRLConfig) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId); +}; + +#endif //_NVHDMIPKT_CLASS_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h new file mode 100644 index 0000000..771e9e7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_common.h + * + * Purpose: This file contains defines and structures used across hdmipkt library. All the + * common stuff goes here. + */ + +#ifndef _NVHDMIPKT_COMMON_H_ +#define _NVHDMIPKT_COMMON_H_ + +/************************************************************************************************* + * NOTE * This header file to be used only inside this (Hdmi Packet) library. 
* + ************************************************************************************************/ + +#include "nvhdmipkt.h" +#include "nvhdmi_frlInterface.h" +#if NVHDMIPKT_RM_CALLS_INTERNAL +#include "nvRmApi.h" +#define CALL_DISP_RM(x) x + +#endif + +/**************************** HDMI Library defines, enums and structs ***************************/ +// typedefs +typedef struct tagNVHDMIPKT_CLASS NVHDMIPKT_CLASS; +typedef struct tagNVHDMIPKT_MEM_MAP NVHDMIPKT_MEM_MAP; + +// Register read/write defines +#define REG_RD32(reg, offset) (*(((volatile NvU32*)(reg)) + ((offset)/4))) +#define REG_WR32(reg, offset, data) ((*(((volatile NvU32*)(reg)) + ((offset)/4))) = (data)) + +#define NVHDMIPKT_INVALID_SUBDEV (0xFFFFFFFF) +#define NVHDMIPKT_DONT_USE_TIMER +#define NVHDMIPKT_STATUS_READ_TIMEOUT_IN_us (1*1000*1000) /* us - micro second */ + +// Disp SF User memory map and handle structure +struct tagNVHDMIPKT_MEM_MAP +{ + NvU32 subDevice; + NvU32 memHandle; + void* pMemBase; +}; + +// HDMIPKT print define +#if defined (DEBUG) + #define NvHdmiPkt_Print(_p, ...) \ + do { \ + if ((_p)->callback.print) \ + { \ + (_p)->callback.print((_p)->cbHandle, "HdmiPacketLibrary: " __VA_ARGS__); \ + } \ + } while(0) +#else + #define NvHdmiPkt_Print(_p, ...) /* nothing */ +#endif + + +// HDMIPKT assert define +#if defined (DEBUG) + #define NvHdmiPkt_AssertP(p, expr) ((p)->callback.assert ? \ + (p)->callback.assert((p)->cbHandle, !!(expr)) : 0) + #define NvHdmiPkt_Assert(expr) NvHdmiPkt_AssertP(pThis, expr) +#else + #define NvHdmiPkt_AssertP(p, expr) + #define NvHdmiPkt_Assert(expr) +#endif + + +// Prototypes for common functions shared across implementations. +extern void hdmiWriteDummyPacket(NVHDMIPKT_CLASS*, NvU32*, NvU32, NvU32, NvU8 const *const); +extern NVHDMIPKT_RESULT hdmiAssessLinkCapabilitiesDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); +extern NVHDMIPKT_RESULT hdmiQueryFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); +extern NVHDMIPKT_RESULT hdmiSetFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); +extern NVHDMIPKT_RESULT hdmiClearFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId); + + +#endif //_NVHDMIPKT_COMMON_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h new file mode 100644 index 0000000..42487f0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * File: nvhdmipkt_internal.h
+ *
+ * Purpose: This file contains defines to be used by nvhdmipkt.c
+ */
+
+#ifndef _NVHDMIPKT_INTERNAL_H_
+#define _NVHDMIPKT_INTERNAL_H_
+
+/*************************************************************************************************
+ * NOTE * This header file to be used only inside this (Hdmi Packet) library.                   *
+ ************************************************************************************************/
+#define toHdmiPktHandle(p)    ((NvHdmiPkt_Handle)(p))
+#define fromHdmiPktHandle(h)  ((NVHDMIPKT_CLASS*)(h))
+
+extern void initializeHdmiPktInterface0073(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9171(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9271(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9471(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterface9571(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterfaceC371(NVHDMIPKT_CLASS*);
+extern void initializeHdmiPktInterfaceC671(NVHDMIPKT_CLASS*);
+
+extern NvBool hdmiConstructor0073(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor0073 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9171(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9171 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9271(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9271 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9471(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9471 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructor9571(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructor9571 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructorC371(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructorC371 (NVHDMIPKT_CLASS*);
+extern NvBool hdmiConstructorC671(NVHDMIPKT_CLASS*);
+extern void   hdmiDestructorC671 (NVHDMIPKT_CLASS*);
+
+#endif //_NVHDMIPKT_INTERNAL_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h
new file mode 100644
index 0000000..987c6fe
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h
@@ -0,0 +1,776 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: displayid.h +// +// Purpose: the template for DisplayID parsing (future replacement for EDID) +// +//***************************************************************************** + + +#ifndef __DISPLAYID_H_ +#define __DISPLAYID_H_ + +#include "nvtiming.h" + +// The structures below must be tightly packed, in order to correctly +// overlay on the EDID DisplayID extension block bytes. Both MSVC and +// gcc support the pack() pragma for this. + +#if defined(__GNUC__) || defined(_MSC_VER) +# define __SUPPORTS_PACK_PRAGMA 1 +#else +# error "unrecognized compiler: displayid structures must be tightly packed" +#endif + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack(1) +#endif + +typedef struct _tagDISPLAYID_SECTION +{ + NvU8 version; // displayid version + NvU8 section_bytes; // length of this displayID section excluding mandatory bytes [0, 251] + + NvU8 product_type; // NVT_DISPLAYID_PROD_X + NvU8 extension_count; + + NvU8 data[NVT_DISPLAYID_SECTION_MAX_SIZE]; // data blocks. Note, the length of this structure may + // exceed valid memory, as DisplayID has variable length + +} DISPLAYID_SECTION; + +#define NVT_DISPLAYID_VER_1_1 0x101 + +#define NVT_DISPLAYID_PROD_EXTENSION 0 // Extension (product type not declared) +#define NVT_DISPLAYID_PROD_TEST 1 // Test Structure/Test Equipment +#define NVT_DISPLAYID_PROD_DISPLAY_PANEL 2 // Display Panel, LCD, or PDP module, etc. 
+#define NVT_DISPLAYID_PROD_STANDALONE_MONITOR   3 // Standalone display device, desktop monitor, TV monitor
+#define NVT_DISPLAYID_PROD_RECEIVER             4 // Television receiver or display product capable of RF signals
+#define NVT_DISPLAYID_PROD_REPEATER             5 // Repeater/translator that is not intended as display device
+#define NVT_DISPLAYID_PROD_DIRECT_DRIVE         6 // Direct Drive monitor
+#define NVT_DISPLAYID_PROD_MAX_NUMBER           6 // max product number
+
+
+typedef struct _tagDISPLAYID_DATA_BLOCK_HEADER
+{
+    NvU8 type;       // identification
+    NvU8 revision;
+    NvU8 data_bytes; // number of payload bytes [0, 248]
+
+} DISPLAYID_DATA_BLOCK_HEADER;
+
+#define NVT_DISPLAYID_BLOCK_TYPE_PRODUCT_IDENTITY    0 // Product Identification block
+#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_PARAM       1 // Display Parameters block
+#define NVT_DISPLAYID_BLOCK_TYPE_COLOR_CHAR          2 // Color Characteristics block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_1            3 // Type 1 Detailed Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_2            4 // Type 2 Detailed Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_3            5 // Type 3 Short Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_4            6 // Type 4 DMT ID Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_VESA         7 // VESA Standard Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_CEA          8 // CEA Standard Timing block
+#define NVT_DISPLAYID_BLOCK_TYPE_RANGE_LIMITS        9 // Video Timing Range Limits block
+#define NVT_DISPLAYID_BLOCK_TYPE_SERIAL_NUMBER      10 // Product Serial Number block
+#define NVT_DISPLAYID_BLOCK_TYPE_ASCII_STRING       11 // General Purpose ASCII String block
+#define NVT_DISPLAYID_BLOCK_TYPE_DEVICE_DATA        12 // Display Device Data block
+#define NVT_DISPLAYID_BLOCK_TYPE_INTERFACE_POWER    13 // Interface Power Sequencing block
+#define NVT_DISPLAYID_BLOCK_TYPE_TRANSFER_CHAR      14 // Transfer Characteristics block
+#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE  15 // Display Interface Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_STEREO             16 // Stereo Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_5           17 // Type V Timing Short Descriptor
+#define NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY       18 // Tiled Display Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE_FEATURES 0x26 // DisplayID 2.0 Display Interface Features Data Block
+#define NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA         0x81 // Display ID data block
+#define NVT_DISPLAYID_BLOCK_TYPE_VENDOR_SPEC      0x7F // Vendor Specific Data Block
+
+#define NVT_DISPLAYID_PRODUCT_IDENTITY_MIN_LEN 12
+#define NVT_DISPLAYID_PRODUCT_IDENTITY_MAX_STRING_LEN 0xE9
+
+typedef struct _tagDISPLAYID_PROD_IDENTIFICATION_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+
+    NvU8  vendor[3];
+    NvU16 product_code;
+    NvU32 serial_number;
+    NvU8  model_tag;
+    NvU8  model_year;
+    NvU8  productid_string_size;
+
+    NvU8  productid_string[NVT_DISPLAYID_PRODUCT_IDENTITY_MAX_STRING_LEN];
+} DISPLAYID_PROD_IDENTIFICATION_BLOCK;
+
+typedef struct _tagDISPLAYID_DISPLAY_PARAM_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU16 horizontal_image_size;
+    NvU16 vertical_image_size;
+    NvU16 horizontal_pixel_count;
+    NvU16 vertical_pixel_count;
+
+    NvU8  feature;
+
+    NvU8  transfer_char_gamma;
+    NvU8  aspect_ratio;
+    NvU8  color_bit_depth;
+} DISPLAYID_DISPLAY_PARAM_BLOCK;
+
+#define NVT_DISPLAYID_DISPLAY_PARAM_BLOCK_LEN 0x0C
+
+#define NVT_DISPLAYID_DISPLAY_PARAM_SUPPORT_AUDIO        7:7
+#define NVT_DISPLAYID_DISPLAY_PARAM_SEPARATE_AUDIO       6:6
+#define NVT_DISPLAYID_DISPLAY_PARAM_AUDIO_INPUT_OVERRIDE 5:5
+#define NVT_DISPLAYID_DISPLAY_PARAM_POWER_MANAGEMENT     4:4
+#define 
NVT_DISPLAYID_DISPLAY_PARAM_FIXED_TIMING 3:3 +#define NVT_DISPLAYID_DISPLAY_PARAM_FIXED_PIXEL_FORMAT 2:2 +#define NVT_DISPLAYID_DISPLAY_PARAM_DEINTERLACING 0:0 + +#define NVT_DISPLAYID_DISPLAY_PARAM_DEPTH_OVERALL 7:4 +#define NVT_DISPLAYID_DISPLAY_PARAM_DEPTH_NATIVE 3:0 + +typedef struct _tagDISPLAYID_COLOR_POINT +{ + NvU8 color_x_bits_low; + NvU8 color_bits_mid; + NvU8 color_y_bits_high; +} DISPLAYID_COLOR_POINT; + +#define NVT_DISPLAYID_COLOR_POINT_Y 7:4 +#define NVT_DISPLAYID_COLOR_POINT_X 3:0 + +#define NVT_DISPLAYID_COLOR_MAX_POINTS 22 + +typedef struct _tagDISPLAYID_COLOR_CHAR_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + + // Color Characteristics Information + NvU8 point_info; + + DISPLAYID_COLOR_POINT points[NVT_DISPLAYID_COLOR_MAX_POINTS]; +} DISPLAYID_COLOR_CHAR_BLOCK; + +#define NVT_DISPLAYID_COLOR_PRIMARIES 6:4 +#define NVT_DISPLAYID_COLOR_WHITE_POINTS 3:0 +#define NVT_DISPLAYID_COLOR_TEMPORAL 7:7 + +// the following fields apply to Timing Descriptors 1-3 (Not all of them are +// used per descriptor, but the format is the same +#define NVT_DISPLAYID_TIMING_PREFERRED 7:7 +#define NVT_DISPLAYID_TIMING_3D_STEREO 6:5 +#define NVT_DISPLAYID_TIMING_3D_STEREO_MONO 0 +#define NVT_DISPLAYID_TIMING_3D_STEREO_STEREO 1 +#define NVT_DISPLAYID_TIMING_3D_STEREO_EITHER 2 +#define NVT_DISPLAYID_TIMING_INTERLACE 4:4 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO 2:0 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_1_1 0 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_5_4 1 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_4_3 2 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_15_9 3 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_9 4 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_10 5 + +typedef struct _tag_DISPLAYID_TIMING_1_DESCRIPTOR +{ + NvU8 pixel_clock_low_minus_0_01MHz; + NvU8 pixel_clock_mid; + NvU8 pixel_clock_high; + + struct + { + NvU8 aspect_ratio : 3; + NvU8 rsvd : 1; + NvU8 interface_frame_scanning_type : 1; + NvU8 stereo_support : 2; + NvU8 is_preferred_detailed_timing : 1; + }options; + + struct + { + NvU8 active_image_pixels_low_minus_1; + NvU8 active_image_pixels_high; + NvU8 blank_pixels_low_minus_1; + NvU8 blank_pixels_high; + NvU8 front_porch_low_minus_1; + NvU8 front_porch_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_low_minus_1; + NvU8 sync_width_high; + }horizontal; + + struct + { + NvU8 active_image_lines_low_minus_1; + NvU8 active_image_lines_high; + NvU8 blank_lines_low_minus_1; + NvU8 blank_lines_high; + NvU8 front_porch_lines_low_minus_1; + NvU8 front_porch_lines_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_lines_low_minus_1; + NvU8 sync_width_lines_high; + }vertical; + +} DISPLAYID_TIMING_1_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_1_MAX_DESCRIPTORS 12 + +typedef struct _tagDISPLAYID_TIMING_1_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_1_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_1_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_1_BLOCK; + +#define NVT_DISPLAYID_TIMING_1_POLARITY_SHIFT 15 +#define NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS 8 + +typedef struct _tag_DISPLAYID_TIMING_2_DESCRIPTOR +{ + NvU8 pixel_clock_low_minus_0_01MHz; + NvU8 pixel_clock_mid; + NvU8 pixel_clock_high; + + struct + { + NvU8 rsvd : 2; + NvU8 vsync_polarity : 1; + NvU8 hsync_polarity : 1; + NvU8 interface_frame_scanning_type : 1; + NvU8 stereo_support : 2; + NvU8 is_preferred_detailed_timing : 1; + }options; + + struct + { + NvU8 active_image_in_char_minus_1; + NvU8 active_image_in_char_high : 1; + NvU8 blank_in_char_minus_1 : 7; + NvU8 sync_width_in_char_minus_1 : 4; + NvU8 
front_porch_in_char_minus_1 : 4; + }horizontal; + + struct + { + NvU8 active_image_lines_low_minus_1; + NvU8 active_image_lines_high : 4; + NvU8 reserved : 4; + NvU8 blank_lines_minus_1; + NvU8 sync_width_lines_minus_1 : 4; + NvU8 front_porch_lines_minus_1 : 4; + }vertical; + +} DISPLAYID_TIMING_2_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_2_HORIZ_BLANK_PIXEL 7:1 +#define NVT_DISPLAYID_TIMING_2_HORIZ_ACTIVE_PIXEL_HIGH 0:0 +#define NVT_DISPLAYID_TIMING_2_HORIZ_OFFSET 7:4 +#define NVT_DISPLAYID_TIMING_2_HORIZ_SYNC 3:0 +#define NVT_DISPLAYID_TIMING_2_VERT_ACTIVE_PIXEL_HIGH 3:0 +#define NVT_DISPLAYID_TIMING_2_VERT_OFFSET 7:4 +#define NVT_DISPLAYID_TIMING_2_VERT_SYNC 3:0 + +#define NVT_DISPLAYID_TIMING_2_MAX_DESCRIPTORS 22 + +typedef struct _tagDISPLAYID_TIMING_2_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_2_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_2_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_2_BLOCK; + +typedef struct _TAG_DISPLAYID_TIMING_3_DESCRIPTOR +{ + NvU8 optns; + NvU8 horizontal_active_pixels; + NvU8 transfer; +} DISPLAYID_TIMING_3_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_3_FORMULA 6:4 +#define NVT_DISPLAYID_TIMING_3_FORMULA_STANDARD 0 +#define NVT_DISPLAYID_TIMING_3_FORMULA_REDUCED_BLANKING 1 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO 3:0 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_1_1 0 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_5_4 1 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_4_3 2 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_15_9 3 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_9 4 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_10 5 +#define NVT_DISPLAYID_TIMING_3_INTERLACE 7:7 +#define NVT_DISPLAYID_TIMING_3_REFRESH_RATE 6:0 + +#define NVT_DISPLAYID_TIMING_3_MAX_DESCRIPTORS 82 + +typedef struct _tagDISPLAYID_TIMING_3_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_3_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_3_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_3_BLOCK; + +#define NVT_DISPLAYID_TIMING_4_MAX_CODES NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN + +typedef struct _tagDISPLAYID_TIMING_4_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 timing_codes[NVT_DISPLAYID_TIMING_4_MAX_CODES]; +} DISPLAYID_TIMING_4_BLOCK; + +#define NVT_DISPLAYID_TIMING_5_STEREO_SUPPORT_MASK 0x60 +#define NVT_DISPLAYID_TIMING_5_FRACTIONAL_RR_SUPPORT_MASK 0x10 +#define NVT_DISPLAYID_TIMING_5_FORMULA_SUPPORT_MASK 3 + +typedef struct _TAG_DISPLAYID_TIMING_5_DESCRIPTOR +{ + NvU8 optns; + NvU8 rsvd; + NvU8 horizontal_active_pixels_low; + NvU8 horizontal_active_pixels_high; + NvU8 vertical_active_pixels_low; + NvU8 vertical_active_pixels_high; + NvU8 refresh_rate; +} DISPLAYID_TIMING_5_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_5_MAX_DESCRIPTORS 53 + +typedef struct _tagDISPLAYID_TIMING_5_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_5_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_5_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_5_BLOCK; + +#define DISPLAYID_TIMING_VESA_BLOCK_SIZE 0x0A +#define DISPLAYID_TIMING_CEA_BLOCK_SIZE 0x08 + +typedef struct _tagDISPLAYID_TIMING_MODE_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 timing_modes[DISPLAYID_TIMING_VESA_BLOCK_SIZE]; +} DISPLAYID_TIMING_MODE_BLOCK; + + +typedef struct _tagDISPLAYID_RANGE_LIMITS_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 pixel_clock_min[3]; + NvU8 pixel_clock_max[3]; + NvU8 horizontal_frequency_min; + NvU8 horizontal_frequency_max; + NvU16 horizontal_blanking_min; + NvU8 vertical_refresh_rate_min; + NvU8 vertical_refresh_rate_max; + NvU16 vertical_blanking_min; + + NvU8 
optns; +} DISPLAYID_RANGE_LIMITS_BLOCK; + +#define DISPLAYID_RANGE_LIMITS_BLOCK_LEN 0xF + +#define NVT_DISPLAYID_RANGE_LIMITS_INTERLACE 7:7 +#define NVT_DISPLAYID_RANGE_LIMITS_CVT_STANDARD 6:6 +#define NVT_DISPLAYID_RANGE_LIMITS_CVT_REDUCED 5:5 +#define NVT_DISPLAYID_RANGE_LIMITS_DFD 4:4 + +typedef struct _tagDISPLAYID_ASCII_STRING_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 data[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN]; +} DISPLAYID_ASCII_STRING_BLOCK; + +typedef struct _tagDISPLAYID_DEVICE_DATA_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + + NvU8 technology; + NvU8 operating_mode; + NvU16 horizontal_pixel_count; + NvU16 vertical_pixel_count; + NvU8 aspect_ratio; + NvU8 orientation; + + NvU8 subpixel_info; + NvU8 horizontal_pitch; + NvU8 vertical_pitch; + + NvU8 color_bit_depth; + NvU8 response_time; + +} DISPLAYID_DEVICE_DATA_BLOCK; + +#define DISPLAYID_DEVICE_DATA_BLOCK_LEN 0xD + +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_MONOCHROME 0x00 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_STANDARD 0x01 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_OTHER 0x02 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_TN 0x10 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_CHOL_LC 0x11 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_FERRO_LC 0x12 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_OTHER 0x13 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_TN 0x14 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_IPS 0x15 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_VA 0x16 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_OCB 0x17 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_FERRO 0x18 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_OTHER 0x1F +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_PLASMA_DC 0x20 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_PLASMA_AC 0x21 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROLUM 0x30 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_INORGANIC_LED 0x40 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ORGANIC_LED 0x50 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_FED 0x60 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROPHORETIC 0x70 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROCHROMIC 0x80 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROMECHANICAL 0x90 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROWETTING 0xA0 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_OTHER 0xF0 + +// Display Device operating mode info +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE 7:4 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_NO_ILLUM 0x0 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_ILLUM 0x1 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_ILLUM_DEF 0x2 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_NO_ILLUM 0x3 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_ILLUM 0x4 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_ILLUM_DEF 0x5 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_EMISSIVE 0x6 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSFLECTIVE_REF 0x7 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSFLECTIVE_TRANS 0x8 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSPARENT_AMB 0x9 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSPARENT_EMIS 0xA +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_REF 0xB +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_TRANS 0xC +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_EMIS 0xD +#define NVT_DISPLAYID_DEVICE_BACKLIGHT 3:3 +#define NVT_DISPLAYID_DEVICE_INTENSITY 2:2 + +// Display Device aspect 
ratio/orientation info
+#define NVT_DISPLAYID_DEVICE_ORIENTATION                  7:6
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_LANDSCAPE        0
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_PORTRAIT         1
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_NOT_FIXED        2
+#define NVT_DISPLAYID_DEVICE_ORIENTATION_UNDEFINED        3
+#define NVT_DISPLAYID_DEVICE_ROTATION                     5:4
+#define NVT_DISPLAYID_DEVICE_ROTATION_NONE                0
+#define NVT_DISPLAYID_DEVICE_ROTATION_CLOCKWISE           1
+#define NVT_DISPLAYID_DEVICE_ROTATION_COUNTERCLOCKWISE    2
+#define NVT_DISPLAYID_DEVICE_ROTATION_BOTH                3
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL                   3:2
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_UPPER_LEFT        0
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_UPPER_RIGHT       1
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_LOWER_LEFT        2
+#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_LOWER_RIGHT       3
+#define NVT_DISPLAYID_DEVICE_SCAN                         1:0
+#define NVT_DISPLAYID_DEVICE_SCAN_UNDEFINED               0
+#define NVT_DISPLAYID_DEVICE_SCAN_FAST_LONG               1
+#define NVT_DISPLAYID_DEVICE_SCAN_FAST_SHORT              2
+
+// Display Device Color Depth information
+#define NVT_DISPLAYID_DEVICE_COLOR_DEPTH                  3:0
+
+// Display Device Response Time information
+#define NVT_DISPLAYID_DEVICE_WHITE_BLACK                  7:7
+#define NVT_DISPLAYID_DEVICE_RESPONSE_TIME                6:0
+
+#define NVT_DISPLAYID_SUBPIXEL_UNDEFINED                  0
+#define NVT_DISPLAYID_SUBPIXEL_RGB_VERTICAL               1
+#define NVT_DISPLAYID_SUBPIXEL_RGB_HORIZONTAL             2
+#define NVT_DISPLAYID_SUBPIXEL_VERTICAL_STR               3
+#define NVT_DISPLAYID_SUBPIXEL_HORIZONTAL_STR             4
+#define NVT_DISPLAYID_SUBPIXEL_QUAD_RED_TOP_LEFT          5
+#define NVT_DISPLAYID_SUBPIXEL_QUAD_RED_BOTTOM_LEFT       6
+#define NVT_DISPLAYID_SUBPIXEL_DELTA_RGB                  7
+#define NVT_DISPLAYID_SUBPIXEL_MOSAIC                     8
+#define NVT_DISPLAYID_SUBPIXEL_QUAD_INC_WHITE             9
+#define NVT_DISPLAYID_SUBPIXEL_FIVE                       10
+#define NVT_DISPLAYID_SUBPIXEL_SIX                        11
+#define NVT_DISPLAYID_SUBPIXEL_PENTILE                    12
+
+typedef struct _tagDISPLAYID_INTERFACE_POWER_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU8 power_sequence_T1;
+    NvU8 power_sequence_T2;
+    NvU8 power_sequence_T3;
+    NvU8 power_sequence_T4_min;
+    NvU8 power_sequence_T5_min;
+    NvU8 power_sequence_T6_min;
+} DISPLAYID_INTERFACE_POWER_BLOCK;
+
+#define DISPLAYID_INTERFACE_POWER_BLOCK_LEN 0x6
+
+#define NVT_DISPLAYID_POWER_T1_MIN 7:4
+#define NVT_DISPLAYID_POWER_T1_MAX 3:0
+#define NVT_DISPLAYID_POWER_T2     5:0
+#define NVT_DISPLAYID_POWER_T3     5:0
+#define NVT_DISPLAYID_POWER_T4_MIN 6:0
+#define NVT_DISPLAYID_POWER_T5_MIN 5:0
+#define NVT_DISPLAYID_POWER_T6_MIN 5:0
+
+typedef struct _tagDISPLAYID_TRANSFER_CHAR_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU8 info;
+    NvU8 samples;
+    NvU8 curve_data[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN - 2];
+} DISPLAYID_TRANSFER_CHAR_BLOCK;
+
+typedef struct _tagDISPLAYID_INTERFACE_DATA_BLOCK
+{
+    DISPLAYID_DATA_BLOCK_HEADER header;
+    NvU8 info;
+
+    NvU8 version;
+    NvU8 color_depth_rgb;
+    NvU8 color_depth_ycbcr444;
+    NvU8 color_depth_ycbcr422;
+    NvU8 content_protection;
+    NvU8 content_protection_version;
+
+    NvU8 spread;
+
+    NvU8 interface_attribute_1;
+    NvU8 interface_attribute_2;
+} DISPLAYID_INTERFACE_DATA_BLOCK;
+
+#define DISPLAYID_INTERFACE_DATA_BLOCK_LEN 0xA
+
+#define NVT_DISPLAYID_INTERFACE_TYPE 7:4
+
+// Interface Codes (note exception for Analog Interface)
+#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG        0
+#define NVT_DISPLAYID_INTERFACE_TYPE_LVDS          1
+#define NVT_DISPLAYID_INTERFACE_TYPE_TMDS          2
+#define NVT_DISPLAYID_INTERFACE_TYPE_RSDS          3
+#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_D         4
+#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_I_ANALOG  5
+#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_I_DIGITAL 6
+#define 
NVT_DISPLAYID_INTERFACE_TYPE_HDMI_A 7 +#define NVT_DISPLAYID_INTERFACE_TYPE_HDMI_B 8 +#define NVT_DISPLAYID_INTERFACE_TYPE_MDDI 9 +#define NVT_DISPLAYID_INTERFACE_TYPE_DISPLAYPORT 10 +#define NVT_DISPLAYID_INTERFACE_TYPE_PROPRIETARY 11 + +// Analog Interface Subtype codes +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VGA 0 +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VESA_NAVI_V 1 +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VESA_NAVI_D 2 + +#define NVT_DISPLAYID_INTERFACE_NUMLINKS 3:0 +#define NVT_DISPLAYID_INTERFACE_CONTENT 2:0 +#define NVT_DISPLAYID_INTERFACE_CONTENT_NONE 0 +#define NVT_DISPLAYID_INTERFACE_CONTENT_HDCP 1 +#define NVT_DISPLAYID_INTERFACE_CONTENT_DTCP 2 +#define NVT_DISPLAYID_INTERFACE_CONTENT_DPCP 3 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE 7:6 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_NONE 0 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_DOWN 1 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_CENTER 2 +#define NVT_DISPLAYID_INTERFACE_SPREAD_PER 3:0 + +#define NVT_DISPLAYID_INTERFACE_RGB16 5:5 +#define NVT_DISPLAYID_INTERFACE_RGB14 4:4 +#define NVT_DISPLAYID_INTERFACE_RGB12 3:3 +#define NVT_DISPLAYID_INTERFACE_RGB10 2:2 +#define NVT_DISPLAYID_INTERFACE_RGB8 1:1 +#define NVT_DISPLAYID_INTERFACE_RGB6 0:0 + +#define NVT_DISPLAYID_INTERFACE_YCBCR444_16 5:5 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_14 4:4 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_12 3:3 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_10 2:2 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_8 1:1 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_6 0:0 + +#define NVT_DISPLAYID_INTERFACE_YCBCR422_16 4:4 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_14 3:3 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_12 2:2 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_10 1:1 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_8 0:0 + +// LVDS specific settings +#define NVT_DISPLAYID_LVDS_COLOR 4:4 +#define NVT_DISPLAYID_LVDS_2_8 3:3 +#define NVT_DISPLAYID_LVDS_12 2:2 +#define NVT_DISPLAYID_LVDS_5 1:1 +#define NVT_DISPLAYID_LVDS_3_3 0:0 + +#define NVT_DISPLAYID_INTERFACE_DE 2:2 +#define NVT_DISPLAYID_INTERFACE_POLARITY 1:1 +#define NVT_DISPLAYID_INTERFACE_STROBE 0:0 + +typedef struct _tagDISPLAYID_STEREO_INTERFACE_METHOD_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 stereo_bytes; + NvU8 stereo_code; + NvU8 timing_sub_block[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN]; +} DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK; + +#define NVT_DISPLAYID_STEREO_FIELD_SEQUENTIAL 0x0 +#define NVT_DISPLAYID_STEREO_SIDE_BY_SIDE 0x1 +#define NVT_DISPLAYID_STEREO_PIXEL_INTERLEAVED 0x2 +#define NVT_DISPLAYID_STEREO_DUAL_INTERFACE 0x3 +#define NVT_DISPLAYID_STEREO_MULTIVIEW 0x4 +#define NVT_DISPLAYID_STEREO_PROPRIETARY 0xFF + +#define NVT_DISPLAYID_STEREO_MIRRORING 2:1 +#define NVT_DISPLAYID_STEREO_POLARITY 0:0 + +typedef struct _tagDISPLAYID_TILED_DISPLAY_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + struct + { + NvU8 single_tile_behavior:3; // 0x03 + NvU8 multi_tile_behavior:2; // 0x03 + NvU8 rsvd :1; // 0x03 + NvU8 has_bezel_info :1; // 0x03 + NvU8 single_enclosure :1; // 0x03 + } capability; + struct + { + NvU8 row :4; // 0x04 + NvU8 col :4; // 0x04 + } topology_low; + struct + { + NvU8 y :4; // 0x05 + NvU8 x :4; // 0x05 + } location_low; + struct + { + NvU8 y :1; // 0x06 + NvU8 reserved1 :1; // 0x06 + NvU8 x :1; // 0x06 + NvU8 reserved2 :1; // 0x06 + NvU8 row :1; // 0x06 + NvU8 reserved3 :1; // 0x06 + NvU8 col :1; // 0x06 + NvU8 reserved4 :1; // 0x06 + } topo_loc_high; + struct + { + NvU8 width_low; // 0x07 + NvU8 width_high; // 0x08 + NvU8 height_low; // 0x09 + NvU8 height_high; // 0X0A 
+ } native_resolution; + struct + { + NvU8 pixel_density; // 0x0B + NvU8 top; // 0x0C + NvU8 bottom; // 0x0D + NvU8 right; // 0x0E + NvU8 left; // 0x0F + } bezel_info; + struct + { + NvU8 vendor_id[3]; // 0x10 ~ 0x12 + NvU8 product_id[2]; // 0x13 ~ 0x14 + NvU8 serial_number[4]; // 0x15 ~ 0x18 + } topology_id; +} DISPLAYID_TILED_DISPLAY_BLOCK; + +typedef struct _tagDISPLAYID_INTERFACE_FEATURES_DATA_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 supported_color_depth_rgb; + NvU8 supported_color_depth_ycbcr444; + NvU8 supported_color_depth_ycbcr422; + NvU8 supported_color_depth_ycbcr420; + NvU8 minimum_pixel_rate_ycbcr420; + NvU8 supported_audio_capability; + NvU8 supported_colorspace_eotf_combination_1; + NvU8 supported_colorspace_eotf_combination_2; + NvU8 additional_supported_colorspace_eotf_total; + NvU8 additional_supported_colorspace_eotf[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF]; +} DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK; + +#define DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK_MAX_LEN sizeof(DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK) + +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB16 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB14 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB12 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB10 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB8 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB6 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_16 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_14 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_12 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_10 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_8 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_6 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_16 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_14 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_12 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_10 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_8 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_16 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_14 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_12 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_10 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_8 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_32KHZ 7:7 +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_44_1KHZ 6:6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_48KHZ 5:5 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT2020_EOTF_SMPTE_ST2084 6:6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT2020_EOTF_BT2020 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_DCI_P3_EOTF_DCI_P3 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_ADOBE_RGB_EOTF_ADOBE_RGB 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT709_EOTF_BT1886 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT601_EOTF_BT601 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_SRGB_EOTF_SRGB 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF_TOTAL 2:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE 7:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_NOT_DEFINED 0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_SRGB 1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT601 2 +#define 
NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT709 3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_ADOBE_RGB 4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_DCI_P3 5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT2020 6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_CUSTOM 7 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF 3:0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_NOT_DEFINED 0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_SRGB 1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT601 2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT709 3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_ADOBE_RGB 4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_DCI_P3 5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT2020 6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_GAMMA 7 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_SMPTE_ST2084 8 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_HYBRID_LOG 9 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_CUSTOM 10 + + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack() +#endif + +#endif // __DISPLAYID_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h new file mode 100644 index 0000000..951f382 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h @@ -0,0 +1,752 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: displayid20.h +// +// Purpose: the template for DisplayID 2.0 parsing (future replacement for EDID) +// +//***************************************************************************** + + +#ifndef __DISPLAYID20_H_ +#define __DISPLAYID20_H_ + +#include "nvtiming.h" + +// The structures below must be tightly packed, in order to correctly +// overlay on the DisplayID 2.0 block bytes. Both MSVC and +// gcc support the pack() pragma for this. 
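+//
+// Editorial note: without pack(1) the compiler would be free to insert padding
+// between the multi-byte members below, and the structures could no longer be
+// overlaid byte-for-byte on raw DisplayID 2.0 section data.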
+
+#if defined(__GNUC__) || defined(_MSC_VER)
+# define __SUPPORTS_PACK_PRAGMA 1
+#else
+# error "unrecognized compiler: displayid structures must be tightly packed"
+#endif
+
+#ifdef __SUPPORTS_PACK_PRAGMA
+#pragma pack(1)
+#endif
+
+#define DISPLAYID_2_0_SECTION_SIZE_TOTAL(_pSectionHeader_)  ((_pSectionHeader_).section_bytes + \
+                                                             sizeof(DISPLAYID_2_0_SECTION_HEADER) + \
+                                                             sizeof(NvU8))
+#define DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(_pBlockHeader_) ((_pBlockHeader_)->data_bytes + \
+                                                             sizeof(DISPLAYID_2_0_DATA_BLOCK_HEADER))
+#define DISPLAYID_2_0_SECTION_SIZE_MAX                      256
+#define DISPLAYID_2_0_SECTION_DATA_SIZE_MAX                 (DISPLAYID_2_0_SECTION_SIZE_MAX - \
+                                                             sizeof(DISPLAYID_2_0_SECTION_HEADER))
+
+typedef struct _tagDISPLAYID_2_0_SECTION_HEADER
+{
+    NvU8 revision:4;      // displayID revision
+    NvU8 version:4;       // displayID version
+    NvU8 section_bytes;   // length of this displayID section excluding mandatory bytes [0, 251]
+
+    NvU8 product_type:4;  // Display Product Primary Use Case
+    NvU8 reserved:4;      // RESERVED
+    NvU8 extension_count; // Total extension count.
+} DISPLAYID_2_0_SECTION_HEADER;
+
+typedef struct _tagDISPLAYID_2_0_SECTION
+{
+    DISPLAYID_2_0_SECTION_HEADER header;
+
+    NvU8 data[DISPLAYID_2_0_SECTION_SIZE_MAX]; // data blocks. Note, DisplayID has variable length
+} DISPLAYID_2_0_SECTION;
+
+#define DISPLAYID_2_0_VERSION  2
+#define DISPLAYID_2_0_REVISION 0
+
+#define DISPLAYID_2_0_PROD_EXTENSION                     0 // Extension (same primary use case as base section)
+#define DISPLAYID_2_0_PROD_TEST                          1 // Test Structure/Test Equipment
+#define DISPLAYID_2_0_PROD_GENERIC_DISPLAY               2 // None of the listed primary use cases; generic display
+#define DISPLAYID_2_0_PROD_TELEVISION                    3 // Television (TV) display
+#define DISPLAYID_2_0_PROD_DESKTOP_PRODUCTIVITY_DISPLAY  4 // Desktop productivity display
+#define DISPLAYID_2_0_PROD_DESKTOP_GAMING_DISPLAY        5 // Desktop gaming display
+#define DISPLAYID_2_0_PROD_PRESENTATION_DISPLAY          6 // Presentation display
+#define DISPLAYID_2_0_PROD_HMD_VR                        7 // Head mounted Virtual Reality display
+#define DISPLAYID_2_0_PROD_HMD_AR                        8 // Head mounted Augmented Reality display
+
+typedef struct _tagDISPLAYID_2_0_DATA_BLOCK_HEADER
+{
+    NvU8 type;        // Data block tag
+    NvU8 revision:3;  // block revision
+    NvU8 reserved:5;
+    NvU8 data_bytes;  // number of payload bytes in Block [0, 248]
+} DISPLAYID_2_0_DATA_BLOCK_HEADER;
+
+#define DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY   0x20
+#define DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM      0x21
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_7           0x22
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_8           0x23
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_9           0x24
+#define DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS       0x25
+#define DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES 0x26
+#define DISPLAYID_2_0_BLOCK_TYPE_STEREO             0x27
+#define DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY      0x28
+#define DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID       0x29
+#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_10          0x2A
+#define DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC      0x2B
+#define DISPLAYID_2_0_BLOCK_TYPE_ARVR_HMD           0x2C
+#define DISPLAYID_2_0_BLOCK_TYPE_ARVR_LAYER         0x2D
+// 0x7D - 0x2E RESERVED for Additional VESA-defined Data Blocks
+#define DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC        0x7E
+// 0x80 - 0x7F RESERVED
+#define DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA           0x81
+// 0xFF - 0x82 RESERVED for additional data blocks related to external standards organization(s).
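+
+// Editorial note (not part of the original header): a parser typically walks a
+// section by reading the DISPLAYID_2_0_DATA_BLOCK_HEADER at its current offset
+// into DISPLAYID_2_0_SECTION::data, dispatching on header.type against the
+// tags above, and advancing by DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(&header)
+// until the section_bytes payload declared in the section header is consumed.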
+
+#define DISPLAYID_2_0_PRODUCT_NAME_STRING_MAX_LEN ((0xFB - 0xF) + 1)
+
+typedef struct _tagDISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK
+{
+    // Product Identification Data Block (0x20)
+    // Number of payload bytes 12(0xC) - 248(0xF8)
+    DISPLAYID_2_0_DATA_BLOCK_HEADER header;
+
+    NvU8 vendor[3];
+    NvU8 product_code[2];
+    NvU8 serial_number[4];
+    NvU8 model_tag;
+    NvU8 model_year;
+    NvU8 product_name_string_size;
+    NvU8 product_name_string[DISPLAYID_2_0_PRODUCT_NAME_STRING_MAX_LEN];
+} DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK;
+
+typedef struct _tagDISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER
+{
+    NvU8 type;                   // Display Parameters Data Block (0x21)
+    NvU8 revision:3;
+    NvU8 reserved:4;
+    NvU8 image_size_multiplier:1;
+    NvU8 data_bytes;             // number of payload bytes 29(0x1D)
+} DISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER;
+
+typedef struct _tagDISPLAYID_2_0_COLOR_CHROMATICITY
+{
+    NvU8 color_x_bits_low;
+    struct {
+        NvU8 color_x_bits_high:4;
+        NvU8 color_y_bits_low:4;
+    } color_bits_mid;
+    NvU8 color_y_bits_high;
+} DISPLAYID_2_0_COLOR_CHROMATICITY;
+
+typedef enum _tagDISPLAYID_2_0_NATIVE_COLOR_DEPTH
+{
+    NATIVE_COLOR_NOT_DEFINED = 0,
+    NATIVE_COLOR_BPC_6       = 1,
+    NATIVE_COLOR_BPC_8       = 2,
+    NATIVE_COLOR_BPC_10      = 3,
+    NATIVE_COLOR_BPC_12      = 4,
+    NATIVE_COLOR_BPC_16      = 5,
+} DISPLAYID_2_0_NATIVE_COLOR_DEPTH;
+
+#define DISPLAYID_2_0_DISPLAY_PARAM_BLOCK_PAYLOAD_LENGTH 29
+typedef struct _tagDISPLAYID_2_0_DISPLAY_PARAM_BLOCK
+{
+    DISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER header;
+
+    NvU8 horizontal_image_size[2];
+    NvU8 vertical_image_size[2];
+    NvU8 horizontal_pixel_count[2];
+    NvU8 vertical_pixel_count[2];
+
+    struct {
+        NvU8 scan_orientation          :3;
+        NvU8 luminance_information     :2;
+        NvU8 reserved                  :1;
+        NvU8 color_information         :1;
+        NvU8 audio_speaker_information :1;
+    } feature;
+
+    DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_1_chromaticity;
+    DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_2_chromaticity;
+    DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_3_chromaticity;
+    DISPLAYID_2_0_COLOR_CHROMATICITY white_point_chromaticity;
+    NvU8 max_luminance_full_coverage[2];
+    NvU8 max_luminance_1_percent_rectangular_coverage[2];
+    NvU8 min_luminance[2];
+
+    struct {
+        NvU8 color_depth             :3;
+        NvU8 reserved0               :1;
+        NvU8 device_technology       :3;
+        NvU8 device_theme_preference :1;
+    } color_depth_and_device_technology;
+
+    NvU8 gamma_EOTF;
+} DISPLAYID_2_0_DISPLAY_PARAM_BLOCK;
+
+#define DISPLAYID_2_0_SCAN_ORIENTATION_LRTB 0 // Left to right, top to bottom
+#define DISPLAYID_2_0_SCAN_ORIENTATION_RLTB 1 // Right to left, top to bottom
+#define DISPLAYID_2_0_SCAN_ORIENTATION_TBRL 2 // Top to bottom, right to left
+#define DISPLAYID_2_0_SCAN_ORIENTATION_BTRL 3 // Bottom to top, right to left
+#define DISPLAYID_2_0_SCAN_ORIENTATION_RLBT 4 // Right to left, bottom to top
+#define DISPLAYID_2_0_SCAN_ORIENTATION_LRBT 5 // Left to right, bottom to top
+#define DISPLAYID_2_0_SCAN_ORIENTATION_BTLR 6 // Bottom to top, left to right
+#define DISPLAYID_2_0_SCAN_ORIENTATION_TBLR 7 // Top to bottom, left to right
+
+#define DISPLAYID_2_0_COLOR_INFORMATION_1931_CIE 0
+#define DISPLAYID_2_0_COLOR_INFORMATION_1976_CIE 1
+
+#define DISPLAYID_2_0_AUDIO_SPEAKER_INTEGRATED     0
+#define DISPLAYID_2_0_AUDIO_SPEAKER_NOT_INTEGRATED 1
+
+#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_UNSPECIFIED 0
+#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_LCD         1
+#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_OLED        2
+
+#define DISPLAYID_2_0_TYPE7_DSC_PASSTHRU_REVISION   1
+#define DISPLAYID_2_0_TYPE7_YCC420_SUPPORT_REVISION 2
+
+// DisplayID_v2.0 E5 - DSC Pass-Through timing
+// DisplayID_v2.0 E7 
- YCC420 and > 20 bytes per descriptor supported +typedef struct _tagDISPLAYID_2_0_TIMING_7_BLOCK_HEADER +{ + NvU8 type; // Type VII Timing (0x22) + NvU8 revision :3; + NvU8 dsc_passthrough :1; + NvU8 payload_bytes_len :3; + NvU8 reserved :1; + NvU8 data_bytes; // Values range from 1(0x01) to 248(0xF8) +} DISPLAYID_2_0_TIMING_7_BLOCK_HEADER; + +typedef struct _tag_DISPLAYID_2_0_TIMING_7_DESCRIPTOR +{ + // Range is defined as 0.001 through 16,777.216 MP/s + NvU8 pixel_clock[3]; + + struct + { + NvU8 aspect_ratio : 4; + NvU8 interface_frame_scanning_type : 1; + NvU8 stereo_support : 2; + NvU8 is_preferred_or_ycc420 : 1; + } options; + + struct + { + NvU8 active_image_pixels[2]; + NvU8 blank_pixels[2]; + NvU8 front_porch_pixels_low; + NvU8 front_porch_pixels_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_pixels[2]; + } horizontal; + + struct + { + NvU8 active_image_lines[2]; + NvU8 blank_lines[2]; + NvU8 front_porch_lines_low; + NvU8 front_porch_lines_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_lines[2]; + } vertical; +} DISPLAYID_2_0_TIMING_7_DESCRIPTOR; + +#define DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS 12 + +typedef struct _tagDISPLAYID_2_0_TIMING_7_BLOCK +{ + DISPLAYID_2_0_TIMING_7_BLOCK_HEADER header; + DISPLAYID_2_0_TIMING_7_DESCRIPTOR descriptors[DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS]; +} DISPLAYID_2_0_TIMING_7_BLOCK; + +#define DISPLAYID_2_0_TIMING_DSC_PASSTHRU_TIMING 1 + +// the following fields apply to Timing Descriptors 7 (Not all of them are +// used per descriptor, but the format is the same +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_1_1 0 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_5_4 1 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_4_3 2 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_15_9 3 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_9 4 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_10 5 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_64_27 6 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_256_135 7 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_CALCULATE 8 // calculate using Horizontal and Vertical Active Image Pixels + +#define DISPLAYID_2_0_TIMING_PROGRESSIVE_SCAN 0 +#define DISPLAYID_2_0_TIMING_INTERLACED_SCAN 1 + +#define DISPLAYID_2_0_TIMING_3D_STEREO_MONO 0 +#define DISPLAYID_2_0_TIMING_3D_STEREO_STEREO 1 +#define DISPLAYID_2_0_TIMING_3D_STEREO_EITHER 2 + +#define DISPLAYID_2_0_TIMING_SYNC_POLARITY_NEGATIVE 0 +#define DISPLAYID_2_0_TIMING_SYNC_POLARITY_POSITIVE 1 + +typedef struct _tagDISPLAYID_2_0_TIMING_8_BLOCK_HEADER +{ + NvU8 type; // Type VIII Timing (0x23) + NvU8 revision :3; + NvU8 timing_code_size :1; + NvU8 reserved :1; + NvU8 is_support_yuv420 :1; + NvU8 timing_code_type :2; + NvU8 data_bytes; // Values range from 1(0x01) to 248(0xF8) +} DISPLAYID_2_0_TIMING_8_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE +{ + NvU8 timing_code; +} DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE; + +typedef struct _tagDISPLAYID_2_0_TIMING_8_TWO_BYTE_CODE +{ + NvU8 timing_code[2]; +} DISPLAYID_2_0_TIMING_8_TWO_BYTE_CODE; + +#define DISPLAYID_2_0_TIMING_8_MAX_CODES 248 + +typedef struct _tagDISPLAYID_2_0_TIMING_8_BLOCK +{ + DISPLAYID_2_0_TIMING_8_BLOCK_HEADER header; + + union + { + DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE timing_code_1[DISPLAYID_2_0_TIMING_8_MAX_CODES]; + DISPLAYID_2_0_TIMING_8_TWO_BYTE_CODE timing_code_2[DISPLAYID_2_0_TIMING_8_MAX_CODES / 2]; + }; +} DISPLAYID_2_0_TIMING_8_BLOCK; + +#define DISPLAYID_2_0_TIMING_CODE_DMT 0 +#define DISPLAYID_2_0_TIMING_CODE_CTA_VIC 1 +#define DISPLAYID_2_0_TIMING_CODE_HDMI_VIC 2 +#define 
DISPLAYID_2_0_TIMING_CODE_RESERVED 3
+#define DISPLAYID_2_0_TIMING_CODE_SIZE_1_BYTE 0
+#define DISPLAYID_2_0_TIMING_CODE_SIZE_2_BYTE 1
+
+typedef struct _tagDISPLAYID_2_0_TIMING_9_DESCRIPTOR
+{
+    struct {
+        NvU8 timing_formula          :3;
+        NvU8 reserved0               :1;
+        NvU8 rr_1000div1001_support  :1;
+        NvU8 stereo_support          :2;
+        NvU8 reserved1               :1;
+    } options;
+
+    NvU8 horizontal_active_pixels[2];
+    NvU8 vertical_active_lines[2];
+    NvU8 refresh_rate;               // 1 Hz to 256 Hz
+} DISPLAYID_2_0_TIMING_9_DESCRIPTOR;
+
+#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD           0
+#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1 1
+#define DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_2 2
+#define DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_3 3
+
+#define DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS 18
+
+typedef struct _tagDISPLAYID_2_0_TIMING_9_BLOCK
+{
+    // Type IX Timing (0x24)
+    DISPLAYID_2_0_DATA_BLOCK_HEADER header;
+    DISPLAYID_2_0_TIMING_9_DESCRIPTOR descriptors[DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS];
+} DISPLAYID_2_0_TIMING_9_BLOCK;
+
+#define DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_6 0
+#define DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7 1
+
+typedef struct _tagDISPLAYID_2_0_TIMING_10_BLOCK_HEADER
+{
+    NvU8 type;                 // Type X Timing (0x2A)
+    NvU8 revision          :3;
+    NvU8 reserved0         :1;
+    NvU8 payload_bytes_len :3;
+    NvU8 reserved1         :1;
+    NvU8 payload_bytes;
+} DISPLAYID_2_0_TIMING_10_BLOCK_HEADER;
+
+typedef struct _DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR
+{
+    struct {
+        NvU8 timing_formula          :3;
+        NvU8 early_vsync             :1;
+        NvU8 rr1000div1001_or_hblank :1;
+        NvU8 stereo_support          :2;
+        NvU8 ycc420_support          :1;
+    } options;
+
+    NvU8 horizontal_active_pixels[2];
+    NvU8 vertical_active_lines[2];
+    NvU8 refresh_rate;               // 1 Hz to 256 Hz
+} DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR;
+
+typedef struct _DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR
+{
+    DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR descriptor_6_bytes;
+    NvU8 refresh_rate_high        :2;
+    NvU8 delta_hblank             :3;
+    NvU8 additional_vblank_timing :3;
+} DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR;
+
+#define DISPLAYID_2_0_TIMING_10_MAX_6BYTES_DESCRIPTORS 18
+#define DISPLAYID_2_0_TIMING_10_MAX_7BYTES_DESCRIPTORS 16
+
+typedef struct _DISPLAYID_2_0_TIMING_10_BLOCK
+{
+    DISPLAYID_2_0_TIMING_10_BLOCK_HEADER header;
+    NvU8 descriptors[120];
+} DISPLAYID_2_0_TIMING_10_BLOCK;
+
+#define DISPLAYID_2_0_RANGE_LIMITS_BLOCK_PAYLOAD_LENGTH 9
+typedef struct _tagDISPLAYID_2_0_RANGE_LIMITS_BLOCK
+{
+    DISPLAYID_2_0_DATA_BLOCK_HEADER header;
+
+    NvU8 pixel_clock_min[3];
+    NvU8 pixel_clock_max[3];
+    NvU8 vertical_frequency_min;
+    NvU8 vertical_frequency_max_7_0;
+
+    struct {
+        NvU8 vertical_frequency_max_9_8           :2;
+        NvU8 reserved                             :5;
+        NvU8 seamless_dynamic_video_timing_change :1;
+    } dynamic_video_timing_range_support;
+} DISPLAYID_2_0_RANGE_LIMITS_BLOCK;
+
+#define DISPLAYID_2_0_SEAMLESS_DYNAMIC_VIDEO_TIMING_CHANGE_NOT_SUPPORTED 0
+#define DISPLAYID_2_0_SEAMLESS_DYNAMIC_VIDEO_TIMING_CHANGE_SUPPORTED     1
+
+#define DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK_PAYLOAD_LENGTH_MIN 9
+#define DISPLAYID_2_0_MAX_COLOR_SPACE_AND_EOTF 7
+typedef struct _tagDISPLAYID_2_0_INTERFACE_FEATURES_BLOCK
+{
+    // Display Interface Features Data Block (0x26)
+    DISPLAYID_2_0_DATA_BLOCK_HEADER header;
+
+    struct {
+        NvU8 bit_per_primary_6:1;
+        NvU8 bit_per_primary_8:1;
+        NvU8 bit_per_primary_10:1;
+        NvU8 bit_per_primary_12:1;
+        NvU8 bit_per_primary_14:1;
+        NvU8 bit_per_primary_16:1;
+        NvU8 reserved:2;
+    } interface_color_depth_rgb;
+
+    struct {
+        NvU8 bit_per_primary_6:1;
+        NvU8 bit_per_primary_8:1;
+        NvU8 
bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:2; + } interface_color_depth_ycbcr444; + + struct { + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:3; + } interface_color_depth_ycbcr422; + + struct { + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:3; + } interface_color_depth_ycbcr420; + + NvU8 min_pixel_rate_ycbcr420; // x 74.25MP/s + + struct { + NvU8 reserved:5; + NvU8 sample_rate_48_khz:1; + NvU8 sample_rate_44_1_khz:1; + NvU8 sample_rate_32_khz:1; + } audio_capability; + + struct { + NvU8 color_space_srgb_eotf_srgb:1; + NvU8 color_space_bt601_eotf_bt601:1; + NvU8 color_space_bt709_eotf_bt1886:1; + NvU8 color_space_adobe_rgb_eotf_adobe_rgb:1; + NvU8 color_space_dci_p3_eotf_dci_p3:1; + NvU8 color_space_bt2020_eotf_bt2020:1; + NvU8 color_space_bt2020_eotf_smpte_st2084:1; + NvU8 reserved:1; + } color_space_and_eotf_1; + + struct { + NvU8 reserved; + } color_space_and_eotf_2; + + struct { + NvU8 count:3; + NvU8 reserved:5; + } additional_color_space_and_eotf_count; + + struct { + NvU8 eotf:4; + NvU8 color_space:4; + } additional_color_space_and_eotf[DISPLAYID_2_0_MAX_COLOR_SPACE_AND_EOTF]; +} DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK; + +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_NOT_DEFINED 0 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_SRGB 1 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT601 2 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT709 3 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_ADOBE_RGB 4 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_DCI_P3 5 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT2020 6 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_CUSTOM 7 + +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_NOT_DEFINED 0 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_SRGB 1 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT601 2 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT709 3 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_ADOBE_RGB 4 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_DCI_P3 5 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT2020 6 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_GAMMA 7 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_SMPTE_ST2084 8 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_HYBRID_LOG 9 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_CUSTOM 10 + +typedef struct _tagDISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER +{ + NvU8 type; + NvU8 revision:3; + NvU8 reserved:3; + NvU8 stereo_timing_support:2; +} DISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR +{ + NvU8 supported_timing_code_count:5; + NvU8 reserved:1; + NvU8 timing_code_type:2; + NvU8 timing_code[0x1F]; +} DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR +{ + NvU8 polarity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR +{ + NvU8 
view_identity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR +{ + NvU8 interleaved_pattern_descriptor[8]; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR +{ + NvU8 left_and_right_polarity_descriptor:1; + NvU8 mirroring_descriptor:2; + NvU8 reserved:5; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR +{ + NvU8 views_descriptors_count; + NvU8 view_interleaving_method_code_descriptor; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR +{ + NvU8 view_identity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR +{ + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_INTERFACE_METHOD_BLOCK +{ + DISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER header; + + NvU8 stereo_bytes; + NvU8 stereo_code; + union { + DISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR field_sequential; + DISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR side_by_side; + DISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR pixel_interleaved; + DISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR dual_interface; + DISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR multi_view; + DISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR stacked_frame; + DISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR proprietary; + }; +} DISPLAYID_2_0_STEREO_INTERFACE_METHOD_BLOCK; + +#define DISPLAYID_2_0_STEREO_CODE_FIELD_SEQUENTIAL 0x0 +#define DISPLAYID_2_0_STEREO_CODE_SIDE_BY_SIDE 0x1 +#define DISPLAYID_2_0_STEREO_CODE_PIXEL_INTERLEAVED 0x2 +#define DISPLAYID_2_0_STEREO_CODE_DUAL_INTERFACE 0x3 +#define DISPLAYID_2_0_STEREO_CODE_MULTIVIEW 0x4 +#define DISPLAYID_2_0_STEREO_CODE_STACKED_FRAME 0x5 +#define DISPLAYID_2_0_STEREO_CODE_PROPRIETARY 0xFF + +#define DISPLAYID_STEREO_MIRRORING 2:1 +#define DISPLAYID_STEREO_POLARITY 0:0 + +#define DISPLAYID_2_0_TILED_DISPLAY_BLOCK_PAYLOAD_LENGTH 22 +typedef struct _tagDISPLAYID_2_0_TILED_DISPLAY_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + struct + { + NvU8 single_tile_behavior:3; // 0x03 + NvU8 multi_tile_behavior:2; // 0x03 + NvU8 rsvd :1; // 0x03 + NvU8 has_bezel_info :1; // 0x03 + NvU8 single_enclosure :1; // 0x03 + } capability; + struct + { + NvU8 row :4; // 0x04 + NvU8 col :4; // 0x04 + } topo_low; + struct + { + NvU8 y :4; // 0x05 + NvU8 x :4; // 0x05 + } loc_low; + struct + { + NvU8 y :2; // 0x06 + NvU8 x :2; // 0x06 + NvU8 row :2; // 0x06 + NvU8 col :2; // 0x06 + } topo_loc_high; + struct + { + NvU8 width_low; // 0x07 + NvU8 width_high; // 0x08 + NvU8 height_low; // 0x09 + NvU8 height_high; // 0X0A + } native_resolution; + struct + { + NvU8 pixel_density; // 0x0B + NvU8 top; // 0x0C + NvU8 bottom; // 0x0D + NvU8 right; // 0x0E + NvU8 left; // 0x0F + } bezel_info; + struct + { + NvU8 vendor_id[3]; // 0x10 ~ 0x12 + NvU8 product_id[2]; // 
0x13 ~ 0x14 + NvU8 serial_number[4]; // 0x15 ~ 0x18 + } topo_id; +} DISPLAYID_2_0_TILED_DISPLAY_BLOCK; + +#define DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH 16 +typedef struct _tagDISPLAYID_2_0_CONTAINERID_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 container_id[DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH]; +} DISPLAYID_2_0_CONTAINERID_BLOCK; + +#define DISPLAYID_2_0_ADAPTIVE_SYNC_DETAILED_TIMING_COUNT 4 +typedef struct _tagDISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK_HEADER +{ + NvU8 type; // Adaptive-Sync (0x2B) + NvU8 revision :3; + NvU8 reserved0 :1; + NvU8 payload_bytes_adaptive_sync_len :3; + NvU8 reserved1 :1; + NvU8 payload_bytes; +} DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR +{ + struct + { + NvU8 range : 1; + NvU8 successive_frame_inc_tolerance : 1; + NvU8 modes : 2; + NvU8 seamless_transition_not_support: 1; + NvU8 successive_frame_dec_tolerance : 1; + NvU8 reserved : 2; + } operation_range_info; + + // 6.2 format (six integer bits and two fractional bits) + // six integer bits == 0 - 63ms + // two fractional bits == 0.00(00), 0.25(01b),0.50(10), 0.75(11b) + NvU8 max_single_frame_inc; + NvU8 min_refresh_rate; + struct + { + NvU8 max_rr_7_0; + NvU8 max_rr_9_8 : 2; + NvU8 reserved : 6; + } max_refresh_rate; + + // same as max_single_frame_inc expression + NvU8 max_single_frame_dec; +} DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK +{ + DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK_HEADER header; + DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR descriptors[DISPLAYID_2_0_ADAPTIVE_SYNC_DETAILED_TIMING_COUNT]; +} DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK; + +typedef struct _tagDISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 vendor_id[3]; + NvU8 vendor_specific_data[245]; +} DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK; + +typedef struct _tagDISPLAYID_2_0_CTA_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 cta_data[248]; +} DISPLAYID_2_0_CTA_BLOCK; + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack() +#endif + +// Entry point functions both used in DID20 and DID20ext +NVT_STATUS parseDisplayId20DataBlock(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NvU8 computeDisplayId20SectionCheckSum(const NvU8 *pSectionBytes, NvU32 length); + +#endif // __DISPLAYID20_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h new file mode 100644 index 0000000..43754d6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h @@ -0,0 +1,373 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+===============================================================================
+
+    dpsdp.h
+
+    Provides definitions needed for the DisplayPort secondary data packet.
+
+================================================================================
+*/
+
+#ifndef __DPSDP_H__
+#define __DPSDP_H__
+
+#include "nvtypes.h"
+
+#define DP_SDP_HEADER_SIZE 4
+#define DP_SDP_DATA_SIZE   28
+
+// TODO: needs to wait for RM to provide the enum. Therefore, hardcoded to 7, which is the packet type for VSC SDP
+typedef enum tagSDP_PACKET_TYPE
+{
+    SDP_PACKET_TYPE_VSC = 7,
+} SDP_PACKET_TYPE;
+
+typedef enum tagSDP_VSC_REVNUM
+{
+    SDP_VSC_REVNUM_STEREO = 1,
+    SDP_VSC_REVNUM_STEREO_PSR,
+    SDP_VSC_REVNUM_STEREO_PSR2,
+    SDP_VSC_REVNUM_PSR2_EXTN,
+    SDP_VSC_REVNUM_STEREO_PSR2_COLOR,
+    SDP_VSC_REVNUM_STEREO_PR,
+    SDP_VSC_REVNUM_STEREO_PR_COLOR,
+} SDP_VSC_REVNUM;
+
+typedef enum tagSDP_VSC_VALID_DATA_BYTES
+{
+    SDP_VSC_VALID_DATA_BYTES_STEREO     = 1,
+    SDP_VSC_VALID_DATA_BYTES_STEREO_PSR = 8,
+    SDP_VSC_VALID_DATA_BYTES_PSR2       = 12,
+    SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR = 19,
+    SDP_VSC_VALID_DATA_BYTES_PR         = 16,
+    SDP_VSC_VALID_DATA_BYTES_PR_COLOR   = 19,
+} SDP_VSC_VALID_DATA_BYTES;
+
+typedef enum tagSDP_VSC_DYNAMIC_RANGE
+{
+    SDP_VSC_DYNAMIC_RANGE_VESA,
+    SDP_VSC_DYNAMIC_RANGE_CEA,
+} SDP_VSC_DYNAMIC_RANGE;
+
+typedef enum tagSDP_VSC_PIX_ENC
+{
+    SDP_VSC_PIX_ENC_RGB,
+    SDP_VSC_PIX_ENC_YCBCR444,
+    SDP_VSC_PIX_ENC_YCBCR422,
+    SDP_VSC_PIX_ENC_YCBCR420,
+    SDP_VSC_PIX_ENC_Y,
+    SDP_VSC_PIX_ENC_RAW,
+} SDP_VSC_PIX_ENC;
+
+typedef enum tagSDP_VSC_BIT_DEPTH_RGB
+{
+    SDP_VSC_BIT_DEPTH_RGB_6BPC = 0,
+    SDP_VSC_BIT_DEPTH_RGB_8BPC,
+    SDP_VSC_BIT_DEPTH_RGB_10BPC,
+    SDP_VSC_BIT_DEPTH_RGB_12BPC,
+    SDP_VSC_BIT_DEPTH_RGB_16BPC,
+
+} SDP_VSC_BIT_DEPTH_RGB;
+
+typedef enum tagSDP_VSC_BIT_DEPTH_YCBCR
+{
+    SDP_VSC_BIT_DEPTH_YCBCR_8BPC = 1,
+    SDP_VSC_BIT_DEPTH_YCBCR_10BPC,
+    SDP_VSC_BIT_DEPTH_YCBCR_12BPC,
+    SDP_VSC_BIT_DEPTH_YCBCR_16BPC,
+
+} SDP_VSC_BIT_DEPTH_YCBCR;
+
+typedef enum tagSDP_VSC_BIT_DEPTH_RAW
+{
+    SDP_VSC_BIT_DEPTH_RAW_6BPC = 1,
+    SDP_VSC_BIT_DEPTH_RAW_7BPC,
+    SDP_VSC_BIT_DEPTH_RAW_8BPC,
+    SDP_VSC_BIT_DEPTH_RAW_10BPC,
+    SDP_VSC_BIT_DEPTH_RAW_12BPC,
+    SDP_VSC_BIT_DEPTH_RAW_14BPC,
+    SDP_VSC_BIT_DEPTH_RAW_16BPC,
+
+} SDP_VSC_BIT_DEPTH_RAW;
+
+typedef enum tagSDP_VSC_CONTENT_TYPE
+{
+    SDP_VSC_CONTENT_TYPE_UNDEFINED = 0,
+    SDP_VSC_CONTENT_TYPE_GRAPHICS,
+    SDP_VSC_CONTENT_TYPE_PHOTO,
+    SDP_VSC_CONTENT_TYPE_VIDEO,
+    SDP_VSC_CONTENT_TYPE_GAMES,
+
+} SDP_VSC_CONTENT_TYPE;
+
+typedef enum tagSDP_VSC_COLOR_FMT_RGB_COLORIMETRY
+{
+    SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_SRGB = 0,
+    SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_RGB_WIDE_GAMUT_FIXED,
+    SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_RGB_SCRGB,
+    SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ADOBERGB,
+    SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_DCI_P3,
+    SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_CUSTOM,
+    SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ITU_R_BT2020_RGB,
+} SDP_VSC_COLOR_FMT_RGB_COLORIMETRY;
+
+typedef enum tagSDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY
+{
+    SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT601 = 0,
+    
SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_XVYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_XVYCC709, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_SYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ADOBEYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCCBCCRC, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCBCR, +} SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY; + +typedef enum tagSDP_VSC_COLOR_FMT_RAW_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_RAW_COLORIMETRY_CUSTOM_COLOR_PROFILE = 0, +} SDP_VSC_COLOR_FMT_RAW; + +typedef enum tagSDP_VSC_COLOR_FMT_Y_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_Y_COLORIMETRY_DICOM = 0, +} SDP_VSC_COLOR_FMT_Y; + +// The struct element field hb and db fields are arranged to match the HW registers +// NV_PDISP_SF_DP_GENERIC_INFOFRAME_HEADER* and NV_PDISP_SF_DP_GENERIC_INFOFRAME_SUBPACK0_DB* +typedef struct tagDPSDP_DP_VSC_SDP_DESCRIPTOR +{ + NvU8 dataSize; // the db data size + + // header + struct + { + NvU8 hb0; // DP1.3 spec, the value = 0 + NvU8 hb1; // DP1.3 spec, value = 7 + NvU8 revisionNumber : 5; + NvU8 hb2Reserved : 3; + NvU8 numValidDataBytes : 5; // number of valid data bytes + NvU8 hb3Reserved : 3; + } hb; + + // data content + struct + { + // Stereo field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 stereoInterface; // DB0 + // PSR Field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 psrState : 1; //DB1 + NvU8 psrUpdateRfb : 1; + NvU8 psrCrcValid : 1; + NvU8 psrSuValid : 1; + NvU8 psrSuFirstScanLine : 1; + NvU8 psrSuLastScanLine : 1; + NvU8 psrYCoordinateValid : 1; + NvU8 psrReserved : 1; + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + // DB8 - DB15 are undefined in DP 1.3 spec. + NvU8 db8; + NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + + // Colorimetry Infoframe Secondary Data Package following DP1.3 spec + NvU8 colorimetryFormat : 4; // DB16 infoframe per DP1.3 spec + NvU8 pixEncoding : 4; // DB16 infoframe per DP1.3 spec + + NvU8 bitDepth : 7; // DB17 infoframe per DP1.3 spec + NvU8 dynamicRange : 1; // DB17 infoframe per DP1.3 spec + + NvU8 contentType : 3; // DB18 infoframe per DP1.3 spec + NvU8 db18Reserved : 5; + + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + } db; + +} DPSDP_DP_VSC_SDP_DESCRIPTOR; + +typedef struct tagDPSDP_DP_PR_VSC_SDP_DESCRIPTOR +{ + NvU8 dataSize; // the db data size + + // header + struct + { + NvU8 hb0; // DP1.3 spec, the value = 0 + NvU8 hb1; // DP1.3 spec, value = 7 + NvU8 revisionNumber : 5; + NvU8 hb2Reserved : 3; + NvU8 numValidDataBytes : 5; // number of valid data bytes + NvU8 hb3Reserved : 3; + } hb; + + // data content + struct + { + // Stereo field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 stereoInterface; // DB0 + // PSR Field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 prState : 1; // DB1 + NvU8 prReserved : 1; // Always ZERO + NvU8 prCrcValid : 1; + NvU8 prSuValid : 1; + NvU8 prReservedEx : 4; + + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + // DB8 - DB15 are undefined in DP 1.3 spec. 
+        NvU8  db8;
+ NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + + // Colorimetry Infoframe Secondary Data Package following DP1.3 spec + NvU8 colorimetryFormat : 4; // DB16 infoframe per DP1.3 spec + NvU8 pixEncoding : 4; // DB16 infoframe per DP1.3 spec + + NvU8 bitDepth : 7; // DB17 infoframe per DP1.3 spec + NvU8 dynamicRange : 1; // DB17 infoframe per DP1.3 spec + + NvU8 contentType : 3; // DB18 infoframe per DP1.3 spec + NvU8 db18Reserved : 5; + + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + } db; + +} DPSDP_DP_PR_VSC_SDP_DESCRIPTOR; + +typedef struct tagDPSDP_DESCRIPTOR +{ + NvU8 dataSize; + + // header byte + struct + { + NvU8 hb0; + NvU8 hb1; + NvU8 hb2; + NvU8 hb3; + } hb; + + // content byte + struct + { + NvU8 db0; + NvU8 db1; + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + NvU8 db8; + NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + NvU8 db16; + NvU8 db17; + NvU8 db18; + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + NvU8 db28; + NvU8 db29; + NvU8 db30; + NvU8 db31; + } db; + +} DPSDP_DESCRIPTOR; + +// The following #defines are for RGB only +#define DP_VSC_SDP_BIT_DEPTH_RGB_6BPC 0 +#define DP_VSC_SDP_BIT_DEPTH_RGB_8BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_RGB_10BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_RGB_12BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_RGB_16BPC 4 + +// The following #defines are for YUV only +#define DP_VSC_SDP_BIT_DEPTH_YUV_8BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_YUV_10BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_YUV_12BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_YUV_16BPC 4 + +// The following #defines are for RAW only +#define DP_VSC_SDP_BIT_DEPTH_RAW_6BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_RAW_7BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_RAW_8BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_RAW_10BPC 4 +#define DP_VSC_SDP_BIT_DEPTH_RAW_12BPC 5 +#define DP_VSC_SDP_BIT_DEPTH_RAW_14BPC 6 +#define DP_VSC_SDP_BIT_DEPTH_RAW_16BPC 7 + +#define DP_INFOFRAME_SDP_V1_3_VERSION 0x13 +#define DP_INFOFRAME_SDP_V1_3_HB3_VERSION_MASK 0xFC +#define DP_INFOFRAME_SDP_V1_3_HB3_VERSION_SHIFT 2 +#define DP_INFOFRAME_SDP_V1_3_HB3_MSB_MASK 0x3 +#define DP_INFOFRAME_SDP_V1_3_NON_AUDIO_SIZE 30 +#endif // __DPSDP_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h new file mode 100644 index 0000000..ac2392c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h @@ -0,0 +1,352 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: edid.h
+//
+// Purpose: template structures for EDID parsing
+//
+//*****************************************************************************
+
+#ifndef __EDID_H_
+#define __EDID_H_
+
+#include "nvtiming_pvt.h"
+#include "displayid.h"
+#include "displayid20.h"
+
+// EDID 1.x detailed timing template
+
+#define NVT_PVT_EDID_LDD_PAYLOAD_SIZE 13
+
+typedef struct _tagEDID_LONG_DISPLAY_DESCRIPTOR
+{
+    // the header
+    NvU8 prefix[2];     // 0x00 ~ 0x01
+    NvU8 rsvd;          // 0x02
+    NvU8 tag;           // 0x03
+    NvU8 rsvd2;         // 0x04
+
+    // the payload
+    NvU8 data[NVT_PVT_EDID_LDD_PAYLOAD_SIZE];   // 0x05~0x11
+}EDID_LONG_DISPLAY_DESCRIPTOR;
+typedef struct _tagEDID_MONITOR_RANGE_GTF2
+{
+    NvU8 reserved;      // byte 0x0B: reserved as 00
+    NvU8 startFreq;     // byte 0x0C: start frequency for secondary curve, hor. freq./2 [kHz]
+    NvU8 C;             // byte 0x0D: C*2  0 <= C <= 127
+    NvU8 M_LSB;         // byte 0x0E-0x0F: M (LSB)  0 <= M <= 65535
+    NvU8 M_MSB;
+    NvU8 K;             // byte 0x10: K  0 <= K <= 255
+    NvU8 J;             // byte 0x11: J*2  0 <= J <= 127
+}EDID_MONITOR_RANGE_GTF2;
+
+typedef struct _tagEDID_MONITOR_RANGE_CVT
+{
+    NvU8 version;                   // byte 0x0B: cvt version
+    NvU8 pixel_clock;               // byte 0x0C: [bits 7:2]pixel clock precision
+                                    //            [bits 1:0]max active MSB
+    NvU8 max_active;                // byte 0x0D: with byte 12 [bits 1:0], max active pixels per line
+    NvU8 aspect_supported;          // byte 0x0E: supported aspect ratios
+    NvU8 aspect_preferred_blanking; // byte 0x0F: preferred aspect ratio / blanking style support
+    NvU8 scaling_support;           // byte 0x10: display scaling support
+    NvU8 preferred_refresh_rate;    // byte 0x11: preferred vertical refresh rate
+}EDID_MONITOR_RANGE_CVT;
+
+// cvt support in display range limit block
+#define NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK       0xFC
+#define NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT      2
+#define NVT_PVT_EDID_CVT_ACTIVE_MSB_MASK        0x03
+#define NVT_PVT_EDID_CVT_ACTIVE_MSB_SHIFT       8
+
+#define NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_MASK  0xF8
+#define NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_SHIFT 3
+#define NVT_PVT_EDID_CVT_RESERVED0_MASK         0x07
+#define NVT_PVT_EDID_CVT_RESERVED0_SHIFT        0
+
+#define NVT_PVT_EDID_CVT_ASPECT_PREFERRED_MASK  0xE0
+#define NVT_PVT_EDID_CVT_ASPECT_PREFERRED_SHIFT 5
+#define NVT_PVT_EDID_CVT_BLANKING_MASK          0x18
+#define NVT_PVT_EDID_CVT_BLANKING_SHIFT         3
+#define NVT_PVT_EDID_CVT_RESERVED1_MASK         0x07
+#define NVT_PVT_EDID_CVT_RESERVED1_SHIFT        0
+
+#define NVT_PVT_EDID_CVT_SCALING_MASK           0xF0
+#define NVT_PVT_EDID_CVT_SCALING_SHIFT          4
+#define NVT_PVT_EDID_CVT_RESERVED2_MASK         0x0F
+#define NVT_PVT_EDID_CVT_RESERVED2_SHIFT        0
+
+typedef struct _tagEDID_MONITOR_RANGE_LIMIT
+{
+    // the header in monitor descriptor data
+    NvU8 minVRate;          // byte 0x05: min vertical rate
+    NvU8 maxVRate;          // byte 0x06: max vertical rate
+    NvU8 minHRate;          // byte 0x07: min horizontal rate
+    NvU8 maxHRate;          // byte 0x08: max horizontal rate
+    NvU8 maxPClock10M;      // byte 0x09: max pixel clock in 10M
+    NvU8 timing_support;    // byte 0x0A: 2nd GTF / CVT timing formula support
+    union
+    {
+        EDID_MONITOR_RANGE_GTF2 gtf2;   // bytes 0x0B-0x11
+        EDID_MONITOR_RANGE_CVT  cvt;    // ... 
+ }u; +} EDID_MONITOR_RANGE_LIMIT; + +// timing_support +#define NVT_PVT_EDID_RANGE_OFFSET_VER_MIN 0x01 +#define NVT_PVT_EDID_RANGE_OFFSET_VER_MAX 0x02 +#define NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN 0x04 +#define NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX 0x08 +#define NVT_PVT_EDID_RANGE_OFFSET_AMOUNT 255 + +typedef struct _tagEDID_CVT_3BYTE_BLOCK +{ + NvU8 addressable_lines; // byte 0: 8 lsb of addressable lines + NvU8 lines_ratio; // byte 1 : [bits7:4] 4 msb of addressable lines [bits3:2] aspect ratio + NvU8 refresh_rates; // byte 2 : supported/preferred refresh rates +}EDID_CVT_3BYTE_BLOCK; + +typedef struct _tagEDID_CVT_3BYTE +{ + // the header in monitor descriptor data. + NvU8 version; // byte 0x05 : version code (0x01) + EDID_CVT_3BYTE_BLOCK block[NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR]; // bytes 0x06-0x11 +}EDID_CVT_3BYTE; + +// CVT 3byte +#define NVT_PVT_EDID_CVT3_LINES_MSB_MASK 0xF0 +#define NVT_PVT_EDID_CVT3_LINES_MSB_SHIFT 4 +#define NVT_PVT_EDID_CVT3_ASPECT_MASK 0x0C +#define NVT_PVT_EDID_CVT3_ASPECT_SHIFT 2 + +#define NVT_PVT_EDID_CVT3_PREFERRED_RATE_MASK 0x60 +#define NVT_PVT_EDID_CVT3_PREFERRED_RATE_SHIFT 5 +#define NVT_PVT_EDID_CVT3_SUPPORTED_RATE_MASK 0x1F +#define NVT_PVT_EDID_CVT3_SUPPORTED_RATE_SHIFT 0 + +typedef struct _tagEDID_COLOR_POINT_DATA +{ + NvU8 wp1_index; // 0x05: white point index number + NvU8 wp1_x_y; // 0x06: [bits3:2] lsb of wp1_x [bits1:0] lsb of wp1_y + NvU8 wp1_x; // 0x07: msb of wp1_x + NvU8 wp1_y; // 0x08: msb of wp1_y + NvU8 wp1_gamma; // 0x09: (gamma x 100) - 100 + NvU8 wp2_index; // 0x0A: ... + NvU8 wp2_x_y; // 0x0B: ... + NvU8 wp2_x; // 0x0C: ... + NvU8 wp2_y; // 0x0D: ... + NvU8 wp2_gamma; // 0x0E: ... + NvU8 line_feed; // 0x0F: reserved for line feed (0x0A) + NvU16 reserved0; // 0x10-0x11: reserved for space (0x2020) +}EDID_COLOR_POINT_DATA; + +#define NVT_PVT_EDID_CPD_WP_X_MASK 0x0C +#define NVT_PVT_EDID_CPD_WP_X_SHIFT 2 +#define NVT_PVT_EDID_CPD_WP_Y_MASK 0x03 +#define NVT_PVT_EDID_CPD_WP_Y_SHIFT 0 + +typedef struct _tagEDID_STANDARD_TIMING_ID +{ + NvU16 std_timing[NVT_EDID_DD_STI_NUM]; //0x05-0x10: 6 standard timings + NvU8 line_feed; //0x11: reserved for line feed (0x0A) +}EDID_STANDARD_TIMING_ID; + +typedef struct _tagEDID_COLOR_MANAGEMENT_DATA +{ + NvU8 version; //0x05: version (0x03) + NvU8 red_a3_lsb; //0x06: Red a3 LSB + NvU8 red_a3_msb; //0x07: Red a3 MSB + NvU8 red_a2_lsb; //0x08 + NvU8 red_a2_msb; //0x09 + NvU8 green_a3_lsb; //0x0A + NvU8 green_a3_msb; //0x0B + NvU8 green_a2_lsb; //0x0C + NvU8 green_a2_msb; //0x0D + NvU8 blue_a3_lsb; //0x0E + NvU8 blue_a3_msb; //0x0F + NvU8 blue_a2_lsb; //0x10 + NvU8 blue_a2_msb; //0x11 +}EDID_COLOR_MANAGEMENT_DATA; + +typedef struct _tagEDID_EST_TIMINGS_III +{ + NvU8 revision; //0x05: revision (0x0A) + NvU8 timing_byte[12]; //0x05-0x11: established timings III +}EDID_EST_TIMINGS_III; + +typedef struct _tagDETAILEDTIMINGDESCRIPTOR +{ + NvU16 wDTPixelClock; // 0x00 + NvU8 bDTHorizontalActive; // 0x02 + NvU8 bDTHorizontalBlanking; // 0x03 + NvU8 bDTHorizActiveBlank; // 0x04 + NvU8 bDTVerticalActive; // 0x05 + NvU8 bDTVerticalBlanking; // 0x06 + NvU8 bDTVertActiveBlank; // 0x07 + NvU8 bDTHorizontalSync; // 0x08 + NvU8 bDTHorizontalSyncWidth; // 0x09 + NvU8 bDTVerticalSync; // 0x0A + NvU8 bDTHorizVertSyncOverFlow; // 0x0B + NvU8 bDTHorizontalImage; // 0x0C + NvU8 bDTVerticalImage; // 0x0D + NvU8 bDTHorizVertImage; // 0x0E + NvU8 bDTHorizontalBorder; // 0x0F + NvU8 bDTVerticalBorder; // 0x10 + NvU8 bDTFlags; // 0x11 +}DETAILEDTIMINGDESCRIPTOR; + +// EDID 1.x basic block template +typedef struct _tagEDIDV1STRUC +{ + 
NvU8 bHeader[8]; // 0x00-0x07 + NvU16 wIDManufName; // 0x08 + NvU16 wIDProductCode; // 0x0A + NvU32 dwIDSerialNumber; // 0x0C + NvU8 bWeekManuf; // 0x10 + NvU8 bYearManuf; // 0x11 + NvU8 bVersionNumber; // 0x12 + NvU8 bRevisionNumber; // 0x13 + NvU8 bVideoInputDef; // 0x14 + NvU8 bMaxHorizImageSize; // 0x15 + NvU8 bMaxVertImageSize; // 0x16 + NvU8 bDisplayXferChar; // 0x17 + NvU8 bFeatureSupport; // 0x18 + NvU8 Chromaticity[10]; // 0x19-0x22 + NvU8 bEstablishedTimings1; // 0x23 + NvU8 bEstablishedTimings2; // 0x24 + NvU8 bManufReservedTimings; // 0x25 + NvU16 wStandardTimingID[8]; // 0x26 + DETAILEDTIMINGDESCRIPTOR DetailedTimingDesc[4]; // 0x36 + NvU8 bExtensionFlag; // 0x7E + NvU8 bChecksum; // 0x7F +}EDIDV1STRUC; + +// EDID 2.x basic block template +typedef struct _tagEDIDV2STRUC +{ + NvU8 bHeader; // 0x00 + NvU16 wIDManufName; // 0x01 + NvU16 wIDProductCode; // 0x03 + NvU8 bWeekManuf; // 0x05 + NvU16 wYearManuf; // 0x06 + NvU8 bProductIDString[32]; // 0x08 + NvU8 bSerialNumber[16]; // 0x28 + NvU8 bReserved1[8]; // 0x38 + NvU8 bPhysicalInterfaceType; // 0x40 + NvU8 bVideoInterfaceType; // 0x41 + NvU8 bInterfaceDataFormat[8]; // 0x42 + NvU8 bInterfaceColor[5]; // 0x4A + NvU8 bDisplayTechType; // 0x4F + NvU8 bMajorDisplayChar; // 0x50 + NvU8 bFeaturesSupported[3]; // 0x51 + NvU16 wDisplayResponseTime; // 0x54 + NvU32 dwDisplayXferChar; // 0x56 + NvU32 dwMaxLuminance; // 0x5A + NvU8 bColorimetry[20]; // 0x5E + NvU16 wMaxHorizImageSize; // 0x72 + NvU16 wMaxVertImageSize; // 0x74 + NvU16 wMaxHorizAddressibility; // 0x76 + NvU16 wMaxVertAddressibility; // 0x78 + NvU8 bHorizPixelPitch; // 0x7A + NvU8 bVertPixelPitch; // 0x7B + NvU8 bReserved2; // 0x7C + NvU8 bGTFSupportInfo; // 0x7D + NvU16 wTimingInfoMap; // 0x7E + NvU8 bTableDescriptors[127]; // 0x80 + NvU8 bChecksum; // 0xFF +}EDIDV2STRUC; + +// EDID CEA/EIA-861 extension block template +typedef struct _tagEIA861EXTENSION +{ + NvU8 tag; // 0x00 + NvU8 revision; // 0x01 + NvU8 offset; // 0x02 + NvU8 misc; // 0x03 + NvU8 data[NVT_CEA861_MAX_PAYLOAD]; // 0x04 - 0x7E + NvU8 checksum; // 0x7F +}EIA861EXTENSION; + +typedef struct _tagVTBEXTENSION +{ + NvU8 tag; // 0x00 + NvU8 revision; // 0x01 + NvU8 num_detailed; // 0x02 + NvU8 num_cvt; // 0x03 + NvU8 num_standard; // 0x04 + NvU8 data[NVT_VTB_MAX_PAYLOAD]; // 0x05 - 0x7E + NvU8 checksum; +}VTBEXTENSION; + +// EDID DisplayID extension block template +typedef struct _tagDIDEXTENSION +{ + NvU8 tag; // 0x00 + NvU8 struct_version; // 0x01 + NvU8 length; // 0x02 + NvU8 use_case; // 0x03 + NvU8 ext_count; // 0x04 + NvU8 data[NVT_DID_MAX_EXT_PAYLOAD]; // 0x05 - 0x7E + NvU8 checksum; // 0x7F +}DIDEXTENSION; + +// video signal interface mask +#define NVT_PVT_EDID_INPUT_ISDIGITAL_MASK 0x80 // 0==analog +#define NVT_PVT_EDID_INPUT_ISDIGITAL_SHIFT 7 +#define NVT_PVT_EDID_INPUT_ANALOG_ETC_MASK 0x7F +#define NVT_PVT_EDID_INPUT_ANALOG_ETC_SHIFT 0 + +#define NVT_PVT_EDID_INPUT_INTERFACE_MASK 0x0F +#define NVT_PVT_EDID_INPUT_INTERFACE_SHIFT 0 + +#define NVT_PVT_EDID_INPUT_BPC_MASK 0x70 +#define NVT_PVT_EDID_INPUT_BPC_SHIFT 4 +#define NVT_PVT_EDID_INPUT_BPC_UNDEF 0x00 +#define NVT_PVT_EDID_INPUT_BPC_6 0x01 +#define NVT_PVT_EDID_INPUT_BPC_8 0x02 +#define NVT_PVT_EDID_INPUT_BPC_10 0x03 +#define NVT_PVT_EDID_INPUT_BPC_12 0x04 +#define NVT_PVT_EDID_INPUT_BPC_14 0x05 +#define NVT_PVT_EDID_INPUT_BPC_16 0x06 + +// color characteristic +#define NVT_PVT_EDID_CC_RED_X1_X0_MASK 0xC0 +#define NVT_PVT_EDID_CC_RED_X1_X0_SHIFT 6 +#define NVT_PVT_EDID_CC_RED_Y1_Y0_MASK 0x30 +#define NVT_PVT_EDID_CC_RED_Y1_Y0_SHIFT 4 + +#define 
NVT_PVT_EDID_CC_GREEN_X1_X0_MASK      0x0C
+#define NVT_PVT_EDID_CC_GREEN_X1_X0_SHIFT     2
+#define NVT_PVT_EDID_CC_GREEN_Y1_Y0_MASK      0x03
+#define NVT_PVT_EDID_CC_GREEN_Y1_Y0_SHIFT     0
+
+#define NVT_PVT_EDID_CC_BLUE_X1_X0_MASK       0xC0
+#define NVT_PVT_EDID_CC_BLUE_X1_X0_SHIFT      6
+#define NVT_PVT_EDID_CC_BLUE_Y1_Y0_MASK       0x30
+#define NVT_PVT_EDID_CC_BLUE_Y1_Y0_SHIFT      4
+
+#define NVT_PVT_EDID_CC_WHITE_X1_X0_MASK      0x0C
+#define NVT_PVT_EDID_CC_WHITE_X1_X0_SHIFT     2
+#define NVT_PVT_EDID_CC_WHITE_Y1_Y0_MASK      0x03
+#define NVT_PVT_EDID_CC_WHITE_Y1_Y0_SHIFT     0
+
+#endif // __EDID_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c
new file mode 100644
index 0000000..af00dce
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c
@@ -0,0 +1,627 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvt_cvt.c
+//
+// Purpose: calculate CVT/CVT-RB timing
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "nvtiming_pvt.h"
+
+PUSH_SEGMENTS
+
+CONS_SEGMENT(PAGE_CONS)
+
+const NvU32 NVT_MAX_NVU32 = (NvU32)(-1);
+
+const NvU32 NVT_CVT_CELL_GRAN    = 8;  // Character cell width.
+const NvU32 NVT_CVT_MIN_VSYNCBP  = 11; // min vsync + back porch time of 550us [1000000:550 = 20000:11]
+const NvU32 NVT_CVT_V_PORCH      = 3;  // in lines
+const NvU32 NVT_CVT_C_PRIME      = 30; // value of C'
+const NvU32 NVT_CVT_M_PRIME_D_20 = 15; // value of (M' / 20)
+const NvU32 NVT_CVT_CLOCK_STEP   = 25; // Pclk step, in 10kHz
+const NvU32 NVT_CVT_H_SYNC_PER   = 8;  // HSYNC percentage (8%)
+
+const NvU32 NVT_CVT_RB_HBLANK_CELLS  = 20; // 160 fixed hblank for RB
+const NvU32 NVT_CVT_RB_HFPORCH_CELLS = 6;  // 48 fixed hfporch for RB
+const NvU32 NVT_CVT_RB_HSYNCW_CELLS  = 4;  // 32 fixed hsyncwidth for RB
+const NvU32 NVT_CVT_RB_MIN_VBLANK    = 23; // 460us min vblank [1000000:460 = 50000:23]
+const NvU32 NVT_CVT_MIN_V_BPORCH     = 6;  // Minimum vertical back porch.
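+
+// Informative example (added for illustration; the figures below are derived
+// from the constants above, not quoted from the VESA spec): for a
+// 1920x1080 @ 60 Hz CVT-RB mode, NvTiming_CalcCVT_RB() below computes
+// VBI = (23 * 1080 * 60) / (50000 - 23 * 60) + 1 = 31 blank lines, so
+// VTotal = 1080 + 31 = 1111 and HTotal = (1920/8 + 20) * 8 = 2080; the ideal
+// pixel clock 2080 * 1111 * 60 = ~138.65 MHz is then rounded down to the
+// 250 kHz clock step, giving pT->pclk = 13850 (in 10 kHz units), i.e. the
+// standard 138.50 MHz CVT-RB mode:
+//
+//     NVT_TIMING t;
+//     NvTiming_CalcCVT_RB(1920, 1080, 60, 0 /* progressive */, &t);
+//     // t.HTotal == 2080, t.VTotal == 1111, t.pclk == 13850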
+
+
+// VESA CVT spec ver1.2:
+//
+// Page 24 : Table 5-4 : Delta between Original Reduced Blank Timing and Reduced Blanking Timing V2
+#define NVT_CVT_RB2_CLOCK_STEP_KHZ      1
+#define NVT_CVT_RB2_H_BLANK_PIXELS      80
+#define NVT_CVT_RB2_H_SYNC_PIXELS       32
+#define NVT_CVT_RB2_MIN_VBLANK_MICROSEC 460
+#define NVT_CVT_RB2_MIN_V_FPORCH        1
+#define NVT_CVT_RB2_MIN_V_BPORCH        6
+// Page 16 : Table 3-2 : Vertical Sync Duration
+#define NVT_CVT_RB2_V_SYNC_WIDTH        8
+// Page 22: RB_MIN_VBI = RB_V_FPORCH + V_SYNC_RND + MIN_V_BPORCH
+#define NVT_CVT_RB2_MIN_VBI             (NVT_CVT_RB2_V_SYNC_WIDTH + NVT_CVT_RB2_MIN_V_FPORCH + NVT_CVT_RB2_MIN_V_BPORCH)
+// Page 15 : The Horizontal Sync Pulse duration will in all cases be 32 pixel clocks in duration, with the position
+//           set so that the trailing edge of the Horizontal Sync Pulse is located in the center of the Horizontal
+//           Blanking period. This implies that for a fixed blank of 80 pixel clocks, the Horizontal Back Porch is
+//           fixed to (80 / 2) = 40 pixel clocks and the Horizontal Front Porch is fixed to (80 - 40 - 32) = 8 clock cycles.
+#define NVT_CVT_RB2_H_FPORCH            8
+#define NVT_CVT_RB2_H_BPORCH            40
+
+// VESA CVT spec ver2.0:
+//
+// Page 15 : Table 3-2 Constants
+#define NVT_CVT_RB3_CLOCK_STEP_KHZ        1000
+#define NVT_CVT_RB3_H_BLANK_PIXELS        NVT_CVT_RB2_H_BLANK_PIXELS
+#define NVT_CVT_RB3_H_SYNC_PIXELS         NVT_CVT_RB2_H_SYNC_PIXELS
+#define NVT_CVT_RB3_H_FPORCH              NVT_CVT_RB2_H_FPORCH
+#define NVT_CVT_RB3_MIN_VBLANK_MICROSEC   NVT_CVT_RB2_MIN_VBLANK_MICROSEC
+#define NVT_CVT_RB3_V_FIELD_RATE_PPM_ADJ  350
+#define NVT_CVT_RB3_V_SYNC_WIDTH          NVT_CVT_RB2_V_SYNC_WIDTH
+#define NVT_CVT_RB3_MIN_V_FPORCH          NVT_CVT_RB2_MIN_V_FPORCH
+#define NVT_CVT_RB3_MIN_V_BPORCH          NVT_CVT_RB2_MIN_V_BPORCH
+
+#define NVT_CVT_RB3_MIN_VBI               NVT_CVT_RB2_MIN_VBI
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NvU16 getCVTVSync(NvU32 XRes, NvU32 YRes)
+{
+    // 4:3 modes
+    if(XRes * 3 == YRes * 4)
+        return 4;
+
+    // 16:9 modes
+    //if((XRes * 9 == YRes * 16)       ||
+    //   (XRes == 848  && YRes == 480)  ||   // 53:30 = 1.76666
+    //   (XRes == 1064 && YRes == 600)  ||   // 133:75 = 1.77333
+    //   (XRes == 1360 && YRes == 768)  ||   // 85:48 = 1.77083
+    //   (XRes == 1704 && YRes == 960)  ||   // 71:40 = 1.775
+    //   (XRes == 1864 && YRes == 1050) ||   // 832:525 = 1.77523809
+    //   (XRes == 2128 && YRes == 1200) ||   // 133:75
+    //   (XRes == 2728 && YRes == 1536) ||   // 341:192 = 1.7760416
+    //   (XRes == 3408 && YRes == 1920) ||   // 71:40
+    //   (XRes == 4264 && YRes == 2400))     // 533:300 = 1.77666
+    //    return 5;
+    // NOTE: Because 16:9 modes are really a collection of modes of
+    //       aspect ratio between 16:9 and 53:30, we will include
+    //       all generic modes within this aspect ratio range
+    if((XRes * 9 <= YRes * 16) && (XRes * 30 >= YRes * 53))
+        return 5;
+
+    // 16:10 modes
+    if((XRes * 5 == YRes * 8)         ||
+       (XRes == 1224 && YRes == 768)  ||
+       (XRes == 2456 && YRes == 1536))
+        return 6;
+
+    // Special 1280 modes
+    if((XRes == 1280 && YRes == 1024) ||
+       (XRes == 1280 && YRes == 768))
+        return 7;
+
+    // Failure value, for identification
+    return 10;
+}
+
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_CalcCVT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT)
+{
+    NvU32 dwXCells, dwVSyncBP, dwHBlankCells, dwPClk, dwHSyncCells, dwVSyncWidth;
+
+    NvU32 dwHPeriodEstimate_NUM, dwHPeroidEstimate_DEN;
+    NvU32 dwIdealDutyCycle_NUM, dwIdealDutyCycle_DEN;
+
+    // parameter check
+    if (pT == NULL)
+        return NVT_STATUS_ERR;
+
+    if (width == 0 || height == 0 || rr == 0 )
+        return NVT_STATUS_ERR;
+
+    // Check for valid input parameter
+    if (width < 300 || height < 200 || rr < 10)
+ return NVT_STATUS_ERR;//return NVT_STATUS_ERR_BACKOFF | NVT_STATUS_ERR_OUTOFRANGE; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + + pT->etc.status = NVT_STATUS_CVT; + + // H_PIXELS_RND = ROUNDDOWN(H_PIXELS / CELL_GRAN_RND,0) * CELL_GRAN_RND + if ((width % NVT_CVT_CELL_GRAN)!=0) + { + width = (width / NVT_CVT_CELL_GRAN) * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // Calculate timing + dwXCells = width / NVT_CVT_CELL_GRAN; // Convert to number of cells + dwVSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + dwHPeriodEstimate_NUM = 20000 - NVT_CVT_MIN_VSYNCBP * rr; + dwHPeroidEstimate_DEN = rr * (height + NVT_CVT_V_PORCH); + + dwVSyncBP = NVT_CVT_MIN_VSYNCBP * dwHPeroidEstimate_DEN / dwHPeriodEstimate_NUM +1; + if(dwVSyncBP < dwVSyncWidth + NVT_CVT_MIN_V_BPORCH) + dwVSyncBP = dwVSyncWidth + NVT_CVT_MIN_V_BPORCH; + + // Check for overflow + //DBG_ASSERT(NVT_MAX_NVU32 / NVT_CVT_C_PRIME > dwHPeroidEstimate_DEN); + + dwIdealDutyCycle_DEN = dwHPeroidEstimate_DEN; + dwIdealDutyCycle_NUM = NVT_CVT_C_PRIME * dwHPeroidEstimate_DEN - NVT_CVT_M_PRIME_D_20 * dwHPeriodEstimate_NUM; + + if (dwIdealDutyCycle_NUM < dwIdealDutyCycle_DEN * 20) + { + dwIdealDutyCycle_NUM=20; + dwIdealDutyCycle_DEN=1; + } + + // Check for overflow + if (NVT_MAX_NVU32 / dwXCells <= dwIdealDutyCycle_NUM) + { + dwIdealDutyCycle_NUM /= 10; + dwIdealDutyCycle_DEN /= 10; + } + + dwHBlankCells = ((dwXCells * dwIdealDutyCycle_NUM)/(200*dwIdealDutyCycle_DEN - 2*dwIdealDutyCycle_NUM))*2; + + // Check for overflow + //DBG_ASSERT(MAX_NVU32 / dwHPeroidEstimate_DEN > (dwXCells + dwHBlankCells)*CVT_CELL_GRAN); + dwPClk = ((dwXCells + dwHBlankCells) * NVT_CVT_CELL_GRAN * dwHPeroidEstimate_DEN * 2 / dwHPeriodEstimate_NUM / NVT_CVT_CLOCK_STEP) * NVT_CVT_CLOCK_STEP; + + dwHSyncCells = (dwXCells + dwHBlankCells) * NVT_CVT_H_SYNC_PER / 100; + + + pT->HVisible = (NvU16)(dwXCells * NVT_CVT_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)((dwXCells + dwHBlankCells) * NVT_CVT_CELL_GRAN); + pT->HFrontPorch = (NvU16)((dwHBlankCells/2 - dwHSyncCells) * NVT_CVT_CELL_GRAN); + pT->HSyncWidth = (NvU16)(dwHSyncCells * NVT_CVT_CELL_GRAN); + + pT->VTotal = (NvU16)(height + dwVSyncBP + NVT_CVT_V_PORCH); + pT->VFrontPorch = (NvU16)(NVT_CVT_V_PORCH); + pT->VSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + pT->pclk = dwPClk; + + pT->HSyncPol = NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + + // Clear unused fields + pT->HBorder = pT->VBorder = 0; + pT->interlaced = NVT_PROGRESSIVE; + + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + pT->etc.rgb444.bpc.bpc8 = 1; + + return NVT_STATUS_SUCCESS; +} + +// CVT-RB timing calculation +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwPClk, dwVBILines, dwVSyncWidth; + + // parameter check + if (pT == NULL) + return 
NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_BACKOFF | NVT_STATUS_ERR_OUTOFRANGE; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + pT->etc.status = NVT_STATUS_CVT_RB; + + // H_PIXELS_RND = ROUNDDOWN(H_PIXELS / CELL_GRAN_RND,0) * CELL_GRAN_RND + if ((width % NVT_CVT_CELL_GRAN)!=0) + { + width = (width / NVT_CVT_CELL_GRAN) * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // Calculate timing + dwXCells = width / NVT_CVT_CELL_GRAN; // Convert to number of cells + dwVSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + dwVBILines = (NVT_CVT_RB_MIN_VBLANK * height * rr) / (50000 - NVT_CVT_RB_MIN_VBLANK * rr) + 1; + + if(dwVBILines < NVT_CVT_V_PORCH + dwVSyncWidth + NVT_CVT_MIN_V_BPORCH) + dwVBILines = NVT_CVT_V_PORCH + dwVSyncWidth + NVT_CVT_MIN_V_BPORCH; + + dwPClk = rr * (height + dwVBILines) * (dwXCells + NVT_CVT_RB_HBLANK_CELLS) / (10000 / NVT_CVT_CELL_GRAN) / NVT_CVT_CLOCK_STEP; + dwPClk *= NVT_CVT_CLOCK_STEP; + + pT->HVisible = (NvU16)(dwXCells * NVT_CVT_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)((dwXCells + NVT_CVT_RB_HBLANK_CELLS) * NVT_CVT_CELL_GRAN); + pT->HFrontPorch = (NvU16)(NVT_CVT_RB_HFPORCH_CELLS * NVT_CVT_CELL_GRAN); + pT->HSyncWidth = (NvU16)(NVT_CVT_RB_HSYNCW_CELLS * NVT_CVT_CELL_GRAN); + + pT->VTotal = (NvU16)(height + dwVBILines); + pT->VFrontPorch = (NvU16)(NVT_CVT_V_PORCH); + pT->VSyncWidth = (NvU16)dwVSyncWidth; + + pT->pclk = dwPClk; + + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + + // Clear unused fields + pT->HBorder = pT->VBorder = 0; + pT->interlaced = 0; + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + + return NVT_STATUS_SUCCESS; +} + +// CVT-RB2 timing calculation +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvBool is1000div1001, NVT_TIMING *pT) +{ + NvU32 vbi, act_vbi_lines, total_v_lines, total_pixels, act_pixel_freq_khz; + + // parameter check + if (pT == NULL || width == 0 || height == 0 || rr == 0) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + pT->etc.status = NVT_STATUS_CVT_RB_2; + + // CVT spec1.2 - page 21 : 5.4 Computation of Reduced Blanking Timing Parameters + // 8. Estimate the Horizontal Period (kHz): + // H_PERIOD_EST = ((1000000 / (V_FIELD_RATE_RQD)) - RB_MIN_V_BLANK) / (V_LINES_RND + + // TOP_MARGIN + BOT_MARGIN) + // h_period_est = (1000000 / rr - NVT_CVT_RB2_MIN_VBLANK) / height; + + // 9. 
Determine the number of lines in the vertical blanking interval : + // VBI_LINES = ROUNDDOWN(RB_MIN_V_BLANK / H_PERIOD_EST, 0) + 1 + // vbi = NVT_CVT_RB2_MIN_VBLANK / h_period_est + 1; + + // combining step 8, 9, + vbi = height * NVT_CVT_RB2_MIN_VBLANK_MICROSEC * rr / (1000000 - NVT_CVT_RB2_MIN_VBLANK_MICROSEC * rr) + 1; + + // 10. Check Vertical Blanking is Sufficient : + // RB_MIN_VBI = RB_V_FPORCH + V_SYNC_RND + MIN_V_BPORCH + // ACT_VBI_LINES = IF(VBI_LINES < RB_MIN_VBI, RB_MIN_VBI, VBI_LINES) + act_vbi_lines = MAX(vbi, NVT_CVT_RB2_MIN_VBI); + + // 11. Find total number of vertical lines : + // TOTAL_V_LINES = ACT_VBI_LINES + V_LINES_RND + TOP_MARGIN + BOT_MARGIN + // + INTERLACE + total_v_lines = act_vbi_lines + height; //+0.5 if interlaced + + // 12. Find total number of pixel clocks per line : + // TOTAL_PIXELS = RB_H_BLANK + TOTAL_ACTIVE_PIXELS + total_pixels = NVT_CVT_RB2_H_BLANK_PIXELS + width; + + // sanity check just in case of bad edid where the timing value could exceed the limit of NVT_TIMING structure which unfortunately is defined in NvU16 + if (total_pixels > (NvU16)-1 || total_v_lines > (NvU16)-1) + return NVT_STATUS_INVALID_PARAMETER; + + // 13. Calculate Pixel Clock Frequency to nearest CLOCK_STEP MHz : + // ACT_PIXEL_FREQ = CLOCK_STEP * ROUNDDOWN((V_FIELD_RATE_RQD * TOTAL_V_LINES * + // TOTAL_PIXELS / 1000000 * REFRESH_MULTIPLIER) / CLOCK_STEP, 0) + if (is1000div1001) + act_pixel_freq_khz = NVT_CVT_RB2_CLOCK_STEP_KHZ * (rr * total_v_lines * total_pixels / 1001 / NVT_CVT_RB2_CLOCK_STEP_KHZ); + else + act_pixel_freq_khz = NVT_CVT_RB2_CLOCK_STEP_KHZ * (rr * total_v_lines * total_pixels / 1000 / NVT_CVT_RB2_CLOCK_STEP_KHZ); + + // 14. Find actual Horizontal Frequency(kHz) : + // ACT_H_FREQ = 1000 * ACT_PIXEL_FREQ / TOTAL_PIXELS + // 15. Find Actual Field Rate(Hz) : + // ACT_FIELD_RATE = 1000 * ACT_H_FREQ / TOTAL_V_LINES + // 16. Find actual Vertical Refresh Rate(Hz) : + // ACT_FRAME_RATE = IF(INT_RQD ? 
= "y", ACT_FIELD_RATE / 2, ACT_FIELD_RATE + + // fill in the essential timing info for output + pT->HVisible = (NvU16)width; + pT->HTotal = (NvU16)(total_pixels); + pT->HFrontPorch = NVT_CVT_RB2_H_FPORCH; + pT->HSyncWidth = NVT_CVT_RB2_H_SYNC_PIXELS; + pT->VVisible = (NvU16)height; + pT->VTotal = (NvU16)total_v_lines; + pT->VSyncWidth = NVT_CVT_RB2_V_SYNC_WIDTH; + pT->VFrontPorch = (NvU16)(act_vbi_lines - NVT_CVT_RB2_V_SYNC_WIDTH - NVT_CVT_RB2_MIN_V_BPORCH); + pT->pclk = (act_pixel_freq_khz + 5) / 10; //convert to 10Khz + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + pT->HBorder = pT->VBorder = 0; // not supported + pT->interlaced = 0; // not supported yet + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = (NvU32)axb_div_c_64((NvU64)pT->pclk, (NvU64)10000 * (NvU64)1000, (NvU64)pT->HTotal*(NvU64)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB2:%dx%dx%dHz", width, height, rr); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + + +// CVT-RB3 timing calculation +// This is intended to work in conjunction with VESA Adaptive-Sync operation (or other similar VRR methodology) +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB3(NvU32 width, NvU32 height, NvU32 rr, NvU32 deltaHBlank, NvU32 vBlankMicroSec, NvBool isEarlyVSync, NVT_TIMING *pT) +{ + NvU32 vbi, act_v_blank_time, act_v_blank_lines, v_back_porch, total_v_lines, total_pixels, adj_rr_x1M, act_pixel_freq_khz; + NvU64 act_pixel_freq_hz = 0xFFFFFFFFFFFFFFFFULL; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if ( (height % 8 != 0) || (deltaHBlank % 8 != 0) || deltaHBlank > 120 || vBlankMicroSec > 245) + return NVT_STATUS_INVALID_PARAMETER; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + pT->etc.status = NVT_STATUS_CVT_RB_3; + + // 1 Calculate the required field refresh rate (Hz): + // V_FIELD_RATE_RQD = I_IP_FREQ_RQD * (1 + C_V_FIELD_RATE_PPM_ADJ / 1000000) + + // Parameters mapping: + // - V_FIELD_RATE_RQD == "adj_rr_x1M" + // - I_IP_FREQ_RQD == "rr" + // - C_V_FIELD_RATE_PPM_ADJ == "NVT_CVT_RB3_V_FIELD_RATE_PPM_ADJ" + adj_rr_x1M = rr * (1000000 + NVT_CVT_RB3_V_FIELD_RATE_PPM_ADJ); + + // 2 Round the desired number of horizontal pixels down to the nearest character cell boundary: + // TOTAL_ACTIVE_PIXELS = ROUNDDOWN(I_H_PIXELS / C_CELL_GRAN_RND, 0) * C_CELL_GRAN_RND + + // Parameters mapping: + // - TOTAL_ACTIVE_PIXELS and I_H_PIXELS == "width" + // - C_CELL_GRAN_RND == "NVT_CVT_CELL_GRAN" + if ((width % NVT_CVT_CELL_GRAN) != 0) + { + // ROUNDDOWN + width = (width / NVT_CVT_CELL_GRAN) * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // 3 Round the number of vertical lines down to the nearest integer: + // V_LINES_RND = ROUNDDOWN(I_V_LINES, 0) + + // Parameters mapping: + // - V_LINES_RND == "height" + + // 4 Calculate the estimated Horizontal Period (kHz): + // H_PERIOD_EST = ((1000000 / (V_FIELD_RATE_RQD)) - C_RB_MIN_V_BLANK) / V_LINES_RND + + // Parameters mapping: + // - H_PERIOD_EST == "h_period_est" + // - C_RB_MIN_V_BLANK == "NVT_CVT_RB3_MIN_VBLANK_MICROSEC" + // h_period_est = ((1000000000000 / adj_rr_x1M) - NVT_CVT_RB3_MIN_VBLANK_MICROSEC) / height + + // 5 Calculate the total VBlank time: + // ACT_V_BLANK_TIME = IF(I_VBLANK < C_RB_MIN_V_BLANK, C_RB_MIN_V_BLANK, I_VBLANK) + + // 
Parameters mapping:
+    // - ACT_V_BLANK_TIME == "act_v_blank_time"
+    // - I_VBLANK         == "vBlankMicroSec"
+    act_v_blank_time = MAX(vBlankMicroSec + 460, NVT_CVT_RB3_MIN_VBLANK_MICROSEC);
+
+    // 6 Calculate the number of idealized lines in the VBlank interval:
+    // VBI_LINES = ROUNDUP(ACT_V_BLANK_TIME / H_PERIOD_EST, 0)
+
+    // Parameters mapping:
+    // - VBI_LINES == "vbi"
+    // the formula below combines steps 4, 5 and 6 together, i.e. both the numerator and denominator are multiplied by height and adj_rr_x1M.
+    vbi = (NvU32)(((NvU64)height * (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M) / ((NvU64)1000000000000 - (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M));
+    // ROUNDUP
+    if (((NvU64)height * (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M) % ((NvU64)1000000000000 - (NvU64)act_v_blank_time * (NvU64)adj_rr_x1M) !=0)
+        vbi += 1;
+
+    // 7 Determine whether idealized VBlank is sufficient and calculate the actual number of lines in the VBlank period:
+    // RB_MIN_VBI = C_RB_V_FPORCH + C_V_SYNC_RND + C_MIN_V_BPORCH
+    // V_BLANK = IF(VBI_LINES < RB_MIN_VBI, RB_MIN_VBI, VBI_LINES)
+
+    // Parameters mapping:
+    // - C_RB_V_FPORCH  == 1
+    // - C_V_SYNC_RND   == 8
+    // - C_MIN_V_BPORCH == 6
+    // - V_BLANK        == "act_v_blank_lines"
+    // NVT_CVT_RB3_MIN_VBI == 1 + 8 + 6 = 15
+    act_v_blank_lines = MAX(vbi, NVT_CVT_RB3_MIN_VBI);
+
+    // 8 Calculate the total number of vertical lines:
+    // TOTAL_V_LINES = V_BLANK + V_LINES_RND
+    total_v_lines = act_v_blank_lines + height;
+
+    // 9 Calculate the vertical back porch:
+    // V_BACK_PORCH = IF(AND(I_RED_BLANK_VER=3, I_EARLY_VSYNC_RQD?="Y"), ROUNDDOWN(VBI_LINES / 2, 0), C_MIN_V_BPORCH)
+
+    // Parameters mapping:
+    // - V_BACK_PORCH      == "v_back_porch"
+    // - I_RED_BLANK_VER   == 3 (this is the RB3 function, so the value is 3)
+    // - I_EARLY_VSYNC_RQD == "isEarlyVSync"
+    // - C_MIN_V_BPORCH    == 6
+    if (isEarlyVSync)
+        v_back_porch = vbi / 2;
+        // v_back_porch = act_v_blank_lines /2 ;
+    else
+        v_back_porch = NVT_CVT_RB3_MIN_V_BPORCH;
+
+    // 10 Calculate the vertical front porch:
+    // V_FRONT_PORCH = V_BLANK - V_BACK_PORCH - C_V_SYNC_RND
+    // this is assigned directly as the pT->VFrontPorch value in NVT_TIMING
+
+    // 11 Calculate the total number of pixels per line:
+    // TOTAL_PIXELS = TOTAL_ACTIVE_PIXELS + C_RB_H_BLANK + IF(I_RED_BLANK_VER=3, I_ADDITIONAL_HBLANK, 0)
+
+    // Parameters mapping:
+    // - C_RB_H_BLANK        == 80
+    // - I_ADDITIONAL_HBLANK == "deltaHBlank"; the valid range is defined by the Type X descriptor in DisplayID v2.1:
+    //                          80 <= HBlank <= 200
+    total_pixels = width + deltaHBlank + 80;
+
+    // 12 Calculate the horizontal back porch:
+    // H_BACK_PORCH = C_RB_H_BLANK + IF(I_RED_BLANK_VER=3, I_ADDITIONAL_HBLANK, 0) - C_H_FRONT_PORCH - C_RB_H_SYNC
+    // NVT_TIMING does not need to store H_BACK_PORCH
+
+    // sanity check just in case of bad edid where the timing value could exceed the limit of NVT_TIMING structure which unfortunately is defined in NvU16
+    if (total_pixels > (NvU16)-1 || total_v_lines > (NvU16)-1)
+        return NVT_STATUS_INVALID_PARAMETER;
+
+    // 13 Calculate the pixel clock frequency to the nearest C_CLOCK_STEP (MHz):
+    // ACT_PIXEL_FREQ = C_CLOCK_STEP * ROUNDUP((V_FIELD_RATE_RQD * TOTAL_V_LINES * TOTAL_PIXELS / 1000000 * 1) / C_CLOCK_STEP, 0)
+
+    // Parameters mapping:
+    // - ACT_PIXEL_FREQ == "act_pixel_freq_hz"
+    // - C_CLOCK_STEP   == "NVT_CVT_RB3_CLOCK_STEP_KHZ" == 1000
+    act_pixel_freq_hz = ((NvU64)adj_rr_x1M * (NvU64)total_v_lines * (NvU64)total_pixels / (NvU64)1000000);
+
+    // Here we need to divide by an extra 1000 because adj_rr_x1M is scaled by a million, i.e. 1 MHz / 1000 = 1 kHz
+    
act_pixel_freq_khz = (NvU32)(act_pixel_freq_hz / NVT_CVT_RB3_CLOCK_STEP_KHZ); + + // kHz ROUNDUP + if ((act_pixel_freq_hz % 1000) != 0) + act_pixel_freq_khz += 1; + + pT->HVisible = (NvU16)width; + pT->HTotal = (NvU16)total_pixels; + pT->HFrontPorch = NVT_CVT_RB3_H_FPORCH; + pT->HSyncWidth = NVT_CVT_RB3_H_SYNC_PIXELS; + pT->VVisible = (NvU16)height; + pT->VTotal = (NvU16)total_v_lines; + pT->VSyncWidth = NVT_CVT_RB3_V_SYNC_WIDTH; + pT->VFrontPorch = (NvU16)(act_v_blank_lines - NVT_CVT_RB3_V_SYNC_WIDTH - v_back_porch); + pT->pclk = ((NvU32)act_pixel_freq_khz + 5) / 10; // convert to 10kHz + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + pT->HBorder = pT->VBorder = 0; // not supported + pT->interlaced = 0; // not supported yet + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = (NvU32)axb_div_c_64((NvU64)pT->pclk, (NvU64)10000 * (NvU64)1000, (NvU64)pT->HTotal*(NvU64)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB3:%dx%dx%dHz", width, height, rr); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool NvTiming_IsTimingCVTRB(const NVT_TIMING *pTiming) +{ + // Check from the Timing Type + NvU32 reducedType = 0; + NvU32 hblank = 0; + reducedType = NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status); + + if (reducedType == NVT_TYPE_CVT_RB || reducedType == NVT_TYPE_CVT_RB_2 || reducedType == NVT_TYPE_CVT_RB_3) + { + return NV_TRUE; + } + + hblank = pTiming->HTotal - pTiming->HVisible; + + // Manually check for RB1 and RB2 + // RB1 - HBlank = 160, HSync = 32, HFrontPorch = 48, HBackPorch = 80 + if ((hblank == 160) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch == 48)) + { + return NV_TRUE; + } + + // RB2 - HBlank = 80, HSync = 32, HFrontPorch = 8, HBackPorch = 40 + if ((hblank == 80) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch == 8)) + { + return NV_TRUE; + } + + // RB3 - HBlank is an integer multiple of 8 in (80, 200], HSync = 32, HFrontPorch = 8 + if (((hblank > 80) && (hblank <= 200) && (hblank % 8 == 0)) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch == 8)) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +POP_SEGMENTS diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c new file mode 100644 index 0000000..92d996c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c @@ -0,0 +1,1892 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_displayid20.c +// +// Purpose: provide DisplayID 2.0 related services +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming.h" +#include "nvtiming_pvt.h" +#include "displayid20.h" + +PUSH_SEGMENTS + +// DisplayID20 Entry point functions +static NVT_STATUS parseDisplayId20BaseSection(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +static NVT_STATUS parseDisplayId20SectionDataBlocks(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +static NVT_STATUS parseDisplayId20ExtensionSection(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); + +// DisplayID20 Data Block Tag Allocation +static NVT_STATUS parseDisplayId20ProductIdentity(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x20 Product Identification Block Tag +static NVT_STATUS parseDisplayId20DisplayParam(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x21 Display Parameters +static NVT_STATUS parseDisplayId20Timing7(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x22 Type VII Timing - Detailed Timing +static NVT_STATUS parseDisplayId20Timing8(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x23 Type VIII Timing - Enumerated Timing +static NVT_STATUS parseDisplayId20Timing9(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x24 Type IX Timing - Formula-based +static NVT_STATUS parseDisplayId20RangeLimit(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x25 Dynamic Video Timing Range Limits +static NVT_STATUS parseDisplayId20DisplayInterfaceFeatures(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x26 Display Interface Features +static NVT_STATUS parseDisplayId20Stereo(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x27 Stereo Display Interface +static NVT_STATUS parseDisplayId20TiledDisplay(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x28 Tiled Display Topology +static NVT_STATUS parseDisplayId20ContainerId(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x29 ContainerID +static NVT_STATUS parseDisplayId20Timing10(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x2A Type X Timing - Formula-based RR up to 1024Hz +static NVT_STATUS parseDisplayId20AdaptiveSync(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x2B Adaptive-Sync +static NVT_STATUS parseDisplayId20ARVRHMD(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x2C ARVR HMD +static 
NVT_STATUS parseDisplayId20ARVRLayer(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x2D ARVR Layer +static NVT_STATUS parseDisplayId20VendorSpecific(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x7E Vendor-specific +static NVT_STATUS parseDisplayId20CtaData(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x81 CTA DisplayID + +// Helper functions +static NVT_STATUS getPrimaryUseCase(NvU8 product_type, NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE *primary_use_case); +static NVT_STATUS parseDisplayId20Timing7Descriptor(const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *pDescriptor, NVT_TIMING *pTiming, NvU8 revision, NvU8 count); +static NVT_STATUS parseDisplayId20Timing9Descriptor(const DISPLAYID_2_0_TIMING_9_DESCRIPTOR *pDescriptor, NVT_TIMING *pTiming, NvU8 count); +static NVT_STATUS parseDisplayId20Timing10Descriptor(const void *pDescriptor, NVT_TIMING *pTiming, NvU8 payloadbytes, NvU8 count); +static NvU32 greatestCommonDenominator(NvU32 x, NvU32 y); +static NvU8 getExistedTimingSeqNumber(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, enum NVT_TIMING_TYPE); + +/* + * The second-generation version of the VESA DisplayID Standard + * DisplayID v2.0 + * + * @brief Parses a DisplayID20 section + * + * @param pDisplayId The DisplayID20 section block + * @param length Size of the DisplayID section block + * @param pDisplayIdInfo Output structure that receives the parsed raw data + * + */ +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NV_STDCALL +NvTiming_parseDisplayId20Info( + const NvU8 *pDisplayId, + NvU32 length, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_SECTION *pSection = NULL; + NvU32 offset = 0; + NvU32 extensionIndex = 0; + NvU32 idx = 0; + + // parameter check + if ((pDisplayId == NULL) || + (pDisplayIdInfo == NULL)) + { + return NVT_STATUS_ERR; + } + + pSection = (const DISPLAYID_2_0_SECTION *)pDisplayId; + + if ((pSection->header.version < DISPLAYID_2_0_VERSION) || + (DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header) > length)) + { + return NVT_STATUS_ERR; + } + + NVMISC_MEMSET(pDisplayIdInfo, 0, sizeof(NVT_DISPLAYID_2_0_INFO)); + + status = parseDisplayId20BaseSection(pSection, pDisplayIdInfo); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + pDisplayIdInfo->extension_count = pSection->header.extension_count; + for (extensionIndex = 0; extensionIndex < pDisplayIdInfo->extension_count; extensionIndex++) + { + // Get offset to the next section. 
+ offset += DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header); + + // validate that the next section fits within the buffer + pSection = (const DISPLAYID_2_0_SECTION *)(pDisplayId + offset); + if ((offset + DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) > length) + { + return NVT_STATUS_ERR; + } + + // process the section + status = parseDisplayId20ExtensionSection(pSection, pDisplayIdInfo); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + } + + for (idx = 0; idx < pDisplayIdInfo->total_timings; idx++) + { + updateColorFormatForDisplayId20Timings(pDisplayIdInfo, idx); + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_DisplayID2ValidationMask( + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info, + NvBool bIsStrongValidation) +{ + NvU32 j; + NvU32 ret = 0; + + // check the DisplayId2 version and signature + if (pDisplayId20Info->version != 0x2) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_VERSION); + } + + if (!pDisplayId20Info->valid_data_blocks.product_id_present) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_PRODUCT_ID); + } + + if (pDisplayId20Info->primary_use_case >= PRODUCT_PRIMARY_USE_GENERIC_DISPLAY && + pDisplayId20Info->primary_use_case <= PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY) + { + if (!(pDisplayId20Info->valid_data_blocks.parameters_present && + pDisplayId20Info->valid_data_blocks.interface_feature_present && + pDisplayId20Info->valid_data_blocks.type7Timing_present && + pDisplayId20Info->total_timings)) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK); + } + } + + // Strong validation to follow + if (bIsStrongValidation == NV_TRUE) + { + // TODO: validate each data block against its limitations + // Type 7 Timings data block + for (j = 0; j < pDisplayId20Info->total_timings; j++) + { + if ( NVT_PREFERRED_TIMING_IS_DISPLAYID(pDisplayId20Info->timing[j].etc.flag) && + (pDisplayId20Info->display_param.h_pixels != 0) && + (pDisplayId20Info->display_param.v_pixels != 0)) + { + if ( pDisplayId20Info->timing[j].HVisible != pDisplayId20Info->display_param.h_pixels || + pDisplayId20Info->timing[j].VVisible != pDisplayId20Info->display_param.v_pixels ) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK); + break; + } + } + } + // TODO: continue with the next data block's validation, if present. 
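For reference, this is how a caller might consume the mask built above. A sketch only: it assumes NVT_DID2_VALIDATION_ERR_MASK(e) yields one bit per error code, which is consistent with the OR-accumulation into ret.

    /* Sketch: decoding the result of NvTiming_DisplayID2ValidationMask().
     * Assumes one bit per NVT_DID2_VALIDATION_ERR_* code. */
    NvU32 mask = NvTiming_DisplayID2ValidationMask(pDisplayId20Info, NV_TRUE);
    if (mask == 0)
    {
        // section validated; pDisplayId20Info->timing[] is safe to consume
    }
    else if (mask & NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_VERSION))
    {
        // not a DisplayID v2.x section; try the legacy EDID/DisplayID 1.x path
    }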
+ // TODO : validate extension blocks + } + + return ret; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +NvTiming_DisplayID2ValidationDataBlocks( + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + NvBool bIsStrongValidation) +{ + if (NvTiming_DisplayID2ValidationMask(pDisplayIdInfo, bIsStrongValidation) != 0) + { + return NVT_STATUS_ERR; + } + else + { + return NVT_STATUS_SUCCESS; + } +} + +// DisplayID20 Entry point functions +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20BaseSection( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + // validate for section checksum before processing the data block + if (computeDisplayId20SectionCheckSum((const NvU8 *)pSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) != 0) + { + status |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_CHECKSUM); + return status; + } + + pDisplayIdInfo->revision = pSection->header.revision; + pDisplayIdInfo->version = pSection->header.version; + + status = getPrimaryUseCase(pSection->header.product_type, + &pDisplayIdInfo->primary_use_case); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + status = parseDisplayId20SectionDataBlocks(pSection, pDisplayIdInfo); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20ExtensionSection( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + // validate for section checksum before processing the data block + if (computeDisplayId20SectionCheckSum((const NvU8 *)pSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) != 0) + { + status |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_CHECKSUM); + return status; + } + + nvt_assert(pSection->header.version >= DISPLAYID_2_0_VERSION); + nvt_assert(pSection->header.extension_count == 0); + nvt_assert(pSection->header.product_type == DISPLAYID_2_0_PROD_EXTENSION); + + status = parseDisplayId20SectionDataBlocks(pSection, pDisplayIdInfo); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20SectionDataBlocks( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NvU32 i = 0; + NvU32 offset = 0; + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock = NULL; + NVT_STATUS status = NVT_STATUS_SUCCESS; + + while (offset < pSection->header.section_bytes) + { + // Get current block + pDataBlock = (const DISPLAYID_2_0_DATA_BLOCK_HEADER *)(pSection->data + offset); + + // detected zero padding + if (pDataBlock->type == 0) + { + for (i = offset; i < pSection->header.section_bytes; i++) + { + // validate that all paddings are zeros + nvt_assert(pSection->data[i] == 0); + } + break; + } + + // check data block is valid. 
+ if ((offset + DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pDataBlock)) > pSection->header.section_bytes) + { + return NVT_STATUS_ERR; + } + + // parse the data block + status = parseDisplayId20DataBlock(pDataBlock, pDisplayIdInfo); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + switch (pDataBlock->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY: + pDisplayIdInfo->valid_data_blocks.product_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM: + pDisplayIdInfo->valid_data_blocks.parameters_present = NV_TRUE; + if (pDisplayIdInfo->display_param.audio_speakers_integrated == AUDIO_SPEAKER_INTEGRATED_SUPPORTED) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + pDisplayIdInfo->valid_data_blocks.type7Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + pDisplayIdInfo->valid_data_blocks.type8Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + pDisplayIdInfo->valid_data_blocks.type9Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + pDisplayIdInfo->valid_data_blocks.dynamic_range_limit_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + pDisplayIdInfo->valid_data_blocks.interface_feature_present = NV_TRUE; + + // Supported - Color depth is supported for all supported timings. Supported timing includes all Display-ID exposed timings + // (that is timing exposed using DisplayID timing types and CTA VICs) + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayIdInfo->interface_features.yuv444.bpcs)) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_444; + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayIdInfo->interface_features.yuv422.bpcs)) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_422; + } + + if (pDisplayIdInfo->interface_features.audio_capability.support_48khz || + pDisplayIdInfo->interface_features.audio_capability.support_44_1khz || + pDisplayIdInfo->interface_features.audio_capability.support_32khz) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + pDisplayIdInfo->valid_data_blocks.stereo_interface_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + pDisplayIdInfo->valid_data_blocks.tiled_display_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + pDisplayIdInfo->valid_data_blocks.container_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_10: + pDisplayIdInfo->valid_data_blocks.type10Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC: + pDisplayIdInfo->valid_data_blocks.adaptive_sync_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_HMD: + pDisplayIdInfo->valid_data_blocks.arvr_hmd_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_LAYER: + pDisplayIdInfo->valid_data_blocks.arvr_layer_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + pDisplayIdInfo->valid_data_blocks.vendor_specific_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA: + pDisplayIdInfo->valid_data_blocks.cta_data_present = NV_TRUE; + break; + default: + status = NVT_STATUS_ERR; + } + + // advance to the next block + offset += DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pDataBlock); + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20DataBlock( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + 
NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + switch (pDataBlock->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY: + status = parseDisplayId20ProductIdentity(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM: + status = parseDisplayId20DisplayParam(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + status = parseDisplayId20Timing7(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + status = parseDisplayId20Timing8(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + status = parseDisplayId20Timing9(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_10: + status = parseDisplayId20Timing10(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + status = parseDisplayId20RangeLimit(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + status = parseDisplayId20DisplayInterfaceFeatures(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + status = parseDisplayId20Stereo(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + status = parseDisplayId20TiledDisplay(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + status = parseDisplayId20ContainerId(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC: + status = parseDisplayId20AdaptiveSync(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_HMD: + status = parseDisplayId20ARVRHMD(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_ARVR_LAYER: + status = parseDisplayId20ARVRLayer(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + status = parseDisplayId20VendorSpecific(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA: + status = parseDisplayId20CtaData(pDataBlock, pDisplayIdInfo); + break; + default: + status = NVT_STATUS_ERR; + } + return status; +} + +// All Data Blocks Parsing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20ProductIdentity( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NVT_DISPLAYID_PRODUCT_IDENTITY *pProductIdentity = NULL; + const DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK *pProductIdBlock = NULL; + + pProductIdBlock = (const DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK *)pDataBlock; + + // add more validation if needed + + if (pDisplayIdInfo == NULL) return status; + + pProductIdentity = &pDisplayIdInfo->product_identity; + + pProductIdentity->vendor_id = (pProductIdBlock->vendor[0] << 16) | + (pProductIdBlock->vendor[1] << 8) | + (pProductIdBlock->vendor[2]); + pProductIdentity->product_id = (pProductIdBlock->product_code[0]) | + (pProductIdBlock->product_code[1] << 8); + pProductIdentity->serial_number = (pProductIdBlock->serial_number[0]) | + (pProductIdBlock->serial_number[1] << 8) | + (pProductIdBlock->serial_number[2] << 16) | + (pProductIdBlock->serial_number[3] << 24); + pProductIdentity->week = (pProductIdBlock->model_tag >= 1 && pProductIdBlock->model_tag <= 52) ? + pProductIdBlock->model_tag : 0; + pProductIdentity->year = (pProductIdBlock->model_tag == 0xFF) ? 
+ pProductIdBlock->model_year : + pProductIdBlock->model_year + 2000; + + if (pProductIdBlock->product_name_string_size != 0) + { + NVMISC_STRNCPY((char *)pProductIdentity->product_string, + (const char *)pProductIdBlock->product_name_string, + pProductIdBlock->product_name_string_size); + } + pProductIdentity->product_string[pProductIdBlock->product_name_string_size] = '\0'; + + return status; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20DisplayParam( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_DISPLAY_PARAM_BLOCK *pDisplayParamBlock = NULL; + NVT_DISPLAYID_DISPLAY_PARAMETERS *pDisplayParam = NULL; + + if (pDataBlock->data_bytes != DISPLAYID_2_0_DISPLAY_PARAM_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + // Add more validation here if needed + + if (pDisplayIdInfo == NULL) return status; + + pDisplayParamBlock = (const DISPLAYID_2_0_DISPLAY_PARAM_BLOCK *)pDataBlock; + pDisplayParam = &pDisplayIdInfo->display_param; + + pDisplayParam->revision = pDisplayParamBlock->header.revision; + pDisplayParam->h_image_size_micro_meter = (pDisplayParamBlock->horizontal_image_size[1] << 8 | + pDisplayParamBlock->horizontal_image_size[0]) * + (pDisplayParamBlock->header.image_size_multiplier ? 1000 : 100); + pDisplayParam->v_image_size_micro_meter = (pDisplayParamBlock->vertical_image_size[1] << 8 | + pDisplayParamBlock->vertical_image_size[0]) * + (pDisplayParamBlock->header.image_size_multiplier ? 1000 : 100); + pDisplayParam->h_pixels = pDisplayParamBlock->horizontal_pixel_count[1] << 8 | + pDisplayParamBlock->horizontal_pixel_count[0]; + pDisplayParam->v_pixels = pDisplayParamBlock->vertical_pixel_count[1] << 8 | + pDisplayParamBlock->vertical_pixel_count[0]; + + pDisplayParam->scan_orientation = pDisplayParamBlock->feature.scan_orientation; + pDisplayParam->audio_speakers_integrated = pDisplayParamBlock->feature.audio_speaker_information ? AUDIO_SPEAKER_INTEGRATED_NOT_SUPPORTED : AUDIO_SPEAKER_INTEGRATED_SUPPORTED; + pDisplayParam->color_map_standard = pDisplayParamBlock->feature.color_information ? 
COLOR_MAP_CIE_1976 : COLOR_MAP_CIE_1931; + + // 12 bits Binary Fraction Representations + pDisplayParam->primaries[0].x = pDisplayParamBlock->primary_color_1_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_1_chromaticity.color_x_bits_low; + pDisplayParam->primaries[0].y = pDisplayParamBlock->primary_color_1_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_1_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->primaries[1].x = pDisplayParamBlock->primary_color_2_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_2_chromaticity.color_x_bits_low; + pDisplayParam->primaries[1].y = pDisplayParamBlock->primary_color_2_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_2_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->primaries[2].x = pDisplayParamBlock->primary_color_3_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_3_chromaticity.color_x_bits_low; + pDisplayParam->primaries[2].y = pDisplayParamBlock->primary_color_3_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_3_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->white.x = pDisplayParamBlock->white_point_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->white_point_chromaticity.color_x_bits_low; + pDisplayParam->white.y = pDisplayParamBlock->white_point_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->white_point_chromaticity.color_bits_mid.color_y_bits_low; + + // IEEE 754 half-precision binary floating-point format + pDisplayParam->native_max_luminance_full_coverage = pDisplayParamBlock->max_luminance_full_coverage[1] << 8 | + pDisplayParamBlock->max_luminance_full_coverage[0]; + pDisplayParam->native_max_luminance_1_percent_rect_coverage = pDisplayParamBlock->max_luminance_1_percent_rectangular_coverage[1] << 8 | + pDisplayParamBlock->max_luminance_1_percent_rectangular_coverage[0]; + pDisplayParam->native_min_luminance = pDisplayParamBlock->min_luminance[1] << 8 | + pDisplayParamBlock->min_luminance[0]; + + if (pDisplayParamBlock->feature.luminance_information == 0) + { + pDisplayParam->native_luminance_info = NATIVE_LUMINANCE_INFO_MIN_GURANTEE_VALUE; + } + else if (pDisplayParamBlock->feature.luminance_information == 1) + { + pDisplayParam->native_luminance_info = NATIVE_LUMINANCE_INFO_SOURCE_DEVICE_GUIDANCE; + } + else + { + return NVT_STATUS_ERR; + } + + UPDATE_BPC_FOR_COLORFORMAT(pDisplayParam->native_color_depth, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_6, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_8, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_10, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_12, + 0, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_16); + + pDisplayParam->device_technology = pDisplayParamBlock->color_depth_and_device_technology.device_technology; + if (pDisplayParam->revision == 1) + { + pDisplayParam->device_theme_Preference = pDisplayParamBlock->color_depth_and_device_technology.device_theme_preference; + } + pDisplayParam->gamma_x100 = (pDisplayParamBlock->gamma_EOTF + 100); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing7( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + 
NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_7_BLOCK *pTiming7Block = NULL; + NvU32 descriptorCount = 0; + NvU8 revision = 0; + NvU8 i = 0; + NvU8 startSeqNumber = 0; + + NVT_TIMING newTiming; + + pTiming7Block = (const DISPLAYID_2_0_TIMING_7_BLOCK *)pDataBlock; + revision = pTiming7Block->header.revision; // descriptor interpretation depends on the data block revision + + // Based on the DisplayID_2_0_E7 spec: + // future descriptors may be defined with more than 20 bytes per descriptor without creating a new timing type + if (pTiming7Block->header.payload_bytes_len == 0) + { + if (pDataBlock->data_bytes % sizeof(DISPLAYID_2_0_TIMING_7_DESCRIPTOR) != 0) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + descriptorCount = pDataBlock->data_bytes / (sizeof(DISPLAYID_2_0_TIMING_7_DESCRIPTOR) + pTiming7Block->header.payload_bytes_len); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_7); + } + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + if (parseDisplayId20Timing7Descriptor(&pTiming7Block->descriptors[i], &newTiming, revision, startSeqNumber+i) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + else + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + } + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing8( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_8_BLOCK *pTiming8Block = NULL; + NVT_TIMING newTiming; + NvU8 codeType = DISPLAYID_2_0_TIMING_CODE_RSERVED; + NvU8 codeCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i; + + pTiming8Block = (const DISPLAYID_2_0_TIMING_8_BLOCK *)pDataBlock; + + // 1-byte descriptor timing code + if (pTiming8Block->header.timing_code_size == DISPLAYID_2_0_TIMING_CODE_SIZE_1_BYTE) + { + if (pDataBlock->data_bytes % sizeof(DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE) != 0) + { + return NVT_STATUS_ERR; + } + + codeCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE); + if (codeCount < 1 || codeCount > DISPLAYID_2_0_TIMING_8_MAX_CODES) + { + return NVT_STATUS_ERR; + } + + codeType = pTiming8Block->header.timing_code_type; + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_8); + } + + for (i = 0; i < codeCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (codeType == DISPLAYID_2_0_TIMING_CODE_DMT) + { + if (NvTiming_EnumDMT((NvU32)(pTiming8Block->timing_code_1[i].timing_code), + &newTiming) != NVT_STATUS_SUCCESS) + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + break; + } + } + else if (codeType == DISPLAYID_2_0_TIMING_CODE_CTA_VIC) + { + if (NvTiming_EnumCEA861bTiming((NvU32)(pTiming8Block->timing_code_1[i].timing_code), + &newTiming) != NVT_STATUS_SUCCESS) + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + break; + } + } + else if (codeType == DISPLAYID_2_0_TIMING_CODE_HDMI_VIC) + { + if (NvTiming_EnumHdmiVsdbExtendedTiming((NvU32)(pTiming8Block->timing_code_1[i].timing_code), + &newTiming) != NVT_STATUS_SUCCESS) + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + break; + } + } + else + { + // RESERVED + break; + } + + newTiming.etc.flag |= ((pTiming8Block->header.revision 
>= 1) && pTiming8Block->header.is_support_yuv420) ? NVT_FLAG_DISPLAYID_T7_T8_EXPLICT_YUV420 : 0; + newTiming.etc.status = NVT_STATUS_DISPLAYID_8N(++startSeqNumber); + + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "DID20-Type8:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + } + else + { + nvt_assert(0); + // TODO: 2-byte descriptor timing codes are not yet defined in DID20. + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing9( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_9_BLOCK *pTiming9Block = NULL; + NVT_TIMING newTiming; + NvU32 descriptorCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i = 0; + + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_9_DESCRIPTOR); + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + pTiming9Block = (const DISPLAYID_2_0_TIMING_9_BLOCK *)pDataBlock; + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_9); + } + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayId20Timing9Descriptor(&pTiming9Block->descriptors[i], &newTiming, startSeqNumber+i) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + else + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing10( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_10_BLOCK *pTiming10Block = NULL; + NvU32 descriptorCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i = 0; + NvU8 eachOfDescriptorsSize = sizeof(DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR); + + NVT_TIMING newTiming; + + pTiming10Block = (const DISPLAYID_2_0_TIMING_10_BLOCK *)pDataBlock; + + if (pTiming10Block->header.type != DISPLAYID_2_0_BLOCK_TYPE_TIMING_10) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pTiming10Block->header.payload_bytes_len == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_6) + { + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_10_MAX_6BYTES_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + } + else if (pTiming10Block->header.payload_bytes_len == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7) + { + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_10_MAX_7BYTES_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + } + + eachOfDescriptorsSize += pTiming10Block->header.payload_bytes_len; + + if (pDisplayIdInfo != NULL) + { + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_10); + } + + for (i = 0; i < 
descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NVT_STATUS_SUCCESS == parseDisplayId20Timing10Descriptor(&pTiming10Block->descriptors[i*eachOfDescriptorsSize], &newTiming, pTiming10Block->header.payload_bytes_len, startSeqNumber+i)) + { + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + else + { + if (pDisplayIdInfo == NULL) return NVT_STATUS_ERR; + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20RangeLimit( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_RANGE_LIMITS_BLOCK *pRangeLimitsBlock = NULL; + NVT_DISPLAYID_RANGE_LIMITS *pRangeLimits = NULL; + + if (pDataBlock->data_bytes != DISPLAYID_2_0_RANGE_LIMITS_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo == NULL) return status; + + pRangeLimitsBlock = (const DISPLAYID_2_0_RANGE_LIMITS_BLOCK *)pDataBlock; + pRangeLimits = &pDisplayIdInfo->range_limits; + + pRangeLimits->revision = pDataBlock->revision; + + pRangeLimits->pclk_min = (pRangeLimitsBlock->pixel_clock_min[2] << 16 | + pRangeLimitsBlock->pixel_clock_min[1] << 8 | + pRangeLimitsBlock->pixel_clock_min[0]) + 1; + pRangeLimits->pclk_max = (pRangeLimitsBlock->pixel_clock_max[2] << 16 | + pRangeLimitsBlock->pixel_clock_max[1] << 8 | + pRangeLimitsBlock->pixel_clock_max[0]) + 1; + pRangeLimits->vfreq_min = pRangeLimitsBlock->vertical_frequency_min; + if (pRangeLimits->revision == 1) + { + pRangeLimits->vfreq_max = pRangeLimitsBlock->dynamic_video_timing_range_support.vertical_frequency_max_9_8 << 8 | + pRangeLimitsBlock->vertical_frequency_max_7_0; + } + else + { + pRangeLimits->vfreq_max = pRangeLimitsBlock->vertical_frequency_max_7_0; + } + + pRangeLimits->seamless_dynamic_video_timing_change = pRangeLimitsBlock->dynamic_video_timing_range_support.seamless_dynamic_video_timing_change; + + return status; +} + +#define ADD_COLOR_SPACE_EOTF_COMBINATION(_pInterfaceFeatures, _color_space, _eotf) do { \ + (_pInterfaceFeatures)->colorspace_eotf_combination[(_pInterfaceFeatures)->combination_count].color_space = (_color_space); \ + (_pInterfaceFeatures)->colorspace_eotf_combination[(_pInterfaceFeatures)->combination_count].eotf = (_eotf); \ + (_pInterfaceFeatures)->combination_count++; \ + } while(0) + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20DisplayInterfaceFeatures( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 i = 0; + const DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK *pInterfaceFeaturesBlock = NULL; + NVT_DISPLAYID_INTERFACE_FEATURES *pInterfaceFeatures = NULL; + + if (pDataBlock->data_bytes < DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK_PAYLOAD_LENGTH_MIN) + { + return NVT_STATUS_ERR; + } + + // Add more validation here if needed. 
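One note on the helper macro defined above: the do { ... } while(0) wrapper makes it expand to a single statement, so it composes safely with if/else, but it appends without a bounds check, and the size of colorspace_eotf_combination[] is not visible in this patch. A bounds-checked variant would look like the sketch below, where MAX_COMBINATIONS is a hypothetical stand-in for the real array length:

    /* Sketch only: bounds-checked variant of ADD_COLOR_SPACE_EOTF_COMBINATION.
     * MAX_COMBINATIONS is hypothetical; substitute the actual array size from
     * NVT_DISPLAYID_INTERFACE_FEATURES. */
    #define ADD_COLOR_SPACE_EOTF_COMBINATION_CHECKED(_p, _cs, _eotf) do {                       \
            if ((_p)->combination_count < MAX_COMBINATIONS)                                     \
            {                                                                                   \
                (_p)->colorspace_eotf_combination[(_p)->combination_count].color_space = (_cs); \
                (_p)->colorspace_eotf_combination[(_p)->combination_count].eotf = (_eotf);      \
                (_p)->combination_count++;                                                      \
            }                                                                                   \
        } while (0)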
+ + if (pDisplayIdInfo == NULL) return status; + + pInterfaceFeatures = &pDisplayIdInfo->interface_features; + + pInterfaceFeaturesBlock = (const DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK *)pDataBlock; + pInterfaceFeatures->revision = pDataBlock->revision; + + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->rgb444, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_6, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv444, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_6, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv422, + 0, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv420, + 0, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_16); + + // * 74.25MP/s + pInterfaceFeatures->yuv420_min_pclk = pInterfaceFeaturesBlock->min_pixel_rate_ycbcr420 * + 7425; + + pInterfaceFeatures->audio_capability.support_48khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_48_khz; + pInterfaceFeatures->audio_capability.support_44_1khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_44_1_khz; + pInterfaceFeatures->audio_capability.support_32khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_32_khz; + + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_srgb_eotf_srgb) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_SRGB, + INTERFACE_EOTF_SRGB); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt601_eotf_bt601) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT601, + INTERFACE_EOTF_BT601); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt709_eotf_bt1886) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT709, + INTERFACE_EOTF_BT1886); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_adobe_rgb_eotf_adobe_rgb) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_ADOBE_RGB, + INTERFACE_EOTF_ADOBE_RGB); + } + if 
(pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_dci_p3_eotf_dci_p3) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_DCI_P3, + INTERFACE_EOTF_DCI_P3); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt2020_eotf_bt2020) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT2020, + INTERFACE_EOTF_BT2020); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt2020_eotf_smpte_st2084) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT2020, + INTERFACE_EOTF_SMPTE_ST2084); + } + + for (i = 0; i < pInterfaceFeaturesBlock->additional_color_space_and_eotf_count.count; i++) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + pInterfaceFeaturesBlock->additional_color_space_and_eotf[i].color_space, + pInterfaceFeaturesBlock->additional_color_space_and_eotf[i].eotf); + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Stereo( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + if (pDisplayIdInfo == NULL) return status; + + // TODO: Implement the parsing here. + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20TiledDisplay( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TILED_DISPLAY_BLOCK *pTiledDisplayBlock = NULL; + NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY *pTileTopo = NULL; + + if (pDataBlock->data_bytes != DISPLAYID_2_0_TILED_DISPLAY_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo == NULL) return status; + + pTiledDisplayBlock = (const DISPLAYID_2_0_TILED_DISPLAY_BLOCK *)pDataBlock; + pTileTopo = &pDisplayIdInfo->tile_topo; + + pTileTopo->revision = pDataBlock->revision; + + pTileTopo->capability.bSingleEnclosure = pTiledDisplayBlock->capability.single_enclosure; + pTileTopo->capability.bHasBezelInfo = pTiledDisplayBlock->capability.has_bezel_info; + pTileTopo->capability.multi_tile_behavior = pTiledDisplayBlock->capability.multi_tile_behavior; + pTileTopo->capability.single_tile_behavior = pTiledDisplayBlock->capability.single_tile_behavior; + + pTileTopo->topology.row = ((pTiledDisplayBlock->topo_loc_high.row << 5) | + (pTiledDisplayBlock->topo_low.row)) + 1; + pTileTopo->topology.col = ((pTiledDisplayBlock->topo_loc_high.col << 5) | + (pTiledDisplayBlock->topo_low.col)) + 1; + pTileTopo->location.x = ((pTiledDisplayBlock->topo_loc_high.x << 5) | + (pTiledDisplayBlock->loc_low.x)); + pTileTopo->location.y = ((pTiledDisplayBlock->topo_loc_high.y << 5) | + (pTiledDisplayBlock->loc_low.y)); + + pTileTopo->native_resolution.width = ((pTiledDisplayBlock->native_resolution.width_high << 8) | + pTiledDisplayBlock->native_resolution.width_low) + 1; + pTileTopo->native_resolution.height = ((pTiledDisplayBlock->native_resolution.height_high << 8) | + pTiledDisplayBlock->native_resolution.height_low) + 1; + + pTileTopo->bezel_info.top = (pTiledDisplayBlock->bezel_info.top * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.bottom = (pTiledDisplayBlock->bezel_info.bottom * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.right = (pTiledDisplayBlock->bezel_info.right * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.left = (pTiledDisplayBlock->bezel_info.left * 
+ pTiledDisplayBlock->bezel_info.pixel_density) / 10; + + pTileTopo->tile_topology_id.vendor_id = pTiledDisplayBlock->topo_id.vendor_id[0] << 16 | + pTiledDisplayBlock->topo_id.vendor_id[1] << 8 | + pTiledDisplayBlock->topo_id.vendor_id[2]; + pTileTopo->tile_topology_id.product_id = pTiledDisplayBlock->topo_id.product_id[1] << 8 | + pTiledDisplayBlock->topo_id.product_id[0]; + pTileTopo->tile_topology_id.serial_number = pTiledDisplayBlock->topo_id.serial_number[3] << 24 | + pTiledDisplayBlock->topo_id.serial_number[2] << 16 | + pTiledDisplayBlock->topo_id.serial_number[1] << 8 | + pTiledDisplayBlock->topo_id.serial_number[0]; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20ContainerId( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_CONTAINERID_BLOCK *pContainerIdBlock = NULL; + NVT_DISPLAYID_CONTAINERID *pContainerId = NULL; + + if (pDataBlock->data_bytes != DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH) + { + return NVT_STATUS_ERR; + } + + if (pDisplayIdInfo == NULL) return status; + + pContainerIdBlock = (const DISPLAYID_2_0_CONTAINERID_BLOCK *)pDataBlock; + pContainerId = &pDisplayIdInfo->container_id; + + pContainerId->revision = pDataBlock->revision; + pContainerId->data1 = pContainerIdBlock->container_id[0] << 24 | + pContainerIdBlock->container_id[1] << 16 | + pContainerIdBlock->container_id[2] << 8 | + pContainerIdBlock->container_id[3]; + pContainerId->data2 = pContainerIdBlock->container_id[4] << 8 | + pContainerIdBlock->container_id[5]; + pContainerId->data3 = pContainerIdBlock->container_id[6] << 8 | + pContainerIdBlock->container_id[7]; + pContainerId->data4 = pContainerIdBlock->container_id[8] << 8 | + pContainerIdBlock->container_id[9]; + pContainerId->data5[0] = pContainerIdBlock->container_id[10]; + pContainerId->data5[1] = pContainerIdBlock->container_id[11]; + pContainerId->data5[2] = pContainerIdBlock->container_id[12]; + pContainerId->data5[3] = pContainerIdBlock->container_id[13]; + pContainerId->data5[4] = pContainerIdBlock->container_id[14]; + pContainerId->data5[5] = pContainerIdBlock->container_id[15]; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20AdaptiveSync( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK *pAdaptiveSyncBlock = NULL; + NvU32 descriptorCnt = 0; + NvU8 i = 0; + + if (pDisplayIdInfo == NULL) return status; + + pAdaptiveSyncBlock = (const DISPLAYID_2_0_ADAPTIVE_SYNC_BLOCK *)pDataBlock; + + if (pAdaptiveSyncBlock->header.payload_bytes_adaptive_sync_len == 0) + { + if (pDataBlock->data_bytes % sizeof(DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR) != 0) + { + nvt_assert(0); + return status; + } + + descriptorCnt = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_ADAPTIVE_SYNC_DESCRIPTOR); + + if (descriptorCnt < 1) return status; + + pDisplayIdInfo->total_adaptive_sync_descriptor = descriptorCnt; + + for (i = 0; i < descriptorCnt; i++) + { + // Byte 0 Adaptive-Sync Operation and Range Information + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.adaptive_sync_range = pAdaptiveSyncBlock->descriptors[i].operation_range_info.range; + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.duration_inc_flicker_perf = pAdaptiveSyncBlock->descriptors[i].operation_range_info.successive_frame_inc_tolerance; + 
pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.modes = pAdaptiveSyncBlock->descriptors[i].operation_range_info.modes; + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.seamless_not_support = pAdaptiveSyncBlock->descriptors[i].operation_range_info.seamless_transition_not_support; + pDisplayIdInfo->adaptive_sync_descriptor[i].u.information.duration_dec_flicker_perf = pAdaptiveSyncBlock->descriptors[i].operation_range_info.successive_frame_dec_tolerance; + + // 6.2 format (six integer bits, two fractional bits), giving a range of 0.00 to 63.75 ms + pDisplayIdInfo->adaptive_sync_descriptor[i].max_duration_inc = pAdaptiveSyncBlock->descriptors[i].max_single_frame_inc; + pDisplayIdInfo->adaptive_sync_descriptor[i].min_rr = pAdaptiveSyncBlock->descriptors[i].min_refresh_rate; + pDisplayIdInfo->adaptive_sync_descriptor[i].max_rr = pAdaptiveSyncBlock->descriptors[i].max_refresh_rate.max_rr_9_8 << 8 | + pAdaptiveSyncBlock->descriptors[i].max_refresh_rate.max_rr_7_0; + // 6.2 format (six integer bits, two fractional bits), giving a range of 0.00 to 63.75 ms + pDisplayIdInfo->adaptive_sync_descriptor[i].max_duration_dec = pAdaptiveSyncBlock->descriptors[i].max_single_frame_dec; + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20ARVRHMD( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + if (pDisplayIdInfo == NULL) return status; + + // TODO: Implement the parsing here. + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20ARVRLayer( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + if (pDisplayIdInfo == NULL) return status; + + // TODO: Implement the parsing here. 
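The max_duration_inc/max_duration_dec bytes stored above stay in the raw 6.2 fixed-point encoding, so one LSB is 0.25 ms. A conversion sketch follows; the helper name is hypothetical, not part of this patch:

    /* Sketch: convert a 6.2 fixed-point frame-duration byte to microseconds.
     * One LSB = 1/4 ms = 250 us; 0xFF therefore maps to 63750 us = 63.75 ms,
     * the top of the documented 0.00-63.75 ms range. */
    static NvU32 adaptiveSyncDuration62ToUs(NvU8 raw)
    {
        return (NvU32)raw * 250;
    }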
+ + return status; +} + + + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20VendorSpecific( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK *block = NULL; + NVT_DISPLAYID_VENDOR_SPECIFIC *pVendorSpecific = NULL; + NvU32 ieee_oui = 0; + + // Add more validation here if needed + + if (pDisplayIdInfo == NULL) return status; + + block = (const DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK*)pDataBlock; + pVendorSpecific = &pDisplayIdInfo->vendor_specific; + + ieee_oui = (NvU32)((block->vendor_id[0] << 16) | + (block->vendor_id[1] << 8) | + (block->vendor_id[2])); + + switch (ieee_oui) + { + case NVT_VESA_VENDOR_SPECIFIC_IEEE_ID: + // TODO: the parser below shall be updated if DID2.1 changes in the future + if (pDataBlock->data_bytes == NVT_VESA_VENDOR_SPECIFIC_LENGTH) + { + pVendorSpecific->vesaVsdb.data_struct_type.type = block->vendor_specific_data[3] & NVT_VESA_ORG_VSDB_DATA_TYPE_MASK; + pVendorSpecific->vesaVsdb.data_struct_type.color_space_and_eotf = (block->vendor_specific_data[3] & NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_MASK) >> NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_SHIFT; + + pVendorSpecific->vesaVsdb.overlapping.pixels_overlapping_count = block->vendor_specific_data[4] & NVT_VESA_ORG_VSDB_PIXELS_OVERLAPPING_MASK; + pVendorSpecific->vesaVsdb.overlapping.multi_sst = (block->vendor_specific_data[4] & NVT_VESA_ORG_VSDB_MULTI_SST_MODE_MASK) >> NVT_VESA_ORG_VSDB_MULTI_SST_MODE_SHIFT; + + pVendorSpecific->vesaVsdb.pass_through_integer.pass_through_integer_dsc = block->vendor_specific_data[5] & NVT_VESA_ORG_VSDB_PASS_THROUGH_INTEGER_MASK; + pVendorSpecific->vesaVsdb.pass_through_fractional.pass_through_fraction_dsc = block->vendor_specific_data[6] & NVT_VESA_ORG_VSDB_PASS_THROUGH_FRACTIOINAL_MASK; + } + else + { + status = NVT_STATUS_ERR; + } + break; + + default: + break; + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20CtaData( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + NVT_EDID_CEA861_INFO *p861Info = NULL; + const DISPLAYID_2_0_CTA_BLOCK * ctaBlock = NULL; + NvU8 *pcta_data = NULL; + + ctaBlock = (const DISPLAYID_2_0_CTA_BLOCK *)pDataBlock; + + // WAR: add a (size_t) cast when casting the member from const to non-const, to avoid an old Linux compiler failure in DVS. + pcta_data = (NvU8 *)(size_t)ctaBlock->cta_data; + + if (pDisplayIdInfo == NULL) + { + status = parseCta861DataBlockInfo(pcta_data, pDataBlock->data_bytes, NULL); + return status; + } + else + { + status = parseCta861DataBlockInfo(pcta_data, pDataBlock->data_bytes, &pDisplayIdInfo->cta.cta861_info); + } + + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + p861Info = &pDisplayIdInfo->cta.cta861_info; + + parseCta861VsdbBlocks(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + parseCta861HfScdb(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + // Reuse the CTA-861 parser for the 861 portion + parse861bShortTiming(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + // yuv420-only video + parse861bShortYuv420Timing(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + parseCea861HdrStaticMetadataDataBlock(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + + // See CEA-861-F section 7.5.12 regarding the VFPDB block. 
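Every section parsed above was first gated by computeDisplayId20SectionCheckSum(), defined further below: the bytes of a well-formed section, including the trailing checksum byte, sum to 0 modulo 256. A toy standalone illustration of that rule (not driver code; the 4-byte "section" is fabricated so its bytes total 0x100):

    #include <stdio.h>

    /* Toy illustration of the DisplayID section checksum rule. */
    static unsigned char sectionCheckSum(const unsigned char *p, unsigned int len)
    {
        unsigned int i, sum = 0;
        for (i = 0; i < len; i++) sum += p[i];
        return (unsigned char)(sum & 0xFF);   /* 0 for a valid section */
    }

    int main(void)
    {
        unsigned char s[4] = { 0x20, 0x01, 0x05, 0xDA };      /* 0x20+0x01+0x05+0xDA = 0x100 */
        printf("residue = 0x%02X\n", sectionCheckSum(s, 4));  /* prints 0x00 */
        return 0;
    }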
+ if (p861Info->total_vfpdb != 0) + { + parse861bShortPreferredTiming(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + } + + return status; +} + +// Helper functions +CODE_SEGMENT(PAGE_DD_CODE) +static NvU32 +greatestCommonDenominator( + NvU32 x, + NvU32 y) +{ + NvU32 g = 0; + + while (x > 0) + { + g = x; + x = y % x; + y = g; + } + return g; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +getPrimaryUseCase( + NvU8 product_type, + NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE *primary_use_case) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + switch (product_type) + { + case DISPLAYID_2_0_PROD_TEST: + *primary_use_case = PRODUCT_PRIMARY_USE_TEST_EQUIPMENT; + break; + case DISPLAYID_2_0_PROD_GENERIC_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_GENERIC_DISPLAY; + break; + case DISPLAYID_2_0_PROD_TELEVISION: + *primary_use_case = PRODUCT_PRIMARY_USE_TELEVISION; + break; + case DISPLAYID_2_0_PROD_DESKTOP_PRODUCTIVITY_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_DESKTOP_PRODUCTIVITY; + break; + case DISPLAYID_2_0_PROD_DESKTOP_GAMING_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_DESKTOP_GAMING; + break; + case DISPLAYID_2_0_PROD_PRESENTATION_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_PRESENTATION; + break; + case DISPLAYID_2_0_PROD_HMD_VR: + *primary_use_case = PRODUCT_PRIMARY_USE_HEAD_MOUNT_VIRTUAL_REALITY; + break; + case DISPLAYID_2_0_PROD_HMD_AR: + *primary_use_case = PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY; + break; + case DISPLAYID_2_0_PROD_EXTENSION: + status = NVT_STATUS_ERR; + break; + default: + status = NVT_STATUS_ERR; + } + + return status; +} + +// used in DID20 and DID20ext +CODE_SEGMENT(PAGE_DD_CODE) +NvU8 +computeDisplayId20SectionCheckSum( + const NvU8 *pSectionBytes, + NvU32 length) +{ + + NvU32 i = 0; + NvU32 checkSum = 0; + + // Each DisplayID section is composed of five mandatory bytes: + // DisplayID Structure Version and Revision + // Section Size + // Product Primary Use Case + // Extension Count + // Checksum + for (i = 0, checkSum = 0; i < length; i++) + { + checkSum += pSectionBytes[i]; + } + + return (checkSum & 0xFF); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool +assignNextAvailableDisplayId20Timing( + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + const NVT_TIMING *pTiming) +{ + if (pDisplayIdInfo == NULL) return NV_TRUE; + + if (pDisplayIdInfo->total_timings >= COUNT(pDisplayIdInfo->timing)) + { + return NV_FALSE; + } + + pDisplayIdInfo->timing[pDisplayIdInfo->total_timings] = *pTiming; + pDisplayIdInfo->total_timings++; + + return NV_TRUE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing7Descriptor( + const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *pDescriptor, + NVT_TIMING *pTiming, + NvU8 revision, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 gdc = 0; + + // pclk is in 10 kHz units + // pixel_clock is in kHz + pTiming->pclk = ((pDescriptor->pixel_clock[2] << 16 | + pDescriptor->pixel_clock[1] << 8 | + pDescriptor->pixel_clock[0]) + 1) / 10; + + pTiming->HBorder = 0; + pTiming->VBorder = 0; + + pTiming->HVisible = ((pDescriptor->horizontal.active_image_pixels[1] << 8) | + (pDescriptor->horizontal.active_image_pixels[0])) + 1; + pTiming->VVisible = ((pDescriptor->vertical.active_image_lines[1] << 8) | + (pDescriptor->vertical.active_image_lines[0])) + 1; + + pTiming->HTotal = (((pDescriptor->horizontal.blank_pixels[1] << 8) | + (pDescriptor->horizontal.blank_pixels[0])) + 1) + + pTiming->HVisible; + pTiming->VTotal = (((pDescriptor->vertical.blank_lines[1] << 8) | + 
(pDescriptor->vertical.blank_lines[0])) + 1) + + pTiming->VVisible; + + pTiming->HFrontPorch = ((pDescriptor->horizontal.front_porch_pixels_high << 8) | + (pDescriptor->horizontal.front_porch_pixels_low)) + 1; + pTiming->VFrontPorch = ((pDescriptor->vertical.front_porch_lines_high << 8) | + (pDescriptor->vertical.front_porch_lines_low)) + 1; + + pTiming->HSyncWidth = ((pDescriptor->horizontal.sync_width_pixels[1] << 8) | + (pDescriptor->horizontal.sync_width_pixels[0])) + 1; + pTiming->VSyncWidth = ((pDescriptor->vertical.sync_width_lines[1] << 8) | + (pDescriptor->vertical.sync_width_lines[0])) + 1; + + pTiming->HSyncPol = pDescriptor->horizontal.sync_polarity ? NVT_H_SYNC_POSITIVE : + NVT_H_SYNC_NEGATIVE; + pTiming->VSyncPol = pDescriptor->vertical.sync_polarity ? NVT_V_SYNC_POSITIVE : + NVT_V_SYNC_NEGATIVE; + + // EDID used in DP1.4 Compliance test had incorrect HBlank listed, leading to wrong raster sizes being set by driver (bug 2714607) + // Filter incorrect timings here. HTotal must cover sufficient blanking time + if (pTiming->HTotal < (pTiming->HVisible + pTiming->HFrontPorch + pTiming->HSyncWidth)) + { + return NVT_STATUS_ERR; + } + + pTiming->interlaced = pDescriptor->options.interface_frame_scanning_type; + + switch (pDescriptor->options.aspect_ratio) + { + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_1_1: + pTiming->etc.aspect = (1 << 16) | 1; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_5_4: + pTiming->etc.aspect = (5 << 16) | 4; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_4_3: + pTiming->etc.aspect = (4 << 16) | 3; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_15_9: + pTiming->etc.aspect = (15 << 16) | 9; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_9: + pTiming->etc.aspect = (16 << 16) | 9; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_10: + pTiming->etc.aspect = (16 << 16) | 10; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_64_27: + pTiming->etc.aspect = (64 << 16) | 27; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_256_135: + pTiming->etc.aspect = (256 << 16) | 135; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_CALCULATE: + gdc = greatestCommonDenominator(pTiming->HVisible, pTiming->VVisible); + if (gdc != 0) + { + pTiming->etc.aspect = ((pTiming->HVisible / gdc) << 16) | + (pTiming->VVisible / gdc); + } + else + { + pTiming->etc.aspect = 0; + } + break; + default: + pTiming->etc.aspect = 0; + } + + pTiming->etc.rr = NvTiming_CalcRR(pTiming->pclk, + pTiming->interlaced, + pTiming->HTotal, + pTiming->VTotal); + pTiming->etc.rrx1k = NvTiming_CalcRRx1k(pTiming->pclk, + pTiming->interlaced, + pTiming->HTotal, + pTiming->VTotal); + + pTiming->etc.flag |= (revision >= DISPLAYID_2_0_TYPE7_DSC_PASSTHRU_REVISION ) ? NVT_FLAG_DISPLAYID_T7_DSC_PASSTHRU : 0; + + if (revision >= DISPLAYID_2_0_TYPE7_YCC420_SUPPORT_REVISION) + { + pTiming->etc.flag |= pDescriptor->options.is_preferred_or_ycc420 ? NVT_FLAG_DISPLAYID_T7_T8_EXPLICT_YUV420 : 0; + + if (pDescriptor->options.is_preferred_or_ycc420) // YCC 420 support + { + UPDATE_BPC_FOR_COLORFORMAT(pTiming->etc.yuv420, 0, 1, 1, 1, 0, 1); + } + } + else + { + pTiming->etc.flag |= pDescriptor->options.is_preferred_or_ycc420 ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0; + } + + pTiming->etc.status = NVT_STATUS_DISPLAYID_7N(++count); + + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type7:#%2d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)((pTiming->interlaced ? 
2 : 1)*pTiming->VVisible), + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + pTiming->etc.name[sizeof(pTiming->etc.name) - 1] = '\0'; + + pTiming->etc.rep = 0x1; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing9Descriptor( + const DISPLAYID_2_0_TIMING_9_DESCRIPTOR *pDescriptor, + NVT_TIMING *pTiming, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 width = 0; + NvU32 height = 0; + NvU32 rr = 0; + + width = (pDescriptor->horizontal_active_pixels[1] << 8 | pDescriptor->horizontal_active_pixels[0]) + 1; + height = (pDescriptor->vertical_active_lines[1] << 8 | pDescriptor->vertical_active_lines[0]) + 1; + rr = pDescriptor->refresh_rate + 1; + + switch (pDescriptor->options.timing_formula) + { + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD: + status = NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1: + status = NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_2: + status = NvTiming_CalcCVT_RB2(width, height, rr, pDescriptor->options.rr_1000div1001_support, pTiming); + break; + default: + status = NVT_STATUS_ERR; + break; + } + + if (status == NVT_STATUS_SUCCESS) + { + NVMISC_MEMSET(pTiming->etc.name, 0, sizeof(pTiming->etc.name)); + pTiming->etc.status = NVT_STATUS_DISPLAYID_9N(++count); + + if ( pDescriptor->options.timing_formula== DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else if (pDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9-RB1:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else if (pDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_2) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9-RB2:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? 
"I":"P")); + } + } + + pTiming->etc.name[sizeof(pTiming->etc.name) - 1] = '\0'; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing10Descriptor( + const void *pDescriptor, + NVT_TIMING *pTiming, + NvU8 payloadbytes, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR* p6bytesDescriptor = NULL; + const DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR* p7bytesDescriptor = NULL; + NvU32 width = 0; + NvU32 height = 0; + NvU32 rr = 0; + + p6bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR *)pDescriptor; + + width = (p6bytesDescriptor->horizontal_active_pixels[1] << 8 | p6bytesDescriptor->horizontal_active_pixels[0]) + 1; + height = (p6bytesDescriptor->vertical_active_lines[1] << 8 | p6bytesDescriptor->vertical_active_lines[0]) + 1; + rr = p6bytesDescriptor->refresh_rate + 1; + + if (payloadbytes == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7) + { + p7bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR *)pDescriptor; + rr = (p7bytesDescriptor->descriptor_6_bytes.refresh_rate | p7bytesDescriptor->refresh_rate_high << 8) + 1; + } + + switch (p6bytesDescriptor->options.timing_formula) + { + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD: + status = NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1: + status = NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_2: + status = NvTiming_CalcCVT_RB2(width, height, rr, p6bytesDescriptor->options.rr1000div1001_or_hblank, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_3: + { + NvU32 deltaHBlank = 0; + + if (p7bytesDescriptor != NULL) + { + if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 0) // Horizontal Blank in Pixels = [Field Value] * 8 + 80 + { + deltaHBlank = p7bytesDescriptor->delta_hblank * 8; + } + else if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 1) + { + if (p7bytesDescriptor->delta_hblank <= 5) + deltaHBlank = (p7bytesDescriptor->delta_hblank * 8 + 160) - 80; + else // if 5 < Field Value <=7 + deltaHBlank = (160 - ((p7bytesDescriptor->delta_hblank - 5) * 8)) - 80; + } + + status = NvTiming_CalcCVT_RB3(width, height, rr, deltaHBlank, p7bytesDescriptor->additional_vblank_timing * 35, p6bytesDescriptor->options.early_vsync, pTiming); + } + else // 6 byte descriptor + { + if (p6bytesDescriptor->options.rr1000div1001_or_hblank == 1) + deltaHBlank = 80; + + status = NvTiming_CalcCVT_RB3(width, height, rr, deltaHBlank, 0, p6bytesDescriptor->options.early_vsync, pTiming); + } + break; + } + } + + if ( status == NVT_STATUS_SUCCESS ) + { + NVMISC_MEMSET(pTiming->etc.name, 0, sizeof(pTiming->etc.name)); + pTiming->etc.status = NVT_STATUS_DISPLAYID_10N(++count); + + if (p6bytesDescriptor->options.ycc420_support) + { + // YCC 420 support + UPDATE_BPC_FOR_COLORFORMAT(pTiming->etc.yuv420, 0, 1, 1, 1, 0, 1); + } + + if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? 
"I":"P")); + + } + else if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10RB1:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_2) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10RB2:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_2_0_REDUCED_BLANKING_3) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10RB3:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + } + + pTiming->etc.name[sizeof(pTiming->etc.name) - 1] = '\0'; + + return status; +} + +// get the existed stored timing sequence number +CODE_SEGMENT(PAGE_DD_CODE) +static NvU8 +getExistedTimingSeqNumber( + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + enum NVT_TIMING_TYPE timingType) +{ + NvU8 count = 0; + NvU8 i = 0; + + switch (timingType) + { + case NVT_TYPE_DISPLAYID_7: + case NVT_TYPE_DISPLAYID_8: + case NVT_TYPE_DISPLAYID_9: + case NVT_TYPE_DISPLAYID_10: + break; + default: + return count; + } + + for (i = 0; i< pDisplayIdInfo->total_timings; i++) + { + if (NVT_GET_TIMING_STATUS_TYPE(pDisplayIdInfo->timing[i].etc.status) == timingType) + { + ++count; + } + } + + return count; +} + +// get the version +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 getDID2Version(NvU8 *pData, NvU32 *pVer) +{ + const DISPLAYID_2_0_SECTION *pSection = (const DISPLAYID_2_0_SECTION*)pData; + + *pVer = 0; + if (pSection->header.version == 0x2) + { + *pVer = (((NvU32)pSection->header.version) << 8) + ((NvU32)pSection->header.revision); + } + else + return NVT_STATUS_ERR; // un-recongnized DisplayID20 version + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void +updateColorFormatForDisplayId20Timings( + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info, + NvU32 timingIdx) +{ + // pDisplayId20Info parsed displayID20 info + NVT_TIMING *pT= &pDisplayId20Info->timing[timingIdx]; + + nvt_assert(timingIdx <= COUNT(pDisplayId20Info->timing)); + + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, // yuv444 does not support 6bpc + pDisplayId20Info->interface_features.yuv444.bpc.bpc8, + pDisplayId20Info->interface_features.yuv444.bpc.bpc10, + pDisplayId20Info->interface_features.yuv444.bpc.bpc12, + pDisplayId20Info->interface_features.yuv444.bpc.bpc14, + pDisplayId20Info->interface_features.yuv444.bpc.bpc16); + // yuv422 + 
UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, // yuv422 does not support 6bpc + pDisplayId20Info->interface_features.yuv422.bpc.bpc8, + pDisplayId20Info->interface_features.yuv422.bpc.bpc10, + pDisplayId20Info->interface_features.yuv422.bpc.bpc12, + pDisplayId20Info->interface_features.yuv422.bpc.bpc14, + pDisplayId20Info->interface_features.yuv422.bpc.bpc16); + + if (!NVT_DID20_TIMING_IS_CTA861(pT->etc.flag, pT->etc.status)) + { + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, // yuv420 does not support 6bpc + pDisplayId20Info->interface_features.yuv420.bpc.bpc8, + pDisplayId20Info->interface_features.yuv420.bpc.bpc10, + pDisplayId20Info->interface_features.yuv420.bpc.bpc12, + pDisplayId20Info->interface_features.yuv420.bpc.bpc14, + pDisplayId20Info->interface_features.yuv420.bpc.bpc16); + } +} +POP_SEGMENTS + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c new file mode 100644 index 0000000..d644d37 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c @@ -0,0 +1,272 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+// +// File: nvt_dmt.c +// +// Purpose: calculate DMT/DMT-RB timing +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +// DMT table +// Macro to declare a TIMING initializer for given parameters without border +#define DMT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT,id),"VESA DMT"}} + +#define DMTRB_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB,id),"VESA DMT/RB"}} + +#define DMTRB_2_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB_2,id),"VESA DMT/RB2"}} + +DATA_SEGMENT(PAGE_DATA) + +static NVT_TIMING DMT[] = +{ + // VESA standard entries (ordered according to VESA DMT ID). + // hv,hfp,hsw, ht,hsp, vv,vfp,vsw, vt,vsp, rr,pclk , id + DMT_TIMING ( 640, 32, 64, 832,'+', 350, 32, 3, 445,'-', 85, 3150, 0x01), + DMT_TIMING ( 640, 32, 64, 832,'-', 400, 1, 3, 445,'+', 85, 3150, 0x02), + DMT_TIMING ( 720, 36, 72, 936,'-', 400, 1, 3, 446,'+', 85, 3550, 0x03), + DMT_TIMING ( 640, 8, 96, 800,'-', 480, 2, 2, 525,'-', 60, 2518, 0x04), + // 640x480x72Hz (VESA) - this entry have borders + {640,8,16,40,832,NVT_H_SYNC_NEGATIVE,480,8,1,3,520,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,3150,{0,72,72000,0,1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT,5),"VESA DMT"}}, + DMT_TIMING ( 640, 16, 64, 840,'-', 480, 1, 3, 500,'-', 75, 3150, 0x06), + DMT_TIMING ( 640, 56, 56, 832,'-', 480, 1, 3, 509,'-', 85, 3600, 0x07), + DMT_TIMING ( 800, 24, 72,1024,'+', 600, 1, 2, 625,'+', 56, 3600, 0x08), + DMT_TIMING ( 800, 40,128,1056,'+', 600, 1, 4, 628,'+', 60, 4000, 0x09), + DMT_TIMING ( 800, 56,120,1040,'+', 600, 37, 6, 666,'+', 72, 5000, 0x0A), + DMT_TIMING ( 800, 16, 80,1056,'+', 600, 1, 3, 625,'+', 75, 4950, 0x0B), + DMT_TIMING ( 800, 32, 64,1048,'+', 600, 1, 3, 631,'+', 85, 5625, 0x0C), + DMTRB_TIMING( 800, 48, 32, 960,'+', 600, 3, 4, 636,'-',120, 7325, 0x0D), + DMT_TIMING ( 848, 16,112,1088,'+', 480, 6, 8, 517,'+', 60, 3375, 0x0E), + DMT_TIMING (1024, 8,176,1264,'+', 768, 0, 4, 817,'+', 43, 4490, 0x0F), + DMT_TIMING (1024, 24,136,1344,'-', 768, 3, 6, 806,'-', 60, 6500, 0x10), + DMT_TIMING (1024, 24,136,1328,'-', 768, 3, 6, 806,'-', 70, 7500, 0x11), + DMT_TIMING (1024, 16, 96,1312,'+', 768, 1, 3, 800,'+', 75, 7875, 0x12), + DMT_TIMING (1024, 48, 96,1376,'+', 768, 1, 3, 808,'+', 85, 9450, 0x13), + DMTRB_TIMING(1024, 48, 32,1184,'+', 768, 3, 4, 813,'-',120,11550, 0x14), + DMT_TIMING (1152, 64,128,1600,'+', 864, 1, 3, 900,'+', 75,10800, 0x15), + DMTRB_TIMING(1280, 48, 32,1440,'+', 768, 3, 7, 790,'-', 60, 6825, 0x16), + DMT_TIMING (1280, 64,128,1664,'-', 768, 3, 7, 798,'+', 60, 7950, 0x17), + DMT_TIMING (1280, 80,128,1696,'-', 768, 3, 7, 805,'+', 75,10225, 0x18), + DMT_TIMING (1280, 80,136,1712,'-', 768, 3, 7, 809,'+', 85,11750, 0x19), + DMTRB_TIMING(1280, 48, 32,1440,'+', 768, 3, 7, 813,'-',120,14025, 0x1A), + DMTRB_TIMING(1280, 48, 32,1440,'+', 800, 3, 6, 823,'-', 60, 7100, 0x1B), + DMT_TIMING (1280, 72,128,1680,'-', 800, 3, 6, 831,'+', 60, 8350, 0x1C), + DMT_TIMING (1280, 
80,128,1696,'-', 800, 3, 6, 838,'+', 75,10650, 0x1D), + DMT_TIMING (1280, 80,136,1712,'-', 800, 3, 6, 843,'+', 85,12250, 0x1E), + DMTRB_TIMING(1280, 48, 32,1440,'+', 800, 3, 6, 847,'-',120,14625, 0x1F), + DMT_TIMING (1280, 96,112,1800,'+', 960, 1, 3,1000,'+', 60,10800, 0x20), + DMT_TIMING (1280, 64,160,1728,'+', 960, 1, 3,1011,'+', 85,14850, 0x21), + DMTRB_TIMING(1280, 48, 32,1440,'+', 960, 3, 4,1017,'-',120,17550, 0x22), + DMT_TIMING (1280, 48,112,1688,'+',1024, 1, 3,1066,'+', 60,10800, 0x23), + DMT_TIMING (1280, 16,144,1688,'+',1024, 1, 3,1066,'+', 75,13500, 0x24), + DMT_TIMING (1280, 64,160,1728,'+',1024, 1, 3,1072,'+', 85,15750, 0x25), + DMTRB_TIMING(1280, 48, 32,1440,'+',1024, 3, 7,1084,'-',120,18725, 0x26), + DMT_TIMING (1360, 64,112,1792,'+', 768, 3, 6, 795,'+', 60, 8550, 0x27), + DMTRB_TIMING(1360, 48, 32,1520,'+', 768, 3, 5, 813,'-',120,14825, 0x28), + DMTRB_TIMING(1400, 48, 32,1560,'+',1050, 3, 4,1080,'-', 60,10100, 0x29), + DMT_TIMING (1400, 88,144,1864,'-',1050, 3, 4,1089,'+', 60,12175, 0x2A), + DMT_TIMING (1400,104,144,1896,'-',1050, 3, 4,1099,'+', 75,15600, 0x2B), + DMT_TIMING (1400,104,152,1912,'-',1050, 3, 4,1105,'+', 85,17950, 0x2C), + DMTRB_TIMING(1400, 48, 32,1560,'+',1050, 3, 4,1050,'-',120,20800, 0x2D), + DMTRB_TIMING(1440, 48, 32,1600,'+', 900, 3, 6, 926,'-', 60, 8875, 0x2E), + DMT_TIMING (1440, 80,152,1904,'-', 900, 3, 6, 934,'+', 60,10650, 0x2F), + DMT_TIMING (1440, 96,152,1936,'-', 900, 3, 6, 942,'+', 75,13675, 0x30), + DMT_TIMING (1440,104,152,1952,'-', 900, 3, 6, 948,'+', 85,15700, 0x31), + DMTRB_TIMING(1440, 48, 32,1600,'+', 900, 3, 6, 953,'-',120,18275, 0x32), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 60,16200, 0x33), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 65,17550, 0x34), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 70,18900, 0x35), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 75,20250, 0x36), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 85,22950, 0x37), + DMTRB_TIMING(1600, 48, 32,1760,'+',1200, 3, 4,1271,'-',120,26825, 0x38), + DMTRB_TIMING(1680, 48, 32,1840,'+',1050, 3, 6,1080,'-', 60,11900, 0x39), + DMT_TIMING (1680,104,176,2240,'-',1050, 3, 6,1089,'+', 60,14625, 0x3A), + DMT_TIMING (1680,120,176,2272,'-',1050, 3, 6,1099,'+', 75,18700, 0x3B), + DMT_TIMING (1680,128,176,2288,'-',1050, 3, 6,1105,'+', 85,21475, 0x3C), + DMTRB_TIMING(1680, 48, 32,1840,'+',1050, 3, 6,1112,'-',120,24550, 0x3D), + DMT_TIMING (1792,128,200,2448,'-',1344, 1, 3,1394,'+', 60,20475, 0x3E), + DMT_TIMING (1792, 96,216,2456,'-',1344, 1, 3,1417,'+', 75,26100, 0x3F), + DMTRB_TIMING(1792, 48, 32,1952,'+',1344, 3, 4,1423,'-',120,33325, 0x40), + DMT_TIMING (1856, 96,224,2528,'-',1392, 1, 3,1439,'+', 60,21825, 0x41), + DMT_TIMING (1856,128,224,2560,'-',1392, 1, 3,1500,'+', 75,28800, 0x42), + DMTRB_TIMING(1856, 48, 32,2016,'+',1392, 3, 4,1474,'-',120,35650, 0x43), + DMTRB_TIMING(1920, 48, 32,2080,'+',1200, 3, 6,1235,'-', 60,15400, 0x44), + DMT_TIMING (1920,136,200,2592,'-',1200, 3, 6,1245,'+', 60,19325, 0x45), + DMT_TIMING (1920,136,208,2608,'-',1200, 3, 6,1255,'+', 75,24525, 0x46), + DMT_TIMING (1920,144,208,2624,'-',1200, 3, 6,1262,'+', 85,28125, 0x47), + DMTRB_TIMING(1920, 48, 32,2080,'+',1200, 3, 6,1271,'-',120,31700, 0x48), + DMT_TIMING (1920,128,208,2600,'-',1440, 1, 3,1500,'+', 60,23400, 0x49), + DMT_TIMING (1920,144,224,2640,'-',1440, 1, 3,1500,'+', 75,29700, 0x4A), + DMTRB_TIMING(1920, 48, 32,2080,'+',1440, 3, 4,1525,'-',120,38050, 0x4B), + DMTRB_TIMING(2560, 48, 32,2720,'+',1600, 3, 6,1646,'-', 60,26850, 0x4C), + 
DMT_TIMING  (2560,192,280,3504,'-',1600,  3,  6,1658,'+', 60,34850, 0x4D),
+    DMT_TIMING  (2560,208,280,3536,'-',1600,  3,  6,1672,'+', 75,44325, 0x4E),
+    DMT_TIMING  (2560,208,280,3536,'-',1600,  3,  6,1682,'+', 85,50525, 0x4F),
+    DMTRB_TIMING(2560, 48, 32,2720,'+',1600,  3,  6,1694,'-',120,55275, 0x50),
+    DMT_TIMING  (1366, 70,143,1792,'+', 768,  3,  3, 798,'+', 60, 8550, 0x51),//1366 x 768 @60 (non-interlaced) DMT ID: 51h
+    DMT_TIMING  (1920, 88, 44,2200,'+',1080,  4,  5,1125,'+', 60,14850, 0x52),//1920 x 1080 @60 (non-interlaced) DMT ID: 52h
+    DMTRB_TIMING(1600, 24, 80,1800,'+', 900,  1,  3,1000,'+', 60,10800, 0x53),//1600 x 900 @60 (non-interlaced) DMT ID: 53h
+    DMTRB_TIMING(2048, 26, 80,2250,'+',1152,  1,  3,1200,'+', 60,16200, 0x54),//2048 x 1152 @60 (non-interlaced) DMT ID: 54h
+    DMT_TIMING  (1280,110, 40,1650,'+', 720,  5,  5, 750,'+', 60, 7425, 0x55),//1280 x 720 @60 (non-interlaced) DMT ID: 55h
+    DMTRB_TIMING(1366, 14, 56,1500,'+', 768,  1,  3, 800,'+', 60, 7200, 0x56),//1366 x 768 @60 (non-interlaced) DMT ID: 56h
+
+    // Added timing definitions in DMT 1.3 Version 1.0, Rev. 13
+    DMTRB_2_TIMING(4096,  8, 56,4176,'+', 2160, 48,  8, 2222,'-', 60,55674, 0x57),//4096 x 2160 @60 (non-interlaced) DMT ID: 57h
+    DMTRB_2_TIMING(4096,  8, 56,4176,'+', 2160, 48,  8, 2222,'-', 59,55619, 0x58),//4096 x 2160 @59.94 (non-interlaced) DMT ID: 58h
+
+    // ********************************
+    // Additional non-standard entries.
+    // ********************************
+
+    // Settings for 640x400:
+    // the GTF timing for 640x400@60Hz has too low an HFreq, so these are
+    // specially constructed timings derived from 640x480, with extra
+    // blanking at the top and bottom of the screen.
+
+    DMT_TIMING(640,16,96,800,'-',400,50,2,525,'-',60,2518,0),
+    DMT_TIMING(640,16,96,800,'+',400,12,2,449,'-',70,2518,0),
+
+    // end of the table
+    NVT_TIMING_SENTINEL
+};
+static NvU32 MAX_DMT_FORMAT = sizeof(DMT)/sizeof(DMT[0]) - 1;
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EnumDMT(NvU32 dmtId, NVT_TIMING *pT)
+{
+    if ((pT == NULL) || (dmtId == 0))
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    // The last entry is the NVT_TIMING_SENTINEL terminator, not a valid DMT.
+    if (dmtId > MAX_DMT_FORMAT)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    // Make sure the entry's stored DMT ID matches its position in the array.
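+    // DMT[] is ordered so that entry (dmtId - 1) carries sequence number
+    // dmtId in its status field; if a table edit ever breaks that invariant,
+    // failing the lookup is safer than returning the wrong timing.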
+    if (NVT_GET_TIMING_STATUS_SEQ(DMT[dmtId - 1].etc.status) == dmtId)
+    {
+        *pT = DMT[dmtId - 1];
+
+        pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk,
+                                  (NvU32)10000*(NvU32)1000,
+                                  (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+        NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT:#%d:%dx%dx%dHz",
+                     dmtId, pT->HVisible, pT->VVisible, pT->etc.rr);
+        ((char *)pT->etc.name)[39] = '\0';
+
+        return NVT_STATUS_SUCCESS;
+    }
+
+    return NVT_STATUS_ERR;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_CalcDMT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT)
+{
+    NVT_TIMING *p = (NVT_TIMING *)DMT;
+
+    if (pT == NULL)
+        return NVT_STATUS_ERR;
+
+    if (width == 0 || height == 0 || rr == 0)
+        return NVT_STATUS_ERR;
+
+    // there are no interlaced DMT timings
+    if ((flag & NVT_PVT_INTERLACED_MASK) != 0)
+        return NVT_STATUS_ERR;
+
+    while (p->HVisible != 0 && p->VVisible != 0)
+    {
+        if (NVT_GET_TIMING_STATUS_TYPE(p->etc.status) == NVT_TYPE_DMT)
+        {
+            if ((NvU32)p->HVisible == width &&
+                (NvU32)p->VVisible == height &&
+                (NvU32)p->etc.rr == rr)
+            {
+                NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING));
+                *pT = *p;
+                pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+                NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT:%dx%dx%dHz", width, height, rr);
+                pT->etc.name[39] = '\0';
+                pT->etc.rgb444.bpc.bpc8 = 1;
+                return NVT_STATUS_SUCCESS;
+            }
+        }
+        p++;
+    }
+
+    // If we couldn't find a DMT with regular blanking, try the reduced-blanking DMTs next.
+    return NvTiming_CalcDMT_RB(width, height, rr, flag, pT);
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_CalcDMT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT)
+{
+    NVT_TIMING *p = (NVT_TIMING *)DMT;
+
+    if (pT == NULL)
+        return NVT_STATUS_ERR;
+
+    if (width == 0 || height == 0 || rr == 0)
+        return NVT_STATUS_ERR;
+
+    // there are no interlaced DMT timings
+    if ((flag & NVT_PVT_INTERLACED_MASK) != 0)
+        return NVT_STATUS_ERR;
+
+    while (p->HVisible != 0 && p->VVisible != 0)
+    {
+        // Select only reduced-blanking (DMT-RB) timings.
+        if (NVT_GET_TIMING_STATUS_TYPE(p->etc.status) == NVT_TYPE_DMT_RB)
+        {
+            if ((NvU32)p->HVisible == width &&
+                (NvU32)p->VVisible == height &&
+                (NvU32)p->etc.rr == rr)
+            {
+                NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING));
+                *pT = *p;
+                pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+                NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT-RB:%dx%dx%dHz", width, height, rr);
+                pT->etc.name[39] = '\0';
+                pT->etc.rgb444.bpc.bpc8 = 1;
+                return NVT_STATUS_SUCCESS;
+            }
+        }
+        p++;
+    }
+    return NVT_STATUS_ERR;
+}
+
+POP_SEGMENTS
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c
new file mode 100644
index 0000000..3ac5011
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c
@@ -0,0 +1,2303 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+//=============================================================================
+//
+// Provides functions to calculate the PPS (Picture Parameter Set)
+//
+//
+//=============================================================================
+
+/* ------------------------ Includes --------------------------------------- */
+#include "nvt_dsc_pps.h"
+#include "nvmisc.h"
+#include "displayport/displayport.h"
+#include
+
+/* ------------------------ Macros ----------------------------------------- */
+
+#if defined (DEBUG)
+#define DSC_Print(...)                               \
+    do {                                             \
+        if (callbacks.dscPrint) {                    \
+            callbacks.dscPrint("DSC: " __VA_ARGS__); \
+        }                                            \
+    } while(0)
+#else
+#define DSC_Print(...) do { } while(0)
+#endif
+
+#define MIN_CHECK(s,a,b)       { if((a)<(b))  { DSC_Print("%s (=%u) needs to be larger than %u",s,a,b); return (NVT_STATUS_ERR);} }
+#define RANGE_CHECK(s,a,b,c)   { if((((NvS32)(a))<(NvS32)(b))||(((NvS32)(a))>(NvS32)(c))) { DSC_Print("%s (=%u) needs to be between %u and %u",s,a,b,c); return (NVT_STATUS_ERR);} }
+#define ENUM2_CHECK(s,a,b,c)   { if(((a)!=(b))&&((a)!=(c))) { DSC_Print("%s (=%u) needs to be %u or %u",s,a,b,c); return (NVT_STATUS_ERR);} }
+#define ENUM3_CHECK(s,a,b,c,d) { if(((a)!=(b))&&((a)!=(c))&&((a)!=(d))) { DSC_Print("%s (=%u) needs to be %u, %u or %u",s,a,b,c,d); return (NVT_STATUS_ERR);} }
+#define MAX(a,b) (((a)>=(b) || (b == 0xffffffff))?(a):(b))
+#define MIN(a,b) ((a)>=(b)?(b):(a))
+#define CLAMP(a,b,c) ((a)<=(b)?(b):((a)>(c)?(c):(a)))
+#define ADJUST_SLICE_NUM(n) ((n)>4?8:((n)>2?4:(n)))
+#define MSB(a) (((a)>>8)&0xFF)
+#define LSB(a) ((a)&0xFF)
+
+#define NUM_BUF_RANGES 15
+#define BPP_UNIT 16
+#define OFFSET_FRACTIONAL_BITS 11
+#define PIXELS_PER_GROUP 3
+
+// The max pclk frequency per slice (stored in kHz).
+// The DP1.4 spec defines the number of slices needed per display line
+// based on the pixel rate; it works out to about 340 MHz per slice.
+#define MAX_PCLK_PER_SLICE_KHZ 340000
+// The max slice_width used in the slice_width calculation.
+// This is not a HW limitation (which is 5120 per head), just a recommendation.
+// The RC algorithm performs better when the slice size is bigger; this
+// requires the slice size to be much greater than rc_model_size (8K bits),
+// but a bigger slice increases the error rate of DSC slices.
+//256KB is a moderate value (about 1280x200 @8bpp) +#define MIN_SLICE_SIZE (256*1024) +// Per DP 1.4 spec, sink should support slice width of up to at least 2560 (it is allowed to support more). +#define SINK_MAX_SLICE_WIDTH_DEFAULT 2560 +// Min bits per pixel supported +#define MIN_BITS_PER_PIXEL 8 +// Max bits per pixel supported +#define MAX_BITS_PER_PIXEL 32 +// Max HBlank pixel count +#define MAX_HBLANK_PIXELS 7680 + +/* ------------------------ Datatypes -------------------------------------- */ + +//input parameters to the pps calculation +typedef struct +{ + NvU32 dsc_version_minor; // DSC minor version (1-DSC1.1, 2-DSC 1.2) + NvU32 bits_per_component; // bits per component of input pixels (8,10,12) + NvU32 linebuf_depth; // bits per component of reconstructed line buffer (8 ~ 13) + NvU32 block_pred_enable; // block prediction enable (0, 1) + NvU32 convert_rgb; // input pixel format (0 YCbCr, 1 RGB) + NvU32 bits_per_pixel; // bits per pixel*BPP_UNIT (8.0*BPP_UNIT ~ 32.0*BPP_UNIT) + NvU32 pic_height; // picture height (8 ~ 8192) + NvU32 pic_width; // picture width (single mode: 32 ~ 5120, dual mode: 64 ~ 8192) + NvU32 slice_height; // 0 - auto, others (8 ~ 8192) - must be (pic_height % slice_height == 0) + NvU32 slice_width; // maximum slice_width, 0-- default: 1280. + NvU32 slice_num; // 0 - auto, others: 1,2,4,8 + NvU32 slice_count_mask; // no of slices supported by sink + NvU32 max_slice_num; // slice number cap determined from GPU and sink caps + NvU32 max_slice_width; // slice width cap determined from GPU and sink caps + NvU32 pixel_clkMHz; // pixel clock frequency in MHz, used for slice_width calculation. + NvU32 dual_mode; // 0 - single mode, 1 - dual mode, only for checking pic_width + NvU32 simple_422; // 4:2:2 simple mode + NvU32 native_420; // 420 native mode + NvU32 native_422; // 422 native mode + NvU32 drop_mode; // 0 - normal mode, 1 - drop mode. + NvU32 peak_throughput_mode0; // peak throughput supported by the sink for 444 and simple 422 modes. + NvU32 peak_throughput_mode1; // peak throughput supported by the sink for native 422 and 420 modes. 
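+
+    // NOTE: bits_per_pixel above is carried in 1/16-bpp units (BPP_UNIT),
+    // so e.g. a compressed target of 10.5 bpp is passed in as 10.5 * 16 = 168.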
+} DSC_INPUT_PARAMS; + +//output pps parameters after calculation +typedef struct +{ + NvU32 dsc_version_major; // DSC major version, always 1 + NvU32 dsc_version_minor; // DSC minor version + NvU32 pps_identifier; // Application-specific identifier, always 0 + NvU32 bits_per_component; // bits per component for input pixels + NvU32 linebuf_depth; // line buffer bit depth + NvU32 block_pred_enable; // enable/disable block prediction + NvU32 convert_rgb; // color space for input pixels + NvU32 simple_422; // 4:2:2 simple mode + NvU32 vbr_enable; // enable VBR mode + NvU32 bits_per_pixel; // (bits per pixel * BPP_UNIT) after compression + NvU32 pic_height; // picture height + NvU32 pic_width; // picture width + NvU32 slice_height; // slice height + NvU32 slice_width; // slice width + NvU32 chunk_size; // the size in bytes of the slice chunks + NvU32 initial_xmit_delay; // initial transmission delay + NvU32 initial_dec_delay; // initial decoding delay + NvU32 initial_scale_value; // initial xcXformScale factor value + NvU32 scale_increment_interval; // number of group times between incrementing the rcXformScale factor + NvU32 scale_decrement_interval; // number of group times between decrementing the rcXformScale factor + NvU32 first_line_bpg_offset; // number of additional bits allocated for each group on the first line in a slice + NvU32 nfl_bpg_offset; // number of bits de-allocated for each group after the first line in a slice + NvU32 slice_bpg_offset; // number of bits de-allocated for each group to enforce the slice constrain + NvU32 initial_offset; // initial value for rcXformOffset + NvU32 final_offset; // maximum end-of-slice value for rcXformOffset + NvU32 flatness_min_qp; // minimum flatness QP + NvU32 flatness_max_qp; // maximum flatness QP + //rc_parameter_set + NvU32 rc_model_size; // number of bits within the "RC model" + NvU32 rc_edge_factor; // edge detection factor + NvU32 rc_quant_incr_limit0; // QP threshold for short-term RC + NvU32 rc_quant_incr_limit1; // QP threshold for short-term RC + NvU32 rc_tgt_offset_hi; // upper end of the target bpg range for short-term RC + NvU32 rc_tgt_offset_lo; // lower end of the target bpg range for short-term RC + NvU32 rc_buf_thresh[NUM_BUF_RANGES-1]; // thresholds in "RC model" + //rc_range_parameters + NvU32 range_min_qp[NUM_BUF_RANGES]; // minimum QP for each of the RC ranges + NvU32 range_max_qp[NUM_BUF_RANGES]; // maximum QP for each of the RC ranges + NvU32 range_bpg_offset[NUM_BUF_RANGES]; // bpg adjustment for each of the RC ranges + //420,422 + NvU32 native_420; // 420 native mode + NvU32 native_422; // 422 native mode + NvU32 second_line_bpg_offset; // 2nd line bpg offset to use, native 420 only + NvU32 nsl_bpg_offset; // non-2nd line bpg offset to use, native 420 only + NvU32 second_line_offset_adj; // adjustment to 2nd line bpg offset, native 420 only + + //additional params not in PPS + NvU32 slice_num; + NvU32 groups_per_line; + NvU32 num_extra_mux_bits; + NvU32 flatness_det_thresh; +} DSC_OUTPUT_PARAMS; + +/* ------------------------ Global Variables ------------------------------- */ + +DSC_CALLBACK callbacks; + +static const NvU8 minqp444_8b[15][37]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 3, 3, 2, 2, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0} + ,{ 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0} + ,{ 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{ 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{ 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3} +}; + +static const NvU8 maxqp444_8b[15][37]={ + { 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 6, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1} + ,{10,10, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1} + ,{11,11,10,10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1} + ,{12,11,11,10,10,10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1} + ,{12,12,11,11,10,10,10,10,10,10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1} + ,{12,12,12,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{12,12,12,12,11,11,11,11,11,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4} +}; + +static const NvU8 minqp444_10b[15][49]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 7, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 
3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0} + ,{10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0} + ,{10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1} + ,{10,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{10,10,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{12,12,12,12,12,12,12,12,12,12,11,11,11,11,11,11,11,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{18,18,17,17,16,16,16,16,15,15,14,14,14,14,13,13,13,12,12,12,11,11,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3} +}; + +static const NvU8 maxqp444_10b[15][49]={ + { 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{10,10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{12,11,11,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0} + ,{12,12,11,11,10,10,10,10,10,10,10,10, 9, 9, 9, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0} + ,{13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 8, 8, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{13,12,12,12,11,11,11,11,11,11,11,11,10,10,10, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1} + ,{14,14,13,13,12,12,12,12,12,12,12,12,12,11,11,10, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1} + ,{15,15,14,14,13,13,13,13,13,13,12,12,12,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1} + ,{16,15,15,14,14,14,13,13,13,13,13,13,13,12,12,11,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1} + 
,{16,16,15,15,14,14,14,14,14,14,13,13,13,12,12,11,11,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2} + ,{16,16,16,15,15,15,14,14,14,14,13,13,13,13,12,12,12,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{16,16,16,16,15,15,15,15,15,14,14,13,13,13,12,12,12,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2} + ,{17,17,17,17,16,16,15,15,15,15,14,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2} + ,{19,19,18,18,17,17,17,17,16,16,15,15,15,15,14,14,14,13,13,13,12,12,12,12,11,11,10,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4} +}; + +static const NvU8 minqp444_12b[15][61]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{11,10,10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{11,11,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 9, 9, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,12,12,12,11,11,11,11,11,11,11,11,11,11,11,10, 9, 9, 8, 8, 8, 8, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10, 9, 9, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0} + ,{14,13,13,12,12,12,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 0} + ,{14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,12,12,12,11,11,11,11,11,11,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1} + ,{14,14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,11,11,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{14,14,14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,12,12,12,12,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{17,17,17,17,16,16,15,15,15,15,15,15,15,15,15,15,15,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1} + 
,{22,22,21,21,20,20,20,20,19,19,18,18,18,18,17,17,17,16,16,16,15,15,15,15,14,14,13,13,13,13,13,12,12,11,11,11,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3} +}; + +static const NvU8 maxqp444_12b[15][61]={ + {12,12,12,12,12,12,11,11,11,10, 9, 9, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{14,14,13,13,12,12,12,12,12,12,11,11, 9, 9, 9, 8, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{16,15,15,14,13,13,13,13,13,13,13,13,12,12,12,11,10,10, 9, 9, 9, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{16,16,15,15,14,14,14,14,14,14,14,14,13,13,13,12,11,11,10,10,10, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{17,16,16,15,15,15,15,15,15,15,15,15,14,14,13,12,12,11,10,10,10,10, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0} + ,{17,16,16,16,15,15,15,15,15,15,15,15,14,14,14,13,12,12,11,11,11,11, 9, 9, 9, 9, 8, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0} + ,{17,17,16,16,15,15,15,15,15,15,15,15,15,14,14,13,12,12,11,11,11,11,11,10,10,10, 9, 9, 9, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{18,18,17,17,16,16,16,16,16,16,16,16,16,15,15,14,13,13,12,12,12,12,11,11,11,11,10,10,10, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1} + ,{19,19,18,18,17,17,17,17,17,17,16,16,16,15,15,14,14,13,13,13,13,13,12,12,12,12,11,11,10, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1} + ,{20,19,19,18,18,18,17,17,17,17,17,17,17,16,16,15,14,14,13,13,13,13,12,12,12,12,11,11,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 1} + ,{20,20,19,19,18,18,18,18,18,18,17,17,17,16,16,15,15,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10,10,10,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2} + ,{20,20,20,19,19,19,18,18,18,18,17,17,17,17,16,16,16,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{20,20,20,20,19,19,19,19,19,18,18,17,17,17,16,16,16,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{21,21,21,21,20,20,19,19,19,19,18,18,18,18,17,17,16,16,16,16,15,15,14,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2} + ,{23,23,22,22,21,21,21,21,20,20,19,19,19,19,18,18,18,17,17,17,16,16,16,16,15,15,14,14,14,14,14,13,13,12,12,12,12,12,11,11,10,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4} +}; + +static const NvU8 minqp422_8b[15][21] = { + {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 
,1 ,1 ,0 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0} + ,{3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1} + ,{3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1} + ,{5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + ,{5 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1} + ,{5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{8 ,8 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3} +}; + +static const NvU8 maxqp422_8b[15][21] = { + {4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{4 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{5 ,5 ,5 ,5 ,5 ,4 ,3 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,6 ,5 ,4 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0} + ,{7 ,7 ,7 ,7 ,7 ,6 ,5 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1} + ,{7 ,7 ,7 ,7 ,7 ,6 ,5 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1} + ,{7 ,7 ,7 ,7 ,7 ,6 ,5 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1} + ,{8 ,8 ,8 ,8 ,8 ,7 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,1 ,1} + ,{9 ,9 ,9 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2} + ,{10,10,9 ,9 ,9 ,8 ,7 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2} + ,{10,10,10,9 ,9 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{11,11,10,10,9 ,9 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2} + ,{11,11,11,10,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2} + ,{12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,3} + ,{13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4} +}; + +static const NvU8 minqp422_10b[15][29] = { + {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{4 ,4 ,4 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{5 ,5 ,5 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0} + ,{6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0} + ,{7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{8 ,8 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1} + ,{9 ,9 ,9 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1} + ,{9 ,9 ,9 ,9 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + ,{9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,9 ,8 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,3 ,3 ,3 ,2 ,2 ,1 ,1} + ,{12,12,11,11,11,11,11,11,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,1} + ,{16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3} +}; + +static const NvU8 maxqp422_10b[15][29] = { + {8 ,8 ,7 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{8 ,8 ,8 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{9 ,9 ,9 ,8 ,7 ,6 ,5 ,4 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0} + ,{10,10,10,10,9 ,8 ,7 ,6 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1} + ,{11,11,11,11,10,9 ,8 ,6 ,5 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{11,11,11,11,11,10,9 ,8 ,7 
,6 ,6 ,5 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,1 ,1 ,1 ,1 ,1} + ,{11,11,11,11,11,10,9 ,8 ,7 ,7 ,7 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1} + ,{12,12,12,12,12,11,10,9 ,8 ,8 ,8 ,7 ,7 ,7 ,7 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2} + ,{13,13,13,12,12,11,10,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,5 ,5 ,5 ,5 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2} + ,{14,14,13,13,13,12,11,10,9 ,9 ,9 ,9 ,8 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2} + ,{14,14,14,13,13,12,11,11,10,10,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2} + ,{15,15,14,14,13,13,12,11,11,11,10,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{15,15,15,14,13,13,12,12,11,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2} + ,{16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2} + ,{17,17,16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4} +}; + +static const NvU8 minqp422_12b[15][37] = { + {0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{4 ,4 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{9 ,9 ,9 ,8 ,7 ,6 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{10,10,10,10,8 ,8 ,8 ,7 ,6 ,6 ,6 ,6 ,6 ,5 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{11,11,11,11,10,9 ,9 ,8 ,7 ,7 ,7 ,7 ,6 ,6 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{11,11,11,11,11,10,10,9 ,9 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0} + ,{11,11,11,11,11,10,10,10,9 ,9 ,9 ,9 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0} + ,{11,11,11,11,11,11,10,10,10,10,10,9 ,8 ,8 ,8 ,7 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0} + ,{11,11,11,11,11,11,11,11,11,11,11,10,9 ,8 ,8 ,8 ,7 ,6 ,6 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,0 ,0 ,0} + ,{11,11,11,11,11,11,11,11,11,11,11,11,9 ,9 ,9 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,3 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,0 ,0} + ,{13,13,13,13,13,12,12,12,12,12,12,11,11,10,10,10,9 ,9 ,8 ,8 ,8 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + ,{13,13,13,13,13,13,13,13,13,13,12,12,11,11,10,10,10,9 ,9 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{13,13,13,13,13,13,13,13,13,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{16,16,15,15,15,15,15,15,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2} + ,{20,20,19,19,18,18,17,17,16,16,15,15,14,14,13,13,12,12,12,11,11,10,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4} +}; + +static const NvU8 maxqp422_12b[15][37] = { + {12,12,11,9 ,6 ,6 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{12,12,12,10,9 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{13,13,13,12,10,9 ,8 ,7 ,6 ,6 ,6 ,6 ,6 ,6 ,5 ,5 ,4 ,3 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0} + ,{14,14,14,14,12,11,10,9 ,8 ,8 ,8 ,8 ,8 ,7 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,2 ,2 ,2 ,1 ,1 ,0 ,0 ,0 ,0 ,0 ,0} + ,{15,15,15,15,14,13,12,10,9 ,9 ,9 ,9 ,8 ,8 ,7 ,6 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,0 ,0 ,0} + ,{15,15,15,15,15,14,13,12,11,10,10,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,3 ,2 ,2 ,2 ,2 ,1 ,1 ,1 ,1 ,1 ,1} + ,{15,15,15,15,15,14,13,12,11,11,11,11,10,9 ,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1 ,1 ,1} + 
,{16,16,16,16,16,15,14,13,12,12,12,11,10,10,10,9 ,8 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,3 ,3 ,3 ,2 ,2 ,1 ,1 ,1} + ,{17,17,17,16,16,15,14,14,13,13,13,12,11,10,10,10,9 ,8 ,8 ,8 ,8 ,7 ,6 ,6 ,5 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,1 ,1 ,1} + ,{18,18,17,17,17,16,15,14,13,13,13,13,11,11,11,10,9 ,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,1 ,1} + ,{18,18,18,17,17,16,15,15,14,14,14,13,13,12,12,12,11,11,10,10,10,8 ,8 ,7 ,7 ,7 ,6 ,6 ,6 ,5 ,4 ,4 ,3 ,3 ,2 ,2 ,2} + ,{19,19,18,18,17,17,16,15,15,15,14,14,13,13,12,12,12,11,11,10,10,9 ,8 ,8 ,7 ,7 ,6 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2} + ,{19,19,19,18,17,17,16,16,15,15,15,14,14,13,13,12,12,11,11,10,10,9 ,8 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,4 ,4 ,3 ,3 ,2 ,2} + ,{20,20,19,19,18,18,17,17,16,16,15,15,14,14,13,13,12,12,11,11,10,10,9 ,9 ,8 ,8 ,7 ,7 ,6 ,6 ,5 ,5 ,5 ,4 ,4 ,3 ,3} + ,{21,21,20,20,19,19,18,18,17,17,16,16,15,15,14,14,13,13,13,12,12,11,11,10,10,10,9 ,9 ,8 ,8 ,7 ,7 ,7 ,6 ,6 ,5 ,5} +}; + +static const NvU32 rcBufThresh[] = { 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616, 7744, 7872, 8000, 8064 }; + +/* ------------------------ Static Variables ------------------------------- */ +/* ------------------------ Private Functions Prototype--------------------- */ + +static void * DSC_Malloc(NvLength size); +static void DSC_Free(void * ptr); +static NvU32 +DSC_GetHigherSliceCount +( + NvU32 common_slice_count_mask, + NvU32 desired_slice_num, + NvU32 dual_mode, + NvU32 *new_slice_num +); +static NvU32 DSC_AlignDownForBppPrecision(NvU32 bitsPerPixelX16, NvU32 bitsPerPixelPrecision); + +static NvU32 +DSC_GetPeakThroughputMps(NvU32 peak_throughput); + +static NvU32 +DSC_SliceCountMaskforSliceNum (NvU32 slice_num); + +/* ------------------------ Private Functions ------------------------------ */ + +/* + * @brief Calculate Bits Per Pixel aligned down as per bitsPerPixelPrecision supported + * by Sink + * + * @param[in] bitsPerPixelX16 Bits Per Pixel + * @param[in] bitsPerPixelPrecision Bits Per Pixel Precision Supported by Panel + * + * @returns Aligned down Bits Per Pixel value + */ +static NvU32 +DSC_AlignDownForBppPrecision +( + NvU32 bitsPerPixelX16, + NvU32 bitsPerPixelPrecision +) +{ + NvU32 allignDownForBppPrecision; + + switch (bitsPerPixelPrecision) + { + case DSC_BITS_PER_PIXEL_PRECISION_1_16: + allignDownForBppPrecision = 1; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_8: + allignDownForBppPrecision = 2; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_4: + allignDownForBppPrecision = 4; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_2: + allignDownForBppPrecision = 8; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1: + allignDownForBppPrecision = 16; + break; + + default: + allignDownForBppPrecision = 16; + } + + return (bitsPerPixelX16 & ~(allignDownForBppPrecision - 1)); +} + +/* + * @brief Calculate chunk size, num_extra_mux_bits + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsCalcExtraBits +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 numSsps = out->native_422 ? 4 : 3; + NvU32 sliceBits; + NvU32 extra_bits; + NvU32 bitsPerComponent = out->bits_per_component; + NvU32 muxWordSize; + + muxWordSize = (bitsPerComponent >= 12) ? 
64 : 48; + if (out->convert_rgb) + { + extra_bits = (numSsps * (muxWordSize + (4 * bitsPerComponent + 4) - 2)); + } + else if (!out->native_422) // YCbCr + { + extra_bits = (numSsps * muxWordSize + (4 * bitsPerComponent + 4) + 2 * (4 * bitsPerComponent) - 2); + } + else + { + extra_bits = (numSsps * muxWordSize + (4 * bitsPerComponent + 4) + 3 * (4 * bitsPerComponent) - 2); + } + + sliceBits = 8 * out->chunk_size * out->slice_height; + //while ((extra_bits>0) && ((sliceBits - extra_bits) % muxWordSize)) + // extra_bits--; + sliceBits = (sliceBits - extra_bits) % muxWordSize; + if (sliceBits != 0) + { + extra_bits -= MIN(extra_bits, muxWordSize - sliceBits); + } + + out->num_extra_mux_bits = extra_bits; + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Calculate RC initial value. + * Require: groups_per_line in Dsc_PpsCalcWidth() + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsCalcRcInitValue +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 bitsPerPixel = out->bits_per_pixel; + NvU32 xmit_delay; + out->rc_model_size = 8192; + + if (out->native_422) + { + // =IF(CompressBpp >= 8, 2048, IF(CompressBpp <= 7, 5632, 5632 - ROUND((CompressBpp - 7) * (3584), 0))) + if (bitsPerPixel >= 16 * BPP_UNIT) + out->initial_offset = 2048; + else if (bitsPerPixel >= 14 * BPP_UNIT) + out->initial_offset = 5632 - ((bitsPerPixel - 14 * BPP_UNIT) * 1792 + BPP_UNIT / 2) / BPP_UNIT; + else + out->initial_offset = 5632; + } + else + { + if (bitsPerPixel >= 12 * BPP_UNIT) + out->initial_offset = 2048; + else if (bitsPerPixel >= 10 * BPP_UNIT) + out->initial_offset = 5632 - ((bitsPerPixel - 10 * BPP_UNIT) * 1792 + BPP_UNIT / 2) / BPP_UNIT; + else if (bitsPerPixel >= 8 * BPP_UNIT) + out->initial_offset = 6144 - ((bitsPerPixel - 8 * BPP_UNIT) * 256 + BPP_UNIT / 2) / BPP_UNIT; + else + out->initial_offset = 6144; + } + RANGE_CHECK("initial_offset", out->initial_offset, 0, out->rc_model_size); + + out->initial_scale_value = 8 * out->rc_model_size / (out->rc_model_size - out->initial_offset); + if (out->groups_per_line < out->initial_scale_value - 8) + { + out->initial_scale_value = out->groups_per_line + 8; + } + RANGE_CHECK("initial_scale_value", out->initial_scale_value, 0, 63); + + xmit_delay = (4096*BPP_UNIT + bitsPerPixel/2) / bitsPerPixel; + + if (out->native_420 || out->native_422) + { + NvU32 slicew = (out->native_420 || out->native_422) ? out->slice_width / 2 : out->slice_width; + NvU32 padding_pixels = ((slicew % 3) ? (3 - (slicew % 3)) : 0) * (xmit_delay / slicew); + if (3 * bitsPerPixel >= ((xmit_delay + 2) / 3) * (out->native_422 ? 
4 : 3) * BPP_UNIT && + (((xmit_delay + padding_pixels) % 3) == 1)) + { + xmit_delay++; + } + } + out->initial_xmit_delay = xmit_delay; + RANGE_CHECK("initial_xmit_delay", out->initial_xmit_delay, 0, 1023); + + return NVT_STATUS_SUCCESS; +} + +static NvU32 DSC_PpsCalcComputeOffset(DSC_OUTPUT_PARAMS *out, NvU32 grpcnt) +{ + NvU32 offset = 0; + NvU32 groupsPerLine = out->groups_per_line; + NvU32 grpcnt_id = (out->initial_xmit_delay + PIXELS_PER_GROUP - 1) / PIXELS_PER_GROUP; + + if(grpcnt <= grpcnt_id) + offset = (grpcnt * PIXELS_PER_GROUP * out->bits_per_pixel + BPP_UNIT - 1) / BPP_UNIT; + else + offset = (grpcnt_id * PIXELS_PER_GROUP * out->bits_per_pixel + BPP_UNIT - 1) / BPP_UNIT - (((grpcnt-grpcnt_id) * out->slice_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + + if(grpcnt <= groupsPerLine) + offset += grpcnt * out->first_line_bpg_offset; + else + offset += groupsPerLine * out->first_line_bpg_offset - (((grpcnt - groupsPerLine) * out->nfl_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + + if(out->native_420) + { + if(grpcnt <= groupsPerLine) + offset -= (grpcnt * out->nsl_bpg_offset) >> OFFSET_FRACTIONAL_BITS; + else if(grpcnt <= 2*groupsPerLine) + offset += (grpcnt - groupsPerLine) * out->second_line_bpg_offset - ((groupsPerLine * out->nsl_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + else + offset += (grpcnt - groupsPerLine) * out->second_line_bpg_offset - (((grpcnt - groupsPerLine) * out->nsl_bpg_offset)>>OFFSET_FRACTIONAL_BITS); + } + return(offset); +} + +/* + * @brief Calculate bpg value except slice_bpg_offset + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static +NvU32 DSC_PpsCalcBpg +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 uncompressedBpgRate; + NvU32 ub_BpgOfs; + NvU32 firstLineBpgOfs; + NvU32 secondLineBpgOfs; + NvU32 bitsPerPixel; + NvU32 rbsMin; + NvU32 hrdDelay; + NvU32 groups_total; + + if (out->native_422) + uncompressedBpgRate = PIXELS_PER_GROUP * out->bits_per_component * 4; + else + uncompressedBpgRate = (3 * out->bits_per_component + (out->convert_rgb ? 2 : 0)) * PIXELS_PER_GROUP; + + ub_BpgOfs = (uncompressedBpgRate*BPP_UNIT - PIXELS_PER_GROUP * out->bits_per_pixel) / BPP_UNIT; + + if (out->slice_height >= 8) + firstLineBpgOfs = 12 + MIN(34, out->slice_height - 8) * 9 / 100; + else + firstLineBpgOfs = 2 * (out->slice_height - 1); + + firstLineBpgOfs = CLAMP(firstLineBpgOfs, 0, ub_BpgOfs); + out->first_line_bpg_offset = firstLineBpgOfs; + RANGE_CHECK("first_line_bpg_offset", out->first_line_bpg_offset, 0, 31); + + if (out->slice_height > 1) + out->nfl_bpg_offset = ((out->first_line_bpg_offset << OFFSET_FRACTIONAL_BITS) + out->slice_height - 2) / (out->slice_height - 1); + else + out->nfl_bpg_offset = 0; + + RANGE_CHECK("nfl_bpg_offset", out->nfl_bpg_offset, 0, 65535); + + secondLineBpgOfs = out->native_420 ? 12 : 0; + secondLineBpgOfs = CLAMP(secondLineBpgOfs, 0, ub_BpgOfs); + out->second_line_bpg_offset = secondLineBpgOfs; + RANGE_CHECK("second_line_bpg_offset", out->second_line_bpg_offset, 0, 31); + + if (out->slice_height > 2) + out->nsl_bpg_offset = ((out->second_line_bpg_offset << OFFSET_FRACTIONAL_BITS) + out->slice_height - 2) / (out->slice_height - 1); + else + out->nsl_bpg_offset = 0; + RANGE_CHECK("nsl_bpg_offset", out->nsl_bpg_offset, 0, 65535); + + out->second_line_offset_adj = out->native_420 ? 
512 : 0;
+
+    bitsPerPixel = out->bits_per_pixel;
+    groups_total = out->groups_per_line * out->slice_height;
+    out->slice_bpg_offset = (((out->rc_model_size - out->initial_offset + out->num_extra_mux_bits) << OFFSET_FRACTIONAL_BITS) +
+                             groups_total - 1) / groups_total;
+    RANGE_CHECK("slice_bpg_offset", out->slice_bpg_offset, 0, 65535);
+
+    if((PIXELS_PER_GROUP * bitsPerPixel << OFFSET_FRACTIONAL_BITS) - (out->slice_bpg_offset + out->nfl_bpg_offset) * BPP_UNIT
+       < (1+5*PIXELS_PER_GROUP)*BPP_UNIT << OFFSET_FRACTIONAL_BITS)
+    {
+        DSC_Print("Error! bits_per_pixel too low, the bpg offsets leave too few bits per group\n");
+        return NVT_STATUS_ERR;
+    }
+
+    if (((out->dsc_version_major > 1) || (out->dsc_version_major == 1 && out->dsc_version_minor >= 2)) &&
+        (out->native_420 || out->native_422))
+    {
+        // OPTIMIZED computation of rbsMin:
+        // Compute max by sampling offset at points of inflection
+        // *MODEL NOTE* MN_RBS_MIN
+        NvU32 maxOffset;
+        maxOffset = DSC_PpsCalcComputeOffset(out, (out->initial_xmit_delay+PIXELS_PER_GROUP-1)/PIXELS_PER_GROUP); // After initial delay
+        maxOffset = MAX(maxOffset, DSC_PpsCalcComputeOffset(out, out->groups_per_line));   // After first line
+        maxOffset = MAX(maxOffset, DSC_PpsCalcComputeOffset(out, 2*out->groups_per_line));
+        rbsMin = out->rc_model_size - out->initial_offset + maxOffset;
+    }
+    else
+    {   // DSC 1.1 method
+        rbsMin = out->rc_model_size - out->initial_offset +
+                 (out->initial_xmit_delay * bitsPerPixel + BPP_UNIT - 1) / BPP_UNIT +
+                 out->groups_per_line * out->first_line_bpg_offset;
+    }
+    hrdDelay = (rbsMin * BPP_UNIT + bitsPerPixel - 1) / bitsPerPixel;
+    out->initial_dec_delay = hrdDelay - out->initial_xmit_delay;
+    RANGE_CHECK("initial_dec_delay", out->initial_dec_delay, 0, 65535);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate final_offset and scale_increment_interval,
+ *        scale_decrement_interval
+ *
+ * @param[in/out]   out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NvU32
+DSC_PpsCalcScaleInterval
+(
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    NvU32 final_scale;
+
+    out->final_offset = (out->rc_model_size - (out->initial_xmit_delay * out->bits_per_pixel + 8) /
+                         BPP_UNIT + out->num_extra_mux_bits);
+    RANGE_CHECK("final_offset", out->final_offset, 0, out->rc_model_size-1); //try increase initial_xmit_delay
+
+    final_scale = 8 * out->rc_model_size / (out->rc_model_size - out->final_offset);
+    RANGE_CHECK("final_scale", final_scale, 0, 63); //try increase initial_xmit_delay
+
+    // BEGIN scale_increment_interval fix
+    if(final_scale > 9)
+    {
+        //
+        // Note: the following calculation assumes that the rcXformOffset crosses 0 at some point. If the zero-crossing
+        // doesn't occur in a configuration, we recommend to reconfigure the rc_model_size and thresholds to be smaller
+        // for that configuration.
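+        //
+        // Illustrative arithmetic for the formula below (hypothetical values,
+        // assuming OFFSET_FRACTIONAL_BITS == 11 as in the DSC spec): with
+        // rc_model_size = 8192 and final_offset = 4336,
+        // final_scale = 8*8192/(8192-4336) = 16; if the three bpg offsets
+        // sum to 1000, then
+        // scale_increment_interval = (4336 << 11) / ((16-9) * 1000) = 1268.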
+        //
+        out->scale_increment_interval = (out->final_offset << OFFSET_FRACTIONAL_BITS) /
+                                        ((final_scale - 9) * (out->nfl_bpg_offset +
+                                         out->slice_bpg_offset + out->nsl_bpg_offset));
+        RANGE_CHECK("scale_increment_interval", out->scale_increment_interval, 0, 65535);
+    }
+    else
+    {
+        out->scale_increment_interval = 0;
+    }
+
+    // END scale_increment_interval fix
+    if (out->initial_scale_value > 8)
+        out->scale_decrement_interval = out->groups_per_line / (out->initial_scale_value - 8);
+    else
+        out->scale_decrement_interval = 4095;
+    RANGE_CHECK("scale_decrement_interval", out->scale_decrement_interval, 1, 4095);
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate RC parameters
+ *
+ * @param[in/out]   out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NvU32
+DSC_PpsCalcRcParam
+(
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    NvU32 i, idx;
+    NvU32 bitsPerPixel = out->bits_per_pixel;
+    NvU32 bpcm8 = out->bits_per_component - 8;
+    NvU32 yuv_modifier = out->convert_rgb == 0 && out->dsc_version_minor == 1;
+    NvU32 qp_bpc_modifier = bpcm8 * 2 - yuv_modifier;
+    const int ofs_und6[]  = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 };
+    const int ofs_und7[]  = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 };
+    const int ofs_und10[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 };
+
+    out->flatness_min_qp = 3 + qp_bpc_modifier;
+    out->flatness_max_qp = 12 + qp_bpc_modifier;
+    out->flatness_det_thresh = 2 << bpcm8;
+    out->rc_edge_factor = 6;
+    out->rc_quant_incr_limit0 = 11 + qp_bpc_modifier;
+    out->rc_quant_incr_limit1 = 11 + qp_bpc_modifier;
+    out->rc_tgt_offset_hi = 3;
+    out->rc_tgt_offset_lo = 3;
+
+    for (i = 0; i < NUM_BUF_RANGES - 1; i++)
+        out->rc_buf_thresh[i] = rcBufThresh[i] & (0xFF << 6);
+
+    if (out->native_422)
+    {
+        idx = bitsPerPixel/BPP_UNIT - 12;
+        if (bpcm8 == 0)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; ++i)
+            {
+                out->range_min_qp[i] = minqp422_8b[i][idx];
+                out->range_max_qp[i] = maxqp422_8b[i][idx];
+            }
+        }
+        else if (bpcm8 == 2)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                out->range_min_qp[i] = minqp422_10b[i][idx];
+                out->range_max_qp[i] = maxqp422_10b[i][idx];
+            }
+        }
+        else
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                out->range_min_qp[i] = minqp422_12b[i][idx];
+                out->range_max_qp[i] = maxqp422_12b[i][idx];
+            }
+        }
+
+        for (i = 0; i < NUM_BUF_RANGES; ++i)
+        {
+            if (bitsPerPixel <= 12*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und6[i];
+            }
+            else if (bitsPerPixel <= 14*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und6[i] + ((bitsPerPixel - 12*BPP_UNIT) *
+                                           (ofs_und7[i] - ofs_und6[i]) + BPP_UNIT) / (2*BPP_UNIT);
+            }
+            else if (bitsPerPixel <= 16*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und7[i];
+            }
+            else if (bitsPerPixel <= 20*BPP_UNIT)
+            {
+                out->range_bpg_offset[i] = ofs_und7[i] + ((bitsPerPixel - 16*BPP_UNIT) *
+                                           (ofs_und10[i] - ofs_und7[i]) + 2*BPP_UNIT) / (4*BPP_UNIT);
+            }
+            else
+            {
+                out->range_bpg_offset[i] = ofs_und10[i];
+            }
+        }
+    }
+    else
+    {
+        idx = (2 * (bitsPerPixel - 6 * BPP_UNIT)) / BPP_UNIT;
+
+        if (bpcm8 == 0)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                const NvU32 min = minqp444_8b[i][idx];
+                const NvU32 max = maxqp444_8b[i][idx];
+
+                out->range_min_qp[i] = MAX(0, min - yuv_modifier);
+                out->range_max_qp[i] = MAX(0, max - yuv_modifier);
+            }
+        }
+        else if (bpcm8 == 2)
+        {
+            for (i = 0; i < NUM_BUF_RANGES; i++)
+            {
+                const NvU32 min = minqp444_10b[i][idx];
+                const NvU32 max = maxqp444_10b[i][idx];
+
+                out->range_min_qp[i] = MAX(0, min - yuv_modifier);
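+                //
+                // These MAX(0, ...) clamps apply the yuv_modifier computed
+                // above: for DSC 1.1 YCbCr streams (convert_rgb == 0,
+                // dsc_version_minor == 1) the 4:4:4 table QPs are each
+                // lowered by 1 and floored at 0; DSC 1.2 uses the table
+                // values unchanged.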
out->range_max_qp[i] = MAX(0, max - yuv_modifier); + } + } + else + { + for (i = 0; i < NUM_BUF_RANGES; i++) + { + const NvU32 min = minqp444_12b[i][idx]; + const NvU32 max = maxqp444_12b[i][idx]; + + out->range_min_qp[i] = MAX(0, min - yuv_modifier); + out->range_max_qp[i] = MAX(0, max - yuv_modifier); + } + } + + for (i = 0; i < NUM_BUF_RANGES; ++i) + { + //if (out->native_420) + //{ + // NvU32 ofs_und4[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + // NvU32 ofs_und5[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und6[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und8[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + // out->range_min_qp[i] = minqp_420[bpcm8 / 2][i][idx]; + // out->range_max_qp[i] = maxqp_420[bpcm8 / 2][i][idx]; + // if (bitsPerPixel <= 8*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und4[i]; + // else if (bitsPerPixel <= 10*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und4[i] + (NvU32)(0.5 * (bitsPerPixel - 8.0) * (ofs_und5[i] - ofs_und4[i]) + 0.5); + // else if (bitsPerPixel <= 12*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und5[i] + (NvU32)(0.5 * (bitsPerPixel - 10.0) * (ofs_und6[i] - ofs_und5[i]) + 0.5); + // else if (bitsPerPixel <= 16*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i] + (NvU32)(0.25 * (bitsPerPixel - 12.0) * (ofs_und8[i] - ofs_und6[i]) + 0.5); + // else + // out->range_bpg_offset[i] = ofs_und8[i]; + //} + //else if (out->native_422) + //{ + // NvU32 ofs_und6[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + // NvU32 ofs_und7[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und10[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + // out->range_min_qp[i] = minqp_422[bpcm8 / 2][i][idx]; + // out->range_max_qp[i] = maxqp_422[bpcm8 / 2][i][idx]; + // if (bitsPerPixel <= 12*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i]; + // else if(bitsPerPixel <= 14*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i] + (NvU32)((bitsPerPixel - 12.0) * (ofs_und7[i] - ofs_und6[i]) / 2.0 + 0.5); + // else if(bitsPerPixel <= 16*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und7[i]; + // else if(bitsPerPixel <= 20*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und7[i] + (NvU32)((bitsPerPixel - 16.0) * (ofs_und10[i] - ofs_und7[i]) / 4.0 + 0.5); + // else + // out->range_bpg_offset[i] = ofs_und10[i]; + //} + //else + { + const NvU32 ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + const NvU32 ofs_und8[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + const NvU32 ofs_und12[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + const NvU32 ofs_und15[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + + if (bitsPerPixel <= 6 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und6[i]; + } + else if (bitsPerPixel <= 8 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und6[i] + ((bitsPerPixel - 6 * BPP_UNIT) * + (ofs_und8[i] - ofs_und6[i]) + BPP_UNIT) / (2 * BPP_UNIT); + } + else if (bitsPerPixel <= 12 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und8[i]; + } + else if (bitsPerPixel <= 15 * BPP_UNIT) + { + out->range_bpg_offset[i] = ofs_und12[i] + ((bitsPerPixel - 12 * BPP_UNIT) * + (ofs_und15[i] - ofs_und12[i]) + 3 * BPP_UNIT / 2) / (3 * BPP_UNIT); + } + else + { + out->range_bpg_offset[i] = ofs_und15[i]; + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +/* + * @brief 
Initialize with basic PPS values based on passed down input params
+ *
+ * @param[in]   in    DSC input parameter
+ * @param[out]  out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NvU32
+DSC_PpsCalcBase
+(
+    const DSC_INPUT_PARAMS *in,
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    out->dsc_version_major = 1;
+    ENUM2_CHECK("dsc_version_minor", in->dsc_version_minor, 1, 2);
+    out->dsc_version_minor = in->dsc_version_minor == 1 ? 1 : 2;
+    out->pps_identifier = 0;
+    ENUM3_CHECK("bits_per_component", in->bits_per_component, 8, 10, 12);
+    out->bits_per_component = in->bits_per_component;
+    RANGE_CHECK("bits_per_pixelx16", in->bits_per_pixel, 8 * BPP_UNIT, (out->bits_per_component * 3) * BPP_UNIT);
+    out->bits_per_pixel = in->bits_per_pixel;
+    RANGE_CHECK("linebuf_depth", in->linebuf_depth, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MIN, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MAX);
+    out->linebuf_depth = in->linebuf_depth;
+    ENUM2_CHECK("block_pred_enable", in->block_pred_enable, 0, 1);
+    out->block_pred_enable = in->block_pred_enable ? 1 : 0;
+    ENUM2_CHECK("convert_rgb", in->convert_rgb, 0, 1);
+    out->convert_rgb = in->convert_rgb ? 1 : 0;
+    RANGE_CHECK("pic_height", in->pic_height, 8, 8192);
+    out->pic_height = in->pic_height;
+
+    if (in->dual_mode)
+    {
+        RANGE_CHECK("pic_width", in->pic_width, 64, 8192);
+    }
+    else
+    {
+        RANGE_CHECK("pic_width", in->pic_width, 32, 5120);
+    }
+    out->pic_width = in->pic_width;
+
+    out->simple_422 = in->simple_422;
+    out->vbr_enable = 0;
+    out->native_420 = in->native_420;
+    out->native_422 = in->native_422;
+    out->slice_num = in->slice_num;
+    out->slice_width = in->slice_width;
+    out->slice_height = in->slice_height;
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Generate 32bit data array from DSC_OUTPUT_PARAMS.
+ *
+ * @param[in]   in    computed DSC output parameters
+ * @param[out]  data  NvU32[32] to return the pps data.
+ *                    The data can be sent to SetDscPpsData* methods directly.
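+ *
+ * The 96 payload bytes built below follow the PPS field order of the
+ * DSC spec (pps[0] = version byte, pps[14..15] = chunk size,
+ * pps[88] = native 420/422 flags, and so on); the final loop packs
+ * them little-endian, four bytes per DWORD, and zero-fills
+ * DWORDs 24..31.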
+ * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static void +DSC_PpsConstruct +( + const DSC_OUTPUT_PARAMS *in, + NvU32 data[DSC_MAX_PPS_SIZE_DWORD] +) +{ + NvU32 i; + NvU32 pps[96]; + + pps[0] = (in->dsc_version_major << 4) | (in->dsc_version_minor & 0xF); + pps[1] = in->pps_identifier; + pps[2] = 0; + pps[3] = (in->bits_per_component << 4) | (in->linebuf_depth & 0xF); + pps[4] = (in->block_pred_enable << 5) | (in->convert_rgb << 4) | + (in->simple_422 << 3) | (in->vbr_enable << 2) | + MSB(in->bits_per_pixel & 0x3FF); + pps[5] = LSB(in->bits_per_pixel); + pps[6] = MSB(in->pic_height); + pps[7] = LSB(in->pic_height); + pps[8] = MSB(in->pic_width); + pps[9] = LSB(in->pic_width); + pps[10] = MSB(in->slice_height); + pps[11] = LSB(in->slice_height); + pps[12] = MSB(in->slice_width); + pps[13] = LSB(in->slice_width); + pps[14] = MSB(in->chunk_size); + pps[15] = LSB(in->chunk_size); + pps[16] = MSB(in->initial_xmit_delay & 0x3FF); + pps[17] = LSB(in->initial_xmit_delay); + pps[18] = MSB(in->initial_dec_delay); + pps[19] = LSB(in->initial_dec_delay); + pps[20] = 0; + pps[21] = in->initial_scale_value & 0x3F; + pps[22] = MSB(in->scale_increment_interval); + pps[23] = LSB(in->scale_increment_interval); + pps[24] = MSB(in->scale_decrement_interval & 0xFFF); + pps[25] = LSB(in->scale_decrement_interval); + pps[26] = 0; + pps[27] = in->first_line_bpg_offset & 0x1F; + pps[28] = MSB(in->nfl_bpg_offset); + pps[29] = LSB(in->nfl_bpg_offset); + pps[30] = MSB(in->slice_bpg_offset); + pps[31] = LSB(in->slice_bpg_offset); + pps[32] = MSB(in->initial_offset); + pps[33] = LSB(in->initial_offset); + pps[34] = MSB(in->final_offset); + pps[35] = LSB(in->final_offset); + pps[36] = in->flatness_min_qp & 0x1F; + pps[37] = in->flatness_max_qp & 0x1F; + + pps[38] = MSB(in->rc_model_size); + pps[39] = LSB(in->rc_model_size); + pps[40] = in->rc_edge_factor & 0xF; + pps[41] = in->rc_quant_incr_limit0 & 0x1F; + pps[42] = in->rc_quant_incr_limit1 & 0x1F; + pps[43] = (in->rc_tgt_offset_hi << 4) | (in->rc_tgt_offset_lo & 0xF); + for (i = 0; i < NUM_BUF_RANGES - 1; i++) + pps[44 + i] = in->rc_buf_thresh[i] >> 6; + + for (i = 0; i < NUM_BUF_RANGES; i++) + { + NvU32 x = ((in->range_min_qp[i] & 0x1F) << 11) | + ((in->range_max_qp[i] & 0x1F) << 6) | + ((in->range_bpg_offset[i] & 0x3F)) ; + pps[58 + i * 2] = MSB(x); + pps[59 + i * 2] = LSB(x); + } + + pps[88] = (in->native_420 << 1) | (in->native_422); + pps[89] = in->second_line_bpg_offset & 0x1F; + pps[90] = MSB(in->nsl_bpg_offset); + pps[91] = LSB(in->nsl_bpg_offset); + pps[92] = MSB(in->second_line_offset_adj); + pps[93] = LSB(in->second_line_offset_adj); + pps[94] = 0; + pps[95] = 0; + + for (i = 0; i < 24; i++) + { + data[i] = ((pps[i * 4 + 0] << 0) | + (pps[i * 4 + 1] << 8) | + (pps[i * 4 + 2] << 16) | + (pps[i * 4 + 3] << 24)); + } + + for(; i < 32; i++) + data[i] = 0; +} + +/* + * @brief Generate slice count supported mask with given slice num. 
+ * + * @param[in] slice_num slice num for which mask needs to be generated + * + * @returns out_slice_count_mask if successful + * 0 if not successful + */ +static NvU32 +DSC_SliceCountMaskforSliceNum (NvU32 slice_num) +{ + switch (slice_num) + { + case 1: + return DSC_DECODER_SLICES_PER_SINK_1; + case 2: + return DSC_DECODER_SLICES_PER_SINK_2; + case 4: + return DSC_DECODER_SLICES_PER_SINK_4; + case 6: + return DSC_DECODER_SLICES_PER_SINK_6; + case 8: + return DSC_DECODER_SLICES_PER_SINK_8; + case 10: + return DSC_DECODER_SLICES_PER_SINK_10; + case 12: + return DSC_DECODER_SLICES_PER_SINK_12; + case 16: + return DSC_DECODER_SLICES_PER_SINK_16; + case 20: + return DSC_DECODER_SLICES_PER_SINK_20; + case 24: + return DSC_DECODER_SLICES_PER_SINK_24; + default: + return DSC_DECODER_SLICES_PER_SINK_INVALID; + } +} + +/* + * @brief Convert peak throughput placeholders into numeric values. + * + * @param[in] peak_throughput_mode0 peak throughput sink cap placeholder. + * + * @returns peak_throughput_mps actual throughput in MegaPixels/second. + */ +static NvU32 +DSC_GetPeakThroughputMps(NvU32 peak_throughput) +{ + NvU32 peak_throughput_mps; + switch(peak_throughput) + { + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_340: + peak_throughput_mps = 340; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_400: + peak_throughput_mps = 400; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_450: + peak_throughput_mps = 450; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_500: + peak_throughput_mps = 500; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_550: + peak_throughput_mps = 550; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_600: + peak_throughput_mps = 600; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_650: + peak_throughput_mps = 650; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_700: + peak_throughput_mps = 700; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_750: + peak_throughput_mps = 750; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_800: + peak_throughput_mps = 800; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_850: + peak_throughput_mps = 850; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_900: + peak_throughput_mps = 900; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_950: + peak_throughput_mps = 950; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_1000: + peak_throughput_mps = 1000; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_170: + peak_throughput_mps = 170; + break; + default: + peak_throughput_mps = 0; + } + return peak_throughput_mps; +} + +/* + * @brief Get the next higher valid slice count. + * + * @param[in] common_slice_count_mask Includes slice counts supported by both. + * @param[in] desired_slice_num desired slice count + * @param[in] dual_mode if dual mode or not + * @param[in] new_slice_num new slice count if one was found. + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NvU32 +DSC_GetHigherSliceCount +( + NvU32 common_slice_count_mask, + NvU32 desired_slice_num, + NvU32 dual_mode, + NvU32 *new_slice_num +) +{ + // + // slice num = 6 won't exist in common_slice_count_mask, but + // still keeping it to align mask bits and valid_slice_num index. + // + NvU32 valid_slice_num[6] = {1,2,0,4,6,8}; + NvU32 i = 0; + NvU32 slice_mask = common_slice_count_mask; + NvU32 max_slice_num_index = dual_mode ? 
5 : 3; + + while (slice_mask && i <= max_slice_num_index) + { + if (slice_mask & 0x1) + { + if (valid_slice_num[i] > desired_slice_num) + { + *new_slice_num = valid_slice_num[i]; + return NVT_STATUS_SUCCESS; + } + } + slice_mask = slice_mask >> 1; + i++; + } + + return NVT_STATUS_ERR; +} + +/* + * @brief Function validates and calculates, if required, the slice parameters like + * slice_width, slice_num for the DSC mode requested. + * + * If slice width, slice num is not forced, fn calculates them by trying to minimize + * slice num used. + * + * If slice width/slice num is forced, it validates the forced parameter and calculates + * corresponding parameter and makes sure it can be supported. + * + * If both slice num and slice width are forced, it validates both. + * + * @param[in] pixel_clkMHz Pixel clock + * @param[in] dual_mode Specify if Dual Mode is enabled or not + * @param[in] max_slice_num max slice number supported by sink + * @param[in] max_slice_width max slice width supported by sink + * @param[in] slice_count_mask Mask of slice counts supported by sink + * @param[in] peak_throughput Peak throughput supported by DSC sink + * decoder in Mega Pixels Per Second + * @param[out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NvU32 +DSC_PpsCalcSliceParams +( + NvU32 pixel_clkMHz, + NvU32 dual_mode, + NvU32 max_slice_num, + NvU32 max_slice_width, + NvU32 slice_count_mask, + NvU32 peak_throughput, + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 min_slice_num; + NvU32 slicew; + NvU32 peak_throughput_mps; + // + // Bits 0,1,3,4,5 represents slice counts 1,2,4,6,8. + // Bit 2 is reserved and Slice count = 6 is not supported + // by GPU, so that is not required to be set. + // + NvU32 gpu_slice_count_mask = DSC_DECODER_SLICES_PER_SINK_1 | + DSC_DECODER_SLICES_PER_SINK_2 | + DSC_DECODER_SLICES_PER_SINK_4; + + NvU32 gpu_slice_count_mask_dual = DSC_DECODER_SLICES_PER_SINK_2 | + DSC_DECODER_SLICES_PER_SINK_4 | + DSC_DECODER_SLICES_PER_SINK_8; + + NvU32 common_slice_count_mask = dual_mode? 
gpu_slice_count_mask_dual & slice_count_mask :
+                                   gpu_slice_count_mask & slice_count_mask;
+
+    if (!common_slice_count_mask)
+    {
+        DSC_Print("DSC cannot be supported since no common supported slice count\n");
+        return NVT_STATUS_ERR;
+    }
+
+    peak_throughput_mps = DSC_GetPeakThroughputMps(peak_throughput);
+    if (!peak_throughput_mps)
+    {
+        DSC_Print("Peak throughput cannot be zero.\n");
+        return NVT_STATUS_ERR;
+    }
+
+    if (max_slice_width > MAX_WIDTH_PER_SLICE)
+    {
+        DSC_Print("GPU can support only a max of 5120 pixels across all slices\n");
+        max_slice_width = MAX_WIDTH_PER_SLICE;
+    }
+
+    if (out->slice_num == 0 && out->slice_width == 0)
+    {
+        NvU32 new_slice_num = 0;
+        NvU32 min_slice_num_1 = (out->pic_width + max_slice_width - 1) / max_slice_width;
+        NvU32 min_slice_num_2 = (pixel_clkMHz + peak_throughput_mps - 1) / peak_throughput_mps;
+        min_slice_num = MAX(min_slice_num_1, min_slice_num_2);
+
+        if (max_slice_num < min_slice_num)
+        {
+            DSC_Print("Requested mode cannot be supported with DSC\n");
+            return NVT_STATUS_ERR;
+        }
+
+        if (!(DSC_SliceCountMaskforSliceNum(min_slice_num) & common_slice_count_mask))
+        {
+            if (DSC_GetHigherSliceCount(common_slice_count_mask, min_slice_num, dual_mode, &new_slice_num) == NVT_STATUS_ERR)
+            {
+                DSC_Print("DSC cannot be enabled for this mode\n");
+                return NVT_STATUS_ERR;
+            }
+            else
+            {
+                out->slice_num = new_slice_num;
+            }
+        }
+        else
+        {
+            out->slice_num = min_slice_num;
+        }
+
+        out->slice_width = (out->pic_width + out->slice_num - 1) / out->slice_num;
+    }
+    else if (out->slice_num == 0)
+    {
+        if (out->slice_width > max_slice_width)
+        {
+            DSC_Print("Error! Max Supported Slice Width = %u\n", max_slice_width);
+            return NVT_STATUS_ERR;
+        }
+
+        out->slice_num = (out->pic_width + out->slice_width - 1) / out->slice_width;
+        if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask))
+        {
+            DSC_Print("Slice count corresponding to requested slice_width is not supported\n");
+            return NVT_STATUS_ERR;
+        }
+    }
+    else if (out->slice_width == 0)
+    {
+        if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask))
+        {
+            DSC_Print("Slice count requested is not supported\n");
+            return NVT_STATUS_ERR;
+        }
+
+        out->slice_width = (out->pic_width + out->slice_num - 1) / out->slice_num;
+
+        if (out->native_420 || out->native_422)
+        {
+            out->slice_width = (out->slice_width + 1) / 2 * 2;
+        }
+
+        if (out->slice_width > max_slice_width)
+        {
+            DSC_Print("Slice width corresponding to the requested slice count is not supported\n");
+            return NVT_STATUS_ERR;
+        }
+    }
+    else
+    {
+        if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask))
+        {
+            DSC_Print("Requested slice count is not supported\n");
+            return NVT_STATUS_ERR;
+        }
+
+        if (out->slice_width > max_slice_width)
+        {
+            DSC_Print("Requested slice width cannot be supported\n");
+            return NVT_STATUS_ERR;
+        }
+
+        if (out->slice_width != (out->pic_width + out->slice_num - 1) / out->slice_num)
+        {
+            DSC_Print("slice_width must equal CEIL(pic_width/slice_num)\n");
+            return NVT_STATUS_ERR;
+        }
+    }
+
+    if((pixel_clkMHz / out->slice_num) > peak_throughput_mps)
+    {
+        DSC_Print("Sink DSC decoder does not support minimum throughput required for this DSC config\n");
+        return NVT_STATUS_ERR;
+    }
+
+    if (max_slice_width < SINK_MAX_SLICE_WIDTH_DEFAULT)
+    {
+        DSC_Print("Sink has to support a max slice width of at least 2560 as per DP1.4 spec. 
Ignoring for now."); + } + + if (out->slice_width < 32) + { + DSC_Print("slice_width must >= 32\n"); + return NVT_STATUS_ERR; + } + + slicew = out->slice_width >> (out->native_420 || out->native_422); // /2 in 4:2:0 mode + out->groups_per_line = (slicew + PIXELS_PER_GROUP - 1) / PIXELS_PER_GROUP; + out->chunk_size = (slicew * out->bits_per_pixel + 8 * BPP_UNIT - 1) / (8 * BPP_UNIT); // Number of bytes per chunk + + // + // Below is not constraint of DSC module, this is RG limitation. + // check total data packet per line from DSC to RG won't larger than pic_width + // + if ((out->chunk_size + 3) / 4 * out->slice_num > out->pic_width) + { + DSC_Print("Error! bpp too high, RG will overflow, normally, this error is also caused by padding (pic_widthslice_height == 0) + { + NvU32 i; + for (i = 1 ; i <= 16; i++) + { + out->slice_height = out->pic_height / i; + if (out->pic_height != out->slice_height * i ) + continue; + + if (DSC_PpsCheckSliceHeight(out) == NVT_STATUS_SUCCESS) + return NVT_STATUS_SUCCESS; + } + DSC_Print("Error! can't find valid slice_height"); + return NVT_STATUS_ERR; + } + + RANGE_CHECK("slice_height", out->slice_height, 8, out->pic_height); + + if (out->pic_height % out->slice_height != 0) + { + DSC_Print("Error! pic_height %% slice_height must be 0"); + return NVT_STATUS_ERR; + } + + if(DSC_PpsCheckSliceHeight(out) != NVT_STATUS_SUCCESS) + { + DSC_Print("Error! slice_height not valid"); + return NVT_STATUS_ERR; + } + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Calculate DSC_OUTPUT_PARAMS from DSC_INPUT_PARAMS. + * + * @param[in] in DSC input parameter + * @param[out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsCalc +( + const DSC_INPUT_PARAMS *in, + DSC_OUTPUT_PARAMS *out +) +{ + NVT_STATUS ret; + NvU32 peak_throughput = 0; + + ret = DSC_PpsCalcBase(in, out); + if (ret != NVT_STATUS_SUCCESS) + return ret; + + if (in->drop_mode) + { + // in drop mode, HW requires these params to simplify the design + out->bits_per_pixel = 16 * BPP_UNIT; + out->slice_num = 2; + } + + if (out->native_420 || out->native_422) + { + peak_throughput = in->peak_throughput_mode1; + } + else + { + peak_throughput = in->peak_throughput_mode0; + } + + ret = DSC_PpsCalcSliceParams(in->pixel_clkMHz, in->dual_mode, + in->max_slice_num, in->max_slice_width, in->slice_count_mask, + peak_throughput, out); + if (ret != NVT_STATUS_SUCCESS) return ret; + ret = DSC_PpsCalcRcInitValue(out); + if (ret != NVT_STATUS_SUCCESS) return ret; + ret = Dsc_PpsCalcHeight(out); + if (ret != NVT_STATUS_SUCCESS) return ret; + ret = DSC_PpsCalcRcParam(out); + return ret; +} + +/* + * @brief Calculate DSC_OUTPUT_PARAMS from DSC_INPUT_PARAMS internally, + * then pack pps parameters into 32bit data array. + * + * @param[in] in DSC input parameter + * @param[out] out DSC output parameter + * NvU32[32] to return the pps data. + * The data can be send to SetDscPpsData* methods directly. 
+ * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsDataGen +( + const DSC_INPUT_PARAMS *in, + NvU32 out[DSC_MAX_PPS_SIZE_DWORD] +) +{ + NVT_STATUS ret; + DSC_OUTPUT_PARAMS *pPpsOut; + + pPpsOut = (DSC_OUTPUT_PARAMS *)DSC_Malloc(sizeof(DSC_OUTPUT_PARAMS)); + if (pPpsOut == NULL) + { + DSC_Print("ERROR - Memory allocation error."); + ret = NVT_STATUS_NO_MEMORY; + goto done; + } + + NVMISC_MEMSET(pPpsOut, 0, sizeof(DSC_OUTPUT_PARAMS)); + ret = DSC_PpsCalc(in, pPpsOut); + if (ret != NVT_STATUS_SUCCESS) + { + DSC_Print("ERROR - Invalid parameter."); + goto done; + } + + DSC_PpsConstruct(pPpsOut, out); + + /* fall through */ +done: + DSC_Free(pPpsOut); + + return ret; +} + +/* + * @brief Allocates memory for requested size + * + * @param[in] size Size to be allocated + * + * @returns Pointer to allocated memory + */ +static void * +DSC_Malloc(NvLength size) +{ +#if defined(DSC_CALLBACK_MODIFIED) + return (callbacks.dscMalloc)(callbacks.clientHandle, size); +#else + return (callbacks.dscMalloc)(size); +#endif // DSC_CALLBACK_MODIFIED +} + +/* + * @brief Frees dynamically allocated memory + * + * @param[in] ptr Pointer to a memory to be deallocated + * + */ +static void +DSC_Free(void * ptr) +{ +#if defined(DSC_CALLBACK_MODIFIED) + (callbacks.dscFree)(callbacks.clientHandle, ptr); +#else + (callbacks.dscFree)(ptr); +#endif // DSC_CALLBACK_MODIFIED +} + +/* + * @brief Validate input parameter we got from caller of this function + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +_validateInput +( + const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond +) +{ + // Validate DSC Info + if (pDscInfo->sinkCaps.decoderColorFormatMask == 0) + { + DSC_Print("ERROR - At least one of the color format decoding needs to be supported by Sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (!ONEBITSET(pDscInfo->sinkCaps.bitsPerPixelPrecision)) + { + DSC_Print("ERROR - Only one of Bits Per Pixel Precision should be set"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->sinkCaps.bitsPerPixelPrecision != 1) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 2) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 4) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 8) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 16)) + { + DSC_Print("ERROR - Bits Per Pixel Precision should be 1/16, 1/8, 1/4, 1/2 or 1 bpp."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.maxSliceWidth == 0) + { + DSC_Print("ERROR - Invalid max slice width supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.maxNumHztSlices == 0) + { + DSC_Print("ERROR - Invalid max number of horizontal slices supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.lineBufferBitDepth == 0) + { + DSC_Print("ERROR - Invalid line buffer bit depth supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.algorithmRevision.versionMinor == 0) + { + DSC_Print("ERROR - Invalid DSC algorithm revision 
supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.encoderColorFormatMask == 0) + { + DSC_Print("ERROR - At least one of the color format encoding needs to be supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.lineBufferSize == 0) + { + DSC_Print("ERROR - Invalid Line buffer size supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.maxNumHztSlices == 0) + { + DSC_Print("ERROR - Invalid max number of horizontal slices supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.lineBufferBitDepth == 0) + { + DSC_Print("ERROR - Invalid line buffer bit depth supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceCount > pDscInfo->sinkCaps.maxNumHztSlices) + { + DSC_Print("ERROR - Client can't specify forced slice count greater than what sink supports."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->forcedDscParams.sliceCount / (pModesetInfo->bDualMode ? 2 : 1)) > pDscInfo->gpuCaps.maxNumHztSlices) + { + DSC_Print("ERROR - Client can't specify forced slice count greater than what GPU supports."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceWidth > pDscInfo->sinkCaps.maxSliceWidth) + { + DSC_Print("ERROR - Client can't specify forced slice width greater than what sink supports."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->forcedDscParams.sliceCount > 0) && + (pDscInfo->forcedDscParams.sliceWidth != 0)) + { + DSC_Print("ERROR - Client can't specify both forced slice count and slice width."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->forcedDscParams.sliceCount != 0) && + (pDscInfo->forcedDscParams.sliceCount != 1) && + (pDscInfo->forcedDscParams.sliceCount != 2) && + (pDscInfo->forcedDscParams.sliceCount != 4) && + (pDscInfo->forcedDscParams.sliceCount != 8)) + { + DSC_Print("ERROR - Forced Slice Count has to be 1/2/4/8."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceWidth > pModesetInfo->activeWidth) + { + DSC_Print("ERROR - Forced Slice Width can't be more than Active Width."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceHeight > pModesetInfo->activeHeight) + { + DSC_Print("ERROR - Forced Slice Height can't be more than Active Height."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.dscRevision.versionMinor > + pDscInfo->sinkCaps.algorithmRevision.versionMinor) + { + DSC_Print("ERROR - Forced DSC Algorithm Revision is greater than Sink Supported value."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.dscRevision.versionMinor > 2) + { + DSC_Print("ERROR - Forced DSC Algorithm Revision is greater than 1.2"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->pixelClockHz == 0) + { + DSC_Print("ERROR - Invalid pixel Clock for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->activeWidth == 0) + { + DSC_Print("ERROR - Invalid active width for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->activeHeight == 0) + { + DSC_Print("ERROR - Invalid active height for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->bitsPerComponent == 0) + { + DSC_Print("ERROR - Invalid bits per component for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (availableBandwidthBitsPerSecond == 0) + { + 
DSC_Print("ERROR - Invalid available bandwidth in Bits Per Second."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr422) + { + // + // For using YCbCr422 with DSC, either of the following has to be true + // 1> Sink supports Simple422 + // 2> GPU and Sink supports Native 422 + // + if ((!(pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422)) && + (!((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)))) + { + DSC_Print("ERROR - Can't enable YCbCr422 with current GPU and Sink DSC config."); + return NVT_STATUS_INVALID_PARAMETER; + } + } + + if (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr420) + { + // + // For using YCbCr420 with DSC, GPU and Sink has to support Native 420 + // + if (!((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420))) + { + DSC_Print("ERROR - Can't enable YCbCr420 with current GPU and Sink DSC config."); + return NVT_STATUS_INVALID_PARAMETER; + } + } + + if ((pDscInfo->sinkCaps.algorithmRevision.versionMajor == 1) && + (pDscInfo->sinkCaps.algorithmRevision.versionMinor == 1) && + (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr420)) + { + DSC_Print("WARNING: DSC v1.2 or higher is recommended for using YUV444"); + DSC_Print("Current version is 1.1"); + } + + if (pDscInfo->sinkCaps.maxBitsPerPixelX16 > 1024U) + { + DSC_Print("ERROR - Max bits per pixel can't be greater than 1024"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.decoderColorDepthMask) + { + switch (pModesetInfo->bitsPerComponent) + { + case 12: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS)) + { + DSC_Print("ERROR - Sink DSC Decoder does not support 12 bpc"); + return NVT_STATUS_INVALID_PARAMETER; + } + break; + case 10: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS)) + { + DSC_Print("ERROR - Sink DSC Decoder does not support 10 bpc"); + return NVT_STATUS_INVALID_PARAMETER; + } + break; + case 8: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS)) + { + DSC_Print("ERROR - Sink DSC Decoder does not support 8 bpc"); + return NVT_STATUS_INVALID_PARAMETER; + } + break; + + default: + DSC_Print("ERROR - Invalid bits per component specified"); + return NVT_STATUS_INVALID_PARAMETER; + } + } + else + { + DSC_Print("WARNING - Decoder Color Depth Mask was not provided. 
Assuming that decoder supports all depths."); + } + + // Validate WAR data + if (pWARData) + { + if ((pWARData->connectorType != DSC_DP) && (pWARData->connectorType != DSC_HDMI)) + { + DSC_Print("WARNING - Incorrect connector info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pWARData->connectorType == DSC_DP) + { + if (!IS_VALID_LANECOUNT(pWARData->dpData.laneCount)) + { + DSC_Print("ERROR - Incorrect DP Lane count info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (!IS_VALID_LINKBW(pWARData->dpData.linkRateHz / DP_LINK_BW_FREQ_MULTI_MBPS)) + { + DSC_Print("ERROR - Incorrect DP Link rate info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pWARData->dpData.hBlank > MAX_HBLANK_PIXELS) + { + DSC_Print("ERROR - Incorrect DP HBlank info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pWARData->dpData.dpMode != DSC_DP_SST) && (pWARData->dpData.dpMode != DSC_DP_MST)) + { + DSC_Print("ERROR - Incorrect DP Stream mode sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + } + } + + return NVT_STATUS_SUCCESS; +} + +/* ------------------------ Public Functions ------------------------------- */ + +/* + * @brief Calculate PPS parameters based on passed down Sink, + * GPU capability and modeset info + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * @param[out] pps Calculated PPS parameter. + * The data can be send to SetDscPpsData* methods directly. + * @param[out] pBitsPerPixelX16 Bits per pixel multiplied by 16 + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + * In case this returns failure consider that PPS is not possible. 
+ */ +NVT_STATUS +DSC_GeneratePPS +( + const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond, + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD], + NvU32 *pBitsPerPixelX16 +) +{ + DSC_INPUT_PARAMS *in = NULL; + NVT_STATUS ret = NVT_STATUS_ERR; + + if ((!pDscInfo) || (!pModesetInfo) || (!pBitsPerPixelX16)) + { + DSC_Print("ERROR - Invalid parameter."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + ret = _validateInput(pDscInfo, pModesetInfo, pWARData, availableBandwidthBitsPerSecond); + if (ret != NVT_STATUS_SUCCESS) + { + DSC_Print("ERROR - Invalid parameter."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + in = (DSC_INPUT_PARAMS *)DSC_Malloc(sizeof(DSC_INPUT_PARAMS)); + if (in == NULL) + { + DSC_Print("ERROR - Memory allocation error."); + ret = NVT_STATUS_NO_MEMORY; + goto done; + } + + NVMISC_MEMSET(in, 0, sizeof(DSC_INPUT_PARAMS)); + + in->bits_per_component = pModesetInfo->bitsPerComponent; + in->linebuf_depth = MIN((pDscInfo->sinkCaps.lineBufferBitDepth), (pDscInfo->gpuCaps.lineBufferBitDepth)); + in->block_pred_enable = pDscInfo->sinkCaps.bBlockPrediction; + + switch (pModesetInfo->colorFormat) + { + case NVT_COLOR_FORMAT_RGB: + in->convert_rgb = 1; + break; + + case NVT_COLOR_FORMAT_YCbCr444: + in->convert_rgb = 0; + break; + case NVT_COLOR_FORMAT_YCbCr422: + in->convert_rgb = 0; + + if ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) + { + in->native_422 = 1; + } + else if (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422) + { + in->simple_422 = 1; + } + else + { + DSC_Print("ERROR - YCbCr422 is not possible with current config."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + break; + case NVT_COLOR_FORMAT_YCbCr420: + in->convert_rgb = 0; + + if ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) + { + in->native_420 = 1; + } + else + { + DSC_Print("ERROR - YCbCr420 is not possible with current config."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + break; + + default: + DSC_Print("ERROR - Invalid color Format specified."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + // calculate max possible bits per pixel allowed by the available bandwidth + in->bits_per_pixel = (NvU32)((availableBandwidthBitsPerSecond * BPP_UNIT) / pModesetInfo->pixelClockHz); + + if (pWARData && (pWARData->connectorType == DSC_DP)) + { + // + // In DP case, being too close to the available bandwidth caused HW to hang. + // 2 is subtracted based on issues seen in DP CTS testing. Refer to bug 200406501, comment 76 + // This limitation is only on DP, not needed for HDMI DSC HW + // + in->bits_per_pixel = (NvU32)((availableBandwidthBitsPerSecond * BPP_UNIT) / pModesetInfo->pixelClockHz) - (BPP_UNIT/8); + + if (pWARData->dpData.laneCount == 1U) + { + // + // SOR lane fifo might get overflown when DP 1 lane, FEC enabled and pclk*bpp > 96%*linkclk*8 i.e. + // DSC stream is consuming more than 96% of the total bandwidth. Use lower bits per pixel. Refer Bug 200561864. 
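+            //
+            // Illustrative numbers (hypothetical link and mode, BPP_UNIT == 16):
+            // with availableBandwidthBitsPerSecond = 5,000,000,000 and
+            // pixelClockHz = 300,000,000 the line below yields
+            // (96 * 5e9 * 16) / (100 * 3e8) - 2 = 254, i.e. 15.875 bpp.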
+ // + in->bits_per_pixel = (NvU32)((96U * availableBandwidthBitsPerSecond * BPP_UNIT) / (100U * pModesetInfo->pixelClockHz)) - + (BPP_UNIT / 8U); + } + + if ((pWARData->dpData.dpMode == DSC_DP_SST) && (pWARData->dpData.hBlank < 100U)) + { + // + // For short HBlank timing, using bits per pixel value which may have to add DSC padding for each chunk + // may not be possible so use bits per pixel value which won't require DSC padding. Bug 200628516 + // + + NvU32 protocolOverhead; + NvU32 dscOverhead; + NvU32 minSliceCount = (NvU32)NV_CEIL(pModesetInfo->pixelClockHz, (MAX_PCLK_PER_SLICE_KHZ * 1000U)); + NvU32 sliceWidth; + NvU32 i; + + if ((minSliceCount > 2U) &&(minSliceCount < 4U)) + { + minSliceCount = 4U; + } + else if (minSliceCount > 4U) + { + minSliceCount = 8U; + } + + sliceWidth = (NvU32)NV_CEIL(pModesetInfo->activeWidth, minSliceCount); + + if (pWARData->dpData.laneCount == 1U) + { + protocolOverhead = 42U; + } + else if (pWARData->dpData.laneCount == 2U) + { + protocolOverhead = 24U; + } + else + { + protocolOverhead = 21U; + } + + dscOverhead = minSliceCount * 2U; + + if ((pWARData->dpData.hBlank * pWARData->dpData.linkRateHz / pModesetInfo->pixelClockHz) < + (protocolOverhead + dscOverhead + 3U)) + { + // + // For very short HBlank timing, find out bits per pixel value which will not require additional + // DSC padding. 128 will be used as the lowest bits per pixel value. + // + for (i = in->bits_per_pixel; i >= MIN_BITS_PER_PIXEL * BPP_UNIT; i--) + { + if (((i * sliceWidth) % ( 8U * minSliceCount * pWARData->dpData.laneCount * 16U)) == 0U) + { + break; + } + } + in->bits_per_pixel = i; + } + } + } + + // + // bits per pixel upper limit is minimum of 3 times bits per component or 32 + // + if (in->bits_per_pixel > MIN((3 * in->bits_per_component * BPP_UNIT), (MAX_BITS_PER_PIXEL * BPP_UNIT))) + { + in->bits_per_pixel = MIN((3 * in->bits_per_component * BPP_UNIT), (MAX_BITS_PER_PIXEL * BPP_UNIT)); + } + + in->bits_per_pixel = DSC_AlignDownForBppPrecision(in->bits_per_pixel, pDscInfo->sinkCaps.bitsPerPixelPrecision); + + // If user specified bits_per_pixel value to be used check if it is valid one + if (*pBitsPerPixelX16 != 0) + { + *pBitsPerPixelX16 = DSC_AlignDownForBppPrecision(*pBitsPerPixelX16, pDscInfo->sinkCaps.bitsPerPixelPrecision); + + // The calculation of in->bits_per_pixel here in PPSlib, which is the maximum bpp that is allowed by available bandwidth, + // which is applicable to DP alone and not to HDMI FRL. + // Before calling PPS lib to generate PPS data, HDMI library has done calculation according to HDMI2.1 spec + // to determine if FRL rate is sufficient for the requested bpp. So restricting the condition to DP alone. + if ((pWARData && (pWARData->connectorType == DSC_DP)) && + (*pBitsPerPixelX16 > in->bits_per_pixel)) + { + DSC_Print("ERROR - Invalid bits per pixel value specified."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + else + { + in->bits_per_pixel = *pBitsPerPixelX16; + } + + // For DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16. 
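+        //
+        // (256 here is 16 bpp expressed in the x16 fixed-point unit used
+        // for bits_per_pixel throughout this library: 16 * BPP_UNIT.)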
+ if (pModesetInfo->bDualMode && (in->bits_per_pixel > 256 /*bits_per_pixel = 16*/)) + { + DSC_Print("ERROR - DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + if ((pDscInfo->sinkCaps.maxBitsPerPixelX16 != 0) && (*pBitsPerPixelX16 > pDscInfo->sinkCaps.maxBitsPerPixelX16)) + { + DSC_Print("ERROR - bits per pixel value specified by user is greater than what DSC decompressor can support."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + } + else + { + // + // For DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16. + // Forcing it to 16. + // + if (pModesetInfo->bDualMode && (in->bits_per_pixel > 256 /*bits_per_pixel = 16*/)) + { + DSC_Print("ERROR - DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16."); + DSC_Print("ERROR - Forcing it to 16."); + in->bits_per_pixel = 256; + } + + // If calculated bits_per_pixel is 126 or 127, we need to use 128 value. Bug 2686078 + if ((in->bits_per_pixel == 126) || (in->bits_per_pixel == 127)) + { + DSC_Print("WARNING: bits_per_pixel is forced to 128 because calculated value was 126 or 127"); + in->bits_per_pixel = 128; + } + + if ((pDscInfo->sinkCaps.maxBitsPerPixelX16 != 0) && (in->bits_per_pixel > pDscInfo->sinkCaps.maxBitsPerPixelX16)) + { + DSC_Print("WARNING - Optimal bits per pixel value calculated is greater than what DSC decompressor can support. Forcing it to max that decompressor can support"); + in->bits_per_pixel = pDscInfo->sinkCaps.maxBitsPerPixelX16; + } + } + + in->dsc_version_minor = pDscInfo->forcedDscParams.dscRevision.versionMinor ? pDscInfo->forcedDscParams.dscRevision.versionMinor : + pDscInfo->sinkCaps.algorithmRevision.versionMinor; + in->pic_width = pModesetInfo->activeWidth; + in->pic_height = pModesetInfo->activeHeight; + in->slice_height = pDscInfo->forcedDscParams.sliceHeight; + in->slice_width = pDscInfo->forcedDscParams.sliceWidth; + in->slice_num = pDscInfo->forcedDscParams.sliceCount; + in->max_slice_num = MIN(pDscInfo->sinkCaps.maxNumHztSlices, + pModesetInfo->bDualMode ? pDscInfo->gpuCaps.maxNumHztSlices * 2 : pDscInfo->gpuCaps.maxNumHztSlices); + in->max_slice_width = pDscInfo->sinkCaps.maxSliceWidth; + in->pixel_clkMHz = (NvU32)(pModesetInfo->pixelClockHz / 1000000L); + in->dual_mode = pModesetInfo->bDualMode; + in->drop_mode = pModesetInfo->bDropMode; + in->slice_count_mask = pDscInfo->sinkCaps.sliceCountSupportedMask; + in->peak_throughput_mode0 = pDscInfo->sinkCaps.peakThroughputMode0; + in->peak_throughput_mode1 = pDscInfo->sinkCaps.peakThroughputMode1; + + if (in->native_422) + { + if (in->dsc_version_minor == 1) + { + DSC_Print("Error! DSC1.1 can't support native422!\n"); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + //the bpp in native 422 mode is doubled. 
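+        //
+        // Each native 4:2:2 "pixel" carries two subsampled samples, so the
+        // limits checked below are 2*6*BPP_UNIT through 2*32*BPP_UNIT - 1,
+        // still in the x16 fixed-point unit.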
+ if((((NvS32)(in->bits_per_pixel)) < (NvS32)(2*6*BPP_UNIT)) || + (((NvS32)(in->bits_per_pixel)) > (NvS32)(2*32*BPP_UNIT-1))) + { + DSC_Print("bits_per_pixelx16 (=%u) needs to be between %u and %u", + in->bits_per_pixel, 2*6*BPP_UNIT, 2*32*BPP_UNIT-1); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + } + else + { + if ((((NvS32)(in->bits_per_pixel)) < (NvS32)(8*BPP_UNIT)) || + (((NvS32)(in->bits_per_pixel)) > (NvS32)(32*BPP_UNIT))) + { + DSC_Print("bits_per_pixelx16 (=%u) needs to be between %u and %u", + in->bits_per_pixel, 8*BPP_UNIT, 32*BPP_UNIT); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + } + + ret = DSC_PpsDataGen(in, pps); + + *pBitsPerPixelX16 = in->bits_per_pixel; + + /* fall through */ +done: + DSC_Free(in); + + return ret; +} + +/* + * @brief Initializes callbacks for print and assert + * + * @param[in] callback DSC callbacks + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback) +{ + // if callbacks are initialized already, return nothing to do + if (callbacks.dscMalloc && callbacks.dscFree) + { + return NVT_STATUS_SUCCESS; + } + +#if defined(DSC_CALLBACK_MODIFIED) + callbacks.clientHandle = callback.clientHandle; +#endif // DSC_CALLBACK_MODIFIED + callbacks.dscPrint = NULL; + callbacks.dscMalloc = callback.dscMalloc; + callbacks.dscFree = callback.dscFree; +#if defined (DEBUG) + callbacks.dscPrint = callback.dscPrint; +#endif + + return NVT_STATUS_SUCCESS; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h new file mode 100644 index 0000000..651d420 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h @@ -0,0 +1,324 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/*
+===============================================================================
+
+    dsc_pps.h
+
+    Provides the definitions needed for DSC (Display Stream Compression)
+    PPS (Picture Parameter Set) generation.
+
+================================================================================
+*/
+
+#ifndef __DSCPPS_H__
+#define __DSCPPS_H__
+
+/* ------------------------ Includes --------------------------------------- */
+#include "nvtypes.h"
+#include "nvtiming.h"
+
+/* ------------------------ Macros ----------------------------------------- */
+#define DSC_MAX_PPS_SIZE_DWORD 32
+
+/* ------------------------ Datatypes -------------------------------------- */
+
+#define DSC_CALLBACK_MODIFIED 1
+
+#if defined(DSC_CALLBACK_MODIFIED)
+typedef struct
+{
+    // DSC - Callbacks
+    const void* clientHandle;   // clientHandle is only used when calling into the HDMI lib's mallocCb/freeCb
+    void  (*dscPrint) (const char* fmtstring, ...);
+    void *(*dscMalloc)(const void *clientHandle, NvLength size);
+    void  (*dscFree)  (const void *clientHandle, void * ptr);
+} DSC_CALLBACK;
+#else
+typedef struct
+{
+    // DSC - Callbacks
+    void  (*dscPrint) (const char* fmtstring, ...);
+    void *(*dscMalloc)(NvLength size);
+    void  (*dscFree)  (void * ptr);
+} DSC_CALLBACK;
+#endif // DSC_CALLBACK_MODIFIED
+
+typedef struct
+{
+    NvU32 versionMajor;
+    NvU32 versionMinor;
+} DSC_ALGORITHM_REV;
+
+typedef struct
+{
+    NvU64 pixelClockHz;           // Requested pixel clock for the mode
+    NvU32 activeWidth;            // Active Width
+    NvU32 activeHeight;           // Active Height
+    NvU32 bitsPerComponent;       // BPC value to be used
+    NVT_COLOR_FORMAT colorFormat; // Color format to be used for this modeset
+
+    //
+    // Whether to enable Dual mode for DSC.
+    // Dual mode specifies that 2 heads would be generating
+    // pixels for the complete stream.
+    //
+    NvBool bDualMode;
+
+    //
+    // Whether to enable DROP mode for DSC.
+    // DROP mode specifies that instead of compressing the pixels, OR will drop
+    // the pixels of the right half frame to reduce the data rate by half.
+    // This mode is added for testing the 2Head1OR solution without a DSC panel.
+    //
+    NvBool bDropMode;
+} MODESET_INFO;
+
+typedef struct
+{
+    struct SINK_DSC_CAPS
+    {
+        // Mask of all color formats for which decoding is supported by the panel
+        NvU32 decoderColorFormatMask;
+#define DSC_DECODER_COLOR_FORMAT_RGB                 (0x00000001)
+#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444         (0x00000002)
+#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422  (0x00000004)
+#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422  (0x00000008)
+#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420  (0x00000010)
+
+        // Supported bits-per-pixel precision, e.g. 1/16, 1/8, 1/4, 1/2, 1 bpp
+        NvU32 bitsPerPixelPrecision;
+#define DSC_BITS_PER_PIXEL_PRECISION_1_16            (0x00000001)
+#define DSC_BITS_PER_PIXEL_PRECISION_1_8             (0x00000002)
+#define DSC_BITS_PER_PIXEL_PRECISION_1_4             (0x00000004)
+#define DSC_BITS_PER_PIXEL_PRECISION_1_2             (0x00000008)
+#define DSC_BITS_PER_PIXEL_PRECISION_1               (0x00000010)
+
+        // Maximum slice width supported by the panel
+        NvU32 maxSliceWidth;
+
+        // Maximum number of horizontal slices supported
+        NvU32 maxNumHztSlices;
+
+        // Slice counts supported by the sink
+        NvU32 sliceCountSupportedMask;
+#define DSC_DECODER_SLICES_PER_SINK_INVALID          (0x00000000)
+#define DSC_DECODER_SLICES_PER_SINK_1                (0x00000001)
+#define DSC_DECODER_SLICES_PER_SINK_2                (0x00000002)
+#define DSC_DECODER_SLICES_PER_SINK_4                (0x00000008)
+#define DSC_DECODER_SLICES_PER_SINK_6                (0x00000010)
+#define DSC_DECODER_SLICES_PER_SINK_8                (0x00000020)
+#define DSC_DECODER_SLICES_PER_SINK_10               (0x00000040)
+#define DSC_DECODER_SLICES_PER_SINK_12               (0x00000080)
+#define DSC_DECODER_SLICES_PER_SINK_16               (0x00000100)
+#define DSC_DECODER_SLICES_PER_SINK_20               (0x00000200)
+#define DSC_DECODER_SLICES_PER_SINK_24               (0x00000400)
+
+        //
+        // Bit depth used by the sink device to store the
+        // reconstructed pixels within the line buffer
+        //
+        NvU32 lineBufferBitDepth;
+#define DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MIN        (0x00000008)
+#define DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MAX        (0x0000000D)
+
+        NvU32 decoderColorDepthCaps; // Color depth supported by the panel's DSC decoder
+#define DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS          (0x00000001)
+#define DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS         (0x00000002)
+#define DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS         (0x00000004)
+#define DSC_DECODER_COLOR_DEPTH_CAPS_16_BITS         (0x00000008)
+
+        NvU32 decoderColorDepthMask;
+
+        DSC_ALGORITHM_REV algorithmRevision; // DSC algorithm revision that the sink supports
+
+        NvBool bBlockPrediction; // Whether block prediction is supported or not.
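+
+        //
+        // The peak-throughput encodings below mirror the DP DPCD
+        // PEAK_DSC_THROUGHPUT fields and are in units of MPixels/s
+        // (e.g. _340 = 340 MP/s); 170 MP/s was a late addition to that
+        // encoding, which is why _170 follows _1000.
+        //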
+
+        // Peak throughput supported for 444 and simple 422 modes
+        NvU32 peakThroughputMode0;
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_INVALID    (0x00000000)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_340        (0x00000001)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_400        (0x00000002)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_450        (0x00000003)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_500        (0x00000004)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_550        (0x00000005)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_600        (0x00000006)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_650        (0x00000007)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_700        (0x00000008)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_750        (0x00000009)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_800        (0x0000000A)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_850        (0x0000000B)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_900        (0x0000000C)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_950        (0x0000000D)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_1000       (0x0000000E)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_170        (0x0000000F)
+
+        // Peak throughput supported for native 422 and 420 modes
+        NvU32 peakThroughputMode1;
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_INVALID    (0x00000000)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_340        (0x00000001)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_400        (0x00000002)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_450        (0x00000003)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_500        (0x00000004)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_550        (0x00000005)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_600        (0x00000006)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_650        (0x00000007)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_700        (0x00000008)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_750        (0x00000009)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_800        (0x0000000A)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_850        (0x0000000B)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_900        (0x0000000C)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_950        (0x0000000D)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_1000       (0x0000000E)
+#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_170        (0x0000000F)
+
+        // Maximum bits_per_pixel supported by the DSC decompressor, multiplied by 16
+        NvU32 maxBitsPerPixelX16;
+    } sinkCaps;
+
+    struct GPU_DSC_CAPS
+    {
+        // Mask of all color formats for which encoding is supported by the GPU
+        NvU32 encoderColorFormatMask;
+#define DSC_ENCODER_COLOR_FORMAT_RGB                 (0x00000001)
+#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444         (0x00000002)
+#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422  (0x00000004)
+#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420  (0x00000008)
+
+        //
+        // Size of the line buffer inside DSC, in number of pixels.
+        // This should be greater than or equal to the active width.
+        //
+        NvU32 lineBufferSize;
+
+        // Supported bits-per-pixel precision, e.g. 1/16, 1/8, 1/4, 1/2, 1 bpp
+        NvU32 bitsPerPixelPrecision;
+
+        // Maximum number of horizontal slices supported
+        NvU32 maxNumHztSlices;
+
+        //
+        // Bit depth used by the GPU to store the
+        // reconstructed pixels within the line buffer
+        //
+        NvU32 lineBufferBitDepth;
+    } gpuCaps;
+
+    struct FORCED_DSC_PARAMS
+    {
+        // Forced Slice count
+        NvU32 sliceCount;
+
+        // Forced Slice width
+        NvU32 sliceWidth;
+
+        // Forced Slice height
+        NvU32 sliceHeight;
+
+        // Forced DSC Algorithm Revision
+        DSC_ALGORITHM_REV dscRevision;
+    } forcedDscParams;
+} DSC_INFO;
+
+typedef struct
+{
+    NvU32 manufacturerID;
+    NvU32 productID;
+    NvU32 yearWeek;
+} EDID_INFO;
+
+typedef enum
+{
+    DSC_DP,
+    DSC_HDMI
+} DSC_CONNECTOR_TYPE;
+
+typedef enum
+{
+    DSC_DP_SST,
+    DSC_DP_MST
+} DSC_DP_MODE;
+
+typedef struct
+{
+    DSC_CONNECTOR_TYPE connectorType;
+    struct DP_DATA
+    {
+        NvU64 linkRateHz;
+        NvU32 laneCount;
+        DSC_DP_MODE dpMode;
+        NvU32 hBlank;
+    } dpData;
+} WAR_DATA;
+
+/*
+ * Windows testbed compiles are done with warnings as errors
+ * with the maximum warning level. Here we turn off some
+ * of the problematic warnings.
+ */
+
+/* ------------------------ Global Variables ------------------------------- */
+/* ------------------------ Static Variables ------------------------------- */
+/* ------------------------ Private Functions ------------------------------ */
+/* ------------------------ Public Functions ------------------------------- */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * @brief Initializes callbacks for print and assert
+ *
+ * @param[in]   callback   DSC callbacks
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback);
+
+/*
+ * @brief Calculate PPS parameters based on passed down sink,
+ *        GPU capability and modeset info
+ *
+ * @param[in]   pDscInfo       Includes sink and GPU DSC capabilities
+ * @param[in]   pModesetInfo   Modeset related information
+ * @param[in]   pWARData       Data required for providing WAR for issues
+ * @param[in]   availableBandwidthBitsPerSecond   Available bandwidth for video
+ *                             transmission (after FEC/downspread overhead consideration)
+ * @param[out]  pps            Calculated PPS parameter.
+ *                             The data can be sent to the SetDscPpsData* methods directly.
+ * @param[out]  pBitsPerPixelX16  Bits per pixel multiplied by 16
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+NVT_STATUS DSC_GeneratePPS(const DSC_INFO *pDscInfo,
+                           const MODESET_INFO *pModesetInfo,
+                           const WAR_DATA *pWARData,
+                           NvU64 availableBandwidthBitsPerSecond,
+                           NvU32 pps[DSC_MAX_PPS_SIZE_DWORD],
+                           NvU32 *pBitsPerPixelX16);
+
+#ifdef __cplusplus
+}
+#endif
+#endif // __DSCPPS_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c
new file mode 100644
index 0000000..6fe3fb5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c
@@ -0,0 +1,2662 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvt_edid.c
+//
+// Purpose: provide EDID related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "edid.h"
+
+PUSH_SEGMENTS
+
+// Macro to declare a TIMING initializer for the given parameters, without borders
+#define EST_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,format) \
+{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,1,{0},{0},{0},{0},format,"VESA Established"}}
+
+DATA_SEGMENT(PAGE_DATA)
+#if !defined(NV_WSA)
+CONS_SEGMENT(PAGE_CONS)
+#endif // wsa
+
+// There is a large table of strings that translate 3-character PNP vendor IDs to a more user-friendly name in the following header.
+// Mark this constant table as pageable.
+#include "nvPNPVendorIds.h" + +static const NVT_TIMING EDID_EST[] = +{ + EST_TIMING( 720, 0, 0, 720,'-', 400, 0,0, 400,'-',70, 0,NVT_STATUS_EDID_EST), // 720x400x70Hz (IBM, VGA) + EST_TIMING( 720, 0, 0, 720,'-', 400, 0,0, 400,'-',88, 0,NVT_STATUS_EDID_EST), // 720x400x88Hz (IBM, XGA2) + {640,0,16,96,800,NVT_H_SYNC_NEGATIVE,480,0,10,2,525,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,2518,{0,60,60000,0,1,{0},{0},{0},{0},NVT_STATUS_EDID_EST,"EDID_Established"}}, + + EST_TIMING( 640, 0, 0, 640,'-', 480, 0,0, 480,'-',67, 0,NVT_STATUS_EDID_EST), // 640x480x67Hz (Apple, Mac II) + + // 640x480x72Hz (VESA) - this entry have borders + {640,8,16,40,832,NVT_H_SYNC_NEGATIVE,480,8,1,3,520,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,3150,{0,72,72000,0,1,{0},{0},{0},{0},NVT_STATUS_EDID_EST,"EDID_Established"}}, + EST_TIMING( 640,16, 64, 840,'-', 480, 1,3, 500,'-',75, 3150,NVT_STATUS_EDID_EST), // 640x480x75Hz (VESA) + EST_TIMING( 800,24, 72,1024,'+', 600, 1,2, 625,'+',56, 3600,NVT_STATUS_EDID_EST), // 800x600x56Hz (VESA) + EST_TIMING( 800,40,128,1056,'+', 600, 1,4, 628,'+',60, 4000,NVT_STATUS_EDID_EST), // 800x600x60Hz (VESA) + + EST_TIMING( 800,56,120,1040,'+', 600,37,6, 666,'+',72, 5000,NVT_STATUS_EDID_EST), // 800x600x72Hz (VESA) + EST_TIMING( 800,16, 80,1056,'+', 600, 1,3, 625,'+',75, 4950,NVT_STATUS_EDID_EST), // 800x600x75Hz (VESA) + EST_TIMING( 832, 0, 0, 832,'-', 624, 0,0, 624,'-',75, 0,NVT_STATUS_EDID_EST), // 832x624x75Hz (Apple, Mac II) + EST_TIMING(1024, 0, 0,1024,'-', 768, 0,0, 768,'-',87, 0,NVT_STATUS_EDID_EST), // 1024x768x87Hz (IBM, Interlaced) + + EST_TIMING(1024,24,136,1344,'-', 768, 3,6, 806,'-',60, 6500,NVT_STATUS_EDID_EST), // 1024x768x60Hz (VESA) + EST_TIMING(1024,24,136,1328,'-', 768, 3,6, 806,'-',70, 7500,NVT_STATUS_EDID_EST), // 1024x768x70Hz (VESA) + EST_TIMING(1024,16, 96,1312,'+', 768, 1,3, 800,'+',75, 7875,NVT_STATUS_EDID_EST), // 1024x768x75Hz (VESA) + EST_TIMING(1280,16,144,1688,'+',1024, 1,3,1066,'+',75,13500,NVT_STATUS_EDID_EST), // 1280x1024x75Hz (VESA) + + // the end + NVT_TIMING_SENTINEL +}; + +static NvU32 MAX_EST_FORMAT = sizeof(EDID_EST)/sizeof(EDID_EST[0]) - 1; + +static const NVT_TIMING EDID_ESTIII[] = +{ + EST_TIMING( 640, 32, 64, 832,'+', 350,32,3, 445,'-',85, 3150,NVT_STATUS_EDID_EST), // 640x350x85Hz + EST_TIMING( 640, 32, 64, 832,'-', 400, 1,3, 445,'+',85, 3150,NVT_STATUS_EDID_EST), // 640x400x85Hz + EST_TIMING( 720, 36, 72, 936,'-', 400, 1,3, 446,'+',85, 3550,NVT_STATUS_EDID_EST), // 720x400x85Hz + EST_TIMING( 640, 56, 56, 832,'-', 480, 1,3, 509,'-',85, 3600,NVT_STATUS_EDID_EST), // 640x480x85Hz + EST_TIMING( 848, 16,112,1088,'+', 480, 6,8, 517,'+',60, 3375,NVT_STATUS_EDID_EST), // 848x480x60HZ + EST_TIMING( 800, 32, 64,1048,'+', 600, 1,3, 631,'+',85, 5625,NVT_STATUS_EDID_EST), // 800x600x85Hz + EST_TIMING(1024, 48, 96,1376,'+', 768, 1,3, 808,'+',85, 9450,NVT_STATUS_EDID_EST), // 1024x768x85Hz + EST_TIMING(1152, 64,128,1600,'+', 864, 1,3, 900,'+',75,10800,NVT_STATUS_EDID_EST), // 1152x864x75Hz + + EST_TIMING(1280, 48, 32,1440,'+', 768, 3,7, 790,'-',60, 6825,NVT_STATUS_EDID_EST), // 1280x768x60Hz (RB) + EST_TIMING(1280, 64,128,1664,'-', 768, 3,7, 798,'+',60, 7950,NVT_STATUS_EDID_EST), // 1280x768x60Hz + EST_TIMING(1280, 80,128,1696,'-', 768, 3,7, 805,'+',75,10225,NVT_STATUS_EDID_EST), // 1280x768x75Hz + EST_TIMING(1280, 80,136,1712,'-', 768, 3,7, 809,'+',85,11750,NVT_STATUS_EDID_EST), // 1280x768x85Hz + EST_TIMING(1280, 96,112,1800,'+', 960, 1,3,1000,'+',60,10800,NVT_STATUS_EDID_EST), // 1280x960x60Hz + EST_TIMING(1280, 64,160,1728,'+', 960, 
1,3,1011,'+',85,14850,NVT_STATUS_EDID_EST), // 1280x960x85Hz + EST_TIMING(1280, 48,112,1688,'+',1024, 1,3,1066,'+',60,10800,NVT_STATUS_EDID_EST), // 1280x1024x60Hz + EST_TIMING(1280, 64,160,1728,'+',1024, 1,3,1072,'+',85,15750,NVT_STATUS_EDID_EST), // 1280x1024x85Hz + + EST_TIMING(1360, 64,112,1792,'+', 768, 3,6, 795,'+',60, 8550,NVT_STATUS_EDID_EST), // 1360x768x60Hz + EST_TIMING(1440, 48, 32,1600,'+', 900, 3,6, 926,'-',60, 8875,NVT_STATUS_EDID_EST), // 1440x900x60Hz (RB) + EST_TIMING(1440, 80,152,1904,'-', 900, 3,6, 934,'+',60,10650,NVT_STATUS_EDID_EST), // 1440x900x60Hz + EST_TIMING(1440, 96,152,1936,'-', 900, 3,6, 942,'+',75,13675,NVT_STATUS_EDID_EST), // 1440x900x75Hz + EST_TIMING(1440,104,152,1952,'-', 900, 3,6, 948,'+',85,15700,NVT_STATUS_EDID_EST), // 1440x900x85Hz + EST_TIMING(1400, 48, 32,1560,'+',1050, 3,4,1080,'-',60,10100,NVT_STATUS_EDID_EST), // 1440x1050x60Hz (RB) + EST_TIMING(1400, 88,144,1864,'-',1050, 3,4,1089,'+',60,12175,NVT_STATUS_EDID_EST), // 1440x1050x60Hz + EST_TIMING(1400,104,144,1896,'-',1050, 3,4,1099,'+',75,15600,NVT_STATUS_EDID_EST), // 1440x1050x75Hz + + EST_TIMING(1400,104,152,1912,'-',1050, 3,4,1105,'+',85,17950,NVT_STATUS_EDID_EST), // 1440x1050x85Hz + EST_TIMING(1680, 48, 32,1840,'+',1050, 3,6,1080,'-',60,11900,NVT_STATUS_EDID_EST), // 1680x1050x60Hz (RB) + EST_TIMING(1680,104,176,2240,'-',1050, 3,6,1089,'+',60,14625,NVT_STATUS_EDID_EST), // 1680x1050x60Hz + EST_TIMING(1680,120,176,2272,'-',1050, 3,6,1099,'+',75,18700,NVT_STATUS_EDID_EST), // 1680x1050x75Hz + EST_TIMING(1680,128,176,2288,'-',1050, 3,6,1105,'+',85,21475,NVT_STATUS_EDID_EST), // 1680x1050x85Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',60,16200,NVT_STATUS_EDID_EST), // 1600x1200x60Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',65,17550,NVT_STATUS_EDID_EST), // 1600x1200x65Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',70,18900,NVT_STATUS_EDID_EST), // 1600x1200x70Hz + + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',75,20250,NVT_STATUS_EDID_EST), // 1600x1200x75Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',85,22950,NVT_STATUS_EDID_EST), // 1600x1200x85Hz + EST_TIMING(1792,128,200,2448,'-',1344, 1,3,1394,'+',60,20475,NVT_STATUS_EDID_EST), // 1792x1344x60Hz + EST_TIMING(1792, 96,216,2456,'-',1344, 1,3,1417,'+',75,26100,NVT_STATUS_EDID_EST), // 1792x1344x75Hz + EST_TIMING(1856, 96,224,2528,'-',1392, 1,3,1439,'+',60,21825,NVT_STATUS_EDID_EST), // 1856x1392x60Hz + EST_TIMING(1856,128,224,2560,'-',1392, 1,3,1500,'+',75,28800,NVT_STATUS_EDID_EST), // 1856x1392x75Hz + EST_TIMING(1920, 48, 32,2080,'+',1200, 3,6,1235,'-',60,15400,NVT_STATUS_EDID_EST), // 1920x1200x60Hz (RB) + EST_TIMING(1920,136,200,2592,'-',1200, 3,6,1245,'+',60,19325,NVT_STATUS_EDID_EST), // 1920x1200x60Hz + + EST_TIMING(1920,136,208,2608,'-',1200, 3,6,1255,'+',75,24525,NVT_STATUS_EDID_EST), // 1920x1200x75Hz + EST_TIMING(1920,144,208,2624,'-',1200, 3,6,1262,'+',85,28125,NVT_STATUS_EDID_EST), // 1920x1200x85Hz + EST_TIMING(1920,128,208,2600,'-',1440, 1,3,1500,'+',60,23400,NVT_STATUS_EDID_EST), // 1920x1440x60Hz + EST_TIMING(1920,144,224,2640,'-',1440, 1,3,1500,'+',75,29700,NVT_STATUS_EDID_EST), // 1920x1440x75Hz + + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL +}; + +static NvU32 MAX_ESTIII_FORMAT = sizeof(EDID_ESTIII)/sizeof(EDID_ESTIII[0]) - 1; + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumEST(NvU32 index, NVT_TIMING *pT) +{ + if ((pT == NULL) || (index > MAX_EST_FORMAT)) + { + return NVT_STATUS_ERR; + } + + *pT = EDID_EST[index]; 
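+
+    // Note: the table entry's rrx1k is recomputed below from the pixel clock.
+    // pclk is stored in 10 kHz units and rrx1k in 0.001 Hz units, so
+    // rrx1k = pclk * 10000 * 1000 / (HTotal * VTotal).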
+
+    if (pT->HTotal == 0 || pT->VTotal == 0)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk,
+                              (NvU32)10000*(NvU32)1000,
+                              (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EnumESTIII(NvU32 index, NVT_TIMING *pT)
+{
+    if ((pT == NULL) || (index > MAX_ESTIII_FORMAT))
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    *pT = EDID_ESTIII[index];
+
+    if (pT->HTotal == 0 || pT->VTotal == 0)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk,
+                              (NvU32)10000*(NvU32)1000,
+                              (NvU32)pT->HTotal*(NvU32)pT->VTotal);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 isHdmi3DStereoType(NvU8 StereoStructureType)
+{
+    return ((NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK      == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT      == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT       == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH         == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX      == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM      == StereoStructureType) ||
+            (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == StereoStructureType));
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 NvTiming_GetVESADisplayDescriptorVersion(NvU8 *rawData, NvU32 *pVer)
+{
+    return getEdidVersion(rawData, pVer);
+}
+
+// get the EDID version
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 getEdidVersion(NvU8 *pEdid, NvU32 *pVer)
+{
+    EDIDV1STRUC *p = (EDIDV1STRUC *) pEdid;
+
+    if (pEdid[0] == 0x00)
+    {
+        // For Version 1.x, the first 8 bytes of the EDID must be 00h, FFh, FFh, FFh, FFh, FFh, FFh, 00h.
+        // Beware of Endian-ness and signed-ness.
+        if (p->bHeader[1] != 0xFF || p->bHeader[2] != 0xFF || p->bHeader[3] != 0xFF ||
+            p->bHeader[4] != 0xFF || p->bHeader[5] != 0xFF || p->bHeader[6] != 0xFF ||
+            p->bHeader[7] != 0x00)
+            return NVT_STATUS_ERR;
+
+        *pVer = (((NvU32) p->bVersionNumber) << 8) + ((NvU32) p->bRevisionNumber);
+    }
+    else if ((pEdid[0] & 0xF0) == 0x20 && (pEdid[0] & 0x0F) >= 0)
+        *pVer = (((NvU32) (pEdid[0] & 0XF0) << 4) + (NvU32) (pEdid[0] & 0X0F)); // DisplayID version 2.x
+    else
+        return NVT_STATUS_ERR; // unrecognized EDID version
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidCvt3ByteDescriptor(NvU8 *p, NVT_EDID_INFO *pInfo, NvU32 *vtbCount)
+{
+    NvU32 k;
+    NvU32 width, height, aspect, rr = 0;
+    NVT_EDID_DD_CVT_3BYTE_BLOCK *pTiming = (NVT_EDID_DD_CVT_3BYTE_BLOCK *)p;
+    NVT_TIMING newTiming;
+    NVT_STATUS status;
+
+
+    if (pTiming->addressable_lines == 0)
+        return;
+
+    height = pTiming->addressable_lines;
+    aspect = pTiming->aspect_ratio;
+
+    if (aspect == NVT_EDID_CVT3_ASPECT_4X3)
+        width = height * 4 / 3;
+    else if (aspect == NVT_EDID_CVT3_ASPECT_16X9)
+        width = height * 16 / 9;
+    else if (aspect == NVT_EDID_CVT3_ASPECT_16X10)
+        width = height * 16 / 10;
+    else //15:9
+        width = height * 15 / 9;
+
+    width &= 0xFFFFFFF8; // round down to nearest 8
+
+    // loop through bits 4:0 of supported_vert_rates so we can add a timing
+    // for each supported rate
+    for (k=1; k<=0x10; k<<=1)
+    {
+        // skip if this bit indicates no support for the rate
+        if ( (pTiming->supported_vert_rates & (k)) == 0)
+            continue;
+
+        // find the correct refresh rate for this bit
+        switch (k)
+        {
+            case NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING :
+            case NVT_EDID_CVT3_SUPPORTED_RATE_60HZ :
+                rr = 60;
+                break;
+            case NVT_EDID_CVT3_SUPPORTED_RATE_85HZ :
+                rr = 85;
+                break;
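+            // (the 60HZ_REDUCED_BLANKING case above intentionally shares
+            //  rr = 60 with the 60HZ case; reduced vs. standard blanking is
+            //  decided separately after this switch)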
+            case NVT_EDID_CVT3_SUPPORTED_RATE_75HZ :
+                rr = 75;
+                break;
+            case NVT_EDID_CVT3_SUPPORTED_RATE_50HZ :
+                rr = 50;
+                break;
+        }
+
+        NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+        if ( (k) != NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING) // standard blanking
+        {
+            status = NvTiming_CalcCVT(width, height, rr,
+                                      NVT_PROGRESSIVE,
+                                      &newTiming);
+        }
+        else // reduced blanking
+        {
+            status = NvTiming_CalcCVT_RB(width, height, rr,
+                                         NVT_PROGRESSIVE,
+                                         &newTiming);
+        }
+
+        if (status == NVT_STATUS_SUCCESS)
+        {
+            // For VTB timings, add additional information
+            if (vtbCount)
+            {
+                (*vtbCount)++;
+                newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_CVTn(*vtbCount);
+                newTiming.etc.name[39] = '\0';
+            }
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+        }
+    } // for (k=1; k<=0x10; k<<=1)
+
+}
+
+// parse the EDID 1.x based cvt timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidCvtTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j;
+
+    // find display range limit with cvt, or cvt 3-byte LDDs
+    for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_CVT)
+        {
+            NVT_EDID_DD_CVT_3BYTE *pCVT = (NVT_EDID_DD_CVT_3BYTE *)&pInfo->ldd[i].u.cvt;
+
+            // loop through cvt 3-byte blocks (4 blocks per 18-byte descriptor)
+            for (j=0; j<4; j++)
+            {
+                parseEdidCvt3ByteDescriptor((NvU8 *)(pCVT->block + j),
+                                            pInfo, NULL);
+            } // for (j=0; j<4; j++)
+        }
+    }
+}
+
+// parse the EDID 1.x based established timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidEstablishedTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j, k;
+    NVT_TIMING newTiming;
+    NvU32 count = 0;
+
+    // now check for the established timings
+    for (i = 1 << (sizeof(pInfo->established_timings_1_2) * 8 - 1), j = 0; i != 0; i >>= 1, j ++)
+    {
+        if ((pInfo->established_timings_1_2 & i) != 0 && EDID_EST[j].pclk != 0)
+        {
+            // count the timing
+            newTiming = EDID_EST[j];
+            newTiming.etc.status = NVT_STATUS_EDID_ESTn(++count);
+            NVT_SNPRINTF((char *)newTiming.etc.name, 40,
+                         "EDID-EST(VESA):%dx%dx%dHz",
+                         (int)newTiming.HVisible,
+                         (int)newTiming.VVisible,
+                         (int)newTiming.etc.rr);
+            newTiming.etc.name[39] = '\0';
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+        }
+    }
+
+    // ESTIII block in ldd only supported in EDID1.4 and above
+    if (pInfo->version < NVT_EDID_VER_1_4)
+        return;
+
+    for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII)
+        {
+            NVT_EDID_DD_EST_TIMING3* pEST = &pInfo->ldd[i].u.est3;
+
+            // 6 payload bytes of timing bits; the 4 sentinel entries at the end
+            // of EDID_ESTIII[] cover the reserved bits of the last byte
+            for (j=0; j<6; j++)
+            {
+                for (k=0; k<8; k++)
+                {
+                    // bit 7 of each byte corresponds to the first timing of that byte
+                    if (pEST->data[j] & (1 << (7 - k)) && EDID_ESTIII[j * 8 + k].pclk != 0)
+                    {
+                        newTiming = EDID_ESTIII[j * 8 + k];
+                        newTiming.etc.status = NVT_STATUS_EDID_ESTn(++count);
+                        NVT_SNPRINTF((char *)newTiming.etc.name, 40,
+                                     "EDID-EST(III):%dx%dx%dHz",
+                                     (int)newTiming.HVisible,
+                                     (int)newTiming.VVisible,
+                                     (int)newTiming.etc.rr);
+                        newTiming.etc.name[39] = '\0';
+
+                        if (!assignNextAvailableTiming(pInfo, &newTiming))
+                        {
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+// parse a single EDID 1.x standard timing descriptor
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidStandardTimingDescriptor(NvU16 timing, NVT_EDID_INFO *pInfo, NvU32 count, NVT_TIMING *pT)
+{
+    NvU32 aspect, width, height, rr;
+
+    // get the width; it is stored as (horizontal pixels / 8) - 31
+    width = ((timing & 0x0FF) + 31) * 8;
+    rr = ((timing >> 8) & 0x3F) + 60; // bits 5->0
+
+    // get the height
+    aspect = ((timing >> 8) & 0xC0); // aspect ratio at bit 7:6
+    if (aspect == 0x00)
+        height = (pInfo->version < 0x103) ? width : (width * 5 / 8); //16:10 per EDID1.3 and 1:1 with earlier EDID
+    else if (aspect == 0x40)
+        height = width * 3 / 4;  //4:3
+    else if (aspect == 0x80)
+        height = width * 4 / 5;  //5:4
+    else
+        height = width * 9 / 16; //16:9
+
+    // try to get the timing from DMT first
+    if (NvTiming_CalcDMT(width, height, rr, 0, pT) == NVT_STATUS_SUCCESS)
+    {
+        pT->etc.status = NVT_STATUS_EDID_STDn(count);
+        NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(DMT):%dx%dx%dHz", (int)width, (int)height, (int)rr);
+        pT->etc.name[39] = '\0';
+    }
+    else if (pInfo->version >= NVT_EDID_VER_1_4)
+    {
+        // EDID1.4 and above defaults to CVT, instead of GTF. GTF is deprecated as of 1.4.
+        NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pT);
+        pT->etc.status = NVT_STATUS_EDID_STDn(count);
+        NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(CVT):%dx%dx%dHz", (int)width, (int)height, (int)rr);
+        pT->etc.name[39] = '\0';
+    }
+    else
+    {
+        // if the mode is not found in DMT, use GTF timing
+        NvTiming_CalcGTF(width, height, rr, NVT_PROGRESSIVE, pT);
+        pT->etc.status = NVT_STATUS_EDID_STDn(count);
+        NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(GTF):%dx%dx%dHz", (int)width, (int)height, (int)rr);
+        pT->etc.name[39] = '\0';
+    }
+}
+
+// parse the EDID 1.x based standard timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidStandardTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j;
+    NVT_TIMING newTiming;
+    NvU32 count = 0;
+
+    // now check for standard timings
+    for (i=0; i<NVT_EDID_MAX_STANDARD_TIMINGS; i++)
+    {
+        if (((pInfo->standard_timings[i] & 0x0FF) != 0x1) && //proper indication of unused field
+            (pInfo->standard_timings[i] != 0x0))             //improper indication (bad edid)
+        {
+            NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+            parseEdidStandardTimingDescriptor(pInfo->standard_timings[i],
+                                              pInfo, count, &newTiming);
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+
+            count++;
+        }//if ((pInfo->standard_timings[i] & 0x0FF) != 0x1)
+    } //for (i=0; i<NVT_EDID_MAX_STANDARD_TIMINGS; i++)
+
+    if (pInfo->version < NVT_EDID_VER_1_4)
+        return;
+
+    // now check for standard timings in long display descriptors
+    for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_STI)
+        {
+            NVT_EDID_DD_STD_TIMING* pSTI = &pInfo->ldd[i].u.std_timing;
+            for (j=0; j<6; j++) // 6 standard timing codes per descriptor
+            {
+                if ((pSTI->descriptor[j] & 0x0FF) != 0x00)
+                {
+                    NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+                    parseEdidStandardTimingDescriptor(pSTI->descriptor[j],
+                                                      pInfo, count, &newTiming);
+
+                    if (!assignNextAvailableTiming(pInfo, &newTiming))
+                    {
+                        break;
+                    }
+
+                    count++;
+                } // if ((pSTI->descriptor[j] & 0x0FF) != 0x00)
+            } // for (j=0; j<6; j++)
+        } // if (pInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_STI)
+    } //for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+}
+
+// parse the EDID 1.x based detailed timing descriptor
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS parseEdidDetailedTimingDescriptor(NvU8 *p, NVT_TIMING *pT)
+{
+    DETAILEDTIMINGDESCRIPTOR *pDTD = (DETAILEDTIMINGDESCRIPTOR *)p;
+    NvS32 hvisible, vvisible;
+    NvU32 dwTotalPixels;
+
+    // a valid detailed timing must have a non-zero pixel clock or horizontal
+    // active, and must not be the 0x0101... filler pattern
+    if ((pDTD->wDTPixelClock !=0 || pDTD->bDTHorizontalActive !=0)
+        && (pDTD->wDTPixelClock != 0x0101 || pDTD->bDTHorizontalActive != 1 ||
+            pDTD->bDTHorizontalBlanking != 1 || pDTD->bDTHorizActiveBlank != 1))
+    {
+        // Note that hvisible and vvisible here correspond to the "Addressable Video" portion of the
+        // "Active Video" defined in the EDID spec (see section 3.12: Note Regarding Borders)
+        hvisible = (pDTD->bDTHorizontalActive + ((pDTD->bDTHorizActiveBlank & 0xF0) << 4)) - 2 * pDTD->bDTHorizontalBorder;
+        vvisible = (pDTD->bDTVerticalActive + ((pDTD->bDTVertActiveBlank & 0xF0) << 4)) - 2 * pDTD->bDTVerticalBorder;
+
+        // Sanity check since we are getting values from the monitor
+        if (hvisible <= 0 || vvisible <= 0 || pDTD->wDTPixelClock == 0)
+        {
+            if (pT)
+                pT->HVisible = 0;
+            return NVT_STATUS_ERR;
+        }
+
+        // if the output timing buffer is not provided, simply return here to indicate a legal descriptor
+        if (pT == NULL)
+            return NVT_STATUS_SUCCESS;
+
+        // horizontal timing parameters
+        pT->HVisible    = (NvU16)hvisible;
+        pT->HBorder     = (NvU16)pDTD->bDTHorizontalBorder;
+        pT->HTotal      = (NvU16)hvisible + (NvU16)(pDTD->bDTHorizontalBlanking + ((pDTD->bDTHorizActiveBlank & 0x0F) << 8)) + pT->HBorder * 2;
+        pT->HFrontPorch = (NvU16)(pDTD->bDTHorizontalSync + ((pDTD->bDTHorizVertSyncOverFlow & 0xC0) << 2));
+        pT->HSyncWidth  = (NvU16)(pDTD->bDTHorizontalSyncWidth + ((pDTD->bDTHorizVertSyncOverFlow & 0x30) << 4));
+
+        // vertical timing parameters
+        pT->VVisible    = (NvU16)vvisible;
+        pT->VBorder     = (NvU16)pDTD->bDTVerticalBorder;
+        pT->VTotal      = (NvU16)vvisible + (NvU16)(pDTD->bDTVerticalBlanking + ((pDTD->bDTVertActiveBlank & 0x0F) << 8)) + pT->VBorder * 2;
+        pT->VFrontPorch = (NvU16)(((pDTD->bDTVerticalSync & 0xF0) >> 4) + ((pDTD->bDTHorizVertSyncOverFlow & 0x0C) << 2));
+        pT->VSyncWidth  = (NvU16)((pDTD->bDTVerticalSync & 0x0F) + ((pDTD->bDTHorizVertSyncOverFlow & 0x03) << 4));
+
+        // pixel clock
+        pT->pclk = (NvU32)pDTD->wDTPixelClock;
+
+        // sync polarities
+        if ((pDTD->bDTFlags & 0x18) == 0x18)
+        {
+            pT->HSyncPol = ((pDTD->bDTFlags & 0x2) != 0) ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE;
+            pT->VSyncPol = ((pDTD->bDTFlags & 0x4) != 0) ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE;
+        }
+        else if ((pDTD->bDTFlags & 0x18) == 0x10)
+        {
+            pT->HSyncPol = ((pDTD->bDTFlags & 0x2) != 0) ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE;
+            pT->VSyncPol = NVT_V_SYNC_POSITIVE;
+        }
+        else
+        {
+            pT->HSyncPol = NVT_H_SYNC_NEGATIVE;
+            pT->VSyncPol = NVT_V_SYNC_POSITIVE;
+        }
+
+        // interlaced
+        if ((pDTD->bDTFlags & 0x80) == 0x80)
+            pT->interlaced = 1;
+        else
+            pT->interlaced = 0;
+
+        // Eizo split EDID case, using 0th bit to indicate split display capability
+        if (((pDTD->bDTFlags & 1) == 1) && !(((pDTD->bDTFlags & 0x20) == 0x20) || ((pDTD->bDTFlags & 0x40) == 0x40)))
+        {
+            pT->etc.flag |= NVT_FLAG_EDID_DTD_EIZO_SPLIT;
+        }
+        if (pT->interlaced)
+        {
+            // Adjust for one extra blank line in every other frame.
+            dwTotalPixels = (((NvU32)pT->HTotal * pT->VTotal) +
+                             ((NvU32)pT->HTotal * (pT->VTotal + 1))) / 2;
+        }
+        else
+        {
+            dwTotalPixels = (NvU32)pT->HTotal * pT->VTotal;
+        }
+
+        pT->etc.rr = (NvU16)(((NvU32)pDTD->wDTPixelClock*10000+dwTotalPixels/2)/dwTotalPixels);
+        // Using a utility call to multiply and divide, to take care of overflow and truncation of large values.
+        // How did we arrive at 10000000? It comes from the fact that the pixel clock mentioned in the EDID is in multiples of 10KHz = 10000,
+        // and the refresh rate is mentioned in 0.001Hz, that is, 60Hz will be represented as 60000, which brings in the factor of 1000.
+        // And hence 10000 * 1000 = 10000000
+        pT->etc.rrx1k = axb_div_c(pDTD->wDTPixelClock, 10000000, dwTotalPixels);
+        pT->etc.status = NVT_STATUS_EDID_DTD;
+        NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "EDID-Detailed:%dx%dx%d.%03dHz%s", (int)pT->HVisible, (int)(pT->interlaced ? 2 : 1)*pT->VVisible, (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? "/i" : ""));
+        pT->etc.name[sizeof(pT->etc.name) - 1] = '\0';
+
+        // aspect ratio: horizontal image size in the high 16 bits, vertical in the low 16 bits
+        pT->etc.aspect = (pDTD->bDTHorizVertImage & 0xF0) << 20 | pDTD->bDTHorizontalImage << 16 |
+                         (pDTD->bDTHorizVertImage & 0x0F) << 8  | pDTD->bDTVerticalImage;
+
+        pT->etc.rep = 0x1; // Bit mask for no pixel repetition.
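+
+        // Worked example of the refresh-rate math above: a 1920x1080 DTD with
+        // wDTPixelClock = 14850 (148.5 MHz) and HTotal*VTotal = 2200*1125 gives
+        //   rr    = (14850*10000 + 2475000/2) / 2475000 = 60 Hz
+        //   rrx1k = 14850 * 10000000 / 2475000          = 60000 (0.001 Hz units)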
+
+        return NVT_STATUS_SUCCESS;
+    }
+
+    return NVT_STATUS_ERR;
+}
+
+// parse the EDID 1.x based detailed timing info
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidDetailedTiming(NvU8 *pEdid, NVT_EDID_INFO *pInfo)
+{
+    EDIDV1STRUC *p = (EDIDV1STRUC *) pEdid;
+    NVT_TIMING newTiming;
+    NvU32 i;
+    NvBool found = NV_FALSE;
+
+    for (i = 0; i < 4; i++)
+    {
+        NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming));
+
+        if (parseEdidDetailedTimingDescriptor((NvU8 *)&p->DetailedTimingDesc[i],
+                                              &newTiming) == NVT_STATUS_SUCCESS)
+        {
+            newTiming.etc.status = NVT_STATUS_EDID_DTDn(i+1);
+
+            if (!assignNextAvailableTiming(pInfo, &newTiming))
+            {
+                break;
+            }
+
+            found = NV_TRUE;
+        }
+    }
+
+    if (found)
+    {
+        // if edid_ver 1.3, the PTM flag should be set
+        //nvt_assert(pInfo->version > 0x103 || (pInfo->u.feature &
+        //           NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE));
+
+        if (pInfo->u.feature & NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE)
+        {
+            pInfo->timing[0].etc.flag |= NVT_FLAG_DTD1_PREFERRED_TIMING;
+        }
+    }
+}
+
+
+// parse the EDID 1.x 18-byte long display descriptor
+CODE_SEGMENT(PAGE_DD_CODE)
+static void parseEdidLongDisplayDescriptor(EDID_LONG_DISPLAY_DESCRIPTOR *descriptor, NVT_EDID_18BYTE_DESCRIPTOR *p, NvU32 version)
+{
+    NvU32 i;
+
+    // bypass the input pointer check in this private function
+
+    // return if it's a detailed timing descriptor
+    if (descriptor->prefix[0] != 0 || descriptor->prefix[1] != 0)
+        return;
+
+    // other sanity check for the input data
+    if (descriptor->rsvd != 0)
+        return;
+
+    p->tag = descriptor->tag;
+
+    // now translate the descriptor
+    switch (descriptor->tag)
+    {
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DPSN: // display product serial number
+        case NVT_EDID_DISPLAY_DESCRITPOR_DPN:  // display product name
+        case NVT_EDID_DISPLAY_DESCRIPTOR_ADS:  // alphanumeric data string (ASCII)
+
+            // copy the 13-character payload from the 18-byte descriptor
+            for (i = 0; i < NVT_PVT_EDID_LDD_PAYLOAD_SIZE; i++)
+            {
+                if (descriptor->data[i] == 0x0A)
+                    p->u.serial_number.str[i] = '\0';
+                else
+                    p->u.serial_number.str[i] = descriptor->data[i];
+            }
+            break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DRL: // display range limit
+        {
+            EDID_MONITOR_RANGE_LIMIT *pRangeLimit = (EDID_MONITOR_RANGE_LIMIT *)&descriptor->data[0];
+
+            p->u.range_limit.min_v_rate     = pRangeLimit->minVRate;
+            p->u.range_limit.max_v_rate     = pRangeLimit->maxVRate;
+            p->u.range_limit.min_h_rate     = pRangeLimit->minHRate;
+            p->u.range_limit.max_h_rate     = pRangeLimit->maxHRate;
+            p->u.range_limit.max_pclk_MHz   = pRangeLimit->maxPClock10M * 10;
+            p->u.range_limit.timing_support = pRangeLimit->timing_support;
+
+            // add 255Hz offsets if needed, using descriptor->rsvd2;
+            // to offset the min values, their max MUST be offset as well
+            if (version >= NVT_EDID_VER_1_4)
+            {
+                if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MAX)
+                {
+                    p->u.range_limit.max_v_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MIN)
+                    {
+                        p->u.range_limit.min_v_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    }
+                }
+                if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX)
+                {
+                    p->u.range_limit.max_h_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN)
+                    {
+                        p->u.range_limit.min_h_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT;
+                    }
+                }
+            }
+
+            if (p->u.range_limit.timing_support == NVT_EDID_RANGE_SUPPORT_GTF2)
+            {
+                // descriptor->data[7]
+                // Start frequency for secondary curve, hor freq./2[kHz]
+                p->u.range_limit.u.gtf2.C = pRangeLimit->u.gtf2.C / 2; // 0 <= C <= 127
+                p->u.range_limit.u.gtf2.K = pRangeLimit->u.gtf2.K;     // 0 <= K <= 255
+                p->u.range_limit.u.gtf2.J = pRangeLimit->u.gtf2.J / 2; // 0 <= J <= 127
+                p->u.range_limit.u.gtf2.M = (pRangeLimit->u.gtf2.M_MSB << 8) +
+                                            pRangeLimit->u.gtf2.M_LSB; // 0 <= M <= 65535
+            }
+            else if (p->u.range_limit.timing_support == NVT_EDID_RANGE_SUPPORT_CVT)
+            {
+                // the pixel clock adjustment is in cvt.pixel_clock @ bits7:2;
+                // that number is in 0.25MHz, i.e. the actual max clock is max_pclk_MHz - (0.25 x cvt_pixel_clock);
+                // subtract the whole number part from max_pclk_MHz, save the remainder
+                p->u.range_limit.max_pclk_MHz -= (pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK) >> NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT >> 2; // i.e. divide by 4 to get the whole number
+                p->u.range_limit.u.cvt.pixel_clock_adjustment = ((pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK) >> NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT) & 0x03; // i.e. modulus 4
+
+                p->u.range_limit.u.cvt.max_active_pixels_per_line  = (pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_ACTIVE_MSB_MASK) << NVT_PVT_EDID_CVT_ACTIVE_MSB_SHIFT;
+                p->u.range_limit.u.cvt.max_active_pixels_per_line |= pRangeLimit->u.cvt.max_active;
+                p->u.range_limit.u.cvt.max_active_pixels_per_line <<= 3; // i.e. multiply by 8
+
+                p->u.range_limit.u.cvt.aspect_supported = (pRangeLimit->u.cvt.aspect_supported & NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_MASK) >> NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_SHIFT;
+
+                p->u.range_limit.u.cvt.aspect_preferred = (pRangeLimit->u.cvt.aspect_preferred_blanking & NVT_PVT_EDID_CVT_ASPECT_PREFERRED_MASK) >> NVT_PVT_EDID_CVT_ASPECT_PREFERRED_SHIFT;
+                p->u.range_limit.u.cvt.blanking_support = (pRangeLimit->u.cvt.aspect_preferred_blanking & NVT_PVT_EDID_CVT_BLANKING_MASK) >> NVT_PVT_EDID_CVT_BLANKING_SHIFT;
+
+                p->u.range_limit.u.cvt.scaling_support = (pRangeLimit->u.cvt.scaling_support & NVT_PVT_EDID_CVT_SCALING_MASK) >> NVT_PVT_EDID_CVT_SCALING_SHIFT;
+
+                p->u.range_limit.u.cvt.preferred_refresh_rate = pRangeLimit->u.cvt.preferred_refresh_rate;
+            }
+        }
+
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_CPD: // color point data
+        {
+            EDID_COLOR_POINT_DATA *pColorPoint = (EDID_COLOR_POINT_DATA *)&descriptor->data[0];
+
+            p->u.color_point.wp1_index  = pColorPoint->wp1_index;
+            p->u.color_point.wp1_x      = pColorPoint->wp1_x << 2;
+            p->u.color_point.wp1_x     |= (pColorPoint->wp1_x_y & NVT_PVT_EDID_CPD_WP_X_MASK) >> NVT_PVT_EDID_CPD_WP_X_SHIFT;
+            p->u.color_point.wp1_y      = pColorPoint->wp1_y << 2;
+            p->u.color_point.wp1_y     |= (pColorPoint->wp1_x_y & NVT_PVT_EDID_CPD_WP_Y_MASK) >> NVT_PVT_EDID_CPD_WP_Y_SHIFT;
+            p->u.color_point.wp1_gamma  = pColorPoint->wp1_gamma + 100;
+
+            p->u.color_point.wp2_index  = pColorPoint->wp2_index;
+            p->u.color_point.wp2_x      = pColorPoint->wp2_x << 2;
+            p->u.color_point.wp2_x     |= (pColorPoint->wp2_x_y & NVT_PVT_EDID_CPD_WP_X_MASK) >> NVT_PVT_EDID_CPD_WP_X_SHIFT;
+            p->u.color_point.wp2_y      = pColorPoint->wp2_y << 2;
+            p->u.color_point.wp2_y     |= (pColorPoint->wp2_x_y & NVT_PVT_EDID_CPD_WP_Y_MASK) >> NVT_PVT_EDID_CPD_WP_Y_SHIFT;
+            p->u.color_point.wp2_gamma  = pColorPoint->wp2_gamma + 100;
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_STI: // standard timing identification
+        {
+            EDID_STANDARD_TIMING_ID *pStdTiming = (EDID_STANDARD_TIMING_ID *)&descriptor->data[0];
+
+            for (i=0; i<6; i++) // 6 standard timing codes per descriptor
+            {
+                p->u.std_timing.descriptor[i] = pStdTiming->std_timing[i];
+            }
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DCM: // display color management
+        {
+            EDID_COLOR_MANAGEMENT_DATA *pColorMan = (EDID_COLOR_MANAGEMENT_DATA *)&descriptor->data[0];
+
+            p->u.color_man.red_a3   = pColorMan->red_a3_lsb   | (pColorMan->red_a3_msb << 8);
+            p->u.color_man.red_a2   = pColorMan->red_a2_lsb   | (pColorMan->red_a2_msb << 8);
+
+            p->u.color_man.green_a3 = pColorMan->green_a3_lsb | (pColorMan->green_a3_msb << 8);
+            p->u.color_man.green_a2 = pColorMan->green_a2_lsb | (pColorMan->green_a2_msb << 8);
+
+            p->u.color_man.blue_a3  = pColorMan->blue_a3_lsb  | (pColorMan->blue_a3_msb << 8);
+            p->u.color_man.blue_a2  = pColorMan->blue_a2_lsb  | (pColorMan->blue_a2_msb << 8);
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_CVT: // CVT 3-byte timing code
+        {
+            EDID_CVT_3BYTE *pCVT_3byte = (EDID_CVT_3BYTE *)&descriptor->data[0];
+
+            for (i=0; i<4; i++) // 4 CVT 3-byte blocks per descriptor
+            {
+                if (pCVT_3byte->block[i].addressable_lines != 0)
+                {
+                    p->u.cvt.block[i].addressable_lines   = pCVT_3byte->block[i].addressable_lines;
+                    p->u.cvt.block[i].addressable_lines  |= (pCVT_3byte->block[i].lines_ratio & NVT_PVT_EDID_CVT3_LINES_MSB_MASK) << NVT_PVT_EDID_CVT3_LINES_MSB_SHIFT;
+                    p->u.cvt.block[i].addressable_lines  += 1;
+                    p->u.cvt.block[i].addressable_lines <<= 1;
+
+                    p->u.cvt.block[i].aspect_ratio = (pCVT_3byte->block[i].lines_ratio & NVT_PVT_EDID_CVT3_ASPECT_MASK) >> NVT_PVT_EDID_CVT3_ASPECT_SHIFT;
+
+                    p->u.cvt.block[i].preferred_vert_rates = (pCVT_3byte->block[i].refresh_rates & NVT_PVT_EDID_CVT3_PREFERRED_RATE_MASK) >> NVT_PVT_EDID_CVT3_PREFERRED_RATE_SHIFT;
+                    p->u.cvt.block[i].supported_vert_rates = (pCVT_3byte->block[i].refresh_rates & NVT_PVT_EDID_CVT3_SUPPORTED_RATE_MASK) >> NVT_PVT_EDID_CVT3_SUPPORTED_RATE_SHIFT;
+                }
+            }
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII: // established timing III
+        {
+            EDID_EST_TIMINGS_III *pEstTiming = (EDID_EST_TIMINGS_III *)&descriptor->data[0];
+
+            for (i=0; i<6; i++) // 6 bytes of timing bits
+            {
+                p->u.est3.data[i] = pEstTiming->timing_byte[i];
+            }
+        }
+        break;
+
+        case NVT_EDID_DISPLAY_DESCRIPTOR_DUMMY: // dummy descriptor
+        default:
+            // descriptor not resolved yet; keep the raw payload
+            for (i = 0; i < NVT_PVT_EDID_LDD_PAYLOAD_SIZE; i++)
+            {
+                p->u.dummy.data[i] = descriptor->data[i];
+            }
+            break;
+    }
+
+}
+
+// get generic EDID info
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NV_STDCALL NvTiming_ParseEDIDInfo(NvU8 *pEdid, NvU32 length, NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j, k, data;
+    EDIDV1STRUC *p;
+    NvU8 *pExt;
+    NVT_EDID_CEA861_INFO *p861Info;
+
+    // parameter check
+    if (pEdid == NULL || length < 128 || pInfo == NULL)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    NVMISC_MEMSET(pInfo, 0, sizeof(NVT_EDID_INFO));
+
+    // get the EDID version
+    if (getEdidVersion(pEdid, &pInfo->version) == NVT_STATUS_ERR)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    p = (EDIDV1STRUC *) pEdid;
+
+    // get the IDs
+    pInfo->manuf_id   = p->wIDManufName;
+    pInfo->product_id = p->wIDProductCode;
+
+    // translate the ID into the manufacturer's name
+    pInfo->manuf_name[0] = 'A' + (NvU8)((pInfo->manuf_id & 0x007c) >> 2) - 1;
+    pInfo->manuf_name[1] = 'A' + (NvU8)((pInfo->manuf_id & 0x0003) << 3 | (pInfo->manuf_id & 0xe000) >> 13) - 1;
+    pInfo->manuf_name[2] = 'A' + (NvU8)((pInfo->manuf_id & 0x1f00) >> 8) - 1;
+    pInfo->manuf_name[3] = '\0';
+
+    // get serial number
+    pInfo->serial_number = p->dwIDSerialNumber;
+
+    // get the week and year
+    pInfo->week = p->bWeekManuf;
+    pInfo->year = p->bYearManuf + 1990;
+
+    // get the interface info
+    pInfo->input.isDigital = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_ISDIGITAL_MASK) >> NVT_PVT_EDID_INPUT_ISDIGITAL_SHIFT;
+
+    if (pInfo->input.isDigital && pInfo->version > 0x103) // must be at least EDID1.4 to support the following fields
+    {
+        switch ( (p->bVideoInputDef & NVT_PVT_EDID_INPUT_BPC_MASK) >> NVT_PVT_EDID_INPUT_BPC_SHIFT)
+        {
+            case NVT_PVT_EDID_INPUT_BPC_6 :
+
pInfo->input.u.digital.bpc = 6; + break; + case NVT_PVT_EDID_INPUT_BPC_8 : + pInfo->input.u.digital.bpc = 8; + break; + case NVT_PVT_EDID_INPUT_BPC_10 : + pInfo->input.u.digital.bpc = 10; + break; + case NVT_PVT_EDID_INPUT_BPC_12 : + pInfo->input.u.digital.bpc = 12; + break; + case NVT_PVT_EDID_INPUT_BPC_14 : + pInfo->input.u.digital.bpc = 14; + break; + case NVT_PVT_EDID_INPUT_BPC_16 : + pInfo->input.u.digital.bpc = 16; + break; + default : + pInfo->input.u.digital.bpc = 0; + break; + } + pInfo->input.u.digital.video_interface = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_INTERFACE_MASK) >> NVT_PVT_EDID_INPUT_INTERFACE_SHIFT; + } + else if (!pInfo->input.isDigital) + { + pInfo->input.u.analog_data = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_ANALOG_ETC_MASK) >> NVT_PVT_EDID_INPUT_ANALOG_ETC_SHIFT; + } + + // get the max image size and aspect ratio + if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize != 0) + { + pInfo->screen_size_x = p->bMaxHorizImageSize; + pInfo->screen_size_y = p->bMaxVertImageSize; + pInfo->screen_aspect_x = 0; + pInfo->screen_aspect_y = 0; + } + else if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize == 0) + { + pInfo->screen_size_x = 0; + pInfo->screen_size_y = 0; + pInfo->screen_aspect_x = 99 + p->bMaxHorizImageSize; + pInfo->screen_aspect_y = 100; + } + else if (p->bMaxHorizImageSize == 0 && p->bMaxVertImageSize != 0) + { + pInfo->screen_size_x = 0; + pInfo->screen_size_y = 0; + pInfo->screen_aspect_x = 100; + pInfo->screen_aspect_y = 99 + p->bMaxVertImageSize; + } + + // get the gamma + pInfo->gamma = p->bDisplayXferChar + 100; + + // get the features + pInfo->u.feature = p->bFeatureSupport; + + // get chromaticity coordinates + pInfo->cc_red_x = p->Chromaticity[2] << 2; + pInfo->cc_red_x |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_RED_X1_X0_MASK) >> NVT_PVT_EDID_CC_RED_X1_X0_SHIFT; + pInfo->cc_red_y = p->Chromaticity[3] << 2; + pInfo->cc_red_y |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_RED_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_RED_Y1_Y0_SHIFT; + + pInfo->cc_green_x = p->Chromaticity[4] << 2; + pInfo->cc_green_x |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_GREEN_X1_X0_MASK) >> NVT_PVT_EDID_CC_GREEN_X1_X0_SHIFT; + pInfo->cc_green_y = p->Chromaticity[5] << 2; + pInfo->cc_green_y |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_GREEN_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_GREEN_Y1_Y0_SHIFT; + + pInfo->cc_blue_x = p->Chromaticity[6] << 2; + pInfo->cc_blue_x |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_BLUE_X1_X0_MASK) >> NVT_PVT_EDID_CC_BLUE_X1_X0_SHIFT; + pInfo->cc_blue_y = p->Chromaticity[7] << 2; + pInfo->cc_blue_y |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_BLUE_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_BLUE_Y1_Y0_SHIFT; + + pInfo->cc_white_x = p->Chromaticity[8] << 2; + pInfo->cc_white_x |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_WHITE_X1_X0_MASK) >> NVT_PVT_EDID_CC_WHITE_X1_X0_SHIFT; + pInfo->cc_white_y = p->Chromaticity[9] << 2; + pInfo->cc_white_y |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_WHITE_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_WHITE_Y1_Y0_SHIFT; + + // copy established timings + pInfo->established_timings_1_2 = (NvU16)p->bEstablishedTimings1 << 8; + pInfo->established_timings_1_2 |= (NvU16)p->bEstablishedTimings2; + + // copy manuf reserved timings + pInfo->manufReservedTimings = p->bManufReservedTimings; + + // copy standard timings + for (i = 0; i < NVT_EDID_MAX_STANDARD_TIMINGS; i++) + { + pInfo->standard_timings[i] = p->wStandardTimingID[i]; + } + + // get the number of extensions + pInfo->total_extensions = p->bExtensionFlag; + + // check_sum + for (i = 0, data = 0; i < length; i++) + { + data += pEdid[i]; 
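+        // (every byte of the block, including the checksum byte itself,
+        //  is summed; a compliant EDID block totals 0 mod 256)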
+    }
+    pInfo->checksum_ok = !(data & 0xFF);
+    pInfo->checksum = p->bChecksum;
+
+
+    // now find out the total number of all of the timings in the EDID
+    pInfo->total_timings = 0;
+
+    // now find out the detailed timings
+    parseEdidDetailedTiming(pEdid, pInfo);
+
+    // now parse all 18-byte long display descriptors (not detailed timing)
+    for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+    {
+        parseEdidLongDisplayDescriptor((EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i], &pInfo->ldd[i], pInfo->version);
+    }
+
+    // now check the number of timings in the extension
+    for (k = 0, j = 1; j <= pInfo->total_extensions && (j + 1) * sizeof(EDIDV1STRUC) <= length; j++)
+    {
+        pExt = pEdid + sizeof(EDIDV1STRUC) * j;
+
+        // check for 861 extension first
+        switch (*pExt)
+        {
+            case NVT_EDID_EXTENSION_CTA:
+                p861Info = (k == 0) ? &pInfo->ext861 : &pInfo->ext861_2;
+
+                get861ExtInfo(pExt, sizeof(EDIDV1STRUC), p861Info);
+
+                // HF EEODB is only present in EDID v1.3 (v1.4 does not need it), and it is
+                // always present in the 1st CTA extension block.
+                if (j == 1 && pInfo->version == NVT_EDID_VER_1_3)
+                {
+                    parseCta861HfEeodb(p861Info, &pInfo->total_extensions);
+                }
+
+                // update pInfo with basic hdmi info;
+                // assumes each EDID will only have one such block across multiple CTA861 blocks (otherwise it may create a declaration conflict).
+                // In case of multiple such blocks, the last one takes precedence, except for SCDB.
+
+                // parseCta861VsdbBlocks() uses the hfScdb info, so that needs to be parsed first
+                parseCta861HfScdb(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                parseCta861VsdbBlocks(p861Info, pInfo, FROM_CTA861_EXTENSION);
+
+                // parse HDR related information from the HDR static metadata data block
+                parseCea861HdrStaticMetadataDataBlock(p861Info, pInfo, FROM_CTA861_EXTENSION);
+
+                // parse Dolby Vision related information from the DV vendor specific video data block
+                parseCea861DvStaticMetadataDataBlock(p861Info, &pInfo->dv_static_metadata_info);
+
+                // Timings are (or shall be) listed in priority order,
+                // so read the SVDs and yuv420 SVDs first, before reading the detailed timings
+
+                // add the 861B short video timing descriptor
+                if (p861Info->revision >= NVT_CEA861_REV_B)
+                {
+                    // base video
+                    parse861bShortTiming(p861Info, pInfo, FROM_CTA861_EXTENSION);
+
+                    // yuv420-only video
+                    parse861bShortYuv420Timing(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                }
+
+                // add the detailed timings in 18-byte long display descriptors
+                parse861ExtDetailedTiming(pExt, p861Info->basic_caps, pInfo);
+
+                // CEA861-F section 7.5.12 describes the VFPDB block.
+                if (p861Info->revision >= NVT_CEA861_REV_F && p861Info->total_vfpdb != 0)
+                {
+                    parse861bShortPreferredTiming(p861Info, pInfo, FROM_CTA861_EXTENSION);
+                }
+
+                k++;
+                break;
+
+            case NVT_EDID_EXTENSION_VTB:
+                parseVTBExtension(pExt, pInfo);
+                break;
+
+            case NVT_EDID_EXTENSION_DISPLAYID:
+                if ((pExt[1] & 0xF0) == 0x20) // displayID2.x as EDID extension
+                {
+                    if (getDisplayId20EDIDExtInfo(pExt, sizeof(EDIDV1STRUC),
+                                                  pInfo) == NVT_STATUS_SUCCESS)
+                    {
+                        if (pInfo->ext861.total_y420vdb != 0 || pInfo->ext861.total_y420cmdb != 0)
+                        {
+                            pInfo->ext_displayid20.interface_features.yuv420_min_pclk = 0;
+                        }
+
+                        if (!pInfo->ext861.basic_caps)
+                        {
+                            pInfo->ext861.basic_caps = pInfo->ext_displayid20.basic_caps;
+                        }
+                    }
+                }
+                else // displayID13 as EDID extension
+                {
+                    // do not fail the function based on the return value of getDisplayIdEDIDExtInfo;
+                    // refer to bug 3247180, where some rogue monitors don't provide correct DID13 raw data.
+                    if (getDisplayIdEDIDExtInfo(pExt, sizeof(EDIDV1STRUC),
+                                                pInfo) == NVT_STATUS_SUCCESS)
+                    {
+                        // Check if YCbCr is supported in the base block,
+                        // since it is mandatory if YCbCr is supported on any other display interface, as per 5.1.1.1 Video Colorimetry
+                        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_444)
+                        {
+                            if (!pInfo->ext_displayid.supported_displayId2_0)
+                            {
+                                pInfo->ext_displayid.u4.display_interface.ycbcr444_depth.support_8b = 1;
+                            }
+                            else
+                            {
+                                pInfo->ext_displayid.u4.display_interface_features.ycbcr444_depth.support_8b = 1;
+                            }
+                        }
+
+                        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_422)
+                        {
+                            if (!pInfo->ext_displayid.supported_displayId2_0)
+                            {
+                                pInfo->ext_displayid.u4.display_interface.ycbcr422_depth.support_8b = 1;
+                            }
+                            else
+                            {
+                                pInfo->ext_displayid.u4.display_interface_features.ycbcr422_depth.support_8b = 1;
+                            }
+                        }
+                    }
+                }
+                break;
+
+            default:
+                break;
+        }
+    }
+
+    // Copy all the timings (could include type 7/8/9/10) from displayid20->timings[] to pEdidInfo->timings[]
+    for (i = 0; i < pInfo->ext_displayid20.total_timings; i++)
+    {
+        if (!assignNextAvailableTiming(pInfo, &(pInfo->ext_displayid20.timing[i])))
+        {
+            return NVT_STATUS_ERR;
+        }
+    }
+
+    // check for cvt timings - in display range limits or cvt 3-byte LDDs, only for EDID1.4 and above
+    if (pInfo->version > 0x0103)
+    {
+        parseEdidCvtTiming(pInfo);
+    }
+
+    // now check for standard timings - base EDID and then the LDDs
+    parseEdidStandardTiming(pInfo);
+
+    // find out the total established timings - base EDID and then the LDDs
+    parseEdidEstablishedTiming(pInfo);
+
+    getEdidHDM1_4bVsdbTiming(pInfo);
+
+    // Assert if no timings were found (due to a bad EDID) or if we mistakenly
+    // assigned more timings than we allocated space for (due to bad logic above)
+    nvt_assert(pInfo->total_timings &&
+               (pInfo->total_timings <= COUNT(pInfo->timing)));
+
+    // go through all timings and update supported color formats;
+    // consider the supported bpc per color format from the parsed EDID / CTA861 / DisplayId
+    updateColorFormatAndBpcTiming(pInfo);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void updateColorFormatAndBpcTiming(NVT_EDID_INFO *pInfo)
+{
+    NvU32 i, j, data;
+
+    for (i = 0; i < pInfo->total_timings; i++)
+    {
+        data = NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status);
+        switch (data)
+        {
+            case NVT_TYPE_HDMI_STEREO:
+            case NVT_TYPE_HDMI_EXT:
+            // VTB timings use the base EDID (block 0) to determine the color format support
+            case NVT_TYPE_EDID_VTB_EXT:
+            case NVT_TYPE_EDID_VTB_EXT_STD:
+            case NVT_TYPE_EDID_VTB_EXT_DTD:
+            case NVT_TYPE_EDID_VTB_EXT_CVT:
+                // pInfo->u.feature_ver_1_3.color_type provides mono, rgb, rgy, undefined;
+                // assume RGB 8-bpc support only (VTB is a pretty old EDID standard)
+                pInfo->timing[i].etc.rgb444.bpc.bpc8 = 1;
+                break;
+            // These come from the CTA block. Since there could be multiple CEA blocks,
+            // these are adjusted as the blocks are parsed.
+            case NVT_TYPE_EDID_861ST:
+            case NVT_TYPE_EDID_EXT_DTD:
+                if (pInfo->ext_displayid20.as_edid_extension &&
+                    pInfo->ext_displayid20.valid_data_blocks.cta_data_present)
+                {
+                    updateColorFormatForDisplayId20ExtnTimings(pInfo, i);
+                }
+                updateBpcForTiming(pInfo, i);
+                break;
+            default:
+                // * the displayID_v1.3/v2.0 EDID extension needs to follow the EDID bpc definition.
+                // * all others default to the base EDID
+                updateBpcForTiming(pInfo, i);
+        }
+
+        // The bpc values in timings[i] are based on the color format and need to be updated again
+        // if the DisplayID extension provides its own interface feature data block
+        if (pInfo->ext_displayid.version == 0x12 || pInfo->ext_displayid.version == 0x13)
+        {
+            updateColorFormatForDisplayIdExtnTimings(pInfo, i);
+        }
+        else if (pInfo->ext_displayid20.valid_data_blocks.interface_feature_present)
+        {
+            // The DisplayId2.0 spec has its own way of determining color format support, which includes bpc + color format
+            updateColorFormatForDisplayId20ExtnTimings(pInfo, i);
+        }
+    }
+
+    // Go through all the timings and set the CTA format accordingly. If a timing is a CTA 861b timing, store the
+    // index of this CTA 861b standard in the NVT_TIMING.etc.status field.
+    // However, the parser needs to exclude DTD timings in the EDID base block that share the same detailed timing
+    // with a VIC/DTD_ext in CTA861.
+    for (i = 0; i < pInfo->total_timings; i++)
+    {
+        data = NvTiming_GetCEA861TimingIndex(&pInfo->timing[i]);
+        // DisplayID timings do not belong to the CTA timings, and DisplayID owns its own deep color block
+        if (data && !((NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_1) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_2) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_7) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_8) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_9) ||
+                      (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_10)))
+        {
+            // CEA timings may be enumerated outside of SVD blocks -- the formats of these timings don't have CEA FORMAT (vic) set.
+            // Before marking them CEA, make sure their color formats are updated too.
+            if (NVT_GET_CEA_FORMAT(pInfo->timing[i].etc.status) == 0 &&
+                (!NVT_IS_DTD(pInfo->timing[i].etc.status) ||
+                 isMatchedCTA861Timing(pInfo, &pInfo->timing[i])))
+            {
+                for (j = 0; j < pInfo->total_timings; j++)
+                {
+                    // It is assumed that CTA timings repeated within a CTA block or across different CTA blocks will
+                    // announce the same color format for the same CTA timings
+                    if (NVT_GET_CEA_FORMAT(pInfo->timing[j].etc.status) == data)
+                    {
+                        // There could be anomalies between the EDID 1.4 base block color format and the CEA861 basic caps.
+                        // In this case we assume the union is supported.
+                        pInfo->timing[i].etc.rgb444.bpcs |= pInfo->timing[j].etc.rgb444.bpcs;
+                        pInfo->timing[i].etc.yuv444.bpcs |= pInfo->timing[j].etc.yuv444.bpcs;
+                        pInfo->timing[i].etc.yuv422.bpcs |= pInfo->timing[j].etc.yuv422.bpcs;
+                        pInfo->timing[i].etc.yuv420.bpcs |= pInfo->timing[j].etc.yuv420.bpcs;
+                        break;
+                    }
+                }
+
+                // now update the VIC of this timing
+                NVT_SET_CEA_FORMAT(pInfo->timing[i].etc.status, data);
+            }
+            // set the aspect ratio info if needed
+            if (pInfo->timing[i].etc.aspect == 0)
+            {
+                pInfo->timing[i].etc.aspect = getCEA861TimingAspectRatio(data);
+            }
+        }
+    }
+
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NvBool isMatchedCTA861Timing(NVT_EDID_INFO *pInfo, NVT_TIMING *pT)
+{
+    NvU32 j;
+
+    for (j = 0; j < pInfo->total_timings; j++)
+    {
+        if (NVT_GET_CEA_FORMAT(pInfo->timing[j].etc.status) && NvTiming_IsTimingExactEqual(&pInfo->timing[j], pT))
+        {
+            return NV_TRUE;
+        }
+    }
+    return NV_FALSE;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void updateBpcForTiming(NVT_EDID_INFO *pInfo, NvU32 index)
+{
+    NVT_EDID_CEA861_INFO *p861Info;
+
+    // assume/prefer data from the 1st CEA block if multiple exist
+    p861Info = &pInfo->ext861;
+
+    pInfo->timing[index].etc.rgb444.bpc.bpc8 = 1;
+
+    if (pInfo->version >= NVT_EDID_VER_1_4 && pInfo->input.isDigital)
+    {
+        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_444)
+        {
+            pInfo->timing[index].etc.yuv444.bpc.bpc8 = 1;
+        }
+        if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_422)
+        {
+            pInfo->timing[index].etc.yuv422.bpc.bpc8 = 1;
+        }
+        if (pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DISPLAYPORT_SUPPORTED ||
+            pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED)
+        {
+            pInfo->timing[index].etc.rgb444.bpc.bpc6 = 1;
+
+            // trust the bpc claim in the EDID base block for DP only
+            if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_10)
+            {
+                pInfo->timing[index].etc.rgb444.bpc.bpc10 = 1;
+                pInfo->timing[index].etc.yuv444.bpc.bpc10 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444);
+                pInfo->timing[index].etc.yuv422.bpc.bpc10 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422);
+            }
+            if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_12)
+            {
+                pInfo->timing[index].etc.rgb444.bpc.bpc12 = 1;
+                pInfo->timing[index].etc.yuv444.bpc.bpc12 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444);
+                pInfo->timing[index].etc.yuv422.bpc.bpc12 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422);
+            }
+            if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_16)
+            {
+                pInfo->timing[index].etc.rgb444.bpc.bpc16 = 1;
+                pInfo->timing[index].etc.yuv444.bpc.bpc16 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444);
+                pInfo->timing[index].etc.yuv422.bpc.bpc16 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422);
+            }
+        }
+        else if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED ||
+                  pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED ||
+                  pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED) &&
+                 p861Info->revision >= NVT_CEA861_REV_A)
+        {
+            updateHDMILLCDeepColorForTiming(pInfo, index);
+        }
+    }
+    else if (p861Info->revision >= NVT_CEA861_REV_A)
+    {
+        updateHDMILLCDeepColorForTiming(pInfo, index);
+    }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_Get18ByteLongDescriptorIndex(NVT_EDID_INFO *pEdidInfo, NvU8 tag, NvU32 *pDtdIndex)
+{
+    NvU32 dtdIndex;
+
+    if (!pEdidInfo || !pDtdIndex)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    for (dtdIndex = *pDtdIndex; dtdIndex < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; dtdIndex++)
+    {
+        if (pEdidInfo->ldd[dtdIndex].tag == tag)
+        {
+            *pDtdIndex = dtdIndex;
+            return NVT_STATUS_SUCCESS;
+        }
+    }
+
+    return NVT_STATUS_ERR;
+}
+
+// get the edid timing
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetEdidTimingEx(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k)
+{
+    NvU32 i, j;
+    NvU32 preferred_cea, preferred_displayid_dtd, preferred_dtd1, dtd1, map0, map1, map2, map3, map4, ceaIndex, max, cvt;
+    NVT_TIMING *pEdidTiming;
+    NVT_EDID_DD_RANGE_CVT *pCVT = NULL;
+    NVT_TIMING cvtTiming;
+
+    // input check
+    if (pEdidInfo == NULL || pEdidInfo->total_timings == 0 || pT == 0)
+        return NVT_STATUS_ERR;
+
+    if (width == 0 || height == 0 || rr == 0) // rrx1k is optional, can be 0.
+        return NVT_STATUS_ERR;
height == 0 || rr == 0) // rrx1k is optional, can be 0.
+        return NVT_STATUS_ERR;
+
+    pEdidTiming = pEdidInfo->timing;
+
+    // the timing mapping indices:
+    //
+    // preferred_cea           - the "preferred SVD" in CEA-861-F (i.e. a Sink that prefers a Video Format that is not listed as an SVD in the Video Data Block, but is instead listed in the YCBCR 4:2:0 VDB)
+    // preferred_displayid_dtd - the preferred detailed timing of the DisplayID extension
+    // preferred_dtd1          - the first detailed timing with the PTM (preferred timing mode) flag enabled
+    // dtd1                    - the first detailed timing
+    // map0                    - the "perfect" match (the timing's H/V-visible and pixel clock (refresh rate) are the same as the requested "width", "height" and "rr")
+    // map1                    - the "closest" match, honoring the interlaced flag
+    // map2                    - the "closest" match, ignoring the interlaced flag
+    // map3                    - the "closest" match to the panel's native timing (i.e. the first DTD timing or the short 861B/C/D timings with the "native" flag)
+    // map4                    - the "closest" match with the same refresh rate
+    // max                     - the timing with the max visible area
+    preferred_cea = preferred_displayid_dtd = preferred_dtd1 = dtd1 = map0 = map1 = map2 = map3 = map4 = ceaIndex = pEdidInfo->total_timings;
+    max = cvt = 0;
+    for (i = 0; i < pEdidInfo->total_timings; i++)
+    {
+        // if the client prefers a _NATIVE timing, then don't select a custom timing
+        if ((flag & (NVT_FLAG_NATIVE_TIMING | NVT_FLAG_EDID_TIMING)) != 0 && NVT_IS_CUST_ENTRY(pEdidTiming[i].etc.status) != 0)
+        {
+            continue;
+        }
+
+        // find the perfect match if possible
+        if ((flag & NVT_FLAG_MAX_EDID_TIMING) == 0 &&
+            width == pEdidTiming[i].HVisible &&
+            height == frame_height(pEdidTiming[i]) &&
+            rr == pEdidTiming[i].etc.rr &&
+            ((rrx1k == 0) || (rrx1k == pEdidTiming[i].etc.rrx1k)) &&
+            !!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced)
+        {
+            if (map0 >= pEdidInfo->total_timings)
+            {
+                // make sure we take the priority as "detailed > standard > established". (The array timing[] always has the detailed timings in front, followed by the standard and established ones.)
+                map0 = i;
+            }
+
+            if ( (NVT_PREFERRED_TIMING_IS_CEA(pEdidTiming[i].etc.flag)) ||
+                 ((0 == (flag & NVT_FLAG_EDID_861_ST)) && NVT_PREFERRED_TIMING_IS_DTD1(pEdidTiming[i].etc.flag, pEdidTiming[i].etc.status)) ||
+                 (NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag)) ||
+                 (NVT_IS_NATIVE_TIMING(pEdidTiming[i].etc.status)))
+            {
+                *pT = pEdidTiming[i];
+                return NVT_STATUS_SUCCESS;
+            }
+
+            if (NVT_GET_TIMING_STATUS_TYPE(pEdidTiming[i].etc.status) == NVT_TYPE_EDID_861ST)
+            {
+                if (ceaIndex == pEdidInfo->total_timings)
+                {
+                    // Save the first entry found.
+                    ceaIndex = i;
+                }
+                else
+                {
+                    if (((flag & NVT_FLAG_CEA_4X3_TIMING) && (pEdidTiming[i].etc.aspect == 0x40003)) ||
+                        ((flag & NVT_FLAG_CEA_16X9_TIMING) && (pEdidTiming[i].etc.aspect == 0x160009)))
+                    {
+                        // Use the preferred aspect ratio if specified.
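+                        // (an EDID may expose both the 4:3 and the 16:9 variant of the
+                        // same 861 short timing; the NVT_FLAG_CEA_4X3/16X9 request picks
+                        // between them, otherwise the first entry found above is kept)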
+                        ceaIndex = i;
+                    }
+                }
+            }
+        } // if ((flag & NVT_FLAG_MAX_EDID_TIMING) == 0 &&
+
+        // bypass the custom timing selection for the mismatch case
+        if (NVT_GET_TIMING_STATUS_TYPE(pEdidTiming[i].etc.status) == NVT_TYPE_CUST ||
+            NVT_IS_CUST_ENTRY(pEdidTiming[i].etc.status) != 0)
+        {
+            if (width != pEdidTiming[i].HVisible || height != frame_height(pEdidTiming[i]) || rr != pEdidTiming[i].etc.rr)
+            {
+                continue;
+            }
+        }
+
+        // find the preferred timing in case a CEA VFPDB exists
+        if (preferred_cea == pEdidInfo->total_timings &&
+            NVT_PREFERRED_TIMING_IS_CEA(pEdidTiming[i].etc.flag))
+        {
+            preferred_cea = i;
+        }
+
+        // find the DisplayID preferred timing if present.
+        // The caller forces rr to 1 to select the DisplayID preferred timing in pEdidTiming if it exists;
+        // however, we can't pick the desired refresh rate when two or more rr values share the same timing.
+        if (rr != 1)
+        {
+            if (pEdidTiming[i].etc.rr == rr && NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag))
+            {
+                preferred_displayid_dtd = i;
+            }
+        }
+        else if (preferred_displayid_dtd == pEdidInfo->total_timings &&
+                 NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag))
+        {
+            preferred_displayid_dtd = i;
+        }
+
+        if (NVT_PREFERRED_TIMING_IS_DTD1(pEdidTiming[i].etc.flag, pEdidTiming[i].etc.status))
+        {
+            preferred_dtd1 = i;
+        }
+
+        if (NVT_IS_DTD1(pEdidTiming[i].etc.status))
+        {
+            dtd1 = i;
+        }
+
+        // track the max mode just in case
+        if (pEdidTiming[i].HVisible * pEdidTiming[i].VVisible > pEdidTiming[max].HVisible * pEdidTiming[max].VVisible)
+            max = i;
+
+        // if the requested timing is not in the EDID, try to find the EDID entry with the same progressive/interlaced setting
+        if (map1 >= pEdidInfo->total_timings)
+        {
+            if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced &&
+                width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]))
+            {
+                map1 = i;
+            }
+        }
+        else
+        {
+            if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced &&
+                width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]) &&
+                abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map1].HVisible, width) &&
+                abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map1]), height))
+            {
+                // if there are 2 timings with the same visible size, choose the one with the closer refresh rate
+                if (pEdidTiming[i].HVisible == pEdidTiming[map1].HVisible &&
+                    frame_height(pEdidTiming[i]) == frame_height(pEdidTiming[map1]))
+                {
+                    if (abs_delta(pEdidTiming[i].etc.rr, rr) < abs_delta(pEdidTiming[map1].etc.rr, rr))
+                    {
+                        map1 = i;
+                    }
+                }
+                else
+                {
+                    map1 = i;
+                }
+            }
+        }
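+        // e.g. (illustrative): for a 1920x1080@60 request, a 1920x1200@60 entry
+        // wins over a 2560x1440@60 entry because its size deltas are smaller;
+        // between two 1920x1200 entries, the one whose refresh rate is closest
+        // to 60 is kept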
+
+        // if the requested timing is not in the EDID, try to find the EDID entry regardless of the progressive/interlaced setting
+        if (map2 >= pEdidInfo->total_timings)
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]))
+            {
+                map2 = i;
+            }
+        }
+        else
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= frame_height(pEdidTiming[i]) &&
+                abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map2].HVisible, width) &&
+                abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map2]), height))
+            {
+                // if there are 2 timings with the same visible size, choose the one with the closer refresh rate
+                if (pEdidTiming[i].HVisible == pEdidTiming[map2].HVisible &&
+                    frame_height(pEdidTiming[i]) == frame_height(pEdidTiming[map2]))
+                {
+                    if (abs_delta(pEdidTiming[i].etc.rr, rr) < abs_delta(pEdidTiming[map2].etc.rr, rr))
+                    {
+                        map2 = i;
+                    }
+                }
+                else
+                {
+                    map2 = i;
+                }
+            }
+        }
+
+        // find the native timing
+        if (NVT_IS_NATIVE_TIMING(pEdidTiming[i].etc.status) || NVT_IS_DTD1(pEdidTiming[i].etc.status))
+        {
+            if (map3 >= pEdidInfo->total_timings)
+            {
+                if (width <= pEdidTiming[i].HVisible &&
+                    height <= frame_height(pEdidTiming[i]))
+                {
+                    map3 = i;
+                }
+            }
+            else if (abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map3].HVisible, width) &&
+                     abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map3]), height) &&
+                     width <= pEdidTiming[i].HVisible &&
+                     height <= frame_height(pEdidTiming[i]))
+            {
+                map3 = i;
+            }
+        }
+
+        // find the edid timing with a matching refresh rate
+        if (map4 >= pEdidInfo->total_timings)
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= pEdidTiming[i].VVisible &&
+                rr == pEdidTiming[i].etc.rr)
+            {
+                map4 = i;
+            }
+        }
+        else
+        {
+            if (width <= pEdidTiming[i].HVisible &&
+                height <= pEdidTiming[i].VVisible &&
+                rr == pEdidTiming[i].etc.rr &&
+                abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map4].HVisible, width) &&
+                abs_delta(pEdidTiming[i].VVisible, height) <= abs_delta(pEdidTiming[map4].VVisible, height))
+            {
+                map4 = i;
+            }
+        }
+
+    }//for (i = 0; i < pEdidInfo->total_timings; i++)
+
+    if ( (preferred_displayid_dtd == preferred_dtd1) && (preferred_dtd1 == dtd1) &&
+         (dtd1 == map0) &&
+         (map0 == map1) &&
+         (map1 == map2) &&
+         (map2 == map3) &&
+         (map3 == map4) &&
+         (map4 == pEdidInfo->total_timings) &&
+         pEdidInfo->version >= NVT_EDID_VER_1_4 &&
+         pEdidInfo->u.feature_ver_1_4_digital.continuous_frequency &&
+         !(flag & NVT_PVT_INTERLACED_MASK))
+    {
+        // try to find a CVT timing that fits
+        NvU32 maxHeight, minHeight, tempHeight;
+
+        minHeight = ~0;
+        maxHeight = tempHeight = 0;
+
+        // loop through the long display descriptors
+        for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+        {
+            if (pEdidInfo->ldd[i].tag != NVT_EDID_DISPLAY_DESCRIPTOR_DRL || pEdidInfo->ldd[i].u.range_limit.timing_support != NVT_EDID_RANGE_SUPPORT_CVT)
+            {
+                continue;
+            }
+
+            pCVT = &pEdidInfo->ldd[i].u.range_limit.u.cvt;
+
+            if (width <= pCVT->max_active_pixels_per_line || (pCVT->scaling_support & NVT_EDID_CVT_SCALING_HOR_SHRINK))
+            {
+                // derive the supported heights from the supported aspect ratios
+                for (j = 0; j < 5; j++)
+                {
+                    if ((pCVT->aspect_supported & (1 << j)) == 0)
+                    {
+                        continue;
+                    }
+
+                    switch (1 << j)
+                    {
+                        case NVT_EDID_CVT_ASPECT_SUPPORT_4X3:   tempHeight = width * 3  / 4;  break;
+                        case NVT_EDID_CVT_ASPECT_SUPPORT_16X9:  tempHeight = width * 9  / 16; break;
+                        case NVT_EDID_CVT_ASPECT_SUPPORT_16X10: tempHeight = width * 10 / 16; break;
+                        case NVT_EDID_CVT_ASPECT_SUPPORT_5X4:   tempHeight = width * 4  / 5;  break;
+                        case NVT_EDID_CVT_ASPECT_SUPPORT_15X9:  tempHeight = width * 9  / 15; break;
+                        default:                                tempHeight = 0;               break;
+                    }
+
+                    if (tempHeight == height)
+                    {
+                        cvt = 1;
+                        break;
+                    }
+
+                    if (minHeight > tempHeight)
+                    {
+                        minHeight = tempHeight;
+                    }
+                    if (maxHeight < tempHeight)
+                    {
+                        maxHeight = tempHeight;
+                    }
+
+                }//for (j=0; j<5; j++)
+            }//if (width <= pCVT->max_active_pixels_per_line || (pCVT->scaling_support & NVT_EDID_CVT_SCALING_HOR_SHRINK))
+
+            if ( ((minHeight < height) && (pCVT->scaling_support & NVT_EDID_CVT_SCALING_VER_SHRINK)) ||
+                 ((maxHeight > height) && (pCVT->scaling_support & NVT_EDID_CVT_SCALING_VER_STRETCH)) )
+            {
+                cvt = 1;
+            }
+
+            if (cvt)
+            {
+                break;
+            }
+        }//for (i=0; i<NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++)
+
+        // validate the CVT timing against the descriptor's pixel clock limit
+        if (cvt)
+        {
+            if (pCVT->blanking_support & NVT_EDID_CVT_BLANKING_REDUCED && NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, &cvtTiming) == NVT_STATUS_SUCCESS)
+            {
+                if ( cvtTiming.pclk > (NvU32)((pEdidInfo->ldd[i].u.range_limit.max_pclk_MHz * 100) - (pCVT->pixel_clock_adjustment * 25)) )
+                {
+                    cvt = 0;
+                }
+            }
+            else if (pCVT->blanking_support & NVT_EDID_CVT_BLANKING_STANDARD && NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, &cvtTiming) == NVT_STATUS_SUCCESS)
+            {
+                if ( cvtTiming.pclk > (NvU32)((pEdidInfo->ldd[i].u.range_limit.max_pclk_MHz * 100) - (pCVT->pixel_clock_adjustment * 25)) )
+                {
+                    cvt = 0;
+                }
+            }
+            else
+            {
+                cvt = 0;
+            }
+
+        }
+    }//(dtd1 == map0 == map1 == map2 == map3 == pEdidInfo->total_timings) && pEdidInfo->version >= NVT_EDID_VER_1_4 &&
+    //
pEdidInfo->feature_ver_1_4_digital.continuous_frequency && !(flag & NVT_PVT_INTERLACED_MASK)) + + // now return the mismatched EDID timing + if (flag & NVT_FLAG_NV_PREFERRED_TIMING) + { + *pT = (preferred_displayid_dtd != pEdidInfo->total_timings) ? pEdidTiming[preferred_displayid_dtd] : + (preferred_cea != pEdidInfo->total_timings) ? pEdidTiming[preferred_cea] : + (preferred_dtd1 != pEdidInfo->total_timings) ? pEdidTiming[preferred_dtd1] : + pEdidTiming[dtd1]; + // what if DTD1 itself is filtered out, in such case dtd1 index points to an invalid timing[]? + // (dtd1 != pEdidInfo->total_timings) ? pEdidTiming[dtd1] : pEdidTiming[0]; + } + else if (flag & NVT_FLAG_DTD1_TIMING) + { + *pT = pEdidTiming[dtd1]; + } + else if ((flag & NVT_FLAG_MAX_EDID_TIMING) && (0 == (flag & NVT_FLAG_EDID_861_ST))) + { + *pT = pEdidTiming[max]; + } + else if ((flag & (NVT_FLAG_CEA_4X3_TIMING | NVT_FLAG_CEA_16X9_TIMING | NVT_FLAG_EDID_861_ST)) && ceaIndex < (pEdidInfo->total_timings)) + { + *pT = pEdidTiming[ceaIndex]; + } + else if ((flag & NVT_FLAG_NATIVE_TIMING) != 0 && map3 < pEdidInfo->total_timings) + { + // Allow closest refresh rate match when EDID has detailed timing for different RR on native resolution. + if (map0 < pEdidInfo->total_timings && + pEdidTiming[map0].HVisible == pEdidTiming[map3].HVisible && + pEdidTiming[map0].VVisible == pEdidTiming[map3].VVisible) + { + *pT = pEdidTiming[map0]; + } + else + { + *pT = pEdidTiming[map3]; + } + } + else if (map0 < pEdidInfo->total_timings) + { + // use the exact mapped timing if possible + *pT = pEdidTiming[map0]; + } + else if ((flag & NVT_FLAG_EDID_TIMING_RR_MATCH) && map4 < pEdidInfo->total_timings) + { + *pT = pEdidTiming[map4]; + } + else if (map1 < pEdidInfo->total_timings) + { + // use the mapped timing if possible + *pT = pEdidTiming[map1]; + } + else if (map2 < pEdidInfo->total_timings) + { + // use the 2nd mapped timing if possible + *pT = pEdidTiming[map2]; + } + else if (dtd1 < pEdidInfo->total_timings && width <= pEdidTiming[dtd1].HVisible && height <= pEdidTiming[dtd1].VVisible) + { + // use the 1st detailed timing if possible + *pT = pEdidTiming[dtd1]; + } + else if (cvt) + { + // use the cvt timing + *pT = cvtTiming; + } + else + { + // use the max timing for all other cases + *pT = pEdidTiming[max]; + } + + // set the mismatch status + if (pT->HVisible != width || frame_height(*pT) != height) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_SIZE); + } + if (!NvTiming_IsRoundedRREqual(pT->etc.rr, pT->etc.rrx1k, (NvU16)rr)) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_RR); + } + if (!!pT->interlaced != !!(flag & NVT_PVT_INTERLACED_MASK)) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_FORMAT); + } + + return NVT_STATUS_SUCCESS; +} + +// get the edid timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetEdidTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT) +{ + return NvTiming_GetEdidTimingEx(width, height, rr, flag, pEdidInfo, pT, 0); +} +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetHDMIStereoExtTimingFromEDID(NvU32 width, NvU32 height, NvU32 rr, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_EXT_TIMING *pT) +{ + NVT_STATUS status = NVT_STATUS_ERR; + NvU8 Vic; + NvU32 i; + NVT_TIMING Timing; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_EXT_TIMING)); + + // adjust the flags -- + // need EDID timing with RR match, + // not max timing, + flag = flag 
| NVT_FLAG_EDID_TIMING | NVT_FLAG_EDID_TIMING_RR_MATCH | NVT_FLAG_EDID_861_ST; + flag = flag & ~(NVT_FLAG_MAX_EDID_TIMING); + + status = NvTiming_GetEdidTiming(width, height, rr, flag, pEdidInfo, &Timing); + if (NVT_STATUS_SUCCESS == status) + { + status = NVT_STATUS_ERR; + + // is this an exact match? + if (0 == NVT_GET_TIMING_STATUS_MATCH(Timing.etc.status)) + { + if (NVT_TYPE_EDID_861ST == NVT_GET_TIMING_STATUS_TYPE(Timing.etc.status)) + { + // lookup the vic for this timing in the support map. + Vic = (NvU8) NVT_GET_CEA_FORMAT(Timing.etc.status); + for (i = 0; i < pEdidInfo->Hdmi3Dsupport.total; ++i) + { + if (Vic == pEdidInfo->Hdmi3Dsupport.map[i].Vic) + { + break; + } + } + if (i < pEdidInfo->Hdmi3Dsupport.total) + { + // does this vic support the requested structure type? + if (0 != (NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(StereoStructureType) & pEdidInfo->Hdmi3Dsupport.map[i].StereoStructureMask)) + { + // if this is side-by-side(half) the detail needs to match also. + if ((NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF != StereoStructureType) || (SideBySideHalfDetail == pEdidInfo->Hdmi3Dsupport.map[i].SideBySideHalfDetail)) + { + // convert the 2D timing to 3D. + NvTiming_GetHDMIStereoTimingFrom2DTiming(&Timing, StereoStructureType, SideBySideHalfDetail, pT); + status = NVT_STATUS_SUCCESS; + } + } + } + } + } + } + return status; +} + +// EDID based AspectRatio Timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetEDIDBasedASPRTiming( NvU16 width, NvU16 height, NvU16 rr, NVT_EDID_INFO *pEI, NVT_TIMING *pT) +{ + NvU32 i, dwStatus; + NvU32 dwNativeIndex; + NvU32 flag; + NvU32 ret; + + // sanity check + if( pEI == NULL || pEI->total_timings == 0 || pT == NULL ) + { + return NVT_STATUS_ERR; + } + if( width == 0 || height == 0 ) + { + return NVT_STATUS_ERR; + } + + // get an EDID timing. Return err if it fails as we don't have any timing to tweak. + flag = 0; + ret = NvTiming_GetEdidTiming(width, height, rr, flag, pEI, pT); + if( NVT_STATUS_SUCCESS != ret ) + { + return NVT_STATUS_ERR; + } + // in case we have an exact match from EDID (in terms of Size), we return Success. + else if ((NVT_GET_TIMING_STATUS_MATCH(pT->etc.status) & NVT_STATUS_TIMING_MISMATCH_SIZE) == 0) + { + return NVT_STATUS_SUCCESS; + } + + // find the Native timing + for (i = 0, dwNativeIndex = pEI->total_timings + 1; i < pEI->total_timings; i++) + { + dwStatus = pEI->timing[i].etc.status; + + if ((NVT_IS_NATIVE_TIMING(dwStatus)) || NVT_IS_DTD1(dwStatus)) + { + dwNativeIndex = i; + break; + } + } + + // we don't want to apply LogicScaling(Letterboxing) to Wide Mode on Wide Panel (or non-Wide Mode on non-Wide Panel) + if( nvt_is_wideaspect(width, height) == nvt_is_wideaspect(pEI->timing[dwNativeIndex].HVisible, pEI->timing[dwNativeIndex].VVisible) ) + { + return NVT_STATUS_ERR; + } + + // Letterbox mode enabled by regkey LogicScalingMode + // When we try to set modes not supported in EDID (eg. DFP over DSub) the display may not fit the screen. + // If Logic Scaling is enabled (ie why we are here), we need to tweak the timing (for CRT) provided: + // 1) the aspect ratio of native mode and requested mode differ + // eg. Native AR = 5:4, 1280x1024 + // Requested AR = 16:10, 1280x800 + // 2) Both Width and Height do not mismatch together; If they do we shall go in for DMT/GTF timing + // by failing this call. 
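+    // Worked example (illustrative): native 1280x1024 (5:4), request 1280x800
+    // (16:10): the widths match and the aspect ratios differ, so the branch
+    // below keeps HVisible, shrinks VVisible to 800, and grows VFrontPorch by
+    // (1024 - 800) / 2 = 112 lines to keep the image centered.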
+ if( pT->interlaced == 0 && + dwNativeIndex < pEI->total_timings && + (pEI->timing[dwNativeIndex].HVisible*height != pEI->timing[dwNativeIndex].VVisible*width) && + (width == pT->HVisible || height == pT->VVisible)) + { + pT->HFrontPorch += (pT->HVisible - width) / 2; + pT->VFrontPorch += (pT->VVisible - height) / 2; + pT->HVisible = width; + pT->VVisible = height; + if(rr != pT->etc.rr) + { + pT->etc.rrx1k = rr * 1000; + pT->pclk = RRx1kToPclk (pT); + } + + pT->etc.status = NVT_STATUS_ASPR; + return NVT_STATUS_SUCCESS; + } + + return NVT_STATUS_ERR; +} + +// check whether EDID is valid +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation) +{ + NvU32 i, j, version, checkSum; + EDIDV1STRUC *p = (EDIDV1STRUC *)pEdid; + EDID_LONG_DISPLAY_DESCRIPTOR *pLdd; + NvU8 *pExt; + DETAILEDTIMINGDESCRIPTOR *pDTD; + NvU32 ret = 0; + + // check the EDID base size to avoid accessing beyond the EDID buffer, do not proceed with + // further validation. + if (length < sizeof(EDIDV1STRUC)) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + return ret; + } + + // check the EDID version and signature + if (getEdidVersion(pEdid, &version) != NVT_STATUS_SUCCESS) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION); + return ret; + } + + // check block 0 checksum value + if (!isChecksumValid(pEdid)) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + return ret; + } + + // Strong validation to follow + if (bIsStrongValidation == NV_TRUE) + { + // range limit check + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + pLdd = (EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i]; + if (pLdd->tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL && (version == 0x103 || (version == 0x104 && (p->bFeatureSupport & 1)))) + { + EDID_MONITOR_RANGE_LIMIT *pRangeLimit = (EDID_MONITOR_RANGE_LIMIT *)pLdd->data; + NvU8 max_v_rate_offset, min_v_rate_offset, max_h_rate_offset, min_h_rate_offset; + + // add 255Hz offsets as needed before doing the check, use descriptor->rsvd2 + nvt_assert(!(pLdd->rsvd2 & 0xF0)); + + max_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MIN ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + max_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + + if ((pRangeLimit->minVRate + min_v_rate_offset) > (pRangeLimit->maxVRate + max_v_rate_offset) || + (pRangeLimit->minHRate + min_h_rate_offset) > (pRangeLimit->maxHRate + max_h_rate_offset) || + pRangeLimit->maxVRate == 0 || + pRangeLimit->maxHRate == 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT); + } + break; + } + } + + // extension and size check + if ((NvU32)(p->bExtensionFlag + 1) * sizeof(EDIDV1STRUC) > length) + { + // Do not proceed with further validation if the size is invalid. + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + return ret; + } + + // validate Detailed Timing Descriptors, 4 blocks + for (i = 0; i < 4; i++) + { + if (*((NvU16 *)&p->DetailedTimingDesc[i]) != 0) + { + // This block is not a Display Descriptor. 
+                // It must be a valid timing definition;
+                // validate the block by passing NULL as the NVT_TIMING parameter to parseEdidDetailedTimingDescriptor
+                if (parseEdidDetailedTimingDescriptor((NvU8 *)&p->DetailedTimingDesc[i], NULL) != NVT_STATUS_SUCCESS)
+                {
+                    ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD);
+                }
+            }
+            else
+            {
+                // This block is a display descriptor, validate
+                if (((EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i])->rsvd != 0)
+                {
+                    ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD);
+                }
+            }
+        }
+
+        // validate extension blocks
+        for (j = 1; j <= p->bExtensionFlag; j++)
+        {
+            pExt = pEdid + sizeof(EDIDV1STRUC) * j;
+
+            // dispatch on the extension tag
+            switch (*pExt)
+            {
+                case NVT_EDID_EXTENSION_CTA:
+                    // first sanity check on the extension block
+                    if (get861ExtInfo(pExt, sizeof(EIA861EXTENSION), NULL) != NVT_STATUS_SUCCESS)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT);
+                    }
+
+                    // checksum the CEA extension block
+                    for (i = 0, checkSum = 0; i < sizeof(EIA861EXTENSION); i++)
+                    {
+                        checkSum += pExt[i];
+                    }
+
+                    if ((checkSum & 0xFF) != 0)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM);
+                    }
+
+                    // 0 indicates no DTD in this block
+                    if (((EIA861EXTENSION*)pExt)->offset == 0)
+                    {
+                        continue;
+                    }
+
+                    // validate DTD blocks
+                    pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset];
+                    while (pDTD->wDTPixelClock != 0 &&
+                           (NvU8 *)pDTD - pExt < (int)sizeof(EIA861EXTENSION))
+                    {
+                        if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS)
+                        {
+                            ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD);
+                        }
+                        pDTD++;
+                    }
+                    break;
+                case NVT_EDID_EXTENSION_VTB:
+                    // perform a checksum on the VTB block
+                    for (i = 0, checkSum = 0; i < sizeof(VTBEXTENSION); i++)
+                    {
+                        checkSum += pExt[i];
+                    }
+                    if ((checkSum & 0xFF) != 0)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM);
+                    }
+                    break;
+                case NVT_EDID_EXTENSION_DISPLAYID:
+                    // perform a checksum on the DisplayID block
+                    for (i = 0, checkSum = 0; i < sizeof(EIA861EXTENSION); i++)
+                    {
+                        checkSum += pExt[i];
+                    }
+                    if ((checkSum & 0xFF) != 0)
+                    {
+                        ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM);
+                    }
+                    break;
+                default:
+                    break;
+            }
+        }
+
+    }
+
+    return ret;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EDIDValidation (NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation)
+{
+    if (NvTiming_EDIDValidationMask(pEdid, length, bIsStrongValidation) != 0) {
+        return NVT_STATUS_ERR;
+    } else {
+        return NVT_STATUS_SUCCESS;
+    }
+}
+
+// Function Description: Get the first Detailed Timing Descriptor
+//
+// Parameters:
+//     pEdidInfo: IN - pointer to parsed EDID
+//     pT:        OUT - pointer to where the DTD1 timing will be stored
+//
+// Return:
+//     NVT_STATUS_SUCCESS: DTD1 was found in parsed EDID, pT is a valid result
+//     NVT_STATUS_INVALID_PARAMETER: one or more parameters were invalid
+//     NVT_STATUS_ERR: DTD1 was not found in parsed EDID, pT is invalid
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetDTD1Timing (NVT_EDID_INFO * pEdidInfo, NVT_TIMING * pT)
+{
+    NvU32 j;
+
+    // check params
+    if (pEdidInfo == NULL || pT == NULL)
+    {
+        return NVT_STATUS_INVALID_PARAMETER;
+    }
+
+    // find the PTM mode
+    for (j = 0; j < pEdidInfo->total_timings; j++)
+    {
+        if (NVT_PREFERRED_TIMING_IS_DTD1(pEdidInfo->timing[j].etc.flag, pEdidInfo->timing[j].etc.status))
+        {
+            *pT = pEdidInfo->timing[j];
+            return NVT_STATUS_SUCCESS;
+        }
+    }
+
+    // find DisplayID preferred
+    for (j = 1; j <
pEdidInfo->total_timings; j++) + { + if (NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidInfo->timing[j].etc.flag)) + { + *pT = pEdidInfo->timing[j]; + return NVT_STATUS_SUCCESS; + } + } + + // DTD1 should exist, but if it doesn't, return not found + for (j = 0; j < pEdidInfo->total_timings; j++) + { + NvU32 data = pEdidInfo->timing[j].etc.status; + if (NVT_IS_DTD1(data)) + { + *pT = pEdidInfo->timing[j]; + return NVT_STATUS_SUCCESS; + } + } + + // DTD1 should exist, but if it doesn't, return not found + return NVT_STATUS_ERR; +} + +// Description: Parses a VTB extension block into its associated timings +// +// Parameters: +// pEdidExt: IN - pointer to the beginning of the extension block +// pInfo: IN - The original block information, including the +// array of timings. +// +// NOTE: this function *really* should be in its own separate file, but a certain DVS test +// uses cross build makefiles which do not allow the specification of a new file. +CODE_SEGMENT(PAGE_DD_CODE) +void parseVTBExtension(NvU8 *pEdidExt, NVT_EDID_INFO *pInfo) +{ + NvU32 i; + VTBEXTENSION *pExt = (VTBEXTENSION *)pEdidExt; + NvU32 count; + NvU32 bytes; + NVT_TIMING newTiming; + + // Null = bad idea + if (pEdidExt == NULL) + { + return; + } + + // Sanity check for VTB extension block + if (pExt->tag != NVT_EDID_EXTENSION_VTB || + pExt->revision == NVT_VTB_REV_NONE) + { + return; + } + + // Sanity check - ensure that the # of descriptor does not exceed + // byte size + count = (NvU32)sizeof(EDID_LONG_DISPLAY_DESCRIPTOR) * pExt->num_detailed + + (NvU32)sizeof(EDID_CVT_3BYTE_BLOCK) * pExt->num_cvt + + (NvU32)sizeof(NvU16) * pExt->num_standard; + if (count > NVT_VTB_MAX_PAYLOAD) + { + return; + } + + count = 0; + bytes = 0; + + // Process Detailed Timings + for (i = 0; i < pExt->num_detailed; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseEdidDetailedTimingDescriptor((NvU8 *)(pExt->data + bytes), + &newTiming) == NVT_STATUS_SUCCESS) + { + newTiming.etc.name[39] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_DTDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + bytes += (NvU32)(sizeof(EDID_LONG_DISPLAY_DESCRIPTOR)); + } + } + + // Process CVT Timings + for (i = 0; i < pExt->num_cvt; i++) + { + parseEdidCvt3ByteDescriptor((NvU8 *)(pExt->data + bytes), pInfo, &count); + + bytes += (NvU32)sizeof(EDID_CVT_3BYTE_BLOCK); + } + + // Process Standard Timings + for (i = 0; i < pExt->num_standard; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + parseEdidStandardTimingDescriptor(*(NvU16 *)(pExt->data + bytes), + pInfo, count, &newTiming); + newTiming.etc.name[39] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_STDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + bytes += (NvU32)sizeof(NvU16); + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +static int IsPrintable(NvU8 c) +{ + return ((c >= ' ') && (c <= '~')); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static int IsWhiteSpace(NvU8 c) +{ + // consider anything unprintable or single space (ASCII 32) + // to be whitespace + return (!IsPrintable(c) || (c == ' ')); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static void RemoveTrailingWhiteSpace(NvU8 *str, int len) +{ + int i; + + for (i = len; (i >= 0) && IsWhiteSpace(str[i]); i--) + { + str[i] = '\0'; + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +static void RemoveNonPrintableCharacters(NvU8 *str) +{ + int i; + + // Check that all characters are printable. + // If not, replace them with '?' 
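+    // (illustrative: a product name carrying a stray control byte, e.g.
+    // "FOO\x0ABAR", comes out as "FOO?BAR")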
+ for (i = 0; str[i] != '\0'; i++) + { + if (!IsPrintable(str[i])) + { + str[i] = '?'; + } + } +} + +/** + * @brief Assigns this timing to the next available slot in pInfo->timing[] if + * possible. + * @param pInfo EDID struct containing the parsed timings + * @param pTiming New timing to be copied into pInfo->timing[] + */ +CODE_SEGMENT(PAGE_DD_CODE) +NvBool assignNextAvailableTiming(NVT_EDID_INFO *pInfo, + const NVT_TIMING *pTiming) +{ + // Don't write past the end of + // pInfo->timing[NVT_EDID_MAX_TOTAL_TIMING] + if (pInfo->total_timings >= COUNT(pInfo->timing)) { + return NV_FALSE; + } + + pInfo->timing[pInfo->total_timings++] = *pTiming; + return NV_TRUE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetProductName(const NVT_EDID_INFO *pEdidInfo, + NvU8 *pProductName, + const NvU32 productNameLength) +{ + NvU32 i = 0, m = 0, n = 0; + + if( pEdidInfo == NULL || pProductName == NULL ) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + for ( i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (pEdidInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRITPOR_DPN) + { + for(n = 0; n < NVT_EDID_LDD_PAYLOAD_SIZE && pEdidInfo->ldd[i].u.product_name.str[n] != 0x0; n++) + { + pProductName[m++] = pEdidInfo->ldd[i].u.product_name.str[n]; + if ((m + 1) >= productNameLength) + { + goto done; + } + } + } + } +done: + pProductName[m] = '\0'; //Ensure a null termination at the end. + + RemoveTrailingWhiteSpace(pProductName, m); + RemoveNonPrintableCharacters(pProductName); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalculateEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidsize) +{ + return calculateCRC32(pEDIDBuffer, edidsize); +} + +//Calculates EDID's CRC after purging 'Week of Manufacture', 'Year of Manufacture', +//'Product ID String' & 'Serial Number' from EDID +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalculateCommonEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidVersion) +{ + NvU32 commonEDIDBufferSize = 0; + NvU8 CommonEDIDBuffer[256]; + NvU32 edidBufferIndex = 0; + + if(pEDIDBuffer==NULL) + { + return 0; + } + + // Transfer over the original EDID buffer + NVMISC_MEMCPY(CommonEDIDBuffer, pEDIDBuffer, 256); + + // Wipe out the Serial Number, Week of Manufacture, and Year of Manufacture or Model Year + NVMISC_MEMSET(CommonEDIDBuffer + 0x0C, 0, 6); + + // Wipe out the checksums + CommonEDIDBuffer[0x7F] = 0; + CommonEDIDBuffer[0xFF] = 0; + + // We also need to zero out any "EDID Other Monitor Descriptors" (http://en.wikipedia.org/wiki/Extended_display_identification_data) + for (edidBufferIndex = 54; edidBufferIndex <= 108; edidBufferIndex += 18) + { + if (CommonEDIDBuffer[edidBufferIndex] == 0 && CommonEDIDBuffer[edidBufferIndex+1] == 0) + { + // Wipe this block out. It contains OEM-specific details that contain things like serial numbers + NVMISC_MEMSET(CommonEDIDBuffer + edidBufferIndex, 0, 18); + } + } + + // Check what size we should do the compare against + if ( edidVersion > NVT_EDID_VER_1_4 ) + { + commonEDIDBufferSize = 256; + } + else + { + commonEDIDBufferSize = 128; + } + + return NvTiming_CalculateEDIDCRC32(CommonEDIDBuffer, commonEDIDBufferSize); +} // NvTiming_CalculateCommonEDIDCRC32 + +// Calculate the minimum and maximum v_rate and h_rate, as well as +// maximum pclk; initialize with the range of values in the EDID mode +// list, but override with what is in the range limit descriptor section. 
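+//
+// e.g. (illustrative): pclk is stored in 10 kHz units, so a 148.5 MHz,
+// HTotal = 2200 timing gives h_rate_hz = 14850 * 10000 / 2200 = 67500 Hz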
+// +// based on drivers/modeset.nxt/CODE/edid.c:EdidGetMonitorLimits() and +// EdidBuildRangeLimits() +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalculateEDIDLimits(NVT_EDID_INFO *pEdidInfo, NVT_EDID_RANGE_LIMIT *pLimit) +{ + NvU32 i; + + NVMISC_MEMSET(pLimit, 0, sizeof(NVT_EDID_RANGE_LIMIT)); + + // the below currently only supports 1.x EDIDs + if ((pEdidInfo->version & 0xFF00) != 0x100) + { + return NVT_STATUS_ERR; + } + + pLimit->min_v_rate_hzx1k = ~0; + pLimit->max_v_rate_hzx1k = 0; + pLimit->min_h_rate_hz = ~0; + pLimit->max_h_rate_hz = 0; + pLimit->max_pclk_10khz = 0; + + // find the ranges in the EDID mode list + for (i = 0; i < pEdidInfo->total_timings; i++) + { + NVT_TIMING *pTiming = &pEdidInfo->timing[i]; + NvU32 h_rate_hz; + + if (pLimit->min_v_rate_hzx1k > pTiming->etc.rrx1k) + { + pLimit->min_v_rate_hzx1k = pTiming->etc.rrx1k; + } + if (pLimit->max_v_rate_hzx1k < pTiming->etc.rrx1k) + { + pLimit->max_v_rate_hzx1k = pTiming->etc.rrx1k; + } + + h_rate_hz = axb_div_c(pTiming->pclk, 10000, (NvU32)pTiming->HTotal); + + if (pLimit->min_h_rate_hz > h_rate_hz) + { + pLimit->min_h_rate_hz = h_rate_hz; + } + if (pLimit->max_h_rate_hz < h_rate_hz) + { + pLimit->max_h_rate_hz = h_rate_hz; + } + + if (pLimit->max_pclk_10khz < pTiming->pclk) + { + pLimit->max_pclk_10khz = pTiming->pclk; + } + } + + // use the range limit display descriptor, if available: these + // override anything we found in the EDID mode list + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (pEdidInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL) + { + NVT_EDID_DD_RANGE_LIMIT *pRangeLimit = &pEdidInfo->ldd[i].u.range_limit; + NvU32 max_pclk_10khz; + + // {min,max}_v_rate is in hz + if (pRangeLimit->min_v_rate != 0) { + pLimit->min_v_rate_hzx1k = pRangeLimit->min_v_rate * 1000; + } + if (pRangeLimit->max_v_rate != 0) { + pLimit->max_v_rate_hzx1k = pRangeLimit->max_v_rate * 1000; + } + + // {min,max}_h_rate is in khz + if (pRangeLimit->min_h_rate != 0) { + pLimit->min_h_rate_hz = pRangeLimit->min_h_rate * 1000; + } + if (pRangeLimit->max_h_rate != 0) { + pLimit->max_h_rate_hz = pRangeLimit->max_h_rate * 1000; + } + + // EdidGetMonitorLimits() honored the pclk from the + // modelist over what it found in the range limit + // descriptor, so do the same here + max_pclk_10khz = pRangeLimit->max_pclk_MHz * 100; + if (pLimit->max_pclk_10khz < max_pclk_10khz) { + pLimit->max_pclk_10khz = max_pclk_10khz; + } + + break; + } + } + + return NVT_STATUS_SUCCESS; +} + +// Build a user-friendly name: +// +// * get the vendor name: +// * use the 3 character PNP ID from the EDID's manufacturer ID field +// * expand, if possible, the PNP ID using the PNPVendorIds[] table +// * get the product name from the descriptor block(s) +// * prepend the vendor name and the product name, unless the product +// name already contains the vendor name +// * if any characters in the string are outside the printable ASCII +// range, replace them with '?' + +#define tolower(c) (((c) >= 'A' && (c) <= 'Z') ? 
(c) + ('a'-'A') : (c)) + +CODE_SEGMENT(PAGE_DD_CODE) +void NvTiming_GetMonitorName(NVT_EDID_INFO *pEdidInfo, + NvU8 monitor_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]) +{ + NvU8 product_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]; + const NvU8 *vendor_name; + NVT_STATUS status; + NvU32 i, j; + NvBool prepend_vendor; + + NVMISC_MEMSET(monitor_name, 0, NVT_EDID_MONITOR_NAME_STRING_LENGTH); + + // get vendor_name: it is either the manufacturer ID or the PNP vendor name + vendor_name = pEdidInfo->manuf_name; + + for (i = 0; i < (sizeof(PNPVendorIds)/sizeof(PNPVendorIds[0])); i++) + { + if ((vendor_name[0] == PNPVendorIds[i].vendorId[0]) && + (vendor_name[1] == PNPVendorIds[i].vendorId[1]) && + (vendor_name[2] == PNPVendorIds[i].vendorId[2])) + { + vendor_name = (const NvU8 *) PNPVendorIds[i].vendorName; + break; + } + } + + // get the product name from the descriptor blocks + status = NvTiming_GetProductName(pEdidInfo, product_name, sizeof(product_name)); + + if (status != NVT_STATUS_SUCCESS) + { + product_name[0] = '\0'; + } + + // determine if the product name already includes the vendor name; + // if so, do not prepend the vendor name to the monitor name + prepend_vendor = NV_TRUE; + + for (i = 0; i < NVT_EDID_MONITOR_NAME_STRING_LENGTH; i++) + { + if (vendor_name[i] == '\0') + { + prepend_vendor = NV_FALSE; + break; + } + + if (tolower(product_name[i]) != tolower(vendor_name[i])) + { + break; + } + } + + j = 0; + + // prepend the vendor name to the monitor name + if (prepend_vendor) + { + for (i = 0; (i < NVT_EDID_MONITOR_NAME_STRING_LENGTH) && (vendor_name[i] != '\0'); i++) + { + monitor_name[j++] = vendor_name[i]; + } + } + + // if we added the vendor name above, add a space between the + // vendor name and the product name + if ((j > 0) && (j < (NVT_EDID_MONITOR_NAME_STRING_LENGTH - 1))) + { + monitor_name[j++] = ' '; + } + + // append the product name to the monitor string + for (i = 0; (i < NVT_EDID_MONITOR_NAME_STRING_LENGTH) && (product_name[i] != '\0'); i++) + { + if (j >= (NVT_EDID_MONITOR_NAME_STRING_LENGTH - 1)) + { + break; + } + monitor_name[j++] = product_name[i]; + } + monitor_name[j] = '\0'; + + RemoveTrailingWhiteSpace(monitor_name, j); + RemoveNonPrintableCharacters(monitor_name); +} + +CODE_SEGMENT(PAGE_DD_CODE) +void updateHDMILLCDeepColorForTiming(NVT_EDID_INFO *pInfo, NvU32 index) +{ + NVT_EDID_CEA861_INFO *p861Info = &pInfo->ext861; + // NOTE: EDID and CEA861 does not have clear statement regarding this. + // To be backward compatible with current Nvidia implementation, if not edid >= 1.4 and CEA block exists, follow color format declaration from CEA block. 
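+    // UPDATE_BPC_FOR_COLORFORMAT(fmt, bpc6, bpc8, bpc10, bpc12, bpc14, bpc16)
+    // (as inferred from the call sites below) sets one support bit per color
+    // depth: e.g. HDMI 30/36/48-bit deep color enables 10/12/16 bpc per
+    // component for the given color format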
+    // update the supported color spaces within each bpc
+    // rgb 8bpc is always supported
+
+    UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.rgb444, 0, 1,
+                               pInfo->hdmiLlcInfo.dc_30_bit,
+                               pInfo->hdmiLlcInfo.dc_36_bit,
+                               0, pInfo->hdmiLlcInfo.dc_48_bit);
+
+    if (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444)
+    {
+        // pHdmiLlc->dc_y444 assumes the basic cap is set; when the basic cap is set, 8bpc yuv444 is always supported
+        UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.yuv444, 0, 1,
+                                   pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_30_bit,
+                                   pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_36_bit,
+                                   0, pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_48_bit);
+    }
+    if (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422)
+    {
+        // pHdmiLlc->dc_y444 assumes the basic cap is set; when the basic cap is set, 8bpc yuv422 is always supported.
+        // newer CEA861/HDMI specs suggest the basic cap should support both or neither (Nvidia puts no limitations here).
+        // HDMI1.4b spec Section 6.2.4 Color Depth Requirements states that the YCbCr 4:2:2 format is a 36-bit mode,
+        // which means 8, 10 and 12bpc output is supported as long as there is enough bandwidth
+        UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.yuv422, 0, 1, 1, 1, 0, 0);
+    }
+}
+
+POP_SEGMENTS
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c
new file mode 100644
index 0000000..4dd5b85
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c
@@ -0,0 +1,2942 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvt_edidext_861.c
+//
+// Purpose: to provide EDID 861 extension related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "edid.h"
+
+PUSH_SEGMENTS
+
+#define EIA_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format) \
+    {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ?
NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_STATUS_EDID_861STn(format),"CEA-861B:#"#format""}} + + +#define NVT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format,name) \ + {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ? NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_TYPE_NV_PREDEFINEDn(format),name}} + +#define HDMI_EXT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format,name) \ + {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ? NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_STATUS_HDMI_EXTn(format),name}} + +DATA_SEGMENT(PAGE_DATA) +CONS_SEGMENT(PAGE_CONS) + +static const NVT_TIMING EIA861B[]= +{ + // all 64 EIA/CEA-861E timings + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 1),//640 x 480p @59.94/60 (Format 1) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x1, 2),//720 x 480p @59.94/60 (Format 2) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x1, 3),//720 x 480p @59.94/60 (Format 3) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5,5, 750,'+', 59940,'p',16:9, 0x1, 4),//1280 x 720p @59.94/60 (Format 4) + EIA_TIMING(1920, 88, 44,2200,'+', 540, 2,5, 562,'+', 59940,'i',16:9, 0x1, 5),//1920 x 1080i @59.94/60 (Format 5) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-', 59940,'i', 4:3, 0x2, 6),//720(1440) x 480i @59.94/60 (Format 6) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-', 59940,'i',16:9, 0x2, 7),//720(1440) x 480i @59.94/60 (Format 7) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 263,'-', 59940,'p', 4:3, 0x2, 8),//720(1440) x 240p @59.94/60 (Format 8) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 263,'-', 59940,'p',16:9, 0x2, 9),//720(1440) x 240p @59.94/60 (Format 9) + EIA_TIMING(2880, 76,248,3432,'-', 240, 4,3, 262,'-', 59940,'i', 4:3, 0x3ff,10),//(2880) x 480i @59.94/60 (Format 10) + EIA_TIMING(2880, 76,248,3432,'-', 240, 4,3, 262,'-', 59940,'i',16:9, 0x3ff,11),//(2880) x 480i @59.94/60 (Format 11) + EIA_TIMING(2880, 76,248,3432,'-', 240, 5,3, 263,'-', 59940,'p', 4:3, 0x3ff,12),//(2880) x 480p @59.94/60 (Format 12) + EIA_TIMING(2880, 76,248,3432,'-', 240, 5,3, 263,'-', 59940,'p',16:9, 0x3ff,13),//(2880) x 480p @59.94/60 (Format 13) + EIA_TIMING(1440, 32,124,1716,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x3,14),//1440 x 480p @59.94/60 (Format 14) + EIA_TIMING(1440, 32,124,1716,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x3,15),//1440 x 480p @59.94/60 (Format 15) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+', 59940,'p',16:9, 0x1,16),//1920 x 1080p @59.94/60 (Format 16) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-', 50000,'p', 4:3, 0x1,17),//720 x 576p @50 (Format 17) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-', 50000,'p',16:9, 0x1,18),//720 x 576p @50 (Format 18) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5,5, 750,'+', 50000,'p',16:9, 0x1,19),//1280 x 720p @50 (Format 19) + EIA_TIMING(1920, 528, 44,2640,'+', 540, 2,5, 562,'+', 50000,'i',16:9, 0x1,20),//1920 x 1080i @50 (Format 20) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'i', 4:3, 0x2,21),//720(1440) x 576i @50 (Format 21) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'i',16:9, 0x2,22),//720(1440) x 576i @50 (Format 22) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'p', 4:3, 0x2,23),//720(1440) x 
288p @50 (Format 23) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'p',16:9, 0x2,24),//720(1440) x 288p @50 (Format 24) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'i', 4:3, 0x3ff,25),//(2880) x 576i @50 (Format 25) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'i',16:9, 0x3ff,26),//(2880) x 576i @50 (Format 26) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'p', 4:3, 0x3ff,27),//(2880) x 288p @50 (Format 27) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'p',16:9, 0x3ff,28),//(2880) x 288p @50 (Format 28) + EIA_TIMING(1440, 24,128,1728,'-', 576, 5,5, 625,'_', 50000,'p', 4:3, 0x3,29),//1440 x 576p @50 (Format 29) + EIA_TIMING(1440, 24,128,1728,'-', 576, 5,5, 625,'_', 50000,'p',16:9, 0x3,30),//1440 x 576p @50 (Format 30) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+', 50000,'p',16:9, 0x1,31),//1920 x 1080p @50 (Format 31) + EIA_TIMING(1920, 638, 44,2750,'+',1080, 4,5,1125,'+', 23976,'p',16:9, 0x1,32),//1920 x 1080p @23.97/24 (Format 32) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+', 25000,'p',16:9, 0x1,33),//1920 x 1080p @25 (Format 33) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+', 29970,'p',16:9, 0x1,34),//1920 x 1080p @29.97/30 (Format 34) + EIA_TIMING(2880, 64,248,3432,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x7,35),//(2880) x 480p @59.94/60 (Format 35) + EIA_TIMING(2880, 64,248,3432,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x7,36),//(2880) x 480p @59.94/60 (Format 36) + EIA_TIMING(2880, 48,256,3456,'-', 576, 5,5, 625,'-', 50000,'p', 4:3, 0x7,37),//(2880) x 576p @50 (Format 37) + EIA_TIMING(2880, 48,256,3456,'-', 576, 5,5, 625,'-', 50000,'p',16:9, 0x7,38),//(2880) x 576p @50 (Format 38) + EIA_TIMING(1920, 32,168,2304,'+', 540,23,5, 625,'-', 50000,'i',16:9, 0x1,39),//1920 x 1080i @50 (Format 39) + EIA_TIMING(1920, 528, 44,2640,'+', 540, 2,5, 562,'+',100000,'i',16:9, 0x1,40),//1920 x 1080i @100 (Format 40) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5,5, 750,'+',100000,'p',16:9, 0x1,41),//1280 x 720p @100 (Format 41) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',100000,'p', 4:3, 0x1,42),//720 x 576p @100 (Format 42) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',100000,'p',16:9, 0x1,43),//720 x 576p @100 (Format 43) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',100000,'i', 4:3, 0x2,44),//720(1440) x 576i @100 (Format 44) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',100000,'i',16:9, 0x2,45),//720(1440) x 576i @100 (Format 45) + EIA_TIMING(1920, 88, 44,2200,'+', 540, 2,5, 562,'+',119880,'i',16:9, 0x1,46),//1920 x 1080i @119.88/120 (Format 46) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5,5, 750,'+',119880,'p',16:9, 0x1,47),//1280 x 720p @119.88/120 (Format 47) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',119880,'p', 4:3, 0x1,48),//720 x 480p @119.88/120 (Format 48) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',119880,'p',16:9, 0x1,49),//720 x 480p @119.88/120 (Format 49) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',119880,'i', 4:3, 0x2,50),//720(1440) x 480i @119.88/120 (Format 50) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',119880,'i',16:9, 0x2,51),//720(1440) x 480i @119.88/120 (Format 51) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',200000,'p', 4:3, 0x1,52),//720 x 576p @200 (Format 52) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',200000,'p',16:9, 0x1,53),//720 x 576p @200 (Format 53) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',200000,'i', 4:3, 0x2,54),//720(1440) x 576i @200 (Format 54) + 
EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',200000,'i',16:9, 0x2,55),//720(1440) x 576i @200 (Format 55) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',239760,'p', 4:3, 0x1,56),//720 x 480p @239.76/240 (Format 56) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',239760,'p',16:9, 0x1,57),//720 x 480p @239.76/240 (Format 57) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',239760,'i', 4:3, 0x2,58),//720(1440) x 480i @239.76/240 (Format 58) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',239760,'i',16:9, 0x2,59),//720(1440) x 480i @239.76/240 (Format 59) + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5,5, 750,'+',23976, 'p',16:9, 0x1,60),//1280 x 720p @23.97/24 (Format 60) + EIA_TIMING(1280,2420, 40,3960,'+', 720, 5,5, 750,'+',25000, 'p',16:9, 0x1,61),//1280 x 720p @25 (Format 61) + EIA_TIMING(1280,1760, 40,3300,'-', 720, 5,5, 750,'+',29970, 'p',16:9, 0x1,62),//1280 x 720p @29.97/30 (Format 62) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+',119880,'p',16:9, 0x1,63),//1920 x 1080p @119.88/120 (Format 63) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+',100000,'p',16:9, 0x1,64),//1920 x 1080p @100 (Format 64) + // Following modes are from CEA-861F + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5, 5, 750,'+', 23976,'p', 64:27, 0x1, 65),//1280 x 720p @23.98/24 (Format 65) + EIA_TIMING(1280,2420, 40,3960,'+', 720, 5, 5, 750,'+', 25000,'p', 64:27, 0x1, 66),//1280 x 720p @25 (Format 66) + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5, 5, 750,'+', 29970,'p', 64:27, 0x1, 67),//1280 x 720p @29.97/30 (Format 67) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5, 5, 750,'+', 50000,'p', 64:27, 0x1, 68),//1280 x 720p @50 (Format 68) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5, 5, 750,'+', 59940,'p', 64:27, 0x1, 69),//1280 x 720p @59.94/60 (Format 69) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5, 5, 750,'+',100000,'p', 64:27, 0x1, 70),//1280 x 720p @100 (Format 70) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5, 5, 750,'+',119880,'p', 64:27, 0x1, 71),//1280 x 720p @119.88/120 (Format 71) + EIA_TIMING(1920, 638, 44,2750,'+',1080, 4, 5,1125,'+', 23976,'p', 64:27, 0x1, 72),//1920 x1080p @23.98/24 (Format 72) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+', 25000,'p', 64:27, 0x1, 73),//1920 x1080p @25 (Format 73) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 29970,'p', 64:27, 0x1, 74),//1920 x1080p @29.97/30 (Format 74) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+', 50000,'p', 64:27, 0x1, 75),//1920 x1080p @50 (Format 75) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 59940,'p', 64:27, 0x1, 76),//1920 x1080p @59.94/60 (Format 76) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+',100000,'p', 64:27, 0x1, 77),//1920 x1080p @100 (Format 77) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+',119880,'p', 64:27, 0x1, 78),//1920 x1080p @119.88/120 (Format 78) + EIA_TIMING(1680,1360, 40,3300,'+', 720, 5, 5, 750,'+', 23976,'p', 64:27, 0x1, 79),//1680 x 720p @23.98/24 (Format 79) + EIA_TIMING(1680,1228, 40,3168,'+', 720, 5, 5, 750,'+', 25000,'p', 64:27, 0x1, 80),//1680 x 720p @25 (Format 80) + EIA_TIMING(1680, 700, 40,2640,'+', 720, 5, 5, 750,'+', 29970,'p', 64:27, 0x1, 81),//1680 x 720p @29.97/30 (Format 81) + EIA_TIMING(1680, 260, 40,2200,'+', 720, 5, 5, 750,'+', 50000,'p', 64:27, 0x1, 82),//1680 x 720p @50 (Format 82) + EIA_TIMING(1680, 260, 40,2200,'+', 720, 5, 5, 750,'+', 59940,'p', 64:27, 0x1, 83),//1680 x 720p @59.94/60 (Format 83) + EIA_TIMING(1680, 60, 40,2000,'+', 720, 5, 5, 825,'+',100000,'p', 64:27, 0x1, 84),//1680 x 720p @100 (Format 84) + 
EIA_TIMING(1680, 60, 40,2000,'+', 720, 5, 5, 825,'+',119880,'p', 64:27, 0x1, 85),//1680 x 720p @119.88/120 (Format 85) + EIA_TIMING(2560, 998, 44,3750,'+',1080, 4, 5,1100,'+', 23976,'p', 64:27, 0x1, 86),//2560 x1080p @23.98/24 (Format 86) + EIA_TIMING(2560, 448, 44,3200,'+',1080, 4, 5,1125,'+', 25000,'p', 64:27, 0x1, 87),//2560 x1080p @25 (Format 87) + EIA_TIMING(2560, 768, 44,3520,'+',1080, 4, 5,1125,'+', 29970,'p', 64:27, 0x1, 88),//2560 x1080p @29.97/30 (Format 88) + EIA_TIMING(2560, 548, 44,3300,'+',1080, 4, 5,1125,'+', 50000,'p', 64:27, 0x1, 89),//2560 x1080p @50 (Format 89) + EIA_TIMING(2560, 248, 44,3000,'+',1080, 4, 5,1100,'+', 59940,'p', 64:27, 0x1, 90),//2560 x1080p @59.94/60 (Format 90) + EIA_TIMING(2560, 218, 44,2970,'+',1080, 4, 5,1250,'+',100000,'p', 64:27, 0x1, 91),//2560 x1080p @100 (Format 91) + EIA_TIMING(2560, 548, 44,3300,'+',1080, 4, 5,1250,'+',119880,'p', 64:27, 0x1, 92),//2560 x1080p @119.88/120 (Format 92) + EIA_TIMING(3840,1276, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p', 16:9, 0x1, 93),//3840 x2160p @23.98/24 (Format 93) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p', 16:9, 0x1, 94),//3840 x2160p @25 (Format 94) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p', 16:9, 0x1, 95),//3840 x2160p @29.97/30 (Format 95) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p', 16:9, 0x1, 96),//3840 x2160p @50 (Format 96) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p', 16:9, 0x1, 97),//3840 x2160p @59.94/60 (Format 97) + EIA_TIMING(4096,1020, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p',256:135, 0x1, 98),//4096 x2160p @23.98/24 (Format 98) + EIA_TIMING(4096, 968, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p',256:135, 0x1, 99),//4096 x2160p @25 (Format 99) + EIA_TIMING(4096, 88, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p',256:135, 0x1,100),//4096 x2160p @29.97/30 (Format 100) + EIA_TIMING(4096, 968, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p',256:135, 0x1,101),//4096 x2160p @50 (Format 101) + EIA_TIMING(4096, 88, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p',256:135, 0x1,102),//4096 x2160p @59.94/60 (Format 102) + EIA_TIMING(3840,1276, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p', 64:27, 0x1,103),//3840 x2160p @23.98/24 (Format 103) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p', 64:27, 0x1,104),//3840 x2160p @25 (Format 104) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p', 64:27, 0x1,105),//3840 x2160p @29.97/30 (Format 105) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p', 64:27, 0x1,106),//3840 x2160p @50 (Format 106) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p', 64:27, 0x1,107),//3840 x2160p @59.94/60 (Format 107) + // VIC 108-127 timings are from CTA-861-G_FINAL_revised_2018_Errata_2.pdf + EIA_TIMING(1280, 960, 40, 2500,'+', 720, 5, 5, 750,'+', 47950,'p', 16:9, 0x1,108),//1280 x 720p @47.95/48 (Format 108) + EIA_TIMING(1280, 960, 40, 2500,'+', 720, 5, 5, 750,'+', 47950,'p', 64:27, 0x1,109),//1280 x 720p @47.95/48 (Format 109) + EIA_TIMING(1680, 810, 40, 2750,'+', 720, 5, 5, 750,'+', 47950,'p', 64:27, 0x1,110),//1680 x 720p @47.95/48 (Format 110) + EIA_TIMING(1920, 638, 44, 2750,'+',1080, 4, 5,1125,'+', 47950,'p', 16:9, 0x1,111),//1920 x 1080p @47.95/48 (Format 111) + EIA_TIMING(1920, 638, 44, 2750,'+',1080, 4, 5,1125,'+', 47950,'p', 64:27, 0x1,112),//1920 x 1080p @47.95/48 (Format 112) + EIA_TIMING(2560, 998, 44, 3750,'+',1080, 4, 5,1100,'+', 47950,'p', 64:27, 0x1,113),//2560 x 1080p @47.95/48 (Format 113) + EIA_TIMING(3840,1276, 88, 
5500,'+',2160, 8,10,2250,'+', 47950,'p', 16:9, 0x1,114),//3840 x 2160p @47.95/48 (Format 114) + EIA_TIMING(4096,1020, 88, 5500,'+',2160, 8,10,2250,'+', 47950,'p',256:135, 0x1,115),//4096 x 2160p @47.95/48 (Format 115) + EIA_TIMING(3840,1276, 88, 5500,'+',2160, 8,10,2250,'+', 47950,'p', 64:27, 0x1,116),//3840 x 2160p @47.95/48 (Format 116) + EIA_TIMING(3840,1056, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p', 16:9, 0x1,117),//3840 x 2160p @100 (Format 117) + EIA_TIMING(3840, 176, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p', 16:9, 0x1,118),//3840 x 2160p @119.88/120 (Format 118) + EIA_TIMING(3840,1056, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p', 64:27, 0x1,119),//3840 x 2160p @100 (Format 119) + EIA_TIMING(3840, 176, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p', 64:27, 0x1,120),//3840 x 2160p @119.88/120 (Format 120) + EIA_TIMING(5120,1996, 88, 7500,'+',2160, 8,10,2200,'+', 23976,'p', 64:27, 0x1,121),//5120 x 2160p @23.98/24 (Format 121) + EIA_TIMING(5120,1696, 88, 7200,'+',2160, 8,10,2200,'+', 25000,'p', 64:27, 0x1,122),//5120 x 2160p @25 (Format 122) + EIA_TIMING(5120, 664, 88, 6000,'+',2160, 8,10,2200,'+', 29970,'p', 64:27, 0x1,123),//5120 x 2160p @29.97/30 (Format 123) + EIA_TIMING(5120, 746, 88, 6250,'+',2160, 8,10,2475,'+', 47950,'p', 64:27, 0x1,124),//5120 x 2160p @47.95/48 (Format 124) + EIA_TIMING(5120,1096, 88, 6600,'+',2160, 8,10,2250,'+', 50000,'p', 64:27, 0x1,125),//5120 x 2160p @50 (Format 125) + EIA_TIMING(5120, 164, 88, 5500,'+',2160, 8,10,2250,'+', 59940,'p', 64:27, 0x1,126),//5120 x 2160p @59.94/60 (Format 126) + EIA_TIMING(5120,1096, 88, 6600,'+',2160, 8,10,2250,'+',100000,'p', 64:27, 0x1,127),//5120 x 2160p @100 (Format 127) + // VIC 128-192 are Forbidden and should be never used. But to simplify the SVD access, put a default timing here. + // We can remove these after adding a function to access CEA Timings. 
+ EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 128) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 129) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 130) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 131) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 132) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 133) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 134) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 135) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 136) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 137) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 138) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 139) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 140) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 141) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 142) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 143) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 144) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 145) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 146) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 147) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 148) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 149) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 150) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 151) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 152) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 153) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 154) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 
155) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 156) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 157) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 158) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 159) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 160) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 161) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 162) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 163) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 164) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 165) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 166) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 167) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 168) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 169) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 170) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 171) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 172) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 173) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 174) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 175) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 176) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 177) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 178) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 179) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 180) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 181) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 182) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden 
(Format 183) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 184) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 185) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 186) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 187) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 188) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 189) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 190) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 191) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 192) + // VIC 193-219 timings are from CTA-861-G_FINAL_revised_2018_Errata_2.pdf + EIA_TIMING( 5120, 164, 88, 5500,'+',2160, 8,10,2250,'+',119880,'p', 64:27,0x1,193),// 5120 x 2160p @119.88/120 (Format 193) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 23976,'p', 16:9,0x1,194),// 7680 x 4320p @23.98/24 (Format 194) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 25000,'p', 16:9,0x1,195),// 7680 x 4320p @25 (Format 195) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 29970,'p', 16:9,0x1,196),// 7680 x 4320p @29.97/30 (Format 196) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 47950,'p', 16:9,0x1,197),// 7680 x 4320p @47.95/48 (Format 197) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 50000,'p', 16:9,0x1,198),// 7680 x 4320p @50 (Format 198) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 59940,'p', 16:9,0x1,199),// 7680 x 4320p @59.94/60 (Format 199) + EIA_TIMING( 7680,2112,176,10560,'+',4320,16,20,4500,'+',100000,'p', 16:9,0x1,200),// 7680 x 4320p @100 (Format 200) + EIA_TIMING( 7680, 352,176, 8800,'+',4320,16,20,4500,'+',119880,'p', 16:9,0x1,201),// 7680 x 4320p @119.88/120 (Format 201) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 23976,'p', 64:27,0x1,202),// 7680 x 4320p @23.98/24 (Format 202) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 25000,'p', 64:27,0x1,203),// 7680 x 4320p @25 (Format 203) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 29970,'p', 64:27,0x1,204),// 7680 x 4320p @29.97/30 (Format 204) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 47950,'p', 64:27,0x1,205),// 7680 x 4320p @47.95/48 (Format 205) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 50000,'p', 64:27,0x1,206),// 7680 x 4320p @50 (Format 206) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 59940,'p', 64:27,0x1,207),// 7680 x 4320p @59.94/60 (Format 207) + EIA_TIMING( 7680,2112,176,10560,'+',4320,16,20,4500,'+',100000,'p', 64:27,0x1,208),// 7680 x 4320p @100 (Format 208) + EIA_TIMING( 7680, 352,176, 8800,'+',4320,16,20,4500,'+',119880,'p', 64:27,0x1,209),// 7680 x 4320p @119.88/120 (Format 209) + EIA_TIMING(10240,1492,176,12500,'+',4320,16,20,4950,'+', 23976,'p', 64:27,0x1,210),//10240 x 4320p @23.98/24 (Format 210) + EIA_TIMING(10240,2492,176,13500,'+',4320,16,20,4400,'+', 25000,'p', 64:27,0x1,211),//10240 x 4320p @25 (Format 211) +
EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+', 29970,'p', 64:27,0x1,212),//10240 x 4320p @29.97/30 (Format 212) + EIA_TIMING(10240,1492,176,12500,'+',4320,16,20,4950,'+', 47950,'p', 64:27,0x1,213),//10240 x 4320p @47.95/48 (Format 213) + EIA_TIMING(10240,2492,176,13500,'+',4320,16,20,4400,'+', 50000,'p', 64:27,0x1,214),//10240 x 4320p @50 (Format 214) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+', 59940,'p', 64:27,0x1,215),//10240 x 4320p @59.94/60 (Format 215) + EIA_TIMING(10240,2192,176,13200,'+',4320,16,20,4500,'+',100000,'p', 64:27,0x1,216),//10240 x 4320p @100 (Format 216) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+',119880,'p', 64:27,0x1,217),//10240 x 4320p @119.88/120 (Format 217) + EIA_TIMING( 4096, 800, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p',256:135,0x1,218),// 4096 x 2160p @100 (Format 218) + EIA_TIMING( 4096, 88, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p',256:135,0x1,219),// 4096 x 2160p @119.88/120 (Format 219) + // 220-255 Reserved for the Future + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_CEA861B_FORMAT = sizeof(EIA861B)/sizeof(EIA861B[0]) - 1; + +static const NvU32 EIA861B_DUAL_ASPECT_VICS[][2] = +{ + { 2, 3 }, // 720x480p 59.94Hz/60Hz + { 4, 69 }, // 1280x720p 59.94Hz/60Hz + { 6, 7 }, // 720(1440)x480i 59.94Hz/60Hz + { 8, 9 }, // 720(1440)x240p 59.94Hz/60Hz + + { 10, 11 }, // 2880x480i 59.94Hz/60Hz + { 12, 13 }, // 2880x240p 59.94Hz/60Hz + { 14, 15 }, // 1440x480p 59.94Hz/60Hz + { 16, 76 }, // 1920x1080p 59.94Hz/60Hz + { 17, 18 }, // 720x576p 50Hz + { 19, 68 }, // 1280x720p 50Hz + + { 21, 22 }, // 720(1440)x576i 50Hz + { 23, 24 }, // 720(1440)x288p 50Hz + { 25, 26 }, // 2880x576i 50Hz + { 27, 28 }, // 2880x288p 50Hz + { 29, 30 }, // 1440x576p 50Hz + + { 31, 75 }, // 1920x1080p 50Hz + { 32, 72 }, // 1920x1080p 23.98Hz/24Hz + { 33, 73 }, // 1920x1080p 25Hz + { 34, 74 }, // 1920x1080p 29.97Hz/30Hz + { 35, 36 }, // 2880x480p 59.94Hz/60Hz + { 37, 38 }, // 2880x576p 50Hz + + { 41, 70 }, // 1280x720p 100Hz + { 42, 43 }, // 720x576p 100Hz + { 44, 45 }, // 720(1440)x576i 100Hz + { 47, 71 }, // 1280x720p 119.88/120Hz + { 48, 49 }, // 720x480p 119.88/120Hz + + { 50, 51 }, // 720(1440)x480i 119.88/120Hz + { 52, 53 }, // 720x576p 200Hz + { 54, 55 }, // 720(1440)x576i 200Hz + { 56, 57 }, // 720x480p 239.76/240Hz + { 58, 59 }, // 720(1440)x480i 239.76/240Hz + + { 60, 65 }, // 1280x720p 23.98Hz/24Hz + { 61, 66 }, // 1280x720p 25Hz + { 62, 67 }, // 1280x720p 29.97Hz/30Hz + { 63, 78 }, // 1920x1080p 119.88/120Hz + { 64, 77 }, // 1920x1080p 100Hz + + { 93, 103 }, // 3840x2160p 23.98Hz/24Hz + { 94, 104 }, // 3840x2160p 25Hz + { 95, 105 }, // 3840x2160p 29.97Hz/30Hz + { 96, 106 }, // 3840x2160p 50Hz + { 97, 107 }, // 3840x2160p 59.94Hz/60Hz +}; +static NvU32 MAX_EIA861B_DUAL_ASPECT_VICS = sizeof(EIA861B_DUAL_ASPECT_VICS) / sizeof(EIA861B_DUAL_ASPECT_VICS[0]); + +static const NVT_TIMING PSF_TIMING[]= +{ + NVT_TIMING( 1920,600, 88,2750,'+', 540, 2,5,562,'+',47952,'i',16:9, 0x1, 1, "ITU-R BT.709-5:1080i/24Psf"),//1920x1080i @47.952Hz | 24/PsF | ITU-R BT.709-5 + NVT_TIMING( 1920,488, 88,2640,'+', 540, 2,5,562,'+',49950,'i',16:9, 0x1, 2, "ITU-R BT.709-5:1080i/25Psf"),//1920x1080i @49.950Hz | 25/PsF | ITU-R BT.709-5 + + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_PSF_FORMAT = sizeof(PSF_TIMING)/sizeof(PSF_TIMING[0]) - 1; + +static const NVT_TIMING HDMI_EXT_4Kx2K_TIMING[]= +{ + HDMI_EXT_TIMING( 3840, 176, 88,4400,'+', 2160, 8,10,2250,'+',29970,'p',16:9, 0x1,
NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx30Hz, "HDMI EXT: 3840x2160x29.97/30hz"),//3840x2160 @29.97/30Hz VIC: 0x01 + HDMI_EXT_TIMING( 3840,1056, 88,5280,'+', 2160, 8,10,2250,'+',25000,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx25Hz, "HDMI EXT: 3840x2160x25hz"), //3840x2160 @25Hz VIC: 0x02 + HDMI_EXT_TIMING( 3840,1276, 88,5500,'+', 2160, 8,10,2250,'+',23976,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz, "HDMI EXT: 3840x2160x23.98/24hz"),//3840x2160 @23.98/24Hz VIC: 0x03 + HDMI_EXT_TIMING( 4096,1020, 88,5500,'+', 2160, 8,10,2250,'+',24000,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz_SMPTE, "HDMI EXT: 4096x2160x24hzSmpte"), //4096x2160 @24Hz VIC: 0x04 + + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_HDMI_EXT_4Kx2K_FORMAT = sizeof(HDMI_EXT_4Kx2K_TIMING)/sizeof(HDMI_EXT_4Kx2K_TIMING[0]) - 1; + +// HDMI 1.4a mandatory 3D video formats. +// From HDMI 1.4a specification page 147 of 201, table 8-15, and HDMI 1.4a Compliance test specification page 190. +static const HDMI3DDETAILS HDMI_MANDATORY_3D_FORMATS[] = +{ + {32, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1920 x 1080p @ 24 Hz + { 4, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1280 x 720p @ 60 Hz + {19, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1280 x 720p @ 50 Hz + { 5, NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH}, // 1920 x 1080i @ 60 Hz + {20, NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH} // 1920 x 1080i @ 50 Hz +}; +static NvU32 MAX_HDMI_MANDATORY_3D_FORMAT = sizeof(HDMI_MANDATORY_3D_FORMATS) / sizeof(HDMI_MANDATORY_3D_FORMATS[0]); + +static const NVT_VIDEO_INFOFRAME DEFAULT_VIDEO_INFOFRAME = {/*header*/2,2,13, /*byte1*/0, /*byte2*/0x8, /*byte3*/0, /*byte4*/0, /*byte5*/0, /*byte6~13*/0,0,0,0,0,0,0,0}; +static const NVT_AUDIO_INFOFRAME DEFAULT_AUDIO_INFOFRAME = {/*header*/4,1,10, /*byte1*/0, /*byte2*/0, /*byte3*/0, /*byte4*/0, /*byte5*/0, /*byte6~10*/0,0,0,0,0}; +static const NVT_VENDOR_SPECIFIC_INFOFRAME DEFAULT_VENDOR_SPECIFIC_INFOFRAME = {/*header*/{0x01,1,6}, {/*byte1*/3, /*byte2*/0x0c, /*byte3*/0, /*byte4*/0, /*byte5*/0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}}; + +// parse the 861 detailed timing info +CODE_SEGMENT(PAGE_DD_CODE) +void parse861ExtDetailedTiming(NvU8 *pEdidExt, + NvU8 basicCaps, + NVT_EDID_INFO *pInfo) +{ + NvU32 count = 0; + EIA861EXTENSION *pEIA861 = (EIA861EXTENSION *) pEdidExt; + DETAILEDTIMINGDESCRIPTOR *pDTD; + NVT_TIMING newTiming; + + // sanity check for CEA ext block + if ((pEIA861->tag != 0x2) || (0 == pEIA861->offset) || (NVT_CEA861_REV_NONE == pEIA861->revision)) + { + // no CEA ext block, return + return; + } + + // Get all detailed timings in CEA ext block + pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pEdidExt[pEIA861->offset]; + + while((NvU8 *)pDTD < (pEdidExt + sizeof(EDIDV1STRUC)) && // Check that we're not going beyond this extension block. + pDTD->wDTPixelClock != 0) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, + &newTiming) == NVT_STATUS_SUCCESS) + { + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861Long:%5dx%4dx%3d.%03dHz/%s", + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1) * newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ?
"I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_EXT_DTDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + pDTD ++; + } +} + +// parse the 861B short timing descriptor +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortTiming(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 i; + NvU32 vic, bytePos, bitPos; + NVT_TIMING newTiming; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + + NvU8 *pVic = pExt861->video; + NvU32 total_svd = pExt861->total_svd; + NvU8 *pYuv420Map = pExt861->valid.y420cmdb ? pExt861->map_y420cmdb : NULL; + NvU8 yuv420MapCount = pExt861->total_y420cmdb; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHfvs = &pInfo->hdmiForumInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + } + else + { + return; + } + + for (i = 0; i < total_svd; i++) + { + vic = NVT_GET_CTA_8BIT_VIC(pVic[i]); + + if (vic == 0 || vic > MAX_CEA861B_FORMAT) + continue; + + // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + newTiming = EIA861B[vic-1]; + newTiming.etc.status = NVT_STATUS_EDID_861STn(vic); + + // set CEA format to location of _CEA_FORMAT. _CEA_FORMAT isn't set in pre-defined CE timing from + // EIA861B table + if (NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status) != + NVT_CEA861_640X480P_59940HZ_4X3) + { + // Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + NVT_SET_CEA_FORMAT(newTiming.etc.status, + NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + } + + // calculate the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + + if ((vic <= 64) && (pVic[i] & NVT_CTA861_VIDEO_NATIVE_MASK)) + { + NVT_SET_NATIVE_TIMING_FLAG(newTiming.etc.status); + } + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861G:#%3d:%5dx%4dx%3d.%03dHz/%s", (int)vic, + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1)*newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? 
"I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + + // if yuv420 is supported in the video SVDs, it is indicated by CMDB bitmap + bytePos = i / (8 * sizeof(NvU8)); + if (bytePos < yuv420MapCount) + { + bitPos = 1 << (i % (8 * sizeof(NvU8))); + if (pYuv420Map[bytePos] & bitPos) + { + // pHfvs->dcXXX are only for YCbCr420; when bitPos is set, 8bpc yuv420 always supported + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + } + } + + // Y420CMDB with L == 1, implies yuv420MapCount == 0 but all SVDs support 420 + if (pYuv420Map && yuv420MapCount == 0) + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + newTiming.etc.flag |= NVT_FLAG_DISPLAYID_2_0_TIMING; + + if (!assignNextAvailableDisplayId20Timing(pDisplayID20, &newTiming)) + { + break; + } + } + } +} + +// parse the 861B short Yuv420 timing descriptor +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortYuv420Timing(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 i; + NvU8 vic; + NVT_TIMING newTiming; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NvU8 *pYuv420Vic = pExt861->svd_y420vdb; + NvU32 total_y420vdb = pExt861->total_y420vdb; + NvU8 *pVdb = pExt861->video; + NvU32 total_svd = pExt861->total_svd; + NvU32 total_timings = 0; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHfvs = &pInfo->hdmiForumInfo; + total_timings = pInfo->total_timings; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + total_timings = pDisplayID20->total_timings; + } + else + { + return; + } + + if (total_timings == 0) + { + return; + } + + + for (i = 0; i < total_y420vdb; i++) + { + vic = NVT_GET_CTA_8BIT_VIC(pYuv420Vic[i]); + + if (vic == 0 || vic > MAX_CEA861B_FORMAT) + continue; + + // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + newTiming = EIA861B[vic-1]; + + // if yuv420 is supported in the video SVDs, it is indicated by yuv420vdb + if(total_svd > 0) + { + NvU8 idx, j; + NvBool bFound = NV_FALSE; + for (idx=0; idx < total_svd; idx++) + { + if (pVdb[idx] == vic) + { + for (j=0; j < total_timings; j++) + { + NVT_TIMING *timing = NULL; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + timing = &pInfo->timing[j]; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + timing = &pDisplayID20->timing[j]; + } + + if (NvTiming_IsTimingExactEqual(timing, &newTiming)) + { + bFound = NV_TRUE; + // we found one in pExt861->video[]. pHfvs->dcXXX are only for YCbCr420, so we can support: + // 1. 8bpc yuv420 always supported. + // 2. only add yuv420 and its deep colour caps into Video Data Block + UPDATE_BPC_FOR_COLORFORMAT(timing->etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + break; + } + } + } + } + if (bFound) continue; + } + + newTiming.etc.status = NVT_STATUS_EDID_861STn(vic); + + // set CEA format to location of _CEA_FORMAT. 
_CEA_FORMAT isn't set in pre-defined CE timing from + // EIA861B table + if (NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status) != + NVT_CEA861_640X480P_59940HZ_4X3) + { + // Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + NVT_SET_CEA_FORMAT(newTiming.etc.status, + NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + } + + // calculate the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + + // From CTA-861-F: By default, Y420VDB SVDs, when present in the EDID, shall be less preferred than all regular Video Data Block SVDs. + // So it should use normal VIC code without native flag. + //if ((vic <= 64) && (pVic[i] & NVT_CTA861_VIDEO_NATIVE_MASK)) + //{ + // NVT_SET_NATIVE_TIMING_FLAG(newTiming.etc.status); + //} + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861G:#%3d:%5dx%4dx%3d.%03dHz/%s", (int)vic, + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1)*newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + + // update supported color space; any VICs enumerated in the Y420VDB are yuv420 only modes + // update 8bpc supported color space; other bpc updated once VSDB is parsed + + // pHfvs->dcXXX are only for YCbCr420; when Vic enumerated here, 8bpc yuv420 always supported + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + newTiming.etc.flag |= NVT_FLAG_DISPLAYID_2_0_TIMING; + + if (!assignNextAvailableDisplayId20Timing(pDisplayID20, &newTiming)) + { + break; + } + } + } +} + +// Currently, we only focus on the particular application described in the CEA861-F spec: +// "One particular application is a Sink that prefers a Video Format that is not listed as an SVD in a VDB +// but instead listed in a YCBCR 4:2:0 Video Data Block" +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortPreferredTiming(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 isMatch, i, j = 0; + + NVT_TIMING preferTiming; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NvU8 *pSvr = pExt861->svr_vfpdb; + NvU8 totalSvr = pExt861->total_vfpdb; + NvU8 kth = 0; + NvU8 extKth = 0; + NvU8 DTDCount = 0; + NvU8 extDTDCount = 0; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + } + else + { + return; + } + + // find all the DTDs + if (flag == FROM_CTA861_EXTENSION) + { + for (j = 0; j < pInfo->total_timings; j++) + { + if (NVT_IS_DTD(pInfo->timing[j].etc.status)) + { + DTDCount++; + } + else if (NVT_IS_EXT_DTD(pInfo->timing[j].etc.status)) + { + extDTDCount++; + } + } + } + + // TODO : this only handles a single SVR right now + for (i = 0; i < totalSvr; i++) + { + NvU8 svr = pSvr[i]; + NvU8 vic = 0; + + if (svr == 0 || svr == 128 || (svr >= 161 && svr <= 192) || svr == 255) + continue; + + // Kth 18-byte DTD in the EDID + if (svr >= 129 && svr <= 144) + { + kth = svr - 128; + // only base EDID and CTA861 can support 18-byte DTDs + if (flag ==
FROM_CTA861_EXTENSION) + { + for (j = 0; j < pInfo->total_timings; j++) + { + if (kth <= DTDCount) + { + if (NVT_IS_DTDn(pInfo->timing[j].etc.status, kth)) + { + pInfo->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING; + break; + } + } + else + { + extKth = kth - DTDCount; + if (NVT_IS_EXT_DTDn(pInfo->timing[j].etc.status, extKth)) + { + pInfo->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING; + break; + } + } + } + } + } + else if (svr >= 145 && svr <= 160) + { + // TODO : Interpret as the Nth 20-byte DTD or 6- or 7-byte CVT-based descriptor, + // where N = SVR - 144 (for N = 1 to 16) + break; + } + else if (svr == 254) + { + // TODO : Interpret as the timing format indicated by the first code of the first T8VTDB + break; + } + else // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + { + // (SVR >= 1 and SVR <= 127) or (SVR >= 193 and SVR <= 253) + vic = NVT_GET_CTA_8BIT_VIC(svr); + preferTiming = EIA861B[vic-1]; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + for (j = 0; j < pInfo->total_timings; j++) + { + isMatch = NvTiming_IsTimingExactEqual(&pInfo->timing[j], &preferTiming); + if (isMatch && (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[j].etc.status) == NVT_TYPE_EDID_861ST)) + { + pInfo->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING; + break; + } + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + for (j = 0; j < pDisplayID20->total_timings; j++) + { + isMatch = NvTiming_IsTimingExactEqual(&pDisplayID20->timing[j], &preferTiming); + if (isMatch && (NVT_GET_TIMING_STATUS_TYPE(pDisplayID20->timing[j].etc.status) == NVT_TYPE_EDID_861ST)) + { + pDisplayID20->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING | NVT_FLAG_DISPLAYID_2_0_TIMING; + break; + } + } + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCea861HdrStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NVT_HDR_STATIC_METADATA *pHdrInfo = NULL; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHdrInfo = &pInfo->hdr_static_metadata_info; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHdrInfo = &pDisplayID20->cta.hdrInfo; + } + else + { + return; + } + + if (pExt861 == NULL || pHdrInfo == NULL) + { + return; + } + + // Parse the EOTF capability information.
It's possible to have multiple EOTF + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_GAMMA_SDR) + { + pHdrInfo->supported_eotf.trad_gamma_sdr_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_GAMMA_HDR) + { + pHdrInfo->supported_eotf.trad_gamma_hdr_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_SMPTE_ST2084) + { + pHdrInfo->supported_eotf.smpte_st_2084_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_FUTURE) + { + pHdrInfo->supported_eotf.future_eotf = 1; + } + + // Parse the static metadata descriptor + if (pExt861->hdr_static_metadata.byte2) + { + pHdrInfo->static_metadata_type = 1; + } + else + { + pHdrInfo->static_metadata_type = 0; + } + + pHdrInfo->max_cll = pExt861->hdr_static_metadata.byte3 & NVT_CEA861_MAX_CLL_MASK; + pHdrInfo->max_fall = pExt861->hdr_static_metadata.byte4 & NVT_CEA861_MAX_FALL_MASK; + pHdrInfo->min_cll = pExt861->hdr_static_metadata.byte5 & NVT_CEA861_MIN_CLL_MASK; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCea861DvStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, NVT_DV_STATIC_METADATA *pDvInfo) +{ + NvU32 vsvdbVersion = 0; + NVT_DV_STATIC_METADATA_TYPE0 *pDvType0 = NULL; + NVT_DV_STATIC_METADATA_TYPE1 *pDvType1 = NULL; + NVT_DV_STATIC_METADATA_TYPE1_1 *pvDvType1_1 = NULL; + NVT_DV_STATIC_METADATA_TYPE2 *pDvType2 = NULL; + + if (pExt861 == NULL || pDvInfo == NULL) + { + return; + } + + if(pExt861->vsvdb.ieee_id != NVT_CEA861_DV_IEEE_ID) + { + return; + } + + //init + NVMISC_MEMSET(pDvInfo, 0, sizeof(NVT_DV_STATIC_METADATA)); + + // copy ieee id + pDvInfo->ieee_id = pExt861->vsvdb.ieee_id; + + vsvdbVersion = (pExt861->vsvdb.vendor_data[0] & NVT_CEA861_VSVDB_VERSION_MASK) >> NVT_CEA861_VSVDB_VERSION_MASK_SHIFT; + + switch (vsvdbVersion) + { + case 0: + if (pExt861->vsvdb.vendor_data_size < sizeof(NVT_DV_STATIC_METADATA_TYPE0)) + { + return; + } + pDvType0 = (NVT_DV_STATIC_METADATA_TYPE0 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType0->VSVDB_version; + pDvInfo->supports_2160p60hz = pDvType0->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pDvType0->supports_YUV422_12bit; + pDvInfo->supports_global_dimming = pDvType0->supports_global_dimming; + pDvInfo->colorimetry = 0; // this field does not exist in type0 + pDvInfo->dm_version = (pDvType0->dm_version_major << 4) | pDvType0->dm_version_minor; + pDvInfo->target_min_luminance = (pDvType0->target_min_pq_11_4 << 4) | pDvType0->target_min_pq_3_0; + pDvInfo->target_max_luminance = (pDvType0->target_max_pq_11_4 << 4) | pDvType0->target_max_pq_3_0; + pDvInfo->cc_red_x = (pDvType0->cc_red_x_11_4 << 4) | pDvType0->cc_red_x_3_0; + pDvInfo->cc_red_y = (pDvType0->cc_red_y_11_4 << 4) | pDvType0->cc_red_y_3_0; + pDvInfo->cc_green_x = (pDvType0->cc_green_x_11_4 << 4) | pDvType0->cc_green_x_3_0; + pDvInfo->cc_green_y = (pDvType0->cc_green_y_11_4 << 4) | pDvType0->cc_green_y_3_0; + pDvInfo->cc_blue_x = (pDvType0->cc_blue_x_11_4 << 4) | pDvType0->cc_blue_x_3_0; + pDvInfo->cc_blue_y = (pDvType0->cc_blue_y_11_4 << 4) | pDvType0->cc_blue_y_3_0; + pDvInfo->cc_white_x = (pDvType0->cc_white_x_11_4 << 4) | pDvType0->cc_white_x_3_0; + pDvInfo->cc_white_y = (pDvType0->cc_white_y_11_4 << 4) | pDvType0->cc_white_y_3_0; + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = 0; + pDvInfo->supports_10b_12b_444 = 0; + break; + case 1: + if (pExt861->vsvdb.vendor_data_size == sizeof(NVT_DV_STATIC_METADATA_TYPE1)) + { + pDvType1 = 
(NVT_DV_STATIC_METADATA_TYPE1 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType1->VSVDB_version; + pDvInfo->supports_2160p60hz = pDvType1->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pDvType1->supports_YUV422_12bit; + pDvInfo->dm_version = pDvType1->dm_version; + pDvInfo->supports_global_dimming = pDvType1->supports_global_dimming; + pDvInfo->colorimetry = pDvType1->colorimetry; + pDvInfo->target_min_luminance = pDvType1->target_min_luminance; + pDvInfo->target_max_luminance = pDvType1->target_max_luminance; + pDvInfo->cc_red_x = pDvType1->cc_red_x; + pDvInfo->cc_red_y = pDvType1->cc_red_y; + pDvInfo->cc_green_x = pDvType1->cc_green_x; + pDvInfo->cc_green_y = pDvType1->cc_green_y; + pDvInfo->cc_blue_x = pDvType1->cc_blue_x; + pDvInfo->cc_blue_y = pDvType1->cc_blue_y; + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = 0; + pDvInfo->supports_10b_12b_444 = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + } + else if (pExt861->vsvdb.vendor_data_size == sizeof(NVT_DV_STATIC_METADATA_TYPE1_1)) + { + pvDvType1_1 = (NVT_DV_STATIC_METADATA_TYPE1_1 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pvDvType1_1->VSVDB_version; + pDvInfo->supports_2160p60hz = pvDvType1_1->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pvDvType1_1->supports_YUV422_12bit; + pDvInfo->dm_version = pvDvType1_1->dm_version; + pDvInfo->supports_global_dimming = pvDvType1_1->supports_global_dimming; + pDvInfo->colorimetry = pvDvType1_1->colorimetry; + pDvInfo->target_min_luminance = pvDvType1_1->target_min_luminance; + pDvInfo->target_max_luminance = pvDvType1_1->target_max_luminance; + pDvInfo->cc_green_x = NVT_DOLBY_CHROMATICITY_MSB_GX | pvDvType1_1->unique_Gx; + pDvInfo->cc_green_y = NVT_DOLBY_CHROMATICITY_MSB_GY | pvDvType1_1->unique_Gy; + pDvInfo->cc_blue_x = NVT_DOLBY_CHROMATICITY_MSB_BX | pvDvType1_1->unique_Bx; + pDvInfo->cc_blue_y = NVT_DOLBY_CHROMATICITY_MSB_BY | pvDvType1_1->unique_By; + pDvInfo->cc_red_x = NVT_DOLBY_CHROMATICITY_MSB_RX | pvDvType1_1->unique_Rx; + pDvInfo->cc_red_y = NVT_DOLBY_CHROMATICITY_MSB_RY | (pvDvType1_1->unique_Ry_bit_0 | (pvDvType1_1->unique_Ry_bit_1 <<1) | (pvDvType1_1->unique_Ry_bit_2_to_4 << 2)); + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = pvDvType1_1->interface_supported_by_sink; + pDvInfo->supports_10b_12b_444 = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + } + else + { + return; + } + + break; + case 2: + if (pExt861->vsvdb.vendor_data_size < sizeof(NVT_DV_STATIC_METADATA_TYPE2)) + { + return; + } + pDvType2 = (NVT_DV_STATIC_METADATA_TYPE2 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType2->VSVDB_version; + pDvInfo->supports_backlight_control = pDvType2->supports_backlight_control; + pDvInfo->supports_YUV422_12bit = pDvType2->supports_YUV422_12bit; + pDvInfo->dm_version = pDvType2->dm_version; + pDvInfo->backlt_min_luma = pDvType2->backlt_min_luma; + pDvInfo->supports_global_dimming = pDvType2->supports_global_dimming; + pDvInfo->target_min_luminance = pDvType2->target_min_luminance; + pDvInfo->interface_supported_by_sink = pDvType2->interface_supported_by_sink; + pDvInfo->target_max_luminance = pDvType2->target_max_luminance; + pDvInfo->cc_green_x = NVT_DOLBY_CHROMATICITY_MSB_GX | pDvType2->unique_Gx; + pDvInfo->cc_green_y = NVT_DOLBY_CHROMATICITY_MSB_GY | pDvType2->unique_Gy; + pDvInfo->cc_blue_x = 
NVT_DOLBY_CHROMATICITY_MSB_BX | pDvType2->unique_Bx; + pDvInfo->cc_blue_y = NVT_DOLBY_CHROMATICITY_MSB_BY | pDvType2->unique_By; + pDvInfo->cc_red_x = NVT_DOLBY_CHROMATICITY_MSB_RX | pDvType2->unique_Rx; + pDvInfo->cc_red_y = NVT_DOLBY_CHROMATICITY_MSB_RY | pDvType2->unique_Ry; + pDvInfo->supports_10b_12b_444 = pDvType2->supports_10b_12b_444_bit0 | (pDvType2->supports_10b_12b_444_bit1 << 1); + pDvInfo->colorimetry = 0; + pDvInfo->supports_2160p60hz = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + break; + default: + break; + } +} + +// find both hdmi llc and hdmi forum vendor specific data block and return basic hdmi information +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861VsdbBlocks(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag + ) +{ + NvU32 i; + + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NVT_HDMI_LLC_INFO *pHdmiLlc = NULL; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVDA_VSDB_PARSED_INFO *pNvVsdb = NULL; + MSFT_VSDB_PARSED_INFO *pMsftVsdb = NULL; + + if (pExt861 == NULL || pRawInfo == NULL) + { + return; + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHdmiLlc = &pInfo->hdmiLlcInfo; + pHfvs = &pInfo->hdmiForumInfo; + pNvVsdb = &pInfo->nvdaVsdbInfo; + pMsftVsdb = &pInfo->msftVsdbInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHdmiLlc = &pDisplayID20->vendor_specific.hdmiLlc; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + pNvVsdb = &pDisplayID20->vendor_specific.nvVsdb; + pMsftVsdb = &pDisplayID20->vendor_specific.msftVsdb; + } + else + { + return; + } + + if (pHdmiLlc == NULL || pHfvs == NULL || pNvVsdb == NULL || pMsftVsdb == NULL || (pExt861->total_vsdb == 0)) + { + return; + } + + for (i = 0; i < pExt861->total_vsdb; i++) + { + // Assumes each vsdb is unique for this CEA block, e.g., no two HDMI_IEEE_ID + switch (pExt861->vsdb[i].ieee_id) + { + case NVT_CEA861_HDMI_IEEE_ID: + // set any 3D timings and HDMI extended timing specified in the VSDB + parseEdidHdmiLlcBasicInfo((VSDB_DATA *)(&pExt861->vsdb[i]), pHdmiLlc); + pExt861->valid.H14B_VSDB = 1; + break; + + case NVT_CEA861_HDMI_FORUM_IEEE_ID: + parseEdidHdmiForumVSDB((VSDB_DATA *)(&pExt861->vsdb[i]), pHfvs); + pExt861->valid.H20_HF_VSDB = 1; + break; + + case NVT_CEA861_NVDA_IEEE_ID: + parseEdidNvidiaVSDBBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pNvVsdb); + break; + + case NVT_CEA861_MSFT_IEEE_ID: + parseEdidMsftVsdbBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pMsftVsdb); + break; + + } + } + + // H20_HF_VSDB shall be listed only if H14B_VSDB is also listed + // H20_HF_VSDB should not specify > 600MHz + nvt_assert(!pExt861->valid.H20_HF_VSDB || (pExt861->valid.H14B_VSDB && (pHfvs->max_TMDS_char_rate <= 0x78))); + + // Done with reading CEA VSDB blocks, sanitize them now + if (pExt861->valid.SCDB) + { + pHdmiLlc->effective_tmds_clock = pExt861->hfscdb[1]; + } + else if (pExt861->valid.H14B_VSDB) + { + // HDMI 2.0 Spec - section 10.3.2 + // The maximum Rate = Max_TMDS_Character_Rate * 5 MHz. + // If the Sink does not support TMDS Character Rates > 340 Mcsc, then the Sink shall set this field to 0. + // If the Sink supports TMDS Character Rates > 340 Mcsc, the Sink shall set Max_TMDS_Character_Rate appropriately and non - zero. + + // Pick updated TMDS clock rate + pHdmiLlc->effective_tmds_clock = (pExt861->valid.H20_HF_VSDB) ? 
+ MAX(pHdmiLlc->max_tmds_clock, pHfvs->max_TMDS_char_rate) : + MIN(pHdmiLlc->max_tmds_clock, 0x44); + } + +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861HfEeodb(NVT_EDID_CEA861_INFO *pExt861, + NvU32 *pTotalEdidExtensions) +{ + // *pTotalEdidExtensions set by the edid extension flag should be >= 1 for HFEEODB to be valid. + if (pTotalEdidExtensions == NULL || pExt861 == NULL || !pExt861->valid.HF_EEODB || *pTotalEdidExtensions == 0) + { + return; + } + + // HDMI 2.1 AmendmentA1 specifies that if EEODB is present sources shall ignore the Extension flag. + // This effectively overrides the extension count from extension flag. + *pTotalEdidExtensions = pExt861->hfeeodb; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861HfScdb(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NVT_EDID_INFO *pInfo = (NVT_EDID_INFO *)pRawInfo; + VSDB_DATA vsdbData; + + if (pExt861 == NULL || pRawInfo == NULL) + { + return; + } + + if (!pExt861->valid.SCDB || pExt861->valid.H20_HF_VSDB) + { + return; + } + NVMISC_MEMSET(&vsdbData, 0, sizeof(vsdbData)); + NVMISC_MEMCPY(&vsdbData.vendor_data, pExt861->hfscdb, sizeof(vsdbData.vendor_data)); + + vsdbData.vendor_data_size = pExt861->hfscdbSize; + + parseEdidHdmiForumVSDB(&vsdbData, &pInfo->hdmiForumInfo); +} + + +CODE_SEGMENT(PAGE_DD_CODE) +void getEdidHDM1_4bVsdbTiming(NVT_EDID_INFO *pInfo) +{ + NvU32 i = 0, j = 0; + + for (i = 0; i < 2; ++i) + { + NVT_EDID_CEA861_INFO *pExt861 = (0 == i) ? &pInfo->ext861 : &pInfo->ext861_2; + + for (j = 0; j < pExt861->total_vsdb; ++j) + { + switch (pExt861->vsdb[j].ieee_id) + { + case NVT_CEA861_HDMI_IEEE_ID: + { + NvU32 count = 0; + // set any 3D timings and HDMI extended timing specified in the VSDB + parseEdidHDMILLCTiming(pInfo, (VSDB_DATA *)(&pExt861->vsdb[j]), &count, &(pInfo->Hdmi3Dsupport)); + pInfo->HDMI3DSupported = 0 < count; + break; + } + + default: + break; + } + } + } +} + +// get the full EDID 861 extension info +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS get861ExtInfo(NvU8 *p, NvU32 size, NVT_EDID_CEA861_INFO *p861info) +{ + + NvU32 dtd_offset; + // sanity check + if (p == NULL || size < sizeof(EDIDV1STRUC)) + { + return NVT_STATUS_ERR; + } + + // make sure we have 861 extension + if (p[0] != 0x2 || p[1] < NVT_CEA861_REV_ORIGINAL) + { + return NVT_STATUS_ERR; + } + + + // don't do anything further if p is NULL + if (p861info == NULL) + { + return NVT_STATUS_SUCCESS; + } + + // init + NVMISC_MEMSET(p861info, 0, sizeof(NVT_EDID_CEA861_INFO)); + + // get the revision number + p861info->revision = p[1]; + + // no extra info for 861-original, returning from here + if (p861info->revision == NVT_CEA861_REV_ORIGINAL) + { + return NVT_STATUS_SUCCESS; + } + + p861info->basic_caps = p[3]; + + // no extra info for 861-A, returning from here + if (p861info->revision == NVT_CEA861_REV_A) + { + return NVT_STATUS_SUCCESS; + } + + dtd_offset = (NvU32)p[2]; + if (dtd_offset == 0 || dtd_offset == 4) + { + return NVT_STATUS_SUCCESS; + } + + // resolve all short descriptors in the reserved block + // reserved block starts from offset 04 to dtd_offset-1 + return parseCta861DataBlockInfo(&p[4], dtd_offset - 4, p861info); +} + +// get the 861 extension tags info +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS parseCta861DataBlockInfo(NvU8 *p, + NvU32 size, + NVT_EDID_CEA861_INFO *p861info) +{ + NvU32 i, j; + NvU32 video_index = 0; + NvU32 audio_index = 0; + NvU32 speaker_index = 0; + NvU32 vendor_index = 0; + NvU32 yuv420vdb_index = 0; + NvU32 yuv420cmdb_index = 0; + NvU8 svr_index = 0; + NvU32 tag, ext_tag, 
payload; + i = 0; + + while (i < size) + { + // get the descriptor's tag and payload size + tag = NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(p[i]); + payload = NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(p[i]); + + // move the pointer to the payload section + i++; + + // loop through all descriptors + if (tag == NVT_CEA861_TAG_VIDEO) + { + // short video descriptor + for (j = 0; j < payload; j ++, i ++, video_index ++) + { + if (video_index < NVT_CEA861_VIDEO_MAX_DESCRIPTOR) + { + p861info->video[video_index] = p[i]; + } + else + { + break; + } + } + p861info->total_svd = (NvU8)video_index; + } + else if (tag == NVT_CEA861_TAG_AUDIO) + { + // short audio descriptor + for (j = 0; j < payload / 3; j ++, i += 3, audio_index ++) + { + if (audio_index < NVT_CEA861_AUDIO_MAX_DESCRIPTOR) + { + p861info->audio[audio_index].byte1 = p[i]; + p861info->audio[audio_index].byte2 = p[i+1]; + p861info->audio[audio_index].byte3 = p[i+2]; + } + else + { + break; + } + } + p861info->total_sad = (NvU8)audio_index; + } + else if (tag == NVT_CEA861_TAG_SPEAKER_ALLOC) + { + // speaker allocation descriptor + for (j = 0; j < payload / 3; j ++, i += 3, speaker_index ++) + { + if (speaker_index < NVT_CEA861_SPEAKER_MAX_DESCRIPTOR) + { + p861info->speaker[speaker_index].byte1 = p[i]; + p861info->speaker[speaker_index].byte2 = p[i+1]; + p861info->speaker[speaker_index].byte3 = p[i+2]; + } + else + { + break; + } + } + p861info->total_ssd = (NvU8)speaker_index; + } + else if (tag == NVT_CEA861_TAG_VENDOR) + { + if (vendor_index < NVT_CEA861_VSDB_MAX_BLOCKS) + { + if (payload < 3) + { + // This malformed payload will cause a hang below. + return NVT_STATUS_ERR; + } + + p861info->vsdb[vendor_index].ieee_id = p[i]; //IEEE ID low byte + p861info->vsdb[vendor_index].ieee_id |= (p[i+1]) << 8; //IEEE ID middle byte + p861info->vsdb[vendor_index].ieee_id |= (p[i+2]) << 16; //IEEE ID high byte + + p861info->vsdb[vendor_index].vendor_data_size = payload - 3; + + // move the pointer to the payload + i += 3; + + // get the other vendor specific data + for (j = 0; j < payload - 3; j ++, i ++) + { + if (j < NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH) + { + p861info->vsdb[vendor_index].vendor_data[j] = p[i]; + } + } + vendor_index++; + } + } + else if (tag == NVT_CEA861_TAG_EXTENDED_FLAG) + { + if (payload >= 1) + { + ext_tag = p[i]; + if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_CAP && payload >= 2) + { + p861info->video_capability = p[i + 1] & NVT_CEA861_VIDEO_CAPABILITY_MASK; + p861info->valid.VCDB = 1; + i += 2; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_COLORIMETRY && payload >= 3) + { + p861info->colorimetry.byte1 = p[i + 1] & NVT_CEA861_COLORIMETRY_MASK; + p861info->colorimetry.byte2 = p[i + 2] & NVT_CEA861_GAMUT_METADATA_MASK; + p861info->valid.colorimetry = 1; + i += 3; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_FORMAT_PREFERENCE && payload >= 2) + { + // when present, indicates the order of preference for selected Video Formats listed as DTDs and/or SVDs throughout Block 0 and the CTA Extensions of the EDID. + // The order of SVD preferred modes shall take precedence over preferred modes defined elsewhere in the EDID/CEA861 blocks. + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (svr_index < NVT_CEA861_VFPDB_MAX_DESCRIPTOR); j++, i++, svr_index++) + { + p861info->svr_vfpdb[svr_index] = p[i]; + } + p861info->total_vfpdb = svr_index; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_VIDEO && payload >= 2) + { + // when present, lists SVDs that are only supported in YCbCr 4:2:0 + + // exclude the extended tag + i++;
payload--; + + for (j = 0; (j < payload) && (yuv420vdb_index < NVT_CEA861_Y420VDB_MAX_DESCRIPTOR); j++, i++, yuv420vdb_index++) + { + p861info->svd_y420vdb[yuv420vdb_index] = p[i]; + } + p861info->total_y420vdb = (NvU8)yuv420vdb_index; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_CAP && payload >= 1) + { + // when present, provides bitmap to video SVDs that also support YCbCr 4:2:0 in addition to RGB, YCbCr 4:4:4, and/or YCbCr 4:2:2 + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (yuv420cmdb_index < NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR); j++, i++, yuv420cmdb_index++) + { + p861info->map_y420cmdb[yuv420cmdb_index] = p[i]; + } + p861info->total_y420cmdb = (NvU8)yuv420cmdb_index; + + p861info->valid.y420cmdb = 1; // total_y420cmdb is not enough as this could be 0. See CEA861-F 7.5.11 + } + else if(ext_tag == NVT_CEA861_EXT_TAG_HDR_STATIC_METADATA && payload >= 3) + { + p861info->hdr_static_metadata.byte1 = p[i + 1] & NVT_CEA861_EOTF_MASK; // This byte has bits which identify which EOTFs are supported by the sink. + p861info->hdr_static_metadata.byte2 = p[i + 2] & NVT_CEA861_STATIC_METADATA_DESCRIPTOR_MASK; // This byte has bits which identify which Static Metadata descriptors are supported by the sink. + + i += 3; + + if (payload > 3) + { + p861info->hdr_static_metadata.byte3 = p[i]; + i++; + } + + if (payload > 4) + { + p861info->hdr_static_metadata.byte4 = p[i]; + i++; + } + + if (payload > 5) + { + p861info->hdr_static_metadata.byte5 = p[i]; + i++; + } + + p861info->valid.hdr_static_metadata = 1; + } + else if(ext_tag == NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_VIDEO && ((payload >= 14) || (payload >= 11))) //version 2 of VSDB has 11 bytes of data and version 1 has 14 + { + + // exclude the extended tag + i++; payload--; + + p861info->vsvdb.ieee_id = p[i]; //IEEE ID low byte + p861info->vsvdb.ieee_id |= (p[i + 1]) << 8; //IEEE ID middle byte + p861info->vsvdb.ieee_id |= (p[i + 2]) << 16; //IEEE ID high byte + + p861info->vsvdb.vendor_data_size = payload - 3; + + // move the pointer to the payload + i += 3; + + // get the other vendor specific video data + for (j = 0; j < payload - 3; j++, i++) + { + if (j < NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH) + { + p861info->vsvdb.vendor_data[j] = p[i]; + } + } + p861info->valid.dv_static_metadata = 1; + } + else if(ext_tag == NVT_CTA861_EXT_TAG_SCDB && payload >= 7) // sizeof(HDMI Forum Sink Capability Data Block) ranges between 7 and 31 bytes + { + // As per HDMI2.1 A1 amendment Sink Capability Data Structure(SCDS) can alternatively be included in HDMI Forum Sink Capability Data Block(HF-SCDB), + // instead of HF-VSDB, to indicate HDMI2.1 capability. + // Sinks will expose HF-SCDB if they do not expose HF-VSDB. + + // move pointer to SCDS + i += 3; + + // Copy SCDS over to p861info->hfscdb[]. Parsing will later be handled in parseEdidHdmiForumVSDB().
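+ // (parseCta861HfScdb() later wraps hfscdb[] in a VSDB_DATA and reuses parseEdidHdmiForumVSDB(), so the bytes copied below must line up with the HF-VSDB vendor_data layout.)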
+ for (j = 0; (j < payload - 3) && (j < NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH); j ++, i ++) + { + p861info->hfscdb[j] = p[i]; + } + p861info->hfscdbSize = MIN(payload - 3, NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH); + p861info->valid.SCDB = 1; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_HF_EEODB && payload == 2) + { + // Skip over extended tag + i++; payload--; + + p861info->hfeeodb = p[i]; + p861info->valid.HF_EEODB = 1; + i += payload; + } + else + { + // skip the unrecognized extended block + i += payload; + } + } + } + else + { + // reserved block, just skip here + i += payload; + } + } + + p861info->total_vsdb = (NvU8)vendor_index; + + return NVT_STATUS_SUCCESS; +} + +// enum the EIA/CEA 861B predefined timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumCEA861bTiming(NvU32 ceaFormat, NVT_TIMING *pT) +{ + if (pT == NULL || ceaFormat == 0 || ceaFormat > MAX_CEA861B_FORMAT) + { + return NVT_STATUS_ERR; + } + + ceaFormat = NVT_GET_CTA_8BIT_VIC(ceaFormat); + + *pT = EIA861B[ceaFormat - 1]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + NVT_SET_CEA_FORMAT(pT->etc.status, ceaFormat); + + NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "CTA-861G:#%3d:%dx%dx%3d.%03dHz/%s", (int)ceaFormat, (int)pT->HVisible, (int)((pT->interlaced ? 2 : 1)*pT->VVisible), (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? "I":"P")); + pT->etc.name[sizeof(pT->etc.name) - 1] = '\0'; + + return NVT_STATUS_SUCCESS; +} + + +// Check whether the given timing is a CEA 861 timing. +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_GetCEA861TimingIndex (NVT_TIMING *pT) +{ + NvU32 i = 0, j = 0; + NvU32 ceaIndex = 0; + NvU32 aspect_x; + NvU32 aspect_y; + + if (pT == NULL) + { + return ceaIndex; + } + + if (NVT_GET_CEA_FORMAT(pT->etc.status) != 0) + { + // CEA format has been set, done + return NVT_GET_CEA_FORMAT(pT->etc.status); + } + + aspect_x = nvt_aspect_x(pT->etc.aspect); + aspect_y = nvt_aspect_y(pT->etc.aspect); + + // loop through the pre-defined CEA 861 table + // Skip VIC1 - Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + for (i = 1; i < MAX_CEA861B_FORMAT; i++) + { + if (NvTiming_IsTimingRelaxedEqual(pT, &EIA861B[i])) + { + // The timing matches with a CEA 861 timing. Set CEA format to NVT_TIMING.etc.status. 
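+ // Example: a relaxed match on 720x480p can resolve to either VIC 2 (4:3) or VIC 3 (16:9); the dual-aspect pass below compares the caller's aspect ratio against both table entries and keeps the closer one.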
+ ceaIndex = NVT_GET_TIMING_STATUS_SEQ(EIA861B[i].etc.status); + + if (!aspect_x || !aspect_y) + { + return ceaIndex; + } + + // for the dual-aspect ratio timings we should further check the aspect ratio matching (16:9 or 4:3) based on the integer rounding error + for (j = 0; j < MAX_EIA861B_DUAL_ASPECT_VICS; j++) + { + if (ceaIndex == EIA861B_DUAL_ASPECT_VICS[j][0]) + { + NvU32 ceaIndex1 = EIA861B_DUAL_ASPECT_VICS[j][1]; + + NvU32 format1 = axb_div_c(aspect_x, nvt_aspect_y(EIA861B[ceaIndex - 1].etc.aspect), aspect_y); + NvU32 format2 = axb_div_c(aspect_x, nvt_aspect_y(EIA861B[ceaIndex1 - 1].etc.aspect), aspect_y); + + NvU32 format_1_diff = abs_delta(format1, nvt_aspect_x(EIA861B[ceaIndex - 1].etc.aspect)); + NvU32 format_2_diff = abs_delta(format2, nvt_aspect_x(EIA861B[ceaIndex1 - 1].etc.aspect)); + + if (format_2_diff < format_1_diff) + { + ceaIndex = ceaIndex1; + } + break; + } + else if (ceaIndex < EIA861B_DUAL_ASPECT_VICS[j][0]) // not a dual-aspect ratio timing + { + break; + } + } + break; + } + } + return ceaIndex; +} + +// calculate 861B based timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCEA861bTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 pixelRepeatCount, NVT_TIMING *pT) + +{ + NvU32 i = 0; + NvU16 pixelRepeatMask = 1 << (pixelRepeatCount - 1); + + nvt_assert(pixelRepeatCount > 0 && pixelRepeatCount <= 10); + + if (pT == NULL) + { + return NVT_STATUS_ERR; + } + + // loop through the table + for (i = 0; i < MAX_CEA861B_FORMAT; i ++) + { + + if ((EIA861B[i].etc.rep & pixelRepeatMask) == 0) + { + continue; + } + + if (width == (NvU32)NvTiming_MaxFrameWidth(EIA861B[i].HVisible, pixelRepeatMask) && + height == frame_height(EIA861B[i]) && + rr == EIA861B[i].etc.rr && + (!!(flag & NVT_PVT_INTERLACED_MASK)) == (!!EIA861B[i].interlaced)) + { + *pT = EIA861B[i]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + + NVT_SET_CEA_FORMAT(pT->etc.status, NVT_GET_TIMING_STATUS_SEQ(pT->etc.status)); + + NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "CTA-861G:#%3d:%dx%dx%3d.%03dHz/%s", (int)NVT_GET_TIMING_STATUS_SEQ(pT->etc.status), (int)pT->HVisible, (int)((pT->interlaced ? 2 : 1)*pT->VVisible), (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? "I":"P")); + pT->etc.name[sizeof(pT->etc.name) - 1] = '\0'; + + return NVT_STATUS_SUCCESS; + } + } + + return NVT_STATUS_ERR; + +} + +// Assign fields in NVT_VIDEO_INFOFRAME_CTRL, using NVT_TIMING +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVideoInfoframeCtrl(const NVT_TIMING *pTiming, NVT_VIDEO_INFOFRAME_CTRL *pCtrl) +{ + // setup VIC code if it is not specified + if (pCtrl->video_format_id == NVT_INFOFRAME_CTRL_DONTCARE || + pCtrl->video_format_id == 0 || + pCtrl->video_format_id > NVT_CEA861_1920X1080P_29970HZ_16X9) + { + // setup video format ID + pCtrl->video_format_id = (NvU8)NVT_GET_CEA_FORMAT(pTiming->etc.status); + if (pCtrl->video_format_id < NVT_CEA861_640X480P_59940HZ_4X3 || + pCtrl->video_format_id > NVT_CTA861_4096x2160p_119880HZ_256X135) + { + // Prior RFE 543088 + if (pCtrl->video_format_id == 0 && + NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == NVT_TYPE_EDID_861ST) + { + pCtrl->video_format_id = (NvU8)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status); + } + if (pCtrl->video_format_id == 0 && + pTiming->HVisible == 640 && + pTiming->VVisible == 480 && + pTiming->interlaced == 0 && + pTiming->etc.rr == 60) + { + pCtrl->video_format_id = NVT_CEA861_640X480P_59940HZ_4X3; + } + } + } + + // for HDMI_EXT timing, AVI VIC should be 0.
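+ // HDMI extended timings (see HDMI_EXT_4Kx2K_TIMING above) are signaled through the HDMI VSIF HDMI_VIC field rather than the AVI InfoFrame VIC, hence VIC 0 here.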
+ if (NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == NVT_TYPE_HDMI_EXT) + { + pCtrl->video_format_id = 0; + } + + // setup aspect ratio if it is not specified + if (pCtrl->pic_aspect_ratio == NVT_INFOFRAME_CTRL_DONTCARE || + pCtrl->pic_aspect_ratio == NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA || + pCtrl->pic_aspect_ratio > NVT_VIDEO_INFOFRAME_BYTE2_M1M0_FUTURE) + { + // extract the screen measurements from the DTD aspect ratio. + // (we pack the height & width in a DWORD to form the aspect ratio) + + NvU32 x,y; + x = (pTiming->etc.aspect & 0x0fff); + y = ((pTiming->etc.aspect >> 16) & 0x0fff); + + if (axb_div_c(y,3,x) == 4) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3; + } + else if (axb_div_c(y,9,x) == 16) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_16X9; + } + else if (pCtrl->video_format_id == NVT_CEA861_640X480P_59940HZ_4X3) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3; + } + else + { + // default to no data, to cover other non-cea modes + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA; + } + } + + if (pCtrl->it_content == NVT_INFOFRAME_CTRL_DONTCARE) + { + // Initialize ITC flag to NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT + pCtrl->it_content = NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT; + pCtrl->it_content_type = NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GRAPHICS; + } + + if (pCtrl->pixel_repeat == NVT_INFOFRAME_CTRL_DONTCARE) + { + // Initialize pixel repetitions + NvU32 pixelRepeat = pTiming->etc.rep; + LOWESTBITIDX_32(pixelRepeat); + pCtrl->pixel_repeat = (NvU8)pixelRepeat; + } + + return NVT_STATUS_SUCCESS; +} + + +// construct AVI video infoframe based on the user control and the current context state +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVideoInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VIDEO_INFOFRAME_CTRL *pCtrl, NVT_VIDEO_INFOFRAME *pContext, NVT_VIDEO_INFOFRAME *pInfoFrame) +{ + // parameter check + if (pEdidInfo == NULL || pInfoFrame == NULL) + { + return NVT_STATUS_ERR; + } + + // infoframe is only supported on 861A and later + if (pEdidInfo->ext861.revision < NVT_CEA861_REV_A) + { + return NVT_STATUS_ERR; + } + + // if context state is provided, use it to initialize the infoframe buffer + if (pContext != NULL) + { + *pInfoFrame = *pContext; + } + else + { + *pInfoFrame = DEFAULT_VIDEO_INFOFRAME; + } + + // init the header + pInfoFrame->type = NVT_INFOFRAME_TYPE_VIDEO; + + // TODO : This is just to check the version; we still need to change lots of structures + // "NVT_VIDEO_INFOFRAME" / "VIDEO_INFOFRAME" / "DEFAULT_VIDEO_INFOFRAME" / "NVM_DISP_STATE" etc.. + // to accept the new ACE0-3 bits supported in the future. Right now no sink supports this. + // + // Based on the latest CTA-861-G-Errata.pdf file, we need to use the following logic to get the correct CTA861 version + // When Y2 = 0, the following algorithm shall be used for AVI InfoFrame version selection: + // if (C=3 and EC=7) + // Sources shall use AVI InfoFrame Version 4. + // Else if (VIC>=128) + // Sources shall use AVI InfoFrame Version 3. + // Else + // Sources shall use AVI InfoFrame Version 2. + // End if + // + if (pCtrl) + { + if (nvt_get_bits(pInfoFrame->byte1, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT) <= NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_FUTURE) // this shall always be 0.
+ { + if ((nvt_get_bits(pInfoFrame->byte2, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT) == NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY) && + //EC2-0 is based on section 7.5.5 of CTA-861-G, which defines whether the DCI-P3 bit is set at byte4 + (nvt_get_bits(pInfoFrame->byte3, NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT) == NVT_VIDEO_INFOFRAME_BYTE3_EC_AdditionalColorExt)) + { + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_4; // just put the logic here to get the correct version 4, but it shall not be used at the current stage. + } + else + { + pInfoFrame->version = (((pCtrl->video_format_id & NVT_VIDEO_INFOFRAME_BYTE4_VIC7) == NVT_VIDEO_INFOFRAME_BYTE4_VIC7) ? NVT_VIDEO_INFOFRAME_VERSION_3 : + ((pEdidInfo->ext861.revision >= NVT_CEA861_REV_B) ? NVT_VIDEO_INFOFRAME_VERSION_2 : NVT_VIDEO_INFOFRAME_VERSION_1)); + } + } + } + else + { + pInfoFrame->version = (pEdidInfo->ext861.revision >= NVT_CEA861_REV_B) ? NVT_VIDEO_INFOFRAME_VERSION_2 : NVT_VIDEO_INFOFRAME_VERSION_1; + } + pInfoFrame->length = sizeof(NVT_VIDEO_INFOFRAME) - sizeof(NVT_INFOFRAME_HEADER); + + if (pInfoFrame->version < NVT_VIDEO_INFOFRAME_VERSION_3) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, 0, NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_MASK, NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_SHIFT); + } + + if (pInfoFrame->version == NVT_VIDEO_INFOFRAME_VERSION_2) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, 0, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_MASK, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_SHIFT); + } + else if (pInfoFrame->version == NVT_VIDEO_INFOFRAME_VERSION_1) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, 0, NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte4, 0, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte5, 0, NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_SHIFT); + } + + // construct the desired infoframe contents based on the control + if (pCtrl) + { + // byte 1 + if (pCtrl->color_space != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->color_space, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT); + } + + if (pCtrl->active_format_info_present != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->active_format_info_present, NVT_VIDEO_INFOFRAME_BYTE1_A0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_A0_SHIFT); + } + + if (pCtrl->bar_info != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->bar_info, NVT_VIDEO_INFOFRAME_BYTE1_B1B0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_B1B0_SHIFT); + } + + if (pCtrl->scan_info != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->scan_info, NVT_VIDEO_INFOFRAME_BYTE1_S1S0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_S1S0_SHIFT); + } + + // byte 2 + if (pCtrl->colorimetry != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->colorimetry, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT); + } + + if (pCtrl->pic_aspect_ratio != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->pic_aspect_ratio, NVT_VIDEO_INFOFRAME_BYTE2_M1M0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_M1M0_SHIFT); + } + + if (pCtrl->active_format_aspect_ratio != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->active_format_aspect_ratio, NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_MASK,
NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SHIFT); + } + + // byte 3 + if (pCtrl->it_content != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->it_content, NVT_VIDEO_INFOFRAME_BYTE3_ITC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_ITC_SHIFT); + } + + if (pCtrl->extended_colorimetry != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->extended_colorimetry, NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT); + } + + if (pCtrl->rgb_quantization_range != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->rgb_quantization_range, NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_MASK, NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_SHIFT); + } + + if (pCtrl->nonuniform_scaling != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->nonuniform_scaling, NVT_VIDEO_INFOFRAME_BYTE3_SC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_SC_SHIFT); + } + + // byte 4 and byte 5 only supported on InfoFrame 2.0 + if (pInfoFrame->version >= NVT_VIDEO_INFOFRAME_VERSION_2) + { + // byte 4 + if (pCtrl->video_format_id != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, pCtrl->video_format_id, NVT_VIDEO_INFOFRAME_BYTE4_VIC_MASK, NVT_VIDEO_INFOFRAME_BYTE4_VIC_SHIFT); + } + + // byte 5 + if (pCtrl->pixel_repeat != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, pCtrl->pixel_repeat, NVT_VIDEO_INFOFRAME_BYTE5_PR_MASK, NVT_VIDEO_INFOFRAME_BYTE5_PR_SHIFT); + } + + // byte5 + if (pCtrl->it_content_type != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, pCtrl->it_content_type, NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_MASK, NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_SHIFT); + } + } + + // byte 6~13, bar info + if (pCtrl->top_bar != 0xFFFF) + { + pInfoFrame->top_bar_low = (NvU8)(pCtrl->top_bar % 0x100); + pInfoFrame->top_bar_high = (NvU8)(pCtrl->top_bar / 0x100); + } + if (pCtrl->bottom_bar != 0xFFFF) + { + pInfoFrame->bottom_bar_low = (NvU8)(pCtrl->bottom_bar % 0x100); + pInfoFrame->bottom_bar_high = (NvU8)(pCtrl->bottom_bar / 0x100); + } + if (pCtrl->left_bar != 0xFFFF) + { + pInfoFrame->left_bar_low = (NvU8)(pCtrl->left_bar % 0x100); + pInfoFrame->left_bar_high = (NvU8)(pCtrl->left_bar / 0x100); + } + if (pCtrl->right_bar != 0xFFFF) + { + pInfoFrame->right_bar_low = (NvU8)(pCtrl->right_bar % 0x100); + pInfoFrame->right_bar_high = (NvU8)(pCtrl->right_bar / 0x100); + } + } + + return NVT_STATUS_SUCCESS; +} + +// construct AVI audio infoframe based on the user control and the current context state +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructAudioInfoframe(NVT_AUDIO_INFOFRAME_CTRL *pUserCtrl, NVT_AUDIO_INFOFRAME *pContext, NVT_AUDIO_INFOFRAME *pInfoFrame) +{ + NVT_AUDIO_INFOFRAME_CTRL ctrl; + + // parameter check + if (pInfoFrame == NULL) + { + return NVT_STATUS_ERR; + } + + // use the user provided control if possible + if (pUserCtrl) + { + ctrl = *pUserCtrl; + } + else + { + // otherwise use the default control + NVMISC_MEMSET(&ctrl, NVT_INFOFRAME_CTRL_DONTCARE, sizeof(ctrl)); + } + + // if context state is provided, use it to initialize the infoframe buffer + if (pContext != NULL) + { + *pInfoFrame = *pContext; + } + else + { + *pInfoFrame = DEFAULT_AUDIO_INFOFRAME; + + // if the context state is not provide, we should user EDID info to build a default ctrl + //buildDefaultAudioInfoframeCtrl(pEdidInfo, &ctrl); + } + + // init the header + pInfoFrame->type = NVT_INFOFRAME_TYPE_AUDIO; + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_1; + pInfoFrame->length = sizeof(NVT_AUDIO_INFOFRAME) - 
sizeof(NVT_INFOFRAME_HEADER); + + // init the reserved fields + nvt_nvu8_set_bits(pInfoFrame->byte1, 0, NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte2, 0, NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte5, 0, NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_SHIFT); + pInfoFrame->rsvd_byte6 = 0; + pInfoFrame->rsvd_byte7 = 0; + pInfoFrame->rsvd_byte8 = 0; + pInfoFrame->rsvd_byte9 = 0; + pInfoFrame->rsvd_byte10 = 0; + + // byte 1 + if (ctrl.channel_count != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, ctrl.channel_count, NVT_AUDIO_INFOFRAME_BYTE1_CC_MASK, NVT_AUDIO_INFOFRAME_BYTE1_CC_SHIFT); + } + + if (ctrl.coding_type != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, ctrl.coding_type, NVT_AUDIO_INFOFRAME_BYTE1_CT_MASK, NVT_AUDIO_INFOFRAME_BYTE1_CT_SHIFT); + } + + // byte 2 + if (ctrl.sample_depth != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, ctrl.sample_depth, NVT_AUDIO_INFOFRAME_BYTE2_SS_MASK, NVT_AUDIO_INFOFRAME_BYTE2_SS_SHIFT); + } + + if (ctrl.sample_rate != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, ctrl.sample_rate, NVT_AUDIO_INFOFRAME_BYTE2_SF_MASK, NVT_AUDIO_INFOFRAME_BYTE2_SF_SHIFT); + } + + // byte 3 + pInfoFrame->byte3 = 0; + + // byte 4 + if (ctrl.speaker_placement != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, ctrl.speaker_placement, NVT_AUDIO_INFOFRAME_BYTE4_CA_MASK, NVT_AUDIO_INFOFRAME_BYTE4_CA_SHIFT); + } + + // byte 5 + if (ctrl.level_shift != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, ctrl.level_shift, NVT_AUDIO_INFOFRAME_BYTE5_LSV_MASK, NVT_AUDIO_INFOFRAME_BYTE5_LSV_SHIFT); + } + + if (ctrl.down_mix_inhibit != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, ctrl.down_mix_inhibit, NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_MASK, NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_SHIFT); + } + + + return NVT_STATUS_SUCCESS; + +} + +// Construct Vendor Specific Infoframe +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVendorSpecificInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pCtrl, NVT_VENDOR_SPECIFIC_INFOFRAME *pInfoFrame) +{ + NVT_STATUS RetCode = NVT_STATUS_SUCCESS; + NvU8 optIdx = 0; + NvU8 HDMIFormat; + + // parameter check + if (pEdidInfo == NULL || pInfoFrame == NULL) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // infoframe is only supported on 861A and later + if (pEdidInfo->ext861.revision < NVT_CEA861_REV_A) + { + return NVT_STATUS_ERR; + } + + + // initialize the infoframe buffer + *pInfoFrame = DEFAULT_VENDOR_SPECIFIC_INFOFRAME; + + // init the header (mostly done in default Infoframe) + pInfoFrame->Header.length = offsetof(NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD, optionalBytes); + + // construct the desired infoframe contents based on the control + if (pCtrl) + { + // clear all static reserved fields + nvt_nvu8_set_bits(pInfoFrame->Data.byte4, 0, NVT_HDMI_VS_BYTE4_RSVD_MASK, NVT_HDMI_VS_BYTE4_RSVD_SHIFT); + + // setup the parameters + nvt_nvu8_set_bits(pInfoFrame->Data.byte4, pCtrl->HDMIFormat, NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_MASK, NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_SHIFT); + + // determine what the format is -- if disabled, force the format to NONE. 
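+ //
+ // Usage sketch (illustrative only, not part of the driver flow; "vsif" is a
+ // hypothetical local): a caller that wants a frame-packed 3D VSIF would
+ // typically fill the control like this before calling in:
+ //
+ //     NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL ctrl = {0}; // assumed caller-side setup
+ //     NVT_VENDOR_SPECIFIC_INFOFRAME vsif;
+ //     ctrl.Enable      = 1;
+ //     ctrl.HDMIFormat  = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D;
+ //     ctrl.ThreeDStruc = NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK;
+ //     NvTiming_ConstructVendorSpecificInfoframe(pEdidInfo, &ctrl, &vsif);
+ //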
+ if (pCtrl->Enable) + { + HDMIFormat = pCtrl->HDMIFormat; + } + else + { + HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE; + } + + switch(HDMIFormat) + { + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE: + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 0, NVT_HDMI_VS_BYTENv_RSVD_MASK, NVT_HDMI_VS_BYTENv_RSVD_SHIFT); + break; + } + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT: + { + // Note: extended resolution frames are not yet fully supported + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, pCtrl->HDMI_VIC, NVT_HDMI_VS_BYTE5_HDMI_VIC_MASK, NVT_HDMI_VS_BYTE5_HDMI_VIC_SHIFT); + break; + } + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D: + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 0, NVT_HDMI_VS_BYTE5_HDMI_RSVD_MASK, NVT_HDMI_VS_BYTE5_HDMI_RSVD_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, pCtrl->ThreeDStruc, NVT_HDMI_VS_BYTE5_HDMI_3DS_MASK, NVT_HDMI_VS_BYTE5_HDMI_3DS_SHIFT); + + // side by side half requires additional format data in the infoframe. + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == pCtrl->ThreeDStruc) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], pCtrl->ThreeDDetail, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SHIFT); + optIdx++; + } + if (pCtrl->MetadataPresent) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_PRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + + switch(pCtrl->MetadataType) + { + case NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX: + { + if (sizeof(pCtrl->Metadata) >= NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX && + sizeof(pInfoFrame->Data.optionalBytes) - (optIdx + 1) >= NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_MASK, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_MASK, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_SHIFT); + ++optIdx; + + NVMISC_MEMCPY(pCtrl->Metadata, &pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX); + optIdx += NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX; + } + else + { + // not enough data in the control struct or not enough room in the infoframe -- BOTH compile time issues!! + // ignore metadata. + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + } + break; + } + default: + { + // unrecognised metadata, recover the best we can. + // note -- can not copy whatever is there because type implies length. + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + RetCode = NVT_STATUS_ERR; + } + } + + } + else + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + } + break; + } + } + // clear last byte of infoframe (reserved per spec). 
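+ //
+ // Worked example (assumed values, for illustration): for a plain 3D
+ // frame-packed frame with no side-by-side-half detail and no metadata,
+ // optIdx is still 0 at this point, so Header.length below grows by
+ // optIdx + 1 = 1 -- just the single reserved byte cleared by the loop.
+ //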
+ pInfoFrame->Header.length += optIdx + 1; + for (; optIdx < sizeof(pInfoFrame->Data.optionalBytes); ++optIdx) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTENv_RSVD, NVT_HDMI_VS_BYTENv_RSVD_MASK, NVT_HDMI_VS_BYTENv_RSVD_SHIFT); + } + } + return RetCode; +} + +// Construct Extended Metadata Packet Infoframe +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructExtendedMetadataPacketInfoframe( + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL *pCtrl, + NVT_EXTENDED_METADATA_PACKET_INFOFRAME *pInfoFrame) +{ + NVT_STATUS RetCode = NVT_STATUS_SUCCESS; + if (!pCtrl || !pInfoFrame) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // Initialize the infoframe + NVMISC_MEMSET(pInfoFrame, 0, sizeof(*pInfoFrame)); + + // Construct an infoframe to enable or disable HDMI 2.1 VRR + pInfoFrame->Header.type = NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET; + pInfoFrame->Header.firstLast = NVT_EMP_HEADER_FIRST_LAST; + pInfoFrame->Header.sequenceIndex = 0x00; + + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_VFR_ENABLE, + NVT_HDMI_EMP_BYTE1_VFR_MASK, + NVT_HDMI_EMP_BYTE1_VFR_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_NEW_ENABLE, + NVT_HDMI_EMP_BYTE1_NEW_MASK, + NVT_HDMI_EMP_BYTE1_NEW_SHIFT); + + if (!pCtrl->EnableVRR) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_END_ENABLE, + NVT_HDMI_EMP_BYTE1_END_MASK, + NVT_HDMI_EMP_BYTE1_END_SHIFT); + } + + nvt_nvu8_set_bits(pInfoFrame->Data.byte3, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SPEC_DEFINED, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_MASK, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 1, + NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_MASK, + NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte7, (pCtrl->EnableVRR ? 4 : 0), + NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_MASK, + NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_SHIFT); + + if (pCtrl->EnableVRR) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[0], + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_ENABLE, + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_MASK, + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_SHIFT); + } + + if (pCtrl->ITTiming) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[1], + pCtrl->BaseVFP, + NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_MASK, + NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_SHIFT); + + // In HDMI2.1, MD2 bit 2 is set when RB timing is used. 
+ // In HDMI2.1A, MD2 bit 2 is RSVD as 0
+ if (pCtrl->version == NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21)
+ {
+ nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[2],
+ pCtrl->ReducedBlanking,
+ NVT_HDMI_EMP_BYTE8_MD2_RB_MASK,
+ NVT_HDMI_EMP_BYTE8_MD2_RB_SHIFT);
+ }
+
+ // MSB for Base Refresh Rate
+ nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[2],
+ pCtrl->BaseRefreshRate >> 8,
+ NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_MASK,
+ NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_SHIFT);
+
+ // LSB for Base Refresh Rate
+ nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[3],
+ pCtrl->BaseRefreshRate,
+ NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_MASK,
+ NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_SHIFT);
+ }
+
+ return RetCode;
+}
+
+// Enumerate Psf Timing
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EnumNvPsfTiming(NvU32 nvPsfFormat, NVT_TIMING *pT)
+{
+ if (pT == NULL || nvPsfFormat == 0 || nvPsfFormat > MAX_PSF_FORMAT)
+ {
+ return NVT_STATUS_ERR;
+ }
+
+ *pT = PSF_TIMING[nvPsfFormat - 1];
+
+ // calculate the pixel clock
+ pT->pclk = RRx1kToPclk (pT);
+
+ return NVT_STATUS_SUCCESS;
+}
+
+// Set ActiveSpace for HDMI 3D stereo timing
+CODE_SEGMENT(PAGE_DD_CODE)
+void SetActiveSpaceForHDMI3DStereo(const NVT_TIMING *pTiming, NVT_EXT_TIMING *pExtTiming)
+{
+ // Note -- this assumes that the Timing is the 2D instance.
+ NvU16 VBlank;
+
+ // assume no active space to start.
+ pExtTiming->HDMI3D.VActiveSpace[0] = 0;
+ pExtTiming->HDMI3D.VActiveSpace[1] = 0;
+
+ if (NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK == pExtTiming->HDMI3D.StereoStructureType)
+ {
+ VBlank = pTiming->VTotal - pTiming->VVisible;
+ if (pTiming->interlaced)
+ {
+ //++++ This needs to be revisited; not sure when active space 1 & 2 should be different.
+ // (fortunately, we are not supporting any interlaced packed frame modes yet).
+ pExtTiming->HDMI3D.VActiveSpace[0] = VBlank + 1;
+ pExtTiming->HDMI3D.VActiveSpace[1] = VBlank - 1;
+ }
+ else
+ {
+ pExtTiming->HDMI3D.VActiveSpace[0] = VBlank;
+ }
+ }
+ return;
+}
+
+// Generate HDMI stereo timing from 2D timing
+CODE_SEGMENT(PAGE_DD_CODE)
+void NvTiming_GetHDMIStereoTimingFrom2DTiming(const NVT_TIMING *pTiming, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NVT_EXT_TIMING *pExtTiming)
+{
+ NvU16 VBlank;
+ NvU16 HBlank;
+
+ if ((NULL == pTiming) || (NULL == pExtTiming) || (!isHdmi3DStereoType(StereoStructureType)))
+ {
+ return;
+ }
+ // init the extended timing
+ NVMISC_MEMSET(pExtTiming, 0, sizeof(NVT_EXT_TIMING));
+
+ // copy the 2D timing to the 3D timing.
+ pExtTiming->timing = *pTiming;
+
+ // init the extension w/in the 3D timing
+ pExtTiming->HDMI3D.StereoStructureType = StereoStructureType;
+ pExtTiming->HDMI3D.SideBySideHalfDetail = SideBySideHalfDetail;
+
+
+ switch(StereoStructureType)
+ {
+ case NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK:
+ {
+ // calculate VBlank
+ VBlank = pTiming->VTotal - pTiming->VVisible;
+
+ // Use the 2D timing to calculate the Active Space
+ SetActiveSpaceForHDMI3DStereo(pTiming, pExtTiming);
+
+ // Calculate the 3D VVisible size based on the 2D VVisible and the active space.
+ if (pTiming->interlaced)
+ {
+ pExtTiming->timing.VVisible = ((pTiming->VVisible * 4) + (pExtTiming->HDMI3D.VActiveSpace[0]) * 2) + pExtTiming->HDMI3D.VActiveSpace[1];
+ }
+ else
+ {
+ pExtTiming->timing.VVisible = (pTiming->VVisible * 2) + pExtTiming->HDMI3D.VActiveSpace[0];
+ }
+ // Calculate the 3D VTotal from the 3D VVisible & the VBlank.
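+ //
+ // Numeric sketch (assuming a progressive 1080p 2D input): VVisible = 1080
+ // and VTotal = 1125 give VBlank = 45, so VActiveSpace[0] = 45; the 3D
+ // frame-packed VVisible computed above is 2*1080 + 45 = 2205, and the
+ // statement below yields VTotal = 2205 + 45 = 2250, matching the HDMI 1.4
+ // frame-packing raster.
+ //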
+ pExtTiming->timing.VTotal = pExtTiming->timing.VVisible + VBlank; + + pExtTiming->timing.etc.status = NVT_SET_TIMING_STATUS_TYPE(pExtTiming->timing.etc.status, NVT_TYPE_HDMI_STEREO); + + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL: + { + // calculate HBlank before calculating new HVisible + HBlank = pTiming->HTotal - pTiming->HVisible; + + pExtTiming->timing.HVisible = pTiming->HVisible * 2; + + pExtTiming->timing.HTotal = pExtTiming->timing.HVisible + HBlank; + + pExtTiming->timing.etc.status = NVT_SET_TIMING_STATUS_TYPE(pExtTiming->timing.etc.status, NVT_TYPE_HDMI_STEREO); + + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF: // valid formats with no timing changes. + case NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM: + { + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT: // formats we are not supporting. + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT: + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH: + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX: + { + break; + } + } + // calculate the pixel clock + pExtTiming->timing.pclk = RRx1kToPclk (&(pExtTiming->timing)); + return; +} + +// Add mode to 3D stereo support map +CODE_SEGMENT(PAGE_DD_CODE) +void AddModeToSupportMap(HDMI3DSUPPORTMAP * pMap, NvU8 Vic, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail) +{ + NvU32 i; + + if (0 < Vic) + { + // first check if the vic is already listed. + for (i = 0; i < pMap->total; ++i) + { + if (pMap->map[i].Vic == Vic) + { + break; + } + } + if (i == pMap->total) + { + // vic is not in the map. + // add it. + // note that we can't add the VIC to one of the 1st 16 entries. + // 1st 16 entries in the map are reserved for the vics from the EDID. + // if we add this VIC to the 1st 16, & there are any optional modes listed, + // the optional mode(s) will be improperly applied to this VIC as well + i = MAX(MAX_EDID_ADDRESSABLE_3D_VICS, pMap->total); + if (i < MAX_3D_VICS_SUPPORTED) + { + pMap->map[i].Vic = Vic; + pMap->total = i + 1; + } + } + nvt_assert(pMap->total <= MAX_3D_VICS_SUPPORTED); + if (i < pMap->total) + { + pMap->map[i].StereoStructureMask = pMap->map[i].StereoStructureMask | NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(StereoStructureType); + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == StereoStructureType) + { + pMap->map[i].SideBySideHalfDetail = SideBySideHalfDetail; + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidHdmiLlcBasicInfo(VSDB_DATA *pVsdb, NVT_HDMI_LLC_INFO *pHdmiLlc) +{ + NVT_HDMI_LLC_VSDB_PAYLOAD *p; + if (pVsdb == NULL || pHdmiLlc == NULL) + { + return; + } + + p = (NVT_HDMI_LLC_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + + // Minimum vendor_data_size is 2 + pHdmiLlc->addrA = p->A; + pHdmiLlc->addrB = p->B; + pHdmiLlc->addrC = p->C; + pHdmiLlc->addrD = p->D; + + // If more data is provided, we read it as well each field at a time up to video latency + if (pVsdb->vendor_data_size >= 3) + { + pHdmiLlc->supports_AI = p->Supports_AI; + pHdmiLlc->dc_48_bit = p->DC_48bit; + pHdmiLlc->dc_36_bit = p->DC_36bit; + pHdmiLlc->dc_30_bit = p->DC_30bit; + pHdmiLlc->dc_y444 = p->DC_Y444; + pHdmiLlc->dual_dvi = p->DVI_Dual; + + if (pVsdb->vendor_data_size >= 4) + { + pHdmiLlc->max_tmds_clock = p->Max_TMDS_Clock; + + if (pVsdb->vendor_data_size >= 5) + { + pHdmiLlc->latency_field_present = p->Latency_Fields_Present; + pHdmiLlc->i_latency_field_present = p->I_Latency_Fields_Present; + pHdmiLlc->hdmi_video_present = p->HDMI_Video_present; + pHdmiLlc->cnc3 = p->CNC3; + pHdmiLlc->cnc2 = p->CNC2; + pHdmiLlc->cnc1 = p->CNC1; + pHdmiLlc->cnc0 = p->CNC0; + } + } + } + +} + +// get HDMI 1.4 
specific timing (3D stereo timings and extended mode timings) +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidHDMILLCTiming(NVT_EDID_INFO *pInfo, VSDB_DATA *pVsdb, NvU32 *pMapSz, HDMI3DSUPPORTMAP * pM) +{ + NVT_HDMI_LLC_VSDB_PAYLOAD *pHdmiLLC; + NVT_HDMI_VIDEO *pHDMIVideo; + NvU32 DataCnt = 0; + NvU32 DataSz; + NvU16 i, j, k; + NvU16 Supports50Hz; + NvU16 Supports60Hz; + NvU32 vendorDataSize; + + if ((NULL == pInfo) || (NULL == pVsdb) || (NULL == pM)) + { + return; + } + + // init the support map + NVMISC_MEMSET(pM, 0, sizeof(HDMI3DSUPPORTMAP)); + Supports50Hz = 0; + Supports60Hz = 0; + + nvt_assert(pInfo->total_timings <= COUNT(pInfo->timing)); + + for (i = 0; i < pInfo->total_timings; ++i) + { + if (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_EDID_861ST) + { + if (MAX_EDID_ADDRESSABLE_3D_VICS > pM->total) + { + // fill in the VICs from the EDID (up to the 1st 16). These are used for applying any 3D optional modes listed in the LLC + // -- the optional modes are addressed based on their relative location within the EDID. + pM->map[pM->total].Vic = (NvU8) NVT_GET_TIMING_STATUS_SEQ(pInfo->timing[i].etc.status); + ++pM->total; + } + + // since we are spinning through the timing array anyway, + // check to see which refresh rates are supported. + if (50 == pInfo->timing[i].etc.rr) + { + Supports50Hz = 1; + } + else if (60 == pInfo->timing[i].etc.rr) + { + Supports60Hz = 1; + } + } + } + + if (0 == pM->total) + { + if (NULL != pMapSz) + { + *pMapSz = 0; + } + } + + vendorDataSize = pVsdb->vendor_data_size; + if ((NVT_CEA861_HDMI_IEEE_ID == pVsdb->ieee_id) && + (offsetof(NVT_HDMI_LLC_VSDB_PAYLOAD, Data) < vendorDataSize)) + { + pHdmiLLC = (NVT_HDMI_LLC_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + DataSz = (NvU32) MIN(vendorDataSize - offsetof(NVT_HDMI_LLC_VSDB_PAYLOAD, Data), sizeof(pHdmiLLC->Data)); + + if (5 <= vendorDataSize) + { + if (pHdmiLLC->Latency_Fields_Present) + { + DataCnt += (NvU32) sizeof(NVT_CEA861_LATENCY); + + if (pHdmiLLC->I_Latency_Fields_Present) + { + DataCnt += (NvU32) sizeof(NVT_CEA861_LATENCY); + } + } + + if ((pHdmiLLC->HDMI_Video_present) && + (DataSz > DataCnt) && + (DataSz - DataCnt >= sizeof(NVT_HDMI_VIDEO))) + { + pHDMIVideo = (NVT_HDMI_VIDEO *) &pHdmiLLC->Data[DataCnt]; + DataCnt += (NvU32) sizeof(NVT_HDMI_VIDEO); + + // If 3D is present, then add the basic 3D modes 1st. + if (pHDMIVideo->ThreeD_Present) + { + if ((0 != Supports50Hz) || (0 != Supports60Hz)) + { + // 50 and / or 60 Hz is supported, add 1920 x 1080 @ 24Hz 3D modes. 
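+ //
+ // Summary of the mandatory 3D formats added below (VICs are CEA-861
+ // codes, per the inline comments on each call):
+ //   frame packing / top-bottom : VIC 32 (1080p24), VIC 19 (720p50), VIC 4 (720p60)
+ //   side-by-side (half)        : VIC 20 (1080i50), VIC 5 (1080i60)
+ //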
+ AddModeToSupportMap(pM, 32, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1920 x 1080p @ 24 Hz + AddModeToSupportMap(pM, 32, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1920 x 1080p @ 24 Hz + + if (0 != Supports50Hz) + { + // add the mandatory modes for 50 Hz + AddModeToSupportMap(pM, 19, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1280 x 720p @ 50 Hz + AddModeToSupportMap(pM, 19, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1280 x 720p @ 50 Hz + // 1920 x 1080i @ 50 Hz + AddModeToSupportMap(pM, 20, NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH); + } + + if (0 != Supports60Hz) + { + // add the mandatory modes for 60 Hz + AddModeToSupportMap(pM, 4, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1280 x 720p @ 60 Hz + AddModeToSupportMap(pM, 4, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1280 x 720p @ 60 Hz + // 1920 x 1080i @ 60 Hz + AddModeToSupportMap(pM, 5, NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH); + } + } + } + + if ((DataSz > DataCnt) && + (DataSz - DataCnt >= pHDMIVideo->HDMI_VIC_Len)) + { + // handle HDMI VIC entries to add HDMI 1.4a 4kx2k extended modes + NVT_HDMI_VIC_LIST * pVicList = (NVT_HDMI_VIC_LIST *) &pHdmiLLC->Data[DataCnt]; + + for ( k = 0; k < pHDMIVideo->HDMI_VIC_Len; ++k) + { + NVT_TIMING newTiming; + + // extended mode VIC code from 1 - 4. + if ((0 < pVicList->HDMI_VIC[k]) && (pVicList->HDMI_VIC[k] <= MAX_HDMI_EXT_4Kx2K_FORMAT)) + { + NVMISC_MEMCPY(&newTiming, + &HDMI_EXT_4Kx2K_TIMING[pVicList->HDMI_VIC[k] - 1], + sizeof(newTiming)); + + // Fill in the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + } + + DataCnt += pHDMIVideo->HDMI_VIC_Len; + } + + // the following code implements parsing the HDMI 3D additional modes (all modes bitmap & additional vic modes) + // Kepler and above support 3D secondary modes + if ((pHDMIVideo->ThreeD_Present) && + ((1 == pHDMIVideo->ThreeD_Multi_Present) || (2 == pHDMIVideo->ThreeD_Multi_Present)) && + (0 < pHDMIVideo->HDMI_3D_Len) && + (DataSz > (DataCnt + 1)) && //make sure pHdmiLLC->Data[DataCnt + 1] is valid + (DataSz - DataCnt >= pHDMIVideo->HDMI_3D_Len)) + { + NvU16 AllVicStructMask; + NvU16 AllVicIdxMask; + NvU8 AllVicDetail; + + // determine which modes to apply to all VICs. + AllVicStructMask = (pHdmiLLC->Data[DataCnt] << 8) | pHdmiLLC->Data[DataCnt + 1]; + AllVicStructMask = AllVicStructMask & NVT_ALL_HDMI_3D_STRUCT_SUPPORTED_MASK; + DataCnt += 2; + + if ((2 == pHDMIVideo->ThreeD_Multi_Present) && (DataSz > (DataCnt+1))) //make sure pHdmiLLC->Data[DataCnt + 1] is valid + { + AllVicIdxMask = pHdmiLLC->Data[DataCnt] << 8 | pHdmiLLC->Data[DataCnt + 1]; + DataCnt += 2; + } + else + { + AllVicIdxMask = 0xffff; + } + + // determine what the detail should be. + AllVicDetail = 0 != (AllVicStructMask & NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK) ? NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH : 0; + + // add the modes to the Support map for all the listed VICs. + for (k = 0; k < MIN(MAX_EDID_ADDRESSABLE_3D_VICS, pM->total); ++k) + { + if ((0 != (AllVicIdxMask & (1 << k))) && (0 != pM->map[k].Vic)) + { + pM->map[k].StereoStructureMask = pM->map[k].StereoStructureMask | AllVicStructMask; + pM->map[k].SideBySideHalfDetail = AllVicDetail; + } + } + } + + // handle any additional per vic modes listed in the EDID + while (DataSz > DataCnt) + { + // get a pointer to the entry. 
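+ //
+ // Each 2D_VIC_order entry here is one byte (index plus 3D structure); a
+ // second byte of 3D detail follows only when the structure code is
+ // side-by-side(half) or above, which is why DataCnt advances by 2 in
+ // that case and by 1 otherwise (see below).
+ //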
+ NVT_3D_MULTI_LIST * pMultiListEntry = (NVT_3D_MULTI_LIST *) &pHdmiLLC->Data[DataCnt];
+
+ // apply the specified structure to the Support Map
+ pM->map[pMultiListEntry->TwoD_VIC_order].StereoStructureMask =
+ pM->map[pMultiListEntry->TwoD_VIC_order].StereoStructureMask | NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(pMultiListEntry->ThreeD_Structure);
+
+ // increment the Data count by 2 if this is side by side half,
+ // or 1 if it is any other structure.
+ if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF <= pMultiListEntry->ThreeD_Structure)
+ {
+ pM->map[pMultiListEntry->TwoD_VIC_order].SideBySideHalfDetail = pMultiListEntry->ThreeD_Detail;
+ DataCnt += 2;
+ }
+ else
+ {
+ pM->map[pMultiListEntry->TwoD_VIC_order].SideBySideHalfDetail = 0;
+ DataCnt += 1;
+ }
+ }
+ }
+ }
+ }
+
+
+ // compress out entries where there is no 3D support.
+ for (i = 0, j = 0; i < pM->total; ++i)
+ {
+ if (0 != pM->map[i].StereoStructureMask)
+ {
+ pM->map[j] = pM->map[i];
+ ++j;
+ }
+ }
+
+ pM->total = j;
+
+ if (NULL != pMapSz)
+ {
+ *pMapSz = pM->total;
+ }
+}
+
+// get the HDMI 1.4 3D mandatory stereo format detail based on the input vic.
+// If the vic is not in the mandatory format list, return an error.
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_GetHDMIStereoMandatoryFormatDetail(const NvU8 vic, NvU16 *pStereoStructureMask, NvU8 *pSideBySideHalfDetail)
+{
+ NvU32 i;
+
+ if ((vic < 1) || (vic > MAX_CEA861B_FORMAT))
+ {
+ return NVT_STATUS_ERR;
+ }
+
+ for (i = 0; i < MAX_HDMI_MANDATORY_3D_FORMAT; i++)
+ {
+ if (vic == HDMI_MANDATORY_3D_FORMATS[i].Vic)
+ {
+ if (pStereoStructureMask != NULL)
+ {
+ *pStereoStructureMask = HDMI_MANDATORY_3D_FORMATS[i].StereoStructureMask;
+ }
+
+ if (pSideBySideHalfDetail != NULL)
+ {
+ *pSideBySideHalfDetail = HDMI_MANDATORY_3D_FORMATS[i].SideBySideHalfDetail;
+ }
+
+ return NVT_STATUS_SUCCESS;
+ }
+ }
+
+ return NVT_STATUS_ERR;
+}
+// return the aspect ratio of a given CEA/EIA 861 timing
+CODE_SEGMENT(PAGE_DD_CODE)
+NvU32 getCEA861TimingAspectRatio(NvU32 vic)
+{
+ return (vic > 0 && vic < MAX_CEA861B_FORMAT + 1) ? EIA861B[vic-1].etc.aspect : 0;
+}
+
+// expose the HDMI extended video timing defined by the HDMI LLC VSDB
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS NvTiming_EnumHdmiVsdbExtendedTiming(NvU32 hdmi_vic, NVT_TIMING *pT)
+{
+ if (hdmi_vic > MAX_HDMI_EXT_4Kx2K_FORMAT || hdmi_vic == 0 || pT == NULL)
+ {
+ return NVT_STATUS_ERR;
+ }
+ *pT = HDMI_EXT_4Kx2K_TIMING[hdmi_vic - 1];
+ pT->pclk = RRx1kToPclk(pT);
+ return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidNvidiaVSDBBlock(VSDB_DATA *pVsdb, NVDA_VSDB_PARSED_INFO *vsdbInfo)
+{
+ NVT_NVDA_VSDB_PAYLOAD *pNvda;
+
+ if ((pVsdb == NULL) || (vsdbInfo == NULL))
+ {
+ return;
+ }
+
+ if ((NVT_CEA861_NVDA_IEEE_ID == pVsdb->ieee_id) &&
+ (pVsdb->vendor_data_size >= sizeof(NVT_NVDA_VSDB_PAYLOAD)))
+ {
+ pNvda = (NVT_NVDA_VSDB_PAYLOAD *)(&pVsdb->vendor_data);
+
+ // only version 0x1 is supported
+ if (pNvda->opcode == 0x1)
+ {
+ vsdbInfo->vsdbVersion = pNvda->opcode;
+ }
+
+ switch (vsdbInfo->vsdbVersion)
+ {
+ case 1:
+ vsdbInfo->valid = NV_TRUE;
+ vsdbInfo->vrrData.v1.supportsVrr = NV_TRUE;
+ vsdbInfo->vrrData.v1.minRefreshRate = pNvda->vrrMinRefreshRate;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidMsftVsdbBlock(VSDB_DATA *pVsdb, MSFT_VSDB_PARSED_INFO *pVsdbInfo)
+{
+ if ((pVsdb == NULL) || (pVsdbInfo == NULL))
+ {
+ return;
+ }
+
+ NVMISC_MEMSET(pVsdbInfo, 0, sizeof(MSFT_VSDB_PARSED_INFO));
+
+ if ((NVT_CEA861_MSFT_IEEE_ID == pVsdb->ieee_id) &&
+ (pVsdb->vendor_data_size >= sizeof(NVT_MSFT_VSDB_PAYLOAD)))
+ {
+ NvU32 i = 0;
+ NVT_MSFT_VSDB_PAYLOAD *pMsftVsdbPayload = (NVT_MSFT_VSDB_PAYLOAD *)(&pVsdb->vendor_data);
+
+ pVsdbInfo->version = pMsftVsdbPayload->version;
+
+ if (pVsdbInfo->version >= 1)
+ {
+ for (i = 0; i < MSFT_VSDB_CONTAINER_ID_SIZE; i++)
+ {
+ pVsdbInfo->containerId[i] = pMsftVsdbPayload->containerId[i];
+ }
+
+ pVsdbInfo->desktopUsage = pMsftVsdbPayload->desktopUsage;
+ pVsdbInfo->thirdPartyUsage = pMsftVsdbPayload->thirdPartyUsage;
+ pVsdbInfo->valid = NV_TRUE;
+ }
+ // Version 3 is the latest version of the MSFT VSDB at the time of writing this code.
+ // Any newer version will be ignored and parsed as Version 3 until we have explicit
+ // handling for it here.
+ if (pVsdbInfo->version >= 3)
+ {
+ // The primary use case is valid from Version 3 and is ignored on previous versions.
+ pVsdbInfo->primaryUseCase = pMsftVsdbPayload->primaryUseCase;
+ }
+ }
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+void parseEdidHdmiForumVSDB(VSDB_DATA *pVsdb, NVT_HDMI_FORUM_INFO *pHdmiInfo)
+{
+ NVT_HDMI_FORUM_VSDB_PAYLOAD *pHdmiForum;
+ NvU32 remainingSize;
+
+ if ((pVsdb == NULL) || pHdmiInfo == NULL)
+ {
+ return;
+ }
+
+ pHdmiForum = (NVT_HDMI_FORUM_VSDB_PAYLOAD *)(&pVsdb->vendor_data);
+ switch(pHdmiForum->Version)
+ {
+ case 1:
+ // From the HDMI spec, the payload data size is from 7 to 31.
+ // In parseCta861DataBlockInfo(), the payload size recorded in pHdmiForum is
+ // subtracted by 3. Thus the expected range here is 4 - 28.
+ // Assert if the vendor_data_size < 4.
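+ //
+ // Byte-budget sketch (assumed minimal payload): with vendor_data_size == 4,
+ // only the second through fourth payload bytes below are read; remainingSize
+ // then reaches 0 and each later optional byte is skipped by its
+ // "if (!remainingSize--) break;" guard.
+ //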
+ nvt_assert(pVsdb->vendor_data_size >= 4); + + remainingSize = pVsdb->vendor_data_size; + + // second byte + pHdmiInfo->max_TMDS_char_rate = pHdmiForum->Max_TMDS_Character_Rate; + + // third byte + pHdmiInfo->threeD_Osd_Disparity = pHdmiForum->ThreeD_Osd_Disparity; + pHdmiInfo->dual_view = pHdmiForum->Dual_View; + pHdmiInfo->independent_View = pHdmiForum->Independent_View; + pHdmiInfo->lte_340Mcsc_scramble = pHdmiForum->Lte_340mcsc_Scramble; + pHdmiInfo->ccbpci = pHdmiForum->CCBPCI; + pHdmiInfo->cable_status = pHdmiForum->CABLE_STATUS; + pHdmiInfo->rr_capable = pHdmiForum->RR_Capable; + pHdmiInfo->scdc_present = pHdmiForum->SCDC_Present; + + // fourth byte + pHdmiInfo->dc_30bit_420 = pHdmiForum->DC_30bit_420; + pHdmiInfo->dc_36bit_420 = pHdmiForum->DC_36bit_420; + pHdmiInfo->dc_48bit_420 = pHdmiForum->DC_48bit_420; + pHdmiInfo->uhd_vic = pHdmiForum->UHD_VIC; + pHdmiInfo->max_FRL_Rate = pHdmiForum->Max_FRL_Rate; + + remainingSize -= 4; + + // fifth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->fapa_start_location = pHdmiForum->FAPA_start_location; + pHdmiInfo->allm = pHdmiForum->ALLM; + pHdmiInfo->fva = pHdmiForum->FVA; + pHdmiInfo->cnmvrr = pHdmiForum->CNMVRR; + pHdmiInfo->cinemaVrr = pHdmiForum->CinemaVRR; + pHdmiInfo->m_delta = pHdmiForum->M_delta; + pHdmiInfo->fapa_end_extended = pHdmiForum->FAPA_End_Extended; + + // sixth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->vrr_min = pHdmiForum->VRR_min; + pHdmiInfo->vrr_max = ((NvU16)pHdmiForum->VRR_max_high) << 8; + + // seventh byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->vrr_max |= (pHdmiForum->VRR_max_low); + + // eighth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->dsc_10bpc = pHdmiForum->DSC_10bpc; + pHdmiInfo->dsc_12bpc = pHdmiForum->DSC_12bpc; + pHdmiInfo->dsc_16bpc = pHdmiForum->DSC_16bpc; + pHdmiInfo->dsc_All_bpp = pHdmiForum->DSC_All_bpp; + pHdmiInfo->dsc_Native_420 = pHdmiForum->DSC_Native_420; + pHdmiInfo->dsc_1p2 = pHdmiForum->DSC_1p2; + + // ninth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->dsc_MaxSlices = 0; + pHdmiInfo->dsc_MaxPclkPerSliceMHz = 0; + switch(pHdmiForum->DSC_MaxSlices) + { + case 7: pHdmiInfo->dsc_MaxSlices = 16; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break; + case 6: pHdmiInfo->dsc_MaxSlices = 12; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break; + case 5: pHdmiInfo->dsc_MaxSlices = 8; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break; + case 4: pHdmiInfo->dsc_MaxSlices = 8; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + case 3: pHdmiInfo->dsc_MaxSlices = 4; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + case 2: pHdmiInfo->dsc_MaxSlices = 2; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + case 1: pHdmiInfo->dsc_MaxSlices = 1; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + default: break; + } + + pHdmiInfo->dsc_Max_FRL_Rate = pHdmiForum->DSC_Max_FRL_Rate; + + // tenth byte + if (!remainingSize--) + { + break; + } + + // Per spec, number of bytes has to be computed as 1024 x (1 + DSC_TotalChunkKBytes). + // For driver parser purposes, add 1 here so that the field means max num of KBytes in a link of chunks + pHdmiInfo->dsc_totalChunkKBytes = (pHdmiForum->DSC_totalChunkKBytes == 0) ? 
0 : pHdmiForum->DSC_totalChunkKBytes + 1; + break; + + default: + break; + + } + +} + +POP_SEGMENTS diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c new file mode 100644 index 0000000..bf9f398 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c @@ -0,0 +1,1437 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+//
+// File: nvt_edidext_displayid.c
+//
+// Purpose: provide EDID-related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "edid.h"
+
+PUSH_SEGMENTS
+
+static NVT_STATUS parseDisplayIdSection(DISPLAYID_SECTION * section,
+ NvU32 max_length,
+ NVT_EDID_INFO *pEdidInfo);
+
+// Specific blocks that can be parsed based on DisplayID
+static NVT_STATUS parseDisplayIdProdIdentityBlock(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdParam(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdColorChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdTiming1(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming2(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming3(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming4(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTiming5(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTimingVesa(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdTimingEIA(NvU8 * block, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayIdRangeLimits(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdSerialNumber(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdAsciiString(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdDeviceData(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdInterfacePower(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdTransferChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdDisplayInterface(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdStereo(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdTiledDisplay(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdCtaData(NvU8 * block, NVT_EDID_INFO *pInfo);
+static NVT_STATUS parseDisplayIdDisplayInterfaceFeatures(NvU8 * block, NVT_DISPLAYID_INFO *pInfo);
+
+static NVT_STATUS parseDisplayIdTiming1Descriptor(DISPLAYID_TIMING_1_DESCRIPTOR * desc, NVT_TIMING *pT);
+static NVT_STATUS parseDisplayIdTiming2Descriptor(DISPLAYID_TIMING_2_DESCRIPTOR * desc, NVT_TIMING *pT);
+static NVT_STATUS parseDisplayIdTiming3Descriptor(DISPLAYID_TIMING_3_DESCRIPTOR * desc, NVT_TIMING *pT);
+static NVT_STATUS parseDisplayIdTiming5Descriptor(DISPLAYID_TIMING_5_DESCRIPTOR * desc, NVT_TIMING *pT);
+
+/**
+ * @brief Parses a displayID Extension block, with timings stored in pT and
+ * other info stored in pInfo
+ * @param p The EDID Extension Block (With a DisplayID in it)
+ * @param size Size of the displayID Extension Block
+ * @param pEdidInfo EDID struct containing DisplayID information and
+ * the timings
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS getDisplayIdEDIDExtInfo(NvU8 *p, NvU32 size,
+ NVT_EDID_INFO *pEdidInfo)
+{
+ DISPLAYID_SECTION * section;
+
+ if (p == NULL || size < sizeof(EDIDV1STRUC))
+ return NVT_STATUS_ERR;
+ if (p[0] != NVT_EDID_EXTENSION_DISPLAYID)
+ return NVT_STATUS_ERR;
+
+ section = (DISPLAYID_SECTION *)(p + 1);
+ pEdidInfo->ext_displayid.version = section->version;
+ if (section->product_type > NVT_DISPLAYID_PROD_MAX_NUMBER)
+ return NVT_STATUS_ERR;
+
+ return parseDisplayIdSection(section, sizeof(EDIDV1STRUC) - 1, pEdidInfo);
+}
+
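+/*
+ * Usage sketch (illustrative only; "pExt" is a hypothetical pointer to a
+ * 128-byte EDID extension block tagged NVT_EDID_EXTENSION_DISPLAYID):
+ *
+ *     NVT_EDID_INFO info;
+ *     NVMISC_MEMSET(&info, 0, sizeof(info));
+ *     if (getDisplayIdEDIDExtInfo(pExt, sizeof(EDIDV1STRUC), &info) == NVT_STATUS_SUCCESS)
+ *     {
+ *         // info.ext_displayid and info.timing[] are now populated
+ *     }
+ */
+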
+/** + * @brief updates the color format for each bpc for each timing + * @param pInfo EDID struct containing DisplayID information and + * the timings + * @param timingIdx Index of the first display ID timing in the + * pInfo->timing[] timing array. + */ +CODE_SEGMENT(PAGE_DD_CODE) +void updateColorFormatForDisplayIdExtnTimings(NVT_EDID_INFO *pInfo, + NvU32 timingIdx) +{ + // pDisplayIdInfo is the parsed display ID info + NVT_DISPLAYID_INFO *pDisplayIdInfo = &pInfo->ext_displayid; + NVT_TIMING *pT = &pInfo->timing[timingIdx]; + + nvt_assert((timingIdx) <= COUNT(pInfo->timing)); + + if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED || + pInfo->ext861.valid.H14B_VSDB || pInfo->ext861.valid.H20_HF_VSDB) && pInfo->ext861.revision >= NVT_CEA861_REV_A) + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_16b); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_16b); + } + } + else // DP + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_16b); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_16b); + } + } + + if (!pInfo->ext_displayid.supported_displayId2_0) + { + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_8b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_10b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_12b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_14b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_16b); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_8b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_10b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_12b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_14b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_16b); + } + else + { + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does 
not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_16b); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_16b); + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, /* yuv420 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_16b); + } +} + +/** + * @brief Parses a displayID Section + * @param section The DisplayID Section to parse + * @param max_length The indicated total length of the displayID as given (or + * sizeof(EDIDV1STRUCT) for an extension block) + * @param pEdidInfo EDID struct containing DisplayID information and + * the timings + */ +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdSection(DISPLAYID_SECTION * section, + NvU32 max_length, + NVT_EDID_INFO *pEdidInfo) +{ + NvU8 block_location = 0; + NvU8 section_length; + NvU8 remaining_length; + + if (section == NULL || max_length <= NVT_DISPLAYID_SECTION_HEADER_LEN) + return NVT_STATUS_ERR; + if (section->section_bytes > max_length - NVT_DISPLAYID_SECTION_HEADER_LEN) + return NVT_STATUS_ERR; + + remaining_length = section->section_bytes; + + while (block_location < section->section_bytes) + { + DISPLAYID_DATA_BLOCK_HEADER * hdr = (DISPLAYID_DATA_BLOCK_HEADER *) (section->data + block_location); + NvBool is_prod_id = remaining_length > 3 && block_location == 0 && hdr->type == 0 && hdr->data_bytes > 0; + NvU8 i; + + // Check the padding. + if (hdr->type == 0 && !is_prod_id) + { + for (i = 1 ; i < remaining_length; i++) + { + // All remaining bytes must all be 0. 
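+ //
+ // (DisplayID sections are zero-padded after the last data block; a
+ // non-zero byte in the tail means the section is malformed, so the
+ // check below rejects it.)
+ //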
+ if (section->data[block_location + i] != 0)
+ {
+ return NVT_STATUS_ERR;
+ }
+ }
+
+ section_length = remaining_length;
+ }
+ else
+ {
+ if (parseDisplayIdBlock((NvU8 *)(section->data + block_location),
+ section->section_bytes - block_location,
+ &section_length,
+ pEdidInfo) != NVT_STATUS_SUCCESS)
+ return NVT_STATUS_ERR;
+ }
+
+ block_location += section_length;
+ remaining_length -= section_length;
+ }
+
+ return NVT_STATUS_SUCCESS;
+}
+
+/**
+ * @brief Parses a displayID data block
+ * @param block The DisplayID data block to parse
+ * @param max_length The indicated total length of each data block, for checking
+ * @param pLength return the indicated length of each data block
+ * @param pEdidInfo EDID struct containing DisplayID information and
+ * the timings; if NULL, the block is only validated
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS parseDisplayIdBlock(NvU8* pBlock,
+ NvU8 max_length,
+ NvU8* pLength,
+ NVT_EDID_INFO *pEdidInfo)
+{
+ DISPLAYID_DATA_BLOCK_HEADER * hdr = (DISPLAYID_DATA_BLOCK_HEADER *) pBlock;
+ NVT_STATUS ret = NVT_STATUS_SUCCESS;
+ NVT_DISPLAYID_INFO *pInfo;
+
+ if (pBlock == NULL || max_length <= NVT_DISPLAYID_DATABLOCK_HEADER_LEN)
+ return NVT_STATUS_ERR;
+
+ if (hdr->data_bytes > max_length - NVT_DISPLAYID_DATABLOCK_HEADER_LEN)
+ return NVT_STATUS_ERR;
+
+ pInfo = pEdidInfo == NULL ? NULL : &pEdidInfo->ext_displayid;
+
+ *pLength = hdr->data_bytes + NVT_DISPLAYID_DATABLOCK_HEADER_LEN;
+
+ switch (hdr->type)
+ {
+ case NVT_DISPLAYID_BLOCK_TYPE_PRODUCT_IDENTITY:
+ ret = parseDisplayIdProdIdentityBlock(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_PARAM:
+ ret = parseDisplayIdParam(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_COLOR_CHAR:
+ ret = parseDisplayIdColorChar(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_1:
+ ret = parseDisplayIdTiming1(pBlock, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_2:
+ ret = parseDisplayIdTiming2(pBlock, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_3:
+ ret = parseDisplayIdTiming3(pBlock, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_4:
+ ret = parseDisplayIdTiming4(pBlock, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_5:
+ ret = parseDisplayIdTiming5(pBlock, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_VESA:
+ ret = parseDisplayIdTimingVesa(pBlock, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_CEA:
+ ret = parseDisplayIdTimingEIA(pBlock, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_RANGE_LIMITS:
+ ret = parseDisplayIdRangeLimits(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_SERIAL_NUMBER:
+ ret = parseDisplayIdSerialNumber(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_ASCII_STRING:
+ ret = parseDisplayIdAsciiString(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_DEVICE_DATA:
+ ret = parseDisplayIdDeviceData(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_INTERFACE_POWER:
+ ret = parseDisplayIdInterfacePower(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TRANSFER_CHAR:
+ ret = parseDisplayIdTransferChar(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE:
+ ret = parseDisplayIdDisplayInterface(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_STEREO:
+ ret = parseDisplayIdStereo(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY:
+ ret = parseDisplayIdTiledDisplay(pBlock, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA:
+ ret = parseDisplayIdCtaData(pBlock, pEdidInfo);
+
break; + case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE_FEATURES: + ret = parseDisplayIdDisplayInterfaceFeatures(pBlock, pInfo); + break; + default: + ret = NVT_STATUS_ERR; + break; + } + + if (pEdidInfo == NULL) return ret; + + return NVT_STATUS_SUCCESS; +} +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdColorChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU32 i, j; + NvU16 x_p, y_p; + DISPLAYID_COLOR_CHAR_BLOCK * blk = (DISPLAYID_COLOR_CHAR_BLOCK *)block; + + /** unused flag - uncomment if you wish to use it in the future + NvU8 isTemp = DRF_VAL(T_DISPLAYID, _COLOR, _TEMPORAL, blk->point_info); + */ + NvU8 wp_num = DRF_VAL(T_DISPLAYID, _COLOR, _WHITE_POINTS, blk->point_info); + NvU8 prim_num = DRF_VAL(T_DISPLAYID, _COLOR, _PRIMARIES, blk->point_info); + + if ((prim_num + wp_num) * sizeof(DISPLAYID_COLOR_POINT) + 1 != blk->header.data_bytes) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + for (i = 0; i < prim_num; i++) + { + x_p = (blk->points)[i].color_x_bits_low + + (DRF_VAL(T_DISPLAYID, _COLOR, _POINT_X, (blk->points)[i].color_bits_mid) << 8); + y_p = DRF_VAL(T_DISPLAYID, _COLOR, _POINT_Y, (blk->points)[i].color_bits_mid) + + ((blk->points)[i].color_y_bits_high << 4); + pInfo->primaries[i].x = x_p; + pInfo->primaries[i].y = y_p; + } + + for (j = 0; j < wp_num; j++) + { + x_p = (blk->points)[i].color_x_bits_low + + (DRF_VAL(T_DISPLAYID, _COLOR, _POINT_X, (blk->points)[i].color_bits_mid) << 8); + y_p = DRF_VAL(T_DISPLAYID, _COLOR, _POINT_Y, (blk->points)[i].color_bits_mid) + + ((blk->points)[i].color_y_bits_high << 4); + pInfo->white_points[pInfo->total_primaries + j].x = x_p; + pInfo->white_points[pInfo->total_primaries + j].y = y_p; + + i++; + } + pInfo->total_primaries = prim_num; + pInfo->total_white_points += wp_num; + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdProdIdentityBlock(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_PROD_IDENTIFICATION_BLOCK * blk = (DISPLAYID_PROD_IDENTIFICATION_BLOCK *)block; + if (blk->header.data_bytes - blk->productid_string_size != NVT_DISPLAYID_PRODUCT_IDENTITY_MIN_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->vendor_id = (blk->vendor)[2] | ((blk->vendor)[1] << 8) | ((blk->vendor)[0] << 16); + pInfo->product_id = blk->product_code; + pInfo->serial_number = blk->serial_number; + pInfo->week = blk->model_tag; + pInfo->year = blk->model_year; + + if (blk->productid_string_size != 0) + NVMISC_STRNCPY((char *)pInfo->product_string, (const char *)blk->productid_string, blk->productid_string_size); + pInfo->product_string[blk->productid_string_size] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdParam(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_DISPLAY_PARAM_BLOCK * blk = (DISPLAYID_DISPLAY_PARAM_BLOCK *)block; + if (blk->header.data_bytes != NVT_DISPLAYID_DISPLAY_PARAM_BLOCK_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->horiz_size = blk->horizontal_image_size; + pInfo->vert_size = blk->vertical_image_size; + pInfo->horiz_pixels = blk->horizontal_pixel_count; + pInfo->vert_pixels = blk->vertical_pixel_count; + + pInfo->support_audio = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _SUPPORT_AUDIO, blk->feature); 
+ pInfo->separate_audio = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _SEPARATE_AUDIO, blk->feature); + pInfo->audio_override = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _AUDIO_INPUT_OVERRIDE, blk->feature); + pInfo->power_management = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _POWER_MANAGEMENT, blk->feature); + pInfo->fixed_timing = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _FIXED_TIMING, blk->feature); + pInfo->fixed_pixel_format = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _FIXED_PIXEL_FORMAT, blk->feature); + pInfo->deinterlace = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEINTERLACING, blk->feature); + + pInfo->gamma = (NvU16)(blk->transfer_char_gamma - 1) * 100; + pInfo->aspect_ratio = blk->aspect_ratio; + + pInfo->depth_overall = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEPTH_OVERALL, blk->color_bit_depth); + pInfo->depth_native = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEPTH_NATIVE, blk->color_bit_depth); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming1(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_1_BLOCK * blk = (DISPLAYID_TIMING_1_BLOCK *)block; + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_1_DESCRIPTOR) != 0) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_1_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming1Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming1Descriptor(DISPLAYID_TIMING_1_DESCRIPTOR * type1, NVT_TIMING *pT) +{ + NvU32 totalPixels_in_2_fields; + if (type1 == NULL || pT == NULL) + return NVT_STATUS_ERR; + + // the pixel clock + pT->pclk = (NvU32)((type1->pixel_clock_high << 16) + (type1->pixel_clock_mid << 8) + type1->pixel_clock_low_minus_0_01MHz + 1); + + // the DisplayID spec does not support border + pT->HBorder = pT->VBorder = 0; + + // get horizontal timing parameters + pT->HVisible = (NvU16)((type1->horizontal.active_image_pixels_high << 8) + type1->horizontal.active_image_pixels_low_minus_1 + 1); + pT->HTotal = (NvU16)((type1->horizontal.blank_pixels_high << 8) + type1->horizontal.blank_pixels_low_minus_1 + 1) + pT->HVisible; + pT->HFrontPorch = (NvU16)((type1->horizontal.front_porch_high << 8) + type1->horizontal.front_porch_low_minus_1 + 1); + pT->HSyncWidth = (NvU16)((type1->horizontal.sync_width_high << 8) + type1->horizontal.sync_width_low_minus_1 + 1); + pT->HSyncPol = type1->horizontal.sync_polarity ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE; + + // get vertical timings + pT->VVisible = (NvU16)((type1->vertical.active_image_lines_high << 8) + type1->vertical.active_image_lines_low_minus_1 + 1); + pT->VTotal = (NvU16)((type1->vertical.blank_lines_high << 8) + type1->vertical.blank_lines_low_minus_1 + 1) + pT->VVisible; + pT->VFrontPorch = (NvU16)((type1->vertical.front_porch_lines_high << 8) + type1->vertical.front_porch_lines_low_minus_1 + 1); + pT->VSyncWidth = (NvU16)((type1->vertical.sync_width_lines_high << 8) + type1->vertical.sync_width_lines_low_minus_1 + 1); + pT->VSyncPol = type1->vertical.sync_polarity ? 
NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE;
+
+ // EDID used in DP1.4 Compliance test had incorrect HBlank listed, leading to wrong raster sizes being set by driver (bug 2714607)
+ // Filter incorrect timings here. HTotal must cover sufficient blanking time
+ if (pT->HTotal < (pT->HVisible + pT->HFrontPorch + pT->HSyncWidth))
+ {
+ return NVT_STATUS_ERR;
+ }
+
+ // the frame scanning type
+ pT->interlaced = type1->options.interface_frame_scanning_type;
+
+ // the aspect ratio
+ switch (type1->options.aspect_ratio)
+ {
+ case NVT_DISPLAYID_TIMING_ASPECT_RATIO_1_1:
+ pT->etc.aspect = (1 << 16) | 1;
+ break;
+ case NVT_DISPLAYID_TIMING_ASPECT_RATIO_5_4:
+ pT->etc.aspect = (5 << 16) | 4;
+ break;
+ case NVT_DISPLAYID_TIMING_ASPECT_RATIO_4_3:
+ pT->etc.aspect = (4 << 16) | 3;
+ break;
+ case NVT_DISPLAYID_TIMING_ASPECT_RATIO_15_9:
+ pT->etc.aspect = (15 << 16) | 9;
+ break;
+ case NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_9:
+ pT->etc.aspect = (16 << 16) | 9;
+ break;
+ case NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_10:
+ pT->etc.aspect = (16 << 16) | 10;
+ break;
+ default:
+ pT->etc.aspect = 0;
+ break;
+ }
+
+ // the refresh rate
+ if (pT->interlaced)
+ {
+ // in interlaced mode, adjust for one extra line in every other frame. pT->VTotal is field based here
+ totalPixels_in_2_fields = (NvU32)pT->HTotal * ((NvU32)pT->VTotal * 2 + 1);
+ // calculate the field rate in interlaced mode
+ pT->etc.rr = (NvU16)axb_div_c(pT->pclk * 2, 10000, totalPixels_in_2_fields);
+ pT->etc.rrx1k = axb_div_c(pT->pclk * 2, 10000000, totalPixels_in_2_fields);
+ }
+ else
+ {
+ // calculate the frame rate in progressive mode
+ // in progressive mode field = frame
+ pT->etc.rr = (NvU16)axb_div_c(pT->pclk, 10000, (NvU32)pT->HTotal * (NvU32)pT->VTotal);
+ pT->etc.rrx1k = axb_div_c(pT->pclk, 10000000, (NvU32)pT->HTotal * (NvU32)pT->VTotal);
+ }
+ pT->etc.name[39] = '\0';
+ pT->etc.rep = 0x1; // bit mask for no pixel repetition
+
+ pT->etc.status = NVT_STATUS_DISPLAYID_1;
+ // Unlike the PTM in the EDID base block, the DisplayID type I/II preferred timing does not depend on sequence,
+ // so we'll just update the preferred flag, not sequence them
+ //pT->etc.status = NVT_STATUS_DISPLAYID_1N(1);
+ pT->etc.flag |= type1->options.is_preferred_detailed_timing ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0;
+
+ /* Fields currently not used.
Uncomment them for future use + type1->options.stereo_support; + */ + + // the DisplayID spec covers the timing parameter(Visible/FrontPorch/SyncWidth/Total) range from 1~65536 while our NVT_TIMING structure which is mostly based on NvU16 only covers 0~65535 + nvt_assert(pT->HVisible != 0); + nvt_assert(pT->HFrontPorch != 0); + nvt_assert(pT->HSyncWidth != 0); + nvt_assert(pT->VVisible != 0); + nvt_assert(pT->VFrontPorch != 0); + nvt_assert(pT->VSyncWidth != 0); + + // cover the possible overflow + nvt_assert(pT->HTotal > pT->HVisible); + nvt_assert(pT->VTotal > pT->VVisible); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming2(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + DISPLAYID_TIMING_2_BLOCK * blk = (DISPLAYID_TIMING_2_BLOCK *)block; + NVT_TIMING newTiming; + + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_2_DESCRIPTOR) != 0) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_2_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming2Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming2Descriptor(DISPLAYID_TIMING_2_DESCRIPTOR * type2, NVT_TIMING *pT) +{ + NvU32 totalPixels_in_2_fields; + if (type2 == NULL || pT == NULL) + return NVT_STATUS_ERR; + + // the pixel clock + pT->pclk = (NvU32)((type2->pixel_clock_high << 16) + (type2->pixel_clock_mid << 8) + type2->pixel_clock_low_minus_0_01MHz + 1); + + // the DisplayID spec does not support border + pT->HBorder = pT->VBorder = 0; + + // get horizontal timing parameters + pT->HVisible = (NvU16)((type2->horizontal.active_image_in_char_high << 8) + type2->horizontal.active_image_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS; + pT->HTotal = (NvU16)(type2->horizontal.blank_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS + pT->HVisible; + pT->HFrontPorch = (NvU16)(type2->horizontal.front_porch_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS; + pT->HSyncWidth = (NvU16)(type2->horizontal.sync_width_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS; + pT->HSyncPol = type2->options.hsync_polarity ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE; + + // get vertical timing parameters + pT->VVisible = (NvU16)((type2->vertical.active_image_lines_high << 8) + type2->vertical.active_image_lines_low_minus_1 + 1); + pT->VTotal = (NvU16)(type2->vertical.blank_lines_minus_1 + 1) + pT->VVisible; + pT->VFrontPorch = (NvU16)(type2->vertical.front_porch_lines_minus_1 + 1); + pT->VSyncWidth = (NvU16)(type2->vertical.sync_width_lines_minus_1 + 1); + pT->VSyncPol = type2->options.vsync_polarity ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE; + + // the frame scanning type + pT->interlaced = type2->options.interface_frame_scanning_type; + + // the refresh rate + if (pT->interlaced) + { + // in interlaced mode, adjust for one extra line in every other frame. 
pT->VTotal is field based here + totalPixels_in_2_fields = (NvU32)pT->HTotal * ((NvU32)pT->VTotal * 2 + 1); + // calculate the field rate in interlaced mode + pT->etc.rr = (NvU16)axb_div_c(pT->pclk * 2, 10000, totalPixels_in_2_fields); + pT->etc.rrx1k = axb_div_c(pT->pclk * 2, 10000000, totalPixels_in_2_fields); + } + else + { + // calculate frame rate in progressive mode + // in progressive mode filed = frame + pT->etc.rr = (NvU16)axb_div_c(pT->pclk, 10000, (NvU32)pT->HTotal * (NvU32)pT->VTotal); + pT->etc.rrx1k = axb_div_c(pT->pclk, 10000000, (NvU32)pT->HTotal * (NvU32)pT->VTotal); + } + + pT->etc.aspect = 0; + pT->etc.name[39] = '\0'; + pT->etc.rep = 0x1; // Bit mask for no pixel repetition + + pT->etc.status = NVT_STATUS_DISPLAYID_2; + // Unlike the PTM in EDID base block, DisplayID type I/II preferred timing does not have dependency on sequence + // so we'll just update the preferred flag, not sequence them + //pT->etc.status = NVT_STATUS_DISPLAYID_1N(1); + pT->etc.flag |= type2->options.is_preferred_detailed_timing ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0; + + /* Fields currently not used. Uncomment them for future use + type1->options.stereo_support; + */ + + // the DisplayID spec covers the timing parameter(Visible/FrontPorch/SyncWidth/Total) range from 1~65536 while our NVT_TIMING structure which is mostly based on NvU16 only covers 0~65535 + nvt_assert(pT->HVisible != 0); + nvt_assert(pT->HFrontPorch != 0); + nvt_assert(pT->HSyncWidth != 0); + nvt_assert(pT->VVisible != 0); + nvt_assert(pT->VFrontPorch != 0); + nvt_assert(pT->VSyncWidth != 0); + + // cover the possible overflow + nvt_assert(pT->HTotal > pT->HVisible); + nvt_assert(pT->VTotal > pT->VVisible); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming3Descriptor(DISPLAYID_TIMING_3_DESCRIPTOR * desc, + NVT_TIMING *pT) +{ + NvU8 formula, aspect; + NvU32 horiz, vert, rr; + NvU32 interlace; + if (desc == NULL || pT == NULL) + return NVT_STATUS_ERR; + + formula = DRF_VAL(T_DISPLAYID, _TIMING_3, _FORMULA, desc->optns); + /* Fields currently not used, uncomment for use + preferred = DRF_VAL(T_DISPLAYID, _TIMING, _PREFERRED, desc->optns); + */ + aspect = DRF_VAL(T_DISPLAYID, _TIMING_3, _ASPECT_RATIO, desc->optns); + interlace = DRF_VAL(T_DISPLAYID, _TIMING_3, _INTERLACE, desc->transfer) ? 
NVT_INTERLACED : NVT_PROGRESSIVE; + rr = (NvU32)(DRF_VAL(T_DISPLAYID, _TIMING_3, _REFRESH_RATE, desc->transfer) + 1); + + horiz = (NvU32)((desc->horizontal_active_pixels + 1) << 3); + + switch (aspect) + { + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_1_1: + vert = horiz; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_5_4: + vert = horiz * 4 / 5; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_4_3: + vert = horiz * 3 / 4; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_15_9: + vert = horiz * 9 / 15; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_9: + vert = horiz * 9 / 16; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_10: + vert = horiz * 10 / 16; + break; + default: + return NVT_STATUS_ERR; + } + + switch (formula) + { + case NVT_DISPLAYID_TIMING_3_FORMULA_STANDARD: + if (NvTiming_CalcCVT(horiz, vert, rr, interlace, pT) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + break; + case NVT_DISPLAYID_TIMING_3_FORMULA_REDUCED_BLANKING: + if (NvTiming_CalcCVT_RB(horiz, vert, rr, interlace, pT) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + break; + default: + return NVT_STATUS_ERR; + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming3(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + DISPLAYID_TIMING_3_BLOCK * blk = (DISPLAYID_TIMING_3_BLOCK *)block; + NVT_TIMING newTiming; + + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_3_DESCRIPTOR) != 0) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_3_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming3Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming4(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_4_BLOCK * blk = (DISPLAYID_TIMING_4_BLOCK *)block; + if (blk->header.data_bytes < 1 || blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumDMT((NvU32)(blk->timing_codes[i]), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming5Descriptor(DISPLAYID_TIMING_5_DESCRIPTOR * desc, NVT_TIMING *pT) +{ + NvU32 width, height, rr; + NvBool is1000div1001 = NV_FALSE; + + // we don't handle stereo type nor custom reduced blanking yet + //NvU8 stereoType, formula; + //stereoType = (desc->optns & NVT_DISPLAYID_TIMING_5_STEREO_SUPPORT_MASK); + //formula = desc->optns & NVT_DISPLAYID_TIMING_5_FORMULA_SUPPORT_MASK; + + if (desc->optns & NVT_DISPLAYID_TIMING_5_FRACTIONAL_RR_SUPPORT_MASK) + { + is1000div1001 = NV_TRUE; + } + width = ((desc->horizontal_active_pixels_high << 8) | desc->horizontal_active_pixels_low) + 1; + height = ((desc->vertical_active_pixels_high << 8) | 
desc->vertical_active_pixels_low) + 1; + rr = desc->refresh_rate + 1; + return NvTiming_CalcCVT_RB2(width, height, rr, is1000div1001, pT); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming5(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_5_BLOCK * blk = (DISPLAYID_TIMING_5_BLOCK *)block; + if (blk->header.data_bytes < 1 || blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + for (i = 0; i * sizeof(DISPLAYID_TIMING_5_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming5Descriptor(blk->descriptors + i, &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTimingVesa(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU8 i, j; + NVT_TIMING newTiming; + DISPLAYID_TIMING_MODE_BLOCK * blk = (DISPLAYID_TIMING_MODE_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_TIMING_VESA_BLOCK_SIZE) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i < DISPLAYID_TIMING_VESA_BLOCK_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (blk->timing_modes[i] & (1 << j)) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumDMT((NvU32)(i * 8 + j + 1), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTimingEIA(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU8 i, j; + NVT_TIMING newTiming; + DISPLAYID_TIMING_MODE_BLOCK * blk = (DISPLAYID_TIMING_MODE_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_TIMING_CEA_BLOCK_SIZE) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i < DISPLAYID_TIMING_CEA_BLOCK_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (blk->timing_modes[i] & (1 << j)) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumCEA861bTiming((NvU32)(i * 8 + j + 1), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (pEdidInfo == NULL) continue; + + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + else + { + if (pEdidInfo == NULL) return NVT_STATUS_ERR; + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdRangeLimits(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NVT_DISPLAYID_RANGE_LIMITS * rl; + DISPLAYID_RANGE_LIMITS_BLOCK * blk = (DISPLAYID_RANGE_LIMITS_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_RANGE_LIMITS_BLOCK_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + if (pInfo->rl_num >= NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + rl = pInfo->range_limits + pInfo->rl_num; + (pInfo->rl_num)++; + + rl->pclk_min = blk->pixel_clock_min[0] | (blk->pixel_clock_min[1] << 8) | (blk->pixel_clock_min[2] << 
16); + rl->pclk_max = blk->pixel_clock_max[0] | (blk->pixel_clock_max[1] << 8) | (blk->pixel_clock_max[2] << 16); + + rl->interlaced = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _INTERLACE, blk->optns); + rl->cvt = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _CVT_STANDARD, blk->optns); + rl->cvt_reduced = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _CVT_REDUCED, blk->optns); + rl->dfd = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _DFD, blk->optns); + + rl->hfreq_min = blk->horizontal_frequency_min; + rl->hfreq_max = blk->horizontal_frequency_max; + rl->hblank_min = blk->horizontal_blanking_min; + rl->vfreq_min = blk->vertical_refresh_rate_min; + rl->vfreq_max = blk->vertical_refresh_rate_max; + rl->vblank_min = blk->vertical_blanking_min; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdSerialNumber(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_ASCII_STRING_BLOCK * blk = (DISPLAYID_ASCII_STRING_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + // Nothing is currently done to store any ASCII Serial Number, if it is + // required. Code here may need to be modified sometime in the future, along + // with NVT_DISPLAYID_INFO struct + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdAsciiString(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_ASCII_STRING_BLOCK * blk = (DISPLAYID_ASCII_STRING_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + // Nothing is currently done to store any ASCII String Data, if it is + // required. 
Code here may need to be modified in the future, along
+    // with the NVT_DISPLAYID_INFO struct.
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdDeviceData(NvU8 * block, NVT_DISPLAYID_INFO *pInfo)
+{
+    DISPLAYID_DEVICE_DATA_BLOCK * blk = (DISPLAYID_DEVICE_DATA_BLOCK *)block;
+    if (blk->header.data_bytes != DISPLAYID_DEVICE_DATA_BLOCK_LEN)
+    {
+        // Assert since this error is ignored
+        nvt_assert(0);
+        return NVT_STATUS_ERR;
+    }
+
+    if (pInfo == NULL) return NVT_STATUS_SUCCESS;
+
+    pInfo->tech_type = blk->technology;
+
+    pInfo->device_op_mode = DRF_VAL(T_DISPLAYID, _DEVICE, _OPERATING_MODE, blk->operating_mode);
+    pInfo->support_backlight = DRF_VAL(T_DISPLAYID, _DEVICE, _BACKLIGHT, blk->operating_mode);
+    pInfo->support_intensity = DRF_VAL(T_DISPLAYID, _DEVICE, _INTENSITY, blk->operating_mode);
+
+    pInfo->horiz_pixel_count = blk->horizontal_pixel_count;
+    pInfo->vert_pixel_count = blk->vertical_pixel_count;
+
+    pInfo->orientation = DRF_VAL(T_DISPLAYID, _DEVICE, _ORIENTATION, blk->orientation);
+    pInfo->rotation = DRF_VAL(T_DISPLAYID, _DEVICE, _ROTATION, blk->orientation);
+    pInfo->zero_pixel = DRF_VAL(T_DISPLAYID, _DEVICE, _ZERO_PIXEL, blk->orientation);
+    pInfo->scan_direction = DRF_VAL(T_DISPLAYID, _DEVICE, _SCAN, blk->orientation);
+
+    pInfo->subpixel_info = blk->subpixel_info;
+    pInfo->horiz_pitch = blk->horizontal_pitch;
+    pInfo->vert_pitch = blk->vertical_pitch;
+
+    pInfo->color_bit_depth = DRF_VAL(T_DISPLAYID, _DEVICE, _COLOR_DEPTH, blk->color_bit_depth);
+    pInfo->white_to_black = DRF_VAL(T_DISPLAYID, _DEVICE, _WHITE_BLACK, blk->response_time);
+    pInfo->response_time = DRF_VAL(T_DISPLAYID, _DEVICE, _RESPONSE_TIME, blk->response_time);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdInterfacePower(NvU8 * block, NVT_DISPLAYID_INFO *pInfo)
+{
+    DISPLAYID_INTERFACE_POWER_BLOCK * blk = (DISPLAYID_INTERFACE_POWER_BLOCK *)block;
+    if (blk->header.data_bytes != DISPLAYID_INTERFACE_POWER_BLOCK_LEN)
+    {
+        // Assert since this error is ignored
+        nvt_assert(0);
+        return NVT_STATUS_ERR;
+    }
+
+    if (pInfo == NULL) return NVT_STATUS_SUCCESS;
+
+    // Note that the T1..T6 variables hold the raw interface power sequencing
+    // data; the millisecond increment of each field is defined by the
+    // DisplayID specification.
+    pInfo->t1_min = DRF_VAL(T_DISPLAYID, _POWER, _T1_MIN, blk->power_sequence_T1);
+    pInfo->t1_max = DRF_VAL(T_DISPLAYID, _POWER, _T1_MAX, blk->power_sequence_T1);
+    pInfo->t2_max = DRF_VAL(T_DISPLAYID, _POWER, _T2, blk->power_sequence_T2);
+    pInfo->t3_max = DRF_VAL(T_DISPLAYID, _POWER, _T3, blk->power_sequence_T3);
+    pInfo->t4_min = DRF_VAL(T_DISPLAYID, _POWER, _T4_MIN, blk->power_sequence_T4_min);
+    pInfo->t5_min = DRF_VAL(T_DISPLAYID, _POWER, _T5_MIN, blk->power_sequence_T5_min);
+    pInfo->t6_min = DRF_VAL(T_DISPLAYID, _POWER, _T6_MIN, blk->power_sequence_T6_min);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdTransferChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo)
+{
+    if (pInfo == NULL) return NVT_STATUS_SUCCESS;
+
+    // Transfer Characteristics are currently not supported; parsing of this
+    // block should be added here in the future, once monitors that require
+    // this information show up.
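+    // A minimal sketch of what such a parser could look like, mirroring the validation
+    // pattern of the sibling parse* routines above (illustrative only; the payload
+    // handling is an assumption, not taken from the DisplayID spec):
+    //
+    //     DISPLAYID_DATA_BLOCK_HEADER *hdr = (DISPLAYID_DATA_BLOCK_HEADER *)block;
+    //     if (hdr->data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN)
+    //     {
+    //         nvt_assert(0);          // assert since this error is ignored
+    //         return NVT_STATUS_ERR;
+    //     }
+    //     // ...store the transfer curve samples in a future NVT_DISPLAYID_INFO field...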
+ return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdDisplayInterface(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_INTERFACE_DATA_BLOCK * blk = (DISPLAYID_INTERFACE_DATA_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_INTERFACE_DATA_BLOCK_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->supported_displayId2_0 = 0; + + // Type/Link Info + pInfo->u4.display_interface.interface_type = DRF_VAL(T_DISPLAYID, _INTERFACE, _TYPE, blk->info); + pInfo->u4.display_interface.u1.digital_num_links = DRF_VAL(T_DISPLAYID, _INTERFACE, _NUMLINKS, blk->info); + pInfo->u4.display_interface.interface_version = blk->version; + + // Color Depths + pInfo->u4.display_interface.rgb_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB16, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB14, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB12, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB10, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB8, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB6, blk->color_depth_rgb); + pInfo->u4.display_interface.ycbcr444_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_16, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_14, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_12, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_10, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_8, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_6, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr422_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_16, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_14, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_12, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_10, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_8, blk->color_depth_ycbcr422); + + // Content Protection + pInfo->u4.display_interface.content_protection = DRF_VAL(T_DISPLAYID, _INTERFACE, _CONTENT, blk->content_protection); + pInfo->u4.display_interface.content_protection_version = blk->content_protection_version; + + // Spread + pInfo->u4.display_interface.spread_spectrum = DRF_VAL(T_DISPLAYID, _INTERFACE, _SPREAD_TYPE, blk->spread); + pInfo->u4.display_interface.spread_percent = DRF_VAL(T_DISPLAYID, _INTERFACE, _SPREAD_PER, blk->spread); + + // Proprietary Information + switch (pInfo->u4.display_interface.interface_type) + { + case 
NVT_DISPLAYID_INTERFACE_TYPE_LVDS: + pInfo->u2.lvds.color_map = DRF_VAL(T_DISPLAYID, _LVDS, _COLOR, blk->interface_attribute_1); + pInfo->u2.lvds.support_2_8v = DRF_VAL(T_DISPLAYID, _LVDS, _2_8, blk->interface_attribute_1); + pInfo->u2.lvds.support_12v = DRF_VAL(T_DISPLAYID, _LVDS, _12, blk->interface_attribute_1); + pInfo->u2.lvds.support_5v = DRF_VAL(T_DISPLAYID, _LVDS, _5, blk->interface_attribute_1); + pInfo->u2.lvds.support_3_3v = DRF_VAL(T_DISPLAYID, _LVDS, _3_3, blk->interface_attribute_1); + pInfo->u2.lvds.DE_mode = DRF_VAL(T_DISPLAYID, _INTERFACE, _DE, blk->interface_attribute_2); + pInfo->u2.lvds.polarity = DRF_VAL(T_DISPLAYID, _INTERFACE, _POLARITY, blk->interface_attribute_2); + pInfo->u2.lvds.data_strobe = DRF_VAL(T_DISPLAYID, _INTERFACE, _STROBE, blk->interface_attribute_2); + break; + case NVT_DISPLAYID_INTERFACE_TYPE_PROPRIETARY: + pInfo->u2.proprietary.DE_mode = DRF_VAL(T_DISPLAYID, _INTERFACE, _DE, blk->interface_attribute_1); + pInfo->u2.proprietary.polarity = DRF_VAL(T_DISPLAYID, _INTERFACE, _POLARITY, blk->interface_attribute_1); + pInfo->u2.proprietary.data_strobe = DRF_VAL(T_DISPLAYID, _INTERFACE, _STROBE, blk->interface_attribute_1); + break; + default: + break; + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdStereo(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU8 * sub; + + DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK * blk = (DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + sub = blk->timing_sub_block; + + pInfo->stereo_code = blk->stereo_code; + switch (blk->stereo_code) + { + case NVT_DISPLAYID_STEREO_FIELD_SEQUENTIAL: + pInfo->u3.field_sequential.stereo_polarity = sub[0]; + break; + case NVT_DISPLAYID_STEREO_SIDE_BY_SIDE: + pInfo->u3.side_by_side.view_identity = sub[0]; + break; + case NVT_DISPLAYID_STEREO_PIXEL_INTERLEAVED: + NVMISC_MEMCPY(pInfo->u3.pixel_interleaved.interleave_pattern, sub, 8); + break; + case NVT_DISPLAYID_STEREO_DUAL_INTERFACE: + pInfo->u3.left_right_separate.mirroring = DRF_VAL(T_DISPLAYID, _STEREO, _MIRRORING, sub[0]); + pInfo->u3.left_right_separate.polarity = DRF_VAL(T_DISPLAYID, _STEREO, _POLARITY, sub[0]); + break; + case NVT_DISPLAYID_STEREO_MULTIVIEW: + pInfo->u3.multiview.num_views = sub[0]; + pInfo->u3.multiview.code = sub[1]; + break; + case NVT_DISPLAYID_STEREO_PROPRIETARY: + break; + default: + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiledDisplay(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_TILED_DISPLAY_BLOCK * blk = (DISPLAYID_TILED_DISPLAY_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + // For revision 0, we only allow one tiled display data block. 
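+    // (a previously parsed block leaves pInfo->tile_topology_id.vendor_id non-zero,
+    // so the check below skips any further revision-0 tiled display blocks)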
+    if (!blk->header.revision && pInfo->tile_topology_id.vendor_id)
+        return NVT_STATUS_SUCCESS;
+
+    pInfo->tiled_display_revision = blk->header.revision;
+
+    pInfo->tile_capability.bSingleEnclosure = blk->capability.single_enclosure;
+    pInfo->tile_capability.bHasBezelInfo = blk->capability.has_bezel_info;
+    pInfo->tile_capability.multi_tile_behavior = blk->capability.multi_tile_behavior;
+    pInfo->tile_capability.single_tile_behavior = blk->capability.single_tile_behavior;
+
+    pInfo->tile_topology.row = ((blk->topo_loc_high.row << 5) | blk->topology_low.row) + 1;
+    pInfo->tile_topology.col = ((blk->topo_loc_high.col << 5) | blk->topology_low.col) + 1;
+
+    pInfo->tile_location.x = (blk->topo_loc_high.x << 5) | blk->location_low.x;
+    pInfo->tile_location.y = (blk->topo_loc_high.y << 5) | blk->location_low.y;
+
+    pInfo->native_resolution.width = ((blk->native_resolution.width_high << 8) | blk->native_resolution.width_low) + 1;
+    pInfo->native_resolution.height = ((blk->native_resolution.height_high << 8) | blk->native_resolution.height_low) + 1;
+
+    pInfo->bezel_info.pixel_density = blk->bezel_info.pixel_density;
+    pInfo->bezel_info.top = (blk->bezel_info.top * blk->bezel_info.pixel_density) / 10;
+    pInfo->bezel_info.bottom = (blk->bezel_info.bottom * blk->bezel_info.pixel_density) / 10;
+    pInfo->bezel_info.right = (blk->bezel_info.right * blk->bezel_info.pixel_density) / 10;
+    pInfo->bezel_info.left = (blk->bezel_info.left * blk->bezel_info.pixel_density) / 10;
+
+    pInfo->tile_topology_id.vendor_id = (blk->topology_id.vendor_id[2] << 16) |
+                                        (blk->topology_id.vendor_id[1] << 8 ) |
+                                        blk->topology_id.vendor_id[0];
+
+    pInfo->tile_topology_id.product_id = (blk->topology_id.product_id[1] << 8) | blk->topology_id.product_id[0];
+
+    pInfo->tile_topology_id.serial_number = (blk->topology_id.serial_number[3] << 24) |
+                                            (blk->topology_id.serial_number[2] << 16) |
+                                            (blk->topology_id.serial_number[1] << 8 ) |
+                                            blk->topology_id.serial_number[0];
+
+    return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdCtaData(NvU8 * block, NVT_EDID_INFO *pInfo)
+{
+    DISPLAYID_DATA_BLOCK_HEADER * blk = (DISPLAYID_DATA_BLOCK_HEADER*)block;
+    NVT_EDID_CEA861_INFO *p861info;
+    if (blk->data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN)
+    {
+        // Assert since this error is ignored
+        nvt_assert(0);
+        return NVT_STATUS_ERR;
+    }
+
+    if (pInfo == NULL) return NVT_STATUS_SUCCESS;
+
+    p861info = &pInfo->ext861;
+
+    pInfo->ext_displayid.cea_data_block_present = 1;
+    p861info->revision = blk->revision;
+
+    // parse the CEA tags, which start at the 3rd byte of the block
+    parseCta861DataBlockInfo(&block[3], blk->data_bytes, p861info);
+
+    // update pInfo with the basic HDMI info
+    // assumes each EDID will only have one such block across multiple cta861 blocks (otherwise it may create a declaration conflict);
+    // in case of multiple such blocks, the last one takes precedence
+    parseCta861VsdbBlocks(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK);
+
+    parseCta861HfScdb(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK);
+
+    // parse HDR related information from the HDR static metadata data block
+    parseCea861HdrStaticMetadataDataBlock(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK);
+
+    // base video
+    parse861bShortTiming(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK);
+    // yuv420-only video
+    parse861bShortYuv420Timing(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK);
+    // See CEA861-F section 7.5.12 on the VFPDB block.
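+    // VFPDB = Video Format Preference Data Block; when present it lists the sink's
+    // preferred video formats in priority order, so only then do we derive the
+    // short preferred timings from it.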
+ if (p861info->total_vfpdb != 0) + { + parse861bShortPreferredTiming(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdDisplayInterfaceFeatures(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU8 i; + DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK * blk = (DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK *)block; + if (blk->header.data_bytes > DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK_MAX_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pInfo == NULL) return NVT_STATUS_SUCCESS; + + pInfo->supported_displayId2_0 = 1; + + // Color Depths + pInfo->u4.display_interface_features.rgb_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB16, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB14, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB12, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB10, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB8, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB6, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.ycbcr444_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_16, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_14, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_12, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_10, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_8, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_6, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr422_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_16, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_14, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_12, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_10, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_8, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr420_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_16, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_14b = 
DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_14, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_12, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_10, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_8, blk->supported_color_depth_ycbcr420); + + // Minimum Pixel Rate at Which YCbCr 4:2:0 Encoding Is Supported + pInfo->u4.display_interface_features.minimum_pixel_rate_ycbcr420 = blk->minimum_pixel_rate_ycbcr420; + + // Audio capability + pInfo->u4.display_interface_features.audio_capability.support_32khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_32KHZ, blk->supported_audio_capability); + pInfo->u4.display_interface_features.audio_capability.support_44_1khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_44_1KHZ, blk->supported_audio_capability); + pInfo->u4.display_interface_features.audio_capability.support_48khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_48KHZ, blk->supported_audio_capability); + + // Colorspace and EOTF combination + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt2020_eotf_smpte_st2084 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT2020_EOTF_SMPTE_ST2084, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt2020_eotf_bt2020 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT2020_EOTF_BT2020, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_dci_p3_eotf_dci_p3 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_DCI_P3_EOTF_DCI_P3, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_adobe_rgb_eotf_adobe_rgb = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_ADOBE_RGB_EOTF_ADOBE_RGB, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt709_eotf_bt1886 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT709_EOTF_BT1886, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt601_eotf_bt601 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT601_EOTF_BT601, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_srgb_eotf_srgb = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_SRGB_EOTF_SRGB, blk->supported_colorspace_eotf_combination_1); + + // Additional support Colorspace and EOTF + pInfo->u4.display_interface_features.total_additional_colorspace_eotf.total = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_COLORSPACE_EOTF_TOTAL, blk->additional_supported_colorspace_eotf_total); + + for (i = 0; i < pInfo->u4.display_interface_features.total_additional_colorspace_eotf.total; i++) + { + pInfo->u4.display_interface_features.additional_colorspace_eotf[i].support_colorspace = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_COLORSPACE, blk->additional_supported_colorspace_eotf[i]); + 
pInfo->u4.display_interface_features.additional_colorspace_eotf[i].support_eotf = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_EOTF, blk->additional_supported_colorspace_eotf[i]); + + } + return NVT_STATUS_SUCCESS; +} + +POP_SEGMENTS diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c new file mode 100644 index 0000000..efd06c6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c @@ -0,0 +1,381 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+//
+// File: nvt_edidext_displayid20.c
+//
+// Purpose: provide DisplayID 2.0 related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "edid.h"
+
+PUSH_SEGMENTS
+
+// DisplayId2 as EDID extension entry point functions
+static NVT_STATUS parseDisplayId20EDIDExtSection(DISPLAYID_2_0_SECTION *section, NVT_EDID_INFO *pEdidInfo);
+
+/**
+ *
+ * @brief Parses a displayId20 EDID extension block, storing the timings and
+ *        the other DisplayID information in pEdidInfo
+ * @param p The EDID extension block (with a DisplayID section in it)
+ * @param size Size of the displayId extension block
+ * @param pEdidInfo EDID struct containing DisplayID information and
+ *        the timings
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS
+getDisplayId20EDIDExtInfo(
+    NvU8 *p,
+    NvU32 size,
+    NVT_EDID_INFO *pEdidInfo)
+{
+    DISPLAYID_2_0_SECTION *extSection = NULL;
+
+    if (p == NULL ||
+        size != sizeof(EDIDV1STRUC) ||
+        p[0] != NVT_EDID_EXTENSION_DISPLAYID ||
+        pEdidInfo == NULL)
+    {
+        return NVT_STATUS_INVALID_PARAMETER;
+    }
+
+    // Validate the checksum over the whole DisplayID20 extension block
+    if (computeDisplayId20SectionCheckSum(p, sizeof(EDIDV1STRUC)) != 0)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    extSection = (DISPLAYID_2_0_SECTION *)(p + 1);
+
+    return parseDisplayId20EDIDExtSection(extSection, pEdidInfo);
+}
+
+/*
+ * @brief DisplayId20 as EDID extension block's "Section" entry point functions
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS
+parseDisplayId20EDIDExtSection(
+    DISPLAYID_2_0_SECTION * extSection,
+    NVT_EDID_INFO *pEdidInfo)
+{
+    NvU8 datablock_location = 0;
+    NvU8 datablock_length;
+    NvU8 remaining_length;
+
+    if ((extSection == NULL) ||
+        (extSection->header.section_bytes != 121))
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    // This is based on the DisplayID v2.0 Errata E7:
+    // the first DisplayID2.0 section as EDID extension shall populate the "Display Product Primary Use Case" byte with a value from 1h-8h based on the intended primary use case of the sink.
+    // Any subsequent DisplayID2.0 section EDID extension shall set the "Display Product Primary Use Case" byte to 0h.
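+    // Concretely, a valid layout per that errata: extension #1 carries product_type
+    // in 1h..8h with extension_count 0, and any extension after the first carries
+    // product_type 0h; the checks below assert exactly that.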
+    pEdidInfo->total_did2_extensions++;
+
+    if (extSection->header.version == DISPLAYID_2_0_VERSION)
+    {
+        if (((pEdidInfo->total_did2_extensions == 1) && (extSection->header.product_type == 0 ||
+             extSection->header.product_type > DISPLAYID_2_0_PROD_HMD_AR ||
+             extSection->header.extension_count != 0)) ||
+            (pEdidInfo->total_did2_extensions > 1 && extSection->header.product_type != 0))
+        {
+            nvt_assert(0); // product_type value set incorrectly in the Display Product Primary Use Case field
+        }
+
+        pEdidInfo->ext_displayid20.version = extSection->header.version;
+        pEdidInfo->ext_displayid20.revision = extSection->header.revision;
+        pEdidInfo->ext_displayid20.as_edid_extension = NV_TRUE;
+    }
+    else
+    {
+        return NVT_STATUS_INVALID_PARAMETER;
+    }
+
+    // validate the section checksum before processing the data blocks
+    if (computeDisplayId20SectionCheckSum((const NvU8*)extSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(extSection->header)) != 0)
+    {
+        return NVT_STATUS_ERR;
+    }
+
+    remaining_length = extSection->header.section_bytes;
+
+    while (datablock_location < extSection->header.section_bytes)
+    {
+        DISPLAYID_2_0_DATA_BLOCK_HEADER * dbHeader = (DISPLAYID_2_0_DATA_BLOCK_HEADER *) (extSection->data + datablock_location);
+        NvU8 is_reserve = remaining_length > 3 && datablock_location == 0 && dbHeader->type == 0 && dbHeader->data_bytes > 0;
+        NvU8 i;
+
+        // Check the padding.
+        if (dbHeader->type == 0 && !is_reserve)
+        {
+            for (i = 1; i < remaining_length; i++)
+            {
+                // All remaining bytes must be 0.
+                if (extSection->data[datablock_location + i] != 0)
+                {
+                    return NVT_STATUS_ERR;
+                }
+            }
+
+            datablock_length = remaining_length;
+        }
+        else
+        {
+            if (parseDisplayId20EDIDExtDataBlocks((NvU8 *)(extSection->data + datablock_location),
+                                                  extSection->header.section_bytes - datablock_location,
+                                                  &datablock_length,
+                                                  pEdidInfo) != NVT_STATUS_SUCCESS)
+                return NVT_STATUS_ERR;
+        }
+
+        datablock_location += datablock_length;
+        remaining_length -= datablock_length;
+    }
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief DisplayId20 as EDID extension block's "Data Block" entry point functions.
+ *        For validation, pass a NULL pEdidInfo; the client then checks the return value.
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS
+parseDisplayId20EDIDExtDataBlocks(
+    NvU8 *pDataBlock,
+    NvU8 RemainSectionLength,
+    NvU8 *pCurrentDBLength,
+    NVT_EDID_INFO *pEdidInfo)
+{
+    DISPLAYID_2_0_DATA_BLOCK_HEADER * block_header = (DISPLAYID_2_0_DATA_BLOCK_HEADER *) pDataBlock;
+    NVT_STATUS status = NVT_STATUS_SUCCESS;
+    NVT_DISPLAYID_2_0_INFO *pDisplayId20Info = NULL;
+
+    // size sanity checking
+    if ((pDataBlock == NULL || RemainSectionLength <= NVT_DISPLAYID_DATABLOCK_HEADER_LEN) ||
+        (block_header->data_bytes > RemainSectionLength - NVT_DISPLAYID_DATABLOCK_HEADER_LEN))
+        return NVT_STATUS_ERR;
+
+    if (block_header->type < DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY)
+    {
+        return NVT_STATUS_INVALID_PARAMETER;
+    }
+
+    if (pEdidInfo != NULL)
+    {
+        pDisplayId20Info = &pEdidInfo->ext_displayid20;
+    }
+
+    *pCurrentDBLength = block_header->data_bytes + NVT_DISPLAYID_DATABLOCK_HEADER_LEN;
+
+    status = parseDisplayId20DataBlock(block_header, pDisplayId20Info);
+
+    if (pDisplayId20Info == NULL) return status;
+
+    // TODO: all the data blocks shall sync their data from the DisplayID2_0 data block to pEdidInfo
+    if (status == NVT_STATUS_SUCCESS && pDisplayId20Info->as_edid_extension == NV_TRUE)
+    {
+        switch (block_header->type)
+        {
+        case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY:
+            pDisplayId20Info->valid_data_blocks.product_id_present = NV_TRUE;
+            break;
+        case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES:
+            pDisplayId20Info->valid_data_blocks.interface_feature_present = NV_TRUE;
+
+            // "Supported" means the color depth is supported for all supported timings, and supported
+            // timings include all DisplayID exposed timings (that is, timings exposed using DisplayID
+            // timing types and CTA VICs)
+            if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv444.bpcs))
+            {
+                pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_444;
+            }
+
+            if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv422.bpcs))
+            {
+                pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_422;
+            }
+
+            if (pDisplayId20Info->interface_features.audio_capability.support_48khz ||
+                pDisplayId20Info->interface_features.audio_capability.support_44_1khz ||
+                pDisplayId20Info->interface_features.audio_capability.support_32khz)
+            {
+                pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO;
+            }
+
+            break;
+
+        // Defined by DisplayID_v2.0 Errata E5:
+        // if a 420 VDB/CMDB exists inside the embedded CTA block, we follow those two blocks only.
+        // * support for 420 pixel encoding is limited to the restricted set of timings exposed in the CTA data block.
+        // * the "Minimum Pixel Rate at YCbCr420" field shall be set to 00h.
+        case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA:
+            pDisplayId20Info->valid_data_blocks.cta_data_present = NV_TRUE;
+
+            // copy all the vendor specific data blocks from DisplayId20 to pEdidInfo
+            // TODO: a mix of CTA extension blocks and DID2.0 extension blocks is not handled
+            NVMISC_MEMCPY(&pEdidInfo->hdmiLlcInfo, &pDisplayId20Info->vendor_specific.hdmiLlc, sizeof(NVT_HDMI_LLC_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->hdmiForumInfo, &pDisplayId20Info->vendor_specific.hfvs, sizeof(NVT_HDMI_FORUM_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->nvdaVsdbInfo, &pDisplayId20Info->vendor_specific.nvVsdb, sizeof(NVDA_VSDB_PARSED_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->msftVsdbInfo, &pDisplayId20Info->vendor_specific.msftVsdb, sizeof(MSFT_VSDB_PARSED_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->hdr_static_metadata_info, &pDisplayId20Info->cta.hdrInfo, sizeof(NVT_HDR_STATIC_METADATA));
+            NVMISC_MEMCPY(&pEdidInfo->dv_static_metadata_info, &pDisplayId20Info->cta.dvInfo, sizeof(NVT_DV_STATIC_METADATA));
+
+            // If the CTA861 extension already exists, we need to transfer its revision/basic_caps to the CTA data embedded in DID20.
+            if (pEdidInfo->ext861.revision >= NVT_CEA861_REV_B)
+            {
+                pDisplayId20Info->cta.cta861_info.revision = pEdidInfo->ext861.revision;
+                pDisplayId20Info->cta.cta861_info.basic_caps = pEdidInfo->ext861.basic_caps;
+                pDisplayId20Info->basic_caps = pEdidInfo->ext861.basic_caps;
+            }
+
+            // this is the DisplayID20 extension, so we need to copy the raw CTA data in DID20 into the EDID's CTA block
+            NVMISC_MEMCPY(&pEdidInfo->ext861, &pDisplayId20Info->cta.cta861_info, sizeof(NVT_EDID_CEA861_INFO));
+            break;
+
+        case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM:
+            pDisplayId20Info->valid_data_blocks.parameters_present = NV_TRUE;
+
+            // EDID only supports 10-bit chromaticity, matching the OS D3DKMDT_2DOFFSET 10-bit values, so we don't need to transfer it here.
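+            // gamma_x100 appears to store gamma scaled by 100 (e.g. a 2.2 gamma as 220),
+            // which matches the units pEdidInfo->gamma already uses, so it is assigned
+            // directly below (an observation from the field names, not from the spec text).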
+ + pEdidInfo->input.u.digital.bpc = NVT_COLORDEPTH_HIGHEST_BPC(pDisplayId20Info->display_param.native_color_depth); + pEdidInfo->gamma = pDisplayId20Info->display_param.gamma_x100; + + if (pDisplayId20Info->display_param.audio_speakers_integrated == AUDIO_SPEAKER_INTEGRATED_SUPPORTED) + { + pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + pDisplayId20Info->valid_data_blocks.stereo_interface_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + pDisplayId20Info->valid_data_blocks.tiled_display_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + pDisplayId20Info->valid_data_blocks.container_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + pDisplayId20Info->valid_data_blocks.type7Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + pDisplayId20Info->valid_data_blocks.type8Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + pDisplayId20Info->valid_data_blocks.type9Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_10: + pDisplayId20Info->valid_data_blocks.type10Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + pDisplayId20Info->valid_data_blocks.dynamic_range_limit_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_ADAPTIVE_SYNC: + pDisplayId20Info->valid_data_blocks.adaptive_sync_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + pDisplayId20Info->valid_data_blocks.vendor_specific_present = NV_TRUE; + break; + default: + break; + } + } + + return status; +} + +/* @brief Update the correct color format / attribute of timings from interface feature data block + */ +CODE_SEGMENT(PAGE_DD_CODE) +void +updateColorFormatForDisplayId20ExtnTimings( + NVT_EDID_INFO *pInfo, + NvU32 timingIdx) +{ + // pDisplayId20Info parsed displayID20 info + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info = &pInfo->ext_displayid20; + NVT_TIMING *pT= &pInfo->timing[timingIdx]; + + nvt_assert(timingIdx <= COUNT(pInfo->timing)); + + if (pDisplayId20Info->as_edid_extension) + { + if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED || + pInfo->ext861.valid.H14B_VSDB || pInfo->ext861.valid.H20_HF_VSDB) && pInfo->ext861.revision >= NVT_CEA861_REV_A) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + } + + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv444.bpc.bpc8, + pDisplayId20Info->interface_features.yuv444.bpc.bpc10, + pDisplayId20Info->interface_features.yuv444.bpc.bpc12, + pDisplayId20Info->interface_features.yuv444.bpc.bpc14, + pDisplayId20Info->interface_features.yuv444.bpc.bpc16); + // 
yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv422.bpc.bpc8, + pDisplayId20Info->interface_features.yuv422.bpc.bpc10, + pDisplayId20Info->interface_features.yuv422.bpc.bpc12, + pDisplayId20Info->interface_features.yuv422.bpc.bpc14, + pDisplayId20Info->interface_features.yuv422.bpc.bpc16); + + if (!NVT_DID20_TIMING_IS_CTA861(pInfo->timing[timingIdx].etc.flag, pInfo->timing[timingIdx].etc.status)) + { + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, /* yuv420 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv420.bpc.bpc8, + pDisplayId20Info->interface_features.yuv420.bpc.bpc10, + pDisplayId20Info->interface_features.yuv420.bpc.bpc12, + pDisplayId20Info->interface_features.yuv420.bpc.bpc14, + pDisplayId20Info->interface_features.yuv420.bpc.bpc16); + } + } +} + +POP_SEGMENTS diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c new file mode 100644 index 0000000..405a16c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c @@ -0,0 +1,138 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_gtf.c +// +// Purpose: calculate gtf timing +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +// calculate GTF timing + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +const NvU32 NVT_GTF_CELL_GRAN=8; +const NvU32 NVT_GTF_MIN_VSYNCBP=11; // in 550us (!!) 
[1000000:550 = 20000:11] +const NvU32 NVT_GTF_MIN_VPORCH=1; + +const NvU32 NVT_GTF_C_PRIME=30; // (gtf_C-gtf_J)*gtf_K/256+gtf_J; +const NvU32 NVT_GTF_M_PRIME=300; // NVT_GTFK/256*gtf_M; +const NvU32 NVT_GTF_VSYNC_RQD=3; +const NvU32 NVT_GTF_HSYNC_PERCENTAGE=8; // 8% HSync for GTF + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcGTF(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwVSyncBP, dwVTotal, dwIdN, dwIdD, dwHBlank, dwHTCells, dwHSync, dwHFrontPorch, dwRefreshRate; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + dwRefreshRate = rr; + dwXCells = a_div_b(width, NVT_GTF_CELL_GRAN); + + if(dwRefreshRate * NVT_GTF_MIN_VSYNCBP >= 20000) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // H period estimate less than 0 + + dwVSyncBP = a_div_b((height + NVT_GTF_MIN_VPORCH) * NVT_GTF_MIN_VSYNCBP * dwRefreshRate, + (20000 - NVT_GTF_MIN_VSYNCBP * dwRefreshRate)); + dwVTotal = dwVSyncBP + height + NVT_GTF_MIN_VPORCH; + + // Calculate the numerator and denominator of Ideal Duty Cycle + // NOTE: here dwIdN/dwIdN = IdealDutyCycle/GTF_C_Prime + dwIdD = dwVTotal * dwRefreshRate; + + if(dwIdD <= NVT_GTF_M_PRIME * 1000 / NVT_GTF_C_PRIME) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // Ideal duty cycle less than 0 + + dwIdN = dwIdD - NVT_GTF_M_PRIME * 1000 / NVT_GTF_C_PRIME; + + // A proper way to calculate dwXCells*dwIdN/(100*dwIdD/GTF_C_PRIME-dwIdN) + dwHBlank = axb_div_c(dwIdN*3, dwXCells, 2*(300*dwIdD/NVT_GTF_C_PRIME - dwIdN*3)); + dwHBlank = ( dwHBlank ) * 2 * NVT_GTF_CELL_GRAN; + dwHTCells = dwXCells + dwHBlank / NVT_GTF_CELL_GRAN; + dwHSync = a_div_b(dwHTCells * NVT_GTF_HSYNC_PERCENTAGE, 100) * NVT_GTF_CELL_GRAN; + if((dwHSync == 0) || (dwHSync*2 > dwHBlank)) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // HSync too small or too big. 
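+    // GTF carves the sync pulse out of the leading half of the blanking interval:
+    // the front porch plus the sync width make up dwHBlank/2, so e.g. with
+    // dwHBlank = 384 and dwHSync = 128 the front porch below comes out to
+    // 384/2 - 128 = 64 pixels (illustrative numbers, not a specific mode).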
+ + dwHFrontPorch = dwHBlank/2-dwHSync; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + + pT->HVisible = (NvU16)(dwXCells*NVT_GTF_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)(dwHTCells*NVT_GTF_CELL_GRAN); + pT->HFrontPorch = (NvU16)dwHFrontPorch; + pT->HSyncWidth = (NvU16)dwHSync; + + pT->VTotal = (NvU16)dwVTotal; + pT->VFrontPorch = (NvU16)NVT_GTF_MIN_VPORCH; + pT->VSyncWidth = (NvU16)NVT_GTF_VSYNC_RQD; + + // A proper way to calculate fixed HTotal*VTotal*Rr/10000 + pT->pclk = axb_div_c(dwHTCells*dwVTotal, dwRefreshRate, 10000/NVT_GTF_CELL_GRAN); + + pT->HSyncPol = NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + pT->interlaced = 0; + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + pT->etc.status = NVT_STATUS_GTF; + NVT_SNPRINTF((char *)pT->etc.name, 40, "GTF:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + pT->etc.rgb444.bpc.bpc8 = 1; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + + return NVT_STATUS_SUCCESS; +} + +POP_SEGMENTS diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c new file mode 100644 index 0000000..7ff4a6e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c @@ -0,0 +1,192 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+// +// File: nvt_tv.c +// +// Purpose: calculate TV-based timings +// +//***************************************************************************** + +#include "nvBinSegment.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +static const NVT_TIMING TV_TIMING[] = +{ + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10,6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_NTSC_M, "SDTV:NTSC_M"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10,6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_NTSC_J, "SDTV:NTSC_J"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_M, "SDTV:PAL_M"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_A, "SDTV:PAL_A"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_N, "SDTV:PAL_N"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_NC, "SDTV:PAL_NC"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10,6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_480I, "HDTV(analog):480i"}}, + {720, 0,15,8, 858, NVT_H_SYNC_NEGATIVE,480, 0,10,4,525, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,2700, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_480P, "HDTV(analog):480p"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_576I, "HDTV(analog):576i"}}, + {720, 0,10,8, 864, NVT_H_SYNC_NEGATIVE,576, 0,5, 4,625, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,2700, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_576P, "HDTV(analog):576p"}}, + {1280,0,70,80, 1650,NVT_H_SYNC_NEGATIVE,720,0,5, 5,750, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,7418, {0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_720P, "HDTV(analog):720p"}}, + {1920,0,44,88,2200,NVT_H_SYNC_NEGATIVE,540, 0,2, 5,562, NVT_V_SYNC_NEGATIVE,NVT_INTERLACED, 7418, {0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080I, "HDTV(analog):1080i"}}, + {1920,0,44,88,2200,NVT_H_SYNC_NEGATIVE,1080,0,4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,14835,{0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P, "HDTV(analog):1080p"}}, + {1280,0,400,80,1980,NVT_H_SYNC_NEGATIVE,720,0,5, 5,750, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,7425, {0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_720P50, "HDTV(analog):720p50"}}, + {1920,0,594,88,2750,NVT_H_SYNC_NEGATIVE,1080,0,4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,7425,{0,24,24000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P24,"HDTV(analog):1080p24"}}, + {1920,0,484,88,2640,NVT_H_SYNC_NEGATIVE,540, 0,4, 5,562, NVT_V_SYNC_NEGATIVE,NVT_INTERLACED, 7425,{0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080I50,"HDTV(analog):1080i50"}}, + {1920,0,484,88,2640,NVT_H_SYNC_NEGATIVE,1080,0,4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,14850,{0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P50,"HDTV(analog):1080p50"}}, + {0,0,0,0,0,NVT_H_SYNC_NEGATIVE,0,0,0,0,0,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,0,{0,0,0,0,0,{0},{0},{0},{0},0,""}} +}; + 
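+// Editorial sketch (not part of the original file): TV_TIMING above ends with
+// an all-zero sentinel entry, so it can be walked without a separate element
+// count, exactly as NvTiming_GetTvTiming does below.
+static NvU32 TvTimingTableEntries(void)
+{
+    NvU32 n = 0;
+    while (TV_TIMING[n].HVisible != 0) // stop at the zero sentinel row
+    {
+        n++;
+    }
+    return n;
+}
+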
+//*********************************************** +//** Wrapper Structure to store Fake EDID data ** +//*********************************************** +typedef struct tagFAKE_TV_EDID +{ + NvU32 EdidType; + NvU32 EdidSize; + const NvU8* FakeEdid; +} FAKE_TV_EDID; + +// calculate the backend TV timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetTvTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 tvFormat, NVT_TIMING *pT) +{ + NvU32 i, j, k; + + // input check + if (pT == NULL) + return NVT_STATUS_ERR; + + if ((width == 0 || height == 0 || rr == 0) && tvFormat >= NVT_MAX_TV_FORMAT) + return NVT_STATUS_ERR; + + // handle double scan + if (height <= NVT_PVT_DOUBLE_SCAN_HEIGHT) + { + width <<= 1; + height <<= 1; + } + + // try the exact match first + if (tvFormat != NVT_AUTO_HDTV_FORMAT) + { + i = 0; + while (TV_TIMING[i].HVisible != 0) + { + if (NVT_GET_TIMING_STATUS_SEQ(TV_TIMING[i].etc.status) == tvFormat) + { + // found the match + *pT = TV_TIMING[i]; + return NVT_STATUS_SUCCESS; + } + + // move to the next entry + i++; + } + + // unknown TV format, return failure here + *pT = TV_TIMING[0]; + return NVT_STATUS_ERR; + } + + // we are doing auto HDTV format binding here + i = 0; + j = k = sizeof(TV_TIMING)/sizeof(TV_TIMING[0]) - 1; + while (TV_TIMING[i].HVisible != 0) + { + // #1: try the exact resolution/refresh-rate/interlaced match + if (width == TV_TIMING[i].HVisible && + height == frame_height(TV_TIMING[i]) && + rr == TV_TIMING[i].etc.rr && + !!(flag & NVT_PVT_INTERLACED_MASK) == !!TV_TIMING[i].interlaced && + NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + // exact match, return from here + *pT = TV_TIMING[i]; + return NVT_STATUS_SUCCESS; + } + + // #2: try the closest match with the interlaced check ON + if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!TV_TIMING[i].interlaced && + NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + if (abs_delta(width, TV_TIMING[i].HVisible) <= abs_delta(width, TV_TIMING[j].HVisible) && + abs_delta(height, frame_height(TV_TIMING[i])) <= abs_delta(height, frame_height(TV_TIMING[j])) && + abs_delta(rr, TV_TIMING[i].etc.rr) <= abs_delta(rr, TV_TIMING[j].etc.rr) && + width <= TV_TIMING[i].HVisible && + height <= frame_height(TV_TIMING[i])) + { + j = i; + } + } + + // #3: try the closest match with the interlaced check OFF + if (NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + if (abs_delta(width, TV_TIMING[i].HVisible) <= abs_delta(width, TV_TIMING[k].HVisible) && + abs_delta(height, frame_height(TV_TIMING[i])) <= abs_delta(height, frame_height(TV_TIMING[k])) && + abs_delta(rr, TV_TIMING[i].etc.rr) <= abs_delta(rr, TV_TIMING[k].etc.rr) && // compare against k, the running best of this pass (the original compared against j) + width <= TV_TIMING[i].HVisible && + height <= frame_height(TV_TIMING[i])) + { + k = i; + } + } + + // move to the next entry + i++; + } + + // return the closest matched timing here + if (TV_TIMING[j].HVisible != 0) + { + *pT = TV_TIMING[j]; + } + else if (TV_TIMING[k].HVisible != 0) + { + *pT = TV_TIMING[k]; + } + else + { + *pT = TV_TIMING[0]; + } + + // set the mismatch status + if (pT->HVisible != width || frame_height(*pT) != height) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_SIZE); + } + if (pT->etc.rr != rr) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_RR); + } + if (!!pT->interlaced != !!(flag & NVT_PVT_INTERLACED_MASK)) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_FORMAT); + } + + return NVT_STATUS_SUCCESS; + +} + 
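+// Usage sketch (editorial example, not part of the original file): request the
+// closest auto-bound HDTV timing and check the mismatch flags afterwards; the
+// resolution and refresh rate below are arbitrary.
+static NVT_STATUS PickHdtv1080p60(NVT_TIMING *pT)
+{
+    NVT_STATUS status = NvTiming_GetTvTiming(1920, 1080, 60, 0 /* progressive */,
+                                             NVT_AUTO_HDTV_FORMAT, pT);
+    if (status == NVT_STATUS_SUCCESS &&
+        NVT_GET_TIMING_STATUS_MISMATCH(pT->etc.status) == 0)
+    {
+        // pT now holds an exact HDTV backend timing for 1920x1080 @ 60Hz.
+    }
+    return status;
+}
+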
+POP_SEGMENTS diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c new file mode 100644 index 0000000..0c181e3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c @@ -0,0 +1,370 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_util.c +// +// Purpose: provide the utility functions for the timing library +// +//***************************************************************************** + +#include "nvBinSegment.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +// The following table was generated w/ this program: +/* +#include <stdio.h> + +#define CRC32_POLYNOMIAL 0xEDB88320 + +int main(void) +{ + unsigned int crc = 0, i = 0, j = 0; + unsigned int CRCTable[256]; + + for (i = 0; i < 256 ; i++) + { + crc = i; + for (j = 8; j > 0; j--) + { + if (crc & 1) + crc = (crc >> 1) ^ CRC32_POLYNOMIAL; + else + crc >>= 1; + } + CRCTable[i] = crc; + } + + printf("static const NvU32 s_CRCTable[256] = {"); + for (i = 0; i < 256; i++) + { + printf("%s0x%08X%s", + ((i % 10 == 0) ? "\n " : ""), + CRCTable[i], + ((i != 255) ? 
", " : " ")); + } + printf("};\n"); +} +*/ +static const NvU32 s_CRCTable[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, + 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, + 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, + 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, + 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, + 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, + 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, + 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, + 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, + 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, + 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, + 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, + 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, + 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 a_div_b(NvU32 a, NvU32 b) +{ + if (b == 0) + return 0xFFFFFFFF; + + return (a + b/2)/b; +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 axb_div_c(NvU32 a, NvU32 b, NvU32 c) +{ + NvU32 AhxBl, AlxBh; + NvU32 AxB_high, AxB_low; + NvU32 AxB_div_C_low; + + if (c==0) + return 0xFFFFFFFF; + + // calculate a*b + 
AhxBl = (a>>16)*(b&0xFFFF); + AlxBh = (a&0xFFFF)*(b>>16); + + AxB_high = (a>>16) * (b>>16); + AxB_low = (a&0xFFFF) * (b&0xFFFF); + + AxB_high += AlxBh >> 16; + AxB_high += AhxBl >> 16; + + if ((AxB_low + (AlxBh<<16))< AxB_low) + AxB_high ++; + AxB_low += AlxBh << 16; + + if ((AxB_low + (AhxBl<<16)) < AxB_low) + AxB_high ++; + AxB_low += AhxBl << 16; + + AxB_div_C_low = AxB_low/c; + AxB_div_C_low += 0xFFFFFFFF / c * (AxB_high % c); + AxB_div_C_low += ((0xFFFFFFFF % c + 1) * (AxB_high % c) + (AxB_low % c) + c/2) / c; + + + return AxB_div_C_low; +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c) +{ + // NvU64 arithmetic to keep precision and avoid floats + // a*b/c = (a/c)*b + ((a%c)*b + c/2)/c + return ((a/c)*b + ((a%c)*b + c/2)/c); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 calculateCRC32(NvU8* pBuf, NvU32 bufsize) +{ + NvU32 crc32 = 0xFFFFFFFF, temp1, temp2, count = bufsize; + + if (bufsize == 0 || pBuf == NULL) + { + return 0; + } + + while (count-- != 0) + { + temp1 = (crc32 >> 8) & 0x00FFFFFF; + temp2 = s_CRCTable[(crc32 ^ *pBuf++) & 0xFF]; + crc32 = temp1 ^ temp2; + } + crc32 ^= 0xFFFFFFFF; + + return crc32; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool isChecksumValid(NvU8 *pBuf) +{ + NvU8 i; + NvU8 checksum = 0; + + for (i= 0; i < NVT_EDID_BLOCK_SIZE; i++) + { + checksum += pBuf[i]; + } + + if ((checksum & 0xFF) == 0) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void patchChecksum(NvU8 *pBuf) +{ + NvU8 i; + NvU8 chksum = 0; + + for (i = 0; i < NVT_EDID_BLOCK_SIZE; i++) + { + chksum += pBuf[i]; + } + chksum &= 0xFF; + + // The 1-byte sum of all 128 bytes in this EDID block shall equal zero + // The Checksum Byte (at address 7Fh) shall contain a value such that a checksum of the entire + // 128-byte BASE EDID equals 00h. + if (chksum) + { + pBuf[127] = 0xFF & (pBuf[127] + (0x100 - chksum)); + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ComposeCustTimingString(NVT_TIMING *pT) +{ + if (pT == NULL) + return NVT_STATUS_ERR; + + NVT_SNPRINTF((char *)pT->etc.name, 40, "CUST:%dx%dx%d.%03dHz%s",pT->HVisible, (pT->interlaced ? 2 : 1)*pT->VVisible , pT->etc.rrx1k/1000, pT->etc.rrx1k%1000, (pT->interlaced ? 
"/i" : "")); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU16 NvTiming_CalcRR(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal) +{ + NvU16 rr = 0; + + if (interlaced) + { + NvU32 totalPixelsIn2Fields = (NvU32)HTotal * ((NvU32)VTotal * 2 + 1); + + if (totalPixelsIn2Fields != 0) + { + rr = (NvU16)axb_div_c(pclk * 2, 10000, totalPixelsIn2Fields); + } + } + else + { + NvU32 totalPixels = (NvU32)HTotal * VTotal; + + if (totalPixels != 0) + { + rr = (NvU16)axb_div_c(pclk, 10000, totalPixels); + } + } + return rr; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalcRRx1k(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal) +{ + NvU32 rrx1k = 0; + + if (interlaced) + { + NvU32 totalPixelsIn2Fields = (NvU32)HTotal * ((NvU32)VTotal * 2 + 1); + + if (totalPixelsIn2Fields != 0) + { + rrx1k = (NvU32)axb_div_c(pclk * 2, 10000000, totalPixelsIn2Fields); + } + } + else + { + NvU32 totalPixels = (NvU32)HTotal * VTotal; + + if (totalPixels != 0) + { + rrx1k = (NvU32)axb_div_c(pclk, 10000000, totalPixels); + } + } + + return rrx1k; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_IsRoundedRREqual(NvU16 rr1, NvU32 rr1x1k, NvU16 rr2) +{ + return ((rr1 >= (rr1x1k/1000)) && (rr1 <= (rr1x1k + 500) / 1000) && + (rr2 >= (rr1x1k/1000)) && (rr2 <= (rr1x1k + 500) / 1000)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingExactEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + if ((pT1 == NULL) || (pT2 == NULL)) + { + return 0; + } + + return (( pT1->HVisible == pT2->HVisible) && + ( pT1->HBorder == pT2->HBorder) && + ( pT1->HFrontPorch == pT2->HFrontPorch) && + ( pT1->HSyncWidth == pT2->HSyncWidth) && + ( pT1->HSyncPol == pT2->HSyncPol) && + ( pT1->HTotal == pT2->HTotal) && + ( pT1->VVisible == pT2->VVisible) && + ( pT1->VBorder == pT2->VBorder) && + ( pT1->VFrontPorch == pT2->VFrontPorch) && + ( pT1->VSyncWidth == pT2->VSyncWidth) && + ( pT1->VSyncPol == pT2->VSyncPol) && + ( pT1->VTotal == pT2->VTotal) && + ( pT1->etc.rr == pT2->etc.rr) && + (!!pT1->interlaced == !!pT2->interlaced)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingExactEqualEx(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + NvU32 bIsTimingExactEqual = NvTiming_IsTimingExactEqual(pT1, pT2); + return (bIsTimingExactEqual && (pT1->etc.rrx1k == pT2->etc.rrx1k)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingRelaxedEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + if ((pT1 == NULL) || (pT2 == NULL)) + { + return 0; + } + + return (( pT1->HVisible == pT2->HVisible) && + ( pT1->HBorder == pT2->HBorder) && + ( pT1->HFrontPorch == pT2->HFrontPorch) && + ( pT1->HSyncWidth == pT2->HSyncWidth) && + //( pT1->HSyncPol == pT2->HSyncPol) && // skip the polarity check to tolerate mismatch h/v sync polarities in 18-byte DTD + ( pT1->HTotal == pT2->HTotal) && + ( pT1->VVisible == pT2->VVisible) && + ( pT1->VBorder == pT2->VBorder) && + ( pT1->VFrontPorch == pT2->VFrontPorch) && + ( pT1->VSyncWidth == pT2->VSyncWidth) && + //( pT1->VSyncPol == pT2->VSyncPol) && // skip the polarity check to tolerate mismatch h/v sync polarities in 18-byte DTD + ( pT1->VTotal == pT2->VTotal) && + ( pT1->etc.rr == pT2->etc.rr) && + (!!pT1->interlaced == !!pT2->interlaced)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 RRx1kToPclk (NVT_TIMING *pT) +{ + return axb_div_c(pT->HTotal * (pT->VTotal + ((pT->interlaced != 0) ? (pT->VTotal + 1) : 0)), + pT->etc.rrx1k, + 1000 * ((pT->interlaced != 0) ? 
20000 : 10000)); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU16 NvTiming_MaxFrameWidth(NvU16 HVisible, NvU16 repMask) +{ + NvU16 minPixelRepeat; + + if (repMask == 0) + { + return HVisible; + } + + minPixelRepeat = 1; + while ((repMask & 1) == 0) + { + repMask >>= 1; + minPixelRepeat++; + } + + return (HVisible / minPixelRepeat); +} + +POP_SEGMENTS diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h new file mode 100644 index 0000000..2c53e36 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h @@ -0,0 +1,5415 @@ +//**************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvtiming.h +// +// Purpose: This file is the common header for all nv timing library clients. +// +//***************************************************************************** + +#ifndef __NVTIMING_H__ +#define __NVTIMING_H__ + +#include "nvtypes.h" + + +#define abs_delta(a,b) ((a)>(b)?((a)-(b)):((b)-(a))) + +//*********************** +// The Timing Structure +//*********************** +// +// Nvidia specific timing extras +typedef struct tagNVT_HDMIEXT +{ + // in the case of stereo, the NVT_TIMING structure will hold the 2D + // instance of the timing parameters, and the stereo extension will + // contain the variants required to produce the stereo frame. 
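+    // (Editorial note, not in the original source: for example, with HDMI 1.4
+    // frame packing the transmitted 3D frame stacks two active regions
+    // separated by VActiveSpace lines, so the raster derived from this
+    // extension is taller than the 2D VVisible; the exact layout depends on
+    // StereoStructureType.)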
+ NvU8 StereoStructureType; + NvU8 SideBySideHalfDetail; + NvU16 VActiveSpace[2]; +} NVT_HDMIEXT; +#define NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(x) (1 << (x)) +#define NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK) +#define NVT_HDMI_3D_SUPPORTED_FIELD_ALT_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT) +#define NVT_HDMI_3D_SUPPORTED_LINE_ALT_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT) +#define NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEFULL_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL) +#define NVT_HDMI_3D_SUPPORTED_LDEPTH_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH) +#define NVT_HDMI_3D_SUPPORTED_LDEPTHGFX_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX) +#define NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM) +#define NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF) +#define NVT_ALL_HDMI_3D_STRUCT_SUPPORTED_MASK (NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK | NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK) + +typedef union tagNVT_COLORDEPTH +{ + NvU8 bpcs; + struct + { + NvU8 bpc6 : 1; + NvU8 bpc8 : 1; + NvU8 bpc10 : 1; + NvU8 bpc12 : 1; + NvU8 bpc14 : 1; + NvU8 bpc16 : 1; + NvU8 rsrvd1 : 1; // must be 0 + NvU8 rsrvd2 : 1; // must be 0 + } bpc; +} NVT_COLORDEPTH; + +#define IS_BPC_SUPPORTED_COLORFORMAT(colorDepth) (!!((NvU8)(colorDepth))) +#define UPDATE_BPC_FOR_COLORFORMAT(colorFormat, b6bpc, b8bpc, b10bpc, b12bpc, b14bpc, b16bpc) \ + if ((b6bpc)) ((colorFormat).bpc.bpc6 = 1); \ + if ((b8bpc)) ((colorFormat).bpc.bpc8 = 1); \ + if ((b10bpc)) ((colorFormat).bpc.bpc10 = 1); \ + if ((b12bpc)) ((colorFormat).bpc.bpc12 = 1); \ + if ((b14bpc)) ((colorFormat).bpc.bpc14 = 1); \ + if ((b16bpc)) ((colorFormat).bpc.bpc16 = 1); + +#define SET_BPC_FOR_COLORFORMAT(_colorFormat, _bpc) \ + if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_6) \ + ((_colorFormat).bpc.bpc6 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_8) \ + ((_colorFormat).bpc.bpc8 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_10) \ + ((_colorFormat).bpc.bpc10 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_12) \ + ((_colorFormat).bpc.bpc12 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_16) ((_colorFormat).bpc.bpc16 = 1); + +#define CLEAR_BPC_FOR_COLORFORMAT(_colorFormat, _bpc) \ + if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_6) \ + ((_colorFormat).bpc.bpc6 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_8) \ + ((_colorFormat).bpc.bpc8 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_10) \ + ((_colorFormat).bpc.bpc10 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_12) \ + ((_colorFormat).bpc.bpc12 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_16) ((_colorFormat).bpc.bpc16 = 0); + +#define NVT_COLORDEPTH_HIGHEST_BPC(_colorFormat) \ + (_colorFormat).bpc.bpc16 ? NVT_EDID_VIDEOSIGNAL_BPC_16 : \ + (_colorFormat).bpc.bpc12 ? NVT_EDID_VIDEOSIGNAL_BPC_12 : \ + (_colorFormat).bpc.bpc10 ? NVT_EDID_VIDEOSIGNAL_BPC_10 : \ + (_colorFormat).bpc.bpc8 ? NVT_EDID_VIDEOSIGNAL_BPC_8 : \ + (_colorFormat).bpc.bpc6 ? NVT_EDID_VIDEOSIGNAL_BPC_6 : NVT_EDID_VIDEOSIGNAL_BPC_NOT_DEFINED + +typedef struct tagNVT_TIMINGEXT +{ + NvU32 flag; // reserve for NV h/w based enhancement like double-scan. 
+ NvU16 rr; // the logical refresh rate to present + NvU32 rrx1k; // the physical vertical refresh rate in 0.001Hz + NvU32 aspect; // the display aspect ratio Hi(aspect):horizontal-aspect, Low(aspect):vertical-aspect + // + // Bitmask of one-hot encoded possible pixel repetitions: + // 0x1: no pixel repetition (i.e., display each pixel once) + // 0x2: each pixel is displayed twice horizontally; + // 0x3: use either no pixel repetition or display each pixel twice + // ... + // + NvU16 rep; + NVT_COLORDEPTH rgb444; // each bit within is set if rgb444 supported on that bpc + NVT_COLORDEPTH yuv444; // each bit within is set if yuv444 supported on that bpc + NVT_COLORDEPTH yuv422; // each bit within is set if yuv422 supported on that bpc + NVT_COLORDEPTH yuv420; // each bit within is set if yuv420 supported on that bpc + NvU32 status; // the timing standard being used + NvU8 name[51]; // the name of the timing +}NVT_TIMINGEXT; +// +// +//The very basic timing structure based on the VESA standard: +// +// |<----------------------------htotal--------------------------->| +// ---------"active" video-------->|<-------blanking------>|<----- +// |<-------hvisible-------->|<-hb->|<-hfp->|<-hsw->|<-hbp->|<-hb->| +// ----------+-------------------------+ | | | | | +// A A | | | | | | | +// : : | | | | | | | +// : : | | | | | | | +// :vertical| addressable video | | | | | | +// :visible| | | | | | | +// : : | | | | | | | +// : : | | | | | | | +// vertical V | | | | | | | +// total --+-------------------------+ | | | | | +// : vb border | | | | | +// : -----------------------------------+ | | | | +// : vfp front porch | | | | +// : -------------------------------------------+ | | | +// : vsw sync width | | | +// : ---------------------------------------------------+ | | +// : vbp back porch | | +// : -----------------------------------------------------------+ | +// V vb border | +// --------------------------------------------------------------------------+ +// +typedef struct tagNVT_TIMING +{ + // VESA scan out timing parameters: + NvU16 HVisible; //horizontal visible + NvU16 HBorder; //horizontal border + NvU16 HFrontPorch; //horizontal front porch + NvU16 HSyncWidth; //horizontal sync width + NvU16 HTotal; //horizontal total + NvU8 HSyncPol; //horizontal sync polarity: 1-negative, 0-positive + + NvU16 VVisible; //vertical visible + NvU16 VBorder; //vertical border + NvU16 VFrontPorch; //vertical front porch + NvU16 VSyncWidth; //vertical sync width + NvU16 VTotal; //vertical total + NvU8 VSyncPol; //vertical sync polarity: 1-negative, 0-positive + + NvU16 interlaced; //1-interlaced, 0-progressive + NvU32 pclk; //pixel clock in 10KHz + + //other timing related extras + NVT_TIMINGEXT etc; +}NVT_TIMING; + +#define NVT_MAX_TOTAL_TIMING 128 + +// +// The below VSync/HSync Polarity definitions have been inverted to match +// HW Display Class definition. +// timing related constants: +#define NVT_H_SYNC_POSITIVE 0 +#define NVT_H_SYNC_NEGATIVE 1 +#define NVT_H_SYNC_DEFAULT NVT_H_SYNC_NEGATIVE +// +#define NVT_V_SYNC_POSITIVE 0 +#define NVT_V_SYNC_NEGATIVE 1 +#define NVT_V_SYNC_DEFAULT NVT_V_SYNC_POSITIVE +// +#define NVT_PROGRESSIVE 0 +#define NVT_INTERLACED 1 +#define NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2 1 +#define NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2 2 + +// timing related macros: +#define NVT_FRAME_HEIGHT(_vvisible_, _interlaced_) ((_vvisible_) * ((_interlaced_ != 0) ? 
2 : 1)) + +//************************* +// The Timing Status encoded in +// NVT_TIMING::NVT_TIMINGEXT::status +//************************* +// +// TIMING_STATUS has the following kinds of info: +// +// NVT_TIMING::NVT_TIMINGEXT::status +// +// +----+----+---------+----+----+------------------------------+---+---------------+---+----------------+ +// bit31 bit30 bit29 bit22 bit21 bit20 bit16 bit15 bit8 bit7 bit0 +// |native|cust|<-cta format->|Dual|<--------mismatch status-------->|<---timing type--->|<---timing seq#--->| +// +// 1. the monitor preferred timing flag and cust EDID entry flag +// +#define NVT_STATUS_TIMING_NATIVE_FLAG_MASK 0x80000000 +#define NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT 31 +#define NVT_IS_NATIVE_TIMING(n) (((n)&NVT_STATUS_TIMING_NATIVE_FLAG_MASK)>>NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT) +#define NVT_SET_NATIVE_TIMING_FLAG(n) ((n)|=1U<<NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT) +#define NVT_PREFERRED_TIMING_MODE_MASK 0x2 +// +#define NVT_STATUS_TIMING_CUST_ENTRY_MASK 0x40000000 +#define NVT_STATUS_TIMING_CUST_ENTRY_SHIFT 30 +#define NVT_IS_CUST_ENTRY(n) (((n)&NVT_STATUS_TIMING_CUST_ENTRY_MASK)>>NVT_STATUS_TIMING_CUST_ENTRY_SHIFT) +#define NVT_SET_CUST_ENTRY_FLAG(n) ((n)|=1<<NVT_STATUS_TIMING_CUST_ENTRY_SHIFT) +// +// 2. the CEA/CTA 861 format index (bit29:22) and the CEA/DMT dual-standard flag (bit21) +// +#define NVT_STATUS_TIMING_CEA_FORMAT_MASK 0x3FC00000 +#define NVT_STATUS_TIMING_CEA_FORMAT_SHIFT 22 +#define NVT_GET_CEA_FORMAT(n) (((n)&NVT_STATUS_TIMING_CEA_FORMAT_MASK)>>NVT_STATUS_TIMING_CEA_FORMAT_SHIFT) +#define NVT_SET_CEA_FORMAT(n,index) {(n)&=~NVT_STATUS_TIMING_CEA_FORMAT_MASK;(n)|=(index)<<NVT_STATUS_TIMING_CEA_FORMAT_SHIFT;} +#define NVT_STATUS_TIMING_CEA_DMT_MASK 0x00200000 +#define NVT_STATUS_TIMING_CEA_DMT_SHIFT 21 +#define NVT_IS_CEA_DMT_DUAL_STANDARD(n) (((n)&NVT_STATUS_TIMING_CEA_DMT_MASK)>>NVT_STATUS_TIMING_CEA_DMT_SHIFT) +#define NVT_SET_CEA_DMT_DUAL_STANDARD_FLAG(n) ((n)|=NVT_STATUS_TIMING_CEA_DMT_MASK) +// +// +// 3. the mismatch status +#define NVT_STATUS_TIMING_MISMATCH_MASK 0x001F0000 +#define NVT_STATUS_TIMING_MISMATCH_SHIFT 16 +#define NVT_STATUS_TIMING_MISMATCH_SIZE 0x1 //visible width and height don't match with the asked width/height +#define NVT_STATUS_TIMING_MISMATCH_RR 0x2 //the refresh rate doesn't match with the requested +#define NVT_STATUS_TIMING_MISMATCH_FORMAT 0x4 //other timing info doesn't match (i.e. progressive/interlaced, double, reduced-blanking etc...) +#define NVT_STATUS_TIMING_MISMATCH_ALIGNMENT 0x8 //the asking alignment doesn't match the spec +// +// macros to set/get the timing mismatch status +#define NVT_SET_TIMING_STATUS_MISMATCH(m,n) ((m)|=(((n)<<NVT_STATUS_TIMING_MISMATCH_SHIFT)&NVT_STATUS_TIMING_MISMATCH_MASK)) +#define NVT_GET_TIMING_STATUS_MISMATCH(n) (((n)&NVT_STATUS_TIMING_MISMATCH_MASK)>>NVT_STATUS_TIMING_MISMATCH_SHIFT) +// +// +// 4. the timing type +// +#define NVT_STATUS_TIMING_TYPE_MASK 0x0000FF00 +#define NVT_STATUS_TIMING_TYPE_SHIFT 8 +// +typedef enum NVT_TIMING_TYPE +{ + NVT_TYPE_DMT = 1, // DMT + NVT_TYPE_GTF, // GTF + NVT_TYPE_ASPR, // wide aspect ratio timing, for legacy support only + NVT_TYPE_NTSC_TV, // NTSC TV timing, for legacy support only + NVT_TYPE_PAL_TV, // PAL TV timing, for legacy support only + NVT_TYPE_CVT, // CVT timing + NVT_TYPE_CVT_RB, // CVT timing with reduced blanking + NVT_TYPE_CUST, // Customized timing + NVT_TYPE_EDID_DTD, // EDID detailed timing + NVT_TYPE_EDID_STD, // EDID standard timing + NVT_TYPE_EDID_EST, // EDID established timing + NVT_TYPE_EDID_CVT, // EDID defined CVT timing (EDID 1.4) + NVT_TYPE_EDID_861ST, // EDID defined CEA/EIA 861 timing (in the EDID 861 extension) + NVT_TYPE_NV_PREDEFINED, // NV pre-defined timings (PsF timings) + NVT_TYPE_DMT_RB, // DMT timing with reduced blanking + NVT_TYPE_EDID_EXT_DTD, // EDID detailed timing in the extension + NVT_TYPE_SDTV, // SDTV timing (including NTSC, PAL etc) + NVT_TYPE_HDTV, // HDTV timing (480p,480i,720p, 1080i etc) + NVT_TYPE_SMPTE, // deprecated ? 
still used by drivers\unix\nvkms\src\nvkms-dpy.c + NVT_TYPE_EDID_VTB_EXT, // EDID defined VTB extension timing + NVT_TYPE_EDID_VTB_EXT_STD, // EDID defined VTB extension standard timing + NVT_TYPE_EDID_VTB_EXT_DTD, // EDID defined VTB extension detailed timing + NVT_TYPE_EDID_VTB_EXT_CVT, // EDID defined VTB extension cvt timing + NVT_TYPE_HDMI_STEREO, // EDID defined HDMI stereo timing + NVT_TYPE_DISPLAYID_1, // DisplayID Type 1 timing + NVT_TYPE_DISPLAYID_2, // DisplayID Type 2 timing + NVT_TYPE_HDMI_EXT, // EDID defined HDMI extended resolution timing (UHDTV - 4k, 8k etc.) + NVT_TYPE_CUST_AUTO, // Customized timing generated automatically by NVCPL + NVT_TYPE_CUST_MANUAL, // Customized timing entered manually by user + NVT_TYPE_CVT_RB_2, // CVT timing with reduced blanking V2 + NVT_TYPE_DMT_RB_2, // DMT timing with reduced blanking V2 + NVT_TYPE_DISPLAYID_7, // DisplayID 2.0 detailed timing - Type VII + NVT_TYPE_DISPLAYID_8, // DisplayID 2.0 enumerated timing - Type VIII + NVT_TYPE_DISPLAYID_9, // DisplayID 2.0 formula-based timing - Type IX + NVT_TYPE_DISPLAYID_10, // DisplayID 2.0 formula-based timing - Type X + NVT_TYPE_CVT_RB_3, // CVT timing with reduced blanking V3 +}NVT_TIMING_TYPE; +// +// 5. the timing sequence number like the TV format and EIA861B predefined timing format +// **the numbers are chosen to match with the NV h/w format** +// +#define NVT_STATUS_TIMING_SEQ_MASK 0x000000FF +// +typedef enum NVT_TV_FORMAT +{ + NVT_NTSC = 0, + NVT_NTSC_M = 0, + NVT_NTSC_J = 1, + NVT_PAL = 2, + NVT_PAL_M = 2, + NVT_PAL_A = 3, + NVT_PAL_N = 4, + NVT_PAL_NC = 5, + NVT_HD576I = 8, + NVT_HD480I, + NVT_HD480P, + NVT_HD576P, + NVT_HD720P, + NVT_HD1080I, + NVT_HD1080P, + NVT_HD720P50, + NVT_HD1080P24, + NVT_HD1080I50, + NVT_HD1080P50, + NVT_MAX_TV_FORMAT, + NVT_AUTO_SDTV_FORMAT = (NvU32)(-2), // Not supported in NvTiming_GetTvTiming + NVT_AUTO_HDTV_FORMAT = (NvU32)(-1), +}NVT_TV_FORMAT; + +#define NVT_DEFAULT_HDTV_FMT NVT_HD1080I +// +// macros to set/get the timing type and seq number +// +#define NVT_DEF_TIMING_STATUS(type, seq) ((((type)<<NVT_STATUS_TIMING_TYPE_SHIFT)&NVT_STATUS_TIMING_TYPE_MASK)|((seq)&NVT_STATUS_TIMING_SEQ_MASK)) +#define NVT_GET_TIMING_STATUS_TYPE(n) (((n)&NVT_STATUS_TIMING_TYPE_MASK)>>NVT_STATUS_TIMING_TYPE_SHIFT) +#define NVT_GET_TIMING_STATUS_SEQ(n) ((n)&NVT_STATUS_TIMING_SEQ_MASK) +// +// +// +// the timing type definitions +#define NVT_STATUS_DMT NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT, 0) // DMT +#define NVT_STATUS_GTF NVT_DEF_TIMING_STATUS(NVT_TYPE_GTF, 0) // GTF +#define NVT_STATUS_ASPR NVT_DEF_TIMING_STATUS(NVT_TYPE_ASPR, 0) // ASPR +#define NVT_STATUS_NTSC_TV NVT_DEF_TIMING_STATUS(NVT_TYPE_NTSC_TV, 0) // TVN +#define NVT_STATUS_PAL_TV NVT_DEF_TIMING_STATUS(NVT_TYPE_PAL_TV, 0) // TVP +#define NVT_STATUS_CVT NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT, 0) // CVT timing with regular blanking +#define NVT_STATUS_CVT_RB NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB, 0) // CVT_RB timing V1 +#define NVT_STATUS_CVT_RB_2 NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB_2, 0) // CVT_RB timing V2 +#define NVT_STATUS_CVT_RB_3 NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB_3, 0) // CVT_RB timing V3 +#define NVT_STATUS_CUST NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST, 0) // Customized timing +#define NVT_STATUS_EDID_DTD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, 0) +#define NVT_STATUS_EDID_STD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_STD, 0) +#define NVT_STATUS_EDID_EST NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EST, 0) +#define NVT_STATUS_EDID_CVT NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_CVT, 0) +#define NVT_STATUS_EDID_861ST NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_861ST, 0) +#define NVT_STATUS_DMT_RB NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB, 0) +#define NVT_STATUS_EDID_EXT_DTD 
NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EXT_DTD, 0) +#define NVT_STATUS_SDTV_NTSC NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC) +#define NVT_STATUS_SDTV_NTSC_M NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC) +#define NVT_STATUS_SDTV_NTSC_J NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC_J) +#define NVT_STATUS_SDTV_PAL NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL) +#define NVT_STATUS_SDTV_PAL_M NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL) +#define NVT_STATUS_SDTV_PAL_A NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_A) +#define NVT_STATUS_SDTV_PAL_N NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_N) +#define NVT_STATUS_SDTV_PAL_NC NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_NC) +#define NVT_STATUS_HDTV_480I NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD480I) +#define NVT_STATUS_HDTV_480P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD480P) +#define NVT_STATUS_HDTV_576I NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD576I) +#define NVT_STATUS_HDTV_576P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD576P) +#define NVT_STATUS_HDTV_720P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD720P) +#define NVT_STATUS_HDTV_1080I NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080I) +#define NVT_STATUS_HDTV_1080P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P) +#define NVT_STATUS_HDTV_720P50 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD720P50) +#define NVT_STATUS_HDTV_1080P24 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P24) +#define NVT_STATUS_HDTV_1080I50 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080I50) +#define NVT_STATUS_HDTV_1080P50 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P50) +#define NVT_STATUS_EDID_VTB_EXT NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT, 0) +#define NVT_STATUS_EDID_VTB_EXT_DTD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_DTD, 0) +#define NVT_STATUS_EDID_VTB_EXT_CVT NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_CVT, 0) +#define NVT_STATUS_EDID_VTB_EXT_STD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_STD, 0) +#define NVT_STATUS_HDMI_STEREO NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_STEREO, 0) +#define NVT_STATUS_DISPLAYID_1 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_1, 0) +#define NVT_STATUS_DISPLAYID_2 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_2, 0) +#define NVT_STATUS_DISPLAYID_7 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_7, 0) +#define NVT_STATUS_DISPLAYID_8 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_8, 0) +#define NVT_STATUS_DISPLAYID_9 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_9, 0) +#define NVT_STATUS_DISPLAYID_10 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_10, 0) +#define NVT_STATUS_HDMI_EXT NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_EXT, 0) +#define NVT_STATUS_CUST_AUTO NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST_AUTO, 0) +#define NVT_STATUS_CUST_MANUAL NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST_MANUAL, 0) + +// +// adding the timing sequence (from the EDID) to the modeset status +#define NVT_STATUS_DTD1 NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, 1) +#define NVT_STATUS_EDID_DTDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, n) +#define NVT_STATUS_EDID_STDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_STD, n) +#define NVT_STATUS_EDID_ESTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EST, n) +#define NVT_STATUS_EDID_CVTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_CVT, n) +#define NVT_STATUS_EDID_861STn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_861ST, n) +#define NVT_STATUS_EDID_EXT_DTDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EXT_DTD, n) +#define NVT_STATUS_CUSTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST, n) +#define NVT_TYPE_NV_PREDEFINEDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_NV_PREDEFINED, n) +#define NVT_STATUS_EDID_VTB_EXTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT, n) 
+#define NVT_STATUS_EDID_VTB_EXT_DTDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_DTD, n) +#define NVT_STATUS_EDID_VTB_EXT_STDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_STD, n) +#define NVT_STATUS_EDID_VTB_EXT_CVTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_CVT, n) +#define NVT_STATUS_HDMI_STEREO_REQn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_STEREO_REQ, n) +#define NVT_STATUS_DISPLAYID_1N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_1, n) +#define NVT_STATUS_DISPLAYID_2N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_2, n) +#define NVT_STATUS_DISPLAYID_7N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_7, n) +#define NVT_STATUS_DISPLAYID_8N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_8, n) +#define NVT_STATUS_DISPLAYID_9N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_9, n) +#define NVT_STATUS_DISPLAYID_10N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_10, n) +#define NVT_STATUS_HDMI_EXTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_EXT, n) + + +//******************************** +// CEA/EIA 861 related EDID info +//******************************** +#define NVT_CEA861_REV_NONE 0 +#define NVT_CEA861_REV_ORIGINAL 1 +#define NVT_CEA861_REV_A 2 +#define NVT_CEA861_REV_B 3 +#define NVT_CEA861_REV_C 3 +#define NVT_CEA861_REV_D 3 +#define NVT_CEA861_REV_E 3 +#define NVT_CEA861_REV_F 3 +#define NVT_CTA861_REV_G 3 +// +// max data after misc/basic_caps in EIA861EXTENSION +#define NVT_CEA861_MAX_PAYLOAD 123 +// +// the basic info encoded in byte[3] +#define NVT_CEA861_CAP_UNDERSCAN 0x80 // DTV monitor supports underscan +#define NVT_CEA861_CAP_BASIC_AUDIO 0x40 // DTV monitor supports basic audio +#define NVT_CEA861_CAP_YCbCr_444 0x20 // DTV monitor supports YCbCr4:4:4 +#define NVT_CEA861_CAP_YCbCr_422 0x10 // DTV monitor supports YCbCr4:2:2 +// +#define NVT_CEA861_TOTAL_LT_MASK 0x0F //the max number of 18-byte detailed timing descriptor +// +// +#define NVT_CEA861_SHORT_DESCRIPTOR_SIZE_MASK 0x1F +#define NVT_CEA861_SHORT_DESCRIPTOR_TAG_MASK 0xE0 +#define NVT_CEA861_SHORT_DESCRIPTOR_TAG_SHIFT 5 +// +// the descriptor type tags +#define NVT_CEA861_TAG_RSVD 0 // reserved block +#define NVT_CEA861_TAG_NONE 0 // reserved block +#define NVT_CEA861_TAG_AUDIO 1 // Audio Data Block +#define NVT_CEA861_TAG_VIDEO 2 // Video Data Block +#define NVT_CEA861_TAG_VENDOR 3 // Vendor Specific Data Block +#define NVT_CEA861_TAG_SPEAKER_ALLOC 4 // Speaker Allocation Data Block +#define NVT_CEA861_TAG_VESA_DTC 5 // VESA DTC data block +#define NVT_CEA861_TAG_RSVD1 6 // reserved block +#define NVT_CEA861_TAG_EXTENDED_FLAG 7 // use Extended Tag +// +// the extended tag codes when NVT_CEA861_TAG_EXTENDED_FLAG +#define NVT_CEA861_EXT_TAG_VIDEO_CAP 0 // Video Capability Data Block +#define NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_VIDEO 1 // Vendor-Specific Video Data Block +#define NVT_CEA861_EXT_TAG_VESA_VIDEO_DISPLAY_DEVICE 2 // Reserved for VESA Video Display Device Information Data Block +#define NVT_CEA861_EXT_TAG_VESA_VIDEO 3 // Reserved for VESA Video Data BLock +#define NVT_CEA861_EXT_TAG_HDMI_VIDEO 4 // Reserved for HDMI Video Data Block +#define NVT_CEA861_EXT_TAG_COLORIMETRY 5 // Colorimetry Data Block +#define NVT_CEA861_EXT_TAG_HDR_STATIC_METADATA 6 // HDR Static Metadata Data Block CEA861.3 HDR extension for HDMI 2.0a +#define NVT_CEA861_EXT_TAG_VIDEO_RSVD_MIN 7 // 7...12 : Reserved for video-related blocks +#define NVT_CEA861_EXT_TAG_VIDEO_RSVD_MAX 12 +#define NVT_CEA861_EXT_TAG_VIDEO_FORMAT_PREFERENCE 13 // CEA861F Video Format Preference Data Block +#define NVT_CEA861_EXT_TAG_YCBCR420_VIDEO 14 // CEA861F YCBCR 4:2:0 Video 
Data Block +#define NVT_CEA861_EXT_TAG_YCBCR420_CAP 15 // CEA861F YCBCR 4:2:0 Capability Map Data Block +#define NVT_CEA861_EXT_TAG_MISC_AUDIO 16 // CEA Miscellaneous Audio Fields +#define NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_AUDIO 17 // Vendor-Specific Audio Data Block +#define NVT_CEA861_EXT_TAG_HDMI_AUDIO 18 // Reserved for HDMI Audio Data Block +#define NVT_CEA861_EXT_TAG_AUDIO_RSVD_MIN 19 // 19...31 : Reserved for audio-related blocks +#define NVT_CEA861_EXT_TAG_AUDIO_RSVD_MAX 31 +#define NVT_CEA861_EXT_TAG_INFOFRAME 32 // Infoframe Data Block +#define NVT_CEA861_EXT_TAG_RSVD_MIN_1 33 // 33...119 : Reserved for general +#define NVT_CEA861_EXT_TAG_RSVD_MAX_1 119 +#define NVT_CEA861_EXT_TAG_HF_EEODB 120 // HDMI Forum Edid Extension Override Data Block +#define NVT_CTA861_EXT_TAG_SCDB 121 // 0x79 == Tag for Sink Capability Data Block +#define NVT_CEA861_EXT_TAG_RSVD_MIN_2 122 // 122...255 : Reserved for general +#define NVT_CEA861_EXT_TAG_RSVD_MAX_2 255 +// +//the extended tag payload size; the size includes the extended tag code +#define NVT_CEA861_EXT_VIDEO_CAP_SD_SIZE 2 +#define NVT_CEA861_EXT_COLORIMETRY_SD_SIZE 3 +#define NVT_CTA861_EXT_HDR_STATIC_METADATA_SIZE 6 +#define NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH +// +// +#define NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(a) (((a)&NVT_CEA861_SHORT_DESCRIPTOR_TAG_MASK)>>NVT_CEA861_SHORT_DESCRIPTOR_TAG_SHIFT) +#define NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(a) ((NvU32)((a)&NVT_CEA861_SHORT_DESCRIPTOR_SIZE_MASK)) + + +//******************************** +// VTB Extension related info +//******************************** + +#define NVT_VTB_REV_NONE 0 +#define NVT_VTB_REV_A 1 + +#define NVT_VTB_MAX_PAYLOAD 122 + +//************************* +// short descriptor +//************************* +#define NVT_CEA861_SD_HEADER_SIZE 1 +#define NVT_CEA861_SD_PAYLOAD_SIZE 31 +#define NVT_CEA861_SD_TOTAL_SIZE (NVT_CEA861_SD_HEADER_SIZE + NVT_CEA861_SD_PAYLOAD_SIZE) + +//************************* +// short video descriptor +//************************* +#define NVT_CEA861_VIDEO_SD_SIZE 1 +// the max total short video descriptors possible; See CEA-861-E, section 7.5, +// "It is also possible to have more than one of a specific type of data block if necessary +// to include all of the descriptors needed to describe the sinks capabilities." +#define NVT_CEA861_VIDEO_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_VIDEO_SD_SIZE) + \ + (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_VIDEO_SD_SIZE) +#define NVT_CTA861_VIDEO_VIC_MASK 0xFF //the VIC mask of the short video descriptor +#define NVT_CTA861_7BIT_VIDEO_VIC_MASK 0x7F //the 7 bits VIC mask of the short video descriptor +#define NVT_CTA861_VIDEO_NATIVE_MASK 0x80 //the Native mask of the short video descriptor +#define NVT_HDMI_YUV_420_PCLK_SUPPORTED_MIN 59000 //the value shall be equal to or larger than 590MHz to support YCbCr in HDMI2.1 + +// CTA-861-G supports additional SVDs, whose indices go over 0x7F. +// All values up to 192 are treated as 7-bit VICs; values 128~192 shall be forbidden. +#define NVT_GET_CTA_8BIT_VIC(vic) (((vic) <= NVT_CTA861_7BIT_8BIT_SEPARATE_VALUE) ? ((vic) & NVT_CTA861_7BIT_VIDEO_VIC_MASK) : ((vic) & NVT_CTA861_VIDEO_VIC_MASK)) +// + +// According to CEA-861-E Spec. +// Note 3. A video timing with a vertical frequency that is an integer multiple +// of 6.00 Hz (i.e. 
24.00, 30.00, 60.00, 120.00 or 240.00 Hz) is considered to +// be the same as a video timing with the equivalent detailed timing +// information but where the vertical frequency is adjusted by a factor of +// 1000/1001 (i.e., 24/1.001, 30/1.001, 60/1.001, 120/1.001 or 240/1.001). +// Excluding ceaIndex 1 640x480 which is a PC Mode. +#define NVT_CEA861_TIMING_FRR(_VID_, _RR_) ((_VID_) > 1 && ((_RR_) % 6) == 0) +#define NVT_CEA861_640X480P_59940HZ_4X3 1 // Video Identification Code: format 1 +#define NVT_CEA861_720X480P_59940HZ_4X3 2 // Video Identification Code: format 2 +#define NVT_CEA861_720X480P_59940HZ_16X9 3 // Video Identification Code: format 3 +#define NVT_CEA861_1280X720P_59940HZ_16X9 4 // ... +#define NVT_CEA861_1920X1080I_59940HZ_16X9 5 // ... +#define NVT_CEA861_1440X480I_59940HZ_4X3 6 // ... +#define NVT_CEA861_1440X480I_59940HZ_16X9 7 // ... +#define NVT_CEA861_1440X240P_59940HZ_4X3 8 // ... +#define NVT_CEA861_1440X240P_59940HZ_16X9 9 // ... +#define NVT_CEA861_2880X480I_59940HZ_4X3 10 // ... +#define NVT_CEA861_2880X480I_59940HZ_16X9 11 // ... +#define NVT_CEA861_2880X240P_59940HZ_4X3 12 // ... +#define NVT_CEA861_2880X240P_59940HZ_16X9 13 // ... +#define NVT_CEA861_1440X480P_59940HZ_4X3 14 // ... +#define NVT_CEA861_1440X480P_59940HZ_16X9 15 // ... +#define NVT_CEA861_1920X1080P_59940HZ_16X9 16 // ... +#define NVT_CEA861_720X576P_50000HZ_4X3 17 // ... +#define NVT_CEA861_720X576P_50000HZ_16X9 18 // ... +#define NVT_CEA861_1280X720P_50000HZ_16X9 19 // ... +#define NVT_CEA861_1920X1080I_50000HZ_16X9 20 // ... +#define NVT_CEA861_1440X576I_50000HZ_4X3 21 // ... +#define NVT_CEA861_1440X576I_50000HZ_16X9 22 // ... +#define NVT_CEA861_1440X288P_50000HZ_4X3 23 // ... +#define NVT_CEA861_1440X288P_50000HZ_16X9 24 // ... +#define NVT_CEA861_2880X576I_50000HZ_4X3 25 // ... +#define NVT_CEA861_2880X576I_50000HZ_16X9 26 // ... +#define NVT_CEA861_2880X288P_50000HZ_4X3 27 // ... +#define NVT_CEA861_2880X288P_50000HZ_16X9 28 // ... +#define NVT_CEA861_1440X576P_50000HZ_4X3 29 // ... +#define NVT_CEA861_1440X576P_50000HZ_16X9 30 // ... +#define NVT_CEA861_1920X1080P_50000HZ_16X9 31 // ... +#define NVT_CEA861_1920X1080P_23976HZ_16X9 32 // ... +#define NVT_CEA861_1920X1080P_25000HZ_16X9 33 // ... +#define NVT_CEA861_1920X1080P_29970HZ_16X9 34 // ... +#define NVT_CEA861_2880X480P_59940HZ_4X3 35 // ... +#define NVT_CEA861_2880X480P_59940HZ_16X9 36 // ... +#define NVT_CEA861_2880X576P_50000HZ_4X3 37 // ... +#define NVT_CEA861_2880X576P_50000HZ_16X9 38 // ... +#define NVT_CEA861_1920X1250I_50000HZ_16X9 39 // ... +#define NVT_CEA861_1920X1080I_100000HZ_16X9 40 // ... +#define NVT_CEA861_1280X720P_100000HZ_16X9 41 // ... +#define NVT_CEA861_720X576P_100000HZ_4X3 42 // ... +#define NVT_CEA861_720X576P_100000HZ_16X9 43 // ... +#define NVT_CEA861_1440X576I_100000HZ_4X3 44 // ... +#define NVT_CEA861_1440X576I_100000HZ_16X9 45 // ... +#define NVT_CEA861_1920X1080I_119880HZ_16X9 46 // ... +#define NVT_CEA861_1280X720P_119880HZ_16X9 47 // ... +#define NVT_CEA861_720X480P_119880HZ_4X3 48 // ... +#define NVT_CEA861_720X480P_119880HZ_16X9 49 // ... +#define NVT_CEA861_1440X480I_119880HZ_4X3 50 // ... +#define NVT_CEA861_1440X480I_119880HZ_16X9 51 // ... +#define NVT_CEA861_720X576P_200000HZ_4X3 52 // ... +#define NVT_CEA861_720X576P_200000HZ_16X9 53 // ... +#define NVT_CEA861_1440X576I_200000HZ_4X3 54 // ... +#define NVT_CEA861_1440X576I_200000HZ_16X9 55 // ... +#define NVT_CEA861_720X480P_239760HZ_4X3 56 // ... +#define NVT_CEA861_720X480P_239760HZ_16X9 57 // ... 
+#define NVT_CEA861_1440X480I_239760HZ_4X3 58 // Video Identification Code: format 58 +#define NVT_CEA861_1440X480I_239760HZ_16X9 59 // Video Identification Code: format 59 +#define NVT_CEA861_1280X720P_23976HZ_16X9 60 // ... +#define NVT_CEA861_1280X720P_25000HZ_16X9 61 // ... +#define NVT_CEA861_1280X720P_29970HZ_16X9 62 // ... +#define NVT_CEA861_1920X1080P_119880HZ_16X9 63 // ... +#define NVT_CEA861_1920X1080P_100000HZ_16X9 64 // ... + +// Following modes are from CEA-861F +#define NVT_CEA861_1280X720P_23980HZ_64X27 65 // Video Identification Code: format 65 +#define NVT_CEA861_1280X720P_25000HZ_64X27 66 // Video Identification Code: format 66 +#define NVT_CEA861_1280X720P_29970HZ_64X27 67 // Video Identification Code: format 67 +#define NVT_CEA861_1280X720P_50000HZ_64X27 68 +#define NVT_CEA861_1280X720P_59940HZ_64X27 69 +#define NVT_CEA861_1280X720P_100000HZ_64X27 70 +#define NVT_CEA861_1280X720P_119880HZ_64X27 71 +#define NVT_CEA861_1920X1080P_23980HZ_64X27 72 +#define NVT_CEA861_1920X1080P_25000HZ_64X27 73 +#define NVT_CEA861_1920X1080P_29970HZ_64X27 74 +#define NVT_CEA861_1920X1080P_50000HZ_64X27 75 +#define NVT_CEA861_1920X1080P_59940HZ_64X27 76 +#define NVT_CEA861_1920X1080P_100000HZ_64X27 77 +#define NVT_CEA861_1920X1080P_119880HZ_64X27 78 +#define NVT_CEA861_1680X720P_23980HZ_64X27 79 +#define NVT_CEA861_1680X720P_25000HZ_64X27 80 +#define NVT_CEA861_1680X720P_29970HZ_64X27 81 +#define NVT_CEA861_1680X720P_50000HZ_64X27 82 +#define NVT_CEA861_1680X720P_59940HZ_64X27 83 +#define NVT_CEA861_1680X720P_100000HZ_64X27 84 +#define NVT_CEA861_1680X720P_119880HZ_64X27 85 +#define NVT_CEA861_2560X1080P_23980HZ_64X27 86 +#define NVT_CEA861_2560X1080P_25000HZ_64X27 87 +#define NVT_CEA861_2560X1080P_29970HZ_64X27 88 +#define NVT_CEA861_2560X1080P_50000HZ_64X27 89 +#define NVT_CEA861_2560X1080P_59940HZ_64X27 90 +#define NVT_CEA861_2560X1080P_100000HZ_64X27 91 +#define NVT_CEA861_2560X1080P_119880HZ_64X27 92 +#define NVT_CEA861_3840X2160P_23980HZ_16X9 93 +#define NVT_CEA861_3840X2160P_25000HZ_16X9 94 +#define NVT_CEA861_3840X2160P_29970HZ_16X9 95 +#define NVT_CEA861_3840X2160P_50000HZ_16X9 96 +#define NVT_CEA861_3840X2160P_59940HZ_16X9 97 +#define NVT_CEA861_4096X2160P_23980HZ_256X135 98 +#define NVT_CEA861_4096X2160P_25000HZ_256X135 99 +#define NVT_CEA861_4096X2160P_29970HZ_256X135 100 +#define NVT_CEA861_4096X2160P_50000HZ_256X135 101 +#define NVT_CEA861_4096X2160P_59940HZ_256X135 102 +#define NVT_CEA861_4096X2160P_23980HZ_64X27 103 +#define NVT_CEA861_4096X2160P_25000HZ_64X27 104 +#define NVT_CEA861_4096X2160P_29970HZ_64X27 105 +#define NVT_CEA861_4096X2160P_50000HZ_64X27 106 +#define NVT_CEA861_4096X2160P_59940HZ_64X27 107 + +// Following modes are from CTA-861G +#define NVT_CTA861_1280X720P_47950HZ_16X9 108 +#define NVT_CTA861_1280X720P_47950HZ_64x27 109 +#define NVT_CTA861_1680X720P_47950HZ_64x27 110 +#define NVT_CTA861_1920X1080P_47950HZ_16X9 111 +#define NVT_CTA861_1920X1080P_47950HZ_64x27 112 +#define NVT_CTA861_2560X1080P_47950HZ_64x27 113 +#define NVT_CTA861_3840X2160P_47950HZ_16X9 114 +#define NVT_CTA861_4096x2160p_47950HZ_256X135 115 +#define NVT_CTA861_3840x2160p_47950HZ_64x27 116 +#define NVT_CTA861_3840x2160p_100000HZ_16X9 117 +#define NVT_CTA861_3840x2160p_119880HZ_16X9 118 +#define NVT_CTA861_3840x2160p_100000HZ_64X27 119 +#define NVT_CTA861_3840x2160p_119880HZ_64X27 120 +#define NVT_CTA861_5120x2160p_23980HZ_64X27 121 +#define NVT_CTA861_5120x2160p_25000HZ_64X27 122 +#define NVT_CTA861_5120x2160p_29970HZ_64X27 123 +#define NVT_CTA861_5120x2160p_47950HZ_64X27 124 
+#define NVT_CTA861_5120x2160p_50000HZ_64X27 125 +#define NVT_CTA861_5120x2160p_59940HZ_64X27 126 +#define NVT_CTA861_5120x2160p_100000HZ_64X27 127 + +#define NVT_CTA861_7BIT_8BIT_SEPARATE_VALUE 192 + +#define NVT_CTA861_5120x2160p_119880HZ_64X27 193 +#define NVT_CTA861_7680x4320p_23980HZ_16X9 194 +#define NVT_CTA861_7680x4320p_25000HZ_16X9 195 +#define NVT_CTA861_7680x4320p_29970HZ_16X9 196 +#define NVT_CTA861_7680x4320p_47950HZ_16X9 197 +#define NVT_CTA861_7680x4320p_50000HZ_16X9 198 +#define NVT_CTA861_7680x4320p_59940HZ_16X9 199 +#define NVT_CTA861_7680x4320p_100000HZ_16X9 200 +#define NVT_CTA861_7680x4320p_119880HZ_16X9 201 +#define NVT_CTA861_7680x4320p_23980HZ_64X27 202 +#define NVT_CTA861_7680x4320p_25000HZ_64X27 203 +#define NVT_CTA861_7680x4320p_29970HZ_64X27 204 +#define NVT_CTA861_7680x4320p_47950HZ_64X27 205 +#define NVT_CTA861_7680x4320p_50000HZ_64X27 206 +#define NVT_CTA861_7680x4320p_59940HZ_64X27 207 +#define NVT_CTA861_7680x4320p_100000HZ_64X27 208 +#define NVT_CTA861_7680x4320p_119880HZ_64X27 209 +#define NVT_CTA861_10240x4320p_23980HZ_64X27 210 +#define NVT_CTA861_10240x4320p_25000HZ_64X27 211 +#define NVT_CTA861_10240x4320p_29970HZ_64X27 212 +#define NVT_CTA861_10240x4320p_47950HZ_64X27 213 +#define NVT_CTA861_10240x4320p_50000HZ_64X27 214 +#define NVT_CTA861_10240x4320p_59940HZ_64X27 215 +#define NVT_CTA861_10240x4320p_100000HZ_64X27 216 +#define NVT_CTA861_10240x4320p_119880HZ_64X27 217 +#define NVT_CTA861_4096x2160p_100000HZ_256X135 218 +#define NVT_CTA861_4096x2160p_119880HZ_256X135 219 + +// When defining new CEA861 format: +// Search code base to update array of certain category of CEA formats, such as 720p, 1080i, etc... +// Ideally, it's better to define these groups in one module. However, they should not reside +// in this .h file, thus updating these groups in other files is still needed. 
+// example of the group: 720p: NVT_CEA861_1280X720P_59940HZ_16X9, +// NVT_CEA861_1280X720P_100000HZ_16X9, +// NVT_CEA861_1280X720P_119880HZ_16X9 + +//************************* +// short audio descriptor +//************************* +#define NVT_CEA861_AUDIO_SD_SIZE sizeof(NVT_3BYTES) +// the max total short audio descriptors possible; See CEA-861-E, section 7.5 on repeated types +#define NVT_CEA861_AUDIO_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_AUDIO_SD_SIZE) + \ + (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_AUDIO_SD_SIZE) +// +// short audio descriptor - byte 1 +#define NVT_CEA861_AUDIO_FORMAT_MASK 0x78 //the audio format mask of the CEA short audio descriptor +#define NVT_CEA861_AUDIO_FORMAT_SHIFT 3 //the audio format data shift +// +#define NVT_CEA861_AUDIO_FORMAT_RSVD 0 // short audio descriptor format - reserved +#define NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM 1 // short audio descriptor format - Linear PCM (uncompressed) +#define NVT_CEA861_AUDIO_FORMAT_AC3 2 // short audio descriptor format - AC3 +#define NVT_CEA861_AUDIO_FORMAT_MPEG1 3 // short audio descriptor format - MPEG1(layer 1&2) +#define NVT_CEA861_AUDIO_FORMAT_MP3 4 // short audio descriptor format - MP3(MPEG1 layer 3) +#define NVT_CEA861_AUDIO_FORMAT_MPEG2 5 // short audio descriptor format - MPEG2 (multichannel) +#define NVT_CEA861_AUDIO_FORMAT_AAC 6 // short audio descriptor format - AAC +#define NVT_CEA861_AUDIO_FORMAT_DTS 7 // short audio descriptor format - DTS +#define NVT_CEA861_AUDIO_FORMAT_ATRAC 8 // short audio descriptor format - ATRAC +#define NVT_CEA861_AUDIO_FORMAT_ONE_BIT 9 // short audio descriptor format - one bit audio +#define NVT_CEA861_AUDIO_FORMAT_DDP 10 // short audio descriptor format - Dolby Digital Plus (E-AC3) + +#define NVT_CEA861_AUDIO_FORMAT_DTS_HD 11 // short audio descriptor format - DTS_HD +#define NVT_CEA861_AUDIO_FORMAT_MAT 12 // short audio descriptor format - MAT(MLP) +#define NVT_CEA861_AUDIO_FORMAT_DST 13 // short audio descriptor format - DST +#define NVT_CEA861_AUDIO_FORMAT_WMA_PRO 14 // short audio descriptor format - WMA Pro +#define NVT_CEA861_AUDIO_FORMAT_RSVD15 15 // short audio descriptor format - reserved +// +#define NVT_CEA861_AUDIO_MAX_CHANNEL_MASK 7 // short audio descriptor format - Max Number of channels - 1 +#define NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT 0 // short audio descriptor format shift +// +// short audio descriptor - byte 2 +#define NVT_CEA861_AUDIO_SAMPLE_RATE_MASK 0x7F //the sample rate mask +#define NVT_CEA861_AUDIO_SAMPLE_RATE_SHIFT 0 //the sample rate shift +// +#define NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ 0x01 // short audio descriptor - sample rate : 32KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ 0x02 // short audio descriptor - sample rate : 44.1KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ 0x04 // short audio descriptor - sample rate : 48KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ 0x08 // short audio descriptor - sample rate : 88.2KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ 0x10 // short audio descriptor - sample rate : 96KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ 0x20 // short audio descriptor - sample rate : 176.4KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ 0x40 // short audio descriptor - sample rate : 192KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_RSVD 0x80 // short audio descriptor - sample rate : reserved +// +// short audio descriptor - byte 3 +#define NVT_CEA861_AUDIO_SAMPLE_DEPTH_MASK 0x07 // the uncompressed audio resolution mask +#define 
NVT_CEA861_AUDIO_SAMPLE_DEPTH_SHIFT 0 // the uncompressed audio resolution shift +// +#define NVT_CEA861_AUDIO_SAMPLE_SIZE_16BIT 0x01 // uncompressed (Linear PCM) audio A/D resolution - 16bit +#define NVT_CEA861_AUDIO_SAMPLE_SIZE_20BIT 0x02 // uncompressed (Linear PCM) audio A/D resolution - 20bit +#define NVT_CEA861_AUDIO_SAMPLE_SIZE_24BIT 0x04 // uncompressed (Linear PCM) audio A/D resolution - 24bit + +//************************** +// speaker allocation data +//************************** +#define NVT_CEA861_SPEAKER_SD_SIZE sizeof(NVT_3BYTES) +// the max total short speaker descriptors possible; See CEA-861-E, section 7.5 on repeated types +#define NVT_CEA861_SPEAKER_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_SPEAKER_SD_SIZE) + \ + (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_SPEAKER_SD_SIZE) +#define NVT_CEA861_SPEAKER_ALLOC_MASK 0x7F // the speaker allocation mask +#define NVT_CEA861_SPEAKER_ALLOC_SHIFT 0 // the speaker allocation mask shift +// +#define NVT_CEA861_SPEAKER_ALLOC_FL_FR 0x01 // speaker allocation : Front Left + Front Right +#define NVT_CEA861_SPEAKER_ALLOC_LFE 0x02 // speaker allocation : Low Frequency Effect +#define NVT_CEA861_SPEAKER_ALLOC_FC 0x04 // speaker allocation : Front Center +#define NVT_CEA861_SPEAKER_ALLOC_RL_RR 0x08 // speaker allocation : Rear Left + Rear Right +#define NVT_CEA861_SPEAKER_ALLOC_RC 0x10 // speaker allocation : Rear Center +#define NVT_CEA861_SPEAKER_ALLOC_FLC_FRC 0x20 // speaker allocation : Front Left Center + Front Right Center +#define NVT_CEA861_SPEAKER_ALLOC_RLC_RRC 0x40 // speaker allocation : Rear Left Center + Rear Right Center + +//*********************** +// vendor specific data +//*********************** +#define NVT_CEA861_VSDB_HEADER_SIZE 4 +#define NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH 28 // max allowed vendor specific data block payload (in byte) +#define NVT_CEA861_HDMI_IEEE_ID 0x0C03 +#define NVT_CEA861_HDMI_LLC_IEEE_ID NVT_CEA861_HDMI_IEEE_ID +#define NVT_CEA861_NVDA_IEEE_ID 0x44B +#define NVT_CEA861_HDMI_FORUM_IEEE_ID 0xC45DD8 +#define NVT_CEA861_MSFT_IEEE_ID 0xCA125C + +#define NVT_CEA861_VSDB_MAX_BLOCKS 4 // NOTE: The maximum number of VSDB blocks should be: + // (NVT_CEA861_MAX_PAYLOAD / (NVT_CEA861_VSDB_HEADER_SIZE + 1)) (assume at least 1 byte of payload) + // As of Sept 2013, there are 3 different VSDBs defined in the spec. Hence allocating space for all 24 + // is overkill. As a tradeoff, we define this limit as 4 for now. If required, this should be increased later. 
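As a usage note (an illustrative sketch, not part of the patch): the byte-1/byte-2 mask and shift pairs above are meant to be applied to a raw short audio descriptor. The helper name below is hypothetical.

    // Illustrative sketch only: decode bytes 1 and 2 of a CEA-861 short
    // audio descriptor with the masks/shifts defined above.
    static void decodeSad(const NVT_3BYTES *sad)
    {
        // byte 1: bits 6:3 = audio format code, bits 2:0 = (max channels - 1)
        NvU8 format   = (NvU8)((sad->byte1 & NVT_CEA861_AUDIO_FORMAT_MASK) >> NVT_CEA861_AUDIO_FORMAT_SHIFT);
        NvU8 channels = (NvU8)((sad->byte1 & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) >> NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT) + 1;

        // byte 2: one bit per supported sample rate, e.g. 48 kHz
        NvBool has48kHz = (sad->byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ) != 0;

        (void)format; (void)channels; (void)has48kHz;
    }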
+
+typedef struct VSDB_DATA
+{
+    NvU32 ieee_id;
+    NvU32 vendor_data_size; // size of data copied to vendor_data (excludes ieee_id from frame)
+    NvU8  vendor_data[NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH];
+} VSDB_DATA;
+
+//*******************************
+// vendor specific video data
+//*******************************
+#define NVT_CEA861_DV_IEEE_ID 0x00D046
+#define NVT_CEA861_VSVDB_PAYLOAD_MAX_LENGTH 25 // max allowed vendor specific video data block payload (in bytes)
+#define NVT_CEA861_VSVDB_VERSION_MASK 0xE0 // vsvdb version mask
+#define NVT_CEA861_VSVDB_VERSION_MASK_SHIFT 5 // vsvdb version shift
+
+typedef struct VSVDB_DATA
+{
+    NvU32 ieee_id;
+    NvU32 vendor_data_size; // size of data copied to vendor_data (excludes ieee_id from frame)
+    NvU8  vendor_data[NVT_CEA861_VSVDB_PAYLOAD_MAX_LENGTH];
+} VSVDB_DATA;
+
+#pragma pack(1)
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE0
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_2160p60hz : 1;
+    NvU8 supports_global_dimming : 1;
+    NvU8 reserved_1 : 2;
+    NvU8 VSVDB_version : 3;
+
+    // second to fourth byte
+    NvU8 cc_red_y_3_0 : 4;
+    NvU8 cc_red_x_3_0 : 4;
+    NvU8 cc_red_x_11_4 : 8;
+    NvU8 cc_red_y_11_4 : 8;
+
+    NvU8 cc_green_y_3_0 : 4;
+    NvU8 cc_green_x_3_0 : 4;
+    NvU8 cc_green_x_11_4 : 8;
+    NvU8 cc_green_y_11_4 : 8;
+
+    NvU8 cc_blue_y_3_0 : 4;
+    NvU8 cc_blue_x_3_0 : 4;
+    NvU8 cc_blue_x_11_4 : 8;
+    NvU8 cc_blue_y_11_4 : 8;
+
+    NvU8 cc_white_y_3_0 : 4;
+    NvU8 cc_white_x_3_0 : 4;
+    NvU8 cc_white_x_11_4 : 8;
+    NvU8 cc_white_y_11_4 : 8;
+
+    NvU8 target_max_pq_3_0 : 4;
+    NvU8 target_min_pq_3_0 : 4;
+    NvU8 target_min_pq_11_4 : 8;
+    NvU8 target_max_pq_11_4 : 8;
+
+    NvU8 dm_version_minor : 4;
+    NvU8 dm_version_major : 4;
+
+    NvU8 reserved_2 : 8;
+    NvU8 reserved_3 : 8;
+    NvU8 reserved_4 : 8;
+    NvU8 reserved_5 : 8;
+} NVT_DV_STATIC_METADATA_TYPE0;
+
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE1
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_2160p60hz : 1;
+    NvU8 dm_version : 3;
+    NvU8 VSVDB_version : 3;
+
+    // second byte
+    NvU8 supports_global_dimming : 1;
+    NvU8 target_max_luminance : 7;
+
+    // third byte
+    NvU8 colorimetry : 1;
+    NvU8 target_min_luminance : 7;
+
+    // fourth byte
+    NvU8 reserved : 8;
+
+    // fifth to tenth byte
+    NvU8 cc_red_x : 8;
+    NvU8 cc_red_y : 8;
+    NvU8 cc_green_x : 8;
+    NvU8 cc_green_y : 8;
+    NvU8 cc_blue_x : 8;
+    NvU8 cc_blue_y : 8;
+} NVT_DV_STATIC_METADATA_TYPE1;
+
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE1_1
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_2160p60hz : 1;
+    NvU8 dm_version : 3;
+    NvU8 VSVDB_version : 3;
+
+    // second byte
+    NvU8 supports_global_dimming : 1;
+    NvU8 target_max_luminance : 7;
+
+    // third byte
+    NvU8 colorimetry : 1;
+    NvU8 target_min_luminance : 7;
+
+    // fourth byte
+    NvU8 interface_supported_by_sink : 2;
+    NvU8 unique_By : 3;
+    NvU8 unique_Bx : 3;
+
+    // fifth byte
+    NvU8 unique_Ry_bit_0 : 1;
+    NvU8 unique_Gx : 7;
+
+    // sixth byte
+    NvU8 unique_Ry_bit_1 : 1;
+    NvU8 unique_Gy : 7;
+
+    // seventh byte
+    NvU8 unique_Rx : 5;
+    NvU8 unique_Ry_bit_2_to_4 : 3;
+
+} NVT_DV_STATIC_METADATA_TYPE1_1;
+
+typedef struct tagNVT_DV_STATIC_METADATA_TYPE2
+{
+    // first byte
+    NvU8 supports_YUV422_12bit : 1;
+    NvU8 supports_backlight_control : 1;
+    NvU8 dm_version : 3;
+    NvU8 VSVDB_version : 3;
+
+    // second byte
+    NvU8 backlt_min_luma : 2;
+    NvU8 supports_global_dimming : 1;
+    NvU8 target_min_luminance : 5;
+
+    // third byte
+    NvU8 interface_supported_by_sink : 2;
+    NvU8 reserved : 1;
+    NvU8 target_max_luminance : 5;
+
+    // fourth byte
+    NvU8 supports_10b_12b_444_bit1 : 1;
+    NvU8 unique_Gx : 7;
+
+    // fifth byte
+    NvU8 supports_10b_12b_444_bit0 : 1;
+    NvU8 unique_Gy : 7;
+
+    // sixth byte
+    NvU8 unique_Bx : 3;
+    NvU8 unique_Rx : 5;
+
+    // seventh byte
+    NvU8 unique_By : 3;
+    NvU8 unique_Ry : 5;
+
+} NVT_DV_STATIC_METADATA_TYPE2;
+#pragma pack()
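The type-0 layout above splits each 12-bit chromaticity coordinate into a high byte and a low nibble; a minimal reassembly sketch follows (the helper name is illustrative, not from this header):

    // Illustrative sketch only: rebuild the 12-bit red-x coordinate from the
    // split fields of NVT_DV_STATIC_METADATA_TYPE0.
    static NvU16 dvType0RedX(const NVT_DV_STATIC_METADATA_TYPE0 *p)
    {
        // bits 11:4 are stored in cc_red_x_11_4, bits 3:0 in cc_red_x_3_0
        return (NvU16)((p->cc_red_x_11_4 << 4) | p->cc_red_x_3_0);
    }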
+//***************************
+// colorimetry data block
+//***************************
+//
+// Colorimetry capabilities - byte 3
+#define NVT_CEA861_COLORIMETRY_MASK 0xFF // the colorimetry cap mask
+#define NVT_CEA861_COLORIMETRY_SHIFT 0 // the colorimetry cap shift
+
+#define NVT_CEA861_COLORIMETRY_NO_DATA 0x00
+#define NVT_CEA861_COLORIMETRY_xvYCC_601 0x01 // xvYCC601 capable
+#define NVT_CEA861_COLORIMETRY_xvYCC_709 0x02 // xvYCC709 capable
+#define NVT_CEA861_COLORIMETRY_sYCC_601 0x04 // sYCC601 capable
+#define NVT_CEA861_COLORIMETRY_AdobeYCC_601 0x08 // AdobeYCC601 capable
+#define NVT_CEA861_COLORIMETRY_AdobeRGB 0x10 // AdobeRGB capable
+#define NVT_CEA861_COLORIMETRY_BT2020cYCC 0x20 // BT2020 cYCbCr (constant luminance) capable
+#define NVT_CEA861_COLORIMETRY_BT2020YCC 0x40 // BT2020 Y'CbCr capable
+#define NVT_CEA861_COLORIMETRY_BT2020RGB 0x80 // BT2020 RGB capable
+// Colorimetry capabilities - byte 4
+#define NVT_CEA861_COLORIMETRY_DCI_P3 0x80 // DCI-P3
+
+//
+// gamut-related metadata capabilities - byte 4
+#define NVT_CEA861_GAMUT_METADATA_MASK 0x8F // the colorimetry or gamut-related metadata block mask
+#define NVT_CEA861_GAMUT_METADATA_SHIFT 0 // the metadata block shift
+//
+#define NVT_CEA861_GAMUT_METADATA_MD0 0x01 // MD0
+#define NVT_CEA861_GAMUT_METADATA_MD1 0x02 // MD1
+#define NVT_CEA861_GAMUT_METADATA_MD2 0x04 // MD2
+#define NVT_CEA861_GAMUT_METADATA_MD3 0x08 // MD3
+
+//***************************
+// HDR static metadata data block
+//***************************
+//
+typedef struct tagNVT_5BYTES
+{
+    NvU8 byte1;
+    NvU8 byte2;
+    NvU8 byte3;
+    NvU8 byte4;
+    NvU8 byte5;
+} NVT_5BYTES;
+
+// Supported Electro-Optical Transfer Function - byte 3
+#define NVT_CEA861_EOTF_MASK 0x3F // the EOTF cap mask
+#define NVT_CEA861_EOTF_SHIFT 0 // the EOTF cap shift
+//
+#define NVT_CEA861_EOTF_GAMMA_SDR 0x01 // ET_0 Traditional gamma - SDR Luminance Range
+#define NVT_CEA861_EOTF_GAMMA_HDR 0x02 // ET_1 Traditional gamma - HDR Luminance Range
+#define NVT_CEA861_EOTF_SMPTE_ST2084 0x04 // ET_2 SMPTE ST2084 EOTF (a.k.a. PQ - Perceptual Quantizer EOTF)
+#define NVT_CEA861_EOTF_FUTURE 0x08 // ET_3 Future EOTF
+
+//
+// Supported Static Metadata Descriptor - byte 4
+#define NVT_CEA861_STATIC_METADATA_DESCRIPTOR_MASK 0x01 // the supported static metadata descriptor block mask
+#define NVT_CEA861_STATIC_METADATA_SHIFT 0 // the metadata block shift
+//
+#define NVT_CEA861_STATIC_METADATA_SM0 0x00 // Static Metadata Type 1
+
+//
+// Desired Content Max Luminance data - byte 5
+#define NVT_CEA861_MAX_CLL_MASK 0xFF // the desired content max luminance level (MaxCLL) data block mask
+#define NVT_CEA861_MAX_CLL_SHIFT 0 // the metadata block shift
+
+// Desired Content Max Frame-Average Luminance data - byte 6
+#define NVT_CEA861_MAX_FALL_MASK 0xFF // the desired content max frame-average luminance (MaxFALL) data block mask
+#define NVT_CEA861_MAX_FALL_SHIFT 0 // the metadata block shift
+
+// Desired Content Min Luminance data - byte 7
+#define NVT_CEA861_MIN_CLL_MASK 0xFF // the desired content min luminance level (MinCLL) data block mask
+#define NVT_CEA861_MIN_CLL_SHIFT 0 // the metadata block shift
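A short sketch (illustrative only, not part of the patch) of how the EOTF capability bits above would be tested on byte 3 of the HDR static metadata block; "byte3" here is a hypothetical raw byte from that block:

    // Illustrative sketch only: test the EOTF capability bits carried in
    // byte 3 of the HDR static metadata data block.
    NvU8 eotfCaps = (NvU8)((byte3 & NVT_CEA861_EOTF_MASK) >> NVT_CEA861_EOTF_SHIFT);
    NvBool supportsPq = (eotfCaps & NVT_CEA861_EOTF_SMPTE_ST2084) != 0;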
+
+//***************************
+// video capability data block
+//***************************
+//
+#define NVT_CEA861_VIDEO_CAPABILITY_MASK 0x7F // the video capability data block mask
+#define NVT_CEA861_VIDEO_CAPABILITY_SHIFT 0 // the video capability data block shift
+//
+#define NVT_CEA861_VIDEO_CAPABILITY_S_CE0 0x01 // S_CE0
+#define NVT_CEA861_VIDEO_CAPABILITY_S_CE1 0x02 // S_CE1
+#define NVT_CEA861_VIDEO_CAPABILITY_S_IT0 0x04 // S_IT0
+#define NVT_CEA861_VIDEO_CAPABILITY_S_IT1 0x08 // S_IT1
+#define NVT_CEA861_VIDEO_CAPABILITY_S_PT0 0x10 // S_PT0
+#define NVT_CEA861_VIDEO_CAPABILITY_S_PT1 0x20 // S_PT1
+#define NVT_CEA861_VIDEO_CAPABILITY_S_QS 0x40 // S_QS
+
+//**************************
+// EDID 861 Extension Info
+//**************************
+typedef struct tagNVT_3BYTES
+{
+    NvU8 byte1;
+    NvU8 byte2;
+    NvU8 byte3;
+} NVT_3BYTES;
+
+//***********************
+// VCDB specific data
+//***********************
+#define NVT_CEA861_VCDB_QS_MASK 0x40 // quantization range selectable mask
+#define NVT_CEA861_VCDB_QS_SHIFT 6 // quantization range selectable shift
+
+#define NVT_CEA861_VCDB_S_PT_MASK 0x30 // PT over/underscan behavior mask
+#define NVT_CEA861_VCDB_S_PT_SHIFT 4 // PT over/underscan behavior shift
+#define NVT_CEA861_VCDB_S_PT_NO_DATA 0x00
+#define NVT_CEA861_VCDB_S_PT_ALWAYS_OVERSCAN 0x01
+#define NVT_CEA861_VCDB_S_PT_ALWAYS_UNDERSCAN 0x02
+#define NVT_CEA861_VCDB_S_PT_OVER_OR_UNDERSCAN 0x03
+
+#define NVT_CEA861_VCDB_S_IT_MASK 0x0C // IT over/underscan behavior mask
+#define NVT_CEA861_VCDB_S_IT_SHIFT 2 // IT over/underscan behavior shift
+#define NVT_CEA861_VCDB_S_IT_NOT_SUPPORTED 0x00
+#define NVT_CEA861_VCDB_S_IT_ALWAYS_OVERSCAN 0x01
+#define NVT_CEA861_VCDB_S_IT_ALWAYS_UNDERSCAN 0x02
+#define NVT_CEA861_VCDB_S_IT_OVER_OR_UNDERSCAN 0x03
+
+#define NVT_CEA861_VCDB_S_CE_MASK 0x03 // CE over/underscan behavior mask
+#define NVT_CEA861_VCDB_S_CE_SHIFT 0 // CE over/underscan behavior shift
+#define NVT_CEA861_VCDB_S_CE_NOT_SUPPORTED 0x00
+#define NVT_CEA861_VCDB_S_CE_ALWAYS_OVERSCAN 0x01
+#define NVT_CEA861_VCDB_S_CE_ALWAYS_UNDERSCAN 0x02
+#define NVT_CEA861_VCDB_S_CE_OVER_OR_UNDERSCAN 0x03
+
+//
+typedef struct tagNVT_2BYTES
+{
+    NvU8 byte1;
+    NvU8 byte2;
+} NVT_2BYTES;
+//
+// See CEA-861-E, Tables 42 and 43, Extended Tags; indicates that the corresponding CEA extended data block value is valid, e.g. if colorimetry is set, then NVT_EDID_CEA861_INFO::colorimetry is valid
+typedef struct tagNVT_VALID_EXTENDED_BLOCKS
+{
+    NvU32 VCDB : 1;
+    NvU32 VSVD : 1;
+    NvU32 colorimetry : 1;
+    NvU32 H14B_VSDB : 1;
+    NvU32 H20_HF_VSDB : 1;
+    NvU32 y420cmdb : 1;
+    NvU32 hdr_static_metadata : 1;
+    NvU32 dv_static_metadata : 1;
+    NvU32 SCDB : 1;
+    NvU32 HF_EEODB : 1;
+} NVT_VALID_EXTENDED_BLOCKS;
+
+
+//*************************
+// extended data blocks
+//*************************
+#define NVT_CEA861_SD_EXT_HEADER_SIZE 1
+
+#define NVT_CEA861_Y420VDB_SD_SIZE 1
+
+// Max number of YUV420 VDBs for each VDB block is 30 per CTA-861-G spec sec. 7.5.10
+// Accommodate 2 blocks
+#define NVT_CEA861_Y420VDB_MAX_DESCRIPTOR 60
+
+#define NVT_CEA861_Y420CMDB_SD_SIZE 1
+
+// Max number of YUV420 SVDs for each VDB block is 30 per CTA-861-G spec sec. 7.5.11
+// Accommodate 2 blocks
+#define NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR 60
+#define NVT_CEA861_VFPDB_SD_SIZE 1
+#define NVT_CEA861_VFPDB_MAX_DESCRIPTOR 16 // NOTE: Limiting to 16 to avoid allocating too much space. The maximum descriptor count would be:
+                                           // ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_VFPDB_SD_SIZE) +
+                                           // (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE - NVT_CEA861_SD_EXT_HEADER_SIZE) / NVT_CEA861_VFPDB_SD_SIZE)
+
+typedef enum tagNVT_CTA861_ORIGIN
+{
+    FROM_CTA861_EXTENSION,
+    FROM_DISPLAYID_13_DATA_BLOCK,
+    FROM_DISPLAYID_20_DATA_BLOCK,
+} NVT_CTA861_ORIGIN;
+
+//
+typedef struct tagEDID_CEA861_INFO
+{
+    NvU8 revision;
+    NvU8 basic_caps;
+
+    // short video descriptor
+    NvU8 total_svd;
+    NvU8 video[NVT_CEA861_VIDEO_MAX_DESCRIPTOR];
+
+    // short audio descriptor
+    NvU8 total_sad;
+    NVT_3BYTES audio[NVT_CEA861_AUDIO_MAX_DESCRIPTOR];
+
+    // speaker allocation data
+    NvU8 total_ssd;
+    NVT_3BYTES speaker[NVT_CEA861_SPEAKER_MAX_DESCRIPTOR];
+
+    // vendor specific data
+    NvU8 total_vsdb;
+    VSDB_DATA vsdb[NVT_CEA861_VSDB_MAX_BLOCKS];
+
+    // vendor specific video data
+    VSVDB_DATA vsvdb;
+
+    // indicates which of the extended data blocks below contain valid data (excluding the extended blocks that carry their own total count)
+    NVT_VALID_EXTENDED_BLOCKS valid;
+    // extended data blocks
+    NVT_2BYTES colorimetry; // Colorimetry Data Block
+    NvU8 video_capability; // Video Capability Block
+
+    // HDR Static Metadata Data Block. See CEA-861.3 HDR Static Metadata Extensions, Section 4.2
+    NVT_5BYTES hdr_static_metadata;
+
+    // VFPDB extended block. See CEA861-H, Section 7.5.12 Video Format Preference Data Block
+    NvU8 total_vfpdb;
+    NvU8 svr_vfpdb[NVT_CEA861_VFPDB_MAX_DESCRIPTOR]; // svr of preferred video formats
+
+    // Y420VDB extended block. See CEA861-F, Section 7.5.10 YCBCR 4:2:0 Video Data Block
+    NvU8 total_y420vdb;
+    NvU8 svd_y420vdb[NVT_CEA861_Y420VDB_MAX_DESCRIPTOR]; // svd of video formats that only support YCbCr 4:2:0
+
+    // Y420CMDB extended block. See CEA861-F, Section 7.5.11 YCBCR 4:2:0 Capability Map Data Block
+    NvU8 total_y420cmdb;
+    NvU8 map_y420cmdb[NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR]; // bit map to svd in video[] that also supports YCbCr 4:2:0
+
+    // NVT_EDID_CEA861_INFO::valid.SCDB = 1 in case an HF-SCDB is exposed by the sink.
+    NvU32 hfscdbSize;
+    NvU8 hfscdb[NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH];
+
+    NvU8 hfeeodb; // HDMI Forum EDID Extension Override Data Block
+} NVT_EDID_CEA861_INFO; + + +//******************* +// Parsed DisplayID Information +//******************* +#define NVT_DISPLAYID_SECTION_MAX_SIZE 251 +#define NVT_DISPLAYID_SECTION_HEADER_LEN 5 +#define NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN 248 +#define NVT_DISPLAYID_DATABLOCK_HEADER_LEN 3 + +#define NVT_DISPLAYID_PRODUCT_STRING_MAX_LEN 233 +#define NVT_DISPLAYID_COLOR_MAX_WHITEPOINTS 5 +#define NVT_DISPLAYID_COLOR_MAX_PRIMARIES 3 +#define NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT 2 +#define NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF 7 + +typedef enum tagNVT_SINGLE_TILE_BEHAVIOR +{ + NVT_SINGLE_TILE_BEHAVIOR_OTHER = 0, + NVT_SINGLE_TILE_BEHAVIOR_SOURCE_DRIVEN, + NVT_SINGLE_TILE_BEHAVIOR_SCALE, + NVT_SINGLE_TILE_BEHAVIOR_CLONE +} NVT_SINGLE_TILE_BEHAVIOR; + +typedef enum tagNVT_MULTI_TILE_BEHAVIOR +{ + NVT_MULTI_TILE_BEHAVIOR_OTHER = 0, + NVT_MULTI_TILE_BEHAVIOR_SOURCE_DRIVEN +} NVT_MULTI_TILE_BEHAVIOR; + +typedef struct _tagNVT_TILEDDISPLAY_TOPOLOGY_ID +{ + NvU32 vendor_id; + NvU16 product_id; + NvU32 serial_number; +} NVT_TILEDDISPLAY_TOPOLOGY_ID; + +typedef struct _tagNVT_COLOR_POINT +{ + NvU16 x; + NvU16 y; +} NVT_COLOR_POINT; + +typedef struct _tagNVT_DISPLAYID_RANGE_LIMITS +{ + NvU32 revision; + NvU32 pclk_min; + NvU32 pclk_max; + NvU8 hfreq_min; + NvU8 hfreq_max; + NvU16 hblank_min; + NvU8 vfreq_min; + NvU16 vfreq_max; + NvU16 vblank_min; + NvU8 interlaced : 1; + NvU8 cvt : 1; + NvU8 cvt_reduced : 1; + NvU8 dfd : 1; + NvU8 seamless_dynamic_video_timing_change : 1; +} NVT_DISPLAYID_RANGE_LIMITS; + +#define NVT_DID_MAX_EXT_PAYLOAD 122 + +typedef struct _tagNVT_DISPLAYID_INFO +{ + // Top Level Header Information + NvU8 version; + NvU8 product_type; + + // Product Identification (0 or 1 Blocks Allowed) + NvU32 vendor_id; + NvU16 product_id; + NvU32 serial_number; + NvU8 week; + NvU8 year; + NvU8 product_string[NVT_DISPLAYID_PRODUCT_STRING_MAX_LEN + 1]; + + // Display Parameters + NvU16 horiz_size; + NvU16 vert_size; + NvU16 horiz_pixels; + NvU16 vert_pixels; + NvU8 support_audio : 1; + NvU8 separate_audio : 1; + NvU8 audio_override : 1; + NvU8 power_management : 1; + NvU8 fixed_timing : 1; + NvU8 fixed_pixel_format : 1; + NvU8 rsvd4 : 1; + NvU8 deinterlace : 1; + NvU16 gamma; + NvU8 aspect_ratio; + NvU8 depth_overall : 4; + NvU8 depth_native : 4; + + // Color Characteristics + NvU8 total_white_points; + NvU8 total_primaries : 3; + NvU8 temporal : 1; + NVT_COLOR_POINT white_points[NVT_DISPLAYID_COLOR_MAX_WHITEPOINTS]; + NVT_COLOR_POINT primaries[NVT_DISPLAYID_COLOR_MAX_PRIMARIES]; + + // Range Limits + NvU8 rl_num; + NVT_DISPLAYID_RANGE_LIMITS range_limits[NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT]; + + // Display Data + NvU8 tech_type; + NvU8 device_op_mode : 4; + NvU8 support_backlight : 1; + NvU8 support_intensity : 1; + NvU8 rsvd1 : 2; + NvU16 horiz_pixel_count; + NvU16 vert_pixel_count; + NvU8 orientation : 2; + NvU8 rotation : 2; + NvU8 zero_pixel : 2; + NvU8 scan_direction : 2; + NvU8 subpixel_info; + NvU8 horiz_pitch; + NvU8 vert_pitch; + NvU8 rsvd2 : 4; + NvU8 color_bit_depth : 4; + NvU8 white_to_black : 1; + NvU8 response_time : 7; + + // Power Settings + NvU8 t1_min : 4; + NvU8 t1_max : 4; + NvU8 t2_max; + NvU8 t3_max; + NvU8 t4_min; + NvU8 t5_min; + NvU8 t6_min; + + union + { + struct + { + NvU8 rsvd : 3; + NvU8 color_map : 1; + NvU8 support_2_8v : 1; + NvU8 support_12v : 1; + NvU8 support_5v : 1; + NvU8 support_3_3v : 1; + NvU8 rsvd2 : 5; + NvU8 DE_mode : 1; + NvU8 polarity : 1; + NvU8 data_strobe : 1; + } lvds; + + struct + { + NvU8 rsvd : 5; 
+ NvU8 DE_mode : 1; + NvU8 polarity : 1; + NvU8 data_strobe : 1; + } proprietary; + } u2; + + // Stereo Interface + NvU8 stereo_code; + union + { + struct + { + NvU8 stereo_polarity; + } field_sequential; + + struct + { + NvU8 view_identity; + } side_by_side; + + struct + { + NvU8 interleave_pattern[8]; + } pixel_interleaved; + + struct + { + NvU8 rsvd : 5; + NvU8 mirroring : 2; + NvU8 polarity : 1; + } left_right_separate; + + struct + { + NvU8 num_views; + NvU8 code; + } multiview; + } u3; + + NvU32 tiled_display_revision; + struct + { + NvBool bSingleEnclosure; + NvBool bHasBezelInfo; + NVT_SINGLE_TILE_BEHAVIOR single_tile_behavior; + NVT_MULTI_TILE_BEHAVIOR multi_tile_behavior; + } tile_capability; + + struct + { + NvU32 row; + NvU32 col; + } tile_topology; + + struct + { + NvU32 x; + NvU32 y; + } tile_location; + + struct + { + NvU32 width; + NvU32 height; + } native_resolution; + + struct + { + NvU32 pixel_density; + NvU32 top; + NvU32 bottom; + NvU32 right; + NvU32 left; + } bezel_info; + + NVT_TILEDDISPLAY_TOPOLOGY_ID tile_topology_id; + NvU8 cea_data_block_present; + + NvU8 supported_displayId2_0; + union + { + // Display Interface + struct + { + NvU8 interface_type : 4; + union + { + NvU8 analog_subtype : 4; + NvU8 digital_num_links : 4; + } u1; + + NvU8 interface_version; + + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } rgb_depth; + + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } ycbcr444_depth; + + struct + { + NvU8 rsvd : 3; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + } ycbcr422_depth; + + NvU8 content_protection; + NvU8 content_protection_version; + NvU8 spread_spectrum : 2; + NvU8 rsvd3 : 2; + NvU8 spread_percent : 4; + + } display_interface; + + //display interface features for DID2.0 + struct + { + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } rgb_depth; + + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } ycbcr444_depth; + + struct + { + NvU8 rsvd : 3; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + } ycbcr422_depth; + + struct + { + NvU8 rsvd : 3; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + } ycbcr420_depth; + + // based on the DID2.0 spec. 
minimum pixel rate at which the Sink device shall support YCbCr 4:2:0 encoding
+        NvU8 minimum_pixel_rate_ycbcr420;
+
+        struct
+        {
+            NvU8 support_32khz : 1;
+            NvU8 support_44_1khz : 1;
+            NvU8 support_48khz : 1;
+            NvU8 rsvd : 5;
+        } audio_capability;
+
+        struct
+        {
+            NvU8 rsvd : 1;
+            NvU8 support_colorspace_bt2020_eotf_smpte_st2084 : 1;
+            NvU8 support_colorspace_bt2020_eotf_bt2020 : 1;
+            NvU8 support_colorspace_dci_p3_eotf_dci_p3 : 1;
+            NvU8 support_colorspace_adobe_rgb_eotf_adobe_rgb : 1;
+            NvU8 support_colorspace_bt709_eotf_bt1886 : 1;
+            NvU8 support_colorspace_bt601_eotf_bt601 : 1;
+            NvU8 support_colorspace_srgb_eotf_srgb : 1;
+        } colorspace_eotf_combination_1;
+
+        struct
+        {
+            NvU8 rsvd : 8;
+        } colorspace_eotf_combination_2;
+
+        struct
+        {
+            NvU8 rsvd : 5;
+            NvU8 total : 3;
+        } total_additional_colorspace_eotf;
+
+        struct
+        {
+            NvU8 support_colorspace : 4;
+            NvU8 support_eotf : 4;
+        } additional_colorspace_eotf[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF];
+        } display_interface_features;
+    } u4;
+
+} NVT_DISPLAYID_INFO;
+
+//***********************************
+// EDID 18-byte display descriptors
+//***********************************
+//
+//
+//*** (Tag = 0xFF) ***/
+// Display Product Serial Number
+#define NVT_EDID_LDD_PAYLOAD_SIZE 13
+typedef struct tagNVT_EDID_DD_SERIAL_NUMBER
+{
+    NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE];
+    NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE];
+} NVT_EDID_DD_SERIAL_NUMBER;
+//
+//
+//
+//*** (Tag = 0xFE) ***/
+// Alphanumeric Data String (ASCII)
+typedef struct tagNVT_EDID_DD_DATA_STRING
+{
+    NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE];
+    NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE];
+} NVT_EDID_DD_DATA_STRING;
+//
+//
+//
+//*** (Tag = 0xFD) ***/
+// Display Range Limit
+//
+typedef struct tagNVT_EDID_DD_RANGE_GTF2
+{
+    NvU8  C;
+    NvU8  K;
+    NvU8  J;
+    NvU16 M;
+} NVT_EDID_DD_RANGE_GTF2;
+
+typedef struct tagNVT_EDID_DD_RANGE_CVT
+{
+    NvU16 max_active_pixels_per_line;
+
+    NvU8 pixel_clock_adjustment : 2; // this is in 0.25MHz units, subtract from max_pixel_clock;
+                                     // the whole number part (if existing) gets subtracted
+                                     // from max_pclk_MHz right away
+    NvU8 aspect_supported : 5;
+
+    NvU8 aspect_preferred : 3;
+    NvU8 blanking_support : 2;
+    NvU8 reserved1 : 3;
+
+    NvU8 scaling_support : 4;
+    NvU8 reserved2 : 4;
+
+    NvU8 preferred_refresh_rate;
+} NVT_EDID_DD_RANGE_CVT;
+
+typedef struct tagNVT_EDID_DD_RANGE_LIMIT
+{
+    NvU16 min_v_rate;
+    NvU16 max_v_rate;
+    NvU16 min_h_rate;
+    NvU16 max_h_rate;
+    NvU16 max_pclk_MHz;
+    NvU8  timing_support; // indicates 2nd GTF / CVT support
+    union
+    {
+        // if timing_support = 0x02
+        NVT_EDID_DD_RANGE_GTF2 gtf2;
+
+        // if timing_support = 0x04
+        NVT_EDID_DD_RANGE_CVT cvt;
+    } u;
+} NVT_EDID_DD_RANGE_LIMIT;
+
+typedef struct tagNVT_EDID_RANGE_LIMIT
+{
+    NvU32 min_v_rate_hzx1k;
+    NvU32 max_v_rate_hzx1k;
+    NvU32 min_h_rate_hz;
+    NvU32 max_h_rate_hz;
+    NvU32 max_pclk_10khz;
+} NVT_EDID_RANGE_LIMIT;
+
+// timing support
+#define NVT_EDID_RANGE_SUPPORT_GTF2 0x02
+#define NVT_EDID_RANGE_SUPPORT_CVT 0x04
+
+// supported aspect ratios
+#define NVT_EDID_CVT_ASPECT_SUPPORT_MAX 5
+
+#define NVT_EDID_CVT_ASPECT_SUPPORT_4X3 0x10
+#define NVT_EDID_CVT_ASPECT_SUPPORT_16X9 0x08
+#define NVT_EDID_CVT_ASPECT_SUPPORT_16X10 0x04
+#define NVT_EDID_CVT_ASPECT_SUPPORT_5X4 0x02
+#define NVT_EDID_CVT_ASPECT_SUPPORT_15X9 0x01
+
+// preferred aspect ratios
+#define NVT_EDID_CVT_ASPECT_PREFER_4X3 0x00
+#define NVT_EDID_CVT_ASPECT_PREFER_16X9 0x01
+#define NVT_EDID_CVT_ASPECT_PREFER_16X10 0x02
+#define NVT_EDID_CVT_ASPECT_PREFER_5X4 0x03
+#define NVT_EDID_CVT_ASPECT_PREFER_15X9 0x04 + +// cvt blanking support +#define NVT_EDID_CVT_BLANKING_STANDARD 0x01 +#define NVT_EDID_CVT_BLANKING_REDUCED 0x02 + +// scaling support +#define NVT_EDID_CVT_SCALING_HOR_SHRINK 0x08 +#define NVT_EDID_CVT_SCALING_HOR_STRETCH 0x04 +#define NVT_EDID_CVT_SCALING_VER_SHRINK 0x02 +#define NVT_EDID_CVT_SCALING_VER_STRETCH 0x01 + +// +// +// +//*** (Tag = 0xFC) ***/ +// Display Product Name +typedef struct tagNVT_EDID_DD_PRODUCT_NAME +{ + NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE]; + NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE]; +} NVT_EDID_DD_PRODUCT_NAME; +// +// +// +//*** (Tag = 0xFB) ***/ +// the 18-byte display descriptors +// Display Color Point Data +typedef struct tagNVT_EDID_DD_COLOR_POINT +{ + NvU8 wp1_index; + NvU16 wp1_x; + NvU16 wp1_y; + NvU16 wp1_gamma; + NvU8 wp2_index; + NvU16 wp2_x; + NvU16 wp2_y; + NvU16 wp2_gamma; +} NVT_EDID_DD_COLOR_POINT; +// +// +// +//*** (Tag = 0xFA) ***/ +// Standard Timing Identifications +#define NVT_EDID_DD_STI_NUM 6 + +typedef struct tagNVT_EDID_DD_STD_TIMING +{ + NvU16 descriptor[NVT_EDID_DD_STI_NUM]; +} NVT_EDID_DD_STD_TIMING; +// +// +// +//*** (Tag = 0xF9) ***/ +// Display Color Management Data (DCM) +typedef struct tagNVT_EDID_DD_COLOR_MANAGEMENT_DATA +{ + NvU16 red_a3; + NvU16 red_a2; + NvU16 green_a3; + NvU16 green_a2; + NvU16 blue_a3; + NvU16 blue_a2; +} NVT_EDID_DD_COLOR_MANAGEMENT_DATA; +// +// +// +//*** (Tag = 0xF8) ***/ +// CVT 3 Byte Timing Code +#define NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR 4 + +typedef struct tagEDID_DD_CVT_3BYTE_BLOCK +{ + NvU16 addressable_lines : 14; + NvU8 aspect_ratio : 2; + NvU8 reserved0 : 1; + NvU8 preferred_vert_rates : 2; + NvU8 supported_vert_rates : 5; + +} NVT_EDID_DD_CVT_3BYTE_BLOCK; + +typedef struct tagNVT_EDID_DD_CVT_3BYTE +{ + NVT_EDID_DD_CVT_3BYTE_BLOCK block[NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR]; +} NVT_EDID_DD_CVT_3BYTE; + +#define NVT_EDID_CVT3_ASPECT_4X3 0x00 +#define NVT_EDID_CVT3_ASPECT_16X9 0x01 +#define NVT_EDID_CVT3_ASPECT_16X10 0x02 +#define NVT_EDID_CVT3_ASPECT_15X9 0x03 + +#define NVT_EDID_CVT3_PREFFERED_RATE_50HZ 0x00 +#define NVT_EDID_CVT3_PREFFERED_RATE_60HZ 0x01 +#define NVT_EDID_CVT3_PREFFERED_RATE_75HZ 0x02 +#define NVT_EDID_CVT3_PREFFERED_RATE_85HZ 0x03 + +#define NVT_EDID_CVT3_SUPPORTED_RATE_50HZ 0x10 +#define NVT_EDID_CVT3_SUPPORTED_RATE_60HZ 0x08 +#define NVT_EDID_CVT3_SUPPORTED_RATE_75HZ 0x04 +#define NVT_EDID_CVT3_SUPPORTED_RATE_85HZ 0x02 +#define NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING 0x01 +// +// +// +//*** (Tag = 0xF7) ***/ +// Established Timings III +// +#define NVT_EDID_DD_EST_TIMING3_NUM 6 + +typedef struct tagNVT_EDID_DD_EST_TIMING3 +{ + NvU8 revision; + NvU8 data[NVT_EDID_DD_EST_TIMING3_NUM]; +} NVT_EDID_DD_EST_TIMING3; +// +// +// +//*** (Tag = 0x10) ***/ +// Dummy Descriptor Definition +typedef struct tagNVT_EDID_DD_DUMMY_DESCRIPTOR +{ + NvU8 data[13]; +} NVT_EDID_DD_DUMMY_DESCRIPTOR; +// +// +// +//*** (Tag = 0x0F) ***/ +// Manufacturer Special Data +typedef struct tagNVT_EDID_DD_MANUF_DATA +{ + NvU8 data[13]; +} NVT_EDID_DD_MANUF_DATA; +// +// +// +// the translated generic 18-byte long descriptor +typedef struct tagNVT_EDID_18BYTE_DESCRIPTOR +{ + NvU8 tag; + union + { + NVT_EDID_DD_SERIAL_NUMBER serial_number; + NVT_EDID_DD_DATA_STRING data_str; + NVT_EDID_DD_RANGE_LIMIT range_limit; + NVT_EDID_DD_PRODUCT_NAME product_name; + NVT_EDID_DD_COLOR_POINT color_point; + NVT_EDID_DD_STD_TIMING std_timing; + NVT_EDID_DD_COLOR_MANAGEMENT_DATA color_man; + NVT_EDID_DD_CVT_3BYTE cvt; + NVT_EDID_DD_EST_TIMING3 est3; + 
NVT_EDID_DD_DUMMY_DESCRIPTOR dummy;
+        NVT_EDID_DD_MANUF_DATA manuf_data;
+    } u;
+} NVT_EDID_18BYTE_DESCRIPTOR;
+//
+//
+// Display Descriptor Tags
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DPSN 0xFF // display product serial number
+#define NVT_EDID_DISPLAY_DESCRIPTOR_ADS 0xFE // alphanumeric data string (ASCII)
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DRL 0xFD // display range limit
+#define NVT_EDID_DISPLAY_DESCRITPOR_DPN 0xFC // display product name
+#define NVT_EDID_DISPLAY_DESCRIPTOR_CPD 0xFB // color point data
+#define NVT_EDID_DISPLAY_DESCRIPTOR_STI 0xFA // standard timing identification
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DCM 0xF9 // display color management
+#define NVT_EDID_DISPLAY_DESCRIPTOR_CVT 0xF8 // CVT 3-byte timing code
+#define NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII 0xF7 // established timings III
+#define NVT_EDID_DISPLAY_DESCRIPTOR_DUMMY 0x10 // dummy descriptor
+
+//*******************
+// Raw EDID offsets and info
+//*******************
+//
+// Byte 14, video input definition
+//
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MASK 0x0F // dvi/hdmi/dp
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_MASK 0x70 // bpc support
+#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_MASK 0x80 // digital/analog
+//
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_SHIFT 0
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_SHIFT 4
+#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_SHIFT 7
+//
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED 0
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DVI_SUPPORTED 1
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED 2
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED 3
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MDDI_SUPPORTED 4
+#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DISPLAYPORT_SUPPORTED 5
+//#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_RESERVED 6 - 15
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_UNDEFINED 0
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_6BPC 1
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_8BPC 2
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_10BPC 3
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_12BPC 4
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_14BPC 5
+#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_16BPC 6
+//#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_RESERVED 7
+#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL 0x01
+//
+// Byte 18, feature support
+//
+#define NVT_EDID_OTHER_FEATURES_MASK 0x07 // sRGB space, preferred timing, continuous freq.
+#define NVT_EDID_DISPLAY_COLOR_TYPE_MASK 0x18 // for analog, see byte 14, bit 7
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_MASK 0x18 // for digital
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_MASK 0xE0 // standby/suspend/active off
+//
+#define NVT_EDID_OTHER_FEATURES_SHIFT 0
+#define NVT_EDID_DISPLAY_COLOR_TYPE_SHIFT 3
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_SHIFT 3
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SHIFT 5
+//
+#define NVT_EDID_OTHER_FEATURES_USES_CONTINUOUS_FREQ (1 << 0)
+#define NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE (1 << 1)
+#define NVT_EDID_OTHER_FEATURES_SRGB_DEFAULT_COLORSPACE (1 << 2)
+//
+#define NVT_EDID_DISPLAY_COLOR_TYPE_MONOCHROME 0
+#define NVT_EDID_DISPLAY_COLOR_TYPE_RGB 1
+#define NVT_EDID_DISPLAY_COLOR_TYPE_NON_RGB 2
+#define NVT_EDID_DISPLAY_COLOR_TYPE_UNDEFINED 3
+//
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_YCBCR_444 (1 << 0) // RGB is always supported
+#define NVT_EDID_DISPLAY_COLOR_ENCODING_YCBCR_422 (1 << 1) // RGB is always supported
+//
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_ACTIVE_OFF (1 << 0)
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_SUSPENDED_MODE (1 << 1)
+#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_STANDBY_MODE (1 << 2)
+//
+// edid offsets
+//
+#define NVT_EDID_VIDEO_INPUT_DEFINITION 0x14
+#define NVT_EDID_FEATURE_SUPPORT 0x18
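A hedged sketch (illustrative only, not part of the patch) of decoding the raw video input definition byte with the masks above; pEdid is a hypothetical pointer to the raw EDID base block:

    // Illustrative sketch only: decode EDID byte 0x14 (video input definition)
    // with the masks and shifts defined above.
    static void decodeVideoInput(const NvU8 *pEdid)
    {
        NvU8 b = pEdid[NVT_EDID_VIDEO_INPUT_DEFINITION];
        if (b & NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_MASK)
        {
            NvU8 iface = (b & NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MASK) >> NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_SHIFT;
            NvU8 bpc   = (b & NVT_EDID_VIDEO_COLOR_BIT_DEPTH_MASK) >> NVT_EDID_VIDEO_COLOR_BIT_DEPTH_SHIFT; // e.g. 2 => 8bpc, per the values above
            (void)iface; (void)bpc;
        }
    }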
+
+
+//*******************
+// Parsed EDID info
+//*******************
+//
+#define NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR 4
+#define NVT_EDID_MAX_STANDARD_TIMINGS 8
+#define NVT_EDID_MAX_TOTAL_TIMING NVT_MAX_TOTAL_TIMING
+#define NVT_EDID_VER_1_1 0x101
+#define NVT_EDID_VER_1_2 0x102
+#define NVT_EDID_VER_1_3 0x103
+#define NVT_EDID_VER_1_4 0x104
+//
+// byte 0x14, Digital
+// bits 0-3
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_NOT_DEFINED 0x0
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_DVI 0x1
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_HDMI_A 0x2
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_HDMI_B 0x3
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_MDDI 0x4
+#define NVT_EDID_VIDEOSIGNAL_INTERFACE_DP 0x5
+// bits 4-6; these are translated values. See NvTiming_ParseEDIDInfo()
+#define NVT_EDID_VIDEOSIGNAL_BPC_NOT_DEFINED 0
+#define NVT_EDID_VIDEOSIGNAL_BPC_6 6
+#define NVT_EDID_VIDEOSIGNAL_BPC_8 8
+#define NVT_EDID_VIDEOSIGNAL_BPC_10 10
+#define NVT_EDID_VIDEOSIGNAL_BPC_12 12
+#define NVT_EDID_VIDEOSIGNAL_BPC_14 14
+#define NVT_EDID_VIDEOSIGNAL_BPC_16 16
+//
+// byte 0x18, edid 1.3
+// bits 3-4
+#define NVT_EDID_FEATURESUPPORT_COLOR_MONOCHROME 0x0 /* Monochrome/grayscale display */
+#define NVT_EDID_FEATURESUPPORT_COLOR_RGB 0x1 /* R/G/B color display */
+#define NVT_EDID_FEATURESUPPORT_COLOR_MULTICOLOR 0x2 /* non R/G/B multicolor displays e.g. R/G/Y */
+#define NVT_EDID_FEATURESUPPORT_COLOR_UNDEFINED 0x3 /* Undefined */
+//
+// byte 0x18, edid 1.4
+// bits 3-4
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_RBG 0x0 /* RGB always supported */
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB444 0x1 /* RGB + 444 */
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB422 0x2 /* RGB + 422 */
+#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB 0x3 /* RGB + 444 + 422 supported */
+//
+//
+// structure used internally to map support for HDMI 3D modes.
+#define MAX_EDID_ADDRESSABLE_3D_VICS 16
+#define MAX_3D_VICS_RESERVED_FOR_MANDATORY 8
+#define MAX_3D_VICS_SUPPORTED (MAX_EDID_ADDRESSABLE_3D_VICS + MAX_3D_VICS_RESERVED_FOR_MANDATORY)
+
+// Constants given by Dolby to be appended for chromaticity information
+#define NVT_DOLBY_CHROMATICITY_MSB_BX 0x20
+#define NVT_DOLBY_CHROMATICITY_MSB_BY 0x08
+#define NVT_DOLBY_CHROMATICITY_MSB_GX 0x00
+#define NVT_DOLBY_CHROMATICITY_MSB_GY 0x80
+#define NVT_DOLBY_CHROMATICITY_MSB_RX 0xA0
+#define NVT_DOLBY_CHROMATICITY_MSB_RY 0x40
+
+typedef struct _HDMI3DDetails
+{
+    NvU8  Vic;
+    NvU16 StereoStructureMask;
+    NvU8  SideBySideHalfDetail;
+} HDMI3DDETAILS;
+
+typedef struct _SupportMap
+{
+    HDMI3DDETAILS map[MAX_3D_VICS_SUPPORTED];
+    NvU32 total;
+} HDMI3DSUPPORTMAP;
+
+typedef struct tagNVT_EXT_TIMING
+{
+    NVT_TIMING timing;
+    NVT_HDMIEXT HDMI3D;
+} NVT_EXT_TIMING;
+
+typedef struct _NVDA_VSDB_PARSED_INFO
+{
+    NvBool valid;
+    NvU8 vsdbVersion;
+
+    // these fields are specified in version 1 of the NVDA VSDB
+    union
+    {
+        struct
+        {
+            NvBool supportsVrr;
+            NvU8 minRefreshRate;
+        } v1;
+    } vrrData;
+
+} NVDA_VSDB_PARSED_INFO;
+
+typedef enum _MSFT_VSDB_DESKTOP_USAGE
+{
+    MSFT_VSDB_NOT_USABLE_BY_DESKTOP = 0,
+    MSFT_VSDB_USABLE_BY_DESKTOP = 1
+} MSFT_VSDB_DESKTOP_USAGE;
+
+typedef enum _MSFT_VSDB_THIRD_PARTY_USAGE
+{
+    MSFT_VSDB_NOT_USABLE_BY_THIRD_PARTY = 0,
+    MSFT_VSDB_USABLE_BY_THIRD_PARTY = 1
+} MSFT_VSDB_THIRD_PARTY_USAGE;
+
+typedef enum _MSFT_VSDB_PRIMARY_USE_CASE
+{
+    MSFT_VSDB_FOR_UNDEFINED = 0,
+    MSFT_VSDB_FOR_TEST_EQUIPMENT = 0x1,
+    MSFT_VSDB_FOR_GENERIC_DISPLAY = 0x2,
+    MSFT_VSDB_FOR_TELEVISION_DISPLAY = 0x3,
+    MSFT_VSDB_FOR_DESKTOP_PRODUCTIVITY_DISPLAY = 0x4,
+    MSFT_VSDB_FOR_DESKTOP_GAMING_DISPLAY = 0x5,
+    MSFT_VSDB_FOR_PRESENTATION_DISPLAY = 0x6,
+    MSFT_VSDB_FOR_VIRTUAL_REALITY_HEADSETS = 0x7,
+    MSFT_VSDB_FOR_AUGMENTED_REALITY = 0x8,
+    MSFT_VSDB_FOR_VIDEO_WALL_DISPLAY = 0x10,
+    MSFT_VSDB_FOR_MEDICAL_IMAGING_DISPLAY = 0x11,
+    MSFT_VSDB_FOR_DEDICATED_GAMING_DISPLAY = 0x12,
+    MSFT_VSDB_FOR_DEDICATED_VIDEO_MONITOR_DISPLAY = 0x13,
+    MSFT_VSDB_FOR_ACCESSORY_DISPLAY = 0x14
+} MSFT_VSDB_PRIMARY_USE_CASE;
+
+#define MSFT_VSDB_CONTAINER_ID_SIZE (16)
+#define MSFT_VSDB_MAX_VERSION_SUPPORT (3)
+
+typedef struct _MSFT_VSDB_PARSED_INFO
+{
+    NvBool valid;
+    NvU8 version;
+
+    MSFT_VSDB_DESKTOP_USAGE desktopUsage;
+    MSFT_VSDB_THIRD_PARTY_USAGE thirdPartyUsage;
+    MSFT_VSDB_PRIMARY_USE_CASE primaryUseCase;
+    NvU8 containerId[MSFT_VSDB_CONTAINER_ID_SIZE];
+
+} MSFT_VSDB_PARSED_INFO;
+
+typedef struct tagNVT_HDMI_LLC_INFO
+{
+    // A.B.C.D address
+    NvU8 addrA;
+    NvU8 addrB;
+    NvU8 addrC;
+    NvU8 addrD;
+
+    NvU8 supports_AI : 1;
+    NvU8 dc_48_bit : 1;
+    NvU8 dc_36_bit : 1;
+    NvU8 dc_30_bit : 1;
+    NvU8 dc_y444 : 1;
+    NvU8 dual_dvi : 1;
+    NvU8 max_tmds_clock;
+    NvU8 effective_tmds_clock;
+    NvU8 latency_field_present : 1;
+    NvU8 i_latency_field_present : 1;
+    NvU8 hdmi_video_present : 1;
+    NvU8 cnc3 : 1;
+    NvU8 cnc2 : 1;
+    NvU8 cnc1 : 1;
+    NvU8 cnc0 : 1;
+    NvU8 video_latency;
+    NvU8 audio_latency;
+    NvU8 interlaced_video_latency;
+    NvU8 interlaced_audio_latency;
+    NvU8 threeD_present : 1;
+    NvU8 threeD_multi_present : 2;
+    NvU8 image_size : 2;
+    NvU8 hdmi_vic_len : 3;
+    NvU8 hdmi_3d_len : 5;
+    // for now ignoring the other extensions
+    // ....
+} NVT_HDMI_LLC_INFO;
+
+typedef struct tagNVT_HDMI_FORUM_INFO
+{
+    NvU8 max_TMDS_char_rate;
+
+    NvU8 threeD_Osd_Disparity : 1;
+    NvU8 dual_view : 1;
+    NvU8 independent_View : 1;
+    NvU8 lte_340Mcsc_scramble : 1;
+    NvU8 ccbpci : 1;
+    NvU8 cable_status : 1;
+    NvU8 rr_capable : 1;
+    NvU8 scdc_present : 1;
+
+    NvU8 dc_30bit_420 : 1;
+    NvU8 dc_36bit_420 : 1;
+    NvU8 dc_48bit_420 : 1;
+    NvU8 uhd_vic : 1;
+    NvU8 max_FRL_Rate : 4;
+
+    NvU8 fapa_start_location : 1;
+    NvU8 allm : 1;
+    NvU8 fva : 1;
+    NvU8 cnmvrr : 1;
+    NvU8 cinemaVrr : 1;
+    NvU8 m_delta : 1;
+    NvU8 fapa_end_extended : 1;
+    NvU8 rsvd : 1;
+
+    NvU16 vrr_min : 6;
+    NvU16 vrr_max : 10;
+
+    NvU16 dsc_MaxSlices : 6;
+    NvU16 dsc_MaxPclkPerSliceMHz : 10;
+
+    NvU8 dsc_10bpc : 1;
+    NvU8 dsc_12bpc : 1;
+    NvU8 dsc_16bpc : 1;
+    NvU8 dsc_All_bpp : 1;
+    NvU8 dsc_Max_FRL_Rate : 4;
+
+    NvU8 dsc_Native_420 : 1;
+    NvU8 dsc_1p2 : 1;
+    NvU8 rsvd_2 : 6;
+
+    NvU8 dsc_totalChunkKBytes : 7; // = 1 + EDID reported DSC_TotalChunkKBytes
+    NvU8 rsvd_3 : 1;
+
+} NVT_HDMI_FORUM_INFO;
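As a usage note (an illustrative sketch, not part of the patch): in the HDMI 2.0 HF-VSDB the Max_TMDS_Character_Rate field is coded in units of 5 MHz, so the effective limit in MHz is recovered by multiplying by 5; the helper name is hypothetical.

    // Illustrative sketch only: recover the max TMDS character rate in MHz
    // from the parsed HDMI Forum info.
    static NvU32 hfMaxTmdsMHz(const NVT_HDMI_FORUM_INFO *hf)
    {
        return (NvU32)hf->max_TMDS_char_rate * 5;
    }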
+
+typedef struct tagNVT_HDR_STATIC_METADATA
+{
+    struct
+    {
+        NvU8 trad_gamma_sdr_eotf : 1;
+        NvU8 trad_gamma_hdr_eotf : 1;
+        NvU8 smpte_st_2084_eotf : 1;
+        NvU8 future_eotf : 1;
+    } supported_eotf;
+
+    NvU8 static_metadata_type; // set to 1 if the sink supports static metadata type 1
+    NvU8 max_cll; // maximum luminance level value
+    NvU8 max_fall; // maximum frame-average luminance
+    NvU8 min_cll; // minimum luminance level value
+
+} NVT_HDR_STATIC_METADATA;
+
+typedef struct tagNVT_DV_STATIC_METADATA
+{
+    NvU32 ieee_id : 24;
+    NvU32 VSVDB_version : 3;
+    NvU32 dm_version : 8;
+    NvU32 supports_2160p60hz : 1;
+    NvU32 supports_YUV422_12bit : 1;
+    NvU32 supports_global_dimming : 1;
+    NvU32 colorimetry : 1;
+    NvU32 target_min_luminance : 12;
+    NvU32 target_max_luminance : 12;
+    NvU32 cc_red_x : 12;
+    NvU32 cc_red_y : 12;
+    NvU32 cc_green_x : 12;
+    NvU32 cc_green_y : 12;
+    NvU32 cc_blue_x : 12;
+    NvU32 cc_blue_y : 12;
+    NvU32 cc_white_x : 12;
+    NvU32 cc_white_y : 12;
+    NvU32 supports_backlight_control : 2;
+    NvU32 backlt_min_luma : 2;
+    NvU32 interface_supported_by_sink : 2;
+    NvU32 supports_10b_12b_444 : 2;
+} NVT_DV_STATIC_METADATA;
+
+//***********************************
+// parsed DisplayID 2.0 definitions
+//***********************************
+#define NVT_DISPLAYID_2_0_PRODUCT_STRING_MAX_LEN 236
+
+// the basic info encoded in byte[3]
+#define NVT_DISPLAY_2_0_CAP_BASIC_AUDIO 0x40 // DTV monitor supports basic audio
+#define NVT_DISPLAY_2_0_CAP_YCbCr_444 0x20 // DTV monitor supports YCbCr4:4:4
+#define NVT_DISPLAY_2_0_CAP_YCbCr_422 0x10 // DTV monitor supports YCbCr4:2:2
+
+// vendor specific
+#define NVT_VESA_VENDOR_SPECIFIC_IEEE_ID 0x3A0292
+#define NVT_VESA_VENDOR_SPECIFIC_LENGTH 7
+
+#define NVT_VESA_ORG_VSDB_DATA_TYPE_MASK 0x07
+#define NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_MASK 0x80
+#define NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_SHIFT 7
+#define NVT_VESA_ORG_VSDB_PIXELS_OVERLAPPING_MASK 0x0F
+#define NVT_VESA_ORG_VSDB_MULTI_SST_MODE_MASK 0x60
+#define NVT_VESA_ORG_VSDB_MULTI_SST_MODE_SHIFT 5
+#define NVT_VESA_ORG_VSDB_PASS_THROUGH_INTEGER_MASK 0x3F
+#define NVT_VESA_ORG_VSDB_PASS_THROUGH_FRACTIOINAL_MASK 0x0F
+
+// adaptive-sync
+#define NVT_ADAPTIVE_SYNC_DESCRIPTOR_MAX_COUNT 0x04
+
+typedef enum _tagNVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE
+{
+    PRODUCT_PRIMARY_USE_TEST_EQUIPMENT = 1,
+    PRODUCT_PRIMARY_USE_GENERIC_DISPLAY = 2,
+    PRODUCT_PRIMARY_USE_TELEVISION = 3,
+    PRODUCT_PRIMARY_USE_DESKTOP_PRODUCTIVITY = 4,
+    PRODUCT_PRIMARY_USE_DESKTOP_GAMING = 5,
PRODUCT_PRIMARY_USE_PRESENTATION = 6, + PRODUCT_PRIMARY_USE_HEAD_MOUNT_VIRTUAL_REALITY = 7, + PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY = 8, +} NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE; + +typedef enum _tagNVT_DISPLAYID_SCAN_ORIENTATION +{ + SCAN_ORIENTATION_LRTB = 0, + SCAN_ORIENTATION_RLTB = 1, + SCAN_ORIENTATION_TBRL = 2, + SCAN_ORIENTATION_BTRL = 3, + SCAN_ORIENTATION_RLBT = 4, + SCAN_ORIENTATION_LRBT = 5, + SCAN_ORIENTATION_BTLR = 6, + SCAN_ORIENTATION_TBLR = 7, +} NVT_DISPLAYID_SCAN_ORIENTATION; + +typedef enum _tagNVT_DISPLAYID_INTERFACE_EOTF +{ + INTERFACE_EOTF_NOT_DEFINED = 0x0, + INTERFACE_EOTF_SRGB = 0x1, + INTERFACE_EOTF_BT601 = 0x2, + INTERFACE_EOTF_BT1886 = 0x3, + INTERFACE_EOTF_ADOBE_RGB = 0x4, + INTERFACE_EOTF_DCI_P3 = 0x5, + INTERFACE_EOTF_BT2020 = 0x6, + INTERFACE_EOTF_NATIVE_GAMMA = 0x7, + INTERFACE_EOTF_SMPTE_ST2084 = 0x8, + INTERFACE_EOTF_HYBRID_LOG = 0x9, + INTERFACE_EOTF_CUSTOM = 0x10, +} NVT_DISPLAYID_INTERFACE_EOTF; + +typedef enum _tagNVT_DISPLAYID_INTERFACE_COLOR_SPACE +{ + INTERFACE_COLOR_SPACE_NOT_DEFINED = 0x0, + INTERFACE_COLOR_SPACE_SRGB = 0x1, + INTERFACE_COLOR_SPACE_BT601 = 0x2, + INTERFACE_COLOR_SPACE_BT709 = 0x3, + INTERFACE_COLOR_SPACE_ADOBE_RGB = 0x4, + INTERFACE_COLOR_SPACE_DCI_P3 = 0x5, + INTERFACE_COLOR_SPACE_BT2020 = 0x6, + INTERFACE_COLOR_SPACE_CUSTOM = 0x7, +} NVT_DISPLAYID_INTERFACE_COLOR_SPACE; + +typedef enum _tagNVT_DISPLAYID_DEVICE_TECHNOLOGY +{ + DEVICE_TECHNOLOGY_NOT_SPECIFIED, + DEVICE_TECHNOLOGY_LCD, + DEVICE_TECHNOLOGY_OLED, +} NVT_DISPLAYID_DEVICE_TECHNOLOGY; + +typedef struct _tagNVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY +{ + NvU32 revision; + + struct + { + NvBool bSingleEnclosure; + NvBool bHasBezelInfo; + NVT_SINGLE_TILE_BEHAVIOR single_tile_behavior; + NVT_MULTI_TILE_BEHAVIOR multi_tile_behavior; + } capability; + + struct + { + NvU32 row; + NvU32 col; + } topology; + + struct + { + NvU32 x; + NvU32 y; + } location; + + struct + { + NvU32 width; + NvU32 height; + } native_resolution; + + struct + { + NvU32 top; // Top bezel in pixels + NvU32 bottom; // Bottom bezel in pixels + NvU32 right; // Right bezel in pixels + NvU32 left; // Left bezel in pixels + } bezel_info; + + NVT_TILEDDISPLAY_TOPOLOGY_ID tile_topology_id; +} NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY; + +typedef struct _tagNVT_DISPLAYID_CONTAINERID +{ + NvU32 revision; + NvU32 data1; + NvU16 data2; + NvU16 data3; + NvU16 data4; + NvU8 data5[6]; +} NVT_DISPLAYID_CONTAINERID; + +typedef struct _tagNVT_DISPLAYID_INTERFACE_FEATURES +{ + NvU32 revision; + + NVT_COLORDEPTH rgb444; // each bit within is set if rgb444 supported on that bpc + NVT_COLORDEPTH yuv444; // each bit within is set if yuv444 supported on that bpc + NVT_COLORDEPTH yuv422; // each bit within is set if yuv422 supported on that bpc + NVT_COLORDEPTH yuv420; // each bit within is set if yuv420 supported on that bpc + + NvU32 yuv420_min_pclk; + + struct + { + NvU8 support_32khz : 1; + NvU8 support_44_1khz : 1; + NvU8 support_48khz : 1; + NvU8 rsvd : 5; + } audio_capability; + + NvU32 combination_count; + struct + { + NVT_DISPLAYID_INTERFACE_EOTF eotf; + NVT_DISPLAYID_INTERFACE_COLOR_SPACE color_space; + } colorspace_eotf_combination[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF + 1]; + +} NVT_DISPLAYID_INTERFACE_FEATURES; + +typedef struct _tagNVT_DISPLAYID_PRODUCT_IDENTITY +{ + NvU32 revision; + NvU32 vendor_id; + NvU16 product_id; + NvU32 serial_number; + NvU16 week; + NvU16 year; + NvU8 product_string[NVT_DISPLAYID_2_0_PRODUCT_STRING_MAX_LEN + 1]; +} 
NVT_DISPLAYID_PRODUCT_IDENTITY;
+
+typedef enum _tagNVT_COLOR_MAP_STANDARD
+{
+    COLOR_MAP_CIE_1931,
+    COLOR_MAP_CIE_1976,
+} NVT_COLOR_MAP_STANDARD;
+
+typedef enum _tagNVT_AUDIO_SPEAKER_INTEGRATED
+{
+    AUDIO_SPEAKER_INTEGRATED_SUPPORTED = 0,
+    AUDIO_SPEAKER_INTEGRATED_NOT_SUPPORTED = 1,
+} NVT_AUDIO_SPEAKER_INTEGRATED;
+
+typedef enum _tagNVT_NATIVE_LUMINANCE_INFO
+{
+    NATIVE_LUMINANCE_INFO_MIN_GURANTEE_VALUE = 0,
+    NATIVE_LUMINANCE_INFO_SOURCE_DEVICE_GUIDANCE = 1,
+} NVT_NATIVE_LUMINANCE_INFO;
+
+typedef struct _tagNVT_DISPLAYID_DISPLAY_PARAMETERS
+{
+    NvU32 revision;
+    NvU32 h_image_size_micro_meter;
+    NvU32 v_image_size_micro_meter;
+    NvU16 h_pixels;
+    NvU16 v_pixels;
+    NVT_DISPLAYID_SCAN_ORIENTATION scan_orientation;
+    NVT_COLOR_MAP_STANDARD color_map_standard;
+    NVT_COLOR_POINT primaries[3];
+    NVT_COLOR_POINT white;
+    NVT_NATIVE_LUMINANCE_INFO native_luminance_info;
+    NvU16 native_max_luminance_full_coverage;
+    NvU16 native_max_luminance_1_percent_rect_coverage;
+    NvU16 native_min_luminance;
+    NVT_COLORDEPTH native_color_depth;
+    NvU16 gamma_x100;
+    NVT_DISPLAYID_DEVICE_TECHNOLOGY device_technology;
+    NvBool device_theme_Preference;
+    NvBool audio_speakers_integrated;
+} NVT_DISPLAYID_DISPLAY_PARAMETERS;
+
+typedef struct _tagNVT_DISPLAYID_ADAPTIVE_SYNC
+{
+    union
+    {
+        NvU8 operation_range_info;
+        struct
+        {
+            NvU8 adaptive_sync_range : 1;
+            NvU8 duration_inc_flicker_perf : 1;
+            NvU8 modes : 2;
+            NvU8 seamless_not_support : 1;
+            NvU8 duration_dec_flicker_perf : 1;
+            NvU8 reserved : 2;
+        } information;
+    } u;
+
+    NvU8  max_duration_inc;
+    NvU8  min_rr;
+    NvU16 max_rr;
+    NvU8  max_duration_dec;
+} NVT_DISPLAYID_ADAPTIVE_SYNC;
+
+typedef struct _tagVESA_VSDB_PARSED_INFO
+{
+    struct
+    {
+        NvU8 type : 3;
+        NvU8 reserved : 4;
+        NvU8 color_space_and_eotf : 1;
+    } data_struct_type;
+
+    struct
+    {
+        NvU8 pixels_overlapping_count : 4;
+        NvU8 reserved_0 : 1;
+        NvU8 multi_sst : 2;
+        NvU8 reserved_1 : 1;
+    } overlapping;
+
+    struct
+    {
+        NvU8 pass_through_integer_dsc : 6;
+        NvU8 reserved : 2;
+    } pass_through_integer;
+
+    struct
+    {
+        NvU8 pass_through_fraction_dsc : 4;
+        NvU8 reserved : 4;
+    } pass_through_fractional;
+} VESA_VSDB_PARSED_INFO;
+
+typedef struct _tagNVT_DISPLAYID_VENDOR_SPECIFIC
+{
+    NVT_HDMI_LLC_INFO hdmiLlc;
+    NVT_HDMI_FORUM_INFO hfvs;
+    NVDA_VSDB_PARSED_INFO nvVsdb;
+    MSFT_VSDB_PARSED_INFO msftVsdb;
+    VESA_VSDB_PARSED_INFO vesaVsdb;
+} NVT_DISPLAYID_VENDOR_SPECIFIC;
+
+typedef struct _tagNVT_DISPLAYID_CTA
+{
+    NVT_EDID_CEA861_INFO cta861_info;
+    NVT_HDR_STATIC_METADATA hdrInfo;
+    NVT_DV_STATIC_METADATA dvInfo;
+} NVT_DISPLAYID_CTA;
+
+typedef struct _tagNVT_VALID_DATA_BLOCKS
+{
+    NvBool product_id_present;
+    NvBool parameters_present;
+    NvBool type7Timing_present;
+    NvBool type8Timing_present;
+    NvBool type9Timing_present;
+    NvBool dynamic_range_limit_present;
+    NvBool interface_feature_present;
+    NvBool stereo_interface_present;
+    NvBool tiled_display_present;
+    NvBool container_id_present;
+    NvBool type10Timing_present;
+    NvBool adaptive_sync_present;
+    NvBool arvr_hmd_present;
+    NvBool arvr_layer_present;
+    NvBool vendor_specific_present;
+    NvBool cta_data_present;
+} NVT_VALID_DATA_BLOCKS;
+
+#define NVT_DISPLAYID_MAX_TOTAL_TIMING NVT_MAX_TOTAL_TIMING
+typedef struct _tagNVT_DISPLAYID_2_0_INFO
+{
+    NvU8 revision;
+    NvU8 version;
+
+    // supports audio/yuv444/yuv422 color, for CTA861 compatibility
+    NvU8 basic_caps;
+
+    // all the extensions that may follow the base section
+    NvU32 extension_count;
+
+    // whether this DisplayID 2.0 block arrived as an EDID extension
+    NvBool as_edid_extension;
+
+    // data blocks present or not
+    NVT_VALID_DATA_BLOCKS valid_data_blocks;
+
+    NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE primary_use_case;
+
+    // Product Identification Data Block (Mandatory)
+    NVT_DISPLAYID_PRODUCT_IDENTITY product_identity;
+
+    // Display Parameter Data Block (Mandatory for Display Use)
+    NVT_DISPLAYID_DISPLAY_PARAMETERS display_param;
+
+    // Detailed Timing Data Block (Mandatory for Display Use)
+    NvU32 total_timings;
+    NVT_TIMING timing[NVT_DISPLAYID_MAX_TOTAL_TIMING];
+
+    // Enumerated Timing Code Data Block (Not Mandatory)
+
+    // Formula-based Timing Data Block (Not Mandatory)
+
+    // Dynamic Video Timing Range Limits Data Block (Not Mandatory)
+    NVT_DISPLAYID_RANGE_LIMITS range_limits;
+
+    // Display Interface Features Data Block (Mandatory)
+    NVT_DISPLAYID_INTERFACE_FEATURES interface_features;
+
+    // Stereo Display Interface Data Block (Not Mandatory)
+
+    // Tiled Display Topology Data Block (Not Mandatory)
+    NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY tile_topo;
+
+    // ContainerID Data Block (Mandatory for Multi-function Device)
+    NVT_DISPLAYID_CONTAINERID container_id;
+
+    // Adaptive-Sync Data Block (Mandatory for display devices that support Adaptive-Sync)
+    NvU32 total_adaptive_sync_descriptor;
+    NVT_DISPLAYID_ADAPTIVE_SYNC adaptive_sync_descriptor[NVT_ADAPTIVE_SYNC_DESCRIPTOR_MAX_COUNT];
+
+    // Vendor-specific Data Block (Not Mandatory)
+    NVT_DISPLAYID_VENDOR_SPECIFIC vendor_specific;
+
+    // CTA DisplayID Data Block (Not Mandatory)
+    NVT_DISPLAYID_CTA cta;
+} NVT_DISPLAYID_2_0_INFO;
+
+#define NVT_EDID_PRIMARY_COLOR_FP2INT_FACTOR 1024 // Per EDID 1.4, a 10-bit color primary is encoded in floating point as (bit9/2 + bit8/4 + bit7/8 + ... + bit0/1024)
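A minimal conversion sketch (illustrative only, not part of the patch), following the encoding described in the comment above:

    // Illustrative sketch only: a raw 10-bit EDID chromaticity coordinate
    // represents the fraction raw/1024.
    static double edidCcToFloat(NvU16 raw10)
    {
        return (double)raw10 / NVT_EDID_PRIMARY_COLOR_FP2INT_FACTOR;
    }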
+typedef struct tagNVT_EDID_INFO
+{
+    // generic edid info
+    NvU32 version;
+    NvU16 manuf_id;
+    NvU16 manuf_id_hi;
+    NvU8  manuf_name[4];
+    NvU16 product_id;
+    NvU32 serial_number;
+    NvU8  week;
+    NvU16 year;
+
+    // the interface info
+    struct
+    {
+        union
+        {
+            struct
+            {
+                NvU8 serrations : 1;
+                NvU8 sync_type : 3;
+                NvU8 video_setup : 1;
+                NvU8 vp_p : 2;
+            } analog;
+            struct
+            {
+                NvU8 video_interface : 4;
+                NvU8 bpc : 5;
+            } digital;
+            NvU8 analog_data : 7;
+        } u;
+        NvU8 isDigital : 1;
+    } input;
+
+    // the screen size info
+    NvU8  screen_size_x; // horizontal screen size in cm
+    NvU8  screen_size_y; // vertical screen size in cm
+    NvU16 screen_aspect_x; // aspect ratio
+    NvU16 screen_aspect_y; // aspect ratio
+
+    // display transfer characteristics
+    NvU16 gamma;
+
+    // features support
+    union
+    {
+        NvU8 feature;
+        struct
+        {
+            NvU8 support_gtf : 1;
+            NvU8 preferred_timing_is_native : 1; // should be "preferred_timing_is_dtd1"; to be exact, "Native" refers to the native HDTV timing in the CEA861 extension block
+            NvU8 default_colorspace_srgb : 1;
+            NvU8 color_type : 2;
+            NvU8 support_active_off : 1;
+            NvU8 support_suspend : 1;
+            NvU8 support_standby : 1;
+        } feature_ver_1_3;
+        struct
+        {
+            NvU8 continuous_frequency : 1;
+            NvU8 preferred_timing_is_native : 1; // should be "preferred_timing_is_dtd1"; to be exact, "Native" refers to the native HDTV timing in the CEA861 extension block
+            NvU8 default_colorspace_srgb : 1;
+            NvU8 color_type : 2;
+            NvU8 support_active_off : 1;
+            NvU8 support_suspend : 1;
+            NvU8 support_standby : 1;
+        } feature_ver_1_4_analog;
+        struct
+        {
+            NvU8 continuous_frequency : 1;
+            NvU8 preferred_timing_is_native : 1; // should be "preferred_timing_is_dtd1"; to be exact, "Native" refers to the native HDTV timing in the CEA861 extension block
+            NvU8 default_colorspace_srgb : 1;
+            NvU8 support_ycrcb_444 : 1;
+            NvU8 support_ycrcb_422 : 1;
+            NvU8 support_active_off : 1;
+            NvU8 support_suspend : 1;
+            NvU8 support_standby : 1;
+        } feature_ver_1_4_digital;
+    } u;
+
+    // chromaticity coordinates
+    NvU16 cc_red_x;
+    NvU16 cc_red_y;
+    NvU16 cc_green_x;
+    NvU16 cc_green_y;
+    NvU16 cc_blue_x;
+    NvU16 cc_blue_y;
+    NvU16 cc_white_x;
+    NvU16 cc_white_y;
+
+    // established timings 1 and 2
+    NvU16 established_timings_1_2;
+
+    // Manufacturer reserved timings
+    NvU16 manufReservedTimings;
+
+    // standard timings
+    NvU16 standard_timings[NVT_EDID_MAX_STANDARD_TIMINGS];
+
+    // 18-byte display descriptor info
+    NVT_EDID_18BYTE_DESCRIPTOR ldd[NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR];
+
+    // the parsed timings
+    NVT_TIMING timing[NVT_EDID_MAX_TOTAL_TIMING];
+
+    // Note: this contains the number of timings after validation.
+    NvU32 total_timings;
+
+    // This contains the count of timings that were invalidated because they don't meet
+    // some policies (PClk, etc.).
+    NvU32 total_invalidTimings;
+
+    // indicates whether HDMI 1.4+ 3D stereo modes are present
+    NvU32 HDMI3DSupported;
+
+    HDMI3DSUPPORTMAP Hdmi3Dsupport;
+
+    // Data parsed from NVDA VSDB - Variable Refresh Rate Monitor capabilities
+    NVDA_VSDB_PARSED_INFO nvdaVsdbInfo;
+
+    // Data parsed from MSFT VSDB - HMD and Specialized (Direct display) Monitor capabilities
+    MSFT_VSDB_PARSED_INFO msftVsdbInfo;
+
+    // HDR capability information from the HDR Metadata Data Block
+    NVT_HDR_STATIC_METADATA hdr_static_metadata_info;
+
+    // DV capability information from the DV Metadata Data Block
+    NVT_DV_STATIC_METADATA dv_static_metadata_info;
+
+    // HDMI LLC info
+    NVT_HDMI_LLC_INFO hdmiLlcInfo;
+
+    // HDMI 2.0 information
+    NVT_HDMI_FORUM_INFO hdmiForumInfo;
+    // deprecating the following, please use hdmiForumInfo
+    struct
+    {
+        NvU8 max_TMDS_char_rate;
+        NvU8 lte_340Mcsc_scramble : 1;
+        NvU8 rr_capable : 1;
+        NvU8 SCDC_present : 1;
+    } hdmi_2_0_info;
+
+    // the total edid extension(s) attached to the basic block
+    NvU32 total_extensions;
+    // the total displayid2 extension(s) attached to the basic block
+    NvU32 total_did2_extensions;
+
+    NvU8 checksum;
+    NvU8 checksum_ok;
+
+    // extension info
+    NVT_EDID_CEA861_INFO ext861;
+
+    // for the 2nd CEA/EIA861 extension
+    // note: "ext861" should really be an array, but since that requires a massive name change and it's hard
+    // to find more than one 861 extension in the real world, I made a trade-off like this for now.
+    NVT_EDID_CEA861_INFO ext861_2;
+
+    NVT_DISPLAYID_INFO ext_displayid;
+    NVT_DISPLAYID_2_0_INFO ext_displayid20;
+} NVT_EDID_INFO;
+
+typedef enum
+{
+    NVT_PROTOCOL_UNKNOWN = 0,
+    NVT_PROTOCOL_DP = 1,
+    NVT_PROTOCOL_HDMI = 2,
+} NVT_PROTOCOL;
+
+// the display interface/connector claimed by the EDID
+#define NVT_EDID_INPUT_DIGITAL_UNDEFINED 0x00 // undefined digital interface
+#define NVT_EDID_INPUT_DVI 0x01
+#define NVT_EDID_INPUT_HDMI_TYPE_A 0x02
+#define NVT_EDID_INPUT_HDMI_TYPE_B 0x03
+#define NVT_EDID_INPUT_MDDI 0x04
+#define NVT_EDID_INPUT_DISPLAY_PORT 0x05
+
+
+// the EDID extension TAG
+#define NVT_EDID_EXTENSION_CTA 0x02 // CTA 861 series extensions
+#define NVT_EDID_EXTENSION_VTB 0x10 // video timing block extension
+#define NVT_EDID_EXTENSION_DI 0x40 // display information extension
+#define NVT_EDID_EXTENSION_LS 0x50 // localized string extension
+#define NVT_EDID_EXTENSION_DPVL 0x60 // digital packet video link extension
+#define NVT_EDID_EXTENSION_DISPLAYID 0x70 // display id
+#define NVT_EDID_EXTENSION_BM 0xF0 // extension block map
+#define NVT_EDID_EXTENSION_OEM 0xFF // extension defined by the display manufacturer
+
+//************************************
+// Audio and Video Infoframe Control
+//************************************
+//
+// the control info for generating infoframe data
+#define NVT_INFOFRAME_CTRL_DONTCARE 0xFF
+//
+typedef struct tagNVT_VIDEO_INFOFRAME_CTRL
+{
+    NvU8 color_space;
+    NvU8 active_format_info_present;
+    NvU8 bar_info;
+    NvU8 scan_info;
+    NvU8 colorimetry;
+    NvU8 pic_aspect_ratio;
+    NvU8 active_format_aspect_ratio;
+    NvU8 it_content;
+    NvU8 it_content_type;
+    NvU8 extended_colorimetry;
+    NvU8 rgb_quantization_range;
+    NvU8 nonuniform_scaling;
+    NvU8 video_format_id;
+    NvU8 pixel_repeat;
+    NvU16 top_bar;
+    NvU16 bottom_bar;
+    NvU16 left_bar;
+    NvU16 right_bar;
+} NVT_VIDEO_INFOFRAME_CTRL;
+//
+typedef struct tagNVT_AUDIO_INFOFRAME_CTRL
+{
+    NvU8 coding_type;
+    NvU8 channel_count;
+    NvU8 sample_rate;
+    NvU8 sample_depth;
+    NvU8 speaker_placement;
+    NvU8 level_shift;
+    NvU8 down_mix_inhibit;
+} NVT_AUDIO_INFOFRAME_CTRL;
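A usage sketch (illustrative only, not part of the patch): per the comment above, 0xFF means "don't care" for a control field, so a caller would typically start from an all-don't-care block and override selected fields.

    // Illustrative sketch only: initialize a video infoframe control block.
    // Note the byte-wise memset also sets the NvU16 bar fields to 0xFFFF;
    // whether that is interpreted the same way is up to the consumer.
    #include <string.h>
    NVT_VIDEO_INFOFRAME_CTRL ctrl;
    memset(&ctrl, NVT_INFOFRAME_CTRL_DONTCARE, sizeof(ctrl));
    ctrl.pixel_repeat = 0; // then override the fields that are actually known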
+
+typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER
+{
+    NvU8 type;
+    NvU8 firstLast;
+    NvU8 sequenceIndex;
+} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER;
+
+#define NVT_EMP_HEADER_FIRST 0x80
+#define NVT_EMP_HEADER_LAST 0x40
+#define NVT_EMP_HEADER_FIRST_LAST 0xC0
+
+// SPD Infoframe
+typedef struct tagNVT_SPD_INFOFRAME_PAYLOAD
+{
+    NvU8 vendorBytes[8];
+    NvU8 productBytes[16];
+
+    NvU8 sourceInformation;
+
+    // Since the HDMI library doesn't clear the rest of the bytes and the checksum is calculated over all 32 bytes: temporary WAR
+    NvU8 paddingBytes[3];
+
+
+} NVT_SPD_INFOFRAME_PAYLOAD;
+
+typedef struct tagNVT_SPD_INFOFRAME
+{
+    NVT_INFOFRAME_HEADER Header;
+    NVT_SPD_INFOFRAME_PAYLOAD Data;
+} NVT_SPD_INFOFRAME;
+
+// the video infoframe version 1-3 structure
+typedef struct tagNVT_VIDEO_INFOFRAME
+{
+    NvU8 type;
+    NvU8 version;
+    NvU8 length;
+
+    // byte 1~5
+    NvU8 byte1;
+    NvU8 byte2;
+    NvU8 byte3;
+    NvU8 byte4;
+    NvU8 byte5;
+
+    // byte 6~13
+    NvU8 top_bar_low;
+    NvU8 top_bar_high;
+    NvU8 bottom_bar_low;
+    NvU8 bottom_bar_high;
+    NvU8 left_bar_low;
+    NvU8 left_bar_high;
+    NvU8 right_bar_low;
+    NvU8 right_bar_high;
+
+}NVT_VIDEO_INFOFRAME;
+//
+#define NVT_VIDEO_INFOFRAME_VERSION_1 1
+#define NVT_VIDEO_INFOFRAME_VERSION_2 2
+#define NVT_VIDEO_INFOFRAME_VERSION_3 3
+#define NVT_VIDEO_INFOFRAME_VERSION_4 4
+//
+#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_MASK 0x03
+#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_OVERSCANNED 1
+#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_UNDERSCANNED 2
+#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_FUTURE 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_MASK 0x0C
+#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_SHIFT 2
+#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_NOT_VALID 0
+#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_VERT_VALID 1
+#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_HORIZ_VALID 2
+#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_H_V_VALID 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE1_A0_MASK 0x10 // active format info present
+#define NVT_VIDEO_INFOFRAME_BYTE1_A0_SHIFT 4 // active format info present
+#define NVT_VIDEO_INFOFRAME_BYTE1_A0_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE1_A0_VALID 1
+//
+// CTA-861G new requirement - DD changed this policy
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2_MASK 8
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK 0xE0
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT 0x5
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_RGB 0
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr422 1
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr444 2
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr420 3
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_FUTURE 3 // nvlEscape still uses this, line 4266
+// CEA-861-F - Unix still uses this one
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_MASK 0x60
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_SHIFT 0x5
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_RGB 0
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422 1
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444 2
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr420 3
+#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_FUTURE 3 // nvlEscape still uses this, line 4266
+//
+#define NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_MASK 0x80 // for Infoframe V1 / V2
+#define NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_SHIFT 7
+//
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_MASK 0x0F // active format aspect ratio
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SAME_AS_M1M0 8
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_4X3_CENTER 9
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_16X9_CENTER 10
+#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_14x9_CENTER 11
+//
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_MASK 0x30 // picture aspect ratio
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_SHIFT 4 // picture aspect ratio
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3 1
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_16X9 2
+#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_FUTURE 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK 0xC0 // colorimetry
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT 6
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SMPTE170M_ITU601 1
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_ITU709 2
+#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_MASK 0x03 // non-uniform scaling
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_NONE 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_HORIZ_SCALED 1
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_VERT_SCALED 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_SC_H_V_SCALED 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_MASK 0x0C // quantization
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_SHIFT 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_DEFAULT 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE 1
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_FULL_RANGE 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_RESERVED 3
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK 0x70 // extended colorimetry
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT 4
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_xvYCC_601 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_xvYCC_709 1
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_sYCC_601 2
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdobeYCC_601 3
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdobeRGB 4
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020cYCC 5 // CEA-861-F defines it as "ITU-R BT.2020 YcCbcCrc" at Table 12
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020RGBYCC 6 // CEA-861-F defines it as "ITU-R BT.2020 YcCbCr" at Table 12
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_RESERVED7 7 // CEA-861-F defines it as "Reserved" at Table 12
+#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdditionalColorExt 7 // CTA-861-G defines it as "Additional Colorimetry Ext Info Valid" at Table 13
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_MASK 0x80 // IT content
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_SHIFT 7
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_NO_DATA 0
+#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT 1
+//
+#define NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_MASK 0x60 // reserved
+#define NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_SHIFT 5
+//
+#define NVT_VIDEO_INFOFRAME_BYTE4_VIC_MASK 0xFF // video identification code
+#define NVT_VIDEO_INFOFRAME_BYTE4_VIC_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE4_VIC7 0x80
+//
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V3_MASK 0x00
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V3_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_MASK 0x80
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_SHIFT 7
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_MASK 0xFF
+#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_SHIFT 0
+//
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_MASK 0x0F // pixel repetitions
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_SHIFT 0
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_NO_PEP 0
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_2X 1
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_3X 2
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_4X 3
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_5X 4
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_6X 5
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_7X 6
+#define NVT_VIDEO_INFOFRAME_BYTE5_PR_8X 7 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_9X 8 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_10X 9 +// +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_MASK 0x30 // Content Information +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_SHIFT 4 +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_NODATA 0 // ITC = 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GRAPHICS 0 // ITC = 1 +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_PHOTO 1 // ITC = don't care +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_CINEMA 2 // ITC = don't care +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GAME 3 // ITC = don't care + +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_MASK 0xC0 // YCC quantization +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_SHIFT 6 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_LIMITED_RANGE 1 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_FULL_RANGE 2 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_RESERVED3 3 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_RESERVED4 4 +// +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_MASK 0xc0 // content type +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_SHIFT 6 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_LIMITED 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_FULL 1 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_RSVD1 2 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_RSVD2 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V2_MASK 0x00 +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V2_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_MASK 0xFF +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_SHIFT 0 +// +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_V4_MASK 0xF0 +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_V4_SHIFT 4 +#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_0 0 +#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_1 1 +// +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_MASK 0x0F +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_SHIFT 0 +// +#define NVT_VIDEO_INFOFRAME_CONTENT_VIDEO 0 +#define NVT_VIDEO_INFOFRAME_CONTENT_GRAPHICS 1 +#define NVT_VIDEO_INFOFRAME_CONTENT_PHOTO 2 +#define NVT_VIDEO_INFOFRAME_CONTENT_CINEMA 3 +#define NVT_VIDEO_INFOFRAME_CONTENT_GAME 4 +#define NVT_VIDEO_INFOFRAME_CONTENT_LAST 4 + +#pragma pack(1) +typedef struct +{ + // byte 1 + struct + { + NvU8 scanInfo : 2; + NvU8 barInfo : 2; + NvU8 activeFormatInfoPresent : 1; + NvU8 colorSpace : 2; + NvU8 rsvd_bits_byte1 : 1; + } byte1; + + // byte 2 + struct + { + NvU8 activeFormatAspectRatio : 4; + NvU8 picAspectRatio : 2; + NvU8 colorimetry : 2; + } byte2; + + // byte 3 + struct + { + NvU8 nonuniformScaling : 2; + NvU8 rgbQuantizationRange : 2; + NvU8 extendedColorimetry : 3; + NvU8 itContent : 1; + } byte3; + + // byte 4 + struct + { + NvU8 vic : 7; + NvU8 rsvd_bits_byte4 : 1; + } byte4; + + // byte 5 + struct + { + NvU8 pixelRepeat : 4; + NvU8 contentTypes : 2; + NvU8 yccQuantizationRange : 2; + } byte5; + + NvU16 topBar; + NvU16 bottomBar; + NvU16 leftBar; + NvU16 rightBar; +} NVT_VIDEO_INFOFRAME_OVERRIDE; +#pragma pack() + +typedef struct +{ + NvU32 vic : 8; + NvU32 pixelRepeat : 5; + NvU32 colorSpace : 3; + NvU32 colorimetry : 3; + NvU32 extendedColorimetry : 4; + NvU32 rgbQuantizationRange : 3; + NvU32 yccQuantizationRange : 3; + NvU32 itContent : 2; + NvU32 contentTypes : 3; + NvU32 scanInfo : 3; + NvU32 activeFormatInfoPresent : 2; + NvU32 activeFormatAspectRatio : 5; + NvU32 picAspectRatio : 3; + NvU32 nonuniformScaling : 3; + NvU32 barInfo : 3; + NvU32 top_bar : 17; + NvU32 bottom_bar : 17; + NvU32 left_bar : 17; + NvU32 right_bar : 17; + NvU32 Future17 : 2; + NvU32 Future47 : 2; +} NVT_INFOFRAME_VIDEO; + + +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_S1S0_MASK 0x3 +#define 
NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_B1B0_MASK 0x3 +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_A0_MASK 0x1 // active format info present +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_Y1Y0_MASK 0x3 +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_Y2Y1Y0_MASK 0x7 +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_R3R2R1R0_MASK 0xF // active format aspect ratio +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_M1M0_MASK 0x3 // picture aspect ratio +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_C1C0_MASK 0x3 // colorimetry +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_SC_MASK 0x3 // non-uniform scaling +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_Q1Q0_MASK 0x3 // quantization +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_EC_MASK 0x7 // extended colorimetry +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_ITC_MASK 0x1 // IT content +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE4_VIC_MASK 0x7F // video identification code +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_PR_MASK 0xF // pixel repetitions +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_CN1CN0_MASK 0x3 +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_YQ1YQ0_MASK 0x3 // YCC quantization + +// audio infoframe structure +typedef struct tagNVT_AUDIO_INFOFRAME +{ + NvU8 type; + NvU8 version; + NvU8 length; + + // byte 1~5 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + + // byte 6~10 + NvU8 rsvd_byte6; + NvU8 rsvd_byte7; + NvU8 rsvd_byte8; + NvU8 rsvd_byte9; + NvU8 rsvd_byte10; + +}NVT_AUDIO_INFOFRAME; + +// self refresh infoframe structure. See SR spec. +typedef struct tagNVT_SR_INFOFRAME +{ + NvU8 type; + NvU8 version; + NvU8 length; + + NvU8 data; + +}NVT_SR_INFOFRAME; + +// +#define NVT_AUDIO_INFOFRAME_VERSION_1 1 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_MASK 0x07 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_2CH 1 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_DO_NOT_USE 2 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_4CH 3 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_5CH 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_6CH 5 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_7CH 6 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_8CH 7 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_MASK 0x08 +#define NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_SHIFT 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MASK 0xF0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_SHIFT 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_PCM 1 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_DO_NOT_USE 2 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MPEG1 3 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MP3 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MPEG2 5 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_AAC 6 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_DTS 7 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_USE_CODING_EXTENSION_TYPE 15 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_MASK 0x3 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_16BIT 1 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_20BIT 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_24BIT 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_MASK 0x1C +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_SHIFT 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_32KHz 1 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_44KHz 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_48KHz 3 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_88KHz 4 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_96KHz 5 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_176KHz 6 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_192KHz 7 +// 
+#define NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_MASK 0xE0 +#define NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_SHIFT 5 +// +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_MASK 0x1F +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_RESERVE31 31 +// +#define NVT_AUDIO_INFOFRAME_BYTE3_RESERVED_MASK 0xE0 +#define NVT_AUDIO_INFOFRAME_BYTE3_RESERVED_SHIFT 5 +// +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_FRW_FLW_RR_RL_FC_LFE_FR_FL 49 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_NO_DATA 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_0DB 1 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_PLUS10DB 2 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_RESERVED03 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_MASK 0x4 +#define NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_SHIFT 2 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_MASK 0x78 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_SHIFT 3 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_0dB 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_1dB 1 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_2dB 2 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_3dB 3 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_4dB 4 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_5dB 5 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_6dB 6 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_7dB 7 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_8dB 8 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_9dB 9 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_10dB 10 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_11dB 11 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_12dB 12 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_13dB 13 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_14dB 14 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_15dB 15 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_MASK 0x80 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_SHIFT 7 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_PERMITTED 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_PROHIBITED 1 +// +#define NVT_AUDIO_INFOFRAME_BYTE6_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE6_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE7_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE7_RESERVED_SHIFT 0 +// +/// +#define NVT_AUDIO_INFOFRAME_BYTE8_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE8_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE9_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE9_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE10_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE10_RESERVED_SHIFT 0 +// + +typedef struct +{ + // byte 1 + struct + { + NvU8 channelCount : 3; + NvU8 rsvd_bits_byte1 : 1; + NvU8 codingType : 4; + } byte1; + + // byte 2 + struct + { + NvU8 sampleSize : 2; + NvU8 sampleRate : 3; + NvU8 rsvd_bits_byte2 : 3; + } byte2; + + + // byte 3 + struct + { + NvU8 codingExtensionType : 5; + NvU8 rsvd_bits_byte3 : 3; + } byte3; + + // byte 4 + NvU8 speakerPlacement; + + // byte 5 + struct + { + NvU8 lfePlaybackLevel : 2; + NvU8 rsvd_bits_byte5 : 1; + NvU8 levelShift : 4; + NvU8 downmixInhibit : 1; + } byte5; + + // byte 6~10 + NvU8 rsvd_byte6; + NvU8 rsvd_byte7; + NvU8 rsvd_byte8; + NvU8 rsvd_byte9; + NvU8 rsvd_byte10; +} NVT_AUDIO_INFOFRAME_OVERRIDE; + +typedef struct +{ + NvU32 codingType : 5; + NvU32 codingExtensionType : 6; + NvU32 sampleSize : 3; + NvU32 sampleRate : 4; + NvU32 channelCount : 4; + NvU32 speakerPlacement : 9; + NvU32 downmixInhibit : 2; + NvU32 lfePlaybackLevel : 3; + NvU32 levelShift : 5; + NvU32 Future12 : 2; + NvU32 Future2x 
: 4; + NvU32 Future3x : 4; + NvU32 Future52 : 2; + NvU32 Future6 : 9; + NvU32 Future7 : 9; + NvU32 Future8 : 9; + NvU32 Future9 : 9; + NvU32 Future10 : 9; +} NVT_INFOFRAME_AUDIO; + +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE1_CC_MASK 0x07 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE1_CT_MASK 0x0F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE2_SS_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE2_SF_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE3_CXT_MASK 0x1F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE4_CA_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_LFEPBL_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_LSV_MASK 0x0F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_DM_INH_MASK 0x01 + +// +// HDMI 1.3a GCP, ColorDepth +// +#define NVT_HDMI_COLOR_DEPTH_DEFAULT 0x0 +#define NVT_HDMI_COLOR_DEPTH_RSVD1 0x1 +#define NVT_HDMI_COLOR_DEPTH_RSVD2 0x2 +#define NVT_HDMI_COLOR_DEPTH_RSVD3 0x3 +#define NVT_HDMI_COLOR_DEPTH_24 0x4 +#define NVT_HDMI_COLOR_DEPTH_30 0x5 +#define NVT_HDMI_COLOR_DEPTH_36 0x6 +#define NVT_HDMI_COLOR_DEPTH_48 0x7 +#define NVT_HDMI_COLOR_DEPTH_RSVD8 0x8 +#define NVT_HDMI_COLOR_DEPTH_RSVD9 0x9 +#define NVT_HDMI_COLOR_DEPTH_RSVD10 0xA +#define NVT_HDMI_COLOR_DEPTH_RSVD11 0xB +#define NVT_HDMI_COLOR_DEPTH_RSVD12 0xC +#define NVT_HDMI_COLOR_DEPTH_RSVD13 0xD +#define NVT_HDMI_COLOR_DEPTH_RSVD14 0xE +#define NVT_HDMI_COLOR_DEPTH_RSVD15 0xF + +// HDMI 1.3a GCP, PixelPacking Phase +#define NVT_HDMI_PIXELPACKING_PHASE4 0x0 +#define NVT_HDMI_PIXELPACKING_PHASE1 0x1 +#define NVT_HDMI_PIXELPACKING_PHASE2 0x2 +#define NVT_HDMI_PIXELPACKING_PHASE3 0x3 +#define NVT_HDMI_PIXELPACKING_RSVD4 0x4 +#define NVT_HDMI_PIXELPACKING_RSVD5 0x5 +#define NVT_HDMI_PIXELPACKING_RSVD6 0x6 +#define NVT_HDMI_PIXELPACKING_RSVD7 0x7 +#define NVT_HDMI_PIXELPACKING_RSVD8 0x8 +#define NVT_HDMI_PIXELPACKING_RSVD9 0x9 +#define NVT_HDMI_PIXELPACKING_RSVD10 0xA +#define NVT_HDMI_PIXELPACKING_RSVD11 0xB +#define NVT_HDMI_PIXELPACKING_RSVD12 0xC +#define NVT_HDMI_PIXELPACKING_RSVD13 0xD +#define NVT_HDMI_PIXELPACKING_RSVD14 0xE +#define NVT_HDMI_PIXELPACKING_RSVD15 0xF + +#define NVT_HDMI_RESET_DEFAULT_PIXELPACKING_PHASE 0x0 +#define NVT_HDMI_SET_DEFAULT_PIXELPACKING_PHASE 0x1 + +#define NVT_HDMI_GCP_SB1_CD_SHIFT 0 +#define NVT_HDMI_GCP_SB1_PP_SHIFT 4 + + +// Vendor specific info frame (HDMI 1.4 specific) +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD +{ + // byte 1~5 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + NvU8 optionalBytes[22]; +}NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD; +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME +{ + NVT_INFOFRAME_HEADER Header; + NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD Data; +} NVT_VENDOR_SPECIFIC_INFOFRAME; +// +#define NVT_HDMI_VS_INFOFRAME_VERSION_1 1 + +// +#define NVT_HDMI_VS_BYTE4_RSVD_MASK 0x1f +#define NVT_HDMI_VS_BYTE4_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_MASK 0xe0 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_SHIFT 0x05 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE 0x00 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT 0x01 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D 0x02 +// 0x03-0x07 reserved +// +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_MASK 0xff // HDMI_VID_FMT = HDMI_VID_FMT_EXT +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_NA 0xfe +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_RSVD 0x00 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx30Hz 0x01 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx25Hz 0x02 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz 0x03 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz_SMPTE 0x04 +// 
0x05-0xff reserved
+//
+#define NVT_HDMI_VS_BYTE5_HDMI_RSVD_MASK 0x07 // HDMI_VID_FMT = HDMI_VID_FMT_3D
+#define NVT_HDMI_VS_BYTE5_HDMI_RSVD_SHIFT 0x00
+#define NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK 0x01
+#define NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT 0x03
+#define NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES 0x00 // HDMI Metadata is not present
+#define NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_PRES 0x01 // HDMI Metadata is present
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_MASK 0xf0
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SHIFT 0x04
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_NA 0xfe
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK 0x00
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT 0x01
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT 0x02
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL 0x03
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH 0x04
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX 0x05
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM 0x06
+//0x07 reserved
+#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF 0x08
+//0x09-0x0f reserved
+//
+// bytes 6-21 are optional depending on the 3D mode & the presence/absence of metadata
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_RSVD_MASK 0x0f // HDMI_VID_FMT = HDMI_VID_FMT_3D
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_RSVD_SHIFT 0x00
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_MASK 0xf0
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SHIFT 0x04
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_NA 0xfe // Extended data is not applicable
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH 0x01 // Horizontal subsampling; 1.4a defines a single subsampling mode vs 1.4's four.
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_OL_OR 0x00 // Horizontal subsampling Odd Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_OL_ER 0x01 // Horizontal subsampling Odd Left Even Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_EL_OR 0x02 // Horizontal subsampling Even Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_EL_ER 0x03 // Horizontal subsampling Even Left Even Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_OL_OR 0x04 // Quincunx matrix Odd Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_OL_ER 0x05 // Quincunx matrix Odd Left Even Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_EL_OR 0x06 // Quincunx matrix Even Left Odd Right
+#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_EL_ER 0x07 // Quincunx matrix Even Left Even Right
+//0x08-0x0f reserved
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_MASK 0xf0 // HDMI_VID_FMT = HDMI_VID_FMT_3D; HDMI_3D_META_PRESENT = 1
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_SHIFT 0x04 //
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_NONE 0x00 // length of no metadata
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX 0x08 // length of parallax data
+
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_MASK 0x0f // HDMI_VID_FMT = HDMI_VID_FMT_3D
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_SHIFT 0x00
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX 0x00 // parallax metadata in the frame
+#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_NA 0xfe // no metadata in the frame
+
+#define NVT_HDMI_VS_BYTENv_RSVD_MASK 0xff // if last byte of infoframe, will move depending on HDMI_VID_FMT, 3D metadata present, 3D_Metadata type.
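+// (A worked sketch of that movement, assuming HDMI 1.4a framing: with
+//  HDMI_VID_FMT_3D, 3DS = FRAMEPACK and no metadata, the payload ends at byte 5
+//  and byte 6 is the first reserved byte; with 3DS = SIDEBYSIDEHALF the
+//  3D_Ext_Data byte (OPT1) is present first, pushing the reserved bytes one
+//  position later.)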
+#define NVT_HDMI_VS_BYTENv_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTENv_RSVD 0x00 + + +// Extended Metadata Packet (HDMI 2.1 specific) +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD +{ + // byte 1~7 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + NvU8 byte6; + NvU8 byte7; + + NvU8 metadataBytes[21]; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD; + +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME +{ + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER Header; + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD Data; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME; + +#define NVT_HDMI_EMP_BYTE1_RSVD_MASK 0x01 +#define NVT_HDMI_EMP_BYTE1_RSVD_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE1_SYNC_MASK 0x02 +#define NVT_HDMI_EMP_BYTE1_SYNC_SHIFT 1 +#define NVT_HDMI_EMP_BYTE1_SYNC_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_SYNC_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_VFR_MASK 0x04 +#define NVT_HDMI_EMP_BYTE1_VFR_SHIFT 2 +#define NVT_HDMI_EMP_BYTE1_VFR_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_VFR_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_AFR_MASK 0x08 +#define NVT_HDMI_EMP_BYTE1_AFR_SHIFT 3 +#define NVT_HDMI_EMP_BYTE1_AFR_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_AFR_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_MASK 0x30 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_SHIFT 4 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_PERIODIC_PSEUDO_STATIC 0 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_PERIODIC_DYNAMIC 1 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_UNIQUE 2 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_RSVD 3 + +#define NVT_HDMI_EMP_BYTE1_END_MASK 0x40 +#define NVT_HDMI_EMP_BYTE1_END_SHIFT 6 +#define NVT_HDMI_EMP_BYTE1_END_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_END_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_NEW_MASK 0x80 +#define NVT_HDMI_EMP_BYTE1_NEW_SHIFT 7 +#define NVT_HDMI_EMP_BYTE1_NEW_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_NEW_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE2_RSVD_MASK 0xff +#define NVT_HDMI_EMP_BYTE2_RSVD_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_MASK 0xff +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SHIFT 0 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_VENDOR_SPECIFIC 0 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SPEC_DEFINED 1 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_CTA_DEFINED 2 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_VESA_DEFINED 3 + +#define NVT_HDMI_EMP_BYTE4_DATA_SET_TAG_MSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE4_DATA_SET_TAG_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE6_DATA_SET_LENGTH_MSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE6_DATA_SET_LENGTH_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_MASK 0x01 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_SHIFT 0 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_DISABLE 0 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_MASK 0xff +#define NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD2_RB_MASK 0x04 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_SHIFT 2 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_DISABLE 0 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_MASK 0x03 +#define NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_SHIFT 0 + + + +// the Vendor-Specific-Data-Block header +typedef struct tagNVT_CEA861_VSDB_HEADER +{ + // byte 0 + NvU32 length : 5; + NvU32 
vendorSpecificTag : 3; + // byte 1-3 + NvU32 ieee_id : 24; + +} NVT_CEA861_VSDB_HEADER; + +// HDMI LLC Vendor-Specific data block +// from HDMI 1.4 spec (superset of VSDB from HDMI 1.3a spec) +typedef struct tagNVT_CEA861_LATENCY +{ + NvU8 Video_Latency; + NvU8 Audio_Latency; + +} NVT_CEA861_LATENCY; + +typedef struct tagNVT_HDMI_VIDEO +{ + NvU8 Rsvd_1 : 3; + NvU8 ImageSize : 2; + NvU8 ThreeD_Multi_Present : 2; + NvU8 ThreeD_Present : 1; + NvU8 HDMI_3D_Len : 5; + NvU8 HDMI_VIC_Len : 3; +} NVT_HDMI_VIDEO; + +typedef struct tagNVT_HDMI_VIC_LIST +{ + NvU8 HDMI_VIC[1]; // note: list length is actually specified in HDMI_VIC_Len +} NVT_HDMI_VIC_LIST; + +typedef struct tagNVT_3D_STRUCT_ALL +{ + NvU8 ThreeDStructALL0_FramePacking : 1; + NvU8 ThreeDStructALL1_FieldAlt : 1; + NvU8 ThreeDStructALL2_LineAlt : 1; + NvU8 ThreeDStructALL3_SSFull : 1; + NvU8 ThreeDStructALL4_LDepth : 1; + NvU8 ThreeDStructALL5_LDepthGFX : 1; + NvU8 ThreeDStructALL6_TopBottom : 1; + NvU8 ThreeDStructALL7 : 1; + NvU8 ThreeDStructALL8_SSHalf : 1; + NvU8 Rsvd_1 : 7; +} NVT_3D_STRUCT_ALL; + +typedef struct tagNVT_3D_MULTI_LIST +{ + NvU8 ThreeD_Structure : 4; + NvU8 TwoD_VIC_order : 4; + NvU8 Rsvd_2 : 4; + NvU8 ThreeD_Detail : 4; +} NVT_3D_MULTI_LIST; + +#define NVT_3D_DETAILS_ALL 0x00 +#define NVT_3D_DETAILS_ALL_HORIZONTAL 0x01 +#define NVT_3D_DETAILS_HORIZONTAL_ODD_LEFT_ODD_RIGHT 0x02 +#define NVT_3D_DETAILS_HORIZONTAL_ODD_LEFT_EVEN_RIGHT 0x03 +#define NVT_3D_DETAILS_HORIZONTAL_EVEN_LEFT_ODD_RIGHT 0x04 +#define NVT_3D_DETAILS_HORIZONTAL_EVEN_LEFT_EVEN_RIGHT 0x05 +#define NVT_3D_DETAILS_ALL_QUINCUNX 0x06 +#define NVT_3D_DETAILS_QUINCUNX_ODD_LEFT_ODD_RIGHT 0x07 +#define NVT_3D_DETAILS_QUINCUNX_ODD_LEFT_EVEN_RIGHT 0x08 +#define NVT_3D_DETAILS_QUINCUNX_EVEN_LEFT_ODD_RIGHT 0x09 +#define NVT_3D_DETAILS_QUINCUNX_EVEN_LEFT_EVEN_RIGHT 0x0a + +typedef struct tagNVT_HDMI_LLC_VSDB_PAYLOAD +{ + // 1st byte + NvU8 B : 4; + NvU8 A : 4; + // 2nd byte + NvU8 D : 4; + NvU8 C : 4; + // 3rd byte + NvU8 DVI_Dual : 1; + NvU8 Rsvd_3 : 2; + NvU8 DC_Y444 : 1; + NvU8 DC_30bit : 1; + NvU8 DC_36bit : 1; + NvU8 DC_48bit : 1; + NvU8 Supports_AI : 1; + // 4th byte + NvU8 Max_TMDS_Clock; + // 5th byte + NvU8 CNC0 : 1; + NvU8 CNC1 : 1; + NvU8 CNC2 : 1; + NvU8 CNC3 : 1; + NvU8 Rsvd_5 : 1; + NvU8 HDMI_Video_present : 1; + NvU8 I_Latency_Fields_Present : 1; + NvU8 Latency_Fields_Present : 1; + + // the rest of the frame may contain optional data as defined + // in the NVT_CEA861_LATENCY, HDMI_VIDEO, HDMI_VIC, NVT_3D_STRUCT_ALL & 3D_MULTI_LIST structures + // and as specified by the corresponding control bits + NvU8 Data[NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH - 5]; + +} NVT_HDMI_LLC_VSDB_PAYLOAD; + +// HDMI LLC Vendor Specific Data Block +typedef struct tagNVT_HDMI_LLC_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_HDMI_LLC_VSDB_PAYLOAD payload; +} NVT_HDMI_LLC_DATA; + +typedef struct tagNVT_NVDA_VSDB_PAYLOAD +{ + NvU8 opcode; // Nvidia specific opcode - please refer to VRR monitor spec v5 + NvU8 vrrMinRefreshRate; // Minimum refresh rate supported by this monitor +} NVT_NVDA_VSDB_PAYLOAD; + +// NVIDIA Vendor Specific Data Block +typedef struct tagNVT_NVDA_VSDB_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_NVDA_VSDB_PAYLOAD payload; +} NVT_NVDA_VSDB_DATA; + +typedef struct _NVT_MSFT_VSDB_PAYLOAD +{ + NvU8 version; + NvU8 primaryUseCase : 5; + NvU8 thirdPartyUsage : 1; + NvU8 desktopUsage : 1; + NvU8 reserved : 1; + NvU8 containerId[MSFT_VSDB_CONTAINER_ID_SIZE]; +} NVT_MSFT_VSDB_PAYLOAD; + +typedef struct _NVT_MSFT_VSDB_DATA +{ + NVT_CEA861_VSDB_HEADER header; + 
NVT_MSFT_VSDB_PAYLOAD payload; +} NVT_MSFT_VSDB_DATA; + +#define NVT_MSFT_VSDB_BLOCK_SIZE (sizeof(NVT_MSFT_VSDB_DATA)) + +typedef struct tagNVT_HDMI_FORUM_VSDB_PAYLOAD +{ + // first byte + NvU8 Version; + // second byte + NvU8 Max_TMDS_Character_Rate; + // third byte + NvU8 ThreeD_Osd_Disparity : 1; + NvU8 Dual_View : 1; + NvU8 Independent_View : 1; + NvU8 Lte_340mcsc_Scramble : 1; + NvU8 CCBPCI : 1; + NvU8 CABLE_STATUS : 1; + NvU8 RR_Capable : 1; + NvU8 SCDC_Present : 1; + // fourth byte + NvU8 DC_30bit_420 : 1; + NvU8 DC_36bit_420 : 1; + NvU8 DC_48bit_420 : 1; + NvU8 UHD_VIC : 1; + NvU8 Max_FRL_Rate : 4; + // fifth byte + NvU8 FAPA_start_location : 1; + NvU8 ALLM : 1; + NvU8 FVA : 1; + NvU8 CNMVRR : 1; + NvU8 CinemaVRR : 1; + NvU8 M_delta : 1; + NvU8 Rsvd_2 : 1; + NvU8 FAPA_End_Extended : 1; + + // sixth byte + NvU8 VRR_min : 6; + NvU8 VRR_max_high : 2; + // seventh byte + NvU8 VRR_max_low : 8; + // eighth byte + NvU8 DSC_10bpc : 1; + NvU8 DSC_12bpc : 1; + NvU8 DSC_16bpc : 1; + NvU8 DSC_All_bpp : 1; + NvU8 Rsvd_3 : 2; + NvU8 DSC_Native_420 : 1; + NvU8 DSC_1p2 : 1; + // ninth byte + NvU8 DSC_MaxSlices : 4; + NvU8 DSC_Max_FRL_Rate : 4; + // tenth byte + NvU8 DSC_totalChunkKBytes : 6; + NvU8 Rsvd_4 : 2; +} NVT_HDMI_FORUM_VSDB_PAYLOAD; + +// HDMI Forum Vendor Specific Data Block +typedef struct tagNVT_HDMI_FORUM_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_HDMI_FORUM_VSDB_PAYLOAD payload; +} NVT_HDMI_FORUM_DATA; + +// +// +// Video Capability Data Block (VCDB) +typedef struct _NV_ESC_MONITOR_CAPS_VCDB +{ + NvU8 quantizationRangeYcc : 1; + NvU8 quantizationRangeRgb : 1; + NvU8 scanInfoPreferredVideoFormat : 2; + NvU8 scanInfoITVideoFormats : 2; + NvU8 scanInfoCEVideoFormats : 2; +} NVT_HDMI_VCDB_DATA; + +// +// +//*********************************************************** +// Dynamic Range and Mastering Infoframe (HDR) +//*********************************************************** +// +typedef struct tagNVT_HDR_INFOFRAME_MASTERING_DATA +{ + NvU16 displayPrimary_x0; //!< x coordinate of color primary 0 (e.g. Red) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y0; //!< y coordinate of color primary 0 (e.g. Red) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + + NvU16 displayPrimary_x1; //!< x coordinate of color primary 1 (e.g. Green) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y1; //!< y coordinate of color primary 1 (e.g. Green) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + + NvU16 displayPrimary_x2; //!< x coordinate of color primary 2 (e.g. Blue) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y2; //!< y coordinate of color primary 2 (e.g. 
Blue) of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+
+    NvU16 displayWhitePoint_x; //!< x coordinate of white point of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+    NvU16 displayWhitePoint_y; //!< y coordinate of white point of mastering display ([0x0000-0xC350] = [0.0 - 1.0])
+
+    NvU16 max_display_mastering_luminance; //!< Maximum display mastering luminance ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2)
+    NvU16 min_display_mastering_luminance; //!< Minimum display mastering luminance ([0x0001-0xFFFF] = [0.0001 - 6.5535] cd/m^2)
+
+    NvU16 max_content_light_level; //!< Maximum Content Light level (MaxCLL) ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2)
+    NvU16 max_frame_average_light_level; //!< Maximum Frame-Average Light Level (MaxFALL) ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2)
+} NVT_HDR_INFOFRAME_MASTERING_DATA;
+
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_SDR_GAMMA 0 //SDR Luminance Range
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_HDR_GAMMA 1 //HDR Luminance Range
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_ST2084 2
+#define NVT_CEA861_HDR_INFOFRAME_EOTF_Future 3
+#define NVT_CEA861_STATIC_METADATA_TYPE1_PRIMARY_COLOR_NORMALIZE_FACTOR 0xC350 // Per CEA-861.3 spec
+
+typedef struct tagNVT_HDR_INFOFRAME_PAYLOAD
+{
+    //byte 1
+    NvU8 eotf : 3;
+    NvU8 f13_17 : 5; // These bits are reserved for future use
+    //byte 2
+    NvU8 static_metadata_desc_id : 3;
+    NvU8 f23_27 : 5; // These bits are reserved for future use
+
+    NVT_HDR_INFOFRAME_MASTERING_DATA type1;
+} NVT_HDR_INFOFRAME_PAYLOAD;
+
+#pragma pack(1)
+typedef struct tagNVT_HDR_INFOFRAME
+{
+    NVT_INFOFRAME_HEADER header;
+    NVT_HDR_INFOFRAME_PAYLOAD payload;
+} NVT_HDR_INFOFRAME;
+#pragma pack()
+
+//
+//
+//***********************************************************
+// Gamut Metadata Range and Vertices structures
+//***********************************************************
+//
+// GBD structure formats
+//
+#define NVT_GAMUT_FORMAT_VERTICES 0
+#define NVT_GAMUT_FORMAT_RANGE 1
+
+typedef struct tagNVT_GAMUT_HEADER
+{
+    NvU8 type:8;
+
+    // byte 1
+    NvU8 AGSNum:4;
+    NvU8 GBD_profile:3;
+    NvU8 Next_Field:1;
+
+    // byte 2
+    NvU8 CGSNum:4;
+    NvU8 Packet_Seq:2;
+    NvU8 Rsvd:1;
+    NvU8 No_Cmt_GBD:1;
+
+} NVT_GAMUT_HEADER;
+
+typedef struct tagNVT_GAMUT_METADATA_RANGE_8BIT{
+
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:2;
+    NvU8 Format_Flag:1;
+
+    // Packaged data
+    NvU8 Min_Red_Data:8;
+    NvU8 Max_Red_Data:8;
+    NvU8 Min_Green_Data:8;
+    NvU8 Max_Green_Data:8;
+    NvU8 Min_Blue_Data:8;
+    NvU8 Max_Blue_Data:8;
+} NVT_GAMUT_METADATA_RANGE_8BIT;
+
+typedef struct tagNVT_GAMUT_METADATA_RANGE_10BIT{
+
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:2;
+    NvU8 Format_Flag:1;
+
+    // Packaged data
+    NvU8 Min_Red_Data_HI:8;
+
+    NvU8 Max_Red_Data_HI:6;
+    NvU8 Min_Red_Data_LO:2;
+
+    NvU8 Min_Green_Data_HI:4;
+    NvU8 Max_Red_Data_LO:4;
+
+    NvU8 Max_Green_Data_HI:2;
+    NvU8 Min_Green_Data_LO:6;
+
+    NvU8 Max_Green_Data_LO:8;
+
+    NvU8 Min_Blue_Data_HI:8;
+
+    NvU8 Max_Blue_Data_HI:6;
+    NvU8 Min_Blue_Data_LO:2;
+
+    NvU8 Data_Rsvd:4;
+    NvU8 Max_Blue_Data_LO:4;
+
+} NVT_GAMUT_METADATA_RANGE_10BIT;
+
+typedef struct tagNVT_GAMUT_METADATA_RANGE_12BIT{
+
+    // Header
+    NvU8 GBD_Color_Space:3;
+    NvU8 GBD_Color_Precision:2;
+    NvU8 Rsvd:2;
+    NvU8 Format_Flag:1;
+
+    // Packaged data
+    NvU8 Min_Red_Data_HI:8;
+
+    NvU8 Max_Red_Data_HI:4;
+    NvU8 Min_Red_Data_LO:4;
+
+    NvU8 Max_Red_Data_LO:8;
+
+    NvU8 Min_Green_Data_HI:8;
+
+    NvU8 Max_Green_Data_HI:4;
+    NvU8 Min_Green_Data_LO:4;
+
+    NvU8 Max_Green_Data_LO:8;
+
+    NvU8
Min_Blue_Data_HI:8; + + NvU8 Max_Blue_Data_HI:4; + NvU8 Min_Blue_Data_LO:4; + + NvU8 Max_Blue_Data_LO:8; + +} NVT_GAMUT_METADATA_RANGE_12BIT; + +typedef struct tagNVT_GAMUT_METADATA_VERTICES_8BIT +{ + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:1; + NvU8 Facet_Mode:1; // Must be set to 0 + NvU8 Format_Flag:1; // Must be set to 0 + NvU8 Number_Vertices_H:8; // Must be set to 0 + NvU8 Number_Vertices_L:8; // Must be set to 4 + + // Packaged data + NvU8 Black_Y_R; + NvU8 Black_Cb_G; + NvU8 Black_Cr_B; + NvU8 Red_Y_R; + NvU8 Red_Cb_G; + NvU8 Red_Cr_B; + NvU8 Green_Y_R; + NvU8 Green_Cb_G; + NvU8 Green_Cr_B; + NvU8 Blue_Y_R; + NvU8 Blue_Cb_G; + NvU8 Blue_Cr_B; +} NVT_GAMUT_METADATA_VERTICES_8BIT; + +typedef struct tagNVT_GAMUT_METADATA_VERTICES_10BIT +{ + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:1; + NvU8 Facet_Mode:1; // Must be set to 0 + NvU8 Format_Flag:1; // Must be set to 0 + NvU8 Number_Vertices_H:8; // Must be set to 0 + NvU8 Number_Vertices_L:8; // Must be set to 4 + + // Packaged data + NvU8 Black_Y_R_HI; + + NvU8 Black_Cb_G_HI:6; + NvU8 Black_Y_R_LO:2; + + NvU8 Black_Cr_B_HI:4; + NvU8 Black_Cb_G_LO:4; + + NvU8 Red_Y_R_HI:2; + NvU8 Black_Cr_B_LO:6; + + NvU8 Red_Y_R_LO; + + NvU8 Red_Cb_G_HI; + + NvU8 Red_Cr_B_HI:6; + NvU8 Red_Cb_G_LO:2; + + NvU8 Green_Y_R_HI:4; + NvU8 Red_Cr_B_LO:4; + + NvU8 Green_Cb_G_HI:2; + NvU8 Green_Y_R_LO:6; + + NvU8 Green_Cb_G_LO; + + NvU8 Green_Cr_B_HI; + + NvU8 Blue_Y_R_HI:6; + NvU8 Green_Cr_B_LO:2; + + NvU8 Blue_Cb_G_HI:4; + NvU8 Blue_Y_R_LO:4; + + NvU8 Blue_Cr_B_HI:2; + NvU8 Blue_Cb_G_LO:6; + + NvU8 Blue_Cr_B_LO; +} NVT_GAMUT_METADATA_VERTICES_10BIT; + +typedef struct tagNVT_GAMUT_METADATA_VERTICES_12BIT +{ + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:1; + NvU8 Facet_Mode:1; // Must be set to 0 + NvU8 Format_Flag:1; // Must be set to 0 + NvU8 Number_Vertices_H:8; // Must be set to 0 + NvU8 Number_Vertices_L:8; // Must be set to 4 + + // Packaged data + NvU8 Black_Y_R_HI; + + NvU8 Black_Cb_G_HI:4; + NvU8 Black_Y_R_LO:4; + + NvU8 Black_Cb_G_LO; + + NvU8 Black_Cr_B_HI; + + NvU8 Red_Y_R_HI:4; + NvU8 Black_Cr_B_LO:4; + + NvU8 Red_Y_R_LO; + + NvU8 Red_Cb_G_HI; + + NvU8 Red_Cr_B_HI:4; + NvU8 Red_Cb_G_LO:4; + + NvU8 Red_Cr_B_LO; + + NvU8 Green_Y_R_HI; + + NvU8 Green_Cb_G_HI:4; + NvU8 Green_Y_R_LO:4; + + NvU8 Green_Cb_G_LO; + + NvU8 Green_Cr_B_HI; + + NvU8 Blue_Y_R_HI:4; + NvU8 Green_Cr_B_LO:4; + + NvU8 Blue_Y_R_LO; + + NvU8 Blue_Cb_G_HI; + + NvU8 Blue_Cr_B_HI:4; + NvU8 Blue_Cb_G_LO:4; + + NvU8 Blue_Cr_B_LO; +} NVT_GAMUT_METADATA_VERTICES_12BIT; + +typedef struct tagNVT_GAMUT_METADATA +{ + NVT_GAMUT_HEADER header; + + union + { + NVT_GAMUT_METADATA_RANGE_8BIT range8Bit; + NVT_GAMUT_METADATA_RANGE_10BIT range10Bit; + NVT_GAMUT_METADATA_RANGE_12BIT range12Bit; + NVT_GAMUT_METADATA_VERTICES_8BIT vertices8bit; + NVT_GAMUT_METADATA_VERTICES_10BIT vertices10bit; + NVT_GAMUT_METADATA_VERTICES_12BIT vertices12bit; + }payload; + +}NVT_GAMUT_METADATA; +// +//*********************************** +// Display Port Configuration Data +//*********************************** +// +// DPCD field offset +#define NVT_DPCD_ADDRESS_RECEIVER_CAPABILITY_FIELD 0x00000 +#define NVT_DPCD_ADDRESS_LINK_CONFIG_FIELD 0x00100 +#define NVT_DPCD_ADDRESS_MSTM_CTRL_FIELD 0x00111 //DPMST Control MST <-> ST +#define NVT_DPCD_ADDRESS_MSTM_BRANCH_DEVICE 0x001A1 +#define NVT_DPCD_ADDRESS_LINK_SINK_STATUS_FIELD 0x00200 +#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_SOURCE_DEVICE 0x00300 +#define 
NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_SINK_DEVICE 0x00400
+#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_BRANCH_DEVICE 0x00500
+#define NVT_DPCD_ADDRESS_SINK_CTRL_FIELD 0x00600
+#define NVT_DPCD_ADDRESS_DOWN_REQ_BUFFER_FIELD 0x01000
+#define NVT_DPCD_ADDRESS_UP_REP_BUFFER_FIELD 0x01200
+#define NVT_DPCD_ADDRESS_DOWN_REP_BUFFER_FIELD 0x01400
+#define NVT_DPCD_ADDRESS_UP_REQ_BUFFER_FIELD 0x01600
+#define NVT_DPCD_ADDRESS_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x02003
+#define NVT_DPCD_ADDRESS_DP_TUNNELING_CAPS_SUPPORT_FIELD 0xE000D
+#define NVT_DPCD_ADDRESS_DP_IN_ADAPTER_INFO_FIELD 0xE000E
+#define NVT_DPCD_ADDRESS_USB4_DRIVER_ID_FIELD 0xE000F
+#define NVT_DPCD_ADDRESS_USB4_ROUTER_TOPOLOGY_ID_FIELD 0xE001B
+
+//
+// Raw DPCD data format - Receiver Capability Field // 00000h - 000FFh
+typedef struct tagNVT_DPCD_RECEIVER_CAP
+{
+    NvU8 rev; // 00000h
+    NvU8 max_link_rate; // 00001h
+    NvU8 max_lane_count; // 00002h
+    NvU8 max_downspread; // 00003h
+    NvU8 norp; // 00004h
+    NvU8 downstream_port_present; // 00005h
+    NvU8 main_link_ch_coding; // 00006h
+    NvU8 down_stream_port_count; // 00007h
+    NvU8 receive_port0_cap_0; // 00008h
+    NvU8 receive_port0_cap_1; // 00009h
+    NvU8 receive_port1_cap_0; // 0000Ah
+    NvU8 receive_port1_cap_1; // 0000Bh
+    NvU8 reserved_0[0x7F - 0xC + 1]; // 0000Ch - 0007Fh
+    NvU8 down_strm_port0_cap[0x8F - 0x80 + 1]; // 00080h - 0008Fh
+    //NvU8 reserved_1[0xFF - 0x90 + 1]; // 00090h - 000FFh
+}NVT_DPCD_RECEIVER_CAP;
+
+//
+// Raw DPCD data format - Link Configuration Field // 00100h - 001FFh
+typedef struct tagNVT_DPCD_LINK_CFG
+{
+    NvU8 link_bw_set; // 00100h
+    NvU8 lane_count_set; // 00101h
+    NvU8 training_pattern_set; // 00102h
+    NvU8 training_lane0_set; // 00103h
+    NvU8 training_lane1_set; // 00104h
+    NvU8 training_lane2_set; // 00105h
+    NvU8 training_lane3_set; // 00106h
+    NvU8 downspread_ctrl; // 00107h
+    NvU8 main_link_ch_coding_set; // 00108h
+    NvU8 reserved_0[0x110 - 0x109 + 1]; // 00109h - 00110h
+    NvU8 mstm_ctrl; // 00111h
+    // NvU8 reserved_0[0x1FF - 0x111 + 1];
+}NVT_DPCD_LINK_CFG;
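+
+// A link-configuration sketch (assumed usage, with a hypothetical AUX write
+// helper auxWrite(addr, value)): to request a 2-lane 2.70Gbps link before
+// training, a source would program the first two bytes of this field:
+//
+//     auxWrite(NVT_DPCD_ADDRESS_LINK_CONFIG_FIELD + 0, NVT_DPCD_LINK_RATE_2_70_GBPS); // 00100h, link_bw_set
+//     auxWrite(NVT_DPCD_ADDRESS_LINK_CONFIG_FIELD + 1, NVT_DPCD_LANE_COUNT_2);        // 00101h, lane_count_set
+//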
+//
+// Raw DPCD data format - Link/Sink Status Field // 00200h - 002FFh
+typedef struct tagNVT_DPCD_LINK_SINK_STATUS
+{
+    NvU8 sink_count; // 00200h
+    NvU8 device_service_irq_vector; // 00201h
+    NvU8 lane0_1_status; // 00202h
+    NvU8 lane2_3_status; // 00203h
+    NvU8 lane_align_status_update; // 00204h
+    NvU8 sink_status; // 00205h
+    NvU8 adjust_req_lane0_1; // 00206h
+    NvU8 adjust_req_lane2_3; // 00207h
+    NvU8 training_score_lane0; // 00208h
+    NvU8 training_score_lane1; // 00209h
+    NvU8 training_score_lane2; // 0020Ah
+    NvU8 training_score_lane3; // 0020Bh
+    NvU8 reserved_0[0x20F - 0x20C + 1]; // 0020Ch - 0020Fh
+    NvU16 sym_err_count_lane0; // 00210h - 00211h
+    NvU16 sym_err_count_lane1; // 00212h - 00213h
+    NvU16 sym_err_count_lane2; // 00214h - 00215h
+    NvU16 sym_err_count_lane3; // 00216h - 00217h
+    NvU8 test_req; // 00218h
+    NvU8 test_link_rate; // 00219h
+    NvU8 reserved_1[0x21F - 0x21A + 1]; // 0021Ah - 0021Fh
+    NvU8 test_lane_count; // 00220h
+    NvU8 test_pattern; // 00221h
+    NvU16 test_h_total; // 00222h - 00223h
+    NvU16 test_v_total; // 00224h - 00225h
+    NvU16 test_h_start; // 00226h - 00227h
+    NvU16 test_v_start; // 00228h - 00229h
+    NvU16 test_hsync; // 0022Ah - 0022Bh
+    NvU16 test_vsync; // 0022Ch - 0022Dh
+    NvU16 test_h_width; // 0022Eh - 0022Fh
+    NvU16 test_v_height; // 00230h - 00231h
+    NvU16 test_misc; // 00232h - 00233h
+    NvU8 test_refresh_rate_numerator; // 00234h
+    NvU8 reserved_2[0x23F - 0x235 + 1]; // 00235h - 0023Fh
+    NvU16 test_crc_R_Cr; // 00240h - 00241h
+    NvU16 test_crc_G_Y; // 00242h - 00243h
+    NvU16 test_crc_B_Cb; // 00244h - 00245h
+    NvU8 test_sink_misc; // 00246h
+    NvU8 reserved_3[0x25F - 0x247 + 1]; // 00247h - 0025Fh
+    NvU8 test_response; // 00260h
+    NvU8 test_edid_checksum; // 00261h
+    NvU8 reserved_4[0x26F - 0x262 + 1]; // 00262h - 0026Fh
+    NvU8 test_sink; // 00270h
+    //NvU8 reserved_5[0x27F - 0x271 + 1]; // 00271h - 0027Fh
+    //NvU8 reserved_6[0x2FF - 0x280 + 1]; // 00280h - 002FFh
+}NVT_DPCD_LINK_SINK_STATUS;
+
+#define NV_DPCD_DONGLE_NVIDIA_OUI 0x00044B
+
+//
+// Raw DPCD data format - Vendor-Specific Field for Source Device // 00300h - 003FFh
+// Raw DPCD data format - Vendor-Specific Field for Sink Device // 00400h - 004FFh
+// Raw DPCD data format - Vendor-Specific Field for Branch Device // 00500h - 005FFh
+typedef struct tagNVT_DPCD_VENDOR_SPECIFIC_FIELD
+{
+    NvU8 ieee_oui7_0; // 00300h
+    NvU8 ieee_oui15_8; // 00301h
+    NvU8 ieee_oui23_16; // 00302h
+    //NvU8 reserved[0x3FF - 0x303 + 1]; // 00303h - 003FFh
+}NVT_DPCD_VENDOR_SPECIFIC_FIELD;
+//
+// Raw DPCD data format - Dongle Specific Field
+typedef struct tagNVT_DPCD_DONGLE_SPECIFIC_FIELD
+{
+    NvU8 vendor_b0; // 00300h
+    NvU8 vendor_b1; // 00301h
+    NvU8 vendor_b2; // 00302h
+    NvU8 model[6]; // 00303h - 00308h
+    NvU8 chipIDVersion; // 00309h
+    //NvU8 reserved[0x3FF - 0x30A + 1]; // 0030Ah - 003FFh
+}NVT_DPCD_DONGLE_SPECIFIC_FIELD;
+//
+// Raw DPCD data format - DualDP Specific Field
+typedef struct tagNVT_DPCD_DUALDP_SPECIFIC_FIELD
+{
+    NvU8 vendor_b0; // 00300h
+    NvU8 vendor_b1; // 00301h
+    NvU8 vendor_b2; // 00302h
+    NvU8 model[6]; // 00303h - 00308h
+    NvU8 chipd_id_version; // 00309h
+    NvU8 reserved_1[0x3AF - 0x30A + 1]; // 0030Ah - 003AFh
+    NvU8 dual_dp_cap; // 003B0h
+    NvU8 dual_dp_base_addr[3]; // 003B1h - 003B3h
+    //NvU8 reserved_2[0x3FF - 0x3B4 + 1]; // 003B4h - 003FFh
+}NVT_DPCD_DUALDP_SPECIFIC_FIELD;
+
+//
+// Raw DPCD data format - Sink Control Field // 00600h - 006FFh
+typedef struct tagNVT_DPCD_SINK_CTRL_FIELD
+{
+    NvU8 set_power; // 00600h
+    //NvU8 reserved[0x6FF - 0x601 + 1]; // 00601h - 006FFh
+}NVT_DPCD_SINK_CTRL_FIELD;
+//
+// The entire DPCD data block
+typedef struct tagNVT_DPCD
+{
+    NVT_DPCD_RECEIVER_CAP receiver_cap;
+    NVT_DPCD_LINK_CFG link_cfg;
+    NVT_DPCD_LINK_SINK_STATUS link_status;
+    NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_source_device;
+    NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_sink_device;
+    NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_branch_device;
+    NVT_DPCD_SINK_CTRL_FIELD sink_ctrl;
+}NVT_DPCD;
+//
+//
+// Parsed DPCD info
+//
+//
+#define NVT_DPCD_REV_10 NVT_DPCD_DPCD_REV_10 // DPCD revision 1.0
+#define NVT_DPCD_REV_11 NVT_DPCD_DPCD_REV_11 // DPCD revision 1.1
+#define NVT_DPCD_REV_12 NVT_DPCD_DPCD_REV_12 // DPCD revision 1.2
+#define NVT_DPCD_RECEIVER_MAX_DOWNSTREAM_PORT 16 // the max downstream ports possible per device
+#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DP NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT // Display Port
+#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_VGA NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_VGA // analog VGA or analog video over DVI-I
+#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DVI NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DVI // DVI
+#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_HDMI NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_HDMI // HDMI
+#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_OTHERS NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_NO_EDID // the downstream port type will have no EDID in sink devices such as Composite/SVideo.
+#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DP_PP NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT_PP // Display Port++
+#define NVT_DPCD_LINK_RATE_1_62_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_1_62GPBS_PER_LANE // 1.62Gbps per lane
+#define NVT_DPCD_LINK_RATE_2_70_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_2_70GPBS_PER_LANE // 2.70Gbps per lane
+#define NVT_DPCD_LINK_RATE_5_40_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_5_40GPBS_PER_LANE // 5.40Gbps per lane
+#define NVT_DPCD_LINK_RATE_8_10_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_8_10GPBS_PER_LANE // 8.10Gbps per lane
+#define NVT_DPCD_LINK_RATE_FACTOR_IN_10KHZ_MBPS 2700 // e.g. NVT_DPCD_LINK_RATE_1_62_GBPS * 0.27Gbps per lane (in 10KHz)
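+// (A worked sketch of that factor: link_bw_set = 0x06 gives 0x06 * 2700 = 16200
+//  in 10KHz units, i.e. a 162MHz symbol clock; at 10 bits per 8b/10b symbol that
+//  is the 1.62Gbps lane rate. 0x0A and 0x14 scale the same way.)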
+#define NVT_DPCD_LANE_COUNT_1 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_1_LANE
+#define NVT_DPCD_LANE_COUNT_2 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_2_LANES
+#define NVT_DPCD_LANE_COUNT_4 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_4_LANES
+#define NVT_DPCD_LANE_COUNT_8 8
+
+// note: the values of NVT_COLOR_FORMAT_* are fixed in order to match the equivalent NV classes
+typedef enum
+{
+    NVT_COLOR_FORMAT_RGB = 0,
+    NVT_COLOR_FORMAT_YCbCr422 = 1,
+    NVT_COLOR_FORMAT_YCbCr444 = 2,
+    NVT_COLOR_FORMAT_YCbCr420 = 3,
+    NVT_COLOR_FORMAT_Y = 4,
+    NVT_COLOR_FORMAT_RAW = 5,
+    NVT_COLOR_FORMAT_INVALID = 0xFF
+} NVT_COLOR_FORMAT;
+
+typedef enum
+{
+    NVT_COLOR_RANGE_FULL = 0,
+    NVT_COLOR_RANGE_LIMITED = 1
+} NVT_COLOR_RANGE;
+
+// note: the values of NVT_COLORIMETRY_* are fixed in order to match the equivalent NV classes
+typedef enum
+{
+    NVT_COLORIMETRY_RGB = 0,
+    NVT_COLORIMETRY_YUV_601 = 1,
+    NVT_COLORIMETRY_YUV_709 = 2,
+    NVT_COLORIMETRY_EXTENDED = 3,
+    NVT_COLORIMETRY_XVYCC_601 = 4,
+    NVT_COLORIMETRY_XVYCC_709 = 5,
+    NVT_COLORIMETRY_ADOBERGB = 6,
+    NVT_COLORIMETRY_BT2020cYCC = 7,
+    NVT_COLORIMETRY_BT2020YCC = 8,
+    NVT_COLORIMETRY_BT2020RGB = 9,
+    NVT_COLORIMETRY_INVALID = 0xFF
+} NVT_COLORIMETRY;
+
+#define NVT_DPCD_BPC_DEFAULT 0x00
+#define NVT_DPCD_BPC_6 0x01
+#define NVT_DPCD_BPC_8 0x02
+#define NVT_DPCD_BPC_10 0x03
+#define NVT_DPCD_BPC_12 0x04
+#define NVT_DPCD_BPC_16 0x05
+
+#define NVT_DPCD_AUTOMATED_TEST 0x02
+#define NVT_DPCD_CP_IRQ 0x04
+
+#define NVT_DPCD_LANES_2_3_TRAINED 0x77
+#define NVT_DPCD_LANE_1_TRAINED 0x07
+#define NVT_DPCD_LANE_0_TRAINED 0x07
+#define NVT_DPCD_INTERLANE_ALIGN_DONE 0x1
+
+#define NVT_DPCD_LANE_1_STATUS 7:4
+#define NVT_DPCD_LANE_0_STATUS 3:0
+#define NVT_DPCD_ADDRESS_LANE_STATUS 0x00202
+
+#define NVT_DPCD_TEST_REQ_LINK_TRAINING 0x01
+#define NVT_DPCD_TEST_REQ_TEST_PATTERN 0x02
+#define NVT_DPCD_TEST_REQ_EDID_READ 0x04
+#define NVT_DPCD_TEST_REQ_PHY_TEST_PATTERN 0x08
+
+#define NVT_DPCD_TEST_ACK 0x01
+#define NVT_DPCD_TEST_NAK 0x02
+#define NVT_DPCD_TEST_EDID_CHECKSUM_WRITE 0x04
+
+#define NVT_DPCD_TEST_MISC_COLOR_FORMAT 2:1
+#define NVT_DPCD_TEST_MISC_DYNAMIC_RANGE 3:3
+#define NVT_DPCD_TEST_MISC_YCbCr_COEFFICIENT 4:4
+#define NVT_DPCD_TEST_MISC_BIT_DEPTH 7:5
+
+#define NVT_DPCD_TEST_EDID_CHECKSUM_ADDRESS 0x261
+#define NVT_DPCD_TEST_RESPONSE_ADDRESS 0x260
+#define NVT_EDID_CHECKSUM_BYTE 127
+
+#define NVT_DPCD_POWER_STATE_NORMAL 0x01
+#define NVT_DPCD_POWER_STATE_POWER_DOWN 0x02
+
+// *******************
+// ** DPCD 1.1 Spec **
+// *******************
+
+// 0x000h DPCD_REV
+#define NVT_DPCD_DPCD_REV 0x000
+#define NVT_DPCD_DPCD_REV_MINOR_VER 3:0
+#define NVT_DPCD_DPCD_REV_MAJOR_VER 7:4
+#define NVT_DPCD_DPCD_REV_10 0x10
+#define NVT_DPCD_DPCD_REV_11 0x11
+#define NVT_DPCD_DPCD_REV_12 0x12
+
+// 0x001h MAX_LINK_RATE
+#define NVT_DPCD_MAX_LINK_RATE 0x001
+#define
NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE 7:0 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_1_62GPS_PER_LANE 0x06 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_2_70GPS_PER_LANE 0x0A +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_5_40GPS_PER_LANE 0x14 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_8_10GPS_PER_LANE 0x1E + +// 0x002h - MAX_LANE_COUNT +#define NVT_DPCD_MAX_LANE_COUNT 0x002 +#define NVT_DPCD_MAX_LANE_COUNT_MAX_LANE_COUNT 4:0 +#define NVT_DPCD_MAX_LANE_COUNT_RSVD 6:5 +#define NVT_DPCD_MAX_LANE_COUNT_ENHANCED_FRAME_CAP 7:7 + +// 0x003h - MAX_DOWNSPREAD +#define NVT_DPCD_MAX_DOWNSPREAD 0x003 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD 0:0 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD_NO 0 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD_YES 1 +#define NVT_DPCD_MAX_DOWNSPREAD_RSVD 5:1 +#define NVT_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LINK_TRAINING 6:6 +#define NVT_DPCD_MAX_DOWNSPREAD_RSVD_2 7:7 + +// 0x004h - NORP +#define NVT_DPCD_NORP 0x004 +#define NVT_DPCD_NORP_NUMBER_OF_RECEIVER_PORT_SUBTRACT_ONE 0:0 +#define NVT_DPCD_NORP_RSVD 7:1 + +// 0x005 - DOWNSTREAMPORT_PRESENT +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT 0x005 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_PRESENT 0:0 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE 2:1 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_DISPLAYPORT 0 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_VGA 1 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_DVI_HDMI 2 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_OTHERS 3 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_FORMAT_CONVERSION 3:3 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_RSVD 7:4 + +// 0x006 - MAIN_LINK_CHANNEL_CODING +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING 0x006 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI8B_10B 0:0 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_RSVD 7:1 + +// 0x007 - DOWN_STREAM_PORT_COUNT +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT 0x007 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_DWN_STRM_PORT_COUNT 3:0 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_RSVD 6:4 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT 7:7 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT_YES 1 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT_NO 0 + +// 0x008h - RECEIVE_PORT0_CAP_0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0 0x008 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_RSVD 0:0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT 1:1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT_YES 1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT_NO 0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT 2:2 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_YES 1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_NO 0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_RSVD_2 7:3 + +// 0x009h - RECEIVE_PORT0_CAP_1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_1 0x009 +#define NVT_DPCD_RECEIVE_PORT0_CAP_1_BUFFER_SIZE 7:0 + +// 0x00Ah - RECEIVE_PORT1_CAP_0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0 0x00A +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_RSVD 0:0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT 1:1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT_YES 1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT_NO 0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT 2:2 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_YES 1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_NO 0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_RSVD_2 7:3 + +// 0x00Bh - 
RECEIVE_PORT1_CAP_1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_1 0x00B +#define NVT_DPCD_RECEIVE_PORT1_CAP_1_BUFFER_SIZE 7:0 + +// 0x021h - MST_CAP +#define NVT_DPCD_MSTM_CAP 0x021 +#define NVT_DPCD_MSTM_CAP_MST_CAP 0:0 +#define NVT_DPCD_MSTM_CAP_MST_CAP_NO 0 +#define NVT_DPCD_MSTM_CAP_MST_CAP_YES 1 + +// 0x080h ~ 0x08Fh - DWN_STRM_PORT0_CAP +#define NVT_DPCD_DWN_STRM_PORT0_CAP 0x080 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE 2:0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT 0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_VGA 1 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DVI 2 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_HDMI 3 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_NO_EDID 4 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT_PP 5 //Defined in Post DP 1.2 draft +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD 3:3 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD_AWARE_YES 1 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD_AWARE_NO 0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_RSVD 7:4 + +// 0x100h - LINK_BW_SET +#define NVT_DPCD_LINK_BW_SET 0x100 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET 7:0 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_1_62GPBS_PER_LANE 0x06 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_2_70GPBS_PER_LANE 0x0A +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_5_40GPBS_PER_LANE 0x14 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_8_10GPBS_PER_LANE 0x1E + +// 0x101h - LANE_COUNT_SET +#define NVT_DPCD_LANE_COUNT_SET 0x101 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET 4:0 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_1_LANE 1 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_2_LANES 2 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_4_LANES 4 +#define NVT_DPCD_LANE_COUNT_SET_RSVD 6:5 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN 7:7 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN_YES 1 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN_NO 0 + +// 0x102h - TRAINING_PATTERN_SET +#define NVT_DPCD_TRAINING_PATTERN_SET 0x102 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET 1:0 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_NOT_IN_PROGRESS 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_PATTERN_1 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_PATTERN_2 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_RSVD 3 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET 3:2 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_NOT_TRANSMITTED 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_D10_2 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_SERMPT 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_PRBS7 3 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE 5:5 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE_NO 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE_YES 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL 7:6 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_DIS_ERROR 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_D_ERROR 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_IS_ERROR 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_RSVD 3 + +// 0x103h ~ 0x106h - 
TRAINING_LANE?_SET +#define NVT_DPCD_TRAINING_LANE0_SET 0x103 +#define NVT_DPCD_TRAINING_LANE1_SET 0x104 +#define NVT_DPCD_TRAINING_LANE2_SET 0x105 +#define NVT_DPCD_TRAINING_LANE3_SET 0x106 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET 1:0 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L0 0 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L1 1 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L2 2 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L3 3 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED 2:2 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED_NO 0 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED_YES 1 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET 4:3 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_NONE 0 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L1 1 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L2 2 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L3 3 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED 5:5 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED_NO 0 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED_YES 1 +#define NVT_DPCD_TRAINING_LANE0_SET_RSVD 7:6 + +// 0x107h - DOWNSPREAD_CTRL +#define NVT_DPCD_DOWNSPREAD_CTRL 0x107 +#define NVT_DPCD_DOWNSPREAD_CTRL_RSVD 3:0 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP 4:4 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_NO 0 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_YES 1 +#define NVT_DPCD_DOWNSPREAD_CTRL_RSVD_2 7:5 + +// 0x108h - MAIN_LINK_CHANNEL_CODING_SET +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET 0x108 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET_SET_ANSI8B10B 0:0 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET_RSVD 7:1 + +// 0x111h - MSTM_CTRL +#define NVT_DPCD_MSTM_CTRL 0x111 +#define NVT_DPCD_MSTM_CTRL_MST_EN 0:0 +#define NVT_DPCD_MSTM_CTRL_MST_EN_NO 0 +#define NVT_DPCD_MSTM_CTRL_MST_EN_YES 1 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN 1:1 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN_NO 0 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN_YES 1 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC 2:2 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_NO 0 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_YES 1 +#define NVT_DPCD_MSTM_CTRL_MST_RSVD 7:3 + +// 0x1A1h - BRANCH_DEVICE_CTRL +#define NVT_DPCD_BRANCH_DEVICE_CTRL 0x1A1 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE 0:0 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE_LONG_HPD_PULSE 0 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE_SHORT_IRQ_PULSE 1 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_RSVD 7:1 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_SET 0x1C0 +#define NVT_DPCD_PAYLOAD_ALLOCATE_SET_VC_ID 6:0 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1C1 +#define NVT_DPCD_PAYLOAD_ALLOCATE_START_TIME_SLOT_FIELD 5:0 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1C2 +#define NVT_DPCD_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT_FIELD 5:0 + +// 0x200h - SINK_COUNT +#define NVT_DPCD_SINK_COUNT 0x200 +#define NVT_DPCD_SINK_COUNT_SINK_COUNT 5:0 +#define NVT_DPCD_SINK_COUNT_CP_READY 6:6 +#define NVT_DPCD_SINK_COUNT_RSVD 7:7 + +// 0x201h - DEVICE_SERVICE_IRQ_VECTOR +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR 0x201 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_RSVD_REMOTE_CTRL_CMD_PENDING 0:0 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTOMATED_TEST_REQUEST 1:1 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_IRQ 2:2 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ 3:3 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_READY 4:4 
+#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_READY 5:5 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ 6:6 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_RSVD_2 7:7 + +// 0x202h ~ 0x203h - LANE0_1_STATUS +#define NVT_DPCD_LANE0_1_STATUS 0x202 +#define NVT_DPCD_LANE2_3_STATUS 0x203 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_CR_DONE 0:0 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_CHANNEL_EQ_DONE 1:1 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_SYMBOL_LOCKED 2:2 +#define NVT_DPCD_LANE0_1_STATUS_RSVD 3:3 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_CR_DONE 4:4 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_CHANNEL_EQ_DONE 5:5 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_SYMBOL_LOCKED 6:6 +#define NVT_DPCD_LANE0_1_STATUS_RSVD_2 7:7 + +// 0x204h - LANE_ALIGN_STATUS_UPDATED +// Temporary until Linux/Apple change their code. +#define NVT_DPCD_LANE_ALIGN_STAUTS_UPDATED NVT_DPCD_LANE_ALIGN_STATUS_UPDATED +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED 0x204 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE 0:0 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_RSVD 5:1 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_DOWNSTREAM_PORT_STATUS_CHANGED 6:6 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED 7:7 + +// 0x205 - SINK_STATUS +#define NVT_DPCD_SINK_STATUS 0x205 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS 0:0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_OUT_OF_SYNC 0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC 1 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS 1:1 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_OUT_OF_SYNC 0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC 1 +#define NVT_DPCD_SINK_STATUS_RSVD 7:2 + +// 0x206h ~ 0x207h - ADJUST_REQUEST_LANE0_1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1 0x206 +#define NVT_DPCD_ADJUST_REQUEST_LANE2_3 0x207 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0 1:0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0 3:2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1 5:4 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1 7:6 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_3 3 + +// 0x208h ~ 0x20Bh TRAINING_SCORE_LANE0~3 +#define NVT_DPCD_TRAINING_SCORE_LANE0 0x208 +#define NVT_DPCD_TRAINING_SCORE_LANE1 0x209 +#define NVT_DPCD_TRAINING_SCORE_LANE2 0x20A +#define NVT_DPCD_TRAINING_SCORE_LANE3 0x20B + +// 0x210h ~ 0x217h SYMBOL_ERROR_COUNT_LANE0 (16bit) +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_LO 0x210 +#define 
NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_HI 0x211 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE1_LO 0x212 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE1_HI 0x213 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE2_LO 0x214 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE2_HI 0x215 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE3_LO 0x216 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE3_HI 0x217 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_LO 7:0 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_HI 6:0 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_VALID 7:7 + +// 0x218h TEST_REQUEST +#define NVT_DPCD_TEST_REQUEST 0x218 +#define NVT_DPCD_TEST_REQUEST_TEST_LINK_TRAINING 0:0 +#define NVT_DPCD_TEST_REQUEST_TEST_PATTERN 1:1 +#define NVT_DPCD_TEST_REQUEST_TEST_EDID_READ 2:2 +#define NVT_DPCD_TEST_REQUEST_PHY_TEST_PATTERN 3:3 +#define NVT_DPCD_TEST_REQUEST_RSVD 7:4 + +// 0x219h TEST_LINK_RATE +#define NVT_DPCD_TEST_LINK_RATE 0x219 + +// 0x220h TEST_LANE_COUNT +#define NVT_DPCD_TEST_LANE_COUNT 0x220 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT 4:0 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_ONE_LANE 1 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_TWO_LANES 2 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_FOUR_LANES 4 +#define NVT_DPCD_TEST_LANE_COUNT_RSVD 7:5 + +// 0x221h TEST_PATTERN +#define NVT_DPCD_TEST_PATTERN 0x221 +#define NVT_DPCD_TEST_PATTERN_NO_TEST_PATTERN_TRANSMITTED 0 +#define NVT_DPCD_TEST_PATTERN_COLOR_RAMPS 1 +#define NVT_DPCD_TEST_PATTERN_BLACK_AND_WHITE_VERTICAL_LINES 2 +#define NVT_DPCD_TEST_PATTERN_COLOR_SQUARE 3 + +// 0x222h ~ 0x223h TEST_H_TOTAL +#define NVT_DPCD_TEST_H_TOTAL_HI 0x222 +#define NVT_DPCD_TEST_H_TOTAL_LO 0x223 + +// 0x224h ~ 0x225h TEST_V_TOTAL +#define NVT_DPCD_TEST_V_TOTAL_HI 0x224 +#define NVT_DPCD_TEST_V_TOTAL_LO 0x225 + +// 0x226h ~ 0x227h TEST_H_START +#define NVT_DPCD_TEST_H_START_HI 0x226 +#define NVT_DPCD_TEST_H_START_LO 0x227 + +// 0x228h ~ 0x229h TEST_V_START +#define NVT_DPCD_TEST_V_START_HI 0x228 +#define NVT_DPCD_TEST_V_START_LO 0x229 + +// 0x22Ah ~ 0x22Bh TEST_HSYNC +#define NVT_DPCD_TEST_HSYNC_HI 0x22A +#define NVT_DPCD_TEST_HSYNC_LO 0x22B +#define NVT_DPCD_TEST_HSYNC_HI_TEST_HSYNC_WIDTH_14_8 6:0 +#define NVT_DPCD_TEST_HSYNC_HI_TEST_HSYNC_POLARITY 7:7 + +// 0x22Ch ~ 0x22Dh TEST_VSYNC +#define NVT_DPCD_TEST_VSYNC_HI 0x22C +#define NVT_DPCD_TEST_VSYNC_LO 0x22D +#define NVT_DPCD_TEST_VSYNC_HI_TEST_VSYNC_WIDTH_14_8 6:0 +#define NVT_DPCD_TEST_VSYNC_HI_TEST_VSYNC_POLARITY 7:7 + +// 0x22Eh ~ 0x22Fh TEST_H_WIDTH +#define NVT_DPCD_TEST_H_WIDTH_HI 0x22E +#define NVT_DPCD_TEST_H_WIDTH_LO 0x22F + +// 0x230h ~ 0x231h TEST_V_WIDTH +#define NVT_DPCD_TEST_V_HEIGHT_HI 0x230 +#define NVT_DPCD_TEST_V_HEIGHT_LO 0x231 + +// 0x232h ~ 0x233h TEST_MISC +#define NVT_DPCD_TEST_MISC_LO 0x232 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK 0:0 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK_ASYNC 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK_SYNC 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT 2:1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_RGB 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_YCbCr422 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_YCbCr444 2 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_RSVD 3 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE 3:3 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE_VESA 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE_CEA 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS 4:4 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS_ITU601 0 +#define 
NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS_ITU709 1
+#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH 7:5
+#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_6BPC 0
+#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_8BPC 1
+#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_10BPC 2
+#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_12BPC 3
+#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_16BPC 4
+#define NVT_DPCD_TEST_MISC_HI 0x233
+#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR 0:0
+#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR_1 0
+#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR_1001 1
+#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED 1:1
+#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_NO 0
+#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_YES 1
+#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_RSVD 7:2
+
+// 0x234h TEST_REFRESH_RATE_NUMERATOR
+#define NVT_DPCD_TEST_REFRESH_RATE_NUMERATOR 0x234
+
+// 0x240h ~ 0x241h TEST_CRC_R_Cr
+#define NVT_DPCD_TEST_CRC_R_Cr_LO 0x240
+#define NVT_DPCD_TEST_CRC_R_Cr_HI 0x241
+
+// 0x242h ~ 0x243h TEST_CRC_G_Y
+#define NVT_DPCD_TEST_CRC_G_Y_LO 0x242
+#define NVT_DPCD_TEST_CRC_G_Y_HI 0x243
+
+// 0x244h ~ 0x245h TEST_CRC_B_Cb
+#define NVT_DPCD_TEST_CRC_B_Cb_LO 0x244
+#define NVT_DPCD_TEST_CRC_B_Cb_HI 0x245
+
+// 0x246h TEST_SINK_MISC
+#define NVT_DPCD_TEST_SINK_MISC 0x246
+#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_COUNT 3:0
+#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED 5:5
+#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED_NO 0
+#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED_YES 1
+#define NVT_DPCD_TEST_SINK_MISC_RSVD 7:6
+
+// 0x248h PHY_TEST_PATTERN
+#define NVT_DPCD_PHY_TEST_PATTERN 0x248
+#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL 1:0
+#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_NO_TEST_PATTERN 0
+#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_D10_2 1
+#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_SEMC 2
+#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_PRBS7 3
+#define NVT_DPCD_PHY_TEST_PATTERN_RSVD 7:2
+
+// 0x260h TEST_RESPONSE
+#define NVT_DPCD_TEST_RESPONSE 0x260
+#define NVT_DPCD_TEST_RESPONSE_TEST_ACK 0:0
+#define NVT_DPCD_TEST_RESPONSE_TEST_ACK_KEEP_TEST_REQ 0
+#define NVT_DPCD_TEST_RESPONSE_TEST_ACK_CLEAR_TEST_REQ 1
+#define NVT_DPCD_TEST_RESPONSE_TEST_NAK 1:1
+#define NVT_DPCD_TEST_RESPONSE_TEST_NACK_KEEP_TEST_REQ 0
+#define NVT_DPCD_TEST_RESPONSE_TEST_NACK_CLEAR_TEST_REQ 1
+#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE 2:2
+#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE_NO 0
+#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE_YES 1
+#define NVT_DPCD_TEST_RESPONSE_RSVD 7:3
+
+// 0x261h TEST_EDID_CHECKSUM
+#define NVT_DPCD_TEST_EDID_CHECKSUM 0x261
+
+// 0x270 TEST_SINK
+#define NVT_DPCD_TEST_SINK 0x270
+#define NVT_DPCD_TEST_SINK_TEST_SINK_START 0:0
+#define NVT_DPCD_TEST_SINK_TEST_SINK_START_STOP_CALC_CRC 0
+#define NVT_DPCD_TEST_SINK_TEST_SINK_START_START_CALC_CRC 1
+#define NVT_DPCD_TEST_SINK_RSVD 7:1
+
+#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS 0x2C0
+#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED 0:0
+#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED_NO 0
+#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED_YES 1
+#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED 1:1
+#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_NO 0
+#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_YES 1
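// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original patch). The "hi:lo"
// values in the defines above are DRF-style bit-range tokens: because "7:0"
// expands inside a C conditional, (0 ? 7:0) yields the low bit and (1 ? 7:0)
// the high bit, so a field can be extracted generically. The auxRead/auxWrite
// single-byte DPCD accessors below are assumptions for illustration only; the
// MST payload programming order (write 0x1C0..0x1C2, then poll 0x2C0 for
// TABLE_UPDATED) follows the register defines in this header.
// ---------------------------------------------------------------------------
#define EX_FIELD_LO(f)     (0 ? f)                 // low bit of an "hi:lo" field
#define EX_FIELD_HI(f)     (1 ? f)                 // high bit of an "hi:lo" field
#define EX_FIELD_MASK(f)   (0xFFFFFFFFu >> (31 - EX_FIELD_HI(f) + EX_FIELD_LO(f)))
#define EX_FIELD_VAL(v, f) (((v) >> EX_FIELD_LO(f)) & EX_FIELD_MASK(f))

extern NvU8 auxRead(NvU32 dpcdAddr);               // assumed 1-byte DPCD read helper
extern void auxWrite(NvU32 dpcdAddr, NvU8 value);  // assumed 1-byte DPCD write helper

static NvBool exAllocateMstPayload(NvU8 vcId, NvU8 startSlot, NvU8 slotCount)
{
    NvU32 i;
    // Program the payload table request registers (DPCD 0x1C0..0x1C2).
    auxWrite(NVT_DPCD_PAYLOAD_ALLOCATE_SET, vcId);
    auxWrite(NVT_DPCD_PAYLOAD_ALLOCATE_START_TIME_SLOT, startSlot);
    auxWrite(NVT_DPCD_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, slotCount);
    // Poll DPCD 0x2C0 until the branch device reports the table was updated.
    for (i = 0; i < 100; i++)
    {
        NvU8 status = auxRead(NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS);
        if (EX_FIELD_VAL(status, NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED))
            return NV_TRUE;
    }
    return NV_FALSE;
}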
+
+// 0x300h ~ 0x302h SOURCE_IEEE_OUT
+#define NVT_DPCD_SOURCE_IEEE_OUT_7_0 0x300
+#define NVT_DPCD_SOURCE_IEEE_OUT_15_8 0x301
+#define NVT_DPCD_SOURCE_IEEE_OUT_23_16 0x302
+
+// 0x400h ~ 0x402h SINK_IEEE_OUT
+#define NVT_DPCD_SINK_IEEE_OUT_7_0 0x400
+#define NVT_DPCD_SINK_IEEE_OUT_15_8 0x401
+#define NVT_DPCD_SINK_IEEE_OUT_23_16 0x402
+
+// 0x500h ~ 0x502h BRANCH_IEEE_OUT
+#define NVT_DPCD_BRANCH_IEEE_OUT_7_0 0x500
+#define NVT_DPCD_BRANCH_IEEE_OUT_15_8 0x501
+#define NVT_DPCD_BRANCH_IEEE_OUT_23_16 0x502
+
+// 0x600 SET_POWER
+#define NVT_DPCD_SET_POWER 0x600
+#define NVT_DPCD_SET_POWER_SET_POWER_STATE 1:0
+#define NVT_DPCD_SET_POWER_SET_POWER_STATE_RSVD 0
+#define NVT_DPCD_SET_POWER_SET_POWER_STATE_D0 1
+#define NVT_DPCD_SET_POWER_SET_POWER_STATE_D3 2
+#define NVT_DPCD_SET_POWER_SET_POWER_STATE_RSVD_2 3
+#define NVT_DPCD_SET_POWER_RSVD 7:2
+
+//*************************************
+// DP 1.2 Main Stream Attribute Fields
+//*************************************
+
+#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_MASK 0x01 // MISC0 bit 0 Synchronous Clock
+#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_SHIFT 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_ASYNC 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_INSYNC 0x1
+
+#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_MASK 0xe0 // MISC0 bits 7:5 number of bits per color
+#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_SHIFT 0x5
+#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_6 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_8 0x1
+#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_10 0x2
+#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_12 0x3
+#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_16 0x4
+
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_MASK 0x1e // MISC0 bits 4:1 Color Encoding Format and Content Color Gamut
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_SHIFT 0x1
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_LEGACY 0x0 // RGB unspecified color space (legacy RGB mode)
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_CEA_RGB 0x4 // CEA RGB (sRGB primaries)
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_WIDE_GAMUT_FIXED_POINT 0x3 // RGB wide gamut fixed point (XR8, XR10, XR12)
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_WIDE_GAMUT_FLOAT_POINT 0xb // RGB wide gamut floating point (scRGB)
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_422_ITU601 0x5
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_422_ITU709 0xd
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_444_ITU601 0x6
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_444_ITU709 0xe
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_422_ITU601 0x1
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_422_ITU709 0x9
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_444_ITU601 0x2
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_444_ITU709 0xa
+#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_ADOBE_RGB 0xc
+
+#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_MASK 0x01 // MISC1 bit 0 Interlaced Vertical Total
+#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_SHIFT 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_ODD 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_EVEN 0x1
+
+#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_MASK 0x06 // MISC1 bits 2:1 stereo video attribute
+#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_SHIFT 0x1
+#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_NONE 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_RIGHT_LEFT 0x1
+#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_LEFT_RIGHT 0x3
+#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_RESERVED 0x2
+
+#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_MASK 0x38 // MISC1 bits 5:3 reserved (DP1.3). Note: DP1.2 MISC 6:3 is reserved and undefined.
+#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_SHIFT 0x3
+#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_DEFAULT 0x0
+
+#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_MASK 0x40 // MISC1 bit 6: VSC SDP in use; the sink shall ignore MISC1 bit 7 and MISC0 bits 7:1.
+#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_SHIFT 0x6
+#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_DISABLE 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_ENABLE 0x1
+
+#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR_OR_LUMINANCE_MASK 0x80 // MISC1 bit 7 Y-Only Video
+#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR_OR_LUMINANCE_SHIFT 0x7
+#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR 0x0
+#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_LUMINANCE 0x1
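// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original patch): packing an
// MSA MISC0 byte with the mask/shift defines above. For synchronous clock,
// CEA RGB and 8 bpc this yields 0x01 | (0x4 << 1) | (0x1 << 5) = 0x29.
// ---------------------------------------------------------------------------
static NvU8 exBuildMsaMisc0(void)
{
    NvU8 misc0 = 0;
    misc0 |= (NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_INSYNC
              << NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_SHIFT)
             & NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_MASK;
    misc0 |= (NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_CEA_RGB
              << NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_SHIFT)
             & NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_MASK;
    misc0 |= (NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_8
              << NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_SHIFT)
             & NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_MASK;
    return misc0;  // 0x29 for this combination
}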
+
+// ************************
+// ** HDCP DPCD 1.0 Spec **
+// ************************
+
+// 0x68029 BSTATUS
+#define NVT_DPCD_HDCP_BSTATUS 0x68029
+#define NVT_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE 0x04
+#define NVT_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST 0x08
+
+#define NVT_DPCD_HDCP_BCAPS_OFFSET 0x00068028
+#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE 0:0
+#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_NO 0x00000000
+#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_YES 0x00000001
+#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER 1:1
+#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_NO 0x00000000
+#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_YES 0x00000001
+
+#define NVT_DPCD_HDCP_BKSV_OFFSET 0x00068000
+#define HDCP_KSV_SIZE 5
+
+
+// *********************************************
+// ** Vendor DPCD for Apple's mDP->VGA dongle **
+// *********************************************
+
+// 0x30F DP2VGA_I2C_SPEED_CONTROL
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CONTROL 0x30F
+
+// 0x50C DP2VGA_GENERAL_STATUS
+#define NVT_DPCD_DP2VGA_GENERAL_STATUS 0x50C
+
+// 0x50D DP2VGA_I2C_SPEED_CAP
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP 0x50D
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_SLOWEST 0xFF
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_1KBPS 0x01
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_3KBPS 0x02
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_10KBPS 0x04
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_100KBPS 0x08
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_400KBPS 0x10
+#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_1MBPS 0x20
+
+
+//
+// HDMI/DP common definitions
+
+#define NVT_DYNAMIC_RANGE_VESA 0x00
+#define NVT_DYNAMIC_RANGE_CEA 0x01
+#define NVT_DYNAMIC_RANGE_AUTO 0xFF
+
+
+typedef struct tagNVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT
+{
+    NvU8 type : 3;        // the downstream port type
+    NvU8 isHpdAware : 1;  // if it's HPD aware
+    NvU8 reserved : 4;
+}NVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT;
+//
+typedef struct tagNVT_DPCD_PARSED_RECEIVER_INFO
+{
+    // receiver info
+    NvU32 rev;                  // DPCD version number
+    NvU32 maxLinkRate;          // the max link rate of main link lanes in 10KHz
+    NvU32 maxLaneCount;         // the max number of lanes
+    NvU32 numOfPorts;           // the number of receiver ports
+    NvU32 p0BufferSizePerLane;  // the buffer size per lane (in BYTE)
+    NvU32 p1BufferSizePerLane;  // the buffer size per lane (in BYTE)
+
+    // downstream port info
+    NvU32 downstreamPortCount;  // the total number of down stream ports
+    NvU32 downstreamPort0Type;  // type of downstream port 0
+    NVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT downstreamPort[NVT_DPCD_RECEIVER_MAX_DOWNSTREAM_PORT];
+
+    // other misc info
+    NvU32 cap_support0_005DownSpread : 1;
+    NvU32 cap_supportEnhancedFrame : 1;
+    NvU32 cap_noAuxHandshakeLinkTraining : 1;
+    NvU32 cap_downstreamPortHasFormatConvBlk : 1;
+    NvU32 cap_mainLinkChSupportANSI8B10B : 1;
+    NvU32 
cap_downstreamPortSupportOUI : 1; + NvU32 cap_p0HasEDID : 1; + NvU32 cap_p0AssociatedToPrecedingPort : 1; + NvU32 cap_p1HasEDID : 1; + NvU32 cap_p1AssociatedToPrecedingPort : 1; + + // DP 1.2 fields + NvU32 cap_mstm : 1; + NvU32 cap_reserved : 21; +}NVT_DPCD_PARSED_RECEIVER_INFO; + +#define NVT_DPCD_NUM_TRAINING_LANES 4 + +typedef struct tagNVT_TRAINING_LANE_SETTING +{ + NvU8 voltageSwing; + NvU8 maxSwingReached; + NvU8 preEmphasis; + NvU8 maxPreEmphasisReached; +}NVT_TRAINING_LANE_SETTING; + +// 00100h LINK CONFIGURATION FIELD +typedef struct tagNVT_DPCD_PARSED_LINK_CONFIG +{ + NvU8 linkRate; + NvU8 laneCount; + + NVT_TRAINING_LANE_SETTING trainLaneSetting[NVT_DPCD_NUM_TRAINING_LANES]; + + NvU32 enhancedFrameEnabled : 1; + NvU32 trainingPatternSetting : 2; + NvU32 linkQualityPatternSetting : 2; + NvU32 recoveredClockOutputEnabled : 1; + NvU32 scramblingDisable : 1; + NvU32 symbolErrorCount : 2; + NvU32 spreadAmp : 1; + NvU32 mainLinkCoding8b10b : 1; + NvU32 multiStreamEnabled : 1; + NvU32 reserved : 19; +}NVT_DPCD_PARSED_LINK_CONFIG; + +typedef struct tagNVT_DPCD_INFO +{ + NVT_DPCD_PARSED_RECEIVER_INFO receiver; + NVT_DPCD_PARSED_LINK_CONFIG linkConfig; + NvU32 sourceOUI; + NvU32 sinkOUI; + NvU32 branchOUI; +}NVT_DPCD_INFO; + +typedef struct tagNVT_DPCD_CONFIG +{ + NvU32 dpInfoFlags; +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED 0:0 +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED_FALSE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED_TRUE (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE 7:4 +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_NONE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2DVI (0x00000001) // B2: dp2dvi-singlelink +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2HDMI (0x00000002) // dp2hdmi +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2DVI2 (0x00000003) // B3: dp2dvi-duallink +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2VGA (0x00000004) // B4: dp2vga +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2TV (0x00000005) // Composite/SVideo +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT 10:8 // Maximum supported laneCount +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_1_LANE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_2_LANE (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_4_LANE (0x00000002) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE 13:11 // Maximum supported linkRate +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_1_62GBPS (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_2_70GBPS (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_5_40GBPS (0x00000002) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_8_10GBPS (0x00000003) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM 16:16 // Bit to check MST/SST +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING 17:17 // Bit to check enhanced framing support +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD 18:18 // Bit to check downspread support +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING 19:19 // Bit to check scrambling +#define 
NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING_DISABLED (0x00000000)
+#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING_ENABLED (0x00000001)
+    NvU32 linkRate;
+    NvU32 laneCount;
+    NvU32 colorFormat;
+    NvU32 dynamicRange;
+    NvU32 colorimetry;
+    NvU32 bpc;
+    NvU32 bpp;
+
+    // pre-emphasis and drive current level (EFI might need this information)
+    NvU8 laneData[4];
+    // DP max pixelClock supported based on DP max laneCount/linkRate
+    NvU32 dpMaxPixelClk;
+    NvU8 maxCapLinkRate;
+    NvU8 maxCapLaneCount;
+
+    // B4 (DP2VGA) Vendor Specific I2C Speed Control
+    NvU8 dp2vga_i2cCap;
+    NvU8 dp2vga_i2cCtrl;
+
+    NvU8 bDpOffline;
+}NVT_DPCD_CONFIG;
+
+typedef struct tagNVT_DPCD_DP_TUNNELING_CAPS
+{
+    NvU8 dpTunnelingSupport : 1;               // DP Tunneling through USB4 Support
+    NvU8 reserved : 5;                         // Reserved.
+    NvU8 dpPanelReplayTunnelingOptSupport : 1; // Panel Replay Tunneling Optimization Support
+    NvU8 dpInBwAllocationModeSupport : 1;      // DP IN Bandwidth Allocation Mode Support
+}NVT_DPCD_DP_TUNNELING_CAPS;
+
+typedef struct tagNVT_DPCD_DP_IN_ADAPTER_INFO
+{
+    NvU8 dpInAdapterNumber : 6;  // DP IN Adapter Number
+    NvU8 reserved : 2;
+}NVT_DPCD_DP_IN_ADAPTER_INFO;
+
+typedef struct tagNVT_DPCD_USB4_DRIVER_ID
+{
+    NvU8 usb4DriverId : 4;  // USB4 Driver ID
+    NvU8 reserved : 4;
+}NVT_DPCD_USB4_DRIVER_ID;
+
+//******************************
+// Intel EDID Like Data (ELD)
+//******************************
+#define NVT_ELD_VER_1 0x1                   // ELD version 1, which is an obsolete ELD structure. Treated as reserved
+#define NVT_ELD_VER_2 0x2                   // ELD version 2, which supports CEA version 861-D or below. Max baseline ELD size of 80 bytes (15 short audio descriptors)
+#define NVT_ELD_VER_VIDEO_DRIVER_UNLOAD 0x1F // Indicates an ELD that has been partially populated through an implementation-specific means of default programming before an external
+                                             // graphics driver is loaded. Only the fields that are called out as "canned" fields will be populated, and the audio driver should
+                                             // ignore the non-"canned" fields.
+#define NVT_ELD_CONN_TYPE_HDMI 0x0          // indicates an HDMI connection type
+#define NVT_ELD_CONN_TYPE_DP 0x1            // indicates a DP connection type
+
+
+//******************************
+// Audio
+//******************************
+#define NVT_AUDIO_768KHZ 768000 // HBR Audio
+#define NVT_AUDIO_384KHZ 384000 // HBR Audio
+#define NVT_AUDIO_192KHZ 192000
+#define NVT_AUDIO_176KHZ 176000
+#define NVT_AUDIO_96KHZ 96000
+#define NVT_AUDIO_88KHZ 88000
+#define NVT_AUDIO_48KHZ 48000
+#define NVT_AUDIO_44KHZ 44000
+#define NVT_AUDIO_32KHZ 32000
+
+// Default format for HDTV is NVT_DEFAULT_HDTV_FMT, i.e. 1080i
+#define NVT_DEFAULT_HDTV_PREFERRED_TIMING(x, y, z, p) \
+    if(((x) == 1920) && ((y) == 1080) && ((z) != D3DDDI_VSSLO_PROGRESSIVE )) p = 1;
+
+// Default format for non-DDC displays is 10x7
+#define NVT_DEFAULT_NONDCC_PREFERRED_TIMING(x, y, z, p) \
+    if(((x) == 1024) && ((y) == 768) && ((z) == 60 )) p = 1;
+
+
+// Length of user-friendly monitor name, derived from the EDID's
+// Display Product Name descriptor block, plus the EDID manufacturer PNP
+// ID. The Display Product Name can be distributed across four 13-byte
+// descriptor blocks, and the PNP ID currently decodes to at most 40
+// characters: 4*13 + 40 = 92
+#define NVT_EDID_MONITOR_NAME_STRING_LENGTH 96
+
+// Compute the actual size of an EDID with a pointer to an NVT_EDID_INFO.
+static NV_INLINE NvU32 NVT_EDID_ACTUAL_SIZE(const NVT_EDID_INFO *pInfo)
+{
+    return (pInfo->total_extensions + 1) * 128;
+}
+
+//******************************
+//******************************
+//** the export functions **
+//******************************
+//******************************
+
+// the common timing function return values
+typedef enum
+{
+    NVT_STATUS_SUCCESS = 0,        // Success (no status)
+    NVT_STATUS_ERR = 0x80000000,   // generic get timing error
+    NVT_STATUS_INVALID_PARAMETER,  // passed an invalid parameter
+    NVT_STATUS_NO_MEMORY,          // memory allocation failed
+} NVT_STATUS;
+
+//*************************************
+// The EDID validation Mask
+//*************************************
+#define NVT_EDID_VALIDATION_MASK 0xFFFFFFFF
+#define NVT_IS_EDID_VALIDATION_FLAGS(x, n) ((((x)&NVT_EDID_VALIDATION_MASK)) & NVBIT32(n))
+#define NVT_CLEAR_EDID_VALIDATION_FLAGS(x, n) ((x)&=(~NVBIT32(n)))
+
+typedef enum
+{
+    // errors returned as a bitmask by NvTiming_EDIDValidationMask()
+    NVT_EDID_VALIDATION_ERR_EXT = 0,
+    NVT_EDID_VALIDATION_ERR_VERSION,
+    NVT_EDID_VALIDATION_ERR_SIZE,
+    NVT_EDID_VALIDATION_ERR_CHECKSUM,
+    NVT_EDID_VALIDATION_ERR_RANGE_LIMIT,
+    NVT_EDID_VALIDATION_ERR_DTD,
+    NVT_EDID_VALIDATION_ERR_EXT_DTD,
+    NVT_EDID_VALIDATION_ERR_EXTENSION_COUNT,
+    NVT_EDID_VALIDATION_ERR_DESCRIPTOR,
+    NVT_EDID_VALIDATION_ERR_EXT_CTA,
+    NVT_EDID_VALIDATION_ERR_EXT_CTA_DTD,
+    NVT_EDID_VALIDATION_ERR_EXT_CTA_TAG,
+    NVT_EDID_VALIDATION_ERR_EXT_CTA_SVD,
+    NVT_EDID_VALIDATION_ERR_EXT_CTA_CHECKSUM,
+    NVT_EDID_VALIDATION_ERR_EXT_DID_VERSION,
+    NVT_EDID_VALIDATION_ERR_EXT_DID_EXTCOUNT,
+    NVT_EDID_VALIDATION_ERR_EXT_DID_CHECKSUM,
+    NVT_EDID_VALIDATION_ERR_EXT_DID_SEC_SIZE,
+    NVT_EDID_VALIDATION_ERR_EXT_DID_TAG,
+    NVT_EDID_VALIDATION_ERR_EXT_DID_TYPE1,
+    NVT_EDID_VALIDATION_ERR_EXT_DID2_TAG,
+    NVT_EDID_VALIDATION_ERR_EXT_DID2_CHECKSUM,
+    NVT_EDID_VALIDATION_ERR_EXT_DID2_MANDATORY_BLOCKS,
+    NVT_EDID_VALIDATION_ERR_EXT_DID2_TYPE7,
+    NVT_EDID_VALIDATION_ERR_EXT_DID2_TYPE10,
+    NVT_EDID_VALIDATION_ERR_EXT_DID2_ADAPTIVE_SYNC,
+} NVT_EDID_VALIDATION_ERR_STATUS;
+#define NVT_EDID_VALIDATION_ERR_MASK(x) NVBIT32(x)
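// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original patch): checking a
// single validation failure bit from the mask returned by
// NvTiming_EDIDValidationMask() (declared later in this header); NV_TRUE
// requests strong validation.
// ---------------------------------------------------------------------------
static NvBool exEdidChecksumFailed(NvU8 *pEdid, NvU32 length)
{
    NvU32 failures = NvTiming_EDIDValidationMask(pEdid, length, NV_TRUE);
    return NVT_IS_EDID_VALIDATION_FLAGS(failures, NVT_EDID_VALIDATION_ERR_CHECKSUM) != 0;
}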
+
+typedef enum
+{
+    // errors returned as a bitmask by NvTiming_DisplayID2ValidationMask()
+    NVT_DID2_VALIDATION_ERR_VERSION = 0,
+    NVT_DID2_VALIDATION_ERR_SIZE,
+    NVT_DID2_VALIDATION_ERR_CHECKSUM,
+    NVT_DID2_VALIDATION_ERR_PRODUCT_ID,
+    NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK,
+    NVT_DID2_VALIDATION_ERR_RANGE_LIMIT,
+    NVT_DID2_VALIDATION_ERR_NATIVE_DTD,
+    NVT_DID2_VALIDATION_ERR_MANDATORY_BLOCKS,
+    NVT_DID2_VALIDATION_ERR_PRODUCT_IDENTIFY,
+    NVT_DID2_VALIDATION_ERR_PARAMETER,
+    NVT_DID2_VALIDATION_ERR_INTERFACE,
+    NVT_DID2_VALIDATION_ERR_TYPE7,
+    NVT_DID2_VALIDATION_ERR_TYPE10,
+    NVT_DID2_VALIDATION_ERR_ADAPTIVE_SYNC,
+} NVT_DID2_VALIDATION_ERR_STATUS;
+#define NVT_DID2_VALIDATION_ERR_MASK(x) NVBIT32(x)
+
+// timing calculation flags:
+#define NVT_FLAG_PROGRESSIVE_TIMING 0x00000000
+#define NVT_FLAG_INTERLACED_TIMING NVT_INTERLACED
+#define NVT_FLAG_INTERLACED_TIMING2 NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2 //without extra vblank on field 2
+#define NVT_FLAG_DOUBLE_SCAN_TIMING 0x00000010
+#define NVT_FLAG_REDUCED_BLANKING_TIMING 0x00000020
+#define NVT_FLAG_MAX_EDID_TIMING 0x00000040
+#define NVT_FLAG_NV_DOUBLE_SCAN_TIMING 0x00000080
+#define NVT_FLAG_NATIVE_TIMING 0x00000100
+#define NVT_FLAG_EDID_TIMING 0x00000200
+#define NVT_FLAG_CEA_4X3_TIMING 0x00000400
+#define NVT_FLAG_CEA_16X9_TIMING 0x00000800
+#define NVT_FLAG_OS_ADDED_TIMING 0x00001000
+#define NVT_FLAG_SPECTRUM_SPREAD 0x00002000
+#define NVT_FLAG_EDID_TIMING_RR_MATCH 0x00004000 +#define NVT_FLAG_EDID_861_ST 0x00008000 +#define NVT_FLAG_EDID_DTD_EIZO_SPLIT 0x00010000 +#define NVT_FLAG_DTD1_TIMING 0x00020000 +#define NVT_FLAG_NV_PREFERRED_TIMING 0x00040000 +#define NVT_FLAG_DTD1_PREFERRED_TIMING 0x00080000 +#define NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING 0x00100000 +#define NVT_FLAG_CEA_PREFERRED_TIMING 0x00200000 +#define NVT_FLAG_DISPLAYID_T7_DSC_PASSTHRU 0x00400000 +#define NVT_FLAG_DISPLAYID_2_0_TIMING 0x00800000 // this one for the CTA861 embedded in DID20 +#define NVT_FLAG_DISPLAYID_T7_T8_EXPLICT_YUV420 0x01000000 // DID2 E7 spec. supported yuv420 indicated + +#define NVT_FLAG_INTERLACED_MASK (NVT_FLAG_INTERLACED_TIMING | NVT_FLAG_INTERLACED_TIMING2) + +#ifdef __cplusplus +extern "C" { +#endif + +// Generic timing parameter calculation +NvU16 NvTiming_CalcRR(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal); +NvU32 NvTiming_CalcRRx1k(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal); + +NvU32 NvTiming_IsRoundedRREqual(NvU16 rr1, NvU32 rr1x1k, NvU16 rr2); +NvU32 NvTiming_IsTimingExactEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU32 NvTiming_IsTimingExactEqualEx(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU32 NvTiming_IsTimingRelaxedEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU16 NvTiming_MaxFrameWidth(NvU16 HVisible, NvU16 rep); + +// Establish timing enumeration +NVT_STATUS NvTiming_EnumEST(NvU32 index, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumESTIII(NvU32 index, NVT_TIMING *pT); + +// GTF timing calculation +NVT_STATUS NvTiming_CalcGTF(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); + +// DMT timing calculation +NVT_STATUS NvTiming_EnumDMT(NvU32 dmtId, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcDMT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcDMT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); + +// CVT timing calculation +NVT_STATUS NvTiming_CalcCVT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvBool is1000div1001, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB3(NvU32 width, NvU32 height, NvU32 rr, NvU32 deltaHBlank, NvU32 vBlankMicroSec, NvBool isEarlyVSync, NVT_TIMING *pT); +NvBool NvTiming_IsTimingCVTRB(const NVT_TIMING *pTiming); + +// CEA/EIA/Psf timing +NVT_STATUS NvTiming_CalcCEA861bTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 pixelRepeatCount, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumCEA861bTiming(NvU32 ceaFormat, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumNvPsfTiming(NvU32 nvPsfFormat, NVT_TIMING *pT); +NvU32 NvTiming_GetCEA861TimingIndex(NVT_TIMING *pT); + +//expose the HDMI extended video timing defined by the HDMI LLC VSDB +NVT_STATUS NvTiming_EnumHdmiVsdbExtendedTiming(NvU32 hdmi_vic, NVT_TIMING *pT); + +// TV(analog) based timing +NVT_STATUS NvTiming_GetTvTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 tvFormat, NVT_TIMING *pT); + +// Get EDID timing +NVT_STATUS NvTiming_GetEdidTimingEx(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k); +NVT_STATUS NvTiming_GetEdidTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT); + +// Get EDID based HDMI Stereo timing +NVT_STATUS NvTiming_GetHDMIStereoExtTimingFromEDID(NvU32 width, NvU32 height, NvU32 rr, NvU8 
structure, NvU8 detail, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_EXT_TIMING *pT); +void NvTiming_GetHDMIStereoTimingFrom2DTiming(const NVT_TIMING *pTiming, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NVT_EXT_TIMING *pExtTiming); +NVT_STATUS NvTiming_GetHDMIStereoMandatoryFormatDetail(const NvU8 vic, NvU16 *pStereoStructureMask, NvU8 *pSideBySideHalfDetail); + +// EDID based AspectRatio Timing +NVT_STATUS NvTiming_GetEDIDBasedASPRTiming(NvU16 width, NvU16 height, NvU16 rr, NVT_EDID_INFO *pEI, NVT_TIMING *ft); + + +// EDID or DISPLAYID2 version +NvU32 NvTiming_GetVESADisplayDescriptorVersion(NvU8 *rawData, NvU32 *pVer); + +// EDID entry parse +NVT_STATUS NV_STDCALL NvTiming_ParseEDIDInfo(NvU8 *pEdid, NvU32 length, NVT_EDID_INFO *pEdidInfo); +NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation); +NVT_STATUS NvTiming_EDIDValidation(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation); + +// DisplayID20 standalone entry parse +NVT_STATUS NV_STDCALL NvTiming_parseDisplayId20Info(const NvU8 *pDisplayId, NvU32 length, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NvU32 NvTiming_DisplayID2ValidationMask(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, NvBool bIsStrongValidation); +NVT_STATUS NvTiming_DisplayID2ValidationDataBlocks(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, NvBool bIsStrongValidation); + +NVT_STATUS NvTiming_Get18ByteLongDescriptorIndex(NVT_EDID_INFO *pEdidInfo, NvU8 tag, NvU32 *dtdIndex); +NVT_STATUS NvTiming_GetProductName(const NVT_EDID_INFO *pEdidInfo, + NvU8 *pProductName, + const NvU32 productNameLength); +NvU32 NvTiming_CalculateEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidsize); +NvU32 NvTiming_CalculateCommonEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidVersion); +NVT_STATUS NvTiming_CalculateEDIDLimits(NVT_EDID_INFO *pEdidInfo, NVT_EDID_RANGE_LIMIT *pLimit); +void NvTiming_GetMonitorName(NVT_EDID_INFO *pEdidInfo, NvU8 monitor_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]); + +// utility routines +NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c); +NvU32 axb_div_c(NvU32 a, NvU32 b, NvU32 c); +NvU32 a_div_b(NvU32 a, NvU32 b); +NvU32 calculateCRC32(NvU8* pBuf, NvU32 bufsize); +void patchChecksum(NvU8* pBuf); +NvBool isChecksumValid(NvU8* pBuf); + +NvU32 RRx1kToPclk (NVT_TIMING *pT); + +NVT_STATUS NvTiming_ComposeCustTimingString(NVT_TIMING *pT); + +// Infoframe composer +NVT_STATUS NvTiming_ConstructVideoInfoframeCtrl(const NVT_TIMING *pTiming, NVT_VIDEO_INFOFRAME_CTRL *pCtrl); +NVT_STATUS NvTiming_ConstructVideoInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VIDEO_INFOFRAME_CTRL *pCtrl, NVT_VIDEO_INFOFRAME *pContext, NVT_VIDEO_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructAudioInfoframe(NVT_AUDIO_INFOFRAME_CTRL *pCtrl, NVT_AUDIO_INFOFRAME *pContext, NVT_AUDIO_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructVendorSpecificInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pCtrl, NVT_VENDOR_SPECIFIC_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructExtendedMetadataPacketInfoframe(NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL *pCtrl, NVT_EXTENDED_METADATA_PACKET_INFOFRAME *p); + +// Get specific timing from parsed EDID +NVT_STATUS NvTiming_GetDTD1Timing (NVT_EDID_INFO * pEdidInfo, NVT_TIMING * pT); + +#define NVT_IS_DTD(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_DTD) +#define NVT_IS_EXT_DTD(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_EXT_DTD) +#define NVT_IS_CTA861(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_861ST) + +#define NVT_IS_DTD1(d) ((NVT_IS_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == 1)) +#define NVT_IS_DTDn(d, n) 
((NVT_IS_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) +#define NVT_IS_EXT_DTDn(d, n) ((NVT_IS_EXT_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) + +#define NVT_DID20_TIMING_IS_CTA861(flag, status) ((NVT_IS_CTA861((status))) && (0 != (NVT_FLAG_DISPLAYID_2_0_TIMING & (flag)))) +#define NVT_PREFERRED_TIMING_IS_DTD1(flag, status) ((NVT_IS_DTD1((status))) && (0 != (NVT_FLAG_DTD1_PREFERRED_TIMING & (flag)))) +#define NVT_PREFERRED_TIMING_IS_DISPLAYID(flag) (0 != (NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING & flag)) +#define NVT_PREFERRED_TIMING_IS_CEA(flag) (0 != (NVT_FLAG_CEA_PREFERRED_TIMING & flag)) + +#ifdef __cplusplus +} +#endif + +#endif //__NVTIMING_H__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h new file mode 100644 index 0000000..378b2d5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h @@ -0,0 +1,144 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvtiming_pvt.h +// +// Purpose: the private functions/structures which are only used inside +// the nv timing library. 
+//
+//*****************************************************************************
+
+#ifndef __NVTIMING_PVT_H_
+#define __NVTIMING_PVT_H_
+
+#include "nvtiming.h"
+
+#if defined(NVT_USE_NVKMS)
+    #include "nvidia-modeset-os-interface.h"
+    #define NVT_SNPRINTF nvkms_snprintf
+#else
+    #include <stdio.h>  // snprintf
+    #include <stdarg.h>
+    #define NVT_SNPRINTF snprintf
+#endif
+
+#define nvt_assert(p) ((void)0)
+
+#include <stddef.h> // NULL
+
+// EDID related private functions
+NvU32 getEdidVersion(NvU8 *pData, NvU32 *pVer);
+NvBool assignNextAvailableTiming(NVT_EDID_INFO *pInfo, const NVT_TIMING *pTiming);
+void parseEdidCvtTiming(NVT_EDID_INFO *pInfo);
+void parseEdidEstablishedTiming(NVT_EDID_INFO *pInfo);
+void parseEdidStandardTiming(NVT_EDID_INFO *pInfo);
+void parseEdidDetailedTiming(NvU8 *pEdid, NVT_EDID_INFO *pInfo);
+NVT_STATUS parseEdidDetailedTimingDescriptor(NvU8 *pDTD, NVT_TIMING *pT);
+void parseEdidCvt3ByteDescriptor(NvU8 *p, NVT_EDID_INFO *pInfo, NvU32 *vtbCount);
+void parseEdidStandardTimingDescriptor(NvU16 timing, NVT_EDID_INFO *pInfo, NvU32 count, NVT_TIMING * pT);
+void parseVTBExtension(NvU8 *pEdidExt, NVT_EDID_INFO *pInfo);
+void updateHDMILLCDeepColorForTiming(NVT_EDID_INFO *pInfo, NvU32 index);
+void updateBpcForTiming(NVT_EDID_INFO *pInfo, NvU32 index);
+void updateColorFormatAndBpcTiming(NVT_EDID_INFO *pInfo);
+// End EDID
+
+// CTA861 related private functions
+NVT_STATUS get861ExtInfo(NvU8 *pEdid, NvU32 edidSize, NVT_EDID_CEA861_INFO *p);
+NVT_STATUS parseCta861DataBlockInfo(NvU8 *pEdid, NvU32 size, NVT_EDID_CEA861_INFO *p);
+void parse861ExtDetailedTiming(NvU8 *pEdidExt, NvU8 basicCaps, NVT_EDID_INFO *pInfo);
+void parse861bShortTiming(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag);
+void parse861bShortYuv420Timing(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag);
+void parse861bShortPreferredTiming(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag);
+void parseCta861VsdbBlocks(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag);
+void parseCta861HfScdb(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag);
+void parseCta861HfEeodb(NVT_EDID_CEA861_INFO *pExt861, NvU32 *pTotalEdidExtensions);
+void parseEdidMsftVsdbBlock(VSDB_DATA *pVsdb, MSFT_VSDB_PARSED_INFO *vsdbInfo);
+void parseEdidHdmiLlcBasicInfo(VSDB_DATA *pVsdb, NVT_HDMI_LLC_INFO *pHdmiLlc);
+void parseEdidHdmiForumVSDB(VSDB_DATA *pVsdb, NVT_HDMI_FORUM_INFO *pHdmiInfo);
+void getEdidHDM1_4bVsdbTiming(NVT_EDID_INFO *pInfo);
+void parseEdidHDMILLCTiming(NVT_EDID_INFO *pInfo, VSDB_DATA *pVsdb, NvU32 *pSupported, HDMI3DSUPPORTMAP * pM);
+void parseEdidNvidiaVSDBBlock(VSDB_DATA *pVsdb, NVDA_VSDB_PARSED_INFO *vsdbInfo);
+void parseCea861HdrStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN);
+void parseCea861DvStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, NVT_DV_STATIC_METADATA *pDvInfo);
+NvBool isMatchedCTA861Timing(NVT_EDID_INFO *pInfo, NVT_TIMING *pT);
+NvU32 isHdmi3DStereoType(NvU8 StereoStructureType);
+NvU32 getCEA861TimingAspectRatio(NvU32 vic);
+void SetActiveSpaceForHDMI3DStereo(const NVT_TIMING *pTiming, NVT_EXT_TIMING *pExtTiming);
+void AddModeToSupportMap(HDMI3DSUPPORTMAP * pMap, NvU8 vic, NvU8 structure, NvU8 Detail);
+void getMonitorDescriptorString(NvU8 *pEdid, NvU8 tag, char *str, int onceOnly);
+// End CTA861
+
+// DisplayID base / extension related functions
+NvU32 getDID2Version(NvU8 *pData, NvU32 *pVer);
+NVT_STATUS getDisplayIdEDIDExtInfo(NvU8* pEdid, NvU32 edidSize, NVT_EDID_INFO* pEdidInfo);
+NVT_STATUS parseDisplayIdBlock(NvU8* pBlock, NvU8 max_length, NvU8* pLength, NVT_EDID_INFO* pEdidInfo);
+NVT_STATUS getDisplayId20EDIDExtInfo(NvU8* pDisplayid, NvU32 edidSize, NVT_EDID_INFO* pEdidInfo);
+NVT_STATUS parseDisplayId20EDIDExtDataBlocks(NvU8* pDataBlock, NvU8 remainSectionLength, NvU8* pCurrentDBLength, NVT_EDID_INFO* pEdidInfo);
+void updateColorFormatForDisplayIdExtnTimings(NVT_EDID_INFO* pInfo, NvU32 timingIdx);
+void updateColorFormatForDisplayId20ExtnTimings(NVT_EDID_INFO* pInfo, NvU32 timingIdx);
+NvBool assignNextAvailableDisplayId20Timing(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, const NVT_TIMING *pTiming);
+void updateColorFormatForDisplayId20Timings(NVT_DISPLAYID_2_0_INFO* pDisplayId2Info, NvU32 timingIdx);
+// End DisplayID
+
+NvU32 axb_div_c_old(NvU32 a, NvU32 b, NvU32 c);
+
+#define NVT_EDID_BLOCK_SIZE 128
+
+#define NVT_PVT_INTERLACED_MASK 0xF
+#define NVT_PVT_DOUBLESCAN_MASK 0x10
+#define NVT_PVT_RB_MASK 0x20
+
+#define NVT_PVT_DOUBLE_SCAN_HEIGHT 384
+#define NVT_PVT_DOUBLE_SCAN_HEIGHT_VGA 600
+#define NVT_PVT_DOUBLE_SCAN_PCLK_MIN 1200 //in 10KHz
+
+#define abs(a) ((a)>0?(a):-(a))
+#define set_rrx1k(a,b,c) ((b)*(c)==0?(0):(NvU32)(((NvU64)(a)*10000*1000+(b)*(c)/2)/((b)*(c))))
+#define frame_height(a) ((NvU32)((a).VVisible * ((a).interlaced!=0?2:1)))
+#define nvt_is_wideaspect(width,height) ((width)*5 >= (height)*8)
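// ---------------------------------------------------------------------------
// Editor's note (not part of the original patch): worked example for the
// set_rrx1k() macro above. With a = pixel clock in 10 kHz units, b = HTotal
// and c = VTotal, it computes the refresh rate in 0.001 Hz units, rounded to
// the nearest unit:
//     rrx1k = (a * 10000 * 1000 + (b*c)/2) / (b*c)
// e.g. 1080p60 with a 148.5 MHz pixel clock and a 2200 x 1125 total raster:
//     a = 14850, b = 2200, c = 1125
//     rrx1k = (148500000000 + 1237500) / 2475000 = 60000  ->  60.000 Hz
// The 64-bit cast guards the a*10^7 product against 32-bit overflow, and the
// b*c == 0 check avoids a division by zero for degenerate rasters.
// ---------------------------------------------------------------------------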
+
+#ifndef MIN
+#define MIN(x, y) ((x)>(y) ? (y) : (x))
+#endif
+#ifndef MAX
+#define MAX(x,y) ((x) > (y) ? (x) : (y))
+#endif
+
+
+#ifndef COUNT
+#define COUNT(a) (sizeof(a)/sizeof(a[0]))
+#endif
+#ifndef offsetof
+#define offsetof(st, m) ((size_t) ( (char *)&((st *)(0))->m - (char *)0 ))
+#endif
+#define nvt_nvu8_set_bits(d, s, m, shift) {(d)&=(NvU8)((NvU8)(m)^0xFFU);(d)|=((s)<<(shift))&(m);}
+#define nvt_get_bits(d, m, shift) (((d)&(m))>>shift)
+#define nvt_lowest_bit(n) ((n)&(~((n)-1)))
+#define nvt_aspect_x(n) ((n)>>16)
+#define nvt_aspect_y(n) ((n)&0xFFFF)
+
+// Sentinel values for NVT_TIMING
+#define NVT_TIMINGEXT_SENTINEL {0,0,0,0,0,{0},{0},{0},{0},0,""}
+#define NVT_TIMING_SENTINEL {0,0,0,0,0,0,0,0,0,0,0,0,0,0,NVT_TIMINGEXT_SENTINEL}
+
+#endif //__NVTIMING_PVT_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h
new file mode 100644
index 0000000..713eb7b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h
@@ -0,0 +1,52 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl0000.finn
+//
+
+#include "nvlimits.h"
+#include "cl0000_notification.h"
+
+/* object NV01_NULL_OBJECT */
+#define NV01_NULL_OBJECT (0x00000000)
+
+/* obsolete aliases */
+#define NV1_NULL_OBJECT NV01_NULL_OBJECT
+
+#define NV01_ROOT (0x00000000)
+
+/* NvAlloc parameters */
+#define NV0000_ALLOC_PARAMETERS_MESSAGE_ID (0x0000U)
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+    NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+    NvU32 processID;
+    char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h
new file mode 100644
index 0000000..79b51d9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h
@@ -0,0 +1,68 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _cl0000_notification_h_ +#define _cl0000_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/*event values*/ +#define NV0000_NOTIFIERS_DISPLAY_CHANGE (0) +#define NV0000_NOTIFIERS_EVENT_NONE_PENDING (1) +#define NV0000_NOTIFIERS_VM_START (2) +#define NV0000_NOTIFIERS_GPU_BIND_EVENT (3) +#define NV0000_NOTIFIERS_NVTELEMETRY_REPORT_EVENT (4) +#define NV0000_NOTIFIERS_MAXCOUNT (5) + +/*Status definitions for NV0000_NOTIFIERS_DISPLAY_CHANGE event*/ + +#define NV0000_NOTIFIERS_STATUS_ACPI_DISPLAY_DEVICE_CYCLE (0) + +//--------------------------------------------------------------------------- + +/* NvNotification[] fields and values */ +#define NV000_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + + +/* pio method data structure */ +typedef volatile struct _cl0000_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv000Typedef, Nv01Root; + +/* obsolete aliases */ +#define NV000_TYPEDEF Nv01Root +#define Nv1Root Nv01Root +#define nv1Root Nv01Root +#define nv01Root Nv01Root + +/*event values*/ +#define NV0000_NOTIFIERS_ENABLE_CPU_UTIL_CTRL (1) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0000_notification_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h new file mode 100644 index 0000000..10a8af4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0001_h_ +#define _cl0001_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV01_ROOT_NON_PRIV (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0001_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h new file mode 100644 index 0000000..dd1d6c6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0002_h_ +#define _cl0002_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_CONTEXT_DMA_FROM_MEMORY (0x00000002) +/* NvNotification[] fields and values */ +#define NV002_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0002_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv002Typedef, Nv01ContextDmaFromMemory; +#define NV002_TYPEDEF Nv01ContextDmaFromMemory +/* obsolete stuff */ +#define NV1_CONTEXT_DMA_FROM_MEMORY (0x00000002) +#define NV01_CONTEXT_DMA (0x00000002) +#define Nv1ContextDmaFromMemory Nv01ContextDmaFromMemory +#define nv1ContextDmaFromMemory Nv01ContextDmaFromMemory +#define nv01ContextDmaFromMemory Nv01ContextDmaFromMemory + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0002_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h new file mode 100644 index 0000000..32e25e7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0004_h_ +#define _cl0004_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_TIMER (0x00000004) +/* NvNotification[] elements */ +#define NV004_NOTIFIERS_SET_ALARM_NOTIFY (0) +#define NV004_NOTIFIERS_MAXCOUNT (1) + +/* mapped timer registers */ +typedef volatile struct _Nv01TimerMapTypedef { + NvU32 Reserved00[0x100]; + NvU32 PTimerTime0; /* 0x00009400 */ + NvU32 Reserved01[0x3]; + NvU32 PTimerTime1; /* 0x00009410 */ +} Nv01TimerMap; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0004_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h new file mode 100644 index 0000000..a963d15 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0005.finn +// + +#include "cl0005_notification.h" + +#define NV01_EVENT (0x00000005) + +/* NvRmAlloc() parameters */ +#define NV0005_ALLOC_PARAMETERS_MESSAGE_ID (0x0005U) + +typedef struct NV0005_ALLOC_PARAMETERS { + NvHandle hParentClient; + NvHandle hSrcResource; + + NvV32 hClass; + NvV32 notifyIndex; + NV_DECLARE_ALIGNED(NvP64 data, 8); +} NV0005_ALLOC_PARAMETERS; + + +/* NV0005_ALLOC_PARAMETERS's notifyIndex field is overloaded to contain the + * notifyIndex value itself, plus flags, and optionally a subdevice field if + * flags contains NV01_EVENT_SUBDEVICE_SPECIFIC. Note that NV01_EVENT_* + * contain the full 32-bit flag value that is OR'd into notifyIndex, not the + * contents of the FLAGS field (i.e. NV01_EVENT_* are pre-shifted into place).
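+ * + * Editor's illustrative sketch (not part of the original header), assuming only the DRF-style bit ranges defined just below: + * + *     NvU32 notifyIndex = (index & 0xFFFF) | ((subdevice & 0xFF) << 16) | eventFlags; + * + * Here index, subdevice and eventFlags are hypothetical caller-supplied values: INDEX occupies bits 15:0, SUBDEVICE bits 23:16, and eventFlags is a pre-shifted NV01_EVENT_* value that already sits in the FLAGS bits 31:24, so it is OR'd in without further shifting.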
+ */ +#define NV0005_NOTIFY_INDEX_INDEX 15:0 +#define NV0005_NOTIFY_INDEX_SUBDEVICE 23:16 +#define NV0005_NOTIFY_INDEX_FLAGS 31:24 diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h new file mode 100644 index 0000000..5e6b32e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0005_notification_h_ +#define _cl0005_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* NvNotification[] fields and values */ +#define NV003_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +/* pio method data structure */ +typedef volatile struct _cl0005_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv005Typedef, Nv01Event; + +#define NV005_TYPEDEF Nv01Event + +/* obsolete stuff */ +#define NV1_TIMER (0x00000004) +#define Nv1Event Nv01Event +#define nv1Event Nv01Event +#define nv01Event Nv01Event + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0005_notification_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h new file mode 100644 index 0000000..87154a9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0020_h_ +#define _cl0020_h_ + +#include "nvtypes.h" + +#define NV0020_GPU_MANAGEMENT (0x00000020) + +#endif /* _cl0020_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h new file mode 100644 index 0000000..d23e642 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl003e_h_ +#define _cl003e_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_CONTEXT_ERROR_TO_MEMORY (0x0000003E) +#define NV01_MEMORY_SYSTEM (0x0000003E) +/* NvNotification[] fields and values */ +#define NV03E_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl003e_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv03eTypedef, Nv01ContextErrorToMemory; +#define NV03E_TYPEDEF Nv01ContextErrorToMemory +/* obsolete stuff */ +#define NV1_CONTEXT_ERROR_TO_MEMORY (0x0000003E) +#define Nv1ContextErrorToMemory Nv01ContextErrorToMemory +#define nv1ContextErrorToMemory Nv01ContextErrorToMemory +#define nv01ContextErrorToMemory Nv01ContextErrorToMemory + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl003e_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h new file mode 100644 index 0000000..103ec3a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2001 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl0040_h_ +#define _cl0040_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_LOCAL_USER (0x00000040) +/* NvNotification[] fields and values */ +#define NV040_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0040_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv040Typedef, Nv01MemoryLocalUser; +#define NV040_TYPEDEF Nv01MemoryLocalUser +/* obsolete stuff */ +#define NV01_MEMORY_USER (0x00000040) +#define NV1_MEMORY_USER (0x00000040) +#define Nv01MemoryUser Nv01MemoryLocalUser +#define nv01MemoryUser Nv01MemoryLocalUser +#define Nv1MemoryUser Nv01MemoryLocalUser +#define nv1MemoryUser Nv01MemoryLocalUser +#define nv01MemoryLocalUser Nv01MemoryLocalUser + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0040_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h new file mode 100644 index 0000000..fa8707a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2001-2005, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0041_h_ +#define _cl0041_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV04_MEMORY (0x00000041) +/* NvNotification[] fields and values */ +#define NV041_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0041_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv041Typedef, Nv04Memory; +#define NV041_TYPEDEF Nv04Memory + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0041_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h new file mode 100644 index 0000000..be106d3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0071_h_ +#define _cl0071_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_SYSTEM_OS_DESCRIPTOR (0x00000071) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0071_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h new file mode 100644 index 0000000..3775287 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2001-2021, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0073_h_ +#define _cl0073_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV04_DISPLAY_COMMON (0x00000073) + +/* event values */ +#define NV0073_NOTIFIERS_SW (0) +#define NV0073_NOTIFIERS_MAXCOUNT (5) + + +#define NV0073_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV0073_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV0073_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV0073_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV0073_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +/* pio method data structure */ +typedef volatile struct _cl0073_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv073Typedef, Nv04DisplayCommon; +#define NV073_TYPEDEF Nv04DisplayCommon + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0073_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h new file mode 100644 index 0000000..50bd0a9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0076_h_ +#define _cl0076_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_FRAMEBUFFER_CONSOLE (0x00000076) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0076_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h new file mode 100644 index 0000000..e1f6a94 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl0080.finn +// + +#include "nvlimits.h" +#include "cl0080_notification.h" + +#define NV01_DEVICE_0 (0x00000080) + +/* NvAlloc parameters */ +#define NV0080_MAX_DEVICES NV_MAX_DEVICES + +/** + * @brief Alloc param + * + * @param vaMode mode for virtual address space allocation + * Three modes: + * NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES + * NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE + * NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES + * Detailed description of these modes is in nvos.h + **/ + +#define NV0080_ALLOC_PARAMETERS_MESSAGE_ID (0x0080U) + +typedef struct NV0080_ALLOC_PARAMETERS { + NvU32 deviceId; + NvHandle hClientShare; + NvHandle hTargetClient; + NvHandle hTargetDevice; + NvV32 flags; + NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8); + NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8); + NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8); + NvV32 vaMode; +} NV0080_ALLOC_PARAMETERS; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h new file mode 100644 index 0000000..a976d3f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0080_notification_h_ +#define _cl0080_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* NvNotification[] fields and values */ +#define NV080_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +/* pio method data structure */ +typedef volatile struct _cl0080_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv080Typedef, Nv01Device0; + +#define NV080_TYPEDEF Nv01Device0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0080_notification_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h new file mode 100644 index 0000000..8a90070 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef SDK_CL0092_H +#define SDK_CL0092_H + +#include "nvtypes.h" + +/* + * This RgLineCallback class allows RM clients to register/unregister the RG line callback functions. + * + * Must be allocated with kernel access rights. + * + * Allocation params: + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON parent device to which the + * operation should be directed. + * head + * This parameter specifies the head for which the callback is to be registered/unregistered. This value must be + * less than the maximum number of heads supported by the GPU subdevice. + * rgLineNum + * This indicates the RG scanout line number on which the callback will be executed. + * 1/ Client should set the proper RG line number based on mode in which the display head is running and + * subsequent possible modeset that may affect the line number. + * 2/ Client is expected to clear/set the interrupts around modesets or power-transitions (like s3/hibernation). + * 3/ Client should make sure that this param does not exceed the raster settings. + * pCallbkFn + * Pointer to callback function. Cannot be NULL. + * pCallbkParams + * Pointer to the ctrl call param struct. 
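+ * + * Editor's illustrative sketch (not part of the original header); the callback and variable names are hypothetical: + * + *     static void myRgLineCallback(NvU32 rgIntrLine, void *param1, NvBool bIsIrqlIsr); + * + *     NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS params = { 0 }; + *     params.subDeviceInstance = 0;              -- subdevice under the NV04_DISPLAY_COMMON parent + *     params.head              = 0;              -- must be below the subdevice's head count + *     params.rgLineNum         = line;           -- RG scanout line chosen for the current mode + *     params.pCallbkFn         = myRgLineCallback; -- cannot be NULL + *     params.pCallbkParams     = &myCtrlParams;  -- hypothetical ctrl call param struct + * + * params would then be handed to the RM allocation of the NV0092_RG_LINE_CALLBACK class defined below.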
+ */ + +#define NV0092_RG_LINE_CALLBACK 0x0092 + +typedef void (*NV0092_REGISTER_RG_LINE_CALLBACK_FN)(NvU32 rgIntrLine, void *param1, NvBool bIsIrqlIsr); + +typedef struct +{ + NvU32 subDeviceInstance; + NvU32 head; + NvU32 rgLineNum; + + NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallbkFn; + + void *pCallbkParams; +} NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS; + +#endif // SDK_CL0092_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h new file mode 100644 index 0000000..bc6e69f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CL00B1_H_ +#define _CL00B1_H_ + +#define NV01_MEMORY_HW_RESOURCES 0x00b1 + +#endif // _CL00B1_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h new file mode 100644 index 0000000..956929c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl00c1_h_ +#define _cl00c1_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvlimits.h" + +#define NV_FB_SEGMENT (0x000000C1) + +/* + * NV_FB_SEGMENT_ALLOCATION_PARAMS - Allocation params to create FB segment through + * NvRmAlloc. + */ +typedef struct +{ + NvHandle hCtxDma; // unused + NvU32 subDeviceIDMask NV_ALIGN_BYTES(8); + NvU64 dmaOffset NV_ALIGN_BYTES(8); // unused + NvU64 VidOffset NV_ALIGN_BYTES(8); + NvU64 Offset NV_ALIGN_BYTES(8); // To be deprecated + NvU64 pOffset[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); + NvU64 Length NV_ALIGN_BYTES(8); + NvU64 ValidLength NV_ALIGN_BYTES(8); + NvP64 pPageArray NV_ALIGN_BYTES(8); + NvU32 startPageIndex; + NvHandle AllocHintHandle; + NvU32 Flags; + NvHandle hMemory; // Not used in NvRmAlloc path; only used in CTRL path + NvHandle hClient; // Not used in NvRmAlloc path; only used in CTRL path + NvHandle hDevice; // Not used in NvRmAlloc path; only used in CTRL path + NvP64 pCpuAddress NV_ALIGN_BYTES(8); // To be deprecated + NvP64 ppCpuAddress[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); + NvU64 GpuAddress NV_ALIGN_BYTES(8); // To be deprecated + NvU64 pGpuAddress[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); + NvHandle hAllocHintClient; + NvU32 kind; + NvU32 compTag; +} NV_FB_SEGMENT_ALLOCATION_PARAMS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl00c1_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h new file mode 100644 index 0000000..b9b31bc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl00c3.finn +// + +#define NV01_MEMORY_SYNCPOINT 0x00C3 + +/* + * NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS - Allocation params to create syncpoint + * through NvRmAlloc. 
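+ * + * Editor's illustrative sketch (not part of the original header): the struct below carries only the syncpoint identifier, so an allocation reduces to + * + *     NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS params = { 0 }; + *     params.syncpointId = mySyncptId;   -- hypothetical identifier obtained elsewhere + * + * with params passed as the allocation parameters for class NV01_MEMORY_SYNCPOINT.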
+ */ +#define NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS_MESSAGE_ID (0x00c3U) + +typedef struct NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS { + NvU32 syncpointId; +} NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS; + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h new file mode 100644 index 0000000..13b0624 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl00f2_h_ +#define _cl00f2_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define IO_VASPACE_A (0x000000f2) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl00f2_h + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h new file mode 100644 index 0000000..aa50ead --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvtypes.h" + +#ifndef _cl00fc_h_ +#define _cl00fc_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define FABRIC_VASPACE_A (0x000000fc) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl00fc_h + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h new file mode 100644 index 0000000..b43209c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h @@ -0,0 +1,497 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl2080_h_ +#define _cl2080_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvlimits.h" + +#define NV20_SUBDEVICE_0 (0x00002080) + +/* event values */ +#define NV2080_NOTIFIERS_SW (0) +#define NV2080_NOTIFIERS_HOTPLUG (1) +#define NV2080_NOTIFIERS_POWER_CONNECTOR (2) +#define NV2080_NOTIFIERS_THERMAL_SW (3) +#define NV2080_NOTIFIERS_THERMAL_HW (4) +#define NV2080_NOTIFIERS_FULL_SCREEN_CHANGE (5) +#define NV2080_NOTIFIERS_EVENTBUFFER (6) +#define NV2080_NOTIFIERS_DP_IRQ (7) +#define NV2080_NOTIFIERS_GR_DEBUG_INTR (8) +#define NV2080_NOTIFIERS_PMU_EVENT (9) +#define NV2080_NOTIFIERS_PMU_COMMAND (10) +#define NV2080_NOTIFIERS_TIMER (11) +#define NV2080_NOTIFIERS_GRAPHICS (12) +#define NV2080_NOTIFIERS_PPP (13) +#define NV2080_NOTIFIERS_VLD (14) // also known as BSP +#define NV2080_NOTIFIERS_NVDEC0 NV2080_NOTIFIERS_VLD +#define NV2080_NOTIFIERS_NVDEC1 (15) +#define NV2080_NOTIFIERS_NVDEC2 (16) +#define NV2080_NOTIFIERS_NVDEC3 (17) +#define NV2080_NOTIFIERS_NVDEC4 (18) +#define NV2080_NOTIFIERS_RESERVED19 (19) +#define NV2080_NOTIFIERS_RESERVED20 (20) +#define NV2080_NOTIFIERS_RESERVED21 (21) +#define NV2080_NOTIFIERS_PDEC (22) // also known as VP +#define NV2080_NOTIFIERS_CE0 (23) +#define NV2080_NOTIFIERS_CE1 (24) +#define NV2080_NOTIFIERS_CE2 (25) +#define NV2080_NOTIFIERS_CE3 (26) +#define NV2080_NOTIFIERS_CE4 (27) +#define NV2080_NOTIFIERS_CE5 (28) +#define NV2080_NOTIFIERS_CE6 (29) +#define NV2080_NOTIFIERS_CE7 (30) +#define NV2080_NOTIFIERS_CE8 (31) +#define NV2080_NOTIFIERS_CE9 (32) +#define NV2080_NOTIFIERS_PSTATE_CHANGE (33) +#define NV2080_NOTIFIERS_HDCP_STATUS_CHANGE (34) +#define NV2080_NOTIFIERS_FIFO_EVENT_MTHD (35) +#define NV2080_NOTIFIERS_PRIV_RING_HANG (36) +#define NV2080_NOTIFIERS_RC_ERROR (37) +#define NV2080_NOTIFIERS_MSENC (38) +#define NV2080_NOTIFIERS_NVENC0 NV2080_NOTIFIERS_MSENC +#define NV2080_NOTIFIERS_NVENC1 (39) +#define NV2080_NOTIFIERS_NVENC2 (40) +#define NV2080_NOTIFIERS_UNUSED_0 (41) // Unused +#define NV2080_NOTIFIERS_ACPI_NOTIFY (42) +#define NV2080_NOTIFIERS_COOLER_DIAG_ZONE (43) +#define NV2080_NOTIFIERS_THERMAL_DIAG_ZONE (44) +#define NV2080_NOTIFIERS_AUDIO_HDCP_REQUEST (45) +#define NV2080_NOTIFIERS_WORKLOAD_MODULATION_CHANGE (46) +#define NV2080_NOTIFIERS_GPIO_0_RISING_INTERRUPT (47) +#define NV2080_NOTIFIERS_GPIO_1_RISING_INTERRUPT (48) +#define NV2080_NOTIFIERS_GPIO_2_RISING_INTERRUPT (49) +#define NV2080_NOTIFIERS_GPIO_3_RISING_INTERRUPT (50) +#define NV2080_NOTIFIERS_GPIO_4_RISING_INTERRUPT (51) +#define NV2080_NOTIFIERS_GPIO_5_RISING_INTERRUPT (52) +#define NV2080_NOTIFIERS_GPIO_6_RISING_INTERRUPT (53) +#define NV2080_NOTIFIERS_GPIO_7_RISING_INTERRUPT (54) +#define NV2080_NOTIFIERS_GPIO_8_RISING_INTERRUPT (55) +#define NV2080_NOTIFIERS_GPIO_9_RISING_INTERRUPT (56) +#define NV2080_NOTIFIERS_GPIO_10_RISING_INTERRUPT (57) +#define NV2080_NOTIFIERS_GPIO_11_RISING_INTERRUPT (58) +#define NV2080_NOTIFIERS_GPIO_12_RISING_INTERRUPT (59) +#define NV2080_NOTIFIERS_GPIO_13_RISING_INTERRUPT (60) +#define NV2080_NOTIFIERS_GPIO_14_RISING_INTERRUPT (61) +#define NV2080_NOTIFIERS_GPIO_15_RISING_INTERRUPT (62) +#define NV2080_NOTIFIERS_GPIO_16_RISING_INTERRUPT (63) +#define NV2080_NOTIFIERS_GPIO_17_RISING_INTERRUPT (64) +#define NV2080_NOTIFIERS_GPIO_18_RISING_INTERRUPT (65) +#define NV2080_NOTIFIERS_GPIO_19_RISING_INTERRUPT (66) +#define NV2080_NOTIFIERS_GPIO_20_RISING_INTERRUPT (67) +#define NV2080_NOTIFIERS_GPIO_21_RISING_INTERRUPT (68) +#define NV2080_NOTIFIERS_GPIO_22_RISING_INTERRUPT (69) +#define 
NV2080_NOTIFIERS_GPIO_23_RISING_INTERRUPT (70) +#define NV2080_NOTIFIERS_GPIO_24_RISING_INTERRUPT (71) +#define NV2080_NOTIFIERS_GPIO_25_RISING_INTERRUPT (72) +#define NV2080_NOTIFIERS_GPIO_26_RISING_INTERRUPT (73) +#define NV2080_NOTIFIERS_GPIO_27_RISING_INTERRUPT (74) +#define NV2080_NOTIFIERS_GPIO_28_RISING_INTERRUPT (75) +#define NV2080_NOTIFIERS_GPIO_29_RISING_INTERRUPT (76) +#define NV2080_NOTIFIERS_GPIO_30_RISING_INTERRUPT (77) +#define NV2080_NOTIFIERS_GPIO_31_RISING_INTERRUPT (78) +#define NV2080_NOTIFIERS_GPIO_0_FALLING_INTERRUPT (79) +#define NV2080_NOTIFIERS_GPIO_1_FALLING_INTERRUPT (80) +#define NV2080_NOTIFIERS_GPIO_2_FALLING_INTERRUPT (81) +#define NV2080_NOTIFIERS_GPIO_3_FALLING_INTERRUPT (82) +#define NV2080_NOTIFIERS_GPIO_4_FALLING_INTERRUPT (83) +#define NV2080_NOTIFIERS_GPIO_5_FALLING_INTERRUPT (84) +#define NV2080_NOTIFIERS_GPIO_6_FALLING_INTERRUPT (85) +#define NV2080_NOTIFIERS_GPIO_7_FALLING_INTERRUPT (86) +#define NV2080_NOTIFIERS_GPIO_8_FALLING_INTERRUPT (87) +#define NV2080_NOTIFIERS_GPIO_9_FALLING_INTERRUPT (88) +#define NV2080_NOTIFIERS_GPIO_10_FALLING_INTERRUPT (89) +#define NV2080_NOTIFIERS_GPIO_11_FALLING_INTERRUPT (90) +#define NV2080_NOTIFIERS_GPIO_12_FALLING_INTERRUPT (91) +#define NV2080_NOTIFIERS_GPIO_13_FALLING_INTERRUPT (92) +#define NV2080_NOTIFIERS_GPIO_14_FALLING_INTERRUPT (93) +#define NV2080_NOTIFIERS_GPIO_15_FALLING_INTERRUPT (94) +#define NV2080_NOTIFIERS_GPIO_16_FALLING_INTERRUPT (95) +#define NV2080_NOTIFIERS_GPIO_17_FALLING_INTERRUPT (96) +#define NV2080_NOTIFIERS_GPIO_18_FALLING_INTERRUPT (97) +#define NV2080_NOTIFIERS_GPIO_19_FALLING_INTERRUPT (98) +#define NV2080_NOTIFIERS_GPIO_20_FALLING_INTERRUPT (99) +#define NV2080_NOTIFIERS_GPIO_21_FALLING_INTERRUPT (100) +#define NV2080_NOTIFIERS_GPIO_22_FALLING_INTERRUPT (101) +#define NV2080_NOTIFIERS_GPIO_23_FALLING_INTERRUPT (102) +#define NV2080_NOTIFIERS_GPIO_24_FALLING_INTERRUPT (103) +#define NV2080_NOTIFIERS_GPIO_25_FALLING_INTERRUPT (104) +#define NV2080_NOTIFIERS_GPIO_26_FALLING_INTERRUPT (105) +#define NV2080_NOTIFIERS_GPIO_27_FALLING_INTERRUPT (106) +#define NV2080_NOTIFIERS_GPIO_28_FALLING_INTERRUPT (107) +#define NV2080_NOTIFIERS_GPIO_29_FALLING_INTERRUPT (108) +#define NV2080_NOTIFIERS_GPIO_30_FALLING_INTERRUPT (109) +#define NV2080_NOTIFIERS_GPIO_31_FALLING_INTERRUPT (110) +#define NV2080_NOTIFIERS_ECC_SBE (111) +#define NV2080_NOTIFIERS_ECC_DBE (112) +#define NV2080_NOTIFIERS_STEREO_EMITTER_DETECTION (113) +#define NV2080_NOTIFIERS_GC5_GPU_READY (114) +#define NV2080_NOTIFIERS_SEC2 (115) +#define NV2080_NOTIFIERS_GC6_REFCOUNT_INC (116) +#define NV2080_NOTIFIERS_GC6_REFCOUNT_DEC (117) +#define NV2080_NOTIFIERS_POWER_EVENT (118) +#define NV2080_NOTIFIERS_CLOCKS_CHANGE (119) +#define NV2080_NOTIFIERS_HOTPLUG_PROCESSING_COMPLETE (120) +#define NV2080_NOTIFIERS_PHYSICAL_PAGE_FAULT (121) +#define NV2080_NOTIFIERS_RESERVED_122 (122) +#define NV2080_NOTIFIERS_NVLINK_ERROR_FATAL (123) +#define NV2080_NOTIFIERS_PRIV_REG_ACCESS_FAULT (124) +#define NV2080_NOTIFIERS_NVLINK_ERROR_RECOVERY_REQUIRED (125) +#define NV2080_NOTIFIERS_NVJPG (126) +#define NV2080_NOTIFIERS_NVJPEG0 NV2080_NOTIFIERS_NVJPG +#define NV2080_NOTIFIERS_RESERVED127 (127) +#define NV2080_NOTIFIERS_RESERVED128 (128) +#define NV2080_NOTIFIERS_RESERVED129 (129) +#define NV2080_NOTIFIERS_RESERVED130 (130) +#define NV2080_NOTIFIERS_RESERVED131 (131) +#define NV2080_NOTIFIERS_RESERVED132 (132) +#define NV2080_NOTIFIERS_RESERVED133 (133) +#define NV2080_NOTIFIERS_RUNLIST_AND_ENG_IDLE (134) +#define NV2080_NOTIFIERS_RUNLIST_ACQUIRE 
(135) +#define NV2080_NOTIFIERS_RUNLIST_ACQUIRE_AND_ENG_IDLE (136) +#define NV2080_NOTIFIERS_RUNLIST_IDLE (137) +#define NV2080_NOTIFIERS_TSG_PREEMPT_COMPLETE (138) +#define NV2080_NOTIFIERS_RUNLIST_PREEMPT_COMPLETE (139) +#define NV2080_NOTIFIERS_CTXSW_TIMEOUT (140) +#define NV2080_NOTIFIERS_INFOROM_ECC_OBJECT_UPDATED (141) +#define NV2080_NOTIFIERS_NVTELEMETRY_REPORT_EVENT (142) +#define NV2080_NOTIFIERS_DSTATE_XUSB_PPC (143) +#define NV2080_NOTIFIERS_FECS_CTX_SWITCH (144) +#define NV2080_NOTIFIERS_XUSB_PPC_CONNECTED (145) +#define NV2080_NOTIFIERS_GR0 NV2080_NOTIFIERS_GRAPHICS +#define NV2080_NOTIFIERS_GR1 (146) +#define NV2080_NOTIFIERS_GR2 (147) +#define NV2080_NOTIFIERS_GR3 (148) +#define NV2080_NOTIFIERS_GR4 (149) +#define NV2080_NOTIFIERS_GR5 (150) +#define NV2080_NOTIFIERS_GR6 (151) +#define NV2080_NOTIFIERS_GR7 (152) +#define NV2080_NOTIFIERS_OFA (153) +#define NV2080_NOTIFIERS_DSTATE_HDA (154) +#define NV2080_NOTIFIERS_POISON_ERROR_NON_FATAL (155) +#define NV2080_NOTIFIERS_POISON_ERROR_FATAL (156) +#define NV2080_NOTIFIERS_UCODE_RESET (157) +#define NV2080_NOTIFIERS_PLATFORM_POWER_MODE_CHANGE (158) +#define NV2080_NOTIFIERS_SMC_CONFIG_UPDATE (159) +#define NV2080_NOTIFIERS_INFOROM_RRL_OBJECT_UPDATED (160) +#define NV2080_NOTIFIERS_INFOROM_PBL_OBJECT_UPDATED (161) +#define NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST (162) +#define NV2080_NOTIFIERS_SEC_FAULT_ERROR (163) +#define NV2080_NOTIFIERS_POSSIBLE_ERROR (164) +#define NV2080_NOTIFIERS_MAXCOUNT (165) + +// Indexed GR notifier reference +#define NV2080_NOTIFIERS_GR(x) ((x == 0) ? (NV2080_NOTIFIERS_GR0) : (NV2080_NOTIFIERS_GR1 + (x-1))) +#define NV2080_NOTIFIER_TYPE_IS_GR(x) (((x) == NV2080_NOTIFIERS_GR0) || (((x) >= NV2080_NOTIFIERS_GR1) && ((x) <= NV2080_NOTIFIERS_GR7))) +// Indexed CE notifier reference +#define NV2080_NOTIFIERS_CE(x) (NV2080_NOTIFIERS_CE0 + (x)) +#define NV2080_NOTIFIER_TYPE_IS_CE(x) (((x) >= NV2080_NOTIFIERS_CE0) && ((x) <= NV2080_NOTIFIERS_CE9)) +// Indexed MSENC notifier reference +#define NV2080_NOTIFIERS_NVENC(x) (NV2080_NOTIFIERS_NVENC0 + (x)) +#define NV2080_NOTIFIER_TYPE_IS_NVENC(x) (((x) >= NV2080_NOTIFIERS_NVENC0) && ((x) <= NV2080_NOTIFIERS_NVENC2)) +// Indexed NVDEC notifier reference +#define NV2080_NOTIFIERS_NVDEC(x) (NV2080_NOTIFIERS_NVDEC0 + (x)) + +#define NV2080_NOTIFIER_TYPE_IS_NVDEC(x) (((x) >= NV2080_NOTIFIERS_NVDEC0) && ((x) <= NV2080_NOTIFIERS_NVDEC4)) + +// Indexed NVJPEG notifier reference +#define NV2080_NOTIFIERS_NVJPEG(x) (NV2080_NOTIFIERS_NVJPEG0 + (x)) +#define NV2080_NOTIFIER_TYPE_IS_NVJPEG(x) (((x) >= NV2080_NOTIFIERS_NVJPEG0) && ((x) <= NV2080_NOTIFIERS_NVJPEG0)) + +#define NV2080_NOTIFIERS_GPIO_RISING_INTERRUPT(pin) (NV2080_NOTIFIERS_GPIO_0_RISING_INTERRUPT+(pin)) +#define NV2080_NOTIFIERS_GPIO_FALLING_INTERRUPT(pin) (NV2080_NOTIFIERS_GPIO_0_FALLING_INTERRUPT+(pin)) + +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +/* exported engine defines */ +#define NV2080_ENGINE_TYPE_NULL (0x00000000) +#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001) +#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS +#define NV2080_ENGINE_TYPE_GR1 (0x00000002) +#define NV2080_ENGINE_TYPE_GR2 (0x00000003) +#define NV2080_ENGINE_TYPE_GR3 (0x00000004) +#define NV2080_ENGINE_TYPE_GR4 
(0x00000005) +#define NV2080_ENGINE_TYPE_GR5 (0x00000006) +#define NV2080_ENGINE_TYPE_GR6 (0x00000007) +#define NV2080_ENGINE_TYPE_GR7 (0x00000008) +#define NV2080_ENGINE_TYPE_COPY0 (0x00000009) +#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a) +#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b) +#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c) +#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d) +#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e) +#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f) +#define NV2080_ENGINE_TYPE_COPY7 (0x00000010) +#define NV2080_ENGINE_TYPE_COPY8 (0x00000011) +#define NV2080_ENGINE_TYPE_COPY9 (0x00000012) +#define NV2080_ENGINE_TYPE_BSP (0x00000013) +#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP +#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014) +#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015) +#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016) +#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017) +#define NV2080_ENGINE_TYPE_RESERVED18 (0x00000018) +#define NV2080_ENGINE_TYPE_RESERVED19 (0x00000019) +#define NV2080_ENGINE_TYPE_RESERVED1A (0x0000001a) +#define NV2080_ENGINE_TYPE_MSENC (0x0000001b) +#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */ +#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c) +#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d) +#define NV2080_ENGINE_TYPE_VP (0x0000001e) +#define NV2080_ENGINE_TYPE_ME (0x0000001f) +#define NV2080_ENGINE_TYPE_PPP (0x00000020) +#define NV2080_ENGINE_TYPE_MPEG (0x00000021) +#define NV2080_ENGINE_TYPE_SW (0x00000022) +#define NV2080_ENGINE_TYPE_CIPHER (0x00000023) +#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER +#define NV2080_ENGINE_TYPE_VIC (0x00000024) +#define NV2080_ENGINE_TYPE_MP (0x00000025) +#define NV2080_ENGINE_TYPE_SEC2 (0x00000026) +#define NV2080_ENGINE_TYPE_HOST (0x00000027) +#define NV2080_ENGINE_TYPE_DPU (0x00000028) +#define NV2080_ENGINE_TYPE_PMU (0x00000029) +#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a) +#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b) +#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG +#define NV2080_ENGINE_TYPE_RESERVED2C (0x0000002c) +#define NV2080_ENGINE_TYPE_RESERVED2D (0x0000002d) +#define NV2080_ENGINE_TYPE_RESERVED2E (0x0000002e) +#define NV2080_ENGINE_TYPE_RESERVED2F (0x0000002f) +#define NV2080_ENGINE_TYPE_RESERVED30 (0x00000030) +#define NV2080_ENGINE_TYPE_RESERVED31 (0x00000031) +#define NV2080_ENGINE_TYPE_RESERVED32 (0x00000032) +#define NV2080_ENGINE_TYPE_OFA (0x00000033) +#define NV2080_ENGINE_TYPE_LAST (0x00000034) +#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff) + +#define NV2080_ENGINE_TYPE_COPY_SIZE 10 +#define NV2080_ENGINE_TYPE_NVENC_SIZE 3 + +#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 1 + +#define NV2080_ENGINE_TYPE_NVDEC_SIZE 5 + +#define NV2080_ENGINE_TYPE_GR_SIZE 8 + +// Indexed engines +#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i)) +#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) < NV2080_ENGINE_TYPE_COPY(NV2080_ENGINE_TYPE_COPY_SIZE))) +#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0) + +#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE))) +#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0) + +#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < 
NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE))) +#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0) + +#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE))) +#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0) + +#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i)) +#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE))) +#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0) + +#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST))) + +/* exported client defines */ +#define NV2080_CLIENT_TYPE_TEX (0x00000001) +#define NV2080_CLIENT_TYPE_COLOR (0x00000002) +#define NV2080_CLIENT_TYPE_DEPTH (0x00000003) +#define NV2080_CLIENT_TYPE_DA (0x00000004) +#define NV2080_CLIENT_TYPE_FE (0x00000005) +#define NV2080_CLIENT_TYPE_SCC (0x00000006) +#define NV2080_CLIENT_TYPE_WID (0x00000007) +#define NV2080_CLIENT_TYPE_MSVLD (0x00000008) +#define NV2080_CLIENT_TYPE_MSPDEC (0x00000009) +#define NV2080_CLIENT_TYPE_MSPPP (0x0000000a) +#define NV2080_CLIENT_TYPE_VIC (0x0000000b) +#define NV2080_CLIENT_TYPE_ALLCLIENTS (0xffffffff) + +/* GC5 Gpu Ready event defines */ +#define NV2080_GC5_EXIT_COMPLETE (0x00000001) +#define NV2080_GC5_ENTRY_ABORTED (0x00000002) + +/* Platform Power Mode event defines */ +#define NV2080_PLATFORM_POWER_MODE_CHANGE_COMPLETION (0x00000000) +#define NV2080_PLATFORM_POWER_MODE_CHANGE_ACPI_NOTIFICATION (0x00000001) + +/* NvNotification[] fields and values */ +#define NV2080_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl2080_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv2080Typedef, Nv20Subdevice0; +#define NV2080_TYPEDEF Nv20Subdevice0 + +/* NvAlloc parameters */ +#define NV2080_MAX_SUBDEVICES NV_MAX_SUBDEVICES +typedef struct { + NvU32 subDeviceId; +} NV2080_ALLOC_PARAMETERS; + +/* HDCP Status change notification information */ +typedef struct Nv2080HdcpStatusChangeNotificationRec { + NvU32 displayId; + NvU32 hdcpStatusChangeNotif; +} Nv2080HdcpStatusChangeNotification; + +/* Pstate change notification information */ +typedef struct Nv2080PStateChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 NewPstate; +} Nv2080PStateChangeNotification; + +/* Clocks change notification information */ +typedef struct Nv2080ClocksChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ +} Nv2080ClocksChangeNotification; + +/* WorkLoad Modulation state change notification information*/ +typedef struct Nv2080WorkloadModulationChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 
1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvBool WorkloadModulationEnabled; +} Nv2080WorkloadModulationChangeNotification; + +/* Hotplug notification information */ +typedef struct { + NvU32 plugDisplayMask; + NvU32 unplugDisplayMask; +} Nv2080HotplugNotification; + +/* Power state changing notification information */ +typedef struct { + NvBool bSwitchToAC; + NvBool bGPUCapabilityChanged; + NvU32 displayMaskAffected; +} Nv2080PowerEventNotification; + +/* DP IRQ notification information */ +typedef struct Nv2080DpIrqNotificationRec { + NvU32 displayId; +} Nv2080DpIrqNotification; + +/* XUSB/PPC D-State change notification information */ +typedef struct Nv2080DstateXusbPpcNotificationRec { + NvU32 dstateXusb; + NvU32 dstatePpc; +} Nv2080DstateXusbPpcNotification; + +/* XUSB/PPC Connection status notification information */ +typedef struct Nv2080XusbPpcConnectStateNotificationRec { + NvBool bConnected; +} Nv2080XusbPpcConnectStateNotification; + +/* ACPI event notification information */ +typedef struct Nv2080ACPIEvent { + NvU32 event; +} Nv2080ACPIEvent; + +/* Cooler Zone notification information */ +typedef struct _NV2080_COOLER_DIAG_ZONE_NOTIFICATION_REC { + NvU32 currentZone; +} NV2080_COOLER_DIAG_ZONE_NOTIFICATION_REC; + +/* Thermal Zone notification information */ +typedef struct _NV2080_THERM_DIAG_ZONE_NOTIFICATION_REC { + NvU32 currentZone; +} NV2080_THERM_DIAG_ZONE_NOTIFICATION_REC; + +/* HDCP ref count change notification information */ +typedef struct Nv2080AudioHdcpRequestRec { + NvU32 displayId; + NvU32 requestedState; +} Nv2080AudioHdcpRequest; + +/* Gpu ready event information */ +typedef struct Nv2080GC5GpuReadyParams { + NvU32 event; + NvU32 sciIntr0; + NvU32 sciIntr1; +} Nv2080GC5GpuReadyParams; + +/* Priv reg access fault notification information */ +typedef struct { + NvU32 errAddr; +} Nv2080PrivRegAccessFaultNotification; + +/* HDA D-State change notification information + * See @HDACODEC_DSTATE for definitions + */ +typedef struct Nv2080DstateHdaCodecNotificationRec { + NvU32 dstateHdaCodec; +} Nv2080DstateHdaCodecNotification; + +/* + * Platform Power Mode event information + */ +typedef struct _NV2080_PLATFORM_POWER_MODE_CHANGE_STATUS { + NvU8 platformPowerModeIndex; + NvU8 platformPowerModeMask; + NvU8 eventReason; +} NV2080_PLATFORM_POWER_MODE_CHANGE_STATUS; + +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_INDEX 7:0 +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_MASK 15:8 +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_REASON 23:16 + +/* + * ENGINE_INFO_TYPE_NV2080 of the engine for which the QOS interrupt has been raised + */ +typedef struct { + NvU32 engineType; +} Nv2080QosIntrNotification; + +typedef struct { + NvU64 physAddress NV_ALIGN_BYTES(8); +} Nv2080EccDbeNotification; + +/* + * LPWR DIFR Prefetch Request - Size of L2 Cache + */ +typedef struct { + NvU32 l2CacheSize; +} Nv2080LpwrDifrPrefetchNotification; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl2080_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h new file mode 100644 index 0000000..e1301bd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl2081_h_ +#define _cl2081_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV2081_BINAPI (0x00002081) + +typedef struct{ + NvU32 reserved; +}NV2081_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h new file mode 100644 index 0000000..8beda61 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl2082_h_ +#define _cl2082_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV2082_BINAPI_PRIVILEGED (0x00002082) + +typedef struct{ + NvU32 reserved; +}NV2082_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h new file mode 100644 index 0000000..2a8e3c9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl30f1.finn +// + +#include "cl30f1_notification.h" + +/* class NV30_GSYNC */ +#define NV30_GSYNC (0x000030F1) + +#define NV30F1_GSYNC_CONNECTOR_ONE (0) +#define NV30F1_GSYNC_CONNECTOR_TWO (1) +#define NV30F1_GSYNC_CONNECTOR_THREE (2) +#define NV30F1_GSYNC_CONNECTOR_FOUR (3) + +#define NV30F1_GSYNC_CONNECTOR_PRIMARY NV30F1_GSYNC_CONNECTOR_ONE +#define NV30F1_GSYNC_CONNECTOR_SECONDARY NV30F1_GSYNC_CONNECTOR_TWO + +#define NV30F1_GSYNC_CONNECTOR_COUNT (4) + +/* NvRmAlloc parameters */ +#define NV30F1_MAX_GSYNCS (0x0000004) + +#define NV30F1_ALLOC_PARAMETERS_MESSAGE_ID (0x30f1U) + +typedef struct NV30F1_ALLOC_PARAMETERS { + NvU32 gsyncInstance; +} NV30F1_ALLOC_PARAMETERS; + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h new file mode 100644 index 0000000..290eb5c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl30f1_notification_h_ +#define _cl30f1_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * A client should use NV01_EVENT_OS_EVENT as hClass and NV30F1_GSYNC_NOTIFIERS_* as + * notify index when allocating the event, if separate event notifications are needed for + * separate event types. + * + * A client should use NV01_EVENT_KERNEL_CALLBACK as hClass and + * NV30F1_GSYNC_NOTIFIERS_ALL as notify index, if a single event is required. + * In this case RM would set event data equal to a pointer to an NvNotification structure. + * The info32 field of the NvNotification structure would be equal to a bitmask of + * NV30F1_GSYNC_NOTIFIERS_* values. + */ + +/* NvNotification[] fields and values */ + +/* Framelock sync gain and loss events. These are connector specific events. */ +#define NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(c) (0x00+(c)) +#define NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(c) (0x04+(c)) + +/* Framelock stereo gain and loss events. These are connector specific events. */ +#define NV30F1_GSYNC_NOTIFIERS_STEREO_LOSS(c) (0x08+(c)) +#define NV30F1_GSYNC_NOTIFIERS_STEREO_GAIN(c) (0x0C+(c)) + +/* House cable gain(plug in) and loss(plug out) events. */ +#define NV30F1_GSYNC_NOTIFIERS_HOUSE_GAIN (0x10) +#define NV30F1_GSYNC_NOTIFIERS_HOUSE_LOSS (0x11) + +/* RJ45 cable gain(plug in) and loss(plug out) events. */ +#define NV30F1_GSYNC_NOTIFIERS_RJ45_GAIN (0x12) +#define NV30F1_GSYNC_NOTIFIERS_RJ45_LOSS (0x13) + +#define NV30F1_GSYNC_NOTIFIERS_MAXCOUNT (0x14) + +/* + * For handling all event types. + * Note for Windows, it only handles NV01_EVENT_KERNEL_CALLBACK_EX; as for NV01_EVENT_OS_EVENT, it can only + * signal an event but not hand over any information. + */ +#define NV30F1_GSYNC_NOTIFIERS_ALL NV30F1_GSYNC_NOTIFIERS_MAXCOUNT + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl30f1_notification_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h new file mode 100644 index 0000000..9ba7c3b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl402c_h_ +#define _cl402c_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* I2C operations */ +#define NV40_I2C (0x0000402c) + +typedef volatile struct _cl402c_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv402cTypedef, Nv40I2c; +#define NV402C_TYPEDEF Nv40I2c + + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl402c_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h new file mode 100644 index 0000000..d8a7f2d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl5070.finn +// + +#include "cl5070_notification.h" + +#define NV50_DISPLAY (0x00005070) + +#define NV5070_ALLOCATION_PARAMETERS_MESSAGE_ID (0x5070U) + +typedef struct NV5070_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV5070_ALLOCATION_PARAMETERS; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h new file mode 100644 index 0000000..fd0628a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl5070_notification_h_ +#define _cl5070_notification_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* event values */ +#define NV5070_NOTIFIERS_SW (0) +#define NV5070_NOTIFIERS_MAXCOUNT (1) + +#define NV5070_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV5070_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV5070_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV5070_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV5070_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl5070_notification_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h new file mode 100644 index 0000000..599a47b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl84A0_h_ +#define _cl84A0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* + * Class definitions for creating a memory descriptor from a list of page numbers + * in RmAllocMemory. No memory is allocated: only a memory descriptor and + * memory object are created for later use in other calls. These classes + * are used by vGPU to create references to memory assigned to a guest VM. + * In all cases, the list is passed as reference, in the pAddress argument + * of RmAllocMemory, to a Nv01MemoryList structure (cast to a void **). + */ + +/* List of system memory physical page numbers */ +#define NV01_MEMORY_LIST_SYSTEM (0x00000081) +/* List of frame buffer physical page numbers */ +#define NV01_MEMORY_LIST_FBMEM (0x00000082) +/* List of page numbers relative to the start of the specified object */ +#define NV01_MEMORY_LIST_OBJECT (0x00000083) + +/* + * List structure of NV01_MEMORY_LIST_* classes + * + * The pageNumber array is variable in length, with pageCount elements, + * so the allocated size of the structure must reflect that. + * + * FBMEM items apply only to NV01_MEMORY_LIST_FBMEM and to + * NV01_MEMORY_LIST_OBJECT when the underlying object is + * FBMEM (must be zero for other cases) + * + * Nv01MemoryList is deprecated. NV_MEMORY_LIST_ALLOCATION_PARAMS should be used + * instead. + */ +typedef struct Nv01MemoryListRec { + NvHandle hClient; /* client to which object belongs + * (may differ from client creating the mapping). + * May be NV01_NULL_OBJECT, in which case client + * handle is used */ + NvHandle hParent; /* device with which object is associated. + * Must be NV01_NULL_OBJECT if hClient is NV01_NULL_OBJECT. + * Must not be NV01_NULL_OBJECT if hClient is + * not NV01_NULL_OBJECT. 
*/ + NvHandle hObject; /* object to which pages are relative + * (NV01_NULL_OBJECT for NV01_MEMORY_LIST_SYSTEM + * and NV01_MEMORY_LIST_FBMEM) */ + NvHandle hHwResClient;/* client associated with the backdoor vnc surface*/ + NvHandle hHwResDevice;/* device associated with the backdoor vnc surface*/ + NvHandle hHwResHandle;/* handle to hardware resources allocated to + * backdoor vnc surface*/ + NvU32 pteAdjust; /* offset of data in first page */ + NvU32 type; /* FBMEM: NVOS32_TYPE_* */ + NvU32 flags; /* FBMEM: NVOS32_ALLOC_FLAGS_* */ + NvU32 attr; /* FBMEM: NVOS32_ATTR_* */ + NvU32 attr2; /* FBMEM: NVOS32_ATTR2_* */ + NvU32 height; /* FBMEM: height in pixels */ + NvU32 width; /* FBMEM: width in pixels */ + NvU32 format; /* FBMEM: memory kind */ + NvU32 comprcovg; /* FBMEM: compression coverage */ + NvU32 zcullcovg; /* FBMEM: Z-cull coverage */ + NvU32 pageCount; /* count of elements in pageNumber array */ + NvU32 heapOwner; /* heap owner information from client */ + NvU32 reserved_1; /* reserved: must be 0 */ + NvU64 NV_DECLARE_ALIGNED(guestId,8); + /* ID of the guest VM. e.g., domain ID in case of Xen */ + NvU64 NV_DECLARE_ALIGNED(rangeBegin,8); + /* preferred VA range start address */ + NvU64 NV_DECLARE_ALIGNED(rangeEnd,8); + /* preferred VA range end address */ + NvU32 pitch; + NvU32 ctagOffset; + NvU64 size; + NvU64 align; + NvU64 pageNumber[1]; /* variable length array of page numbers */ +} Nv01MemoryList; + +/* + * NV_MEMORY_LIST_ALLOCATION_PARAMS - Allocation params to create memory list + * through NvRmAlloc. + */ +typedef struct +{ + NvHandle hClient; /* client to which object belongs + * (may differ from client creating the mapping). + * May be NV01_NULL_OBJECT, in which case client + * handle is used */ + NvHandle hParent; /* device with which object is associated. + * Must be NV01_NULL_OBJECT if hClient is NV01_NULL_OBJECT. + * Must not be NV01_NULL_OBJECT if hClient is + * not NV01_NULL_OBJECT. */ + NvHandle hObject; /* object to which pages are relative + * (NV01_NULL_OBJECT for NV01_MEMORY_LIST_SYSTEM + * and NV01_MEMORY_LIST_FBMEM) */ + NvHandle hHwResClient;/* client associated with the backdoor vnc surface*/ + NvHandle hHwResDevice;/* device associated with the backdoor vnc surface*/ + NvHandle hHwResHandle;/* handle to hardware resources allocated to + * backdoor vnc surface*/ + NvU32 pteAdjust; /* offset of data in first page */ + NvU32 reserved_0; /* reserved: must be 0 */ + NvU32 type; /* FBMEM: NVOS32_TYPE_* */ + NvU32 flags; /* FBMEM: NVOS32_ALLOC_FLAGS_* */ + NvU32 attr; /* FBMEM: NVOS32_ATTR_* */ + NvU32 attr2; /* FBMEM: NVOS32_ATTR2_* */ + NvU32 height; /* FBMEM: height in pixels */ + NvU32 width; /* FBMEM: width in pixels */ + NvU32 format; /* FBMEM: memory kind */ + NvU32 comprcovg; /* FBMEM: compression coverage */ + NvU32 zcullcovg; /* FBMEM: Z-cull coverage */ + NvU32 pageCount; /* count of elements in pageNumber array */ + NvU32 heapOwner; /* heap owner information from client */ + + NvU64 NV_DECLARE_ALIGNED(guestId,8); + /* ID of the guest VM.
e.g., domain ID in case of Xen */ + NvU64 NV_DECLARE_ALIGNED(rangeBegin,8); + /* preferred VA range start address */ + NvU64 NV_DECLARE_ALIGNED(rangeEnd,8); + /* preferred VA range end address */ + NvU32 pitch; + NvU32 ctagOffset; + NvU64 size; + NvU64 align; + NvP64 pageNumberList NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); + NvU32 flagsOs02; +} NV_MEMORY_LIST_ALLOCATION_PARAMS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl84A0_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h new file mode 100644 index 0000000..6684889 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl900e_h_ +#define _cl900e_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define MPS_COMPUTE (0x0000900E) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl900e_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h new file mode 100644 index 0000000..1ec334b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef SDK_CL9010_H +#define SDK_CL9010_H + +#include "nvtypes.h" + +#define NV9010_VBLANK_CALLBACK 0x9010 + +typedef void (*OSVBLANKCALLBACKPROC)(void * pParm1, void * pParm2); + +typedef struct +{ + OSVBLANKCALLBACKPROC pProc; // Routine to call at vblank time + NvV32 LogicalHead; // Logical Head + void *pParm1; // pParm1 + void *pParm2; // pParm2 +} NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS; + +#endif // SDK_CL9010_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h new file mode 100644 index 0000000..ee9ddaa --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl907d_sw_spare_h_ +#define _cl907d_sw_spare_h_ + +/* This file is *not* auto-generated. */ + +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF 1:0 +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_NO_PREF (0x00000000) +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_GSYNC (0x00000001) + +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY 1:0 +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY_FALSE (0x00000000) +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY_TRUE (0x00000001) + +#endif // _cl907d_sw_spare_h_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h new file mode 100644 index 0000000..2b8c2e5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h @@ -0,0 +1,244 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90cd_h_ +#define _cl90cd_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +* NV_EVENT_BUFFER +* An event buffer is shared between user (RO) and kernel (RW). +* It holds debug/profile event data provided by the kernel. +* +*/ +#define NV_EVENT_BUFFER (0x000090CD) + +/* +* NV_EVENT_BUFFER_HEADER +* This structure holds the get and put values used to index/consume the event buffer, +* along with other RO data shared with the user. +* +* recordGet/Put: These "pointers" work in the traditional sense: +* - when GET==PUT, the fifo is empty +* - when GET==PUT+1, the fifo is full +* This implies a full fifo always has one "wasted" element. +* +* recordCount: This is the total number of records added to the buffer by the kernel. +* This information is filled out when the buffer is set up to keep newest records. +* recordCount = number of records currently in the buffer + overflow count. +* +* recordDropcount: This is the number of event records that are dropped because the +* buffer is full. +* This information is filled out when the event buffer is set up to keep oldest records. +* +* vardataDropcount: Event buffer provides a dual stream of data, where the record can contain +* an optional offset to a variable length data buffer. +* This is the number of variable data records that are dropped because the +* buffer is full. +* This information is filled out when the event buffer is set up to keep oldest records. +*/ +typedef struct +{ + NvU32 recordGet; + NvU32 recordPut; + NvU64 recordCount; + NvU64 recordDropcount; + NvU64 vardataDropcount; +} NV_EVENT_BUFFER_HEADER; + +/* +* NV_EVENT_BUFFER_RECORD_HEADER +* This is the header added to each event record. +* This helps identify the event type and the variable length data associated with it. +*/ +typedef struct +{ + NvU16 type; + NvU16 subtype; + NvU32 varData; // [31: 5] = (varDataOffset >> 5); 0 < vardataOffset <= vardataBufferSize + // [ 4: 1] = reserved for future use + // [ 0: 0] = isVardataStartOffsetZero +} NV_EVENT_BUFFER_RECORD_HEADER; + +/* +* NV_EVENT_BUFFER_RECORD +* This structure defines a generic event record. +* The size of this record is fixed for a given event buffer. +* It is configured by the user during allocation.
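+* +* A minimal consumer sketch (illustrative only, not part of this class; +* pHeader/pRecords stand for the mapped header and record buffers, and +* recordSize/recordCount are the values supplied at allocation time): +* +* NvU32 get = pHeader->recordGet; +* while (get != pHeader->recordPut) +* { +* NV_EVENT_BUFFER_RECORD *pRec = (NV_EVENT_BUFFER_RECORD *) +* ((NvU8 *)pRecords + ((NvU64)get * recordSize)); +* // consume pRec->recordHeader.type/subtype and inlinePayload here +* get = (get + 1 == recordCount) ? 0 : (get + 1); +* } +* +* Because the user mapping is read-only, the consumed GET value is reported +* back to the kernel (e.g. through the event buffer control interface) rather +* than written into recordGet directly.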
+*/ +typedef struct +{ + NV_EVENT_BUFFER_RECORD_HEADER recordHeader; + NvU64 inlinePayload[1] NV_ALIGN_BYTES(8); // 1st element of the payload/data + // Do not add more elements here, inlinePayload can contain more than one element +} NV_EVENT_BUFFER_RECORD; + +#define NV_EVENT_VARDATA_GRANULARITY 32 +#define NV_EVENT_VARDATA_OFFSET_MASK (~(NV_EVENT_VARDATA_GRANULARITY - 1)) +#define NV_EVENT_VARDATA_START_OFFSET_ZERO 0x01 + +/* +* NV_EVENT_BUFFER_ALLOC_PARAMETERS +* +* bufferHeader [OUT] +* This is the user VA offset pointing to the base of NV_EVENT_BUFFER_HEADER. +* +* recordBuffer [OUT] +* This is the user VA offset pointing to the base of the event record buffer. +* This buffer will contain NV_EVENT_BUFFER_RECORDs added by the kernel. +* +* recordSize [IN] +* This is the size of NV_EVENT_BUFFER_RECORD used by this buffer. +* +* recordCount [IN] +* This is the number of records that recordBuffer can hold. +* +* vardataBuffer [OUT] +* This is the user VA offset pointing to the base of the variable data buffer. +* +* vardataBufferSize [IN] +* Size of the variable data buffer in bytes. +* +* recordsFreeThreshold [IN] +* This is the notification threshold for the event record buffer. +* This field specifies the number of records that the buffer can +* still hold before it gets full. +* +* vardataFreeThreshold [IN] +* This is the notification threshold for the vardata buffer. +* This field specifies the number of bytes that the buffer can +* still hold before it gets full. +* +* notificationHandle [IN] +* When recordsFreeThreshold or vardataFreeThreshold is met, the kernel will notify +* the user on this handle. If notificationHandle = NULL, event notification +* is disabled. This is an OS specific notification handle. +* It is a Windows event handle or an fd pointer on Linux. +* +* hSubDevice [IN] +* An event buffer can either hold sub-device related events or system events. +* This handle specifies the sub-device to associate this buffer with. +* If this parameter is NULL, then the buffer is tied to the client instead. +* +* flags [IN] +* Set to 0 by default. +* This field can hold any future flags to configure the buffer if needed. +* +* hBufferHeader [IN] +* The backing memory object for the buffer header. Must be a NV01_MEMORY_DEVICELESS object. +* On Windows platforms, a buffer will be internally generated if hBufferHeader is 0. +* +* hRecordBuffer [IN] +* The backing memory object for the record buffer. Must be a NV01_MEMORY_DEVICELESS object. +* On Windows platforms, a buffer will be internally generated if hRecordBuffer is 0. +* +* hVardataBuffer [IN] +* The backing memory object for the vardata buffer. Must be a NV01_MEMORY_DEVICELESS object. +* On Windows platforms, a buffer will be internally generated if hVardataBuffer is 0. +* +*/ +typedef struct +{ + NvP64 bufferHeader NV_ALIGN_BYTES(8); + NvP64 recordBuffer NV_ALIGN_BYTES(8); + NvU32 recordSize; + NvU32 recordCount; + NvP64 vardataBuffer NV_ALIGN_BYTES(8); + NvU32 vardataBufferSize; + NvU32 recordsFreeThreshold; + NvU64 notificationHandle NV_ALIGN_BYTES(8); + NvU32 vardataFreeThreshold; + NvHandle hSubDevice; + NvU32 flags; + + NvHandle hBufferHeader; + NvHandle hRecordBuffer; + NvHandle hVardataBuffer; +} NV_EVENT_BUFFER_ALLOC_PARAMETERS; + +/* +* NV_EVENT_BUFFER_BIND +* This class is used to allocate an Event Type object bound to a given event buffer. +* This allocation call associates an event type with an event buffer.
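+* For instance, a client might fill NV_EVENT_BUFFER_BIND_PARAMETERS (defined +* below) along these lines; this is an illustrative sketch in which +* hEventBuffer is a hypothetical handle to an NV_EVENT_BUFFER allocation: +* +* NV_EVENT_BUFFER_BIND_PARAMETERS params = { 0 }; +* params.bufferHandle = hEventBuffer; +* params.eventType = NV2080_NOTIFIERS_PSTATE_CHANGE; +* // hClientTarget/hSrcResource stay 0: pstate changes are per GPU +*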
* Multiple event types can be associated with the same buffer as long as they belong to +* the same category i.e. either sub-device or system. +* When the event buffer is enabled, if an event bound to this buffer occurs, +* some relevant data gets added to it. +* cl2080.h has a list of sub-device events that can be associated with a buffer +* cl0000.h has a list of system events that can be associated with a buffer +* These defines are also used in class NV01_EVENT_OS_EVENT (0x79) to get event notification +* and class NV01_EVENT_KERNEL_CALLBACK_EX (0x7E) to get kernel callbacks. +* This class extends that support to additionally get relevant data in an event buffer +* +*/ +#define NV_EVENT_BUFFER_BIND (0x0000007F) + +/* +* NV_EVENT_BUFFER_BIND_PARAMETERS +* +* bufferHandle [IN] +* Event buffer handle used to bind the given event type +* +* eventType [IN] +* This is one of the eventTypeIDs from cl2080.h/cl0000.h +* e.g. NV2080_NOTIFIERS_PSTATE_CHANGE +* +* eventSubtype [IN] +* Event subtype for a given type of event. +* This field is optional depending on whether an event type has a subtype. +* +* hClientTarget [IN] +* Handle of the target client whose events are to be bound to the given buffer +* e.g. context switch events can be tracked for a given client. +* This field is optional depending on the event type. +* e.g. pstate change events are per gpu but do not depend on a client. +* +* hSrcResource [IN] +* source resource handle for the event type +* e.g. channel handle: RC/context switch can be tracked for a given channel +* This field is optional depending on the event type. +* e.g. pstate change events are per gpu and cannot be sub-categorized +* +* KernelCallbackdata [IN] +* This field is reserved for KERNEL ONLY clients. +* +*/ +typedef struct +{ + NvHandle bufferHandle; + NvU16 eventType; + NvU16 eventSubtype; + NvHandle hClientTarget; + NvHandle hSrcResource; + NvP64 KernelCallbackdata NV_ALIGN_BYTES(8); +} NV_EVENT_BUFFER_BIND_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl90cd_h_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h new file mode 100644 index 0000000..182640a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90ec_h_ +#define _cl90ec_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* Class within the subdevice used for communicating with HDACODEC */ +#define GF100_HDACODEC (0x000090EC) + + /* pio method data structure */ +typedef volatile struct _cl90ec_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv90ECTypedef, GF100Hdacodec; +#define NV90EC_TYPEDEF GF100Hdacodec + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl90ec_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h new file mode 100644 index 0000000..7523ef1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl90f1_h_ +#define _cl90f1_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define FERMI_VASPACE_A (0x000090f1) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl90f1_h_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h new file mode 100644 index 0000000..9f774a6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl9170.finn +// + +#define NV9170_DISPLAY (0x00009170) + +#define NV9170_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9170U) + +typedef struct NV9170_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9170_ALLOCATION_PARAMETERS; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h new file mode 100644 index 0000000..dcbd90b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _cl9171_h_ +#define _cl9171_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9171_DISP_SF_USER 0x9171 + +typedef volatile struct _cl9171_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9171DispSfUser, Nv9171DispSfUserMap; + +#define NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9171_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9171_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9171_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9171_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9171_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define 
NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define 
NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define 
NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A 
*/ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define 
NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */
+#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _cl9171_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h
new file mode 100644
index 0000000..b6d6239
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl917a_h_
+#define _cl917a_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV917A_CURSOR_CHANNEL_PIO (0x0000917A)
+
+typedef volatile struct {
+    NvV32 Reserved00[0x2];
+    NvV32 Free;                          // 0x00000008 - 0x0000000B
+    NvV32 Reserved01[0x1D];
+    NvV32 Update;                        // 0x00000080 - 0x00000083
+    NvV32 SetCursorHotSpotPointsOut[2];  // 0x00000084 - 0x0000008B
+    NvV32 Reserved02[0x3DD];
+} GK104DispCursorControlPio;
+
+#define NV917A_FREE (0x00000008)
+#define NV917A_FREE_COUNT 5:0
+#define NV917A_UPDATE (0x00000080)
+#define NV917A_UPDATE_INTERLOCK_WITH_CORE 0:0
+#define NV917A_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NV917A_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT(b) (0x00000084 + (b)*0x00000004)
+#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT_X 15:0
+#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT_Y 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl917a_h
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h
new file mode 100644
index 0000000..05270e2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + + +#ifndef _cl917b_h_ +#define _cl917b_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917B_OVERLAY_IMM_CHANNEL_PIO (0x0000917B) + +typedef volatile struct { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x1D]; + NvV32 Update; // 0x00000080 - 0x00000083 + NvV32 SetPointsOut[2]; // 0x00000084 - 0x0000008B + NvV32 Reserved02[0x1]; + NvV32 AwakenOnceFlippedTo; // 0x00000090 - 0x00000093 + NvV32 Reserved03[0x3DB]; +} GK104DispOverlayImmControlPio; + +#define NV917B_FREE (0x00000008) +#define NV917B_FREE_COUNT 5:0 +#define NV917B_UPDATE (0x00000080) +#define NV917B_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917B_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917B_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917B_SET_POINTS_OUT(b) (0x00000084 + (b)*0x00000004) +#define NV917B_SET_POINTS_OUT_X 15:0 +#define NV917B_SET_POINTS_OUT_Y 31:16 +#define NV917B_AWAKEN_ONCE_FLIPPED_TO (0x00000090) +#define NV917B_AWAKEN_ONCE_FLIPPED_TO_AWAKEN_COUNT 11:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917b_h + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h new file mode 100644 index 0000000..2b7c6a2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h @@ -0,0 +1,298 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl917c_h_ +#define _cl917c_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917C_BASE_CHANNEL_DMA (0x0000917C) + +#define NV_DISP_BASE_NOTIFIER_1 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1_SIZEOF 0x00000004 +#define NV_DISP_BASE_NOTIFIER_1__0 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT 15:0 +#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP 29:16 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS 31:30 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED 0x00000002 + + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV917C_DMA 0x00000000 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_METHOD 0x00000000 +#define NV917C_DMA_OPCODE_JUMP 0x00000001 +#define NV917C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_METHOD 0x00000000 +#define NV917C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917C_DMA_METHOD_COUNT 27:18 +#define NV917C_DMA_METHOD_OFFSET 11:2 +#define NV917C_DMA_DATA 31:0 +#define NV917C_DMA_DATA_NOP 0x00000000 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_JUMP 0x00000001 +#define NV917C_DMA_JUMP_OFFSET 11:2 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917C_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV917C_PUT (0x00000000) +#define NV917C_PUT_PTR 11:2 +#define NV917C_GET (0x00000004) +#define NV917C_GET_PTR 11:2 +#define NV917C_GET_SCANLINE (0x00000010) +#define NV917C_GET_SCANLINE_LINE 15:0 +#define NV917C_UPDATE (0x00000080) +#define NV917C_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917C_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917C_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917C_UPDATE_SPECIAL_HANDLING 25:24 +#define 
NV917C_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV917C_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV917C_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV917C_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV917C_SET_PRESENT_CONTROL (0x00000084) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8 +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002) +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE 2:2 +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV917C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16 +#define NV917C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10 +#define NV917C_SET_PRESENT_CONTROL_MODE 1:0 +#define NV917C_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV917C_SET_SEMAPHORE_CONTROL (0x00000088) +#define NV917C_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY 26:26 +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY_DISABLE (0x00000000) +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY_ENABLE (0x00000001) +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917C_SET_SEMAPHORE_ACQUIRE (0x0000008C) +#define NV917C_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV917C_SET_SEMAPHORE_RELEASE (0x00000090) +#define NV917C_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV917C_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV917C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV917C_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV917C_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV917C_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV917C_SET_NOTIFIER_CONTROL_DELAY 26:26 +#define NV917C_SET_NOTIFIER_CONTROL_DELAY_DISABLE (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_DELAY_ENABLE (0x00000001) +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917C_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV917C_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV917C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV917C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV917C_SET_BASE_LUT_LO (0x000000E0) +#define NV917C_SET_BASE_LUT_LO_ENABLE 31:30 +#define NV917C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV917C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV917C_SET_BASE_LUT_LO_MODE 27:24 +#define NV917C_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV917C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV917C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE 
(0x00000004) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917C_SET_BASE_LUT_HI (0x000000E4) +#define NV917C_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV917C_SET_OUTPUT_LUT_LO (0x000000E8) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE 31:30 +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV917C_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV917C_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917C_SET_OUTPUT_LUT_HI (0x000000EC) +#define NV917C_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV917C_SET_CONTEXT_DMA_LUT (0x000000FC) +#define NV917C_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV917C_SET_PROCESSING (0x00000110) +#define NV917C_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917C_SET_CONVERSION_RED (0x00000114) +#define NV917C_SET_CONVERSION_RED_GAIN 15:0 +#define NV917C_SET_CONVERSION_RED_OFS 31:16 +#define NV917C_SET_CONVERSION_GRN (0x00000118) +#define NV917C_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917C_SET_CONVERSION_GRN_OFS 31:16 +#define NV917C_SET_CONVERSION_BLU (0x0000011C) +#define NV917C_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917C_SET_CONVERSION_BLU_OFS 31:16 +#define NV917C_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV917C_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV917C_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV917C_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV917C_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV917C_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV917C_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV917C_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV917C_SET_CSC_RED2RED (0x00000140) +#define NV917C_SET_CSC_RED2RED_OWNER 31:31 +#define NV917C_SET_CSC_RED2RED_OWNER_CORE (0x00000000) +#define NV917C_SET_CSC_RED2RED_OWNER_BASE (0x00000001) +#define NV917C_SET_CSC_RED2RED_COEFF 18:0 +#define NV917C_SET_CSC_GRN2RED (0x00000144) +#define NV917C_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917C_SET_CSC_BLU2RED (0x00000148) +#define NV917C_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV917C_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917C_SET_CSC_RED2GRN (0x00000150) +#define NV917C_SET_CSC_RED2GRN_COEFF 18:0 +#define NV917C_SET_CSC_GRN2GRN (0x00000154) +#define NV917C_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917C_SET_CSC_BLU2GRN (0x00000158) +#define NV917C_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV917C_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917C_SET_CSC_RED2BLU (0x00000160) +#define 
NV917C_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917C_SET_CSC_GRN2BLU (0x00000164) +#define NV917C_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917C_SET_CSC_BLU2BLU (0x00000168) +#define NV917C_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV917C_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV917C_SET_SPARE (0x000003BC) +#define NV917C_SET_SPARE_UNUSED 31:0 +#define NV917C_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV917C_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV917C_SURFACE_SET_OFFSET(a,b) (0x00000400 + (a)*0x00000020 + (b)*0x00000004) +#define NV917C_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV917C_SURFACE_SET_SIZE(a) (0x00000408 + (a)*0x00000020) +#define NV917C_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV917C_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV917C_SURFACE_SET_STORAGE(a) (0x0000040C + (a)*0x00000020) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV917C_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV917C_SURFACE_SET_PARAMS(a) (0x00000410 + (a)*0x00000020) +#define NV917C_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV917C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV917C_SURFACE_SET_PARAMS_GAMMA 2:2 +#define NV917C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT 5:4 +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917c_h diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h new file mode 100644 index 0000000..39e88c0 --- /dev/null +++ 
b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl917c_sw_spare_h_
+#define _cl917c_sw_spare_h_
+
+/* This file is *not* auto-generated. */
+
+/* NV917C_SET_SPARE_PRE_UPDATE_TRAP is an alias of NV917C_SET_SPARE_NOOP(0) */
+#define NV917C_SET_SPARE_PRE_UPDATE_TRAP (0x000003C0)
+#define NV917C_SET_SPARE_PRE_UPDATE_TRAP_UNUSED 31:0
+
+/* NV917C_SET_SPARE_POST_UPDATE_TRAP is an alias of NV917C_SET_SPARE_NOOP(1) */
+#define NV917C_SET_SPARE_POST_UPDATE_TRAP (0x000003C4)
+#define NV917C_SET_SPARE_POST_UPDATE_TRAP_UNUSED 31:0
+
+
+#endif /* _cl917c_sw_spare_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h
new file mode 100644
index 0000000..4f70d81
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h
@@ -0,0 +1,1551 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + + +#ifndef _cl917d_h_ +#define _cl917d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917D_CORE_CHANNEL_DMA (0x0000917D) + +#define NV917D_CORE_NOTIFIER_3 0x00000000 +#define NV917D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV917D_CORE_NOTIFIER_3__1 0x00000001 +#define NV917D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV917D_CORE_NOTIFIER_3__2 0x00000002 +#define NV917D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV917D_CORE_NOTIFIER_3__3 0x00000003 +#define NV917D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
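The high:low pairs used for every field in these headers (for example SINGLE_TMDS_A 8:8 or DP_CLK_MAX 7:0) are meant to be consumed by DRF-style macros; the kernel tree ships the real family (DRF_BASE, DRF_EXTENT, DRF_VAL and friends) in nvmisc.h. Below is a minimal sketch of the trick with illustrative X_DRF_* names that are not part of this patch.

/* A field define such as 31:16 expands inside a conditional expression:
 * (0 ? 31:16) evaluates to 16 (the low bit) and (1 ? 31:16) to 31 (the
 * high bit), which is enough to derive a shift and a mask. */
#define X_DRF_BASE(drf)   (0 ? drf)
#define X_DRF_EXTENT(drf) (1 ? drf)
#define X_DRF_SHIFT(drf)  (X_DRF_BASE(drf) % 32)
#define X_DRF_MASK(drf)   (0xFFFFFFFFU >> (31 - X_DRF_EXTENT(drf) % 32 + X_DRF_BASE(drf) % 32))
#define X_DRF_VAL(drf, v) (((v) >> X_DRF_SHIFT(drf)) & X_DRF_MASK(drf))

/* Example: extract the DP_CLK_MAX byte (7:0) from a CAP_SOR0_21 word:
 *   dpClkMax = X_DRF_VAL(NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX, capWord);
 */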
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 
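A client that has pointed the core channel's notifier at this memory typically spins on the COMPLETION_0 dword defined near the top of this header. A minimal sketch, assuming the notifier has been mapped as an array of 32-bit words; the busy-wait and the missing timeout are simplifications for illustration.

#include <stdint.h>

static void wait_core_notifier_done(const volatile uint32_t *notifier)
{
    /* _COMPLETION_0_DONE occupies bit 0:0 of dword NV917D_CORE_NOTIFIER_3_COMPLETION_0 */
    while ((notifier[NV917D_CORE_NOTIFIER_3_COMPLETION_0] & 0x1)
           != NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE)
        ; /* a real client would bound this loop with a timeout */
}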
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 
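The CAPABILITIES_5 and CAPABILITIES_6 dwords earlier in this notifier pack one 4-bit usage code per lock pin, low nibble first: pin 0 at 3:0, pin 1 at 7:4, and so on, with pins 8 through F continuing in CAPABILITIES_6. A sketch of decoding one pin, again assuming a 32-bit-word mapping of the notifier; the helper name is illustrative.

#include <stdint.h>

static uint32_t lock_pin_usage(const volatile uint32_t *notifier, unsigned pin)
{
    uint32_t word = notifier[(pin < 8) ? NV917D_CORE_NOTIFIER_3_CAPABILITIES_5
                                       : NV917D_CORE_NOTIFIER_3_CAPABILITIES_6];
    /* the result compares against the *_USAGE_* codes, e.g.
     * NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK */
    return (word >> (4 * (pin % 8))) & 0xF;
}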
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:27
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:27
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:27
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:27
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:27
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
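// The CAP_* names above come in pairs: the bare name (for example
// NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27) appears to be the
// 32-bit word index within the capabilities notifier, and the suffixed
// names give high:low bit ranges inside that word.  A minimal
// hand-expanded sketch of reading one field, assuming the notifier is
// CPU-mapped as an array of 32-bit words; production code would go
// through the DRF helper macros rather than open-coded shifts and masks.

#include <stdint.h>

// Max DP clock reported for SOR3: bits 7:0 of notifier word 0x1B
// (NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX above).
static inline uint32_t sor3_dp_clk_max(const volatile uint32_t *caps)
{
    return caps[0x0000001B] & 0xFFu;
}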
+
+
+// dma opcode instructions
+#define NV917D_DMA 0x00000000
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_METHOD 0x00000000
+#define NV917D_DMA_OPCODE_JUMP 0x00000001
+#define NV917D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV917D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_METHOD 0x00000000
+#define NV917D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV917D_DMA_METHOD_COUNT 27:18
+#define NV917D_DMA_METHOD_OFFSET 11:2
+#define NV917D_DMA_DATA 31:0
+#define NV917D_DMA_DATA_NOP 0x00000000
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_JUMP 0x00000001
+#define NV917D_DMA_JUMP_OFFSET 11:2
+#define NV917D_DMA_OPCODE 31:29
+#define NV917D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV917D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
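// A sketch of how the DMA fields above combine into a single
// push-buffer header word; the helper name is illustrative (driver
// code normally builds these words with the DRF macro family), but the
// bit positions come straight from the defines: OPCODE in 31:29,
// METHOD_COUNT in 27:18, METHOD_OFFSET in 11:2.

#include <stdint.h>

// 'offset' is the byte offset of the first method and 'count' the
// number of data words that follow the header.
static inline uint32_t nv917d_method_header(uint32_t offset, uint32_t count)
{
    return (0x00000000u << 29)        /* NV917D_DMA_OPCODE_METHOD */
         | ((count & 0x3FFu) << 18)   /* NV917D_DMA_METHOD_COUNT  */
         | (offset & 0xFFCu);         /* NV917D_DMA_METHOD_OFFSET */
}
// Example: nv917d_method_header(0x80, 1) starts a one-word write to
// the NV917D_UPDATE method defined below.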
+
+// class methods
+#define NV917D_PUT (0x00000000)
+#define NV917D_PUT_PTR 11:2
+#define NV917D_GET (0x00000004)
+#define NV917D_GET_PTR 11:2
+#define NV917D_UPDATE (0x00000080)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV917D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV917D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV917D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV917D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV917D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV917D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV917D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV917D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV917D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV917D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV917D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV917D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV917D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV917D_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV917D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV917D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV917D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV917D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV917D_GET_CAPABILITIES (0x0000008C)
+#define NV917D_GET_CAPABILITIES_DUMMY 31:0
+#define NV917D_SET_SPARE (0x0000016C)
+#define NV917D_SET_SPARE_UNUSED 31:0
+#define NV917D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004)
+#define NV917D_SET_SPARE_NOOP_UNUSED 31:0
+
+#define NV917D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV917D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV917D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV917D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+#define NV917D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020)
+#define NV917D_DAC_SET_SW_SPARE_A_CODE 31:0
+#define NV917D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020)
+#define NV917D_DAC_SET_SW_SPARE_B_CODE 31:0
+#define NV917D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020)
+#define NV917D_DAC_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV917D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV917D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NV917D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020)
+#define NV917D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NV917D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020)
+#define NV917D_SOR_SET_SW_SPARE_B_CODE 31:0
+#define NV917D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020)
+#define NV917D_SOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV917D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV917D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV917D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV917D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020)
+#define NV917D_PIOR_SET_SW_SPARE_A_CODE 31:0
+#define NV917D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020)
+#define NV917D_PIOR_SET_SW_SPARE_B_CODE 31:0
+#define NV917D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020)
+#define NV917D_PIOR_SET_CUSTOM_REASON_CODE 31:0
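// The (a)-parameterized method macros expand to a base offset plus a
// per-instance stride: 0x20 bytes per DAC/SOR/PIOR above, and 0x300
// bytes per head in the section below.  A small illustration of the
// arithmetic; the helper is hypothetical, not part of the header.

#include <stdint.h>

static inline uint32_t nv917d_head_method(uint32_t base, uint32_t head)
{
    return base + head * 0x00000300u;   // per-head method stride
}
// nv917d_head_method(0x00000414, 1) == 0x714, i.e.
// NV917D_HEAD_SET_RASTER_SIZE(1) from the defines that follow.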
+
+#define NV917D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8
+#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV917D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK 1:1
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN 29:25
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV917D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV917D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV917D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV917D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV917D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV917D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV917D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV917D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV917D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV917D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV917D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV917D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV917D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
+#define NV917D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300)
+#define NV917D_HEAD_SET_LOCK_CHAIN_POSITION 27:24
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV917D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4
+#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV917D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV917D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV917D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV917D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV917D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV917D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV917D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV917D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV917D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV917D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV917D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV917D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV917D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV917D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV917D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV917D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV917D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV917D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV917D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV917D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV917D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV917D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV917D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV917D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define
NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV917D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV917D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV917D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV917D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV917D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV917D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define 
NV917D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV917D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV917D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV917D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV917D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define 
NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV917D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV917D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV917D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV917D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917d_h diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h new file mode 100644 index 0000000..9abc605 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2003-2010, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __cl917dcrcnotif_h__ +#define __cl917dcrcnotif_h__ +/* This file is autogenerated. 
Do not edit */
+
+#define NV917D_NOTIFIER_CRC_1_STATUS_0 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE 0:0
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_TRUE 0x00000001
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW 3:3
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW_FALSE 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW_TRUE 0x00000001
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW 4:4
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_FALSE 0x00000000
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_TRUE 0x00000001
+#define NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT 31:24
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3 0x00000003
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC 31:0
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4 0x00000004
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC 31:0
+#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY1_8 0x00000008
+
+#endif // __cl917dcrcnotif_h__
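
The STATUS_0_DONE, _COUNT, and similar definitions above are HIGH:LOW bit-position pairs, not values. The SDK's DRF helpers in nvmisc.h take such pairs apart with the conditional operator, since "0 ? 31 : 24" evaluates to 24 and "1 ? 31 : 24" evaluates to 31. A minimal sketch of polling this notifier on that basis, assuming nvtypes.h for NvU32 and using simplified stand-ins (not the real nvmisc.h names) for the field helpers:

/* Simplified stand-ins for the DRF field helpers in nvmisc.h
 * (illustration only; each "field" argument is a HIGH:LOW token pair). */
#define FIELD_LO(field)      ((0 ? field) & 31)
#define FIELD_HI(field)      ((1 ? field) & 31)
#define FIELD_MASK(field)    (0xFFFFFFFFu >> (31 - FIELD_HI(field) + FIELD_LO(field)))
#define FIELD_VAL(field, v)  (((v) >> FIELD_LO(field)) & FIELD_MASK(field))

/* Poll the CRC notifier's status dword until the hardware marks it done. */
static int crc_notifier_done(const volatile NvU32 *notifier)
{
    NvU32 status = notifier[NV917D_NOTIFIER_CRC_1_STATUS_0];
    return FIELD_VAL(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE, status) ==
           NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_TRUE;
}

diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h
new file mode 100644
index 0000000..6586bda
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h
@@ -0,0 +1,265 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.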
+ */ + + +#ifndef _cl917e_h_ +#define _cl917e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917E_OVERLAY_CHANNEL_DMA (0x0000917E) + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV917E_DMA 0x00000000 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_METHOD 0x00000000 +#define NV917E_DMA_OPCODE_JUMP 0x00000001 +#define NV917E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_METHOD 0x00000000 +#define NV917E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917E_DMA_METHOD_COUNT 27:18 +#define NV917E_DMA_METHOD_OFFSET 11:2 +#define NV917E_DMA_DATA 31:0 +#define NV917E_DMA_DATA_NOP 0x00000000 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_JUMP 0x00000001 +#define NV917E_DMA_JUMP_OFFSET 11:2 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV917E_PUT (0x00000000) +#define NV917E_PUT_PTR 11:2 +#define NV917E_GET (0x00000004) +#define NV917E_GET_PTR 11:2 +#define NV917E_UPDATE (0x00000080) +#define NV917E_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917E_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917E_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917E_UPDATE_SPECIAL_HANDLING 25:24 +#define NV917E_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV917E_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV917E_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV917E_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV917E_SET_PRESENT_CONTROL (0x00000084) +#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE 1:0 +#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP (0x00000000) +#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP (0x00000003) +#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define 
NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV917E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV917E_SET_PRESENT_CONTROL_MODE 11:10 +#define NV917E_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV917E_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV917E_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV917E_SET_SEMAPHORE_ACQUIRE (0x00000088) +#define NV917E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV917E_SET_SEMAPHORE_RELEASE (0x0000008C) +#define NV917E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV917E_SET_SEMAPHORE_CONTROL (0x00000090) +#define NV917E_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917E_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV917E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV917E_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV917E_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV917E_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV917E_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV917E_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917E_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917E_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV917E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV917E_SET_CONTEXT_DMA_LUT (0x000000B0) +#define NV917E_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV917E_SET_OVERLAY_LUT_LO (0x000000B4) +#define NV917E_SET_OVERLAY_LUT_LO_ENABLE 31:31 +#define NV917E_SET_OVERLAY_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917E_SET_OVERLAY_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV917E_SET_OVERLAY_LUT_LO_MODE 27:24 +#define NV917E_SET_OVERLAY_LUT_LO_MODE_LORES (0x00000000) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_HIRES (0x00000001) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917E_SET_OVERLAY_LUT_HI (0x000000B8) +#define NV917E_SET_OVERLAY_LUT_HI_ORIGIN 31:0 +#define NV917E_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV917E_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV917E_SET_POINT_IN (0x000000E0) +#define NV917E_SET_POINT_IN_X 14:0 +#define NV917E_SET_POINT_IN_Y 30:16 +#define NV917E_SET_SIZE_IN (0x000000E4) +#define NV917E_SET_SIZE_IN_WIDTH 14:0 +#define NV917E_SET_SIZE_IN_HEIGHT 30:16 +#define NV917E_SET_SIZE_OUT (0x000000E8) +#define NV917E_SET_SIZE_OUT_WIDTH 14:0 +#define NV917E_SET_COMPOSITION_CONTROL (0x00000100) +#define NV917E_SET_COMPOSITION_CONTROL_MODE 3:0 +#define NV917E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING (0x00000000) +#define NV917E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING (0x00000001) +#define NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE (0x00000002) +#define NV917E_SET_KEY_COLOR_LO (0x00000104) +#define NV917E_SET_KEY_COLOR_LO_COLOR 31:0 +#define 
NV917E_SET_KEY_COLOR_HI (0x00000108) +#define NV917E_SET_KEY_COLOR_HI_COLOR 31:0 +#define NV917E_SET_KEY_MASK_LO (0x0000010C) +#define NV917E_SET_KEY_MASK_LO_MASK 31:0 +#define NV917E_SET_KEY_MASK_HI (0x00000110) +#define NV917E_SET_KEY_MASK_HI_MASK 31:0 +#define NV917E_SET_PROCESSING (0x00000118) +#define NV917E_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917E_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917E_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917E_SET_CONVERSION_RED (0x0000011C) +#define NV917E_SET_CONVERSION_RED_GAIN 15:0 +#define NV917E_SET_CONVERSION_RED_OFS 31:16 +#define NV917E_SET_CONVERSION_GRN (0x00000120) +#define NV917E_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917E_SET_CONVERSION_GRN_OFS 31:16 +#define NV917E_SET_CONVERSION_BLU (0x00000124) +#define NV917E_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917E_SET_CONVERSION_BLU_OFS 31:16 +#define NV917E_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV917E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV917E_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV917E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV917E_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV917E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV917E_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV917E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV917E_SET_CSC_RED2RED (0x00000140) +#define NV917E_SET_CSC_RED2RED_COEFF 18:0 +#define NV917E_SET_CSC_GRN2RED (0x00000144) +#define NV917E_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917E_SET_CSC_BLU2RED (0x00000148) +#define NV917E_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV917E_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917E_SET_CSC_RED2GRN (0x00000150) +#define NV917E_SET_CSC_RED2GRN_COEFF 18:0 +#define NV917E_SET_CSC_GRN2GRN (0x00000154) +#define NV917E_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917E_SET_CSC_BLU2GRN (0x00000158) +#define NV917E_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV917E_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917E_SET_CSC_RED2BLU (0x00000160) +#define NV917E_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917E_SET_CSC_GRN2BLU (0x00000164) +#define NV917E_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917E_SET_CSC_BLU2BLU (0x00000168) +#define NV917E_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV917E_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV917E_SET_SPARE (0x000003BC) +#define NV917E_SET_SPARE_UNUSED 31:0 +#define NV917E_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV917E_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV917E_SURFACE_SET_OFFSET(b) (0x00000400 + (b)*0x00000004) +#define NV917E_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV917E_SURFACE_SET_SIZE (0x00000408) +#define NV917E_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV917E_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV917E_SURFACE_SET_STORAGE (0x0000040C) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV917E_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define 
NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV917E_SURFACE_SET_PARAMS (0x00000410)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT 15:8
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8 (0x00000028)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8 (0x00000029)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV917E_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE 1:0
+#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB (0x00000000)
+#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl917e_h
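
The NV917E_DMA_* definitions near the top of this file give the pushbuffer encoding: bits 31:29 select an opcode, and for the METHOD and NONINC_METHOD opcodes bits 27:18 carry a dword count while bits 11:2 address the target method. A minimal sketch of packing one method header under that reading follows; dma_method_header is illustrative, not an SDK function, and it assumes nvtypes.h for NvU32 and that the offset field carries the method's dword address (byte offset >> 2):

/* Minimal sketch: pack one "method" header dword for this pushbuffer
 * format (OPCODE in 31:29, METHOD_COUNT in 27:18, METHOD_OFFSET in 11:2).
 * dwordCount must fit in the 10-bit METHOD_COUNT field. */
static NvU32 dma_method_header(NvU32 methodByteOffset, NvU32 dwordCount)
{
    return (NV917E_DMA_OPCODE_METHOD << 29) |  /* NV917E_DMA_OPCODE 31:29        */
           (dwordCount << 18)               |  /* NV917E_DMA_METHOD_COUNT 27:18  */
           ((methodByteOffset >> 2) << 2);     /* NV917E_DMA_METHOD_OFFSET 11:2  */
}

/* For example, dma_method_header(NV917E_UPDATE, 1) followed by one data
 * dword would encode a single NV917E_UPDATE method. */

diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h
new file mode 100644
index 0000000..c6ce40b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.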
+// Source file: class/cl9270.finn
+//
+
+#define NV9270_DISPLAY (0x00009270)
+
+#define NV9270_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9270U)
+
+typedef struct NV9270_ALLOCATION_PARAMETERS {
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numDacs; // Number of DACs in this chip/display
+    NvU32 numSors; // Number of SORs in this chip/display
+    NvU32 numPiors; // Number of PIORs in this chip/display
+} NV9270_ALLOCATION_PARAMETERS;
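
How this structure reaches the resource manager depends on the client, but a minimal sketch of preparing it for an NV9270_DISPLAY allocation might look as follows. rm_alloc_object is a hypothetical stand-in for the client's allocation entry point, and the sketch assumes, as the field comments suggest, that the counts are reported back on success:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the client's RM allocation entry point. */
extern int rm_alloc_object(NvU32 hClient, NvU32 hParent, NvU32 hObject,
                           NvU32 hClass, void *pAllocParams);

static void probe_display_topology(NvU32 hClient, NvU32 hDevice,
                                   NvU32 hDisplay)
{
    NV9270_ALLOCATION_PARAMETERS params;
    memset(&params, 0, sizeof(params));

    if (rm_alloc_object(hClient, hDevice, hDisplay,
                        NV9270_DISPLAY, &params) == 0) {
        /* Assumed: the resource manager fills in the per-chip counts. */
        printf("heads=%u dacs=%u sors=%u piors=%u\n",
               params.numHeads, params.numDacs,
               params.numSors, params.numPiors);
    }
}

diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h
new file mode 100644
index 0000000..397cdc2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.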
+ */ + +#ifndef _cl9271_h_ +#define _cl9271_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9271_DISP_SF_USER 0x9271 + +typedef volatile struct _cl9271_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9271DispSfUser, Nv9271DispSfUserMap; + +#define NV9271_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9271_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9271_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9271_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9271_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9271_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9271_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define 
NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define 
NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define 
NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A 
*/ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define 
NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */
+#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _cl9271_h_
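
The indexed NV9271_SF_HDMI_* macros above expand to byte offsets within the NV_PDISP_SF_USER aperture, with i selecting the SF and j the infoframe slot, bounded by the __SIZE_1 and __SIZE_2 values. Because Nv9271DispSfUserMap exposes the aperture as an NvU32 array, a client converts the byte offset to a dword index. A minimal sketch, with sf_hdmi_info_enable as an illustrative helper rather than SDK code:

/* Minimal sketch: enable HDMI infoframe slot j on SF i through the
 * NV9271_DISP_SF_USER mapping (i < __SIZE_1 == 4, j < __SIZE_2 == 5).
 * sf points at the mapped NV_PDISP_SF_USER region. */
static void sf_hdmi_info_enable(Nv9271DispSfUserMap *sf,
                                unsigned int i, unsigned int j)
{
    /* (i)*1024 + (j)*64 is a byte offset; the map is an NvU32[0x400],
     * so divide by sizeof(NvU32) to index it. */
    unsigned int idx = NV9271_SF_HDMI_INFO_CTRL(i, j) / sizeof(NvU32);

    /* _ENABLE is field 0:0, so _ENABLE_YES lands in bit 0. */
    sf->dispSfUserOffset[idx] |= NV9271_SF_HDMI_INFO_CTRL_ENABLE_YES;
}

diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h
new file mode 100644
index 0000000..1b78305
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h
@@ -0,0 +1,299 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.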
+ */ + + +#ifndef _cl927c_h_ +#define _cl927c_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV927C_BASE_CHANNEL_DMA (0x0000927C) + +#define NV_DISP_BASE_NOTIFIER_1 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1_SIZEOF 0x00000004 +#define NV_DISP_BASE_NOTIFIER_1__0 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT 15:0 +#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP 29:16 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS 31:30 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED 0x00000002 + + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV927C_DMA 0x00000000 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_METHOD 0x00000000 +#define NV927C_DMA_OPCODE_JUMP 0x00000001 +#define NV927C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_METHOD 0x00000000 +#define NV927C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927C_DMA_METHOD_COUNT 27:18 +#define NV927C_DMA_METHOD_OFFSET 11:2 +#define NV927C_DMA_DATA 31:0 +#define NV927C_DMA_DATA_NOP 0x00000000 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_JUMP 0x00000001 +#define NV927C_DMA_JUMP_OFFSET 11:2 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927C_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV927C_PUT (0x00000000) +#define NV927C_PUT_PTR 11:2 +#define NV927C_GET (0x00000004) +#define NV927C_GET_PTR 11:2 +#define NV927C_GET_SCANLINE (0x00000010) +#define NV927C_GET_SCANLINE_LINE 15:0 +#define NV927C_UPDATE (0x00000080) +#define NV927C_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV927C_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV927C_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV927C_UPDATE_SPECIAL_HANDLING 25:24 +#define 
NV927C_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV927C_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV927C_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV927C_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV927C_SET_PRESENT_CONTROL (0x00000084) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8 +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002) +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE 2:2 +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV927C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16 +#define NV927C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10 +#define NV927C_SET_PRESENT_CONTROL_MODE 1:0 +#define NV927C_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV927C_SET_SEMAPHORE_CONTROL (0x00000088) +#define NV927C_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY 26:26 +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY_DISABLE (0x00000000) +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY_ENABLE (0x00000001) +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927C_SET_SEMAPHORE_ACQUIRE (0x0000008C) +#define NV927C_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV927C_SET_SEMAPHORE_RELEASE (0x00000090) +#define NV927C_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV927C_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV927C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV927C_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV927C_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV927C_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV927C_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV927C_SET_NOTIFIER_CONTROL_DELAY 26:26 +#define NV927C_SET_NOTIFIER_CONTROL_DELAY_DISABLE (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_DELAY_ENABLE (0x00000001) +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927C_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV927C_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV927C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV927C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV927C_SET_BASE_LUT_LO (0x000000E0) +#define NV927C_SET_BASE_LUT_LO_ENABLE 31:30 +#define NV927C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV927C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV927C_SET_BASE_LUT_LO_MODE 27:24 +#define NV927C_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV927C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV927C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE 
(0x00000004) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927C_SET_BASE_LUT_HI (0x000000E4) +#define NV927C_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV927C_SET_OUTPUT_LUT_LO (0x000000E8) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE 31:30 +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV927C_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV927C_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927C_SET_OUTPUT_LUT_HI (0x000000EC) +#define NV927C_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV927C_SET_CONTEXT_DMA_LUT (0x000000FC) +#define NV927C_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV927C_SET_PROCESSING (0x00000110) +#define NV927C_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV927C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV927C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV927C_SET_CONVERSION_RED (0x00000114) +#define NV927C_SET_CONVERSION_RED_GAIN 15:0 +#define NV927C_SET_CONVERSION_RED_OFS 31:16 +#define NV927C_SET_CONVERSION_GRN (0x00000118) +#define NV927C_SET_CONVERSION_GRN_GAIN 15:0 +#define NV927C_SET_CONVERSION_GRN_OFS 31:16 +#define NV927C_SET_CONVERSION_BLU (0x0000011C) +#define NV927C_SET_CONVERSION_BLU_GAIN 15:0 +#define NV927C_SET_CONVERSION_BLU_OFS 31:16 +#define NV927C_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV927C_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV927C_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV927C_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV927C_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV927C_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV927C_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV927C_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV927C_SET_CSC_RED2RED (0x00000140) +#define NV927C_SET_CSC_RED2RED_OWNER 31:31 +#define NV927C_SET_CSC_RED2RED_OWNER_CORE (0x00000000) +#define NV927C_SET_CSC_RED2RED_OWNER_BASE (0x00000001) +#define NV927C_SET_CSC_RED2RED_COEFF 18:0 +#define NV927C_SET_CSC_GRN2RED (0x00000144) +#define NV927C_SET_CSC_GRN2RED_COEFF 18:0 +#define NV927C_SET_CSC_BLU2RED (0x00000148) +#define NV927C_SET_CSC_BLU2RED_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV927C_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV927C_SET_CSC_RED2GRN (0x00000150) +#define NV927C_SET_CSC_RED2GRN_COEFF 18:0 +#define NV927C_SET_CSC_GRN2GRN (0x00000154) +#define NV927C_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV927C_SET_CSC_BLU2GRN (0x00000158) +#define NV927C_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV927C_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV927C_SET_CSC_RED2BLU (0x00000160) +#define 
NV927C_SET_CSC_RED2BLU_COEFF 18:0 +#define NV927C_SET_CSC_GRN2BLU (0x00000164) +#define NV927C_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV927C_SET_CSC_BLU2BLU (0x00000168) +#define NV927C_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV927C_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV927C_SET_SPARE (0x000003BC) +#define NV927C_SET_SPARE_UNUSED 31:0 +#define NV927C_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV927C_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV927C_SURFACE_SET_OFFSET(a,b) (0x00000400 + (a)*0x00000020 + (b)*0x00000004) +#define NV927C_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV927C_SURFACE_SET_SIZE(a) (0x00000408 + (a)*0x00000020) +#define NV927C_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV927C_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV927C_SURFACE_SET_STORAGE(a) (0x0000040C + (a)*0x00000020) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV927C_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV927C_SURFACE_SET_PARAMS(a) (0x00000410 + (a)*0x00000020) +#define NV927C_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV927C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV927C_SURFACE_SET_PARAMS_GAMMA 2:2 +#define NV927C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT 5:4 +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl927c_h diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h new file mode 100644 index 0000000..df45fec --- /dev/null +++ 
b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h @@ -0,0 +1,1556 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl927d_h_ +#define _cl927d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV927D_CORE_CHANNEL_DMA (0x0000927D) + +#define NV927D_CORE_NOTIFIER_3 0x00000000 +#define NV927D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV927D_CORE_NOTIFIER_3__1 0x00000001 +#define NV927D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV927D_CORE_NOTIFIER_3__2 0x00000002 +#define NV927D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV927D_CORE_NOTIFIER_3__3 0x00000003 +#define NV927D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6 
0x00000006 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:27 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 
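/*
 * [Editor's sketch, not part of the generated header] The "HI:LO" tokens in
 * the defines above are bit-range literals: C's conditional operator lets a
 * macro split such a range into its end bits, since (1 ? 23:16) evaluates to
 * 23 and (0 ? 23:16) to 16. The SDK's own DRF_* macros in nvmisc.h rely on
 * the same trick; the SF_* helpers and the function below are hypothetical
 * names used for illustration only.
 */
#define SF_HI(range)     (1 ? range)                 /* most-significant bit  */
#define SF_LO(range)     (0 ? range)                 /* least-significant bit */
#define SF_MASK(range)   (0xFFFFFFFFU >> (31 - SF_HI(range) + SF_LO(range)))
#define SF_GET(v, range) (((v) >> SF_LO(range)) & SF_MASK(range))

/* Example: read the DP_A capability bit for SOR5 out of the core-channel
 * notifier, treating the notifier as an array of 32-bit words indexed by
 * the CAP_SOR5_30 dword offset defined above. */
static inline int nv927d_sor5_has_dp_a(const unsigned int *notifier)
{
    unsigned int w = notifier[NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30];
    return SF_GET(w, NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A)
           == NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE;
}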
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 
0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
+
+
+// dma opcode instructions
+#define NV927D_DMA 0x00000000
+#define NV927D_DMA_OPCODE 31:29
+#define NV927D_DMA_OPCODE_METHOD 0x00000000
+#define NV927D_DMA_OPCODE_JUMP 0x00000001
+#define NV927D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV927D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV927D_DMA_OPCODE 31:29
+#define NV927D_DMA_OPCODE_METHOD 0x00000000
+#define NV927D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV927D_DMA_METHOD_COUNT 27:18
+#define NV927D_DMA_METHOD_OFFSET 11:2
+#define NV927D_DMA_DATA 31:0
+#define NV927D_DMA_DATA_NOP 0x00000000
+#define NV927D_DMA_OPCODE 31:29
+#define NV927D_DMA_OPCODE_JUMP 0x00000001
+#define NV927D_DMA_JUMP_OFFSET 11:2
+#define NV927D_DMA_OPCODE 31:29
+#define NV927D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV927D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
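The NV927D_DMA_* defines above describe how each command dword in the core channel's pushbuffer is encoded: the opcode sits in bits 31:29, the method count in bits 27:18, and the method offset (a dword index) in bits 11:2. A minimal sketch of packing an incrementing-METHOD header from these ranges, assuming a hypothetical FIELD_SET helper (NVIDIA's own code would typically use the DRF macros from nvmisc.h for the same job):

#include <stdint.h>

/* Hypothetical helper: place 'v' into the bit range hi:lo of a dword. */
#define FIELD_SET(hi, lo, v) \
    ((((uint32_t)(v)) & ((1u << ((hi) - (lo) + 1)) - 1u)) << (lo))

static inline uint32_t nv927d_method_header(uint32_t byteOffset, uint32_t count)
{
    return FIELD_SET(31, 29, 0x00000000)        /* NV927D_DMA_OPCODE_METHOD */
         | FIELD_SET(27, 18, count)             /* NV927D_DMA_METHOD_COUNT  */
         | FIELD_SET(11,  2, byteOffset >> 2);  /* NV927D_DMA_METHOD_OFFSET */
}

For example, nv927d_method_header(0x0080, 1) evaluates to 0x00040080, i.e. one data dword targeting the UPDATE method at byte offset 0x80.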
+// class methods
+#define NV927D_PUT (0x00000000)
+#define NV927D_PUT_PTR 11:2
+#define NV927D_GET (0x00000004)
+#define NV927D_GET_PTR 11:2
+#define NV927D_UPDATE (0x00000080)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV927D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV927D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV927D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV927D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV927D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV927D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV927D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV927D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV927D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV927D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV927D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV927D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV927D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV927D_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV927D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV927D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV927D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV927D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV927D_GET_CAPABILITIES (0x0000008C)
+#define NV927D_GET_CAPABILITIES_DUMMY 31:0
+#define NV927D_SET_SPARE (0x0000016C)
+#define NV927D_SET_SPARE_UNUSED 31:0
+#define NV927D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004)
+#define NV927D_SET_SPARE_NOOP_UNUSED 31:0
+
+#define NV927D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV927D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV927D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV927D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV927D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV927D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+#define NV927D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020)
+#define NV927D_DAC_SET_SW_SPARE_A_CODE 31:0
+#define NV927D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020)
+#define NV927D_DAC_SET_SW_SPARE_B_CODE 31:0
+#define NV927D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020)
+#define NV927D_DAC_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV927D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV927D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV927D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV927D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV927D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV927D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV927D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV927D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV927D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV927D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV927D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NV927D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020)
+#define NV927D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NV927D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020)
+#define NV927D_SOR_SET_SW_SPARE_B_CODE 31:0
+#define NV927D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020)
+#define NV927D_SOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV927D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NV927D_PIOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV927D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV927D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV927D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV927D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020)
+#define NV927D_PIOR_SET_SW_SPARE_A_CODE 31:0
+#define NV927D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020)
+#define NV927D_PIOR_SET_SW_SPARE_B_CODE 31:0
+#define NV927D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020)
+#define NV927D_PIOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV927D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300)
+#define NV927D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8
+#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13
+#define NV927D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV927D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK 1:1
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN 29:25
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV927D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV927D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV927D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV927D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV927D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV927D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV927D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV927D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV927D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV927D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV927D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV927D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV927D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV927D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV927D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV927D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV927D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV927D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
+#define NV927D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300)
+#define NV927D_HEAD_SET_LOCK_CHAIN_POSITION 27:24
+#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV927D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4
+#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000)
+#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV927D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV927D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV927D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV927D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV927D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV927D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV927D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV927D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV927D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV927D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV927D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV927D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV927D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV927D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV927D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV927D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV927D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV927D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV927D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV927D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV927D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV927D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV927D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV927D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV927D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV927D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV927D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV927D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV927D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV927D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV927D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300)
+#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001)
+#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001)
+#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002)
+#define NV927D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300)
+#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0
+#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONVERSION_RED_GAIN 15:0
+#define NV927D_HEAD_SET_CONVERSION_RED_OFS 31:16
+#define NV927D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONVERSION_GRN_GAIN 15:0
+#define NV927D_HEAD_SET_CONVERSION_GRN_OFS 31:16
+#define NV927D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300)
+#define NV927D_HEAD_SET_CONVERSION_BLU_GAIN 15:0
+#define NV927D_HEAD_SET_CONVERSION_BLU_OFS 31:16
+#define NV927D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31
+#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_CSC_RED2RED_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_GRN2RED_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_BLU2RED_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_RED2GRN_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_RED2BLU_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0
+#define NV927D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300)
+#define NV927D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0
+#define NV927D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300)
+#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NV927D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000)
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001)
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002)
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003)
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004)
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005)
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006)
+#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008)
+#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300)
+#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0
+#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10
+#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20
+#define NV927D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004)
+#define NV927D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NV927D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NV927D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NV927D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NV927D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NV927D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300)
+#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300)
+#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0
+#define NV927D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300)
+#define NV927D_HEAD_SET_SPARE_UNUSED 31:0
+#define NV927D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004)
+#define NV927D_HEAD_SET_SPARE_NOOP_UNUSED 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl927d_h
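All of the HEAD_SET_* methods in cl927d.h are spaced 0x300 bytes apart per head, so each (a)-parameterized macro is just offset arithmetic over the head index. A sketch of how a client might combine an offset macro with its documented field layout, assuming a hypothetical push() that enqueues a method/data pair into the core channel:

#include <stdint.h>

extern void push(uint32_t method, uint32_t data); /* hypothetical enqueue helper */

static void set_raster_size(uint32_t head, uint32_t width, uint32_t height)
{
    /* NV927D_HEAD_SET_RASTER_SIZE(a) = 0x00000414 + (a)*0x00000300 */
    uint32_t method = 0x00000414 + head * 0x00000300;
    /* WIDTH occupies bits 14:0, HEIGHT bits 30:16 */
    uint32_t data = (width & 0x7FFFu) | ((height & 0x7FFFu) << 16);
    push(method, data);
}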
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: class/cl9470.finn +// + +#define NV9470_DISPLAY (0x00009470) + +#define NV9470_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9470U) + +typedef struct NV9470_ALLOCATION_PARAMETERS { + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9470_ALLOCATION_PARAMETERS; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h new file mode 100644 index 0000000..c5f557f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _cl9471_h_ +#define _cl9471_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9471_DISP_SF_USER 0x9471 + +typedef volatile struct _cl9471_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9471DispSfUser, Nv9471DispSfUserMap; + +#define NV9471_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9471_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9471_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9471_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9471_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9471_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9471_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define 
NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define 
NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define 
NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A 
*/ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define 
NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl9471_h_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h new file mode 100644 index 0000000..6842406 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h @@ -0,0 +1,1606 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl947d_h_ +#define _cl947d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV947D_CORE_CHANNEL_DMA (0x0000947D) + +#define NV947D_CORE_NOTIFIER_3 0x00000000 +#define NV947D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV947D_CORE_NOTIFIER_3__1 0x00000001 +#define NV947D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV947D_CORE_NOTIFIER_3__2 0x00000002 +#define NV947D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV947D_CORE_NOTIFIER_3__3 0x00000003 +#define NV947D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
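+
+/*
+ * Illustrative sketch only (not part of the generated header): each "H:L"
+ * pair above names a bit range inside one 32-bit notifier word. A small
+ * hand-rolled helper along these lines can extract such a range; the name
+ * nvFieldGet is hypothetical, and NvU32 is assumed to come from nvtypes.h.
+ * For example, nvFieldGet(cap5, 3, 0) would return the LOCK_PIN0USAGE field
+ * of NV947D_CORE_NOTIFIER_3_CAPABILITIES_5.
+ */
+static inline NvU32 nvFieldGet(NvU32 word, unsigned int hi, unsigned int lo)
+{
+    NvU32 mask = ((hi - lo) == 31u) ? 0xFFFFFFFFu
+                                    : ((1u << (hi - lo + 1u)) - 1u);
+    return (word >> lo) & mask;
+}
+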
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 
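+
+/*
+ * Worked example (a sketch, not in the original header): the per-SOR
+ * capability words repeat with a stride of two dwords, so SOR n's pair sits
+ * at notifier dwords 0x14 + 2*n and 0x15 + 2*n (CAP_SOR0_20 at 0x14,
+ * CAP_SOR1_22 at 0x16, ... CAP_SOR7_34 at 0x22). The helper name below is
+ * hypothetical; "notifier" is the mapped 0x150-byte core notifier viewed as
+ * an array of NvU32.
+ */
+static inline void nv947dGetSorCaps(const NvU32 *notifier, NvU32 sor,
+                                    NvU32 *capLo, NvU32 *capHi)
+{
+    *capLo = notifier[0x14u + 2u * sor];  /* e.g. ..._CAP_SOR0_20 */
+    *capHi = notifier[0x15u + 2u * sor];  /* e.g. ..._CAP_SOR0_21 */
+}
+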
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV947D_DMA 0x00000000 +#define NV947D_DMA_OPCODE 31:29 +#define NV947D_DMA_OPCODE_METHOD 0x00000000 +#define NV947D_DMA_OPCODE_JUMP 0x00000001 +#define NV947D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV947D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV947D_DMA_METHOD_COUNT 27:18 +#define NV947D_DMA_METHOD_OFFSET 11:2 +#define NV947D_DMA_DATA 31:0 +#define NV947D_DMA_DATA_NOP 0x00000000 +#define NV947D_DMA_JUMP_OFFSET 11:2 +#define NV947D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV947D_PUT (0x00000000) +#define NV947D_PUT_PTR 11:2 +#define NV947D_GET (0x00000004) +#define NV947D_GET_PTR 11:2 +#define NV947D_UPDATE (0x00000080) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define 
NV947D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV947D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV947D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV947D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV947D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV947D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV947D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV947D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV947D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV947D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV947D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV947D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV947D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV947D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 
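
The NV947D_DMA_* block above describes how these class methods travel through the channel's pushbuffer: each 32-bit header word carries an opcode in bits 31:29, a data-word count in 27:18, and the method's dword offset in 11:2, followed by that many data words (by the usual pushbuffer convention, METHOD advances the method offset per data word while NONINC_METHOD repeats it). A hedged sketch under those definitions, assuming a CPU-visible dword array pb and ignoring channel setup and PUT/GET flow control; the helper names are illustrative, not from this header.

#include <stdint.h>

/* Pack one method header: opcode into 31:29, payload dword count into
 * 27:18, and the method's byte offset into 11:2 (an aligned byte offset
 * already has zeros in bits 1:0, so masking with 0xFFC places it). */
static inline uint32_t nv947d_hdr(uint32_t opcode, uint32_t count, uint32_t method)
{
    return (opcode << 29) | ((count & 0x3FF) << 18) | (method & 0xFFC);
}

/* Example: request an UPDATE that interlocks with base channel 0.
 * NV947D_UPDATE_INTERLOCK_WITH_BASE0 occupies bit 1:1 of the data word,
 * hence the explicit shift. Returns the advanced write index. */
static uint32_t push_update(uint32_t *pb, uint32_t put)
{
    pb[put++] = nv947d_hdr(NV947D_DMA_OPCODE_METHOD, 1, NV947D_UPDATE);
    pb[put++] = NV947D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE << 1;
    return put; /* the caller would then update the channel's PUT pointer */
}
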
+#define NV947D_GET_CAPABILITIES (0x0000008C) +#define NV947D_GET_CAPABILITIES_DUMMY 31:0 +#define NV947D_SET_SPARE (0x0000016C) +#define NV947D_SET_SPARE_UNUSED 31:0 +#define NV947D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV947D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV947D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV947D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV947D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV947D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV947D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV947D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV947D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV947D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV947D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV947D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV947D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV947D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV947D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV947D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC 
(0x00000001) +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV947D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV947D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV947D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV947D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV947D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13 +#define NV947D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV947D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define 
NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define 
NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) 
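
Head-scoped methods take the head index as (a) and stride by 0x300 bytes, so HEAD_SET_CONTROL for head a sits at 0x408 + a*0x300, and the lock-pin fields carry encoded pin identifiers rather than raw numbers: physical pins are 0x00..0x0F, UNSPECIFIED is 0x10, internal scan-lock pins are 0x18 + i, and internal flip-lock pins 0x1E + i. A small sketch under those definitions; the nv_method struct and the hand-written shifts are illustrative only (driver code would use shared field helpers instead).

#include <stdint.h>

typedef struct { uint32_t method; uint32_t data; } nv_method; /* illustrative */

/* Configure head `head` as a frame-lock master on internal scan-lock pin
 * `pin`. MASTER_LOCK_MODE occupies bits 9:8 and MASTER_LOCK_PIN bits
 * 14:10 of the HEAD_SET_CONTROL data word, hence the shifts below. */
static nv_method head_frame_lock_master(unsigned head, unsigned pin)
{
    nv_method m;
    m.method = NV947D_HEAD_SET_CONTROL(head); /* 0x408 + head * 0x300 */
    m.data   = (NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK << 8)
             | (NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin) << 10);
    return m;
}
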
+#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV947D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV947D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV947D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV947D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV947D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV947D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV947D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV947D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define 
NV947D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV947D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV947D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV947D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV947D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV947D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV947D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define 
NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define 
NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV947D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV947D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define 
NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV947D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV947D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV947D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV947D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV947D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV947D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV947D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV947D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV947D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV947D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV947D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV947D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV947D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV947D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define 
NV947D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV947D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV947D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV947D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV947D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV947D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV947D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV947D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define 
NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV947D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV947D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV947D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define 
NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV947D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV947D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV947D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define 
NV947D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV947D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV947D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV947D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define 
NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV947D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV947D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define 
NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7
+#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300)
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300)
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300)
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300)
+#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0
+#define NV947D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300)
+#define NV947D_HEAD_SET_SPARE_UNUSED 31:0
+#define NV947D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004)
+#define NV947D_HEAD_SET_SPARE_NOOP_UNUSED 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl947d_h
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h
new file mode 100644
index 0000000..8f88763
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl9570.finn
+//
+
+#define NV9570_DISPLAY (0x00009570)
+
+#define NV9570_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9570U)
+
+typedef struct NV9570_ALLOCATION_PARAMETERS {
+    NvU32 numHeads;  // Number of HEADs in this chip/display
+    NvU32 numDacs;   // Number of DACs in this chip/display
+    NvU32 numSors;   // Number of SORs in this chip/display
+    NvU32 numPiors;  // Number of PIORs in this chip/display
+} NV9570_ALLOCATION_PARAMETERS;
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h
new file mode 100644
index 0000000..be40b1f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _cl9571_h_
+#define _cl9571_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV9571_DISP_SF_USER 0x9571
+
+typedef volatile struct _cl9571_tag0 {
+    NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */
+} _Nv9571DispSfUser, Nv9571DispSfUserMap;
+
+#define NV9571_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */
+#define NV9571_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */
+#define NV9571_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */
+#define NV9571_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */
+#define NV9571_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */
+#define NV9571_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */
+#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */
+#define NV9571_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */
+#define NV9571_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */
+#define NV9571_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */
+#define
NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define 
NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define 
NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A 
*/ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define 
NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */
+#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _cl9571_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h
new file mode 100644
index 0000000..ea10694
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h
@@ -0,0 +1,1602 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl957d_h_
+#define _cl957d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV957D_CORE_CHANNEL_DMA (0x0000957D)
+
+#define NV957D_CORE_NOTIFIER_3 0x00000000
+#define NV957D_CORE_NOTIFIER_3_SIZEOF 0x00000150
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1
+#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16
+#define NV957D_CORE_NOTIFIER_3__1 0x00000001
+#define NV957D_CORE_NOTIFIER_3__1_R1 31:0
+#define NV957D_CORE_NOTIFIER_3__2 0x00000002
+#define NV957D_CORE_NOTIFIER_3__2_R2 31:0
+#define NV957D_CORE_NOTIFIER_3__3 0x00000003
+#define NV957D_CORE_NOTIFIER_3__3_R3 31:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001
+#define
NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 
0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 
0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV957D_DMA 0x00000000 +#define NV957D_DMA_OPCODE 31:29 +#define NV957D_DMA_OPCODE_METHOD 0x00000000 +#define NV957D_DMA_OPCODE_JUMP 0x00000001 +#define NV957D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV957D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV957D_DMA_METHOD_COUNT 27:18 +#define NV957D_DMA_METHOD_OFFSET 11:2 +#define NV957D_DMA_DATA 31:0 +#define NV957D_DMA_DATA_NOP 0x00000000 +#define NV957D_DMA_JUMP_OFFSET 11:2 +#define NV957D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV957D_PUT (0x00000000) +#define NV957D_PUT_PTR 11:2 +#define NV957D_GET (0x00000004) +#define NV957D_GET_PTR 11:2 +#define NV957D_UPDATE (0x00000080) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define 
NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV957D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV957D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV957D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV957D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV957D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV957D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV957D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV957D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV957D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV957D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV957D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV957D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV957D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV957D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV957D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV957D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV957D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV957D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV957D_GET_CAPABILITIES (0x0000008C) +#define NV957D_GET_CAPABILITIES_DUMMY 31:0 +#define NV957D_SET_SPARE (0x0000016C) +#define NV957D_SET_SPARE_UNUSED 31:0 +#define NV957D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV957D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV957D_DAC_SET_CONTROL(a) 
(0x00000180 + (a)*0x00000020) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV957D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV957D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV957D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV957D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV957D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV957D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV957D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV957D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV957D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV957D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV957D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV957D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV957D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV957D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV957D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV957D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV957D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV957D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define 
NV957D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV957D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV957D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV957D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV957D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV957D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13 +#define NV957D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV957D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define 
NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN 
29:25 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV957D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV957D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV957D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV957D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV957D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV957D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV957D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV957D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV957D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV957D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV957D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV957D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define 
NV957D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV957D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV957D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV957D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV957D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define 
NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define 
NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV957D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV957D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV957D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define 
NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV957D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV957D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV957D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV957D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV957D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV957D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV957D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV957D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV957D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV957D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV957D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV957D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV957D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV957D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV957D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV957D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV957D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV957D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV957D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS 
(0x00000023) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV957D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV957D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV957D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV957D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV957D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV957D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV957D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV957D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define 
NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV957D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV957D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV957D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define 
NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV957D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV957D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV957D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define 
NV957D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV957D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV957D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV957D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE 
(0x00000001) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV957D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV957D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7 +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define 
NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300)
+#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0
+#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300)
+#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0
+#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300)
+#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0
+#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300)
+#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0
+#define NV957D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300)
+#define NV957D_HEAD_SET_SPARE_UNUSED 31:0
+#define NV957D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004)
+#define NV957D_HEAD_SET_SPARE_NOOP_UNUSED 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl957d_h
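The HIGH:LOW pairs in these generated headers are bit ranges intended for the DRF-style field helpers in nvmisc.h, and the (a)*0x00000300 term gives each head its own 0x300-byte block of methods. A minimal sketch of composing one NV957D core-channel method, assuming the DRF_NUM() helper from nvmisc.h; PushMethod() is a hypothetical stand-in for whatever push-buffer writer the client actually uses:

#include "nvtypes.h"
#include "nvmisc.h"
#include "class/cl957d.h"

/* Hypothetical push-buffer writer standing in for the client's DMA channel. */
extern void PushMethod(NvU32 method, NvU32 data);

static void SetRasterSize(NvU32 head, NvU32 width, NvU32 height)
{
    /* Per-head methods sit 0x300 bytes apart: 0x414 + head * 0x300. */
    NvU32 method = NV957D_HEAD_SET_RASTER_SIZE(head);

    /* Pack WIDTH into bits 14:0 and HEIGHT into bits 30:16 of one data word. */
    NvU32 data = DRF_NUM(957D, _HEAD_SET_RASTER_SIZE, _WIDTH,  width) |
                 DRF_NUM(957D, _HEAD_SET_RASTER_SIZE, _HEIGHT, height);

    PushMethod(method, data);
}

The same pattern applies to every NV957D_HEAD_SET_* method above; only the method offset and the field names change.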
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h
new file mode 100644
index 0000000..87dccee
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl9770.finn
+//
+
+#define NV9770_DISPLAY (0x00009770)
+
+#define NV9770_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9770U)
+
+typedef struct NV9770_ALLOCATION_PARAMETERS {
+    NvU32 numHeads;  // Number of HEADs in this chip/display
+    NvU32 numDacs;   // Number of DACs in this chip/display
+    NvU32 numSors;   // Number of SORs in this chip/display
+    NvU32 numPiors;  // Number of PIORs in this chip/display
+} NV9770_ALLOCATION_PARAMETERS;
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h
new file mode 100644
index 0000000..adf59a2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h
@@ -0,0 +1,1587 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _cl977d_h_
+#define _cl977d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NV977D_CORE_CHANNEL_DMA (0x0000977D)
+
+#define NV977D_CORE_NOTIFIER_3 0x00000000
+#define NV977D_CORE_NOTIFIER_3_SIZEOF 0x00000150
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1
+#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16
+#define NV977D_CORE_NOTIFIER_3__1 0x00000001
+#define NV977D_CORE_NOTIFIER_3__1_R1 31:0
+#define NV977D_CORE_NOTIFIER_3__2 0x00000002
+#define NV977D_CORE_NOTIFIER_3__2_R2 31:0
+#define NV977D_CORE_NOTIFIER_3__3 0x00000003
+#define NV977D_CORE_NOTIFIER_3__3_R3 31:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001
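The NV977D_CORE_NOTIFIER_3_* values above are dword indices into the 0x150-byte core notifier: COMPLETION_0 is word 0, with _DONE in bit 0 and a 14-bit _TIMESTAMP in bits 29:16. A minimal polling sketch, assuming the FLD_TEST_DRF()/DRF_VAL() helpers from nvmisc.h and a `notifier` pointer to an already-established CPU mapping of the notifier:

#include "nvtypes.h"
#include "nvmisc.h"
#include "class/cl977d.h"

static NvU32 WaitForCoreNotifier(const volatile NvU32 *notifier)
{
    NvU32 w;

    /* Spin until the display engine flips COMPLETION_0_DONE to TRUE. */
    do {
        w = notifier[NV977D_CORE_NOTIFIER_3_COMPLETION_0];
    } while (FLD_TEST_DRF(977D, _CORE_NOTIFIER_3_COMPLETION_0, _DONE, _FALSE, w));

    /* Return the 14-bit timestamp field captured with the completion. */
    return DRF_VAL(977D, _CORE_NOTIFIER_3_COMPLETION_0, _TIMESTAMP, w);
}

The _DONE bit in the CAPABILITIES_4 word above can be tested with the same FLD_TEST_DRF pattern before the capability words that follow are trusted.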
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_11 
0x0000000B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 
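Each SOR's capabilities occupy two consecutive notifier words with an identical field layout (CAP_SOR0_20/21 at words 0x14/0x15, CAP_SOR1_22/23 at 0x16/0x17, and so on), so a client can index by SOR number and reuse SOR0's field definitions. A sketch under the same nvmisc.h and notifier-mapping assumptions as above:

#include "nvtypes.h"
#include "nvmisc.h"
#include "class/cl977d.h"

static NvBool SorSupportsDualTmds(const volatile NvU32 *notifier, NvU32 sor)
{
    /* SOR `sor` starts at word 0x14 + 2*sor; fields match CAP_SOR0_20. */
    NvU32 caps = notifier[NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 + 2 * sor];

    return FLD_TEST_DRF(977D, _CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20,
                        _DUAL_TMDS, _TRUE, caps);
}

static NvU32 SorTmdsClkMaxRaw(const volatile NvU32 *notifier, NvU32 sor)
{
    /* Second word of the pair; returns the raw 8-bit TMDS_CLK_MAX encoding. */
    NvU32 clk = notifier[NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 + 2 * sor];

    return DRF_VAL(977D, _CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21,
                   _TMDS_CLK_MAX, clk);
}

The clock fields are 8-bit encodings whose units this header does not spell out, so the value is returned unscaled here.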
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 
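Each capability field above is encoded as a HIGH:LOW bit-range pair, following the DRF convention used throughout these headers. As a minimal sketch (not part of the patch, and deliberately simplified relative to the real DRF_* macros in nvmisc.h), such a pair can be split with the conditional-operator trick and used to pull a field out of a 32-bit notifier word; the caps pointer and the assumption that CAP_* offsets index 32-bit words are illustrative:

#include <stdint.h>

/* Simplified stand-ins for the DRF_* helpers (assumption, not the patch's
 * API): "1 ? 23:16" evaluates to 23 (high bit), "0 ? 23:16" to 16 (low). */
#define XDRF_EXTENT(f)  (1 ? f)
#define XDRF_BASE(f)    (0 ? f)
#define XDRF_MASK(f)    (0xFFFFFFFFu >> (31 - XDRF_EXTENT(f) + XDRF_BASE(f)))
#define XDRF_VAL(f, v)  (((v) >> XDRF_BASE(f)) & XDRF_MASK(f))

/* Example: read SOR1's maximum TMDS clock field from the capabilities
 * notifier surface. */
static uint32_t sor1_tmds_clk_max(const volatile uint32_t *caps)
{
    uint32_t w = caps[NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23];
    return XDRF_VAL(NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX, w);
}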
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 
0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 
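The single-bit capability fields, such as the PIOR0 EXT_TMDS_ENC bit just defined, pair a one-bit range with explicit _FALSE/_TRUE values. A hedged sketch of probing one of them, reusing the XDRF_VAL helper from the earlier sketch and again assuming the CAP_* offsets index 32-bit words:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: compare against the named _TRUE value rather than hard-coding 1. */
static bool pior0_has_ext_tmds_enc(const volatile uint32_t *caps)
{
    uint32_t w = caps[NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36];
    return XDRF_VAL(NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC, w)
           == NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE;
}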
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 
0x0000002B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV977D_DMA 0x00000000 +#define NV977D_DMA_OPCODE 31:29 +#define NV977D_DMA_OPCODE_METHOD 0x00000000 +#define NV977D_DMA_OPCODE_JUMP 0x00000001 +#define NV977D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV977D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV977D_DMA_METHOD_COUNT 27:18 +#define NV977D_DMA_METHOD_OFFSET 11:2 +#define NV977D_DMA_DATA 31:0 +#define NV977D_DMA_DATA_NOP 0x00000000 +#define NV977D_DMA_JUMP_OFFSET 11:2 +#define NV977D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV977D_PUT (0x00000000) +#define NV977D_PUT_PTR 11:2 +#define NV977D_GET (0x00000004) +#define NV977D_GET_PTR 11:2 +#define NV977D_UPDATE (0x00000080) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define 
NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV977D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV977D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV977D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV977D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV977D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV977D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV977D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV977D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV977D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV977D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV977D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV977D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV977D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV977D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV977D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV977D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV977D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV977D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV977D_GET_CAPABILITIES (0x0000008C) +#define NV977D_GET_CAPABILITIES_DUMMY 31:0 +#define NV977D_SET_SPARE (0x0000016C) +#define NV977D_SET_SPARE_UNUSED 31:0 +#define NV977D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define 
NV977D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV977D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV977D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV977D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV977D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV977D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV977D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV977D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV977D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV977D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV977D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV977D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV977D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV977D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV977D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV977D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV977D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV977D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV977D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV977D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define 
NV977D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV977D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV977D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV977D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV977D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV977D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV977D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 24:13 +#define NV977D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV977D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) 
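The NV977D_DMA_* defines above give the layout of a core-channel pushbuffer word: opcode in 31:29, method count in 27:18, method offset in 11:2. A minimal sketch of emitting class methods against that layout, assuming (as in EVO-style display channels) that the offset field holds the 4-byte-aligned method address; push_method and attach_sor0_to_head0 are hypothetical helper names:

#include <stdint.h>

/* Sketch: OPCODE_METHOD is 0, so a method header is just the count (27:18)
 * ORed with the dword-aligned method offset sitting in bits 11:2. */
static uint32_t *push_method(uint32_t *p, uint32_t method, uint32_t count)
{
    *p++ = (count << 18) | (method & 0x00000FFCu);
    return p;
}

/* Hypothetical usage: attach SOR0 to head 0 over DisplayPort, then send
 * UPDATE (with no interlocks set) to latch the new state. */
static uint32_t *attach_sor0_to_head0(uint32_t *p)
{
    p = push_method(p, NV977D_SOR_SET_CONTROL(0), 1);
    *p++ = NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 |
           (NV977D_SOR_SET_CONTROL_PROTOCOL_DP_A << 8); /* PROTOCOL is 11:8 */
    p = push_method(p, NV977D_UPDATE, 1);
    *p++ = 0;
    return p;
}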
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define 
NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define 
NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV977D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV977D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV977D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV977D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV977D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV977D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV977D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV977D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV977D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV977D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV977D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define 
NV977D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV977D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV977D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV977D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV977D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define 
NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV977D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV977D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV977D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV977D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV977D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV977D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV977D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV977D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV977D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV977D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV977D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV977D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV977D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV977D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV977D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV977D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV977D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV977D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV977D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV977D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV977D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV977D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV977D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV977D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV977D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV977D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001)
+#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001)
+#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002)
+#define NV977D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0
+#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONVERSION_RED_GAIN 15:0
+#define NV977D_HEAD_SET_CONVERSION_RED_OFS 31:16
+#define NV977D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONVERSION_GRN_GAIN 15:0
+#define NV977D_HEAD_SET_CONVERSION_GRN_OFS 31:16
+#define NV977D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONVERSION_BLU_GAIN 15:0
+#define NV977D_HEAD_SET_CONVERSION_BLU_OFS 31:16
+#define NV977D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31
+#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CSC_RED2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_GRN2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_BLU2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_RED2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_RED2BLU_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0
+#define NV977D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300)
+#define NV977D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0
+#define NV977D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300)
+#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NV977D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000)
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001)
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002)
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003)
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004)
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005)
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006)
+#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008)
+#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300)
+#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0
+#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10
+#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20
+#define NV977D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300)
+#define NV977D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0
+#define NV977D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004)
+#define NV977D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NV977D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NV977D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NV977D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NV977D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300)
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000)
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001)
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300)
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12
+#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16
+#define NV977D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300)
+#define NV977D_HEAD_SET_STALL_LOCK_ENABLE 0:0
+#define NV977D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000)
+#define NV977D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001)
+#define NV977D_HEAD_SET_STALL_LOCK_MODE 1:1
+#define NV977D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NV977D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7
+#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300)
+#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0
+#define NV977D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300)
+#define NV977D_HEAD_SET_SPARE_UNUSED 31:0
+#define NV977D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004)
+#define NV977D_HEAD_SET_SPARE_NOOP_UNUSED 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl977d_h
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h
new file mode 100644
index 0000000..2307a6b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: class/cl9870.finn
+//
+
+#define NV9870_DISPLAY (0x00009870)
+
+#define NV9870_ALLOCATION_PARAMETERS_MESSAGE_ID (0x9870U)
+
+typedef struct NV9870_ALLOCATION_PARAMETERS {
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numDacs;  // Number of DACs in this chip/display
+    NvU32 numSors;  // Number of SORs in this chip/display
+    NvU32 numPiors; // Number of PIORs in this chip/display
+} NV9870_ALLOCATION_PARAMETERS;
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h
new file mode 100644
index 0000000..ab01f62
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h
@@ -0,0 +1,1590 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl987d_h_ +#define _cl987d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV987D_CORE_CHANNEL_DMA (0x0000987D) + +#define NV987D_CORE_NOTIFIER_3 0x00000000 +#define NV987D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV987D_CORE_NOTIFIER_3__1 0x00000001 +#define NV987D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV987D_CORE_NOTIFIER_3__2 0x00000002 +#define NV987D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV987D_CORE_NOTIFIER_3__3 0x00000003 +#define NV987D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 
0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_11 
0x0000000B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:10
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:10
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:10
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:10
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0
+
+
+// dma opcode instructions
+#define NV987D_DMA 0x00000000
+#define NV987D_DMA_OPCODE 31:29
+#define NV987D_DMA_OPCODE_METHOD 0x00000000
+#define NV987D_DMA_OPCODE_JUMP 0x00000001
+#define NV987D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NV987D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NV987D_DMA_METHOD_COUNT 27:18
+#define NV987D_DMA_METHOD_OFFSET 11:2
+#define NV987D_DMA_DATA 31:0
+#define NV987D_DMA_DATA_NOP 0x00000000
+#define NV987D_DMA_JUMP_OFFSET 11:2
+#define NV987D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NV987D_PUT (0x00000000)
+#define NV987D_PUT_PTR 11:2
+#define NV987D_GET (0x00000004)
+#define NV987D_GET_PTR 11:2
+#define NV987D_UPDATE (0x00000080)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE0 1:1
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE1 5:5
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE2 9:9
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE3 13:13
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000)
+#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001)
+#define NV987D_UPDATE_SPECIAL_HANDLING 25:24
+#define NV987D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NV987D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NV987D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NV987D_UPDATE_SPECIAL_HANDLING_REASON 23:16
+#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY 31:31
+#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000)
+#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001)
+#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30
+#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000)
+#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001)
+#define NV987D_UPDATE_INHIBIT_INTERRUPTS 29:29
+#define NV987D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NV987D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL (0x00000084)
+#define NV987D_SET_NOTIFIER_CONTROL_MODE 30:30
+#define NV987D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL_OFFSET 11:2
+#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY 31:31
+#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL_FORMAT 28:28
+#define NV987D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001)
+#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE 0:0
+#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE_FALSE (0x00000000)
+#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE_TRUE (0x00000001)
+#define NV987D_SET_CONTEXT_DMA_NOTIFIER (0x00000088)
+#define NV987D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0
+#define NV987D_GET_CAPABILITIES (0x0000008C)
+#define NV987D_GET_CAPABILITIES_DUMMY 31:0
+#define NV987D_SET_SPARE (0x0000016C)
+#define NV987D_SET_SPARE_UNUSED 31:0
+#define NV987D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004)
+#define NV987D_SET_SPARE_NOOP_UNUSED 31:0
+
+#define NV987D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK 3:0
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV987D_DAC_SET_CONTROL_PROTOCOL 12:8
+#define NV987D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000)
+#define NV987D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013)
+#define NV987D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020)
+#define NV987D_DAC_SET_SW_SPARE_A_CODE 31:0
+#define NV987D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020)
+#define NV987D_DAC_SET_SW_SPARE_B_CODE 31:0
+#define NV987D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020)
+#define NV987D_DAC_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV987D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NV987D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NV987D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020)
+#define NV987D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NV987D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020)
+#define NV987D_SOR_SET_SW_SPARE_B_CODE 31:0
+#define NV987D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020)
+#define NV987D_SOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV987D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK 3:0
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NV987D_PIOR_SET_CONTROL_PROTOCOL 11:8
+#define NV987D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000)
+#define NV987D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001)
+#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14
+#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020)
+#define NV987D_PIOR_SET_SW_SPARE_A_CODE 31:0
+#define NV987D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020)
+#define NV987D_PIOR_SET_SW_SPARE_B_CODE 31:0
+#define NV987D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020)
+#define NV987D_PIOR_SET_CUSTOM_REASON_CODE 31:0
+
+#define NV987D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8
+#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 24:13
+#define NV987D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_STRUCTURE 0:0
+#define NV987D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK 1:1
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN 29:25
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i))
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i))
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E)
+#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300)
+#define NV987D_HEAD_SET_OVERSCAN_COLOR_RED 9:0
+#define NV987D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10
+#define NV987D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20
+#define NV987D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NV987D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NV987D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NV987D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NV987D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NV987D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NV987D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NV987D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NV987D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300)
+#define NV987D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0
+#define NV987D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16
+#define NV987D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300)
+#define NV987D_HEAD_SET_LOCK_CHAIN_POSITION 27:24
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300)
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10
+#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20
+#define NV987D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002)
+#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2
+#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3
+#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4
+#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i))
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF)
+#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF)
+#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5
+#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6
+#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NV987D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300)
+#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31
+#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE 27:24
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300)
+#define NV987D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300)
+#define NV987D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NV987D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0
+#define NV987D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300)
+#define NV987D_HEAD_SET_OFFSET_ORIGIN 31:0
+#define NV987D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SIZE_WIDTH 15:0
+#define NV987D_HEAD_SET_SIZE_HEIGHT 31:16
+#define NV987D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NV987D_HEAD_SET_STORAGE_PITCH 20:8
+#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24
+#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
+#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001)
+#define NV987D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PARAMS_FORMAT 15:8
+#define NV987D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NV987D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0
+#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000)
+#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002)
+#define NV987D_HEAD_SET_PARAMS_GAMMA 2:2
+#define NV987D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000)
+#define NV987D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001)
+#define NV987D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24
+#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8
+#define NV987D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002)
+#define NV987D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004)
+#define NV987D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0
+#define NV987D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004)
+#define NV987D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2
+#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP_SAT_COS 19:8
+#define NV987D_HEAD_SET_PROCAMP_SAT_SINE 31:20
+#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5
+#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6
+#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS 2:1
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000)
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE 6:3
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NV987D_HEAD_SET_DITHER_CONTROL_PHASE 8:7
+#define NV987D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NV987D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300)
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0
+#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16
+#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + 
(a)*0x00000300) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV987D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV987D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV987D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV987D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV987D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV987D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV987D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV987D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_GRN2RED(a) 
(0x000004F4 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV987D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV987D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV987D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV987D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV987D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV987D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define 
NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV987D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV987D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 
(0x0000001E)
+#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F)
+#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7
+#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300)
+#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0
+#define NV987D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300)
+#define NV987D_HEAD_SET_SPARE_UNUSED 31:0
+#define NV987D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004)
+#define NV987D_HEAD_SET_SPARE_NOOP_UNUSED 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _cl987d_h
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h
new file mode 100644
index 0000000..f0e0d31
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvtypes.h"
+
+#ifndef _clb0b5sw_h_
+#define _clb0b5sw_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This file is *not* auto-generated. */
+
+//
+// Using VERSION_0 will cause the API to interpret
+// engineType as a CE engine instance. This allows
+// for backward compatibility with 85B5sw and 90B5sw.
+//
+#define NVB0B5_ALLOCATION_PARAMETERS_VERSION_0 0
+
+//
+// Using VERSION_1 will cause the API to interpret
+// engineType as an NV2080_ENGINE_TYPE ordinal.
+//
+#define NVB0B5_ALLOCATION_PARAMETERS_VERSION_1 1
+
+typedef struct
+{
+    NvU32 version;
+    NvU32 engineType;
+} NVB0B5_ALLOCATION_PARAMETERS;
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clb0b5sw_h_
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h
new file mode 100644
index 0000000..075f081
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc370_h_
+#define _clc370_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+#include "class/cl5070.h"
+
+#define NVC370_DISPLAY (0x0000C370)
+
+/* event values */
+#define NVC370_NOTIFIERS_SW NV5070_NOTIFIERS_SW
+#define NVC370_NOTIFIERS_BEGIN NV5070_NOTIFIERS_MAXCOUNT
+#define NVC370_NOTIFIERS_VPR NVC370_NOTIFIERS_BEGIN + (0)
+#define NVC370_NOTIFIERS_RG_SEM_NOTIFICATION NVC370_NOTIFIERS_VPR + (1)
+#define NVC370_NOTIFIERS_WIN_SEM_NOTIFICATION NVC370_NOTIFIERS_RG_SEM_NOTIFICATION + (1)
+#define NVC370_NOTIFIERS_MAXCOUNT NVC370_NOTIFIERS_WIN_SEM_NOTIFICATION + (1)
+
+typedef struct
+{
+    NvU32 numHeads;  // Number of HEADs in this chip/display
+    NvU32 numSors;   // Number of SORs in this chip/display
+    NvU32 numPiors;  // Number of PIORs in this chip/display
+} NVC370_ALLOCATION_PARAMETERS;
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc370_h_ */
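Both parameter structures above are filled in by the caller and handed over at class allocation time; for clb0b5sw.h the two VERSION_* values change only how engineType is decoded. A minimal C sketch, assuming the NV2080_ENGINE_TYPE_COPY() helper from the cl2080 headers elsewhere in this tree (the allocation call itself is not shown):

    /* Sketch only: request CE0 via the VERSION_1 (engine-ordinal) scheme. */
    NVB0B5_ALLOCATION_PARAMETERS ceParams = { 0 };
    ceParams.version    = NVB0B5_ALLOCATION_PARAMETERS_VERSION_1;
    ceParams.engineType = NV2080_ENGINE_TYPE_COPY(0); /* assumed helper macro */

    /* Under VERSION_0 the same field carries a raw CE instance instead: */
    ceParams.version    = NVB0B5_ALLOCATION_PARAMETERS_VERSION_0;
    ceParams.engineType = 0;                          /* CE0 as an instance */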
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h
new file mode 100644
index 0000000..a8398cc
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc371_h_
+#define _clc371_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC371_DISP_SF_USER (0x000C371)
+
+typedef volatile struct _clc371_tag0 {
+    NvU32 dispSfUserOffset[0x400];
+} _NvC371DispSfUser, NvC371DispSfUserMap;
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _clc371_h_
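The typedef above only fixes a layout: 0x400 NvU32 slots, i.e. a 4 KB register window. A hedged sketch of how such a map is typically used once the aperture has been mapped (the mapping call is outside this header; pMapped is an assumed placeholder):

    /* Sketch only: overlay the DISP_SF_USER layout on a mapped aperture. */
    void *pMapped = NULL; /* assumed result of an RM mapping call, not shown */
    NvC371DispSfUserMap *pSfUser = (NvC371DispSfUserMap *)pMapped;
    NvU32 firstDword = pSfUser->dispSfUserOffset[0]; /* volatile MMIO read */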
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h
new file mode 100644
index 0000000..552ea08
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc372sw_h_
+#define _clc372sw_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC372_DISPLAY_SW (0x0000C372)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc372sw_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h
new file mode 100644
index 0000000..c707022
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h
@@ -0,0 +1,350 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _clc373_h_ +#define _clc373_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC373_DISP_CAPABILITIES 0xC373 + +typedef volatile struct _clc373_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC373DispCapabilities,NvC373DispCapabilities_Map ; + + +#define NVC373_SYS_CAP 0x0 /* RW-4R */ +#define NVC373_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC373_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC373_SYS_CAP_HEAD_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC373_SYS_CAP_SOR0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC373_SYS_CAP_SOR1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC373_SYS_CAP_SOR2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC373_SYS_CAP_SOR3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVC373_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC373_SYS_CAP_SOR4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC373_SYS_CAP_SOR5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC373_SYS_CAP_SOR6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC373_SYS_CAP_SOR7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC373_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC373_SYS_CAP_SOR_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB 0x4 /* RW-4R */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */ 
+#define NVC373_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define 
NVC373_SYS_CAPB_WINDOW22_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS 25:25 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA(i) (0x30+(i)*32) /* RW-4A */ +#define NVC373_HEAD_CAPA__SIZE_1 8 /* */ +#define NVC373_HEAD_CAPA_SCALER 0:0 /* RWIVF */ +#define NVC373_HEAD_CAPA_SCALER_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422 1:1 /* RWIVF */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_HSAT 2:2 /* RWIVF */ +#define 
NVC373_HEAD_CAPA_HSAT_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_HSAT_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_HSAT_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_OCSC 3:3 /* RWIVF */ +#define NVC373_HEAD_CAPA_OCSC_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_OCSC_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_OCSC_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_YUV422 4:4 /* RWIVF */ +#define NVC373_HEAD_CAPA_YUV422_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_YUV422_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_YUV422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_LUT_TYPE 6:5 /* RWIVF */ +#define NVC373_HEAD_CAPA_LUT_TYPE_NONE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_257 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_1025 0x00000002 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION 7:7 /* RWIVF */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_EARLY 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_LATE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPD(i) (0x3c+(i)*32) /* RW-4A */ +#define NVC373_HEAD_CAPD__SIZE_1 8 /* */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP422 15:0 /* RWIUF */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP444 31:16 /* RWIUF */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP444_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC373_SOR_CAP__SIZE_1 8 /* */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_TMDS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE_INIT 0x00000000 /* RWI-V */ +#define 
NVC373_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC373_SOR_CAP_SDI_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC373_SOR_CAP_DP_A_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC373_SOR_CAP_DP_B_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC373_SOR_CAP_DP_INTERLACE_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC373_SOR_CAP_DP_8_LANES_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CLK_CAP(i) (0x608+(i)*4) /* RW-4A */ +#define NVC373_SOR_CLK_CAP__SIZE_1 8 /* */ +#define NVC373_SOR_CLK_CAP_DP_MAX 7:0 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_DP_MAX_INIT 0x00000036 /* RWI-V */ +#define NVC373_SOR_CLK_CAP_TMDS_MAX 23:16 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_TMDS_MAX_INIT 0x0000003C /* RWI-V */ +#define NVC373_SOR_CLK_CAP_LVDS_MAX 31:24 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_LVDS_MAX_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc373_h_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h new file mode 100644 index 0000000..4a096b6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h @@ -0,0 +1,213 @@ +/* + * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
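Every field in these headers is published as a HIGH:LOW bit range plus named values (e.g. NVC373_SYS_CAP_HEAD0_EXISTS occupies bit 0:0 with _NO/_YES values). Drivers decode such fields with the DRF helpers from nvmisc.h, which is also part of this patch; a hedged sketch, assuming pCaps points at a mapped NvC373DispCapabilities_Map window:

    /* Sketch only: read SYS_CAP and test per-head presence bits. */
    NvU32 sysCap = pCaps->dispCapabilities[NVC373_SYS_CAP / sizeof(NvU32)];

    if (FLD_TEST_DRF(C373, _SYS_CAP, _HEAD0_EXISTS, _YES, sysCap))
    {
        /* head 0 exists on this chip */
    }

    /* The indexed form covers all eight heads with a single field macro: */
    NvBool bHead2 = (DRF_IDX_VAL(C373, _SYS_CAP, _HEAD_EXISTS, 2, sysCap) ==
                     NVC373_SYS_CAP_HEAD_EXISTS_YES);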
+ */ + + +#ifndef _clc37a__h_ +#define _clc37a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37A_CURSOR_IMM_CHANNEL_PIO (0x0000C37A) + +typedef volatile struct _clc37a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC37ADispCursorImmControlPio; + +#define NVC37A_FREE (0x00000008) +#define NVC37A_FREE_COUNT 5:0 +#define NVC37A_UPDATE (0x00000200) +#define NVC37A_UPDATE_RELEASE_ELV 0:0 +#define NVC37A_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37A_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37A_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define 
NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define 
NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define 
NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc37a_h + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h new file mode 100644 index 0000000..b61700e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _clC37b_h_ +#define _clC37b_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37B_WINDOW_IMM_CHANNEL_DMA (0x0000C37B) + +// dma opcode instructions +#define NVC37B_DMA +#define NVC37B_DMA_OPCODE 31:29 +#define NVC37B_DMA_OPCODE_METHOD 0x00000000 +#define NVC37B_DMA_OPCODE_JUMP 0x00000001 +#define NVC37B_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC37B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC37B_DMA_METHOD_COUNT 27:18 +#define NVC37B_DMA_METHOD_OFFSET 13:2 +#define NVC37B_DMA_DATA 31:0 +#define NVC37B_DMA_DATA_NOP 0x00000000 +#define NVC37B_DMA_JUMP_OFFSET 11:2 +#define NVC37B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC37B_PUT (0x00000000) +#define NVC37B_PUT_PTR 9:0 +#define NVC37B_GET (0x00000004) +#define NVC37B_GET_PTR 9:0 +#define NVC37B_UPDATE (0x00000200) +#define NVC37B_UPDATE_RELEASE_ELV 0:0 +#define NVC37B_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37B_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW 1:1 +#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC37B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC37B_SET_POINT_OUT_X 15:0 +#define NVC37B_SET_POINT_OUT_Y 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC37b_h diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h new file mode 100644 index 0000000..9ac7050 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h @@ -0,0 +1,953 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _clC37d_h_ +#define _clC37d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37D_CORE_CHANNEL_DMA (0x0000C37D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC37D_DMA +#define NVC37D_DMA_OPCODE 31:29 +#define NVC37D_DMA_OPCODE_METHOD 0x00000000 +#define NVC37D_DMA_OPCODE_JUMP 0x00000001 +#define NVC37D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC37D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC37D_DMA_METHOD_COUNT 27:18 +#define NVC37D_DMA_METHOD_OFFSET 13:2 +#define NVC37D_DMA_DATA 31:0 +#define NVC37D_DMA_DATA_NOP 0x00000000 +#define NVC37D_DMA_JUMP_OFFSET 11:2 +#define NVC37D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC37D_PUT (0x00000000) +#define NVC37D_PUT_PTR 9:0 +#define NVC37D_GET (0x00000004) +#define NVC37D_GET_PTR 9:0 +#define NVC37D_UPDATE (0x00000200) +#define NVC37D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC37D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC37D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC37D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC37D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC37D_UPDATE_RELEASE_ELV 0:0 +#define NVC37D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define 
NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC37D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC37D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC37D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC37D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL (0x00000210) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define 
NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define 
NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define 
NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC37D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC37D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC37D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC37D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC37D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC37D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC37D_PIOR_SET_CONTROL(a) 
(0x00000280 + (a)*0x00000020) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_SDI_SD_ENC (0x00000001) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_SDI_HD_ENC (0x00000002) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_OUT (0x00000004) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_IN (0x00000005) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_INOUT (0x00000006) +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_PIOR_SET_CUSTOM_REASON(a) (0x00000284 + (a)*0x00000020) +#define NVC37D_PIOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC37D_PIOR_SET_SW_SPARE_A(a) (0x00000288 + (a)*0x00000020) +#define NVC37D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_PIOR_SET_SW_SPARE_B(a) (0x0000028C + (a)*0x00000020) +#define NVC37D_PIOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC37D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DSI (0x0000000A) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC37D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC37D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC37D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) 
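[Editor's note, not part of the patch: the bare HI:LO values used throughout these headers (15:0, 31:16, and so on) are bit-range designators, not typos. Each pair is written so it can sit inside a C conditional expression, where (1 ? 15:0) evaluates to the high bit index and (0 ? 15:0) to the low one; DRF-style helper macros of this shape (cf. nvmisc.h elsewhere in this patch) derive shifts and masks from that. A minimal self-contained sketch of the convention, with hypothetical FIELD_* names:]

#include <stdint.h>
#include <stdio.h>

/* Bit-range fields, spelled exactly as the class headers spell them. */
#define DEMO_FIELD_X 15:0
#define DEMO_FIELD_Y 31:16

/* (1 ? hi:lo) picks hi, (0 ? hi:lo) picks lo -- the ternary trick. */
#define FIELD_SHIFT(f)  ((0 ? f) % 32)
#define FIELD_MASK(f)   (0xFFFFFFFFu >> (31 - ((1 ? f) % 32) + ((0 ? f) % 32)))
#define FIELD_NUM(f, n) (((n) & FIELD_MASK(f)) << FIELD_SHIFT(f))

int main(void)
{
    /* Pack x=100, y=200 into one 32-bit method data word. */
    uint32_t word = FIELD_NUM(DEMO_FIELD_X, 100) | FIELD_NUM(DEMO_FIELD_Y, 200);
    printf("0x%08x\n", word); /* prints 0x00c80064 */
    return 0;
}
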
+#define NVC37D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC37D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC37D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define 
NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define 
NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) 
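[Editor's note, not part of the patch: for a concrete reading of the window-to-head helpers defined near the top of this header: when the SUPPORT_FLEXIBLE_WIN_MAPPING cap is FALSE, windows are statically paired two per head, so window w belongs to head w>>1 and head h owns windows 2h and 2h+1. A standalone sanity check, with macro bodies copied from NVC37D_WINDOW_MAPPED_TO_HEAD and NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD above; the short names are local to this sketch:]

#include <assert.h>

/* Two windows per head: window w -> head w/2; head h -> windows 2h, 2h+1. */
#define WINDOW_MAPPED_TO_HEAD(w)     ((w) >> 1)
#define VALID_WINDOWMASK_FOR_HEAD(h) ((1 << ((h)*2)) | (1 << ((h)*2 + 1)))

int main(void)
{
    assert(WINDOW_MAPPED_TO_HEAD(5) == 2);        /* window 5 -> head 2 */
    assert(VALID_WINDOWMASK_FOR_HEAD(2) == 0x30); /* windows 4 and 5    */
    return 0;
}
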
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT 17:16 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_NONE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_257 (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_1025 (0x00000002) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) + +#define NVC37D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_SAT_COS 15:4 +#define NVC37D_HEAD_SET_PROCAMP_SAT_SINE 27:16 +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 29:29 +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL 31:30 +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_AUTO (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_VIDEO (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_GRAPHICS (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define 
NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC37D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC37D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 
(0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define 
NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define 
NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC37D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVC37D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVC37D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400) +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT 5:4 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) 
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400) +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_MODE 2:2 +#define NVC37D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12 +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14 +#define 
NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400) +#define NVC37D_HEAD_SET_LOCK_CHAIN_POSITION 3:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NVC37D_HEAD_SET_DESKTOP_COLOR(a) (0x00002060 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DESKTOP_COLOR_ALPHA 7:0 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_RED 15:8 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_GREEN 23:16 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_BLUE 31:24 +#define NVC37D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NVC37D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NVC37D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVC37D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVC37D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVC37D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVC37D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVC37D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400) +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400) +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVC37D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVC37D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 +#define NVC37D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE 
31:31 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA 29:28 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_SRGB (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV8_10 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV12 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(a) (0x000020A4 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE 1:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE 5:4 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XRBIAS (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XVYCC (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE 9:8 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INDEX (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001) +#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(a) (0x000020A8 + (a)*0x00000400) +#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT_ORIGIN 31:0 +#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(a) (0x000020AC + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT_HANDLE 31:0 +#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC37D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 4:0 +#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) 
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(i) (0x00000060 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR__SIZE_1 4 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR0 (0x00000060) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR1 (0x00000061) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR2 (0x00000062) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR3 (0x00000063) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR(i) (0x00000060 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR__SIZE_1 4 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR0 (0x00000060) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR1 (0x00000061) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR2 (0x00000062) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR3 (0x00000063) +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVC37D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVC37D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_C_CODE 
31:0
+#define NVC37D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NVC37D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0
+#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000)
+#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001)
+#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4
+#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2
+#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000)
+#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001)
+#define NVC37D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400)
+#define NVC37D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0
+#define NVC37D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC37d_h
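Throughout these class headers a field macro such as NVC37D_HEAD_SET_DITHER_CONTROL_BITS expands to a bare high:low bit pair (here 5:4) rather than to a mask, and the companion value macros (NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS and friends) supply the unshifted values for that field. The pair is meant to be split with a ternary-operator trick of the kind the DRF helpers in this tree's nvmisc.h are built on. The stand-alone sketch below shows the idiom; the FIELD_LO/FIELD_HI/FIELD_MASK/FIELD_NUM names are illustrative stand-ins, not part of this patch.

#include <stdio.h>
#include <stdint.h>

/* Field and value definitions copied from clC37d.h above. */
#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS 5:4
#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)

/* "0 ? hi:lo" evaluates to lo and "1 ? hi:lo" to hi, which is how a
 * bare hi:lo pair can be split without any new syntax. */
#define FIELD_LO(f) (0 ? f)
#define FIELD_HI(f) (1 ? f)
#define FIELD_MASK(f) ((0xFFFFFFFFu >> (31 - FIELD_HI(f) + FIELD_LO(f))) << FIELD_LO(f))
#define FIELD_NUM(f, v) (((uint32_t)(v) << FIELD_LO(f)) & FIELD_MASK(f))

int main(void)
{
    /* Pack TO_8_BITS (1) into bits 5:4 of a method data word. */
    uint32_t word = FIELD_NUM(NVC37D_HEAD_SET_DITHER_CONTROL_BITS,
                              NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS);
    printf("0x%08x\n", word); /* prints 0x00000010 */
    return 0;
}

The resulting word is what a client would send with the NVC37D_HEAD_SET_DITHER_CONTROL(a) method at offset 0x2018 + (a)*0x400.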
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
new file mode 100644
index 0000000..88b5b77
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2003-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __clc37dcrcnotif_h__
+#define __clc37dcrcnotif_h__
+/* This file is autogenerated. Do not edit */
+
+#define NVC37D_NOTIFIER_CRC_STATUS_0 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE 0:0
+#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW 3:3
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW 4:4
+#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW 5:5
+#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_FALSE 0x00000000
+#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_TRUE 0x00000001
+#define NVC37D_NOTIFIER_CRC_STATUS_0_COUNT 27:16
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11 0x0000000B
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC 31:0
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12 0x0000000C
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC 31:0
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13 0x0000000D
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC 31:0
+#define NVC37D_NOTIFIER_CRC_CRC_ENTRY1_21 0x00000015
+
+#endif // __clc37dcrcnotif_h__
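The CRC notifier above is a small memory surface that the display hardware fills in: word 0 carries the DONE and overflow status bits plus a COUNT of valid entries, and the later ENTRY words hold the captured CRCs, with values such as 0x0000000B reading naturally as 32-bit word indices (an assumption consistent with the names, not stated in the header). A minimal polling sketch under those assumptions, with notif as a hypothetical CPU mapping of the notifier and the FIELD_* helpers from the previous note:

#include <stdint.h>

#define FIELD_LO(f) (0 ? f)
#define FIELD_HI(f) (1 ? f)
#define FIELD_VAL(f, w) (((w) >> FIELD_LO(f)) & (0xFFFFFFFFu >> (31 - FIELD_HI(f) + FIELD_LO(f))))

/* Copied from clc37dcrcnotif.h above. */
#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE 0:0
#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE 0x00000001
#define NVC37D_NOTIFIER_CRC_STATUS_0_COUNT 27:16

/* Spin until the hardware marks the notifier done, then return the
 * number of CRC entries it reported (a real driver would sleep and
 * time out instead of spinning). */
static uint32_t wait_for_crc_done(volatile const uint32_t *notif)
{
    while (FIELD_VAL(NVC37D_NOTIFIER_CRC_STATUS_0_DONE, notif[0]) !=
           NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE)
        ;
    return FIELD_VAL(NVC37D_NOTIFIER_CRC_STATUS_0_COUNT, notif[0]);
}

Once COUNT indicates an entry is valid, a word such as the RG CRC would then be read as notif[0x0C] per NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12.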
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h
new file mode 100644
index 0000000..44b21a8
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc37d_sw_spare_h_
+#define _clc37d_sw_spare_h_
+
+/* This file is *not* auto-generated. */
+
+#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF 1:0
+#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_NO_PREF (0x00000000)
+#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_QSYNC (0x00000001)
+
+#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK 2:2
+#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK_FALSE (0x00000000)
+#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK_TRUE (0x00000001)
+
+#endif // _clc37d_sw_spare_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h
new file mode 100644
index 0000000..f46929a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h
@@ -0,0 +1,498 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clC37e_h_
+#define _clC37e_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC37E_WINDOW_CHANNEL_DMA (0x0000C37E)
+
+// dma opcode instructions
+#define NVC37E_DMA
+#define NVC37E_DMA_OPCODE 31:29
+#define NVC37E_DMA_OPCODE_METHOD 0x00000000
+#define NVC37E_DMA_OPCODE_JUMP 0x00000001
+#define NVC37E_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC37E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC37E_DMA_METHOD_COUNT 27:18
+#define NVC37E_DMA_METHOD_OFFSET 13:2
+#define NVC37E_DMA_DATA 31:0
+#define NVC37E_DMA_DATA_NOP 0x00000000
+#define NVC37E_DMA_JUMP_OFFSET 11:2
+#define NVC37E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC37E_PUT (0x00000000)
+#define NVC37E_PUT_PTR 9:0
+#define NVC37E_GET (0x00000004)
+#define NVC37E_GET_PTR 9:0
+#define NVC37E_UPDATE (0x00000200)
+#define NVC37E_UPDATE_RELEASE_ELV 0:0
+#define NVC37E_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC37E_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000)
+#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001)
+#define NVC37E_GET_LINE (0x00000208)
+#define NVC37E_GET_LINE_LINE 15:0
+#define NVC37E_SET_SEMAPHORE_CONTROL (0x0000020C)
+#define NVC37E_SET_SEMAPHORE_CONTROL_OFFSET 7:0
+#define
NVC37E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC37E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC37E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC37E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218) +#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NVC37E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C) +#define NVC37E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC37E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC37E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC37E_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC37E_SET_SIZE (0x00000224) +#define NVC37E_SET_SIZE_WIDTH 15:0 +#define NVC37E_SET_SIZE_HEIGHT 31:16 +#define NVC37E_SET_STORAGE (0x00000228) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC37E_SET_STORAGE_MEMORY_LAYOUT 4:4 +#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC37E_SET_PARAMS (0x0000022C) +#define NVC37E_SET_PARAMS_FORMAT 7:0 +#define NVC37E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC37E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC37E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC37E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC37E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC37E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC37E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC37E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC37E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC37E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC37E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC37E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC37E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422R (0x00000037) +#define NVC37E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422R (0x00000057) +#define NVC37E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N444 (0x0000005A) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N420 (0x0000005B) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define 
NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422R (0x00000077) +#define NVC37E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N444 (0x0000007A) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N420 (0x0000007B) +#define NVC37E_SET_PARAMS_COLOR_SPACE 9:8 +#define NVC37E_SET_PARAMS_COLOR_SPACE_RGB (0x00000000) +#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC37E_SET_PARAMS_INPUT_RANGE 13:12 +#define NVC37E_SET_PARAMS_INPUT_RANGE_BYPASS (0x00000000) +#define NVC37E_SET_PARAMS_INPUT_RANGE_LIMITED (0x00000001) +#define NVC37E_SET_PARAMS_INPUT_RANGE_FULL (0x00000002) +#define NVC37E_SET_PARAMS_UNDERREPLICATE 16:16 +#define NVC37E_SET_PARAMS_UNDERREPLICATE_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_UNDERREPLICATE_ENABLE (0x00000001) +#define NVC37E_SET_PARAMS_DE_GAMMA 21:20 +#define NVC37E_SET_PARAMS_DE_GAMMA_NONE (0x00000000) +#define NVC37E_SET_PARAMS_DE_GAMMA_SRGB (0x00000001) +#define NVC37E_SET_PARAMS_DE_GAMMA_YUV8_10 (0x00000002) +#define NVC37E_SET_PARAMS_DE_GAMMA_YUV12 (0x00000003) +#define NVC37E_SET_PARAMS_CSC 17:17 +#define NVC37E_SET_PARAMS_CSC_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_CSC_ENABLE (0x00000001) +#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC37E_SET_PARAMS_SWAP_UV 19:19 +#define NVC37E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC37E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC37E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC37E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC37E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC37E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC37E_SET_OFFSET_ORIGIN 31:0 +#define NVC37E_SET_PROCESSING (0x00000280) +#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS 0:0 +#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS_DISABLE (0x00000000) +#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS_ENABLE (0x00000001) +#define NVC37E_SET_CONVERSION_RED (0x00000284) +#define NVC37E_SET_CONVERSION_RED_GAIN 15:0 +#define NVC37E_SET_CONVERSION_RED_OFFSET 31:16 +#define NVC37E_SET_CONVERSION_GREEN (0x00000288) +#define NVC37E_SET_CONVERSION_GREEN_GAIN 15:0 +#define NVC37E_SET_CONVERSION_GREEN_OFFSET 31:16 +#define NVC37E_SET_CONVERSION_BLUE (0x0000028C) +#define NVC37E_SET_CONVERSION_BLUE_GAIN 15:0 +#define NVC37E_SET_CONVERSION_BLUE_OFFSET 31:16 +#define NVC37E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC37E_SET_POINT_IN_X 15:0 +#define NVC37E_SET_POINT_IN_Y 31:16 +#define NVC37E_SET_SIZE_IN (0x00000298) +#define NVC37E_SET_SIZE_IN_WIDTH 14:0 +#define NVC37E_SET_SIZE_IN_HEIGHT 30:16 +#define NVC37E_SET_SIZE_OUT (0x000002A4) +#define NVC37E_SET_SIZE_OUT_WIDTH 14:0 +#define NVC37E_SET_SIZE_OUT_HEIGHT 30:16 +#define NVC37E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define 
NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC37E_SET_CONTROL_INPUT_LUT (0x000002B0) +#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE 1:0 +#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257 (0x00000000) +#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 (0x00000002) +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE 5:4 +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY (0x00000000) +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XRBIAS (0x00000001) +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XVYCC (0x00000002) +#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE 9:8 +#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INDEX (0x00000000) +#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001) +#define NVC37E_SET_OFFSET_INPUT_LUT (0x000002B4) +#define NVC37E_SET_OFFSET_INPUT_LUT_ORIGIN 31:0 +#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT (0x000002B8) +#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT_HANDLE 31:0 +#define NVC37E_SET_CSC_RED2RED (0x000002BC) +#define NVC37E_SET_CSC_RED2RED_COEFF 18:0 +#define NVC37E_SET_CSC_GREEN2RED (0x000002C0) +#define NVC37E_SET_CSC_GREEN2RED_COEFF 18:0 +#define NVC37E_SET_CSC_BLUE2RED (0x000002C4) +#define NVC37E_SET_CSC_BLUE2RED_COEFF 18:0 +#define NVC37E_SET_CSC_CONSTANT2RED (0x000002C8) +#define NVC37E_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NVC37E_SET_CSC_RED2GREEN (0x000002CC) +#define NVC37E_SET_CSC_RED2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_GREEN2GREEN (0x000002D0) +#define NVC37E_SET_CSC_GREEN2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_BLUE2GREEN (0x000002D4) +#define NVC37E_SET_CSC_BLUE2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_CONSTANT2GREEN (0x000002D8) +#define NVC37E_SET_CSC_CONSTANT2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_RED2BLUE (0x000002DC) +#define NVC37E_SET_CSC_RED2BLUE_COEFF 18:0 +#define NVC37E_SET_CSC_GREEN2BLUE (0x000002E0) +#define NVC37E_SET_CSC_GREEN2BLUE_COEFF 18:0 +#define NVC37E_SET_CSC_BLUE2BLUE (0x000002E4) +#define NVC37E_SET_CSC_BLUE2BLUE_COEFF 18:0 +#define NVC37E_SET_CSC_CONSTANT2BLUE (0x000002E8) +#define NVC37E_SET_CSC_CONSTANT2BLUE_COEFF 18:0 +#define NVC37E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC37E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define 
NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO 
(0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_KEY_ALPHA (0x000002F8) +#define NVC37E_SET_KEY_ALPHA_MIN 15:0 +#define NVC37E_SET_KEY_ALPHA_MAX 31:16 +#define NVC37E_SET_KEY_RED_CR (0x000002FC) +#define NVC37E_SET_KEY_RED_CR_MIN 15:0 +#define NVC37E_SET_KEY_RED_CR_MAX 31:16 +#define NVC37E_SET_KEY_GREEN_Y (0x00000300) +#define NVC37E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC37E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC37E_SET_KEY_BLUE_CB (0x00000304) +#define NVC37E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC37E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC37E_SET_PRESENT_CONTROL (0x00000308) +#define NVC37E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC37E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC37E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC37E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC37E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC37E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC37E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC37E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC37E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC37E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define 
NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE 
(0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE 
(0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC37e_h
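NVC37E_WINDOW_CHANNEL_DMA is driven through a push buffer, and the NVC37E_DMA_* fields near the top of the file describe the 32-bit header that precedes each burst of method data: OPCODE in bits 31:29, METHOD_COUNT in 27:18, and METHOD_OFFSET in 13:2. Because method byte offsets are 4-byte aligned, placing offset/4 in bits 13:2 reproduces the byte offset inside the header. A sketch under those assumptions follows; push_method() and its calling convention are illustrative, not part of the class.

#include <stdint.h>

#define FIELD_LO(f) (0 ? f)
#define FIELD_HI(f) (1 ? f)
#define FIELD_MASK(f) ((0xFFFFFFFFu >> (31 - FIELD_HI(f) + FIELD_LO(f))) << FIELD_LO(f))
#define FIELD_NUM(f, v) (((uint32_t)(v) << FIELD_LO(f)) & FIELD_MASK(f))

/* Copied from clc37e.h above. */
#define NVC37E_DMA_OPCODE 31:29
#define NVC37E_DMA_OPCODE_METHOD 0x00000000
#define NVC37E_DMA_METHOD_COUNT 27:18
#define NVC37E_DMA_METHOD_OFFSET 13:2

/* Queue "count" data words for the method at byte offset "method",
 * returning the advanced put pointer. */
static uint32_t *push_method(uint32_t *pb, uint32_t method, uint32_t count,
                             const uint32_t *data)
{
    *pb++ = FIELD_NUM(NVC37E_DMA_OPCODE, NVC37E_DMA_OPCODE_METHOD) |
            FIELD_NUM(NVC37E_DMA_METHOD_COUNT, count) |
            FIELD_NUM(NVC37E_DMA_METHOD_OFFSET, method / 4);
    while (count--)
        *pb++ = *data++;
    return pb;
}

For example, push_method(pb, 0x00000224 /* NVC37E_SET_SIZE */, 1, &size_word) would queue a single SET_SIZE method; the client would then advance the channel's PUT pointer so the display engine fetches it.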
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h
new file mode 100644
index 0000000..12d200f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc570_h_
+#define _clc570_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+#include "class/cl5070.h"
+
+#define NVC570_DISPLAY (0x0000C570)
+
+typedef struct
+{
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numSors; // Number of SORs in this chip/display
+    NvU32 numPiors; // Number of PIORs in this chip/display
+} NVC570_ALLOCATION_PARAMETERS;
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc570_h_ */
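NVC570_ALLOCATION_PARAMETERS is the argument block a client hands to the resource manager when instantiating the NVC570_DISPLAY object; the three counts describe how many HEADs, SORs, and PIORs the display owns. A small sketch (the counts shown are illustrative only, and the allocation call that consumes the struct is driver-specific and elided):

#include "nvtypes.h"
#include "class/clc570.h"

/* Illustrative parameter block for allocating class 0xC570. */
static NVC570_ALLOCATION_PARAMETERS dispAllocParams = {
    .numHeads = 4,
    .numSors = 4,
    .numPiors = 0,
};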
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h
new file mode 100644
index 0000000..a7bb1f4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h
@@ -0,0 +1,598 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc573_h_
+#define _clc573_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC573_DISP_CAPABILITIES 0xC573
+
+typedef volatile struct _clc573_tag0 {
+    NvU32 dispCapabilities[0x400];
+} _NvC573DispCapabilities,NvC573DispCapabilities_Map ;
+
+
+#define NVC573_SYS_CAP 0x0 /* RW-4R */
+#define NVC573_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD0_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD1_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD2_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD3_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD4_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD5_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD6_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */
+#define NVC573_SYS_CAP_HEAD7_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */
+#define NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */
+#define NVC573_SYS_CAP_HEAD_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */
+#define NVC573_SYS_CAP_SOR0_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */
+#define NVC573_SYS_CAP_SOR1_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */
+#define NVC573_SYS_CAP_SOR2_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */
+#define NVC573_SYS_CAP_SOR3_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */
+#define NVC573_SYS_CAP_SOR4_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */
+#define NVC573_SYS_CAP_SOR5_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */
+#define NVC573_SYS_CAP_SOR6_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */
+#define NVC573_SYS_CAP_SOR7_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */
+#define NVC573_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */
+#define NVC573_SYS_CAP_SOR_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB 0x4 /* RW-4R */
+#define NVC573_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW0_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW1_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW2_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW3_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW4_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW5_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW6_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW6_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW7_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */
+#define NVC573_SYS_CAPB_WINDOW8_EXISTS_INIT 0x00000000 /* RWI-V */
+#define NVC573_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC573_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define 
NVC573_SYS_CAPB_WINDOW22_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS 25:25 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ 
+#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define 
NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT 19:19 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE 3:0 /* RWIUF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE 13:8 /* RWIUF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC573_SOR_CAP__SIZE_1 8 /* */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V 
*/ +#define NVC573_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_TMDS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC573_SOR_CAP_SDI_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC573_SOR_CAP_DP_A_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC573_SOR_CAP_DP_B_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC573_SOR_CAP_DP_INTERLACE_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC573_SOR_CAP_DP_8_LANES_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* 
RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_INIT 0x00000000 /* RWI-V */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION_INIT 0x00000000 /* RWI-V */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP_INIT 0x00000000 /* RWI-V */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */
+#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP_INIT 0x00000000 /* RWI-V */
+
+#ifdef __cplusplus
+};
+#endif /* extern C */
+#endif //_clc573_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h
new file mode 100644
index 0000000..6df8d03
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc574_h_
+#define _clc574_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+#define UVM_CHANNEL_RETAINER (0x0000C574)
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hChannel;
+}NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS;
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc574_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h
new file mode 100644
index 0000000..54eb741
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 1993-2020, NVIDIA CORPORATION. All rights reserved.
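The clc573.h capability registers above are consumed by treating each define as a bit range rather than a value: a field define such as NVC573_SYS_CAP_HEAD_EXISTS(i) expands to the textual range (0+(i)):(0+(i)), and DRF-style helpers take it apart with the conditional-operator trick (0 ? h:l yields l, 1 ? h:l yields h). Below is a minimal sketch of decoding NVC573_SYS_CAP that way; the FIELD_* macros and CountHeads are illustrative stand-ins written for this sketch, not names from this patch, and how the caps region gets mapped is assumed to happen elsewhere.

#include "nvtypes.h"

/* Helpers for this sketch only; the driver has its own DRF-style macros.
 * For a field define f expanding to "h:l", "0 ? f" evaluates to l and
 * "1 ? f" evaluates to h. */
#define FIELD_LO(f)     (0 ? f)
#define FIELD_HI(f)     (1 ? f)
#define FIELD_MASK(f)   (0xFFFFFFFFU >> (31 - FIELD_HI(f) + FIELD_LO(f)))
#define FIELD_VAL(f, v) (((v) >> FIELD_LO(f)) & FIELD_MASK(f))

/* Count the heads reported by the NVC573_SYS_CAP word of a mapped
 * NvC573DispCapabilities region. */
static NvU32 CountHeads(const volatile NvU32 *caps)
{
    NvU32 sysCap = caps[NVC573_SYS_CAP / sizeof(NvU32)];
    NvU32 count = 0;
    NvU32 i;

    for (i = 0; i < NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1; i++)
    {
        if (FIELD_VAL(NVC573_SYS_CAP_HEAD_EXISTS(i), sysCap) ==
            NVC573_SYS_CAP_HEAD_EXISTS_YES)
        {
            count++;
        }
    }
    return count;
}

The same helpers work for multi-bit fields; for example, FIELD_VAL(NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH, capa) would yield 0 through 3 for the 32B, 64B, 128B, and 256B encodings defined above.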
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc57a__h_ +#define _clc57a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57A_CURSOR_IMM_CHANNEL_PIO (0x0000C57A) + +typedef volatile struct _clc57a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC57ADispCursorImmControlPio; + +#define NVC57A_FREE (0x00000008) +#define NVC57A_FREE_COUNT 5:0 +#define NVC57A_UPDATE (0x00000200) +#define NVC57A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define 
NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define 
NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) 
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clc57a__h_
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h
new file mode 100644
index 0000000..545ccc5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h
@@ -0,0 +1,64 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC57b_h_
+#define _clC57b_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC57B_WINDOW_IMM_CHANNEL_DMA (0x0000C57B)
+
+// dma opcode instructions
+#define NVC57B_DMA
+#define NVC57B_DMA_OPCODE 31:29
+#define NVC57B_DMA_OPCODE_METHOD 0x00000000
+#define NVC57B_DMA_OPCODE_JUMP 0x00000001
+#define NVC57B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC57B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC57B_DMA_METHOD_COUNT 27:18
+#define NVC57B_DMA_METHOD_OFFSET 13:2
+#define NVC57B_DMA_DATA 31:0
+#define NVC57B_DMA_DATA_NOP 0x00000000
+#define NVC57B_DMA_JUMP_OFFSET 11:2
+#define NVC57B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC57B_PUT (0x00000000)
+#define NVC57B_PUT_PTR 9:0
+#define NVC57B_GET (0x00000004)
+#define NVC57B_GET_PTR 9:0
+#define NVC57B_UPDATE (0x00000200)
+#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW 1:1
+#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC57B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC57B_SET_POINT_OUT_X 15:0
+#define NVC57B_SET_POINT_OUT_Y 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC57b_h
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h
new file mode 100644
index 0000000..4f415d0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h
@@ -0,0 +1,1277 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
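In both the window-immediate class above and the core class that follows, methods reach the hardware through a pushbuffer whose header word is assembled from the *_DMA_OPCODE (31:29), *_DMA_METHOD_COUNT (27:18), and *_DMA_METHOD_OFFSET (13:2) fields. A minimal sketch of packing one such header follows, again with illustration-only helpers (MethodHeader and the FIELD_* names are hypothetical, not part of this patch), under the assumption that this class follows the usual NVDisplay DMA encoding:

#include "nvtypes.h"

/* Illustration-only helpers, as in the earlier sketch.  FIELD_SHIFT_IN does
 * not mask, so callers must keep "v" within the field's width. */
#define FIELD_LO(f)          (0 ? f)
#define FIELD_SHIFT_IN(f, v) (((NvU32)(v)) << FIELD_LO(f))

/* Build a header that sends "count" incrementing data words starting at the
 * method with byte offset "offset".  METHOD_OFFSET covers bits 13:2, so the
 * dword offset (offset / 4) lands at bit 2, i.e. the aligned byte offset is
 * stored in place. */
static NvU32 MethodHeader(NvU32 offset, NvU32 count)
{
    return FIELD_SHIFT_IN(NVC57B_DMA_OPCODE, NVC57B_DMA_OPCODE_METHOD) |
           FIELD_SHIFT_IN(NVC57B_DMA_METHOD_COUNT, count) |
           FIELD_SHIFT_IN(NVC57B_DMA_METHOD_OFFSET, offset / 4);
}

For example, MethodHeader(NVC57B_SET_POINT_OUT(0), 1) followed by one data word with NVC57B_SET_POINT_OUT_X and NVC57B_SET_POINT_OUT_Y packed the same way would, under that assumption, submit a SetPointOut method for index 0.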
+ */
+
+#ifndef _clC57d_h_
+#define _clC57d_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC57D_CORE_CHANNEL_DMA (0x0000C57D)
+
+#define NV_DISP_NOTIFIER 0x00000000
+#define NV_DISP_NOTIFIER_SIZEOF 0x00000010
+#define NV_DISP_NOTIFIER__0 0x00000000
+#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0
+#define NV_DISP_NOTIFIER__0_FIELD 8:8
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000
+#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001
+#define NV_DISP_NOTIFIER__0_R1 15:10
+#define NV_DISP_NOTIFIER__0_R2 23:16
+#define NV_DISP_NOTIFIER__0_R3 29:24
+#define NV_DISP_NOTIFIER__0_STATUS 31:30
+#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000
+#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001
+#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002
+#define NV_DISP_NOTIFIER__1 0x00000001
+#define NV_DISP_NOTIFIER__1_R4 31:0
+#define NV_DISP_NOTIFIER__2 0x00000002
+#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0
+#define NV_DISP_NOTIFIER__3 0x00000003
+#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0
+
+
+// dma opcode instructions
+#define NVC57D_DMA
+#define NVC57D_DMA_OPCODE 31:29
+#define NVC57D_DMA_OPCODE_METHOD 0x00000000
+#define NVC57D_DMA_OPCODE_JUMP 0x00000001
+#define NVC57D_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC57D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC57D_DMA_METHOD_COUNT 27:18
+#define NVC57D_DMA_METHOD_OFFSET 13:2
+#define NVC57D_DMA_DATA 31:0
+#define NVC57D_DMA_DATA_NOP 0x00000000
+#define NVC57D_DMA_JUMP_OFFSET 11:2
+#define NVC57D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to
+#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1)
+#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1)))
+
+// class methods
+#define NVC57D_PUT (0x00000000)
+#define NVC57D_PUT_PTR 9:0
+#define NVC57D_GET (0x00000004)
+#define NVC57D_GET_PTR 9:0
+#define NVC57D_UPDATE (0x00000200)
+#define NVC57D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVC57D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVC57D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVC57D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVC57D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVC57D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVC57D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NVC57D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NVC57D_UPDATE_RELEASE_ELV 0:0
+#define NVC57D_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVC57D_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define
NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC57D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC57D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC57D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC57D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC57D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC57D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL (0x00000210) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define 
NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define 
NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define 
NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC57D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC57D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC57D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC57D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC57D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC57D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC57D_SOR_SET_CONTROL(a) 
(0x00000300 + (a)*0x00000020) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DSI (0x0000000A) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC57D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC57D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC57D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) +#define NVC57D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC57D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC57D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC57D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define 
NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define 
NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define 
NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+
+#define NVC57D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
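The defines in this header encode three conventions worth spelling out: a bare `FIELD hi:lo` value names a bit range inside a 32-bit method word; indexed methods such as NVC57D_HEAD_SET_PROCAMP(a) step through method space in fixed strides (0x400 bytes per head, 0x80 per window, 0x20 per SOR); and a pushbuffer entry pairs an NVC57D_DMA header word with the method data that follows it. The sketch below is a minimal, self-contained C illustration of packing a SET_PROCAMP data word and its METHOD-opcode header. The FLD_* helpers are hypothetical stand-ins for the driver's own DRF-style packing macros (nvmisc.h appears earlier in this patch series), and placing the method's byte-offset bits 13:2 into NVC57D_DMA_METHOD_OFFSET is an assumption read off the field layout above, not something this patch states.

/* Minimal sketch, assuming DRF-style field packing.
 * FLD_* names are illustrative, not part of this patch. */
#include <stdint.h>
#include <stdio.h>

/* A "hi:lo" token yields both bounds via the conditional operator:
 * (1 ? 31:29) parses as 1 ? 31 : 29 == 31 (high bit), and
 * (0 ? 31:29) parses as 0 ? 31 : 29 == 29 (low bit). */
#define FLD_BASE(f)   (0 ? f)
#define FLD_EXTENT(f) (1 ? f)
#define FLD_MASK(f)   (0xFFFFFFFFu >> (31 - FLD_EXTENT(f) + FLD_BASE(f)))
#define FLD_NUM(f, n) (((uint32_t)(n) & FLD_MASK(f)) << FLD_BASE(f))

/* Copied verbatim from the class header above. */
#define NVC57D_DMA_OPCODE 31:29
#define NVC57D_DMA_OPCODE_METHOD 0x00000000
#define NVC57D_DMA_METHOD_COUNT 27:18
#define NVC57D_DMA_METHOD_OFFSET 13:2
#define NVC57D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400)
#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)

int main(void)
{
    unsigned head = 1;                             /* heads sit 0x400 bytes apart */
    uint32_t mthd = NVC57D_HEAD_SET_PROCAMP(head); /* 0x2400 */

    /* Header word: METHOD opcode, one data dword, and the method
     * offset (byte offset's bits 13:2, assumed from the layout). */
    uint32_t hdr = FLD_NUM(NVC57D_DMA_OPCODE, NVC57D_DMA_OPCODE_METHOD)
                 | FLD_NUM(NVC57D_DMA_METHOD_COUNT, 1)
                 | FLD_NUM(NVC57D_DMA_METHOD_OFFSET, mthd >> 2);

    /* Data word: two SET_PROCAMP fields OR'd into one 32-bit value. */
    uint32_t data = FLD_NUM(NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE,
                            NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709)
                  | FLD_NUM(NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF,
                            NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE);

    printf("method 0x%04X header 0x%08X data 0x%08X\n",
           (unsigned)mthd, (unsigned)hdr, (unsigned)data);
    return 0;
}

The same conditional-operator trick is why computed-bit defines such as NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i), which expands to ((i)+0):((i)+0), compose cleanly: the macro simply produces the hi:lo pair itself. In the real modules the packing helpers come from the tree's own headers, so the FLD_* macros here are for illustration only.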
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define 
NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F) +#define NVC57D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC57D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 
(0x00000014) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define 
NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define 
NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC57D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005) +#define NVC57D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVC57D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004) +#define NVC57D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) 
(0x0000202C + (a)*0x00000400) +#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400) +#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_MODE 2:2 +#define NVC57D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4 +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define 
NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12 +#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14 +#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400) +#define NVC57D_HEAD_SET_LOCK_CHAIN_POSITION 3:0 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NVC57D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NVC57D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NVC57D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVC57D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVC57D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVC57D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVC57D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVC57D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC57D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0 +#define NVC57D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10 +#define NVC57D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20 +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400) +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVC57D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400) +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVC57D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define 
NVC57D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 +#define NVC57D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004) +#define NVC57D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC57D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVC57D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVC57D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVC57D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NVC57D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NVC57D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0 +#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC57D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400) +#define NVC57D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 +#define NVC57D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16 +#define NVC57D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0 +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400) +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0 +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4 +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18 +#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0 +#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16 +#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400) +#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0 +#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16 +#define 
NVC57D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0 +#define NVC57D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 +#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1 +#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE 3:2 +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC57D_HEAD_SET_OLUT_CONTROL_SIZE 18:8 +#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0 +#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0 +#define NVC57D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0 +#define NVC57D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0 +#define 
NVC57D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0 +#define NVC57D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_TILE_POSITION_X 2:0 +#define NVC57D_HEAD_SET_TILE_POSITION_Y 6:4 +#define NVC57D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE 2:1 +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_DROP (0x00000003) +#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3 +#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4 +#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5 +#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC57D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0 +#define 
NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8 
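[Editor's note] The DSC_PPS_DATA0 through DSC_PPS_DATA31 methods in this run (continuing below through DATA31) carry the 128-byte Picture Parameter Set of the VESA DSC specification: per the field definitions, PPS byte (4*i + b) lands in bits (8*b + 7):(8*b) of DATA word i, and the words sit at consecutive 4-byte offsets from 0x22E0 with the usual 0x400 per-head stride. A minimal packing sketch under those assumptions; set_dsc_pps(), the DSC_PPS_DATA_METHOD macro, and the push() callback are illustrative stand-ins, not part of this header:

#include <stdint.h>

/* Hypothetical helper mirroring the NVC57D_HEAD_SET_DSC_PPS_DATA0..31 layout. */
#define DSC_PPS_DATA_METHOD(head, i) (0x000022E0 + (head)*0x00000400 + (i)*4)

static void set_dsc_pps(int head, const uint8_t pps[128],
                        void (*push)(uint32_t method, uint32_t data))
{
    for (int i = 0; i < 32; i++) {
        /* PPS byte 4*i+b occupies bits 8*b+7 : 8*b of DATA word i,
         * i.e. the bytes are packed little-endian into each word. */
        uint32_t word = (uint32_t)pps[4*i]
                      | (uint32_t)pps[4*i + 1] << 8
                      | (uint32_t)pps[4*i + 2] << 16
                      | (uint32_t)pps[4*i + 3] << 24;
        push(DSC_PPS_DATA_METHOD(head, i), word);
    }
}

This is consistent with the DATA0 fields above: DSC_VERSION_MINOR in 3:0 and DSC_VERSION_MAJOR in 7:4 match the layout of PPS byte 0 in the DSC spec.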
+#define NVC57D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16 +#define 
NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 
15:13 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0 +#define NVC57D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RG_MERGE_MODE 1:0 +#define NVC57D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001) +#define NVC57D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002) +#define NVC57D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC57d_h diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h new file mode 100644 index 0000000..d613410 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h @@ -0,0 +1,657 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _clC57e_h_ +#define _clC57e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57E_WINDOW_CHANNEL_DMA (0x0000C57E) + +// dma opcode instructions +#define NVC57E_DMA +#define NVC57E_DMA_OPCODE 31:29 +#define NVC57E_DMA_OPCODE_METHOD 0x00000000 +#define NVC57E_DMA_OPCODE_JUMP 0x00000001 +#define NVC57E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC57E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC57E_DMA_METHOD_COUNT 27:18 +#define NVC57E_DMA_METHOD_OFFSET 13:2 +#define NVC57E_DMA_DATA 31:0 +#define NVC57E_DMA_DATA_NOP 0x00000000 +#define NVC57E_DMA_JUMP_OFFSET 11:2 +#define NVC57E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC57E_PUT (0x00000000) +#define NVC57E_PUT_PTR 9:0 +#define NVC57E_GET (0x00000004) +#define NVC57E_GET_PTR 9:0 +#define NVC57E_UPDATE (0x00000200) +#define NVC57E_UPDATE_RELEASE_ELV 0:0 +#define NVC57E_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC57E_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC57E_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVC57E_GET_LINE (0x00000208) +#define NVC57E_GET_LINE_LINE 15:0 +#define NVC57E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVC57E_SET_SEMAPHORE_CONTROL_OFFSET 7:0 +#define 
NVC57E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC57E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC57E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC57E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC57E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218) +#define NVC57E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NVC57E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C) +#define NVC57E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC57E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC57E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC57E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC57E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC57E_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC57E_SET_SIZE (0x00000224) +#define NVC57E_SET_SIZE_WIDTH 15:0 +#define NVC57E_SET_SIZE_HEIGHT 31:16 +#define NVC57E_SET_STORAGE (0x00000228) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC57E_SET_STORAGE_MEMORY_LAYOUT 4:4 +#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC57E_SET_PARAMS (0x0000022C) +#define NVC57E_SET_PARAMS_FORMAT 7:0 +#define NVC57E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC57E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC57E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC57E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC57E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC57E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC57E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC57E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC57E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC57E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC57E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC57E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC57E_SET_PARAMS_SWAP_UV 19:19 +#define NVC57E_SET_PARAMS_SWAP_UV_DISABLE 
(0x00000000) +#define NVC57E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVC57E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC57E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC57E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC57E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC57E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC57E_SET_OFFSET_ORIGIN 31:0 +#define NVC57E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC57E_SET_POINT_IN_X 15:0 +#define NVC57E_SET_POINT_IN_Y 31:16 +#define NVC57E_SET_SIZE_IN (0x00000298) +#define NVC57E_SET_SIZE_IN_WIDTH 15:0 +#define NVC57E_SET_SIZE_IN_HEIGHT 31:16 +#define NVC57E_SET_SIZE_OUT (0x000002A4) +#define NVC57E_SET_SIZE_OUT_WIDTH 15:0 +#define NVC57E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVC57E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC57E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC57E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define 
NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 
31:28 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_KEY_ALPHA (0x000002F8) +#define NVC57E_SET_KEY_ALPHA_MIN 15:0 +#define NVC57E_SET_KEY_ALPHA_MAX 31:16 +#define NVC57E_SET_KEY_RED_CR (0x000002FC) +#define NVC57E_SET_KEY_RED_CR_MIN 15:0 +#define NVC57E_SET_KEY_RED_CR_MAX 31:16 +#define NVC57E_SET_KEY_GREEN_Y (0x00000300) +#define NVC57E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC57E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC57E_SET_KEY_BLUE_CB (0x00000304) +#define NVC57E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC57E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC57E_SET_PRESENT_CONTROL (0x00000308) +#define NVC57E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC57E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC57E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC57E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC57E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC57E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC57E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC57E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC57E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC57E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE 
(0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define 
NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) 
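[Editor's note] SET_WINDOW_INTERLOCK_FLAGS, whose per-window bits are enumerated on either side of this note, is a plain 32-bit mask: per the generic INTERLOCK_WITH_WINDOW(i) definition above, bit w selects window channel w. A sketch of building the mask from a list of window indices; the helper name and the push() callback are illustrative, not from the header:

#include <stddef.h>
#include <stdint.h>

static void interlock_with_windows(const unsigned *wins, size_t n,
                                   void (*push)(uint32_t method, uint32_t data))
{
    uint32_t mask = 0;

    for (size_t i = 0; i < n; i++)
        mask |= 1u << wins[i];   /* bit w == INTERLOCK_WITH_WINDOW(w)_ENABLE */

    push(0x00000374 /* NVC57E_SET_WINDOW_INTERLOCK_FLAGS */, mask);
}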
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC57E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC57E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC57E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC57E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC57E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC57E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC57E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC57E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC57E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define 
NVC57E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC57E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC57E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC57E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC57E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVC57E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC57E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC57E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC57E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_ILUT_CONTROL (0x00000440) +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC57E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC57E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC57E_SET_CONTEXT_DMA_ILUT (0x00000444) +#define NVC57E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0 +#define NVC57E_SET_OFFSET_ILUT (0x00000448) +#define NVC57E_SET_OFFSET_ILUT_ORIGIN 31:0 +#define NVC57E_SET_CSC00CONTROL (0x0000045C) +#define NVC57E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC57E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC57E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC57E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC57E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC57E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC57E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC57E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC57E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC57E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC57E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC57E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC57E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) 
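[Editor's note] The CSC00 block defined just above is a 3x4 matrix (C00..C03, C10..C13, C20..C23) whose twelve coefficient methods sit at consecutive 4-byte offsets starting at 0x460, each carrying a 21-bit VALUE field. The header does not document the fixed-point encoding of VALUE, so the sketch below assumes the caller already holds hardware-encoded words; set_csc00() and push() are illustrative names:

#include <stdint.h>

static void set_csc00(const uint32_t coeff[12], /* pre-encoded 21-bit values */
                      void (*push)(uint32_t method, uint32_t data))
{
    /* NVC57E_SET_CSC00CONTROL_ENABLE field is bit 0; write ENABLE_ENABLE. */
    push(0x0000045C /* NVC57E_SET_CSC00CONTROL */, 0x00000001);

    /* C00..C23 occupy 0x460, 0x464, ..., 0x48C in row-major order. */
    for (int i = 0; i < 12; i++)
        push(0x00000460 /* NVC57E_SET_CSC00COEFFICIENT_C00 */ + 4*i,
             coeff[i] & 0x001FFFFF);
}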
+#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE (0x000004A4) +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC57E_SET_CSC0LUT_ENTRY (0x000004A8) +#define NVC57E_SET_CSC0LUT_ENTRY_IDX 10:0 +#define NVC57E_SET_CSC0LUT_ENTRY_VALUE 31:16 +#define NVC57E_SET_CSC01CONTROL (0x000004BC) +#define NVC57E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC57E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC57E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC57E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC57E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC57E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC57E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC57E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC57E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC57E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC57E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVC57E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define NVC57E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_TMO_CONTROL (0x00000500) +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVC57E_SET_TMO_CONTROL_SIZE 18:8 +#define NVC57E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVC57E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE (0x00000514) +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVC57E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_CONTEXT_DMA_TMO_LUT (0x00000528) +#define NVC57E_SET_CONTEXT_DMA_TMO_LUT_HANDLE 
31:0 +#define NVC57E_SET_OFFSET_TMO_LUT (0x0000052C) +#define NVC57E_SET_OFFSET_TMO_LUT_ORIGIN 31:0 +#define NVC57E_SET_CSC10CONTROL (0x0000053C) +#define NVC57E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVC57E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define NVC57E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVC57E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVC57E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVC57E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVC57E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVC57E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVC57E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVC57E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVC57E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVC57E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVC57E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE (0x00000584) +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC57E_SET_CSC1LUT_ENTRY (0x00000588) +#define NVC57E_SET_CSC1LUT_ENTRY_IDX 10:0 +#define NVC57E_SET_CSC1LUT_ENTRY_VALUE 31:16 +#define NVC57E_SET_CSC11CONTROL (0x0000059C) +#define NVC57E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVC57E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVC57E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVC57E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVC57E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVC57E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVC57E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVC57E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVC57E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define 
NVC57E_SET_CSC11COEFFICIENT_C20 (0x000005C0)
+#define NVC57E_SET_CSC11COEFFICIENT_C20_VALUE 20:0
+#define NVC57E_SET_CSC11COEFFICIENT_C21 (0x000005C4)
+#define NVC57E_SET_CSC11COEFFICIENT_C21_VALUE 20:0
+#define NVC57E_SET_CSC11COEFFICIENT_C22 (0x000005C8)
+#define NVC57E_SET_CSC11COEFFICIENT_C22_VALUE 20:0
+#define NVC57E_SET_CSC11COEFFICIENT_C23 (0x000005CC)
+#define NVC57E_SET_CSC11COEFFICIENT_C23_VALUE 20:0
+#define NVC57E_SET_CLAMP_RANGE (0x000005D0)
+#define NVC57E_SET_CLAMP_RANGE_LOW 15:0
+#define NVC57E_SET_CLAMP_RANGE_HIGH 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC57e_h
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h
new file mode 100644
index 0000000..8c106b4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2009-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc57e_sw_h_
+#define _clc57e_sw_h_
+
+/* This file is *not* auto-generated. */
+
+#define NVC57E_WINDOWS_NOTIFY_RM (0x0000058C)
+#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE 0:0
+#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE_FALSE (0x00000000)
+#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE_TRUE (0x00000001)
+#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE 1:1
+#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_OFF (0x00000000)
+#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_ON (0x00000001)
+#define NVC57E_WINDOWS_NOTIFY_RM_ASSOCIATED_HEAD 7:4
+
+#define SwSetMClkSwitch Reserved05[1]
+
+#define NVC57E_SW_SET_MCLK_SWITCH (0x000002B4)
+#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE 0:0
+#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE_FALSE (0x00000000)
+#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE_TRUE (0x00000001)
+
+#endif // _clc57e_sw_h_
+
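A note on the field notation used throughout these class headers: a define of the form `NAME H:L` names the high and low bit positions of a field inside a 32-bit method data word, and the value defines (`_TRUE`, `_ENABLE`, ...) are relative to that field. The `SwSetMClkSwitch Reserved05[1]` define above evidently maps this software-only method onto a reserved slot of the generated channel struct. The sketch below shows how an `H:L` pair can be packed into a data word; the `LOW_BIT`/`HIGH_BIT`/`FIELD_*` helpers are local stand-ins written for this example (the driver tree's nvmisc.h carries DRF-style helpers for the same job), relying on the fact that `H:L` parses as a conditional expression, so `(0 ? 31:16)` evaluates to 16 and `(1 ? 31:16)` to 31.

#include <stdint.h>
#include <stdio.h>

/* Copied from clc57esw.h above. */
#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE 0:0
#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE_TRUE (0x00000001)

/* H:L helpers: "0 ? H:L" picks L, "1 ? H:L" picks H. */
#define LOW_BIT(f) ((0 ? f) % 32)
#define HIGH_BIT(f) ((1 ? f) % 32)
#define FIELD_MASK(f) (0xFFFFFFFFu >> (31 - HIGH_BIT(f) + LOW_BIT(f)))
#define FIELD_NUM(f, v) (((uint32_t)(v) & FIELD_MASK(f)) << LOW_BIT(f))

int main(void)
{
    /* Data word for the SW_SET_MCLK_SWITCH method with ENABLE = TRUE. */
    uint32_t data = FIELD_NUM(NVC57E_SW_SET_MCLK_SWITCH_ENABLE,
                              NVC57E_SW_SET_MCLK_SWITCH_ENABLE_TRUE);
    printf("0x%08X\n", data); /* prints 0x00000001 */
    return 0;
}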
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h
new file mode 100644
index 0000000..e981a30
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc670_h_
+#define _clc670_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+#define NVC670_DISPLAY (0x0000C670)
+
+typedef struct
+{
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numSors;  // Number of SORs in this chip/display
+    NvU32 numDsis;  // Number of DSIs in this chip/display
+} NVC670_ALLOCATION_PARAMETERS;
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc670_h_ */
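NVC670_ALLOCATION_PARAMETERS above is the parameter block passed when allocating the 0xC670 display object through the resource manager. A minimal sketch of using it follows; `hypothetical_rm_alloc` and the handle arguments are placeholders for the RM allocation entry point, which this patch does not show, and whether RM fills in the three counts or expects the caller to is not established by the header alone.

#include <string.h>
#include "nvtypes.h"
#include "class/clc670.h"

/* Placeholder for the resource-manager allocation call (not in this patch). */
extern int hypothetical_rm_alloc(NvU32 hClient, NvU32 hParent, NvU32 hObject,
                                 NvU32 hClass, void *pAllocParams);

int alloc_nvc670_display(NvU32 hClient, NvU32 hDevice, NvU32 hDisp)
{
    NVC670_ALLOCATION_PARAMETERS allocParams;

    /* Zero-initialize the block before handing it to RM. */
    memset(&allocParams, 0, sizeof(allocParams));

    return hypothetical_rm_alloc(hClient, hDevice, hDisp,
                                 NVC670_DISPLAY, &allocParams);
}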
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h
new file mode 100644
index 0000000..11f77d7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h
@@ -0,0 +1,47 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc671_h_
+#define _clc671_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC671_DISP_SF_USER (0x000C671)
+
+typedef volatile struct _clc671_tag0 {
+    NvU32 dispSfUserOffset[0x400];
+} _NvC671DispSfUser, NvC671DispSfUserMap;
+
+#define NVC671_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */
+#define NVC671_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */
+#define NVC671_SF_HDMI_INFO_CTRL(i,j) (0x000E0000-0x000E0000+(i)*1024+(j)*64) /* RW-4A */
+#define NVC671_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */
+#define NVC671_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif // _clc671_h_
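The `(i,j)` macro above is the generator's way of expressing a two-dimensional register array: `i` (up to __SIZE_1 = 4) strides by 1024 bytes and `j` (up to __SIZE_2 = 5) by 64 bytes, and the odd-looking `0x000E0000-0x000E0000` term is the absolute register address minus the base of the aperture that `dispSfUserOffset` maps, leaving a byte offset relative to that 4 KB window. A worked example (reading `i` as the SF/head index and `j` as the infoframe index is an inference from the IDX defines above):

#include <stdio.h>
#include "nvtypes.h"   /* NvU32, needed by clc671.h */
#include "class/clc671.h"

int main(void)
{
    /* Byte offset of the VSI infoframe control register for SF index 2,
     * relative to the start of the NvC671DispSfUserMap aperture. */
    unsigned offset = NVC671_SF_HDMI_INFO_CTRL(2, NVC671_SF_HDMI_INFO_IDX_VSI);

    printf("0x%04X\n", offset);   /* 2*1024 + 4*64 = 0x0900 */
    return 0;
}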
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h
new file mode 100644
index 0000000..7ae1334
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h
@@ -0,0 +1,399 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc673_h_
+#define _clc673_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC673_DISP_CAPABILITIES 0xC673
+
+typedef volatile struct _clc673_tag0 {
+    NvU32 dispCapabilities[0x400];
+} _NvC673DispCapabilities, NvC673DispCapabilities_Map;
+
+
+#define NVC673_SYS_CAP 0x0 /* RW-4R */
+#define NVC673_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */
+#define NVC673_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */
+#define NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */
+#define NVC673_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */
+#define NVC673_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */
+#define NVC673_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */
+#define NVC673_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */
+#define NVC673_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */
+#define NVC673_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */
+#define NVC673_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */
+#define NVC673_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */
+#define NVC673_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */
+#define NVC673_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */
+#define NVC673_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */
+#define NVC673_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */
+#define
NVC673_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI0_EXISTS 20:20 /* RWIVF */ +#define NVC673_SYS_CAP_DSI0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI1_EXISTS 21:21 /* RWIVF */ +#define NVC673_SYS_CAP_DSI1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI2_EXISTS 22:22 /* RWIVF */ +#define NVC673_SYS_CAP_DSI2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI3_EXISTS 23:23 /* RWIVF */ +#define NVC673_SYS_CAP_DSI3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI_EXISTS(i) (20+(i)):(20+(i)) /* RWIVF */ +#define NVC673_SYS_CAP_DSI_EXISTS__SIZE_1 4 /* */ +#define NVC673_SYS_CAP_DSI_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION 18:18 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ 
+#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC 0x18 /* RW-4R */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE 1:0 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_32B 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_64B 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_128B 0x00000002 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_256B 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED 6:4 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_NONE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_TWO 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_FOUR 0x00000002 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_EIGHT 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_SIXTEEN 0x00000004 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR 11:11 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_FALSE 0x00000000 /* RWI-V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP 12:12 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_FALSE 0x00000000 /* RWI-V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT 19:19 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT 23:23 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_TRUE 0x00000001 /* RW--V */ +#define 
NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT 24:24 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ 9:6 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPD__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE 3:0 /* RWIUF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE 13:8 /* RWIUF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define 
NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPF(i) (0x694+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPF__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPF_VFILTER_MAX_PIXELS 15:0 /* RWIVF */ +#define NVC673_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC673_SOR_CAP__SIZE_1 8 /* */ +#define NVC673_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC673_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC673_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC673_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC673_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC673_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC673_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC673_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC673_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_HDMI_FRL 28:28 /* RWIVF */ +#define NVC673_SOR_CAP_HDMI_FRL_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_HDMI_FRL_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define 
NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ 9:6 /* RWIVF */ +#define 
NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD 14:14 /* RWIVF */ +#define 
NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc673_h_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h new file mode 100644 index 0000000..ab6f1d4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h @@ -0,0 +1,181 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +#ifndef _clc67a__h_ +#define _clc67a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67A_CURSOR_IMM_CHANNEL_PIO (0x0000C67A) + +typedef volatile struct _clc67a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC67ADispCursorImmControlPio; + +#define NVC67A_FREE (0x00000008) +#define NVC67A_FREE_COUNT 5:0 +#define NVC67A_UPDATE (0x00000200) +#define NVC67A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define 
NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define 
NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) 
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clc67a_h
+
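The window interlock flags above dedicate one bit per window channel, so the data word for NVC67A_SET_WINDOW_INTERLOCK_FLAGS is a simple OR of per-window bits. A minimal sketch, with a hypothetical caller-supplied window list (how the word is then written into the cursor channel is outside this header):

#include <stdint.h>

/* Build the data word for NVC67A_SET_WINDOW_INTERLOCK_FLAGS: each
 * ..._WINDOWw field is the single bit w:w, so ORing in 1u << w selects
 * ..._WINDOWw_ENABLE and leaving it clear selects ..._WINDOWw_DISABLE. */
static uint32_t window_interlock_flags(const unsigned *windows, int count)
{
    uint32_t flags = 0;   /* all windows default to _DISABLE */
    for (int i = 0; i < count; i++)
        flags |= 1u << windows[i];
    return flags;
}

/* Example: interlocking with windows 2 and 3 yields 0x0000000C. */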
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h
new file mode 100644
index 0000000..c9779a0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h
@@ -0,0 +1,66 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+
+#ifndef _clC67b_h_
+#define _clC67b_h_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NVC67B_WINDOW_IMM_CHANNEL_DMA (0x0000C67B)
+
+// dma opcode instructions
+#define NVC67B_DMA
+#define NVC67B_DMA_OPCODE 31:29
+#define NVC67B_DMA_OPCODE_METHOD 0x00000000
+#define NVC67B_DMA_OPCODE_JUMP 0x00000001
+#define NVC67B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC67B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC67B_DMA_METHOD_COUNT 27:18
+#define NVC67B_DMA_METHOD_OFFSET 13:2
+#define NVC67B_DMA_DATA 31:0
+#define NVC67B_DMA_DATA_NOP 0x00000000
+#define NVC67B_DMA_JUMP_OFFSET 11:2
+#define NVC67B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+// class methods
+#define NVC67B_PUT (0x00000000)
+#define NVC67B_PUT_PTR 9:0
+#define NVC67B_GET (0x00000004)
+#define NVC67B_GET_PTR 9:0
+#define NVC67B_UPDATE (0x00000200)
+#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW 1:1
+#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC67B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004)
+#define NVC67B_SET_POINT_OUT_X 15:0
+#define NVC67B_SET_POINT_OUT_Y 31:16
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC67b_h
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h
new file mode 100644
index 0000000..dd2a4f6
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h
@@ -0,0 +1,1339 @@
+/*******************************************************************************
+    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+ +*******************************************************************************/ + + + +#ifndef _clC67d_h_ +#define _clC67d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67D_CORE_CHANNEL_DMA (0x0000C67D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC67D_DMA +#define NVC67D_DMA_OPCODE 31:29 +#define NVC67D_DMA_OPCODE_METHOD 0x00000000 +#define NVC67D_DMA_OPCODE_JUMP 0x00000001 +#define NVC67D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC67D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC67D_DMA_METHOD_COUNT 27:18 +#define NVC67D_DMA_METHOD_OFFSET 13:2 +#define NVC67D_DMA_DATA 31:0 +#define NVC67D_DMA_DATA_NOP 0x00000000 +#define NVC67D_DMA_JUMP_OFFSET 11:2 +#define NVC67D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC67D_PUT (0x00000000) +#define NVC67D_PUT_PTR 9:0 +#define NVC67D_GET (0x00000004) +#define NVC67D_GET_PTR 9:0 +#define NVC67D_UPDATE (0x00000200) +#define NVC67D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC67D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC67D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC67D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC67D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC67D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC67D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVC67D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC67D_UPDATE_RELEASE_ELV 0:0 +#define NVC67D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC67D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC67D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 
(0x0000000A) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC67D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC67D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC67D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC67D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC67D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL (0x00000210) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define 
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
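[Editor's note: the `high:low` pairs above follow the usual NVIDIA class-header convention: the macro names the most- and least-significant bit of a field inside a 32-bit method payload, and the driver consumes them through the DRF helper macros in nvmisc.h. As a minimal, hypothetical sketch of that convention (the FIELD_* helpers below are illustrative and not part of this header), a SET_INTERLOCK_FLAGS payload that interlocks with cursor 0 and with the core channel could be assembled like this:

#include <stdint.h>

/* Illustrative helpers only; the real driver uses the DRF macros from
 * nvmisc.h to consume the high:low field definitions above. */
#define FIELD_MASK(hi, lo)   ((0xFFFFFFFFu >> (31 - (hi) + (lo))) << (lo))
#define FIELD_VAL(hi, lo, v) (((uint32_t)(v) << (lo)) & FIELD_MASK(hi, lo))

static uint32_t build_interlock_flags(void)
{
    /* INTERLOCK_WITH_CURSOR0 occupies bit 0:0, INTERLOCK_WITH_CORE bit 16:16. */
    return FIELD_VAL(0, 0, 1)       /* ..._INTERLOCK_WITH_CURSOR0_ENABLE */
         | FIELD_VAL(16, 16, 1);    /* ..._INTERLOCK_WITH_CORE_ENABLE    */
}

End of editor's note.]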
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVC67D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004)
+#define NVC67D_GET_RG_SCAN_LINE_LINE 15:0
+#define NVC67D_GET_RG_SCAN_LINE_VBLANK 16:16
+#define NVC67D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000)
+#define NVC67D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001)
+#define NVC67D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004)
+#define NVC67D_SET_GET_BLANKING_CTRL_BLANK 0:0
+#define NVC67D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000)
+#define NVC67D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001)
+#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK 1:1
+#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000)
+#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001)
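[Editor's note: GET_RG_SCAN_LINE(b) is one read-back method per head b, strided 4 bytes apart from 0x220, packing the current raster line in bits 15:0 and an in-vblank flag in bit 16. A minimal, hypothetical decode (the value accessor itself is outside this header):

#include <stdint.h>

/* Decode a GET_RG_SCAN_LINE(b) value for head b:
 * LINE is bits 15:0, VBLANK is bit 16:16 (TRUE while in vertical blank). */
static void decode_rg_scan_line(uint32_t v, unsigned *line, int *in_vblank)
{
    *line      = v & 0xFFFFu;        /* NVC67D_GET_RG_SCAN_LINE_LINE   */
    *in_vblank = (v >> 16) & 0x1u;   /* NVC67D_GET_RG_SCAN_LINE_VBLANK */
}

End of editor's note.]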
+
+#define NVC67D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVC67D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+#define NVC67D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020)
+#define NVC67D_SOR_SET_CUSTOM_REASON_CODE 31:0
+#define NVC67D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020)
+#define NVC67D_SOR_SET_SW_SPARE_A_CODE 31:0
+#define NVC67D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020)
+#define NVC67D_SOR_SET_SW_SPARE_B_CODE 31:0
+
+#define NVC67D_DSI_SET_CONTROL(a) (0x00000500 + (a)*0x00000020)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK 7:0
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVC67D_DSI_SET_CUSTOM_REASON(a) (0x00000504 + (a)*0x00000020)
+#define NVC67D_DSI_SET_CUSTOM_REASON_CODE 31:0
+#define NVC67D_DSI_SET_SW_SPARE_A(a) (0x00000508 + (a)*0x00000020)
+#define NVC67D_DSI_SET_SW_SPARE_A_CODE 31:0
+#define NVC67D_DSI_SET_SW_SPARE_B(a) (0x0000050C + (a)*0x00000020)
+#define NVC67D_DSI_SET_SW_SPARE_B_CODE 31:0
+
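[Editor's note: both the SOR and DSI control methods are arrays strided 0x20 bytes per output resource, and OWNER_MASK is one bit per head, so a resource can be owned by several heads at once. A small sketch of the method-offset arithmetic and payload packing (the channel enqueue path is outside this header; the helper names below are hypothetical):

#include <stdint.h>

/* Method offset for NVC67D_SOR_SET_CONTROL(a): 0x300 + a*0x20. */
static uint32_t sor_set_control_method(unsigned sor)
{
    return 0x00000300u + sor * 0x00000020u;
}

/* Example payload: drive this SOR from heads 0 and 3 using DP protocol A.
 * OWNER_MASK is bits 7:0 (one-hot per head), PROTOCOL is bits 11:8. */
static uint32_t sor_control_payload(void)
{
    uint32_t owner = 0x00000001u | 0x00000008u; /* _OWNER_MASK_HEAD0 | _HEAD3 */
    return owner | (0x8u << 8);                 /* _PROTOCOL_DP_A             */
}

End of editor's note.]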
+#define NVC67D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVC67D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+
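[Editor's note: WINDOW_SET_WINDOW_USAGE_BOUNDS packs several independent capabilities into one 32-bit payload. A hypothetical packing sketch using the field positions defined above (the helper name and parameters are illustrative, not from this header):

#include <stdint.h>

/* MAX_PIXELS_FETCHED_PER_LINE is bits 14:0, ILUT_ALLOWED bit 16,
 * INPUT_SCALER_TAPS bits 22:20 (TAPS_2 = 1, TAPS_5 = 4),
 * UPSCALING_ALLOWED bit 24. */
static uint32_t window_usage_bounds(uint32_t max_pixels, int ilut,
                                    int five_taps, int upscale)
{
    uint32_t v = max_pixels & 0x7FFFu;
    v |= (uint32_t)(ilut ? 1 : 0) << 16;
    v |= (five_taps ? 0x4u : 0x1u) << 20;   /* TAPS_5 : TAPS_2 */
    v |= (uint32_t)(upscale ? 1 : 0) << 24;
    return v;
}

End of editor's note.]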
+#define NVC67D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4
+#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVC67D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVC67D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31
+#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
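[Editor's note: HEAD_SET_PIXEL_CLOCK_FREQUENCY carries the nominal pixel rate in bits 30:0, with a separate adjust flag in bit 31. The field name suggests the NTSC-style 1000/1001 rate adjustment (e.g. a nominal 148,500,000 Hz scanning out near 148,351,648 Hz), but this header does not spell that semantics out, so the sketch below is only an encoding example under that assumption:

#include <stdint.h>

/* Assumed encoding: nominal rate in bits 30:0 (_FREQUENCY_HERTZ),
 * 1000/1001 adjust request in bit 31 (_ADJ1000DIV1001_TRUE). */
static uint32_t pixel_clock_frequency(uint32_t hertz, int adj_1000_div_1001)
{
    uint32_t v = hertz & 0x7FFFFFFFu;
    if (adj_1000_div_1001)
        v |= 1u << 31;
    return v;
}

End of editor's note.]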
+#define NVC67D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004)
+#define NVC67D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVC67D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001)
+#define NVC67D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0
+#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE 0:0
+#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_MODE 2:2
+#define NVC67D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14
+#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000)
+#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16
+#define NVC67D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_LOCK_CHAIN_POSITION 3:0
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0
+#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16
+#define NVC67D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_SIZE_WIDTH 14:0
+#define NVC67D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16
+#define NVC67D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVC67D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVC67D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVC67D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVC67D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVC67D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10
+#define NVC67D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10
+#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20
+#define NVC67D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000)
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001)
+#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002)
+#define NVC67D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0
+#define NVC67D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004)
+#define NVC67D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0
+#define NVC67D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001)
+#define NVC67D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4
+#define NVC67D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_A_CODE 31:0
+#define NVC67D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_B_CODE 31:0
+#define NVC67D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_C_CODE 31:0
+#define NVC67D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400)
+#define NVC67D_HEAD_SET_SW_SPARE_D_CODE 31:0
+#define NVC67D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400)
(0x000021A8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0 +#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(a,b) (0x000021AC + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE_HANDLE 31:0 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(a,b) (0x000021CC + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE 10:10 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_ONE_TIME (0x00000000) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_CONTINUOUS (0x00000001) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RASTER_LINE 30:16 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE(a,b) (0x000021EC + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC67D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400) +#define NVC67D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 +#define NVC67D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16 +#define NVC67D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0 +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400) +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0 +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4 +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18 +#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0 +#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16 +#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400) +#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0 +#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16 +#define NVC67D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000) +#define 
NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0 +#define NVC67D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 +#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1 +#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE 3:2 +#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC67D_HEAD_SET_OLUT_CONTROL_SIZE 18:8 +#define NVC67D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0 +#define NVC67D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0 +#define NVC67D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0 +#define NVC67D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400) +#define 
NVC67D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0 +#define NVC67D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_TILE_POSITION_X 2:0 +#define NVC67D_HEAD_SET_TILE_POSITION_Y 6:4 +#define NVC67D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE 2:1 +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002) +#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3 +#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4 +#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5 +#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC67D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0 
+#define NVC67D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0 +#define 
NVC67D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400) +#define 
NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400) +#define 
NVC67D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0 +#define NVC67D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400) +#define NVC67D_HEAD_SET_RG_MERGE_MODE 1:0 +#define NVC67D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001) +#define NVC67D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002) +#define NVC67D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003) +#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY(a) (0x00002364 + (a)*0x00000400) +#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_START 15:0 +#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_END 31:16 +#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE(a) (0x00002368 + (a)*0x00000400) +#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE_BYTES 15:0 +#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE_TRI_BYTES 31:16 +#define NVC67D_HEAD_SET_HDMI_DSC_HCBLANK(a) (0x0000236C + (a)*0x00000400) +#define NVC67D_HEAD_SET_HDMI_DSC_HCBLANK_WIDTH 15:0 +#define NVC67D_HEAD_SW_RESERVED(a,b) (0x00002370 + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SW_RESERVED_VALUE 31:0 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI(a,b) (0x00002380 + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI_VALUE 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC67d_h diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h new file mode 100644 index 0000000..516e6ea --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h @@ -0,0 +1,700 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + + +#ifndef _clC67e_h_ +#define _clC67e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67E_WINDOW_CHANNEL_DMA (0x0000C67E) + +// dma opcode instructions +#define NVC67E_DMA +#define NVC67E_DMA_OPCODE 31:29 +#define NVC67E_DMA_OPCODE_METHOD 0x00000000 +#define NVC67E_DMA_OPCODE_JUMP 0x00000001 +#define NVC67E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC67E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC67E_DMA_METHOD_COUNT 27:18 +#define NVC67E_DMA_METHOD_OFFSET 13:2 +#define NVC67E_DMA_DATA 31:0 +#define NVC67E_DMA_DATA_NOP 0x00000000 +#define NVC67E_DMA_JUMP_OFFSET 11:2 +#define NVC67E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC67E_PUT (0x00000000) +#define NVC67E_PUT_PTR 9:0 +#define NVC67E_GET (0x00000004) +#define NVC67E_GET_PTR 9:0 +#define NVC67E_UPDATE (0x00000200) +#define NVC67E_UPDATE_RELEASE_ELV 0:0 +#define NVC67E_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC67E_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC67E_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define 
NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_HI (0x00000204) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_HI_VALUE 31:0 +#define NVC67E_GET_LINE (0x00000208) +#define NVC67E_GET_LINE_LINE 15:0 +#define NVC67E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVC67E_SET_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ 11:11 +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_FALSE (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_TRUE (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC67E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC67E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218) +#define NVC67E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NVC67E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C) +#define NVC67E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC67E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC67E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC67E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC67E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67E_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC67E_SET_SIZE (0x00000224) +#define NVC67E_SET_SIZE_WIDTH 15:0 +#define NVC67E_SET_SIZE_HEIGHT 31:16 +#define NVC67E_SET_STORAGE (0x00000228) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC67E_SET_PARAMS (0x0000022C) +#define NVC67E_SET_PARAMS_FORMAT 7:0 +#define NVC67E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC67E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC67E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC67E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC67E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC67E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC67E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC67E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC67E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC67E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define 
NVC67E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC67E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC67E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC67E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC67E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC67E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC67E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVC67E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC67E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC67E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC67E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVC67E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC67E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC67E_SET_PARAMS_SWAP_UV 19:19 +#define NVC67E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVC67E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVC67E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC67E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC67E_SET_SEMAPHORE_RELEASE_HI (0x0000023C) +#define NVC67E_SET_SEMAPHORE_RELEASE_HI_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC67E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC67E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC67E_SET_OFFSET_ORIGIN 31:0 +#define NVC67E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC67E_SET_POINT_IN_X 15:0 +#define NVC67E_SET_POINT_IN_Y 31:16 +#define NVC67E_SET_SIZE_IN (0x00000298) +#define NVC67E_SET_SIZE_IN_WIDTH 15:0 +#define NVC67E_SET_SIZE_IN_HEIGHT 31:16 +#define NVC67E_SET_SIZE_OUT (0x000002A4) +#define NVC67E_SET_SIZE_OUT_WIDTH 15:0 +#define NVC67E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVC67E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC67E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC67E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define 
NVC67E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 
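/* Editor's sketch (reusing the FIELD_* helpers above): one plausible way to
 * fill SET_COMPOSITION_FACTOR_SELECT, blending the window at constant alpha
 * K1 over the background where the color key matches and showing it opaquely
 * where it does not. The blending semantics of these enumerants are an
 * assumption; this header only defines their encodings. */
static inline uint32_t pack_factor_select(void)
{
    return FIELD_SET(NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT,
                     NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1) |
           FIELD_SET(NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT,
                     NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC) |
           FIELD_SET(NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT,
                     NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE) |
           FIELD_SET(NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT,
                     NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO);
}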
+#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_KEY_ALPHA (0x000002F8) +#define NVC67E_SET_KEY_ALPHA_MIN 15:0 +#define NVC67E_SET_KEY_ALPHA_MAX 31:16 +#define NVC67E_SET_KEY_RED_CR (0x000002FC) +#define NVC67E_SET_KEY_RED_CR_MIN 15:0 +#define NVC67E_SET_KEY_RED_CR_MAX 31:16 +#define NVC67E_SET_KEY_GREEN_Y (0x00000300) +#define NVC67E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC67E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC67E_SET_KEY_BLUE_CB (0x00000304) +#define NVC67E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC67E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC67E_SET_PRESENT_CONTROL (0x00000308) +#define NVC67E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_HI (0x0000030C) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_HI_VALUE 31:0 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL (0x00000330) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define 
NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE (0x00000334) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE (0x00000338) +#define NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE_HANDLE 31:0 +#define NVC67E_SET_SCAN_DIRECTION (0x0000033C) +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION 0:0 +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_LEFT (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_RIGHT (0x00000001) +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION 1:1 +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_TOP (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_BOTTOM (0x00000001) +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER 2:2 +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER_FALSE (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER_TRUE (0x00000001) +#define NVC67E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC67E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC67E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC67E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC67E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC67E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC67E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC67E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC67E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 
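/* Editor's sketch: the indexed INTERLOCK_WITH_CURSOR(i) form above expands to
 * a one-bit range at bit i+1, and the ?: trick in the FIELD_* helpers works
 * with a runtime i, so the flags word can be built in a loop. The function
 * name and cursor_mask parameter are assumptions for illustration. */
static inline uint32_t pack_interlock_flags(uint32_t cursor_mask, int with_core)
{
    uint32_t v = FIELD_SET(NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE,
                           with_core
                               ? NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE
                               : NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE);
    /* One interlock bit per cursor channel, bits 1..8 of the word. */
    for (int i = 0; i < NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1; i++) {
        if (cursor_mask & (1u << i)) {
            v |= FIELD_SET(NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i),
                           NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE);
        }
    }
    return v;
}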
+#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define 
NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) 
+#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC67E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC67E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC67E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC67E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC67E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC67E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC67E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC67E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC67E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define NVC67E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC67E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC67E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC67E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC67E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C20 
(0x00000420) +#define NVC67E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC67E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC67E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC67E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_ILUT_CONTROL (0x00000440) +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC67E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC67E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC67E_SET_CONTEXT_DMA_ILUT (0x00000444) +#define NVC67E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0 +#define NVC67E_SET_OFFSET_ILUT (0x00000448) +#define NVC67E_SET_OFFSET_ILUT_ORIGIN 31:0 +#define NVC67E_SET_CSC00CONTROL (0x0000045C) +#define NVC67E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC67E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC67E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC67E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC67E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC67E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC67E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC67E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC67E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC67E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC67E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC67E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC67E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE (0x000004A4) +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC67E_SET_CSC0LUT_ENTRY (0x000004A8) +#define NVC67E_SET_CSC0LUT_ENTRY_IDX 10:0 +#define 
NVC67E_SET_CSC0LUT_ENTRY_VALUE 31:16 +#define NVC67E_SET_CSC01CONTROL (0x000004BC) +#define NVC67E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC67E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC67E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC67E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC67E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC67E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC67E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC67E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC67E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC67E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC67E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVC67E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define NVC67E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_TMO_CONTROL (0x00000500) +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVC67E_SET_TMO_CONTROL_SIZE 18:8 +#define NVC67E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVC67E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE (0x00000514) +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVC67E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_CONTEXT_DMA_TMO_LUT (0x00000528) +#define NVC67E_SET_CONTEXT_DMA_TMO_LUT_HANDLE 31:0 +#define NVC67E_SET_OFFSET_TMO_LUT (0x0000052C) +#define NVC67E_SET_OFFSET_TMO_LUT_ORIGIN 31:0 +#define NVC67E_SET_CSC10CONTROL (0x0000053C) +#define NVC67E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVC67E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define 
NVC67E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVC67E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVC67E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVC67E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVC67E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVC67E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVC67E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVC67E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVC67E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVC67E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVC67E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE (0x00000584) +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC67E_SET_CSC1LUT_ENTRY (0x00000588) +#define NVC67E_SET_CSC1LUT_ENTRY_IDX 10:0 +#define NVC67E_SET_CSC1LUT_ENTRY_VALUE 31:16 +#define NVC67E_SET_CSC11CONTROL (0x0000059C) +#define NVC67E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVC67E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVC67E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVC67E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVC67E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVC67E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVC67E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVC67E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVC67E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C20 (0x000005C0) +#define NVC67E_SET_CSC11COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C21 (0x000005C4) +#define NVC67E_SET_CSC11COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C22 (0x000005C8) +#define NVC67E_SET_CSC11COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C23 (0x000005CC) +#define NVC67E_SET_CSC11COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CLAMP_RANGE (0x000005D0) +#define NVC67E_SET_CLAMP_RANGE_LOW 15:0 
+#define NVC67E_SET_CLAMP_RANGE_HIGH 31:16
+#define NVC67E_SW_RESERVED(b) (0x000005D4 + (b)*0x00000004)
+#define NVC67E_SW_RESERVED_VALUE 31:0
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+#endif // _clC67e_h
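The `HI:LO` tokens in the clC67e method and field definitions above (NVC67E_SET_ILUT_CONTROL_MODE, for instance, is literally the token pair 3:2) follow the DRF bit-range convention used throughout these headers: wrapped in a conditional expression, the pair yields either the high or the low bit index. A minimal sketch of how such fields can be packed; the FLD_* helpers are local to this example, not the driver's own names (the tree's utility headers provide comparable DRF macros):

/* Sketch: packing a DRF-style HI:LO field (local helper macros). */
#define FLD_HI(range)     (1 ? range)   /* selects the high bit index */
#define FLD_LO(range)     (0 ? range)   /* selects the low bit index  */
#define FLD_MASK(range)   (0xFFFFFFFFu >> (31 - FLD_HI(range) + FLD_LO(range)))
#define FLD_NUM(range, n) ((((NvU32)(n)) & FLD_MASK(range)) << FLD_LO(range))

/* Compose an ILUT control word: DIRECT10 mode, interpolation enabled,
 * a 1025-entry LUT. Yields 0x40109 with the definitions above. */
NvU32 ilutControl =
    FLD_NUM(NVC67E_SET_ILUT_CONTROL_MODE,        NVC67E_SET_ILUT_CONTROL_MODE_DIRECT10) |
    FLD_NUM(NVC67E_SET_ILUT_CONTROL_INTERPOLATE, NVC67E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE) |
    FLD_NUM(NVC67E_SET_ILUT_CONTROL_SIZE,        1025);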
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h
new file mode 100644
index 0000000..53851df
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clc770_h_
+#define _clc770_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+#define NVC770_DISPLAY (0x0000C770)
+
+typedef struct
+{
+    NvU32 numHeads; // Number of HEADs in this chip/display
+    NvU32 numSors;  // Number of SORs in this chip/display
+    NvU32 numDsis;  // Number of DSIs in this chip/display
+} NVC770_ALLOCATION_PARAMETERS;
+
+#ifdef __cplusplus
+}; /* extern "C" */
+#endif
+
+#endif /* _clc770_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h
new file mode 100644
index 0000000..3651a58
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clC77F_h_
+#define _clC77F_h_
+
+//
+// This class provides functional support for Display ANYChannel. Display
+// context DMAs bound to ANYChannel can be used on any other display window
+// channels.
+//
+#define NVC77F_ANY_CHANNEL_DMA (0x0000C77F)
+
+#endif // _clC77F_h_
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h
new file mode 100644
index 0000000..90a2626
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h
@@ -0,0 +1,419 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*! \brief
+ * Define compile time symbols for CPU type and operating system type.
+ * This file should only contain preprocessor commands so that
+ * there are no dependencies on other files.
+ *
+ * cpuopsys.h
+ *
+ * Copyright (c) 2001, Nvidia Corporation. All rights reserved.
+ */
+
+/*!
+ * Uniform names are defined for compile time options to distinguish
+ * CPU types and Operating systems.
+ * Distinctions between CPU and OpSys should be orthogonal.
+ *
+ * These uniform names have initially been defined by keying off the
+ * makefile/build names defined for builds in the OpenGL group.
+ * Getting the uniform names defined for other builds may require
+ * different qualifications.
+ *
+ * The file is placed here to allow for the possibility of all driver
+ * components using the same naming convention for conditional compilation.
+ */ + +#ifndef CPUOPSYS_H +#define CPUOPSYS_H + +/*****************************************************************************/ +/* Define all OS/CPU-Chip related symbols */ + +/* ***** WINDOWS variations */ +#if defined(_WIN32) || defined(_WIN16) +# define NV_WINDOWS + +# if defined(_WIN32_WINNT) +# define NV_WINDOWS_NT +# elif defined(_WIN32_WCE) +# define NV_WINDOWS_CE +# else +# define NV_WINDOWS_9X +# endif +#endif /* _WIN32 || defined(_WIN16) */ + +/* ***** Unix variations */ +#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE) +# define NV_LINUX +#endif /* defined(__linux__) */ + +#if defined(__VMWARE__) && !defined(NV_VMWARE) +# define NV_VMWARE +#endif /* defined(__VMWARE__) */ + +/* SunOS + gcc */ +#if defined(__sun__) && defined(__svr4__) && !defined(NV_SUNOS) +# define NV_SUNOS +#endif /* defined(__sun__) && defined(__svr4__) */ + +/* SunOS + Sun Compiler (named SunPro, Studio or Forte) */ +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +# define NV_SUNPRO_C +# define NV_SUNOS +#endif /* defined(_SUNPRO_C) || defined(__SUNPRO_CC) */ + +#if defined(__FreeBSD__) && !defined(NV_BSD) +# define NV_BSD +#endif /* defined(__FreeBSD__) */ + +/* XXXar don't define NV_UNIX on MacOSX or vxworks or QNX */ +#if (defined(__unix__) || defined(__unix) || defined(__INTEGRITY) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */ +# define NV_UNIX +#endif /* defined(__unix__) */ + +#if (defined(__QNX__) || defined(__QNXNTO__)) && !defined(NV_QNX) +# define NV_QNX +#endif + +#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(NV_ANDROID) +# define NV_ANDROID +#endif + +#if defined(DceCore) && !defined(NV_DCECORE) +# define NV_DCECORE +#endif + +/* ***** Apple variations */ +#if defined(macintosh) || defined(__APPLE__) +# define NV_MACINTOSH +# if defined(__MACH__) +# define NV_MACINTOSH_OSX +# else +# define NV_MACINTOSH_OS9 +# endif +# if defined(__LP64__) +# define NV_MACINTOSH_64 +# endif +#endif /* defined(macintosh) */ + +/* ***** VxWorks */ +/* Tornado 2.21 is gcc 2.96 and #defines __vxworks. */ +/* Tornado 2.02 is gcc 2.7.2 and doesn't define any OS symbol, so we rely on */ +/* the build system #defining vxworks. 
*/ +#if defined(__vxworks) || defined(vxworks) +# define NV_VXWORKS +#endif + +/* ***** Integrity OS */ +#if defined(__INTEGRITY) +# if !defined(NV_INTEGRITY) +# define NV_INTEGRITY +# endif +#endif + +/* ***** Processor type variations */ +/* Note: The prefix NV_CPU_* is taken by Nvcm.h */ + +#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */ +/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */ +/* __i386 for Studio compiler on Solaris x86 */ +# define NVCPU_X86 /* any IA32 machine (not x86-64) */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NV_LINUX) && defined(__ia64__) +# define NVCPU_IA64_LINUX /* any IA64 for Linux opsys */ +#endif +#if defined(NVCPU_IA64_WINDOWS) || defined(NVCPU_IA64_LINUX) || defined(IA64) +# define NVCPU_IA64 /* any IA64 for any opsys */ +#endif + +#if (defined(NV_MACINTOSH) && !(defined(__i386__) || defined(__x86_64__))) || defined(__PPC__) || defined(__ppc) +# if defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) +# ifndef NVCPU_PPC64LE +# define NVCPU_PPC64LE /* PPC 64-bit little endian */ +# endif +# else +# ifndef NVCPU_PPC +# define NVCPU_PPC /* any non-PPC64LE PowerPC architecture */ +# endif +# ifndef NV_BIG_ENDIAN +# define NV_BIG_ENDIAN +# endif +# endif +# define NVCPU_FAMILY_PPC +#endif + +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +# define NVCPU_X86_64 /* any x86-64 for any opsys */ +#endif + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +# define NVCPU_FAMILY_X86 +#endif + +#if defined(__riscv) && (__riscv_xlen==64) +# define NVCPU_RISCV64 +# if defined(__nvriscv) +# define NVCPU_NVRISCV64 +# endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +/* + * 32-bit instruction set on, e.g., ARMv7 or AArch32 execution state + * on ARMv8 + */ +# define NVCPU_ARM +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(__aarch64__) || defined(__ARM64__) || defined(_M_ARM64) +# define NVCPU_AARCH64 /* 64-bit A64 instruction set on ARMv8 */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NVCPU_ARM) || defined(NVCPU_AARCH64) +# define NVCPU_FAMILY_ARM +#endif + +#if defined(__SH4__) +# ifndef NVCPU_SH4 +# define NVCPU_SH4 /* Renesas (formerly Hitachi) SH4 */ +# endif +# if defined NV_WINDOWS_CE +# define NVCPU_MIN_PAGE_SHIFT 12 +# endif +#endif + +/* For Xtensa processors */ +#if defined(__XTENSA__) +# define NVCPU_XTENSA +# if defined(__XTENSA_EB__) +# define NV_BIG_ENDIAN +# endif +#endif + + +/* + * Other flavors of CPU type should be determined at run-time. + * For example, an x86 architecture with/without SSE. + * If it can compile, then there's no need for a compile time option. + * For some current GCC limitations, these may be fixed by using the Intel + * compiler for certain files in a Linux build. + */ + +/* The minimum page size can be determined from the minimum page shift */ +#if defined(NVCPU_MIN_PAGE_SHIFT) +#define NVCPU_MIN_PAGE_SIZE (1 << NVCPU_MIN_PAGE_SHIFT) +#endif + +#if defined(NVCPU_IA64) || defined(NVCPU_X86_64) || \ + defined(NV_MACINTOSH_64) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64) +# define NV_64_BITS /* all architectures where pointers are 64 bits */ +#else +/* we assume 32 bits. I don't see a need for NV_16_BITS. */ +#endif + +/* For verification-only features not intended to be included in normal drivers */ + +/* + * New, safer family of #define's -- these ones use 0 vs. 1 rather than + * defined/!defined. 
This is advantageous because if you make a typo, + * say misspelled ENDIAN: + * + * #if NVCPU_IS_BIG_ENDAIN + * + * ...some compilers can give you a warning telling you that you screwed up. + * The compiler can also give you a warning if you forget to #include + * "cpuopsys.h" in your code before the point where you try to use these + * conditionals. + * + * Also, the names have been prefixed in more cases with "CPU" or "OS" for + * increased clarity. You can tell the names apart from the old ones because + * they all use "_IS_" in the name. + * + * Finally, these can be used in "if" statements and not just in #if's. For + * example: + * + * if (NVCPU_IS_BIG_ENDIAN) x = Swap32(x); + * + * Maybe some day in the far-off future these can replace the old #define's. + */ + +#define NV_IS_MODS 0 + +#define NVOS_IS_WINDOWS 0 +#if defined(NV_WINDOWS_CE) +#define NVOS_IS_WINDOWS_CE 1 +#else +#define NVOS_IS_WINDOWS_CE 0 +#endif +#if defined(NV_LINUX) +#define NVOS_IS_LINUX 1 +#else +#define NVOS_IS_LINUX 0 +#endif +#if defined(NV_UNIX) +#define NVOS_IS_UNIX 1 +#else +#define NVOS_IS_UNIX 0 +#endif +#if defined(NV_BSD) +#define NVOS_IS_FREEBSD 1 +#else +#define NVOS_IS_FREEBSD 0 +#endif +#if defined(NV_SUNOS) +#define NVOS_IS_SOLARIS 1 +#else +#define NVOS_IS_SOLARIS 0 +#endif +#define NVOS_IS_VMWARE 0 +#if defined(NV_QNX) +#define NVOS_IS_QNX 1 +#else +#define NVOS_IS_QNX 0 +#endif +#if defined(NV_ANDROID) +#define NVOS_IS_ANDROID 1 +#else +#define NVOS_IS_ANDROID 0 +#endif +#if defined(NV_MACINTOSH) +#define NVOS_IS_MACINTOSH 1 +#else +#define NVOS_IS_MACINTOSH 0 +#endif +#if defined(NV_VXWORKS) +#define NVOS_IS_VXWORKS 1 +#else +#define NVOS_IS_VXWORKS 0 +#endif +#if defined(NV_LIBOS) +#define NVOS_IS_LIBOS 1 +#else +#define NVOS_IS_LIBOS 0 +#endif +#if defined(NV_INTEGRITY) +#define NVOS_IS_INTEGRITY 1 +#else +#define NVOS_IS_INTEGRITY 0 +#endif + +#if defined(NVCPU_X86) +#define NVCPU_IS_X86 1 +#else +#define NVCPU_IS_X86 0 +#endif +#if defined(NVCPU_RISCV64) +#define NVCPU_IS_RISCV64 1 +#else +#define NVCPU_IS_RISCV64 0 +#endif +#if defined(NVCPU_NVRISCV64) +#define NVCPU_IS_NVRISCV64 1 +#else +#define NVCPU_IS_NVRISCV64 0 +#endif +#if defined(NVCPU_IA64) +#define NVCPU_IS_IA64 1 +#else +#define NVCPU_IS_IA64 0 +#endif +#if defined(NVCPU_X86_64) +#define NVCPU_IS_X86_64 1 +#else +#define NVCPU_IS_X86_64 0 +#endif +#if defined(NVCPU_FAMILY_X86) +#define NVCPU_IS_FAMILY_X86 1 +#else +#define NVCPU_IS_FAMILY_X86 0 +#endif +#if defined(NVCPU_PPC) +#define NVCPU_IS_PPC 1 +#else +#define NVCPU_IS_PPC 0 +#endif +#if defined(NVCPU_PPC64LE) +#define NVCPU_IS_PPC64LE 1 +#else +#define NVCPU_IS_PPC64LE 0 +#endif +#if defined(NVCPU_FAMILY_PPC) +#define NVCPU_IS_FAMILY_PPC 1 +#else +#define NVCPU_IS_FAMILY_PPC 0 +#endif +#if defined(NVCPU_ARM) +#define NVCPU_IS_ARM 1 +#else +#define NVCPU_IS_ARM 0 +#endif +#if defined(NVCPU_AARCH64) +#define NVCPU_IS_AARCH64 1 +#else +#define NVCPU_IS_AARCH64 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_FAMILY_ARM 1 +#else +#define NVCPU_IS_FAMILY_ARM 0 +#endif +#if defined(NVCPU_SH4) +#define NVCPU_IS_SH4 1 +#else +#define NVCPU_IS_SH4 0 +#endif +#if defined(NVCPU_XTENSA) +#define NVCPU_IS_XTENSA 1 +#else +#define NVCPU_IS_XTENSA 0 +#endif +#if defined(NV_BIG_ENDIAN) +#define NVCPU_IS_BIG_ENDIAN 1 +#else +#define NVCPU_IS_BIG_ENDIAN 0 +#endif +#if defined(NV_64_BITS) +#define NVCPU_IS_64_BITS 1 +#else +#define NVCPU_IS_64_BITS 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_PCIE_CACHE_COHERENT 0 +#else +#define NVCPU_IS_PCIE_CACHE_COHERENT 1 
+#endif +#if defined(NV_DCECORE) +#define NVOS_IS_DCECORE 1 +#else +#define NVOS_IS_DCECORE 0 +#endif +/*****************************************************************************/ + +#endif /* CPUOPSYS_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h new file mode 100644 index 0000000..00de834 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) control commands and parameters */ + +#define NV0000_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0000,NV0000_CTRL_##cat,idx) + +/* Client command categories (6bits) */ +#define NV0000_CTRL_RESERVED (0x00) +#define NV0000_CTRL_SYSTEM (0x01) +#define NV0000_CTRL_GPU (0x02) +#define NV0000_CTRL_GSYNC (0x03) +#define NV0000_CTRL_DIAG (0x04) +#define NV0000_CTRL_EVENT (0x05) +#define NV0000_CTRL_NVD (0x06) +#define NV0000_CTRL_SWINSTR (0x07) +#define NV0000_CTRL_GSPC (0x08) +#define NV0000_CTRL_PROC (0x09) +#define NV0000_CTRL_SYNC_GPU_BOOST (0x0A) +#define NV0000_CTRL_GPUACCT (0x0B) +#define NV0000_CTRL_VGPU (0x0C) +#define NV0000_CTRL_CLIENT (0x0D) + +// per-OS categories start at highest category and work backwards +#define NV0000_CTRL_OS_WINDOWS (0x3F) +#define NV0000_CTRL_OS_MACOS (0x3E) +#define NV0000_CTRL_OS_UNIX (0x3D) + + +/* + * NV0000_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0000_CTRL_CMD_NULL (0x0) /* finn: Evaluated from "(FINN_NV01_ROOT_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/* _ctrl0000_base_h_ */
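Tying the category table above to the command values used in the per-category headers that follow: a control command places the 6-bit category in bits 15:8 above an 8-bit command index, which is what the per-command "finn: Evaluated from" annotations spell out. A quick worked check of that arithmetic (illustrative only):

/* Category NV0000_CTRL_CLIENT is 0x0D; command index 0x01 in that category
 * therefore encodes as 0x0d01, the value of
 * NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE in ctrl0000client.h below. */
_Static_assert(((0x0D << 8) | 0x01) == 0xd01, "category/index packing");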
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
new file mode 100644
index 0000000..6fdcd88
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
@@ -0,0 +1,166 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000client.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+#include "class/cl0000.h"
+#include "rs_access.h"
+
+/*
+ * NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE
+ *
+ * This command may be used to query the memory address space type associated
+ * with an object.
+ *
+ * Parameters:
+ *   hObject[IN]
+ *     handle of the object to look up
+ *   addrSpaceType[OUT]
+ *     address space type associated with the object's memory descriptor
+ *
+ * Possible status values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_OBJECT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE (0xd01) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS {
+    NvHandle hObject;       /* [in]  - Handle of object to look up */
+    NvU32    mapFlags;      /* [in]  - Flags that will be used when mapping the object */
+    NvU32    addrSpaceType; /* [out] - Memory Address Space Type */
+} NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS;
+
+#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID 0x00000000
+#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM  0x00000001
+#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM  0x00000002
+#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM  0x00000003
+#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC  0x00000004
+
+
+
+/*
+ * NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO
+ *
+ * This command may be used to query information on a handle.
+ */
+#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO (0xd02) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS {
+    NvHandle hObject; /* [in] - Handle of object to look up */
+    NvU32    index;   /* [in] - Type of lookup */
+
+    union {
+        NvHandle hResult; /* [out] - Result of lookup when result is a handle type */
+        NV_DECLARE_ALIGNED(NvU64 iResult, 8); /* [out] - Result of lookup when result is an integer */
+    } data;
+} NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS;
+
+#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_INVALID 0x00000000
+#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT  0x00000001
+#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID 0x00000002
+
+/*
+ * NV0000_CTRL_CMD_CLIENT_GET_ACCESS_RIGHTS
+ *
+ * This command may be used to get this client's access rights for an object.
+ * The object against which access rights are checked does not have to be
+ * owned by the client calling the command; it is owned by the hClient
+ * parameter.
+ */
+#define NV0000_CTRL_CMD_CLIENT_GET_ACCESS_RIGHTS (0xd03) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS {
+    NvHandle       hObject;    /* [in]  - Handle of object to look up */
+    NvHandle       hClient;    /* [in]  - Handle of client which owns hObject */
+    RS_ACCESS_MASK maskResult; /* [out] - Result of lookup */
+} NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY
+ *
+ * DEPRECATED: Calls NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT with hObject=hClient
+ *
+ * This command will modify a client's inherited share policy list.
+ * The policy is applied in the same way that NvRmShare applies policies,
+ * except to the client's inherited policy list instead of an object's policy
+ * list.
+ */
+#define NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY (0xd04) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS {
+    RS_SHARE_POLICY sharePolicy; /* [in] - Share Policy to apply */
+} NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE
+ *
+ * This command may be used to get a handle of a child of a given type.
+ */
+#define NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE (0xd05) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS {
+    NvHandle hParent; /* [in]  - Handle of parent object */
+    NvU32    classId; /* [in]  - Class ID of the child object */
+    NvHandle hObject; /* [out] - Handle of the child object (0 if not found) */
+} NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT
+ *
+ * This command is meant to imitate the NvRmShare API.
+ * Applies a share policy to an object, which should be owned by the caller's
+ * client. The policy is applied in the same way that NvRmShare applies
+ * policies.
+ *
+ * This ctrl command is only meant to be used in older branches. For releases
+ * after R450, use NvRmShare directly instead.
+ */
+#define NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT (0xd06) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS {
+    NvHandle        hObject;     /* [in] - Handle of object to share */
+    RS_SHARE_POLICY sharePolicy; /* [in] - Share Policy to apply */
+} NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS;
+
+/* _ctrl0000client_h_ */
+
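A sketch of how these client controls are typically driven from user space. NvRmControl stands in here for whatever RM control entry point the caller links against, and hClient and hMemory are assumed pre-existing handles; none of these names come from this header:

/* Ask which address space backs an object's memory (sketch only). */
NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params = {0};
NV_STATUS status;

params.hObject  = hMemory;  /* assumed: handle of some memory object */
params.mapFlags = 0;

status = NvRmControl(hClient, hClient,
                     NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE,
                     &params, sizeof(params));
if (status == NV_OK &&
    params.addrSpaceType == NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM)
{
    /* the object is backed by video memory */
}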
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
new file mode 100644
index 0000000..9b72f20
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
@@ -0,0 +1,326 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000diag.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+/* NV01_ROOT (client) system control commands and parameters */
+
+/*
+ * NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE
+ *
+ * This command returns the current lock meter logging state.
+ *
+ *   state
+ *     This parameter returns the current lock meter logging state.
+ *       NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_DISABLED
+ *         This value indicates lock metering is disabled.
+ *       NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_ENABLED
+ *         This value indicates lock metering is enabled.
+ *   count
+ *     This parameter returns the total number of lock metering entries
+ *     (NV0000_CTRL_DIAG_LOCK_METER_ENTRY) available. This value will
+ *     not exceed NV0000_CTRL_DIAG_LOCK_METER_MAX_ENTRIES. When lock metering
+ *     is enabled this parameter will return zero.
+ *   missedCount
+ *     This parameter returns the number of lock metering entries that had
+ *     to be discarded due to a full lock metering table. This value will
+ *     not exceed NV0000_CTRL_DIAG_LOCK_METER_MAX_TABLE_ENTRIES. When lock
+ *     metering is enabled this parameter will return zero.
+ *   bCircularBuffer
+ *     This parameter returns the type of buffer.
+ *       TRUE
+ *         Buffer is circular
+ *       FALSE
+ *         Buffer is sequential
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE (0x480) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS_MESSAGE_ID (0x80U)
+
+typedef struct NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS {
+    NvU32  state;
+    NvU32  count;
+    NvU32  missedCount;
+    NvBool bCircularBuffer;
+} NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS;
+
+/* valid lock metering state values */
+#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_DISABLED (0x00000000)
+#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_ENABLED  (0x00000001)
+
+/* maximum possible number of lock metering entries stored internally */
+#define NV0000_CTRL_DIAG_LOCK_METER_MAX_TABLE_ENTRIES (0x20000)
+
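For example, a diagnostic tool can poll the metering state before pulling entries (sketch; the NvRmControl call, NV_STATUS status, and hClient handle are assumed as in the earlier client-control sketch):

NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS meterState = {0};

status = NvRmControl(hClient, hClient,
                     NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE,
                     &meterState, sizeof(meterState));
if (status == NV_OK &&
    meterState.state == NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_DISABLED)
{
    /* metering is stopped: meterState.count entries can now be fetched */
}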
+/*
+ * NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE
+ *
+ * This command sets the current lock meter logging state.
+ *
+ *   state
+ *     This parameter specifies the new state of the lock metering mechanism.
+ *     Legal state values are:
+ *       NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE
+ *         This value disables lock metering.
+ *       NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_ENABLE
+ *         This value enables lock metering.
+ *       NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_RESET
+ *         This value resets, or clears, all lock metering state. Lock
+ *         metering must be disabled prior to attempting a reset.
+ *   bCircularBuffer
+ *     This parameter specifies the type of buffer.
+ *     Possible values are:
+ *       TRUE
+ *         For circular buffer.
+ *       FALSE
+ *         For sequential buffer.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE (0x481) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS {
+    NvU32  state;
+    NvBool bCircularBuffer;
+} NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS;
+
+/* valid lock metering state values */
+#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE (0x00000000)
+#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_ENABLE  (0x00000001)
+#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_RESET   (0x00000002)
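Arming the meter with a circular buffer is symmetric (sketch; same assumptions as the preceding one):

NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS setState = {0};

setState.state           = NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_ENABLE;
setState.bCircularBuffer = NV_TRUE;  /* overwrite the oldest entries when full */

status = NvRmControl(hClient, hClient,
                     NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE,
                     &setState, sizeof(setState));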
+
+/*
+ * NV0000_CTRL_DIAG_LOCK_METER_ENTRY
+ *
+ * This structure represents a single lock meter entry.
+ *
+ *   counter
+ *     This field contains the number of nanoseconds elapsed since the
+ *     last system boot when the lock meter entry was generated.
+ *   freq
+ *     This field contains the CPU performance counter frequency in units
+ *     of ticks per second.
+ *   line
+ *     This field contains the relevant line number.
+ *   filename
+ *     This field contains the relevant file name.
+ *   tag
+ *     This field contains a tag uniquely identifying the user of the metered
+ *     lock operations.
+ *   cpuNum
+ *     This field contains the CPU number from which the metered operation
+ *     was initiated.
+ *   irql
+ *     This field contains the IRQL at which the metered operation was
+ *     initiated.
+ *   data0
+ *   data1
+ *   data2
+ *     These fields contain tag-specific data.
+ */
+#define NV0000_CTRL_DIAG_LOCK_METER_ENTRY_FILENAME_LENGTH (0xc)
+
+typedef struct NV0000_CTRL_DIAG_LOCK_METER_ENTRY {
+    NV_DECLARE_ALIGNED(NvU64 counter, 8);
+
+    NvU32 line;
+    NvU8  filename[NV0000_CTRL_DIAG_LOCK_METER_ENTRY_FILENAME_LENGTH];
+
+    NvU16 tag;
+    NvU8  cpuNum;
+    NvU8  irql;
+
+    NV_DECLARE_ALIGNED(NvU64 threadId, 8);
+
+    NvU32 data0;
+    NvU32 data1;
+    NvU32 data2;
+} NV0000_CTRL_DIAG_LOCK_METER_ENTRY;
+
+/* valid lock meter entry tag values */
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA        (0x00000001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA_FORCED (0x00000002)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA_COND   (0x00000003)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_SEMA        (0x00000004)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_API         (0x00000010)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_API         (0x00000011)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_GPUS        (0x00000020)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_GPUS        (0x00000021)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DATA                (0x00000100)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RMCTRL              (0x00001000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_GET             (0x00002000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_SET             (0x00002001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_GETEX           (0x00002002)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_SETEX           (0x00002003)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_VIDHEAP             (0x00003000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_MAPMEM              (0x00003001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_UNMAPMEM            (0x00003002)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_MAPMEM_DMA          (0x00003003)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_UNMAPMEM_DMA        (0x00003004)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC               (0x00004000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC_MEM           (0x00004001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DUP_OBJECT          (0x00004010)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CLIENT         (0x00005000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DEVICE         (0x00005001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_SUBDEVICE      (0x00005002)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_SUBDEVICE_DIAG (0x00005003)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DISP           (0x00005004)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DISP_CMN       (0x00005005)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL        (0x00005006)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL_MPEG   (0x00005007)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL_DISP   (0x00005008)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_MEMORY         (0x00005009)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_FBMEM          (0x0000500A)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_OBJECT         (0x0000500B)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_EVENT          (0x0000500C)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_IDLE_CHANNELS       (0x00006000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_BIND_CTXDMA         (0x00007000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC_CTXDMA        (0x00007001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ISR                 (0x0000F000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DPC                 (0x0000F00F)
+
+/*
+ * NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES
+ *
+ * This command returns lock metering data in a fixed-sized array of entries.
+ * Each request will return up to NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_MAX_ENTRIES
+ * entries.
+ *
+ * It is up to the caller to repeat these requests to retrieve the total number
+ * of entries reported by NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE.
+ *
+ *   entryCount
+ *     This parameter returns the total number of valid entries returned
+ *     in the entries array. This value will not exceed
+ *     NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX but may be less.
+ *     A value of zero indicates there are no more valid entries.
+ *   entries
+ *     This parameter contains the storage into which lock metering entry
+ *     data is returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES (0x485) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS_MESSAGE_ID" */
+
+/* total number of entries returned */
+#define NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX (0x40)
+
+#define NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS_MESSAGE_ID (0x85U)
+
+typedef struct NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS {
+    NvU32 entryCount;
+    NV_DECLARE_ALIGNED(NV0000_CTRL_DIAG_LOCK_METER_ENTRY entries[NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX], 8);
+} NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS;
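Since each call hands back at most NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX entries, callers drain the table in a loop until entryCount comes back zero (sketch; consumeEntry is a hypothetical consumer, other assumptions as above):

NvU32 fetched;

do {
    NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS batch = {0};

    status = NvRmControl(hClient, hClient,
                         NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES,
                         &batch, sizeof(batch));
    if (status != NV_OK)
        break;
    for (NvU32 i = 0; i < batch.entryCount; i++)
        consumeEntry(&batch.entries[i]);
    fetched = batch.entryCount;
} while (fetched > 0);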
+
+/*
+ * NV0000_CTRL_CMD_DIAG_PROFILE_RPC
+ *
+ * This command returns the RPC runtime information, and
+ * will only return valid data when running inside VGX mode.
+ *
+ *   rpcProfileCmd:
+ *     RPC profiler command issued by the rpc profiler utility
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_DIAG_PROFILE_RPC (0x488) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS_MESSAGE_ID" */
+
+typedef struct RPC_METER_ENTRY {
+    NV_DECLARE_ALIGNED(NvU64 startTimeInNs, 8);
+    NV_DECLARE_ALIGNED(NvU64 endTimeInNs, 8);
+    NV_DECLARE_ALIGNED(NvU64 rpcDataTag, 8);
+    NV_DECLARE_ALIGNED(NvU64 rpcExtraData, 8);
+} RPC_METER_ENTRY;
+
+#define NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS {
+    NvU32 rpcProfileCmd;
+} NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS;
+
+#define NV0000_CTRL_PROFILE_RPC_CMD_DISABLE (0x00000000)
+#define NV0000_CTRL_PROFILE_RPC_CMD_ENABLE  (0x00000001)
+#define NV0000_CTRL_PROFILE_RPC_CMD_RESET   (0x00000002)
+
+/*
+ * NV0000_CTRL_CMD_DIAG_DUMP_RPC
+ *
+ * This command returns the RPC runtime information, which
+ * will be logged by the NV0000_CTRL_CMD_DIAG_PROFILE_RPC command
+ * when running inside VGX mode.
+ *
+ * When issuing this command, the RPC profiler has to be disabled.
+ *
+ *   firstEntryOffset:
+ *     [IN] offset for first entry.
+ *
+ *   outputEntryCount:
+ *     [OUT] number of entries returned in rpcProfilerBuffer.
+ *
+ *   remainingEntryCount:
+ *     [OUT] number of entries remaining.
+ *
+ *   elapsedTimeInNs:
+ *     [OUT] runtime for the RPC profiler tool.
+ *
+ *   rpcProfilerBuffer:
+ *     [OUT] buffer to store the RPC entries
+ */
+
+#define NV0000_CTRL_CMD_DIAG_DUMP_RPC (0x489) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_DUMP_RPC_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_DIAG_RPC_MAX_ENTRIES (100)
+
+#define NV0000_CTRL_DIAG_DUMP_RPC_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV0000_CTRL_DIAG_DUMP_RPC_PARAMS {
+    NvU32 firstEntryOffset;
+    NvU32 outputEntryCount;
+    NvU32 remainingEntryCount;
+    NV_DECLARE_ALIGNED(NvU64 elapsedTimeInNs, 8);
+    NV_DECLARE_ALIGNED(RPC_METER_ENTRY rpcProfilerBuffer[NV0000_CTRL_DIAG_RPC_MAX_ENTRIES], 8);
+} NV0000_CTRL_DIAG_DUMP_RPC_PARAMS;
+
+/* _ctrl0000diag_h_ */
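A typical profiling session strings the two commands together: enable, run the workload, disable, then page the log out with firstEntryOffset until remainingEntryCount reaches zero (sketch; logRpcEntry is a hypothetical consumer, other assumptions as in the sketches above):

NV0000_CTRL_DIAG_DUMP_RPC_PARAMS dump = {0};
NvU32 offset = 0;

do {
    dump.firstEntryOffset = offset;
    status = NvRmControl(hClient, hClient,
                         NV0000_CTRL_CMD_DIAG_DUMP_RPC,
                         &dump, sizeof(dump));
    if (status != NV_OK)
        break;
    for (NvU32 i = 0; i < dump.outputEntryCount; i++)
        logRpcEntry(&dump.rpcProfilerBuffer[i]);
    offset += dump.outputEntryCount;
} while (dump.remainingEntryCount > 0);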
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
new file mode 100644
index 0000000..1679f84
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
@@ -0,0 +1,113 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000event.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+#include "class/cl0000.h"
+/*
+ * NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION
+ *
+ * This command sets event notification for system events.
+ *
+ *   event
+ *     This parameter specifies the type of event to which the specified
+ *     action is to be applied. The valid event values can be found in
+ *     cl0000.h.
+ *
+ *   action
+ *     This parameter specifies the desired event notification action.
+ *     Valid notification actions include:
+ *       NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE
+ *         This action disables event notification for the specified
+ *         event.
+ *       NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE
+ *         This action enables single-shot event notification for the
+ *         specified event.
+ *       NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT
+ *         This action enables repeated event notification for the
+ *         specified event.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_CLIENT
+ *
+ */
+
+#define NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION (0x501) /* finn: Evaluated from "(FINN_NV01_ROOT_EVENT_INTERFACE_ID << 8) | NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32 event;
+    NvU32 action;
+} NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+/* valid action values */
+#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000)
+#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE  (0x00000001)
+#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT  (0x00000002)
+
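For instance, a client that wants every occurrence of a system event, not just the next one, requests the REPEAT action (sketch; the event value would be one of the notifier indices from cl0000.h, other assumptions as in the earlier sketches):

NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS notify = {0};

notify.event  = systemEvent;  /* assumed: a notifier index from cl0000.h */
notify.action = NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;

status = NvRmControl(hClient, hClient,
                     NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION,
                     &notify, sizeof(notify));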
+/*
+ * NV0000_CTRL_CMD_GET_SYSTEM_EVENT_STATUS
+ *
+ * This command returns the status of the specified system event type.
+ * See the description of NV01_EVENT for details on registering events.
+ *
+ *   event
+ *     This parameter specifies the event type. Valid event type values
+ *     can be found in cl0000.h.
+ *   status
+ *     This parameter returns the status for a given event type. Valid
+ *     status values can be found in cl0000.h.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_CLIENT
+ *
+ */
+
+#define NV0000_CTRL_CMD_GET_SYSTEM_EVENT_STATUS (0x502) /* finn: Evaluated from "(FINN_NV01_ROOT_EVENT_INTERFACE_ID << 8) | NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS {
+    NvU32 event;
+    NvU32 status;
+} NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS;
+
+/* _ctrl0000event_h_ */
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
new file mode 100644
index 0000000..f581935
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
@@ -0,0 +1,847 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000gpu.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+#include "ctrl/ctrl0000/ctrl0000system.h"
+#include "ctrl/ctrlxxxx.h"
+#include "nvlimits.h"
+
+/* NV01_ROOT (client) GPU control commands and parameters */
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS
+ *
+ * This command returns a table of attached gpuId values.
+ * The table is NV0000_CTRL_GPU_MAX_ATTACHED_GPUS entries in size.
+ *
+ *   gpuIds[]
+ *     This parameter returns the table of attached GPU IDs.
+ *     The GPU ID is an opaque platform-dependent value that can be used
+ *     with the NV0000_CTRL_CMD_GPU_GET_ID_INFO command to retrieve
+ *     additional information about the GPU. The valid entries in gpuIds[]
+ *     are contiguous, with a value of NV0000_CTRL_GPU_INVALID_ID indicating
+ *     the invalid entries.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS (0x201U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_MAX_ATTACHED_GPUS 32U
+#define NV0000_CTRL_GPU_INVALID_ID (0xffffffffU)
+
+#define NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS {
+    NvU32 gpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS];
+} NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS;
+
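Consumers scan the returned table until the first NV0000_CTRL_GPU_INVALID_ID, since valid entries are contiguous (sketch; probeGpu is a hypothetical per-GPU routine, other assumptions as in the earlier sketches):

NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS attached = {0};

status = NvRmControl(hClient, hClient,
                     NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
                     &attached, sizeof(attached));
if (status == NV_OK)
{
    for (NvU32 i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++)
    {
        if (attached.gpuIds[i] == NV0000_CTRL_GPU_INVALID_ID)
            break;  /* valid entries are contiguous */
        probeGpu(attached.gpuIds[i]);
    }
}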
+/*
+ * Deprecated. Please use NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 instead.
+ */
+#define NV0000_CTRL_CMD_GPU_GET_ID_INFO (0x202U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ID_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_MAX_SZNAME 128U
+
+#define NV0000_CTRL_NO_NUMA_NODE (-1)
+
+#define NV0000_CTRL_GPU_GET_ID_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_GPU_GET_ID_INFO_PARAMS {
+    NvU32 gpuId;
+    NvU32 gpuFlags;
+    NvU32 deviceInstance;
+    NvU32 subDeviceInstance;
+    NV_DECLARE_ALIGNED(NvP64 szName, 8);
+    NvU32 sliStatus;
+    NvU32 boardId;
+    NvU32 gpuInstance;
+    NvS32 numaId;
+} NV0000_CTRL_GPU_GET_ID_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2
+ * This command returns GPU instance information for the specified GPU.
+ *
+ *   [in] gpuId
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NV_ERR_INVALID_ARGUMENT is returned.
+ *   [out] gpuFlags
+ *     This parameter returns various flags values for the specified GPU.
+ *     Valid flag values include:
+ *       NV0000_CTRL_GPU_ID_INFO_IN_USE
+ *         When true this flag indicates there are client references
+ *         to the GPU in the form of device class instantiations (see
+ *         NV01_DEVICE or NV03_DEVICE descriptions for details).
+ *       NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE
+ *         When true this flag indicates the GPU is linked into an
+ *         active SLI device.
+ *       NV0000_CTRL_GPU_ID_INFO_MOBILE
+ *         When true this flag indicates the GPU is a mobile GPU.
+ *       NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER
+ *         When true this flag indicates the GPU is the boot master GPU.
+ *       NV0000_CTRL_GPU_ID_INFO_SOC
+ *         When true this flag indicates the GPU is part of a
+ *         System-on-Chip (SOC).
+ *       NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED
+ *         When true this flag indicates ATS is enabled on the system.
+ *   [out] deviceInstance
+ *     This parameter returns the broadcast device instance number associated
+ *     with the specified GPU. This value can be used to instantiate
+ *     a broadcast reference to the GPU using the NV01_DEVICE classes.
+ *   [out] subDeviceInstance
+ *     This parameter returns the unicast subdevice instance number
+ *     associated with the specified GPU. This value can be used to
+ *     instantiate a unicast reference to the GPU using the NV20_SUBDEVICE
+ *     classes.
+ *   [out] sliStatus
+ *     This parameter returns the SLI status for the specified GPU.
+ *     Legal values for this member are described by NV0000_CTRL_SLI_STATUS.
+ *   [out] boardId
+ *     This parameter returns the board ID value with which the
+ *     specified GPU is associated. Multiple GPUs can share the
+ *     same board ID in multi-GPU configurations.
+ *   [out] gpuInstance
+ *     This parameter returns the GPU instance number for the specified GPU.
+ *     GPU instance numbers are assigned in bus-probe order beginning with
+ *     zero and are limited to one less than the number of GPUs in the
+ *     system.
+ *   [out] numaId
+ *     This parameter returns the ID of the NUMA node for the specified GPU.
+ *     In case there is no NUMA node, NV0000_CTRL_NO_NUMA_NODE is returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+
+
+#define NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 (0x205U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS {
+    NvU32 gpuId;
+    NvU32 gpuFlags;
+    NvU32 deviceInstance;
+    NvU32 subDeviceInstance;
+    NvU32 sliStatus;
+    NvU32 boardId;
+    NvU32 gpuInstance;
+    NvS32 numaId;
+} NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS;
+
+
+/* valid flags values */
+#define NV0000_CTRL_GPU_ID_INFO_IN_USE 0:0
+#define NV0000_CTRL_GPU_ID_INFO_IN_USE_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_IN_USE_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE 1:1
+#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_MOBILE 2:2
+#define NV0000_CTRL_GPU_ID_INFO_MOBILE_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_MOBILE_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER 3:3
+#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_TRUE (0x00000001U)
+
+
+#define NV0000_CTRL_GPU_ID_INFO_SOC 5:5
+#define NV0000_CTRL_GPU_ID_INFO_SOC_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_SOC_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED 6:6
+#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_TRUE (0x00000001U)
+
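The 0:0-style positions above are DRF bit ranges, read back the same way the clC67e sketch earlier packed them. A sketch of testing a gpuFlags word returned in NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS (the FLD_GET helper is local to this example; idInfo is assumed to have been filled in by the control call):

/* Local DRF read helper; the driver tree provides equivalent macros. */
#define FLD_GET(range, v) \
    (((v) >> (0 ? range)) & (0xFFFFFFFFu >> (31 - (1 ? range) + (0 ? range))))

NvBool bInUse  = FLD_GET(NV0000_CTRL_GPU_ID_INFO_IN_USE, idInfo.gpuFlags) ==
                 NV0000_CTRL_GPU_ID_INFO_IN_USE_TRUE;
NvBool bMobile = FLD_GET(NV0000_CTRL_GPU_ID_INFO_MOBILE, idInfo.gpuFlags) ==
                 NV0000_CTRL_GPU_ID_INFO_MOBILE_TRUE;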
+ *   [out] numaId
+ *     This parameter returns the ID of the NUMA node for the specified GPU.
+ *     In case there is no NUMA node, NV0000_CTRL_NO_NUMA_NODE is returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+
+
+#define NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 (0x205U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS {
+    NvU32 gpuId;
+    NvU32 gpuFlags;
+    NvU32 deviceInstance;
+    NvU32 subDeviceInstance;
+    NvU32 sliStatus;
+    NvU32 boardId;
+    NvU32 gpuInstance;
+    NvS32 numaId;
+} NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS;
+
+
+/* valid flags values */
+#define NV0000_CTRL_GPU_ID_INFO_IN_USE 0:0
+#define NV0000_CTRL_GPU_ID_INFO_IN_USE_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_IN_USE_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE 1:1
+#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_MOBILE 2:2
+#define NV0000_CTRL_GPU_ID_INFO_MOBILE_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_MOBILE_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER 3:3
+#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_TRUE (0x00000001U)
+
+
+#define NV0000_CTRL_GPU_ID_INFO_SOC 5:5
+#define NV0000_CTRL_GPU_ID_INFO_SOC_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_SOC_TRUE (0x00000001U)
+#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED 6:6
+#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_FALSE (0x00000000U)
+#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_TRUE (0x00000001U)
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_INIT_STATUS
+ *
+ * This command returns the initialization status for the specified GPU, and
+ * will return NV_ERR_INVALID_STATE if called prior to GPU
+ * initialization.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NV_ERR_INVALID_ARGUMENT is returned.
+ *   status
+ *     This parameter returns the status code identifying the initialization
+ *     state of the GPU. If this parameter has the value NV_OK,
+ *     then no errors were detected during GPU initialization. Otherwise, this
+ *     parameter specifies the top-level error that was detected during GPU
+ *     initialization. Note that a value of NV_OK only means that
+ *     no errors were detected during the actual GPU initialization, and other
+ *     errors may have occurred that prevent the GPU from being attached or
+ *     accessible via the NV01_DEVICE or NV20_SUBDEVICE classes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0000_CTRL_CMD_GPU_GET_INIT_STATUS (0x203U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS {
+    NvU32 gpuId;
+    NvU32 status;
+} NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS;
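[Editorial illustration, not part of the generated header.] A minimal user-space sketch of the intended call flow for the two commands above: enumerate attached GPU IDs, then query NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 per GPU. The rmControl() helper is hypothetical, standing in for a wrapper around the driver's RM control entry point (e.g. the NV_ESC_RM_CONTROL ioctl); client/handle setup and error handling are abbreviated.

#include <stdio.h>
#include "ctrl/ctrl0000/ctrl0000gpu.h"

/* Hypothetical wrapper over the RM control entry point; not part of this patch. */
extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                           void *pParams, NvU32 paramsSize);

static void listAttachedGpus(NvHandle hClient)
{
    NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS ids = { 0 };

    /* NV0000 controls are issued against the client (root) object itself. */
    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
                  &ids, sizeof(ids)) != NV_OK)
        return;

    /* Valid entries are contiguous; NV0000_CTRL_GPU_INVALID_ID ends the list. */
    for (NvU32 i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS &&
         ids.gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID; i++)
    {
        NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS info = { 0 };
        info.gpuId = ids.gpuIds[i];
        if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
                      &info, sizeof(info)) == NV_OK)
            printf("gpuId 0x%x -> deviceInstance %u, numaId %d\n",
                   info.gpuId, info.deviceInstance, info.numaId);
    }
}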
+/*
+ * NV0000_CTRL_CMD_GPU_GET_DEVICE_IDS
+ *
+ * This command returns a mask of valid device IDs. These device IDs
+ * can be used to instantiate the NV01_DEVICE_0 class (see NV01_DEVICE_0
+ * for more information).
+ *
+ *   deviceIds
+ *     This parameter returns the mask of valid device IDs. Each enabled bit
+ *     in the mask corresponds to a valid device instance. Valid device
+ *     instances can be used to initialize the NV0080_ALLOC_PARAMETERS
+ *     structure when using NvRmAlloc to instantiate device handles. The
+ *     number of device IDs will not exceed NV_MAX_DEVICES.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0000_CTRL_CMD_GPU_GET_DEVICE_IDS (0x204U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS {
+    NvU32 deviceIds;
+} NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS;
+
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_PROBED_IDS
+ *
+ * This command returns a table of probed gpuId values.
+ * The table is NV0000_CTRL_GPU_MAX_PROBED_GPUS entries in size.
+ *
+ *   gpuIds[]
+ *     This parameter returns the table of probed GPU IDs.
+ *     The GPU ID is an opaque platform-dependent value that can
+ *     be used with the NV0000_CTRL_CMD_GPU_ATTACH_IDS and
+ *     NV0000_CTRL_CMD_GPU_DETACH_IDS commands to attach and detach
+ *     the GPU.
+ *     The valid entries in gpuIds[] are contiguous, with a value
+ *     of NV0000_CTRL_GPU_INVALID_ID indicating the invalid entries.
+ *   excludedGpuIds[]
+ *     This parameter returns the table of excluded GPU IDs.
+ *     An excluded GPU ID is an opaque platform-dependent value that
+ *     can be used with NV0000_CTRL_CMD_GPU_GET_PCI_INFO and
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_INFO.
+ *     The valid entries in excludedGpuIds[] are contiguous, with a value
+ *     of NV0000_CTRL_GPU_INVALID_ID indicating the invalid entries.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0000_CTRL_CMD_GPU_GET_PROBED_IDS (0x214U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_MAX_PROBED_GPUS NV_MAX_DEVICES
+
+#define NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS {
+    NvU32 gpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS];
+    NvU32 excludedGpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS];
+} NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS;
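[Editorial illustration.] Probed GPUs are not necessarily attached; the sketch below, using the same hypothetical rmControl() wrapper and includes as the first sketch, walks both tables returned by NV0000_CTRL_CMD_GPU_GET_PROBED_IDS.

static void listProbedGpus(NvHandle hClient)
{
    NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS p = { 0 };
    NvU32 i;

    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_PROBED_IDS,
                  &p, sizeof(p)) != NV_OK)
        return;

    /* Both tables mark unused slots with NV0000_CTRL_GPU_INVALID_ID. */
    for (i = 0; i < NV0000_CTRL_GPU_MAX_PROBED_GPUS; i++) {
        if (p.gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID)
            printf("probed gpuId 0x%x\n", p.gpuIds[i]);
        if (p.excludedGpuIds[i] != NV0000_CTRL_GPU_INVALID_ID)
            printf("excluded gpuId 0x%x\n", p.excludedGpuIds[i]);
    }
}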
+/*
+ * NV0000_CTRL_CMD_GPU_GET_PCI_INFO
+ *
+ * This command takes a gpuId and returns PCI bus information about
+ * the device. If the OS does not support returning PCI bus
+ * information, this call will return NV_ERR_NOT_SUPPORTED.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NV_ERR_INVALID_ARGUMENT is returned.
+ *
+ *   domain
+ *     This parameter returns the PCI domain of the GPU.
+ *
+ *   bus
+ *     This parameter returns the PCI bus of the GPU.
+ *
+ *   slot
+ *     This parameter returns the PCI slot of the GPU.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_GPU_GET_PCI_INFO (0x21bU) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS_MESSAGE_ID (0x1BU)
+
+typedef struct NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS {
+    NvU32 gpuId;
+    NvU32 domain;
+    NvU16 bus;
+    NvU16 slot;
+} NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_ATTACH_IDS
+ *
+ * This command attaches the GPUs with the gpuIds matching those in
+ * the table provided by the client.
+ * The table is NV0000_CTRL_GPU_MAX_PROBED_GPUS entries in size.
+ *
+ *   gpuIds[]
+ *     This parameter holds the table of gpuIds to attach. At least
+ *     one gpuId must be specified; clients may use the special
+ *     gpuId value NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS to indicate
+ *     that all probed GPUs are to be attached.
+ *     The entries in gpuIds[] must be contiguous, with a value of
+ *     NV0000_CTRL_GPU_INVALID_ID to indicate the first invalid
+ *     entry.
+ *     If one or more of the gpuId values do not specify a GPU found
+ *     in the system, the NV_ERR_INVALID_ARGUMENT error
+ *     status is returned.
+ *
+ *   failedId
+ *     If NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS is specified and
+ *     a GPU cannot be attached, the NV0000_CTRL_CMD_GPU_ATTACH_IDS
+ *     command returns an error code and saves the failing GPU's
+ *     gpuId in this field.
+ *
+ * If a table of gpuIds is provided, these gpuIds will be validated
+ * against the RM's table of probed gpuIds and attached in turn,
+ * if valid; if NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS is used, all
+ * probed gpuIds will be attached, in the order the associated GPUs
+ * were probed in by the RM.
+ *
+ * If a gpuId fails to attach, this gpuId is stored in the failedId
+ * field. Any GPUs attached by the command prior to the failure are
+ * detached.
+ *
+ * If multiple clients use NV0000_CTRL_CMD_GPU_ATTACH_IDS to attach
+ * a gpuId, the RM ensures that the gpuId won't be detached until
+ * all clients have issued a call to NV0000_CTRL_CMD_GPU_DETACH_IDS
+ * to detach the gpuId (or have terminated).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_IRQ_EDGE_TRIGGERED
+ *   NV_ERR_IRQ_NOT_FIRING
+ */
+#define NV0000_CTRL_CMD_GPU_ATTACH_IDS (0x215U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_ATTACH_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS (0x0000ffffU)
+
+#define NV0000_CTRL_GPU_ATTACH_IDS_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV0000_CTRL_GPU_ATTACH_IDS_PARAMS {
+    NvU32 gpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS];
+    NvU32 failedId;
+} NV0000_CTRL_GPU_ATTACH_IDS_PARAMS;
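[Editorial illustration.] One plausible way to attach every probed GPU, again through the hypothetical rmControl() wrapper: fill the table with the invalid-entry marker, then place the attach-all sentinel in the first slot and check failedId on error.

static NV_STATUS attachAllProbedGpus(NvHandle hClient)
{
    NV0000_CTRL_GPU_ATTACH_IDS_PARAMS p = { 0 };
    NV_STATUS status;
    NvU32 i;

    /* Entries must be contiguous and terminated by NV0000_CTRL_GPU_INVALID_ID. */
    for (i = 0; i < NV0000_CTRL_GPU_MAX_PROBED_GPUS; i++)
        p.gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
    p.gpuIds[0] = NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS;

    status = rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_ATTACH_IDS,
                       &p, sizeof(p));
    if (status != NV_OK)
        fprintf(stderr, "attach failed, offending gpuId 0x%x\n", p.failedId);
    return status;
}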
+/*
+ * NV0000_CTRL_CMD_GPU_DETACH_IDS
+ *
+ * This command detaches the GPUs with the gpuIds matching those in
+ * the table provided by the client.
+ * The table is NV0000_CTRL_GPU_MAX_ATTACHED_GPUS entries in size.
+ *
+ *   gpuIds[]
+ *     This parameter holds the table of gpuIds to detach. At least
+ *     one gpuId must be specified; clients may use the special
+ *     gpuId NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS to indicate that
+ *     all attached GPUs are to be detached.
+ *     The entries in gpuIds[] must be contiguous, with a value of
+ *     NV0000_CTRL_GPU_INVALID_ID to indicate the first invalid
+ *     entry.
+ *     If one or more of the gpuId values do not specify a GPU found
+ *     in the system, the NV_ERR_INVALID_ARGUMENT error
+ *     status is returned.
+ *
+ * If a table of gpuIds is provided, these gpuIds will be validated
+ * against the RM's list of attached gpuIds; each valid gpuId is
+ * detached immediately if it's no longer in use (i.e. if there are
+ * no client references to the associated GPU in the form of
+ * device class instantiations (see the NV01_DEVICE or NV03_DEVICE
+ * descriptions for details)) and if no other client still requires
+ * the associated GPU to be attached.
+ *
+ * If a given gpuId can't be detached immediately, it will instead
+ * be detached when the last client reference is freed or when
+ * the last client that issued NV0000_CTRL_CMD_GPU_ATTACH_IDS for
+ * this gpuId either issues NV0000_CTRL_CMD_GPU_DETACH_IDS or exits
+ * without detaching the gpuId explicitly.
+ *
+ * Clients may use the NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS command
+ * to obtain a table of the attached gpuIds.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_GPU_DETACH_IDS (0x216U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_DETACH_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS (0x0000ffffU)
+
+#define NV0000_CTRL_GPU_DETACH_IDS_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV0000_CTRL_GPU_DETACH_IDS_PARAMS {
+    NvU32 gpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS];
+} NV0000_CTRL_GPU_DETACH_IDS_PARAMS;
+
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_SVM_SIZE
+ *
+ * This command is used to get the SVM size.
+ *
+ *   gpuId
+ *     This parameter uniquely identifies the GPU whose associated
+ *     SVM size is to be returned. The value of this field must
+ *     match one of those in the table returned by
+ *     NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS.
+ *
+ *   svmSize
+ *     This parameter returns the SVM size.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_GET_SVM_SIZE (0x240U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS_MESSAGE_ID (0x40U)
+
+typedef struct NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS {
+    NvU32 gpuId;
+    NvU32 svmSize;
+} NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS;
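[Editorial illustration.] Detaching mirrors attaching; a sketch under the same hypothetical rmControl() wrapper, using the detach-all sentinel. Note the detach may be deferred by the RM if other clients still hold references.

static NV_STATUS detachAllGpus(NvHandle hClient)
{
    NV0000_CTRL_GPU_DETACH_IDS_PARAMS p;
    NvU32 i;

    /* Terminate the table with the invalid-entry marker, sentinel first. */
    for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++)
        p.gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
    p.gpuIds[0] = NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS;

    return rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_DETACH_IDS,
                     &p, sizeof(p));
}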
+/*
+ * NV0000_CTRL_CMD_GPU_GET_UUID_INFO
+ *
+ * This command returns requested information pertaining to the GPU
+ * specified by the GPU UUID passed in.
+ *
+ * Generally only GPUs that have been attached are visible to this call. Therefore
+ * queries on unattached GPUs will fail with NV_ERR_OBJECT_NOT_FOUND. However,
+ * a query for a SHA1 UUID may succeed for an unattached GPU in cases where the GID
+ * is cached, such as an excluded GPU.
+ *
+ *   gpuGuid (INPUT)
+ *     The GPU UUID of the GPU whose parameters are to be returned. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   flags (INPUT)
+ *     The _FORMAT* flags designate ASCII string format or a binary format.
+ *
+ *     The _TYPE* flags designate either SHA-1-based (32-hex-character) or
+ *     SHA-256-based (64-hex-character).
+ *
+ *   gpuId (OUTPUT)
+ *     The GPU ID of the GPU identified by gpuGuid. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   deviceInstance (OUTPUT)
+ *     The device instance of the GPU identified by gpuGuid. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   subdeviceInstance (OUTPUT)
+ *     The subdevice instance of the GPU identified by gpuGuid. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO (0x274U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum possible number of bytes of GID information */
+#define NV0000_GPU_MAX_GID_LENGTH (0x00000100U)
+
+#define NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS {
+    NvU8  gpuUuid[NV0000_GPU_MAX_GID_LENGTH];
+    NvU32 flags;
+    NvU32 gpuId;
+    NvU32 deviceInstance;
+    NvU32 subdeviceInstance;
+} NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS;
+
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT 1:0
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_ASCII (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_BINARY (0x00000002U)
+
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE 2:2
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE_SHA1 (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE_SHA256 (0x00000001U)
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID
+ *
+ * This command returns the GPU UUID for the provided GPU ID.
+ * Note that only GPUs that have been attached are visible to this call.
+ * Therefore queries on unattached GPUs will fail
+ * with NV_ERR_OBJECT_NOT_FOUND.
+ *
+ *   gpuId (INPUT)
+ *     The GPU ID whose parameters are to be returned. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   flags (INPUT)
+ *
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII
+ *       This value is used to request the GPU UUID in ASCII format.
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_BINARY
+ *       This value is used to request the GPU UUID in binary format.
+ *
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1
+ *       This value is used to request that the GPU UUID value
+ *       be SHA1-based (32-hex-character).
+ *     NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA256
+ *       This value is used to request that the GPU UUID value
+ *       be SHA256-based (64-hex-character).
+ *
+ *   gpuUuid[NV0000_GPU_MAX_GID_LENGTH] (OUTPUT)
+ *     The GPU UUID of the GPU identified by GPU ID. Refer to
+ *     NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information.
+ *
+ *   uuidStrLen (OUTPUT)
+ *     The length of the UUID returned, which depends on the format
+ *     that was requested using flags.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID (0x275U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS {
+    NvU32 gpuId;
+    NvU32 flags;
+    NvU8  gpuUuid[NV0000_GPU_MAX_GID_LENGTH];
+    NvU32 uuidStrLen;
+} NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS;
+
+/* valid format values */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT 1:0
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_BINARY (0x00000002U)
+
+/* valid type values */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE 2:2
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1 (0x00000000U)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA256 (0x00000001U)
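[Editorial illustration.] A sketch of a UUID lookup by GPU ID through the hypothetical rmControl() wrapper. The flag defines above name bit ranges in FINN's high:low notation, so non-zero field values would normally be placed with DRF-style helper macros; here both requested values (_FORMAT_ASCII and _TYPE_SHA1) happen to be zero, so the raw OR is safe.

static void printGpuUuid(NvHandle hClient, NvU32 gpuId)
{
    NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS p = { 0 };

    p.gpuId = gpuId;
    p.flags = NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII |
              NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1;

    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID,
                  &p, sizeof(p)) == NV_OK)
        printf("gpuId 0x%x UUID: %.*s\n", gpuId,
               (int)p.uuidStrLen, (const char *)p.gpuUuid);
}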
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE
+ *
+ * This command is used to enter or exit the so-called "drain" state.
+ * When this state is enabled, the existing clients continue executing
+ * as usual, however no new client connections are allowed.
+ * This is done in order to "drain" the system of the running clients
+ * in preparation for selectively powering down the GPU.
+ * No GPU can enter a drain state if that GPU is in an SLI group.
+ * In that case, NV_ERR_IN_USE is returned.
+ * Requires administrator privileges.
+ *
+ * It is expected that the "drain" state will be eventually deprecated
+ * and replaced with another mechanism to quiesce a GPU (Bug 1718113).
+ *
+ *   gpuId (INPUT)
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NV_ERR_INVALID_ARGUMENT is returned.
+ *   newState (INPUT)
+ *     This input parameter is used to enter or exit the "drain"
+ *     software state of the GPU specified by the gpuId parameter.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_DRAIN_STATE_ENABLED
+ *       NV0000_CTRL_GPU_DRAIN_STATE_DISABLED
+ *   flags (INPUT)
+ *     NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE
+ *       if set, upon reaching quiescence, a request will be made to
+ *       the OS to "forget" the PCI device associated with the
+ *       GPU specified by the gpuId parameter, in case such a request
+ *       is supported by the OS. Otherwise, NV_ERR_NOT_SUPPORTED
+ *       will be returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_IN_USE
+ */
+
+#define NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE (0x278U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS_MESSAGE_ID" */
+
+/* Possible values of newState */
+#define NV0000_CTRL_GPU_DRAIN_STATE_DISABLED (0x00000000U)
+#define NV0000_CTRL_GPU_DRAIN_STATE_ENABLED (0x00000001U)
+
+/* Defined bits for the "flags" argument */
+#define NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE (0x00000001U)
+#define NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE (0x00000002U)
+
+#define NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS_MESSAGE_ID (0x78U)
+
+typedef struct NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 newState;
+    NvU32 flags;
+} NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE
+ *
+ *   gpuId (INPUT)
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NVOS_STATUS_ERROR_INVALID_ARGUMENT is returned.
+ *   drainState (OUTPUT)
+ *     This parameter returns a value indicating if the "drain"
+ *     state is currently enabled or not for the specified GPU. See the
+ *     description of NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_DRAIN_STATE_ENABLED
+ *       NV0000_CTRL_GPU_DRAIN_STATE_DISABLED
+ *   flags (OUTPUT)
+ *     NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE
+ *       if set, upon reaching quiescence, the GPU device will be
+ *       removed automatically from the kernel space, similar
+ *       to what writing "1" to the sysfs "remove" node does.
+ *     NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE
+ *       after removing the GPU, also disable the parent bridge's
+ *       PCIe link. This flag can only be set in conjunction with
+ *       NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE, and then
+ *       only when the GPU is already idle (not attached).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE (0x279U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS_MESSAGE_ID (0x79U)
+
+typedef struct NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 drainState;
+    NvU32 flags;
+} NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS;
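[Editorial illustration.] Enabling the drain state and reading it back, under the same hypothetical rmControl() wrapper. Both controls require a valid gpuId, and the modify call needs administrator privileges.

static NV_STATUS drainAndQuery(NvHandle hClient, NvU32 gpuId)
{
    NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS m = { 0 };
    NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS q = { 0 };
    NV_STATUS status;

    m.gpuId = gpuId;
    m.newState = NV0000_CTRL_GPU_DRAIN_STATE_ENABLED;
    m.flags = 0; /* neither REMOVE_DEVICE nor LINK_DISABLE */
    status = rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE,
                       &m, sizeof(m));
    if (status != NV_OK)
        return status; /* e.g. NV_ERR_INSUFFICIENT_PERMISSIONS without admin */

    q.gpuId = gpuId;
    status = rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE,
                       &q, sizeof(q));
    if (status == NV_OK && q.drainState == NV0000_CTRL_GPU_DRAIN_STATE_ENABLED)
        printf("gpuId 0x%x is draining\n", gpuId);
    return status;
}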
+/*
+ * NV0000_CTRL_CMD_GPU_DISCOVER
+ *
+ * This request asks the OS to scan the PCI tree or a sub-tree for GPUs
+ * that are not yet known to the OS, and to make them available for use.
+ * If all of domain:bus:slot.function are zeros, the entire tree is scanned,
+ * otherwise the parameters identify the bridge device that roots the
+ * subtree to be scanned.
+ * Requires administrator privileges.
+ *
+ *   domain (INPUT)
+ *     PCI domain of the bridge
+ *   bus (INPUT)
+ *     PCI bus of the bridge
+ *   slot (INPUT)
+ *     PCI slot of the bridge
+ *   function (INPUT)
+ *     PCI function of the bridge
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_GPU_DISCOVER (0x27aU) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | 0x7A" */
+
+typedef struct NV0000_CTRL_GPU_DISCOVER_PARAMS {
+    NvU32 domain;
+    NvU8  bus;
+    NvU8  slot;
+    NvU8  function;
+} NV0000_CTRL_GPU_DISCOVER_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE
+ *
+ * This command is used to get the content of the MemOp (CUDA Memory Operation)
+ * enablement mask, which can be overridden by using the MemOpOverride RegKey.
+ *
+ * The enableMask member must be treated as a bitmask, where each bit controls
+ * the enablement of a feature.
+ *
+ * So far, the only feature defined controls the whole set of MemOp APIs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE (0x27bU) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS_MESSAGE_ID (0x7BU)
+
+typedef struct NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS {
+    NvU32 enableMask;
+} NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS;
+
+#define NV0000_CTRL_GPU_FLAGS_MEMOP_ENABLE (0x00000001U)
+
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_DISABLE_NVLINK_INIT
+ *
+ * This privileged command is used to disable initialization for the NVLinks
+ * provided in the mask.
+ *
+ * The mask must be applied before the GPU is attached. DISABLE_NVLINK_INIT
+ * is a NOP for non-NVLink GPUs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_IN_USE
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_DISABLE_NVLINK_INIT (0x281U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS {
+    NvU32  gpuId;
+    NvU32  mask;
+    NvBool bSkipHwNvlinkDisable;
+} NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS;
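[Editorial illustration.] Checking the MemOp enablement mask with the hypothetical rmControl() wrapper; per the comment above, only bit 0 is currently defined, and it gates the whole MemOp API set.

static NvBool memOpsEnabled(NvHandle hClient)
{
    NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS p = { 0 };

    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE,
                  &p, sizeof(p)) != NV_OK)
        return NV_FALSE;

    /* Bit 0 currently gates the whole MemOp API set. */
    return (p.enableMask & NV0000_CTRL_GPU_FLAGS_MEMOP_ENABLE) ? NV_TRUE : NV_FALSE;
}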
+
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PARAM_DATA 0x00000175U
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_IN 6U
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_OUT 5U
+
+/*
+ * NV0000_CTRL_CMD_GPU_LEGACY_CONFIG
+ *
+ * Path to use legacy RM GetConfig/Set API. This API is being phased out.
+ */
+#define NV0000_CTRL_CMD_GPU_LEGACY_CONFIG (0x282U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS_MESSAGE_ID (0x82U)
+
+typedef struct NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS {
+    NvHandle hContext;  /* [in]  - Handle of object to perform operation on (Device, Subdevice, etc) */
+    NvU32    opType;    /* [in]  - Type of API */
+    NvV32    index;     /* [in]  - command type */
+    NvU32    dataType;  /* [out] - data union type */
+
+    union {
+        struct {
+            NvU32 newValue;
+            NvU32 oldValue;
+        } configSet;
+        struct {
+            NvU8  paramData[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PARAM_DATA];
+            NvU32 paramSize;
+        } configEx;
+        struct {
+            NvU32 propertyId;
+            NvU32 propertyIn[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_IN];
+            NvU32 propertyOut[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_OUT];
+        } reservedProperty;
+    } data;
+} NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS;
+
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_SET (0x00000001U)
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_GET_EX (0x00000002U)
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_SET_EX (0x00000003U)
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_RESERVED (0x00000004U)
+
+/*
+ * NV0000_CTRL_CMD_IDLE_CHANNELS
+ */
+#define NV0000_CTRL_CMD_IDLE_CHANNELS (0x283U) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS_MESSAGE_ID (0x83U)
+
+typedef struct NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS {
+    NvHandle hDevice;
+    NvHandle hChannel;
+    NvV32    numChannels;
+    /* C form: NvP64 phClients NV_ALIGN_BYTES(8); */
+    NV_DECLARE_ALIGNED(NvP64 phClients, 8);
+    /* C form: NvP64 phDevices NV_ALIGN_BYTES(8); */
+    NV_DECLARE_ALIGNED(NvP64 phDevices, 8);
+    /* C form: NvP64 phChannels NV_ALIGN_BYTES(8); */
+    NV_DECLARE_ALIGNED(NvP64 phChannels, 8);
+    NvV32    flags;
+    NvV32    timeout;
+} NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS;
+
+/* _ctrl0000gpu_h_ */
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
new file mode 100644
index 0000000..6f8ff13
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
@@ -0,0 +1,255 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000gpuacct.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE
+ *
+ * This command is used to enable or disable the per process GPU accounting.
+ * This is part of GPU's software state and will persist if persistent
+ * software state is enabled. Refer to the description of
+ * NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE for more information.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process for which
+ *     the accounting state needs to be set.
+ *     In case of VGX host, this parameter specifies the VGPU plugin (VM) pid.
+ *     This parameter is set only when this RM control is called from the VGPU
+ *     plugin, otherwise it is zero, meaning set/reset the accounting state for
+ *     the specified GPU.
+ *   newState
+ *     This input parameter is used to enable or disable the GPU accounting.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE (0xb01) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID" */
+
+/* Possible values of newState */
+#define NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED (0x00000000)
+#define NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED (0x00000001)
+
+#define NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 newState;
+} NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_STATE
+ *
+ * This command is used to get the current state of GPU accounting.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process of which the
+ *     accounting state needs to be queried.
+ *     In case of VGX host, this parameter specifies the VGPU plugin (VM) pid.
+ *     This parameter is set only when this RM control is called from the VGPU
+ *     plugin, otherwise it is zero, meaning the accounting state needs to be
+ *     queried for the specified GPU.
+ *   state
+ *     This parameter returns a value indicating if per process GPU accounting
+ *     is currently enabled or not for the specified GPU. See the
+ *     description of NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE.
+ *     Possible values are:
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED
+ *       NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_STATE (0xb02) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 state;
+} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_GET_PROC_ACCOUNTING_INFO
+ *
+ * This command returns GPU accounting data for the process.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This parameter specifies the PID of the process for which information is
+ *     to be queried.
+ *     In case of VGX host, this parameter specifies the VGPU plugin (VM) pid
+ *     inside which the subPid is running. This parameter is set to the VGPU
+ *     plugin pid when this RM control is called from the VGPU plugin.
+ *   subPid
+ *     In case of VGX host, this parameter specifies the PID of the process for
+ *     which information is to be queried. In other cases, it is zero.
+ *   gpuUtil
+ *     This parameter returns the average GR utilization during the process's
+ *     lifetime.
+ *   fbUtil
+ *     This parameter returns the average FB bandwidth utilization during the
+ *     process's lifetime.
+ *   maxFbUsage
+ *     This parameter returns the maximum FB allocated (in bytes) by the process.
+ *   startTime
+ *     This parameter returns the timestamp value in microseconds at the time
+ *     the process started utilizing the GPU.
+ *   endTime
+ *     This parameter returns the timestamp value in microseconds at the time
+ *     the process stopped utilizing the GPU.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_GPUACCT_GET_PROC_ACCOUNTING_INFO (0xb03) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 subPid;
+    NvU32 gpuUtil;
+    NvU32 fbUtil;
+    NV_DECLARE_ALIGNED(NvU64 maxFbUsage, 8);
+    NV_DECLARE_ALIGNED(NvU64 startTime, 8);
+    NV_DECLARE_ALIGNED(NvU64 endTime, 8);
+} NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS
+ *
+ * This command is used to get the PIDs of processes with accounting
+ * information in the driver.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process of which the
+ *     information needs to be queried.
+ *     In case of VGX host, this parameter specifies the VGPU plugin (VM) pid.
+ *     This parameter is set only when this RM control is called from the VGPU
+ *     plugin, otherwise it is zero, meaning get the pid list of all the
+ *     processes running on the specified GPU.
+ *   pidTbl
+ *     This parameter returns the table of all PIDs for which the driver has
+ *     accounting info.
+ *   pidCount
+ *     This parameter returns the number of entries in the PID table.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS (0xb04) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_MESSAGE_ID" */
+
+/* max size of pidTable */
+#define NV0000_GPUACCT_PID_MAX_COUNT 4000
+
+#define NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+    NvU32 pidTbl[NV0000_GPUACCT_PID_MAX_COUNT];
+    NvU32 pidCount;
+} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPUACCT_CLEAR_ACCOUNTING_DATA
+ *
+ * This command is used to clear previously collected GPU accounting data. This
+ * will have no effect on data for the running processes; accounting data for
+ * these processes will not be cleared and will still be logged for these
+ * processes. In order to clear ALL accounting data, accounting needs to be
+ * disabled using NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE before executing
+ * this command.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. Refer to the
+ *     description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more
+ *     information. If there is no GPU present with the specified ID,
+ *     a status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   pid
+ *     This input parameter specifies the process id of the process for which
+ *     the accounting data needs to be cleared.
+ *     In case of VGX host, this parameter specifies the VGPU plugin (VM) pid for
+ *     which the accounting data needs to be cleared. This parameter is set only
+ *     when this RM control is called from the VGPU plugin, otherwise it is zero,
+ *     meaning clear the accounting data of processes running on a baremetal
+ *     system.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0000_CTRL_CMD_GPUACCT_CLEAR_ACCOUNTING_DATA (0xb05) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS {
+    NvU32 gpuId;
+    NvU32 pid;
+} NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS;
+
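[Editorial illustration.] A sketch tying the accounting controls together with the hypothetical rmControl() wrapper (plus <string.h> for memset): fetch the PID table, then query per-process accounting data. Units of gpuUtil/fbUtil are printed raw, as reported by the control.

static void dumpAccounting(NvHandle hClient, NvU32 gpuId)
{
    /* ~16 KB of pid table, so keep it off the stack in this sketch. */
    static NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS pids;
    NvU32 i;

    memset(&pids, 0, sizeof(pids));
    pids.gpuId = gpuId;
    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS,
                  &pids, sizeof(pids)) != NV_OK)
        return;

    for (i = 0; i < pids.pidCount; i++) {
        NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS info = { 0 };
        info.gpuId = gpuId;
        info.pid = pids.pidTbl[i];
        if (rmControl(hClient, hClient,
                      NV0000_CTRL_CMD_GPUACCT_GET_PROC_ACCOUNTING_INFO,
                      &info, sizeof(info)) == NV_OK)
            printf("pid %u: gr util %u, fb util %u, peak fb %llu bytes\n",
                   info.pid, info.gpuUtil, info.fbUtil,
                   (unsigned long long)info.maxFbUsage);
    }
}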
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h
new file mode 100644
index 0000000..419d006
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000gspc.finn
+//
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
new file mode 100644
index 0000000..2df6c42
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
@@ -0,0 +1,101 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000gsync.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+#include "class/cl30f1.h"
+/* NV01_ROOT (client) system controller control commands and parameters */
+
+/*
+ * NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS
+ *
+ * This command returns a table of attached gsyncId values.
+ * The table is NV0000_CTRL_GSYNC_MAX_ATTACHED_GSYNCS entries in size.
+ *
+ *   gsyncIds[]
+ *     This parameter returns the table of attached gsync IDs.
+ *     The gsync ID is an opaque platform-dependent value that
+ *     can be used with the NV0000_CTRL_CMD_GSYNC_GET_ID_INFO command to
+ *     retrieve additional information about the gsync device.
+ *     The valid entries in gsyncIds[] are contiguous, with a value
+ *     of NV0000_CTRL_GSYNC_INVALID_ID indicating the invalid entries.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS (0x301) /* finn: Evaluated from "(FINN_NV01_ROOT_GSYNC_INTERFACE_ID << 8) | NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS {
+    NvU32 gsyncIds[NV30F1_MAX_GSYNCS];
+} NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS;
+
+/* this value marks entries in gsyncIds[] as invalid */
+#define NV0000_CTRL_GSYNC_INVALID_ID (0xffffffff)
+
+/*
+ * NV0000_CTRL_CMD_GSYNC_GET_ID_INFO
+ *
+ * This command returns gsync instance information for the
+ * specified gsync device.
+ *
+ *   gsyncId
+ *     This parameter should specify a valid gsync ID value.
+ *     If there is no gsync present with the specified ID, a
+ *     status of NV_ERR_INVALID_ARGUMENT is returned.
+ *   gsyncFlags
+ *     This parameter returns the current state of the gsync device.
+ *   gsyncInstance
+ *     This parameter returns the instance number associated with the
+ *     specified gsync. This value can be used to instantiate
+ *     a reference to the gsync using one of the NV30_GSYNC
+ *     classes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_GSYNC_GET_ID_INFO (0x302) /* finn: Evaluated from "(FINN_NV01_ROOT_GSYNC_INTERFACE_ID << 8) | NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS {
+    NvU32 gsyncId;
+    NvU32 gsyncFlags;
+    NvU32 gsyncInstance;
+} NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS;
+
+/* _ctrl0000gsync_h_ */
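[Editorial illustration.] Enumerating gsync devices follows the same pattern as GPU enumeration; a sketch with the hypothetical rmControl() wrapper (NV30F1_MAX_GSYNCS comes from class/cl30f1.h, included by the header above).

static void listGsyncDevices(NvHandle hClient)
{
    NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS ids = { 0 };
    NvU32 i;

    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS,
                  &ids, sizeof(ids)) != NV_OK)
        return;

    /* Valid entries are contiguous; NV0000_CTRL_GSYNC_INVALID_ID ends the list. */
    for (i = 0; i < NV30F1_MAX_GSYNCS &&
         ids.gsyncIds[i] != NV0000_CTRL_GSYNC_INVALID_ID; i++) {
        NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS info = { 0 };
        info.gsyncId = ids.gsyncIds[i];
        if (rmControl(hClient, hClient, NV0000_CTRL_CMD_GSYNC_GET_ID_INFO,
                      &info, sizeof(info)) == NV_OK)
            printf("gsyncId 0x%x -> instance %u\n",
                   info.gsyncId, info.gsyncInstance);
    }
}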
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
new file mode 100644
index 0000000..41a4954
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
@@ -0,0 +1,636 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000nvd.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+/* NV01_ROOT (client) nvd control commands and parameters */
+
+/*
+ * NV0000_CTRL_NVD_DUMP_COMPONENT
+ *
+ * The following dump components are used to describe legal ranges in
+ * commands below:
+ *
+ *   NV0000_CTRL_NVD_DUMP_COMPONENT_SYS
+ *     This is the system dump component.
+ *   NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG
+ *     This is the nvlog dump component.
+ *   NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED
+ *     This component is reserved.
+ *
+ * See nvdump.h for more information on dump component values.
+ */
+#define NV0000_CTRL_NVD_DUMP_COMPONENT_SYS (0x400)
+#define NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG (0x800)
+#define NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED (0xB00)
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_DUMP_SIZE
+ *
+ * This command gets the expected dump size of a particular system
+ * dump component. Note that events that occur between this command
+ * and a later NV0000_CTRL_CMD_NVD_GET_DUMP command could alter the size of
+ * the buffer required.
+ *
+ *   component
+ *     This parameter specifies the system dump component for which the
+ *     dump size is desired. Legal values for this parameter must
+ *     be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_SYS and
+ *     less than NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG.
+ *   size
+ *     This parameter returns the expected size in bytes. The maximum
+ *     value returned is NV0000_CTRL_NVD_MAX_DUMP_SIZE.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
+ */
+
+#define NV0000_CTRL_CMD_NVD_GET_DUMP_SIZE (0x601) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS {
+    NvU32 component;
+    NvU32 size;
+} NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS;
+
+/* Max size that a GET_DUMP_SIZE_PARAMS call can return */
+#define NV0000_CTRL_NVD_MAX_DUMP_SIZE (1000000)
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_DUMP
+ *
+ * This command gets a dump of a particular system dump component. If triggers
+ * is non-zero, the command waits for the trigger to occur before it returns.
+ *
+ *   pBuffer
+ *     This parameter points to the buffer for the data.
+ *   component
+ *     This parameter specifies the system dump component for which the
+ *     dump is to be retrieved. Legal values for this parameter must
+ *     be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_SYS and
+ *     less than NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG.
+ *   size
+ *     On entry, this parameter specifies the maximum length for
+ *     the returned data. On exit, it specifies the number of bytes
+ *     returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_ERROR_INVALID_ARGUMENT if components are invalid.
+ *   NVOS_ERROR_INVALID_ADDRESS if pBuffer is invalid
+ *   NVOS_ERROR_INVALID_???? if the buffer was too small
+ */
+#define NV0000_CTRL_CMD_NVD_GET_DUMP (0x602) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_NVD_GET_DUMP_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pBuffer, 8);
+    NvU32 component;
+    NvU32 size;
+} NV0000_CTRL_NVD_GET_DUMP_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_TIMESTAMP
+ *
+ * This command returns the current value of the timestamp used
+ * by the RM in NvDebug dumps. It is provided to keep the RM and NvDebug
+ * clients on the same time base.
+ *
+ *   cpuClkId
+ *     See also NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO.
+ *     This parameter specifies the source of the CPU clock. Legal values for
+ *     this parameter include:
+ *       NV0000_NVD_CPU_TIME_CLK_ID_DEFAULT and NV0000_NVD_CPU_TIME_CLK_ID_OSTIME
+ *         This clock id will provide real time in microseconds since
+ *         00:00:00 UTC on January 1, 1970.
+ *         It is calculated as follows:
+ *           (seconds * 1000000) + uSeconds
+ *       NV0000_NVD_CPU_TIME_CLK_ID_PLATFORM_API
+ *         This clock id will provide a timestamp that is constant-rate, high
+ *         precision, using a platform API that is also available in user mode.
+ *       NV0000_NVD_CPU_TIME_CLK_ID_TSC
+ *         This clock id will provide a timestamp using the CPU's time stamp
+ *         counter.
+ *
+ *   timestamp
+ *     Retrieved timestamp
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_NVD_CPU_TIME_CLK_ID_DEFAULT (0x00000000)
+#define NV0000_NVD_CPU_TIME_CLK_ID_OSTIME (0x00000001)
+#define NV0000_NVD_CPU_TIME_CLK_ID_TSC (0x00000002)
+#define NV0000_NVD_CPU_TIME_CLK_ID_PLATFORM_API (0x00000003)
+
+#define NV0000_CTRL_CMD_NVD_GET_TIMESTAMP (0x603) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 timestamp, 8);
+    NvU8 cpuClkId;
+} NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS;
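[Editorial illustration.] Sizing and fetching a system dump under the hypothetical rmControl() wrapper (plus <stdlib.h>). The NV_PTR_TO_NvP64 conversion is assumed to be the usual nvtypes.h pointer-to-NvP64 helper; as the comment above notes, the required size can change between the two calls, so real code should tolerate a short or failed read.

static void fetchSystemDump(NvHandle hClient)
{
    NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS sz = { 0 };
    NV0000_CTRL_NVD_GET_DUMP_PARAMS dump = { 0 };
    void *buf;

    sz.component = NV0000_CTRL_NVD_DUMP_COMPONENT_SYS;
    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_NVD_GET_DUMP_SIZE,
                  &sz, sizeof(sz)) != NV_OK)
        return;

    buf = malloc(sz.size);
    if (buf == NULL)
        return;

    dump.component = NV0000_CTRL_NVD_DUMP_COMPONENT_SYS;
    dump.size = sz.size;                 /* in: capacity; out: bytes written */
    dump.pBuffer = NV_PTR_TO_NvP64(buf); /* assumed nvtypes.h helper */

    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_NVD_GET_DUMP,
                  &dump, sizeof(dump)) == NV_OK)
        printf("captured %u bytes of system dump\n", dump.size);
    free(buf);
}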
+/*
+ * NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO
+ *
+ * This command gets the current state of the NvLog buffer system.
+ *
+ *   component (in)
+ *     This parameter specifies the system dump component for which the
+ *     NvLog info is desired. Legal values for this parameter must
+ *     be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG and
+ *     less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED.
+ *   version (out)
+ *     This parameter returns the version of the NvLog subsystem.
+ *   runtimeSizes (out)
+ *     This parameter returns the array of sizes for all supported printf
+ *     specifiers. This information is necessary to know how many bytes
+ *     to decode when given a certain specifier (such as %d).
+ *     The following describes the contents of each array entry:
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_UNUSED
+ *         This array entry has special meaning and is unused in the
+ *         runtimeSizes array.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_INT
+ *         This array entry returns the size of integer types for use in
+ *         interpreting the %d, %u, %x, %X, %i, %o specifiers.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_LONG_LONG
+ *         This array entry returns the size of long long integer types for
+ *         use in interpreting the %lld, %llu, %llx, %llX, %lli, %llo
+ *         specifiers.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_STRING
+ *         This array entry returns zero as strings are not allowed.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_PTR
+ *         This array entry returns the size of the pointer type for use
+ *         in interpreting the %p specifier.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_CHAR
+ *         This array entry returns the size of the char type for use in
+ *         interpreting the %c specifier.
+ *       NV0000_CTRL_NVD_RUNTIME_SIZE_FLOAT
+ *         This array entry returns the size of the float types for use
+ *         in interpreting the %f, %g, %e, %F, %G, %E specifiers.
+ *     All remaining entries are reserved and return 0.
+ *   printFlags (out)
+ *     This parameter returns the flags of the NvLog system.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_BUFFER_FLAGS
+ *         See NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO for more details.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_BUFFER_SIZE
+ *         This field returns the buffer size in KBytes. A value of zero
+ *         is returned when logging is disabled.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP
+ *         This field returns the format of the timestamp. Legal values
+ *         for this parameter include:
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_NONE
+ *             This value indicates no timestamp.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_32BIT
+ *             This value indicates a 32-bit timestamp.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_64BIT
+ *             This value indicates a 64-bit timestamp.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_32BIT_DIFF
+ *             This value indicates a 32-bit differential timestamp.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_RESERVED
+ *         This field is reserved.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_RUNTIME_LEVEL
+ *         This field returns the lowest debug level for which logging
+ *         is enabled by default.
+ *       NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT
+ *         This field indicates if logging for the specified component has
+ *         been initialized. Legal values for this parameter include:
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT_NO
+ *             This value indicates NvLog is uninitialized.
+ *           NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT_YES
+ *             This value indicates NvLog has been initialized.
+ *   signature (out)
+ *     This parameter is the signature of the database
+ *     required to decode these logs, autogenerated at build time.
+ *   bufferTags (out)
+ *     This parameter identifies, for each possible buffer, the buffer tag
+ *     used during allocation, or a value of '0' if the buffer is
+ *     unallocated.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
+ */
+#define NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO (0x604) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum size of the runtimeSizes array */
+#define NV0000_CTRL_NVD_MAX_RUNTIME_SIZES (16)
+
+/* size of signature parameter */
+#define NV0000_CTRL_NVD_SIGNATURE_SIZE (4)
+
+/* Maximum number of buffers */
+#define NV0000_CTRL_NVD_MAX_BUFFERS (256)
+
+#define NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS {
+    NvU32 component;
+    NvU32 version;
+    NvU8  runtimeSizes[NV0000_CTRL_NVD_MAX_RUNTIME_SIZES];
+    NvU32 printFlags;
+    NvU32 signature[NV0000_CTRL_NVD_SIGNATURE_SIZE];
+    NvU32 bufferTags[NV0000_CTRL_NVD_MAX_BUFFERS];
+} NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS;
+
+/* runtimeSize array indices */
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_UNUSED (0)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_INT (1)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_LONG_LONG (2)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_STRING (3)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_PTR (4)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_CHAR (5)
+#define NV0000_CTRL_NVD_RUNTIME_SIZE_FLOAT (6)
+
+/* printFlags fields and values */
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_INFO 7:0
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE 23:8
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE_DISABLE (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE_DEFAULT (0x00000004)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_RUNTIME_LEVEL 28:25
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP 30:29
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_NONE (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_32 (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_64 (0x00000002)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_32_DIFF (0x00000003)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED 31:31
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED_NO (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED_YES (0x00000001)
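[Editorial illustration.] Reading the NvLog state and decoding two of the printFlags fields, again via the hypothetical rmControl() wrapper. The field defines above use FINN's high:low bit-range notation and would normally be decoded with DRF-style helper macros; the shifts below spell out the 23:8 and 31:31 fields by hand.

static void showNvlogInfo(NvHandle hClient)
{
    NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS p = { 0 };
    NvU32 sizeKb, inited;

    p.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
    if (rmControl(hClient, hClient, NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO,
                  &p, sizeof(p)) != NV_OK)
        return;

    sizeKb = (p.printFlags >> 8) & 0xFFFF; /* _PRINTFLAGS_BUFFER_SIZE, bits 23:8 */
    inited = (p.printFlags >> 31) & 0x1;   /* _PRINTFLAGS_INITED, bit 31 */

    printf("NvLog v%u: %s, buffer size %u KB\n", p.version,
           inited ? "initialized" : "uninitialized", sizeKb);
}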
+ * On output, this parameter returns the flags of the specified buffer:
+ *   NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED
+ *     This flag indicates whether logging to the specified buffer is
+ *     disabled.
+ *   NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE
+ *     This flag indicates the buffer logging type:
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_RING
+ *       This type value indicates logging to the buffer wraps.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_NOWRAP
+ *       This type value indicates logging to the buffer does not wrap.
+ *   NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE
+ *     This flag indicates if the buffer size is expandable.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_NO
+ *       The buffer is not expandable.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_YES
+ *       The buffer is expandable.
+ *   NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED
+ *     This flag indicates if the buffer occupies non-paged or pageable
+ *     memory.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_NO
+ *       The buffer is in pageable memory.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_YES
+ *       The buffer is in non-paged memory.
+ *   NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING
+ *     This flag indicates the locking mode for the specified buffer.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_NONE
+ *       This locking value indicates that no locking is performed. This
+ *       locking mode is typically used for inherently single-threaded
+ *       buffers.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_STATE
+ *       This locking value indicates that the buffer is locked only
+ *       during state changes and that memory copying is unlocked. This
+ *       mode should not be used for tiny buffers that overflow every
+ *       write or two.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_FULL
+ *       This locking value indicates the buffer is locked for the full
+ *       duration of the write.
+ *   NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA
+ *     This flag indicates if the buffer is stored in OCA dumps.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_NO
+ *       The buffer is not included in OCA dumps.
+ *     NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_YES
+ *       The buffer is included in OCA dumps.
+ * pos (out)
+ *   This parameter is the current position of the tracker/cursor in the
+ *   buffer.
+ * overflow (out)
+ *   This parameter is the number of times the buffer has overflowed.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
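+ *
+ * As a usage illustration, pausing a buffer selected by tag might look
+ * like the following minimal sketch (NvRmControl, hClient and myBufferTag
+ * are placeholders for the caller's RMAPI entry point, client handle and
+ * buffer tag; they are not part of this interface):
+ *
+ *   NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS info = { 0 };
+ *   info.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *   info.tag       = myBufferTag;  // non-zero tag selects the buffer...
+ *   info.buffer    = 0;            // ...and 'buffer' is filled in on return
+ *   info.flags     = NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES;
+ *   status = NvRmControl(hClient, hClient,
+ *                        NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO,
+ *                        &info, sizeof(info));
+ *   // on return, info.size, info.pos and info.overflow describe the buffer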
+ */
+
+#define NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO (0x605) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS {
+    NvU32 component;
+    NvU32 buffer;
+    NvU32 tag;
+    NvU32 size;
+    NvU32 flags;
+    NvU32 pos;
+    NvU32 overflow;
+} NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS;
+
+/* flags fields and values */
+/* input */
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE           0:0
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_NO        (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES       (0x00000001)
+
+/* output */
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED        0:0
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED_NO     (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED_YES    (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE            1:1
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_RING       (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_NOWRAP     (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE      2:2
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_NO   (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_YES  (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED        3:3
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_NO     (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_YES    (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING         5:4
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_NONE    (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_STATE   (0x00000001)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_FULL    (0x00000002)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA             6:6
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_NO          (0x00000000)
+#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_YES         (0x00000001)
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_NVLOG
+ *
+ * This command retrieves the specified dump block from the specified
+ * NvLog buffer. To retrieve the entire buffer, the caller should start
+ * with blockNum set to 0 and continue issuing calls with an incremented
+ * blockNum until the returned size value is less than
+ * NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE.
+ *
+ * component (in)
+ *   This parameter specifies the system dump component to which the NvLog
+ *   dump operation is to be directed. Legal values for this parameter
+ *   must be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG
+ *   and less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED.
+ * buffer (in)
+ *   This parameter specifies the NvLog buffer to dump.
+ * blockNum (in)
+ *   This parameter specifies the block number for which data is to be
+ *   dumped.
+ * size (in/out)
+ *   On entry, this parameter specifies the maximum length in bytes for
+ *   the returned data (it should be set to NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE).
+ *   On exit, it specifies the number of bytes returned.
+ * data (out)
+ *   This parameter returns the data for the specified block. The size
+ *   parameter indicates the number of valid bytes returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
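+ *
+ * For example, a caller might drain one buffer with a loop along these
+ * lines (a sketch only; NvRmControl, hClient and bufferNum are
+ * placeholders for the caller's RMAPI entry point, client handle and
+ * buffer number):
+ *
+ *   NV0000_CTRL_NVD_GET_NVLOG_PARAMS log = { 0 };
+ *   log.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *   log.buffer    = bufferNum;
+ *   do
+ *   {
+ *       log.size = NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE;
+ *       status   = NvRmControl(hClient, hClient,
+ *                              NV0000_CTRL_CMD_NVD_GET_NVLOG,
+ *                              &log, sizeof(log));
+ *       // consume log.size valid bytes from log.data here
+ *       log.blockNum++;
+ *   } while ((status == NV_OK) &&
+ *            (log.size == NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE));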
+ */
+#define NV0000_CTRL_CMD_NVD_GET_NVLOG (0x606) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE (4000)
+
+#define NV0000_CTRL_NVD_GET_NVLOG_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0000_CTRL_NVD_GET_NVLOG_PARAMS {
+    NvU32 component;
+    NvU32 buffer;
+    NvU32 blockNum;
+    NvU32 size;
+    NvU8  data[NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE];
+} NV0000_CTRL_NVD_GET_NVLOG_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_RCERR_RPT
+ *
+ * This command returns a block of registers that were recorded at the time
+ * of an RC error for the current process.
+ *
+ * reqIdx:
+ *   [IN] the index of the report being requested.
+ *   The index rolls over to 0.
+ *   If the requested index is not in the circular buffer, then no data is
+ *   transferred and NV_ERR_INVALID_INDEX (indicating the specified index
+ *   is not in the table) is returned.
+ *
+ * rptIdx:
+ *   [OUT] the index of the report being returned.
+ *   If the requested index is not in the circular buffer, then the value is
+ *   undefined, no data is transferred and NV_ERR_INVALID_INDEX is returned.
+ *   If the specified index is present, but does not meet the requested
+ *   criteria (refer to the owner & processId fields), the rptIdx will be
+ *   set to a value that does not match the reqIdx, and no data will be
+ *   transferred. NV_ERR_INSUFFICIENT_PERMISSIONS is still returned.
+ *
+ * GPUTag:
+ *   [OUT] id of the GPU whose data was collected.
+ *
+ * rptTime:
+ *   [OUT] the timestamp for when the report was created.
+ *
+ * startIdx:
+ *   [OUT] the index of the oldest start record for the first report that
+ *   matches the specified criteria (refer to the owner & processId
+ *   fields). If no records match the specified criteria, this value is
+ *   undefined, the failure code NV_ERR_MISSING_TABLE_ENTRY will
+ *   be returned, and no data will be transferred.
+ *
+ * endIdx:
+ *   [OUT] the index of the newest end record for the most recent report
+ *   that matches the specified criteria (refer to the owner & processId
+ *   fields). If no records match the specified criteria, this value is
+ *   undefined, the failure code NV_ERR_MISSING_TABLE_ENTRY will
+ *   be returned, and no data will be transferred.
+ *
+ * rptType:
+ *   [OUT] indicator of what data is in the report.
+ *
+ * flags:
+ *   [OUT] a set of flags indicating attributes of the record:
+ *   NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_FIRST -- indicates this is the
+ *     first record of a report.
+ *   NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_LAST -- indicates this is the
+ *     last record of the report.
+ *   NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_RANGE_VALID -- indicates the
+ *     response contains a valid index range.
+ *     Note: this may be set when an error is returned, indicating that a
+ *     valid range was found even though the event for the requested index
+ *     was not.
+ *   NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID -- indicates the
+ *     response contains valid data.
+ *
+ * rptCount:
+ *   [OUT] number of entries returned in the report.
+ *
+ * owner:
+ *   [IN] Entries are only returned if they have the same owner as the
+ *   specified owner, or if the specified owner ID is
+ *   NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID.
+ *   If the requested index is not owned by the specified owner, the rptIdx
+ *   will be set to a value that does not match the reqIdx, and no data will
+ *   be transferred. NV_ERR_INSUFFICIENT_PERMISSIONS is returned.
+ *
+ * processId:
+ *   [IN] Deprecated
+ * report:
+ *   [OUT] array of rptCount enum/value pair entries containing the data
+ *   from the report. Entries beyond rptCount are undefined.
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK -- the requested record was found and transferred.
+ *   NV_ERR_MISSING_TABLE_ENTRY -- no records were found that meet the criteria.
+ *   NV_ERR_INVALID_INDEX -- the requested index was not found in the buffer.
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS -- the requested record was found, but it
+ *     did not meet the criteria.
+ *   NV_ERR_BUSY_RETRY -- the circular buffer could not be accessed.
+ *
+ */
+
+#define NV0000_CTRL_CMD_NVD_GET_RCERR_RPT (0x607) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_MAX_ENTRIES 200
+
+// report types
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_TEST             0
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_GRSTATUS         1
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_GPCSTATUS        2
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_MMU_FAULT_STATUS 3
+
+// pseudo register enums attribute content
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_EMPTY         0x00000000
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_OVERFLOWED    0x00000001 // number of missed entries.
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_MAX_PSEDO_REG 0x0000000f
+
+
+
+// Flags Definitions
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_FIRST   0x00000001 // indicates this is the first record of a report.
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_LAST    0x00000002 // indicates this is the last record of the report.
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_RANGE_VALID 0x00000004 // indicates the response contains a valid range.
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID  0x00000008 // indicates the response contains valid data.
+
+
+// Attribute Definitions
+#define TPC_REG_ATTR(gpcId, tpcId)      (((gpcId) << 8) | (tpcId))
+#define ROP_REG_ATTR(gpcId, ropId)      (((gpcId) << 8) | (ropId))
+#define SM_REG_ATTR(gpcId, tpcId, smId) ((((gpcId) << 16) | ((tpcId) << 8)) | (smId))
+
+// Process Id Pseudo values
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_PROCESS_ID 0x00000000 // get report for any process ID
+
+#define NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID   0xFFFFFFFF // get report for any owner ID
+
+
+typedef struct NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY {
+    NvU32 tag;
+    NvU32 value;
+    NvU32 attribute;
+} NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY;
+
+#define NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS_MESSAGE_ID (0x7U)
+
+typedef struct NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS {
+    NvU16 reqIdx;
+    NvU16 rptIdx;
+    NvU32 GPUTag;
+    NvU32 rptTime;   // time in seconds since 1/1/1970
+    NvU16 startIdx;
+    NvU16 endIdx;
+    NvU16 rptType;
+    NvU32 flags;
+    NvU16 rptCount;
+    NvU32 owner;     // indicating whose reports to get
+    NvU32 processId; // deprecated field
+
+    NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY report[NV0000_CTRL_CMD_NVD_RCERR_RPT_MAX_ENTRIES];
+} NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_DPC_ISR_TS
+ *
+ * This command returns the timestamp information that is collected from
+ * the execution of various DPCs/ISRs. This timestamp information is for
+ * debugging purposes only and helps with analyzing regressions and
+ * latencies in DPC/ISR execution times.
+ *
+ * tsBufferSize
+ *   This field specifies the size of the buffer that the caller allocates.
+ * tsBuffer
+ *   This field specifies a pointer in the caller's address space to the
+ *   buffer into which the timestamp info on DPC/ISR is to be returned.
+ *   This buffer must be at least as big as tsBufferSize.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_NVD_GET_DPC_ISR_TS (0x608) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS {
+    NvU32 tsBufferSize;
+    NV_DECLARE_ALIGNED(NvP64 pTSBuffer, 8);
+} NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS;
+
+/* _ctrl0000nvd_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
new file mode 100644
index 0000000..9a48202
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
@@ -0,0 +1,98 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000proc.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+#include "nvlimits.h"
+
+/*
+ * NV0000_CTRL_CMD_SET_SUB_PROCESS_ID
+ *
+ * Save the sub process ID and sub process name in the client database.
+ *   subProcessID
+ *     Sub process ID
+ *   subProcessName
+ *     Sub process name
+ *
+ * In a vGPU environment, a sub process means a guest user/kernel process
+ * running within a single VM. It also refers to any sub process (or
+ * sub-sub process) within a parent process.
+ *
+ * Please refer to the wiki for more details about the sub process concept:
+ * Resource_Server
+ *
+ * Possible return values are:
+ *   NV_OK
+ */
+#define NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS {
+    NvU32 subProcessID;
+    char  subProcessName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_DISABLE_SUB_PROCESS_USERD_ISOLATION
+ *
+ * Disable sub process USERD isolation.
+ *   bIsSubProcessDisabled
+ *     NV_TRUE to disable sub process USERD isolation
+ *
+ * USERD allocated by different domains should not be put into the same
+ * physical page. This provides the basic security isolation, because a
+ * physical page is the unit of granularity at which the OS can provide
+ * isolation between processes.
+ *
+ *   GUEST_USER:     USERD allocated by a guest user process
+ *   GUEST_KERNEL:   USERD allocated by a guest kernel process
+ *   GUEST_INSECURE: USERD allocated by a guest user/kernel process;
+ *                   INSECURE means there is no isolation between the guest
+ *                   user and the guest kernel
+ *   HOST_USER:      USERD allocated by a host user process
+ *   HOST_KERNEL:    USERD allocated by a host kernel process
+ *
+ * When sub process USERD isolation is disabled, USERD allocated by the
+ * guest user and the guest kernel are not distinguished; they all belong
+ * to the GUEST_INSECURE domain.
+ *
+ * Please refer to the wiki for more details: RM_USERD_Isolation
+ *
+ * Possible return values are:
+ *   NV_OK
+ */
+#define NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS {
+    NvBool bIsSubProcessDisabled;
+} NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS;
+
+#define NV0000_CTRL_CMD_SET_SUB_PROCESS_ID (0x901) /* finn: Evaluated from "(FINN_NV01_ROOT_PROC_INTERFACE_ID << 8) | NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CMD_DISABLE_SUB_PROCESS_USERD_ISOLATION (0x902) /* finn: Evaluated from "(FINN_NV01_ROOT_PROC_INTERFACE_ID << 8) | NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS_MESSAGE_ID" */
+
+/* _ctrl0000proc_h_ */
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
new file mode 100644
index 0000000..b8e921e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
@@ -0,0 +1,112 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000syncgpuboost.finn
+//
+
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+#include "ctrl/ctrlxxxx.h"
+#include "nvtypes.h"
+#include "nvlimits.h"
+
+/* --------------------------- Macros ----------------------------------------*/
+// There are at least 2 GPUs in a sync group. Hence the max is half of the max devices.
+#define NV0000_SYNC_GPU_BOOST_MAX_GROUPS       (0x10) /* finn: Evaluated from "((NV_MAX_DEVICES) >> 1)" */
+#define NV0000_SYNC_GPU_BOOST_INVALID_GROUP_ID 0xFFFFFFFF
+
+/*-------------------------Command Prototypes---------------------------------*/
+
+/*!
+ * Query whether the SYNC GPU BOOST MANAGER is enabled or disabled.
+ */
+#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_INFO (0xa01) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_SYNC_GPU_BOOST_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_SYNC_GPU_BOOST_INFO_PARAMS {
+    // [out] Specifies if the Sync Gpu Boost Manager is enabled or not.
+    NvBool bEnabled;
+} NV0000_SYNC_GPU_BOOST_INFO_PARAMS;
+
+/*!
+ * Creates a Synchronized GPU-Boost Group (SGBG)
+ */
+#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_CREATE (0xa02) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS_MESSAGE_ID" */
+
+/*!
+ * Describes a Synchronized GPU-Boost Group configuration
+ */
+typedef struct NV0000_SYNC_GPU_BOOST_GROUP_CONFIG {
+    // [in] Number of elements in @ref gpuIds
+    NvU32  gpuCount;
+
+    // [in] IDs of GPUs to be put in the Sync Boost Group
+    NvU32  gpuIds[NV_MAX_DEVICES];
+
+    // [out] Unique ID of the SGBG, if created
+    NvU32  boostGroupId;
+
+    // [in] If this group represents bridgeless SLI
+    NvBool bBridgeless;
+} NV0000_SYNC_GPU_BOOST_GROUP_CONFIG;
+
+#define NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS {
+    NV0000_SYNC_GPU_BOOST_GROUP_CONFIG boostConfig;
+} NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS;
+
+/*!
+ * Destroys a previously created Synchronized GPU-Boost Group (SGBG)
+ */
+#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_DESTROY (0xa03) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS_MESSAGE_ID" */
+
+#define NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS {
+    // [in] Unique ID of the SGBG to be destroyed
+    NvU32 boostGroupId;
+} NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS;
+
+/*!
+ * Get configuration information for all Synchronized Boost Groups in the system.
+ */
+#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_INFO (0xa04) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS {
+    // [out] Number of groups retrieved in @ref NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS::pBoostGroups
+    NvU32 groupCount;
+
+    // [out] @ref NV0000_SYNC_GPU_BOOST_GROUP_CONFIG
+    NV0000_SYNC_GPU_BOOST_GROUP_CONFIG pBoostGroups[NV0000_SYNC_GPU_BOOST_MAX_GROUPS];
+} NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS;
+
+/* _ctrl0000syncgpuboost_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
new file mode 100644
index 0000000..c65a989
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
@@ -0,0 +1,1276 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0000/ctrl0000system.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+#include "ctrl/ctrl0000/ctrl0000base.h"
+
+/* NV01_ROOT (client) system control commands and parameters */
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_FEATURES
+ *
+ * This command returns a mask of supported features for the SYSTEM category
+ * of the 0000 class.
+ *
+ * Valid features include:
+ *
+ *   NV0000_CTRL_SYSTEM_GET_FEATURES_SLI
+ *     When this bit is set, SLI is supported.
+ *   NV0000_CTRL_SYSTEM_GET_FEATURES_UEFI
+ *     When this bit is set, it is a UEFI system.
+ *   NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT
+ *     When this bit is set, EFI has initialized the core channel.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_FEATURES (0x1f0U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS_MESSAGE_ID (0xF0U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS {
+    NvU32 featuresMask;
+} NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS;
+
+
+
+/* Valid feature values */
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI               0:0
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI_FALSE         (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI_TRUE          (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_UEFI              1:1
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_UEFI_FALSE        (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_UEFI_TRUE         (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT       2:2
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT_TRUE  (0x00000001U)
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION
+ *
+ * This command returns the current driver information.
+ * On the first call, sizeOfStrings is set to the greater of the sizes of
+ * NV_BUILD_BRANCH_VERSION and NV_DISPLAY_DRIVER_TITLE. The client then
+ * allocates buffers of size sizeOfStrings for pVersionBuffer and
+ * pTitleBuffer and calls the command again to receive the driver info.
+ *
+ * sizeOfStrings
+ *   This field returns the size in bytes of the pVersionBuffer and
+ *   pTitleBuffer strings.
+ * pDriverVersionBuffer
+ *   This field returns the version (NV_VERSION_STRING).
+ * pVersionBuffer
+ *   This field returns the version (NV_BUILD_BRANCH_VERSION).
+ * pTitleBuffer
+ *   This field returns the title (NV_DISPLAY_DRIVER_TITLE).
+ * changelistNumber
+ *   This field returns the changelist value (NV_BUILD_CHANGELIST_NUM).
+ * officialChangelistNumber
+ *   This field returns the last official changelist value
+ *   (NV_LAST_OFFICIAL_CHANGELIST_NUM).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION (0x101U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS {
+    NvU32 sizeOfStrings;
+    NV_DECLARE_ALIGNED(NvP64 pDriverVersionBuffer, 8);
+    NV_DECLARE_ALIGNED(NvP64 pVersionBuffer, 8);
+    NV_DECLARE_ALIGNED(NvP64 pTitleBuffer, 8);
+    NvU32 changelistNumber;
+    NvU32 officialChangelistNumber;
+} NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_CPU_INFO
+ *
+ * This command returns system CPU information.
+ *
+ * type
+ *   This field returns the processor type.
+ *   Legal processor types include:
+ *     Intel processors:
+ *       P55       : P55C - MMX
+ *       P6        : PPro
+ *       P2        : PentiumII
+ *       P2XC      : Xeon & Celeron
+ *       CELA      : Celeron-A
+ *       P3        : Pentium-III
+ *       P3_INTL2  : Pentium-III w/integrated L2 (fullspeed, on die, 256K)
+ *       P4        : Pentium 4
+ *       CORE2     : Core2 Duo Conroe
+ *     AMD processors:
+ *       K62       : K6-2 w/ 3DNow
+ *     IDT/Centaur processors:
+ *       C6        : WinChip C6
+ *       C62       : WinChip 2 w/ 3DNow
+ *     Cyrix processors:
+ *       GX        : MediaGX
+ *       M1        : 6x86
+ *       M2        : M2
+ *       MGX       : MediaGX w/ MMX
+ *     Transmeta processors:
+ *       TM_CRUSOE : Transmeta Crusoe(tm)
+ *     PowerPC processors:
+ *       PPC603    : PowerPC 603
+ *       PPC604    : PowerPC 604
+ *       PPC750    : PowerPC 750
+ *
+ * capabilities
+ *   This field returns the capabilities of the processor.
+ *   Legal processor capabilities include:
+ *     MMX                 : supports MMX
+ *     SSE                 : supports SSE
+ *     3DNOW               : supports 3DNow
+ *     SSE2                : supports SSE2
+ *     SFENCE              : supports SFENCE
+ *     WRITE_COMBINING     : supports write-combining
+ *     ALTIVEC             : supports ALTIVEC
+ *     PUT_NEEDS_IO        : requires OUT inst w/PUT updates
+ *     NEEDS_WC_WORKAROUND : requires workaround for P4 write-combining bug
+ *     3DNOW_EXT           : supports 3DNow Extensions
+ *     MMX_EXT             : supports MMX Extensions
+ *     CMOV                : supports CMOV
+ *     CLFLUSH             : supports CLFLUSH
+ *     SSE3                : supports SSE3
+ *     NEEDS_WAR_124888    : requires write to GPU while spinning on
+ *                         : GPU value
+ *     HT                  : supports hyper-threading
+ * clock
+ *   This field returns the processor speed in MHz.
+ * L1DataCacheSize
+ *   This field returns the level 1 data (or unified) cache size
+ *   in kilobytes.
+ * L2DataCacheSize
+ *   This field returns the level 2 data (or unified) cache size
+ *   in kilobytes.
+ * dataCacheLineSize
+ *   This field returns the bytes per line in the level 1 data cache.
+ * numLogicalCpus
+ *   This field returns the number of logical processors. On Intel x86
+ *   systems that support it, this value will incorporate the current state
+ *   of HyperThreading.
+ * numPhysicalCpus
+ *   This field returns the number of physical processors.
+ * name
+ *   This field returns the CPU name in ASCII string format.
+ * family
+ *   Vendor-defined Family and Extended Family combined
+ * model
+ *   Vendor-defined Model and Extended Model combined
+ * stepping
+ *   Silicon stepping
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_CPU_INFO (0x102U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS {
+    NvU32 type;              /* processor type        */
+    NvU32 capabilities;      /* processor caps        */
+    NvU32 clock;             /* processor speed (MHz) */
+    NvU32 L1DataCacheSize;   /* L1 dcache size (KB)   */
+    NvU32 L2DataCacheSize;   /* L2 dcache size (KB)   */
+    NvU32 dataCacheLineSize; /* L1 dcache bytes/line  */
+    NvU32 numLogicalCpus;    /* logical processor cnt */
+    NvU32 numPhysicalCpus;   /* physical processor cnt */
+    NvU8  name[52];          /* embedded cpu name     */
+    NvU32 family;            /* Vendor-defined Family and Extended Family combined */
+    NvU32 model;             /* Vendor-defined Model and Extended Model combined */
+    NvU8  stepping;          /* Silicon stepping      */
+    NvU32 coresOnDie;        /* cpu cores per die     */
+} NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS;
+
+/* processor type values */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_UNKNOWN     (0x00000000U)
+/* Intel types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P5          (0x00000001U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P55         (0x00000002U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P6          (0x00000003U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P2          (0x00000004U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P2XC        (0x00000005U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_CELA        (0x00000006U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P3          (0x00000007U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P3_INTL2    (0x00000008U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_P4          (0x00000009U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2       (0x00000010U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_CELN_M16H   (0x00000011U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2_EXTRM (0x00000012U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_ATOM        (0x00000013U)
+/* AMD types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K5          (0x00000030U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K6          (0x00000031U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K62         (0x00000032U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K63         (0x00000033U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K7          (0x00000034U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K8          (0x00000035U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K10         (0x00000036U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_K11         (0x00000037U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_RYZEN       (0x00000038U)
+/* IDT/Centaur types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_C6          (0x00000060U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_C62         (0x00000061U)
+/* Cyrix types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_GX          (0x00000070U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_M1          (0x00000071U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_M2          (0x00000072U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_MGX         (0x00000073U)
+/* Transmeta types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_TM_CRUSOE   (0x00000080U)
+/* IBM types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC603      (0x00000090U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC604      (0x00000091U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC750      (0x00000092U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_POWERN      (0x00000093U)
+/* Unknown ARM architecture CPU type */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_UNKNOWN (0xA0000000U)
+/* ARM Ltd types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A9      (0xA0000009U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A15     (0xA000000FU)
+/* NVIDIA types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_1_0  (0xA0001000U)
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_2_0  (0xA0002000U)
+
+/* Generic types */
+#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC (0xA00FF000U)
+
+/* processor capabilities */
+#define NV0000_CTRL_SYSTEM_CPU_CAP_MMX                 (0x00000001U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE                 (0x00000002U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW               (0x00000004U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE2                (0x00000008U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE              (0x00000010U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_WRITE_COMBINING     (0x00000020U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_ALTIVEC             (0x00000040U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_PUT_NEEDS_IO        (0x00000080U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WC_WORKAROUND (0x00000100U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW_EXT           (0x00000200U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_MMX_EXT             (0x00000400U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_CMOV                (0x00000800U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_CLFLUSH             (0x00001000U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WAR_190854    (0x00002000U) /* deprecated */
+#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE3                (0x00004000U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WAR_124888    (0x00008000U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_HT_CAPABLE          (0x00010000U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE41               (0x00020000U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE42               (0x00040000U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_AVX                 (0x00080000U)
+#define NV0000_CTRL_SYSTEM_CPU_CAP_ERMS                (0x00100000U)
+
+/* feature mask (as opposed to bugs, requirements, etc.) */
+#define NV0000_CTRL_SYSTEM_CPU_CAP_FEATURE_MASK        (0x1f5e7fU) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_CPU_CAP_MMX | NV0000_CTRL_SYSTEM_CPU_CAP_SSE | NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW | NV0000_CTRL_SYSTEM_CPU_CAP_SSE2 | NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE | NV0000_CTRL_SYSTEM_CPU_CAP_WRITE_COMBINING | NV0000_CTRL_SYSTEM_CPU_CAP_ALTIVEC | NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW_EXT | NV0000_CTRL_SYSTEM_CPU_CAP_MMX_EXT | NV0000_CTRL_SYSTEM_CPU_CAP_CMOV | NV0000_CTRL_SYSTEM_CPU_CAP_CLFLUSH | NV0000_CTRL_SYSTEM_CPU_CAP_SSE3 | NV0000_CTRL_SYSTEM_CPU_CAP_HT_CAPABLE | NV0000_CTRL_SYSTEM_CPU_CAP_SSE41 | NV0000_CTRL_SYSTEM_CPU_CAP_SSE42 | NV0000_CTRL_SYSTEM_CPU_CAP_AVX | NV0000_CTRL_SYSTEM_CPU_CAP_ERMS)" */
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_CAPS
+ *
+ * This command returns the set of system capabilities in the
+ * form of an array of unsigned bytes. System capabilities include
+ * supported features and required workarounds for the system,
+ * each represented by a byte offset into the table and a bit
+ * position within that byte.
+ *
+ * capsTblSize
+ *   This parameter specifies the size in bytes of the caps table.
+ *   This value should be set to NV0000_CTRL_SYSTEM_CAPS_TBL_SIZE.
+ * capsTbl
+ *   This parameter specifies a pointer to the client's caps table buffer
+ *   into which the system caps bits will be transferred by the RM.
+ *   The caps table is an array of unsigned bytes.
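+ *
+ * Each cap is named in byte_index:bit_mask form and is meant to be tested
+ * with the NV0000_CTRL_SYSTEM_GET_CAP macro defined below. As a sketch
+ * ('caps' here is a hypothetical client buffer already filled in by this
+ * command):
+ *
+ *   NvU8 caps[NV0000_CTRL_SYSTEM_CAPS_TBL_SIZE];
+ *   // ... issue NV0000_CTRL_CMD_SYSTEM_GET_CAPS with capsTbl -> caps ...
+ *   if (NV0000_CTRL_SYSTEM_GET_CAP(caps, NV0000_CTRL_SYSTEM_CAPS_POWER_SLI_SUPPORTED))
+ *   {
+ *       // byte 0 of the table has bit 0x01 set: power SLI is supported
+ *   }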
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_CAPS (0x103U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | 0x3" */
+
+typedef struct NV0000_CTRL_SYSTEM_GET_CAPS_PARAMS {
+    NvU32 capsTblSize;
+    NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0000_CTRL_SYSTEM_GET_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV0000_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+/* caps format is byte_index:bit_mask */
+#define NV0000_CTRL_SYSTEM_CAPS_POWER_SLI_SUPPORTED 0:0x01
+
+/* size in bytes of system caps table */
+#define NV0000_CTRL_SYSTEM_CAPS_TBL_SIZE 1U
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_CHIPSET_INFO
+ *
+ * This command returns system chipset information.
+ *
+ * vendorId
+ *   This parameter returns the vendor identification for the chipset.
+ *   A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the chipset
+ *   cannot be identified.
+ * deviceId
+ *   This parameter returns the device identification for the chipset.
+ *   A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the chipset
+ *   cannot be identified.
+ * subSysVendorId
+ *   This parameter returns the subsystem vendor identification for the
+ *   chipset. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the
+ *   chipset cannot be identified.
+ * subSysDeviceId
+ *   This parameter returns the subsystem device identification for the
+ *   chipset. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the
+ *   chipset cannot be identified.
+ * HBvendorId
+ *   This parameter returns the vendor identification for the chipset's
+ *   host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates
+ *   the chipset's host bridge cannot be identified.
+ * HBdeviceId
+ *   This parameter returns the device identification for the chipset's
+ *   host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates
+ *   the chipset's host bridge cannot be identified.
+ * HBsubSysVendorId
+ *   This parameter returns the subsystem vendor identification for the
+ *   chipset's host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID
+ *   indicates the chipset's host bridge cannot be identified.
+ * HBsubSysDeviceId
+ *   This parameter returns the subsystem device identification for the
+ *   chipset's host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID
+ *   indicates the chipset's host bridge cannot be identified.
+ * sliBondId
+ *   This parameter returns the SLI bond identification for the chipset.
+ * vendorNameString
+ *   This parameter returns the vendor name string.
+ * chipsetNameString
+ *   This parameter returns the chipset name string.
+ * sliBondNameString
+ *   This parameter returns the SLI bond name string.
+ * flags
+ *   This parameter returns NV0000_CTRL_SYSTEM_CHIPSET_FLAG_XXX flags:
+ *   _HAS_RESIZABLE_BAR_ISSUE_YES: chipset where the use of resizable BAR1
+ *   should be disabled - bug 3440153
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_CHIPSET_INFO (0x104U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum name string length */
+#define NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH (0x0000020U)
+
+/* invalid id */
+#define NV0000_SYSTEM_CHIPSET_INVALID_ID (0xffffU)
+
+#define NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS {
+    NvU16 vendorId;
+    NvU16 deviceId;
+    NvU16 subSysVendorId;
+    NvU16 subSysDeviceId;
+    NvU16 HBvendorId;
+    NvU16 HBdeviceId;
+    NvU16 HBsubSysVendorId;
+    NvU16 HBsubSysDeviceId;
+    NvU32 sliBondId;
+    NvU8  vendorNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH];
+    NvU8  subSysVendorNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH];
+    NvU8  chipsetNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH];
+    NvU8  sliBondNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH];
+    NvU32 flags;
+} NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS;
+
+#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE     0:0
+#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE_NO  (0x00000000U)
+#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE_YES (0x00000001U)
+
+
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_SET_MEMORY_SIZE
+ *
+ * This command is used to set the system memory size in pages.
+ *
+ * memorySize
+ *   This parameter specifies the system memory size in pages. All values
+ *   are considered legal.
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0000_CTRL_CMD_SYSTEM_SET_MEMORY_SIZE (0x107U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS_MESSAGE_ID (0x7U)
+
+typedef struct NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS {
+    NvU32 memorySize;
+} NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_CLASSLIST
+ *
+ * This command is used to retrieve the set of system-level classes
+ * supported by the platform.
+ *
+ * numClasses
+ *   This parameter returns the number of valid entries in the returned
+ *   classes[] list. This parameter will not exceed
+ *   NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE.
+ * classes
+ *   This parameter returns the list of supported classes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_CLASSLIST (0x108U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS_MESSAGE_ID" */
+
+/* maximum number of classes returned in classes[] array */
+#define NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE (32U)
+
+#define NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS {
+    NvU32 numClasses;
+    NvU32 classes[NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE];
+} NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT
+ *
+ * This command is used to send triggered mobile-related system events
+ * to the RM.
+ *
+ * eventType
+ *   This parameter indicates the triggered event type. This parameter
+ *   should specify a valid NV0000_CTRL_SYSTEM_EVENT_TYPE value.
+ * eventData
+ *   This parameter specifies the type-dependent event data associated
+ *   with EventType. This parameter should specify a valid
+ *   NV0000_CTRL_SYSTEM_EVENT_DATA value.
+ * bEventDataForced
+ *   This parameter specifies whether to trust the current Lid/Dock state.
+ *   This parameter should specify a valid
+ *   NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED value.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * Sync this up (#defines) with the one in nvapi.spec!
+ * (NV_ACPI_EVENT_TYPE & NV_ACPI_EVENT_DATA)
+ */
+#define NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT (0x110U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS {
+    NvU32  eventType;
+    NvU32  eventData;
+    NvBool bEventDataForced;
+} NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS;
+
+/* valid eventType values */
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_LID_STATE        (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE     (0x00000001U)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_DOCK_STATE       (0x00000002U)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_LID        (0x00000003U)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_DOCK       (0x00000004U)
+
+/* valid eventData values */
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_LID_OPEN         (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_LID_CLOSED       (0x00000001U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_BATTERY    (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_AC         (0x00000001U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_UNDOCKED         (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_DOCKED           (0x00000001U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_DSM    (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_DCS    (0x00000001U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_NVIF   (0x00000002U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_ACPI   (0x00000003U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_POLL   (0x00000004U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_COUNT  (0x5U) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_POLL + 1)" */
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_DSM   (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_DCS   (0x00000001U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_NVIF  (0x00000002U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_ACPI  (0x00000003U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_POLL  (0x00000004U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_COUNT (0x5U) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_POLL + 1)" */
+
+/* valid bEventDataForced values */
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED_FALSE     (0x00000000U)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED_TRUE      (0x00000001U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE
+ *
+ * This command is used to query the platform type.
+ *
+ * systemType
+ *   This parameter returns the type of the system.
+ *   Legal values for this parameter include:
+ *     NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_DESKTOP
+ *       The system is a desktop platform.
+ *     NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_GENERIC
+ *       The system is a mobile (non-Toshiba) platform.
+ *     NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_TOSHIBA
+ *       The system is a mobile Toshiba platform.
+ *     NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_SOC
+ *       The system is a system-on-a-chip (SOC) platform.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE (0x111U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS_MESSAGE_ID (0x11U)
+
+typedef struct NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS {
+    NvU32 systemType;
+} NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS;
+
+/* valid systemType values */
+#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_DESKTOP        (0x000000U)
+#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_GENERIC (0x000001U)
+#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_TOSHIBA (0x000002U)
+#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_SOC            (0x000003U)
+
+
+
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_DEBUG_RMMSG_CTRL
+ *
+ * This command controls the current RmMsg filters.
+ *
+ * It is only supported if RmMsg is enabled (e.g. debug builds).
+ *
+ * cmd
+ *   GET - Gets the current RmMsg filter string.
+ *   SET - Sets the current RmMsg filter string.
+ *
+ * count
+ *   The length of the RmMsg filter string.
+ *
+ * data
+ *   The RmMsg filter string.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0000_CTRL_CMD_SYSTEM_DEBUG_RMMSG_CTRL (0x121U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE 512U
+
+#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_GET (0x00000000U)
+#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_SET (0x00000001U)
+
+#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS {
+    NvU32 cmd;
+    NvU32 count;
+    NvU8  data[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE];
+} NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS;
+
+/*
+ * NV0000_CTRL_SYSTEM_HWBC_INFO
+ *
+ * This structure contains information about the HWBC (BR04) specified by
+ * hwbcId.
+ *
+ * hwbcId
+ *   This field specifies the HWBC ID.
+ * firmwareVersion
+ *   This field returns the version of the firmware on the HWBC (BR04), if
+ *   present. This is a packed binary number of the form 0x12345678, which
+ *   corresponds to a firmware version of 12.34.56.78.
+ * subordinateBus
+ *   This field returns the subordinate bus number of the HWBC (BR04).
+ * secondaryBus
+ *   This field returns the secondary bus number of the HWBC (BR04).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+typedef struct NV0000_CTRL_SYSTEM_HWBC_INFO {
+    NvU32 hwbcId;
+    NvU32 firmwareVersion;
+    NvU32 subordinateBus;
+    NvU32 secondaryBus;
+} NV0000_CTRL_SYSTEM_HWBC_INFO;
+
+#define NV0000_CTRL_SYSTEM_HWBC_INVALID_ID (0xFFFFFFFFU)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_HWBC_INFO
+ *
+ * This command returns information about all Hardware Broadcast (HWBC)
+ * devices present in the system that are BR04s. To get the complete
+ * list of HWBCs in the system, all GPUs present in the system must be
+ * initialized. See the description of NV0000_CTRL_CMD_GPU_ATTACH_IDS to
+ * accomplish this.
+ *
+ * hwbcInfo
+ *   This field is an array of NV0000_CTRL_SYSTEM_HWBC_INFO structures into
+ *   which HWBC information is placed.
+ *   There is one entry for each HWBC present in the system. Valid entries
+ *   are contiguous; invalid entries have hwbcId equal to
+ *   NV0000_CTRL_SYSTEM_HWBC_INVALID_ID. If no HWBC is present in the
+ *   system, all the entries are marked invalid, but the return value is
+ *   still NV_OK.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_HWBC_INFO (0x124U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_MAX_HWBCS (0x00000080U)
+
+#define NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS {
+    NV0000_CTRL_SYSTEM_HWBC_INFO hwbcInfo[NV0000_CTRL_SYSTEM_MAX_HWBCS];
+} NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS;
+
+
+
+/*
+ * Deprecated. Please use NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 instead.
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS (0x127U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED must remain equal to the square of
+ * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS due to Check RM parsing issues.
+ * NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS is the maximum size of GPU groups
+ * allowed for batched P2P caps queries provided by the RM control
+ * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX.
+ */
+#define NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS         32U
+#define NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED 1024U
+#define NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS        8U
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER 0xffffffffU
+
+/* P2P capabilities status index values */
+#define NV0000_CTRL_P2P_CAPS_INDEX_READ     0U
+#define NV0000_CTRL_P2P_CAPS_INDEX_WRITE    1U
+#define NV0000_CTRL_P2P_CAPS_INDEX_NVLINK   2U
+#define NV0000_CTRL_P2P_CAPS_INDEX_ATOMICS  3U
+#define NV0000_CTRL_P2P_CAPS_INDEX_PROP     4U
+#define NV0000_CTRL_P2P_CAPS_INDEX_LOOPBACK 5U
+#define NV0000_CTRL_P2P_CAPS_INDEX_PCI      6U
+#define NV0000_CTRL_P2P_CAPS_INDEX_C2C      7U
+#define NV0000_CTRL_P2P_CAPS_INDEX_PCI_BAR1 8U
+
+#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE 9U
+
+
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_MESSAGE_ID (0x27U)
+
+typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS {
+    NvU32 gpuIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS];
+    NvU32 gpuCount;
+    NvU32 p2pCaps;
+    NvU32 p2pOptimalReadCEs;
+    NvU32 p2pOptimalWriteCEs;
+    NvU8  p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE];
+    NV_DECLARE_ALIGNED(NvP64 busPeerIds, 8);
+} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS;
+
+/* valid p2pCaps values */
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED       0:0
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED_TRUE  (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED        1:1
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED_FALSE  (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED_TRUE   (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED         2:2
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED_FALSE   (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED_TRUE    (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED       3:3
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED_TRUE  (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED                4:4
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED_FALSE          (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED_TRUE           (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED               5:5
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED_FALSE         (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED_TRUE          (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED                    6:6
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED_FALSE              (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED_TRUE               (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED        7:7
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED_FALSE  (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED_TRUE   (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED         8:8
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED_FALSE   (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED_TRUE    (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED       9:9
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED_FALSE (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED_TRUE  (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED        10:10
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED_FALSE  (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED_TRUE   (0x00000001U)
+
+
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED                    12:12
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED_FALSE              (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED_TRUE               (0x00000001U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED               13:13
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED_FALSE         (0x00000000U)
+#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED_TRUE          (0x00000001U)
+
+/* P2P status codes */
+#define NV0000_P2P_CAPS_STATUS_OK                         (0x00U)
+#define NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED      (0x01U)
+#define NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED          (0x02U)
+#define NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED (0x03U)
+#define NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY         (0x04U)
+#define NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED              (0x05U)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2
+ *
+ * This command returns the peer to peer capabilities present between GPUs.
+ * Valid requests must present a list of GPU IDs.
+ *
+ * [in] gpuIds
+ *   This member contains the array of GPU IDs for which we query the P2P
+ *   capabilities. Valid entries are contiguous, beginning with the first
+ *   entry in the list.
+ * [in] gpuCount
+ *   This member contains the number of GPU IDs stored in the gpuIds[] array.
+ * [out] p2pCaps
+ *   This member returns the peer to peer capabilities discovered between the
+ *   GPUs. Valid p2pCaps values include:
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED
+ *     When this bit is set, peer to peer writes between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED
+ *     When this bit is set, peer to peer reads between subdevices owned
+ *     by this device are supported.
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED
+ *     When this bit is set, peer to peer PROP between subdevices owned
+ *     by this device is supported.
This is enabled by default. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED + * When this bit is set, PCI is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED + * When this bit is set, NVLINK is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED + * When this bit is set, peer to peer atomics between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED + * When this bit is set, peer to peer loopback is supported for subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED + * When this bit is set, indirect peer to peer writes between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED + * When this bit is set, indirect peer to peer reads between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED + * When this bit is set, indirect peer to peer atomics between + * subdevices owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED + * When this bit is set, indirect NVLINK is supported for subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED + * When this bit is set, C2C P2P is supported between the GPUs. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED + * When this bit is set, BAR1 P2P is supported between the GPUs + * mentioned in @ref gpuIds. + * [out] p2pOptimalReadCEs + * For a pair of GPUs, return mask of CEs to use for p2p reads over Nvlink + * [out] p2pOptimalWriteCEs + * For a pair of GPUs, return mask of CEs to use for p2p writes over Nvlink + * [out] p2pCapsStatus + * This member returns status of all supported p2p capabilities. Valid + * status values include: + * NV0000_P2P_CAPS_STATUS_OK + * P2P capability is supported. + * NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED + * Chipset doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED + * GPU doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED + * IOH topology isn't supported, e.g. root ports are on different + * IOHs. + * NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY + * P2P capability is disabled by a regkey. + * NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED + * P2P capability is not supported. + * NV0000_P2P_CAPS_STATUS_NVLINK_SETUP_FAILED + * Indicates that NvLink P2P link setup failed. + * [out] busPeerIds + * Peer ID matrix. It is a one-dimensional array. + * busPeerIds[X * gpuCount + Y] maps from index X to index Y in + * the gpuIds[] table. For an invalid or non-existent peer, busPeerIds[] + * has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER.
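+ *
+ * A minimal usage sketch (illustrative only, not part of the generated
+ * interface; idA/idB and the control-call plumbing are assumed), indexing
+ * the flattened busPeerIds matrix exactly as described above:
+ *
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS p = { 0 };
+ *   p.gpuCount = 2;
+ *   p.gpuIds[0] = idA;
+ *   p.gpuIds[1] = idB;
+ *   // ... issue NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 on the client ...
+ *   // Peer ID of gpuIds[1] as seen from gpuIds[0] (X = 0, Y = 1):
+ *   NvU32 peer = p.busPeerIds[0 * p.gpuCount + 1];
+ *   if (peer == NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER)
+ *   {
+ *       // no P2P peer mapping exists between this pair
+ *   }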
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ + + + +#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 (0x12bU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS_MESSAGE_ID (0x2BU) + +typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS { + NvU32 gpuIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; + NvU32 gpuCount; + NvU32 p2pCaps; + NvU32 p2pOptimalReadCEs; + NvU32 p2pOptimalWriteCEs; + NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; + NvU32 busPeerIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED]; +} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX + * + * This command returns peer to peer capabilities present between all pairs of + * GPU IDs {(a, b) : a in gpuIdGrpA and b in gpuIdGrpB}. This can be used to + * collect all P2P capabilities in the system - see the SRT: + * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX_TEST + * for a demonstration. + * + * The call will query for all pairs between set A and set B, and returns + * results in both link directions. The results are two-dimensional arrays where + * the first dimension is the index within the set-A array of one GPU ID under + * consideration, and the second dimension is the index within the set-B array + * of the other GPU ID under consideration. + * + * That is, the result arrays are *ALWAYS* to be indexed first with the set-A + * index, then with the set-B index. The B-to-A direction of results are put in + * the b2aOptimal(Read|Write)CEs. This makes it unnecessary to call the query + * twice, since the usual use case requires both directions. + * + * If a set is being compared against itself (by setting grpBCount to 0), then + * the result matrices are symmetric - it doesn't matter which index is first. + * However, the choice of indices is effectively a choice of which ID is "B" and + * which is "A" for the "a2b" and "b2a" directional results. + * + * [in] grpACount + * This member contains the number of GPU IDs stored in the gpuIdGrpA[] + * array. Must be >= 0. + * [in] grpBCount + * This member contains the number of GPU IDs stored in the gpuIdGrpB[] + * array. Can be == 0 to specify a check of group A against itself. + * [in] gpuIdGrpA + * This member contains the array of GPU IDs in "group A", each of which + * will have its P2P capabilities returned with respect to each GPU ID in + * "group B". Valid entries are contiguous, beginning with the first entry + * in the list. + * [in] gpuIdGrpB + * This member contains the array of GPU IDs in "group B", each of which + * will have its P2P capabilities returned with respect to each GPU ID in + * "group A". Valid entries are contiguous, beginning with the first entry + * in the list. May be equal to gpuIdGrpA, but best performance requires + * that the caller specifies grpBCount = 0 in this case, and ignores this. + * [out] p2pCaps + * This member returns the peer to peer capabilities discovered between the + * pairs of input GPUs between the groups, indexed by [A_index][B_index]. + * Valid p2pCaps values include: + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED + * When this bit is set, peer to peer writes between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED + * When this bit is set, peer to peer reads between subdevices owned + * by this device are supported. 
+ * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED + * When this bit is set, peer to peer PROP between subdevices owned + * by this device are supported. This is enabled by default + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED + * When this bit is set, PCI is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED + * When this bit is set, NVLINK is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED + * When this bit is set, peer to peer atomics between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED + * When this bit is set, peer to peer loopback is supported for subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED + * When this bit is set, indirect peer to peer writes between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED + * When this bit is set, indirect peer to peer reads between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED + * When this bit is set, indirect peer to peer atomics between + * subdevices owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED + * When this bit is set, indirect NVLINK is supported for subdevices + * owned by this device. + * [out] a2bOptimalReadCes + * For a pair of GPUs, return mask of CEs to use for p2p reads over Nvlink + * in the A-to-B direction. + * [out] a2bOptimalWriteCes + * For a pair of GPUs, return mask of CEs to use for p2p writes over Nvlink + * in the A-to-B direction. + * [out] b2aOptimalReadCes + * For a pair of GPUs, return mask of CEs to use for p2p reads over Nvlink + * in the B-to-A direction. + * [out] b2aOptimalWriteCes + * For a pair of GPUs, return mask of CEs to use for p2p writes over Nvlink + * in the B-to-A direction. 
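+ *
+ * A minimal usage sketch (illustrative only; idA/idB and the control-call
+ * plumbing are assumed), comparing a group against itself by setting
+ * grpBCount to 0 and indexing the results [A_index][B_index]:
+ *
+ *   NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS m = { 0 };
+ *   m.grpACount = 2;
+ *   m.gpuIdGrpA[0] = idA;
+ *   m.gpuIdGrpA[1] = idB;
+ *   m.grpBCount = 0;                           // compare group A against itself
+ *   // ... issue NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX on the client ...
+ *   NvU32 caps    = m.p2pCaps[0][1];           // idA (A-index 0) vs idB (B-index 1)
+ *   NvU32 readCEs = m.a2bOptimalReadCes[0][1]; // CE mask for idA-to-idB reads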
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ + + + +#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX (0x13aU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_MESSAGE_ID" */ + +typedef NvU32 NV0000_CTRL_P2P_CAPS_MATRIX_ROW[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_MESSAGE_ID (0x3AU) + +typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS { + NvU32 grpACount; + NvU32 grpBCount; + NvU32 gpuIdGrpA[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NvU32 gpuIdGrpB[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW p2pCaps[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW a2bOptimalReadCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW a2bOptimalWriteCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW b2aOptimalReadCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW b2aOptimalWriteCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; +} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS; + + + +#define GPS_MAX_COUNTERS_PER_BLOCK 32U +typedef struct NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS { + NvU32 objHndl; + NvU32 blockId; + NvU32 nextExpectedSampleTimems; + NvU32 countersReq; + NvU32 countersReturned; + NvU32 counterBlock[GPS_MAX_COUNTERS_PER_BLOCK]; +} NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS; + +#define NV0000_CTRL_CMD_SYSTEM_GPS_GET_PERF_SENSORS (0x12cU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | 0x2C" */ + +#define NV0000_CTRL_CMD_SYSTEM_GPS_GET_EXTENDED_PERF_SENSORS (0x12eU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | 0x2E" */ + + + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO + * + * This command returns the current host driver, host OS and + * plugin information. It is only valid when VGX is set up. + * szHostDriverVersionBuffer + * This field returns the host driver version (NV_VERSION_STRING). + * szHostVersionBuffer + * This field returns the host driver build branch version (NV_BUILD_BRANCH_VERSION). + * szHostTitleBuffer + * This field returns the host driver title (NV_DISPLAY_DRIVER_TITLE). + * szPluginTitleBuffer + * This field returns the plugin build title (NV_DISPLAY_DRIVER_TITLE). + * szHostUnameBuffer + * This field returns the output of 'uname' on the host OS. + * iHostChangelistNumber + * This field returns the changelist value of the host driver (NV_BUILD_CHANGELIST_NUM). + * iPluginChangelistNumber + * This field returns the changelist value of the plugin (NV_BUILD_CHANGELIST_NUM).
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE 256U +#define NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO (0x133U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS_MESSAGE_ID (0x33U) + +typedef struct NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS { + char szHostDriverVersionBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szHostVersionBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szHostTitleBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szPluginTitleBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szHostUnameBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + NvU32 iHostChangelistNumber; + NvU32 iPluginChangelistNumber; +} NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_GPUS_POWER_STATUS + * + * This command returns the power status of the GPUs in the system, whether + * successfully attached or not (e.g., because of insufficient power). It is + * supported on Kepler and up only. + * gpuCount + * This field returns the count into the following arrays. + * gpuBus + * This field returns the bus number of a GPU. + * gpuExternalPowerStatus + * This field returns the corresponding external power status: + * NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_CONNECTED + * NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_NOT_CONNECTED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_GPUS_POWER_STATUS (0x134U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS_MESSAGE_ID (0x34U) + +typedef struct NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS { + NvU8 gpuCount; + NvU8 gpuBus[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; + NvU8 gpuExternalPowerStatus[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; +} NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS; + +/* Valid gpuExternalPowerStatus values */ +#define NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_CONNECTED 0U +#define NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_NOT_CONNECTED 1U + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS + * + * This command returns the caller's API access privileges using + * this client handle. + * + * privStatus + * This parameter returns a mask of possible access privileges: + * NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG + * The caller is running with elevated privileges. + * NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG + * Client is of NV01_ROOT class. + * NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG + * Client has PRIV bit set.
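+ *
+ * A minimal usage sketch (illustrative only; the control-call plumbing is
+ * assumed), testing the returned mask against the flag defines below:
+ *
+ *   NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS s = { 0 };
+ *   // ... issue NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS ...
+ *   if (s.privStatusFlags & NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG)
+ *   {
+ *       // caller is running with elevated privileges
+ *   }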
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + + +#define NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS (0x135U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS_MESSAGE_ID (0x35U) + +typedef struct NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS { + NvU8 privStatusFlags; +} NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS; + + +/* Valid privStatus values */ +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG (0x00000001U) +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG (0x00000002U) +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG (0x00000004U) + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS + * + * The fabric manager (FM) notifies RM that the fabric (system) is ready for + * peer to peer (P2P) use or is still initializing the fabric. This command + * allows clients to query the fabric status to allow P2P operations. + * + * Note that on systems where FM isn't used, RM just returns _SKIP. + * + * fabricStatus + * This parameter returns current fabric status: + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_INVALID_PARAM_STRUCT + */ + +typedef enum NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS { + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP = 1, + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED = 2, + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS = 3, + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED = 4, +} NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS; + +#define NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS (0x136U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS { + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS fabricStatus; +} NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS; + + + +/* + * NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID + * + * This command is used to get a unique identifier for the instance of RM. + * The returned value will only change when the driver is reloaded. A previous + * value will never be reused on a given machine.
+ * + * rm_instance_id; + * The instance ID of the current RM instance + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_RM_INSTANCE_ID (0x139U) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS_MESSAGE_ID" */ + +/* + * NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS + */ +#define NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS_MESSAGE_ID (0x39U) + +typedef struct NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS { + NV_DECLARE_ALIGNED(NvU64 rm_instance_id, 8); +} NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS; + + + +/* + * NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT + * + * This API is used to sync the external fabric management status with + * GSP-RM + * + * bExternalFabricMgmt + * Whether fabric is externally managed + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT (0x13cU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID (0x3CU) + +typedef struct NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS { + NvBool bExternalFabricMgmt; +} NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS; + +/* + * NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO + * + * This API is used to get information about the RM client + * database. + * + * clientCount [OUT] + * This field indicates the number of clients currently allocated. + * + * resourceCount [OUT] + * This field indicates the number of resources currently allocated + * across all clients. + * + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_CLIENT_DATABASE_INFO (0x13dU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS_MESSAGE_ID (0x3DU) + +typedef struct NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS { + NvU32 clientCount; + NV_DECLARE_ALIGNED(NvU64 resourceCount, 8); +} NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2 + * + * This command returns the current driver information in + * statically sized character arrays. + * + * driverVersionBuffer + * This field returns the version (NV_VERSION_STRING). + * versionBuffer + * This field returns the version (NV_BUILD_BRANCH_VERSION). + * titleBuffer + * This field returns the title (NV_DISPLAY_DRIVER_TITLE). + * changelistNumber + * This field returns the changelist value (NV_BUILD_CHANGELIST_NUM). + * officialChangelistNumber + * This field returns the last official changelist value + * (NV_LAST_OFFICIAL_CHANGELIST_NUM). 
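+ *
+ * A minimal usage sketch (illustrative only; the control-call plumbing is
+ * assumed, and the buffers are assumed to come back NUL-terminated):
+ *
+ *   NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS v = { 0 };
+ *   // ... issue NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2 ...
+ *   printf("driver %s (CL %u)\n", v.driverVersionBuffer, v.changelistNumber);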
+ * + * Possible status values returned are: + * NV_OK + */ + +#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE 256U +#define NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2 (0x13eU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS_MESSAGE_ID (0x3EU) + +typedef struct NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS { + char driverVersionBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE]; + char versionBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE]; + char titleBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE]; + NvU32 changelistNumber; + NvU32 officialChangelistNumber; +} NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS; + +/* _ctrl0000system_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h new file mode 100644 index 0000000..57a9758 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h @@ -0,0 +1,433 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000unix.finn +// + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) Linux control commands and parameters */ + +/* + * NV0000_CTRL_CMD_OS_UNIX_FLUSH_USER_CACHE + * + * This command may be used to force a cache flush for a range of virtual addresses in + * memory. Can be used for either user or kernel addresses. + * + * offset, length + * These parameters specify the offset within the memory block + * and the number of bytes to flush/invalidate + * cacheOps + * This parameter flags whether to flush, invalidate or do both. + * Possible values are: + * NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH + * NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE + * NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE + * hDevice + * This parameter is the handle to the device + * hObject + * This parameter is the handle to the memory structure being operated on. 
+ * internalOnly + * Intended for internal use unless the client is running in a MODS UNIX + * environment, in which case this parameter specifies the virtual address + * of the memory block to flush. + * + * Possible status values are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_COMMAND + * NV_ERR_INVALID_LIMIT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0000_CTRL_CMD_OS_UNIX_FLUSH_USER_CACHE (0x3d02) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvU64 length, 8); + NvU32 cacheOps; + NvHandle hDevice; + NvHandle hObject; + NV_DECLARE_ALIGNED(NvU64 internalOnly, 8); +} NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS; + +#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH (0x00000001) +#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE (0x00000002) +#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE (0x00000003) + + +/* + * NV0000_CTRL_CMD_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR + * + * This command is used to get the control file descriptor. + * + * Possible status values returned are: + * NV_OK + * + */ +#define NV0000_CTRL_CMD_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR (0x3d04) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x4" */ + +typedef struct NV0000_CTRL_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR_PARAMS { + NvS32 fd; +} NV0000_CTRL_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR_PARAMS; + +typedef enum NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE { + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_NONE = 0, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM = 1, +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE; + +typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECT { + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE type; + + union { + struct { + NvHandle hDevice; + NvHandle hParent; + NvHandle hObject; + } rmObject; + } data; +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT; + +/* + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD + * + * This command may be used to export an NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE + * object to a file descriptor. + * + * Note that the 'fd' parameter is an input parameter at the kernel level, but + * an output parameter for usermode RMAPI clients -- the RMAPI library will + * open a new FD automatically if a usermode RMAPI client exports an object. + * + * Kernel-mode RM clients can export an object to an FD in two steps: + * 1. User client calls this RMControl with the flag 'EMPTY_FD_TRUE' to create + * an empty FD to receive the object, then passes that FD to the kernel-mode + * RM client. + * 2. Kernel-mode RM client fills in the rest of the + * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS as usual and calls RM to + * associate its desired RM object with the empty FD from its usermode + * client.
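+ *
+ * A minimal usage sketch of step 1 (illustrative only; hDevice and the
+ * control-call plumbing are assumed):
+ *
+ *   NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS e = { 0 };
+ *   e.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
+ *   e.object.data.rmObject.hDevice = hDevice;  // still required for EMPTY_FD
+ *   e.fd = -1;                                 // let RMAPI open a new FD
+ *   e.flags = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD_TRUE; // bit 0:0
+ *   // ... issue NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD, then hand e.fd
+ *   // to the kernel-mode client for step 2 ...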
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_PARAMETER + */ +#define NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD (0x3d05) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS { + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT object; /* IN */ + NvS32 fd; /* IN/OUT */ + NvU32 flags; /* IN */ +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS; + +/* + * If EMPTY_FD is TRUE, the 'fd' will be created but no object will be + * associated with it. The hDevice parameter is still required, to determine + * the correct device node on which to create the file descriptor. + * (An empty FD can then be passed to a kernel-mode driver to associate it with + * an actual object.) + */ +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD 0:0 +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD_FALSE (0x00000000) +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD_TRUE (0x00000001) + +/* + * NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD + * + * This command may be used to import back an + * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE object from a file descriptor. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_PARAMETER + */ +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD (0x3d06) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS { + NvS32 fd; /* IN */ + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT object; /* IN */ +} NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_GET_GPU_INFO + * + * This command will query the OS-specific info for the specified GPU. + * + * gpuId + * This parameter should specify a valid GPU ID value. If there + * is no GPU present with the specified ID, a status of + * NV_ERR_INVALID_ARGUMENT is returned. + * minorNum + * This parameter returns the minor number of the device node. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_CTRL_CMD_OS_GET_GPU_INFO (0x3d07) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x7" */ + +typedef struct NV0000_CTRL_OS_GET_GPU_INFO_PARAMS { + NvU32 gpuId; /* IN */ + NvU32 minorNum; /* OUT */ +} NV0000_CTRL_OS_GET_GPU_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO + * + * This command will query the deviceInstance for the specified FD + * which is referencing an exported object. + * + * fd + * File descriptor referencing an exported object on a Unix system. + * + * deviceInstance + * This parameter returns the deviceInstance on which the object is located. + * + * NV_MAX_DEVICES is returned if the object is parented by a client instead + * of a device. + * + * maxObjects + * This parameter returns the maximum number of object handles that may be + * contained in the file descriptor. + * + * metadata + * This parameter returns the user metadata passed into the + * _EXPORT_OBJECTS_TO_FD control call.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OBJECT_NOT_FOUND + */ + +#define NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO (0x3d08) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE 64 + +#define NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS { + NvS32 fd; /* IN */ + NvU32 deviceInstance; /* OUT */ + NvU16 maxObjects; /* OUT */ + NvU8 metadata[NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* OUT */ +} NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_UNIX_REFRESH_RMAPI_DEVICE_LIST + * + * This command will re-fetch probed GPU information and update the RMAPI + * library's internal detected GPU context information accordingly. Without + * this, GPUs attached to RM after RMAPI client initialization will not be + * accessible and all RMAPI library calls will fail on them. + * Currently this is used by the NVSwitch Fabric Manager in conjunction with + * the NVSwitch Shared Virtualization feature, where GPUs are hot-plugged to + * OS/RM (by the Hypervisor) and Fabric Manager is signaled externally by the + * Hypervisor to initialize those GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_OPERATING_SYSTEM + */ + +#define NV0000_CTRL_CMD_OS_UNIX_REFRESH_RMAPI_DEVICE_LIST (0x3d09) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x9" */ + +/* + * This control call has been deprecated. It will be deleted soon. + * Use NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD (singular) or + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD (plural) instead. + */ +#define NV0000_CTRL_CMD_OS_UNIX_CREATE_EXPORT_OBJECT_FD (0x3d0a) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_BUFFER_SIZE NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE + +#define NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS { + NvHandle hDevice; /* IN */ + NvU16 maxObjects; /* IN */ + NvU8 metadata[NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* IN */ + NvS32 fd; /* IN/OUT */ +} NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD + * + * Exports RM handles to a provided fd, or creates a new FD if requested. + * + * The handles in the 'objects' array are exported into the fd + * as the range [index, index + numObjects). + * + * If index + numObjects is greater than the maxObjects value used + * to create the file descriptor, NV_ERR_INVALID_PARAMETER is returned. + * + * If 'numObjects' and 'index' overlap with a prior call, the newer call's RM + * object handles will overwrite the handles exported by the previous call. + * This overlapping behavior can also be used to unexport a handle by setting + * the appropriate object in 'objects' to 0. + * + * fd + * A file descriptor. If -1, a new FD will be created. + * + * hDevice + * The owning device of the objects to be exported (must be the same for + * all objects). + * + * maxObjects + * The total number of objects that the client wishes to export to the FD. + * This parameter will be honored only when the FD is getting created.
+ * + * metadata + * A buffer for clients to write some metadata to and pass to the importing + * client. This parameter will be honored only when the FD is getting + * created. + * + * objects + * Array of RM object handles to export to the fd. + * + * numObjects + * The number of handles the user wishes to export in this call. + * + * index + * The index into the export fd at which to start exporting the handles in + * 'objects' (for use in iterative calls). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OUT_OF_RANGE + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + */ +#define NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD (0x3d0b) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS 512 + +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS { + NvS32 fd; /* IN/OUT */ + NvHandle hDevice; /* IN */ + NvU16 maxObjects; /* IN */ + NvU8 metadata[NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* IN */ + NvHandle objects[NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* IN */ + NvU16 numObjects; /* IN */ + NvU16 index; /* IN */ +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD + * + * This command can be used to import back RM handles + * that were exported to an fd using the + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD control call. + * + * If index + numObjects is greater than the maxObjects value used + * to create the file descriptor, NV_ERR_INVALID_PARAMETER is returned + * and no objects are imported. + * + * For each valid handle in the 'objects' array parameter at index 'i', + * the corresponding object handle at index ('i' + 'index') contained by + * the fd will be imported. If the object at index ('i' + 'index') has + * not been exported into the fd, no object will be imported. + * + * If any of the handles contained in the 'objects' array parameter are invalid + * and the corresponding export object handle is valid, + * NV_ERR_INVALID_PARAMETER will be returned and no handles will be imported. + * + * fd + * The export fd from which to import handles. + * + * hParent + * The parent RM handle under which all of the exported objects will + * be duped. + * + * objects + * An array of RM handles. The exported objects will be duped under + * these handles during the import process. + * + * objectTypes + * An array of RM handle types. The type _NONE will be returned if + * the object was not imported. Other possible object types are + * mentioned below. + * + * numObjects + * The number of valid object handles in the 'objects' array. This should + * be set to the number of objects that the client wishes to import. + * + * index + * The index into the fd at which to start importing (for use in + * iterative calls).
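+ *
+ * A minimal usage sketch (illustrative only; exportFd, hParent and the
+ * myHandles array are assumed), importing 256 handles in batches of the
+ * current 128-entry 'objects' array limit:
+ *
+ *   NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS p = { 0 };
+ *   p.fd = exportFd;
+ *   p.hParent = hParent;
+ *   for (NvU16 base = 0; base < 256; base += 128)
+ *   {
+ *       p.index = base;
+ *       p.numObjects = 128;
+ *       memcpy(p.objects, &myHandles[base], 128 * sizeof(NvHandle));
+ *       // ... issue NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD,
+ *       // then check p.objectTypes[] for _NONE entries ...
+ *   }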
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_OUT_OF_RANGE + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_PARAMETER + */ +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD (0x3d0c) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS_MESSAGE_ID" */ + +// +// TODO Bump this back up to 512 after the FLA revamp is complete +// +#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS 128 + +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_NONE 0 +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM 1 +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM 2 +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC 3 + +#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS { + NvS32 fd; /* IN */ + NvHandle hParent; /* IN */ + NvHandle objects[NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* IN */ + NvU8 objectTypes[NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* OUT */ + NvU16 numObjects; /* IN */ + NvU16 index; /* IN */ +} NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS; + +/* _ctrl0000unix_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h new file mode 100644 index 0000000..817d1d5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000vgpu.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h new file mode 100644 index 0000000..046c931 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h @@ -0,0 +1,178 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0002.finn +// + +#include "ctrl/ctrlxxxx.h" +#define NV0002_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0002, NV0002_CTRL_##cat, idx) + +/* Client command categories (6bits) */ +#define NV0002_CTRL_RESERVED (0x00) +#define NV0002_CTRL_DMA (0x01) + + +/* + * NV0002_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ + +#define NV0002_CTRL_CMD_NULL (0x20000) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NV0002_CTRL_CMD_UPDATE_CONTEXTDMA + * + * This command will update the parameters of the specified context dma. The + * context dma must be bound to a display channel. The update is limited + * to the display view of the context dma. Other use cases will continue to + * use the original allocation parameters. + * + * This is used on platforms where memory may be moved by the operating + * system after allocation. + * + * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag. + * + * baseAddress + * This parameter, if selected by flags, indicates the new baseAddress for + * the ctxdma + * limit + * This parameter, if selected by flags, indicates the new limit of the + * ctxdma. + * hCtxDma + * ContextDma handle on which to operate. Must match the handle given to the control + * call. + * hChannel + * Display channel handle. This field is ignored. + * hintHandle + * Hint value returned from HeapAllocHint which encodes information about + * the surface. This is used by chips without generic kind. Newer chips + * use the COMPR_INFO flag and the hintHandle must be zero. + * flags + * This parameter specifies flags which indicate which other parameters are + * valid. + * FLAGS_PAGESIZE updates the context DMA pagesize field, if not _DEFAULT + * FLAGS_USE_COMPR_INFO uses the surface format specified in the params, instead of hintHandle. 
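+ *
+ * A minimal usage sketch (illustrative only; the handles and control-call
+ * plumbing are assumed), updating only the base address of a bound ctxdma:
+ *
+ *   NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS u = { 0 };
+ *   u.hCtxDma = hCtxDma;
+ *   u.hChannel = hChannel;    // ignored, per above
+ *   u.baseAddress = newBase;
+ *   u.hintHandle = 0;         // chips with generic kind take no hint
+ *   u.flags = NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS_VALID; // bit 0:0
+ *   // ... issue NV0002_CTRL_CMD_UPDATE_CONTEXTDMA ...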
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT + * NV_ERR_INVALID_ARGUMENT + * NVOS_STATUS_NOT_SUPPORTED + */ +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA (0x20101) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS_MESSAGE_ID" */ + +#define NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS { + NV_DECLARE_ALIGNED(NvU64 baseAddress, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NvHandle hSubDevice; + NvHandle hCtxDma; + NvHandle hChannel; + NvHandle hintHandle; + NvU32 flags; +} NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS; + +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS 0:0 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS_INVALID (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS_VALID (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT 1:1 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT_INVALID (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT_VALID (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT 2:2 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT_INVALID (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT_VALID (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE 4:3 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_DEFAULT (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_4K (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_BIG (0x00000002) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO 6:5 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_NONE (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_PITCH (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_BLOCK_LINEAR (0x00000002) + +/* + * NV0002_CTRL_CMD_BIND_CONTEXTDMA + * + * Bind a context dma to a display channel. Binding is no longer required for + * Host channels, but will silently succeed. + * + * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag. + * + * This control replaces the obsolete RmBindContextDma() API. + * + * hChannel + * The channel for the ctxdma bind. + * + * Possible error codes include + * NV_OK + * NV_ERR_TOO_MANY_PRIMARIES hash table is full + * NV_ERR_NO_MEMORY instance memory is full + * NV_ERR_INVALID_OFFSET surface is not correctly aligned + * NV_ERR_STATE_IN_USE context dma was already bound to the given channel + */ +#define NV0002_CTRL_CMD_BIND_CONTEXTDMA (0x20102) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_BIND_CONTEXTDMA_PARAMS_MESSAGE_ID" */ + +#define NV0002_CTRL_BIND_CONTEXTDMA_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0002_CTRL_BIND_CONTEXTDMA_PARAMS { + NvHandle hChannel; +} NV0002_CTRL_BIND_CONTEXTDMA_PARAMS; + +/* + * NV0002_CTRL_CMD_UNBIND_CONTEXTDMA + * + * Unbind a context dma from a display channel. + * + * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag.
+ * + * hChannel + * The display channel to unbind from + * + * Possible error codes include + * NV_OK + * NV_ERR_INVALID_STATE channel was not bound + */ +#define NV0002_CTRL_CMD_UNBIND_CONTEXTDMA (0x20103) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS_MESSAGE_ID" */ + +#define NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS { + NvHandle hChannel; +} NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS; + +/* _ctrl0002.h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h new file mode 100644 index 0000000..91431ce --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0004.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV01_TIMER control commands and parameters */ + +#define NV0004_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0004, NV0004_CTRL_##cat, idx) + +/* NV01_TIMER command categories (8bits) */ +#define NV0004_CTRL_RESERVED (0x00) +#define NV0004_CTRL_TMR (0x01) + +/* + * NV0004_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0004_CTRL_CMD_NULL (0x40000) /* finn: Evaluated from "(FINN_NV01_TIMER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY + * + * This command can be used to set a PTIMER alarm to trigger at the + * specified time in the future on the subdevice associated with this + * NV01_TIMER object instance. + * + * hEvent + * This parameter specifies the handle of an NV01_EVENT object instance + * that is to be signaled when the alarm triggers. This NV01_EVENT + * object instance must have been allocated with this NV01_TIMER object + * instance as its parent. If this parameter is set to NV01_NULL_OBJECT + * then all NV01_EVENT object instances associated with this NV01_TIMER + * object instance are signaled. 
+ * alarmTimeNsecs + * This parameter specifies the relative time in nanoseconds at which + * the alarm should trigger. Note that the accuracy between the alarm + * trigger and the subsequent notification to the caller can vary + * depending on system conditions. + * + * Possible status values returned include: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_INVALID_PARAM_STRUCT + * NVOS_STATUS_INVALID_OBJECT_HANDLE + */ + +#define NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY (0x40110) /* finn: Evaluated from "(FINN_NV01_TIMER_TMR_INTERFACE_ID << 8) | NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS_MESSAGE_ID" */ + +#define NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS { + NvHandle hEvent; + NV_DECLARE_ALIGNED(NvU64 alarmTimeNsecs, 8); +} NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS; + +/* _ctrl0004_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h new file mode 100644 index 0000000..60f6684 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0020.finn +// + +#include "ctrl/ctrlxxxx.h" +#define NV0020_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x0020, NV0020_CTRL_##cat, idx) + +/* NV0020_GPU_MANAGEMENT command categories (6bits) */ +#define NV0020_CTRL_RESERVED (0x00) +#define NV0020_CTRL_GPU_MGMT (0x01) + +/* + * NV0020_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0020_CTRL_CMD_NULL (0x200000) /* finn: Evaluated from "(FINN_NV0020_GPU_MANAGEMENT_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* Maximum possible number of bytes of GID information */ +#define NV0020_GPU_MAX_GID_LENGTH (0x00000100) + +/* + * NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE + * + * This command modifies the GPU zero power state for the desired GPU in the + * database. This state is set by a privileged client, after the GPU is + * completely unregistered from RM as well as the PCI subsystem.
On Linux, + * clients perform this operation through pci-sysfs. + * This control call requires admin privileges. + * + * uuid (INPUT) + * The UUID of the gpu. + * Supports binary format and SHA-1 type. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE (0x200101) /* finn: Evaluated from "(FINN_NV0020_GPU_MANAGEMENT_GPU_MGMT_INTERFACE_ID << 8) | NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS { + NvU8 uuid[NV0020_GPU_MAX_GID_LENGTH]; +} NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS; + +/* _ctrl0020_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h new file mode 100644 index 0000000..880fcc8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h @@ -0,0 +1,191 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl003e.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV01_MEMORY_SYSTEM control commands and parameters */ + +#define NV003E_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x003E, NV003E_CTRL_##cat, idx) + +/* NV01_MEMORY_SYSTEM command categories (6bits) */ +#define NV003E_CTRL_RESERVED (0x00) +#define NV003E_CTRL_MEMORY (0x01) + +/* + * NV003E_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV003E_CTRL_CMD_NULL (0x3e0000) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV003E_CTRL_CMD_GET_SURFACE_PHYS_ATTR + * + * This command returns attributes associated with the memory object + * at the given offset. The architecture dependent return parameter + * comprFormat determines the meaningfulness (or not) of comprOffset. + * + * This call is currently only supported in the MODS environment. + * + * memOffset + * This parameter is both an input and an output. 
As input, this + * parameter holds an offset into the memory surface. The return + * value is the physical address of the surface at the given offset. + * memFormat + * This parameter returns the memory kind of the surface. + * comprOffset + * This parameter returns the compression offset of the surface. + * comprFormat + * This parameter returns the type of compression of the surface. + * gpuCacheAttr + * gpuCacheAttr returns the gpu cache attribute of the surface. + * Legal return values for this field are + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED + * gpuP2PCacheAttr + * gpuP2PCacheAttr returns the gpu peer-to-peer cache attribute of the surface. + * Legal return values for this field are + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED + * mmuContext + * mmuContext indicates the requested type of physical address. + * Legal values for this field are + * TEGRA_VASPACE_A -- return the non-GPU device physical address (the system physical address itself) for Tegra engines. + * This may change to use a class value in the future. + * FERMI_VASPACE_A -- return the GPU device physical address (the system physical address, or the SMMU VA) for Big GPU engines. + * 0 -- return the GPU device physical address (the system physical address, or the SMMU VA) for Big GPU engines. + * Use of zero may be deprecated in the future. + * contigSegmentSize + * If the underlying surface is physically contiguous, this parameter + * returns the size in bytes of the piece of memory starting from + * the offset specified in the memOffset parameter extending to the last + * byte of the surface. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV003E_CTRL_CMD_GET_SURFACE_PHYS_ATTR (0x3e0101) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID" */ + +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS { + NV_DECLARE_ALIGNED(NvU64 memOffset, 8); + NvU32 memFormat; + NvU32 comprOffset; + NvU32 comprFormat; + NvU32 gpuCacheAttr; + NvU32 gpuP2PCacheAttr; + NvU32 mmuContext; + NV_DECLARE_ALIGNED(NvU64 contigSegmentSize, 8); +} NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS; + +/* valid gpuCacheAttr return values */ +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN (0x00000000) +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED (0x00000001) +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED (0x00000002) + +/* NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES + * + * This command returns the number of physical pages associated with the + * memory object. + * + * This call is currently only implemented on Linux and assumes that the + * kernel into which the RM module is loaded has the same page size as the + * kernel sources against which the RM module was built. + * + * numPages + * This parameter returns the total number of physical pages associated + * with the memory object.
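+ *
+ * A minimal usage sketch (illustrative only; the allocation and control-call
+ * plumbing are assumed) of the usual two-call pattern with
+ * NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES, defined below:
+ *
+ *   NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS n = { 0 };
+ *   // ... issue NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES ...
+ *   NvU64 *pages = malloc(n.numPages * sizeof(NvU64));
+ *   NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS g = { 0 };
+ *   g.pPages = NV_PTR_TO_NvP64(pages);  // assumes the NV_PTR_TO_NvP64 helper
+ *   g.numPages = n.numPages;            // in: capacity, out: pages copied
+ *   // ... issue NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES ...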
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_STATE
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES (0x3e0102) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS {
+    NvU32 numPages;
+} NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS;
+
+
+/* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES
+ *
+ * This command returns physical pages associated with the memory object.
+ *
+ * This call is currently only implemented on Linux and assumes that the
+ * Linux kernel into which the RM module is loaded has the same page size
+ * as the Linux kernel source against which the RM module was built.
+ *
+ * pPages
+ * This parameter returns physical pages associated with the memory object.
+ *
+ * numPages
+ * This parameter is both an input and an output. As an input parameter,
+ * its value indicates the maximum number of physical pages to be copied to
+ * pPages. As an output parameter, its value indicates the number of physical
+ * pages copied to pPages.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_STATE
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES (0x3e0103) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pPages, 8);
+    NvU32 numPages;
+} NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS;
+
+/* _ctrl003e_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
new file mode 100644
index 0000000..6ff491b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
@@ -0,0 +1,472 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0041.finn
+//
+
+#include "nvos.h"
+#include "ctrl/ctrlxxxx.h"
+/* NV04_MEMORY control commands and parameters */
+
+#define NV0041_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0041, NV0041_CTRL_##cat, idx)
+
+/* NV04_MEMORY command categories (6bits) */
+#define NV0041_CTRL_RESERVED (0x00)
+#define NV0041_CTRL_MEMORY (0x01)
+
+/*
+ * NV0041_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV0041_CTRL_CMD_NULL (0x410000) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+/*
+ * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR
+ *
+ * This command returns attributes associated with the memory object
+ * at the given offset. The architecture dependent return parameter
+ * comprFormat determines the meaningfulness (or not) of comprOffset.
+ *
+ * This call is currently only supported in the MODS environment.
+ *
+ * memOffset
+ * This parameter is both an input and an output. As input, this
+ * parameter holds an offset into the memory surface. The return
+ * value is the physical address of the surface at the given offset.
+ * memFormat
+ * This parameter returns the memory kind of the surface.
+ * comprOffset
+ * This parameter returns the compression offset of the surface.
+ * comprFormat
+ * This parameter returns the type of compression of the surface.
+ * memAperture
+ * The aperture of the surface is returned in this field.
+ * Legal return values for this parameter are
+ * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM
+ * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM
+ * gpuCacheAttr
+ * gpuCacheAttr returns the gpu cache attribute of the surface.
+ * Legal return values for this field are
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED
+ * gpuP2PCacheAttr
+ * gpuP2PCacheAttr returns the gpu peer-to-peer cache attribute of the surface.
+ * Legal return values for this field are
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED
+ * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED
+ * mmuContext
+ * mmuContext indicates the type of physical address to be returned (input
+ * parameter). Legal values for this field are
+ * TEGRA_VASPACE_A -- return the device physical address for Tegra
+ * (non-GPU) engines. This is the system physical address itself.
+ * This may change to use a class value in future.
+ * FERMI_VASPACE_A -- return the device physical address for GPU engines.
+ * This can be a system physical address or a GPU SMMU virtual address.
+ * 0 -- return the device physical address for GPU engines. This can be
+ * a system physical address or a GPU SMMU virtual address. Use of
+ * zero may be deprecated in future.
+ * contigSegmentSize
+ * If the underlying surface is physically contiguous, this parameter
+ * returns the size in bytes of the piece of memory starting from
+ * the offset specified in the memOffset parameter extending to the last
+ * byte of the surface.
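+ *
+ * A minimal usage sketch (illustrative only: the handles, the NvRmControl
+ * entry point, and the FERMI_VASPACE_A class value are assumptions taken
+ * from outside this header):
+ *
+ *   NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS attr = { 0 };
+ *   attr.memOffset  = 0;               // query the start of the surface
+ *   attr.mmuContext = FERMI_VASPACE_A; // GPU-engine physical address
+ *   NV_STATUS status = NvRmControl(hClient, hMemory,
+ *                                  NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR,
+ *                                  &attr, sizeof(attr));
+ *   // On NV_OK, attr.memOffset holds the physical address at offset 0 and
+ *   // attr.memAperture tells whether it is in VIDMEM or SYSMEM.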
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NVOS_STATUS_BAD_OBJECT_HANDLE
+ * NVOS_STATUS_BAD_OBJECT_PARENT
+ * NVOS_STATUS_NOT_SUPPORTED
+ *
+ */
+#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR (0x410103) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 memOffset, 8);
+    NvU32 memFormat;
+    NvU32 comprOffset;
+    NvU32 comprFormat;
+    NvU32 memAperture;
+    NvU32 gpuCacheAttr;
+    NvU32 gpuP2PCacheAttr;
+    NvU32 mmuContext;
+    NV_DECLARE_ALIGNED(NvU64 contigSegmentSize, 8);
+} NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS;
+
+/* valid memAperture return values */
+#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM (0x00000000)
+#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM (0x00000001)
+
+/* valid gpuCacheAttr return values */
+#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN (0x00000000)
+#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED (0x00000001)
+#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED (0x00000002)
+
+/*
+ * NV0041_CTRL_CMD_GET_SURFACE_ZCULL_ID
+ *
+ * This command returns the Z-cull identifier for a surface.
+ * The value of ~0 is returned if there is none associated.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NVOS_STATUS_BAD_OBJECT_HANDLE
+ * NVOS_STATUS_BAD_OBJECT_PARENT
+ * NVOS_STATUS_NOT_SUPPORTED
+ *
+ */
+#define NV0041_CTRL_CMD_GET_SURFACE_ZCULL_ID (0x410104) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS {
+    NvU32 zcullId;
+} NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_GET_SURFACE_PARTITION_STRIDE
+ *
+ * This command returns the partition stride (in bytes) for real memory
+ * associated with the memory object.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NVOS_STATUS_BAD_OBJECT_HANDLE
+ * NVOS_STATUS_BAD_OBJECT_PARENT
+ * NVOS_STATUS_NOT_SUPPORTED
+ *
+ */
+#define NV0041_CTRL_CMD_GET_SURFACE_PARTITION_STRIDE (0x410105) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS {
+    NvU32 partitionStride;
+} NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS;
+
+
+
+// return values for 'tilingFormat'
+// XXX - the names for these are misleading
+#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_INVALID (0x00000000)
+#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB (0x00000001)
+#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB_1HIGH (0x00000002)
+#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB_4HIGH (0x00000003)
+#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_UMA_1HIGH (0x00000004)
+#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_UMA_4HIGH (0x00000005)
+
+/*
+ * NV0041_CTRL_SURFACE_INFO
+ *
+ * This structure represents a single 32bit surface value. Clients
+ * request a particular surface value by specifying a unique surface
+ * information index.
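+ *
+ * A minimal sketch of querying one surface value through
+ * NV0041_CTRL_CMD_GET_SURFACE_INFO (defined below). The handles and the
+ * NvRmControl entry point are assumptions for illustration, not part of
+ * this header:
+ *
+ *   NV0041_CTRL_SURFACE_INFO info = { 0 };
+ *   info.index = NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS;
+ *
+ *   NV0041_CTRL_GET_SURFACE_INFO_PARAMS p = { 0 };
+ *   p.surfaceInfoListSize = 1;
+ *   p.surfaceInfoList = NV_PTR_TO_NvP64(&info);
+ *
+ *   NV_STATUS status = NvRmControl(hClient, hMemory,
+ *                                  NV0041_CTRL_CMD_GET_SURFACE_INFO,
+ *                                  &p, sizeof(p));
+ *   // On NV_OK, info.data carries the NV0041_CTRL_SURFACE_INFO_ATTRS_* flags.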
+ *
+ * Legal surface information index values are:
+ * NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS
+ * This index is used to request the set of hw attributes associated
+ * with the surface. Each distinct attribute is represented by a
+ * single bit flag in the returned value.
+ * Legal flag values for this index are:
+ * NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR
+ * This surface has compression resources bound to it.
+ * NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL
+ * This surface has zcull resources bound to it.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE
+ * This index is used to request the compression coverage (if any)
+ * in units of 64K for the associated surface. A value of zero indicates
+ * there are no compression resources associated with the surface.
+ * Legal return values range from zero to a maximum number of 64K units
+ * that is GPU implementation dependent.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE
+ * This index is used to request the physically allocated size in units
+ * of 4K (NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR) for the
+ * associated surface.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR
+ * This index is used to request the surface attribute field. The returned
+ * field value can be decoded using the NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_*
+ * DRF-style macros provided below.
+ * NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE
+ * This index is used to request the surface address space type.
+ * Returned values are described by NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE.
+ */
+typedef struct NV0041_CTRL_SURFACE_INFO {
+    NvU32 index;
+    NvU32 data;
+} NV0041_CTRL_SURFACE_INFO;
+
+/* valid surface info index values */
+#define NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS (0x00000001)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE (0x00000005)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE (0x00000007)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR (0x00000008)
+#define NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE (0x00000009)
+
+/*
+ * This define indicates the scale factor of the reported physical size to
+ * the actual size in bytes. The scale factor keeps the interface field
+ * compact while still accounting for large surfaces. To get the actual
+ * size, use (NvU64)reported_size * NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR.
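+ *
+ * Worked example (hypothetical reported value): a PHYS_SIZE query that
+ * reports 0x20 units corresponds to 0x20 * 0x1000 = 0x20000 bytes (128 KiB):
+ *
+ *   NvU32 reported = 0x20; // from NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE
+ *   NvU64 sizeInBytes = (NvU64)reported *
+ *                       NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR;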
+ */
+#define NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR (0x1000)
+
+/* valid surface info attr flags */
+#define NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR (0x00000002)
+#define NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL (0x00000004)
+
+/* Valid surface info page size */
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE NVOS32_ATTR_PAGE_SIZE
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_DEFAULT NVOS32_ATTR_PAGE_SIZE_DEFAULT
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_4KB NVOS32_ATTR_PAGE_SIZE_4KB
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_BIG NVOS32_ATTR_PAGE_SIZE_BIG
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_HUGE NVOS32_ATTR_PAGE_SIZE_HUGE
+
+/* Valid surface info CPU coherency */
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY NVOS32_ATTR_COHERENCY
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_UNCACHED NVOS32_ATTR_COHERENCY_UNCACHED
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_CACHED NVOS32_ATTR_COHERENCY_CACHED
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_COMBINE NVOS32_ATTR_COHERENCY_WRITE_COMBINE
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_THROUGH NVOS32_ATTR_COHERENCY_WRITE_THROUGH
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_PROTECT NVOS32_ATTR_COHERENCY_WRITE_PROTECT
+#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_BACK NVOS32_ATTR_COHERENCY_WRITE_BACK
+
+/*
+ * NV0041_CTRL_CMD_GET_SURFACE_INFO
+ *
+ * This command returns surface information for the associated memory object.
+ * Requests to retrieve surface information use a list of one or more
+ * NV0041_CTRL_SURFACE_INFO structures.
+ *
+ * surfaceInfoListSize
+ * This field specifies the number of entries on the caller's
+ * surfaceInfoList.
+ * surfaceInfoList
+ * This field specifies a pointer in the caller's address space
+ * to the buffer into which the surface information is to be returned.
+ * This buffer must be at least as big as surfaceInfoListSize multiplied
+ * by the size of the NV0041_CTRL_SURFACE_INFO structure.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0041_CTRL_CMD_GET_SURFACE_INFO (0x410110) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_SURFACE_INFO_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV0041_CTRL_GET_SURFACE_INFO_PARAMS {
+    NvU32 surfaceInfoListSize;
+    NV_DECLARE_ALIGNED(NvP64 surfaceInfoList, 8);
+} NV0041_CTRL_GET_SURFACE_INFO_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_GET_SURFACE_COMPRESSION_COVERAGE
+ *
+ * This command returns the percentage of surface compression tag coverage.
+ * The value of 0 is returned if there are no tags associated.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NVOS_STATUS_BAD_OBJECT_HANDLE
+ * NVOS_STATUS_BAD_OBJECT_PARENT
+ * NVOS_STATUS_NOT_SUPPORTED
+ *
+ */
+#define NV0041_CTRL_CMD_GET_SURFACE_COMPRESSION_COVERAGE (0x410112) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS {
+    NvHandle hSubDevice; /* [in] if non-zero, subdevice handle of the local GPU */
+    NvU32 lineMin;
+    NvU32 lineMax;
+    NvU32 format;
+} NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_GET_FBMEM_BUS_ADDR
+ *
+ * This command returns the BAR1 physical address of a
+ * memory mapping made using NvRmMapMemory().
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NVOS_STATUS_INVALID_DATA
+ * NV_ERR_INVALID_CLIENT
+ * NV_ERR_INVALID_OBJECT_HANDLE
+ *
+ */
+#define NV0041_CTRL_CMD_GET_FBMEM_BUS_ADDR (0x410114) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pLinearAddress, 8); /* [in] Linear address of CPU mapping */
+    NV_DECLARE_ALIGNED(NvU64 busAddress, 8); /* [out] BAR1 address */
+} NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE
+ *
+ * This command flushes a cache on the GPU through which all memory accesses
+ * go. The types of flushes supported by this API may not be supported by
+ * all hardware. Attempting an unsupported flush type will result in an error.
+ *
+ * flags
+ * Contains flags to control various aspects of the flush. Valid values
+ * are defined in NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS*. Not all
+ * flags are valid for all GPUs.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NVOS_STATUS_INVALID_ARGUMENT
+ * NVOS_STATUS_INVALID_STATE
+ *
+ * See Also:
+ * NV0080_CTRL_CMD_DMA_FLUSH
+ * Performs flush operations in broadcast for the GPU cache and other hardware
+ * engines. Use this call if you want to flush all GPU caches in a
+ * broadcast device.
+ * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE
+ * Flushes the entire GPU cache or a set of physical addresses (if the
+ * hardware supports it). Use this call if you want to flush a set of
+ * addresses or the entire GPU cache in unicast mode.
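+ *
+ * A minimal sketch of a write-back-plus-invalidate request (illustrative
+ * only: the handles and the NvRmControl entry point are assumptions;
+ * DRF_DEF is the DRF helper from nvmisc.h):
+ *
+ *   NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS p = { 0 };
+ *   p.flags = DRF_DEF(0041_CTRL, _SURFACE_FLUSH_GPU_CACHE_FLAGS,
+ *                     _WRITE_BACK, _YES) |
+ *             DRF_DEF(0041_CTRL, _SURFACE_FLUSH_GPU_CACHE_FLAGS,
+ *                     _INVALIDATE, _YES);
+ *   NV_STATUS status = NvRmControl(hClient, hMemory,
+ *                                  NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE,
+ *                                  &p, sizeof(p));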
+ *
+ */
+#define NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE (0x410116) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS {
+    NvU32 flags;
+} NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS;
+
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK 0:0
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_NO (0x00000000)
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_YES (0x00000001)
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE 1:1
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_NO (0x00000000)
+#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_YES (0x00000001)
+
+/*
+ * NV0041_CTRL_CMD_GET_MEM_PAGE_SIZE
+ *
+ * This command may be used to get the memory page size.
+ *
+ * Parameters:
+ * pageSize [OUT]
+ * page size associated with the memory descriptor
+ *
+ * Possible status values are:
+ * NV_OK
+ * NV_ERR_INVALID_OBJECT_HANDLE
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV0041_CTRL_CMD_GET_MEM_PAGE_SIZE (0x410118) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS {
+    NvU32 pageSize; /* [out] - page size */
+} NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_UPDATE_SURFACE_COMPRESSION
+ *
+ * Acquire/release compression for surface
+ *
+ * Parameters:
+ * bRelease [IN]
+ * true = release compression; false = acquire compression
+ */
+#define NV0041_CTRL_CMD_UPDATE_SURFACE_COMPRESSION (0x410119) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS {
+    NvBool bRelease; /* [in] - acquire/release setting */
+} NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS;
+
+#define NV0041_CTRL_CMD_PRINT_LABELS_PARAMS_MESSAGE_ID (0x50U)
+
+typedef struct NV0041_CTRL_CMD_PRINT_LABELS_PARAMS {
+    NvU32 tag; /* [in] */
+} NV0041_CTRL_CMD_PRINT_LABELS_PARAMS;
+#define NV0041_CTRL_CMD_SET_LABEL_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV0041_CTRL_CMD_SET_LABEL_PARAMS {
+    NvU32 tag; /* [in] */
+} NV0041_CTRL_CMD_SET_LABEL_PARAMS;
+#define NV0041_CTRL_CMD_SET_LABEL (0x410151) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_SET_LABEL_PARAMS_MESSAGE_ID" */
+#define NV0041_CTRL_CMD_GET_LABEL (0x410152) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_GET_LABEL_PARAMS_MESSAGE_ID" */
+#define NV0041_CTRL_CMD_GET_LABEL_PARAMS_MESSAGE_ID (0x52U)
+
+typedef struct NV0041_CTRL_CMD_GET_LABEL_PARAMS {
+    NvU32 tag; /* [in] */
+} NV0041_CTRL_CMD_GET_LABEL_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_SET_TAG
+ *
+ * This command sets the memory allocation tag used for debugging.
+ * Every client has its own memory allocation tag, and the tag is copied
+ * when an object is duped. This control can be used on shared allocations
+ * to change their tag.
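+ *
+ * A minimal sketch (illustrative only: the handles and the NvRmControl
+ * entry point are assumptions, and the tag value is arbitrary):
+ *
+ *   NV0041_CTRL_CMD_SET_TAG_PARAMS tagParams = { 0 };
+ *   tagParams.tag = 0x44454247;  // arbitrary debug tag
+ *   NV_STATUS status = NvRmControl(hClient, hMemory,
+ *                                  NV0041_CTRL_CMD_SET_TAG,
+ *                                  &tagParams, sizeof(tagParams));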
+ */
+#define NV0041_CTRL_CMD_SET_TAG (0x410120) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_SET_TAG_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_CMD_SET_TAG_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV0041_CTRL_CMD_SET_TAG_PARAMS {
+    NvU32 tag; /* [in] */
+} NV0041_CTRL_CMD_SET_TAG_PARAMS;
+
+/*
+ * NV0041_CTRL_CMD_GET_TAG
+ *
+ * This command returns the memory allocation tag used for debugging.
+ */
+#define NV0041_CTRL_CMD_GET_TAG (0x410121) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_GET_TAG_PARAMS_MESSAGE_ID" */
+
+#define NV0041_CTRL_CMD_GET_TAG_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV0041_CTRL_CMD_GET_TAG_PARAMS {
+    NvU32 tag; /* [out] */
+} NV0041_CTRL_CMD_GET_TAG_PARAMS;
+
+/* _ctrl0041_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
new file mode 100644
index 0000000..fa7d8f7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2015,2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073.finn
+//
+
+
+
+
+#include "ctrl/ctrlxxxx.h"
+/* category-specific includes */
+#include "ctrl0073/ctrl0073system.h"
+#include "ctrl0073/ctrl0073specific.h"
+#include "ctrl0073/ctrl0073stereo.h"
+#include "ctrl0073/ctrl0073event.h"
+#include "ctrl0073/ctrl0073internal.h"
+#include "ctrl0073/ctrl0073dfp.h"
+#include "ctrl0073/ctrl0073dp.h"
+#include "ctrl0073/ctrl0073svp.h"
+#include "ctrl0073/ctrl0073dpu.h"
+#include "ctrl0073/ctrl0073psr.h"
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
new file mode 100644
index 0000000..970ea3b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
@@ -0,0 +1,60 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073base.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NV04_DISPLAY_COMMON control commands and parameters */
+
+#define NV0073_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0073, NV0073_CTRL_##cat, idx)
+
+/* NV04_DISPLAY_COMMON command categories (6bits) */
+#define NV0073_CTRL_RESERVED (0x00U)
+#define NV0073_CTRL_SYSTEM (0x01U)
+#define NV0073_CTRL_SPECIFIC (0x02U)
+#define NV0073_CTRL_EVENT (0x03U)
+#define NV0073_CTRL_INTERNAL (0x04U)
+#define NV0073_CTRL_DFP (0x11U)
+#define NV0073_CTRL_DP (0x13U)
+#define NV0073_CTRL_SVP (0x14U)
+#define NV0073_CTRL_DPU (0x15U)
+#define NV0073_CTRL_PSR (0x16U)
+#define NV0073_CTRL_STEREO (0x17U)
+
+/*
+ * NV0073_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV0073_CTRL_CMD_NULL (0x730000U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/* _ctrl0073base_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
new file mode 100644
index 0000000..b3b3eb2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
@@ -0,0 +1,1261 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073dfp.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+
+/* NV04_DISPLAY_COMMON dfp-display-specific control commands and parameters */
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_INFO
+ *
+ * This command can be used to determine the associated display type for
+ * the specified displayId.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the display for which the dfp
+ * caps should be returned. The display ID must be a dfp display
+ * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ * If more than one displayId bit is set or the displayId is not a dfp,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * flags
+ * This parameter returns the information specific to this dfp. Here are
+ * the possible fields:
+ * NV0073_CTRL_DFP_FLAGS_SIGNAL
+ * This specifies the type of signal used for this dfp.
+ * NV0073_CTRL_DFP_FLAGS_LANES
+ * This specifies whether the board supports 1, 2, or 4 lanes
+ * for DISPLAYPORT signals.
+ * NV0073_CTRL_DFP_FLAGS_LIMIT
+ * Some GPUs were not qualified to run internal TMDS except at 60 Hz
+ * refresh rates. So, if LIMIT_60HZ_RR is set, then the client must
+ * make sure to only allow 60 Hz refresh rate modes to the OS/User.
+ * NV0073_CTRL_DFP_FLAGS_SLI_SCALER
+ * While running in SLI, if SLI_SCALER_DISABLE is set, the GPU cannot
+ * scale any resolutions. So, the output timing must match the
+ * memory footprint.
+ * NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE
+ * This specifies whether the DFP displayId is capable of
+ * transmitting HDMI.
+ * NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE
+ * This specifies whether the displayId is capable of sending a
+ * limited color range out from the board.
+ * NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE
+ * This specifies whether the displayId is capable of auto-configuring
+ * the color range.
+ * NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE
+ * This specifies whether the displayId is capable of sending the
+ * YCBCR422 color format out from the board.
+ * NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE
+ * This specifies whether the displayId is capable of sending
+ * YCBCR444 color format out from the board.
+ * NV0073_CTRL_DFP_FLAGS_DP_LINK_BANDWIDTH
+ * This specifies whether the displayId is capable of doing high
+ * bit-rate (2.7Gbps) or low bit-rate (1.62Gbps) if the DFP is
+ * display port.
+ * NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED
+ * This specifies whether the DFP displayId is allowed to transmit HDMI
+ * based on the VBIOS settings.
+ * NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT
+ * This specifies whether the DFP displayId is actually an embedded display
+ * port based on VBIOS connector information AND ASSR cap.
+ * NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT
+ * This specifies whether the DFP displayId must be trained to RBR mode
+ * (if it is using DP protocol) whenever possible.
+ * NV0073_CTRL_DFP_FLAGS_LINK
+ * This specifies whether the board supports single or dual links
+ * for TMDS, LVDS, and SDI signals.
+ * NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED
+ * This specifies if PostCursor2 is disabled in the VBIOS.
+ * NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID
+ * This indicates whether this SOR uses DSI-A, DSI-B or both (ganged mode).
+ * NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE
+ * This indicates whether this DFP supports Dynamic MUX
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID (0x40U)
+
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+/* valid display types */
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_DISPLAYPORT_DONGLE_INFO
+ *
+ * This command can be used to determine information about dongles attached
+ * to a displayport connection.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the dfp display which owns the
+ * panel power to adjust. The display ID must be a dfp display
+ * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ * If more than one displayId bit is set or the displayId is not a dfp,
+ * this call will return NV_ERR_INVALID_ARGUMENT.
+ * flags
+ * This parameter provides state information about the dongle attachments.
+ * NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE
+ * Specifies whether the connection is capable of hosting a dongle.
+ * This field returns false for all signal types except those capable
+ * of outputting TMDS. Even then, if no GPIO is defined, false is
+ * returned as well.
+ * NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED
+ * When attached, this value specifies that a dongle is detected and
+ * attached. The client should read the _TYPE field to determine
+ * if it is a dp2hdmi or dp2dvi dongle.
+ * NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE
+ * _DP2DVI: no response to i2cAddr 0x80 per DP interop guidelines.
+ * clients MUST avoid outputting HDMI even if capable.
+ * _DP2HDMI: dongle responds to i2cAddr 0x80 per DP interop guidelines.
+ * client is allowed to output HDMI when possible.
+ * _LFH_DVI: DMS59-DVI breakout dongle is in use.
+ * _LFH_VGA: DMS59-VGA breakout dongle is in use.
+ * NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE
+ * _1: Max TMDS Clock rate is 165 MHz for both DVI and HDMI.
+ * _2: Max TMDS Clock rate will be specified in the dongle
+ * address space at device address 0x80.
+ * DVI is up to 165 MHz
+ * HDMI is up to 300 MHz
+ * There are type 2 devices that support beyond 600 MHz
+ * though not defined in the spec.
+ * maxTmdsClkRateHz
+ * This defines the max TMDS clock rate for dual mode adaptor in Hz.
+ */
+#define NV0073_CTRL_CMD_DFP_GET_DISPLAYPORT_DONGLE_INFO (0x731142U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 maxTmdsClkRateHz;
+} NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE 0:0
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED 1:1
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE 7:4
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_DP2DVI (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_DP2HDMI (0x00000001U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_LFH_DVI (0x00000002U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_LFH_VGA (0x00000003U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE 8:8
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE_1 (0x00000000U)
+#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE_2 (0x00000001U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS
+ *
+ * This command is used to inform hardware of the receiver's audio capabilities
+ * using the new EDID Like Data (ELD) memory structure. The ELD memory
+ * structure is read by the audio driver by issuing the ELD Data command verb.
+ * This mechanism is used for passing the sink device's audio EDID information
+ * from graphics software to audio software. ELD contents will contain a
+ * subset of the sink device's EDID information.
+ * The client should inform hardware at initial boot, a modeset, and whenever
+ * a hotplug event occurs.
+ *
+ * displayId
+ * This parameter indicates the digital display device's
+ * mask. This comes as input to this command.
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be directed.
+ * This parameter must specify a value between zero and the total number
+ * of subdevices within the parent device. This parameter should be set
+ * to zero for default behavior.
+ * numELDSize
+ * This parameter specifies how many bytes of data RM should write to the
+ * ELD buffer.
+ * Section 7.3.3.36 of the ECN specifies that the ELD buffer size is
+ * zero-based. The HDAudio driver will then use this information to
+ * determine how many bytes of the ELD buffer it should read.
+ * The maximum size of the buffer is 96 bytes.
+ * bufferELD
+ * This buffer contains data as defined in the ECR HDMI ELD memory structure.
+ * Refer to the ELD Memory Structure Specification for more details.
+ * The format should be:
+ * - Header block is fixed at 4 bytes
+ * The header block contains the ELD version and baseline ELD len as
+ * well as some reserved fields.
+ * - Baseline block for audio descriptors is 76 bytes
+ * (15 SAD; each SAD=3 bytes requiring 45 bytes with 31 bytes to spare)
+ * As well as some other bits used to denote the CEA version,
+ * the speaker allocation data, monitor name, connector type, and
+ * hdcp capabilities.
+ * - Vendor specific block of 16 bytes
+ * maxFreqSupported
+ * Supply the maximum frequency supported for the overall audio caps.
+ * This value should match CEA861-X defines for sample freq.
+ * ctrl:
+ * NV0073_CTRL_DFP_SET_ELD_AUDIO_CAPS_CTRL_PD:
+ * Specifies the presence detect of the receiver. On a hotplug
+ * or modeset client should set this bit to TRUE.
+ * NV0073_CTRL_DFP_SET_ELD_AUDIO_CAPS_CTRL_ELDV:
+ * Specifies whether the ELD buffer contents are valid.
+ * An intrinsic unsolicited response (UR) is generated whenever
+ * the ELDV bit changes in value and the PD=1. When _PD=1(hotplug),
+ * RM will set the ELDV bit after ELD buffer contents are written.
+ * If the _ELDV bit is set to false, such as during an unplug, then the
+ * contents of the ELD buffer will be cleared.
+ * deviceEntry:
+ * The deviceEntry number from which the SF should accept packets.
+ * _NONE if disabling audio.
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+#define NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numELDSize;
+    NvU8  bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+    NvU32 maxFreqSupported;
+    NvU32 ctrl;
+    NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0320KHZ (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0441KHZ (0x00000002U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ (0x00000003U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0882KHZ (0x00000004U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0960KHZ (0x00000005U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_1764KHZ (0x00000006U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_1920KHZ (0x00000007U)
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0 (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_1 (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_2 (0x00000002U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_3 (0x00000003U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE (0x00000007U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_SPREAD_SPECTRUM
+ *
+ * This command is used to get spread spectrum status for a display device.
+ *
+ * displayId
+ * Display ID for which the spread spectrum status is needed.
+ * checkRMSsState
+ * By default the VBIOS is checked. This flag makes the control call
+ * check the register instead.
+ * enabled
+ * Returns whether spread spectrum is enabled.
+ */
+
+#define NV0073_CTRL_CMD_DFP_GET_SPREAD_SPECTRUM (0x73114cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS_MESSAGE_ID (0x4CU)
+
+typedef struct NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS {
+    NvU32  displayId;
+    NvBool enabled;
+} NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE
+ *
+ * Update the Dynamic DFP with Bcaps read from remote display.
+ * Also updates hdcpFlags, gpu hdcp capable flags in DFP.
+ * If bResetDfp is true, all the flags are reset before making changes.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE (0x73114eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS_MESSAGE_ID (0x4EU)
+
+typedef struct NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  headIndex;
+    NvU8   bcaps;
+    NvU8   bksv[5];
+    NvU32  hdcpFlags;
+    NvBool bHdcpCapable;
+    NvBool bResetDfp;
+    NvU8   updateMask;
+} NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS;
+
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS 0x01U
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV 0x02U
+#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_FLAGS 0x03U
+
+/*
+ * NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE
+ *
+ * This command sets the audio enable state of the DFP. When disabled,
+ * no audio stream packets or audio timestamp packets will be sent.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the display for which the dfp
+ * audio should be enabled or disabled. The display ID must be a dfp display.
+ * If the displayId is not a dfp, this call will return
+ * NV_ERR_INVALID_ARGUMENT.
+ * enable
+ * This parameter specifies whether to enable (NV_TRUE) or disable (NV_FALSE)
+ * audio to the display.
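+ *
+ * A minimal sketch (illustrative only: the handles, the NvRmControl entry
+ * point, and the displayId value are assumptions; NVBIT is the bit helper
+ * from nvmisc.h):
+ *
+ *   NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS p = { 0 };
+ *   p.subDeviceInstance = 0;    // default subdevice
+ *   p.displayId = NVBIT(8);     // hypothetical DFP display mask
+ *   p.enable = NV_TRUE;
+ *   NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                  NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE,
+ *                                  &p, sizeof(p));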
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ *
+ */
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID (0x50U)
+
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG
+ *
+ * This enum defines default/primary/secondary sor sublinks to be configured.
+ * These access modes are:
+ *
+ * NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_NONE
+ * Default link config
+ * NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_PRIMARY_SOR_LINK
+ * Primary sor sublink to be configured
+ * NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_SECONDARY_SOR_LINK
+ * Secondary sor sublink to be configured
+ */
+typedef enum NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG {
+    NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_NONE = 0,
+    NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_PRIMARY_SOR_LINK = 1,
+    NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_SECONDARY_SOR_LINK = 2,
+} NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+/*
+ * NV0073_CTRL_DFP_ASSIGN_SOR_INFO
+ *
+ * This structure describes info about assigned SOR
+ *
+ * displayMask
+ * The displayMask for the SOR corresponding to its HW routings
+ * sorType
+ * This parameter specifies the SOR type
+ * Here are the current defined fields:
+ * NV0073_CTRL_DFP_SOR_TYPE_NONE
+ * Unallocated SOR
+ * NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY
+ * Primary SOR for 2H1OR stream
+ * NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY
+ * Secondary SOR for 2H1OR stream
+ * NV0073_CTRL_DFP_SOR_TYPE_SINGLE
+ * Default Single SOR
+ * Note - sorType should only be referred to identify 2H1OR Primary and Secondary SOR
+ *
+ */
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+    NvU32 displayMask;
+    NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_DFP_SOR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_SOR_TYPE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY (0x00000002U)
+#define NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY (0x00000003U)
+
+/*
+ * NV0073_CTRL_CMD_DFP_ASSIGN_SOR
+ *
+ * This command is used by clients to assign an SOR to a DFP for CROSS-BAR
+ * when the default SOR-DFP routing that comes from the VBIOS is not used.
+ * An SOR is assigned to a DFP at runtime. This call must be made
+ * before a modeset is done on any dfp display and also before link training
+ * for DP displays.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * DisplayId of the primary display for which SOR is to be assigned. However, if
+ * displayId is 0 then RM shall return the XBAR config it has stored in its
+ * structures.
+ * sorExcludeMask
+ * sorMask of the SORs which should not be used for assignment. If this is 0,
+ * then SW is free to allocate any available SOR.
+ * slaveDisplayId
+ * displayId of the slave device in case of dualSST mode. This ctrl call will
+ * allocate SORs to both the slave and the master if slaveDisplayId is set.
+ * forceSublinkConfig
+ * Forces RM to configure the primary or secondary sor sublink on the given
+ * displayId. If not set, then RM will do the default configurations.
+ * bIs2Head1Or
+ * Specifies that SOR allocation is required for 2 head 1 OR. This will allocate
+ * 2 SOR for same displayId - one Master and one Slave. Slave SOR would be disconnected
+ * from any padlink and get feedback clock from Master SOR's padlink.
+ * sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]
+ * returns the displayMask for all SORs corresponding to their HW routings.
+ * sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]
+ * returns the displayMask for all SORs corresponding to their HW routings along with
+ * SOR type to identify 2H1OR Primary and Secondary SORs. SOR type would be identified by
+ * NV0073_CTRL_DFP_SOR_TYPE. sorAssignList would look as below -
+ * sorAssignListWithTag[] = { DisplayMask, SOR Type
+ * {0x100, SECONDARY_SOR}
+ * {0x200, SINGLE_SOR}
+ * {0x100, PRIMARY_SOR}
+ * {0, NONE}}
+ * }
+ * Here, for display id = 0x100, SOR2 is Primary and SOR0 is Secondary.
+ * Note - sorAssignList parameter would be removed after Bug 200749158 is resolved
+ * reservedSorMask
+ * returns the sorMask reserved for the internal panels.
+ * flags
+ * Other detail settings.
+ * _AUDIO_OPTIMAL: Client requests trying to get audio SOR if possible.
+ * If there's no audio capable SOR and OD is HDMI/DP,
+ * RM will fail the control call.
+ * _AUDIO_DEFAULT: RM does not check audio-capability of SOR.
+ *
+ * _ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES : RM returns Active SOR which is not Audio capable.
+ * _ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO : RM is not returning 'Active non-audio capable SOR'.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID (0x52U)
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU8  sorExcludeMask;
+    NvU32 slaveDisplayId;
+    NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+    NvBool bIs2Head1Or;
+    NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NvU8  reservedSorMask;
+    NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_PADLINK_MASK
+ *
+ * This command will only be used by chipTB tests to get the padlinks corresponding
+ * to the given displayId. RM gets this information from the VBIOS. This control
+ * call is for verification purposes only.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed.
+ * This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * DisplayId of the display for which the client needs the analog link mask
+ * padlinkMask
+ * The padlink mask for the given displayId. The value returned is 0xffffffff
+ * if the given displayId is invalid; otherwise RM returns the corresponding
+ * padlinkMask.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+
+#define NV0073_CTRL_CMD_DFP_GET_PADLINK_MASK (0x731153U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS_MESSAGE_ID (0x53U)
+
+typedef struct NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 padlinkMask;
+} NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE
+ * This enum defines the functions that are supported for which a
+ * corresponding GPIO pin number could be retrieved
+ * Values copied from objgpio.h GPIO_FUNC_TYPE_LCD_*. Please keep the
+ * values in sync between the 2 files
+ */
+
+typedef enum NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE {
+    // GPIO types of LCD GPIO functions common to all internal panels
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_BACKLIGHT = 268435456,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_POWER = 285212672,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_POWER_OK = 301989888,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_SELF_TEST = 318767104,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_LAMP_STATUS = 335544320,
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_BRIGHTNESS = 352321536,
+} NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE;
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_LCD_GPIO_PIN_NUM
+ *
+ * This command can be used to get the GPIO pin number that corresponds to one
+ * of the LCD functions
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the dfp display.
+ * If more than one displayId bit is set or the displayId is not a dfp,
+ * this call will return NVOS_STATUS_ERROR_INVALID_ARGUMENT.
+ * funcType
+ * The LCD function for which the GPIO pin number is needed
+ * lcdGpioPinNum
+ * The GPIO pin number that corresponds to the LCD function.
+ *
+ */
+#define NV0073_CTRL_CMD_DFP_GET_LCD_GPIO_PIN_NUM (0x731154U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS_MESSAGE_ID (0x54U)
+
+typedef struct NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE funcType;
+    NvU32 lcdGpioPinNum;
+} NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_CONFIG_TWO_HEAD_ONE_OR
+ *
+ * This command is used for configuration of 2 head 1 OR.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed.
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_CONFIG_TWO_HEAD_ONE_OR
+ *
+ * This command is used for configuration of 2 head 1 OR.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     Display Id of the panel for which Two Head One OR is going to be used.
+ *   bEnable
+ *     Enable/Disable 2 Head 1 OR.
+ *   masterSorIdx
+ *     Master SOR Index which will send pixels to the panel.
+ *   slaveSorIdx
+ *     Slave SOR Index which will take the feedback clock from the Master SOR's
+ *     padlink.
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+
+
+#define NV0073_CTRL_CMD_DFP_CONFIG_TWO_HEAD_ONE_OR (0x731156U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bEnable;
+    NvU32  masterSorIdx;
+    NvU32  slaveSorIdx;
+} NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL
+ *
+ * This command is used to enable/disable CRC on the GPU or to query the
+ * registers related to it.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   headIndex
+ *     Index of the head.
+ *   cmd
+ *     Specifies whether setup or querying is done.
+ *   bEnable
+ *     Enable or disable CRC on the GPU.
+ *   gpuCrc0
+ *     0-indexed CRC register of the GPU.
+ *   gpuCrc1
+ *     1-indexed CRC register of the GPU.
+ *   gpuCrc2
+ *     2-indexed CRC register of the GPU.
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+
+
+#define NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL (0x731157U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS_MESSAGE_ID (0x57U)
+
+typedef struct NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  headIndex;
+    NvU32  cmd;
+    NvBool bEnable;
+    NvU16  gpuCrc0;
+    NvU16  gpuCrc1;
+    NvU16  gpuCrc2;
+} NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS;
+
+#define NV0073_CTRL_DP_CRC_CONTROL_CMD        0:0
+#define NV0073_CTRL_DP_CRC_CONTROL_CMD_SETUP  (0x00000000U)
+#define NV0073_CTRL_DP_CRC_CONTROL_CMD_QUERY  (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DFP_INIT_MUX_DATA
+ *
+ * This control call is used to configure the display MUX related data
+ * for the given display device. Clients of RM are expected to call this
+ * control call to initialize the MUX related data before any MUX related
+ * operations such as a mux switch or PSR entry/exit are performed.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the mux state has to be initialized.
+ *   manfId (in)
+ *     Specifies the manufacturer ID of the panel obtained from the EDID. This
+ *     parameter is expected to be non-zero only in the case of an internal panel.
+ *   productId (in)
+ *     Specifies the product ID of the panel obtained from the EDID.
+ *     This parameter is expected to be non-zero only in the case of an
+ *     internal panel.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DFP_INIT_MUX_DATA (0x731158U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS_MESSAGE_ID (0x58U)
+
+typedef struct NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU16 manfId;
+    NvU16 productId;
+} NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX
+ *
+ * This command is used to switch the dynamic display mux between the
+ * integrated GPU and the discrete GPU.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the display MUX has to be switched.
+ *   flags (in)
+ *     Flags indicating the action to be performed. Here are the possible
+ *     valid values -
+ *       NV0073_CTRL_DFP_DISP_MUX_SWITCH_IGPU_TO_DGPU
+ *         When set, the mux is switched from the integrated to the discrete GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_SWITCH_DGPU_TO_IGPU
+ *         When set, the mux is switched from the discrete to the integrated GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS
+ *         Set to true for PSR panels, as we skip sideband access.
+ *   auxSettleDelay (in)
+ *     Time, in milliseconds, necessary for the AUX channel to settle and become
+ *     accessible after a mux switch. Set to zero to use the default delay.
+ *   muxSwitchLatencyMs (out)
+ *     Mux switch latency stats in milliseconds.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX (0x731160U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID (0x60U)
+
+typedef struct NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 auxSettleDelay;
+    NvU32 muxSwitchLatencyMs;
+} NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS;
+
+/* valid flags */
+#define NV0073_CTRL_DFP_DISP_MUX_SWITCH                          0:0
+#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_IGPU_TO_DGPU             0x00000000
+#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_DGPU_TO_IGPU             0x00000001
+#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS     1:1
+#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS_YES 0x00000001
+#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS_NO  0x00000000
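+
+/*
+ * Usage sketch (illustrative only; DRF_DEF() is the bit-field helper from
+ * nvmisc.h, handles assumed from the client's RM setup): switching the mux
+ * to the discrete GPU. muxedPanelId is a hypothetical display bit.
+ *
+ *     NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS mux = { 0 };
+ *     mux.displayId      = muxedPanelId;
+ *     mux.auxSettleDelay = 0;   // use the default settle delay
+ *     mux.flags = DRF_DEF(0073_CTRL, _DFP_DISP_MUX, _SWITCH, _IGPU_TO_DGPU) |
+ *                 DRF_DEF(0073_CTRL, _DFP_DISP_MUX, _SWITCH_SKIP_SIDEBAND_ACCESS, _NO);
+ *     NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                    NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX,
+ *                                    &mux, sizeof(mux));
+ *     // mux.muxSwitchLatencyMs reports how long the switch took.
+ */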
+
+/*
+ * NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS
+ *
+ * This command is used to perform all the operations that need to be
+ * performed before a mux switch is started.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the pre mux switch operations have
+ *     to be performed.
+ *   flags (in)
+ *     Flags indicating the action to be performed. Here are the possible
+ *     valid values -
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU
+ *         Indicates a switch from iGPU to dGPU is initiated.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU
+ *         Indicates a switch from dGPU to iGPU is initiated.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_NO
+ *         When set, RM will execute the PSR enter sequence. By default RM
+ *         does not skip the SR enter sequence.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_YES
+ *         When set, RM will skip the PSR enter sequence.
+ *   iGpuBrightness (in)
+ *     iGPU brightness value (scale 0~100) before switching the mux from iGPU
+ *     to dGPU. This is used to match brightness after switching the mux to
+ *     the dGPU.
+ *   preOpsLatencyMs (out)
+ *     Pre-mux-switch operations latency stats in milliseconds. This includes -
+ *       - disabling the SOR sequencer and enabling BL GPIO control
+ *       - toggling the LCD VDD, BL EN and PWM MUX GPIOs
+ *       - PSR entry, if not skipped
+ *   psrEntryLatencyMs (out)
+ *     PSR entry latency stats in milliseconds.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS (0x731161U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID (0x61U)
+
+typedef struct NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 iGpuBrightness;
+    NvU32 preOpsLatencyMs;
+    NvU32 psrEntryLatencyMs;
+} NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS;
+
+/* valid flags */
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE                          0:0
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU             0x00000000U
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU             0x00000001U
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP                        1:1
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_NO                     0x00000000U
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_YES                    0x00000001U
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING         2:2
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_KNOWN   0x00000000
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_UNKNOWN 0x00000001
+
+#define NV0073_CTRL_DISP_MUX_BACKLIGHT_BRIGHTNESS_MIN 0U
+#define NV0073_CTRL_DISP_MUX_BACKLIGHT_BRIGHTNESS_MAX 100U
+
+/*
+ * NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS
+ *
+ * This command is used to perform all the operations that need to be
+ * performed after a successful mux switch is completed.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the post mux switch operations have
+ *     to be performed.
+ *   flags (in)
+ *     Flags indicating the action to be performed. Here are the possible
+ *     valid values -
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU
+ *         Indicates a switch from iGPU to dGPU is initiated.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU
+ *         Indicates a switch from dGPU to iGPU is initiated.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_NO
+ *         When set, RM will execute the PSR exit sequence.
+ *         By default RM does not skip the SR exit sequence.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_YES
+ *         When set, RM will skip the PSR exit sequence.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_KNOWN
+ *         Indicates mux switches where we know when the igpu powers up.
+ *       NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_UNKNOWN
+ *         Indicates mux switches where we don't know when the igpu powers up.
+ *   postOpsLatencyMs (out)
+ *     Post-mux-switch operations latency stats in milliseconds. This includes -
+ *       - restoring the SOR sequencer and BL GPIO control
+ *       - toggling the LCD VDD, BL EN and PWM MUX GPIOs
+ *       - PSR exit, if not skipped
+ *   psrExitLatencyMs (out)
+ *     PSR exit latency stats in milliseconds.
+ *   psrExitTransitionToInactiveLatencyMs (out)
+ *     PSR exit latency stats in milliseconds, from state 2 (SR active) to
+ *     state 4 (transition to inactive).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_TIMEOUT in case of SR exit failure
+ */
+
+#define NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS (0x731162U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID (0x62U)
+
+typedef struct NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 postOpsLatencyMs;
+    NvU32 psrExitLatencyMs;
+    NvU32 psrExitTransitionToInactiveLatencyMs;
+} NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS;
+
+/* valid flags */
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE      0:0
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP     1:1
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_NO  0x00000000U
+#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_YES 0x00000001U
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS
+ *
+ * This command is used to query the display mux status for the given
+ * display device.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the mux status is to be queried.
+ *   muxStatus (out)
+ *     Status indicating the current state of the mux.
+ *     Valid values -
+ *       NV0073_CTRL_DFP_DISP_MUX_STATE_INTEGRATED_GPU
+ *         Indicates that the MUX is currently switched to the integrated GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU
+ *         Indicates that the MUX is currently switched to the discrete GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_MODE_DISCRETE_ONLY
+ *         Indicates that the MUX mode is set to discrete mode, where all
+ *         displays are driven by the discrete GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_MODE_INTEGRATED_ONLY
+ *         Indicates that the MUX mode is set to integrated mode, where all
+ *         displays are driven by the integrated GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_MODE_HYBRID
+ *         Indicates that the MUX mode is set to hybrid, where the internal
+ *         panel is driven by the integrated GPU, while external displays might
+ *         be driven by the discrete GPU.
+ *       NV0073_CTRL_DFP_DISP_MUX_MODE_DYNAMIC
+ *         Indicates that the MUX mode is dynamic. It is only in this mode that
+ *         the display MUX can be toggled between discrete and hybrid dynamically.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS (0x731163U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID (0x63U)
+
+typedef struct NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 muxStatus;
+} NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS;
+
+/* valid flags */
+#define NV0073_CTRL_DFP_DISP_MUX_STATE                1:0
+#define NV0073_CTRL_DFP_DISP_MUX_STATE_INVALID        0x00000000U
+#define NV0073_CTRL_DFP_DISP_MUX_STATE_INTEGRATED_GPU 0x00000001U
+#define NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU   0x00000002U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE                 4:2
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_INVALID         0x00000000U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_INTEGRATED_ONLY 0x00000001U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_DISCRETE_ONLY   0x00000002U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_HYBRID          0x00000003U
+#define NV0073_CTRL_DFP_DISP_MUX_MODE_DYNAMIC         0x00000004U
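+
+/*
+ * Usage sketch (illustrative only; DRF_VAL() is the bit-field extractor
+ * from nvmisc.h): decoding the muxStatus value returned by the query
+ * above, where params is a filled NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS.
+ *
+ *     NvU32 state = DRF_VAL(0073_CTRL, _DFP_DISP_MUX, _STATE, params.muxStatus);
+ *     NvU32 mode  = DRF_VAL(0073_CTRL, _DFP_DISP_MUX, _MODE,  params.muxStatus);
+ *     if (state == NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU) {
+ *         // panel currently driven by the discrete GPU
+ *     }
+ *     // Dynamic toggling is only possible when mode == ..._MODE_DYNAMIC.
+ */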
+
+
+/*
+* NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING
+*
+* This command can be used to get DSI mode timing parameters.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device. This parameter
+*     should be set to zero for default behavior.
+*   displayId
+*     This parameter specifies the ID of the display on which the DSI
+*     info will be set. The display ID must be a DSI-capable display.
+*   hActive
+*     This parameter specifies the horizontal length of the active pixel
+*     data in the raster.
+*   vActive
+*     This parameter specifies the vertical lines of the active pixel
+*     data in the raster.
+*   hFrontPorch
+*     This parameter specifies the number of horizontal front porch
+*     blanking pixels in the raster.
+*   vFrontPorch
+*     This parameter specifies the number of lines of the vertical front
+*     porch in the raster.
+*   hBackPorch
+*     This parameter specifies the number of horizontal back porch
+*     blanking pixels in the raster.
+*   vBackPorch
+*     This parameter specifies the number of lines of the vertical back
+*     porch in the raster.
+*   hSyncWidth
+*     This parameter specifies the number of horizontal sync pixels in
+*     the raster.
+*   vSyncWidth
+*     This parameter specifies the number of lines of the vertical sync
+*     in the raster.
+*   bpp
+*     This parameter specifies the depth (Bits per Pixel) of the output
+*     display stream.
+*   refresh
+*     This parameter specifies the refresh rate of the panel (in Hz).
+*   pclkHz
+*     This parameter specifies the pixel clock rate in Hz.
+*   numLanes
+*     Number of DSI data lanes.
+*   dscEnable
+*     Flag to indicate if DSC can be enabled, which in turn indicates if
+*     the panel supports DSC.
+*   dscBpp
+*     DSC Bits per pixel.
+*   dscNumSlices
+*     Number of slices for DSC.
+*   dscDualDsc
+*     Flag to indicate if the panel supports DSC streams from two DSI
+*     controllers.
+*   dscSliceHeight
+*     Height of DSC slices.
+*   dscBlockPrediction
+*     Flag to indicate if DSC Block Prediction needs to be enabled.
+*   dscDecoderVersionMajor
+*     Major version number of the DSC decoder on the Panel.
+*   dscDecoderVersionMinor
+*     Minor version number of the DSC decoder on the Panel.
+*   dscEncoderCaps
+*     Capabilities of the DSC encoder in the SoC.
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_NOT_SUPPORTED
+*/
+
+#define NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING (0x731166U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS_MESSAGE_ID (0x66U)
+
+typedef struct NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 hActive;
+    NvU32 vActive;
+    NvU32 hFrontPorch;
+    NvU32 vFrontPorch;
+    NvU32 hBackPorch;
+    NvU32 vBackPorch;
+    NvU32 hSyncWidth;
+    NvU32 vSyncWidth;
+    NvU32 bpp;
+    NvU32 refresh;
+    NvU32 pclkHz;
+    NvU32 numLanes;
+    NvU32 dscEnable;
+    NvU32 dscBpp;
+    NvU32 dscNumSlices;
+    NvU32 dscDualDsc;
+    NvU32 dscSliceHeight;
+    NvU32 dscBlockPrediction;
+    NvU32 dscDecoderVersionMajor;
+    NvU32 dscDecoderVersionMinor;
+
+    struct {
+        NvBool bDscSupported;
+        NvU32  encoderColorFormatMask;
+        NvU32  lineBufferSizeKB;
+        NvU32  rateBufferSizeKB;
+        NvU32  bitsPerPixelPrecision;
+        NvU32  maxNumHztSlices;
+        NvU32  lineBufferBitDepth;
+    } dscEncoderCaps;
+} NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS;
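+
+/*
+ * Worked example (illustrative only): the returned timing fields are
+ * related by pclkHz ~= htotal * vtotal * refresh, where the totals include
+ * the porches and sync width. A client-side sanity check on a filled
+ * params struct p might look like:
+ *
+ *     NvU32 htotal = p.hActive + p.hFrontPorch + p.hSyncWidth + p.hBackPorch;
+ *     NvU32 vtotal = p.vActive + p.vFrontPorch + p.vSyncWidth + p.vBackPorch;
+ *     NvU64 expect = (NvU64)htotal * vtotal * p.refresh;   // in Hz
+ *     // expect should match p.pclkHz to within rounding of the panel clock.
+ */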
+
+
+/*
+ * NV0073_CTRL_CMD_DFP_GET_FIXED_MODE_TIMING
+ *
+ * This control call is used to retrieve the display mode timing info that's
+ * specified for a given DFP from an offline configuration blob (e.g., Device Tree).
+ * This display timing info is intended to replace the timings exposed in a
+ * sink's EDID.
+ *
+ *   subDeviceInstance (in)
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *   displayId (in)
+ *     ID of the display device for which the timings should be retrieved.
+ *   stream (in)
+ *     For MST connectors with static topologies (e.g., DP serializers),
+ *     this parameter further identifies the video stream for which the
+ *     timings should be retrieved.
+ *   valid (out)
+ *     Indicates whether a valid display timing was found for this DFP.
+ *   hActive (out)
+ *     Horizontal active width in pixels.
+ *   hFrontPorch (out)
+ *     Horizontal front porch.
+ *   hSyncWidth (out)
+ *     Horizontal sync width.
+ *   hBackPorch (out)
+ *     Horizontal back porch.
+ *   vActive (out)
+ *     Vertical active height in lines.
+ *   vFrontPorch (out)
+ *     Vertical front porch.
+ *   vSyncWidth (out)
+ *     Vertical sync width.
+ *   vBackPorch (out)
+ *     Vertical back porch.
+ *   pclkKHz (out)
+ *     Pixel clock frequency in kHz.
+ *   rrx1k (out)
+ *     Refresh rate in units of 0.001 Hz.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_DFP_GET_FIXED_MODE_TIMING (0x731172) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8 | NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS_MESSAGE_ID)" */
+
+#define NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS_MESSAGE_ID (0x72U)
+
+typedef struct NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU8   stream;
+
+    NvBool valid;
+
+    NvU16  hActive;
+    NvU16  hFrontPorch;
+    NvU16  hSyncWidth;
+    NvU16  hBackPorch;
+
+    NvU16  vActive;
+    NvU16  vFrontPorch;
+    NvU16  vSyncWidth;
+    NvU16  vBackPorch;
+
+    NvU32  pclkKHz;
+    NvU32  rrx1k;
+} NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS;
+
+/* _ctrl0073dfp_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
new file mode 100644
index 0000000..5c48ad5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
@@ -0,0 +1,2752 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073dp.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+
+/* NV04_DISPLAY_COMMON dp-display-specific control commands and parameters */
+
+/*
+ * NV0073_CTRL_CMD_DP_AUXCH_CTRL
+ *
+ * This command can be used to perform an aux channel transaction to the
+ * displayPort receiver.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *     This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the dfp
+ *     caps should be returned. The display ID must be a dfp display.
+ *     If more than one displayId bit is set or the displayId is not a dfp,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   bAddrOnly
+ *     If set to NV_TRUE, this parameter prompts an address-only
+ *     i2c-over-AUX transaction to be issued, if supported. Otherwise the
+ *     call fails with NVOS_STATUS_ERR_NOT_SUPPORTED. The size parameter is
+ *     expected to be 0 for address-only transactions.
+ *   cmd
+ *     This parameter is an input to this command. The cmd parameter follows
+ *     Section 2.4 AUX channel syntax in the DisplayPort spec.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_DP_AUXCH_CMD_TYPE
+ *         This specifies the request command transaction:
+ *           NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C
+ *             Set this value to indicate an I2C transaction.
+ *           NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX
+ *             Set this value to indicate a DisplayPort transaction.
+ *       NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT
+ *         This field is dependent on NV0073_CTRL_DP_AUXCH_CMD_TYPE.
+ *         It is only valid if NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C
+ *         is specified above and indicates a middle of transaction.
+ *         In the case of AUX, this field should be set to zero. The valid
+ *         values are:
+ *           NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE
+ *             The I2C transaction is not in the middle of a transaction.
+ *           NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE
+ *             The I2C transaction is in the middle of a transaction.
+ *       NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE
+ *         The request type specifies if we are doing a read/write or write
+ *         status request:
+ *           NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ
+ *             An I2C or AUX channel read is requested.
+ *           NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE
+ *             An I2C or AUX channel write is requested.
+ *           NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS
+ *             An I2C write status request is desired. This value should
+ *             not be set in the case of an AUX CH request and only applies
+ *             to the I2C write transaction command.
+ *   addr
+ *     This parameter is an input to this command. The addr parameter follows
+ *     Section 2.4 in the DisplayPort spec and the client should refer to the
+ *     valid addresses in the DisplayPort spec. Only the first 20 bits are valid.
+ *   data[]
+ *     In the case of a read transaction, this parameter returns the data from
+ *     the transaction request. In the case of a write transaction, the client
+ *     should write to this buffer the data to send. The max # of bytes
+ *     allowed is NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE.
+ *   size
+ *     Specifies how many data bytes to read/write depending on the transaction
+ *     type. The input size value is indexed from 0. That means if you want to
+ *     read 1 byte -> size = 0, 2 bytes -> size = 1, 3 bytes -> size = 2, up to
+ *     16 bytes where size = 15. On return, this parameter returns the total
+ *     number of data bytes successfully read/written from/to the transaction
+ *     (indexed from 1). That is, if you successfully requested 1 byte, you
+ *     would send down size = 0. On return, you should expect size = 1 if the
+ *     byte was successfully read. (Note that it is valid for a display to
+ *     reply with fewer than the requested number of bytes; in that case, it is
+ *     up to the client to make a new request for the remaining bytes.)
+ *   replyType
+ *     This parameter is an output to this command.
+ *     It returns the auxChannel status after the end of the aux Ch transaction.
+ *     The valid values are based on the DisplayPort spec:
+ *       NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK
+ *         In the case of a write,
+ *           AUX: the write transaction completed and all data bytes were written.
+ *           I2C: 'size' return bytes have been written to the i2c slave.
+ *         In the case of a read, a return of ACK indicates readiness to reply
+ *         to another read request.
+ *       NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK
+ *         In the case of a write, only the first 'size' return bytes have been
+ *         written. In the case of a read, it implies that the receiver does
+ *         not have the requested data for the read request transaction.
+ *       NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER
+ *         Not ready for the write/read request; the client should retry later.
+ *       NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK
+ *         Applies to I2C transactions only. For an I2C write transaction: the
+ *         first 'size' return bytes have been written to the I2C slave before
+ *         getting a NACK. For a read I2C transaction, the I2C slave has NACKed
+ *         the I2C address.
+ *       NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER
+ *         Applicable to I2C transactions. For I2C write and read transactions,
+ *         the I2C slave has yet to ACK or NACK the I2C transaction.
+ *       NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT
+ *         The receiver did not respond within the timeout period defined in
+ *         the DisplayPort 1.1a specification.
+ *   retryTimeMs
+ *     This parameter is an output to this command. In the case of an
+ *     NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time
+ *     duration in milliseconds after which the client should retry this command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_RETRY
+ */
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+#define NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bAddrOnly;
+    NvU32  cmd;
+    NvU32  addr;
+    NvU8   data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+    NvU32  size;
+    NvU32  replyType;
+    NvU32  retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE                   3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C               (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX               (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT                2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE          (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE           (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE               1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE         (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ          (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS  (0x00000002U)
+
+#define NV0073_CTRL_DP_AUXCH_ADDR                       20:0
+
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE                  3:0
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK              (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK             (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER            (0x00000002U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT          (0x00000003U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK          (0x00000004U)
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER         (0x00000008U)
+
+// This is not a register field; this is the software failure case when we
+// have an invalid argument.
+#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_INVALID_ARGUMENT (0xffffffffU)
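+
+/*
+ * Usage sketch (illustrative only; handles and NvRmControl() assumed from
+ * the client's RM setup): reading the first 16 bytes of the DPCD
+ * receiver-capability field with a native AUX read. Note the size
+ * convention described above: the request size is 0-indexed, the returned
+ * size is 1-indexed.
+ *
+ *     NV0073_CTRL_DP_AUXCH_CTRL_PARAMS aux = { 0 };
+ *     aux.displayId = dpDisplayId;   // hypothetical DP display bit
+ *     aux.bAddrOnly = NV_FALSE;
+ *     aux.cmd  = DRF_DEF(0073_CTRL, _DP_AUXCH_CMD, _TYPE, _AUX) |
+ *                DRF_DEF(0073_CTRL, _DP_AUXCH_CMD, _REQ_TYPE, _READ);
+ *     aux.addr = 0x00000;            // DPCD 0x00000 (DPCD_REV et al.)
+ *     aux.size = 15;                 // request 16 bytes (0-indexed)
+ *     NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                    NV0073_CTRL_CMD_DP_AUXCH_CTRL,
+ *                                    &aux, sizeof(aux));
+ *     // On NV_OK with replyType == ..._REPLYTYPE_ACK, aux.size (1-indexed)
+ *     // bytes of data[] are valid; fewer than 16 means re-request the rest.
+ */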
+
+/*
+ * NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA
+ *
+ * This command can be used to set the semaphore in order to gain control of
+ * the aux channel. This control is only used in HW verification.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the dfp
+ *     caps should be returned. The display ID must be a dfp display
+ *     as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ *     If more than one displayId bit is set or the displayId is not a dfp,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   owner
+ *     This parameter is an input to this command.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RM
+ *         Write the aux channel semaphore for the resource manager to own
+ *         the aux channel.
+ *       NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_VBIOS
+ *         Write the aux channel semaphore for vbios/efi to own the
+ *         aux channel. This value is used only for HW verification
+ *         and should not be used in normal driver operation.
+ *       NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_PMU
+ *         Write the aux channel semaphore for the pmu to own the
+ *         aux channel. This value is used only by the pmu
+ *         and should not be used in normal driver operation.
+ *       NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_DPU
+ *         Write the aux channel semaphore for the dpu to own the
+ *         aux channel; it should not be used in normal driver operation.
+ *       NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_SEC2
+ *         Write the aux channel semaphore for sec2 to own the
+ *         aux channel; it should not be used in normal driver operation.
+ *       NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RELEASE
+ *         Write the aux channel semaphore for hardware to own the
+ *         aux channel. This value is used only for HW verification
+ *         and should not be used in normal driver operation.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA (0x731342U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 owner;
+} NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER         2:0
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RELEASE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RM      (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_VBIOS   (0x00000002U)
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_PMU     (0x00000003U)
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_DPU     (0x00000004U)
+#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_SEC2    (0x00000005U)
+
+/*
+ * NV0073_CTRL_CMD_DP_CTRL
+ *
+ * This command is used to set various displayPort configurations for
+ * the specified displayId such as lane count and link bandwidth. It
+ * is assumed that link training has already occurred.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device.
+ *     This parameter should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the dfp
+ *     caps should be returned. The display ID must be a dfp display.
+ *     If more than one displayId bit is set or the displayId is not a dfp,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   cmd
+ *     This parameter is an input to this command.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_DP_CMD_SET_LANE_COUNT
+ *         Set to specify the number of displayPort lanes to configure.
+ *           NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE
+ *             No request to set the displayport lane count.
+ *           NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE
+ *             Set this value to indicate a displayport lane count change.
+ *       NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH
+ *         Set to specify a request to change the link bandwidth.
+ *           NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH_FALSE
+ *             No request to set the displayport link bandwidth.
+ *           NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH_TRUE
+ *             Set this value to indicate a displayport link bandwidth change.
+ *       NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD
+ *         Set to disable downspread during link training.
+ *           NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE
+ *             Downspread will be enabled.
+ *           NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE
+ *             Downspread will be disabled (e.g. for compliance testing).
+ *       NV0073_CTRL_DP_CMD_SET_FORMAT_MODE
+ *         This field specifies the DP stream mode.
+ *           NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM
+ *             This value indicates that single stream mode is specified.
+ *           NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM
+ *             This value indicates that multi stream mode is specified.
+ *       NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING
+ *         Set to do Fast link training (avoid AUX transactions for link
+ *         training). We need to restore all the previously trained link
+ *         settings (e.g. the drive current/preemphasis settings) before doing
+ *         FLT. During FLT, we send training pattern 1 followed by training
+ *         pattern 2, each for a period of 500us.
+ *           NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO
+ *             Not a fast link training scenario.
+ *           NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES
+ *             Do fast link training.
+ *       NV0073_CTRL_DP_CMD_NO_LINK_TRAINING
+ *         Set to do No link training. We need to restore all the previously
+ *         trained link settings (e.g. the drive current/preemphasis settings)
+ *         before doing NLT, but we don't need to do the Clock Recovery and
+ *         Channel Equalization. (Please refer to the NVIDIA PANEL SELFREFRESH
+ *         CONTROLLER SPECIFICATION 3.1.6 for the detailed flow.)
+ *           NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO
+ *             Not a no link training scenario.
+ *           NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES
+ *             Do no link training.
+ *       NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING
+ *         Specifies whether RM should use the DP Downspread setting specified
+ *         by the NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD command regardless of
+ *         what the Display is capable of. This is used along with the Fake
+ *         link training option so that we can configure the GPU to
+ *         enable/disable spread when a real display is not connected.
+ *           NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE
+ *             RM always uses the DP Downspread setting specified.
+ *           NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT
+ *             RM will enable Downspread only if the display supports it. (default)
+ *       NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING
+ *         Specifies whether RM should skip HW training of the link.
+ *         If this is the case then RM only updates its SW state without
+ *         actually touching any HW registers. Clients should use this ONLY if
+ *         they have determined that -
+ *           a. the link is trained and not lost
+ *           b. the desired link config is the same as the current trained link config
+ *           c. the link is not in D3 (should be in D0)
+ *           NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO
+ *             RM doesn't skip HW LT as the current Link Config is not the same
+ *             as the requested Link Config.
+ *           NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES
+ *             RM skips HW LT and only updates its SW state as the client has
+ *             determined that the current state of the link and the requested
+ *             Link Config are the same.
+ *       NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG
+ *         Set if the client does not want link training to happen.
+ *         This should ONLY be used for HW verification.
+ *           NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE
+ *             This is normal production behaviour which shall perform
+ *             link training or follow the normal procedure for lane count
+ *             reduction.
+ *           NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE
+ *             Set this value to not perform link config steps; this should
+ *             only be turned on for HW verif testing. If _LINK_BANDWIDTH
+ *             or _LANE_COUNT is set, RM will only write to the TX DP registers
+ *             and perform no link training.
+ *       NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED
+ *         This field specifies if the source grants a Post Link training
+ *         Adjustment request or not.
+ *           NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO
+ *             Source does not grant the Post Link training Adjustment request.
+ *           NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES
+ *             Source grants the Post Link training Adjustment request.
+ *       NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING
+ *         This field specifies if fake link training is to be done. This will
+ *         program enough of the hardware to avoid any hardware hangs and,
+ *         depending upon the option chosen by the client, the OR will be
+ *         enabled for transmission.
+ *           NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO
+ *             No Fake LT will be performed.
+ *           NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION
+ *             The SOR will not be powered up during Fake LT.
+ *           NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON
+ *             The SOR will be powered up during Fake LT.
+ *       NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER
+ *         This field specifies if the source wants to link train LT Tunable
+ *         Repeaters or not.
+ *           NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO
+ *             Source does not want to link train LT Tunable Repeaters.
+ *           NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES
+ *             Source wants to link train LT Tunable Repeaters.
+ *       NV0073_CTRL_DP_CMD_BANDWIDTH_TEST
+ *         Set if the client wants to reset the link after the link
+ *         training is done, as a part of uncommitting a DP display.
+ *           NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO
+ *             This is for normal operation, if DD decided not to reset the link.
+ *           NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES
+ *             This is to reset the link, if DD decided to uncommit the display
+ *             because the link is no longer required to be enabled, as in a DP
+ *             compliance test.
+ *       NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE
+ *         Set if the client does not want link training to happen.
+ *         This should ONLY be used for HW verification if necessary.
+ *           NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE
+ *             This is normal production behaviour which shall perform
+ *             pre link training checks such as if both rx and tx are capable
+ *             of the requested config for lane and link bw.
+ *           NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE
+ *             Set this value to bypass the link config check; this should
+ *             only be turned on for HW verif testing. If _LINK_BANDWIDTH
+ *             or _LANE_COUNT is set, RM will not check TX and DX caps.
+ *       NV0073_CTRL_DP_CMD_FALLBACK_CONFIG
+ *         Set if the config requested by the client fails and the link is
+ *         being trained for the fallback config.
+ *           NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE
+ *             This is the normal case when the link is being trained for a
+ *             requested config.
+ *           NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE
+ *             Set this value in case the link configuration for the requested
+ *             config fails and the link is being trained for a fallback config.
+ *       NV0073_CTRL_DP_CMD_ENABLE_FEC
+ *         Specifies whether RM should set NV_DPCD14_FEC_CONFIGURATION_FEC_READY
+ *         before link training if the client has determined that FEC is
+ *         required (for DSC). If required to be enabled, RM sets the FEC enable
+ *         bit in the panel and starts link training.
+ *         Enabling/disabling FEC on the GPU side is not done during Link
+ *         training, and the RM Ctrl call NV0073_CTRL_CMD_DP_CONFIGURE_FEC has
+ *         to be called explicitly to enable/disable FEC after LT (including
+ *         PostLT LQA). If enabled, FEC would be disabled while powering down
+ *         the link. The client has to make sure to account for the 3% overhead
+ *         of transmitting FEC symbols while calculating DP bandwidth.
+ *           NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE
+ *             This is the normal case when FEC is not required.
+ *           NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE
+ *             Set this value in case FEC needs to be enabled.
+ *   data
+ *     This parameter is an input and output to this command.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_DP_DATA_SET_LANE_COUNT
+ *         This field specifies the desired setting for lane count. A client
+ *         may choose any lane count as long as it does not exceed the
+ *         capability of the DisplayPort receiver as indicated in the
+ *         receiver capability field. The valid values for this field are:
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0
+ *             For zero-lane configurations, link training is shut down.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1
+ *             For one-lane configurations, lane0 is used.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2
+ *             For two-lane configurations, lane0 and lane1 are used.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4
+ *             For four-lane configurations, all lanes are used.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8
+ *             For devices that support 8-lane DP.
+ *         On return, the lane count setting is returned, which may be
+ *         different from the requested input setting.
+ *       NV0073_CTRL_DP_DATA_SET_LINK_BW
+ *         This field specifies the desired setting for link bandwidth. There
+ *         are only four supported main link bandwidth settings. The
+ *         valid values for this field are:
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS
+ *         On return, the link bandwidth setting is returned, which may be
+ *         different from the requested input setting.
+ *       NV0073_CTRL_DP_DATA_TARGET
+ *         This field specifies which physical repeater or sink is to be
+ *         trained. The client should make sure that
+ *           1. Physical repeaters are targeted in order, starting from the
+ *              one closest to the GPU.
+ *           2.
+ *              All physical repeaters are properly trained before the sink
+ *              is targeted.
+ *         The valid values for this field are:
+ *           NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_X
+ *             'X' denotes the physical repeater index. It's a 1-based index to
+ *             reserve 0 for _SINK. 'X' can't be more than 8.
+ *           NV0073_CTRL_DP_DATA_TARGET_SINK
+ *   err
+ *     This parameter provides info regarding the outcome of this control
+ *     call. If zero, no errors were found. Otherwise, this parameter will
+ *     specify the error detected. The valid parameter is broken down as
+ *     follows:
+ *       NV0073_CTRL_DP_ERR_SET_LANE_COUNT
+ *         If set to _ERR, set lane count failed.
+ *       NV0073_CTRL_DP_ERR_SET_LINK_BANDWIDTH
+ *         If set to _ERR, set link bandwidth failed.
+ *       NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD
+ *         If set to _ERR, disable downspread failed.
+ *       NV0073_CTRL_DP_ERR_INVALID_PARAMETER
+ *         If set to _ERR, at least one of the calling functions
+ *         failed due to an invalid parameter.
+ *       NV0073_CTRL_DP_ERR_SET_LINK_TRAINING
+ *         If set to _ERR, link training failed.
+ *       NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER
+ *         If set to _ERR, the operation to Link Train the repeater failed.
+ *       NV0073_CTRL_DP_ERR_ENABLE_FEC
+ *         If set to _ERR, the operation to enable FEC failed.
+ *   retryTimeMs
+ *     This parameter is an output to this command. In the case of an
+ *     NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time
+ *     duration in milliseconds after which the client should retry this command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_RETRY
+ */
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 data;
+    NvU32 err;
+    NvU32 retryTimeMs;
+    NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT                  0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE            (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE             (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW                     1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE               (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE                (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD              2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE         (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED                          3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE                 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM   (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM    (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING              5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO           (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES          (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING                6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO             (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES            (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING            7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE      (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE       (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING          8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT  (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE    (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING                           9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES                       (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED                       10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO                    (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES                   (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING                            12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO                         (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION  (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON     (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER                            13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO                         (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES                        (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG                               14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE                         (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE                          (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC                                    15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE                              (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE                               (0x00000001U)
+
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST                                29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO                             (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES                            (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE                     30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE               (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE                (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG                           31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE                     (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE                      (0x00000001U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT                               4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0                             (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1                             (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2                             (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4                             (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8                             (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW                                  15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS                         (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS                         (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS                         (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS                         (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS                         (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS                         (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS                         (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS                         (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING                         18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO                      (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES                     (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET                                       22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK                                  (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0                        (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1                        (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2                        (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3                        (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4                        (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5                        (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6                        (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7                        (0x00000008U)
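+
+/*
+ * Usage sketch (illustrative only; DRF_DEF() from nvmisc.h, handles assumed
+ * from the client's RM setup): requesting a 4-lane, HBR2 (5.40 Gbps) link
+ * through NV0073_CTRL_CMD_DP_CTRL.
+ *
+ *     NV0073_CTRL_DP_CTRL_PARAMS lt = { 0 };
+ *     lt.displayId = dpDisplayId;   // hypothetical DP display bit
+ *     lt.cmd  = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) |
+ *               DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE);
+ *     lt.data = DRF_DEF(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, _4) |
+ *               DRF_DEF(0073_CTRL, _DP_DATA, _SET_LINK_BW, _5_40GBPS) |
+ *               DRF_DEF(0073_CTRL, _DP_DATA, _TARGET, _SINK);
+ *     NV_STATUS status = NvRmControl(hClient, hDisplayCommon,
+ *                                    NV0073_CTRL_CMD_DP_CTRL,
+ *                                    &lt, sizeof(lt));
+ *     // On return, lt.data holds the settings actually trained, and lt.err
+ *     // flags any stage (lane count, link bw, CR/EQ, FEC) that failed.
+ */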
+
+#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT             0:0
+#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT_NOERR       (0x00000000U)
+#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT_ERR         (0x00000001U)
+#define NV0073_CTRL_DP_ERR_SET_LINK_BW                1:1
+#define NV0073_CTRL_DP_ERR_SET_LINK_BW_NOERR          (0x00000000U)
+#define NV0073_CTRL_DP_ERR_SET_LINK_BW_ERR            (0x00000001U)
+#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD         2:2
+#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD_NOERR   (0x00000000U)
+#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD_ERR     (0x00000001U)
+#define NV0073_CTRL_DP_ERR_UNUSED                     3:3
+#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY             4:4
+#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY_NOERR       (0x00000000U)
+#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY_ERR         (0x00000001U)
+#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION       5:5
+#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION_NOERR (0x00000000U)
+#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION_ERR   (0x00000001U)
+#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER         6:6
+#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER_NOERR   (0x00000000U)
+#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER_ERR     (0x00000001U)
+#define NV0073_CTRL_DP_ERR_ENABLE_FEC                 7:7
+#define NV0073_CTRL_DP_ERR_ENABLE_FEC_NOERR           (0x00000000U)
+#define NV0073_CTRL_DP_ERR_ENABLE_FEC_ERR             (0x00000001U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE               11:8
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_0_LANE        (0x00000000U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_1_LANE        (0x00000001U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_2_LANE        (0x00000002U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_4_LANE        (0x00000004U)
+#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_8_LANE        (0x00000008U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE               15:12
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_0_LANE        (0x00000000U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_1_LANE        (0x00000001U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_2_LANE        (0x00000002U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_4_LANE        (0x00000004U)
+#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_8_LANE        (0x00000008U)
+#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER          30:30
+#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER_NOERR    (0x00000000U)
+#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER_ERR      (0x00000001U)
+#define NV0073_CTRL_DP_ERR_LINK_TRAINING              31:31
+#define NV0073_CTRL_DP_ERR_LINK_TRAINING_NOERR        (0x00000000U)
+#define NV0073_CTRL_DP_ERR_LINK_TRAINING_ERR          (0x00000001U)
+
+/*
+ * NV0073_CTRL_DP_LANE_DATA_PARAMS
+ *
+ * This structure provides lane characteristics.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the dfp
+ *     caps should be returned. The display ID must be a dfp display.
+ *     If more than one displayId bit is set or the displayId is not a dfp,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   numLanes
+ *     Indicates the number of lanes for which the data is valid.
+ *   data
+ *     This parameter is an input to this command.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS
+ *         This field specifies the preemphasis level set in the lane.
+ *         The valid values for this field are:
+ *           NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE
+ *             No preemphasis for this lane.
+ *           NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1
+ *             Preemphasis set to 3.5 dB.
+ *           NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2
+ *             Preemphasis set to 6.0 dB.
+ *           NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3
+ *             Preemphasis set to 9.5 dB.
+ *       NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT
+ *         This field specifies the drive current set in the lane.
+ *         The valid values for this field are:
+ *           NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0
+ *             Drive current level is set to 8 mA.
+ *           NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1
+ *             Drive current level is set to 12 mA.
+ *           NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2
+ *             Drive current level is set to 16 mA.
+ *           NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3
+ *             Drive current level is set to 24 mA.
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numLanes;
+    NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS         1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE    (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1  (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2  (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3  (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT        3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+/*
+ * NV0073_CTRL_CMD_GET_DP_LANE_DATA
+ *
+ * This command is used to get the current pre-emphasis and drive current
+ * level values for the specified number of lanes.
+ *
+ * The command takes a NV0073_CTRL_DP_LANE_DATA_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance and displayId filled in.
+ * The arguments of this structure and the format of preemphasis and drive-
+ * current levels are described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ *       should not be used in normal DP operations. Preemphasis
+ *       and drive current levels will be set during Link training
+ *       in normal DP operations.
+ *
+ */
+
+#define NV0073_CTRL_CMD_DP_GET_LANE_DATA (0x731345U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | 0x45" */
+
+
+/*
+ * NV0073_CTRL_CMD_SET_DP_LANE_DATA
+ *
+ * This command is used to set the pre-emphasis and drive current
+ * level values for the specified number of lanes.
+ *
+ * The command takes a NV0073_CTRL_DP_LANE_DATA_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance, displayId, number of
+ * lanes, preemphasis and drive current values filled in.
+ * The arguments of this structure and the format of preemphasis and drive-
+ * current levels are described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ *       should not be used in normal DP operations.
+ *
+ */
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | 0x46" */
+
+/*
+ * NV0073_CTRL_DP_CSTM
+ *
+ * This structure specifies the 80-bit DP CSTM test pattern data.
+ * The fields of this structure are to be specified as follows:
+ *   lower  takes bits 31:0
+ *   middle takes bits 63:32
+ *   upper  takes bits 79:64
+ *
+ */
+typedef struct NV0073_CTRL_DP_CSTM {
+    NvU32 lower;
+    NvU32 middle;
+    NvU32 upper;
+} NV0073_CTRL_DP_CSTM;
+
+/*
+ * NV0073_CTRL_DP_TESTPATTERN
+ *
+ * This structure specifies the possible test patterns available in
+ * DisplayPort. The field testPattern can be one of the following
+ * values:
+ *   NV0073_CTRL_DP_TESTPATTERN_DATA_NONE
+ *     No test pattern on the main link
+ *   NV0073_CTRL_DP_TESTPATTERN_DATA_D10_2
+ *     D10.2 pattern on the main link
+ *   NV0073_CTRL_DP_TESTPATTERN_DATA_SERMP
+ *     SERMP pattern on the main link
+ *   NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7
+ *     PRBS7 pattern on the main link
+ *   NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM
+ *     80-bit custom (CSTM) pattern on the main link
+ *   NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE
+ *     HBR2 compliance pattern on the main link
+ *   NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT3
+ *     CP2520 pattern 3 on the main link
+ *
+ */
+
+typedef struct NV0073_CTRL_DP_TESTPATTERN {
+    NvU32 testPattern;
+} NV0073_CTRL_DP_TESTPATTERN;
+
+#define NV0073_CTRL_DP_TESTPATTERN_DATA 2:0
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_NONE (0x00000000U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_D10_2 (0x00000001U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_SERMP (0x00000002U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7 (0x00000003U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM (0x00000004U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE (0x00000005U)
+#define NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT3 (0x00000006U)
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_TESTPATTERN
+ *
+ * This command forces the main link to output the selected test patterns
+ * supported in the DP specs.
+ *
+ * The command takes a NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance, displayId and test pattern
+ * to be set as inputs.
+ * The arguments of this structure and the format of test patterns are
+ * described above.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display on which the test
+ *     pattern should be set. The display ID must be a DFP display.
+ *     If more than one displayId bit is set or the displayId is not a DFP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   testPattern
+ *     This parameter is of type NV0073_CTRL_DP_TESTPATTERN and specifies
+ *     the test pattern to set on the DisplayPort link. The format of this
+ *     structure is described above.
+ *   laneMask
+ *     This parameter specifies the bit mask of DP lanes on which the test
+ *     pattern is to be applied.
+ *   cstm
+ *     This parameter specifies the 80-bit CSTM test pattern; its lower,
+ *     middle and upper fields are described above.
+ *   bIsHBR2
+ *     This Boolean parameter is set to TRUE if the HBR2 compliance test is
+ *     being performed.
+ *   bSkipLaneDataOverride
+ *     Set to skip the override of pre-emphasis and drive current.
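+ *
+ * Editorial sketch (illustrative only): forcing a D10.2 pattern on lanes
+ * 0 and 1. "RmControl" again stands in for the client's control dispatch:
+ *
+ *     NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS p = { 0 };
+ *     p.displayId = displayId;
+ *     p.testPattern.testPattern =
+ *         DRF_DEF(0073_CTRL, _DP_TESTPATTERN, _DATA, _D10_2);
+ *     p.laneMask = 0x3;             // lanes 0 and 1
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_SET_TESTPATTERN, &p, sizeof(p));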
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ * should not be used in normal DP operations. Preemphasis
+ * and drive current will be set during link training in
+ * normal DP operations.
+ *
+ */
+
+#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_MESSAGE_ID (0x47U)
+
+typedef struct NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_DP_TESTPATTERN testPattern;
+    NvU8 laneMask;
+    NV0073_CTRL_DP_CSTM cstm;
+    NvBool bIsHBR2;
+    NvBool bSkipLaneDataOverride;
+} NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_TESTPATTERN (0x731347U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM0 31:0
+#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM1 63:32
+#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM2 15:0
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_TESTPATTERN
+ *
+ * This command returns the current test pattern set on the main link of
+ * DisplayPort.
+ *
+ * The command takes a NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS structure as the
+ * argument with the appropriate subDeviceInstance, displayId as inputs and
+ * returns the current test pattern in the testPattern field of the structure.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the test
+ *     pattern should be returned. The display ID must be a DFP display.
+ *     If more than one displayId bit is set or the displayId is not a DFP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   testPattern
+ *     This parameter is of type NV0073_CTRL_DP_TESTPATTERN and specifies the
+ *     test pattern set on the DisplayPort link. The format of this structure
+ *     is described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * NOTE: This control call is only for testing purposes and
+ * should not be used in normal DP operations.
+ *
+ */
+
+#define NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS_MESSAGE_ID (0x48U)
+
+typedef struct NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_DP_TESTPATTERN testPattern;
+} NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS;
+
+
+#define NV0073_CTRL_CMD_DP_GET_TESTPATTERN (0x731348U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA
+ *
+ * This structure specifies the Pre-emphasis/Drive Current/Postcursor2/TxPu
+ * information for a DisplayPort device. These are the current values that RM
+ * is using to map the levels for Pre-emphasis and Drive Current for Link
+ * Training.
+ *   preEmphasis
+ *     This field specifies the preemphasis values.
+ *   driveCurrent
+ *     This field specifies the driveCurrent values.
+ *   postcursor2
+ *     This field specifies the postcursor2 values.
+ *   TxPu
+ *     This field specifies the pull-up current source drive values.
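+ *
+ * Editorial note: following the nested typedefs below, the table is
+ * indexed outermost to innermost as postcursor2 level, then drive current
+ * level, then pre-emphasis level. A minimal sketch, assuming a filled-in
+ * table:
+ *
+ *     NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA tbl;
+ *     NvU32 txPu = tbl[pc2Level][dcLevel][peLevel].TxPu;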
+ */
+#define NV0073_CTRL_MAX_DRIVECURRENT_LEVELS 4U
+#define NV0073_CTRL_MAX_PREEMPHASIS_LEVELS 4U
+#define NV0073_CTRL_MAX_POSTCURSOR2_LEVELS 4U
+
+typedef struct NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1 {
+    NvU32 preEmphasis;
+    NvU32 driveCurrent;
+    NvU32 postCursor2;
+    NvU32 TxPu;
+} NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1;
+
+typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE1[NV0073_CTRL_MAX_PREEMPHASIS_LEVELS];
+
+typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE1 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE2[NV0073_CTRL_MAX_DRIVECURRENT_LEVELS];
+
+typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE2 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA[NV0073_CTRL_MAX_POSTCURSOR2_LEVELS];
+
+
+/*
+ * NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA
+ *
+ * This command is used to override the Pre-emphasis/Drive Current/PostCursor2/TxPu
+ * data in the RM.
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the digital display for which the
+ *     data should be applied. The display ID must be a digital display.
+ *     If more than one displayId bit is set or the displayId is not a DP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   dpData
+ *     This parameter is of type NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA
+ *     and specifies the Pre-emphasis/Drive Current/Postcursor2/TxPu information
+ *     for a DisplayPort device.
+ * The command takes a NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS
+ * structure as the argument with the appropriate subDeviceInstance, displayId,
+ * and dpData. The fields of this structure are described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA dpData;
+} NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA (0x731351U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA
+ *
+ * This command is used to get the Pre-emphasis/Drive Current/PostCursor2/TxPu data.
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the digital display for which the
+ *     data should be returned.
+ *     The display ID must be a digital display.
+ *     If more than one displayId bit is set or the displayId is not a DP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ * The command takes a NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS
+ * structure as the argument with the appropriate subDeviceInstance, displayId,
+ * and dpData. The fields of this structure are described above.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID (0x52U)
+
+typedef struct NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA dpData;
+} NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA (0x731352U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID" */
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL
+ *
+ * This command is used to set various Main Link configurations for
+ * the specified displayId, such as powering the Main Link up or down.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the DP display which owns
+ *     the Main Link to be adjusted. The display ID must be a DP display
+ *     as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ *     If more than one displayId bit is set or the displayId is not a DP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   ctrl
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN
+ *         This value will power down the Main Link.
+ *       NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP
+ *         This value will power up the Main Link.
+ *
+*/
+#define NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL (0x731356U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 ctrl;
+} NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE 0:0
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN (0x00000000U)
+#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP (0x00000001U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_AUDIO_MUTESTREAM
+ *
+ * This command returns the current audio mute state on the main link of
+ * DisplayPort.
+ *
+ * The command takes a NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS structure as
+ * the argument with the appropriate subDeviceInstance, displayId as inputs and
+ * returns the current mute status in the mute field of the structure.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device.
+ *     This parameter should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the audio
+ *     stream state should be returned. The display ID must be a DP display.
+ *     If the display ID is invalid or if it is not a DP display,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   mute
+ *     This parameter will return one of the following values:
+ *       NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE
+ *         Audio mute is currently disabled.
+ *       NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE
+ *         Audio mute is currently enabled.
+ *       NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO
+ *         Audio mute is automatically controlled by hardware.
+ *       NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_UNKNOWN
+ *         Audio mute is currently in an unknown state.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ *
+ */
+#define NV0073_CTRL_CMD_DP_GET_AUDIO_MUTESTREAM (0x731358U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID (0x58U)
+
+typedef struct NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 mute;
+} NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE (0x00000000U)
+#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE (0x00000001U)
+#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO (0x00000002U)
+#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_UNKNOWN (0x00000003U)
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM
+ *
+ * This command sets the current audio mute state on the main link of
+ * DisplayPort.
+ *
+ * The command takes a NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS structure as
+ * the argument with the appropriate subDeviceInstance, displayId as inputs,
+ * and whether to enable or disable mute in the mute parameter.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the display for which the audio
+ *     stream state should be set. The display ID must be a DP display.
+ *     If the display ID is invalid or if it is not a DP display,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   mute
+ *     This parameter is an input to this command.
+ *     Here are the current defined values:
+ *       NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE
+ *         Audio mute will be disabled.
+ *       NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE
+ *         Audio mute will be enabled.
+ *       NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO
+ *         Audio mute will be automatically controlled by hardware.
+ *
+ * Note: Any other value for mute in NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS
+ * is not allowed and the API will return an error.
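+ *
+ * Editorial sketch (illustrative only): muting the audio stream. As
+ * before, "RmControl" is a stand-in for the client's control dispatch:
+ *
+ *     NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS p = { 0 };
+ *     p.displayId = displayId;
+ *     p.mute = NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE;
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM,
+ *                        &p, sizeof(p));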
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID (0x59U)
+
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_ASSR_CTRL
+ *
+ * This command is used to control and query DisplayPort ASSR
+ * settings for the specified displayId.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the DP display which owns
+ *     the Main Link to be adjusted. The display ID must be a DP display
+ *     as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ *     If more than one displayId bit is set or the displayId is not a DP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   cmd
+ *     This input parameter specifies the command to execute. Legal
+ *     values for this parameter include:
+ *       NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE
+ *         This field can be used to query the ASSR state. When used, the
+ *         ASSR state value is returned in the data parameter.
+ *       NV0073_CTRL_DP_ASSR_CMD_DISABLE
+ *         This field can be used to control the ASSR disable state.
+ *       NV0073_CTRL_DP_ASSR_CMD_FORCE_STATE
+ *         This field can be used to control the ASSR state without looking
+ *         at whether the display supports it. Used in conjunction with
+ *         fake link training. Note that this updates the state on the
+ *         source side only. The sink is assumed to be configured for ASSR
+ *         by the client (DD).
+ *   data
+ *     This parameter specifies the data associated with the cmd
+ *     parameter.
+ *       NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED
+ *         This field indicates the state of ASSR when queried using the cmd
+ *         parameter. When used to control the state, it indicates whether
+ *         ASSR should be enabled or disabled.
+ *           NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_NO
+ *             When queried, this flag indicates that ASSR is not enabled on
+ *             the sink. When used as the data for CMD_FORCE_STATE, it
+ *             requests ASSR to be disabled on the source side.
+ *           NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_YES
+ *             When queried, this flag indicates that ASSR is enabled on
+ *             the sink. When used as the data for CMD_FORCE_STATE, it
+ *             requests ASSR to be enabled on the source side.
+ *   err
+ *     This output parameter specifies any errors associated with the cmd
+ *     parameter.
+ *       NV0073_CTRL_DP_ASSR_ERR_CAP
+ *         This field indicates the error pertaining to the ASSR capability
+ *         of the sink device.
+ *           NV0073_CTRL_DP_ASSR_ERR_CAP_NOERR
+ *             This flag indicates there is no error.
+ *           NV0073_CTRL_DP_ASSR_ERR_CAP_ERR
+ *             This flag indicates that the sink is not ASSR capable.
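+ *
+ * Editorial sketch (illustrative only): querying the ASSR state and
+ * testing the returned data field with the FLD_TEST_DRF helper from
+ * nvmisc.h ("RmControl" and the handles are stand-ins):
+ *
+ *     NV0073_CTRL_DP_ASSR_CTRL_PARAMS p = { 0 };
+ *     p.displayId = displayId;
+ *     p.cmd = NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE;
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_ASSR_CTRL, &p, sizeof(p));
+ *     if (status == NV_OK &&
+ *         FLD_TEST_DRF(0073_CTRL, _DP_ASSR, _DATA_STATE_ENABLED, _YES, p.data))
+ *     {
+ *         // ASSR is enabled on the sink
+ *     }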
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_ASSR_CTRL (0x73135aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_ASSR_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_ASSR_CTRL_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef struct NV0073_CTRL_DP_ASSR_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 data;
+    NvU32 err;
+} NV0073_CTRL_DP_ASSR_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_ASSR_CMD 31:0
+#define NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE (0x00000001U)
+#define NV0073_CTRL_DP_ASSR_CMD_DISABLE (0x00000002U)
+#define NV0073_CTRL_DP_ASSR_CMD_FORCE_STATE (0x00000003U)
+#define NV0073_CTRL_DP_ASSR_CMD_ENABLE (0x00000004U)
+#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED 0:0
+#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_NO (0x00000000U)
+#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_YES (0x00000001U)
+#define NV0073_CTRL_DP_ASSR_ERR_CAP 0:0
+#define NV0073_CTRL_DP_ASSR_ERR_CAP_NOERR (0x00000000U)
+#define NV0073_CTRL_DP_ASSR_ERR_CAP_ERR (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID
+ *
+ * This command is used to assign a displayId from the free pool
+ * to a specific AUX Address in a DP 1.2 topology. The topology
+ * is uniquely identified by the DisplayId of the DP connector.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This is the DisplayId of the DP connector to which the topology
+ *     is rooted.
+ *   preferredDisplayId
+ *     The client can send a preferredDisplayId which RM can use during
+ *     allocation, if available. If this Id is part of allDisplayMask in RM
+ *     then we return a free available Id to the client. However, if this is
+ *     set to NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID then we return
+ *     the allDisplayMask value.
+ *   useBFM
+ *     Set to true if DP-BFM is used during emulation/RTL Sim.
+ *
+ *   [out] displayIdAssigned
+ *     This is the out field that will receive the new displayId. If the
+ *     function fails this is guaranteed to be 0.
+ *   [out] allDisplayMask
+ *     This is the allDisplayMask RM variable, which is returned only when
+ *     preferredDisplayId is set to NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID.
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+/*
+ * There cannot be more than 128 devices in a topology (also per the DP 1.2
+ * specification).
+ * NOTE: Temporarily lowered to pass XAPI RM tests. Should be reevaluated!
+ */
+#define NV0073_CTRL_CMD_DP_MAX_TOPOLOGY_NODES 120U
+#define NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID 0xffffffffU
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID (0x5BU)
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 preferredDisplayId;
+
+    NvBool force;
+    NvBool useBFM;
+
+    NvU32 displayIdAssigned;
+    NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID
+ *
+ * This command is used to return a multistream displayId to the unused pool.
+ * You must not call this function while either the ARM or ASSEMBLY state cache
+ * refers to this displayId. The head must not be attached.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This is the displayId to free.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ *
+ */
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID (0x5CU)
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_LINK_CONFIG
+ *
+ * This command is used to query DisplayPort link config
+ * settings on the transmitter side.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the DP display which owns
+ *     the Main Link to be queried.
+ *     If more than one displayId bit is set or the displayId is not a DP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   laneCount
+ *     Number of lanes the DP transmitter hardware is set up to drive.
+ *   linkBW
+ *     The BW of each lane that the DP transmitter hardware is set up to
+ *     drive. The values returned will be according to the DP specifications.
+ *
+ */
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG (0x731360U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS_MESSAGE_ID (0x60U)
+
+typedef struct NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 laneCount;
+    NvU32 linkBW;
+} NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT 3:0
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW 3:0
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_2_70GBPS (0x0000000aU)
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_EDP_DATA
+ *
+ * This command is used to query Embedded DisplayPort information.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the eDP display which owns
+ *     the Main Link to be queried.
+ *     If more than one displayId bit is set or the displayId is not an eDP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   data
+ *     This output parameter specifies the data associated with the eDP
+ *     display. It is only valid if this function returns NV_OK.
+ *       NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER
+ *         This field indicates the state of the eDP panel power.
+ *           NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_OFF
+ *             This eDP panel is powered off.
+ *           NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_ON
+ *             This eDP panel is powered on.
+ *       NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF
+ *         This field tells the client whether the DPCD power off command
+ *         should be used for the current eDP panel.
+ *           NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_ENABLE
+ *             This eDP panel can use DPCD to power off the panel.
+ *           NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_DISABLE
+ *             This eDP panel cannot use DPCD to power off the panel.
+ *       NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE
+ *         This field tells the client the current eDP panel DPCD SET_POWER
+ *         (0x600) status.
+ *           NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D0
+ *             This eDP panel is currently up and in full power mode.
+ *           NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D3
+ *             This eDP panel is currently in standby.
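+ *
+ * Editorial sketch (illustrative only): reading and decoding the eDP data
+ * word with the FLD_TEST_DRF helper from nvmisc.h ("RmControl" and the
+ * handles are stand-ins):
+ *
+ *     NV0073_CTRL_DP_GET_EDP_DATA_PARAMS p = { 0 };
+ *     p.displayId = displayId;
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_GET_EDP_DATA, &p, sizeof(p));
+ *     if (status == NV_OK)
+ *     {
+ *         NvBool bPanelOn = FLD_TEST_DRF(0073_CTRL, _DP_GET_EDP_DATA,
+ *                                        _PANEL_POWER, _ON, p.data);
+ *     }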
+ */
+#define NV0073_CTRL_CMD_DP_GET_EDP_DATA (0x731361U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_EDP_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_GET_EDP_DATA_PARAMS_MESSAGE_ID (0x61U)
+
+typedef struct NV0073_CTRL_DP_GET_EDP_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 data;
+} NV0073_CTRL_DP_GET_EDP_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER 0:0
+#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_OFF (0x00000000U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_ON (0x00000001U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF 1:1
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_ENABLE (0x00000000U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_DISABLE (0x00000001U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE 2:2
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D0 (0x00000000U)
+#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D3 (0x00000001U)
+/*
+ * NV0073_CTRL_CMD_DP_CONFIG_STREAM
+ *
+ * This command sets various multi/single stream related params
+ * for a given head.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   head
+ *     Specifies the head index for the stream.
+ *   sorIndex
+ *     Specifies the SOR index for the stream.
+ *   dpLink
+ *     Specifies the DP link: either 0 or 1 (A, B).
+ *   bEnableOverride
+ *     Specifies whether we're manually configuring this stream.
+ *     If not set, none of the remaining settings have any effect.
+ *   bMST
+ *     Specifies whether in Multistream or Singlestream mode.
+ *   MST/SST
+ *     Structures for passing in either Multistream or Singlestream params.
+ *   slotStart
+ *     Specifies the start value of the timeslot.
+ *   slotEnd
+ *     Specifies the end value of the timeslot.
+ *   PBN
+ *     Specifies the PBN for the timeslot.
+ *   minHBlank
+ *     Specifies the min HBlank.
+ *   minVBlank
+ *     Specifies the min VBlank.
+ *   sendACT -- deprecated. A new control call has been added.
+ *     Specifies whether ACT has to be sent or not.
+ *   tuSize
+ *     Specifies the TU size value.
+ *   watermark
+ *     Specifies the stream watermark.
+ *   linkClkFreqHz -- moving to MvidWarParams. Use that instead.
+ *     Specifies the link freq in Hz. Note that this is the byte clock,
+ *     e.g. (5.4 GHz / 10).
+ *   actualPclkHz -- moving to MvidWarParams. Use that instead.
+ *     Specifies the actual pclk freq in Hz.
+ *   mvidWarEnabled
+ *     Specifies whether the MVID WAR is enabled.
+ *   MvidWarParams
+ *     Is valid if mvidWarEnabled is true.
+ *   bEnableTwoHeadOneOr
+ *     Whether two head one OR is enabled. If this is set then RM will
+ *     replicate SF settings of the Master head on the Slave head. The head
+ *     index passed should be that of the Master head.
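+ *
+ * Editorial sketch (illustrative only): a manual single-stream override.
+ * The values below are placeholders, not recommendations, and "RmControl"
+ * is a stand-in for the client's control dispatch:
+ *
+ *     NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS p = { 0 };
+ *     p.head = 0;
+ *     p.sorIndex = 0;
+ *     p.dpLink = 0;                     // link A
+ *     p.bEnableOverride = NV_TRUE;
+ *     p.bMST = NV_FALSE;
+ *     p.SST.bEnhancedFraming = NV_TRUE;
+ *     p.SST.tuSize = 64;
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_CONFIG_STREAM, &p, sizeof(p));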
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC: when this command has already been called
+ *
+ */
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID (0x62U)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 sorIndex;
+    NvU32 dpLink;
+
+    NvBool bEnableOverride;
+    NvBool bMST;
+    NvU32 singleHeadMultistreamMode;
+    NvU32 hBlankSym;
+    NvU32 vBlankSym;
+    NvU32 colorFormat;
+    NvBool bEnableTwoHeadOneOr;
+
+    struct {
+        NvU32 slotStart;
+        NvU32 slotEnd;
+        NvU32 PBN;
+        NvU32 Timeslice;
+        NvBool sendACT;          // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+        NvU32 singleHeadMSTPipeline;
+        NvBool bEnableAudioOverRightPanel;
+    } MST;
+
+    struct {
+        NvBool bEnhancedFraming;
+        NvU32 tuSize;
+        NvU32 waterMark;
+        NvU32 actualPclkHz;      // deprecated - use MvidWarParams
+        NvU32 linkClkFreqHz;     // deprecated - use MvidWarParams
+        NvBool bEnableAudioOverRightPanel;
+        struct {
+            NvU32 activeCnt;
+            NvU32 activeFrac;
+            NvU32 activePolarity;
+            NvBool mvidWarEnabled;
+            struct {
+                NvU32 actualPclkHz;
+                NvU32 linkClkFreqHz;
+            } MvidWarParams;
+        } Legacy;
+    } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_RATE_GOV
+ *
+ * This command enables rate governing for an MST stream.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   head
+ *     Specifies the head index for the stream.
+ *   sorIndex
+ *     Specifies the SOR index for the stream.
+ *   flags
+ *     Specifies rate governing, trigger type, wait on trigger and operation
+ *     type.
+ *
+ *     _FLAGS_OPERATION: whether this control call should program or check
+ *     for the status of a previous operation.
+ *
+ *     _FLAGS_STATUS: out only. The caller should check the status of
+ *     _FLAGS_OPERATION_CHECK_STATUS through this bit.
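+ *
+ * Editorial sketch (illustrative only): enabling rate governing with a
+ * loadv trigger, composing the flags with DRF_DEF from nvmisc.h:
+ *
+ *     NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS p = { 0 };
+ *     p.head = 0;
+ *     p.sorIndex = 0;
+ *     p.flags =
+ *         DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV, _FLAGS_ENABLE_RG, _ON) |
+ *         DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV, _FLAGS_TRIGGER_MODE, _LOADV);
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_SET_RATE_GOV, &p, sizeof(p));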
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC: when this command has already been called
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV (0x731363U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS_MESSAGE_ID (0x63U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 sorIndex;
+    NvU32 flags;
+} NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG 0:0
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG_OFF (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG_ON (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE 1:1
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE_LOADV (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE_IMMEDIATE (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER 2:2
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER_OFF (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER_ON (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION 3:3
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_PROGRAM (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_CHECK_STATUS (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS 31:31
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS_FAIL (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS_PASS (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT
+ *
+ * This call is used by the DisplayPort library. Once
+ * all of the platforms have ported, this call will be
+ * deprecated and made the default behavior.
+ *
+ *   Disables automatic watermark programming
+ *   Disables automatic DP IRQ handling (CP IRQ)
+ *   Disables automatic retry on defers
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID (0x65U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_ECF
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   sorIndex
+ *     This parameter specifies the index of the SOR for which the ECF
+ *     should be updated.
+ *   ecf
+ *     This parameter has the ECF bit mask.
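+ *
+ * Editorial sketch (illustrative only; "ecfMask" and "RmControl" are
+ * stand-ins):
+ *
+ *     NV0073_CTRL_CMD_DP_SET_ECF_PARAMS p = { 0 };
+ *     p.sorIndex = 0;
+ *     p.ecf = ecfMask;              // 64-bit ECF bit mask
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_SET_ECF, &p, sizeof(p));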
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_ECF (0x731366U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_ECF_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_ECF_PARAMS_MESSAGE_ID (0x66U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_ECF_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 sorIndex;
+    NV_DECLARE_ALIGNED(NvU64 ecf, 8);
+    NvBool bForceClearEcf;
+    NvBool bAddStreamBack;
+} NV0073_CTRL_CMD_DP_SET_ECF_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_SEND_ACT
+ *
+ * This command sends ACT.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ *   displayId
+ *     Specifies the root port displayId for which the trigger has to be done.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC: when this command has already been called
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SEND_ACT (0x731367U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS_MESSAGE_ID (0x67U)
+
+typedef struct NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_DP_GET_CAPS
+ *
+ * This command returns the following info:
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   sorIndex
+ *     Specifies the SOR index.
+ *   bIsDp12Supported
+ *     Returns NV_TRUE if DP1.2 is supported by the GPU, else NV_FALSE.
+ *   bIsDp14Supported
+ *     Returns NV_TRUE if DP1.4 is supported by the GPU, else NV_FALSE.
+ *   bIsMultistreamSupported
+ *     Returns NV_TRUE if MST is supported by the GPU, else NV_FALSE.
+ *   bIsSCEnabled
+ *     Returns NV_TRUE if stream cloning is supported by the GPU, else
+ *     NV_FALSE.
+ *   maxLinkRate
+ *     Returns the maximum allowed orclk for DP mode of the SOR.
+ *     Per the defines below: 1 signifies 1.62 (RBR), 2 signifies 2.70 (HBR),
+ *     3 signifies 5.40 (HBR2), 4 signifies 8.10 (HBR3).
+ *   bHasIncreasedWatermarkLimits
+ *     Returns NV_TRUE if the GPU uses higher watermark limits, else NV_FALSE.
+ *   bIsPC2Disabled
+ *     Returns NV_TRUE if the VBIOS flag to disable PostCursor2 is set, else
+ *     NV_FALSE.
+ *   bFECSupported
+ *     Returns NV_TRUE if the GPU supports FEC, else NV_FALSE.
+ *   bIsTrainPhyRepeater
+ *     Returns NV_TRUE if the LTTPR Link Training feature is set.
+ *   bOverrideLinkBw
+ *     Returns NV_TRUE if DFP limits defined in the DCB have to be honored,
+ *     else NV_FALSE.
+ *
+ *   DSC caps -
+ *     bDscSupported
+ *       Whether the GPU supports DSC or not.
+ *
+ *     encoderColorFormatMask
+ *       Mask of all color formats for which DSC
+ *       encoding is supported by the GPU.
+ *
+ *     lineBufferSizeKB
+ *       Size of the line buffer.
+ *
+ *     rateBufferSizeKB
+ *       Size of the rate buffer per slice.
+ *
+ *     bitsPerPixelPrecision
+ *       Bits per pixel precision for DSC, e.g. 1/16, 1/8, 1/4, 1/2, 1 bpp.
+ *
+ *     maxNumHztSlices
+ *       Maximum number of horizontal slices supported by the DSC encoder.
+ *
+ *     lineBufferBitDepth
+ *       Bit depth used by the GPU to store the reconstructed pixels within
+ *       the line buffer.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+
+
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 sorIndex;
+    NvU32 maxLinkRate;
+    NvBool bIsDp12Supported;
+    NvBool bIsDp14Supported;
+
+
+    NvBool reserved;
+/* NVRM_PUBLISHED */
+
+
+    NvBool bIsMultistreamSupported;
+    NvBool bIsSCEnabled;
+    NvBool bHasIncreasedWatermarkLimits;
+    NvBool bIsPC2Disabled;
+    NvBool isSingleHeadMSTSupported;
+    NvBool bFECSupported;
+    NvBool bIsTrainPhyRepeater;
+    NvBool bOverrideLinkBw;
+
+    struct {
+        NvBool bDscSupported;
+        NvU32 encoderColorFormatMask;
+        NvU32 lineBufferSizeKB;
+        NvU32 rateBufferSizeKB;
+        NvU32 bitsPerPixelPrecision;
+        NvU32 maxNumHztSlices;
+        NvU32 lineBufferBitDepth;
+    } DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+/*
+ * NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES
+ *
+ * This command sets the MSA properties for the specified DP display.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     should be for DP only
+ *   bEnableMSA
+ *     To enable or disable MSA.
+ *   bStereoPhaseInverse
+ *     To enable or disable the Stereo Phase Inverse value.
+ *   bCacheMsaOverrideForNextModeset
+ *     Cache the values and don't apply them until the next modeset.
+ *   featureMask
+ *     Enable/Disable mask of each individual MSA property.
+ *   featureValues
+ *     MSA property value to write.
+ *   pFeatureDebugValues
+ *     It will return the actual MSA property values being written to HW.
+ *     If it is NULL, no error is returned and nothing is reported back.
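+ *
+ * Editorial sketch (illustrative only): overriding the horizontal sync
+ * polarity while leaving every other MSA field untouched ("RmControl" is
+ * a stand-in for the client's control dispatch):
+ *
+ *     NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS p = { 0 };
+ *     p.displayId = displayId;
+ *     p.bEnableMSA = NV_TRUE;
+ *     p.featureMask.bSyncPolarityHorizontal = NV_TRUE;
+ *     p.featureValues.syncPolarityHorizontal =
+ *         NV0073_CTRL_CMD_DP_MSA_PROPERTIES_SYNC_POLARITY_HIGH;
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES, &p, sizeof(p));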
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_TIMEOUT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES (0x73136aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_MSA_PROPERTIES_SYNC_POLARITY_LOW (0U)
+#define NV0073_CTRL_CMD_DP_MSA_PROPERTIES_SYNC_POLARITY_HIGH (1U)
+
+typedef struct NV0073_CTRL_DP_MSA_PROPERTIES_MASK {
+    NvU8 miscMask[2];
+    NvBool bRasterTotalHorizontal;
+    NvBool bRasterTotalVertical;
+    NvBool bActiveStartHorizontal;
+    NvBool bActiveStartVertical;
+    NvBool bSurfaceTotalHorizontal;
+    NvBool bSurfaceTotalVertical;
+    NvBool bSyncWidthHorizontal;
+    NvBool bSyncPolarityHorizontal;
+    NvBool bSyncHeightVertical;
+    NvBool bSyncPolarityVertical;
+    NvBool bReservedEnable[3];
+} NV0073_CTRL_DP_MSA_PROPERTIES_MASK;
+
+typedef struct NV0073_CTRL_DP_MSA_PROPERTIES_VALUES {
+    NvU8 misc[2];
+    NvU16 rasterTotalHorizontal;
+    NvU16 rasterTotalVertical;
+    NvU16 activeStartHorizontal;
+    NvU16 activeStartVertical;
+    NvU16 surfaceTotalHorizontal;
+    NvU16 surfaceTotalVertical;
+    NvU16 syncWidthHorizontal;
+    NvU16 syncPolarityHorizontal;
+    NvU16 syncHeightVertical;
+    NvU16 syncPolarityVertical;
+    NvU8 reserved[3];
+} NV0073_CTRL_DP_MSA_PROPERTIES_VALUES;
+
+#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS_MESSAGE_ID (0x6AU)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvBool bEnableMSA;
+    NvBool bStereoPhaseInverse;
+    NvBool bCacheMsaOverrideForNextModeset;
+    NV0073_CTRL_DP_MSA_PROPERTIES_MASK featureMask;
+    NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureValues;
+    NV_DECLARE_ALIGNED(struct NV0073_CTRL_DP_MSA_PROPERTIES_VALUES *pFeatureDebugValues, 8);
+} NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT
+ *
+ * This command can be used to invoke a fake interrupt for the operation of a
+ * DP 1.2 branch device.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   interruptType
+ *     This parameter specifies the type of fake interrupt to be invoked.
+ *     Possible values are:
+ *       0 => IRQ
+ *       1 => HPDPlug
+ *       2 => HPDUnPlug
+ *   displayId
+ *     should be for DP only
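+ *
+ * Editorial sketch (illustrative only): injecting a fake hot-plug event
+ * ("RmControl" is a stand-in for the client's control dispatch):
+ *
+ *     NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS p = { 0 };
+ *     p.displayId = displayId;
+ *     p.interruptType = NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG;
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT,
+ *                        &p, sizeof(p));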
+ *
+ */
+
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT (0x73136bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS_MESSAGE_ID (0x6BU)
+
+typedef struct NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 interruptType;
+} NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_IRQ (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG (0x00000002U)
+
+/*
+ * NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG
+ *
+ * This command records the MS displayId lit up by the driver for further use
+ * by the VBIOS.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     should be for DP only
+ *   activeDevAddr
+ *     Active MS panel address.
+ *   sorIndex
+ *     SOR Index.
+ *   dpLink
+ *     DP Sub Link Index.
+ *   hopCount
+ *     Maximum hop count in the MS address.
+ *   dpMsDevAddrState
+ *     DP Multistream Device Address State.
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_TIMEOUT
+ *
+ */
+#define NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG (0x73136cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS_MESSAGE_ID (0x6CU)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 activeDevAddr;
+    NvU32 sorIndex;
+    NvU32 dpLink;
+    NvU32 hopCount;
+    NvU32 dpMsDevAddrState;
+} NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT
+*
+* This command configures a new bit, NV_PDISP_SF_DP_LINKCTL_TRIGGER_SELECT,
+* to indicate which pipeline will handle the
+* time slot allocation in single-head MST mode.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device.
+*     This parameter should be set to zero for default behavior.
+*   head
+*     Specifies the head index for the stream.
+*   sorIndex
+*     Specifies the SOR index for the stream.
+*   streamIndex
+*     Stream Identifier.
+*
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_GENERIC: when this command has already been called
+*
+*/
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT (0x73136fU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS_MESSAGE_ID (0x6FU)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 sorIndex;
+    NvU32 singleHeadMSTPipeline;
+} NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS;
+
+/*
+* NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM
+*
+* This call is used by the DisplayPort library and clients of RM.
+* Its main function is to configure single-head multi-stream mode;
+* it configures internal RM data structures to support the required mode.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device. This parameter
+*     should be set to zero for default behavior.
+*
+*   displayIDs
+*     This parameter specifies the array of DP displayIds to be configured,
+*     which are driven out from a single head.
+*
+*   numStreams
+*     This parameter specifies the number of streams driven from a single
+*     head, e.g. for 2-SST & 2-MST its value is 2.
+*
+*   mode
+*     This parameter specifies the single-head multi-stream mode to be
+*     configured.
+*
+*   bSetConfig
+*     This parameter configures single-head multi-stream mode.
+*     If TRUE, it sets SST or MST based on the 'mode' parameter and updates
+*     internal driver data structures with the given information.
+*     If FALSE, it clears the single-head multi-stream mode configuration.
+*
+*   vbiosPrimaryDispIdIndex
+*     This parameter specifies the VBIOS master displayId index in the
+*     displayIDs input array.
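+*
+* Editorial sketch (illustrative only): configuring 2-SST driven from one
+* head ("RmControl" and the displayId variables are stand-ins):
+*
+*     NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS p = { 0 };
+*     p.displayIDs[0] = primaryDisplayId;
+*     p.displayIDs[1] = secondaryDisplayId;
+*     p.numStreams = 2;
+*     p.mode = NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST;
+*     p.bSetConfig = NV_TRUE;
+*     status = RmControl(hClient, hDispCommon,
+*                        NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM,
+*                        &p, sizeof(p));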
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_NOT_SUPPORTED
+*
+*/
+#define NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM (0x73136eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS (0x00000002U)
+#define NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS_MESSAGE_ID (0x6EU)
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayIDs[NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS];
+    NvU32 numStreams;
+    NvU32 mode;
+    NvBool bSetConfig;
+    NvU8 vbiosPrimaryDispIdIndex;
+} NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST (0x00000001U)
+#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST (0x00000002U)
+
+/*
+* NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL
+*
+* This command configures a new bit, NV_PDISP_SF_DP_LINKCTL_TRIGGER_ALL,
+* to indicate whether all the pipelines take effect on ACT (sorFlushUpdates)
+* in single-head MST mode.
+*
+*   subDeviceInstance
+*     This parameter specifies the subdevice instance within the
+*     NV04_DISPLAY_COMMON parent device to which the operation should be
+*     directed. This parameter must specify a value between zero and the
+*     total number of subdevices within the parent device. This parameter
+*     should be set to zero for default behavior.
+*   head
+*     Specifies the head index for the stream.
+*   sorIndex
+*     Specifies the SOR index for the stream.
+*   streamIndex
+*     Stream Identifier.
+*
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_GENERIC: when this command has already been called
+*
+*/
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL (0x731370U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS_MESSAGE_ID (0x70U)
+
+typedef struct NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvBool enable;
+} NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA
+*
+* This command collects the DP AUX log from the RM aux buffer and
+* sends it to the application.
+*
+*   dpAuxBufferReadSize
+*     Specifies the number of logs to be read from the
+*     AUX buffer in RM.
+*   dpNumMessagesRead
+*     Specifies the number of logs read from the AUX buffer.
+*   dpAuxBuffer
+*     The local buffer to copy the specified number of logs
+*     from RM to the user application.
+*
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_GENERIC: when this command has already been called
+*
+*
+* DPAUXPACKET - This structure holds the log information.
+*   auxPacket - carries the hex dump of the message transaction
+*   auxEvents - contains the request type, reply type and event type fields
+*   auxRequestTimeStamp - request timestamp
+*   auxMessageReqSize - request message size
+*   auxMessageReplySize - reply message size (how much information was
+*                         actually sent by the receiver)
+*   auxOutPort - DP port number
+*   auxPortAddress - address to which data was requested to be read or written
+*   auxReplyTimeStamp - reply timestamp
+*   auxCount - serial number to keep track of transactions
+*/
+
+/* Maximum DP message size is 16 as per the protocol */
+#define DP_MAX_MSG_SIZE 16U
+#define MAX_LOGS_PER_POLL 50U
+
+/* Various kinds of DP Aux transactions */
+#define NV_DP_AUXLOGGER_REQUEST_TYPE 3:0
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_NULL 0x00000000U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CWR 0x00000001U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CREQWSTAT 0x00000002U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTWR 0x00000003U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTREQWSTAT 0x00000004U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_AUXWR 0x00000005U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CRD 0x00000006U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTRD 0x00000007U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_AUXRD 0x00000008U
+#define NV_DP_AUXLOGGER_REQUEST_TYPE_UNKNOWN 0x00000009U
+
+#define NV_DP_AUXLOGGER_REPLY_TYPE 7:4
+#define NV_DP_AUXLOGGER_REPLY_TYPE_NULL 0x00000000U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_SB_ACK 0x00000001U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_RETRY 0x00000002U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_TIMEOUT 0x00000003U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_DEFER 0x00000004U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_DEFER_TO 0x00000005U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_ACK 0x00000006U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_ERROR 0x00000007U
+#define NV_DP_AUXLOGGER_REPLY_TYPE_UNKNOWN 0x00000008U
+
+#define NV_DP_AUXLOGGER_EVENT_TYPE 9:8
+#define NV_DP_AUXLOGGER_EVENT_TYPE_AUX 0x00000000U
+#define NV_DP_AUXLOGGER_EVENT_TYPE_HOT_PLUG 0x00000001U
+#define NV_DP_AUXLOGGER_EVENT_TYPE_HOT_UNPLUG 0x00000002U
+#define NV_DP_AUXLOGGER_EVENT_TYPE_IRQ 0x00000003U
+
+#define NV_DP_AUXLOGGER_AUXCTL_CMD 15:12
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_INIT 0x00000000U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CWR 0x00000000U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CRD 0x00000001U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CREQWSTAT 0x00000002U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTWR 0x00000004U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTRD 0x00000005U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTREQWSTAT 0x00000006U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_AUXWR 0x00000008U
+#define NV_DP_AUXLOGGER_AUXCTL_CMD_AUXRD 0x00000009U
+
+
+typedef struct DPAUXPACKET {
+    NvU32 auxEvents;
+    NvU32 auxRequestTimeStamp;
+    NvU32 auxMessageReqSize;
+    NvU32 auxMessageReplySize;
+    NvU32 auxOutPort;
+    NvU32 auxPortAddress;
+    NvU32 auxReplyTimeStamp;
+    NvU32 auxCount;
+    NvU8 auxPacket[DP_MAX_MSG_SIZE];
+} DPAUXPACKET;
+typedef struct DPAUXPACKET *PDPAUXPACKET;
+
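+/*
+ * Editorial sketch (illustrative only): draining the AUX log and decoding
+ * each packet's request type with DRF_VAL from nvmisc.h ("RmControl" is a
+ * stand-in for the client's control dispatch):
+ *
+ *     NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS p = { 0 };
+ *     p.dpAuxBufferReadSize = MAX_LOGS_PER_POLL;
+ *     status = RmControl(hClient, hDispCommon,
+ *                        NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA,
+ *                        &p, sizeof(p));
+ *     for (NvU32 i = 0; (status == NV_OK) && (i < p.dpNumMessagesRead); i++)
+ *     {
+ *         NvU32 req = DRF_VAL(_DP, _AUXLOGGER, _REQUEST_TYPE,
+ *                             p.dpAuxBuffer[i].auxEvents);
+ *     }
+ */
+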
"(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS_MESSAGE_ID (0x73U) + +typedef struct NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS { + //In + NvU32 subDeviceInstance; + NvU32 dpAuxBufferReadSize; + + //Out + NvU32 dpNumMessagesRead; + DPAUXPACKET dpAuxBuffer[MAX_LOGS_PER_POLL]; +} NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS; + + + + +/* NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES + * + * This setup link rate table for target display to enable indexed link rate + * and export valid link rates back to client. Client may pass empty table to + * reset previous setting. + * + * subDeviceInstance + * client will give a subdevice to get right pGpu/pDisp for it + * displayId + * DisplayId of the display for which the client targets + * linkRateTbl + * Link rates in 200KHz as native granularity from eDP 1.4 + * linkBwTbl + * Link rates in 270MHz and valid for client to apply to + * linkBwCount + * Total valid link rates + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS { + // In + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + + // Out + NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + NvU8 linkBwCount; +} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS; + + +/* + * NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES + * + * This command is used to not depend on supervisor interrupts for setting the + * stereo msa params. We will not cache the values and can toggle stereo using + * this ctrl call on demand. Note that this control call will only change stereo + * settings and will leave other settings as is. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * should be for DP only + * bEnableMSA + * To enable or disable MSA + * bStereoPhaseInverse + * To enable or disable Stereo Phase Inverse value + * featureMask + * Enable/Disable mask of individual MSA property. 
+ * featureValues + * MSA property value to write + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_TIMEOUT + * + */ +#define NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES (0x731378U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS_MESSAGE_ID (0x78U) + +typedef struct NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bEnableMSA; + NvBool bStereoPhaseInverse; + NV0073_CTRL_DP_MSA_PROPERTIES_MASK featureMask; + NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureValues; +} NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DP_CONFIGURE_FEC + * + * This command is used to enable/disable FEC on the DP Mainlink. + * FEC is a prerequisite to DSC. When enabling, this should be called only + * after LT completes (including PostLT LQA). + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * + * displayId + * Only one displayId bit may be set, and it must be a DP display. + * + * bEnableFec + * To enable or disable FEC + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV0073_CTRL_CMD_DP_CONFIGURE_FEC (0x73137aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS_MESSAGE_ID (0x7AU) + +typedef struct NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bEnableFec; +} NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * cmd + * This parameter is an input to this command. + * Here are the current defined fields: + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER + * Set to specify what operation to run. + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_UP + * Request to power up the pad. + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_DOWN + * Request to power down the pad. + * linkBw + * This parameter is used to pass in the link bandwidth required to run the + * power up sequence. Refer to enum DP_LINK_BANDWIDTH for valid values. + * laneCount + * This parameter is used to pass the lane count. + * sorIndex + * This parameter is used to pass the SOR index. + * sublinkIndex + * This parameter is used to pass the sublink index. Please refer to + * enum DFPLINKINDEX for valid values + * priPadLinkIndex + * This parameter is used to pass the padlink index for the primary link. + * Please refer to enum DFPPADLINK for valid index values for Link A~F. + * secPadLinkIndex + * This parameter is used to pass the padlink index for the secondary link.
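/*
 * Illustrative sketch for the FEC control above: enabling FEC after link
 * training (including PostLT LQA) completes, per the prerequisite noted in
 * the comment. rmControl() is a hypothetical wrapper for issuing the control.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS dpEnableFec(NvU32 displayId)
{
    NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS p = { 0 };

    p.subDeviceInstance = 0;
    p.displayId         = displayId; /* exactly one DP displayId bit */
    p.bEnableFec        = NV_TRUE;   /* call only after LT completes */

    return rmControl(NV0073_CTRL_CMD_DP_CONFIGURE_FEC, &p, sizeof(p));
}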
+ * For Single SST pass in NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PADLINK_INDEX_INVALID + * bEnableSpread + * This boolean parameter indicates whether spread is to be enabled or disabled. + */ + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD (0x73137bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID (0x7BU) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS { + NvU32 subDeviceInstance; + NvU32 cmd; + NvU32 linkBw; + NvU32 laneCount; + NvU32 sorIndex; + NvU32 sublinkIndex; // sublink A/B + NvU32 priPadLinkIndex; // padlink A/B/C/D/E/F + NvU32 secPadLinkIndex; // padlink A/B/C/D/E/F for secondary link in DualSST case. + NvBool bEnableSpread; +} NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS; + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER 0:0 +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_UP (0x00000000U) +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_DOWN (0x00000001U) + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PADLINK_INDEX_INVALID (0x000000FFU) + +/* + * NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL + * + * This command can be used to perform an I2C bulk transfer over the + * DP Aux channel. This is the DisplayPort-specific implementation + * for sending bulk data over the DpAux channel, by splitting up the + * data into pieces and retrying for pieces that aren't ACK'd. + * + * subDeviceInstance [IN] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId [IN] + * This parameter specifies the ID of the display to which the I2C + * transfer is directed. The display ID must be a DFP display. + * If more than one displayId bit is set or the displayId is not a DFP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * addr [IN] + * This parameter is an input to this command. The addr parameter follows + * Section 2.4 in the DisplayPort spec and the client should refer to the valid + * address in the DisplayPort spec. Only the first 20 bits are valid. + * bWrite [IN] + * This parameter specifies whether the command is an I2C write (NV_TRUE) or + * an I2C read (NV_FALSE). + * data [IN/OUT] + * In the case of a read transaction, this parameter returns the data from + * the transaction request. In the case of a write transaction, the client + * should write to this buffer for the data to send. + * size [IN/OUT] + * Specifies how many data bytes to read/write depending on the + * transaction type.
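/*
 * Illustrative sketch for the macro pad control above: powering up the pad
 * for a single-SST link. The linkBw value here assumes the DPCD-style 270MHz
 * unit convention (0x0A = 2.7 Gbps HBR); since enum DP_LINK_BANDWIDTH is
 * defined elsewhere, treat that value and the sublink/padlink indices as
 * placeholders. rmControl() is a hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS dpMacroPadPowerUp(NvU32 sorIndex, NvU32 padLink)
{
    NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS p = { 0 };

    p.subDeviceInstance = 0;
    p.cmd               = NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_UP;
    p.linkBw            = 0x0A;     /* assumed 270MHz units: HBR */
    p.laneCount         = 4;
    p.sorIndex          = sorIndex;
    p.sublinkIndex      = 0;        /* sublink A (illustrative) */
    p.priPadLinkIndex   = padLink;
    p.secPadLinkIndex   = NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PADLINK_INDEX_INVALID; /* single SST */
    p.bEnableSpread     = NV_FALSE;

    return rmControl(NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD, &p, sizeof(p));
}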
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL (0x73137cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE 256U + +#define NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS_MESSAGE_ID (0x7CU) + +typedef struct NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 addr; + NvBool bWrite; + NvU8 data[NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE]; + NvU32 size; +} NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_ENABLE_VRR + * + * This command is used to enable VRR. + * + * subDeviceInstance [IN] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior + * displayId [IN] + * This parameter is an input to this command and specifies the ID of the + * display that the client targets. + * The display ID must be a DP display. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * cmd [IN] + * This parameter is an input to this command. + * + * _STAGE: specifies the stage id to execute in the VRR enablement sequence. + * _MONITOR_ENABLE_BEGIN: Send command to the monitor to start monitor + * enablement procedure. + * _MONITOR_ENABLE_CHALLENGE: Send challenge to the monitor + * _MONITOR_ENABLE_CHECK: Read digest from the monitor, and verify + * if the result is valid. + * _DRIVER_ENABLE_BEGIN: Send command to the monitor to start driver + * enablement procedure. + * _DRIVER_ENABLE_CHALLENGE: Read challenge from the monitor and write back + * corresponding digest. + * _DRIVER_ENABLE_CHECK: Check if monitor enablement worked. + * _RESET_MONITOR: Set the firmware state machine to a known state. + * _INIT_PUBLIC_INFO: Send command to the monitor to prepare public info. + * _GET_PUBLIC_INFO: Read public info from the monitor. + * _STATUS_CHECK: Check if monitor is ready for next command. + * result [OUT] + * This is an output parameter to reflect the result of the operation.
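/*
 * Illustrative sketch for the I2C-over-AUX transfer above: a bulk read into a
 * caller buffer. The 0x50 address (the conventional EDID I2C address) is only
 * illustrative; the header defers valid addressing to the DisplayPort spec.
 * rmControl() is a hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS dpI2cAuxRead(NvU32 displayId, NvU8 *pOut, NvU32 bytes)
{
    NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS p = { 0 };
    NV_STATUS status;
    NvU32 i;

    if (bytes > NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE)
        return NV_ERR_INVALID_ARGUMENT;

    p.subDeviceInstance = 0;
    p.displayId         = displayId;
    p.addr              = 0x50;      /* illustrative I2C address */
    p.bWrite            = NV_FALSE;  /* read transaction */
    p.size              = bytes;

    status = rmControl(NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL, &p, sizeof(p));
    if (status == NV_OK)
        for (i = 0; i < p.size; i++)
            pOut[i] = p.data[i];     /* copy out the bytes RM returned */
    return status;
}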
+ */ +#define NV0073_CTRL_CMD_DP_ENABLE_VRR (0x73137dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS_MESSAGE_ID (0x7DU) + +typedef struct NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 cmd; + NvU32 result; +} NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS; + +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0 +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U) + +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U) + +/* + * NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME + * + * This command is used to capture the display output packets for the DP protocol. + * Common supported packets are the Dynamic Range and mastering infoframe SDP for HDR, and the + * VSC SDP for colorimetry and pixel encoding info. + * + * displayID (in) + * This parameter specifies the displayID for the display resource to configure. + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON + * parent device to which the operation should be directed. + * infoframeIndex (in) + * HW provides support to program 2 generic infoframes per frame for DP. + * This parameter indicates which infoframe packet is to be captured. + * Possible flags are as follows: + * NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE + * This flag indicates the INFOFRAME that needs to be read. + * Set to _INFOFRAME0 if RM should read GENERIC_INFOFRAME + * Set to _INFOFRAME1 if RM should read GENERIC_INFOFRAME1 + * packet (out) + * The buffer into which RM reads the infoframe packet. + * bTransmitControl (out) + * This gives the transmit mode of infoframes. + * If set, the infoframe will be sent as soon as possible and then on + * every frame during vblank. + * If cleared, the infoframe will be sent once as soon as possible.
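/*
 * Illustrative sketch for the VRR control above: issuing a single enablement
 * stage and returning the monitor-reported result. The overall stage ordering
 * and any retry/poll policy (e.g. re-issuing _STATUS_CHECK while the result is
 * _STATUS_PENDING) are left to the caller, since this header only describes
 * the individual stages. rmControl() is a hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS dpEnableVrrStage(NvU32 displayId, NvU32 stage, NvU32 *pResult)
{
    NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS p = { 0 };
    NV_STATUS status;

    p.subDeviceInstance = 0;
    p.displayId         = displayId;
    p.cmd               = stage; /* one of the _CMD_STAGE values above (bits 3:0) */

    status   = rmControl(NV0073_CTRL_CMD_DP_ENABLE_VRR, &p, sizeof(p));
    *pResult = p.result;         /* e.g. NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK */
    return status;
}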
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME (0x73137eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_GENERIC_INFOFRAME_MAX_PACKET_SIZE 36U + +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS_MESSAGE_ID (0x7EU) + +typedef struct NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 infoframeIndex; + NvU8 packet[NV0073_CTRL_DP_GENERIC_INFOFRAME_MAX_PACKET_SIZE]; + NvBool bTransmitControl; +} NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS; + + +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE 0:0 +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE_INFOFRAME0 (0x0000000U) +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE_INFOFRAME1 (0x0000001U) + + +/* + * NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES + * + * This command is used to capture the various data attributes sent in the MSA for the DP protocol. + * Refer to table 2-94 'MSA Data Fields' in the DP1.4a spec document for the MSA data field description. + * + * displayID (in) + * This parameter specifies the displayID for the display resource to configure. + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON + * parent device to which the operation should be directed. + * mvid, nvid (out) + * Video timestamp used by the DP sink for regenerating the pixel clock. + * misc0, misc1 (out) + * Miscellaneous MSA attributes. + * hTotal, vTotal (out) + * Htotal measured in pixel count and vtotal measured in line count. + * hActiveStart, vActiveStart (out) + * Active start measured from the start of the leading edge of the sync pulse. + * hActiveWidth, vActiveWidth (out) + * Active video width and height. + * hSyncWidth, vSyncWidth (out) + * Width of the sync pulse. + * hSyncPolarity, vSyncPolarity (out) + * Polarity of the sync pulse.
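/*
 * Illustrative sketch for the generic infoframe capture above: reading the
 * first generic infoframe (e.g. to inspect an HDR SDP). rmControl() is a
 * hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS dpReadGenericInfoframe(NvU32 displayId)
{
    NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS p = { 0 };
    NV_STATUS status;

    p.subDeviceInstance = 0;
    p.displayId         = displayId;
    p.infoframeIndex    = NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE_INFOFRAME0;

    status = rmControl(NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME, &p, sizeof(p));
    if (status == NV_OK)
    {
        /* p.packet[] now holds up to NV0073_CTRL_DP_GENERIC_INFOFRAME_MAX_PACKET_SIZE
         * bytes; p.bTransmitControl tells whether the sink receives it every frame. */
    }
    return status;
}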
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES (0x73137fU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MSA_MAX_DATA_SIZE 7U + +#define NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS_MESSAGE_ID (0x7FU) + +typedef struct NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 mvid; + NvU32 nvid; + NvU8 misc0; + NvU8 misc1; + NvU16 hTotal; + NvU16 vTotal; + NvU16 hActiveStart; + NvU16 vActiveStart; + NvU16 hActiveWidth; + NvU16 vActiveWidth; + NvU16 hSyncWidth; + NvU16 vSyncWidth; + NvBool hSyncPolarity; + NvBool vSyncPolarity; +} NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS; + +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MVID 23:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_NVID 23:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MISC0 7:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MISC1 15:8 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HTOTAL 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VTOTAL 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HACTIVE_START 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VACTIVE_START 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HACTIVE_WIDTH 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VACTIVE_WIDTH 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HSYNC_WIDTH 14:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HSYNC_POLARITY 15:15 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VSYNC_WIDTH 30:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VSYNC_POLARITY 31:31 + +/* + * NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL + * + * This command is used to query OD capability and status as well as + * control OD functionality of eDP LCD panels. + * + * subDeviceInstance [in] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId [in] + * This parameter specifies the ID of the DP display which owns + * the Main Link to be adjusted. The display ID must be a DP display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * cmd [in] + * This parameter is an input to this command. The cmd parameter indicates + * whether to get the value of a specific field or, in the case of a + * writeable field, to set it. + * control [in] + * This parameter is input by the user and selects the control + * value to be written to change the Sink OD mode. The command to write is + * the NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET command. + * bOdCapable [out] + * This parameter reflects the OD capability of the Sink which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_CAPABLE_QUERY command. + * bOdControlCapable [out] + * This parameter reflects the OD control capability of the Sink which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_CAPABLE_QUERY command. + * bOdStatus [out] + * This parameter reflects the Sink OD status which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_STATUS_QUERY command.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL (0x731380U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU8 control; + NvU8 cmd; + NvBool bOdCapable; + NvBool bOdControlCapable; + NvBool bOdStatus; +} NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS; + +/* _ctrl0073dp_h_ */ + +/* valid commands */ +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_CAPABLE 0x00000000 +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_CTL_CAPABLE 0x00000001 +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_STATUS 0x00000002 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET 0x00000003 + +/* valid state values */ +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_AUTONOMOUS 0x00000000 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_DISABLE_OD 0x00000002 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_ENABLE_OD 0x00000003 diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h new file mode 100644 index 0000000..1ac3e6f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073dpu.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h new file mode 100644 index 0000000..a2423a8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073event.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h new file mode 100644 index 0000000..2b4965d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073internal.finn +// + +#include "ctrl/ctrl0073/ctrl0073base.h" +#include "ctrl/ctrl0073/ctrl0073system.h" + +#define NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE (0x730401U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID << 8) | NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
+#define NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS { + NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS params; +} NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS; + + +/* ctrl0073internal_h */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h new file mode 100644 index 0000000..7ac6f6e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073psr.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h new file mode 100644 index 0000000..2163c31 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h @@ -0,0 +1,1841 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073specific.finn +// + +#include "ctrl/ctrl0073/ctrl0073base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV04_DISPLAY_COMMON display-specific control commands and parameters */ + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_TYPE + * + * This command can be used to determine the associated display type for + * the specified displayId. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the display + * type is to be returned. Only one display may be indicated in this + * parameter. + * displayType + * This parameter returns the display type associated with the specified + * displayId parameter. Valid displayType values are: + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_TV + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_TYPE (0x730240U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 displayType; +} NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS; + +/* valid display types */ +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN (0x00000000U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT (0x00000001U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP (0x00000002U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_TV (0x00000003U) + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 + * + * This command can be used to request the EDID for the specified displayId. + * + * [in] subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the total + * number of subdevices within the parent device. This parameter should + * be set to zero for default behavior. + * [in] displayId + * This parameter specifies the display from which to read the EDID. The + * display ID must specify a display with a positive connect state as + * determined with the NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE command. + * Only one display may be indicated in this parameter. If more than one + * displayId is used, the RM will return NV_ERR_INVALID_ARGUMENT. + * [out] bufferSize + * This parameter returns the number of bytes copied into edidBuffer after + * performing the requested EDID operations. + * [out] edidBuffer + * The array of EDIDs that RM will fill after the requested operations.
If + * the size of the array is not large enough to hold the number of bytes to + * be copied, NV_ERR_INVALID_ARGUMENT will be returned. + * [in] flags + * This parameter defines the specific operations that will be performed + * in reading the EDID. + * Here are the current defined fields: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE + * A client uses this field to indicate whether to return the cached + * copy of the EDID or to use DDC to read the EDID from the display. + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO + * The RM will use DDC to grab the EDID. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_YES + * The RM will copy the last EDID found into the client's + * buffer. No DDC will be performed. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE + * A client uses this field to indicate whether to read from + * the HW and return the EDID without any patching + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_COOKED + * Use the _COPY_CACHE policy + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_RAW + * Perform the read and return an unadulterated EDID. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE + * A client uses this field to indicate whether to read the EDID + * from the SBIOS using the ACPI sub function for the display dynamic + * switching feature. This flag should only be set on an internal display + * with the dynamic switching feature enabled. + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_ACPI + * RM reads the EDID from the SBIOS and returns the raw EDID provided + * by the SBIOS. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_DEFAULT + * EDID is read based on the rest of the 'flags' that are passed to + * this function. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID (0x45U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU32 flags; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS; + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE 0:0 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_YES 0x00000001U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE 1:1 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_COOKED 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_RAW 0x00000001U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE 3:2 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_DEFAULT 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_ACPI 0x00000001U + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2 + * + * This command can be used to set or remove a complete EDID for the + * specified displayId. Once the EDID is set, any requests + * to read the EDID or use DDC detection will always use a cached copy of + * the EDID. That is, the EDID becomes static until disabled by calling + * this same function with edidBuffer. Note that DDC-based + * detection will always pass for any displayId that has set an EDID.
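/*
 * Illustrative sketch for the EDID read above: fetching a fresh, unpatched
 * EDID over DDC. The flag values above are field-relative, so each is shifted
 * to its field's low bit (COPY_CACHE is bit 0, READ_MODE is bit 1, per the
 * x:y ranges) -- an assumption based on the DRF-style notation. rmControl()
 * is a hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS readRawEdid(NvU32 displayId)
{
    NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS p = { 0 };
    NV_STATUS status;

    p.subDeviceInstance = 0;
    p.displayId         = displayId;
    p.flags = (NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO << 0) |
              (NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_RAW << 1);

    status = rmControl(NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, &p, sizeof(p));
    /* On NV_OK, p.bufferSize bytes of p.edidBuffer[] are valid. */
    return status;
}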
Also, + * this path will not store any value across reboots. If an EDID needs to + * remain set after a reboot, RM clients must call this function again. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the total + * number of subdevices within the parent device. This parameter should + * be set to zero for default behavior. + * displayId + * This parameter specifies the display for which to store or remove the + * EDID. Only one display may be indicated in this parameter. If more than + * one displayId is used, the RM will return NV_ERR_INVALID_ARGUMENT. + * If the displayId does not use DDC and hence would not have an EDID, + * then the RM could also return NV_ERR_INVALID_ARGUMENT. + * bufferSize + * This parameter specifies the size of the EDID buffer pointed to by + * pEdidBuffer. If the EDID write contains more bytes than bufferSize, + * the RM will extend the bufferSize of the EDID inside the RM to match. + * Note a bufferSize of 0 would mean no bytes will be copied, but the + * current cached EDID will be set as static. + * edidBuffer + * This parameter specifies the EDID buffer that the RM will copy into + * the RM buffer. If the EDID buffer is empty, the RM will remove any + * previously set EDID and allow further detection and EDID reads to use DDC. + * The RM will not check to see if the EDID is valid here or not. + * The client should validate the EDID if needed before calling this function. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2 (0x730246U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS_MESSAGE_ID (0x46U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE + * + * This control command provides fake-device support in RM. + * It serves as the entry point for all interaction of RM with + * the user-mode component of any internal [test] tool. The Faking framework + * in RM will be activated only after the user-mode app first sends in a proper + * ENABLE cmd. Any attempt to issue other cmds before the faking code has + * been enabled will result in the RM error NV_ERR_INVALID_DATA. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the total + * number of subdevices within the parent device. This parameter should + * be set to zero for default behavior. + * cmd + * This field will carry the command to be executed by the framework. + * This includes Enabling/Disabling the test framework and faking devices + * like CRT/DVI/TV. + * data + * This field is to carry the data required for executing the cmd. + * Except for Enable and Disable, the other faking device commands will + * require the device mask of the device to be faked/removed. + * tvType + * This field specifies a specific TV type while faking a TV.
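/*
 * Illustrative sketch for the EDID override above: pinning a static 128-byte
 * base-block EDID for a display. As noted above, this does not persist across
 * reboots. The 128-byte size is just the standard EDID base block; rmControl()
 * is a hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS pinStaticEdid(NvU32 displayId, const NvU8 *pEdid /* 128 bytes */)
{
    NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS p = { 0 };
    NvU32 i;

    p.subDeviceInstance = 0;
    p.displayId         = displayId;
    p.bufferSize        = 128; /* one EDID base block */
    for (i = 0; i < p.bufferSize; i++)
        p.edidBuffer[i] = pEdid[i]; /* RM does not validate the EDID contents */

    return rmControl(NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2, &p, sizeof(p));
}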
+ * Possible values are: + * NV0073_FAKE_DEVICE_TV_NONE + * NV0073_FAKE_DEVICE_TV_SVIDEO + * NV0073_FAKE_DEVICE_TV_COMPOSITE + * NV0073_FAKE_DEVICE_TV_COMPONENT + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_DATA + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE (0x730243U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS_MESSAGE_ID" */ + +/* valid fake device TV connector types */ +#define NV0073_FAKE_DEVICE_TV_NONE (0U) +#define NV0073_FAKE_DEVICE_TV_SVIDEO (1U) +#define NV0073_FAKE_DEVICE_TV_COMPOSITE (2U) +#define NV0073_FAKE_DEVICE_TV_COMPONENT (3U) + +#define NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS_MESSAGE_ID (0x43U) + +typedef struct NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS { + NvU32 subDeviceInstance; + NvU32 cmd; + NvU32 data; + NvU32 tvType; +} NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS; + +/* Faking Support commands */ +/* some random value to enable/disable test code */ +#define NV0073_FAKE_DEVICE_SUPPORT_ENABLE 0x11faU +#define NV0073_FAKE_DEVICE_SUPPORT_DISABLE 0x99ceU +#define NV0073_FAKE_DEVICE_SUPPORT_ATTACH_DEVICES 0x100U +#define NV0073_FAKE_DEVICE_SUPPORT_REMOVE_DEVICES 0x101U + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID + * + * This command returns the I2C portID for the specified display device. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the display for which information is to be + * returned. Only one display may be indicated in this parameter. + * If more than one displayId is used a failing status of + * NV_ERR_INVALID_ARGUMENT will be returned. + * commPortId + * This parameter returns the I2C communication port ID of the + * display device indicated by the displayId parameter. + * ddcPortId + * This parameter returns the I2C DDC port ID of the display device + * indicated by the displayId parameter. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID (0x730211U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 commPortId; + NvU32 ddcPortId; +} NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS; + +#define NV0073_CTRL_SPECIFIC_I2C_PORT_NONE (0x0U) + + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA + * + * This command can be used to get display connector data. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the display for which information is to be + * returned. Only one display may be indicated in this parameter. 
+ * If more than one displayId is used a failing status of + * NV_ERR_INVALID_ARGUMENT will be returned. + * DDCPartners + * This parameter specifies an NV0073_DISPLAY_MASK value describing + * the set of displays that share the same DDC line as displayId. This + * parameter will always be returned even if we also return the + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_NO flag. + * flags + * This parameter specifies optional flags to be used while retrieving + * the connector data for a given displayId. + * Legal values for this parameter include: + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT + * This flag describes whether the connector data is present + * inside the firmware. + * count + * This parameter returns the number of connectors associated with + * the displayId argument. This value indicates the number of +* valid entries returned in the data parameter. + * data + * This parameter returns an array of structures containing the connector + * data associated with each connector for the given displayId argument. + * The count field specifies how many entries in this array are returned. + * Each entry in the array contains the following members: + * index + * This value is the index associated with the given connector. If + * two displayIds share the same index, then they share the same + * connector. + * type + * This value defines the type of connector associated with the + * displayId argument. + * location + * This value provides a possible means to determine the relative + * location of the connector in association to other connectors. + * For desktop boards, a value of zero defines the south most + * connector (the connector closest to the bus slot into which + * the board is inserted). + * platform + * This value defines the type of system with which to associate the + * location of each connector. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */ + +/* maximum number of connectors */ +#define NV0073_CTRL_MAX_CONNECTORS 4U + +#define NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 DDCPartners; + NvU32 count; + struct { + NvU32 index; + NvU32 type; + NvU32 location; + } data[NV0073_CTRL_MAX_CONNECTORS]; + NvU32 platform; +} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS; + +/* defines for the flags field */ +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT 0:0 +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_NO 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_YES 0x00000001U + +/* defines for the data[].type field */ +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_COMPOSITE 0x00000010U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_SVIDEO 0x00000011U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_HDTV_COMPONENT 0x00000013U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_SCART 0x00000014U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_COMPOSITE_SCART_OVER_EIAJ4120_BLUE 0x00000016U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_HDTV_EIAJ4120 0x00000017U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_HDTV_YPRPB 0x00000018U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_SVIDEO 0x00000019U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_COMPOSITE 0x0000001AU +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO 0x00000020U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE 0x00000021U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I 0x00000030U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D 0x00000031U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC 0x00000032U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1 0x00000038U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2 0x00000039U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG 0x00000040U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM 0x00000041U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT 0x00000046U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT 0x00000047U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT 0x00000048U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER 0x00000049U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A 0x00000061U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI 0x00000063U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1 0x00000064U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2 0x00000065U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD 0x00000070U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C 0x00000071U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI 0x00000072U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN 0x00000073U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN 0xFFFFFFFFU + +/* defines for the platform field */ +#define 
NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_DEFAULT_ADD_IN_CARD 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_TWO_PLATE_ADD_IN_CARD 0x00000001U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_CONFIGURABLE 0x00000002U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_DESKTOP_FULL_DP 0x00000007U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_ADD_IN_CARD 0x00000008U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MXM 0x00000009U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK 0x00000010U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK_LEFT 0x00000011U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK_DOCK 0x00000018U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_CRUSH_DEFAULT 0x00000020U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_UNKNOWN 0xFFFFFFFFU + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE + * + * This command is used to signal the resource manager that the upcoming mode + * shall be HDMI rather than DVI. This is required since the resource manager + * does not read the display EDID. The resource manager shall enable HDMI + * components such as the audio engine, for instance. This should + * be called prior to every modeset in which the displayId is capable of HDMI. + * displayId + * This parameter specifies the displayId of the HDMI resource to configure. + * This comes as input to this command. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which operation should be directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * enable + * This field specifies the legal values: + * NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE + * NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_FALSE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID (0x73U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS { + NvU8 subDeviceInstance; + NvU32 displayId; + NvU8 enable; +} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS; + +#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_FALSE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE (0x00000001U) + +/* + * NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI + * + * This command can be used to enable HDMI communication on the associated GPU. + * This should be called prior to every modeset in which the displayId is capable of HDMI. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which operation should be directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * displayId + * This parameter specifies the displayId of the HDMI resource to configure. + * This comes as input to this command.
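/*
 * Illustrative sketch for the HDMI enable control above: flagging the
 * upcoming mode as HDMI before a modeset, as the comment prescribes.
 * rmControl() is a hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NV_STATUS markModeAsHdmi(NvU32 displayId)
{
    NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS p = { 0 };

    p.subDeviceInstance = 0;
    p.displayId         = displayId;
    p.enable            = NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE;

    return rmControl(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, &p, sizeof(p));
}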
+ * enable + * This field specifies the legal values: + * NV0073_CTRL_SPECIFIC_CTRL_HDMI_DISABLE + * NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI (0x730274U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS_MESSAGE_ID (0x74U) + +typedef struct NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS { + NvU8 subDeviceInstance; + NvU32 displayId; + NvBool bEnable; +} NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS; + +#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_DISABLE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE (0x00000001U) + + + +/* + * NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING + * + * This structure defines the mapping between the ACPI ID and the corresponding + * display ID of a display device. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * acpiId + * The ACPI ID of the display device + * displayId + * The corresponding display ID + * dodIndex + * The corresponding DOD index + */ +typedef struct NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING { + NvU32 subDeviceInstance; + NvU32 acpiId; + NvU32 displayId; + NvU32 dodIndex; +} NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING; + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING + * + * This call will update the RM data structure which holds the + * ACPI ID to display ID mapping of the display devices. + * + * The input parameter is an array of structures of type + * NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING. + * + * If some of the array elements remain unused, the acpiId field of the + * structure must be set to 0x0000. + * + * The size of the array is given by + * NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES (defined below). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * +*/ +#define NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING (0x730284U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES 16U + +#define NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS_MESSAGE_ID (0x84U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS { + NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING mapTable[NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES]; +} NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK + * + * This call returns the mask of all usable heads. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * headMask + * headMask is the mask of all heads that are usable. For example, if + * head 0 and head 2 are present, headMask would be NVBIT(0)|NVBIT(2). This + * parameter is returned to the client.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID (0x87U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS { + NvU32 subDeviceInstance; + NvU32 headMask; +} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET + * + * This command is used to program the display output packets. + * Currently it supports DP and HDMI. + * Common supported packets are AVI infoframes, Audio Infoframes, Gamma + * Metadata, Vendor Specific infoframes and General Control Packets (GCP). + * + GCP AVMute Enable should be performed before the start of the modeset. + * + GCP AVMute Disable should be performed after the end of the modeset. + * GCP AVMute should contain HDR + 7 bytes. + * + AVI infoframes should occur after the modeset but before a GCP AVMute + * Disable. AVI infoframes should contain HDR + 14 bytes. + * + Audio infoframes should occur after the modeset but before a GCP AVMute + * Enable. + * Audio infoframes should contain HDR + 11 bytes. + * + Gamma Metadata packets should contain HDR + 28 bytes. + * + Vendor Specific packets are variable length. + * Per the HDMI 1.4 (June 5, 2009) spec, the payload can be 5, 6, 7, or + * 16 bytes, depending on the packet's spec. + * Unused data bytes should be zeroed out. + * + * displayID + * This parameter specifies the displayID for the display resource to + * configure. + * This comes as input to this command. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * transmitControl + * This parameter controls how the packet is to be sent by setting the + * control bits. + * Possible flags are as follows: + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE + * Set to _ENABLE to start sending the packet at next frame, set to + * _DISABLE to stop sending. + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME + * Set to _ENABLE to send the packet at other frame, set to _DISABLE to + * send at every frame. + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME + * Set to _ENABLE to send once next frame, set to _DISABLE to send at + * every frame. + * Note: A setting to set both _OTHER_FRAME and _SINGLE_FRAME is invalid + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK + * Set to _ENABLE to send the packet once on next HBLANK, set to + * _DISABLE to send on VBLANK. + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE + * Set to _ENABLE to send the info frame packet as soon as possible. + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT + * Set to _SW_CONTROLLED to set the HDMI_Video_Format field and 3D_Structure field + * from NV_PDISP_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 and PB5; if it is set to _HW_CONTROLLED + * then HW will get them based on the state of the setHdmiCtrl method. + * Note that this applies only to stereo overrides.
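/*
 * Illustrative sketch for the head-mask query above: enumerating usable heads
 * from the returned mask. The NVBIT(n) convention mentioned in the comment is
 * taken to be (1 << n). rmControl() is a hypothetical wrapper.
 */
extern NV_STATUS rmControl(NvU32 cmd, void *pParams, NvU32 paramsSize); /* hypothetical wrapper */

static NvU32 countUsableHeads(void)
{
    NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS p = { 0 };
    NvU32 head, count = 0;

    p.subDeviceInstance = 0;
    if (rmControl(NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, &p, sizeof(p)) != NV_OK)
        return 0;

    for (head = 0; head < 32; head++)
        if (p.headMask & (1U << head)) /* NVBIT(head) */
            count++;
    return count;
}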
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+ *       Set to TRUE to send the Vendor Specific infoframe used for 3D
+ *       stereo LR sync.
+ *       Set PACKET_TYPE=pktType_VendorSpecInfoFrame along with this flag.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+ *       Set to TRUE to send the Vendor Specific infoframe used for Self
+ *       Refresh panels.
+ *       Set PACKET_TYPE=pktType_VendorSpecInfoFrame along with this flag.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE
+ *       HW provides support to program 2 generic infoframes per frame for
+ *       DP with GP10X+.
+ *       This flag indicates the INFOFRAME that needs to be programmed.
+ *       Set to _INFOFRAME0 if RM should program GENERIC_INFOFRAME.
+ *       Set to _INFOFRAME1 if RM should program GENERIC_INFOFRAME1.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+ *       This option is reserved for backward compatibility with
+ *       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_PACKET and
+ *       NV0073_CTRL_CMD_DP_SET_PACKET.
+ *       It is temporary and will be deprecated soon.
+ * packetSize
+ *   Size of the packet in pPacket to send, including header and payload.
+ * targetHead
+ *   Specifies the target head number for which the SDP needs to be updated.
+ * bUsePsrHeadforSdp
+ *   Indicates whether to use the targetHead field for setting the SDP or
+ *   infoframe packet instead of deriving the active head from the
+ *   displayId.
+ * pPacket
+ *   pPacket points to the packets to send.
+ *   For HDMI 1.1, the maximum allowed is 31 bytes.
+ *   The packet array includes the 3 bytes of header + data depending on
+ *   the type of packet. For an infoframe, the header bytes refer to type,
+ *   version and length respectively. This comes as input to this command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  transmitControl;
+    NvU32  packetSize;
+    NvU32  targetHead;
+    NvBool bUsePsrHeadforSdp;
+    NvU8   aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL
+ *
+ * This command is used to enable/disable sending of display output packets.
+ * Currently it supports HDMI only.
+ * Unused data bytes should be zeroed out.
+ *
+ * displayID
+ *   This parameter specifies the displayID for the display output resource
+ *   to configure.
+ *   This comes as input to this command.
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ *   This parameter must specify a value between zero and the total number
+ *   of subdevices within the parent device. This parameter should be set
+ *   to zero for default behavior.
+ * type
+ *   The client shall specify the type of display output packet. For HDMI,
+ *   set this according to HDMI specification 1.4.
+ *   This comes as input to this command.
+ * transmitControl
+ *   This parameter controls how the packet is to be sent by setting the
+ *   control bits.
+ *   Possible flags are as follows:
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE
+ *       Set to _ENABLE to start sending the packet at the next frame; set
+ *       to _DISABLE to stop sending.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME
+ *       Set to _ENABLE to send the packet at every other frame; set to
+ *       _DISABLE to send at every frame.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME
+ *       Set to _ENABLE to send once at the next frame; set to _DISABLE to
+ *       send at every frame.
+ *       Note: setting both _OTHER_FRAME and _SINGLE_FRAME is invalid.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK
+ *       Set to _ENABLE to send the packet once on the next HBLANK; set to
+ *       _DISABLE to send on VBLANK.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT
+ *       Set to _SW_CONTROLLED to set the HDMI_Video_Format and 3D_Structure
+ *       fields from NV_PDISP_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 and PB5; if it
+ *       is set to _HW_CONTROLLED, then HW derives them from the state of
+ *       the setHdmiCtrl method.
+ *       Note that this applies only to stereo overrides.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+ *       Set to TRUE to enable the Vendor Specific infoframe used for 3D
+ *       stereo LR sync.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+ *       Set to TRUE to enable the Vendor Specific infoframe used for Self
+ *       Refresh panels.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+ *       This option is reserved for backward compatibility with
+ *       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_PACKET and
+ *       NV0073_CTRL_CMD_DP_SET_PACKET.
+ *       It is temporary and will be deprecated soon.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL (0x730289U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 type;
+    NvU32 transmitControl;
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE_NO NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE_YES NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT
+ *
+ * This command returns the maximum pixel clock rate supported by the
+ * specified display device.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the display for which information is to be
+ *   returned. Only one display may be indicated in this parameter.
+ *   If more than one displayId is used, a failing status of
+ *   NV_ERR_INVALID_ARGUMENT will be returned.
+ * pclkLimit
+ *   This parameter returns the min of orPclkLimit and vbPclkLimit in KHz.
+ *   It may be used for SLI configs that use a video bridge. For non-SLI
+ *   configs and bridgeless SLI configs, the client should use orPclkLimit
+ *   instead.
+ * orPclkLimit
+ *   This parameter returns the maximum pixel clock frequency of the OR
+ *   in KHz.
+ * vbPclkLimit
+ *   This parameter returns the maximum pixel clock frequency of the
+ *   video bridge (SLI) in KHz (or zero if there is no video bridge).
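+ *
+ *   A minimal usage sketch (illustrative only; NvRmControl, the
+ *   hClient/hDisplay handles, and requestedPclkKHz are assumptions of
+ *   this example, not definitions from this header):
+ *
+ *     NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS params = { 0 };
+ *     params.subDeviceInstance = 0;
+ *     params.displayId = displayId;  // exactly one display bit set
+ *     if ((NvRmControl(hClient, hDisplay,
+ *                      NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT,
+ *                      &params, sizeof(params)) == NV_OK) &&
+ *         (requestedPclkKHz <= params.orPclkLimit))
+ *     {
+ *         // the requested mode fits within the OR pixel clock limit
+ *     }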
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT (0x73028aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS_MESSAGE_ID (0x8AU)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 pclkLimit;
+    NvU32 orPclkLimit;
+    NvU32 vbPclkLimit;
+} NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO
+ *
+ * This command returns output resource information for the specified display
+ * device.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the display for which information is to be
+ *   returned. Only one display may be indicated in this parameter.
+ *   If more than one displayId is used, a failing status of
+ *   NV_ERR_INVALID_ARGUMENT will be returned.
+ * type
+ *   This parameter returns the output resource type. Legal values for
+ *   this parameter include:
+ *     NV0073_CTRL_SPECIFIC_OR_TYPE_DAC
+ *       The output resource is a DAC.
+ *     NV0073_CTRL_SPECIFIC_OR_TYPE_SOR
+ *       The output resource is a serial output resource.
+ *     NV0073_CTRL_SPECIFIC_OR_TYPE_DSI
+ *       The output resource is a Display Serial Interface output resource.
+ *     NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR
+ *       The output resource is a parallel input/output resource.
+ * index
+ *   This parameter returns the type-specific index of the output
+ *   resource associated with the specified displayId.
+ * protocol
+ *   This parameter returns the type-specific protocol used by the
+ *   output resource. Legal values for this parameter include:
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI
+ *     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN
+ * ditherType
+ *   This parameter returns the dither type for the output resource.
+ *   Legal values for this parameter include:
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_10_BITS
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF
+ * ditherAlgo
+ *   This parameter returns the dithering algorithm used by the output
+ *   resource. Legal values for this parameter include:
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL
+ *     NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN
+ * location
+ *   This parameter returns the physical location of the output resource.
+ *   Legal values for this parameter include:
+ *     NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP
+ *     NV0073_CTRL_SPECIFIC_OR_LOCATION_BOARD
+ * rootPortId
+ *   This parameter specifies the Root-Port ID for the given display.
+ * dcbIndex
+ *   This parameter returns the DCB index of the display device.
+ * vbiosAddress
+ *   This parameter is the VBIOS IP address, which will have a valid value
+ *   only if the displayId is allocated by the VBIOS.
+ * bIsLitByVbios
+ *   This parameter specifies whether the displayId allocation was
+ *   requested by the VBIOS.
+ * bIsDispDynamic
+ *   Returns NV_TRUE if the displayId is allocated dynamically, else
+ *   NV_FALSE.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID (0x8BU)
+
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  index;
+    NvU32  type;
+    NvU32  protocol;
+    NvU32  ditherType;
+    NvU32  ditherAlgo;
+    NvU32  location;
+    NvU32  rootPortId;
+    NvU32  dcbIndex;
+    NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+    NvBool bIsLitByVbios;
+    NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+/* valid type values */
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+/* valid DAC protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+
+
+
+/* valid SOR protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+
+/* valid DSI protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+
+/* valid PIOR protocol values */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+
+/* valid UNKNOWN protocol value */
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+/* valid ditherType values */
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_10_BITS (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF (0x00000003U)
+
+/* valid ditherAlgo values */
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2 (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2 (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL (0x00000004U)
+#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN (0xFFFFFFFFU)
+
+/* valid location values */
+#define NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_LOCATION_BOARD (0x00000001U)
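+
+/*
+ * Example usage of NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (illustrative
+ * sketch only; NvRmControl and the hClient/hDisplay handles are
+ * assumptions of this example, not definitions from this header):
+ *
+ *   NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
+ *   params.subDeviceInstance = 0;
+ *   params.displayId = displayId;  // exactly one display bit set
+ *   if ((NvRmControl(hClient, hDisplay,
+ *                    NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+ *                    &params, sizeof(params)) == NV_OK) &&
+ *       (params.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+ *       ((params.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) ||
+ *        (params.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B)))
+ *   {
+ *       // displayId is driven by a SOR operating in DisplayPort mode
+ *   }
+ */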
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS
+ * NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS
+ *
+ * These commands retrieve and set the user backlight brightness for
+ * the specified display.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ * displayId
+ *   Display for which brightness is to be retrieved or set.
+ * brightness
+ *   The backlight brightness in the range [0,100], inclusive. This
+ *   is an input for SET_BACKLIGHT_BRIGHTNESS, and an output for
+ *   GET_BACKLIGHT_BRIGHTNESS.
+ *
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | 0x91" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | 0x92" */
+
+#define NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE 0U
+#define NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE 100U
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 brightness;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS
+ *
+ * This command is used to inform RM about the scrambling, clock mode, FRL
+ * and DSC caps of the HDMI sink device.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ * displayID
+ *   This parameter specifies the displayID for the display output resource
+ *   to configure.
+ * caps
+ *   This parameter specifies the sink caps.
+ *     GT_340MHZ_CLOCK_SUPPORTED refers to whether the sink supports a TMDS
+ *     clock (sorClk) rate greater than 340 MHz.
+ *     LTE_340MHZ_SCRAMBLING_SUPPORTED refers to whether scrambling is
+ *     supported for clock rates at or below 340 MHz.
+ *     SCDC_SUPPORTED refers to whether SCDC access is supported on the
+ *     sink.
+ *     MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL rate
+ *     supported.
+ *     DSC_12_SUPPORTED refers to whether VESA DSC v1.2a is supported.
+ *     DSC_12_MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL
+ *     rate supported when VESA DSC v1.2a is supported.
+ *
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID (0x93U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER
+ *
+ * This command sets the monitor power on or off.
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ * displayId
+ *   This parameter specifies the displayID for the display output resource
+ *   to configure.
+ * powerState
+ *   This parameter should be one of the valid
+ *   NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_* values.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER (0x730295U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS_MESSAGE_ID (0x95U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 powerState;
+} NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON (0x00000001U)
+
+
+
+/*
+* NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG
+*
+* This command is used to perform HDMI FRL link training and enable FRL mode
+* for the specified displayId. The link configuration is returned after
+* successful link training.
+*
+* subDeviceInstance
+*   This parameter specifies the subdevice instance within the
+*   NV04_DISPLAY_COMMON parent device to which the operation should be
+*   directed.
+* displayID
+*   This parameter specifies the displayID for the display output resource
+*   to configure.
+* data
+*   This parameter is an input and output to this command.
+*   Here are the currently defined fields:
+*     NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE
+*       This field specifies the desired setting for the lane count. A
+*       client may choose any lane count as long as it does not exceed the
+*       capability of the HDMI FRL sink as indicated in the sink capability
+*       field.
+*       The valid values for this field are:
+*         NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE
+*           For a 0-lane configuration, link training is shut down
+*           (disable FRL).
+*         NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G
+*           For an FRL 3-lane configuration with 3 Gbps bandwidth per lane.
+*         NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G
+*           For an FRL 3-lane configuration with 6 Gbps bandwidth per lane.
+*         NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G
+*           For an FRL 4-lane configuration with 6 Gbps bandwidth per lane.
+*         NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G
+*           For an FRL 4-lane configuration with 8 Gbps bandwidth per lane.
+*         NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G
+*           For an FRL 4-lane configuration with 10 Gbps bandwidth per lane.
+*         NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G
+*           For an FRL 4-lane configuration with 12 Gbps bandwidth per lane.
+*   On return, the link bandwidth setting is returned, which may be
+*   different from the requested input setting.
+* bFakeLt
+*   This flag is an input to this command.
+*   It indicates whether the FRL link training is a fake link training.
+*   TRUE if the FRL link training is fake and no real sink device is
+*   attached.
+* bLtSkipped
+*   A returned flag indicating whether link training was skipped.
+*   TRUE if link training was skipped because the link configuration has
+*   not changed.
+*
+* Possible status values returned include:
+*   NV_OK -
+*     After link training finishes, NV_OK is returned along with the
+*     updated link configuration. In case of link training failure,
+*     FRL_RATE_NONE is returned with NV_OK.
+*   NV_ERR_NOT_SUPPORTED -
+*     If the GPU/sink is not capable of HDMI FRL, NV_ERR_NOT_SUPPORTED
+*     is returned.
+*   NV_ERR_INVALID_ARGUMENT
+*     If any argument is invalid for this control call,
+*     NV_ERR_INVALID_ARGUMENT is returned.
+*/
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG (0x73029aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS_MESSAGE_ID (0x9AU)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  data;
+    NvBool bFakeLt;
+    NvBool bLtSkipped;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS;
+
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE 2:0
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G (0x00000006U)
+
+
+
+#define NV0073_CTRL_SPECIFIC_MAX_CRC_REGIONS 9U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS_MESSAGE_ID (0xA0U)
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 regionCrcs[NV0073_CTRL_SPECIFIC_MAX_CRC_REGIONS];
+    NvU16 reqRegionCrcMask;
+} NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS
+ *
+ * This command is used to capture the active viewport region CRCs.
+ *
+ * [in] subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ *
+ * [in] displayId
+ *   This parameter specifies the displayId of the panel for which the
+ *   region CRCs are to be captured.
+ *
+ * [out] regionCrcs
+ *   This field holds the region CRC values to be returned after successful
+ *   completion of the control command.
+ *
+ * [in] reqRegionCrcMask
+ *   This parameter specifies the bitmask of the requested CRC regions.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS (0x7302a0U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS_MESSAGE_ID" */
+
+/*
+* NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2
+*
+* Applies an EDID override on the specified OD.
+*
+* subDeviceInstance
+*   This parameter specifies the subdevice instance within the
+*   NV04_DISPLAY_COMMON parent device to which the operation should be
+*   directed.
+* displayId (in)
+*   ID of the panel on which the operation is to be performed.
+* bufferSize (in) +* Size of the EDID buffer. +* edidBuffer (in/out) +* The buffer which stores the EDID before and after override. +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_PARAMETER +*/ +#define NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2 (0x7302a1U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS_MESSAGE_ID (0xA1U) + +typedef struct NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS + * + * This command is used to get the HDMI FRL caps of GPU side. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * caps + * This parameter specifies the GPU caps. + * MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL link rate supported + * + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS (0x7302a2U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS_MESSAGE_ID (0xA2U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 caps; +} NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED 2:0 +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_NONE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_3LANES_3G (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_3LANES_6G (0x00000002U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_6G (0x00000003U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_8G (0x00000004U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_10G (0x00000005U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_12G (0x00000006U) + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE + * + * Notifies the system that a display change is about to begin/end. + * Also performs the necessary synchronizations for the same. + * + * The command takes a NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS structure as an + * argument with appropriate subDeviceInstance. + * + * [in]subDeviceInstance + * The sub-device instance + * [in]newDevices + * Bitmask of devices that are planned on being enabled with the + * pending device change. See NV_CFGEX_GET_DEVICES_CONFIGURATION for bit defs. + * [in]properties + * Bitmask of display attributes for new configuration (none used at the moment). + * [in]enable + * Parameter to decide between display change start and end. Can take values + * NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START or NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END. 
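+ *
+ * A minimal bracketing sketch (illustrative only; NvRmControl, the
+ * hClient/hDisplay handles and newDeviceMask are assumptions of this
+ * example, not definitions from this header):
+ *
+ *   NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS params = { 0 };
+ *   params.subDeviceInstance = 0;
+ *   params.newDevices = newDeviceMask;  // devices planned to be enabled
+ *   params.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START;
+ *   NvRmControl(hClient, hDisplay, NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE,
+ *               &params, sizeof(params));
+ *   // ... perform the display change ...
+ *   params.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END;
+ *   NvRmControl(hClient, hDisplay, NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE,
+ *               &params, sizeof(params));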
+ * Possible return values:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS_MESSAGE_ID (0xA4U)
+
+typedef struct NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS {
+
+    NvU32 subDeviceInstance;
+    NvU32 newDevices;
+    NvU32 properties;
+    NvU32 enable;
+} NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PROPERTIES_SPANNING (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE (0x7302a4U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS_MESSAGE_ID" */
+
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA
+ *
+ * This command is used to get the HDMI sink status/caps via the Status and
+ * Control Data Channel (SCDC).
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ * displayId
+ *   This parameter specifies the displayId of the HDMI sink.
+ * offset
+ *   This parameter specifies the SCDC offset that the read operation
+ *   should use.
+ * data
+ *   This field returns the data read from the sink at the specified SCDC
+ *   offset.
+ *
+ *
+ * Possible status values returned include:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA (0x7302a6U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS_MESSAGE_ID (0xA6U)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU8  offset;
+    NvU8  data;
+} NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET 7:0
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SINK_VERSION (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SOURCE_VERSION (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_UPDATE_FLAGS_0 (0x00000010U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_TMDS_CONFIGURATION (0x00000020U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SCRAMBLER_STATUS (0x00000021U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CONFIGURATION_0 (0x00000030U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CONFIGURATION_1 (0x00000031U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SOURCE_TEST_CONFIGURATION (0x00000035U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_0 (0x00000040U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_1 (0x00000041U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_2 (0x00000042U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_0 (0x00000050U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_1 (0x00000051U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_2 (0x00000052U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_3 (0x00000053U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_4 (0x00000054U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_5 (0x00000055U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_6 (0x00000056U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_7 (0x00000057U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_8 (0x00000058U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_RSED_0 (0x00000059U)
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_RSED_1 (0x0000005AU)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY
+ *
+ * This command is used to query whether the specified monitor should be
+ * used with directmode.
+ *
+ * [in] manufacturerID
+ *   This parameter specifies the 16-bit EDID Manufacturer ID.
+ * [in] productID
+ *   This parameter specifies the 16-bit EDID Product ID.
+ * [out] bIsDirectmode
+ *   This indicates whether the monitor should be used with directmode.
+ * Possible return values:
+ *   NV_OK
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY (0x7302a7U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS_MESSAGE_ID (0xA7U)
+
+typedef struct NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS {
+    NvU16  manufacturerID;
+    NvU16  productID;
+    NvBool bIsDirectmode;
+} NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION
+ *
+ * This command is used to get the HDMI FRL capacity computation result.
+ *
+ * [in] cmd
+ *   This parameter specifies the command for the HDMI FRL capacity
+ *   computation.
+ * [in] input
+ *   This parameter specifies the input data for the HDMI FRL capacity
+ *   computation.
+ * [out] result
+ *   This indicates the computation result of the HDMI FRL capacity
+ *   computation.
+ * [in/out] preCalc
+ *   This indicates the pre-calculation result of the HDMI FRL capacity
+ *   computation.
+ * [in/out] dsc
+ *   This indicates the DSC parameters of the HDMI FRL capacity computation.
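+ *
+ * A minimal usage sketch (illustrative only; NvRmControl and the
+ * hClient/hDisplay handles are assumptions of this example, and the
+ * timing values shown are arbitrary):
+ *
+ *   NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS p = { 0 };
+ *   p.cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO;
+ *   p.input.numLanes = 4;
+ *   p.input.frlBitRateGbps = 8;
+ *   p.input.pclk10KHz = 59400;  // 594 MHz, e.g. 4K@60 RGB 8bpc
+ *   // ... fill the remaining input fields from the target timing ...
+ *   NvRmControl(hClient, hDisplay,
+ *               NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION,
+ *               &p, sizeof(p));
+ *   // p.result.isVideoTransportSupported then indicates whether the
+ *   // timing fits at the requested FRL configuration.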
+ * Possible return values:
+ *   NV_OK
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION (0x7302a8U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS {
+    NvU32 numLanes;
+    NvU32 frlBitRateGbps;
+    NvU32 pclk10KHz;
+    NvU32 hTotal;
+    NvU32 hActive;
+    NvU32 bpc;
+    NvU32 pixelPacking;
+    NvU32 audioType;
+    NvU32 numAudioChannels;
+    NvU32 audioFreqKHz;
+
+    struct {
+        NvU32 bppTargetx16;
+        NvU32 hSlices;
+        NvU32 sliceWidth;
+        NvU32 dscTotalChunkKBytes;
+    } compressionInfo;
+} NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS;
+
+typedef struct NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT {
+    NvU32  frlRate;
+    NvU32  bppTargetx16;
+    NvBool engageCompression;
+    NvBool isAudioSupported;
+    NvBool dataFlowDisparityReqMet;
+    NvBool dataFlowMeteringReqMet;
+    NvBool isVideoTransportSupported;
+    NvU32  triBytesBorrowed;
+    NvU32  hcActiveBytes;
+    NvU32  hcActiveTriBytes;
+    NvU32  hcBlankTriBytes;
+    NvU32  tBlankToTTotalX1k;
+} NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT;
+
+typedef struct NV0073_CTRL_FRL_PRE_CALC_CONFIG {
+    NvU32  vic;
+    NvU32  packing;
+    NvU32  bpc;
+    NvU32  frlRate;
+    NvU32  bppX16;
+    NvBool bHasPreCalcFRLData;
+} NV0073_CTRL_FRL_PRE_CALC_CONFIG;
+
+typedef struct NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS {
+    NvU32  maxSliceCount;
+    NvU32  maxSliceWidth;
+    NvBool bIsDSCPossible;
+} NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS_MESSAGE_ID (0xA8U)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS {
+    NvU8                                        cmd;
+    NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS input;
+    NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT result;
+    NV0073_CTRL_FRL_PRE_CALC_CONFIG             preCalc;
+    NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS      dsc;
+} NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_NULL (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_UNCOMPRESSED_FRL_CONFIG (0x00000004U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_COMPRESSED_FRL_CONFIG (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_IS_FRL_DSC_POSSIBLE (0x00000006U)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET
+ *
+ * This command is used to program the display output packets.
+ * These generic packets can be used for both HDMI and DP.
+ * HW has added 6 new generic packets per head because some use cases
+ * require sending an infoframe at a particular location (vsync, vblank,
+ * loadV).
+ *
+ * Note: 1. The client first needs to reserve or acquire a free infoframe
+ *          index using NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET.
+ *       2. The client needs to update the SDP index for the head through
+ *          the control call NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET.
+ *       3. The client needs to release the infoframe index using the
+ *          control call NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET.
+ *
+ * [in] subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ *   This parameter must specify a value between zero and the total number
+ *   of subdevices within the parent device. This parameter should be set
+ *   to zero for default behavior.
+ * [in] transmitControl
+ *   This parameter controls how the packet is to be sent by setting the
+ *   control bits.
+ *   Possible flags are as follows:
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE
+ *       Setting this field to _YES enables this generic infoframe;
+ *       setting this field to _NO disables this generic infoframe.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE
+ *       Set to _YES to transmit the new infoframe exactly once.
+ *       Set to _NO to transmit the new infoframe every frame.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC
+ *       The SDP can be sent at 3 different locations:
+ *         VBLANK - the new infoframe is sent at Vblank.
+ *         VSYNC - the new infoframe is sent at Vsync.
+ *         LOADV - the new infoframe is triggered by LOADV, and sent at
+ *                 Vsync.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE
+ *       _ENABLE: override DB1 bit1 with the existence of loadv (for Panel
+ *       Self Refresh).
+ *       _DISABLE: do not override the shared generic infoframe subpacket
+ *       DB1 bit1.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE
+ *       _ENABLE: override DB1 bit3 with the existence of loadv (for Panel
+ *       Replay).
+ *       _DISABLE: do not override the shared generic infoframe subpacket
+ *       DB1 bit3.
+ * [in] packetSize
+ *   Size of the packet in the packet array to send, including header and
+ *   payload.
+ * [in] targetHeadIndex
+ *   Specifies the target head number for which the SDP needs to be updated.
+ * [in] infoframeIndex
+ *   Specifies the index of the infoframe.
+ * [in] packet
+ *   packet points to the packet to send.
+ *   For HDMI 1.1, the maximum allowed is 31 bytes.
+ *   The packet array includes the 3 bytes of header + data depending on
+ *   the type of packet. For an infoframe, the header bytes refer to type,
+ *   version and length respectively. This comes as input to this command.
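+ *
+ * An illustrative transmitControl encoding (a sketch using plain shifts
+ * rather than the DRF helper macros; the shift amounts correspond to the
+ * bit ranges defined below):
+ *
+ *   NvU32 tc = 0;
+ *   tc |= NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_YES << 0;  // field 0:0
+ *   tc |= NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_NO  << 1;  // field 1:1
+ *   tc |= NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VSYNC  << 2;  // field 5:2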
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET (0x7302a9) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xA9U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 transmitControl;
+    NvU32 packetSize;
+    NvU32 targetHeadIndex;
+    NvU32 infoframeIndex;
+    NvU8  packet[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_NO (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_YES (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE 1:1
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_NO (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_YES (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC 5:2
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VBLANK (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VSYNC (0x0000001)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_LOADV (0x0000002)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE 6:6
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE_DISABLE (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE_ENABLE (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE 7:7
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_DISABLE (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_ENABLE (0x0000001)
+
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE 8:8
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_DISABLE (0x0000000)
+#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_ENABLE (0x0000001)
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET
+ *
+ * This command is used to reserve an infoframe for a head; RM assigns a
+ * free infoframe index and returns it. Later, the client needs to call
+ * the control call NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET
+ * to release the index.
+ *
+ * [in] subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ *   This parameter must specify a value between zero and the total number
+ *   of subdevices within the parent device. This parameter should be set
+ *   to zero for default behavior.
+ * [in] targetHeadIndex
+ *   Target head for which the SDP needs to be sent.
+ * [out] infoframeIndex
+ *   Returned infoframe index for the head.
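+ *
+ * A minimal acquire/program/release sketch (illustrative only; NvRmControl
+ * and the hClient/hDisplay handles are assumptions of this example, not
+ * definitions from this header):
+ *
+ *   NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS acq = { 0 };
+ *   acq.targetHeadIndex = 0;
+ *   NvRmControl(hClient, hDisplay,
+ *               NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET,
+ *               &acq, sizeof(acq));
+ *   // ... program the SDP with
+ *   //     NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET, passing
+ *   //     acq.infoframeIndex, then release the index:
+ *   NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS rel = { 0 };
+ *   rel.targetHeadIndex = acq.targetHeadIndex;
+ *   rel.infoframeIndex = acq.infoframeIndex;
+ *   NvRmControl(hClient, hDisplay,
+ *               NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET,
+ *               &rel, sizeof(rel));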
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET (0x7302aa) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xAAU)
+
+typedef struct NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 targetHeadIndex;
+    NvU32 infoframeIndex;
+} NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET
+ *
+ * This command is used to release the infoframe index that was acquired by
+ * the client.
+ *
+ * [in] subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ *   This parameter must specify a value between zero and the total number
+ *   of subdevices within the parent device. This parameter should be set
+ *   to zero for default behavior.
+ * [in] targetHeadIndex
+ *   Specifies the target head number for which the SDP needs to be updated.
+ * [in] infoframeIndex
+ *   Infoframe index for the target head.
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET (0x7302ab) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xABU)
+
+typedef struct NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 targetHeadIndex;
+    NvU32 infoframeIndex;
+} NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_DISP_I2C_READ_WRITE
+ *
+ * This command is used to perform an I2C read/write to a slave on the
+ * display I2C instance.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * i2cPort
+ *   This parameter specifies the I2C CCB port ID.
+ * i2cSlaveAddress
+ *   This parameter specifies the I2C slave address.
+ * readWriteFlag
+ *   This parameter specifies whether it is a read or a write operation.
+ * readWriteLen
+ *   This parameter specifies the length of the read/write buffer.
+ * readBuffer
+ *   The data read from the slave address is copied into this buffer.
+ * writeBuffer
+ *   This buffer holds the data that will be written to the slave address.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_DISP_I2C_READ_WRITE (0x7302acU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_BUF_LEN 128U
+
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS_MESSAGE_ID (0xACU)
+
+typedef struct NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 i2cPort;
+    NvU32 i2cSlaveAddress;
+    NvU32 readWriteFlag;
+    NvU32 readWriteLen;
+    NvU8  readBuffer[NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_BUF_LEN];
+    NvU8  writeBuffer[NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_BUF_LEN];
+} NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_READ_MODE (0x00000001)
+#define NV0073_CTRL_SPECIFIC_DISP_I2C_WRITE_MODE (0x00000000)
+
+/* _ctrl0073specific_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
new file mode 100644
index 0000000..9a585aa
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
@@ -0,0 +1,166 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073stereo.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+
+
+
+/*
+ * NV0073_CTRL_CMD_STEREO_DONGLE_SET_TIMINGS
+ *
+ * Sets new video mode timings, e.g. from the display driver on a modeset.
+ *
+ * Parameters:
+ * [IN] subDeviceInstance - This parameter specifies the subdevice instance
+ *   within the NV04_DISPLAY_COMMON parent device to which the operation
+ *   should be directed. This parameter must specify a value between
+ *   zero and the total number of subdevices within the parent device.
+ *     This parameter should be set to zero for default behavior.
+ * [IN] head - head to be passed to stereoDongleControl
+ * [IN] timings - new timings to be set
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU
+ */
+#define NV0073_CTRL_CMD_STEREO_DONGLE_SET_TIMINGS (0x731703U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS {
+    NvU32 PixelClock;
+    NvU16 TotalWidth;
+    NvU16 VisibleImageWidth;
+    NvU16 HorizontalBlankStart;
+    NvU16 HorizontalBlankWidth;
+    NvU16 HorizontalSyncStart;
+    NvU16 HorizontalSyncWidth;
+    NvU16 TotalHeight;
+    NvU16 VisibleImageHeight;
+    NvU16 VerticalBlankStart;
+    NvU16 VerticalBlankHeight;
+    NvU16 VerticalSyncStart;
+    NvU16 VerticalSyncHeight;
+    NvU16 InterlacedMode;
+    NvU16 DoubleScanMode;
+
+    NvU16 MonitorVendorId;
+    NvU16 MonitorProductId;
+} NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS;
+
+#define NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS timings;
+} NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_STEREO_DONGLE_ACTIVATE
+ *
+ * stereoDongleActivate wrapper / NV_STEREO_DONGLE_ACTIVATE_DATA_ACTIVE_YES
+ * Updates the SBIOS with the 3D stereo state (active).
+ *
+ * Parameters:
+ * [IN] subDeviceInstance - This parameter specifies the subdevice instance
+ *     within the NV04_DISPLAY_COMMON parent device to which the operation
+ *     should be directed. This parameter must specify a value between
+ *     zero and the total number of subdevices within the parent device.
+ *     This parameter should be set to zero for default behavior.
+ * [IN] head - head to be passed to stereoDongleActivate
+ * [IN] bSDA - enable stereo on DDC SDA
+ * [IN] bWorkStation - is workstation stereo?
+ * [IN] bDLP - is checkerboard DLP stereo?
+ * [IN] IRPower - IR power value
+ * [IN] flywheel - FlyWheel value
+ * [IN] bRegIgnore - use reg?
+ * [IN] bI2cEmitter - Sets NV_STEREO_DONGLE_ACTIVATE_DATA_I2C_EMITTER_YES and pStereo->bAegisDT
+ * [IN] bForcedSupported - Sets NV_STEREO_DONGLE_FORCED_SUPPORTED_YES and pStereo->GPIOControlledDongle
+ * [IN] bInfoFrame - Aegis DT with DP InfoFrame
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT - if (head > OBJ_MAX_HEADS)
+ *   NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU
+ */
+#define NV0073_CTRL_CMD_STEREO_DONGLE_ACTIVATE (0x731704U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool bSDA;
+    NvBool bWorkStation;
+    NvBool bDLP;
+    NvU8   IRPower;
+    NvU8   flywheel;
+    NvBool bRegIgnore;
+    NvBool bI2cEmitter;
+    NvBool bForcedSupported;
+    NvBool bInfoFrame;
+} NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS;
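+
+/*
+ * Illustrative only: a minimal sketch of pairing the activate command above
+ * with the deactivate command below. "rmControl" stands in for whatever RM
+ * control dispatch entry point the client uses; it is not part of this
+ * header, and the handle names are hypothetical.
+ *
+ *   NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS act = { 0 };
+ *   act.subDeviceInstance = 0;      // default subdevice
+ *   act.head = head;
+ *   act.bSDA = NV_TRUE;             // enable stereo on DDC SDA
+ *   // rmControl(hClient, hDisplayCommon,
+ *   //           NV0073_CTRL_CMD_STEREO_DONGLE_ACTIVATE, &act, sizeof(act));
+ *
+ *   // ... later, when the 3D app exits ...
+ *   NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS deact = { 0 };
+ *   deact.head = act.head;
+ *   // rmControl(hClient, hDisplayCommon,
+ *   //           NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE,
+ *   //           &deact, sizeof(deact));
+ */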
+
+/*
+ * NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE
+ *
+ * stereoDongleActivate wrapper / NV_STEREO_DONGLE_ACTIVATE_DATA_ACTIVE_NO
+ *
+ * If the active count is <= 0, no 3D app is running, which indicates that
+ * stereo has really been deactivated; the SBIOS is then updated that the
+ * 3D stereo state is NOT ACTIVE.
+ *
+ * Parameters:
+ * [IN] subDeviceInstance - This parameter specifies the subdevice instance
+ *     within the NV04_DISPLAY_COMMON parent device to which the operation
+ *     should be directed. This parameter must specify a value between
+ *     zero and the total number of subdevices within the parent device.
+ *     This parameter should be set to zero for default behavior.
+ * [IN] head - head to be passed to stereoDongleActivate
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT - if (head > OBJ_MAX_HEADS)
+ *   NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU
+ */
+#define NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE (0x731705U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+} NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS;
+
+
+
+/* _ctrl0073stereo_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
new file mode 100644
index 0000000..08a06ae
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073svp.finn
+//
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
new file mode 100644
index 0000000..5a54df7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
@@ -0,0 +1,1072 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0073/ctrl0073system.finn
+//
+
+#include "ctrl/ctrl0073/ctrl0073base.h"
+
+/* NV04_DISPLAY_COMMON system-level control commands and parameters */
+
+/* extract cap bit setting from tbl */
+#define NV0073_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+/* Caps format is byte_index:bit_mask.
+ * Important: keep the number of bytes needed for these fields in sync with
+ * NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE
+ */
+#define NV0073_CTRL_SYSTEM_CAPS_AA_FOS_GAMMA_COMP_SUPPORTED        0:0x01
+#define NV0073_CTRL_SYSTEM_CAPS_TV_LOWRES_BUG_85919                0:0x02
+#define NV0073_CTRL_SYSTEM_CAPS_DFP_GPU_SCALING_BUG_154102         0:0x04
+#define NV0073_CTRL_SYSTEM_CAPS_SLI_INTERLACED_MODE_BUG_235218     0:0x08 // Deprecated
+#define NV0073_CTRL_SYSTEM_CAPS_STEREO_DIN_AVAILABLE               0:0x10
+#define NV0073_CTRL_SYSTEM_CAPS_OFFSET_PCLK_DFP_FOR_EMI_BUG_443891 0:0x20
+#define NV0073_CTRL_SYSTEM_CAPS_GET_DMI_SCANLINE_SUPPORTED         0:0x40
+/*
+ * Indicates support for HDCP Key Selection Vector (KSV) list and System
+ * Renewability Message (SRM) validation
+ */
+#define NV0073_CTRL_SYSTEM_CAPS_KSV_SRM_VALIDATION_SUPPORTED       0:0x80
+
+#define NV0073_CTRL_SYSTEM_CAPS_SINGLE_HEAD_MST_SUPPORTED          1:0x01
+#define NV0073_CTRL_SYSTEM_CAPS_SINGLE_HEAD_DUAL_SST_SUPPORTED     1:0x02
+#define NV0073_CTRL_SYSTEM_CAPS_HDMI_2_0_SUPPORTED                 1:0x04
+#define NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED                1:0x08
+#define NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER        1:0x10
+/*
+ * Indicates that modesets where no heads are increasing resource requirements,
+ * or no heads are decreasing resource requirements, can be done glitchlessly.
+ */
+#define NV0073_CTRL_SYSTEM_CAPS_GLITCHLESS_MODESET_SUPPORTED       1:0x20
+/* Indicates the SW ACR is enabled for HDMI 2.1 due to Bug 3275257. */
+#define NV0073_CTRL_SYSTEM_CAPS_HDMI21_SW_ACR_BUG_3275257          1:0x40
+
+/* Size in bytes of display caps table. Keep in sync with # of fields above. */
+#define NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE 2U
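+
+/*
+ * Illustrative only: testing one capability from a caps table that was
+ * previously filled in by NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2 (documented
+ * below), using the NV0073_CTRL_SYSTEM_GET_CAP macro defined above.
+ *
+ *   NvU8 capsTbl[NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE];
+ *   // ... populate capsTbl via NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2 ...
+ *   if (NV0073_CTRL_SYSTEM_GET_CAP(capsTbl,
+ *           NV0073_CTRL_SYSTEM_CAPS_HDMI_2_0_SUPPORTED))
+ *   {
+ *       // HDMI 2.0 is supported on every GPU within the device.
+ *   }
+ */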
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2
+ *
+ * This command returns the set of display capabilities for the parent device
+ * in the form of an array of unsigned bytes. Display capabilities include
+ * supported features and required workarounds for the display engine(s)
+ * within the device, each represented by a byte offset into the table and a
+ * bit position within that byte. The set of display capabilities will be
+ * normalized across all GPUs within the device (a feature capability will be
+ * set only if it is supported on all GPUs, while a required workaround
+ * capability will be set if any of the GPUs require it).
+ *
+ * [out] capsTbl
+ *   The display caps bits will be transferred by the RM into this array of
+ *   unsigned bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2 (0x730138U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS {
+    NvU8 capsTbl[NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE];
+} NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS
+ *
+ * This command returns the number of heads supported by the specified
+ * subdevice and available for use by displays.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the number of heads.
+ *   Possible valid flags are:
+ *     NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT
+ *       This flag is used to request the number of heads that are
+ *       currently in use by an NV client using a user display class
+ *       instance (see NV15_VIDEO_LUT_CURSOR_DAC for an example). If this
+ *       flag is disabled then the total number of heads supported is
+ *       returned.
+ * numHeads
+ *   This parameter returns the number of usable heads for the specified
+ *   subdevice.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+/* valid get num heads flags */
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT         0:0
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT_DISABLE (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT_ENABLE  (0x00000001U)
+
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE
+ *
+ * This command returns the current RG scanline of the specified head on the
+ * specified subdevice. To get the DMI scanline on supported chips, use
+ * NV0073_CTRL_CMD_SYSTEM_GET_DMI_SCANLINE.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the current scanline
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * currentScanline
+ *   This parameter returns the current RG scanline value for the specified
+ *   head. If the head does not have a valid mode enabled then a scanline
+ *   value of 0xffffffff is returned.
+ * bStereoEyeSupported (out)
+ *   This parameter specifies whether stereoEye reporting is supported (this
+ *   is HW-dependent). Note that this value does not actually reflect whether
+ *   the given head is really in stereo mode.
+ * stereoEye (out)
+ *   If supported (i.e., bStereoEyeSupported is NV_TRUE), this parameter
+ *   returns either NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_RIGHT_EYE or
+ *   NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_LEFT_EYE, reflecting the stereo eye
+ *   that is currently scanned out. Although this value typically changes at
+ *   the beginning of vblank, the exact guarantee isn't more accurate than
+ *   "somewhere in vblank".
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_RIGHT_EYE 0x00000000U
+#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_LEFT_EYE  0x00000001U
+
+#define NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvU32  currentScanline;
+    NvBool bStereoEyeSupported;
+    NvU32  stereoEye;
+} NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER
+ *
+ * This command returns the current VBlank counter of the specified head on
+ * the specified subdevice.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the vblank counter
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * verticalBlankCounter
+ *   This parameter returns the vblank counter value for the specified
+ *   head. If the display mode is not valid or vblank is not active then
+ *   the verticalBlankCounter value is undefined.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER (0x730109U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 verticalBlankCounter;
+} NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS;
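+
+/*
+ * Illustrative only: a sketch of using the vblank counter above to wait for
+ * the next vblank. "rmControl" is a placeholder for the client's RM control
+ * dispatch and is not defined in this header.
+ *
+ *   NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS params = { 0 };
+ *   params.subDeviceInstance = 0;
+ *   params.head = 0;
+ *   // rmControl(..., NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER,
+ *   //           &params, sizeof(params));
+ *   NvU32 start = params.verticalBlankCounter;
+ *   // ... re-issue the call until params.verticalBlankCounter != start ...
+ */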
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_ENABLE
+ *
+ * This command returns the current VBlank enable status for the specified
+ * head.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the vblank status
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * bEnabled
+ *   This parameter returns the vblank enable status for the specified head.
+ *   A value of NV_FALSE indicates that vblank interrupts are not currently
+ *   enabled, while a value of NV_TRUE indicates that vblank interrupts are
+ *   currently enabled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_ENABLE (0x73010aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvBool bEnabled;
+} NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED
+ *
+ * This command returns the set of supported display IDs for the specified
+ * subdevice in the form of a 32-bit display mask. State from internal
+ * display connectivity tables is used to determine the set of possible
+ * display connections for the GPU. The presence of a display in the
+ * display mask only indicates the display is supported. The connectivity
+ * status of the display should be determined using the
+ * NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE command. The displayMask
+ * value returned by NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED is static
+ * and will remain consistent across boots of the system.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayMask
+ *   This parameter returns a NV0073_DISPLAY_MASK value describing the set
+ *   of displays supported by the subdevice. An enabled bit in displayMask
+ *   indicates the support of a display device with that displayId.
+ * displayMaskDDC
+ *   This parameter returns a NV0073_DISPLAY_MASK value, indicating the
+ *   subset of displayMask that supports DDC.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayMask;
+    NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE
+ *
+ * This command can be used to check the presence of a mask of display
+ * devices on the specified subdevice.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the connection state information.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD
+ *       A client uses this field to indicate what method it wishes the
+ *       system to use when determining the presence of attached displays.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_DEFAULT
+ *           The system decides what method to use.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED
+ *           Return the last full detection state for the display mask.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_ECONODDC
+ *           Ping the DDC address of the given display mask to check for
+ *           a connected device. This is a lightweight method to check
+ *           for a present device.
+ *     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC
+ *       A client uses this field to indicate whether to allow DDC during
+ *       this detection or to not use it.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DEFAULT
+ *           The system will use DDC as needed for each display.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DISABLE
+ *           The system will not use DDC for any display. If DDC is
+ *           disabled, this detection state will not be cached.
+ *     NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD
+ *       A client uses this field to indicate whether to detect loads
+ *       during this detection or to not use it.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DEFAULT
+ *           The system will use load detection as needed for each display.
+ *         NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DISABLE
+ *           The system will not use load detection for any display. If
+ *           load detection is disabled, this detection state will not
+ *           be cached.
+ * displayMask
+ *   This parameter specifies an NV0073_DISPLAY_MASK value describing
+ *   the set of displays for which connectivity status is to be checked.
+ *   If a display is present then the corresponding bit in the display
+ *   mask is left enabled. If the display is not present then the
+ *   corresponding bit in the display mask is disabled. Upon return this
+ *   parameter contains the subset of displays in the mask that are
+ *   connected.
+ *
+ *   If displayMask includes bit(s) that correspond to a TV encoder, the
+ *   result will be simply 'yes' or 'no' without any indication of which
+ *   connector(s) are actually attached. For fine-grained TV attachment
+ *   detection, please see NV0073_CTRL_CMD_TV_GET_ATTACHMENT_STATUS.
+ * retryTimeMs
+ *   This parameter is an output of this command. In case of an
+ *   NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time
+ *   duration in milliseconds after which the client should retry this
+ *   command.
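+ *
+ * A retry-handling sketch (illustrative only; "rmControl" and "sleepMs" are
+ * placeholders for the client's control dispatch and delay helpers, and
+ * "mask" is assumed to come from NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED):
+ *
+ *   NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS params = { 0 };
+ *   params.flags = 0;            // _METHOD/_DDC/_LOAD all left at _DEFAULT
+ *   params.displayMask = mask;   // displays to probe
+ *   // do {
+ *   //     status = rmControl(..., NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE,
+ *   //                        &params, sizeof(params));
+ *   //     if (status == NVOS_STATUS_ERROR_RETRY)
+ *   //         sleepMs(params.retryTimeMs);
+ *   // } while (status == NVOS_STATUS_ERROR_RETRY);
+ *   // On NV_OK, params.displayMask contains only the connected displays.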
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_RETRY
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 displayMask;
+    NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+/* valid get connect state flags */
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD          1:0
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_DEFAULT  (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED   (0x00000001U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_ECONODDC (0x00000002U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC             4:4
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DEFAULT     (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DISABLE     (0x00000001U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD            5:5
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DEFAULT    (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DISABLE    (0x00000001U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK          6:6
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK_DEFAULT  (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK_SAFE     (0x00000001U)
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG
+ *
+ * This command can be used to retrieve dynamic hotplug state information
+ * that is currently recorded by the RM. This information can be used by the
+ * client to determine which displays to detect after a hotplug event occurs.
+ * Or, if the client knows that this device generates a hot plug/unplug
+ * signal on all connectors, then this can be used to cull displays from
+ * detection.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   or changing the hotplug configuration.
+ *   No flags are currently defined.
+ * hotplugEventMask
+ *   For _GET_HOTPLUG_CONFIG, this returns which connectors the client
+ *   has requested notifications for when a hotplug event is detected.
+ *   Events can only be provided for connectors whose displayID is set
+ *   by the system in the hotplugInterruptible field.
+ * hotplugPollable
+ *   For _GET_HOTPLUG_CONFIG, this returns which connectors are pollable
+ *   in some non-destructive fashion.
+ * hotplugInterruptible
+ *   For _GET_HOTPLUG_CONFIG, this returns which connectors are capable
+ *   of generating interrupts.
+ * hotplugAlwaysAttached
+ *   This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *   the set of displays that have seen a hotplug or hotunplug event
+ *   sometime after the last valid EDID read. If the device never has
+ *   a valid EDID read, then it will always be listed here.
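+ *
+ * A sketch of picking a detection strategy per connector from the returned
+ * masks (illustrative only; displayId is a single-bit display mask and
+ * params is assumed to have been filled in by this command):
+ *
+ *   if (params.hotplugInterruptible & displayId)
+ *       ; // rely on hotplug interrupts for this connector
+ *   else if (params.hotplugPollable & displayId)
+ *       ; // poll this connector non-destructively
+ *   else
+ *       ; // fall back to a full detection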
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG (0x730123U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 hotplugEventMask;
+    NvU32 hotplugPollable;
+    NvU32 hotplugInterruptible;
+    NvU32 hotplugAlwaysAttached;
+} NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP
+ *
+ * This command can be used to retrieve the suggested head routing map
+ * for the specified display mask. A head routing map describes the
+ * suggested crtc (or head) assignments for each display in the specified
+ * mask.
+ *
+ * Up to MAX_DISPLAYS displays may be specified in the display mask. Displays
+ * are numbered from zero beginning with the lowest bit position set in the
+ * mask. The corresponding head assignment for each of the specified displays
+ * can then be found in the respective per-device field in the routing map.
+ *
+ * If a particular display cannot be successfully assigned a position in the
+ * head routing map then it is removed from the display mask.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayMask
+ *   This parameter specifies the NV0073_DISPLAY_MASK value for which
+ *   the head routing map is desired. Each enabled bit indicates
+ *   a display device to include in the routing map. Enabled bits
+ *   must represent supported displays as indicated by the
+ *   NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED command. If a particular
+ *   display cannot be included in the routing map then its corresponding
+ *   bit in the displayMask will be disabled. A return value of 0 in
+ *   displayMask indicates that a head routing map could not be constructed
+ *   with the given display devices.
+ * oldDisplayMask
+ *   This optional parameter specifies a prior display mask to be
+ *   used when generating the head routing map to be returned in
+ *   headRoutingMap. Displays set in oldDisplayMask are retained
+ *   if possible in the new routing map.
+ * oldHeadRoutingMap
+ *   This optional parameter specifies a prior head routing map to be
+ *   used when generating the new routing map to be returned in
+ *   headRoutingMap. Head assignments in oldHeadRoutingMap are
+ *   retained if possible in the new routing map.
+ * headRoutingMap
+ *   This parameter returns the new head routing map. This parameter
+ *   is organized into eight distinct fields, each containing the head
+ *   assignment for the corresponding display in the display mask.
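+ *
+ * Decoding sketch: the DISPLAY0..DISPLAY7 fields defined below use the
+ * high:low bit-range notation understood by the DRF helpers in nvmisc.h,
+ * so (illustratively, assuming those helpers are available) the head
+ * assigned to the lowest display in the mask can be extracted with:
+ *
+ *   NvU32 head0 = DRF_VAL(0073_CTRL, _SYSTEM_HEAD_ROUTING_MAP, _DISPLAY0,
+ *                         params.headRoutingMap);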
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP (0x730125U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS_MESSAGE_ID (0x25U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayMask;
+    NvU32 oldDisplayMask;
+    NvU32 oldHeadRoutingMap;
+    NvU32 headRoutingMap;
+} NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS;
+
+/* maximum number of allowed displays in a routing map */
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_MAX_DISPLAYS (8U)
+
+/* per-display head assignments in a routing map */
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY0  3:0
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY1  7:4
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY2 11:8
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY3 15:12
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY4 19:16
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY5 23:20
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY6 27:24
+#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY7 31:28
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE
+ *
+ * This command returns the active display ID for the specified head
+ * on the specified subdevice. The active display may be established
+ * at system boot by low-level software and can then be later modified
+ * by an NV client using a user display class instance (see
+ * NV15_VIDEO_LUT_CURSOR_DAC).
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * head
+ *   This parameter specifies the head for which the active display
+ *   should be retrieved. This value must be between zero and the
+ *   maximum number of heads supported by the subdevice.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the active display information.
+ *   Possible valid flags are:
+ *     NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT
+ *       This flag is used to limit the search for the active display to
+ *       that established by an NV client. If this flag is not specified,
+ *       then any active display is returned (setup at system boot by
+ *       low-level software or later by an NV client).
+ * displayId
+ *   This parameter returns the displayId of the active display. A value
+ *   of zero indicates no display is active.
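+ *
+ * A sketch building a mask of all active displays (illustrative only;
+ * "rmControl" is a placeholder for the client's control dispatch, and
+ * numHeads is assumed to come from NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS):
+ *
+ *   NvU32 activeMask = 0;
+ *   NvU32 head;
+ *   for (head = 0; head < numHeads; head++) {
+ *       NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS params = { 0 };
+ *       params.head = head;
+ *       // rmControl(..., NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE,
+ *       //           &params, sizeof(params));
+ *       activeMask |= params.displayId;   // zero if the head is inactive
+ *   }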
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID (0x26U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 flags;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+/* valid get active flags */
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT         0:0
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT_DISABLE (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT_ENABLE  (0x00000001U)
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS
+ *
+ * This command returns the set of internal (safe) display IDs for the
+ * specified subdevice in the form of a 32-bit display mask. Safe means the
+ * displays do not require copy protection as they are on the motherboard.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * internalDisplaysMask
+ *   This parameter returns a NV0073_DISPLAY_MASK value describing the set
+ *   of displays that are internal (safe) and which do not require copy
+ *   protection schemes.
+ * availableInternalDisplaysMask
+ *   This parameter returns a NV0073_DISPLAY_MASK value describing the set
+ *   of displays that are internal and available for use.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS (0x73015bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS_MESSAGE_ID (0x5BU)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 internalDisplaysMask;
+    NvU32 availableInternalDisplaysMask;
+} NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS
+ *
+ * This command returns a mask of boot display IDs.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * bootDisplayMask
+ *   This parameter returns the mask of boot display IDs.
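+ *
+ * Iteration sketch over the returned mask (illustrative only; params is
+ * assumed to have been filled in by this command):
+ *
+ *   NvU32 mask = params.bootDisplayMask;
+ *   while (mask != 0) {
+ *       NvU32 displayId = mask & ~(mask - 1);   // isolate lowest set bit
+ *       // ... displayId identifies one boot display ...
+ *       mask &= ~displayId;
+ *   }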
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS (0x730166U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS_MESSAGE_ID (0x66U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 bootDisplayMask;
+} NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE
+ *
+ * This command can be used to retrieve hotplug and unplug state
+ * information that is currently recorded by the RM. This information is
+ * used by the client to determine which displays to detect after a
+ * hotplug event occurs. Or, if the client knows that this device generates
+ * a hot plug/unplug signal on all connectors, then this can be used to cull
+ * displays from detection. The displayIds on which a hotplug/unplug has
+ * happened will be reported only ONCE to the client. That is, if the call
+ * is made multiple times for the same event update, subsequent calls will
+ * report the display mask as 0.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * flags
+ *   This parameter specifies optional flags to be used while retrieving
+ *   the hotplug state information.
+ *   Here are the currently defined fields:
+ *     NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID
+ *       A client uses this field to determine the lid state.
+ *       Possible values are:
+ *         NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_OPEN
+ *           The lid is open.
+ *         NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_CLOSED
+ *           The lid is closed. The client should remove devices as
+ *           reported inside the
+ *           NV0073_CTRL_SYSTEM_GET_CONNECT_POLICY_PARAMS.lidClosedMask.
+ * hotPlugMask
+ *   This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *   the set of displays that have seen a hotplug.
+ * hotUnplugMask
+ *   This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *   the set of displays that have seen a hot unplug.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE (0x73017bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | 0x7B" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 hotPlugMask;
+    NvU32 hotUnplugMask;
+} NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS;
+
+/* valid get hotplug state flags */
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID        0:0
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_OPEN   (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_CLOSED (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_CLEAR_ELV_BLOCK
+ *
+ * This command instructs the RM to explicitly clear any
+ * ELV block. Clients should call this before attempting core-channel
+ * updates when in VRR one-shot mode.
+ * ELV block mode will be properly restored to its appropriate setting based
+ * on the stall-lock in Supervisor3 after the core channel update.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ *
+ * displayId
+ *   The public ID of the Output Display which is to be used for VRR.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_CLEAR_ELV_BLOCK (0x73017dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS_MESSAGE_ID (0x7DU)
+
+typedef struct NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR
+ *
+ * This command arms the display modeset supervisor to operate in a
+ * lightweight mode. By calling this, the client is implicitly promising
+ * not to make any changes in the next modeset that require the full
+ * supervisor. After SV3, the lightweight supervisor (LWSV) will disarm and
+ * any subsequent modesets will revert to full supervisors. This must be
+ * called separately for every display that will be part of the modeset.
+ * It is recommended that the client explicitly disarm the lightweight
+ * supervisor after every modeset, as null modesets will not trigger the
+ * supervisor interrupts and the RM will not be able to disarm automatically.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ *
+ * displayId
+ *   The public ID of the Output Display which is to be used for VRR.
+ *
+ * bArmLWSV
+ *   If this is set to NV_TRUE, the RM will arm the lightweight supervisor
+ *   for the next modeset.
+ *   If this is set to NV_FALSE, the RM will disarm the lightweight
+ *   supervisor.
+ *
+ * bVrrState
+ *   VRR state to be changed.
+ *
+ * vActive
+ *   GPU-SRC vertical active value
+ *
+ * vfp
+ *   GPU-SRC vertical front porch
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR (0x73017eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS_MESSAGE_ID (0x7EU)
+
+typedef struct NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bArmLWSV;
+    NvBool bVrrState;
+    NvU32  vActive;
+    NvU32  vfp;
+} NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS
+*
+* This command is used to configure pstate switch parameters on VRR monitors.
+*
+* subDeviceInstance
+*   This parameter specifies the subdevice instance within the
+*   NV04_DISPLAY_COMMON parent device to which the operation should be
+*   directed. This parameter must specify a value between zero and the
+*   total number of subdevices within the parent device. This parameter
+*   should be set to zero for default behavior.
+*
+* displayId
+*   DisplayId of the monitor being configured for VRR
+*
+* bVrrState
+*   When set to NV_TRUE, signifies that VRR is about to become active.
+*   When set to NV_FALSE, signifies that VRR is about to become suspended.
+*
+* bVrrDirty
+*   When set to NV_TRUE, indicates that the VRR configuration has been
+*   changed.
+*   When set to NV_FALSE, this will indicate transitions from One Shot mode
+*   to Continuous mode and vice versa.
+*
+* bVrrEnabled
+*   When set to NV_TRUE, indicates that VRR has been enabled, i.e. vBp
+*   extended by 2 lines.
+*
+* maxVblankExtension
+*   When VRR is enabled, this is the maximum number of lines that the vblank
+*   can be extended. Only updated when bVrrDirty = true.
+*
+* internalVRRHeadVblankStretch
+*   When VRR is enabled, this is the maximum number of lines that the vblank
+*   can be extended on NVSR and DD panels. Only updated when
+*   bVrrDirty = true.
+*
+* minVblankExtension
+*   When VRR is enabled, this is the minimum number of lines that should be
+*   present in the vblank. The purpose is to cap the maximum refresh rate
+*   (currently only for HDMI 2.1 VRR compliance).
+*/
+#define NV0073_CTRL_CMD_SYSTEM_CONFIG_VRR_PSTATE_SWITCH (0x730184U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS_MESSAGE_ID (0x84U)
+
+typedef struct NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bVrrState;
+    NvBool bVrrDirty;
+    NvBool bVrrEnabled;
+    NvU32  maxVblankExtension;
+    NvU32  internalVRRHeadVblankStretch;
+    NvU32  minVblankExtension;
+} NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX
+ *
+ * This command is used to query the display mask of all displays
+ * that support dynamic display MUX.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ * displayMask (out) + * Mask of all displays that support dynamic display MUX + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX (0x730190U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS_MESSAGE_ID (0x90U) + +typedef struct NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS { + NvU32 subDeviceInstance; + NvU32 muxDisplayMask; +} NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS; + + + +/* + * NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH + * + * This command allocates a specified amount of ISO memory bandwidth for + * display. If the requested amount of bandwidth cannot be allocated (either + * because it exceeds the total bandwidth available to the system, or because + * too much bandwidth is already allocated to other clients), the call will + * fail and NV_ERR_INSUFFICIENT_RESOURCES will be returned. + * + * If bandwidth has already been allocated via a prior call, and a new + * allocation is requested, the new allocation will replace the old one. (If + * the new allocation fails, the old allocation remains in effect.) + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * averageBandwidthKBPS + * This parameter specifies the amount of ISO memory bandwidth requested. + * floorBandwidthKBPS + * This parameter specifies the minimum required (i.e., floor) dramclk + * frequency, multiplied by the width of the pipe over which the display + * data will travel. (It is understood that the bandwidth calculated by + * multiplying the clock frequency by the pipe width will not be + * realistically achievable, due to overhead in the memory subsystem. The + * API will not actually use the bandwidth value, except to reverse the + * calculation to get the required dramclk frequency.) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_NOT_SUPPORTED + * NV_ERR_GENERIC + */ + +#define NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH (0x730196U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID (0x96U) + +typedef struct NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS { + NvU32 subDeviceInstance; + NvU32 averageBandwidthKBPS; + NvU32 floorBandwidthKBPS; +} NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS; + +/* + * NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS + * + * This structure represents the hotplug event config control parameters. + * + * subDeviceInstance + * This parameter should specify the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * + * deviceMapFilter + * This parameter returns (in GET) or should specify (in SET) a device map + * indicating device(s) to sense. + * + * hotPluginSense + * This parameter returns (in GET) or should specify (in SET) a device map + * indicating device(s) plugged in that caused the most recent hotplug + * event. 
+ *
+ * hotUnplugSense
+ *   This parameter returns (in GET) or should specify (in SET) a device map
+ *   indicating device(s) unplugged that caused the most recent hotplug
+ *   event.
+ */
+
+typedef struct NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 deviceMapFilter;
+    NvU32 hotPluginSense;
+    NvU32 hotUnplugSense;
+} NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_EVENT_CONFIG
+ *
+ * This command fetches the hotplug event configuration.
+ *
+ * See @ref NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS for documentation
+ * on the parameters.
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_EVENT_CONFIG (0x730197U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | 0x97" */
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_SET_HOTPLUG_EVENT_CONFIG
+ *
+ * This command sets the hotplug event configuration.
+ *
+ * See @ref NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS for documentation
+ * on the parameters.
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_SET_HOTPLUG_EVENT_CONFIG (0x730198U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | 0x98" */
+
+
+
+/*
+* NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS
+*
+* This command is used to read Core channel, Cursor channel, Window channel,
+* and Head register values and encode these values with ProtoDmp.
+*
+* subDeviceInstance (in)
+*   This parameter specifies the subdevice instance within the
+*   NV04_DISPLAY_COMMON parent device to which the operation should be
+*   directed.
+* headMask (in)
+*   Head mask representing which register values should be encoded
+* windowMask (in)
+*   Window channel mask whose register values should be encoded
+* bRecordCoreChannel (in)
+*   Indicates whether or not to encode core channel register values
+* bRecordCursorChannel (in)
+*   Indicates whether or not to encode cursor channel register values
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_INVALID_ARGUMENT
+*   NV_ERR_NOT_SUPPORTED
+*/
+#define NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS (0x73019bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS_MESSAGE_ID (0x9BU)
+
+typedef struct NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  headMask;
+    NvU32  windowMask;
+    NvBool bRecordCoreChannel;
+    NvBool bRecordCursorChannel;
+} NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT
+ *
+ * This command is used to query whether sideband I2C is supported for the
+ * given display device.
+ *
+ * subDeviceInstance (in)
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ * bIsSidebandI2cSupported (out)
+ *   This parameter returns whether sideband I2C is supported.
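+ *
+ * A minimal query sketch (illustrative only; "rmControl" is a placeholder
+ * for the client's RM control dispatch):
+ *
+ *   NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS params = { 0 };
+ *   params.subDeviceInstance = 0;
+ *   // rmControl(..., NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT,
+ *   //           &params, sizeof(params));
+ *   // On NV_OK, params.bIsSidebandI2cSupported reports support.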
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT (0x73019cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS_MESSAGE_ID (0x9CU) + +typedef struct NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS { + NvU32 subDeviceInstance; + NvBool bIsSidebandI2cSupported; +} NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS; + +/* _ctrl0073system_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h new file mode 100644 index 0000000..238c0d6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080.finn +// + + + + +#include "ctrl0080/ctrl0080bif.h" +#include "ctrl0080/ctrl0080gpu.h" +#include "ctrl0080/ctrl0080clk.h" +#include "ctrl0080/ctrl0080dma.h" +#include "ctrl0080/ctrl0080gr.h" +#include "ctrl0080/ctrl0080cipher.h" +#include "ctrl0080/ctrl0080fb.h" +#include "ctrl0080/ctrl0080fifo.h" +#include "ctrl0080/ctrl0080host.h" + + +#include "ctrl0080/ctrl0080perf.h" +#include "ctrl0080/ctrl0080msenc.h" +#include "ctrl0080/ctrl0080bsp.h" +#include "ctrl0080/ctrl0080rc.h" +#include "ctrl0080/ctrl0080nvjpg.h" +#include "ctrl0080/ctrl0080unix.h" +#include "ctrl0080/ctrl0080internal.h" diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h new file mode 100644 index 0000000..4b20dc1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080base.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NV01_DEVICE_XX/NV03_DEVICE control commands and parameters */
+
+#define NV0080_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0080, NV0080_CTRL_##cat, idx)
+
+/* GPU device command categories (6 bits) */
+#define NV0080_CTRL_RESERVED                   (0x00)
+#define NV0080_CTRL_BIF                        (0x01)
+#define NV0080_CTRL_GPU                        (0x02)
+#define NV0080_CTRL_CLK                        (0x10)
+#define NV0080_CTRL_GR                         (0x11)
+#define NV0080_CTRL_CIPHER                     (0x12)
+#define NV0080_CTRL_FB                         (0x13)
+#define NV0080_CTRL_HOST                       (0x14)
+#define NV0080_CTRL_VIDEO                      (0x15)
+#define NV0080_CTRL_FIFO                       (0x17)
+#define NV0080_CTRL_DMA                        (0x18)
+#define NV0080_CTRL_PERF                       (0x19)
+#define NV0080_CTRL_PERF_LEGACY_NON_PRIVILEGED (0x99) /* finn: Evaluated from "(NV0080_CTRL_PERF | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */
+#define NV0080_CTRL_MSENC                      (0x1B)
+#define NV0080_CTRL_BSP                        (0x1C)
+#define NV0080_CTRL_RC                         (0x1D)
+#define NV0080_CTRL_OS_UNIX                    (0x1E)
+#define NV0080_CTRL_NVJPG                      (0x1F)
+#define NV0080_CTRL_INTERNAL                   (0x20)
+#define NV0080_CTRL_NVLINK                     (0x21)
+
+/*
+ * NV0080_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0080_CTRL_CMD_NULL (0x800000) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+/* _ctrl0080base_h_ */
+
+/* extract device cap setting from specified category-specific caps table */
+#define NV0080_CTRL_GET_CAP(cat,tbl,c) \
+    NV0080_CTRL_##cat##_GET_CAP(tbl, NV0080_CTRL_##cat##_CAPS_##c)
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
new file mode 100644
index 0000000..05a18bf
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
@@ -0,0 +1,138 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080bif.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* + * NV0080_CTRL_CMD_BIF_RESET + * + * This command initiates the specified reset type on the GPU. + * + * flags + * Specifies various arguments to the reset operation. + * + * Supported fields include: + * + * NV0080_CTRL_BIF_RESET_FLAGS_TYPE + * When set to _SW_RESET, a SW (fullchip) reset is performed. When set + * to _SBR, a secondary-bus reset is performed. When set to + * _FUNDAMENTAL, a fundamental reset is performed. + * + * NOTE: _FUNDAMENTAL is not yet supported. + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_BIF_RESET (0x800102) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_RESET_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BIF_RESET_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_BIF_RESET_PARAMS { + NvU32 flags; +} NV0080_CTRL_BIF_RESET_PARAMS; + +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE 2:0 +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_SW_RESET (0x00000001) +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_SBR (0x00000002) +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_FUNDAMENTAL (0x00000003) + +/* + * NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR + * + * baseDmaSysmemAddr + * This parameter represents the base DMA address for sysmem which will be + * added to all DMA accesses issued by the GPU.
Currently, GPUs do not support 64-bit physical addresses; + * hence, if sysmem extends beyond the maximum physical address width supported by the GPU, this address + * will be non-zero. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_PARENT + */ + +#define NV0080_CTRL_CMD_BIF_GET_DMA_BASE_SYSMEM_ADDR (0x800103) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS { + NV_DECLARE_ALIGNED(NvU64 baseDmaSysmemAddr, 8); +} NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS; + +/* + * NV0080_CTRL_BIF_SET_ASPM_FEATURE + * + * aspmFeatureSupported + * ASPM feature override by client + * + * Possible status values returned are: + * NV_OK + */ + +#define NV0080_CTRL_CMD_BIF_SET_ASPM_FEATURE (0x800104) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS { + NvU32 aspmFeatureSupported; +} NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS; + +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S 0:0 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S_ENABLED 0x00000001 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S_DISABLED 0x00000000 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1 1:1 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1_ENABLED 0x00000001 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1_DISABLED 0x00000000 + +/* + * NV0080_CTRL_BIF_ASPM_CYA_UPDATE + * + * bL0sEnable + * bL1Enable + * ASPM CYA update by client + * + * Possible status values returned are: + * NV_OK + */ + +#define NV0080_CTRL_CMD_BIF_ASPM_CYA_UPDATE (0x800105) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS { + NvBool bL0sEnable; + NvBool bL1Enable; +} NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS; + +/* _ctrl0080bif_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h new file mode 100644 index 0000000..d4911f5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080bsp.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE bit stream processor control commands and parameters */ + +/* + * NV0080_CTRL_CMD_BSP_GET_CAPS + * + * This command returns the set of BSP capabilities for the device + * in the form of an array of unsigned bytes. BSP capabilities + * include supported features and required workarounds for the decoder + * within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_BSP_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the BSP caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * instanceId + * This parameter specifies the instance Id of NVDEC for which + * cap bits are requested. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_BSP_GET_CAPS (0x801c01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BSP_INTERFACE_ID << 8) | NV0080_CTRL_BSP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BSP_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_BSP_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); + NvU32 instanceId; +} NV0080_CTRL_BSP_GET_CAPS_PARAMS; + + + +/* + * Size in bytes of bsp caps table. This value should be one greater + * than the largest byte_index value above. + */ +#define NV0080_CTRL_BSP_CAPS_TBL_SIZE 8 + +/* + * NV0080_CTRL_CMD_BSP_GET_CAPS_V2 + * + * This command returns the set of BSP capabilities for the device + * in the form of an array of unsigned bytes. BSP capabilities + * include supported features and required workarounds for the decoder + * within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * (The V2 version flattens the capsTbl array pointer). + * + * capsTbl + * This parameter is an array of unsigned bytes where the BSP caps bits + * will be transferred by the RM. + * instanceId + * This parameter specifies the instance Id of NVDEC for which + * cap bits are requested.
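+ *
+ * As a usage illustration only (not part of the generated interface): a
+ * minimal sketch of querying the caps of the first NVDEC instance with the
+ * flattened V2 variant below. The hClient/hDevice handles and the
+ * NvRmControl entry point are assumed from the RM client API.
+ *
+ *   NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2 capsParams = { { 0 } };
+ *   capsParams.instanceId = 0;  // first NVDEC instance
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_BSP_GET_CAPS_V2,
+ *                        &capsParams, sizeof(capsParams));
+ *   // On NV_OK, capsParams.capsTbl[] holds the BSP caps bits.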
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_BSP_GET_CAPS_V2 (0x801c02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BSP_INTERFACE_ID << 8) | NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2_MESSAGE_ID" */ + +#define NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2 { + NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE]; + NvU32 instanceId; +} NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2; + +/* _ctrl0080bsp_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h new file mode 100644 index 0000000..279151b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080cipher.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h new file mode 100644 index 0000000..de61930 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080clk.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h new file mode 100644 index 0000000..cf2f90e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h @@ -0,0 +1,908 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080dma.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE dma control commands and parameters */ + +/* + * NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK + * + * This parameter returns the parameters specific to a PTE as follows: + * pageSize + * GET: This parameter returns the page size of the PTE information + * being returned. If 0, then this pteBlock[] array entry is + * invalid or not used. (pteBlock[0] is always used.) + * SET: This parameter specifies the page size of the PTE information + * to be set. If 0, then this pteBlock[] array entry is invalid + * or not used. (pteBlock[0] is always used.) + * pteEntrySize + * GET: This parameter returns the size of the PTE in bytes for this GPU. + * SET: N/A + * comptagLine + * GET: This parameter returns the comptagline field of the corresponding PTE. + * SET: This parameter sets the comptagline field of the corresponding PTE. + * Incorrect values may lead to dire consequences. + * kind + * GET: This parameter returns the kind field of the corresponding PTE. + * SET: This parameter sets the kind field of the corresponding PTE.
+ * Incorrect values may lead to undesirable consequences. + * pteFlags + * This parameter returns various fields from the PTE, these are: + * FLAGS_VALID: + * GET: This flag returns the valid bit of the PTE. + * SET: This flag sets the valid bit of the PTE. + * FLAGS_ENCRYPTED: + * GET: This flag returns the encrypted bit of the PTE. Not all GPUs + * support encryption. If not supported, this flag will be set to + * NOT_SUPPORTED. + * SET: This flag sets the encrypted bit of the PTE. + * FLAGS_APERTURE: + * GET: This flag returns the aperture field of the PTE. See + * NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_FLAGS_APERTURE_* for values. + * SET: This flag sets the aperture field of the PTE. See + * NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_FLAGS_APERTURE_* for values. + * FLAGS_COMPTAGS: + * GET: This flag returns the comptags field of the PTE. (Not used on Fermi) + * SET: N/A + * FLAGS_GPU_CACHED: + * GET: This flag returns the GPU cacheable bit of the PTE. GPU caching of + * sysmem was added in iGT21a and Fermi. If not supported, this flag + * will be set to NOT_SUPPORTED. + * SET: N/A for specific chips, e.g., GF100 + * FLAGS_SHADER_ACCESS: + * GET: This flag returns the shader access control of the PTE. This feature + * was introduced in Kepler. If not supported, this flag will be set to + * NOT_SUPPORTED. + * SET: N/A + */ + +typedef struct NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK { + NvU32 pageSize; + NV_DECLARE_ALIGNED(NvU64 pteEntrySize, 8); + NvU32 comptagLine; + NvU32 kind; + NvU32 pteFlags; +} NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK; + +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID 0:0 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED 2:1 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_NOT_SUPPORTED (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE 6:3 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_VIDEO_MEMORY (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_PEER_MEMORY (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_SYSTEM_COHERENT_MEMORY (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_SYSTEM_NON_COHERENT_MEMORY (0x00000003U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS 10:7 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_NONE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_1 (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_2 (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_4 (0x00000004U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED 12:11 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_NOT_SUPPORTED (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS 14:13 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_READ_WRITE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_READ_ONLY (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_WRITE_ONLY (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_NOT_SUPPORTED (0x00000003U) +#define 
NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY 15:15 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC 16:16 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING 17:17 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED 18:18 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED_TRUE (0x00000001U) + +/* + * NV0080_CTRL_DMA_GET_PTE_INFO + * + * This command queries PTE information for the specified GPU virtual address. + * + * gpuAddr + * This parameter specifies the GPU virtual address for which PTE + * information is to be returned. + * skipVASpaceInit + * This parameter specifies (true/false) whether the VA Space + * initialization should be skipped in this ctrl call. + * pteBlocks + * This parameter returns the page size-specific attributes of a PTE. + * Please see NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * NV_ERR_GENERIC + */ + +#define NV0080_CTRL_CMD_DMA_GET_PTE_INFO (0x801801U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_GET_PTE_INFO_PTE_BLOCKS 4U + +#define NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8); + NvU32 subDeviceId; + NvU8 skipVASpaceInit; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_GET_PTE_INFO_PTE_BLOCKS], 8); + NvHandle hVASpace; +} NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS; + +/* + * NV0080_CTRL_DMA_SET_PTE_INFO + * + * This command sets PTE information for the specified GPU virtual address. + * Usage of parameter and field definitions is identical to that of + * NV0080_CTRL_DMA_GET_PTE_INFO, with the following exceptions: + * + * - pteFlags field NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS is ignored, + * as this setting is specified via the kind specification. + * - pteEntrySize is ignored, as this setting is read-only in the GET case. + * - hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space.
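+ *
+ * As an illustrative sketch only (not part of the generated interface):
+ * the pteFlags fields use the ranged high:low define convention above, so
+ * they can be decoded with the DRF macros from nvmisc.h. The hClient and
+ * hDevice handles, the NvRmControl entry point, and gpuVirtAddr are
+ * assumed here.
+ *
+ *   NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS pteParams = { 0 };
+ *   pteParams.gpuAddr  = gpuVirtAddr; // address to query (assumed)
+ *   pteParams.hVASpace = 0;           // implicit VA space
+ *   status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_GET_PTE_INFO,
+ *                        &pteParams, sizeof(pteParams));
+ *   if (status == NV_OK &&
+ *       DRF_VAL(0080_CTRL, _DMA_PTE_INFO_PARAMS_FLAGS, _APERTURE,
+ *               pteParams.pteBlocks[0].pteFlags) ==
+ *       NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_VIDEO_MEMORY)
+ *   {
+ *       // the first PTE block maps video memory
+ *   }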
+ * + */ + +#define NV0080_CTRL_CMD_DMA_SET_PTE_INFO (0x80180aU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_SET_PTE_INFO_PTE_BLOCKS 4U + +#define NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8); + NvU32 subDeviceId; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_SET_PTE_INFO_PTE_BLOCKS], 8); + NvHandle hVASpace; +} NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS; + + +#define NV0080_CTRL_CMD_DMA_FILL_PTE_MEM (0x801802U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS { + NvU32 pageCount; + struct { + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU32 subDeviceId; + } hwResource; + struct { + NvU32 fbKind; + NvU32 sysKind; + NvU32 compTagStartOffset; + } comprInfo; + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8); + NV_DECLARE_ALIGNED(NvP64 pageArray, 8); + NV_DECLARE_ALIGNED(NvP64 pteMem, 8); + NvU32 pteMemPfn; + NvU32 pageSize; + NvU32 startPageIndex; + NvU32 flags; + NvHandle hSrcVASpace; + NvHandle hTgtVASpace; + NvU32 peerId; +} NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS; + + + +/* + * NV0080_CTRL_DMA_FLUSH + * + * This command flushes the specified target unit. + * + * targetUnit + * The unit to flush, either L2 cache or compression tag cache. + * This field is a logical OR of the individual fields such as + * L2 cache or compression tag cache. An L2 invalidation of + * either SYSMEM or PEERMEM can also be triggered; this + * invalidation applies to Fermi only. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * + * See Also: + * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE + * Flushes the entire GPU cache or a set of physical addresses (if the + * hardware supports it). Use this call if you want to flush a set of + * addresses or the entire GPU cache in unicast mode. + * NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE + * Flushes memory associated with a single allocation if the hardware + * supports it. Use this call if you want to flush a single allocation and + * you have a memory object describing the physical memory.
+ */ +#define NV0080_CTRL_CMD_DMA_FLUSH (0x801805U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_FLUSH_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_FLUSH_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0080_CTRL_DMA_FLUSH_PARAMS { + NvU32 targetUnit; +} NV0080_CTRL_DMA_FLUSH_PARAMS; + +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2 0:0 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG 1:1 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB 2:2 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB_ENABLE (0x00000001U) + +// This is exclusively for Fermi +// The selection of non-zero valued bit-fields avoids the routing +// into the above cases and vice-versa +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE 4:3 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE_SYSMEM (0x00000001U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE_PEERMEM (0x00000002U) + + +/** + * NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS + * + * This command returns information about the VA caps on the GPU + * + * vaBitCount + * Returns number of bits in a virtual address + * pdeCoverageBitCount + * Returns number of VA bits covered in each PDE. One PDE covers + * 2^pdeCoverageBitCount bytes. + * + * bigPageSize + * Size of the big page + * compressionPageSize + * Size of region each compression tag covers + * dualPageTableSupported + * TRUE if one page table can map with both 4KB and big pages + * + * numPageTableFormats + * Returns the number of different page table sizes supported by the RM + * pageTableBigFormat + * pageTable4KFormat[] + * Returns size in bytes and number of VA bits covered by each page table + * format. Up to MAX_NUM_PAGE_TABLE_FORMATS can be returned. The most + * compact format will be pageTableSize[0] and the least compact format + * will be last. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space. + * vaRangeLo + * Indicates the start of usable VA range. + * + * hugePageSize + * Size of the huge page if supported, 0 otherwise. + * + * vaSpaceId + * Virtual Address Space id assigned by RM. + * Only relevant on AMODEL. + * + * pageSize512MB + * Size of the 512MB page if supported, 0 otherwise. 
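+ *
+ * As an illustrative sketch only (hClient/hDevice and the NvRmControl
+ * entry point are assumed from the RM client API): one PDE covers
+ * 2^pdeCoverageBitCount bytes, so a returned value of 26, for example,
+ * means each PDE spans 64MB of VA.
+ *
+ *   NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS vaCaps = { 0 };
+ *   vaCaps.hVASpace = 0;  // implicit VA space of the device
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS,
+ *                        &vaCaps, sizeof(vaCaps));
+ *   NvU64 bytesPerPde = 1ULL << vaCaps.pdeCoverageBitCount;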
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS (0x801806U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT { + NvU32 pageTableSize; + NvU32 pageTableCoverage; +} NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT; + +#define NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_MAX_NUM_PAGE_TABLE_FORMATS (16U) +#define NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS { + NvU32 vaBitCount; + NvU32 pdeCoverageBitCount; + NvU32 num4KPageTableFormats; + NvU32 bigPageSize; + NvU32 compressionPageSize; + NvU32 dualPageTableSupported; + NvU32 idealVRAMPageSize; + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT pageTableBigFormat; + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT pageTable4KFormat[NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_MAX_NUM_PAGE_TABLE_FORMATS]; + NvHandle hVASpace; + NV_DECLARE_ALIGNED(NvU64 vaRangeLo, 8); + NvU32 hugePageSize; + NvU32 vaSpaceId; + NvU32 pageSize512MB; +} NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS; + +/* + * Adding a version define to allow clients to access valid + * parameters based on version. + */ +#define NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS_WITH_VA_RANGE_LO 0x1U + +/* + * NV0080_CTRL_DMA_GET_PDE_INFO + * + * This command queries PDE information for the specified GPU virtual address. + * + * gpuAddr + * This parameter specifies the GPU virtual address for which PDE + * information is to be returned. + * pdeVirtAddr + * This parameter returns the GPU virtual address of the PDE. + * pdeEntrySize + * This parameter returns the size of the PDE in bytes for this GPU. + * pdeAddrSpace + * This parameter returns the GPU address space of the PDE. + * pdeSize + * This parameter returns the fractional size of the page table(s) as + * actually set in the PDE, FULL, 1/2, 1/4 or 1/8. (This amount may + * differ from that derived from pdeVASpaceSize.) Intended for VERIF only. + * pteBlocks + * This parameter returns the page size-specific parameters as follows: + * ptePhysAddr + * This parameter returns the GPU physical address of the page table. + * pteCacheAttrib + * This parameter returns the caching attribute of the + * GPU physical address of the page table. + * pteEntrySize + * This parameter returns the size of the PTE in bytes for this GPU. + * pageSize + * This parameter returns the page size of the page table. + * If pageSize == 0, then this PTE block is not valid. + * pteAddrSpace + * This parameter returns the GPU address space of the page table. + * pdeVASpaceSize + * This parameter returns the size of the VA space addressable by + * the page table if fully used (i.e., if all PTEs marked VALID). + * pdbAddr + * This parameter returns the PDB address for the PDE. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space. 
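+ *
+ * A minimal call sketch (illustrative only; hClient/hDevice and
+ * NvRmControl are assumed, and error handling is elided):
+ *
+ *   NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS pdeParams = { 0 };
+ *   pdeParams.gpuAddr  = gpuVirtAddr;  // address to look up (assumed)
+ *   pdeParams.hVASpace = 0;            // implicit VA space
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_DMA_GET_PDE_INFO,
+ *                        &pdeParams, sizeof(pdeParams));
+ *   // On NV_OK, pdeParams.pteBlocks[] describes the page table(s)
+ *   // behind the PDE, one entry per supported page size.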
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV0080_CTRL_CMD_DMA_GET_PDE_INFO (0x801809U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK { + NV_DECLARE_ALIGNED(NvU64 ptePhysAddr, 8); + NvU32 pteCacheAttrib; + NvU32 pteEntrySize; + NvU32 pageSize; + NvU32 pteAddrSpace; + NvU32 pdeVASpaceSize; + NvU32 pdeFlags; +} NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK; + +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_VIDEO_MEMORY (0x00000000U) +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY (0x00000001U) +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY (0x00000002U) + +#define NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCKS 4U + +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8); + NV_DECLARE_ALIGNED(NvU64 pdeVirtAddr, 8); + NvU32 pdeEntrySize; + NvU32 pdeAddrSpace; + NvU32 pdeSize; + NvU32 subDeviceId; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCKS], 8); + NV_DECLARE_ALIGNED(NvU64 pdbAddr, 8); + NvHandle hVASpace; +} NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS; + +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_VIDEO_MEMORY (0x00000000U) +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY (0x00000001U) +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY (0x00000002U) +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_FULL 1U +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_HALF 2U +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_QUARTER 3U +#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_EIGHTH 4U + +/* + * NV0080_CTRL_CMD_DMA_INVALIDATE_PDB_TARGET + * + * This command invalidates the PDB target setting in hardware. + * After execution of this command, the PDB target will be in an undefined state. + * + * Returns an error if the PDB target cannot be invalidated. + * + * This call is only supported on Fermi and later chips. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0080_CTRL_CMD_DMA_INVALIDATE_PDB_TARGET (0x80180bU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | 0xB" */ + +/* + * NV0080_CTRL_CMD_DMA_INVALIDATE_TLB + * + * This command invalidates the GPU TLB. This is intended to be used + * by RM clients that manage their own TLB consistency when updating + * page tables on their own, or with DEFER_TLB_INVALIDATION options + * to other RM APIs. + * + * hVASpace + * This parameter specifies the VASpace object whose MMU TLB entries + * need to be invalidated, if the flag is set to NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_FALSE. + * Specifying a GMMU VASpace object handle will invalidate the GMMU TLB for the particular VASpace. + * Specifying a SMMU VASpace object handle will flush the entire SMMU TLB & PTC. + * + * flags + * This parameter can be used to specify any flags needed + * for the invalidation request. + * NV0080_CTRL_DMA_INVALIDATE_TLB_ALL + * When set to TRUE this flag requests a global invalidate. + * When set to FALSE this flag requests a chip-specific + * optimization to invalidate only the address space bound + * to the associated hDevice.
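+ *
+ * A hedged sketch of a global invalidate (illustrative only; handles and
+ * NvRmControl are assumed, and DRF_DEF comes from nvmisc.h):
+ *
+ *   NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS tlbParams = { 0 };
+ *   tlbParams.flags = DRF_DEF(0080_CTRL, _DMA_INVALIDATE_TLB, _ALL, _TRUE);
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_DMA_INVALIDATE_TLB,
+ *                        &tlbParams, sizeof(tlbParams));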
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_TIMEOUT_RETRY + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0080_CTRL_CMD_DMA_INVALIDATE_TLB (0x80180cU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS { + NvHandle hVASpace; + NvU32 flags; +} NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS; + +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL 0:0 +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_TRUE (0x00000001U) + +/** + * NV0080_CTRL_CMD_DMA_GET_CAPS + * + * This command returns the set of DMA capabilities for the device + * in the form of an array of unsigned bytes. DMA capabilities + * include supported features and required workarounds for the address + * translation system within the device, each represented by a byte + * offset into the table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_DMA_CAPS_TBL_SIZE. + * + * capsTbl + * This parameter is the client's caps table buffer into which the + * DMA caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * + * 32BIT_POINTER_ENFORCED + * If this property is TRUE, NVOS32 and NVOS46 calls with + * 32BIT_POINTER_DISABLED will return addresses above 4GB. + * + * SHADER_ACCESS_SUPPORTED + * If this property is set, the MMU in the system supports the independent + * access bits for the shader. This is accessed with the following fields: + * NVOS46_FLAGS_SHADER_ACCESS + * NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_FLAGS_SHADER_ACCESS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_DMA_GET_CAPS (0x80180dU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_CAPS_PARAMS_MESSAGE_ID" */ +/* size in bytes of dma caps table */ +#define NV0080_CTRL_DMA_CAPS_TBL_SIZE 8U +#define NV0080_CTRL_DMA_GET_CAPS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV0080_CTRL_DMA_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NvU8 capsTbl[NV0080_CTRL_DMA_CAPS_TBL_SIZE]; +} NV0080_CTRL_DMA_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_DMA_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_DMA_CAPS_32BIT_POINTER_ENFORCED 0:0x01 +#define NV0080_CTRL_DMA_CAPS_SHADER_ACCESS_SUPPORTED 0:0x04 +#define NV0080_CTRL_DMA_CAPS_SPARSE_VIRTUAL_SUPPORTED 0:0x08 +#define NV0080_CTRL_DMA_CAPS_MULTIPLE_VA_SPACES_SUPPORTED 0:0x10 + +/* + * NV0080_CTRL_DMA_SET_VA_SPACE_SIZE + * + * Change the size of an existing VA space. + * NOTE: Currently this only supports growing the size, not shrinking. + * + * 1. Allocate new page directory able to map extended range. + * 2. Copy existing PDEs from old directory to new directory. + * 3. Initialize new PDEs to invalid. + * 4. Update instmem to point to new page directory. + * 5. Free old page directory. + * + * vaSpaceSize + * On input, the requested size of the VA space in bytes. + * On output, the actual resulting VA space size. + * + * The actual size will be greater than or equal to the requested size, + * unless NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX is requested, which + * requests the maximum available.
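+ *
+ * A minimal sketch requesting the maximum available size (illustrative
+ * only; handles and NvRmControl are assumed). On success, vaSpaceSize is
+ * overwritten with the actual resulting size:
+ *
+ *   NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS sizeParams = { 0 };
+ *   sizeParams.vaSpaceSize = NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX;
+ *   sizeParams.hVASpace    = 0;  // implicit VA space
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_DMA_SET_VA_SPACE_SIZE,
+ *                        &sizeParams, sizeof(sizeParams));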
+ * + * NOTE: Specific size requests (e.g. other than SIZE_MAX) must account + * for the VA hole at the beginning of the range which is used to + * distinguish NULL pointers. This region is not counted as part + * of the vaSpaceSize since it is not allocatable. + * + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INSUFFICIENT_RESOURCES + */ +#define NV0080_CTRL_CMD_DMA_SET_VA_SPACE_SIZE (0x80180eU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8); + NvHandle hVASpace; +} NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS; + +#define NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX (0xFFFFFFFFFFFFFFFFULL) + +/* + * NV0080_CTRL_DMA_UPDATE_PDE_2 + * + * This command updates a single PDE for the given (hClient, hDevice) + * with specific attributes. + * This command is only available on Windows and MODS platforms. + * This command can be called by kernel clients only. + * + * The VA range the PDE maps must be contained by a VA allocation marked with + * NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED. + * However if the MODS-only FORCE_OVERRIDE flag is set this restriction is relaxed. + * + * RM does not track the PDE's attributes in SW - this control simply stuffs + * the PDE in memory after translating and checking the parameters. + * + * Parameters are checked for relative consistency (e.g. valid domains), + * but it is the client's responsibility to provide correct page table + * addresses, e.g. global consistency is not checked. + * + * It is also the client's responsibility to flush/invalidate the MMU + * when appropriate, either by setting the _FLUSH_PDE_CACHE flag for this + * call or by flushing through other APIs. + * This control does not flush automatically to allow batches of calls + * to be made before a single flush. + * + * ptParams + * Page-size-specific parameters, as follows: + * + * physAddr + * Base address of physically contiguous memory of page table. + * Must be aligned sufficiently for the PDE address field. + * numEntries + * Deprecated and ignored. + * Use FLAGS_PDE_SIZE that applies to the tables for all page sizes. + * aperture + * Address space the base address applies to. + * Can be left as INVALID to ignore this page table size. + * + * pdeIndex + * The PDE index this update applies to. + * flags + * See NV0080_CTRL_DMA_UPDATE_PDE_FLAGS_*. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair. + * pPdeBuffer [out] + * Kernel pointer to 64 bit unsigned integer representing a Page Dir Entry + * that needs to be updated. It should point to memory as wide as the Page Dir + * Entry. + * + * If NULL, Page Dir Entry updates will go to the internally managed Page Dir. + * If not NULL, the updates will be written to this buffer. 
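+ *
+ * A hedged sketch (illustrative only; handles, the page table physical
+ * address, and the PDE index are assumed) that points the big-page table
+ * of one PDE at video memory, leaves the small-page table aperture
+ * INVALID so that size is ignored, and flushes the PDE cache in the same
+ * call:
+ *
+ *   NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS pdeUpd = { 0 };
+ *   pdeUpd.pdeIndex = pdeIndex;        // caller-computed (assumed)
+ *   pdeUpd.ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG].physAddr =
+ *       bigPtPhysAddr;                 // page table base (assumed)
+ *   pdeUpd.ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG].aperture =
+ *       NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_VIDEO_MEMORY;
+ *   pdeUpd.flags = DRF_DEF(0080_CTRL, _DMA_UPDATE_PDE_2,
+ *                          _FLAGS_FLUSH_PDE_CACHE, _TRUE);
+ *   status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_UPDATE_PDE_2,
+ *                        &pdeUpd, sizeof(pdeUpd));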
+ * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_DMA_UPDATE_PDE_2 (0x80180fU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 physAddr, 8); + NvU32 numEntries; // deprecated + NvU32 aperture; +} NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS; + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_INVALID (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_VIDEO_MEMORY (0x00000001U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_COHERENT_MEMORY (0x00000002U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_NON_COHERENT_MEMORY (0x00000003U) + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_SMALL 0U +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG 1U +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE 2U + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS { + NvU32 pdeIndex; + NvU32 flags; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE], 8); + NvHandle hVASpace; + NV_DECLARE_ALIGNED(NvP64 pPdeBuffer, 8); // NV_MMU_VER2_PDE__SIZE + NvU32 subDeviceId; // ID+1, 0 for BC +} NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS; + +/*! + * If set a PDE cache flush (MMU invalidate) will be performed. + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE 0:0 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE_TRUE (0x00000001U) + +/*! + * For verification purposes (MODS-only) this flag may be set to modify any PDE + * in the VA space (RM managed or externally managed). + * It is up to caller to restore any changes properly (or to expect faults). + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE 1:1 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE_TRUE (0x00000001U) + +/*! + * Directly controls the PDE_SIZE field (size of the page tables pointed to by this PDE). + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE 3:2 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_FULL (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_HALF (0x00000001U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_QUARTER (0x00000002U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_EIGHTH (0x00000003U) + +/*! + * Used to specify if the allocation is sparse. Applicable only in case of + * VA Space managed by OS, as in WDDM2.0 + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE 4:4 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE_TRUE (0x00000001U) + +/* + * NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE + * This interface will create a corresponding privileged + * kernel address space that will mirror user space allocations in this + * VASPACE. + * The user can either pass a FERMI_VASPACE_A handle or RM will use the + * vaspace associated with the client/device if hVaspace is passed as + * NULL. + * Once this property is set, the user will not be able to make allocations + * from the top most PDE of this address space. 
+ * + * The user is expected to call this function as soon as the + * device/VASpace object has been created. If the user has already made VA allocations + * in this vaspace, then this call will return a failure + * (NV_ERR_INVALID_STATE). + * The Vaspace should have no VA allocations when this call is made. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE +*/ +#define NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE (0x801810U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS { + NvHandle hVASpace; +} NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS; + +/* + * NV0080_CTRL_DMA_SET_DEFAULT_VASPACE + * This is a special control call provided for KMD to use. + * It will associate an allocated Address Space Object as the + * default address space of the device. + * + * This is added so that the USER can move to using address space objects when they + * want to specify the big page size they want to use but still want + * to use the rest of the relevant RM APIs without specifying the hVASpace. + * + * This call will succeed only if there is no VASPACE already associated with the + * device. This means the user will have to call this before making any allocations + * on this device/address space. + * + * The hVASpace that is passed in to be associated should belong to the parent device that + * this call is made for. This call will fail if we try to associate a VASpace belonging to + * some other client/device. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * + */ +#define NV0080_CTRL_DMA_SET_DEFAULT_VASPACE (0x801812U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS { + NvHandle hVASpace; +} NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS; + +/*! + * NV0080_CTRL_DMA_SET_PAGE_DIRECTORY + * + * Move an existing VA space to an externally-managed top-level page directory. + * The VA space must have been created in SHARED_MANAGEMENT mode. + * For lifecycle details, see NV_VASPACE_ALLOCATION_PARAMETERS documentation in nvos.h. + * + * RM will propagate the update to all channels using the VA space. + * + * NOTE: All channels using this VA space are expected to be idle and unscheduled prior + * to and during this control call - it is the responsibility of the caller to ensure this. + * + * physAddress + * Physical address of the new page directory within the aperture specified by flags. + * numEntries + * Number of entries in the new page directory. + * The backing physical memory must be at least this size (multiplied by entry size). + * flags + * APERTURE + * Specifies which physical aperture the page directory resides in. + * PRESERVE_PDES + * Deprecated - RM will always copy the RM-managed PDEs from the old page directory + * to the new page directory. + * ALL_CHANNELS + * If true, RM will update the instance blocks for all channels using + * the VAS and ignore the chId parameter. + * EXTEND_VASPACE + * If true, RM will use the client VA for client VA requests in VASPACE_SHARED_MANAGEMENT mode. + * If false, RM will use the internal VA for client VA requests.
IGNORE_CHANNEL_BUSY + * If true, RM will ignore the channel busy status during set page + * directory operation. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair. + * chId + * ID of the Channel to be updated. + * pasid + * PASID (Process Address Space IDentifier) of the process corresponding to + * the VA space. Ignored unless the VA space has ATS enabled. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_LIMIT + * NV_ERR_GENERIC + */ +#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS { + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + NvU32 numEntries; + NvU32 flags; + NvHandle hVASpace; + NvU32 chId; + NvU32 subDeviceId; // ID+1, 0 for BC + NvU32 pasid; +} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS; + +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U) + +/*! + * NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY + * + * Restore an existing VA space to an RM-managed top-level page directory. + * The VA space must have been created in SHARED_MANAGEMENT mode and + * previously relocated to an externally-managed page directory with + * NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (these two APIs are symmetric operations). + * For lifecycle details, see NV_VASPACE_ALLOCATION_PARAMETERS documentation in nvos.h. + * + * RM will propagate the update to all channels using the VA space. + * + * NOTE: All channels using this VA space are expected to be idle and unscheduled prior + * to and during this control call - it is the responsibility of the caller to ensure this. + * + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair.
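+ *
+ * A minimal sketch (illustrative only; handles and NvRmControl are
+ * assumed, and all channels on the VA space must already be idle):
+ *
+ *   NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS unsetParams = { 0 };
+ *   unsetParams.hVASpace = hVASpace;  // shared-management VAS (assumed)
+ *   status = NvRmControl(hClient, hDevice,
+ *                        NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+ *                        &unsetParams, sizeof(unsetParams));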
+ */ +#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS { + NvHandle hVASpace; + NvU32 subDeviceId; // ID+1, 0 for BC +} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS; + + + +/* _ctrl0080dma_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h new file mode 100644 index 0000000..7fcc696 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h @@ -0,0 +1,232 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080fb.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE fb control commands and parameters */ + +/** + * NV0080_CTRL_CMD_FB_GET_CAPS + * + * This command returns the set of framebuffer capabilities for the device + * in the form of an array of unsigned bytes. Framebuffer capabilities + * include supported features and required workarounds for the framebuffer + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_FB_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the framebuffer caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes.
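+ *
+ * A hedged usage sketch (illustrative only; hClient/hDevice, NvRmControl,
+ * and the NV_PTR_TO_NvP64 helper from nvtypes.h are assumed). Individual
+ * bits are then tested with the NV0080_CTRL_FB_GET_CAP macro defined
+ * below:
+ *
+ *   NvU8 fbCaps[NV0080_CTRL_FB_CAPS_TBL_SIZE] = { 0 };
+ *   NV0080_CTRL_FB_GET_CAPS_PARAMS fbParams = { 0 };
+ *   fbParams.capsTblSize = NV0080_CTRL_FB_CAPS_TBL_SIZE;
+ *   fbParams.capsTbl     = NV_PTR_TO_NvP64(fbCaps);
+ *   status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_FB_GET_CAPS,
+ *                        &fbParams, sizeof(fbParams));
+ *   if (status == NV_OK &&
+ *       NV0080_CTRL_FB_GET_CAP(fbCaps, NV0080_CTRL_FB_CAPS_BLOCKLINEAR))
+ *   {
+ *       // device uses blocklinear layouts
+ *   }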
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + */ +#define NV0080_CTRL_CMD_FB_GET_CAPS (0x801301) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FB_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_FB_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_FB_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_FB_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_FB_CAPS_SUPPORT_RENDER_TO_SYSMEM 0:0x01 +#define NV0080_CTRL_FB_CAPS_BLOCKLINEAR 0:0x02 +#define NV0080_CTRL_FB_CAPS_SUPPORT_SCANOUT_FROM_SYSMEM 0:0x04 +#define NV0080_CTRL_FB_CAPS_SUPPORT_CACHED_SYSMEM 0:0x08 +#define NV0080_CTRL_FB_CAPS_SUPPORT_C24_COMPRESSION 0:0x10 // Deprecated +#define NV0080_CTRL_FB_CAPS_SUPPORT_SYSMEM_COMPRESSION 0:0x20 +#define NV0080_CTRL_FB_CAPS_NISO_CFG0_BUG_534680 0:0x40 // Deprecated +#define NV0080_CTRL_FB_CAPS_ISO_FETCH_ALIGN_BUG_561630 0:0x80 // Deprecated + +#define NV0080_CTRL_FB_CAPS_BLOCKLINEAR_GOBS_512 1:0x01 +#define NV0080_CTRL_FB_CAPS_L2_TAG_BUG_632241 1:0x02 +#define NV0080_CTRL_FB_CAPS_SINGLE_FB_UNIT 1:0x04 // Deprecated +#define NV0080_CTRL_FB_CAPS_CE_RMW_DISABLE_BUG_897745 1:0x08 // Deprecated +#define NV0080_CTRL_FB_CAPS_OS_OWNS_HEAP_NEED_ECC_SCRUB 1:0x10 +#define NV0080_CTRL_FB_CAPS_ASYNC_CE_L2_BYPASS_SET 1:0x20 // Deprecated +#define NV0080_CTRL_FB_CAPS_DISABLE_TILED_CACHING_INVALIDATES_WITH_ECC_BUG_1521641 1:0x40 + +#define NV0080_CTRL_FB_CAPS_DISABLE_MSCG_WITH_VR_BUG_1681803 2:0x01 +#define NV0080_CTRL_FB_CAPS_VIDMEM_ALLOCS_ARE_CLEARED 2:0x02 +#define NV0080_CTRL_FB_CAPS_DISABLE_PLC_GLOBALLY 2:0x04 +#define NV0080_CTRL_FB_CAPS_PLC_BUG_3046774 2:0x08 + + +/* size in bytes of fb caps table */ +#define NV0080_CTRL_FB_CAPS_TBL_SIZE 3 + + + +/*! + * NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO + * + * This command returns compbit backing store-related information. + * + * size + * [out] Size of compbit store, in bytes + * address + * [out] Address of compbit store + * addressSpace + * [out] Address space of compbit store (corresponds to type NV_ADDRESS_SPACE in nvrm.h) + * maxCompbitLine + * [out] Maximum compbitline possible, determined based on size + * comptagsPerCacheLine + * [out] Number of compression tags per compression cache line, across all + * L2 slices. + * cacheLineSize + * [out] Size of compression cache line, across all L2 slices. (bytes) + * cacheLineSizePerSlice + * [out] Size of the compression cache line per slice (bytes) + * cacheLineFetchAlignment + * [out] Alignment used while fetching the compression cacheline range in FB. + * If start offset of compcacheline in FB is S and end offset is E, then + * the range to fetch to ensure entire compcacheline data is extracted is: + * (align_down(S), align_up(E)) + * This is needed in GM20X+ because of interleaving of data in Linear FB space. + * Example - In GM204 every other 1K FB chunk of data is offset by 16K. + * backingStoreBase + * [out] Address of start of Backing Store in linear FB Physical Addr space. + * This is the actual offset in FB which HW starts using as the Backing Store and + * in general will be different from the start of the region that driver allocates + * as the backing store. This address is expected to be 2K aligned.
+ * gobsPerComptagPerSlice
+ *   [out] (Only on Pascal) Number of GOBs (512 bytes of surface PA) that correspond to one 64KB comptagline, per slice.
+ *   One GOB stores 1 byte of compression bits.
+ *   0 value means this field is not applicable for the current architecture.
+ * backingStoreCbcBase
+ *   [out] 2KB aligned base address of CBC (post divide address)
+ * comptaglineAllocationPolicy
+ *   [out] Policy used to allocate comptagline from CBC for the device
+ * privRegionStartOffset
+ *   [out] Starting offset for any priv region allocated by clients. Only used by MODS
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO (0x801306) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 Address, 8);
+    NvU32 AddressSpace;
+    NvU32 MaxCompbitLine;
+    NvU32 comptagsPerCacheLine;
+    NvU32 cacheLineSize;
+    NvU32 cacheLineSizePerSlice;
+    NvU32 cacheLineFetchAlignment;
+    NV_DECLARE_ALIGNED(NvU64 backingStoreBase, 8);
+    NvU32 gobsPerComptagPerSlice;
+    NvU32 backingStoreCbcBase;
+    NvU32 comptaglineAllocationPolicy;
+    NV_DECLARE_ALIGNED(NvU64 privRegionStartOffset, 8);
+} NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS;
+
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_UNKNOWN 0 // ADDR_UNKNOWN
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_SYSMEM 1 // ADDR_SYSMEM
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_FBMEM 2 // ADDR_FBMEM
+
+// Policy used to allocate comptaglines
+/**
+ * Legacy mode allocates a comptagline for 64kb page. This mode will always allocate
+ * contiguous comptaglines from a ctag heap.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_LEGACY 0
+/**
+ * 1TO1 mode allocates a comptagline for 64kb page. This mode will calculate
+ * comptagline offset based on physical address. This mode will allocate
+ * contiguous comptaglines if the surface is contiguous and non-contiguous
+ * comptaglines for non-contiguous surfaces.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_1TO1 1
+/**
+ * 1TO4_Heap mode allocates a comptagline at 256kb page granularity. This mode
+ * will allocate comptaglines from a heap. This mode will align the surface allocations
+ * to 256kb before allocating comptaglines. The comptaglines allocated will always be
+ * contiguous here.
+ * TODO: For GA10x, this mode will support < 256kb surface allocations, by sharing
+ * a comptagline with at most 3 different 64Kb allocations. This will result in a
+ * mixed-contiguity config where comptaglines will be allocated contiguously as well
+ * as non-contiguously when shared with other allocations.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_1TO4 2
+/**
+ * Rawmode will transfer allocation of comptaglines to HW, where HW manages
+ * comptaglines based on physical offset. The comptaglines are cleared when SW
+ * issues physical/virtual scrub to the surface before reuse.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_RAWMODE 3
+
+/**
+ * NV0080_CTRL_CMD_FB_GET_CAPS_V2
+ *
+ * This command returns the same set of framebuffer capabilities for the
+ * device as @ref NV0080_CTRL_CMD_FB_GET_CAPS. The difference is in the
+ * structure NV0080_CTRL_FB_GET_CAPS_V2_PARAMS, which contains a statically
+ * sized array, rather than a caps table pointer and a caps table size in
+ * NV0080_CTRL_FB_GET_CAPS_PARAMS.
+ *
+ * capsTbl
+ *   This parameter specifies a pointer to the client's caps table buffer
+ *   into which the framebuffer caps bits will be written by the RM.
+ *   The caps table is an array of unsigned bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_POINTER
+ */
+#define NV0080_CTRL_CMD_FB_GET_CAPS_V2 (0x801307) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FB_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x7U)
+
+typedef struct NV0080_CTRL_FB_GET_CAPS_V2_PARAMS {
+    NvU8 capsTbl[NV0080_CTRL_FB_CAPS_TBL_SIZE];
+} NV0080_CTRL_FB_GET_CAPS_V2_PARAMS;
+
+
+
+
+/* _ctrl0080fb_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
new file mode 100644
index 0000000..823cc0c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
@@ -0,0 +1,642 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080fifo.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+/* NV01_DEVICE_XX/NV03_DEVICE fifo control commands and parameters */
+
+/**
+ * NV0080_CTRL_FIFO_GET_CAPS
+ *
+ * This command returns the set of FIFO engine capabilities for the device
+ * in the form of an array of unsigned bytes. FIFO capabilities
+ * include supported features and required workarounds for the FIFO
+ * engine(s) within the device, each represented by a byte offset into the
+ * table and a bit position within that byte.
+ *
+ * capsTblSize
+ *   This parameter specifies the size in bytes of the caps table.
+ *   This value should be set to NV0080_CTRL_FIFO_CAPS_TBL_SIZE.
+ * capsTbl
+ *   This parameter specifies a pointer to the client's caps table buffer
+ *   into which the FIFO caps bits will be transferred by the RM.
+ *   The caps table is an array of unsigned bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_GET_CAPS (0x801701) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_CAPS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_FIFO_GET_CAPS_PARAMS {
+    NvU32 capsTblSize;
+    NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0080_CTRL_FIFO_GET_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV0080_CTRL_FIFO_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+/* caps format is byte_index:bit_mask */
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_SCHED_EVENT 0:0x01
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_PCI_PB 0:0x02
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_VID_PB 0:0x04
+#define NV0080_CTRL_FIFO_CAPS_USERD_IN_SYSMEM 0:0x40
+/* do not use pipelined PTE BLITs to update PTEs: call the RM */
+#define NV0080_CTRL_FIFO_CAPS_NO_PIPELINED_PTE_BLIT 0:0x80
+#define NV0080_CTRL_FIFO_CAPS_GPU_MAP_CHANNEL 1:0x01
+#define NV0080_CTRL_FIFO_CAPS_BUFFEREDMODE_SCHEDULING 1:0x02 // Deprecated
+#define NV0080_CTRL_FIFO_CAPS_WFI_BUG_898467 1:0x08 // Deprecated
+#define NV0080_CTRL_FIFO_CAPS_HAS_HOST_LB_OVERFLOW_BUG_1667921 1:0x10
+/*
+ * To indicate Volta subcontext support with multiple VA spaces in a TSG.
+ * We are not using the "subcontext" tag for the property, since we also use
+ * subcontext to represent the pre-Volta SCG feature, which only allows a single
+ * VA space in a TSG.
+ */
+#define NV0080_CTRL_FIFO_CAPS_MULTI_VAS_PER_CHANGRP 1:0x20
+
+
+#define NV0080_CTRL_FIFO_CAPS_SUPPORT_WDDM_INTERLEAVING 1:0x40
+
+/* size in bytes of fifo caps table */
+#define NV0080_CTRL_FIFO_CAPS_TBL_SIZE 2
+
+/*
+ * NV0080_CTRL_CMD_FIFO_ENABLE_SCHED_EVENTS
+ *
+ * This command enables the GPU to place various scheduling events in the
+ * off-chip event buffer (with optional interrupt) for those GPUs that support
+ * it.
+ *
+ * record
+ *   This parameter specifies a mask of event types to record.
+ * interrupt
+ *   This parameter specifies a mask of event types for which to interrupt
+ *   the CPU when the event occurs.
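+ *
+ * For example, to record context-switch start/end events and interrupt only
+ * on preemption events (an illustrative sketch: DRF_DEF() is the bitfield
+ * helper from nvmisc.h, NvRmControl() stands in for the client's RM control
+ * dispatch routine, and hClient/hDevice are assumed handles):
+ *
+ *     NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.record    = DRF_DEF(0080_CTRL, _FIFO_ENABLE_SCHED_EVENTS, _START_CTX, _ENABLE) |
+ *                        DRF_DEF(0080_CTRL, _FIFO_ENABLE_SCHED_EVENTS, _END_CTX, _ENABLE) |
+ *                        DRF_DEF(0080_CTRL, _FIFO_ENABLE_SCHED_EVENTS, _PREEMPT, _ENABLE);
+ *     params.interrupt = DRF_DEF(0080_CTRL, _FIFO_ENABLE_SCHED_EVENTS, _PREEMPT, _ENABLE);
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_FIFO_ENABLE_SCHED_EVENTS,
+ *                          &params, sizeof(params));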
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_ENABLE_SCHED_EVENTS (0x801703) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0x3" */
+
+typedef struct NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PARAMS {
+    NvU32 record;
+    NvU32 interrupt;
+} NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PARAMS;
+
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_START_CTX 0:0
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_START_CTX_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_START_CTX_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_END_CTX 1:1
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_END_CTX_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_END_CTX_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_NEW_RUNLIST 2:2
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_NEW_RUNLIST_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_NEW_RUNLIST_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_SEM_ACQUIRE 3:3
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_SEM_ACQUIRE_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_SEM_ACQUIRE_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PAGE_FAULT 4:4
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PAGE_FAULT_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PAGE_FAULT_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PREEMPT 5:5
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PREEMPT_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PREEMPT_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_YIELD 6:6
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_YIELD_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_YIELD_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_IDLE_CTX 7:7
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_IDLE_CTX_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_IDLE_CTX_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_HI_PRI 8:8
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_HI_PRI_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_HI_PRI_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ENG_STALLED 9:9
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ENG_STALLED_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ENG_STALLED_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_VSYNC 10:10
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_VSYNC_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_VSYNC_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_FGCS_FAULT 11:11
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_FGCS_FAULT_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_FGCS_FAULT_ENABLE (0x00000001)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ALL 11:0
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ALL_DISABLE (0x00000000)
+#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ALL_ENABLE (0x00000fff)
+
+/*
+ * NV0080_CTRL_CMD_FIFO_START_SELECTED_CHANNELS
+ *
+ * This command allows the caller to request that a set of channels
+ * be added to the runlist.
+ *
+ * fifoStartChannelListSize
+ *   Size of the fifoStartChannelList. The units are in entries, not
+ *   bytes.
+ * fifoStartChannelList
+ *   This will be a list of NV0080_CTRL_FIFO_CHANNEL data structures,
+ *   one for each channel that is to be started.
+ * channelHandle
+ *   deprecated
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+/*
+ * hChannel
+ *   This is the handle to the channel that is scheduled to be started.
+ */
+typedef struct NV0080_CTRL_FIFO_CHANNEL {
+    NvHandle hChannel;
+} NV0080_CTRL_FIFO_CHANNEL;
+
+#define NV0080_CTRL_CMD_FIFO_START_SELECTED_CHANNELS (0x801705) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS {
+    NvU32    fifoStartChannelListCount;
+    NvHandle channelHandle[8];
+    NV_DECLARE_ALIGNED(NvP64 fifoStartChannelList, 8);
+} NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS;
+
+#define NV0080_CTRL_FIFO_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_ENGINE_ID_MPEG (0x00000001)
+#define NV0080_CTRL_FIFO_ENGINE_ID_MOTION_ESTIMATION (0x00000002)
+#define NV0080_CTRL_FIFO_ENGINE_ID_VIDEO (0x00000003)
+#define NV0080_CTRL_FIFO_ENGINE_ID_BITSTREAM (0x00000004)
+#define NV0080_CTRL_FIFO_ENGINE_ID_ENCRYPTION (0x00000005)
+#define NV0080_CTRL_FIFO_ENGINE_ID_FGT (0x00000006)
+
+/*
+ * NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES
+ *
+ * This command is used to provide the caller with the alignment and size
+ * of the context save region for an engine.
+ *
+ * engineId
+ *   This parameter is an input parameter specifying the engineId for which
+ *   the alignment/size is requested.
+ * alignment
+ *   This parameter is an output parameter which will be filled in with the
+ *   minimum alignment requirement.
+ * size
+ *   This parameter is an output parameter which will be filled in with the
+ *   minimum size of the context save region for the engine.
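+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() stands in for
+ * the client's RM control dispatch routine, and hClient/hDevice are assumed
+ * to be already-allocated handles):
+ *
+ *     NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.engineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS;
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES,
+ *                          &params, sizeof(params));
+ *     // on NV_OK, params.alignment and params.size give the minimum
+ *     // alignment and size of the graphics engine's context save region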
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES (0x801707) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID (0x7U)
+
+typedef struct NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS {
+    NvU32 engineId;
+    NvU32 alignment;
+    NvU32 size;
+} NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS
+ *
+ * This command allows the caller to group two sets of channels. A channel
+ * set includes one or more channels. After grouping, the grouped channel IDs
+ * are placed next to each other in the runlist. This command can be used
+ * several times to group more than two channels.
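+ *
+ * For example, grouping three channels A, B and C takes two calls (an
+ * illustrative sketch; the channel handles and NvRmControl() dispatch
+ * routine are placeholders for the client's actual values):
+ *
+ *     NV0080_CTRL_FIFO_RUNLIST_GROUP_CHANNELS_PARAM params;
+ *
+ *     params.hChannel1 = hChannelA;   // base set: {A}
+ *     params.hChannel2 = hChannelB;   // group becomes {A, B}
+ *     NvRmControl(hClient, hDevice,
+ *                 NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS,
+ *                 &params, sizeof(params));
+ *
+ *     params.hChannel1 = hChannelA;   // base set: {A, B}
+ *     params.hChannel2 = hChannelC;   // group becomes {A, B, C}
+ *     NvRmControl(hClient, hDevice,
+ *                 NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS,
+ *                 &params, sizeof(params));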
+ *
+ * Using a NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE after
+ * NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS is the general usage. A
+ * NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS after a
+ * NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE for a channel handle is not
+ * allowed.
+ *
+ * NV0080_CTRL_FIFO_RUNLIST_GROUP_MAX_CHANNELS defines the max channels in a
+ * group.
+ *
+ * hChannel1
+ *   This parameter specifies the handle of the channel that belongs to the
+ *   base set of channels.
+ * hChannel2
+ *   This parameter specifies the handle of the channel that belongs to the
+ *   additional set of channels.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_CHANNEL
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS (0x801709) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0x9" */
+
+typedef struct NV0080_CTRL_FIFO_RUNLIST_GROUP_CHANNELS_PARAM {
+    NvHandle hChannel1;
+    NvHandle hChannel2;
+} NV0080_CTRL_FIFO_RUNLIST_GROUP_CHANNELS_PARAM;
+
+#define NV0080_CTRL_FIFO_RUNLIST_GROUP_MAX_CHANNELS (8)
+
+/*
+ * NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE
+ *
+ * This command allows the caller to divide the timeslice (DMA_TIMESLICE) of a
+ * channel between the channels in the group in which the channel resides.
+ * After applying this command, a timeslice divided channel (group) has a
+ * short timeslice and repeats more than once in the runlist. The total
+ * available execution time is not changed.
+ *
+ * Using this command after NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS is the
+ * general usage. A NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS after a
+ * NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE for a channel handle is not
+ * allowed.
+ *
+ * hChannel
+ *   This parameter specifies the handle of the channel for the channel
+ *   group to which the divided timeslice operation will apply.
+ * tsDivisor
+ *   This parameter specifies the timeslice divisor value. This value
+ *   should not exceed NV0080_CTRL_FIFO_RUNLIST_MAX_TIMESLICE_DIVISOR
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_CHANNEL
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ */
+#define NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE (0x80170b) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0xB" */
+
+typedef struct NV0080_CTRL_FIFO_RUNLIST_DIVIDE_TIMESLICE_PARAM {
+    NvHandle hChannel;
+    NvU32    tsDivisor;
+} NV0080_CTRL_FIFO_RUNLIST_DIVIDE_TIMESLICE_PARAM;
+
+#define NV0080_CTRL_FIFO_RUNLIST_MAX_TIMESLICE_DIVISOR (12)
+
+/*
+ * NV0080_CTRL_CMD_FIFO_PREEMPT_RUNLIST
+ *
+ * This command preempts the engine represented by the specified runlist.
+ *
+ * hRunlist
+ *   This parameter specifies the per engine runlist handle. This
+ *   parameter is being retained to maintain backwards compatibility
+ *   with clients that have not transitioned over to using runlists
+ *   on a per subdevice basis.
+ *
+ * engineID
+ *   This parameter specifies the engine to be preempted. Engine defines
+ *   can be found in cl2080.h.
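+ *
+ * A minimal usage sketch (illustrative only; NV2080_ENGINE_TYPE_GRAPHICS is
+ * one of the cl2080.h engine defines, and NvRmControl() stands in for the
+ * client's RM control dispatch routine):
+ *
+ *     NV0080_CTRL_FIFO_PREEMPT_RUNLIST_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.engineID = NV2080_ENGINE_TYPE_GRAPHICS;
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_FIFO_PREEMPT_RUNLIST,
+ *                          &params, sizeof(params));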
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_PREEMPT_RUNLIST (0x80170c) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0xC" */
+
+typedef struct NV0080_CTRL_FIFO_PREEMPT_RUNLIST_PARAMS {
+    NvHandle hRunlist;
+    NvU32    engineID;
+} NV0080_CTRL_FIFO_PREEMPT_RUNLIST_PARAMS;
+
+
+/*
+ * NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST
+ *
+ * Takes a list of hChannels as input and returns the
+ * corresponding channel IDs that they correspond to
+ * in HW.
+ *
+ * numChannels
+ *   Size of the input pChannelHandleList
+ * pChannelHandleList
+ *   List of input channel handles
+ * pChannelList
+ *   List of channel IDs corresponding to
+ *   each entry in the pChannelHandleList.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST (0x80170d) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS {
+    NvU32 numChannels;
+    NV_DECLARE_ALIGNED(NvP64 pChannelHandleList, 8);
+    NV_DECLARE_ALIGNED(NvP64 pChannelList, 8);
+} NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS;
+
+
+/*
+ * NV0080_CTRL_CMD_FIFO_GET_LATENCY_BUFFER_SIZE
+ *
+ * This control call is used to return the number of gp methods (gpsize) and push buffer methods (pbsize)
+ * allocated to each engine.
+ *
+ * engineID
+ *   The engine ID, which is an input
+ *
+ * gpEntries
+ *   number of gp entries
+ *
+ * pbEntries
+ *   number of pb entries (in units of 32B rows)
+ *
+ */
+
+
+#define NV0080_CTRL_CMD_FIFO_GET_LATENCY_BUFFER_SIZE (0x80170e) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS {
+    NvU32 engineID;
+    NvU32 gpEntries;
+    NvU32 pbEntries;
+} NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_CHANNELLIST_INVALID_CHANNEL (0xffffffff)
+
+/*
+ * NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES
+ *
+ * This command allows internal properties of the channel
+ * to be modified even when the channel is active. Most of these properties
+ * are not meant to be modified during normal runs, hence they have been
+ * kept separate from channel alloc params. It is the
+ * responsibility of the underlying hal routine to make
+ * sure the channel properties are changed while the channel
+ * is *NOT* in a transient state.
+ *
+ * hChannel
+ *   The handle to the channel.
+ *
+ * property
+ *   The channel property to be modified.
+ *   NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_xxx provides the entire list
+ *   of properties.
+ *
+ * value
+ *   The new value for the property.
+ *   When property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEINMICROSECONDS
+ *     value = timeslice in microseconds
+ *     desc: Used to change a channel's engine timeslice in microseconds
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEINMICROSECONDS
+ *     value = timeslice in microseconds
+ *     desc: Used to change a channel's pbdma timeslice in microseconds
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEDISABLE
+ *     value is ignored
+ *     desc: Disables a channel from being timesliced out from an engine.
+ *     Other scheduling events, like explicit yield and acquire failures, will
+ *     still switch out the channel.
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEDISABLE
+ *     value is ignored
+ *     desc: Disables a channel from being timesliced out from its pbdma.
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_INVALIDATE_PDB_TARGET
+ *     value is ignored
+ *     desc: Override the channel's page directory pointer table with an
+ *     erroneous aperture value. (TODO: make test calls NV_VERIF_FEATURES
+ *     only)(VERIF ONLY)
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT
+ *     value = engineID of engine that will have its context pointer reset.
+ *     engineID defines can be found in cl2080.h
+ *     (e.g., NV2080_ENGINE_TYPE_GRAPHICS)
+ *     desc: Override the channel's engine context pointer with a non-existent
+ *     buffer, forcing it to fault. (VERIF ONLY)
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT_NOPREEMPT
+ *     value = engineID of engine that will have its context pointer reset.
+ *     engineID defines can be found in cl2080.h
+ *     (e.g., NV2080_ENGINE_TYPE_GRAPHICS)
+ *     desc: Override the channel's engine context pointer with a non-existent
+ *     buffer, forcing it to fault. However, the channel will not be preempted
+ *     before having its channel state modified. (VERIF ONLY)
+ *
+ *   property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_NOOP
+ *     value is ignored
+ *     desc: Does not change any channel state; exercises a full channel
+ *     preempt/unbind/bind op. (VERIF ONLY)
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_CHANNEL
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES (0x80170f) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS {
+    NvHandle hChannel;
+    NvU32    property;
+    NV_DECLARE_ALIGNED(NvU64 value, 8);
+} NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS;
+
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEINMICROSECONDS (0x00000000)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEINMICROSECONDS (0x00000001)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEDISABLE (0x00000002)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEDISABLE (0x00000003)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_INVALIDATE_PDB_TARGET (0x00000004)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT (0x00000005)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_NOOP (0x00000007)
+#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT_NOPREEMPT (0x00000008)
+
+
+
+/*
+ * NV0080_CTRL_CMD_FIFO_STOP_RUNLIST
+ *
+ * Stops all processing on the runlist for the given engine. This is only
+ * valid in per-engine round-robin scheduling mode.
+ *
+ * engineID
+ *   This parameter specifies the engine to be stopped. Engine defines
+ *   can be found in cl2080.h.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0080_CTRL_CMD_FIFO_STOP_RUNLIST (0x801711) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS_MESSAGE_ID (0x11U)
+
+typedef struct NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS {
+    NvU32 engineID;
+} NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_FIFO_START_RUNLIST
+ *
+ * Restarts a runlist previously stopped with NV0080_CTRL_CMD_FIFO_STOP_RUNLIST.
+ * This is only valid for per-engine round-robin mode.
+ *
+ * engineID
+ *   This parameter specifies the engine to be started. Engine defines
+ *   can be found in cl2080.h.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV0080_CTRL_CMD_FIFO_START_RUNLIST (0x801712) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_START_RUNLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_START_RUNLIST_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV0080_CTRL_FIFO_START_RUNLIST_PARAMS {
+    NvU32 engineID;
+} NV0080_CTRL_FIFO_START_RUNLIST_PARAMS;
+
+/**
+ * NV0080_CTRL_FIFO_GET_CAPS_V2
+ *
+ * This command returns the same set of FIFO engine capabilities for the device
+ * as @ref NV0080_CTRL_FIFO_GET_CAPS. The difference is in the structure
+ * NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS, which contains a statically sized array,
+ * rather than a caps table pointer and a caps table size in
+ * NV0080_CTRL_FIFO_GET_CAPS_PARAMS.
+ *
+ * capsTbl
+ *   This parameter is an array of the client's caps table buffer.
+ *   The FIFO caps bits will be written by the RM.
+ *   The caps table is an array of unsigned bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_GET_CAPS_V2 (0x801713) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS {
+    NvU8 capsTbl[NV0080_CTRL_FIFO_CAPS_TBL_SIZE];
+} NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS;
+
+/**
+ * NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS
+ *
+ * @brief This command idles (deschedules and waits for pending work to complete) channels
+ * belonging to a particular device.
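+ *
+ * For example, idling two channels with a one-second timeout (an
+ * illustrative sketch; the channel handles, flags value, and NvRmControl()
+ * dispatch routine are placeholders for the client's actual values):
+ *
+ *     NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.numChannels  = 2;
+ *     params.hChannels[0] = hChannelA;
+ *     params.hChannels[1] = hChannelB;
+ *     params.flags        = 0;
+ *     params.timeout      = 1000000;   // microseconds
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS,
+ *                          &params, sizeof(params));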
+ *
+ * numChannels
+ *   Number of channels to idle
+ *
+ * hChannels
+ *   Array of channel handles to idle
+ *
+ * flags
+ *   NVOS30_FLAGS that control aspects of how the channel is idled
+ *
+ * timeout
+ *   GPU timeout in microseconds, for each CHID Manager's idling operation
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_TIMEOUT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_LOCK_STATE
+ */
+#define NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS (0x801714) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS_MESSAGE_ID" */
+#define NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS 4096
+
+#define NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS {
+    NvU32    numChannels;
+    NvHandle hChannels[NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS];
+    NvU32    flags;
+    NvU32    timeout;
+} NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS;
+
+/* _ctrl0080fifo_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
new file mode 100644
index 0000000..58f2953
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
@@ -0,0 +1,585 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080gpu.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+#include "nvlimits.h"
+
+
+/* NV01_DEVICE_XX/NV03_DEVICE gpu control commands and parameters */
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_CLASSLIST
+ *
+ * This command returns supported class information for the specified device.
+ * If the device comprises more than one GPU, the class list represents
+ * the set of supported classes common to all GPUs within the device.
+ *
+ * It has two modes:
+ *
+ * If the classList pointer is NULL, then this command returns the number
+ * of classes supported by the device in the numClasses field. The value
+ * should then be used by the client to allocate a classList buffer
+ * large enough to hold one 32bit value per numClasses entry.
+ *
+ * If the classList pointer is non-NULL, then this command returns the
+ * set of supported class numbers in the specified buffer.
+ *
+ * numClasses
+ *   If classList is NULL, then this parameter will return the
+ *   number of classes supported by the device. If classList is non-NULL,
+ *   then this parameter indicates the number of entries in classList.
+ * classList
+ *   This parameter specifies a pointer to the client's buffer into
+ *   which the supported class numbers should be returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0080_CTRL_CMD_GPU_GET_CLASSLIST (0x800201) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS {
+    NvU32 numClasses;
+    NV_DECLARE_ALIGNED(NvP64 classList, 8);
+} NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS;
+
+/**
+ * NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES
+ *
+ * This command returns the number of subdevices for the device.
+ *
+ * numSubDevices
+ *   This parameter returns the number of subdevices within the device.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES (0x800280) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS_MESSAGE_ID (0x80U)
+
+typedef struct NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS {
+    NvU32 numSubDevices;
+} NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER
+ *
+ * This command returns the video link order of each subdevice id inside the
+ * device. This call can only be made after SLI is enabled. This call is
+ * intended for 3D clients to use to determine the vidlink order of the
+ * devices. The Display Output Parent will always be the first subdevice
+ * mask listed in the array. Note that this command should not be used in
+ * case of bridgeless SLI. The order of the subdevices returned by this
+ * command will not be correct in case of bridgeless SLI.
+ *
+ * ConnectionCount
+ *   Each HW can provide 1 or 2 links between all GPUs in a device. This
+ *   number tells how many links are available between GPUs. This data
+ *   also represents the number of concurrent SLI heads that can run at
+ *   the same time over this one device.
+ *
+ * Order
+ *   This array returns the order of subdevices that are used through
+ *   the vidlink for display output.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER (0x800281) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS {
+    NvU32 ConnectionCount;
+    NvU32 Order[NV_MAX_SUBDEVICES];
+} NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER
+ *
+ * This command sets display ownership within the device to the specified
+ * subdevice instance. The actual transfer of display ownership will take
+ * place at the next modeset.
+ *
+ * subDeviceInstance
+ *   This member specifies the subdevice instance of the new display
+ *   owner. The subdevice instance must be in the legal range
+ *   indicated by the NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER (0x800282) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS_MESSAGE_ID (0x82U)
+
+typedef struct NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_DISPLAY_OWNER
+ *
+ * This command returns the subdevice instance of the current display owner
+ * within the device.
+ *
+ * subDeviceInstance
+ *   This member returns the subdevice instance of the current display
+ *   owner. The subdevice instance will be in the legal range
+ *   indicated by the NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0080_CTRL_CMD_GPU_GET_DISPLAY_OWNER (0x800283) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS_MESSAGE_ID (0x83U)
+
+typedef struct NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_SET_VIDLINK
+ *
+ * This command enables or disables the VIDLINK of all subdevices in the
+ * current SLI configuration.
+ *
+ * enable
+ *   Enables or disables the vidlink
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0080_CTRL_CMD_GPU_SET_VIDLINK (0x800285) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_VIDLINK_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_SET_VIDLINK_PARAMS_MESSAGE_ID (0x85U)
+
+typedef struct NV0080_CTRL_GPU_SET_VIDLINK_PARAMS {
+    NvU32 enable;
+} NV0080_CTRL_GPU_SET_VIDLINK_PARAMS;
+
+#define NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE (0x00000000)
+#define NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE (0x00000001)
+
+/* commands */
+#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_GET_STATUS 0
+#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_POWERDOWN 1
+#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_POWERUP 2
+
+/* status */
+#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWER_ON 0
+#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWERING_DOWN 1
+#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_GATED 2
+#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWERING_UP 3
+
+/*
+ * NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE
+ *
+ * This command is used to enable or disable the persistence of a GPU's
+ * software state when no clients exist. With persistent software state enabled,
+ * the GPU's software state is not torn down when the last client exits, but is
+ * retained until either the kernel module unloads or persistent software state
+ * is disabled.
+ *
+ * newState
+ *   This input parameter is used to enable or disable the persistence of the
+ *   software state of all subdevices within the device.
+ *   Possible values are:
+ *     NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED
+ *     NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE (0x800287) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID" */
+
+/* Possible values of persistentSwState */
+#define NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED (0x00000000)
+#define NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED (0x00000001)
+
+#define NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID (0x87U)
+
+typedef struct NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS {
+    NvU32 newState;
+} NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE
+ *
+ * swStatePersistence
+ *   This parameter returns a value indicating if persistent software
+ *   state is currently enabled or not for the specified GPU. See the
+ *   description of NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE.
+ *   Possible values are:
+ *     NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED
+ *     NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE (0x800288) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS {
+    NvU32 swStatePersistence;
+} NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS;
+
+/**
+ * NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE
+ *
+ * This command returns a value indicating the virtualization mode in
+ * which the GPU is running.
+ *
+ * virtualizationMode
+ *   This parameter returns the virtualization mode of the device.
+ *   Possible values are:
+ *     NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE
+ *       This value indicates that there is no virtualization mode associated with the
+ *       device (i.e. it's a baremetal GPU).
+ *     NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS
+ *       This value indicates that the device is associated with the NMOS.
+ *     NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX
+ *       This value indicates that the device is associated with VGX (guest GPU).
+ *     NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST
+ *     NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU
+ *       This value indicates that the device is associated with vGPU (host GPU).
+ *     NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA
+ *       This value indicates that the device is associated with vSGA (host GPU).
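+ *
+ * A minimal usage sketch (illustrative only; NvRmControl() stands in for
+ * the client's RM control dispatch routine, and hClient/hDevice are assumed
+ * handles):
+ *
+ *     NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE,
+ *                          &params, sizeof(params));
+ *     if ((status == NV_OK) &&
+ *         (params.virtualizationMode == NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX))
+ *     {
+ *         // running inside a vGPU guest
+ *     }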
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE (0x800289) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE (0x00000000)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS (0x00000001)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX (0x00000002)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST (0x00000003)
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST
+#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA (0x00000004)
+
+#define NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS {
+    NvU32 virtualizationMode;
+} NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS;
+
+
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE
+ *
+ * This command returns the setting information for sparse texture compute
+ * mode optimization on the associated GPU. This setting indicates how the
+ * large page size should be selected by the RM for the GPU.
+ *
+ * defaultSetting
+ *   This field specifies what the OS default setting is for the associated
+ *   GPU. See NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list
+ *   of possible values.
+ * currentSetting
+ *   This field specifies which optimization mode was applied when the
+ *   driver was loaded. See
+ *   NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list of
+ *   possible values.
+ * pendingSetting
+ *   This field specifies which optimization mode will be applied on the
+ *   next driver reload. See
+ *   NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list of
+ *   possible values.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE (0x80028c) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID (0x8CU)
+
+typedef struct NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS {
+    NvU32 defaultSetting;
+    NvU32 currentSetting;
+    NvU32 pendingSetting;
+} NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE
+ *
+ * This command sets the pending setting for sparse texture compute mode. This
+ * setting indicates how the large page size should be selected by the RM for
+ * the GPU on the next driver reload.
+ *
+ * setting
+ *   This field specifies which use case the RM should optimize the large
+ *   page size for on the next driver reload. Possible values for this
+ *   field are:
+ *     NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_DEFAULT
+ *       This value indicates that the RM should use the default setting for
+ *       the GPU's large page size. The default setting is reported by
+ *       NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE.
+ *     NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE
+ *       This value indicates that the RM should select the GPU's large page
+ *       size to optimize for compute use cases.
+ *     NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE
+ *       This value indicates that the RM should select the GPU's large page
+ *       size to optimize for sparse texture use cases.
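+ *
+ * For example, requesting compute-optimized large pages for the next driver
+ * reload and confirming the pending setting (an illustrative sketch; the
+ * NvRmControl() dispatch routine and handles are placeholders):
+ *
+ *     NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS setParams = { 0 };
+ *     NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS getParams = { 0 };
+ *
+ *     setParams.setting = NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE;
+ *     NvRmControl(hClient, hDevice,
+ *                 NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE,
+ *                 &setParams, sizeof(setParams));
+ *
+ *     NvRmControl(hClient, hDevice,
+ *                 NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE,
+ *                 &getParams, sizeof(getParams));
+ *     // getParams.pendingSetting now reports _OPTIMIZE_COMPUTE; the change
+ *     // takes effect on the next driver reload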
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+#define NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE (0x80028d) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID (0x8DU)
+
+typedef struct NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS {
+    NvU32 setting;
+} NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS;
+
+/* Possible sparse texture compute mode setting values */
+#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_DEFAULT 0
+#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE 1
+#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE 2
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_VGX_CAPS
+ *
+ * This command gets the VGX capability of the GPU depending on the status of
+ * the VGX hardware fuse.
+ *
+ * isVgx
+ *   This field is set to NV_TRUE if the VGX fuse is enabled for the GPU;
+ *   otherwise it is set to NV_FALSE.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+#define NV0080_CTRL_CMD_GPU_GET_VGX_CAPS (0x80028e) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS_MESSAGE_ID (0x8EU)
+
+typedef struct NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS {
+    NvBool isVgx;
+} NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS;
+
+
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_SRIOV_CAPS
+ *
+ * This command is used to query GPU SRIOV capabilities.
+ *
+ * totalVFs
+ *   Total number of virtual functions supported.
+ *
+ * firstVfOffset
+ *   Offset of the first VF.
+ *
+ * vfFeatureMask
+ *   Bitmask of features managed by the guest
+ *
+ * FirstVFBar0Address
+ *   Address of BAR0 region of first VF.
+ *
+ * FirstVFBar1Address
+ *   Address of BAR1 region of first VF.
+ *
+ * FirstVFBar2Address
+ *   Address of BAR2 region of first VF.
+ *
+ * bar0Size
+ *   Size of BAR0 region on VF.
+ *
+ * bar1Size
+ *   Size of BAR1 region on VF.
+ *
+ * bar2Size
+ *   Size of BAR2 region on VF.
+ *
+ * b64bitBar0
+ *   If the VF BAR0 is 64-bit addressable.
+ *
+ * b64bitBar1
+ *   If the VF BAR1 is 64-bit addressable.
+ *
+ * b64bitBar2
+ *   If the VF BAR2 is 64-bit addressable.
+ *
+ * bSriovEnabled
+ *   Flag for SR-IOV enabled or not.
+ *
+ * bSriovHeavyEnabled
+ *   Flag for whether SR-IOV is enabled in standard or heavy mode.
+ *
+ * bEmulateVFBar0TlbInvalidationRegister
+ *   Flag for whether VF's TLB Invalidate Register region needs emulation.
+ *
+ * bClientRmAllocatedCtxBuffer
+ *   Flag for whether engine ctx buffer is managed by client RM.
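+ *
+ * A minimal usage sketch (illustrative only; the NvRmControl() dispatch
+ * routine and handles are placeholders for the client's actual values):
+ *
+ *     NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_CMD_GPU_GET_SRIOV_CAPS,
+ *                          &params, sizeof(params));
+ *     if ((status == NV_OK) && params.bSriovEnabled)
+ *     {
+ *         // params.totalVFs VFs are available; the first VF's BAR0 starts
+ *         // at params.FirstVFBar0Address and spans params.bar0Size bytes
+ *     }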
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0080_CTRL_CMD_GPU_GET_SRIOV_CAPS (0x800291) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS_MESSAGE_ID (0x91U)
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+    NvU32  totalVFs;
+    NvU32  firstVfOffset;
+    NvU32  vfFeatureMask;
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+    NvBool b64bitBar0;
+    NvBool b64bitBar1;
+    NvBool b64bitBar2;
+    NvBool bSriovEnabled;
+    NvBool bSriovHeavyEnabled;
+    NvBool bEmulateVFBar0TlbInvalidationRegister;
+    NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+
+// Update this macro if new HW exceeds GPU Classlist MAX_SIZE
+#define NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE 116
+
+#define NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2 (0x800292) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS_MESSAGE_ID (0x92U)
+
+typedef struct NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS {
+    NvU32 numClasses;                                     // __OUT__
+    NvU32 classList[NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE];  // __OUT__
+} NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE
+ *
+ * Find a subdevice handle allocated under this device
+ */
+#define NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE (0x800293) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM_MESSAGE_ID (0x93U)
+
+typedef struct NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM {
+    NvU32    subDeviceInst; // [in]
+    NvHandle hSubDevice;    // [out]
+} NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM;
+
+/*
+ * NV0080_CTRL_CMD_GPU_GET_BRAND_CAPS
+ *
+ * This command gets branding information for the device.
+ *
+ * brands
+ *   Mask containing branding information. A bit in this
+ *   mask is set if the GPU has the particular branding.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_QUADRO NVBIT(0)
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_NVS NVBIT(1)
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_TITAN NVBIT(2)
+
+#define NV0080_CTRL_CMD_GPU_GET_BRAND_CAPS (0x800294) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS_MESSAGE_ID (0x94U)
+
+typedef struct NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS {
+    NvU32 brands;
+} NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS;
+
+/*
+ * These are the per-VF BAR1 sizes that we support in MB.
+ * They are used with the NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE control call and
+ * should match the NV_XVE_BAR1_CONFIG_SIZE register defines.
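+ *
+ * For example, sizing each VF's BAR1 at 256 MB through
+ * NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE (an illustrative sketch; the
+ * NvRmControl() dispatch routine and handles are placeholders):
+ *
+ *     NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.vfBar1SizeMB = NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_256M;
+ *     status = NvRmControl(hClient, hDevice,
+ *                          NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE,
+ *                          &params, sizeof(params));
+ *     // on NV_OK, params.numVfs reports how many VFs fit with the new size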
+ */
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64M 64
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128M 128
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_256M 256
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_512M 512
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_1G 1024
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_2G 2048
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_4G 4096
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_8G 8192
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_16G 16384
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_32G 32768
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64G 65536
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128G 131072
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_MIN NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64M
+#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_MAX NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128G
+
+#define NV0080_CTRL_GPU_VGPU_NUM_VFS_INVALID NV_U32_MAX
+
+/*
+ * NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE
+ *
+ * @brief Resize BAR1 per-VF on the given GPU
+ *   vfBar1SizeMB[in]  size of per-VF BAR1, in MB
+ *   numVfs[out]       number of VFs that can be created given the new BAR1 size
+ */
+#define NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE (0x800296) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS_MESSAGE_ID (0x96U)
+
+typedef struct NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS {
+    NvU32 vfBar1SizeMB;
+    NvU32 numVfs;
+} NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS;
+
+/* _ctrl0080gpu_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
new file mode 100644
index 0000000..fe7c63a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
@@ -0,0 +1,274 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080gr.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+typedef struct NV0080_CTRL_GR_ROUTE_INFO {
+    NvU32 flags;
+    NV_DECLARE_ALIGNED(NvU64 route, 8);
+} NV0080_CTRL_GR_ROUTE_INFO;
+
+/* NV01_DEVICE_XX/NV03_DEVICE gr engine control commands and parameters */
+
+/**
+ * NV0080_CTRL_CMD_GR_GET_CAPS
+ *
+ * This command returns the set of graphics capabilities for the device
+ * in the form of an array of unsigned bytes. Graphics capabilities
+ * include supported features and required workarounds for the graphics
+ * engine(s) within the device, each represented by a byte offset into the
+ * table and a bit position within that byte.
+ *
+ * capsTblSize
+ *   This parameter specifies the size in bytes of the caps table.
+ *   This value should be set to NV0080_CTRL_GR_CAPS_TBL_SIZE.
+ * capsTbl
+ *   This parameter specifies a pointer to the client's caps table buffer
+ *   into which the graphics caps bits will be transferred by the RM.
+ *   The caps table is an array of unsigned bytes.
+ */
+#define NV0080_CTRL_CMD_GR_GET_CAPS (0x801102) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GR_GET_CAPS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0080_CTRL_GR_GET_CAPS_PARAMS {
+    NvU32 capsTblSize;
+    NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0080_CTRL_GR_GET_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV0080_CTRL_GR_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+
+
+/*
+ * Size in bytes of gr caps table. This value should be one greater
+ * than the largest byte_index value above.
+ */
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+
+
+/*
+ * NV0080_CTRL_CMD_GR_INFO
+ *
+ * This structure represents a single 32bit graphics engine value. Clients
+ * request a particular graphics engine value by specifying a unique graphics
+ * information index.
+ *
+ * Legal graphics information index values are:
+ *   NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS
+ *     This index is used to request the number of clip IDs supported by
+ *     the device.
+ *   NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894
+ *     This index is used to request the minimum number of attributes that
+ *     need to be enabled to avoid bug 261894. A return value of 0
+ *     indicates that there is no minimum and the bug is not present on this
+ *     system.
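+ *
+ * For example, querying the GPU core count through the
+ * NV0080_CTRL_CMD_GR_GET_INFO command defined below (an illustrative
+ * sketch; NV_PTR_TO_NvP64 comes from nvtypes.h, and NvRmControl() stands
+ * in for the client's RM control dispatch routine):
+ *
+ *     NV0080_CTRL_GR_INFO info = { 0 };
+ *     NV0080_CTRL_GR_GET_INFO_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     info.index            = NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT;
+ *     params.grInfoListSize = 1;
+ *     params.grInfoList     = NV_PTR_TO_NvP64(&info);
+ *     status = NvRmControl(hClient, hDevice, NV0080_CTRL_CMD_GR_GET_INFO,
+ *                          &params, sizeof(params));
+ *     // on NV_OK, info.data holds the core count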
+ */ +typedef struct NV0080_CTRL_GR_INFO { + NvU32 index; + NvU32 data; +} NV0080_CTRL_GR_INFO; + +/* valid graphics info index values */ +#define NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS (0x00000000) +#define NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 (0x00000001) +#define NV0080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK (0x00000002) +#define NV0080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT (0x00000003) +#define NV0080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT (0x00000004) +#define NV0080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE (0x00000005) +#define NV0080_CTRL_GR_INFO_INDEX_VPE_COUNT (0x00000006) +#define NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT (0x00000007) +#define NV0080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR (0x00000008) +#define NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT (0x00000009) +#define NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT (0x0000000A) +#define NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT (0x0000000B) +#define NV0080_CTRL_GR_INFO_INDEX_SM_VERSION (0x0000000C) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM (0x0000000D) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP (0x0000000E) +#define NV0080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES (0x0000000F) +#define NV0080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES (0x00000010) +#define NV0080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY (0x00000011) +#define NV0080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY (0x00000012) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM (0x00000013) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS (0x00000014) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS (0x00000015) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS (0x00000016) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC (0x00000017) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS (0x00000018) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS (0x00000019) +#define NV0080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED (0x0000001A) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS (0x0000001B) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC (0x0000001C) +#define NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT (0x0000001D) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES (0x0000001E) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS (0x0000001F) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC (0x00000020) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS (0x00000021) +#define NV0080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT (0x00000022) +#define NV0080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT (0x00000023) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS (0x00000024) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS (0x00000025) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES (0x00000026) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC (0x00000027) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP (0x00000028) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC (0x00000029) +#define NV0080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC (0x0000002A) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP (0x0000002B) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT (0x0000002C) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT (0x0000002D) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT (0x0000002E) + + + +/* When adding a new INDEX, please update MAX_SIZE accordingly + * NOTE: 0080 functionality is merged with 2080 functionality, so this max size + * reflects that. 
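+ * For example, with NV0080_CTRL_GR_INFO_INDEX_MAX at 0x31 below,
+ * NV0080_CTRL_GR_INFO_MAX_SIZE evaluates to 0x32 (INDEX_MAX + 1).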
+ */
+#define NV0080_CTRL_GR_INFO_INDEX_MAX (0x00000031)
+#define NV0080_CTRL_GR_INFO_MAX_SIZE (0x32) /* finn: Evaluated from "(NV0080_CTRL_GR_INFO_INDEX_MAX + 1)" */
+
+/*
+ * NV0080_CTRL_CMD_GR_GET_INFO
+ *
+ * This command returns graphics engine information for the associated GPU.
+ * Requests to retrieve graphics information use a list of one or more
+ * NV0080_CTRL_GR_INFO structures.
+ *
+ * grInfoListSize
+ * This field specifies the number of entries on the caller's
+ * grInfoList.
+ * grInfoList
+ * This field specifies a pointer in the caller's address space
+ * to the buffer into which the graphics information is to be returned.
+ * This buffer must be at least as big as grInfoListSize multiplied
+ * by the size of the NV0080_CTRL_GR_INFO structure.
+ */
+#define NV0080_CTRL_CMD_GR_GET_INFO (0x801104) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV0080_CTRL_GR_GET_INFO_PARAMS {
+    NvU32 grInfoListSize;
+    NV_DECLARE_ALIGNED(NvP64 grInfoList, 8);
+} NV0080_CTRL_GR_GET_INFO_PARAMS;
+
+/*
+ * NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE
+ * This command gets the current partition mode of a TSG context.
+ *
+ * NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE
+ * This command sets the partition mode of a TSG context.
+ *
+ * NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS
+ * This structure defines the parameters used for TPC partitioning mode SET/GET commands
+ *
+ * hChannelGroup [IN]
+ * RM Handle to the TSG
+ *
+ * mode [IN/OUT]
+ * Partitioning mode enum value
+ * For the SET cmd, this is an input parameter
+ * For the GET cmd, this is an output parameter
+ *
+ * bEnableAllTpcs [IN]
+ * Flag to enable all TPCs by default
+ *
+ * grRouteInfo[IN]
+ * This parameter specifies the routing information used to
+ * disambiguate the target GR engine.
+ *
+ */
+#define NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE (0x801107) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | 0x7" */
+
+#define NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE (0x801108) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | 0x8" */
+
+/* Enum for listing TPC partitioning modes */
+typedef enum NV0080_CTRL_GR_TPC_PARTITION_MODE {
+    NV0080_CTRL_GR_TPC_PARTITION_MODE_NONE = 0,
+    NV0080_CTRL_GR_TPC_PARTITION_MODE_STATIC = 1,
+    NV0080_CTRL_GR_TPC_PARTITION_MODE_DYNAMIC = 2,
+} NV0080_CTRL_GR_TPC_PARTITION_MODE;
+
+typedef struct NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS {
+    NvHandle hChannelGroup; // [in]
+    NV0080_CTRL_GR_TPC_PARTITION_MODE mode; // [in/out]
+    NvBool bEnableAllTpcs; // [in/out]
+    NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); // [in]
+} NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS;
+
+/**
+ * NV0080_CTRL_CMD_GR_GET_CAPS_V2
+ *
+ * This command returns the same set of graphics capabilities for the device
+ * as @ref NV0080_CTRL_CMD_GR_GET_CAPS. The difference is in the structure
+ * NV0080_CTRL_GR_GET_CAPS_V2_PARAMS, which contains a statically sized array,
+ * rather than a caps table pointer and a caps table size in
+ * NV0080_CTRL_GR_GET_CAPS_PARAMS. Additionally,
+ * NV0080_CTRL_GR_GET_CAPS_V2_PARAMS contains a parameter for specifying routing
+ * information, used for MIG.
+ *
+ * capsTbl
+ * This parameter specifies the client's caps table buffer
+ * into which the graphics caps bits will be written by the RM.
+ * The caps table is an array of unsigned bytes.
+ * + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * bCapsPopulated + * This parameter indicates that the capsTbl has been partially populated by + * previous calls to NV0080_CTRL_CMD_GR_GET_CAPS_V2 on other subdevices. + */ +#define NV0080_CTRL_CMD_GR_GET_CAPS_V2 (0x801109) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0080_CTRL_GR_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvBool bCapsPopulated; +} NV0080_CTRL_GR_GET_CAPS_V2_PARAMS; + +#define NV0080_CTRL_CMD_GR_GET_INFO_V2 (0x801110) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0080_CTRL_GR_GET_INFO_V2_PARAMS { + NvU32 grInfoListSize; + NV0080_CTRL_GR_INFO grInfoList[NV0080_CTRL_GR_INFO_MAX_SIZE]; + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV0080_CTRL_GR_GET_INFO_V2_PARAMS; + +/* _ctrl0080gr_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h new file mode 100644 index 0000000..096c451 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080host.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE host control commands and parameters */ + +/* + * NV0080_CTRL_CMD_HOST_GET_CAPS + * + * This command returns the set of host capabilities for the device + * in the form of an array of unsigned bytes. Host capabilities + * include supported features and required workarounds for the host-related + * engine(s) within the device, each represented by a byte offset into + * the table and a bit position within that byte. 
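+ *
+ * Each cap is encoded as a byte_index:bit_mask pair, so a returned caps
+ * table can be tested with the NV0080_CTRL_HOST_GET_CAP() macro defined
+ * below. A minimal, illustrative sketch (assumes capsTbl was filled in by
+ * a successful NV0080_CTRL_CMD_HOST_GET_CAPS request):
+ *
+ *   NvU8 capsTbl[NV0080_CTRL_HOST_CAPS_TBL_SIZE];
+ *   // ... issue the control call ...
+ *   if (NV0080_CTRL_HOST_GET_CAP(capsTbl, NV0080_CTRL_HOST_CAPS_SLOWSLI))
+ *   {
+ *       // the SLOWSLI cap is set for this device
+ *   }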
+ *
+ * capsTblSize
+ * This parameter specifies the size in bytes of the caps table.
+ * This value should be set to NV0080_CTRL_HOST_CAPS_TBL_SIZE.
+ * capsTbl
+ * This parameter specifies a pointer to the client's caps table buffer
+ * into which the host caps bits will be transferred by the RM.
+ * The caps table is an array of unsigned bytes.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_POINTER
+ */
+#define NV0080_CTRL_CMD_HOST_GET_CAPS (0x801401) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_HOST_INTERFACE_ID << 8) | NV0080_CTRL_HOST_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_HOST_GET_CAPS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_HOST_GET_CAPS_PARAMS {
+    NvU32 capsTblSize;
+    NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0080_CTRL_HOST_GET_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV0080_CTRL_HOST_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+/* caps format is byte_index:bit_mask */
+#define NV0080_CTRL_HOST_CAPS_SEMA_ACQUIRE_BUG_105665 0:0x01
+#define NV0080_CTRL_HOST_CAPS_DUP_CMPLT_BUG_126020 0:0x02
+/*
+ * This bit indicates whether CPU mappings obtained with NvRmMapMemory() are
+ * coherent with the GPU. When this bit is _not_ set, all mappings are to the
+ * "raw" memory; i.e., they behave as if the NVOS33_FLAGS_MAPPING_DIRECT flag
+ * were used on a sysmem mapping.
+ */
+#define NV0080_CTRL_HOST_CAPS_GPU_COHERENT_MAPPING_SUPPORTED 0:0x04
+#define NV0080_CTRL_HOST_CAPS_SYS_SEMA_DEADLOCK_BUG_148216 0:0x08
+#define NV0080_CTRL_HOST_CAPS_SLOWSLI 0:0x10
+#define NV0080_CTRL_HOST_CAPS_SEMA_READ_ONLY_BUG 0:0x20
+#define NV0080_CTRL_HOST_CAPS_LARGE_NONCOH_UPSTR_WRITE_BUG_114871 0:0x40
+#define NV0080_CTRL_HOST_CAPS_LARGE_UPSTREAM_WRITE_BUG_115115 0:0x80
+#define NV0080_CTRL_HOST_CAPS_SEP_VIDMEM_PB_NOTIFIERS_BUG_83923 1:0x02
+#define NV0080_CTRL_HOST_CAPS_P2P_4_WAY 1:0x08 // Deprecated
+#define NV0080_CTRL_HOST_CAPS_P2P_8_WAY 1:0x10 // Deprecated
+#define NV0080_CTRL_HOST_CAPS_P2P_DEADLOCK_BUG_203825 1:0x20 // Deprecated
+#define NV0080_CTRL_HOST_CAPS_VIRTUAL_P2P 1:0x40
+#define NV0080_CTRL_HOST_CAPS_BUG_254580 1:0x80
+#define NV0080_CTRL_HOST_CAPS_COMPRESSED_BL_P2P_BUG_257072 2:0x02 // Deprecated
+#define NV0080_CTRL_HOST_CAPS_CROSS_BLITS_BUG_270260 2:0x04 // Deprecated
+/* unused 2:0x08 */
+#define NV0080_CTRL_HOST_CAPS_MEM2MEM_BUG_365782 2:0x10
+#define NV0080_CTRL_HOST_CAPS_CPU_WRITE_WAR_BUG_420495 2:0x20
+#define NV0080_CTRL_HOST_CAPS_EXPLICIT_CACHE_FLUSH_REQD 2:0x40
+#define NV0080_CTRL_HOST_CAPS_BAR1_READ_DEADLOCK_BUG_511418 2:0x80 // Deprecated
+
+/* size in bytes of host caps table */
+#define NV0080_CTRL_HOST_CAPS_TBL_SIZE 3
+
+#define NV0080_CTRL_CMD_HOST_GET_CAPS_V2 (0x801402) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_HOST_INTERFACE_ID << 8) | NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS {
+    NvU8 capsTbl[NV0080_CTRL_HOST_CAPS_TBL_SIZE];
+} NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS;
+
+/* _ctrl0080host_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
new file mode 100644
index 0000000..8607cc2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
@@ -0,0 +1,103 @@
+/*
+ * SPDX-FileCopyrightText:
Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080internal.finn +// + +#include "nvlimits.h" +#include "ctrl0080gr.h" +#include "ctrl/ctrl0080/ctrl0080base.h" +#include "ctrl/ctrl0080/ctrl0080perf.h" + + + + +/*! + * @ref NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE + */ +#define NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE (0x802002) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS params, 8); +} NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS; + + +/*! + * @ref NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE + */ +#define NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE (0x802003) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS params, 8); +} NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS; + + +/*! + * @ref NV0080_CTRL_CMD_PERF_CUDA_LIMIT_SET_CONTROL + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_SET_CONTROL (0x802009) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS_MESSAGE_ID" */ + + +/*! + * This command disables cuda limit activation at teardown of the client. + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_DISABLE (0x802004) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x4" */ + +/*! 
+ * @ref NV0080_CTRL_CMD_PERF_SLI_GPU_BOOST_SYNC_SET_CONTROL + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_SLI_GPU_BOOST_SYNC_SET_CONTROL (0x802007) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID" */ + + + +/*! + * @ref NV0080_CTRL_CMD_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT (0x802006) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS { + NvU8 powerDisconnectedGpuBus[NV_MAX_DEVICES]; + NvU8 powerDisconnectedGpuCount; +} NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS; + +/* ctrl0080internal_h */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h new file mode 100644 index 0000000..60af5a6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080msenc.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE MSENC control commands and parameters */ + +/* + * NV0080_CTRL_CMD_MSENC_GET_CAPS + * + * This command returns the set of MSENC capabilities for the device + * in the form of an array of unsigned bytes. MSENC capabilities + * include supported features and required workarounds for the MSENC-related + * engine(s) within the device, each represented by a byte offset into + * the table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_MSENC_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the MSENC caps bits will be transferred by the RM. 
+ * The caps table is an array of unsigned bytes.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_MSENC_GET_CAPS (0x801b01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_MSENC_INTERFACE_ID << 8) | NV0080_CTRL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV0080_CTRL_MSENC_GET_CAPS_PARAMS {
+    NvU32 capsTblSize;
+    NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0080_CTRL_MSENC_GET_CAPS_PARAMS;
+
+
+
+/* size in bytes of MSENC caps table */
+#define NV0080_CTRL_MSENC_CAPS_TBL_SIZE 4
+
+/* _ctrl0080msenc_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
new file mode 100644
index 0000000..689fe4b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
@@ -0,0 +1,75 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0080/ctrl0080nvjpg.finn
+//
+
+#include "ctrl/ctrl0080/ctrl0080base.h"
+
+/* NV01_DEVICE_XX/NV03_DEVICE NVJPG control commands and parameters */
+
+
+
+/*
+ * Size in bytes of NVJPG caps table. This value should be one greater
+ * than the largest byte_index value above.
+ */
+#define NV0080_CTRL_NVJPG_CAPS_TBL_SIZE 9
+
+/*
+ * NV0080_CTRL_CMD_NVJPG_GET_CAPS_V2
+ *
+ * This command returns the set of NVJPG capabilities for the device
+ * in the form of an array of unsigned bytes. NVJPG capabilities
+ * include supported features of the NVJPG engine(s) within the device,
+ * each represented by a byte offset into the table and a bit position within
+ * that byte.
+ *
+ * [out] capsTbl
+ * This caps table array is where the NVJPG caps bits will be transferred
+ * by the RM. The caps table is an array of unsigned bytes.
+ * instanceId
+ * This parameter specifies the instance Id of NVJPG for which
+ * cap bits are requested.
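+ *
+ * A minimal request sketch (illustrative only; instance 0 is an assumed
+ * example value):
+ *
+ *   NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS params = { 0 };
+ *   params.instanceId = 0;  // caps of the first NVJPG instance
+ *   // on success, the RM fills in params.capsTbl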
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_NVJPG_GET_CAPS_V2 (0x801f02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_NVJPG_INTERFACE_ID << 8) | NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_NVJPG_CAPS_TBL_SIZE]; + NvU32 instanceId; +} NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS; + +/* _ctrl0080NVJPG_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h new file mode 100644 index 0000000..3001076 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080perf.finn +// + +#define NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS { + NvBool bActivate; +} NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS; + +#define NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS { + NvBool bCudaLimit; +} NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS; + + + +/* _ctrl0080perf_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h new file mode 100644 index 0000000..100a612 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080rc.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE gpu control commands and parameters */ + +/* + * NV0080_CTRL_CMD_RC_DISABLE_RESET_CHANNEL_CALLBACK + * + * This command prevents RM from using callbacks when resetting a channel due + * to a page fault. + * + * Possible status return values are: + * NV_OK + */ +#define NV0080_CTRL_CMD_RC_DISABLE_RESET_CHANNEL_CALLBACK (0x801d01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_RC_INTERFACE_ID << 8) | 0x1" */ + +/* + * NV0080_CTRL_CMD_RC_ENABLE_RESET_CHANNEL_CALLBACK + * + * This command permits RM to use callbacks when resetting a channel due + * to a page fault. + * + * Possible status return values are: + * NV_OK + */ +#define NV0080_CTRL_CMD_RC_ENABLE_RESET_CHANNEL_CALLBACK (0x801d02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_RC_INTERFACE_ID << 8) | 0x2" */ + +/* _ctrl0080rc_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h new file mode 100644 index 0000000..fbbdf73 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080unix.finn +// + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE UNIX-specific control commands and parameters */ + +/* + * NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH + * + * This command notifies RM to save or restore the current console state. It is + * intended to be called just before the display driver starts using the display + * engine, and after it has finished using it. + * + * cmd + * Indicates which operation should be performed. + * + * SAVE_VT_STATE + * Records the current state of the console, to be restored later. + * RESTORE_VT_STATE + * Restores the previously-saved console state. + * + * fbInfo + * Returns information about the system's framebuffer console, if one + * exists. If no console is present, all fields will be zero. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH (0x801e01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS { + NvU32 cmd; /* in */ +} NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS; + +/* Called when the display driver needs RM to save the console data, + * which will be used in RM based console restore */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE (0x00000001) + +/* Called when the display driver needs RM to restore the console */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE (0x00000002) + +/* Called when the display driver has restored the console -- RM doesn't + * need to do anything further, but needs to be informed to avoid turning the + * GPU off and thus destroying the console state. */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED (0x00000003) + +#define NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO (0x801e02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS { + NvU32 subDeviceInstance; /* out */ + + NvU16 width; /* out */ + NvU16 height; /* out */ + NvU16 depth; /* out */ + NvU16 pitch; /* out */ +} NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS; + +/* _ctrl0080unix_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h new file mode 100644 index 0000000..c7fc38e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl2080/ctrl2080gpu.h" +#include "ctrl2080/ctrl2080fuse.h" +#include "ctrl2080/ctrl2080event.h" +#include "ctrl2080/ctrl2080tmr.h" +#include "ctrl2080/ctrl2080bios.h" +#include "ctrl2080/ctrl2080mc.h" +#include "ctrl2080/ctrl2080fifo.h" +#include "ctrl2080/ctrl2080fb.h" +#include "ctrl2080/ctrl2080gr.h" +#include "ctrl2080/ctrl2080bus.h" +#include "ctrl2080/ctrl2080thermal.h" +#include "ctrl2080/ctrl2080fan.h" +#include "ctrl2080/ctrl2080i2c.h" +#include "ctrl2080/ctrl2080internal.h" +#include "ctrl2080/ctrl2080spi.h" +#include "ctrl2080/ctrl2080gpio.h" +#include "ctrl2080/ctrl2080clk.h" +#include "ctrl2080/ctrl2080perf.h" +#include "ctrl2080/ctrl2080perf_cf.h" + + +#include "ctrl2080/ctrl2080rc.h" +#include "ctrl2080/ctrl2080dma.h" +#include "ctrl2080/ctrl2080dmabuf.h" +#include "ctrl2080/ctrl2080nvd.h" +#include "ctrl2080/ctrl2080boardobj.h" +#include "ctrl2080/ctrl2080pmgr.h" +#include "ctrl2080/ctrl2080power.h" +#include "ctrl2080/ctrl2080lpwr.h" +#include "ctrl2080/ctrl2080acr.h" +#include "ctrl2080/ctrl2080ce.h" +#include "ctrl2080/ctrl2080nvlink.h" +#include "ctrl2080/ctrl2080flcn.h" +#include "ctrl2080/ctrl2080volt.h" +#include "ctrl2080/ctrl2080ecc.h" +#include "ctrl2080/ctrl2080cipher.h" +#include "ctrl2080/ctrl2080fla.h" +#include "ctrl2080/ctrl2080gsp.h" + + +#include "ctrl2080/ctrl2080grmgr.h" +#include "ctrl2080/ctrl2080ucodefuzzer.h" + + +#include "ctrl2080/ctrl2080hshub.h" +/* include appropriate os-specific command header */ + + +#include "ctrl2080/ctrl2080unix.h" diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h new file mode 100644 index 0000000..322e119 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080acr.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h new file mode 100644 index 0000000..7df6a6d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV20_SUBDEVICE_XX control commands and parameters */ + +#define NV2080_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x2080, NV2080_CTRL_##cat, idx) + +/* Subdevice command categories (6bits) */ +#define NV2080_CTRL_RESERVED (0x00) +#define NV2080_CTRL_GPU (0x01) +#define NV2080_CTRL_GPU_LEGACY_NON_PRIVILEGED (0x81) /* finn: Evaluated from "(NV2080_CTRL_GPU | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FUSE (0x02) +#define NV2080_CTRL_FUSE_LEGACY_NON_PRIVILEGED (0x82) /* finn: Evaluated from "(NV2080_CTRL_FUSE | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_EVENT (0x03) +#define NV2080_CTRL_TIMER (0x04) +#define NV2080_CTRL_THERMAL (0x05) +#define NV2080_CTRL_THERMAL_LEGACY_PRIVILEGED (0xc5) /* finn: Evaluated from "(NV2080_CTRL_THERMAL | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_THERMAL_LEGACY_NON_PRIVILEGED (0x85) /* finn: Evaluated from "(NV2080_CTRL_THERMAL | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_I2C (0x06) +#define NV2080_CTRL_EXTI2C (0x07) +#define NV2080_CTRL_BIOS (0x08) +#define NV2080_CTRL_CIPHER (0x09) +#define NV2080_CTRL_INTERNAL (0x0A) +#define NV2080_CTRL_CLK_LEGACY_PRIVILEGED (0xd0) /* finn: Evaluated from "(NV2080_CTRL_CLK | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_CLK_LEGACY_NON_PRIVILEGED (0x90) /* finn: Evaluated from "(NV2080_CTRL_CLK | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_CLK (0x10) +#define NV2080_CTRL_FIFO (0x11) +#define NV2080_CTRL_GR (0x12) +#define NV2080_CTRL_FB (0x13) +#define NV2080_CTRL_MC (0x17) +#define NV2080_CTRL_BUS (0x18) +#define NV2080_CTRL_PERF_LEGACY_PRIVILEGED (0xe0) /* finn: Evaluated from "(NV2080_CTRL_PERF | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_PERF_LEGACY_NON_PRIVILEGED (0xa0) /* finn: Evaluated from "(NV2080_CTRL_PERF | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_PERF (0x20) +#define NV2080_CTRL_NVIF (0x21) +#define NV2080_CTRL_RC (0x22) +#define NV2080_CTRL_GPIO (0x23) +#define NV2080_CTRL_GPIO_LEGACY_NON_PRIVILEGED (0xa3) /* finn: Evaluated from "(NV2080_CTRL_GPIO | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_NVD (0x24) +#define NV2080_CTRL_DMA (0x25) +#define NV2080_CTRL_PMGR (0x26) +#define NV2080_CTRL_PMGR_LEGACY_PRIVILEGED (0xe6) /* finn: Evaluated from "(NV2080_CTRL_PMGR | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_PMGR_LEGACY_NON_PRIVILEGED (0xa6) /* finn: Evaluated from "(NV2080_CTRL_PMGR | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_POWER (0x27) +#define NV2080_CTRL_POWER_LEGACY_NON_PRIVILEGED (0xa7) /* finn: Evaluated from "(NV2080_CTRL_POWER | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_LPWR (0x28) +#define NV2080_CTRL_LPWR_LEGACY_NON_PRIVILEGED (0xa8) /* finn: Evaluated from "(NV2080_CTRL_LPWR | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_LPWR_LEGACY_PRIVILEGED (0xe8) /* finn: Evaluated from "(NV2080_CTRL_LPWR | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_ACR (0x29) +#define NV2080_CTRL_CE (0x2A) +#define NV2080_CTRL_SPI (0x2B) +#define NV2080_CTRL_NVLINK (0x30) +#define NV2080_CTRL_FLCN (0x31) +#define NV2080_CTRL_VOLT (0x32) +#define NV2080_CTRL_VOLT_LEGACY_PRIVILEGED (0xf2) /* finn: Evaluated from "(NV2080_CTRL_VOLT | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_VOLT_LEGACY_NON_PRIVILEGED (0xb2) /* finn: Evaluated from "(NV2080_CTRL_VOLT | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FAS (0x33) +#define 
NV2080_CTRL_ECC (0x34) +#define NV2080_CTRL_ECC_NON_PRIVILEGED (0xb4) /* finn: Evaluated from "(NV2080_CTRL_ECC | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FLA (0x35) +#define NV2080_CTRL_GSP (0x36) +#define NV2080_CTRL_NNE (0x37) +#define NV2080_CTRL_GRMGR (0x38) +#define NV2080_CTRL_UCODE_FUZZER (0x39) +#define NV2080_CTRL_DMABUF (0x3A) + +// per-OS categories start at highest category and work backwards +#define NV2080_CTRL_OS_WINDOWS (0x3F) +#define NV2080_CTRL_OS_MACOS (0x3E) +#define NV2080_CTRL_OS_UNIX (0x3D) + + +/* + * NV2080_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_NULL (0x20800000) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl2080base_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h new file mode 100644 index 0000000..9cbad13 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h @@ -0,0 +1,240 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080bios.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX bios-related control commands and parameters */ + + + +typedef struct NV2080_CTRL_BIOS_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_BIOS_INFO; + +/* Maximum number of bios infos that can be queried at once */ +#define NV2080_CTRL_BIOS_INFO_MAX_SIZE (0x0000000F) + +#define NV2080_CTRL_BIOS_INFO_INDEX_REVISION (0x00000000) +#define NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION (0x00000001) + + + +/* + * NV2080_CTRL_CMD_BIOS_GET_INFO + * + * This command returns bios information for the associated GPU. + * Requests to retrieve bios information use a list of one or more + * NV2080_CTRL_BIOS_INFO structures. + * + * biosInfoListSize + * This field specifies the number of entries on the caller's + * biosInfoList. 
+ * biosInfoList
+ * This field specifies a pointer in the caller's address space
+ * to the buffer into which the bios information is to be returned.
+ * This buffer must be at least as big as biosInfoListSize multiplied
+ * by the size of the NV2080_CTRL_BIOS_INFO structure.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_BIOS_GET_INFO (0x20800802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | 0x2" */
+
+typedef struct NV2080_CTRL_BIOS_GET_INFO_PARAMS {
+    NvU32 biosInfoListSize;
+    NV_DECLARE_ALIGNED(NvP64 biosInfoList, 8);
+} NV2080_CTRL_BIOS_GET_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BIOS_GET_INFO_V2
+ *
+ * This command returns bios information for the associated GPU.
+ * Requests to retrieve bios information use a list of one or more
+ * NV2080_CTRL_BIOS_INFO structures.
+ *
+ * biosInfoListSize
+ * This field specifies the number of entries on the caller's
+ * biosInfoList.
+ * biosInfoList
+ * Bios information to be returned.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_BIOS_GET_INFO_V2 (0x20800810) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS {
+    NvU32 biosInfoListSize;
+    NV2080_CTRL_BIOS_INFO biosInfoList[NV2080_CTRL_BIOS_INFO_MAX_SIZE];
+} NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_BIOS_GET_SKU_INFO
+ *
+ * This command returns information about the current board SKU.
+ * NV_ERR_INVALID_OWNER will be returned if the call
+ * isn't made with the OS as the administrator.
+ *
+ * chipSKU
+ * This field returns the sku for the current chip.
+ * chipSKUMod
+ * This field returns the SKU modifier.
+ * project
+ * This field returns the Project (Board) number.
+ * projectSKU
+ * This field returns the Project (Board) SKU number.
+ * CDP
+ * This field returns the Collaborative Design Project Number.
+ * projectSKUMod
+ * This field returns the Project (Board) SKU Modifier.
+ * businessCycle
+ * This field returns the business cycle the board is associated with.
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_OWNER
+ */
+#define NV2080_CTRL_CMD_BIOS_GET_SKU_INFO (0x20800808) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum length of parameter strings */
+
+
+#define NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+    NvU32 BoardID;
+    char chipSKU[4];
+    char chipSKUMod[2];
+    char project[5];
+    char projectSKU[5];
+    char CDP[6];
+    char projectSKUMod[2];
+    NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BIOS_GET_POST_TIME
+ *
+ * This command is used to get the GPU POST time (in milliseconds).
+ * If the associated GPU is the master GPU, this value will be recorded
+ * by the VBIOS and retrieved from the KDA buffer. If the associated
+ * GPU is a secondary GPU, then this value will reflect the devinit
+ * processing time.
+ *
+ * vbiosPostTime
+ * This parameter returns the vbios post time in msec.
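+ *
+ * A minimal usage sketch (illustrative only; the control dispatch itself
+ * is omitted):
+ *
+ *   NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS params = { 0 };
+ *   // on NV_OK, params.vbiosPostTime holds the POST time in milliseconds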
+ *
+ * Possible return status values are
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME (0x20800809) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 vbiosPostTime, 8);
+} NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT
+ *
+ * This command returns the UEFI version, UEFI image presence, and the
+ * Graphics Firmware Mode, i.e. whether the system is running in UEFI mode
+ * or not.
+ *
+ * version
+ * This parameter returns the UEFI version.
+ *
+ * flags
+ * This parameter indicates UEFI image presence and Graphics Firmware mode.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE
+ * This field returns UEFI presence value. Legal values for this
+ * field include:
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO
+ * This value indicates that UEFI image is not present.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES
+ * This value indicates that UEFI image is present.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER
+ * This value indicates that there is a dummy UEFI placeholder,
+ * which can later be updated with a valid UEFI image.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN
+ * This value indicates that UEFI image is hidden.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING
+ * This field indicates the UEFI running value. Legal values for
+ * this parameter include:
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE
+ * This value indicates that UEFI is not running.
+ * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE
+ * This value indicates that UEFI is running.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_READY
+ * NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT (0x2080080b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS {
+    NvU32 version;
+    NvU32 flags;
+} NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS;
+
+/* Legal values for flags parameter */
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE 1:0
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO (0x00000000)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES (0x00000001)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER (0x00000002)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN (0x00000003)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING 2:2
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE (0x00000000)
+#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE (0x00000001)
+
+
+
+/* _ctrl2080bios_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
new file mode 100644
index 0000000..6f69c5c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080boardobj.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h new file mode 100644 index 0000000..caafd74 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080boardobjgrpclasses.finn
+//
+
+
+
+#include "nvtypes.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
new file mode 100644
index 0000000..ee3ed7a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
@@ -0,0 +1,1493 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080bus.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX bus control commands and parameters */
+
+/**
+ * NV2080_CTRL_CMD_BUS_GET_PCI_INFO
+ *
+ * This command returns PCI bus identifier information for the specified GPU.
+ *
+ *   pciDeviceId
+ *     This parameter specifies the internal PCI device and vendor
+ *     identifiers for the GPU.
+ *   pciSubSystemId
+ *     This parameter specifies the internal PCI subsystem identifier for
+ *     the GPU.
+ *   pciRevisionId
+ *     This parameter specifies the internal PCI device-specific revision
+ *     identifier for the GPU.
+ *   pciExtDeviceId
+ *     This parameter specifies the external PCI device identifier for
+ *     the GPU. It contains only the 16-bit device identifier. This
+ *     value is identical to the device identifier portion of
+ *     pciDeviceId since non-transparent bridges are no longer supported.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_BUS_GET_PCI_INFO (0x20801801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS {
+    NvU32 pciDeviceId;
+    NvU32 pciSubSystemId;
+    NvU32 pciRevisionId;
+    NvU32 pciExtDeviceId;
+} NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_BUS_INFO
+ *
+ * This structure represents a single 32-bit bus engine value. Clients
+ * request a particular bus engine value by specifying a unique bus
+ * information index.
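+ *
+ * Example (illustrative sketch only, not part of this header): a client
+ * could query the bus type through NV2080_CTRL_CMD_BUS_GET_INFO_V2,
+ * defined later in this file. The hClient/hSubdevice handles and the
+ * NvRmControl-style entry point are assumptions here; the exact client
+ * plumbing is platform-specific.
+ *
+ *   NV2080_CTRL_BUS_GET_INFO_V2_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.busInfoListSize = 1;
+ *   params.busInfoList[0].index = NV2080_CTRL_BUS_INFO_INDEX_TYPE;
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_BUS_GET_INFO_V2,
+ *                        &params, sizeof(params));
+ *   if (status == NV_OK &&
+ *       params.busInfoList[0].data == NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS)
+ *   {
+ *       // the GPU is attached via PCI Express
+ *   }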
+ * + * Legal bus information index values are: + * NV2080_CTRL_BUS_INFO_INDEX_TYPE + * This index is used to request the bus type of the GPU. + * Legal return values for this index are: + * NV2080_CTRL_BUS_INFO_TYPE_PCI + * NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS + * NV2080_CTRL_BUS_INFO_TYPE_FPCI + * NV2080_CTRL_BUS_INFO_INDEX_INTLINE + * This index is used to request the interrupt line (or irq) assignment + * for the GPU. The return value is system-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_CAPS + * This index is used to request the bus engine capabilities for the GPU. + * The return value is specified as a mask of capabilities. + * Legal return values for this index are: + * NV2080_CTRL_BUS_INFO_CAPS_NEED_IO_FLUSH + * NV2080_CTRL_BUS_INFO_CAPS_CHIP_INTEGRATED + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS + * These indices are used to request PCI Express link-specific + * capabilities values. A value of zero is returned for non-PCIE GPUs. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS + * These indices are used to request PCI Express link-specific + * control status values. A value of zero is returned for non-PCIE GPUs. + * NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS + * This index is used to request coherent dma transfer flags. + * Valid coherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART + * NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS + * This index is used to request noncoherent dma transfer flags. + * Valid noncoherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE + * NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE + * This index is used to request the size of the GPU GART in MBytes. + * NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_FLAGS + * This index is used to request GPU GART flags. + * Valid gart flags include: + * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH + * This flag indicates that GPU GART clients need to do an explicit + * flush via an appropriate SetContextDma method. + * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED + * This flag indicates that the GART address range includes both + * system and video memory. + * NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER + * This index is used to request the PCI-based bus number of the GPU. + * Support for this index is platform-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER + * This index is used to request the PCI-based device number of the GPU. + * Support for this index is platform-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER + * This index is used to request the PCI-based domain number of the GPU. + * Support for this index is platform-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS + * These indices are used to request PCI Express error status. + * The current status is cleared as part of these requests. 
+ *     Valid PCI Express error status values include:
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST
+ *   NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE
+ *     This index is used to request the bus interface type of the GPU.
+ *     Legal return values for this index are:
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS
+ *       NV2080_CTRL_BUS_INFO_TYPE_FPCI
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO // DEPRECATED
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO // REPLACES "GEN2" variant
+ *     This index is used to retrieve the PCI Express Gen configuration
+ *     support for the GPU.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN1
+ *         The GPU is PCI Express Gen1 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN2
+ *         The GPU is PCI Express Gen2 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN3
+ *         The GPU is PCI Express Gen3 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN4
+ *         The GPU is PCI Express Gen4 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN5
+ *         The GPU is PCI Express Gen5 capable.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN1
+ *         The GPU is configured in PCI Express Gen1 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN2
+ *         The GPU is configured in PCI Express Gen2 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN3
+ *         The GPU is configured in PCI Express Gen3 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN4
+ *         The GPU is configured in PCI Express Gen4 mode.
+ *       NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN5
+ *         The GPU is configured in PCI Express Gen5 mode.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER
+ *     This index retrieves PCI Express Advanced Error Reporting (AER) errors
+ *     for the GPU.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS
+ *     This index retrieves the PCI Express link capabilities for the
+ *     board. For example, a Quadro FX4700X2 has two GPUs and a PCIe
+ *     switch. With this board, this index returns the link
+ *     capabilities of the PCIe switch. On a single-GPU board, this
+ *     index returns the link capabilities of the GPU. A value of
+ *     zero is returned for non-PCIE GPUs.
+ *     UPSTREAM_LINK_CAPS is kept for backwards compatibility.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS
+ *     This index retrieves the PCI Express link status for the board.
+ *     For example, a Quadro FX4700X2 has two GPUs and a PCIe switch.
+ *     With this board, this index returns the link status of
+ *     the PCIe switch. On a single-GPU board, this index returns the
+ *     link status of the GPU. A value of zero is returned for
+ *     non-PCIE GPUs.
+ *     UPSTREAM_LINK_CTRL_STATUS is kept for backwards compatibility.
+ *   NV2080_CTRL_BUS_INFO_INDEX_ASLM_STATUS
+ *     This index is used to request the PCI Express ASLM settings.
+ *     This index is only valid when NV2080_CTRL_BUS_INFO_TYPE indicates PCIE.
+ *     A value of zero is returned for a non-PCI Express bus type.
+ *     _ASLM_STATUS_PCIE is always _PRESENT for a PCI Express bus type.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT
+ *     This index is used to get the ASLM switching error count.
+ *     A value of zero is returned if no errors occurred during
+ *     ASLM switching.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_SWITCH_ERROR_COUNT
+ *     This index is used to get the Gen1<-->Gen2 switching error count.
+ *     A value of zero is returned if the speed change from Gen1 to
+ *     Gen2 was clean, if the chipset is not Gen2 capable, or if
+ *     Gen1<-->Gen2 switching is disabled.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM
+ *     This index is used to get the ASPM CYA L0s/L1 enable/disable status.
+ *     The legal return value is specified as a mask of valid and data
+ *     fields. Possible return values are:
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_NO
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_YES
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_DISABLED
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L1
+ *       NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S_L1
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS
+ *     These indices are used to request detailed PCI Express error counters.
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR
+ *   NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR
+ *     These indices are used to clear detailed PCI Express error counters.
+ *   NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE
+ *     This index is used to request the internal interface type of the GPU.
+ *     Legal return values for this index are:
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI
+ *       NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS
+ *       NV2080_CTRL_BUS_INFO_TYPE_FPCI
+ *   NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE
+ *     This index queries the type of the sysmem connection to the CPU.
+ *       NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE
+ *       NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK
+ *       NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_C2C
+ *
+ */
+
+typedef struct NV2080_CTRL_BUS_INFO {
+    NvU32 index;
+    NvU32 data;
+} NV2080_CTRL_BUS_INFO;
+
+/* valid bus info index values */
+
+/**
+ * This index is used to request the bus type of the GPU.
+ * Legal return values for this index are:
+ *   NV2080_CTRL_BUS_INFO_TYPE_PCI
+ *   NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS
+ *   NV2080_CTRL_BUS_INFO_TYPE_FPCI
+ */
+#define NV2080_CTRL_BUS_INFO_INDEX_TYPE (0x00000000)
+#define NV2080_CTRL_BUS_INFO_INDEX_INTLINE (0x00000001)
+#define NV2080_CTRL_BUS_INFO_INDEX_CAPS (0x00000002)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS (0x00000003)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS (0x00000004)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS (0x00000005)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS (0x00000006)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS (0x00000007)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS (0x00000008)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS (0x00000009)
+#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS (0x0000000A)
+/**
+ * This index is used to request coherent dma transfer flags.
+ * Valid coherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART + */ +#define NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS (0x0000000B) +/** + * This index is used to request noncoherent dma transfer flags. + * Valid noncoherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE + */ +#define NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS (0x0000000C) +/** + * This index is used to request the size of the GPU GART in MBytes. + */ +#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE (0x0000000D) +/** + * This index is used to request GPU GART flags. + * Valid gart flags include: + * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH + * This flag indicates that GPU GART clients need to do an explicit + * flush via an appropriate SetContextDma method. + * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED + * This flag indicates that the GART address range includes both + * system and video memory. + */ +#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_FLAGS (0x0000000E) +#define NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER (0x0000000F) +#define NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER (0x00000010) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS (0x00000011) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS (0x00000012) +#define NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE (0x00000013) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO (0x00000014) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER (0x00000015) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS (0x00000016) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS (0x00000017) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ASLM_STATUS (0x00000018) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT (0x00000019) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_SPEED_SWITCH_ERROR_COUNT (0x0000001A) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM (0x0000001B) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS (0x0000001C) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS (0x0000001D) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED (0x0000001E) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS (0x0000001F) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR (0x00000020) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR (0x00000021) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR (0x00000022) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR (0x00000023) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS (0x00000024) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS (0x00000025) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS (0x00000026) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS (0x00000027) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS_CLEAR (0x00000028) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS_CLEAR (0x00000029) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS_CLEAR (0x0000002A) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS_CLEAR (0x0000002B) +#define NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER (0x0000002C) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO (0x0000002D) +#define 
NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE (0x0000002E) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_GEN_INFO (0x0000002F) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_GEN_INFO (0x00000030) +#define NV2080_CTRL_BUS_INFO_INDEX_MSI_INFO (0x00000031) +/** + * This index is used to request the top 32 bits of the size of the GPU + * GART in MBytes. + */ +#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE_HI (0x00000032) +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE (0x00000033) +#define NV2080_CTRL_BUS_INFO_INDEX_MAX NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE +#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE (0x00000034) + +/* valid bus info type return values */ +#define NV2080_CTRL_BUS_INFO_TYPE_PCI (0x00000001) +#define NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS (0x00000003) +#define NV2080_CTRL_BUS_INFO_TYPE_FPCI (0x00000004) +#define NV2080_CTRL_BUS_INFO_TYPE_AXI (0x00000008) + +/* valid bus capability flags */ +#define NV2080_CTRL_BUS_INFO_CAPS_NEED_IO_FLUSH (0x00000001) +#define NV2080_CTRL_BUS_INFO_CAPS_CHIP_INTEGRATED (0x00000002) + +/* + * Format of PCIE link caps return values + * Note that Link Capabilities register format is followed only for bits 11:0 + */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED 3:0 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_5000MBPS (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_8000MBPS (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_16000MBPS (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_32000MBPS (0x00000005) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_WIDTH 9:4 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM 11:10 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_NONE (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_L0S (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_L0S_L1 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN 15:12 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN1 (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN2 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN3 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN4 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN5 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL 19:16 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN1 (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN2 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN3 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN4 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN5 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN 23:20 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN1 (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN2 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN3 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN4 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN5 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES 24:24 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES_ENABLED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES_DISABLED (0x00000001) + +/* format of PCIE control status return values */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM 1:0 +#define 
NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_DISABLED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L0S (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L1 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L0S_L1 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED 19:16 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_5000MBPS (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_8000MBPS (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_16000MBPS (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_32000MBPS (0x00000005) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH 25:20 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_UNDEFINED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X1 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X2 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X4 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X8 (0x00000008) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X12 (0x0000000C) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X16 (0x00000010) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X32 (0x00000020) + +/* coherent dma transfer flags */ +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA 0:0 +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART 2:2 +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART_TRUE (0x00000001) + +/* noncoherent dma transfer flags */ +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA 0:0 +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART 2:2 +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE 3:3 +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE_TRUE (0x00000001) + +/* GPU GART flags */ +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH 0:0 +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED 1:1 +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED_TRUE (0x00000001) + +/* format of PCIE errors return values */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST (0x00000008) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY (0x00000010) + +/* PCIE Gen2 capability and current level */ 
+#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL_GEN1 (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL_GEN2 (0x00000001) + +/* format of PCIE AER return values */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_TRAINING_ERR (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_FC_PROTO_ERR (0x00000008) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT (0x00000010) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_ABORT (0x00000020) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL (0x00000040) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_RCVR_OVERFLOW (0x00000080) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP (0x00000100) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_ECRC_ERROR (0x00000200) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ (0x00000400) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR (0x00010000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP (0x00020000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP (0x00040000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER (0x00080000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT (0x00100000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL (0x00200000) + +/* format of PCIE ASLM status return value */ +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE_ERROR (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE_PRESENT (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE 2:2 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED 3:3 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04 4:4 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04_MISSING (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04_PRESENT (0x00000001) + +/* format of GPU CYA CAPS return value */ +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM 2:1 +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_DISABLED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L1 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S_L1 (0x00000003) + +/* format of MSI INFO return value */ +#define NV2080_CTRL_BUS_INFO_MSI_STATUS 0:0 +#define NV2080_CTRL_BUS_INFO_MSI_STATUS_DISABLED (0x00000000) +#define 
NV2080_CTRL_BUS_INFO_MSI_STATUS_ENABLED (0x00000001) + +/*format of L1PM Substates capabilities information */ +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED 2:2 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED 3:3 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED 4:4 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_RESERVED 7:5 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PORT_RESTORE_TIME 15:8 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_T_POWER_ON_SCALE 17:16 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_T_POWER_ON_VALUE 23:19 + +/*format of L1 PM Substates Control 1 Register */ +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED 2:2 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED 3:3 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_COMMON_MODE_RESTORE_TIME 15:8 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_LTR_L1_2_THRESHOLD_VALUE 25:16 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_LTR_L1_2_THRESHOLD_SCALE 31:29 + +/*format of L1 PM Substates Control 2 Register */ +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL2_T_POWER_ON_SCALE 1:0 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL2_T_POWER_ON_VALUE 7:3 + +/* valid sysmem connection type values */ +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE (0x00000000) +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK (0x00000001) +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_C2C (0x00000002) + +/** + * NV2080_CTRL_CMD_BUS_GET_INFO + * + * This command returns bus engine information for the associated GPU. + * Requests to retrieve bus information use a list of one or more + * NV2080_CTRL_BUS_INFO structures. + * + * busInfoListSize + * This field specifies the number of entries on the caller's + * busInfoList. 
+ *   busInfoList
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer into which the bus information is to be returned.
+ *     This buffer must be at least as big as busInfoListSize multiplied
+ *     by the size of the NV2080_CTRL_BUS_INFO structure.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_BUS_GET_INFO (0x20801802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_INFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_BUS_GET_INFO_PARAMS {
+    NvU32 busInfoListSize;
+    NV_DECLARE_ALIGNED(NvP64 busInfoList, 8);
+} NV2080_CTRL_BUS_GET_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_GET_INFO_V2 (0x20801823) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_INFO_V2_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV2080_CTRL_BUS_GET_INFO_V2_PARAMS {
+    NvU32 busInfoListSize;
+    NV2080_CTRL_BUS_INFO busInfoList[NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE];
+} NV2080_CTRL_BUS_GET_INFO_V2_PARAMS;
+
+/*
+ * NV2080_CTRL_BUS_PCI_BAR_INFO
+ *
+ * This structure describes PCI bus BAR information.
+ *
+ *   flags
+ *     This field contains any flags for the associated BAR.
+ *   barSize
+ *     This field contains the size in megabytes of the associated BAR.
+ *     DEPRECATED, please use barSizeBytes.
+ *   barSizeBytes
+ *     This field contains the size in bytes of the associated BAR.
+ *   barOffset
+ *     This field contains the PCI bus offset in bytes of the associated BAR.
+ */
+typedef struct NV2080_CTRL_BUS_PCI_BAR_INFO {
+    NvU32 flags;
+    NvU32 barSize;
+    NV_DECLARE_ALIGNED(NvU64 barSizeBytes, 8);
+    NV_DECLARE_ALIGNED(NvU64 barOffset, 8);
+} NV2080_CTRL_BUS_PCI_BAR_INFO;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO
+ *
+ * This command returns PCI bus BAR information.
+ *
+ *   pciBarCount
+ *     This field returns the number of BARs for the associated subdevice.
+ *     Legal values for this parameter are between one and
+ *     NV2080_CTRL_BUS_MAX_PCI_BARS.
+ *   pciBarInfo
+ *     This field returns per-BAR information in the form of an array of
+ *     NV2080_CTRL_BUS_PCI_BAR_INFO structures. Information for as many as
+ *     NV2080_CTRL_BUS_MAX_PCI_BARS will be returned. Any unused entries will
+ *     be initialized to zero.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO (0x20801803) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum number of BARs per subdevice */
+#define NV2080_CTRL_BUS_MAX_PCI_BARS (8)
+
+#define NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS {
+    NvU32 pciBarCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_BUS_PCI_BAR_INFO pciBarInfo[NV2080_CTRL_BUS_MAX_PCI_BARS], 8);
+} NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_SET_PCIE_LINK_WIDTH
+ *
+ * This command sets the PCI-E link width to the specified new value.
+ *
+ *   pcieLinkWidth
+ *     This field specifies the new PCI-E link width.
+ *
+ *   failingReason
+ *     This field specifies the reason the link width change failed.
+ *     It is valid only when this routine returns NV_ERR_GENERIC.
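+ *
+ * Example (illustrative sketch only; the handles and the NvRmControl-style
+ * entry point are assumptions, as elsewhere in these examples):
+ *
+ *   NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.pcieLinkWidth = 8;   // request a x8 link
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_BUS_SET_PCIE_LINK_WIDTH,
+ *                        &params, sizeof(params));
+ *   if ((status == NV_ERR_GENERIC) &&
+ *       (params.failingReason &
+ *        NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_TRAINING))
+ *   {
+ *       // link retraining failed; the width was not changed
+ *   }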
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_GENERIC + */ +#define NV2080_CTRL_CMD_BUS_SET_PCIE_LINK_WIDTH (0x20801804) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS { + NvU32 pcieLinkWidth; + NvU32 failingReason; +} NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS; + +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_PSTATE (0x00000001) +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_PCIE_CFG_ACCESS (0x00000002) +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_TRAINING (0x00000004) + +/* + * NV2080_CTRL_CMD_BUS_SET_PCIE_SPEED + * + * This command Initiates a change in PCIE Bus Speed + * + * busSpeed + * This field is the target speed to train to. + * Legal values for this parameter are: + * NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_8000MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_16000MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_32000MBPS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_BUS_SET_PCIE_SPEED (0x20801805) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS { + NvU32 busSpeed; +} NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS; + +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS (0x00000002) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_8000MBPS (0x00000003) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_16000MBPS (0x00000004) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_32000MBPS (0x00000005) + +/* + * NV2080_CTRL_CMD_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED + * + * This command Initiates a change in PCIE Bus Speed for a HWBC device's upstream + * link. + * + * busSpeed + * This field specifies the target speed to which to train. + * Legal values for this parameter are: + * NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS + * primaryBus + * This field is the PCI Express Primary Bus number that uniquely identifies + * a HWBC device's upstream port, i.e. the BR04 Upstream Port. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED (0x20801806) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS { + NvU32 busSpeed; + NvU8 primaryBus; +} NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS; + +#define NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_5000MBPS (0x00000002) + +/* + * NV2080_CTRL_CMD_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED + * + * This command gets the current PCIE Bus Speed for a HWBC device's upstream + * link. + * + * primaryBus + * This field is the PCI Express Primary Bus number that uniquely identifies + * a HWBC device's upstream port, i.e. the BR04 Upstream Port. 
+ * busSpeed + * This field specifies a pointer in the caller's address space + * to the NvU32 variable into which the bus speed is to be returned. + * On success, this parameter will contain one of the following values: + * NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED (0x20801807) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS { + NvU32 busSpeed; + NvU8 primaryBus; +} NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS; + +#define NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_5000MBPS (0x00000002) + +/* + * NV2080_CTRL_CMD_BUS_MAP_BAR2 + * + * This command sets up BAR2 page tables for passed-in memory handle. + * This command MUST be executed before NV2080_CTRL_CMD_BUS_UNMAP_BAR2 + * or NV2080_CTRL_CMD_BUS_VERIFY_BAR2. Not supported on SLI. + * + * hMemory + * This field is a handle to physical memory. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV2080_CTRL_CMD_BUS_MAP_BAR2 (0x20801809) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_MAP_BAR2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_MAP_BAR2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_BUS_MAP_BAR2_PARAMS { + NvHandle hMemory; +} NV2080_CTRL_BUS_MAP_BAR2_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_UNMAP_BAR2 + * + * This command unmaps any pending BAR2 page tables created with + * NV2080_CTRL_CMD_BUS_MAP_BAR2 command. The handle passed in must + * match the handle used to map the page tables. Not supported on SLI. + * + * hMemory + * This field is a handle to physical memory. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV2080_CTRL_CMD_BUS_UNMAP_BAR2 (0x2080180a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS { + NvHandle hMemory; +} NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_VERIFY_BAR2 + * + * This command tests BAR2 against BAR0 if there are BAR2 page tables + * set up with NV2080_CTRL_CMD_BUS_MAP_BAR2 command. The handle passed + * in must match the handle used to map the page tables. Not supported on SLI. + * + * hMemory + * This field is a handle to physical memory. + * offset + * Base offset of the surface where the test will make its first dword write. + * size + * Test will write '(size/4)*4' bytes starting at surface offset `offset'. 
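+ *
+ * Example (illustrative sketch only; hMemory is assumed to be a handle to a
+ * physical memory allocation owned by the client, and the NvRmControl-style
+ * entry point is an assumption):
+ *
+ *   NV2080_CTRL_BUS_MAP_BAR2_PARAMS    mapParams    = { hMemory };
+ *   NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS verifyParams = { hMemory, 0, 4096 };
+ *   NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS  unmapParams  = { hMemory };
+ *
+ *   // BAR2 must be mapped before it can be verified or unmapped.
+ *   NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_BUS_MAP_BAR2,
+ *               &mapParams, sizeof(mapParams));
+ *   NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_BUS_VERIFY_BAR2,
+ *               &verifyParams, sizeof(verifyParams));
+ *   NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_BUS_UNMAP_BAR2,
+ *               &unmapParams, sizeof(unmapParams));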
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_VERIFY_BAR2 (0x2080180b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS {
+    NvHandle hMemory;
+    NvU32    offset;
+    NvU32    size;
+} NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_HWBC_GET_UPSTREAM_BAR0
+ *
+ * This command gets the BAR0 for a HWBC device's upstream port.
+ *
+ *   primaryBus
+ *     This field is the PCI Express Primary Bus number that uniquely
+ *     identifies a HWBC device's upstream port, i.e. the BR04 Upstream Port.
+ *   physBAR0
+ *     This field returns the BAR0 physical address of the HWBC device's
+ *     upstream port.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_BUS_HWBC_GET_UPSTREAM_BAR0 (0x2080180e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 physBAR0, 8);
+    NvU8 primaryBus;
+} NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_SERVICE_GPU_MULTIFUNC_STATE
+ * This command reports the current GPU audio device power state or sets a
+ * new power state.
+ *
+ *   command
+ *     This parameter specifies the target GPU multifunction state.
+ *       NV2080_CTRL_BUS_ENABLE_GPU_MULTIFUNC_STATE   Enables the multifunction state.
+ *       NV2080_CTRL_BUS_DISABLE_GPU_MULTIFUNC_STATE  Disables the multifunction state.
+ *       NV2080_CTRL_BUS_GET_GPU_MULTIFUNC_STATE      Gets the current device power state.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ */
+
+#define NV2080_CTRL_CMD_BUS_SERVICE_GPU_MULTIFUNC_STATE (0x20801812) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS {
+    NvU8  command;
+    NvU32 deviceState;
+} NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS;
+
+#define NV2080_CTRL_BUS_ENABLE_GPU_MULTIFUNC_STATE  (0x00000000)
+#define NV2080_CTRL_BUS_DISABLE_GPU_MULTIFUNC_STATE (0x00000001)
+#define NV2080_CTRL_BUS_GET_GPU_MULTIFUNC_STATE     (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS
+ * This command gets the counts for different counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for the desired counter types.
+ *
+ *   pexTotalCorrectableErrors
+ *     This parameter returns the total correctable errors, which include
+ *     NV_XVE_ERROR_COUNTER1 plus LCRC errors, 8B10B errors, NAKs and
+ *     failed L0s exits.
+ *
+ *   pexCorrectableErrors
+ *     This parameter includes only the NV_XVE_ERROR_COUNTER1 value.
+ *
+ *   pexTotalNonFatalErrors
+ *     This parameter returns the total non-fatal errors, which may or may
+ *     not include correctable errors.
+ *
+ *   pexTotalFatalErrors
+ *     This parameter returns the total fatal errors.
+ *
+ *   pexTotalUnsupportedReqs
+ *     This parameter returns the total unsupported requests.
+ *
+ *   pexErrors
+ *     This array contains the error counts for each error type as requested
+ *     from the pexCounterMask.
+ *     The array indexes correspond to the mask bits one-to-one.
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS (0x20801813) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PEX_MAX_COUNTER_TYPES 31
+#define NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+    NvU32 pexTotalCorrectableErrors;
+    NvU16 pexCorrectableErrors;
+    NvU8  pexTotalNonFatalErrors;
+    NvU8  pexTotalFatalErrors;
+    NvU8  pexTotalUnsupportedReqs;
+    NvU16 pexCounters[NV2080_CTRL_PEX_MAX_COUNTER_TYPES];
+} NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS;
+
+/*
+ * Note that MAX_COUNTER_TYPES will need to be updated each time
+ * a new counter type gets added to the list below. The value
+ * depends on the bits set for the last valid define. Look
+ * at pexCounters[] comments above for details.
+ *
+ */
+#define NV2080_CTRL_BUS_PEX_COUNTER_TYPE 0x00000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_RECEIVER_ERRORS 0x00000001
+#define NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_COUNT 0x00000002
+#define NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_ROLLOVER_COUNT 0x00000004
+#define NV2080_CTRL_BUS_PEX_COUNTER_BAD_DLLP_COUNT 0x00000008
+#define NV2080_CTRL_BUS_PEX_COUNTER_BAD_TLP_COUNT 0x00000010
+#define NV2080_CTRL_BUS_PEX_COUNTER_8B10B_ERRORS_COUNT 0x00000020
+#define NV2080_CTRL_BUS_PEX_COUNTER_SYNC_HEADER_ERRORS_COUNT 0x00000040
+#define NV2080_CTRL_BUS_PEX_COUNTER_LCRC_ERRORS_COUNT 0x00000080
+#define NV2080_CTRL_BUS_PEX_COUNTER_FAILED_L0S_EXITS_COUNT 0x00000100
+#define NV2080_CTRL_BUS_PEX_COUNTER_NAKS_SENT_COUNT 0x00000200
+#define NV2080_CTRL_BUS_PEX_COUNTER_NAKS_RCVD_COUNT 0x00000400
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_ERRORS 0x00000800
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_TO_RECOVERY_COUNT 0x00001000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L0_TO_RECOVERY_COUNT 0x00002000
+#define NV2080_CTRL_BUS_PEX_COUNTER_RECOVERY_COUNT 0x00004000
+#define NV2080_CTRL_BUS_PEX_COUNTER_CHIPSET_XMIT_L0S_ENTRY_COUNT 0x00008000
+#define NV2080_CTRL_BUS_PEX_COUNTER_GPU_XMIT_L0S_ENTRY_COUNT 0x00010000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_ENTRY_COUNT 0x00020000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1P_ENTRY_COUNT 0x00040000
+#define NV2080_CTRL_BUS_PEX_COUNTER_DEEP_L1_ENTRY_COUNT 0x00080000
+#define NV2080_CTRL_BUS_PEX_COUNTER_ASLM_COUNT 0x00100000
+#define NV2080_CTRL_BUS_PEX_COUNTER_TOTAL_CORR_ERROR_COUNT 0x00200000
+#define NV2080_CTRL_BUS_PEX_COUNTER_CORR_ERROR_COUNT 0x00400000
+#define NV2080_CTRL_BUS_PEX_COUNTER_NON_FATAL_ERROR_COUNT 0x00800000
+#define NV2080_CTRL_BUS_PEX_COUNTER_FATAL_ERROR_COUNT 0x01000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_UNSUPP_REQ_COUNT 0x02000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_1_ENTRY_COUNT 0x04000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_2_ENTRY_COUNT 0x08000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_2_ABORT_COUNT 0x10000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1SS_TO_DEEP_L1_TIMEOUT_COUNT 0x20000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_SHORT_DURATION_COUNT 0x40000000
+
+/*
+ * NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS
+ * This command clears the counters for the selected counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for the desired counters to be
+ *     cleared. Note that not all counters can be cleared.
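+ *
+ * Example (illustrative sketch only; the handles and the NvRmControl-style
+ * entry point are assumptions): clear the replay and received-NAK counters,
+ * then read back their fresh values.
+ *
+ *   NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS clearParams = { 0 };
+ *   NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS   getParams   = { 0 };
+ *   clearParams.pexCounterMask = NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_COUNT |
+ *                                NV2080_CTRL_BUS_PEX_COUNTER_NAKS_RCVD_COUNT;
+ *   NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS,
+ *               &clearParams, sizeof(clearParams));
+ *   getParams.pexCounterMask = clearParams.pexCounterMask;
+ *   NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS,
+ *               &getParams, sizeof(getParams));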
+ */
+
+#define NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS (0x20801814) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+} NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_FREEZE_PEX_COUNTERS
+ * This command freezes or unfreezes the counters for the selected counter
+ * types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for the desired counters to be
+ *     frozen. Note that not all counters can be frozen.
+ *
+ *   bFreezeRmCounter
+ *     This parameter selects whether the API freezes or unfreezes the
+ *     counters: NV_TRUE to freeze and NV_FALSE to unfreeze.
+ */
+
+#define NV2080_CTRL_CMD_BUS_FREEZE_PEX_COUNTERS (0x20801815) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS {
+    NvU32  pexCounterMask;
+    NvBool bFreezeRmCounter;
+} NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS
+ * This command gets the per-lane counters and the types of errors detected.
+ *
+ *   pexLaneErrorStatus
+ *     This mask specifies the types of errors detected on any of the lanes.
+ *
+ *   pexLaneCounter
+ *     This array gives the counters per lane. Each index corresponds to
+ *     lane index + 1.
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS (0x20801816) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PEX_MAX_LANES 16
+#define NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS {
+    NvU16 pexLaneErrorStatus;
+    NvU8  pexLaneCounter[NV2080_CTRL_PEX_MAX_LANES];
+} NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_TYPE 0x00000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SYNC_HDR_CODING_ERR 0x00000001
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SYNC_HDR_ORDER_ERR 0x00000002
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_OS_DATA_SEQ_ERR 0x00000004
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_TSX_DATA_SEQ_ERR 0x00000008
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SKPOS_LFSR_ERR 0x00000010
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RX_CLK_FIFO_OVERFLOW 0x00000020
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_ELASTIC_FIFO_OVERFLOW 0x00000040
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RCVD_LINK_NUM_ERR 0x00000080
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RCVD_LANE_NUM_ERR 0x00000100
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY (0x20801817) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS {
+    NvBool bPexLtrRegkeyOverride;
+    NvBool bPexRootPortLtrSupported;
+    NvBool bPexGpuLtrSupported;
+    NvU16  pexLtrSnoopLatencyValue;
+    NvU8   pexLtrSnoopLatencyScale;
+    NvU16  pexLtrNoSnoopLatencyValue;
+    NvU8   pexLtrNoSnoopLatencyScale;
+} NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY (0x20801818) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS {
+    NvU16 pexLtrSnoopLatencyValue;
+    NvU8  pexLtrSnoopLatencyScale;
+    NvU16 pexLtrNoSnoopLatencyValue;
+    NvU8  pexLtrNoSnoopLatencyScale;
+} NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_UTIL_COUNTERS
+ * This command gets the counts for different counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for the desired counter types.
+ *
+ */
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_BYTES 0x00000001
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_BYTES 0x00000002
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_L0 0x00000004
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_L0 0x00000008
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_L0S 0x00000010
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_L0S 0x00000020
+#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_NON_L0_L0S 0x00000040
+#define NV2080_CTRL_PEX_UTIL_MAX_COUNTER_TYPES 7
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_UTIL_COUNTERS (0x20801819) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+    NvU32 pexCounters[NV2080_CTRL_PEX_UTIL_MAX_COUNTER_TYPES];
+} NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_CLEAR_PEX_UTIL_COUNTERS
+ * This command clears the counters for the selected counter types.
+ *
+ *   pexCounterMask
+ *     This parameter specifies the input mask for the desired counters to be
+ *     cleared. Note that not all counters can be cleared.
+ *
+ * NOTE: NV2080_CTRL_BUS_PEX_UTIL_COUNTER_UPSTREAM and
+ * NV2080_CTRL_BUS_PEX_UTIL_COUNTER_DOWNSTREAM belong to the PMU. The ctrl
+ * function will neither reset nor disable/enable them.
+ */
+#define NV2080_CTRL_CMD_BUS_CLEAR_PEX_UTIL_COUNTERS (0x20801820) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+} NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_GET_BFD (0x20801821) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_BFD_PARAMSARR_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_BUS_GET_BFD_PARAMS {
+    NvBool valid;
+    NvU16  deviceID;
+    NvU16  vendorID;
+    NvU32  domain;
+    NvU16  bus;
+    NvU16  device;
+    NvU8   function;
+} NV2080_CTRL_BUS_GET_BFD_PARAMS;
+
+#define NV2080_CTRL_BUS_GET_BFD_PARAMSARR_MESSAGE_ID (0x21U)
+
+typedef struct NV2080_CTRL_BUS_GET_BFD_PARAMSARR {
+    NV2080_CTRL_BUS_GET_BFD_PARAMS params[32];
+} NV2080_CTRL_BUS_GET_BFD_PARAMSARR;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_ASPM_DISABLE_FLAGS
+ * This command gets the state of the PDB properties listed below.
+ *
+ *   aspmDisableFlags[]
+ *     This NvBool array stores the state of each property; the array size
+ *     can be increased as required.
+ *
+ * NOTE: When adding more properties, increment NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS.
+ */
+
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_L1_MASK_REGKEY_OVERRIDE 0x00000000
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_OS_RM_MAKES_POLICY_DECISIONS 0x00000001
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_BEHIND_BRIDGE 0x00000002
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_UNSUPPORTED 0x00000003
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED 0x00000004
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY 0x00000005
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_DISABLED 0x00000006
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY 0x00000007
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_BIF_ENABLE_ASPM_DT_L1 0x00000008
+// append properties here
+
+#define NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS 9
+
+#define NV2080_CTRL_CMD_BUS_GET_ASPM_DISABLE_FLAGS (0x20801822) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS {
+    NvBool aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS];
+} NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS (0x20801824) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS {
+    NvBool bEnable;
+} NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_NVLINK_PEER_ID_MASK
+ *
+ * This command returns the cached (SW-only) NVLINK peer id mask. Currently,
+ * this control call is only needed inside an SR-IOV enabled guest where page
+ * table management is being done by the guest. The guest needs this mask to
+ * derive the peer id corresponding to the peer GPU. This peer id will then be
+ * programmed into the PTEs by guest RM.
+ *
+ *   nvlinkPeerIdMask[OUT]
+ *     - The peer id mask is returned in this array.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_BUS_GET_NVLINK_PEER_ID_MASK (0x20801825) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_MAX_NUM_GPUS 32
+
+#define NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS_MESSAGE_ID (0x25U)
+
+typedef struct NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS {
+    NvU32 nvlinkPeerIdMask[NV2080_CTRL_BUS_MAX_NUM_GPUS];
+} NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS
+ * This command takes the eomMode, eomNblks and eomNerrs parameters from the
+ * client and sends them to the PMU.
+ */
+#define NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS (0x20801826) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS_MESSAGE_ID (0x26U)
+
+typedef struct NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS {
+    NvU8 eomMode;
+    NvU8 eomNblks;
+    NvU8 eomNerrs;
+} NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE
+ * This command takes the UPHY register address and lane select mask from the
+ * client and sends them to the PMU.
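+ *
+ * Example (illustrative sketch only; the register address below is a
+ * made-up placeholder, and the handles and NvRmControl-style entry point
+ * are assumptions):
+ *
+ *   NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *   params.regAddress     = 0x100;      // hypothetical UPHY DLN register
+ *   params.laneSelectMask = (1 << 0);   // lane 0
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE,
+ *                        &params, sizeof(params));
+ *   // on NV_OK, params.regValue holds the 16-bit register contents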
+ */ +#define NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE (0x20801827) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS_MESSAGE_ID (0x27U) + +typedef struct NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS { + NvU32 regAddress; + NvU32 laneSelectMask; + NvU16 regValue; +} NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_GET_EOM_STATUS + * + */ +#define NV2080_CTRL_CMD_BUS_GET_EOM_STATUS (0x20801828) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_MAX_NUM_LANES 32 + +#define NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS { + NvU8 eomMode; + NvU8 eomNblks; + NvU8 eomNerrs; + NvU8 eomBerEyeSel; + NvU8 eomPamEyeSel; + NvU32 laneMask; + NvU16 eomStatus[NV2080_CTRL_BUS_MAX_NUM_LANES]; +} NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS + * + * This command returns the PCIe requester atomics operation capabilities + * from GPU to coherent SYSMEM. + * + * atomicsCaps[OUT] + * Mask of supported PCIe atomic operations in the form of + * NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_* + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS (0x20801829) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS_MESSAGE_ID (0x29U) + +typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS { + NvU32 atomicsCaps; +} NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS; + +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_32 0:0 +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_32_YES (0x00000001) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_32_NO (0x00000000) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_64 1:1 +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_64_YES (0x00000001) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_FETCHADD_64_NO (0x00000000) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_32 2:2 +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_32_YES (0x00000001) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_32_NO (0x00000000) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_64 3:3 +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_64_YES (0x00000001) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_SWAP_64_NO (0x00000000) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_32 4:4 +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_32_YES (0x00000001) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_32_NO (0x00000000) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_64 5:5 +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_64_YES (0x00000001) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_64_NO (0x00000000) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_128 6:6 +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_128_YES (0x00000001) +#define NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_CAS_128_NO (0x00000000) + +/* + * 
NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS
+ *
+ * This command returns the supported GPU atomic operations
+ * that map to the capable PCIe atomic operations from GPU to
+ * coherent SYSMEM.
+ *
+ * atomicOp[OUT]
+ *     Array of structures, each containing the supported status and
+ *     attributes of one atomic operation. The array can be indexed using
+ *     one of NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_*
+ *
+ * bSupported[OUT]
+ *     Is the GPU atomic operation natively supported by PCIe?
+ *
+ * attributes[OUT]
+ *     Provides the attributes mask of the GPU atomic operation when
+ *     supported, in the form of
+ *     NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_*
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS (0x2080182a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IADD 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IMIN 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IMAX 2
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_INC 3
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_DEC 4
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IAND 5
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IOR 6
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_IXOR 7
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_EXCH 8
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_CAS 9
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FADD 10
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FMIN 11
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FMAX 12
+
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT 13
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS_MESSAGE_ID (0x2AU)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS {
+    struct {
+        NvBool bSupported;
+        NvU32  attributes;
+    } atomicOp[NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT];
+} NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS;
+
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SCALAR 0:0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SCALAR_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SCALAR_NO 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_VECTOR 1:1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_VECTOR_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_VECTOR_NO 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_REDUCTION 2:2
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_REDUCTION_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_REDUCTION_NO 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_32 3:3
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_32_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_32_NO 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_64 4:4
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_64_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_64_NO 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_128 5:5
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_128_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIZE_128_NO 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIGNED 6:6
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIGNED_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_SIGNED_NO 0
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_UNSIGNED 7:7
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_UNSIGNED_YES 1
+#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB_UNSIGNED_NO 0
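+
+/*
+ * A hypothetical usage sketch: interpreting the attributes mask above with
+ * the DRF-style field macros from nvmisc.h (macro availability and the
+ * params variable are assumptions of this example):
+ *
+ *     NvU32 attr = params.atomicOp[NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FADD].attributes;
+ *     if (params.atomicOp[NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_FADD].bSupported &&
+ *         FLD_TEST_DRF(2080_CTRL, _PCIE_SUPPORTED_GPU_ATOMICS_ATTRIB, _SIZE_32, _YES, attr))
+ *     {
+ *         // a 32-bit FADD maps to a natively supported PCIe atomic
+ *     }
+ */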
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_C2C_INFO
+ *
+ * This command returns the C2C links information.
+ *
+ * bIsLinkUp[OUT]
+ *     NV_TRUE if the C2C links are present and the links are up.
+ *     The remaining fields below are valid only if the return value is
+ *     NV_OK and bIsLinkUp is NV_TRUE.
+ * nrLinks[OUT]
+ *     Total number of C2C links that are up.
+ * linkMask[OUT]
+ *     Bitmask of the C2C links present and up.
+ * perLinkBwMBps[OUT]
+ *     Theoretical per-link bandwidth in MBps.
+ * remoteType[OUT]
+ *     Type of the device connected to the remote end of the C2C link.
+ *     Valid values are:
+ *     NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_CPU - connected to a CPU
+ *     NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_GPU - connected to another GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_INFO (0x2080182b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS {
+    NvBool bIsLinkUp;
+    NvU32  nrLinks;
+    NvU32  linkMask;
+    NvU32  perLinkBwMBps;
+    NvU32  remoteType;
+} NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS;
+
+#define NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_CPU 1
+#define NV2080_CTRL_BUS_GET_C2C_INFO_REMOTE_TYPE_GPU 2
+
+/*
+ * NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS
+ *
+ * This command disables GPU system memory access after quiescing the GPU,
+ * or re-enables sysmem access.
+ *
+ * bDisable
+ *     If NV_TRUE, the GPU is quiesced and system memory access is disabled.
+ *     If NV_FALSE, GPU system memory access is re-enabled and the GPU is resumed.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS (0x2080182c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS_MESSAGE_ID (0x2CU)
+
+typedef struct NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS {
+    NvBool bDisable;
+} NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_C2C_ERR_INFO
+ *
+ * This command returns the C2C error info for the C2C links.
+ *
+ * errCnts[OUT]
+ *     Array of structures containing counts of how many times each C2C
+ *     fatal error interrupt has occurred. The array size is
+ *     NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_NUM_C2C_INSTANCES *
+ *     NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_C2C_LINKS_PER_INSTANCE.
+ *
+ * nrCrcErrIntr[OUT]
+ *     Number of times the CRC error interrupt triggered.
+ * nrReplayErrIntr[OUT]
+ *     Number of times the REPLAY error interrupt triggered.
+ * nrReplayB2bErrIntr[OUT]
+ *     Number of times the REPLAY_B2B error interrupt triggered.
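+ *
+ * A hypothetical sketch summing the per-link CRC counters after the command
+ * has completed with NV_OK (the params variable is an assumption of this
+ * example; the parameter structure is defined below):
+ *
+ *     NvU32 i, totalCrcErrs = 0;
+ *     for (i = 0; i < NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_NUM_C2C_INSTANCES *
+ *                     NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_C2C_LINKS_PER_INSTANCE; i++)
+ *     {
+ *         totalCrcErrs += params.errCnts[i].nrCrcErrIntr;
+ *     }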
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_C2C_ERR_INFO (0x2080182d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_NUM_C2C_INSTANCES 2
+#define NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_C2C_LINKS_PER_INSTANCE 5
+
+#define NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS_MESSAGE_ID (0x2DU)
+
+typedef struct NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS {
+    struct {
+        NvU32 nrCrcErrIntr;
+        NvU32 nrReplayErrIntr;
+        NvU32 nrReplayB2bErrIntr;
+    } errCnts[NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_NUM_C2C_INSTANCES * NV2080_CTRL_BUS_GET_C2C_ERR_INFO_MAX_C2C_LINKS_PER_INSTANCE];
+} NV2080_CTRL_BUS_GET_C2C_ERR_INFO_PARAMS;
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
new file mode 100644
index 0000000..179297e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
@@ -0,0 +1,314 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080ce.finn
+//
+
+
+
+/* NV20_SUBDEVICE_XX ce control commands and parameters */
+
+#include "ctrl2080common.h"
+
+/*
+ * NV2080_CTRL_CMD_CE_GET_CAPS
+ *
+ * This command returns the set of CE capabilities for the device
+ * in the form of an array of unsigned bytes.
+ *
+ * ceEngineType
+ *     This parameter specifies the copy engine type.
+ * capsTblSize
+ *     This parameter specifies the size in bytes of the caps table per CE.
+ *     This value should be set to NV2080_CTRL_CE_CAPS_TBL_SIZE.
+ * capsTbl
+ *     This parameter specifies a pointer to the client's caps table buffer
+ *     into which the CE caps bits will be transferred by the RM.
+ *     The caps table is an array of unsigned bytes.
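+ *
+ * A hypothetical sketch using the V2 variant defined below, which embeds the
+ * caps table directly (NvRmControl, the handles, and the engine id value are
+ * assumptions of this example):
+ *
+ *     NV2080_CTRL_CE_GET_CAPS_V2_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *     params.ceEngineType = ceEngineType;
+ *     status = NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_CE_GET_CAPS_V2,
+ *                          &params, sizeof(params));
+ *     if ((status == NV_OK) &&
+ *         NV2080_CTRL_CE_GET_CAP(params.capsTbl, NV2080_CTRL_CE_CAPS_CE_GRCE))
+ *     {
+ *         // this CE is a GRCE (synchronous with graphics)
+ *     }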
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_CE_GET_CAPS (0x20802a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CAPS_PARAMS_MESSAGE_ID" */ + +/* + * Size in bytes of CE caps table. This value should be one greater + * than the largest byte_index value below. + */ +#define NV2080_CTRL_CE_CAPS_TBL_SIZE 2 + +#define NV2080_CTRL_CE_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CE_GET_CAPS_PARAMS { + NvU32 ceEngineType; + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV2080_CTRL_CE_GET_CAPS_PARAMS; + +#define NV2080_CTRL_CMD_CE_GET_CAPS_V2 (0x20802a03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_CE_GET_CAPS_V2_PARAMS { + NvU32 ceEngineType; + NvU8 capsTbl[NV2080_CTRL_CE_CAPS_TBL_SIZE]; +} NV2080_CTRL_CE_GET_CAPS_V2_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV2080_CTRL_CE_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV2080_CTRL_CE_CAPS_CE_GRCE 0:0x01 +#define NV2080_CTRL_CE_CAPS_CE_SHARED 0:0x02 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM_READ 0:0x04 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM_WRITE 0:0x08 +#define NV2080_CTRL_CE_CAPS_CE_NVLINK_P2P 0:0x10 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM 0:0x20 +#define NV2080_CTRL_CE_CAPS_CE_P2P 0:0x40 +#define NV2080_CTRL_CE_CAPS_CE_BL_SIZE_GT_64K_SUPPORTED 0:0x80 +#define NV2080_CTRL_CE_CAPS_CE_SUPPORTS_NONPIPELINED_BL 1:0x01 +#define NV2080_CTRL_CE_CAPS_CE_SUPPORTS_PIPELINED_BL 1:0x02 + + + +/* + * NV2080_CTRL_CE_CAPS_CE_GRCE + * Set if the CE is synchronous with GR + * + * NV2080_CTRL_CE_CAPS_CE_SHARED + * Set if the CE shares physical CEs with any other CE + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM_READ + * Set if the CE can give enhanced performance for SYSMEM reads over other CEs + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM_WRITE + * Set if the CE can give enhanced performance for SYSMEM writes over other CEs + * + * NV2080_CTRL_CE_CAPS_CE_NVLINK_P2P + * Set if the CE can be used for P2P transactions using NVLINK + * Once a CE is exposed for P2P over NVLINK, it will remain available for the life of RM + * PCE2LCE mapping may change based on the number of GPUs registered in RM however + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM + * Set if the CE can be used for SYSMEM transactions + * + * NV2080_CTRL_CE_CAPS_CE_P2P + * Set if the CE can be used for P2P transactions + * + * NV2080_CTRL_CE_CAPS_CE_BL_SIZE_GT_64K_SUPPORTED + * Set if the CE supports BL copy size greater than 64K + * + * NV2080_CTRL_CE_CAPS_CE_SUPPORTS_NONPIPELINED_BL + * Set if the CE supports non-pipelined Block linear + * + * NV2080_CTRL_CE_CAPS_CE_SUPPORTS_PIPELINED_BL + * Set if the CE supports pipelined Block Linear + */ + + + +/* + * NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK + * + * This command returns the mapping of PCE's for the given LCE + * + * ceEngineType + * This parameter specifies the copy engine type + * pceMask + * This parameter specifies a mask of PCEs that correspond + * to the LCE specified in ceEngineType + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK (0x20802a02) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | 
NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS { + NvU32 ceEngineType; + NvU32 pceMask; +} NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_SET_PCE_LCE_CONFIG + * + * This command sets the PCE2LCE configuration + * + * pceLceConfig[NV2080_CTRL_MAX_PCES] + * This parameter specifies the PCE-LCE mapping requested + * grceLceConfig[NV2080_CTRL_MAX_GRCES] + * This parameter specifies which LCE is the GRCE sharing with + * 0xF -> Does not share with any LCE + * 0-MAX_LCE -> Shares with the given LCE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_CE_SET_PCE_LCE_CONFIG (0x20802a04) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MAX_PCES 32 +#define NV2080_CTRL_MAX_GRCES 2 + +#define NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS { + NvU32 ceEngineType; + NvU32 pceLceMap[NV2080_CTRL_MAX_PCES]; + NvU32 grceSharedLceMap[NV2080_CTRL_MAX_GRCES]; +} NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS + * + * This command updates the PCE-LCE mappings + * + * pPceLceMap [IN] + * This parameter contains the array of PCE to LCE mappings. + * The array is indexed by the PCE index, and contains the + * LCE index that the PCE is assigned to. A unused PCE is + * tagged with NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_INVALID_LCE. + * + * pGrceConfig [IN] + * This parameter contains the array of GRCE configs. + * 0xF -> GRCE does not share with any LCE + * 0-MAX_LCE -> GRCE shares with the given LCE + * + * exposeCeMask [IN] + * This parameter specifies the mask of LCEs to export to the + * clients after the update. + * + * bUpdateNvlinkPceLce [IN] + * Whether PCE-LCE mappings need to be updated for nvlink topology. + * If this is NV_FALSE, RM would ignore the above values. However, + * PCE-LCE mappings will still be updated if there were any regkey + * overrides. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ + +#define NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS (0x20802a05) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS { + NvU32 pceLceMap[NV2080_CTRL_MAX_PCES]; + NvU32 grceConfig[NV2080_CTRL_MAX_GRCES]; + NvU32 exposeCeMask; + NvBool bUpdateNvlinkPceLce; +} NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS; + +#define NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_INVALID_LCE 0xf + +#define NV2080_CTRL_CMD_CE_UPDATE_CLASS_DB (0x20802a06) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS { + NvU32 stubbedCeMask; +} NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS + * + * Query _CE_GRCE, _CE_SHARED, _CE_SUPPORTS_PIPELINED_BL, _CE_SUPPORTS_NONPIPELINED_BL bits of CE + * capabilities. 
+ *
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS (0x20802a07) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | 0x7" */
+
+#define NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+    NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK
+ *
+ * Get HSHUB and FBHUB PCE masks.
+ *
+ * [out] hshubPceMasks
+ *     PCE mask for each HSHUB
+ * [out] fbhubPceMask
+ *     FBHUB PCE mask
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK (0x20802a09) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_MAX_HSHUBS 5
+
+#define NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS {
+    NvU32 hshubPceMasks[NV2080_CTRL_CE_MAX_HSHUBS];
+    NvU32 fbhubPceMask;
+} NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_CE_GET_ALL_CAPS
+ *
+ * Query caps of all CEs.
+ *
+ * [out] capsTbl
+ *     Array of CE caps in the order of CEs. The caps bits interpretation is
+ *     the same as in NV2080_CTRL_CMD_CE_GET_CAPS.
+ * [out] present
+ *     Bit mask indicating which CEs are usable by the client and have their
+ *     caps indicated in capsTbl. If a CE is not marked present, its caps bits
+ *     should be ignored. If the client is subscribed to a MIG instance, only
+ *     the CEs present in the instance are tagged as such.
+ */
+
+#define NV2080_CTRL_CMD_CE_GET_ALL_CAPS (0x20802a0a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS_MESSAGE_ID (0xaU)
+
+typedef struct NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS {
+    NvU8  capsTbl[NV2080_CTRL_MAX_PCES][NV2080_CTRL_CE_CAPS_TBL_SIZE];
+    NvU32 present;
+} NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS;
+
+#define NV2080_CTRL_CMD_CE_GET_ALL_PHYSICAL_CAPS (0x20802a0b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | 0xb" */
+
+/* _ctrl2080ce_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
new file mode 100644
index 0000000..55b9964
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080cipher.finn
+//
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
new file mode 100644
index 0000000..20b8f4f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080clk.finn
+//
+
+
+
+/* _ctrl2080clk_h_ */
+#include "nvfixedtypes.h"
+#include "ctrl/ctrl2080/ctrl2080base.h"
+#include "ctrl/ctrl2080/ctrl2080boardobj.h"
+#include "ctrl/ctrl2080/ctrl2080gpumon.h"
+#include "ctrl/ctrl2080/ctrl2080clkavfs.h"
+#include "ctrl/ctrl2080/ctrl2080volt.h"
+#include "ctrl/ctrl2080/ctrl2080pmumon.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
new file mode 100644
index 0000000..570987b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
@@ -0,0 +1,36 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080clkavfs.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h new file mode 100644 index 0000000..4857c3e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2004 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
// Source file: ctrl/ctrl2080/ctrl2080common.finn
+//
+
+
+
+
+#define NV2080_CTRL_CMD_MAX_HEADS 2
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
new file mode 100644
index 0000000..1ad616f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
@@ -0,0 +1,185 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080dma.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX dma control commands and parameters */
+
+#include "ctrl2080common.h"
+
+/*
+ * NV2080_CTRL_CMD_DMA_INVALIDATE_TLB
+ *
+ * This command invalidates the GPU TLB. This is intended to be used
+ * by RM clients that manage their own TLB consistency when updating
+ * page tables on their own, or with DEFER_TLB_INVALIDATION options
+ * to other RM APIs.
+ *
+ * hVASpace
+ *     This parameter specifies the VASpace object whose MMU TLB entries need to be invalidated.
+ *     Specifying a GMMU VASpace object handle will invalidate the GMMU TLB for the particular VASpace.
+ *     Specifying an SMMU VASpace object handle will flush the entire SMMU TLB & PTC.
+ *
+ * This call can be used with the NV50_DEFERRED_API_CLASS (class 0x5080).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_STATUS_TIMEOUT_RETRY
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_DMA_INVALIDATE_TLB (0x20802502) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID << 8) | NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS {
+    NvHandle hClient;  // Deprecated. Kept here for compatibility with chips_GB9-2-1-1
+    NvHandle hDevice;  // Deprecated. Kept here for compatibility with chips_GB9-2-1-1
+    NvU32    engine;   // Deprecated. Kept here for compatibility with chips_GB9-2-1-1
+    NvHandle hVASpace;
+} NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS;
+
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS 0:0
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO 1:1
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY 2:2
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE 3:3
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB 4:4
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV 5:5
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG 6:6
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD 7:7
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION 8:8
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON 9:9
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS 10:10
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS_TRUE (0x00000001)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR 11:11
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR_FALSE (0x00000000)
+#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR_TRUE (0x00000001)
+
+/*
+ * NV2080_CTRL_DMA_INFO
+ *
+ * This structure represents a single 32bit dma engine value. Clients
+ * request a particular DMA engine value by specifying a unique dma
+ * information index.
+ *
+ * Legal dma information index values are:
+ *   NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE
+ *     This index can be used to request the system address size in bits.
+ */
+typedef struct NV2080_CTRL_DMA_INFO {
+    NvU32 index;
+    NvU32 data;
+} NV2080_CTRL_DMA_INFO;
+
+/* valid dma info index values */
+#define NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE (0x000000000)
+
+/* set INDEX_MAX to greatest possible index value */
+#define NV2080_CTRL_DMA_INFO_INDEX_MAX NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE
+
+/*
+ * NV2080_CTRL_CMD_DMA_GET_INFO
+ *
+ * This command returns dma engine information for the associated GPU.
+ * Requests to retrieve dma information use an array of one or more
+ * NV2080_CTRL_DMA_INFO structures.
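+ *
+ * A hypothetical sketch querying the system address width (parameter
+ * descriptions follow below; NvRmControl and the handles are assumptions of
+ * this example):
+ *
+ *     NV2080_CTRL_DMA_GET_INFO_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *     params.dmaInfoTblSize = 1;
+ *     params.dmaInfoTbl[0].index = NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE;
+ *     status = NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_DMA_GET_INFO,
+ *                          &params, sizeof(params));
+ *     // on NV_OK, params.dmaInfoTbl[0].data holds the address size in bits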
+ *
+ * dmaInfoTblSize
+ *     This field specifies the number of valid entries in the dmaInfoTbl
+ *     array. This value cannot exceed NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES.
+ * dmaInfoTbl
+ *     This parameter contains the client's dma info table into
+ *     which the dma info values will be transferred by the RM.
+ *     The dma info table is an array of NV2080_CTRL_DMA_INFO structures.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_DMA_GET_INFO (0x20802503) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID << 8) | NV2080_CTRL_DMA_GET_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum number of NV2080_CTRL_DMA_INFO entries per request */
+#define NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES (256)
+
+#define NV2080_CTRL_DMA_GET_INFO_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_DMA_GET_INFO_PARAMS {
+    NvU32 dmaInfoTblSize;
+    /*
+     * C form:
+     * NV2080_CTRL_DMA_INFO dmaInfoTbl[NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES];
+     */
+    NV2080_CTRL_DMA_INFO dmaInfoTbl[NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES];
+} NV2080_CTRL_DMA_GET_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_DMA_UPDATE_COMPTAG_INFO_TILE_INFO {
+    /*!
+     * 64KB aligned address of source 64KB tile for comptag reswizzle.
+     */
+    NvU32 srcAddr;
+
+    /*!
+     * 64KB aligned address of destination 64KB tile for comptag reswizzle.
+     */
+    NvU32 dstAddr;
+
+    /*!
+     * Comptag index assigned to the 64K sized tile relative to
+     * the compcacheline. Absolute comptag index would be:
+     * startComptagIndex + relComptagIndex.
+     */
+    NvU16 relComptagIndex;
+} NV2080_CTRL_DMA_UPDATE_COMPTAG_INFO_TILE_INFO;
+
+// _ctrl2080dma_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
new file mode 100644
index 0000000..982381f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
@@ -0,0 +1,105 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080dmabuf.finn
+//
+
+
+
+/*
+ * NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD
+ *
+ * Exports RM vidmem handles to a dma-buf fd.
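+ *
+ * A hypothetical sketch exporting a single vidmem allocation to a new fd;
+ * the full parameter semantics are described below (NvRmControl, hVidmem
+ * and size are assumptions of this example):
+ *
+ *     NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *     params.fd = -1;              // ask RM to create a new dma-buf fd
+ *     params.totalObjects = 1;
+ *     params.numObjects = 1;
+ *     params.index = 0;
+ *     params.totalSize = size;     // OS-page-aligned
+ *     params.handles[0].hMemory = hVidmem;
+ *     params.handles[0].offset = 0;
+ *     params.handles[0].size = size;
+ *     status = NvRmControl(hClient, hSubdevice,
+ *                          NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD,
+ *                          &params, sizeof(params));
+ *     // on NV_OK, params.fd holds the dma-buf file descriptor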
+ *
+ * The objects in the 'handles' array are exported to the fd as the range:
+ * [index, index + numObjects).
+ *
+ * A dma-buf fd is created the first time this control call is called.
+ * The fd is then an input parameter for subsequent calls, which attach
+ * additional handles beyond NV2080_CTRL_DMABUF_MAX_HANDLES.
+ *
+ * fd
+ *     A dma-buf file descriptor. If -1, a new FD will be created.
+ *
+ * totalObjects
+ *     The total number of objects that the client wishes to export to the FD.
+ *     This parameter will be honored only when the FD is being created.
+ *
+ * numObjects
+ *     The number of handles the user wishes to export in this call.
+ *
+ * index
+ *     The index into the export fd at which to start exporting the handles in
+ *     'handles'. This index cannot overlap a previously used index.
+ *
+ * totalSize
+ *     The total size of memory being exported in bytes, needed to create the dma-buf.
+ *     This size includes the memory that will be exported in future export calls
+ *     for this dma-buf.
+ *
+ * handles
+ *     An array of {handle, offset, size} tuples describing the dma-buf.
+ *     The offsets and sizes must be OS page-size aligned.
+ *
+ * Limitations:
+ * 1. This call only supports vidmem objects for now.
+ * 2. All memory handles should belong to the same GPU or the same GPU MIG instance.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_NO_MEMORY
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_IN_USE
+ *   NV_ERR_INVALID_OBJECT
+ *   NV_ERR_INVALID_OBJECT_PARENT
+ */
+#define NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD (0x20803a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMABUF_INTERFACE_ID << 8) | NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_DMABUF_MAX_HANDLES 128
+
+typedef struct NV2080_CTRL_DMABUF_MEM_HANDLE_INFO {
+    NvHandle hMemory;
+    NV_DECLARE_ALIGNED(NvU64 offset, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_DMABUF_MEM_HANDLE_INFO;
+
+#define NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS {
+    NvS32 fd;
+    NvU32 totalObjects;
+    NvU32 numObjects;
+    NvU32 index;
+    NV_DECLARE_ALIGNED(NvU64 totalSize, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_DMABUF_MEM_HANDLE_INFO handles[NV2080_CTRL_DMABUF_MAX_HANDLES], 8);
+} NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS;
+
+// _ctrl2080dmabuf_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
new file mode 100644
index 0000000..7d0fabe
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
@@ -0,0 +1,63 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080ecc.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+
+
+#define NV2080_CTRL_CMD_ECC_GET_CLIENT_EXPOSED_COUNTERS (0x20803400U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS
+ *
+ * sramLastClearedTimestamp [out]
+ * dramLastClearedTimestamp [out]
+ *     Unix-epoch-based timestamps. These fields indicate when the error
+ *     counters were last cleared by the user.
+ *
+ * sramErrorCounts [out]
+ * dramErrorCounts [out]
+ *     Aggregate error counts for SRAM and DRAM
+ */
+
+#define NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID (0x0U)
+
+typedef struct NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS {
+    NvU32 sramLastClearedTimestamp;
+    NvU32 dramLastClearedTimestamp;
+
+    NV_DECLARE_ALIGNED(NvU64 sramCorrectedTotalCounts, 8);
+    NV_DECLARE_ALIGNED(NvU64 sramUncorrectedTotalCounts, 8);
+    NV_DECLARE_ALIGNED(NvU64 dramCorrectedTotalCounts, 8);
+    NV_DECLARE_ALIGNED(NvU64 dramUncorrectedTotalCounts, 8);
+} NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS;
+/* _ctrl2080ecc_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
new file mode 100644
index 0000000..e71eb81
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
@@ -0,0 +1,372 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080event.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+#include "nv_vgpu_types.h"
+/* NV20_SUBDEVICE_XX event-related control commands and parameters */
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION
+ *
+ * This command sets event notification state for the associated subdevice.
+ * This command requires that an instance of NV01_EVENT has been previously
+ * bound to the associated subdevice object.
+ *
+ * event
+ *     This parameter specifies the type of event to which the specified
+ *     action is to be applied. This parameter must specify a valid
+ *     NV2080_NOTIFIERS value (see cl2080.h for more details) and should
+ *     not exceed one less than NV2080_NOTIFIERS_MAXCOUNT.
+ * action
+ *     This parameter specifies the desired event notification action.
+ *     Valid notification actions include:
+ *       NV2080_CTRL_SET_EVENT_NOTIFICATION_DISABLE
+ *         This action disables event notification for the specified
+ *         event for the associated subdevice object.
+ *       NV2080_CTRL_SET_EVENT_NOTIFICATION_SINGLE
+ *         This action enables single-shot event notification for the
+ *         specified event for the associated subdevice object.
+ *       NV2080_CTRL_SET_EVENT_NOTIFICATION_REPEAT
+ *         This action enables repeated event notification for the specified
+ *         event for the associated subdevice object.
+ * bNotifyState
+ *     This boolean is used to indicate the current state of the notifier
+ *     at the time of event registration. This is optional and its semantics
+ *     need to be agreed upon by the notifier and the client using the notifier.
+ * info32
+ *     This is used to send 32-bit initial state info with the notifier at
+ *     the time of event registration.
+ * info16
+ *     This is used to send 16-bit initial state info with the notifier at
+ *     the time of event registration.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32  event;
+    NvU32  action;
+    NvBool bNotifyState;
+    NvU32  info32;
+    NvU16  info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+/* valid action values */
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000)
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001)
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+/* XUSB/PPC D-state defines */
+#define NV2080_EVENT_DSTATE_XUSB_D0 (0x00000000)
+#define NV2080_EVENT_DSTATE_XUSB_D3 (0x00000003)
+#define NV2080_EVENT_DSTATE_XUSB_INVALID (0xFFFFFFFF)
+#define NV2080_EVENT_DSTATE_PPC_D0 (0x00000000)
+#define NV2080_EVENT_DSTATE_PPC_D3 (0x00000003)
+#define NV2080_EVENT_DSTATE_PPC_INVALID (0xFFFFFFFF)
+
+// HDACODEC device D-state. D3_COLD is only for verbose mapping; it cannot be logged.
+typedef enum NV2080_EVENT_HDACODEC_DSTATE {
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D0 = 0,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D1 = 1,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D2 = 2,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D3_HOT = 3,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D3_COLD = 4,
+    NV2080_EVENT_HDACODEC_DEVICE_DSTATE_DSTATE_MAX = 5,
+} NV2080_EVENT_HDACODEC_DSTATE;
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_TRIGGER
+ *
+ * This command triggers a software event for the associated subdevice.
+ * This command accepts no parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_TRIGGER (0x20800302) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | 0x2" */
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES
+ *
+ * hMemory
+ *     This parameter specifies the handle of the memory object
+ *     that identifies the memory address translation for this
+ *     subdevice instance's notification(s). The beginning of the
+ *     translation points to an array of notification data structures.
+ *     The size of the translation must be at least large enough to hold the
+ *     maximum number of notification data structures identified by
+ *     the NV2080_MAX_NOTIFIERS value.
+ *     Legal argument values must be instances of the following classes:
+ *       NV01_NULL
+ *       NV04_MEMORY
+ *     When hMemory specifies the NV01_NULL_OBJECT value then any existing
+ *     memory translation connection is cleared. There must not be any
+ *     pending notifications when this command is issued.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES (0x20800303) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS {
+    NvHandle hMemory;
+} NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS;
+
+#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_NOTIFIED 0
+#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_PENDING 1
+#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_ERROR 2
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY
+ *
+ * hSemMemory
+ *     This parameter specifies the handle of the memory object that
+ *     identifies the semaphore memory associated with this subdevice
+ *     event notification. Once this is set, RM will generate an event
+ *     only when there is a change in the semaphore value. It is
+ *     expected that the semaphore memory value will be updated by
+ *     the GPU, indicating that there is an event pending. This
+ *     command is used by the VGX plugin to determine which virtual
+ *     machine has generated a particular event.
+ *
+ * semOffset
+ *     This parameter indicates the memory offset of the semaphore.
+ *
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY (0x20800304) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS {
+    NvHandle hSemMemory;
+    NvU32    semOffset;
+} NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_GUEST_MSI
+ *
+ * hSemMemory
+ *     This parameter specifies the handle of the memory object that
+ *     identifies the semaphore memory associated with this subdevice
+ *     event notification. Once this is set, RM will generate an event
+ *     only when there is a change in the semaphore value. It is
+ *     expected that the semaphore memory value will be updated by
+ *     the GPU, indicating that there is an event pending. This
+ *     command is used by the VGX plugin to determine which virtual
+ *     machine has generated a particular event.
+ *
+ * guestMSIAddr
+ *     This parameter indicates the guest allocated MSI address.
+ *
+ * guestMSIData
+ *     This parameter indicates the MSI data set by the guest OS.
+ *
+ * vmIdType
+ *     This parameter specifies the type of guest virtual machine identifier
+ *
+ * guestVmId
+ *     This parameter specifies the guest virtual machine identifier
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_EVENT_SET_GUEST_MSI (0x20800305) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 guestMSIAddr, 8);
+    NvU32      guestMSIData;
+    NvHandle   hSemMemory;
+    NvBool     isReset;
+    VM_ID_TYPE vmIdType;
+    NV_DECLARE_ALIGNED(VM_ID guestVmId, 8);
+} NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_SEMA_MEM_VALIDATION
+ *
+ * hSemMemory
+ *     This parameter specifies the handle of the memory object that
+ *     identifies the semaphore memory associated with this subdevice
+ *     event notification. Once this is set, RM will generate an event
+ *     only when there is a change in the semaphore value. It is
+ *     expected that the semaphore memory value will be updated by
+ *     the GPU, indicating that there is an event pending. This
+ *     command is used by the VGX plugin to determine which virtual
+ *     machine has generated a particular event.
+ *
+ * isSemaMemValidationEnabled
+ *     This parameter is used to enable/disable the semaphore-value-change
+ *     check when generating an event.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE
+ *   NVOS_STATUS_ERROR_INVALID_ARGUMENT
+ */
+
+
+#define NV2080_CTRL_CMD_EVENT_SET_SEMA_MEM_VALIDATION (0x20800306) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS {
+    NvHandle hSemMemory;
+    NvBool   isSemaMemValidationEnabled;
+} NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_EVENT_SET_VMBUS_CHANNEL
+ *
+ * hSemMemory
+ *     This parameter specifies the handle of the memory object that
+ *     identifies the semaphore memory associated with this subdevice
+ *     event notification. Once this is set, RM will generate an event
+ *     only when there is a change in the semaphore value. It is
+ *     expected that the semaphore memory value will be updated by
+ *     the GPU, indicating that there is an event pending. This
+ *     command is used by the VGX plugin to determine which virtual
+ *     machine has generated a particular event.
+ * + * vmIdType + * This parameter specifies the type of guest virtual machine identifier + * + * guestVmId + * This parameter specifies the guest virtual machine identifier + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_EVENT_SET_VMBUS_CHANNEL (0x20800307) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS { + NvHandle hSemMemory; + VM_ID_TYPE vmIdType; + NV_DECLARE_ALIGNED(VM_ID guestVmId, 8); +} NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS; + + +/* + * NV2080_CTRL_CMD_EVENT_SET_TRIGGER_FIFO + * + * This command triggers a FIFO event for the associated subdevice. + * + * hEvent + * Handle of the event that should be notified. If zero, all + * non-stall interrupt events for this subdevice will be notified. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_EVENT_SET_TRIGGER_FIFO (0x20800308) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS { + NvHandle hEvent; +} NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS; + +/* + * NV2080_CTRL_CMD_EVENT_VIDEO_BIND_EVTBUF_FOR_UID + * + * This command is used to create a video bind-point to an event buffer that + * is filtered by UID. + * + * hEventBuffer[IN] + * The event buffer to bind to + * + * recordSize[IN] + * The size of the FECS record in bytes + * + * levelOfDetail[IN] + * One of NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_: + * FULL: Report all CtxSw events + * SIMPLE: Report engine start and engine end events only + * CUSTOM: Report events in the eventFilter field + * NOTE: RM may override the level-of-detail depending on the caller + * + * eventFilter[IN] + * Bitmask of events to report if levelOfDetail is CUSTOM + * + * bAllUsers[IN] + * Only report video data for the current user if false, for all users if true + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_EVENT_VIDEO_BIND_EVTBUF (0x20800309) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD { + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_FULL = 0, + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_SIMPLE = 1, + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_CUSTOM = 2, +} NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD; + +#define NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS { + NvHandle hEventBuffer; + NvU32 recordSize; + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD levelOfDetail; + NvU32 eventFilter; + NvBool bAllUsers; +} NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS; + +/* _ctrl2080event_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h new file mode 100644 index 0000000..a338bab --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright 
(c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fan.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h new file mode 100644 index 0000000..00198e1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h @@ -0,0 +1,2907 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fb.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX fb control commands and parameters */ + +#include "ctrl2080common.h" +#include "nvlimits.h" + +/* + * NV2080_CTRL_FB_INFO + * + * This structure represents a single 32-bit fb engine value. Clients + * request a particular fb engine value by specifying a unique fb + * information index.
+ * + * Legal fb information index values are: + * NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT + * This index is used to request the number of tiled regions supported + * by the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU + * does not support tiling. + * NV2080_CTRL_FB_INFO_INDEX_COMPRESSION_SIZE + * This index is used to request the amount of compression (in bytes) + * supported by the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU + * does not support compression. + * NV2080_CTRL_FB_INFO_INDEX_DRAM_PAGE_STRIDE + * This index is used to request the DRAM page stride (in bytes) + * supported by the associated subdevice. The return value is GPU + * implementation-dependent. + * NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT + * This index is used to request the number of free tiled regions on + * the associated subdevice. The return value represents the current + * number of free tiled regions at the time the command is processed and + * is not guaranteed to remain unchanged. A return value of 0 indicates + * that there are no available tiled regions on the associated subdevice. + * NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT + * This index is used to request the number of frame buffer partitions + * on the associated subdevice. Starting with Fermi there are two units + * that go by the name framebuffer partition. On those chips this index + * returns the number of FBPAs. For the number of FBPs use + * NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE + * This index is used to request the amount of framebuffer memory in + * kilobytes physically present on the associated subdevice. This + * value will never exceed the value reported by + * NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE. + * This is an SMC aware attribute, so the per-partition framebuffer memory + * size will be returned when the client has a partition subscription. + * NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE + * This index is used to request the total amount of video memory in + * kilobytes for use with the associated subdevice. This value will + * reflect both framebuffer memory as well as any system memory dedicated + * for use with the subdevice. + * This is an SMC aware attribute, so the per-partition video memory size + * will be returned when the client has a partition subscription. + * NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE + * This index is used to request the amount of total RAM in kilobytes + * available for user allocations. This value reflects the total ram + * size less the amount of memory reserved for internal use. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_HEAP_START + * This index is used to request the offset for start of heap in + * kilobytes. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE + * This index is used to request the available amount of video memory in + * kilobytes for use with the associated subdevice or the SMC partition. + * This is an SMC aware attribute, thus necessary partition subscription is + * required to query per-partition information, if the device is partitioned.
+ * Alternatively, the SMC/MIG monitor capability can be acquired to query + * aggregate available memory across all the valid partitions. + * NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE + * This index reflects the amount of heap memory in kilobytes that + * is accessible by the CPU. On subdevices with video memory sizes that + * exceed the amount that can be bus mappable this value will be less + * than that reported by NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_BUS_WIDTH + * This index is used to request the FB bus bandwidth on the associated + * subdevice. + * NV2080_CTRL_FB_INFO_INDEX_RAM_CFG + * This index is used to request the implementation-dependent RAM + * configuration value of the associated subdevice. + * NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE + * This index is used to request the type of RAM used for the framebuffer + * on the associated subdevice. Legal RAM types include: + * NV2080_CTRL_FB_INFO_RAM_TYPE_UNKNOWN + * NV2080_CTRL_FB_INFO_RAM_TYPE_SDRAM + * NV2080_CTRL_FB_INFO_RAM_TYPE_DDR1 + * NV2080_CTRL_FB_INFO_RAM_TYPE_DDR2 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR2 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR3 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR4 + * NV2080_CTRL_FB_INFO_RAM_TYPE_DDR3 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5X + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6X + * NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR2 + * NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR4 + * NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR5 + * NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT + * This index is used to request the number of FB banks on the associated + * subdevice. + * NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT + * This index is used to request the offset relative to the start of the + * overlay surface(s), in bytes, at which scanout should happen if the + * primary and the overlay surfaces are all aligned on large page + * boundaries. + * NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_SPACE_SIZE_KB + * This index is used to request the size of the GPU's virtual address + * space in kilobytes. + * NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_HEAP_SIZE_KB + * This index is used to request the size of the GPU's virtual address + * space heap (minus RM-reserved space) in kilobytes. + * NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_MAPPBLE_SIZE_KB + * This index is used to request the size of the GPU's BAR1 mappable + * virtual address space in kilobytes. + * NV2080_CTRL_FB_INFO_INDEX_EFFECTIVE_BW + * This index is deprecated and returns a zero value. + * NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK + * This index is used to request the mask of currently active partitions. + * Each active partition has an ID that's equivalent to the corresponding + * bit position in the mask. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE + * This index is used to request the amount of total RAM in kilobytes + * reserved for internal RM allocations on Vista. This will need to + * be subtracted from the total heap size to get the amount available to + * KMD. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION + * This index is used to distinguish between different memory + * configurations.
+ * NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN + * This index is used to check if the FB is functional. + * NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT + * This index is used to get the number of FBPs on the subdevice. This + * field is not to be confused with + * NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT (returns number of FBPAs). + * Starting with Fermi the term partition is ambiguous; both FBP + * and FBPA mean FB partitions. The FBPA is the low level DRAM controller, + * while an FBP is the aggregation of one or more FBPAs, L2, ROP, and some + * other units. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_L2CACHE_SIZE + * This index is used to get the size of the L2 cache in bytes. + * A value of zero indicates that the L2 cache isn't supported on the + * associated subdevice. + * NV2080_CTRL_FB_INFO_INDEX_MEMORYINFO_VENDOR_ID + * This index is used to get the memory vendor ID information from + * the Memory Information Table in the VBIOS. Legal memory Vendor ID + * values include: + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_UNKNOWN + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_RESERVED + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_SAMSUNG + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_QIMONDA + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ELPIDA + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ETRON + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_NANYA + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_HYNIX + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MOSEL + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_WINBOND + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ESMT + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MICRON + * NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE + * This index is used to request the amount of unused bar1 space. The + * data returned is a value in KB. It is not guaranteed to be entirely + * accurate since it is a snapshot at a particular time and can + * change quickly. + * NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE + * This index is used to request the size of the largest unused contiguous + * block in bar1 space. The data returned is a value in KB. It is not + * guaranteed to be entirely accurate since it is a snapshot at a particular + * time and can change quickly. + * NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE + * This index is used to request the amount of usable framebuffer memory in + * kilobytes physically present on the associated subdevice. This + * value will never exceed the value reported by + * NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_LTC_COUNT + * Returns the active LTC count across all active FBPs. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_LTS_COUNT + * Returns the active LTS count across all active LTCs. + * This is an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned.
+ * NV2080_CTRL_FB_INFO_INDEX_PSEUDO_CHANNEL_MODE + * This is used to identify if pseudo-channel mode is enabled for HBM. + * NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE + * This is used by WDDM-KMD to determine whether and how much BAR1 RM + * reserved for smooth transition. + * NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE + * Returns the total size of all dynamically offlined pages in KiB. + * NV2080_CTRL_FB_INFO_INDEX_1TO1_COMPTAG_ENABLED + * Returns true if 1to1 comptag is enabled. + * NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE + * Returns the total size of the memory (FB) that will be saved/restored + * during a save/restore cycle. + * NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT + * Returns true if page retirement is allowed. + * NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED + * Returns true if the poison fuse is enabled. + * NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED + * Returns true if ECC is enabled for FBPA. + * NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED + * Returns true if dynamic page blacklisting is enabled. + * NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED + * Returns true if 64KB mapping on BAR1 is force-enabled. + * NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE + * Returns the P2P mailbox size to be allocated by the client. + * Returns 0 if the P2P mailbox is allocated by RM. + * NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT_SIZE + * Returns the P2P mailbox alignment requirement. + * Returns 0 if the P2P mailbox is allocated by RM. + * NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB + * Returns the P2P mailbox max offset requirement. + * Returns 0 if the P2P mailbox is allocated by RM. + * NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_TOTAL_KB + * Returns total protected memory when memory protection is enabled. + * Returns 0 when memory protection is not enabled. + * NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_FREE_KB + * Returns protected memory available for allocation when memory + * protection is enabled. + * Returns 0 when memory protection is not enabled. + * NV2080_CTRL_FB_INFO_INDEX_ECC_STATUS_SIZE + * Returns the ECC status size (corresponds to subpartitions or channels + * depending on architecture/memory type).
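+ *
+ * As an illustrative sketch (not part of the original interface text): a
+ * client queries these attributes by filling in one NV2080_CTRL_FB_INFO
+ * entry per index of interest and passing the list to
+ * NV2080_CTRL_CMD_FB_GET_INFO(_V2) below; RM fills in the data fields.
+ *
+ *   NV2080_CTRL_FB_INFO fbInfo[2];
+ *   fbInfo[0].index = NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE;
+ *   fbInfo[1].index = NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE;
+ *   // on success, fbInfo[i].data holds each requested value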
+ */ +typedef struct NV2080_CTRL_FB_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_FB_INFO; + +/* valid fb info index values */ +#define NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT (0x00000000U) // Deprecated +#define NV2080_CTRL_FB_INFO_INDEX_COMPRESSION_SIZE (0x00000001U) +#define NV2080_CTRL_FB_INFO_INDEX_DRAM_PAGE_STRIDE (0x00000002U) +#define NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT (0x00000003U) +#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT (0x00000004U) +#define NV2080_CTRL_FB_INFO_INDEX_BAR1_SIZE (0x00000005U) +#define NV2080_CTRL_FB_INFO_INDEX_BANK_SWIZZLE_ALIGNMENT (0x00000006U) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE (0x00000007U) +#define NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE (0x00000008U) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE (0x00000009U) +#define NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE (0x0000000AU) +#define NV2080_CTRL_FB_INFO_INDEX_BUS_WIDTH (0x0000000BU) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_CFG (0x0000000CU) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE (0x0000000DU) +#define NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT (0x0000000EU) +#define NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT (0x0000000FU) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_SPACE_SIZE_KB (0x0000000FU) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_HEAP_SIZE_KB (0x0000000FU) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_MAPPBLE_SIZE_KB (0x0000000FU) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_EFFECTIVE_BW (0x0000000FU) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_FB_TAX_SIZE_KB (0x00000010U) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_BASE_KB (0x00000011U) +#define NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_SIZE_KB (0x00000012U) +#define NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_BASE_KB (0x00000013U) +#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK (0x00000014U) +#define NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE (0x00000015U) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE (0x00000016U) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION (0x00000017U) +#define NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN (0x00000018U) +#define NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT (0x00000019U) +#define NV2080_CTRL_FB_INFO_INDEX_FBP_MASK (0x0000001AU) +#define NV2080_CTRL_FB_INFO_INDEX_L2CACHE_SIZE (0x0000001BU) +#define NV2080_CTRL_FB_INFO_INDEX_MEMORYINFO_VENDOR_ID (0x0000001CU) +#define NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE (0x0000001DU) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_START (0x0000001EU) +#define NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE (0x0000001FU) +#define NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE (0x00000020U) +#define NV2080_CTRL_FB_INFO_INDEX_TRAINIG_2T (0x00000021U) +#define NV2080_CTRL_FB_INFO_INDEX_LTC_COUNT (0x00000022U) +#define NV2080_CTRL_FB_INFO_INDEX_LTS_COUNT (0x00000023U) +#define NV2080_CTRL_FB_INFO_INDEX_L2CACHE_ONLY_MODE (0x00000024U) +#define NV2080_CTRL_FB_INFO_INDEX_PSEUDO_CHANNEL_MODE (0x00000025U) +#define NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE (0x00000026U) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE (0x00000027U) +#define NV2080_CTRL_FB_INFO_INDEX_1TO1_COMPTAG_ENABLED (0x00000028U) +#define NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE (0x00000029U) +#define NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT (0x0000002AU) +#define NV2080_CTRL_FB_INFO_INDEX_LTC_MASK (0x0000002BU) +#define 
NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED (0x0000002CU) +#define NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED (0x0000002DU) +#define NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED (0x0000002EU) +#define NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED (0x0000002FU) +#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE (0x00000030U) +#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT (0x00000031U) +#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB (0x00000032U) +#define NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_TOTAL_KB (0x00000033U) +#define NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_FREE_KB (0x00000034U) +#define NV2080_CTRL_FB_INFO_INDEX_ECC_STATUS_SIZE (0x00000035U) +#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE (0x00000036U) + +#define NV2080_CTRL_FB_INFO_INDEX_MAX (0x35U) /* finn: Evaluated from "(NV2080_CTRL_FB_INFO_MAX_LIST_SIZE - 1)" */ + +/* valid fb RAM type values */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_UNKNOWN (0x00000000U) +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDRAM (0x00000001U) +#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR1 (0x00000002U) /* SDDR and GDDR (aka DDR1 and GDDR1) */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR2 (0x00000003U) /* SDDR2 Used on NV43 and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR2 NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR2 /* Deprecated alias */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR2 (0x00000004U) /* GDDR2 Used on NV30 and some NV36 */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR3 (0x00000005U) /* GDDR3 Used on NV40 and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR4 (0x00000006U) /* GDDR4 Used on G80 and later (deprecated) */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR3 (0x00000007U) /* SDDR3 Used on G9x and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR3 NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR3 /* Deprecated alias */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5 (0x00000008U) /* GDDR5 Used on GT21x and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR2 (0x00000009U) /* LPDDR (Low Power SDDR) used on T2x and later. 
*/ + + +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR4 (0x0000000CU) /* SDDR4 Used on Maxwell and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR4 (0x0000000DU) /* LPDDR (Low Power SDDR) used on T21x and later.*/ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM1 (0x0000000EU) /* HBM1 (High Bandwidth Memory) used on GP100 */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM2 (0x0000000FU) /* HBM2 (High Bandwidth Memory-pseudo channel) */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5X (0x00000010U) /* GDDR5X Used on GP10x */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6 (0x00000011U) /* GDDR6 Used on TU10x */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6X (0x00000012U) /* GDDR6X Used on GA10x */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR5 (0x00000013U) /* LPDDR (Low Power SDDR) used on T23x and later.*/ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM3 (0x00000014U) /* HBM3 (High Bandwidth Memory) v3 */ + +/* valid RAM LOCATION types */ +#define NV2080_CTRL_FB_INFO_RAM_LOCATION_GPU_DEDICATED (0x00000000U) +#define NV2080_CTRL_FB_INFO_RAM_LOCATION_SYS_SHARED (0x00000001U) +#define NV2080_CTRL_FB_INFO_RAM_LOCATION_SYS_DEDICATED (0x00000002U) + +/* valid Memory Vendor ID values */ +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_SAMSUNG (0x00000001U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_QIMONDA (0x00000002U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ELPIDA (0x00000003U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ETRON (0x00000004U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_NANYA (0x00000005U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_HYNIX (0x00000006U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MOSEL (0x00000007U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_WINBOND (0x00000008U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ESMT (0x00000009U) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MICRON (0x0000000FU) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_UNKNOWN (0xFFFFFFFFU) + +#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_UNSUPPORTED (0x00000000U) +#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_DISABLED (0x00000001U) +#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_ENABLED (0x00000002U) + +/** + * NV2080_CTRL_CMD_FB_GET_INFO + * + * This command returns fb engine information for the associated GPU. + * Requests to retrieve fb information use a list of one or more + * NV2080_CTRL_FB_INFO structures. + * + * fbInfoListSize + * This field specifies the number of entries on the caller's + * fbInfoList. + * fbInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the fb information is to be returned. + * This buffer must be at least as big as fbInfoListSize multiplied + * by the size of the NV2080_CTRL_FB_INFO structure. 
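+ *
+ * An illustrative call sequence (a sketch; it assumes a client-side RM
+ * control entry point such as NvRmControl() is available, and uses the V2
+ * variant defined below, which embeds the list and avoids pointer
+ * marshaling):
+ *
+ *   NV2080_CTRL_FB_GET_INFO_V2_PARAMS params = { 0 };
+ *   params.fbInfoListSize      = 1;
+ *   params.fbInfoList[0].index = NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE;
+ *   status = NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_FB_GET_INFO_V2,
+ *                        &params, sizeof(params));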
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_FB_GET_INFO (0x20801301U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_FB_GET_INFO_PARAMS { + NvU32 fbInfoListSize; + NV_DECLARE_ALIGNED(NvP64 fbInfoList, 8); +} NV2080_CTRL_FB_GET_INFO_PARAMS; + +#define NV2080_CTRL_CMD_FB_GET_INFO_V2 (0x20801303U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_INFO_V2_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_FB_GET_INFO_V2_PARAMS { + NvU32 fbInfoListSize; + NV2080_CTRL_FB_INFO fbInfoList[NV2080_CTRL_FB_INFO_MAX_LIST_SIZE]; +} NV2080_CTRL_FB_GET_INFO_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_TILE_ADDRESS_INFO + * + * This command returns tile addressing information. + * + * StartAddr + * This parameter returns BAR1 plus the size of the local FB. + * SpaceSize + * This parameter returns the BAR1 aperture size less the size of the + * local FB. + * + * Note that both parameters will contain zero if there is no system tile + * address space. + */ +#define NV2080_CTRL_CMD_FB_GET_TILE_ADDRESS_INFO (0x20801302U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2" */ + +typedef struct NV2080_CTRL_FB_GET_SYSTEM_TILE_ADDRESS_SPACE_INFO { + NV_DECLARE_ALIGNED(NvU64 StartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 SpaceSize, 8); +} NV2080_CTRL_FB_GET_SYSTEM_TILE_ADDRESS_SPACE_INFO; + +/* + * NV2080_CTRL_CMD_FB_GET_BAR1_OFFSET + * + * This command returns the GPU virtual address of a bar1 + * allocation, given the CPU virtual address. + * + * cpuVirtAddress + * This field specifies the associated CPU virtual address of the + * memory allocation. + * gpuVirtAddress + * The GPU virtual address associated with the allocation + * is returned in this field. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_BAR1_OFFSET (0x20801310U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS { + NV_DECLARE_ALIGNED(NvP64 cpuVirtAddress, 8); + NV_DECLARE_ALIGNED(NvU64 gpuVirtAddress, 8); +} NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS; + +/* + * Note: Returns Zeros if no System carveout address info + * + * NV2080_CTRL_CMD_FB_GET_CARVEOUT_ADDRESS_INFO + * + * This command returns FB carveout address space information + * + * StartAddr + * Returns the system memory address of the start of carveout space. + * SpaceSize + * Returns the size of carveout space. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_FB_GET_CARVEOUT_ADDRESS_INFO (0x2080130bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO_MESSAGE_ID (0xBU) + +typedef struct NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO { + NV_DECLARE_ALIGNED(NvU64 StartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 SpaceSize, 8); +} NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO; + +/* + * NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED + * + * This command returns the failure counts for calibration. + * + * flags + * Just one for now -- whether to reset the counts. + * driveStrengthRiseCount + * This parameter specifies the failure count for drive strength rising. + * driveStrengthFallCount + * This parameter specifies the failure count for drive strength falling. + * driveStrengthTermCount + * This parameter specifies the failure count for drive strength + * termination. + * slewStrengthRiseCount + * This parameter specifies the failure count for slew strength rising. + * slewStrengthFallCount + * This parameter specifies the failure count for slew strength falling. + * slewStrengthTermCount + * This parameter specifies the failure count for slew strength + * termination. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INVALID_PARAM_STRUCT + * NVOS_STATUS_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED (0x2080130cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS { + NvU32 flags; + NvU32 driveStrengthRiseCount; + NvU32 driveStrengthFallCount; + NvU32 driveStrengthTermCount; + NvU32 slewStrengthRiseCount; + NvU32 slewStrengthFallCount; +} NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS; + +/* valid flags parameter values */ +#define NV2080_CTRL_CMD_FB_GET_CAL_FLAG_NONE (0x00000000U) +#define NV2080_CTRL_CMD_FB_GET_CAL_FLAG_RESET (0x00000001U) + +/* + * NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_ALLOWED + * + * This command specifies to RM if the scanout compaction feature is allowed + * or not in the current configuration. In hybrid mode when the dGPU is + * rendering the image, the dGPU blit to the scanout surface happens without + * the mGPU's knowledge (directly to system memory), which results in stale + * compacted data resulting in corruption. + * + * This control call can be used to disable the compaction whenever the KMD + * (client) is switching to the perf mode in Hybrid i.e., whenever there is a + * possibility of the dGPU doing a blit to the mGpu scanout surface. Compaction + * can be enabled when the system is back in hybrid power mode as the mGpu will + * be rendering the image. + * + * allowCompaction + * This parameter specifies if the display compaction feature is allowed + * or not allowed. + * immediate + * This parameter specifies whether compaction has to be enabled or + * disabled immediately (based on the value of allowCompaction field) or + * during the next modeset.
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INVALID_PARAM_STRUCT + * NVOS_STATUS_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_ALLOWED (0x2080130dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0xD" */ // Deprecated, removed from RM + +typedef struct NV2080_CTRL_FB_SET_SCANOUT_COMPACTION_ALLOWED_PARAMS { + NvU32 allowCompaction; + NvU32 immediate; +} NV2080_CTRL_FB_SET_SCANOUT_COMPACTION_ALLOWED_PARAMS; + +/* valid allowCompaction values */ +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_ALLOW (0x00000001U) +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_DISALLOW (0x00000000U) + +/* valid immediate values */ +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_IMMEDIATE (0x00000001U) +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_NOT_IMMEDIATE (0x00000000U) + +/* + * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE + * + * This command flushes a cache on the GPU which all memory accesses go + * through. The types of flushes supported by this API may not be supported by + * all hardware. Attempting an unsupported flush type will result in an error. + * + * addressArray + * An array of physical addresses in the aperture defined by + * NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE. Each entry points to a + * contiguous block of memory of size memBlockSizeBytes. The addresses are + * aligned down to addressAlign before coalescing adjacent addresses and + * sending flushes to hardware. + * addressAlign + * Used to align-down addresses held in addressArray. A value of 0 will be + * forced to 1 to avoid a divide by zero. Value is treated as minimum + * alignment and any hardware alignment requirements above this value will + * be honored. + * addressArraySize + * The number of entries in addressArray. + * memBlockSizeBytes + * The size in bytes of each memory block pointed to by addressArray. + * flags + * Contains flags to control various aspects of the flush. Valid values + * are defined in NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS*. Not all flags are + * valid for all defined FLUSH_MODEs or all GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + * + * See Also: + * NV0080_CTRL_CMD_DMA_FLUSH + * Performs flush operations in broadcast for the GPU cache and other hardware + * engines. Use this call if you want to flush all GPU caches in a + * broadcast device. + * NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE + * Flushes memory associated with a single allocation if the hardware + * supports it. Use this call if you want to flush a single allocation and + * you have a memory object describing the physical memory.
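+ *
+ * Illustrative flag composition (a sketch; it assumes the DRF_DEF() helper
+ * from nvmisc.h, which shifts a field value into its declared bit range):
+ *
+ *   params.flags =
+ *       DRF_DEF(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _APERTURE, _VIDEO_MEMORY) |
+ *       DRF_DEF(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _WRITE_BACK, _YES);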
+ */ +#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE (0x2080130eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_MAX_ADDRESSES 500U + +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 addressArray[NV2080_CTRL_FB_FLUSH_GPU_CACHE_MAX_ADDRESSES], 8); + NvU32 addressArraySize; + NvU32 addressAlign; + NV_DECLARE_ALIGNED(NvU64 memBlockSizeBytes, 8); + NvU32 flags; +} NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS; + +/* valid fields and values for flags */ +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE 1:0 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_VIDEO_MEMORY (0x00000000U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_SYSTEM_MEMORY (0x00000001U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_PEER_MEMORY (0x00000002U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK 2:2 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_NO (0x00000000U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_YES (0x00000001U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE 3:3 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_NO (0x00000000U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_YES (0x00000001U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE 4:4 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_ADDRESS_ARRAY (0x00000000U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_FULL_CACHE (0x00000001U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH 5:5 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH_NO (0x00000000U) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH_YES (0x00000001U) + +/* + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY (deprecated; use NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2 instead) + * + * These commands access the cache allocation policy on a specific + * engine, if supported. + * + * engine + * Specifies the target engine. Possible values are defined in + * NV2080_ENGINE_TYPE. + * allocPolicy + * Specifies the read/write allocation policy of the cache on the specified + * engine. Possible values are defined in + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS and + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES. + * + */ +typedef struct NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS { + NvU32 engine; + NvU32 allocPolicy; +} NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS; + +/* valid values for allocPolicy */ +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS 0:0 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS_NO (0x00000000U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS_YES (0x00000001U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES 1:1 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES_NO (0x00000000U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES_YES (0x00000001U) + + +/* + * NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY + * + * This command is deprecated. + * Use NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY_V2 instead. + * + * This command sets the state of the cache allocation policy on a specific + * engine, if supported. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY (0x2080130fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0xF" */ + +/* + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS + * + * These commands access the cache allocation policy on a specific + * client, if supported. + * + * count + * Specifies the number of entries in the entry array. + * entry + * Specifies an array of allocation policy entries. + * + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY + * + * client + * Specifies the target client. Possible values are defined in + * NV2080_CLIENT_TYPE_*. + * allocPolicy + * Specifies the read/write allocation policy of the cache on the specified + * engine. Possible values are defined in + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS and + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES. + * + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY_SIZE + * + * Specifies the maximum number of allocation policy entries allowed. + */ +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY_SIZE 11U + +typedef struct NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY { + NvU32 client; + NvU32 allocPolicy; +} NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY; + +typedef struct NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS { + NvU32 count; + NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY entry[NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY_SIZE]; +} NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS; + +/* valid values for allocPolicy */ +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS 0:0 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_DISABLE (0x00000000U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ENABLE (0x00000001U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ALLOW 1:1 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ALLOW_NO (0x00000000U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ALLOW_YES (0x00000001U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES 2:2 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_DISABLE (0x00000000U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ENABLE (0x00000001U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ALLOW 3:3 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ALLOW_NO (0x00000000U) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ALLOW_YES (0x00000001U) + + +/* + * NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY_V2 + * + * This command sets the state of the cache allocation policy on a specific + * engine, if supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY_V2 (0x20801318U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x18" */ + +/* + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY (deprecated; use NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY_V2 instead) + * + * This command gets the state of the cache allocation policy on a specific + * engine, if supported.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY (0x20801312U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x12" */ + +/* + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY_V2 + * + * This command gets the state of the cache allocation policy on a specific + * engine, if supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY_V2 (0x20801319U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x19" */ + + +/* + * NV2080_CTRL_CMD_FB_IS_KIND + * + * This command is used to perform various operations like 'IS_KIND_VALID', + * 'IS_KIND_COMPRESSIBLE' on the kind passed by the caller. The operation to be + * performed should be passed in the 'operation' parameter of + * NV2080_CTRL_FB_IS_KIND_PARAMS, the kind on which the operation is to be + * performed should be passed in the 'kind' parameter. The result of the + * operation (true/false) will be returned in the 'result' parameter. + * + * operation + * Specifies what operation is to be performed on the kind passed by the + * caller. The supported operations are: + * NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure is + * supported for this GPU. Returns nonzero value in 'result' parameter + * if the input kind is supported, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure is + * compressible. Returns nonzero value in 'result' parameter if the + * input kind is compressible, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_1 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure supports + * 1 bit compression. Returns nonzero value in 'result' parameter if + * kind supports 1 bit compression, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_2 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure supports + * 2 bit compression. Returns nonzero value in 'result' parameter if + * kind supports 2 bit compression, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_4 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure supports + * 4 bit compression. Returns nonzero value in 'result' parameter if + * kind supports 4 bit compression, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports ZBC. Returns nonzero value in 'result' parameter if the + * input kind supports ZBC, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_1 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports 1 bit ZBC. Returns nonzero value in 'result' parameter if + * the input kind supports 1 bit ZBC, else returns zero in the result.
+ * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_2 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports 2 bit ZBC. Returns nonzero value in 'result' parameter if + * the input kind supports 2 bit ZBC, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_4 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports 4 bit ZBC. Returns nonzero value in 'result' parameter if + * the input kind supports 4 bit ZBC, else returns zero in the result. + * kind + * Specifies the kind on which the operation is to be carried out. The + * legal range of values for the kind parameter is different on different + * GPUs. For example, on Fermi the valid range is 0x00 to 0xfe. Still, some + * values inside this legal range can be invalid, i.e., not defined. + * So it's always better to first check whether a particular kind is supported + * on the current GPU with the 'NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED' + * operation. + * result + * Upon return, this parameter will hold the result (true/false) of the + * operation performed on the kind. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_IS_KIND (0x20801313U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_IS_KIND_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_IS_KIND_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_FB_IS_KIND_PARAMS { + NvU32 operation; + NvU32 kind; + NvBool result; +} NV2080_CTRL_FB_IS_KIND_PARAMS; + +/* valid values for operation */ +#define NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED (0x00000000U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE (0x00000001U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_1 (0x00000002U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_2 (0x00000003U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_4 (0x00000004U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC (0x00000005U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_1 (0x00000006U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_2 (0x00000007U) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_4 (0x00000008U) + +/** + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO + * + * This command returns the state of a cache which all GPU memory accesses go + * through. + * + * powerState + * Returns the power state of the cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE. + * + * writeMode + * Returns the write mode of the cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE. + * + * bypassMode + * Returns the bypass mode of the L2 cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE. + * + * rcmState + * Returns the RCM state of the cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE.
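+ *
+ * For instance (an illustrative sketch), after a successful call a client
+ * could inspect the returned modes directly:
+ *
+ *   if (params.writeMode == NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITEBACK)
+ *   {
+ *       // the cache is operating in write-back mode
+ *   }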
+ * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO (0x20801315U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS { + NvU32 powerState; + NvU32 writeMode; + NvU32 bypassMode; + NvU32 rcmState; +} NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS; + +/* valid values for powerState */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE_ENABLED (0x00000000U) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE_DISABLED (0x00000001U) +/* valid values for writeMode */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITETHROUGH (0x00000000U) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITEBACK (0x00000001U) +/* valid values for bypassMode */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE_DISABLED (0x00000000U) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE_ENABLED (0x00000001U) +/* valid values for rcmState */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_FULL (0x00000000U) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_TRANSITIONING (0x00000001U) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_REDUCED (0x00000002U) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_ZERO_CACHE (0x00000003U) + +/* + * NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY + * + * These commands access the cache promotion policy on a specific + * engine, if supported by the hardware. + * + * Cache promotion refers to the GPU promoting a memory read to a larger + * size to preemptively fill the cache so future reads to nearby memory + * addresses will hit in the cache. + * + * engine + * Specifies the target engine. Possible values are defined in + * NV2080_ENGINE_TYPE. + * promotionPolicy + * Specifies the promotion policy of the cache on the specified + * engine. Possible values are defined by + * NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_*. These values are in terms + * of the hardware cache line size. + * + */ +typedef struct NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_PARAMS { + NvU32 engine; + NvU32 promotionPolicy; +} NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_PARAMS; + +/* valid values for promotionPolicy */ +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_NONE (0x00000000U) +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_QUARTER (0x00000001U) +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_HALF (0x00000002U) +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_FULL (0x00000003U) + + +/* + * NV2080_CTRL_CMD_FB_SET_GPU_CACHE_PROMOTION_POLICY + * + * This command sets the cache promotion policy on a specific engine, if + * supported by the hardware. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_SET_GPU_CACHE_PROMOTION_POLICY (0x20801316U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x16" */ // Deprecated, removed from RM + + +/* + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_PROMOTION_POLICY + * + * This command gets the cache promotion policy on a specific engine, if + * supported by the hardware.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_PROMOTION_POLICY (0x20801317U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x17" */ // Deprecated, removed from RM + +/* + * NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO + * + * This command returns the FB memory region characteristics. + * + * numFBRegions + * Number of valid regions returned in fbRegion[] + * fbRegion[].base + * Base address of region. The first valid address in the range + * [base..limit]. + * fbRegion[].limit + * Last/end address of region. The last valid address in the range + * [base..limit]. + * (limit - base + 1) = size of the region + * fbRegion[].reserved + * Amount of memory that RM speculatively needs within the region. A + * client doing its own memory management should leave at least this much + * memory available for RM use. This particularly applies to a driver + * model like LDDM. + * fbRegion[].performance + * Relative performance of this region compared to other regions. + * The definition is vague, and only connotes relative bandwidth or + * performance. The higher the value, the higher the performance. + * fbRegion[].supportCompressed + * TRUE if compressed surfaces/kinds are supported + * FALSE if compressed surfaces/kinds are not allowed to be allocated in + * this region + * fbRegion[].supportISO + * TRUE if ISO surfaces/kinds are supported (Display, cursor, video) + * FALSE if ISO surfaces/kinds are not allowed to be allocated in this + * region + * fbRegion[].bProtected + * TRUE if this region is a protected memory region. If true only + * allocations marked as protected (NVOS32_ALLOC_FLAGS_PROTECTED) can be + * allocated in this region. + * fbRegion[].blackList[] - DEPRECATED: Use supportISO + * TRUE for each NVOS32_TYPE_IMAGE* that is NOT allowed in this region. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO (0x20801320U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U + +typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NV_DECLARE_ALIGNED(NvU64 reserved, 8); + NvU32 performance; + NvBool supportCompressed; + NvBool supportISO; + NvBool bProtected; + NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList; +} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { + NvU32 numFBRegions; + NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8); +} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_OFFLINE_PAGES + * + * This command adds video memory page addresses to the list of offlined + * addresses so that they're not allocated to any client. The newly offlined + * addresses take effect after a reboot.
+ * + * offlined + * This input parameter is an array of NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO + * structures, containing the video memory physical page numbers that + * are to be blacklisted. This array can hold a maximum of NV2080_CTRL_FB_ + * OFFLINED_PAGES_MAX_PAGES address pairs. Valid entries are adjacent. + * pageSize + * This input parameter contains the size of the page that is to be + * blacklisted. + * validEntries + * This input parameter specifies the number of valid entries in the + * offlined array. + * numPagesAdded + * This output parameter specifies how many of the validEntries were + * actually offlined. If numPagesAdded < validEntries, it + * means that only addresses from offlined[0] to offlined[numPagesAdded - 1] + * were added to the list of offlined addresses. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_OFFLINE_PAGES (0x20801321U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES (0x00000040U) +#define NV2080_CTRL_FB_OFFLINED_PAGES_INVALID_ADDRESS (0xffffffffffffffffULL) +#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_4K (0x00000000U) +#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_64K (0x00000001U) +#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_128K (0x00000002U) + +/* + * NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO + * + * pageAddressWithEccOn + * Address of the memory page retired when ECC is enabled on the board. + * pageAddressWithEccOff + * Address of the memory page retired when ECC is disabled on the board. + * rbcAddress + * Row/Bank/Column Address of the faulty memory which caused the page to + * be retired. + * source + * The reason for the page to be retired. + * status + * Non-exceptional reasons for a page retirement failure + * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_OK + * No error + * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_PENDING_RETIREMENT + * The given address is already pending retirement or has + * been retired during the current driver run. The page + * will be offlined during the next driver run. + * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_BLACKLISTING_FAILED + * The given page was retired on a previous driver run, + * so it should not be accessible unless offlining failed. + * Failing to offline a page is strongly indicative of a + * driver offlining bug.
+ *       NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_TABLE_FULL
+ *         The PBL (page blacklist) is full and no more pages can be retired
+ *       NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_INTERNAL_ERROR
+ *         Internal driver error
+ *
+ */
+
+
+
+typedef struct NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO {
+    NV_DECLARE_ALIGNED(NvU64 pageAddressWithEccOn, 8);
+    NV_DECLARE_ALIGNED(NvU64 pageAddressWithEccOff, 8);
+    NvU32 rbcAddress;
+    NvU32 source;
+    NvU32 status;
+    NvU32 timestamp;
+} NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO;
+
+/* valid values for source */
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE (0x00000002U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE (0x00000004U)
+
+
+
+/* valid values for status */
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_OK (0x00000000U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_PENDING_RETIREMENT (0x00000001U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_BLACKLISTING_FAILED (0x00000002U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_TABLE_FULL (0x00000003U)
+#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_INTERNAL_ERROR (0x00000004U)
+
+/* deprecated */
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_MULTIPLE_SBE NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE
+#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DBE NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE
+
+
+#define NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES], 8);
+    NvU32 pageSize;
+    NvU32 validEntries;
+    NvU32 numPagesAdded;
+} NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES
+ *
+ * This command returns the list of video memory page addresses in the
+ * Inforom's blacklist.
+ *
+ *   offlined
+ *     This output parameter is an array of NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO
+ *     structures, containing the video memory physical page numbers that
+ *     are blacklisted. This array can hold a maximum of
+ *     NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES address pairs. Valid entries
+ *     are adjacent. The array also contains the Row/Bank/Column address and
+ *     source.
+ *   validEntries
+ *     This output parameter specifies the number of valid entries in the
+ *     offlined array.
+ *   bRetirementPending (DEPRECATED, use retirementPending instead)
+ *     This output parameter returns whether any pages on the list are
+ *     pending retirement.
+ *   retirementPending
+ *     Communicates to the caller whether retirement updates are pending and
+ *     the reason for the updates. Possible fields are:
+ *     NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_*:
+ *       NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE:
+ *         Indicates whether pages are pending retirement due to SBE.
+ *       NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE:
+ *         Indicates whether pages are pending retirement due to DBE. A
+ *         driver reload is needed to retire the bad memory pages and allow
+ *         compute applications to run.
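+ *
+ * Illustrative check of the DBE-pending field (sketch only; NvRmControl is
+ * assumed as the control entry point, and REF_VAL from nvmisc.h is assumed
+ * to be available; handle setup is omitted):
+ *
+ *   NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS params;
+ *   memset(&params, 0, sizeof(params));
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES,
+ *                        &params, sizeof(params));
+ *   if ((status == NV_OK) &&
+ *       (REF_VAL(NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE,
+ *                params.retirementPending) ==
+ *        NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE_TRUE))
+ *   {
+ *       // A driver reload is required before compute work can run.
+ *   }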
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+
+
+#define NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES (0x20801322U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE        0:0
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE_FALSE  0U
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE_TRUE   1U
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE        1:1
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE_FALSE  0U
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE_TRUE   1U
+
+
+
+#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES], 8);
+    NvU32  validEntries;
+    NvBool bRetirementPending;
+    NvU8   retirementPending;
+} NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_QUERY_ACR_REGION
+ *
+ * This control command is used to query the secured (ACR) region allocation.
+ *
+ *   queryType
+ *     NV2080_CTRL_CMD_FB_ACR_QUERY_GET_CLIENT_REGION_STATUS: Provides the
+ *       alloc status and ACR region ID for the requesting client's region.
+ *     NV2080_CTRL_CMD_FB_ACR_QUERY_GET_REGION_PROPERTY: Returns the region
+ *       properties (read/write masks, size, client mask, physical address)
+ *       in acrRegionIdProp.
+ *     NV2080_CTRL_CMD_FB_ACR_QUERY_GET_FALCON_STATUS: Reports in falconStatus
+ *       whether the given falcon is running in LS (light secure) mode.
+ *
+ *   clientReq : struct ACR_REQUEST_PARAMS
+ *     It is used to find the allocated ACR region for that client.
+ *       clientId     : ACR client ID
+ *       reqReadMask  : read mask of the ACR region
+ *       reqWriteMask : write mask of the ACR region
+ *       regionSize   : ACR region size
+ *
+ *   clientReqStatus : struct ACR_STATUS_PARAMS
+ *     This struct stores the output for the requested ACR region.
+ *       allocStatus     : allocation status of the ACR region
+ *       regionId        : ACR region ID
+ *       physicalAddress : physical address on FB
+ *
+ *   NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE
+ *     NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_NONE : control command executed
+ *       successfully
+ *     NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_INVALID_CLIENT_REQUEST : check the
+ *       parameters of the ACR client request
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_QUERY_ACR_REGION (0x20801325U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS_MESSAGE_ID" */
+
+//
+// An ACR region can be created with the RMCreateAcrRegion[1|2] regkey or the
+// MODS -acr[1|2]_size argument. The client ID for such a region is 2 in RM.
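+//
+// An illustrative status query (sketch only; NvRmControl is assumed as the
+// control entry point and handle setup is omitted):
+//
+//   NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS p;
+//   memset(&p, 0, sizeof(p));
+//   p.queryType          = NV2080_CTRL_CMD_FB_ACR_QUERY_GET_CLIENT_REGION_STATUS;
+//   p.clientReq.clientId = NV2080_CTRL_CMD_FB_ACR_CLIENT_ID;
+//   status = NvRmControl(hClient, hSubdevice,
+//                        NV2080_CTRL_CMD_FB_QUERY_ACR_REGION,
+//                        &p, sizeof(p));
+//   // On NV_OK, p.errorCode and p.clientReqStatus describe the region.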
+//
+#define NV2080_CTRL_CMD_FB_ACR_CLIENT_ID 2U
+
+typedef enum NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE {
+    NV2080_CTRL_CMD_FB_ACR_QUERY_GET_CLIENT_REGION_STATUS = 0,
+    NV2080_CTRL_CMD_FB_ACR_QUERY_GET_REGION_PROPERTY = 1,
+    NV2080_CTRL_CMD_FB_ACR_QUERY_GET_FALCON_STATUS = 2,
+} NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE;
+
+typedef enum NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE {
+    NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_NONE = 0,
+    NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_INVALID_CLIENT_REQUEST = 1,
+} NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE;
+
+typedef struct ACR_REQUEST_PARAMS {
+    NvU32 clientId;
+    NvU32 reqReadMask;
+    NvU32 reqWriteMask;
+    NvU32 regionSize;
+} ACR_REQUEST_PARAMS;
+
+typedef struct ACR_REGION_ID_PROP {
+    NvU32 regionId;
+    NvU32 readMask;
+    NvU32 writeMask;
+    NvU32 regionSize;
+    NvU32 clientMask;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8);
+} ACR_REGION_ID_PROP;
+
+typedef struct ACR_STATUS_PARAMS {
+    NvU32 allocStatus;
+    NvU32 regionId;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8);
+} ACR_STATUS_PARAMS;
+
+typedef struct ACR_REGION_HANDLE {
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hMemory;
+    NvU32    hClass;
+    NvHandle hDevice;
+} ACR_REGION_HANDLE;
+
+typedef struct ACR_FALCON_LS_STATUS {
+    NvU16  falconId;
+    NvBool bIsInLs;
+} ACR_FALCON_LS_STATUS;
+
+#define NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS_MESSAGE_ID (0x25U)
+
+typedef struct NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS {
+    NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE queryType;
+    NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE errorCode;
+    NV_DECLARE_ALIGNED(ACR_REGION_ID_PROP acrRegionIdProp, 8);
+    ACR_REQUEST_PARAMS clientReq;
+    NV_DECLARE_ALIGNED(ACR_STATUS_PARAMS clientReqStatus, 8);
+    ACR_REGION_HANDLE handle;
+    ACR_FALCON_LS_STATUS falconStatus;
+} NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_CLEAR_OFFLINED_PAGES
+ *
+ * This command clears offlined video memory page addresses from the Inforom.
+ *
+ *   sourceMask
+ *     This is a bit mask of NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE. Pages
+ *     offlined from the specified sources will be cleared/removed from the
+ *     Inforom PBL object denylist.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_CLEAR_OFFLINED_PAGES (0x20801326U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x26U)
+
+typedef struct NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS {
+    NvU32 sourceMask;
+} NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO
+ *
+ * Gets a pointer to the object of class CompBitCopy, which is used for
+ * swizzling compression bits in the compression backing store. The caller is
+ * expected to have the appropriate headers for class CompBitCopy. Also
+ * retrieves values of some parameters needed to call the compbit swizzling
+ * method.
+ *
+ * @params[out] void *pCompBitCopyObj
+ *     Opaque pointer to object of class CompBitCopy
+ * @params[out] void *pSwizzleParams
+ *     Opaque pointer to values needed to call the compbit
+ *     swizzle method.
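+ *
+ * Illustrative retrieval sketch (NvRmControl is assumed as the control entry
+ * point; the returned pointers are only meaningful to a caller that has the
+ * CompBitCopy class headers):
+ *
+ *   NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS p;
+ *   memset(&p, 0, sizeof(p));
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO,
+ *                        &p, sizeof(p));
+ *   // On NV_OK, p.pCompBitCopyObj and p.pSwizzleParams are opaque handles.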
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO (0x20801327U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS_MESSAGE_ID (0x27U)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pCompBitCopyObj, 8);
+    NV_DECLARE_ALIGNED(NvP64 pSwizzleParams, 8);
+} NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_LTC_INFO_FOR_FBP
+ *
+ * Gets the count and mask of LTCs for a given FBP.
+ *
+ *   fbpIndex
+ *     The physical index of the FBP to get LTC info for.
+ *   ltcMask
+ *     The mask of active LTCs for the given FBP.
+ *   ltcCount
+ *     The count of active LTCs for the given FBP.
+ *   ltsMask
+ *     The mask of active LTSs for the given FBP.
+ *   ltsCount
+ *     The count of active LTSs for the given FBP.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_LTC_INFO_FOR_FBP (0x20801328U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS_MESSAGE_ID (0x28U)
+
+typedef struct NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS {
+    NvU8  fbpIndex;
+    NvU32 ltcMask;
+    NvU32 ltcCount;
+    NvU32 ltsMask;
+    NvU32 ltsCount;
+} NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS;
+
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT < Deprecated >
+ *
+ * "Sets the context" for the following CompBitCopy member functions.
+ * These are the CompBitCopy member variables that remain constant over
+ * multiple CompBitCopy member function calls and stay the same throughout
+ * a single surface eviction.
+ *
+ * @params[in] UINT64 backingStorePA;
+ *     Physical Address of the Backing Store
+ * @params[in] UINT08 *backingStoreVA;
+ *     Virtual Address of the Backing Store
+ * @params[in] UINT64 backingStoreChunkPA;
+ *     Physical Address of the "Chunk Buffer"
+ * @params[in] UINT08 *backingStoreChunkVA;
+ *     Virtual Address of the "Chunk Buffer"
+ * @params[in] UINT32 backingStoreChunkSize;
+ *     Size of the "Chunk Buffer"
+ * @params[in] UINT08 *cacheWriteBitMap;
+ *     Pointer to the bitmap indicating which parts of the "Chunk" were
+ *     updated.
+ * @params[in] bool backingStoreChunkOverfetch;
+ *     Overfetch factor.
+ * @params[in] UINT32 PageSizeSrc;
+ *     Page size of the Source Surface.
+ * @params[in] UINT32 PageSizeDest;
+ *     Page size of the Destination Surface.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT (0x20801329U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x29" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT_PARAMS {
+    NvU32 CBCBaseAddress;
+    NV_DECLARE_ALIGNED(NvU64 backingStorePA, 8);
+    NV_DECLARE_ALIGNED(NvU8 *backingStoreVA, 8);
+    NV_DECLARE_ALIGNED(NvU64 backingStoreChunkPA, 8);
+    NV_DECLARE_ALIGNED(NvU8 *backingStoreChunkVA, 8);
+    NvU32 backingStoreChunkSize;
+    NV_DECLARE_ALIGNED(NvU8 *cacheWriteBitMap, 8);
+    NvBool backingStoreChunkOverfetch;
+    NvU32 PageSizeSrc;
+    NvU32 PageSizeDest;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS < Deprecated >
+ *
+ * Retrieves the Compression and Fast Clear bits for the surface+offset given.
+ *
+ * @params[out] NvU32 *fcbits;
+ *     Fast Clear Bits returned
+ * @params[out] NvU32 *compbits;
+ *     Compression Bits returned
+ * @params[in]  NvU64 dataPhysicalStart;
+ *     Start Address of Data
+ * @params[in]  NvU64 surfaceOffset;
+ *     Offset in the surface
+ * @params[in]  NvU32 comptagLine;
+ *     Compression Tag Number
+ * @params[in]  NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS (0x2080132aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2A" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU32 *fcbits, 8);
+    NV_DECLARE_ALIGNED(NvU32 *compbits, 8);
+    NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8);
+    NvU32  comptagLine;
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS < Deprecated >
+ *
+ * Sets the Compression and Fast Clear bits for the surface+offset given.
+ *
+ * @params[in] NvU32 fcbits;
+ *     Fast Clear Bits to write
+ * @params[in] NvU32 compbits;
+ *     Compression Bits to write
+ * @params[in] NvBool writeFc;
+ *     Indicates if Fast Clear Bits should be written
+ * @params[in] NvU64 dataPhysicalStart;
+ *     Start Address of Data
+ * @params[in] NvU64 surfaceOffset;
+ *     Offset in the surface
+ * @params[in] NvU32 comptagLine;
+ *     Compression Tag Number
+ * @params[in] NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS (0x2080132bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2B" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS_PARAMS {
+    NvU32  fcbits;
+    NvU32  compbits;
+    NvBool writeFc;
+    NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8);
+    NvU32  comptagLine;
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB < Deprecated >
+ *
+ * Reads a 64KB chunk of CompBits.
+ *
+ * @params[in]  NvU64 SrcDataPhysicalStart;
+ *     Start Address of Data
+ * @params[in]  NvU32 SrcComptagLine;
+ *     Compression Tag Number
+ * @params[in]  NvU32 page64KB;
+ *     Which 64K block to read from.
+ * @params[out] NvU32 *compbitBuffer;
+ *     Buffer for CompBits read.
+ * @params[in]  NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB (0x2080132cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2C" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 SrcDataPhysicalStart, 8);
+    NvU32 SrcComptagLine;
+    NvU32 page64KB;
+    NV_DECLARE_ALIGNED(NvU32 *compbitBuffer, 8);
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB < Deprecated >
+ *
+ * Writes a 64KB chunk of CompBits.
+ *
+ * @params[in] NvU64 DstDataPhysicalStart;
+ *     Start Address of Data
+ * @params[in] NvU32 DstComptagLine;
+ *     Compression Tag Number
+ * @params[in] NvU32 page64KB;
+ *     Which 64K block to write to.
+ * @params[in] NvU32 *compbitBuffer;
+ *     Buffer of CompBits to write.
+ * @params[in] NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB (0x2080132dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2D" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 DstDataPhysicalStart, 8);
+    NvU32 DstComptagLine;
+    NvU32 page64KB;
+    NV_DECLARE_ALIGNED(NvU32 *compbitBuffer, 8);
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS < Deprecated >
+ *
+ * The PS (Performance Path, or Optimized path, or Per Slice) version
+ * of GetCompBits.
+ *
+ * @params[out] NvU32 *fcbits;
+ *     Buffer to receive Fast Clear Bits.
+ * @params[out] NvU32 *compbits;
+ *     Buffer to receive Compression Bits.
+ * @params[out] NvU32 *compCacheLine;
+ *     Buffer to receive Comp Cache Line data.
+ * @params[in]  NvU64 dataPhysicalStart;
+ *     Start Address of Data
+ * @params[in]  NvU64 surfaceOffset;
+ *     Offset in the surface
+ * @params[in]  NvU32 comptagLine;
+ *     Compression Tag Line Number
+ * @params[in]  NvU32 ROPTile_offset;
+ *     Offset in the surface of the ROP tile.
+ * @params[in]  NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ * @params[in]  NvBool getFcBits;
+ *     Indicates if fast clear bits should be returned.
+ * @params[in]  NvP64 derivedParams
+ *     Actually a CompBitDerivedParams structure.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS (0x2080132eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2E" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU32 *fcbits, 8);
+    NV_DECLARE_ALIGNED(NvU32 *compbits, 8);
+    NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8);
+    NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8);
+    NvU32  comptagLine;
+    NvU32  ROPTile_offset;
+    NvBool upper64KBCompbitSel;
+    NvBool getFcBits;
+    NV_DECLARE_ALIGNED(NvP64 derivedParams, 8);
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS < Deprecated >
+ *
+ * The PS (Performance Path, or Optimized path, or Per Slice) version
+ * of PutCompBits.
+ *
+ * @params[in] NvU32 fcbits;
+ *     Buffer with Fast Clear Bits to write.
+ * @params[in] NvU32 compbits;
+ *     Compression Bits to write.
+ * @params[in] NvBool writeFc
+ *     Indicates if Fast Clear Bits should be written.
+ * @params[in] NvU32 *compCacheLine;
+ *     Buffer with Comp Cache Line data.
+ * @params[in] NvU64 dataPhysicalStart;
+ *     Start Address of Data
+ * @params[in] NvU64 surfaceOffset;
+ *     Offset in the surface
+ * @params[in] NvU32 comptagLine;
+ *     Compression Tag Line Number
+ * @params[in] NvU32 ROPTile_offset;
+ *     Offset in the surface of the ROP tile.
+ * @params[in] NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ * @params[in] NvP64 derivedParams
+ *     Actually a CompBitDerivedParams structure.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS (0x2080132fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2F" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS_PARAMS {
+    NvU32  fcbits;
+    NvU32  compbits;
+    NvBool writeFc;
+    NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8);
+    NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8);
+    NvU32  comptagLine;
+    NvU32  ROPTile_offset;
+    NvBool upper64KBCompbitSel;
+    NV_DECLARE_ALIGNED(NvP64 derivedParams, 8);
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS < Deprecated >
+ *
+ * The PS (Performance Path, or Optimized path, or Per Slice) version
+ * of ReadCompCacheLine.
+ *
+ * @params[out] NvU32 *compCacheLine;
+ *     Buffer for the Comp Cache Line read
+ * @params[in]  NvU32 comptagLine;
+ *     Comp Tag Line Number to read
+ * @params[in]  NvU32 partition;
+ *     FB Partition of the desired Comp Cache Line
+ * @params[in]  NvU32 slice;
+ *     Slice of the desired Comp Cache Line
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS (0x20801330U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x30" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8);
+    NvU32 comptagLine;
+    NvU32 partition;
+    NvU32 slice;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS < Deprecated >
+ *
+ * The PS (Performance Path, or Optimized path, or Per Slice) version
+ * of WriteCompCacheLine.
+ *
+ * @params[in] NvU32 *compCacheLine;
+ *     Buffer for the Comp Cache Line to write
+ * @params[in] NvU32 comptagLine;
+ *     Comp Tag Line Number to write
+ * @params[in] NvU32 partition;
+ *     FB Partition of the desired Comp Cache Line
+ * @params[in] NvU32 slice;
+ *     Slice of the desired Comp Cache Line
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS (0x20801331U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x31" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8);
+    NvU32 comptagLine;
+    NvU32 partition;
+    NvU32 slice;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS < Deprecated >
+ *
+ * Used by the PS (Performance Path, or Optimized path, or Per Slice) version
+ * to retrieve the upper and lower address of the CompCacheLine.
+ *
+ * @params[out] NvU64 *minCPUAddress;
+ *     Minimum (lower bound) of the CompCacheLine.
+ * @params[out] NvU64 *maxCPUAddress;
+ *     Maximum (upper bound) of the CompCacheLine.
+ * @params[in]  NvU32 comptagLine;
+ *     CompTagLine to fetch the bounds of.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS (0x20801332U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x32" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 *minCPUAddress, 8);
+    NV_DECLARE_ALIGNED(NvU64 *maxCPUAddress, 8);
+    NvU32 comptagLine;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET < Deprecated >
+ *
+ * Used by the PS (Performance Path, or Optimized path, or Per Slice) version
+ * to retrieve the partition, slice and ROP Tile offset of the passed-in
+ * surface location.
+ *
+ * @params[out] NvU64 *part;
+ *     Partition in which the target part of the surface resides.
+ * @params[out] NvU64 *slice;
+ *     Slice in which the target part of the surface resides.
+ * @params[out] NvU64 *ropTileoffset;
+ *     Offset to the start of the ROP Tile in which the target part of
+ *     the surface resides.
+ * @params[in]  NvU64 dataPhysicalStart;
+ *     Start address of data for which part/slice/offset is desired.
+ * @params[in]  NvU64 surfaceOffset;
+ *     Byte offset of data for which part/slice/offset is desired.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET (0x20801333U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x33" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 *part, 8);
+    NV_DECLARE_ALIGNED(NvU64 *slice, 8);
+    NV_DECLARE_ALIGNED(NvU64 *ropTileoffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8);
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS < Deprecated >
+ *
+ * Used by the PS (Performance Path, or Optimized path, or Per Slice) version
+ * to create a CompBitCopy::CompBitDerivedParams object.
+ *
+ * @params[out] NvP64 derivedParams
+ *     Actually a CompBitDerivedParams structure.
+ * @params[in]  NvU32 comptagLine;
+ *     Compression Tag Line Number
+ * @params[in]  NvBool upper64KBCompbitSel;
+ *     Selects Upper or Lower 64K
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS (0x20801334U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x34" */
+
+typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 derivedParams, 8);
+    NvU32  comptagLine;
+    NvBool upper64KBCompbitSel;
+} NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1 < Deprecated >
+ *
+ * Used by MODS (and possibly other clients) to have the compbit code write
+ * directly to BAR1, rather than to an intermediate buffer.
+ *
+ * @params[in] NvBool bForceBar1;
+ *     Enables or disables direct writes to BAR1.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1 (0x20801335U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x35" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1_PARAMS { + NvBool bForceBar1; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_AMAP_CONF + * + * Fills in fields of a structure of class ConfParamsV1, which is used for + * swizzling compression bits in the compression backing store. + * The caller is expected to have the appropriate headers for class ConfParamsV1. + * + * @params[in|out] void *pAmapConfParms + * Opaque pointer to structure of values for ConfParamsV1 + * @params[in|out] void *pCbcSwizzleParms + * Opaque pointer to structure of values for CbcSwizzleParamsV1 + * + * Possible status values returned are: + * NV_OK NV_ERR_NOT_SUPPORTED + * + * pCbcSwizzleParams will be filled in with certain parameters from + * @CbcSwizzleParamsV1. However, the caller is responsible for making sure + * all parameters are filled in before using it. + */ +#define NV2080_CTRL_CMD_FB_GET_AMAP_CONF (0x20801336U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pAmapConfParams, 8); + NV_DECLARE_ALIGNED(NvP64 pCbcSwizzleParams, 8); +} NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_CBC_OP + * + * Provides a way for clients to request a CBC Operation + * + * @params[in] CTRL_CMD_FB_CBC_OP fbCBCOp + * CBC Operation requested. + * Valid Values: + * CTRL_CMD_FB_CBC_OP_CLEAN + * CTRL_CMD_FB_CBC_OP_INVALIDATE + * + * Possible status values returned are: + * NV_OK NV_ERR_NOT_SUPPORTED NV_ERR_INVALID_ARGUMENT NV_ERR_TIMEOUT + */ +#define NV2080_CTRL_CMD_FB_CBC_OP (0x20801337U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_CBC_OP_PARAMS_MESSAGE_ID" */ + +/*! + * Permitted CBC Operations + */ +typedef enum CTRL_CMD_FB_CBC_OP { + CTRL_CMD_FB_CBC_OP_CLEAN = 0, + CTRL_CMD_FB_CBC_OP_INVALIDATE = 1, +} CTRL_CMD_FB_CBC_OP; + +#define NV2080_CTRL_CMD_FB_CBC_OP_PARAMS_MESSAGE_ID (0x37U) + +typedef struct NV2080_CTRL_CMD_FB_CBC_OP_PARAMS { + CTRL_CMD_FB_CBC_OP fbCBCOp; +} NV2080_CTRL_CMD_FB_CBC_OP_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION + * + * The call will fetch the compression tags reserved for CBC eviction. + * + * Each comptag will correspond to a unique compression cacheline. The usage of + * these comptags is to evict the CBC by making accesses to a dummy compressed page, + * thereby evicting each CBC line. + * + * @param [in][out] NvU32 pCompTags + * Array of reserved compression tags of size @ref NV2080_MAX_CTAGS_FOR_CBC_EVICTION + * @param [out] numCompTags + * Number of entries returned in @ref pCompTags + * + * @returns + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_OUT_OF_RANGE + * NV_ERR_INVALID_PARAMETER + */ +#define NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION (0x20801338U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS_MESSAGE_ID" */ + +/*! + * Max size of @ref NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS::pCompTags + * Arbitrary, but sufficiently large number. Should be checked against CBC size. 
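+ *
+ * Illustrative use of the returned comptags (sketch only; NvRmControl is
+ * assumed as the control entry point, and evictOneCbcLine() is a
+ * hypothetical client-side helper):
+ *
+ *   NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS p;
+ *   memset(&p, 0, sizeof(p));
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION,
+ *                        &p, sizeof(p));
+ *   for (NvU32 i = 0; (status == NV_OK) && (i < p.numCompTags); i++)
+ *       evictOneCbcLine(p.pCompTags[i]);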
+ */
+#define NV2080_MAX_CTAGS_FOR_CBC_EVICTION 0x7FU
+
+
+#define NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS {
+    NvU32 pCompTags[NV2080_MAX_CTAGS_FOR_CBC_EVICTION];
+    NvU32 numCompTags;
+} NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE
+ *
+ * This call allocates compression tags.
+ *
+ * @params[in] NvU32 attr
+ *     Stores the information:
+ *     1. NVOS32_ATTR_COMPR_REQUIRED or not
+ *     2. NVOS32_ATTR_PAGE_SIZE
+ * @params[in] NvU32 attr2
+ *     Determines whether to allocate an entire cache line or allocate by
+ *     size.
+ * @params[in] NvU32 size
+ *     Specifies the size of the allocation, in pages, not bytes.
+ * @params[in] NvU32 ctagOffset
+ *     Determines the offset usage of the allocation.
+ * @params[out] NvU32 hwResId
+ *     Stores the result of the allocation.
+ * @params[out] NvU32 retCompTagLineMin
+ *     The resulting min Ctag number from the allocation.
+ * @params[out] NvU32 retCompTagLineMax
+ *     The resulting max Ctag number from the allocation.
+ * @returns
+ *     NV_OK
+ *     NV_ERR_INSUFFICIENT_RESOURCES
+ *     NV_ERR_INVALID_ARGUMENT
+ *     NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE (0x20801339U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS_MESSAGE_ID (0x39U)
+
+typedef struct NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS {
+    NvU32 attr;
+    NvU32 attr2;
+    NvU32 size;
+    NvU32 ctagOffset;
+    NvU32 hwResId;
+    NvU32 retCompTagLineMin;
+    NvU32 retCompTagLineMax;
+} NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_FREE_TILE
+ *
+ * This control call is used to release a tile back to the free pool.
+ *
+ * @params[in] NvU32 hwResId
+ *     Stores the information of a previous allocation.
+ * @returns
+ *     NV_OK
+ *     NV_ERR_INVALID_ARGUMENT
+ *     NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_FB_FREE_TILE (0x2080133aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS_MESSAGE_ID (0x3AU)
+
+typedef struct NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS {
+    NvU32 hwResId;
+} NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_FB_SETUP_VPR_REGION
+ *
+ * This control command is used to request VPR region setup.
+ *
+ *   requestType
+ *     NV2080_CTRL_CMD_FB_SET_VPR: Request to set up VPR
+ *
+ *   requestParams : struct VPR_REQUEST_PARAMS
+ *     It contains the VPR region request details:
+ *       startAddr : FB offset from which we need to set up VPR
+ *       size      : required size of the region
+ *
+ *   statusParams : struct VPR_STATUS_PARAMS
+ *     This struct stores the output of the requested VPR region:
+ *       status : whether the request was successful
+ *
+ *   NV2080_CTRL_CMD_FB_VPR_ERROR_CODE :
+ *     NV2080_CTRL_CMD_FB_VPR_ERROR_GENERIC : some unknown error occurred
+ *     NV2080_CTRL_CMD_FB_VPR_ERROR_INVALID_CLIENT_REQUEST : request was invalid
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_SETUP_VPR_REGION (0x2080133bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE {
+    NV2080_CTRL_CMD_FB_SET_VPR = 0,
+} NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE;
+
+typedef enum NV2080_CTRL_CMD_FB_VPR_ERROR_CODE {
+    NV2080_CTRL_CMD_FB_VPR_ERROR_GENERIC = 0,
+    NV2080_CTRL_CMD_FB_VPR_ERROR_INVALID_CLIENT_REQUEST = 1,
+} NV2080_CTRL_CMD_FB_VPR_ERROR_CODE;
+
+typedef struct VPR_REQUEST_PARAMS {
+    NvU32 startAddr;
+    NvU32 size;
+} VPR_REQUEST_PARAMS;
+
+typedef struct VPR_STATUS_PARAMS {
+    NvU32 status;
+} VPR_STATUS_PARAMS;
+
+#define NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS_MESSAGE_ID (0x3BU)
+
+typedef struct NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS {
+    NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE requestType;
+    VPR_REQUEST_PARAMS requestParams;
+    VPR_STATUS_PARAMS statusParams;
+} NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS;
+typedef struct NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS *PNV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_CLI_MANAGED_OFFLINED_PAGES
+ *
+ * This command returns the list of offlined video memory page addresses in
+ * the region managed by the client.
+ *
+ *   offlinedPages
+ *     This output parameter is an array of video memory physical page numbers
+ *     that are offlined. This array can hold a maximum of
+ *     NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES addresses.
+ *   pageSize
+ *     This output parameter contains the size of the page that is offlined.
+ *   validEntries
+ *     This output parameter specifies the number of valid entries in the
+ *     offlined array.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_FB_GET_CLI_MANAGED_OFFLINED_PAGES (0x2080133cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x3CU)
+
+typedef struct NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS {
+    NvU32 offlinedPages[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES]; // a 32-bit page number is sufficient here
+    NvU32 pageSize;
+    NvU32 validEntries;
+} NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO
+ *
+ * This command returns the parameters required to initialize the compbit
+ * copy object used by the address mapping library.
+ *
+ *   defaultPageSize
+ *     Page size used by @ref CompBitCopy methods.
+ *   comptagsPerCacheLine
+ *     Number of compression tags in a single compression cache line.
+ *   unpackedComptagLinesPerCacheLine
+ *     From HW (not adjusted for CompBits code): number of compression tags
+ *     in a single compression cache line.
+ *   compCacheLineSizePerLTC
+ *     Size of the compression cache line per L2 slice, in bytes.
+ *   unpackedCompCacheLineSizePerLTC
+ *     From HW (not adjusted for CompBits code): size of the compression
+ *     cache line per L2 slice, in bytes.
+ *   slicesPerLTC
+ *     Number of L2 slices per L2 cache.
+ *   numActiveLTCs
+ *     Number of active L2 caches. (Not floorswept)
+ *   familyName
+ *     Family name for the GPU.
+ *   chipName
+ *     Chip name for the GPU.
+ *   bitsPerRAMEntry
+ *     Bits per RAM entry. (Need better doc)
+ *   ramBankWidth
+ *     Width of a RAM bank. (Need better doc)
+ *   bitsPerComptagLine
+ *     Number of bits per compression tag line.
+ *   ramEntriesPerCompCacheLine
+ *     Number of RAM entries spanned by one compression cache line.
+ *   comptagLineSize
+ *     Size of a compression tag line, in bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO (0x2080133dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS_MESSAGE_ID (0x3DU)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS {
+    NvU32 defaultPageSize;
+    NvU32 comptagsPerCacheLine;
+    NvU32 unpackedComptagLinesPerCacheLine;
+    NvU32 compCacheLineSizePerLTC;
+    NvU32 unpackedCompCacheLineSizePerLTC;
+    NvU32 slicesPerLTC;
+    NvU32 numActiveLTCs;
+    NvU32 familyName;
+    NvU32 chipName;
+    NvU32 bitsPerRAMEntry;
+    NvU32 ramBankWidth;
+    NvU32 bitsPerComptagLine;
+    NvU32 ramEntriesPerCompCacheLine;
+    NvU32 comptagLineSize;
+} NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_SET_RRD
+ *
+ * Sets the row-to-row delay on the GPU's FB.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_SET_RRD (0x2080133eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_RRD_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_SET_RRD_RESET_VALUE (~((NvU32)0))
+#define NV2080_CTRL_FB_SET_RRD_PARAMS_MESSAGE_ID (0x3EU)
+
+typedef struct NV2080_CTRL_FB_SET_RRD_PARAMS {
+    NvU32 rrd;
+} NV2080_CTRL_FB_SET_RRD_PARAMS;
+
+/*
+ * NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS
+ *
+ * This is not a control call of its own; it provides the common definitions
+ * for the two NV2080_CTRL_CMD_FB_SET_READ/WRITE_LIMIT control calls.
+ */
+typedef struct NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS {
+    NvU8 limit;
+} NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS;
+#define NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE (0xffU)
+
+/*
+ * NV2080_CTRL_CMD_FB_SET_READ_LIMIT
+ *
+ * Sets the READ_LIMIT to be used in the NV_PFB_FBPA_DIR_ARB_CFG0 register.
+ *
+ *   limit
+ *     The limit value to use
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_SET_READ_LIMIT (0x2080133fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_SET_READ_LIMIT_RESET_VALUE NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE
+#define NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS_MESSAGE_ID (0x3FU)
+
+typedef NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_SET_WRITE_LIMIT
+ *
+ * Sets the WRITE_LIMIT to be used in the NV_PFB_FBPA_DIR_ARB_CFG0 register.
+ *
+ *   limit
+ *     The limit value to use
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_SET_WRITE_LIMIT (0x20801340U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_SET_WRITE_LIMIT_RESET_VALUE NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE
+#define NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS_MESSAGE_ID (0x40U)
+
+typedef NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_PATCH_PBR_FOR_MINING
+ *
+ * Patches some VBIOS values related to PBR to better suit mining applications.
+ *
+ *   bEnable
+ *     Set the mining-specific values or reset to the original values
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   Any error code
+ */
+#define NV2080_CTRL_CMD_FB_PATCH_PBR_FOR_MINING (0x20801341U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS {
+    NvBool bEnable;
+} NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_MEM_ALIGNMENT
+ *
+ * Get memory alignment. Replacement for NVOS32_FUNCTION_GET_MEM_ALIGNMENT.
+ */
+#define NV2080_CTRL_CMD_FB_GET_MEM_ALIGNMENT (0x20801342U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_MEM_ALIGNMENT_MAX_BANKS (4U)
+#define NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS {
+    NvU32 alignType;   // Input
+    NvU32 alignAttr;
+    NvU32 alignInputFlags;
+    NvU32 alignHead;
+    NV_DECLARE_ALIGNED(NvU64 alignSize, 8);
+    NvU32 alignHeight;
+    NvU32 alignWidth;
+    NvU32 alignPitch;
+    NvU32 alignPad;
+    NvU32 alignMask;
+    NvU32 alignOutputFlags[NV2080_CTRL_FB_GET_MEM_ALIGNMENT_MAX_BANKS];
+    NvU32 alignBank[NV2080_CTRL_FB_GET_MEM_ALIGNMENT_MAX_BANKS];
+    NvU32 alignKind;
+    NvU32 alignAdjust; // Output -- if non-zero, the amount by which to adjust the offset
+    NvU32 alignAttr2;
+} NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR
+ *
+ * Get the CBC base physical address.
+ * This control call is required by error containment tests.
+ * NV2080_CTRL_CMD_FB_GET_AMAP_CONF can also return the CBC base address,
+ * but it requires kernel privilege and is not callable from an SRT test.
+ *
+ * @params[out] NvU32 cbcBaseAddress
+ *     Base physical address for CBC data.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR (0x20801343U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS {
+    NvU32 cbcBaseAddress;
+    NvU32 compCacheLineSize;
+    NV_DECLARE_ALIGNED(NvU64 backingStoreStartPA, 8);
+    NV_DECLARE_ALIGNED(NvU64 backingStoreAllocPA, 8);
+    NvU32 backingStoreChunkOverfetch;
+} NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS;
+
+#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING 0:0
+#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_FALSE 0U
+#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_TRUE 1U
+
+
+
+typedef struct NV2080_CTRL_FB_REMAP_ENTRY {
+    NvU32 remapRegVal;
+    NvU32 timestamp;
+    NvU8  fbpa;
+    NvU8  sublocation;
+    NvU8  source;
+    NvU8  flags;
+} NV2080_CTRL_FB_REMAP_ENTRY;
+
+/* valid values for source */
+
+
+#define NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_SBE_FIELD (0x00000002U)
+#define NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_DBE_FIELD (0x00000003U)
+
+#define NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS (0x00000200U)
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS
+ *
+ * This command returns the list of remapped rows stored in the Inforom.
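+ *
+ * Illustrative fetch (sketch only; NvRmControl is assumed as the control
+ * entry point and handles are omitted; the params structure is large, so
+ * static storage is used here and is zero-initialized by definition):
+ *
+ *   static NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS p;
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS,
+ *                        &p, sizeof(p));
+ *   for (NvU32 i = 0; (status == NV_OK) && (i < p.entryCount); i++)
+ *   {
+ *       // p.entries[i].remapRegVal, .source and .flags describe each remap.
+ *   }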
+ *
+ *   entryCount
+ *     This output parameter specifies the number of remapped rows.
+ *   flags
+ *     This output parameter contains info on whether or not there are
+ *     pending remappings and whether or not a remapping failed.
+ *   entries
+ *     This output parameter is an array of NV2080_CTRL_FB_REMAP_ENTRY
+ *     containing information on the remappings that occurred. This array can
+ *     hold a maximum of NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS entries.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_POINTER
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS (0x20801344U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING \
+    NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING_FALSE NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_FALSE
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING_TRUE NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_TRUE
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE 1:1
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE_FALSE 0U
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE_TRUE 1U
+
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS {
+    NvU32 entryCount;
+    NvU8  flags;
+    NV2080_CTRL_FB_REMAP_ENTRY entries[NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS];
+} NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS;
+
+// Max size of the queryParams in bytes, so that the NV2080_CTRL_FB_FS_INFO_QUERY struct is still 32B
+#define NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE 24U
+
+/*!
+ * Structure holding the out params for NV2080_CTRL_FB_FS_INFO_INVALID_QUERY.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS {
+    // Unused param, will ensure the size of NV2080_CTRL_FB_FS_INFO_QUERY struct to be 32B
+    NvU8 data[NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE];
+} NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBP_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS {
+    /*!
+     * [IN]: swizzId
+     * PartitionID associated with a created SMC partition. Currently used only for a
+     * device monitoring client to get the physical values of the FB. The client needs to pass
+     * 'NV2080_CTRL_GPU_PARTITION_ID_INVALID' explicitly if it wants RM to ignore the swizzId.
+     * RM will consider this request similar to a legacy case.
+     * The client's subscription is used only as a capability check and not as an input swizzId.
+     */
+    NvU32 swizzId;
+    /*!
+     * [OUT]: physical/local FBP mask.
+     */
+    NV_DECLARE_ALIGNED(NvU64 fbpEnMask, 8);
+} NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_LTC_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS {
+    /*!
+     * [IN]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [OUT]: physical/local LTC mask.
+     */
+    NvU32 ltcEnMask;
+} NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_LTS_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS {
+    /*!
+     * [IN]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [OUT]: physical/local LTS mask.
+     * Note that LTS bits are flattened out for all LTCs within an FBP.
+ */ + NvU32 ltsEnMask; +} NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBPA_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: physical/local FBPA mask. + */ + NvU32 fbpaEnMask; +} NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: physical/local FBPA-SubPartition mask. + */ + NvU32 fbpaSubpEnMask; +} NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP + */ +typedef struct NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: Logical/local FBP index + */ + NvU32 fbpLogicalIndex; +} NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_ROP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: physical/local ROP mask. + */ + NvU32 ropEnMask; +} NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical ltc mask. + */ + NvU32 ltcEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical lts mask. + */ + NvU32 ltsEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical fbpa mask. + */ + NvU32 fbpaEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical rop mask. + */ + NvU32 ropEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! 
+ * [IN]: swizzId + * PartitionID associated with a created smc partition. Currently used only for a + * device monitoring client to get the physical values of the FB. The client needs to pass + * 'NV2080_CTRL_GPU_PARTITION_ID_INVALID' explicitly if it wants RM to ignore the swizzId. + * RM will consider this request similar to a legacy case. + * The client's subscription is used only as a capability check and not as an input swizzId. + */ + NvU32 swizzId; + /*! + * [OUT]: physical FBPA_SubPartition mask associated with requested partition. + */ + NV_DECLARE_ALIGNED(NvU64 fbpaSubpEnMask, 8); +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS; + +// Possible values for queryType +#define NV2080_CTRL_FB_FS_INFO_INVALID_QUERY 0x0U +#define NV2080_CTRL_FB_FS_INFO_FBP_MASK 0x1U +#define NV2080_CTRL_FB_FS_INFO_LTC_MASK 0x2U +#define NV2080_CTRL_FB_FS_INFO_LTS_MASK 0x3U +#define NV2080_CTRL_FB_FS_INFO_FBPA_MASK 0x4U +#define NV2080_CTRL_FB_FS_INFO_ROP_MASK 0x5U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK 0x6U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK 0x7U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK 0x8U +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK 0x9U +#define NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK 0xAU +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK 0xBU +#define NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP 0xCU + +typedef struct NV2080_CTRL_FB_FS_INFO_QUERY { + NvU16 queryType; + NvU8 reserved[2]; + NvU32 status; + union { + NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS inv; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS fbp, 8); + NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS ltc; + NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS lts; + NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS fbpa; + NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS rop; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS dmLtc; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS dmLts; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS dmFbpa; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS dmRop; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS dmFbpaSubp, 8); + NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS fbpaSubp; + NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS fbpLogicalMap; + } queryParams; +} NV2080_CTRL_FB_FS_INFO_QUERY; + +// Max number of queries that can be batched in a single call to NV2080_CTRL_CMD_FB_GET_FS_INFO +#define NV2080_CTRL_FB_FS_INFO_MAX_QUERIES 96U + +#define NV2080_CTRL_FB_GET_FS_INFO_PARAMS_MESSAGE_ID (0x46U) + +typedef struct NV2080_CTRL_FB_GET_FS_INFO_PARAMS { + NvU16 numQueries; + NvU8 reserved[6]; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_QUERY queries[NV2080_CTRL_FB_FS_INFO_MAX_QUERIES], 8); +} NV2080_CTRL_FB_GET_FS_INFO_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_FS_INFO + * + * This control call returns the fb engine information for a partition/GPU. + * Supports an interface so that the caller can issue multiple queries by batching them + * in a single call. Returns the first error it encounters. + * + * numQueries[IN] + * - Specifies the number of valid queries. 
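+ *
+ * Illustrative batched query (sketch only; NvRmControl is assumed as the
+ * control entry point, handles are omitted, and
+ * NV2080_CTRL_GPU_PARTITION_ID_INVALID comes from the GPU control header;
+ * static storage is zero-initialized by definition):
+ *
+ *   static NV2080_CTRL_FB_GET_FS_INFO_PARAMS p;
+ *   p.numQueries = 2;
+ *   p.queries[0].queryType = NV2080_CTRL_FB_FS_INFO_FBP_MASK;
+ *   p.queries[0].queryParams.fbp.swizzId = NV2080_CTRL_GPU_PARTITION_ID_INVALID;
+ *   p.queries[1].queryType = NV2080_CTRL_FB_FS_INFO_LTC_MASK;
+ *   p.queries[1].queryParams.ltc.fbpIndex = 0;
+ *   status = NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_FB_GET_FS_INFO,
+ *                        &p, sizeof(p));
+ *   // On NV_OK, check each queries[i].status before reading its mask.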
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_FS_INFO (0x20801346U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_FS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_NO_REMAPPED_ROWS (0x0U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAPPED_ROW (0x1U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_MIXED_REMAPPED_REMAINING_ROWS (0x2U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAINING_ROW (0x3U)
+#define NV2080_CTRL_FB_HISTOGRAM_IDX_MAX_REMAPPED_ROWS (0x4U)
+
+#define NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS_MESSAGE_ID (0x47U)
+
+typedef struct NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS {
+    NvU32 histogram[5];
+} NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_FB_GET_ROW_REMAPPER_HISTOGRAM
+ *
+ * This control call returns stats on the number of banks that have a certain
+ * number of rows remapped in the bank. Specifically, the number of banks that
+ * have 0, 1, 2 through (max-2), (max-1) and max rows remapped in the bank.
+ * Values are returned in an array.
+ *
+ * Index values are:
+ *
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_NO_REMAPPED_ROWS
+ *     Number of banks with zero rows remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAPPED_ROW
+ *     Number of banks with one row remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_MIXED_REMAPPED_REMAINING_ROWS
+ *     Number of banks with 2 through (max-2) rows remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAINING_ROW
+ *     Number of banks with (max-1) rows remapped
+ *   NV2080_CTRL_FB_HISTOGRAM_IDX_MAX_REMAPPED_ROWS
+ *     Number of banks with max rows remapped
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_ROW_REMAPPER_HISTOGRAM (0x20801347U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES
+ *
+ * This command returns the list of dynamically blacklisted video memory page
+ * addresses since the last driver load.
+ *
+ *   offlined
+ *     This output parameter is an array of
+ *     NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO structures. This array
+ *     can hold a maximum of NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES
+ *     entries.
+ *   validEntries
+ *     This output parameter specifies the number of valid entries in the
+ *     offlined array.
+ *   baseIndex
+ *     With the limit of up to 512 blacklisted pages, the size of this array
+ *     exceeds the RPC buffer limit. This control call will collect the data
+ *     in multiple passes. This parameter indicates the start index of the
+ *     data to be passed back to the caller.
+ *     This cannot be greater than NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES.
+ *   bMore
+ *     This parameter indicates whether there are more valid elements to be
+ *     fetched.
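+ *
+ * Illustrative multi-pass fetch (sketch only; NvRmControl is assumed as the
+ * control entry point, handles and per-entry consumption are omitted):
+ *
+ *   NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS p;
+ *   memset(&p, 0, sizeof(p));
+ *   do
+ *   {
+ *       status = NvRmControl(hClient, hSubdevice,
+ *                            NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES,
+ *                            &p, sizeof(p));
+ *       // consume p.offlined[0 .. p.validEntries - 1]
+ *       p.baseIndex += NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES;
+ *   } while ((status == NV_OK) && p.bMore);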
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES (0x20801348U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */
+
+/* Maximum pages that can be dynamically blacklisted */
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES 512U
+
+/*
+ * Maximum entries that can be sent in a single pass of
+ * NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES
+ */
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES 64U
+
+/**
+ * NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO
+ *
+ *   pageNumber
+ *     This output parameter specifies the dynamically blacklisted page number.
+ *   source
+ *     The reason for the page to be retired. Valid values for
+ *     this parameter include:
+ *       NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_INVALID
+ *         Invalid source.
+ *       NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_DPR_DBE
+ *         Page retired by dynamic page retirement due to a double-bit
+ *         error.
+ */
+typedef struct NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO {
+    NV_DECLARE_ALIGNED(NvU64 pageNumber, 8);
+    NvU8 source;
+} NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO;
+
+#define NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x48U)
+
+typedef struct NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES], 8);
+    NvU32  validEntries;
+    NvU32  baseIndex;
+    NvBool bMore;
+} NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS;
+
+/* valid values for source */
+
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_INVALID (0x00000000U)
+#define NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_DPR_DBE (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO
+ *
+ * This control command is used by clients to query information pertaining to client allocations.
+ *
+ * @params [IN/OUT] NvU64 allocCount:
+ *     Client specifies the allocation count that it received using the
+ *     previous NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO control call.
+ *     RM will get the total number of allocations known by RM and fill
+ *     allocCount with it.
+ *
+ * @params [IN] NvP64 pAllocInfo:
+ *     Pointer to the buffer allocated by the client of size
+ *     NV2080_CTRL_CMD_FB_ALLOCATION_INFO * allocCount. RM returns the info
+ *     pertaining to each of the contiguous client allocation chunks in
+ *     pAllocInfo. The format of the allocation information is given by
+ *     NV2080_CTRL_CMD_FB_ALLOCATION_INFO. The client has to sort the
+ *     returned information if it wants to retain the legacy behavior of
+ *     SORTED BY OFFSET. Information is returned if and only if
+ *     allocCount[IN]>=allocCount[OUT] and clientCount[IN]>=clientCount[OUT].
+ *
+ * @params [IN/OUT] NvU64 clientCount:
+ *     Client specifies the client count that it received using the
+ *     previous NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO control call.
+ *     RM will get the total number of clients that have allocations with RM
+ *     and fill clientCount with it.
+ *
+ * @params [IN] NvP64 pClientInfo:
+ *     Pointer to the buffer allocated by the client of size
+ *     NV2080_CTRL_CMD_FB_CLIENT_INFO * clientCount. RM returns the info
+ *     pertaining to each of the clients that have allocations known about by
+ *     RM in pClientInfo. The format of the allocation information is given
+ *     by NV2080_CTRL_CMD_FB_CLIENT_INFO. Information is returned if and only
+ *     if allocCount[IN]>=allocCount[OUT] and clientCount[IN]>=clientCount[OUT].
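+ *
+ * Illustrative two-pass pattern (sketch only; NvRmControl and the
+ * NV_PTR_TO_NvP64 conversion macro are assumed to be available, and
+ * error/retry handling is omitted):
+ *
+ *   NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS p;
+ *   memset(&p, 0, sizeof(p));
+ *   // First call with zero counts: RM fills in the totals only.
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO,
+ *                        &p, sizeof(p));
+ *   pAlloc  = malloc(p.allocCount  * sizeof(NV2080_CTRL_CMD_FB_ALLOCATION_INFO));
+ *   pClient = malloc(p.clientCount * sizeof(NV2080_CTRL_CMD_FB_CLIENT_INFO));
+ *   p.pAllocInfo  = NV_PTR_TO_NvP64(pAlloc);
+ *   p.pClientInfo = NV_PTR_TO_NvP64(pClient);
+ *   // Second call: RM fills the buffers if the counts still fit.
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO,
+ *                        &p, sizeof(p));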
+ *
+ * @returns Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_POINTER
+ *   NV_ERR_NO_MEMORY
+ *
+ * @Usage: All privileged RM clients, for debugging only. Initially, call this
+ *     with allocCount = clientCount = 0 to get the current allocation and
+ *     client counts, then call again with buffers allocated to those sizes.
+ *     Because the counts can grow between calls, the client can repeat this
+ *     with the newly returned counts until a maximum try count is reached or
+ *     the client runs out of memory.
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO (0x20801349U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS_MESSAGE_ID" */
+
+/*
+ * These work with the FLD_SET_REF_NUM and FLD_TEST_REF macros and describe the 'flags' member
+ * of the NV2080_CTRL_CMD_FB_ALLOCATION_INFO struct.
+ */
+
+// Address space of the allocation
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE         4:0
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_SYSMEM  0U
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_VIDMEM  1U
+
+// Whether the allocation is shared
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED       5:5
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED_FALSE 0U
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED_TRUE  1U
+
+// Whether this client owns this allocation
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER        6:6
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER_FALSE  0U
+#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER_TRUE   1U
+
+typedef struct NV2080_CTRL_CMD_FB_ALLOCATION_INFO {
+    NvU32 client; /* [OUT] Identifies the client that made or shares the allocation (index into pClientInfo) */
+    NvU32 flags;  /* [OUT] Flags associated with the allocation (see previous defines) */
+    NV_DECLARE_ALIGNED(NvU64 beginAddr, 8); /* [OUT] Starting physical address of the chunk */
+    NV_DECLARE_ALIGNED(NvU64 size, 8);      /* [OUT] Size of the allocated contiguous chunk in bytes */
+} NV2080_CTRL_CMD_FB_ALLOCATION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_CLIENT_INFO {
+    NvHandle handle; /* [OUT] Handle of the client that made or shares the allocation */
+    NvU32    pid;    /* [OUT] PID of the client that made or shares the allocation */
+
+    /* For the definition of the subprocessID and subprocessName params, see NV0000_CTRL_CMD_SET_SUB_PROCESS_ID */
+    NvU32    subProcessID; /* [OUT] Subprocess ID of the client that made or shares the allocation */
+    char     subProcessName[NV_PROC_NAME_MAX_LENGTH]; /* [OUT] Subprocess name of the client that made or shares the allocation */
+} NV2080_CTRL_CMD_FB_CLIENT_INFO;
+
+#define NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS_MESSAGE_ID (0x49U)
+
+typedef struct NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 allocCount, 8);
+    NV_DECLARE_ALIGNED(NvP64 pAllocInfo, 8);
+    NV_DECLARE_ALIGNED(NvU64 clientCount, 8);
+    NV_DECLARE_ALIGNED(NvP64 pClientInfo, 8);
+} NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS
+ *
+ * This control command is used by clients to update the NUMA status.
+ *
+ * @params [IN] NvBool bOnline:
+ *     Whether the GPU memory is to be onlined (NV_TRUE) or offlined
+ *     (NV_FALSE) as a NUMA node.
+ *
+ * @returns Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *
+ */
+#define NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS (0x20801350U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS_MESSAGE_ID (0x50U)
+
+typedef struct NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS {
+    NvBool bOnline;
+} NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FB_GET_NUMA_INFO
+ *
+ * This control command is used by clients to get per-subdevice NUMA memory
+ * information as assigned by the system.
+ *
+ *   numaNodeId [OUT]
+ *     - Specifies the NUMA node ID.
+ *
+ *   numaMemAddr [OUT]
+ *     - Specifies the NUMA memory address.
+ *
+ *   numaMemSize [OUT]
+ *     - Specifies the NUMA memory size.
+ *
+ *   numaOfflineAddressesCount [IN/OUT]
+ *     - If non-zero, specifies the maximum number of entries in
+ *       numaOfflineAddresses[] for which the information is required.
+ *       It will be updated with the actual number of entries present in
+ *       numaOfflineAddresses[].
+ *
+ *   numaOfflineAddresses [OUT]
+ *     - If numaOfflineAddressesCount is non-zero, contains the addresses
+ *       of offline pages in the NUMA region.
+ *
+ * @returns Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FB_GET_NUMA_INFO (0x20801351U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64U
+
+#define NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS {
+    NvS32 numaNodeId;
+    NV_DECLARE_ALIGNED(NvU64 numaMemAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 numaMemSize, 8);
+    NvU32 numaOfflineAddressesCount;
+    NV_DECLARE_ALIGNED(NvU64 numaOfflineAddresses[NV2080_CTRL_FB_NUMA_INFO_MAX_OFFLINE_ADDRESSES], 8);
+} NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS;
+
+/* _ctrl2080fb_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
new file mode 100644
index 0000000..b7dcf07
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
@@ -0,0 +1,759 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080fifo.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/*
+ * NV2080_CTRL_CMD_SET_GPFIFO
+ *
+ * This command sets the GPFIFO offset and number of entries for a channel
+ * after it has been allocated. The channel must be idle and not pending,
+ * otherwise NV_ERR_STATE_IN_USE will be returned.
+ *
+ *   hChannel
+ *     The handle to the channel.
+ *   base
+ *     The base of the GPFIFO in the channel ctxdma.
+ *   numEntries
+ *     The number of entries in the GPFIFO.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_CHANNEL
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_SET_GPFIFO (0x20801102) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_SET_GPFIFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_SET_GPFIFO_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_CMD_SET_GPFIFO_PARAMS {
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NvU32    numEntries;
+} NV2080_CTRL_CMD_SET_GPFIFO_PARAMS;
+
+/*
+ * NV2080_CTRL_FIFO_BIND_CHANNEL
+ *
+ * This structure is used to describe a channel that is to have
+ * its bindable engines bound to those of other channels.
+ *
+ *   hClient
+ *     This structure member contains the handle of the client object
+ *     that owns the channel object specified by hChannel.
+ *
+ *   hChannel
+ *     This structure member contains the channel handle of the channel
+ *     object.
+ */
+typedef struct NV2080_CTRL_FIFO_BIND_CHANNEL {
+    NvHandle hClient;
+    NvHandle hChannel;
+} NV2080_CTRL_FIFO_BIND_CHANNEL;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_BIND_ENGINES
+ *
+ * This control call is now deprecated.
+ * This command can be used to bind different video engines on G8X from separate
+ * channels together for operations such as idling. The set of bindable engines
+ * includes the NV2080_ENGINE_TYPE_BSP, NV2080_ENGINE_TYPE_VP and
+ * NV2080_ENGINE_TYPE_PPP engines.
+ *
+ *   bindChannelCount
+ *     This parameter specifies the number of channels to bind together. This
+ *     parameter cannot exceed NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS.
+ *
+ *   bindChannels
+ *     This parameter specifies the array of channels to bind together. The first
+ *     bindChannelCount entries are used in the bind channel operation.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_CHANNEL
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS (16)
+
+#define NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS {
+    NvU32                         bindChannelCount;
+    NV2080_CTRL_FIFO_BIND_CHANNEL bindChannels[NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS];
+} NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS;
+
+#define NV2080_CTRL_CMD_FIFO_BIND_ENGINES (0x20801103) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES
+ *
+ * This command is used by a client to set up specialized custom operational
+ * properties that may be specific to an environment, or properties that
+ * should be set generally but are not, for reasons of backward compatibility
+ * with previous chip generations.
+ *
+ *   flags
+ *     This field specifies the operational properties to be applied.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_CHANNEL
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES (0x20801104) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS {
+    NvU32 flags;
+} NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS;
+
+#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE       0:0
+#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE_FALSE (0x00000000)
+#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE_TRUE  (0x00000001)
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_PHYSICAL_CHANNEL_COUNT
+ *
+ * This command returns the maximum number of physical channels available for
+ * allocation on the current GPU. This may be less than or equal to the total
+ * number of channels supported by the current hardware.
+ *
+ *   physChannelCount
+ *     This output parameter contains the maximum physical channel count.
+ *
+ *   physChannelCountInUse
+ *     This output parameter contains the number of physical channels in use.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV2080_CTRL_CMD_FIFO_GET_PHYSICAL_CHANNEL_COUNT (0x20801108) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS {
+    NvU32 physChannelCount;
+    NvU32 physChannelCountInUse;
+} NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS;
+
+/*
+ * NV2080_CTRL_FIFO_INFO
+ *
+ * This structure represents a single 32bit fifo engine value. Clients
+ * request a particular FIFO engine value by specifying a unique fifo
+ * information index.
+ *
+ * Legal fifo information index values are:
+ *   NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL
+ *     This index can be used to request the amount of instance space
+ *     in kilobytes reserved by the fifo engine.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS
+ *     This index can be used to query the maximum number of channel groups
+ *     that can be allocated on the GPU.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP
+ *     This index can be used to query the maximum number of channels that can
+ *     be allocated in a single channel group.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP
+ *     This index can be used to query the maximum number of subcontexts that
+ *     can be allocated in a single channel group.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET
+ *     This index can be used to query the starting offset of the RM
+ *     pre-allocated USERD range in BAR1. This index query is honored only
+ *     on Legacy-vGPU host RM.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE
+ *     This index can be used to query the default timeslice value
+ *     (microseconds) used for a channel or channel group.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE
+ *     This index can be used to query the number of channel groups that are
+ *     already allocated on the GPU.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED
+ *     This index can be used to check whether per-runlist channel RAM is
+ *     supported, and to query the supported number of channels per runlist.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE
+ *     This index can be used to get the maximum channel groups supported per
+ *     engine/runlist.
+ *   NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE
+ *     This index can be used to get the channel groups currently in use per
+ *     engine/runlist.
+ *
+ */
+typedef struct NV2080_CTRL_FIFO_INFO {
+    NvU32 index;
+    NvU32 data;
+} NV2080_CTRL_FIFO_INFO;
+
+/* valid fifo info index values */
+#define NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL                       (0x000000000)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS                   (0x000000001)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP               (0x000000002)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP             (0x000000003)
+#define NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET              (0x000000004)
+#define NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE            (0x000000005)
+#define NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE                (0x000000006)
+#define NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED (0x000000007)
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE        (0x000000008)
+#define NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE     (0x000000009)
+
+
+/* set INDEX_MAX to greatest possible index value */
+#define NV2080_CTRL_FIFO_INFO_INDEX_MAX NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE
+
+#define NV2080_CTRL_FIFO_GET_INFO_USERD_OFFSET_SHIFT (12)
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_INFO
+ *
+ * This command returns fifo engine information for the associated GPU.
+ * Requests to retrieve fifo information use an array of one or more
+ * NV2080_CTRL_FIFO_INFO structures.
+ *
+ *   fifoInfoTblSize
+ *     This field specifies the number of valid entries in the fifoInfoTbl
+ *     array. This value cannot exceed NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES.
+ *   fifoInfoTbl
+ *     This parameter contains the client's fifo info table into
+ *     which the fifo info values will be transferred by the RM.
+ *     The fifo info table is an array of NV2080_CTRL_FIFO_INFO structures.
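+ *
+ * An illustrative usage sketch (rmControl() is a hypothetical stand-in for
+ * the client's control-call dispatch mechanism):
+ *
+ *   NV2080_CTRL_FIFO_GET_INFO_PARAMS p = { 0 };
+ *   p.fifoInfoTbl[0].index = NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS;
+ *   p.fifoInfoTbl[1].index = NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE;
+ *   p.fifoInfoTblSize = 2;
+ *   status = rmControl(hSubdevice, NV2080_CTRL_CMD_FIFO_GET_INFO, &p, sizeof(p));
+ *   // on NV_OK, p.fifoInfoTbl[i].data holds each requested value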
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FIFO_GET_INFO (0x20801109) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum number of NV2080_CTRL_FIFO_INFO entries per request */
+#define NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES (256)
+
+#define NV2080_CTRL_FIFO_GET_INFO_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_FIFO_GET_INFO_PARAMS {
+    NvU32                 fifoInfoTblSize;
+    /*
+     * C form:
+     * NV2080_CTRL_FIFO_INFO fifoInfoTbl[NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES];
+     */
+    NV2080_CTRL_FIFO_INFO fifoInfoTbl[NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES];
+    NvU32                 engineType;
+} NV2080_CTRL_FIFO_GET_INFO_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_FIFO_CHANNEL_PREEMPTIVE_REMOVAL
+ *
+ * This command removes the specified channel from the associated GPU's runlist
+ * and then initiates RC recovery. If the channel is active, it will first be
+ * preempted.
+ *
+ *   hChannel
+ *     The handle to the channel to be preempted.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_CHANNEL
+ */
+#define NV2080_CTRL_CMD_FIFO_CHANNEL_PREEMPTIVE_REMOVAL (0x2080110a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS {
+    NvHandle hChannel;
+} NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS
+ *
+ * This command will disable or enable scheduling of the channels described in
+ * the list provided. Whether or not the channels are also preempted off the
+ * GPU can be controlled by bOnlyDisableScheduling. By default channels are
+ * preempted off the GPU.
+ *
+ *   bDisable
+ *     This value determines whether to disable or
+ *     enable the set of channels.
+ *   numChannels
+ *     The number of channels to be stopped.
+ *   bOnlyDisableScheduling
+ *     When false and bDisable=NV_TRUE, the call will ensure none of the listed
+ *     channels are running in hardware and will not run until a call with
+ *     bDisable=NV_FALSE is made. When true and bDisable=NV_TRUE, the control
+ *     call will ensure that none of the listed channels can be scheduled on the
+ *     GPU until a call with bDisable=NV_FALSE is made, but will not remove any
+ *     of the listed channels from hardware if they are currently running. When
+ *     bDisable=NV_FALSE this field is ignored.
+ *   bRewindGpPut
+ *     If a channel is being disabled and bRewindGpPut=NV_TRUE, the channel's
+ *     RAMFC will be updated so that GP_PUT is reset to the value of GP_GET.
+ *   hClientList
+ *     An array of NvHandle listing the client handles.
+ *   hChannelList
+ *     An array of NvHandle listing the handles of the channels
+ *     to be stopped.
+ *   pRunlistPreemptEvent
+ *     KEVENT handle for async HW runlist preemption (unused on pre-Maxwell).
+ *     When NULL, the call reverts to synchronous preemption with a spinloop.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS (0x2080110b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES (64)
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS {
+    NvBool   bDisable;
+    NvU32    numChannels;
+    NvBool   bOnlyDisableScheduling;
+    NvBool   bRewindGpPut;
+    NV_DECLARE_ALIGNED(NvP64 pRunlistPreemptEvent, 8);
+    // C form: NvHandle hClientList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]
+    NvHandle hClientList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES];
+    // C form: NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]
+    NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS;
+
+#define NV2080_CTRL_FIFO_DISABLE_CHANNEL_FALSE         (0x00000000)
+#define NV2080_CTRL_FIFO_DISABLE_CHANNEL_TRUE          (0x00000001)
+#define NV2080_CTRL_FIFO_ONLY_DISABLE_SCHEDULING_FALSE (0x00000000)
+#define NV2080_CTRL_FIFO_ONLY_DISABLE_SCHEDULING_TRUE  (0x00000001)
+
+/*
+ * NV2080_CTRL_FIFO_MEM_INFO
+ *
+ * This structure describes the details of a block of memory. It consists
+ * of the following fields:
+ *
+ *   aperture
+ *     One of the NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_* values
+ *   base
+ *     Physical base address of the memory
+ *   size
+ *     Size in bytes of the memory
+ */
+typedef struct NV2080_CTRL_FIFO_MEM_INFO {
+    NvU32 aperture;
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_FIFO_MEM_INFO;
+
+/*
+ * NV2080_CTRL_FIFO_CHANNEL_MEM_INFO
+ *
+ * This structure describes the details of the instance memory, RAMFC
+ * and method buffers of a channel. It consists of the following fields:
+ *
+ *   inst
+ *     Structure describing the details of the instance memory
+ *   ramfc
+ *     Structure describing the details of the RAMFC
+ *   methodBuf
+ *     Array of structures describing the details of the method buffers
+ *   methodBufCount
+ *     Number of method buffers (one per runqueue)
+ */
+
+// max runqueues
+#define NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT 0x2
+
+typedef struct NV2080_CTRL_FIFO_CHANNEL_MEM_INFO {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO inst, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO ramfc, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO methodBuf[NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT], 8);
+    NvU32 methodBufCount;
+} NV2080_CTRL_FIFO_CHANNEL_MEM_INFO;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO
+ *
+ * This command returns the memory aperture, physical base address and size
+ * of each of the instance memory, RAMFC and method buffers of a channel.
+ *
+ *   hChannel
+ *     The handle to the channel for which the memory information is desired.
+ *   chMemInfo
+ *     A NV2080_CTRL_FIFO_CHANNEL_MEM_INFO structure.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_CHANNEL
+ */
+
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO (0x2080110c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS {
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO chMemInfo, 8);
+} NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_INVALID     0x00000000
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM      0x00000001
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH  0x00000002
+#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH 0x00000003
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION
+ *
+ * This command determines the location (vidmem/sysmem) and attribute
+ * (cached/uncached/write-combined) of the memory where USERD is located.
+ *
+ *   aperture
+ *     One of the NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_* values.
+ *
+ *   attribute
+ *     One of the NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_* values.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_POINTER
+ */
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION (0x2080110d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS {
+    NvU32 aperture;
+    NvU32 attribute;
+} NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS;
+
+// support for CPU coherent vidmem (VIDMEM_NVLINK_COH) is not yet available in RM
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_VIDMEM         0x00000000
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_SYSMEM         0x00000001
+
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_CACHED        0x00000000
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_UNCACHED      0x00000001
+#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_WRITECOMBINED 0x00000002
+
+
+
+/*
+ * NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE
+ *
+ * This command retrieves entries from the SW-encoded GPU device info table
+ * from host RM.
+ *
+ * Parameters:
+ *
+ *   baseIndex [in]
+ *     The starting index to read from the devinfo table. Must be a multiple
+ *     of MAX_ENTRIES.
+ *
+ *   entries [out]
+ *     A buffer to store up to MAX_ENTRIES entries of the devinfo table.
+ *
+ *   numEntries [out]
+ *     Number of populated entries in the provided buffer.
+ *
+ *   bMore [out]
+ *     A boolean flag indicating whether more valid entries are available to be
+ *     read. A value of NV_TRUE indicates that a further call to this control
+ *     with baseIndex incremented by MAX_ENTRIES will yield further valid data.
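+ *
+ * An illustrative paging loop (rmControl() is a hypothetical stand-in for
+ * the client's control-call dispatch; error handling omitted):
+ *
+ *   NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS p = { 0 };
+ *   NV_STATUS status;
+ *   do
+ *   {
+ *       status = rmControl(hSubdevice, NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE,
+ *                          &p, sizeof(p));
+ *       // consume p.entries[0] .. p.entries[p.numEntries - 1]
+ *       p.baseIndex += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES;
+ *   } while (status == NV_OK && p.bMore);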
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES         256
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES         32
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES   16
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA    2
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+/*
+ * NV2080_CTRL_FIFO_DEVICE_ENTRY
+ *
+ * This structure contains the engine data, PBDMA information and engine name
+ * of a FIFO device entry. It consists of the following fields:
+ *
+ *   engineData
+ *     Type of the engine
+ *   pbdmaIds
+ *     List of PBDMA IDs associated with the engine
+ *   pbdmaFaultIds
+ *     List of PBDMA fault IDs associated with the engine
+ *   numPbdmas
+ *     Number of PBDMAs
+ *   engineName
+ *     Name of the engine
+ */
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+    NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+    NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 numPbdmas;
+    char  engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+    NvU32  baseIndex;
+    NvU32  numEntries;
+    NvBool bMore;
+    // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+    NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT
+ *
+ * This command clears the ENGINE or PBDMA FAULTED bit and reschedules the
+ * faulted channel by ringing the channel's doorbell.
+ *
+ * Parameters:
+ *
+ *   engineType [in]
+ *     The NV2080_ENGINE_TYPE of the engine to which the faulted
+ *     channel is bound. This may be a logical ID for guest RM in
+ *     case of SMC.
+ *
+ *   vChid [in]
+ *     Virtual channel ID on which the fault occurred.
+ *
+ *   faultType [in]
+ *     Whether the fault was triggered by the engine (_ENGINE_FAULTED) or by a
+ *     PBDMA (_PBDMA_FAULTED). The value specified must be one of the
+ *     NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_* values.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT (0x20801113) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_ENGINE 0x00000001
+#define NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_PBDMA  0x00000002
+
+#define NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS {
+    NvU32 engineType;
+    NvU32 vChid;
+    NvU32 faultType;
+} NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY
+ *
+ * Allows clients to set the global scheduling policy for all runlists
+ * associated with the given subdevice.
+ *
+ * Currently, this is only supported for HW runlists.
+ *
+ * Since this is a global setting, only privileged clients are allowed to
+ * set it. Regular clients will get an NV_ERR_INSUFFICIENT_PERMISSIONS error.
+ *
+ * Once a certain scheduling policy is set, that policy cannot be changed to a
+ * different one unless all clients which set it have either restored the policy
+ * (using the corresponding restore flag) or died. Clients trying to set a
+ * policy while a different one is locked by another client will get an
+ * NV_ERR_INVALID_STATE error.
+ *
+ * The same client can set a scheduling policy and later change to another one
+ * only when no other clients have set the same policy. Such a sequence is
+ * equivalent to restoring the policy in between.
+ *
+ * For instance, the following sequence:
+ *
+ *   1. Set policy A
+ *   2. Set policy B
+ *
+ * is equivalent to:
+ *
+ *   1. Set policy A
+ *   2. Restore policy
+ *   3. Set policy B
+ *
+ * Parameters:
+ *
+ *   flags
+ *     This field specifies the operational properties to be applied:
+ *
+ *     - NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_FALSE
+ *       Try to set the provided 'schedPolicy' scheduling policy. If the
+ *       operation succeeds, other clients will be prevented from setting a
+ *       different scheduling policy until all clients using it have either
+ *       restored it or died.
+ *
+ *     - NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_TRUE
+ *       Let the scheduler know the client no longer requires the current
+ *       scheduling policy. This may or may not actually change the
+ *       scheduling policy, depending on how many other clients are also
+ *       using the current policy.
+ *
+ *       The 'schedPolicy' parameter is ignored when this flag is set.
+ *
+ *   schedPolicy
+ *     One of:
+ *
+ *     - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_DEFAULT
+ *       Set the default scheduling policy and prevent other clients from
+ *       changing it.
+ *
+ *     - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED
+ *       This scheduling policy causes channels to be scheduled according
+ *       to their interleave level. See the NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL
+ *       description for more details.
+ *
+ *     - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED_WDDM
+ *       This scheduling policy causes channels to be scheduled according
+ *       to their interleave level per the WDDM policy. See the
+ *       NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL description for more details.
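+ *
+ * An illustrative set/restore pairing, assuming the DRF_DEF helper from
+ * nvmisc.h (rmControl() is a hypothetical stand-in for the client's
+ * control-call dispatch):
+ *
+ *   NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS p = { 0 };
+ *   p.flags = DRF_DEF(2080, _CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS,
+ *                     _RESTORE, _FALSE);
+ *   p.schedPolicy = NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED;
+ *   rmControl(hSubdevice, NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY, &p, sizeof(p));
+ *   ...
+ *   // later, release the policy lock:
+ *   p.flags = DRF_DEF(2080, _CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS,
+ *                     _RESTORE, _TRUE);
+ *   rmControl(hSubdevice, NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY, &p, sizeof(p));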
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY (0x20801115) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS_MESSAGE_ID" */
+
+/* schedPolicy values */
+#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_DEFAULT                  0x0
+#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED      0x1
+#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED_WDDM 0x2
+
+/* SET_SCHED_POLICY flags */
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE       0:0
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_FALSE (0x00000000)
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_TRUE  (0x00000001)
+
+#define NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS {
+    NvU32 flags;
+    NvU32 schedPolicy;
+} NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO
+ *
+ * This command updates the channel info params for an existing channel.
+ * It can be a deferred API call. The control call can be used for migrating
+ * a channel to a new USERD and GPFIFO.
+ *
+ * Parameters:
+ *   [in] hClient       - Client handle
+ *   [in] hChannel      - Channel handle
+ *   [in] hUserdMemory  - USERD handle
+ *   [in] gpFifoEntries - Number of GPFIFO entries
+ *   [in] gpFifoOffset  - GPFIFO virtual offset
+ *   [in] userdOffset   - USERD offset
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO (0x20801116) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS {
+    NvHandle hClient;
+    NvHandle hChannel;
+    NvHandle hUserdMemory;
+    NvU32    gpFifoEntries;
+    NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 userdOffset, 8);
+} NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS
+ *
+ * This command will disable or enable scheduling of all usermode channels.
+ *
+ *   bDisable
+ *     This value determines whether to disable or enable the usermode channels.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS (0x20801117) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS {
+    NvBool bDisable;
+} NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB
+ *
+ * When a VF subcontext is marked as a zombie, host RM points its PDB to a
+ * dummy page allocated by guest RM in GPA space. This command provides the
+ * parameters of the guest RM's memory descriptor so that a corresponding
+ * memory descriptor can be created on the host RM. Host RM uses this to
+ * program the PDB of a zombie subcontext.
+ *
+ * Parameters:
+ *   Input parameters describing the memory descriptor:
+ *   [in] base
+ *   [in] size
+ *   [in] addressSpace
+ *   [in] cacheAttrib
+ */
+#define NV2080_CTRL_CMD_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB (0x20801118) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 addressSpace;
+    NvU32 cacheAttrib;
+} NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS;
+
+/* _ctrl2080fifo_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
new file mode 100644
index 0000000..7040d33
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
@@ -0,0 +1,210 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080fla.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX FLA control commands and parameters */
+
+#include "ctrl2080common.h"
+
+/*
+ * NV2080_CTRL_CMD_FLA_RANGE
+ *
+ * This command is used to initialize/destroy the FLA VAS for a GPU. It is
+ * intended to be used by RM clients that manage the FLA VASpace range. The
+ * mode of the command is decided based on the parameter passed by the client.
+ *
+ *   base
+ *     This parameter specifies the base of the FLA VAS that needs to be
+ *     allocated for this GPU.
+ *
+ *   size
+ *     This parameter specifies the size of the FLA VAS that needs to be
+ *     allocated for this GPU.
+ *
+ *   mode
+ *     This parameter specifies the functionality of the command.
+ *     MODE_INITIALIZE
+ *       Setting this mode will initialize the FLA VASpace for the GPU with
+ *       the base and size passed as arguments. The FLA VASpace will be owned
+ *       by RM. If the client calls the command more than once before
+ *       destroying the FLA VAS, this command will verify the range exported
+ *       before and return success if it matches.
+ *       If FLA is not supported on the platform, the command returns
+ *       NV_ERR_NOT_SUPPORTED.
+ *     MODE_DESTROY (deprecated)
+ *       This mode is a NOP.
+ *     MODE_HOST_MANAGED_VAS_INITIALIZE
+ *       This mode will initialize the FLA VASpace for the GPU with the
+ *       hVASpace handle in addition to the base and size arguments. The FLA
+ *       VASpace will be initialized and owned by guest RM. Used only on
+ *       virtualization platforms by internal clients.
+ *     MODE_HOST_MANAGED_VAS_DESTROY
+ *       This mode will destroy the FLA VAS associated with the device. It
+ *       tears down only the resources on the host RM side. Used only on
+ *       virtualization platforms by internal clients.
+ *
+ *   hVASpace
+ *     This parameter specifies the FLA VASpace that needs to be associated
+ *     with the device. This parameter takes effect only for internal clients
+ *     on virtualization platforms. For any other platform and for external
+ *     clients, this parameter has no effect.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_IN_USE
+ *   NV_ERR_INVALID_OWNER
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_FLA_RANGE (0x20803501) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_FLA_RANGE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32    mode;
+    NvHandle hVASpace;
+} NV2080_CTRL_FLA_RANGE_PARAMS;
+
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_NONE                        0x00000000
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_INITIALIZE                  NVBIT(0)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_DESTROY                     NVBIT(1)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_INITIALIZE NVBIT(2)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_DESTROY    NVBIT(3)
+
+
+/*
+ * NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK
+ *
+ * This command is used to (un)bind the FLA Instance Memory Block (IMB) with
+ * the MMU. This control call was created for the vGPU platform, where a FLA
+ * VAS is created/destroyed by guest RM. Guest RM does not have the privilege
+ * to (un)bind the IMB with the MMU, so the request is RPC-ed to host RM.
+ * The mode of the command is decided based on the actionParam passed by the
+ * client.
+ *
+ *   imbPhysAddr
+ *     This parameter specifies the FLA Instance Memory Block PA to be
+ *     programmed into the MMU. The IMB address should be 4K-aligned. This
+ *     parameter is needed only for ACTION_BIND.
+ *
+ *   addrSpace
+ *     This parameter specifies the address space of the FLA Instance Memory
+ *     Block. This parameter is needed only for ACTION_BIND.
+ *     Available options are:
+ *       NV2080_CTRL_FLA_ADDRSPACE_SYSMEM
+ *         Clients need to use this address space if the IMB is located in sysmem.
+ *       NV2080_CTRL_FLA_ADDRSPACE_FBMEM
+ *         Clients need to use this address space if the IMB is located in FB.
+ *
+ *   actionParam
+ *     This parameter specifies the functionality of the command.
+ *       NV2080_CTRL_FLA_ACTION_BIND
+ *         Setting this action calls the busBindFla helper HAL.
+ *       NV2080_CTRL_FLA_ACTION_UNBIND
+ *         Setting this action calls the busUnbindFla helper HAL.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+typedef enum NV2080_CTRL_FLA_ADDRSPACE {
+    NV2080_CTRL_FLA_ADDRSPACE_SYSMEM = 0,
+    NV2080_CTRL_FLA_ADDRSPACE_FBMEM = 1,
+} NV2080_CTRL_FLA_ADDRSPACE;
+
+typedef enum NV2080_CTRL_FLA_ACTION {
+    NV2080_CTRL_FLA_ACTION_BIND = 0,
+    NV2080_CTRL_FLA_ACTION_UNBIND = 1,
+} NV2080_CTRL_FLA_ACTION;
+
+#define NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK (0x20803502) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 imbPhysAddr, 8);
+    NV2080_CTRL_FLA_ADDRSPACE addrSpace;
+    NV2080_CTRL_FLA_ACTION    flaAction;
+} NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_FLA_GET_RANGE
+ *
+ * This command is used to query the FLA base and size from the plugin, to be
+ * returned as static info to guest RM.
+ *
+ *   base
+ *     This parameter returns the base address of the FLA range registered to
+ *     the subdevice.
+ *   size
+ *     This parameter returns the size of the FLA range registered to the
+ *     subdevice.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FLA_GET_RANGE (0x20803503) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_GET_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_GET_RANGE_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_FLA_GET_RANGE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_FLA_GET_RANGE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLA_GET_FABRIC_MEM_STATS
+ *
+ * This command returns the total size and the free size of the fabric vaspace.
+ * Note: This returns the information for the FABRIC_VASPACE_A class.
+ *
+ *   totalSize [OUT]
+ *     - Total fabric vaspace.
+ *
+ *   freeSize [OUT]
+ *     - Available fabric vaspace.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FLA_GET_FABRIC_MEM_STATS (0x20803504) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 totalSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 freeSize, 8);
+} NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS;
+
+// _ctrl2080fla_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
new file mode 100644
index 0000000..2b6ea26
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
@@ -0,0 +1,410 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080flcn.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+
+//
+// XAPICHK/XAPI_TEST breaks on including "nvmisc.h". Workaround: don't include
+// the header in that case and just redefine the macros we need.
+//
+#include "nvmisc.h"
+/*
+ * Obsolete Falcon ID type. Use NV2080_ENGINE_TYPE_ instead.
+ */
+#define FALCON_ID_PMU    (NV2080_ENGINE_TYPE_PMU)
+#define FALCON_ID_DPU    (NV2080_ENGINE_TYPE_DPU)
+#define FALCON_ID_SEC2   (NV2080_ENGINE_TYPE_SEC2)
+#define FALCON_ID_FBFLCN (NV2080_ENGINE_TYPE_FBFLCN)
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_DMEM_USAGE
+ *
+ * This command returns the total heap size and the free heap size of a
+ * falcon engine.
+ *
+ *   flcnID
+ *     The falcon ID.
+ *
+ *   heapSize
+ *     Total heap size in bytes.
+ *
+ *   heapFree
+ *     Total free heap size in bytes.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT
+ */
+#define NV2080_CTRL_CMD_FLCN_GET_DMEM_USAGE (0x20803101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS {
+    NvU32 flcnID;
+    NvU32 heapSize;
+    NvU32 heapFree;
+} NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS;
+
+/*!
+ * @defgroup NVOS_INST_EVT Instrumentation event types.
+ * @{
+ */
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_RSVD_DO_NOT_USE    0x00U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_END          0x01U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_HW_IRQ_BEGIN       0x02U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_HW_IRQ_END         0x03U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TIMER_TICK         0x04U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EVENT_BEGIN   0x05U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EVENT_END     0x06U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_ODP_BEGIN          0x07U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_ODP_END            0x08U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_DMA_BEGIN          0x09U
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_DMA_END            0x0AU
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_GENERIC_BEGIN      0x0BU
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_GENERIC_END        0x0CU
+#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EVENT_LATENCY 0x0DU
+/*!@}*/
+
+#define NV2080_CTRL_FLCN_NVOS_INST_INVALID_TASK_ID 0xFFU
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_ENGINE_ARCH
+ *
+ * Get the engine architecture, i.e. FALCON, RISC-V, etc., for a given
+ * NV2080_ENGINE_TYPE_*.
+ *
+ */
+#define NV2080_CTRL_CMD_FLCN_GET_ENGINE_ARCH (0x20803118) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    //! The engine architecture - FALCON or RISC-V
+    NvU32 engineArch;
+} NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS;
+
+/*!
+ * @defgroup Engine Arch types
+ * @{
+ */
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_DEFAULT 0x0
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_FALCON  0x1
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_RISCV   0x2
+/*!@}*/
+
+
+/* ----------------------- uStreamer (INST v2) ------------------------------ */
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_EVENT uStreamer event fields.
+ *
+ * This is a binary-packed representation of uStreamer events. There are
+ * three main types of entry: Head, Payload, and Tail. COMM here is used
+ * when a field is shared among multiple event types.
+ *
+ * @{
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_COMM_FLAG     31:31
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_COMM_HEAD     30:30
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_VARIABLE 29:29
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EXTEND   28:28
+
+/*!
+ * The DRFs below need constants assigned to their start and end so they can
+ * be represented in FINN properly. This is because FINN v1 does not yet have
+ * the ability to represent DRFs and bit fields.
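+ *
+ * For reference, after substitution the composed macros below expand to the
+ * plain high:low field notation used elsewhere in this file:
+ *
+ *   NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID        -> 27:20
+ *   NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT -> 28:24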
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT        (27)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE          (20)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT) : \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT (28)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE   (24)
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT) : \
+    (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE)
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_LENGTH         19:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOAD        7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT 23:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_HEAD_TIME           29:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_DATA_PAYLOAD        30:0
+/*!@}*/
+
+
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_BREAKDOWN
+ *
+ * These DRFs define the breakdown of the compact payload for various event
+ * IDs.
+ *
+ * @{
+ */
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_TASK_ID                            7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON                             10:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_YIELD                       0x0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_INT0                        0x1
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_TIMER_TICK                  0x2
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_QUEUE_BLOCK                 0x3
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_REASON_DMA_SUSPENDED               0x4
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_CTXSW_END_ODP_MISS_COUNT                     23:11
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TIMER_TICK_TIME_SLIP                         23:0
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_BEGIN_TASK_ID                     7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_BEGIN_UNIT_ID                     15:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_BEGIN_EVENT_TYPE                  23:16
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_BEGIN_EVENT_TYPE_RPC_BOBJ_GRP_CMD 0xFF
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_TASK_ID                       7:0
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_BOBJ_GRP_CLASS_ID             15:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_BOBJ_GRP_CMD_ID               23:16
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_RPC_FUNCTION                  23:16
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_CALLBACK_DELAY                23:8
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_END_CALLBACK_DELAY_CANCELLED      0xFFFF
+
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT_TASK_EVENT_LATENCY_SHIFT 0x6
+
+/*!@}*/
+
+
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_FEATURE
+ *
+ * This defines all the features currently supported by uStreamer. For a new
+ * use case of uStreamer, a feature should be defined here describing that
+ * use case. This value should be unique for each queue.
+ *
+ * @{
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_FEATURE_DEFAULT 0U
+#define NV2080_CTRL_FLCN_USTREAMER_FEATURE_PMUMON  1U
+#define NV2080_CTRL_FLCN_USTREAMER_FEATURE__COUNT  2U
+/*!@}*/
+
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY
+ *
+ * This defines the DRFs used for the uStreamer queue policy.
+ *
+ * @{
+ */
+
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH               0:0
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH_DISABLED      0U
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH_ENABLED       1U
+
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH               1:1
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH_DISABLED      0U
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH_ENABLED       1U
+
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH          2:2
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH_DISABLED 0U
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH_ENABLED  1U
+
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_THRESHOLD           31:8
+
+/*!@}*/
+
+/*!
+ * The maximum number of compact event types, calculated from the number of bits
+ * in the event structure.
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES_COMPACT (0x20U) /* finn: Evaluated from "(1 << (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT - NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE + 1))" */
+
+/*!
+ * The maximum number of event types, calculated from the number of bits in the
+ * event structure.
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES (0x120U) /* finn: Evaluated from "((1 << (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT - NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE + 1)) + NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES_COMPACT)" */
+
+/*!
+ * The number of bytes required in the event mask to contain all event types.
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_MASK_SIZE_BYTES (0x24U) /* finn: Evaluated from "((NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES + 7) / 8)" */
+
+/*!
+ * uStreamer Event Filter type, stored as a bitmask.
+ */
+typedef struct NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER {
+    NvU8 mask[NV2080_CTRL_FLCN_USTREAMER_MASK_SIZE_BYTES];
+} NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER;
+
+/*!
+ * NV2080_CTRL_CMD_FLCN_USTREAMER_QUEUE_INFO
+ * Get queue info for mapping / unmapping.
+ */
+#define NV2080_CTRL_CMD_FLCN_USTREAMER_QUEUE_INFO (0x20803120) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    //! The page size of the requested queue in bytes.
+    NvU32 pageSize;
+
+    //! Offset of the queue buffer in FB.
+    NV_DECLARE_ALIGNED(NvUPtr offset, 8);
+
+    //! The size of the user-mapped instrumentation buffer, in bytes.
+    NvU32 size;
+
+    //! The feature ID of the queue.
+    NvU8  queueFeatureId;
+} NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_GET/SET
+ *
+ * Get/set the event bitmask for the default queue.
+ */
+#define NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_GET (0x20803122) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x22" */
+
+#define NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_SET (0x20803123) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x23" */
+
+typedef struct NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    /*!
+     * The bitmask of which event types to log.
+     * An event type corresponding to a bit with a zero will be ignored at
+     * the log site, which prevents it from filling up the resident buffer
+     * in the PMU. In general, set this to only log the event types you
+     * actually want to use.
+     * Refer to NVOS_BM_* in nvos_utility.h for usage.
+     */
+    NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER eventFilter;
+
+    //! The queueId of the queue whose eventFilter we want to interact with
+    NvU8 queueId;
+} NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO
+ *
+ * This command provides the attributes of the falcon engine context buffer.
+ *
+ *   hUserClient [IN]
+ *     This parameter specifies the client handle that owns this channel.
+ *   hChannel [IN]
+ *     This parameter specifies the channel or channel group (TSG) handle.
+ *   alignment
+ *     Specifies the alignment requirement for each context buffer.
+ *   size
+ *     Aligned size of the context buffer.
+ *   bufferHandle
+ *     Opaque pointer to the memdesc. Used by kernel clients for tracking
+ *     purposes only.
+ *   pageCount
+ *     Allocation size in the form of a page count.
+ *   physAddr
+ *     Physical address of the buffer's first page.
+ *   aperture
+ *     Allocation aperture. Can be SYSMEM, VIDMEM or UNKNOWN.
+ *   kind
+ *     PTE kind of this allocation.
+ *   pageSize
+ *     Page size of the buffer.
+ *   bIsContigous
+ *     States whether the physical allocation for this buffer is contiguous.
+ *     pageSize has no meaning if this flag is set.
+ *   bDeviceDescendant
+ *     TRUE if the allocation is constructed under a Device or Subdevice.
+ *   uuid
+ *     SHA1 UUID of the Device or Subdevice. Valid when bDeviceDescendant is
+ *     TRUE.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO (0x20803124) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID (0x24U)
+
+typedef struct NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS {
+    NvHandle hUserClient;
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 alignment, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NV_DECLARE_ALIGNED(NvP64 bufferHandle, 8);
+    NV_DECLARE_ALIGNED(NvU64 pageCount, 8);
+    NV_DECLARE_ALIGNED(NvU64 physAddr, 8);
+    NvU32    aperture;
+    NvU32    kind;
+    NvU32    pageSize;
+    NvBool   bIsContigous;
+    NvBool   bDeviceDescendant;
+    NvU8     uuid[16];
+} NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS;
+
+// Aperture flags
+#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_UNKNWON ADDR_UNKNOWN
+#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_SYSMEM  ADDR_SYSMEM
+#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_FBMEM   ADDR_FBMEM
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_SIZE
+ *
+ * This command provides the size of the falcon engine context buffer.
+ *
+ *   hChannel [IN]
+ *     This parameter specifies the channel or channel group (TSG) handle.
+ *   totalBufferSize [OUT]
+ *     This parameter returns the total size of the context buffers.
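+ *
+ * A minimal usage sketch (illustrative only; rmControl() is a hypothetical
+ * stand-in for the client's control-call dispatch, and hTsg is an assumed
+ * channel/TSG handle owned by the caller):
+ *
+ *   NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS p = { 0 };
+ *   p.hChannel = hTsg;
+ *   status = rmControl(hSubdevice, NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_SIZE,
+ *                      &p, sizeof(p));
+ *   // on NV_OK, p.totalBufferSize holds the aggregate size in bytes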
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_SIZE (0x20803125) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 totalBufferSize, 8); +} NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS; + + + +/* _ctrl2080flcn_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h new file mode 100644 index 0000000..2af63e7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fuse.finn +// + + + +/* _ctrl2080fuse_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h new file mode 100644 index 0000000..41f9767 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gpio.finn +// + + + +/* _ctrl2080gpio_h_ */ + +#include "ctrl/ctrl2080/ctrl2080base.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h new file mode 100644 index 0000000..84177f0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h @@ -0,0 +1,3781 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080gpu.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080gr.h" +#include "ctrl/ctrl0000/ctrl0000system.h" + + + +/* NV20_SUBDEVICE_XX gpu control commands and parameters */ + +/* Valid feature values */ +#define NV2080_CTRL_GPU_GET_FEATURES_CLK_ARCH_DOMAINS 0:0 +#define NV2080_CTRL_GPU_GET_FEATURES_CLK_ARCH_DOMAINS_FALSE (0x00000000U) +#define NV2080_CTRL_GPU_GET_FEATURES_CLK_ARCH_DOMAINS_TRUE (0x00000001U) + + + +typedef struct NV2080_CTRL_GPU_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_GPU_INFO; + +/* valid gpu info index values */ + + + +#define NV2080_CTRL_GPU_INFO_INDEX_MINOR_REVISION_EXT (0x00000004U) + + +#define NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV0 (0x00000012U) +#define NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV1 (0x00000013U) + + +#define NV2080_CTRL_GPU_INFO_INDEX_SYSMEM_ACCESS (0x0000001fU) + + +#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD (0x00000022U) + + +#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE (0x00000025U) +#define NV2080_CTRL_GPU_INFO_INDEX_IBMNPU_RELAXED_ORDERING (0x00000026U) +#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED (0x00000027U) +#define NV2080_CTRL_GPU_INFO_INDEX_NVSWITCH_PROXY_DETECTED (0x00000028U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT (0x00000029U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SMC_MODE (0x0000002aU) +#define NV2080_CTRL_GPU_INFO_INDEX_SPLIT_VAS_MGMT_SERVER_CLIENT_RM (0x0000002bU) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SM_VERSION (0x0000002cU) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY (0x0000002dU) + + +#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM (0x0000002fU) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY (0x00000030U) +#define NV2080_CTRL_GPU_INFO_INDEX_NVENC_STATS_REPORTING_STATE (0x00000031U) + + +#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED (0x00000033U) +#define NV2080_CTRL_GPU_INFO_INDEX_DISPLAY_ENABLED (0x00000034U) +#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED (0x00000035U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY (0x00000036U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY (0x00000037U) + + +#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU (0x0000003cU) +#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY (0x0000003dU) +#define NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE (0x0000003eU) + +/* valid minor revision extended values */ +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_NONE (0x00000000U) +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_P (0x00000001U) +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_V (0x00000002U) +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_PV (0x00000003U) + + + +/* valid system memory access capability values */ +#define NV2080_CTRL_GPU_INFO_SYSMEM_ACCESS_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_SYSMEM_ACCESS_YES (0x00000001U) + + + +/* valid gemini board values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD_YES (0x00000001U) + +/* valid surprise removal values */ +#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE_YES (0x00000001U) + +/* valid relaxed ordering values */ +#define NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_DISABLED (0x00000000U) +#define NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_ENABLED (0x00000001U) +#define NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_UNSUPPORTED (0xFFFFFFFFU) + +/* valid poison fuse 
capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_YES (0x00000001U) + +/* valid nvswitch proxy detected values */ +#define NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_YES (0x00000001U) + +/* valid NVSR GPU support info values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT_YES (0x00000001U) + +/* valid SMC mode values */ +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_UNSUPPORTED (0x00000000U) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLED (0x00000001U) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLED (0x00000002U) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLE_PENDING (0x00000003U) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLE_PENDING (0x00000004U) + +/* valid split VAS mode values */ +#define NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_YES (0x00000001U) + +/* valid FLA capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_YES (0x00000001U) + +/* valid per runlist channel ram capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_DISABLED (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_ENABLED (0x00000001U) + +/* valid ATS capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_YES (0x00000001U) + +/* valid Nvenc Session Stats reporting state values */ +#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_DISABLED (0x00000000U) +#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_ENABLED (0x00000001U) +#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_NOT_SUPPORTED (0x00000002U) + +/* valid 4K PAGE isolation requirement values */ +#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_YES (0x00000001U) + +/* valid display enabled values */ +#define NV2080_CTRL_GPU_INFO_DISPLAY_ENABLED_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_DISPLAY_ENABLED_YES (0x00000001U) + +/* valid mobile config enabled values */ +#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_YES (0x00000001U) + + +/* valid profiling capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY_DISABLED (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY_ENABLED (0x00000001U) + +/* valid debugging capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY_DISABLED (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY_ENABLED (0x00000001U) + + + +/* valid CMP (Crypto Mining Processor) SKU values */ +#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_YES (0x00000001U) + + +/* valid dma-buf support values */ +#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_NO (0x00000000U) +#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES (0x00000001U) + +/* + * NV2080_CTRL_CMD_GPU_GET_INFO + * + * This command returns gpu information for the associated GPU.
Requests + * to retrieve gpu information use a list of one or more NV2080_CTRL_GPU_INFO + * structures. + * + * gpuInfoListSize + * This field specifies the number of entries on the caller's + * gpuInfoList. + * gpuInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the gpu information is to be returned. + * This buffer must be at least as big as gpuInfoListSize multiplied + * by the size of the NV2080_CTRL_GPU_INFO structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_INFO (0x20800101U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GPU_GET_INFO_PARAMS { + NvU32 gpuInfoListSize; + NV_DECLARE_ALIGNED(NvP64 gpuInfoList, 8); +} NV2080_CTRL_GPU_GET_INFO_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_INFO_V2 (0x20800102U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x2" */ + +typedef struct NV2080_CTRL_GPU_GET_INFO_V2_PARAMS { + NvU32 gpuInfoListSize; + NV2080_CTRL_GPU_INFO gpuInfoList[NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE]; +} NV2080_CTRL_GPU_GET_INFO_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_NAME_STRING + * + * This command returns the name of the GPU in string form in either ASCII + * or UNICODE format. + * + * gpuNameStringFlags + * This field specifies flags to use while creating the GPU name string. + * Valid flags values: + * NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII + * The returned name string should be in standard ASCII format. + * NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE + * The returned name string should be in Unicode format. + * gpuNameString + * This field contains the buffer into which the name string should be + * returned. The length of the returned string will be no more than + * NV2080_GPU_MAX_NAME_STRING_LENGTH bytes in size. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_NAME_STRING (0x20800110U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS_MESSAGE_ID" */ + +#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U) + +// This field is deprecated - 'gpuNameStringFlags' is now a simple scalar. +// Field maintained (and extended from 0:0) for compile-time compatibility. +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE 31:0 + +/* valid gpu name string flags */ +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII (0x00000000U) +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE (0x00000001U) + +#define NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS { + NvU32 gpuNameStringFlags; + union { + NvU8 ascii[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + } gpuNameString; +} NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING + * + * This command returns the short name of the GPU in ASCII string form. + * + * gpuShortNameString + * This field contains the buffer into which the short name string should + * be returned.
The length of the returned string will be no more than + * NV2080_GPU_MAX_NAME_STRING_LENGTH bytes in size. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING (0x20800111U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS { + NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; +} NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_POWER + * + * This command sets the power state for the GPU as a whole, various engines, + * or clocks. + * + * target + * One of NV2080_CTRL_GPU_SET_POWER_TARGET_* + * + * newLevel + * One of NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_* + * NV2080_CTRL_GPU_SET_POWER_STATE_ENGINE_LEVEL_* + * NV2080_CTRL_GPU_SET_POWER_STATE_CLOCK_LEVEL_* + * depending on the target above. + * + * oldLevel + * Previous level as appropriate. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_SET_POWER (0x20800112U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_POWER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_SET_POWER_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV2080_CTRL_GPU_SET_POWER_PARAMS { + NvU32 target; + NvU32 newLevel; + NvU32 oldLevel; +} NV2080_CTRL_GPU_SET_POWER_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_GET_SDM + * + * This command returns the subdevice mask value for the associated subdevice. + * The subdevice mask value can be used with the SET_SUBDEVICE_MASK instruction + * provided by the NV36_CHANNEL_DMA and newer channel dma classes. + * + * subdeviceMask [out] + * This field returns the subdevice mask value. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_SDM (0x20800118U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SDM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_SDM_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV2080_CTRL_GPU_GET_SDM_PARAMS { + NvU32 subdeviceMask; +} NV2080_CTRL_GPU_GET_SDM_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_SDM + * + * This command sets the subdevice instance and mask value for the associated subdevice. + * The subdevice mask value can be used with the SET_SUBDEVICE_MASK instruction + * provided by the NV36_CHANNEL_DMA and newer channel dma classes. + * It must be called before the GPU HW is initialized, otherwise + * NV_ERR_INVALID_STATE is returned. + * + * subdeviceMask [in] + * This field configures the subdevice mask value for the GPU/Subdevice + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_DATA + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_SET_SDM (0x20800120U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_SDM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_SET_SDM_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_GPU_SET_SDM_PARAMS { + NvU32 subdeviceMask; +} NV2080_CTRL_GPU_SET_SDM_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO + * + * This command returns the associated subdevice's simulation information.
+ * + * type + * This field returns the simulation type. + * One of NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_* + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO (0x20800119U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS { + NvU32 type; +} NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS; + +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE (0x00000000U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_MODS_AMODEL (0x00000001U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_LIVE_AMODEL (0x00000002U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_FMODEL (0x00000003U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_RTL (0x00000004U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_EMU (0x00000005U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_EMU_LOW_POWER (0x00000006U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA (0x00000007U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA_RTL (0x00000008U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA_FMODEL (0x00000009U) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_UNKNOWN (0xFFFFFFFFU) + +/* + * NV2080_CTRL_GPU_REG_OP + * + * This structure describes register operation information for use with + * the NV2080_CTRL_CMD_GPU_EXEC_REG_OPS command. The structure describes + * a single register operation. The operation can be a read or write and + * can involve either 32bits or 64bits of data. + * + * For 32bit read operations, the operation takes the following form: + * + * regValueLo = read(bar0 + regOffset) + * regValueHi = 0 + * + * For 64bit read operations, the operation takes the following form: + * + * regValueLo = read(bar0 + regOffset) + * regValueHi = read(bar0 + regOffset + 4) + * + * For 32bit write operations, the operation takes the following form: + * + * new = ((read(bar0 + regOffset) & ~regAndNMaskLo) | regValueLo) + * write(bar0 + regOffset, new) + * + * For 64bit write operations, the operation takes the following form: + * + * new_lo = ((read(bar0 + regOffset) & ~regAndNMaskLo) | regValueLo) + * new_hi = ((read(bar0 + regOffset + 4) & ~regAndNMaskHi) | regValueHi) + * write(bar0 + regOffset, new_lo) + * write(bar0 + regOffset + 4, new_hi) + * + * Details on the parameters follow: + * + * regOp + * This field specifies the operation to be applied to the register + * specified by the regOffset parameter. Valid values for this + * parameter are: + * NV2080_CTRL_GPU_REG_OP_READ_08 + * The register operation should be an 8bit global privileged register read. + * NV2080_CTRL_GPU_REG_OP_WRITE_08 + * The register operation should be an 8bit global privileged register write. + * NV2080_CTRL_GPU_REG_OP_READ_32 + * The register operation should be a 32bit register read. + * NV2080_CTRL_GPU_REG_OP_WRITE_32 + * The register operation should be a 32bit register write. + * NV2080_CTRL_GPU_REG_OP_READ_64 + * The register operation should be a 64bit register read. + * NV2080_CTRL_GPU_REG_OP_WRITE_64 + * The register operation should be a 64bit register write. + * regType + * This field specifies the type of the register specified by the + * regOffset parameter. Valid values for this parameter are: + * NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL + * The register is a global privileged register.
Read operations + * return the current value from the associated global register. + * Write operations for registers of this type take effect immediately. + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX + * The register is a graphics context register. Read operations + * return the current value from the associated global register. + * Write operations are applied to all existing graphics engine + * contexts. Any newly created graphics engine contexts will also + * be modified. + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC + * This is a graphics context TPC register group. Write operations are + * applied to TPC group(s) specified by regGroupMask. + * This field is ignored for read operations. + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM + * This is a graphics context SM register group that is inside TPC + * group. Write operations are applied to SM group(s) specified by + * regGroupMask (TPC) and regSubGroupMask (SM). This field is ignored + * for read operations. + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP + * This is a graphics context CROP register group. Write operations + * are applied to registers specified by regGroupMask. This field is + * ignored for read operations. + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP + * This is a graphics context ZROP register group. Write operations + * are applied to registers specified by regGroupMask. This field is + * ignored for read operations. + * NV2080_CTRL_GPU_REG_OP_TYPE_FB + * This is a fb register group. Write operations are applied to + * registers specified by regGroupMask. This field is + * ignored for read operations. + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD + * This is a graphics context QUAD register group. Operations + * are applied to registers specified by regQuad value. + * regQuad + * This field specifies the quad to be accessed for register regOffset when + * the regType specified is NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD. + * regGroupMask + * This field specifies which registers inside an array should be updated. + * This field is used when regType is one of the following: + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP + * NV2080_CTRL_GPU_REG_OP_TYPE_FB + * When regGroupMask is used, the regOffset MUST be the first register in + * an array. + * regSubGroupMask + * This field specifies which registers inside a group should be updated. + * This field is used for updating SM registers when regType is: + * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC + * When regSubGroupMask is used, regOffset MUST be the first register in an + * array AND also the first one in the sub-array. regGroupMask specifies + * TPC(X) and regSubGroupMask specifies SM_CTX_N(Y) + * regStatus + * This field returns the completion status for the associated register + * operation in the form of a bitmask. Possible status values for this + * field are: + * NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS + * This value indicates the operation completed successfully. + * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OP + * This bit value indicates that the regOp value is not valid. + * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_TYPE + * This bit value indicates that the regType value is not valid. + * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OFFSET + * This bit value indicates that the regOffset value is invalid. + * The regOffset value must be within the legal BAR0 range for the + * associated GPU and must target a supported register with a + * supported operation.
+ * NV2080_CTRL_GPU_REG_OP_STATUS_UNSUPPORTED_OP + * This bit value indicates that the operation to the register + * specified by the regOffset value is not supported for the + * associated GPU. + * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_MASK + * This bit value indicates that the regTpcMask value is invalid. + * The regTpcMask must be a subset of TPCs that are enabled on the + * associated GPU. + * NV2080_CTRL_GPU_REG_OP_STATUS_NOACCESS + * The caller does not have access to the register at the given offset + * regOffset + * This field specifies the register offset to access. The specified + * offset must be a valid BAR0 offset for the associated GPU. + * regValueLo + * This field contains the low 32bits of the register value. + * For read operations, this value returns the current value of the + * register specified by regOffset. For write operations, this field + * specifies the logical OR value applied to the current value + * contained in the register specified by regOffset. + * regValueHi + * This field contains the high 32bits of the register value. + * For read operations, this value returns the current value of the + * register specified by regOffset + 4. For write operations, this field + * specifies the logical OR value applied to the current value + * contained in the register specified by regOffset + 4. + * regAndNMaskLo + * This field contains the mask used to clear a desired field from + * the current value contained in the register specified by regOffset. + * This field is negated and ANDed with the current register value. + * This field is only used for write operations. This field is ignored + * for read operations. + * regAndNMaskHi + * This field contains the mask used to clear a desired field from + * the current value contained in the register specified by regOffset + 4. + * This field is negated and ANDed with the current register value. + * This field is only used for write operations. This field is ignored + * for read operations.
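+ * + * As a worked example of the 32bit write form above, to set bits 3:0 of a + * register to 0x5 while preserving all other bits (field values here are + * illustrative): + * + * op.regOp = NV2080_CTRL_GPU_REG_OP_WRITE_32; + * op.regType = NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL; + * op.regOffset = offset; + * op.regAndNMaskLo = 0xF; // bits to clear + * op.regValueLo = 0x5; // value ORed in after the clear + * + * which yields new = ((read(bar0 + offset) & ~0xF) | 0x5).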
 + */ +typedef struct NV2080_CTRL_GPU_REG_OP { + NvU8 regOp; + NvU8 regType; + NvU8 regStatus; + NvU8 regQuad; + NvU32 regGroupMask; + NvU32 regSubGroupMask; + NvU32 regOffset; + NvU32 regValueHi; + NvU32 regValueLo; + NvU32 regAndNMaskHi; + NvU32 regAndNMaskLo; +} NV2080_CTRL_GPU_REG_OP; + +/* valid regOp values */ +#define NV2080_CTRL_GPU_REG_OP_READ_32 (0x00000000U) +#define NV2080_CTRL_GPU_REG_OP_WRITE_32 (0x00000001U) +#define NV2080_CTRL_GPU_REG_OP_READ_64 (0x00000002U) +#define NV2080_CTRL_GPU_REG_OP_WRITE_64 (0x00000003U) +#define NV2080_CTRL_GPU_REG_OP_READ_08 (0x00000004U) +#define NV2080_CTRL_GPU_REG_OP_WRITE_08 (0x00000005U) + +/* valid regType values */ +#define NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL (0x00000000U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX (0x00000001U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC (0x00000002U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM (0x00000004U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP (0x00000008U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP (0x00000010U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_FB (0x00000020U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD (0x00000040U) +#define NV2080_CTRL_GPU_REG_OP_TYPE_DEVICE (0x00000080U) + +/* valid regStatus values (note: NvU8, i.e. 1 byte) */ +#define NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS (0x00U) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OP (0x01U) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_TYPE (0x02U) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OFFSET (0x04U) +#define NV2080_CTRL_GPU_REG_OP_STATUS_UNSUPPORTED_OP (0x08U) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_MASK (0x10U) +#define NV2080_CTRL_GPU_REG_OP_STATUS_NOACCESS (0x20U) + +/* + * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS + * + * This command is used to submit a buffer containing one or more + * NV2080_CTRL_GPU_REG_OP structures for processing. Each entry in the + * buffer specifies a single read or write operation. Each entry is checked + * for validity in an initial pass over the buffer with the results for + * each operation stored in the corresponding regStatus field. Unless the + * bNonTransactional flag is set to true, if any invalid entries are found + * during this initial pass then none of the operations are executed. Entries + * are processed in order within each regType with NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL + * entries processed first followed by NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX entries. + * + * hClientTarget + * This parameter specifies the handle of the client that owns the channel + * specified by hChannelTarget. If this parameter is set to 0 then the set + * of channel-specific register operations are applied to all current and + * future channels. + * hChannelTarget + * This parameter specifies the handle of the target channel (or channel + * group) object instance to which channel-specific register operations are + * to be directed. If hClientTarget is set to 0 then this parameter must + * also be set to 0. + * bNonTransactional + * This field specifies if the command is non-transactional, i.e. if set to + * true, all the valid operations will be executed. + * reserved00 + * This parameter is reserved for future use. It should be initialized to + * zero for correct operation. + * regOpCount + * This field specifies the number of entries on the caller's regOps + * list. + * regOps + * This field specifies a pointer in the caller's address space + * to the buffer from which the desired register information is to be + * retrieved.
This buffer must be at least as big as regOpCount + * multiplied by the size of the NV2080_CTRL_GPU_REG_OP structure. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. When SMC is enabled, this + * is a mandatory parameter. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS (0x20800122U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x22" */ + +typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS { + NvHandle hClientTarget; + NvHandle hChannelTarget; + NvU32 bNonTransactional; + NvU32 reserved00[2]; + NvU32 regOpCount; + NV_DECLARE_ALIGNED(NvP64 regOps, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINES + * + * Returns a list of supported engine types along with the number of instances + * of each type. Querying with engineList NULL returns engineCount. + * + * engineCount + * This field specifies the number of entries on the caller's engineList + * field. + * engineList + * This field is a pointer to a buffer of NvU32 values representing the + * set of engines supported by the associated subdevice. Refer to cl2080.h + * for the complete set of supported engine types. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINES (0x20800123U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ENGINES_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINES_PARAMS { + NvU32 engineCount; + NV_DECLARE_ALIGNED(NvP64 engineList, 8); +} NV2080_CTRL_GPU_GET_ENGINES_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_ENGINES_V2 (0x20800170U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS_MESSAGE_ID" */ + +/* Must match NV2080_ENGINE_TYPE_LAST from cl2080.h */ +#define NV2080_GPU_MAX_ENGINES_LIST_SIZE 0x34U + +#define NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS_MESSAGE_ID (0x70U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS { + NvU32 engineCount; + NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE]; +} NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST + * + * Returns a list of classes supported by a given engine type. + * + * engineType + * This field specifies the engine type being queried. + * NV2080_CTRL_ENGINE_TYPE_ALLENGINES will return classes + * supported by all engines. + * + * numClasses + * This field specifies the number of classes supported by + * engineType. + * + * classList + * This field is an array containing the list of supported + * classes.
It is of type (NvU32 *). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST (0x20800124U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS { + NvU32 engineType; + NvU32 numClasses; + NV_DECLARE_ALIGNED(NvP64 classList, 8); +} NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS; + + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_FAULT_INFO + * + * This command returns the fault properties of the specified engine type. + * + * engineType + * Input parameter. + * This field specifies the engine type being queried. + * Engine type is specified using the NV2080_ENGINE_TYPE_* defines in cl2080.h. + * The list of engines supported by a chip can be obtained using the + * NV2080_CTRL_CMD_GPU_GET_ENGINES ctrl call. + * + * mmuFaultId + * Output parameter. + * This field returns the MMU fault ID for the specified engine. + * If the engine supports subcontext, this field provides the base fault id. + * + * bSubcontextSupported + * Output parameter. + * Returns TRUE if subcontext faulting is supported by the engine. + * Engines that support subcontexts use fault IDs in the range [mmuFaultId, mmuFaultId + maxSubCtx). + * "maxSubctx" can be found using the NV2080_CTRL_FIFO_INFO ctrl call with + * NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP as the index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_FAULT_INFO (0x20800125U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS { + NvU32 engineType; + NvU32 mmuFaultId; + NvBool bSubcontextSupported; +} NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_QUERY_MODE + * + * This command is used to detect the mode of the GPU associated with the + * subdevice. + * + * mode + * This parameter returns the current mode of GPU. Legal values for + * this parameter include: + * NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE + * The GPU is currently operating in graphics mode. + * NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE + * The GPU is currently operating in compute mode. + * NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE + * The current mode of the GPU could not be determined. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_QUERY_MODE (0x20800128U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_MODE_PARAMS_MESSAGE_ID" */ + +/* valid mode parameter values */ +#define NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE (0x00000000U) +#define NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE (0x00000001U) +#define NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE (0x00000002U) + +#define NV2080_CTRL_GPU_QUERY_MODE_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_GPU_QUERY_MODE_PARAMS { + NvU32 mode; +} NV2080_CTRL_GPU_QUERY_MODE_PARAMS; + + + +/*!
+ * NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY + * Data block describing a virtual context buffer to be promoted + * + * gpuPhysAddr [IN] + * GPU Physical Address for the buffer + * gpuVirtAddr [IN] + * GPU Virtual Address for the buffer + * size [IN] + * Size of this virtual context buffer + * physAttr [IN] + * Physical memory attributes (aperture, cacheable) + * bufferId [IN] + * Virtual context buffer type, data type NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_* + * bInitialize [IN] + * Flag indicating that this virtual context buffer should be initialized prior to promotion. + * The client must clear (memset) the buffer to 0x0 prior to initialization. + * The following buffers need initialization: + * 1. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN + * 2. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH + * 3. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP + * 4. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP + * bNonmapped [IN] + * Flag indicating that the virtual address is not to be promoted with this + * call. It is illegal to set this flag and not set bInitialize. + */ +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 physAttr; + NvU16 bufferId; + NvU8 bInitialize; + NvU8 bNonmapped; +} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY; + +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U + +#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U + +/* + * NV2080_CTRL_CMD_GPU_PROMOTE_CTX + * + * This command is used to promote a Virtual Context + * + * engineType + * Engine the Virtual Context is for + * hClient + * Client Handle for hVirtMemory + * ChID + * Hw Channel -- Actually hw index for channel (deprecated) + * hChanClient + * The client handle for hObject + * hObject + * Passed in object handle for either a single channel or a channel group + * hVirtMemory + * Virtual Address handle to map Virtual Context to + * virtAddress + * Virtual Address to map Virtual Context to + * size + * size of the Virtual Context + * entryCount + * Number of valid entries in the promotion entry list + * promoteEntry + * List of context buffer entries to issue promotions for. + * + * When not using promoteEntry, only hVirtMemory or (virtAddress, size) should be + * specified; the code branches based on hVirtMemory (NULL vs non-NULL), so + * if both are specified, hVirtMemory takes precedence. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED - The Class does not support version info retrieval + * NV_ERR_INVALID_DEVICE - The Class/Device is not yet ready to provide this info. + * NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified.
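+ * + * A minimal promotion sketch (illustrative only; the NvRmControl entry point + * and the hClient/hSubdevice/hChannelGroup handles are assumptions, not + * defined in this header): + * + * NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS params = { 0 }; + * params.engineType = engineType; + * params.hChanClient = hClient; + * params.hObject = hChannelGroup; + * params.entryCount = 1; + * params.promoteEntry[0].bufferId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN; + * params.promoteEntry[0].gpuVirtAddr = vaddr; + * params.promoteEntry[0].bInitialize = 1; // MAIN needs initialization + * status = NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_PROMOTE_CTX, + * &params, sizeof(params));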
 + */ +#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID (0x2BU) + +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS { + NvU32 engineType; + NvHandle hClient; + NvU32 ChID; + NvHandle hChanClient; + NvHandle hObject; + NvHandle hVirtMemory; + NV_DECLARE_ALIGNED(NvU64 virtAddress, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 entryCount; + // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8); +} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS; +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *PNV2080_CTRL_GPU_PROMOTE_CTX_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_EVICT_CTX + * + * This command is used to evict a Virtual Context + * + * engineType + * Engine the Virtual Context is for + * hClient + * Client Handle + * ChID + * Hw Channel -- Actually hw index for channel (deprecated) + * hChanClient + * Client handle for hObject + * hObject + * Passed in object handle for either a single channel or a channel group + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED - The Class does not support version info retrieval + * NV_ERR_INVALID_DEVICE - The Class/Device is not yet ready to provide this info. + * NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified. + */ +#define NV2080_CTRL_CMD_GPU_EVICT_CTX (0x2080012cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EVICT_CTX_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_EVICT_CTX_PARAMS_MESSAGE_ID (0x2CU) + +typedef struct NV2080_CTRL_GPU_EVICT_CTX_PARAMS { + NvU32 engineType; + NvHandle hClient; + NvU32 ChID; + NvHandle hChanClient; + NvHandle hObject; +} NV2080_CTRL_GPU_EVICT_CTX_PARAMS; +typedef struct NV2080_CTRL_GPU_EVICT_CTX_PARAMS *PNV2080_CTRL_GPU_EVICT_CTX_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_INITIALIZE_CTX + * + * This command is used to initialize a Virtual Context. The ctx buffer must be + * cleared (zeroed) by the caller prior to invoking this method. + * + * engineType + * Engine the Virtual Context is for + * hClient + * Client Handle for the hVirtMemory + * ChID + * Hw channel -- Actually channel index (deprecated) + * hChanClient + * The client handle for hObject + * hObject + * Passed in object handle for either a single channel or a channel group + * hVirtMemory + * Virtual Address to map the Virtual Context to + * physAddress + * Physical offset in FB to use as Virtual Context + * physAttr + * Physical memory attributes + * hDmaHandle + * Dma Handle when using discontiguous context buffers + * index + * Start offset in Virtual DMA Context + * size + * Size of the Virtual Context + * + * Only hVirtMemory or size should be specified; the code branches based on hVirtMemory + * (NULL vs non-NULL), so if both are specified, hVirtMemory takes precedence. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED - The Class does not support version info retrieval + * NV_ERR_INVALID_DEVICE - The Class/Device is not yet ready to provide this info. + * NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified.
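+ * + * The physAttr field packs the aperture and cacheability attributes using the + * DRF-style ranges defined after the params struct below; a sketch of composing + * it (assuming the standard DRF_DEF helper from nvmisc.h): + * + * physAttr = DRF_DEF(2080_CTRL_GPU, _INITIALIZE_CTX, _APERTURE, _VIDMEM) | + * DRF_DEF(2080_CTRL_GPU, _INITIALIZE_CTX, _GPU_CACHEABLE, _NO);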
 + */ +#define NV2080_CTRL_CMD_GPU_INITIALIZE_CTX (0x2080012dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_MESSAGE_ID (0x2DU) + +typedef struct NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS { + NvU32 engineType; + NvHandle hClient; + NvU32 ChID; + NvHandle hChanClient; + NvHandle hObject; + NvHandle hVirtMemory; + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + NvU32 physAttr; + NvHandle hDmaHandle; + NvU32 index; + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS; +typedef struct NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *PNV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS; + +#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE 1:0 +#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_VIDMEM (0x00000000U) +#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_COH_SYS (0x00000001U) +#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_NCOH_SYS (0x00000002U) + +#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE 2:2 +#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE_YES (0x00000000U) +#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE_NO (0x00000001U) + +/* + * NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX - Tells RM whether this ctx buffer needs to + * do a full initialization (load the golden image). When a context is promoted on a different + * channel than the one it was originally initialized on, the client can use this flag to tell RM + * that this is an already initialized context. In such cases RM will update the internal state + * to reflect the new context address and state variables. + */ + +#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX 3:3 +#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX_NO (0x00000000U) +#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX_YES (0x00000001U) + +/* + * NV2080_CTRL_CMD_GPU_QUERY_ECC_INTR + * Queries the top level ECC PMC PRI register + * TODO remove these parameters, tracked in bug #1975721 + */ +#define NV2080_CTRL_CMD_GPU_QUERY_ECC_INTR (0x2080012eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x2E" */ + +typedef struct NV2080_CTRL_GPU_QUERY_ECC_INTR_PARAMS { + NvU32 eccIntrStatus; +} NV2080_CTRL_GPU_QUERY_ECC_INTR_PARAMS; + +/** + * NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS + * + * This command is used to query the ECC status of a GPU by a subdevice + * handle. Please see the NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS + * data structure description below for details on the data reported + * per hardware unit. + * + * units + * Array of structures used to describe per-unit state + * + * flags + * See interface flag definitions below.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + + + +#define NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS (0x2080012fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_MESSAGE_ID" */ + + +#define NV2080_CTRL_GPU_ECC_UNIT_COUNT (0x00000016U) + + + +// Deprecated, do not use +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE 0:0 +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_FILTERED (0x00000000U) +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_RAW (0x00000001U) + +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_FALSE 0U +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_TRUE 1U +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_INDETERMINATE 2U + +/* + * NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS + * + * This structure represents the exception status of a class of per-unit + * exceptions + * + * count + * number of exceptions that have occurred since boot + */ +typedef struct NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS { + NV_DECLARE_ALIGNED(NvU64 count, 8); +} NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS; + +/* + * NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS + * + * This structure represents the per-unit ECC exception status + * + * enabled + * ECC enabled yes/no for this unit + * scrubComplete + * Scrub has completed yes/no. A scrub is performed for some units to ensure + * the checkbits are consistent with the protected data. + * supported + * Whether HW supports ECC in this unit for this GPU + * dbe + * Double bit error (DBE) status. The value returned reflects a counter + * that is monotonic, but can be reset by clients. + * dbeNonResettable (deprecated, do not use) + * Double bit error (DBE) status, not client resettable. + * sbe + * Single bit error (SBE) status. The value returned reflects a counter + * that is monotonic, but can be reset by clients. + * sbeNonResettable (deprecated, do not use) + * Single bit error (SBE) status, not client resettable. + * + */ +typedef struct NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS { + NvBool enabled; + NvBool scrubComplete; + NvBool supported; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS dbe, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS dbeNonResettable, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS sbe, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS sbeNonResettable, 8); +} NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS; + +/* + * NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS + * + * This structure returns ECC exception status and GPU Fatal Poison for all units + * + * units + * This structure represents ECC exception status for all Units. + * bFatalPoisonError + * Whether GPU Fatal poison error occurred in this GPU. This will be set for Ampere_and_later + * uncorrectableError + * Indicates whether any uncorrectable GR ECC errors have occurred. When + * SMC is enabled, uncorrectableError is only valid when the client is + * subscribed to a partition. Check QUERY_ECC_STATUS_UNC_ERR_* + * flags + * Flags passed by caller. Refer to NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_* for details. + * grRouteInfo + * SMC partition information. This input is only valid when SMC is + * enabled on Ampere_and_later.
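+ * + * A minimal readout sketch (illustrative only; the NvRmControl entry point + * and the hClient/hSubdevice handles are assumptions, not defined in this + * header): + * + * NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS params = { 0 }; + * status = NvRmControl(hClient, hSubdevice, + * NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS, + * &params, sizeof(params)); + * for (i = 0; i < NV2080_CTRL_GPU_ECC_UNIT_COUNT; i++) + * if (params.units[i].supported && params.units[i].enabled) + * dbeTotal += params.units[i].dbe.count;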
+ * + */ +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_MESSAGE_ID (0x2FU) + +typedef struct NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS units[NV2080_CTRL_GPU_ECC_UNIT_COUNT], 8); + NvBool bFatalPoisonError; + NvU8 uncorrectableError; + NvU32 flags; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES + * + * This command sets the compute mode rules for the associated subdevice. The + * default mode is equivalent to NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE. This + * command is available to clients with administrator privileges only. An + * attempt to use this command by a client without administrator privileges + * results in the return of an NV_ERR_INSUFFICIENT_PERMISSIONS status. + * + * rules + * This parameter is used to specify the rules that govern the GPU with + * respect to NV50_COMPUTE objects. Legal values for this parameter include: + * + * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE + * This mode indicates that no special restrictions apply to the + * allocation of NV50_COMPUTE objects. + * + * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE + * This mode means that only one instance of NV50_COMPUTE will be + * allowed at a time. This restriction is enforced at each subsequent + * NV50_COMPUTE allocation attempt. Setting this mode will not affect + * any existing compute programs that may be running. For example, + * if this mode is set while three compute programs are running, then + * all of those programs will be allowed to continue running. However, + * until they all finish running, no new NV50_COMPUTE objects may be + * allocated. User-mode clients should treat this as restricting access + * to a NV50_COMPUTE object to a single thread within a process. + * + * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED + * This mode means that the GPU is never allowed to instantiate an + * NV50_COMPUTE object, and thus cannot run any new compute programs. + * This restriction is enforced at each subsequent NV50_COMPUTE object + * allocation attempt. Setting this mode will not affect any existing + * compute programs that may be running. For example, if this mode is + * set while three compute programs are running, then all of those + * programs will be allowed to continue running. However, no new + * NV50_COMPUTE objects may be allocated. + * + * + * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS + * This mode is identical to EXCLUSIVE_COMPUTE, where only one instance + * of NV50_COMPUTE will be allowed at a time. It is separate from + * EXCLUSIVE_COMPUTE to allow user-mode clients to differentiate + * exclusive access to a compute object from a single thread of a + * process from exclusive access to a compute object from all threads + * of a process. User-mode clients should not limit access to a + * NV50_COMPUTE object to a single thread when the GPU is set to + * EXCLUSIVE_COMPUTE_PROCESS. + * + * An invalid rules parameter value results in the return of an + * NV_ERR_INVALID_ARGUMENT status. + * + * flags + * Reserved. Caller should set this field to zero.
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT (if an invalid rule number is provided) + * NV_ERR_INSUFFICIENT_PERMISSIONS (if the user is not the Administrator or superuser) + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES (0x20800130U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID" */ + +/* valid rules parameter values */ +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE (0x00000000U) +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE (0x00000001U) +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED (0x00000002U) +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS (0x00000003U) + +#define NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS { + NvU32 rules; + NvU32 flags; +} NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES + * + * This command queries the compute mode rules for the associated subdevice. + * Please see the NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES command, above, for + * details as to what the rules mean. + * + * rules + * Specifies the rules that govern the GPU, with respect to NV50_COMPUTE + * objects. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES (0x20800131U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID (0x31U) + +typedef struct NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS { + NvU32 rules; +} NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_QUERY_ECC_CONFIGURATION + * + * This command returns the current ECC configuration setting for + * a GPU given its subdevice handle. The value returned is + * the current ECC setting for the GPU stored in non-volatile + * memory on the board. + * + * currentConfiguration + * The current ECC configuration setting. + * + * defaultConfiguration + * The factory default ECC configuration setting. + * + * Please see the NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS command if + * you wish to determine if ECC is currently enabled. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_GPU_QUERY_ECC_CONFIGURATION (0x20800133U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_ECC_CONFIGURATION_DISABLED (0x00000000U) +#define NV2080_CTRL_GPU_ECC_CONFIGURATION_ENABLED (0x00000001U) + +#define NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS_MESSAGE_ID (0x33U) + +typedef struct NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS { + NvU32 currentConfiguration; + NvU32 defaultConfiguration; +} NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_ECC_CONFIGURATION + * + * This command changes the ECC configuration setting for a GPU + * given its subdevice handle. The value specified is + * stored in non-volatile memory on the board and will take + * effect with the next GPU reset. + * + * newConfiguration + * The new configuration setting to take effect with + * the next GPU reset.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_ECC_CONFIGURATION (0x20800134U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_ECC_CONFIGURATION_DISABLE (0x00000000U)
+#define NV2080_CTRL_GPU_ECC_CONFIGURATION_ENABLE (0x00000001U)
+
+#define NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS_MESSAGE_ID (0x34U)
+
+typedef struct NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS {
+    NvU32 newConfiguration;
+} NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_RESET_ECC_ERROR_STATUS
+ *
+ * This command resets volatile and/or persistent ECC error
+ * status information for a GPU given its subdevice
+ * handle.
+ *
+ * statuses
+ * The ECC error statuses (the current, volatile
+ * and/or the persistent error counter(s)) to
+ * be reset by the command.
+ * flags
+ * FORCE_PURGE
+ * Forcibly clean all the ECC InfoROM state if this flag is set
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_RESET_ECC_ERROR_STATUS (0x20800136U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_NONE (0x00000000U)
+#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_VOLATILE (0x00000001U)
+#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_AGGREGATE (0x00000002U)
+
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE 0:0
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE_FALSE 0U
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE_TRUE 1U
+
+#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS_MESSAGE_ID (0x36U)
+
+typedef struct NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS {
+    NvU32 statuses;
+    NvU8 flags;
+} NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO
+ *
+ * This command returns a mask of enabled GPCs for the associated GPU.
+ *
+ * gpcMask
+ * This parameter returns a mask of enabled GPCs. Each GPC has an ID
+ * that's equivalent to the corresponding bit position in the mask.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID (0x37U)
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+    NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO
+ *
+ * This command returns a mask of enabled TPCs for a specified GPC.
+ *
+ * gpcId
+ * This parameter specifies the GPC for which TPC information is
+ * to be retrieved. If the GPC with this ID is not enabled this command
+ * will return a tpcMask value of zero.
+ *
+ * tpcMask
+ * This parameter returns a mask of enabled TPCs for the specified GPC.
+ * Each TPC has an ID that's equivalent to the corresponding bit
+ * position in the mask.
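+ *
+ * Illustrative sketch of decoding the mask (assumes params was filled in by
+ * a successful call; bit position == TPC ID, as described above):
+ *
+ *   for (NvU32 tpcId = 0; tpcId < 32; tpcId++) {
+ *       if (params.tpcMask & (1U << tpcId)) {
+ *           // TPC 'tpcId' is enabled within GPC 'params.gpcId'
+ *       }
+ *   }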
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_ZCULL_INFO
+ *
+ * This command returns a mask of enabled ZCULLs for a specified GPC.
+ *
+ * gpcId
+ * This parameter specifies the GPC for which ZCULL information is to be
+ * retrieved. If the GPC with this ID is not enabled this command will
+ * return a zcullMask value of zero.
+ *
+ * zcullMask
+ * This parameter returns a mask of enabled ZCULLs for the specified GPC.
+ * Each ZCULL has an ID that's equivalent to the corresponding bit
+ * position in the mask.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_PARAM_STRUCT
+ *
+ * Deprecated: Please use the GR-based control call
+ * NV2080_CTRL_CMD_GR_GET_ZCULL_MASK
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_ZCULL_INFO (0x20800139U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS_MESSAGE_ID (0x39U)
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO
+ *
+ * If an InfoROM with a valid OEM Board Object is present, this
+ * command returns relevant information from the object to the
+ * caller.
+ *
+ * The following data are currently reported:
+ *
+ * buildDate
+ * The board's build date (8 digit BCD in format YYYYMMDD).
+ *
+ * marketingName
+ * The board's marketing name (24 ASCII letters e.g. "Quadro FX5800").
+ *
+ * boardSerialNumber
+ * The board's serial number.
+ *
+ * memoryManufacturer
+ * The board's memory manufacturer ('S'amsung/'H'ynix/'I'nfineon).
+ *
+ * memoryDateCode
+ * The board's memory datecode (LSB justified ASCII field with 0x00
+ * denoting empty space).
+ *
+ * productPartNumber
+ * The board's 900 product part number (LSB justified ASCII field with 0x00
+ * denoting empty space e.g. "900-21228-0208-200").
+ *
+ * boardRevision
+ * The board's revision (e.g. A02, B01).
+ *
+ * boardType
+ * The board's type ('E'ngineering/'P'roduction).
+ *
+ * board699PartNumber
+ * The board's 699 product part number (LSB justified ASCII field with 0x00
+ * denoting empty space e.g. "699-21228-0208-200").
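+ *
+ * Illustrative sketch: the string fields are fixed-width ASCII padded with
+ * 0x00, so bound the width when printing rather than assuming NUL
+ * termination (a defensive assumption; params is a filled-in
+ * NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS):
+ *
+ *   printf("marketing name: %.*s\n",
+ *          NV2080_GPU_MAX_MARKETING_NAME_LENGTH,
+ *          (const char *)params.marketingName);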
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO (0x2080013fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_GPU_MAX_MARKETING_NAME_LENGTH (0x00000018U)
+#define NV2080_GPU_MAX_SERIAL_NUMBER_LENGTH (0x00000010U)
+#define NV2080_GPU_MAX_MEMORY_PART_ID_LENGTH (0x00000014U)
+#define NV2080_GPU_MAX_MEMORY_DATE_CODE_LENGTH (0x00000006U)
+#define NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH (0x00000014U)
+
+#define NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS_MESSAGE_ID (0x3FU)
+
+typedef struct NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS {
+    NvU32 buildDate;
+    NvU8 marketingName[NV2080_GPU_MAX_MARKETING_NAME_LENGTH];
+    NvU8 serialNumber[NV2080_GPU_MAX_SERIAL_NUMBER_LENGTH];
+    NvU8 memoryManufacturer;
+    NvU8 memoryPartID[NV2080_GPU_MAX_MEMORY_PART_ID_LENGTH];
+    NvU8 memoryDateCode[NV2080_GPU_MAX_MEMORY_DATE_CODE_LENGTH];
+    NvU8 productPartNumber[NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH];
+    NvU8 boardRevision[3];
+    NvU8 boardType;
+    NvU8 board699PartNumber[NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH];
+} NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ID
+ *
+ * This command returns the gpuId of the associated object.
+ *
+ * gpuId
+ * This field returns the gpuId.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ID (0x20800142U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ID_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ID_PARAMS_MESSAGE_ID (0x42U)
+
+typedef struct NV2080_CTRL_GPU_GET_ID_PARAMS {
+    NvU32 gpuId;
+} NV2080_CTRL_GPU_GET_ID_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE
+ *
+ * This command is used to enable or disable GPU debug mode. While this mode
+ * is enabled, some client RM calls that can potentially timeout return
+ * NV_ERR_BUSY_RETRY, signalling the client to try again once GPU
+ * debug mode is disabled.
+ *
+ * mode
+ * This parameter specifies whether GPU debug mode is to be enabled or
+ * disabled. Possible values are:
+ *
+ * NV2080_CTRL_GPU_DEBUG_MODE_ENABLED
+ * NV2080_CTRL_GPU_DEBUG_MODE_DISABLED
+ *
+ * Possible return status values are
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE (0x20800143U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID (0x43U)
+
+typedef struct NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS {
+    NvU32 mode;
+} NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS;
+
+#define NV2080_CTRL_GPU_DEBUG_MODE_ENABLED (0x00000001U)
+#define NV2080_CTRL_GPU_DEBUG_MODE_DISABLED (0x00000002U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_GPU_DEBUG_MODE
+ *
+ * This command is used to query whether debug mode is enabled on the current
+ * GPU. Please see the description of NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE
+ * for more details on GPU debug mode.
+ *
+ * currentMode
+ * This parameter returns the state of GPU debug mode for the current GPU.
+ * Possible values are:
+ *
+ * NV2080_CTRL_GPU_DEBUG_MODE_ENABLED
+ * NV2080_CTRL_GPU_DEBUG_MODE_DISABLED
+ *
+ * Possible return status values are
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_GET_GPU_DEBUG_MODE (0x20800144U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS {
+    NvU32 currentMode;
+} NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST
+ *
+ * Returns a list of engines that can partner or coexist
+ * when using the target channel or partnership class.
+ * This list may include all engines (pre-Kepler), or as few
+ * as 1 engine (Kepler and beyond).
+ *
+ * engineType
+ * This field specifies the target engine type.
+ * See cl2080.h for a list of valid engines.
+ *
+ * partnershipClassId
+ * This field specifies the target channel
+ * or partnership class ID.
+ * An example of such a class is GF100_CHANNEL_GPFIFO.
+ *
+ * runqueue
+ * This field is an index which indicates the runqueue to
+ * return the list of supported engines for. This is the
+ * same field as what NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE
+ * specifies. This is only valid for TSG.
+ *
+ * numPartners
+ * This field returns the number of
+ * valid entries in the partnerList array
+ *
+ * partnerList
+ * This field is an array containing the list of supported
+ * partner engine types, in no particular order, and
+ * may even be empty (numPartners = 0).
+ * See cl2080.h for a list of possible engines.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST (0x20800147U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS_MESSAGE_ID" */
+
+/* this macro specifies the maximum number of partner entries */
+#define NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS (0x00000020U)
+
+#define NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS_MESSAGE_ID (0x47U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS {
+    NvU32 engineType;
+    NvU32 partnershipClassId;
+    NvU32 runqueue;
+    NvU32 numPartners;
+    // C form: NvU32 partnerList[NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS];
+    NvU32 partnerList[NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS];
+} NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_GID_INFO
+ *
+ * This command returns the GPU ID (GID) string for the associated
+ * GPU. This value can be useful for GPU identification and security
+ * system validation.
+ *
+ * The GPU ID is a SHA-1 based 16 byte ID, formatted as a 32 character
+ * hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the
+ * canonical format of a UUID)
+ *
+ * The GPU IDs are generated using the ECID, PMC_BOOT_0, and
+ * PMC_BOOT_42 of the GPU as the hash message.
+ *
+ * index
+ * (Input) "Select which GID set to get." Or so the original documentation
+ * said. In reality, there is only one GID per GPU, and the implementation
+ * completely ignores this parameter. You can too.
+ *
+ * flags (Input) The _FORMAT* flags designate ASCII or binary format. Binary
+ * format returns the raw bytes of either the 16-byte SHA-1 ID or the
+ * 32-byte SHA-256 ID.
+ *
+ * The _TYPE* flag needs to specify the _SHA1 type.
+ *
+ * length
+ * (Output) Actual GID length, in bytes.
+ *
+ * data[NV2080_GPU_MAX_GID_LENGTH]
+ * (Output) Result buffer: the GID itself, in a format that is determined by
+ * the "flags" field (described above).
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_GPU_GET_GID_INFO (0x2080014aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_GID_INFO_PARAMS_MESSAGE_ID" */
+
+/* maximum possible number of bytes of GID information returned */
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+#define NV2080_CTRL_GPU_GET_GID_INFO_PARAMS_MESSAGE_ID (0x4AU)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+    NvU32 index;
+    NvU32 flags;
+    NvU32 length;
+    NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+/* valid flags values */
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT 1:0
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_ASCII (0x00000000U)
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_BINARY (0x00000002U)
+
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_TYPE 2:2
+#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_TYPE_SHA1 (0x00000000U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_INFOROM_OBJECT_VERSION
+ *
+ * This command can be used by clients to retrieve the version of an
+ * InfoROM object.
+ *
+ * objectType
+ * This parameter specifies the name of the InfoROM object whose version
+ * should be queried.
+ *
+ * version
+ * This parameter returns the version of the InfoROM object specified by
+ * the objectType parameter.
+ *
+ * subversion
+ * This parameter returns the subversion of the InfoROM object specified
+ * by the objectType parameter.
+ *
+ * Possible return status values:
+ * NV_OK
+ * NV_ERR_STATE_IN_USE
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_GET_INFOROM_OBJECT_VERSION (0x2080014bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_INFOROM_OBJ_TYPE_LEN 3U
+
+#define NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS_MESSAGE_ID (0x4BU)
+
+typedef struct NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS {
+    char objectType[NV2080_CTRL_GPU_INFOROM_OBJ_TYPE_LEN];
+    NvU8 version;
+    NvU8 subversion;
+} NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_SET_GPU_OPTIMUS_INFO
+ *
+ * This command will specify that the system is Optimus enabled.
+ *
+ * isOptimusEnabled
+ * Set NV_TRUE if the system is Optimus enabled.
+ *
+ * Possible status return values are:
+ * NV_OK
+ */
+#define NV2080_CTRL_CMD_SET_GPU_OPTIMUS_INFO (0x2080014cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS_MESSAGE_ID (0x4CU)
+
+typedef struct NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS {
+    NvBool isOptimusEnabled;
+} NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_IP_VERSION
+ *
+ * Will return the IP VERSION on the given engine for engines that support
+ * this capability.
+ *
+ * targetEngine
+ * This parameter specifies the target engine type to query for IP_VERSION.
+ *
+ * ipVersion
+ * This parameter returns the IP VERSION read from the unit's IP_VER
+ * register.
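+ *
+ * Illustrative sketch (assumed NvRmControl()-style entry point and handles;
+ * the engine selector values are defined just below):
+ *
+ *   NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS params = { 0 };
+ *   params.targetEngine = NV2080_CTRL_GPU_GET_IP_VERSION_DISPLAY;
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GPU_GET_IP_VERSION,
+ *                        &params, sizeof(params));
+ *   // On NV_OK, params.ipVersion holds the unit's IP_VER register value.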
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_IP_VERSION (0x2080014dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS_MESSAGE_ID (0x4DU)
+
+typedef struct NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS {
+    NvU32 targetEngine;
+    NvU32 ipVersion;
+} NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_IP_VERSION_DISPLAY (0x00000001U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_HDACODEC (0x00000002U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PMGR (0x00000003U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PPWR_PMU (0x00000004U)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_DISP_FALCON (0x00000005U)
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT
+ *
+ * This command returns an indicator which reports if the specified Illumination control
+ * attribute is supported.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_GPU_ILLUM_ATTRIB_LOGO_BRIGHTNESS 0U
+#define NV2080_CTRL_GPU_ILLUM_ATTRIB_SLI_BRIGHTNESS 1U
+#define NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT (0x20800153U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS_MESSAGE_ID (0x53U)
+
+typedef struct NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS {
+    NvU32 attribute;
+    NvBool bSupported;
+} NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ILLUM
+ *
+ * This command returns the current value of the specified Illumination control attribute.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ILLUM (0x20800154U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x54" */
+
+typedef struct NV2080_CTRL_CMD_GPU_ILLUM_PARAMS {
+    NvU32 attribute;
+    NvU32 value;
+} NV2080_CTRL_CMD_GPU_ILLUM_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_ILLUM
+ *
+ * This command sets a new value for the specified Illumination control attribute.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_ILLUM (0x20800155U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x55" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_INFOROM_IMAGE_VERSION
+ *
+ * This command can be used by clients to retrieve the version of the entire
+ * InfoROM image.
+ *
+ * version
+ * This parameter returns the version of the InfoROM image as a NULL-
+ * terminated character string of the form "XXXX.XXXX.XX.XX" where each
+ * 'X' is an integer character.
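+ *
+ * Illustrative sketch (not from the original docs): since the version string
+ * is documented as NULL-terminated, it can be printed directly once a call
+ * succeeds (params is assumed to be a filled-in
+ * NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS):
+ *
+ *   printf("InfoROM image version: %s\n", (const char *)params.version);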
+ *
+ * Possible status return values are:
+ * NVOS_STATUS_SUCCESS
+ * NV_ERR_INSUFFICIENT_RESOURCES
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_DATA
+ */
+#define NV2080_CTRL_CMD_GPU_GET_INFOROM_IMAGE_VERSION (0x20800156U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_INFOROM_IMAGE_VERSION_LEN 16U
+
+#define NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS {
+    NvU8 version[NV2080_CTRL_GPU_INFOROM_IMAGE_VERSION_LEN];
+} NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_INFOROM_ECC_SUPPORT
+ *
+ * This command returns whether or not ECC is supported via the InfoROM.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_INFOROM_ECC_SUPPORT (0x20800157U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x57" */
+
+/*
+ * NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION
+ *
+ * This structure contains information about a single physical bridge.
+ *
+ * fwVersion
+ * This field specifies Firmware Version of the bridge stored in
+ * bridge EEPROM.
+ * oemVersion
+ * This field specifies Oem Version of the firmware stored in
+ * bridge EEPROM.
+ * siliconRevision
+ * This field contains the silicon revision of the bridge hardware.
+ * It is set by the chip manufacturer.
+ * hwbcResourceType
+ * This field specifies the hardware broadcast resource type.
+ * Value denotes the kind of bridge - PLX or BR04
+ *
+ */
+
+typedef struct NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS {
+    NvU32 fwVersion;
+    NvU8 oemVersion;
+    NvU8 siliconRevision;
+    NvU8 hwbcResourceType;
+} NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO
+ *
+ * This command returns physical bridge information in the system.
+ * Information consists of bridgeCount and a list of bridgeId's.
+ * The bridge Id's are used by NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION
+ * to get firmware version, oem version and silicon revision info.
+ *
+ * bridgeCount
+ * This field specifies the number of physical bridges present
+ * in the system.
+ * hPhysicalBridges
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge Id's are stored.
+ * bridgeList
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge version details are stored.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO (0x2080015aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MAX_PHYSICAL_BRIDGE (100U)
+#define NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef struct NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS {
+    NvU8 bridgeCount;
+    NvHandle hPhysicalBridges[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+    NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS bridgeList[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+} NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_GPU_BRIDGE_VERSION
+ *
+ * This structure contains information about a single physical bridge.
+ *
+ * bus
+ * This field specifies the bus id of the bridge.
+ * device
+ * This field specifies the device id of the bridge.
+ * func
+ * This field specifies the function id of the bridge.
+ * oemVersion
+ * This field specifies Oem Version of the firmware stored in
+ * bridge EEPROM.
+ * siliconRevision
+ * This field contains the silicon revision of the bridge hardware.
+ * It is set by the chip manufacturer.
+ * hwbcResourceType
+ * This field specifies the hardware broadcast resource type.
+ * Value denotes the kind of bridge - PLX or BR04
+ * domain
+ * This field specifies the respective domain of the PCI device.
+ * fwVersion
+ * This field specifies Firmware Version of the bridge stored in
+ * bridge EEPROM.
+ *
+ * If (fwVersion, oemVersion, siliconRevision) == 0, it would mean that RM
+ * was unable to fetch the value from the bridge device.
+ *
+ */
+
+typedef struct NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS {
+    NvU8 bus;
+    NvU8 device;
+    NvU8 func;
+    NvU8 oemVersion;
+    NvU8 siliconRevision;
+    NvU8 hwbcResourceType;
+    NvU32 domain;
+    NvU32 fwVersion;
+} NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU
+ *
+ * This command returns information about all the upstream bridges of the GPU.
+ * Information consists of bridge firmware version and its bus topology.
+ *
+ * bridgeCount
+ * This field specifies the number of physical bridges present
+ * in the system.
+ * physicalBridgeIds
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge Ids are stored.
+ * bridgeList
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge version details are stored.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU (0x2080015bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS_MESSAGE_ID (0x5BU)
+
+typedef struct NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS {
+    NvU8 bridgeCount;
+    NvU32 physicalBridgeIds[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+    NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS bridgeList[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+} NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS
+ *
+ * This command is used to query the status of the HW scrubber. If a scrub is
+ * in progress then the range which is being scrubbed is also reported back.
+ *
+ * scrubberStatus
+ * Reports the status of the scrubber unit - running/idle.
+ *
+ * remainingTimeMs
+ * If scrubbing is going on, reports the remaining time in milliseconds
+ * required to finish the scrub.
+ *
+ * scrubStartAddr
+ * This parameter reports the start address of the ongoing scrub if scrub
+ * is going on, otherwise reports the start addr of the last finished scrub
+ *
+ * scrubEndAddr
+ * This parameter reports the end address of the ongoing scrub if scrub
+ * is going on, otherwise reports the end addr of the last finished scrub.
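+ *
+ * Illustrative sketch of consuming the result (assumes a successful call;
+ * treating scrubEndAddr as inclusive is an assumption for the size math):
+ *
+ *   if (params.scrubberStatus ==
+ *       NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_RUNNING) {
+ *       NvU64 scrubSpan = params.scrubEndAddr - params.scrubStartAddr + 1;
+ *       // params.remainingTimeMs estimates time until the scrub finishes
+ *   }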
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS (0x2080015fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS_MESSAGE_ID (0x5FU)
+
+typedef struct NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS {
+    NvU32 scrubberStatus;
+    NvU32 remainingTimeMs;
+    NV_DECLARE_ALIGNED(NvU64 scrubStartAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 scrubEndAddr, 8);
+} NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS;
+
+/* valid values for scrubber status */
+#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_RUNNING (0x00000000U)
+#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_IDLE (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_VPR_CAPS
+ *
+ * This command is used to query the VPR capability information for a
+ * GPU. If VPR is supported, the parameters are filled accordingly.
+ * The addresses returned are all physical addresses.
+ *
+ * minStartAddr
+ * Returns the minimum start address that can be possible for VPR.
+ *
+ * maxEndAddr
+ * Returns the maximum end address that can be possible for VPR.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_VPR_CAPS (0x20800160U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS_MESSAGE_ID (0x60U)
+
+typedef struct NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 minStartAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 maxEndAddr, 8);
+} NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_HANDLE_GPU_SR
+ *
+ * Communicates to RM to handle GPU Surprise Removal
+ * Called from client when it receives SR IRP from OS
+ * Possible status values returned are:
+ * NVOS_STATUS_SUCCESS
+ */
+#define NV2080_CTRL_CMD_GPU_HANDLE_GPU_SR (0x20800167U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x67" */
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_PES_INFO
+ *
+ * This command provides the PES count and mask of enabled PES for a
+ * specified GPC. It also returns the TPC to PES mapping information
+ * for a given GPU.
+ *
+ * gpcId[IN]
+ * This parameter specifies the GPC for which PES information is to be
+ * retrieved. If the GPC with this ID is not enabled this command will
+ * return an activePesMask of zero
+ *
+ * numPesInGpc[OUT]
+ * This parameter returns the number of PES in this GPC.
+ *
+ * activePesMask[OUT]
+ * This parameter returns a mask of enabled PESs for the specified GPC.
+ * Each PES has an ID that is equivalent to the corresponding bit position
+ * in the mask.
+ *
+ * maxTpcPerGpcCount[OUT]
+ * This parameter returns the max number of TPCs in a GPC.
+ *
+ * tpcToPesMap[OUT]
+ * This array stores the TPC to PES mappings. The value at tpcToPesMap[tpcIndex]
+ * is the index of the PES it belongs to.
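+ *
+ * Illustrative sketch of walking the TPC-to-PES mapping (assumes params was
+ * filled in by a successful call):
+ *
+ *   for (NvU32 tpc = 0; tpc < params.maxTpcPerGpcCount; tpc++) {
+ *       NvU32 pes = params.tpcToPesMap[tpc];
+ *       // TPC 'tpc' of GPC 'params.gpcId' maps to PES 'pes'
+ *   }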
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PES_INFO (0x20800168U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PES_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_GPU_GET_PES_INFO_MAX_TPC_PER_GPC_COUNT 10U
+
+#define NV2080_CTRL_GPU_GET_PES_INFO_PARAMS_MESSAGE_ID (0x68U)
+
+typedef struct NV2080_CTRL_GPU_GET_PES_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 numPesInGpc;
+    NvU32 activePesMask;
+    NvU32 maxTpcPerGpcCount;
+    NvU32 tpcToPesMap[NV2080_CTRL_CMD_GPU_GET_PES_INFO_MAX_TPC_PER_GPC_COUNT];
+} NV2080_CTRL_GPU_GET_PES_INFO_PARAMS;
+
+/* NV2080_CTRL_CMD_GPU_GET_OEM_INFO
+ *
+ * If an InfoROM with a valid OEM Object is present, this
+ * command returns relevant information from the object to the
+ * caller.
+ *
+ * oemInfo
+ * This array stores information specifically for OEM use
+ * (e.g. their own serial number, lot codes, etc.).
+ * The byte definition is up to the OEM.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_OEM_INFO (0x20800169U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_GPU_MAX_OEM_INFO_LENGTH (0x000001F8U)
+
+#define NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS {
+    NvU8 oemInfo[NV2080_GPU_MAX_OEM_INFO_LENGTH];
+} NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS;
+
+/* NV2080_CTRL_CMD_GPU_PROCESS_POST_GC6_EXIT_TASKS
+ *
+ * Complete any pending tasks that need to be run after GC6 exit is complete at OS/KMD level
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_READY
+ */
+#define NV2080_CTRL_CMD_GPU_PROCESS_POST_GC6_EXIT_TASKS (0x2080016aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x6A" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_VPR_INFO
+ *
+ * This command is used to query the VPR information for a GPU.
+ * The following VPR related information can be queried by selecting the queryType:
+ * 1. The current VPR range.
+ * 2. The max VPR range ever possible on this GPU.
+ *
+ * queryType [in]
+ * This input parameter is used to select the type of information to query.
+ * Possible values for this parameter are:
+ * 1. NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS: Use this to query the
+ * max VPR range ever possible on this GPU.
+ * 2. NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE: Use this to query
+ * the current VPR range on this GPU.
+ *
+ * bIsVprEnabled [out]
+ * For query type NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE, this
+ * parameter returns if VPR is currently enabled or not.
+ *
+ * vprStartAddressInBytes [out]
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS, it returns the minimum allowed VPR start address.
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE, it returns the current VPR start address.
+ *
+ * vprEndAddressInBytes [out]
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS, it returns the maximum allowed VPR end address.
+ * For NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE, it returns the current VPR end address.
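+ *
+ * Illustrative sketch (assumed NvRmControl()-style entry point and handles):
+ *
+ *   NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS params = { 0 };
+ *   params.queryType = NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE;
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GPU_GET_VPR_INFO,
+ *                        &params, sizeof(params));
+ *   // On NV_OK, check params.bIsVprEnabled before consuming the range.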
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_VPR_INFO (0x2080016bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS_MESSAGE_ID" */
+
+
+typedef enum NV2080_CTRL_VPR_INFO_QUERY_TYPE {
+    NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS = 0,
+    NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE = 1,
+} NV2080_CTRL_VPR_INFO_QUERY_TYPE;
+
+#define NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS_MESSAGE_ID (0x6BU)
+
+typedef struct NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS {
+    NV2080_CTRL_VPR_INFO_QUERY_TYPE queryType;
+    NvBool bIsVprEnabled;
+    NV_DECLARE_ALIGNED(NvU64 vprStartAddressInBytes, 8);
+    NV_DECLARE_ALIGNED(NvU64 vprEndAddressInBytes, 8);
+} NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENCODER_CAPACITY
+ *
+ * This command is used to query the encoder capacity of the GPU.
+ *
+ * queryType [in]
+ * This input parameter is used to select the type of information to query.
+ * Possible values for this parameter are:
+ * 1. NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_H264: Use this to query the
+ * H.264 encoding capacity on this GPU.
+ * 2. NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_HEVC: Use this to query the
+ * H.265/HEVC encoding capacity on this GPU.
+ *
+ * encoderCapacity [out]
+ * Encoder capacity value from 0 to 100. Value of 0x00 indicates encoder performance
+ * may be minimal for this GPU and software should fall back to CPU-based encode.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_ENCODER_CAPACITY (0x2080016cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE {
+    NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_H264 = 0,
+    NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_HEVC = 1,
+} NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE;
+
+#define NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS_MESSAGE_ID (0x6CU)
+
+typedef struct NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS {
+    NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE queryType;
+    NvU32 encoderCapacity;
+} NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS;
+
+/*
+ * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS
+ *
+ * This command is used to retrieve the GPU's count of encoder sessions,
+ * trailing average FPS and encode latency over all active sessions.
+ *
+ * encoderSessionCount
+ * This field specifies count of all active encoder sessions on this GPU.
+ *
+ * averageEncodeFps
+ * This field specifies the average encode FPS for this GPU.
+ *
+ * averageEncodeLatency
+ * This field specifies the average encode latency in microseconds for this GPU.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS (0x2080016dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS_MESSAGE_ID (0x6DU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS {
+    NvU32 encoderSessionCount;
+    NvU32 averageEncodeFps;
+    NvU32 averageEncodeLatency;
+} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS;
+
+#define NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES 0x200U // 512 entries.
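+/*
+ * Illustrative sketch (not part of the original header): reading the NVENC
+ * session statistics above via an assumed NvRmControl()-style entry point.
+ *
+ *   NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS stats = { 0 };
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS,
+ *                        &stats, sizeof(stats));
+ *   // stats.averageEncodeLatency is reported in microseconds.
+ */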
+
+/*
+ * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO
+ *
+ * This command returns NVENC software sessions information for the associated GPU.
+ * Requests to retrieve session information use a list of one or more
+ * NV2080_CTRL_NVENC_SW_SESSION_INFO structures.
+ *
+ * sessionInfoTblEntry
+ * This field specifies the number of entries that are filled inside
+ * sessionInfoTbl. Max value of this field once returned from RM would be
+ * NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES.
+ *
+ * sessionInfoTbl
+ * This field specifies a pointer in the caller's address space
+ * to the buffer into which the NVENC session information is to be returned.
+ * When the buffer is NULL, RM assumes that the client is querying the session
+ * count and returns the current encoder session count in the
+ * sessionInfoTblEntry field.
+ * To get the actual buffer data, the client should allocate sessionInfoTbl of size
+ * NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES multiplied by the
+ * size of the NV2080_CTRL_NVENC_SW_SESSION_INFO structure. RM will fill the
+ * current session data in the sessionInfoTbl buffer and then update the
+ * sessionInfoTblEntry to reflect the current session count value.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NO_MEMORY
+ * NV_ERR_INVALID_LOCK_STATE
+ * NV_ERR_INVALID_ARGUMENT
+ */
+
+typedef struct NV2080_CTRL_NVENC_SW_SESSION_INFO {
+    NvU32 processId;
+    NvU32 subProcessId;
+    NvU32 sessionId;
+    NvU32 codecType;
+    NvU32 hResolution;
+    NvU32 vResolution;
+    NvU32 averageEncodeFps;
+    NvU32 averageEncodeLatency;
+} NV2080_CTRL_NVENC_SW_SESSION_INFO;
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS_MESSAGE_ID (0x6EU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS {
+    NvU32 sessionInfoTblEntry;
+    NV_DECLARE_ALIGNED(NvP64 sessionInfoTbl, 8);
+} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO (0x2080016eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_FABRIC_BASE_ADDR
+ *
+ * The command sets the fabric base address, which represents the top N bits of a
+ * peer memory address. These N bits will be used to index NvSwitch routing
+ * tables to forward peer memory accesses to associated GPUs.
+ *
+ * The command is available to clients with administrator privileges only.
+ * An attempt to use this command by a client without administrator privileges
+ * results in the return of NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ * The command allows fabricAddr to be set only once in the lifetime of a GPU. A GPU must
+ * be destroyed in order to re-assign a different fabricAddr. An attempt to
+ * re-assign the address without destroying a GPU would result in the return of
+ * NV_ERR_STATE_IN_USE status.
+ *
+ * fabricBaseAddr[IN]
+ * - An address with at least 32GB alignment.
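+ *
+ * Illustrative sketch of the alignment requirement (32 GB == 1ULL << 35,
+ * assuming fabricBaseAddr is a byte address):
+ *
+ *   if ((fabricBaseAddr & ((1ULL << 35) - 1)) != 0) {
+ *       // not 32 GB aligned; expect NV_ERR_INVALID_ARGUMENT
+ *   }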
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_STATE_IN_USE
+ */
+
+#define NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS_MESSAGE_ID (0x6FU)
+
+typedef struct NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 fabricBaseAddr, 8);
+} NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_SET_FABRIC_BASE_ADDR (0x2080016fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_VIRTUAL_INTERRUPT
+ *
+ * The command will trigger the specified interrupt on the host from a guest.
+ *
+ * handle[IN]
+ * - An opaque handle that will be passed in along with the interrupt
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS_MESSAGE_ID (0x72U)
+
+typedef struct NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS {
+    NvU32 handle;
+} NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_VIRTUAL_INTERRUPT (0x20800172U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS
+ *
+ * This control call is to query the status of GPU function registers
+ *
+ * statusMask[IN]
+ * - Input mask of required status registers
+ * xusbData[OUT]
+ * - data from querying XUSB status register
+ * ppcData[OUT]
+ * - data from querying PPC status register
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+
+
+#define NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS_MESSAGE_ID (0x73U)
+
+typedef struct NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS {
+    NvU32 statusMask;
+    NvU32 xusbData;
+    NvU32 ppcData;
+} NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS (0x20800173U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GPU_PARTITION_SPAN
+ *
+ * This struct represents the span of a memory partition, which represents the
+ * slices a given partition occupies (or may occupy) within a fixed range which
+ * is defined per-chip. A partition containing more resources will cover more
+ * GPU slices and therefore cover a larger span.
+ *
+ * lo
+ * - The starting unit of this span, inclusive
+ *
+ * hi
+ * - The ending unit of this span, inclusive
+ *
+ */
+typedef struct NV2080_CTRL_GPU_PARTITION_SPAN {
+    NV_DECLARE_ALIGNED(NvU64 lo, 8);
+    NV_DECLARE_ALIGNED(NvU64 hi, 8);
+} NV2080_CTRL_GPU_PARTITION_SPAN;
+
+/*
+ * NV2080_CTRL_GPU_SET_PARTITION_INFO
+ *
+ * This command partitions a GPU into different SMC-Memory partitions.
+ * The command will configure HW partition table to create work and memory
+ * isolation.
+ *
+ * The command is available to clients with administrator privileges only.
+ * An attempt to use this command by a client without administrator privileges
+ * results in the return of NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ * The command allows partitioning an invalid partition only. An attempt to
+ * re-partition a valid partition will result in NV_ERR_STATE_IN_USE.
+ * Repartitioning can be done only if a partition has been destroyed/invalidated
+ * before re-partitioning.
+ *
+ * swizzId[IN/OUT]
+ * - PartitionID associated with a newly created partition. Input in case
+ * of partition invalidation.
+ *
+ * partitionFlag[IN]
+ * - Flags to determine if GPU is requested to be partitioned in FULL,
+ * HALF, QUARTER or ONE_EIGHTHED and whether the partition requires
+ * any additional resources.
+ * When flags include NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA
+ * partition will be created with at least one video decode, jpeg and
+ * optical flow engine. This flag is valid only for partitions with
+ * a single GPC.
+ *
+ * bValid[IN]
+ * - NV_TRUE if creating a partition. NV_FALSE if destroying a partition.
+ *
+ * placement[IN]
+ * - Optional placement span to allocate the partition into. Valid
+ * placements are returned from NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY.
+ * The partition flag NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN must
+ * be set for this parameter to be used. If the flag is set and the given
+ * placement is not valid, an error will be returned.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_STATE_IN_USE
+ */
+typedef struct NV2080_CTRL_GPU_SET_PARTITION_INFO {
+    NvU32 swizzId;
+    NvU32 partitionFlag;
+    NvBool bValid;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN placement, 8);
+} NV2080_CTRL_GPU_SET_PARTITION_INFO;
+
+#define PARTITIONID_INVALID NV2080_CTRL_GPU_PARTITION_ID_INVALID
+#define NV2080_CTRL_GPU_PARTITION_ID_INVALID 0xFFFFFFFFU
+#define NV2080_CTRL_GPU_MAX_PARTITIONS 0x00000008U
+#define NV2080_CTRL_GPU_MAX_PARTITION_IDS 0x00000009U
+#define NV2080_CTRL_GPU_MAX_SMC_IDS 0x00000008U
+#define NV2080_CTRL_GPU_MAX_GPC_PER_SMC 0x0000000cU
+#define NV2080_CTRL_GPU_MAX_CE_PER_SMC 0x00000008U
+
+#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE 1:0
+#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL 0x00000000U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF 0x00000001U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER 0x00000002U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH 0x00000003U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE__SIZE 4U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE 4:2
+#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_FULL 0x00000000U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_HALF 0x00000001U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF 0x00000002U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER 0x00000003U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH 0x00000004U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE__SIZE 5U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE 7:5
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_FULL 0x00000001U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_HALF 0x00000002U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_MINI_HALF 0x00000003U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_QUARTER 0x00000004U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_EIGHTH 0x00000005U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE_NONE 0x00000000U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_GFX_SIZE__SIZE 6U
+#define NV2080_CTRL_GPU_PARTITION_MAX_TYPES 8U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA 30:30
+#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA_DISABLE 0U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA_ENABLE 1U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN 31:31
+#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN_DISABLE 0U
+#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN_ENABLE 1U
+
+// TODO XXX Bug 2657907 Remove these once clients update
+#define NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _FULL) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _FULL))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _HALF) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _HALF))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _HALF) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _MINI_HALF))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _QUARTER) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _QUARTER))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _EIGHTH) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _EIGHTH))
+
+#define NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS {
+    NvU32 partitionCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_SET_PARTITION_INFO partitionInfo[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+} NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_SET_PARTITIONS (0x20800174U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GPU_GET_PARTITION_INFO
+ *
+ * This command gets the partition information for requested partitions.
+ * If the GPU is not partitioned, the control call will return NV_ERR_NOT_SUPPORTED.
+ *
+ * The command can return global partition information as well as single
+ * partition information if the global flag is not set.
+ * In bare metal, user mode can request all partition info, while in virtualization
+ * the plugin should make an RPC with the swizzId which is assigned to the
+ * requesting VM.
+ *
+ * swizzId[IN]
+ * - HW Partition ID associated with the requested partition.
+ *
+ * partitionFlag[OUT]
+ * - partitionFlag that was provided during partition creation.
+ *
+ * grEngCount[OUT]
+ * - Number of SMC engines/GR engines allocated in partition
+ * GrIDs in a partition will always start from 0 and end at grEngCount-1
+ *
+ * veidCount[OUT]
+ * - VEID Count assigned to a partition. These will be divided across
+ * SMC engines once the CONFIGURE_PARTITION call has been made. The current
+ * algorithm is to assign veidPerGpc * gpcCountPerSmc to an SMC engine.
+ *
+ * smCount[OUT]
+ * - SMs assigned to a partition.
+ *
+ * ceCount[OUT]
+ * - Copy Engines assigned to a partition.
+ *
+ * nvEncCount[OUT]
+ * - NvEnc Engines assigned to a partition.
+ *
+ * nvDecCount[OUT]
+ * - NvDec Engines assigned to a partition.
+ *
+ * nvJpgCount[OUT]
+ * - NvJpg Engines assigned to a partition.
+ *
+ * gpcCount[OUT]
+ * - Max GPCs assigned to a partition, including the GfxCapable ones.
+ *
+ * gfxGpcCount[OUT]
+ * - Max GFX GPCs assigned to a partition. This is a subset of the GPCs included in gpcCount.
+ *
+ * gpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ * - GPC count associated with every valid SMC/Gr, including the GPCs capable of GFX
+ *
+ * gfxGpcPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ * - GFX GPC count associated with every valid SMC/Gr.
+ * This is a subset of the GPCs included in gfxGpcCount.
+ *
+ * veidsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ * - VEID count associated with every valid SMC. VEIDs within this SMC
+ * will start from 0 and go till veidCount[SMC_ID] - 1.
+ *
+ * span[OUT]
+ * - The span covered by this partition
+ *
+ * bValid[OUT]
+ * - NV_TRUE if partition is valid else NV_FALSE.
+ *
+ * bPartitionError[OUT]
+ * - NV_TRUE if partition had poison error which requires drain and reset
+ * else NV_FALSE.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ * NV_ERR_NOT_SUPPORTED
+ */
+typedef struct NV2080_CTRL_GPU_GET_PARTITION_INFO {
+    NvU32 swizzId;
+    NvU32 partitionFlag;
+    NvU32 grEngCount;
+    NvU32 veidCount;
+    NvU32 smCount;
+    NvU32 ceCount;
+    NvU32 nvEncCount;
+    NvU32 nvDecCount;
+    NvU32 nvJpgCount;
+    NvU32 nvOfaCount;
+    NvU32 gpcCount;
+    NvU32 gfxGpcCount;
+    NvU32 gpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS];
+    NvU32 gfxGpcPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS];
+    NvU32 veidsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS];
+    NV_DECLARE_ALIGNED(NvU64 memSize, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN span, 8);
+    NvBool bValid;
+    NvBool bPartitionError;
+} NV2080_CTRL_GPU_GET_PARTITION_INFO;
+
+/*
+ * NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS
+ *
+ * queryPartitionInfo[IN]
+ * - Max-sized array of NV2080_CTRL_GPU_GET_PARTITION_INFO to get partition
+ * info
+ *
+ * bGetAllPartitionInfo[IN]
+ * - Flag to get all partitions' info. Only the root client will receive all
+ * partitions' info. Non-root clients should not use this flag
+ *
+ * validPartitionCount[OUT]
+ * - Valid partition count which has been filled by RM as part of the call
+ *
+ */
+#define NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_GET_PARTITION_INFO queryPartitionInfo[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+    NvU32 validPartitionCount;
+    NvBool bGetAllPartitionInfo;
+} NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_GET_PARTITIONS (0x20800175U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GPU_CONFIGURE_PARTITION
+ *
+ * This command configures a partition by associating GPCs with SMC Engines
+ * available in that partition. Engines which are to have GPCs assigned to them
+ * shall not already have any GPCs assigned to them. It is not valid to both
+ * assign GPCs and remove GPCs as part of a single call to this function.
+ *
+ * swizzId[IN]
+ * - PartitionID for configuring partition. If partition has a valid
+ * context created, then configuration is not allowed.
+ *
+ * gpcCountPerSmcEng[IN]
+ * - Number of GPCs expected to be configured per SMC. Supported
+ * configurations are 0, 1, 2, 4 or 8. "0" means a particular SMC
+ * engine will be disabled with no GPC connected to it.
+ *
+ * updateSmcEngMask[IN]
+ * - Mask tracking valid entries of gpcCountPerSmcEng. A value of
+ * 0 in bit index i indicates that engine i will keep its current
+ * configuration.
+ *
+ * bUseAllGPCs[IN]
+ * - Flag specifying alternate configuration mode, indicating that in
+ * swizzid 0 only, all non-floorswept GPCs should be connected to the
+ * engine indicated by a raised bit in updateSmcEngMask. Only a single
+ * engine may be targeted by this operation. The gpcCountPerSmcEng
+ * parameter should not be used with this flag.
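+ *
+ * Illustrative sketch (assumed handles/entry point as in earlier examples):
+ * attach two GPCs to SMC engine 0 and disable engine 1, leaving all other
+ * engines in their current configuration.
+ *
+ *   NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS params = { 0 };
+ *   params.swizzId = swizzId;
+ *   params.gpcCountPerSmcEng[0] = 2;
+ *   params.gpcCountPerSmcEng[1] = 0;
+ *   params.updateSmcEngMask = (1U << 0) | (1U << 1); // only entries 0,1 apply
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION,
+ *                        &params, sizeof(params));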
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ * NV_ERR_INSUFFICIENT_RESOURCES
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_STATE_IN_USE
+ */
+#define NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION (0x20800176U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS_MESSAGE_ID (0x76U)
+
+typedef struct NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS {
+    NvU32 swizzId;
+    NvU32 gpcCountPerSmcEng[NV2080_CTRL_GPU_MAX_SMC_IDS];
+    NvU32 updateSmcEngMask;
+    NvBool bUseAllGPCs;
+} NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS;
+
+
+/*
+ * NV2080_CTRL_GPU_FAULT_PACKET
+ *
+ * This struct represents a GMMU fault packet.
+ *
+ */
+#define NV2080_CTRL_GPU_FAULT_PACKET_SIZE 32U
+typedef struct NV2080_CTRL_GPU_FAULT_PACKET {
+    NvU8 data[NV2080_CTRL_GPU_FAULT_PACKET_SIZE];
+} NV2080_CTRL_GPU_FAULT_PACKET;
+
+/*
+ * NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT
+ *
+ * This command reports a nonreplayable fault packet to RM.
+ * It is only used by UVM.
+ *
+ * pFaultPacket[IN]
+ * - A fault packet that will be later cast to GMMU_FAULT_PACKET *.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_GPU_REPORT_NON_REPLAYABLE_FAULT (0x20800177U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS_MESSAGE_ID (0x77U)
+
+typedef struct NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS {
+    NV2080_CTRL_GPU_FAULT_PACKET faultPacket;
+} NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_VGPU
+ *
+ * This command is similar to NV2080_CTRL_CMD_GPU_EXEC_REG_OPS, except it is used
+ * by the VGPU plugin client only. This command provides access to the subset of
+ * privileged registers.
+ *
+ * See confluence page "vGPU UMED Security" for details.
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_VGPU (0x20800178U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x78" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENGINE_RUNLIST_PRI_BASE
+ *
+ * This command returns the runlist pri base of the specified engine(s).
+ *
+ * engineList
+ * Input array.
+ * This array specifies the engines being queried for information.
+ * The list of engines supported by a chip can be fetched using the
+ * NV2080_CTRL_CMD_GPU_GET_ENGINES/GET_ENGINES_V2 ctrl call.
+ *
+ * runlistPriBase
+ * Output array.
+ * Returns the runlist pri base for the specified engines.
+ * Else, will return _NULL when the input is an NV2080_ENGINE_TYPE_NULL
+ * and will return _ERROR when the control call fails due to an invalid argument
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ENGINE_RUNLIST_PRI_BASE (0x20800179U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS_MESSAGE_ID (0x79U)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS {
+    NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+    NvU32 runlistPriBase[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+} NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_NULL (0xFFFFFFFFU)
+#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_ERROR (0xFFFFFFFBU)
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_HW_ENGINE_ID
+ *
+ * This command returns the host hardware defined engine ID of the specified engine(s).
+ *
+ * engineList
+ * Input array.
+ * This array specifies the engines being queried for information.
+ * The list of engines supported by a chip can be fetched using the
+ * NV2080_CTRL_CMD_GPU_GET_ENGINES/GET_ENGINES_V2 ctrl call.
+ *
+ * hwEngineID
+ * Output array.
+ * Returns the host hardware engine ID(s) for the specified engines.
+ * Else, will return _NULL when the input is an NV2080_ENGINE_TYPE_NULL
+ * and will return _ERROR when the control call fails due to an invalid argument
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_HW_ENGINE_ID (0x2080017aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS_MESSAGE_ID (0x7AU)
+
+typedef struct NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS {
+    NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+    NvU32 hwEngineID[NV2080_GPU_MAX_ENGINES_LIST_SIZE];
+} NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_NULL (0xFFFFFFFFU)
+#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_ERROR (0xFFFFFFFBU)
+
+/*
+ * NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS
+ *
+ * This command is used to retrieve the GPU's count of FBC sessions,
+ * average FBC calls and FBC latency over all active sessions.
+ *
+ * sessionCount
+ * This field specifies the count of all active FBC sessions on this GPU.
+ *
+ * averageFPS
+ * This field specifies the average frames captured.
+ *
+ * averageLatency
+ * This field specifies the average FBC latency in microseconds.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+*/
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS (0x2080017bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS_MESSAGE_ID (0x7BU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS {
+    NvU32 sessionCount;
+    NvU32 averageFPS;
+    NvU32 averageLatency;
+} NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS;
+
+/*
+* NV2080_CTRL_NVFBC_SW_SESSION_INFO
+*
+* processId[OUT]
+* Process id of the process owning the NvFBC session.
+* On VGX host, this will specify the vGPU plugin process id.
+* subProcessId[OUT]
+*   Process id of the process owning the NvFBC session if the
+*   session is on VGX guest, else the value is zero.
+* vgpuInstanceId[OUT]
+*   vGPU on which the process owning the NvFBC session
+*   is running if session is on VGX guest, else
+*   the value is zero.
+* sessionId[OUT]
+*   Unique session id of the NvFBC session.
+* sessionType[OUT]
+*   Type of NvFBC session.
+* displayOrdinal[OUT]
+*   Display identifier associated with the NvFBC session.
+* sessionFlags[OUT]
+*   One or more of NV2080_CTRL_NVFBC_SESSION_FLAG_xxx.
+* hMaxResolution[OUT]
+*   Max horizontal resolution supported by the NvFBC session.
+* vMaxResolution[OUT]
+*   Max vertical resolution supported by the NvFBC session.
+* hResolution[OUT]
+*   Horizontal resolution requested by caller in grab call.
+* vResolution[OUT]
+*   Vertical resolution requested by caller in grab call.
+* averageFPS[OUT]
+*   Average no. of frames captured per second.
+* averageLatency[OUT]
+*   Average frame capture latency in microseconds.
+*/
+
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_DIFFMAP_ENABLED            0x00000001U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED  0x00000002U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT  0x00000004U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE 0x00000008U
+#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT  0x00000010U
+
+typedef struct NV2080_CTRL_NVFBC_SW_SESSION_INFO {
+    NvU32 processId;
+    NvU32 subProcessId;
+    NvU32 vgpuInstanceId;
+    NvU32 sessionId;
+    NvU32 sessionType;
+    NvU32 displayOrdinal;
+    NvU32 sessionFlags;
+    NvU32 hMaxResolution;
+    NvU32 vMaxResolution;
+    NvU32 hResolution;
+    NvU32 vResolution;
+    NvU32 averageFPS;
+    NvU32 averageLatency;
+} NV2080_CTRL_NVFBC_SW_SESSION_INFO;
+
+/*
+* NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO
+*
+* This command returns NVFBC software session information for the associated GPU.
+*
+* sessionInfoCount
+*   This field specifies the number of entries that are filled inside
+*   sessionInfoTbl. The max value of this field once returned from RM would be
+*   NV2080_GPU_NVFBC_MAX_SESSION_COUNT.
+*
+* sessionInfoTbl
+*   This field specifies the array in which the NVFBC session information is to
+*   be returned. RM will fill the current session data in the sessionInfoTbl array
+*   and then update the sessionInfoCount to reflect the current session count value.
+*
+* Possible status values returned are:
+*   NV_OK
+*   NV_ERR_NO_MEMORY
+*   NV_ERR_INVALID_LOCK_STATE
+*   NV_ERR_INVALID_ARGUMENT
+*/
+
+#define NV2080_GPU_NVFBC_MAX_SESSION_COUNT 256U
+
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS_MESSAGE_ID (0x7CU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS {
+    NvU32                             sessionInfoCount;
+    NV2080_CTRL_NVFBC_SW_SESSION_INFO sessionInfoTbl[NV2080_GPU_NVFBC_MAX_SESSION_COUNT];
+} NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO (0x2080017cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS_MESSAGE_ID" */
+
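+/*
+ * Usage sketch for NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO, illustrative
+ * only and not part of this interface. It assumes an NvRmControl-style entry
+ * point of the shape rmControl(hClient, hObject, cmd, pParams, paramsSize);
+ * rmControl, hClient, hSubdevice and consumeSession are placeholder names.
+ *
+ *   NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS params = { 0 };
+ *   NV_STATUS status;
+ *
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO,
+ *                      &params, sizeof(params));
+ *   if (status == NV_OK)
+ *   {
+ *       // sessionInfoCount is at most NV2080_GPU_NVFBC_MAX_SESSION_COUNT
+ *       for (NvU32 i = 0; i < params.sessionInfoCount; i++)
+ *           consumeSession(&params.sessionInfoTbl[i]);
+ *   }
+ */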
+/*
+ * NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE
+ *
+ * This command returns the VMMU segment size
+ *
+ * vmmuSegmentSize
+ *   Output parameter.
+ *   Returns the VMMU segment size (in bytes)
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE (0x2080017eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS_MESSAGE_ID (0x7EU)
+
+typedef struct NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 vmmuSegmentSize, 8);
+} NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS;
+
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_32MB  0x02000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_64MB  0x04000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_128MB 0x08000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_256MB 0x10000000U
+#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_512MB 0x20000000U
+
+/*
+ * NV2080_CTRL_GPU_GET_PARTITION_CAPACITY
+ *
+ * This command returns the count of partitions of a given size (represented by
+ * NV2080_CTRL_GPU_PARTITION_FLAG_*) which can be requested via the
+ * NV2080_CTRL_GPU_SET_PARTITIONS ctrl call.
+ * Note that this API does not "reserve" any partitions, and there is no
+ * guarantee that the reported count of available partitions of a given size
+ * will remain consistent following creation of partitions of different size
+ * through NV2080_CTRL_GPU_SET_PARTITIONS.
+ * Note that this API is unsupported if SMC is feature-disabled.
+ *
+ * partitionFlag[IN]
+ *      - Partition flag indicating size of requested partitions
+ *
+ * partitionCount[OUT]
+ *      - Available number of partitions of the given size which can currently be created.
+ *
+ * availableSpans[OUT]
+ *      - For each partition able to be created of the specified size, the span
+ *        it could occupy.
+ *
+ * availableSpansCount[OUT]
+ *      - Number of valid entries in availableSpans.
+ *
+ * totalPartitionCount[OUT]
+ *      - Total number of partitions of the given size which can be created.
+ *
+ * totalSpans[OUT]
+ *      - List of spans which can possibly be occupied by partitions of the
+ *        given type.
+ *
+ * totalSpansCount[OUT]
+ *      - Number of valid entries in totalSpans.
+ *
+ * bStaticInfo[IN]
+ *      - Flag indicating that the client requests only the information from
+ *        totalPartitionCount and totalSpans.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY (0x20800181U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS {
+    NvU32  partitionFlag;
+    NvU32  partitionCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN availableSpans[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+    NvU32  availableSpansCount;
+    NvU32  totalPartitionCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN totalSpans[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+    NvU32  totalSpansCount;
+    NvBool bStaticInfo;
+} NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS;
+
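+/*
+ * Usage sketch for NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY, illustrative
+ * only; rmControl stands in for an NvRmControl-style entry point and
+ * hClient/hSubdevice are placeholder handles. partitionFlag would carry one
+ * of the NV2080_CTRL_GPU_PARTITION_FLAG_* values:
+ *
+ *   NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS params = { 0 };
+ *   params.partitionFlag = chosenPartitionFlag; // placeholder value
+ *   params.bStaticInfo   = NV_FALSE;            // also fill availableSpans
+ *
+ *   if (rmControl(hClient, hSubdevice,
+ *                 NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY,
+ *                 &params, sizeof(params)) == NV_OK)
+ *   {
+ *       // params.partitionCount partitions of this size can be created now;
+ *       // params.availableSpans[0..availableSpansCount-1] lists where.
+ *   }
+ */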
+/*
+ * NV2080_CTRL_CMD_GPU_GET_CACHED_INFO
+ *
+ * This command returns cached (SW only) gpu information for the associated GPU.
+ * Requests to retrieve gpu information use a list of one or more NV2080_CTRL_GPU_INFO
+ * structures.
+ * The gpuInfoList is aligned with NV2080_CTRL_GPU_GET_INFO_V2_PARAMS for security reasons.
+ *
+ * gpuInfoListSize
+ *   This field specifies the number of entries on the caller's
+ *   gpuInfoList.
+ * gpuInfoList
+ *   This field specifies a pointer in the caller's address space
+ *   to the buffer into which the gpu information is to be returned.
+ *   This buffer must be at least as big as gpuInfoListSize multiplied
+ *   by the size of the NV2080_CTRL_GPU_INFO structure.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_GPU_GET_CACHED_INFO (0x20800182U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x82" */
+
+typedef struct NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS {
+    NvU32                gpuInfoListSize;
+    NV2080_CTRL_GPU_INFO gpuInfoList[NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE];
+} NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS;
+
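+/*
+ * Usage sketch for NV2080_CTRL_CMD_GPU_GET_CACHED_INFO, illustrative only;
+ * rmControl and the handles are placeholders for an NvRmControl-style entry
+ * point. The parameter layout mirrors NV2080_CTRL_GPU_GET_INFO_V2_PARAMS,
+ * but the data is served from the SW cache:
+ *
+ *   NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS params = { 0 };
+ *   params.gpuInfoListSize = 1;
+ *   params.gpuInfoList[0].index = chosenGpuInfoIndex; // placeholder index
+ *
+ *   if (rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_CACHED_INFO,
+ *                 &params, sizeof(params)) == NV_OK)
+ *       useValue(params.gpuInfoList[0].data); // placeholder consumer
+ */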
+/*
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE
+ *
+ * This command configures this GPU to control global mode for partitioning.
+ * This command may not be sent to a GPU with any active partitions.
+ * This command may be used to set the following modes:
+ *
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING
+ *  NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_LEGACY
+ *   This is the default mode. While this GPU is in this mode, no partitions
+ *   will be allowed to be created via SET_PARTITIONS - a client must set one
+ *   of the below modes prior to partitioning the GPU. When a client sets a
+ *   GPU into this mode, any performance changes resulting from partitions
+ *   made while in either of the below modes will be cleared. A
+ *   physical-function-level reset is required after setting this mode.
+ *
+ *  NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_MAX_PERF
+ *   In this mode, when the GPU is partitioned, each partition will have the
+ *   maximum possible performance which can be evenly distributed among all
+ *   partitions. The total performance of the GPU, taking into account all
+ *   partitions created in this mode, may be less than that of a GPU running
+ *   in legacy non-SMC mode. Partitions created while in this mode require a
+ *   physical-function-level reset before the partitioning may take full
+ *   effect. Destroying all partitions while in this mode may be
+ *   insufficient to restore full performance to the GPU - only by setting
+ *   the mode to _LEGACY can this be achieved. A physical-function-level
+ *   reset is NOT required after setting this mode.
+ *
+ *  NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_FAST_RECONFIG
+ *   By setting this mode, the performance of the GPU will be restricted such
+ *   that all partitions will have a consistent fraction of the total
+ *   available performance, which may be less than the maximum possible
+ *   performance available to each partition. Creating or destroying
+ *   partitions on this GPU while in this mode will not require a
+ *   physical-function-level reset, and will not affect other active
+ *   partitions. Destroying all partitions while in this mode may be
+ *   insufficient to restore full performance to the GPU - only by setting
+ *   the mode to _LEGACY can this be achieved. A physical-function-level
+ *   reset is required after setting this mode.
+ *
+ * Parameters:
+ *   partitioningMode[IN]
+ *      - Partitioning Mode to set for this GPU.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_PARTITIONING_MODE (0x20800183U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING               1:0
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_LEGACY        0U
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_MAX_PERF      1U
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_FAST_RECONFIG 2U
+
+#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID (0x83U)
+
+typedef struct NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS {
+    NvU32 partitioningMode;
+} NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS;
+
+/* NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO
+ *
+ * This structure describes resources available in a partition requested of a
+ * given type.
+ *
+ * [OUT] partitionFlag
+ *      - Flags to specify in NV2080_CTRL_CMD_GPU_SET_PARTITIONS to request this
+ *        partition
+ *
+ * [OUT] grCount
+ *      - Total number of SMC engines/GR engines (including the GFX capable ones in this partition)
+ *
+ * [OUT] gfxGrCount
+ *      - Number of SMC engines/GR engines capable of GFX. This is a subset of the engines included in grCount
+ *
+ * [OUT] gpcCount
+ *      - Number of GPCs in this partition, including the GFX capable ones.
+ *
+ * [OUT] gfxGpcCount
+ *      - Number of GFX capable GPCs in this partition. This is a subset of the GPCs included in gpcCount.
+ *
+ * [OUT] veidCount
+ *      - Number of VEIDS in this partition
+ *
+ * [OUT] smCount
+ *      - Number of SMs in this partition
+ *
+ * [OUT] ceCount
+ *      - Number of Copy Engines in this partition
+ *
+ * [OUT] nvEncCount
+ *      - Number of Encoder Engines in this partition
+ *
+ * [OUT] nvDecCount
+ *      - Number of Decoder Engines in this partition
+ *
+ * [OUT] nvJpgCount
+ *      - Number of Jpg Engines in this partition
+ *
+ * [OUT] nvOfaCount
+ *      - Number of Ofa engines in this partition
+ *
+ * [OUT] memorySize
+ *      - Total available memory within this partition
+ */
+typedef struct NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO {
+    NvU32 partitionFlag;
+    NvU32 grCount;
+    NvU32 gfxGrCount;
+    NvU32 gpcCount;
+    NvU32 gfxGpcCount;
+    NvU32 veidCount;
+    NvU32 smCount;
+    NvU32 ceCount;
+    NvU32 nvEncCount;
+    NvU32 nvDecCount;
+    NvU32 nvJpgCount;
+    NvU32 nvOfaCount;
+    NV_DECLARE_ALIGNED(NvU64 memorySize, 8);
+} NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO;
+
+/*
+ * NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS
+ *
+ * This command returns information regarding GPU partitions which can be
+ * requested via NV2080_CTRL_CMD_GPU_SET_PARTITIONS.
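+ *
+ * As a consumption sketch (illustrative only; rmControl stands in for an
+ * NvRmControl-style entry point and examine is a placeholder consumer), each
+ * returned descriptor can be walked as shown here, using the fields
+ * documented below:
+ *
+ *   NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS params = { 0 };
+ *   if (rmControl(hClient, hSubdevice,
+ *                 NV2080_CTRL_CMD_GPU_DESCRIBE_PARTITIONS,
+ *                 &params, sizeof(params)) == NV_OK)
+ *   {
+ *       for (NvU32 i = 0; i < params.descCount; i++)
+ *           examine(&params.partitionDescs[i]);
+ *   }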
+ * + * [OUT] descCount + * - Number of valid partition types + * + * [OUT] partitionDescs + * - Information describing available partitions + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS { + NvU32 descCount; + // C form: NV2080_CTRL_GPU_DESCRIBE_PARTITION_INFO partitionDescs[NV2080_CTRL_GPU_PARTITION_MAX_TYPES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO partitionDescs[NV2080_CTRL_GPU_PARTITION_MAX_TYPES], 8); +} NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_DESCRIBE_PARTITIONS (0x20800185U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE + * + * This command returns information regarding maximum page size supported + * by GMMU on the platform on which RM is running. + * + * [OUT] maxSupportedPageSize + * - Maximum local vidmem page size supported by GMMU of a given GPU (HW) + * on a given platform (OS) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE (0x20800188U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS { + NvU32 maxSupportedPageSize; +} NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS; + + + +/* + * NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC + * + * This command returns the max number of MMUs per GPC + * + * gpcId [IN] + * Logical GPC id + * count [OUT] + * The number of MMUs per GPC + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. When SMC is enabled, this + * is a mandatory parameter. 
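+ *
+ * A minimal call sketch (illustrative only; rmControl and the handles are
+ * placeholders for an NvRmControl-style entry point), routing to a GR engine
+ * by logical engine id when SMC is enabled:
+ *
+ *   NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS params = { 0 };
+ *   params.gpcId = 0;
+ *   // value for the 1:0 TYPE field of flags; remaining bits left zero
+ *   params.grRouteInfo.flags = NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_ENGID;
+ *   params.grRouteInfo.route = grEngId; // placeholder engine id
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_GPU_GET_NUM_MMUS_PER_GPC,
+ *                      &params, sizeof(params));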
+ */ +#define NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS_MESSAGE_ID (0x8AU) + +typedef struct NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS { + NvU32 gpcId; + NvU32 count; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_NUM_MMUS_PER_GPC (0x2080018aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS + * + * This command returns the GPU partition IDs for all active partitions + * If GPU is not partitioned, the control call will return partition count as "0" + * + * swizzId[OUT] + * - HW Partition ID associated with the active partitions + * + * partitionCount[OUT] + * - Number of active partitions in system + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS_MESSAGE_ID (0x8BU) + +typedef struct NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS { + NvU32 swizzId[NV2080_CTRL_GPU_MAX_PARTITION_IDS]; + NvU32 partitionCount; +} NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS (0x2080018bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_GPU_GET_PIDS + * + * Given a resource identifier and its type, this command returns a set of + * process identifiers (PIDs) of processes that have instantiated this resource. + * For example, given a class number, this command returns a list of all + * processes with clients that have matching object allocations. + * This is a SMC aware call and the scope of the information gets restricted + * based on partition subscription. + * The call enforces partition subscription if SMC is enabled, and client is not + * a monitoring client. + * Monitoring clients get global information without any scope based filtering. + * Monitoring clients are also not expected to subscribe to a partition when + * SMC is enabled. + * + * idType + * Type of the resource identifier. See below for a list of valid types. + * id + * Resource identifier. + * pidTblCount + * Number of entries in the PID table. + * pidTbl + * Table which will contain the PIDs. Each table entry is of type NvU32. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_GET_PIDS (0x2080018dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PIDS_PARAMS_MESSAGE_ID" */ + +/* max size of pidTable */ +#define NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT 950U + +#define NV2080_CTRL_GPU_GET_PIDS_PARAMS_MESSAGE_ID (0x8DU) + +typedef struct NV2080_CTRL_GPU_GET_PIDS_PARAMS { + NvU32 idType; + NvU32 id; + NvU32 pidTblCount; + NvU32 pidTbl[NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT]; +} NV2080_CTRL_GPU_GET_PIDS_PARAMS; + +/* + * Use class NV20_SUBDEVICE_0 with NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS to query + * PIDs with or without GPU contexts. For any other class id, PIDs only with GPU + * contexts are returned. + */ +#define NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS (0x00000000U) +#define NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_VGPU_GUEST (0x00000001U) + +/* + * NV2080_CTRL_SMC_SUBSCRIPTION_INFO + * + * This structure contains information about the SMC subscription type. 
+ * If MIG is enabled, a valid ID is returned; otherwise it is set to
+ * PARTITIONID_INVALID.
+ *
+ *   computeInstanceId
+ *     This parameter returns a valid compute instance ID
+ *   gpuInstanceId
+ *     This parameter returns a valid GPU instance ID
+ */
+typedef struct NV2080_CTRL_SMC_SUBSCRIPTION_INFO {
+    NvU32 computeInstanceId;
+    NvU32 gpuInstanceId;
+} NV2080_CTRL_SMC_SUBSCRIPTION_INFO;
+
+/*
+ * NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA
+ *
+ * This structure contains the video memory usage information.
+ *
+ *   memPrivate
+ *     This parameter returns the amount of memory exclusively owned by
+ *     (i.e. private to) the client
+ *   memSharedOwned
+ *     This parameter returns the amount of shared memory owned by the client
+ *   memSharedDuped
+ *     This parameter returns the amount of shared memory duped by the client
+ *   protectedMemPrivate
+ *     This parameter returns the amount of protected memory exclusively owned by
+ *     (i.e. private to) the client whenever memory protection is enabled
+ *   protectedMemSharedOwned
+ *     This parameter returns the amount of shared protected memory owned by the
+ *     client whenever memory protection is enabled
+ *   protectedMemSharedDuped
+ *     This parameter returns the amount of shared protected memory duped by the
+ *     client whenever memory protection is enabled
+ */
+typedef struct NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA {
+    NV_DECLARE_ALIGNED(NvU64 memPrivate, 8);
+    NV_DECLARE_ALIGNED(NvU64 memSharedOwned, 8);
+    NV_DECLARE_ALIGNED(NvU64 memSharedDuped, 8);
+    NV_DECLARE_ALIGNED(NvU64 protectedMemPrivate, 8);
+    NV_DECLARE_ALIGNED(NvU64 protectedMemSharedOwned, 8);
+    NV_DECLARE_ALIGNED(NvU64 protectedMemSharedDuped, 8);
+} NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA;
+
+#define NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE (0x00000000U)
+
+#define NV2080_CTRL_GPU_PID_INFO_INDEX_MAX                NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE
+
+typedef union NV2080_CTRL_GPU_PID_INFO_DATA {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA vidMemUsage, 8);
+} NV2080_CTRL_GPU_PID_INFO_DATA;
+
+/*
+ * NV2080_CTRL_GPU_PID_INFO
+ *
+ * This structure contains the per-PID information. Each type of information
+ * retrievable via NV2080_CTRL_CMD_GPU_GET_PID_INFO is assigned a unique index
+ * below. In addition, the process the lookup applies to is also specified.
+ * This is an SMC-aware call and the scope of the information gets restricted
+ * based on partition subscription.
+ * The call enforces partition subscription if SMC is enabled, and the client is
+ * not a monitoring client.
+ * Monitoring clients get global information without any scope-based filtering.
+ * Monitoring clients are also not expected to subscribe to a partition when
+ * SMC is enabled.
+ *
+ * pid
+ *   This parameter specifies the PID of the process for which information is
+ *   to be queried.
+ * index
+ *   This parameter specifies the type of information being queried for the
+ *   process of interest.
+ * result
+ *   This parameter returns the result of the instruction's execution.
+ * data
+ *   This parameter returns the data corresponding to the information which is
+ *   being queried.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *
+ * Valid PID information indices are:
+ *
+ *   NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE
+ *     This index is used to request the amount of video memory on this GPU
+ *     allocated to the process.
+ */
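+/*
+ * Population sketch for one NV2080_CTRL_GPU_PID_INFO entry as consumed by
+ * NV2080_CTRL_CMD_GPU_GET_PID_INFO further below (illustrative only;
+ * targetPid is a placeholder):
+ *
+ *   NV2080_CTRL_GPU_GET_PID_INFO_PARAMS params = { 0 };
+ *   params.pidInfoListCount = 1;
+ *   params.pidInfoList[0].pid   = targetPid;
+ *   params.pidInfoList[0].index =
+ *       NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE;
+ *   // After a successful control call, pidInfoList[0].result holds the
+ *   // per-entry status and data.vidMemUsage the usage breakdown.
+ */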
+typedef struct NV2080_CTRL_GPU_PID_INFO {
+    NvU32 pid;
+    NvU32 index;
+    NvU32 result;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO_DATA data, 8);
+    NV2080_CTRL_SMC_SUBSCRIPTION_INFO smcSubscription;
+} NV2080_CTRL_GPU_PID_INFO;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_PID_INFO
+ *
+ * This command allows querying per-process information from the RM. Clients
+ * request information by specifying a unique informational index and the
+ * Process ID of the process in question. The result is set to indicate success
+ * and the information queried (if available) is returned in the data parameter.
+ *
+ * pidInfoListCount
+ *   The number of valid entries in the pidInfoList array.
+ * pidInfoList
+ *   An array of NV2080_CTRL_GPU_PID_INFO of maximum length
+ *   NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PID_INFO (0x2080018eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PID_INFO_PARAMS_MESSAGE_ID" */
+
+/* max size of pidInfoList */
+#define NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT 200U
+
+#define NV2080_CTRL_GPU_GET_PID_INFO_PARAMS_MESSAGE_ID (0x8EU)
+
+typedef struct NV2080_CTRL_GPU_GET_PID_INFO_PARAMS {
+    NvU32 pidInfoListCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO pidInfoList[NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT], 8);
+} NV2080_CTRL_GPU_GET_PID_INFO_PARAMS;
+
+/*!
+ * Compute policy types to be specified by callers to set a config.
+ *
+ * _TIMESLICE
+ *   Set the timeslice config for the requested GPU.
+ *   Check @ref NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE for
+ *   permissible timeslice values.
+ */
+#define NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE 0U
+#define NV2080_CTRL_GPU_COMPUTE_POLICY_MAX       1U
+
+/*!
+ * Enum consisting of permissible timeslice options that can be configured
+ * for a GPU. These can be queried by compute clients and the exact
+ * timeslice values can be chosen appropriately as per GPU support
+ */
+typedef enum NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE {
+    NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_DEFAULT = 0,
+    NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_SHORT = 1,
+    NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_MEDIUM = 2,
+    NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_LONG = 3,
+    NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_MAX = 4,
+} NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE;
+
+typedef struct NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG {
+    /*!
+     * NV2080_CTRL_GPU_COMPUTE_POLICY_
+     */
+    NvU32 type;
+
+    /*!
+     * Union of type-specific data
+     */
+    union {
+        NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE timeslice;
+    } data;
+} NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG;
+
+#define NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID (0x94U)
+
+typedef struct NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS {
+    NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG config;
+} NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG
+ *
+ * This command retrieves all compute policy configs for the associated GPU.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_OBJECT_NOT_FOUND
+ */
+#define NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG (0x20800195U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID" */
+
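+/*
+ * Query sketch for NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG, using the
+ * parameter struct defined below (illustrative only; rmControl and the
+ * handles are placeholders for an NvRmControl-style entry point):
+ *
+ *   NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS getParams = { 0 };
+ *   if (rmControl(hClient, hSubdevice,
+ *                 NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG,
+ *                 &getParams, sizeof(getParams)) == NV_OK)
+ *   {
+ *       for (NvU32 i = 0; i < getParams.numConfigs; i++)
+ *           if (getParams.configList[i].type ==
+ *               NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE)
+ *               currentTimeslice = getParams.configList[i].data.timeslice;
+ *   }
+ */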
+/*!
+ * This define limits the max number of policy configs that can be handled by
+ * the NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG command.
+ *
+ * @note Needs to be in sync (greater or equal) to NV2080_CTRL_GPU_COMPUTE_POLICY_MAX.
+ */
+
+#define NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX 32U
+
+#define NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID (0x95U)
+
+typedef struct NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS {
+    NvU32 numConfigs;
+
+    /*!
+     * C form:
+     * NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG configList[NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX];
+     */
+    NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG configList[NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX];
+} NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST
+ *
+ * @brief Validate the address range for a memory map request by comparing the
+ *        user supplied address range with the GPU BAR0/BAR1 range.
+ *
+ * @param[in]  addressStart   Start address for the memory map request
+ * @param[in]  addressLength  Length of the memory map request
+ * @param[out] protection     NV_PROTECT_READ_WRITE, if both read/write is allowed
+ *                            NV_PROTECT_READABLE, if only read is allowed
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_PROTECTION_FAULT
+ *
+ */
+#define NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST (0x20800198U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS_MESSAGE_ID (0x98U)
+
+typedef struct NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 addressStart, 8);
+    NV_DECLARE_ALIGNED(NvU64 addressLength, 8);
+    NvU32 protection;
+} NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES
+ *
+ * This command is used to retrieve the load time (latency) of each engine.
+ *
+ * engineCount
+ *   This field specifies the number of entries of the following
+ *   three arrays.
+ *
+ * engineList[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]
+ *   An array of NvU32 which stores each engine's descriptor.
+ *
+ * engineStateLoadTime[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]
+ *   An array of NvU64 which stores each engine's load time.
+ *
+ * engineIsInit[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]
+ *   An array of NvBool which stores each engine's initialization status.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES (0x2080019bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS 0x90U
+
+#define NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS_MESSAGE_ID (0x9BU)
+
+typedef struct NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS {
+    NvU32  engineCount;
+    NvU32  engineList[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS];
+    NV_DECLARE_ALIGNED(NvU64 engineStateLoadTime[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS], 8);
+    NvBool engineIsInit[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS];
+} NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS;
+
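+/*
+ * Consumption sketch for NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES
+ * (illustrative only; rmControl and the handles are placeholders for an
+ * NvRmControl-style entry point, recordLatency a placeholder consumer):
+ *
+ *   NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS params = { 0 };
+ *   if (rmControl(hClient, hSubdevice,
+ *                 NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES,
+ *                 &params, sizeof(params)) == NV_OK)
+ *   {
+ *       for (NvU32 i = 0; i < params.engineCount; i++)
+ *           if (params.engineIsInit[i])
+ *               recordLatency(params.engineList[i],
+ *                             params.engineStateLoadTime[i]);
+ *   }
+ */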
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ID_NAME_MAPPING
+ *
+ * This command is used to retrieve the mapping of engine ID and engine Name.
+ *
+ * engineCount
+ *   This field specifies the size of the mapping.
+ *
+ * engineID
+ *   An array of NvU32 which stores each engine's descriptor.
+ *
+ * engineName
+ *   An array of char[100] which stores each engine's name.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ID_NAME_MAPPING (0x2080019cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS_MESSAGE_ID (0x9CU)
+
+typedef struct NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS {
+    NvU32 engineCount;
+    NvU32 engineID[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS];
+    char  engineName[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS][100];
+} NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_NOPTRS
+ *
+ * Same as above NV2080_CTRL_CMD_GPU_EXEC_REG_OPS except that this CTRL CMD will
+ * not allow any embedded pointers. The regOps array is inlined as part of the
+ * struct.
+ * NOTE: This is intended for the GSP plugin only as it may override regOp access
+ * restrictions
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_NOPTRS (0x2080019dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS_MESSAGE_ID" */
+
+/* setting this to 100 keeps it right below 4k in size */
+#define NV2080_CTRL_REG_OPS_ARRAY_MAX 100U
+#define NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS_MESSAGE_ID (0x9DU)
+
+typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS {
+    NvHandle hClientTarget;
+    NvHandle hChannelTarget;
+    NvU32    bNonTransactional;
+    NvU32    reserved00[2];
+    NvU32    regOpCount;
+    NV2080_CTRL_GPU_REG_OP regOps[NV2080_CTRL_REG_OPS_ARRAY_MAX];
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS;
+
+/*!
+ * NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO
+ *
+ * [in/out] gpuId
+ *   GPU ID for which the capabilities are queried.
+ *   For the NV2080_CTRL_CMD_GET_P2P_CAPS control:
+ *     If bAllCaps == NV_TRUE, this parameter is an out parameter and equals
+ *     the GPU ID of an attached GPU.
+ *     If bAllCaps == NV_FALSE, this parameter is an in parameter and the requester
+ *     should set it to the ID of the GPU that needs to be queried from.
+ * [out] p2pCaps
+ *   Peer to peer capabilities discovered between the GPUs.
+ *   See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values.
+ * [out] p2pOptimalReadCEs
+ *   Mask of CEs to use for p2p reads over Nvlink.
+ * [out] p2pOptimalWriteCEs
+ *   Mask of CEs to use for p2p writes over Nvlink.
+ * [out] p2pCapsStatus
+ *   Status of all supported p2p capabilities.
+ *   See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values.
+ * [out] busPeerId
+ *   Bus peer ID. For an invalid or a non-existent peer this field
+ *   has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER.
+ */
+typedef struct NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO {
+    NvU32 gpuId;
+    NvU32 p2pCaps;
+    NvU32 p2pOptimalReadCEs;
+    NvU32 p2pOptimalWriteCEs;
+    NvU8  p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE];
+    NvU32 busPeerId;
+} NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO;
+
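+/*
+ * Query sketch for NV2080_CTRL_CMD_GET_P2P_CAPS, defined below (illustrative
+ * only; rmControl and the handles are placeholders for an NvRmControl-style
+ * entry point). With bAllCaps set, RM fills one entry per attached GPU:
+ *
+ *   NV2080_CTRL_GET_P2P_CAPS_PARAMS params = { 0 };
+ *   params.bAllCaps = NV_TRUE;
+ *   if (rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GET_P2P_CAPS,
+ *                 &params, sizeof(params)) == NV_OK)
+ *   {
+ *       for (NvU32 i = 0; i < params.peerGpuCount; i++)
+ *           inspectPeer(&params.peerGpuCaps[i]); // placeholder consumer
+ *   }
+ */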
+/*!
+ * NV2080_CTRL_CMD_GET_P2P_CAPS
+ *
+ * Returns peer to peer capabilities present between GPUs.
+ * The caller must either specify bAllCaps to query the capabilities for
+ * all the attached GPUs or they must pass a valid list of GPU IDs.
+ *
+ * [in] bAllCaps
+ *   Set to NV_TRUE to query the capabilities for all the attached GPUs.
+ *   Set to NV_FALSE and specify peerGpuCount and peerGpuCaps[].gpuId
+ *   to retrieve the capabilities only for the specified GPUs.
+ * [in/out] peerGpuCount
+ *   The number of the peerGpuCaps entries.
+ *   If bAllCaps == NV_TRUE, this parameter is an out parameter and equals
+ *   the total number of the attached GPUs.
+ *   If bAllCaps == NV_FALSE, this parameter is an in parameter and the requester
+ *   should set it to the number of the peerGpuCaps entries.
+ * [in/out] peerGpuCaps
+ *   The array of NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO entries, describing
+ *   the peer to peer capabilities of the GPUs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT - Invalid peerGpuCount
+ *   NV_ERR_OBJECT_NOT_FOUND - Invalid peerGpuCaps[].gpuId
+ */
+#define NV2080_CTRL_CMD_GET_P2P_CAPS (0x208001a0U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GET_P2P_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GET_P2P_CAPS_PARAMS_MESSAGE_ID (0xA0U)
+
+typedef struct NV2080_CTRL_GET_P2P_CAPS_PARAMS {
+    NvBool bAllCaps;
+    NvU32  peerGpuCount;
+    NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO peerGpuCaps[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS];
+} NV2080_CTRL_GET_P2P_CAPS_PARAMS;
+
+/* _ctrl2080gpu_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
new file mode 100644
index 0000000..742b21a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
@@ -0,0 +1,96 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080gpumon.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/*!
+ * This structure represents the base class of a GPU monitoring sample.
+ */
+typedef struct NV2080_CTRL_GPUMON_SAMPLE {
+    /*!
+     * Timestamp in nanoseconds.
+     */
+    NV_DECLARE_ALIGNED(NvU64 timeStamp, 8);
+} NV2080_CTRL_GPUMON_SAMPLE;
+
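+/*
+ * Sizing sketch (illustrative only): for a derived sample type, for example a
+ * hypothetical NV2080_CTRL_PERFMON_UTIL_SAMPLE whose first member is an
+ * NV2080_CTRL_GPUMON_SAMPLE, the ring buffer described by the structure that
+ * follows would be sized as
+ *
+ *   bufSize = SAMPLE_COUNT * sizeof(NV2080_CTRL_PERFMON_UTIL_SAMPLE);
+ *
+ * where SAMPLE_COUNT is the NV2080_CTRL_*_GPUMON_SAMPLE_COUNT_* value defined
+ * for that sample type.
+ */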
+/*!
+ * This structure represents a buffer of GPU monitoring samples.
+ */
+typedef struct NV2080_CTRL_GPUMON_SAMPLES {
+    /*!
+     * Type of the sample, see NV2080_CTRL_GPUMON_SAMPLE_TYPE_* for reference.
+     */
+    NvU8  type;
+    /*!
+     * Size of the buffer, this should be
+     *   bufSize == NV2080_CTRL_*_GPUMON_SAMPLE_COUNT_* *
+     *              sizeof(derived type of NV2080_CTRL_GPUMON_SAMPLE).
+     */
+    NvU32 bufSize;
+    /*!
+     * Number of samples in ring buffer.
+     */
+    NvU32 count;
+    /*!
+     * Tracks the offset of the tail in the circular queue array pSamples.
+     */
+    NvU32 tracker;
+    /*!
+     * Pointer to a circular queue based on an array of NV2080_CTRL_GPUMON_SAMPLE
+     * or its derived types structs with size == bufSize.
+     *
+     * @note This circular queue wraps around after 10 seconds of sampling,
+     * and it is clients' responsibility to query within this time frame in
+     * order to avoid losing samples.
+     * @note With one exception, this queue contains the last 10 seconds of samples
+     * with tracker pointing to the oldest entry and the entry before tracker as the
+     * newest entry. The exception is when the queue is not full (i.e. tracker is
+     * pointing to a zeroed out entry), in which case valid entries are between 0
+     * and tracker.
+     * @note Clients can store tracker from a previous query in order to retrieve
+     * the samples since the last read.
+     */
+    NV_DECLARE_ALIGNED(NvP64 pSamples, 8);
+} NV2080_CTRL_GPUMON_SAMPLES;
+
+/*!
+ * Enumeration of GPU monitoring sample types.
+ */
+#define NV2080_CTRL_GPUMON_SAMPLE_TYPE_PWR_MONITOR_STATUS 0x00000001
+#define NV2080_CTRL_GPUMON_SAMPLE_TYPE_PERFMON_UTIL       0x00000002
+
+/*!
+ * Macro for invalid PID.
+ */
+#define NV2080_GPUMON_PID_INVALID ((NvU32)(~0))
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
new file mode 100644
index 0000000..7cf4380
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
@@ -0,0 +1,1789 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080gr.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+#include "ctrl/ctrl0080/ctrl0080gr.h" /* 2080 is partially derivative of 0080 */
+/*
+ * NV2080_CTRL_GR_ROUTE_INFO
+ *
+ * This structure specifies the routing information used to
+ * disambiguate the target GR engine.
+ *
+ * flags
+ *   This field decides how the route field is interpreted
+ *
+ * route
+ *   This field has the data to identify the target GR engine
+ *
+ */
+#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE         1:0
+#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_NONE    0x0U
+#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_ENGID   0x1U
+#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_CHANNEL 0x2U
+
+#define NV2080_CTRL_GR_ROUTE_INFO_DATA_CHANNEL_HANDLE 31:0
+#define NV2080_CTRL_GR_ROUTE_INFO_DATA_ENGID          31:0
+
+typedef NV0080_CTRL_GR_ROUTE_INFO NV2080_CTRL_GR_ROUTE_INFO;
+
+/* NV20_SUBDEVICE_XX gr control commands and parameters */
+
+/*
+ * NV2080_CTRL_GR_INFO
+ *
+ * This structure represents a single 32-bit gr engine value. Clients
+ * request a particular gr engine value by specifying a unique gr
+ * information index.
+ *
+ * Legal gr information index values are:
+ *   NV2080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT
+ *     This index is used to request the surface buffer alignment (in bytes)
+ *     required by the associated subdevice. The return value is GPU
+ *     implementation-dependent.
+ *   NV2080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT
+ *     This index is used to request the required swizzled surface alignment
+ *     (in bytes) supported by the associated subdevice. The return value
+ *     is GPU implementation-dependent. A return value of 0 indicates the GPU
+ *     does not support swizzled surfaces.
+ *   NV2080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE
+ *     This index is used to request the vertex cache size (in entries)
+ *     supported by the associated subdevice. The return value is GPU
+ *     implementation-dependent. A value of 0 indicates the GPU does not
+ *     have a vertex cache.
+ *   NV2080_CTRL_GR_INFO_INDEX_VPE_COUNT
+ *     This index is used to request the number of VPE units supported by the
+ *     associated subdevice. The return value is GPU implementation-dependent.
+ *     A return value of 0 indicates the GPU does not contain VPE units.
+ *   NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT
+ *     This index is used to request the number of shader pipes supported by
+ *     the associated subdevice. The return value is GPU
+ *     implementation-dependent. A return value of 0 indicates the GPU does
+ *     not contain dedicated shader units.
+ *     For tesla: this value is the number of enabled TPCs
+ *   NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT
+ *     This index is used to request the number of sub units per
+ *     shader pipes supported by the associated subdevice. The return
+ *     value is GPU implementation-dependent. A return value of 0 indicates
+ *     the GPU does not contain dedicated shader units.
+ *     For tesla: this value is the number of enabled SMs (per TPC)
+ *   NV2080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR
+ *     This index is used to request the scaling factor for thread stack
+ *     memory.
+ *     A value of 0 indicates the GPU does not support this function.
+ *   NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT
+ *     This index is used to request the number of SM register banks supported.
+ *     A value of 0 indicates the GPU does not support this function.
+ *   NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT
+ *     This index is used to request the number of registers per SM register
+ *     bank. A value of 0 indicates the GPU does not support this function.
+ *   NV2080_CTRL_GR_INFO_INDEX_SM_VERSION
+ *     This index is used to determine the SM version.
+ *     A value of 0 indicates the GPU does not support this function.
+ *     Otherwise one of NV2080_CTRL_GR_INFO_SM_VERSION_*.
+ * NV2080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM + * This index is used to determine the maximum number of warps + * (thread groups) per SM. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP + * This index is used to determine the maximum number of threads + * in each warp (thread group). + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY + * This index is used to request the default fb memory read/write request + * size in bytes (typically based on the memory configuration/controller). + * Smaller memory requests are likely to take as long as a full one. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY + * This index is used to request the default host memory read/write request + * size in bytes (typically based on the memory configuration/controller). + * Smaller memory requests are likely to take as long as a full one. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM + * This index is used to request the maximum number of streaming processors + * per SM. + * NV2080_CTRL_GR_INFO_INDEX_LITTER_* + * This index is used to query the various LITTER size information from + * the chip. + * NV2080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED + * This index is used to query whether the chip has timeslice mode enabled. + * NV2080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT + * This index is used to return the number of "GPU Cores" + * supported by the graphics pipeline + * NV2080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT + * This index is used to return the number of "Ray Tracing Cores" + * supported by the graphics pipeline + * NV2080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT + * This index is used to return the number of "Tensor Cores" + * supported by the graphics pipeline + */ +typedef NV0080_CTRL_GR_INFO NV2080_CTRL_GR_INFO; + +/* + * Valid GR info index values + * These indices are offset from supporting the 0080 version of this call + */ +#define NV2080_CTRL_GR_INFO_INDEX_MAXCLIPS NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS +#define NV2080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 +#define NV2080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK NV0080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK +/** + * This index is used to request the surface buffer alignment (in bytes) + * required by the associated subdevice. The return value is GPU + * implementation-dependent. + */ +#define NV2080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT NV0080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT +#define NV2080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT NV0080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT +#define NV2080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE NV0080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE +/** + * This index is used to request the number of VPE units supported by the + * associated subdevice. The return value is GPU implementation-dependent. + * A return value of 0 indicates the GPU does not contain VPE units. + */ +#define NV2080_CTRL_GR_INFO_INDEX_VPE_COUNT NV0080_CTRL_GR_INFO_INDEX_VPE_COUNT +/** + * This index is used to request the number of shader pipes supported by + * the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU does + * not contain dedicated shader units. 
+ * For tesla: this value is the number of enabled TPCs
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT
+/**
+ * This index is used to request the scaling factor for thread stack
+ * memory.
+ * A value of 0 indicates the GPU does not support this function.
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR NV0080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR
+/**
+ * This index is used to request the number of sub units per
+ * shader pipes supported by the associated subdevice. The return
+ * value is GPU implementation-dependent. A return value of 0 indicates
+ * the GPU does not contain dedicated shader units.
+ * For tesla: this value is the number of enabled SMs (per TPC)
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT
+/**
+ * This index is used to determine the SM version.
+ * A value of 0 indicates the GPU does not support this function.
+ * Otherwise one of NV2080_CTRL_GR_INFO_SM_VERSION_*.
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_SM_VERSION NV0080_CTRL_GR_INFO_INDEX_SM_VERSION
+/**
+ * This index is used to determine the maximum number of warps
+ * (thread groups) per SM.
+ * A value of 0 indicates the GPU does not support this function.
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM NV0080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM
+/**
+ * This index is used to determine the maximum number of threads
+ * in each warp (thread group).
+ * A value of 0 indicates the GPU does not support this function.
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP NV0080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP
+#define NV2080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES NV0080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES
+#define NV2080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES NV0080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES
+#define NV2080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY NV0080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY
+#define NV2080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY NV0080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY
+#define NV2080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM NV0080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS
+#define NV2080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED NV0080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC
+#define NV2080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS
+/**
+ * This index is used to return the number of "Ray Tracing Cores"
+ * supported by the graphics pipeline
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC
+#define NV2080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC NV0080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP
+#define NV2080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_SINGLETON_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SINGLETON_GPCS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_GPCS
+#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_TPCS_PER_GFXC_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_TPCS_PER_GFXC_GPC
+
+/* When adding a new INDEX, please update INDEX_MAX and MAX_SIZE accordingly
+ * NOTE: 0080 functionality is merged with 2080 functionality, so this max size
+ * reflects that.
+ */
+#define NV2080_CTRL_GR_INFO_INDEX_MAX NV0080_CTRL_GR_INFO_INDEX_MAX
+#define NV2080_CTRL_GR_INFO_MAX_SIZE NV0080_CTRL_GR_INFO_MAX_SIZE
+
+/* valid SM version return values */
+
+#define NV2080_CTRL_GR_INFO_SM_VERSION_NONE (0x00000000U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_1_05 (0x00000105U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_1_1  (0x00000110U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_1_2  (0x00000120U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_1_3  (0x00000130U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_1_4  (0x00000140U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_1_5  (0x00000150U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_2_0  (0x00000200U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_2_1  (0x00000210U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_2_2  (0x00000220U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_0  (0x00000300U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_1  (0x00000310U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_2  (0x00000320U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_3  (0x00000330U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_5  (0x00000350U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_6  (0x00000360U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_8  (0x00000380U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_3_9  (0x00000390U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_4_0  (0x00000400U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_5_0  (0x00000500U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_5_02 (0x00000502U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_5_03 (0x00000503U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_6_0  (0x00000600U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_6_01 (0x00000601U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_6_02 (0x00000602U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_0  (0x00000700U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_01 (0x00000701U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_02 (0x00000702U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_03 (0x00000703U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_05 (0x00000705U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_02 (0x00000802U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_06 (0x00000806U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_07 (0x00000807U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_08 (0x00000808U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_09 (0x00000809U)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_9_00 (0x00000900U)
+
+/* compatibility SM versions to match the official names in the ISA (e.g., SM5.2) */
+#define NV2080_CTRL_GR_INFO_SM_VERSION_5_2 (NV2080_CTRL_GR_INFO_SM_VERSION_5_02)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_5_3 (NV2080_CTRL_GR_INFO_SM_VERSION_5_03)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_6_1 (NV2080_CTRL_GR_INFO_SM_VERSION_6_01)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_6_2 (NV2080_CTRL_GR_INFO_SM_VERSION_6_02)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_1 (NV2080_CTRL_GR_INFO_SM_VERSION_7_01)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_2 (NV2080_CTRL_GR_INFO_SM_VERSION_7_02)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_3 (NV2080_CTRL_GR_INFO_SM_VERSION_7_03)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_7_5 (NV2080_CTRL_GR_INFO_SM_VERSION_7_05)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_2 (NV2080_CTRL_GR_INFO_SM_VERSION_8_02)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_6 (NV2080_CTRL_GR_INFO_SM_VERSION_8_06)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_7 (NV2080_CTRL_GR_INFO_SM_VERSION_8_07)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_8 (NV2080_CTRL_GR_INFO_SM_VERSION_8_08)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_8_9 (NV2080_CTRL_GR_INFO_SM_VERSION_8_09)
+#define NV2080_CTRL_GR_INFO_SM_VERSION_9_0 (NV2080_CTRL_GR_INFO_SM_VERSION_9_00)
+
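+/*
+ * Decoding sketch (illustrative only): the SM version values above encode the
+ * major revision in bits 15:8 and the minor revision in bits 7:0, e.g.
+ * NV2080_CTRL_GR_INFO_SM_VERSION_8_06 == 0x00000806U decodes to SM 8.6:
+ *
+ *   NvU32 smMajor = (smVersion >> 8) & 0xFFU;
+ *   NvU32 smMinor = smVersion & 0xFFU;
+ */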
+/**
+ * NV2080_CTRL_CMD_GR_GET_INFO
+ *
+ * This command returns gr engine information for the associated GPU.
+ * Requests to retrieve gr information use a list of one or more
+ * NV2080_CTRL_GR_INFO structures.
+ *
+ * grInfoListSize
+ *   This field specifies the number of entries on the caller's
+ *   grInfoList.
+ * grInfoList
+ *   This field specifies a pointer in the caller's address space
+ *   to the buffer into which the gr information is to be returned.
+ *   This buffer must be at least as big as grInfoListSize multiplied
+ *   by the size of the NV2080_CTRL_GR_INFO structure.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine. When MIG is enabled, this
+ *   is a mandatory parameter.
+ */
+#define NV2080_CTRL_CMD_GR_GET_INFO (0x20801201U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_GR_GET_INFO_PARAMS {
+    NvU32 grInfoListSize;
+    NV_DECLARE_ALIGNED(NvP64 grInfoList, 8);
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_GET_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE
+ *
+ * This command is used to set the zcull context switch mode for the specified
+ * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the
+ * target channel does not support zcull context switch mode changes.
+ *
+ * hChannel
+ *   This parameter specifies the channel handle of
+ *   the channel that is to have its zcull context switch mode changed.
+ * hShareClient
+ *   Support for sharing zcull buffers across RM clients is no longer
+ *   supported. To maintain API compatibility, this field must match
+ *   the hClient used in the control call.
+ * hShareChannel
+ *   This parameter specifies the channel handle of
+ *   the channel with which the zcull context buffer is to be shared. This
+ *   parameter is valid when zcullMode is set to SEPARATE_BUFFER. This
+ *   parameter should be set to the same value as hChannel if no
+ *   sharing is intended.
+ * zcullMode
+ *   This parameter specifies the new zcull context switch mode.
+ *   Legal values for this parameter include:
+ *     NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_GLOBAL
+ *       This mode is the normal zcull operation where it is not
+ *       context switched and there is one set of globally shared
+ *       zcull memory and tables. This mode is only supported as
+ *       long as all channels use this mode.
+ *     NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_NO_CTXSW
+ *       This mode causes the zcull tables to be reset on a context
+ *       switch, but the zcull buffer will not be saved/restored.
+ *     NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_SEPARATE_BUFFER
+ *       This mode will cause the zcull buffers and tables to be
+ *       saved/restored on context switches. If a share channel
+ *       ID is given (shareChID), then the 2 channels will share
+ *       the zcull context buffers.
+ */
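+/*
+ * Usage sketch for the command defined below (illustrative only; rmControl
+ * and the handles are placeholders for an NvRmControl-style entry point).
+ * Selecting a private, context-switched zcull buffer for a channel:
+ *
+ *   NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS params = { 0 };
+ *   params.hChannel      = hChannel;  // placeholder channel handle
+ *   params.hShareClient  = hClient;   // must match the calling client
+ *   params.hShareChannel = hChannel;  // same as hChannel: no sharing
+ *   params.zcullMode     = NV2080_CTRL_CTXSW_ZCULL_MODE_SEPARATE_BUFFER;
+ *   status = rmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE,
+ *                      &params, sizeof(params));
+ */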
+ */
+#define NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE (0x20801205U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x5" */
+
+typedef struct NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS {
+    NvHandle hChannel;
+    NvHandle hShareClient;
+    NvHandle hShareChannel;
+    NvU32 zcullMode;
+} NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS;
+/* valid zcullMode values */
+#define NV2080_CTRL_CTXSW_ZCULL_MODE_GLOBAL (0x00000000U)
+#define NV2080_CTRL_CTXSW_ZCULL_MODE_NO_CTXSW (0x00000001U)
+#define NV2080_CTRL_CTXSW_ZCULL_MODE_SEPARATE_BUFFER (0x00000002U)
+
+/**
+ * NV2080_CTRL_CMD_GR_GET_ZCULL_INFO
+ *
+ * This command is used to query the RM for zcull information that the
+ * driver will need to allocate and manage the zcull regions.
+ *
+ * widthAlignPixels
+ *   This parameter returns the width alignment restrictions in pixels
+ *   used to adjust a surface for proper aliquot coverage (typically
+ *   #TPC's * 16).
+ *
+ * heightAlignPixels
+ *   This parameter returns the height alignment restrictions in pixels
+ *   used to adjust a surface for proper aliquot coverage (typically 32).
+ *
+ * pixelSquaresByAliquots
+ *   This parameter returns the pixel area covered by an aliquot
+ *   (typically #Zcull_banks * 16 * 16).
+ *
+ * aliquotTotal
+ *   This parameter returns the total aliquot pool available in HW.
+ *
+ * zcullRegionByteMultiplier
+ *   This parameter returns the multiplier used to convert aliquots in a region
+ *   to the number of bytes required to save/restore them.
+ *
+ * zcullRegionHeaderSize
+ *   This parameter returns the region header size which is required to be
+ *   allocated and accounted for in any save/restore operation on a region.
+ *
+ * zcullSubregionHeaderSize
+ *   This parameter returns the subregion header size which is required to be
+ *   allocated and accounted for in any save/restore operation on a region.
+ *
+ * subregionCount
+ *   This parameter returns the subregion count.
+ *
+ * subregionWidthAlignPixels
+ *   This parameter returns the subregion width alignment restrictions in
+ *   pixels used to adjust a surface for proper aliquot coverage
+ *   (typically #TPC's * 16).
+ *
+ * subregionHeightAlignPixels
+ *   This parameter returns the subregion height alignment restrictions in
+ *   pixels used to adjust a surface for proper aliquot coverage
+ *   (typically 62).
+ *
+ * The callee should compute the size of a zcull region as follows.
+ *   (numBytes = aliquots * zcullRegionByteMultiplier +
+ *               zcullRegionHeaderSize + zcullSubregionHeaderSize)
+ */
+#define NV2080_CTRL_CMD_GR_GET_ZCULL_INFO (0x20801206U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_SUBREGION_SUPPORTED
+#define NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS {
+    NvU32 widthAlignPixels;
+    NvU32 heightAlignPixels;
+    NvU32 pixelSquaresByAliquots;
+    NvU32 aliquotTotal;
+    NvU32 zcullRegionByteMultiplier;
+    NvU32 zcullRegionHeaderSize;
+    NvU32 zcullSubregionHeaderSize;
+    NvU32 subregionCount;
+    NvU32 subregionWidthAlignPixels;
+    NvU32 subregionHeightAlignPixels;
+} NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_CTXSW_PM_MODE
+ *
+ * This command is used to set the pm context switch mode for the specified
+ * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the
+ * target channel does not support pm context switch mode changes.
+ *
+ * hChannel
+ *   This parameter specifies the channel handle of
+ *   the channel that is to have its pm context switch mode changed.
+ * pmMode
+ *   This parameter specifies the new pm context switch mode.
+ *   Legal values for this parameter include:
+ *     NV2080_CTRL_CTXSW_PM_MODE_NO_CTXSW
+ *       This mode says that the pms are not to be context switched.
+ *     NV2080_CTRL_CTXSW_PM_MODE_CTXSW
+ *       This mode says that the pms in Mode-B are to be context switched.
+ *     NV2080_CTRL_CTXSW_PM_MODE_STREAM_OUT_CTXSW
+ *       This mode says that the pms in Mode-E (stream out) are to be context switched.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_CTXSW_PM_MODE (0x20801207U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x7" */
+
+typedef struct NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS {
+    NvHandle hChannel;
+    NvU32 pmMode;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS;
+
+/* valid pmMode values */
+#define NV2080_CTRL_CTXSW_PM_MODE_NO_CTXSW (0x00000000U)
+#define NV2080_CTRL_CTXSW_PM_MODE_CTXSW (0x00000001U)
+#define NV2080_CTRL_CTXSW_PM_MODE_STREAM_OUT_CTXSW (0x00000002U)
+
+/*
+ * NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND
+ *
+ * This command is used to set the zcull context switch mode and virtual address
+ * for the specified channel. A value of NV_ERR_NOT_SUPPORTED is
+ * returned if the target channel does not support zcull context switch mode
+ * changes.
+ *
+ * hClient
+ *   This parameter specifies the handle of the client
+ *   that owns the zcull context buffer. This field must match
+ *   the hClient used in the control call for non-kernel clients.
+ * hChannel
+ *   This parameter specifies the channel handle of
+ *   the channel that is to have its zcull context switch mode changed.
+ * vMemPtr
+ *   This parameter specifies the 64 bit virtual address
+ *   for the allocated zcull context buffer.
+ * zcullMode
+ *   This parameter specifies the new zcull context switch mode.
+ *   Legal values for this parameter include:
+ *     NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_GLOBAL
+ *       This mode is the normal zcull operation where it is not
+ *       context switched and there is one set of globally shared
+ *       zcull memory and tables. This mode is only supported as
+ *       long as all channels use this mode.
+ *     NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_NO_CTXSW
+ *       This mode causes the zcull tables to be reset on a context
+ *       switch, but the zcull buffer will not be saved/restored.
+ *     NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_SEPARATE_BUFFER
+ *       This mode will cause the zcull buffers and tables to be
+ *       saved/restored on context switches. If a share channel
+ *       ID is given (shareChID), then the 2 channels will share
+ *       the zcull context buffers.
+ */
+#define NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND (0x20801208U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x8" */
+
+typedef struct NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS {
+    NvHandle hClient;
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 vMemPtr, 8);
+    NvU32 zcullMode;
+} NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS;
+/* valid zcullMode values same as above NV2080_CTRL_CTXSW_ZCULL_MODE */
+
+/*
+ * NV2080_CTRL_CMD_GR_CTXSW_PM_BIND
+ *
+ * This command is used to set the PM context switch mode and virtual address
+ * for the specified channel. A value of NV_ERR_NOT_SUPPORTED is
+ * returned if the target channel does not support PM context switch mode
+ * changes.
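+ *
+ * As an illustrative sketch (an editorial example; the parameters are
+ * documented below), binding an already-mapped PM context buffer at GPU
+ * virtual address pmBufferVa, assuming a hypothetical rmControl() wrapper:
+ *
+ *     NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS bindParams = { 0 };
+ *
+ *     bindParams.hClient  = hClient;
+ *     bindParams.hChannel = hChannel;
+ *     bindParams.vMemPtr  = pmBufferVa;
+ *     bindParams.pmMode   = NV2080_CTRL_CTXSW_PM_MODE_CTXSW;
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_CTXSW_PM_BIND,
+ *                        &bindParams, sizeof(bindParams));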
+ *
+ * hClient
+ *   This parameter specifies the handle of the client
+ *   that owns the PM context buffer.
+ * hChannel
+ *   This parameter specifies the channel handle of
+ *   the channel that is to have its PM context switch mode changed.
+ * vMemPtr
+ *   This parameter specifies the 64 bit virtual address
+ *   for the allocated PM context buffer.
+ * pmMode
+ *   This parameter specifies the new PM context switch mode.
+ *   Legal values for this parameter include:
+ *     NV2080_CTRL_GR_SET_CTXSW_PM_MODE_NO_CTXSW
+ *       This mode says that the pms are not to be context switched
+ *     NV2080_CTRL_GR_SET_CTXSW_PM_MODE_CTXSW
+ *       This mode says that the pms are to be context switched
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_CTXSW_PM_BIND (0x20801209U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x9" */
+
+typedef struct NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS {
+    NvHandle hClient;
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 vMemPtr, 8);
+    NvU32 pmMode;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS;
+/* valid pmMode values same as above NV2080_CTRL_CTXSW_PM_MODE */
+
+/*
+ * NV2080_CTRL_CMD_GR_SET_GPC_TILE_MAP
+ *
+ * Send a list of values used to describe GPC/TPC tile mapping tables.
+ *
+ * mapValueCount
+ *   This field specifies the number of actual map entries. This count
+ *   should equal the number of TPCs in the system.
+ * mapValues
+ *   This field is a pointer to a buffer of NvU8 values representing map
+ *   data.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_GR_SET_GPC_TILE_MAP_MAX_VALUES 128U
+#define NV2080_CTRL_CMD_GR_SET_GPC_TILE_MAP (0x2080120aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0xA" */
+
+typedef struct NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS {
+    NvU32 mapValueCount;
+    NvU8 mapValues[NV2080_CTRL_GR_SET_GPC_TILE_MAP_MAX_VALUES];
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE
+ *
+ * This command is used to set the SMPC context switch mode for the specified
+ * channel or channel group (TSG). A value of NV_ERR_NOT_SUPPORTED
+ * is returned if the target channel/TSG does not support SMPC context switch
+ * mode changes. If a channel is part of a TSG, the user must send in the TSG
+ * handle and not an individual channel handle; an error will be returned if a
+ * channel handle is used in this case.
+ *
+ * SMPC = SM Performance Counters
+ *
+ * hChannel
+ *   This parameter specifies the channel or channel group (TSG) handle
+ *   that is to have its SMPC context switch mode changed.
+ *   If this parameter is set to 0, then the mode below applies to all current
+ *   and future channels (i.e. we will be enabling/disabling global mode)
+ * smpcMode
+ *   This parameter specifies the new SMPC context switch mode.
+ *   Legal values for this parameter include:
+ *     NV2080_CTRL_GR_SET_CTXSW_SMPC_MODE_NO_CTXSW
+ *       This mode says that the SMPC data is not to be context switched.
+ *     NV2080_CTRL_GR_SET_CTXSW_SMPC_MODE_CTXSW
+ *       This mode says that the SMPC data is to be context switched.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
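+ *
+ * As an illustrative sketch (an editorial example, assuming a hypothetical
+ * rmControl() wrapper), enabling SMPC context switching for a TSG:
+ *
+ *     NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS smpcParams = { 0 };
+ *
+ *     smpcParams.hChannel = hTsg;  // TSG handle, per the note above
+ *     smpcParams.smpcMode = NV2080_CTRL_CTXSW_SMPC_MODE_CTXSW;
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE,
+ *                        &smpcParams, sizeof(smpcParams));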
+ */
+#define NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE (0x2080120eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0xE" */
+
+typedef struct NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS {
+    NvHandle hChannel;
+    NvU32 smpcMode;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS;
+
+/* valid smpcMode values */
+#define NV2080_CTRL_CTXSW_SMPC_MODE_NO_CTXSW (0x00000000U)
+#define NV2080_CTRL_CTXSW_SMPC_MODE_CTXSW (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS
+ *
+ * This command returns an array of the mappings between SMs and GPC/TPCs.
+ *
+ * smId
+ *   An array of the mappings between SMs and GPC/TPCs.
+ * smCount
+ *   Returns the number of valid mappings in the array.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS (0x2080120fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_MAX_SM_COUNT 144U
+#define NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS {
+    struct {
+        NvU32 gpcId;
+        NvU32 tpcId;
+    } smId[NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_MAX_SM_COUNT];
+    NvU32 smCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE
+ *
+ * This command is used to set the preemption context switch mode for the specified
+ * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the
+ * target channel does not support preemption context switch mode changes.
+ *
+ * flags
+ *   This field specifies flags for the preemption mode changes.
+ *   These flags tell the callee which mode is valid in the call,
+ *   since we handle graphics and/or compute.
+ * hChannel
+ *   This parameter specifies the channel handle of the channel
+ *   that is to have its preemption context switch mode set.
+ * gfxpPreemptMode
+ *   This parameter specifies the new Graphics preemption context switch
+ *   mode. Legal values for this parameter include:
+ *     NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_WFI
+ *       This mode is the normal wait-for-idle context switch mode.
+ *     NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP
+ *       This mode causes the graphics engine to allow preempting the
+ *       channel mid-triangle.
+ *     NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP_POOL
+ *       This mode causes the graphics engine to use a shared pool of buffers
+ *       to support GfxP with lower memory overhead.
+ * cilpPreemptMode
+ *   This parameter specifies the new Compute preemption context switch
+ *   mode. Legal values for this parameter include:
+ *     NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_WFI
+ *       This mode is the normal wait-for-idle context switch mode.
+ *     NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CTA
+ *       This mode causes the compute engine to allow preempting the channel
+ *       at the CTA level.
+ *     NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP
+ *       This mode causes the compute engine to allow preempting the channel
+ *       at the instruction level.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
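+ *
+ * As an illustrative sketch (an editorial example, assuming a hypothetical
+ * rmControl() wrapper; DRF_DEF() is the DRF field-packing helper from
+ * nvmisc.h), requesting CILP for compute while leaving the graphics mode
+ * untouched (the _GFXP flag stays _IGNORE via zero-initialization):
+ *
+ *     NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS preemptParams = { 0 };
+ *
+ *     preemptParams.hChannel        = hChannel;
+ *     preemptParams.flags           = DRF_DEF(2080_CTRL_GR,
+ *                                             _SET_CTXSW_PREEMPTION_MODE_FLAGS,
+ *                                             _CILP, _SET);
+ *     preemptParams.cilpPreemptMode = NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE,
+ *                        &preemptParams, sizeof(preemptParams));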
+ */
+#define NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE (0x20801210U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x10" */
+
+typedef struct NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS {
+    NvU32 flags;
+    NvHandle hChannel;
+    NvU32 gfxpPreemptMode;
+    NvU32 cilpPreemptMode;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS;
+
+/* valid preemption flags */
+#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP 0:0
+#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP_IGNORE (0x00000000U)
+#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP_SET (0x00000001U)
+#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP 1:1
+#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_IGNORE (0x00000000U)
+#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_SET (0x00000001U)
+
+/* valid Graphics mode values */
+#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_WFI (0x00000000U)
+#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP (0x00000001U)
+#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP_POOL (0x00000002U)
+
+/* valid Compute mode values */
+#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_WFI (0x00000000U)
+#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CTA (0x00000001U)
+#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP (0x00000002U)
+
+/* valid preemption buffers */
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+/*
+ * NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND
+ *
+ * This command is used to set the preemption context switch mode and virtual
+ * addresses of the preemption buffers for the specified channel. A value of
+ * NV_ERR_NOT_SUPPORTED is returned if the target channel does not
+ * support preemption context switch mode changes.
+ *
+ * flags
+ *   This field specifies flags for the preemption mode changes.
+ *   These flags tell the callee which mode is valid in the call,
+ *   since we handle graphics and/or compute.
+ * hClient
+ *   This parameter specifies the handle of the client
+ *   that owns the preemption context buffer.
+ * hChannel
+ *   This parameter specifies the channel handle of the channel
+ *   that is to have its preemption context switch mode set.
+ * vMemPtr
+ *   This parameter specifies the 64 bit virtual address
+ *   for the allocated preemption context buffer.
+ * gfxpPreemptMode
+ *   This parameter specifies the new Graphics preemption context switch
+ *   mode. Legal values for this parameter include:
+ *     NV2080_CTRL_CTXSW_PREEMPTION_MODE_GFX_WFI
+ *       This mode is the normal wait-for-idle context switch mode.
+ *     NV2080_CTRL_CTXSW_PREEMPTION_MODE_GFX_GFXP
+ *       This mode causes the graphics engine to allow preempting the
+ *       channel mid-triangle.
+ * cilpPreemptMode
+ *   This parameter specifies the new Compute preemption context switch
+ *   mode. Legal values for this parameter include:
+ *     NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_WFI
+ *       This mode is the normal wait-for-idle context switch mode.
+ *     NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_CTA
+ *       This mode causes the compute engine to allow preempting the channel
+ *       at the CTA level.
+ *     NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_CILP
+ *       This mode causes the compute engine to allow preempting the channel
+ *       at the instruction level.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND (0x20801211U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x11" */
+
+typedef struct NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS {
+    NvU32 flags;
+    NvHandle hClient;
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 vMemPtrs[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END], 8);
+    NvU32 gfxpPreemptMode;
+    NvU32 cilpPreemptMode;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS;
+/* valid mode and flag values same as above NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE */
+
+/*
+ * NV2080_CTRL_CMD_GR_PC_SAMPLING_MODE
+ *
+ * This command is used to apply the WAR for PC sampling to avoid a hang in
+ * the multi-ctx scenario.
+ *
+ * hChannel
+ *   This parameter specifies the channel or channel group (TSG) handle
+ *   that is to have its PC Sampling mode changed.
+ * samplingMode
+ *   This parameter specifies whether sampling is turned ON or OFF.
+ *   Legal values for this parameter include:
+ *     NV2080_CTRL_GR_SET_PC_SAMPLING_MODE_DISABLED
+ *       This mode says that PC sampling is disabled for the current context.
+ *     NV2080_CTRL_GR_SET_PC_SAMPLING_MODE_ENABLED
+ *       This mode says that PC sampling is enabled for the current context.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_PC_SAMPLING_MODE (0x20801212U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x12" */
+
+typedef struct NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS {
+    NvHandle hChannel;
+    NvU32 samplingMode;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS;
+
+/* valid samplingMode values */
+#define NV2080_CTRL_PC_SAMPLING_MODE_DISABLED (0x00000000U)
+#define NV2080_CTRL_PC_SAMPLING_MODE_ENABLED (0x00000001U)
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_ROP_INFO
+ *
+ * Gets information about ROPs including the ROP unit count and information
+ * about ROP operations per clock.
+ *
+ * ropUnitCount
+ *   The count of active ROP units.
+ * ropOperationsFactor
+ *   The number of ROP operations per clock for a single ROP unit.
+ * ropOperationsCount
+ *   The number of ROP operations per clock across all active ROP units.
+ */
+#define NV2080_CTRL_CMD_GR_GET_ROP_INFO (0x20801213U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ROP_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_ROP_INFO_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_GR_GET_ROP_INFO_PARAMS {
+    NvU32 ropUnitCount;
+    NvU32 ropOperationsFactor;
+    NvU32 ropOperationsCount;
+} NV2080_CTRL_GR_GET_ROP_INFO_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_CTXSW_STATS
+ *
+ * This command is used to get the context switch statistics. The user can
+ * also add a flag to tell RM to reset the stats counters back to 0.
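+ *
+ * As an illustrative sketch (an editorial example; the fields are documented
+ * below), reading the counters and resetting them in the same call, assuming
+ * a hypothetical rmControl() wrapper (DRF_DEF() is the DRF helper from
+ * nvmisc.h):
+ *
+ *     NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS statsParams = { 0 };
+ *
+ *     statsParams.hChannel = hTsg;
+ *     statsParams.flags    = DRF_DEF(2080_CTRL_GR, _GET_CTXSW_STATS_FLAGS,
+ *                                    _RESET, _TRUE);
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GET_CTXSW_STATS,
+ *                        &statsParams, sizeof(statsParams));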
+ *
+ * hChannel
+ *   This parameter specifies the channel or channel group (TSG) handle
+ *   that is to have the stats returned. Note, this must be the TSG handle if
+ *   the channel is part of a TSG.
+ * flags
+ *   This parameter specifies processing flags. See possible flags below.
+ * saveCnt
+ *   This parameter returns the number of saves on the channel.
+ * restoreCnt
+ *   This parameter returns the number of restores on the channel.
+ * wfiSaveCnt
+ *   This parameter returns the number of WFI saves on the channel.
+ * ctaSaveCnt
+ *   This parameter returns the number of CTA saves on the channel.
+ * cilpSaveCnt
+ *   This parameter returns the number of CILP saves on the channel.
+ * gfxpSaveCnt
+ *   This parameter returns the number of GfxP saves on the channel.
+ */
+#define NV2080_CTRL_CMD_GR_GET_CTXSW_STATS (0x20801215U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x15" */
+
+typedef struct NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS {
+    NvHandle hChannel;
+    NvU32 flags;
+    NvU32 saveCnt;
+    NvU32 restoreCnt;
+    NvU32 wfiSaveCnt;
+    NvU32 ctaSaveCnt;
+    NvU32 cilpSaveCnt;
+    NvU32 gfxpSaveCnt;
+} NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS;
+/* valid GET_CTXSW_STATS flags settings */
+#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET 0:0
+#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET_FALSE (0x00000000U)
+#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET_TRUE (0x00000001U)
+
+
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE
+ *
+ * This command provides the size and alignment of all context buffers, including
+ * global and local context buffers which have been created and will be mapped
+ * on a context.
+ *
+ * hChannel [IN]
+ *   This parameter specifies the channel or channel group (TSG) handle
+ * totalBufferSize [OUT]
+ *   This parameter returns the total context buffers size.
+ */
+#define NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE (0x20801218U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS {
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 totalBufferSize, 8);
+} NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS;
+
+/*
+ * NV2080_CTRL_GR_CTX_BUFFER_INFO
+ *   alignment
+ *     Specifies the alignment requirement for each context buffer
+ *   size
+ *     Aligned size of context buffer
+ *   bufferHandle [deprecated]
+ *     Opaque pointer to memdesc. Used by kernel clients for tracking purposes only.
+ *   pageCount
+ *     allocation size in the form of pageCount
+ *   physAddr
+ *     Physical address of the buffer first page
+ *   bufferType
+ *     NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID type of this buffer
+ *   aperture
+ *     allocation aperture. Could be SYSMEM, VIDMEM, UNKNOWN
+ *   kind
+ *     PTE kind of this allocation.
+ *   pageSize
+ *     Page size of the buffer.
+ *   bIsContigous
+ *     States if physical allocation for this buffer is contiguous. PageSize will
+ *     have no meaning if this flag is set.
+ *   bGlobalBuffer
+ *     States if a defined buffer is global, as global buffers need to be mapped
+ *     only once in a TSG.
+ *   bLocalBuffer
+ *     States if a buffer is local to a channel.
+ *   bDeviceDescendant
+ *     TRUE if the allocation is constructed under a Device or Subdevice.
+ *   uuid
+ *     SHA1 UUID of the Device or Subdevice. Valid when bDeviceDescendant is TRUE.
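+ *
+ * As an illustrative sketch (an editorial example, assuming a hypothetical
+ * rmControl() wrapper), totaling the per-buffer sizes returned by
+ * NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO (defined below):
+ *
+ *     NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS infoParams = { 0 };
+ *     NvU64 totalSize = 0;
+ *     NvU32 i;
+ *
+ *     infoParams.hUserClient = hClient;
+ *     infoParams.hChannel    = hChannel;
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO,
+ *                        &infoParams, sizeof(infoParams));
+ *
+ *     for (i = 0; (status == NV_OK) && (i < infoParams.bufferCount); i++)
+ *         totalSize += infoParams.ctxBufferInfo[i].size;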
+ */
+typedef struct NV2080_CTRL_GR_CTX_BUFFER_INFO {
+    NV_DECLARE_ALIGNED(NvU64 alignment, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NV_DECLARE_ALIGNED(NvP64 bufferHandle, 8);
+    NV_DECLARE_ALIGNED(NvU64 pageCount, 8);
+    NV_DECLARE_ALIGNED(NvU64 physAddr, 8);
+    NvU32 bufferType;
+    NvU32 aperture;
+    NvU32 kind;
+    NvU32 pageSize;
+    NvBool bIsContigous;
+    NvBool bGlobalBuffer;
+    NvBool bLocalBuffer;
+    NvBool bDeviceDescendant;
+    NvU8 uuid[16];
+} NV2080_CTRL_GR_CTX_BUFFER_INFO;
+typedef struct NV2080_CTRL_GR_CTX_BUFFER_INFO *PNV2080_CTRL_GR_CTX_BUFFER_INFO;
+
+#define NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT 64U
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO
+ *
+ * This command provides the size and alignment of all context buffers, including
+ * global and local context buffers which have been created and will be mapped
+ * on a context.
+ * If the client invoking the command is a kernel client, the buffers are retained.
+ *
+ * hUserClient [IN]
+ *   This parameter specifies the client handle that owns this channel.
+ * hChannel [IN]
+ *   This parameter specifies the channel or channel group (TSG) handle
+ * bufferCount [OUT]
+ *   This parameter specifies the number of entries in ctxBufferInfo filled
+ *   by the command.
+ * ctxBufferInfo [OUT]
+ *   Array of context buffer info containing alignment, size etc.
+ */
+#define NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO (0x20801219U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID (0x19U)
+
+typedef struct NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS {
+    NvHandle hUserClient;
+    NvHandle hChannel;
+    NvU32 bufferCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTX_BUFFER_INFO ctxBufferInfo[NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT], 8);
+} NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS;
+
+// Aperture flags
+#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_UNKNWON ADDR_UNKNOWN
+#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_SYSMEM ADDR_SYSMEM
+#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_FBMEM ADDR_FBMEM
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER
+ *   This command returns the global logical ordering of SMs w.r.t. GPCs/TPCs.
+ *
+ * NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS
+ *   This structure holds the TPC/SM ordering info.
+ *
+ *   gpcId
+ *     Logical GPC Id.
+ *     This is the ordering of enabled GPCs post floor sweeping.
+ *     The GPCs are numbered from 0 to N-1, where N is the enabled GPC count.
+ *
+ *   localTpcId
+ *     Local Logical TPC Id.
+ *     This is the ordering of enabled TPCs within a GPC post floor sweeping.
+ *     This ID is used in conjunction with the gpcId.
+ *     The TPCs are numbered from 0 to N-1, where N is the enabled TPC count for the given GPC.
+ *
+ *   localSmId
+ *     Local Logical SM Id.
+ *     This is the ordering of enabled SMs within a TPC post floor sweeping.
+ *     This ID is used in conjunction with the localTpcId.
+ *     The SMs are numbered from 0 to N-1, where N is the enabled SM count for the given TPC.
+ *
+ *   globalTpcId
+ *     Global Logical TPC Id.
+ *     This is the ordering of all enabled TPCs in the GPU post floor sweeping.
+ *     The TPCs are numbered from 0 to N-1, where N is the enabled TPC count across all GPCs.
+ *
+ *   globalSmId
+ *     Global Logical SM Id array.
+ *     This is the global ordering of all enabled SMs in the GPU post floor sweeping.
+ *     The SMs are numbered from 0 to N-1, where N is the enabled SM count across all GPCs.
+ *
+ *   virtualGpcId
+ *     Virtual GPC Id.
+ *     This is the ordering of enabled GPCs post floor sweeping (ordered in
+ *     increasing number of TPC counts). The GPCs are numbered from 0 to N-1,
+ *     where N is the enabled GPC count, and 8-23 for singleton TPC holders.
+ *
+ *   migratableTpcId
+ *     Migratable TPC Id.
+ *     This is the same as the local TPC Id for virtual GPCs 0-8 (true physical GPCs) and 0 for
+ *     virtual GPCs 8-23 that represent singleton TPCs.
+ *
+ *   numSm
+ *     Enabled SM count across all GPCs.
+ *     This represents the valid entries in the globalSmId array.
+ *
+ *   numTpc
+ *     Enabled TPC count across all GPCs.
+ *
+ *   grRouteInfo
+ *     This parameter specifies the routing information used to
+ *     disambiguate the target GR engine.
+ *
+ */
+#define NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER (0x2080121bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER_MAX_SM_COUNT 512U
+
+#define NV2080_CTRL_GR_DISABLED_SM_VGPC_ID 0xFFU
+
+#define NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID (0x1BU)
+
+typedef struct NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS {
+    struct {
+        NvU16 gpcId;
+        NvU16 localTpcId;
+        NvU16 localSmId;
+        NvU16 globalTpcId;
+        NvU16 virtualGpcId;
+        NvU16 migratableTpcId;
+    } globalSmId[NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER_MAX_SM_COUNT];
+
+    NvU16 numSm;
+    NvU16 numTpc;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL
+ *
+ * This command returns the current resident channel on the GR engine.
+ *
+ * chID [OUT]
+ *   RM returns the current resident channel on the GR engine
+ * grRouteInfo [IN]
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL (0x2080121cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x1C" */
+
+typedef struct NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS {
+    NvU32 chID;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA
+ *
+ * This command provides the _VAT_ALARM data, i.e. the error and warning
+ * counter and timestamp values, along with the max GPC and TPC per GPC counts.
+ *
+ * smVatAlarm [OUT]
+ *   VAT Alarm data array per SM containing per GPC per TPC, counter and
+ *   timestamp values for error and warning alarms.
+ * maxGpcCount [OUT]
+ *   This parameter returns the max GPC count.
+ * maxTpcPerGpcCount [OUT]
+ *   This parameter returns the max TPC per GPC count.
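+ *
+ * As an illustrative sketch (an editorial example; rmControl() and
+ * reportError() are hypothetical helpers), walking the per-TPC error
+ * counters:
+ *
+ *     NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS vatParams = { 0 };
+ *     NvU32 gpc, tpc;
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA,
+ *                        &vatParams, sizeof(vatParams));
+ *
+ *     for (gpc = 0; (status == NV_OK) && (gpc < vatParams.maxGpcCount); gpc++)
+ *         for (tpc = 0; tpc < vatParams.maxTpcPerGpcCount; tpc++)
+ *             reportError(gpc, tpc,
+ *                         vatParams.smVatAlarm.gpc[gpc].tpc[tpc].errorCounter);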
+ */
+#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_GPC_COUNT 10U
+#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_TPC_PER_GPC_COUNT 10U
+
+#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA (0x2080121dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x1D" */
+
+typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC {
+    NV_DECLARE_ALIGNED(NvU64 errorCounter, 8);
+    NV_DECLARE_ALIGNED(NvU64 errorTimestamp, 8);
+    NV_DECLARE_ALIGNED(NvU64 warningCounter, 8);
+    NV_DECLARE_ALIGNED(NvU64 warningTimestamp, 8);
+} NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC;
+
+typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC tpc[NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_TPC_PER_GPC_COUNT], 8);
+} NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC;
+
+typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC gpc[NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_GPC_COUNT], 8);
+} NV2080_CTRL_GR_VAT_ALARM_DATA;
+
+typedef struct NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA smVatAlarm, 8);
+    NvU32 maxGpcCount;
+    NvU32 maxTpcPerGpcCount;
+} NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS;
+typedef struct NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS *PNV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE
+ *
+ * This command provides the size of the GR attribute buffer.
+ *
+ * attribBufferSize [OUT]
+ *   This parameter returns the attribute buffer size.
+ */
+#define NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE (0x2080121eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x1EU)
+
+typedef struct NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS {
+    NvU32 attribBufferSize;
+} NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GFX_POOL_QUERY_SIZE
+ *
+ * This API queries size parameters for a requested maximum graphics preemption
+ * pool size. It is only available to kernel callers.
+ *
+ * NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS
+ *   struct to return the size parameters
+ *
+ * maxSlots
+ *   Input specifying the maximum number of slots; RM will calculate the output
+ *   parameters based on this. Must be non-zero.
+ * ctrlStructSize
+ *   Output indicating the required size in bytes of the control structure to
+ *   support a pool of maxSlots size.
+ * ctrlStructAlign
+ *   Output indicating the required alignment of the control structure.
+ * poolSize
+ *   Output indicating the required size in bytes of the GfxP Pool.
+ * poolAlign
+ *   Output indicating the required alignment of the GfxP Pool.
+ * slotStride
+ *   The number of bytes in each slot; i * slotStride gives the offset from the
+ *   base of the pool to slot i.
+ */
+#define NV2080_CTRL_CMD_GR_GFX_POOL_QUERY_SIZE (0x2080121fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x1F" */
+
+typedef struct NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS {
+    NvU32 maxSlots;
+    NvU32 slotStride;
+    NV_DECLARE_ALIGNED(NvU64 ctrlStructSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 ctrlStructAlign, 8);
+    NV_DECLARE_ALIGNED(NvU64 poolSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 poolAlign, 8);
+} NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GFX_POOL_INITIALIZE
+ *
+ * This API takes a CPU pointer to a GfxP Pool Control Structure and does the
+ * required one-time initialization. It should be called once and only once
+ * before a pool is used. It is only accessible to kernel callers.
+ *
+ * NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS
+ *   struct to hand in the required info to RM
+ *
+ * pControlStructure
+ *   This input is the kernel CPU pointer to the control structure.
+ */
+#define NV2080_CTRL_CMD_GR_GFX_POOL_INITIALIZE (0x20801220U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x20" */
+
+typedef struct NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pControlStructure, 8);
+    NvU32 maxSlots;
+} NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS;
+
+#define NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS 64U
+
+/*
+ * NV2080_CTRL_CMD_GR_GFX_POOL_ADD_SLOTS
+ *
+ * This API adds a list of buffer slots to a given control structure. It can
+ * only be called when no channel using the given pool is running or may become
+ * running for the duration of this call. If more slots are added than there
+ * is room for in the control structure the behavior is undefined. It is only
+ * accessible to kernel callers.
+ *
+ * NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS
+ *
+ * pControlStructure
+ *   This input is the kernel CPU pointer to the control structure
+ * numSlots
+ *   This input indicates how many slots are being added and are contained in the slots parameter
+ * slots
+ *   This input contains an array of the slots to be added to the control structure
+ */
+#define NV2080_CTRL_CMD_GR_GFX_POOL_ADD_SLOTS (0x20801221U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x21" */
+
+typedef struct NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pControlStructure, 8);
+    NvU32 numSlots;
+    NvU32 slots[NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS];
+} NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS
+ *
+ * This API removes buffer slots from a given control structure. It can
+ * only be called when no channel using the given pool is running or may become
+ * running for the duration of this call. It can operate in two modes: either
+ * it will remove a specified number of slots, or it will remove a specified
+ * list of slots.
+ *
+ * It is only accessible to kernel callers.
+ *
+ * NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS_PARAMS
+ *
+ * pControlStructure
+ *   This input is the kernel CPU pointer to the control structure
+ * numSlots
+ *   This input indicates how many slots are being removed. If
+ *   bRemoveSpecificSlots is true, then it also indicates how many entries in
+ *   the slots array are populated.
+ * slots
+ *   This array is either an input or output. If bRemoveSpecificSlots is true,
+ *   then this will contain the list of slots to remove. If it is false, then
+ *   it will be populated by RM with the indexes of the slots that were
+ *   removed.
+ * bRemoveSpecificSlots
+ *   This input determines which mode the call will run in. If true, the caller
+ *   will specify the list of slots they want removed; if any of those slots
+ *   are not on the freelist, the call will fail. If false, they only specify
+ *   the number of slots they want removed and RM will pick up to that
+ *   many. If there are not enough slots on the freelist to remove the
+ *   requested amount, RM will return the number it was able to remove.
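+ *
+ * As an illustrative sketch (an editorial example, assuming a hypothetical
+ * rmControl() wrapper), letting RM pick up to four slots to remove from the
+ * freelist:
+ *
+ *     NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS removeParams = { 0 };
+ *
+ *     removeParams.pControlStructure    = pControlStructure;
+ *     removeParams.numSlots             = 4;
+ *     removeParams.bRemoveSpecificSlots = NV_FALSE;
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS,
+ *                        &removeParams, sizeof(removeParams));
+ *
+ * On success, per the description above, the slots array is populated by RM
+ * with the indexes of the slots that were actually removed.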
+ */
+#define NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS (0x20801222U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x22" */
+
+typedef struct NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pControlStructure, 8);
+    NvU32 numSlots;
+    NvU32 slots[NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS];
+    NvBool bRemoveSpecificSlots;
+} NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS;
+
+
+
+#define NV2080_CTRL_CMD_GR_GET_CAPS_V2 (0x20801227U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x27U)
+
+typedef NV0080_CTRL_GR_GET_CAPS_V2_PARAMS NV2080_CTRL_GR_GET_CAPS_V2_PARAMS;
+
+#define NV2080_CTRL_CMD_GR_GET_INFO_V2 (0x20801228U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID (0x28U)
+
+typedef NV0080_CTRL_GR_GET_INFO_V2_PARAMS NV2080_CTRL_GR_GET_INFO_V2_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_GPC_MASK
+ *
+ * This command returns a mask of enabled GPCs for the associated subdevice.
+ *
+ * grRouteInfo[IN]
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ *
+ * gpcMask[OUT]
+ *   This parameter returns a mask of enabled GPCs. Each GPC has an ID
+ *   that's equivalent to the corresponding bit position in the mask.
+ */
+#define NV2080_CTRL_CMD_GR_GET_GPC_MASK (0x2080122aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GPC_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_GPC_MASK_PARAMS_MESSAGE_ID (0x2AU)
+
+typedef struct NV2080_CTRL_GR_GET_GPC_MASK_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+    NvU32 gpcMask;
+} NV2080_CTRL_GR_GET_GPC_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_TPC_MASK
+ *
+ * This command returns a mask of enabled TPCs for a specified GPC.
+ *
+ * grRouteInfo[IN]
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ *
+ * gpcId[IN]
+ *   This parameter specifies the GPC for which TPC information is
+ *   to be retrieved. If the GPC with this ID is not enabled this command
+ *   will return a tpcMask value of zero.
+ *
+ * tpcMask[OUT]
+ *   This parameter returns a mask of enabled TPCs for the specified GPC.
+ *   Each TPC has an ID that's equivalent to the corresponding bit
+ *   position in the mask.
+ */
+#define NV2080_CTRL_CMD_GR_GET_TPC_MASK (0x2080122bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_TPC_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_TPC_MASK_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV2080_CTRL_GR_GET_TPC_MASK_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+    NvU32 gpcId;
+    NvU32 tpcMask;
+} NV2080_CTRL_GR_GET_TPC_MASK_PARAMS;
+
+#define NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE (0x2080122cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x2C" */
+
+typedef NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS;
+
+// FINN PORT: The below type was generated by the FINN port to
+// ensure that all APIs have a unique structure associated
+// with them!
+#define NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x2CU) + +typedef struct NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS params, 8); +} NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES + * + * This command is used to provide the caller with the alignment and size + * of the context save region for an engine + * + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * engineId + * This parameter is an input parameter specifying the engineId for which + * the alignment/size is requested. + * alignment + * This parameter is an output parameter which will be filled in with the + * minimum alignment requirement. + * size + * This parameter is an output parameter which will be filled in with the + * minimum size of the context save region for the engine. + * bInfoPopulated + * This parameter will be set if alignment and size are already set with + * valid values from a previous call. + */ + +#define NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES (0x2080122dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID (0x2DU) + +typedef struct NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 engineId; + NvU32 alignment; + NvU32 size; + NvBool bInfoPopulated; +} NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER + * + * This command provides an interface to retrieve the speed select values of + * various instruction types. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * imla0[OUT] + * The current speed select for IMLA0. + * + * fmla16[OUT] + * The current speed select for FMLA16. + * + * dp[OUT] + * The current speed select for DP. + * + * fmla32[OUT] + * The current speed select for FMLA32. + * + * ffma[OUT] + * The current speed select for FFMA. + * + * imla1[OUT] + * The current speed select for IMLA1. + * + * imla2[OUT] + * The current speed select for IMLA2. + * + * imla3[OUT] + * The current speed select for IMLA3. + * + * imla4[OUT] + * The current speed select for IMLA4. 
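+ *
+ * As an illustrative sketch (an editorial example, assuming a hypothetical
+ * rmControl() wrapper), checking whether FFMA runs at a reduced issue rate:
+ *
+ *     NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS rateParams = { 0 };
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER,
+ *                        &rateParams, sizeof(rateParams));
+ *
+ *     if ((status == NV_OK) &&
+ *         (rateParams.ffma != NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_FULL_SPEED))
+ *     {
+ *         // FFMA issues at a reduced rate on this configuration.
+ *     }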
+ */ +#define NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER (0x20801230U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_32 (0x5U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_DP_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_DP_REDUCED_SPEED (0x1U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_32 (0x5U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_32 (0x5U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_64 (0x6U) + 
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU8 imla0; + NvU8 fmla16; + NvU8 dp; + NvU8 fmla32; + NvU8 ffma; + NvU8 imla1; + NvU8 imla2; + NvU8 imla3; + NvU8 imla4; +} NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID + * + * *DEPRECATED* Use NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2 instead + * + * This command is used to create a FECS bind-point to an event buffer that + * is filtered by UID. + * + * hEventBuffer[IN] + * The event buffer to bind to + * + * recordSize[IN] + * The size of the FECS record in bytes + * + * levelOfDetail[IN] + * One of NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_: + * FULL: Report all CtxSw events + * SIMPLE: Report ACTIVE_REGION_START and ACTIVE_REGION_END only + * COMPAT: Events that KMD is interested in (for backwards compatibility) + * CUSTOM: Report events in the eventFilter field + * NOTE: RM may override the level-of-detail depending on the caller + * + * eventFilter[IN] + * Bitmask of events to report if levelOfDetail is CUSTOM + * + * bAllUsers[IN] + * Only report FECS CtxSw data for the current user if false, for all users if true + */ + +#define NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID (0x20801231U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD { + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_FULL = 0, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_SIMPLE = 1, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_COMPAT = 2, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_CUSTOM = 3, +} NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD; + +#define NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS_MESSAGE_ID (0x31U) + +typedef struct NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS { + NvHandle hEventBuffer; + NvU32 recordSize; + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail; + NvU32 eventFilter; + NvBool bAllUsers; +} NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK + * + * This command returns a mask of physical GPC Ids for the associated syspipe + * + * physSyspipeId[IN] + * This parameter specifies syspipe for which phys GPC mask is requested + * + * gpcMask[OUT] + * This 
parameter returns a mask of GPCs mapped to the provided syspipe.
+ *   Each GPC-ID has a corresponding bit position in the mask.
+ */
+#define NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK (0x20801232U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS_MESSAGE_ID (0x32U)
+
+typedef struct NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS {
+    NvU32 physSyspipeId;
+    NvU32 gpcMask;
+} NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_PPC_MASK
+ *
+ * This command returns a mask of enabled PPCs for a specified GPC.
+ *
+ * grRouteInfo[IN]
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ *
+ * gpcId[IN]
+ *   This parameter specifies the GPC for which PPC information is
+ *   to be retrieved. If the GPC with this ID is not enabled this command
+ *   will return a ppcMask value of zero.
+ *
+ * ppcMask[OUT]
+ *   This parameter returns a mask of enabled PPCs for the specified GPC.
+ *   Each PPC has an ID that's equivalent to the corresponding bit
+ *   position in the mask.
+ */
+#define NV2080_CTRL_CMD_GR_GET_PPC_MASK (0x20801233U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_PPC_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_PPC_MASK_PARAMS_MESSAGE_ID (0x33U)
+
+typedef struct NV2080_CTRL_GR_GET_PPC_MASK_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+    NvU32 gpcId;
+    NvU32 ppcMask;
+} NV2080_CTRL_GR_GET_PPC_MASK_PARAMS;
+
+#define NV2080_CTRL_CMD_GR_GET_NUM_TPCS_FOR_GPC (0x20801234U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS_MESSAGE_ID (0x34U)
+
+typedef struct NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS {
+    NvU32 gpcId;
+    NvU32 numTpcs;
+} NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_CTXSW_MODES
+ *
+ * This command is used to get context switch modes for the specified
+ * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the
+ * target channel does not support context switch mode changes.
+ *
+ * hChannel
+ *   This parameter specifies the channel handle of
+ *   the channel that is to have its context switch modes retrieved.
+ * zcullMode
+ *   See NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE for possible return values
+ * pmMode
+ *   See NV2080_CTRL_CMD_GR_CTXSW_PM_MODE for possible return values
+ * smpcMode
+ *   See NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE for possible return values
+ * cilpPreemptMode
+ *   See NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE for possible return values
+ * gfxpPreemptMode
+ *   See NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE for possible return values
+ */
+#define NV2080_CTRL_CMD_GR_GET_CTXSW_MODES (0x20801235U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID (0x35U)
+
+typedef struct NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS {
+    NvHandle hChannel;
+    NvU32 zcullMode;
+    NvU32 pmMode;
+    NvU32 smpcMode;
+    NvU32 cilpPreemptMode;
+    NvU32 gfxpPreemptMode;
+} NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP
+ *
+ * Get a list of values used to describe GPC/TPC tile mapping tables.
+ *
+ * mapValueCount
+ *   This field specifies the number of actual map entries. This count
+ *   should equal the number of TPCs in the system.
+ * mapValues
+ *   This field is a pointer to a buffer of NvU8 values representing map
+ *   data.
+ * grRouteInfo
+ *   This parameter specifies the routing information used to
+ *   disambiguate the target GR engine.
+ */
+#define NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP (0x20801236U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x36" */
+
+typedef NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS;
+
+// FINN PORT: The below type was generated by the FINN port to
+// ensure that all APIs have a unique structure associated
+// with them!
+#define NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP_FINN_PARAMS_MESSAGE_ID (0x36U)
+
+typedef struct NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP_FINN_PARAMS {
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS params, 8);
+} NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP_FINN_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_GR_GET_ZCULL_MASK
+ *
+ * This command returns a mask of enabled ZCULLs for a specified GPC.
+ *
+ * gpcId[IN]
+ *   This parameter, a physical GPC index, specifies the GPC for which ZCULL
+ *   information is to be retrieved. If the GPC with this ID is not enabled
+ *   this command will return a zcullMask value of zero.
+ *
+ * zcullMask[OUT]
+ *   This parameter returns a mask of enabled ZCULLs for the specified GPC.
+ *   Each ZCULL has an ID that's equivalent to the corresponding bit
+ *   position in the mask.
+ */
+
+#define NV2080_CTRL_CMD_GR_GET_ZCULL_MASK (0x20801237U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS_MESSAGE_ID (0x37U)
+
+typedef struct NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS {
+    NvU32 gpcId;
+    NvU32 zcullMask;
+} NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2
+ *
+ * This command is used to create a FECS bind-point to an event buffer that
+ * is filtered by UID.
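+ *
+ * As an illustrative sketch (an editorial example; the parameters are
+ * documented below), binding an already-allocated event buffer with the
+ * SIMPLE level of detail, assuming a hypothetical rmControl() wrapper:
+ *
+ *     NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS bindParams = { 0 };
+ *
+ *     bindParams.hEventBuffer  = hEventBuffer;
+ *     bindParams.recordSize    = recordSize;
+ *     bindParams.levelOfDetail = NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_SIMPLE;
+ *     bindParams.bAllUsers     = NV_FALSE;
+ *
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2,
+ *                        &bindParams, sizeof(bindParams));
+ *
+ * On failure, bindParams.reasonCode reports why the bind was rejected.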
+ *
+ * hEventBuffer[IN]
+ *   The event buffer to bind to
+ *
+ * recordSize[IN]
+ *   The size of the FECS record in bytes
+ *
+ * levelOfDetail[IN]
+ *   One of NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_:
+ *     FULL: Report all CtxSw events
+ *     SIMPLE: Report ACTIVE_REGION_START and ACTIVE_REGION_END only
+ *     COMPAT: Events that KMD is interested in (for backwards compatibility)
+ *     CUSTOM: Report events in the eventFilter field
+ *   NOTE: RM may override the level-of-detail depending on the caller
+ *
+ * eventFilter[IN]
+ *   Bitmask of events to report if levelOfDetail is CUSTOM
+ *
+ * bAllUsers[IN]
+ *   Only report FECS CtxSw data for the current user if false, for all users if true
+ *
+ * reasonCode [OUT]
+ *   Reason for failure
+ */
+#define NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2 (0x20801238U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS_MESSAGE_ID" */
+
+typedef enum NV2080_CTRL_GR_FECS_BIND_EVTBUF_REASON_CODE {
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NONE = 0,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_GPU_TOO_OLD = 1,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED_GPU = 2,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED = 3,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NEED_ADMIN = 4,
+    NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NEED_CAPABILITY = 5,
+} NV2080_CTRL_GR_FECS_BIND_EVTBUF_REASON_CODE;
+
+#define NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS_MESSAGE_ID (0x38U)
+
+typedef struct NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS {
+    NvHandle hEventBuffer;
+    NvU32 recordSize;
+    NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail;
+    NvU32 eventFilter;
+    NvBool bAllUsers;
+    NvU32 reasonCode;
+} NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS;
+
+/* _ctrl2080gr_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
new file mode 100644
index 0000000..481b59a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
@@ -0,0 +1,263 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN,
+// Source file: ctrl/ctrl2080/ctrl2080grmgr.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX grmgr control commands and parameters */
+
+//
+// NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO
+//
+// This control call works as a batched query interface: multiple different
+// queries can be passed in, and RM will return the associated data and
+// status for each.
+// If there is any error in NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS itself,
+// we will immediately fail the call.
+// However, if there is an error in a query-specific call, we will
+// log the error and march on.
+//
+// NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS
+// numQueries[IN]
+// - Specifies the number of valid queries that the caller will be passing in
+//
+// Possible status values returned are:
+// NV_OK
+// NV_ERR_INVALID_ARGUMENT
+// NV_ERR_INVALID_STATE
+//
+#define NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO (0x20803801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GRMGR_INTERFACE_ID << 8) | NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_MESSAGE_ID" */
+
+// Max number of queries that can be batched in a single call to NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES 96
+
+//
+// Preference is to keep the max size of the union at 24 bytes (i.e. 6 32-bit
+// members) so that the size of the entire query struct is maintained at
+// 32 bytes, to ensure that the overall params struct does not exceed 4kB
+//
+#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_MAX_SIZE 32
+#define NV2080_CTRL_GRMGR_MAX_SMC_IDS 8
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS
+ * gpcCount[OUT]
+ * - Number of logical/local GPCs that the client can use to construct the
+ * logical/local mask, respectively
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS {
+    NvU32 gpcCount; // param[out] - logical/local GPC count
+} NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS
+ * gpcId[IN]
+ * - Logical/local GPC ID
+ * chipletGpcMap[OUT]
+ * - Returns chiplet GPC ID for legacy case and device monitoring client
+ * - Returns local GPC ID (== input gpcId) for SMC client
+ * - Does not support DM attribution case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS {
+    NvU32 gpcId;         // param[in]  - logical/local GPC ID
+    NvU32 chipletGpcMap; // param[out] - chiplet GPC ID
+} NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS
+ * gpcId[IN]
+ * - Logical/local GPC ID
+ * tpcMask[OUT]
+ * - Returns physical TPC mask for legacy, DM client and SMC cases
+ * - Does not support DM attribution case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS {
+    NvU32 gpcId;   // param[in]  - logical/local GPC ID
+    NvU32 tpcMask; // param[out] - physical TPC mask
+} NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS;
+
+/*!
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS
+ * gpcId[IN]
+ * - Logical/local GPC ID
+ * ppcMask[OUT]
+ * - Returns physical PPC mask for legacy, DM client and SMC cases
+ * - Does not support DM attribution case
+ */
+typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS {
+    NvU32 gpcId;   // param[in]  - logical/local GPC ID
+    NvU32 ppcMask; // param[out] - physical PPC mask
+} NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS;
+
+/*!
+ * !!! DEPRECATED - This query will return NV_ERR_NOT_SUPPORTED since deleting
+ * it would break driver compatibility !!!
+ * + * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS + * swizzId[IN] + * - Swizz ID of partition + * - A DM client with an invalid swizz ID, will fail this call + * - This parameter is not compulsory for an SMC client; the subscription + * itself will do the necessary validation. + * gpcId[IN] + * - Logical/local GPC ID + * chipletGpcMap[OUT] + * - Returns chiplet GPC ID for legacy case and device monitoring client + * - Returns local GPC ID (== input gpcId) for SMC client + * - Does not support non-attribution case for DM client + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS { + NvU32 swizzId; // param[in] - swizz ID of partition + NvU32 gpcId; // param[in] - logical/local GPC ID + NvU32 chipletGpcMap; // param[out] - chiplet GPC ID +} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS + * gpcId[IN] + * - Logical/local GPC ID + * ropMask[OUT] + * - Returns physical ROP mask for legacy, DM client + * - Returns logical ROP mask for SMC + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS { + NvU32 gpcId; // param[in] - logical/local GPC ID + NvU32 ropMask; // param[out] - physical ROP mask +} NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS + * chipletSyspipeMask [OUT] + * - Mask of chiplet SMC-IDs for DM client attribution case + * - Mask of local SMC-IDs for SMC client + * - Legacy case returns 1 GR + * - Does not support attribution case for DM client + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS { + NvU32 chipletSyspipeMask; // param[out] - Mask of chiplet SMC IDs +} NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS + * swizzId[IN] + * - Swizz ID of partition + * - A DM client with an invalid swizz ID, will fail this call + * physSyspipeId[GRMGR_MAX_SMC_IDS] [OUT] + * - Physical SMC-IDs mapped to partition local idx for DM client attribution case + * - Does not support non-attribution case for DM client, SMC clients, legacy case + * physSyspipeIdCount[OUT] + * - Valid count of physSmcIds which has been populated in above array. + * - Failure case will return 0 + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS { + NvU16 swizzId; // param[in] - swizz ID of partition + NvU16 physSyspipeIdCount; // param[out] - Count of physSmcIds in above array + NvU8 physSyspipeId[NV2080_CTRL_GRMGR_MAX_SMC_IDS]; // param[out] - physical/local SMC IDs +} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS + * swizzId[IN] + * - Swizz ID of partition + * - Mandatory parameter + * - A DM client with an invalid swizz ID, will fail this call + * grIdx[IN] + * - Local grIdx for a partition + * - Mandatory parameter + * gpcEnMask[OUT] + * - Logical enabled GPC mask associated with requested grIdx of the partition i.e swizzid->engineId->gpcMask + * - These Ids should be used as input further + * - Does not support non-attribution case for DM client, SMC clients, legacy case + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS { + NvU32 swizzId; // param[in] - swizz ID of partition + NvU32 grIdx; // param[in] - partition local GR ID + NvU32 gpcEnMask; // param[out] - logical enabled GPC mask +} NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS; + +/*! 
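+ * Usage sketch for the batched NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO
+ * interface documented above (hypothetical client code; an NvRmControl-style
+ * entry point is assumed, not defined in this header):
+ *
+ *   NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS p = {0};
+ *   p.numQueries = 1;
+ *   p.queries[0].queryType = NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT;
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO,
+ *                        &p, sizeof(p));
+ *   // overall errors fail the whole call; per-query errors land in
+ *   // p.queries[0].status, results in p.queries[0].queryData.gpcCountData
+ */
+
+/*!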
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID + * syspipeId[OUT] + * - Partition-local GR idx for client subscribed to exec partition + * - Does not support legacy case, DM client, or SMC client subscribed only to partition + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS { + NvU32 syspipeId; // param[out] - partition-local Gr idx +} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS; + +/*! + * queryType[IN] + * - Use queryType defines to specify what information is being requested + * status[OUT] + * - Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS { + NvU16 queryType; + NvU8 reserved[2]; // To keep the struct aligned for now and available for future use (if needed) + NvU32 status; + union { + NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS gpcCountData; + NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS chipletGpcMapData; + NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS tpcMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS ppcMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS partitionGpcMapData; + NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS syspipeMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS partitionChipletSyspipeData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS dmGpcMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS partitionSyspipeIdData; + NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS ropMaskData; + } queryData; +} NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS; + +#define NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS { + NvU16 numQueries; + NvU8 reserved[6]; // To keep the struct aligned for now and available for future use (if needed) + NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS queries[NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES]; +} NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS; + +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_INVALID 0 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT 1 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GPC_MAP 2 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_TPC_MASK 3 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PPC_MASK 4 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_GPC_MAP 5 /* deprecated */ +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_SYSPIPE_MASK 6 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_SYSPIPE_IDS 7 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PROFILER_MON_GPC_MASK 8 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_SYSPIPE_ID 9 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_ROP_MASK 10 + +/* _ctrl2080grmgr_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h new file mode 100644 index 0000000..ee04d61 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gsp.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX GSP control commands and parameters */ + +/* + * NV2080_CTRL_CMD_GSP_GET_FEATURES + * + * This command is used to determine which GSP features are + * supported on this GPU. + * + * gspFeatures + * Bit mask that specifies GSP features supported. + * bValid + * If this field is set to NV_TRUE, then above bit mask is + * considered valid. Otherwise, bit mask should be ignored + * as invalid. bValid will be set to NV_TRUE when RM is a + * GSP client with GPU support offloaded to GSP firmware. + * bDefaultGspRmGpu + * If this field is set to NV_TRUE, it indicates that the + * underlying GPU has GSP-RM enabled by default. If set to NV_FALSE, + * it indicates that the GPU has GSP-RM disabled by default. + * firmwareVersion + * This field contains the buffer into which the firmware build version + * should be returned, if GPU is offloaded. Otherwise, the buffer + * will remain untouched. 
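+ *
+ * Usage sketch (hypothetical client code; an NvRmControl-style entry point
+ * is assumed, not defined in this header):
+ *
+ *   NV2080_CTRL_GSP_GET_FEATURES_PARAMS p = {0};
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_GSP_GET_FEATURES, &p, sizeof(p));
+ *   if (status == NV_OK && p.bValid)
+ *   {
+ *       // UVM_ENABLED occupies bit 0:0 of gspFeatures (see defines below)
+ *       NvBool bUvm = ((p.gspFeatures & 0x1) ==
+ *                      NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_TRUE);
+ *   }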
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_GSP_GET_FEATURES (0x20803601) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID << 8) | NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_GSP_MAX_BUILD_VERSION_LENGTH (0x0000040)
+
+#define NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_GSP_GET_FEATURES_PARAMS {
+    NvU32  gspFeatures;
+    NvBool bValid;
+    NvBool bDefaultGspRmGpu;
+    NvU8   firmwareVersion[NV2080_GSP_MAX_BUILD_VERSION_LENGTH];
+} NV2080_CTRL_GSP_GET_FEATURES_PARAMS;
+
+/* Valid feature values */
+#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED 0:0
+#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_FALSE (0x00000000)
+#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_TRUE (0x00000001)
+
+// _ctrl2080gsp_h_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
new file mode 100644
index 0000000..f0df6c5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
@@ -0,0 +1,55 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080hshub.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/*
+ * NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK
+ *
+ * This command gets the active HSHUB masks.
+ *
+ * hshubNcisocMask
+ * Mask of NCISOC-enabled active HSHUBs.
+ * hshubNvlMask
+ * Mask of NVLINK-capable active HSHUBs.
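+ *
+ * Usage sketch (hypothetical client code; an NvRmControl-style entry point
+ * is assumed, not defined in this header):
+ *
+ *   NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS p = {0};
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK,
+ *                        &p, sizeof(p));
+ *   // p.hshubNcisocMask and p.hshubNvlMask then hold the active HSHUB masks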
+ */
+
+#define NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS {
+    NvU32 hshubNcisocMask;
+    NvU32 hshubNvlMask;
+} NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS;
+
+#define NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK (0x20804101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_HSHUB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS_MESSAGE_ID" */
+
+/* _ctrl2080hshub_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
new file mode 100644
index 0000000..b3314d9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
@@ -0,0 +1,365 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080i2c.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX i2c-related control commands and parameters */
+
+/*
+ * NV2080_CTRL_I2C_VERSION
+ *
+ * NV2080_CTRL_I2C_VERSION_0_0:
+ * This return state specifies that support is only available
+ * for single subAddr reads.
+ *
+ */
+#define NV2080_CTRL_I2C_VERSION_0 0x00
+
+/* maximum number of I2C entries supported */
+#define NV2080_CTRL_I2C_MAX_ENTRIES 256
+#define NV2080_CTRL_I2C_MAX_REG_LEN 8
+#define NV2080_CTRL_I2C_MAX_ADDR_ENTRIES 20
+
+/*
+ * NV2080_CTRL_I2C_FLAGS
+ *
+ * NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC:
+ * This option specifies that non-compliant I2C access for the SI1930UC is required
+ *
+ * NV2080_CTRL_I2C_FLAGS_PRIVILEGE
+ * This option specifies that the I2C access is privileged
+ *
+ * NV2080_CTRL_I2C_FLAGS_PX3540
+ * This option specifies that the I2C device -PX3540/3544- is accessed
+ */
+#define NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC (0x00000001)
+#define NV2080_CTRL_I2C_FLAGS_PRIVILEGE (0x00000002)
+#define NV2080_CTRL_I2C_FLAGS_DATA_ENCRYPTED (0x00000004)
+#define NV2080_CTRL_I2C_FLAGS_PX3540 (0x00000010)
+#define NV2080_CTRL_I2C_FLAGS_ADDR_AUTO_INC_NOT_SUPPORTED (0x00000008)
+
+/*
+ * NV2080_CTRL_CMD_I2C_READ_BUFFER
+ *
+ * This command reads a buffer of data from a target device on the specified
+ * I2C port.
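+ *
+ * Usage sketch (hypothetical client code; the NvRmControl-style entry point
+ * and the device/register addresses are assumptions; see the field
+ * descriptions below):
+ *
+ *   NV2080_CTRL_I2C_READ_BUFFER_PARAMS p = {0};
+ *   p.port           = 1;     // I2C port/bus to use
+ *   p.inputCount     = 2;     // chip address + one subaddress
+ *   p.inputBuffer[0] = 0xA0;  // chip (device) address
+ *   p.inputBuffer[1] = 0x00;  // subaddress (starting register)
+ *   p.outputCount    = 16;    // number of bytes to read back
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_I2C_READ_BUFFER, &p, sizeof(p));
+ *   // on NV_OK, p.outputBuffer[0..15] holds the bytes read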
+ *
+ * version
+ * This field is returned to the client and indicates the current
+ * supported I2C controls available.
+ *
+ * port
+ * This field must be specified by the client to indicate the port/bus
+ * on which I2C access is desired.
+ *
+ * flags
+ * This field is specified by the client to request additional options
+ * as provided by NV2080_CTRL_I2C_FLAGS.
+ *
+ * inputCount
+ * This field specifies the total # of elements contained in inputBuffer
+ *
+ * inputBuffer
+ * This should contain the chipaddr as the first element, followed by
+ * each subAddress at which the first element of data is to be accessed
+ * Eg. ...
+ * In general, the client will only have 2 elements
+ *
+ * outputCount
+ * This field specifies how many registers to read from the start register
+ * index. The maximum value allowed is NV2080_CTRL_I2C_MAX_ENTRIES.
+ *
+ * outputBuffer
+ * This buffer is returned to the client with the data read from
+ * the start register index.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_STATE_IN_USE
+ * NV_ERR_INVALID_STATE
+ * NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_I2C_READ_BUFFER_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_I2C_READ_BUFFER_PARAMS {
+    NvU32 version;
+    NvU32 port;
+    NvU32 flags;
+    NvU32 inputCount;
+    // C form: NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU8  inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU32 outputCount;
+    // C form: NvU8 outputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU8  outputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+} NV2080_CTRL_I2C_READ_BUFFER_PARAMS;
+
+#define NV2080_CTRL_CMD_I2C_READ_BUFFER (0x20800601) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_READ_BUFFER_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_I2C_WRITE_BUFFER
+ *
+ * This command writes a buffer of data to a target device on the specified
+ * I2C port.
+ *
+ * version
+ * This field is returned to the client and indicates the current
+ * supported I2C controls available.
+ *
+ * port
+ * This field must be specified by the client to indicate the port/bus
+ * on which I2C access is desired.
+ *
+ * flags
+ * This field is specified by the client to request additional options.
+ * NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC:
+ * - Specifies that non-compliant I2C access for the SI1930UC is required
+ *
+ * inputCount
+ * This field specifies the total # of elements contained in inputBuffer
+ *
+ * inputBuffer
+ * This should contain the chipaddr as the first element, followed by
+ * each subAddress at which the first element of data is to be accessed,
+ * and finally the data to be programmed.
+ * Eg. ... ...
+ * In general, the client will have 2 elements + the data to be programmed.
+ * ...
+ *
+ * encrClientID
+ * This field is specified by the client and is used to uniquely access
+ * the client's encryption context
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_STATE_IN_USE
+ * NV_ERR_INVALID_STATE
+ *
+ */
+
+#define NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS {
+    NvU32 version;
+    NvU32 port;
+    NvU32 flags;
+    NvU32 inputCount;
+    // C form: NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU8  inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES];
+    NvU32 encrClientID;
+} NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS;
+
+#define NV2080_CTRL_CMD_I2C_WRITE_BUFFER (0x20800602) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS_MESSAGE_ID" */
+
+
+/*
+ * NV2080_CTRL_CMD_I2C_READ_REG
+ *
+ * This command reads the contents of a device register on the specified I2C
+ * port; the same parameter structure is shared with
+ * NV2080_CTRL_CMD_I2C_WRITE_REG below.
+ *
+ * version
+ * This field is returned to the client and indicates the current
+ * supported I2C controls available.
+ *
+ * port
+ * This field must be specified by the client to indicate the port/bus
+ * on which I2C access is desired.
+ *
+ * flags
+ * This field is specified by the client to request additional options.
+ * NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC:
+ * - Specifies that non-compliant I2C access for the SI1930UC is required
+ * addr
+ * This field is specified by the client to indicate the target device
+ * address.
+ * reg
+ * This field is specified by the client to indicate the target register
+ * address.
+ *
+ * bufsize
+ * This field specifies the size of the register data, in bytes
+ *
+ * buffer
+ * When used for a read, this buffer receives the returned register
+ * contents; when used for a write, it holds the data to be written.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_STATE_IN_USE
+ * NV_ERR_INVALID_STATE
+ *
+ */
+typedef struct NV2080_CTRL_I2C_RW_REG_PARAMS {
+    NvU32 version;
+    NvU32 port;
+    NvU32 flags;
+    NvU32 addr;
+    NvU8  reg;
+    NvU8  bufsize;
+    // C form: NvU8 buffer[NV2080_CTRL_I2C_MAX_ENTRIES - 1];
+    NvU8  buffer[(NV2080_CTRL_I2C_MAX_ENTRIES - 1)];
+} NV2080_CTRL_I2C_RW_REG_PARAMS;
+
+// provide NV2080_CTRL_I2C_READ_REG_PARAMS as the historical name
+typedef NV2080_CTRL_I2C_RW_REG_PARAMS NV2080_CTRL_I2C_READ_REG_PARAMS;
+#define NV2080_CTRL_CMD_I2C_READ_REG (0x20800603) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | 0x3" */
+
+#define NV2080_CTRL_CMD_I2C_WRITE_REG (0x20800604) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | 0x4" */
+
+/*
+ * NV2080_CTRL_CMD_I2C_ACCESS
+ *
+ * This command allows clients to read and write data using the I2C ports
+ *
+ * token [IN]
+ * Token used in i2cAcquirePort
+ *
+ * cmd [IN]
+ * The I2CAccess command
+ *
+ * port [IN]
+ * The port ID of the concerned display
+ *
+ * flags [IN]
+ * The I2CAccess flags, such as ACK, START and STOP
+ *
+ * data [OUT/IN]
+ * Data to be passed in or read out
+ *
+ * dataBuffSize [IN]
+ * Size of the data buffer.
+ *
+ * speed [IN]
+ * Speed of transaction.
+ * + * status [OUT] + * The I2CAccess Status returned + * + * encrClientID [IN] + * This field is specified by client, which is used to uniquely access + * the client's encryption context + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_I2C_ACCESS (0x20800610) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_ACCESS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_I2C_ACCESS_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_I2C_ACCESS_PARAMS { + NvU32 token; + NvU32 cmd; + NvU32 port; + NvU32 flags; + NV_DECLARE_ALIGNED(NvP64 data, 8); + NvU32 status; + NvU32 dataBuffSize; + NvU32 speed; + NvU32 encrClientID; +} NV2080_CTRL_I2C_ACCESS_PARAMS; + +// commands +#define NV2080_CTRL_I2C_ACCESS_CMD_ACQUIRE 0x1 +#define NV2080_CTRL_I2C_ACCESS_CMD_RELEASE 0x2 +#define NV2080_CTRL_I2C_ACCESS_CMD_WRITE_BYTE 0x3 +#define NV2080_CTRL_I2C_ACCESS_CMD_READ_BYTE 0x4 +#define NV2080_CTRL_I2C_ACCESS_CMD_NULL 0x5 +#define NV2080_CTRL_I2C_ACCESS_CMD_RESET 0x6 +#define NV2080_CTRL_I2C_ACCESS_CMD_TEST_PORT 0x11 +#define NV2080_CTRL_I2C_ACCESS_CMD_SET_FAST_MODE 0x12 +#define NV2080_CTRL_I2C_ACCESS_CMD_SET_NORMAL_MODE 0x13 +#define NV2080_CTRL_I2C_ACCESS_CMD_WRITE_BUFFER 0x14 +#define NV2080_CTRL_I2C_ACCESS_CMD_READ_BUFFER 0x15 +#define NV2080_CTRL_I2C_ACCESS_CMD_START 0x17 +#define NV2080_CTRL_I2C_ACCESS_CMD_STOP 0x18 +#define NV2080_CTRL_I2C_ACCESS_CMD_SET_SLOW_MODE 0x20 + +// flags +#define NV2080_CTRL_I2C_ACCESS_FLAG_START 0x1 +#define NV2080_CTRL_I2C_ACCESS_FLAG_STOP 0x2 +#define NV2080_CTRL_I2C_ACCESS_FLAG_ACK 0x4 +#define NV2080_CTRL_I2C_ACCESS_FLAG_RAB 0x8 +#define NV2080_CTRL_I2C_ACCESS_FLAG_ADDR_10BITS 0x10 +#define NV2080_CTRL_I2C_ACCESS_FLAG_PRIVILEGE 0x20 +#define NV2080_CTRL_I2C_ACCESS_FLAG_DATA_ENCRYPTED 0x40 +#define NV2080_CTRL_I2C_ACCESS_FLAG_RESTART 0x80 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_33_33PCT 0x100 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_33PCT 0x200 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_10PCT 0x400 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_3_33PCT 0x800 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_3PCT 0x1000 + +// port +#define NV2080_CTRL_I2C_ACCESS_PORT_DYNAMIC 0x0 +#define NV2080_CTRL_I2C_ACCESS_PORT_PRIMARY 0x1 +#define NV2080_CTRL_I2C_ACCESS_PORT_SECONDARY 0x2 +#define NV2080_CTRL_I2C_ACCESS_PORT_TERTIARY 0x3 +#define NV2080_CTRL_I2C_ACCESS_PORT_QUARTIARY 0x4 + +// Alternate numeric port designators +#define NV2080_CTRL_I2C_ACCESS_PORT_1 0x1 +#define NV2080_CTRL_I2C_ACCESS_PORT_2 0x2 +#define NV2080_CTRL_I2C_ACCESS_PORT_3 0x3 +#define NV2080_CTRL_I2C_ACCESS_PORT_4 0x4 +#define NV2080_CTRL_I2C_ACCESS_PORT_5 0x5 +#define NV2080_CTRL_I2C_ACCESS_PORT_6 0x6 +#define NV2080_CTRL_I2C_ACCESS_PORT_7 0x7 +#define NV2080_CTRL_I2C_ACCESS_PORT_8 0x8 +#define NV2080_CTRL_I2C_ACCESS_PORT_9 0x9 +#define NV2080_CTRL_I2C_ACCESS_PORT_10 0x10 + +// Total ports count +#define NV2080_CTRL_I2C_ACCESS_NUM_PORTS NV2080_CTRL_I2C_ACCESS_PORT_10 + +// status +#define NV2080_CTRL_I2C_ACCESS_STATUS_SUCCESS 0x0 +#define NV2080_CTRL_I2C_ACCESS_STATUS_ERROR 0x1 +#define NV2080_CTRL_I2C_ACCESS_STATUS_PROTOCOL_ERROR 0x2 +#define NV2080_CTRL_I2C_ACCESS_STATUS_DEVICE_BUSY 0x3 +#define NV2080_CTRL_I2C_ACCESS_STATUS_NACK_AFTER_SEND 0x4 +#define NV2080_CTRL_I2C_ACCESS_STATUS_DP2TMDS_DONGLE_MISSING 0x5 + +#define NV2080_CTRL_CMD_I2C_ENABLE_MONITOR_3D_MODE (0x20800620) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | 
NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS { + NvU32 head; + NvU32 authType; + NvU32 status; +} NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS; + +/* _ctrl2080i2c_h_ */ + + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h new file mode 100644 index 0000000..73db051 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080illum.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h new file mode 100644 index 0000000..50d6ba9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h @@ -0,0 +1,2291 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080internal.finn
+//
+
+#include "nvimpshared.h"
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+#include "ctrl/ctrl2080/ctrl2080gpu.h"
+#include "ctrl/ctrl2080/ctrl2080gr.h" /* Some controls derivative of 2080gr */
+#include "ctrl/ctrl0080/ctrl0080msenc.h" /* NV0080_CTRL_MSENC_CAPS_TBL_SIZE */
+#include "ctrl/ctrl0080/ctrl0080bsp.h" /* NV0080_CTRL_BSP_CAPS_TBL_SIZE */
+#include "ctrl/ctrl2080/ctrl2080fifo.h" /* NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO */
+#include "ctrl/ctrl0000/ctrl0000system.h"
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO
+ *
+ * This command obtains information from physical RM for use by CPU-RM.
+ *
+ * feHwSysCap
+ * Display IP v03_00 and later.
+ * Contents of capability register.
+ *
+ * windowPresentMask
+ * Display IP v03_00 and later.
+ * Mask for the present WINDOWs actually on the current chip.
+ * bFbRemapperEnabled
+ * Display IP v02_01 and later.
+ * Indicates that the display remapper HW exists and is enabled.
+ * numHeads
+ * Display IP v02_01 and later.
+ * Provides the number of heads the HW supports.
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+    NvU32  feHwSysCap;
+    NvU32  windowPresentMask;
+    NvBool bFbRemapperEnabled;
+    NvU32  numHeads;
+    NvBool bPrimaryVga;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+
+
+//
+// MemorySystem settings that are static after GPU state init/load is finished.
+//
+// Fields are shared between the VGPU guest/GSP Client as well as the VGPU
+// host/GSP-RM.
+//
+#define NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS_MESSAGE_ID (0x1CU)
+
+typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS {
+    /*! Determines if RM should use 1 to 1 Comptagline allocation policy */
+    NvBool bOneToOneComptagLineAllocation;
+
+    /*! Determines if RM should use 1 to 4 Comptagline allocation policy */
+    NvBool bUseOneToFourComptagLineAllocation;
+
+    /*! Determines if RM should use raw Comptagline allocation policy */
+    NvBool bUseRawModeComptaglineAllocation;
+
+    /*! Has COMPBIT_BACKING_SIZE been overridden to zero (i.e. disabled)? */
+    NvBool bDisableCompbitBacking;
+
+    /*! Determines if we need to disable post-L2 compression */
+    NvBool bDisablePostL2Compression;
+
+    /*! Is ECC DRAM feature supported? */
+    NvBool bEnabledEccFBPA;
+
+    NvBool bL2PreFill;
+
+    /*! L2 cache size */
+    NV_DECLARE_ALIGNED(NvU64 l2CacheSize, 8);
+
+    NvBool bReservedMemAtBottom;
+
+    /*! Indicates whether FBPA is present or not */
+    NvBool bFbpaPresent;
+
+    /*! Size covered by one comptag */
+    NvU32 comprPageSize;
+
+    /*! log2(comprPageSize) */
+    NvU32 comprPageShift;
+
+    /*! Maximum number of pages that can be dynamically blacklisted */
+    NvU16 maximumBlacklistPages;
+
+    /*! RAM type */
+    NvU32 ramType;
+
+    /*! LTC count */
+    NvU32 ltcCount;
+
+    /*! LTS per LTC count */
+    NvU32 ltsPerLtcCount;
+
+    /*!
Ampere PLC bug */ + NvBool bDisablePlcForCertainOffsetsBug3046774; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS; + +/*! + * Retrieve Memory System Static data. + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_STATIC_CONFIG (0x20800a1c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_REGISTER_UVM_ACCESS_CNTR_BUFFER + * + * This command sends access counter buffer pages allocated by CPU-RM + * to be setup and enabled in physical RM. + * + * bufferSize + * Size of the access counter buffer to register. + * + * bufferPteArray + * Pages of access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER (0x20800a1d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_UVM_ACCESS_CNTR_BUFFER_MAX_PAGES 64 +#define NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID (0x1DU) + +typedef struct NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS { + NvU32 bufferSize; + NV_DECLARE_ALIGNED(NvU64 bufferPteArray[NV2080_CTRL_INTERNAL_UVM_ACCESS_CNTR_BUFFER_MAX_PAGES], 8); +} NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER + * + * This command requests physical RM to disable the access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER (0x20800a1e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x1E" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_UVM_SERVICE_ACCESS_CNTR_BUFFER + * + * This command requests physical RM to service the access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_SERVICE_ACCESS_CNTR_BUFFER (0x20800a21) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x21" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE + * + * This command retrieves the access counter buffer size from physical RM. + * + * bufferSize[OUT] + * Size of the access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE (0x20800a29) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x29U) + +typedef struct NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS { + NvU32 bufferSize; +} NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_CAPS_V2 + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CAPS (0x20800a1f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x1F" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CAPS { + NvU8 capsTbl[NV0080_CTRL_GR_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CAPS; +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CAPS engineCaps[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS; + +/*! 
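+ * Usage sketch for the internal static-caps query above (hypothetical
+ * kernel-side code; an NvRmControl-style entry point is assumed, not
+ * defined in this header):
+ *
+ *   NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS p = {0};
+ *   status = NvRmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CAPS,
+ *                        &p, sizeof(p));
+ *   // p.engineCaps[grIdx].capsTbl[] holds the caps table for GR engine grIdx
+ */
+
+/*!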
+ * @ref NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER + * @ref NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_GLOBAL_SM_ORDER (0x20800a22) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x22" */ + + + +#define NV2080_CTRL_INTERNAL_GR_MAX_SM 240 + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER { + struct { + NvU16 gpcId; + NvU16 localTpcId; + NvU16 localSmId; + NvU16 globalTpcId; + NvU16 virtualGpcId; + NvU16 migratableTpcId; + } globalSmId[NV2080_CTRL_INTERNAL_GR_MAX_SM]; + + NvU16 numSm; + NvU16 numTpc; +} NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER globalSmOrder[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS; + + +/*! + * Retrieve BSP Static data. + */ +#define NV2080_CTRL_CMD_INTERNAL_BSP_GET_CAPS (0x20800a24) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_MAX_BSPS 8 + +typedef struct NV2080_CTRL_INTERNAL_BSP_CAPS { + NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_BSP_CAPS; + +#define NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_BSP_CAPS caps[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS]; + NvBool valid[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS]; +} NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS; + +/*! + * Retrieve MSENC Static data. + */ +#define NV2080_CTRL_CMD_INTERNAL_MSENC_GET_CAPS (0x20800a25) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS 3 + +typedef struct NV2080_CTRL_INTERNAL_MSENC_CAPS { + NvU8 capsTbl[NV0080_CTRL_MSENC_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_MSENC_CAPS; + +#define NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_MSENC_CAPS caps[NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS]; + NvBool valid[NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS]; +} NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS; + + +#define NV2080_CTRL_INTERNAL_GR_MAX_GPC 12 +#define NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT 10 + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_GPC_MASK + * @ref NV2080_CTRL_CMD_GR_GET_TPC_MASK + * @ref NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FLOORSWEEPING_MASKS (0x20800a26) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x26" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS { + NvU32 gpcMask; + + /*! + * tpcMask is indexed by logical GPC ID for MIG case + * and indexed by physical GPC ID for non-MIG case + */ + NvU32 tpcMask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + /*! + * tpcCount is always indexed by logical GPC ID + */ + NvU32 tpcCount[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + NvU32 physGpcMask; + NvU32 mmuPerGpc[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + NvU32 tpcToPesMap[NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT]; + NvU32 numPesPerGpc[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + /*! 
+ * zcullMask is always indexed by physical GPC ID + */ + NvU32 zcullMask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; +} NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS { + /*! + * floorsweeping masks which are indexed via local GR index + */ + NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS floorsweepingMasks[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS; + +/* + * NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES + * + * This command returns physical addresses of specified context buffer. + * To obtain addresses of whole buffer firstPage has to be advanced on + * subsequent invocations of the control until whole buffer is probed. + * If the buffer is contiguous, only single address will be returned by + * this control. + * + * bufferType[IN] + * Buffer type as returned by GET_CTX_BUFFER_INFO. + * + * firstPage[IN] + * Index of the first page to return in 'physAddrs' array. + * + * numPages[OUT] + * Number of entries filled in 'physAddrs' array. This will be 0 + * if firstPage is greater or equal to number of pages managed by 'hBuffer'. + * + * physAddrs[OUT] + * Physical addresses of pages comprising specified buffer. + * + * bNoMorePages[OUT] + * End of buffer reached. Either 'physAddrs' contains last page of the + * buffer or 'firstPage' specifies index past the buffer. + */ +#define NV2080_CTRL_KGR_MAX_BUFFER_PTES 128 +#define NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES (0x20800a28) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS { + NvHandle hUserClient; + NvHandle hChannel; + NvU32 bufferType; + NvU32 firstPage; + NvU32 numPages; + NV_DECLARE_ALIGNED(NvU64 physAddrs[NV2080_CTRL_KGR_MAX_BUFFER_PTES], 8); + NvBool bNoMorePages; +} NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS; + +/*! + * @ref NV0080_CTRL_CMD_GR_GET_INFO + * @ref NV0080_CTRL_CMD_GR_GET_INFO_V2 + * @ref NV2080_CTRL_CMD_GR_GET_INFO + * @ref NV2080_CTRL_CMD_GR_GET_INFO_V2 + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_INFO (0x20800a2a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x2A" */ + + + +/*! + * @ref NV2080_CTRL_GR_INFO + */ +typedef struct NV2080_CTRL_INTERNAL_GR_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_INTERNAL_GR_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_INFO { + NV2080_CTRL_INTERNAL_GR_INFO infoList[NV0080_CTRL_GR_INFO_MAX_SIZE]; +} NV2080_CTRL_INTERNAL_STATIC_GR_INFO; +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_INFO engineInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS; + +/*! 
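+ * Paging loop for NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES above (a sketch;
+ * hypothetical client code, with an NvRmControl-style entry point assumed):
+ *
+ *   NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS p = {0};
+ *   p.hUserClient = hUserClient;
+ *   p.hChannel    = hChannel;
+ *   p.bufferType  = bufferType;   // as returned by GET_CTX_BUFFER_INFO
+ *   do {
+ *       status = NvRmControl(hClient, hSubdevice,
+ *                            NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES,
+ *                            &p, sizeof(p));
+ *       // consume p.numPages entries of p.physAddrs[] here
+ *       p.firstPage += p.numPages;
+ *   } while (status == NV_OK && !p.bNoMorePages);
+ */
+
+/*!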
+ * @ref NV2080_CTRL_CMD_GR_GET_ZCULL_INFO + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ZCULL_INFO (0x20800a2c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x2C" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO { + NvU32 widthAlignPixels; + NvU32 heightAlignPixels; + NvU32 pixelSquaresByAliquots; + NvU32 aliquotTotal; + NvU32 zcullRegionByteMultiplier; + NvU32 zcullRegionHeaderSize; + NvU32 zcullSubregionHeaderSize; + NvU32 subregionCount; + NvU32 subregionWidthAlignPixels; + NvU32 subregionHeightAlignPixels; +} NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO engineZcullInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_ROP_INFO + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ROP_INFO (0x20800a2e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x2E" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO { + NvU32 ropUnitCount; + NvU32 ropOperationsFactor; + NvU32 ropOperationsCount; +} NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO engineRopInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_PPC_MASK + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PPC_MASKS (0x20800a30) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x30" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS { + NvU32 mask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; +} NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS enginePpcMasks[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES + * @ref NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x32" */ + + + +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19 + +typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { + NvU32 size; + NvU32 alignment; +} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { + NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +/*! 
+ * @ref NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER (0x20800a34) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x34" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER { + NvU8 imla0; + NvU8 fmla16; + NvU8 dp; + NvU8 fmla32; + NvU8 ffma; + NvU8 imla1; + NvU8 imla2; + NvU8 imla3; + NvU8 imla4; +} NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER smIssueRateModifier[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS; + +/* + * NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS + * + * This command obtains information from physical RM for use by CPU-RM. + */ + +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_CHIP_INFO (0x20800a36) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS_MESSAGE_ID" */ + +/* + * Maximum number of register bases to return. + * These are indexed by NV_REG_BASE_* constants from gpu.h, and this value needs + * to be updated if NV_REG_BASE_LAST ever goes over it. See the ct_assert() in gpu.h + */ +#define NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX 16 +#define NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS { + NvU8 chipSubRev; + NvU32 emulationRev1; + NvBool isCmpSku; + NvU32 bar1Size; + NvU32 pciDeviceId; + NvU32 pciSubDeviceId; + NvU32 pciRevisionId; + NvU32 regBases[NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX]; +} NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE + * + * Set whether or not context switch logging is enabled + * + * bEnable + * Enable/Disable status for context switch logging + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE (0x20800a37) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE + * + * Retrieve whether or not context switch logging is enabled + * + * bEnable + * Enable/Disable status for context switch logging + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE (0x20800a38) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvBool bEnable; +} NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID (0x37U) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID (0x38U) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET + * + * Set read offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET (0x20800a39) /* finn: 
Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET + * + * Set write offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET (0x20800a3a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET + * + * Get read offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ + +#define NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET (0x20800a3b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 offset; +} NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID (0x39U) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS_MESSAGE_ID (0x3AU) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID (0x3BU) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE + * + * Get size of FECS record + * + * fecsRecordSize + * Size of FECS record + */ + + + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE (0x20800a3d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE { + NvU32 fecsRecordSize; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID (0x3CU) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE fecsRecordSize[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID (0x3DU) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES + * + * Get HW defines used to extract information from FECS records + * + * fecsRecordSize + * Size of FECS record + * + * timestampHiTagMask + * Mask for high bits of raw timestamp to extract tag + * + * timestampHiTagShift + * Shift for high bits of raw timestamp to extract tag + * + * timestampVMask + * Mask to extract timestamp from raw timestamp + * + * numLowerBitsZeroShift + * Number of bits timestamp is shifted by + */ + + + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES (0x20800a3f) /* finn: Evaluated from 
"(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x3F" */ + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES { + NvU32 fecsRecordSize; + NvU32 timestampHiTagMask; + NvU8 timestampHiTagShift; + NV_DECLARE_ALIGNED(NvU64 timestampVMask, 8); + NvU8 numLowerBitsZeroShift; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES fecsTraceDefines[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES], 8); +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE + * + * Parse the DEVICE_INFO2_TABLE on the physical side and return it to kernel. + */ +typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO { + NvU32 faultId; + NvU32 instanceId; + NvU32 typeEnum; + NvU32 resetId; + NvU32 devicePriBase; + NvU32 isEngine; + NvU32 rlEngId; + NvU32 runlistPriBase; +} NV2080_CTRL_INTERNAL_DEVICE_INFO; +#define NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES 88 + +#define NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE (0x20800a40) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS { + NvU32 numEntries; + NV2080_CTRL_INTERNAL_DEVICE_INFO deviceInfoTable[NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES]; +} NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP (0x20800a41) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_COMPRESSED_SIZE 4096 +#define NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_PROFILING_RANGES 4096 + +#define NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS_MESSAGE_ID (0x41U) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS { + NvU32 userRegisterAccessMapSize; + NvU32 compressedSize; + NvU8 compressedData[NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_COMPRESSED_SIZE]; + NvU32 profilingRangesSize; + NvU8 profilingRanges[NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_PROFILING_RANGES]; +} NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS; + +typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO { + NvU32 engDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; + NvU32 registerBase; +} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO; +#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40 + +#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS { + NvU32 numConstructedFalcons; + NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS]; +} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS; + +/** + * Get GR PDB properties synchronized between Kernel and Physical + * + * bPerSubCtxheaderSupported + * @ref 
PDB_PROP_GR_SUPPORTS_PER_SUBCONTEXT_CONTEXT_HEADER + */ + + + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PDB_PROPERTIES (0x20800a48) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x48" */ + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES { + NvBool bPerSubCtxheaderSupported; +} NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES pdbTable[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM + * + * This command writes instance memory information in the display hardware registers. + * + * instMemPhysAddr + * GPU physical address or IOVA address of the display instance memory. + * + * instMemSize + * Size of the display instance memory. + * + * instMemAddrSpace + * Address space of the display instance memory. + * + * instMemCpuCacheAttr + * Cache attribute of the display instance memory. + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID (0x49U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 instMemSize, 8); + NvU32 instMemAddrSpace; + NvU32 instMemCpuCacheAttr; +} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS; + +/* + * NV2080_CTRL_INTERNAL_GPU_RECOVER_ALL_COMPUTE_CONTEXTS + * + * This command issues RC recovery for all compute contexts running on the given GPU. + */ +#define NV2080_CTRL_CMD_INTERNAL_RECOVER_ALL_COMPUTE_CONTEXTS (0x20800a4a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x4A" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION + * + * This command obtains IP version of display engine for use by Kernel RM. + * + * ipVersion + * IP Version of display engine. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED - DISP has been disabled + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION (0x20800a4b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS_MESSAGE_ID (0x4BU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS { + NvU32 ipVersion; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE + * + * This command determines the current status of MIG MODE from Physical RM. + * + * smcMode [OUT] + * Current MIG MODE of the GPU. Values range NV2080_CTRL_GPU_INFO_GPU_SMC_MODE* + */ +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE (0x20800a4c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS_MESSAGE_ID (0x4CU) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS { + NvU32 smcMode; +} NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR + * + * head + * This parameter specifies the head for which the callback is to be registered/unregistered. 
This value must be + * less than the maximum number of heads supported by the GPU subdevice. + * + * rgLineNum + * This indicates the RG scanout line number on which the callback will be executed. + * + * intrLine + * Enable: [out] Which interrupt line was allocated for this head. + * Disable: [in] Which interrupt line to deallocate. + * + * bEnable + * Should we allocate or deallocate an interrupt line? + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC - There was no available interrupt to allocate. + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR (0x20800a4d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS_MESSAGE_ID (0x4DU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS { + NvU32 head; + NvU32 rgLineNum; + NvU32 intrLine; + NvBool bEnable; +} NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS; + +/*! + * NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO + * + * Description of a supported GPU instance. + * + * partitionFlag [OUT] + * Allocation flag to be used to allocate a partition with this profile. + * + * grCount [OUT] + * # GR engines, including the GFX capable ones. + * + * gfxGrCount [OUT] + * # GR engines capable of Gfx, which is a subset of the GR engines included in grCount + * + * gpcCount [OUT] + * # total gpcs, including the GFX capable ones. + * + * gfxGpcCount [OUT] + * # total gpcs capable of Gfx. This is a subset of the GPCs included in gpcCount. + * + * veidCount [OUT] + * # total veids + * + * smCount [OUT] + * # total SMs + * + * ceCount [OUT] + * # CE engines + * + * nvEncCount [OUT] + * # NVENC engines + * + * nvDecCount [OUT] + * # NVDEC engines + * + * nvJpgCount [OUT] + * # NVJPG engines + * + * nvOfaCount [OUT] + * # NVOFA engines + */ +#define NV2080_CTRL_INTERNAL_GRMGR_PARTITION_MAX_TYPES 20 + + + +typedef struct NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO { + NvU32 partitionFlag; + NvU32 grCount; + NvU32 gfxGrCount; + NvU32 gpcCount; + NvU32 gfxGpcCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 nvOfaCount; +} NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO; + +/*! + * NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS + * + * Returns the list of supported GPU instance profiles. + * + * count [OUT] + * Number of supported profiles. + * + * table [OUT] + * Supported profiles. + */ +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS { + NvU32 count; + NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO table[NV2080_CTRL_INTERNAL_GRMGR_PARTITION_MAX_TYPES]; +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM + * + * This command instructs the physical MemorySystem to set up memory partitioning + * exterior boundaries in hardware. 
+ * + * partitionableMemSize [input] + * Size of the partitionable memory in bytes + * + * bottomRsvdSize [input] + * Size of the reserved region below partitionable memory in bytes + * + * topRsvdSize [input] + * Size of the reserved region above partitionable memory in bytes + * + * partitionableStartAddr [output] + * Start address of the partitionable memory, aligned to HW constraints + * + * partitionableEndAddr [output] + * End address of the partitionable memory, aligned to HW constraints + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM (0x20800a51) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 partitionableMemSize, 8); + NV_DECLARE_ALIGNED(NvU64 bottomRsvdSize, 8); + NV_DECLARE_ALIGNED(NvU64 topRsvdSize, 8); + NV_DECLARE_ALIGNED(NvU64 partitionableStartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 partitionableEndAddr, 8); +} NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS; + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS { + NV_DECLARE_ALIGNED(NvU64 engineMask, 8); +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS; + + +/*! + * NV2080_CTRL_INTERNAL_MEMDESC_INFO + * + * A generic container structure representing a memory region to be used as a + * component of other control call parameters. + * + */ +typedef struct NV2080_CTRL_INTERNAL_MEMDESC_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV_DECLARE_ALIGNED(NvU64 alignment, 8); + NvU32 addressSpace; + NvU32 cpuCacheAttrib; +} NV2080_CTRL_INTERNAL_MEMDESC_INFO; + +#define NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS 2 +#define NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID 64 +/*! + * NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS + * + * Promote a single partition's runlist buffers allocated by kernel Client RM to Physical RM + * + * rlBuffers [IN] + * 2D array of runlist buffers for a single partition + * + * runlistIdMask [IN] + * Mask of runlists belonging to partition + * + */ +#define NV2080_CTRL_CMD_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS (0x20800a53) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS_MESSAGE_ID (0x53U) + +typedef struct NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_MEMDESC_INFO rlBuffers[NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID][NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS], 8); + NV_DECLARE_ALIGNED(NvU64 runlistIdMask, 8); + NvU32 swizzId; +} NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO (0x20800a54) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS_MESSAGE_ID (0x54U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS { + TEGRA_IMP_IMPORT_DATA tegraImpImportData; +} NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS; + +/*! 
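+ * Illustrative sketch (not part of the control interface defined in this
+ * header): one plausible way Kernel RM could populate
+ * NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS. The
+ * GPU_GET_PHYSICAL_RMAPI()/pRmApi->Control() dispatch pattern and the
+ * hInternalClient/hInternalSubdevice handles are assumptions about the
+ * surrounding code base.
+ */
+#if 0 /* illustrative only */
+static NV_STATUS
+sketchSetPartitionableMem(OBJGPU *pGpu, NvU64 memSize, NvU64 bottomRsvd, NvU64 topRsvd)
+{
+    NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+    params.partitionableMemSize = memSize;
+    params.bottomRsvdSize       = bottomRsvd;
+    params.topRsvdSize          = topRsvd;
+
+    /* On success, Physical RM fills in the HW-aligned start/end addresses. */
+    return pRmApi->Control(pRmApi,
+                           pGpu->hInternalClient,
+                           pGpu->hInternalSubdevice,
+                           NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM,
+                           &params, sizeof(params));
+}
+#endif
+
+/*!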
+ * NV2080_CTRL_CMD_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P + * + * Binds local GFID for SR-IOV P2P requests + * + * localGfid [IN] + * GFID to bind in the P2P source GPU + * + * peerId [IN] + * Peer ID of the P2P destination GPU + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P (0x20800a55) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS_MESSAGE_ID (0x55U) + +typedef struct NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS { + NvU32 localGfid; + NvU32 peerId; +} NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P + * + * Binds remote GFID for SR-IOV P2P requests + * + * remoteGfid [IN] + * GFID to bind in the P2P destination GPU + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P (0x20800a56) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS_MESSAGE_ID (0x56U) + +typedef struct NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS { + NvU32 remoteGfid; +} NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_BUS_FLUSH_WITH_SYSMEMBAR + * + * This command triggers a sysmembar to flush VIDMEM writes. + * This command accepts no parameters. + * + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_FLUSH_WITH_SYSMEMBAR (0x20800a70) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x70" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL + * + * Setup local PCIE P2P Mailbox + * + * local2Remote[IN] + * Local peer ID of remote gpu on local gpu + * + * remote2Local[IN] + * Remote peer ID of local gpu on remote gpu + * + * localP2PDomainRemoteAddr[IN] + * P2P domain remote address on local gpu + * + * remoteP2PDomainLocalAddr[IN] + * P2P domain local address on remote gpu + * + * remoteWMBoxLocalAddr[IN] + * Local mailbox address on remote gpu + * + * p2pWmbTag[OUT] + * Tag for mailbox to transport from local to remote GPU + * + * bNeedWarBug999673[IN] + * Set to true if WAR for bug 999673 is required + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL (0x20800a71) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS_MESSAGE_ID (0x71U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS { + NvU32 local2Remote; + NvU32 remote2Local; + NV_DECLARE_ALIGNED(NvU64 localP2PDomainRemoteAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteP2PDomainLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteWMBoxLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 p2pWmbTag, 8); + NvBool bNeedWarBug999673; +} NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS; + + /* + * NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE + * + * Setup remote PCIE P2P Mailbox + * + * local2Remote[IN] + * Local peer ID of remote gpu on local gpu + * + * remote2Local[IN] + * Remote peer ID of local gpu on remote gpu + * + * localP2PDomainRemoteAddr[IN] + * P2P domain remote address on local gpu + * + * remoteP2PDomainLocalAddr[IN] + * P2P domain local 
address on remote gpu
+ *
+ *    remoteWMBoxAddrU64[IN]
+ *       Mailbox address on remote gpu
+ *
+ *    p2pWmbTag[IN]
+ *       Tag for mailbox to transport from local to remote GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE (0x20800a72) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS_MESSAGE_ID (0x72U)
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS {
+    NvU32 local2Remote;
+    NvU32 remote2Local;
+    NV_DECLARE_ALIGNED(NvU64 localP2PDomainRemoteAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 remoteP2PDomainLocalAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 remoteWMBoxAddrU64, 8);
+    NV_DECLARE_ALIGNED(NvU64 p2pWmbTag, 8);
+} NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_BUS_DESTROY_P2P_MAILBOX
+ *
+ * Destroy PCIE P2P Mailbox
+ *
+ *    peerIdx[IN]
+ *       Peer ID of the P2P destination GPU
+ *
+ *    bNeedWarBug999673[IN]
+ *       Set to true if WAR for bug 999673 is required
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_BUS_DESTROY_P2P_MAILBOX (0x20800a73) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS_MESSAGE_ID (0x73U)
+
+typedef struct NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS {
+    NvU32  peerIdx;
+    NvBool bNeedWarBug999673;
+} NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING
+ *
+ * Create C2C mapping to a given peer GPU
+ *
+ *    peerId[IN]
+ *       Peer ID for local to remote GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING (0x20800a74) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS {
+    NvU32 peerId;
+} NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING
+ *
+ * Remove C2C mapping to a given peer GPU
+ *
+ *    peerId[IN]
+ *       Peer ID for local to remote GPU
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING (0x20800a75) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS {
+    NvU32 peerId;
+} NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES
+ *
+ * Retrieves the corresponding SPAs (per the given GFID's VMMU mappings)
+ * for the given array of GPAs.
+ *
+ *    gfid [IN]
+ *       GFID to translate GPAs for
+ *
+ *    numEntries [IN]
+ *       Number of entries (<= NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES)
+ *       to translate (i.e. number of elements in gpaEntries)
+ *
+ *    gpaEntries [IN]
+ *       Array of GPAs to translate
+ *
+ *    spaEntries [OUT]
+ *       Resulting array of SPAs
+ */
+#define NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES (0x20800a57) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES 128
+
+#define NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS_MESSAGE_ID (0x57U)
+
+typedef struct NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS {
+    NvU32 gfid;
+    NvU32 numEntries;
+    NV_DECLARE_ALIGNED(NvU64 gpaEntries[NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES], 8);
+    NV_DECLARE_ALIGNED(NvU64 spaEntries[NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES], 8);
+} NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER
+ *
+ * Pass required pushbuffer parameters from kernel RM to Physical RM
+ *
+ *   addressSpace [IN]
+ *     Address space of the pushbuffer, whether it is ADDR_SYSMEM or ADDR_FBMEM
+ *
+ *   physicalAddr [IN]
+ *     Physical address of the pushbuffer
+ *
+ *   limit [IN]
+ *     Limit of the pushbuffer address; it should be less than 4K
+ *
+ *   cacheSnoop [IN]
+ *     Whether cache snoop is supported
+ *
+ *   channelInstance [IN]
+ *     Channel instance passed by the client to get the corresponding display channel
+ *
+ *   hclass [IN]
+ *     External class ID passed by the client to get the channel class
+ *
+ *   valid [IN]
+ *     This bit indicates whether the pushbuffer parameters are valid
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID (0x58U)
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+    NvU32 addressSpace;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NvU32 cacheSnoop;
+    NvU32 hclass;
+    NvU32 channelInstance;
+    NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO
+ *
+ * This command obtains information from physical RM for use by CPU-RM.
+ *
+ *   replayableFaultBufferSize
+ *     Default size of replayable fault buffer
+ *
+ *   nonReplayableFaultBufferSize
+ *     Default size of non-replayable fault buffer
+ *
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO (0x20800a59) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0x59U)
+
+typedef struct NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS {
+    NvU32 replayableFaultBufferSize;
+    NvU32 nonReplayableFaultBufferSize;
+} NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS;
+
+/*!
+ * @ref NV2080_CTRL_CMD_GR_GET_CTXSW_MODES
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GR_GET_CTXSW_MODES (0x20800a5a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x5A" */
+
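+/*
+ * Illustrative sketch (not part of the control interface defined in this
+ * header): how CPU-RM might query the default fault buffer sizes via
+ * NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO above. The pRmApi->Control()
+ * dispatch pattern and the internal client/subdevice handles are assumptions.
+ */
+#if 0 /* illustrative only */
+static NV_STATUS
+sketchGetGmmuStaticInfo(OBJGPU *pGpu, NvU32 *pReplayableSize, NvU32 *pNonReplayableSize)
+{
+    NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_STATUS status;
+
+    status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                             NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO,
+                             &params, sizeof(params));
+    if (status == NV_OK)
+    {
+        *pReplayableSize    = params.replayableFaultBufferSize;
+        *pNonReplayableSize = params.nonReplayableFaultBufferSize;
+    }
+    return status;
+}
+#endif
+
+/*!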
+ * NV2080_CTRL_CMD_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE + * + * Get heap reservation size needed by different module + */ +#define NV2080_CTRL_CMD_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE (0x20800a5b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS_MESSAGE_ID (0x5BU) + +typedef struct NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS { + NvU32 moduleIndex; + NvU32 size; +} NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE + * + * tableLen [OUT] + * Number of valid records in table field. + * + * table [OUT] + * Interrupt table for Kernel RM. + */ +#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128 + +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY { + NvU16 engineIdx; + NvU32 pmcIntrMask; + NvU32 vectorStall; + NvU32 vectorNonStall; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY; + +#define NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID (0x5CU) + +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS { + NvU32 tableLen; + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE]; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS; + +/* Index to retrieve the needed heap space for specific module */ +#define NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_GR (0x00000000) + +/* + * NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK + * + * Checking if the reservation / release of the PERFMON HW is possible + * + * bReservation [IN] + * NV_TRUE -> request for reservation, NV_FALSE -> request for release + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK (0x20800a98) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS_MESSAGE_ID" */ + + +#define NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS_MESSAGE_ID (0x98U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS { + NvBool bReservation; +} NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET + * + * Reserving / Releasing PERFMON HW + * + * bReservation [IN] + * NV_TRUE -> request for reservation, NV_FALSE -> request for release + * + * bClientHandlesGrGating [IN] + * DM-TODO: Add comment for this + * + * bRmHandlesIdleSlow [IN] + * If the IDLE slowdown is required + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET (0x20800a99) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS_MESSAGE_ID (0x99U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS { + NvBool bReservation; + NvBool bClientHandlesGrGating; + NvBool bRmHandlesIdleSlow; +} NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS; + +/*! 
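+ * Illustrative sketch (not part of the control interface defined above):
+ * a plausible check-then-set sequence for the PERFMON HW reservation
+ * controls. The pRmApi->Control() dispatch pattern and the internal
+ * client/subdevice handles are assumptions.
+ */
+#if 0 /* illustrative only */
+static NV_STATUS
+sketchReservePerfmonHw(OBJGPU *pGpu, NvBool bReserve)
+{
+    NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS checkParams = { 0 };
+    NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS   setParams   = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_STATUS status;
+
+    /* First ask Physical RM whether the reservation/release is possible. */
+    checkParams.bReservation = bReserve;
+    status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                             NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK,
+                             &checkParams, sizeof(checkParams));
+    if (status != NV_OK)
+        return status;
+
+    setParams.bReservation           = bReserve;
+    setParams.bClientHandlesGrGating = NV_FALSE;
+    setParams.bRmHandlesIdleSlow     = NV_FALSE;
+    return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                           NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET,
+                           &setParams, sizeof(setParams));
+}
+#endif
+
+/*!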
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES + * + * Get active display devices + * + * displayMask [OUT] + * Get the mask of the active display devices in VBIOS + * + * numHeads [OUT] + * Number of heads display supported. + * + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES (0x20800a5d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS_MESSAGE_ID (0x5DU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS { + + NvU32 displayMask; + NvU32 numHeads; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS; + + + +/* + * NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + * + * Get FB Mem page ranges for all possible swizzIds + * + * fbMemPageRanges [OUT] + * Mem page ranges for each swizzId in the form of {lo, hi} + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES (0x20800a60) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID 15 + +typedef struct NV2080_CTRL_INTERNAL_NV_RANGE { + NV_DECLARE_ALIGNED(NvU64 lo, 8); + NV_DECLARE_ALIGNED(NvU64 hi, 8); +} NV2080_CTRL_INTERNAL_NV_RANGE; + +#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NV_RANGE fbMemPageRanges[NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID], 8); +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS + * + * This command is an internal command sent from Kernel RM to Physical RM + * to get number of channels for a given runlist ID + * + * runlistId [IN] + * numChannels [OUT] + */ +#define NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS (0x20800a61) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS_MESSAGE_ID (0x61U) + +typedef struct NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS { + NvU32 runlistId; + NvU32 numChannels; +} NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PROFILES + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PROFILES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PROFILES (0x20800a63) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x63" */ + +/*! + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_VALID_SWIZZID_MASK + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_VALID_SWIZZID_MASK + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_VALID_SWIZZID_MASK (0x20800a64) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x64" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES + * NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES (0x20800a65) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x65" */ + +/*! 
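+ * Illustrative sketch (not part of the control interface defined above):
+ * how Kernel RM might query the channel count for one runlist with
+ * NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS. The dispatch pattern and
+ * the internal client/subdevice handles are assumptions.
+ */
+#if 0 /* illustrative only */
+static NvU32
+sketchGetNumChannels(OBJGPU *pGpu, NvU32 runlistId)
+{
+    NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+    params.runlistId = runlistId;
+    if (pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                        NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS,
+                        &params, sizeof(params)) != NV_OK)
+    {
+        return 0;
+    }
+    return params.numChannels;
+}
+#endif
+
+/*!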
+ * NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + * NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES (0x20800a66) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x66" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG + * + * This command retrieves memory config from HW + * + * memBoundaryCfgA [OUT] + * Memory boundary config A (4KB aligned) + * + * memBoundaryCfgB [OUT] + * Memory boundary config B (4KB aligned) + * + * memBoundaryCfgC [OUT] + * Memory boundary config C (64KB aligned) + * + * memBoundaryCfg [OUT] + * Memory boundary config (64KB aligned) + * + * memBoundaryCfgValInit [OUT] + * Memory boundary config initial value (64KB aligned) + */ +#define NV2080_CTRL_CMD_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG (0x20800a67) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x67" */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG (0x20800a68) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x68" */ + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS { + NV_DECLARE_ALIGNED(NvU64 memBoundaryCfgA, 8); + NV_DECLARE_ALIGNED(NvU64 memBoundaryCfgB, 8); + NvU32 memBoundaryCfgC; + NvU32 memBoundaryCfg; + NvU32 memBoundaryCfgValInit; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE (0x20800a6b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_SIZE 8 + +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS_MESSAGE_ID (0x6BU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS { + NvU32 data[NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_SIZE]; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT + * + * Invoke RC recovery after watchdog timeout is hit. + */ +#define NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT (0x20800a6a) /* finn: Evaluated from "((FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6a)" */ + +/* ! + * This command disables cuda limit activation at teardown of the client. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_DISABLE (0x20800a7a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x7A" */ + +/* + * This command is cleaning up OPTP when a client is found to have + * been terminated unexpectedly. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_OPTP_CLI_CLEAR (0x20800a7c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x7C" */ + + +/*! + * This command can be used to boost P-State up one level or to the highest for a limited + * duration for the associated subdevice. Boosts from different clients are being tracked + * independently. Note that there are other factors that can limit P-States so the resulting + * P-State may differ from expectation. + * + * flags + * This parameter specifies the actual command. _CLEAR is to clear existing boost. + * _BOOST_1LEVEL is to boost P-State one level higher. _BOOST_TO_MAX is to boost + * to the highest P-State. 
+ * duration + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_2X (0x20800a9a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X_MESSAGE_ID (0x9AU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X { + + NvBool flags; + NvU32 duration; +} NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X; + +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_PSTATE 0U +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_GPCCLK 1U +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_LAST NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_GPCCLK +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM (0x2U) /* finn: Evaluated from "NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_LAST + 1" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_CONTROL + * + * Activate/Deactivate GPU Boost Sync algorithm + * + * bActivate [IN] + * GPU Boost Sync algorithm: + * NV_TRUE -> activate + * NV_FALSE -> deactivate + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_CONTROL (0x20800a7e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID (0x7EU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS { + NvBool bActivate; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS + * + * Apply given limits to a specific subdevice + * + * flags [IN] + * DM-TODO: write description here + * + * bBridgeless [IN] + * Bridgeless information, for now supporting only MIO bridges + * + * currLimits + * Array of limits that will be applied + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS (0x20800a7f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_MESSAGE_ID (0x7FU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS { + NvU32 flags; + NvBool bBridgeless; + NvU32 currLimits[NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM]; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO + * + * Data for GPU Boost Sync structure initialization + * + * hysteresisus [OUT] + * Hysteresis value for GPU Boost synchronization hysteresis algorithm. 
+ * + * bHystersisEnable [OUT] + * hysteresis algorithm for SLI GPU Boost synchronization: + * NV_TRUE -> enabled, + * NV_FALSE -> disabled + * + * bSliGpuBoostSyncEnable [OUT] + * SLI GPU Boost feature is: + * NV_TRUE -> enabled, + * NV_FALSE -> disabled + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO (0x20800a80) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 hysteresisus, 8); + NvBool bHystersisEnable; + NvBool bSliGpuBoostSyncEnable; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_FAULT_BUFFER + * + * This command sends replayable fault buffer pages allocated by CPU-RM + * to be setup and enabled in physical RM. + * + * hClient + * Client handle. + * + * hObject + * Object handle. + * + * faultBufferSize + * Size of the replayable fault buffer to register. + * + * faultBufferPteArray + * Pages of replayable fault buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_FAULT_BUFFER (0x20800a9b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_FAULT_BUFFER_MAX_PAGES 256 +#define NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS_MESSAGE_ID (0x9BU) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS { + NvHandle hClient; + NvHandle hObject; + NvU32 faultBufferSize; + NV_DECLARE_ALIGNED(NvU64 faultBufferPteArray[NV2080_CTRL_INTERNAL_GMMU_FAULT_BUFFER_MAX_PAGES], 8); +} NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_FAULT_BUFFER + * + * This command requests physical RM to disable the replayable fault buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_FAULT_BUFFER (0x20800a9c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x9C" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER + * + * This command sends client shadow fault buffer pages allocated by CPU-RM + * to be setup and enabled in physical RM. + * + * shadowFaultBufferQueuePhysAddr + * Physical address of shadow fault buffer queue. + * + * faultBufferSize + * Size of the client shadow fault buffer to register. + * + * shadowFaultBufferPteArray + * Pages of client shadow fault buffer. 
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER (0x20800a9d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GMMU_CLIENT_SHADOW_FAULT_BUFFER_MAX_PAGES 1500
+#define NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID (0x9DU)
+
+typedef struct NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 shadowFaultBufferQueuePhysAddr, 8);
+    NvU32 shadowFaultBufferSize;
+    NV_DECLARE_ALIGNED(NvU64 shadowFaultBufferPteArray[NV2080_CTRL_INTERNAL_GMMU_CLIENT_SHADOW_FAULT_BUFFER_MAX_PAGES], 8);
+} NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER
+ *
+ * This command requests physical RM to disable the client shadow fault buffer.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER (0x20800a9e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x9E" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X
+ *
+ * This command can be used to boost P-State to the highest for a limited
+ * duration for the associated subdevice. Boosts from different clients are
+ * tracked independently. Note that there are other factors that can limit
+ * P-States so the resulting P-State may differ from expectation.
+ *
+ *   flags [IN]
+ *     This parameter specifies the actual command. _CLEAR is to clear the existing
+ *     boost, and _BOOST_TO_MAX is to boost to the highest P-State.
+ *
+ *   boostDuration [IN]
+ *     This parameter specifies the duration of the boost in seconds. This has to be less
+ *     than NV2080_CTRL_PERF_BOOST_DURATION_MAX.
+ *
+ *   gfId [IN]
+ *     This specifies the Id of the Kernel RM that is requesting the Boost
+ *
+ *   bOverrideInfinite [IN]
+ *     This parameter specifies if we want to override an already registered infinite boost for the specific Kernel RM.
+ *     This should be NV_TRUE only in the case when we are removing the current infinite boost for a specific Kernel RM
+ *     and setting the boost duration to the next maximum duration registered for the Kernel RM in question.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X (0x20800aa0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X_MESSAGE_ID (0xA0U)
+
+typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X {
+    NvU32  flags;
+    NvU32  boostDuration;
+    NvU32  gfId;
+    NvBool bOverrideInfinite;
+} NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X;
+
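+/*
+ * Illustrative sketch (not part of the control interface defined above):
+ * one plausible way a caller could request a bounded boost with
+ * NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X. The flag encoding is left to
+ * the caller; the dispatch pattern and internal handles are assumptions.
+ */
+#if 0 /* illustrative only */
+static NV_STATUS
+sketchBoostSet3x(OBJGPU *pGpu, NvU32 flags, NvU32 durationSec, NvU32 gfId)
+{
+    NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+    params.flags             = flags;        /* e.g. a _BOOST_TO_MAX request */
+    params.boostDuration     = durationSec;  /* must be below NV2080_CTRL_PERF_BOOST_DURATION_MAX */
+    params.gfId              = gfId;
+    params.bOverrideInfinite = NV_FALSE;
+
+    return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                           NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X,
+                           &params, sizeof(params));
+}
+#endif
+
+/*!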
+ * NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_CLEAR_3X + * + * Clear the boost for specific Kernel RM + * + * bIsCudaClient [IN] + * Specifies if the request is for clearing the CUDA boost or regular boost + * NV_TRUE -> CUDA boost, NV_FALSE otherwise + * + * gfId [IN] + * Specifies Id of the Kernel RM that is requesting Boost clear + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_CLEAR_3X (0x20800aa1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X_MESSAGE_ID (0xA1U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X { + + NvBool bIsCudaClient; + NvU32 gfId; +} NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X; + +/* + * NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO + * + * Retrieves skyline information about the GPU. Params are sized to currently known max + * values, but will need to be modified in the future should that change. + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO (0x208038a2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GRMGR_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_SKYLINES 8 +#define NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS 8 +/*! + * NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO + * skylineVgpcSize[OUT] + * - TPC count of non-singleton VGPCs + * singletonVgpcMask[OUT] + * - Mask of active Singletons + * maxInstances[OUT] + * - Max allowed instances of this skyline concurrently on a GPU + * computeSizeFlag + * - One of NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_* flags which is associated with this skyline + * numNonSingletonVgpcs + * - Number of VGPCs with non-zero TPC counts which are not singletons + */ +typedef struct NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO { + NvU8 skylineVgpcSize[NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS]; + NvU32 singletonVgpcMask; + NvU32 maxInstances; + NvU32 computeSizeFlag; + NvU32 numNonSingletonVgpcs; +} NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO; + +/*! + * NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS + * skylineTable[OUT] + * - TPC count of non-singleton VGPCs + * - Mask of singleton vGPC IDs active + * - Max Instances of this skyline possible concurrently + * - Associated compute size with the indexed skyline + * - Number of VGPCs with non-zero TPC counts which are not singletons + * validEntries[OUT] + * - Number of entries which contain valid info in skylineInfo + */ +#define NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID (0xA2U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS { + NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO skylineTable[NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_SKYLINES]; + NvU32 validEntries; +} NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GPU_SET_PARTITIONING_MODE + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_PARTITIONING_MODE (0x20800aa3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA3" */ + +/*! + * @ref NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_CONFIGURE_GPU_INSTANCE (0x20800aa4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA4" */ + +/*! 
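+ * Illustrative sketch (not part of the control interface defined above):
+ * walking the skyline table returned by
+ * NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO; only the first
+ * validEntries elements of skylineTable carry data. The dispatch pattern
+ * and internal client/subdevice handles are assumptions.
+ */
+#if 0 /* illustrative only */
+static NvU32
+sketchCountSkylineInstances(OBJGPU *pGpu)
+{
+    NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NvU32 i, total = 0;
+
+    if (pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                        NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO,
+                        &params, sizeof(params)) != NV_OK)
+    {
+        return 0;
+    }
+    for (i = 0; i < params.validEntries; i++)
+    {
+        total += params.skylineTable[i].maxInstances;
+    }
+    return total;
+}
+#endif
+
+/*!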
+ * @ref NV2080_CTRL_CMD_GPU_SET_PARTITIONS
+ */
+#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES (0x20800aa5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA5" */
+
+/*!
+ * @ref NV2080_CTRL_CMD_GPU_GET_PARTITIONS
+ */
+#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_GET_GPU_INSTANCES (0x20800aa6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA6" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED
+ *
+ * Tell Physical RM whether any ZBC-kind surfaces are allocated.
+ * If PF and all VFs report false, the ZBC table can be flushed by Physical RM.
+ *
+ *   bZbcSurfacesExist [IN]
+ *     NV_TRUE -> ZBC-kind surfaces (without the _SKIP_ZBCREFCOUNT flag) are allocated in Kernel RM
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED (0x20800a69) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS {
+    NvBool bZbcSurfacesExist;
+} NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE
+ *
+ * Export the resource and placement information about a gpu instance such that a
+ * similar gpu instance can be recreated from scratch in the same position on the
+ * same or similar GPU. Note that different GPUs may have different physical
+ * resources due to floorsweeping, and an imported gpu instance is not guaranteed
+ * to get the exact same resources as the exported gpu instance, but the imported
+ * gpu instance should behave identically with respect to fragmentation and
+ * placement / span positioning.
+ */
+#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE (0x20800aa7) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA7" */
+#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE (0x20800aa8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA8" */
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE
+ *
+ * Create a gpu instance resembling the exported instance info. Note that
+ * different GPUs may have different physical resources due to floorsweeping,
+ * and an imported gpu instance is not guaranteed to get the exact same resources
+ * as the exported gpu instance, but the imported gpu instance should behave
+ * identically with respect to fragmentation and placement / span positioning.
+ */ +#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE (0x20800aa9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA9" */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_IMPORT_GPU_INSTANCE (0x20800aaa) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xAA" */ + +#define NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_MAX_ENGINES_MASK_SIZE 4 +typedef struct NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO { + NV_DECLARE_ALIGNED(NvU64 enginesMask[NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_MAX_ENGINES_MASK_SIZE], 8); + NvU32 partitionFlags; + NvU32 gpcMask; + NvU32 veidOffset; + NvU32 veidCount; +} NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO; + +typedef struct NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS { + NvU32 swizzId; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO info, 8); +} NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT + * + * Invalidate and/or evict the L2 cache + * + * flags [IN] + * flags that specify required actions + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT (0x20800a6c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS_MESSAGE_ID (0x6cU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS { + NvU32 flags; +} NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS; + +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_ALL (0x00000001) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_FIRST (0x00000002) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_LAST (0x00000004) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_NORMAL (0x00000008) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_CLEAN (0x00000010) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_WAIT_FB_PULL (0x00000020) + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_FLUSH_L2_ALL_RAMS_AND_CACHES + * + * Flush all L2 Rams and Caches using the ELPG flush + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_FLUSH_L2_ALL_RAMS_AND_CACHES (0x20800a6d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6D" */ + + + +/*! + * NV2080_CTRL_CMD_INTERNAL_BIF_GET_STATIC_INFO + * + * This command obtains information from physical RM for use by CPU-RM + * + * Data fields -> + * bPcieGen4Capable - tells whether PCIe is Gen4 capable + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_GET_STATIC_INFO (0x20800aac) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0xacU) + +typedef struct NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS { + NvBool bPcieGen4Capable; + NvBool bIsC2CLinkUp; + NV_DECLARE_ALIGNED(NvU64 dmaWindowStartAddress, 8); +} NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG + * + * Program HSHUB Peer Conn Config space. + * + * programPeerMask[IN] + * If nonzero, the peer mask for programming peers based on hshub connectivity. + * + * invalidatePeerMask[IN] + * If nonzero, the peer mask for invalidating peers. 
+ *
+ *   programPciePeerMask [IN]
+ *     If nonzero, the peer mask for programming peers in the PCIe case.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_WARN_NOTHING_TO_DO
+ *     If all peer masks are zero.
+ */
+#define NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG (0x20800a88) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS {
+    NvU32 programPeerMask;
+    NvU32 invalidatePeerMask;
+    NvU32 programPciePeerMask;
+} NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_HSHUB_FIRST_LINK_PEER_ID
+ *
+ * Given a mask of link ids, find the first with a valid peerId.
+ *
+ *   linkMask [IN]
+ *     Mask of linkIds to check.
+ *
+ *   peerId [OUT]
+ *     The peerId for the lowest-index link with a valid peerId, if any.
+ *     If none found, NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_INVALID_PEER
+ *     (return value will still be NV_OK).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV2080_CTRL_CMD_INTERNAL_HSHUB_FIRST_LINK_PEER_ID (0x20800a89) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_INVALID_PEER 0xffffffff
+
+#define NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS {
+    NvU32 linkMask;
+    NvU32 peerId;
+} NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS
+ *
+ * Get the Hshub Ids connected to the other end of links.
+ *
+ *   linkMask [IN]
+ *     A mask of link ids to query.
+ *
+ *   hshubIds [OUT]
+ *     For each set bit in the link mask, the peer Hshub Id.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_WARN_NOTHING_TO_DO
+ *     If the mask is zero.
+ */
+#define NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS (0x20800a8a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS_MESSAGE_ID" */
+#define NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_TABLE_SIZE 32
+
+#define NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS_MESSAGE_ID (0x8aU)
+
+typedef struct NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS {
+    NvU32 linkMask;
+    NvU8  hshubIds[NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_TABLE_SIZE];
+} NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_NUM_UNITS
+ *
+ * Return the number of HSHUB units.
+ *
+ *   numHshubs [OUT]
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_NUM_UNITS (0x20800a8b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS_MESSAGE_ID (0x8bU)
+
+typedef struct NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS {
+    NvU32 numHshubs;
+} NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_INTERNAL_HSHUB_NEXT_HSHUB_ID
+ *
+ * Return the next hshubId after the given hshubId.
+ * + * hshubId[IN/OUT] + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_NEXT_HSHUB_ID (0x20800a8c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS_MESSAGE_ID (0x8cU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS { + NvU8 hshubId; +} NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR + * + * Enable compute peer addressing mode + * This command accepts no parameters. + */ + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR (0x20800aad) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xad" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR + * + * Get/Set NVSwitch fabric address for FLA + * + * [In] bGet + * Whether to get or set the NVSwitch fabric address + * [In/Out] addr + * Address that is to be set or retrieved. + */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS_MESSAGE_ID (0xaeU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS { + NvBool bGet; + NV_DECLARE_ALIGNED(NvU64 addr, 8); +} NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR (0x20800aae) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS_MESSAGE_ID" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_BIF_GET_ASPM_L1_FLAGS + * + * This command obtains information from physical RM for use by CPU-RM + * + * Data fields -> + * bCyaMaskL1 + * bEnableAspmDtL1 + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_GET_ASPM_L1_FLAGS (0x20800ab0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS_MESSAGE_ID (0xb0U) + +typedef struct NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS { + NvBool bCyaMaskL1; + NvBool bEnableAspmDtL1; +} NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT + * + * Sets number of VM slots that are active in VGPU's scheduler + * + * maxActiveVGpuVMCount [IN] + * Number of VM slots that are active in vGPU's scheduler. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OUT_OF_RANGE - Passed value is out of range + * NV_ERR_NO_MEMORY - Out of memory + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT (0x20800ab1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS_MESSAGE_ID" */ + +/*! + * Maximum value of VM slots that are active in vGPU's scheduler. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_MAX_ACTIVE_VGPU_VM_COUNT_MAX_VALUE 32 + +#define NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS_MESSAGE_ID (0xB1U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS { + NvU8 maxActiveVGpuVMCount; +} NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS; + +/*! 
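+ * Illustrative sketch (not part of the control interface defined above):
+ * the bGet selector of
+ * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR lets one
+ * command serve both directions. The dispatch pattern and internal
+ * client/subdevice handles are assumptions.
+ */
+#if 0 /* illustrative only */
+static NV_STATUS
+sketchSetNvswitchFabricAddr(OBJGPU *pGpu, NvU64 addr)
+{
+    NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS params = { 0 };
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+    params.bGet = NV_FALSE; /* NV_TRUE would read the address back instead */
+    params.addr = addr;
+
+    return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                           NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR,
+                           &params, sizeof(params));
+}
+#endif
+
+/*!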
+ * NV2080_CTRL_CMD_INTERNAL_MEMSYS_DISABLE_NVLINK_PEERS + * + * Disable all NVLINK FB peers + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_DISABLE_NVLINK_PEERS (0x20800a6e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6E" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE + * + * Program GPU in raw / legacy compression mode + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE (0x20800a6f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS_MESSAGE_ID (0x6fU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS { + NvBool bRawMode; +} NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS; + + + +/*! + * NV2080_CTRL_CMD_INTERNAL_BUS_GET_PCIE_P2P_CAPS + * + * This command returns the GPU's PCIE P2P caps + * + * [in] bCommonPciSwitchFound + * All GPUs are under the same PCI switch + * [out] p2pReadCapsStatus + * [out] p2pWriteCapsStatus + * These members returns status of all supported p2p capabilities. Valid + * status values include: + * NV0000_P2P_CAPS_STATUS_OK + * P2P capability is supported. + * NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED + * Chipset doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED + * GPU doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED + * IOH topology isn't supported. For e.g. root ports are on different + * IOH. + * NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY + * P2P Capability is disabled by a regkey. + * NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED + * P2P Capability is not supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_INTERNAL_GET_PCIE_P2P_CAPS (0x20800ab8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS_MESSAGE_ID (0xB8U) + +typedef struct NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS { + NvBool bCommonPciSwitchFound; + NvU8 p2pReadCapsStatus; + NvU8 p2pWriteCapsStatus; +} NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_BIF_SET_PCIE_RO + * + * Enable/disable PCIe Relaxed Ordering. + * + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_SET_PCIE_RO (0x20800ab9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS_MESSAGE_ID (0xb9U) + +typedef struct NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS { + // Enable/disable PCIe relaxed ordering + NvBool enableRo; +} NV2080_CTRL_INTERNAL_BIF_SET_PCIE_RO_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE + * + * An internal call to invoke the sequence VGA register reads & writes to + * perform save and restore of VGA + * + * [in] saveOrRestore + * To indicate whether save or restore needs to be performed. + * [in] useVbios + * Primary VGA indication from OS. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE (0x20800a76) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE_PARAMS_MESSAGE_ID (0x76U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE_PARAMS { + + NvBool bSaveOrRestore; + NvBool bUseVbios; +} NV2080_CTRL_CMD_INTERNAL_DISPLAY_UNIX_CONSOLE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE + * + * To perform restore operation from saved fonts. + * + * [in] saveOrRestore + * To indicate whether save or restore needs to be performed. + * [in] useVbios + * Primary VGA indication from OS. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE (0x20800a77) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE_PARAMS { + + NvBool bWriteCr; +} NV2080_CTRL_CMD_INTERNAL_DISPLAY_POST_RESTORE_PARAMS; + +/* ctrl2080internal_h */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h new file mode 100644 index 0000000..068d4f2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080lpwr.finn
+//
+
+
+
+// _ctrl2080lpwr_h_
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
new file mode 100644
index 0000000..adff634
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
@@ -0,0 +1,323 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080mc.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX mc control commands and parameters */
+
+/**
+ * NV2080_CTRL_CMD_MC_GET_ARCH_INFO
+ *
+ * This command returns chip architecture information from the
+ * master control engine in the specified GPU.
+ *
+ *   architecture
+ *     This parameter specifies the architecture level for the GPU.
+ *   implementation
+ *     This parameter specifies the implementation of the architecture
+ *     for the GPU.
+ *   revision
+ *     This parameter specifies the revision of the mask used to produce
+ *     the GPU.
+ *   subRevision
+ *     This parameter specifies the sub revision of the GPU.
Value is one of + * NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_* + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_MC_GET_ARCH_INFO (0x20801701) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS { + NvU32 architecture; + NvU32 implementation; + NvU32 revision; + NvU8 subRevision; +} NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS; + +/* valid architecture values */ + + +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_T23X (0xE0000023) + + +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100 (0x00000160) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100 (0x00000170) + + + +/* valid ARCHITECTURE_T23X implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_T232 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_T234 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_T234D (0x00000005) + + +/* valid ARCHITECTURE_TU10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU100 (0x00000000) + + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU102 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU104 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU106 (0x00000006) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU116 (0x00000008) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU117 (0x00000007) + + +/* valid ARCHITECTURE_GA10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA100 (0x00000000) + + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA102 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA103 (0x00000003) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA104 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA106 (0x00000006) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA107 (0x00000007) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA10B (0x0000000B) + + + +/* Valid Chip sub revisions */ +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_NO_SUBREVISION (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_P (0x00000001) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_Q (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_R (0x00000003) + +/* + * NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS + * + * This command instructs the RM to service interrupts for the specified + * engine(s). + * + * engines + * This parameter specifies which engines should have their interrupts + * serviced. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS (0x20801702) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_ENGINE_ID_GRAPHICS 0x00000001 +#define NV2080_CTRL_MC_ENGINE_ID_ALL 0xFFFFFFFF + +#define NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS { + NvU32 engines; +} NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS; + + +/* + * NV2080_CTRL_CMD_MC_GET_MANUFACTURER + * + * This command returns the GPU manufacturer information for the associated + * subdevice. + * + * manufacturer + * This parameter returns the manufacturer value for the GPU. 
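Note that the NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_* values above are only unique within an architecture (for example, 0x00000002 is TU102 under TU100 but T232 under T23X), so a caller must branch on architecture before interpreting implementation. A small illustrative decoder, assuming the params struct was already filled by a successful NV2080_CTRL_CMD_MC_GET_ARCH_INFO call:

    /* Sketch only: maps GET_ARCH_INFO output to a printable family name. */
    static const char *archInfoName(const NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS *p)
    {
        switch (p->architecture)
        {
            case NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100:
                /* implementation is one of the TU10x values in this branch */
                return (p->implementation == NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU102)
                       ? "TU102" : "other TU10x";
            case NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100:
                return "GA10x";
            case NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_T23X:
                return "T23x";
            default:
                return "unknown";
        }
    }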
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_MC_GET_MANUFACTURER (0x20801703) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS {
+    NvU32 manufacturer;
+} NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS
+ *
+ * This command is used to allow clients to query whether hostclk slowdown is
+ * disabled.
+ *
+ *   bDisabled
+ *     This parameter will hold the status of hostclk slowdown
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV2080_CTRL_CMD_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS (0x20801708) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS {
+    NvBool bDisabled;
+} NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_MC_SET_HOSTCLK_SLOWDOWN_STATUS
+ *
+ * This command is used to allow clients to disable/enable hostclk slowdown.
+ *
+ *   bDisable
+ *     When this parameter is set to TRUE, RM should disable hostclk slowdown.
+ *     If it is set to FALSE, RM will attempt to enable hostclk slowdown, but
+ *     in this case, slowdown is NOT guaranteed to be enabled since there may
+ *     be other reasons (like a regkey) preventing slowdown.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV2080_CTRL_CMD_MC_SET_HOSTCLK_SLOWDOWN_STATUS (0x20801709) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS {
+    NvBool bDisable;
+} NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP
+ *
+ * This call will set up RM to either service or ignore the
+ * replayable fault interrupt.
+ * This is a privileged call that can only be called by the UVM driver
+ * when it will take ownership of the replayable fault interrupt.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP (0x2080170c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS {
+    NvBool bOwnedByRm;
+} NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS
+ *
+ * This command gets the notification interrupt vectors for all VGPU engines from Host RM.
+ *
+ * Parameters:
+ *
+ * entries [out]
+ *   A buffer to store up to MAX_ENGINES entries of type
+ *   NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY.
+ *
+ * numEntries [out]
+ *   Number of populated entries in the provided buffer.
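The two hostclk slowdown controls above pair naturally: since the SET command does not guarantee re-enabling, a client that wants a known state typically queries first. A sketch using the same hypothetical rmSubdeviceControl dispatch as in the earlier example (an assumption, not part of this header):

    extern NV_STATUS rmSubdeviceControl(NvU32 cmd, void *pParams, NvU32 paramsSize);

    /* Sketch: disable hostclk slowdown only if it is not already disabled. */
    static NV_STATUS ensureHostclkSlowdownDisabled(void)
    {
        NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS query = { 0 };
        NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS   set   = { 0 };
        NV_STATUS status;

        status = rmSubdeviceControl(NV2080_CTRL_CMD_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS,
                                    &query, sizeof(query));
        if (status != NV_OK || query.bDisabled)
            return status;                 /* already disabled, nothing to do */

        set.bDisable = NV_TRUE;            /* request RM to disable slowdown */
        return rmSubdeviceControl(NV2080_CTRL_CMD_MC_SET_HOSTCLK_SLOWDOWN_STATUS,
                                  &set, sizeof(set));
    }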
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS (0x2080170d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_MAX_ENGINES 256 + +typedef struct NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY { + NvU32 nv2080EngineType; + NvU32 notificationIntrVector; +} NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY; + +#define NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS { + NvU32 numEntries; + NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY entries[NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_MAX_ENGINES]; +} NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS; + +/* + * NV2080_CTRL_CMD_MC_GET_STATIC_INTR_TABLE + * + * This command gets the static interrupts needed by VGPU from Host RM. + * + * Parameters: + * + * entries [out] + * A buffer to store up to MAX_ENGINES entries of type + * NV2080_CTRL_MC_STATIC_INTR_ENTRY. + * + * numEntries [out] + * Number of populated entries in the provided buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_MC_GET_STATIC_INTR_TABLE (0x2080170e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX 32 + +// Interface defines for static MC_ENGINE_IDX defines +#define NV2080_INTR_TYPE_NULL (0x00000000) +#define NV2080_INTR_TYPE_NON_REPLAYABLE_FAULT (0x00000001) +#define NV2080_INTR_TYPE_NON_REPLAYABLE_FAULT_ERROR (0x00000002) +#define NV2080_INTR_TYPE_INFO_FAULT (0x00000003) +#define NV2080_INTR_TYPE_REPLAYABLE_FAULT (0x00000004) +#define NV2080_INTR_TYPE_REPLAYABLE_FAULT_ERROR (0x00000005) +#define NV2080_INTR_TYPE_ACCESS_CNTR (0x00000006) +#define NV2080_INTR_TYPE_TMR (0x00000007) +#define NV2080_INTR_TYPE_CPU_DOORBELL (0x00000008) +#define NV2080_INTR_TYPE_GR0_FECS_LOG (0x00000009) +#define NV2080_INTR_TYPE_GR1_FECS_LOG (0x0000000A) +#define NV2080_INTR_TYPE_GR2_FECS_LOG (0x0000000B) +#define NV2080_INTR_TYPE_GR3_FECS_LOG (0x0000000C) +#define NV2080_INTR_TYPE_GR4_FECS_LOG (0x0000000D) +#define NV2080_INTR_TYPE_GR5_FECS_LOG (0x0000000E) +#define NV2080_INTR_TYPE_GR6_FECS_LOG (0x0000000F) +#define NV2080_INTR_TYPE_GR7_FECS_LOG (0x00000010) + +typedef struct NV2080_CTRL_MC_STATIC_INTR_ENTRY { + NvU32 nv2080IntrType; + NvU32 pmcIntrMask; + NvU32 intrVectorStall; + NvU32 intrVectorNonStall; +} NV2080_CTRL_MC_STATIC_INTR_ENTRY; + +#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS { + NvU32 numEntries; + NV2080_CTRL_MC_STATIC_INTR_ENTRY entries[NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX]; +} NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS; + +/* _ctrl2080mc_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h new file mode 100644 index 0000000..f1e490b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h @@ -0,0 +1,338 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080nvd.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +#include "ctrl/ctrlxxxx.h" +/* + * NV2080_CTRL_CMD_NVD_GET_DUMP_SIZE + * + * This command gets the expected dump size of a particular GPU dump component. + * Note that events that occur between this command and a later + * NV2080_CTRL_CMD_NVD_GET_DUMP command could alter the size of + * the buffer required. + * + * component + * One of NVDUMP_COMPONENT < 0x400 defined in nvdump.h to estimate + * the size of. + * size + * This parameter returns the expected size. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT if component is invalid. + * + */ + +#define NV2080_CTRL_CMD_NVD_GET_DUMP_SIZE (0x20802401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS { + NvU32 component; + NvU32 size; +} NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVD_GET_DUMP + * + * This command gets a dump of a particular GPU dump component. If triggers + * is non-zero, the command waits for the trigger to occur + * before it returns. + * + * pBuffer + * This parameter points to the buffer for the data. + * component + * One of NVDUMP_COMPONENT < 0x400 defined in nvdump.h to select + * for dumping. + * size + * On entry, this parameter specifies the maximum length for + * the returned data. On exit, it specifies the number of bytes + * returned. + * + * Possible status values returned are: + * NV_OK + * NVOS_ERROR_INVALID_ARGUMENT if component is invalid. + * NVOS_ERROR_INVALID_ADDRESS if pBuffer is invalid + * NVOS_ERROR_INVALID_???? 
if the buffer was too small
+ *
+ *
+ */
+#define NV2080_CTRL_CMD_NVD_GET_DUMP (0x20802402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_NVD_GET_DUMP_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pBuffer, 8);
+    NvU32 component;
+    NvU32 size;
+} NV2080_CTRL_NVD_GET_DUMP_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_NVD_GET_NOCAT_JOURNAL
+ *
+ * This command returns the contents of the Journal used by NOCAT, and
+ * optionally clears the data
+ *
+ * clear:
+ *   [IN] indicates whether the data should be cleared after reporting
+ *
+ * JournalRecords:
+ *   [OUT] an array of Journal records reported.
+ *
+ * outstandingAssertCount:
+ *   [OUT] number of asserts that remain to be reported on.
+ *
+ * reportedAssertCount:
+ *   [OUT] the number of asserts contained in the report
+ *
+ * asserts:
+ *   [OUT] an array of up to NV2080_NOCAT_JOURNAL_MAX_ASSERT_RECORDS assert reports
+ */
+
+
+#define NV2080_CTRL_CMD_NVD_GET_NOCAT_JOURNAL (0x20802409) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS_MESSAGE_ID" */
+
+#define NV2080_NOCAT_JOURNAL_MAX_DIAG_BUFFER 1024
+#define NV2080_NOCAT_JOURNAL_MAX_STR_LEN 65
+#define NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS 10
+#define NV2080_NOCAT_JOURNAL_MAX_ASSERT_RECORDS 32
+
+// structure to hold clock details.
+typedef struct NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS {
+    NvS32 userMinOffset;
+    NvS32 userMaxOffset;
+    NvU32 factoryMinOffset;
+    NvU32 factoryMaxOffset;
+    NvU32 lastActiveClock;
+    NvU32 lastActiveVolt;
+    NvU32 lastActivePoint;
+    NvU32 kappa;
+} NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS;
+
+
+// structure to hold clock configuration & state.
+typedef struct NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG {
+    NvU32 pstateVer;
+    NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS gpcOverclock;
+    NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS mclkOverclock;
+    NvBool bUserOverclocked;
+    NvBool bFactoryOverclocked;
+} NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG;
+
+// structure to hold the GPU context at the time of the report.
+typedef struct NV2080_NOCAT_JOURNAL_GPU_STATE { + NvBool bValid; + NvU32 strap; + NvU16 deviceId; + NvU16 vendorId; + NvU16 subsystemVendor; + NvU16 subsystemId; + NvU16 revision; + NvU16 type; + NvU32 vbiosVersion; + NvBool bOptimus; + NvBool bMsHybrid; + NvBool bFullPower; + NvU32 vbiosOemVersion; + NvU16 memoryType; + NvU8 tag[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU8 vbiosProject[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvBool bInFullchipReset; + NvBool bInSecBusReset; + NvBool bInGc6Reset; + NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG overclockCfg; +} NV2080_NOCAT_JOURNAL_GPU_STATE; + +#define NV2080_NOCAT_JOURNAL_REC_TYPE_UNKNOWN 0 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_BUGCHECK 1 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ENGINE 2 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_TDR 3 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_RC 4 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ASSERT 5 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ANY 6 + +// this should be relative to the highest type value +#define NV2080_NOCAT_JOURNAL_REC_TYPE_COUNT (0x7) /* finn: Evaluated from "NV2080_NOCAT_JOURNAL_REC_TYPE_ANY + 1" */ +typedef struct NV2080_NOCAT_JOURNAL_ENTRY { + NvU8 recType; + NvU32 bugcheck; + NvU32 tdrBucketId; + NvU8 source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 subsystem; + NV_DECLARE_ALIGNED(NvU64 errorCode, 8); + NvU32 diagBufferLen; + NvU8 diagBuffer[NV2080_NOCAT_JOURNAL_MAX_DIAG_BUFFER]; + NvU8 faultingEngine[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 mmuFaultType; + NvU32 mmuErrorSrc; + NvU8 tdrReason[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080_NOCAT_JOURNAL_ENTRY; + +typedef struct NV2080_NOCAT_JOURNAL_RECORD { + NvU32 GPUTag; + NV_DECLARE_ALIGNED(NvU64 loadAddress, 8); + NV_DECLARE_ALIGNED(NvU64 timeStamp, 8); + NV_DECLARE_ALIGNED(NvU64 stateMask, 8); + NV2080_NOCAT_JOURNAL_GPU_STATE nocatGpuState; + NV_DECLARE_ALIGNED(NV2080_NOCAT_JOURNAL_ENTRY nocatJournalEntry, 8); +} NV2080_NOCAT_JOURNAL_RECORD; + +// NOCAT activity counter indexes +// collection activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_IDX 0 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOCATED_IDX 1 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECTED_IDX 2 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOC_FAILED_IDX 3 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_FAILED_IDX 4 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_DROPPED_IDX 5 + +// reporting activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REQUESTED_IDX 6 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REPORTED_IDX 7 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_DROPPED_IDX 8 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_MISSED_IDX 9 + +// update activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATE_REQ_IDX 10 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATED_IDX 11 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATE_FAILED_IDX 12 + +// general errors +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BUSY_IDX 13 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_PARAM_IDX 14 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_TYPE_IDX 15 + +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES1_IDX 16 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES2_IDX 17 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_CACHE_UPDATE_IDX 18 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_INSERT_RECORDS_IDX 19 + +// this should be relative to the highest counter index +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COUNTER_COUNT (0x14) /* finn: Evaluated from "NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_INSERT_RECORDS_IDX + 1" */ + +#define 
NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY 0:0 +#define NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY_YES 1 +#define NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY_NO 0 + +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS 1:1 +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS_YES 1 +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS_NO 0 + + +#define NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS { + NvU32 flags; + NvU32 nocatRecordCount; + NvU32 nocatOutstandingRecordCount; + NV_DECLARE_ALIGNED(NV2080_NOCAT_JOURNAL_RECORD journalRecords[NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS], 8); + NvU32 activityCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COUNTER_COUNT]; + NvU8 reserved[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS; + + /* + * NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA + * + * This command reports the TDR data collected by KMD to be added to the + * nocat record + * + * dataType: + * [IN] specifies the type of data provided. + * targetRecordType + * [IN] specifies record type the data is intended for. + * nocatJournalData + * [IN] specifies the data to be added. + */ + +#define NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA (0x2080240b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS_MESSAGE_ID" */ + +// data types & structures +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_EMPTY 0 +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_TDR_REASON 1 +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_INSERT_RECORD 2 +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_SET_TAG 3 + +#define NV2080_CTRL_NOCAT_TDR_TYPE_NONE 0 +#define NV2080_CTRL_NOCAT_TDR_TYPE_LEGACY 1 +#define NV2080_CTRL_NOCAT_TDR_TYPE_FULLCHIP 2 +#define NV2080_CTRL_NOCAT_TDR_TYPE_BUSRESET 3 +#define NV2080_CTRL_NOCAT_TDR_TYPE_GC6_RESET 4 +#define NV2080_CTRL_NOCAT_TDR_TYPE_SURPRISE_REMOVAL 5 +#define NV2080_CTRL_NOCAT_TDR_TYPE_UCODE_RESET 6 +#define NV2080_CTRL_NOCAT_TDR_TYPE_TEST 7 + +typedef struct NV2080CtrlNocatJournalDataTdrReason { + NvU32 flags; + NvU8 source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 subsystem; + NV_DECLARE_ALIGNED(NvU64 errorCode, 8); + NvU32 reasonCode; +} NV2080CtrlNocatJournalDataTdrReason; + +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR 0:0 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR_YES 1 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR_NO 0 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER 1:1 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER_YES 1 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER_NO 0 +typedef struct NV2080CtrlNocatJournalInsertRecord { + NvU32 flags; + NvU8 recType; + NvU32 bugcheck; + NvU8 source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 subsystem; + NV_DECLARE_ALIGNED(NvU64 errorCode, 8); + NvU8 faultingEngine[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 tdrReason; +} NV2080CtrlNocatJournalInsertRecord; + +#define NV2080_CTRL_NOCAT_TAG_CLEAR 0:0 +#define NV2080_CTRL_NOCAT_TAG_CLEAR_YES 1 +#define NV2080_CTRL_NOCAT_TAG_CLEAR_NO 0 +typedef struct NV2080CtrlNocatJournalSetTag { + NvU32 flags; + NvU8 tag[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080CtrlNocatJournalSetTag; + +#define NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS { + NvU32 dataType; + NvU32 targetRecordType; + union { + NV_DECLARE_ALIGNED(NV2080CtrlNocatJournalDataTdrReason tdrReason, 8); + NV_DECLARE_ALIGNED(NV2080CtrlNocatJournalInsertRecord insertData, 8); + NV2080CtrlNocatJournalSetTag tagData; + } 
nocatJournalData; +} NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS; +/* _ctr2080nvd_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h new file mode 100644 index 0000000..adeac56 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080nvlink.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h new file mode 100644 index 0000000..e5dbf05 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h @@ -0,0 +1,505 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080perf.finn +// + +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" + +#define NV_SUBPROC_NAME_MAX_LENGTH 100 + +#include "nvmisc.h" + +#include "ctrl/ctrl2080/ctrl2080clk.h" +#include "ctrl/ctrl2080/ctrl2080gpumon.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" +#include "ctrl/ctrl2080/ctrl2080vfe.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" +#include "ctrl/ctrl0080/ctrl0080perf.h" +// +// XAPICHK/XAPI_TEST chokes on the "static NVINLINE" defines in nvmisc.h. +// However, we don't need any of those definitions for those tests (XAPICHK is a +// syntactical check, not a functional test). So, instead, just #define out the +// macros referenced below. +// + +/* + * NV2080_CTRL_CMD_PERF_BOOST + * + * This command can be used to boost P-State up one level or to the highest for a limited + * duration for the associated subdevice. Boosts from different clients are being tracked + * independently. Note that there are other factors that can limit P-States so the resulting + * P-State may differ from expectation. + * + * flags + * This parameter specifies the actual command. _CLEAR is to clear existing boost. + * _BOOST_1LEVEL is to boost P-State one level higher. _BOOST_TO_MAX is to boost + * to the highest P-State. + * duration + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + + +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD 1:0 +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_CLEAR (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_BOOST_1LEVEL (0x00000001) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_BOOST_TO_MAX (0x00000002) + +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA 4:4 +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_NO (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_YES (0x00000001) + +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC 5:5 +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC_NO (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC_YES (0x00000001) + +#define NV2080_CTRL_PERF_BOOST_DURATION_MAX 3600 //The duration can be specified up to 1 hour +#define NV2080_CTRL_PERF_BOOST_DURATION_INFINITE 0xffffffff // If set this way, the boost will last until cleared. + +#define NV2080_CTRL_CMD_PERF_BOOST (0x2080200a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_BOOST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_BOOST_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_PERF_BOOST_PARAMS { + NvU32 flags; + NvU32 duration; +} NV2080_CTRL_PERF_BOOST_PARAMS; + +/* + * NV2080_CTRL_CMD_PERF_RESERVE_PERFMON_HW + * + * This command reserves HW Performance Monitoring capabilities for exclusive + * use by the requester. If the HW Performance Monitoring capabilities are + * currently in use then NVOS_STATUS_ERROR_STATE_IN_USE is returned. + * + * bAcquire + * When set to TRUE this parameter indicates that the client wants to + * acquire the Performance Monitoring capabilities on the subdevice. + * When set to FALSE this parameter releases the Performance Monitoring + * capabilities on the subdevice. 
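Since the PERF_BOOST flags above are FINN bit-range fields (CMD in bits 1:0, CUDA in bit 4, ASYNC in bit 5), they compose with the DRF helpers from nvmisc.h, which this header already includes. A sketch of building a request; the rmSubdeviceControl dispatch is again a hypothetical stand-in for the client's actual RM control entry point:

    extern NV_STATUS rmSubdeviceControl(NvU32 cmd, void *pParams, NvU32 paramsSize);

    /* Sketch: boost to the maximum P-State for 10 seconds as a CUDA client.
     * DRF_DEF packs a named field value into its declared bit range. */
    static NV_STATUS boostToMax(void)
    {
        NV2080_CTRL_PERF_BOOST_PARAMS boost = { 0 };

        boost.flags = DRF_DEF(2080, _CTRL_PERF_BOOST_FLAGS, _CMD, _BOOST_TO_MAX) |
                      DRF_DEF(2080, _CTRL_PERF_BOOST_FLAGS, _CUDA, _YES);
        boost.duration = 10;  /* seconds; capped by NV2080_CTRL_PERF_BOOST_DURATION_MAX */

        return rmSubdeviceControl(NV2080_CTRL_CMD_PERF_BOOST, &boost, sizeof(boost));
    }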
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_STATUS_ERROR_STATE_IN_USE
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_PERF_RESERVE_PERFMON_HW (0x20802093) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS_MESSAGE_ID (0x93U)
+
+typedef struct NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS {
+    NvBool bAcquire;
+} NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS;
+
+/*!
+ * Enumeration of the RATED_TDP arbitration clients which make requests to force
+ * enable/disable VF points above the RATED_TDP point.
+ *
+ * These clients are sorted in descending priority - the RM will arbitrate
+ * between all clients in order of priority, taking as output the first client
+ * whose input action != @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT.
+ */
+typedef enum NV2080_CTRL_PERF_RATED_TDP_CLIENT {
+    /*!
+     * Internal RM client corresponding to the RM's internal state and features.
+     * The RM client will either allow default behavior (@ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT) or will limit to RATED_TDP
+     * (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) when no power
+     * controllers are active.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM = 0,
+    /*!
+     * This client is specifically for Bug 1785342, where we need to limit the
+     * TDP to the Min value on boot and clear the Max TDP limit.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT_WAR_BUG_1785342 = 1,
+    /*!
+     * Global client request. This client is expected to be used by a global
+     * switch functionality in an end-user tool, such as EVGA Precision, to
+     * either force enabling boost above RATED_TDP (@ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED) or to force limiting to
+     * RATED_TDP (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) across the
+     * board, regardless of any app-profile settings.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT_GLOBAL = 2,
+    /*!
+     * Operating system request. This client is expected to be used by the
+     * operating system to set @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK
+     * for performance profiling.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT_OS = 3,
+    /*!
+     * App profile client requests. This client is expected to be used by the
+     * app-profile settings to either default to whatever was requested by
+     * higher-priority clients (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT)
+     * or to limit to RATED_TDP (@ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) for apps which have shown
+     * bad behavior when boosting.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT_PROFILE = 4,
+    /*!
+     * Number of supported clients.
+     *
+     * @Note MUST ALWAYS BE LAST!
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS = 5,
+} NV2080_CTRL_PERF_RATED_TDP_CLIENT;
+
+/*!
+ * Enumeration of RATED_TDP actions - these are the requested actions clients
+ * can make to change the behavior of the RATED_TDP functionality.
+ */
+typedef enum NV2080_CTRL_PERF_RATED_TDP_ACTION {
+    /*!
+     * The default action - meaning no explicit request from the client other
+     * than to take the default behavior (allowing boosting above RATED_TDP) or
+     * any explicit actions from lower priority clients.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT = 0,
+    /*!
+     * Force allow boosting above RATED_TDP - this action explicitly requests
+     * boosting above RATED_TDP, preventing lower priority clients from
+     * limiting to RATED_TDP.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED = 1,
+    /*!
+     * Force limit to RATED_TDP - this action explicitly requests to limit
+     * to RATED_TDP. This is the opposite of the default behavior to allow
+     * boosting above RATED_TDP. Clients specify this action when they
+     * explicitly need boost to be disabled (e.g. eliminating perf variation,
+     * special apps which exhibit bad behavior, etc.).
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT = 2,
+    /*!
+     * Lock to RATED_TDP - this action requests the clocks to be fixed at the
+     * RATED_TDP. Used for achieving stable clocks required for profiling.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK = 3,
+    /*!
+     * Lock to Min TDP - this requests min to be fixed at RATED_TDP but allows
+     * boosting for max
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_FLOOR = 4,
+} NV2080_CTRL_PERF_RATED_TDP_ACTION;
+
+/*!
+ * Structure describing dynamic state of the RATED_TDP feature.
+ */
+#define NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_MESSAGE_ID (0x6DU)
+
+typedef struct NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS {
+    /*!
+     * Structure of internal RM state - these values are used to determine the
+     * behavior of NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM per the RM's @ref
+     * perfPwrRatedTdpLimitRegisterClientActive() interface.
+     */
+    struct {
+        /*!
+         * [out] - Mask of active client controllers (@ref
+         * PERF_PWR_RATED_TDP_CLIENT) which are currently regulating TDP. When
+         * this mask is zero, NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM will request
+         * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT.
+         */
+        NvU32 clientActiveMask;
+        /*!
+         * [out] - Boolean indicating that the user has requested locking to
+         * the RATED_TDP vPstate via the corresponding regkey
+         * NV_REG_STR_RM_PERF_RATED_TDP_LIMIT. When the boolean value is true,
+         * NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM will request
+         * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT.
+         */
+        NvU8  bRegkeyLimitRatedTdp;
+    } rm;
+
+    /*!
+     * [out] - Arbitrated output action of all client requests (@ref inputs).
+     * This is the current state of the RATED_TDP feature. Will only be @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED or @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION output;
+    /*!
+     * [out] - Array of input client request actions, indexed via @ref
+     * NV2080_CTRL_PERF_RATED_TDP_CLIENT_. RM will arbitrate between these
+     * requests, choosing the highest priority request != @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT, or fall back to choosing @ref
+     * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION inputs[NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS];
+} NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS;
+
+/*!
+ * NV2080_CTRL_CMD_PERF_RATED_TDP_GET_CONTROL
+ *
+ * This command retrieves the current requested RATED_TDP action corresponding
+ * to the specified client.
+ *
+ * See @ref NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS for documentation of
+ * parameters.
+ *
+ * Possible status values returned are
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_PERF_RATED_TDP_GET_CONTROL (0x2080206e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | 0x6E" */
+
+/*!
+ * NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL
+ *
+ * This command sets the requested RATED_TDP action corresponding to the
+ * specified client. @Note, however, that this command is unable to set @ref
+ * NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM.
+ *
+ * See @ref NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS for documentation of
+ * parameters.
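The arbitration rule spelled out in the STATUS_PARAMS comments (scan inputs[] in client priority order, first non-DEFAULT action wins, FORCE_EXCEED as the fallback) is compact enough to restate in code. This is only an illustration of the documented semantics, not the RM's actual implementation:

    /* Sketch: clients are scanned in ascending enum order, which is descending
     * priority; the first non-DEFAULT request wins, else FORCE_EXCEED. */
    static NV2080_CTRL_PERF_RATED_TDP_ACTION
    arbitrateRatedTdp(const NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS *p)
    {
        NvU32 i;

        for (i = 0; i < NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS; i++)
        {
            if (p->inputs[i] != NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT)
                return p->inputs[i];
        }
        return NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED;
    }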
+ *
+ * Possible status values returned are
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL (0x2080206f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | 0x6F" */
+
+/*!
+ * Structure containing the requested action for a RATED_TDP client (@ref
+ * NV2080_CTRL_PERF_RATED_TDP_CLIENT).
+ */
+typedef struct NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS {
+    /*!
+     * [in] - Specified client for request.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_CLIENT client;
+    /*!
+     * [in/out] - Client's requested action.
+     */
+    NV2080_CTRL_PERF_RATED_TDP_ACTION input;
+} NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS;
+
+/*!
+ * This struct represents the GPU monitoring perfmon sample for an engine.
+ */
+typedef struct NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE {
+    /*!
+     * Percentage during the sample that the engine remains busy. This
+     * is in units of pct*100.
+     */
+    NvU32 util;
+    /*!
+     * Scaling factor to convert utilization from full GPU to per vGPU.
+     */
+    NvU32 vgpuScale;
+    /*!
+     * Process ID of the process that was active on the engine when the
+     * sample was taken. If no process is active then NV2080_GPUMON_PID_INVALID
+     * will be returned.
+     */
+    NvU32 procId;
+    /*!
+     * Process ID of the process in the vGPU VM that was active on the engine when
+     * the sample was taken. If no process is active then NV2080_GPUMON_PID_INVALID
+     * will be returned.
+     */
+    NvU32 subProcessID;
+    /*!
+     * Process name of the process in the vGPU VM that was active on the engine when
+     * the sample was taken. If no process is active then NULL will be returned.
+     */
+    char  subProcessName[NV_SUBPROC_NAME_MAX_LENGTH];
+} NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE;
+
+/*!
+ * This struct represents the GPU monitoring perfmon sample.
+ */
+typedef struct NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE {
+    /*!
+     * Base GPU monitoring sample.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPUMON_SAMPLE base, 8);
+    /*!
+     * FB bandwidth utilization sample.
+     */
+    NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE fb;
+    /*!
+     * GR utilization sample.
+     */
+    NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE gr;
+    /*!
+     * NV ENCODER utilization sample.
+     */
+    NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvenc;
+    /*!
+     * NV DECODER utilization sample.
+     */
+    NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvdec;
+} NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE;
+
+/*!
+ * This struct represents the GPU monitoring samples of perfmon values that
+ * the client wants access to.
+ */
+#define NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM_MESSAGE_ID (0x83U)
+
+typedef NV2080_CTRL_GPUMON_SAMPLES NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM;
+
+/*!
+ * Number of GPU monitoring samples in their respective buffers.
+ */
+#define NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL 100
+
+#define NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_BUFFER_SIZE \
+    NV_SIZEOF32(NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE) * \
+    NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL
+
+/*!
+ * NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2
+ *
+ * This command returns perfmon gpu monitoring utilization samples.
+ * This command is not supported with SMC enabled.
+ *
+ * See NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM_V2 for documentation
+ * on the parameters.
+ *
+ * Possible status values returned are
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ * Note this is the same as NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES
+ * but without the embedded pointer.
+ *
+ */
+#define NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2 (0x20802096) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_MESSAGE_ID" */
+
+/*!
+ * This structure represents the GPU monitoring samples of utilization values that
+ * the client wants access to.
+ */
+#define NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_MESSAGE_ID (0x96U)
+
+typedef struct NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS {
+    /*!
+     * Type of the sample, see NV2080_CTRL_GPUMON_SAMPLE_TYPE_* for reference.
+     */
+    NvU8  type;
+    /*!
+     * Size of the buffer, this should be
+     * bufSize == NV2080_CTRL_*_GPUMON_SAMPLE_COUNT_*
+     * sizeof(derived type of NV2080_CTRL_GPUMON_SAMPLE).
+     */
+    NvU32 bufSize;
+    /*!
+     * Number of samples in ring buffer.
+     */
+    NvU32 count;
+    /*!
+     * Tracks the offset of the tail in the circular queue array pSamples.
+     */
+    NvU32 tracker;
+    /*!
+     * A circular queue with size == bufSize.
+     *
+     * @note This circular queue wraps around after 10 seconds of sampling,
+     * and it is the client's responsibility to query within this time frame in
+     * order to avoid losing samples.
+     * @note With one exception, this queue contains the last 10 seconds of
+     * samples, with tracker pointing to the oldest entry and the entry before
+     * tracker as the newest entry. The exception is when the queue is not full
+     * (i.e. tracker is pointing to a zeroed out entry); in that case valid
+     * entries are between 0 and tracker.
+     * @note Clients can store the tracker from a previous query in order to
+     * pick up only the samples added since the last read.
+     */
+    NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE samples[NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL], 8);
+} NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_PERF_GPU_IS_IDLE
+ *
+ * This command notifies RM to make P-state switching aggressive by setting
+ * required limiting factors to speed up GC6 Entry initiation.
+ *
+ * prevPstate [out]
+ *   This parameter will contain the pstate before the switch was initiated
+ *
+ * Possible status return values are:
+ *   NV_OK             : If P State Switch is successful
+ *   NV_INVALID_STATE  : If unable to access P State structure
+ *   NVOS_STATUS_ERROR : If P State Switch is unsuccessful
+ */
+#define NV2080_CTRL_CMD_PERF_GPU_IS_IDLE (0x20802089) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS {
+    NvU32 prevPstate;
+    NvU32 action;
+} NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS;
+
+#define NV2080_CTRL_PERF_GPU_IS_IDLE_TRUE  (0x00000001)
+#define NV2080_CTRL_PERF_GPU_IS_IDLE_FALSE (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_PERF_AGGRESSIVE_PSTATE_NOTIFY
+ *
+ * This command is for the KMD Aggressive P-state feature.
+ *
+ * bGpuIsIdle [in]
+ *   When true, applies cap to lowest P-state/GPCCLK. When false, releases cap.
+ * idleTimeUs [in]
+ *   The amount of time (in microseconds) the GPU was idle since previous
+ *   call, part of the GPU utilization data from KMD.
+ * busyTimeUs [in]
+ *   The amount of time (in microseconds) the GPU was not idle since
+ *   previous call, part of the GPU utilization data from KMD.
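Given the tracker semantics documented in the V2 params struct, a reader over the circular queue can be reconstructed as follows. This is a sketch under stated assumptions: the params were returned by a successful control call, count is treated as the queue capacity, and the caller is left to skip zeroed (not-yet-written) entries in the not-full case, as the struct comments describe:

    typedef void (*SampleVisitor)(const NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE *);

    /* Sketch: walk GPUMON samples oldest-to-newest, using 'tracker' as the tail. */
    static void forEachGpumonSample(
        const NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS *p,
        SampleVisitor visit)
    {
        NvU32 i;

        if (p->count == 0)
            return;

        for (i = 0; i < p->count; i++)
        {
            /* Oldest entry sits at 'tracker'; wrap modulo the queue size. */
            NvU32 idx = (p->tracker + i) % p->count;
            visit(&p->samples[idx]);
        }
    }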
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + + +#define NV2080_CTRL_CMD_PERF_AGGRESSIVE_PSTATE_NOTIFY (0x2080208f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS_MESSAGE_ID (0x8FU) + +typedef struct NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS { + NvBool bGpuIsIdle; + NvBool bRestoreToMax; + NV_DECLARE_ALIGNED(NvU64 idleTimeUs, 8); + NV_DECLARE_ALIGNED(NvU64 busyTimeUs, 8); +} NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS; + + +/* _ctrl2080perf_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h new file mode 100644 index 0000000..8eb6c01 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080perf_cf.finn +// + + +/* _ctrl2080perf_cf_h_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h new file mode 100644 index 0000000..0a3e6b2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.finn +// + + + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h new file mode 100644 index 0000000..abbc4cf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080pmgr.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h new file mode 100644 index 0000000..53cb620 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080pmumon.finn +// + + + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h new file mode 100644 index 0000000..2d01ed5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080power.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h new file mode 100644 index 0000000..e2471c3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h @@ -0,0 +1,368 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080rc.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/*
+ * NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM
+ *
+ * This command returns data read from the specified virtual memory address for
+ * the associated subdevice.
+ *
+ * hChannel
+ *   This parameter specifies the channel object handle from which the virtual
+ *   memory range applies.
+ * virtAddress
+ *   This parameter specifies the GPU base virtual memory address from which
+ *   data should be read. The amount of data read is specified by the
+ *   bufferSize parameter.
+ * bufferPtr
+ *   This parameter specifies the buffer address in the caller's address space
+ *   into which the data is to be returned. The address must be aligned on an
+ *   8-byte boundary. The buffer must be at least as big as the value
+ *   specified by the bufferSize parameter (in bytes).
+ * bufferSize
+ *   This parameter specifies the size of the buffer referenced by the
+ *   bufferPtr parameter. This parameter also indicates the total number of
+ *   bytes to be returned.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_XLATE
+ */
+#define NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS {
+    NvHandle hChannel;
+    NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+    NV_DECLARE_ALIGNED(NvP64 bufferPtr, 8);
+    NvU32    bufferSize;
+} NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS;
+
+#define NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM (0x20802204) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_RC_GET_ERROR_COUNT
+ *
+ * This command returns the number of RC errors.
+ *
+ * errorCount
+ *   Number of RC errors.
+ *
+ * Note: If SMC is enabled, mig/monitor capability must be acquired to query
+ * aggregate information. Otherwise, the control call returns
+ * NV_ERR_INSUFFICIENT_PERMISSIONS.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
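+ *
+ * For illustration only, a minimal user-space sketch. rmControl() stands in
+ * for an NV_ESC_RM_CONTROL (NVOS54_PARAMETERS) wrapper, and hClient /
+ * hSubdevice are placeholder handles; neither is part of this header.
+ *
+ *   NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS params = { 0 };
+ *   NV_STATUS status = rmControl(hClient, hSubdevice,
+ *                                NV2080_CTRL_CMD_RC_GET_ERROR_COUNT,
+ *                                &params, sizeof(params));
+ *   if (status == NV_OK)
+ *   {
+ *       NvU32 errors = params.errorCount;   // number of RC errors so far
+ *   }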
+ */ +#define NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS { + NvU32 errorCount; +} NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS; + +#define NV2080_CTRL_CMD_RC_GET_ERROR_COUNT (0x20802205) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_RC_ERROR_PARAMS_BUFFER_SIZE (0x2000) /* finn: Evaluated from "(8 * 1024)" */ + +#define NV2080_CTRL_CMD_RC_GET_ERROR (0x20802206) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x6" */ + +/* + * NV2080_CTRL_CMD_RC_GET_ERROR_V2 + * + * This command returns an error element in the RC error list. + * + * whichBuffer + * Which Error to return (0 is oldest) + * outputRecordSize + * Output Size of Buffer -- Zero if error record doesn't exist + * recordBuffer + * buffer + * + * Note: If SMC is enabled, mig/monitor capability must be acquired to query + * aggregate information. Otherwise, the control call returns + * NV_ERR_INSUFFICIENT_PERMISSIONS. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INSUFFICIENT_PERMISSIONS. + * + */ + +#define NV2080_CTRL_RC_GET_ERROR_V2_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_RC_GET_ERROR_V2_PARAMS { + + NvU32 whichBuffer; // [IN] - which error to return (0 is oldest) + NvU32 outputRecordSize; // [OUT] + NvU8 recordBuffer[NV2080_CTRL_RC_ERROR_PARAMS_BUFFER_SIZE]; +} NV2080_CTRL_RC_GET_ERROR_V2_PARAMS; + +#define NV2080_CTRL_CMD_RC_GET_ERROR_V2 (0x20802213) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_ERROR_V2_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_RC_SET_CLEAN_ERROR_HISTORY + * + * This command cleans error history. + * + * This command has no input parameters. + * + * Possible status values returned are: + * NV_OK + */ + +#define NV2080_CTRL_CMD_RC_SET_CLEAN_ERROR_HISTORY (0x20802207) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x7" */ + +/* + * NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO + * + * This command returns information about the RC watchdog. + * + * watchdogStatusFlags + * This output parameter is a combination of one or more of the following: + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_NONE + * This is the value of watchdogStatusFlags if no flags are set. + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED + * This means that the watchdog is disabled. + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING + * This means that the watchdog is running. + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED + * This means that the watchdog has been initialized. + * + * A typical result would be either "running and initialized", or + * "disabled". However, "initialized, but not running, and not disabled" + * is also quite reasonable (if the computer is hibernating, for example). 
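+ *
+ * For illustration only (rmControl, hClient and hSubdevice are placeholder
+ * names, not part of this header):
+ *
+ *   NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS info = { 0 };
+ *   if (rmControl(hClient, hSubdevice,
+ *                 NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO,
+ *                 &info, sizeof(info)) == NV_OK)
+ *   {
+ *       NvBool running = (info.watchdogStatusFlags &
+ *                         NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING) != 0;
+ *   }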
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS { + NvU32 watchdogStatusFlags; +} NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS; + +#define NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO (0x20802209) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS_MESSAGE_ID" */ + +/* valid values for watchdogStatusFlags */ +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_NONE (0x00000000) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED (0x00000001) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING (0x00000002) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED (0x00000004) + +/* + * NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG + * + * This command disables the RC watchdog, if possible. + * If, however, another RM client has already explicitly (via NV2080 call) enabled + * the RC watchdog, then this method returns NV_ERR_STATE_IN_USE. + * + * This command, if successful, will prevent other clients from enabling the + * watchdog until the calling RM client releases its request with + * NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS or frees its NV20_SUBDEVICE. + * + * See NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG for disabling the watchdog + * without preventing other clients from enabling it. + * + * Possible status return values are: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG (0x2080220a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xA" */ + +/* + * NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG + * + * This command enables the RC watchdog, if possible. + * If, however, another RM client has already explicitly (via NV2080 call) disabled + * the RC watchdog, then this method returns NV_ERR_STATE_IN_USE. + * + * Possible status return values are: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG (0x2080220b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xB" */ + +/* + * NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS + * + * This command releases all of the RM client's outstanding requests to enable + * or disable the watchdog. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS (0x2080220c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xC" */ + +/* + * NV2080_CTRL_CMD_SET_RC_RECOVERY/NV2080_CTRL_CMD_GET_RC_RECOVERY + * + * This command disables/enables RC recovery. + * + * rcEnable + * NV2080_CTRL_CMD_SET_RC_RECOVERY_DISABLED + * Disable robust channel recovery. + * + * NV2080_CTRL_CMD_SET_RC_RECOVERY_ENABLED + * Enable robust channel recovery with default breakpoint handling. 
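+ *
+ * For illustration only, a get-modify-set sketch using the define names
+ * that follow (rmControl and the handles are placeholders, not part of
+ * this header):
+ *
+ *   NV2080_CTRL_CMD_RC_RECOVERY_PARAMS rc = { 0 };
+ *   rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GET_RC_RECOVERY,
+ *             &rc, sizeof(rc));
+ *   rc.rcEnable = NV2080_CTRL_CMD_RC_RECOVERY_DISABLED;
+ *   rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_SET_RC_RECOVERY,
+ *             &rc, sizeof(rc));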
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+typedef struct NV2080_CTRL_CMD_RC_RECOVERY_PARAMS {
+    NvU32 rcEnable;
+} NV2080_CTRL_CMD_RC_RECOVERY_PARAMS;
+
+#define NV2080_CTRL_CMD_SET_RC_RECOVERY (0x2080220d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xD" */
+
+#define NV2080_CTRL_CMD_GET_RC_RECOVERY (0x2080220e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xE" */
+
+/* valid values for rcEnable */
+#define NV2080_CTRL_CMD_RC_RECOVERY_DISABLED (0x00000000)
+#define NV2080_CTRL_CMD_RC_RECOVERY_ENABLED (0x00000001)
+
+/*
+ * NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE
+ *
+ * This command can be used to set TDR timeout state.
+ *
+ * It can be used to indicate that a timeout has occurred and that a GPU
+ * reset will start. It can also be used to indicate that the reset has
+ * completed along with the corresponding completion status.
+ *
+ * cmd
+ *   This parameter is used to indicate the stage of the TDR recovery
+ *   process. Legal values for this parameter are:
+ *     NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_BEGIN
+ *       This value indicates that TDR recovery is about to begin.
+ *     NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END
+ *       This value indicates that TDR recovery has completed.
+ *
+ * status
+ *   This parameter is valid when the cmd parameter is set to
+ *   NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END. It is used
+ *   to specify the completion status of the TDR recovery. Legal
+ *   values for this parameter include:
+ *     NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_FAIL
+ *       This value indicates the recovery failed.
+ *     NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS
+ *       This value indicates the recovery succeeded.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_COMMAND
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE (0x2080220f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS {
+    NvU32 cmd;
+    NvS32 status;
+} NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS;
+
+/* valid cmd values */
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_BEGIN (0x00000000)
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END (0x00000001)
+
+/* valid status values */
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS (0x00000000)
+#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_FAIL (0x00000001)
+
+/*
+ * NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG
+ *
+ * This command disables the RC watchdog, similarly to how
+ * NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG does. However, unlike that command, this
+ * command will not prevent another RM client from explicitly enabling the RC
+ * watchdog with NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_STATE_IN_USE
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG (0x20802210) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x10" */
+
+/*
+ * NV2080_CTRL_CMD_GET_RC_INFO/NV2080_CTRL_CMD_SET_RC_INFO
+ *
+ * These commands can be used to get and set robust channel parameters.
+ *
+ * rcMode
+ *   NV2080_CTRL_CMD_SET_RC_INFO_MODE_DISABLE
+ *     Disable robust channel operation.
+ *
+ *   NV2080_CTRL_CMD_SET_RC_INFO_MODE_ENABLE
+ *     Enable robust channel operation.
+ * + * rcBreak + * NV2080_CTRL_CMD_SET_RC_INFO_BREAK_DISABLE + * Disable breakpoint handling during robust channel operation. + * + * NV2080_CTRL_CMD_SET_RC_INFO_BREAK_ENABLE + * Enable breakpoint handling during robust channel operation. + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +typedef struct NV2080_CTRL_CMD_RC_INFO_PARAMS { + NvU32 rcMode; + NvU32 rcBreak; +} NV2080_CTRL_CMD_RC_INFO_PARAMS; + +#define NV2080_CTRL_CMD_SET_RC_INFO (0x20802211) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x11" */ + +#define NV2080_CTRL_CMD_GET_RC_INFO (0x20802212) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x12" */ + +/* valid rcMode values */ +#define NV2080_CTRL_CMD_RC_INFO_MODE_DISABLE (0x00000000) +#define NV2080_CTRL_CMD_RC_INFO_MODE_ENABLE (0x00000001) + +/* valid rcBreak values */ +#define NV2080_CTRL_CMD_RC_INFO_BREAK_DISABLE (0x00000000) +#define NV2080_CTRL_CMD_RC_INFO_BREAK_ENABLE (0x00000001) + +/* _ctrl2080rc_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h new file mode 100644 index 0000000..448723c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080spi.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h new file mode 100644 index 0000000..abe66ef --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080thermal.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h new file mode 100644 index 0000000..d018e20 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h @@ -0,0 +1,233 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080tmr.finn +// + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_TIMER related control commands and parameters */ + +/* + * NV2080_CTRL_CMD_TIMER_SCHEDULE + * + * This command schedules a GPU timer event to fire at the specified time interval. + * Can be called without API & GPU locks if NVOS54_FLAGS_IRQL_RAISED and + * NVOS54_FLAGS_LOCK_BYPASS are set in NVOS54_PARAMETERS.flags + * + * time_nsec + * This parameter specifies the time in nanoseconds at which the GPU timer + * event is to fire. 
+ * flags + * This parameter determines the interpretation of the value specified by + * the time_nsec parameter: + * NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS + * This flag indicates that time_nsec is in absolute time. + * NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_REL + * This flag indicates that time_nsec is in relative time. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_TIMER_SCHEDULE (0x20800401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 time_nsec, 8); + NvU32 flags; +} NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS; + +/* valid flag values */ +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME 0:0 +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS (0x00000000) +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_REL (0x00000001) + +/* + * NV2080_CTRL_CMD_TIMER_CANCEL + * + * This command cancels any pending timer events initiated with the + * NV2080_CTRL_CMD_TIMER_SCHEDULE command. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_CANCEL (0x20800402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | 0x2" */ + +/* + * NV2080_CTRL_CMD_TIMER_GET_TIME + * + * This command returns the current GPU timer value. The current time is + * expressed in elapsed nanoseconds since 00:00 GMT, January 1, 1970 + * (zero hour) with a resolution of 32 nanoseconds. + * + * Can be called without API & GPU locks if NVOS54_FLAGS_IRQL_RAISED and + * NVOS54_FLAGS_LOCK_BYPASS are set in NVOS54_PARAMETERS.flags + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_GET_TIME (0x20800403) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_TIME_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GET_TIME_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_TIMER_GET_TIME_PARAMS { + NV_DECLARE_ALIGNED(NvU64 time_nsec, 8); +} NV2080_CTRL_TIMER_GET_TIME_PARAMS; + +/* + * NV2080_CTRL_CMD_TIMER_GET_REGISTER_OFFSET + * + * The command returns the offset of the timer registers, so that clients may + * map them directly. + * + * Possible status values returned are: + * NV_OK + */ + +#define NV2080_CTRL_CMD_TIMER_GET_REGISTER_OFFSET (0x20800404) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS { + NvU32 tmr_offset; +} NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS; + +/* + * NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE + * + * This structure describes the information obtained with + * NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO command. + * + * gpuTime + * GPU time is the value of GPU global timer (PTIMER) with a resolution + * of 32 nano seconds. + * cpuTime + * CPU time. Resolution of the cpu time depends on its source. Refer to + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_* for more information. 
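+ *
+ * For illustration only: with two such samples a client can estimate the
+ * GPU/CPU clock offset and its drift. This sketch assumes cpuTime is in
+ * nanosecond-like units, which holds only for some cpuClkId sources:
+ *
+ *   NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE s0, s1;   // filled by the control call
+ *   NvS64 offset0 = (NvS64)(s0.cpuTime - s0.gpuTime);
+ *   NvS64 offset1 = (NvS64)(s1.cpuTime - s1.gpuTime);
+ *   NvS64 drift   = offset1 - offset0;              // change between samples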
+ + */ +typedef struct NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE { + NV_DECLARE_ALIGNED(NvU64 cpuTime, 8); + NV_DECLARE_ALIGNED(NvU64 gpuTime, 8); +} NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE; + + +/* + * NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO + * + * This command returns correlation information between GPU time and CPU time + * for a given CPU clock type. + * + * cpuClkId + * This parameter specifies the source of the CPU clock. Legal values for + * this parameter include: + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME + * This clock id will provide real time in microseconds since + * 00:00:00 UTC on January 1, 1970. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API + * This clock id will provide time stamp that is constant-rate, high + * precision using platform API that is also available in the user mode. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC + * This clock id will provide time stamp using CPU's time stamp counter. + * + * sampleCount + * This field specifies the number of clock samples to be taken. + * This value cannot exceed NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES. + * + * samples + * This field returns an array of requested samples. Refer to + * NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE to get details about each entry + * in the array. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO (0x20800406) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES 16 + +#define NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS { + NvU8 cpuClkId; + NvU8 sampleCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE samples[NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES], 8); +} NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS; + +/* Legal cpuClkId values */ +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME (0x00000001) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC (0x00000002) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API (0x00000003) +/*! + * NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ + * + * This command changes the frequency at which Graphics Engine time stamp is + * updated. Frequency can either be set to max or restored to default. + * Clients can independently use this call to increase the timer frequency + * as a global reference count is maintained for requests to Max frequency. + * Client is assured that the system stays in this state till the requested + * client releases the state or is killed. Timer frequency will automatically + * be restored to default when there is no pending request to increase. + * + * Note that recursive requests for the same state from the same client + * are considered invalid. + * + * bSetMaxFreq + * Set to NV_TRUE if GR tick frequency needs to be set to Max. + * + * See @ref NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS for + * documentation of parameters. + * + * Possible status values returned are + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_OPERATION + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ (0x20800407) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_MESSAGE_ID" */ + +/*! 
+ * This struct contains bSetMaxFreq flag. + */ +#define NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS { + NvBool bSetMaxFreq; +} NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS; + +/* _ctrl2080tmr_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h new file mode 100644 index 0000000..93b1113 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080ucodefuzzer.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h new file mode 100644 index 0000000..782e182 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h @@ -0,0 +1,190 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080unix.finn
+//
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX OS control commands and parameters */
+
+/*
+ * NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT
+ *
+ * This command increases or decreases the value of the per-GPU GC6 blocker
+ * refCount used by Linux kernel clients to prevent the GPU from entering GC6.
+ *
+ * When the refCount is non-zero, the GPU cannot enter GC6. When the refCount
+ * transitions from zero to non-zero as a result of this command, the GPU will
+ * automatically come out of GC6.
+ *
+ * action
+ *   Whether to increment or decrement the value of the refCount.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT (0x20803d01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS {
+    NvU32 action;
+} NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS;
+
+// Possible values for action
+#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC (0x00000001)
+#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_OS_UNIX_ALLOW_DISALLOW_GCOFF
+ *
+ * RM allows GCOFF by default. When the X driver disallows entering GCOFF,
+ * this RM control sets the flag to NV_FALSE; when it allows entering GCOFF
+ * again, the flag is set to NV_TRUE.
+ *
+ * action
+ *   Whether to allow or disallow the user mode clients to enter GCOFF.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_OS_UNIX_ALLOW_DISALLOW_GCOFF (0x20803d02) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS {
+    NvU32 action;
+} NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS;
+
+// Possible values for action
+#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_ALLOW (0x00000001)
+#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_DISALLOW (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER
+ *
+ * The GPU can have an integrated HDA (High Definition Audio) controller which
+ * can be in an active or suspended state during dynamic power management.
+ * This command will perform an HDA controller wakeup (if bEnter is false) or
+ * suspend (if bEnter is true).
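+ *
+ * For illustration only (rmControl, hClient and hSubdevice are placeholder
+ * names, not part of this header):
+ *
+ *   NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS audio = { 0 };
+ *   audio.bEnter = NV_TRUE;   // NV_TRUE: suspend HDA, NV_FALSE: wake it up
+ *   rmControl(hClient, hSubdevice,
+ *             NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER,
+ *             &audio, sizeof(audio));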
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER (0x20803d03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS { + NvBool bEnter; +} NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_INSTALL_PROFILER_HOOKS + * + * Initialize cyclestats HWPM support in the kernel. This will set up a callback + * event for the channel indicated by hNotifierResource. This callback will execute + * perf register read / write commands enqueued in the shared buffer indicated by + * hNotifyDataMemory. Only one client may use HWPM functionality at a time. + * + * Additionally, if perfmonIdCount is greater than zero, mode-e HWPM streaming into + * the buffer indicated by hSnapshotMemory will be initialized (but not turned on). + * Data will be copied into the provided buffer every 10ms, or whenever a + * NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER command is issued. + */ +#define NV2080_CTRL_CMD_OS_UNIX_INSTALL_PROFILER_HOOKS (0x20803d04) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS { + NvHandle hNotifierResource; + NvU32 notifyDataSize; + NvHandle hNotifyDataMemory; + NvU32 perfmonIdCount; + NvU32 snapshotBufferSize; + NvHandle hSnapshotMemory; +} NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER + * + * Immediately copies any pending mode-e HWPM data into the previously + * installed snapshot buffer instead of waiting for the timer. + */ +#define NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER (0x20803d05) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | 0x5" */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_STOP_PROFILER + * + * Stop the timer responsible for copying mode-e HWPM data to the snapshot buffer. + * The snapshot buffer must not be freed by the client before this command is issued. + */ +#define NV2080_CTRL_CMD_OS_UNIX_STOP_PROFILER (0x20803d06) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | 0x6" */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS + * + * This command will be used by clients to check if the GPU video memory will + * be persistent during system suspend/resume cycle. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS (0x20803d07) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS { + NvBool bVidmemPersistent; +} NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS + * + * This command will be used by clients to set restore TGP flag which will + * help to restore TGP limits when clients are destroyed. 
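+ *
+ * For illustration only (rmControl and the handles are placeholders, not
+ * part of this header):
+ *
+ *   NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS tgp = { 0 };
+ *   tgp.bUpdateTGP = NV_TRUE;   // ask RM to restore TGP limits on teardown
+ *   rmControl(hClient, hSubdevice,
+ *             NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS,
+ *             &tgp, sizeof(tgp));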
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS (0x20803d08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS { + NvBool bUpdateTGP; +} NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS; +/* _ctrl2080unix_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h new file mode 100644 index 0000000..a5a9db1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080vfe.finn +// + + + +/* _ctrl2080vfe_h_ */ +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" + +/* --------------------------- Forward Defines ----------------------------- */ +/* --------------------------- VFE Variable -------------------------------- */ +/* --------------------------- VFE Equation -------------------------------- */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h new file mode 100644 index 0000000..eacd596 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080volt.finn +// + + + +/* _ctrl2080volt_h_ */ + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h new file mode 100644 index 0000000..fdfa8fb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h @@ -0,0 +1,1493 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl30f1.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV30_GSYNC_CTRL control commands and parameters */ + +#define NV30F1_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x30F1, NV30F1_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV30F1_CTRL_RESERVED (0x00) +#define NV30F1_CTRL_GSYNC (0x01) + +/* + * NV30F1_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
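+ *
+ * For orientation: per the NV30F1_CTRL_CMD macro above, a command value
+ * packs the class in the top 16 bits and the category and index in the low
+ * two bytes, e.g.
+ *
+ *   (0x30F1 << 16) | (NV30F1_CTRL_RESERVED << 8) | 0x0  ==  0x30f10000
+ *
+ * and the GSYNC commands below use NV30F1_CTRL_GSYNC (0x01) as the category
+ * byte, giving values of the form 0x30f101xx.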
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV30F1_CTRL_CMD_NULL (0x30f10000) /* finn: Evaluated from "(FINN_NV30_GSYNC_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+/* NV30F1_CTRL_GSYNC
+ *
+ * Gsync board access/control functionality.
+ *
+ */
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_VERSION
+ *
+ * This command will get the current gsync api version info.
+ *
+ * version
+ *   The api's major version. Does not change often.
+ *
+ * revision
+ *   The api's minor version.
+ *   Bumped with each change, no matter how minor.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_VERSION (0x30f10101) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS {
+    NvU32 version;
+    NvU32 revision;
+} NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS;
+
+#define NV30F1_CTRL_GSYNC_API_VER 1
+#define NV30F1_CTRL_GSYNC_API_REV 0
+
+/*
+ * NV30F1_CTRL_GSYNC api
+ *
+ * The gsync interface provides access to gsync devices in the system.
+ *
+ * There are commands:
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS
+ *   Status on input sync signals.
+ * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS
+ *   Get gsync parameters.
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS
+ *   Set gsync parameters.
+ * NV30F1_CTRL_CMD_GSYNC_GET_INFO_CAPS
+ *   Get basic info about the device and its connected displays
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC
+ *   Enable frame sync.
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC
+ *   Disable frame sync.
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS
+ *   Get status info relevant for the control panel
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING
+ *   Test signal enabling/disabling
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG
+ *   Control the gsync watchdog
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE
+ *   Set the interlace mode
+ *
+ */
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS
+ *
+ * This command returns information associated with incoming signals to the
+ * gsync device.
+ *
+ * RJ45
+ *   This parameter contains the signal information for each of the two RJ45
+ *   ports on the gsync device. A value of ~0 indicates that a signal is
+ *   detected, but no rate information is available. Anything else is a rate
+ *   in units of 10e-4 Hz.
+ * house
+ *   This parameter contains the signal information for the house sync signal
+ *   (i.e. the bnc port). A value of 0 means that no signal is present. A
+ *   value of ~0 indicates that a signal is detected, but no rate information
+ *   is available. Anything else is a rate in units of 10e-4 Hz.
+ * rate
+ *   A mask representing signals for which we would like rate information (if
+ *   available).
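+ *
+ * For illustration only: rates come back in units of 10e-4 Hz, so a house
+ * sync reading of 600000 is 60.0 Hz (rmControl, hClient and hGsync are
+ * placeholder names, not part of this header):
+ *
+ *   NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS sig = { 0 };
+ *   sig.rate = NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_RJ45_0 |
+ *              NV30F1_CTRL_GSYNC_GET_SIGNALS_HOUSE;
+ *   if (rmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS,
+ *                 &sig, sizeof(sig)) == NV_OK &&
+ *       sig.house != 0 && sig.house != ~0U)
+ *   {
+ *       double houseHz = sig.house / 10000.0;
+ *   }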
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS (0x30f10102) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS {
+    NvU32 RJ45[2];
+    NvU32 house;
+    NvU32 rate;
+} NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS;
+
+/*
+ * rate values
+ *
+ */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_RJ45_0 (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_RJ45_1 (0x00000002)
+#define NV30F1_CTRL_GSYNC_GET_SIGNALS_HOUSE (0x00000004)
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS
+ *
+ * These commands respectively return and set state related to the operation
+ * of the gsync device.
+ *
+ * which
+ *   This value is a mask set by the client representing which parameters are
+ *   to be updated. In the case of a SET command, these parameters will
+ *   be used to change the state of the hardware. For both a GET
+ *   command and a SET command, the hardware state will be returned by
+ *   the RM in the respective parameter. All other parameters are untouched.
+ * syncPolarity
+ *   This parameter specifies which edge of the house sync signal to sync
+ *   with.
+ * videoMode
+ *   This parameter specifies which video mode to use to decode the house sync
+ *   signal.
+ * nSync
+ *   This parameter specifies the number of pulses to wait between frame lock
+ *   signal generation. 0 indicates that every incoming pulse should result in
+ *   a frame lock sync pulse being generated (i.e. the input and output rate
+ *   matches).
+ * syncSkew
+ *   This parameter specifies the time delay between the frame sync signal and
+ *   the GPU's signal in units of 0.977 us. The maximum value for syncSkew is
+ *   defined in the respective header files of the gsync boards, e.g. for the
+ *   P2060 board the value is defined in drivers/resman/kernel/inc/dacp2060.h
+ * syncStartDelay
+ *   In master mode, the amount of time to wait before generating the first
+ *   sync pulse in units of 7.81 us, max 512 ms (i.e. 65535 units).
+ * useHouseSync
+ *   When a house sync signal is detected, this parameter indicates that it
+ *   should be used as the reference to generate the frame sync signal.
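+ *
+ * For illustration only, changing just the sync skew via the `which' mask
+ * (rmControl, hClient and hGsync are placeholders, not part of this header;
+ * the mask defines appear below):
+ *
+ *   NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS ctl = { 0 };
+ *   ctl.which    = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW;
+ *   ctl.syncSkew = 2;   // 2 units of 0.977 us, i.e. roughly 1.95 us
+ *   rmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS,
+ *             &ctl, sizeof(ctl));
+ *   // on return, ctl.syncSkew holds the hardware state written back by RM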
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS (0x30f10103) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS (0x30f10104) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS { + NvU32 which; + NvU32 syncPolarity; + NvU32 syncVideoMode; + NvU32 nSync; + NvU32 syncSkew; + NvU32 syncStartDelay; + NvU32 useHouseSync; +} NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS; +#define NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS_MESSAGE_ID (0x4U) + +typedef NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS; + +/* + * which values + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY 0x0001 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE 0x0002 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC 0x0004 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW 0x0008 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_START_DELAY 0x0010 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE 0x0020 + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_POLARITY NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_NSYNC NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_SKEW NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_START_DELAY NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_START_DELAY +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_USE_HOUSE NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE + +/* + * syncPolarity values + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_RISING_EDGE 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_FALLING_EDGE 1 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_BOTH_EDGES 2 + +/* + * syncVideoMode values + * Video_Mode_Composite is valid for P2060 only. + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL 1 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM 2 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV 3 + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_NONE NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_TTL NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_NTSCPALSECAM NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_HDTV NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_COMPOSITE 4 + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_CAPS + * + * This command returns the capabilities of this gsync device. + * + * revId + * This parameter is set by the RM to indicate the combined + * FPGA revision (low 4 bits) and board ID (high 4 bits). + * + * boardId + * This parameter is set by the RM to indicate the board ID, + * allowing disambiguation of P2060 and so forth. 
+ *
+ * minRevRequired
+ *   This parameter is set by the RM to indicate the minimum
+ *   Qsync FPGA revision required for a specific chip family.
+ *
+ * isFirmwareRevMismatch
+ *   This parameter is set to TRUE by RM when the Qsync Firmware
+ *   Revision is incompatible with the connected GPU chip family.
+ *
+ * revision
+ *   This parameter is set by the RM to indicate the device revision,
+ *   also known as major version.
+ *
+ * extendedRevision
+ *   This parameter is set by the RM to indicate the device extended
+ *   revision, also known as minor version.
+ *
+ * capFlags
+ *   This parameter is set by the RM to indicate capabilities of
+ *   the board, preventing the client from needing to keep track
+ *   of the feature lists supported by each revision of each board.
+ *
+ * maxSyncSkew
+ *   This parameter returns the maximum number of sync skew units the
+ *   board supports. The value programmed into the board has to be
+ *   between 0 and maxSyncSkew, inclusive. The value of each unit
+ *   can be learned from the syncSkewResolution parameter.
+ *
+ * syncSkewResolution
+ *   This parameter returns the number of nanoseconds that one unit
+ *   of sync skew corresponds to.
+ *
+ * maxStartDelay
+ *   This parameter returns the maximum number of sync start delay units
+ *   the board supports. The value programmed into the board has to be
+ *   between 0 and maxStartDelay, inclusive. The value of each unit
+ *   can be learned from the startDelayResolution parameter.
+ *
+ * startDelayResolution
+ *   This parameter returns the number of nanoseconds that one unit
+ *   of sync start delay corresponds to.
+ *
+ * maxSyncInterval
+ *   This parameter returns the maximum duration of the house sync interval
+ *   between frame lock sync cycles that the board supports. The value
+ *   programmed into the board has to be between 0 and maxSyncInterval,
+ *   inclusive.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_CAPS (0x30f10105) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS {
+    NvU32  revId;
+    NvU32  boardId;
+    NvU32  minRevRequired;
+    NvBool isFirmwareRevMismatch;
+    NvU32  revision;
+    NvU32  extendedRevision;
+    NvU32  capFlags;
+    NvU32  maxSyncSkew;
+    NvU32  syncSkewResolution;
+    NvU32  maxStartDelay;
+    NvU32  startDelayResolution;
+    NvU32  maxSyncInterval;
+} NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS;
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060 (0x00002060)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061 (0x00002061)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_2DPS (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_3DPS (0x00000002)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_4DPS (0x00000004)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_NEED_MASTER_BARRIER_WAR (0x00000010)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_SYNC_LOCK_EVENT (0x10000000)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_HOUSE_SYNC_EVENT (0x20000000)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FRAME_COUNT_EVENT (0x40000000)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_PRIMARY_CONNECTOR_EVENT (0x01000000)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ALL_CONNECTOR_EVENT (0x02000000)
+
+// For P2060, clients can only request video modes at the BNC connector,
+// e.g. no house sync, TTL, Composite, etc.
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_GET_VIDEO_MODE (0x00100000)
+
+/*
+ * NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY
+ *
+ * This command returns the list of GPU IDs connected with the associated
+ * gsync device.
+ *
+ * gpus
+ *   This array is set by RM to contain the gpu connection information
+ *   for gpus attached to the gsync device. Valid entries are contiguous,
+ *   beginning with the first entry in the list. The elements of this array
+ *   contain the following fields:
+ *   gpuId
+ *     This field contains the ID of the connected GPU. If the entry in the
+ *     table is invalid, this field contains NV30F1_CTRL_GPU_INVALID_ID.
+ *   connector
+ *     This field indicates which connector on the device the GPU is connected
+ *     to (i.e. the primary or secondary connector), if any.
+ *   proxyGpuId
+ *     If the 'connector' field indicates that the GPU is not connected to
+ *     a G-Sync device directly, then this field contains the ID of the
+ *     GPU that acts as a proxy, i.e. the GPU to which this GPU should be
+ *     a RasterLock slave.
+ * connectorCount
+ *   This parameter indicates the number of GPU connectors available on
+ *   the gsync device. The connector count of the gsync device may be
+ *   less than NV30F1_CTRL_MAX_GPUS_PER_GSYNC.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY (0x30f10106) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_MAX_GPUS_PER_GSYNC 4
+#define NV30F1_CTRL_GPU_INVALID_ID (0xffffffff)
+
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS {
+    struct {
+        NvU32 gpuId;
+        NvU32 connector;
+        NvU32 proxyGpuId;
+    } gpus[NV30F1_CTRL_MAX_GPUS_PER_GSYNC];
+    NvU32 connectorCount;
+} NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS;
+
+/*
+ * connector values
+ *
+ */
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE 1
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_TWO 2
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_THREE 3
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_FOUR 4
+
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_NONE 0
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PRIMARY NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE
+#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_SECONDARY NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_TWO
+
+
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC
+ *
+ * This command enables frame sync on displays.
+ *
+ * gpuId
+ *   This parameter is set by the client to indicate the gpuId on which
+ *   frame lock will be enabled.
+ * master
+ *   This parameter is set by the client to specify whether this/these
+ *   displays should be set as the master or as slaves. If this is a GET
+ *   and displays is not 0, this will be set by the RM to indicate if
+ *   the display can be the master.
+ * displays
+ *   This is a device mask set by the client to indicate which display(s)
+ *   are to be synched. Note that only one display may be set as master.
+ *   If this is a GET, this is set by the client to indicate which display
+ *   is to be queried. If the display cannot be synched to this device,
+ *   the RM will overwrite the mask with a 0.
+ * validateExternal
+ *   This parameter is set by the client to tell the RM to validate the
+ *   presence of an external sync source when enabling a master.
+ * refresh
+ *   This parameter is set by the client to indicate the desired refresh rate.
+ *   The value is in 0.0001 Hertz (i.e. it has been multiplied by 10000).
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC
+ *
+ * This command enables frame sync on displays.
+ *
+ * gpuId
+ * The parameter is set by the client to indicate the gpuId on which
+ * frame lock will be enabled.
+ * master
+ * This parameter is set by the client to specify whether this/these
+ * displays should be set as the master or as slaves. If this is a GET
+ * and displays is not 0, this will be set by the RM to indicate if
+ * the display can be the master.
+ * displays
+ * This is a device mask set by the client to indicate which display(s)
+ * are to be synched. Note that only one display may be set as master.
+ * If this is a GET, this is set by the client to indicate which display
+ * is to be queried. If the display cannot be synched to this device,
+ * the RM will overwrite the mask with a 0.
+ * validateExternal
+ * This parameter is set by the client to tell the RM to validate the
+ * presence of an external sync source when enabling a master.
+ * refresh
+ * This parameter is set by the client to indicate the desired refresh rate.
+ * The value is in 0.0001 Hertz (i.e. it has been multiplied by 10000).
+ * configFlags
+ * This parameter contains flags for specific options. So far only
+ * NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_CONFIG_FLAGS_KEEP_MASTER_SWAPBARRIER_DISABLED
+ * is supported, which allows the caller to prevent the RM code from
+ * automatically enabling the swap barrier on framelock masters on FPGA
+ * revisions <= 5.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SYNC (0x30f10110) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC (0x30f10111) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS_MESSAGE_ID" */
+
+// If set, the swap barrier is not enabled automatically when enabling a
+// framelock master on FPGA revs <= 5.
+#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_CONFIG_FLAGS_KEEP_MASTER_SWAPBARRIER_DISABLED (0x00000001)
+
+#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS {
+    NvU32 gpuId;
+    NvU32 master;
+    NvU32 displays;
+    NvU32 validateExternal;
+    NvU32 refresh;
+    NvU32 configFlags;
+} NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS;
+#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS_MESSAGE_ID (0x11U)
+
+typedef NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS;
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC
+ *
+ * This command disables frame sync on displays.
+ *
+ * gpuId
+ * The parameter is set by the client to indicate the gpuId on which
+ * frame lock will be disabled.
+ * master
+ * This parameter is set by the client to specify whether the
+ * display(s) to be unset are a master or slaves.
+ * displays
+ * This is a device mask set by the client to indicate which display(s)
+ * are to be unsynched.
+ * retainMaster
+ * Retain the designation as master, but unsync the displays.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC (0x30f10112) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS {
+    NvU32 gpuId;
+    NvU32 master;
+    NvU32 displays;
+    NvU32 retainMaster;
+} NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS;
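+
+/*
+ * Illustrative sketch of enabling frame lock with one display as master,
+ * under the same assumed NvRmControl()/handle conventions; displayMask is a
+ * hypothetical client-chosen device mask:
+ *
+ *   NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS sync = { 0 };
+ *   NV_STATUS status;
+ *   sync.gpuId            = gpuId;
+ *   sync.master           = 1;            // make this display the master
+ *   sync.displays         = displayMask;  // exactly one bit set for a master
+ *   sync.validateExternal = 0;
+ *   sync.refresh          = 600000;       // 60.0000 Hz, in 0.0001 Hz units
+ *   status = NvRmControl(hClient, hGsync,
+ *                        NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC,
+ *                        &sync, sizeof(sync));
+ */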
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC
+ *
+ * This command gets the sync state for the gpus attached to the
+ * framelock device. Note that the frame lock device only has
+ * knowledge of sync status at the gpu level, not the display
+ * device level.
+ *
+ * gpuId
+ * The parameter is set by the client to indicate which gpuId is to be
+ * queried.
+ * bTiming
+ * This parameter is set by the RM to indicate that timing on the GPU is
+ * in sync with the master sync signal.
+ * bStereoSync
+ * This parameter is set by the RM to indicate whether the phase of the
+ * timing signal coming from the GPU is the same as the phase of the
+ * master sync signal.
+ * bSyncReady
+ * This parameter is set by the RM to indicate if a sync signal has
+ * been detected.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC (0x30f10113) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS {
+    NvU32 gpuId;
+    NvU32 bTiming;
+    NvU32 bStereoSync;
+    NvU32 bSyncReady;
+} NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS;
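+
+/*
+ * Illustrative sketch of polling the per-GPU sync state (same assumed
+ * NvRmControl()/handle conventions as above):
+ *
+ *   NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS ss = { 0 };
+ *   ss.gpuId = gpuId;
+ *   if ((NvRmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC,
+ *                    &ss, sizeof(ss)) == NV_OK) &&
+ *       ss.bSyncReady && ss.bTiming)
+ *   {
+ *       // this GPU is locked to the master sync signal
+ *   }
+ */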
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS
+ *
+ * This command gets status information for the frame lock device
+ * relevant to a control panel.
+ *
+ * which
+ * This is a mask set by the client describing which of the other
+ * parameters we should collect status information for.
+ * bLeadingEdge
+ * This parameter is set by the RM to indicate that the gsync device is
+ * set to sync to the leading edge of a house sync signal. Note that
+ * this does not mean that house sync is the signal source.
+ * bFallingEdge
+ * This parameter is set by the RM to indicate that the gsync device is
+ * set to sync to the falling edge of a house sync signal. Note that
+ * this does not mean that house sync is the signal source.
+ * syncDelay
+ * This parameter is set by the RM to indicate the sync delay in
+ * microseconds.
+ * refresh
+ * This parameter is set by the RM to indicate the rate of the frame sync
+ * pulse in 0.0001 Hertz (i.e. it has been multiplied by 10000). This is
+ * not the refresh rate of the display device. This is the same as the
+ * incoming house sync rate if framelocked to an external house sync
+ * signal. Otherwise, this is the same as the refresh rate of the master
+ * display device.
+ * houseSyncIncoming
+ * This parameter is set by the RM to indicate the rate of an incoming
+ * house sync signal in 0.0001 Hertz (i.e. it has been multiplied by 10000).
+ * syncInterval
+ * This parameter is set by the RM to indicate the number of incoming
+ * sync pulses to wait before the generation of the frame sync pulse.
+ * bSyncReady
+ * This parameter is set by the RM to indicate if a sync signal has
+ * been detected (this parameter is also available from the
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC method).
+ * bSwapReady
+ * This parameter is set by the RM to indicate if the hardware is
+ * ready to swap.
+ * bHouseSync
+ * This parameter is set by the RM to indicate that a house sync signal
+ * should be used as the source signal if it is available.
+ * bPort0Input
+ * This parameter is set by the RM to indicate that RJ45 port 0 is
+ * configured as an input.
+ * bPort1Input
+ * This parameter is set by the RM to indicate that RJ45 port 1 is
+ * configured as an input.
+ * bPort0Ethernet
+ * This parameter is set by the RM to indicate that RJ45 port 0 has
+ * been connected to an ethernet hub (this is not the right thing to do).
+ * bPort1Ethernet
+ * This parameter is set by the RM to indicate that RJ45 port 1 has
+ * been connected to an ethernet hub (this is not the right thing to do).
+ * universalFrameCount
+ * This parameter is set by the RM to indicate the value of the
+ * universal frame counter.
+ * bInternalSlave
+ * This parameter is set by the RM to indicate that a P2061 has been
+ * configured as an internal slave.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS (0x30f10114) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS {
+    NvU32 which;
+    NvU32 bLeadingEdge;
+    NvU32 bFallingEdge;
+    NvU32 syncDelay;
+    NvU32 refresh;
+    NvU32 houseSyncIncoming;
+    NvU32 syncInterval;
+    NvU32 bSyncReady;
+    NvU32 bSwapReady;
+    NvU32 bHouseSync;
+    NvU32 bPort0Input;
+    NvU32 bPort1Input;
+    NvU32 bPort0Ethernet;
+    NvU32 bPort1Ethernet;
+    NvU32 universalFrameCount;
+    NvU32 bInternalSlave;
+} NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS;
+
+/*
+ * which values
+ */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_POLARITY (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_LEADING_EDGE (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_FALLING_EDGE (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_DELAY (0x00000002)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_REFRESH (0x00000004)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC_INCOMING (0x00000008)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_INTERVAL (0x00000010)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY (0x00000020)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SWAP_READY (0x00000040)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_TIMING (0x00000080)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_STEREO_SYNC (0x00000100)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC (0x00000200)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT_INPUT (0x00000400)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_INPUT (0x00000400)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_INPUT (0x00000400)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT_ETHERNET (0x00000800)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_ETHERNET (0x00000800)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_ETHERNET (0x00000800)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_UNIVERSAL_FRAME_COUNT (0x00001000)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_INTERNAL_SLAVE (0x00002000)
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING
+ *
+ * This command controls the test signal on the gsync device.
+ *
+ * bEmitTestSignal
+ * This parameter is set by the client to emit or stop emitting the test
+ * signal.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_TESTING (0x30f10120) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING (0x30f10121) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS {
+    NvU32 bEmitTestSignal;
+} NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS;
+#define NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS_MESSAGE_ID (0x20U)
+
+typedef NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS;
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG
+ *
+ * This command enables and disables the gsync watchdog.
+ *
+ * enable
+ * This parameter is set by the client to enable or disable the
+ * gsync watchdog.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG (0x30f10130) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS_MESSAGE_ID (0x30U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS {
+    NvU32 enable;
+} NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS;
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE
+ *
+ * This command enables or disables interlace mode.
+ *
+ * enable
+ * This parameter is set by the client to enable or disable
+ * interlace mode.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_INTERLACE_MODE (0x30f10140) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE (0x30f10141) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID (0x41U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS {
+    NvU32 enable;
+} NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS;
+#define NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID (0x40U)
+
+typedef NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS;
+
+/*
+ *
+ * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_BARRIER
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER
+ *
+ * These commands enable or disable the swap barrier
+ * connection between a GPU and the rest of the gsync
+ * network.
+ *
+ * gpuId
+ * The parameter is set by the client to indicate which gpuId is to be
+ * queried.
+ * enable
+ * In a set command, this parameter is set by the client to
+ * indicate if the barrier should be enabled (i.e. connected
+ * to the rest of the network) or disabled (disconnected).
+ * In both a set and a get command, if successful, the RM
+ * uses this parameter to return the current (i.e. post-set)
+ * value.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_BARRIER (0x30f10150) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER (0x30f10151) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID (0x51U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS {
+    NvU32  gpuId;
+    NvBool enable;
+} NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS;
+#define NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID (0x50U)
+
+typedef NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS;
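+
+/*
+ * Illustrative sketch of connecting a GPU to the swap barrier network
+ * (same assumed NvRmControl()/handle conventions as above):
+ *
+ *   NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS barrier = { 0 };
+ *   NV_STATUS status;
+ *   barrier.gpuId  = gpuId;
+ *   barrier.enable = NV_TRUE;
+ *   status = NvRmControl(hClient, hGsync,
+ *                        NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER,
+ *                        &barrier, sizeof(barrier));
+ *   // on success, barrier.enable holds the post-set state
+ */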
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW
+ *
+ * This command allows clients to obtain the time period for which the
+ * SwapLock window will remain HIGH for GSYNC III (P2060), i.e. TswapRdyHi.
+ * RM clients will use this value for programming SWAP_LOCKOUT_START on all
+ * heads of the GPU connected to P2060.
+ *
+ * tSwapRdyHi
+ * RM will return the swap lock window High time period in this variable.
+ * By default tSwapRdyHi is 250 microseconds. RM also provides a regkey to
+ * change this value. tSwapRdyHi is also used by RM to configure the value
+ * of LSR_MIN_TIME while programming the swap barrier.
+ * Clients should consider tSwapRdyHi only for Gsync III (P2060) networks.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW (0x30f10153) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS_MESSAGE_ID (0x53U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS {
+    NvU32 tSwapRdyHi;
+} NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS;
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING
+ *
+ * This command allows the client to obtain suggested
+ * adjustments to vertical and horizontal timing values
+ * that will improve the ability of gsync to lock.
+ *
+ * gpuId
+ * This parameter is set by the client to indicate the
+ * gpuId of the GPU to which the display to be optimized
+ * is attached.
+ * display
+ * This parameter is not used by RM currently.
+ * Clients can ignore this parameter. Note that this
+ * parameter will be removed in the future.
+ * output
+ * This parameter is set by the client to indicate the
+ * output resource type of the display to be optimized.
+ * For example, CRTs use DAC output, while DFPs use SOR
+ * (Serial Output Resource) type.
+ * protocol
+ * This parameter is set by the client to indicate the
+ * data protocol of the output resource. For DAC displays,
+ * the format of the standard mode most closely matching
+ * the desired mode is used. For SOR display devices,
+ * the LVDS/TMDS/etc format is the protocol.
+ * structure
+ * This parameter is set by the client to indicate the
+ * raster structure of the mode, either progressive or
+ * interlaced. Diagrams of the raster structures are
+ * provided below.
+ * adjust
+ * This parameter is set by the client to specify which
+ * of the timing values, other than hTotal and vTotal,
+ * may be adjusted during optimization.
+ * If the client does not obtain instructions from the
+ * user about where adjustments should be applied, safe
+ * default values for progressive/interlaced modes are
+ * provided below.
+ * hTotal
+ * This parameter is set by the client to specify the
+ * initial Horizontal Pixel Total, from which the
+ * RM will begin optimizing. The RM also uses the
+ * parameter to return the optimized value.
+ * vTotal
+ * This parameter is set by the client to specify the
+ * initial Vertical Pixel Total, from which the
+ * RM will begin optimizing. The RM also uses the
+ * parameter to return the optimized value.
+ * hBlankStart
+ * This parameter is set by the client to specify the
+ * initial Horizontal Blanking Start, from which the
+ * RM will begin optimizing. The RM also uses the
+ * parameter to return the optimized value.
+ * vBlankStart
+ * This parameter is set by the client to specify the
+ * initial Vertical Blanking Start, from which the
+ * RM will begin optimizing. The RM also uses the
+ * parameter to return the optimized value.
+ * hBlankEnd + * This parameter is set by the client to specify the + * initial Horizontal Blanking End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vBlankEnd + * This parameter is set by the client to specify the + * initial Vertical Blanking End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vInterlacedBlankStart + * This parameter is set by the client to specify the + * initial Interlaced Vertical Blanking Start, from + * which the RM will begin optimizing. The RM will + * ignore this parameter for non-interlaced modes, as + * it has no meaning in those modes. In modes where + * it is meaningful, the RM also uses the parameter + * to return the optimized value. + * vInterlacedBlankEnd + * This parameter is set by the client to specify the + * initial Interlaced Vertical Blanking End, from + * which the RM will begin optimizing. The RM will + * ignore this parameter for non-interlaced modes, as + * it has no meaning in those modes. In modes where + * it is meaningful, the RM also uses the parameter + * to return the optimized value. + * hSyncEnd + * This parameter is set by the client to specify the + * initial Horizontal Raster Sync End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vSyncEnd + * This parameter is set by the client to specify the + * initial Vertical Raster Sync End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * hDeltaStep + * This parameter is set by the client to specify the + * increments by which the Horizontal Pixel Total may + * be adjusted by the RM, during optimization. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * vDeltaStep + * This parameter is set by the client to specify the + * increments by which the vertical timings of each + * frame (in interlaced modes, each field) may be + * adjusted by the RM, during optimization. + * In interlaced modes, the adjustments to vTotal, + * vInterlacedBlankStart, and vInterlacedBlankEnd may + * be in increments of vDeltaStep or twice vDeltaStep, + * depending on where adjustments are made. + * In progressive modes, the adjustment to the vTotal + * will simply be in increments of vDeltaStep. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * hDeltaMax + * This parameter is set by the client to specify the + * maximum amount that the Horizontal Pixel Total may + * be adjusted by the RM, during optimization. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * vDeltaMax + * This parameter is set by the client to specify the + * maximum amount that vertical timings of each frame + * (in interlaced modes, each field) may be adjusted + * by the RM, during optimization. + * In interlaced modes, the adjustments to vTotal, + * vInterlacedBlankStart, and vInterlacedBlankEnd may + * be up to twice vDeltaMax. 
+ * In progressive modes, the adjustment to the vTotal
+ * may simply be up to vDeltaMax.
+ * If the client does not obtain a custom value for
+ * this parameter from the user, setting all four of
+ * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax
+ * to zero will result in a safe default for all four.
+ * refreshX10K
+ * This parameter is set by the client to specify the
+ * desired refresh rate, multiplied by 10000. This
+ * allows the refresh rate to be set in units of 0.0001 Hz.
+ * For example, a 59.94 Hz rate would be set as 599400.
+ * The client can alternatively specify the
+ * pixelClockHz parameter (if the passed in refreshX10K
+ * parameter is set to 0, the pixelClockHz parameter
+ * will be used).
+ * pixelClockHz
+ * This parameter is set by the client to specify the
+ * desired pixel clock frequency in units of Hz. The
+ * client can alternatively specify the refreshX10K parameter.
+ * This parameter is returned by the RM to report the
+ * optimal pixel clock to use with the adjusted mode,
+ * in units of Hz.
+ *
+ * Progressive Raster Structure
+ *
+ * hSyncEnd hTotal
+ * 0 | hBlankEnd hBlankStart |
+ * | | | | | vSync vBlank
+ * 0--+--------------------------------------------+ +-+ |
+ * | Sync | | |
+ * vSyncEnd--| +----------------------------------------+ +-+ |
+ * | | Back Porch | | |
+ * vBlankEnd--| | +--------------------------------+ | | +-+
+ * | | | Active Area | | | |
+ * | | | +------------------------+ | | | |
+ * | | | | | | | | |
+ * | S | B | A | | A | F | | |
+ * | y | a | c | | c | r | | |
+ * | n | c | t | | t | o | | |
+ * | c | k | i | | i | n | | |
+ * | | | v | | v | t | | |
+ * | | P | e | Output Viewport | e | | | |
+ * | | o | | | | P | | |
+ * | | r | A | | A | o | | |
+ * | | c | r | | r | r | | |
+ * | | h | e | | e | c | | |
+ * | | | a | | a | h | | |
+ * | | | | | | | | |
+ * | | | +------------------------+ | | | |
+ * | | | Active Area | | | |
+ * vBlankStart-| | +--------------------------------+ | | +-+
+ * | | Front Porch | | |
+ * vTotal--+---+----------------------------------------+ +-+ |
+ * ___
+ * / \________________________________________/ hSync
+ * ________ ____
+ * \________________________________/ hBlank
+ *
+ *
+ *
+ * Interlaced Raster Structure
+ *
+ * hSyncEnd hTotal
+ * 0 | hBlankEnd hBlankStart |
+ * | | | | | vSync vBlank
+ * 0--+--------------------------------------------+ +-+ |
+ * | Sync | | |
+ * vSyncEnd--| +----------------------------------------+ +-+ |
+ * | | Back Porch | | |
+ * vBlankEnd--| | +--------------------------------+ | | +-+
+ * | | | Active Area | | | |
+ * | | | +------------------------+ | | | |
+ * | | | | | | | | |
+ * | S | B | A | | A | F | | |
+ * | y | a | c | | c | r | | |
+ * | n | c | t | | t | o | | |
+ * | c | k | i | | i | n | | |
+ * | | | v | | v | t | | |
+ * | | P | e | Output Viewport | e | | | |
+ * | | o | | | | P | | |
+ * | | r | A | | A | o | | |
+ * | | c | r | | r | r | | |
+ * | | h | e | | e | c | | |
+ * | | | a | | a | h | | |
+ * | | | | | | | | |
+ * | | | +------------------------+ | | | |
+ * | | | Active Area | | | |
+ * vBlankStart-| | +--------------------------------+ | | +-+
+ * | | | | |
+ * | | Front Porch +--------------------+ | |
+ * | | | | +-+ |
+ * | +-------------------+ | | |
+ * | | | |
+ * | Sync +--------------------+ | |
+ * | | | +-+ |
+ * | +-------------------+ | | |
+ * vInterlaced | | Back Porch | | |
+ * BlankEnd--| | +--------------------------------+ | | +-+
+ * | | | Active Area | | | |
+ * | | | +------------------------+ | | | |
+ * | | | | | | | | |
+ * | S | B | A | | A | F | | |
+ * | y | a | c | | c | r | | |
+ * | n | c | t | | t | o | | |
+ * | c | k | i | | i | n | | |
+ * | | | v | | v | t | | |
+ * | | P | e | Output Viewport | e | | | |
+ * | | o | | | | P | | |
+ * | | r | A | | A | o | | |
+ * | | c | r | | r | r | | |
+ * | | h | e | | e | c | | |
+ * | | | a | | a | h | | |
+ * | | | | | | | | |
+ * | | | +------------------------+ | | | |
+ * vInterlaced | | | Active Area | | | |
+ * BlankStart-| | +--------------------------------+ | | +-+
+ * | | Front Porch | | |
+ * vTotal--+---+----------------------------------------+ +-+ |
+ * ___
+ * / \________________________________________/ hSync
+ * ________ ____
+ * \________________________________/ hBlank
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+*/
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING (0x30f10160) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS_MESSAGE_ID (0x60U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS {
+    NvU32 gpuId;
+    NvU32 display;
+    NvU32 output;
+    NvU32 protocol;
+    NvU32 structure;
+    NvU32 adjust;
+    NvU32 hDeltaStep;
+    NvU32 hDeltaMax;
+    NvU32 vDeltaStep;
+    NvU32 vDeltaMax;
+    NvU32 hSyncEnd;
+    NvU32 hBlankEnd;
+    NvU32 hBlankStart;
+    NvU32 hTotal;
+    NvU32 vSyncEnd;
+    NvU32 vBlankEnd;
+    NvU32 vBlankStart;
+    NvU32 vInterlacedBlankEnd;
+    NvU32 vInterlacedBlankStart;
+    NvU32 vTotal;
+    NvU32 refreshX10K;
+    NvU32 pixelClockHz;
+} NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS;
+
+/* output values */
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR (0x00000004)
+
+/* protocol values for DAC displays (e.g. CRTs) */
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT (0x00000000)
+
+/* protocol values for SOR displays (e.g. DFPs) */
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS (0x00000005)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A (0x00000008)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B (0x00000009)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL (0x0000000C)
+
+/* structure values */
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED (0x00000001)
+
+/* adjust values */
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_FRONT_PORCH (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_FRONT_PORCH (0x00000002)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_ACTIVE_AREA (0x00000004)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_ACTIVE_AREA (0x00000008)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_BACK_PORCH (0x00000010)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_BACK_PORCH (0x00000020)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_RASTER_SYNC (0x00000040)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_RASTER_SYNC (0x00000080)
+
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT (0x00000030)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP (0x00000020)
+
+/* DeltaStep and DeltaMax values to trigger default settings */
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS (0x00000000)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS (0x00000000)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS (0x00000000)
+#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS (0x00000000)
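+
+/*
+ * Illustrative sketch of requesting optimized timings for a progressive
+ * DisplayPort mode on a DFP. The initial hTotal/vTotal here are
+ * illustrative CVT-style numbers chosen for this sketch, not values
+ * prescribed by the API (same assumed NvRmControl()/handle conventions
+ * as above):
+ *
+ *   NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS t = { 0 };
+ *   NV_STATUS status;
+ *   t.gpuId       = gpuId;
+ *   t.output      = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR;
+ *   t.protocol    = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A;
+ *   t.structure   = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE;
+ *   t.adjust      = NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP;
+ *   t.hTotal      = 2200;     // initial raster values for a 1080p mode
+ *   t.vTotal      = 1125;
+ *   t.refreshX10K = 600000;   // 60 Hz; leave pixelClockHz 0 to use this
+ *   // hDeltaStep/vDeltaStep/hDeltaMax/vDeltaMax left 0 => safe defaults
+ *   status = NvRmControl(hClient, hGsync,
+ *                        NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING,
+ *                        &t, sizeof(t));
+ *   // on NV_OK, t.hTotal/t.vTotal/t.pixelClockHz hold the adjusted mode
+ */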
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_EVENT_NOTIFICATION
+ *
+ * This command sets event notification state for the associated Gsync
+ * object. This command requires that an instance of NV01_EVENT has
+ * been previously bound to the associated Gsync object.
+ *
+ * If one or more of the "smart event notification" options are set in the
+ * action parameter, multiple sequential events of the same type will only
+ * trigger one notification. After that, only an event of a different type
+ * will trigger a new notification.
+ *
+ * action
+ * This member specifies the desired event notification action.
+ * Valid notification actions include:
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_DISABLE
+ * This action disables event notification for the associated
+ * Gsync object.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS
+ * This action enables smart event notification for the
+ * associated Gsync object, for "sync loss" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN
+ * This action enables smart event notification for the
+ * associated Gsync object, for "sync gained" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_LOSS
+ * This action enables smart event notification for the
+ * associated Gsync object, for "stereo lost" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_GAIN
+ * This action enables smart event notification for the
+ * associated Gsync object, for "stereo gained" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_GAIN
+ * This action enables smart event notification for the
+ * associated Gsync object, for "house sync (BNC) plug in" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_LOSS
+ * This action enables smart event notification for the
+ * associated Gsync object, for "house sync (BNC) plug out" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_GAIN
+ * This action enables smart event notification for the
+ * associated Gsync object, for "ethernet (RJ45) plug in" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_LOSS
+ * This action enables smart event notification for the
+ * associated Gsync object, for "ethernet (RJ45) plug out" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_COUNT_MATCH
+ * This action enables smart event notification for the
+ * associated Gsync object, for "frame counter match" events.
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_ALL
+ * This action enables smart event notification for the
+ * associated Gsync object, for any type of event.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_STATE
+ */
+#define NV30F1_CTRL_CMD_GSYNC_SET_EVENT_NOTIFICATION (0x30f10170) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS_MESSAGE_ID (0x70U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS {
+    NvU32 action;
+} NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS;
+
+/* valid action values */
+
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_DISABLE (0x00000000)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS (0x00000001)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN (0x00000002)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_LOSS (0x00000004)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_GAIN (0x00000008)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_GAIN (0x00000010)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_LOSS (0x00000020)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_GAIN (0x00000040)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_LOSS (0x00000080)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_COUNT_MATCH (0x00000100)
+#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_ALL (0x000001FF)
+
+#define NV30F1_CTRL_GSYNC_EVENT_TYPES 9
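+
+/*
+ * Illustrative sketch of enabling smart notification for sync loss/gain
+ * only; binding an NV01_EVENT instance to the Gsync object beforehand is
+ * assumed, as are the NvRmControl()/handle conventions used above:
+ *
+ *   NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS ev = { 0 };
+ *   NV_STATUS status;
+ *   ev.action = NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS |
+ *               NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN;
+ *   status = NvRmControl(hClient, hGsync,
+ *                        NV30F1_CTRL_CMD_GSYNC_SET_EVENT_NOTIFICATION,
+ *                        &ev, sizeof(ev));
+ */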
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE
+ * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE
+ *
+ * These commands can be used to get/set the stereo lock assistance mode of
+ * the GSYNC device. This is supported by GSYNC III devices only.
+ * 1] In this mode the GSYNC recreates the hidden VS either by
+ * (a) using the local stereo edge if stereo is toggling, or
+ * (b) counting lines and generating the missing VS.
+ * 2] The master GSYNC card recreates the stereo and passes it along to
+ * the slave GSYNC cards.
+ * 3] Slave GSYNC cards generate the stereo raster sync structure to
+ * synchronize the GPU.
+ * 4] For stereo sync status reporting, under this mode, the GSYNC automatically
+ * reports stereo lock whenever it gets the master stereo signal. The
+ * assumption is that local stereo will be in sync with the new structure.
+ * 5] If the slave GSYNC card does not observe master stereo for any reason,
+ * (a) it clears the stereo sync bit and
+ * (b) it generates its own version of stereo and syncs the GPU.
+ *
+ * Parameters:
+ * gpuId
+ * This parameter is set by the client to indicate the gpuId on which
+ * the stereo lock mode should be enabled/disabled.
+ *
+ * enable
+ * In a SET query, this parameter is set by the client to indicate whether
+ * RM should enable or disable stereo lock mode for the GPU specified in
+ * gpuId; 1 and 0 indicate enable and disable respectively. In a GET query,
+ * RM will set this parameter to 1 or 0 depending on whether stereo lock
+ * mode is enabled for the specified GPU.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE (0x30f10172) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE (0x30f10173) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID (0x72U)
+
+typedef struct NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS {
+    NvU32 gpuId;
+    NvU32 enable;
+} NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS;
+#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID (0x73U)
+
+typedef NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS;
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_READ_REGISTER
+ *
+ * This command is used to read raw i2c registers from the gsync device, via
+ * the given GPU (registers on the same gsync device may have different values
+ * depending on which GPU is used to do the read).
+ *
+ * This may only be used by a privileged client.
+ *
+ * Parameters:
+ * gpuId
+ * This parameter is set by the client to specify which GPU to use to
+ * perform the read.
+ *
+ * reg
+ * This parameter is set by the client to specify which i2c register to
+ * read.
+ *
+ * data
+ * This parameter is written by the RM and returned to the client upon a
+ * successful read.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+#define NV30F1_CTRL_CMD_GSYNC_READ_REGISTER (0x30f10180) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS_MESSAGE_ID (0x80U)
+
+typedef struct NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS {
+    NvU32 gpuId;
+    NvU8  reg;
+    NvU8  data;
+} NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS;
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_WRITE_REGISTER
+ *
+ * This command is used to write raw i2c registers on the gsync device, via the
+ * given GPU (registers on the same gsync device may have different values
+ * depending on which GPU is used to do the write).
+ *
+ * This may only be used by a privileged client.
+ *
+ * Parameters:
+ * gpuId
+ * This parameter is set by the client to specify which GPU to use to
+ * perform the write.
+ *
+ * reg
+ * This parameter is set by the client to specify which i2c register to
+ * write.
+ *
+ * data
+ * This parameter is set by the client to specify what data to write to the
+ * given i2c register.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INSUFFICIENT_PERMISSIONS
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_WRITE_REGISTER (0x30f10181) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS {
+    NvU32 gpuId;
+    NvU8  reg;
+    NvU8  data;
+} NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS;
+
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_SET_LOCAL_SYNC
+ *
+ * This command enables/disables raster sync on displays, i.e. within
+ * mosaic groups, between gpus.
+ *
+ * gpuTimingSource
+ * The parameter is set by the client to indicate the gpuId of the
+ * Timing Source gpu for the specified mosaic group.
+ * gpuTimingSlaves[]
+ * This parameter is set by the client to indicate the gpuIds of the
+ * timing slave gpus for the specified mosaic group. It should not contain
+ * more gpuIds than slaveGpuCount.
+ * slaveGpuCount
+ * This parameter is set by the client to indicate the count of timing
+ * slave gpus under the specified group.
+ * Referring to gsync3-P2060, slaveGpuCount can vary from 0x01 to 0x03,
+ * as the maximum number of connected gpus is four and one gpu must be
+ * the timing master for the mosaic group.
+ * mosaicGroupNumber
+ * This parameter is set by the client to tell the RM which mosaic
+ * group it should refer to.
+ * Referring to gsync3-P2060, mosaicGroupNumber can contain 0x00 or
+ * 0x01, as only two mosaic groups are possible.
+ * enableMosaic
+ * This parameter is set by the client to indicate whether RM
+ * should enable or disable mosaic.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_SET_LOCAL_SYNC (0x30f10185) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS_MESSAGE_ID (0x85U)
+
+typedef struct NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS {
+    NvU32  gpuTimingSource;
+    NvU32  gpuTimingSlaves[NV30F1_CTRL_MAX_GPUS_PER_GSYNC];
+    NvU32  slaveGpuCount;
+    NvU32  mosaicGroupNumber;
+    NvBool enableMosaic;
+} NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS;
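+
+/*
+ * Illustrative sketch of building a two-GPU mosaic group with one timing
+ * source and one slave (same assumed NvRmControl()/handle conventions;
+ * masterGpuId/slaveGpuId are hypothetical client-known IDs):
+ *
+ *   NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS mosaic = { 0 };
+ *   NV_STATUS status;
+ *   mosaic.gpuTimingSource    = masterGpuId;
+ *   mosaic.gpuTimingSlaves[0] = slaveGpuId;
+ *   mosaic.slaveGpuCount      = 1;
+ *   mosaic.mosaicGroupNumber  = 0;
+ *   mosaic.enableMosaic       = NV_TRUE;
+ *   status = NvRmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_SET_LOCAL_SYNC,
+ *                        &mosaic, sizeof(mosaic));
+ */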
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH
+ *
+ * This command configures GSYNC registers for pre-flash and post-flash
+ * operations. This is currently used for GSYNC-3 (P2060) only. RM clients
+ * have to make sure that they perform both pre-flash and post-flash
+ * operations on the GSYNC board. Skipping post-flash will cause a mismatch
+ * between RM cached data and GSYNC register values.
+ *
+ * Parameters:
+ * gpuId
+ * This parameter is set by the client to indicate the gpuId for which
+ * the GSYNC board connected to that GPU will be configured for pre-flash
+ * or post-flash operation, depending on the preFlash value.
+ *
+ * preFlash
+ * This parameter is set by the client to indicate whether RM has to configure
+ * GSYNC registers and SW state for the pre-flash or post-flash operation.
+ * Values 1 and 0 indicate that RM will configure the GSYNC board for
+ * pre-flash and post-flash operations respectively.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH (0x30f10186) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS_MESSAGE_ID (0x86U)
+
+typedef struct NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS {
+    NvU32 gpuId;
+    NvU32 preFlash;
+} NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS;
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_HOUSE_SYNC_MODE
+ * NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE
+ *
+ * These two commands get/set the house sync mode as input or output.
+ *
+ * Parameters:
+ * houseSyncMode
+ * This parameter indicates whether the house sync mode is input or
+ * output. For GET_HOUSE_SYNC_MODE, the current mode will be written
+ * by RM and returned to the client; for SET_HOUSE_SYNC_MODE, the client
+ * will write the new mode value to this parameter and pass it to RM
+ * for execution.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_DEVICE
+ * NV_ERR_INVALID_STATE
+ *
+ */
+#define NV30F1_CTRL_CMD_GSYNC_GET_HOUSE_SYNC_MODE (0x30f10187) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | 0x87" */
+
+#define NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE (0x30f10188) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | 0x88" */
+
+typedef struct NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS {
+    NvU8 houseSyncMode;
+} NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS;
+
+
+#define NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_INPUT (0x00)
+#define NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_OUTPUT (0x01)
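+
+/*
+ * Illustrative sketch of switching the house sync (BNC) connector to output
+ * mode; note these two commands use raw command values rather than a
+ * *_MESSAGE_ID (same assumed NvRmControl()/handle conventions as above):
+ *
+ *   NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS hs = { 0 };
+ *   NV_STATUS status;
+ *   hs.houseSyncMode = NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_OUTPUT;
+ *   status = NvRmControl(hClient, hGsync,
+ *                        NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE,
+ *                        &hs, sizeof(hs));
+ */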
+
+/* _ctrl30f1_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
new file mode 100644
index 0000000..eb9bef5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
@@ -0,0 +1,971 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl402c.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NV40_I2C control commands and parameters */
+#define NV402C_CTRL_CMD(cat,idx) \
+    NVXXXX_CTRL_CMD(0x402C, NV402C_CTRL_##cat, idx)
+
+/* I2C command categories (6 bits) */
+#define NV402C_CTRL_RESERVED (0x00)
+#define NV402C_CTRL_I2C (0x01)
+
+
+/* This field specifies the maximum regular port identifier allowed. */
+#define NV402C_CTRL_NUM_I2C_PORTS 16
+/* This temporary field specifies the dynamic port identifier. */
+#define NV402C_CTRL_DYNAMIC_PORT NV_U8_MAX
+
+/*
+ * NV402C_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV402C_CTRL_CMD_NULL (0x402c0000) /* finn: Evaluated from "(FINN_NV40_I2C_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+/*
+ * NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED
+ * The port exists on this hardware.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED
+ * The port has an entry in the DCB.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL
+ * The port is used to read EDIDs via DDC.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED
+ * The port is accessible via the CRTC register space.
+ * NV402C_CTRL_I2C_GET_PORT_INFO_VALID
+ * The port is validated using an I2C device.
+ */
+#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED 0:0
+#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED_NO 0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED_YES 0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED 1:1
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED_NO 0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED_YES 0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL 2:2
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL_ABSENT 0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL_PRESENT 0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED 3:3
+#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED_NO 0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED_YES 0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID 4:4
+#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID_NO 0x00
+#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID_YES 0x01
+#define NV402C_CTRL_I2C_GET_PORT_INFO_ALL 4:0
+#define NV402C_CTRL_I2C_GET_PORT_INFO_ALL_DEFAULT 0x00
+
+/*
+ * NV402C_CTRL_CMD_I2C_GET_PORT_INFO
+ *
+ * Returns information for the first NV402C_CTRL_NUM_I2C_PORTS I2C ports.
+ *
+ * info
+ * This parameter is an output from the command and is ignored as an
+ * input. Each element contains the flags described previously named
+ * NV402C_CTRL_I2C_GET_PORT_INFO*. Note that the index into the info
+ * array is one less than the port identifier that would be returned from
+ * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID; the port numbers here are
+ * 0-indexed as opposed to 1-indexed.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+
+
+#define NV402C_CTRL_CMD_I2C_GET_PORT_INFO (0x402c0101) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS_MESSAGE_ID" */
+
+
+
+#define NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS {
+    NvU8 info[NV402C_CTRL_NUM_I2C_PORTS];
+} NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS;
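+
+/*
+ * Illustrative sketch of finding a usable DDC-capable port. The
+ * FLD_TEST_DRF helper is assumed to come from nvmisc.h, and hI2c stands in
+ * for a client's NV40_I2C object handle; both are assumptions, as is the
+ * NvRmControl()-style dispatch:
+ *
+ *   NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS pi = { 0 };
+ *   NvU32 port;
+ *   if (NvRmControl(hClient, hI2c, NV402C_CTRL_CMD_I2C_GET_PORT_INFO,
+ *                   &pi, sizeof(pi)) == NV_OK)
+ *   {
+ *       for (port = 0; port < NV402C_CTRL_NUM_I2C_PORTS; port++)
+ *       {
+ *           if (FLD_TEST_DRF(402C, _CTRL_I2C_GET_PORT_INFO, _IMPLEMENTED,
+ *                            _YES, pi.info[port]) &&
+ *               FLD_TEST_DRF(402C, _CTRL_I2C_GET_PORT_INFO, _DDC_CHANNEL,
+ *                            _PRESENT, pi.info[port]))
+ *               break; // 0-indexed port suitable for EDID reads
+ *       }
+ *   }
+ */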
+#define NV402C_CTRL_I2C_INDEX_LENGTH_MAX 4
+#define NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX 4096
+
+//! Minimum and maximum valid read/write message length for block process protocol.
+#define NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MIN 3
+#define NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX 32
+
+/*
+ * NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE
+ * A client uses this field to indicate the I2C addressing mode to be
+ * used.
+ * Possible values are:
+ * NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT
+ * The default, this value specifies the master to operate in the
+ * basic 7-bit addressing mode, which is available on all
+ * implementations.
+ * NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_10BIT
+ * This I2C mode allows for 10 bits of addressing space and is
+ * backward compatible with 7-bit addressing.
+ */
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE 0:0
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT (0x00000000)
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_10BIT (0x00000001)
+#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_DEFAULT NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT
+/*
+ * NV402C_CTRL_I2C_FLAGS_SPEED_MODE
+ * A client uses this field to indicate the target speed at which the
+ * I2C master should attempt to drive the bus. The master may throttle
+ * its own speed for various reasons, and devices may slow the bus
+ * using clock-stretching. Neither of these possibilities is
+ * considered a failure.
+ */
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE 4:1
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_100KHZ (0x00000000)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_200KHZ (0x00000001)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_400KHZ (0x00000002)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_33KHZ (0x00000003)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_10KHZ (0x00000004)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_3KHZ (0x00000005)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_DEFAULT (0x00000006)
+#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_300KHZ (0x00000007)
+
+
+/*
+ * NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE
+ * A client uses this field to specify a transaction mode.
+ * Possible values are:
+ * NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL
+ * The default, this value indicates to use the normal I2C transaction
+ * mode which will involve read/write operations depending on the
+ * client's needs.
+ * NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING
+ * This value specifies that the device only needs to be pinged. There
+ * is no need to perform a complete read/write transaction. This will
+ * send a single byte to the device to be pinged. On receiving an ACK,
+ * we will get a confirmation of the device's availability.
+ */
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE 11:10
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL (0x00000000)
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING (0x00000001)
+#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_DEFAULT NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL
+/*!
+ * NV402C_CTRL_I2C_FLAGS_RESERVED
+ * A client must leave this field as 0, as it is reserved for future use.
+ */
+#define NV402C_CTRL_I2C_FLAGS_RESERVED 31:12
+
+/*!
+ * The following defines specify WAR flags that can be specified during an
+ * I2C Quick Read or Write command (refer to
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW).
+ *
+ * _NONE
+ * No workaround is needed.
+ *
+ * _TEST_PORT
+ * Use this flag to have the client send a request to test a port instead
+ * of performing any transaction on it. The transaction type has to be
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW.
+ */
+#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS 0:0
+#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_NONE 0x00000000
+#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_TEST_PORT 0x00000001
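+
+/*
+ * The NV402C_CTRL_I2C_FLAGS fields above are DRF-style bit ranges; an
+ * illustrative way to compose a flags word, assuming the DRF_DEF helper
+ * from nvmisc.h:
+ *
+ *   NvU32 flags = DRF_DEF(402C, _CTRL_I2C_FLAGS, _ADDRESS_MODE, _7BIT) |
+ *                 DRF_DEF(402C, _CTRL_I2C_FLAGS, _SPEED_MODE, _400KHZ) |
+ *                 DRF_DEF(402C, _CTRL_I2C_FLAGS, _TRANSACTION_MODE, _NORMAL);
+ */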
+
+/*!
+ * The following defines specify WAR flags that can be specified during an
+ * I2C Register Read or Write buffer command
+ * (refer to NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW).
+ *
+ * _NONE
+ * No workaround is needed.
+ *
+ * _SI1930
+ * SI1930 microcontroller register read or write requested by a client.
+ * The transaction type has to be NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW.
+ *
+ * _PX3540
+ * Register read from a PX3540 or PX3544 device. The transaction type has to
+ * be NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW and bWrite must be TRUE
+ * to indicate a READ operation.
+ */
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS 1:0
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_NONE 0x00000000
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_SI1930 0x00000001
+#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_PX3540 0x00000002
+
+/*!
+ * The following defines specify WAR flags that can be specified during an
+ * I2C buffer Read or Write to a Multibyte Register
+ * (refer to NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW).
+ *
+ * _NONE
+ * No workaround is needed.
+ *
+ * _NO_AUTO_INC
+ * This value specifies that the device does not support auto-increment.
+ * Most devices allow you to write multiple bytes after specifying a
+ * register address, and the subsequent bytes will go to incremented
+ * addresses. Without auto-increment, we write a buffer of data as a
+ * sequence of address-register-value triplets separated by starts.
+ */
+#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS 0:0
+#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NONE 0x00000000
+#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NO_AUTO_INC 0x00000001
+
+/*
+ * NV402C_CTRL_CMD_I2C_INDEXED
+ *
+ * Perform a basic I2C transaction synchronously.
+ *
+ * portId
+ * This field must be specified by the client to indicate the logical
+ * port/bus for which the transaction is requested. The port identifier
+ * is one less than the value returned by
+ * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID unless that value was 0 (the
+ * 'dynamic' port). For the 'dynamic' port, this should be 0xFF. Note
+ * that future versions of the API may obsolete use of the 'dynamic' port;
+ * please contact the RM if you begin using this portion of the API so we
+ * can help you migrate when the time comes.
+ *
+ * bIsWrite
+ * This field must be specified by the client to indicate whether the
+ * command is a write (TRUE) or a read (FALSE).
+ *
+ * flags
+ * This parameter specifies optional flags used to control certain modal
+ * features such as target speed and addressing mode. The currently
+ * defined fields are described previously; see NV402C_CTRL_I2C_FLAGS_*.
+ *
+ * address
+ * The address of the I2C slave. The address should be shifted left by
+ * one. For example, the I2C address 0x50, often used for reading EDIDs,
+ * would be stored here as 0xA0. This matches the position within the
+ * byte sent by the master, as the last bit is reserved to specify the
+ * read or write direction.
+ *
+ * indexLength
+ * This required parameter specifies how many bytes to write as part of the
+ * first index. If zero is specified, then no index will be sent.
+ *
+ * index
+ * This parameter, required of the client if indexLength is one or more,
+ * specifies the index to be written. The buffer should be arranged such
+ * that index[0] will be the first byte sent.
+ *
+ * messageLength
+ * This parameter, required of the client, specifies the number of bytes to
+ * read or write from the slave after the index is written.
+ *
+ * pMessage
+ * This parameter, required of the client, specifies the data to be written
+ * to the slave. The buffer should be arranged such that pMessage[0] will
+ * be the first byte read or written. If the transaction is a read, then
+ * it will follow the combined format described in the I2C specification.
+ * If the transaction is a write, the message will immediately follow the
+ * index without a restart.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_STATE_IN_USE
+ * NV_ERR_GENERIC, if the I2C transaction fails.
+ */
+#define NV402C_CTRL_I2C_INDEXED_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV402C_CTRL_I2C_INDEXED_PARAMS {
+    NvU8  portId;
+    NvU8  bIsWrite;
+    NvU16 address;
+    NvU32 flags;
+
+    NvU32 indexLength;
+    NvU8  index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX];
+
+    NvU32 messageLength;
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_INDEXED_PARAMS;
+
+#define NV402C_CTRL_CMD_I2C_INDEXED (0x402c0102) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_INDEXED_PARAMS_MESSAGE_ID" */
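+
+/*
+ * Illustrative sketch of an indexed transaction: reading the first 128
+ * bytes of an EDID at 7-bit address 0x50 (stored as 0xA0 once shifted for
+ * the R/W bit). NvRmControl(), hClient/hI2c, ddcPort, and the
+ * NV_PTR_TO_NvP64 conversion helper are assumed plumbing from the wider
+ * RM SDK:
+ *
+ *   NvU8 edid[128];
+ *   NV402C_CTRL_I2C_INDEXED_PARAMS xact = { 0 };
+ *   NV_STATUS status;
+ *   xact.portId        = ddcPort;       // 0-indexed, e.g. from GET_PORT_INFO
+ *   xact.bIsWrite      = 0;             // read
+ *   xact.address       = 0xA0;          // 0x50 << 1
+ *   xact.flags         = 0;             // 7-bit addressing, 100 kHz
+ *   xact.indexLength   = 1;
+ *   xact.index[0]      = 0x00;          // EDID byte offset
+ *   xact.messageLength = sizeof(edid);
+ *   xact.pMessage      = NV_PTR_TO_NvP64(edid);
+ *   status = NvRmControl(hClient, hI2c, NV402C_CTRL_CMD_I2C_INDEXED,
+ *                        &xact, sizeof(xact));
+ */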
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+#define NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS {
+    NvU8                        i2cDevCount;
+    NV402C_CTRL_I2C_DEVICE_INFO i2cDevInfo[NV402C_CTRL_I2C_MAX_DEVICES];
+} NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS;
+
+#define NV402C_CTRL_CMD_I2C_TABLE_GET_DEV_INFO (0x402c0104) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS_MESSAGE_ID" */
+
+/*!
+ * The IDs of each type of I2C command available.
+ */
+typedef enum NV402C_CTRL_I2C_TRANSACTION_TYPE {
+    /*!
+     * This transaction type is used to perform the quick SMBus read/write
+     * command on a slave device. No data is sent or received; it is used
+     * only to verify the presence of the device.
+     * Refer to SMBus spec 2.0 (section 5.5.1 Quick Command).
+     * SMBus Quick Write : S Addr|Wr [A] P
+     * SMBus Quick Read  : S Addr|Rd [A] P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW = 0,
+    /*!
+     * This transaction type is used to perform the I2C byte read/write
+     * from/to a slave device. As per the spec, the last byte should be NA
+     * (Not Acknowledged) by the slave.
+     * Refer to I2C-bus spec 3.0 (section 9, Fig 11 and Fig 12) or to SMBus
+     * spec 2.0 (section 5.5.2 Send Byte and 5.5.3 Receive Byte).
+     * I2C Byte Write : S Addr|Wr [A] Data [NA] P
+     * I2C Byte Read  : S Addr|Rd [A] Data NA P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW = 1,
+    /*!
+     * This transaction type is used to perform the I2C block (buffer)
+     * read/write from/to a slave device. As per the spec, the last byte
+     * should be NA (Not Acknowledged) by the slave.
+     * Refer to I2C-bus spec 3.0 (section 9, Fig 11 and Fig 12).
+     * I2C Block Write : S Addr|Wr [A] Data1 [A]...Data(N-1) [A] DataN [NA] P
+     * I2C Block Read  : S Addr|Rd [A] Data1 A...Data(N-1) A DataN NA P
+     *
+     * Distinction between the I2C_BLOCK and SMBUS_BLOCK protocols:
+     * In I2C block write it is the slave device (and in I2C block read it is
+     * the master device) that determines the number of bytes to transfer by
+     * asserting the NAK at the last bit before the stop. This differs from
+     * the SMBus block mode write command, in which the master determines the
+     * block write transfer size. In I2C block read there is no limit to the
+     * maximum size of data that can be transferred, whereas in SMBus block
+     * mode it is restricted to 255 bytes (0xFF).
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW = 2,
+    /*!
+     * This transaction type is used to perform the I2C buffer read/write
+     * from/to a register of a slave device. It does not send a byte count as
+     * part of the data buffer.
+     * Not a part of the SMBus spec.
+     * I2C Buffer Write : S Addr|Wr [A] cmd [A] Data1 [A]...DataN[A] P
+     * I2C Buffer Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] Data1 A...
+     *                    DataN-1 A DataN A P
+     *
+     * Distinction between the SMBUS_BLOCK and I2C_BUFFER protocols:
+     * In SMBUS_BLOCK read/write the first byte of the data buffer contains
+     * the count size (the number of bytes to be transferred) and it is
+     * restricted to 255 bytes, whereas in I2C_BUFFER the count size is not
+     * sent during the transfer and there is no restriction in terms of size.
+     *
+     * Distinction between the I2C_BLOCK and I2C_BUFFER protocols:
+     * I2C_BUFFER takes the register address as an argument, whereas
+     * I2C_BLOCK does not have any register or command provision.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW = 3,
+    /*!
+     * This transaction type is used to perform the SMBus byte read/write
+     * from/to a register of a slave device.
+     * Refer to SMBus spec 2.0 (section 5.5.4 Write Byte and 5.5.5 Read Byte).
+     * SMBus Byte Write : S Addr|Wr [A] cmd [A] Data [A] P
+     * SMBus Byte Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] Data A P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW = 4,
+    /*!
+     * This transaction type is used to perform the SMBus word read/write
+     * from/to a register of a slave device.
+     * Refer to SMBus spec 2.0 (section 5.5.4 Write Word and 5.5.5 Read Word).
+     * SMBus Word Write : S Addr|Wr [A] cmd [A] DataLow [A] DataHigh [A] P
+     * SMBus Word Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] DataLow A
+     *                    DataHigh A P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW = 5,
+    /*!
+     * This transaction type is used to perform the SMBus block read/write
+     * from/to a register of a slave device.
+     * Refer to SMBus spec 2.0 (section 5.5.7 Block Write/Read).
+     * SMBus Block Write : S Addr|Wr [A] cmd [A] ByteCount [A] Data1 [A]...
+     *                     DataN-1 [A] DataN[A] P
+     * SMBus Block Read  : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] ByteCount A
+     *                     Data1 A...DataN-1 A DataN A P
+     *
+     * Distinction between the I2C_BLOCK and SMBUS_BLOCK protocols:
+     * In I2C block write it is the slave device (and in I2C block read it is
+     * the master device) that determines the number of bytes to transfer by
+     * asserting the NAK at the last bit before the stop. This differs from
+     * the SMBus block mode write/read command, in which the master
+     * determines the block write transfer size. In I2C block read/write
+     * there is no limit to the maximum size of data that can be transferred,
+     * whereas in SMBus block mode it is restricted to 255 bytes (0xFF).
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW = 6,
+    /*!
+     * This transaction type is used to perform the SMBus process call. It
+     * sends data and waits for the slave to return a value dependent on that
+     * data. The protocol is simply an SMBus Write Word followed by an SMBus
+     * Read Word without the Read-Word command field and the Write-Word STOP
+     * bit. Note that there is no STOP condition before the repeated START
+     * condition, and that a NACK signifies the end of the read transfer.
+     *
+     * Refer to SMBus spec 2.0 (section 5.5.6 Process Call).
+     * SMBus Process Call : S Addr|Wr [A] cmd [A] DataLow [A] DataHigh [A]
+     *                      Sr Addr|Rd [A] DataLow [A] DataHigh [NA] P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL = 7,
+    /*!
+     * This transaction type is used to perform the SMBus Block Write Block
+     * Read process call.
+     * The block write-block read process call is a two-part message. The
+     * call begins with a slave address and a write condition. After the
+     * command code the host issues a write byte count (M) that describes how
+     * many more bytes will be written in the first part of the message.
+     * If a master has 6 bytes to send, the byte count field will have the
+     * value 6 (0000 0110b), followed by the 6 bytes of data. The write byte
+     * count (M) cannot be zero.
+     * The second part of the message is a block of read data beginning with
+     * a repeated start condition followed by the slave address and a Read
+     * bit. The next byte is the read byte count (N), which may differ from
+     * the write byte count (M). The read byte count (N) cannot be zero. The
+     * combined data payload must not exceed 32 bytes.
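+     * (For example, with a write byte count of M = 6, the read byte count N
+     * can be at most 26, since M + N must not exceed 32.)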
+     * The byte length restrictions of this process call are summarized as
+     * follows:
+     *     M >= 1 byte
+     *     N >= 1 byte
+     *     M + N <= 32 bytes
+     * Note that there is no STOP condition before the repeated START
+     * condition, and that a NACK signifies the end of the read transfer.
+     *
+     * Refer to SMBus spec 2.0 (section 5.5.8 Block Write Block Read Process
+     * Call).
+     * SMBus Process Call : S Addr|Wr [A] cmd [A] ByteCount=M [A] Data1 [A]...
+     *                      DataN-1 [A] DataM[A] Sr Addr|Rd [A] ByteCount=N [A]
+     *                      Data1 [A]...DataN [NA] P
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL = 8,
+    /*!
+     * This transaction type is used to perform SMBus buffer read/write
+     * from/to multiple registers of a slave device, known as auto-increment.
+     * It is not a part of any standard I2C/SMBus spec but is a feature of
+     * many SMBus devices, such as EEPROMs.
+     * It is also used for reading a block of bytes from a designated
+     * register that is specified through the two Comm bytes of a slave
+     * device, or for writing a block of bytes to a designated register of a
+     * slave device. (Note: the command byte in this case could be 0, 2 or 4
+     * bytes.)
+     * SMBus Multi-Byte Register Block Write : S Addr|Wr [A] cmd1 A cmd2 [A]...
+     *     cmdN [A] Data1 [A] Data2 [A].....DataN [A] P
+     * SMBus Multi-Byte Register Block Read  : S Addr|Rd [A] cmd1 A cmd2 [A]...
+     *     cmdN [A] data1 [A] Sr Addr [A] Data1 A Data2 A...DataN A P
+     *
+     * This transaction type can also be used for devices that support
+     * AUTO_INC. Even though it is frequently associated with I2C/SMBus,
+     * automatic incrementing is not part of any I2C standard but rather a
+     * common feature found in many I2C devices. It means that the device
+     * maintains an internal pointer which is automatically incremented upon
+     * data read or write activity and which can be manually set to a fixed
+     * value. This comes in handy when storing larger amounts of data, for
+     * instance in an ordinary I2C RAM or EEPROM.
+     * SMBus AUTO_INC Write : S Addr|Wr [A] cmd1 A Data1 [A] Data2 [A]...
+     *                        DataN [A] P
+     * SMBus AUTO_INC Read  : S Addr|Rd [A] cmd1 A data1 [A] Sr Addr [A]
+     *                        Data1 A Data2 A...DataN A P
+     * If the device does not support AUTO_INC, set the warFlags field of
+     * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW to
+     * NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NO_AUTO_INC.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW = 9,
+    /*!
+     * This transaction type is used to perform the EDID read via DDC.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC = 10,
+} NV402C_CTRL_I2C_TRANSACTION_TYPE;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * Transaction-specific flags
+     * (see NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_*).
+     */
+    NvU32  warFlags;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The main message data.
+     */
+    NvU8   message;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * This parameter specifies the number of bytes to read or write
+     * from/to the slave.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * The main message data.
+     */
+    NvU8   message;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * The main message data.
+     */
+    NvU16  message;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * Transaction-specific flags to be set (see
+     * NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_*).
+     */
+    NvU32  warFlags;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to read or write from
+     * the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW.
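+ *
+ * As an illustrative sketch (not a normative part of this header), an SMBus
+ * block read of up to 8 bytes from register 0x10 might fill this structure
+ * as follows, assuming the NV_PTR_TO_NvP64 helper from nvtypes.h and a
+ * caller-owned buffer:
+ *
+ *   NvU8 buf[8];
+ *   NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW d;
+ *   d.bWrite          = NV_FALSE;              // read direction
+ *   d.registerAddress = 0x10;                  // command/register code
+ *   d.messageLength   = sizeof(buf);           // payload bytes expected
+ *   d.pMessage        = NV_PTR_TO_NvP64(buf);  // message buffer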
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to read or write from
+     * the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL {
+    /*!
+     * The address of the register.
+     */
+    NvU8  registerAddress;
+    /*!
+     * The message data to be written to the slave.
+     */
+    NvU16 writeMessage;
+    /*!
+     * The message data to be read from the slave.
+     */
+    NvU16 readMessage;
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL {
+    /*!
+     * The address of the register.
+     */
+    NvU8  registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to write to the slave
+     * after the writeByteCount is sent to the slave.
+     */
+    NvU32 writeMessageLength;
+    /*!
+     * The message buffer to be written to the slave.
+     * C form: NvU8 writeMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX]
+     */
+    NvU8  writeMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX];
+    /*!
+     * This parameter specifies the number of bytes to read from the slave
+     * after the readByteCount is sent to the slave.
+     */
+    NvU32 readMessageLength;
+    /*!
+     * The message buffer to be read from the slave.
+     * C form: NvU8 readMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX]
+     */
+    NvU8  readMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX];
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is
+ * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW {
+    /*!
+     * This field must be specified by the client to indicate whether the
+     * command is a write (TRUE) or a read (FALSE).
+     */
+    NvBool bWrite;
+    /*!
+     * Transaction-specific flags (see
+     * NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_*).
+     */
+    NvU32  warFlags;
+    /*!
+     * This parameter specifies how many bytes to write as part of the
+     * register address. If zero is specified, then no index will be sent.
+     */
+    NvU32  indexLength;
+    /*!
+     * Optional indexing data; aka the register address.
+     * C form: NvU8 index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX]
+     */
+    NvU8   index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX];
+    /*!
+     * This parameter specifies the number of bytes to read or write from
+     * the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC
+ *
+ * Specifies the structure of data filled by the client for an I2C transaction
+ * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC.
+ */
+typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC {
+    /*!
+     * The segment number of the EDID block which is to be read.
+     */
+    NvU8   segmentNumber;
+    /*!
+     * The address of the register.
+     */
+    NvU8   registerAddress;
+    /*!
+     * This parameter specifies the number of bytes to read or write from
+     * the slave after the register address is written.
+     */
+    NvU32  messageLength;
+    /*!
+     * The main message data.
+     * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX]
+     */
+    NV_DECLARE_ALIGNED(NvP64 pMessage, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC;
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_DATA
+ *
+ * This union encapsulates the transaction data corresponding to the
+ * transaction types listed above.
+ */
+typedef union NV402C_CTRL_I2C_TRANSACTION_DATA {
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW smbusQuickData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW i2cByteData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW i2cBlockData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW i2cBufferData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW smbusByteData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW smbusWordData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW smbusBlockData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL smbusProcessData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL smbusBlockProcessData;
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW.
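+     *
+     * (Illustrative, not a normative part of this header: an EEPROM-style
+     * read of 16 bytes from a two-byte register offset would set transType
+     * in NV402C_CTRL_I2C_TRANSACTION_PARAMS, defined below, to this type
+     * and fill this member with bWrite = NV_FALSE, indexLength = 2,
+     * index[0]/index[1] = the offset bytes, messageLength = 16, and
+     * pMessage pointing at the caller's buffer.)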
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW smbusMultibyteRegisterData, 8);
+    /*!
+     * This transaction data is to be filled when the transaction type is
+     * NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC edidData, 8);
+} NV402C_CTRL_I2C_TRANSACTION_DATA;
+
+
+/*!
+ * NV402C_CTRL_I2C_TRANSACTION_PARAMS
+ *
+ * The params data structure for NV402C_CTRL_CMD_I2C_TRANSACTION.
+ */
+#define NV402C_CTRL_I2C_TRANSACTION_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV402C_CTRL_I2C_TRANSACTION_PARAMS {
+    /*!
+     * The logical port ID.
+     */
+    NvU8                             portId;
+    /*!
+     * This parameter specifies optional flags used to control certain modal
+     * features such as target speed and addressing mode. The currently
+     * defined fields are described previously; see NV402C_CTRL_I2C_FLAGS_*.
+     */
+    NvU32                            flags;
+    /*!
+     * The address of the I2C slave.
+     */
+    NvU16                            deviceAddress;
+    /*!
+     * The transaction type.
+     */
+    NV402C_CTRL_I2C_TRANSACTION_TYPE transType;
+    /*!
+     * The transaction data corresponding to the transaction type.
+     */
+    NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA transData, 8);
+} NV402C_CTRL_I2C_TRANSACTION_PARAMS;
+
+#define NV402C_CTRL_CMD_I2C_TRANSACTION (0x402c0105) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_TRANSACTION_PARAMS_MESSAGE_ID" */
+
+
+/* _ctrl402c_h_ */
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
new file mode 100644
index 0000000..49297f7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
@@ -0,0 +1,67 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070base.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* NV5070_DISPLAY control commands and parameters */
+
+#define NV5070_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x5070, NV5070_CTRL_##cat, idx)
+
+/* Display command categories (6 bits) */
+#define NV5070_CTRL_RESERVED (0x00)
+#define NV5070_CTRL_CHNCTL   (0x01)
+#define NV5070_CTRL_RG       (0x02)
+#define NV5070_CTRL_SEQ      (0x03)
+#define NV5070_CTRL_OR       (0x04)
+#define NV5070_CTRL_INST     (0x05)
+#define NV5070_CTRL_VERIF    (0x06)
+#define NV5070_CTRL_SYSTEM   (0x07)
+#define NV5070_CTRL_EVENT    (0x09)
+
+/*
+ * NV5070_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV5070_CTRL_CMD_NULL (0x50700000) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+// This struct must be the first member of all
+// 5070 control calls
+typedef struct NV5070_CTRL_CMD_BASE_PARAMS {
+    NvU32 subdeviceIndex;
+} NV5070_CTRL_CMD_BASE_PARAMS;
+
+/* _ctrl5070base_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
new file mode 100644
index 0000000..1916d7f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
@@ -0,0 +1,1181 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070chnc.finn
+//
+
+#include "ctrl/ctrl5070/ctrl5070base.h"
+#include "ctrl5070common.h"
+#include "nvdisptypes.h"
+
+#define NV5070_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD 2
+
+
+
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_NONE             (0x00000000)
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI        (NVBIT(0))
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF       (NVBIT(1))
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA        (NVBIT(2))
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK (NVBIT(3))
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK  (NVBIT(4))
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY       (NVBIT(5))
+#define NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT  (NVBIT(6))
+
+#define NV5070_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV5070_CTRL_IDLE_CHANNEL_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       channelClass;
+    NvU32                       channelInstance;
+
+    NvU32                       desiredChannelStateMask;
+    NvU32                       accelerators;  // For future expansion. Not yet implemented
+    NvU32                       timeout;       // For future expansion. Not yet implemented
+    NvBool                      restoreDebugMode;
+} NV5070_CTRL_IDLE_CHANNEL_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_STOP_OVERLAY
+ *
+ * This command tries to turn the overlay off ASAP.
+ *
+ *   channelInstance
+ *     This field indicates which of the two instances of the overlay
+ *     channel the cmd is meant for.
+ *
+ *   notifyMode
+ *     This field indicates the action RM should take once the overlay has
+ *     been successfully stopped. The options are (1) set a notifier or
+ *     (2) set the notifier and generate an OS event.
+ *
+ *   hNotifierCtxDma
+ *     Handle to the ctx dma for the notifier that must be written once the
+ *     overlay is stopped. The standard NvNotification notifier structure
+ *     is used.
+ *
+ *   offset
+ *     Offset within the notifier context dma where the notifier begins.
+ *     The offset must be 16-byte aligned.
+ *
+ *   hEvent
+ *     Handle to the event that RM must use to awaken the client when
+ *     notifyMode is WRITE_AWAKEN.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT: Invalid notify mode
+ *   NV_ERR_INVALID_CHANNEL: When the overlay is unallocated
+ *   NV_ERR_INVALID_OWNER: Callee isn't the owner of the channel
+ *   NV_ERR_INVALID_OBJECT_HANDLE: Notif ctx dma not found
+ *   NV_ERR_INVALID_OFFSET: Bad offset within notif ctx dma
+ *   NV_ERR_INSUFFICIENT_RESOURCES
+ *   NV_ERR_TIMEOUT: RM timed out waiting to inject methods
+ */
+#define NV5070_CTRL_CMD_STOP_OVERLAY (0x50700102) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE        (0x00000000)
+#define NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE_AWAKEN (0x00000001)
+
+#define NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       channelInstance;
+    NvU32                       notifyMode;
+    NvHandle                    hNotifierCtxDma;
+    NvU32                       offset;
+    NV_DECLARE_ALIGNED(NvP64 hEvent, 8);
+} NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS;
+
+
+
+/*
+ * NV5070_CTRL_CMD_IS_MODE_POSSIBLE
+ *
+ * This command is used by DD to determine whether or not a given mode
+ * is possible given the current nvclk, mclk, dispclk and potentially some
+ * other parameters that are normally hidden from it.
+ * All the parameters except IsPossible (output), Force422 (output),
+ * MinPState (input/output), minPerfLevel (output), CriticalWatermark
+ * (output), worstCaseMargin (output), and worstCaseDomain (output) are
+ * supplied by the caller.
+ *
+ *   HeadActive
+ *     Whether or not the params for this head are relevant.
+ *
+ *   PixelClock
+ *     Frequency: Pixel clk frequency in KHz.
+ *     Adj1000Div1001: 1000/1001 multiplier for pixel clock.
+ *
+ *   RasterSize
+ *     Width: Total width of the raster. Also referred to as HTotal.
+ *     Height: Total height of the raster. Also referred to as VTotal.
+ *
+ *   RasterBlankStart
+ *     X: Start of horizontal blanking for the raster.
+ *     Y: Start of vertical blanking for the raster.
+ *
+ *   RasterBlankEnd
+ *     X: End of horizontal blanking for the raster.
+ *     Y: End of vertical blanking for the raster.
+ *
+ *   RasterVertBlank2
+ *     YStart: Start of second blanking for the second field of an
+ *       interlaced raster. This field is irrelevant when the raster is
+ *       progressive.
+ *     YEnd: End of second blanking for the second field of an interlaced
+ *       raster. This field is irrelevant when the raster is progressive.
+ *
+ *   Control
+ *     Structure: Whether the raster is progressive or interlaced.
+ *
+ *   OutputScaler
+ *     VerticalTaps: Vertical scaler taps.
+ *     HorizontalTaps: Horizontal scaler taps.
+ *     Force422: Whether OutputScaler is operating in 422 mode or not.
+ *
+ *   ViewportSizeOut
+ *     Width: Width of output viewport.
+ *     Height: Height of output viewport.
+ *     Both the above fields are irrelevant for G80.
+ *
+ *   ViewportSizeOutMin
+ *     Width: Minimum possible/expected width of output viewport.
+ *     Height: Minimum possible/expected height of output viewport.
+ *
+ *   ViewportSizeIn
+ *     Width: Width of input viewport.
+ *     Height: Height of input viewport.
+ *
+ *   Params
+ *     Format: Core channel's pixel format. See the enumerants following
+ *       the variable declaration for possible options.
+ *     SuperSample: Whether to use X1AA or X4AA in the core channel.
+ *       This parameter is ignored for G80.
+ *
+ *   BaseUsageBounds
+ *     Usable: Whether or not the base channel is expected to be used.
+ *     PixelDepth: Maximum pixel depth allowed in the base channel.
+ *     SuperSample: Whether or not X4AA is allowed in the base channel.
+ *     BaseLutUsage: Base LUT size.
+ *     OutputLutUsage: Output LUT size.
+ *
+ *   OverlayUsageBounds
+ *     Usable: Whether or not the overlay channel is expected to be used.
+ *     PixelDepth: Maximum pixel depth allowed in the overlay channel.
+ *     OverlayLutUsage: Overlay LUT size.
+ *
+ *   BaseLutLo
+ *     Enable: Specifies whether the core channel's base LUT is enabled.
+ *     Mode: Specifies the LUT mode.
+ *     NeverYieldToBase: Specifies whether NEVER_YIELD_TO_BASE is enabled.
+ *
+ *   OutputLutLo
+ *     Enable: Specifies whether the core channel's output LUT is enabled.
+ *     Mode: Specifies the LUT mode.
+ *     NeverYieldToBase: Specifies whether NEVER_YIELD_TO_BASE is enabled.
+ *
+ *   outputResourcePixelDepthBPP
+ *     Specifies the output pixel depth with scaler mode.
+ *
+ *   CriticalWatermark
+ *     If MinPState is set to _NEED_MIN_PSTATE, this will return the
+ *     critical watermark level at the minimum pstate. Otherwise, this will
+ *     return the critical watermark at the level that the IMP calculations
+ *     are otherwise performed at.
+ *
+ *   pixelReplicateMode
+ *     Specifies the replication mode, whether it is X2 or X4. Set this
+ *     parameter to OFF if there is no pixel replication.
+ *
+ *   numSSTLinks
+ *     Number of Single Stream Transport links which will be used by the
+ *     SOR.
"0" means to use the number indicated by the most recent + * NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST call. + * + * RequestedOperation + * This parameter is used to determine whether + * 1. DD is simplying querying whether or not the specified mode is + * possible (REQUESTED_OPER = _QUERY) or + * 2. DD is about to set the specified mode and RM should make + * appropriate preparations to make the mode possible. DD should + * never pass in a mode that was never indicated by RM as possible + * when DD queried for the possibility of the mode. This + * corresponds to REQUESTED_OPER = _PRE_MODESET. + * 3. DD just finished setting the specified mode. RM can go ahead + * and make changes like lowering the perf level if desired. This + * corresponds to REQUESTED_OPER = _POST_MODESET. This parameter is + * useful when we are at a higher perf level in a mode that's not + * possible at a lower perf level and want to go to a mode that is + * possible even at a lower perf level. In such cases, lowering + * perf level before modeset is complete is dangerous as it will + * cause underflow. RM will wait until the end of modeset to lower + * the perf level. + * + * options + * Specifies a bitmask for options. + * NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN + * Tells IMP to calculate worstCaseMargin and worstCaseDomain. + * + * IsPossible + * This is the first OUT param for this call. It indicates whether + * or not the current mode is possible. + * + * MinPState + * MinPState is an IO (in/out) variable; it gives the minimum p-state + * value at which the mode is possible on a PStates 2.0 system if the + * parameter is initialized by the caller with _NEED_MIN_PSTATE. If + * _NEED_MIN_PSTATE is not specified, IMP query will just run at the + * max available perf level and return results for that pstate. + * + * If the minimum pstate is required, then MasterLockMode, + * MasterLockPin, SlaveLockMode, and SlaveLockPin must all be + * initialized. + * + * On a PStates 3.0 system, the return value for MinPState is + * undefined, but minPerfLevel can return the minimum IMP v-pstate. + * + * minPerfLevel + * On a PStates 3.0 system, minPerfLevel returns the minimum IMP + * v-pstate at which the mode is possible. On a PStates 2.0 system, + * minPerfLevel returns the minimum perf level at which the mode is + * possible. + * + * minPerfLevel is valid only if MinPState is initialized to + * _NEED_MIN_PSTATE. + * + * worstCaseMargin + * Returns the ratio of available bandwidth to required bandwidth, + * multiplied by NV5070_CTRL_IMP_MARGIN_MULTIPLIER. Available + * bandwidth is calculated in the worst case bandwidth domain, i.e., + * the domain with the least available margin. Bandwidth domains + * include the IMP-relevant clock domains, and possibly other virtual + * bandwidth domains such as AWP. + * + * Note that IMP checks additional parameters besides the bandwidth + * margins, but only the bandwidth margin is reported here, so it is + * possible for a mode to have a more restrictive domain that is not + * reflected in the reported margin result. + * + * This result is not guaranteed to be valid if the mode is not + * possible. + * + * Note also that the result is generally calculated for the highest + * pstate possible (usually P0). But if _NEED_MIN_PSTATE is specified + * with the MinPState parameter, the result will be calculated for the + * min possible pstate (or the highest possible pstate, if the mode is + * not possible). 
+ *
+ *     The result is valid only if
+ *     NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN is set in
+ *     "options".
+ *
+ *   worstCaseDomain
+ *     Returns a short text string naming the domain for the margin
+ *     returned in "worstCaseMargin". See "worstCaseMargin" for more
+ *     information.
+ *
+ *   bUseCachedPerfState
+ *     Indicates that RM should use cached values for the fastest
+ *     available perf level (v-pstate for PStates 3.0 or pstate for
+ *     PStates 2.0) and dispclk. This feature allows the query call to
+ *     execute faster, and is intended to be used, for example, during
+ *     mode enumeration, when many IMP query calls are made in close
+ *     succession, and perf conditions are not expected to change between
+ *     query calls. When IMP has not been queried recently, it is
+ *     recommended to NOT use cached values, in case perf conditions have
+ *     changed and the cached values no longer reflect the current
+ *     conditions.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ *
+ * Assumptions/Limitations:
+ * - If the caller sends any methods that alter the State Cache, then before
+ *   calling any of the following functions:
+ *     NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY_USE_SC
+ *     NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC
+ *     NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC
+ *   the caller must repeatedly issue NV5070_CTRL_CMD_GET_CHANNEL_INFO, and
+ *   delay until the returned channelState is either:
+ *     NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_IDLE,
+ *     NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_WRTIDLE, or
+ *     NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_EMPTY.
+ *   This ensures that all commands have reached the State Cache before RM
+ *   reads them.
+ *
+ *
+ */
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE (0x50700109) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY               (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET         (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET        (0x00000002)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY_USE_SC        (0x00000003)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC  (0x00000004)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC (0x00000005)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_SUPERVISOR          (0x00000007)
+
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN  (0x00000001)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_NO  (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_YES (0x00000001)
+
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_UNDEFINED   (0x00000000)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P0          (0x00000001)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P1          (0x00000002)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P2          (0x00000004)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P3          (0x00000008)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P8          (0x00000100)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P10         (0x00000400)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P12         (0x00001000)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P15         (0x00008000)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_MAX         NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P15
+#define NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE     (0x10101010)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE_DEFAULT (0x00000000)
+
+#define NV5070_CTRL_IMP_MARGIN_MULTIPLIER                (0x00000400)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_NO  (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_YES (0x00000001)
+
+#define NV5070_CTRL_IS_MODE_POSSIBLE_DISPLAY_ID_SKIP_IMP_OUTPUT_CHECK (0xAAAAAAAA)
+
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT    (0x00000000)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_NO  (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_YES (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_PROGRESSIVE  (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_INTERLACED   (0x00000001)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_1          (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_2          (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_3          (0x00000002)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_3_ADAPTIVE (0x00000003)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_5          (0x00000004)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_1 (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_2 (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_8 (0x00000002)
+
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_SCALER_FORCE422_MODE_DISABLE   (0x00000000)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_SCALER_FORCE422_MODE_ENABLE    (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_I8                  (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_VOID16              (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_VOID32              (0x00000002)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x00000003)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8            (0x00000004)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A2B10G10R10         (0x00000005)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8B8G8R8            (0x00000006)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_R5G6B5              (0x00000007)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A1R5G5B5            (0x00000008)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X1AA              (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X4AA              (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_USE_CURRENT  (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_NO           (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_YES          (0x00000002)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_USE_CURRENT (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_8           (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_16          (0x00000002)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_32          (0x00000003)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64          (0x00000004)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_USE_CURRENT (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X1AA        (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X4AA        (0x00000002)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_257  (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257  (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_USE_CURRENT (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_NO          (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_YES         (0x00000002)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_USE_CURRENT (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_16          (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32          (0x00000002)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_64          (0x00000003)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257  (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_ENABLE_DISABLE (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_ENABLE_ENABLE  (0x00000001)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_LORES                         (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_HIRES                         (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE        (0x00000002)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE  (0x00000003)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000004)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE  (0x00000005)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE   (0x00000006)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE  (0x00000007)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE  (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_ENABLE_DISABLE            (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_ENABLE_ENABLE             (0x00000001)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_LORES                         (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_HIRES                         (0x00000001)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE        (0x00000002)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE  (0x00000003)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000004)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE  (0x00000005)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE   (0x00000006)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE  (0x00000007)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000)
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE  (0x00000001)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF                      (0x00000000)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X2                       (0x00000001)
+#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X4                       (0x00000002)
+
+#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    struct {
+        NvU32 HeadActive;
+        struct {
+            NvU32 Frequency;
+
+            NvU32 Adj1000Div1001;
+        } PixelClock;
+
+        struct {
+            NvU32 Width;
+            NvU32 Height;
+        } RasterSize;
+
+        struct {
+            NvU32 X;
+            NvU32 Y;
+        } RasterBlankStart;
+
+        struct {
+            NvU32 X;
+            NvU32 Y;
+        } RasterBlankEnd;
+
+        struct {
+            NvU32 YStart;
+            NvU32 YEnd;
+        } RasterVertBlank2;
+
+        struct {
+            NvU32 Structure;
+/*
+ * Note: For query calls, the lock modes and lock pins are used only if the min
+ * pstate is required (i.e., if MinPState is set to
+ * NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE).
+ */
+            NV_DISP_LOCK_MODE MasterLockMode;
+            NV_DISP_LOCK_PIN  MasterLockPin;
+            NV_DISP_LOCK_MODE SlaveLockMode;
+            NV_DISP_LOCK_PIN  SlaveLockPin;
+        } Control;
+
+        struct {
+            NvU32  VerticalTaps;
+            NvU32  HorizontalTaps;
+            NvBool Force422;
+        } OutputScaler;
+
+        struct {
+            NvU32 Width;
+            NvU32 Height;
+        } ViewportSizeOut;
+
+        struct {
+            NvU32 Width;
+            NvU32 Height;
+        } ViewportSizeOutMin;
+
+        struct {
+            NvU32 Width;
+            NvU32 Height;
+        } ViewportSizeOutMax;
+
+        struct {
+            NvU32 Width;
+            NvU32 Height;
+        } ViewportSizeIn;
+
+        struct {
+            NvU32 Format;
+            NvU32 SuperSample;
+        } Params;
+
+        struct {
+            NvU32 Usable;
+            NvU32 PixelDepth;
+            NvU32 SuperSample;
+            NvU32 BaseLutUsage;
+            NvU32 OutputLutUsage;
+        } BaseUsageBounds;
+
+        struct {
+            NvU32 Usable;
+            NvU32 PixelDepth;
+            NvU32 OverlayLutUsage;
+        } OverlayUsageBounds;
+
+        struct {
+            NvBool Enable;
+            NvU32  Mode;
+            NvBool NeverYieldToBase;
+        } BaseLutLo;
+
+        struct {
+            NvBool Enable;
+            NvU32  Mode;
+            NvBool NeverYieldToBase;
+        } OutputLutLo;
+
+        NvU32 displayId[NV5070_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD];
+        NvU32 outputResourcePixelDepthBPP;
+
+        NvU32 CriticalWatermark; // in pixels
+
+    } Head[NV5070_CTRL_CMD_MAX_HEADS];
+
+    struct {
+        NvU32 owner;
+        NvU32 protocol;
+    } Dac[NV5070_CTRL_CMD_MAX_DACS];
+
+    struct {
+//
+// owner field is deprecated. In the future, all client calls should set
+// ownerMask and bUseSorOwnerMask. bUseSorOwnerMask must be set in order
+// to use ownerMask.
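// For example (illustrative, not a requirement of this header), a client
// driving this SOR from head 0 only would set bUseSorOwnerMask = NV_TRUE
// and ownerMask = NVBIT(0) rather than relying on the deprecated owner
// field.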
+//
+        NvU32 owner;
+        NvU32 ownerMask;  // Mask of heads that own this SOR
+
+        NvU32 protocol;
+        NvU32 pixelReplicateMode;
+
+        NvU8  numSSTLinks;
+    } Sor[NV5070_CTRL_CMD_MAX_SORS];
+
+    NvBool bUseSorOwnerMask;
+
+    struct {
+        NvU32 owner;
+        NvU32 protocol;
+    } Pior[NV5070_CTRL_CMD_MAX_PIORS];
+
+
+    NvU32  RequestedOperation;
+// This argument is for VERIF and INTERNAL use only
+    NvU32  options;
+    NvU32  IsPossible;
+    NvU32  MinPState;
+
+    NvU32  minPerfLevel;
+//
+// Above are the possible output values for the MinPState variable. The lower
+// the p-state value, the higher the power consumption; if no p-states are
+// defined on the chip, then zero is returned.
+//
+
+//
+// Above are also the possible input values for the MinPState variable; by
+// default the call calculates whether the mode is possible at the max
+// available p-state and returns that state in the same variable.
+//
+    NvU32  worstCaseMargin;
+
+//
+// The calculated margin is multiplied by a constant, so that it can be
+// represented as an integer with reasonable precision. "0x400" was chosen
+// because it is a power of two, which might allow some compilers/CPUs to
+// simplify the calculation by doing a shift instead of a multiply/divide.
+// (And 0x400 is 1024, which is close to 1000, so that may simplify visual
+// interpretation of the raw margin value.)
+//
+    char   worstCaseDomain[8];
+
+    NvBool bUseCachedPerfState;
+} NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS;
+
+
+
+/*
+ * NV5070_CTRL_CMD_GET_CHANNEL_INFO
+ *
+ * This command returns the current channel state.
+ *
+ *   channelClass
+ *     This field indicates the hw class number (507A-507E).
+ *
+ *   channelInstance
+ *     This field indicates which of the two instances of the channel
+ *     (in case there are two, e.g., base, overlay, etc.) the cmd is meant
+ *     for. Note that the core channel has only one instance and the field
+ *     should be set to 0 for the core channel.
+ *
+ *   channelState
+ *     This field indicates the desired channel state in a mask form that
+ *     is compatible with NV5070_CTRL_CMD_IDLE_CHANNEL. A mask format
+ *     allows clients to check for one from a group of states.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ *
+ * The display driver uses this call to ensure that all its methods have
+ * propagated through the hardware's internal fifo
+ * (NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING) before it calls
+ * RM to check whether or not the mode it set up in the Assembly State Cache
+ * will be possible. Note that the display driver cannot use a completion
+ * notifier in this case because the completion notifier is associated with
+ * Update, and Update will propagate the state from Assembly to Armed; when
+ * checking the possibility of a mode, the display driver wouldn't want the
+ * Armed state to be affected.
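+ *
+ * As an illustrative sketch (not a normative part of this header), polling
+ * until no methods are pending might look as follows; rmControl is a
+ * hypothetical wrapper around the RM control interface, the handles are
+ * assumed to be set up elsewhere, and 0x507D is used here as the core
+ * channel class:
+ *
+ *   NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { 0 };
+ *   NV_STATUS status;
+ *   info.base.subdeviceIndex = 0;
+ *   info.channelClass        = 0x507D;  // core channel
+ *   info.channelInstance     = 0;       // core channel has one instance
+ *   do
+ *   {
+ *       status = rmControl(hClient, hDisplay,
+ *                          NV5070_CTRL_CMD_GET_CHANNEL_INFO,
+ *                          &info, sizeof(info));
+ *   } while ((status == NV_OK) &&
+ *            !(info.channelState &
+ *              NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING));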
+ */
+#define NV5070_CTRL_CMD_GET_CHANNEL_INFO (0x5070010b) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE          NV5070_CTRL_CMD_CHANNEL_STATE_IDLE
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_WRTIDLE       NV5070_CTRL_CMD_CHANNEL_STATE_WRTIDLE
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_EMPTY         NV5070_CTRL_CMD_CHANNEL_STATE_EMPTY
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FLUSHED       NV5070_CTRL_CMD_CHANNEL_STATE_FLUSHED
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_BUSY          NV5070_CTRL_CMD_CHANNEL_STATE_BUSY
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC       NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC_LIMBO NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_LIMBO1        NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO1
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_LIMBO2        NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO2
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FCODEINIT     NV5070_CTRL_CMD_CHANNEL_STATE_FCODEINIT
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FCODE         NV5070_CTRL_CMD_CHANNEL_STATE_FCODE
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_VBIOSINIT     NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSINIT
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_VBIOSOPER     NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSOPER
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED   NV5070_CTRL_CMD_CHANNEL_STATE_UNCONNECTED
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_INITIALIZE    NV5070_CTRL_CMD_CHANNEL_STATE_INITIALIZE
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN1     NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN2     NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2
+#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING (NV5070_CTRL_GET_CHANNEL_INFO_STATE_EMPTY | NV5070_CTRL_GET_CHANNEL_INFO_STATE_WRTIDLE | NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE)
+#define NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       channelClass;
+    NvU32                       channelInstance;
+    NvBool                      IsChannelInDebugMode;
+
+    NvU32                       channelState;
+} NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS;
+
+
+
+/*
+ * NV5070_CTRL_CMD_SET_ACCL
+ *
+ * This command turns accelerators on and off. The use of this command
+ * should be restricted as it may have undesirable effects. Its purpose
+ * is to provide a mechanism for clients to use the accelerator bits to
+ * get into states that are either not detectable by the RM or may take
+ * longer to reach than we think is reasonable to wait in the RM.
+ *
+ * NV5070_CTRL_CMD_GET_ACCL
+ *
+ * This command queries the current state of the accelerators.
+ *
+ *   channelClass
+ *     This field indicates the hw class number (507A-507E).
+ *
+ *   channelInstance
+ *     This field indicates which of the two instances of the channel
+ *     (in case there are two, e.g., base, overlay, etc.) the cmd is meant
+ *     for. Note that the core channel has only one instance and the field
+ *     should be set to 0 for the core channel.
+ *
+ *   accelerators
+ *     Accelerators to be set in the SET_ACCL command. Returns the
+ *     currently set accelerators on the GET_ACCL command.
+ *
+ *   accelMask
+ *     A mask to specify which accelerators to change with the
+ *     SET_ACCL command. This field does nothing in the GET_ACCL
+ *     command.
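+ *
+ * As an illustration (not a normative part of this header), to turn on only
+ * the IGNORE_PI accelerator while leaving the others unchanged, a client
+ * could set:
+ *
+ *   accelerators = NV5070_CTRL_ACCL_IGNORE_PI;
+ *   accelMask    = NV5070_CTRL_ACCL_IGNORE_PI;
+ *
+ * and issue NV5070_CTRL_CMD_SET_ACCL; accelerator bits outside accelMask
+ * keep their previous values.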
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_CHANNEL
+ *      NV_ERR_INVALID_OWNER
+ *      NV_ERR_GENERIC
+ *
+ */
+
+#define NV5070_CTRL_CMD_SET_ACCL (0x5070010c) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_ACCL_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_GET_ACCL (0x5070010d) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_ACCL_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_ACCL_NONE             NV5070_CTRL_IDLE_CHANNEL_ACCL_NONE
+#define NV5070_CTRL_ACCL_IGNORE_PI        NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI
+#define NV5070_CTRL_ACCL_SKIP_NOTIF       NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF
+#define NV5070_CTRL_ACCL_SKIP_SEMA        NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA
+#define NV5070_CTRL_ACCL_IGNORE_INTERLOCK NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK
+#define NV5070_CTRL_ACCL_IGNORE_FLIPLOCK  NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK
+#define NV5070_CTRL_ACCL_TRASH_ONLY       NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY
+#define NV5070_CTRL_ACCL_TRASH_AND_ABORT  NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT
+#define NV5070_CTRL_SET_ACCL_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV5070_CTRL_SET_ACCL_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 channelClass;
+    NvU32 channelInstance;
+
+    NvU32 accelerators;
+    NvU32 accelMask;
+} NV5070_CTRL_SET_ACCL_PARAMS;
+#define NV5070_CTRL_GET_ACCL_PARAMS_MESSAGE_ID (0xDU)
+
+typedef NV5070_CTRL_SET_ACCL_PARAMS NV5070_CTRL_GET_ACCL_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_STOP_BASE
+ *
+ * This command tries to turn the base channel off ASAP.
+ *
+ *      channelInstance
+ *          This field indicates which of the two instances of the base
+ *          channel the cmd is meant for.
+ *
+ *      notifyMode
+ *          This field indicates the action RM should take once the base
+ *          channel has been successfully stopped. The options are (1) set a
+ *          notifier, or (2) set the notifier and generate an OS event.
+ *
+ *      hNotifierCtxDma
+ *          Handle to the ctx dma for the notifier that must be written once
+ *          the base channel is stopped. The standard NvNotification notifier
+ *          structure is used.
+ *
+ *      offset
+ *          Offset within the notifier context dma where the notifier begins.
+ *          The offset must be 16-byte aligned.
+ *
+ *      hEvent
+ *          Handle to the event that RM must use to awaken the client when
+ *          notifyMode is WRITE_AWAKEN.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_ARGUMENT: Invalid notify mode
+ *      NV_ERR_INVALID_CHANNEL: When the base channel is unallocated
+ *      NV_ERR_INVALID_OWNER: Callee isn't the owner of the channel
+ *      NV_ERR_INVALID_OBJECT_HANDLE: Notif ctx dma not found
+ *      NV_ERR_INVALID_OFFSET: Bad offset within notif ctx dma
+ *      NV_ERR_INSUFFICIENT_RESOURCES
+ *      NV_ERR_TIMEOUT: RM timed out waiting to inject methods
+ */
+#define NV5070_CTRL_CMD_STOP_BASE (0x5070010e) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_STOP_BASE_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE        (0x00000000)
+#define NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE_AWAKEN (0x00000001)
+
+#define NV5070_CTRL_CMD_STOP_BASE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV5070_CTRL_CMD_STOP_BASE_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32    channelInstance;
+    NvU32    notifyMode;
+    NvHandle hNotifierCtxDma;
+    NvU32    offset;
+    NV_DECLARE_ALIGNED(NvP64 hEvent, 8);
+} NV5070_CTRL_CMD_STOP_BASE_PARAMS;
+
+
+
+/*
+ * NV5070_CTRL_CMD_GET_PINSET_COUNT
+ *
+ * Get the number of pinsets on this GPU.
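+ *
+ * A typical client enumerates pinset connections by pairing this command
+ * with NV5070_CTRL_CMD_GET_PINSET_PEER, documented below. A minimal sketch
+ * (the NvRmControl wrapper and handle names are assumptions, not part of
+ * this header):
+ *
+ *     NV5070_CTRL_GET_PINSET_COUNT_PARAMS cnt = { 0 };
+ *     status = NvRmControl(hClient, hDisplay,
+ *                          NV5070_CTRL_CMD_GET_PINSET_COUNT,
+ *                          &cnt, sizeof(cnt));
+ *     for (i = 0; (status == NV_OK) && (i < cnt.pinsetCount); i++)
+ *     {
+ *         NV5070_CTRL_GET_PINSET_PEER_PARAMS peer = { 0 };
+ *         peer.pinset = i;
+ *         status = NvRmControl(hClient, hDisplay,
+ *                              NV5070_CTRL_CMD_GET_PINSET_PEER,
+ *                              &peer, sizeof(peer));
+ *         // peer.peerPinset == ..._PEER_PINSET_NONE means "not connected"
+ *     }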
+ *
+ *      pinsetCount [out]
+ *          The number of pinsets on this GPU is returned in this parameter.
+ *          This count includes pinsets that are not connected.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_PARAM_STRUCT
+ *      NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV5070_CTRL_CMD_GET_PINSET_COUNT (0x50700115) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_COUNT_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_GET_PINSET_COUNT_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV5070_CTRL_GET_PINSET_COUNT_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 pinsetCount;
+} NV5070_CTRL_GET_PINSET_COUNT_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_GET_PINSET_PEER
+ *
+ * Retrieve the pinset/GPU that is connected to the specified pinset on
+ * this GPU.
+ *
+ *      pinset [in]
+ *          The pinset on this GPU for which peer info is to be returned
+ *          must be specified in this parameter.
+ *
+ *      peerGpuInstance [out]
+ *          The instance of the GPU on the other side of the connection is
+ *          returned in this parameter.
+ *
+ *      peerPinset [out]
+ *          The pinset on the other side of the connection is returned in
+ *          this parameter. If there is no connection then the value is
+ *          NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_PINSET_NONE.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_PARAM_STRUCT
+ *      NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV5070_CTRL_CMD_GET_PINSET_PEER (0x50700116) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_PEER_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_GPUINSTANCE_NONE (0xffffffff)
+
+#define NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_PINSET_NONE      (0xffffffff)
+
+#define NV5070_CTRL_GET_PINSET_PEER_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV5070_CTRL_GET_PINSET_PEER_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 pinset;
+
+    NvU32 peerGpuInstance;
+    NvU32 peerPinset;
+} NV5070_CTRL_GET_PINSET_PEER_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_SET_RMFREE_FLAGS
+ *
+ * This command sets the flags for an upcoming call to RmFree().
+ * After the RmFree() API runs, whether successfully or not, the flags are
+ * cleared.
+ *
+ *      flags
+ *          This parameter holds the NV0000_CTRL_GPU_SET_RMFREE_FLAGS_*
+ *          flags to be passed for the next RmFree() command only.
+ *          The flags can be one of the following:
+ *          - NV0000_CTRL_GPU_SET_RMFREE_FLAGS_NONE:
+ *            explicitly clears the flags
+ *          - NV0000_CTRL_GPU_SET_RMFREE_FLAGS_FREE_PRESERVES_HW:
+ *            instructs RmFree() to preserve the HW configuration. After
+ *            RmFree() is run this flag is cleared.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_PARAM_STRUCT
+ *      NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV5070_CTRL_CMD_SET_RMFREE_FLAGS (0x50700117) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_SET_RMFREE_FLAGS_NONE        0x00000000
+#define NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW 0x00000001
+#define NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 flags;
+} NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS;
+
+
+/*
+ * NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER
+ *
+ * This command allows setting or getting certain IMP parameters. Changes
+ * take effect on the next modeset and persist across modesets until the
+ * driver is unloaded or the user changes the override.
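+ *
+ * For instance, querying whether IMP is currently enabled might look like
+ * this (a minimal sketch; the individual parameters are described below,
+ * and the NvRmControl wrapper and handle names are assumptions, not part
+ * of this header):
+ *
+ *     NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS imp = { 0 };
+ *     imp.index     = NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IMP_ENABLE;
+ *     imp.pstateApi = NV2080_CTRL_PERF_PSTATES_UNDEFINED;
+ *     imp.operation = NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET;
+ *     status = NvRmControl(hClient, hDisplay,
+ *                          NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER,
+ *                          &imp, sizeof(imp));
+ *     // on success, imp.value is non-zero if IMP is enabled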
+ *
+ *      index
+ *          One of the NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_XXX defines:
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IMP_ENABLE
+ *              Only supports the "get" operation. If FALSE, IMP is being
+ *              bypassed, all Is Mode Possible queries are answered with
+ *              "mode is possible", and registers normally set by IMP are not
+ *              changed from their defaults.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED
+ *              Should IMP consider using ASR. ASR won't be allowed unless it
+ *              is set to "allowed" through both _IS_ASR_ALLOWED and
+ *              _IS_ASR_ALLOWED_PER_PSTATE. Note that IMP will not run ASR
+ *              and MSCG at the same time.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE
+ *              Should IMP consider using ASR when this pstate is being used.
+ *              ASR won't be allowed unless it is set to "allowed" through
+ *              both NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED
+ *              and
+ *              NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE.
+ *              So when NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED
+ *              returns FALSE, IMP won't consider ASR for any p-state. Note
+ *              that IMP will not run ASR and MSCG at the same time. This
+ *              function is valid only on PStates 2.0 systems.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE
+ *              Should IMP consider using MSCG when this pstate is being
+ *              used. MSCG won't be allowed if the MSCG feature isn't
+ *              enabled, even if it is set to "allowed" through
+ *              NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE.
+ *              Use NV2080_CTRL_CMD_MC_QUERY_POWERGATING_PARAMETER to query
+ *              if MSCG is supported and enabled. Note that IMP will not run
+ *              ASR and MSCG at the same time. This function is valid only on
+ *              PStates 2.0 systems.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE
+ *              Only supports the "get" operation. Returns which stutter
+ *              feature is being engaged in hardware when running on the
+ *              given pstate. Valid values are:
+ *              NV5070_CTRL_IMP_STUTTER_FEATURE_NONE
+ *                  This value indicates no stutter feature is enabled.
+ *              NV5070_CTRL_IMP_STUTTER_FEATURE_ASR
+ *                  This value indicates ASR is the currently enabled stutter
+ *                  feature.
+ *              NV5070_CTRL_IMP_STUTTER_FEATURE_MSCG
+ *                  This value indicates MSCG is the currently enabled
+ *                  stutter feature.
+ *              Note that the system will not run ASR and MSCG at the same
+ *              time. This function is valid only on PStates 2.0 systems.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE
+ *              Only supports the "get" operation. Returns the efficiency
+ *              that IMP predicted for the engaged stutter feature (ASR or
+ *              MSCG) when running on the given pstate. Normally, the actual
+ *              efficiency should be higher than the calculated predicted
+ *              efficiency. For MSCG, the predicted efficiency assumes no
+ *              mempool compression. If compression is enabled with MSCG, the
+ *              actual efficiency may be significantly higher. Returns 0 if
+ *              no stutter feature is running. On PStates 3.0 systems, the
+ *              pstateApi parameter is ignored, and the result is returned
+ *              for the min IMP v-pstate possible.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS
+ *              Only supports the "get" operation. Returns information about
+ *              whether and how an mclk switch is possible. Valid fields are:
+ *              NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE
+ *                  This field is not head-specific and indicates if mclk
+ *                  switch is possible with the current mode.
+ *              NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL
+ *                  This field is not head-specific and indicates if mclk
+ *                  switch is possible with the nominal mempool settings
+ *                  (_NO) or if special settings are required in order for
+ *                  mclk switch to be possible (_YES).
+ *              NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK
+ *                  Each head has its own setting for this field. If this
+ *                  field is set to _YES, then the specified head will allow
+ *                  mclk switch to begin if mempool occupancy exceeds the
+ *                  MID_WATERMARK setting.
+ *              NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF
+ *                  Each head has its own setting for this field. If this
+ *                  field is set to _YES, then the specified head will allow
+ *                  mclk switch to begin if the head is in its DWCF interval,
+ *                  and the mempool occupancy is greater than or equal to the
+ *                  DWCF watermark.
+ *              Note: If neither _MID_WATERMARK nor _DWCF is set to _YES,
+ *              then the specified head is ignored when determining when it
+ *              is OK to start an mclk switch. Mclk switch must be allowed
+ *              (or ignored) by all heads before an mclk switch will actually
+ *              begin.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_FORCE_MIN_MEMPOOL
+ *              Should min mempool be forced.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MEMPOOL_COMPRESSION
+ *              Should mempool compression be enabled.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_CURSOR_SIZE
+ *              The cursor size (in horizontal pixels) used by IMP (rather
+ *              than the actual cursor size) for its computation.
+ *              A maximum value is enforced for what can be set. The maximum
+ *              can be queried after resetting the value, since a reset
+ *              restores the maximum possible value.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_ENABLE
+ *              This enables/disables the ISO FB Latency Test. The test
+ *              records the max ISO FB latency for all heads during the test
+ *              period (excluding modeset time).
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_WC_TOTAL_LATENCY
+ *              This is used to retrieve the calculated wcTotalLatency of the
+ *              ISO FB Latency Test. wcTotalLatency is the worst-case time
+ *              for a request's data to come back after the request is
+ *              issued. It is the sum of the IMP-calculated FbLatency and the
+ *              stream delay.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_LATENCY
+ *              This is used to retrieve the max latency among all heads
+ *              during the whole ISO FB Latency Test. The max latency can be
+ *              compared with the calculated wcTotalLatency; this determines
+ *              whether the ISO FB Latency Test passes.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_TEST_PERIOD
+ *              This is used to retrieve the max test period during the whole
+ *              ISO FB Latency Test. Experimental results indicate that the
+ *              test period should be at least 10 seconds to approximate the
+ *              worst-case FB latency in real situations.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_GLITCHLESS_MODESET_ENABLE
+ *              This enables or disables glitchless modesets. Modesets can be
+ *              glitchless if:
+ *              (1) There are no raster timing changes, and
+ *              (2) The resource requirements of all bandwidth clients are
+ *                  either not changing, or they are all changing in the same
+ *                  direction (all increasing or all decreasing).
+ *              If glitchless modeset is disabled, or is not possible, heads
+ *              will be blanked during the modeset transition.
+ *      pstateApi
+ *          NV2080_CTRL_PERF_PSTATES_PXXX value.
+ *          Required for
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE,
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE,
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE and
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE
+ *          on PStates 2.0 systems. For other indices it must be
+ *          NV2080_CTRL_PERF_PSTATES_UNDEFINED. Not used on PStates 3.0
+ *          systems.
+ *      head
+ *          Head index, which is required when querying the mclk switch
+ *          feature
+ *          (index = NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS).
+ *      operation
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET
+ *              Indicates a "get" operation.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_SET
+ *              Indicates a "set" operation.
+ *          NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_RESET
+ *              Indicates a "reset" operation. This operation will reset the
+ *              values for all indices to their RM defaults.
+ *      value
+ *          Value for the new setting of a "set" operation, or the returned
+ *          value of a "get" operation; for enable/disable operations,
+ *          "enable" is non-zero, and "disable" is zero.
+ *
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_ARGUMENT
+ *      NV_ERR_INVALID_POINTER
+ *      NV_ERR_INVALID_INDEX            specified index is not supported
+ *      NV_ERR_INSUFFICIENT_RESOURCES   cannot handle any more overrides
+ *      NV_ERR_INVALID_OBJECT           the struct needed to get the specified
+ *                                      information is not marked as valid
+ *      NV_ERR_INVALID_STATE            the parameter has been set but
+ *                                      resetting will not be possible
+ */
+#define NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER (0x50700118) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 index;
+    NvU32 pstateApi;
+    NvU32 head;
+    NvU32 operation;
+    NvU32 value;
+} NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS;
+
+/* valid operation values */
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET   0
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_SET   1
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_RESET 2
+
+/* valid index values */
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_NONE                                            (0x00000000)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IMP_ENABLE                                      (0x00000001)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED                                  (0x00000002)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE                       (0x00000003)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE                      (0x00000004)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE                      (0x00000005)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE (0x00000006)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS                     (0x00000007)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_FORCE_MIN_MEMPOOL                               (0x00000008)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MEMPOOL_COMPRESSION                             (0x00000009)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_CURSOR_SIZE                                     (0x0000000A)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_ENABLE                        (0x0000000B)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_WC_TOTAL_LATENCY              (0x0000000C)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_LATENCY                   (0x0000000D)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_TEST_PERIOD               (0x0000000E)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_GLITCHLESS_MODESET_ENABLE                       (0x0000000F)
+
+/* valid NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE values */
+#define NV5070_CTRL_IMP_STUTTER_FEATURE_NONE 0
+#define NV5070_CTRL_IMP_STUTTER_FEATURE_ASR  1
+#define NV5070_CTRL_IMP_STUTTER_FEATURE_MSCG 2
+
+/* valid NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE values */
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE             0:0
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE_NO          (0x00000000)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE_YES         (0x00000001)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL     1:1
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL_NO  (0x00000000)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL_YES (0x00000001)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK        2:2
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK_NO     (0x00000000)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK_YES    (0x00000001)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF                 3:3
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF_NO              (0x00000000)
+#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF_YES             (0x00000001)
+
+/*
+ * NV5070_CTRL_CMD_SET_MEMPOOL_WAR_FOR_BLIT_TEARING
+ *
+ * This command engages the WAR for blit tearing caused by a huge mempool
+ * size combined with mempool compression. The EVR in aero-off mode uses
+ * scanline info to predict where the scanline will be at a later time.
+ * Because the RG scanline is used to perform front-buffer blits, and the
+ * isohub buffers a large amount of display data, the hardware may have
+ * fetched several lines of data ahead of where the RG is scanning out,
+ * leading to video tearing. The WAR for this problem is to reduce the
+ * amount of data fetched.
+ *
+ *      base
+ *          This struct must be the first member of all 5070 control calls
+ *          containing the subdeviceIndex.
+ *      bEngageWAR
+ *          Indicates whether the mempool WAR is to be engaged or disengaged.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV5070_CTRL_CMD_SET_MEMPOOL_WAR_FOR_BLIT_TEARING (0x50700119) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvBool bEngageWAR; +} NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS; +typedef struct NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS *PNV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS; + +#define NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE (0x50700120) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + NvU32 activeViewportBase; +} NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS; +typedef struct NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS *PNV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS; + +/* _ctrl5070chnc_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h new file mode 100644 index 0000000..bbfa794 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl5070/ctrl5070common.finn +// + + + +#define NV5070_CTRL_CMD_CHANNEL_STATE_IDLE NVBIT(0) +#define NV5070_CTRL_CMD_CHANNEL_STATE_WRTIDLE NVBIT(1) +#define NV5070_CTRL_CMD_CHANNEL_STATE_QUIESCENT1 NVBIT(2) +#define NV5070_CTRL_CMD_CHANNEL_STATE_QUIESCENT2 NVBIT(3) +#define NV5070_CTRL_CMD_CHANNEL_STATE_EMPTY NVBIT(4) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FLUSHED NVBIT(5) +#define NV5070_CTRL_CMD_CHANNEL_STATE_BUSY NVBIT(6) +#define NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC NVBIT(7) +#define NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO NVBIT(8) +#define NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO1 NVBIT(9) +#define NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO2 NVBIT(10) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FCODEINIT NVBIT(11) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FCODE NVBIT(12) +#define NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSINIT NVBIT(13) +#define NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSOPER NVBIT(14) +#define NV5070_CTRL_CMD_CHANNEL_STATE_UNCONNECTED NVBIT(15) +#define NV5070_CTRL_CMD_CHANNEL_STATE_INITIALIZE NVBIT(16) +#define NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 NVBIT(17) +#define NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 NVBIT(18) +#define NV5070_CTRL_CMD_CHANNEL_STATE_INIT NVBIT(19) + +#define NV5070_CTRL_CMD_MAX_HEADS 4U +#define NV5070_CTRL_CMD_MAX_DACS 4U +#define NV5070_CTRL_CMD_MAX_SORS 8U +#define NV5070_CTRL_CMD_MAX_PIORS 4U + +#define NV5070_CTRL_CMD_OR_OWNER_NONE (0xFFFFFFFFU) +#define NV5070_CTRL_CMD_OR_OWNER_HEAD(i) (i) +#define NV5070_CTRL_CMD_OR_OWNER_HEAD__SIZE_1 NV5070_CTRL_CMD_MAX_HEADS + +#define NV5070_CTRL_CMD_SOR_OWNER_MASK_NONE (0x00000000U) +#define NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(i) (1 << i) + +#define NV5070_CTRL_CMD_DAC_PROTOCOL_RGB_CRT (0x00000000U) + + + +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A (0x00000000U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B (0x00000001U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS (0x00000002U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_LVDS_CUSTOM (0x00000003U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DP_A (0x00000004U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DP_B (0x00000005U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SUPPORTED (0xFFFFFFFFU) + +#define NV5070_CTRL_CMD_PIOR_PROTOCOL_EXT_TMDS_ENC (0x00000000U) + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h new file mode 100644 index 0000000..5154638 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h @@ -0,0 +1,143 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070event.finn
+//
+
+#include "ctrl/ctrl5070/ctrl5070base.h"
+
+/* NV50_DISPLAY event-related control commands and parameters */
+
+/*
+ * NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION
+ *
+ * This command sets event notification state for the NV50_DISPLAY object.
+ * This command requires that an instance of NV01_EVENT has been previously
+ * bound to the NV50_DISPLAY object.
+ *
+ *      subDeviceInstance
+ *          This parameter specifies the subdevice instance within the
+ *          NV50_DISPLAY parent device to which the operation should be
+ *          directed. This parameter must specify a value between zero and
+ *          the total number of subdevices within the parent device. This
+ *          parameter should be set to zero for default behavior.
+ *      hEvent
+ *          This parameter specifies the handle of the NV01_EVENT instance
+ *          to be bound to the given subDeviceInstance.
+ *      event
+ *          This parameter specifies the type of event to which the specified
+ *          action is to be applied. This parameter must specify a valid
+ *          NOTIFIERS value of the display class.
+ *      action
+ *          This parameter specifies the desired event notification action.
+ *          Valid notification actions include:
+ *          NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE
+ *              This action disables event notification for the specified
+ *              event for the associated subdevice object.
+ *          NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE
+ *              This action enables single-shot event notification for the
+ *              specified event for the associated subdevice object.
+ *          NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT
+ *              This action enables repeated event notification for the
+ *              specified event for the associated subdevice object.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_PARAM_STRUCT
+ *      NV_ERR_INVALID_ARGUMENT
+ *      NV_ERR_INVALID_STATE
+ */
+#define NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION (0x50700901) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32    subDeviceInstance;
+    NvHandle hEvent;
+    NvU32    event;
+    NvU32    action;
+} NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+/* valid action values */
+#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000)
+#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE  (0x00000001)
+#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT  (0x00000002)
+
+
+/*
+ * NV5070_CTRL_CMD_EVENT_SET_TRIGGER
+ *
+ * This command triggers a software event for the NV50_DISPLAY object.
+ * This command accepts no parameters.
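+ *
+ * Since there is no parameter structure, a client would issue the command
+ * with an empty payload, e.g. (the NvRmControl wrapper and handle names are
+ * assumptions, not part of this header):
+ *
+ *     status = NvRmControl(hClient, hDisplay,
+ *                          NV5070_CTRL_CMD_EVENT_SET_TRIGGER, NULL, 0);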
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ */
+#define NV5070_CTRL_CMD_EVENT_SET_TRIGGER (0x50700902) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | 0x2" */
+
+
+/*
+ * NV5070_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES
+ *
+ *      hMemory
+ *          This parameter specifies the handle of the memory object
+ *          that identifies the memory address translation for this
+ *          subdevice instance's notification(s). The beginning of the
+ *          translation points to an array of notification data structures.
+ *          The size of the translation must be large enough to hold the
+ *          maximum number of notification data structures.
+ *          Legal argument values must be instances of the following classes:
+ *              NV01_NULL
+ *              NV04_MEMORY
+ *          When hMemory specifies the NV01_NULL_OBJECT value then any
+ *          existing memory translation connection is cleared. There must not
+ *          be any pending notifications when this command is issued.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_PARAM_STRUCT
+ *      NV_ERR_INVALID_ARGUMENT
+ *      NV_ERR_INVALID_STATE
+ */
+#define NV5070_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES (0x50700903) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS {
+    NvU32    subDeviceInstance;
+    NvHandle hMemory;
+} NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS;
+
+#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_NOTIFIED 0
+#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_PENDING  1
+#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_ERROR    2
+
+
+
+/* _ctrl5070event_h_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
new file mode 100644
index 0000000..11b79b2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070impoverrides.finn
+//
+
+
+
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
new file mode 100644
index 0000000..0052168
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
@@ -0,0 +1,936 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070or.finn
+//
+
+#include "ctrl5070common.h"
+#include "ctrl/ctrl5070/ctrl5070base.h"
+
+
+
+/*
+ * NV5070_CTRL_CMD_SET_DAC_PWR
+ *
+ * This command sets the DAC power control register. orNumber, normalPower,
+ * and safePower will always have to be specified. However, HSync, VSync,
+ * and data for normal and/or safe mode can be empty, leaving the current
+ * values intact.
+ *
+ *      orNumber
+ *          The DAC for which the settings need to be programmed.
+ *
+ *      normalHSync
+ *          The normal operating state for the H sync signal.
+ *
+ *      normalVSync
+ *          The normal operating state for the V sync signal.
+ *
+ *      normalData
+ *          The normal video data input pin of the d/a converter.
+ *
+ *      normalPower
+ *          The normal state of the DAC macro power.
+ *
+ *      safeHSync
+ *          The safe operating state for the H sync signal.
+ *
+ *      safeVSync
+ *          The safe operating state for the V sync signal.
+ *
+ *      safeData
+ *          The safe video data input pin of the d/a converter.
+ *
+ *      safePower
+ *          The safe state of the DAC macro power.
+ *
+ *      flags
+ *          The following flags have been defined:
+ *          (1) SPECIFIED_NORMAL: Indicates whether HSync, VSync, and data
+ *              for the normal state have been specified in the parameters.
+ *          (2) SPECIFIED_SAFE: Indicates whether HSync, VSync, and data
+ *              for the safe state have been specified in the parameters.
+ *          (3) SPECIFIED_FORCE_SWITCH: Indicates whether to force the
+ *              change immediately instead of waiting for VSync.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_ARGUMENT
+ *      NV_ERR_GENERIC
+ *      NV_ERR_TIMEOUT
+ */
+#define NV5070_CTRL_CMD_SET_DAC_PWR (0x50700404) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC        1:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_ENABLE (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_LO     (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_HI     (0x00000002)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC        1:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_ENABLE (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_LO     (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_HI     (0x00000002)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA         1:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_ENABLE  (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_LO      (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_HI      (0x00000002)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR          0:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR_OFF      (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR_ON       (0x00000001)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC          1:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_ENABLE   (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_LO       (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_HI       (0x00000002)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC          1:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_ENABLE   (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_LO       (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_HI       (0x00000002)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA           1:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_ENABLE    (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_LO        (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_HI        (0x00000002)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR            0:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR_OFF        (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR_ON         (0x00000001)
+
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL     0:0
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL_NO  (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL_YES (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE       1:1
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE_NO    (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE_YES   (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH         2:2
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH_NO      (0x00000000)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH_YES     (0x00000001)
+#define NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 orNumber;
+
+    NvU32 normalHSync;
+    NvU32 normalVSync;
+    NvU32 normalData;
+    NvU32 normalPower;
+    NvU32 safeHSync;
+    NvU32 safeVSync;
+    NvU32 safeData;
+    NvU32 safePower;
+    NvU32 flags;
+} NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS;
+
+
+
+/*
+ * NV5070_CTRL_CMD_GET_SOR_PWM
+ *
+ * This command returns the SOR's current PWM settings.
+ *
+ *      orNumber
+ *          The OR number for which the PWM settings are to be read.
+ *
+ *      targetFreq
+ *          The target PWM freq. This is the PWM frequency we planned on
+ *          programming.
+ *
+ *      actualFreq
+ *          Actual PWM freq programmed into PWM.
+ *
+ *      div
+ *          The divider currently being used for generating the PWM clk.
+ *          A value of 0 means that PWM is disabled.
+ *
+ *      resolution
+ *          The resolution of steps currently programmed or the max number of
+ *          clocks per cycle. The possible values for NV50 are 128, 256, 512
+ *          and 1024. This field is irrelevant when div is 0.
+ *
+ *      dutyCycle
+ *          Duty cycle in the range 0-1024.
+ *
+ *      sourcePCLK (OUT)
+ *          The PWM source clock selector. This field is non-zero if the PCLK
+ *          is selected as the PWM source clock. Otherwise, the PWM source
+ *          clock is XTAL.
+ *
+ *      head (IN)
+ *          The head from which the pixel clock is sourced.
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_ARGUMENT
+ *      NV_ERR_GENERIC
+ */
+#define NV5070_CTRL_CMD_GET_SOR_PWM (0x50700420) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 orNumber;
+    NvU32 targetFreq;
+    NvU32 actualFreq;
+    NvU32 div;
+    NvU32 resolution;
+    NvU32 dutyCycle;
+    NvU32 sourcePCLK;
+    NvU32 head;
+} NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS;
+
+
+/*
+ * NV5070_CTRL_CMD_SET_SOR_PWM
+ *
+ * This command applies the specified PWM settings to the specified SOR.
+ *
+ *      orNumber
+ *          The OR number for which the PWM settings are to be modified.
+ *
+ *      targetFreq
+ *          The target PWM freq to be programmed.
+ *
+ *      actualFreq
+ *          Actual PWM freq programmed into PWM after all the specified
+ *          settings have been applied.
+ *
+ *      div
+ *          The divider to use for generating the PWM clk.
+ *          Set this to 0 to disable PWM. Note that only one of div
+ *          or targetFreq can be specified at a time, since specifying one
+ *          automatically determines the value of the other. Selection is
+ *          done via the USE_SPECIFIED_DIV flag.
+ *
+ *      resolution
+ *          The resolution or the max number of clocks per cycle desired.
+ *          Note that if it's not possible to program the given resolution
+ *          and frequency (or div) combination, RM would not attempt to
+ *          smartly lower the resolution. The call would return failure.
+ *          The possible values for NV50 are 128, 256, 512 and 1024. This
+ *          field is irrelevant when div is 0.
+ *
+ *      dutyCycle
+ *          Duty cycle in the range 0-1024.
+ *
+ *      flags
+ *          The following flags have been defined:
+ *          (1) USE_SPECIFIED_DIV: Indicates whether RM should use the
+ *              specified div or targetFreq when determining the divider
+ *              for the xtal clock.
+ *          (2) PROG_DUTY_CYCLE: Indicates whether or not the caller
+ *              desires to program the duty cycle. Normally, whenever the
+ *              pwm freq and range need to be programmed, it's expected that
+ *              the duty cycle would be reprogrammed as well, but this is
+ *              not enforced.
+ *          (3) PROG_FREQ_AND_RANGE: Indicates whether or not the caller
+ *              desires to program a new PWM setting (div and resolution).
+ *          (4) SOURCE_CLOCK: Indicates whether the PCLK or XTAL is used
+ *              as the PWM clock source. GT21x and better.
+ *
+ *      head (IN)
+ *          The head from which the pixel clock is sourced.
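+ *
+ * For example, to reprogram only the duty cycle to 50% (a minimal sketch;
+ * the NvRmControl wrapper and handle names are assumptions, and DRF_DEF is
+ * assumed to be the generic bit-field helper from nvmisc.h, since the flag
+ * defines below follow the DRF bit-range convention):
+ *
+ *     NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS pwm = { 0 };
+ *     pwm.orNumber  = 0;
+ *     pwm.dutyCycle = 512; // duty cycle is expressed in the range 0-1024
+ *     pwm.flags     = DRF_DEF(5070_CTRL_CMD, _SET_SOR_PWM,
+ *                             _FLAGS_PROG_DUTY_CYCLE, _YES);
+ *     status = NvRmControl(hClient, hDisplay, NV5070_CTRL_CMD_SET_SOR_PWM,
+ *                          &pwm, sizeof(pwm));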
+ *
+ * Possible status values returned are:
+ *      NV_OK
+ *      NV_ERR_INVALID_ARGUMENT
+ *      NV_ERR_GENERIC
+ */
+#define NV5070_CTRL_CMD_SET_SOR_PWM (0x50700421) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_USE_SPECIFIED_DIV       0:0
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_USE_SPECIFIED_DIV_NO    (0x00000000)
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_USE_SPECIFIED_DIV_YES   (0x00000001)
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_DUTY_CYCLE         1:1
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_DUTY_CYCLE_NO      (0x00000000)
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_DUTY_CYCLE_YES     (0x00000001)
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_FREQ_AND_RANGE     2:2
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_FREQ_AND_RANGE_NO  (0x00000000)
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_FREQ_AND_RANGE_YES (0x00000001)
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_SOURCE_CLOCK            3:3
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_SOURCE_CLOCK_XTAL       (0x00000000)
+#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_SOURCE_CLOCK_PCLK       (0x00000001)
+
+#define NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS_MESSAGE_ID (0x21U)
+
+typedef struct NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 orNumber;
+    NvU32 targetFreq;
+    NvU32 actualFreq;
+    NvU32 div;        // equivalent of NV_PDISP_SOR_PWM_DIV_DIVIDE
+    NvU32 resolution; // equivalent of NV_PDISP_SOR_PWM_DIV_RANGE
+    NvU32 dutyCycle;
+    NvU32 flags;
+    NvU32 head;
+} NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS;
+
+
+/*
+ * NV5070_CTRL_CMD_GET_SOR_OP_MODE
+ *
+ * This command returns current settings for the specified SOR.
+ *
+ *      orNumber
+ *          The OR number for which the operating mode needs to be read.
+ *
+ *      category
+ *          Whether LVDS or CSTM settings are desired.
+ *
+ *      puTxda
+ *          Status of the data pins of link A.
+ *
+ *      puTxdb
+ *          Status of the data pins of link B.
+ *
+ *      puTxca
+ *          Status of link A clock.
+ *
+ *      puTxcb
+ *          Status of link B clock.
+ *
+ *      upper
+ *          Whether LVDS bank A is the upper, odd, or first pixel.
+ *
+ *      mode
+ *          Current protocol.
+ *
+ *      linkActA
+ *          Status of the digital logic of link A.
+ *
+ *      linkActB
+ *          Status of the digital logic of link B.
+ *
+ *      lvdsEn
+ *          Output driver configuration.
+ *
+ *      lvdsDual
+ *          Whether LVDS dual-link mode is turned on or not.
+ *
+ *      dupSync
+ *          Whether DE, HSYNC, and VSYNC are used for encoding instead of
+ *          RES, CNTLE, and CNTLF.
+ *
+ *      newMode
+ *          Whether new or old mode is being used.
+ *
+ *      balanced
+ *          Whether balanced encoding is enabled.
+ *
+ *      plldiv
+ *          Feedback divider for the hi-speed pll.
+ *
+ *      rotClk
+ *          Skew of the TXC clock.
+ * + * rotDat + * How much are the 8 bits of each color channel rotated by + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE (0x50700422) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY_LVDS 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY_CUSTOM 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1 1:1 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2 2:2 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3 3:3 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1 1:1 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2 2:2 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3 3:3 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_UPPER 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_UPPER_UPPER_RESET 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE_LVDS 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE_TMDS 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL_ENABLE 
0x00000001
+
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC         0:0
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC_DISABLE 0x00000000
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC_ENABLE  0x00000001
+
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE         0:0
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE_DISABLE 0x00000000
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE_ENABLE  0x00000001
+
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED         0:0
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED_DISABLE 0x00000000
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED_ENABLE  0x00000001
+
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV           0:0
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV_BY_7      0x00000000
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV_BY_10     0x00000001
+
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_ROTCLK           3:0
+
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_ROTDAT           2:0
+
+#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS_MESSAGE_ID (0x22U)
+
+typedef struct NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 orNumber;
+
+    NvU32 category;
+    NvU32 puTxda;
+    NvU32 puTxdb;
+    NvU32 puTxca;
+    NvU32 puTxcb;
+    NvU32 upper;
+    NvU32 mode;
+    NvU32 linkActA;
+    NvU32 linkActB;
+    NvU32 lvdsEn;
+    NvU32 lvdsDual;
+    NvU32 dupSync;
+    NvU32 newMode;
+    NvU32 balanced;
+    NvU32 plldiv;
+    NvU32 rotClk;
+    NvU32 rotDat;
+} NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS;
+
+
+/*
+ * NV5070_CTRL_CMD_SET_SOR_OP_MODE
+ *
+ * This command applies the specified settings to the specified SOR.
+ *
+ *      orNumber
+ *          The OR number for which the operating mode needs to be
+ *          programmed. Note that if the DCB doesn't report LVDS for the
+ *          specified orNumber, the call will return failure.
+ *
+ *      category
+ *          Whether LVDS or CSTM settings are specified.
+ *
+ *      puTxda
+ *          Used to enable or disable the data pins of link A.
+ *
+ *      puTxdb
+ *          Used to enable or disable the data pins of link B.
+ *
+ *      puTxca
+ *          Used to enable or disable link A clock.
+ *
+ *      puTxcb
+ *          Used to enable or disable link B clock.
+ *
+ *      upper
+ *          Whether LVDS bank A should be the upper, odd, or first pixel.
+ *
+ *      mode
+ *          Which protocol (LVDS or TMDS) to use.
+ *
+ *      linkActA
+ *          Used to enable or disable the digital logic of link A.
+ *
+ *      linkActB
+ *          Used to enable or disable the digital logic of link B.
+ *
+ *      lvdsEn
+ *          Output driver configuration.
+ *
+ *      lvdsDual
+ *          Whether to turn on LVDS dual-link mode.
+ *
+ *      dupSync
+ *          Whether to use DE, HSYNC, and VSYNC for encoding instead of
+ *          RES, CNTLE, and CNTLF.
+ *
+ *      newMode
+ *          Whether to use new or old mode.
+ *
+ *      balanced
+ *          Whether or not to use balanced encoding.
+ *
+ *      plldiv
+ *          Feedback divider to use for the hi-speed pll.
+ *
+ *      rotClk
+ *          How much to skew the TXC clock.
+ *
+ *      rotDat
+ *          How much to rotate the 8 bits of each color channel by.
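+ *
+ * As an illustration, enabling dual-link LVDS might look like this (a
+ * minimal sketch; only a few of the fields are shown, and the NvRmControl
+ * wrapper and handle names are assumptions, not part of this header):
+ *
+ *     NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS op = { 0 };
+ *     op.orNumber = 0;
+ *     op.category = NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_LVDS;
+ *     op.mode     = NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_LVDS;
+ *     op.lvdsEn   = NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN_ENABLE;
+ *     op.lvdsDual = NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL_ENABLE;
+ *     status = NvRmControl(hClient, hDisplay,
+ *                          NV5070_CTRL_CMD_SET_SOR_OP_MODE,
+ *                          &op, sizeof(op));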
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE (0x50700423) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_LVDS 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_CUSTOM 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1 1:1 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2 2:2 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3 3:3 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1 1:1 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2 2:2 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3 3:3 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_UPPER 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_UPPER_UPPER_RESET 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_LVDS 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_TMDS 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC 0:0 +#define 
NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC_DISABLE 0x00000000
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC_ENABLE  0x00000001
+
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE         0:0
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE_DISABLE 0x00000000
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE_ENABLE  0x00000001
+
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED         0:0
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED_DISABLE 0x00000000
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED_ENABLE  0x00000001
+
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV           0:0
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV_BY_7      0x00000000
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV_BY_10     0x00000001
+
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_ROTCLK           3:0
+
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_ROTDAT           2:0
+
+#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS_MESSAGE_ID (0x23U)
+
+typedef struct NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32 orNumber;
+
+    NvU32 category;
+    NvU32 puTxda;
+    NvU32 puTxdb;
+    NvU32 puTxca;
+    NvU32 puTxcb;
+    NvU32 upper;
+    NvU32 mode;
+    NvU32 linkActA;
+    NvU32 linkActB;
+    NvU32 lvdsEn;
+    NvU32 lvdsDual;
+    NvU32 dupSync;
+    NvU32 newMode;
+    NvU32 balanced;
+    NvU32 plldiv;
+    NvU32 rotClk;
+    NvU32 rotDat;
+} NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_GET_PIOR_OP_MODE
+ *
+ * This command returns current settings for the specified PIOR.
+ *
+ *      orNumber
+ *          The OR number for which the operating mode needs to be read.
+ *
+ *      category
+ *          Whether ext TMDS, TV, DRO or DRI settings are desired.
+ *          EXT TV is not supported at the moment.
+ *          EXT DisplayPort is specified through EXT 10BPC 444.
+ *
+ *      clkPolarity
+ *          Whether or not the output clock is inverted relative to the
+ *          generated clock.
+ *
+ *      clkMode
+ *          Whether the data being transmitted is SDR or DDR.
+ *
+ *      clkPhs
+ *          Position of the edge on which data is launched.
+ *
+ *      unusedPins
+ *          Status of the unused pins of this PIOR.
+ *
+ *      polarity
+ *          Whether or not sync and DE pin polarities are inverted.
+ *
+ *      dataMuxing
+ *          How the bits are multiplexed together.
+ *
+ *      clkDelay
+ *          Extra delay for the clock.
+ *
+ *      dataDelay
+ *          Extra delay for the data.
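+ *
+ * A caller might read back the current PIOR configuration like this (a
+ * minimal sketch; the NvRmControl wrapper and handle names are assumptions,
+ * not part of this header):
+ *
+ *     NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS pior = { 0 };
+ *     pior.orNumber = 0;
+ *     status = NvRmControl(hClient, hDisplay,
+ *                          NV5070_CTRL_CMD_GET_PIOR_OP_MODE,
+ *                          &pior, sizeof(pior));
+ *     // on success, pior.category, pior.clkMode, etc. reflect the hw state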
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE (0x50700430) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY 2:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_EXT_TMDS 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_EXT_TV 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_DRO 0x00000003 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_DRI 0x00000004 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_EXT_10BPC_444 0x00000005 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_POLARITY 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_POLARITY_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_POLARITY_INV 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_MODE 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_MODE_SDR 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_MODE_DDR 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS 1:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_0 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_1 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_2 0x00000002 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_3 0x00000003 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_UNUSED_PINS 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_UNUSED_PINS_LO 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_UNUSED_PINS_TS 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_H 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_H_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_H_INV 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_V 1:1 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_V_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_V_INV 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_DE 2:2 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_DE_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_DE_INV 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING 3:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_RGB_0 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_RGB_1 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_DIST_RNDR 0x00000003 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_YUV_0 0x00000004 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_UYVY 0x00000005 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_DLY 2:0 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_DLY 2:0 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 category; + NvU32 clkPolarity; + NvU32 clkMode; + NvU32 clkPhs; + NvU32 unusedPins; + NvU32 polarity; + NvU32 dataMuxing; + NvU32 clkDelay; + NvU32 dataDelay; +} NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_PIOR_OP_MODE + * + * This command applies the specified settings to the specified PIOR. + * + * orNumber + * The OR number for which the operating mode needs to be programmed. + * + * category + * Whether ext TMDS, TV, DRO or DRI settings are to be programmed. + * EXT TV is not supported at the moment. + * EXT DisplayPort is specified through EXT 10BPC 444. 
+ * + * clkPolarity + * Whether or not to invert output clock relative to generated clock. + * + * clkMode + * Whether data being transmitted should be SDR or DDR. + * + * clkPhs + * Position of the edge on which data should be launched. + * + * unusedPins + * What to do with unused pins of this PIOR. + * + * polarity + * Whether or not to invert sync and DE pin polarities. + * + * dataMuxing + * How to multiplex the bits together. + * + * clkDelay + * Extra delay for the clock. + * + * dataDelay + * Extra delay for the data. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE (0x50700431) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY 2:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_EXT_TMDS 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_EXT_TV 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_DRO 0x00000003 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_DRI 0x00000004 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_EXT_10BPC_444 0x00000005 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_POLARITY 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_POLARITY_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_POLARITY_INV 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_MODE 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_MODE_SDR 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_MODE_DDR 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS 1:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_0 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_1 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_2 0x00000002 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_3 0x00000003 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_UNUSED_PINS 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_UNUSED_PINS_LO 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_UNUSED_PINS_TS 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_H 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_H_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_H_INV 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_V 1:1 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_V_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_V_INV 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_DE 2:2 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_DE_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_DE_INV 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_RGB_0 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_RGB_1 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_DIST_RNDR 0x00000003 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_YUV_0 0x00000004 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_UYVY 0x00000005 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_DLY 2:0 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_DLY 2:0 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_MASTER 1:0 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET 2:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET_NEITHER 0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET_A 1 +#define 
NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET_B 2 + + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS_MESSAGE_ID (0x31U) + +typedef struct NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 category; + NvU32 clkPolarity; + NvU32 clkMode; + NvU32 clkPhs; + NvU32 unusedPins; + NvU32 polarity; + NvU32 dataMuxing; + NvU32 clkDelay; + NvU32 dataDelay; + NvU32 dro_master; + NvU32 dro_drive_pin_set; +} NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE + * + * Set the given SOR number into flush mode in preparation for DP link training. + * + * orNumber [in] + * The SOR number to set into flush mode. + * + * bEnable [in] + * Whether to enable or disable flush mode on this SOR. + * + * bImmediate [in] + * If set to true, will enable flush in immediate mode. + * If not, will enable flush in loadv mode. + * NOTE: We do not support exit flush in LoadV mode. + * + * headMask [in] + * Optional. If set, only the heads in the head mask are brought out of + * flush; the SOR will stay in flush mode until the last head is out of + * flush mode. + * Caller can use _HEAD__ALL to specify that all the heads are to be + * brought out. + * NOTE: headMask is considered only while exiting from flush mode. + * + * bForceRgDiv [in] + * If set, forces RgDiv. Should be used only for HW/SW testing. + * + * bUseBFM [in] + * If set, we are using BFM; otherwise we are executing on non-BFM + * platforms. + * + * bFireAndForget [in] + * Fire the flush mode and perform post-processing without waiting for it + * to be done. This is required for special cases like GC5 where we have + * ELV blocked and RG stalled: we trigger flush for one-shot mode and + * then do a modeset that disables it without actually waiting for it to + * get disabled. We will not get any vblank interrupt in this case as we + * have stalled RG. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE (0x50700457) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_MESSAGE_ID (0x57U) + +typedef struct NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 sorNumber; + NvBool bEnable; + NvBool bImmediate; + NvU32 headMask; + NvBool bForceRgDiv; + NvBool bUseBFM; + NvBool bFireAndForget; +} NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS; + +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD(i) (i):(i) +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD__SIZE_1 NV5070_CTRL_CMD_MAX_HEADS +#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD_ALL 0xFFFFFFFF + + + +/* _ctrl5070or_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h new file mode 100644 index 0000000..64269d8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h @@ -0,0 +1,578 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070rg.finn +// + +#include "ctrl/ctrl5070/ctrl5070base.h" + + + +/* + * NV5070_CTRL_CMD_GET_RG_STATUS + * + * This 'get' command returns the status of the raster generator. + * + * head + * The head for which RG status is desired. + * + * scanLocked + * Whether or not RG is scan (raster or frame) locked. + * flipLocked + * Whether or not RG is flip locked. + * rgStalled + * Whether or not RG is stalled. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_STATUS (0x50700202) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_STATUS_STALLED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_STATUS_STALLED_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 scanLocked; // [OUT] + NvU32 flipLocked; // [OUT] + NvU32 rgStalled; // [OUT] +} NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS; + +/* + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS + * + * This structure contains data for the + * NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP and + * NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP CTRL calls. + * + * head + * The head for which RG underflow properties need to be set/get. + * Valid values for this parameter are 0 to NV5070_CTRL_CMD_MAX_HEADS. + * enable + * _SET_RG_: Whether to enable or disable RG underflow reporting. + * _GET_RG_: Whether or not RG underflow reporting is enabled. + * underflow + * _SET_RG_: Clear underflow (CLEAR_UNDERFLOW_YES) or leave it alone + * (CLEAR_UNDERFLOW_NO). + * Note: The GET_RG function automatically clears the underflow. + * It is recommended that GET_RG be used to clear any initial + * underflows, and that the "underflow" field be set to + * CLEAR_UNDERFLOW_NO in any SET_RG calls. This field may be + * deprecated in the future, for SET_RG calls.
+ * _GET_RG_: UNDERFLOWED_YES if an RG underflow occurred since the most + * recent prior call to NV5070_CTRL_CMD_GET_RG_STATUS. + * epfifoUnderflow + * _SET_RG_: Not used. + * _GET_RG_: EPFIFO_UNDERFLOWED_YES if an EPFIFO underflow occurred since + * the most recent prior call to NV5070_CTRL_CMD_GET_RG_STATUS. + * mode + * _SET_RG_: What mode to use when underflow occurs. This is + * independent of the enable field. This is always active. + * _GET_RG_: What mode is used when underflow occurs. This is + * independent of the enable field. This is always active. + */ +typedef struct NV5070_CTRL_CMD_UNDERFLOW_PARAMS { + NvU32 head; + NvU32 enable; + NvU32 underflow; + NvU32 epfifoUnderflow; + NvU32 mode; +} NV5070_CTRL_CMD_UNDERFLOW_PARAMS; + +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_UNDERFLOWED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_UNDERFLOWED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_EPFIFO_UNDERFLOWED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_EPFIFO_UNDERFLOWED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_REPEAT (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_RED (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_YES (0x00000001) + +/* + * NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP + * + * This command returns the underflow reporting parameters inside the + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * underflowParams + * Contains data for underflow logging. + * See the NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP (0x50700203) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NV5070_CTRL_CMD_UNDERFLOW_PARAMS underflowParams; +} NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP + * + * This command sets up the underflow parameters using the + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * underflowParams + * Contains data for underflow logging. + * See the NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP (0x50700204) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NV5070_CTRL_CMD_UNDERFLOW_PARAMS underflowParams; +} NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS; + + +/* + * NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP + * + * This command gets the timing parameters associated with the lockout period. + * + * head + * The head for which RG fliplock properties are desired.
+ * + * maxSwapLockoutSkew + * The maximum possible skew between the swap lockout signals for all + * heads which are fliplocked to this head. + * + * swapLockoutStart + * Determines the start of the swap lockout period, expressed as the + * number of lines before the end of the frame. The minimum allowed + * value is 1. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP (0x50700205) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW 9:0 + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START 15:0 + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 maxSwapLockoutSkew; + NvU32 swapLockoutStart; +} NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP + * + * This command sets the timing parameters associated with the lockout period. + * + * head + * The head for which RG fliplock properties are desired. + * + * maxSwapLockoutSkew + * The maximum possible skew between the swap lockout signals for all + * heads which are fliplocked to this head. + * + * swapLockoutStart + * Determines the start of the swap lockout period, expressed as the + * number of lines before the end of the frame. The minimum allowed + * value is 1. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP (0x50700206) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW 9:0 +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT (0x00000000) + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START 15:0 +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT (0x00000000) + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 maxSwapLockoutSkew; + NvU32 swapLockoutStart; +} NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN + * + * This command returns which lockpin has been connected for the specified + * subdevice in the current SLI and/or framelock configuration.
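+ * + * Each of the lock fields returned below packs a _CONNECTED bit (0:0) and + * a _PIN index (3:1), so a caller might decode them as in this illustrative + * sketch (it assumes the DRF helpers from nvmisc.h): + * + * if (FLD_TEST_DRF(5070_CTRL_CMD, _GET_RG_CONNECTED_LOCKPIN, + * _FLIP_LOCK_CONNECTED, _YES, params.flipLock)) + * { + * NvU32 pin = DRF_VAL(5070_CTRL_CMD, _GET_RG_CONNECTED_LOCKPIN, + * _FLIP_LOCK_PIN, params.flipLock); + * }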
+ * + * head + * The head with which the locking is associated. + * + * masterScanLock + * Indicates the connection status and pin number of the master scanlock. + * + * slaveScanLock + * Indicates the connection status and pin number of the slave scanlock. + * + * flipLock + * Indicates the connection status and pin number of the fliplock. + * + * stereoLock + * Indicates the connection status and pin number of the stereo lock. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN (0x50700207) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 masterScanLock; + NvU32 slaveScanLock; + NvU32 flipLock; + NvU32 stereoLock; +} NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_VIDEO_STATUS + * + * This command is used to set the current video playback status for use + * by the Display Power Saving (nvDPS) feature. The playback status is + * used to maximize power savings by altering the DFP refresh rate used for + * video playback. + * + * displayId + * This parameter specifies the ID of the video playback display. + * Only one display may be indicated in this parameter. + * clientId + * This parameter specifies the opaque client ID associated with + * the video playback application. + * mode + * This parameter specifies the video playback mode. Valid values + * for this parameter include: + * NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_NON_FULLSCREEN + * This value indicates that there is either no video playback or + * that video playback is windowed. + * NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_FULLSCREEN + * This value indicates that video playback is fullscreen. + * NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_D3D + * This value indicates that a D3D app has started. + * frameRate + * The parameter indicates the current video playback frame rate.
+ * The value is a 32 bit unsigned fixed point number, with a 24 bit + * unsigned integer part (bits 31:8) and 8 fraction bits (bits 7:0), + * measured in number of frames per second. + * A value of 0 indicates that video playback is stopped or not playing. + * frameRateAlarmUpperLimit + * The parameter indicates the upper limit that can be tolerated before + * notifying a frame rate change. If the frame rate changes but is still + * below this limit, the new frame rate does not have to be set until it + * goes over the limit. + * The value is a 32 bit unsigned fixed point number, with a 24 bit + * unsigned integer part (bits 31:8) and 8 fraction bits (bits 7:0), + * measured in number of frames per second. + * A value of 0 indicates no tolerance in frame rate notification: the + * instant frame rate has to be set whenever it changes. + * frameRateAlarmLowerLimit + * The parameter indicates the lower limit that can be tolerated before + * notifying a frame rate change. If the frame rate changes but is still + * above this limit, the new frame rate does not have to be set until it + * falls below the limit. + * The value is a 32 bit unsigned fixed point number, with a 24 bit + * unsigned integer part (bits 31:8) and 8 fraction bits (bits 7:0), + * measured in number of frames per second. + * A value of 0 indicates no tolerance in frame rate notification: the + * instant frame rate has to be set whenever it changes. + * + * The frameRateAlarm limit values can be used by the video client to + * indicate the range in which frame rate changes do not require + * notification (i.e. frame rates outside these limits will result in + * notification). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV5070_CTRL_CMD_SET_VIDEO_STATUS (0x50700209) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + + NvU32 displayId; + NvU32 clientId; + NvU32 mode; + NvU32 frameRate; + NvU32 frameRateAlarmUpperLimit; + NvU32 frameRateAlarmLowerLimit; +} NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS; + +/* valid mode flags */ +#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_NON_FULLSCREEN (0x00000000) +#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_FULLSCREEN (0x00000001) +#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_D3D (0x00000002) + +/* + * NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS + * + * This command returns which set of lockpins needs to be used in order to + * successfully raster lock two heads on different GPUs together. The + * second GPU is not inferred from linked SLI state, if any, and needs to + * be specified explicitly. + * + * head + * The local head to be locked with the peer head. + * + * peer.hDisplay + * The handle identifying a display object allocated on another + * GPU. It specifies the peer of interest with a subdevice + * index (see below) and needs to be distinct from the handle + * supplied directly to NvRmControl(). + * + * peer.subdeviceIndex + * The index of the peer subdevice of interest. + * + * peer.head + * The peer head to be locked with the local head. + * + * masterScanLockPin + * slaveScanLockPin + * Returns the master and slave scanlock pins that would need to + * be used to lock the specified heads together, if any.
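+ * + * A minimal usage sketch (illustrative only; it assumes the usual + * NvRmControl() entry point and already-allocated display objects on both + * GPUs, with the handle names chosen here purely for illustration): + * + * NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS p = { 0 }; + * p.head = 0; // local head 0 + * p.peer.hDisplay = hPeerDisplay; // display object on the other GPU + * p.peer.subdeviceIndex = 0; + * p.peer.head = 0; // peer head 0 + * status = NvRmControl(hClient, hDisplay, + * NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + * &p, sizeof(p));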
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_OBJECT_PARENT + */ +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS (0x5070020a) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_PIN 2:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_PIN 2:1 + +#define NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + struct { + NvHandle hDisplay; + NvU32 subdeviceIndex; + NvU32 head; + } peer; + + NvU32 masterScanLock; + NvU32 slaveScanLock; +} NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_PINSET_LOCKPINS + * + * Get the lockpins for the specified pinset. + * + * pinset [in] + * The pinset whose corresponding lockpin numbers need to be determined + * must be specified with this parameter. + * + * scanLockPin [out] + * The scanlock lockpin (rasterlock or framelock) index, which can be + * either master or slave, is returned in this parameter. + * + * flipLockPin [out] + * The fliplock lockpin index is returned in this parameter. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_GET_PINSET_LOCKPINS (0x5070020b) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_SCAN_LOCK_PIN_NONE 0xffffffff + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_FLIP_LOCK_PIN_NONE 0xffffffff + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 pinset; + NvU32 scanLockPin; + NvU32 flipLockPin; +} NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_RG_SCAN_LINE + * + * This 'get' command returns the current scan line value from the raster + * generator. + * + * head + * The head for which the current scan line number is desired. + * + * scanLine + * Current scan line number. + * + * inVblank + * Whether or not in vblank.
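+ * + * For instance, a caller could poll this control to wait for the next + * vblank (illustrative sketch only; the handle names are assumptions): + * + * NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS p = { 0 }; + * p.head = 0; + * do + * { + * status = NvRmControl(hClient, hDisplay, + * NV5070_CTRL_CMD_GET_RG_SCAN_LINE, + * &p, sizeof(p)); + * } while ((status == NV_OK) && + * (p.inVblank == NV5070_CTRL_CMD_GET_RG_SCAN_LINE_IN_VBLANK_NO));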
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE (0x5070020c) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_IN_VBLANK_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_IN_VBLANK_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + NvU32 scanLine; // [OUT] + NvU32 inVblank; // [OUT] +} NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS + * + * This command returns FrameLock header lock pin information. + * Lock pin index returned by this command corresponds to the + * evo lock pin number. Example - lock pin index 0 means + * LOCKPIN_0. + * + * frameLockPin [out] + * This parameter returns the FrameLock pin index + * connected to FrameLock header. + * + * rasterLockPin [out] + * This parameter returns the RasterLock pin index + * connected to FrameLock header. + * + * flipLockPin [out] + * This parameter returns the FlipLock pin index + * connected to FrameLock header. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS (0x5070020d) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_FRAME_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_RASTER_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_FLIP_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 frameLockPin; + NvU32 rasterLockPin; + NvU32 flipLockPin; +} NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS; + + + +/* _ctrl5070rg_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h new file mode 100644 index 0000000..0da723f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h @@ -0,0 +1,521 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070seq.finn +// + +#include "ctrl/ctrl5070/ctrl5070base.h" + +/* + * NV5070_CTRL_CMD_GET_SOR_SEQ_CTL + * + * This command returns the SOR sequencer's power up and down PCs and the + * sequencer program to be used for power up and down. + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * puPcAlt + * Alternate power up PC. + * + * pdPc + * Power down PC. + * + * pdPcAlt + * Alternate power down PC. + * + * normalStart + * Whether normal mode is using normal or alt PC. + * + * safeStart + * Whether safe mode is using normal or alt PC. + * + * normalState + * Whether normal state is PD or PU. + * + * safeState + * Whether safe state is PD or PU. + * + * flags + * There is only one flag defined currently: + * 1. GET_SEQ_PROG: Whether or not the current seq program must be + * returned. Caller should set this to _YES to read the + * current seq program. + * + * seqProgram + * The sequencer program consisting of power up and down sequences. + * For NV50, this consists of 16 DWORDS. The program is + * relevant only when the GET_SEQ_PROG flag is set to _YES. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL (0x50700301U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PU_PC_ALT_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PD_PC_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PD_PC_ALT_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL 0:0 +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL_ALT (0x00000001U) + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL 0:0 +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL_ALT (0x00000001U) + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PU (0x00000001U) + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL_PU (0x00000001U) + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG 0:0 +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_NO (0x00000000U) +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES (0x00000001U) + +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SEQ_PROG_SIZE 16U +#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 puPcAlt; + NvU32 pdPc; + NvU32 pdPcAlt; +
NvU32 normalStart; + NvU32 safeStart; + NvU32 normalState; + NvU32 safeState; + NvU32 flags; + NvU32 seqProgram[NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SEQ_PROG_SIZE]; +} NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_SOR_SEQ_CTL + * + * This command does the following, in the following order: + * (a) Loads a specified sequencer program for power up and down. + * (b) Updates SOR sequencer's power up and down PCs, tells seq to SKIP + * current wait for vsync and waits until sequencer actually SKIPs or halts + * (see more below under SKIP_WAIT_FOR_VSYNC flag) and + * (c) Updates power settings (safe/normal start and state). + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * puPcAlt + * Alternate power up PC. + * + * pdPc + * Power down PC. + * + * pdPcAlt + * Alternate power down PC. + * + * normalStart + * Whether normal mode should use normal or alt PC. + * + * safeStart + * Whether safe mode should use normal or alt PC. + * + * normalState + * Whether normal state should be PD or PU. + * + * safeState + * Whether safe state should be PD or PU. + * + * flags + * The following flags have been defined: + * 1. SKIP_WAIT_FOR_VSYNC: Whether seq should be forced to skip waiting + * for vsync if it's currently waiting on such an instruction. + * If the current instruction doesn't have a wait for vsync, + * SKIP will be applied to the next one and so on until + * either sequencer halts or an instruction with a wait for + * vsync is found. The call will block until seq halts or + * SKIPs a wait for vsync. + * 2. SEQ_PROG_PRESENT: Whether or not a new seq program has + * been specified. + * + * seqProgram + * The sequencer program consisting of power up and down sequences. + * For NV50, this consists of 16 DWORDS. The program is + * relevant only when the SEQ_PROG_PRESENT flag is set to _YES.
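+ * + * Note that each PC, start and state field pairs its value with a + * _SPECIFIED bit (31:31), presumably so a caller can update only a subset + * of fields; a hedged sketch of building two fields, assuming the DRF + * helpers from nvmisc.h: + * + * p.pdPc = DRF_NUM(5070_CTRL_CMD, _SET_SOR_SEQ_CTL_PD_PC, _VALUE, 4) | + * DRF_DEF(5070_CTRL_CMD, _SET_SOR_SEQ_CTL_PD_PC, _SPECIFIED, _YES); + * p.flags = DRF_DEF(5070_CTRL_CMD, _SET_SOR_SEQ_CTL_FLAGS, + * _SKIP_WAIT_FOR_VSYNC, _YES);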
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL (0x50700302U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PU (0x00000001U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL_PU (0x00000001U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC 0:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT 1:1 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_YES (0x00000001U) + + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SEQ_PROG_SIZE 16U +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x2U) + +typedef struct 
NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 puPcAlt; + NvU32 pdPc; + NvU32 pdPcAlt; + NvU32 normalStart; + NvU32 safeStart; + NvU32 normalState; + NvU32 safeState; + NvU32 flags; + NvU32 seqProgram[NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SEQ_PROG_SIZE]; +} NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL + * + * This command returns the PIOR sequencer's power up and down PCs and the + * sequencer program to be used for power up and down. + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * puPcAlt + * Alternate power up PC. + * + * pdPc + * Power down PC. + * + * pdPcAlt + * Alternate power down PC. + * + * normalStart + * Whether normal mode is using normal or alt PC. + * + * safeStart + * Whether safe mode is using normal or alt PC. + * + * normalState + * Whether normal state is PD or PU. + * + * safeState + * Whether safe state is PD or PU. + * + * flags + * There is only one flag defined currently: + * 1. GET_SEQ_PROG: Whether or not the current seq program must be + * returned. Caller should set this to _YES to read the + * current seq program. + * + * seqProgram + * The sequencer program consisting of power up and down sequences. + * For NV50, this consists of 8 DWORDS. The program is + * relevant only when the GET_SEQ_PROG flag is set to _YES. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL (0x50700303U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PU_PC_ALT_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PD_PC_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PD_PC_ALT_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL_ALT (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL_ALT (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PU (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PU (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_NO (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SEQ_PROG_SIZE 8U +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 puPcAlt; + NvU32 pdPc; + NvU32 pdPcAlt; + NvU32 normalStart; + NvU32 safeStart; + NvU32 normalState; + NvU32 safeState; + NvU32 flags; + NvU32 seqProgram[NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SEQ_PROG_SIZE]; +} NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL + * + * This command does the following, in the following order: + * (a) Loads
a specified sequencer program for power up and down. + * (b) Updates PIOR sequencer's power up and down PCs, tells seq to SKIP + * current wait for vsync and waits until sequencer actually SKIPs or halts + * (see more below under SKIP_WAIT_FOR_VSYNC flag) and + * (c) Updates power settings (safe/normal start and state). + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * puPcAlt + * Alternate power up PC. + * + * pdPc + * Power down PC. + * + * pdPcAlt + * Alternate power down PC. + * + * normalStart + * Whether normal mode should use normal or alt PC. + * + * safeStart + * Whether safe mode should use normal or alt PC. + * + * normalState + * Whether normal state should be PD or PU. + * + * safeState + * Whether safe state should be PD or PU. + * + * flags + * The following flags have been defined: + * 1. SKIP_WAIT_FOR_VSYNC: Whether seq should be forced to skip waiting + * for vsync if it's currently waiting on such an instruction. + * If the current instruction doesn't have a wait for vsync, + * SKIP will be applied to the next one and so on until + * either sequencer halts or an instruction with a wait for + * vsync is found. The call will block until seq halts or + * SKIPs a wait for vsync. + * 2. SEQ_PROG_PRESENT: Whether or not a new seq program has + * been specified. + * + * seqProgram + * The sequencer program consisting of power up and down sequences. + * For NV50, this consists of 8 DWORDS. The program is + * relevant only when the SEQ_PROG_PRESENT flag is set to _YES. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL (0x50700304U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_YES (0x00000001U) + + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED_NO (0x00000000U) +#define
NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PU (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PU (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT 1:1 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SEQ_PROG_SIZE 8U +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 puPcAlt; + NvU32 pdPc; + NvU32 pdPcAlt; + NvU32 normalStart; + NvU32 safeStart; + NvU32 normalState; + NvU32 safeState; + NvU32 flags; + NvU32 seqProgram[NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SEQ_PROG_SIZE]; +} NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS; + +/* + * NV5070_CTRL_CMD_CTRL_SEQ_PROG_SPEED + * + * This call allows a fast sequencer program to be selected. It's intended for + * situations where panel sequencing is not required and the usual sequencing + * delays cost too much time. + * + * displayId + * The corresponding display ID. (Note that this call is currently only + * supported for LVDS on an internal encoder, i.e. a SOR.) + * cmd + * The command to perform. Valid values are: + * NV5070_CTRL_SEQ_PROG_SPEED_CMD_GET + * Get the current state. + * NV5070_CTRL_SEQ_PROG_SPEED_CMD_SET + * Set the current state. + * state + * The state of panel sequencing for this displayId. This is an input + * when cmd = SET and an output when cmd = GET. 
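+ * + * A minimal sketch of selecting the fast program (illustrative only; the + * NvRmControl() entry point and the handle and displayId variables are + * assumptions): + * + * NV5070_CTRL_SEQ_PROG_SPEED_PARAMS p = { 0 }; + * p.displayId = displayId; // display ID of the LVDS panel + * p.cmd = NV5070_CTRL_SEQ_PROG_SPEED_CMD_SET; + * p.state = NV5070_CTRL_SEQ_PROG_SPEED_STATE_FAST; + * status = NvRmControl(hClient, hDisplay, + * NV5070_CTRL_CMD_CTRL_SEQ_PROG_SPEED, + * &p, sizeof(p));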
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + * + */ + +#define NV5070_CTRL_CMD_CTRL_SEQ_PROG_SPEED (0x50700305U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_SEQ_PROG_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SEQ_PROG_SPEED_CMD_GET (0x00000000U) +#define NV5070_CTRL_SEQ_PROG_SPEED_CMD_SET (0x00000001U) + +#define NV5070_CTRL_SEQ_PROG_SPEED_STATE_NORMAL (0x00000000U) +#define NV5070_CTRL_SEQ_PROG_SPEED_STATE_FAST (0x00000001U) + +#define NV5070_CTRL_SEQ_PROG_SPEED_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV5070_CTRL_SEQ_PROG_SPEED_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + + NvU32 displayId; + + NvU32 cmd; + NvU32 state; +} NV5070_CTRL_SEQ_PROG_SPEED_PARAMS; + +/* _ctrl5070seq_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h new file mode 100644 index 0000000..bbb07e6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h @@ -0,0 +1,81 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070system.finn +// + +#include "ctrl/ctrl5070/ctrl5070base.h" +#include "ctrl/ctrl5070/ctrl5070common.h" // NV5070_CTRL_CMD_MAX_HEADS + +/* extract cap bit setting from tbl */ +#define NV5070_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV5070_CTRL_SYSTEM_CAPS_BUG_237734_REQUIRES_DMI_WAR 0:0x01 // Deprecated +#define NV5070_CTRL_SYSTEM_CAPS_STEREO_DIN_AVAILABLE 0:0x02 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_381003_MULTIWAY_AFR_WAR 0:0x04 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_538079_COLOR_COMPRESSION_SUPPORTED 0:0x08 // Deprecated +#define NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH 0:0x10 +#define NV5070_CTRL_SYSTEM_CAPS_DEEP_COLOR_SUPPORT 0:0x20 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY 0:0x40 + + +/* size in bytes of display caps table */ +#define NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE 1 + +/* + * NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 + * + * This command returns the set of display capabilities for the parent device + * in the form of an array of unsigned bytes. Display capabilities + * include supported features and required workarounds for the display + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. The set of display capabilities + * will be normalized across all GPUs within the device (a feature capability + * will be set only if it's supported on all GPUs while a required workaround + * capability will be set if any of the GPUs require it). + * + * [out] capsTbl + * This caps table array is where the display cap bits will be transferred + * by the RM. The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + */ +#define NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 (0x50700709) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SYSTEM_INTERFACE_ID << 8) | NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + + NvU8 capsTbl[NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE]; +} NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS; + +/* _ctrl5070system_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h new file mode 100644 index 0000000..6741891 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070verif.finn +// + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h new file mode 100644 index 0000000..29449ab --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h @@ -0,0 +1,174 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90cd.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NV_EVENT_BUFFER control commands and parameters */ + +#define NV_EVENT_BUFFER_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90CD, NV90CD_CTRL_##cat, idx) + +#define NV90CD_CTRL_RESERVED (0x00) +#define NV90CD_CTRL_EVENT (0x01) + +/* +* NV_EVENT_BUFFER_CTRL_CMD_NULL +* +* This command does nothing. +* This command does not take any parameters. +* +* Possible status values returned are: +* NV_OK +*/ +#define NV_EVENT_BUFFER_CTRL_CMD_NULL (0x90cd0000) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* +* NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS +* This interface enables all the events that are associated to the event buffer +*/ +#define NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS (0x90cd0101) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x1" */ + +#define NV_EVENT_BUFFER_FLAG 0:32 + +/* +* NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY +* This flag defines the kernel behavior when the buffer is full +* +* DEFAULT/DISABLED: By default kernel doesn't assume any policy. 
To enable events +* an overflow policy has to be set to retain older or newer events +* +* KEEP_OLDEST: kernel would retain older events and drop newer events if the buffer is full +* +* KEEP_NEWEST: kernel would retain newer events and drop older events if the buffer is full +* +*/ +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY 0:1 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DISABLED 0 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST 1 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST 2 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DEFAULT NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DISABLED + +/* +* NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS +* +* enable [IN] +* This field is used to enable or disable events +* +* flags[IN] +* This field sets NV_EVENT_BUFFER_FLAG parameter used to configure event buffer overflow options +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +*/ +typedef struct NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS { + NvBool enable; + NvU32 flags; +} NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS; + +/* +* NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET +* This interface allows the user to update get pointers. +* This call is useful in the KEEP_OLDEST policy to update free space available in the buffer. +* In keep oldest policy, kernel adds new entries in the buffer only if there is free space. +* The full/empty decision is made as follows: +* - when GET==PUT, the fifo is empty +* - when GET==PUT+1, the fifo is full +*/ +#define NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET (0x90cd0102) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x2" */ + +/* +* NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS +* +* recordBufferGet [IN] +* Value to be used to update the get offset of record buffer +* +* varDataBufferGet[IN] +* This is the buffer offset up to which user has consumed the vardataBuffer +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT: if any of the get offsets is greater than respective bufferSize. +*/ +typedef struct NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS { + NvU32 recordBufferGet; + NvU32 varDataBufferGet; +} NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS; + +/* + * Send a test event-buffer notification (verification-only) + */ +#define NV_EVENT_BUFFER_CTRL_CMD_VERIF_NOTIFY (0x90cd0103) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x3" */ + +/* + * Synchronous flush + */ +#define NV_EVENT_BUFFER_CTRL_CMD_FLUSH (0x90cd0104) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x4" */ + +/* + * post event + */ +#define NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT (0x90cd0105) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x5" */ + + /* + * NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS + * + * eventType [IN] + * the NvTelemetry event type. + * typeVersion [IN] + * the version of the event structure + * eventData [IN] + * an array of 256 bytes used to hold the event data. + * eventDataSz [IN] + * the amount of valid data in the eventData buffer. + * varData [IN] + * an array of 256 bytes used to hold the var data. + * varDataSz [IN] + * the amount of valid data in the varData buffer. 
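+ *
+ * As an illustrative sketch only (memset/memcpy are standard C; the event
+ * type and payload names below are hypothetical, not defined in this file),
+ * a caller would populate the structure like this:
+ *
+ *     NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS p;
+ *     memset(&p, 0, sizeof(p));
+ *     p.eventType   = myEventType;      // hypothetical NvTelemetry event type
+ *     p.typeVersion = 1;                // version of the event structure
+ *     p.eventDataSz = myPayloadSize;    // must not exceed sizeof(p.eventData)
+ *     memcpy(p.eventData, myPayload, myPayloadSize);
+ *     p.varDataSz   = 0;                // no variable-length data in this case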
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ */
+typedef struct NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS {
+    NvU32 eventType;
+    NvU16 typeVersion;
+    NvU8  eventData[256];
+    NvU16 eventDataSz;
+    NvU8  varData[256];
+    NvU16 varDataSz;
+} NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS;
+
+/* _ctrl90cd_h_ */
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
new file mode 100644
index 0000000..00094c1
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
@@ -0,0 +1,124 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl90ec.finn
+//
+
+#include "ctrl/ctrlxxxx.h"
+/* GK104 HDACODEC control commands and parameters */
+
+#define NV90EC_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90EC, NV90EC_CTRL_##cat, idx)
+
+/* NV04_DISPLAY_COMMON command categories (6bits) */
+#define NV90EC_CTRL_RESERVED (0x00)
+#define NV90EC_CTRL_HDACODEC (0x01)
+
+/*
+ * NV90EC_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ */
+#define NV90EC_CTRL_CMD_NULL (0x90ec0000) /* finn: Evaluated from "(FINN_GF100_HDACODEC_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+/*
+ * NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE
+ *
+ * This command sets the CP_READY bit. It informs the RM whether or not
+ * the DD has acted upon the HDCP request issued by the Audio driver.
+ * The DD asks the RM to enable the CP_READY bit (by setting CpReadyEnable
+ * to NV_TRUE) once it is done honouring/dishonouring the request.
+ *
+ * subDeviceInstance
+ * This parameter specifies the subdevice instance within the
+ * NV04_DISPLAY_COMMON parent device to which the operation should be
+ * directed. This parameter must specify a value between zero and the
+ * total number of subdevices within the parent device. This parameter
+ * should be set to zero for default behavior.
+ * displayId
+ * This parameter specifies the ID of the display for which the cp ready
+ * bit should be enabled. The display ID must be a dfp display.
+ * If the displayId is not a dfp, this call will return + * NV_ERR_INVALID_ARGUMENT. + * CpReadyEnable + * This parameter specifies whether to enable (NV_TRUE) or not. If CpReady + * is enabled then AudioCodec can send more HDCP requests. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + * + */ +#define NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE (0x90ec0101) /* finn: Evaluated from "(FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID << 8) | NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS_MESSAGE_ID" */ + +#define NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bCpReadyEnable; +} NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS; + +/* + * NV90EC_CTRL_CMD_HDACODEC_NOTIFY_AUDIO_EVENT + * + * This command notifies Audio of any events to audio + * like notification of PD bit being set. + * + * audioEvent + * This parameter specifies the event type. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + * + */ +#define NV90EC_CTRL_CMD_HDACODEC_NOTIFY_AUDIO_EVENT (0x90ec0102) /* finn: Evaluated from "(FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID << 8) | NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS_MESSAGE_ID" */ + +#define NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS { + NvU32 audioEvent; +} NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS; + +/* + * This command notifies audio driver that PD bit is set by DD, by writing to scratch register + */ +#define NV90EC_CTRL_HDACODEC_AUDIOEVENT_PD_BIT_SET (0x00000001) + +/* _ctrl90ec_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h new file mode 100644 index 0000000..c132cc0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrlc370/ctrlc370base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NVC370_DISPLAY control commands and parameters */ + +#define NVC370_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0XC370, NVC370_CTRL_##cat, idx) + +/* NVC370_DISPLAY command categories (6bits) */ +#define NVC370_CTRL_RESERVED (0x00) +#define NVC370_CTRL_CHNCTL (0x01) +#define NVC370_CTRL_RG (0x02) +#define NVC370_CTRL_SEQ (0x03) +#define NVC370_CTRL_OR (0x04) +#define NVC370_CTRL_INST (0x05) +#define NVC370_CTRL_VERIF (0x06) +#define NVC370_CTRL_SYSTEM (0x07) +#define NVC370_CTRL_EVENT (0x09) + +// This struct must be the first member of all C370 control calls +typedef struct NVC370_CTRL_CMD_BASE_PARAMS { + NvU32 subdeviceIndex; +} NVC370_CTRL_CMD_BASE_PARAMS; + + +/* + * NVC370_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC370_CTRL_CMD_NULL (0xc3700000) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +/* _ctrlc370base_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h new file mode 100644 index 0000000..a231232 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h @@ -0,0 +1,300 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrlc370/ctrlc370chnc.finn
+//
+
+#include "ctrl/ctrlc370/ctrlc370base.h"
+/* C370 is partially derived from 5070 */
+#include "ctrl/ctrl5070/ctrl5070chnc.h"
+
+
+
+
+#define NVC370_CTRL_CMD_CHANNEL_STATE_IDLE              NVBIT(0)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT1        NVBIT(2)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT2        NVBIT(3)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_BUSY              NVBIT(6)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC           NVBIT(7)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO     NVBIT(8)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT1         NVBIT(11)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT2         NVBIT(12)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_OPERATION     NVBIT(13)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT1       NVBIT(14)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT2       NVBIT(15)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_OPERATION   NVBIT(16)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_UNCONNECTED       NVBIT(17)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_INIT1             NVBIT(18)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_INIT2             NVBIT(19)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1         NVBIT(20)
+#define NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2         NVBIT(21)
+
+#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE       1
+#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW     32
+#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM 32
+#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK  8
+#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR     8
+
+/*
+ * NVC370_CTRL_CMD_IDLE_CHANNEL
+ *
+ * This command waits for, or forces, the desired channel state.
+ *
+ * channelClass
+ * This field indicates the hw class number (0xC378 - 0xC37E).
+ * It's defined in the h/w header (e.g., clc37d.h).
+ *
+ * channelInstance
+ * This field indicates which instance of the channelClass the cmd is
+ * meant for. (zero-based)
+ *
+ * desiredChannelStateMask
+ * This field indicates the desired channel states. When more than
+ * one bit is set, RM will return whenever it finds hardware in one
+ * of the states in the bitmask.
+ * Normal options are IDLE, QUIESCENT1 and QUIESCENT2.
+ * The verif-only option includes BUSY as well.
+ * Note:
+ * (1) When QUIESCENT1 or QUIESCENT2 is chosen, only one bit should
+ * be set in the bitmask. RM will ignore any other state.
+ * (2) Accelerators should not be required for QUIESCENT states as
+ * RM tries to ensure QUIESCENT forcibly on its own.
+ *
+ * accelerators
+ * What accelerator bits should be used if RM times out trying to
+ * wait for the desired state. This is not yet implemented since it
+ * should normally not be required to use these. Usage of accelerators
+ * should be restricted and be done very carefully as they may have
+ * undesirable effects.
+ * NOTE: accelerators should not be used directly in production code.
+ *
+ * timeout
+ * Timeout to use when waiting for the desired state. This is also for
+ * future expansion and not yet implemented.
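+ *
+ * As a minimal sketch (the class value and the control-call plumbing are
+ * assumptions, not part of this file), waiting for the first window
+ * channel to reach IDLE might look like:
+ *
+ *     NVC370_CTRL_IDLE_CHANNEL_PARAMS p;
+ *     memset(&p, 0, sizeof(p));                 // standard C
+ *     p.base.subdeviceIndex     = 0;
+ *     p.channelClass            = 0xC37E;       // e.g. a window channel class
+ *     p.channelInstance         = 0;            // zero-based instance
+ *     p.desiredChannelStateMask = NVC370_CTRL_IDLE_CHANNEL_STATE_IDLE;
+ *     // then issue NVC370_CTRL_CMD_IDLE_CHANNEL via the client's control path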
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_TIMEOUT
+ */
+#define NVC370_CTRL_CMD_IDLE_CHANNEL (0xc3700101) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_CORE       NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE
+#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WINDOW     NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW
+#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM
+#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WRITEBACK  NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK
+#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_CURSOR     NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR
+
+#define NVC370_CTRL_IDLE_CHANNEL_STATE_IDLE       NVC370_CTRL_CMD_CHANNEL_STATE_IDLE
+#define NVC370_CTRL_IDLE_CHANNEL_STATE_QUIESCENT1 NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT1
+#define NVC370_CTRL_IDLE_CHANNEL_STATE_QUIESCENT2 NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT2
+
+#define NVC370_CTRL_IDLE_CHANNEL_STATE_BUSY       NVC370_CTRL_CMD_CHANNEL_STATE_BUSY
+
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_NONE             (0x00000000)
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI        (NVBIT(0))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF       (NVBIT(1))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA        (NVBIT(2))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK (NVBIT(3))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK  (NVBIT(4))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY       (NVBIT(5))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT  (NVBIT(6))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SYNCPOINT   (NVBIT(7))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_TIMESTAMP (NVBIT(8))
+#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_MGI       (NVBIT(9))
+
+#define NVC370_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NVC370_CTRL_IDLE_CHANNEL_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       channelClass;
+    NvU32                       channelInstance;
+    NvU32                       desiredChannelStateMask;
+    NvU32                       accelerators;   // For future expansion. Not yet implemented
+    NvU32                       timeout;        // For future expansion. Not yet implemented
+    NvBool                      restoreDebugMode;
+} NVC370_CTRL_IDLE_CHANNEL_PARAMS;
+
+/*
+ * NVC370_CTRL_CMD_SET_ACCL
+ *
+ * This command turns accelerators on and off. The use of this command
+ * should be restricted as it may have undesirable effects. Its
+ * purpose is to provide a mechanism for clients to use the
+ * accelerator bits to get into states that are either not detectable
+ * by the RM or may take longer to reach than we think is reasonable
+ * to wait in the RM.
+ *
+ * NVC370_CTRL_CMD_GET_ACCL
+ *
+ * This command queries the current state of the accelerators.
+ *
+ * channelClass
+ * This field indicates the hw class number (0xC378 - 0xC37E).
+ * It's defined in the h/w header (e.g., clc37d.h).
+ *
+ * channelInstance
+ * This field indicates which instance of the channelClass the cmd is
+ * meant for. (zero-based)
+ *
+ * accelerators
+ * Accelerators to be set in the SET_ACCEL command. Returns the
+ * currently set accelerators on the GET_ACCEL command.
+ */
+
+
+/*
+ *
+ * accelMask
+ * A mask to specify which accelerators to change with the
+ * SET_ACCEL command. This field does nothing in the GET_ACCEL
+ * command.
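+ *
+ * A minimal sketch (control-call plumbing omitted; the class value is an
+ * assumption): setting only the TRASH_ONLY accelerator on core channel
+ * instance 0, while leaving every other accelerator bit unchanged:
+ *
+ *     NVC370_CTRL_SET_ACCL_PARAMS p;
+ *     memset(&p, 0, sizeof(p));
+ *     p.channelClass    = 0xC37D;                      // e.g. the core channel class
+ *     p.channelInstance = 0;
+ *     p.accelerators    = NVC370_CTRL_ACCL_TRASH_ONLY; // new value for the bit
+ *     p.accelMask       = NVC370_CTRL_ACCL_TRASH_ONLY; // only this bit is changed
+ *     // then issue NVC370_CTRL_CMD_SET_ACCL via the client's control path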
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_CHANNEL
+ * NV_ERR_INVALID_OWNER
+ * NV_ERR_GENERIC
+ *
+ */
+
+#define NVC370_CTRL_CMD_SET_ACCL (0xc3700102) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_SET_ACCL_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_CMD_GET_ACCL (0xc3700103) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | 0x3" */
+
+#define NVC370_CTRL_ACCL_MAX_INSTANCE_CORE       NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE
+#define NVC370_CTRL_ACCL_MAX_INSTANCE_WINDOW     NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW
+#define NVC370_CTRL_ACCL_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM
+#define NVC370_CTRL_ACCL_MAX_INSTANCE_WRITEBACK  NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK
+#define NVC370_CTRL_ACCL_MAX_INSTANCE_CURSOR     NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR
+
+#define NVC370_CTRL_ACCL_NONE             NVC370_CTRL_IDLE_CHANNEL_ACCL_NONE
+#define NVC370_CTRL_ACCL_IGNORE_PI        NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI
+#define NVC370_CTRL_ACCL_SKIP_NOTIF       NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF
+#define NVC370_CTRL_ACCL_SKIP_SEMA        NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA
+#define NVC370_CTRL_ACCL_IGNORE_INTERLOCK NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK
+#define NVC370_CTRL_ACCL_IGNORE_FLIPLOCK  NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK
+#define NVC370_CTRL_ACCL_TRASH_ONLY       NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY
+#define NVC370_CTRL_ACCL_TRASH_AND_ABORT  NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT
+#define NVC370_CTRL_ACCL_SKIP_SYNCPOINT   NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SYNCPOINT
+#define NVC370_CTRL_ACCL_IGNORE_TIMESTAMP NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_TIMESTAMP
+#define NVC370_CTRL_ACCL_IGNORE_MGI       NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_MGI
+#define NVC370_CTRL_SET_ACCL_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NVC370_CTRL_SET_ACCL_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       channelClass;
+    NvU32                       channelInstance;
+    NvU32                       accelerators;
+    NvU32                       accelMask;
+} NVC370_CTRL_SET_ACCL_PARAMS;
+typedef NVC370_CTRL_SET_ACCL_PARAMS NVC370_CTRL_GET_ACCL_PARAMS;
+
+// FINN PORT: The below type was generated by the FINN port to
+// ensure that all API's have a unique structure associated
+// with them!
+#define NVC370_CTRL_CMD_GET_ACCL_FINN_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NVC370_CTRL_CMD_GET_ACCL_FINN_PARAMS {
+    NVC370_CTRL_GET_ACCL_PARAMS params;
+} NVC370_CTRL_CMD_GET_ACCL_FINN_PARAMS;
+
+
+
+
+/*
+ * NVC370_CTRL_CMD_GET_CHANNEL_INFO
+ *
+ * This command returns the current channel state.
+ *
+ * channelClass
+ * This field indicates the hw class number (0xC378 - 0xC37E).
+ * It's defined in the h/w header (e.g., clc37d.h).
+ *
+ * channelInstance
+ * This field indicates which instance of the channelClass the cmd is
+ * meant for. (zero-based)
+ *
+ * channelState
+ * This field indicates the desired channel state in a mask form that
+ * is compatible with NVC370_CTRL_CMD_IDLE_CHANNEL. A mask format
+ * allows clients to check for one from a group of states.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_GENERIC
+ *
+ * The display driver uses this call to ensure that all its methods have
+ * propagated through the hardware's internal fifo
+ * (NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING) before it calls
+ * RM to check whether or not the mode it set up in the Assembly State Cache
+ * will be possible.
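+ *
+ * A sketch of that usage (the polling and plumbing details are illustrative
+ * assumptions, not defined here):
+ *
+ *     NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS p;
+ *     memset(&p, 0, sizeof(p));
+ *     p.channelClass    = 0xC37D;   // e.g. the core channel class
+ *     p.channelInstance = 0;
+ *     // issue NVC370_CTRL_CMD_GET_CHANNEL_INFO, then test:
+ *     //   (p.channelState & NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING)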
+ * Note that the display driver cannot use a completion notifier in
+ * this case, because the completion notifier is associated with Update, and
+ * Update will propagate the state from Assembly to Armed; when checking the
+ * possibility of a mode, the display driver wouldn't want the Armed state to
+ * be affected.
+ */
+
+
+
+#define NVC370_CTRL_CMD_GET_CHANNEL_INFO (0xc3700104) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_CORE       NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE
+#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WINDOW     NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW
+#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM
+#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WRITEBACK  NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK
+#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_CURSOR     NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR
+
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE              NVC370_CTRL_CMD_CHANNEL_STATE_IDLE
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_BUSY              NVC370_CTRL_CMD_CHANNEL_STATE_BUSY
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC           NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC_LIMBO     NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_INIT1         NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT1
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_INIT2         NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT2
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_OPERATION     NVC370_CTRL_CMD_CHANNEL_STATE_EFI_OPERATION
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_INIT1       NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT1
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_INIT2       NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT2
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_OPERATION   NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_OPERATION
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED       NVC370_CTRL_CMD_CHANNEL_STATE_UNCONNECTED
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_INIT1             NVC370_CTRL_CMD_CHANNEL_STATE_INIT1
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_INIT2             NVC370_CTRL_CMD_CHANNEL_STATE_INIT2
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN1         NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN2         NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2
+#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE
+#define NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID (0x4U)
+
+typedef struct NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       channelClass;
+    NvU32                       channelInstance;
+    NvBool                      IsChannelInDebugMode;
+    NvU32                       channelState;
+} NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS;
+
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
new file mode 100644
index 0000000..ef18c7d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
@@ -0,0 +1,46 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370event.finn +// + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070event.h" + + + + + +/* valid action values */ +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* _ctrlc370event_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h new file mode 100644 index 0000000..2a49284 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h @@ -0,0 +1,123 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrlc370/ctrlc370rg.finn
+//
+
+#include "ctrl/ctrlc370/ctrlc370base.h"
+/* C370 is partially derived from 5070 */
+#include "ctrl/ctrl5070/ctrl5070rg.h"
+
+
+
+
+/*
+ * NVC370_CTRL_CMD_GET_LOCKPINS_CAPS
+ *
+ * This command returns lockpins for the specified pinset,
+ * as well as the lockpins' HW capabilities.
+ *
+ * pinset [in]
+ * This parameter takes the pinset whose corresponding
+ * lockpin numbers need to be determined. This only affects
+ * the return value for the RasterLock and FlipLock pins.
+ *
+ * frameLockPin [out]
+ * This parameter returns the FrameLock pin index.
+ *
+ * rasterLockPin [out]
+ * This parameter returns the RasterLock pin index.
+ *
+ * flipLockPin [out]
+ * This parameter returns the FlipLock pin index.
+ *
+ * stereoPin [out]
+ * This parameter returns the Stereo pin index.
+ *
+ * numScanLockPins [out]
+ * This parameter returns the HW capability of ScanLock pins.
+ *
+ * numFlipLockPins [out]
+ * This parameter returns the HW capability of FlipLock pins.
+ *
+ * numStereoPins [out]
+ * This parameter returns the HW capability of Stereo pins.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NVC370_CTRL_CMD_GET_LOCKPINS_CAPS (0xc3700201) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RG_INTERFACE_ID << 8) | NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_GET_LOCKPINS_CAPS_FRAME_LOCK_PIN_NONE  (0xffffffff)
+#define NVC370_CTRL_GET_LOCKPINS_CAPS_RASTER_LOCK_PIN_NONE (0xffffffff)
+#define NVC370_CTRL_GET_LOCKPINS_CAPS_FLIP_LOCK_PIN_NONE   (0xffffffff)
+#define NVC370_CTRL_GET_LOCKPINS_CAPS_STEREO_PIN_NONE      (0xffffffff)
+#define NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       pinset;
+    NvU32                       frameLockPin;
+    NvU32                       rasterLockPin;
+    NvU32                       flipLockPin;
+    NvU32                       stereoPin;
+    NvU32                       numScanLockPins;
+    NvU32                       numFlipLockPins;
+    NvU32                       numStereoPins;
+} NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS;
+
+/*
+ * NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR
+ *
+ * This command switches the SWAP_READY_OUT GPIO between SW
+ * and HW control, to work around (WAR) bug 200374184.
+ *
+ * bEnable [in]:
+ * This parameter enables (NV_TRUE) or disables (NV_FALSE) external fliplock.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_GENERIC
+ */
+
+#define NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR (0xc3700202) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RG_INTERFACE_ID << 8) | NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvBool                      bEnable;
+} NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS;
+
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
new file mode 100644
index 0000000..d891974
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
@@ -0,0 +1,133 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370verif.finn +// + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070verif.h" + + + + +#define NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES (0xc3700601) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID << 8) | NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 headInstance; + NvU32 modesetValue; +} NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS; + +/* + * NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES + * + * This command sets modeswitch flags, prior to a modeset. The flags will be + * automatically cleared at the end of each modeset, so this function must be + * called for each modeset where overrides are desired. 
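+ *
+ * As an illustrative sketch (this uses the DRF_DEF helper from nvmisc.h;
+ * the fields themselves are described below), forcing a blank of head 0 on
+ * the next modeset would pair each _MASK_VALID bit with its _VALUE bit:
+ *
+ *     NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS p;
+ *     memset(&p, 0, sizeof(p));
+ *     p.headInstance     = 0;
+ *     p.modesetMaskValid = DRF_DEF(C370_CTRL_CMD, _SET_FORCE_MODESWITCH_FLAGS,
+ *                                  _MASK_VALID_FORCE_BLANK, _VALID);
+ *     p.modesetValue     = DRF_DEF(C370_CTRL_CMD, _SET_FORCE_MODESWITCH_FLAGS,
+ *                                  _VALUE_FORCE_BLANK, _YES);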
+ *
+ * headInstance
+ * this field specifies the head for which modeswitch flags will be overridden
+ *
+ * modesetMaskValid
+ * this field specifies the mask of modeswitch flags that are valid to override
+ *
+ * modesetValue
+ * this field specifies the override value
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES (0xc3700602) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID << 8) | NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID" */
+
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS {
+    NVC370_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       headInstance;
+    NvU32                       modesetMaskValid;
+    NvU32                       modesetValue;
+} NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS;
+
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK 0:0
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK_INVALID 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK 1:1
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_INVALID 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN 2:2
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN_INVALID 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN 3:3
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN_INVALID 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL 4:4
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL_INVALID 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL 5:5
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL_INVALID 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP 6:6
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP_INVALID 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN 7:7
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN_VALID 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN_INVALID 0x00000000
+
+
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK 0:0
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK_NO 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK 1:1
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_NO 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN 2:2
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN_NO 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN 3:3
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN_NO 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL 4:4
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL_NO 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL 5:5
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL_NO 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP 6:6
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP_NO 0x00000000
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN 7:7
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN_YES 0x00000001
+#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN_NO 0x00000000
+
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
new file mode 100644
index 0000000..2d4aea5
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrlc372/ctrlc372base.finn +// + +#include "ctrl/ctrlxxxx.h" +/* NVC372_DISPLAY_SW control commands and parameters */ + +#define NVC372_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0XC372, NVC372_CTRL_##cat, idx) + +/* NVC372_DISPLAY_SW command categories (6 bits) */ +#define NVC372_CTRL_RESERVED (0x00) +#define NVC372_CTRL_CHNCTL (0x01) +#define NVC372_CTRL_VERIF (0x02) + +/* + * NVC372_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC372_CTRL_CMD_NULL (0xc3720000) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +// This struct must be the first member of all +// C372 control calls +typedef struct NVC372_CTRL_CMD_BASE_PARAMS { + NvU32 subdeviceIndex; +} NVC372_CTRL_CMD_BASE_PARAMS; + +/* _ctrlc372base_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h new file mode 100644 index 0000000..0ceda23 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h @@ -0,0 +1,680 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc372/ctrlc372chnc.finn +// + +#include "nvdisptypes.h" +#include "ctrl/ctrlc372/ctrlc372base.h" + +#define NVC372_CTRL_MAX_POSSIBLE_HEADS 8 +#define NVC372_CTRL_MAX_POSSIBLE_WINDOWS 32 + +#define NVC372_CTRL_CMD_IS_MODE_POSSIBLE (0xc3720101) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_IS_MODE_POSSIBLE + * + * This command tells whether or not the specified display config is possible. + * A config might not be possible if the display requirements exceed the GPU + * capabilities. Display requirements will be higher with more display + * surfaces, higher resolutions, higher downscaling factors, etc. GPU + * capabilities are determined by clock frequencies, the width of data pipes, + * amount of mempool available, number of thread groups available, etc. 
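+ *
+ * At a high level (a minimal sketch; every field is documented below), the
+ * client fills in one entry per active head and window and then issues the
+ * control call:
+ *
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS p;
+ *     memset(&p, 0, sizeof(p));
+ *     p.numHeads               = 1;
+ *     p.head[0].headIndex      = 0;
+ *     p.head[0].maxPixelClkKHz = 297000;  // e.g. a 297 MHz pixel clock
+ *     // ... raster timings, downscale factors, etc. ...
+ *     p.numWindows             = 1;
+ *     p.window[0].windowIndex  = 0;
+ *     p.window[0].owningHead   = 0;
+ *     // ... usage bounds, fetch limits, etc. ...
+ *     // issue NVC372_CTRL_CMD_IS_MODE_POSSIBLE and then check p.bIsPossible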
+ *
+ * Inputs:
+ * head.headIndex
+ * This is the hardware index number for the head. Only active heads
+ * should be included in the input structure.
+ *
+ * head.maxPixelClkKHz
+ * This parameter specifies the pixel scanout rate (in KHz).
+ *
+ * head.rasterSize
+ * This structure specifies the total width and height of the raster that
+ * is sent to the display. (The width and height are also referred to as
+ * HTotal and VTotal, respectively.)
+ *
+ * head.rasterBlankStart
+ * X specifies the pixel column where horizontal blanking begins;
+ * Y specifies the pixel row where vertical blanking begins.
+ *
+ * head.rasterBlankEnd
+ * X specifies the pixel column where horizontal blanking ends;
+ * Y specifies the pixel row where vertical blanking ends.
+ *
+ * head.rasterVertBlank2
+ * X and Y specify the pixel column/row where horizontal/vertical blanking
+ * ends on the second field of every pair for an interlaced raster. This
+ * field is not used when the raster is progressive.
+ *
+ * head.control.masterLockMode
+ * head.control.masterLockPin
+ * head.control.slaveLockMode
+ * head.control.slaveLockPin
+ * Heads that are raster locked or frame locked together will have
+ * synchronized timing. For example, vblank will occur at the same time on
+ * all of the heads that are locked together.
+ *
+ * "LockMode" tells if a head is raster locked, frame locked, or not locked.
+ *
+ * "LockPin" tells which heads are in a group of locked heads. There
+ * should be one master per group, and all slave heads that are locked to
+ * that master should have the same slaveLockPin number as the master's
+ * masterLockPin number.
+ *
+ * Note: The LockModes and LockPins are used only if the min v-pstate is
+ * required (i.e., if NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE
+ * is set).
+ *
+ * head.maxDownscaleFactorH
+ * head.maxDownscaleFactorV
+ * maxDownscaleFactorH and maxDownscaleFactorV represent the maximum amount
+ * by which the composited image can be reduced in size, horizontally
+ * and vertically, respectively, multiplied by 0x400. For example, if the
+ * scaler input width is 1024, and the scaler output width is 2048, the
+ * downscale factor would be 1024 / 2048 = 0.5, and multiplying by 0x400
+ * would give 512.
+ *
+ * head.outputScalerVerticalTaps
+ * scalerVerticalTaps indicates the maximum number of vertical taps
+ * allowed in the output scaler.
+ *
+ * Note that there are no #defines for tap values; the parameter is simply
+ * the number of taps (e.g., "2" for 2 taps).
+ *
+ * head.bUpscalingAllowedV
+ * bUpscalingAllowed indicates whether or not the composited image can be
+ * increased in size, vertically.
+ *
+ * head.bOverfetchEnabled
+ * bOverfetchEnabled indicates whether or not the vertical overfetch is
+ * enabled in the postcomp scaler.
+ *
+ * head.minFrameIdle.leadingRasterLines
+ * leadingRasterLines defines the number of lines between the start of the
+ * frame (vsync) and the start of the active region. This includes Vsync,
+ * Vertical Back Porch, and the top part of the overscan border. The
+ * minimum value is 2 because vsync and VBP must be at least 1 line each.
+ *
+ * head.minFrameIdle.trailingRasterLines
+ * trailingRasterLines defines the number of lines between the end of the
+ * active region and the end of the frame. This includes the bottom part
+ * of the overscan border and the Vertical Front Porch.
+ *
+ * head.lut
+ * This parameter specifies whether or not the output LUT is enabled, and
+ * the size of the LUT. The parameter should be an
+ * NVC372_CTRL_IMP_LUT_USAGE_xxx value.
+ *
+ * head.cursorSize32p
+ * This parameter specifies the width of the cursor, in units of 32 pixels.
+ * So, for example, "8" would mean 8 * 32 = 256, for a 256x256 cursor. Zero
+ * means the cursor is disabled.
+ *
+ * head.bEnableDsc
+ * bEnableDsc indicates whether or not DSC is enabled.
+ *
+ * head.bYUV420Format
+ * This parameter indicates that the output format is YUV420.
+ * Refer to NVD_YUV420_Output_Functional_Description.docx for more details.
+ *
+ * head.bIs2Head1Or
+ * This parameter specifies if the head operates in 2Head1Or mode.
+ * Refer to NVD_2_Heads_Driving_1_OR_Functional_Description.docx for more details.
+ *
+ * head.bDisableMidFrameAndDWCFWatermark
+ * WAR for bug 200508242.
+ * In Linux it is possible that there will be no fullscreen window visible
+ * for a head. For these cases we would not hit dwcf or midframe watermarks,
+ * leading to fbflcn timing out waiting on ok_to_switch and forcing mclk
+ * switch. This could lead to underflows. So if that scenario is caught (by
+ * the Display Driver), bDisableMidFrameAndDWCFWatermark will be set to true and
+ * IMP will exclude dwcf and midframe contribution from the "is mclk switch
+ * guaranteed" calculation for the bandwidth clients of that head.
+ *
+ * window.windowIndex
+ * This is the hardware index number for the window. Only active windows
+ * should be included in the input structure.
+ *
+ * window.owningHead
+ * This is the hardware index of the head that will receive the window's
+ * output.
+ *
+ * window.formatUsageBound
+ * This parameter is a bitmask of all possible non-rotated mode data
+ * formats (NVC372_CTRL_FORMAT_xxx values).
+ *
+ * window.rotatedFormatUsageBound
+ * This parameter is a bitmask of all possible rotated mode data formats
+ * (NVC372_CTRL_FORMAT_xxx values).
+ *
+ * window.maxPixelsFetchedPerLine
+ * This parameter defines the maximum number of pixels that may need to be
+ * fetched in a single line for this window. Often, this can be set to the
+ * viewportSizeIn.Width. But if the window is known to be clipped, such
+ * that an entire line will never be fetched, then this parameter can be
+ * set to the clipped size (to improve the chances of the mode being
+ * possible, or possible at a lower v-pstate).
+ *
+ * In some cases, the value of this parameter must be increased by a few
+ * pixels in order to account for scaling overfetch, input chroma overfetch
+ * (420/422->444), and/or chroma output low pass filter overfetch
+ * (444->422/420). This value is chip dependent; refer to the
+ * MaxPixelsFetchedPerLine parameter in nvdClass_01.mfs for the exact
+ * value. In no case does the maxPixelsFetchedPerLine value need to exceed
+ * the surface width.
+ *
+ * window.maxDownscaleFactorH
+ * window.maxDownscaleFactorV
+ * maxDownscaleFactorH and maxDownscaleFactorV represent the maximum amount
+ * by which the window image can be reduced in size, horizontally and
+ * vertically, respectively, multiplied by
+ * NVC372_CTRL_SCALING_FACTOR_MULTIPLIER. For example,
+ * if the scaler input width is 1024, and the scaler output width is 2048,
+ * the downscale factor would be 1024 / 2048 = 0.5, and multiplying by an
+ * NVC372_CTRL_SCALING_FACTOR_MULTIPLIER of 0x400 would give 512.
+ *
+ * window.inputScalerVerticalTaps
+ * scalerVerticalTaps indicates the maximum number of vertical taps
+ * allowed in the input scaler.
+ * + * Note that there are no #defines for tap values; the parameter is simply + * the number of taps (e.g., "2" for 2 taps). + * + * window.bUpscalingAllowedV + * bUpscalingAllowed indicates whether or not the composited image can be + * increased in size, vertically. + * + * window.bOverfetchEnabled + * bOverfetchEnabled indicates whether or not the vertical overfetch is + * enabled in precomp scaler. + * + * window.lut + * This parameter specifies whether or not the input LUT is enabled, and + * the size of the LUT. The parameter should be an + * NVC372_CTRL_IMP_LUT_USAGE_xxx value. + * + * window.tmoLut + * This parameter specifies whether or not the tmo LUT is enabled, and + * the size of the LUT. This lut is used for HDR. The parameter should be + * an NVC372_CTRL_IMP_LUT_USAGE_xxx value. + * + * numHeads + * This is the number of heads in the "head" array of the + * NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS struct. Only active heads should be + * included in the struct. + * + * numWindows + * This is the number of windows in the "window" array of the + * NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS struct. Only active windows should + * be included in the struct. + * + * options + * This parameter specifies a bitmask for options. + * + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN + * tells IMP to calculate worstCaseMargin and worstCaseDomain. + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE + * tells IMP to calculate and report the minimum v-pstate at which the + * mode is possible. + * + * bUseCachedPerfState + * Indicates that RM should use cached values for the fastest + * available perf level (v-pstate for PStates 3.0 or pstate for + * PStates 2.0) and dispclk. This feature allows the query call to + * execute faster, and is intended to be used, for example, during + * mode enumeration, when many IMP query calls are made in close + * succession, and perf conditions are not expected to change between + * query calls. When IMP has not been queried recently, it is + * recommended to NOT use cached values, in case perf conditions have + * changed and the cached values no longer reflect the current + * conditions. + * + * testMclkFreqKHz + * This is the mclk frequency specified by the client, in KHz. RM will + * use this value to compare with the minimum dramclk required by the + * given mode. The parameter will have value 0 if the client doesn't want + * IMP query to consider this. This input is valid only on Tegra and only + * for verification purposes on internal builds. + * For this input to work, client must set + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE in the + * "options" field. + * + * Outputs: + * bIsPossible + * This output tells if the specified mode can be supported. + * + * minImpVPState + * minImpVPState returns the minimum v-pstate at which the mode is possible + * (assuming bIsPossible is TRUE). This output is valid only on dGPU, and + * only if NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in + * the "options" field. + * + * If the minimum v-pstate is required for a multi-head config, then + * masterLockMode, masterLockPin, slaveLockMode, and slaveLockPin must all + * be initialized. + * minPState + * minPState returns the pstate value corresponding to minImpVPState. It + * is returned as the numeric value of the pstate (P0 -> 0, P1 -> 1, etc.). + * This output is valid only on dGPU, and only if + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set + * in the "options" field. 
+ *
+ * Note that the pstate returned by minPState is not
+ * necessarily sufficient to meet IMP requirements. The pstate corresponds
+ * to the vpstate returned by minImpVPState, and this vpstate represents
+ * clocks that are sufficient for IMP requirements, but the pstate
+ * typically covers a range of frequencies (depending on the clock), and it
+ * is possible that only part of the range is sufficient for IMP.
+ *
+ * minRequiredBandwidthKBPS
+ * minRequiredBandwidthKBPS returns the minimum bandwidth that must be
+ * allocated to display in order to make the mode possible (assuming
+ * bIsPossible is TRUE). This output is valid only on Tegra, and only if
+ * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ * "options" field.
+ *
+ * floorBandwidthKBPS
+ * floorBandwidthKBPS returns the minimum mclk frequency that can support
+ * the mode, and allow glitchless mclk switch, multiplied by the width of
+ * the data pipe. (This is an approximation of the bandwidth that can be
+ * provided by the min required mclk frequency, ignoring overhead.) If the
+ * mode is possible, but glitchless mclk switch is not, floorBandwidthKBPS
+ * will be calculated based on the maximum possible mclk frequency. This
+ * output is valid only on Tegra, and only if
+ * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ * "options" field.
+ *
+ * minRequiredHubclkKHz
+ * minRequiredHubclkKHz returns the minimum hubclk frequency that can
+ * support the mode. This output is valid only on Tegra, and only if
+ * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the
+ * "options" field.
+ *
+ * worstCaseMargin
+ * worstCaseMargin returns the ratio of available bandwidth to required
+ * bandwidth, multiplied by NVC372_CTRL_IMP_MARGIN_MULTIPLIER. Available
+ * bandwidth is calculated in the worst case bandwidth domain, i.e., the
+ * domain with the least available margin. Bandwidth domains include the
+ * IMP-relevant clock domains, and possibly other virtual bandwidth
+ * domains such as AWP.
+ *
+ * Note that IMP checks additional parameters besides the bandwidth margins,
+ * but only the bandwidth margin is reported here, so it is possible for a
+ * mode to have a more restrictive domain that is not reflected in the
+ * reported margin result.
+ *
+ * This result is not guaranteed to be valid if the mode is not possible.
+ *
+ * Note also that the result is generally calculated for the highest
+ * v-pstate possible (usually P0). But if _NEED_MIN_VPSTATE is
+ * specified, the result will be calculated for the min possible v-pstate
+ * (or the highest possible v-pstate, if the mode is not possible).
+ *
+ * The result is valid only if
+ * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN is set in "options".
+ *
+ * dispClkKHz
+ * This is the dispclk frequency selected by IMP for this mode. For dGPU,
+ * it will be one of the fixed frequencies from the list of frequencies
+ * supported by the vbios.
+ *
+ * worstCaseDomain
+ * Returns a short text string naming the domain for the margin returned in
+ * "worstCaseMargin". See "worstCaseMargin" for more information.
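+ *
+ * As an editorial example of decoding worstCaseMargin (the values are
+ * purely illustrative): with NVC372_CTRL_IMP_MARGIN_MULTIPLIER = 0x400
+ * (1024), a returned worstCaseMargin of 0x500 (1280) corresponds to an
+ * available/required bandwidth ratio of 1280 / 1024 = 1.25, i.e., the
+ * domain named by worstCaseDomain has roughly 25% bandwidth headroom:
+ *
+ *     // "params" is assumed to be a filled-in
+ *     // NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS after a successful query.
+ *     NvU32 marginPct = (params.worstCaseMargin * 100) /
+ *                       NVC372_CTRL_IMP_MARGIN_MULTIPLIER;  // 125 => 1.25x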
+ * + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_ERROR_GENERIC + */ +#define NVC372_CTRL_IMP_LUT_USAGE_NONE 0 +#define NVC372_CTRL_IMP_LUT_USAGE_257 1 +#define NVC372_CTRL_IMP_LUT_USAGE_1025 2 + +typedef struct NVC372_CTRL_IMP_HEAD { + NvU8 headIndex; + + NvU32 maxPixelClkKHz; + + struct { + NvU32 width; + NvU32 height; + } rasterSize; + + struct { + NvU32 X; + NvU32 Y; + } rasterBlankStart; + + struct { + NvU32 X; + NvU32 Y; + } rasterBlankEnd; + + struct { + NvU32 yStart; + NvU32 yEnd; + } rasterVertBlank2; + + struct { + NV_DISP_LOCK_MODE masterLockMode; + NV_DISP_LOCK_PIN masterLockPin; + NV_DISP_LOCK_MODE slaveLockMode; + NV_DISP_LOCK_PIN slaveLockPin; + } control; + + NvU32 maxDownscaleFactorH; + NvU32 maxDownscaleFactorV; + NvU8 outputScalerVerticalTaps; + NvBool bUpscalingAllowedV; + NvBool bOverfetchEnabled; + + struct { + NvU16 leadingRasterLines; + NvU16 trailingRasterLines; + } minFrameIdle; + + NvU8 lut; + NvU8 cursorSize32p; + + NvBool bEnableDsc; + + NvBool bYUV420Format; + + NvBool bIs2Head1Or; + + NvBool bDisableMidFrameAndDWCFWatermark; +} NVC372_CTRL_IMP_HEAD; +typedef struct NVC372_CTRL_IMP_HEAD *PNVC372_CTRL_IMP_HEAD; + +typedef struct NVC372_CTRL_IMP_WINDOW { + NvU32 windowIndex; + NvU32 owningHead; + NvU32 formatUsageBound; + NvU32 rotatedFormatUsageBound; + NvU32 maxPixelsFetchedPerLine; + NvU32 maxDownscaleFactorH; + NvU32 maxDownscaleFactorV; + NvU8 inputScalerVerticalTaps; + NvBool bUpscalingAllowedV; + NvBool bOverfetchEnabled; + NvU8 lut; + NvU8 tmoLut; +} NVC372_CTRL_IMP_WINDOW; +typedef struct NVC372_CTRL_IMP_WINDOW *PNVC372_CTRL_IMP_WINDOW; + +#define NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN (0x00000001) +#define NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE (0x00000002) + +#define NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU8 numHeads; + NvU8 numWindows; + + NVC372_CTRL_IMP_HEAD head[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + // C form: NVC372_CTRL_IMP_WINDOW window[NVC372_CTRL_MAX_POSSIBLE_WINDOWS]; + NVC372_CTRL_IMP_WINDOW window[NVC372_CTRL_MAX_POSSIBLE_WINDOWS]; + + NvU32 options; + + NvU32 testMclkFreqKHz; + + NvBool bIsPossible; + + NvU32 minImpVPState; + + NvU32 minPState; + + NvU32 minRequiredBandwidthKBPS; + + NvU32 floorBandwidthKBPS; + + NvU32 minRequiredHubclkKHz; + + NvU32 worstCaseMargin; + + NvU32 dispClkKHz; + + char worstCaseDomain[8]; + + NvBool bUseCachedPerfState; +} NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS; +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *PNVC372_CTRL_IS_MODE_POSSIBLE_PARAMS; + +/* valid format values */ +#define NVC372_CTRL_FORMAT_RGB_PACKED_1_BPP (0x00000001) +#define NVC372_CTRL_FORMAT_RGB_PACKED_2_BPP (0x00000002) +#define NVC372_CTRL_FORMAT_RGB_PACKED_4_BPP (0x00000004) +#define NVC372_CTRL_FORMAT_RGB_PACKED_8_BPP (0x00000008) +#define NVC372_CTRL_FORMAT_YUV_PACKED_422 (0x00000010) +#define NVC372_CTRL_FORMAT_YUV_PLANAR_420 (0x00000020) +#define NVC372_CTRL_FORMAT_YUV_PLANAR_444 (0x00000040) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_420 (0x00000080) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422 (0x00000100) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422R (0x00000200) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_444 (0x00000400) +#define NVC372_CTRL_FORMAT_EXT_YUV_PLANAR_420 (0x00000800) +#define NVC372_CTRL_FORMAT_EXT_YUV_PLANAR_444 (0x00001000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_420 (0x00002000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422 
(0x00004000)
+#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422R                (0x00008000)
+#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_444                 (0x00010000)
+
+/* valid impResult values */
+#define NVC372_CTRL_IMP_MODE_POSSIBLE                              0
+#define NVC372_CTRL_IMP_NOT_ENOUGH_MEMPOOL                         1
+#define NVC372_CTRL_IMP_REQ_LIMIT_TOO_HIGH                         2
+#define NVC372_CTRL_IMP_VBLANK_TOO_SMALL                           3
+#define NVC372_CTRL_IMP_HUBCLK_TOO_LOW                             4
+#define NVC372_CTRL_IMP_INSUFFICIENT_BANDWIDTH                     5
+#define NVC372_CTRL_IMP_DISPCLK_TOO_LOW                            6
+#define NVC372_CTRL_IMP_ELV_START_TOO_HIGH                         7
+#define NVC372_CTRL_IMP_INSUFFICIENT_THREAD_GROUPS                 8
+#define NVC372_CTRL_IMP_INVALID_PARAMETER                          9
+#define NVC372_CTRL_IMP_UNRECOGNIZED_FORMAT                        10
+#define NVC372_CTRL_IMP_UNSPECIFIED                                11
+
+/*
+ * The calculated margin is multiplied by a constant, so that it can be
+ * represented as an integer with reasonable precision. "0x400" was chosen
+ * because it is a power of two, which might allow some compilers/CPUs to
+ * simplify the calculation by doing a shift instead of a multiply/divide.
+ * (And 0x400 is 1024, which is close to 1000, so that may simplify visual
+ * interpretation of the raw margin value.)
+ */
+#define NVC372_CTRL_IMP_MARGIN_MULTIPLIER                          (0x00000400)
+
+/* scaling factor */
+#define NVC372_CTRL_SCALING_FACTOR_MULTIPLIER                      (0x00000400)
+
+#define NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD             2
+#define NVC372_CTRL_CMD_MAX_SORS                                   4
+
+#define NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS (0xc3720102) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS
+ *
+ * This command tells us whether the output resource pixel clocks requested by
+ * the client are possible. Note that this will not be used for the DisplayPort
+ * SOR, as that is handled by the DisplayPort library.
+ *
+ * Inputs:
+ * numHeads
+ * This is the number of heads in the "head" array of the
+ * NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS struct. Only active heads
+ * should be included in the struct.
+ *
+ * head.headIndex
+ * This is the hardware index number for the head. Only an active head
+ * should be included in the input structure.
+ *
+ * head.maxPixelClkKhz
+ * This parameter specifies the pixel scanout rate (in KHz).
+ *
+ * head.displayId
+ * Array of displayIds associated with the head. This is limited by
+ * NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD.
+ *
+ * sor.ownerMask
+ * Consists of a mask of all heads that drive this SOR.
+ *
+ * sor.protocol
+ * Defines the protocol of the SOR in question.
+ *
+ * sor.pixelReplicateMode
+ * Defines which pixel replication mode is requested. This can be off,
+ * X2, or X4 mode.
+ *
+ * Outputs:
+ * bIsPossible
+ * This tells us whether the requested pixel clocks can be supported.
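+ *
+ * A minimal usage sketch (an editorial addition; the field values are
+ * purely illustrative, and the control call is issued through whatever
+ * RM control path the client normally uses):
+ *
+ *     NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS p = { 0 };
+ *     p.numHeads = 1;
+ *     p.head[0].headIndex      = 0;
+ *     p.head[0].maxPixelClkKhz = 594000;   // e.g., 3840x2160 @ 60 Hz
+ *     p.sor[0].ownerMask       = NVC372_CTRL_CMD_SOR_OWNER_MASK_HEAD(0);
+ *     p.sor[0].protocol        = NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A;
+ *     p.sor[0].pixelReplicateMode =
+ *         NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF;
+ *     // ...issue NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS, then
+ *     // read p.bIsPossible.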
+ */ + + +#define NVC372_CTRL_IS_MODE_POSSIBLE_DISPLAY_ID_SKIP_IMP_OUTPUT_CHECK (0xAAAAAAAA) + +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_HEAD { + NvU8 headIndex; + NvU32 maxPixelClkKhz; + + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP outputResourcePixelDepthBPP; + + NvU32 displayId[NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD]; +} NVC372_CTRL_IMP_OR_SETTINGS_HEAD; +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_HEAD *PNVC372_CTRL_IMP_OR_SETTINGS_HEAD; + +#define NVC372_CTRL_CMD_SOR_OWNER_MASK_NONE (0x00000000) +#define NVC372_CTRL_CMD_SOR_OWNER_MASK_HEAD(i) (1 << i) + +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A (0x00000000) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B (0x00000001) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS (0x00000002) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SUPPORTED (0xFFFFFFFF) + +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X4 (0x00000002) + +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_SOR { + NvU32 ownerMask; + NvU32 protocol; + NvU32 pixelReplicateMode; +} NVC372_CTRL_IMP_OR_SETTINGS_SOR; +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_SOR *PNVC372_CTRL_IMP_OR_SETTINGS_SOR; + +#define NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU32 numHeads; + + NVC372_CTRL_IMP_OR_SETTINGS_HEAD head[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + NVC372_CTRL_IMP_OR_SETTINGS_SOR sor[NVC372_CTRL_CMD_MAX_SORS]; + + NvBool bIsPossible; +} NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS; +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *PNVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS; + +#define NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE (0xc3720103) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE + * + * This control call is used by clients to inform RM about video adaptive refresh rate enable/disable. + * Based on the state, RM will enable/disable supported low power features. + * + * Inputs: + * displayID + * displayId of panel on which video adaptive refresh rate is enabled/disabled. + * + * bEnable + * NV_TRUE to enable video adaptive refresh rate mode. + * NV_FALSE to disable video adaptive refresh rate mode. + * + * Outputs: + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS { + NvU32 displayID; + NvBool bEnable; +} NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS; +typedef struct NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *PNVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS; + + +#define NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN (0xc3720104) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN + * + * This control call is used by clients to query the active viewport for the + * provided window precalculated at the beginning of each frame. + * + * Inputs: + * windowIndex + * Index of the window to be queried. Must be connected to an active head. 
+ * + * Outputs: + * activeViewportPointIn + * X and Y coordinates of the active viewport on the provided window for + * the most recent frame. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT if the window index is invalid + * NV_ERR_INVALID_STATE if the window index isn't connected to a head + * NV_ERR_NOT_SUPPORTED + */ +#define NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU32 windowIndex; + + struct { + NvU32 x; + NvU32 y; + } activeViewportPointIn; +} NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS; +typedef struct NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *PNVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS; + +/* _ctrlc372chnc_h_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h new file mode 100644 index 0000000..7a5eaad --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2008,2013,2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlxxxx.finn +// + + + +#include "nvtypes.h" + +/* definitions shared by all CTRL interfaces */ + +/* Basic command format: +* cmd_class [31:16], +* cmd_reserved [15:15], +* cmd_reserved [14:14], +* cmd_category [13:8], +* cmd_index [7:0] +*/ + +#define NVXXXX_CTRL_CMD_CLASS 31:16 + +#define NVXXXX_CTRL_CMD_CATEGORY 13:8 +#define NVXXXX_CTRL_CMD_INDEX 7:0 + +/* don't use DRF_NUM - not always available */ +# define NVXXXX_CTRL_CMD(cls,cat,idx) \ + (((cls) << 16) | ((0) << 15) | ((0) << 14) \ + | ((cat) << 8) | ((idx) & 0xFF)) +/* + * NVXXXX_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * This command is valid for all classes. 
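+ *
+ * As an editorial illustration of the basic command format above:
+ * NVXXXX_CTRL_CMD_NULL (0x00000000) decodes to class 0, category 0, and
+ * index 0, while a non-null command value such as 0xC3720102 decodes to
+ * cmd_class = 0xC372, cmd_category = 0x01, and cmd_index = 0x02, and could
+ * equivalently be built as NVXXXX_CTRL_CMD(0xC372, 0x01, 0x02).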
+ * + * Possible status values returned are: + * NV_OK + */ +#define NVXXXX_CTRL_CMD_NULL (0x00000000) + +#define NVxxxx_CTRL_LEGACY_PRIVILEGED (0xC0) +#define NVxxxx_CTRL_LEGACY_NON_PRIVILEGED (0x80) diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h new file mode 100644 index 0000000..b61abaf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef INCLUDED_DPRINGBUFFERTYPES_H +#define INCLUDED_DPRINGBUFFERTYPES_H + +#define MAX_MESSAGE_LEN 100 +#define MAX_RECORD_COUNT 15 + +typedef enum _DP_RECORD_TYPE +{ + ASSERT_HIT = 135, + LOG_CALL = 136, +} DP_RECORD_TYPE; + +typedef struct _DpAssertHitRecord +{ + NvU64 breakpointAddr; +} DpAssertHitRecord, *PDpAssertHitRecord; + +typedef struct _DpLogCallRecord +{ + char msg[MAX_MESSAGE_LEN]; + NvU64 addr; +} DpLogCallRecord, *PDpLogCallRecord; + +typedef union _DpLogRecord +{ + DpAssertHitRecord dpAssertHitRecord; + DpLogCallRecord dpLogCallRecord; +} DpLogRecord, *PDpLogRecord; + +typedef enum _DpLogQueryType +{ + DpLogQueryTypeAssert = 1, + DpLogQueryTypeCallLog = 2, +} DpLogQueryType, *PDpLogQueryType; + +#endif //INCLUDED_DPRINGBUFFERTYPES_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h new file mode 100644 index 0000000..ddc6a91 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_HYPERVISOR_H_
+#define _NV_HYPERVISOR_H_
+
+#include <nv-kernel-interface-api.h>
+
+// Enums for supported hypervisor types.
+// New hypervisor type should be added before OS_HYPERVISOR_CUSTOM_FORCED
+typedef enum _HYPERVISOR_TYPE
+{
+    OS_HYPERVISOR_XEN = 0,
+    OS_HYPERVISOR_VMWARE,
+    OS_HYPERVISOR_HYPERV,
+    OS_HYPERVISOR_KVM,
+    OS_HYPERVISOR_PARALLELS,
+    OS_HYPERVISOR_CUSTOM_FORCED,
+    OS_HYPERVISOR_UNKNOWN
+} HYPERVISOR_TYPE;
+
+#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE  0
+#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1
+#define CMD_VGPU_VFIO_REGISTER_MDEV    2
+#define CMD_VGPU_VFIO_PRESENT          3
+
+#define MAX_VF_COUNT_PER_GPU 64
+
+typedef enum _VGPU_TYPE_INFO
+{
+    VGPU_TYPE_NAME = 0,
+    VGPU_TYPE_DESCRIPTION,
+    VGPU_TYPE_INSTANCES,
+} VGPU_TYPE_INFO;
+
+typedef struct
+{
+    void   *vgpuVfioRef;
+    void   *waitQueue;
+    void   *nv;
+    NvU32  *vgpuTypeIds;
+    NvU32   numVgpuTypes;
+    NvU32   domain;
+    NvU8    bus;
+    NvU8    slot;
+    NvU8    function;
+    NvBool  is_virtfn;
+} vgpu_vfio_info;
+
+typedef struct
+{
+    NvU32   domain;
+    NvU8    bus;
+    NvU8    slot;
+    NvU8    function;
+    NvBool  isNvidiaAttached;
+    NvBool  isMdevAttached;
+} vgpu_vf_pci_info;
+
+typedef enum VGPU_CMD_PROCESS_VF_INFO_E
+{
+    NV_VGPU_SAVE_VF_INFO        = 0,
+    NV_VGPU_REMOVE_VF_PCI_INFO  = 1,
+    NV_VGPU_REMOVE_VF_MDEV_INFO = 2,
+    NV_VGPU_GET_VF_INFO         = 3
+} VGPU_CMD_PROCESS_VF_INFO;
+
+typedef enum VGPU_DEVICE_STATE_E
+{
+    NV_VGPU_DEV_UNUSED = 0,
+    NV_VGPU_DEV_OPENED = 1,
+    NV_VGPU_DEV_IN_USE = 2
+} VGPU_DEVICE_STATE;
+
+typedef enum _VMBUS_CMD_TYPE
+{
+    VMBUS_CMD_TYPE_INVALID    = 0,
+    VMBUS_CMD_TYPE_SETUP      = 1,
+    VMBUS_CMD_TYPE_SENDPACKET = 2,
+    VMBUS_CMD_TYPE_CLEANUP    = 3,
+} VMBUS_CMD_TYPE;
+
+typedef struct
+{
+    NvU32 request_id;
+    NvU32 page_count;
+    NvU64 *pPfns;
+    void *buffer;
+    NvU32 bufferlen;
+} vmbus_send_packet_cmd_params;
+
+
+typedef struct
+{
+    NvU32 override_sint;
+    NvU8 *nv_guid;
+} vmbus_setup_cmd_params;
+
+/*
+ * Function prototypes
+ */
+
+HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void);
+
+#endif // _NV_HYPERVISOR_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
new file mode 100644
index 0000000..183f9b4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
@@ -0,0 +1,41 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION &
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_INTERFACE_API_H +#define _NV_KERNEL_INTERFACE_API_H +/************************************************************************************************************** +* +* File: nv-kernel-interface-api.h +* +* Description: +* Defines the NV API related macros. +* +**************************************************************************************************************/ + +#if NVOS_IS_UNIX && NVCPU_IS_X86_64 && defined(__use_altstack__) +#define NV_API_CALL __attribute__((altstack(0))) +#else +#define NV_API_CALL +#endif + +#endif /* _NV_KERNEL_INTERFACE_API_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h new file mode 100644 index 0000000..b23f7f7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+
+#ifndef _NV_STDARG_H_
+#define _NV_STDARG_H_
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+    #include "conftest.h"
+    #if defined(NV_LINUX_STDARG_H_PRESENT)
+        #include <linux/stdarg.h>
+    #else
+        #include <stdarg.h>
+    #endif
+#else
+    #include <stdarg.h>
+#endif
+
+#endif // _NV_STDARG_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h
new file mode 100644
index 0000000..fae9985
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nv_vgpu_types.finn
+//
+
+
+
+
+/* XAPIGEN - this file is not suitable for (nor needed by) xapigen. */
+/* Rather than #ifdef out every such include in every sdk          */
+/* file, punt here.                                                */
+#include "nvtypes.h"
+ /* ! XAPIGEN */
+
+#define VM_UUID_SIZE            16
+#define INVALID_VGPU_DEV_INST   0xFFFFFFFFU
+#define MAX_VGPU_DEVICES_PER_VM 16U
+
+/* This enum represents the current state of guest dependent fields */
+typedef enum GUEST_VM_INFO_STATE {
+    GUEST_VM_INFO_STATE_UNINITIALIZED = 0,
+    GUEST_VM_INFO_STATE_INITIALIZED = 1,
+} GUEST_VM_INFO_STATE;
+
+/* This enum represents types of VM identifiers */
+typedef enum VM_ID_TYPE {
+    VM_ID_DOMAIN_ID = 0,
+    VM_ID_UUID = 1,
+} VM_ID_TYPE;
+
+/* This structure represents VM identifier */
+typedef union VM_ID {
+    NvU8 vmUuid[VM_UUID_SIZE];
+    NV_DECLARE_ALIGNED(NvU64 vmId, 8);
+} VM_ID;
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h
new file mode 100644
index 0000000..4ce8d4b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef NV_CFG_SDK_INCLUDED
+#define NV_CFG_SDK_INCLUDED
+
+
+#endif // NV_CFG_SDK_INCLUDED
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h
new file mode 100644
index 0000000..90d8ac0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h
@@ -0,0 +1,92 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+ /***************************************************************************\
+|*                                                                           *|
+|*                         NV Display Common Types                           *|
+|*                                                                           *|
+|*  defines the common display types.                                        *|
+|*                                                                           *|
+ \***************************************************************************/
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nvdisptypes.finn +// + + + + +#include "nvtypes.h" + + + +typedef enum NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP { + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_DEFAULT = 0, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 = 1, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 = 2, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 = 3, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 = 4, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 = 5, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 = 6, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 = 7, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 = 8, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 = 9, +} NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP; + + + +typedef NvU32 NV_DISP_LOCK_PIN; + +#define NV_DISP_LOCK_PIN_0 0x0 +#define NV_DISP_LOCK_PIN_1 0x1 +#define NV_DISP_LOCK_PIN_2 0x2 +#define NV_DISP_LOCK_PIN_3 0x3 +#define NV_DISP_LOCK_PIN_4 0x4 +#define NV_DISP_LOCK_PIN_5 0x5 +#define NV_DISP_LOCK_PIN_6 0x6 +#define NV_DISP_LOCK_PIN_7 0x7 +#define NV_DISP_LOCK_PIN_8 0x8 +#define NV_DISP_LOCK_PIN_9 0x9 +#define NV_DISP_LOCK_PIN_A 0xA +#define NV_DISP_LOCK_PIN_B 0xB +#define NV_DISP_LOCK_PIN_C 0xC +#define NV_DISP_LOCK_PIN_D 0xD +#define NV_DISP_LOCK_PIN_E 0xE +#define NV_DISP_LOCK_PIN_F 0xF + +// Value used solely for HW initialization +#define NV_DISP_LOCK_PIN_UNSPECIFIED 0x10 + + + +typedef NvU32 NV_DISP_LOCK_MODE; + +#define NV_DISP_LOCK_MODE_NO_LOCK 0x0 +#define NV_DISP_LOCK_MODE_FRAME_LOCK 0x1 +#define NV_DISP_LOCK_MODE_RASTER_LOCK 0x3 + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h new file mode 100644 index 0000000..bce7bea --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVERROR_H +#define NVERROR_H +/****************************************************************************** +* +* File: nverror.h +* +* Description: +* This file contains the error codes set when the error notifier +* is signaled. 
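+*
+*      (Editorial note) These codes correspond to the Xid numbers that the
+*      driver reports in the kernel log. As a worked example of the indexed
+*      engine macros defined below (an editorial illustration, not part of
+*      the original header): ROBUST_CHANNEL_CE_ERROR(4) falls in the
+*      3 <= x < 6 branch and evaluates to ROBUST_CHANNEL_CE3_ERROR + 1,
+*      i.e. 71, which is ROBUST_CHANNEL_CE4_ERROR.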
+* +******************************************************************************/ + +#define ROBUST_CHANNEL_GR_EXCEPTION (13) +#define ROBUST_CHANNEL_GR_ERROR_SW_NOTIFY (13) +#define ROBUST_CHANNEL_FAKE_ERROR (14) +#define ROBUST_CHANNEL_DISP_MISSED_NOTIFIER (19) +#define ROBUST_CHANNEL_MPEG_ERROR_SW_METHOD (20) +#define ROBUST_CHANNEL_ME_ERROR_SW_METHOD (21) +#define ROBUST_CHANNEL_VP_ERROR_SW_METHOD (22) +#define ROBUST_CHANNEL_RC_LOGGING_ENABLED (23) +#define ROBUST_CHANNEL_VP_ERROR (27) +#define ROBUST_CHANNEL_VP2_ERROR (28) +#define ROBUST_CHANNEL_BSP_ERROR (29) +#define ROBUST_CHANNEL_BAD_ADDR_ACCESS (30) +#define ROBUST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT (31) +#define ROBUST_CHANNEL_PBDMA_ERROR (32) +#define ROBUST_CHANNEL_SEC_ERROR (33) +#define ROBUST_CHANNEL_MSVLD_ERROR (34) +#define ROBUST_CHANNEL_MSPDEC_ERROR (35) +#define ROBUST_CHANNEL_MSPPP_ERROR (36) +#define ROBUST_CHANNEL_CE0_ERROR (39) +#define ROBUST_CHANNEL_CE1_ERROR (40) +#define ROBUST_CHANNEL_CE2_ERROR (41) +#define ROBUST_CHANNEL_VIC_ERROR (42) +#define ROBUST_CHANNEL_RESETCHANNEL_VERIF_ERROR (43) +#define ROBUST_CHANNEL_GR_FAULT_DURING_CTXSW (44) +#define ROBUST_CHANNEL_PREEMPTIVE_REMOVAL (45) +#define ROBUST_CHANNEL_NVENC0_ERROR (47) +#define ROBUST_CHANNEL_GPU_ECC_DBE (48) +#define PMU_ERROR (59) +#define ROBUST_CHANNEL_SEC2_ERROR (60) +#define PMU_BREAKPOINT (61) +#define PMU_HALT_ERROR (62) +#define INFOROM_PAGE_RETIREMENT_EVENT (63) +#define INFOROM_PAGE_RETIREMENT_FAILURE (64) +#define INFOROM_DRAM_RETIREMENT_EVENT (63) +#define INFOROM_DRAM_RETIREMENT_FAILURE (64) +#define ROBUST_CHANNEL_NVENC1_ERROR (65) +#define ROBUST_CHANNEL_NVDEC0_ERROR (68) +#define ROBUST_CHANNEL_GR_CLASS_ERROR (69) +#define ROBUST_CHANNEL_CE3_ERROR (70) +#define ROBUST_CHANNEL_CE4_ERROR (71) +#define ROBUST_CHANNEL_CE5_ERROR (72) +#define ROBUST_CHANNEL_NVENC2_ERROR (73) +#define NVLINK_ERROR (74) +#define ROBUST_CHANNEL_CE6_ERROR (75) +#define ROBUST_CHANNEL_CE7_ERROR (76) +#define ROBUST_CHANNEL_CE8_ERROR (77) +#define VGPU_START_ERROR (78) +#define ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS (79) +#define PBDMA_PUSHBUFFER_CRC_MISMATCH (80) +#define ROBUST_CHANNEL_VGA_SUBSYSTEM_ERROR (81) +#define ROBUST_CHANNEL_NVJPG0_ERROR (82) +#define ROBUST_CHANNEL_NVDEC1_ERROR (83) +#define ROBUST_CHANNEL_NVDEC2_ERROR (84) +#define ROBUST_CHANNEL_CE9_ERROR (85) +#define ROBUST_CHANNEL_OFA0_ERROR (86) +#define NVTELEMETRY_DRIVER_REPORT (87) +#define ROBUST_CHANNEL_NVDEC3_ERROR (88) +#define ROBUST_CHANNEL_NVDEC4_ERROR (89) +#define LTC_ERROR (90) +#define RESERVED_XID (91) +#define EXCESSIVE_SBE_INTERRUPTS (92) +#define INFOROM_ERASE_LIMIT_EXCEEDED (93) +#define ROBUST_CHANNEL_CONTAINED_ERROR (94) +#define ROBUST_CHANNEL_UNCONTAINED_ERROR (95) +#define SEC_FAULT_ERROR (110) +#define GSP_RPC_TIMEOUT (119) +#define GSP_ERROR (120) +#define C2C_ERROR (121) +#define SPI_PMU_RPC_READ_FAIL (122) +#define SPI_PMU_RPC_WRITE_FAIL (123) +#define SPI_PMU_RPC_ERASE_FAIL (124) +#define INFOROM_FS_ERROR (125) +#define ROBUST_CHANNEL_LAST_ERROR (INFOROM_FS_ERROR) + + +// Indexed CE reference +#define ROBUST_CHANNEL_CE_ERROR(x) \ + (x < 3 ? ROBUST_CHANNEL_CE0_ERROR + (x) : \ + ((x < 6) ? (ROBUST_CHANNEL_CE3_ERROR + (x - 3)) : \ + ((x < 9) ? 
(ROBUST_CHANNEL_CE6_ERROR + (x - 6)) : \ + ROBUST_CHANNEL_CE9_ERROR))) + +#define ROBUST_CHANNEL_IS_CE_ERROR(x) \ + ((x == ROBUST_CHANNEL_CE0_ERROR) || (x == ROBUST_CHANNEL_CE1_ERROR) || \ + (x == ROBUST_CHANNEL_CE2_ERROR) || (x == ROBUST_CHANNEL_CE3_ERROR) || \ + (x == ROBUST_CHANNEL_CE4_ERROR) || (x == ROBUST_CHANNEL_CE5_ERROR) || \ + (x == ROBUST_CHANNEL_CE6_ERROR) || (x == ROBUST_CHANNEL_CE7_ERROR) || \ + (x == ROBUST_CHANNEL_CE8_ERROR) || (x == ROBUST_CHANNEL_CE9_ERROR)) + +#define ROBUST_CHANNEL_CE_ERROR_IDX(x) \ + (((x >= ROBUST_CHANNEL_CE0_ERROR) && (x <= ROBUST_CHANNEL_CE2_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE0_ERROR) : \ + (((x >= ROBUST_CHANNEL_CE3_ERROR) && \ + (x <= ROBUST_CHANNEL_CE5_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE3_ERROR) : \ + (((x >= ROBUST_CHANNEL_CE6_ERROR) && \ + (x <= ROBUST_CHANNEL_CE8_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE6_ERROR) : \ + (x - ROBUST_CHANNEL_CE9_ERROR)))) + +// Indexed NVDEC reference +#define ROBUST_CHANNEL_NVDEC_ERROR(x) \ + ((x == 0) ? \ + (ROBUST_CHANNEL_NVDEC0_ERROR) : \ + (((x >= 1) && (x <= 2)) ? (ROBUST_CHANNEL_NVDEC1_ERROR + x - 1) : \ + (ROBUST_CHANNEL_NVDEC3_ERROR + x - 3))) + +#define ROBUST_CHANNEL_IS_NVDEC_ERROR(x) \ + ((x == ROBUST_CHANNEL_NVDEC0_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC1_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC2_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC3_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC4_ERROR)) + +#define ROBUST_CHANNEL_NVDEC_ERROR_IDX(x) \ + (((x == ROBUST_CHANNEL_NVDEC0_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVDEC0_ERROR) : \ + (((x >= ROBUST_CHANNEL_NVDEC1_ERROR) && \ + (x <= ROBUST_CHANNEL_NVDEC2_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVDEC1_ERROR + 1) : \ + (x - ROBUST_CHANNEL_NVDEC3_ERROR + 3))) + +// Indexed NVENC reference +#define ROBUST_CHANNEL_NVENC_ERROR(x) \ + ((x == 0) ? (ROBUST_CHANNEL_NVENC0_ERROR) : \ + ((x == 1) ? (ROBUST_CHANNEL_NVENC1_ERROR) : \ + (ROBUST_CHANNEL_NVENC2_ERROR))) + +#define ROBUST_CHANNEL_IS_NVENC_ERROR(x) \ + ((x == ROBUST_CHANNEL_NVENC0_ERROR) || \ + (x == ROBUST_CHANNEL_NVENC1_ERROR) || \ + (x == ROBUST_CHANNEL_NVENC2_ERROR)) + +#define ROBUST_CHANNEL_NVENC_ERROR_IDX(x) \ + (((x == ROBUST_CHANNEL_NVENC0_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVENC0_ERROR) : \ + (((x == ROBUST_CHANNEL_NVENC1_ERROR)) ? 
\ + (x - ROBUST_CHANNEL_NVENC1_ERROR + 1) : \ + (x - ROBUST_CHANNEL_NVENC2_ERROR + 2))) + +// Error Levels +#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_INFO (0) +#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_NON_FATAL (1) +#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_FATAL (2) + +#define ROBUST_CHANNEL_ERROR_STR_PUBLIC_PUBLISHED \ + {"Unknown Error", \ + "DMA Engine Error (FIFO Error 1)", \ + "DMA Engine Error (FIFO Error 2)", \ + "DMA Engine Error (FIFO Error 3)", \ + "DMA Engine Error (FIFO Error 4)", \ + "DMA Engine Error (FIFO Error 5)", \ + "DMA Engine Error (FIFO Error 6)", \ + "DMA Engine Error (FIFO Error 7)", \ + "DMA Engine Error (FIFO Error 8)", \ + "Graphics Engine Error (GR Error 1)", \ + "Graphics Engine Error (GR Error 2)", \ + "Graphics Engine Error (GR Error 3)", \ + "Graphics Engine Error (GR Error 4)", \ + "Graphics Engine Error (GR Exception Error)",\ + "Fake Error", \ + "Display Engine Error (CRTC Error 1)", \ + "Display Engine Error (CRTC Error 2)", \ + "Display Engine Error (CRTC Error 3)", \ + "Bus Interface Error (BIF Error)", \ + "Client Reported Error", \ + "Video Engine Error (MPEG Error)", \ + "Video Engine Error (ME Error)", \ + "Video Engine Error (VP Error 1)", \ + "Error Reporting Enabled", \ + "Graphics Engine Error (GR Error 6)", \ + "Graphics Engine Error (GR Error 7)", \ + "DMA Engine Error (FIFO Error 9)", \ + "Video Engine Error (VP Error 2)", \ + "Video Engine Error (VP2 Error)", \ + "Video Engine Error (BSP Error)", \ + "Access Violation Error (MMU Error 1)", \ + "Access Violation Error (MMU Error 2)", \ + "DMA Engine Error (PBDMA Error)", \ + "Security Engine Error (SEC Error)", \ + "Video Engine Error (MSVLD Error)", \ + "Video Engine Error (MSPDEC Error)", \ + "Video Engine Error (MSPPP Error)", \ + "Graphics Engine Error (FECS Error 1)", \ + "Graphics Engine Error (FECS Error 2)", \ + "DMA Engine Error (CE Error 1)", \ + "DMA Engine Error (CE Error 2)", \ + "DMA Engine Error (CE Error 3)", \ + "Video Engine Error (VIC Error)", \ + "Verification Error", \ + "Access Violation Error (MMU Error 3)", \ + "Operating System Error (OS Error 1)", \ + "Operating System Error (OS Error 2)", \ + "Video Engine Error (MSENC/NVENC0 Error)",\ + "ECC Error (DBE Error)", \ + "Power State Locked", \ + "Power State Event (RC Error)", \ + "Power State Event (Stress Test Error)", \ + "Power State Event (Thermal Event 1)", \ + "Power State Event (Thermal Event 2)", \ + "Power State Event (Power Event)", \ + "Power State Event (Thermal Event 3)", \ + "Display Engine Error (EVO Error)", \ + "FB Interface Error (FBPA Error 1)", \ + "FB Interface Error (FBPA Error 2)", \ + "PMU error", \ + "SEC2 error", \ + "PMU Breakpoint (non-fatal)", \ + "PMU Halt Error", \ + "INFOROM Page Retirement Event", \ + "INFOROM Page Retirement Failure", \ + "Video Engine Error (NVENC1 Error)", \ + "Graphics Engine Error (FECS Error 3)", \ + "Graphics Engine Error (FECS Error 4)", \ + "Video Engine Error (NVDEC0 Error)", \ + "Graphics Engine Error (GR Class Error)",\ + "DMA Engine Error (CE Error 4)", \ + "DMA Engine Error (CE Error 5)", \ + "DMA Engine Error (CE Error 6)", \ + "Video Engine Error (NVENC2 Error)", \ + "NVLink Error", \ + "DMA Engine Error (CE Error 6)", \ + "DMA Engine Error (CE Error 7)", \ + "DMA Engine Error (CE Error 8)", \ + "vGPU device cannot be started", \ + "GPU has fallen off the bus", \ + "DMA Engine Error (Pushbuffer CRC mismatch)",\ + "VGA Subsystem Error", \ + "Video JPEG Engine Error (NVJPG Error)", \ + "Video Engine Error (NVDEC1 Error)", \ + "Video Engine Error 
(NVDEC2 Error)", \ + "DMA Engine Error (CE Error 9)", \ + "Video OFA Engine Error (OFA0 Error)", \ + "NvTelemetry Driver Reoprt", \ + "Video Engine Error (NVDEC3 Error)", \ + "Video Engine Error (NVDEC4 Error)", \ + "FB Interface Error (FBPA Error 3)", \ + "Reserved Xid", \ + "Excessive SBE interrupts", \ + "INFOROM Erase Limit Exceeded", \ + "Contained error", \ + "Uncontained error" + +#define ROBUST_CHANNEL_ERROR_STR_PUBLIC \ + ROBUST_CHANNEL_ERROR_STR_PUBLIC_PUBLISHED} + +#endif // NVERROR_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h new file mode 100644 index 0000000..53b6882 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h @@ -0,0 +1,379 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVFIXEDTYPES_INCLUDED +#define NVFIXEDTYPES_INCLUDED + +#include "nvtypes.h" + +/*! + * Fixed-point master data types. + * + * These are master-types represent the total number of bits contained within + * the FXP type. All FXP types below should be based on one of these master + * types. + */ +typedef NvS16 NvSFXP16; +typedef NvS32 NvSFXP32; +typedef NvS64 NvSFXP64; +typedef NvU16 NvUFXP16; +typedef NvU32 NvUFXP32; +typedef NvU64 NvUFXP64; + + +/*! + * Fixed-point data types. + * + * These are all integer types with precision indicated in the naming of the + * form: NvFXP_. The actual + * size of the data type is calculated as num_bits_above_radix + + * num_bit_below_radix. + * + * All of these FXP types should be based on one of the master types above. 
+ */
+typedef NvSFXP16                                  NvSFXP11_5;
+typedef NvSFXP16                                  NvSFXP4_12;
+typedef NvSFXP16                                  NvSFXP8_8;
+typedef NvSFXP32                                  NvSFXP8_24;
+typedef NvSFXP32                                  NvSFXP10_22;
+typedef NvSFXP32                                  NvSFXP16_16;
+typedef NvSFXP32                                  NvSFXP18_14;
+typedef NvSFXP32                                  NvSFXP20_12;
+typedef NvSFXP32                                  NvSFXP24_8;
+typedef NvSFXP32                                  NvSFXP27_5;
+typedef NvSFXP32                                  NvSFXP28_4;
+typedef NvSFXP32                                  NvSFXP29_3;
+typedef NvSFXP32                                  NvSFXP31_1;
+typedef NvSFXP64                                  NvSFXP52_12;
+
+typedef NvUFXP16                                  NvUFXP0_16;
+typedef NvUFXP16                                  NvUFXP4_12;
+typedef NvUFXP16                                  NvUFXP8_8;
+typedef NvUFXP32                                  NvUFXP3_29;
+typedef NvUFXP32                                  NvUFXP4_28;
+typedef NvUFXP32                                  NvUFXP7_25;
+typedef NvUFXP32                                  NvUFXP8_24;
+typedef NvUFXP32                                  NvUFXP9_23;
+typedef NvUFXP32                                  NvUFXP10_22;
+typedef NvUFXP32                                  NvUFXP15_17;
+typedef NvUFXP32                                  NvUFXP16_16;
+typedef NvUFXP32                                  NvUFXP18_14;
+typedef NvUFXP32                                  NvUFXP20_12;
+typedef NvUFXP32                                  NvUFXP24_8;
+typedef NvUFXP32                                  NvUFXP25_7;
+typedef NvUFXP32                                  NvUFXP26_6;
+typedef NvUFXP32                                  NvUFXP28_4;
+
+typedef NvUFXP64                                  NvUFXP40_24;
+typedef NvUFXP64                                  NvUFXP48_16;
+typedef NvUFXP64                                  NvUFXP52_12;
+
+/*!
+ * Utility macros used in converting between signed integers and fixed-point
+ * notation.
+ *
+ * - COMMON - These are used by both signed and unsigned.
+ */
+#define NV_TYPES_FXP_INTEGER(x, y)                      ((x)+(y)-1):(y)
+#define NV_TYPES_FXP_FRACTIONAL(x, y)                   ((y)-1):0
+#define NV_TYPES_FXP_FRACTIONAL_MSB(x, y)               ((y)-1):((y)-1)
+#define NV_TYPES_FXP_FRACTIONAL_MSB_ONE                 0x00000001
+#define NV_TYPES_FXP_FRACTIONAL_MSB_ZERO                0x00000000
+#define NV_TYPES_FXP_ZERO                               (0)
+
+/*!
+ * - UNSIGNED - These are only used for unsigned.
+ */
+#define NV_TYPES_UFXP_INTEGER_MAX(x, y)                 (~(NVBIT((y))-1U))
+#define NV_TYPES_UFXP_INTEGER_MIN(x, y)                 (0U)
+
+/*!
+ * - SIGNED - These are only used for signed.
+ */
+#define NV_TYPES_SFXP_INTEGER_SIGN(x, y)                ((x)+(y)-1):((x)+(y)-1)
+#define NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE             0x00000001
+#define NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE             0x00000000
+#define NV_TYPES_SFXP_S32_SIGN_EXTENSION(x, y)          31:(x)
+#define NV_TYPES_SFXP_S32_SIGN_EXTENSION_POSITIVE(x, y) 0x00000000
+#define NV_TYPES_SFXP_S32_SIGN_EXTENSION_NEGATIVE(x, y) (NVBIT(32-(x))-1U)
+#define NV_TYPES_SFXP_INTEGER_MAX(x, y)                 (NVBIT((x))-1U)
+#define NV_TYPES_SFXP_INTEGER_MIN(x, y)                 (~(NVBIT((x))-1U))
+#define NV_TYPES_SFXP_S64_SIGN_EXTENSION(x, y)          63:(x)
+#define NV_TYPES_SFXP_S64_SIGN_EXTENSION_POSITIVE(x, y) 0x0000000000000000
+#define NV_TYPES_SFXP_S64_SIGN_EXTENSION_NEGATIVE(x, y) (NVBIT64(64-(x))-1U)
+#define NV_TYPES_SFXP_S64_INTEGER_MAX(x, y)             (NVBIT64((x)-1)-1U)
+#define NV_TYPES_SFXP_S64_INTEGER_MIN(x, y)             (~(NVBIT64((x)-1)-1U))
+
+/*!
+ * Conversion macros used for converting between integer and fixed point
+ * representations. Both signed and unsigned variants.
+ *
+ * Warning:
+ * Note that most of the macros below can overflow if applied to values that
+ * cannot fit the destination type. It is the caller's responsibility to
+ * ensure that such situations will not occur.
+ *
+ * Some conversions perform some commonly performed tasks other than just
+ * bit-shifting:
+ *
+ * - _SCALED:
+ *   For integer -> fixed-point we add handling of divisors to represent
+ *   non-integer values.
+ *
+ * - _ROUNDED:
+ *   For fixed-point -> integer we add rounding to integer values.
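+ *
+ * A worked example (an editorial addition; values chosen for illustration):
+ *
+ *     // Encode 3/2 = 1.5 as unsigned FXP 16.16 via the _SCALED variant:
+ *     NvUFXP16_16 x = NV_TYPES_U32_TO_UFXP_X_Y_SCALED(16, 16, 3, 2);
+ *     // x == 0x00018000. Converting back to an integer:
+ *     NvU32 t = NV_TYPES_UFXP_X_Y_TO_U32(16, 16, x);         // 1 (truncated)
+ *     NvU32 r = NV_TYPES_UFXP_X_Y_TO_U32_ROUNDED(16, 16, x); // 2 (rounded)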
+ */ + +// 32-bit Unsigned FXP: +#define NV_TYPES_U32_TO_UFXP_X_Y(x, y, integer) \ + ((NvUFXP##x##_##y) (((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_U32_TO_UFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvUFXP##x##_##y) ((((((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) / \ + (scale)) + \ + ((((((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y)))) % \ + (scale)) > ((scale) >> 1)) ? 1U : 0U))) + +#define NV_TYPES_UFXP_X_Y_TO_U32(x, y, fxp) \ + ((NvU32) (DRF_VAL(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvUFXP##x##_##y) (fxp))))) + +#define NV_TYPES_UFXP_X_Y_TO_U32_ROUNDED(x, y, fxp) \ + (NV_TYPES_UFXP_X_Y_TO_U32(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvUFXP##x##_##y) (fxp))) ? \ + 1U : 0U)) + +// 64-bit Unsigned FXP +#define NV_TYPES_U64_TO_UFXP_X_Y(x, y, integer) \ + ((NvUFXP##x##_##y) (((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_U64_TO_UFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvUFXP##x##_##y) (((((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_UFXP_X_Y_TO_U64(x, y, fxp) \ + ((NvU64) (DRF_VAL64(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvUFXP##x##_##y) (fxp))))) + +#define NV_TYPES_UFXP_X_Y_TO_U64_ROUNDED(x, y, fxp) \ + (NV_TYPES_UFXP_X_Y_TO_U64(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM64(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvUFXP##x##_##y) (fxp))) ? \ + 1U : 0U)) + +// +// 32-bit Signed FXP: +// Some compilers do not support left shift negative values +// so typecast integer to NvU32 instead of NvS32 +// +// Note that there is an issue with the rounding in +// NV_TYPES_S32_TO_SFXP_X_Y_SCALED. In particular, when the signs of the +// numerator and denominator don't match, the rounding is done towards positive +// infinity, rather than away from 0. This will need to be fixed in a follow-up +// change. +// +#define NV_TYPES_S32_TO_SFXP_X_Y(x, y, integer) \ + ((NvSFXP##x##_##y) (((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_S32_TO_SFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvSFXP##x##_##y) (((((NvS32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_SFXP_X_Y_TO_S32(x, y, fxp) \ + ((NvS32) ((DRF_VAL(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvSFXP##x##_##y) (fxp)))) | \ + ((DRF_VAL(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE) ? \ + DRF_NUM(_TYPES, _SFXP, _S32_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S32_SIGN_EXTENSION_NEGATIVE((x), (y))) : \ + DRF_NUM(_TYPES, _SFXP, _S32_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S32_SIGN_EXTENSION_POSITIVE((x), (y)))))) + +/*! + * Note: The rounding action for signed numbers should ideally round away from + * 0 in both the positive and the negative regions. + * For positive numbers, we add 1 if the fractional MSb is 1. + * For negative numbers, we add -1 (equivalent to subtracting 1) if the + * fractional MSb is 1. + */ +#define NV_TYPES_SFXP_X_Y_TO_S32_ROUNDED(x, y, fxp) \ + (NV_TYPES_SFXP_X_Y_TO_S32(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvSFXP##x##_##y) (fxp))) ? \ + ((DRF_VAL(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE) ? 
1 : -1) : 0)) + +#define NV_TYPES_SFXP_X_Y_TO_FLOAT32(x, y, fxp) \ + ((NvF32) NV_TYPES_SFXP_X_Y_TO_S32(x, y, (fxp)) + \ + ((NvF32) DRF_NUM(_TYPES, _FXP, _FRACTIONAL((x), (y)), \ + ((NvSFXP##x##_##y) (fxp))) / (NvF32) (1 << (y)))) + +// +// 64-bit Signed FXP: +// Some compilers do not support left shift negative values +// so typecast integer to NvU64 instead of NvS64 +// +// Note that there is an issue with the rounding in +// NV_TYPES_S64_TO_SFXP_X_Y_SCALED. In particular, when the signs of the +// numerator and denominator don't match, the rounding is done towards positive +// infinity, rather than away from 0. This will need to be fixed in a follow-up +// change. +// +#define NV_TYPES_S64_TO_SFXP_X_Y(x, y, integer) \ + ((NvSFXP##x##_##y) (((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_S64_TO_SFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvSFXP##x##_##y) (((((NvS64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_SFXP_X_Y_TO_S64(x, y, fxp) \ + ((NvS64) ((DRF_VAL64(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvSFXP##x##_##y) (fxp)))) | \ + ((DRF_VAL64(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE) ? \ + DRF_NUM64(_TYPES, _SFXP, _S64_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S64_SIGN_EXTENSION_NEGATIVE((x), (y))) : \ + DRF_NUM64(_TYPES, _SFXP, _S64_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S64_SIGN_EXTENSION_POSITIVE((x), (y)))))) + +/*! + * Note: The rounding action for signed numbers should ideally round away from + * 0 in both the positive and the negative regions. + * For positive numbers, we add 1 if the fractional MSb is 1. + * For negative numbers, we add -1 (equivalent to subtracting 1) if the + * fractional MSb is 1. + */ +#define NV_TYPES_SFXP_X_Y_TO_S64_ROUNDED(x, y, fxp) \ + (NV_TYPES_SFXP_X_Y_TO_S64(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM64(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvSFXP##x##_##y) (fxp))) ? \ + ((DRF_VAL64(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE) ? 1 : -1) : 0)) + +/*! + * Macros representing the single-precision IEEE 754 floating point format for + * "binary32", also known as "single" and "float". + * + * Single precision floating point format wiki [1] + * + * _SIGN + * Single bit representing the sign of the number. + * _EXPONENT + * Unsigned 8-bit number representing the exponent value by which to scale + * the mantissa. + * _BIAS - The value by which to offset the exponent to account for sign. + * _MANTISSA + * Explicit 23-bit significand of the value. When exponent != 0, this is an + * implicitly 24-bit number with a leading 1 prepended. This 24-bit number + * can be conceptualized as FXP 9.23. + * + * With these definitions, the value of a floating point number can be + * calculated as: + * (-1)^(_SIGN) * + * 2^(_EXPONENT - _EXPONENT_BIAS) * + * (1 + _MANTISSA / (1 << 23)) + */ +// [1] : http://en.wikipedia.org/wiki/Single_precision_floating-point_format +#define NV_TYPES_SINGLE_SIGN 31:31 +#define NV_TYPES_SINGLE_SIGN_POSITIVE 0x00000000 +#define NV_TYPES_SINGLE_SIGN_NEGATIVE 0x00000001 +#define NV_TYPES_SINGLE_EXPONENT 30:23 +#define NV_TYPES_SINGLE_EXPONENT_ZERO 0x00000000 +#define NV_TYPES_SINGLE_EXPONENT_BIAS 0x0000007F +#define NV_TYPES_SINGLE_MANTISSA 22:0 + + +/*! + * Helper macro to return a IEEE 754 single-precision value's mantissa as an + * unsigned FXP 9.23 value. 
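+ *
+ * (Editorial illustration, not from the original header) For 1.0f, whose
+ * bit pattern is 0x3F800000, the exponent field is 0x7F (nonzero), so the
+ * implicit leading 1 is prepended and the macro below yields the UFXP 9.23
+ * value 0x00800000, i.e. 1.0 in 9.23 notation.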
+ * + * @param[in] single IEEE 754 single-precision value to manipulate. + * + * @return IEEE 754 single-precision values mantissa represented as an unsigned + * FXP 9.23 value. + */ +#define NV_TYPES_SINGLE_MANTISSA_TO_UFXP9_23(single) \ + ((NvUFXP9_23)(FLD_TEST_DRF(_TYPES, _SINGLE, _EXPONENT, _ZERO, single) ? \ + NV_TYPES_U32_TO_UFXP_X_Y(9, 23, 0) : \ + (NV_TYPES_U32_TO_UFXP_X_Y(9, 23, 1) + \ + DRF_VAL(_TYPES, _SINGLE, _MANTISSA, single)))) + +/*! + * Helper macro to return an IEEE 754 single-precision value's exponent, + * including the bias. + * + * @param[in] single IEEE 754 single-precision value to manipulate. + * + * @return Signed exponent value for IEEE 754 single-precision. + */ +#define NV_TYPES_SINGLE_EXPONENT_BIASED(single) \ + ((NvS32)(DRF_VAL(_TYPES, _SINGLE, _EXPONENT, single) - \ + NV_TYPES_SINGLE_EXPONENT_BIAS)) + +/*! + * NvTemp - temperature data type introduced to avoid bugs in conversion between + * various existing notations. + */ +typedef NvSFXP24_8 NvTemp; + +/*! + * Macros for NvType <-> Celsius temperature conversion. + */ +#define NV_TYPES_CELSIUS_TO_NV_TEMP(cel) \ + NV_TYPES_S32_TO_SFXP_X_Y(24,8,(cel)) +#define NV_TYPES_NV_TEMP_TO_CELSIUS_TRUNCED(nvt) \ + NV_TYPES_SFXP_X_Y_TO_S32(24,8,(nvt)) +#define NV_TYPES_NV_TEMP_TO_CELSIUS_ROUNDED(nvt) \ + NV_TYPES_SFXP_X_Y_TO_S32_ROUNDED(24,8,(nvt)) +#define NV_TYPES_NV_TEMP_TO_CELSIUS_FLOAT(nvt) \ + NV_TYPES_SFXP_X_Y_TO_FLOAT32(24,8,(nvt)) + +/*! + * Macro for NvType -> number of bits conversion + */ +#define NV_NBITS_IN_TYPE(type) (8 * sizeof(type)) + +/*! + * Macro to convert SFXP 11.5 to NvTemp. + */ +#define NV_TYPES_NVSFXP11_5_TO_NV_TEMP(x) ((NvTemp)(x) << 3) + +/*! + * Macro to convert UFXP11.5 Watts to NvU32 milli-Watts. + */ +#define NV_TYPES_NVUFXP11_5_WATTS_TO_NVU32_MILLI_WATTS(x) ((((NvU32)(x)) * ((NvU32)1000)) >> 5) + +#endif /* NVFIXEDTYPES_INCLUDED */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h new file mode 100644 index 0000000..d018414 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + + /***************************************************************************\ +|* *| +|* NV GPU Types *| +|* *| +|* This header contains definitions describing NVIDIA's GPU hardware state. *| +|* *| + \***************************************************************************/ + + +#ifndef NVGPUTYPES_INCLUDED +#define NVGPUTYPES_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + + /***************************************************************************\ +|* NvNotification *| + \***************************************************************************/ + +/***** NvNotification Structure *****/ +/* + * NV objects return information about method completion to clients via an + * array of notification structures in main memory. + * + * The client sets the status field to NV???_NOTIFICATION_STATUS_IN_PROGRESS. + * NV fills in the NvNotification[] data structure in the following order: + * timeStamp, otherInfo32, otherInfo16, and then status. + */ + +/* memory data structures */ +typedef volatile struct NvNotificationRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvV32 info32; /* info returned depends on method 0008-000b*/ + NvV16 info16; /* info returned depends on method 000c-000d*/ + NvV16 status; /* user sets bit 15, NV sets status 000e-000f*/ +} NvNotification; + + /***************************************************************************\ +|* NvGpuSemaphore *| + \***************************************************************************/ + +/***** NvGpuSemaphore Structure *****/ +/* + * NvGpuSemaphore objects are used by the GPU to synchronize multiple + * command-streams. + * + * Please refer to class documentation for details regarding the content of + * the data[] field. + */ + +/* memory data structures */ +typedef volatile struct NvGpuSemaphoreRec { + NvV32 data[2]; /* Payload/Report data 0000-0007*/ + struct { /* 0008- */ + NvV32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 8- f*/ + } timeStamp; /* -000f*/ +} NvGpuSemaphore; + + /***************************************************************************\ +|* NvGetReport *| + \***************************************************************************/ + +/* + * NV objects, starting with Kelvin, return information such as pixel counts to + * the user via the NV*_GET_REPORT method. + * + * The client fills in the "zero" field to any nonzero value and waits until it + * becomes zero. NV fills in the timeStamp, value, and zero fields. + */ +typedef volatile struct NVGetReportRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 value; /* info returned depends on method 0008-000b*/ + NvU32 zero; /* always written to zero 000c-000f*/ +} NvGetReport; + + /***************************************************************************\ +|* NvRcNotification *| + \***************************************************************************/ + +/* + * NV robust channel notification information is reported to clients via + * standard NV01_EVENT objects bound to instance of the NV*_CHANNEL_DMA and + * NV*_CHANNEL_GPFIFO objects. + */ +typedef struct NvRcNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 
1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 exceptLevel; /* exception level 000c-000f*/ + NvU32 exceptType; /* exception type 0010-0013*/ +} NvRcNotification; + + /***************************************************************************\ +|* NvSyncPointFence *| + \***************************************************************************/ + +/***** NvSyncPointFence Structure *****/ +/* + * NvSyncPointFence objects represent a syncpoint event. The syncPointID + * identifies the syncpoint register and the value is the value that the + * register will contain right after the event occurs. + * + * If syncPointID contains NV_INVALID_SYNCPOINT_ID then this is an invalid + * event. This is often used to indicate an event in the past (i.e. no need to + * wait). + * + * For more info on syncpoints refer to Mobile channel and syncpoint + * documentation. + */ +typedef struct NvSyncPointFenceRec { + NvU32 syncPointID; + NvU32 value; +} NvSyncPointFence; + +#define NV_INVALID_SYNCPOINT_ID ((NvU32)-1) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +typedef NvU64 NvOffset; /* GPU address */ + +#define NvOffset_HI32(n) ((NvU32)(((NvU64)(n)) >> 32)) +#define NvOffset_LO32(n) ((NvU32)((NvU64)(n))) + +/* +* There are two types of GPU-UUIDs available: +* +* (1) a SHA-256 based 32 byte ID, formatted as a 64 character +* hexadecimal string as "GPU-%16x-%08x-%08x-%08x-%024x"; this is +* deprecated. +* +* (2) a SHA-1 based 16 byte ID, formatted as a 32 character +* hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the +* canonical format of a UUID); this is the default. +*/ +#define NV_GPU_UUID_SHA1_LEN (16) +#define NV_GPU_UUID_SHA256_LEN (32) +#define NV_GPU_UUID_LEN NV_GPU_UUID_SHA1_LEN + +#ifdef __cplusplus +}; +#endif + +#endif /* NVGPUTYPES_INCLUDED */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h new file mode 100644 index 0000000..28c1ba5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#ifndef _NV_I2C_H_
+#define _NV_I2C_H_
+
+#define NV_I2C_MSG_WR 0x0000
+#define NV_I2C_MSG_RD 0x0001
+
+typedef struct nv_i2c_msg_s
+{
+    NvU16 addr;
+    NvU16 flags;
+    NvU16 len;
+    NvU8* buf;
+} nv_i2c_msg_t;
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h
new file mode 100644
index 0000000..eb4dc72
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h
@@ -0,0 +1,98 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************\
+*                                                                              *
+*   Description:                                                               *
+*       Accommodates sharing of IMP-related structures between kernel         *
+*       interface files and core RM.                                          *
+*                                                                              *
+\******************************************************************************/
+
+#pragma once
+
+#include <nvtypes.h>
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: nvimpshared.finn
+//
+
+//
+// There are only a small number of discrete dramclk frequencies available on
+// the system. This structure contains IMP-relevant information associated
+// with a specific dramclk frequency.
+//
+typedef struct DRAM_CLK_INSTANCE {
+    NvU32 dram_clk_freq_khz;
+
+    NvU32 mchub_clk_khz;
+
+    NvU32 mc_clk_khz;
+
+    NvU32 max_iso_bw_kbps;
+
+    //
+    // switch_latency_ns is the maximum time required to switch the dramclk
+    // frequency to the frequency specified in dram_clk_freq_khz.
+    //
+    NvU32 switch_latency_ns;
+} DRAM_CLK_INSTANCE;
+
+//
+// This table is used to collect information from other modules that is needed
+// for RM IMP calculations. (Used on Tegra only.)
+//
+#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_UNKNOWN 0U
+#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR4 1U
+#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR5 2U
+
+typedef struct TEGRA_IMP_IMPORT_DATA {
+    //
+    // max_iso_bw_kbps stores the maximum possible ISO bandwidth available to
+    // display, assuming display is the only active ISO client. (Note that ISO
+    // bandwidth will typically be allocated to multiple clients, so display
+    // will generally not have access to the maximum possible bandwidth.)
+    //
+    NvU32 max_iso_bw_kbps;
+
+    NvU32 dram_type;
+    // On Orin, each dram channel is 16 bits wide.
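+    // (With 16-bit channels, as on Orin, the total DRAM interface width is
+    // num_dram_channels * 16 bits.)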
+ NvU32 num_dram_channels; + + // + // dram_clk_instance stores entries for all possible dramclk frequencies, + // sorted by dramclk frequency in increasing order. + // + // "24" is expected to be larger than the actual number of required entries + // (which is provided by a BPMP API), but it can be increased if necessary. + // + // num_dram_clk_entries is filled in with the actual number of distinct + // dramclk entries. + // + NvU32 num_dram_clk_entries; + DRAM_CLK_INSTANCE dram_clk_instance[24]; +} TEGRA_IMP_IMPORT_DATA; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h new file mode 100644 index 0000000..a896adf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: nvlimits.finn +// + + + + +/* + * This is the maximum number of GPUs supported in a single system. + */ +#define NV_MAX_DEVICES 32 + +/* + * This is the maximum number of subdevices within a single device. + */ +#define NV_MAX_SUBDEVICES 8 + +/* + * This is the maximum length of the process name string. + */ +#define NV_PROC_NAME_MAX_LENGTH 100U + +/* + * This is the maximum number of heads per GPU. + */ +#define NV_MAX_HEADS 4 diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h new file mode 100644 index 0000000..210e237 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h @@ -0,0 +1,915 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * nvmisc.h
+ */
+#ifndef __NV_MISC_H
+#define __NV_MISC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif //__cplusplus
+
+#include "nvtypes.h"
+
+#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
+//
+// Miscellaneous macros useful for bit field manipulations
+//
+// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a.
+#ifndef BIT
+#define BIT(b) (1U<<(b))
+#endif
+#ifndef BIT32
+#define BIT32(b) ((NvU32)1U<<(b))
+#endif
+#ifndef BIT64
+#define BIT64(b) ((NvU64)1U<<(b))
+#endif
+
+#endif
+
+//
+// It is recommended to use the following bit macros to avoid macro name
+// collisions with other src code bases.
+//
+#ifndef NVBIT
+#define NVBIT(b) (1U<<(b))
+#endif
+#ifndef NVBIT_TYPE
+#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
+#endif
+#ifndef NVBIT32
+#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
+#endif
+#ifndef NVBIT64
+#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
+#endif
+
+// Helper macros for 32-bit bitmasks
+#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
+#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
+#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
+#define NV_BITMASK32_SET(pChannelMask, chId) \
+    (pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
+#define NV_BITMASK32_GET(pChannelMask, chId) \
+    ((pChannelMask)[NV_BITMASK32_IDX(chId)] & NVBIT(NV_BITMASK32_OFFSET(chId)))
+
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', result is in range of 0-31.
+#define BIT_IDX_32(n)                            \
+    (((((n) & 0xFFFF0000U) != 0U) ? 0x10U: 0U) | \
+     ((((n) & 0xFF00FF00U) != 0U) ? 0x08U: 0U) | \
+     ((((n) & 0xF0F0F0F0U) != 0U) ? 0x04U: 0U) | \
+     ((((n) & 0xCCCCCCCCU) != 0U) ? 0x02U: 0U) | \
+     ((((n) & 0xAAAAAAAAU) != 0U) ? 0x01U: 0U) )
+
+// Index of the 'on' bit (assuming that there is only one).
+// Even if multiple bits are 'on', result is in range of 0-63.
+#define BIT_IDX_64(n)                                        \
+    (((((n) & 0xFFFFFFFF00000000ULL) != 0U) ? 0x20U: 0U) |   \
+     ((((n) & 0xFFFF0000FFFF0000ULL) != 0U) ? 0x10U: 0U) |   \
+     ((((n) & 0xFF00FF00FF00FF00ULL) != 0U) ? 0x08U: 0U) |   \
+     ((((n) & 0xF0F0F0F0F0F0F0F0ULL) != 0U) ? 0x04U: 0U) |   \
+     ((((n) & 0xCCCCCCCCCCCCCCCCULL) != 0U) ? 0x02U: 0U) |   \
+     ((((n) & 0xAAAAAAAAAAAAAAAAULL) != 0U) ? 0x01U: 0U) )
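+
+// For example, BIT_IDX_32(NVBIT32(10)) == 10; with multiple bits set the
+// result still lands in [0, 31], e.g. BIT_IDX_32(0x80000001U) == 31.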
+
+/*!
+ * DRF MACRO README:
+ *
+ * Glossary:
+ * DRF: Device, Register, Field
+ * FLD: Field
+ * REF: Reference
+ *
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA 0xDEADBEEF
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_GAMMA 27:0
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA 31:28
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ZERO 0x00000000
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ONE 0x00000001
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_TWO 0x00000002
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE 0x00000003
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FOUR 0x00000004
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FIVE 0x00000005
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SIX 0x00000006
+ * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SEVEN 0x00000007
+ *
+ *
+ * Device = _DEVICE_OMEGA
+ * This is the common "base" that a group of registers in a manual share
+ *
+ * Register = _REGISTER_ALPHA
+ * Register for a given block of defines is the common root for one or more fields and constants
+ *
+ * Field(s) = _FIELD_GAMMA, _FIELD_ZETA
+ * These are the bit ranges for a given field within the register
+ * Fields are not required to have defined constant values (enumerations)
+ *
+ * Constant(s) = _ZERO, _ONE, _TWO, ...
+ * These are named values (enums) a field can contain; the width of the constants should not be larger than the field width
+ *
+ * MACROS:
+ *
+ * DRF_SHIFT:
+ * Bit index of the lower bound of a field
+ * DRF_SHIFT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 28
+ *
+ * DRF_SHIFT_RT:
+ * Bit index of the higher bound of a field
+ * DRF_SHIFT_RT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 31
+ *
+ * DRF_MASK:
+ * Produces a mask of 1-s equal to the width of a field
+ * DRF_MASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF (four 1s starting at bit 0)
+ *
+ * DRF_SHIFTMASK:
+ * Produces a mask of 1s equal to the width of a field at the location of the field
+ * DRF_SHIFTMASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF0000000
+ *
+ * DRF_DEF:
+ * Shifts a field constant's value to the correct field offset
+ * DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE) == 0x30000000
+ *
+ * DRF_NUM:
+ * Shifts a number to the location of a particular field
+ * DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3) == 0x30000000
+ * NOTE: If the value passed in is wider than the field, the value's high bits will be truncated
+ *
+ * DRF_SIZE:
+ * Provides the width of the field in bits
+ * DRF_SIZE(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 4
+ *
+ * DRF_VAL:
+ * Provides the value of an input within the field specified
+ * DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234) == 0xA
+ * This is sort of like the inverse of DRF_NUM
+ *
+ * DRF_IDX...:
+ * These macros are similar to the above but for fields that accept an index argument
+ *
+ * FLD_SET_DRF:
+ * Set the field bits in a given value with the given field constant
+ * NvU32 x = 0x00001234;
+ * x = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, x);
+ * x == 0x30001234;
+ *
+ * FLD_SET_DRF_NUM:
+ * Same as FLD_SET_DRF but instead of using a field constant a literal/variable is passed in
+ * NvU32 x = 0x00001234;
+ * x = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xF, x);
+ * x == 0xF0001234;
+ *
+ * FLD_IDX...:
+ * These macros are similar to the above but for fields that accept an index argument
+ *
+ * FLD_TEST_DRF:
+ * Test if location specified by drf in 'v' has the same value as NV_drfc
+ * FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
+ *
+ * FLD_TEST_DRF_NUM:
+ * Test if locations specified by drf in 'v' have the same value as n
+ * FLD_TEST_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, 0x3000ABCD) == NV_TRUE
+ *
+ * REF_DEF:
+ * Like DRF_DEF but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
+ * REF_DEF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE) == 0x30000000
+ *
+ * REF_VAL:
+ * Like DRF_VAL but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
+ * REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xABCD1234) == 0xA
+ *
+ * REF_NUM:
+ * Like DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
+ * REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA) == 0xA0000000
+ *
+ * FLD_SET_REF_NUM:
+ * Like FLD_SET_DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
+ * NvU32 x = 0x00001234;
+ * x = FLD_SET_REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xF, x);
+ * x == 0xF0001234;
+ *
+ * FLD_TEST_REF:
+ * Like FLD_TEST_DRF but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
+ * FLD_TEST_REF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
+ *
+ * Other macros:
+ * There is a plethora of other macros below that extend the above (notably Multi-Word (MW), 64-bit, and some
+ * reg read/write variations). I hope these are self explanatory. If you have a need to use them, you
+ * probably have some knowledge of how they work.
+ */
+
+// tegra mobile uses nvmisc_macros.h and can't access nvmisc.h... and sometimes both get included.
+#ifndef _NVMISC_MACROS_H
+// Use Coverity Annotation to mark issues as false positives/ignore when using single bit defines.
+#define DRF_ISBIT(bitval,drf) \
+    ( /* coverity[identical_branches] */ \
+      (bitval != 0) ? drf )
+#define DEVICE_BASE(d) (0?d) // what's up with this name? totally non-parallel to the macros below
+#define DEVICE_EXTENT(d) (1?d) // what's up with this name? totally non-parallel to the macros below
+#ifdef NV_MISRA_COMPLIANCE_REQUIRED
+#ifdef MISRA_14_3
+#define DRF_BASE(drf) (drf##_LOW_FIELD)
+#define DRF_EXTENT(drf) (drf##_HIGH_FIELD)
+#define DRF_SHIFT(drf) ((drf##_LOW_FIELD) % 32U)
+#define DRF_SHIFT_RT(drf) ((drf##_HIGH_FIELD) % 32U)
+#define DRF_MASK(drf) (0xFFFFFFFFU >> (31U - ((drf##_HIGH_FIELD) % 32U) + ((drf##_LOW_FIELD) % 32U)))
+#else
+#define DRF_BASE(drf) (NV_FALSE?drf) // much better
+#define DRF_EXTENT(drf) (NV_TRUE?drf) // much better
+#define DRF_SHIFT(drf) (((NvU32)DRF_BASE(drf)) % 32U)
+#define DRF_SHIFT_RT(drf) (((NvU32)DRF_EXTENT(drf)) % 32U)
+#define DRF_MASK(drf) (0xFFFFFFFFU>>(31U - DRF_SHIFT_RT(drf) + DRF_SHIFT(drf)))
+#endif
+#define DRF_DEF(d,r,f,c) (((NvU32)(NV ## d ## r ## f ## c))<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_NUM(d,r,f,n) ((((NvU32)(n))&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_SHIFTMASK(drf) (DRF_MASK(drf)<<(DRF_SHIFT(drf)))
+#define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1U)
+#define DRF_VAL(d,r,f,v) ((((NvU32)(v))>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f))
+#else
+#define DRF_BASE(drf) (0?drf) // much better
+#define DRF_EXTENT(drf) (1?drf) // much better
+#define DRF_SHIFT(drf) ((DRF_ISBIT(0,drf)) % 32)
+#define DRF_SHIFT_RT(drf) ((DRF_ISBIT(1,drf)) % 32)
+#define DRF_MASK(drf) (0xFFFFFFFFU>>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32)))
+#define DRF_SHIFTMASK(drf) (DRF_MASK(drf)<<(DRF_SHIFT(drf)))
+#define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1U)
+#define DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_NUM(d,r,f,n) (((n)&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
+#define DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f))
+#endif // NV_MISRA_COMPLIANCE_REQUIRED
+#endif // _NVMISC_MACROS_H
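+
+//
+// For example, with the hypothetical NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA
+// field (31:28) from the README above:
+//
+//     NvU32 reg = 0;
+//     reg = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, reg); // reg == 0x30000000
+//     if (FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, reg))
+//     {
+//         NvU32 zeta = DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, reg); // zeta == 0x3
+//     }
+//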
+// Signed version of DRF_VAL, which takes care of extending sign bit.
+#define DRF_VAL_SIGNED(d,r,f,v) (((DRF_VAL(d,r,f,(v)) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))
+#define DRF_IDX_DEF(d,r,f,i,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_DEF(d,r,f,i,o,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i,o)))
+#define DRF_IDX_NUM(d,r,f,i,n) (((n)&DRF_MASK(NV##d##r##f(i)))<<DRF_SHIFT(NV##d##r##f(i)))
+#define DRF_IDX_VAL(d,r,f,i,v) (((v)>>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o)))
+// Fractional version of DRF_VAL which reads Fx.y fixed point number (x.y)*z
+#define DRF_VAL_FRAC(d,r,x,y,v,z) ((DRF_VAL(d,r,x,(v))*z) + ((DRF_VAL(d,r,y,v)*z) / (1<<DRF_SIZE(NV##d##r##y))))
+
+//
+// 64 Bit Versions
+//
+#define DRF_SHIFT64(drf) ((DRF_ISBIT(0,drf)) % 64)
+#define DRF_MASK64(drf) (NV_U64_MAX>>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64)))
+#define DRF_SHIFTMASK64(drf) (DRF_MASK64(drf)<<(DRF_SHIFT64(drf)))
+
+#define DRF_DEF64(d,r,f,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV ## d ## r ## f))
+#define DRF_NUM64(d,r,f,n) ((((NvU64)(n))&DRF_MASK64(NV ## d ## r ## f))<<DRF_SHIFT64(NV ## d ## r ## f))
+#define DRF_VAL64(d,r,f,v) ((((NvU64)(v))>>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f))
+
+#define DRF_VAL_SIGNED64(d,r,f,v) (((DRF_VAL64(d,r,f,(v)) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))
+#define DRF_IDX_DEF64(d,r,f,i,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i,o)))
+#define DRF_IDX_NUM64(d,r,f,i,n) ((((NvU64)(n))&DRF_MASK64(NV##d##r##f(i)))<<DRF_SHIFT64(NV##d##r##f(i)))
+#define DRF_IDX_VAL64(d,r,f,i,v) ((((NvU64)(v))>>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i)))
+#define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o)))
+
+#define FLD_SET_DRF64(d,r,f,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c))
+#define FLD_SET_DRF_NUM64(d,r,f,n,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n))
+#define FLD_IDX_SET_DRF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
+#define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c))
+#define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
+#define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n))
+#define FLD_SET_DRF_IDX64(d,r,f,c,i,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i)))
+
+#define FLD_TEST_DRF64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) == NV##d##r##f##c)
+#define FLD_TEST_DRF_AND64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) & NV##d##r##f##c)
+#define FLD_TEST_DRF_NUM64(d,r,f,n,v) (DRF_VAL64(d, r, f, (v)) == (n))
+#define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, (v)) == NV##d##r##f##c)
+#define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, (v)) == NV##d##r##f##c)
+
+#define REF_DEF64(drf,d) (((drf ## d)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
+#define REF_VAL64(drf,v) (((v)>>DRF_SHIFT64(drf))&DRF_MASK64(drf))
+#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
+#define REF_NUM64(drf,n) (((NvU64)(n)&(0xFFFFFFFFFFFFFFFFU>>(63U-((drf##_HIGH_FIELD) % 63U)+((drf##_LOW_FIELD) % 63U)))) << ((drf##_LOW_FIELD) % 63U))
+#else
+#define REF_NUM64(drf,n) (((NvU64)(n)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
+#endif
+#define REF_DEF(drf,d) (((drf ## d)&DRF_MASK(drf))<<DRF_SHIFT(drf))
+#define REF_VAL(drf,v) (((v)>>DRF_SHIFT(drf))&DRF_MASK(drf))
+#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
+#define REF_NUM(drf,n) (((n)&(0xFFFFFFFFU>>(31U-((drf##_HIGH_FIELD) % 32U)+((drf##_LOW_FIELD) % 32U)))) << ((drf##_LOW_FIELD) % 32U))
+#else
+#define REF_NUM(drf,n) (((n)&DRF_MASK(drf))<<DRF_SHIFT(drf))
+#endif
+#define FLD_SET_REF_NUM(drf,n,v) (((v) & ~DRF_SHIFTMASK(drf)) | REF_NUM(drf,n))
+#define FLD_TEST_REF(drf,c,v) ((REF_VAL(drf, (v)) == drf##c))
+#define FLD_TEST_REF_NUM(drf,n,v) ((REF_VAL(drf, (v)) == (n)))
+
+#define FLD_SET_DRF(d,r,f,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c))
+#define FLD_SET_DRF_NUM(d,r,f,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_NUM(d,r,f,n))
+#define FLD_TEST_DRF(d,r,f,c,v) (DRF_VAL(d, r, f, (v)) == NV##d##r##f##c)
+#define FLD_TEST_DRF_AND(d,r,f,c,v) (DRF_VAL(d, r, f, (v)) & NV##d##r##f##c)
+#define FLD_TEST_DRF_NUM(d,r,f,n,v) (DRF_VAL(d, r, f, (v)) == (n))
+
+// DRF macros for symbols prefixed with "CR" instead of "NV".
+#define CR_DRF_DEF(d,r,f,c) ((CR ## d ## r ## f ## c)<<DRF_SHIFT(CR ## d ## r ## f))
+#define CR_DRF_NUM(d,r,f,n) (((n)&DRF_MASK(CR ## d ## r ## f))<<DRF_SHIFT(CR ## d ## r ## f))
+#define CR_DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f))
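+
+// For example, with the hypothetical field
+// SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA (31:28) from the
+// README above:
+//
+//     NvU32 v = REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA); // 0xA0000000
+//     NvU32 z = REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, v);   // 0xA
+//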
+
+// Multi-word (MW) field manipulations. For multi-word structures (e.g., Fermi SPH),
+// fields may have bit numbers beyond 32. To avoid errors using "classic" multi-word macros,
+// all the field extents are defined as "MW(X)". For example, MW(127:96) means
+// the field is in bits 0-31 of word number 3 of the structure.
+//
+// DRF_VAL_MW() macro is meant to be used for native endian 32-bit aligned 32-bit word data,
+// not for byte stream data.
+//
+// DRF_VAL_BS() macro is for byte stream data used in fbQueryBIOS_XXX().
+//
+#define DRF_EXPAND_MW(drf) drf // used to turn "MW(a:b)" into "a:b"
+#define DRF_PICK_MW(drf,v) ((v)? DRF_EXPAND_##drf) // picks low or high bits
+#define DRF_WORD_MW(drf) (DRF_PICK_MW(drf,0)/32) // which word in a multi-word array
+#define DRF_BASE_MW(drf) (DRF_PICK_MW(drf,0)%32) // which start bit in the selected word?
+#define DRF_EXTENT_MW(drf) (DRF_PICK_MW(drf,1)%32) // which end bit in the selected word
+#define DRF_SHIFT_MW(drf) (DRF_PICK_MW(drf,0)%32)
+#define DRF_MASK_MW(drf) (0xFFFFFFFFU>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32))
+#define DRF_SHIFTMASK_MW(drf) ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf)))
+#define DRF_SIZE_MW(drf) (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1)
+
+#define DRF_DEF_MW(d,r,f,c) ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f))
+#define DRF_NUM_MW(d,r,f,n) (((n)&DRF_MASK_MW(NV##d##r##f))<<DRF_SHIFT_MW(NV##d##r##f))
+#define DRF_VAL_MW_1WORD(d,r,f,v) ((((v)[DRF_WORD_MW(NV##d##r##f)])>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
+#define DRF_SPANS(drf) ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32))
+#define DRF_WORD_MW_LOW(drf) (DRF_PICK_MW(drf,0)/32)
+#define DRF_WORD_MW_HIGH(drf) (DRF_PICK_MW(drf,1)/32)
+#define DRF_MASK_MW_LOW(drf) (0xFFFFFFFFU)
+#define DRF_MASK_MW_HIGH(drf) (0xFFFFFFFFU>>(31-(DRF_EXTENT_MW(drf))))
+#define DRF_SHIFT_MW_LOW(drf) (DRF_PICK_MW(drf,0)%32)
+#define DRF_SHIFT_MW_HIGH(drf) (0)
+#define DRF_MERGE_SHIFT(drf) ((32-((DRF_PICK_MW(drf,0)%32)))%32)
+#define DRF_VAL_MW_2WORD(d,r,f,v) (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
+    (((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
+#define DRF_VAL_MW(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) )
+
+#define DRF_IDX_DEF_MW(d,r,f,i,c) ((NV##d##r##f##c)<<DRF_SHIFT_MW(NV##d##r##f(i)))
+#define DRF_IDX_NUM_MW(d,r,f,i,n) (((n)&DRF_MASK_MW(NV##d##r##f(i)))<<DRF_SHIFT_MW(NV##d##r##f(i)))
+#define DRF_IDX_VAL_MW(d,r,f,i,v) ((((v)[DRF_WORD_MW(NV##d##r##f(i))])>>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i)))
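+
+// For example, a hypothetical field defined as MW(39:32) lives in bits 7:0 of
+// word 1 of the array, so with NvU32 words[2] = { 0x11111111, 0x000000AB },
+// DRF_VAL_MW(d,r,f,words) selects words[1] and yields 0xAB.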
+
+//
+// Logically OR all DRF_DEF constants indexed from zero to s (semi-inclusive).
+// Caution: Target variable v must be pre-initialized.
+//
+#define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v)                  \
+do                                                       \
+{   NvU32 idx;                                           \
+    for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx) \
+    {                                                    \
+        v |= DRF_IDX_DEF(d,r,f,idx,c);                   \
+    }                                                    \
+} while(0)
+
+
+#define FLD_MERGE_MW(drf,n,v) (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n)
+#define FLD_ASSIGN_MW(drf,n,v) ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v))
+#define FLD_IDX_MERGE_MW(drf,i,n,v) (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n)
+#define FLD_IDX_ASSIGN_MW(drf,i,n,v) ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v))
+
+#define FLD_SET_DRF_MW(d,r,f,c,v) FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
+#define FLD_SET_DRF_NUM_MW(d,r,f,n,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v)
+#define FLD_SET_DRF_DEF_MW(d,r,f,c,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
+#define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
+#define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
+#define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v) FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v)
+
+#define FLD_TEST_DRF_MW(d,r,f,c,v) ((DRF_VAL_MW(d, r, f, (v)) == NV##d##r##f##c))
+#define FLD_TEST_DRF_NUM_MW(d,r,f,n,v) ((DRF_VAL_MW(d, r, f, (v)) == n))
+#define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v) ((DRF_IDX_VAL_MW(d, r, f, i, (v)) == NV##d##r##f##c))
+
+#define DRF_VAL_BS(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,(v)) : DRF_VAL_BS_1WORD(d,r,f,(v)) )
+
+//------------------------------------------------------------------------//
+//                                                                        //
+// Common defines for engine register reference wrappers                  //
+//                                                                        //
+// New engine addressing can be created like:                             //
+// \#define ENG_REG_PMC(o,d,r) NV##d##r                                   //
+// \#define ENG_IDX_REG_CE(o,d,i,r) CE_MAP(o,r,i)                         //
+//                                                                        //
+// See FB_FBPA* for more examples                                         //
+//------------------------------------------------------------------------//
+
+#define ENG_RD_REG(g,o,d,r) GPU_REG_RD32(g, ENG_REG##d(o,d,r))
+#define ENG_WR_REG(g,o,d,r,v) GPU_REG_WR32(g, ENG_REG##d(o,d,r), (v))
+#define ENG_RD_DRF(g,o,d,r,f) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+#define ENG_RD_IDX_DRF(g,o,d,r,f,i) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, (i)) == NV##d##r##f##c)
+
+#define ENG_IDX_RD_REG(g,o,d,i,r) GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))
+#define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), (v))
+
+#define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+
+//
+// DRF_READ_1WORD_BS() and DRF_READ_1WORD_BS_HIGH() do not read beyond the bytes that contain
+// the requested value. Reading beyond the actual data causes a page fault panic when the
+// immediately following page happens to be protected or not mapped.
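+// For example, a field whose extent falls in byte 2 of its word (extent bit
+// 16-23) is fetched with DRF_READ_3BYTE_BS() below, touching only the three
+// bytes that contain it.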
+// +#define DRF_VAL_BS_1WORD(d,r,f,v) ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f)) +#define DRF_VAL_BS_2WORD(d,r,f,v) (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \ + (((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f))) + +#define DRF_READ_1BYTE_BS(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4])) +#define DRF_READ_2BYTE_BS(drf,v) (DRF_READ_1BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS(drf,v) (DRF_READ_2BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS(drf,v) (DRF_READ_3BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24)) + +#define DRF_READ_1BYTE_BS_HIGH(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4])) +#define DRF_READ_2BYTE_BS_HIGH(drf,v) (DRF_READ_1BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS_HIGH(drf,v) (DRF_READ_2BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS_HIGH(drf,v) (DRF_READ_3BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24)) + +// Calculate 2^n - 1 and avoid shift counter overflow +// +// On Windows amd64, 64 << 64 => 1 +// +#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1) + +#define DRF_READ_1WORD_BS(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS(NV##d##r##f,(v))))) + +#define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v))))) + +#define LOWESTBIT(x) ( (x) & (((x) - 1U) ^ (x)) ) +// Destructive operation on n32 +#define HIGHESTBIT(n32) \ +{ \ + HIGHESTBITIDX_32(n32); \ + n32 = NVBIT(n32); \ +} +#define ONEBITSET(x) ( ((x) != 0U) && (((x) & ((x) - 1U)) == 0U) ) + +// Destructive operation on n32 +#define NUMSETBITS_32(n32) \ +{ \ + n32 = n32 - ((n32 >> 1) & 0x55555555); \ + n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333); \ + n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \ +} + +/*! + * Calculate number of bits set in a 32-bit unsigned integer. + * Pure typesafe alternative to @ref NUMSETBITS_32. + */ +static NV_FORCEINLINE NvU32 +nvPopCount32(const NvU32 x) +{ + NvU32 temp = x; + temp = temp - ((temp >> 1) & 0x55555555U); + temp = (temp & 0x33333333U) + ((temp >> 2) & 0x33333333U); + temp = (((temp + (temp >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24; + return temp; +} + +/*! + * Calculate number of bits set in a 64-bit unsigned integer. + */ +static NV_FORCEINLINE NvU32 +nvPopCount64(const NvU64 x) +{ + NvU64 temp = x; + temp = temp - ((temp >> 1) & 0x5555555555555555ULL); + temp = (temp & 0x3333333333333333ULL) + ((temp >> 2) & 0x3333333333333333ULL); + temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + temp = (temp * 0x0101010101010101ULL) >> 56; + return (NvU32)temp; +} + +/*! + * Determine how many bits are set below a bit index within a mask. 
+ * This assigns a dense ordering to the set bits in the mask. + * + * For example the mask 0xCD contains 5 set bits: + * nvMaskPos32(0xCD, 0) == 0 + * nvMaskPos32(0xCD, 2) == 1 + * nvMaskPos32(0xCD, 3) == 2 + * nvMaskPos32(0xCD, 6) == 3 + * nvMaskPos32(0xCD, 7) == 4 + */ +static NV_FORCEINLINE NvU32 +nvMaskPos32(const NvU32 mask, const NvU32 bitIdx) +{ + return nvPopCount32(mask & (NVBIT32(bitIdx) - 1U)); +} + +// Destructive operation on n32 +#define LOWESTBITIDX_32(n32) \ +{ \ + n32 = BIT_IDX_32(LOWESTBIT(n32));\ +} + +// Destructive operation on n32 +#define HIGHESTBITIDX_32(n32) \ +{ \ + NvU32 count = 0; \ + while (n32 >>= 1) \ + { \ + count++; \ + } \ + n32 = count; \ +} + +// Destructive operation on n32 +#define ROUNDUP_POW2(n32) \ +{ \ + n32--; \ + n32 |= n32 >> 1; \ + n32 |= n32 >> 2; \ + n32 |= n32 >> 4; \ + n32 |= n32 >> 8; \ + n32 |= n32 >> 16; \ + n32++; \ +} + +/*! + * Round up a 32-bit unsigned integer to the next power of 2. + * Pure typesafe alternative to @ref ROUNDUP_POW2. + * + * param[in] x must be in range [0, 2^31] to avoid overflow. + */ +static NV_FORCEINLINE NvU32 +nvNextPow2_U32(const NvU32 x) +{ + NvU32 y = x; + y--; + y |= y >> 1; + y |= y >> 2; + y |= y >> 4; + y |= y >> 8; + y |= y >> 16; + y++; + return y; +} + + +static NV_FORCEINLINE NvU32 +nvPrevPow2_U32(const NvU32 x ) +{ + NvU32 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + return y - (y >> 1); +} + +static NV_FORCEINLINE NvU64 +nvPrevPow2_U64(const NvU64 x ) +{ + NvU64 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + y |= (y >> 32); + return y - (y >> 1); +} + +// Destructive operation on n64 +#define ROUNDUP_POW2_U64(n64) \ +{ \ + n64--; \ + n64 |= n64 >> 1; \ + n64 |= n64 >> 2; \ + n64 |= n64 >> 4; \ + n64 |= n64 >> 8; \ + n64 |= n64 >> 16; \ + n64 |= n64 >> 32; \ + n64++; \ +} + +#define NV_SWAP_U8(a,b) \ +{ \ + NvU8 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +#define NV_SWAP_U32(a,b) \ +{ \ + NvU32 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +/*! + * @brief Macros allowing simple iteration over bits set in a given mask. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * + * @param[in,out] index lvalue that is used as a bit index in the loop + * (can be declared as any NvU* or NvS* variable) + * @param[in] mask expression, loop will iterate over set bits only + */ +#define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \ +{ \ + NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \ + for ((index) = 0U; lclMsk != 0U; (index)++, lclMsk >>= 1U)\ + { \ + if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0U) \ + { \ + continue; \ + } +#define FOR_EACH_INDEX_IN_MASK_END \ + } \ +} + +// +// Size to use when declaring variable-sized arrays +// +#define NV_ANYSIZE_ARRAY 1 + +// +// Returns ceil(a/b) +// +#define NV_CEIL(a,b) (((a)+(b)-1)/(b)) + +// Clearer name for NV_CEIL +#ifndef NV_DIV_AND_CEIL +#define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b) +#endif + +#ifndef NV_MIN +#define NV_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifndef NV_MAX +#define NV_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + +// +// Returns absolute value of provided integer expression +// +#define NV_ABS(a) ((a)>=0?(a):(-(a))) + +// +// Returns 1 if input number is positive, 0 if 0 and -1 if negative. Avoid +// macro parameter as function call which will have side effects. +// +#define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0))) + +// +// Returns 1 if input number is >= 0 or -1 otherwise. 
This assumes 0 has a
+// positive sign.
+//
+#define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1))
+
+// Returns the offset (in bytes) of 'member' in struct 'type'.
+#ifndef NV_OFFSETOF
+    #if defined(__GNUC__) && (__GNUC__ > 3)
+        #define NV_OFFSETOF(type, member) ((NvU32)__builtin_offsetof(type, member))
+    #else
+        #define NV_OFFSETOF(type, member) ((NvU32)(NvU64)&(((type *)0)->member)) // shouldn't we use PtrToUlong? But will need to include windows header.
+    #endif
+#endif
+
+//
+// Performs a rounded division of b into a (unsigned). For SIGNED version of
+// NV_ROUNDED_DIV() macro check the comments in bug 769777.
+//
+#define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2U)) / (b))
+
+/*!
+ * Performs a ceiling division of b into a (unsigned). A "ceiling" division is
+ * one that rounds the result up if a % b != 0.
+ *
+ * @param[in] a Numerator
+ * @param[in] b Denominator
+ *
+ * @return (a / b) + ((a % b != 0) ? 1 : 0).
+ */
+#define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + (b - 1)) / (b))
+
+/*!
+ * Performs subtraction where a negative difference is raised to zero.
+ * Can be used to avoid underflowing an unsigned subtraction.
+ *
+ * @param[in] a Minuend
+ * @param[in] b Subtrahend
+ *
+ * @return a > b ? a - b : 0.
+ */
+#define NV_SUBTRACT_NO_UNDERFLOW(a, b) ((a)>(b) ? (a)-(b) : 0)
+
+/*!
+ * Performs a rounded right-shift of 32-bit unsigned value "a" by "shift" bits.
+ * Will round result away from zero.
+ *
+ * @param[in] a 32-bit unsigned value to shift.
+ * @param[in] shift Number of bits by which to shift.
+ *
+ * @return Resulting shifted value rounded away from zero.
+ */
+#define NV_RIGHT_SHIFT_ROUNDED(a, shift) \
+    (((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1)))
+
+//
+// Power of 2 alignment.
+// (Will give unexpected results if 'gran' is not a power of 2.)
+//
+#ifndef NV_ALIGN_DOWN
+//
+// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type.
+// Otherwise, if aligning an NvU64 with NvU32 granularity, the top 4 bytes get zeroed.
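+// For example, NV_ALIGN_DOWN(0x1FFF, 0x1000) yields 0x1000 and
+// NV_ALIGN_UP(0x1001, 0x1000) yields 0x2000 (granularity must be a power of 2).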
+// +#define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((v) - (v) + (gran) - 1)) +#endif + +#ifndef NV_ALIGN_DOWN64 +#define NV_ALIGN_DOWN64(v, gran) ((v) & ~(((NvU64)gran) - 1)) +#endif + +#ifndef NV_ALIGN_UP64 +#define NV_ALIGN_UP64(v, gran) (((v) + ((gran) - 1)) & ~(((NvU64)gran)-1)) +#endif + +#ifndef NV_IS_ALIGNED +#define NV_IS_ALIGNED(v, gran) (0U == ((v) & ((gran) - 1U))) +#endif + +#ifndef NV_IS_ALIGNED64 +#define NV_IS_ALIGNED64(v, gran) (0U == ((v) & (((NvU64)gran) - 1U))) +#endif + +#ifndef NVMISC_MEMSET +static NV_FORCEINLINE void *NVMISC_MEMSET(void *s, NvU8 c, NvLength n) +{ + NvU8 *b = (NvU8 *) s; + NvLength i; + + for (i = 0; i < n; i++) + { + b[i] = c; + } + + return s; +} +#endif + +#ifndef NVMISC_MEMCPY +static NV_FORCEINLINE void *NVMISC_MEMCPY(void *dest, const void *src, NvLength n) +{ + NvU8 *destByte = (NvU8 *) dest; + const NvU8 *srcByte = (const NvU8 *) src; + NvLength i; + + for (i = 0; i < n; i++) + { + destByte[i] = srcByte[i]; + } + + return dest; +} +#endif + +static NV_FORCEINLINE char *NVMISC_STRNCPY(char *dest, const char *src, NvLength n) +{ + NvLength i; + + for (i = 0; i < n; i++) + { + dest[i] = src[i]; + if (src[i] == '\0') + { + break; + } + } + + for (; i < n; i++) + { + dest[i] = '\0'; + } + + return dest; +} + +/*! + * Convert a void* to an NvUPtr. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] ptr Pointer to be converted + * + * @return Resulting NvUPtr + */ +static NV_FORCEINLINE NvUPtr NV_PTR_TO_NVUPTR(void *ptr) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.p = ptr; + return uAddr.v; +} + +/*! + * Convert an NvUPtr to a void*. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] ptr Pointer to be converted + * + * @return Resulting void * + */ +static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.v = address; + return uAddr.p; +} + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif // __NV_MISC_H + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h new file mode 100644 index 0000000..f825e6f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h @@ -0,0 +1,3163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /***************************************************************************\ +|* *| +|* NV Architecture Interface *| +|* *| +|* defines the Operating System function and ioctl interfaces to *| +|* NVIDIA's Unified Media Architecture (TM). *| +|* *| + \***************************************************************************/ + +#ifndef NVOS_INCLUDED +#define NVOS_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvstatus.h" + +#include "nvgputypes.h" +#include "rs_access.h" + +/* local defines here */ +#define FILE_DEVICE_NV 0x00008000 +#define NV_IOCTL_FCT_BASE 0x00000800 + +// This is the maximum number of subdevices supported in an SLI +// configuration. +#define NVOS_MAX_SUBDEVICES 8 + +/* Define to indicate the use of Unified status codes - bug 200043705*/ +#define UNIFIED_NV_STATUS 1 + + /***************************************************************************\ +|* NV OS Functions *| + \***************************************************************************/ + +/* + Result codes for RM APIs, shared for all the APIs + + *** IMPORTANT *** + + Ensure that no NVOS_STATUS value has the highest bit set. That bit + is used to passthrough the NVOS_STATUS on code expecting an RM_STATUS. +*/ +#define NVOS_STATUS NV_STATUS + +#define NVOS_STATUS_SUCCESS NV_OK +#define NVOS_STATUS_ERROR_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT +#define NVOS_STATUS_ERROR_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE +#define NVOS_STATUS_ERROR_GENERIC NV_ERR_GENERIC +#define NVOS_STATUS_ERROR_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER +#define NVOS_STATUS_ERROR_ILLEGAL_ACTION NV_ERR_ILLEGAL_ACTION +#define NVOS_STATUS_ERROR_IN_USE NV_ERR_STATE_IN_USE +#define NVOS_STATUS_ERROR_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES +#define NVOS_STATUS_ERROR_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE +#define NVOS_STATUS_ERROR_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT +#define NVOS_STATUS_ERROR_INVALID_BASE NV_ERR_INVALID_BASE +#define NVOS_STATUS_ERROR_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL +#define NVOS_STATUS_ERROR_INVALID_CLASS NV_ERR_INVALID_CLASS +#define NVOS_STATUS_ERROR_INVALID_CLIENT NV_ERR_INVALID_CLIENT +#define NVOS_STATUS_ERROR_INVALID_COMMAND NV_ERR_INVALID_COMMAND +#define NVOS_STATUS_ERROR_INVALID_DATA NV_ERR_INVALID_DATA +#define NVOS_STATUS_ERROR_INVALID_DEVICE NV_ERR_INVALID_DEVICE +#define NVOS_STATUS_ERROR_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER +#define NVOS_STATUS_ERROR_INVALID_EVENT NV_ERR_INVALID_EVENT +#define NVOS_STATUS_ERROR_INVALID_FLAGS NV_ERR_INVALID_FLAGS +#define NVOS_STATUS_ERROR_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION +#define NVOS_STATUS_ERROR_INVALID_HEAP NV_ERR_INVALID_HEAP +#define NVOS_STATUS_ERROR_INVALID_INDEX NV_ERR_INVALID_INDEX +#define NVOS_STATUS_ERROR_INVALID_LIMIT NV_ERR_INVALID_LIMIT +#define NVOS_STATUS_ERROR_INVALID_METHOD NV_ERR_INVALID_METHOD +#define NVOS_STATUS_ERROR_INVALID_OBJECT_BUFFER NV_ERR_BUFFER_TOO_SMALL +#define NVOS_STATUS_ERROR_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT +#define NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE +#define NVOS_STATUS_ERROR_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW +#define NVOS_STATUS_ERROR_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD +#define NVOS_STATUS_ERROR_INVALID_OBJECT_PARENT 
NV_ERR_INVALID_OBJECT_PARENT +#define NVOS_STATUS_ERROR_INVALID_OFFSET NV_ERR_INVALID_OFFSET +#define NVOS_STATUS_ERROR_INVALID_OWNER NV_ERR_INVALID_OWNER +#define NVOS_STATUS_ERROR_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT +#define NVOS_STATUS_ERROR_INVALID_PARAMETER NV_ERR_INVALID_PARAMETER +#define NVOS_STATUS_ERROR_INVALID_POINTER NV_ERR_INVALID_POINTER +#define NVOS_STATUS_ERROR_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY +#define NVOS_STATUS_ERROR_INVALID_STATE NV_ERR_INVALID_STATE +#define NVOS_STATUS_ERROR_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH +#define NVOS_STATUS_ERROR_INVALID_XLATE NV_ERR_INVALID_XLATE +#define NVOS_STATUS_ERROR_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING +#define NVOS_STATUS_ERROR_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES +#define NVOS_STATUS_ERROR_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED +#define NVOS_STATUS_ERROR_OPERATING_SYSTEM NV_ERR_OPERATING_SYSTEM +#define NVOS_STATUS_ERROR_LIB_RM_VERSION_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH +#define NVOS_STATUS_ERROR_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT +#define NVOS_STATUS_ERROR_TIMEOUT NV_ERR_TIMEOUT +#define NVOS_STATUS_ERROR_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES +#define NVOS_STATUS_ERROR_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED +#define NVOS_STATUS_ERROR_INVALID_OPERATION NV_ERR_INVALID_OPERATION +#define NVOS_STATUS_ERROR_NOT_COMPATIBLE NV_ERR_NOT_COMPATIBLE +#define NVOS_STATUS_ERROR_MORE_PROCESSING_REQUIRED NV_WARN_MORE_PROCESSING_REQUIRED +#define NVOS_STATUS_ERROR_INSUFFICIENT_PERMISSIONS NV_ERR_INSUFFICIENT_PERMISSIONS +#define NVOS_STATUS_ERROR_TIMEOUT_RETRY NV_ERR_TIMEOUT_RETRY +#define NVOS_STATUS_ERROR_NOT_READY NV_ERR_NOT_READY +#define NVOS_STATUS_ERROR_GPU_IS_LOST NV_ERR_GPU_IS_LOST +#define NVOS_STATUS_ERROR_IN_FULLCHIP_RESET NV_ERR_GPU_IN_FULLCHIP_RESET +#define NVOS_STATUS_ERROR_INVALID_LOCK_STATE NV_ERR_INVALID_LOCK_STATE +#define NVOS_STATUS_ERROR_INVALID_ADDRESS NV_ERR_INVALID_ADDRESS +#define NVOS_STATUS_ERROR_INVALID_IRQ_LEVEL NV_ERR_INVALID_IRQ_LEVEL +#define NVOS_STATUS_ERROR_MEMORY_TRAINING_FAILED NV_ERR_MEMORY_TRAINING_FAILED +#define NVOS_STATUS_ERROR_BUSY_RETRY NV_ERR_BUSY_RETRY +#define NVOS_STATUS_ERROR_INSUFFICIENT_POWER NV_ERR_INSUFFICIENT_POWER +#define NVOS_STATUS_ERROR_OBJECT_NOT_FOUND NV_ERR_OBJECT_NOT_FOUND +#define NVOS_STATUS_ERROR_RESOURCE_LOST NV_ERR_RESOURCE_LOST +#define NVOS_STATUS_ERROR_BUFFER_TOO_SMALL NV_ERR_BUFFER_TOO_SMALL +#define NVOS_STATUS_ERROR_RESET_REQUIRED NV_ERR_RESET_REQUIRED +#define NVOS_STATUS_ERROR_INVALID_REQUEST NV_ERR_INVALID_REQUEST + +#define NVOS_STATUS_ERROR_PRIV_SEC_VIOLATION NV_ERR_PRIV_SEC_VIOLATION +#define NVOS_STATUS_ERROR_GPU_IN_DEBUG_MODE NV_ERR_GPU_IN_DEBUG_MODE + +/* + Note: + This version of the architecture has been changed to allow the + RM to return a client handle that will subsequently used to + identify the client. NvAllocRoot() returns the handle. All + other functions must specify this client handle. + +*/ +/* macro NV01_FREE */ +#define NV01_FREE (0x00000000) + +/* NT ioctl data structure */ +typedef struct +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +} NVOS00_PARAMETERS; + +/* valid hClass values. */ +#define NV01_ROOT (0x00000000) +// +// Redefining it here to maintain consistency with current code +// This is also defined in class cl0001.h +// +#define NV01_ROOT_NON_PRIV (0x00000001) + +// Deprecated, please use NV01_ROOT_CLIENT +#define NV01_ROOT_USER NV01_ROOT_CLIENT + +// +// This will eventually replace NV01_ROOT_USER in RM client code. 
Please use this +// RM client object type for any new RM client object allocations that are being +// added. +// +#define NV01_ROOT_CLIENT (0x00000041) + +/* macro NV01_ALLOC_MEMORY */ +#define NV01_ALLOC_MEMORY (0x00000002) + +/* parameter values */ +#define NVOS02_FLAGS_PHYSICALITY 7:4 +#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000) +#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001) +#define NVOS02_FLAGS_LOCATION 11:8 +#define NVOS02_FLAGS_LOCATION_PCI (0x00000000) +#define NVOS02_FLAGS_LOCATION_AGP (0x00000001) +#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002) +#define NVOS02_FLAGS_COHERENCY 15:12 +#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000) +#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001) +#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002) +#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003) +#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004) +#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005) +#define NVOS02_FLAGS_ALLOC 17:16 +#define NVOS02_FLAGS_ALLOC_NONE (0x00000001) +#define NVOS02_FLAGS_GPU_CACHEABLE 18:18 +#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000) +#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001) +// If requested, RM will create a kernel mapping of this memory. +// Default is no map. +#define NVOS02_FLAGS_KERNEL_MAPPING 19:19 +#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000) +#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20 +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001) + +// +// If the flag is set, the RM will only allow read-only CPU user mappings to the +// allocation. +// +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21 +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001) + +// +// If the flag is set, the RM will only allow read-only DMA mappings to the +// allocation. +// +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22 +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001) + +// +// If the flag is set, the IO memory allocation can be registered with the RM if +// the RM regkey peerMappingOverride is set or the client is privileged. +// +// See Bug 1630288 "[PeerSync] threat related to GPU.." for more details. +// +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23 +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000) +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001) + +// If the flag is set RM will assume the memory pages are of type syncpoint. +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24 +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001) + +// +// If _NO_MAP is requested, the RM in supported platforms will not map the +// allocated system or IO memory into user space. The client can later map +// memory through the RmMapMemory() interface. 
+// If _NEVER_MAP is requested, the RM will never map the allocated system or
+// IO memory into user space.
+//
+#define NVOS02_FLAGS_MAPPING                                       31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT                               (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP                                (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP                             (0x00000002)
+
+// -------------------------------------------------------------------------------------
+
+/* parameters */
+typedef struct
+{
+    NvHandle hRoot;
+    NvHandle hObjectParent;
+    NvHandle hObjectNew;
+    NvV32    hClass;
+    NvV32    flags;
+    NvP64    pMemory NV_ALIGN_BYTES(8);
+    NvU64    limit NV_ALIGN_BYTES(8);
+    NvV32    status;
+} NVOS02_PARAMETERS;
+
+/* parameter values */
+#define NVOS03_FLAGS_ACCESS                                        1:0
+#define NVOS03_FLAGS_ACCESS_READ_WRITE                             (0x00000000)
+#define NVOS03_FLAGS_ACCESS_READ_ONLY                              (0x00000001)
+#define NVOS03_FLAGS_ACCESS_WRITE_ONLY                             (0x00000002)
+
+#define NVOS03_FLAGS_PREALLOCATE                                   2:2
+#define NVOS03_FLAGS_PREALLOCATE_DISABLE                           (0x00000000)
+#define NVOS03_FLAGS_PREALLOCATE_ENABLE                            (0x00000001)
+
+#define NVOS03_FLAGS_GPU_MAPPABLE                                  15:15
+#define NVOS03_FLAGS_GPU_MAPPABLE_DISABLE                          (0x00000000)
+#define NVOS03_FLAGS_GPU_MAPPABLE_ENABLE                           (0x00000001)
+
+// ------------------------------------------------------------------------------------
+// This flag is required for a hack to be placed inside DD that allows it to
+// access a dummy ctxdma as a block linear surface. Refer to bug 1562766 for details.
+//
+// This flag is deprecated, use NVOS03_FLAGS_PTE_KIND.
+//
+#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE                          16:16
+#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE_FALSE                    (0x00000000)
+#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE_TRUE                     (0x00000001)
+
+/*
+ * This field allows the caller to specify the page kind. If the page kind
+ * is not specified, then the page kind associated with the memory will be used.
+ *
+ * In the Tegra display driver stack, the page kind is unknown at the time
+ * of memory allocation/import; the page kind can only be known when the
+ * display driver client creates a framebuffer from the allocated/imported
+ * memory.
+ *
+ * This field is compatible with the NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE flag.
+ */
+#define NVOS03_FLAGS_PTE_KIND                                      17:16
+#define NVOS03_FLAGS_PTE_KIND_NONE                                 (0x00000000)
+#define NVOS03_FLAGS_PTE_KIND_BL                                   (0x00000001)
+#define NVOS03_FLAGS_PTE_KIND_PITCH                                (0x00000002)
+
+#define NVOS03_FLAGS_TYPE                                          23:20
+#define NVOS03_FLAGS_TYPE_NOTIFIER                                 (0x00000001)
+
+/*
+ * This is an alias into the LSB of the TYPE field, which
+ * actually indicates whether a kernel mapping should be created.
+ * If the RM should have access to the memory, then enable this
+ * flag.
+ *
+ * Note that NVOS03_FLAGS_MAPPING is an alias to
+ * the LSB of NVOS03_FLAGS_TYPE. In fact, if the
+ * type is NVOS03_FLAGS_TYPE_NOTIFIER (bit 20 set),
+ * it implicitly means that NVOS03_FLAGS_MAPPING
+ * is _MAPPING_KERNEL. If the client wants to have a
+ * kernel mapping, it should use the _MAPPING_KERNEL
+ * flag set, and _TYPE_NOTIFIER should be used only
+ * with NOTIFIERS.
+ */
+
+#define NVOS03_FLAGS_MAPPING                                       20:20
+#define NVOS03_FLAGS_MAPPING_NONE                                  (0x00000000)
+#define NVOS03_FLAGS_MAPPING_KERNEL                                (0x00000001)
+
+#define NVOS03_FLAGS_CACHE_SNOOP                                   28:28
+#define NVOS03_FLAGS_CACHE_SNOOP_ENABLE                            (0x00000000)
+#define NVOS03_FLAGS_CACHE_SNOOP_DISABLE                           (0x00000001)
+
+// HASH_TABLE:ENABLE means that the context DMA is automatically bound into all
+// channels in the client. This can lead to excessive hash table usage.
+// HASH_TABLE:DISABLE means that the context DMA must be explicitly bound into
+// any channel that needs to use it via NvRmBindContextDma.
+// HASH_TABLE:ENABLE is not supported on NV50 and up, and HASH_TABLE:DISABLE should
+// be preferred for all new code.
+#define NVOS03_FLAGS_HASH_TABLE                                    29:29
+#define NVOS03_FLAGS_HASH_TABLE_ENABLE                             (0x00000000)
+#define NVOS03_FLAGS_HASH_TABLE_DISABLE                            (0x00000001)
+
+/* macro NV01_ALLOC_OBJECT */
+#define NV01_ALLOC_OBJECT                                          (0x00000005)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hRoot;
+    NvHandle hObjectParent;
+    NvHandle hObjectNew;
+    NvV32    hClass;
+    NvV32    status;
+} NVOS05_PARAMETERS;
+
+/* Valid values for hClass in Nv01AllocEvent */
+/* Note that NV01_EVENT_OS_EVENT is the same as NV01_EVENT_WIN32_EVENT */
+/* TODO: delete the WIN32 name */
+#define NV01_EVENT_KERNEL_CALLBACK                                 (0x00000078)
+#define NV01_EVENT_OS_EVENT                                        (0x00000079)
+#define NV01_EVENT_WIN32_EVENT                                     NV01_EVENT_OS_EVENT
+#define NV01_EVENT_KERNEL_CALLBACK_EX                              (0x0000007E)
+
+/* NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. Please use NV01_EVENT_KERNEL_CALLBACK_EX. */
+/* For use with NV01_EVENT_KERNEL_CALLBACK. */
+/* NVOS10_EVENT_KERNEL_CALLBACK data structure storage needs to be retained by the caller. */
+typedef void (*Callback1ArgVoidReturn)(void *arg);
+typedef void (*Callback5ArgVoidReturn)(void *arg1, void *arg2, NvHandle hEvent, NvU32 data, NvU32 status);
+
+/* NOTE: the 'void* arg' below is ok (but unfortunate) since this interface
+   can only be used by other kernel drivers which must share the same ptr-size */
+typedef struct
+{
+    Callback1ArgVoidReturn func;
+    void *arg;
+} NVOS10_EVENT_KERNEL_CALLBACK;
+
+/* For use with NV01_EVENT_KERNEL_CALLBACK_EX. */
+/* NVOS10_EVENT_KERNEL_CALLBACK_EX data structure storage needs to be retained by the caller. */
+/* NOTE: the 'void* arg' below is ok (but unfortunate) since this interface
+   can only be used by other kernel drivers which must share the same ptr-size */
+typedef struct
+{
+    Callback5ArgVoidReturn func;
+    void *arg;
+} NVOS10_EVENT_KERNEL_CALLBACK_EX;
+
+/* Setting this bit in index will set the Event to a Broadcast type */
+/* i.e. each subdevice under a device needs to see the Event before it's signaled */
+#define NV01_EVENT_BROADCAST                                       (0x80000000)
+
+/* allow non-root resman client to create NV01_EVENT_KERNEL_CALLBACK events */
+/* -- this works in debug/develop drivers only (for security reasons) */
+#define NV01_EVENT_PERMIT_NON_ROOT_EVENT_KERNEL_CALLBACK_CREATION  (0x40000000)
+
+/* RM event should be triggered only by the specified subdevice; see cl0005.h
+ * for details re: how to specify subdevice.
+ */
+#define NV01_EVENT_SUBDEVICE_SPECIFIC                              (0x20000000)
+
+/* RM should trigger the event but shouldn't do the book-keeping of data
+ * associated with that event */
+#define NV01_EVENT_WITHOUT_EVENT_DATA                              (0x10000000)
+
+/* RM event should be triggered only by the non-stall interrupt */
+#define NV01_EVENT_NONSTALL_INTR                                   (0x08000000)
+
+/* RM event was allocated from client RM, post events back to client RM */
+#define NV01_EVENT_CLIENT_RM                                       (0x04000000)
+
+/* function OS19 */
+#define NV04_I2C_ACCESS                                            (0x00000013)
+
+#define NVOS_I2C_ACCESS_MAX_BUFFER_SIZE                            2048
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hDevice;
+    NvU32    paramSize;
+    NvP64    paramStructPtr NV_ALIGN_BYTES(8);
+    NvV32    status;
+} NVOS_I2C_ACCESS_PARAMS;
+
+/* current values for command */
+#define NVOS20_COMMAND_unused0001                                  0x0001
+#define NVOS20_COMMAND_unused0002                                  0x0002
+#define NVOS20_COMMAND_STRING_PRINT                                0x0003
+
+/* function OS21 */
+#define NV04_ALLOC                                                 (0x00000015)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hRoot;
+    NvHandle hObjectParent;
+    NvHandle hObjectNew;
+    NvV32    hClass;
+    NvP64    pAllocParms NV_ALIGN_BYTES(8);
+    NvV32    status;
+} NVOS21_PARAMETERS;
+
+/* New struct with rights requested */
+typedef struct
+{
+    NvHandle hRoot;                               // [IN] client handle
+    NvHandle hObjectParent;                       // [IN] parent handle of new object
+    NvHandle hObjectNew;                          // [INOUT] new object handle, 0 to generate
+    NvV32    hClass;                              // [IN] class num of new object
+    NvP64    pAllocParms NV_ALIGN_BYTES(8);       // [IN] class-specific alloc parameters
+    NvP64    pRightsRequested NV_ALIGN_BYTES(8);  // [IN] RS_ACCESS_MASK to request rights, or NULL
+    NvV32    status;                              // [OUT] status
+} NVOS64_PARAMETERS;
+
+/* RM Alloc header
+ *
+ * Replacement for NVOS21/64_PARAMETERS where embedded pointers are not allowed.
+ * The input layout for RM Alloc user space calls should be
+ *
+ * +--- NVOS62_PARAMETERS ---+--- RM Alloc parameters ---+
+ * +--- NVOS65_PARAMETERS ---+--- Rights Requested ---+--- RM Alloc parameters ---+
+ *
+ * NVOS62_PARAMETERS::paramSize is the size of the RM Alloc parameters.
+ * If NVOS65_PARAMETERS::maskSize is 0, Rights Requested will not be present in memory.
+ *
+ */
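+
+//
+// Editorial illustration (not part of the original interface): a minimal
+// sketch of an NVOS21-style allocation request using the NVOS21_PARAMETERS
+// structure above. The handles and class value are placeholders, and the
+// mechanism that hands the structure to the RM (the platform's NV04_ALLOC
+// escape/ioctl path) is outside the scope of this header.
+//
+//     NVOS21_PARAMETERS p = { 0 };
+//     p.hRoot         = hClient;     // client handle from NvAllocRoot()
+//     p.hObjectParent = hParent;     // parent under which to allocate
+//     p.hObjectNew    = hNewObject;  // caller-chosen handle for the new object
+//     p.hClass        = classId;     // class number from the class headers
+//     p.pAllocParms   = (NvP64)0;    // class-specific parameters, if any
+//     // ...hand 'p' to the RM; on return, p.status holds the status code...
+//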
+typedef struct
+{
+    NvHandle hRoot;         // [IN] client handle
+    NvHandle hObjectParent; // [IN] parent handle of the new object
+    NvHandle hObjectNew;    // [IN] new object handle
+    NvV32    hClass;        // [IN] class num of the new object
+    NvU32    paramSize;     // [IN] size in bytes of the RM alloc parameters
+    NvV32    status;        // [OUT] status
+} NVOS62_PARAMETERS;
+
+#define NVOS65_PARAMETERS_VERSION_MAGIC 0x77FEF81E
+
+typedef struct
+{
+    NvHandle hRoot;         // [IN] client handle
+    NvHandle hObjectParent; // [IN] parent handle of the new object
+    NvHandle hObjectNew;    // [INOUT] new object handle, 0 to generate
+    NvV32    hClass;        // [IN] class num of the new object
+    NvU32    paramSize;     // [IN] size in bytes of the RM alloc parameters
+    NvU32    versionMagic;  // [IN] NVOS65_PARAMETERS_VERSION_MAGIC
+    NvU32    maskSize;      // [IN] size in bytes of access mask, or 0 if NULL
+    NvV32    status;        // [OUT] status
+} NVOS65_PARAMETERS;
+
+/* function OS30 */
+#define NV04_IDLE_CHANNELS                                         (0x0000001E)
+
+/* parameter values */
+#define NVOS30_FLAGS_BEHAVIOR                                      3:0
+#define NVOS30_FLAGS_BEHAVIOR_SPIN                                 (0x00000000)
+#define NVOS30_FLAGS_BEHAVIOR_SLEEP                                (0x00000001)
+#define NVOS30_FLAGS_BEHAVIOR_QUERY                                (0x00000002)
+#define NVOS30_FLAGS_BEHAVIOR_FORCE_BUSY_CHECK                     (0x00000003)
+#define NVOS30_FLAGS_CHANNEL                                       7:4
+#define NVOS30_FLAGS_CHANNEL_LIST                                  (0x00000000)
+#define NVOS30_FLAGS_CHANNEL_SINGLE                                (0x00000001)
+#define NVOS30_FLAGS_IDLE                                          30:8
+#define NVOS30_FLAGS_IDLE_PUSH_BUFFER                              (0x00000001)
+#define NVOS30_FLAGS_IDLE_CACHE1                                   (0x00000002)
+#define NVOS30_FLAGS_IDLE_GRAPHICS                                 (0x00000004)
+#define NVOS30_FLAGS_IDLE_MPEG                                     (0x00000008)
+#define NVOS30_FLAGS_IDLE_MOTION_ESTIMATION                        (0x00000010)
+#define NVOS30_FLAGS_IDLE_VIDEO_PROCESSOR                          (0x00000020)
+#define NVOS30_FLAGS_IDLE_MSPDEC                                   (0x00000020)
+#define NVOS30_FLAGS_IDLE_BITSTREAM_PROCESSOR                      (0x00000040)
+#define NVOS30_FLAGS_IDLE_MSVLD                                    (0x00000040)
+#define NVOS30_FLAGS_IDLE_NVDEC0                                   NVOS30_FLAGS_IDLE_MSVLD
+#define NVOS30_FLAGS_IDLE_CIPHER_DMA                               (0x00000080)
+#define NVOS30_FLAGS_IDLE_SEC                                      (0x00000080)
+#define NVOS30_FLAGS_IDLE_CALLBACKS                                (0x00000100)
+#define NVOS30_FLAGS_IDLE_MSPPP                                    (0x00000200)
+#define NVOS30_FLAGS_IDLE_CE0                                      (0x00000400)
+#define NVOS30_FLAGS_IDLE_CE1                                      (0x00000800)
+#define NVOS30_FLAGS_IDLE_CE2                                      (0x00001000)
+#define NVOS30_FLAGS_IDLE_CE3                                      (0x00002000)
+#define NVOS30_FLAGS_IDLE_CE4                                      (0x00004000)
+#define NVOS30_FLAGS_IDLE_CE5                                      (0x00008000)
+#define NVOS30_FLAGS_IDLE_VIC                                      (0x00010000)
+#define NVOS30_FLAGS_IDLE_MSENC                                    (0x00020000)
+#define NVOS30_FLAGS_IDLE_NVENC0                                   NVOS30_FLAGS_IDLE_MSENC
+#define NVOS30_FLAGS_IDLE_NVENC1                                   (0x00040000)
+#define NVOS30_FLAGS_IDLE_NVENC2                                   (0x00080000)
+#define NVOS30_FLAGS_IDLE_NVJPG                                    (0x00100000)
+#define NVOS30_FLAGS_IDLE_NVDEC1                                   (0x00200000)
+#define NVOS30_FLAGS_IDLE_NVDEC2                                   (0x00400000)
+#define NVOS30_FLAGS_IDLE_ACTIVECHANNELS                           (0x00800000)
+#define NVOS30_FLAGS_IDLE_ALL_ENGINES (NVOS30_FLAGS_IDLE_GRAPHICS            | \
+                                       NVOS30_FLAGS_IDLE_MPEG                | \
+                                       NVOS30_FLAGS_IDLE_MOTION_ESTIMATION   | \
+                                       NVOS30_FLAGS_IDLE_VIDEO_PROCESSOR     | \
+                                       NVOS30_FLAGS_IDLE_BITSTREAM_PROCESSOR | \
+                                       NVOS30_FLAGS_IDLE_CIPHER_DMA          | \
+                                       NVOS30_FLAGS_IDLE_MSPDEC              | \
+                                       NVOS30_FLAGS_IDLE_NVDEC0              | \
+                                       NVOS30_FLAGS_IDLE_SEC                 | \
+                                       NVOS30_FLAGS_IDLE_MSPPP               | \
+                                       NVOS30_FLAGS_IDLE_CE0                 | \
+                                       NVOS30_FLAGS_IDLE_CE1                 | \
+                                       NVOS30_FLAGS_IDLE_CE2                 | \
+                                       NVOS30_FLAGS_IDLE_CE3                 | \
+                                       NVOS30_FLAGS_IDLE_CE4                 | \
+                                       NVOS30_FLAGS_IDLE_CE5                 | \
+                                       NVOS30_FLAGS_IDLE_NVENC0              | \
+                                       NVOS30_FLAGS_IDLE_NVENC1              | \
+                                       NVOS30_FLAGS_IDLE_NVENC2              | \
+                                       NVOS30_FLAGS_IDLE_VIC                 | \
+                                       NVOS30_FLAGS_IDLE_NVJPG               | \
+                                       NVOS30_FLAGS_IDLE_NVDEC1              | \
+                                       NVOS30_FLAGS_IDLE_NVDEC2)
+#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON                              31:31
+#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON_NO                           (0x00000000)
+#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON_YES                          (0x00000001)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hDevice;
+    NvHandle hChannel;
+    NvV32    numChannels;
+
+    NvP64    phClients NV_ALIGN_BYTES(8);
+    NvP64    phDevices NV_ALIGN_BYTES(8);
+    NvP64    phChannels NV_ALIGN_BYTES(8);
+
+    NvV32    flags;
+    NvV32    timeout;
+    NvV32    status;
+} NVOS30_PARAMETERS;
+
+/* function OS32 */
+typedef void (*BindResultFunc)(void * pVoid, NvU32 gpuMask, NvU32 bState, NvU32 bResult);
+
+#define NV04_VID_HEAP_CONTROL                                      (0x00000020)
+/*************************************************************************
+************************ New Heap Interface ******************************
+*************************************************************************/
+// NVOS32 Descriptor types
+//
+// NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR: The dma-buf object
+// pointer, provided by the linux kernel buffer sharing sub-system.
+// This descriptor can only be used by kernel space rm-clients.
+//
+#define NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS          0
+#define NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY            1
+#define NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY             2
+#define NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR             3
+#define NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE           4
+#define NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR           5
+#define NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR               6
+#define NVOS32_DESCRIPTOR_TYPE_KERNEL_VIRTUAL_ADDRESS   7
+// NVOS32 function
+#define NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT        1
+#define NVOS32_FUNCTION_ALLOC_SIZE                      2
+#define NVOS32_FUNCTION_FREE                            3
+// #define NVOS32_FUNCTION_HEAP_PURGE                   4
+#define NVOS32_FUNCTION_INFO                            5
+#define NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT        6
+// #define NVOS32_FUNCTION_DESTROY                      7
+// #define NVOS32_FUNCTION_RETAIN                       9
+// #define NVOS32_FUNCTION_REALLOC                      10
+#define NVOS32_FUNCTION_DUMP                            11
+// #define NVOS32_FUNCTION_INFO_TYPE_ALLOC_BLOCKS       12
+#define NVOS32_FUNCTION_ALLOC_SIZE_RANGE                14
+#define NVOS32_FUNCTION_REACQUIRE_COMPR                 15
+#define NVOS32_FUNCTION_RELEASE_COMPR                   16
+// #define NVOS32_FUNCTION_MODIFY_DEFERRED_TILES        17
+#define NVOS32_FUNCTION_GET_MEM_ALIGNMENT               18
+#define NVOS32_FUNCTION_HW_ALLOC                        19
+#define NVOS32_FUNCTION_HW_FREE                         20
+// #define NVOS32_FUNCTION_SET_OFFSET                   21
+// #define NVOS32_FUNCTION_IS_TILED                     22
+// #define NVOS32_FUNCTION_ENABLE_RESOURCE              23
+// #define NVOS32_FUNCTION_BIND_COMPR                   24
+#define NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR             27
+
+typedef struct
+{
+    NvP64 sgt NV_ALIGN_BYTES(8);
+    NvP64 gem NV_ALIGN_BYTES(8);
+} NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS;
+
+#define NVOS32_FLAGS_BLOCKINFO_VISIBILITY_CPU           (0x00000001)
+typedef struct
+{
+    NvU64 startOffset NV_ALIGN_BYTES(8);
+    NvU64 size NV_ALIGN_BYTES(8);
+    NvU32 flags;
+} NVOS32_BLOCKINFO;
+
+// NVOS32 IVC-heap number delimiting value
+#define NVOS32_IVC_HEAP_NUMBER_DONT_ALLOCATE_ON_IVC_HEAP 0 // When IVC heaps are present,
+                                                           // an IVC-heap number specified
+                                                           // as part of 'NVOS32_PARAMETERS'
+                                                           // that is less than or equal to this
+                                                           // constant indicates that the allocation
+                                                           // should not be done on an IVC heap.
+                                                           // An explanation of the IVC-heap number is
+                                                           // under the 'AllocSize' structure below.
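+
+//
+// Editorial note (illustration only): field defines of the form "high:low"
+// in this file (e.g. NVOS02_FLAGS_LOCATION 11:8) name bit ranges and are
+// meant to be consumed with the DRF_* helper macros from nvmisc.h rather
+// than used directly, e.g.:
+//
+//     NvU32 flags = DRF_DEF(OS02, _FLAGS, _LOCATION, _VIDMEM) |
+//                   DRF_DEF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE);
+//
+// DRF_DEF(d, r, f, c) token-pastes the value name NV<d><r><f><c> and shifts
+// that value into the NV<d><r><f> bit range.
+//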
+
+typedef struct
+{
+    NvHandle  hRoot;          // [IN]  - root object handle
+    NvHandle  hObjectParent;  // [IN]  - device handle
+    NvU32     function;       // [IN]  - heap function, see below FUNCTION* defines
+    NvHandle  hVASpace;       // [IN]  - VASpace handle
+    NvS16     ivcHeapNumber;  // [IN]  - When IVC heaps are present: either 1) the number of the IVC heap
+                              //         shared between two VMs or 2) a number indicating that the allocation
+                              //         should not be done on an IVC heap. Values greater than the constant
+                              //         'NVOS32_IVC_HEAP_NUMBER_DONT_ALLOCATE_ON_IVC_HEAP' define set 1)
+                              //         and values less than or equal to that constant define set 2).
+                              //         When IVC heaps are present, the correct IVC-heap number must be specified.
+                              //         When IVC heaps are absent, the IVC-heap number is disregarded.
+                              //         RM provides for each VM a bitmask of heaps, with each bit
+                              //         specifying the other peer that can use the partition.
+                              //         Each bit set to one can be enumerated, such that the bit
+                              //         with the lowest significance is enumerated with one.
+                              //         The 'ivcHeapNumber' parameter specifies this enumeration value.
+                              //         This value is used to uniquely identify a heap shared between
+                              //         two particular VMs.
+                              //         Illustration:
+                              //             bitmask: 1 1 0 1 0 = 0x1A
+                              //             possible 'ivcHeapNumber' values: 3, 2, 1
+    NvV32     status;         // [OUT] - returned NVOS32* status code, see below STATUS* defines
+    NvU64     total NV_ALIGN_BYTES(8); // [OUT] - returned total size of heap
+    NvU64     free  NV_ALIGN_BYTES(8); // [OUT] - returned free space available in heap
+
+    union
+    {
+        // NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT
+        struct
+        {
+            NvU32     owner;           // [IN]  - memory owner ID
+            NvHandle  hMemory;         // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated)
+            NvU32     type;            // [IN]  - surface type, see below TYPE* defines
+            NvU32     flags;           // [IN]  - allocation modifier flags, see below ALLOC_FLAGS* defines
+            NvU32     depth;           // [IN]  - depth of surface in bits
+            NvU32     width;           // [IN]  - width of surface in pixels
+            NvU32     height;          // [IN]  - height of surface in pixels
+            NvU32     attr;            // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     format;          // [IN/OUT] - format requested, and format allocated
+            NvU32     comprCovg;       // [IN/OUT] - compr covg requested, and allocated
+            NvU32     zcullCovg;       // [OUT] - zcull covg allocated
+            NvU32     partitionStride; // [IN/OUT] - 0 means "RM" chooses
+            NvU64     size      NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated
+            NvU64     alignment NV_ALIGN_BYTES(8); // [IN]  - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on
+            NvU64     offset    NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset
+            NvU64     limit     NV_ALIGN_BYTES(8); // [OUT] - returned surface limit
+            NvP64     address   NV_ALIGN_BYTES(8); // [OUT] - returned address
+            NvU64     rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range
+            NvU64     rangeEnd   NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive.
+            NvU32     attr2;           // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     ctagOffset;      // [IN]  - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET)
+        } AllocDepthWidthHeight;
+
+        // NVOS32_FUNCTION_ALLOC_SIZE
+        struct
+        {
+            NvU32     owner;           // [IN]  - memory owner ID
+            NvHandle  hMemory;         // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated)
+            NvU32     type;            // [IN]  - surface type, see below TYPE* defines
+            NvU32     flags;           // [IN]  - allocation modifier flags, see below ALLOC_FLAGS* defines
+            NvU32     attr;            // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     format;          // [IN/OUT] - format requested, and format allocated
+            NvU32     comprCovg;       // [IN/OUT] - compr covg requested, and allocated
+            NvU32     zcullCovg;       // [OUT] - zcull covg allocated
+            NvU32     partitionStride; // [IN/OUT] - 0 means "RM" chooses
+            NvU32     width;           // [IN]  - width "hint" used for zcull region allocations
+            NvU32     height;          // [IN]  - height "hint" used for zcull region allocations
+            NvU64     size      NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated
+            NvU64     alignment NV_ALIGN_BYTES(8); // [IN]  - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on
+            NvU64     offset    NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset
+            NvU64     limit     NV_ALIGN_BYTES(8); // [OUT] - returned surface limit
+            NvP64     address   NV_ALIGN_BYTES(8); // [OUT] - returned address
+            NvU64     rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range
+            NvU64     rangeEnd   NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive.
+            NvU32     attr2;           // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     ctagOffset;      // [IN]  - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET)
+        } AllocSize;
+
+        // NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT
+        struct
+        {
+            NvU32     owner;           // [IN]  - memory owner ID
+            NvHandle  hMemory;         // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated)
+            NvU32     type;            // [IN]  - surface type, see below TYPE* defines
+            NvU32     flags;           // [IN]  - allocation modifier flags, see below ALLOC_FLAGS* defines
+            NvU32     height;          // [IN]  - height of surface in pixels
+            NvS32     pitch;           // [IN/OUT] - desired pitch AND returned actual pitch allocated
+            NvU32     attr;            // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     width;           // [IN]  - width of surface in pixels
+            NvU32     format;          // [IN/OUT] - format requested, and format allocated
+            NvU32     comprCovg;       // [IN/OUT] - compr covg requested, and allocated
+            NvU32     zcullCovg;       // [OUT] - zcull covg allocated
+            NvU32     partitionStride; // [IN/OUT] - 0 means "RM" chooses
+            NvU64     size      NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated
+            NvU64     alignment NV_ALIGN_BYTES(8); // [IN]  - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on
+            NvU64     offset    NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset
+            NvU64     limit     NV_ALIGN_BYTES(8); // [OUT] - returned surface limit
+            NvP64     address   NV_ALIGN_BYTES(8); // [OUT] - returned address
+            NvU64     rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range
+            NvU64     rangeEnd   NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive.
+            NvU32     attr2;           // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     ctagOffset;      // [IN]  - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET)
+        } AllocTiledPitchHeight;
+
+        // NVOS32_FUNCTION_FREE
+        struct
+        {
+            NvU32     owner;           // [IN]  - memory owner ID
+            NvHandle  hMemory;         // [IN]  - unique memory handle
+            NvU32     flags;           // [IN]  - heap free flags (must be NVOS32_FREE_FLAGS_MEMORY_HANDLE_PROVIDED)
+        } Free;
+
+        // NVOS32_FUNCTION_RELEASE_COMPR
+        struct
+        {
+            NvU32     owner;           // [IN]  - memory owner ID
+            NvU32     flags;           // [IN]  - must be NVOS32_RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED
+            NvHandle  hMemory;         // [IN]  - unique memory handle (valid if _RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED)
+        } ReleaseCompr;
+
+        // NVOS32_FUNCTION_REACQUIRE_COMPR
+        struct
+        {
+            NvU32     owner;           // [IN]  - memory owner ID
+            NvU32     flags;           // [IN]  - must be NVOS32_REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED
+            NvHandle  hMemory;         // [IN]  - unique memory handle (valid if _REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED)
+        } ReacquireCompr;
+
+        // NVOS32_FUNCTION_INFO
+        struct
+        {
+            NvU32 attr;                       // [IN]  - memory heap attributes requested
+            NvU64 offset NV_ALIGN_BYTES(8);   // [OUT] - base of largest free block
+            NvU64 size   NV_ALIGN_BYTES(8);   // [OUT] - size of largest free block
+            NvU64 base   NV_ALIGN_BYTES(8);   // [OUT] - returned heap phys base
+        } Info;
+
+        // NVOS32_FUNCTION_DUMP
+        struct
+        {
+            NvU32 flags;                      // [IN] - see _DUMP_FLAGS
+            // [IN] - if NULL, numBlocks is the returned number of blocks in the
+            //        heap, else returns all blocks in eHeap.
+            //        If non-NULL, points to a buffer that is at least numBlocks
+            //        * sizeof(NVOS32_HEAP_DUMP_BLOCK) bytes.
+            NvP64 pBuffer NV_ALIGN_BYTES(8);
+            // [IN/OUT] - if pBuffer is NULL, will return the number of blocks in the heap;
+            //            if pBuffer is non-NULL, is input containing the size of
+            //            pBuffer in units of NVOS32_HEAP_DUMP_BLOCK. This must
+            //            be greater than or equal to the number of blocks in the
+            //            heap.
+            NvU32 numBlocks;
+        } Dump;
+
+        // NVOS32_FUNCTION_DESTROY - no extra parameters needed
+
+        // NVOS32_FUNCTION_ALLOC_SIZE_RANGE
+        struct
+        {
+            NvU32     owner;           // [IN]  - memory owner ID
+            NvHandle  hMemory;         // [IN]  - unique memory handle
+            NvU32     type;            // [IN]  - surface type, see below TYPE* defines
+            NvU32     flags;           // [IN]  - allocation modifier flags, see below ALLOC_FLAGS* defines
+            NvU32     attr;            // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     format;          // [IN/OUT] - format requested, and format allocated
+            NvU32     comprCovg;       // [IN/OUT] - compr covg requested, and allocated
+            NvU32     zcullCovg;       // [OUT] - zcull covg allocated
+            NvU32     partitionStride; // [IN/OUT] - 0 means "RM" chooses
+            NvU64     size      NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated
+            NvU64     alignment NV_ALIGN_BYTES(8); // [IN]  - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on
+            NvU64     offset    NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset
+            NvU64     limit     NV_ALIGN_BYTES(8); // [OUT] - returned surface limit
+            NvU64     rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range
+            NvU64     rangeEnd   NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive.
+            NvP64     address   NV_ALIGN_BYTES(8); // [OUT] - returned address
+            NvU32     attr2;           // [IN/OUT] - surface attributes requested, and surface attributes allocated
+            NvU32     ctagOffset;      // [IN]  - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET)
+        } AllocSizeRange;
+
+        // additions for Longhorn
+#define NVAL_MAX_BANKS (4)
+#define NVAL_MAP_DIRECTION         0:0
+#define NVAL_MAP_DIRECTION_DOWN    0x00000000
+#define NVAL_MAP_DIRECTION_UP      0x00000001
+
+        // NVOS32_FUNCTION_GET_MEM_ALIGNMENT
+        struct
+        {
+            NvU32 alignType;                         // Input
+            NvU32 alignAttr;
+            NvU32 alignInputFlags;
+            NvU64 alignSize NV_ALIGN_BYTES(8);
+            NvU32 alignHeight;
+            NvU32 alignWidth;
+            NvU32 alignPitch;
+            NvU32 alignPad;
+            NvU32 alignMask;
+            NvU32 alignOutputFlags[NVAL_MAX_BANKS];  // We could compress this information but it is probably not that big of a deal
+            NvU32 alignBank[NVAL_MAX_BANKS];
+            NvU32 alignKind;
+            NvU32 alignAdjust;                       // Output -- If non-zero, the amount we need to adjust the offset
+            NvU32 alignAttr2;
+        } AllocHintAlignment;
+
+        struct
+        {
+            NvU32     allocOwner;      // [IN]  - memory owner ID
+            NvHandle  allochMemory;    // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated)
+            NvU32     flags;
+            NvU32     allocType;       // Input
+            NvU32     allocAttr;
+            NvU32     allocInputFlags;
+            NvU64     allocSize NV_ALIGN_BYTES(8);
+            NvU32     allocHeight;
+            NvU32     allocWidth;
+            NvU32     allocPitch;
+            NvU32     allocMask;
+            NvU32     allocComprCovg;
+            NvU32     allocZcullCovg;
+            NvP64     bindResultFunc NV_ALIGN_BYTES(8);  // BindResultFunc
+            NvP64     pHandle NV_ALIGN_BYTES(8);
+            NvHandle  hResourceHandle; // Handle to RM container
+            NvU32     retAttr;         // Output - indicates the resources that we allocated
+            NvU32     kind;
+            NvU64     osDeviceHandle NV_ALIGN_BYTES(8);
+            NvU32     allocAttr2;
+            NvU32     retAttr2;        // Output - indicates the resources that we allocated
+            NvU64     allocAddr NV_ALIGN_BYTES(8);
+            // [out] from GMMU_COMPR_INFO in drivers/common/shared/inc/mmu/gmmu_fmt.h
+            struct
+            {
+                NvU32 compPageShift;
+                NvU32 compressedKind;
+                NvU32 compTagLineMin;
+                NvU32 compPageIndexLo;
+                NvU32 compPageIndexHi;
+                NvU32 compTagLineMultiplier;
+            } comprInfo;
+            // [out] fallback uncompressed kind.
+            NvU32 uncompressedKind;
+        } HwAlloc;
+
+        // NVOS32_FUNCTION_HW_FREE
+        struct
+        {
+            NvHandle  hResourceHandle; // Handle to RM Resource Info
+            NvU32     flags;           // Indicates whether HW resources and/or memory are to be freed
+        } HwFree;
+// Updated interface check.
+#define NV_RM_OS32_ALLOC_OS_DESCRIPTOR_WITH_OS32_ATTR 1
+
+        // NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR
+        struct
+        {
+            NvHandle  hMemory;         // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated)
+            NvU32     type;            // [IN]  - surface type, see below TYPE* defines
+            NvU32     flags;           // [IN]  - allocation modifier flags, see below ALLOC_FLAGS* defines
+            NvU32     attr;            // [IN]  - attributes for memory placement/properties, see below
+            NvU32     attr2;           // [IN]  - attributes GPU_CACHEABLE
+            NvP64     descriptor NV_ALIGN_BYTES(8); // [IN] - descriptor address
+            NvU64     limit      NV_ALIGN_BYTES(8); // [IN] - allocated size - 1
+            NvU32     descriptorType;  // [IN]  - descriptor type (Virtual | nvmap Handle)
+        } AllocOsDesc;
+
+    } data;
+} NVOS32_PARAMETERS;
+
+typedef struct
+{
+    NvU32 owner;                      // owner id - NVOS32_BLOCK_TYPE_FREE or defined by client during heap_alloc
+    NvU32 format;                     // arch specific format/kind
+    NvU64 begin NV_ALIGN_BYTES(8);    // start of allocated memory block
+    NvU64 align NV_ALIGN_BYTES(8);    // actual start of usable memory, aligned to chip specific boundary
+    NvU64 end   NV_ALIGN_BYTES(8);    // end of usable memory.  end - align + 1 = size of block
+} NVOS32_HEAP_DUMP_BLOCK;
+
+
+#define NVOS32_DELETE_RESOURCES_ALL                     0
+
+// type field
+#define NVOS32_TYPE_IMAGE                               0
+#define NVOS32_TYPE_DEPTH                               1
+#define NVOS32_TYPE_TEXTURE                             2
+#define NVOS32_TYPE_VIDEO                               3
+#define NVOS32_TYPE_FONT                                4
+#define NVOS32_TYPE_CURSOR                              5
+#define NVOS32_TYPE_DMA                                 6
+#define NVOS32_TYPE_INSTANCE                            7
+#define NVOS32_TYPE_PRIMARY                             8
+#define NVOS32_TYPE_ZCULL                               9
+#define NVOS32_TYPE_UNUSED                              10
+#define NVOS32_TYPE_SHADER_PROGRAM                      11
+#define NVOS32_TYPE_OWNER_RM                            12
+#define NVOS32_TYPE_NOTIFIER                            13
+#define NVOS32_TYPE_RESERVED                            14
+#define NVOS32_TYPE_PMA                                 15
+#define NVOS32_TYPE_STENCIL                             16
+#define NVOS32_NUM_MEM_TYPES                            17
+
+// Surface attribute field - bitmask of requested attributes the surface
+// should have.
+// This value is updated to reflect what was actually allocated, and so this
+// field must be checked after every allocation to determine what was
+// allocated. Pass in the ANY tags to indicate that RM should fall back but
+// still succeed the alloc.
+// For example, if tiled_any is passed in, but no tile ranges are available,
+// RM will allocate normal memory and indicate that in the returned attr field.
+// Each returned attribute will have the REQUIRED field set if that attribute
+// applies to the allocated surface.
+
+#define NVOS32_ATTR_NONE                                0x00000000
+
+#define NVOS32_ATTR_DEPTH                               2:0
+#define NVOS32_ATTR_DEPTH_UNKNOWN                       0x00000000
+#define NVOS32_ATTR_DEPTH_8                             0x00000001
+#define NVOS32_ATTR_DEPTH_16                            0x00000002
+#define NVOS32_ATTR_DEPTH_24                            0x00000003
+#define NVOS32_ATTR_DEPTH_32                            0x00000004
+#define NVOS32_ATTR_DEPTH_64                            0x00000005
+#define NVOS32_ATTR_DEPTH_128                           0x00000006
+
+#define NVOS32_ATTR_COMPR_COVG                          3:3
+#define NVOS32_ATTR_COMPR_COVG_DEFAULT                  0x00000000
+#define NVOS32_ATTR_COMPR_COVG_PROVIDED                 0x00000001
+
+// Surface description - number of AA samples
+// This number should only reflect AA done in hardware, not in software. For
+// example, OpenGL's 8x AA mode is a mix of 2x hardware multisample and 2x2
+// software supersample.
+// OpenGL should specify ATTR_AA_SAMPLES of 2 in this case, not 8, because
+// the hardware will be programmed to run in 2x AA mode.
+// Note that X_VIRTUAL_Y means X real samples with Y samples total (i.e. Y
+// does not indicate the number of virtual samples). For instance, what
+// arch and HW describe as NV_PGRAPH_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12
+// corresponds to NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16 here.
+
+#define NVOS32_ATTR_AA_SAMPLES                          7:4
+#define NVOS32_ATTR_AA_SAMPLES_1                        0x00000000
+#define NVOS32_ATTR_AA_SAMPLES_2                        0x00000001
+#define NVOS32_ATTR_AA_SAMPLES_4                        0x00000002
+#define NVOS32_ATTR_AA_SAMPLES_4_ROTATED                0x00000003
+#define NVOS32_ATTR_AA_SAMPLES_6                        0x00000004
+#define NVOS32_ATTR_AA_SAMPLES_8                        0x00000005
+#define NVOS32_ATTR_AA_SAMPLES_16                       0x00000006
+#define NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8              0x00000007
+#define NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16             0x00000008
+#define NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_16             0x00000009
+#define NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_32             0x0000000A
+
+// Tiled region
+#define NVOS32_ATTR_TILED                               9:8
+#define NVOS32_ATTR_TILED_NONE                          0x00000000
+#define NVOS32_ATTR_TILED_REQUIRED                      0x00000001
+#define NVOS32_ATTR_TILED_ANY                           0x00000002
+#define NVOS32_ATTR_TILED_DEFERRED                      0x00000003
+
+// Zcull region (NV40 and up)
+// If ATTR_ZCULL is REQUIRED or ANY and ATTR_DEPTH is UNKNOWN, the
+// allocation will fail.
+// If ATTR_DEPTH or ATTR_AA_SAMPLES is not accurate, erroneous rendering
+// may result.
+#define NVOS32_ATTR_ZCULL                               11:10
+#define NVOS32_ATTR_ZCULL_NONE                          0x00000000
+#define NVOS32_ATTR_ZCULL_REQUIRED                      0x00000001
+#define NVOS32_ATTR_ZCULL_ANY                           0x00000002
+#define NVOS32_ATTR_ZCULL_SHARED                        0x00000003
+
+// Compression (NV20 and up)
+// If ATTR_COMPR is REQUIRED or ANY and ATTR_DEPTH is UNKNOWN, the
+// allocation will fail.
+// If ATTR_DEPTH or ATTR_AA_SAMPLES is not accurate, performance will
+// suffer heavily.
+#define NVOS32_ATTR_COMPR                               13:12
+#define NVOS32_ATTR_COMPR_NONE                          0x00000000
+#define NVOS32_ATTR_COMPR_REQUIRED                      0x00000001
+#define NVOS32_ATTR_COMPR_ANY                           0x00000002
+#define NVOS32_ATTR_COMPR_PLC_REQUIRED                  NVOS32_ATTR_COMPR_REQUIRED
+#define NVOS32_ATTR_COMPR_PLC_ANY                       NVOS32_ATTR_COMPR_ANY
+#define NVOS32_ATTR_COMPR_DISABLE_PLC_ANY               0x00000003
+
+// Format
+// _BLOCK_LINEAR is only available for nv50+.
+#define NVOS32_ATTR_FORMAT                              17:16
+// Macros representing the low/high bits of the NVOS32_ATTR_FORMAT
+// bit range. These provide direct access to the range limits
+// without needing to split the low:high representation via the
+// ternary operator, thereby avoiding a MISRA 14.3 violation.
+#define NVOS32_ATTR_FORMAT_LOW_FIELD                    16
+#define NVOS32_ATTR_FORMAT_HIGH_FIELD                   17
+#define NVOS32_ATTR_FORMAT_PITCH                        0x00000000
+#define NVOS32_ATTR_FORMAT_SWIZZLED                     0x00000001
+#define NVOS32_ATTR_FORMAT_BLOCK_LINEAR                 0x00000002
+
+#define NVOS32_ATTR_Z_TYPE                              18:18
+#define NVOS32_ATTR_Z_TYPE_FIXED                        0x00000000
+#define NVOS32_ATTR_Z_TYPE_FLOAT                        0x00000001
+
+#define NVOS32_ATTR_ZS_PACKING                          21:19
+#define NVOS32_ATTR_ZS_PACKING_S8                       0x00000000 // Z24S8 and S8 share definition
+#define NVOS32_ATTR_ZS_PACKING_Z24S8                    0x00000000
+#define NVOS32_ATTR_ZS_PACKING_S8Z24                    0x00000001
+#define NVOS32_ATTR_ZS_PACKING_Z32                      0x00000002
+#define NVOS32_ATTR_ZS_PACKING_Z24X8                    0x00000003
+#define NVOS32_ATTR_ZS_PACKING_X8Z24                    0x00000004
+#define NVOS32_ATTR_ZS_PACKING_Z32_X24S8                0x00000005
+#define NVOS32_ATTR_ZS_PACKING_X8Z24_X24S8              0x00000006
+#define NVOS32_ATTR_ZS_PACKING_Z16                      0x00000007
+// NOTE: ZS packing and color packing fields are overlaid
+#define NVOS32_ATTR_COLOR_PACKING                       NVOS32_ATTR_ZS_PACKING
+#define NVOS32_ATTR_COLOR_PACKING_A8R8G8B8              0x00000000
+#define NVOS32_ATTR_COLOR_PACKING_X8R8G8B8              0x00000001
+
+
+
+//
+// For virtual allocs, this chooses the page size for the region. Specifying
+// _DEFAULT will select a virtual page size that allows for a surface
+// to be mixed between video and system memory and allows the surface
+// to be migrated between video and system memory. For tesla chips,
+// 4KB will be used. For fermi chips with dual page tables, a virtual
+// address with both page tables will be used.
+//
+// For physical allocation on chips with page swizzle, this field is
+// used to select the page swizzle. This later also sets the virtual
+// page size, but does not have influence over selecting a migratable
+// virtual address. That must be selected when mapping the physical
+// memory.
+//
+// BIG_PAGE  = 64 KB on PASCAL
+//           = 64 KB or 128 KB on pre_PASCAL chips
+//
+// HUGE_PAGE = 2 MB on PASCAL+
+//           = 2 MB or 512 MB on AMPERE+
+//           = not supported on pre_PASCAL chips.
+//
+// To request a HUGE page size,
+// set NVOS32_ATTR_PAGE_SIZE to _HUGE and NVOS32_ATTR2_PAGE_SIZE_HUGE to
+// the desired size.
+//
+#define NVOS32_ATTR_PAGE_SIZE                           24:23
+#define NVOS32_ATTR_PAGE_SIZE_DEFAULT                   0x00000000
+#define NVOS32_ATTR_PAGE_SIZE_4KB                       0x00000001
+#define NVOS32_ATTR_PAGE_SIZE_BIG                       0x00000002
+#define NVOS32_ATTR_PAGE_SIZE_HUGE                      0x00000003
+
+#define NVOS32_ATTR_LOCATION                            26:25
+#define NVOS32_ATTR_LOCATION_VIDMEM                     0x00000000
+#define NVOS32_ATTR_LOCATION_PCI                        0x00000001
+#define NVOS32_ATTR_LOCATION_AGP                        0x00000002
+#define NVOS32_ATTR_LOCATION_ANY                        0x00000003
+
+//
+// _DEFAULT implies _CONTIGUOUS for video memory currently, but
+// may be changed to imply _NONCONTIGUOUS in the future.
+// _ALLOW_NONCONTIGUOUS enables falling back to the noncontiguous
+// vidmem allocator if contig allocation fails.
+//
+#define NVOS32_ATTR_PHYSICALITY                         28:27
+#define NVOS32_ATTR_PHYSICALITY_DEFAULT                 0x00000000
+#define NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS           0x00000001
+#define NVOS32_ATTR_PHYSICALITY_CONTIGUOUS              0x00000002
+#define NVOS32_ATTR_PHYSICALITY_ALLOW_NONCONTIGUOUS     0x00000003
+
+#define NVOS32_ATTR_COHERENCY                           31:29
+#define NVOS32_ATTR_COHERENCY_UNCACHED                  0x00000000
+#define NVOS32_ATTR_COHERENCY_CACHED                    0x00000001
+#define NVOS32_ATTR_COHERENCY_WRITE_COMBINE             0x00000002
+#define NVOS32_ATTR_COHERENCY_WRITE_THROUGH             0x00000003
+#define NVOS32_ATTR_COHERENCY_WRITE_PROTECT             0x00000004
+#define NVOS32_ATTR_COHERENCY_WRITE_BACK                0x00000005
+
+// ATTR2 fields
+#define NVOS32_ATTR2_NONE                               0x00000000
+
+//
+// DEFAULT          - Let lower level drivers pick optimal page kind.
+// PREFER_NO_ZBC    - Prefer other types of compression over ZBC when
+//                    selecting page kind.
+// PREFER_ZBC       - Prefer ZBC over other types of compression when
+//                    selecting page kind.
+// REQUIRE_ONLY_ZBC - Require a page kind that enables ZBC but disables
+//                    other types of compression (i.e. 2C page kind).
+// INVALID          - Aliases REQUIRE_ONLY_ZBC, which is not supported
+//                    by all RM implementations.
+//
+#define NVOS32_ATTR2_ZBC                                1:0
+#define NVOS32_ATTR2_ZBC_DEFAULT                        0x00000000
+#define NVOS32_ATTR2_ZBC_PREFER_NO_ZBC                  0x00000001
+#define NVOS32_ATTR2_ZBC_PREFER_ZBC                     0x00000002
+#define NVOS32_ATTR2_ZBC_REQUIRE_ONLY_ZBC               0x00000003
+#define NVOS32_ATTR2_ZBC_INVALID                        0x00000003
+
+//
+// DEFAULT - Highest performance cache policy that is coherent with the highest
+//           performance CPU mapping. Typically this is gpu cached for video
+//           memory and gpu uncached for system memory.
+// YES     - Enable gpu caching if supported on this surface type. For system
+//           memory this will not be coherent with direct CPU mappings.
+// NO      - Disable gpu caching if supported on this surface type.
+// INVALID - Clients should never set YES and NO simultaneously.
+//
+#define NVOS32_ATTR2_GPU_CACHEABLE                      3:2
+#define NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT              0x00000000
+#define NVOS32_ATTR2_GPU_CACHEABLE_YES                  0x00000001
+#define NVOS32_ATTR2_GPU_CACHEABLE_NO                   0x00000002
+#define NVOS32_ATTR2_GPU_CACHEABLE_INVALID              0x00000003
+
+//
+// DEFAULT - GPU-dependent cache policy
+// YES     - Enable gpu caching for p2p mem
+// NO      - Disable gpu caching for p2p mem
+//
+#define NVOS32_ATTR2_P2P_GPU_CACHEABLE                  5:4
+#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_DEFAULT          0x00000000
+#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_YES              0x00000001
+#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_NO               0x00000002
+
+// This applies to virtual allocs only. See NVOS46_FLAGS_32BIT_POINTER.
+#define NVOS32_ATTR2_32BIT_POINTER                      6:6
+#define NVOS32_ATTR2_32BIT_POINTER_DISABLE              0x00000000
+#define NVOS32_ATTR2_32BIT_POINTER_ENABLE               0x00000001
+
+//
+// Indicates the address conversion to be used, which affects what
+// pitch alignment needs to be used.
+//
+#define NVOS32_ATTR2_TILED_TYPE                         7:7
+#define NVOS32_ATTR2_TILED_TYPE_LINEAR                  0x00000000
+#define NVOS32_ATTR2_TILED_TYPE_XY                      0x00000001
+
+//
+// Force SMMU mapping on GPU physical allocation in Tegra.
+// SMMU mapping for GPU physical allocation is decided internally by RM;
+// this attribute provides an override to RM policy for verification purposes.
+//
+#define NVOS32_ATTR2_SMMU_ON_GPU                        10:8
+#define NVOS32_ATTR2_SMMU_ON_GPU_DEFAULT                0x00000000
+#define NVOS32_ATTR2_SMMU_ON_GPU_DISABLE                0x00000001
+#define NVOS32_ATTR2_SMMU_ON_GPU_ENABLE                 0x00000002
+
+//
+// Make comptag allocation aligned to compression cacheline size.
+// Specifying this attribute will make RM allocate comptags worth an entire
+// comp cacheline. The allocation will be offset aligned to the number of
+// comptags/comp cacheline.
+//
+#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN          11:11
+#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_OFF      0x0
+#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_ON       0x1
+#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_DEFAULT  \
+    NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_OFF
+
+// Allocation preferred in high or low priority memory
+#define NVOS32_ATTR2_PRIORITY                           13:12
+#define NVOS32_ATTR2_PRIORITY_DEFAULT                   0x0
+#define NVOS32_ATTR2_PRIORITY_HIGH                      0x1
+#define NVOS32_ATTR2_PRIORITY_LOW                       0x2
+
+// PMA: Allocation is an RM internal allocation (RM-only)
+#define NVOS32_ATTR2_INTERNAL                           14:14
+#define NVOS32_ATTR2_INTERNAL_NO                        0x0
+#define NVOS32_ATTR2_INTERNAL_YES                       0x1
+
+// Allocate 2C instead of 2CZ
+#define NVOS32_ATTR2_PREFER_2C                          15:15
+#define NVOS32_ATTR2_PREFER_2C_NO                       0x00000000
+#define NVOS32_ATTR2_PREFER_2C_YES                      0x00000001
+
+// Allocation used by display engine; RM verifies the display engine has enough
+// address bits or a remapper available.
+#define NVOS32_ATTR2_NISO_DISPLAY                       16:16
+#define NVOS32_ATTR2_NISO_DISPLAY_NO                    0x00000000
+#define NVOS32_ATTR2_NISO_DISPLAY_YES                   0x00000001
+
+//
+// !!WARNING!!!
+//
+// This flag is introduced as a temporary WAR to enable color compression
+// without ZBC.
+//
+// This dangerous flag can be used by UMDs to instruct RM to skip the zbc
+// table refcounting that RM does today, when the chosen PTE kind has ZBC
+// support.
+//
+// Currently we do not have a safe per-process zbc slot management and
+// refcounting mechanism between RM and UMD; hence, any process can
+// access any other process's zbc entry in the global zbc table (without a mask).
+// RM cannot track which process is using which zbc slot, so in order to
+// flush the ZBC table for slot reuse, RM keeps a global refcount for the
+// zbc table and flushes and reuses the entries if the PTE kind supports zbc.
+//
+// This scheme poses a problem if there are apps that are persistent, such as
+// the desktop components, that can have color compression enabled and so will
+// always keep the refcount active. Since these apps can live without
+// ZBC, UMD can disable ZBC using masks.
+//
+// In such a case, if UMD so chooses to disable ZBC, this flag should be used
+// to skip refcounting, as by default RM would refcount the ZBC table.
+//
+// NOTE: There is no way for RM to enforce/police this, and we totally rely
+// on UMD to use a zbc mask in the pushbuffer method to prevent apps from
+// accessing the ZBC table.
+//
+#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT               17:17
+#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT_NO            0x00000000
+#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT_YES           0x00000001
+
+// Allocation requires ISO bandwidth guarantees
+#define NVOS32_ATTR2_ISO                                18:18
+#define NVOS32_ATTR2_ISO_NO                             0x00000000
+#define NVOS32_ATTR2_ISO_YES                            0x00000001
+
+//
+// Turn off the blacklist feature for video memory allocation.
+// This attribute should be used only by kernel clients (KMD) to mask
+// the blacklisted pages for the allocation. This is done so that the clients
+// will manage the masked blacklisted pages after the allocation; they return
+// to RM's pool after the allocation is freed. RmVidHeapCtrl returns
+// NV_ERR_INSUFFICIENT_PERMISSIONS if it is called by non-kernel clients.
+//
+
+// TODO: Project ReLingo - This term is marked for deletion. Use PAGE_OFFLINING.
+#define NVOS32_ATTR2_BLACKLIST                          19:19
+#define NVOS32_ATTR2_BLACKLIST_ON                       0x00000000
+#define NVOS32_ATTR2_BLACKLIST_OFF                      0x00000001
+#define NVOS32_ATTR2_PAGE_OFFLINING                     19:19
+#define NVOS32_ATTR2_PAGE_OFFLINING_ON                  0x00000000
+#define NVOS32_ATTR2_PAGE_OFFLINING_OFF                 0x00000001
+
+//
+// For virtual allocs, this chooses the HUGE page size for the region.
+// NVOS32_ATTR_PAGE_SIZE must be set to _HUGE to use this.
+// Currently, the default huge page is 2MB, so a request with _DEFAULT
+// set will always be interpreted as 2MB.
+// Not supported on pre_AMPERE chips.
+//
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE                     21:20
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT             0x00000000
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB                 0x00000001
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB               0x00000002
+
+// Allow read-only or read-write user CPU mappings
+#define NVOS32_ATTR2_PROTECTION_USER                    22:22
+#define NVOS32_ATTR2_PROTECTION_USER_READ_WRITE         0x00000000
+#define NVOS32_ATTR2_PROTECTION_USER_READ_ONLY          0x00000001
+
+// Allow read-only or read-write device mappings
+#define NVOS32_ATTR2_PROTECTION_DEVICE                  23:23
+#define NVOS32_ATTR2_PROTECTION_DEVICE_READ_WRITE       0x00000000
+#define NVOS32_ATTR2_PROTECTION_DEVICE_READ_ONLY        0x00000001
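+
+//
+// Editorial illustration (not part of the original interface): composing the
+// ATTR/ATTR2 words above with the DRF_* helpers from nvmisc.h, e.g. to
+// request a 2MB huge-page vidmem allocation with a read-only user mapping
+// policy. As documented above, NVOS32_ATTR_PAGE_SIZE must be _HUGE for the
+// ATTR2 _PAGE_SIZE_HUGE field to take effect.
+//
+//     NvU32 attr  = DRF_DEF(OS32, _ATTR,  _PAGE_SIZE, _HUGE) |
+//                   DRF_DEF(OS32, _ATTR,  _LOCATION,  _VIDMEM);
+//     NvU32 attr2 = DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE,  _2MB) |
+//                   DRF_DEF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY);
+//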
+//
+// Force the allocation to go to the guest subheap.
+// This flag is used by the vmiop plugin to allocate from GPA.
+//
+#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP              27:27
+#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP_NO           0x00000000
+#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP_YES          0x00000001
+
+/**
+ * NVOS32 ALLOC_FLAGS
+ *
+ *      NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT
+ *
+ *      NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP
+ *
+ *      NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN
+ *
+ *      NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE
+ *
+ *      NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE
+ *
+ *      NVOS32_ALLOC_FLAGS_BANK_HINT
+ *
+ *      NVOS32_ALLOC_FLAGS_BANK_FORCE
+ *
+ *      NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT
+ *
+ *      NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE
+ *
+ *      NVOS32_ALLOC_FLAGS_BANK_GROW_UP
+ *          Only relevant if bank_hint or bank_force are set
+ *
+ *      NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN
+ *          Only relevant if bank_hint or bank_force are set
+ *
+ *      NVOS32_ALLOC_FLAGS_LAZY
+ *          Lazy allocation (deferred pde, pagetable creation)
+ *
+ *      NVOS32_ALLOC_FLAGS_NO_SCANOUT
+ *          Set if the surface will never be scanned out
+ *
+ *      NVOS32_ALLOC_FLAGS_PITCH_FORCE
+ *          Fail the alloc if the supplied pitch is not aligned
+ *
+ *      NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED
+ *          Memory handle provided to be associated with this allocation
+ *
+ *      NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED
+ *          By default memory is mapped into the CPU address space
+ *
+ *      NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM
+ *          Allocate persistent video memory
+ *
+ *      NVOS32_ALLOC_FLAGS_USE_BEGIN_END
+ *          Use the rangeBegin & rangeEnd fields in allocs other than size/range
+ *
+ *      NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED
+ *          Allocate a TurboCipher encrypted region
+ *
+ *      NVOS32_ALLOC_FLAGS_VIRTUAL
+ *          Allocate virtual memory address space
+ *
+ *      NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX
+ *          Force allocation internal index
+ *
+ *      NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED
+ *          This flag is deprecated and allocations will fail.
+ *
+ *      NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED
+ *          Must be used with NVOS32_ALLOC_FLAGS_VIRTUAL.
+ *          Page tables for this allocation will be managed outside of RM.
+ *
+ *      NVOS32_ALLOC_FLAGS_FORCE_DEDICATED_PDE
+ *
+ *      NVOS32_ALLOC_FLAGS_PROTECTED
+ *          Allocate in a protected memory region if available
+ *
+ *      NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP
+ *          Map the kernel os descriptor
+ *
+ *      NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE
+ *          On WDDM all address spaces are created with MINIMIZE_PTETABLE_SIZE
+ *          to reduce the overhead of private address spaces per application,
+ *          at the cost of holes in the virtual address space.
+ *
+ *          Shaders have short pointers that are required to be within a
+ *          GPU dependent 32b range.
+ *
+ *          MAXIMIZE_ADDRESS_SPACE will reverse the MINIMIZE_PTETABLE_SIZE
+ *          flag with certain restrictions:
+ *          - This flag only has an effect when the allocation has the side
+ *            effect of creating a new PDE. It does not affect existing PDEs.
+ *          - The first few PDEs of the address space are kept minimum to allow
+ *            small applications to use fewer resources.
+ *          - By default this operates on the 0-4GB address range.
+ *          - If USE_BEGIN_END is specified, the setting will apply to the
+ *            specified range instead of the first 4GB.
+ *
+ *      NVOS32_ALLOC_FLAGS_SPARSE
+ *          Denote that a virtual address range is "sparse". Must be used with
+ *          NVOS32_ALLOC_FLAGS_VIRTUAL. Creation of a "sparse" virtual address range
+ *          denotes that an unmapped virtual address range should "not" fault but simply
+ *          return 0's.
+ *
+ *      NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED
+ *          This is a special flag that can be used only by kernel (root) clients
+ *          to allocate memory out of a protected region of the address space.
+ *          If this flag is set by non-kernel clients, then the allocation will
+ *          fail.
+ *
+ *      NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC
+ *
+ *      NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY
+ *          If new pagetables need to be allocated, prefer them in sysmem (if supported by the gpu)
+ *
+ *      NVOS32_ALLOC_FLAGS_SKIP_ALIGN_PAD
+ *          As per KMD request to eliminate extra allocation
+ *
+ *      NVOS32_ALLOC_FLAGS_WPR1
+ *          Allocate in a WPR1 region if available
+ *
+ *      NVOS32_ALLOC_FLAGS_ZCULL_DONT_ALLOCATE_SHARED_1X
+ *          If using zcull sharing and this surface is FSAA, then don't allocate an additional non-FSAA region.
+ *
+ *      NVOS32_ALLOC_FLAGS_WPR2
+ *          Allocate in a WPR2 region if available
+ */
+#define NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT        0x00000001
+#define NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP           0x00000002
+#define NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN         0x00000004
+#define NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE        0x00000008
+#define NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE       0x00000010
+#define NVOS32_ALLOC_FLAGS_BANK_HINT                    0x00000020
+#define NVOS32_ALLOC_FLAGS_BANK_FORCE                   0x00000040
+#define NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT               0x00000080
+#define NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE              0x00000100
+#define NVOS32_ALLOC_FLAGS_BANK_GROW_UP                 0x00000000
+#define NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN               0x00000200
+#define NVOS32_ALLOC_FLAGS_LAZY                         0x00000400
+// unused                                               0x00000800
+#define NVOS32_ALLOC_FLAGS_NO_SCANOUT                   0x00001000
+#define NVOS32_ALLOC_FLAGS_PITCH_FORCE                  0x00002000
+#define NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED       0x00004000
+#define NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED             0x00008000
+#define NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM            0x00010000
+#define NVOS32_ALLOC_FLAGS_USE_BEGIN_END                0x00020000
+#define NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED       0x00040000
+#define NVOS32_ALLOC_FLAGS_VIRTUAL                      0x00080000
+#define NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX         0x00100000
+#define NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED         0x00200000
+#define NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED           0x00400000
+#define NVOS32_ALLOC_FLAGS_FORCE_DEDICATED_PDE          0x00800000
+#define NVOS32_ALLOC_FLAGS_PROTECTED                    0x01000000
+#define NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP           0x02000000 // TODO BUG 2488679: fix alloc flag aliasing
+#define NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE       0x02000000
+#define NVOS32_ALLOC_FLAGS_SPARSE                       0x04000000
+#define NVOS32_ALLOC_FLAGS_USER_READ_ONLY               0x04000000 // TODO BUG 2488682: remove this after KMD transition
+#define NVOS32_ALLOC_FLAGS_DEVICE_READ_ONLY             0x08000000 // TODO BUG 2488682: remove this after KMD transition
+#define NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED   0x08000000
+#define NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC          0x10000000
+#define NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY     0x20000000
+#define NVOS32_ALLOC_FLAGS_SKIP_ALIGN_PAD               0x40000000
+#define NVOS32_ALLOC_FLAGS_WPR1                         0x40000000 // TODO BUG 2488672: fix alloc flag aliasing
+#define NVOS32_ALLOC_FLAGS_ZCULL_DONT_ALLOCATE_SHARED_1X 0x80000000
+#define NVOS32_ALLOC_FLAGS_WPR2                         0x80000000 // TODO BUG 2488672: fix alloc flag aliasing
+
+// Internal flags used for RM's allocation paths
+#define NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC         0x00000001 // RM internal flags - not sure if this should be exposed even. Keeping it here.
+#define NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB          0x00000004 // RM internal flags - not sure if this should be exposed even. Keeping it here.
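+
+//
+// Editorial illustration (not part of the original interface): the
+// ALLOC_FLAGS values above are plain bit masks that are simply OR'd
+// together, e.g. for a sparse, externally managed virtual address range
+// (both flags must be used with _VIRTUAL, as documented above):
+//
+//     NvU32 allocFlags = NVOS32_ALLOC_FLAGS_VIRTUAL |
+//                        NVOS32_ALLOC_FLAGS_SPARSE |
+//                        NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED;
+//
+// Note that several flag values are aliased (see the TODO bugs above), so
+// the meaning of a given bit can depend on the type of allocation.
+//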
+#define NVOS32_ALLOC_FLAGS_MAXIMIZE_4GB_ADDRESS_SPACE   NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE // Legacy name
+
+//
+// Bitmask of flags that are only valid for virtual allocations.
+//
+#define NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY         ( \
+    NVOS32_ALLOC_FLAGS_VIRTUAL                  | \
+    NVOS32_ALLOC_FLAGS_LAZY                     | \
+    NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED       | \
+    NVOS32_ALLOC_FLAGS_SPARSE                   | \
+    NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE   | \
+    NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY )
+
+// COMPR_COVG_* allows for specification of what compression resources
+// are required (_MIN) and necessary (_MAX). Default behavior is for
+// RM to provide as much as possible, including none if _ANY is allowed.
+// Values for min/max are (0-100, a %) * _COVG_SCALE (so the max value is
+// 100*10==1000). _START is used to specify the % offset into the
+// region to begin the requested coverage.
+// _COVG_BITS allows specification of the number of comptags per ROP tile.
+// A value of 0 is default and allows RM to choose based upon MMU/FB rules.
+// All other values for _COVG_BITS are arch-specific.
+// Note: NVOS32_ATTR_COMPR_COVG_PROVIDED must be set for this feature
+// to be available (verif-only).
+#define NVOS32_ALLOC_COMPR_COVG_SCALE                   10
+#define NVOS32_ALLOC_COMPR_COVG_BITS                    1:0
+#define NVOS32_ALLOC_COMPR_COVG_BITS_DEFAULT            0x00000000
+#define NVOS32_ALLOC_COMPR_COVG_BITS_1                  0x00000001
+#define NVOS32_ALLOC_COMPR_COVG_BITS_2                  0x00000002
+#define NVOS32_ALLOC_COMPR_COVG_BITS_4                  0x00000003
+#define NVOS32_ALLOC_COMPR_COVG_MAX                     11:2
+#define NVOS32_ALLOC_COMPR_COVG_MIN                     21:12
+#define NVOS32_ALLOC_COMPR_COVG_START                   31:22
+
+
+// Note: NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED must be set for this feature
+// to be enabled.
+// If FALLBACK_ALLOW is set, a fallback from LOW_RES_Z or LOW_RES_ZS
+// to HIGH_RES_Z is allowed if the surface can't be fully covered.
+#define NVOS32_ALLOC_ZCULL_COVG_FORMAT                  3:0
+#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_LOW_RES_Z        0x00000000
+#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_HIGH_RES_Z       0x00000002
+#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_LOW_RES_ZS       0x00000003
+#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK                4:4
+#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK_DISALLOW       0x00000000
+#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK_ALLOW          0x00000001
+
+
+// _ALLOC_COMPTAG_OFFSET allows the caller to specify the starting
+// offset for the comptags for a given surface, primarily for test only.
+// To specify an offset, set _USAGE_FIXED or _USAGE_MIN in conjunction
+// with _START.
+//
+// _USAGE_FIXED sets a surface's comptagline to start at the given
+// starting value. If the offset has already been assigned, then
+// the alloc call fails.
+//
+// _USAGE_MIN sets a surface's comptagline to start at the given
+// starting value or higher, depending on comptagline availability.
+// In this case, if the offset has already been assigned, the next
+// available comptagline (in increasing order) will be assigned.
+//
+// For Fermi, up to 2^17 comptags may be allowed, but the actual,
+// usable limit depends on the size of the compbit backing store.
+//
+// For Pascal, up to 2^18 comptags may be allowed.
+// From Turing, up to 2^20 comptags may be allowed.
+//
+// See also the field ctagOffset in struct NVOS32_PARAMETERS.
+#define NVOS32_ALLOC_COMPTAG_OFFSET_START               19:0
+#define NVOS32_ALLOC_COMPTAG_OFFSET_START_DEFAULT       0x00000000
+#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE               31:30
+#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_DEFAULT       0x00000000
+#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_OFF           0x00000000
+#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_FIXED         0x00000001
+#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_MIN           0x00000002
+
+
+// REALLOC flags field
+#define NVOS32_REALLOC_FLAGS_GROW_ALLOCATION            0x00000000
+#define NVOS32_REALLOC_FLAGS_SHRINK_ALLOCATION          0x00000001
+#define NVOS32_REALLOC_FLAGS_REALLOC_UP                 0x00000000 // towards/from high memory addresses
+#define NVOS32_REALLOC_FLAGS_REALLOC_DOWN               0x00000002 // towards/from memory address 0
+
+// RELEASE_COMPR, REACQUIRE_COMPR flags field
+#define NVOS32_RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED   0x00000001
+
+#define NVOS32_REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED 0x00000001
+
+
+// FREE flags field
+#define NVOS32_FREE_FLAGS_MEMORY_HANDLE_PROVIDED        0x00000001
+
+// DUMP flags field
+#define NVOS32_DUMP_FLAGS_TYPE                          1:0
+#define NVOS32_DUMP_FLAGS_TYPE_FB                       0x00000000
+#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_PD                0x00000001
+#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_VA                0x00000002
+#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_VAPTE             0x00000003
+
+#define NVOS32_BLOCK_TYPE_FREE                          0xFFFFFFFF
+#define NVOS32_INVALID_BLOCK_FREE_OFFSET                0xFFFFFFFF
+
+#define NVOS32_MEM_TAG_NONE                             0x00000000
+
+/*
+ * NV_CONTEXT_DMA_ALLOCATION_PARAMS - Allocation params to create a context DMA
+ * through NvRmAlloc.
+ */
+typedef struct
+{
+    NvHandle hSubDevice;
+    NvV32    flags;
+    NvHandle hMemory;
+    NvU64    offset NV_ALIGN_BYTES(8);
+    NvU64    limit NV_ALIGN_BYTES(8);
+} NV_CONTEXT_DMA_ALLOCATION_PARAMS;
+
+/*
+ * NV_MEMORY_ALLOCATION_PARAMS - Allocation params to create memory through
+ * NvRmAlloc. Flags are populated with NVOS32_ defines.
+ */
+typedef struct
+{
+    NvU32     owner;           // [IN]  - memory owner ID
+    NvU32     type;            // [IN]  - surface type, see below TYPE* defines
+    NvU32     flags;           // [IN]  - allocation modifier flags, see below ALLOC_FLAGS* defines
+
+    NvU32     width;           // [IN]  - width of surface in pixels
+    NvU32     height;          // [IN]  - height of surface in pixels
+    NvS32     pitch;           // [IN/OUT] - desired pitch AND returned actual pitch allocated
+
+    NvU32     attr;            // [IN/OUT] - surface attributes requested, and surface attributes allocated
+    NvU32     attr2;           // [IN/OUT] - surface attributes requested, and surface attributes allocated
+
+    NvU32     format;          // [IN/OUT] - format requested, and format allocated
+    NvU32     comprCovg;       // [IN/OUT] - compr covg requested, and allocated
+    NvU32     zcullCovg;       // [OUT] - zcull covg allocated
+
+    NvU64     rangeLo NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range
+    NvU64     rangeHi NV_ALIGN_BYTES(8); // [IN] - from rangeLo to rangeHi, inclusive.
+
+    NvU64     size      NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated
+    NvU64     alignment NV_ALIGN_BYTES(8); // [IN]  - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on
+    NvU64     offset    NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset
+    NvU64     limit     NV_ALIGN_BYTES(8); // [OUT] - returned surface limit
+    NvP64     address   NV_ALIGN_BYTES(8); // [OUT] - returned address
+
+    NvU32     ctagOffset;      // [IN]  - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET)
+    NvHandle  hVASpace;        // [IN]  - VASpace handle. Used when flag is VIRTUAL.
+
+    NvU32     internalflags; // [IN] - internal flags to change allocation behaviors from internal paths
+
+    NvU32     tag;           // [IN] - memory tag used for debugging
+} NV_MEMORY_ALLOCATION_PARAMS;
+
+/*
+ * NV_OS_DESC_MEMORY_ALLOCATION_PARAMS - Allocation params to create OS-
+ * described memory through NvRmAlloc. Flags are populated with NVOS32_ defines.
+ */
+typedef struct
+{
+    NvU32     type;          // [IN] - surface type, see below TYPE* defines
+    NvU32     flags;         // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines
+    NvU32     attr;          // [IN] - attributes for memory placement/properties, see below
+    NvU32     attr2;         // [IN] - attributes, e.g. GPU_CACHEABLE
+    NvP64     descriptor NV_ALIGN_BYTES(8); // [IN] - descriptor address
+    NvU64     limit NV_ALIGN_BYTES(8);      // [IN] - allocated size - 1
+    NvU32     descriptorType; // [IN] - descriptor type (Virtual | nvmap Handle)
+    NvU32     tag;           // [IN] - memory tag used for debugging
+} NV_OS_DESC_MEMORY_ALLOCATION_PARAMS;
+
+/*
+ * NV_USER_LOCAL_DESC_MEMORY_ALLOCATION_PARAMS - Allocation params to create a memory
+ * object from user-allocated video memory. Flags are populated with NVOS32_*
+ * defines.
+ */
+typedef struct
+{
+    NvU32     flags;         // [IN] - allocation modifier flags, see NVOS02_FLAGS* defines
+    NvU64     physAddr NV_ALIGN_BYTES(8);  // [IN] - physical address
+    NvU64     size NV_ALIGN_BYTES(8);      // [IN] - mem size
+    NvU32     tag;           // [IN] - memory tag used for debugging
+    NvBool    bGuestAllocated; // [IN] - Set if memory is guest allocated (mapped by VMMU)
+} NV_USER_LOCAL_DESC_MEMORY_ALLOCATION_PARAMS;
+
+/*
+ * NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS - Allocation params to create
+ * memory HW resources through NvRmAlloc. Flags are populated with NVOS32_
+ * defines.
+ */
+typedef struct
+{
+    NvU32     owner;         // [IN] - memory owner ID
+    NvU32     flags;         // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines
+    NvU32     type;          // [IN] - surface type, see below TYPE* defines
+
+    NvU32     attr;          // [IN/OUT] - surface attributes requested, and surface attributes allocated
+    NvU32     attr2;         // [IN/OUT] - surface attributes requested, and surface attributes allocated
+
+    NvU32     height;
+    NvU32     width;
+    NvU32     pitch;
+    NvU32     alignment;
+    NvU32     comprCovg;
+    NvU32     zcullCovg;
+
+    NvU32     kind;
+
+    NvP64     bindResultFunc NV_ALIGN_BYTES(8); // BindResultFunc
+    NvP64     pHandle NV_ALIGN_BYTES(8);
+    NvU64     osDeviceHandle NV_ALIGN_BYTES(8);
+    NvU64     size NV_ALIGN_BYTES(8);
+    NvU64     allocAddr NV_ALIGN_BYTES(8);
+
+    // [out] from GMMU_COMPR_INFO in drivers/common/shared/inc/mmu/gmmu_fmt.h
+    NvU32 compPageShift;
+    NvU32 compressedKind;
+    NvU32 compTagLineMin;
+    NvU32 compPageIndexLo;
+    NvU32 compPageIndexHi;
+    NvU32 compTagLineMultiplier;
+
+    // [out] fallback uncompressed kind.
+    NvU32 uncompressedKind;
+
+    NvU32     tag;           // [IN] - memory tag used for debugging
+} NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS;
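+
+// Illustrative sketch (not part of the original header): minimal population of
+// NV_MEMORY_ALLOCATION_PARAMS for a 1 MB, 4 KB-aligned allocation.  The TYPE*
+// and ALLOC_FLAGS* names are defined earlier in this header; the owner value
+// is an arbitrary example, and fields left zero keep their defaults.
+//
+//     NV_MEMORY_ALLOCATION_PARAMS p = {0};
+//     p.owner     = 0x4D454D31;                          // example owner ID ('MEM1')
+//     p.type      = NVOS32_TYPE_IMAGE;
+//     p.flags     = NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;  // honor p.alignment
+//     p.size      = 1 << 20;                             // [IN/OUT] actual size returned
+//     p.alignment = 4096;
+
+/* function OS33 */
+#define NV04_MAP_MEMORY                                 (0x00000021)
+
+// Legacy map and unmap memory flags that don't use the DRF_DEF scheme
+#define NV04_MAP_MEMORY_FLAGS_NONE                      (0x00000000)
+#define NV04_MAP_MEMORY_FLAGS_USER                      (0x00004000)
+
+// New map and unmap memory flags.  These flags are used for both NvRmMapMemory
+// and for NvRmUnmapMemory.
+
+// Mappings can have restricted permissions (read-only, write-only).  Some
+// RM implementations may choose to ignore these flags, or they may work
+// only for certain memory spaces (system, AGP, video memory); in such cases,
+// you may get a read/write mapping even if you asked for a read-only or
+// write-only mapping.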
+#define NVOS33_FLAGS_ACCESS                                        1:0
+#define NVOS33_FLAGS_ACCESS_READ_WRITE                             (0x00000000)
+#define NVOS33_FLAGS_ACCESS_READ_ONLY                              (0x00000001)
+#define NVOS33_FLAGS_ACCESS_WRITE_ONLY                             (0x00000002)
+
+// Persistent mappings are no longer supported
+#define NVOS33_FLAGS_PERSISTENT                                    4:4
+#define NVOS33_FLAGS_PERSISTENT_DISABLE                            (0x00000000)
+#define NVOS33_FLAGS_PERSISTENT_ENABLE                             (0x00000001)
+
+// This flag is a hack to work around bug 150889.  It disables the error
+// checking in the RM that verifies that the client is not trying to map
+// memory past the end of the memory object.  This error checking needs to
+// be shut off in some cases for a PAE bug workaround in certain kernels.
+#define NVOS33_FLAGS_SKIP_SIZE_CHECK                               8:8
+#define NVOS33_FLAGS_SKIP_SIZE_CHECK_DISABLE                       (0x00000000)
+#define NVOS33_FLAGS_SKIP_SIZE_CHECK_ENABLE                        (0x00000001)
+
+// Normally, a mapping is created in the same memory space as the client -- in
+// kernel space for a kernel RM client, or in user space for a user RM client.
+// However, a kernel RM client can specify MEM_SPACE:USER to create a user-space
+// mapping in the current RM client.
+#define NVOS33_FLAGS_MEM_SPACE                                     14:14
+#define NVOS33_FLAGS_MEM_SPACE_CLIENT                              (0x00000000)
+#define NVOS33_FLAGS_MEM_SPACE_USER                                (0x00000001)
+
+// The client can ask for a direct memory mapping (i.e. no BAR1) if remappers and
+// blocklinear are not required.  RM can do a direct mapping in this case if
+// carveout is available.
+// DEFAULT:   Use direct mapping if available and no address/data translation
+//            is necessary; reflected otherwise
+// DIRECT:    Use direct mapping if available, even if some translation is
+//            necessary (the client is responsible for translation)
+// REFLECTED: Always use reflected mapping
+#define NVOS33_FLAGS_MAPPING                                       16:15
+#define NVOS33_FLAGS_MAPPING_DEFAULT                               (0x00000000)
+#define NVOS33_FLAGS_MAPPING_DIRECT                                (0x00000001)
+#define NVOS33_FLAGS_MAPPING_REFLECTED                             (0x00000002)
+
+// The client requests a FIFO mapping but doesn't know the offset or length.
+// DEFAULT: Error-check the length and offset
+// ENABLE:  Don't error-check the length and offset; have the RM fill them in
+#define NVOS33_FLAGS_FIFO_MAPPING                                  17:17
+#define NVOS33_FLAGS_FIFO_MAPPING_DEFAULT                          (0x00000000)
+#define NVOS33_FLAGS_FIFO_MAPPING_ENABLE                           (0x00000001)
+
+// The client can require that the CPU mapping be to a specific CPU address
+// (akin to MAP_FIXED for mmap).
+// DISABLED: RM will map the allocation at a CPU VA that RM selects.
+// ENABLED:  RM will map the allocation at the CPU VA specified by the address
+//           pass-back parameter to NvRmMapMemory
+// NOTES:
+// - Used for controlling CPU addresses in CUDA's unified CPU+GPU virtual
+//   address space
+// - Only valid on NvRmMapMemory
+// - Only implemented on Linux
+#define NVOS33_FLAGS_MAP_FIXED                                     18:18
+#define NVOS33_FLAGS_MAP_FIXED_DISABLE                             (0x00000000)
+#define NVOS33_FLAGS_MAP_FIXED_ENABLE                              (0x00000001)
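+
+// Illustrative sketch (not part of the original header): a kernel RM client
+// composing NVOS33 flags for a read-only mapping placed in user space.  The
+// shift counts are simply the low bits of the DRF-style ranges above.
+//
+//     NvU32 flags = 0;
+//     flags |= NVOS33_FLAGS_ACCESS_READ_ONLY << 0;   // _ACCESS field, bits 1:0
+//     flags |= NVOS33_FLAGS_MEM_SPACE_USER   << 14;  // _MEM_SPACE field, bit 14
+
+// The client can specify to the RM that the CPU virtual address range for an
+// allocation should remain reserved after the allocation is unmapped.
+// DISABLE: When this mapping is destroyed, RM will unmap the CPU virtual
+//          address space used by this allocation.  On Linux this corresponds
+//          to calling munmap on the CPU VA region.
+// ENABLE:  When the map object is freed, RM will leave the CPU virtual
+//          address space used by the allocation reserved.  On Linux this means
+//          that RM will overwrite the previous mapping with an anonymous
+//          mapping instead of calling munmap.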
+// NOTES: +// - When combined with MAP_FIXED, this allows the client to exert +// significant control over the CPU heap +// - Used in CUDA's unified CPU+GPU virtual address space +// - Only valid on NvRmMapMemory (specifies RM's behavior whenever the +// mapping is destroyed, regardless of mechanism) +// - Only implemented on Linux +#define NVOS33_FLAGS_RESERVE_ON_UNMAP 19:19 +#define NVOS33_FLAGS_RESERVE_ON_UNMAP_DISABLE (0x00000000) +#define NVOS33_FLAGS_RESERVE_ON_UNMAP_ENABLE (0x00000001) + +// Systems with a coherent NVLINK2 connection between the CPU and GPU +// have the option of directly mapping video memory over that connection. +// During mapping you may specify a preference. +// +#define NVOS33_FLAGS_BUS 21:20 +#define NVOS33_FLAGS_BUS_ANY 0 +#define NVOS33_FLAGS_BUS_NVLINK_COHERENT 1 +#define NVOS33_FLAGS_BUS_PCIE 2 + +// Internal use only +#define NVOS33_FLAGS_OS_DESCRIPTOR 22:22 +#define NVOS33_FLAGS_OS_DESCRIPTOR_DISABLE (0x00000000) +#define NVOS33_FLAGS_OS_DESCRIPTOR_ENABLE (0x00000001) + +// +// For use in the linux mapping path. This flag sets the +// caching mode for pcie BAR mappings (from nv_memory_type.h). +// Internal use only. +// +#define NVOS33_FLAGS_CACHING_TYPE 25:23 +#define NVOS33_FLAGS_CACHING_TYPE_CACHED 0 +#define NVOS33_FLAGS_CACHING_TYPE_UNCACHED 1 +#define NVOS33_FLAGS_CACHING_TYPE_WRITECOMBINED 2 +#define NVOS33_FLAGS_CACHING_TYPE_WRITEBACK 5 +#define NVOS33_FLAGS_CACHING_TYPE_DEFAULT 6 +#define NVOS33_FLAGS_CACHING_TYPE_UNCACHED_WEAK 7 + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; // device or sub-device handle + NvHandle hMemory; // handle to memory object if provided -- NULL if not + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 length NV_ALIGN_BYTES(8); + NvP64 pLinearAddress NV_ALIGN_BYTES(8); // pointer for returned address + NvU32 status; + NvU32 flags; +} NVOS33_PARAMETERS; + + +/* function OS34 */ +#define NV04_UNMAP_MEMORY (0x00000022) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress NV_ALIGN_BYTES(8); // ptr to virtual address of mapped memory + NvU32 status; + NvU32 flags; +} NVOS34_PARAMETERS; + +/* function OS38 */ +#define NV04_ACCESS_REGISTRY (0x00000026) + +/* parameter values */ +#define NVOS38_ACCESS_TYPE_READ_DWORD 1 +#define NVOS38_ACCESS_TYPE_WRITE_DWORD 2 +#define NVOS38_ACCESS_TYPE_READ_BINARY 6 +#define NVOS38_ACCESS_TYPE_WRITE_BINARY 7 + +#define NVOS38_MAX_REGISTRY_STRING_LENGTH 256 +#define NVOS38_MAX_REGISTRY_BINARY_LENGTH 256 + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hObject; + NvV32 AccessType; + + NvV32 DevNodeLength; + NvP64 pDevNode NV_ALIGN_BYTES(8); + + NvV32 ParmStrLength; + NvP64 pParmStr NV_ALIGN_BYTES(8); + + NvV32 BinaryDataLength; + NvP64 pBinaryData NV_ALIGN_BYTES(8); + + NvV32 Data; + NvV32 Entry; + NvV32 status; +} NVOS38_PARAMETERS; + +#define NV04_ALLOC_CONTEXT_DMA (0x00000027) + +/* parameter values are the same as NVOS03 -- not repeated here */ + +/* parameters */ +typedef struct +{ + NvHandle hObjectParent; + NvHandle hSubDevice; + NvHandle hObjectNew; + NvV32 hClass; + NvV32 flags; + NvU32 selector; + NvHandle hMemory; + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS39_PARAMETERS; + + +#define NV04_GET_EVENT_DATA (0x00000028) + +typedef struct +{ + NvHandle hObject; + NvV32 NotifyIndex; + + // + // Holds same information as that of nvgputypes.h::NvNotification's + // info32 and info16. 
+ //
+    NvV32 info32;
+    NvU16 info16;
+} NvUnixEvent;
+
+/* parameters */
+typedef struct
+{
+    NvP64 pEvent NV_ALIGN_BYTES(8);
+    NvV32 MoreEvents;
+    NvV32 status;
+} NVOS41_PARAMETERS;
+
+/* function NVOS43 -- deleted 4/09 */
+/* #define NV04_UNIFIED_FREE                             (0x0000002B) */
+
+
+#define NVSIM01_BUS_XACT                                 (0x0000002C)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;  // n/a currently
+    NvHandle hDevice;  // n/a currently
+    NvU32    offset;   // phy bus offset
+    NvU32    bar;      // ~0 := phy addr, {0..2} specify gpu bar
+    NvU32    bytes;    // # of bytes
+    NvU32    write;    // 0 := read request
+    NvU32    data;     // in/out based upon 'write'
+    NvU32    status;
+} NVOS2C_PARAMETERS;
+
+/* function NVOS2D -- deleted 4/09 */
+/* #define NVSIM01_BUS_GET_IFACES                        (0x0000002D) */
+
+
+/* function OS46 */
+#define NV04_MAP_MEMORY_DMA                              (0x0000002E)
+
+/* parameter values */
+#define NVOS46_FLAGS_ACCESS                                        1:0
+#define NVOS46_FLAGS_ACCESS_READ_WRITE                             (0x00000000)
+#define NVOS46_FLAGS_ACCESS_READ_ONLY                              (0x00000001)
+#define NVOS46_FLAGS_ACCESS_WRITE_ONLY                             (0x00000002)
+
+//
+// Compute shaders support both 32b and 64b pointers.  This allows mappings
+// to be restricted to the bottom 4GB of the address space.  How _DISABLE
+// is handled is chip specific and may force a pointer above 4GB.
+//
+#define NVOS46_FLAGS_32BIT_POINTER                                 2:2
+#define NVOS46_FLAGS_32BIT_POINTER_DISABLE                         (0x00000000)
+#define NVOS46_FLAGS_32BIT_POINTER_ENABLE                          (0x00000001)
+
+#define NVOS46_FLAGS_PAGE_KIND                                     3:3
+#define NVOS46_FLAGS_PAGE_KIND_PHYSICAL                            (0x00000000)
+#define NVOS46_FLAGS_PAGE_KIND_VIRTUAL                             (0x00000001)
+
+#define NVOS46_FLAGS_CACHE_SNOOP                                   4:4
+#define NVOS46_FLAGS_CACHE_SNOOP_DISABLE                           (0x00000000)
+#define NVOS46_FLAGS_CACHE_SNOOP_ENABLE                            (0x00000001)
+
+// The client requests a CPU kernel mapping so that a SW class can use it.
+// DEFAULT: Don't map CPU address
+// ENABLE:  Map CPU address
+#define NVOS46_FLAGS_KERNEL_MAPPING                                5:5
+#define NVOS46_FLAGS_KERNEL_MAPPING_NONE                           (0x00000000)
+#define NVOS46_FLAGS_KERNEL_MAPPING_ENABLE                         (0x00000001)
+
+//
+// Compute shader access control.
+// GPUs that support this feature set the NV0080_CTRL_DMA_CAPS_SHADER_ACCESS_SUPPORTED
+// property.  These were first supported in Kepler.  _DEFAULT will match the ACCESS field.
+//
+#define NVOS46_FLAGS_SHADER_ACCESS                                 7:6
+#define NVOS46_FLAGS_SHADER_ACCESS_DEFAULT                         (0x00000000)
+#define NVOS46_FLAGS_SHADER_ACCESS_READ_ONLY                       (0x00000001)
+#define NVOS46_FLAGS_SHADER_ACCESS_WRITE_ONLY                      (0x00000002)
+#define NVOS46_FLAGS_SHADER_ACCESS_READ_WRITE                      (0x00000003)
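+
+// Illustrative sketch (not part of the original header): a DMA mapping whose
+// shader access follows the CPU access field, with cache snooping enabled.
+// Shift counts are the low bits of the ranges above.
+//
+//     NvU32 flags = 0;
+//     flags |= NVOS46_FLAGS_ACCESS_READ_ONLY      << 0;  // bits 1:0
+//     flags |= NVOS46_FLAGS_CACHE_SNOOP_ENABLE    << 4;  // bit 4
+//     flags |= NVOS46_FLAGS_SHADER_ACCESS_DEFAULT << 6;  // bits 7:6 - mirror _ACCESS
+
+//
+// How the PAGE_SIZE field is interpreted is architecture specific.
+//
+// On Curie chips it is ignored.
+//
+// On Tesla it is used to select which type of PDE
+// to use.  By default the RM will select 4KB for system memory
+// and BIG (64KB) for video memory.  BOTH is not supported.
+//
+// Likewise on Fermi this is used to select the PDE type.  Fermi cannot
+// mix page sizes within a single mapping, so the page size is determined
+// at surface allocation time.  4KB or BIG may be specified but they
+// must match the page size selected at allocation time.  DEFAULT
+// allows the RM to select either a single page size or both PDE,
+// while BOTH forces the RM to select a dual page size PDE.
+//
+// BIG_PAGE  = 64 KB on PASCAL
+//           = 64 KB or 128 KB on pre_PASCAL chips
+//
+// HUGE_PAGE = 2 MB on PASCAL
+//           = not supported on pre_PASCAL chips.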
+// +#define NVOS46_FLAGS_PAGE_SIZE 11:8 +#define NVOS46_FLAGS_PAGE_SIZE_DEFAULT (0x00000000) +#define NVOS46_FLAGS_PAGE_SIZE_4KB (0x00000001) +#define NVOS46_FLAGS_PAGE_SIZE_BIG (0x00000002) +#define NVOS46_FLAGS_PAGE_SIZE_BOTH (0x00000003) +#define NVOS46_FLAGS_PAGE_SIZE_HUGE (0x00000004) + +// Some systems allow the device to use the system L3 cache when accessing the +// system memory. For example, the iGPU on T19X can allocate from the system L3 +// provided the SoC L3 cache is configured for device allocation. +// +// NVOS46_FLAGS_SYSTEM_L3_ALLOC_DEFAULT - Use the default L3 allocation +// policy. When using this policy, device memory access will be coherent with +// non-snooping devices such as the display on Tegra. +// +// NVOS46_FLAGS_SYSTEM_L3_ALLOC_ENABLE_HINT - Enable L3 allocation if possible. +// When L3 allocation is enabled, device memory access may be cached, and the +// memory access will be coherent only with other snoop-enabled access. This +// flag is a hint and will be ignored if the system does not support L3 +// allocation for the device. NVOS46_FLAGS_CACHE_SNOOP_ENABLE must also be set +// for this flag to be effective. +// +// Note: This flag is implemented only by rmapi_tegra. It is not implemented by +// Resman. +// +#define NVOS46_FLAGS_SYSTEM_L3_ALLOC 13:13 +#define NVOS46_FLAGS_SYSTEM_L3_ALLOC_DEFAULT (0x00000000) +#define NVOS46_FLAGS_SYSTEM_L3_ALLOC_ENABLE_HINT (0x00000001) + +#define NVOS46_FLAGS_DMA_OFFSET_GROWS 14:14 +#define NVOS46_FLAGS_DMA_OFFSET_GROWS_UP (0x00000000) +#define NVOS46_FLAGS_DMA_OFFSET_GROWS_DOWN (0x00000001) + +// +// DMA_OFFSET_FIXED is overloaded for two purposes. +// +// 1. For CTXDMA mappings that use DMA_UNICAST_REUSE_ALLOC_FALSE, +// DMA_OFFSET_FIXED_TRUE indicates to use the dmaOffset parameter +// for a fixed address allocation out of the VA space heap. +// DMA_OFFSET_FIXED_FALSE indicates dmaOffset input will be ignored. +// +// 2. For CTXDMA mappings that use DMA_UNICAST_REUSE_ALLOC_TRUE and +// for *ALL* non-CTXDMA mappings, DMA_OFFSET_FIXED_TRUE indicates +// to treat the input dmaOffset as an absolute virtual address +// instead of an offset relative to the virtual allocation being +// mapped into. Whether relative or absolute, the resulting +// virtual address *must* be contained within the specified +// virtual allocation. +// +// Internally, it is also required that the virtual address be aligned +// to the page size of the mapping (obviously cannot map sub-pages). +// For client flexibility the physical offset does not require page alignment. +// This is handled by adding the physical misalignment +// (internally called pteAdjust) to the returned virtual address. +// The *input* dmaOffset can account for this pteAdjust (or not), +// but the returned virtual address always will. 
+// +#define NVOS46_FLAGS_DMA_OFFSET_FIXED 15:15 +#define NVOS46_FLAGS_DMA_OFFSET_FIXED_FALSE (0x00000000) +#define NVOS46_FLAGS_DMA_OFFSET_FIXED_TRUE (0x00000001) + +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP 19:16 +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_DEFAULT (0x00000000) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_1 (0x00000001) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_2 (0x00000002) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_4 (0x00000003) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_8 (0x00000004) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_16 (0x00000005) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_32 (0x00000006) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_64 (0x00000007) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_128 (0x00000008) +#define NVOS46_FLAGS_P2P 27:20 + +#define NVOS46_FLAGS_P2P_ENABLE 21:20 +#define NVOS46_FLAGS_P2P_ENABLE_NO (0x00000000) +#define NVOS46_FLAGS_P2P_ENABLE_YES (0x00000001) +#define NVOS46_FLAGS_P2P_ENABLE_NONE NVOS46_FLAGS_P2P_ENABLE_NO +#define NVOS46_FLAGS_P2P_ENABLE_SLI NVOS46_FLAGS_P2P_ENABLE_YES +#define NVOS46_FLAGS_P2P_ENABLE_NOSLI (0x00000002) +// Subdevice ID. Reserved 3 bits for the possibility of 8-way SLI +#define NVOS46_FLAGS_P2P_SUBDEVICE_ID 24:22 +#define NVOS46_FLAGS_P2P_SUBDEV_ID_SRC NVOS46_FLAGS_P2P_SUBDEVICE_ID +#define NVOS46_FLAGS_P2P_SUBDEV_ID_TGT 27:25 +#define NVOS46_FLAGS_TLB_LOCK 28:28 +#define NVOS46_FLAGS_TLB_LOCK_DISABLE (0x00000000) +#define NVOS46_FLAGS_TLB_LOCK_ENABLE (0x00000001) +#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC 29:29 +#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_FALSE (0x00000000) +#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_TRUE (0x00000001) +// +// This flag must be used with caution. Improper use can leave stale entries in the TLB, +// and allow access to memory no longer owned by the RM client or cause page faults. +// Also see corresponding flag for NvUnmapMemoryDma. 
+// +#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION 31:31 +#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION_FALSE (0x00000000) +#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION_TRUE (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hDma; // [IN] dma handle for mapping + NvHandle hMemory; // [IN] memory handle for mapping + NvU64 offset NV_ALIGN_BYTES(8); // [IN] offset of region + NvU64 length NV_ALIGN_BYTES(8); // [IN] limit of region + NvV32 flags; // [IN] flags + NvU64 dmaOffset NV_ALIGN_BYTES(8); // [OUT] offset of mapping + // [IN] if FLAGS_DMA_OFFSET_FIXED_TRUE + // *OR* hDma is NOT a CTXDMA handle + // (see NVOS46_FLAGS_DMA_OFFSET_FIXED) + NvV32 status; // [OUT] status +} NVOS46_PARAMETERS; + + +/* function OS47 */ +#define NV04_UNMAP_MEMORY_DMA (0x0000002F) + +#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION 0:0 +#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION_FALSE (0x00000000) +#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION_TRUE (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hDma; // [IN] dma handle for mapping + NvHandle hMemory; // [IN] memory handle for mapping + NvV32 flags; // [IN] flags + NvU64 dmaOffset NV_ALIGN_BYTES(8); // [IN] dma offset from NV04_MAP_MEMORY_DMA + NvV32 status; // [OUT] status +} NVOS47_PARAMETERS; + + +#define NV04_BIND_CONTEXT_DMA (0x00000031) +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hChannel; // [IN] channel handle for binding + NvHandle hCtxDma; // [IN] ctx dma handle for binding + NvV32 status; // [OUT] status +} NVOS49_PARAMETERS; + + +/* function OS54 */ +#define NV04_CONTROL (0x00000036) + +#define NVOS54_FLAGS_NONE (0x00000000) +#define NVOS54_FLAGS_IRQL_RAISED (0x00000001) +#define NVOS54_FLAGS_LOCK_BYPASS (0x00000002) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvU32 flags; + NvP64 params NV_ALIGN_BYTES(8); + NvU32 paramsSize; + NvV32 status; +} NVOS54_PARAMETERS; + +/* RM Control header + * + * Replacement for NVOS54_PARAMETERS where embedded pointers are not allowed. + * Input layout for user space RM Control calls should be: + * + * +--- NVOS63_PARAMETERS ---+--- RM Control parameters ---+ + * + * NVOS63_PARAMETERS::paramsSize is the size of RM Control parameters + * + */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hObject; // [IN] object handle + NvV32 cmd; // [IN] control command ID + NvU32 paramsSize; // [IN] size in bytes of the RM Control parameters + NvV32 status; // [OUT] status +} NVOS63_PARAMETERS; + + +/* function OS55 */ +#define NV04_DUP_OBJECT (0x00000037) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] destination client handle + NvHandle hParent; // [IN] parent of new object + NvHandle hObject; // [INOUT] destination (new) object handle + NvHandle hClientSrc; // [IN] source client handle + NvHandle hObjectSrc; // [IN] source (old) object handle + NvU32 flags; // [IN] flags + NvU32 status; // [OUT] status +} NVOS55_PARAMETERS; + +#define NV04_DUP_HANDLE_FLAGS_NONE (0x00000000) +#define NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE (0x00000001) // If set, prevents an RM kernel client from duping unconditionally + // NOTE: Do not declare a NV04_DUP_HANDLE_FLAGS_* value of 0x00000008 + // until Bug 2859347 is resolved! 
This is due to conflicting usage
+                                                                        // of RS_RES_DUP_PARAMS_INTERNAL.flags to pass
+                                                                        // NVOS32_ALLOC_INTERNAL_FLAGS_FLA_MEMORY to an object constructor.
+
+/* function OS56 */
+#define NV04_UPDATE_DEVICE_MAPPING_INFO                  (0x00000038)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hDevice;
+    NvHandle hMemory;
+    NvP64    pOldCpuAddress NV_ALIGN_BYTES(8);
+    NvP64    pNewCpuAddress NV_ALIGN_BYTES(8);
+    NvV32    status;
+} NVOS56_PARAMETERS;
+
+/* function OS57 */
+#define NV04_SHARE                                       (0x0000003E)
+
+/* parameters */
+typedef struct
+{
+    NvHandle        hClient;     // [IN] owner client handle
+    NvHandle        hObject;     // [IN] resource to share
+    RS_SHARE_POLICY sharePolicy; // [IN] share policy entry
+    NvU32           status;      // [OUT] status
+} NVOS57_PARAMETERS;
+
+/* parameters */
+typedef struct
+{
+    NvU32 deviceReference;
+    NvU32 head;
+    NvU32 state;
+    NvU8  forceMonitorState;
+    NvU8  bForcePerfBiosLevel;
+    NvU8  bIsD3HotTransition;    // [OUT] To tell the client if it's a D3Hot transition
+    NvU32 fastBootPowerState;
+} NVPOWERSTATE_PARAMETERS, *PNVPOWERSTATE_PARAMETERS;
+
+ /***************************************************************************\
+|*                       Object Allocation Parameters                       *|
+ \***************************************************************************/
+
+// GR engine creation parameters
+typedef struct {
+    NvU32   version;    // set to 0x2
+    NvU32   flags;      // input param from an RM client (no flags are currently defined)
+    NvU32   size;       // sizeof(NV_GR_ALLOCATION_PARAMETERS)
+    NvU32   caps;       // output param for an RM client - class dependent
+} NV_GR_ALLOCATION_PARAMETERS;
+
+//
+// NvAlloc parameters for the NV03_DEVICE_XX class
+//    hClientShare
+//      For NV50+ this can be set to the virtual address space for this
+//      device.  On previous chips this field is ignored.  There are
+//      three possible settings:
+//          NV01_NULL_OBJECT - Use the default global VA space
+//          Handle to current client - Create a new private address space
+//          Handle to another client - Attach to another client's address space
+//    flags
+//          MAP_PTE_GLOBALLY           Deprecated.
+//          MINIMIZE_PTETABLE_SIZE     Pass a hint to the DMA HAL to use partial page
+//                                     tables.  Depending on the allocation pattern
+//                                     this may actually use more instance memory.
+//          RETRY_PTE_ALLOC_IN_SYS     Fall back to PTE allocation in sysmem.  This
+//                                     is now enabled by default.
+//          VASPACE_SIZE               Honor the vaSpaceSize field.
+//
+//          MAP_PTE                    Deprecated.
+//
+//          VASPACE_IS_MIRRORED        This flag will tell RM to create a mirrored
+//                                     kernel PDB for the address space associated
+//                                     with this device.  When this flag is set
+//                                     the address space covered by the top PDE
+//                                     is restricted and cannot be allocated out of.
+//
+//
+//          VASPACE_BIG_PAGE_SIZE_64k  *** Warning: this flag will be deprecated; do not use ***
+//          VASPACE_BIG_PAGE_SIZE_128k This flag chooses the big page size of the VA space
+//                                     as 64K/128K if the system supports a configurable size.
+//                                     If the system does not support a configurable size then
+//                                     defaults will be chosen.
+//                                     If the user sets both of these bits then this API will fail.
+//
+//          SHARED_MANAGEMENT
+//            *** Warning: This will be deprecated - see NV_VASPACE_ALLOCATION_PARAMETERS. ***
+//
+//
+//    hTargetClient/hTargetDevice
+//      Deprecated.  Can be deleted once client code has removed references.
+//
+//    vaBase
+//      *** Warning: This will be deprecated - see NV_VASPACE_ALLOCATION_PARAMETERS. ***
+//
+//    vaSpaceSize
+//      Set the size of the VA space used for this client if allocating
+//      a new private address space.  It is expressed as a size, such as
+//      (1<<32) for a 32b address space.
+//      Reducing the size of the address space allows the DMA chip-specific
+//      code to reduce the instance memory used for page tables.
+//
+//    vaMode
+//      The VA space allocation mode.  There are three modes supported:
+//      1. SINGLE_VASPACE
+//      An old abstraction that provides a single VA space under a
+//      device; it is allocated implicitly when an object requires a VA
+//      space.  Typically, this VA space is also shared across clients.
+//
+//      2. OPTIONAL_MULTIPLE_VASPACES
+//      Global + multiple private VA spaces.  In this mode, the old abstraction,
+//      a single VA space under a device that is allocated implicitly, is still
+//      supported.  A private VA space is an entity under a device, which
+//      cannot be shared with other clients, but multiple channels under the
+//      same device can still share a private VA space.
+//      Private VA spaces (class:90f1,FERMI_VASPACE_A) can be allocated as
+//      objects through RM APIs.  This mode requires users to know what they
+//      are doing in terms of using VA spaces.  Page faults can easily occur if
+//      one is not careful with a mix of an implicit VA space and multiple
+//      VA spaces.
+//
+//      3. MULTIPLE_VASPACES
+//      In this mode, all VA spaces have to be allocated explicitly through RM
+//      APIs and users have to specify which VA space to use for each object.
+//      This mode prevents users from using context DMAs, which are not supported
+//      and can be misleading if used.  It is therefore more of a safeguard mode
+//      to prevent mistakes that are hard to debug.
+//
+//      DEFAULT MODE: 2. OPTIONAL_MULTIPLE_VASPACES
+//
+// See NV0080_ALLOC_PARAMETERS for the allocation parameter structure.
+//
+
+#define NV_DEVICE_ALLOCATION_SZNAME_MAXLEN                    128
+#define NV_DEVICE_ALLOCATION_FLAGS_NONE                       (0x00000000)
+#define NV_DEVICE_ALLOCATION_FLAGS_MAP_PTE_GLOBALLY           (0x00000001)
+#define NV_DEVICE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE     (0x00000002)
+#define NV_DEVICE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS     (0x00000004)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE               (0x00000008)
+#define NV_DEVICE_ALLOCATION_FLAGS_MAP_PTE                    (0x00000010)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_TARGET          (0x00000020)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SHARED_MANAGEMENT  (0x00000100)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k  (0x00000200)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k (0x00000400)
+#define NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS (0x00000800)
+
+/*
+ * TODO: Delete this flag once CUDA moves to the ctrl call
+ */
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED        (0x00000040)
+
+// XXX NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED should not
+// be exposed to clients.  It should be the default RM behavior.
+//
+// Until it is made the default, certain clients such as OpenGL
+// might still need PTABLE allocations to go through PMA, so this
+// flag has been temporarily exposed.
+//
+// See bug 1880192
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED (0x00001000)
+
+//
+// Indicates this device is being created by a guest and requires a
+// HostVgpuDeviceKernel creation in the client.
+//
+#define NV_DEVICE_ALLOCATION_FLAGS_HOST_VGPU_DEVICE           (0x00002000)
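+
+// Illustrative sketch (not part of the original header): combining device
+// allocation flags for a new private VA space.  NV0080_ALLOC_PARAMETERS
+// itself is declared in a separate header (see the reference above), so only
+// the flag and vaMode values are shown; the VAMODE defines appear just below.
+//
+//     NvU32 devFlags = NV_DEVICE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE |
+//                      NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE;  // honor vaSpaceSize
+//     NvU32 vaMode   = NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES;
+
+//
+// Indicates this device is being created for VGPU plugin use.
+// Requires a HostVgpuDevice handle to indicate the guest on which
+// this plugin operates.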
+//
+#define NV_DEVICE_ALLOCATION_FLAGS_PLUGIN_CONTEXT             (0x00004000)
+
+#define NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES (0x00000000)
+#define NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE             (0x00000001)
+#define NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES          (0x00000002)
+
+/*
+ * NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS.flags values.
+ *
+ * These flags may apply to all channel types: PIO, DMA, and GPFIFO.
+ * They are also designed so that zero is always the correct default.
+ *
+ *   NVOS04_FLAGS_CHANNEL_TYPE:
+ *     This flag specifies the type of channel to allocate.  Legal values
+ *     for this flag include:
+ *
+ *       NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL:
+ *         This flag specifies that a physical channel is to be allocated.
+ *
+ *       NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL:
+ *         OBSOLETE - NOT SUPPORTED
+ *
+ *       NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL:
+ *         OBSOLETE - NOT SUPPORTED
+ */
+
+/* valid NVOS04_FLAGS_CHANNEL_TYPE values */
+#define NVOS04_FLAGS_CHANNEL_TYPE                                  1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL                         0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL                          0x00000001  // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL             0x00000002  // OBSOLETE
+
+/*
+ * NVOS04_FLAGS_VPR:
+ *     This flag specifies whether the channel is intended for work with
+ *     Video Protected Regions (VPR).
+ *
+ *       NVOS04_FLAGS_VPR_TRUE:
+ *         The channel will only write to protected memory regions.
+ *
+ *       NVOS04_FLAGS_VPR_FALSE:
+ *         The channel will never read from protected memory regions.
+ */
+#define NVOS04_FLAGS_VPR                                           2:2
+#define NVOS04_FLAGS_VPR_FALSE                                     0x00000000
+#define NVOS04_FLAGS_VPR_TRUE                                      0x00000001
+
+/*
+ * NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING:
+ *     This flag specifies whether the channel can skip refcounting of potentially
+ *     accessed mappings on job kickoff.  This flag is only meaningful for
+ *     kernel drivers which perform refcounting of memory mappings.
+ *
+ *       NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE:
+ *         The channel cannot skip refcounting of memory mappings
+ *
+ *       NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE:
+ *         The channel can skip refcounting of memory mappings
+ */
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING                  3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE            0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE             0x00000001
+
+/*
+ * NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE:
+ *       This flag specifies which "runqueue" the allocated channel will be
+ *       executed on in a TSG.  Channels on different runqueues within a TSG
+ *       may be able to feed methods into the engine simultaneously.
+ *       Non-default values are only supported on GP10x and later, and only for
+ *       channels within a TSG.
+ */
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE                        4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT                0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE                    0x00000001
+
+/*
+ * NVOS04_FLAGS_PRIVILEGED_CHANNEL:
+ *     This flag tells RM whether to give the channel admin privilege.  This
+ *     flag will only take effect if the client is the GSP-vGPU plugin.  It is
+ *     needed so that the guest can update page tables in physical mode and do
+ *     scrubbing.
+ */
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL                            5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE                      0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE                       0x00000001
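+
+/*
+ * Illustrative sketch (not part of the original header): composing NVOS04
+ * channel flags.  The shift counts are the low bits of the ranges above.
+ *
+ *     NvU32 flags = 0;
+ *     flags |= NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL      << 0;  // bits 1:0
+ *     flags |= NVOS04_FLAGS_VPR_FALSE                  << 2;  // bit 2
+ *     flags |= NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE << 4;  // bit 4
+ */
+
+/*
+ * NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING:
+ *     This flag tells RM not to schedule a newly created channel within a
+ *     channel group immediately, even if the channel group is currently
+ *     scheduled.  The channel will not be scheduled until
+ *     NVA06F_CTRL_GPFIFO_SCHEDULE is invoked.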
+ *     This is used, e.g., for CUDA, which needs to do additional
+ *     initialization before starting up a channel.
+ *     Default is FALSE.
+ */
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING                     6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE               0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE                0x00000001
+
+/*
+ * NVOS04_FLAGS_DENY_PHYSICAL_MODE_CE:
+ *     This flag specifies whether or not to deny access to the physical
+ *     mode of the CopyEngine, regardless of whether or not the client handle
+ *     is admin.  If set to true, this channel allocation will always result
+ *     in an unprivileged channel.  If set to false, the privilege of the channel
+ *     will depend on the privilege level of the client handle.
+ *     This is primarily meant for vGPU, since all client handles
+ *     granted to guests are admin.
+ */
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE                7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE          0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE           0x00000001
+
+/*
+ * NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE
+ *
+ *    This flag specifies the channel offset within a page of USERD.
+ *    For example, value 3 means the 4th channel within a USERD page.
+ *    Given that the USERD size is 512B, we will have 8 channels in total,
+ *    so 3 bits are reserved.
+ *
+ *    When _USERD_INDEX_FIXED_TRUE is set but _INDEX_PAGE_FIXED_FALSE is set,
+ *    it will ask for a new USERD page.
+ *
+ */
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE                    10:8
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED                    11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE              0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE               0x00000001
+
+/*
+ * NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE
+ *
+ *    This flag specifies the channel offset in terms of USERD pages.  When
+ *    _PAGE_FIXED_TRUE is set, the _INDEX_FIXED_FALSE bit should also
+ *    be set; otherwise INVALID_STATE will be returned.
+ *
+ *    The field _USERD_INDEX_VALUE is then used to request the specific
+ *    offset within a USERD page.
+ */
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE               20:12
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED               21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE         0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE          0x00000001
+
+/*
+ * NVOS04_FLAGS_DENY_AUTH_LEVEL_PRIV
+ *     This flag specifies whether or not to deny access to the privileged
+ *     host methods TLB_INVALIDATE and ACCESS_COUNTER_CLR.
+ */
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV                 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE           0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE            0x00000001
+
+/*
+ * NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER
+ *
+ *     This flag specifies that scrubbing should be skipped for any internal
+ *     allocations made for this channel from PMA using ctx buf pools.
+ *     Only kernel clients are allowed to use this setting.
+ */
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER                        23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE                  0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE                   0x00000001
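+
+/*
+ * Illustrative sketch (not part of the original header): pinning a channel to
+ * the 4th USERD slot of USERD page 7, following the PAGE_FIXED/INDEX_FIXED
+ * rules documented above.
+ *
+ *     NvU32 flags = 0;
+ *     flags |= 3 << 8;   // _USERD_INDEX_VALUE (bits 10:8): 4th channel in the page
+ *     flags |= 7 << 12;  // _USERD_INDEX_PAGE_VALUE (bits 20:12): page 7
+ *     flags |= NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE << 21;
+ *     // _USERD_INDEX_FIXED stays FALSE, as required when _PAGE_FIXED is TRUE.
+ */
+
+/*
+ * NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO
+ *
+ *     This flag specifies that the client is expected to map USERD itself
+ *     and RM need not do so.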
+ */
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO                      24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE                0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE                 0x00000001
+
+/*
+ * NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL
+ */
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL           25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE     0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE      0x00000001
+
+/*
+ * NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT
+ *
+ *     This flag specifies whether the channel calling context is from the CPU
+ *     VGPU plugin.
+ */
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT                  26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE            0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE             0x00000001
+
+/*
+ * NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT
+ *
+ *     This flag specifies the channel PBDMA ACQUIRE timeout option:
+ *     _FALSE to disable it, _TRUE to enable it.
+ *     When this flag is enabled, if a host semaphore acquire does not
+ *     complete in about 2 sec, it will time out and trigger an RC error.
+ */
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT                27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE          0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE           0x00000001
+
+/*
+ * NVOS04_FLAGS_GROUP_CHANNEL_THREAD:
+ *       This flag specifies the thread id in which an allocated channel
+ *       will be executed in a TSG.  The relationship between the thread id
+ *       in a TSG and the respective definitions is implementation specific.
+ *       Also, not all classes will be supported at thread > 0.
+ *       This field cannot be used on non-TSG channels and must be set to
+ *       the default value (0) in that case.  If thread > 0 on a non-TSG
+ *       channel, the allocation will fail.
+ */
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD                         29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT                 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE                     0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO                     0x00000002
+
+#define NVOS04_FLAGS_MAP_CHANNEL                                  30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE                            0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE                             0x00000001
+
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC                         31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE                   0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE                    0x00000001
+
+typedef struct
+{
+    NvU64 base NV_ALIGN_BYTES(8);
+    NvU64 size NV_ALIGN_BYTES(8);
+    NvU32 addressSpace;
+    NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+typedef struct
+{
+    NvHandle hObjectError;                       // error context DMA
+    NvHandle hObjectBuffer;                      // no longer used
+    NvU64    gpFifoOffset NV_ALIGN_BYTES(8);     // offset to beginning of GP FIFO
+    NvU32    gpFifoEntries;                      // number of GP FIFO entries
+    NvU32    flags;
+    NvHandle hContextShare;                      // context share handle
+    NvHandle hVASpace;                           // VASpace for the channel
+    NvHandle hUserdMemory[NVOS_MAX_SUBDEVICES];  // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+    NvU64    userdOffset[NVOS_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); // offset to beginning of UserD within hUserdMemory[x]
+    NvU32    engineType;                         // engine type (NV2080_ENGINE_TYPE_*) with which this channel is associated
+    NvU32    cid;                                // Channel identifier that is unique for the duration of an RM session
+    NvU32    subDeviceId;                        // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+    NvHandle hObjectEccError;                    // ECC error context DMA
+    NV_MEMORY_DESC_PARAMS instanceMem;
+    NV_MEMORY_DESC_PARAMS userdMem;
+    NV_MEMORY_DESC_PARAMS ramfcMem;
+    NV_MEMORY_DESC_PARAMS mthdbufMem;
+
+    NvHandle hPhysChannelGroup;                  // reserved
+    NvU32    internalFlags;                      // reserved
+    NV_MEMORY_DESC_PARAMS errorNotifierMem;      // reserved
+    NV_MEMORY_DESC_PARAMS eccErrorNotifierMem;   // reserved
+    NvU32    ProcessID;                          // reserved
+    NvU32    SubProcessID;                       // reserved
+} NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR                0x00000000
+#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN    0x00000001
+#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1              2
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_VALUE              14:0
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS        15:15
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS_TRUE   0x1
+#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS_FALSE  0x0
+
+typedef struct
+{
+    NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS gpfifoAllocationParams;
+    NvHandle hKernelChannel;
+} NV_PHYSICALCHANNEL_ALLOC_PARAMS;
+
+typedef struct
+{
+    NvHandle hRunlistBase;  // Handle to physmem runlist base
+    NvU32    engineID;      // Engine associated with the runlist
+} NV_CHANNELRUNLIST_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvV32    channelInstance;            // One of the n channel instances of a given channel type.
+                                         // Note that the core channel has only one instance
+                                         // while all others have two (one per head).
+    NvHandle hObjectBuffer;              // ctx dma handle for DMA push buffer
+    NvHandle hObjectNotify;              // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+    NvU32    offset;                     // Initial offset for put/get, usually zero.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives the virt addr of the UDISP GET/PUT regs
+
+    NvU32    flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB      1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES  0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO   0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvV32    channelInstance;            // One of the n channel instances of a given channel type.
+                                         // All PIO channels have two instances (one per head).
+    NvHandle hObjectNotify;              // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives the virt addr of the control region for the PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+// Used for allocating a channel group
+typedef struct
+{
+    NvHandle hObjectError;               // Error notifier for TSG
+    NvHandle hObjectEccError;            // ECC Error notifier for TSG
+    NvHandle hVASpace;                   // VA space handle for TSG
+    NvU32    engineType;                 // Engine with which all channels in this TSG are associated
+    NvBool   bIsCallingContextVgpuPlugin;
+} NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS;
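+
+/*
+ * Illustrative sketch (not part of the original header): allocating a channel
+ * group (TSG).  The handle names are hypothetical, and the NV2080_ENGINE_TYPE_*
+ * values come from a separate class header.
+ *
+ *     NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS tsg = {0};
+ *     tsg.hObjectError = hErrCtxDma;              // hypothetical error notifier
+ *     tsg.hVASpace     = hVaSpace;                // 0 selects the device default
+ *     tsg.engineType   = NV2080_ENGINE_TYPE_GR0;  // from cl2080
+ */
+
+/*
+* @params:
+* @engineId         : Engine with which the software runlist is associated.
+* @maxTSGs          : Maximum number of TSG entries that will be submitted in this software runlist.
+*                     The size of the runlist buffer will be determined by
+*                     2 *                     // double buffer
+*                     maxTSGs *               // determined by KMD
+*                     maxChannelPerTSG *      // Determined by RM
+*                     sizeof(RunlistEntry)    // Determined by HW format
+* @qosIntrEnableMask: QOS interrupt bitmask that needs to be enabled for the SW runlist, defined below.
+*/
+typedef struct
+{
+    NvU32 engineId;            //(IN)
+    NvU32 maxTSGs;             //(IN) // RM could return an error if the request cannot be accommodated.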
+    NvU32 qosIntrEnableMask;   //(IN) // Bitmask of QOS interrupts that need to be enabled
+} NV_SWRUNLIST_ALLOCATION_PARAMS;
+
+#define NV_SWRUNLIST_QOS_INTR_NONE                                  0x00000000
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_AND_ENG_IDLE_ENABLE           NVBIT32(0)
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_IDLE_ENABLE                   NVBIT32(1)
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_ACQUIRE_ENABLE                NVBIT32(2)
+#define NV_SWRUNLIST_QOS_INTR_RUNLIST_ACQUIRE_AND_ENG_IDLE_ENABLE   NVBIT32(3)
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 caps;
+} NV_ME_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;
+    NvU32 engineInstance;             // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+
+//
+// These are referenced by mdiag mods tests, but do not appear to be used
+// in the RM any longer
+//
+#define NV_VP_ALLOCATION_FLAGS_STANDARD_UCODE       (0x00000000)
+#define NV_VP_ALLOCATION_FLAGS_STATIC_UCODE         (0x00000001)
+#define NV_VP_ALLOCATION_FLAGS_DYNAMIC_UCODE        (0x00000002)
+
+//
+// NV_VP_ALLOCATION_PARAMETERS.flags
+//
+// NV_VP_ALLOCATION_FLAGS_AVP_CLIENT is used by Tegra to specify whether
+// the current allocation will be used by Video or Audio
+//
+#define NV_VP_ALLOCATION_FLAGS_AVP_CLIENT_VIDEO     (0x00000000)
+#define NV_VP_ALLOCATION_FLAGS_AVP_CLIENT_AUDIO     (0x00000001)
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 caps;
+    NvU32 flags;
+    NvU32 altUcode;
+    NvP64 rawUcode NV_ALIGN_BYTES(8);
+    NvU32 rawUcodeSize;
+    NvU32 numSubClasses;
+    NvU32 numSubSets;
+    NvP64 subClasses NV_ALIGN_BYTES(8);
+    NvU32 prohibitMultipleInstances;
+    NvP64 pControl NV_ALIGN_BYTES(8);            // Used by Tegra to return a mapping to NvE276Control
+    NvHandle hMemoryCmdBuffer NV_ALIGN_BYTES(8); // Used by Tegra to specify the cmd buffer
+    NvU64 offset NV_ALIGN_BYTES(8);              // Used by Tegra to specify an offset into the cmd buffer
+
+} NV_VP_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;
+} NV_PPP_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of MSENC?
+    NvU32 engineInstance;             // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of SEC2?
+} NV_SEC2_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of NVJPG?
+    NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+
+#define NV04_ADD_VBLANK_CALLBACK                         (0x0000003D)
+
+#include "class/cl9010.h" // for OSVBLANKCALLBACKPROC
+
+/* parameters */
+/* NOTE: the "void* pParm's" below are ok (but unfortunate) since this interface
+   can only be used by other kernel drivers, which must share the same ptr-size */
+typedef struct
+{
+    NvHandle hClient;             // [IN] client handle
+    NvHandle hDevice;             // [IN] device handle for mapping
+    NvHandle hVblank;             // [IN] Vblank handle for control
+    OSVBLANKCALLBACKPROC pProc;   // Routine to call at vblank time
+
+    NvV32 LogicalHead;            // Logical Head
+    void *pParm1;
+    void *pParm2;
+    NvU32 bAdd;                   // Add or Delete
+    NvV32 status;                 // [OUT] status
+} NVOS61_PARAMETERS;
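+
+/*
+ * Illustrative sketch (not part of the original header): registering a vblank
+ * callback via NVOS61_PARAMETERS.  The handles and the callback are
+ * hypothetical; OSVBLANKCALLBACKPROC comes from class/cl9010.h.
+ *
+ *     NVOS61_PARAMETERS p = {0};
+ *     p.hClient     = hClient;      // hypothetical client handle
+ *     p.hDevice     = hDevice;      // hypothetical device handle
+ *     p.hVblank     = hVblank;      // hypothetical vblank handle
+ *     p.pProc       = MyVblankProc; // OSVBLANKCALLBACKPROC
+ *     p.LogicalHead = 0;
+ *     p.bAdd        = 1;            // add (rather than delete) the callback
+ */
+
+/**
+ * @brief NvAlloc parameters for VASPACE classes
+ *
+ * Used to create a new private virtual address space.
+ *
+ * index
+ *       Tegra: With TEGRA_VASPACE_A, index specifies the IOMMU
+ *       virtual address space to be created.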
+ *       Based on the index, RM/NVMEM will decide the HW ASID to be used with
+ *       this VA space.  "index" takes values from the
+ *       NVMEM_CLIENT_* defines in
+ *       "drivers/common/inc/tegra/memory/ioctl.h".
+ *
+ *       Big GPU: With FERMI_VASPACE_A, see NV_VASPACE_ALLOCATION_INDEX_GPU_*.
+ *
+ * flags
+ *       MINIMIZE_PTETABLE_SIZE Pass a hint to the DMA HAL to use partial page tables.
+ *                              Depending on the allocation pattern this may actually
+ *                              use more instance memory.
+ *
+ *       RETRY_PTE_ALLOC_IN_SYS Fall back to PTE allocation in sysmem.  This is now
+ *                              enabled by default.
+ *
+ *       SHARED_MANAGEMENT
+ *          Indicates management of the VA space is shared with another
+ *          component (e.g. driver layer, OS, etc.).
+ *
+ *          The initial VA range from vaBase (inclusive) through vaSize (exclusive)
+ *          is managed by RM.  The range must be aligned to a top-level PDE's VA
+ *          coverage since backing page table levels for this range are managed by RM.
+ *          All normal RM virtual memory management APIs work within this range.
+ *
+ *          An external component can manage the remaining VA ranges,
+ *          from 0 (inclusive) to vaBase (exclusive) and from vaSize (inclusive) up to the
+ *          maximum VA limit supported by HW.
+ *          Management of these ranges includes VA sub-allocation and the
+ *          backing lower page table levels.
+ *
+ *          The top-level page directory is special since it is a shared resource.
+ *          Management of the page directory is as follows:
+ *          1. Initially RM allocates a page directory for RM-managed PDEs.
+ *          2. The external component may create a full page directory and commit it
+ *             with NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY.
+ *             This will copy the RM-managed PDEs from the RM-managed page directory
+ *             into the external page directory and commit channels to the external page directory.
+ *             After this point RM will update the external page directory directly for
+ *             operations that modify RM-managed PDEs.
+ *          3. The external component may use NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY repeatedly
+ *             if it needs to update the page directory again (e.g. to resize or migrate).
+ *             This will copy the RM-managed PDEs from the old external page directory
+ *             into the new external page directory and commit channels to the new page directory.
+ *          4. The external component may restore management of the page directory back to
+ *             RM with NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY.
+ *             This will copy the RM-managed PDEs from the external page directory
+ *             into the RM-managed page directory and commit channels to the RM-managed page directory.
+ *             After this point RM will update the RM-managed page directory for
+ *             operations that modify RM-managed PDEs.
+ *          Note that operations (2) and (4) are symmetric - the RM perspective of management is identical
+ *          before and after a sequence of SET => ... => UNSET.
+ *
+ * IS_MIRRORED
+ *          This flag will tell RM to create a mirrored
+ *          kernel PDB for the address space associated
+ *          with this device.  When this flag is set
+ *          the address space covered by the top PDE
+ *          is restricted and cannot be allocated out of.
+ * ENABLE_PAGE_FAULTING
+ *          Enable page faulting if the architecture supports it.
+ *          As of now page faulting is only supported for compute on Pascal+.
+ * IS_EXTERNALLY_OWNED
+ *          The allocated VA space will be managed by an external driver.
+ *          RM will not own the page tables for this VA space.
+ *
+ * ENABLE_NVLINK_ATS
+ *          Enables VA translation for this address space using NVLINK ATS.
+ *          Note, the GMMU page tables still exist and take priority over NVLINK ATS.
+ * VA space object creation will fail if:
+ *          - hardware support is not available (NV_ERR_NOT_SUPPORTED)
+ *          - the incompatible options IS_MIRRORED or IS_EXTERNALLY_OWNED are set (NV_ERR_INVALID_ARGUMENT)
+ * IS_FLA
+ *          Sets the FLA flag for this VASPACE
+ *
+ * ALLOW_ZERO_ADDRESS
+ *          Allows the VASPACE range to start from zero
+ * SKIP_SCRUB_MEMPOOL
+ *          Skip scrubbing in MemPool
+ *
+ * vaBase [in, out]
+ *       On input, the lowest usable base address of the VA space.
+ *       If 0, RM will pick a default value - 0 is always reserved to represent NULL pointers.
+ *       The value must be aligned to the largest page size of the VA space.
+ *       Larger values aid in debug since offsets added to NULL pointers will still fault.
+ *
+ *       On output, the actual usable base address is returned.
+ *
+ * vaSize [in,out]
+ *       On input, the requested size of the virtual address space in bytes.
+ *       Requesting a smaller size reduces the memory required for the initial
+ *       page directory, but the VAS may be resized later (NV0080_CTRL_DMA_SET_VA_SPACE_SIZE).
+ *       If 0, the default VA space size will be used.
+ *
+ *       On output, the actual size of the VAS in bytes.
+ *       NOTE: This corresponds to VA_LIMIT + 1, so the usable size is (vaSize - vaBase).
+ *
+ * bigPageSize
+ *       Set the size of the big page in this address space object.  Current HW supports
+ *       either 64k or 128k as the size of the big page.  HW that supports multiple big
+ *       page sizes per address space will use this size.  HW that does not support this
+ *       feature will fall back to the default big page size supported by the system.
+ *       If the big page size value is set to ZERO then the default page size
+ *       of the system will be picked.
+ **/
+typedef struct
+{
+    NvU32   index;
+    NvV32   flags;
+    NvU64   vaSize NV_ALIGN_BYTES(8);
+    NvU64   vaStartInternal NV_ALIGN_BYTES(8);
+    NvU64   vaLimitInternal NV_ALIGN_BYTES(8);
+    NvU32   bigPageSize;
+    NvU64   vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_FLAGS_NONE                            (0x00000000)
+#define NV_VASPACE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE          BIT(0)
+#define NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS          BIT(1)
+#define NV_VASPACE_ALLOCATION_FLAGS_SHARED_MANAGEMENT               BIT(2)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED             BIT(3)
+#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_NVLINK_ATS               BIT(4)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_MIRRORED                     BIT(5)
+#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_PAGE_FAULTING            BIT(6)
+#define NV_VASPACE_ALLOCATION_FLAGS_VA_INTERNAL_LIMIT               BIT(7)
+#define NV_VASPACE_ALLOCATION_FLAGS_ALLOW_ZERO_ADDRESS              BIT(8)
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_FLA                          BIT(9)
+#define NV_VASPACE_ALLOCATION_FLAGS_SKIP_SCRUB_MEMPOOL              BIT(10)
+#define NV_VASPACE_ALLOCATION_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE BIT(11)
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW                         0x00
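+
+/*
+ * Illustrative sketch (not part of the original header): requesting a new
+ * private VA space.  The 1 TB size is an arbitrary example; 0 would select
+ * the default.
+ *
+ *     NV_VASPACE_ALLOCATION_PARAMETERS vas = {0};
+ *     vas.index  = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+ *     vas.flags  = NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS;
+ *     vas.vaBase = 0;           // let RM pick; 0 is reserved for NULL
+ *     vas.vaSize = 1ULL << 40;  // request a 1 TB VA space
+ */
+ * NV_STATUS_LEVEL_WARN
+ * to determine success v. failure of a call.
+ */
+#define NV_STATUS_LEVEL_WARN             1
+
+/*!
+ * @def       NV_STATUS_LEVEL_ERR
+ * @see       NV_STATUS_LEVEL
+ * @brief     Unrecoverable error condition
+ */
+#define NV_STATUS_LEVEL_ERR              3
+
+/*!
+ * @def       NV_STATUS_LEVEL
+ * @see       NV_STATUS_LEVEL_OK
+ * @see       NV_STATUS_LEVEL_WARN
+ * @see       NV_STATUS_LEVEL_ERR
+ * @brief     Level of the status code
+ *
+ * @warning   IMPORTANT: When comparing NV_STATUS_LEVEL(_S) against one of
+ *            these constants, it is important to use '<=' or '>' (rather
+ *            than '<' or '>=').
+ *
+ *            For example, do: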
+ *                if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN)
+ *            rather than:
+ *                if (NV_STATUS_LEVEL(status) < NV_STATUS_LEVEL_ERR)
+ *
+ *            By being consistent in this manner, it is easier to systematically
+ *            add additional level constants.  New levels are likely to lower
+ *            (rather than raise) the severity of _ERR codes.  For example,
+ *            if we were to add NV_STATUS_LEVEL_RETRY to indicate hardware
+ *            failures that may be recoverable (e.g. RM_ERR_TIMEOUT_RETRY
+ *            or RM_ERR_BUSY_RETRY), it would be less severe than
+ *            NV_STATUS_LEVEL_ERR, the level to which these status codes now
+ *            belong.  Using '<=' and '>' ensures your code is not broken in
+ *            cases like this.
+ */
+#define NV_STATUS_LEVEL(_S)                                               \
+    ((_S) == NV_OK?                                NV_STATUS_LEVEL_OK:    \
+    ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000?  NV_STATUS_LEVEL_WARN:  \
+                                                   NV_STATUS_LEVEL_ERR))
+
+/*!
+ * @def       NV_STATUS_LEVEL_CHAR
+ * @see       NV_STATUS_LEVEL_OK
+ * @see       NV_STATUS_LEVEL_WARN
+ * @see       NV_STATUS_LEVEL_ERR
+ * @brief     Character representing the status code level
+ */
+#define NV_STATUS_LEVEL_CHAR(_S)                        \
+    ((_S) == NV_OK?                                '0': \
+    ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000?  'W': \
+                                                   'E'))
+
+// Function definitions
+const char *nvstatusToString(NV_STATUS nvStatusIn);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SDK_NVSTATUS_H */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h
new file mode 100644
index 0000000..4d8af82
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h
@@ -0,0 +1,162 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SDK_NVSTATUSCODES_H
+#define SDK_NVSTATUSCODES_H
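+
+/*
+ * Illustrative sketch (not part of the original file): this header is an
+ * X-macro list, so a consumer defines NV_STATUS_CODE before including it.
+ * For example, a hypothetical consumer could build an enum of status values:
+ *
+ *     #define NV_STATUS_CODE(name, code, str) name = (code),
+ *     typedef enum {
+ *     #include "nvstatuscodes.h"
+ *     } NV_STATUS_ENUM;
+ *     #undef NV_STATUS_CODE
+ */
+
+NV_STATUS_CODE(NV_OK,                                  0x00000000, "Success")
+NV_STATUS_CODE(NV_ERR_GENERIC,                         0x0000FFFF, "Failure: Generic Error")
+
+NV_STATUS_CODE(NV_ERR_BROKEN_FB,                       0x00000001, "Frame-Buffer broken")
+NV_STATUS_CODE(NV_ERR_BUFFER_TOO_SMALL,                0x00000002, "Buffer passed in is too small")
+NV_STATUS_CODE(NV_ERR_BUSY_RETRY,                      0x00000003, "System is busy, retry later")
+NV_STATUS_CODE(NV_ERR_CALLBACK_NOT_SCHEDULED,          0x00000004, "The requested callback API not scheduled")
+NV_STATUS_CODE(NV_ERR_CARD_NOT_PRESENT,                0x00000005, "Card not detected")
+NV_STATUS_CODE(NV_ERR_CYCLE_DETECTED,                  0x00000006, "Call cycle detected")
+NV_STATUS_CODE(NV_ERR_DMA_IN_USE,                      0x00000007, "Requested DMA is in use")
+NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_LOCKED,              0x00000008, "Requested DMA memory is not locked")
+NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_UNLOCKED,            0x00000009, "Requested DMA memory is not unlocked")
+NV_STATUS_CODE(NV_ERR_DUAL_LINK_INUSE,                 0x0000000A, "Dual-Link is in use")
+NV_STATUS_CODE(NV_ERR_ECC_ERROR,                       0x0000000B, "Generic ECC error")
+NV_STATUS_CODE(NV_ERR_FIFO_BAD_ACCESS,                 0x0000000C, "FIFO: Invalid access")
+NV_STATUS_CODE(NV_ERR_FREQ_NOT_SUPPORTED,              0x0000000D, "Requested frequency is not supported")
+NV_STATUS_CODE(NV_ERR_GPU_DMA_NOT_INITIALIZED,         0x0000000E, "Requested DMA not initialized")
+NV_STATUS_CODE(NV_ERR_GPU_IS_LOST,                     0x0000000F, "GPU lost from the bus")
+NV_STATUS_CODE(NV_ERR_GPU_IN_FULLCHIP_RESET,           0x00000010, "GPU currently in full-chip reset")
+NV_STATUS_CODE(NV_ERR_GPU_NOT_FULL_POWER,              0x00000011, "GPU not in full power")
+NV_STATUS_CODE(NV_ERR_GPU_UUID_NOT_FOUND,              0x00000012, "GPU UUID not found")
+NV_STATUS_CODE(NV_ERR_HOT_SWITCH,                      0x00000013, "System in hot switch")
+NV_STATUS_CODE(NV_ERR_I2C_ERROR,                       0x00000014, "I2C Error")
+NV_STATUS_CODE(NV_ERR_I2C_SPEED_TOO_HIGH,              0x00000015, "I2C Error: Speed too high")
+NV_STATUS_CODE(NV_ERR_ILLEGAL_ACTION,                  0x00000016, "Current action is not allowed")
+NV_STATUS_CODE(NV_ERR_IN_USE,                          0x00000017, "Generic busy error")
+NV_STATUS_CODE(NV_ERR_INFLATE_COMPRESSED_DATA_FAILED,  0x00000018, "Failed to inflate compressed data")
+NV_STATUS_CODE(NV_ERR_INSERT_DUPLICATE_NAME,           0x00000019, "Found a duplicate entry in the requested btree")
+NV_STATUS_CODE(NV_ERR_INSUFFICIENT_RESOURCES,          0x0000001A, "Ran out of a critical resource, other than memory")
+NV_STATUS_CODE(NV_ERR_INSUFFICIENT_PERMISSIONS,        0x0000001B, "The requester does not have sufficient permissions")
+NV_STATUS_CODE(NV_ERR_INSUFFICIENT_POWER,              0x0000001C, "Generic Error: Low power")
+NV_STATUS_CODE(NV_ERR_INVALID_ACCESS_TYPE,             0x0000001D, "This type of access is not allowed")
+NV_STATUS_CODE(NV_ERR_INVALID_ADDRESS,                 0x0000001E, "Address not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_ARGUMENT,                0x0000001F, "Invalid argument to call")
+NV_STATUS_CODE(NV_ERR_INVALID_BASE,                    0x00000020, "Invalid base")
+NV_STATUS_CODE(NV_ERR_INVALID_CHANNEL,                 0x00000021, "Given channel-id not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_CLASS,                   0x00000022, "Given class-id not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_CLIENT,                  0x00000023, "Given client not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_COMMAND,                 0x00000024, "Command passed is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_DATA,                    0x00000025, "Invalid data passed")
+NV_STATUS_CODE(NV_ERR_INVALID_DEVICE,                  0x00000026, "Current device is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_DMA_SPECIFIER,           0x00000027, "The requested DMA specifier is not valid")
+NV_STATUS_CODE(NV_ERR_INVALID_EVENT,                   0x00000028, "Invalid event occurred")
+NV_STATUS_CODE(NV_ERR_INVALID_FLAGS,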
0x00000029, "Invalid flags passed") +NV_STATUS_CODE(NV_ERR_INVALID_FUNCTION, 0x0000002A, "Called function is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_HEAP, 0x0000002B, "Heap corrupted") +NV_STATUS_CODE(NV_ERR_INVALID_INDEX, 0x0000002C, "Index invalid") +NV_STATUS_CODE(NV_ERR_INVALID_IRQ_LEVEL, 0x0000002D, "Requested IRQ level is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_LIMIT, 0x0000002E, "Generic Error: Invalid limit") +NV_STATUS_CODE(NV_ERR_INVALID_LOCK_STATE, 0x0000002F, "Requested lock state not valid") +NV_STATUS_CODE(NV_ERR_INVALID_METHOD, 0x00000030, "Requested method not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT, 0x00000031, "Object not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_BUFFER, 0x00000032, "Object buffer passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_HANDLE, 0x00000033, "Object handle is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_NEW, 0x00000034, "New object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_OLD, 0x00000035, "Old object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_PARENT, 0x00000036, "Object parent is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OFFSET, 0x00000037, "The offset passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OPERATION, 0x00000038, "Requested operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OWNER, 0x00000039, "Owner not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PARAM_STRUCT, 0x0000003A, "Invalid structure parameter") +NV_STATUS_CODE(NV_ERR_INVALID_PARAMETER, 0x0000003B, "At least one of the parameters passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PATH, 0x0000003C, "The requested path is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_POINTER, 0x0000003D, "Pointer not valid") +NV_STATUS_CODE(NV_ERR_INVALID_REGISTRY_KEY, 0x0000003E, "Found an invalid registry key") +NV_STATUS_CODE(NV_ERR_INVALID_REQUEST, 0x0000003F, "Generic Error: Invalid request") +NV_STATUS_CODE(NV_ERR_INVALID_STATE, 0x00000040, "Generic Error: Invalid state") +NV_STATUS_CODE(NV_ERR_INVALID_STRING_LENGTH, 0x00000041, "The string length is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_READ, 0x00000042, "The requested read operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_WRITE, 0x00000043, "The requested write operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_XLATE, 0x00000044, "The requested translate operation is not valid") +NV_STATUS_CODE(NV_ERR_IRQ_NOT_FIRING, 0x00000045, "Requested IRQ is not firing") +NV_STATUS_CODE(NV_ERR_IRQ_EDGE_TRIGGERED, 0x00000046, "IRQ is edge triggered") +NV_STATUS_CODE(NV_ERR_MEMORY_TRAINING_FAILED, 0x00000047, "Failed memory training sequence") +NV_STATUS_CODE(NV_ERR_MISMATCHED_SLAVE, 0x00000048, "Slave mismatch") +NV_STATUS_CODE(NV_ERR_MISMATCHED_TARGET, 0x00000049, "Target mismatch") +NV_STATUS_CODE(NV_ERR_MISSING_TABLE_ENTRY, 0x0000004A, "Requested entry missing not found in the table") +NV_STATUS_CODE(NV_ERR_MODULE_LOAD_FAILED, 0x0000004B, "Failed to load the requested module") +NV_STATUS_CODE(NV_ERR_MORE_DATA_AVAILABLE, 0x0000004C, "There is more data available") +NV_STATUS_CODE(NV_ERR_MORE_PROCESSING_REQUIRED, 0x0000004D, "More processing required for the given call") +NV_STATUS_CODE(NV_ERR_MULTIPLE_MEMORY_TYPES, 0x0000004E, "Multiple memory types found") +NV_STATUS_CODE(NV_ERR_NO_FREE_FIFOS, 0x0000004F, "No more free FIFOs found") +NV_STATUS_CODE(NV_ERR_NO_INTR_PENDING, 0x00000050, "No interrupt pending") +NV_STATUS_CODE(NV_ERR_NO_MEMORY, 0x00000051, "Out of memory") +NV_STATUS_CODE(NV_ERR_NO_SUCH_DOMAIN, 0x00000052, "Requested domain does not exist") 
+NV_STATUS_CODE(NV_ERR_NO_VALID_PATH, 0x00000053, "Caller did not specify a valid path") +NV_STATUS_CODE(NV_ERR_NOT_COMPATIBLE, 0x00000054, "Generic Error: Incompatible types") +NV_STATUS_CODE(NV_ERR_NOT_READY, 0x00000055, "Generic Error: Not ready") +NV_STATUS_CODE(NV_ERR_NOT_SUPPORTED, 0x00000056, "Call not supported") +NV_STATUS_CODE(NV_ERR_OBJECT_NOT_FOUND, 0x00000057, "Requested object not found") +NV_STATUS_CODE(NV_ERR_OBJECT_TYPE_MISMATCH, 0x00000058, "Specified objects do not match") +NV_STATUS_CODE(NV_ERR_OPERATING_SYSTEM, 0x00000059, "Generic operating system error") +NV_STATUS_CODE(NV_ERR_OTHER_DEVICE_FOUND, 0x0000005A, "Found other device instead of the requested one") +NV_STATUS_CODE(NV_ERR_OUT_OF_RANGE, 0x0000005B, "The specified value is out of bounds") +NV_STATUS_CODE(NV_ERR_OVERLAPPING_UVM_COMMIT, 0x0000005C, "Overlapping unified virtual memory commit") +NV_STATUS_CODE(NV_ERR_PAGE_TABLE_NOT_AVAIL, 0x0000005D, "Requested page table not available") +NV_STATUS_CODE(NV_ERR_PID_NOT_FOUND, 0x0000005E, "Process-Id not found") +NV_STATUS_CODE(NV_ERR_PROTECTION_FAULT, 0x0000005F, "Protection fault") +NV_STATUS_CODE(NV_ERR_RC_ERROR, 0x00000060, "Generic RC error") +NV_STATUS_CODE(NV_ERR_REJECTED_VBIOS, 0x00000061, "Given Video BIOS rejected/invalid") +NV_STATUS_CODE(NV_ERR_RESET_REQUIRED, 0x00000062, "Reset required") +NV_STATUS_CODE(NV_ERR_STATE_IN_USE, 0x00000063, "State in use") +NV_STATUS_CODE(NV_ERR_SIGNAL_PENDING, 0x00000064, "Signal pending") +NV_STATUS_CODE(NV_ERR_TIMEOUT, 0x00000065, "Call timed out") +NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY, 0x00000066, "Call timed out, please retry later") +NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES, 0x00000067, "Too many primaries") +NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE, 0x00000068, "Unified virtual memory requested address already in use") +NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED, 0x00000069, "Maximum number of sessions reached") +NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") //Contained within the RMAPI library +NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION, 0x0000006B, "Priv security violation") +NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE, 0x0000006C, "GPU currently in debug mode") +NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED, 0x0000006D, "Requested Feature functionality is not enabled") +NV_STATUS_CODE(NV_ERR_RESOURCE_LOST, 0x0000006E, "Requested resource has been destroyed") +NV_STATUS_CODE(NV_ERR_PMU_NOT_READY, 0x0000006F, "PMU is not ready or has not yet been initialized") +NV_STATUS_CODE(NV_ERR_FLCN_ERROR, 0x00000070, "Generic falcon assert or halt") +NV_STATUS_CODE(NV_ERR_FATAL_ERROR, 0x00000071, "Fatal/unrecoverable error") +NV_STATUS_CODE(NV_ERR_MEMORY_ERROR, 0x00000072, "Generic memory error") +NV_STATUS_CODE(NV_ERR_INVALID_LICENSE, 0x00000073, "License provided is rejected or invalid") +NV_STATUS_CODE(NV_ERR_NVLINK_INIT_ERROR, 0x00000074, "Nvlink Init Error") +NV_STATUS_CODE(NV_ERR_NVLINK_MINION_ERROR, 0x00000075, "Nvlink Minion Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CLOCK_ERROR, 0x00000076, "Nvlink Clock Error") +NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR, 0x00000077, "Nvlink Training Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Configuration Error") +NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt") + +// Warnings: +NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch") +NV_STATUS_CODE(NV_WARN_INCORRECT_PERFMON_DATA, 0x00010002, "WARNING Incorrect performance monitor data") 
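The NV_WARN_* codes in this block all carry the 0x00010000 bit, which is exactly what the NV_STATUS_LEVEL macro in nvstatus.h above tests for. A caller-side sketch of the '<=' comparison style that header recommends; logWarning and logError are hypothetical helpers, not part of this patch:

    #include "nvstatus.h"

    extern void logWarning(const char *msg);  /* hypothetical */
    extern void logError(const char *msg);    /* hypothetical */

    static void reportStatus(NV_STATUS status)
    {
        if (NV_STATUS_LEVEL(status) == NV_STATUS_LEVEL_OK)
            return;
        /* '<=' stays correct if a level below _ERR is added later. */
        if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN)
            logWarning(nvstatusToString(status));
        else
            logError(nvstatusToString(status));
    }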
+NV_STATUS_CODE(NV_WARN_MISMATCHED_SLAVE, 0x00010003, "WARNING Slave mismatch") +NV_STATUS_CODE(NV_WARN_MISMATCHED_TARGET, 0x00010004, "WARNING Target mismatch") +NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More processing required for the call") +NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do") +NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found") +NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range") + +#endif /* SDK_NVSTATUSCODES_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h new file mode 100644 index 0000000..53a60f9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h @@ -0,0 +1,625 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NVTYPES_INCLUDED +#define NVTYPES_INCLUDED + +#ifdef __cplusplus +extern "C" { +#endif + +#include "cpuopsys.h" + +#ifndef NVTYPES_USE_STDINT +#define NVTYPES_USE_STDINT 0 +#endif + +#if NVTYPES_USE_STDINT +#ifdef __cplusplus +#include <cstdint> +#include <cinttypes> +#else +#include <stdint.h> +#include <inttypes.h> +#endif // __cplusplus +#endif // NVTYPES_USE_STDINT + +#ifndef __cplusplus +// Header includes to make sure wchar_t is defined for C-file compilation +// (C++ is not affected as it is a fundamental type there) +// _MSC_VER is a hack to avoid failures for old setup of UEFI builds which are +// currently set to msvc100 but do not properly set the include paths +#endif // __cplusplus + +#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__) +// ensure or force 8-bytes alignment of NV 64-bit types +#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8))) +#else +// nothing needed +#define OPTIONAL_ALIGN8_ATTR +#endif // MAKE_NV64TYPES_8BYTES_ALIGNED && i386 + + /***************************************************************************\ +|* Typedefs *| + \***************************************************************************/ + +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +//Typedefs for MISRA COMPLIANCE +typedef unsigned long long UInt64; +typedef signed long long Int64; +typedef unsigned int UInt32; +typedef signed int Int32; +typedef unsigned short UInt16; +typedef signed short Int16; +typedef unsigned char UInt8 ; +typedef signed char Int8 ; + +typedef void Void; +typedef float float32_t; +typedef double float64_t; +#endif + + +// Floating point types +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef float32_t NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef float64_t NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#else +typedef float NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef double NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#endif + + +// 8-bit: 'char' is the only 8-bit in the C89 standard and after. +#if NVTYPES_USE_STDINT +typedef uint8_t NvV8; /* "void": enumerated or multiple fields */ +typedef uint8_t NvU8; /* 0 to 255 */ +typedef int8_t NvS8; /* -128 to 127 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt8 NvV8; /* "void": enumerated or multiple fields */ +typedef UInt8 NvU8; /* 0 to 255 */ +typedef Int8 NvS8; /* -128 to 127 */ +#else +typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ +typedef unsigned char NvU8; /* 0 to 255 */ +typedef signed char NvS8; /* -128 to 127 */ +#endif +#endif // NVTYPES_USE_STDINT + + +#if NVTYPES_USE_STDINT +typedef uint16_t NvV16; /* "void": enumerated or multiple fields */ +typedef uint16_t NvU16; /* 0 to 65535 */ +typedef int16_t NvS16; /* -32768 to 32767 */ +#else +// 16-bit: If the compiler tells us what we can use, then use it.
+#ifdef __INT16_TYPE__ +typedef unsigned __INT16_TYPE__ NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned __INT16_TYPE__ NvU16; /* 0 to 65535 */ +typedef signed __INT16_TYPE__ NvS16; /* -32768 to 32767 */ + +// The minimal standard for C89 and after +#else // __INT16_TYPE__ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt16 NvV16; /* "void": enumerated or multiple fields */ +typedef UInt16 NvU16; /* 0 to 65535 */ +typedef Int16 NvS16; /* -32768 to 32767 */ +#else +typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned short NvU16; /* 0 to 65535 */ +typedef signed short NvS16; /* -32768 to 32767 */ +#endif +#endif // __INT16_TYPE__ +#endif // NVTYPES_USE_STDINT + +// wchar type (fixed size types consistent across Linux/Windows boundaries) +#if defined(NV_HAS_WCHAR_T_TYPEDEF) + typedef wchar_t NvWchar; +#else + typedef NvV16 NvWchar; +#endif + +// Macro to build an NvU32 from four bytes, listed from msb to lsb +#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) + +#if NVTYPES_USE_STDINT +typedef uint32_t NvV32; /* "void": enumerated or multiple fields */ +typedef uint32_t NvU32; /* 0 to 4294967295 */ +typedef int32_t NvS32; /* -2147483648 to 2147483647 */ +#else +// 32-bit: If the compiler tells us what we can use, then use it. +#ifdef __INT32_TYPE__ +typedef unsigned __INT32_TYPE__ NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned __INT32_TYPE__ NvU32; /* 0 to 4294967295 */ +typedef signed __INT32_TYPE__ NvS32; /* -2147483648 to 2147483647 */ + +// Older compilers +#else // __INT32_TYPE__ + +// For historical reasons, NvU32/NvV32 are defined to different base intrinsic +// types than NvS32 on some platforms. +// Mainly for 64-bit linux, where long is 64 bits and win9x, where int is 16 bit. +#if (defined(NV_UNIX) || defined(vxworks) || defined(NV_WINDOWS_CE) || \ + defined(__arm) || defined(__IAR_SYSTEMS_ICC__) || defined(NV_QNX) || \ + defined(NV_INTEGRITY) || defined(NV_MODS) || \ + defined(__GNUC__) || defined(__clang__) || defined(NV_MACINTOSH_64)) && \ + (!defined(NV_MACINTOSH) || defined(NV_MACINTOSH_64)) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt32 NvV32; /* "void": enumerated or multiple fields */ +typedef UInt32 NvU32; /* 0 to 4294967295 */ +#else +typedef unsigned int NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned int NvU32; /* 0 to 4294967295 */ +#endif + +// The minimal standard for C89 and after +#else // (defined(NV_UNIX) || defined(vxworks) || ... +typedef unsigned long NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned long NvU32; /* 0 to 4294967295 */ +#endif // (defined(NV_UNIX) || defined(vxworks) || ... 
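NvU32_BUILD above packs four byte values into an NvU32, most-significant byte first. A quick usage sketch; exampleTag and the 'NVDA' characters are arbitrary illustrations:

    #include "nvtypes.h"

    /* Pack a four-character tag, msb first: yields 0x4E564441. */
    static NvU32 exampleTag(void)
    {
        return NvU32_BUILD('N', 'V', 'D', 'A');
    }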
+ +// Mac OS 32-bit still needs this +#if defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +typedef signed long NvS32; /* -2147483648 to 2147483647 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef Int32 NvS32; /* -2147483648 to 2147483647 */ +#else +typedef signed int NvS32; /* -2147483648 to 2147483647 */ +#endif +#endif // defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +#endif // __INT32_TYPE__ +#endif // NVTYPES_USE_STDINT + + + +#if NVTYPES_USE_STDINT +typedef uint64_t NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef int64_t NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX PRIX64 +#define NvU64_fmtx PRIx64 +#define NvU64_fmtu PRIu64 +#define NvU64_fmto PRIo64 +#define NvS64_fmtd PRId64 +#define NvS64_fmti PRIi64 +#else +// 64-bit types for compilers that support them, plus some obsolete variants +#if defined(__GNUC__) || defined(__clang__) || defined(__arm) || \ + defined(__IAR_SYSTEMS_ICC__) || defined(__ghs__) || defined(_WIN64) || \ + defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined (__xlC__) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef Int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#else +typedef unsigned long long NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef long long NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#endif + +#define NvU64_fmtX "llX" +#define NvU64_fmtx "llx" +#define NvU64_fmtu "llu" +#define NvU64_fmto "llo" +#define NvS64_fmtd "lld" +#define NvS64_fmti "lli" + +// Microsoft since 2003 -- https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx +#else +typedef unsigned __int64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef __int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX "I64X" +#define NvU64_fmtx "I64x" +#define NvU64_fmtu "I64u" +#define NvU64_fmto "I64o" +#define NvS64_fmtd "I64d" +#define NvS64_fmti "I64i" + +#endif +#endif // NVTYPES_USE_STDINT + +#ifdef NV_TYPESAFE_HANDLES +/* + * Can't use opaque pointer as clients might be compiled with mismatched + * pointer sizes. TYPESAFE check will eventually be removed once all clients + * have transitioned safely to NvHandle. + * The plan is to then eventually scale up the handle to be 64-bits. + */ +typedef struct +{ + NvU32 val; +} NvHandle; +#else +/* + * For compatibility with modules that haven't moved to typesafe handles.
+ */ +typedef NvU32 NvHandle; +#endif // NV_TYPESAFE_HANDLES + +/* Boolean type */ +typedef NvU8 NvBool; +#define NV_TRUE ((NvBool)(0 == 0)) +#define NV_FALSE ((NvBool)(0 != 0)) + +/* Tristate type: NV_TRISTATE_FALSE, NV_TRISTATE_TRUE, NV_TRISTATE_INDETERMINATE */ +typedef NvU8 NvTristate; +#define NV_TRISTATE_FALSE ((NvTristate) 0) +#define NV_TRISTATE_TRUE ((NvTristate) 1) +#define NV_TRISTATE_INDETERMINATE ((NvTristate) 2) + +/* Macros to extract the low and high parts of a 64-bit unsigned integer */ +/* Also designed to work if someone happens to pass in a 32-bit integer */ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffffU)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffffU)) +#else +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffff)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffff)) +#endif +#define NvU40_HI32(n) ((NvU32)((((NvU64)(n)) >> 8) & 0xffffffffU)) +#define NvU40_HI24of32(n) ((NvU32)( (NvU64)(n) & 0xffffff00U)) + +/* Macros to get the MSB and LSB of a 32 bit unsigned number */ +#define NvU32_HI16(n) ((NvU16)((((NvU32)(n)) >> 16) & 0xffffU)) +#define NvU32_LO16(n) ((NvU16)(( (NvU32)(n)) & 0xffffU)) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +#if defined(NV_64_BITS) + +typedef void* NvP64; /* 64 bit void pointer */ +typedef NvU64 NvUPtr; /* pointer sized unsigned int */ +typedef NvS64 NvSPtr; /* pointer sized signed int */ +typedef NvU64 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) (n) +#define NvP64_fmt "%p" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(v)) +#define NvP64_PLUS_OFFSET(p,o) (NvP64)((NvU64)(p) + (NvU64)(o)) + +#define NvUPtr_fmtX NvU64_fmtX +#define NvUPtr_fmtx NvU64_fmtx +#define NvUPtr_fmtu NvU64_fmtu +#define NvUPtr_fmto NvU64_fmto +#define NvSPtr_fmtd NvS64_fmtd +#define NvSPtr_fmti NvS64_fmti + +#else + +typedef NvU64 NvP64; /* 64 bit void pointer */ +typedef NvU32 NvUPtr; /* pointer sized unsigned int */ +typedef NvS32 NvSPtr; /* pointer sized signed int */ +typedef NvU32 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) ((void *)(NvUPtr)(n)) +#define NvP64_fmt "0x%llx" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(NvUPtr)(v)) +#define NvP64_PLUS_OFFSET(p,o) ((p) + (NvU64)(o)) + +#define NvUPtr_fmtX "X" +#define NvUPtr_fmtx "x" +#define NvUPtr_fmtu "u" +#define NvUPtr_fmto "o" +#define NvSPtr_fmtd "d" +#define NvSPtr_fmti "i" + +#endif + +#define NvP64_NULL (NvP64)0 + +/*! + * Helper macro to pack an @ref NvU64_ALIGN32 structure from a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64_ALIGN32 structure to pack + * @param[in] pSrc Pointer to NvU64 with which to pack + */ +#define NvU64_ALIGN32_PACK(pDst, pSrc) \ +do { \ + (pDst)->lo = NvU64_LO32(*(pSrc)); \ + (pDst)->hi = NvU64_HI32(*(pSrc)); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure into a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64 in which to unpack + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure from which to unpack + */ +#define NvU64_ALIGN32_UNPACK(pDst, pSrc) \ +do { \ + (*(pDst)) = NvU64_ALIGN32_VAL(pSrc); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure as a @ref NvU64. 
+ * + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure to unpack + */ +#define NvU64_ALIGN32_VAL(pSrc) \ + ((NvU64) ((NvU64)((pSrc)->lo) | (((NvU64)(pSrc)->hi) << 32U))) + +/*! + * Helper macro to check whether the 32 bit aligned 64 bit number is zero. + * + * @param[in] _pU64 Pointer to NvU64_ALIGN32 structure. + * + * @return + * NV_TRUE if _pU64 is zero. + * NV_FALSE otherwise. + */ +#define NvU64_ALIGN32_IS_ZERO(_pU64) \ + (((_pU64)->lo == 0U) && ((_pU64)->hi == 0U)) + +/*! + * Helper macro to add two 32 aligned 64 bit numbers on 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_ADD(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __src2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__src2, (pSrc2)); \ + __dst = __src1 + __src2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Helper macro to sub two 32 aligned 64 bit numbers on 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_SUB(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __src2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__src2, (pSrc2)); \ + __dst = __src1 - __src2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Structure for representing 32 bit aligned NvU64 (64-bit unsigned integer) + * structures. This structure must be used because the 32 bit processor and + * 64 bit processor compilers will pack/align NvU64 differently. + * + * One use case is RM being 64 bit proc whereas PMU being 32 bit proc, this + * alignment difference will result in corrupted transactions between the RM + * and PMU. + * + * See the @ref NvU64_ALIGN32_PACK and @ref NvU64_ALIGN32_UNPACK macros for + * packing and unpacking these structures. + * + * @note The intention of this structure is to provide a datatype which will + * be packed/aligned consistently and efficiently across all platforms. + * We don't want to use "NV_DECLARE_ALIGNED(NvU64, 8)" because that + * leads to memory waste on our 32-bit uprocessors (e.g. FALCONs) where + * DMEM efficiency is vital. + */ +typedef struct +{ + /*! + * Low 32 bits. + */ + NvU32 lo; + /*! + * High 32 bits. + */ + NvU32 hi; +} NvU64_ALIGN32; + +/* Useful macro to hide required double cast */ +#define NV_PTR_TO_NvP64(n) (NvP64)(NvUPtr)(n) +#define NV_SIGN_EXT_PTR_TO_NvP64(p) ((NvP64)(NvS64)(NvSPtr)(p)) +#define KERNEL_POINTER_TO_NvP64(p) ((NvP64)(uintptr_t)(p)) + + /***************************************************************************\ +|* *| +|* Limits for common types. *| +|* *| + \***************************************************************************/ + +/* Explanation of the current form of these limits: + * + * - Decimal is used, as hex values are by default positive. + * - Casts are not used, as usage in the preprocessor itself (#if) ends poorly. + * - The subtraction of 1 for some MIN values is used to get around the fact + * that the C syntax actually treats -x as NEGATE(x) instead of a distinct + * number. Since 2147483648 isn't a valid positive 32-bit signed value, we + * take the largest valid positive signed number, negate it, and subtract 1.
+ */ +#define NV_S8_MIN (-128) +#define NV_S8_MAX (+127) +#define NV_U8_MIN (0U) +#define NV_U8_MAX (+255U) +#define NV_S16_MIN (-32768) +#define NV_S16_MAX (+32767) +#define NV_U16_MIN (0U) +#define NV_U16_MAX (+65535U) +#define NV_S32_MIN (-2147483647 - 1) +#define NV_S32_MAX (+2147483647) +#define NV_U32_MIN (0U) +#define NV_U32_MAX (+4294967295U) +#define NV_S64_MIN (-9223372036854775807LL - 1LL) +#define NV_S64_MAX (+9223372036854775807LL) +#define NV_U64_MIN (0ULL) +#define NV_U64_MAX (+18446744073709551615ULL) + +/* Aligns fields in structs so they match up between 32 and 64 bit builds */ +#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX) +#define NV_ALIGN_BYTES(size) __attribute__ ((aligned (size))) +#elif defined(__arm) +#define NV_ALIGN_BYTES(size) __align(size) +#else +// XXX This is dangerously nonportable! We really shouldn't provide a default +// version of this that doesn't do anything. +#define NV_ALIGN_BYTES(size) +#endif + +// NV_DECLARE_ALIGNED() can be used on all platforms. +// This macro form accounts for the fact that __declspec on Windows is required +// before the variable type, +// and NV_ALIGN_BYTES is required after the variable name. +#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX) +#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) TYPE_VAR __attribute__ ((aligned (ALIGN))) +#elif defined(__arm) +#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __align(ALIGN) TYPE_VAR +#endif + + /***************************************************************************\ +|* Function Declaration Types *| + \***************************************************************************/ + +// stretching the meaning of "nvtypes", but this seems the least offensive +// place to re-locate these from nvos.h which cannot be included by a number +// of builds that need them + + #if defined(__GNUC__) + #if (__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) && (__GNUC_PATCHLEVEL__ >= 1)) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + #elif defined(__clang__) + #if __has_attribute(noinline) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 300000) + #define NV_NOINLINE __attribute__((__noinline__)) + #elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) ||\ + (defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x590)) + #define NV_NOINLINE __attribute__((__noinline__)) + #elif defined (__INTEL_COMPILER) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + + #if !defined(NV_NOINLINE) + #define NV_NOINLINE + #endif + + /* GreenHills compiler defines __GNUC__, but doesn't support + * __inline__ keyword. */ + #if defined(__ghs__) + #define NV_INLINE inline + #elif defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_INLINE __inline__ + #elif defined (macintosh) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) + #define NV_INLINE inline + #elif defined(__arm) + #define NV_INLINE __inline + #else + #define NV_INLINE + #endif + + /* Don't force inline on DEBUG builds -- it's annoying for debuggers. */ + #if !defined(DEBUG) + /* GreenHills compiler defines __GNUC__, but doesn't support + * __attribute__ or __inline__ keyword. */ + #if defined(__ghs__) + #define NV_FORCEINLINE inline + #elif defined(__GNUC__) + // GCC 3.1 and beyond support the always_inline function attribute.
+ #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__clang__) + #if __has_attribute(always_inline) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 220000) + // RVDS 2.2 also supports forceinline, but ADS 1.2 does not + #define NV_FORCEINLINE __forceinline + #else /* defined(__GNUC__) */ + #define NV_FORCEINLINE NV_INLINE + #endif + #else + #define NV_FORCEINLINE NV_INLINE + #endif + + #define NV_APIENTRY + #define NV_FASTCALL + #define NV_CDECLCALL + #define NV_STDCALL + + /* + * The 'warn_unused_result' function attribute prompts GCC to issue a + * warning if the result of a function tagged with this attribute + * is ignored by a caller. In combination with '-Werror', it can be + * used to enforce result checking in RM code; at this point, this + * is only done on UNIX. + */ + #if defined(__GNUC__) && defined(NV_UNIX) + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #elif defined(__clang__) + #if __has_attribute(warn_unused_result) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #else /* defined(__GNUC__) */ + #define NV_FORCERESULTCHECK + #endif + + #if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_ATTRIBUTE_UNUSED __attribute__((__unused__)) + #else + #define NV_ATTRIBUTE_UNUSED + #endif + + /* + * Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at + * parameter number 'f' and variadic arguments start at parameter number 'a'. + * (Note that for C++ methods, there is an implicit 'this' parameter so + * explicit parameters are numbered from 2.) + */ + #if defined(__GNUC__) + #define NV_FORMAT_PRINTF(_f, _a) __attribute__((format(printf, _f, _a))) + #else + #define NV_FORMAT_PRINTF(_f, _a) + #endif + +#ifdef __cplusplus +} +#endif + +#endif /* NVTYPES_INCLUDED */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h new file mode 100644 index 0000000..7c71a78 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h @@ -0,0 +1,272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include <nvtypes.h> + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: rs_access.finn +// + + + + +#include "nvtypes.h" +#include "nvmisc.h" + + +/****************************************************************************/ +/* Access right definitions */ +/****************************************************************************/ + +// +// The meaning of each access right is documented in +// resman/docs/rmapi/resource_server/rm_capabilities.adoc +// +// RS_ACCESS_COUNT is the number of access rights that have been defined +// and are in use. All integers in the range [0, RS_ACCESS_COUNT) should +// represent valid access rights. +// +// When adding a new access right, don't forget to update +// 1) The descriptions in the resman/docs/rmapi/resource_server/rm_capabilities.adoc +// 2) RS_ACCESS_COUNT, defined below +// 3) The declaration of g_rsAccessMetadata in rs_access_rights.c +// 4) The list of access rights in drivers/common/chip-config/Chipcontrols.pm +// 5) Any relevant access right callbacks +// + +#define RS_ACCESS_DUP_OBJECT 0U +#define RS_ACCESS_NICE 1U +#define RS_ACCESS_DEBUG 2U +#define RS_ACCESS_COUNT 3U + + +/****************************************************************************/ +/* Access right data structures */ +/****************************************************************************/ + +/*! + * @brief A type that can be used to represent any access right. + */ +typedef NvU16 RsAccessRight; + +/*! + * @brief An internal type used to represent one limb in an access right mask. + */ +typedef NvU32 RsAccessLimb; +#define SDK_RS_ACCESS_LIMB_BITS 32 + +/*! + * @brief The number of limbs in the RS_ACCESS_MASK struct. + */ +#define SDK_RS_ACCESS_MAX_LIMBS 1 + +/*! + * @brief The maximum number of possible access rights supported by the + * current data structure definition. + * + * You probably want RS_ACCESS_COUNT instead, which is the number of actual + * access rights defined. + */ +#define SDK_RS_ACCESS_MAX_COUNT (0x20) /* finn: Evaluated from "(SDK_RS_ACCESS_LIMB_BITS * SDK_RS_ACCESS_MAX_LIMBS)" */ + +/** + * @brief A struct representing a set of access rights. + * + * Note that the values of bit positions larger than RS_ACCESS_COUNT are + * undefined, and should not be assumed to be 0 (see RS_ACCESS_MASK_FILL). + */ +typedef struct RS_ACCESS_MASK { + RsAccessLimb limbs[SDK_RS_ACCESS_MAX_LIMBS]; +} RS_ACCESS_MASK; + +/** + * @brief A struct representing auxiliary information about each access right. + */ +typedef struct RS_ACCESS_INFO { + NvU32 flags; +} RS_ACCESS_INFO; + + +/****************************************************************************/ +/* Access right macros */ +/****************************************************************************/ + +#define SDK_RS_ACCESS_LIMB_INDEX(index) ((index) / SDK_RS_ACCESS_LIMB_BITS) +#define SDK_RS_ACCESS_LIMB_POS(index) ((index) % SDK_RS_ACCESS_LIMB_BITS) + +#define SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) \ + ((pAccessMask)->limbs[SDK_RS_ACCESS_LIMB_INDEX(index)]) +#define SDK_RS_ACCESS_OFFSET_MASK(index) \ + NVBIT_TYPE(SDK_RS_ACCESS_LIMB_POS(index), RsAccessLimb) + +/*! + * @brief Checks that accessRight represents a valid access right.
+ * + * The valid range of access rights is [0, RS_ACCESS_COUNT). + * + * @param[in] accessRight The access right value to check + * + * @return true if accessRight is valid + * @return false otherwise + */ +#define RS_ACCESS_BOUNDS_CHECK(accessRight) \ + (accessRight < RS_ACCESS_COUNT) + +/*! + * @brief Test whether an access right is present in a set + * + * @param[in] pAccessMask The set of access rights to read + * @param[in] index The access right to examine + * + * @return NV_TRUE if the access right specified by index was present in the set, + * and NV_FALSE otherwise + */ +#define RS_ACCESS_MASK_TEST(pAccessMask, index) \ + (RS_ACCESS_BOUNDS_CHECK(index) && \ + (SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) & SDK_RS_ACCESS_OFFSET_MASK(index)) != 0) + +/*! + * @brief Add an access right to a mask + * + * @param[in] pAccessMask The set of access rights to modify + * @param[in] index The access right to set + */ +#define RS_ACCESS_MASK_ADD(pAccessMask, index) \ + do \ + { \ + if (RS_ACCESS_BOUNDS_CHECK(index)) { \ + SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) |= SDK_RS_ACCESS_OFFSET_MASK(index); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Remove an access right from a mask + * + * @param[in] pAccessMask The set of access rights to modify + * @param[in] index The access right to unset + */ +#define RS_ACCESS_MASK_REMOVE(pAccessMask, index) \ + do \ + { \ + if (RS_ACCESS_BOUNDS_CHECK(index)) { \ + SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) &= ~SDK_RS_ACCESS_OFFSET_MASK(index); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Performs an in-place union between two access right masks + * + * @param[in,out] pMaskOut The access rights mask to be updated + * @param[in] pMaskIn The set of access rights to be added to pMaskOut + */ +#define RS_ACCESS_MASK_UNION(pMaskOut, pMaskIn) \ + do \ + { \ + NvLength limb; \ + for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \ + { \ + SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) |= SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Performs an in-place subtract of one mask's rights from another + * + * @param[in,out] pMaskOut The access rights mask to be updated + * @param[in] pMaskIn The set of access rights to be removed from pMaskOut + */ +#define RS_ACCESS_MASK_SUBTRACT(pMaskOut, pMaskIn) \ + do \ + { \ + NvLength limb; \ + for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \ + { \ + SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) &= ~SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Removes all rights from an access rights mask + * + * @param[in,out] pAccessMask The access rights mask to be updated + */ +#define RS_ACCESS_MASK_CLEAR(pAccessMask) \ + do \ + { \ + portMemSet(pAccessMask, 0, sizeof(*pAccessMask)); \ + } while (NV_FALSE) + +/*! 
+ * @brief Adds all rights to an access rights mask + * + * @param[in,out] pAccessMask The access rights mask to be updated + */ +#define RS_ACCESS_MASK_FILL(pAccessMask) \ + do \ + { \ + portMemSet(pAccessMask, 0xff, sizeof(*pAccessMask)); \ + } while (NV_FALSE) + + +/****************************************************************************/ +/* Share definitions */ +/****************************************************************************/ + +// +// The usage of Share Policy and the meaning of each share type is documented in +// resman/docs/rmapi/resource_server/rm_capabilities.adoc +// +#define RS_SHARE_TYPE_NONE (0U) +#define RS_SHARE_TYPE_ALL (1U) +#define RS_SHARE_TYPE_OS_SECURITY_TOKEN (2U) +#define RS_SHARE_TYPE_CLIENT (3U) +#define RS_SHARE_TYPE_PID (4U) +#define RS_SHARE_TYPE_SMC_PARTITION (5U) +#define RS_SHARE_TYPE_GPU (6U) +#define RS_SHARE_TYPE_FM_CLIENT (7U) +// Must be last. Update when a new SHARE_TYPE is added +#define RS_SHARE_TYPE_MAX (8U) + + +// +// Use Revoke to remove an existing policy from the list. +// Allow is based on OR logic, Require is based on AND logic. +// To share a right, at least one Allow (non-Require) must match, and all Require must pass. +// If Compose is specified, policies will be added to the list. Otherwise, they will replace the list. +// +#define RS_SHARE_ACTION_FLAG_REVOKE NVBIT(0) +#define RS_SHARE_ACTION_FLAG_REQUIRE NVBIT(1) +#define RS_SHARE_ACTION_FLAG_COMPOSE NVBIT(2) + +/****************************************************************************/ +/* Share flag data structures */ +/****************************************************************************/ + +typedef struct RS_SHARE_POLICY { + NvU32 target; + RS_ACCESS_MASK accessMask; + NvU16 type; ///< RS_SHARE_TYPE_ + NvU8 action; ///< RS_SHARE_ACTION_ +} RS_SHARE_POLICY; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c b/NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c new file mode 100644 index 0000000..e377dd9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
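The rs_access.h mask macros above are typically composed when filling in an RS_SHARE_POLICY. A minimal sketch under the assumption that portMemSet (used by RS_ACCESS_MASK_CLEAR) is provided by the RM porting layer; buildDebugSharePolicy is a hypothetical helper:

    #include "rs_access.h"

    /* Share only the DEBUG right with a single process, appending to the policy list. */
    static RS_SHARE_POLICY buildDebugSharePolicy(NvU32 pid)
    {
        RS_SHARE_POLICY policy;

        RS_ACCESS_MASK_CLEAR(&policy.accessMask);      /* start from an empty mask */
        RS_ACCESS_MASK_ADD(&policy.accessMask, RS_ACCESS_DEBUG);

        policy.target = pid;
        policy.type   = RS_SHARE_TYPE_PID;
        policy.action = RS_SHARE_ACTION_FLAG_COMPOSE;  /* append rather than replace */
        return policy;
    }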
+ */ + +#include "nvstatus.h" + +#if !defined(NV_PRINTF_STRING_SECTION) +#if defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging"))) +#else // defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION +#endif // defined(NVRM) && NVCPU_IS_RISCV64 +#endif // !defined(NV_PRINTF_STRING_SECTION) + +/* + * Include nvstatuscodes.h twice. Once for creating constant strings in the + * the NV_PRINTF_STRING_SECTION section of the ececutable, and once to build + * the g_StatusCodeList table. + */ +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) static NV_PRINTF_STRING_SECTION \ + const char rm_pvt_##name##_str[] = string " [" #name "]"; +#include "nvstatuscodes.h" + +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) { name, rm_pvt_##name##_str }, +static struct NvStatusCodeString +{ + NV_STATUS statusCode; + const char *statusString; +} g_StatusCodeList[] = { + #include "nvstatuscodes.h" + { 0xffffffff, "Unknown error code!" } // Some compilers don't like the trailing ',' +}; +#undef NV_STATUS_CODE + +/*! + * @brief Given an NV_STATUS code, returns the corresponding status string. + * + * @param[in] nvStatusIn NV_STATUS code for which the string is required + * + * @returns Corresponding status string from the nvstatuscodes.h + * + * TODO: Bug 200025711: convert this to an array-indexed lookup, instead of a linear search + * +*/ +const char *nvstatusToString(NV_STATUS nvStatusIn) +{ + static NV_PRINTF_STRING_SECTION const char rm_pvt_UNKNOWN_str[] = "Unknown error code!"; + NvU32 i; + NvU32 n = ((NvU32)(sizeof(g_StatusCodeList))/(NvU32)(sizeof(g_StatusCodeList[0]))); + for (i = 0; i < n; i++) + { + if (g_StatusCodeList[i].statusCode == nvStatusIn) + { + return g_StatusCodeList[i].statusString; + } + } + + return rm_pvt_UNKNOWN_str; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/COPYING.txt b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/COPYING.txt new file mode 100644 index 0000000..b577946 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/COPYING.txt @@ -0,0 +1,37 @@ + +License for Berkeley SoftFloat Release 3d + +John R. Hauser +2017 August 10 + +The following applies to the whole of SoftFloat Release 3d as well as to +each source file individually. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h new file mode 100644 index 0000000..51680ab --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_SOFTFLOAT_H__ +#define __NV_SOFTFLOAT_H__ + +/* + * This header file provides utility code built on top of the softfloat floating + * point emulation library. + */ + +#include "softfloat.h" +#include "nvtypes.h" +#include "platform.h" + +/* + * float32_t stores the bit pattern for a 32-bit single-precision IEEE floating + * point value in a structure containing a uint32_t: + * + * typedef struct { uint32_t v; } float32_t; + * + * In some cases, clients pass in a 32-bit single-precision IEEE floating + * point value in an NvU32. + * + * Define functions to change the "view" between an NvU32 and a float32_t. + */ +INLINE float32_t NvU32viewAsF32(NvU32 u) +{ + float32_t f = { .v = u }; + return f; +} + +INLINE NvU32 F32viewAsNvU32(float32_t f) +{ + return f.v; +} + +/* + * Convert the value of a float32_t to an NvU16. + * + * The conversion requires several steps: + * + * - Clamp the float32_t value to the [0,NV_U16_MAX] range of NvU16. + * + * - Use softfloat to convert the float32_t to ui32, with appropriate rounding. + * + * - Due to the clamping and rounding above, the value in the ui32 should be in + * the range of NvU16 and can be safely returned as NvU16. + */ +INLINE NvU16 F32toNvU16(float32_t f) +{ + const float32_t minF32 = NvU32viewAsF32(0); + const float32_t maxF32 = ui32_to_f32(NV_U16_MAX); + NvU32 u; + + /* clamp to zero: f = (f < minF32) ? minF32 : f */ + f = f32_lt(f, minF32) ?
minF32 : f; + + /* clamp to NV_U16_MAX: f = (maxF32 < f) ? maxF32 : f */ + f = f32_lt(maxF32, f) ? maxF32 : f; + + /* + * The "_r_minMag" in "f32_to_ui32_r_minMag" means round "to minimum + * magnitude" (i.e., round towards zero). + * + * The "exact = FALSE" argument means do not raise the inexact exception + * flag, even if the conversion is inexact. + * + * For more on f32_to_ui32_r_minMag() semantics, see + * drivers/common/softfloat/doc/SoftFloat.html + */ + u = f32_to_ui32_r_minMag(f, NV_FALSE /* exact */); + nvAssert(u <= NV_U16_MAX); + + return (NvU16) u; +} + +/* + * Perform the following with float32_t: (a * b) + (c * d) + e + */ +INLINE float32_t F32_AxB_plus_CxD_plus_E( + float32_t a, + float32_t b, + float32_t c, + float32_t d, + float32_t e) +{ + const float32_t tmpA = f32_mul(a, b); + const float32_t tmpB = f32_mul(c, d); + const float32_t tmpC = f32_add(tmpA, tmpB); + + return f32_add(tmpC, e); +} + +/* + * Perform the following with float32_t: (a * b) - (c * d) + */ +INLINE float32_t F32_AxB_minus_CxD( + float32_t a, + float32_t b, + float32_t c, + float32_t d) +{ + const float32_t tmpA = f32_mul(a, b); + const float32_t tmpB = f32_mul(c, d); + + return f32_sub(tmpA, tmpB); +} + +/* + * Perform the following with float64_t: a * -1 + */ +INLINE float64_t F64_negate(float64_t a) +{ + const float64_t negOneF64 = i32_to_f64(-1); + return f64_mul(negOneF64, a); +} + +INLINE float16_t nvUnormToFp16(NvU16 unorm, float32_t maxf) +{ + const float32_t unormf = ui32_to_f32(unorm); + const float32_t normf = f32_div(unormf, maxf); + + return f32_to_f16(normf); +} + +INLINE float16_t nvUnorm10ToFp16(NvU16 unorm10) +{ + const float32_t maxf = NvU32viewAsF32(0x44800000U); // 1024.0f + return nvUnormToFp16(unorm10, maxf); +} + +INLINE float32_t f32_min(float32_t a, float32_t b) +{ + return (f32_lt(a, b)) ? a : b; +} + +INLINE float32_t f32_max(float32_t a, float32_t b) +{ + return (f32_lt(a, b)) ? b : a; +} + +#endif /* __NV_SOFTFLOAT_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h new file mode 100644 index 0000000..f6db383 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
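Because nv-softfloat.h above works entirely in emulated float32_t values, a typical caller converts raw IEEE bits in, computes, and converts back out. A small sketch using the helpers just defined; scaleU16 is a hypothetical function and 0x3F800000 is simply the bit pattern of 1.0f:

    #include "nv-softfloat.h"

    /* Scale a 16-bit value by an f32 gain given as raw IEEE-754 bits,
     * clamping the result to the NvU16 range. */
    static NvU16 scaleU16(NvU16 value, NvU32 gainBits)
    {
        const float32_t gain = NvU32viewAsF32(gainBits); /* e.g. 0x3F800000 == 1.0f */
        const float32_t in   = ui32_to_f32(value);

        return F32toNvU16(f32_mul(in, gain));
    }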
+ */ + +#ifndef nvidia_softfloat_platform_h +#define nvidia_softfloat_platform_h 1 + +#include "nvtypes.h" + +/* + * Build softfloat for little endian CPUs: all NVIDIA target platforms are + * little endian. + */ +#define LITTLEENDIAN 1 + +/* + * "INLINE" is used by softfloat like this: + * + * INLINE uint32_t softfloat_foo(...) + * { + * ... + * } + */ +#define INLINE static NV_INLINE + +#if !defined(nvAssert) +#define nvAssert(x) +#endif + +/* + * softfloat will use THREAD_LOCAL to tag variables that should be per-thread; + * it could be set to, e.g., gcc's "__thread" keyword. If THREAD_LOCAL is left + * undefined, these variables will default to being ordinary global variables. + */ +#undef THREAD_LOCAL + +#endif /* nvidia_softfloat_platform_h */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c new file mode 100644 index 0000000..cc73833 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/ +uint_fast16_t softfloat_commonNaNToF16UI( const struct commonNaN *aPtr ) +{ + + return (uint_fast16_t) aPtr->sign<<15 | 0x7E00 | aPtr->v64>>54; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c new file mode 100644 index 0000000..278cdcf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_commonNaNToF32UI( const struct commonNaN *aPtr )
+{
+
+    return (uint_fast32_t) aPtr->sign<<31 | 0x7FC00000 | aPtr->v64>>41;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
new file mode 100644
index 0000000..2346b06
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
@@ -0,0 +1,53 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
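+| (Editor's note, not part of the upstream SoftFloat sources: bit 63 is the
+| sign, 0x7FF8000000000000 forces the exponent and quiet bits, and `v64'>>12
+| supplies the payload; e.g. sign=1 and v64=0 yield 0xFFF8000000000000,
+| matching defaultNaNF64UI.)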
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_commonNaNToF64UI( const struct commonNaN *aPtr )
+{
+
+    return
+        (uint_fast64_t) aPtr->sign<<63 | UINT64_C( 0x7FF8000000000000 )
+            | aPtr->v64>>12;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
new file mode 100644
index 0000000..0c6e610
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
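+| (Editor's note, not part of the upstream SoftFloat sources: the shift
+| `uiA<<41' left-aligns the 32-bit NaN's payload in `v64', making this the
+| inverse of softfloat_commonNaNToF32UI, which shifts the payload back down
+| with `v64>>41'.)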
+*----------------------------------------------------------------------------*/
+void softfloat_f32UIToCommonNaN( uint_fast32_t uiA, struct commonNaN *zPtr )
+{
+
+    if ( softfloat_isSigNaNF32UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    zPtr->sign = uiA>>31;
+    zPtr->v64 = (uint_fast64_t) uiA<<41;
+    zPtr->v0 = 0;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
new file mode 100644
index 0000000..c81dfa9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
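+| (Editor's note, not part of the upstream SoftFloat sources: as in the
+| 32-bit case, `uiA<<12' left-aligns the payload in `v64' so that
+| softfloat_commonNaNToF64UI can recover it with `v64>>12'.)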
+*----------------------------------------------------------------------------*/
+void softfloat_f64UIToCommonNaN( uint_fast64_t uiA, struct commonNaN *zPtr )
+{
+
+    if ( softfloat_isSigNaNF64UI( uiA ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+    }
+    zPtr->sign = uiA>>63;
+    zPtr->v64 = uiA<<12;
+    zPtr->v0 = 0;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
new file mode 100644
index 0000000..daaa31d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
@@ -0,0 +1,63 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    bool isSigNaNA;
+
+    isSigNaNA = softfloat_isSigNaNF32UI( uiA );
+    if ( isSigNaNA || softfloat_isSigNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        if ( isSigNaNA ) return uiA | 0x00400000;
+    }
+    return (isNaNF32UI( uiA ) ? uiA : uiB) | 0x00400000;
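+    /* (Editor's note, not in the upstream sources: OR-ing 0x00400000 sets
+       bit 22, the quiet bit of the F32 fraction, so the propagated NaN is
+       always quiet.) */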
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
new file mode 100644
index 0000000..78a29da
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
@@ -0,0 +1,63 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t
+ softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB )
+{
+    bool isSigNaNA;
+
+    isSigNaNA = softfloat_isSigNaNF64UI( uiA );
+    if ( isSigNaNA || softfloat_isSigNaNF64UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        if ( isSigNaNA ) return uiA | UINT64_C( 0x0008000000000000 );
+    }
+    return (isNaNF64UI( uiA ) ? uiA : uiB) | UINT64_C( 0x0008000000000000 );
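+    /* (Editor's note, not in the upstream sources: UINT64_C( 0x0008000000000000 )
+       is bit 51, the quiet bit of the F64 fraction, so the propagated NaN is
+       always quiet.) */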
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
new file mode 100644
index 0000000..f2c25ad
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
@@ -0,0 +1,52 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include "platform.h"
+#include "softfloat.h"
+
+/*----------------------------------------------------------------------------
+| Raises the exceptions specified by `flags'. Floating-point traps can be
+| defined here if desired. It is currently not possible for such a trap
+| to substitute a result value. If traps are not implemented, this routine
+| should be simply `softfloat_exceptionFlags |= flags;'.
+*----------------------------------------------------------------------------*/
+void softfloat_raiseFlags( uint_fast8_t flags )
+{
+
+    softfloat_exceptionFlags |= flags;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h
new file mode 100644
index 0000000..235442c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h
@@ -0,0 +1,208 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef specialize_h
+#define specialize_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "softfloat_types.h"
+
+/*----------------------------------------------------------------------------
+| Default value for `softfloat_detectTininess'.
+*----------------------------------------------------------------------------*/
+#define init_detectTininess softfloat_tininess_afterRounding
+
+/*----------------------------------------------------------------------------
+| The values to return on conversions to 32-bit integer formats that raise an
+| invalid exception.
+*----------------------------------------------------------------------------*/
+#define ui32_fromPosOverflow 0xFFFFFFFF
+#define ui32_fromNegOverflow 0
+#define ui32_fromNaN 0xFFFFFFFF
+#define i32_fromPosOverflow 0x7FFFFFFF
+#define i32_fromNegOverflow (-0x7FFFFFFF - 1)
+#define i32_fromNaN 0x7FFFFFFF
+
+/*----------------------------------------------------------------------------
+| The values to return on conversions to 64-bit integer formats that raise an
+| invalid exception.
+*----------------------------------------------------------------------------*/
+#define ui64_fromPosOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF )
+#define ui64_fromNegOverflow 0
+#define ui64_fromNaN UINT64_C( 0xFFFFFFFFFFFFFFFF )
+#define i64_fromPosOverflow UINT64_C( 0x7FFFFFFFFFFFFFFF )
+#define i64_fromNegOverflow (-UINT64_C( 0x7FFFFFFFFFFFFFFF ) - 1)
+#define i64_fromNaN UINT64_C( 0x7FFFFFFFFFFFFFFF )
+
+/*----------------------------------------------------------------------------
+| "Common NaN" structure, used to transfer NaN representations from one format
+| to another.
+*----------------------------------------------------------------------------*/
+struct commonNaN {
+    bool sign;
+#ifdef LITTLEENDIAN
+    uint64_t v0, v64;
+#else
+    uint64_t v64, v0;
+#endif
+};
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 16-bit floating-point NaN.
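+| (Editor's note, not part of the upstream SoftFloat sources: 0xFE00 is
+| sign 1, exponent field all ones, quiet bit set, zero payload.)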
+*----------------------------------------------------------------------------*/
+#define defaultNaNF16UI 0xFE00
+
+/*----------------------------------------------------------------------------
+| Returns true when 16-bit unsigned integer `uiA' has the bit pattern of a
+| 16-bit floating-point signaling NaN.
+| Note: This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF16UI( uiA ) ((((uiA) & 0x7E00) == 0x7C00) && ((uiA) & 0x01FF))
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast16_t softfloat_commonNaNToF16UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 32-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF32UI 0xFFC00000
+
+/*----------------------------------------------------------------------------
+| Returns true when 32-bit unsigned integer `uiA' has the bit pattern of a
+| 32-bit floating-point signaling NaN.
+| Note: This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF32UI( uiA ) ((((uiA) & 0x7FC00000) == 0x7F800000) && ((uiA) & 0x003FFFFF))
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f32UIToCommonNaN( uint_fast32_t uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_commonNaNToF32UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast32_t
+ softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 64-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF64UI UINT64_C( 0xFFF8000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns true when 64-bit unsigned integer `uiA' has the bit pattern of a
+| 64-bit floating-point signaling NaN.
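+| (Editor's note: that is, the exponent field is all ones, the quiet bit is
+| clear, and at least one lower fraction bit is set.)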
+| Note: This macro evaluates its argument more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF64UI( uiA ) ((((uiA) & UINT64_C( 0x7FF8000000000000 )) == UINT64_C( 0x7FF0000000000000 )) && ((uiA) & UINT64_C( 0x0007FFFFFFFFFFFF )))
+
+/*----------------------------------------------------------------------------
+| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts
+| this NaN to the common NaN form, and stores the resulting common NaN at the
+| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid
+| exception is raised.
+*----------------------------------------------------------------------------*/
+void softfloat_f64UIToCommonNaN( uint_fast64_t uiA, struct commonNaN *zPtr );
+
+/*----------------------------------------------------------------------------
+| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point
+| NaN, and returns the bit pattern of this value as an unsigned integer.
+*----------------------------------------------------------------------------*/
+uint_fast64_t softfloat_commonNaNToF64UI( const struct commonNaN *aPtr );
+
+/*----------------------------------------------------------------------------
+| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating-
+| point values, at least one of which is a NaN, returns the bit pattern of
+| the combined NaN result. If either `uiA' or `uiB' has the pattern of a
+| signaling NaN, the invalid exception is raised.
+*----------------------------------------------------------------------------*/
+uint_fast64_t
+ softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB );
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 80-bit extended floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNExtF80UI64 0xFFFF
+#define defaultNaNExtF80UI0 UINT64_C( 0xC000000000000000 )
+
+/*----------------------------------------------------------------------------
+| Returns true when the 80-bit unsigned integer formed from concatenating
+| 16-bit `uiA64' and 64-bit `uiA0' has the bit pattern of an 80-bit extended
+| floating-point signaling NaN.
+| Note: This macro evaluates its arguments more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNExtF80UI( uiA64, uiA0 ) ((((uiA64) & 0x7FFF) == 0x7FFF) && ! ((uiA0) & UINT64_C( 0x4000000000000000 )) && ((uiA0) & UINT64_C( 0x3FFFFFFFFFFFFFFF )))
+
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when `SOFTFLOAT_FAST_INT64' is
+| defined.
+*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+| The bit pattern for a default generated 128-bit floating-point NaN.
+*----------------------------------------------------------------------------*/
+#define defaultNaNF128UI64 UINT64_C( 0xFFFF800000000000 )
+#define defaultNaNF128UI0 UINT64_C( 0 )
+
+/*----------------------------------------------------------------------------
+| Returns true when the 128-bit unsigned integer formed from concatenating
+| 64-bit `uiA64' and 64-bit `uiA0' has the bit pattern of a 128-bit floating-
+| point signaling NaN.
+| Note: This macro evaluates its arguments more than once.
+*----------------------------------------------------------------------------*/
+#define softfloat_isSigNaNF128UI( uiA64, uiA0 ) ((((uiA64) & UINT64_C( 0x7FFF800000000000 )) == UINT64_C( 0x7FFF000000000000 )) && ((uiA0) || ((uiA64) & UINT64_C( 0x00007FFFFFFFFFFF ))))
+
+
+#endif
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c
new file mode 100644
index 0000000..314c76e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t f32_add( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( signF32UI( uiA ^ uiB ) ) {
+        return softfloat_subMagsF32( uiA, uiB );
+    } else {
+        return softfloat_addMagsF32( uiA, uiB );
+    }
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c
new file mode 100644
index 0000000..d817bc0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c
@@ -0,0 +1,176 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t f32_div( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expZ;
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+    uint_fast64_t sig64A;
+    uint_fast32_t sigZ;
+#else
+    uint_fast32_t sigZ;
+    uint_fast64_t rem;
+#endif
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    uB.f = b;
+    uiB = uB.ui;
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signZ = signA ^ signB;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA ) goto propagateNaN;
+        if ( expB == 0xFF ) {
+            if ( sigB ) goto propagateNaN;
+            goto invalid;
+        }
+        goto infinity;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN;
+        goto zero;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expB ) {
+        if ( ! sigB ) {
+            if ( ! (expA | sigA) ) goto invalid;
+            softfloat_raiseFlags( softfloat_flag_infinite );
+            goto infinity;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    if ( ! expA ) {
+        if ( ! sigA ) goto zero;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expZ = expA - expB + 0x7E;
+    sigA |= 0x00800000;
+    sigB |= 0x00800000;
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+    if ( sigA < sigB ) {
+        --expZ;
+        sig64A = (uint_fast64_t) sigA<<31;
+    } else {
+        sig64A = (uint_fast64_t) sigA<<30;
+    }
+    sigZ = sig64A / sigB;
+    if ( ! (sigZ & 0x3F) ) sigZ |= ((uint_fast64_t) sigB * sigZ != sig64A);
+#else
+    if ( sigA < sigB ) {
+        --expZ;
+        sigA <<= 8;
+    } else {
+        sigA <<= 7;
+    }
+    sigB <<= 8;
+    sigZ = ((uint_fast64_t) sigA * softfloat_approxRecip32_1( sigB ))>>32;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sigZ += 2;
+    if ( (sigZ & 0x3F) < 2 ) {
+        sigZ &= ~3;
+        rem = ((uint_fast64_t) sigA<<31) - (uint_fast64_t) sigZ * sigB;
+        if ( rem & UINT64_C( 0x8000000000000000 ) ) {
+            sigZ -= 4;
+        } else {
+            if ( rem ) sigZ |= 1;
+        }
+    }
+#endif
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infinity:
+    uiZ = packToF32UI( signZ, 0xFF, 0 );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zero:
+    uiZ = packToF32UI( signZ, 0, 0 );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c
new file mode 100644
index 0000000..5f07eee
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_eq( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1);
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c
new file mode 100644
index 0000000..f5fcc82
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c
@@ -0,0 +1,61 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_eq_signaling( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1);
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c
new file mode 100644
index 0000000..5004a5a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c
@@ -0,0 +1,51 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_isSignalingNaN( float32_t a )
+{
+    union ui32_f32 uA;
+
+    uA.f = a;
+    return softfloat_isSigNaNF32UI( uA.ui );
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c
new file mode 100644
index 0000000..77595fb
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_le( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1)
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c
new file mode 100644
index 0000000..1ec9101
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c
@@ -0,0 +1,71 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_le_quiet( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1)
+            : (uiA == uiB) || (signA ^ (uiA < uiB));
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c
new file mode 100644
index 0000000..9e12843
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c
@@ -0,0 +1,66 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+bool f32_lt( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0)
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c
new file mode 100644
index 0000000..9f83b81
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c
@@ -0,0 +1,71 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+bool f32_lt_quiet( float32_t a, float32_t b )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    union ui32_f32 uB;
+    uint_fast32_t uiB;
+    bool signA, signB;
+
+    uA.f = a;
+    uiA = uA.ui;
+    uB.f = b;
+    uiB = uB.ui;
+    if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) {
+        if (
+            softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB )
+        ) {
+            softfloat_raiseFlags( softfloat_flag_invalid );
+        }
+        return false;
+    }
+    signA = signF32UI( uiA );
+    signB = signF32UI( uiB );
+    return
+        (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0)
+            : (uiA != uiB) && (signA ^ (uiA < uiB));
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c
new file mode 100644
index 0000000..a2a673f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c
@@ -0,0 +1,137 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_mul( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signZ; + uint_fast32_t magBits; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x7F; + sigA = (sigA | 0x00800000)<<7; + sigB = (sigB | 0x00800000)<<8; + sigZ = softfloat_shortShiftRightJam64( (uint_fast64_t) sigA * sigB, 32 ); + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! 
magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + } else { + uiZ = packToF32UI( signZ, 0xFF, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF32UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c new file mode 100644 index 0000000..e98021b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + union ui32_f32 uC; + uint_fast32_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF32( uiA, uiB, uiC, 0 ); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c new file mode 100644 index 0000000..771b1b9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c @@ -0,0 +1,168 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
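f32_mulAdd, just above, is the fused multiply-add: softfloat_mulAddF32 forms a*b + c exactly and rounds once, which is observably different from a separate multiply followed by an add. An illustrative sketch, assuming the public f32_mul/f32_add/f32_mulAdd entry points from "softfloat.h" and the default round-to-nearest-even mode:

    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float32_t a = { 0x3F800001 };   /* 1 + 2^-23 */
        float32_t c = { 0xBF800002 };   /* -(1 + 2^-22), i.e. -(a*a rounded) */

        float32_t viaMul = f32_add( f32_mul( a, a ), c );  /* rounds twice */
        float32_t fused  = f32_mulAdd( a, a, c );          /* rounds once  */
        /* a*a is exactly 1 + 2^-22 + 2^-46; the separate multiply discards
           the 2^-46 term, so viaMul is +0, while the fused form keeps it. */
        printf( "%08X %08X\n",                  /* 00000000 28800000 */
                (unsigned) viaMul.v, (unsigned) fused.v );
        return 0;
    }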
+ +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_rem( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + int_fast16_t expB; + uint_fast32_t sigB; + struct exp16_sig32 normExpSig; + uint32_t rem; + int_fast16_t expDiff; + uint32_t q, recip32, altRem, meanRem; + bool signRem; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | 0x00800000; + sigB |= 0x00800000; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 6; + if ( expDiff ) { + rem <<= 5; + q = 0; + } else { + rem <<= 6; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( sigB<<8 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 7; + expDiff -= 31; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | which is believed to be the maximum possible. + *--------------------------------------------------------------------*/ + sigB <<= 6; + for (;;) { + q = (rem * (uint_fast64_t) recip32)>>32; + if ( expDiff < 0 ) break; + rem = -(q * (uint32_t) sigB); + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -30 here.) + *--------------------------------------------------------------------*/ + q >>= ~expDiff & 31; + rem = (rem<<(expDiff + 30)) - q * (uint32_t) sigB; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & 0x80000000) ); + meanRem = rem + altRem; + if ( (meanRem & 0x80000000) || (! meanRem && (q & 1)) ) rem = altRem; + signRem = signA; + if ( 0x80000000 <= rem ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF32( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c new file mode 100644 index 0000000..84e3c62 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_roundToInt( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t uiZ, lastBitMask, roundBitsMask; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x7E ) { + if ( ! (uint32_t) (uiA<<1) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF32UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF32UI( uiA ) ) break; + /* fall through */ + case softfloat_round_near_maxMag: + if ( exp == 0x7E ) uiZ |= packToF32UI( 0, 0x7F, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF32UI( 1, 0x7F, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF32UI( 0, 0x7F, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x96 <= exp ) { + if ( (exp == 0xFF) && fracF32UI( uiA ) ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast32_t) 1<<(0x96 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF32UI( uiZ ) ? 
softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c new file mode 100644 index 0000000..5ef659e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c @@ -0,0 +1,121 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_sqrt( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA, uiZ; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, shiftedSigZ; + uint32_t negRem; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! 
(expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x7F)>>1) + 0x7E; + expA &= 1; + sigA = (sigA | 0x00800000)<<8; + sigZ = + ((uint_fast64_t) sigA * softfloat_approxRecipSqrt32_1( expA, sigA )) + >>32; + if ( expA ) sigZ >>= 1; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZ += 2; + if ( (sigZ & 0x3F) < 2 ) { + shiftedSigZ = sigZ>>2; + negRem = shiftedSigZ * shiftedSigZ; + sigZ &= ~3; + if ( negRem & 0x80000000 ) { + sigZ |= 1; + } else { + if ( negRem ) --sigZ; + } + } + return softfloat_roundPackToF32( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c new file mode 100644 index 0000000..604d3bd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
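The square root above treats the sign bit exactly as IEEE 754 requires: sqrt(-0) returns -0 with no flags, while any other negative input raises invalid and yields the default NaN, whose bit pattern comes from the specialize.h variant being built. A quick sketch (illustrative; float32_t taken to be the packed public struct as in the earlier sketches):

    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float32_t negZero = { 0x80000000 };
        float32_t negOne  = { 0xBF800000 };

        softfloat_exceptionFlags = 0;
        float32_t r = f32_sqrt( negZero );     /* -0, no flags raised */
        printf( "%08X flags=%u\n", (unsigned) r.v,
                (unsigned) softfloat_exceptionFlags );

        softfloat_exceptionFlags = 0;
        r = f32_sqrt( negOne );                /* invalid + default NaN */
        printf( "%08X flags=%u\n", (unsigned) r.v,
                (unsigned) softfloat_exceptionFlags );
        return 0;
    }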
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_sub( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( signF32UI( uiA ^ uiB ) ) { + return softfloat_addMagsF32( uiA, uiB ); + } else { + return softfloat_subMagsF32( uiA, uiB ); + } + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c new file mode 100644 index 0000000..7a97158 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
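f32_sub above never subtracts directly: the XOR of the two sign bits routes mixed-sign operands to softfloat_addMagsF32 and same-sign operands to softfloat_subMagsF32, each of which works on magnitudes and fixes up the result sign itself. A small usage sketch (illustrative):

    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float32_t a = { 0x40A00000 };   /* 5.0f */
        float32_t b = { 0x40400000 };   /* 3.0f */

        /* Same sign, so this goes through softfloat_subMagsF32. */
        float32_t z = f32_sub( a, b );
        printf( "%08X\n", (unsigned) z.v );   /* 40000000 = 2.0f */
        return 0;
    }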
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f32_to_f16( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = frac>>9 | ((frac & 0x1FF) != 0); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF16( sign, exp - 0x71, frac16 | 0x4000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c new file mode 100644 index 0000000..f9e02f2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
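The narrowing conversion above folds every float32 fraction bit below float16 precision into a single sticky bit (frac16 = frac>>9 | ((frac & 0x1FF) != 0)) before handing the value to softfloat_roundPackToF16, so rounding and the inexact/overflow/underflow flags come out right. In particular, values beyond float16's finite range (max 65504) overflow to infinity. An illustrative sketch; softfloat_roundingMode is the library's global rounding control:

    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float32_t big = { 0x47800000 };   /* 65536.0f, above f16's 65504 */

        softfloat_roundingMode   = softfloat_round_near_even;
        softfloat_exceptionFlags = 0;
        float16_t h = f32_to_f16( big );  /* +inf, overflow and inexact */
        printf( "%04X flags=%u\n", (unsigned) h.v,
                (unsigned) softfloat_exceptionFlags );
        return 0;
    }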
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f32_to_f64( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct exp16_sig32 normExpSig; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF32Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF64UI( sign, exp + 0x380, (uint_fast64_t) frac<<29 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c new file mode 100644 index 0000000..c9f2cf9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f32_to_i32( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + uint_fast64_t sig64; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0xFF) && sig ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<32; + shiftDist = 0xAA - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c new file mode 100644 index 0000000..1a94dcc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f32_to_i32_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    int_fast32_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x9E - exp;
+    if ( 32 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( shiftDist <= 0 ) {
+        if ( uiA == packToF32UI( 1, 0x9E, 0 ) ) return -0x7FFFFFFF - 1;
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? i32_fromNaN
+                : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig | 0x00800000)<<8;
+    absZ = sig>>shiftDist;
+    if ( exact && ((uint_fast32_t) absZ<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? -absZ : absZ;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f32_to_i64( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    uint_fast64_t sig64, extra;
+    struct uint64_extra sig64Extra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( shiftDist < 0 ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? i64_fromNaN
+                : sign ?
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + extra = 0; + if ( shiftDist ) { + sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist ); + sig64 = sig64Extra.v; + extra = sig64Extra.extra; + } + return softfloat_roundToI64( sign, sig64, extra, roundingMode, exact ); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c new file mode 100644 index 0000000..7d336a4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c @@ -0,0 +1,94 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
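As with the other integer conversions in this patch, out-of-range and NaN inputs to f32_to_i64 raise the invalid flag and return the i64_fromNaN / i64_fromPosOverflow / i64_fromNegOverflow constants, whose actual values are fixed by the specialize.h target variant rather than by this file. An illustrative sketch:

    #include <stdbool.h>
    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float32_t huge = { 0x7F000000 };   /* 2^127, far above INT64_MAX */

        softfloat_exceptionFlags = 0;
        int_fast64_t z = f32_to_i64( huge, softfloat_round_near_even, true );
        /* z is i64_fromPosOverflow from the specialize.h being built. */
        printf( "%lld invalid=%d\n", (long long) z,
                (softfloat_exceptionFlags & softfloat_flag_invalid) != 0 );
        return 0;
    }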
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f32_to_i64_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t sig64; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( 64 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( shiftDist <= 0 ) { + if ( uiA == packToF32UI( 1, 0xBE, 0 ) ) { + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + absZ = sig64>>shiftDist; + shiftDist = 40 - shiftDist; + if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sign ? -absZ : absZ; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c new file mode 100644 index 0000000..5ec279b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f32_to_ui32( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + uint_fast64_t sig64; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0xFF) && sig ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<32; + shiftDist = 0xAA - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToUI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c new file mode 100644 index 0000000..12f7261 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f32_to_ui32_r_minMag( float32_t a, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x9E - exp;
+    if ( 32 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF32UI( uiA );
+    if ( sign || (shiftDist < 0) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig | 0x00800000)<<8;
+    z = sig>>shiftDist;
+    if ( exact && (z<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f32_to_ui64( float32_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui32_f32 uA;
+    uint_fast32_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast32_t sig;
+    int_fast16_t shiftDist;
+    uint_fast64_t sig64, extra;
+    struct uint64_extra sig64Extra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF32UI( uiA );
+    exp = expF32UI( uiA );
+    sig = fracF32UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0xBE - exp;
+    if ( shiftDist < 0 ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0xFF) && sig ? ui64_fromNaN
+                : sign ?
ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + extra = 0; + if ( shiftDist ) { + sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist ); + sig64 = sig64Extra.v; + extra = sig64Extra.extra; + } + return softfloat_roundToUI64( sign, sig64, extra, roundingMode, exact ); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c new file mode 100644 index 0000000..f96f3e1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c @@ -0,0 +1,90 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
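The _r_minMag entry points in this patch are specialized fast paths for round-toward-zero; for any given input they agree with the general routine called under softfloat_round_minMag (a rounding-mode constant from "softfloat.h"). An illustrative sketch:

    #include <stdbool.h>
    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float32_t x = { 0x40600000 };   /* 3.5f */

        uint_fast64_t a = f32_to_ui64( x, softfloat_round_minMag, true );
        uint_fast64_t b = f32_to_ui64_r_minMag( x, true );
        printf( "%llu %llu\n",          /* 3 3, both raising inexact */
                (unsigned long long) a, (unsigned long long) b );
        return 0;
    }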
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f32_to_ui64_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t sig64, z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( 64 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( sign || (shiftDist < 0) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + z = sig64>>shiftDist; + shiftDist = 40 - shiftDist; + if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c new file mode 100644 index 0000000..b1969ca --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_add( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + if ( signA == signB ) { + return softfloat_addMagsF64( uiA, uiB, signA ); + } else { + return softfloat_subMagsF64( uiA, uiB, signA ); + } + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c new file mode 100644 index 0000000..c5a2d4f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c @@ -0,0 +1,172 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
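Note that f64_add() above never operates on the bit patterns itself; it only dispatches on operand signs, so an addition of mixed-sign operands is really a magnitude subtraction. A caller sketch (the demo_add() wrapper and raw-bit constants are illustrative, not part of the patch):

    #include <stdint.h>
    #include "softfloat.h"

    float64_t demo_add( void )
    {
        float64_t one, neg_quarter;
        one.v         = UINT64_C( 0x3FF0000000000000 );  /* +1.0  */
        neg_quarter.v = UINT64_C( 0xBFD0000000000000 );  /* -0.25 */
        /* signs differ, so f64_add routes to softfloat_subMagsF64 */
        return f64_add( one, neg_quarter );              /* +0.75 */
    }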
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_div( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t recip32, sig32Z, doubleTerm; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) { + if ( ! (expA | sigA) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0x3FE; + sigA |= UINT64_C( 0x0010000000000000 ); + sigB |= UINT64_C( 0x0010000000000000 ); + if ( sigA < sigB ) { + --expZ; + sigA <<= 11; + } else { + sigA <<= 10; + } + sigB <<= 11; + recip32 = softfloat_approxRecip32_1( sigB>>32 ) - 2; + sig32Z = ((uint32_t) (sigA>>32) * (uint_fast64_t) recip32)>>32; + doubleTerm = sig32Z<<1; + rem = + ((sigA - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28) + - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4); + q = (((uint32_t) (rem>>32) * (uint_fast64_t) recip32)>>32) + 4; + sigZ = ((uint_fast64_t) sig32Z<<32) + ((uint_fast64_t) q<<4); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 4<<4 ) { + q &= ~7; + sigZ &= ~(uint_fast64_t) 0x7F; + doubleTerm = q<<1; + rem = + ((rem - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28) + - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4); + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + sigZ -= 1<<7; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF64UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c new file mode 100644 index 0000000..ccb602a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_eq( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c new file mode 100644 index 0000000..ee5a441 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED.
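The final return in f64_eq() above accepts unequal bit patterns when the OR of both values, masked to the low 63 bits, is zero; that is exactly the +0.0 == -0.0 case. A sketch (demo name and constants illustrative, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include "softfloat.h"

    bool demo_signed_zero( void )
    {
        float64_t pz, nz;
        pz.v = UINT64_C( 0x0000000000000000 );  /* +0.0 */
        nz.v = UINT64_C( 0x8000000000000000 );  /* -0.0 */
        return f64_eq( pz, nz );                /* true; no flags raised */
    }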
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_eq_signaling( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c new file mode 100644 index 0000000..f55acb4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include <stdbool.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_isSignalingNaN( float64_t a ) +{ + union ui64_f64 uA; + + uA.f = a; + return softfloat_isSigNaNF64UI( uA.ui ); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c new file mode 100644 index 0000000..91fc994 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_le( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA || ! 
((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c new file mode 100644 index 0000000..a5d332a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_le_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c new file mode 100644 index 0000000..abf62fd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
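The two ordering predicates just added differ only in when they raise the invalid flag on NaN input: f64_le raises it for any NaN operand, while f64_le_quiet raises it only for a signaling NaN. A sketch (demo name and constants illustrative, not part of the patch):

    #include <stdint.h>
    #include "softfloat.h"

    void demo_nan_le( void )
    {
        float64_t qnan, one;
        qnan.v = UINT64_C( 0x7FF8000000000000 );  /* quiet NaN */
        one.v  = UINT64_C( 0x3FF0000000000000 );  /* 1.0       */
        (void) f64_le( qnan, one );        /* false; raises invalid        */
        (void) f64_le_quiet( qnan, one );  /* false; quiet NaN, so no flag */
    }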
+ +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_lt( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c new file mode 100644 index 0000000..6531f57 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_lt_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c new file mode 100644 index 0000000..caac424 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c @@ -0,0 +1,139 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_mul( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signZ; + uint_fast64_t magBits; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + struct uint128 sig128Z; + uint_fast64_t sigZ, uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FF; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<11; + sig128Z = softfloat_mul64To128( sigA, sigB ); + sigZ = sig128Z.v64 | (sig128Z.v0 != 0); + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! 
magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + } else { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF64UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c new file mode 100644 index 0000000..67fc44d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + union ui64_f64 uC; + uint_fast64_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF64( uiA, uiB, uiC, 0 ); + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c new file mode 100644 index 0000000..79d4105 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c @@ -0,0 +1,185 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser.
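f64_mulAdd() above is the fused entry point: it hands all three raw operands to softfloat_mulAddF64, which rounds once at the end. Composing the two-operand routines instead rounds twice and can differ in the last bit; a sketch contrasting the two (wrapper names illustrative, not part of the patch):

    #include <stdint.h>
    #include "softfloat.h"

    float64_t fma_fused( float64_t a, float64_t b, float64_t c )
    {
        return f64_mulAdd( a, b, c );          /* one rounding step  */
    }

    float64_t fma_composed( float64_t a, float64_t b, float64_t c )
    {
        return f64_add( f64_mul( a, b ), c );  /* two rounding steps */
    }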
+ +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_rem( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + int_fast16_t expB; + uint_fast64_t sigB; + struct exp16_sig64 normExpSig; + uint64_t rem; + int_fast16_t expDiff; + uint32_t q, recip32; + uint_fast64_t q64; + uint64_t altRem, meanRem; + bool signRem; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA < expB - 1 ) return a; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | UINT64_C( 0x0010000000000000 ); + sigB |= UINT64_C( 0x0010000000000000 ); + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 9; + if ( expDiff ) { + rem <<= 8; + q = 0; + } else { + rem <<= 9; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( sigB>>21 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 9; + expDiff -= 30; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | the maximum possible. + *--------------------------------------------------------------------*/ + sigB <<= 9; + for (;;) { + q64 = (uint32_t) (rem>>32) * (uint_fast64_t) recip32; + if ( expDiff < 0 ) break; + q = (q64 + 0x80000000)>>32; + rem <<= 29; + rem -= q * (uint64_t) sigB; + if ( rem & UINT64_C( 0x8000000000000000 ) ) rem += sigB; + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -29 here.) + *--------------------------------------------------------------------*/ + q = (uint32_t) (q64>>32)>>(~expDiff & 31); + rem = (rem<<(expDiff + 30)) - q * (uint64_t) sigB; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + altRem = rem + sigB; + goto selectRem; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & UINT64_C( 0x8000000000000000 )) ); + selectRem: + meanRem = rem + altRem; + if ( + (meanRem & UINT64_C( 0x8000000000000000 )) || (! meanRem && (q & 1)) + ) { + rem = altRem; + } + signRem = signA; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF64( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c new file mode 100644 index 0000000..3129a55 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. 
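f64_rem() above implements the IEEE remainder: the quotient is rounded to the nearest integer (even on ties) rather than truncated, so the result can carry the opposite sign from an fmod-style remainder. A sketch (demo name and constants illustrative, not part of the patch):

    #include <stdint.h>
    #include "softfloat.h"

    float64_t demo_rem( void )
    {
        float64_t seven, two;
        seven.v = UINT64_C( 0x401C000000000000 );  /* 7.0 */
        two.v   = UINT64_C( 0x4000000000000000 );  /* 2.0 */
        /* 7/2 = 3.5 rounds to the even quotient 4, so 7 - 4*2 = -1.0,
           whereas fmod(7.0, 2.0) would give +1.0 */
        return f64_rem( seven, two );
    }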
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_roundToInt( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t uiZ, lastBitMask, roundBitsMask; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x3FE ) { + if ( ! (uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF64UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF64UI( uiA ) ) break; + /* fall through */ + case softfloat_round_near_maxMag: + if ( exp == 0x3FE ) uiZ |= packToF64UI( 0, 0x3FF, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF64UI( 1, 0x3FF, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF64UI( 0, 0x3FF, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x433 <= exp ) { + if ( (exp == 0x7FF) && fracF64UI( uiA ) ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast64_t) 1<<(0x433 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! 
(uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF64UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c new file mode 100644 index 0000000..9a06cfa --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c @@ -0,0 +1,133 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_sqrt( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t sig32A, recipSqrt32, sig32Z; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ, shiftedSigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + if ( ! 
signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + | (`sig32Z' is guaranteed to be a lower bound on the square root of + | `sig32A', which makes `sig32Z' also a lower bound on the square root of + | `sigA'.) + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x3FF)>>1) + 0x3FE; + expA &= 1; + sigA |= UINT64_C( 0x0010000000000000 ); + sig32A = sigA>>21; + recipSqrt32 = softfloat_approxRecipSqrt32_1( expA, sig32A ); + sig32Z = ((uint_fast64_t) sig32A * recipSqrt32)>>32; + if ( expA ) { + sigA <<= 8; + sig32Z >>= 1; + } else { + sigA <<= 9; + } + rem = sigA - (uint_fast64_t) sig32Z * sig32Z; + q = ((uint32_t) (rem>>2) * (uint_fast64_t) recipSqrt32)>>32; + sigZ = ((uint_fast64_t) sig32Z<<32 | 1<<5) + ((uint_fast64_t) q<<3); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 0x22 ) { + sigZ &= ~(uint_fast64_t) 0x3F; + shiftedSigZ = sigZ>>6; + rem = (sigA<<52) - shiftedSigZ * shiftedSigZ; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + --sigZ; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c new file mode 100644 index 0000000..14ea575 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
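f64_sqrt() above raises invalid (and returns the default NaN) for any negative non-zero input, passes -0.0 through unchanged, and its estimate-and-refine path still delivers exact results for perfect squares. A sketch (demo name and constant illustrative, not part of the patch):

    #include <stdint.h>
    #include "softfloat.h"

    float64_t demo_sqrt( void )
    {
        float64_t nine;
        nine.v = UINT64_C( 0x4022000000000000 );  /* 9.0 */
        return f64_sqrt( nine );                  /* exactly 3.0; no inexact */
    }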
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_sub( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + if ( signA == signB ) { + return softfloat_subMagsF64( uiA, uiB, signA ); + } else { + return softfloat_addMagsF64( uiA, uiB, signA ); + } + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c new file mode 100644 index 0000000..99b13dd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f64_to_f32( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + uint_fast32_t uiZ, frac32; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac32 = softfloat_shortShiftRightJam64( frac, 22 ); + if ( ! (exp | frac32) ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF32( sign, exp - 0x381, frac32 | 0x40000000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c new file mode 100644 index 0000000..8712c0a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c @@ -0,0 +1,82 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED.
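In f64_to_f32() above, the 22 fraction bits shifted out by softfloat_shortShiftRightJam64 are folded into a sticky bit before the final round, so narrowing never silently drops precision. A sketch, assuming the default near-even rounding mode (demo name and constant illustrative, not part of the patch):

    #include <stdint.h>
    #include "softfloat.h"

    float32_t demo_narrow( void )
    {
        float64_t tenth;
        tenth.v = UINT64_C( 0x3FB999999999999A );  /* nearest double to 0.1 */
        return f64_to_f32( tenth );  /* 0x3DCCCCCD, i.e. 0.1f; inexact raised */
    }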
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c
new file mode 100644
index 0000000..8712c0a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c
@@ -0,0 +1,82 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f64_to_i32( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow)
+    if ( (exp == 0x7FF) && sig ) {
+#if (i32_fromNaN == i32_fromPosOverflow)
+        sign = 0;
+#elif (i32_fromNaN == i32_fromNegOverflow)
+        sign = 1;
+#else
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return i32_fromNaN;
+#endif
+    }
+#endif
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+    shiftDist = 0x427 - exp;
+    if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist );
+    return softfloat_roundToI32( sign, sig, roundingMode, exact );
+
+}
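The roundingMode argument selects one of the softfloat_round_* constants declared in softfloat.h, and exact requests the inexact flag whenever fraction bits are discarded. A hypothetical usage sketch (assumes the library is linked):

#include <stdio.h>
#include <stdint.h>
#include "softfloat.h"

int main(void)
{
    float64_t a = { UINT64_C( 0x4004000000000000 ) };  /* 2.5 */
    /* Ties-to-even rounds 2.5 down to the even integer 2; round-max gives 3. */
    printf( "%d\n", (int) f64_to_i32( a, softfloat_round_near_even, true ) );
    printf( "%d\n", (int) f64_to_i32( a, softfloat_round_max, true ) );
    return 0;
}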
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c
new file mode 100644
index 0000000..b7e1e03
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c
@@ -0,0 +1,96 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f64_to_i32_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    int_fast32_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( 53 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF64UI( uiA );
+    if ( shiftDist < 22 ) {
+        if (
+            sign && (exp == 0x41E) && (sig < UINT64_C( 0x0000000000200000 ))
+        ) {
+            if ( exact && sig ) {
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+            }
+            return -0x7FFFFFFF - 1;
+        }
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x7FF) && sig ? i32_fromNaN
+                : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= UINT64_C( 0x0010000000000000 );
+    absZ = sig>>shiftDist;
+    if ( exact && ((uint_fast64_t) (uint_fast32_t) absZ<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sign ? -absZ : absZ;
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f64_to_i64( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    struct uint64_extra sigExtra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+    shiftDist = 0x433 - exp;
+    if ( shiftDist <= 0 ) {
+        if ( shiftDist < -11 ) goto invalid;
+        sigExtra.v = sig<<-shiftDist;
+        sigExtra.extra = 0;
+    } else {
+        sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist );
+    }
+    return
+        softfloat_roundToI64(
+            sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return
+        (exp == 0x7FF) && fracF64UI( uiA ) ? i64_fromNaN
+            : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+
+}
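The shiftDist arithmetic above is shared by all the f64-to-integer routines: 0x433 is the f64 bias 0x3FF plus the 52 fraction bits, so shiftDist is how far the 53-bit significand must move right to land at the binary point. A tiny standalone sketch (not from the patch) of why shiftDist below -11 is rejected as invalid:

#include <stdio.h>

int main(void)
{
    int exp = 0x433;                 /* value is an exact 53-bit integer   */
    printf( "%d\n", 0x433 - exp );   /* shiftDist 0: no shift needed       */
    exp = 0x43F;                     /* magnitude 2^64 or more             */
    printf( "%d\n", 0x433 - exp );   /* shiftDist -12: cannot fit int64_t  */
    return 0;                        /* -11 (exp 0x43E, i.e. 2^63) is the
                                        boundary that admits INT64_MIN     */
}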
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c
new file mode 100644
index 0000000..3822606
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c
@@ -0,0 +1,100 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f64_to_i64_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    int_fast64_t absZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( shiftDist <= 0 ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( shiftDist < -10 ) {
+            if ( uiA == packToF64UI( 1, 0x43E, 0 ) ) {
+                return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1;
+            }
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            return
+                (exp == 0x7FF) && sig ? i64_fromNaN
+                    : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+        }
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sig |= UINT64_C( 0x0010000000000000 );
+        absZ = sig<<-shiftDist;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( 53 <= shiftDist ) {
+            if ( exact && (exp | sig) ) {
+                softfloat_exceptionFlags |= softfloat_flag_inexact;
+            }
+            return 0;
+        }
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sig |= UINT64_C( 0x0010000000000000 );
+        absZ = sig>>shiftDist;
+        if ( exact && (absZ<<shiftDist != sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+    }
+    return sign ? -absZ : absZ;
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f64_to_ui32( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow)
+    if ( (exp == 0x7FF) && sig ) {
+#if (ui32_fromNaN == ui32_fromPosOverflow)
+        sign = 0;
+#elif (ui32_fromNaN == ui32_fromNegOverflow)
+        sign = 1;
+#else
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return ui32_fromNaN;
+#endif
+    }
+#endif
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+    shiftDist = 0x427 - exp;
+    if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist );
+    return softfloat_roundToUI32( sign, sig, roundingMode, exact );
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c
new file mode 100644
index 0000000..11f0b05
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c
@@ -0,0 +1,88 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f64_to_ui32_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( 53 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF64UI( uiA );
+    if ( sign || (shiftDist < 21) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x7FF) && sig ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig |= UINT64_C( 0x0010000000000000 );
+    z = sig>>shiftDist;
+    if ( exact && ((uint_fast64_t) z<<shiftDist != sig) ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f64_to_ui64( float64_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    bool sign;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    struct uint64_extra sigExtra;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF64UI( uiA );
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp ) sig |= UINT64_C( 0x0010000000000000 );
+    shiftDist = 0x433 - exp;
+    if ( shiftDist <= 0 ) {
+        if ( shiftDist < -11 ) goto invalid;
+        sigExtra.v = sig<<-shiftDist;
+        sigExtra.extra = 0;
+    } else {
+        sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist );
+    }
+    return
+        softfloat_roundToUI64(
+            sign, sigExtra.v, sigExtra.extra, roundingMode, exact );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return
+        (exp == 0x7FF) && fracF64UI( uiA ) ? ui64_fromNaN
+            : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c
new file mode 100644
index 0000000..25918c4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c
@@ -0,0 +1,93 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f64_to_ui64_r_minMag( float64_t a, bool exact )
+{
+    union ui64_f64 uA;
+    uint_fast64_t uiA;
+    int_fast16_t exp;
+    uint_fast64_t sig;
+    int_fast16_t shiftDist;
+    bool sign;
+    uint_fast64_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF64UI( uiA );
+    sig = fracF64UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = 0x433 - exp;
+    if ( 53 <= shiftDist ) {
+        if ( exact && (exp | sig) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF64UI( uiA );
+    if ( sign ) goto invalid;
+    if ( shiftDist <= 0 ) {
+        if ( shiftDist < -11 ) goto invalid;
+        z = (sig | UINT64_C( 0x0010000000000000 ))<<-shiftDist;
+    } else {
+        sig |= UINT64_C( 0x0010000000000000 );
+        z = sig>>shiftDist;
+        if ( exact && (uint64_t) (sig<<(-shiftDist & 63)) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return
+        (exp == 0x7FF) && sig ? ui64_fromNaN
+            : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+
+}
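The _r_minMag routines implement round-toward-zero without a rounding-mode argument: the fraction is simply truncated, and exact turns any discarded bits into the inexact flag rather than an error. A hypothetical check of that behavior (assumes the library is linked):

#include <stdio.h>
#include <stdint.h>
#include "softfloat.h"

int main(void)
{
    float64_t a = { UINT64_C( 0x3FFC000000000000 ) };  /* 1.75 as f64 bits */
    softfloat_exceptionFlags = 0;
    printf( "%u\n", (unsigned) f64_to_ui64_r_minMag( a, true ) );  /* 1 */
    printf( "inexact=%d\n",
            (softfloat_exceptionFlags & softfloat_flag_inexact) != 0 );
    return 0;
}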
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c
new file mode 100644
index 0000000..b1aedba
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c
@@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i32_to_f32( int32_t a )
+{
+    bool sign;
+    union ui32_f32 uZ;
+    uint_fast32_t absA;
+
+    sign = (a < 0);
+    if ( ! (a & 0x7FFFFFFF) ) {
+        uZ.ui = sign ? packToF32UI( 1, 0x9E, 0 ) : 0;
+        return uZ.f;
+    }
+    absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+    return softfloat_normRoundPackToF32( sign, 0x9C, absA );
+
+}
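The special case (a & 0x7FFFFFFF) == 0 matches only 0 and INT32_MIN: negating INT32_MIN would overflow, but -2^31 is exactly packToF32UI( 1, 0x9E, 0 ), since the biased exponent 0x9E is 127 + 31. A standalone sketch (not from the patch; assumes an IEEE-754 'float') verifying that bit pattern:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    /* sign 1, biased exponent 0x9E, fraction 0 */
    uint32_t ui = ((uint32_t) 1<<31) | ((uint32_t) 0x9E<<23);
    float f;
    memcpy( &f, &ui, sizeof f );
    printf( "%.1f\n", f );      /* -2147483648.0 */
    return 0;
}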
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c
new file mode 100644
index 0000000..d3901eb
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c
@@ -0,0 +1,65 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i32_to_f64( int32_t a )
+{
+    uint_fast64_t uiZ;
+    bool sign;
+    uint_fast32_t absA;
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    if ( ! a ) {
+        uiZ = 0;
+    } else {
+        sign = (a < 0);
+        absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a;
+        shiftDist = softfloat_countLeadingZeros32( absA ) + 21;
+        uiZ =
+            packToF64UI(
+                sign, 0x432 - shiftDist, (uint_fast64_t) absA<<shiftDist );
+    }
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t i64_to_f32( int64_t a )
+{
+    bool sign;
+    uint_fast64_t absA;
+    int_fast8_t shiftDist;
+    union ui32_f32 u;
+    uint_fast32_t sig;
+
+    sign = (a < 0);
+    absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+    shiftDist = softfloat_countLeadingZeros64( absA ) - 40;
+    if ( 0 <= shiftDist ) {
+        u.ui =
+            a ? packToF32UI(
+                    sign, 0x95 - shiftDist, (uint_fast32_t) absA<<shiftDist )
+                : 0;
+        return u.f;
+    } else {
+        shiftDist += 7;
+        sig =
+            (shiftDist < 0)
+                ? (uint_fast32_t) softfloat_shortShiftRightJam64( absA, -shiftDist )
+                : (uint_fast32_t) absA<<shiftDist;
+        return softfloat_roundPackToF32( sign, 0x9D - shiftDist, sig );
+    }
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t i64_to_f64( int64_t a )
+{
+    bool sign;
+    union ui64_f64 uZ;
+    uint_fast64_t absA;
+
+    sign = (a < 0);
+    if ( ! (a & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) {
+        uZ.ui = sign ? packToF64UI( 1, 0x43E, 0 ) : 0;
+        return uZ.f;
+    }
+    absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a;
+    return softfloat_normRoundPackToF64( sign, 0x43C, absA );
+
+}
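Unlike i32_to_f64, which is always exact, i64_to_f32 must round whenever the integer has more than 24 significant bits. A hypothetical demonstration (assumes the library is linked; ties-to-even is SoftFloat's default rounding mode):

#include <stdio.h>
#include <stdint.h>
#include "softfloat.h"

int main(void)
{
    /* 2^24 + 1 = 16777217 has 25 significant bits, one more than an f32
       significand holds; halfway between 16777216 and 16777218, ties-to-even
       picks 16777216.0f. */
    float32_t z = i64_to_f32( ( (int64_t) 1<<24 ) + 1 );
    printf( "0x%08X\n", (unsigned) z.v );   /* 0x4B800000 = 16777216.0f */
    return 0;
}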
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h
new file mode 100644
index 0000000..af15800
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h
@@ -0,0 +1,144 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef internals_h
+#define internals_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "primitives.h"
+#include "softfloat_types.h"
+
+union ui16_f16 { uint16_t ui; float16_t f; };
+union ui32_f32 { uint32_t ui; float32_t f; };
+union ui64_f64 { uint64_t ui; float64_t f; };
+
+union extF80M_extF80 { struct extFloat80M fM; extFloat80_t f; };
+union ui128_f128 { struct uint128 ui; float128_t f; };
+
+enum {
+    softfloat_mulAdd_subC    = 1,
+    softfloat_mulAdd_subProd = 2
+};
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+uint_fast32_t softfloat_roundToUI32( bool, uint_fast64_t, uint_fast8_t, bool );
+
+uint_fast64_t
+ softfloat_roundToUI64(
+     bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool );
+
+int_fast32_t softfloat_roundToI32( bool, uint_fast64_t, uint_fast8_t, bool );
+
+int_fast64_t
+ softfloat_roundToI64(
+     bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF16UI( a ) ((bool) ((uint16_t) (a)>>15))
+#define expF16UI( a ) ((int_fast8_t) ((a)>>10) & 0x1F)
+#define fracF16UI( a ) ((a) & 0x03FF)
+#define packToF16UI( sign, exp, sig ) (((uint16_t) (sign)<<15) + ((uint16_t) (exp)<<10) + (sig))
+
+#define isNaNF16UI( a ) (((~(a) & 0x7C00) == 0) && ((a) & 0x03FF))
+
+float16_t softfloat_roundPackToF16( bool, int_fast16_t, uint_fast16_t );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF32UI( a ) ((bool) ((uint32_t) (a)>>31))
+#define expF32UI( a ) ((int_fast16_t) ((a)>>23) & 0xFF)
+#define fracF32UI( a ) ((a) & 0x007FFFFF)
+#define packToF32UI( sign, exp, sig ) (((uint32_t) (sign)<<31) + ((uint32_t) (exp)<<23) + (sig))
+
+#define isNaNF32UI( a ) (((~(a) & 0x7F800000) == 0) && ((a) & 0x007FFFFF))
+
+struct exp16_sig32 { int_fast16_t exp; uint_fast32_t sig; };
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t );
+
+float32_t softfloat_roundPackToF32( bool, int_fast16_t, uint_fast32_t );
+float32_t softfloat_normRoundPackToF32( bool, int_fast16_t, uint_fast32_t );
+
+float32_t softfloat_addMagsF32( uint_fast32_t, uint_fast32_t );
+float32_t softfloat_subMagsF32( uint_fast32_t, uint_fast32_t );
+float32_t
+ softfloat_mulAddF32(
+     uint_fast32_t, uint_fast32_t, uint_fast32_t, uint_fast8_t );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF64UI( a ) ((bool) ((uint64_t) (a)>>63))
+#define expF64UI( a ) ((int_fast16_t) ((a)>>52) & 0x7FF)
+#define fracF64UI( a ) ((a) & UINT64_C( 0x000FFFFFFFFFFFFF ))
+#define packToF64UI( sign, exp, sig ) ((uint64_t) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<52) + (sig)))
+
+#define isNaNF64UI( a ) (((~(a) & UINT64_C( 0x7FF0000000000000 )) == 0) && ((a) & UINT64_C( 0x000FFFFFFFFFFFFF )))
+
+struct exp16_sig64 { int_fast16_t exp; uint_fast64_t sig; };
+struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t );
+
+float64_t softfloat_roundPackToF64( bool, int_fast16_t, uint_fast64_t );
+float64_t softfloat_normRoundPackToF64( bool, int_fast16_t, uint_fast64_t );
+
+float64_t softfloat_addMagsF64( uint_fast64_t, uint_fast64_t, bool );
+float64_t softfloat_subMagsF64( uint_fast64_t, uint_fast64_t, bool );
+float64_t
+ softfloat_mulAddF64(
+     uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast8_t );
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signExtF80UI64( a64 ) ((bool) ((uint16_t) (a64)>>15))
+#define expExtF80UI64( a64 ) ((a64) & 0x7FFF)
+#define packToExtF80UI64( sign, exp ) ((uint_fast16_t) (sign)<<15 | (exp))
+
+#define isNaNExtF80UI( a64, a0 ) ((((a64) & 0x7FFF) == 0x7FFF) && ((a0) & UINT64_C( 0x7FFFFFFFFFFFFFFF )))
+
+/*----------------------------------------------------------------------------
+*----------------------------------------------------------------------------*/
+#define signF128UI64( a64 ) ((bool) ((uint64_t) (a64)>>63))
+#define expF128UI64( a64 ) ((int_fast32_t) ((a64)>>48) & 0x7FFF)
+#define fracF128UI64( a64 ) ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF ))
+#define packToF128UI64( sign, exp, sig64 ) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<48) + (sig64))
+
+#define isNaNF128UI( a64, a0 ) (((~(a64) & UINT64_C( 0x7FFF000000000000 )) == 0) && (a0 || ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF ))))
+
+#endif
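One detail worth noting in the packing macros above: fields are combined with '+' rather than '|', so a significand that has overflowed into the hidden-bit position deliberately carries into the exponent field. A standalone sketch (the macro is copied from the header; the demo itself is not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define packToF64UI( sign, exp, sig ) ((uint64_t) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<52) + (sig)))

int main(void)
{
    /* 1.0: sign 0, biased exponent 0x3FF, fraction 0 */
    printf( "0x%016llX\n",
            (unsigned long long) packToF64UI( 0, 0x3FF, 0 ) );
    /* A "fraction" of exactly 2^52 carries into the exponent:
       same bits as exp 0x400, fraction 0, i.e. 2.0 */
    printf( "0x%016llX\n",
            (unsigned long long)
                packToF64UI( 0, 0x3FF, UINT64_C( 0x0010000000000000 ) ) );
    return 0;
}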
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h
new file mode 100644
index 0000000..781d82f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h
@@ -0,0 +1,83 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef primitiveTypes_h
+#define primitiveTypes_h 1
+
+#include <stdint.h>
+
+#ifdef LITTLEENDIAN
+struct uint128 { uint64_t v0, v64; };
+struct uint64_extra { uint64_t extra, v; };
+struct uint128_extra { uint64_t extra; struct uint128 v; };
+#else
+struct uint128 { uint64_t v64, v0; };
+struct uint64_extra { uint64_t v, extra; };
+struct uint128_extra { struct uint128 v; uint64_t extra; };
+#endif
+
+/*----------------------------------------------------------------------------
+| These macros are used to isolate the differences in word order between big-
+| endian and little-endian platforms.
+*----------------------------------------------------------------------------*/
+#ifdef LITTLEENDIAN
+#define wordIncr 1
+#define indexWord( total, n ) (n)
+#define indexWordHi( total ) ((total) - 1)
+#define indexWordLo( total ) 0
+#define indexMultiword( total, m, n ) (n)
+#define indexMultiwordHi( total, n ) ((total) - (n))
+#define indexMultiwordLo( total, n ) 0
+#define indexMultiwordHiBut( total, n ) (n)
+#define indexMultiwordLoBut( total, n ) 0
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 }
+#else
+#define wordIncr -1
+#define indexWord( total, n ) ((total) - 1 - (n))
+#define indexWordHi( total ) 0
+#define indexWordLo( total ) ((total) - 1)
+#define indexMultiword( total, m, n ) ((total) - 1 - (m))
+#define indexMultiwordHi( total, n ) 0
+#define indexMultiwordLo( total, n ) ((total) - (n))
+#define indexMultiwordHiBut( total, n ) 0
+#define indexMultiwordLoBut( total, n ) (n)
+#define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 }
+#endif
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h
new file mode 100644
index 0000000..a0fcfd8
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h
@@ -0,0 +1,282 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#ifndef primitives_h
+#define primitives_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "primitiveTypes.h"
+
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must be in
+| the range 1 to 63. If any nonzero bits are shifted off, they are "jammed"
+| into the least-significant bit of the shifted value by setting the least-
+| significant bit to 1. This shifted-and-jammed value is returned.
+*----------------------------------------------------------------------------*/
+INLINE
+uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist )
+    { return a>>dist | ((a & (((uint_fast64_t) 1<<dist) - 1)) != 0); }
+
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero. If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1. This shifted-and-jammed value is returned.
+| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+| greater than 32, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+INLINE uint32_t softfloat_shiftRightJam32( uint32_t a, uint_fast16_t dist )
+{
+    return
+        (dist < 31) ? a>>dist | ((uint32_t) (a<<(-dist & 31)) != 0) : (a != 0);
+}
+
+/*----------------------------------------------------------------------------
+| Shifts 'a' right by the number of bits given in 'dist', which must not
+| be zero. If any nonzero bits are shifted off, they are "jammed" into the
+| least-significant bit of the shifted value by setting the least-significant
+| bit to 1. This shifted-and-jammed value is returned.
+| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+| greater than 64, the result will be either 0 or 1, depending on whether 'a'
+| is zero or nonzero.
+*----------------------------------------------------------------------------*/
+INLINE uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist )
+{
+    return
+        (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0) : (a != 0);
+}
+
+/*----------------------------------------------------------------------------
+| A constant table that translates an 8-bit unsigned integer (the array index)
+| into the number of leading 0 bits before the most-significant 1 of that
+| integer. For integer zero (index 0), the corresponding table element is 8.
+*----------------------------------------------------------------------------*/
+extern const uint_least8_t softfloat_countLeadingZeros8[256];
+
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| 'a'. If 'a' is zero, 32 is returned.
+*----------------------------------------------------------------------------*/
+INLINE uint_fast8_t softfloat_countLeadingZeros32( uint32_t a )
+{
+    uint_fast8_t count = 0;
+    if ( a < 0x10000 ) {
+        count = 16;
+        a <<= 16;
+    }
+    if ( a < 0x1000000 ) {
+        count += 8;
+        a <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[a>>24];
+    return count;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the number of leading 0 bits before the most-significant 1 bit of
+| 'a'. If 'a' is zero, 64 is returned.
+*----------------------------------------------------------------------------*/
+uint_fast8_t softfloat_countLeadingZeros64( uint64_t a );
+
+extern const uint16_t softfloat_approxRecip_1k0s[16];
+extern const uint16_t softfloat_approxRecip_1k1s[16];
+
+/*----------------------------------------------------------------------------
+| Returns an approximation to the reciprocal of the number represented by 'a',
+| where 'a' is interpreted as an unsigned fixed-point number with one integer
+| bit and 31 fraction bits. The 'a' input must be "normalized", meaning that
+| its most-significant bit (bit 31) must be 1. Thus, if A is the value of
+| the fixed-point interpretation of 'a', then 1 <= A < 2. The returned value
+| is interpreted as a pure unsigned fraction, having no integer bits and 32
+| fraction bits. The approximation returned is never greater than the true
+| reciprocal 1/A, and it differs from the true reciprocal by at most 2.006 ulp
+| (units in the last place).
+*----------------------------------------------------------------------------*/
+#ifdef SOFTFLOAT_FAST_DIV64TO32
+#define softfloat_approxRecip32_1( a ) ((uint32_t) (UINT64_C( 0x7FFFFFFFFFFFFFFF ) / (uint32_t) (a)))
+#endif
+
+extern const uint16_t softfloat_approxRecipSqrt_1k0s[16];
+extern const uint16_t softfloat_approxRecipSqrt_1k1s[16];
+
+/*----------------------------------------------------------------------------
+| Returns an approximation to the reciprocal of the square root of the number
+| represented by 'a', where 'a' is interpreted as an unsigned fixed-point
+| number either with one integer bit and 31 fraction bits or with two integer
+| bits and 30 fraction bits. The format of 'a' is determined by 'oddExpA',
+| which must be either 0 or 1. If 'oddExpA' is 1, 'a' is interpreted as
+| having one integer bit, and if 'oddExpA' is 0, 'a' is interpreted as having
+| two integer bits. The 'a' input must be "normalized", meaning that its
+| most-significant bit (bit 31) must be 1. Thus, if A is the value of the
+| fixed-point interpretation of 'a', it follows that 1 <= A < 2 when 'oddExpA'
+| is 1, and 2 <= A < 4 when 'oddExpA' is 0.
+| The returned value is interpreted as a pure unsigned fraction, having
+| no integer bits and 32 fraction bits. The approximation returned is never
+| greater than the true reciprocal 1/sqrt(A), and it differs from the true
+| reciprocal by at most 2.06 ulp (units in the last place). The approximation
+| returned is also always within the range 0.5 to 1; thus, the most-
+| significant bit of the result is always set.
+*----------------------------------------------------------------------------*/
+uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a );
+
+/*----------------------------------------------------------------------------
+| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is
+| defined.
+*----------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' left by the
+| number of bits given in 'dist', which must be in the range 1 to 63.
+*----------------------------------------------------------------------------*/
+INLINE
+struct uint128
+ softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist )
+{
+    struct uint128 z;
+    z.v64 = a64<<dist | a0>>(-dist & 63);
+    z.v0 = a0<<dist;
+    return z;
+}
+
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the
+| number of bits given in 'dist', which must be in the range 1 to 63. If any
+| nonzero bits are shifted off, they are "jammed" into the least-significant
+| bit of the shifted value by setting the least-significant bit to 1. This
+| shifted-and-jammed value is returned.
+*----------------------------------------------------------------------------*/
+INLINE
+struct uint128
+ softfloat_shortShiftRightJam128(
+     uint64_t a64, uint64_t a0, uint_fast8_t dist )
+{
+    uint_fast8_t negDist = -dist;
+    struct uint128 z;
+    z.v64 = a64>>dist;
+    z.v0 =
+        a64<<(negDist & 63) | a0>>dist
+            | ((uint64_t) (a0<<(negDist & 63)) != 0);
+    return z;
+}
+
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a' and 'extra' right by 64
+| _plus_ the number of bits given in 'dist', which must not be zero. This
+| shifted value is at most 64 nonzero bits and is returned in the 'v' field
+| of the 'struct uint64_extra' result. The 64-bit 'extra' field of the result
+| contains a value formed as follows from the bits that were shifted off: The
+| _last_ bit shifted off is the most-significant bit of the 'extra' field, and
+| the other 63 bits of the 'extra' field are all zero if and only if _all_but_
+| _the_last_ bits shifted off were all zero.
+| (This function makes more sense if 'a' and 'extra' are considered to form
+| an unsigned fixed-point number with binary point between 'a' and 'extra'.
+| This fixed-point value is shifted right by the number of bits given in
+| 'dist', and the integer part of this shifted value is returned in the 'v'
+| field of the result. The fractional part of the shifted value is modified
+| as described above and returned in the 'extra' field of the result.)
+*----------------------------------------------------------------------------*/
+INLINE
+struct uint64_extra
+ softfloat_shiftRightJam64Extra(
+     uint64_t a, uint64_t extra, uint_fast32_t dist )
+{
+    struct uint64_extra z;
+    if ( dist < 64 ) {
+        z.v = a>>dist;
+        z.extra = a<<(-dist & 63);
+    } else {
+        z.v = 0;
+        z.extra = (dist == 64) ? a : (a != 0);
+    }
+    z.extra |= (extra != 0);
+    return z;
+}
+
+/*----------------------------------------------------------------------------
+| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the
+| number of bits given in 'dist', which must not be zero. If any nonzero bits
+| are shifted off, they are "jammed" into the least-significant bit of the
+| shifted value by setting the least-significant bit to 1. This shifted-and-
+| jammed value is returned.
+| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+| greater than 128, the result will be either 0 or 1, depending on whether the
+| original 128 bits are all zeros.
+*----------------------------------------------------------------------------*/
+struct uint128
+ softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist );
+
+/*----------------------------------------------------------------------------
+| Returns the sum of the 128-bit integer formed by concatenating 'a64' and
+| 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. The
+| addition is modulo 2^128, so any carry out is lost.
+*----------------------------------------------------------------------------*/
+INLINE
+struct uint128
+ softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 z;
+    z.v0 = a0 + b0;
+    z.v64 = a64 + b64 + (z.v0 < a0);
+    return z;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the difference of the 128-bit integer formed by concatenating 'a64'
+| and 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'.
+| The subtraction is modulo 2^128, so any borrow out (carry out) is lost.
+*----------------------------------------------------------------------------*/
+INLINE
+struct uint128
+ softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+    struct uint128 z;
+    z.v0 = a0 - b0;
+    z.v64 = a64 - b64;
+    z.v64 -= (a0 < b0);
+    return z;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the 128-bit product of 'a' and 'b'.
+*----------------------------------------------------------------------------*/
+struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b );
+
+#endif
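The 128-bit add above detects carry without any wider type: the low halves are summed first, and the comparison (z.v0 < a0) is 1 exactly when the low sum wrapped around. A standalone sketch (not from the patch) of that carry logic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t a64 = 0, a0 = UINT64_C( 0xFFFFFFFFFFFFFFFF );
    uint64_t b64 = 0, b0 = 1;
    uint64_t z0  = a0 + b0;                 /* wraps to 0 */
    uint64_t z64 = a64 + b64 + (z0 < a0);   /* wraparound detected: carry 1 */
    printf( "0x%016llX 0x%016llX\n",
            (unsigned long long) z64, (unsigned long long) z0 );
    return 0;                               /* prints the 128-bit value 2^64 */
}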
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h
new file mode 100644
index 0000000..9e28a57
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h
@@ -0,0 +1,167 @@
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+/*============================================================================
+| Note: If SoftFloat is made available as a general library for programs to
+| use, it is strongly recommended that a platform-specific version of this
+| header, "softfloat.h", be created that folds in "softfloat_types.h" and that
+| eliminates all dependencies on compile-time macros.
+*============================================================================*/
+
+#ifndef softfloat_h
+#define softfloat_h 1
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "softfloat_types.h"
+
+#ifndef THREAD_LOCAL
+#define THREAD_LOCAL
+#endif
+
+/*----------------------------------------------------------------------------
+| Software floating-point underflow tininess-detection mode.
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL uint_fast8_t softfloat_detectTininess;
+enum {
+    softfloat_tininess_beforeRounding = 0,
+    softfloat_tininess_afterRounding  = 1
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point rounding mode. (Mode "odd" is supported only if
+| SoftFloat is compiled with macro 'SOFTFLOAT_ROUND_ODD' defined.)
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL uint_fast8_t softfloat_roundingMode;
+enum {
+    softfloat_round_near_even   = 0,
+    softfloat_round_minMag      = 1,
+    softfloat_round_min         = 2,
+    softfloat_round_max         = 3,
+    softfloat_round_near_maxMag = 4,
+    softfloat_round_odd         = 5
+};
+
+/*----------------------------------------------------------------------------
+| Software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+extern THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags;
+enum {
+    softfloat_flag_inexact   =  1,
+    softfloat_flag_underflow =  2,
+    softfloat_flag_overflow  =  4,
+    softfloat_flag_infinite  =  8,
+    softfloat_flag_invalid   = 16
+};
+
+/*----------------------------------------------------------------------------
+| Routine to raise any or all of the software floating-point exception flags.
+*----------------------------------------------------------------------------*/
+void softfloat_raiseFlags( uint_fast8_t );
+
+/*----------------------------------------------------------------------------
+| Integer-to-floating-point conversion routines.
+*----------------------------------------------------------------------------*/
+float32_t ui32_to_f32( uint32_t );
+float64_t ui32_to_f64( uint32_t );
+float32_t ui64_to_f32( uint64_t );
+float64_t ui64_to_f64( uint64_t );
+float32_t i32_to_f32( int32_t );
+float64_t i32_to_f64( int32_t );
+float32_t i64_to_f32( int64_t );
+float64_t i64_to_f64( int64_t );
+
+/*----------------------------------------------------------------------------
+| 32-bit (single-precision) floating-point operations.
+*----------------------------------------------------------------------------*/
+uint_fast32_t f32_to_ui32( float32_t, uint_fast8_t, bool );
+uint_fast64_t f32_to_ui64( float32_t, uint_fast8_t, bool );
+int_fast32_t f32_to_i32( float32_t, uint_fast8_t, bool );
+int_fast64_t f32_to_i64( float32_t, uint_fast8_t, bool );
+uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool );
+uint_fast64_t f32_to_ui64_r_minMag( float32_t, bool );
+int_fast32_t f32_to_i32_r_minMag( float32_t, bool );
+int_fast64_t f32_to_i64_r_minMag( float32_t, bool );
+float16_t f32_to_f16( float32_t );
+float64_t f32_to_f64( float32_t );
+float32_t f32_roundToInt( float32_t, uint_fast8_t, bool );
+float32_t f32_add( float32_t, float32_t );
+float32_t f32_sub( float32_t, float32_t );
+float32_t f32_mul( float32_t, float32_t );
+float32_t f32_mulAdd( float32_t, float32_t, float32_t );
+float32_t f32_div( float32_t, float32_t );
+float32_t f32_rem( float32_t, float32_t );
+float32_t f32_sqrt( float32_t );
+bool f32_eq( float32_t, float32_t );
+bool f32_le( float32_t, float32_t );
+bool f32_lt( float32_t, float32_t );
+bool f32_eq_signaling( float32_t, float32_t );
+bool f32_le_quiet( float32_t, float32_t );
+bool f32_lt_quiet( float32_t, float32_t );
+bool f32_isSignalingNaN( float32_t );
+
+/*----------------------------------------------------------------------------
+| 64-bit (double-precision) floating-point operations.
+*----------------------------------------------------------------------------*/ +uint_fast32_t f64_to_ui32( float64_t, uint_fast8_t, bool ); +uint_fast64_t f64_to_ui64( float64_t, uint_fast8_t, bool ); +int_fast32_t f64_to_i32( float64_t, uint_fast8_t, bool ); +int_fast64_t f64_to_i64( float64_t, uint_fast8_t, bool ); +uint_fast32_t f64_to_ui32_r_minMag( float64_t, bool ); +uint_fast64_t f64_to_ui64_r_minMag( float64_t, bool ); +int_fast32_t f64_to_i32_r_minMag( float64_t, bool ); +int_fast64_t f64_to_i64_r_minMag( float64_t, bool ); +float32_t f64_to_f32( float64_t ); +float64_t f64_roundToInt( float64_t, uint_fast8_t, bool ); +float64_t f64_add( float64_t, float64_t ); +float64_t f64_sub( float64_t, float64_t ); +float64_t f64_mul( float64_t, float64_t ); +float64_t f64_mulAdd( float64_t, float64_t, float64_t ); +float64_t f64_div( float64_t, float64_t ); +float64_t f64_rem( float64_t, float64_t ); +float64_t f64_sqrt( float64_t ); +bool f64_eq( float64_t, float64_t ); +bool f64_le( float64_t, float64_t ); +bool f64_lt( float64_t, float64_t ); +bool f64_eq_signaling( float64_t, float64_t ); +bool f64_le_quiet( float64_t, float64_t ); +bool f64_lt_quiet( float64_t, float64_t ); +bool f64_isSignalingNaN( float64_t ); + +#endif + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h new file mode 100644 index 0000000..af1888f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h @@ -0,0 +1,81 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
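A minimal usage sketch for the API declared in softfloat.h above: select a rounding mode, run one operation, and inspect the sticky exception flags. Every identifier used here appears in the declarations above; only the test values and the printf formatting are this sketch's own, and it assumes the program is linked against the SoftFloat objects added by this patch.

#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    softfloat_roundingMode = softfloat_round_near_even;
    softfloat_exceptionFlags = 0;

    float32_t a = ui32_to_f32( 1 );   /* 1.0f */
    float32_t b = ui32_to_f32( 3 );   /* 3.0f */
    float32_t q = f32_div( a, b );    /* 1/3 is not exactly representable */

    printf( "raw bits of 1/3: 0x%08X\n", (unsigned) q.v );
    if ( softfloat_exceptionFlags & softfloat_flag_inexact ) {
        printf( "inexact flag raised, as expected\n" );
    }
    return 0;
}

Note the flags are sticky: the library ORs bits in but never clears them, so callers reset softfloat_exceptionFlags themselves before a sequence they want to test.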
+
+=============================================================================*/
+
+#ifndef softfloat_types_h
+#define softfloat_types_h 1
+
+#include <stdint.h>
+
+/*----------------------------------------------------------------------------
+| Types used to pass 16-bit, 32-bit, 64-bit, and 128-bit floating-point
+| arguments and results to/from functions. These types must be exactly
+| 16 bits, 32 bits, 64 bits, and 128 bits in size, respectively. Where a
+| platform has "native" support for IEEE-Standard floating-point formats,
+| the types below may, if desired, be defined as aliases for the native types
+| (typically 'float' and 'double', and possibly 'long double').
+*----------------------------------------------------------------------------*/
+typedef struct { uint16_t v; } float16_t;
+typedef struct { uint32_t v; } float32_t;
+typedef struct { uint64_t v; } float64_t;
+typedef struct { uint64_t v[2]; } float128_t;
+
+/*----------------------------------------------------------------------------
+| The format of an 80-bit extended floating-point number in memory. This
+| structure must contain a 16-bit field named 'signExp' and a 64-bit field
+| named 'signif'.
+*----------------------------------------------------------------------------*/
+#ifdef LITTLEENDIAN
+struct extFloat80M { uint64_t signif; uint16_t signExp; };
+#else
+struct extFloat80M { uint16_t signExp; uint64_t signif; };
+#endif
+
+/*----------------------------------------------------------------------------
+| The type used to pass 80-bit extended floating-point arguments and
+| results to/from functions. This type must have size identical to
+| 'struct extFloat80M'. Type 'extFloat80_t' can be defined as an alias for
+| 'struct extFloat80M'. Alternatively, if a platform has "native" support
+| for IEEE-Standard 80-bit extended floating-point, it may be possible,
+| if desired, to define 'extFloat80_t' as an alias for the native type
+| (presumably either 'long double' or a nonstandard compiler-intrinsic type).
+| In that case, the 'signif' and 'signExp' fields of 'struct extFloat80M'
+| must align exactly with the locations in memory of the sign, exponent, and
+| significand of the native type.
+*----------------------------------------------------------------------------*/
+typedef struct extFloat80M extFloat80_t;
+
+#endif
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c
new file mode 100644
index 0000000..ba64781
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c
@@ -0,0 +1,126 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+
+float32_t softfloat_addMagsF32( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    int_fast16_t expDiff;
+    uint_fast32_t uiZ;
+    bool signZ;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( ! expA ) {
+            uiZ = uiA + sigB;
+            goto uiZ;
+        }
+        if ( expA == 0xFF ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            uiZ = uiA;
+            goto uiZ;
+        }
+        signZ = signF32UI( uiA );
+        expZ = expA;
+        sigZ = 0x01000000 + sigA + sigB;
+        if ( ! (sigZ & 1) && (expZ < 0xFE) ) {
+            uiZ = packToF32UI( signZ, expZ, sigZ>>1 );
+            goto uiZ;
+        }
+        sigZ <<= 6;
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        signZ = signF32UI( uiA );
+        sigA <<= 6;
+        sigB <<= 6;
+        if ( expDiff < 0 ) {
+            if ( expB == 0xFF ) {
+                if ( sigB ) goto propagateNaN;
+                uiZ = packToF32UI( signZ, 0xFF, 0 );
+                goto uiZ;
+            }
+            expZ = expB;
+            sigA += expA ? 0x20000000 : sigA;
+            sigA = softfloat_shiftRightJam32( sigA, -expDiff );
+        } else {
+            if ( expA == 0xFF ) {
+                if ( sigA ) goto propagateNaN;
+                uiZ = uiA;
+                goto uiZ;
+            }
+            expZ = expA;
+            sigB += expB ? 0x20000000 : sigB;
+            sigB = softfloat_shiftRightJam32( sigB, expDiff );
+        }
+        sigZ = 0x20000000 + sigA + sigB;
+        if ( sigZ < 0x40000000 ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    }
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c
new file mode 100644
index 0000000..63e1afe
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c
@@ -0,0 +1,128 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+
+float64_t
+ softfloat_addMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    int_fast16_t expDiff;
+    uint_fast64_t uiZ;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( !
expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! expA ) { + uiZ = uiA + sigB; + goto uiZ; + } + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + sigZ = UINT64_C( 0x0020000000000000 ) + sigA + sigB; + sigZ <<= 9; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigA <<= 9; + sigB <<= 9; + if ( expDiff < 0 ) { + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + } + expZ = expB; + if ( expA ) { + sigA += UINT64_C( 0x2000000000000000 ); + } else { + sigA <<= 1; + } + sigA = softfloat_shiftRightJam64( sigA, -expDiff ); + } else { + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + if ( expB ) { + sigB += UINT64_C( 0x2000000000000000 ); + } else { + sigB <<= 1; + } + sigB = softfloat_shiftRightJam64( sigB, expDiff ); + } + sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB; + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c new file mode 100644 index 0000000..2695f7f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c @@ -0,0 +1,74 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
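Both addMags routines above align the smaller operand by a "shift right and jam": bits shifted off the bottom are OR-reduced into the result's least significant bit, so later rounding can still tell the value was inexact. A standalone illustration of the idea follows; the helper name and test value are this sketch's own, and it is not the library's softfloat_shiftRightJam64 itself (which uses an equivalent but branchier formulation).

#include <stdint.h>
#include <stdio.h>

/* Shift 'a' right by 'dist', ORing any lost bits into the LSB ("jamming"). */
static uint64_t shiftRightJam64( uint64_t a, unsigned dist )
{
    return (dist < 64)
        ? a>>dist | ((a & ((UINT64_C(1)<<dist) - 1)) != 0)
        : (a != 0);   /* everything shifted out: only stickiness survives */
}

int main(void)
{
    /* The low nibble 0xF is discarded, but the sticky LSB records it, so a
       later round-to-nearest-even will not mistake this for an exact tie. */
    printf( "0x%016llX\n",
            (unsigned long long) shiftRightJam64( UINT64_C(0x010000000000000F), 4 ) );
    return 0;
}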
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+#ifndef softfloat_approxRecipSqrt32_1
+
+extern const uint16_t softfloat_approxRecipSqrt_1k0s[];
+extern const uint16_t softfloat_approxRecipSqrt_1k1s[];
+
+uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a )
+{
+    int index;
+    uint16_t eps, r0;
+    uint_fast32_t ESqrR0;
+    uint32_t sigma0;
+    uint_fast32_t r;
+    uint32_t sqrSigma0;
+
+    index = (a>>27 & 0xE) + oddExpA;
+    eps = (uint16_t) (a>>12);
+    r0 = softfloat_approxRecipSqrt_1k0s[index]
+             - ((softfloat_approxRecipSqrt_1k1s[index] * (uint_fast32_t) eps)
+                    >>20);
+    ESqrR0 = (uint_fast32_t) r0 * r0;
+    if ( ! oddExpA ) ESqrR0 <<= 1;
+    sigma0 = ~(uint_fast32_t) (((uint32_t) ESqrR0 * (uint_fast64_t) a)>>23);
+    r = ((uint_fast32_t) r0<<16) + ((r0 * (uint_fast64_t) sigma0)>>25);
+    sqrSigma0 = ((uint_fast64_t) sigma0 * sigma0)>>32;
+    r += ((uint32_t) ((r>>1) + (r>>3) - ((uint_fast32_t) r0<<14))
+              * (uint_fast64_t) sqrSigma0)
+             >>48;
+    if ( ! (r & 0x80000000) ) r = 0x80000000;
+    return r;
+
+}
+
+#endif
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
new file mode 100644
index 0000000..a60cf82
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
@@ -0,0 +1,49 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+const uint16_t softfloat_approxRecipSqrt_1k0s[16] = {
+    0xB4C9, 0xFFAB, 0xAA7D, 0xF11C, 0xA1C5, 0xE4C7, 0x9A43, 0xDA29,
+    0x93B5, 0xD0E5, 0x8DED, 0xC8B7, 0x88C6, 0xC16D, 0x8424, 0xBAE1
+};
+const uint16_t softfloat_approxRecipSqrt_1k1s[16] = {
+    0xA5A5, 0xEA42, 0x8C21, 0xC62D, 0x788F, 0xAA7F, 0x6928, 0x94B6,
+    0x5CC7, 0x8335, 0x52A6, 0x74E2, 0x4A3E, 0x68FE, 0x432B, 0x5EFD
+};
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c
new file mode 100644
index 0000000..0045741
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c
@@ -0,0 +1,73 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+
+#ifndef softfloat_countLeadingZeros64
+
+#define softfloat_countLeadingZeros64 softfloat_countLeadingZeros64
+#include "primitives.h"
+
+uint_fast8_t softfloat_countLeadingZeros64( uint64_t a )
+{
+    uint_fast8_t count;
+    uint32_t a32;
+
+    count = 0;
+    a32 = a>>32;
+    if ( ! a32 ) {
+        count = 32;
+        a32 = a;
+    }
+    /*------------------------------------------------------------------------
+    | From here, result is current count + count leading zeros of `a32'.
+    *------------------------------------------------------------------------*/
+    if ( a32 < 0x10000 ) {
+        count += 16;
+        a32 <<= 16;
+    }
+    if ( a32 < 0x1000000 ) {
+        count += 8;
+        a32 <<= 8;
+    }
+    count += softfloat_countLeadingZeros8[a32>>24];
+    return count;
+
+}
+
+#endif
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c
new file mode 100644
index 0000000..1158d01
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c
@@ -0,0 +1,59 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014 The Regents of the University of California.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitives.h"
+
+const uint_least8_t softfloat_countLeadingZeros8[256] = {
+    8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c
new file mode 100644
index 0000000..3b0fb96
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c
@@ -0,0 +1,67 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+#include "primitives.h"
+
+#ifndef softfloat_mul64To128
+
+struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b )
+{
+    uint32_t a32, a0, b32, b0;
+    struct uint128 z;
+    uint64_t mid1, mid;
+
+    a32 = a>>32;
+    a0 = a;
+    b32 = b>>32;
+    b0 = b;
+    z.v0 = (uint_fast64_t) a0 * b0;
+    mid1 = (uint_fast64_t) a32 * b0;
+    mid = mid1 + (uint_fast64_t) a0 * b32;
+    z.v64 = (uint_fast64_t) a32 * b32;
+    z.v64 += (uint_fast64_t) (mid < mid1)<<32 | mid>>32;
+    mid <<= 32;
+    z.v0 += mid;
+    z.v64 += (z.v0 < mid);
+    return z;
+
+}
+
+#endif
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c
new file mode 100644
index 0000000..d163ea0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c
@@ -0,0 +1,224 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t
+ softfloat_mulAddF32(
+     uint_fast32_t uiA, uint_fast32_t uiB, uint_fast32_t uiC, uint_fast8_t op )
+{
+    bool signA;
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    bool signC;
+    int_fast16_t expC;
+    uint_fast32_t sigC;
+    bool signProd;
+    uint_fast32_t magBits, uiZ;
+    struct exp16_sig32 normExpSig;
+    int_fast16_t expProd;
+    uint_fast64_t sigProd;
+    bool signZ;
+    int_fast16_t expZ;
+    uint_fast32_t sigZ;
+    int_fast16_t expDiff;
+    uint_fast64_t sig64Z, sig64C;
+    int_fast8_t shiftDist;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    signA = signF32UI( uiA );
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    signB = signF32UI( uiB );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    signC = signF32UI( uiC ) ^ (op == softfloat_mulAdd_subC);
+    expC = expF32UI( uiC );
+    sigC = fracF32UI( uiC );
+    signProd = signA ^ signB ^ (op == softfloat_mulAdd_subProd);
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0xFF ) {
+        if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN_ABC;
+        magBits = expB | sigB;
+        goto infProdArg;
+    }
+    if ( expB == 0xFF ) {
+        if ( sigB ) goto propagateNaN_ABC;
+        magBits = expA | sigA;
+        goto infProdArg;
+    }
+    if ( expC == 0xFF ) {
+        if ( sigC ) {
+            uiZ = 0;
+            goto propagateNaN_ZC;
+        }
+        uiZ = uiC;
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( ! expA ) {
+        if ( ! sigA ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF32Sig( sigA );
+        expA = normExpSig.exp;
+        sigA = normExpSig.sig;
+    }
+    if ( ! expB ) {
+        if ( ! sigB ) goto zeroProd;
+        normExpSig = softfloat_normSubnormalF32Sig( sigB );
+        expB = normExpSig.exp;
+        sigB = normExpSig.sig;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expProd = expA + expB - 0x7E;
+    sigA = (sigA | 0x00800000)<<7;
+    sigB = (sigB | 0x00800000)<<7;
+    sigProd = (uint_fast64_t) sigA * sigB;
+    if ( sigProd < UINT64_C( 0x2000000000000000 ) ) {
+        --expProd;
+        sigProd <<= 1;
+    }
+    signZ = signProd;
+    if ( ! expC ) {
+        if ( ! sigC ) {
+            expZ = expProd - 1;
+            sigZ = softfloat_shortShiftRightJam64( sigProd, 31 );
+            goto roundPack;
+        }
+        normExpSig = softfloat_normSubnormalF32Sig( sigC );
+        expC = normExpSig.exp;
+        sigC = normExpSig.sig;
+    }
+    sigC = (sigC | 0x00800000)<<6;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expProd - expC;
+    if ( signProd == signC ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expDiff <= 0 ) {
+            expZ = expC;
+            sigZ = sigC + softfloat_shiftRightJam64( sigProd, 32 - expDiff );
+        } else {
+            expZ = expProd;
+            sig64Z =
+                sigProd
+                    + softfloat_shiftRightJam64(
+                          (uint_fast64_t) sigC<<32, expDiff );
+            sigZ = softfloat_shortShiftRightJam64( sig64Z, 32 );
+        }
+        if ( sigZ < 0x40000000 ) {
+            --expZ;
+            sigZ <<= 1;
+        }
+    } else {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        sig64C = (uint_fast64_t) sigC<<32;
+        if ( expDiff < 0 ) {
+            signZ = signC;
+            expZ = expC;
+            sig64Z = sig64C - softfloat_shiftRightJam64( sigProd, -expDiff );
+        } else if ( ! expDiff ) {
+            expZ = expProd;
+            sig64Z = sigProd - sig64C;
+            if ( ! sig64Z ) goto completeCancellation;
+            if ( sig64Z & UINT64_C( 0x8000000000000000 ) ) {
+                signZ = ! signZ;
+                sig64Z = -sig64Z;
+            }
+        } else {
+            expZ = expProd;
+            sig64Z = sigProd - softfloat_shiftRightJam64( sig64C, expDiff );
+        }
+        shiftDist = softfloat_countLeadingZeros64( sig64Z ) - 1;
+        expZ -= shiftDist;
+        shiftDist -= 32;
+        if ( shiftDist < 0 ) {
+            sigZ = softfloat_shortShiftRightJam64( sig64Z, -shiftDist );
+        } else {
+            sigZ = (uint_fast32_t) sig64Z<<shiftDist;
+        }
+    }
+ roundPack:
+    return softfloat_roundPackToF32( signZ, expZ, sigZ );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ propagateNaN_ABC:
+    uiZ = softfloat_propagateNaNF32UI( uiA, uiB );
+    goto propagateNaN_ZC;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ infProdArg:
+    if ( magBits ) {
+        uiZ = packToF32UI( signProd, 0xFF, 0 );
+        if ( expC != 0xFF ) goto uiZ;
+        if ( sigC ) goto propagateNaN_ZC;
+        if ( signProd == signC ) goto uiZ;
+    }
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    uiZ = defaultNaNF32UI;
+ propagateNaN_ZC:
+    uiZ = softfloat_propagateNaNF32UI( uiZ, uiC );
+    goto uiZ;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ zeroProd:
+    uiZ = uiC;
+    if ( ! (expC | sigC) && (signProd != signC) ) {
+ completeCancellation:
+        uiZ =
+            packToF32UI(
+                (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+    }
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+
+float64_t
+ softfloat_mulAddF64(
+     uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC, uint_fast8_t op )
+{
+    bool signA;
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    bool signB;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    bool signC;
+    int_fast16_t expC;
+    uint_fast64_t sigC;
+    bool signZ;
+    uint_fast64_t magBits, uiZ;
+    struct exp16_sig64 normExpSig;
+    int_fast16_t expZ;
+    struct uint128 sig128Z;
+    uint_fast64_t sigZ;
+    int_fast16_t expDiff;
+    struct uint128 sig128C;
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    signA = signF64UI( uiA );
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    signB = signF64UI( uiB );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    signC = signF64UI( uiC ) ^ (op == softfloat_mulAdd_subC);
+    expC = expF64UI( uiC );
+    sigC = fracF64UI( uiC );
+    signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd);
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( expA == 0x7FF ) {
+        if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN_ABC;
+        magBits = expB | sigB;
+        goto infProdArg;
+    }
+    if ( expB == 0x7FF ) {
+        if ( sigB ) goto propagateNaN_ABC;
+        magBits = expA | sigA;
+        goto infProdArg;
+    }
+    if ( expC == 0x7FF ) {
+        if ( sigC ) {
+            uiZ = 0;
+            goto propagateNaN_ZC;
+        }
+        uiZ = uiC;
+        goto uiZ;
+    }
+    /*------------------------------------------------------------------------
*------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FE; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<10; + sig128Z = softfloat_mul64To128( sigA, sigB ); + if ( sig128Z.v64 < UINT64_C( 0x2000000000000000 ) ) { + --expZ; + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 ); + } + if ( ! expC ) { + if ( ! sigC ) { + --expZ; + sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF64Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | UINT64_C( 0x0010000000000000 ))<<9; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + sig128Z.v64 = softfloat_shiftRightJam64( sig128Z.v64, -expDiff ); + } else { + sig128Z = + softfloat_shortShiftRightJam128( sig128Z.v64, sig128Z.v0, 1 ); + } + } else if ( expDiff ) { + sig128C = softfloat_shiftRightJam128( sigC, 0, expDiff ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0); + } else { + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + sigZ = sig128Z.v64 | (sig128Z.v0 != 0); + } + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + sig128Z = softfloat_sub128( sigC, 0, sig128Z.v64, sig128Z.v0 ); + } else if ( ! expDiff ) { + sig128Z.v64 = sig128Z.v64 - sigC; + if ( ! (sig128Z.v64 | sig128Z.v0) ) goto completeCancellation; + if ( sig128Z.v64 & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + sig128Z = softfloat_sub128( 0, 0, sig128Z.v64, sig128Z.v0 ); + } + } else { + sig128Z = + softfloat_sub128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! 
sig128Z.v64 ) { + expZ -= 64; + sig128Z.v64 = sig128Z.v0; + sig128Z.v0 = 0; + } + shiftDist = softfloat_countLeadingZeros64( sig128Z.v64 ) - 1; + expZ -= shiftDist; + if ( shiftDist < 0 ) { + sigZ = softfloat_shortShiftRightJam64( sig128Z.v64, -shiftDist ); + } else { + sig128Z = + softfloat_shortShiftLeft128( + sig128Z.v64, sig128Z.v0, shiftDist ); + sigZ = sig128Z.v64; + } + sigZ |= (sig128Z.v0 != 0); + } + roundPack: + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + if ( expC != 0x7FF ) goto uiZ; + if ( sigC ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF64UI( uiZ, uiC ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ = uiC; + if ( ! (expC | sigC) && (signZ != signC) ) { + completeCancellation: + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c new file mode 100644 index 0000000..14e0811 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
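The point of softfloat_mulAddF64, completed above, is that a*b + c is rounded exactly once, unlike a separate multiply followed by an add. A sketch contrasting the two through the public entry points declared in softfloat.h (f64_mulAdd dispatches into this routine); the inputs are this sketch's own and, being exact here (3*7 - 21 == 0), only demonstrate the call shape rather than a double-rounding difference:

#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    float64_t a = ui64_to_f64( 3 );
    float64_t b = ui64_to_f64( 7 );
    float64_t c = i64_to_f64( -21 );

    float64_t fused = f64_mulAdd( a, b, c );           /* one rounding  */
    float64_t split = f64_add( f64_mul( a, b ), c );   /* two roundings */

    printf( "fused=0x%016llX split=0x%016llX\n",
            (unsigned long long) fused.v, (unsigned long long) split.v );
    return 0;
}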
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+float32_t
+ softfloat_normRoundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig )
+{
+    int_fast8_t shiftDist;
+    union ui32_f32 uZ;
+
+    shiftDist = softfloat_countLeadingZeros32( sig ) - 1;
+    exp -= shiftDist;
+    if ( (7 <= shiftDist) && ((unsigned int) exp < 0xFD) ) {
+        uZ.ui = packToF32UI( sign, sig ? exp : 0, sig<<(shiftDist - 7) );
+        return uZ.f;
+    } else {
+        return softfloat_roundPackToF32( sign, exp, sig<<shiftDist );
+    }
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+float64_t
+ softfloat_normRoundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig )
+{
+    int_fast8_t shiftDist;
+    union ui64_f64 uZ;
+
+    shiftDist = softfloat_countLeadingZeros64( sig ) - 1;
+    exp -= shiftDist;
+    if ( (10 <= shiftDist) && ((unsigned int) exp < 0x7FD) ) {
+        uZ.ui = packToF64UI( sign, sig ? exp : 0, sig<<(shiftDist - 10) );
+        return uZ.f;
+    } else {
+        return softfloat_roundPackToF64( sign, exp, sig<<shiftDist );
+    }
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t sig )
+{
+    int_fast8_t shiftDist;
+    struct exp16_sig32 z;
+
+    shiftDist = softfloat_countLeadingZeros32( sig ) - 8;
+    z.exp = 1 - shiftDist;
+    z.sig = sig<<shiftDist;
+    return z;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+
+struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t sig )
+{
+    int_fast8_t shiftDist;
+    struct exp16_sig64 z;
+
+    shiftDist = softfloat_countLeadingZeros64( sig ) - 11;
+    z.exp = 1 - shiftDist;
+    z.sig = sig<<shiftDist;
+    return z;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c
new file mode 100644
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float16_t
+ softfloat_roundPackToF16( bool sign, int_fast16_t exp, uint_fast16_t sig )
+{
+    uint_fast8_t roundingMode;
+    bool roundNearEven;
+    uint_fast8_t roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast16_t uiZ;
+    union ui16_f16 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x8;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ?
0xF + : 0; + } + roundBits = sig & 0xF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x1D <= (unsigned int) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) || (sig + roundIncrement < 0x8000); + sig = softfloat_shiftRightJam32( sig, -exp ); + exp = 0; + roundBits = sig & 0xF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( (0x1D < exp) || (0x8000 <= sig + roundIncrement) ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF16UI( sign, 0x1F, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>4; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast16_t) (! (roundBits ^ 8) & roundNearEven); + if ( ! sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF16UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c new file mode 100644 index 0000000..cc34508 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
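softfloat_roundPackToF16 above keeps 4 extra significand bits and rounds by adding half an ulp (0x8) before shifting them off; when the discarded bits are an exact tie, the result's LSB is cleared to get round-to-nearest-even. A worked instance of just that arithmetic, with a value chosen by this sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t sig = 0x0428;      /* integer part 0x42, round bits 0x8: a tie */
    uint16_t roundBits = sig & 0xF;
    uint16_t z = (sig + 0x8)>>4;                 /* 0x43 before the even fix */
    z &= ~(uint16_t) (! (roundBits ^ 8) & 1);    /* tie -> even: 0x42        */
    printf( "0x%03X rounds to 0x%02X\n", sig, z );
    return 0;
}

So 0x428 rounds down to 0x42 rather than up to 0x43: on a tie, the even neighbor wins, which is exactly what the masking line in the routine above implements.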
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float32_t
+ softfloat_roundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig )
+{
+    uint_fast8_t roundingMode;
+    bool roundNearEven;
+    uint_fast8_t roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast32_t uiZ;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x40;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0x7F
+                : 0;
+    }
+    roundBits = sig & 0x7F;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( 0xFD <= (unsigned int) exp ) {
+        if ( exp < 0 ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            isTiny =
+                (softfloat_detectTininess == softfloat_tininess_beforeRounding)
+                    || (exp < -1) || (sig + roundIncrement < 0x80000000);
+            sig = softfloat_shiftRightJam32( sig, -exp );
+            exp = 0;
+            roundBits = sig & 0x7F;
+            if ( isTiny && roundBits ) {
+                softfloat_raiseFlags( softfloat_flag_underflow );
+            }
+        } else if ( (0xFD < exp) || (0x80000000 <= sig + roundIncrement) ) {
+            /*----------------------------------------------------------------
+            *----------------------------------------------------------------*/
+            softfloat_raiseFlags(
+                softfloat_flag_overflow | softfloat_flag_inexact );
+            uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement;
+            goto uiZ;
+        }
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig = (sig + roundIncrement)>>7;
+    if ( roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+#ifdef SOFTFLOAT_ROUND_ODD
+        if ( roundingMode == softfloat_round_odd ) {
+            sig |= 1;
+            goto packReturn;
+        }
+#endif
+    }
+    sig &= ~(uint_fast32_t) (! (roundBits ^ 0x40) & roundNearEven);
+    if ( ! sig ) exp = 0;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ packReturn:
+    uiZ = packToF32UI( sign, exp, sig );
+ uiZ:
+    uZ.ui = uiZ;
+    return uZ.f;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c
new file mode 100644
index 0000000..aaff008
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c
@@ -0,0 +1,117 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "softfloat.h"
+
+float64_t
+ softfloat_roundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig )
+{
+    uint_fast8_t roundingMode;
+    bool roundNearEven;
+    uint_fast16_t roundIncrement, roundBits;
+    bool isTiny;
+    uint_fast64_t uiZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundingMode = softfloat_roundingMode;
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x200;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ?
0x3FF + : 0; + } + roundBits = sig & 0x3FF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x7FD <= (uint16_t) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) + || (sig + roundIncrement < UINT64_C( 0x8000000000000000 )); + sig = softfloat_shiftRightJam64( sig, -exp ); + exp = 0; + roundBits = sig & 0x3FF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( + (0x7FD < exp) + || (UINT64_C( 0x8000000000000000 ) <= sig + roundIncrement) + ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF64UI( sign, 0x7FF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>10; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast64_t) (! (roundBits ^ 0x200) & roundNearEven); + if ( ! sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF64UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c new file mode 100644 index 0000000..20a3ff4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
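The s_roundToI32.c file whose header appears above saturates and raises the invalid flag when a value cannot be represented as a 32-bit integer; the behavior is observable through the public conversion entry points from softfloat.h. A sketch with values chosen here (the exact saturated result comes from whatever i32_fromPosOverflow the port's specialize.h defines, typically INT32_MAX or a sentinel):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    softfloat_exceptionFlags = 0;
    float64_t big = ui64_to_f64( UINT64_C(1)<<40 );   /* 2^40 > INT32_MAX */
    int32_t z = f64_to_i32( big, softfloat_round_near_even, true );
    printf( "z=%ld invalid=%d\n", (long) z,
            !!(softfloat_exceptionFlags & softfloat_flag_invalid) );
    return 0;
}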
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t
+ softfloat_roundToI32(
+     bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact )
+{
+    bool roundNearEven;
+    uint_fast16_t roundIncrement, roundBits;
+    uint_fast32_t sig32;
+    union { uint32_t ui; int32_t i; } uZ;
+    int_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x800;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0xFFF
+                : 0;
+    }
+    roundBits = sig & 0xFFF;
+    sig += roundIncrement;
+    if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid;
+    sig32 = sig>>12;
+    sig32 &= ~(uint_fast32_t) (! (roundBits ^ 0x800) & roundNearEven);
+    uZ.ui = sign ? -sig32 : sig32;
+    z = uZ.i;
+    if ( z && ((z < 0) ^ sign) ) goto invalid;
+    if ( exact && roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c new file mode 100644 index 0000000..fcddbc2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c @@ -0,0 +1,89 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t
+ softfloat_roundToI64(
+     bool sign,
+     uint_fast64_t sig,
+     uint_fast64_t sigExtra,
+     uint_fast8_t roundingMode,
+     bool exact
+ )
+{
+    bool roundNearEven, doIncrement;
+    union { uint64_t ui; int64_t i; } uZ;
+    int_fast64_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        doIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                && sigExtra;
+    }
+    if ( doIncrement ) {
+        ++sig;
+        if ( ! sig ) goto invalid;
+        sig &=
+            ~(uint_fast64_t)
+                 (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                      & roundNearEven);
+    }
+    uZ.ui = sign ? -sig : sig;
+    z = uZ.i;
+    if ( z && ((z < 0) ^ sign) ) goto invalid;
+    if ( exact && sigExtra ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c new file mode 100644 index 0000000..180899b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c @@ -0,0 +1,80 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. 
Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t
+ softfloat_roundToUI32(
+     bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact )
+{
+    bool roundNearEven;
+    uint_fast16_t roundIncrement, roundBits;
+    uint_fast32_t z;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    roundIncrement = 0x800;
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        roundIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                ? 0xFFF
+                : 0;
+    }
+    roundBits = sig & 0xFFF;
+    sig += roundIncrement;
+    if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid;
+    z = sig>>12;
+    z &= ~(uint_fast32_t) (! (roundBits ^ 0x800) & roundNearEven);
+    if ( sign && z ) goto invalid;
+    if ( exact && roundBits ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return z;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c new file mode 100644 index 0000000..de35b5e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c @@ -0,0 +1,85 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution. 
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t
+ softfloat_roundToUI64(
+     bool sign,
+     uint_fast64_t sig,
+     uint_fast64_t sigExtra,
+     uint_fast8_t roundingMode,
+     bool exact
+ )
+{
+    bool roundNearEven, doIncrement;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    roundNearEven = (roundingMode == softfloat_round_near_even);
+    doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra);
+    if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) {
+        doIncrement =
+            (roundingMode
+                 == (sign ? softfloat_round_min : softfloat_round_max))
+                && sigExtra;
+    }
+    if ( doIncrement ) {
+        ++sig;
+        if ( ! sig ) goto invalid;
+        sig &=
+            ~(uint_fast64_t)
+                 (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF ))
+                      & roundNearEven);
+    }
+    if ( sign && sig ) goto invalid;
+    if ( exact && sigExtra ) {
+        softfloat_exceptionFlags |= softfloat_flag_inexact;
+    }
+    return sig;
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+ invalid:
+    softfloat_raiseFlags( softfloat_flag_invalid );
+    return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c new file mode 100644 index 0000000..7f3d4c8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c @@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution. 
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+#include "primitives.h"
+
+#ifndef softfloat_shiftRightJam128
+
+struct uint128
+ softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist )
+{
+    uint_fast8_t u8NegDist;
+    struct uint128 z;
+
+    if ( dist < 64 ) {
+        u8NegDist = -dist;
+        z.v64 = a64>>dist;
+        z.v0 =
+            a64<<(u8NegDist & 63) | a0>>dist
+                | ((uint64_t) (a0<<(u8NegDist & 63)) != 0);
+    } else {
+        z.v64 = 0;
+        z.v0 =
+            (dist < 127)
+                ? a64>>(dist & 63)
+                      | (((a64 & (((uint_fast64_t) 1<<(dist & 63)) - 1)) | a0)
+                             != 0)
+                : ((a64 | a0) != 0);
+    }
+    return z;
+
+}
+
+#endif
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c new file mode 100644 index 0000000..86e89f2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c @@ -0,0 +1,143 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float32_t softfloat_subMagsF32( uint_fast32_t uiA, uint_fast32_t uiB )
+{
+    int_fast16_t expA;
+    uint_fast32_t sigA;
+    int_fast16_t expB;
+    uint_fast32_t sigB;
+    int_fast16_t expDiff;
+    uint_fast32_t uiZ;
+    int_fast32_t sigDiff;
+    bool signZ;
+    int_fast8_t shiftDist;
+    int_fast16_t expZ;
+    uint_fast32_t sigX, sigY;
+    union ui32_f32 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF32UI( uiA );
+    sigA = fracF32UI( uiA );
+    expB = expF32UI( uiB );
+    sigB = fracF32UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expA == 0xFF ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            uiZ = defaultNaNF32UI;
+            goto uiZ;
+        }
+        sigDiff = sigA - sigB;
+        if ( ! sigDiff ) {
+            uiZ =
+                packToF32UI(
+                    (softfloat_roundingMode == softfloat_round_min), 0, 0 );
+            goto uiZ;
+        }
+        if ( expA ) --expA;
+        signZ = signF32UI( uiA );
+        if ( sigDiff < 0 ) {
+            signZ = ! signZ;
+            sigDiff = -sigDiff;
+        }
+        shiftDist = softfloat_countLeadingZeros32( sigDiff ) - 8;
+        expZ = expA - shiftDist;
+        if ( expZ < 0 ) {
+            shiftDist = expA;
+            expZ = 0;
+        }
+        uiZ = packToF32UI( signZ, expZ, sigDiff<
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+float64_t
+ softfloat_subMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ )
+{
+    int_fast16_t expA;
+    uint_fast64_t sigA;
+    int_fast16_t expB;
+    uint_fast64_t sigB;
+    int_fast16_t expDiff;
+    uint_fast64_t uiZ;
+    int_fast64_t sigDiff;
+    int_fast8_t shiftDist;
+    int_fast16_t expZ;
+    uint_fast64_t sigZ;
+    union ui64_f64 uZ;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expA = expF64UI( uiA );
+    sigA = fracF64UI( uiA );
+    expB = expF64UI( uiB );
+    sigB = fracF64UI( uiB );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    expDiff = expA - expB;
+    if ( ! expDiff ) {
+        /*--------------------------------------------------------------------
+        *--------------------------------------------------------------------*/
+        if ( expA == 0x7FF ) {
+            if ( sigA | sigB ) goto propagateNaN;
+            softfloat_raiseFlags( softfloat_flag_invalid );
+            uiZ = defaultNaNF64UI;
+            goto uiZ;
+        }
+        sigDiff = sigA - sigB;
+        if ( ! 
sigDiff ) { + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + if ( sigDiff < 0 ) { + signZ = ! signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros64( sigDiff ) - 11; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + uiZ = packToF64UI( signZ, expZ, sigDiff< +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#ifndef THREAD_LOCAL +#define THREAD_LOCAL +#endif + +THREAD_LOCAL uint_fast8_t softfloat_roundingMode = softfloat_round_near_even; +THREAD_LOCAL uint_fast8_t softfloat_detectTininess = init_detectTininess; +THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags = 0; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c new file mode 100644 index 0000000..7e5ece6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c @@ -0,0 +1,57 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui32_to_f32( uint32_t a ) +{ + union ui32_f32 uZ; + + if ( ! 
a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & 0x80000000 ) { + return softfloat_roundPackToF32( 0, 0x9D, a>>1 | (a & 1) ); + } else { + return softfloat_normRoundPackToF32( 0, 0x9C, a ); + } + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c new file mode 100644 index 0000000..5e5f843 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui32_to_f64( uint32_t a ) +{ + uint_fast64_t uiZ; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + if ( ! a ) { + uiZ = 0; + } else { + shiftDist = softfloat_countLeadingZeros32( a ) + 21; + uiZ = + packToF64UI( 0, 0x432 - shiftDist, (uint_fast64_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui64_to_f32( uint64_t a ) +{ + int_fast8_t shiftDist; + union ui32_f32 u; + uint_fast32_t sig; + + shiftDist = softfloat_countLeadingZeros64( a ) - 40; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF32UI( + 0, 0x95 - shiftDist, (uint_fast32_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui64_to_f64( uint64_t a ) +{ + union ui64_f64 uZ; + + if ( ! 
a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & UINT64_C( 0x8000000000000000 ) ) { + return + softfloat_roundPackToF64( + 0, 0x43D, softfloat_shortShiftRightJam64( a, 1 ) ); + } else { + return softfloat_normRoundPackToF64( 0, 0x43C, a ); + } + +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h new file mode 100644 index 0000000..95fc719 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NV_FLOAT_H) +#define NV_FLOAT_H + +/* Floating-point constants, expressed as integer constants */ + +#define NV_FLOAT_NEG_ONE 0xBF800000 /* -1.00f */ +#define NV_FLOAT_NEG_QUARTER 0xBE800000 /* -0.25f */ +#define NV_FLOAT_ZERO 0x00000000 /* 0.00f */ +#define NV_FLOAT_QUARTER 0x3E800000 /* 0.25f */ +#define NV_FLOAT_HALF 0x3F000000 /* 0.50f */ +#define NV_FLOAT_ONE 0x3F800000 /* 1.00f */ +#define NV_FLOAT_TWO 0x40000000 /* 2.00f */ +#define NV_FLOAT_255 0x437F0000 /* 255.00f */ +#define NV_FLOAT_1024 0x44800000 /* 1024.00f */ +#define NV_FLOAT_65536 0x47800000 /* 65536.00f */ + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h new file mode 100644 index 0000000..8c62ef5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_ASSERT_H__ +#define __NV_ASSERT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * nvAssert() has three possible configurations: __COVERITY__, DEBUG, + * and non-DEBUG. In DEBUG builds, the includer should provide an + * implementation of nvDebugAssert(). + */ + +#if defined(__COVERITY__) + /* + * Coverity assert handling -- basically inform coverity that the + * condition is verified independently and coverity can assume that + * it is true. + */ + void __coverity_panic__(void); + + #define nvAssert(exp) \ + do { \ + if (exp) { \ + } else { \ + __coverity_panic__(); \ + } \ + } while (0) + +#elif defined(DEBUG) + + void nvDebugAssert(const char *expString, const char *filenameString, + const char *funcString, const unsigned int lineNumber); + + /* + * Assert that (exp) is TRUE. We use 'if (exp) { } else { fail }' + * instead of 'if (!(exp)) { fail }' to cause warnings when people + * accidentally write nvAssert(foo = bar) instead of nvAssert(foo == + * bar). + */ + #define nvAssert(exp) \ + do { \ + if (exp) { \ + } else { \ + nvDebugAssert(#exp, __FILE__, __FUNCTION__, __LINE__); \ + } \ + } while (0) + +#else + + #define nvAssert(exp) {} + +#endif + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_ASSERT_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h new file mode 100644 index 0000000..a7701d1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NV_COMMON_UTILS_H__ +#define __NV_COMMON_UTILS_H__ + +#include "nvtypes.h" + +#if !defined(TRUE) +#define TRUE NV_TRUE +#endif + +#if !defined(FALSE) +#define FALSE NV_FALSE +#endif + +#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0) + +/* Get the length of a statically-sized array. */ +#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0])) + +#define NV_INVALID_HEAD 0xFFFFFFFF + +#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0) + +#if !defined(NV_MIN) +# define NV_MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c) +#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d) + +#if !defined(NV_MAX) +# define NV_MAX(a,b) (((a)>(b))?(a):(b)) +#endif + +#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c) +#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d) + +static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max) +{ + if (val < min) { + return min; + } + if (val > max) { + return max; + } + return val; +} + +#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0)) + +/* + * Macros used for computing palette entries: + * + * NV_UNDER_REPLICATE(val, source_size, result_size) expands a value + * of source_size bits into a value of target_size bits by shifting + * the source value into the high bits and replicating the high bits + * of the value into the low bits of the result. + * + * PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component + * that has w bits to an appropriate entry in a LUT of 256 entries. + */ +static inline unsigned int NV_UNDER_REPLICATE(unsigned short val, + int source_size, + int result_size) +{ + return (val << (result_size - source_size)) | + (val >> ((source_size << 1) - result_size)); +} + + +static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth) +{ + return NV_UNDER_REPLICATE(val, depth, 8); +} + +#endif /* __NV_COMMON_UTILS_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h new file mode 100644 index 0000000..7c50e97 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h @@ -0,0 +1,369 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+/*
+ * This header file defines the types NVDpyId and NVDpyIdList, as well
+ * as inline functions to manipulate these types.  NVDpyId and
+ * NVDpyIdList should be treated as opaque by includers of this header
+ * file.
+ */
+
+#ifndef __NV_DPY_ID_H__
+#define __NV_DPY_ID_H__
+
+#include "nvtypes.h"
+#include "nvmisc.h"
+#include "nvlimits.h" /* NV_MAX_SUBDEVICES */
+
+typedef struct {
+    NvU32 opaqueDpyId;
+} NVDpyId;
+
+typedef struct {
+    NvU32 opaqueDpyIdList;
+} NVDpyIdList;
+
+#define NV_DPY_ID_MAX_SUBDEVICES NV_MAX_SUBDEVICES
+#define NV_DPY_ID_MAX_DPYS_IN_LIST 32
+
+/*
+ * For use in combination with nvDpyIdToPrintFormat(); e.g.,
+ *
+ *  printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n",
+ *         nvDpyIdToPrintFormat(dpyId));
+ *
+ * The includer should not make assumptions about the return type of
+ * nvDpyIdToPrintFormat().
+ */
+#define NV_DPY_ID_PRINT_FORMAT "0x%08x"
+
+/* functions to return an invalid DpyId and empty DpyIdList */
+
+static inline NVDpyId nvInvalidDpyId(void)
+{
+    NVDpyId dpyId = { 0 };
+    return dpyId;
+}
+
+static inline NVDpyIdList nvEmptyDpyIdList(void)
+{
+    NVDpyIdList dpyIdList = { 0 };
+    return dpyIdList;
+}
+
+static inline NVDpyIdList nvAllDpyIdList(void)
+{
+    NVDpyIdList dpyIdList = { ~0U };
+    return dpyIdList;
+}
+
+static inline void
+nvEmptyDpyIdListSubDeviceArray(NVDpyIdList dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
+{
+    int dispIndex;
+    for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
+        dpyIdList[dispIndex] = nvEmptyDpyIdList();
+    }
+}
+
+/* set operations on DpyIds and DpyIdLists: Add, Subtract, Intersect, Xor */
+
+static inline __attribute__ ((warn_unused_result))
+NVDpyIdList nvAddDpyIdToDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
+{
+    NVDpyIdList tmpDpyIdList;
+    tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList |
+                                   dpyId.opaqueDpyId;
+    return tmpDpyIdList;
+}
+
+/* Passing an invalid display ID makes this function return an empty list. 
*/ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdToEmptyDpyIdList(NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdListToDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListB.opaqueDpyIdList | + dpyIdListA.opaqueDpyIdList; + return tmpDpyIdList; +} + +/* Returns: dpyIdList - dpyId */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyId(NVDpyIdList dpyIdList, NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + (~dpyId.opaqueDpyId); + return tmpDpyIdList; +} + +/* Returns: dpyIdListA - dpyIdListB */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + (~dpyIdListB.opaqueDpyIdList); + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList ^ + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList ^ + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + + +/* boolean checks */ + +static inline NvBool nvDpyIdIsInDpyIdList(NVDpyId dpyId, + NVDpyIdList dpyIdList) +{ + return !!(dpyIdList.opaqueDpyIdList & dpyId.opaqueDpyId); +} + +static inline NvBool nvDpyIdIsInvalid(NVDpyId dpyId) +{ + return (dpyId.opaqueDpyId == 0); +} + +static inline NvBool nvDpyIdListIsEmpty(NVDpyIdList dpyIdList) +{ + return (dpyIdList.opaqueDpyIdList == 0); +} + +static inline NvBool +nvDpyIdListSubDeviceArrayIsEmpty(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex; + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + if (!nvDpyIdListIsEmpty(dpyIdList[dispIndex])) { + return NV_FALSE; + } + } + return NV_TRUE; +} + + +static inline NvBool nvDpyIdsAreEqual(NVDpyId dpyIdA, NVDpyId dpyIdB) +{ + return (dpyIdA.opaqueDpyId == dpyIdB.opaqueDpyId); +} + +static inline NvBool nvDpyIdListsAreEqual(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + return (dpyIdListA.opaqueDpyIdList == dpyIdListB.opaqueDpyIdList); +} + +static inline NvBool nvDpyIdListIsASubSetofDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList intersectedDpyIdList = + nvIntersectDpyIdListAndDpyIdList(dpyIdListA, dpyIdListB); + + return nvDpyIdListsAreEqual(intersectedDpyIdList, dpyIdListA); +} + 
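+/*
+ * Illustrative sketch (editor's addition, not part of the original
+ * header): how the set operations and predicates above combine.  The
+ * names 'dpyIdA', 'dpyIdB', and 'connectedList' are hypothetical:
+ *
+ *     NVDpyIdList connectedList = nvEmptyDpyIdList();
+ *
+ *     connectedList = nvAddDpyIdToDpyIdList(dpyIdA, connectedList);
+ *     connectedList = nvAddDpyIdToDpyIdList(dpyIdB, connectedList);
+ *
+ *     nvDpyIdIsInDpyIdList(dpyIdA, connectedList);   evaluates to NV_TRUE
+ *
+ *     connectedList = nvDpyIdListMinusDpyId(connectedList, dpyIdA);
+ *
+ *     nvDpyIdIsInDpyIdList(dpyIdA, connectedList);   evaluates to NV_FALSE
+ *     nvDpyIdIsInDpyIdList(dpyIdB, connectedList);   evaluates to NV_TRUE
+ *
+ * Because each NVDpyId is a single bit in a 32-bit mask, every operation
+ * above is a constant-time bitwise AND/OR/XOR.
+ */
+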
+ +/* + * retrieve the individual dpyIds from dpyIdList; if dpyId is invalid, + * start at the beginning of the list; otherwise, start at the dpyId + * after the specified dpyId + */ + +static inline __attribute__ ((warn_unused_result)) +NVDpyId nvNextDpyIdInDpyIdListUnsorted(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + if (nvDpyIdIsInvalid(dpyId)) { + dpyId.opaqueDpyId = 1; + } else { + dpyId.opaqueDpyId <<= 1; + } + + while (dpyId.opaqueDpyId) { + + if (nvDpyIdIsInDpyIdList(dpyId, dpyIdList)) { + return dpyId; + } + + dpyId.opaqueDpyId <<= 1; + } + + /* no dpyIds left in dpyIdlist; return the invalid dpyId */ + + return nvInvalidDpyId(); +} + +#define FOR_ALL_DPY_IDS(_dpyId, _dpyIdList) \ + for ((_dpyId) = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), \ + (_dpyIdList)); \ + !nvDpyIdIsInvalid(_dpyId); \ + (_dpyId) = nvNextDpyIdInDpyIdListUnsorted((_dpyId), \ + (_dpyIdList))) + +/* report how many dpyIds are in the dpyIdList */ + +static inline int nvCountDpyIdsInDpyIdList(NVDpyIdList dpyIdList) +{ + return nvPopCount32(dpyIdList.opaqueDpyIdList); +} + +static inline int +nvCountDpyIdsInDpyIdListSubDeviceArray(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex, n = 0; + + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + n += nvCountDpyIdsInDpyIdList(dpyIdList[dispIndex]); + } + + return n; +} + +/* convert between dpyId/dpyIdList and NV-CONTROL values */ + +static inline int nvDpyIdToNvControlVal(NVDpyId dpyId) +{ + return (int) dpyId.opaqueDpyId; +} + +static inline int nvDpyIdListToNvControlVal(NVDpyIdList dpyIdList) +{ + return (int) dpyIdList.opaqueDpyIdList; +} + +static inline NVDpyId nvNvControlValToDpyId(int val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (__builtin_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvControlValToDpyIdList(int val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + + +/* convert between dpyId and NvU32 */ + +static inline NVDpyId nvNvU32ToDpyId(NvU32 val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (__builtin_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvU32ToDpyIdList(NvU32 val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + +static inline NvU32 nvDpyIdToNvU32(NVDpyId dpyId) +{ + return dpyId.opaqueDpyId; +} + +static inline NvU32 nvDpyIdListToNvU32(NVDpyIdList dpyIdList) +{ + return dpyIdList.opaqueDpyIdList; +} + +/* Return the bit position of dpyId: a number in the range [0..31]. */ +static inline NvU32 nvDpyIdToIndex(NVDpyId dpyId) +{ + return __builtin_ffs(dpyId.opaqueDpyId) - 1; +} + +/* Return a display ID that is not in the list passed in. */ + +static inline NVDpyId nvNewDpyId(NVDpyIdList excludeList) +{ + NVDpyId dpyId; + if (~excludeList.opaqueDpyIdList == 0) { + return nvInvalidDpyId(); + } + dpyId.opaqueDpyId = + 1U << (__builtin_ffs(~excludeList.opaqueDpyIdList) - 1); + return dpyId; +} + +/* See comment for NV_DPY_ID_PRINT_FORMAT. */ +static inline NvU32 nvDpyIdToPrintFormat(NVDpyId dpyId) +{ + return nvDpyIdToNvU32(dpyId); +} + +/* Prevent usage of opaque values. 
*/ +#define opaqueDpyId __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H +#define opaqueDpyIdList __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H + +#endif /* __NV_DPY_ID_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h new file mode 100644 index 0000000..7854d8a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_MODE_TIMINGS_H__ +#define __NV_MODE_TIMINGS_H__ + +#include "nvtypes.h" + +/* + * NvModeTimings: hardware-independent modetiming structure. + * + * For interlaced modes, the vertical values are stored in frame size, + * rather than field size (e.g., for 1080i modes, vVisible is 1080, + * not 540); similarly, for doublescan modes, the vertical values are + * stored in normal framesize (not doubled vertically). + * + * RRx1k should be field rate for interlaced modes, and should be + * frame rate for doubleScan modes; e.g., 1920x1080@60i and + * 640x480@60d, not 1920x1080@30i or 640x480@120d. + * + * RRx1k is also the "real" refresh rate (time spent displaying one eye) + * for HDMI 3D frame packed modes, e.g. 47940 (2x24hz) for 1920x1080@24 + * HDMI 3D mode. This needs to be halved again for all user-visible reported + * refresh rates (which needs to report time spent between each vblank, or + * each pair of eyes). + * + * pixelClock is doubled for doubleScan and HDMI 3D frame packed modes. + * + * The intent is that this structure match the X configuration file + * ModeLine. + * + * hdmi3D reflects whether this mode is a HDMI 3D frame packed mode. True only + * if the user selected HDMI 3D stereo mode and the GPU supports it. If true, + * then pixelClock is doubled. + * + * yuv420Mode reflects whether this mode requires YUV 4:2:0 decimation into a + * half-width output through headsurface (SW YUV420) or >=nvdisplay 4.0 HW CSC + * (HW YUV420). + * + * If a mode requires SW YUV 4:2:0 emulation, the pixelClock and width values + * in NvModeTimings will still be the full width values specified by the mode + * parsed from the EDID (e.g. 
3840x2160@60), but the pixelClock and width values + * in NVHwModeTimingsEvo will be the "real" half width values programmed in HW + * and rendered to through a headSurface transform (e.g. 1920x2160@60). If a + * mode requires HW YUV 4:2:0 CSC, the pixelClock and width values in both + * NvModeTimings and NVHwModeTimingsEvo will be full width, and the decimation + * to the half width scanout surface is performed in HW. In both cases, only + * the full width values should ever be reported to the client. + */ + +enum NvYuv420Mode { + NV_YUV420_MODE_NONE = 0, + NV_YUV420_MODE_SW, + NV_YUV420_MODE_HW, +}; + +typedef struct _NvModeTimings { + NvU32 RRx1k; + NvU32 pixelClockHz; /* in Hz units */ + NvU16 hVisible; + NvU16 hSyncStart; + NvU16 hSyncEnd; + NvU16 hTotal; + NvU16 hSkew; /* Just placeholder for XRRModeInfo.hSkew */ + NvU16 vVisible; + NvU16 vSyncStart; + NvU16 vSyncEnd; + NvU16 vTotal; + struct { + NvU16 w; + NvU16 h; + } sizeMM; + NvBool interlaced; + NvBool doubleScan; + /* + * Note: hSyncPos and vSyncPos are ignored, and the polarity is positive if + * [hv]SyncNeg is false. However, X.Org has separate flags for each, and + * treats modes with positive, negative, both, and neither as separate + * modes. + */ + NvBool hSyncPos; + NvBool hSyncNeg; + NvBool vSyncPos; + NvBool vSyncNeg; + NvBool hdmi3D; + enum NvYuv420Mode yuv420Mode; +} NvModeTimings, *NvModeTimingsPtr; + +static inline NvBool NvModeTimingsMatch(const NvModeTimings *pA, + const NvModeTimings *pB, + NvBool ignoreSizeMM, + NvBool ignoreRRx1k) +{ + /* + * Ignore sizeMM and/or RRx1k, if requested. The sizeMM and RRx1k fields + * don't impact hardware modetiming values, so it is reasonable that some + * callers may choose to ignore them when comparing NvModeTimings. + */ + NvBool sizeMMmatches = ignoreSizeMM || ((pA->sizeMM.w == pB->sizeMM.w) && + (pA->sizeMM.h == pB->sizeMM.h)); + + NvBool rrx1kMatches = ignoreRRx1k || (pA->RRx1k == pB->RRx1k); + + return (sizeMMmatches && rrx1kMatches && + (pA->pixelClockHz == pB->pixelClockHz) && + (pA->hVisible == pB->hVisible) && + (pA->hSyncStart == pB->hSyncStart) && + (pA->hSyncEnd == pB->hSyncEnd) && + (pA->hTotal == pB->hTotal) && + (pA->hSkew == pB->hSkew) && + (pA->vVisible == pB->vVisible) && + (pA->vSyncStart == pB->vSyncStart) && + (pA->vSyncEnd == pB->vSyncEnd) && + (pA->vTotal == pB->vTotal) && + (pA->interlaced == pB->interlaced) && + (pA->doubleScan == pB->doubleScan) && + (pA->hSyncPos == pB->hSyncPos) && + (pA->hSyncNeg == pB->hSyncNeg) && + (pA->vSyncPos == pB->vSyncPos) && + (pA->vSyncNeg == pB->vSyncNeg) && + (pA->hdmi3D == pB->hdmi3D) && + (pA->yuv420Mode == pB->yuv420Mode)); +} + +/* + * Convert between Hz and kHz. + * + * Note that Hz ==> kHz ==> Hz is lossy. + * + * We do +500 before /1000 in order to round, rather than truncate. + */ +static inline NvU32 HzToKHz(NvU32 hz) +{ + return (hz + 500) / 1000; +} + +static inline NvU32 KHzToHz(NvU32 kHz) +{ + return kHz * 1000; +} + + +#endif /* __NV_MODE_TIMINGS_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h new file mode 100644 index 0000000..75f0c03 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_MEMORY_TRACKER_H__
+#define __NV_MEMORY_TRACKER_H__
+
+#include "nv_list.h"
+
+#include <stddef.h> /* size_t */
+
+/*
+ * The following functions allocate and free memory, and track the
+ * allocations in a linked list, such that the includer can call
+ * nvMemoryTrackerPrintUnfreedAllocations() to print any leaked
+ * allocations.
+ */
+
+void *nvMemoryTrackerTrackedAlloc(NVListPtr list, size_t size,
+                                  int line, const char *file);
+
+void *nvMemoryTrackerTrackedCalloc(NVListPtr list, size_t nmemb, size_t size,
+                                   int line, const char *file);
+
+void *nvMemoryTrackerTrackedRealloc(NVListPtr list, void *ptr, size_t size,
+                                    int line, const char *file);
+
+void nvMemoryTrackerTrackedFree(void *ptr);
+
+void nvMemoryTrackerPrintUnfreedAllocations(NVListPtr list);
+
+/*
+ * Users of nv_memory_tracker must provide implementations of the
+ * following helper functions.
+ */
+void *nvMemoryTrackerAlloc(size_t size);
+void nvMemoryTrackerFree(void *ptr, size_t size);
+void nvMemoryTrackerPrintf(const char *format, ...)
+    __attribute__((format (printf, 1, 2)));
+void nvMemoryTrackerMemset(void *s, int c, size_t n);
+void nvMemoryTrackerMemcpy(void *dest, const void *src, size_t n);
+
+#endif /* __NV_MEMORY_TRACKER_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h new file mode 100644 index 0000000..72deb14 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h @@ -0,0 +1,135 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_MODE_TIMINGS_UTILS_H__
+#define __NV_MODE_TIMINGS_UTILS_H__
+
+/*
+ * Utility code to operate on NvModeTimings and NVT_TIMINGs.
+ */
+
+#include "nvtypes.h"
+
+#include "nv_mode_timings.h"
+#include "timing/nvtiming.h"
+
+#include <stddef.h> /* size_t */
+
+/*
+ * Macros used for printing values divided by 1000 without floating
+ * point division and printing.
+ *
+ * Example:
+ *   printf("pclk is %.2f MHz\n", pclk_khz / 1000.0f);
+ * becomes:
+ *   printf("pclk is " NV_FMT_DIV_1000_POINT_2 " MHz\n",
+ *          NV_VA_DIV_1000_POINT_2(pclk_khz));
+ *
+ * Different precision controls the number of digits printed after the
+ * decimal point.  Bias is added for correct rounding.
+ */
+#define NV_FMT_DIV_1000_POINT_1 "%d.%d"
+#define NV_FMT_DIV_1000_POINT_2 "%d.%02d"
+#define NV_VA_DIV_1000_POINT_1(x) \
+    ((x) + 49) / 1000, (((x) + 49) % 1000) / 100
+#define NV_VA_DIV_1000_POINT_2(x) \
+    ((x) + 4) / 1000, (((x) + 4) % 1000) / 10
+
+/*
+ * macro to use integer math to convert an NvU32 kHz value to Hz; we
+ * add 500 Hz before dividing by 1000 to round rather than truncate.
+ */
+
+#define NV_U32_KHZ_TO_HZ(_x) (((_x) + 500) / 1000)
+
+/*
+ * NVT_TIMING stores HVisible multiplied by the horizontal replication
+ * factor (e.g., a 720 mode with hrep=2 has HVisible of 1440).  For
+ * reporting purposes, divide HVisible by hrep.
+ */
+static inline NvU16 NV_NVT_TIMING_HVISIBLE(const NVT_TIMING *pTiming)
+{
+    if (pTiming->etc.rep > 1) {
+        return pTiming->HVisible / pTiming->etc.rep;
+    } else {
+        return pTiming->HVisible;
+    }
+}
+
+/*
+ * NVT_TIMING stores VVisible as half height when interlaced (e.g.,
+ * 1920x1080i has VVisible 540).
+ */
+static inline NvU16 NV_NVT_TIMING_VVISIBLE(const NVT_TIMING *pTiming)
+{
+    return pTiming->VVisible * (pTiming->interlaced ? 2 : 1);
+}
+
+/*
+ * When non-zero, NVT_TIMING::etc::aspect contains bytes 12, 13, and
+ * 14 from the Detailed Timing Definition of the EDID.  This contains
+ * a packed width and height.  The width and height is either an
+ * aspect ratio (16:9 or 4:3), or a physical image size in
+ * millimeters.  See Table 3.21, and the subsequent notes, in the
+ * E-EDID 1.4 specification. 
+ */
+static inline NvU16 NV_NVT_TIMING_IMAGE_SIZE_WIDTH(const NVT_TIMING *pTiming)
+{
+    return (pTiming->etc.aspect >> 16) & 0xFFFF;
+}
+
+static inline NvU16 NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(const NVT_TIMING *pTiming)
+{
+    return pTiming->etc.aspect & 0xFFFF;
+}
+
+static inline NvBool NV_NVT_TIMING_HAS_ASPECT_RATIO(const NVT_TIMING *pTiming)
+{
+    NvU16 w = NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming);
+    NvU16 h = NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming);
+
+    return (((w == 16) && (h == 9)) ||
+            ((w == 4) && (h == 3)));
+}
+
+static inline NvBool NV_NVT_TIMING_HAS_IMAGE_SIZE(const NVT_TIMING *pTiming)
+{
+    return ((pTiming->etc.aspect != 0) &&
+            !NV_NVT_TIMING_HAS_ASPECT_RATIO(pTiming));
+}
+
+NvBool IsEdid640x480_60_NVT_TIMING(const NVT_TIMING *pTiming);
+
+void NVT_TIMINGtoNvModeTimings(const NVT_TIMING *pTiming,
+                               NvModeTimingsPtr pModeTimings);
+
+void nvBuildModeName(NvU16 width, NvU16 height, char *name, size_t nameLen);
+
+/*
+ * Users of nvBuildModeName() should provide an implementation of
+ * nvBuildModeNameSnprintf().
+ */
+int nvBuildModeNameSnprintf(char *str, size_t size, const char *format, ...)
+    __attribute__((format (printf, 3, 4)));
+
+#endif /* __NV_MODE_TIMINGS_UTILS_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h
new file mode 100644
index 0000000..ec94beb
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h
@@ -0,0 +1,65 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NV_VASPRINTF_H__
+#define __NV_VASPRINTF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h> /* va_list */
+#include <stddef.h> /* size_t */
+
+/*
+ * nv_vasprintf() depends on nv_vasprintf_{alloc,free,vsnprintf}().
+ * Those functions should be implemented by the user of
+ * nv_vasprintf().
+ */
+void *nv_vasprintf_alloc(size_t size);
+void nv_vasprintf_free(void *ptr);
+int nv_vasprintf_vsnprintf(char *str, size_t size,
+                           const char *format, va_list ap);
+
+char* nv_vasprintf(const char *f, va_list ap);
+
+/*
+ * NV_VSNPRINTF(): macro that assigns b using nv_vasprintf(); intended to
+ * be used by vararg printing functions.
+ *
+ * This macro allocates memory for b; the caller should free the
+ * memory (with nv_vasprintf_free()) when done.
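+ *
+ * A hypothetical usage sketch (logMsg() and consumeString() are
+ * illustrative names, not part of this interface):
+ *
+ *   void logMsg(const char *fmt, ...)
+ *   {
+ *       char *b;
+ *
+ *       NV_VSNPRINTF(b, fmt);
+ *       if (b != NULL) {
+ *           consumeString(b);
+ *           nv_vasprintf_free(b);
+ *       }
+ *   }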
+ */ + +#define NV_VSNPRINTF(b, f) do { \ + va_list ap; \ + va_start(ap, f); \ + (b) = nv_vasprintf(f, ap); \ + va_end(ap); \ +} while(0) + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_VASPRINTF_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h new file mode 100644 index 0000000..d795fda --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __UNIX_RM_HANDLE_H__ +#define __UNIX_RM_HANDLE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV_UNIX_RM_HANDLE_INITIAL_HANDLES 512 +#define NV_UNIX_RM_HANDLE_BITMAP_SIZE(_numHandles) ((_numHandles) >> 5) + +#if defined(DEBUG) +typedef struct _nv_unix_rm_handle_allocation *NVUnixRmHandleAllocationPtr; + +typedef struct _nv_unix_rm_handle_allocation { + const char *file; + int line; +} NVUnixRmHandleAllocationRec; +#endif + +typedef struct _nv_unix_rm_handle_allocator *NVUnixRmHandleAllocatorPtr; + +typedef struct _nv_unix_rm_handle_allocator { + NvU32 rmClient; + NvU32 clientData; + + NvU32 *bitmap; + NvU32 maxHandles; + +#if defined(DEBUG) + NVUnixRmHandleAllocationRec *allocationTable; +#endif +} NVUnixRmHandleAllocatorRec; + +NvBool nvInitUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 rmClient, NvU32 clientData); + +NvU32 nvGenerateUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator); +void nvFreeUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 UnixRmHandle); + +void nvTearDownUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator); + +#if defined(DEBUG) + +#define NV_UNIX_RM_HANDLE_DEBUG_ERROR 0 +#define NV_UNIX_RM_HANDLE_DEBUG_MSG 1 +#define NV_UNIX_RM_HANDLE_DEBUG_VERBOSE 2 + +/* + * Users of the handle generator need to provide implementations + * of nvUnixRmHandleDebugAssert() and nvUnixRmHandleLogMsg() + * in builds where DEBUG is defined. 
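+ *
+ * A minimal user-space sketch of these callbacks (the use of
+ * fprintf(), vfprintf(), and abort() here is an illustrative choice,
+ * not a requirement of this interface):
+ *
+ *   void nvUnixRmHandleDebugAssert(const char *expString,
+ *                                  const char *filenameString,
+ *                                  const char *funcString,
+ *                                  const unsigned lineNumber)
+ *   {
+ *       fprintf(stderr, "assertion '%s' failed in %s() at %s:%u\n",
+ *               expString, funcString, filenameString, lineNumber);
+ *       abort();
+ *   }
+ *
+ *   void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...)
+ *   {
+ *       va_list ap;
+ *
+ *       (void)level;
+ *       va_start(ap, fmt);
+ *       vfprintf(stderr, fmt, ap);
+ *       va_end(ap);
+ *       fputc('\n', stderr);
+ *   }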
+ */
+void nvUnixRmHandleDebugAssert(const char *expString,
+                               const char *filenameString,
+                               const char *funcString,
+                               const unsigned lineNumber);
+#define nvUnixRmHandleAssert(_exp)                                      \
+    do {                                                                \
+        if (_exp) {                                                     \
+        } else {                                                        \
+            nvUnixRmHandleDebugAssert(#_exp, __FILE__, __FUNCTION__, __LINE__); \
+        }                                                               \
+    } while (0)
+
+void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+
+NvU32 nvDebugGenerateUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator,
+                                  const char *file, int line);
+#define nvGenerateUnixRmHandle(s) \
+    nvDebugGenerateUnixRmHandle((s), __FILE__, __LINE__)
+
+void nvDebugFreeUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle);
+#define nvFreeUnixRmHandle(n,s) nvDebugFreeUnixRmHandle((n), (s))
+
+#else
+
+#define nvUnixRmHandleAssert(_exp) do {} while(0)
+#define nvUnixRmHandleLogMsg(__fmt, ...) do {} while(0)
+
+#define nvGenerateUnixRmHandle(s) nvGenerateUnixRmHandleInternal((s))
+#define nvFreeUnixRmHandle(n, s) nvFreeUnixRmHandleInternal((n), (s))
+
+#endif /* DEBUG */
+
+/*
+ * Users of the handle generator always need to provide implementations
+ * of nvUnixRmHandleReallocMem() and nvUnixRmHandleFreeMem().
+ */
+void *nvUnixRmHandleReallocMem(void *oldPtr, NvLength newSize);
+void nvUnixRmHandleFreeMem(void *ptr);
+
+#ifdef __cplusplus
+};
+#endif
+
+#endif /* __UNIX_RM_HANDLE_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c
new file mode 100644
index 0000000..7b467d9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c
@@ -0,0 +1,230 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if defined(DEBUG)
+
+#include "nv_memory_tracker.h"
+
+/*
+ * Define NV_MEMORY_TRACKER_BACKTRACES in the including makefile to enable
+ * backtrace capture/reporting for memory leaks. E.g.,
+ *   NV_DEFINES += NV_MEMORY_TRACKER_BACKTRACES
+ * Note that this probably only works with glibc (backtrace() and friends are
+ * GNU extensions).
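+ *
+ * For backtrace_symbols() to report meaningful function names, the
+ * final program typically also needs to be linked with -rdynamic (see
+ * the backtrace(3) man page); otherwise only raw addresses are shown.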
+ */
+
+#if defined(NV_MEMORY_TRACKER_BACKTRACES)
+  #include <execinfo.h> /* backtrace() and backtrace_symbols() */
+  #include <stdlib.h>   /* free(3) */
+  #define MAX_BACKTRACE_DEPTH 30
+#endif
+
+
+typedef union {
+    struct {
+        NVListRec entry;
+        const char *file;
+        int line;
+        size_t size;
+#if defined(NV_MEMORY_TRACKER_BACKTRACES)
+        void *backtrace[MAX_BACKTRACE_DEPTH];
+        int backtraceSize;
+#endif
+    } header;
+    /*
+     * Unused. For alignment purposes only. Guarantee alignment to
+     * twice pointer size. That is the alignment guaranteed by glibc:
+     * http://www.gnu.org/software/libc/manual/html_node/Aligned-Memory-Blocks.html
+     * which seems reasonable to match here.
+     */
+    NvU8 align __attribute__((aligned(sizeof(void*) * 2)));
+} NvMemoryAllocation;
+
+
+static void PrintAllocationBacktrace(const NvMemoryAllocation *alloc)
+{
+#if defined(NV_MEMORY_TRACKER_BACKTRACES)
+    char **symbols;
+    const int numSymbols = alloc->header.backtraceSize;
+    int j;
+
+    symbols = backtrace_symbols(alloc->header.backtrace, numSymbols);
+
+    if (symbols == NULL) {
+        return;
+    }
+
+    nvMemoryTrackerPrintf("Allocation context:");
+
+    for (j = 0; j < numSymbols; j++) {
+        if (symbols[j] == NULL) {
+            continue;
+        }
+
+        nvMemoryTrackerPrintf("#%-2d %s", j, symbols[j]);
+    }
+    free(symbols);
+#endif
+}
+
+
+static void RegisterAllocation(NVListPtr list, NvMemoryAllocation *alloc,
+                               const char *file, int line, size_t size)
+{
+    nvListAdd(&alloc->header.entry, list);
+
+    alloc->header.file = file;
+    alloc->header.line = line;
+    alloc->header.size = size;
+
+#if defined(NV_MEMORY_TRACKER_BACKTRACES)
+    /* Record the backtrace at this point (only addresses, not symbols) */
+    alloc->header.backtraceSize =
+        backtrace(alloc->header.backtrace, MAX_BACKTRACE_DEPTH);
+#endif
+}
+
+
+static NvBool IsAllocationSane(NvMemoryAllocation *alloc)
+{
+    NVListPtr entry = &alloc->header.entry;
+    if (entry->prev->next != entry || entry->next->prev != entry) {
+        /*
+         * This will likely have already crashed, but we might as well
+         * report it if we can.
+         */
+        nvMemoryTrackerPrintf("Attempted to free untracked memory %p!",
+                              alloc + 1);
+        return NV_FALSE;
+    }
+    return NV_TRUE;
+}
+
+
+static void UnregisterAllocation(NvMemoryAllocation *alloc)
+{
+    if (!IsAllocationSane(alloc)) {
+        return;
+    }
+
+    nvListDel(&alloc->header.entry);
+}
+
+
+void *nvMemoryTrackerTrackedAlloc(NVListPtr list, size_t size,
+                                  int line, const char *file)
+{
+    NvMemoryAllocation *alloc = nvMemoryTrackerAlloc(sizeof(*alloc) + size);
+
+    if (alloc == NULL) {
+        return NULL;
+    }
+
+    RegisterAllocation(list, alloc, file, line, size);
+
+    return alloc + 1;
+}
+
+
+void *nvMemoryTrackerTrackedCalloc(NVListPtr list, size_t nmemb, size_t size,
+                                   int line, const char *file)
+{
+    size_t totalSize = size * nmemb;
+    void *ptr = nvMemoryTrackerTrackedAlloc(list, totalSize, line, file);
+
+    if (ptr == NULL) {
+        return NULL;
+    }
+
+    nvMemoryTrackerMemset(ptr, 0, totalSize);
+
+    return ptr;
+}
+
+
+void *nvMemoryTrackerTrackedRealloc(NVListPtr list, void *ptr, size_t size,
+                                    int line, const char *file)
+{
+    NvMemoryAllocation *oldAlloc = NULL;
+    void *newptr;
+
+    if (ptr == NULL) {
+        /* realloc with a ptr of NULL is equivalent to malloc. */
+        return nvMemoryTrackerTrackedAlloc(list, size, line, file);
+    }
+
+    if (size == 0) {
+        /*
+         * realloc with a size of 0 is equivalent to free.
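+         *
+         * (In C23, realloc() with a size of 0 is undefined behavior;
+         * this debug tracker keeps the traditional free-like
+         * semantics.)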
+         */
+        nvMemoryTrackerTrackedFree(ptr);
+        return NULL;
+    }
+
+    oldAlloc = ((NvMemoryAllocation *) ptr) - 1;
+    newptr = nvMemoryTrackerTrackedAlloc(list, size, line, file);
+
+    if (newptr != NULL) {
+        nvMemoryTrackerMemcpy(newptr, ptr, NV_MIN(size, oldAlloc->header.size));
+        nvMemoryTrackerTrackedFree(ptr);
+    }
+
+    return newptr;
+}
+
+
+void nvMemoryTrackerTrackedFree(void *ptr)
+{
+    NvMemoryAllocation *alloc;
+    size_t size;
+
+    if (ptr == NULL) {
+        return;
+    }
+
+    alloc = ((NvMemoryAllocation *) ptr) - 1;
+
+    UnregisterAllocation(alloc);
+
+    size = alloc->header.size + sizeof(NvMemoryAllocation);
+
+    /* Poison the memory. */
+    nvMemoryTrackerMemset(alloc, 0x55, size);
+
+    nvMemoryTrackerFree(alloc, size);
+}
+
+
+void nvMemoryTrackerPrintUnfreedAllocations(NVListPtr list)
+{
+    NvMemoryAllocation *iter;
+
+    nvListForEachEntry(iter, list, header.entry) {
+        nvMemoryTrackerPrintf("Unfreed allocation: %18p (size: %5u) (%s:%d)",
+                              iter + 1,
+                              (unsigned int) iter->header.size,
+                              iter->header.file,
+                              iter->header.line);
+        PrintAllocationBacktrace(iter);
+    }
+}
+
+#endif /* defined(DEBUG) */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c
new file mode 100644
index 0000000..26c0197
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c
@@ -0,0 +1,159 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv_mode_timings_utils.h"
+
+/*
+ * Check whether this NVT_TIMING is the 640x480@60Hz industry standard
+ * mode. Due to the lack of precision of the pclk field in the
+ * NVT_TIMING data structure, the stored values are not quite correct:
+ * pclk should be 2517.5, and rrx1k should be 59940. See bug 263631.
+ *
+ * Note that we check if rrx1k is either 60000 or 59940 because we may
+ * use this function immediately after receiving the NVT_TIMINGs from
+ * the EDID and patch rrx1k, or we may use this function later from
+ * NVT_TIMINGtoNvModeTimings(), at which point we'll have already
+ * patched rrx1k.
+ */ + +NvBool IsEdid640x480_60_NVT_TIMING(const NVT_TIMING *pTiming) +{ + return ((pTiming->pclk == 2518) && + (pTiming->HVisible == 640) && + (pTiming->VVisible == 480) && + (pTiming->HTotal == 800) && + (pTiming->HFrontPorch == 16) && + (pTiming->HSyncWidth == 96) && + (pTiming->VTotal == 525) && + (pTiming->VFrontPorch == 10) && + (pTiming->VSyncWidth == 2) && + (pTiming->HBorder == 0) && + (pTiming->VBorder == 0) && + (pTiming->HSyncPol == NVT_H_SYNC_NEGATIVE) && + (pTiming->VSyncPol == NVT_V_SYNC_NEGATIVE) && + (pTiming->interlaced == 0) && + ((pTiming->etc.flag & + NVT_FLAG_NV_DOUBLE_SCAN_TIMING) == 0) && + ((pTiming->etc.rrx1k == 60000) || + (pTiming->etc.rrx1k == 59940))); +} + +/* + * Convert from NVT_TIMING to NvModeTimings; this is a safe operation + * to perform because NvModeTimings has higher precision (pixelclockHz + * in Hz, and vertical values doubled for interlaced) than NVT_TIMING + */ + +void NVT_TIMINGtoNvModeTimings(const NVT_TIMING *pTiming, + NvModeTimingsPtr pModeTimings) +{ + char *bytePtr = (char *)pModeTimings; + size_t i; + + for (i = 0; i < sizeof(*pModeTimings); i++) { + bytePtr[i] = 0; + } + + pModeTimings->RRx1k = pTiming->etc.rrx1k; + + /* pTiming->pclk is in 10*kHz; pModeTimings->pixelClockHz is in Hz */ + + pModeTimings->pixelClockHz = KHzToHz(pTiming->pclk) * 10; + + pModeTimings->hVisible = pTiming->HVisible; + pModeTimings->hSyncStart = pTiming->HFrontPorch + pTiming->HVisible; + pModeTimings->hSyncEnd = + pTiming->HFrontPorch + pTiming->HVisible + pTiming->HSyncWidth; + pModeTimings->hTotal = pTiming->HTotal; + + pModeTimings->vVisible = pTiming->VVisible; + pModeTimings->vSyncStart = pTiming->VFrontPorch + pTiming->VVisible; + pModeTimings->vSyncEnd = + pTiming->VFrontPorch + pTiming->VVisible + pTiming->VSyncWidth; + pModeTimings->vTotal = pTiming->VTotal; + + pModeTimings->interlaced = pTiming->interlaced; + pModeTimings->doubleScan = + !!(pTiming->etc.flag & NVT_FLAG_NV_DOUBLE_SCAN_TIMING); + + /* + * pTiming stores vertical values divided by two when interlaced; so + * double the vertical values in pModeTimings + */ + + if (pModeTimings->interlaced) { + pModeTimings->vVisible *= 2; + pModeTimings->vSyncStart *= 2; + pModeTimings->vSyncEnd *= 2; + pModeTimings->vTotal *= 2; + } + + /* + * pTiming: 0 is positive, 1 is negative + * pModeTimings: FALSE is positive, TRUE is negative + */ + + if (pTiming->HSyncPol == NVT_H_SYNC_POSITIVE) { + pModeTimings->hSyncNeg = NV_FALSE; + } else { + pModeTimings->hSyncNeg = NV_TRUE; + } + + if (pTiming->VSyncPol == NVT_V_SYNC_POSITIVE) { + pModeTimings->vSyncNeg = NV_FALSE; + } else { + pModeTimings->vSyncNeg = NV_TRUE; + } + + pModeTimings->hSyncPos = !pModeTimings->hSyncNeg; + pModeTimings->vSyncPos = !pModeTimings->vSyncNeg; + + /* + * Save any physical size information for this mode from the + * Detailed Timing Definition of the EDID. + */ + if (NV_NVT_TIMING_HAS_IMAGE_SIZE(pTiming)) { + pModeTimings->sizeMM.w = NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming); + pModeTimings->sizeMM.h = NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming); + } + + /* + * XXX work around lack of precision in NVT_TIMING: catch the + * 640x480@60Hz EDID mode and patch pixelClockHz and RRx1k. + */ + + if (IsEdid640x480_60_NVT_TIMING(pTiming)) { + pModeTimings->RRx1k = 59940; + pModeTimings->pixelClockHz = 25175000; + } +} + + +/*! + * Build a mode name, of the format 'WIDTHxHEIGHT'. 
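+ *
+ * For example, nvBuildModeName(1920, 1080, name, sizeof(name))
+ * writes the string "1920x1080" into name.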
+ */ +void nvBuildModeName(NvU16 width, NvU16 height, char *name, size_t nameLen) +{ + nvBuildModeNameSnprintf(name, nameLen, "%dx%d", width, height); + name[nameLen - 1] = '\0'; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c new file mode 100644 index 0000000..390ad9f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv_vasprintf.h" + +/* + * nv_vasprintf(): function that returns a string using vsnprintf(); intended to + * be used by vararg printing functions. This is supposedly correct + * for differing semantics of vsnprintf() in different versions of + * glibc: + * + * different semantics of the return value from (v)snprintf: + * + * -1 when the buffer is not long enough (glibc < 2.1) + * + * or + * + * the length the string would have been if the buffer had been large + * enough (glibc >= 2.1) + * + * This function allocates memory for the returned string; the caller should use + * free() the memory when done. + * + * The includer should implement nv_vasprintf_{alloc,free,vsnprintf}. + */ + +#define __NV_VASPRINTF_LEN 64 + +char* nv_vasprintf(const char *f, va_list ap) +{ + int len, current_len = __NV_VASPRINTF_LEN; + char *b = (char *)nv_vasprintf_alloc(current_len); + + while (b) { + va_list tmp_ap; + + va_copy(tmp_ap, ap); + len = nv_vasprintf_vsnprintf(b, current_len, f, tmp_ap); + va_end(tmp_ap); + + if ((len > -1) && (len < current_len)) { + break; + } else if (len > -1) { + current_len = len + 1; + } else { + current_len += __NV_VASPRINTF_LEN; + } + + nv_vasprintf_free(b); + b = (char *)nv_vasprintf_alloc(current_len); + } + + return b; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c new file mode 100644 index 0000000..ed6ebbf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c @@ -0,0 +1,385 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file contains functions for dealing with dynamic allocation and
+ * management of resource handles.
+ *
+ * Note that dynamic handles are not suitable for all use cases. If a
+ * handle is placed in the pushbuffer, and the pushbuffer will be
+ * replayed during channel recovery, the handle value must be kept
+ * constant. For such handles, use an invariant handle value.
+ *
+ * We keep a bitmap of which handles we've used.
+ *
+ * Composition of an object handle:
+ *  [31:16] Client data
+ *  [15:00] Handle constant
+ */
+
+#include <stddef.h> /* NULL */
+
+#include "unix_rm_handle.h"
+
+#define INVALID_HANDLE 0
+#define UNIX_RM_HANDLE_CLIENT_DATA_SHIFT 16
+
+/* Mask to AND only client data */
+#define CLIENT_DATA_MASK ((~(NvU32)0) << UNIX_RM_HANDLE_CLIENT_DATA_SHIFT)
+/* Mask to AND off client data */
+#define HANDLE_MASK (~(CLIENT_DATA_MASK))
+/* Handle 0 is reserved, so subtract one from a handle to get its index */
+#define HANDLE_INDEX(_handle) (((_handle) - 1) & HANDLE_MASK)
+
+/* Bits to OR in for client data */
+#define GET_CLIENT_DATA_BITS(_data) \
+    (((_data) << UNIX_RM_HANDLE_CLIENT_DATA_SHIFT))
+
+#define DWORD_FROM_HANDLE(_handle) (HANDLE_INDEX(_handle) >> 5)
+#define BIT_FROM_HANDLE(_handle) (HANDLE_INDEX(_handle) & 0x1f)
+
+/* Check if a handle is used */
+#define USED(_bitmap, _handle) \
+    ((_bitmap)[DWORD_FROM_HANDLE(_handle)] & (1U << BIT_FROM_HANDLE(_handle)))
+/* Reserve a handle in the bitmap */
+#define RESERVE(_bitmap, _handle) \
+    ((_bitmap)[DWORD_FROM_HANDLE(_handle)] |= (1U << BIT_FROM_HANDLE(_handle)))
+/* Unreserve a handle in the bitmap */
+#define UNRESERVE(_bitmap, _handle) \
+    ((_bitmap)[DWORD_FROM_HANDLE(_handle)] &= (~(1U << BIT_FROM_HANDLE(_handle))))
+
+#if defined(DEBUG)
+static void
+nvReportUnfreedUnixRmHandleAllocations(NVUnixRmHandleAllocatorPtr pAllocator);
+#endif
+
+
+static void UnixRmHandleMemset(void *ptr, char data, NvLength size)
+{
+    char *byte = (char *)ptr;
+    NvLength i;
+
+    for (i = 0; i < size; i++) {
+        byte[i] = data;
+    }
+}
+
+static NvBool UnixRmHandleReallocBitmap(NVUnixRmHandleAllocatorPtr pAllocator,
+                                        NvU32 newMaxHandles)
+{
+    NvU32 *newBitmap;
+#if defined(DEBUG)
+    NVUnixRmHandleAllocationPtr newAllocationTable;
+#endif /* defined(DEBUG) */
+    const NvLength newMemSize = NV_UNIX_RM_HANDLE_BITMAP_SIZE(newMaxHandles) *
+                                sizeof(*newBitmap);
+    const NvU32 oldBitmapSize =
+        NV_UNIX_RM_HANDLE_BITMAP_SIZE(pAllocator->maxHandles);
+
+    /* The new handle limit must fit in the 16-bit handle field */
+    if (newMaxHandles > GET_CLIENT_DATA_BITS(1)) {
+        return NV_FALSE;
+    }
+
+    /* New handle limit must be a power of 2 */
+    nvUnixRmHandleAssert(!(newMaxHandles & (newMaxHandles - 1)));
+
+    newBitmap = (NvU32 *)nvUnixRmHandleReallocMem(pAllocator->bitmap, newMemSize);
+
+    if (!newBitmap) {
+        return NV_FALSE;
+    }
+
+    UnixRmHandleMemset(&newBitmap[oldBitmapSize], 0,
+                       newMemSize - (oldBitmapSize * sizeof(*newBitmap)));
+    pAllocator->bitmap = newBitmap;
+
+#if defined(DEBUG)
+    newAllocationTable =
+        (NVUnixRmHandleAllocationPtr)
+        nvUnixRmHandleReallocMem(pAllocator->allocationTable,
+                                 newMaxHandles *
+                                 sizeof(*pAllocator->allocationTable));
+
+    if (!newAllocationTable) {
+        /*
+         * Leave the new bitmap allocation in place. If that realloc
+         * succeeded, the old bitmap allocation is gone, and it is at
+         * least big enough to hold the old pAllocator->maxHandles,
+         * since a shrinking of the allocation table shouldn't have
+         * failed, and maxHandles currently never decreases anyway.
+         */
+        nvUnixRmHandleAssert(newMaxHandles >= pAllocator->maxHandles);
+
+        return NV_FALSE;
+    }
+
+    pAllocator->allocationTable = newAllocationTable;
+#endif /* defined(DEBUG) */
+
+    pAllocator->maxHandles = newMaxHandles;
+
+    return NV_TRUE;
+}
+
+NvBool nvInitUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator,
+                                   NvU32 rmClient, NvU32 clientData)
+{
+    nvUnixRmHandleAssert(pAllocator != NULL &&
+                         rmClient != 0 && clientData != 0);
+    nvUnixRmHandleAssert((clientData & 0x0000FFFF) == clientData);
+
+    UnixRmHandleMemset(pAllocator, 0, sizeof(*pAllocator));
+
+    pAllocator->rmClient = rmClient;
+    pAllocator->clientData = clientData;
+
+    if (!UnixRmHandleReallocBitmap(pAllocator,
+                                   NV_UNIX_RM_HANDLE_INITIAL_HANDLES)) {
+        nvUnixRmHandleAssert(!"Failed to init RM handle allocator bitmap");
+        nvTearDownUnixRmHandleAllocator(pAllocator);
+
+        return NV_FALSE;
+    }
+
+    /*
+     * If the RM-provided client handle falls within the allocator range
+     * then reserve it up-front.
+     */
+    if ((pAllocator->rmClient & CLIENT_DATA_MASK) ==
+        GET_CLIENT_DATA_BITS(pAllocator->clientData)) {
+        NvU32 handleId = pAllocator->rmClient & HANDLE_MASK;
+
+        if ((handleId <= pAllocator->maxHandles) &&
+            (handleId != INVALID_HANDLE)) {
+            RESERVE(pAllocator->bitmap, handleId);
+        }
+    }
+
+    return NV_TRUE;
+}
+
+/*
+ * nvGenerateUnixRmHandleInternal()
+ * Return a unique handle. Be sure to free the handle
+ * when you're done with it! Returns 0 if we run out of handles.
+ */
+NvU32 nvGenerateUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator)
+{
+    NvU32 handleId;
+    NvU32 handle;
+
+    nvUnixRmHandleAssert(pAllocator != NULL &&
+                         pAllocator->rmClient != 0 &&
+                         pAllocator->clientData != 0);
+
+    /*
+     * Find a free handle; test the bound before reading the bitmap so
+     * we never index past its end.
+     */
+    handleId = 1;
+    while ((handleId <= pAllocator->maxHandles) &&
+           USED(pAllocator->bitmap, handleId)) {
+        handleId++;
+    }
+
+    if (handleId > pAllocator->maxHandles) {
+        if (!UnixRmHandleReallocBitmap(pAllocator, pAllocator->maxHandles * 2)) {
+            nvUnixRmHandleAssert(!"Failed to grow RM handle allocator bitmap");
+            return INVALID_HANDLE;
+        }
+    }
+
+    nvUnixRmHandleAssert(!USED(pAllocator->bitmap, handleId));
+
+    RESERVE(pAllocator->bitmap, handleId);
+
+    handle = GET_CLIENT_DATA_BITS(pAllocator->clientData) | handleId;
+
+    nvUnixRmHandleAssert(handle != pAllocator->rmClient);
+
+    return handle;
+}
+
+/*
+ * nvFreeUnixRmHandleInternal()
+ * Mark the handle passed in as free in the bitmap.
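+ * Only the low 16 handle bits are consulted; the client data bits
+ * are masked off with HANDLE_MASK first.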
+ */
+void nvFreeUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator,
+                                NvU32 unixHandle)
+{
+    NvU32 handle = unixHandle & HANDLE_MASK;
+
+    if (!unixHandle) {
+        return;
+    }
+
+    nvUnixRmHandleAssert(pAllocator != NULL &&
+                         pAllocator->rmClient != 0 && pAllocator->clientData != 0);
+
+    nvUnixRmHandleAssert(USED(pAllocator->bitmap, handle));
+
+    UNRESERVE(pAllocator->bitmap, handle);
+}
+
+/*
+ * This function just makes sure we freed all of the handles we allocated, for
+ * debugging purposes.
+ */
+void nvTearDownUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator)
+{
+    if (pAllocator == NULL) {
+        return;
+    }
+
+    /*
+     * If the RM-provided client handle falls within the allocator range,
+     * then it was reserved up-front, so make sure it gets unreserved
+     * before teardown.
+     */
+    if ((pAllocator->rmClient & CLIENT_DATA_MASK) ==
+        GET_CLIENT_DATA_BITS(pAllocator->clientData)) {
+        NvU32 handleId = pAllocator->rmClient & HANDLE_MASK;
+
+        if ((handleId <= pAllocator->maxHandles) &&
+            (handleId != INVALID_HANDLE)) {
+            UNRESERVE(pAllocator->bitmap, handleId);
+        }
+    }
+
+#if defined(DEBUG)
+    nvReportUnfreedUnixRmHandleAllocations(pAllocator);
+    nvUnixRmHandleFreeMem(pAllocator->allocationTable);
+#endif
+
+    nvUnixRmHandleFreeMem(pAllocator->bitmap);
+
+    UnixRmHandleMemset(pAllocator, 0, sizeof(*pAllocator));
+}
+
+/*
+ * Handle allocation tracking code; in a debug build, the below
+ * functions wrap the actual allocation functions above.
+ */
+
+#if defined(DEBUG)
+
+#define UNIX_RM_HANDLE_ALLOC_LABEL "NVIDIA UNIX RM HANDLE TRACKER: "
+
+static NVUnixRmHandleAllocationPtr
+FindUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle)
+{
+    if (((handle & HANDLE_MASK) == INVALID_HANDLE) ||
+        ((handle & HANDLE_MASK) > pAllocator->maxHandles)) {
+        return NULL;
+    }
+
+    return &pAllocator->allocationTable[HANDLE_INDEX(handle)];
+}
+
+static void RecordUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator,
+                                         NvU32 handle, const char *file, int line)
+{
+    /* Look up the allocation table entry for this handle. */
+    NVUnixRmHandleAllocationPtr alloc = FindUnixRmHandleAllocation(pAllocator, handle);
+
+    if (!alloc) {
+        nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_ERROR,
+                             UNIX_RM_HANDLE_ALLOC_LABEL
+                             "NVUnixRmHandleAllocator is corrupted. "
+ "(table entry not found for handle)"); + return; + } + + nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_VERBOSE, + UNIX_RM_HANDLE_ALLOC_LABEL + "Recording handle allocation: 0x%08x (%s:%d)", + handle, file, line); + + alloc->file = file; + alloc->line = line; +} + +static void FreeUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 handle) +{ + NVUnixRmHandleAllocationPtr alloc = + FindUnixRmHandleAllocation(pAllocator, handle); + + if (!alloc) { + return; + } + + nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_VERBOSE, + UNIX_RM_HANDLE_ALLOC_LABEL + "Freeing handle allocation: 0x%08x (%s:%d)", + handle, alloc->file, alloc->line); + + UnixRmHandleMemset(alloc, 0, sizeof(*alloc)); +} + + +NvU32 +nvDebugGenerateUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, + const char *file, int line) +{ + NvU32 handle = nvGenerateUnixRmHandleInternal(pAllocator); + + RecordUnixRmHandleAllocation(pAllocator, handle, file, line); + return handle; +} + +void nvDebugFreeUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle) +{ + if (!handle) { + return; + } + + FreeUnixRmHandleAllocation(pAllocator, handle); + + nvFreeUnixRmHandleInternal(pAllocator, handle); +} + +void nvReportUnfreedUnixRmHandleAllocations(NVUnixRmHandleAllocatorPtr pAllocator) +{ + NvU32 handleId; + + for (handleId = 1; handleId <= pAllocator->maxHandles; handleId++) { + if (USED(pAllocator->bitmap, handleId)) { + + NVUnixRmHandleAllocationPtr alloc = + FindUnixRmHandleAllocation(pAllocator, handleId); + + if (alloc == NULL) { + continue; + } + + nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_MSG, + UNIX_RM_HANDLE_ALLOC_LABEL + "Unfreed handle ID allocation: 0x%08x (%s:%d)", + handleId, + alloc->file, + alloc->line); + } + } +} + +#endif /* DEBUG */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile new file mode 100644 index 0000000..bb69e56 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile @@ -0,0 +1,144 @@ +########################################################################### +# Makefile for nv-modeset-kernel.o +########################################################################### + +NV_MODULE_LOGGING_NAME ?= nvidia-modeset + +VERSION_MK_DIR = ../../ +include ../../utils.mk + +include srcs.mk + +# The source files for nv-modeset-kernel.o are all SRCS and SRCS_CXX defined in +# srcs.mk, and the NVIDIA ID string +ALL_SRCS = $(SRCS) $(SRCS_CXX) +ALL_SRCS += $(NVIDSTRING) + +SRC_COMMON = ../common + +CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h + +CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc +CFLAGS += -I $(SRC_COMMON)/shared/inc +CFLAGS += -I $(SRC_COMMON)/inc +CFLAGS += -I $(SRC_COMMON)/softfloat/nvidia +CFLAGS += -I $(SRC_COMMON)/softfloat/source/include +CFLAGS += -I $(SRC_COMMON)/softfloat/source/8086-SSE +CFLAGS += -I $(SRC_COMMON)/unix/common/utils/interface +CFLAGS += -I $(SRC_COMMON)/unix/common/inc +CFLAGS += -I $(SRC_COMMON)/modeset +CFLAGS += -I os-interface/include +CFLAGS += -I kapi/interface +CFLAGS += -I ../nvidia/arch/nvalloc/unix/include +CFLAGS += -I interface +CFLAGS += -I include +CFLAGS += -I kapi/include +CFLAGS += -I generated +CFLAGS += -I $(SRC_COMMON)/displayport/inc +CFLAGS += -I $(SRC_COMMON)/displayport/inc/dptestutil +CFLAGS += -I $(SRC_COMMON)/inc/displayport + +CFLAGS += -DNDEBUG +CFLAGS += -D_LANGUAGE_C +CFLAGS += -D__NO_CTYPE + +CFLAGS += -DNV_CPU_INTRINSICS_KERNEL +CFLAGS += -DNVHDMIPKT_RM_CALLS_INTERNAL=0 + +# XXX 
it would be nice to only define these for appropriate files...
+CFLAGS += -DSOFTFLOAT_ROUND_ODD
+CFLAGS += -DSOFTFLOAT_FAST_DIV32TO16
+CFLAGS += -DSOFTFLOAT_FAST_DIV64TO32
+
+# Tell nvtiming to use nvkms import functions
+CFLAGS += -DNVT_USE_NVKMS
+
+CFLAGS += -Wformat
+CFLAGS += -Wreturn-type
+CFLAGS += -Wswitch
+CFLAGS += -Wunused-local-typedefs
+CFLAGS += -Wchar-subscripts
+CFLAGS += -Wparentheses
+CFLAGS += -Wpointer-arith
+CFLAGS += -Wcast-qual
+CFLAGS += -Wall
+CFLAGS += -Wextra
+CFLAGS += -Wno-sign-compare
+CFLAGS += -Wno-unused-parameter
+CFLAGS += -Wno-missing-field-initializers
+CFLAGS += -Wno-format-zero-length
+CFLAGS += -Wmissing-declarations
+CFLAGS += -Wno-cast-qual
+
+CFLAGS += -O2
+
+ifeq ($(TARGET_ARCH),x86_64)
+  CFLAGS += -msoft-float
+  CFLAGS += -mno-red-zone
+  CFLAGS += -mcmodel=kernel
+  CFLAGS += -mno-mmx
+  CFLAGS += -mno-sse
+  CFLAGS += -mno-sse2
+  CFLAGS += -mno-3dnow
+endif
+
+ifeq ($(TARGET_ARCH),aarch64)
+  CFLAGS += -mgeneral-regs-only
+  CFLAGS += -march=armv8-a
+endif
+
+CFLAGS += -fno-pic
+CFLAGS += -fno-common
+CFLAGS += -fomit-frame-pointer
+CFLAGS += -fno-strict-aliasing
+CFLAGS += -ffunction-sections
+CFLAGS += -fdata-sections
+CFLAGS += -ffreestanding
+CFLAGS += -fno-stack-protector
+
+CONDITIONAL_CFLAGS := $(call TEST_CC_ARG, -fcf-protection=none)
+CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-overflow=2)
+CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-truncation=1)
+ifeq ($(TARGET_ARCH),x86_64)
+  CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern)
+  CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register)
+endif
+
+CFLAGS += $(CONDITIONAL_CFLAGS)
+
+CC_ONLY_CFLAGS += -Wimplicit
+CC_ONLY_CFLAGS += -Wstrict-prototypes
+CC_ONLY_CFLAGS += -Wmissing-prototypes
+CC_ONLY_CFLAGS += -std=gnu11
+
+CXX_ONLY_CFLAGS += -std=gnu++11
+CXX_ONLY_CFLAGS += -fno-operator-names
+CXX_ONLY_CFLAGS += -fno-rtti
+CXX_ONLY_CFLAGS += -fno-exceptions
+CXX_ONLY_CFLAGS += -fcheck-new
+
+SHADER_OBJS =
+
+CFLAGS += -DNVKMS_INCLUDE_HEADSURFACE=0
+
+OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS))
+OBJS += $(SHADER_OBJS)
+
+# Define how to generate the NVIDIA ID string
+$(eval $(call GENERATE_NVIDSTRING, \
+  NV_KMS_ID, \
+  UNIX Open Kernel Mode Setting Driver, $(OBJS)))
+
+# Define how to build each object file from the corresponding source file.
+$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src))))
+
+NV_MODESET_KERNEL_O = $(OUTPUTDIR)/nv-modeset-kernel.o
+
+.PHONY: all clean
+all: $(NV_MODESET_KERNEL_O)
+
+$(NV_MODESET_KERNEL_O): $(OBJS)
+	$(call quiet_cmd,LD) -r -o $(NV_MODESET_KERNEL_O) $(OBJS)
+
+clean:
+	$(RM) -rf $(OUTPUTDIR)
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
new file mode 100644
index 0000000..03ce91c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
@@ -0,0 +1,43 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ +#define __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head, + NvBool enable); +void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo); +NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h new file mode 100644 index 0000000..4e2823f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_DP_NVDP_CONNECTOR_H__ +#define __NVKMS_DP_NVDP_CONNECTOR_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +NVDPLibConnectorPtr nvDPCreateConnector(NVConnectorEvoPtr pConnectorEvo); + +void nvDPNotifyLongPulse(NVConnectorEvoPtr pConnectorEvo, + NvBool connected); + +void nvDPNotifyShortPulse(NVDPLibConnectorPtr pNVDpLibConnector); + +void nvDPDestroyConnector(NVDPLibConnectorPtr pNVDpLibConnector); + +NvBool nvDPIsLinkAwaitingTransition(NVConnectorEvoPtr pConnectorEvo); + +NVDPLibModesetStatePtr nvDPLibCreateModesetState( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + NVHwModeTimingsEvo *pTimings); + +void nvDPLibFreeModesetState(NVDPLibModesetStatePtr pDpLibModesetState); + +void nvDPBeginValidation(NVDispEvoPtr pDispEvo); + +NvBool nvDPLibValidateTimings( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const struct NvKmsModeValidationParams *pModeValidationParams, + NVHwModeTimingsEvo *pTimings); + +NvBool nvDPEndValidation(NVDispEvoPtr pDispEvo); + +NvBool nvDPValidateModeForDpyEvo( + const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const struct NvKmsModeValidationParams *pModeValidationParams, + NVHwModeTimingsEvo *pTimings); + +void nvDPPreSetMode(NVDPLibConnectorPtr pDpLibConnector, + const NVEvoModesetUpdateState *pModesetUpdateState); + +void nvDPPostSetMode(NVDPLibConnectorPtr pDpLibConnector); + +void nvDPPause(NVDPLibConnectorPtr pNVDpLibConnector); + +NvBool nvDPResume(NVDPLibConnectorPtr pNVDpLibConnector, NvBool plugged); + +void nvDPSetAllowMultiStreamingOneConnector( + NVDPLibConnectorPtr pDpLibConnector, + NvBool allowMST); + +void nvDPSetAllowMultiStreaming(NVDevEvoPtr pDevEvo, NvBool allowMST); + +enum NVDpLinkMode { + NV_DP_LINK_MODE_OFF, + NV_DP_LINK_MODE_SST, + NV_DP_LINK_MODE_MST, +}; + +enum NVDpLinkMode nvDPGetActiveLinkMode(NVDPLibConnectorPtr pDpLibConnector); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_CONNECTOR_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h new file mode 100644 index 0000000..93d8a1c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_DEVICE_H__ +#define __NVKMS_DP_NVDP_DEVICE_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on); +unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo); +NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size); +void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo); +void nvDPDpyFree(NVDpyEvoPtr pDpyEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_DEVICE_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h new file mode 100644 index 0000000..1126eff --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_TIMER_H__ +#define __NVKMS_DP_NVDP_TIMER_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvBool nvDPTimersPending(void); +NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo); +void nvDPFreeTimer(NVDPLibTimerPtr pTimer); +void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_TIMER_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h new file mode 100644 index 0000000..4250227 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __EVO_STATE_H__ +#define __EVO_STATE_H__ + + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvEvoStateStartNoLock(NVEvoSubDevPtr); + +#ifdef __cplusplus +}; +#endif + +#endif /* __EVO_STATE_H__ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h new file mode 100644 index 0000000..1dae056 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_3DVISION_H__ +#define __NVKMS_3DVISION_H__ + +#include "nvkms-types.h" + +void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 head); + +void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo); +NvBool +nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo); +void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo, + const NvU32 head); + +#endif /* __NVKMS_3DVISION_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h new file mode 100644 index 0000000..d2cea1d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_ATTRIBUTES_H__ +#define __NVKMS_ATTRIBUTES_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvS64 nvRMLaneCountToNvKms(NvU32 rmLaneCount); + +NvBool nvSetDpyAttributeEvo(NVDpyEvoPtr pDpyEvo, + struct NvKmsSetDpyAttributeParams *pParams); + +NvBool nvGetDpyAttributeEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeParams *pParams); + +NvBool nvGetDpyAttributeValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeValidValuesParams *pParams); + +NvBool nvDpyValidateColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 value); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_ATTRIBUTES_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h new file mode 100644 index 0000000..5b5abeb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_CONSOLE_RESTORE_H__ +#define __NVKMS_CONSOLE_RESTORE_H__ + +#include "nvkms-types.h" + +NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST); + +#endif // __NVKMS_CONSOLE_RESTORE_H__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h new file mode 100644 index 0000000..f8b2358 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_CURSOR_H__ +#define __NVKMS_CURSOR_H__ + +#include "nvkms-types.h" + +NvBool nvGetCursorImageSurfaces( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsSetCursorImageCommonParams *pParams, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]); + +NvBool nvSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 head, + const struct NvKmsSetCursorImageCommonParams *pParams); + +void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo, + NvU32 head, NvS16 x, NvS16 y); + +void nvEvoMoveCursor(NVDispEvoPtr pDispEvo, NvU32 head, + const struct NvKmsMoveCursorCommonParams *pParams); + +NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo); +void nvFreeCursorEvo(NVDevEvoPtr pDevEvo); + +enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo); + +#endif /* __NVKMS_CURSOR_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h new file mode 100644 index 0000000..b180230 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h @@ -0,0 +1,286 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/* this file contains dma push buffer inlined routines */
+
+#ifndef __NVKMS_DMA_H__
+#define __NVKMS_DMA_H__
+
+#include <stddef.h>
+
+#include "nvkms-types.h"
+#include "nvkms-utils.h"
+
+#include "class/cl917d.h"
+
+/* declare prototypes: */
+void nvDmaKickoffEvo(NVEvoChannelPtr);
+
+void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count);
+void nvWriteEvoCoreNotifier(const NVDispEvoRec *, NvU32 offset, NvU32 value);
+
+NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo,
+                                   NvU32 offset, NvU32 done_base_bit,
+                                   NvU32 done_extent_bit,
+                                   NvU32 done_false_value);
+void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset,
+                              NvU32 done_base_bit,
+                              NvU32 done_extent_bit, NvU32 done_false_value);
+void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask);
+
+NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+                             NvU32 entry_stride,
+                             NvU32 entry_count,
+                             NvU32 status_offset,
+                             NvU32 field_count,
+                             NvU32 flag_count,
+                             const CRC32NotifierEntryRec *field_info,
+                             const CRC32NotifierEntryFlags *flag_info);
+void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+                             NvU32 offset,
+                             NvU32 reset_base_bit,
+                             NvU32 reset_value);
+NvBool nvEvoWaitForCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+                                 NvU32 offset,
+                                 NvU32 done_base_bit,
+                                 NvU32 done_extent_bit,
+                                 NvU32 done_value);
+
+#define SUBDEVICE_MASK_ALL DRF_MASK(NV917D_DMA_SET_SUBDEVICE_MASK_VALUE)
+
+static inline void nvDmaStorePioMethod(
+    void *pBase, NvU32 offset, NvU32 value)
+{
+    NvU32 *ptr = ((NvU32 *)pBase) + (offset/sizeof(NvU32));
+
+    /*
+     * Use gcc built-in atomic store to ensure the write happens exactly once
+     * and to ensure ordering. We can use the weaker "relaxed" model because we
+     * separately use appropriate fencing on anything that needs to precede this
+     * write.
+     */
+    __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+static inline NvU32 nvDmaLoadPioMethod(
+    const void *pBase, NvU32 offset)
+{
+    const NvU32 *ptr = ((const NvU32 *)pBase) + (offset/sizeof(NvU32));
+
+    /*
+     * Use gcc built-in atomic load to ensure the read happens exactly once and
+     * to ensure ordering. We use the "acquire" model to ensure anything after
+     * this read doesn't get reordered earlier than this read. (E.g., we don't
+     * want any writes to the pushbuffer that are waiting on GET to advance to
+     * get reordered before this read, potentially clobbering the pushbuffer
+     * before it's been read.)
+     */
+    return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+static inline NvBool nvDmaSubDevMaskMatchesCurrent(
+    const NVEvoChannel *pChannel,
+    const NvU32 subDevMask)
+{
+    const NvU32 allSubDevices = (1 << pChannel->pb.num_channels) - 1;
+
+    return (subDevMask & allSubDevices) ==
+           (pChannel->pb.currentSubDevMask & allSubDevices);
+}
+
+static inline void nvDmaSetEvoMethodData(
+    NVEvoChannelPtr pChannel,
+    const NvU32 data)
+{
+    *(pChannel->pb.buffer) = data;
+    pChannel->pb.buffer++;
+}
+
+static inline void nvDmaSetEvoMethodDataU64(
+    NVEvoChannelPtr pChannel,
+    const NvU64 data)
+{
+    nvDmaSetEvoMethodData(pChannel, NvU64_HI32(data));
+    nvDmaSetEvoMethodData(pChannel, NvU64_LO32(data));
+}
+
+
+/* Get the SDM for a given pDisp */
+static inline NvU32 nvDispSubDevMaskEvo(const NVDispEvoRec *pDispEvo)
+{
+    return NVBIT(pDispEvo->displayOwner);
+}
+
+/* Initialize the EVO SDM stack */
+static inline void nvInitEvoSubDevMask(NVDevEvoPtr pDevEvo) {
+    pDevEvo->subDevMaskStackDepth = 0;
+    pDevEvo->subDevMaskStack[0] = SUBDEVICE_MASK_ALL;
+}
+
+/* Return the SDM at the top of the stack (i.e. the currently active one) */
+static inline NvU32 nvPeekEvoSubDevMask(NVDevEvoPtr pDevEvo) {
+    return pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth];
+}
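These helpers form a small save/restore stack for the EVO subdevice mask (SDM). A minimal usage sketch, assuming a valid pDispEvo and an open pChannel are in scope, with method and data as placeholder values:

    nvPushEvoSubDevMaskDisp(pDispEvo);           /* target only this disp's subdevice */
    nvDmaSetStartEvoMethod(pChannel, method, 1); /* header encoded against the pushed mask */
    nvDmaSetEvoMethodData(pChannel, data);       /* one data word, matching count == 1 */
    nvPopEvoSubDevMask(pDispEvo->pDevEvo);       /* restore the previously active mask */

nvDmaSetStartEvoMethod() (defined further down) peeks the stack for every method header, so pushes and pops must stay balanced around the methods they are meant to scope.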
+
+/* Push the given mask onto the stack and set it. */
+static inline void nvPushEvoSubDevMask(NVDevEvoPtr pDevEvo, NvU32 mask) {
+    pDevEvo->subDevMaskStackDepth++;
+
+    nvAssert(pDevEvo->subDevMaskStackDepth < NV_EVO_SUBDEV_STACK_SIZE);
+
+    pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth] = mask;
+}
+
+/* Automagically push the SDM for broadcast to disp. */
+static inline void nvPushEvoSubDevMaskDisp(const NVDispEvoRec *pDispEvo) {
+    NvU32 mask = nvDispSubDevMaskEvo(pDispEvo);
+
+    nvPushEvoSubDevMask(pDispEvo->pDevEvo, mask);
+}
+
+/* Pop the last entry on the stack */
+static inline void nvPopEvoSubDevMask(NVDevEvoPtr pDevEvo) {
+    pDevEvo->subDevMaskStackDepth--;
+}
+
+/*
+ * Update the state tracked in updateState to indicate that pChannel has
+ * pending methods and requires an update/kickoff.
+ */
+static inline void nvUpdateUpdateState(NVDevEvoPtr pDevEvo,
+                                       NVEvoUpdateState *updateState,
+                                       const NVEvoChannel *pChannel)
+{
+    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (subDevMask & (1 << sd)) {
+            updateState->subdev[sd].channelMask |= pChannel->channelMask;
+        }
+    }
+}
+
+/*
+ * Update the state tracked in updateState to indicate that pChannel has
+ * pending WindowImmediate methods.
+ */
+static inline void nvWinImmChannelUpdateState(NVDevEvoPtr pDevEvo,
+                                              NVEvoUpdateState *updateState,
+                                              const NVEvoChannel *pChannel)
+{
+    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (subDevMask & (1 << sd)) {
+            updateState->subdev[sd].winImmChannelMask |= pChannel->channelMask;
+        }
+    }
+}
+
+/*
+ * Update the state tracked in updateState to prevent pChannel from
+ * interlocking with the core channel on the next UPDATE.
+ */
+static inline
+void nvDisableCoreInterlockUpdateState(NVDevEvoPtr pDevEvo,
+                                       NVEvoUpdateState *updateState,
+                                       const NVEvoChannel *pChannel)
+{
+    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (subDevMask & (1 << sd)) {
+            updateState->subdev[sd].noCoreInterlockMask |=
+                pChannel->channelMask;
+        }
+    }
+}
+
+// These macros verify that the values used in the methods fit
+// into the defined ranges.
+#define ASSERT_DRF_NUM(d, r, f, n) \
+    nvAssert(!(~DRF_MASK(NV ## d ## r ## f) & (n)))
+
+// From resman nv50/dev_disp.h
+#define NV_UDISP_DMA_OPCODE 31:29 /* RWXUF */
+#define NV_UDISP_DMA_OPCODE_METHOD 0x00000000 /* RW--V */
+#define NV_UDISP_DMA_METHOD_COUNT 27:18 /* RWXUF */
+// Technically, the METHOD_OFFSET field is 13:2 for nvdisplay (classes c3*),
+// and only 11:2 for older display classes. But, the higher bits were
+// unused in the older classes, and we should never push any methods of that
+// size on them anyway, so we always use the wider definition here.
+#define NV_UDISP_DMA_METHOD_OFFSET 13:2 /* RWXUF */
+
+// Start an EVO method.
+static inline void nvDmaSetStartEvoMethod(
+    NVEvoChannelPtr pChannel,
+    NvU32 method,
+    NvU32 count)
+{
+    NVDmaBufferEvoPtr p = &pChannel->pb;
+    const NvU32 sdMask = nvPeekEvoSubDevMask(p->pDevEvo);
+
+    // We add 1 to the count for the method header.
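+    //
+    // Worked example (values hypothetical): for method == 0x0400 and
+    // count == 2, the header word built below encodes OPCODE_METHOD
+    // (bits 31:29 == 0), METHOD_COUNT (bits 27:18 == 2) and METHOD_OFFSET
+    // (bits 13:2 == 0x0400 >> 2), i.e. (2 << 18) | 0x0400 == 0x00080400,
+    // and is then followed by the two data words.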
+ const NvU32 countPlusHeader = count + 1; + + const NvU32 methodDwords = method >> 2; + + nvAssert((method & 0x3) == 0); + + ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count); + ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords); + + if (!nvDmaSubDevMaskMatchesCurrent(pChannel, sdMask)) { + if (p->num_channels > 1) { + nvEvoSetSubdeviceMask(pChannel, sdMask); + } + } + + if (p->fifo_free_count <= countPlusHeader) { + nvEvoMakeRoom(pChannel, countPlusHeader); + } + + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(_UDISP, _DMA, _OPCODE, _METHOD) | + DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count) | + DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords)); + + p->fifo_free_count -= countPlusHeader; +} + +static inline NvBool nvIsUpdateStateEmpty(const NVDevEvoRec *pDevEvo, + const NVEvoUpdateState *updateState) +{ + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (updateState->subdev[sd].channelMask != 0x0) { + return FALSE; + } + } + return TRUE; +} + +#endif /* __NVKMS_DMA_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h new file mode 100644 index 0000000..3762379 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_DPY_H__ +#define __NVKMS_DPY_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo); +void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsModeValidationValidSyncs *pValidSyncs); +NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDpyId dpyId, const char *dpAddress); +void nvFreeDpyEvo(NVDispEvoPtr pDispEvo, NVDpyEvoPtr pDpyEvo); +NVConnectorEvoPtr nvGetConnectorFromDisp(NVDispEvoPtr pDispEvo, NVDpyId dpyId); + +void nvUpdateInfoFrames(NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyRequiresDualLinkEvo(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings); + +NVDpyEvoPtr nvGetDpyEvoFromDispEvo(const NVDispEvoRec *pDispEvo, NVDpyId dpyId); + +NVDpyEvoPtr nvGetDPMSTDpyEvo(NVConnectorEvoPtr pConnectorEvo, + const char *address, NvBool *pDynamicDpyCreated); + +typedef enum { + NVKMS_EDID_READ_MODE_DEFAULT, + NVKMS_EDID_READ_MODE_ACPI, +} NvKmsEdidReadMode; + +NvBool nvDpyReadAndParseEdidEvo( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NvKmsEdidReadMode readMode, + NVEdidRec *pEdid, + NVParsedEdidEvoPtr *ppParsedEdid, + NVEvoInfoStringPtr pInfoString); + +char *nvGetDpyIdListStringEvo(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList); + +NvBool nvDpyGetDynamicData( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams); + +void nvDpyUpdateCurrentAttributes(NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyIsAdaptiveSync(const NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyIsAdaptiveSyncDefaultlisted(const NVParsedEdidEvoRec *pParsedEdid); + +enum NvKmsDpyAttributeDigitalSignalValue +nvGetDefaultDpyAttributeDigitalSignalValue(const NVConnectorEvoRec *pConnectorEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DPY_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h new file mode 100644 index 0000000..087476e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_EVENT_H__ +#define __NVKMS_EVENT_H__ + +#include "nvkms.h" + +void nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32); +void nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32); + +#endif /* __NVKMS_EVENT_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h new file mode 100644 index 0000000..dcde326 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_STATES_H__ +#define __NVKMS_STATES_H__ + +#include "nvkms-types.h" + +#include "g_nvkms-evo-states.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum NVEvoLockSignal { + NV_EVO_LOCK_SIGNAL_FLIP_LOCK, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK, + NV_EVO_LOCK_SIGNAL_STEREO, +} NVEvoLockSignal; + +typedef enum NVEvoLockAction { + NV_EVO_LOCK_HEADS, + NV_EVO_UNLOCK_HEADS, + NV_EVO_ADD_FRAME_LOCK_SERVER, + NV_EVO_REM_FRAME_LOCK_SERVER, + NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC, + NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC, + NV_EVO_ADD_FRAME_LOCK_CLIENT, + NV_EVO_REM_FRAME_LOCK_CLIENT, + NV_EVO_ENABLE_VRR, + NV_EVO_DISABLE_VRR, + NV_EVO_ADD_FRAME_LOCK_REF, + NV_EVO_REM_FRAME_LOCK_REF, + NV_EVO_ADD_SLI_SECONDARY, + NV_EVO_ADD_SLI_LAST_SECONDARY, + NV_EVO_ADD_SLI_PRIMARY, + NV_EVO_REM_SLI, +} NVEvoLockAction; + +/* nv_evo.c */ + +NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *, + NVEvoSubDevPtr, + NVEvoLockSignal); +NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads); +NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads); + +/* nvkms-hw-states.c */ + +NvBool nvEvoLockHWStateNoLock(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool 
nvEvoLockHWStateSliPrimaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_STATES_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h new file mode 100644 index 0000000..27caf8a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_H__ +#define __NVKMS_H__ + +#include "nvkms-types.h" +#include "nvkms-modeset-types.h" +#include "nvkms-api.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern NVEvoInfoStringRec dummyInfoString; + +NVDevEvoPtr nvFindDevEvoByDeviceId(NvU32 deviceId); +NvU8 nvGetGpuLogIndex(void); +void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head, + NVEvoModesetUpdateState *pModesetUpdateState); +void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo, + const NvU32 head, + NVDPLibModesetStatePtr pDpLibModesetState, + NVEvoModesetUpdateState *pModesetUpdateState); +void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync, + NVEvoUpdateState *updateState, NvBool releaseElv); +void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState); +void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvBool isVrr, + NvBool enable); + +void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *updateState); +void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NvU16 x, + NvU16 y, + NVEvoUpdateState *updateState); +void +nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings, + NvModeTimingsPtr pModeTimings); +void nvEvoSetTimings(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState); +NvBool nvGetDfpProtocol(const NVDpyEvoRec *pDpyEvo, + NVHwModeTimingsEvoPtr pTimings); +void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsScalingUsageBounds *pScaling); +NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps, + struct NvKmsScalingUsageBounds *out); +NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NvBool doubleScan, + NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut); +NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString); +void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo, + NVHwModeViewPortEvo *pViewPort); +struct NvKmsUsageBounds nvUnionUsageBounds(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b); +NvBool UsageBoundsEqual(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b); +NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound( + const enum NvKmsSurfaceMemoryFormat format, + const NvU64 supportedFormatsCapMask); +void nvCancelLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo); +void nvScheduleLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo); +void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo); +void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo, NvU32 *dispNeedsEarlyUpdate, + NVEvoUpdateState *updateState); +void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock); +NvBool nvSetUsageBoundsEvo( + NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState); +void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState); + +void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *pUpdateState); + +void nvChooseDitheringEvo( + const NVConnectorEvoRec *pConnectorEvo, + 
const enum nvKmsPixelDepth pixelDepth, + const NVDpyAttributeRequestedDitheringConfig *pReqDithering, + NVDpyAttributeCurrentDitheringConfig *pCurrDithering); + +void nvSetDitheringEvo( + NVDispEvoPtr pDispEvo, + const NvU32 head, + const NVDpyAttributeCurrentDitheringConfig *pCurrDithering, + NVEvoUpdateState *pUpdateState); + +NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo); +NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo); +NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val); +NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value); +NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue); +NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value); +NvBool nvSetStereoEvo(const NVDispEvoRec *pDispEvo, + const NvU32 head, NvBool enable); +NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head); +NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo); +void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo); + +void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo); + +void nvSetDVCEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvS32 dvc, + NVEvoUpdateState *updateState); +void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 value, NVEvoUpdateState *updateState); + +NvBool nvLayerSetPositionEvo( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetLayerPositionRequest *pRequest); + +NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams + *pParams, + NVEvoInfoStringPtr pInfoString); + +NvBool nvConstructHwModeTimingsImpCheckEvo( + const NVConnectorEvoRec *pConnectorEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + const int head); + +NvBool nvDowngradeHwModeTimingsDpPixelDepthEvo( + NVHwModeTimingsEvoPtr pTimings, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace); + +NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams); + +NvBool nvEvoUpdateHwModeTimingsViewPort( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pModeValidationParams, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvo *pTimings); + +typedef struct _NVValidateImpOneDispHeadParamsRec +{ + const NVConnectorEvoRec *pConnectorEvo; + const struct NvKmsUsageBounds *pUsage; + NvU32 activeRmId; + NVHwModeTimingsEvoPtr pTimings; +} NVValidateImpOneDispHeadParamsRec; + +NvBool nvValidateImpOneDisp( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 *pMinIsoBandwidthKBPS, + NvU32 *pMinDramFloorKBPS); + +NvBool nvAllocateDisplayBandwidth( + NVDispEvoPtr pDispEvo, + NvU32 newIsoBandwidthKBPS, + NvU32 newDramFloorKBPS); + +NvBool nvValidateImpOneDispDowngrade( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 downgradePossibleHeadsBitMask); + +NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo); +NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo); + +void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 head, NvBool kickoff, + const struct 
NvKmsSetLutCommonParams *pParams); +NvBool nvValidateSetLutCommonParams( + const NVDevEvoRec *pDevEvo, + const struct NvKmsSetLutCommonParams *pParams); + +void nvChooseCurrentColorSpaceAndRangeEvo( + enum nvKmsPixelDepth pixelDepth, + enum NvYuv420Mode yuv420Mode, + const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace, + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace, + enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange); + +void nvUpdateCurrentHardwareColorSpaceAndRangeEvo( + NVDispEvoPtr pDispEvo, + const NvU32 head, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const enum NvKmsDpyAttributeColorRangeValue colorRange, + NVEvoUpdateState *pUpdateState); + +NvBool nvAssignSOREvo(NVConnectorEvoPtr pConnectorEvo, NvU32 sorExcludeMask); +void nvRestoreSORAssigmentsEvo(NVDevEvoRec *pDevEvo); + +void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo, + NvBool enable, NvBool isPre); + +void nvUnbloatHwModeTimingsEvo(NVHwModeTimingsEvoPtr pTimings, NvU32 factor); + +NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head, + CRC32NotifierCrcOut *crcOut /* out */); + +NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo); +NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest, + enum NvKmsAllocDeviceStatus *pStatus); +NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo); +NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU32 *val, NvBool set, + NvBool *needsEarlyUpdate, + NVEvoUpdateState *updateState); + +void nvEvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, + NvU32 head, NvBool kickOff); + +NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo, + const NvU32 sd, + NVEvoChannelPtr pChannel, + NvU64 *pStartTime, + const NvU32 timeout); + +static inline void nvAssertSameSemaphoreSurface( + const NVFlipChannelEvoHwState *pHwState) +{ + + /*! + * pHwState->syncObject contains separate fields to track the semaphore + * surface used for acquire, and the semaphore surface used for release. + * Prior to NvDisplay 4.0, display HW only supports using a single semaphore + * surface for both acquire and release. As such, assert that the semaphore + * surfaces in pHwState->syncObject are the same, and that we're also not + * using syncpoints. This is enforced during flip validation. + */ + + nvAssert(pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == + pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo); + + nvAssert(!pHwState->syncObject.usingSyncpt); +} + +void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask, + NvBool server); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h new file mode 100644 index 0000000..8b74629 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_EVO_1_H__ +#define __NVKMS_EVO_1_H__ + +#include "nvkms-types.h" + +NvBool nvEvo1IsChannelIdle(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); +NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + +void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput); +void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre); + +void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings); + +NVEvoChannel* nvEvo1AllocateCoreChannel(NVDevEvoRec *pDevEvo); +void nvEvo1FreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel); + +static inline NvU16 nvEvo1GetColorSpaceFlag(NVDevEvoPtr pDevEvo, + const NvBool colorSpaceOverride) +{ + NvU16 colorSpaceFlag = 0; + + if (colorSpaceOverride) { + nvAssert(pDevEvo->caps.supportsDP13); + colorSpaceFlag = 1 << 11; + } + + return colorSpaceFlag; +} + +#endif /* __NVKMS_EVO_1_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h new file mode 100644 index 0000000..a2bb25b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKMS_FLIP_WORKAREA_H__
+#define __NVKMS_FLIP_WORKAREA_H__
+
+#include "nvkms-types.h"
+
+struct NvKmsFlipWorkArea {
+    struct {
+        NvBool changed;
+        struct {
+            /*
+             * The pre-flip usage bounds are the union of the current and
+             * the new usage bounds: the combined bounds have to allow both
+             * the current state and the state being flipped to.
+             * This field is set and used by PreFlipIMP() and its
+             * helper functions.
+             */
+            struct NvKmsUsageBounds preFlipUsage;
+
+            NVFlipEvoHwState newState;
+            NVFlipEvoHwState oldState;
+
+            NvU32 oldAccelerators;
+            NvBool accelerated;
+        } head[NVKMS_MAX_HEADS_PER_DISP];
+    } sd[NVKMS_MAX_SUBDEVICES];
+};
+
+#endif /* __NVKMS_FLIP_WORKAREA_H__ */
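The preFlipUsage field above has to cover both the outgoing and the incoming configuration. A minimal sketch of how that union might be computed with nvUnionUsageBounds() (declared in nvkms-evo.h earlier in this patch); pWorkArea, sd, head and pCurrentUsage are assumed to be in scope, and the usage field of NVFlipEvoHwState is an assumption not visible in this header:

    /* Combined bounds: wide enough for what is shown now and what is flipped to. */
    pWorkArea->sd[sd].head[head].preFlipUsage =
        nvUnionUsageBounds(pCurrentUsage,
                           &pWorkArea->sd[sd].head[head].newState.usage);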
+ */ + +#ifndef __NVKMS_FLIP_H__ +#define __NVKMS_FLIP_H__ + + +#include "nvkms-types.h" + +void nvClearFlipEvoHwState( + NVFlipEvoHwState *pFlipState); + +void nvInitFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipEvoHwState *pFlipState); + +NvBool nvUpdateFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState, + NvBool allowVrr, + const struct NvKmsUsageBounds *pPossibleUsage); + +NvBool nvValidateFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings, + const NVFlipEvoHwState *pFlipState); + +void +nvUpdateSurfacesFlipRefCount( + NVDevEvoPtr pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState, + NvBool increase); + +void nvFlipEvoOneHead( + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const NVFlipEvoHwState *pFlipState, + NvBool allowFlipLock, + NVEvoUpdateState *updateState); + +void nvEvoCancelPostFlipIMPTimer( + NVDevEvoPtr pDevEvo); + +NvBool nvHandleSyncptRegistration( + NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState); + +void nvFillPostSyncptReplyOneChannel( + NVEvoChannel *pChannel, + enum NvKmsSyncptType postType, + struct NvKmsSyncpt *postSyncpt, + const NVFlipSyncObjectEvoHwState *pHwSyncObject); + +NvBool nvFlipEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipRequest *request, + struct NvKmsFlipReply *reply, + NvBool skipUpdate, + NvBool allowFlipLock); + +#endif /* __NVKMS_FLIP_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h new file mode 100644 index 0000000..9579fb0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_FRAMELOCK_H__ +#define __NVKMS_FRAMELOCK_H__ + +#include "nvkms-types.h" + +void nvAllocFrameLocksEvo(NVDevEvoPtr pDevEvo); +void nvFreeFrameLocksEvo(NVDevEvoPtr pDevEvo); + +NvBool nvFrameLockSetUseHouseSyncEvo(NVFrameLockEvoPtr, NvU32); +NvBool nvFrameLockGetStatusEvo(const NVFrameLockEvoRec *, + enum NvKmsFrameLockAttribute attribute, + NvS64*); + +NvBool nvSetFrameLockDisplayConfigEvo(NVDpyEvoRec *pDpyEvo, NvS64 val); +NvBool nvGetFrameLockDisplayConfigEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val); +NvBool nvGetFrameLockDisplayConfigValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + +NvBool nvSetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsSetDispAttributeParams *pParams); + +NvBool nvGetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsGetDispAttributeParams *pParams); + +NvBool nvGetDispAttributeValidValuesEvo( + const NVDispEvoRec *pDispEvo, + struct NvKmsGetDispAttributeValidValuesParams *pParams); + +NvBool nvSetFrameLockAttributeEvo( + NVFrameLockEvoRec *pFrameLockEvo, + const struct NvKmsSetFrameLockAttributeParams *pParams); + +NvBool nvGetFrameLockAttributeEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeParams *pParams); + +NvBool nvGetFrameLockAttributeValidValuesEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams); + +NvU32 nvGetFramelockServerHead(const NVDispEvoRec *pDispEvo); +NvU32 nvGetFramelockClientHeadsMask(const NVDispEvoRec *pDispEvo); + +static inline NvBool +nvIsFramelockableHead(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + return (head != NV_INVALID_HEAD) && + ((head == nvGetFramelockServerHead(pDispEvo)) || + ((NVBIT(head) & nvGetFramelockClientHeadsMask(pDispEvo)) != 0x0)); +} + +void nvUpdateGLSFramelock(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvBool enable, const NvBool server); + +#endif /* __NVKMS_FRAMELOCK_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h new file mode 100644 index 0000000..6675a0b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_HAL_H__ +#define __NVKMS_HAL_H__ + +#include "nvkms-types.h" + +enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo); + +#endif /* __NVKMS_HAL_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h new file mode 100644 index 0000000..93341ec --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_HDMI_H__ +#define __NVKMS_HDMI_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +void nvUpdateHdmiInfoFrames(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVAttributesSetEvoRec *pAttributesSet, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVDpyEvoRec *pDpyEvo); + +void nvDpyUpdateHdmiPreModesetEvo(NVDpyEvoPtr pDpyEvo); +void nvDpyUpdateHdmiVRRCaps(NVDpyEvoPtr pDpyEvo); +void nvUpdateHdmiCaps(NVDpyEvoPtr pDpyEvo); + +void nvLogEdidCea861InfoEvo(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +NvBool nvDpyIsHdmiEvo(const NVDpyEvoRec *pDpyEvo); + +NvBool nvHdmi204k60HzRGB444Allowed(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming); + +void nvHdmiDpEnableDisableAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable); + +void nvRemoveUnusedHdmiDpAudioDevice(const NVDispEvoRec *pDispEvo); + +void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable); + +NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo); +void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo); + +NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo); +NvBool nvHdmiFrlQueryConfig(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + NVHwModeTimingsEvo *pTimings, + const struct NvKmsModeValidationParams *pParams); +void nvHdmiFrlClearConfig(NVDispEvoRec *pDispEvo, NvU32 activeRmId); +void nvHdmiFrlSetConfig(NVDispEvoRec *pDispEvo, NvU32 head); + +void nvHdmiDpConstructHeadAudioState(const NvU32 displayId, + const NVDpyEvoRec *pDpyEvo, + NVDispHeadAudioStateEvoRec *pAudioState); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_HDMI_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h new file mode 100644 index 0000000..a9462ce --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_LUT_H__ +#define __NVKMS_LUT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo); + +void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo); + +void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo, + const NVEvoLutDataRec *pLUTBuffer, + NVDispEvoPtr pDispEvo); + +static inline void nvCancelLutUpdateEvo( + const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + nvkms_free_timer(pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer); + pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer = NULL; +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_LUT_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h new file mode 100644 index 0000000..68673a8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_MODEPOOL_H__ +#define __NVKMS_MODEPOOL_H__ + +#include "nvkms-types.h" +#include "nvkms-utils.h" /* NVEvoLogType */ + +#ifdef __cplusplus +extern "C" { +#endif + +void +nvValidateModeIndex(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeIndexRequest *pRequest, + struct NvKmsValidateModeIndexReply *pReply); +void +nvValidateModeEvo(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeRequest *pRequest, + struct NvKmsValidateModeReply *pReply); + +void nvEvoLogModeValidationModeTimings(NVEvoInfoStringPtr + pInfoString, + const NvModeTimings *pModeTimings); + +NvBool nvValidateModeForModeset(NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvo *pTimingsEvo, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState); + +const NVT_TIMING *nvFindEdidNVT_TIMING( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_MODEPOOL_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h new file mode 100644 index 0000000..e923ae2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_TYPES_H__ +#define __NVKMS_MODESET_TYPES_H__ + +/* This header file defines types used internally by the modeset path. 
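+ *
+ * For example, given a hypothetical NVProposedModeSetHwState pointer
+ * pProposed, the proposed configuration for head "h" on subdevice "sd" is
+ * reached as pProposed->disp[sd].head[h], and its per-subdevice flip state
+ * as pProposed->sd[sd].head[h].flip (see the types below).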
*/ + +#include "nvkms-types.h" + +typedef struct { + NVHwModeTimingsEvo timings; + NVDpyIdList dpyIdList; + NVConnectorEvoRec *pConnectorEvo; + NvU32 activeRmId; + struct NvKmsSetLutCommonParams lut; + NvU8 allowFlipLockGroup; + NVAttributesSetEvoRec attributes; + struct NvKmsModeValidationParams modeValidationParams; + NvBool changed : 1; + NvBool allowGsync : 1; + NvBool hs10bpcHint : 1; + enum NvKmsAllowAdaptiveSync allowAdaptiveSync; + NvU32 vrrOverrideMinRefreshRate; + NVDPLibModesetStatePtr pDpLibModesetState; + NVDispHeadAudioStateEvoRec audio; + NVDispHeadInfoFrameStateEvoRec infoFrame; +} NVProposedModeSetHwStateOneHead; + +typedef struct { + NVProposedModeSetHwStateOneHead head[NVKMS_MAX_HEADS_PER_DISP]; +} NVProposedModeSetHwStateOneDisp; + +typedef struct { + struct { + NVFlipEvoHwState flip; + } head[NVKMS_MAX_HEADS_PER_DISP]; +} NVProposedModeSetHwStateOneSubDev; + +typedef struct { + NVProposedModeSetHwStateOneDisp disp[NVKMS_MAX_SUBDEVICES]; + NVProposedModeSetHwStateOneSubDev sd[NVKMS_MAX_SUBDEVICES]; + NvBool allowHeadSurfaceInNvKms : 1; +} NVProposedModeSetHwState; + +struct _NVEvoModesetUpdateState { + NVEvoUpdateState updateState; + NVDpyIdList connectorIds; + const NVDPLibModesetStateRec + *pDpLibModesetState[NVKMS_MAX_HEADS_PER_DISP]; + NvBool windowMappingChanged; +}; + +#endif /* __NVKMS_MODESET_TYPES_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h new file mode 100644 index 0000000..9890b87 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_WORKAREA_H__ +#define __NVKMS_MODESET_WORKAREA_H__ + +typedef struct { + struct { + struct { + NVFlipEvoHwState newState; + NVFlipEvoHwState oldState; + NvU32 oldActiveRmId; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + NVDpyIdList changedDpyIdList; + + NVDpyIdList sorAssignedConnectorsList; + NvU32 assignedSorMask; + + } sd[NVKMS_MAX_SUBDEVICES]; + NVEvoUpdateState earlyUpdateState; + NVEvoModesetUpdateState modesetUpdateState; + + /* + * The display bandwidth values that NVKMS needs to allocate after the + * modeset is complete. 
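+     *
+     * Presumably these are handed to nvAllocateDisplayBandwidth() (declared
+     * in nvkms-evo.h earlier in this patch) once the modeset has been
+     * committed, e.g.:
+     *
+     *     nvAllocateDisplayBandwidth(pDispEvo,
+     *                                pWorkArea->postModesetIsoBandwidthKBPS,
+     *                                pWorkArea->postModesetDramFloorKBPS);
+     *
+     * (pWorkArea being a hypothetical NVModeSetWorkArea pointer.)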
+ */ + NvU32 postModesetIsoBandwidthKBPS; + NvU32 postModesetDramFloorKBPS; +} NVModeSetWorkArea; + +struct NvKmsVrrTimings { + struct { + struct { + NVHwModeTimingsEvo timings; + NvBool adjusted; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +#endif /* __NVKMS_MODESET_WORKAREA_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h new file mode 100644 index 0000000..9540058 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_H__ +#define __NVKMS_MODESET_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + NvBool bypassComposition, + NvBool doRasterLock); + +typedef NvBool (*NVShutDownHeadsTestFunc)( + const NVDispEvoRec *pDispEvo, + const NvU32 head); + +void nvShutDownHeads(NVDevEvoPtr pDevEvo, NVShutDownHeadsTestFunc pTestFunc); + +NVVBlankCallbackPtr nvRegisterVBlankCallback(NVDispEvoPtr pDispEvo, + NvU32 head, + NVVBlankCallbackProc pCallback, + void *pUserData); +void nvUnregisterVBlankCallback(NVDispEvoPtr pDispEvo, + NvU32 head, + NVVBlankCallbackPtr pCallback); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_MODESET_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h new file mode 100644 index 0000000..947a2a5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_PREALLOC_TYPES_H__ +#define __NVKMS_PREALLOC_TYPES_H__ + +#include "nvtypes.h" + +enum NVPreallocType { + PREALLOC_TYPE_IMP_PARAMS, + PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE, + PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE, + PREALLOC_TYPE_MODE_SET_WORK_AREA, + PREALLOC_TYPE_FLIP_WORK_AREA, + PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE, + PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE, + PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS, + PREALLOC_TYPE_MAX +}; + +struct NVDevPreallocRec { + void *ptr[PREALLOC_TYPE_MAX]; + NvU8 used[(PREALLOC_TYPE_MAX + 7) / 8]; +}; + +#endif /* __NVKMS_PREALLOC_TYPES_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h new file mode 100644 index 0000000..2616dce --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
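+ */
+
+/*
+ * Illustrative sketch, not part of the original source: NVDevPreallocRec
+ * (declared in nvkms-prealloc-types.h above) packs one "in use" bit per
+ * NVPreallocType into its 'used' byte array, so, assuming the
+ * conventional bit-array layout, the bit for a given 'type' would be
+ * tested and set as:
+ *
+ *   p->used[type / 8] &   (1 << (type % 8))    // test
+ *   p->used[type / 8] |=  (1 << (type % 8));   // mark used
+ *
+ * nvPreallocGet()/nvPreallocRelease() below would be expected to set and
+ * clear these bits around use of the corresponding ptr[] slot.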
+ */
+
+#ifndef __NVKMS_PREALLOC_H__
+#define __NVKMS_PREALLOC_H__
+
+#include "nvkms-types.h"
+#include "nvkms-prealloc-types.h"
+
+void *nvPreallocGet(NVDevEvoPtr pDevEvo,
+                    enum NVPreallocType type,
+                    size_t sizeCheck);
+void nvPreallocRelease(NVDevEvoPtr pDevEvo, enum NVPreallocType type);
+
+NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo);
+void nvPreallocFree(NVDevEvoPtr pDevEvo);
+
+#endif /* __NVKMS_PREALLOC_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h
new file mode 100644
index 0000000..cfb5eb9
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h
@@ -0,0 +1,81 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef __NV_KMS_PRIVATE_H__ +#define __NV_KMS_PRIVATE_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, + NVDevEvoPtr pDevEvo, NvBool isPrivileged); + +void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev); + +void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType); + +void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttribute attribute, + const NvS64 value); + +void nvSendFrameLockAttributeChangedEventEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + const enum NvKmsFrameLockAttribute attribute, + const NvS64 value); + +void nvSendFlipOccurredEventEvo( + const NVDevEvoRec *pDevEvo, + NVEvoChannelMask channelMask); + +void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen); + +void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen); + +#if defined(DEBUG) +NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo); +#endif + +const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev( + struct NvKmsPerOpenDev *pOpenDev); +const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst( + const struct NvKmsPerOpenDev *pOpenDev); +NVDevEvoPtr nvGetDevEvoFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_KMS_PRIVATE_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h new file mode 100644 index 0000000..a0e2e90 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h @@ -0,0 +1,152 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_RM_H__ +#define __NVKMS_RM_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" +#include /* NV0092_REGISTER_RG_LINE_CALLBACK_FN */ +#include /* OSVBLANKCALLBACKPROC */ + +NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo, + NvU32 dpcdAddr, + NvU8 dpcdData); + +NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo, + NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + struct nvkms_ref_ptr *ref_ptr, + NvU32 parentHandle, + NvU32 eventHandle, + Callback5ArgVoidReturn func, + NvU32 event); + +enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo); +void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo); +enum NvKmsBeginEndModeset { + BEGIN_MODESET, + END_MODESET +}; +void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo, enum NvKmsBeginEndModeset, NvU32 mask); +NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList); +void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 dpyId); +void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly); +NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo, + NVDpyIdList dpyIdList); +NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo); +void nvRmPauseDP(NVDevEvoPtr pDevEvo); +NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value); +NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + NvU32 *ctxDmaFlags, void **ppBase, NvU64 size, + NvKmsMemoryIsoType isoType); +NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo); +void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo); +NvBool nvRMSyncEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 errorToken); +NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd, + NvBool *stoppedBase); +NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID); +NvU32 nvRmEvoBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 hCtxDma); +NvU32 nvRmEvoAllocateAndBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NvU32 hMemory, + const enum NvKmsSurfaceMemoryLayout layout, + NvU64 limit); +NvBool nvRmEvoAllocAndBindSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + NvU32 id, + NvU32 *pSyncptHandle, + NvU32 *pSyncptCtxDmaHandle); +void nvRmEvoFreePreSyncpt(NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel); +NvBool nvRmGarbageCollectSyncpts( + NVDevEvoRec *pDevEvo); +void nvRmEvoFreeSyncpt(NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pEvoSyncpt); +void nvRmEvoFreeDispContextDMA(NVDevEvoPtr pDevEvo, + NvU32 *hDispCtxDma); +void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]); +NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, NvU64 size, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES], + NvU32 subDeviceMask); +NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest); +void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo); +NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList); +NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd); +NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo); +void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo); +NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU64 limit, + 
NvU32 ctxDmaFlags, + NvU32 subDeviceMask); +void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma); +NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue); +NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo); +NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed); +NvU32 nvRmAddRgLine1Callback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallback); +void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle); +NvU32 nvRmAddVBlankCallback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + OSVBLANKCALLBACKPROC pCallback); +void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle); +void nvRmMuxInit(NVDevEvoPtr pDevEvo); +NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo); + +void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo); +void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_RM_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h new file mode 100644 index 0000000..a4f5cf6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_RMAPI_H__ + +#define __NVKMS_RMAPI_H__ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvU32 nvRmApiAlloc( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams); + +NvU32 nvRmApiAllocMemory64( + NvU32 hClient, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit); + +NvU32 nvRmApiControl( + NvU32 hClient, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize); + +NvU32 nvRmApiDupObject( + NvU32 hClient, + NvU32 hParent, + NvU32 hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags); + +NvU32 nvRmApiFree( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject); + +NvU32 nvRmApiVidHeapControl( + void *pVidHeapControlParams); + +NvU32 nvRmApiMapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags); + +NvU32 nvRmApiUnmapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + const void *pLinearAddress, + NvU32 flags); + +NvU32 nvRmApiMapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset); + +NvU32 nvRmApiUnmapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_RMAPI_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h new file mode 100644 index 0000000..43f9fa5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_SOFTFLOAT_H__ +#define __NVKMS_SOFTFLOAT_H__ + +/* + * This header file provides utility code built on top of the softfloat floating + * point emulation library. + */ + +#include "nv-softfloat.h" +#include "nvkms-api-types.h" + +/* + * A 3x3 row-major matrix of float32_t's. + */ +struct NvKmsMatrixF32 { + float32_t m[3][3]; +}; + +/* + * A 3x4 row-major matrix of float32_t's. 
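+ *
+ * Read as a 3x3 linear part plus a fourth translation column (an
+ * interpretation sketch, not stated in the original source): applied to
+ * a column vector v,
+ *
+ *   out[i] = m[i][0]*v[0] + m[i][1]*v[1] + m[i][2]*v[2] + m[i][3]
+ *
+ * nvMultiply3x4Matrix() below composes a 3x3 matrix A with a 3x4 matrix
+ * B consistently with this: the result's linear part is A times B's
+ * linear part, and its translation column is A applied to B's.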
+ */ +struct NvKms3x4MatrixF32 { + float32_t m[3][4]; +}; + +/* + * Convert from an NvKmsMatrix (stores floating point values in NvU32s) to an + * NvKmsMatrixF32 (stores floating point values in float32_t). + */ +static inline struct NvKmsMatrixF32 NvKmsMatrixToNvKmsMatrixF32( + const struct NvKmsMatrix in) +{ + struct NvKmsMatrixF32 out = { }; + int i, j; + + for (j = 0; j < 3; j++) { + for (i = 0; i < 3; i++) { + out.m[i][j] = NvU32viewAsF32(in.m[i][j]); + } + } + + return out; +} + +/* + * Compute the matrix product A * B, where A is a 3x3 matrix and B is a 3x4 matrix, + * and return the resulting 3x4 matrix. + */ +static inline struct NvKms3x4MatrixF32 nvMultiply3x4Matrix(const struct NvKmsMatrixF32 *A, + const struct NvKms3x4MatrixF32 *B) +{ + struct NvKms3x4MatrixF32 C = { }; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 4; ++j) { + for (int k = 0; k < 3; ++k) { + C.m[i][j] = f32_mulAdd(A->m[i][k], B->m[k][j], C.m[i][j]); + } + } + } + + return C; +} + +/* return x**y */ +float64_t nvKmsPow(float64_t x, float64_t y); + +#endif /* __NVKMS_SOFTFLOAT_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h new file mode 100644 index 0000000..89dd4e5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_SURFACE_H__ +#define __NVKMS_SURFACE_H__ + +#include "nvkms-types.h" + +void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsRegisterSurfaceParams *pParams, + enum NvHsMapPermissions hsMapPermissions); + +void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle, + NvBool skipUpdate); +void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle); + +void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVEvoApiHandlesRec *pOpenDevSurfaceHandles); + +void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo); +void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo); + +void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo); +void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo); + +NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo); + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NVEvoChannelMask channelMask); + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoCtxDmaOk( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle); + +NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + +void nvEvoUnregisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo); + +static inline NvBool nvEvoIsSurfaceOwner(const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle) +{ + return ((pSurfaceEvo->owner.pOpenDev == pOpenDev) && + (pSurfaceEvo->owner.surfaceHandle == surfaceHandle)); +} + +#define ASSERT_EYES_MATCH(_arr, _field) \ + nvAssert((_arr)[NVKMS_RIGHT] == NULL || \ + (_arr)[NVKMS_LEFT]->_field == (_arr)[NVKMS_RIGHT]->_field); + +ct_assert((NVKMS_RIGHT - NVKMS_LEFT) == 1); + +#define FOR_ALL_EYES(_eye) \ + for ((_eye) = NVKMS_LEFT; (_eye) <= NVKMS_RIGHT; (_eye)++) + +#define FOR_ALL_VALID_PLANES(_planeIndex, _pSurface) \ + for ((_planeIndex) = 0; \ + (_planeIndex) < \ + (nvKmsGetSurfaceMemoryFormatInfo((_pSurface)->format))->numPlanes; \ + (_planeIndex)++) + +#endif /* __NVKMS_SURFACE_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h new file mode 100644 index 0000000..700dd31 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h @@ -0,0 +1,2737 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_TYPES_H__ +#define __NVKMS_TYPES_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvidia-modeset-os-interface.h" + +#include "nvctassert.h" +#include "nv_list.h" + +#include /* NV0073_CTRL_SPECIFIC_OR_PROTOCOL_* */ +#include /* NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE */ +#include /* NV0000_CTRL_GPU_MAX_ATTACHED_GPUS */ +#include /* NV0080_CTRL_OS_UNIX_VT_SWITCH_FB_INFO */ +#include /* NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_* */ +#include /* NV30F1_CTRL_MAX_GPUS_PER_GSYNC */ +#include /* NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE */ +#include + +#include "nvkms-api.h" +#include "nvkms-prealloc-types.h" + +#include "nvos.h" + +#include "nv_common_utils.h" +#include "nv_assert.h" +#include "unix_rm_handle.h" + +#include "nvmisc.h" + +#include "timing/nvtiming.h" +#include "timing/nvt_dsc_pps.h" +#include "hdmipacket/nvhdmi_frlInterface.h" // HDMI_{SRC,SINK}_CAPS + +#include + +#if defined(DEBUG) || defined(DEVELOP) +#define NVKMS_PROCFS_ENABLE 1 +#else +#define NVKMS_PROCFS_ENABLE 0 +#endif + +#define NV_DMA_EVO_PUSH_BUFFER_SIZE (4 * 1024) +#define NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE (4 * 12) +#define NV_DMA_EVO_NOTIFIER_SIZE 4096 + +#define NV_NUM_EVO_LUT_ENTRIES 1025 +/* + * Size of the nvdisplay 3 LUT variable segment size header, in LUT entries + * (which are 8 bytes each). + */ +#define NV_LUT_VSS_HEADER_SIZE 4 + +#define NV_EVO_SUBDEV_STACK_SIZE 10 + +#define NV_DP_READ_EDID_RETRIES 18 +#define NV_DP_REREAD_EDID_DELAY_USEC 500 /* in microseconds */ + +#define NV_EVO_SURFACE_ALIGNMENT 0x1000 + +/* + * Prior to nvdisplay 4.0, the final address for all scanout surfaces must be + * 256B-aligned. + * + * For nvdisplay 4.0, the final address for all scanout surfaces must be + * 512B-aligned for GPU, and 1KB-aligned for Tegra. + * + * NVKMS already uses NV_EVO_SURFACE_ALIGNMENT to force 4KB-alignment for the + * base address of each scanout surface. As such, we're forcing 1KB-alignment + * for the corresponding ctxdma offsets in order to be compatible with all + * display architectures. + */ +#define NV_SURFACE_OFFSET_ALIGNMENT_SHIFT 10 + +#define NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH 6U /* 64 bytes (2^6) */ +#define NVKMS_BLOCK_LINEAR_GOB_WIDTH ((NvU32)1 << NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH) + +#define NV_INVALID_OR 0xFFFFFFFF + +#define NVKMS_RM_HEAP_ID 0xDCBA + +#define NVKMS_MAX_WINDOWS_PER_DISP 32 + +#define NV_SYNCPT_GLOBAL_TABLE_LENGTH 1024 + +#define HEAD_MASK_QUERY(_mask, _head) (!!((_mask) & (1 << (_head)))) +#define HEAD_MASK_SET(_mask, _head) ((_mask) | (1 << (_head))) +#define HEAD_MASK_UNSET(_mask, _head) ((_mask) & ~(1 << (_head))) + +#define NVKMS_COMPOSITION_FOR_MATCH_BITS(__colorKeySelect, __match) \ + for ((__match) = (((__colorKeySelect) == \ + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) ? 
1 : 0); \ + (__match) <= 1; (__match)++) + +typedef struct _NVEvoApiHandlesRec *NVEvoApiHandlesPtr; +typedef struct _NVEvoSubDeviceRec *NVSubDeviceEvoPtr; +typedef struct _NVEvoDevRec *NVDevEvoPtr; +typedef struct _NVDmaBufferEvoRec *NVDmaBufferEvoPtr; +typedef struct _NVEvoChannel *NVEvoChannelPtr; +typedef struct _NVEvoHeadControl *NVEvoHeadControlPtr; +typedef struct _NVEvoCapabilities *NVEvoCapabilitiesPtr; +typedef struct _NVEvoSubDevHeadStateRec *NVEvoSubDevHeadStatePtr; +typedef struct _NVEvoSubDevRec *NVEvoSubDevPtr; +typedef struct _NVEvoColorRec *NVEvoColorPtr; +typedef struct _NVHwModeViewPortEvo *NVHwModeViewPortEvoPtr; +typedef struct _NVHwModeTimingsEvo *NVHwModeTimingsEvoPtr; +typedef struct _NVConnectorEvoRec *NVConnectorEvoPtr; +typedef struct _NVVblankSyncObjectRec *NVVblankSyncObjectPtr; +typedef struct _NVDispHeadStateEvoRec *NVDispHeadStateEvoPtr; +typedef struct _NVDispEvoRec *NVDispEvoPtr; +typedef struct _NVParsedEdidEvoRec *NVParsedEdidEvoPtr; +typedef struct _NVVBlankCallbackRec *NVVBlankCallbackPtr; +typedef struct _NVDpyEvoRec *NVDpyEvoPtr; +typedef struct _NVLutSurfaceEvo *NVLutSurfaceEvoPtr; +typedef struct _NVFrameLockEvo *NVFrameLockEvoPtr; +typedef struct _NVEvoInfoString *NVEvoInfoStringPtr; +typedef struct _NVSurfaceEvoRec NVSurfaceEvoRec, *NVSurfaceEvoPtr; +typedef struct _NVDeferredRequestFifoRec *NVDeferredRequestFifoPtr; +typedef struct _NVSwapGroupRec *NVSwapGroupPtr; + +/* + * _NVHs*EvoRec are defined in nvkms-headsurface-priv.h; they are intentionally + * opaque outside of the nvkms-headsurface code. + */ +typedef struct _NVHsDeviceEvoRec *NVHsDeviceEvoPtr; +typedef struct _NVHsChannelEvoRec *NVHsChannelEvoPtr; +typedef struct _NVHsSurfaceRec *NVHsSurfacePtr; + +/* _nv_dplibXXX are defined in dp/nvdp-connector-event-sink.h */ +typedef struct _nv_dplibconnector NVDPLibConnectorRec, *NVDPLibConnectorPtr; +typedef struct _nv_dplibdevice NVDPLibDeviceRec, *NVDPLibDevicePtr; +typedef struct __nv_dplibmodesetstate NVDPLibModesetStateRec, *NVDPLibModesetStatePtr; + +/* _nv_dplibtimer is defined in nvdp-timer.hpp */ +typedef struct _nv_dplibtimer NVDPLibTimerRec, *NVDPLibTimerPtr; + +/* _NVEvoModesetUpdateState defined in nvkms-modeset-types.h */ +typedef struct _NVEvoModesetUpdateState NVEvoModesetUpdateState; + +typedef struct _NVEvoApiHandlesRec { + void **pointers; /* Dynamically allocated array of pointers. */ + NvU32 numPointers; /* Number of elements in pointers array. */ + NvU32 defaultSize; +} NVEvoApiHandlesRec; + +typedef struct _NVEvoDma +{ + NvU32 memoryHandle; + NvU32 ctxHandle; + + NvU64 limit; + + /* Whether this is sysmem, or vidmem accessed through a BAR1 mapping. 
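+     *
+     * (When TRUE, the CPU pointers in subDeviceAddress[] below reference
+     * a BAR1 mapping of video memory rather than system memory.)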
*/ + NvBool isBar1Mapping; + + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]; +} NVEvoDma, *NVEvoDmaPtr; + +typedef struct _NVDmaBufferEvoRec +{ + NVEvoDma dma; + + NvU32 channel_handle; // handles + NvU32 num_channels; + void *control[NVKMS_MAX_SUBDEVICES]; + NvU32 *base; // Push buffer start pointer + NvU32 *buffer;// Push buffer current pointer + NvU32 *end; // Push buffer end pointer + NvU32 offset_max; // Push buffer max offset (in bytes) + NvU32 put_offset; // Push buffer last kicked off offset + NvU32 fifo_free_count; // fifo free space (in words) + NvU32 currentSubDevMask; + NVDevEvoPtr pDevEvo; +} NVDmaBufferEvoRec; + +/* EVO capabilities */ +typedef struct { + NvBool flipLock; + NvBool stereo; + NvBool scanLock; +} NVEvoLockPinCaps; +#define NV_EVO_NUM_LOCK_PIN_CAPS 16 + +typedef struct { + NvBool supportsInterlaced; + NvBool supportsSemiPlanar; + NvBool supportsPlanar; + NvBool supportsHVFlip; + NvBool supportsDSI; +} NVEvoMiscCaps; + +static inline NvU8 NVEvoScalerTapsToNum(NVEvoScalerTaps taps) +{ + NvU8 numTaps = 1; + + switch (taps) { + case NV_EVO_SCALER_8TAPS: + numTaps = 8; + break; + case NV_EVO_SCALER_5TAPS: + numTaps = 5; + break; + case NV_EVO_SCALER_3TAPS: + numTaps = 3; + break; + case NV_EVO_SCALER_2TAPS: + numTaps = 2; + break; + case NV_EVO_SCALER_1TAP: + numTaps = 1; + break; + } + + return numTaps; +} + +#define NV_EVO_SCALE_FACTOR_1X (1 << 10) +#define NV_EVO_SCALE_FACTOR_2X (2 << 10) +#define NV_EVO_SCALE_FACTOR_3X (3 << 10) +#define NV_EVO_SCALE_FACTOR_4X (4 << 10) + +typedef struct { + NvU32 maxPixelsVTaps; + NvU16 maxVDownscaleFactor; /* Scaled by 1024 */ + NvU16 maxHDownscaleFactor; /* Scaled by 1024 */ +} NVEvoScalerTapsCaps; + +typedef struct { + NvBool present; + NVEvoScalerTapsCaps taps[NV_EVO_SCALER_TAPS_MAX + 1]; +} NVEvoScalerCaps; + +typedef struct { + NvBool usable; + NvBool supportsHDMIYUV420HW; + NVEvoScalerCaps scalerCaps; +} NVEvoHeadCaps; +#define NV_EVO_NUM_HEAD_CAPS 8 + +typedef struct { + NvBool dualTMDS; + NvU32 maxTMDSClkKHz; +} NVEvoSorCaps; +#define NV_EVO_NUM_SOR_CAPS 8 + +typedef struct { +} NVEvoPiorCaps; +#define NV_EVO_NUM_PIOR_CAPS 4 + +typedef struct { + NvBool usable; + NvBool csc0MatricesPresent; + NvBool cscLUTsPresent; + NvBool csc1MatricesPresent; + NVEvoScalerCaps scalerCaps; +} NVEvoWindowCaps; +#define NV_EVO_NUM_WINDOW_CAPS 32 + +typedef NvU64 NVEvoChannelMask; + +#define NV_EVO_CHANNEL_MASK_CORE 0:0 +#define NV_EVO_CHANNEL_MASK_CORE_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_CORE_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_WINDOW_FIELD 32:1 +#define NV_EVO_CHANNEL_MASK_WINDOW(_n) (1+(_n)):(1+(_n)) +#define NV_EVO_CHANNEL_MASK_WINDOW__SIZE 32 +#define NV_EVO_CHANNEL_MASK_WINDOW_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_WINDOW_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_CURSOR_FIELD 40:33 +#define NV_EVO_CHANNEL_MASK_CURSOR(_n) (33+(_n)):(33+(_n)) +#define NV_EVO_CHANNEL_MASK_CURSOR__SIZE 8 +#define NV_EVO_CHANNEL_MASK_CURSOR_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_CURSOR_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_BASE_FIELD 44:41 +#define NV_EVO_CHANNEL_MASK_BASE(_n) (41+(_n)):(41+(_n)) +#define NV_EVO_CHANNEL_MASK_BASE__SIZE 4 +#define NV_EVO_CHANNEL_MASK_BASE_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_BASE_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_OVERLAY_FIELD 48:45 +#define NV_EVO_CHANNEL_MASK_OVERLAY(_n) (45+(_n)):(45+(_n)) +#define NV_EVO_CHANNEL_MASK_OVERLAY__SIZE 4 +#define NV_EVO_CHANNEL_MASK_OVERLAY_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_OVERLAY_DISABLE 0 +/* Window Immediate channels get only one bit. 
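+ *
+ * (The hi:lo pairs in this macro family are DRF-style bit ranges within
+ * the 64-bit NVEvoChannelMask; e.g. NV_EVO_CHANNEL_MASK_WINDOW(2)
+ * expands to the single-bit range 3:3, and DRF_SHIFTMASK64() turns such
+ * a range into a mask, as in NV_EVO_CHANNEL_MASK_WINDOW_ALL below.)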
*/ +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM 49:49 +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM_DISABLE 0 + +#define NV_EVO_CHANNEL_MASK_WINDOW_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_WINDOW_FIELD) +#define NV_EVO_CHANNEL_MASK_CURSOR_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_CURSOR_FIELD) +#define NV_EVO_CHANNEL_MASK_BASE_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_BASE_FIELD) +#define NV_EVO_CHANNEL_MASK_OVERLAY_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_OVERLAY_FIELD) + +static inline NvU32 NV_EVO_CHANNEL_MASK_POPCOUNT(NvU64 mask) +{ + // It's tempting to use __builtin_popcountll here, but that depends on + // intrinsics not available to nvkms in the kernel. + return nvPopCount64(mask); +} + +static inline NvU32 NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _BASE_FIELD, mask)); +} +static inline NvU32 NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _OVERLAY_FIELD, mask)); +} +static inline NvU32 NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _WINDOW_FIELD, mask)); +} + +/* EVO structures */ + +typedef struct { + struct { + NVEvoChannelMask channelMask; + NVEvoChannelMask noCoreInterlockMask; + /* Each channel in this mask was programmed with a "flip lock + * qualifying" flip. */ + NVEvoChannelMask flipLockQualifyingMask; + /* Channels set here are transitioning from NULL ctxdma to non-NULL + * ctxdma or vice-versa on this update. Only necessary/valid on Turing + * (class C5*). */ + NVEvoChannelMask flipTransitionWAR; + + struct { + NvBool vrrTearing; + } base[NVKMS_MAX_HEADS_PER_DISP]; + + /* + * Window immediate channels with pending methods are represented + * here by NV_EVO_CHANNEL_MASK_WINDOW(n) for window immediate + * channel n. + */ + NVEvoChannelMask winImmChannelMask; + + /* + * Each window channel NV_EVO_CHANNEL_MASK_WINDOW(n) needs to + * be interlocked with its corresponding window immediate channel n. + */ + NVEvoChannelMask winImmInterlockMask; + + } subdev[NVKMS_MAX_SUBDEVICES]; + +} NVEvoUpdateState; + +typedef struct { + struct { + NVEvoChannelMask channelMask; + } subdev[NVKMS_MAX_SUBDEVICES]; +} NVEvoIdleChannelState; + +typedef struct { + NvU8 validTimeStampBits; + NvU8 legacyNotifierFormatSizeBytes; + NvBool tearingFlips :1; + NvBool vrrTearingFlips :1; + NvBool perEyeStereoFlips :1; +} NVEvoChannelCaps; + +enum NVEvoImmChannel { + NV_EVO_IMM_CHANNEL_NONE, + NV_EVO_IMM_CHANNEL_PIO, + NV_EVO_IMM_CHANNEL_DMA, +}; + +typedef struct { + NvU32 handle; + void *control[NVKMS_MAX_SUBDEVICES]; +} NVEvoPioChannel; + +/*! basic syncpt structure used for pre and post syncpt usage */ +typedef struct _NVEvoSyncpt { + /*! syncpt id (only useful for post-syncpt) */ + NvU32 id; + /*! bitmask of channels using this syncpt */ + NVEvoChannelMask channelMask; + /*! handle of context dma allocated to this syncpt */ + NvU32 hCtxDma; + /*! handle of syncpt object */ + NvU32 hSyncpt; + /*! stores syncpt max value */ + NvU32 syncptMaxVal; +} NVEvoSyncpt; + +/* Tracks internal state of a vblank sync object. */ +typedef struct _NVVblankSyncObjectRec { + /* Whether the vblank sync object is currently in use by some client. */ + NvBool inUse; + + /* Whether the vblank sync object is enabled or disabled. 
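+     *
+     * (Tracked separately from 'inUse' above: an object may be owned by
+     * a client while currently disabled.)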
*/ + NvBool enabled; + + /* + * The index of this Rec inside of the HeadState's vblankSyncObjects array. + * Also corresponds with the index of the sync object in hardware. + */ + NvU32 index; + + /* + * This syncpoint object should be created as part of + * nvRmSetupEvoCoreChannel(). + */ + NVEvoSyncpt evoSyncpt; +} NVVblankSyncObjectRec; + +/* EVO channel, encompassing multiple subdevices and a single pushbuf */ +typedef struct _NVEvoChannel { + /* Pointer to array of per subdev notifier dma structs */ + NVEvoDmaPtr notifiersDma; + + NvU32 hwclass; + NvU32 instance; + NVEvoChannelMask channelMask; /* only one bit should be set */ + + NVDmaBufferEvoRec pb; + + NVOS10_EVENT_KERNEL_CALLBACK_EX completionNotifierEventCallback; + NvU32 completionNotifierEventHandle; + struct nvkms_ref_ptr *ref_ptr; + + /* + * GV100 timestamped flips need a duplicate update which only changes + * TIMESTAMP_MODE and MIN_PRESENT_INTERVAL fields in SET_PRESENT_CONTROL; + * to allow updating these fields without changing anything else in + * SET_PRESENT_CONTROL, normal updates to SET_PRESENT_CONTROL are cached + * here. (bug 1990958) + */ + NvU32 oldPresentControl; + + // On Turing, RM wants to be notified when the tearing mode changes. + NvBool oldTearingMode; + + struct { + enum NVEvoImmChannel type; + union { + NVEvoPioChannel *pio; + struct _NVEvoChannel *dma; + } u; + } imm; + + NVEvoChannelCaps caps; + + NVEvoSyncpt postSyncpt; +} NVEvoChannel; + +typedef enum { + NV_EVO_NO_LOCK, + NV_EVO_FRAME_LOCK, + NV_EVO_RASTER_LOCK, +} NVEvoLockMode; + +typedef enum { + NV_EVO_LOCK_PIN_ERROR = -1, + NV_EVO_LOCK_PIN_INTERNAL_0 = 0, + NV_EVO_LOCK_PIN_0 = 0x20, +} NVEvoLockPin; + +typedef struct _NVEvoHeadControl { + NvBool interlaced; + NVEvoLockMode clientLock; + NVEvoLockPin clientLockPin; + int clientLockoutWindow; + NVEvoLockMode serverLock; + NVEvoLockPin serverLockPin; + NvBool flipLock; + NVEvoLockPin flipLockPin; + NVEvoLockPin stereoPin; + + /* + * Whether or not this GPU is stereo locked. True if all heads are either + * frame or raster locked, and all heads are driving non-interlaced modes. + */ + NvBool stereoLocked; + + /* + * Whether or not this head is driving a HDMI 3D frame packed mode. Used + * in headcontrol only on >=GV100. + */ + NvBool hdmi3D; + + /* + * Whether or not this head is driving a mode requiring the HW YUV420 + * packer. Used in headcontrol only on >=nvdisplay 4.0. 
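+     *
+     * (See the yuv420Mode discussion in NVHwModeTimingsEvo below for the
+     * SW vs. HW YUV 4:2:0 distinction.)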
+ */ + NvBool hwYuv420; + + /* This isn't actually part of HeadControl, but it's convenient */ + NvU32 lockChainPosition; +} NVEvoHeadControl; + +typedef struct _NVEvoCapabilities { + NVEvoLockPinCaps pin[NV_EVO_NUM_LOCK_PIN_CAPS]; + NVEvoMiscCaps misc; + NVEvoHeadCaps head[NV_EVO_NUM_HEAD_CAPS]; + NVEvoSorCaps sor[NV_EVO_NUM_SOR_CAPS]; + NVEvoPiorCaps pior[NV_EVO_NUM_PIOR_CAPS]; + NVEvoWindowCaps window[NV_EVO_NUM_WINDOW_CAPS]; +} NVEvoCapabilities; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo; + enum NvKmsNIsoFormat format; + NvU16 offsetInWords; +} NVFlipNIsoSurfaceEvoHwState; + +typedef struct { + NVFlipNIsoSurfaceEvoHwState surface; + NvBool awaken; +} NVFlipCompletionNotifierEvoHwState; + +typedef struct { + NvBool usingSyncpt; + union { + struct { + NVFlipNIsoSurfaceEvoHwState acquireSurface; + NvU32 acquireValue; + NVFlipNIsoSurfaceEvoHwState releaseSurface; + NvU32 releaseValue; + } semaphores; + struct { + NvU32 preCtxDma; + NvU32 preValue; + NvU32 postCtxDma; + NvU32 postValue; + } syncpts; + } u; +} NVFlipSyncObjectEvoHwState; + +typedef struct { + NVLutSurfaceEvoPtr pLutSurfaceEvo; +} NVFlipLutHwState; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo; + NvS16 x, y; + + struct NvKmsCompositionParams cursorCompParams; +} NVFlipCursorEvoHwState; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo[NVKMS_MAX_EYES]; + NVFlipCompletionNotifierEvoHwState completionNotifier; + NVFlipSyncObjectEvoHwState syncObject; + + // Non-zero timeStamp value is only allowed if the channel's + // 'timeStampFlipBits' capability is > 0. + NvU64 timeStamp; + NvU8 minPresentInterval; + // True means immediate or tearing flip. False means flip-at-vblank. + NvBool tearing; + // The tearing mode passed to RM's VRR code via + // NV_VRR_TRAP_ARGUMENT_MAX_FPS_TEARING. + NvBool vrrTearing; + NvBool perEyeStereoFlip; + + struct NvKmsSize sizeIn; + struct NvKmsSize sizeOut; + struct NvKmsSignedPoint outputPosition; + + NVEvoScalerTaps hTaps; + NVEvoScalerTaps vTaps; + + struct NvKmsCscMatrix cscMatrix; + + NVFlipLutHwState inputLut; + + struct NvKmsRRParams rrParams; + + struct NvKmsCompositionParams composition; +} NVFlipChannelEvoHwState; + +typedef struct { + struct NvKmsPoint viewPortPointIn; + NVFlipCursorEvoHwState cursor; + NVFlipChannelEvoHwState layer[NVKMS_MAX_LAYERS_PER_HEAD]; + struct NvKmsUsageBounds usage; + NvBool disableMidFrameAndDWCFWatermark; + struct { + NvBool viewPortPointIn : 1; + NvBool cursorSurface : 1; + NvBool cursorPosition : 1; + + NvBool layerPosition[NVKMS_MAX_LAYERS_PER_HEAD]; + NvBool layer[NVKMS_MAX_LAYERS_PER_HEAD]; + } dirty; +} NVFlipEvoHwState; + +/*! + * State requested through the NVKMS API. This may differ from + * the current hardware state (e.g., if the head has been + * momentarily blanked during DP link training). + */ +typedef struct _NVEvoSubDevHeadStateRec { + struct NvKmsPoint viewPortPointIn; + NVFlipCursorEvoHwState cursor; + NVFlipChannelEvoHwState layer[NVKMS_MAX_LAYERS_PER_HEAD]; + // Current usage bounds programmed into the hardware. + struct NvKmsUsageBounds usage; + // Usage bounds required after the last scheduled flip completes. + struct NvKmsUsageBounds targetUsage; + // Preallocated usage bounds that will be required for upcoming flips. + struct NvKmsUsageBounds preallocatedUsage; + + // Current state of MidFrameAndDWCFWatermark programmed into the hardware. + NvBool disableMidFrameAndDWCFWatermark; + // + // State of MidFrameAndDWCFWatermark required after the last scheduled + // flip completes. 
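+    //
+    // (This current/target split mirrors 'usage' vs. 'targetUsage'
+    // above: "target" is the state the hardware should reach once all
+    // scheduled flips have completed.)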
+ // + NvBool targetDisableMidFrameAndDWCFWatermark; +} NVEvoSubDevHeadStateRec; + +#define NVKMS_HEAD_SURFACE_MAX_BUFFERS 2 + +/* + * HeadSurface state that applies to a single head, but spans across + * all subdevices. + */ +typedef struct { + /* + * The size of the headSurfaces for this head, across all subdevices. + * headSurface might only use a subset of the surfaces on one or more + * subdevices in SLI Mosaic. + */ + struct NvKmsSize size; + struct NvKmsSize stagingSize; + + /* + * The surfaces allocated for use by headSurface on this head. + * Surface allocations are broadcast across subdevices, though + * headSurface may unicast its rendering to the headSurface + * surface allocations on specific subdevices. + */ + struct { + NVHsSurfacePtr pSurface; + NVHsSurfacePtr pStagingSurface; + } surfaces[NVKMS_MAX_EYES][NVKMS_HEAD_SURFACE_MAX_BUFFERS]; + + /* + * The number of surfaces in the NVKMS_HEAD_SURFACE_MAX_BUFFERS dimension of + * the surfaces[][] array. Elements [0,surfaceCount-1] in the surfaces[][] + * array will be populated. + */ + NvU32 surfaceCount; +} NVHsStateOneHeadAllDisps; + +/* Subdevice-specific, channel-independent state */ +typedef struct _NVEvoSubDevRec { + NvU32 subDeviceInstance; + + NVEvoCapabilities capabilities; + + NVDispEvoPtr pDispEvo; + + NvU32 setSwSpareA[NVKMS_MAX_HEADS_PER_DISP]; + + NVEvoSubDevHeadStateRec headState[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoHeadControl headControl[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoHeadControl headControlAssy[NVKMS_MAX_HEADS_PER_DISP]; + void *cursorPio[NVKMS_MAX_HEADS_PER_DISP]; + NvBool (*scanLockState)(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NvU32 action, + /* NV_INVALID_HEAD-terminated + * array of head indices */ + const NvU32 *pHeads); + + /* + * EVO state machine refcounter for the number of SLI or proxy framelock + * clients that are connected to this server. + */ + NvU32 frameLockSliProxyClients; + + /* + * Since we add all active heads as framelock clients whenever we enable + * the second head as a framelock client, there's no need for EVO state + * transitions for heads 3 and more. Instead of those state transitions, + * we use the frameLockExtraClients ref counter to keep track of heads + * 3 and greater being added as framelock clients. + * + * XXX The state machine currently will naively framelock heads 3 and + * greater during this state transition, even if they're not capable + * of being framelocked (for example, when they have very different + * refresh rates). Bug 976532 + */ + NvU32 frameLockExtraClients; + + /* + * All of the following except the "armed" versions are set by the EVO + * state machine to the desired HW configuration given the current locking + * state. + * The "armed" versions represent the current hardware configuration, used + * to avoid excess hardware updates. 
+ */ + NvU32 frameLockServerMaskArmed; + NvU32 frameLockServerMaskAssy; + NvU32 frameLockClientMaskArmed; + NvU32 frameLockClientMaskAssy; + NvU32 frameLockExtRefClkMaskArmed; + NvU32 frameLockExtRefClkMaskAssy; + NvBool frameLockHouseSync; + + NvU8 flipLockPinSetForFrameLockHeadMask; + NvU8 flipLockEnabledForFrameLockHeadMask; + NvU8 flipLockPinSetForSliHeadMask; + NvU8 flipLockEnabledForSliHeadMask; + + NvU32 flipLockProhibitedHeadMask; + + NvU32 sliRasterLockServerMask; + NvU32 sliRasterLockClientMask; + + NVEvoLockPin sliServerLockPin; + NVEvoLockPin sliClientLockPin; + NvBool forceZeroClientLockoutWindow; +} NVEvoSubDevRec; + +typedef struct _NVEvoColorRec { + NvU16 red; + NvU16 green; + NvU16 blue; +} NVEvoColorRec; + +typedef struct { + NvU16 Red; + NvU16 Green; + NvU16 Blue; + NvU16 Unused; +} NVEvoLutEntryRec; + +typedef struct { + NVEvoLutEntryRec base[NV_LUT_VSS_HEADER_SIZE + NV_NUM_EVO_LUT_ENTRIES]; + // The output LUT requires 8-bit alignment. + NVEvoLutEntryRec output[NV_LUT_VSS_HEADER_SIZE + NV_NUM_EVO_LUT_ENTRIES] + __attribute__((aligned(0x100))); +} NVEvoLutDataRec; + +typedef struct { + NvBool supportsDP13 :1; + NvBool supportsInbandStereoSignaling :1; + NvBool supportsHDMI20 :1; + NvBool inputLutAppliesToBase :1; + NvU8 validNIsoFormatMask; + NvU8 genericPageKind; + NvU32 maxPitchValue; + int maxWidthInBytes; + int maxWidthInPixels; + int maxHeight; + NvU32 maxRasterWidth; + NvU32 maxRasterHeight; + struct NvKmsCompositionCapabilities cursorCompositionCaps; + NvU16 validLayerRRTransforms; + struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD]; +} NVEvoCapsRec; + +typedef struct { + NvU32 coreChannelClass; + size_t dmaArmedSize; + NvU32 dmaArmedOffset; +} NVEvoCoreChannelDmaRec; + + +typedef struct _NVEvoSubDeviceRec { + NvU32 handle; + NvU32 gpuId; +#define NV_INVALID_GPU_LOG_INDEX 0xFF + NvU8 gpuLogIndex; + char gpuString[NVKMS_GPU_STRING_SIZE]; + + NvU32 numEngines; + NvU32 *supportedEngines; + + /* Core channel memory mapping for ARM values */ + void *pCoreDma; + + /* ISO ctxdma programmed by EVO2 hal, into the overlay channel */ + NvU32 overlayContextDmaIso[NVKMS_MAX_HEADS_PER_DISP]; + enum NvKmsSurfaceMemoryFormat overlaySurfFormat[NVKMS_MAX_HEADS_PER_DISP]; + + /* Per head surface programmed into the core channel */ + const NVSurfaceEvoRec *pCoreChannelSurface[NVKMS_MAX_HEADS_PER_DISP]; + + /* EVO2 only, TRUE if a valid base surface passed to ->Flip() */ + NvBool isBaseSurfSpecified[NVKMS_MAX_HEADS_PER_DISP]; + enum NvKmsSurfaceMemoryFormat baseSurfFormat[NVKMS_MAX_HEADS_PER_DISP]; + + /* Composition parameters considered for hardware programming by EVO2 hal */ + struct { + NvBool initialized; + enum NvKmsCompositionColorKeySelect colorKeySelect; + NVColorKey colorKey; + } baseComp[NVKMS_MAX_HEADS_PER_DISP], overlayComp[NVKMS_MAX_HEADS_PER_DISP]; + +} NVEvoSubDeviceRec; + +/* Device-specific EVO state (subdevice- and channel-independent) */ +typedef struct _NVEvoDevRec { + + NvU8 gpuLogIndex; + NvU32 allocRefCnt; /* number of ALLOC_DEVICE calls */ + NVListRec devListEntry; + + /* array of gpuIds opened with nvkms_open_gpu() */ + NvU32 openedGpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS]; + + NVUnixRmHandleAllocatorRec handleAllocator; + NvU32 deviceId; + + NvU32 deviceHandle; + struct NvKmsPerOpenDev *pNvKmsOpenDev; + + + /* SLI Info */ + struct { + NvBool mosaic; + struct { + NvBool present :1; + + /* Current hardware state */ + NvBool powered :1; + + /* Software state tracking needs from hardware */ + NvBool powerNeededForRasterLock :1; + } bridge; + } 
sli; + + NvU32 numHeads; + NvU32 numWindows; /* NVDisplay only. */ + + NvU32 displayHandle; + + /*! + * modesetOwner points to the pOpenDev of the client that called + * NVKMS_IOCTL_GRAB_OWNERSHIP. + */ + const struct NvKmsPerOpenDev *modesetOwner; + + /*! + * The first modeset after a modeset ownership transition should not inherit + * state from the previous modeset that we don't want inherited: LUTs or + * heads not specified in the new modeset request. + */ + NvBool modesetOwnerChanged; + + /*! + * NVEvoDevRec::numSubDevices is the number of GPUs in the SLI + * device. This is the number of NVEvoSubDevPtrs in + * NVEvoDevRec::gpus[] and the number of NVSubDeviceEvoPtr in + * NVEvoDevRec::pSubDevices. + * + * The data structure organization is summarized by the following table: + * + * NVDevEvoRec::numSubDevices (# of pSubDevs) + * | NVDevEvoRec::nDispEvo (# of pDispEvos) + * | | NVDispEvoRec::numSubDevices (# of sd per disp) + * | | | + * no SLI 1 1 1 + * SLI Mosaic N N 1 + */ + NvU32 numSubDevices; + NVSubDeviceEvoPtr pSubDevices[NVKMS_MAX_SUBDEVICES]; + + NvU32 dispClass; + NvU32 displayCommonHandle; + NvU32 rmCtrlHandle; + + unsigned int nDispEvo; + NVDispEvoPtr pDispEvo[NVKMS_MAX_SUBDEVICES]; + + NVEvoChannelPtr base[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoChannelPtr core; + NVEvoChannelPtr overlay[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoChannelPtr window[NVKMS_MAX_WINDOWS_PER_DISP]; + + /* NVDisplay head<->window mapping */ + NvU32 headForWindow[NVKMS_MAX_WINDOWS_PER_DISP]; + + struct { + NVEvoChannelPtr layer[NVKMS_MAX_LAYERS_PER_HEAD]; + NvU32 numLayers; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + /* Pointer to array of subdev structs */ + NVEvoSubDevPtr gpus; + + NvU32 subDevMaskStack[NV_EVO_SUBDEV_STACK_SIZE]; + NvU32 subDevMaskStackDepth; + + NvU32 cursorHandle[NVKMS_MAX_HEADS_PER_DISP]; + + NVDPLibTimerPtr dpTimer; + + NvU8 capsBits[NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE]; + NvU8 commonCapsBits[NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE]; + + NVEvoCapsRec caps; + + NVEvoCoreChannelDmaRec coreChannelDma; + + NvBool mobile : 1; + NvBool usesTegraDevice : 1; + + /* + * IO coherency modes that display supports for ISO and NISO memory + * allocations, respectively. + */ + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + + /* + * Indicates whether the init_no_update methods that were pushed by the + * hardware during core channel allocation are still pending. + */ + NvBool coreInitMethodsPending : 1; + /* + * Indicates that NVKMS restored the console and freeing the core channel + * should leave the display configuration alone. + * + * This should be set to FALSE whenever an update is sent that flips away + * from the framebuffer console. + * + * TODO: Remove this in favor of passing a parameter explicitly to the + * functions that use it. + */ + NvBool skipConsoleRestore : 1; + /* + * Indicates that hotplug events that occur while NVKMS is the modeset owner + * should trigger console restore modesets. + */ + NvBool handleConsoleHotplugs : 1; + /* + * Cached from NvKmsSetModeRequest::allowHeadSurfaceInNvKms when the + * modeset owner does a modeset. This is needed so that when non-modeset + * owners do a partial modeset they don't override this value. + */ + NvBool allowHeadSurfaceInNvKms : 1; + + NvBool gc6Allowed : 1; + + /* + * Indicates whether NVKMS is driving an SOC display device, or an external + * dGPU device. + */ + NvBool isSOCDisplay : 1; + + /* + * Indicates whether NVKMS is supporting syncpts. 
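+     *
+     * (When set, flips may use the pre/post-syncpt paths; see
+     * NVFlipSyncObjectEvoHwState above and preSyncptTable below.)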
+ */ + NvBool supportsSyncpts : 1; + + /* + * Indicates whether the display device that NVKMS is driving requires all + * memory allocations that display will access to come from sysmem. + * + * For SOC display devices, this should be set to TRUE since the only + * memory aperture that they support is sysmem. + */ + NvBool requiresAllAllocationsInSysmem : 1; + /* + * Indicates whether the device that NVKMS is driving supports headSurface + * composition. + * + * For SOC display devices (e.g., Orin), this should be set to FALSE since + * there's currently zero nvgpu support, and no Tegra clients should be + * using headSurface right now. + */ + NvBool isHeadSurfaceSupported : 1; + + NvU32 validResamplingMethodMask; + + nvkms_timer_handle_t *postFlipIMPTimer; + nvkms_timer_handle_t *consoleRestoreTimer; + + nvkms_timer_handle_t *lowerDispBandwidthTimer; + + NvU32 simulationType; + + NvU32 numClasses; + NvU32 *supportedClasses; + + struct { + /* name[0] == '\0' for unused registryKeys[] array elements. */ + char name[NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN]; + NvU32 value; + } registryKeys[NVKMS_MAX_DEVICE_REGISTRY_KEYS]; + + /* Returns true if the Quadro Sync card connected to this GPU has + * a firmware version incompatible with this GPU. + */ + NvBool badFramelockFirmware; + + const struct _nv_evo_hal *hal; + const struct _nv_evo_cursor_hal *cursorHal; + + /*! + * ref_ptr to the structure. + * + * nvkms_timer_handle_t objects refer to the pDevEvo via references to this, + * so that timers that fire after the pDevEvo has been freed can detect that + * case and do nothing. + */ + struct nvkms_ref_ptr *ref_ptr; + + struct { + void *handle; + } hdmiLib; + + struct { + NvU32 semaphoreHandle; + void *pSemaphores; + NvBool enabled; + NvBool active; + NvU32 flipCounter; + } vrr; + + /* + * Information about the framebuffer console returned by + * NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO. + */ + NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS vtFbInfo; + + /* + * Handle referencing the memory reserved by RM that is used by the kernel + * as the framebuffer console surface. + */ + NvKmsSurfaceHandle fbConsoleSurfaceHandle; + + NVHsDeviceEvoPtr pHsDevice; + + /* The current headSurface configuration. */ + NVHsStateOneHeadAllDisps headSurfaceAllDisps[NVKMS_MAX_HEADS_PER_DISP]; + + struct NVDevPreallocRec prealloc; + + struct { + NvU32 handle; + NVOS10_EVENT_KERNEL_CALLBACK_EX callback; + } nonStallInterrupt; + + /* + * Track the LUT with per-head, per-pDisp scope. The LUT itself + * is triple buffered. + * + * RM surface allocations are broadcast in SLI, so LUT is allocated with + * per-device scope. However, writes into the LUT are unicast with + * per-pDisp scope. + * + * The LUT surface in the core channel contains both the base and output + * LUTs. + */ + struct { + struct { + NVLutSurfaceEvoPtr LUT[3]; + struct { + NvBool waitForPreviousUpdate; + NvBool curBaseLutEnabled; + NvBool curOutputLutEnabled; + NvU8 curLUTIndex; + nvkms_timer_handle_t *updateTimer; + } disp[NVKMS_MAX_SUBDEVICES]; + } head[NVKMS_MAX_HEADS_PER_DISP]; + NVLutSurfaceEvoPtr defaultLut; + } lut; + + /*! 
stores pre-syncpts */ + NVEvoSyncpt *preSyncptTable; + NvBool *pAllSyncptUsedInCurrentFlip; + +} NVDevEvoRec; + +/* + * The NVHwModeTimingsEvo structure stores all the values necessary to + * perform a modeset with EVO + */ + +typedef struct _NVHwModeViewPortEvo { + struct { + /* + * note that EVO centers ViewPortOut within the active raster, + * so xAdjust,yAdjust are signed; to position ViewPortOut at + * 0,0 within active raster: + * + * viewPortOut.xAdjust = (activeRaster.w - viewPortOut.w)/2 * -1; + * viewPortOut.yAdjust = (activeRaster.h - viewPortOut.h)/2 * -1; + */ + NvS16 xAdjust; + NvS16 yAdjust; + NvU16 width; + NvU16 height; + } out; + + struct { + NvU16 width; + NvU16 height; + } in; + + NVEvoScalerTaps hTaps; + NVEvoScalerTaps vTaps; + + // These are the window features that may be possible if the required ISO + // bw is available at the time that the feature needs to be enabled. By + // default possibleUsage is set considering that everything is supported + // by the HW and for dGPU, IMP will scale it as needed. + struct NvKmsUsageBounds possibleUsage; + + // Guaranteed usage bounds allowed by IMP. These are never assigned to + // NVDpyEvoRec::usage or the hardware directly, but rather are used to + // validate usage bound change requests. + struct NvKmsUsageBounds guaranteedUsage; +} NVHwModeViewPortEvo; + +static inline NvBool nvIsImageSharpeningAvailable( + const NVHwModeViewPortEvo *pViewPort) +{ + return (pViewPort->out.width != pViewPort->in.width) || + (pViewPort->out.height != pViewPort->in.height); +} + +enum nvKmsPixelDepth { + NVKMS_PIXEL_DEPTH_18_444, + NVKMS_PIXEL_DEPTH_24_444, + NVKMS_PIXEL_DEPTH_30_444, +}; + +enum nvKmsTimingsProtocol { + NVKMS_PROTOCOL_DAC_RGB, + + NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A, + NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B, + NVKMS_PROTOCOL_SOR_DUAL_TMDS, + NVKMS_PROTOCOL_SOR_DP_A, + NVKMS_PROTOCOL_SOR_DP_B, + NVKMS_PROTOCOL_SOR_LVDS_CUSTOM, + NVKMS_PROTOCOL_SOR_HDMI_FRL, + + NVKMS_PROTOCOL_DSI, + + NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC, +}; + +/* + * This structure defines all of the values necessary to program mode timings + * on EVO hardware. + * NOTE: if you add anything to this, consider adding it to + * RasterLockPossible() in nvkms-evo.c + */ +typedef struct _NVHwModeTimingsEvo { + struct NvKmsPoint rasterSize; + struct NvKmsPoint rasterSyncEnd; + struct NvKmsPoint rasterBlankEnd; + struct NvKmsPoint rasterBlankStart; + NvU32 rasterVertBlank2Start; + NvU32 rasterVertBlank2End; + + NvU32 pixelClock; /* in kHz */ + enum nvKmsTimingsProtocol protocol; + enum nvKmsPixelDepth pixelDepth; + /* + * yuv420Mode reflects whether this mode requires YUV 4:2:0 decimation into + * a half-width output through headsurface (SW YUV420) or >=nvdisplay 4.0 HW + * CSC (HW YUV420). + * + * If a mode requires SW YUV 4:2:0 emulation, the pixelClock and width + * values in NvModeTimings will still be the full width values specified by + * the mode parsed from the EDID (e.g. 3840x2160@60), but the pixelClock + * and width values in NVHwModeTimingsEvo will be the "real" half width + * values programmed in HW and rendered to through a headSurface transform + * (e.g. 1920x2160@60). If a mode requires HW YUV 4:2:0 CSC, the + * pixelClock and width values in both NvModeTimings and NVHwModeTimingsEvo + * will be full width, and the decimation to the half width scanout surface + * is performed in HW. In both cases, only the full width values should + * ever be reported to the client. 
+     */
+    enum NvYuv420Mode yuv420Mode;
+    /* *SyncPol is TRUE if negative */
+    NvBool hSyncPol : 1;
+    NvBool vSyncPol : 1;
+    NvBool interlaced : 1;
+    NvBool doubleScan : 1;
+    /*
+     * hdmi3D reflects whether this mode is an HDMI 3D frame packed mode. True
+     * only if the user selected HDMI 3D stereo mode and the GPU supports it.
+     * If true, then pixelClock is doubled.
+     */
+    NvBool hdmi3D : 1;
+
+    struct {
+        /* The vrr type for which this mode is adjusted. */
+        enum NvKmsDpyVRRType type;
+    } vrr;
+
+    /* DisplayPort Display Stream Compression */
+    struct {
+        NvBool enable;
+
+        /*
+         * The DSC target bits per pixel (bpp) rate value multiplied by 16 that
+         * is being used by the DSC encoder.
+         *
+         * It maps respectively to {pps4[1:0], pps5[7:0]}.
+         */
+        NvU32 bitsPerPixelX16;
+
+        /*
+         * The DSC picture parameter set (PPS), which the DSC encoder must
+         * communicate to the decoder.
+         */
+        NvU32 pps[DSC_MAX_PPS_SIZE_DWORD];
+    } dpDsc;
+
+    HDMI_FRL_CONFIG hdmiFrlConfig;
+
+    NVHwModeViewPortEvo viewPort;
+
+    struct {
+        enum NvKmsStereoMode mode;
+        NvBool isAegis;
+    } stereo;
+} NVHwModeTimingsEvo;
+
+static inline NvU64 nvEvoFrametimeUsFromTimings(const NVHwModeTimingsEvo *pTimings)
+{
+    NvU64 pixelsPerFrame = pTimings->rasterSize.x * pTimings->rasterSize.y;
+    NvU64 pixelsPerSecond = KHzToHz(pTimings->pixelClock);
+    NvU64 framesPerSecond = pixelsPerSecond / pixelsPerFrame;
+
+    return 1000000ULL / framesPerSecond;
+}
+
+static inline NvU16 nvEvoVisibleWidth(const NVHwModeTimingsEvo *pTimings)
+{
+    return pTimings->rasterBlankStart.x - pTimings->rasterBlankEnd.x;
+}
+
+static inline NvU16 nvEvoVisibleHeight(const NVHwModeTimingsEvo *pTimings)
+{
+    /* rasterVertBlank2{Start,End} should only be != 0 for interlaced modes. */
+    nvAssert(pTimings->interlaced ||
+             ((pTimings->rasterVertBlank2Start == 0) &&
+              (pTimings->rasterVertBlank2End == 0)));
+
+    return pTimings->rasterBlankStart.y - pTimings->rasterBlankEnd.y +
+           pTimings->rasterVertBlank2Start - pTimings->rasterVertBlank2End;
+}
+
+/*
+ * Calculate BackendSizeHeight, based on whether HD or SD quality video
+ * timings are defined.
+ */
+static inline NvBool nvEvoIsHDQualityVideoTimings(
+    const NVHwModeTimingsEvo *pTimings)
+{
+    NvU32 height = nvEvoVisibleHeight(pTimings);
+
+    // as per windows code, nvva uses < 720.
+    if (height <= 576) {
+        // SD quality: 240, 288, 480, 576
+        return FALSE;
+    }
+
+    // HD quality: 720, 1080
+    return TRUE;
+}
+
+static inline struct NvKmsRect nvEvoViewPortOutHwView(
+    const NVHwModeTimingsEvo *pTimings)
+{
+    const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort;
+    const NvU16 hVisible = nvEvoVisibleWidth(pTimings);
+    const NvU16 vVisible = nvEvoVisibleHeight(pTimings);
+    struct NvKmsRect viewPortOut = { 0 };
+
+    viewPortOut.width = pViewPort->out.width;
+    viewPortOut.height = pViewPort->out.height;
+    viewPortOut.x = pViewPort->out.xAdjust +
+                    (hVisible - pViewPort->out.width) / 2;
+    viewPortOut.y = (pViewPort->out.yAdjust +
+                     (vVisible - pViewPort->out.height) / 2);
+
+    return viewPortOut;
+}
+
+static inline struct NvKmsRect nvEvoViewPortOutClientView(
+    const NVHwModeTimingsEvo *pTimings)
+{
+    struct NvKmsRect viewPortOut = nvEvoViewPortOutHwView(pTimings);
+
+    if (pTimings->doubleScan) {
+
+        nvAssert((viewPortOut.y % 2) == 0);
+        viewPortOut.y /= 2;
+
+        nvAssert((viewPortOut.height % 2) == 0);
+        viewPortOut.height /= 2;
+    }
+
+    return viewPortOut;
+}
+
+/*
+ * The ELD contains a subset of the digital display device's EDID
+ * information related to audio capabilities.
The GPU driver sends the + * ELD to hardware and the audio driver reads it by issuing the ELD + * command verb. + */ + +#define NV_MAX_AUDIO_DEVICE_ENTRIES \ + (NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_3 + 1) + +typedef enum { + NV_ELD_PRE_MODESET = 0, + NV_ELD_POST_MODESET, + NV_ELD_POWER_ON_RESET, +} NvEldCase; + +/* OR indices are per OR-type. The maximum OR index for each type + * on each GPU is: + * + * Pre-GV10X : 8 SORs, 4 PIORs and 4 Dacs; + * GV10X : 8 SORs, 4 PIORs; + * TU10X+ : 8 SORs; + */ +#define NV_EVO_MAX_ORS 8 + +/* + * The scoping of heads, ORs, and dpys relative to connectors can be + * complicated. Here is how objects are scoped for various configurations: + * + * #heads #ORs #dpys #NVConnectorEvoRecs + * DP 1.1 1 1 1 1 + * DP-MST n 1 n 1 + * DP cloning: 1 1 n 1 + * 2-Heads-1-OR: 2 2 1 1 + */ +typedef struct _NVConnectorEvoRec { + char name[NVKMS_DPY_NAME_SIZE]; + + NVDispEvoPtr pDispEvo; + + NVListRec connectorListEntry; + + NvBool detectComplete; /* For sync'ing dpy detection w/ DP lib */ + NVDPLibConnectorPtr pDpLibConnector; // DP Lib + NvBool dpSerializerEnabled; + + struct { + NvU8 maxLinkBW; + NvU8 maxLaneCount; + NvBool supportsMST; + } dpSerializerCaps; + + NVDpyId displayId; // RM Display ID + NvKmsConnectorSignalFormat signalFormat; + NvKmsConnectorType type; + NvU32 typeIndex; + NvU32 legacyType; /* NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_ */ + NvU32 legacyTypeIndex; + NvU32 physicalIndex; + NvU32 physicalLocation; + NvU32 validHeadMask; + + NvU32 dfpInfo; /* DFP info query through NV0073_CTRL_CMD_DFP_GET_INFO */ + + NVDpyIdList ddcPartnerDpyIdsList; + + struct { + NvU32 type; + NvU32 protocol; // NV0073_CTRL_SPECIFIC_OR_PROTOCOL_* + NvU32 location; // NV0073_CTRL_SPECIFIC_OR_LOCATION_* + NvU32 ditherType; + NvU32 ditherAlgo; + /* Hardware heads attached to assigned OR */ + NvU32 ownerHeadMask[NV_EVO_MAX_ORS]; + /* ORs mask assigned to this connector */ + NvU32 mask; + } or; + + struct { + NvBool ycbcr422Capable; + NvBool ycbcr444Capable; + } colorSpaceCaps; + + NvEldCase audioDevEldCase[NV_MAX_AUDIO_DEVICE_ENTRIES]; +} NVConnectorEvoRec; + +static inline NvU32 nvConnectorGetAttachedHeadMaskEvo( + const NVConnectorEvoRec *pConnectorEvo) +{ + NvU32 headMask = 0x0; + NvU32 orIndex; + + FOR_EACH_INDEX_IN_MASK(32, orIndex, pConnectorEvo->or.mask) { + headMask |= pConnectorEvo->or.ownerHeadMask[orIndex]; + } FOR_EACH_INDEX_IN_MASK_END; + + return headMask; +} + +static inline +NvBool nvIsConnectorActiveEvo(const NVConnectorEvoRec *pConnectorEvo) +{ + NvU32 orIndex; + + FOR_EACH_INDEX_IN_MASK(32, orIndex, pConnectorEvo->or.mask) { + if (pConnectorEvo->or.ownerHeadMask[orIndex] != 0x0) { + return TRUE; + } + } FOR_EACH_INDEX_IN_MASK_END; + + return FALSE; +} + +/* + * In case of 2-Heads-1-OR: NV0073_CTRL_CMD_DFP_ASSIGN_SOR assigns 2 SORs, + * lowest SOR index is for primary head. + */ +static inline NvU32 nvEvoConnectorGetPrimaryOr( + const NVConnectorEvoRec *pConnectorEvo) +{ + return (pConnectorEvo->or.mask == 0x0 ? 
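+            /* or.mask == 0: no OR is assigned to this connector */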
+ NV_INVALID_OR : + BIT_IDX_32(LOWESTBIT(pConnectorEvo->or.mask))); +} + +typedef struct _NVDpyAttributeCurrentDitheringConfigRec { + NvBool enabled; + enum NvKmsDpyAttributeCurrentDitheringDepthValue depth; + enum NvKmsDpyAttributeCurrentDitheringModeValue mode; +} NVDpyAttributeCurrentDitheringConfig; + +typedef struct __NVAttributesSetEvoRec { + +#define NV_EVO_DVC_MIN (-1024) +#define NV_EVO_DVC_MAX 1023 +#define NV_EVO_DVC_DEFAULT 0 + + NvS32 dvc; + + /* + * For both colorSpace and colorRange, the value for + * NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_{SPACE,RANGE} sent by the client is + * stored in NVDpyEvoRec::requestedColor(Space, Range}. The structure stores + * the actual color space and color range in use. + * + * Since YUV444 mode only allows limited color range, changes to the + * current color space may trigger changes to the current color + * range (see nvChooseCurrentColorSpaceAndRangeEvo()). + * + * For SW YUV420 mode, these values are ignored in + * HEAD_SET_PROCAMP and applied in the headSurface composite shader. + */ + enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace; + enum NvKmsDpyAttributeColorRangeValue colorRange; + + NVDpyAttributeCurrentDitheringConfig dithering; + +#define NV_EVO_IMAGE_SHARPENING_MIN 0 +#define NV_EVO_IMAGE_SHARPENING_MAX 255 +#define NV_EVO_IMAGE_SHARPENING_DEFAULT 127 + + struct { + NvBool available; + NvU32 value; + } imageSharpening; + + enum NvKmsDpyAttributeDigitalSignalValue digitalSignal; +} NVAttributesSetEvoRec; + +#define NV_EVO_DEFAULT_ATTRIBUTES_SET \ + (NVAttributesSetEvoRec) { \ + .dvc = NV_EVO_DVC_DEFAULT, \ + .colorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB, \ + .colorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL, \ + .dithering = { \ + .enabled = FALSE, \ + .mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE, \ + .depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE, \ + }, \ + .imageSharpening = { \ + .value = NV_EVO_IMAGE_SHARPENING_DEFAULT, \ + }, \ + } + + +typedef struct _NVEldEvoRec { + NvU32 size; + NvU8 buffer[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER]; +} NVEldEvoRec; + +/* + * This structure stores information about the active per-head audio state. + */ +typedef struct _NVDispHeadAudioStateEvoRec { + NvU32 maxFreqSupported; + NVEldEvoRec eld; + + NvBool isAudioOverHdmi : 1; + NvBool supported : 1; + NvBool enabled : 1; +} NVDispHeadAudioStateEvoRec; + +typedef struct _NVDispHeadInfoFrameStateEvoRec { + NVT_VIDEO_INFOFRAME_CTRL videoCtrl; + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL vendorCtrl; + NvBool hdTimings; +} NVDispHeadInfoFrameStateEvoRec; + +/* + * This structure stores information about the active per-head display state. + */ +typedef struct _NVDispHeadStateEvoRec { + + /*! Cached, to preserve across modesets. */ + struct NvKmsModeValidationParams modeValidationParams; + + /* + * Heads on the same NVDevEvoRec with the same non-zero + * NVDispHeadStateEvoRec::allowFlipLockGroup value are eligible to + * be flipLocked, from an NVKMS client point of view, if the + * hardware requirements for flipLock are met. + */ + NvU8 allowFlipLockGroup; + + /* + * For Turing and newer, enable display composition bypass mode. + * + * This is intended to be used by console restore to avoid bug 2168873. + */ + NvBool bypassComposition : 1; + NvBool hs10bpcHint : 1; + + struct { + NVT_COLOR_FORMAT colorFormat; + NVT_COLORIMETRY colorimetry; + NVT_COLOR_RANGE colorRange; + NvU32 satCos; + } procAmp; + + /* + * The activeRmId is the identifier that we use to talk to RM + * about the display device(s) on this head. 
It is zero except
+     * when a mode is being driven by this head.  For DP MST, it is the
+     * identifier of the displayport library group to which the driven
+     * DP device belongs.  Otherwise, it is the identifier of the connector
+     * driven by the head.
+     */
+    NvU32 activeRmId;
+
+    NVHwModeTimingsEvo timings;
+    NVConnectorEvoRec *pConnectorEvo; /* NULL if the head is not active */
+
+    /*
+     * Each head can have up to NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD
+     * programmable Core semaphores.
+     *
+     * The numVblankSyncObjectsCreated will ideally always be equal to
+     * NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD, but could be lower if errors
+     * occurred during syncpt allocation in nvRMSetupEvoCoreChannel().
+     */
+    NvU8 numVblankSyncObjectsCreated;
+    NVVblankSyncObjectRec vblankSyncObjects[NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD];
+    NVDispHeadAudioStateEvoRec audio;
+
+    NvU32 rmVBlankCallbackHandle;
+    NVListRec vblankCallbackList;
+} NVDispHeadStateEvoRec;
+
+typedef struct _NVDispApiHeadStateEvoRec {
+    /*
+     * The mask of hardware heads mapped onto this api head,
+     * set to zero if the api head is not active.
+     */
+    NvU32 hwHeadsMask;
+
+    NVDpyIdList activeDpys; /* Empty if the head is not active */
+    NVAttributesSetEvoRec attributes;
+
+    NVDispHeadInfoFrameStateEvoRec infoFrame;
+} NVDispApiHeadStateEvoRec;
+
+typedef struct _NVDispEvoRec {
+    NvU8 gpuLogIndex;
+    NVDevEvoPtr pDevEvo;
+    NvU32 hotplugEventHandle;
+    NvU32 DPIRQEventHandle;
+    NVOS10_EVENT_KERNEL_CALLBACK_EX rmHotplugCallback;
+    NVOS10_EVENT_KERNEL_CALLBACK_EX rmDPIRQCallback;
+
+    NVDispHeadStateEvoRec headState[NVKMS_MAX_HEADS_PER_DISP];
+    NVDispApiHeadStateEvoRec apiHeadState[NVKMS_MAX_HEADS_PER_DISP];
+
+    NVDpyIdList vbiosDpyConfig[NVKMS_MAX_HEADS_PER_DISP];
+
+    NvU32 isoBandwidthKBPS;
+    NvU32 dramFloorKBPS;
+
+    /*
+     * The list of physical connector display IDs.  This is the union
+     * of pConnectorEvo->displayId values, which is also the union of
+     * pDpyEvo->id values for non-MST pDpys.
+     */
+    NVDpyIdList connectorIds;
+
+    NVListRec connectorList;
+
+    NvU32 displayOwner;
+
+    NVListRec dpyList;
+
+    NVDpyIdList bootDisplays;
+    NVDpyIdList validDisplays;
+    NVDpyIdList connectedDisplays;
+
+    /*
+     * displayPortMSTIds is a superset of dynamicDpyIds because not all DP MST
+     * dpys are dynamic dpys.  For example, the DP MST dpys that are driven by
+     * a DP serializer connector are part of a fixed topology, and are static in
+     * nature.
+     */
+    NVDpyIdList displayPortMSTIds; /* DP MST dpys */
+    NVDpyIdList dynamicDpyIds;
+
+    NVDpyIdList muxDisplays;
+
+    struct {
+        nvkms_timer_handle_t *unstallTimer;
+    } vrr;
+
+    NVFrameLockEvoPtr pFrameLockEvo;
+    struct {
+        NVDpyId server;
+        NVDpyIdList clients;
+        NvBool syncEnabled;   /* GPU is syncing to framelock */
+        NvU32 connectorIndex; /* NV30F1_GSYNC_CONNECTOR_* */
+        NvU32 currentServerHead; /* used for disabling */
+        NvU32 currentClientHeadsMask; /* used for disabling */
+        NvBool currentHouseSync; /* if state machine thinks house sync
+                                    is enabled -- used for disabling */
+
+        /* Framelock event-related data */
+#define NV_FRAMELOCK_SYNC_LOSS 0
+#define NV_FRAMELOCK_SYNC_GAIN 1
+#define NV_FRAMELOCK_NUM_EVENTS 2
+
+        struct {
+            NvU32 handle;
+            NVOS10_EVENT_KERNEL_CALLBACK_EX callback;
+        } gsyncEvent[NV_FRAMELOCK_NUM_EVENTS];
+
+    } framelock;
+
+    NVHsChannelEvoPtr pHsChannel[NVKMS_MAX_HEADS_PER_DISP];
+
+    NVSwapGroupPtr pSwapGroup[NVKMS_MAX_HEADS_PER_DISP];
+
+    /*!
+     * ref_ptr to the structure.
+ * + * nvkms_timer_handle_t objects refer to the pDispEvo via references to + * this, so that timers that fire after the pDispEvo has been freed can + * detect that case and do nothing. + */ + struct nvkms_ref_ptr *ref_ptr; + + /* + * Indicates that NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG has been queried at + * least once on this device. If set, nvRmDestroyDisplays() will flush any + * remaining AUX log messages to the system log. + */ + NvBool dpAuxLoggingEnabled; + + struct nvkms_backlight_device *backlightDevice; +} NVDispEvoRec; + +/* + * XXX[2Head1OR] Remove nvHardwareHeadToApiHead(), before implementing logic to + * map multiple hardware heads onto the single api head. + */ +static inline NvU32 nvHardwareHeadToApiHead(const NvU32 head) +{ + return head; +} + + +static inline NvU32 GetNextHwHead(NvU32 hwHeadsMask, const NvU32 prevHwHead) +{ + if ((hwHeadsMask == 0x0) || + ((prevHwHead != NV_INVALID_HEAD) && + ((hwHeadsMask &= ~((1 << (prevHwHead + 1)) -1 )) == 0x0))) { + return NV_INVALID_HEAD; + } + return BIT_IDX_32(LOWESTBIT(hwHeadsMask)); +} + +#define FOR_EACH_EVO_HW_HEAD_IN_MASK(__hwHeadsMask, __hwHead) \ + for ((__hwHead) = GetNextHwHead((__hwHeadsMask), NV_INVALID_HEAD); \ + (__hwHead) != NV_INVALID_HEAD; \ + (__hwHead) = GetNextHwHead((__hwHeadsMask), (__hwHead))) + +static inline NvU32 nvGetPrimaryHwHead(const NVDispEvoRec *pDispEvo, + const NvU32 apiHead) +{ + return (apiHead != NV_INVALID_HEAD) ? + GetNextHwHead(pDispEvo->apiHeadState[apiHead].hwHeadsMask, + NV_INVALID_HEAD) : NV_INVALID_HEAD; +} + +typedef enum { + NV_EVO_PASSIVE_DP_DONGLE_UNUSED, + NV_EVO_PASSIVE_DP_DONGLE_DP2DVI, + NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_1, + NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_2, +} NVEvoPassiveDpDongleType; + +typedef struct NVEdidRec { + NvU8 *buffer; + size_t length; +} NVEdidRec, *NVEdidPtr; + +typedef struct _NVParsedEdidEvoRec { + NvBool valid; + NVT_EDID_INFO info; + NVT_EDID_RANGE_LIMIT limits; + char monitorName[NVT_EDID_MONITOR_NAME_STRING_LENGTH]; + char serialNumberString[NVT_EDID_LDD_PAYLOAD_SIZE+1]; +} NVParsedEdidEvoRec; + +typedef void (*NVVBlankCallbackProc)(NVDispEvoRec *pDispEvo, + const NvU32 head, + NVVBlankCallbackPtr pCallbackData); + +typedef struct _NVVBlankCallbackRec { + NVListRec vblankCallbackListEntry; + NVVBlankCallbackProc pCallback; + void *pUserData; +} NVVBlankCallbackRec; + +typedef struct _NVDpyAttributeRequestedDitheringConfigRec { + enum NvKmsDpyAttributeRequestedDitheringValue state; + enum NvKmsDpyAttributeRequestedDitheringDepthValue depth; + enum NvKmsDpyAttributeRequestedDitheringModeValue mode; +} NVDpyAttributeRequestedDitheringConfig; + +typedef struct _NVDpyEvoRec { + NVListRec dpyListEntry; + NVDpyId id; + + char name[NVKMS_DPY_NAME_SIZE]; + + NvU32 apiHead; + + struct _NVDispEvoRec *pDispEvo; + NVConnectorEvoPtr pConnectorEvo; + + NvBool hasBacklightBrightness : 1; + NvBool internal : 1; + NvBool allowDVISpecPClkOverride : 1; + + /* whether the connected dpy is HDMI capable */ + NvBool hdmiCapable : 1; + NvBool isVrHmd : 1; + + /* + * Maximum single link and total allowed pixel clock. This is first + * reported by RM through DpyProbeMaxPixelClock, and then potentially + * overridden by the EVO SOR capabilities for HDMI and DVI through + * UpdateMaxPixelClock. 
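+     *
+     * (As an illustrative example: single-link DVI allows at most a 165 MHz
+     * pixel clock, so maxSingleLinkPixelClockKHz would be capped at 165000
+     * in that configuration; the actual caps come from RM and the SOR
+     * capabilities as described above.)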
+ */ + NvU32 maxPixelClockKHz; + NvU32 maxSingleLinkPixelClockKHz; + + NVEdidRec edid; + NVParsedEdidEvoRec parsedEdid; + + NVDpyAttributeRequestedDitheringConfig requestedDithering; + + enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace; + enum NvKmsDpyAttributeColorRangeValue requestedColorRange; + + NVAttributesSetEvoRec currentAttributes; + + struct { + char *addressString; + NVDPLibDevicePtr pDpLibDevice; // DP Lib's notion of the device. + NvBool inbandStereoSignaling; + + NvU8 laneCount; // NV0073_CTRL_DP_DATA_SET_LANE_COUNT + NvU8 linkRate; // NV0073_CTRL_DP_DATA_SET_LINK_BW + enum NvKmsDpyAttributeDisplayportConnectorTypeValue connectorType; + NvBool sinkIsAudioCapable; + + struct { + NvBool valid; + NvU8 buffer[NVKMS_GUID_SIZE]; + char str[NVKMS_GUID_STRING_SIZE]; + } guid; + + /* + * When the DP serializer is in MST mode, this field is used to uniquely + * identify each MST DPY that's connected to the DP serializer. + * + * This field is only valid for DP serializer DPYs, and pDpLibDevice + * must be NULL in this case. + */ + NvU8 serializerStreamIndex; + } dp; + + struct { + HDMI_SRC_CAPS srcCaps; + HDMI_SINK_CAPS sinkCaps; + } hdmi; + + struct { + NvBool ycbcr422Capable; + NvBool ycbcr444Capable; + } colorSpaceCaps; + + struct { + NvBool supported : 1; + NvBool requiresModetimingPatching : 1; + NvBool isDLP : 1; + NvBool isAegis : 1; + NvBool requiresVbiAdjustment : 1; + NvU32 subType; + int indexInOverrideTimings; + } stereo3DVision; + + struct { + enum NvKmsDpyVRRType type; + } vrr; + +} NVDpyEvoRec; + +static inline NvBool nvDpyEvoIsDPMST(const NVDpyEvoRec *pDpyEvo) +{ + return nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpyEvo->pDispEvo->displayPortMSTIds); +} + +// Return a pDpy's connector's display ID +static inline NvU32 nvDpyEvoGetConnectorId(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 rmDpyId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + + // This function shouldn't be used for DP MST dynamic devices. + nvAssert(!nvDpyEvoIsDPMST(pDpyEvo)); + nvAssert(ONEBITSET(rmDpyId)); + + return rmDpyId; +} + +static inline +NvBool nvConnectorIsInternal(const NVConnectorEvoRec *pConnectorEvo) +{ + /* For mobile GPUs check for LVDS or embedded DisplayPort signal flag. + * If found, DFP is internal*/ + return (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + (((pConnectorEvo->pDispEvo->pDevEvo->mobile) && + (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _SIGNAL, _LVDS, + pConnectorEvo->dfpInfo))) || + (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _SIGNAL, _DSI, + pConnectorEvo->dfpInfo)) || + (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _EMBEDDED_DISPLAYPORT, _TRUE, + pConnectorEvo->dfpInfo))); +} + +static inline NvU32 NV_EVO_LOCK_PIN(NvU32 n) +{ + return NV_EVO_LOCK_PIN_0 + n; +} + +static inline NvU32 NV_EVO_LOCK_PIN_INTERNAL(NvU32 n) +{ + return NV_EVO_LOCK_PIN_INTERNAL_0 + n; +} + +static inline NvBool NV_EVO_LOCK_PIN_IS_INTERNAL(NvU32 n) +{ + ct_assert(NV_IS_UNSIGNED(n) && NV_EVO_LOCK_PIN_INTERNAL_0 == 0); + return n < NV_EVO_LOCK_PIN_0; +} + + +/* + * Utility macro for looping over all the pConnectorsEvo on a pDispEvo. + */ +#define FOR_ALL_EVO_CONNECTORS(_pConnectorEvo, _pDispEvo) \ + nvListForEachEntry((_pConnectorEvo), \ + &(_pDispEvo)->connectorList, connectorListEntry) + +/* + * Utility macro for declaring a for loop to walk over all the + * pDispEvos on a particular pDevEvo. 
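+ *
+ * A minimal usage sketch (assuming pDevEvo is a valid device pointer;
+ * PER-DISP-WORK stands for arbitrary per-pDispEvo code):
+ *
+ *  NVDispEvoPtr pDispEvo;
+ *  NvU32 dispIndex;
+ *
+ *  FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ *      PER-DISP-WORK(pDispEvo);
+ *  }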
+ */ +#define FOR_ALL_EVO_DISPLAYS(_pDispEvo, _i, _pDevEvo) \ + for ((_i) = 0, \ + (_pDispEvo) = (_pDevEvo)->pDispEvo[0]; \ + (_pDispEvo); \ + (_i)++, (_pDispEvo) = ((_i) < (_pDevEvo)->nDispEvo) ? \ + (_pDevEvo)->pDispEvo[(_i)] : NULL) + +#define FOR_ALL_EVO_DPYS(_pDpyEvo, _dpyIdList, _pDispEvo) \ + nvListForEachEntry((_pDpyEvo), &(_pDispEvo)->dpyList, dpyListEntry) \ + if (nvDpyIdIsInDpyIdList((_pDpyEvo)->id, (_dpyIdList))) + +#define FOR_ALL_EVO_FRAMELOCKS(_pFrameLockEvo) \ + nvListForEachEntry(_pFrameLockEvo, &nvEvoGlobal.frameLockList, \ + frameLockListEntry) + +#define FOR_ALL_EVO_DEVS(_pDevEvo) \ + nvListForEachEntry(_pDevEvo, &nvEvoGlobal.devList, devListEntry) + +#define FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP( \ + _pSwapGroup, _pDeferredRequestFifo) \ + nvListForEachEntry((_pDeferredRequestFifo), \ + &(_pSwapGroup)->deferredRequestFifoList, \ + swapGroup.deferredRequestFifoListEntry) + +#define FOR_EACH_SUBDEV_IN_MASK(_sd, _mask) \ + FOR_EACH_INDEX_IN_MASK(32, _sd, _mask) + +#define FOR_EACH_SUBDEV_IN_MASK_END \ + FOR_EACH_INDEX_IN_MASK_END + +static inline NVDpyEvoPtr nvGetOneArbitraryDpyEvo(NVDpyIdList dpyIdList, + const NVDispEvoRec *pDispEvo) +{ + NVDpyEvoPtr pDpyEvo; + + nvAssert(nvDpyIdListIsASubSetofDpyIdList(dpyIdList, + pDispEvo->validDisplays)); + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + return pDpyEvo; + } + + return NULL; +} + + +/* + * Return whether or not the display devices on the connector should + * be handled by the DP library. + */ +static inline NvBool nvConnectorUsesDPLib(const NVConnectorEvoRec + *pConnectorEvo) +{ + return (pConnectorEvo->pDpLibConnector != NULL); +} + +static inline +NvBool nvConnectorIsDPSerializer(const NVConnectorEvoRec *pConnectorEvo) +{ + return (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_DP_SERIALIZER); +} + +/* + * Return whether or not the display device given is handled by the DP + * library. + */ +static inline NvBool nvDpyUsesDPLib(const NVDpyEvoRec *pDpyEvo) +{ + return nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo); +} + +/* + * Return whether this dpy is active. The dpy is active if it is + * driven by a head. + */ +static inline NvBool nvDpyEvoIsActive(const NVDpyEvoRec *pDpyEvo) +{ + return (pDpyEvo->apiHead != NV_INVALID_HEAD); +} + +/* + * Return true if this dpy reports an EDID supporting HDMI 3D and + * isn't connected via active DisplayPort. + */ +static inline NvBool nvDpyEvoSupportsHdmi3D(const NVDpyEvoRec *pDpyEvo) +{ + return (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.HDMI3DSupported && + !((pDpyEvo->pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) || + (pDpyEvo->pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B))); + +} + +static inline NvBool nvHeadIsActive(const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + return (head < ARRAY_LEN(pDispEvo->headState)) && + (pDispEvo->headState[head].pConnectorEvo != NULL); +} + +/*! + * Return the mask of active heads on this pDispEvo. 
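+ *
+ * For example, if only heads 0 and 2 are active, the returned mask is 0x5.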
+ */ +static inline NvU32 nvGetActiveHeadMask(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvU32 headMask = 0; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + headMask |= 1 << head; + } + } + + return headMask; +} + +static inline NvBool nvAllHeadsInactive(const NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvU32 head; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + return FALSE; + } + } + } + + return TRUE; +} + +/* + * Return the list of dpys that are currently active on the given disp. + */ +static inline NVDpyIdList nvActiveDpysOnDispEvo(const NVDispEvoRec *pDispEvo) +{ + NVDpyIdList dpyIdList = nvEmptyDpyIdList(); + NvU32 apiHead; + + for (apiHead = 0; apiHead < NVKMS_MAX_HEADS_PER_DISP; apiHead++) { + const NVDispApiHeadStateEvoRec *pApiHeadState = + &pDispEvo->apiHeadState[apiHead]; + + dpyIdList = nvAddDpyIdListToDpyIdList(dpyIdList, + pApiHeadState->activeDpys); + } + + return dpyIdList; +} + +static inline NvU32 nvGpuIdOfDispEvo(const NVDispEvoRec *pDispEvo) +{ + nvAssert(pDispEvo->displayOwner < pDispEvo->pDevEvo->numSubDevices); + return pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId; +} + +static inline NvBool nvIsEmulationEvo(const NVDevEvoRec *pDevEvo) +{ + return pDevEvo->simulationType != + NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; +} + +static inline NvBool nvIs3DVisionStereoEvo(const enum NvKmsStereoMode stereo) +{ + return (stereo == NVKMS_STEREO_NVIDIA_3D_VISION || + stereo == NVKMS_STEREO_NVIDIA_3D_VISION_PRO); +} + +/* + * Utility macro for iterating over all head bits set in a head bit mask + */ +#define FOR_ALL_HEADS(_head, _headMask) \ + for((_head) = 0; \ + (_headMask) >> (_head); \ + (_head)++) \ + if ((_headMask) & (1 << (_head))) + +typedef struct _NVLutSurfaceEvo { + NVDevEvoPtr pDevEvo; + + NvU32 handle; + NvU32 size; + + NvU32 dispCtxDma; + + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]; +} NVLutSurfaceEvoRec; + +typedef struct _NVFrameLockEvo { + NVListRec frameLockListEntry; + + /* array of subdev GPU IDs */ + NvU32 nGpuIds; + NvU32 gpuIds[NV30F1_CTRL_MAX_GPUS_PER_GSYNC]; + + NvU32 gsyncId; + NvU32 device; /* RM device handle for this object */ + + int fpgaIdAndRevision; /* FPGA revId (including firmware version + * and board ID) */ + + int firmwareMajorVersion; /* FPGA firmware major version */ + int firmwareMinorVersion; /* FPGA firmware minor version */ + NvU32 boardId; /* NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_* */ + NvU32 caps; /* Various capabilities flags */ + + NvU32 maxSyncSkew; /* Max sync skew increment */ + NvU32 syncSkewResolution; /* In nanoseconds */ + NvU32 maxSyncInterval; /* Max sync interval */ + + NvU32 houseSyncUseable; + + /* House sync mode requested by user */ + enum NvKmsFrameLockAttributeHouseSyncModeValue houseSyncMode; + NvU32 houseSyncModeValidValues; + + NvBool houseSyncAssy; /* Current desired state */ + NvBool houseSyncArmed; /* Current hardware state */ + + NvU8 connectedGpuMask; /* bitmask of GPUs that are connected */ + NvU8 syncReadyGpuMask; /* bitmask of GPUs that are syncReady */ + + NvBool syncReadyLast; /* Previous NV_CTRL_FRAMELOCK_SYNC_READY + * value changed either from nvctrl or + * the RM, used to avoid resending events + * since RM doesn't trigger a SYNC_READY + * event on framelock disable */ + + NvBool videoModeReadOnly; /* If video mode is read-only */ + + /* Current device state */ + enum 
NvKmsFrameLockAttributePolarityValue polarity; + NvU32 syncDelay; + NvU32 syncInterval; + enum NvKmsFrameLockAttributeVideoModeValue videoMode; + NvBool testMode; + +} NVFrameLockEvoRec; + +/*! + * The buffer that accumulates a string with information returned to + * the client. + */ +typedef struct _NVEvoInfoString { + NvU16 length; /*! strlen(s); excludes the nul terminator */ + NvU16 totalLength; /*! number of bytes in the buffer pointed to by 's' */ + char *s; /*! pointer to the buffer to be written to */ +} NVEvoInfoStringRec; + +enum NvHsMapPermissions { + NvHsMapPermissionsNone, + NvHsMapPermissionsReadOnly, + NvHsMapPermissionsReadWrite, +}; + +#define NV_HS_BAD_GPU_ADDRESS ((NvU64) -1) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatI8)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatA1R5G5B5) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX1R5G5B5) | \ + NVBIT64(NvKmsSurfaceMemoryFormatR5G6B5)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatA8R8G8B8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX8R8G8B8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatA2B10G10R10) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX2B10G10R10) | \ + NVBIT64(NvKmsSurfaceMemoryFormatA8B8G8R8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX8B8G8R8)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatRF16GF16BF16AF16) | \ + NVBIT64(NvKmsSurfaceMemoryFormatR16G16B16A16)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N420)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N420)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8___V8_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8___V8_N420)) + +struct _NVSurfaceEvoRec { + /* + * By default, all NVSurfaceEvoRecs will have requireCtxDma == TRUE, and + * have a ctxDma allocated and placed in the display engine hash table for + * each plane. 
+ * + * But, if the client specified the noDisplayHardwareAccess flag, + * requireCtxDma will be FALSE, and ctxDma will be 0 for all planes. + * + * requireCtxDma is used to remember what the client requested, so that + * we correctly honor noDisplayHardwareAccess across + * FreeSurfaceCtxDmasForAllOpens() / + * AllocSurfaceCtxDmasForAllOpens() cycles. + */ + NvBool requireCtxDma; + + struct { + NvU32 rmHandle; + NvU32 ctxDma; + NvU32 pitch; + NvU64 offset; + NvU64 rmObjectSizeInBytes; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + struct { + const struct NvKmsPerOpenDev *pOpenDev; + NvKmsSurfaceHandle surfaceHandle; + } owner; + + NvU32 widthInPixels; + NvU32 heightInPixels; + + NvU32 log2GobsPerBlockY; + + /* + * HeadSurface needs a CPU mapping of surfaces containing semaphores. + */ + void *cpuAddress[NVKMS_MAX_SUBDEVICES]; + + enum NvKmsSurfaceMemoryLayout layout; + enum NvKmsSurfaceMemoryFormat format; + + NvKmsMemoryIsoType isoType; + + /* + * A surface has two reference counts: + * + * - rmRefCnt indicates how many uses of the surface reference + * NVSurfaceEvoRec::planes[]::rmHandle (the surface owner who registered + * the surface, EVO currently displaying the surface, an open + * surface grant file descriptor). + * + * - structRefCnt indicates how many uses of the surface reference + * the NVSurfaceEvoRec. In addition to the rmRefCnt uses, this + * will also count NVKMS clients who acquired the surface + * through GRANT_SURFACE/ACQUIRE_SURFACE. + * + * When a client registers a surface, both reference counts will + * be initialized to 1. The RM surface for each plane will be unduped when + * rmRefCnt reaches zero. The NVSurfaceEvoRec structure will be + * freed when structRefCnt reaches zero. + * + * In most cases, one of the following will be true: + * (rmRefCnt == 0) && (structRefCnt == 0) + * (rmRefCnt != 0) && (structRefCnt != 0) + * The only exception is when the owner of the surface unregisters it while + * other clients still have references to it; in that case, the rmRefCnt + * can drop to zero while structRefCnt is still non-zero. + * + * If rmRefCnt reaches zero before structRefCnt, the surface is + * "orphaned": it still exists in ACQUIRE_SURFACE clients' handle + * namespaces and/or granted FDs, but is not usable in subsequent API + * requests (e.g., to flip, specify cursor image, etc). + * + * Described in a table: + * + * ACTION rmRefCnt structRefCnt + * a) NVKMS_IOCTL_REGISTER_SURFACE =1 =1 + * b) flip to surface +1 +1 + * c) NVKMS_IOCTL_GRANT_SURFACE(grantFd) n/a +1 + * d) NVKMS_IOCTL_ACQUIRE_SURFACE n/a +1 + * e) NVKMS_IOCTL_UNREGISTER_SURFACE -1 -1 + * f) flip away from surface -1 -1 + * g) close(grantFd) n/a -1 + * h) NVKMS_IOCTL_RELEASE_SURFACE n/a -1 + * i) ..._REGISTER_DEFERRED_REQUEST_FIFO +1 +1 + * j) ..._UNREGISTER_DEFERRED_REQUEST_FIFO -1 -1 + * + * (e) complements (a) + * (f) complements (b) + * (g) complements (c) + * (h) complements (d) + * (j) complements (i) + */ + NvU64 rmRefCnt; + NvU64 structRefCnt; + +#if NVKMS_PROCFS_ENABLE + NvBool procFsFlag; +#endif + +}; + +typedef struct _NVDeferredRequestFifoRec { + NVSurfaceEvoPtr pSurfaceEvo; + struct NvKmsDeferredRequestFifo *fifo; + + /* A deferred request fifo may be joined to a swapGroup. 
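+     * (While joined, the fifo is linked into
+     * pSwapGroup->deferredRequestFifoList via
+     * swapGroup.deferredRequestFifoListEntry; see
+     * FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP above.)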
*/ + struct { + NVSwapGroupPtr pSwapGroup; + NVListRec deferredRequestFifoListEntry; + NvBool ready; + NvBool perEyeStereo; + NvBool pendingJoined; + NvBool pendingReady; + NvU32 semaphoreIndex; + struct NvKmsPerOpen *pOpenUnicastEvent; + } swapGroup; +} NVDeferredRequestFifoRec; + +typedef struct _NVSwapGroupRec { + NVListRec deferredRequestFifoList; + NvBool zombie; + NvBool pendingFlip; + NvU32 nMembers; + NvU32 nMembersReady; + NvU32 nMembersPendingJoined; + + NvU16 nClips; + struct NvKmsRect *pClipList; + NvBool swapGroupIsFullscreen; + + NvU64 refCnt; +} NVSwapGroupRec; + +typedef struct { + NvU32 clientHandle; + + NVListRec devList; + NVListRec frameLockList; + +#if defined(DEBUG) + NVListRec debugMemoryAllocationList; +#endif + + struct NvKmsPerOpen *nvKmsPerOpen; + +} NVEvoGlobal; + +extern NVEvoGlobal nvEvoGlobal; + +/* + * These enums are used during IMP validation: + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE means that no changes will be made to + * the current display bandwidth values. + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE means that NVKMS will increase the + * current display bandwidth values if required by IMP. This is typically + * specified pre-modeset/flip. + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST means that NVKMS may potentially + * decrease the current display bandwidth values to match the current display + * configuration. This is typically specified post-modeset/flip. + */ +typedef enum { + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE = 0, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE = 1, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST = 2, +} NVEvoReallocateBandwidthMode; + +typedef struct { + struct { + /* pTimings == NULL => this head is disabled */ + const NVHwModeTimingsEvo *pTimings; + const struct NvKmsUsageBounds *pUsage; + NvU32 displayId; + NvU32 orIndex; + NvU8 orType; /* NV0073_CTRL_SPECIFIC_OR_TYPE_* */ + } head[NVKMS_MAX_HEADS_PER_DISP]; + + NvBool requireBootClocks; + NVEvoReallocateBandwidthMode reallocBandwidth; +} NVEvoIsModePossibleDispInput; + +typedef struct { + NvBool possible; + NvU32 minRequiredBandwidthKBPS; + NvU32 floorBandwidthKBPS; +} NVEvoIsModePossibleDispOutput; + +/* CRC-query specific defines */ +/*! + * Structure that defines information about where a single variable is stored in + * the CRC32NotifierEntry structure + */ +typedef struct _CRC32NotifierEntryRec { + NvU32 field_offset; /* Var's offset from start of CRC32Notifier Struct */ + NvU32 field_base_bit; /* LSB bit index for variable in entry */ + NvU32 field_extent_bit; /* MSB bit index for variable in entry */ + struct NvKmsDpyCRC32 *field_frame_values; /* Array to store read field values across frames */ +} CRC32NotifierEntryRec; + +/*! + * Internally identifies flag read from CRC32Notifier's Status for error-checking + */ +enum CRC32NotifierFlagType { + NVEvoCrc32NotifierFlagCount, + NVEvoCrc32NotifierFlagCrcOverflow, +}; + +/*! + * Structure that defines information about where a single flag is stored in + * the Status of the CRC32NotifierEntry structure + */ +typedef struct _CRC32NotifierEntryFlags { + NvU32 flag_base_bit; /* LSB bit index for flag in entry */ + NvU32 flag_extent_bit; /* MSB bit index for flag in entry */ + enum CRC32NotifierFlagType flag_type; /* Type of error-checking to perform on flag */ +} CRC32NotifierEntryFlags; + +/*! + * Internal Crc32NotifierRead structure used to collect multiple frames of CRC + * data from a QueryCRC32 call. Arrays should be allocated to match + * entry_count frames. + */ +typedef struct _CRC32NotifierCrcOut { + /*! 
+ * Array of CRCs generated from the Compositor hardware + */ + struct NvKmsDpyCRC32 *compositorCrc32; + + /*! + * CRCs generated from the RG hardware, if head is driving RG/SF. + */ + struct NvKmsDpyCRC32 *rasterGeneratorCrc32; + + /*! + * Crc values generated from the target SF/OR depending on connector's OR type + */ + struct NvKmsDpyCRC32 *outputCrc32; + +} CRC32NotifierCrcOut; + + +typedef const struct _nv_evo_hal { + void (*SetRasterParams) (NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState); + void (*SetProcAmp) (NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState); + void (*SetHeadControl) (NVDevEvoPtr, int sd, int head, + NVEvoUpdateState *updateState); + void (*SetHeadRefClk) (NVDevEvoPtr, int head, NvBool external, + NVEvoUpdateState *updateState); + void (*HeadSetControlOR) (NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState); + void (*ORSetControl) (NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState); + void (*HeadSetDisplayId) (NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState); + NvBool (*SetUsageBounds) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState); + void (*Update) (NVDevEvoPtr, + const NVEvoUpdateState *updateState, + NvBool releaseElv); + void (*IsModePossible) (NVDispEvoPtr, + const NVEvoIsModePossibleDispInput *, + NVEvoIsModePossibleDispOutput *); + void (*PrePostIMP) (NVDispEvoPtr, NvBool isPre); + void (*SetNotifier) (NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState); + NvBool (*GetCapabilities) (NVDevEvoPtr); + void (*Flip) (NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + void (*FlipTransitionWAR) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState); + void (*FillLUTSurface) (NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth); + void (*SetLUTContextDma) (const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + void (*SetOutputScaler) (const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState); + void (*SetViewportPointIn) (NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState); + void (*SetViewportInOut) (NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortOutMin, + const NVHwModeViewPortEvo *pViewPortOut, + const NVHwModeViewPortEvo *pViewPortOutMax, + NVEvoUpdateState *updateState); + void (*SetCursorImage) (NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams); + NvBool (*ValidateCursorSurface)(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo); + NvBool (*ValidateWindowFormat)(const enum 
NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut); + void (*InitCompNotifier) (const NVDispEvoRec *pDispEvo, int idx); + NvBool (*IsCompNotifierComplete) (NVDispEvoPtr pDispEvo, int idx); + void (*WaitForCompNotifier) (const NVDispEvoRec *pDispEvo, int idx); + void (*SetDither) (NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState); + void (*SetStallLock) (NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState); + void (*SetDisplayRate) (NVDispEvoPtr pDispEvo, const int head, + NvBool enable, + NVEvoUpdateState *updateState, + NvU32 timeoutMicroseconds); + void (*InitChannel) (NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel); + void (*InitDefaultLut) (NVDevEvoPtr pDevEvo); + void (*InitWindowMapping) (const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState); + NvBool (*IsChannelIdle) (NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + NvBool (*IsChannelMethodPending)(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + NvBool (*ForceIdleSatelliteChannel)(NVDevEvoPtr, + const NVEvoIdleChannelState *idleChannelState); + NvBool (*ForceIdleSatelliteChannelIgnoreLock)(NVDevEvoPtr, + const NVEvoIdleChannelState *idleChannelState); + + void (*AccelerateChannel)(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 *pOldAccelerators); + + void (*ResetChannelAccelerators)(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 oldAccelerators); + + NvBool (*AllocRmCtrlObject) (NVDevEvoPtr); + void (*FreeRmCtrlObject) (NVDevEvoPtr); + void (*SetImmPointOut) (NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y); + void (*StartCRC32Capture) (NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NVConnectorEvoPtr pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + NvU32 head, + NvU32 sd, + NVEvoUpdateState *updateState /* out */); + void (*StopCRC32Capture) (NVDevEvoPtr pDevEvo, + NvU32 head, + NVEvoUpdateState *updateState /* out */); + NvBool (*QueryCRC32) (NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32 /* out */, + NvU32 *numCRC32 /* out */); + void (*GetScanLine) (const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod); + void (*ConfigureVblankSyncObject) (NVDevEvoPtr pDevEvo, + NvU16 rasterLine, + NvU32 head, + NvU32 semaphoreIndex, + NvU32 hCtxDma, + NVEvoUpdateState* pUpdateState); + + void (*SetDscParams) (const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings); + + void (*EnableMidFrameAndDWCFWatermark)(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState); + + NvU32 (*GetActiveViewportOffset)(NVDispEvoRec *pDispEvo, NvU32 head); + + void (*ClearSurfaceUsage) (NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + + NvBool (*ComputeWindowScalingTaps)(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState); + + const NVEvoScalerCaps* (*GetWindowScalingCaps)(const NVDevEvoRec *pDevEvo); + + struct { + NvU32 supportsNonInterlockedUsageBoundsUpdate :1; + NvU32 supportsDisplayRate :1; + NvU32 supportsFlipLockRGStatus :1; + NvU32 needDefaultLutSurface :1; + NvU32 hasUnorm16OLUT :1; + NvU32 supportsDigitalVibrance :1; + NvU32 supportsImageSharpening :1; + NvU32 supportsHDMIVRR :1; + NvU32 
supportsCoreChannelSurface :1; + NvU32 supportsHDMIFRL :1; + NvU32 supportsSetStorageMemoryLayout :1; + NvU32 supportsIndependentAcqRelSemaphore :1; + NvU32 supportsCoreLut :1; + NvU32 supportsSynchronizedOverlayPositionUpdate :1; + NvU32 supportsVblankSyncObjects :1; + NvU32 requiresScalingTapsInBothDimensions :1; + + NvU32 supportedDitheringModes; + size_t impStructSize; + NVEvoScalerTaps minScalerTaps; + } caps; +} NVEvoHAL, *NVEvoHALPtr; + +typedef const struct _nv_evo_cursor_hal { + NvU32 klass; + + void (*MoveCursor) (NVDevEvoPtr, NvU32 sd, NvU32 head, + NvS16 x, NvS16 y); + void (*ReleaseElv) (NVDevEvoPtr, NvU32 sd, NvU32 head); + + struct { + NvU16 maxSize; + } caps; +} NVEvoCursorHAL, *NVEvoCursorHALPtr; + +NvU32 nvEvoGetHeadSetStoragePitchValue(const NVDevEvoRec *pDevEvo, + enum NvKmsSurfaceMemoryLayout layout, + NvU32 pitch); + +NvBool nvEvoGetHeadSetControlCursorValue90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue); + +static inline NvBool nvEvoScalingUsageBoundsEqual( + const struct NvKmsScalingUsageBounds *a, + const struct NvKmsScalingUsageBounds *b) +{ + return (a->maxVDownscaleFactor == b->maxVDownscaleFactor) && + (a->maxHDownscaleFactor == b->maxHDownscaleFactor) && + (a->vTaps == b->vTaps) && + (a->vUpscalingAllowed == b->vUpscalingAllowed); +} + +static inline NvBool +nvEvoLayerUsageBoundsEqual(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b, + const NvU32 layer) +{ + return (a->layer[layer].usable == b->layer[layer].usable) && + (a->layer[layer].supportedSurfaceMemoryFormats == + b->layer[layer].supportedSurfaceMemoryFormats) && + nvEvoScalingUsageBoundsEqual(&a->layer[layer].scaling, + &b->layer[layer].scaling); +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_TYPES_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h new file mode 100644 index 0000000..e44394b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h @@ -0,0 +1,273 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_UTILS_H__ +#define __NVKMS_UTILS_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvidia-modeset-os-interface.h" + +/*! 
+ * Subtract B from A, and handle wrap around.
+ *
+ * This is useful for cases where A is a number that is incremented and wrapped;
+ * e.g.,
+ *
+ *  a = (a + 1) % max;
+ *
+ * and we want to subtract some amount from A to get one of its previous values.
+ */
+static inline NvU8 A_minus_b_with_wrap_U8(NvU8 a, NvU8 b, NvU8 max)
+{
+    return (a + max - b) % max;
+}
+
+/*!
+ * Return whether (A + B) > C, avoiding integer overflow in the addition.
+ */
+static inline NvBool A_plus_B_greater_than_C_U16(NvU16 a, NvU16 b, NvU16 c)
+{
+    return (NV_U16_MAX - a < b) || ((a + b) > c);
+}
+
+static inline NvS32 clamp_S32(NvS32 val, NvS32 lo, NvS32 hi)
+{
+    if (val < lo) {
+        return lo;
+    } else if (val > hi) {
+        return hi;
+    } else {
+        return val;
+    }
+}
+
+/*!
+ * Return whether the bitmask contains bits greater than or equal to
+ * the maximum.
+ */
+static inline NvBool nvHasBitAboveMax(NvU32 bitmask, NvU8 max)
+{
+    nvAssert(max <= 32);
+    if (max == 32) {
+        return FALSE;
+    }
+    return (bitmask & ~((1 << max) - 1)) != 0;
+}
+
+/*!
+ * Check if a timeout is exceeded.
+ *
+ * This is intended to be used when busy waiting in a loop, like this:
+ *
+ *  NvU64 startTime = 0;
+ *
+ *  do {
+ *      if (SOME-CONDITION) {
+ *          break;
+ *      }
+ *
+ *      if (nvExceedsTimeoutUSec(&startTime, TIMEOUT-IN-USEC)) {
+ *          break;
+ *      }
+ *
+ *      nvkms_yield();
+ *
+ *  } while (TRUE);
+ *
+ * The caller should zero-initialize startTime, and nvExceedsTimeoutUSec() will
+ * set startTime to the starting time on the first call.  It is structured this
+ * way to avoid the nvkms_get_usec() call in the common case where
+ * SOME-CONDITION is true on the first iteration (nvkms_get_usec() is not
+ * expected to be a large penalty, but it still seems nice to avoid it when not
+ * needed).
+ */
+static inline NvBool nvExceedsTimeoutUSec(
+    NvU64 *pStartTime,
+    NvU64 timeoutPeriod)
+{
+    const NvU64 currentTime = nvkms_get_usec();
+
+    if (*pStartTime == 0) {
+        *pStartTime = currentTime;
+        return FALSE;
+    }
+
+    if (currentTime < *pStartTime) { /* wraparound?! */
+        return TRUE;
+    }
+
+    return (currentTime - *pStartTime) > timeoutPeriod;
+}
+
+/*!
+ * Return a non-NULL string.
+ *
+ * The first argument, stringMightBeNull, may be NULL; in that case, return
+ * the second argument, safeString, which the caller should ensure is not
+ * NULL (e.g., by providing a literal).
+ *
+ * This is intended as a convenience for situations like this:
+ *
+ *  char *s = FunctionThatMightReturnNull();
+ *  printf("%s\n", nvSafeString(s, "stringLiteral"));
+ */
+static inline const char *nvSafeString(char *stringMightBeNull,
+                                       const char *safeString)
+{
+    return (stringMightBeNull != NULL) ?
stringMightBeNull : safeString; +} + +static inline NvU64 nvCtxDmaOffsetFromBytes(NvU64 ctxDmaOffset) +{ + nvAssert((ctxDmaOffset & ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1)) + == 0); + + return (ctxDmaOffset >> 8); +} + +NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth); + +typedef enum { + EVO_LOG_WARN, + EVO_LOG_ERROR, + EVO_LOG_INFO, +} NVEvoLogType; + +void *nvInternalAlloc(size_t size, NvBool zero); +void *nvInternalRealloc(void *ptr, size_t size); +void nvInternalFree(void *ptr); +char *nvInternalStrDup(const char *str); +NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo, + const char *key, NvU32 *val); + +#if defined(DEBUG) + +void nvReportUnfreedAllocations(void); + +void *nvDebugAlloc(size_t size, int line, const char *file); +void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file); +void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file); +void nvDebugFree(void *ptr); +char *nvDebugStrDup(const char *str, int line, const char *file); + +#define nvAlloc(s) nvDebugAlloc((s), __LINE__, __FILE__) +#define nvCalloc(n,s) nvDebugCalloc((n), (s), __LINE__, __FILE__) +#define nvFree(p) nvDebugFree(p) +#define nvRealloc(p,s) nvDebugRealloc((p), (s), __LINE__, __FILE__) +#define nvStrDup(s) nvDebugStrDup((s), __LINE__, __FILE__) + +#else + +#define nvAlloc(s) nvInternalAlloc((s), FALSE) +#define nvCalloc(n,s) nvInternalAlloc((n)*(s), TRUE) +#define nvRealloc(p,s) nvInternalRealloc((p),(s)) +#define nvFree(s) nvInternalFree(s) +#define nvStrDup(s) nvInternalStrDup(s) + +#endif + +void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex, + const char *fmt, va_list ap); + +void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLog(NVEvoLogType logType, const char *fmt, ...) + __attribute__((format (printf, 2, 3))); + + + +#if defined(DEBUG) + +void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...) + __attribute__((format (printf, 2, 3))); + +void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +#else + +# define nvEvoLogDebug(...) +# define nvEvoLogDevDebug(pDevEvo, ...) +# define nvEvoLogDispDebug(pDispEvo, ...) + +#endif /* DEBUG */ + +void nvInitInfoString(NVEvoInfoStringPtr pInfoString, + char *s, NvU16 totalLength); + +void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString, + const char *format, ...) + __attribute__((format (printf, 2, 3))); +void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, ...) 
+ __attribute__((format (printf, 2, 3))); + + +typedef NvU32 NvKmsGenericHandle; + +NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer); +NvKmsGenericHandle nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer); +void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle handle); +void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle *pHandle); +void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + NvKmsGenericHandle handle); +NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles, + NvU32 defaultSize); +void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles); + +#define FOR_ALL_POINTERS_IN_EVO_API_HANDLES(_pEvoApiHandles, \ + _pointer, _handle) \ + for ((_handle) = 0, \ + (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles, \ + &(_handle)); \ + (_pointer) != NULL; \ + (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles, \ + &(_handle))) + + + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_UTILS_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h new file mode 100644 index 0000000..91c375a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_VRR_H__ +#define __NVKMS_VRR_H__ + +#include "nvkms-types.h" +#include "nvkms-modeset-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvAllocVrrEvo(NVDevEvoPtr pDevEvo); +void nvFreeVrrEvo(NVDevEvoPtr pDevEvo); +void nvDisableVrr(NVDevEvoPtr pDevEvo); +void nvEnableVrr(NVDevEvoPtr pDevEvo, + const struct NvKmsSetModeRequest *pRequest); +void nvCancelVrrFrameReleaseTimers(NVDevEvoPtr pDevEvo); +void nvSetVrrActive(NVDevEvoPtr pDevEvo, NvBool active); +void nvApplyVrrBaseFlipOverrides(const NVDispEvoRec *pDispEvo, NvU32 head, + const NVFlipChannelEvoHwState *pOld, + NVFlipChannelEvoHwState *pNew); +void nvSetNextVrrFlipTypeAndIndex(NVDevEvoPtr pDevEvo, + struct NvKmsFlipReply *reply); +void nvTriggerVrrUnstallMoveCursor(NVDispEvoPtr pDispEvo); +void nvTriggerVrrUnstallSetCursorImage(NVDispEvoPtr pDispEvo, + NvBool ctxDmaChanged); +void nvGetDpyMinRefreshRateValidValues( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyVRRType vrrType, + const NvU32 edidTimeoutMicroseconds, + NvU32 *minMinRefreshRate, + NvU32 *maxMinRefreshRate); + +NvBool nvDispSupportsVrr(const NVDispEvoRec *pDispEvo); + +NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_VRR_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h new file mode 100644 index 0000000..7bf6cf3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h @@ -0,0 +1,607 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(NVKMS_API_TYPES_H) +#define NVKMS_API_TYPES_H + +#include <nvtypes.h> +#include <nvmisc.h> +#include <nvlimits.h> + +#define NVKMS_MAX_SUBDEVICES NV_MAX_SUBDEVICES + +#define NVKMS_LEFT 0 +#define NVKMS_RIGHT 1 +#define NVKMS_MAX_EYES 2 + +#define NVKMS_MAIN_LAYER 0 +#define NVKMS_OVERLAY_LAYER 1 +#define NVKMS_MAX_LAYERS_PER_HEAD 8 + +#define NVKMS_MAX_PLANES_PER_SURFACE 3 + +#define NVKMS_DP_ADDRESS_STRING_LENGTH 64 + +#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff + +typedef NvU32 NvKmsDeviceHandle; +typedef NvU32 NvKmsDispHandle; +typedef NvU32 NvKmsConnectorHandle; +typedef NvU32 NvKmsSurfaceHandle; +typedef NvU32 NvKmsFrameLockHandle; +typedef NvU32 NvKmsDeferredRequestFifoHandle; +typedef NvU32 NvKmsSwapGroupHandle; +typedef NvU32 NvKmsVblankSyncObjectHandle; + +struct NvKmsSize { + NvU16 width; + NvU16 height; +}; + +struct NvKmsPoint { + NvU16 x; + NvU16 y; +}; + +struct NvKmsSignedPoint { + NvS16 x; + NvS16 y; +}; + +struct NvKmsRect { + NvU16 x; + NvU16 y; + NvU16 width; + NvU16 height; +}; + +/* + * A 3x3 row-major matrix. + * + * The elements are 32-bit single-precision IEEE floating point values. The + * floating point bit pattern should be stored in NvU32s to be passed into the + * kernel. + */ +struct NvKmsMatrix { + NvU32 m[3][3]; +}; + +typedef enum { + NVKMS_CONNECTOR_TYPE_DP = 0, + NVKMS_CONNECTOR_TYPE_VGA = 1, + NVKMS_CONNECTOR_TYPE_DVI_I = 2, + NVKMS_CONNECTOR_TYPE_DVI_D = 3, + NVKMS_CONNECTOR_TYPE_ADC = 4, + NVKMS_CONNECTOR_TYPE_LVDS = 5, + NVKMS_CONNECTOR_TYPE_HDMI = 6, + NVKMS_CONNECTOR_TYPE_USBC = 7, + NVKMS_CONNECTOR_TYPE_DSI = 8, + NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9, + NVKMS_CONNECTOR_TYPE_UNKNOWN = 10, + NVKMS_CONNECTOR_TYPE_MAX = NVKMS_CONNECTOR_TYPE_UNKNOWN, +} NvKmsConnectorType; + +static inline +const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType) +{ + switch (connectorType) { + case NVKMS_CONNECTOR_TYPE_DP: return "DP"; + case NVKMS_CONNECTOR_TYPE_VGA: return "VGA"; + case NVKMS_CONNECTOR_TYPE_DVI_I: return "DVI-I"; + case NVKMS_CONNECTOR_TYPE_DVI_D: return "DVI-D"; + case NVKMS_CONNECTOR_TYPE_ADC: return "ADC"; + case NVKMS_CONNECTOR_TYPE_LVDS: return "LVDS"; + case NVKMS_CONNECTOR_TYPE_HDMI: return "HDMI"; + case NVKMS_CONNECTOR_TYPE_USBC: return "USB-C"; + case NVKMS_CONNECTOR_TYPE_DSI: return "DSI"; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER"; + default: break; + } + return "Unknown"; +} + +typedef enum { + NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA = 0, + NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS = 1, + NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS = 2, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DP = 3, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI = 4, + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5, + NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX = + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN, +} NvKmsConnectorSignalFormat; + +/*! + * Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces). + * + * When flipping, the client can optionally specify a notifier and/or + * a semaphore to use with the flip. The surfaces used for these + * should be registered with NVKMS to get an NvKmsSurfaceHandle. + * + * NvKmsNIsoSurface::offsetInWords indicates the starting location, in + * 32-bit words, within the surface where EVO should write the + * notifier or semaphore. Note that only the first 4096 bytes of a + * surface can be used by semaphores or notifiers; offsetInWords must + * allow for the semaphore or notifier to be written within the first + * 4096 bytes of the surface.
I.e., this must be satisfied: + * + * ((offsetInWords * 4) + elementSizeInBytes) <= 4096 + * + * Where elementSizeInBytes is: + * + * if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16 + * if NISO_FORMAT_LEGACY, + * if overlay && notifier, elementSizeInBytes = 16 + * else, elementSizeInBytes = 4 + * + * Note that different GPUs support different semaphore and notifier formats. + * Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are + * valid for the given device. + * + * Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but + * FOUR_WORD uses a format compatible with display class 907[ce], and + * FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by + * the NV_DISP_NOTIFIER definition in clc37d.h). + */ +enum NvKmsNIsoFormat { + NVKMS_NISO_FORMAT_LEGACY, + NVKMS_NISO_FORMAT_FOUR_WORD, + NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, +}; + +enum NvKmsEventType { + NVKMS_EVENT_TYPE_DPY_CHANGED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FLIP_OCCURRED, +}; + +typedef enum { + NV_EVO_SCALER_1TAP = 0, + NV_EVO_SCALER_2TAPS = 1, + NV_EVO_SCALER_3TAPS = 2, + NV_EVO_SCALER_5TAPS = 3, + NV_EVO_SCALER_8TAPS = 4, + NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP, + NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS, +} NVEvoScalerTaps; + +/* This structure describes the scaling bounds for a given layer. */ +struct NvKmsScalingUsageBounds { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 maxVDownscaleFactor; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for maxVDownscaleFactor. + */ + NvU16 maxHDownscaleFactor; + + /* Maximum vertical taps allowed */ + NVEvoScalerTaps vTaps; + + /* Whether vertical upscaling is allowed */ + NvBool vUpscalingAllowed; +}; + +struct NvKmsUsageBounds { + struct { + NvBool usable; + struct NvKmsScalingUsageBounds scaling; + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +/* + * A 3x4 row-major colorspace conversion matrix. + * + * The output color C' is the CSC matrix M times the column vector + * [ R, G, B, 1 ]. + * + * Each entry in the matrix is a signed 2's-complement fixed-point number with + * 3 integer bits and 16 fractional bits. + */ +struct NvKmsCscMatrix { + NvS32 m[3][4]; +}; + +#define NVKMS_IDENTITY_CSC_MATRIX \ + (struct NvKmsCscMatrix){{ \ + { 0x10000, 0, 0, 0 }, \ + { 0, 0x10000, 0, 0 }, \ + { 0, 0, 0x10000, 0 } \ + }} + +/*! + * A color key match bit is used in the blend equations, and one can select + * either the src or dst Color Key when blending. An asserted key bit means + * match; a de-asserted key bit means nomatch. + * + * The src Color Key means using the key bit from the current layer; the dst + * Color Key means using the key bit from the previous layer composition stage. + * The selected src or dst key bit is inherited by the blended pixel, and + * serves as the dst Color Key for the next blending stage. + * + * src: Forward the color key match bit from the current layer pixel to the + * next layer composition stage. + * + * dst: Forward the color key match bit from the previous composition stage + * pixel to the next layer composition stage. + * + * disable: Forward "1" to the next layer composition stage as the color key.
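+ * + * As an illustrative sketch (using NVColorKey and struct + * NvKmsCompositionParams, both defined later in this header), a layer + * keying on pure black in all three color components might use: + * + *     params.colorKeySelect = NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC; + *     params.colorKey = (NVColorKey) { + *         .r = 0, .g = 0, .b = 0, + *         .matchR = TRUE, .matchG = TRUE, .matchB = TRUE, + *         .matchA = FALSE, + *     };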
+ */ +enum NvKmsCompositionColorKeySelect { + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST, +}; + +#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3 + +/*! + * Composition modes used for surfaces in general. + * The various types of composition are: + * + * Opaque: source pixels are opaque regardless of alpha, + * and will occlude the destination pixel. + * + * Alpha blending: aka opacity, which can be specified + * for a surface in its entirety, or on a per-pixel basis. + * + * Non-premultiplied: the alpha value applies to the source pixel, + * and also counter-weighs the destination pixel. + * Premultiplied: alpha is already applied to the source pixel, + * so it only counter-weighs the destination pixel. + * + * Color keying: use a color key structure to decide + * the criteria for matching and compositing. + * (See NVColorKey below.) + */ +enum NvKmsCompositionBlendingMode { + /*! + * Modes that use no other parameters. + */ + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE, + + /*! + * Mode that ignores both the per-pixel alpha provided + * by the client and the surfaceAlpha, making the source pixel + * totally transparent. + */ + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT, + + /*! + * Modes that use the per-pixel alpha provided by the client; + * the surfaceAlpha must be set to 0. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA, + + /*! + * These use both the surface-wide and per-pixel alpha values. + * surfaceAlpha is treated as the numerator, ranging from 0 to 255, + * of a fraction whose denominator is 255. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA, +}; + +static inline NvBool +NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode) +{ + return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; +} + +/*! + * Abstract description of a color key. + * + * a, r, g, and b are component values in the same width as the framebuffer + * values being scanned out. + * + * match[ARGB] defines whether that component is considered when matching the + * color key -- TRUE means that the value of the corresponding component must + * match the given value for the given pixel to be considered a 'key match'; + * FALSE means that the value of that component is not a key match criterion. + */ +typedef struct { + NvU16 a, r, g, b; + NvBool matchA, matchR, matchG, matchB; +} NVColorKey; + +/*! + * Describes the composition parameters for a single layer. + */ +struct NvKmsCompositionParams { + enum NvKmsCompositionColorKeySelect colorKeySelect; + NVColorKey colorKey; + /* + * It is possible to assign a different blending mode to match pixels and + * nomatch pixels. blendingMode[0] is used to blend a pixel with the color key + * match bit "0", and blendingMode[1] is used to blend a pixel with the color + * key match bit "1". + * + * However, because of hardware restrictions, match and nomatch pixels + * cannot use the blending modes PREMULT_ALPHA, NON_PREMULT_ALPHA, + * PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at once. + */ + enum NvKmsCompositionBlendingMode blendingMode[2]; + NvU8 surfaceAlpha; /* Applies to all pixels of entire surface */ + /* + * Defines the composition order.
A smaller value moves the layer closer to + * the top (away from the background). The values need not be consecutive; + * the only requirements are that the value be different for each of the + * layers owned by the head, and that the value for the main layer be the + * greatest one. + * + * The cursor always remains on top of all other layers; this parameter + * has no effect on the cursor. NVKMS assigns a default depth to each of + * the supported layers; by default, the depth of a layer is calculated as + * (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to + * '0', the default depth value is used. + */ + NvU8 depth; +}; + +/*! + * Describes the composition capabilities supported by the hardware for + * the cursor or a layer: the supported color key selects and, for each + * supported color key select, the supported blending modes for match and + * nomatch pixels. + */ +struct NvKmsCompositionCapabilities { + + struct { + /* + * A bitmask of the supported blending modes for match and nomatch + * pixels. It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values. + */ + NvU32 supportedBlendModes[2]; + } colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS]; + + /* + * A bitmask of the supported color key selects. + * + * It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*) + * values. + */ + NvU32 supportedColorKeySelects; +}; + +struct NvKmsLayerCapabilities { + /*! + * Whether the layer supports window mode. If window mode is supported, + * then clients can set the layer's dimensions so that they're smaller than + * the viewport, and can also change the output position of the layer to a + * non-(0, 0) position. + * + * NOTE: Dimension changes are currently unsupported for the main layer, + * and output position changes for the main layer are currently only + * supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for + * these is coming soon, via changes to flip code. + */ + NvBool supportsWindowMode :1; + + /*! + * Whether the layer supports the HDR pipe. + */ + NvBool supportsHDR :1; + + + /*! + * Describes the supported Color Key selects and blending modes for + * match and nomatch layer pixels. + */ + struct NvKmsCompositionCapabilities composition; + + /*! + * Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS + * device on the given scanout surface layer. + * + * Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported, + * then (1 << value) will be set in the appropriate bitmask. + * + * Note that these bitmasks just report the static SW/HW capabilities, + * and are a superset of the formats that IMP may allow. Clients are + * still expected to honor the NvKmsUsageBounds for each head. + */ + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); +}; + +/*! + * Surface layouts. + * + * BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into + * blocks or tiles for better locality during common GPU operations. + * + * Pitch is the naive "linear" surface layout with pixels laid out sequentially + * in memory line-by-line, optionally with some padding at the end of each line + * for alignment purposes.
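+ * + * For example (a sketch; the actual alignment requirement is reported in + * NvKmsAllocDeviceReply::surfaceAlignment, and the 64-byte value below is + * only an assumption for illustration), the pitch of a 1920-pixel-wide, + * 4-byte-per-pixel pitch-linear surface would be computed as: + * + *     pitch = ((1920 * 4) + 63) & ~63;   // = 7680 bytes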
+ */ +enum NvKmsSurfaceMemoryLayout { + NvKmsSurfaceMemoryLayoutBlockLinear = 0, + NvKmsSurfaceMemoryLayoutPitch = 1, +}; + +static inline const char *NvKmsSurfaceMemoryLayoutToString( + enum NvKmsSurfaceMemoryLayout layout) +{ + switch (layout) { + default: + return "Unknown"; + case NvKmsSurfaceMemoryLayoutBlockLinear: + return "BlockLinear"; + case NvKmsSurfaceMemoryLayoutPitch: + return "Pitch"; + } +} + +typedef enum { + MUX_STATE_GET = 0, + MUX_STATE_INTEGRATED = 1, + MUX_STATE_DISCRETE = 2, + MUX_STATE_UNKNOWN = 3, +} NvMuxState; + +enum NvKmsRotation { + NVKMS_ROTATION_0 = 0, + NVKMS_ROTATION_90 = 1, + NVKMS_ROTATION_180 = 2, + NVKMS_ROTATION_270 = 3, + NVKMS_ROTATION_MIN = NVKMS_ROTATION_0, + NVKMS_ROTATION_MAX = NVKMS_ROTATION_270, +}; + +struct NvKmsRRParams { + enum NvKmsRotation rotation; + NvBool reflectionX; + NvBool reflectionY; +}; + +/*! + * Convert each possible NvKmsRRParams to a unique integer [0..15], + * so that we can describe possible NvKmsRRParams with an NvU16 bitmask. + * + * E.g. + * rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0 + * ... + * rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15 + */ +static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams) +{ + NvU8 bitPosition = (NvU8)rrParams->rotation; + if (rrParams->reflectionX) { + bitPosition |= NVBIT(2); + } + if (rrParams->reflectionY) { + bitPosition |= NVBIT(3); + } + return bitPosition; +} + +/* + * NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via + * display's isochronous interface. Examples of this type of memory are pixel + * data and LUT entries. + * + * NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via + * display's non-isochronous interface. Examples of this type of memory are + * semaphores and notifiers. + */ +typedef enum { + NVKMS_MEMORY_ISO = 0, + NVKMS_MEMORY_NISO = 1, +} NvKmsMemoryIsoType; + +typedef struct { + NvBool coherent; + NvBool noncoherent; +} NvKmsDispIOCoherencyModes; + +enum NvKmsInputColorSpace { + /* Unknown colorspace; no de-gamma will be applied */ + NVKMS_INPUT_COLORSPACE_NONE = 0, + + /* Linear, Rec.709 [-0.5, 7.5) */ + NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR = 1, + + /* PQ, Rec.2020 unity */ + NVKMS_INPUT_COLORSPACE_BT2100_PQ = 2, +}; + +enum NvKmsOutputTf { + /* + * NVKMS itself won't apply any OETF (clients are still + * free to provide a custom OLUT) + */ + NVKMS_OUTPUT_TF_NONE = 0, + NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR = 1, + NVKMS_OUTPUT_TF_PQ = 2, +}; + +/*! + * HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec. + * This is expected to match exactly with the spec. + */ +struct NvKmsHDRStaticMetadata { + /*! + * Color primaries of the data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } displayPrimaries[3]; + + /*! + * White point of colorspace data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } whitePoint; + + /** + * Maximum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxDisplayMasteringLuminance; + + /*! + * Minimum mastering display luminance. 
+ * This value is coded as an unsigned 16-bit value in units of + * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF + * represents 6.5535 cd/m2. + */ + NvU16 minDisplayMasteringLuminance; + + /*! + * Maximum content light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxCLL; + + /*! + * Maximum frame-average light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxFALL; +}; + +#endif /* NVKMS_API_TYPES_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h new file mode 100644 index 0000000..cd17ddb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h @@ -0,0 +1,4003 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_API_H) +#define NVKMS_API_H + +/* + * NVKMS API + * + * + * All file operations described in this header file go through a + * single device file that has system-wide scope. The individual + * ioctl request data structures specify the objects to which the + * request is targeted. + * + * + * OBJECTS + * + * The NVKMS API is organized into several objects: + * + * - A device, which corresponds to an RM device. This can either be + * a single GPU, or multiple GPUs linked into SLI. Each GPU is + * called a "subdevice". The subdevices used by an NVKMS device are + * reported in NvKmsAllocDeviceReply::subDeviceMask. + * + * A device is specified by a deviceHandle, returned by + * NVKMS_IOCTL_ALLOC_DEVICE. + * + * - A disp, which represents an individually programmable display + * engine of a GPU. In SLI Mosaic, there is one disp per physical + * GPU. In all other configurations there is one disp for the + * entire device. A disp is specified by a (deviceHandle, + * dispHandle) duple. A dispHandle is only unique within a single + * device: multiple devices may have disps with the same dispHandle + * value. + * + * A disp contains one or more subdevices, as reported by + * NvKmsQueryDispReply::subDeviceMask. 
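+ * + * (As a sketch, a client could enumerate the subdevices of a disp with: + * + *     for (sd = 0; sd < NVKMS_MAX_SUBDEVICES; sd++) { + *         if (queryDispReply.subDeviceMask & NVBIT(sd)) { + *             // This subdevice is used by the disp. + *         } + *     } + * + * where 'queryDispReply' is a hypothetical struct NvKmsQueryDispReply + * filled in by NVKMS_IOCTL_QUERY_DISP.)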
A disp will only have + * multiple subdevices in cases where the device only has a single + * disp. Any subdevice specified in + * NvKmsQueryDispReply::subDeviceMask will also be in + * NvKmsAllocDeviceReply::subDeviceMask. + * + * - A connector, which represents an electrical connection to the + * GPU. E.g., a physical DVI-I connector has two NVKMS connector + * objects (a VGA NVKMS connector and a TMDS NVKMS connector). + * However, a physical DisplayPort connector has one NVKMS connector + * object, even if there is a tree of DisplayPort1.2 Multistream + * monitors connected to it. + * + * Connectors are associated with a specific disp. A connector is + * specified by a (deviceHandle, dispHandle, connectorHandle) + * triplet. A connectorHandle is only unique within a single disp: + * multiple disps may have connectors with the same connectorHandle + * value. + * + * - A dpy, which represents a connection of a display device to the + * system. Multiple dpys can map to the same connector in the case + * of DisplayPort1.2 MultiStream. A dpy is specified by a + * (deviceHandle, dispHandle, dpyId) triplet. A dpyId is only + * unique within a single disp: multiple disps may have dpys with + * the same dpyId value. + * + * - A surface, which represents memory to be scanned out. Surfaces + * should be allocated by resman, and then registered and + * unregistered with NVKMS. The NvKmsSurfaceHandle value of 0 is + * reserved to mean no surface. + * + * NVKMS clients should treat the device, disp, connector, and surface + * handles as opaque values. They are specific to the file descriptor + * through which a client allocated and queried them. Dpys should + * also be treated as opaque, though they can be passed between + * clients. + * + * NVKMS clients initialize NVKMS by allocating an NVKMS device. The + * device can either be a single GPU, or an SLI group. It is expected + * that the client has already attached/linked the GPUs through + * resman and created a resman device. + * + * NVKMS device allocation returns a device handle, the disp handles, + * and capabilities of the device. + * + * + * MODE VALIDATION + * + * When a client requests to set a mode via NVKMS_IOCTL_SET_MODE, + * NVKMS will validate the mode at that point in time, honoring the + * NvKmsModeValidationParams specified as part of the request. + * + * Clients can use NVKMS_IOCTL_VALIDATE_MODE to test if a mode is valid. + * + * Clients can use NVKMS_IOCTL_VALIDATE_MODE_INDEX to get the list of + * modes that NVKMS currently considers valid for the dpy (modes from + * the EDID, etc). + * + * IMPLEMENTATION NOTE: the same mode validation common code will be + * used in each of NVKMS_IOCTL_SET_MODE, NVKMS_IOCTL_VALIDATE_MODE, + * and NVKMS_IOCTL_VALIDATE_MODE_INDEX, but NVKMS won't generally maintain + * a "mode pool" with an exhaustive list of the allowable modes for a + * dpy. + * + * + * DYNAMIC DPY HANDLING + * + * Dynamic dpys (namely, DisplayPort multistream dpys) share the NVDpyId + * namespace with non-dynamic dpys on the same disp. However, dynamic dpys will + * not be listed in NvKmsQueryDispReply::validDpys. Instead, dynamic dpys are + * added and removed from the system dynamically. + * + * When a dynamic dpy is first connected, NVKMS will allocate a new NVDpyId for + * it and generate an NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED event. When the + * dynamic dpy is disconnected, NVKMS will generate an + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED event. 
Whether the corresponding + * NVDpyId is immediately freed and made available for subsequent dynamic dpys + * depends on client behavior. + * + * Clients may require that a dynamic NVDpyId persist even after the dynamic dpy + * is disconnected. Clients who require this can use + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST. NVKMS will retain the NVDpyId + * until the dynamic dpy is disconnected and there are no clients who have + * declared "interest" on the particular dynamic dpy. While the NVDpyId + * persists, it will be used for any monitor that is connected at the same + * dynamic dpy address (i.e., port address, in the case of DP MST). + * + * + * FILE DESCRIPTOR HANDLING + * + * With the exception of NVDpyIds, all handles should be assumed to be + * specific to the current file descriptor on which the ioctls are + * performed. + * + * Multiple devices can be allocated on the same file descriptor. + * E.g., to drive the display of multiple GPUs. + * + * If a file descriptor is closed prematurely, either explicitly by + * the client or implicitly by the operating system because the client + * process was terminated, NVKMS will perform an + * NVKMS_IOCTL_FREE_DEVICE for any devices currently allocated by the + * client on the closed file descriptor. + * + * NVKMS file descriptors are normally used as the first argument of + * ioctl(2). However, NVKMS file descriptors are also used for + * granting surfaces (see NVKMS_IOCTL_GRANT_SURFACE) or permissions + * (see NVKMS_IOCTL_GRANT_PERMISSIONS). Any given NVKMS file + * descriptor can only be used for one of these uses. + * + * QUESTIONS: + * + * - Is there any reason for errors to be returned through a status field + * in the Param structures, rather than the ioctl(2) return value? + * + * - Is it too asymmetric that NVKMS_IOCTL_SET_MODE can set a + * mode across heads/disps, but other requests (e.g., + * NVKMS_IOCTL_SET_CURSOR_IMAGE) operate on a single head? + * + * + * IOCTL PARAMETER ORGANIZATION + * + * For table-driven processing of ioctls, it is useful for all ioctl + * parameters to follow the same convention: + * + * struct NvKmsFooRequest { + * (...) + * }; + * + * struct NvKmsFooReply { + * (...) + * }; + * + * struct NvKmsFooParams { + * struct NvKmsFooRequest request; //! in + * struct NvKmsFooReply reply; //! out + * }; + * + * I.e., all ioctl parameter structures NvKmsFooParams should have + * "request" and "reply" fields, with types "struct NvKmsFooRequest" + * and "struct NvKmsFooReply". C doesn't technically support empty + * structures, so the convention is to place a "padding" NvU32 in + * request or reply structures that would otherwise be empty. + */ + +#include "nvtypes.h" +#include "nvlimits.h" +#include "nv_dpy_id.h" +#include "nv_mode_timings.h" +#include "nvkms-api-types.h" +#include "nvgputypes.h" /* NvGpuSemaphore */ +#include "nvkms-format.h" + +/* + * The NVKMS ioctl commands. See the ioctl parameter declarations + * later in this header file for an explanation of each ioctl command. 
+ */ +enum NvKmsIoctlCommand { + NVKMS_IOCTL_ALLOC_DEVICE, + NVKMS_IOCTL_FREE_DEVICE, + NVKMS_IOCTL_QUERY_DISP, + NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, + NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, + NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, + NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, + NVKMS_IOCTL_VALIDATE_MODE_INDEX, + NVKMS_IOCTL_VALIDATE_MODE, + NVKMS_IOCTL_SET_MODE, + NVKMS_IOCTL_SET_CURSOR_IMAGE, + NVKMS_IOCTL_MOVE_CURSOR, + NVKMS_IOCTL_SET_LUT, + NVKMS_IOCTL_IDLE_BASE_CHANNEL, + NVKMS_IOCTL_FLIP, + NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, + NVKMS_IOCTL_REGISTER_SURFACE, + NVKMS_IOCTL_UNREGISTER_SURFACE, + NVKMS_IOCTL_GRANT_SURFACE, + NVKMS_IOCTL_ACQUIRE_SURFACE, + NVKMS_IOCTL_RELEASE_SURFACE, + NVKMS_IOCTL_SET_DPY_ATTRIBUTE, + NVKMS_IOCTL_GET_DPY_ATTRIBUTE, + NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_SET_DISP_ATTRIBUTE, + NVKMS_IOCTL_GET_DISP_ATTRIBUTE, + NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_QUERY_FRAMELOCK, + NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, + NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, + NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_GET_NEXT_EVENT, + NVKMS_IOCTL_DECLARE_EVENT_INTEREST, + NVKMS_IOCTL_CLEAR_UNICAST_EVENT, + NVKMS_IOCTL_GET_3DVISION_DONGLE_PARAM_BYTES, + NVKMS_IOCTL_SET_3DVISION_AEGIS_PARAMS, + NVKMS_IOCTL_SET_LAYER_POSITION, + NVKMS_IOCTL_GRAB_OWNERSHIP, + NVKMS_IOCTL_RELEASE_OWNERSHIP, + NVKMS_IOCTL_GRANT_PERMISSIONS, + NVKMS_IOCTL_ACQUIRE_PERMISSIONS, + NVKMS_IOCTL_REVOKE_PERMISSIONS, + NVKMS_IOCTL_QUERY_DPY_CRC32, + NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO, + NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO, + NVKMS_IOCTL_ALLOC_SWAP_GROUP, + NVKMS_IOCTL_FREE_SWAP_GROUP, + NVKMS_IOCTL_JOIN_SWAP_GROUP, + NVKMS_IOCTL_LEAVE_SWAP_GROUP, + NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST, + NVKMS_IOCTL_GRANT_SWAP_GROUP, + NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, + NVKMS_IOCTL_RELEASE_SWAP_GROUP, + NVKMS_IOCTL_SWITCH_MUX, + NVKMS_IOCTL_GET_MUX_STATE, + NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, + NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, + NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, +}; + + +#define NVKMS_NVIDIA_DRIVER_VERSION_STRING_LENGTH 32 +#define NVKMS_MAX_CONNECTORS_PER_DISP 16 +#define NVKMS_MAX_HEADS_PER_DISP 4 +#define NVKMS_MAX_GPUS_PER_FRAMELOCK 4 +#define NVKMS_MAX_DEVICE_REGISTRY_KEYS 16 +#define NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN 32 +#define NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD 6 + + +/* + * There can be at most one SwapGroup per-head, per-disp (and, + * in the extreme, there is one disp per-GPU). + */ +#define NVKMS_MAX_SWAPGROUPS (NVKMS_MAX_HEADS_PER_DISP * NV_MAX_DEVICES) + +#define NVKMS_MAX_VALID_SYNC_RANGES 8 + +#define NVKMS_DPY_NAME_SIZE 128 +#define NVKMS_GUID_SIZE 16 +#define NVKMS_3DVISION_DONGLE_PARAM_BYTES 20 +#define NVKMS_GPU_STRING_SIZE 80 + +#define NVKMS_LOG2_LUT_ARRAY_SIZE 10 +#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE) +#define NVKMS_VRR_SEMAPHORE_SURFACE_SIZE 1024 + +/* + * The GUID string has the form: + * XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + * Two Xs per byte, plus four dashes and a NUL byte. + */ +#define NVKMS_GUID_STRING_SIZE ((NVKMS_GUID_SIZE * 2) + 5) + +#define NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH 2048 +#define NVKMS_EDID_INFO_STRING_LENGTH (32 * 1024) + +/*! + * A base EDID is 128 bytes, with 128 bytes per extension block. 2048 + * should be large enough for any EDID we see. + */ +#define NVKMS_EDID_BUFFER_SIZE 2048 + +/*! + * Description of modetimings. + * + * YUV420 modes require special care since some GPUs do not support YUV420 + * scanout in hardware. 
When timings::yuv420Mode is NV_YUV420_SW, NVKMS will + * set a mode with horizontal values that are half of what are described in + * NvKmsMode, and not enable any color space conversion. When clients allocate + * a surface and populate it with content, the region of interest within the + * surface should be half the width of the NvKmsMode, and the surface content + * should be RGB->YUV color space converted, and decimated from 4:4:4 to 4:2:0. + * + * The NvKmsMode and viewPortOut, specified by the NVKMS client, + * should be in "full" horizontal space, but the surface and + * viewPortIn should be in "half" horizontal space. + */ +struct NvKmsMode { + NvModeTimings timings; + char name[32]; +}; + +/*! + * Mode validation override bit flags, for use in + * NvKmsModeValidationParams::overrides. + */ +enum NvKmsModeValidationOverrides { + NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK = (1 << 0), + NVKMS_MODE_VALIDATION_NO_EDID_MAX_PCLK_CHECK = (1 << 1), + NVKMS_MODE_VALIDATION_NO_HORIZ_SYNC_CHECK = (1 << 2), + NVKMS_MODE_VALIDATION_NO_VERT_REFRESH_CHECK = (1 << 3), + NVKMS_MODE_VALIDATION_NO_EDID_DFP_MAX_SIZE_CHECK = (1 << 4), + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK = (1 << 5), + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS = (1 << 6), + NVKMS_MODE_VALIDATION_NO_TOTAL_SIZE_CHECK = (1 << 7), + NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK = (1 << 8), + NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK = (1 << 9), + NVKMS_MODE_VALIDATION_ALLOW_NON_3DVISION_MODES = (1 << 10), + NVKMS_MODE_VALIDATION_ALLOW_NON_EDID_MODES = (1 << 11), + NVKMS_MODE_VALIDATION_ALLOW_NON_HDMI3D_MODES = (1 << 12), + NVKMS_MODE_VALIDATION_NO_MAX_SIZE_CHECK = (1 << 13), + NVKMS_MODE_VALIDATION_NO_HDMI2_CHECK = (1 << 14), + NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK = (1 << 15), + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS = (1 << 16), + NVKMS_MODE_VALIDATION_ALLOW_DP_INTERLACED = (1 << 17), + NVKMS_MODE_VALIDATION_NO_INTERLACED_MODES = (1 << 18), +}; + +/*! + * Frequency information used during mode validation (for HorizSync + * and VertRefresh) can come from several possible sources. NVKMS + * selects the frequency information by prioritizing the input sources + * and then reports the selected source. + * + * Without client input, NVKMS will use frequency ranges from the + * EDID, if available. If there is no EDID, NVKMS will fall back to + * builtin conservative defaults. + * + * The client can specify frequency ranges that are used instead of + * anything in the EDID (_CLIENT_BEFORE_EDID), or frequency ranges + * that are used only if no EDID-reported ranges are available + * (_CLIENT_AFTER_EDID). + */ +enum NvKmsModeValidationFrequencyRangesSource { + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_NONE = 0, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID = 1, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID = 2, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID = 3, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CONSERVATIVE_DEFAULTS = 4, +}; + + +/*! + * Mode validation parameters. + */ +struct NvKmsModeValidationFrequencyRanges { + enum NvKmsModeValidationFrequencyRangesSource source; + NvU32 numRanges; + struct { + NvU32 high; + NvU32 low; + } range[NVKMS_MAX_VALID_SYNC_RANGES]; +}; + +struct NvKmsModeValidationValidSyncs { + + /*! If TRUE, ignore frequency information from the EDID. */ + NvBool ignoreEdidSource; + + /*! values are in Hz */ + struct NvKmsModeValidationFrequencyRanges horizSyncHz; + + /*! 
values are in 1/1000 Hz */ + struct NvKmsModeValidationFrequencyRanges vertRefreshHz1k; +}; + +enum NvKmsStereoMode { + NVKMS_STEREO_DISABLED = 0, + NVKMS_STEREO_NVIDIA_3D_VISION, + NVKMS_STEREO_NVIDIA_3D_VISION_PRO, + NVKMS_STEREO_HDMI_3D, + NVKMS_STEREO_OTHER, +}; + +struct NvKmsModeValidationParams { + NvBool verboseModeValidation; + NvBool moreVerboseModeValidation; + + /*! + * Normally, if a mode supports both YUV 4:2:0 and RGB 4:4:4, + * NVKMS will prefer RGB 4:4:4 if both the monitor and the GPU + * support it. Use preferYUV420 to override that preference. + */ + NvBool preferYUV420; + + enum NvKmsStereoMode stereoMode; + NvU32 overrides; + + struct NvKmsModeValidationValidSyncs validSyncs; + + /*! + * Normally, NVKMS will determine on its own whether to use Display + * Stream Compression (DSC). Use forceDsc to force NVKMS to use DSC + * when the GPU supports it. + */ + NvBool forceDsc; + + /*! + * When enabled, Display Stream Compression (DSC) has an + * associated bits/pixel rate, which NVKMS normally computes. + * Use dscOverrideBitsPerPixelX16 to override the DSC bits/pixel rate. + * This is in units of 1/16 of a bit per pixel. + * + * This target bits/pixel rate should be >= 8.0 and <= 32.0, i.e. the valid + * bits/pixel values are members of the sequence 8.0, 8.0625, 8.125, ..., + * 31.9375, 32.0. You can convert a bits/pixel value to + * dscOverrideBitsPerPixelX16 as follows: + * + * +------------------+--------------------------------------------+ + * | bits_per_pixel | dscBitsPerPixelX16 = bits_per_pixel * 16 | + * +------------------+--------------------------------------------+ + * | 8.0 | 128 | + * | 8.0625 | 129 | + * | . | . | + * | . | . | + * | . | . | + * | 31.9375 | 511 | + * | 32.0 | 512 | + * +------------------+--------------------------------------------+ + * + * If the specified dscOverrideBitsPerPixelX16 is out of range, + * then mode validation may fail. + * + * When dscOverrideBitsPerPixelX16 is 0, NVKMS computes the rate itself. + */ + NvU32 dscOverrideBitsPerPixelX16; +}; + +/*! + * The list of pixelShift modes. + */ +enum NvKmsPixelShiftMode { + NVKMS_PIXEL_SHIFT_NONE = 0, + NVKMS_PIXEL_SHIFT_4K_TOP_LEFT, + NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT, + NVKMS_PIXEL_SHIFT_8K, +}; + +/*! + * The available resampling methods used when viewport scaling is requested. + */ +enum NvKmsResamplingMethod { + NVKMS_RESAMPLING_METHOD_BILINEAR = 0, + NVKMS_RESAMPLING_METHOD_BICUBIC_TRIANGULAR, + NVKMS_RESAMPLING_METHOD_BICUBIC_BELL_SHAPED, + NVKMS_RESAMPLING_METHOD_BICUBIC_BSPLINE, + NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_TRIANGULAR, + NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BELL_SHAPED, + NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BSPLINE, + NVKMS_RESAMPLING_METHOD_NEAREST, + NVKMS_RESAMPLING_METHOD_DEFAULT = NVKMS_RESAMPLING_METHOD_BILINEAR, +}; + +enum NvKmsWarpMeshDataType { + NVKMS_WARP_MESH_DATA_TYPE_TRIANGLES_XYUVRQ, + NVKMS_WARP_MESH_DATA_TYPE_TRIANGLE_STRIP_XYUVRQ, +}; + +/*! + * Description of a cursor image on a single head; this is used by any + * NVKMS request that needs to specify the cursor image. + */ +struct NvKmsSetCursorImageCommonParams { + /*! The surface to display in the cursor. */ + NvKmsSurfaceHandle surfaceHandle[NVKMS_MAX_EYES]; + /*! + * The cursor composition parameters are read and applied only if the + * specified cursor surface is not null. + */ + struct NvKmsCompositionParams cursorCompParams; +}; + + +/*! + * Description of the cursor position on a single head; this is used + * by any NVKMS request that needs to specify the cursor position.
+ * + * x,y are relative to the current viewPortIn configured on the head. + */ +struct NvKmsMoveCursorCommonParams { + NvS16 x; /*! in */ + NvS16 y; /*! in */ +}; + +/*! + * Per-component arrays of NvU16s describing the LUT; used for both the input + * LUT and output LUT. + */ +struct NvKmsLutRamps { + NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */ +}; + +/*! + * Description of the main layer LUT on a single head; this is used by any NVKMS + * request that needs to specify the LUT. + */ +struct NvKmsSetInputLutParams { + NvBool specified; + NvU32 depth; /*! used bits per pixel (8, 15, 16, 24, 30) */ + + /*! + * The first and last elements (inclusive) in the color arrays to + * use. Valid values are in the range [0,N], where N is a + * function of depth: + * + * Depth N + * 8 256 + * 15 32 + * 16 64 + * 24 256 + * 30 1024 + * + * 'start' is the first element in the color arrays to use. + */ + NvU32 start; + + /*! + * 'end' is the last element (inclusive) in the color arrays to + * use. If end == 0, this command will disable the HW LUT for + * this head. + * + * The other fields in this structure, besides 'specified', are ignored if + * end == 0. + */ + NvU32 end; + + /*! + * Pointer to struct NvKmsLutRamps describing the LUT. + * Elements [start,end] will be used. + * + * Each entry in the input LUT has valid values in the range [0, 65535]. + * However, on pre-Turing GPUs only 11 bits are significant; NVKMS will + * convert values in this range into the appropriate internal format. + * + * Use nvKmsPointerToNvU64() to assign pRamps. + */ + NvU64 pRamps NV_ALIGN_BYTES(8); +}; + + +/*! + * Description of the output LUT on a single head; this is used by any NVKMS + * request that needs to specify the LUT. + * + * Unlike the input LUT: + * - specifying the output LUT updates all values at once. + * + * Each entry in the output LUT has valid values in the range [0, 65535]. + * However, only 11 bits are significant; NVKMS will convert values in this + * range into the appropriate internal format. + */ +struct NvKmsSetOutputLutParams { + NvBool specified; + NvBool enabled; + + /*! + * Pointer to struct NvKmsLutRamps containing the actual LUT data, if + * required. + * Use nvKmsPointerToNvU64() to assign pRamps. + */ + NvU64 pRamps NV_ALIGN_BYTES(8); +}; + +/*! + * Description of the LUT on a single head; this is used by any NVKMS + * request that needs to specify the LUT. + */ +struct NvKmsSetLutCommonParams { + struct NvKmsSetInputLutParams input; + struct NvKmsSetOutputLutParams output; + + NvBool synchronous; /*! block until the LUT update is complete */ +}; + +struct NvKmsNIsoSurface { + NvKmsSurfaceHandle surfaceHandle; + enum NvKmsNIsoFormat format; + NvU16 offsetInWords; +}; + +struct NvKmsCompletionNotifierDescription { + struct NvKmsNIsoSurface surface; + NvBool awaken; +}; + +struct NvKmsSemaphore { + struct NvKmsNIsoSurface surface; + NvU32 value; +}; + +enum NvKmsSyncptType { + NVKMS_SYNCPT_TYPE_NONE, + NVKMS_SYNCPT_TYPE_RAW, + NVKMS_SYNCPT_TYPE_FD, +}; + +struct NvKmsSyncpt { + enum NvKmsSyncptType type; + union { + int fd; + struct { + NvU32 id; + NvU32 value; + } raw; + } u; +}; + +struct NvKmsChannelSyncObjects { + /* + * If useSyncpt is set to FALSE, clients can provide an acquisition and/or + * release semaphore via the 'syncObjects.semaphores' struct. 
+ * + * If NvKmsAllocDeviceReply::supportsIndependentAcqRelSemaphore is + * FALSE, then 'syncObjects.semaphores.acquire.surface' must be the same + * as 'syncObjects.semaphores.release.surface'. In other words, the same + * exact semaphore surface must be used for both acquire and release. + * + * If NvKmsAllocDeviceReply::supportsIndependentAcqRelSemaphore is + * TRUE, then the client is allowed to provide different semaphore + * surfaces for acquire and release. + * + * If useSyncpt is set to TRUE, clients can provide a pre-syncpt that they + * want the display engine to wait on before scanning out from the given + * buffer, and can specify that they want NVKMS to return a post-syncpt + * that they can wait on, via the 'syncObjects.syncpts' struct. + * + * The post-syncpt that NVKMS returns will be signaled once the + * buffer that was activated by this flip is displaced. As a typical + * example: + * - Client flips buffer A, and requests a post-syncpt PS. + * - Buffer A becomes active at the next frame boundary, and display + * starts scanning out buffer A. + * - Client flips buffer B. + * - Once the UPDATE for the buffer B flip is processed and display + * has finished sending the last pixel of buffer A to precomp for + * the current frame, post-syncpt PS will get signaled. + * + * Clients can use this option iff + * NvKmsAllocDeviceReply::supportsSyncpts is TRUE. + */ + NvBool useSyncpt; + + union { + struct { + struct NvKmsSemaphore acquire; + struct NvKmsSemaphore release; + } semaphores; + + struct { + struct NvKmsSyncpt pre; + enum NvKmsSyncptType requestedPostType; + } syncpts; + } u; +}; + +/*! + * Description of how to flip on a single head. + * + * viewPortIn::point describes the position of the viewPortIn that + * should be scaled to the viewPortOut of the head. The + * viewPortSizeIn is specified by NvKmsSetModeOneHeadRequest. Note + * that viewPortIn::point is in desktop coordinate space, and + * therefore applies across all layers. + * + * For YUV420 modes, the surfaces and position should be in "half" + * horizontal space. See the explanation in NvKmsMode. + * + * If 'specified' is FALSE for any of the layers, then the current + * hardware value is used. + */ +struct NvKmsFlipCommonParams { + + struct { + NvBool specified; + struct NvKmsPoint point; + } viewPortIn; + + struct { + struct NvKmsSetCursorImageCommonParams image; + NvBool imageSpecified; + + struct NvKmsMoveCursorCommonParams position; + NvBool positionSpecified; + } cursor; + + /* + * Set the output transfer function. + * + * If the output transfer function is HDR and staticMetadata is disabled + * for all the layers, the flip request will be rejected. + * + * If the output transfer function is HDR and staticMetadata is enabled + * for any of the layers, HDR output will be enabled. In this case, the + * output LUT values specified during modeset will be ignored, and the + * output LUT will be set with the specified HDR transfer function. + * + * If the output transfer function is SDR and staticMetadata is enabled, + * HDR content for that layer will be tonemapped to the SDR output + * range. + */ + struct { + enum NvKmsOutputTf val; + NvBool specified; + } tf; + + struct { + struct { + NvKmsSurfaceHandle handle[NVKMS_MAX_EYES]; + struct NvKmsRRParams rrParams; + NvBool specified; + } surface; + + /* + * sizeIn/sizeOut can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE.
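+ * + * As a sketch, a client downscaling the overlay layer to quarter area + * in a flip request 'params' (a struct NvKmsFlipCommonParams) might set: + * + *     params.layer[NVKMS_OVERLAY_LAYER].sizeIn.val = + *         (struct NvKmsSize) { .width = 1920, .height = 1080 }; + *     params.layer[NVKMS_OVERLAY_LAYER].sizeIn.specified = TRUE; + *     params.layer[NVKMS_OVERLAY_LAYER].sizeOut.val = + *         (struct NvKmsSize) { .width = 960, .height = 540 }; + *     params.layer[NVKMS_OVERLAY_LAYER].sizeOut.specified = TRUE;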
+ */ + struct { + struct NvKmsSize val; + NvBool specified; + } sizeIn; + + struct { + struct NvKmsSize val; + NvBool specified; + } sizeOut; + + /* + * Set the position of the layer, relative to the upper left + * corner of the surface. This controls the same state as + * NVKMS_IOCTL_SET_LAYER_POSITION. + * + * This field can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. + */ + struct { + struct NvKmsSignedPoint val; + NvBool specified; + } outputPosition; + + struct { + struct NvKmsCompletionNotifierDescription val; + NvBool specified; + } completionNotifier; + + struct { + struct NvKmsChannelSyncObjects val; + + /* If 'specified' is FALSE, then the current hardware value is used. */ + NvBool specified; + } syncObjects; + + /* + * If 'maxDownscaleFactors::specified' is true, nvkms will set the + * max H/V downscale usage bounds to the values specified in + * 'maxDownscaleFactors::horizontal' and 'maxDownscaleFactors::vertical'. + * + * If the 'maxDownscaleFactors::specified' values are within the bounds + * of 'NvKmsSetModeOneHeadReply::guaranteedUsage', then clients can expect + * the flip to succeed. If the 'maxDownscaleFactors::specified' values are + * beyond the bounds of 'NvKmsSetModeOneHeadReply::guaranteedUsage' but + * within 'NvKmsSetModeOneHeadReply::possibleUsage', then the request may + * legitimately fail due to insufficient display bandwidth and clients + * need to be prepared to handle that flip request failure. + * + * If 'maxDownscaleFactors::specified' is false, nvkms will calculate max + * H/V downscale factor by quantizing the range. E.g., max H/V downscale + * factor supported by HW is 4x for 5-tap and 2x for 2-tap mode. If + * 5-tap mode is required, the target usage bound that nvkms will + * attempt to program will either allow up to 2x downscaling, or up to + * 4x downscaling. If 2-tap mode is required, the target usage bound + * that NVKMS will attempt to program will allow up to 2x downscaling. + * Example: to downscale from 4096x2160 -> 2731x864 in 5-tap mode, + * NVKMS would specify up to 2x for the H downscale bound (required is + * 1.5x), and up to 4x for the V downscale bound (required is 2.5x). + */ + struct { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 vertical; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for vertical. + */ + NvU16 horizontal; + + NvBool specified; + } maxDownscaleFactors; + + NvBool tearing; + + /* + * When true, we will flip to this buffer whenever the current eye is + * finished scanning out. Otherwise, this flip will only execute after + * both eyes have finished scanout. + * + * Note that if this is FALSE and a vsynced stereo flip is requested, + * the buffers in this flip will be displayed for minPresentInterval*2 + * vblanks, one for each eye. + * + * This flag cannot be used for the overlay layer. + */ + NvBool perEyeStereoFlip; + + /* When non-zero, block the flip until PTIMER >= timeStamp. */ + NvU64 timeStamp NV_ALIGN_BYTES(8); + NvU8 minPresentInterval; + + /* This field cannot be used for the main layer right now. */ + struct { + struct NvKmsCompositionParams val; + NvBool specified; + } compositionParams; + + /* + * Color-space conversion matrix applied to the layer before + * compositing. 
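+ * (Entries use the signed fixed-point encoding of struct NvKmsCscMatrix + * above: 16 fractional bits, so 1.0 is represented as 0x10000 and, e.g., + * a coefficient of 0.5 would be encoded as 0.5 * 65536 = 0x8000.)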
+ * + * If csc::specified is TRUE and csc::useMain is TRUE, then the CSC + * matrix specified in the main layer is used instead of the one here. + * If csc::specified is FALSE, then the CSC matrix from the previous + * flip is used. csc::useMain must be set to FALSE for the main layer. + */ + struct { + NvBool specified; + NvBool useMain; + struct NvKmsCscMatrix matrix; + } csc; + + /* + * When true, all pending flips and synchronization operations are + * ignored, and the channel flips to the given buffer. A notifier and + * semaphore should not be specified if this flag is true. This flag does + * nothing if set to true for the NVKMS_IOCTL_SET_MODE ioctl. + * + * This flag allows a client to remove stalled flips and unblock + * the channel. + * + * This flag cannot be used for the overlay layer. + */ + NvBool skipPendingFlips; + + /* + * This field can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsHDR = TRUE. + * + * If staticMetadata is enabled for multiple layers, the flip request + * will be rejected. + */ + struct { + NvBool specified; + /*! + * If TRUE, enable HDR static metadata. If FALSE, disable it. + * + * Note that "specified" serves to mark the field as being changed + * in this flip request, rather than as specified for this frame. + * So to disable HDR static metadata, set hdr.specified = TRUE and + * hdr.staticMetadata.enabled = FALSE. + */ + NvBool enabled; + struct NvKmsHDRStaticMetadata staticMetadata; + } hdr; + + /* This field has no effect right now. */ + struct { + enum NvKmsInputColorSpace val; + NvBool specified; + } colorspace; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +struct NvKmsFlipCommonReplyOneHead { + struct { + struct NvKmsSyncpt postSyncpt; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +/*! + * NVKMS_IOCTL_ALLOC_DEVICE: Allocate an NVKMS device object. + * + * This has the scope of a resman SLI device. + * + * Multiple clients can allocate devices (DRM-KMS, multiple X + * servers). Clients should configure SLI before initializing NVKMS. + * NVKMS will query resman for the current SLI topology. + * + * The SLI configuration (both the linked SLI device, and the sliMosaic + * boolean below) will be latched when the specified GPU transitions + * from zero NVKMS devices allocated to one NVKMS device allocated. + * + * The returned information will remain static until the NVKMS device + * object is freed. + */ + +struct NvKmsAllocDeviceRequest { + /*! + * Clients should populate versionString with the value of + * NV_VERSION_STRING from nvUnixVersion.h. This is used for a + * version handshake. + */ + char versionString[NVKMS_NVIDIA_DRIVER_VERSION_STRING_LENGTH]; + + /*! + * The (primary) GPU for this device; this is used as the value + * for NV0080_ALLOC_PARAMETERS::deviceId. + */ + NvU32 deviceId; + + /*! + * Whether SLI Mosaic is requested: i.e., multiple disps, one + * per physical GPU, for the SLI device. + */ + NvBool sliMosaic; + + /*! + * When tryInferSliMosaicFromExistingDevice=TRUE, then the above + * 'sliMosaic' field is ignored and the ALLOC_DEVICE request will + * inherit the current sliMosaic state of the existing device + * identified by deviceId. If there is not an existing device for + * deviceId, then the ALLOC_DEVICE request will proceed normally, honoring + * the requested sliMosaic state. + */ + NvBool tryInferSliMosaicFromExistingDevice; + + /*! + * NVKMS will use the 3D engine for headSurface. If clients want to avoid + * the use of the 3D engine, set no3d = TRUE. Note this will cause modesets + * that require headSurface to fail.
+ * + * This flag is only honored when there is not already an existing device + * for the deviceId. + */ + NvBool no3d; + + /*! + * When enableConsoleHotplugHandling is TRUE, NVKMS will start handling + * hotplug events at the console when no modeset owner is present. + * + * If FALSE, console hotplug handling behavior is not changed. + * + * This should be set to TRUE for clients that intend to allocate the device + * but don't intend to become the modeset owner right away. It should be set + * to FALSE for clients that may take modeset ownership immediately, in + * order to suppress hotplug handling between the NVKMS_IOCTL_ALLOC_DEVICE + * and NVKMS_IOCTL_GRAB_OWNERSHIP calls when the calling client is the first + * to allocate the device. + * + * Note that NVKMS_IOCTL_RELEASE_OWNERSHIP also enables console hotplug + * handling. Once enabled, console hotplug handling remains enabled until + * the last client frees the device. + */ + NvBool enableConsoleHotplugHandling; + + struct { + /* name[0] == '\0' for unused registryKeys[] array elements. */ + char name[NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN]; + NvU32 value; + } registryKeys[NVKMS_MAX_DEVICE_REGISTRY_KEYS]; +}; + +enum NvKmsAllocDeviceStatus { + NVKMS_ALLOC_DEVICE_STATUS_SUCCESS, + NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH, + NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST, + NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR, + NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE, + NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED, +}; + + +struct NvKmsAllocDeviceReply { + + enum NvKmsAllocDeviceStatus status; + + /*! + * The handle to use when identifying this NVKMS device in + * subsequent calls. + */ + NvKmsDeviceHandle deviceHandle; + + /*! + * A bitmask indicating the GPUs, one per bit, contained by this + * device. + */ + NvU32 subDeviceMask; + + /*! The number of heads on each disp. */ + NvU32 numHeads; + + /*! The number of disps. */ + NvU32 numDisps; + + /*! The handle to identify each disp, in dispHandles[0..numDisps). */ + NvKmsDispHandle dispHandles[NVKMS_MAX_SUBDEVICES]; + + /*! + * Device-wide capabilities of the display engine. + * + * IMPLEMENTATION NOTE: this is the portion of DispHalRec::caps + * that can vary between EVO classes. + */ + NvBool supportsInbandStereoSignaling; + NvBool requiresVrrSemaphores; + NvBool inputLutAppliesToBase; + + /*! + * Whether the client can allocate and manipulate SwapGroup objects via + * NVKMS_IOCTL_ALLOC_SWAP_GROUP and friends. + */ + NvBool supportsSwapGroups; + + /*! + * Whether the NVKMS SwapGroup implementation supports Warp and Blend on + * this device. + */ + NvBool supportsWarpAndBlend; + + /*! + * When nIsoSurfacesInVidmemOnly=TRUE, then only video memory + * surfaces can be used for the surface in + * NvKmsCompletionNotifierDescription or NvKmsSemaphore. + */ + NvBool nIsoSurfacesInVidmemOnly; + + /* + * When requiresAllAllocationsInSysmem=TRUE, then all memory allocations + * that will be accessed by display must come from sysmem. + */ + NvBool requiresAllAllocationsInSysmem; + + /* + * Whether the device that NVKMS is driving supports headSurface GPU + * composition. + */ + NvBool supportsHeadSurface; + + /*! + * The display engine supports a "legacy" format for notifiers and + * semaphores (one word for semaphores and base channel notifiers; + * two words for overlay notifiers). On newer GPUs, the display + * engine also supports a similar four word semaphore and notifier + * format used by graphics. + * + * This describes which values are valid for NvKmsNIsoFormat.
+ * + * Iff a particular enum NvKmsNIsoFormat 'value' is supported, + * then (1 << value) will be set in validNIsoFormatMask. + */ + NvU8 validNIsoFormatMask; + + /*! + * Which NvKmsResamplingMethod enum values are supported by the NVKMS + * device. + * + * Iff a particular enum NvKmsResamplingMethod 'value' is supported, then (1 + * << value) will be set in validResamplingMethodMask. + */ + NvU32 validResamplingMethodMask; + + NvU32 surfaceAlignment; + NvU32 maxWidthInBytes; + NvU32 maxWidthInPixels; + NvU32 maxHeightInPixels; + NvU32 maxCursorSize; + + /*! + * The page kind used by the GPU's MMU for uncompressed block-linear color + * formats. + */ + NvU8 genericPageKind; + + /*! + * Describes the supported Color Key selects and blending modes for match + * and nomatch cursor pixels. + */ + struct NvKmsCompositionCapabilities cursorCompositionCaps; + + /*! The number of layers attached to each head. */ + NvU32 numLayers[NVKMS_MAX_HEADS_PER_DISP]; + + /*! + * Describes supported functionalities for each layer. + */ + struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD]; + + /*! + * This bitmask specifies all of the (rotation, reflectionX, reflectionY) + * combinations that are supported for the main and overlay layers. + * Each bit in this bitmask is mapped to one combination per the scheme + * in NvKmsRRParamsToCapBit(). + */ + NvU16 validLayerRRTransforms; + + /*! + * IO coherency modes that the display supports for ISO and NISO memory + * allocations, respectively. + */ + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + + /*! + * 'displayIsGpuL2Coherent' indicates whether the display is coherent with + * the GPU's L2 cache. + */ + NvBool displayIsGpuL2Coherent; + + /*! + * 'supportsSyncpts' indicates whether NVKMS supports the use of syncpts + * for synchronization. + */ + NvBool supportsSyncpts; + + /*! + * 'supportsIndependentAcqRelSemaphore' indicates whether HW supports + * configuring different semaphores for acquire and release for a buffer + * flip on a given layer. + */ + NvBool supportsIndependentAcqRelSemaphore; + + /*! + * 'supportsVblankSyncObjects' indicates whether HW supports raster + * generator sync objects that signal at vblank. + */ + NvBool supportsVblankSyncObjects; +}; + +struct NvKmsAllocDeviceParams { + struct NvKmsAllocDeviceRequest request; /*! in */ + struct NvKmsAllocDeviceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_FREE_DEVICE: Free the NVKMS device object specified by + * deviceHandle. + * + * The underlying device is not actually freed until all callers of + * NVKMS_IOCTL_ALLOC_DEVICE have freed their reference to the device. + * + * When a client calls FREE_DEVICE, any configuration specified by + * that client will be removed: + * - Any EDID overrides. + * - Any interest declared on dynamic dpys. + * - Any cursor image on any head. + * - Any custom LUT contents. + * - Any interest declared on any events. + * + * XXX define how FREE_DEVICE interacts with: + * - concurrent X servers on different VTs + * - console restore + */ + +struct NvKmsFreeDeviceRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsFreeDeviceReply { + NvU32 padding; +}; + +struct NvKmsFreeDeviceParams { + struct NvKmsFreeDeviceRequest request; /*! in */ + struct NvKmsFreeDeviceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DISP: Query information about the NVKMS disp + * object specified by the tuple (deviceHandle, dispHandle).
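+ * + * Pseudocode example usage pattern (an editor's sketch in the style of the + * NVKMS_IOCTL_VALIDATE_MODE_INDEX example below; 'InspectConnectors' is an + * illustrative helper, not part of this API): + * + * for (i = 0; i < allocDeviceReply.numDisps; i++) { + * params.request.deviceHandle = deviceHandle; + * params.request.dispHandle = allocDeviceReply.dispHandles[i]; + * ioctl(&params); (i.e., NVKMS_IOCTL_QUERY_DISP) + * InspectConnectors(params.reply.connectorHandles, + * params.reply.numConnectors); + * }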
+ * + * The returned information will remain static until the NVKMS device + * object is freed. + */ + +struct NvKmsQueryDispRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; +}; + +struct NvKmsQueryDispReply { + /*! + * The instance of the subdevice that owns this disp. + * NVBIT(displayOwner) will be present in subDeviceMask. + */ + NvU32 displayOwner; + + /*! A bitmask of the device's subdevices used by this disp. */ + NvU32 subDeviceMask; + + /*! The possible dpys for this disp, excluding any dynamic dpys. */ + NVDpyIdList validDpys; + + /*! The dpys that were driven at boot-time, if any. */ + NVDpyIdList bootDpys; + + /*! The dpys that are capable of dynamic mux switching, if any. */ + NVDpyIdList muxDpys; + + /*! The framelock device, if any, connected to this disp. */ + NvKmsFrameLockHandle frameLockHandle; + + /*! The number of connectors on this disp. */ + NvU32 numConnectors; + + /*! + * The handle to identify each connector, in + * connectorHandles[0..numConnectors) + */ + NvKmsConnectorHandle connectorHandles[NVKMS_MAX_CONNECTORS_PER_DISP]; + + /*! + * A string describing one of the GPUs used by this disp. The + * NVKMS log will also print this string to the kernel log. Users + * should be able to correlate GPUs between NVKMS and NVKMS + * clients using this string. + */ + char gpuString[NVKMS_GPU_STRING_SIZE]; +}; + +struct NvKmsQueryDispParams { + struct NvKmsQueryDispRequest request; /*! in */ + struct NvKmsQueryDispReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA: Query information about the NVKMS + * connector object specified by the triplet (deviceHandle, dispHandle, + * connectorHandle). + * + * The returned information will remain static until the NVKMS device + * object is freed. + */ + +struct NvKmsQueryConnectorStaticDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvKmsConnectorHandle connectorHandle; +}; + +struct NvKmsQueryConnectorStaticDataReply { + NVDpyId dpyId; + NvBool isDP; + NvBool isLvds; + NvBool locationOnChip; + NvU32 legacyTypeIndex; + NvKmsConnectorType type; + NvU32 typeIndex; + NvKmsConnectorSignalFormat signalFormat; + NvU32 physicalIndex; + NvU32 physicalLocation; + + /* Bitmask of valid heads to drive dpy(s) on this connector. */ + NvU32 headMask; +}; + +struct NvKmsQueryConnectorStaticDataParams { + struct NvKmsQueryConnectorStaticDataRequest request; /*! in */ + struct NvKmsQueryConnectorStaticDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA: Query dynamic information about the + * NVKMS connector object specified by the triplet (deviceHandle, dispHandle, + * connectorHandle). + */ + +struct NvKmsQueryConnectorDynamicDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvKmsConnectorHandle connectorHandle; +}; + +struct NvKmsQueryConnectorDynamicDataReply { +#define NVKMS_DP_DETECT_COMPLETE_POLL_INTERVAL_USEC 100000 /* in microseconds */ +#define NVKMS_DP_DETECT_COMPLETE_TIMEOUT_USEC 10000000 /* in microseconds */ + + /* + * For DisplayPort devices, indicates whether the DisplayPort library is + * finished detecting devices on this connector. This is set to TRUE for + * other devices because NVKMS knows as soon as ALLOC_DEVICE is complete + * whether the device is connected or not. + */ + NvBool detectComplete; + /* + * Contains the list of display IDs for dynamic dpys detected on this + * connector.
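+ * + * Pseudocode example (editor's sketch): DisplayPort clients typically poll + * this ioctl, using the constants above, until detectComplete is TRUE + * before consuming this list: + * + * for (t = 0; t < NVKMS_DP_DETECT_COMPLETE_TIMEOUT_USEC; + * t += NVKMS_DP_DETECT_COMPLETE_POLL_INTERVAL_USEC) { + * ioctl(&params); (i.e., NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA) + * if (params.reply.detectComplete) break; + * usleep(NVKMS_DP_DETECT_COMPLETE_POLL_INTERVAL_USEC); + * }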
+ */ + NVDpyIdList dynamicDpyIdList; +}; + +struct NvKmsQueryConnectorDynamicDataParams { + struct NvKmsQueryConnectorDynamicDataRequest request; /*! in */ + struct NvKmsQueryConnectorDynamicDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DPY_STATIC_DATA: Query static information about + * the NVKMS dpy object specified by the triplet (deviceHandle, + * dispHandle, dpyId). This information should remain static for the + * lifetime of the dpy. + */ + +struct NvKmsQueryDpyStaticDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + +struct NvKmsQueryDpyStaticDataReply { + NvKmsConnectorHandle connectorHandle; /*! The connector driving this dpy. */ + NvU32 type; /*! NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_ */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + NvBool mobileInternal; + NvBool isDpMST; +}; + +struct NvKmsQueryDpyStaticDataParams { + struct NvKmsQueryDpyStaticDataRequest request; /*! in */ + struct NvKmsQueryDpyStaticDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA: Query dynamic information about + * the NVKMS dpy object specified by the triplet (deviceHandle, + * dispHandle, dpyId). + * + * This information should be re-queried after an + * NVKMS_EVENT_TYPE_DPY_CHANGED event. + */ + +struct NvKmsQueryDpyDynamicDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + + NvBool forceConnected; + NvBool forceDisconnected; + NvBool overrideEdid; + NvBool ignoreEdid; + NvBool ignoreEdidChecksum; + NvBool allowDVISpecPClkOverride; + NvBool dpInbandStereoSignaling; + NvBool disableACPIBrightnessHotkeys; + + /* + * If overrideEdid is TRUE, then edid::buffer[] contains an EDID + * to override anything detected. + */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_EDID_BUFFER_SIZE]; + } edid; +}; + +enum NvKmsDpyVRRType { + NVKMS_DPY_VRR_TYPE_NONE, + NVKMS_DPY_VRR_TYPE_GSYNC, + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_DEFAULTLISTED, + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_NON_DEFAULTLISTED, +}; + +struct NvKmsQueryDpyDynamicDataReply { + char name[NVKMS_DPY_NAME_SIZE]; + + NvU32 maxPixelClockKHz; + NvBool connected; + NvBool isVirtualRealityHeadMountedDisplay; + + struct { + NvU8 heightInCM; /* vertical screen size */ + NvU8 widthInCM; /* horizontal screen size */ + } physicalDimensions; + + /*! + * Which VRR type has been selected for this display, either true + * G-SYNC, Adaptive-Sync defaultlisted, or Adaptive-Sync non-defaultlisted. + */ + enum NvKmsDpyVRRType vrrType; + + struct { + NvBool supported; + NvBool isDLP; + NvBool isAegis; + NvU32 subType; /*! STEREO_PLUG_AND_PLAY_ from nvStereoDisplayDef.h */ + } stereo3DVision; + + struct { + struct { + NvBool valid; + NvU8 buffer[NVKMS_GUID_SIZE]; + char str[NVKMS_GUID_STRING_SIZE]; + } guid; + } dp; + + struct { + /*! + * The size of the EDID in buffer[], or 0 if there is no EDID + * available in buffer[]. + */ + NvU16 bufferSize; + + /*! + * Whether NVKMS determined that the EDID is valid. If the + * EDID is not valid, there may still be information available + * in infoString: the infoString will describe why the EDID + * was deemed invalid. + */ + NvBool valid; + + /*! + * The raw EDID bytes. + */ + NvU8 buffer[NVKMS_EDID_BUFFER_SIZE]; + + /*! + * Parsed information from the EDID. For the raw EDID bytes, + * see NvKmsQueryDpyDynamicDataParams::edid::buffer[]. 
+ */ + char infoString[NVKMS_EDID_INFO_STRING_LENGTH]; + } edid; +}; + +struct NvKmsQueryDpyDynamicDataParams { + struct NvKmsQueryDpyDynamicDataRequest request; /*! in */ + struct NvKmsQueryDpyDynamicDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_VALIDATE_MODE_INDEX: Validate a particular mode from a + * dpy's candidate modes. + * + * NVKMS can consider modes from a dpy's EDID, as well as a + * variety of builtin modes. + * + * This ioctl identifies one of those candidate modes by index. NVKMS + * will attempt to validate that candidate mode for the dpy, using the + * specified mode validation parameters. + * + * If the mode index is beyond the end of the list of candidate modes, + * reply::end will be TRUE. Otherwise, reply::end will be FALSE, and + * reply::mode will contain the candidate mode. + * + * If the mode is valid, then reply::valid will be TRUE. Otherwise, + * reply::valid will be FALSE. In either case, request::pInfoString[] + * will contain a description of what happened during mode validation. + * + * To query the full modepool, clients should repeatedly call + * NVKMS_IOCTL_VALIDATE_MODE_INDEX with increasing mode index values, + * until NVKMS reports end==TRUE. + * + * Note that the candidate mode list can change when the dpy changes + * (reported by the NVKMS_EVENT_TYPE_DPY_CHANGED event). The client + * should restart its modepool querying if it receives a DPY_CHANGED + * event. The candidate mode list can also change based on the + * parameters in request::modeValidation. Clients should not change + * request::modeValidation while looping over candidate mode indices. + * + * Pseudocode example usage pattern: + * + * struct NvKmsModeValidationParams modeValidation = Initialize(); + * + * retry: + * NvU32 modeIndex = 0; + * + * while (1) { + * char infoString[INFO_STRING_LENGTH]; + * memset(&params, 0, sizeof(params)); + * params.request.dpyId = dpyId; + * params.request.modeIndex = modeIndex++; + * params.request.modeValidation = modeValidation; + * params.request.pInfoString = nvKmsPointerToNvU64(infoString); + * params.request.infoStringSize = sizeof(infoString); + * + * ioctl(&params); + * + * if (params.reply.end) break; + * + * print(infoString); + * + * if (params.reply.valid) { + * AddToModePool(params.reply.mode); + * } + * } + * + * if (dpyChanged) goto retry; + * + */ + +struct NvKmsValidateModeIndexRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + struct NvKmsModeValidationParams modeValidation; + NvU32 modeIndex; + + /* + * Pointer to a string of size 'infoStringSize'. + * Use nvKmsPointerToNvU64() to assign pInfoString. + * The maximum size allowed is + * NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH. + */ + NvU32 infoStringSize; + NvU64 pInfoString NV_ALIGN_BYTES(8); +}; + +struct NvKmsValidateModeIndexReply { + NvBool end; + NvBool valid; + + struct NvKmsMode mode; + + /*! The validSyncs used by NVKMS when validating the mode. */ + struct NvKmsModeValidationValidSyncs validSyncs; + + /*! Whether this mode is marked as "preferred" by the EDID. */ + NvBool preferredMode; + + /*! A text description of the mode. */ + char description[64]; + + /*! Where the mode came from. */ + enum NvKmsModeSource { + NvKmsModeSourceUnknown = 0, + NvKmsModeSourceEdid = 1, + NvKmsModeSourceVesa = 2, + } source; + + /* The number of bytes written to 'pInfoString' (from the request) */ + NvU32 infoStringLenWritten; + + /*! + * These are the usage bounds that may be possible with this mode, + * assuming that only one head is active.
For actual usage bounds, + * see guaranteedUsage and possibleUsage returned in + * NvKmsSetModeOneHeadReply. + */ + struct NvKmsUsageBounds modeUsage; +}; + +struct NvKmsValidateModeIndexParams { + struct NvKmsValidateModeIndexRequest request; /*! in */ + struct NvKmsValidateModeIndexReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_VALIDATE_MODE: Validate an individual mode for the + * specified dpy. + * + * Given the validation parameters, NVKMS will test whether the given + * mode is currently valid for the specified dpy. + * + * If the mode is valid, then reply::valid will be TRUE. Otherwise, + * reply::valid will be FALSE. In either case, reply::infoString[] + * will contain a description of what happened during mode validation. + */ + +struct NvKmsValidateModeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + struct NvKmsModeValidationParams modeValidation; + struct NvKmsMode mode; + + /* + * Pointer to a string of size 'infoStringSize'. + * Use nvKmsPointerToNvU64() to assign pInfoString. + * The maximum size allowed is + * NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH. + */ + NvU32 infoStringSize; + NvU64 pInfoString NV_ALIGN_BYTES(8); +}; + +struct NvKmsValidateModeReply { + NvBool valid; + + /*! The validSyncs used by NVKMS when validating the mode. */ + struct NvKmsModeValidationValidSyncs validSyncs; + + /* The number of bytes written to 'pInfoString' (from the request) */ + NvU32 infoStringLenWritten; + + /*! + * These are the usage bounds that may be possible with this mode, + * assuming that only one head is active. For actual usage bounds, + * see guaranteedUsage and possibleUsage returned in + * NvKmsSetModeOneHeadReply. + */ + struct NvKmsUsageBounds modeUsage; +}; + +struct NvKmsValidateModeParams { + struct NvKmsValidateModeRequest request; /*! in */ + struct NvKmsValidateModeReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_MODE: Perform a modeset. + * + * NvKmsSetModeRequest can describe the modetiming configuration + * across all heads of all disps within the SLI device. + * + * The elements in NvKmsSetModeRequest::disp[] correspond to the disps + * returned in NvKmsAllocDeviceReply::dispHandles[]. + * + * To only touch certain heads and disps, use the + * requestedHeadsBitMask and requestedDispsBitMask fields to limit + * which array elements are honored. + * + * If the request is invalid, one or more of the + * NvKmsSetMode{,OneDisp,OneHead}Reply::status fields will have a + * non-SUCCESS value. If the mode set completed successfully, then + * all NvKmsSetMode{,OneDisp,OneHead}Reply::status fields should be + * SUCCESS. + */ + +struct NvKmsSetModeHeadSurfaceParams { + NvBool forceCompositionPipeline; + NvBool forceFullCompositionPipeline; + NvBool fakeOverlay; + NvBool blendAfterWarp; + NvBool transformSpecified; + + /* Reflect the image along the X axis. */ + NvBool reflectionX; + + /* Reflect the image along the Y axis. */ + NvBool reflectionY; + + /* + * Rotate the image counter-clockwise in 90 degree increments. + * + * Reflection (specified above by ::reflection[XY]) is applied + * before rotation. This matches the semantics of RandR. From: + * + * https://cgit.freedesktop.org/xorg/proto/randrproto/tree/randrproto.txt + * + * "Rotation and reflection and how they interact can be confusing. In + * Randr, the coordinate system is rotated in a counter-clockwise direction + * relative to the normal orientation.
Reflection is along the window system + * coordinate system, not the physical screen X and Y axis, so that rotation + * and reflection do not interact. The other way to consider reflection is + * to is specified in the 'normal' orientation, before rotation, if you find + * the other way confusing." + */ + enum NvKmsRotation rotation; + enum NvKmsPixelShiftMode pixelShift; + enum NvKmsResamplingMethod resamplingMethod; + struct NvKmsMatrix transform; /* Only honored if transformSpecified. */ + + NvKmsSurfaceHandle blendTexSurfaceHandle; + NvKmsSurfaceHandle offsetTexSurfaceHandle; + + /* + * When warpMesh::surfaceHandle is non-zero, it indicates a surface + * containing warp mesh vertex data. The surface should: + * + * - Have a width that is a multiple of 1024 pixels. + * - Have a depth of 32 bits. + * - Contain a binary representation of a list of six-component + * vertices. Each of these components is a 32-bit floating point value. + * + * The X, Y components should contain normalized vertex coordinates, to be + * rendered as a triangle list or strip. The X and Y components' [0,1] + * range maps to the head's ViewportOut X and Y, respectively. + * + * The U, V, R, and Q components should contain normalized, projective + * texture coordinates: + * + * U, V: 2D texture coordinate. U and V components' [0,1] range maps to the + * display's MetaMode ViewportIn X and Y, respectively. + * + * R: unused + * + * Q: Used for interpolation purposes. This is typically the third + * component of the result of a multiplication by a 3x3 projective transform + * matrix. + * + * warpMesh::vertexCount should contain the number of vertices stored in the + * surface. + * + * warpMesh::dataType indicates whether the vertices describe a triangle + * list or a triangle strip. A triangle list must have a vertexCount that is + * a multiple of 3. + */ + struct { + NvKmsSurfaceHandle surfaceHandle; + NvU32 vertexCount; + enum NvKmsWarpMeshDataType dataType; + } warpMesh; +}; + +#define NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE 10 // 10 Hz + +enum NvKmsAllowAdaptiveSync { + NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED = 0, + NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY, + NVKMS_ALLOW_ADAPTIVE_SYNC_ALL, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE attribute. */ +enum NvKmsDpyAttributeRequestedColorSpaceValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422 = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444 = 2, +}; + +/*! + * Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE and + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE attributes. + */ +enum NvKmsDpyAttributeColorRangeValue { + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL = 0, + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED = 1, +}; + +struct NvKmsSetModeOneHeadRequest { + /*! + * The list of dpys to drive with this head; or, empty to disable + * the head. + */ + NVDpyIdList dpyIdList; + + /*! The modetimings to set on the head. */ + struct NvKmsMode mode; + + /*! The above mode will be validated, using these validation parameters. */ + struct NvKmsModeValidationParams modeValidationParams; + + /*! + * The region within the raster timings that should contain an image. + * This is only used when viewPortOutSpecified is TRUE. Otherwise, the + * viewPortOut is inferred from the raster timings. + * + * For YUV420 modes, the viewPortOut should be in "full" + * horizontal space. See the explanation in NvKmsMode. + */ + struct NvKmsRect viewPortOut;
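+ + /* + * Example (editor's sketch; the scaling behavior follows from the + * viewport definitions above and below): to scale a 1920x1080 desktop to + * fill a 3840x2160 mode, a client might set viewPortSizeIn (below) to + * 1920x1080 and viewPortOut to the full 3840x2160 raster, with + * viewPortOutSpecified = TRUE. + */ + + /*!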
+ * The size, in pixels, that the head will fetch from any surface + * it scans from. The viewPortPointIn is specified in + * NvKmsFlipCommonParams. + * + * For YUV420 modes, the viewPortSizeIn should be in "half" + * horizontal space. See the explanation in NvKmsMode. + */ + struct NvKmsSize viewPortSizeIn; + + /*! + * Describe the LUT to be used with the modeset. + */ + struct NvKmsSetLutCommonParams lut; + + /*! + * Describe the surfaces to present on this head. + */ + struct NvKmsFlipCommonParams flip; + + /*! + * The headSurface configuration requested, if any. + */ + struct NvKmsSetModeHeadSurfaceParams headSurface; + + NvBool viewPortOutSpecified; /*! Whether to use viewPortOut. */ + + /*! + * Allow this head to be flipLocked to any other heads, set as + * part of this NVKMS_IOCTL_SET_MODE, that also have allowFlipLock + * set. FlipLock will only be enabled if additional criteria, + * such as identical modetimings, are also met. + */ + NvBool allowFlipLock; + + /*! + * Allow G-SYNC to be enabled on this head if it is supported by the GPU + * and monitor. + */ + NvBool allowGsync; + + /*! + * Whether to allow Adaptive-Sync to be enabled on this head if it is + * supported by the GPU: + * + * NVKMS_ALLOW_ADAPTIVE_SYNC_ALL: + * VRR is enabled as long as this monitor supports Adaptive-Sync. + * + * NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY: + * VRR is only enabled on this head if the monitor is on the + * Adaptive-Sync defaultlist. + * + * NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED: + * VRR is forced to be disabled if this is an Adaptive-Sync monitor. + */ + enum NvKmsAllowAdaptiveSync allowAdaptiveSync; + + /*! + * Override the minimum refresh rate for VRR monitors specified by the + * EDID (0 to not override the EDID-provided value). Clamped at modeset + * time to within NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE of the + * EDID-specified minimum refresh rate, as long as the minimum is no + * lower than 1 Hz and the maximum does not exceed the maximum refresh rate + * defined by the mode timings. The current minimum refresh rate and this + * valid range are exposed through + * NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE. + * + * Does not affect G-SYNC monitors, which do not have a minimum refresh + * rate. + */ + NvU32 vrrOverrideMinRefreshRate; + + /*! + * Output colorspace. Valid only when colorSpaceSpecified is true. + */ + enum NvKmsDpyAttributeRequestedColorSpaceValue colorSpace; + NvBool colorSpaceSpecified; + + /*! + * Output color range. Valid only when colorRangeSpecified is true. + */ + enum NvKmsDpyAttributeColorRangeValue colorRange; + NvBool colorRangeSpecified; +}; + +struct NvKmsSetModeOneDispRequest { + /*! + * The bit mask of which head[] elements to look at on this disp; + * any other head will use its existing configuration. + */ + NvU32 requestedHeadsBitMask; + struct NvKmsSetModeOneHeadRequest head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsSetModeRequest { + NvKmsDeviceHandle deviceHandle; + + /*! + * When a modeset request is made, NVKMS will first perform + * validation to confirm whether the request can be satisfied. If + * the requested configuration cannot be fulfilled, the request + * is rejected. + * + * Only the modeset owner can issue a modeset with commit set to TRUE. + * + * If 'commit' is FALSE, then the status of validation will be returned. + * + * If 'commit' is TRUE, and validation passes, then NVKMS will + * apply the requested configuration. + */ + NvBool commit;
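+ + /* + * Pseudocode example (editor's sketch): validate first, then commit. + * 'AllStatusFieldsAreSuccess' is an illustrative helper, not part of + * this API: + * + * params.request.commit = FALSE; + * ioctl(&params); (i.e., NVKMS_IOCTL_SET_MODE) + * if (AllStatusFieldsAreSuccess(&params.reply)) { + * params.request.commit = TRUE; + * ioctl(&params); + * } + */ + + /*!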
+ * The bitmask of which indices within disp[] describe requested + * configuration changes. Any other disps will use their existing + * configuration. + */ + NvU32 requestedDispsBitMask; + + /* + * disp[n] corresponds to the disp named by + * NvKmsAllocDeviceReply::dispHandles[n]. + */ + struct NvKmsSetModeOneDispRequest disp[NVKMS_MAX_SUBDEVICES]; + + /*! + * Whether to use NVKMS's builtin headSurface support when necessary. + * + * XXX NVKMS HEADSURFACE TODO: Make this the default and remove this field. + */ + NvBool allowHeadSurfaceInNvKms; +}; + +enum NvKmsSetModeOneHeadStatus { + NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE = 1, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY = 2, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_CURSOR_IMAGE = 3, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_CURSOR_POSITION = 4, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_LUT = 5, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP = 6, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS = 7, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_HEAD_SURFACE = 8, + NVKMS_SET_MODE_ONE_HEAD_STATUS_UNSUPPORTED_HEAD_SURFACE_COMBO = 9, + NVKMS_SET_MODE_ONE_HEAD_STATUS_UNSUPPORTED_HEAD_SURFACE_FEATURE = 10, +}; + +struct NvKmsSetModeOneHeadReply { + /*! + * When the NVKMS_IOCTL_SET_MODE succeeds, then this will be SUCCESS. + * Otherwise, 'status' will be a non-SUCCESS value for one or more + * heads and/or one or more disps. + * + * Note that a failure could occur for a preexisting head + * configuration, so this status could be != SUCCESS for a head + * not listed in NvKmsSetModeOneDispRequest::requestedHeadsBitMask. + */ + enum NvKmsSetModeOneHeadStatus status; + + /*! + * The identifier that we use to talk to RM about the display + * device(s) driven by this head. For DP MST, it is the identifier + * of the DisplayPort library group to which the MST device belongs. + * Otherwise, it is the identifier of the connector. + */ + NvU32 activeRmId; + + /*! + * The usage bounds that may be possible on this head based on the ISO + * BW at that point. + * + * If a flip request is within the bounds of NvKmsSetModeOneHeadReply:: + * guaranteedUsage, then clients can expect the flip to succeed. + * If a flip request is beyond the bounds of NvKmsSetModeOneHeadReply:: + * guaranteedUsage but within NvKmsSetModeOneHeadReply::possibleUsage, + * then the request may legitimately fail due to insufficient display + * bandwidth and clients need to be prepared to handle that flip + * request failure. + */ + struct NvKmsUsageBounds possibleUsage; + + /*! + * The guaranteed usage bounds usable on this head. + */ + struct NvKmsUsageBounds guaranteedUsage; + + /*! + * Whether NVKMS chose to use headSurface on this head. + */ + NvBool usingHeadSurface; + + /*! + * Whether NVKMS enabled VRR on this head. + */ + NvBool vrrEnabled; + + /*! + * Contains the 'postSyncObject' that the client requested via + * NvKmsSetModeOneHeadRequest::flip. + */ + struct NvKmsFlipCommonReplyOneHead flipReply; +}; + +enum NvKmsSetModeOneDispStatus { + NVKMS_SET_MODE_ONE_DISP_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK = 1, + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_EXTENDED_GPU_CAPABILITIES_CHECK = 2, + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_DISPLAY_PORT_BANDWIDTH_CHECK = 3, + NVKMS_SET_MODE_ONE_DISP_STATUS_INCOMPATIBLE_DPYS = 4, + NVKMS_SET_MODE_ONE_DISP_STATUS_DUPLICATE_DPYS = 5, +}; + +struct NvKmsSetModeOneDispReply { + /*! + * When the NVKMS_IOCTL_SET_MODE succeeds, then this will be SUCCESS. 
+ * Otherwise, 'status' will be a non-SUCCESS value for one or more + * heads and/or one or more disps. + * + * Note that a failure could occur for a preexisting disp + * configuration, so this status could be != SUCCESS for a disp + * not listed in NvKmsSetModeRequest::requestedDispsBitMask. + */ + enum NvKmsSetModeOneDispStatus status; + struct NvKmsSetModeOneHeadReply head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +enum NvKmsSetModeStatus { + NVKMS_SET_MODE_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK = 1, + NVKMS_SET_MODE_STATUS_NOT_MODESET_OWNER = 2, +}; + +struct NvKmsSetModeReply { + enum NvKmsSetModeStatus status; + struct NvKmsSetModeOneDispReply disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsSetModeParams { + struct NvKmsSetModeRequest request; /*! in */ + struct NvKmsSetModeReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_CURSOR_IMAGE: Set the cursor image for the + * specified head. + */ + +struct NvKmsSetCursorImageRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsSetCursorImageCommonParams common; +}; + +struct NvKmsSetCursorImageReply { + NvU32 padding; +}; + +struct NvKmsSetCursorImageParams { + struct NvKmsSetCursorImageRequest request; /*! in */ + struct NvKmsSetCursorImageReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_MOVE_CURSOR: Set the cursor position for the specified + * head. + * + * x,y are relative to the current viewPortIn configured on the head. + */ + +struct NvKmsMoveCursorRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsMoveCursorCommonParams common; +}; + +struct NvKmsMoveCursorReply { + NvU32 padding; +}; + +struct NvKmsMoveCursorParams { + struct NvKmsMoveCursorRequest request; /*! in */ + struct NvKmsMoveCursorReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_LUT: Set the LUT contents for the specified head. + */ + +struct NvKmsSetLutRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsSetLutCommonParams common; +}; + +struct NvKmsSetLutReply { + NvU32 padding; +}; + +struct NvKmsSetLutParams { + struct NvKmsSetLutRequest request; /*! in */ + struct NvKmsSetLutReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_IDLE_BASE_CHANNEL: Wait for the base channel to be idle on + * the requested heads on the requested subdevices of a device. + * + * Each (head,sd) pair to be idled is described by: + * + * subDevicesPerHead[head] |= NVBIT(sd) + */ + +struct NvKmsIdleBaseChannelRequest { + NvKmsDeviceHandle deviceHandle; + NvU32 subDevicesPerHead[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsIdleBaseChannelReply { + /*! + * If stopping the base channel is necessary due to a timeout, (head,sd) + * pairs will be described with: + * + * stopSubDevicesPerHead[head] |= NVBIT(sd) + * + * indicating that semaphore releases from the stalled channels may not have + * occurred. + */ + NvU32 stopSubDevicesPerHead[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsIdleBaseChannelParams { + struct NvKmsIdleBaseChannelRequest request; /*! in */ + struct NvKmsIdleBaseChannelReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_FLIP: Flip one or more heads on the subdevices of a device. + * + * At least one head on one subdevice must be specified in a flip request. + */ + +struct NvKmsFlipRequestOneSubDevice { + /*! + * The bit mask of which head[] elements to look at on this disp. 
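+ * For example (illustrative), to flip only head 2 on this subdevice, set + * requestedHeadsBitMask = NVBIT(2) and populate head[2].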
+ */ + NvU32 requestedHeadsBitMask; + struct NvKmsFlipCommonParams head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsFlipRequest { + NvKmsDeviceHandle deviceHandle; + + /* + * sd[n] corresponds to bit N in NvKmsQueryDispReply::subDeviceMask and + * NvKmsAllocDeviceReply::subDeviceMask. + */ + struct NvKmsFlipRequestOneSubDevice sd[NVKMS_MAX_SUBDEVICES]; + + /*! + * When a flip request is made, NVKMS will first perform + * validation to confirm whether the request can be satisfied. If + * the requested configuration cannot be fulfilled, the request + * returns FALSE. + * + * If 'commit' is FALSE, then the status of validation will be returned. + * + * If 'commit' is TRUE, and validation passes, then NVKMS will + * apply the requested configuration. + */ + NvBool commit; + + /*! + * When set, indicates that the client is capable of releasing the VRR + * semaphore to indicate when the flip is ready. Setting this to FALSE + * disables VRR. + */ + NvBool allowVrr; +}; + +enum NvKmsVrrFlipType { + NV_KMS_VRR_FLIP_NON_VRR = 0, + NV_KMS_VRR_FLIP_GSYNC, + NV_KMS_VRR_FLIP_ADAPTIVE_SYNC, +}; + +struct NvKmsFlipReplyOneSubDevice { + struct NvKmsFlipCommonReplyOneHead head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsFlipReply { + /*! + * If vrrFlipType != NV_KMS_VRR_FLIP_NON_VRR, then VRR was used for the + * requested flip. In this case, vrrSemaphoreIndex indicates the index + * into the VRR semaphore surface that the client should release to + * trigger the flip. + * + * A value of -1 indicates that no VRR semaphore release is needed. + */ + NvS32 vrrSemaphoreIndex; + + /*! + * Indicates whether the flip was non-VRR, was a VRR flip on one or more + * G-SYNC displays, or was a VRR flip exclusively on Adaptive-Sync + * displays. + */ + enum NvKmsVrrFlipType vrrFlipType; + + /*! + * sd[n] corresponds to bit N in NvKmsQueryDispReply::subDeviceMask and + * NvKmsAllocDeviceReply::subDeviceMask. + */ + struct NvKmsFlipReplyOneSubDevice sd[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsFlipParams { + struct NvKmsFlipRequest request; /*! in */ + struct NvKmsFlipReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST: "Dynamic dpy" reference + * counting. + * + * Most dpys have a lifetime equal to the NVKMS device. However, some + * dpys are dynamic and are created and destroyed in response to + * getting connected or disconnected. DisplayPort MST dpys are dynamic dpys. + * + * When a dynamic dpy is disconnected, its NVDpyId will be freed and + * made available for use by dynamic dpys connected later, unless any + * client has declared "interest" in the NVDpyId. The dynamic NVDpyId + * will persist as long as a client has declared interest on it, and + * will be reused for newly connected monitors at the same dynamic dpy + * address (port address, in the case of DP MST dynamic dpys). + * + * The 'interest' field selects interest in the dynamic dpy. + * + * If the dynamic dpy has already been disconnected (and therefore + * removed) before the client has declared interest in it, this ioctl + * will fail. + * + * The recommended usage pattern is: + * + * - Declare interest in the event types: + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED + * + * - When a DYNAMIC_DPY_CONNECTED event is received, call + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST + * to declare interest on the dpy. Be sure to check the return + * value, in case the dynamic dpy was already removed. Update any + * client bookkeeping, to start tracking the dpy. 
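+ * (At this point a client might, for example, call + * NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA on the new dpy to retrieve its name + * and EDID.)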
+ * + * - When a DYNAMIC_DPY_DISCONNECTED event is received, update any + * client bookkeeping, to stop tracking this dynamic dpy. Call + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST + * to remove interest on the dpy. + */ + +struct NvKmsDeclareDynamicDpyInterestRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + NvBool interest; +}; + +struct NvKmsDeclareDynamicDpyInterestReply { + NvU32 padding; +}; + +struct NvKmsDeclareDynamicDpyInterestParams { + struct NvKmsDeclareDynamicDpyInterestRequest request; /*! in */ + struct NvKmsDeclareDynamicDpyInterestReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_{,UN}REGISTER_SURFACE: Register and unregister an + * RM-allocated surface with NVKMS. + * + * A surface must be registered with NVKMS before NVKMS can display + * it. Note that NVKMS will create its own RM object for the registered + * surface. The surface will not be freed by resman until the surface + * is unregistered by the client. + */ + +struct NvKmsRegisterSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + + /*! + * Surfaces can be specified either by file descriptor or by + * (rmClient, rmObject) tuple. useFd indicates which is specified + * in this request. Userspace clients are required to specify surfaces by + * file descriptor. + */ + NvBool useFd; + + /*! + * The RM client handle that was used to allocate the surface. + * NVKMS will use this as the hClientSrc argument to + * NvRmDupObject(). Only used when useFd is FALSE. + */ + NvU32 rmClient; + + /* + * For multi-plane formats, clients are free to use one memory allocation + * for all planes, or a separate memory allocation per plane: + * - For the first use case, 'rmObject'/'fd' and 'rmObjectSizeInBytes' + * should be the same for all planes, and each plane should have a + * different 'offset'. + * - For the second use case, 'rmObject'/'fd' should be different for each + * plane. + * + * The 'planes' array is indexed as follows: + * - For RGB and YUV packed formats, 'planes[0]' refers to the single plane + * that's used for these formats. + * - For YUV semi-planar formats, 'planes[0]' refers to the Y-plane and + * 'planes[1]' refers to the UV-plane. + * - For YUV planar formats, 'planes[0]' refers to the Y-plane, 'planes[1]' + * refers to the U plane, and 'planes[2]' refers to the V plane. + */ + struct { + + union { + NvU32 rmObject; /* RM memory handle */ + NvS32 fd; /* file descriptor describing memory */ + } u; + + /* + * This byte offset will be added to the base address of the RM memory + * allocation, and determines the starting address of this plane within + * that allocation. This offset must be 1KB-aligned. + */ + NvU64 offset NV_ALIGN_BYTES(8); + + /* + * If the surface layout is NvKmsSurfaceMemoryLayoutPitch, then + * 'pitch' should be the pitch of this plane in bytes, and must + * have an alignment of 256 bytes. If the surface layout is + * NvKmsSurfaceMemoryLayoutBlockLinear, then 'pitch' should be the + * pitch of this plane in _blocks_. Blocks are always 64 bytes + * wide. + */ + NvU32 pitch; + + /* + * This is the size of the entire RM memory allocation pointed to by + * rmObject or fd prior to taking the offset into account. This is + * _not_ always the size of this plane since a single RM memory + * allocation can contain multiple planes, and we're also not taking + * the offset into account.
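+ * + * Illustrative example (editor's sketch; sizes hypothetical): a YUV + * semi-planar surface packed into a single allocation of size S: + * + * planes[0].u.fd = fd; planes[0].offset = 0; + * planes[1].u.fd = fd; planes[1].offset = sizeOfYPlaneInBytes; + * planes[0].rmObjectSizeInBytes = S; + * planes[1].rmObjectSizeInBytes = S;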
+ */ + NvU64 rmObjectSizeInBytes NV_ALIGN_BYTES(8); + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + NvU32 widthInPixels; + NvU32 heightInPixels; + + enum NvKmsSurfaceMemoryLayout layout; + enum NvKmsSurfaceMemoryFormat format; + + NvBool noDisplayHardwareAccess; + + /* + * If isoType == NVKMS_MEMORY_NISO, NVKMS will create CPU and GPU mappings + * for the surface memory. + */ + NvKmsMemoryIsoType isoType; + + NvU32 log2GobsPerBlockY; +}; + +struct NvKmsRegisterSurfaceReply { + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsRegisterSurfaceParams { + struct NvKmsRegisterSurfaceRequest request; /*! in */ + struct NvKmsRegisterSurfaceReply reply; /*! out */ +}; + +struct NvKmsUnregisterSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsUnregisterSurfaceReply { + NvU32 padding; +}; + +struct NvKmsUnregisterSurfaceParams { + struct NvKmsUnregisterSurfaceRequest request; /*! in */ + struct NvKmsUnregisterSurfaceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_SURFACE: + * NVKMS_IOCTL_ACQUIRE_SURFACE: + * NVKMS_IOCTL_RELEASE_SURFACE: + * + * An NVKMS client can "grant" a registered surface to another NVKMS + * client through the following steps: + * + * - The granting NVKMS client should open /dev/nvidia-modeset, and + * call NVKMS_IOCTL_GRANT_SURFACE to associate an NvKmsSurfaceHandle + * with the file descriptor. + * + * - The granting NVKMS client should pass the file descriptor over a + * UNIX domain socket to one or more clients who should acquire the + * surface. + * + * - The granting NVKMS client can optionally close the file + * descriptor now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_ACQUIRE_SURFACE, + * and pass in the file descriptor it received. This returns an + * NvKmsSurfaceHandle that the acquiring client can use to refer to + * the surface in any other NVKMS API call that takes an + * NvKmsSurfaceHandle. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_RELEASE_SURFACE to + * release it when they are done with the surface. + * + * - When the granting client unregisters the surface, it is + * "orphaned": NVKMS will flip away from the surface if necessary, + * the RM surface allocation is unduped, and the surface is + * unregistered from EVO. But, the acquiring clients will continue + * to hold a reference to this orphaned surface until they release + * it. + * + * Notes: + * + * - It is an error to call NVKMS_IOCTL_GRANT_SURFACE more than once + * on a /dev/nvidia-modeset file descriptor, or to use a file + * descriptor other than one created by opening /dev/nvidia-modeset, + * or to use a file descriptor that was previously used as the first + * argument to ioctl(2). + * + * - The special handling of surfaces when the granting client + * unregisters the surface might be a little asymmetric. However, + * this strikes a balance between: + * + * (a) Making sure modesetting NVKMS clients can free memory when + * they intend to. + * + * (b) Making sure acquiring clients don't get a stale view of their + * surface handle namespace: if the surface were completely + * unregistered out from under them, the surface handle could be + * recycled without them knowing. If they later attempted to + * release the original surface, they could inadvertently release a + * different surface that happened to have the recycled handle. + * + * - Do we need an NVKMS_IOCTL_REVOKE_SURFACE? 
Or is the + * automatic-unregistration-in-acquiring-clients behavior + * sufficient? + */ + +struct NvKmsGrantSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; + int fd; +}; + +struct NvKmsGrantSurfaceReply { + NvU32 padding; +}; + +struct NvKmsGrantSurfaceParams { + struct NvKmsGrantSurfaceRequest request; /*! in */ + struct NvKmsGrantSurfaceReply reply; /*! out */ +}; + +struct NvKmsAcquireSurfaceRequest { + int fd; +}; + +struct NvKmsAcquireSurfaceReply { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsAcquireSurfaceParams { + struct NvKmsAcquireSurfaceRequest request; /*! in */ + struct NvKmsAcquireSurfaceReply reply; /*! out */ +}; + +struct NvKmsReleaseSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsReleaseSurfaceReply { + NvU32 padding; +}; + +struct NvKmsReleaseSurfaceParams { + struct NvKmsReleaseSurfaceRequest request; /*! in */ + struct NvKmsReleaseSurfaceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_DPY_ATTRIBUTE: + * NVKMS_IOCTL_GET_DPY_ATTRIBUTE: + * NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES: + * + * Dpys have several attributes that can be queried and set. + * + * An attribute has a type (defined by NvKmsAttributeType), read/write + * permissions, and potentially other descriptions of its valid + * values. Use NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES to get the + * valid values of an attribute. + */ + +enum NvKmsAttributeType { + NV_KMS_ATTRIBUTE_TYPE_INTEGER = 0, + NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + NV_KMS_ATTRIBUTE_TYPE_INTBITS, + NV_KMS_ATTRIBUTE_TYPE_RANGE, + NV_KMS_ATTRIBUTE_TYPE_BITMASK, + NV_KMS_ATTRIBUTE_TYPE_DPY_ID, + NV_KMS_ATTRIBUTE_TYPE_DPY_ID_LIST, +}; + +enum NvKmsDpyAttribute { + NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS = 0, + NV_KMS_DPY_ATTRIBUTE_SCANLINE, + NV_KMS_DPY_ATTRIBUTE_HEAD, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_DEFAULT, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG, + /* + * XXX NVKMS TODO: Delete UPDATE_GLS_FRAMELOCK; this event-only + * attribute is a kludge to tell GLS about a change in framelock + * configuration made by NVKMS. Eventually, NVKMS should manage + * framelock itself and GLS shouldn't need to be notified. + * + * Note that the event data reports two boolean values: enable + * (bit 0) and server (bit 1). + */ + NV_KMS_DPY_ATTRIBUTE_UPDATE_GLS_FRAMELOCK, + NV_KMS_DPY_ATTRIBUTE_RASTER_LOCK, + NV_KMS_DPY_ATTRIBUTE_UPDATE_FLIPLOCK, + NV_KMS_DPY_ATTRIBUTE_UPDATE_STEREO, + NV_KMS_DPY_ATTRIBUTE_DPMS, + NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE, + + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_IS_MULTISTREAM, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING attribute. 
*/ +enum NvKmsDpyAttributeRequestedDitheringValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE attribute. */ +enum NvKmsDpyAttributeRequestedDitheringModeValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2 = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2 = 2, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE attribute. */ +enum NvKmsDpyAttributeCurrentDitheringModeValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 = 2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH attribute. */ +enum NvKmsDpyAttributeRequestedDitheringDepthValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH attribute. */ +enum NvKmsDpyAttributeCurrentDitheringDepthValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE attribute. */ +enum NvKmsDpyAttributeCurrentColorSpaceValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 = 2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL attribute. */ +enum NvKmsDpyAttributeDigitalSignalValue { + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS = 0, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS = 1, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DISPLAYPORT = 2, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_HDMI_FRL = 3, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DSI = 4, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE attribute. */ +enum NvKmsDpyAttributeDigitalLinkTypeValue { + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE = 0, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL = 1, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_QUAD = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG attribute. */ +enum NvKmsDpyAttributeFrameLockDisplayConfigValue { + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED = 0, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT = 1, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DPMS attribute. */ +enum NvKmsDpyAttributeDpmsValue { + NV_KMS_DPY_ATTRIBUTE_DPMS_ON, + NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY, + NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND, + NV_KMS_DPY_ATTRIBUTE_DPMS_OFF, +}; + +/*! 
Values for the NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE attribute */ +enum NvKmsDpyAttributeDisplayportConnectorTypeValue { + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN = 0, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DISPLAYPORT, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_HDMI, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DVI, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_VGA, +}; + +struct NvKmsSetDpyAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetDpyAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetDpyAttributeParams { + struct NvKmsSetDpyAttributeRequest request; /*! in */ + struct NvKmsSetDpyAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetDpyAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; +}; + +struct NvKmsGetDpyAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetDpyAttributeParams { + struct NvKmsGetDpyAttributeRequest request; /*! in */ + struct NvKmsGetDpyAttributeReply reply; /*! out */ +}; + + +struct NvKmsAttributeValidValuesCommonReply { + NvBool readable; + NvBool writable; + enum NvKmsAttributeType type; + union { + struct { + NvS64 min NV_ALIGN_BYTES(8); + NvS64 max NV_ALIGN_BYTES(8); + } range; /*! Used when type == NV_KMS_ATTRIBUTE_TYPE_RANGE. */ + struct { + NvU32 ints; + } bits; /*! Used when type == NV_KMS_ATTRIBUTE_TYPE_INTBITS. */ + } u; +}; + +struct NvKmsGetDpyAttributeValidValuesRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; +}; + +struct NvKmsGetDpyAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + + +struct NvKmsGetDpyAttributeValidValuesParams { + struct NvKmsGetDpyAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetDpyAttributeValidValuesReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_DISP_ATTRIBUTE: + * NVKMS_IOCTL_GET_DISP_ATTRIBUTE: + * NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES: + */ + + +enum NvKmsDispAttribute { + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK = 0, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SYNC, + NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TEST_SIGNAL, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_RESET, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SET_SWAP_BARRIER, + NV_KMS_DISP_ATTRIBUTE_ALLOW_FLIPLOCK, + NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG, +}; + + +struct NvKmsSetDispAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetDispAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetDispAttributeParams { + struct NvKmsSetDispAttributeRequest request; /*! in */ + struct NvKmsSetDispAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetDispAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; +}; + +struct NvKmsGetDispAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetDispAttributeParams { + struct NvKmsGetDispAttributeRequest request; /*! in */ + struct NvKmsGetDispAttributeReply reply; /*! 
out */ +}; + + +struct NvKmsGetDispAttributeValidValuesRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; +}; + +struct NvKmsGetDispAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + +struct NvKmsGetDispAttributeValidValuesParams { + struct NvKmsGetDispAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetDispAttributeValidValuesReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_FRAMELOCK: Query information about a framelock + * device. + */ + +struct NvKmsQueryFrameLockRequest { + NvKmsFrameLockHandle frameLockHandle; +}; + +struct NvKmsQueryFrameLockReply { + NvU32 gpuIds[NVKMS_MAX_GPUS_PER_FRAMELOCK]; +}; + +struct NvKmsQueryFrameLockParams { + struct NvKmsQueryFrameLockRequest request; /*! in */ + struct NvKmsQueryFrameLockReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE: + * NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE: + * NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES: + */ + +enum NvKmsFrameLockAttribute { + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_INTERVAL, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE, + NV_KMS_FRAMELOCK_ATTRIBUTE_FPGA_REVISION, + NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MAJOR_VERSION, + NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MINOR_VERSION, + NV_KMS_FRAMELOCK_ATTRIBUTE_BOARD_ID, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY_RESOLUTION, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4, + NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY attribute. */ +enum NvKmsFrameLockAttributePolarityValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE = 0x2, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES = 0x3, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE attribute. */ +enum NvKmsFrameLockAttributeHouseSyncModeValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT = 0x2, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED attribute. */ +enum NvKmsFrameLockAttributeEthernetDetectedValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_NONE = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT0 = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT1 = 0x2, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE attribute. */ +enum NvKmsFrameLockAttributeVideoModeValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_TTL = 1, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_BI_LEVEL = 2, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL = 3, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_PORT[01]_STATUS attributes. 
*/ +enum NvKmsFrameLockAttributePortStatusValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT = 1, +}; + +struct NvKmsSetFrameLockAttributeRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetFrameLockAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetFrameLockAttributeParams { + struct NvKmsSetFrameLockAttributeRequest request; /*! in */ + struct NvKmsSetFrameLockAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetFrameLockAttributeRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; +}; + +struct NvKmsGetFrameLockAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetFrameLockAttributeParams { + struct NvKmsGetFrameLockAttributeRequest request; /*! in */ + struct NvKmsGetFrameLockAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetFrameLockAttributeValidValuesRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; +}; + +struct NvKmsGetFrameLockAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + +struct NvKmsGetFrameLockAttributeValidValuesParams { + struct NvKmsGetFrameLockAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetFrameLockAttributeValidValuesReply reply; /*! out */ +}; + + + +/*! + * NVKMS_IOCTL_GET_NEXT_EVENT, NVKMS_IOCTL_DECLARE_EVENT_INTEREST: + * Event handling. + * + * Clients should call NVKMS_IOCTL_DECLARE_EVENT_INTEREST to indicate + * the events in which they are interested. Then, block on poll(2) or + * select(2) until there are events available to read on the file + * descriptor. + * + * When events are available, the client should call + * NVKMS_IOCTL_GET_NEXT_EVENT to get an NvKmsEvent structure, and + * interpret the union based on eventType. + * + * Clients can remove interest in events by calling + * NVKMS_IOCTL_DECLARE_EVENT_INTEREST again, specifying a new + * interestMask. + * + * Note that there may still be events queued for the client when the + * client calls NVKMS_IOCTL_DECLARE_EVENT_INTEREST to change its + * interestMask. So, clients should be prepared to ignore unexpected + * events after calling NVKMS_IOCTL_DECLARE_EVENT_INTEREST. + */ + + + +/*! + * NVKMS_EVENT_TYPE_DPY_CHANGED + * + * When a dpy changes, this event will be generated. The client + * should call NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA to get an updated + * NvKmsQueryDpyDynamicDataReply. + */ + +struct NvKmsEventDpyChanged { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED + * + * When a dynamic dpy is connected, this event will be generated. + */ + +struct NvKmsEventDynamicDpyConnected { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED + * + * When a dynamic dpy is disconnected, this event will be generated. + */ + +struct NvKmsEventDynamicDpyDisconnected { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED + * + * When a dpy attribute changes, this event will be generated. + */ + +struct NvKmsEventDpyAttributeChanged { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +};
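+ + +/* + * Pseudocode example (editor's sketch) of the event-handling pattern + * described above; 'HandleEvent' is an illustrative helper, not part of + * this API: + * + * (the client has already called NVKMS_IOCTL_DECLARE_EVENT_INTEREST) + * + * while (poll(2) reports the fd is readable) { + * do { + * ioctl(&params); (i.e., NVKMS_IOCTL_GET_NEXT_EVENT) + * if (params.reply.valid) { + * HandleEvent(&params.reply.event); + * } + * } while (params.reply.valid); + * } + */ + + +/*!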
+ * NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED
+ *
+ * When a framelock attribute changes, this event will be generated.
+ */
+
+struct NvKmsEventFrameLockAttributeChanged {
+ NvKmsFrameLockHandle frameLockHandle;
+ enum NvKmsFrameLockAttribute attribute;
+ NvS64 value NV_ALIGN_BYTES(8);
+};
+
+
+/*!
+ * NVKMS_EVENT_TYPE_FLIP_OCCURRED
+ *
+ * When a client requests a flip and specifies a completion notifier
+ * with NvKmsCompletionNotifierDescription::awaken == TRUE, this event
+ * will be generated. This event is only delivered to clients with
+ * flipping permission.
+ */
+
+struct NvKmsEventFlipOccurred {
+ NvKmsDeviceHandle deviceHandle;
+ /* XXX NVKMS TODO: the dispHandle is currently hard-coded to 0. */
+ NvKmsDispHandle dispHandle;
+ NvU32 head;
+ NvU32 layer;
+};
+
+
+struct NvKmsEvent {
+ enum NvKmsEventType eventType;
+ union {
+ struct NvKmsEventDpyChanged dpyChanged;
+ struct NvKmsEventDynamicDpyConnected dynamicDpyConnected;
+ struct NvKmsEventDynamicDpyDisconnected dynamicDpyDisconnected;
+ struct NvKmsEventDpyAttributeChanged dpyAttributeChanged;
+ struct NvKmsEventFrameLockAttributeChanged frameLockAttributeChanged;
+ struct NvKmsEventFlipOccurred flipOccurred;
+ } u;
+};
+
+
+struct NvKmsGetNextEventRequest {
+ NvU32 padding;
+};
+
+struct NvKmsGetNextEventReply {
+ /*!
+ * If an event is available, valid = TRUE and the NvKmsEvent
+ * contains the event. If no event is available, valid = FALSE.
+ */
+ NvBool valid;
+ struct NvKmsEvent event;
+};
+
+struct NvKmsGetNextEventParams {
+ struct NvKmsGetNextEventRequest request; /*! in */
+ struct NvKmsGetNextEventReply reply; /*! out */
+};
+
+
+struct NvKmsDeclareEventInterestRequest {
+ /*!
+ * Mask of event types, where each event type is indicated by (1
+ * << NVKMS_EVENT_TYPE_).
+ */
+ NvU32 interestMask;
+};
+
+struct NvKmsDeclareEventInterestReply {
+ NvU32 padding;
+};
+
+struct NvKmsDeclareEventInterestParams {
+ struct NvKmsDeclareEventInterestRequest request; /*! in */
+ struct NvKmsDeclareEventInterestReply reply; /*! out */
+};
+
+/*!
+ * NVKMS_IOCTL_CLEAR_UNICAST_EVENT
+ *
+ * The events generated through NVKMS_IOCTL_DECLARE_EVENT_INTEREST and
+ * NVKMS_IOCTL_GET_NEXT_EVENT are most useful for system-wide events which
+ * multiple clients may be interested in. Clients declare their interest in a
+ * collection of event types, and when they are notified that some number of
+ * events arrived, they have to query the events from the event queue.
+ *
+ * In contrast, "Unicast Events" are for use in cases where a client is only
+ * interested in a particular type of event on a particular object.
+ *
+ * To use a Unicast Event:
+ *
+ * - Create an fd through nvKmsOpen().
+ *
+ * - Do _not_ use the fd for anything else (the first argument to ioctl(2), the
+ * fd in any of the granting APIs such as NvKmsGrantSurfaceParams::request::fd,
+ * etc).
+ *
+ * - Pass the fd into an API that allows a unicast event. E.g.,
+ * NvKmsJoinSwapGroupParams::request::member::unicastEvent::fd
+ *
+ * - Clear the unicast event with NVKMS_IOCTL_CLEAR_UNICAST_EVENT.
+ *
+ * - Check if the event arrived; if it hasn't, then wait for the event through
+ * poll(2) or select(2).
+ */
+
+struct NvKmsClearUnicastEventRequest {
+ int unicastEventFd;
+};
+
+struct NvKmsClearUnicastEventReply {
+ NvU32 padding;
+};
+
+struct NvKmsClearUnicastEventParams {
+ struct NvKmsClearUnicastEventRequest request; /*! in */
+ struct NvKmsClearUnicastEventReply reply; /*! out */
+};
+
+
+/*!
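+ * As a sketch of the unicast-event sequence described above (the ioctl
+ * invocations are elided; the fd is obtained as the comment directs):
+ *
+ *     int evFd = ...; // a fresh NVKMS fd, used for nothing else
+ *     // pass evFd as the unicastEvent fd in, e.g.,
+ *     // NvKmsJoinSwapGroupParams::request::member::unicastEvent
+ *
+ *     struct NvKmsClearUnicastEventParams clear = { };
+ *     clear.request.unicastEventFd = evFd;
+ *     // issue NVKMS_IOCTL_CLEAR_UNICAST_EVENT, then check whether the
+ *     // event already arrived; if not, wait on evFd with poll(2) or
+ *     // select(2)
+ */
+
+/*!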
+ * NVKMS_IOCTL_SET_LAYER_POSITION: Set the position of the layer + * for the specified heads on the specified disps. The layer + * position is in "desktop coordinate space", i.e., relative to the + * upper left corner of the input viewport. + * + * Note that this is only valid if + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. + */ +struct NvKmsSetLayerPositionRequest { + NvKmsDeviceHandle deviceHandle; + + /*! + * The bitmask of which indices within disp[] describe requested + * configuration changes. Any other disps will use their existing + * configuration. + */ + NvU32 requestedDispsBitMask; + + struct { + /*! + * The bitmask of which head[] elements to look at on this + * disp; any other head will use its existing configuration. + */ + NvU32 requestedHeadsBitMask; + + struct { + struct NvKmsSignedPoint layerPosition[NVKMS_MAX_LAYERS_PER_HEAD]; + /*! + * The bitmask of which layerPosition[] elements to look at on this + * head; any other layer will use its existing configuration. + */ + NvU32 requestedLayerBitMask; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsSetLayerPositionReply { + NvU32 padding; +}; + +struct NvKmsSetLayerPositionParams { + struct NvKmsSetLayerPositionRequest request; /*! in */ + struct NvKmsSetLayerPositionReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRAB_OWNERSHIP: + * NVKMS_IOCTL_RELEASE_OWNERSHIP: + * + * NVKMS_IOCTL_GRAB_OWNERSHIP notifies NVKMS that the calling client wants to + * control modesets on the device, and NVKMS_IOCTL_RELEASE_OWNERSHIP indicates + * that the modeset ownership should be released and the VT console mode + * restored. + * + * It is not necessary to call NVKMS_IOCTL_RELEASE_OWNERSHIP during shutdown; + * NVKMS will implicitly clear modeset ownership in nvKmsClose(). + * + * Releasing modeset ownership enables console hotplug handling. See the + * explanation in the comment for enableConsoleHotplugHandling above. + */ + +struct NvKmsGrabOwnershipRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsGrabOwnershipReply { + NvU32 padding; +}; + +struct NvKmsGrabOwnershipParams { + struct NvKmsGrabOwnershipRequest request; /*! in */ + struct NvKmsGrabOwnershipReply reply; /*! out */ +}; + +struct NvKmsReleaseOwnershipRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsReleaseOwnershipReply { + NvU32 padding; +}; + +struct NvKmsReleaseOwnershipParams { + struct NvKmsReleaseOwnershipRequest request; /*! in */ + struct NvKmsReleaseOwnershipReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_PERMISSIONS: + * NVKMS_IOCTL_ACQUIRE_PERMISSIONS: + * NVKMS_IOCTL_REVOKE_PERMISSIONS: + * + * By default, only the modeset owning NVKMS client (the one who + * successfully called NVKMS_IOCTL_GRAB_OWNERSHIP) is allowed to flip + * or set modes. + * + * However, the modeset owner can grant various permissions to other + * clients through the following steps: + * + * - The modeset owner should open /dev/nvidia-modeset, and call + * NVKMS_IOCTL_GRANT_PERMISSIONS to define a set of permissions + * associated with the file descriptor. + * + * - The modeset owner should pass the file descriptor over a UNIX + * domain socket to one or more clients who should acquire these + * permissions. + * + * - The modeset owner can optionally close the file descriptor now or + * later. 
+ * + * - The acquiring clients should call NVKMS_IOCTL_ACQUIRE_PERMISSIONS + * and pass in the file descriptor they received, to update their + * client connection to include the permissions specified by the modeset + * owner in the first bullet. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - From this point forward, both the modeset owner and the clients + * are allowed to perform the actions allowed by the granted + * permissions. + * + * - The modeset owner can optionally revoke any previously granted + * permissions with NVKMS_IOCTL_REVOKE_PERMISSIONS. + * + * Notes: + * + * - NVKMS_IOCTL_REVOKE_PERMISSIONS has device-scope. It could be + * made finer-grained (e.g., take the file descriptor that was used + * to grant permissions) if that were needed. + * + * - NvKmsPermissions::disp[n] corresponds to the disp named by + * NvKmsAllocDeviceReply::dispHandles[n]. + * + * - It is an error to call NVKMS_IOCTL_GRANT_PERMISSIONS more than + * once on a /dev/nvidia-modeset file descriptor, or to use a file + * descriptor other than one created by opening /dev/nvidia-modeset, + * or to use a file descriptor that was previously used as the first + * argument to ioctl(2). + * + * - Calling NVKMS_IOCTL_ACQUIRE_PERMISSIONS more than once on the + * same NVKMS client will cause the new permissions for that client + * to be the union of the previous permissions and the latest + * permissions being acquired. + */ + +enum NvKmsPermissionsType { + NV_KMS_PERMISSIONS_TYPE_FLIPPING = 1, + NV_KMS_PERMISSIONS_TYPE_MODESET = 2, +}; + +struct NvKmsFlipPermissions { + struct { + struct { + /* + * Bitmask of flippable layers, where each layer is + * indicated by '1 << layer'. It is an error for bits + * above NVKMS_MAX_LAYERS_PER_HEAD to be set. + * + * Only applicable when type==FLIPPING. + */ + NvU8 layerMask; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsModesetPermissions { + struct { + struct { + /* + * A list of dpys which a particular NVKMS client is + * allowed to use when performing a modeset on this head. + * + * If the NVKMS client is not allowed to set a mode on + * this head, this list will be empty. + * + * If an NVKMS client can drive the head without + * restrictions, this will be nvAllDpyIdList(). + * + * Only applicable when type==MODESET. + */ + NVDpyIdList dpyIdList; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsPermissions { + enum NvKmsPermissionsType type; + union { + struct NvKmsFlipPermissions flip; + struct NvKmsModesetPermissions modeset; + }; +}; + +struct NvKmsGrantPermissionsRequest { + int fd; + NvKmsDeviceHandle deviceHandle; + struct NvKmsPermissions permissions; +}; + +struct NvKmsGrantPermissionsReply { + NvU32 padding; +}; + +struct NvKmsGrantPermissionsParams { + struct NvKmsGrantPermissionsRequest request; /*! in */ + struct NvKmsGrantPermissionsReply reply; /*! out */ +}; + +struct NvKmsAcquirePermissionsRequest { + int fd; +}; + +struct NvKmsAcquirePermissionsReply { + /*! This client's handle for the device which acquired new permissions */ + NvKmsDeviceHandle deviceHandle; + + /*! + * The acquired permissions. + * + * If permissions::type == FLIPPING, the new combined flipping + * permissions of the calling client on this device, including + * prior permissions and permissions added by this operation. 
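+ *
+ * As an illustrative sketch of the grant/acquire handoff described
+ * above (helper variables and the ioctl invocations are assumed):
+ *
+ *     // modeset owner:
+ *     int permFd = open("/dev/nvidia-modeset", O_RDWR);
+ *     struct NvKmsGrantPermissionsParams grant = { };
+ *     grant.request.fd = permFd;
+ *     grant.request.deviceHandle = deviceHandle;
+ *     grant.request.permissions = perms; // filled in elsewhere
+ *     // issue NVKMS_IOCTL_GRANT_PERMISSIONS, then send permFd to the
+ *     // acquiring client over a UNIX domain socket (SCM_RIGHTS)
+ *
+ *     // acquiring client, with the received descriptor:
+ *     struct NvKmsAcquirePermissionsParams acquire = { };
+ *     acquire.request.fd = receivedFd;
+ *     // issue NVKMS_IOCTL_ACQUIRE_PERMISSIONS; reply.permissions then
+ *     // holds the combined result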
+ */
+ struct NvKmsPermissions permissions;
+};
+
+struct NvKmsAcquirePermissionsParams {
+ struct NvKmsAcquirePermissionsRequest request; /*! in */
+ struct NvKmsAcquirePermissionsReply reply; /*! out */
+};
+
+struct NvKmsRevokePermissionsRequest {
+ NvKmsDeviceHandle deviceHandle;
+
+ /*
+ * A bitmask of permission types to be revoked for this device.
+ * It should be the bitwise 'or' of one or more
+ * NVBIT(NV_KMS_PERMISSIONS_TYPE_*) values.
+ */
+ NvU32 permissionsTypeBitmask;
+};
+
+struct NvKmsRevokePermissionsReply {
+ NvU32 padding;
+};
+
+struct NvKmsRevokePermissionsParams {
+ struct NvKmsRevokePermissionsRequest request; /*! in */
+ struct NvKmsRevokePermissionsReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_QUERY_DPY_CRC32
+ *
+ * Query the last CRC32 value from the NVKMS disp head specified by the
+ * triplet (deviceHandle, dispHandle, head).
+ */
+
+struct NvKmsQueryDpyCRC32Request {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDispHandle dispHandle;
+ NvU32 head;
+};
+
+/*!
+ * Generic CRC structure type representing a CRC value, and whether the
+ * hardware architecture supports collecting that CRC type. If
+ * the CRC is not supported by hardware, its value is undefined.
+ */
+struct NvKmsDpyCRC32 {
+ /*!
+ * Value of the CRC. If it is not supported, value is undefined.
+ */
+ NvU32 value;
+
+ /*!
+ * Boolean indicating whether the hardware supports CRC collection.
+ * If this boolean is FALSE, CRC hardware collection is not supported.
+ */
+ NvBool supported;
+};
+
+/*!
+ * Reply structure that contains CRC32 values returned from hardware.
+ * The availability of each CRC is indicated by the 'supported' boolean in
+ * its struct. Note: CRCs that are not supported will not be updated and
+ * will remain at 0.
+ */
+struct NvKmsQueryDpyCRC32Reply {
+ /*!
+ * CRC generated from the Compositor hardware.
+ */
+ struct NvKmsDpyCRC32 compositorCrc32;
+
+ /*!
+ * CRC generated from the RG hardware, if the head is driving RG/SF.
+ * Note that if Dithering is enabled, this CRC will vary across reads
+ * from the same frame.
+ */
+ struct NvKmsDpyCRC32 rasterGeneratorCrc32;
+
+ /*!
+ * CRC value generated from the target SF/OR, depending on the
+ * connector's OR type.
+ * Note that if Dithering is enabled, this CRC will vary across reads
+ * from the same frame.
+ */
+ struct NvKmsDpyCRC32 outputCrc32;
+
+};
+
+struct NvKmsQueryDpyCRC32Params {
+ struct NvKmsQueryDpyCRC32Request request; /*! in */
+ struct NvKmsQueryDpyCRC32Reply reply; /*! out */
+};
+
+/*!
+ * User-space pointers are always passed to NVKMS in an NvU64.
+ * This user-space address is eventually passed into the platform's
+ * copyin/copyout functions, in a void* argument.
+ *
+ * This utility function converts from a pointer to an NvU64.
+ */
+
+static inline NvU64 nvKmsPointerToNvU64(const void *ptr)
+{
+ return (NvU64)(NvUPtr)ptr;
+}
+
+
+/*!
+ * NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO:
+ * NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO:
+ *
+ * To make a request that is deferred until after a specific point in a client's
+ * graphics channel, a client should register a surface with NVKMS as a
+ * "deferred request fifo". The surface is interpreted as having the layout of
+ * struct NvKmsDeferredRequestFifo.
+ *
+ * To make deferred requests, the client should:
+ *
+ * - Write the NVKMS_DEFERRED_REQUEST_OPCODE for the desired operation to
+ * NvKmsDeferredRequestFifo::request[i], where 'i' is the next available
+ * element in the request[] array. Repeat as necessary.
+ * + * - Push NV906F_SEMAPHORE[ABCD] methods in its graphics channel to write + * '(i + 1) % NVKMS_MAX_DEFERRED_REQUESTS' to + * NvKmsDeferredRequestFifo::put. + * + * - Push an NV906F_NON_STALL_INTERRUPT method in its graphics channel. + * + * NVKMS will be notified of the non-stall interrupt, and scan all clients' + * deferred request fifos for put != get. NVKMS will then perform the requests + * specified in request[get] through request[put-1]. Finally, NVKMS will update + * get to indicate how much of the fifo it consumed. + * + * Wrapping behaves as expected. In pseudo code: + * + * while (get != put) { + * do(request[get]); + * get = (get + 1) % NVKMS_MAX_DEFERRED_REQUESTS; + * } + * + * The only time it is safe for clients to write to get is when get == put and + * there are no outstanding semaphore releases to gpuPut. + * + * The surface used for the deferred request fifo must be: + * + * - In system memory (NVKMS will create one device-scoped mapping, not one per + * subdevice, as would be needed if the surface were in video memory). + * + * - At least as large as sizeof(NvKmsDeferredRequestFifo). + * + * Some NVKMS_DEFERRED_REQUESTs may need to write to a semaphore after some + * operation is performed (e.g., to indicate that a SwapGroup is ready, or that + * we've reached vblank). The NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX field + * within the request specifies a semaphore within the + * NvKmsDeferredRequestFifo::semaphore[] array. The semantics of that semaphore + * index are opcode-specific. + * + * The opcode and semaphore index are in the low 16-bits of the request. The + * upper 16-bits are opcode-specific. + */ + +#define NVKMS_MAX_DEFERRED_REQUESTS 128 + +#define NVKMS_DEFERRED_REQUEST_OPCODE 7:0 + +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX 15:8 + +#define NVKMS_DEFERRED_REQUEST_OPCODE_NOP 0 + +/* + * The SWAP_GROUP_READY request means that this NvKmsDeferredRequestFifo is + * ready for the next swap of the SwapGroup (see NVKMS_IOCTL_JOIN_SWAP_GROUP, + * below). NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX should specify an element in + * the semaphore[] array which will be released to + * + * NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY + * + * when the SwapGroup actually swaps. + */ +#define NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY 1 +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_NOT_READY 0x00000000 +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY 0xFFFFFFFF + + +/* + * The SWAP_GROUP_READY_PER_EYE_STEREO field indicates whether this deferred + * request fifo wants the SwapGroup to present new content at every eye boundary + * (PER_EYE), or present new content only when transitioning from the right eye + * to the left eye (PER_PAIR). + */ +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO 16:16 +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR 0 +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE 1 + + +struct NvKmsDeferredRequestFifo { + NvU32 put; + NvU32 get; + NvU32 request[NVKMS_MAX_DEFERRED_REQUESTS]; + NvGpuSemaphore semaphore[NVKMS_MAX_DEFERRED_REQUESTS]; +}; + +struct NvKmsRegisterDeferredRequestFifoRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsRegisterDeferredRequestFifoReply { + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; +}; + +struct NvKmsRegisterDeferredRequestFifoParams { + struct NvKmsRegisterDeferredRequestFifoRequest request; /*! 
in */ + struct NvKmsRegisterDeferredRequestFifoReply reply; /*! out */ +}; + +struct NvKmsUnregisterDeferredRequestFifoRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; +}; + +struct NvKmsUnregisterDeferredRequestFifoReply { + NvU32 padding; +}; + +struct NvKmsUnregisterDeferredRequestFifoParams { + struct NvKmsUnregisterDeferredRequestFifoRequest request; /*! in */ + struct NvKmsUnregisterDeferredRequestFifoReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_ALLOC_SWAP_GROUP + * NVKMS_IOCTL_FREE_SWAP_GROUP + * + * An NVKMS client creates a SwapGroup by calling NVKMS_IOCTL_ALLOC_SWAP_GROUP + * and specifying the heads in the SwapGroup with + * NvKmsAllocSwapGroupRequest::disp[]::headMask. + * + * The SwapGroup can be shared with clients through + * NVKMS_IOCTL_GRANT_SWAP_GROUP, and it is destroyed once all clients that have + * acquired the swap group through NVKMS_IOCTL_ACQUIRE_SWAP_GROUP have released + * it through NVKMS_IOCTL_RELEASE_SWAP_GROUP and when the client that created + * the swap group has called NVKMS_IOCTL_FREE_SWAP_GROUP or freed the device. + * + * The SwapGroup allocation is expected to have a long lifetime (e.g., the X + * driver might call ALLOC_SWAP_GROUP from ScreenInit and FREE_SWAP_GROUP from + * CloseScreen). The point of these requests is to define the head topology of + * the SwapGroup (for X driver purposes, presumably all the heads that are + * assigned to the X screen). + * + * As such: + * + * - Not all heads described in the ALLOC_SWAP_GROUP request need to be active + * (they can come and go with different modesets). + * + * - The SwapGroup persists across modesets. + * + * - SwapGroup allocation is expected to be lightweight: the heavyweight + * operations like allocating and freeing headSurface resources are done when + * the number of SwapGroup members (see {JOIN,LEAVE}_SWAP_GROUP below) + * transitions between 0 and 1. + * + * Only an NVKMS modeset owner can alloc or free a SwapGroup. + */ + +struct NvKmsSwapGroupConfig { + struct { + NvU32 headMask; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsAllocSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + struct NvKmsSwapGroupConfig config; +}; + +struct NvKmsAllocSwapGroupReply { + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsAllocSwapGroupParams { + struct NvKmsAllocSwapGroupRequest request; /*! in */ + struct NvKmsAllocSwapGroupReply reply; /*! out */ +}; + +struct NvKmsFreeSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsFreeSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsFreeSwapGroupParams { + struct NvKmsFreeSwapGroupRequest request; /*! in */ + struct NvKmsFreeSwapGroupReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_JOIN_SWAP_GROUP + * NVKMS_IOCTL_LEAVE_SWAP_GROUP + * + * Clients can join NvKmsDeferredRequestFifos to SwapGroups using + * NVKMS_IOCTL_JOIN_SWAP_GROUP, and remove NvKmsDeferredRequestFifos from + * SwapGroups using NVKMS_IOCTL_LEAVE_SWAP_GROUP (or freeing the + * NvKmsDeferredRequestFifo, or freeing the device). + * + * Once an NvKmsDeferredRequestFifo is joined to a SwapGroup, the SwapGroup will + * not become ready again until the SwapGroup member sends the + * NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY request through their + * NvKmsDeferredRequestFifo. 
The NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX
+ * specified as part of the request indicates an index into
+ * NvKmsDeferredRequestFifo::semaphore[] where NVKMS will write
+ *
+ * NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY
+ *
+ * when the SwapGroup becomes ready.
+ *
+ * If unicastEvent::specified is TRUE, then unicastEvent::fd will be interpreted
+ * as a unicast event file descriptor. See NVKMS_IOCTL_CLEAR_UNICAST_EVENT for
+ * details. Whenever SWAP_GROUP_READY is written to a semaphore within
+ * NvKmsDeferredRequestFifo, the unicastEvent fd will be notified.
+ *
+ * An NvKmsDeferredRequestFifo can be joined to at most one SwapGroup at a time.
+ *
+ * If one client uses multiple NvKmsDeferredRequestFifos joined to multiple
+ * SwapGroups and wants to synchronize swaps between these fifos, it should
+ * bundle all of the (deviceHandle, swapGroupHandle, deferredRequestFifoHandle)
+ * tuples into a single join/leave request.
+ *
+ * If any client joins multiple NvKmsDeferredRequestFifos to multiple
+ * SwapGroups, all NVKMS_IOCTL_JOIN_SWAP_GROUP requests must specify the same
+ * set of SwapGroups.
+ */
+
+struct NvKmsJoinSwapGroupRequestOneMember {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsSwapGroupHandle swapGroupHandle;
+ NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle;
+
+ struct {
+ int fd;
+ NvBool specified;
+ } unicastEvent;
+};
+
+struct NvKmsJoinSwapGroupRequest {
+ NvU32 numMembers;
+ struct NvKmsJoinSwapGroupRequestOneMember member[NVKMS_MAX_SWAPGROUPS];
+};
+
+struct NvKmsJoinSwapGroupReply {
+ NvU32 padding;
+};
+
+struct NvKmsJoinSwapGroupParams {
+ struct NvKmsJoinSwapGroupRequest request; /*! in */
+ struct NvKmsJoinSwapGroupReply reply; /*! out */
+};
+
+struct NvKmsLeaveSwapGroupRequestOneMember {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle;
+};
+
+struct NvKmsLeaveSwapGroupRequest {
+ NvU32 numMembers;
+ struct NvKmsLeaveSwapGroupRequestOneMember member[NVKMS_MAX_SWAPGROUPS];
+};
+
+struct NvKmsLeaveSwapGroupReply {
+ NvU32 padding;
+};
+
+struct NvKmsLeaveSwapGroupParams {
+ struct NvKmsLeaveSwapGroupRequest request; /*! in */
+ struct NvKmsLeaveSwapGroupReply reply; /*! out */
+};
+
+
+/*!
+ * NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST
+ *
+ * The X driver needs to define which pixels on-screen are owned by the
+ * SwapGroup. NVKMS will use this to prevent those pixels from updating until
+ * all SwapGroup members indicate that they are ready.
+ *
+ * The clip list is interpreted by NVKMS as relative to the surface specified
+ * during a flip or modeset. The clip list is intersected with the ViewPortIn
+ * of the head, described by
+ *
+ * NvKmsFlipCommonParams::viewPortIn::point
+ *
+ * and
+ *
+ * NvKmsSetModeOneHeadRequest::viewPortSizeIn
+ *
+ * The clip list is exclusive. I.e., each NvKmsRect is a region outside of the
+ * SwapGroup. One surface-sized NvKmsRect would mean that there are no
+ * SwapGroup-owned pixels.
+ *
+ * When no clip list is specified, NVKMS behaves as if there were no
+ * SwapGroup-owned pixels.
+ *
+ * Only an NVKMS modeset owner can set the clip list of a SwapGroup.
+ */
+
+struct NvKmsSetSwapGroupClipListRequest {
+ NvKmsDeviceHandle deviceHandle;
+ NvKmsSwapGroupHandle swapGroupHandle;
+
+ /*! The number of struct NvKmsRects pointed to by pClipList. */
+ NvU16 nClips;
+
+ /*!
+ * Pointer to an array of struct NvKmsRects describing the exclusive clip
+ * list for the SwapGroup. The NvKmsRects are in desktop coordinate space.
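+ *
+ * For illustration (hypothetical client code; the NvKmsRect fields are
+ * elided), a single rect excluded from the SwapGroup could be set with:
+ *
+ *     struct NvKmsRect hole = { ... };
+ *     struct NvKmsSetSwapGroupClipListParams p = { };
+ *     p.request.deviceHandle = deviceHandle;
+ *     p.request.swapGroupHandle = swapGroupHandle;
+ *     p.request.nClips = 1;
+ *     p.request.pClipList = nvKmsPointerToNvU64(&hole);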
+ * + * Use nvKmsPointerToNvU64() to assign pClipList. + */ + NvU64 pClipList NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetSwapGroupClipListReply { + NvU32 padding; +}; + +struct NvKmsSetSwapGroupClipListParams { + struct NvKmsSetSwapGroupClipListRequest request; /*! in */ + struct NvKmsSetSwapGroupClipListReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_SWAP_GROUP: + * NVKMS_IOCTL_ACQUIRE_SWAP_GROUP: + * NVKMS_IOCTL_RELEASE_SWAP_GROUP: + * + * An NVKMS client can "grant" a swap group that it has allocated through + * NVKMS_IOCTL_ALLOC_SWAP_GROUP to another NVKMS client through the following + * steps: + * + * - The granting NVKMS client should open /dev/nvidia-modeset, and call + * NVKMS_IOCTL_GRANT_SWAP_GROUP to associate an NvKmsSwapGroupHandle + * with the file descriptor. + * + * - The granting NVKMS client should pass the file descriptor over a + * UNIX domain socket to one or more clients who should acquire the + * swap group. + * + * - The granting NVKMS client can optionally close the file + * descriptor now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, + * and pass in the file descriptor it received. This returns an + * NvKmsSwapGroupHandle that the acquiring client can use to refer to + * the swap group in any other NVKMS API call that takes an + * NvKmsSwapGroupHandle. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_RELEASE_SWAP_GROUP to + * release it when they are done with the swap group. + */ + +struct NvKmsGrantSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; + int fd; +}; + +struct NvKmsGrantSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsGrantSwapGroupParams { + struct NvKmsGrantSwapGroupRequest request; /*! in */ + struct NvKmsGrantSwapGroupReply reply; /*! out */ +}; + +struct NvKmsAcquireSwapGroupRequest { + int fd; +}; + +struct NvKmsAcquireSwapGroupReply { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsAcquireSwapGroupParams { + struct NvKmsAcquireSwapGroupRequest request; /*! in */ + struct NvKmsAcquireSwapGroupReply reply; /*! out */ +}; + +struct NvKmsReleaseSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsReleaseSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsReleaseSwapGroupParams { + struct NvKmsReleaseSwapGroupRequest request; /*! in */ + struct NvKmsReleaseSwapGroupReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_SWITCH_MUX: + * + * Switch the mux for the given Dpy in the given direction. The mux switch is + * performed in three stages. + */ + +enum NvKmsMuxOperation { + NVKMS_SWITCH_MUX_PRE, + NVKMS_SWITCH_MUX, + NVKMS_SWITCH_MUX_POST, +}; + +struct NvKmsSwitchMuxRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsMuxOperation operation; + NvMuxState state; +}; + +struct NvKmsSwitchMuxReply { + NvU32 padding; +}; + +struct NvKmsSwitchMuxParams { + struct NvKmsSwitchMuxRequest request; + struct NvKmsSwitchMuxReply reply; +}; + +struct NvKmsGetMuxStateRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + +struct NvKmsGetMuxStateReply { + NvMuxState state; +}; + +struct NvKmsGetMuxStateParams { + struct NvKmsGetMuxStateRequest request; + struct NvKmsGetMuxStateReply reply; +}; + +/*! 
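+ * As a hedged sketch of the three-stage mux switch described above (one
+ * ioctl per stage is assumed; error handling omitted):
+ *
+ *     struct NvKmsSwitchMuxParams p = { };
+ *     p.request.deviceHandle = deviceHandle;
+ *     p.request.dispHandle = dispHandle;
+ *     p.request.dpyId = dpyId;
+ *     p.request.state = targetState;
+ *
+ *     p.request.operation = NVKMS_SWITCH_MUX_PRE;
+ *     // issue NVKMS_IOCTL_SWITCH_MUX
+ *     p.request.operation = NVKMS_SWITCH_MUX;
+ *     // issue NVKMS_IOCTL_SWITCH_MUX
+ *     p.request.operation = NVKMS_SWITCH_MUX_POST;
+ *     // issue NVKMS_IOCTL_SWITCH_MUX
+ */
+
+/*!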
+ * NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE: + * + * Export the VRR semaphore surface onto the provided RM 'memFd'. + * The RM memory FD should be "empty". An empty FD can be allocated by calling + * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD with 'EMPTY_FD' set. + */ + +struct NvKmsExportVrrSemaphoreSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + int memFd; +}; + +struct NvKmsExportVrrSemaphoreSurfaceReply { + NvU32 padding; +}; + +struct NvKmsExportVrrSemaphoreSurfaceParams { + struct NvKmsExportVrrSemaphoreSurfaceRequest request; + struct NvKmsExportVrrSemaphoreSurfaceReply reply; +}; + +/*! + * NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT: + * NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT: + * + * The NVKMS client can use NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT to request a + * vblank syncpt that continuously triggers each time the raster generator + * reaches the start of vblank. NVKMS will return the syncpt id in + * 'NvKmsEnableVblankSyncObjectReply::syncptId'. + * + * The NVKMS client can use NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT to disable + * the vblank syncpt. + * + * If a vblank syncpt is currently enabled on a head, and a modeset request is + * issued to reconfigure that head with a new set of mode timings, NVKMS will + * automatically reenable the vblank syncpt so it continues to trigger with the + * new mode timings. + * + * Clients can use these IOCTLs only if both NvKmsAllocDeviceReply:: + * supportsVblankSyncObjects and NvKmsAllocDeviceReply::supportsSyncpts are + * TRUE. + */ + +struct NvKmsEnableVblankSyncObjectRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; +}; + +struct NvKmsEnableVblankSyncObjectReply { + /* + * Clients should explicitly disable the vblank sync object to consume the + * handle. + */ + NvKmsVblankSyncObjectHandle vblankHandle; + + NvU32 syncptId; +}; + +struct NvKmsEnableVblankSyncObjectParams { + struct NvKmsEnableVblankSyncObjectRequest request; /*! in */ + struct NvKmsEnableVblankSyncObjectReply reply; /*! out */ +}; + +struct NvKmsDisableVblankSyncObjectRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + /* This handle is received in NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT. */ + NvKmsVblankSyncObjectHandle vblankHandle; +}; + +struct NvKmsDisableVblankSyncObjectReply { + NvU32 padding; +}; + +struct NvKmsDisableVblankSyncObjectParams { + struct NvKmsDisableVblankSyncObjectRequest request; /*! in */ + struct NvKmsDisableVblankSyncObjectReply reply; /*! out */ +}; + +#endif /* NVKMS_API_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h new file mode 100644 index 0000000..d1483f8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(NVKMS_FORMAT_H)
+#define NVKMS_FORMAT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+
+/*
+ * In order to interpret these pixel format names, please take note of these
+ * conventions:
+ * - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats
+ * that have an interleaved chroma component across every two pixels. The
+ * double-underscore is a separator between these two pixel groups.
+ * - The triple-underscore is a separator between planes.
+ * - The 'N' suffix introduces the chroma decimation factor.
+ *
+ * As examples of the above rules:
+ * - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one
+ * 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8)
+ * and one 8-bit chroma component (V8) in pixel (N + 1). This format is
+ * 422-decimated since the U and V chroma samples are shared between each
+ * pair of adjacent pixels per line.
+ * - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components,
+ * and another plane of 10-bit chroma components (U10V10). This format has no
+ * chroma decimation since the luma and chroma components are sampled at the
+ * same rate.
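+ * - Extending the same rules to a three-plane case: the
+ *   Y8___U8___V8_N420 format has three separate 8-bit planes (Y, U, and
+ *   V), since the triple-underscore separates planes; it is
+ *   420-decimated, so each chroma sample is shared by a 2x2 block of
+ *   luma samples.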
+ */ +enum NvKmsSurfaceMemoryFormat { + NvKmsSurfaceMemoryFormatI8 = 0, + NvKmsSurfaceMemoryFormatA1R5G5B5 = 1, + NvKmsSurfaceMemoryFormatX1R5G5B5 = 2, + NvKmsSurfaceMemoryFormatR5G6B5 = 3, + NvKmsSurfaceMemoryFormatA8R8G8B8 = 4, + NvKmsSurfaceMemoryFormatX8R8G8B8 = 5, + NvKmsSurfaceMemoryFormatA2B10G10R10 = 6, + NvKmsSurfaceMemoryFormatX2B10G10R10 = 7, + NvKmsSurfaceMemoryFormatA8B8G8R8 = 8, + NvKmsSurfaceMemoryFormatX8B8G8R8 = 9, + NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10, + NvKmsSurfaceMemoryFormatR16G16B16A16 = 11, + NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12, + NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13, + NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14, + NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15, + NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16, + NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17, + NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18, + NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19, + NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20, + NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21, + NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22, + NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23, + NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24, + NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25, + NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26, + NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27, + NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28, + NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29, + NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30, + NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31, + NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32, + NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33, + NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34, + NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8, + NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatY8___U8___V8_N420, +}; + +typedef struct NvKmsSurfaceMemoryFormatInfo { + enum NvKmsSurfaceMemoryFormat format; + const char *name; + NvU8 depth; + NvBool isYUV; + NvU8 numPlanes; + + union { + struct { + NvU8 bytesPerPixel; + NvU8 bitsPerPixel; + } rgb; + + struct { + NvU8 depthPerComponent; + NvU8 storageBitsPerComponent; + NvU8 horizChromaDecimationFactor; + NvU8 vertChromaDecimationFactor; + } yuv; + }; +} NvKmsSurfaceMemoryFormatInfo; + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format); + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_FORMAT_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h new file mode 100644 index 0000000..cb27573 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_IOCTL_H) +#define NVKMS_IOCTL_H + +#include "nvtypes.h" + +/*! + * Some of the NVKMS ioctl parameter data structures are quite large + * and would exceed the parameter size constraints on at least SunOS. + * + * Redirect ioctls through a level of indirection: user-space assigns + * NvKmsIoctlParams with the real command, size, and pointer, and + * passes the NvKmsIoctlParams through the ioctl. + */ + +struct NvKmsIoctlParams { + NvU32 cmd; + NvU32 size; + NvU64 address NV_ALIGN_BYTES(8); +}; + +#define NVKMS_IOCTL_MAGIC 'm' +#define NVKMS_IOCTL_CMD 0 + +#define NVKMS_IOCTL_IOWR \ + _IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams) + +/*! + * User-space pointers are always passed to NVKMS in an NvU64. + * This user-space address is eventually passed into the platform's + * copyin/copyout functions, in a void* argument. + * + * This utility function converts from an NvU64 to a pointer. + */ + +static inline void *nvKmsNvU64ToPointer(NvU64 value) +{ + return (void *)(NvUPtr)value; +} + +/*! + * Before casting the NvU64 to a void*, check that casting to a pointer + * size within the kernel does not lose any precision in the current + * environment. + */ +static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address) +{ + return address == (NvU64)(NvUPtr)address; +} + +#endif /* NVKMS_IOCTL_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h new file mode 100644 index 0000000..4f4e1dd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_SYNC_H) +#define NVKMS_SYNC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvkms-api-types.h" + +/* These functions are implemented in nvkms-lib. */ + +enum nvKmsNotifierStatus { + NVKMS_NOTIFIER_STATUS_NOT_BEGUN, + NVKMS_NOTIFIER_STATUS_BEGUN, + NVKMS_NOTIFIER_STATUS_FINISHED, +}; + +struct nvKmsParsedNotifier { + NvU64 timeStamp; + NvBool timeStampValid; + enum nvKmsNotifierStatus status; + NvU8 presentCount; +}; + +static inline NvU32 nvKmsSizeOfNotifier(enum NvKmsNIsoFormat format, + NvBool overlay) { + switch (format) { + default: + case NVKMS_NISO_FORMAT_LEGACY: + return overlay ? 16 : 4; + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return 16; + } +} + +void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base); + +void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, const void *base, + struct nvKmsParsedNotifier *out); + +struct nvKmsParsedSemaphore { + NvU32 payload; +}; + +static inline NvU32 nvKmsSizeOfSemaphore(enum NvKmsNIsoFormat format) { + switch (format) { + default: + case NVKMS_NISO_FORMAT_LEGACY: + return 4; + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return 16; + } +} + +NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format); + +void nvKmsResetSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, void *base, + NvU32 payload); + +void nvKmsParseSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, const void *base, + struct nvKmsParsedSemaphore *out); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_SYNC_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h new file mode 100644 index 0000000..7c76685 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h @@ -0,0 +1,176 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_KAPI_INTERNAL_H__ + +#define __NVKMS_KAPI_INTERNAL_H__ + +#include "unix_rm_handle.h" + +#include "nvkms-utils.h" +#include "nvkms-kapi-private.h" + +//XXX Decouple functions like nvEvoLog used for logging from NVKMS + +#define nvKmsKapiLogDebug(__format...) \ + nvEvoLogDebug(EVO_LOG_INFO, "[kapi] "__format) + +#define nvKmsKapiLogDeviceDebug(__device, __format, ...) \ + nvEvoLogDebug(EVO_LOG_INFO, "[kapi][GPU Id 0x%08x] "__format, \ + device->gpuId, ##__VA_ARGS__) + +struct NvKmsKapiDevice { + + NvU32 gpuId; + + nvkms_sema_handle_t *pSema; + + /* RM handles */ + + NvU32 hRmClient; + NvU32 hRmDevice, hRmSubDevice; + NvU32 deviceInstance; + + NVUnixRmHandleAllocatorRec handleAllocator; + + /* NVKMS handles */ + + struct nvkms_per_open *pKmsOpen; + + NvKmsDeviceHandle hKmsDevice; + NvKmsDispHandle hKmsDisp; + NvU32 dispIdx; + + NvU32 subDeviceMask; + + NvBool isSOC; + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + NvBool supportsSyncpts; + + /* Device capabilities */ + + struct { + struct NvKmsCompositionCapabilities cursorCompositionCaps; + struct NvKmsCompositionCapabilities overlayCompositionCaps; + + NvU16 validLayerRRTransforms; + + NvU32 maxWidthInPixels; + NvU32 maxHeightInPixels; + NvU32 maxCursorSizeInPixels; + + NvU8 genericPageKind; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; + NvBool supportsHDR[NVKMS_KAPI_LAYER_MAX]; + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + struct { + NvU32 hRmHandle; + NvKmsSurfaceHandle hKmsHandle; + + NvBool mapped; + void *pLinearAddress; + + enum NvKmsNIsoFormat format; + } notifier; + + struct { + NvU32 currFlipNotifierIndex; + } layerState[NVKMS_KAPI_MAX_HEADS][NVKMS_MAX_LAYERS_PER_HEAD]; + + void *privateData; + + void (*eventCallback)(const struct NvKmsKapiEvent *event); +}; + +struct NvKmsKapiMemory { + NvU32 hRmHandle; + NvU64 size; + + struct NvKmsKapiPrivSurfaceParams surfaceParams; +}; + +struct NvKmsKapiSurface { + NvKmsSurfaceHandle hKmsHandle; +}; + +static inline void *nvKmsKapiCalloc(size_t nmem, size_t size) +{ + return nvInternalAlloc(nmem * size, NV_TRUE); +} + +static inline void nvKmsKapiFree(void *ptr) +{ + return nvInternalFree(ptr); +} + +static inline NvU32 nvKmsKapiGenerateRmHandle(struct NvKmsKapiDevice *device) +{ + NvU32 handle; + + nvkms_sema_down(device->pSema); + handle = 
nvGenerateUnixRmHandle(&device->handleAllocator); + nvkms_sema_up(device->pSema); + + return handle; +} + +static inline void nvKmsKapiFreeRmHandle(struct NvKmsKapiDevice *device, + NvU32 handle) +{ + nvkms_sema_down(device->pSema); + nvFreeUnixRmHandle(&device->handleAllocator, handle); + nvkms_sema_up(device->pSema); +} + +NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible); + +NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible); + +struct NvKmsKapiChannelEvent* +nvKmsKapiAllocateChannelEvent(struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize); + +void +nvKmsKapiFreeChannelEvent(struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb); + +#endif /* __NVKMS_KAPI_INTERNAL_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h new file mode 100644 index 0000000..13fc8d9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_KAPI_NOTIFIERS_H__ + +#define __NVKMS_KAPI_NOTIFIERS_H__ + +#include "nvkms-kapi-internal.h" + +#define NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER 0x2 +#define NVKMS_KAPI_NOTIFIER_SIZE 0x10 + +static inline NvU32 NVKMS_KAPI_INC_NOTIFIER_INDEX(const NvU32 index) +{ + return (index + 1) % NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; +} + +static inline NvU32 NVKMS_KAPI_DEC_NOTIFIER_INDEX(const NvU32 index) +{ + if (index == 0) { + /* + * Wrap "backwards" to the largest allowed notifier index. 
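+ *
+ * For example, with NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER == 2, this
+ * yields DEC(0) == 1 and DEC(1) == 0, mirroring the modulo wrap in
+ * NVKMS_KAPI_INC_NOTIFIER_INDEX().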
+ */ + return NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER - 1; + } + + return index - 1; +} + +static inline NvU32 NVKMS_KAPI_NOTIFIER_INDEX(NvU32 head, NvU32 layer, + NvU32 index) +{ + NvU64 notifierIndex = 0; + + notifierIndex = head * + NVKMS_MAX_LAYERS_PER_HEAD * + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; + + notifierIndex += layer * + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; + + notifierIndex += index; + + return notifierIndex; +} + +static inline NvU32 NVKMS_KAPI_NOTIFIER_OFFSET(NvU32 head, + NvU32 layer, NvU32 index) +{ + return NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index) * + NVKMS_KAPI_NOTIFIER_SIZE; +} + +NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device, NvBool inVideoMemory); + +void nvKmsKapiFreeNotifiers(struct NvKmsKapiDevice *device); + +NvBool nvKmsKapiIsNotifierFinish(const struct NvKmsKapiDevice *device, + const NvU32 head, const NvU32 layer, + const NvU32 index); + +void nvKmsKapiNotifierSetNotBegun(struct NvKmsKapiDevice *device, + NvU32 head, NvU32 layer, NvU32 index); + +#endif /* __NVKMS_KAPI_NOTIFIERS_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h new file mode 100644 index 0000000..cd32ac8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#if !defined(__NVKMS_KAPI_PRIVATE_H__)
+#define __NVKMS_KAPI_PRIVATE_H__
+
+#include "nvtypes.h"
+#include "nvkms-api.h"
+
+struct NvKmsKapiPrivAllocateChannelEventParams {
+ NvU32 hClient;
+ NvU32 hChannel;
+};
+
+struct NvKmsKapiPrivSurfaceParams {
+ enum NvKmsSurfaceMemoryLayout layout;
+
+ struct {
+ struct {
+ NvU32 x;
+ NvU32 y;
+ NvU32 z;
+ } log2GobsPerBlock;
+
+ NvU32 pitchInBlocks;
+ NvBool genericMemory;
+ } blockLinear;
+};
+
+struct NvKmsKapiPrivImportMemoryParams {
+ int memFd;
+ struct NvKmsKapiPrivSurfaceParams surfaceParams;
+};
+
+struct NvKmsKapiPrivExportMemoryParams {
+ int memFd;
+};
+
+#endif /* !defined(__NVKMS_KAPI_PRIVATE_H__) */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h
new file mode 100644
index 0000000..e85351c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h
@@ -0,0 +1,1081 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#if !defined(__NVKMS_KAPI_H__)
+
+#include "nvtypes.h"
+
+#include "nv-gpu-info.h"
+#include "nvkms-api-types.h"
+#include "nvkms-format.h"
+
+#define __NVKMS_KAPI_H__
+
+#define NVKMS_KAPI_MAX_HEADS 4
+
+#define NVKMS_KAPI_MAX_CONNECTORS 16
+#define NVKMS_KAPI_MAX_CLONE_DISPLAYS 16
+
+#define NVKMS_KAPI_EDID_BUFFER_SIZE 2048
+
+#define NVKMS_KAPI_MODE_NAME_LEN 32
+
+/**
+ * \defgroup Objects
+ * @{
+ */
+
+struct NvKmsKapiDevice;
+struct NvKmsKapiMemory;
+struct NvKmsKapiSurface;
+struct NvKmsKapiChannelEvent;
+
+typedef NvU32 NvKmsKapiConnector;
+typedef NvU32 NvKmsKapiDisplay;
+
+/** @} */
+
+/**
+ * \defgroup FuncPtrs
+ * @{
+ */
+
+/*
+ * Note: The channel event proc should not call back into the NVKMS-KAPI
+ * driver; a callback into NVKMS-KAPI from the channel event proc may
+ * cause deadlock.
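+ *
+ * As an illustrative sketch (hypothetical client-side names), a
+ * conforming proc only records state or schedules deferred work:
+ *
+ *     static void myChannelEventProc(void *dataPtr, NvU32 dataU32)
+ *     {
+ *         struct MyContext *ctx = dataPtr;
+ *         ctx->pendingEvents++;    // record only
+ *         scheduleMyWork(ctx);     // NVKMS-KAPI calls happen there
+ *     }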
+ */ +typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32); + +/** @} */ + +/** + * \defgroup Structs + * @{ + */ + +struct NvKmsKapiDisplayModeTimings { + + NvU32 refreshRate; + NvU32 pixelClockHz; + NvU32 hVisible; + NvU32 hSyncStart; + NvU32 hSyncEnd; + NvU32 hTotal; + NvU32 hSkew; + NvU32 vVisible; + NvU32 vSyncStart; + NvU32 vSyncEnd; + NvU32 vTotal; + + struct { + + NvU32 interlaced : 1; + NvU32 doubleScan : 1; + NvU32 hSyncPos : 1; + NvU32 hSyncNeg : 1; + NvU32 vSyncPos : 1; + NvU32 vSyncNeg : 1; + + } flags; + + NvU32 widthMM; + NvU32 heightMM; + +}; + +struct NvKmsKapiDisplayMode { + struct NvKmsKapiDisplayModeTimings timings; + char name[NVKMS_KAPI_MODE_NAME_LEN]; +}; + +#define NVKMS_KAPI_LAYER_MAX 8 + +#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff +#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0 + +struct NvKmsKapiDeviceResourcesInfo { + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + NvU32 numConnectors; + NvKmsKapiConnector connectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + + struct { + NvU32 validCursorCompositionModes; + NvU64 supportedCursorSurfaceMemoryFormats; + + struct { + NvU16 validRRTransforms; + NvU32 validCompositionModes; + } layer[NVKMS_KAPI_LAYER_MAX]; + + NvU32 minWidthInPixels; + NvU32 maxWidthInPixels; + + NvU32 minHeightInPixels; + NvU32 maxHeightInPixels; + + NvU32 maxCursorSizeInPixels; + + NvU32 pitchAlignment; + + NvU32 hasVideoMemory; + + NvU8 genericPageKind; + + NvBool supportsSyncpts; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; + NvBool supportsHDR[NVKMS_KAPI_LAYER_MAX]; +}; + +#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType)) + +typedef enum NvKmsKapiMappingTypeRec { + NVKMS_KAPI_MAPPING_TYPE_USER = 1, + NVKMS_KAPI_MAPPING_TYPE_KERNEL = 2, +} NvKmsKapiMappingType; + +struct NvKmsKapiConnectorInfo { + + NvKmsKapiConnector handle; + + NvU32 physicalIndex; + + NvU32 headMask; + + NvKmsConnectorSignalFormat signalFormat; + NvKmsConnectorType type; + + /* + * List of connectors, not possible to serve together with this connector + * because they are competing for same resources. + */ + NvU32 numIncompatibleConnectors; + NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + +}; + +struct NvKmsKapiStaticDisplayInfo { + + NvKmsKapiDisplay handle; + + NvKmsKapiConnector connectorHandle; + + /* Set for DisplayPort MST displays (dynamic displays) */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + + NvBool internal; + + /* List of potential sibling display for cloning */ + NvU32 numPossibleClones; + NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + +}; + +struct NvKmsKapiSyncpt { + + /*! + * Possible syncpt use case in kapi. + * For pre-syncpt, use only id and value + * and for post-syncpt, use only fd. 
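+ *
+ * For illustration: a pre-syncpt (waited on before the flip) sets
+ * preSyncptSpecified, preSyncptId, and preSyncptValue; a post-syncpt
+ * (signaled when the flip completes) sets only postSyncptRequested,
+ * with the resulting fd returned in
+ * NvKmsKapiLayerReplyConfig::postSyncptFd.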
+ */ + NvBool preSyncptSpecified; + NvU32 preSyncptId; + NvU32 preSyncptValue; + + NvBool postSyncptRequested; +}; + +struct NvKmsKapiLayerConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + struct NvKmsRRParams rrParams; + struct NvKmsKapiSyncpt syncptParams; + + struct NvKmsHDRStaticMetadata hdrMetadata; + NvBool hdrMetadataSpecified; + + enum NvKmsOutputTf tf; + + NvU8 minPresentInterval; + NvBool tearing; + + NvU16 srcX, srcY; + NvU16 srcWidth, srcHeight; + + NvS16 dstX, dstY; + NvU16 dstWidth, dstHeight; + + enum NvKmsInputColorSpace inputColorSpace; +}; + +struct NvKmsKapiLayerRequestedConfig { + struct NvKmsKapiLayerConfig config; + struct { + NvBool surfaceChanged : 1; + NvBool srcXYChanged : 1; + NvBool srcWHChanged : 1; + NvBool dstXYChanged : 1; + NvBool dstWHChanged : 1; + } flags; +}; + +struct NvKmsKapiCursorRequestedConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + + NvS16 dstX, dstY; + + struct { + NvBool surfaceChanged : 1; + NvBool dstXYChanged : 1; + } flags; +}; + +struct NvKmsKapiHeadModeSetConfig { + /* + * DRM distinguishes between the head state "enabled" (the specified + * configuration for the head is valid, its resources are allocated, + * etc, but the head may not necessarily be currently driving pixels + * to its output resource) and the head state "active" (the head is + * "enabled" _and_ the head is actively driving pixels to its output + * resource). + * + * This distinction is for DPMS: + * + * DPMS On : enabled=true, active=true + * DPMS Off : enabled=true, active=false + * + * "Enabled" state is indicated by numDisplays != 0. + * "Active" state is indicated by bActive == true. 
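+     *
+     * A client can thus recover the DRM-style state with (illustrative
+     * sketch):
+     *
+     *     NvBool enabled = (config->numDisplays != 0);
+     *     NvBool active = enabled && config->bActive;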
+ */
+    NvBool bActive;
+
+    NvU32 numDisplays;
+    NvKmsKapiDisplay displays[NVKMS_KAPI_MAX_CLONE_DISPLAYS];
+
+    struct NvKmsKapiDisplayMode mode;
+};
+
+struct NvKmsKapiHeadRequestedConfig {
+    struct NvKmsKapiHeadModeSetConfig modeSetConfig;
+    struct {
+        NvBool activeChanged : 1;
+        NvBool displaysChanged : 1;
+        NvBool modeChanged : 1;
+    } flags;
+
+    struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig;
+
+    struct NvKmsKapiLayerRequestedConfig
+        layerRequestedConfig[NVKMS_KAPI_LAYER_MAX];
+};
+
+struct NvKmsKapiRequestedModeSetConfig {
+    NvU32 headsMask;
+    struct NvKmsKapiHeadRequestedConfig
+        headRequestedConfig[NVKMS_KAPI_MAX_HEADS];
+};
+
+struct NvKmsKapiLayerReplyConfig {
+    int postSyncptFd;
+};
+
+struct NvKmsKapiHeadReplyConfig {
+    struct NvKmsKapiLayerReplyConfig
+        layerReplyConfig[NVKMS_KAPI_LAYER_MAX];
+};
+
+struct NvKmsKapiModeSetReplyConfig {
+    struct NvKmsKapiHeadReplyConfig
+        headReplyConfig[NVKMS_KAPI_MAX_HEADS];
+};
+
+struct NvKmsKapiEventDisplayChanged {
+    NvKmsKapiDisplay display;
+};
+
+struct NvKmsKapiEventDynamicDisplayConnected {
+    NvKmsKapiDisplay display;
+};
+
+struct NvKmsKapiEventFlipOccurred {
+    NvU32 head;
+    NvU32 layer;
+};
+
+struct NvKmsKapiDpyCRC32 {
+    NvU32 value;
+    NvBool supported;
+};
+
+struct NvKmsKapiCrcs {
+    struct NvKmsKapiDpyCRC32 compositorCrc32;
+    struct NvKmsKapiDpyCRC32 rasterGeneratorCrc32;
+    struct NvKmsKapiDpyCRC32 outputCrc32;
+};
+
+struct NvKmsKapiEvent {
+    enum NvKmsEventType type;
+
+    struct NvKmsKapiDevice *device;
+
+    void *privateData;
+
+    union {
+        struct NvKmsKapiEventDisplayChanged displayChanged;
+        struct NvKmsKapiEventDynamicDisplayConnected dynamicDisplayConnected;
+        struct NvKmsKapiEventFlipOccurred flipOccurred;
+    } u;
+};
+
+struct NvKmsKapiAllocateDeviceParams {
+    /* [IN] GPU ID obtained from enumerateGpus() */
+    NvU32 gpuId;
+
+    /* [IN] Private data of device allocator */
+    void *privateData;
+    /* [IN] Event callback */
+    void (*eventCallback)(const struct NvKmsKapiEvent *event);
+};
+
+struct NvKmsKapiDynamicDisplayParams {
+    /* [IN] Display Handle returned by getDisplays() */
+    NvKmsKapiDisplay handle;
+
+    /* [OUT] Connection status */
+    NvU32 connected;
+
+    /* [IN/OUT] EDID of the connected monitor, or input EDID to use as an
+     *          override */
+    struct {
+        NvU16 bufferSize;
+        NvU8 buffer[NVKMS_KAPI_EDID_BUFFER_SIZE];
+    } edid;
+
+    /* [IN] Set true to override EDID */
+    NvBool overrideEdid;
+
+    /* [IN] Set true to force connected status */
+    NvBool forceConnected;
+
+    /* [IN] Set true to force disconnected status */
+    NvBool forceDisconnected;
+};
+
+struct NvKmsKapiCreateSurfaceParams {
+
+    /* [IN] Parameters of each plane */
+    struct {
+        /* [IN] Memory allocated for the plane, using allocateMemory() */
+        struct NvKmsKapiMemory *memory;
+        /* [IN] Offset within the memory object */
+        NvU32 offset;
+        /* [IN] Byte pitch of the plane */
+        NvU32 pitch;
+    } planes[NVKMS_MAX_PLANES_PER_SURFACE];
+
+    /* [IN] Width of the surface, in pixels */
+    NvU32 width;
+    /* [IN] Height of the surface, in pixels */
+    NvU32 height;
+
+    /* [IN] The format describing the number of planes and their content */
+    enum NvKmsSurfaceMemoryFormat format;
+
+    /* [IN] Whether to override the surface object's memory layout parameters
+     *      with those provided here. */
+    NvBool explicit_layout;
+    /* [IN] Whether the surface layout is block-linear or pitch. Used only
+     *      if explicit_layout is NV_TRUE */
+    enum NvKmsSurfaceMemoryLayout layout;
+    /* [IN] Block-linear block height of the surface. Used only when
+     *      explicit_layout is NV_TRUE and layout is
+     *      NvKmsSurfaceMemoryLayoutBlockLinear */
+    NvU8 log2GobsPerBlockY;
+};
+
+enum NvKmsKapiAllocationType {
+    NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT = 0,
+    NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER = 1,
+    NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
+};
+
+struct NvKmsKapiFunctionsTable {
+
+    /*!
+     * NVIDIA Driver version string.
+     */
+    const char *versionString;
+
+    /*!
+     * System Information.
+     */
+    struct {
+        /* Availability of write combining support for video memory */
+        NvBool bAllowWriteCombining;
+    } systemInfo;
+
+    /*!
+     * Enumerate the available physical GPUs that can be used with NVKMS.
+     *
+     * \param [out] gpuInfo  The information of the enumerated GPUs.
+     *                       It is an array of NVIDIA_MAX_GPUS elements.
+     *
+     * \return Count of enumerated GPUs.
+     */
+    NvU32 (*enumerateGpus)(nv_gpu_info_t *gpuInfo);
+
+    /*!
+     * Allocate an NVKMS KAPI device, through which resources on the GPU can
+     * be queried and allocated, and modesets performed.
+     *
+     * \param [in] params  Parameters required for device allocation.
+     *
+     * \return A valid device handle on success, NULL on failure.
+     */
+    struct NvKmsKapiDevice* (*allocateDevice)
+    (
+        const struct NvKmsKapiAllocateDeviceParams *params
+    );
+
+    /*!
+     * Frees a device allocated by allocateDevice() and all its resources.
+     *
+     * \param [in] device  A device returned by allocateDevice().
+     *                     This function is a no-op if device is not valid.
+     */
+    void (*freeDevice)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Grab ownership of the device; ownership is required to perform a
+     * modeset.
+     *
+     * \param [in] device  A device returned by allocateDevice().
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*grabOwnership)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Release ownership of the device.
+     *
+     * \param [in] device  A device returned by allocateDevice().
+     */
+    void (*releaseOwnership)(struct NvKmsKapiDevice *device);
+
+    /*!
+     * Registers for notification, via
+     * NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified
+     * in interestMask.
+     *
+     * This call does nothing if eventCallback is NULL when NvKmsKapiDevice
+     * is allocated.
+     *
+     * Supported events are DPY_CHANGED and DYNAMIC_DPY_CONNECTED.
+     *
+     * \param [in] device        A device returned by allocateDevice().
+     *
+     * \param [in] interestMask  A mask of the events to listen for.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*declareEventInterest)
+    (
+        const struct NvKmsKapiDevice *device,
+        const NvU32 interestMask
+    );
+
+    /*!
+     * Retrieve the various static resources (connectors, heads, etc.)
+     * present on the device, along with its capabilities.
+     *
+     * \param [in]     device  A device allocated using allocateDevice().
+     *
+     * \param [in/out] info    A pointer to an NvKmsKapiDeviceResourcesInfo
+     *                         struct that the call will fill out with the
+     *                         number of resources and their handles.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDeviceResourcesInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiDeviceResourcesInfo *info
+    );
+
+    /*!
+     * Retrieve the number of displays on a device and an array of handles to
+     * those displays.
+     *
+     * \param [in]     device         A device allocated using
+     *                                allocateDevice().
+     *
+     * \param [in/out] displayCount   The caller should set this to the size
+     *                                of the displayHandles array it passed
+     *                                in. The function will set it to the
+     *                                number of displays returned, or to the
+     *                                total number of displays on the device
+     *                                if displayHandles is NULL or the array
+     *                                is smaller than the number of displays.
+     *
+     * \param [out]    displayHandles An array of display handles with
+     *                                displayCount entries.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDisplays)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles
+    );
+
+    /*!
+     * Retrieve information about a specified connector.
+     *
+     * \param [in]  device     A device allocated using allocateDevice().
+     *
+     * \param [in]  connector  Which connector to query; a handle returned
+     *                         by getDeviceResourcesInfo().
+     *
+     * \param [out] info       A pointer to an NvKmsKapiConnectorInfo struct
+     *                         that the call will fill out with information
+     *                         about the connector.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getConnectorInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info
+    );
+
+    /*!
+     * Retrieve information about a specified display.
+     *
+     * \param [in]  device   A device allocated using allocateDevice().
+     *
+     * \param [in]  display  Which display to query; a handle returned by
+     *                       getDisplays().
+     *
+     * \param [out] info     A pointer to an NvKmsKapiStaticDisplayInfo struct
+     *                       that the call will fill out with information
+     *                       about the display.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getStaticDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info
+    );
+
+    /*!
+     * Detect or force the connection status/EDID of a display.
+     *
+     * \param [in]     device  A device allocated using allocateDevice().
+     *
+     * \param [in/out] params  Parameters containing the display
+     *                         handle, EDID and flags to force connection
+     *                         status.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getDynamicDisplayInfo)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiDynamicDisplayParams *params
+    );
+
+    /*!
+     * Allocate some unformatted video memory of the specified size.
+     *
+     * This function allocates video memory on the specified GPU.
+     * It should be suitable for mapping on the CPU as a pitch
+     * linear or block-linear surface.
+     *
+     * \param [in]     device        A device allocated using allocateDevice().
+     *
+     * \param [in]     layout        BlockLinear or Pitch.
+     *
+     * \param [in]     type          Allocation type.
+     *
+     * \param [in]     size          Size, in bytes, of the memory to allocate.
+     *
+     * \param [in/out] compressible  On input, non-zero if a compression
+     *                               backing store should be allocated for
+     *                               the memory; on output, non-zero if a
+     *                               compression backing store was
+     *                               allocated for the memory.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*allocateVideoMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        enum NvKmsSurfaceMemoryLayout layout,
+        enum NvKmsKapiAllocationType type,
+        NvU64 size,
+        NvU8 *compressible
+    );
+
+    /*!
+     * Allocate some unformatted system memory of the specified size.
+     *
+     * This function allocates system memory. It should be suitable
+     * for mapping on the CPU as a pitch linear or block-linear surface.
+     *
+     * \param [in]     device        A device allocated using allocateDevice().
+     *
+     * \param [in]     layout        BlockLinear or Pitch.
+     *
+     * \param [in]     type          Allocation type.
+     *
+     * \param [in]     size          Size, in bytes, of the memory to allocate.
+     *
+     * \param [in/out] compressible  On input, non-zero if a compression
+     *                               backing store should be allocated for
+     *                               the memory; on output, non-zero if a
+     *                               compression backing store was
+     *                               allocated for the memory.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*allocateSystemMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        enum NvKmsSurfaceMemoryLayout layout,
+        enum NvKmsKapiAllocationType type,
+        NvU64 size,
+        NvU8 *compressible
+    );
+
+    /*!
+     * Import some unformatted memory of the specified size.
+     *
+     * This function accepts a driver-specific parameter structure
+     * representing memory allocated elsewhere and imports it into an NVKMS
+     * KAPI memory object of the specified size.
+     *
+     * \param [in] device  A device allocated using allocateDevice(). The
+     *                     memory being imported must have been allocated
+     *                     against the same physical device this device
+     *                     object represents.
+     *
+     * \param [in] size    Size, in bytes, of the memory being imported.
+     *
+     * \param [in] nvKmsParamsUser  Userspace pointer to driver-specific
+     *                              parameters describing the memory object
+     *                              being imported.
+     *
+     * \param [in] nvKmsParamsSize  Size of the driver-specific parameter
+     *                              struct.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*importMemory)
+    (
+        struct NvKmsKapiDevice *device, NvU64 size,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Duplicate an existing NVKMS KAPI memory object, taking a reference on
+     * the underlying memory.
+     *
+     * \param [in] device     A device allocated using allocateDevice(). The
+     *                        memory being imported need not have been
+     *                        allocated against the same physical device this
+     *                        device object represents.
+     *
+     * \param [in] srcDevice  The device associated with srcMemory.
+     *
+     * \param [in] srcMemory  The memory object to duplicate.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory* (*dupMemory)
+    (
+        struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiDevice *srcDevice,
+        const struct NvKmsKapiMemory *srcMemory
+    );
+
+    /*!
+     * Export the specified memory object to a userspace object handle.
+     *
+     * This function accepts a driver-specific parameter structure
+     * representing a new handle to be assigned to an existing NVKMS KAPI
+     * memory object.
+     *
+     * \param [in] device  A device allocated using allocateDevice(). The
+     *                     memory being exported must have been created
+     *                     against or imported to the same device object, and
+     *                     the destination object handle must be valid for
+     *                     this device as well.
+     *
+     * \param [in] memory  The memory object to export.
+     *
+     * \param [in] nvKmsParamsUser  Userspace pointer to driver-specific
+     *                              parameters specifying a handle to add to
+     *                              the memory object being exported.
+     *
+     * \param [in] nvKmsParamsSize  Size of the driver-specific parameter
+     *                              struct.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*exportMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Free memory allocated using allocateMemory().
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] memory  Memory allocated using allocateMemory(). The
+     *                     memory object must no longer be in use.
+     */
+    void (*freeMemory)
+    (
+        struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory
+    );
+
+    /*!
+     * Create MMIO mappings for a memory object allocated using
+     * allocateMemory().
+     *
+     * \param [in]  device           A device allocated using allocateDevice().
+     *
+     * \param [in]  memory           Memory allocated using allocateMemory().
+     *
+     * \param [in]  type             Userspace or kernelspace mapping.
+     *
+     * \param [out] ppLinearAddress  The MMIO address where the memory object
+     *                               is mapped.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*mapMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+        void **ppLinearAddress
+    );
+
+    /*!
+     * Destroy MMIO mappings created for a memory object allocated using
+     * allocateMemory().
+     *
+     * \param [in] device          A device allocated using allocateDevice().
+     *
+     * \param [in] memory          Memory allocated using allocateMemory().
+     *
+     * \param [in] type            Userspace or kernelspace mapping.
+     *
+     * \param [in] pLinearAddress  The MMIO address returned by mapMemory().
+     */
+    void (*unmapMemory)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type,
+        const void *pLinearAddress
+    );
+
+    /*!
+     * Create a formatted surface from an NvKmsKapiMemory object.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] params  Parameters to the surface creation.
+     *
+     * \return A valid surface handle on success, NULL on failure.
+     */
+    struct NvKmsKapiSurface* (*createSurface)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiCreateSurfaceParams *params
+    );
+
+    /*!
+     * Destroy a surface created by createSurface().
+     *
+     * \param [in] device   A device allocated using allocateDevice().
+     *
+     * \param [in] surface  A surface created using createSurface().
+     */
+    void (*destroySurface)
+    (
+        struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface
+    );
+
+    /*!
+     * Enumerate the mode timings available on a given display.
+     *
+     * \param [in]  device         A device allocated using allocateDevice().
+     *
+     * \param [in]  display        A display handle returned by getDisplays().
+     *
+     * \param [in]  modeIndex      A mode index (any integer >= 0).
+     *
+     * \param [out] mode           A pointer to an NvKmsKapiDisplayMode struct
+     *                             that the call will fill out with the
+     *                             mode-timings of the mode at index modeIndex.
+     *
+     * \param [out] valid          Returns TRUE in this param if the
+     *                             mode-timings of the mode at index modeIndex
+     *                             are valid on the display.
+     *
+     * \param [out] preferredMode  Returns TRUE if this mode is marked as
+     *                             "preferred" by the EDID.
+     *
+     * \return A value >= 1 if more modes are available, 0 if no more modes
+     *         are available, and a value < 0 on failure.
+     */
+    int (*getDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, NvU32 modeIndex,
+        struct NvKmsKapiDisplayMode *mode, NvBool *valid,
+        NvBool *preferredMode
+    );
+
+    /*!
+     * Validate the given mode timings on a given display.
+     *
+     * \param [in] device   A device allocated using allocateDevice().
+     *
+     * \param [in] display  A display handle returned by getDisplays().
+     *
+     * \param [in] mode     A pointer to an NvKmsKapiDisplayMode struct
+     *                      filled in with the mode-timings to validate.
+     *
+     * \return NV_TRUE if the mode-timings are valid, NV_FALSE on failure.
+     */
+    NvBool (*validateDisplayMode)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode
+    );
+
+    /*!
+     * Apply a mode configuration to the device.
+     *
+     * The client may flag only the damaged parts of the configuration as
+     * changed, but it must still describe the entire configuration.
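+     *
+     * A typical check/commit sequence therefore calls this entry point
+     * twice with the same fully-described configuration (illustrative
+     * sketch; funcs and the config/reply variables are assumed to be
+     * set up by the caller):
+     *
+     *     // Validate only:
+     *     if (!funcs->applyModeSetConfig(device, &requestedConfig,
+     *                                    &replyConfig, NV_FALSE)) {
+     *         // configuration rejected
+     *     }
+     *     // Commit:
+     *     funcs->applyModeSetConfig(device, &requestedConfig,
+     *                               &replyConfig, NV_TRUE);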
+     *
+     * \param [in]  device           A device allocated using allocateDevice().
+     *
+     * \param [in]  requestedConfig  Parameters describing a device-wide
+     *                               display configuration.
+     *
+     * \param [out] replyConfig      Filled out with per-head, per-layer
+     *                               reply data, such as post-syncpt fds.
+     *
+     * \param [in]  commit           If set to 0, the call will only validate
+     *                               the mode configuration; it will not
+     *                               apply it.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*applyModeSetConfig)
+    (
+        struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiRequestedModeSetConfig *requestedConfig,
+        struct NvKmsKapiModeSetReplyConfig *replyConfig,
+        const NvBool commit
+    );
+
+    /*!
+     * Return the status of a flip.
+     *
+     * \param [in]  device   A device allocated using allocateDevice().
+     *
+     * \param [in]  head     A head returned by getDeviceResourcesInfo().
+     *
+     * \param [in]  layer    A layer index.
+     *
+     * \param [out] pending  Returns TRUE if the head has a pending flip for
+     *                       the given layer.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getFlipPendingStatus)
+    (
+        const struct NvKmsKapiDevice *device,
+        const NvU32 head,
+        const NvU32 layer,
+        NvBool *pending
+    );
+
+    /*!
+     * Allocate an event callback.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] proc    Function pointer to call when triggered.
+     *
+     * \param [in] data    Argument to pass into the function.
+     *
+     * \param [in] nvKmsParamsUser  Userspace pointer to driver-specific
+     *                              parameters describing the event callback
+     *                              being created.
+     *
+     * \param [in] nvKmsParamsSize  Size of the driver-specific parameter
+     *                              struct.
+     *
+     * \return struct NvKmsKapiChannelEvent* on success, NULL on failure.
+     */
+    struct NvKmsKapiChannelEvent* (*allocateChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        NvKmsChannelEventProc *proc,
+        void *data,
+        NvU64 nvKmsParamsUser,
+        NvU64 nvKmsParamsSize
+    );
+
+    /*!
+     * Free an event callback.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] cb      struct NvKmsKapiChannelEvent* returned from
+     *                     allocateChannelEvent().
+     */
+    void (*freeChannelEvent)
+    (
+        struct NvKmsKapiDevice *device,
+        struct NvKmsKapiChannelEvent *cb
+    );
+
+    /*!
+     * Get the 32-bit CRC value for the last contents presented on the
+     * specified head.
+     *
+     * \param [in]  device  A device allocated using allocateDevice().
+     *
+     * \param [in]  head    A head returned by getDeviceResourcesInfo().
+     *
+     * \param [out] crc32   The CRC32 generated from the content currently
+     *                      presented onto the given head.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getCRC32)
+    (
+        struct NvKmsKapiDevice *device,
+        NvU32 head,
+        struct NvKmsKapiCrcs *crc32
+    );
+
+    /*!
+     * Get the list of allocation pages corresponding to the specified memory
+     * object.
+     *
+     * \param [in]  device     A device allocated using allocateDevice().
+     *
+     * \param [in]  memory     The memory object for which to find the list
+     *                         of allocation pages and the number of pages.
+     *
+     * \param [out] pPages     A pointer to the list of NvU64 pointers. The
+     *                         caller should free pPages on success using
+     *                         freeMemoryPages().
+     *
+     * \param [out] pNumPages  The total number of NvU64 pointers returned
+     *                         in pPages.
+     *
+     * \return NV_TRUE on success, NV_FALSE on failure.
+     */
+    NvBool (*getMemoryPages)
+    (
+        const struct NvKmsKapiDevice *device,
+        const struct NvKmsKapiMemory *memory,
+        NvU64 **pPages,
+        NvU32 *pNumPages
+    );
+
+    /*!
+     * Free the list of allocation pages returned by getMemoryPages().
+     *
+     * \param [in] pPages  A list of NvU64 pointers allocated by
+     *                     getMemoryPages().
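+     *
+     * Typical pairing with getMemoryPages() (illustrative sketch):
+     *
+     *     NvU64 *pages = NULL;
+     *     NvU32 numPages = 0;
+     *
+     *     if (funcs->getMemoryPages(device, memory, &pages, &numPages)) {
+     *         // ... use pages[0..numPages-1] ...
+     *         funcs->freeMemoryPages(pages);
+     *     }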
+     *
+     */
+    void (*freeMemoryPages)
+    (
+        NvU64 *pPages
+    );
+
+    /*
+     * Import an SGT as a memory handle.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] sgt     SGT pointer.
+     * \param [in] gem     GEM pointer that pinned the SGT, to be refcounted.
+     *
+     * \param [in] limit   Size, in bytes, of the memory backed by the SGT.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromSgt)(struct NvKmsKapiDevice *device,
+                                    NvP64 sgt,
+                                    NvP64 gem,
+                                    NvU32 limit);
+
+    /*
+     * Import a dma-buf as a memory handle.
+     *
+     * \param [in] device  A device allocated using allocateDevice().
+     *
+     * \param [in] dmaBuf  DMA-BUF pointer.
+     *
+     * \param [in] limit   Size, in bytes, of the dma-buf.
+     *
+     * \return A valid memory handle on success, NULL on failure.
+     */
+    struct NvKmsKapiMemory*
+    (*getSystemMemoryHandleFromDmaBuf)(struct NvKmsKapiDevice *device,
+                                       NvP64 dmaBuf,
+                                       NvU32 limit);
+
+};
+
+/** @} */
+
+/**
+ * \defgroup Functions
+ * @{
+ */
+
+NvBool nvKmsKapiGetFunctionsTable
+(
+    struct NvKmsKapiFunctionsTable *funcsTable
+);
+
+/** @} */
+
+#endif /* defined(__NVKMS_KAPI_H__) */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c
new file mode 100644
index 0000000..3ca110d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c
@@ -0,0 +1,150 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-rmapi.h" + +#include "nvkms-kapi.h" +#include "nvkms-kapi-private.h" +#include "nvkms-kapi-internal.h" + +#include "class/cl0005.h" + +struct NvKmsKapiChannelEvent { + struct NvKmsKapiDevice *device; + + NvKmsChannelEventProc *proc; + void *data; + + struct NvKmsKapiPrivAllocateChannelEventParams nvKmsParams; + + NvHandle hCallback; + NVOS10_EVENT_KERNEL_CALLBACK_EX rmCallback; +}; + +static void ChannelEventHandler(void *arg1, void *arg2, NvHandle hEvent, + NvU32 data, NvU32 status) +{ + struct NvKmsKapiChannelEvent *cb = arg1; + cb->proc(cb->data, 0); +} + +struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent +( + struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize +) +{ + int status; + NvU32 ret; + + struct NvKmsKapiChannelEvent *cb = NULL; + NV0005_ALLOC_PARAMETERS eventParams = { }; + + if (device == NULL || proc == NULL) { + goto fail; + } + + cb = nvKmsKapiCalloc(1, sizeof(*cb)); + if (cb == NULL) { + goto fail; + } + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(cb->nvKmsParams)) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameter size mismatch - " + "expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(cb->nvKmsParams), nvKmsParamsSize); + goto fail; + } + + status = nvkms_copyin(&cb->nvKmsParams, + nvKmsParamsUser, sizeof(cb->nvKmsParams)); + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameters could not be read from " + "userspace"); + goto fail; + } + + cb->device = device; + + cb->proc = proc; + cb->data = data; + + cb->rmCallback.func = ChannelEventHandler; + cb->rmCallback.arg = cb; + + cb->hCallback = nvGenerateUnixRmHandle(&device->handleAllocator); + if (cb->hCallback == 0x0) { + nvKmsKapiLogDeviceDebug(device, + "Failed to allocate event callback handle"); + goto fail; + } + + eventParams.hParentClient = cb->nvKmsParams.hClient; + eventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + eventParams.notifyIndex = 0; + eventParams.data = NV_PTR_TO_NvP64(&cb->rmCallback); + + ret = nvRmApiAlloc(device->hRmClient, + cb->nvKmsParams.hChannel, + cb->hCallback, + NV01_EVENT_KERNEL_CALLBACK_EX, + &eventParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate event callback"); + nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback); + goto fail; + } + + return cb; +fail: + nvKmsKapiFree(cb); + return NULL; +} + +void nvKmsKapiFreeChannelEvent +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb +) +{ + if (device == NULL || cb == NULL) { + return; + } + + nvRmApiFree(device->hRmClient, + device->hRmClient, + cb->hCallback); + + nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback); + + nvKmsKapiFree(cb); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c new file mode 100644 index 0000000..ef44285 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-api.h" +#include "nvkms-sync.h" +#include "nvkms-rmapi.h" +#include "nvkms-kapi-notifiers.h" + +#define NVKMS_KAPI_MAX_NOTIFIERS \ + (NVKMS_KAPI_MAX_HEADS * \ + NVKMS_MAX_LAYERS_PER_HEAD * \ + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER) + +void nvKmsKapiFreeNotifiers(struct NvKmsKapiDevice *device) +{ + if (device->notifier.hKmsHandle != 0) { + struct NvKmsUnregisterSurfaceParams paramsUnreg = { }; + NvBool status; + + paramsUnreg.request.deviceHandle = device->hKmsDevice; + paramsUnreg.request.surfaceHandle = device->notifier.hKmsHandle; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_UNREGISTER_SURFACE, + ¶msUnreg, sizeof(paramsUnreg)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_UNREGISTER_SURFACE failed"); + } + + device->notifier.hKmsHandle = 0; + } + + if (device->notifier.mapped) { + NV_STATUS status; + + status = nvRmApiUnmapMemory(device->hRmClient, + device->hRmSubDevice, + device->notifier.hRmHandle, + device->notifier.pLinearAddress, + 0); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "UnmapMemory failed with error code 0x%08x", + status); + } + + device->notifier.mapped = NV_FALSE; + } + + if (device->notifier.hRmHandle != 0) { + NvU32 status; + + status = nvRmApiFree(device->hRmClient, + device->hRmDevice, + device->notifier.hRmHandle); + + if (status != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "RmFree failed with error code 0x%08x", + status); + } + + nvFreeUnixRmHandle(&device->handleAllocator, device->notifier.hRmHandle); + device->notifier.hRmHandle = 0; + } +} + +static void InitNotifier(struct NvKmsKapiDevice *device, + NvU32 head, NvU32 layer, NvU32 index) +{ + nvKmsResetNotifier(device->notifier.format, + (layer == NVKMS_OVERLAY_LAYER), + NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index), + device->notifier.pLinearAddress); +} + +#define NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE 0x1000 + +NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device, + NvBool inVideoMemory) +{ + struct NvKmsRegisterSurfaceParams surfParams = {}; + NV_STATUS status = 0; + NvU8 compressible = 0; + NvBool ret; + + ct_assert((NVKMS_KAPI_MAX_NOTIFIERS * NVKMS_KAPI_NOTIFIER_SIZE) <= + (NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE)); + + ct_assert(NVKMS_KAPI_NOTIFIER_SIZE >= sizeof(NvNotification)); + nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >= + 
nvKmsSizeOfNotifier(device->notifier.format, TRUE /* overlay */)); + nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >= + nvKmsSizeOfNotifier(device->notifier.format, FALSE /* overlay */)); + + device->notifier.hRmHandle = + nvGenerateUnixRmHandle(&device->handleAllocator); + + if (device->notifier.hRmHandle == 0x0) { + nvKmsKapiLogDeviceDebug( + device, + "nvGenerateUnixRmHandle() failed"); + return NV_FALSE; + } + + if (inVideoMemory) { + ret = nvKmsKapiAllocateVideoMemory(device, + device->notifier.hRmHandle, + NvKmsSurfaceMemoryLayoutPitch, + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER, + &compressible); + } else { + ret = nvKmsKapiAllocateSystemMemory(device, + device->notifier.hRmHandle, + NvKmsSurfaceMemoryLayoutPitch, + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER, + &compressible); + } + + if (!ret) { + nvFreeUnixRmHandle(&device->handleAllocator, device->notifier.hRmHandle); + device->notifier.hRmHandle = 0x0; + goto failed; + } + + status = nvRmApiMapMemory(device->hRmClient, + device->hRmSubDevice, + device->notifier.hRmHandle, + 0, + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE, + &device->notifier.pLinearAddress, + 0); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "MapMemory failed with error code 0x%08x", + status); + goto failed; + } + + device->notifier.mapped = NV_TRUE; + + surfParams.request.deviceHandle = device->hKmsDevice; + surfParams.request.useFd = FALSE; + surfParams.request.rmClient = device->hRmClient; + + surfParams.request.widthInPixels = NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE; + surfParams.request.heightInPixels = 1; + surfParams.request.layout = NvKmsSurfaceMemoryLayoutPitch; + surfParams.request.format = NvKmsSurfaceMemoryFormatI8; + surfParams.request.log2GobsPerBlockY = 0; + surfParams.request.isoType = NVKMS_MEMORY_NISO; + + surfParams.request.planes[0].u.rmObject = device->notifier.hRmHandle; + surfParams.request.planes[0].pitch = NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE; + surfParams.request.planes[0].rmObjectSizeInBytes = + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE; + + if (!nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_REGISTER_SURFACE, + &surfParams, sizeof(surfParams))) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_REGISTER_SURFACE failed"); + goto failed; + } + + device->notifier.hKmsHandle = surfParams.reply.surfaceHandle; + + /* Init Notifiers */ + + { + NvU32 head; + + for (head = 0; head < device->numHeads; head++) { + NvU32 layer; + + for (layer = 0; layer < NVKMS_MAX_LAYERS_PER_HEAD; layer++) { + NvU32 index; + + for (index = 0; + index < NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; index++) { + InitNotifier(device, head, layer, index); + } + } + } + } + + return NV_TRUE; + +failed: + + nvKmsKapiFreeNotifiers(device); + + return NV_FALSE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c new file mode 100644 index 0000000..4cc5211 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c @@ -0,0 +1,3188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvUnixVersion.h"
+
+#include "nvidia-modeset-os-interface.h"
+
+#include "nvkms-api.h"
+#include "nvkms-rmapi.h"
+
+#include "nvkms-kapi.h"
+#include "nvkms-kapi-private.h"
+#include "nvkms-kapi-internal.h"
+#include "nvkms-kapi-notifiers.h"
+
+#include <class/cl0000.h> /* NV01_ROOT/NV01_NULL_OBJECT */
+#include <class/cl003e.h> /* NV01_MEMORY_SYSTEM */
+#include <class/cl0080.h> /* NV01_DEVICE */
+#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
+#include <class/cl0071.h> /* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */
+#include <class/cl2080.h> /* NV20_SUBDEVICE_0 */
+
+#include <ctrl/ctrl0000/ctrl0000gpu.h> /* NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 */
+#include <ctrl/ctrl0000/ctrl0000unix.h> /* NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD */
+#include <ctrl/ctrl0080/ctrl0080gpu.h> /* NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES */
+#include <ctrl/ctrl2080/ctrl2080unix.h> /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */
+
+#include "ctrl/ctrl003e.h" /* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES */
+
+
+ct_assert(NVKMS_KAPI_LAYER_PRIMARY_IDX == NVKMS_MAIN_LAYER);
+ct_assert(NVKMS_KAPI_LAYER_MAX == NVKMS_MAX_LAYERS_PER_HEAD);
+
+/* XXX Move to NVKMS */
+#define NV_EVO_PITCH_ALIGNMENT 0x100
+
+#define NVKMS_KAPI_SUPPORTED_EVENTS_MASK             \
+    ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) |           \
+     (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | \
+     (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED))
+
+static NvU32 EnumerateGpus(nv_gpu_info_t *gpuInfo)
+{
+    return nvkms_enumerate_gpus(gpuInfo);
+}
+
+/*
+ * Helper function to free RM objects allocated for NvKmsKapiDevice.
+ */
+static void RmFreeDevice(struct NvKmsKapiDevice *device)
+{
+    if (device->hRmSubDevice != 0x0) {
+        nvRmApiFree(device->hRmClient,
+                    device->hRmDevice,
+                    device->hRmSubDevice);
+        nvKmsKapiFreeRmHandle(device, device->hRmSubDevice);
+        device->hRmSubDevice = 0x0;
+    }
+
+    /* Free RM device object */
+
+    if (device->hRmDevice != 0x0) {
+        nvRmApiFree(device->hRmClient,
+                    device->hRmClient,
+                    device->hRmDevice);
+        nvKmsKapiFreeRmHandle(device, device->hRmDevice);
+
+        device->hRmDevice = 0x0;
+    }
+
+    nvTearDownUnixRmHandleAllocator(&device->handleAllocator);
+
+    device->deviceInstance = 0;
+
+    /* Free RM client */
+
+    if (device->hRmClient != 0x0) {
+        nvRmApiFree(device->hRmClient,
+                    device->hRmClient,
+                    device->hRmClient);
+
+        device->hRmClient = 0x0;
+    }
+}
+
+/*
+ * Helper function to allocate RM objects for NvKmsKapiDevice.
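+ *
+ * Objects are created in dependency order: an RM client, then the
+ * per-GPU device object, then a single subdevice object (configurations
+ * with more than one subdevice are rejected). On any failure, partially
+ * constructed state is torn down via RmFreeDevice().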
+ */
+static NvBool RmAllocateDevice(struct NvKmsKapiDevice *device)
+{
+    NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 };
+    NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS idInfoParams = { };
+    NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 };
+    NV0080_ALLOC_PARAMETERS allocParams = { };
+
+    NvU32 hRmDevice, hRmSubDevice;
+    NvU32 ret;
+
+    /* Allocate RM client */
+
+    ret = nvRmApiAlloc(NV01_NULL_OBJECT,
+                       NV01_NULL_OBJECT,
+                       NV01_NULL_OBJECT,
+                       NV01_ROOT,
+                       &device->hRmClient);
+
+    if (ret != NVOS_STATUS_SUCCESS || device->hRmClient == 0x0) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM client");
+        goto failed;
+    }
+
+    /* Query device instance */
+
+    idInfoParams.gpuId = device->gpuId;
+
+    ret = nvRmApiControl(device->hRmClient,
+                         device->hRmClient,
+                         NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
+                         &idInfoParams,
+                         sizeof(idInfoParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to query device instance");
+        goto failed;
+    }
+
+    device->deviceInstance = idInfoParams.deviceInstance;
+    device->isSOC =
+        FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE,
+                     idInfoParams.gpuFlags);
+
+    /* Initialize RM handle allocator */
+
+    if (!nvInitUnixRmHandleAllocator(&device->handleAllocator,
+                                     device->hRmClient,
+                                     device->deviceInstance + 1)) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to initialize RM handle allocator");
+        goto failed;
+    }
+
+    /* Allocate RM device object */
+
+    hRmDevice = nvKmsKapiGenerateRmHandle(device);
+
+    if (hRmDevice == 0x0) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
+        goto failed;
+    }
+
+    allocParams.deviceId = device->deviceInstance;
+
+    allocParams.hClientShare = device->hRmClient;
+
+    ret = nvRmApiAlloc(device->hRmClient,
+                       device->hRmClient,
+                       hRmDevice,
+                       NV01_DEVICE_0,
+                       &allocParams);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM device object");
+        nvKmsKapiFreeRmHandle(device, hRmDevice);
+        goto failed;
+    }
+
+    device->hRmDevice = hRmDevice;
+
+    ret = nvRmApiControl(device->hRmClient,
+                         device->hRmDevice,
+                         NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES,
+                         &getNumSubDevicesParams,
+                         sizeof(getNumSubDevicesParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to determine number of GPUs");
+        goto failed;
+    }
+
+    if (getNumSubDevicesParams.numSubDevices != 1) {
+        nvKmsKapiLogDeviceDebug(
+            device,
+            "Unsupported number of GPUs: %d",
+            getNumSubDevicesParams.numSubDevices);
+        goto failed;
+    }
+
+    hRmSubDevice = nvKmsKapiGenerateRmHandle(device);
+
+    if (hRmSubDevice == 0x0) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle");
+        goto failed;
+    }
+
+    subdevAllocParams.subDeviceId = 0;
+
+    ret = nvRmApiAlloc(device->hRmClient,
+                       device->hRmDevice,
+                       hRmSubDevice,
+                       NV20_SUBDEVICE_0,
+                       &subdevAllocParams);
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvKmsKapiLogDeviceDebug(device, "Failed to initialize subDevice");
+        nvKmsKapiFreeRmHandle(device, hRmSubDevice);
+        goto failed;
+    }
+
+    device->hRmSubDevice = hRmSubDevice;
+
+    return NV_TRUE;
+
+failed:
+
+    RmFreeDevice(device);
+
+    return NV_FALSE;
+}
+
+/*
+ * Helper function to free NVKMS objects allocated for NvKmsKapiDevice.
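+ *
+ * Teardown mirrors KmsAllocateDevice() in reverse: the notifier memory
+ * is released first, then the NVKMS device object, and finally the
+ * NVKMS open instance is closed.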
+ */ +static void KmsFreeDevice(struct NvKmsKapiDevice *device) +{ + /* Free notifier memory */ + + nvKmsKapiFreeNotifiers(device); + + /* Free NVKMS device */ + + if (device->hKmsDevice != 0x0) { + struct NvKmsFreeDeviceParams paramsFree = { }; + + paramsFree.request.deviceHandle = device->hKmsDevice; + + nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_FREE_DEVICE, + ¶msFree, sizeof(paramsFree)); + + device->hKmsDevice = device->hKmsDisp = 0x0; + } + + /* Close NVKMS */ + + if (device->pKmsOpen != NULL) { + nvkms_close_from_kapi(device->pKmsOpen); + device->pKmsOpen = NULL; + } +} + +/* + * Helper function to allocate NVKMS objects for NvKmsKapiDevice. + */ +static NvBool KmsAllocateDevice(struct NvKmsKapiDevice *device) +{ + struct NvKmsAllocDeviceParams *paramsAlloc; + NvBool status; + NvBool inVideoMemory = FALSE; + NvU32 head; + NvBool ret = FALSE; + NvU32 layer; + + paramsAlloc = nvKmsKapiCalloc(1, sizeof(*paramsAlloc)); + if (paramsAlloc == NULL) { + return FALSE; + } + + /* Open NVKMS */ + + device->pKmsOpen = nvkms_open_from_kapi(device); + + if (device->pKmsOpen == NULL) { + nvKmsKapiLogDeviceDebug(device, "Failed to Open NVKMS"); + goto done; + } + + /* Allocate NVKMS device */ + + nvkms_strncpy( + paramsAlloc->request.versionString, + NV_VERSION_STRING, + sizeof(paramsAlloc->request.versionString)); + + if (device->isSOC) { + paramsAlloc->request.deviceId = NVKMS_DEVICE_ID_TEGRA; + } else { + paramsAlloc->request.deviceId = device->deviceInstance; + } + paramsAlloc->request.sliMosaic = NV_FALSE; + paramsAlloc->request.enableConsoleHotplugHandling = NV_TRUE; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_ALLOC_DEVICE, + paramsAlloc, sizeof(*paramsAlloc)); + + if (!status || + paramsAlloc->reply.status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + + if (paramsAlloc->reply.status == + NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE) { + nvKmsKapiLogDeviceDebug( + device, + "Display hardware is not available; falling back to " + "displayless mode"); + + ret = TRUE; + goto done; + } + + nvKmsKapiLogDeviceDebug( + device, + "Failed to NVKM device %u(%u): %d %d\n", + device->gpuId, + paramsAlloc->request.deviceId, + status, + paramsAlloc->reply.status); + + goto done; + } + + device->hKmsDevice = paramsAlloc->reply.deviceHandle; + + device->caps.cursorCompositionCaps = + paramsAlloc->reply.cursorCompositionCaps; + + device->caps.overlayCompositionCaps = + paramsAlloc->reply.layerCaps[NVKMS_OVERLAY_LAYER].composition; + + device->caps.validLayerRRTransforms = + paramsAlloc->reply.validLayerRRTransforms; + + device->caps.maxWidthInPixels = paramsAlloc->reply.maxWidthInPixels; + device->caps.maxHeightInPixels = paramsAlloc->reply.maxHeightInPixels; + device->caps.maxCursorSizeInPixels = paramsAlloc->reply.maxCursorSize; + device->caps.genericPageKind = paramsAlloc->reply.genericPageKind; + + /* XXX Add LUT support */ + + device->numHeads = paramsAlloc->reply.numHeads; + + for (head = 0; head < device->numHeads; head++) { + if (paramsAlloc->reply.numLayers[head] < 2) { + goto done; + } + device->numLayers[head] = paramsAlloc->reply.numLayers[head]; + } + + for (layer = 0; layer < NVKMS_KAPI_LAYER_MAX; layer++) { + device->supportedSurfaceMemoryFormats[layer] = + paramsAlloc->reply.layerCaps[layer].supportedSurfaceMemoryFormats; + device->supportsHDR[layer] = paramsAlloc->reply.layerCaps[layer].supportsHDR; + } + + if (paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) { + device->notifier.format = 
NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY; + } else if (paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_FOUR_WORD)) { + device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD; + } else { + nvAssert(paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_LEGACY)); + device->notifier.format = NVKMS_NISO_FORMAT_LEGACY; + } + + /* XXX Add support for SLI/multiple display engines per device */ + if (paramsAlloc->reply.numDisps != 1) + { + nvKmsKapiLogDeviceDebug(device, "Found unsupported SLI configuration"); + goto done; + } + + device->hKmsDisp = paramsAlloc->reply.dispHandles[0]; + device->dispIdx = 0; + + device->subDeviceMask = paramsAlloc->reply.subDeviceMask; + + device->isoIOCoherencyModes = paramsAlloc->reply.isoIOCoherencyModes; + device->nisoIOCoherencyModes = paramsAlloc->reply.nisoIOCoherencyModes; + + device->supportsSyncpts = paramsAlloc->reply.supportsSyncpts; + + if (paramsAlloc->reply.nIsoSurfacesInVidmemOnly) { + inVideoMemory = TRUE; + } + + /* Allocate notifier memory */ + if (!nvKmsKapiAllocateNotifiers(device, inVideoMemory)) { + nvKmsKapiLogDebug( + "Failed to allocate Notifier objects for GPU ID 0x%08x", + device->gpuId); + goto done; + } + + ret = NV_TRUE; + +done: + if (!ret) { + KmsFreeDevice(device); + } + + nvKmsKapiFree(paramsAlloc); + + return ret; +} + +static void FreeDevice(struct NvKmsKapiDevice *device) +{ + /* Free NVKMS objects allocated for NvKmsKapiDevice */ + + KmsFreeDevice(device); + + /* Free RM objects allocated for NvKmsKapiDevice */ + + RmFreeDevice(device); + + /* Lower the reference count of gpu. */ + + nvkms_close_gpu(device->gpuId); + + if (device->pSema != NULL) { + nvkms_sema_free(device->pSema); + } + + nvKmsKapiFree(device); +} + +NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible) +{ + NvU32 ret; + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + const NvKmsDispIOCoherencyModes *pIOCoherencyModes = NULL; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = size; + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + memAllocParams.attr); + if (*compressible) { + /* + * RM will choose a compressed page kind and hence allocate + * comptags for color surfaces >= 32bpp. The actual kind + * chosen isn't important, as it can be overridden by creating + * a virtual alloc with a different kind when mapping the + * memory into the GPU. + */ + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32, + memAllocParams.attr); + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN, + memAllocParams.attr); + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + memAllocParams.attr); + break; + + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout"); + return NV_FALSE; + } + + switch (type) { + case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT: + /* XXX Note compression and scanout do not work together on + * any current GPUs. However, some use cases do involve scanning + * out a compression-capable surface: + * + * 1) Mapping the compressible surface as non-compressed when + * generating its content. + * + * 2) Using decompress-in-place to decompress the surface content + * before scanning it out. 
+ * + * Hence creating compressed allocations of TYPE_SCANOUT is allowed. + */ + + pIOCoherencyModes = &device->isoIOCoherencyModes; + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER: + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + nvKmsKapiLogDeviceDebug(device, + "Attempting creation of BlockLinear notifier memory"); + return NV_FALSE; + } + + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY, + _YES, memAllocParams.attr2); + + pIOCoherencyModes = &device->nisoIOCoherencyModes; + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN: + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_NO_SCANOUT; + break; + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type"); + return NV_FALSE; + } + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, + memAllocParams.attr); + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + memAllocParams.attr2); + + if (pIOCoherencyModes == NULL || !pIOCoherencyModes->coherent) { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_COMBINE, memAllocParams.attr); + } else { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_BACK, memAllocParams.attr); + } + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, + memAllocParams.attr); + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "nvRmApiAlloc failed with error code 0x%08x", + ret); + + return NV_FALSE; + } + + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, + memAllocParams.attr)) { + *compressible = 0; + } else { + *compressible = 1; + } + + return TRUE; +} + +NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + NvU32 ret; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = size; + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + memAllocParams.attr); + + if (*compressible) { + /* + * RM will choose a compressed page kind and hence allocate + * comptags for color surfaces >= 32bpp. The actual kind + * chosen isn't important, as it can be overridden by creating + * a virtual alloc with a different kind when mapping the + * memory into the GPU. + */ + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32, + memAllocParams.attr); + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN, + memAllocParams.attr); + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + memAllocParams.attr); + break; + + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout"); + return NV_FALSE; + } + + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, + memAllocParams.attr); + memAllocParams.attr2 = + FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + memAllocParams.attr2); + + switch (type) { + case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT: + /* XXX [JRJ] Not quite right. This can also be used to allocate + * cursor images. The stuff RM does with this field is kind of + * black magic, and I can't tell if it actually matters. 
+ */ + memAllocParams.type = NVOS32_TYPE_PRIMARY; + + memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT; + memAllocParams.flags |= + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | /* Pick up above EVO alignment */ + NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; /* X sets this for cursors */ + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, + memAllocParams.attr); + + /* XXX [JRJ] Note compression and scanout do not work together on + * any current GPUs. However, some use cases do involve scanning + * out a compression-capable surface: + * + * 1) Mapping the compressible surface as non-compressed when + * generating its content. + * + * 2) Using decompress-in-place to decompress the surface content + * before scanning it out. + * + * Hence creating compressed allocations of TYPE_SCANOUT is allowed. + */ + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER: + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + nvKmsKapiLogDeviceDebug(device, + "Attempting creation of BlockLinear notifier memory"); + return NV_FALSE; + } + + memAllocParams.type = NVOS32_TYPE_DMA; + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, + memAllocParams.attr); + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, + memAllocParams.attr); + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN: + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.flags |= + NVOS32_ALLOC_FLAGS_NO_SCANOUT | + NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, + memAllocParams.attr); + break; + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type"); + return NV_FALSE; + } + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "VidHeapControl failed with error code 0x%08x", + ret); + + return NV_FALSE; + } + + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, + memAllocParams.attr)) { + *compressible = 0; + } else { + *compressible = 1; + } + + return NV_TRUE; +} + +static struct NvKmsKapiDevice* AllocateDevice +( + const struct NvKmsKapiAllocateDeviceParams *params +) +{ + struct NvKmsKapiDevice *device = NULL; + + device = nvKmsKapiCalloc(1, sizeof(*device)); + + if (device == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate memory for NvKmsKapiDevice of GPU ID 0x%08x", + params->gpuId); + goto failed; + } + + device->pSema = nvkms_sema_alloc(); + + if (device->pSema == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate semaphore for NvKmsKapiDevice of GPU ID 0x%08x", + params->gpuId); + goto failed; + } + + /* Raise the reference count of gpu. 
*/ + + if (!nvkms_open_gpu(params->gpuId)) { + nvKmsKapiLogDebug("Failed to open GPU ID 0x%08x", params->gpuId); + goto failed; + } + + device->gpuId = params->gpuId; + + nvKmsKapiLogDebug( + "Allocating NvKmsKapiDevice 0x%p for GPU ID 0x%08x", + device, + device->gpuId); + + /* Allocate RM object for NvKmsKapiDevice */ + + if (!RmAllocateDevice(device)) { + nvKmsKapiLogDebug( + "Failed to allocate RM objects for GPU ID 0x%08x", + device->gpuId); + goto failed; + } + + /* Allocate NVKMS objects for NvKmsKapiDevice */ + + if (!KmsAllocateDevice(device)) { + nvKmsKapiLogDebug( + "Failed to allocate NVKMS objects for GPU ID 0x%08x", + device->gpuId); + goto failed; + } + + device->privateData = params->privateData; + device->eventCallback = params->eventCallback; + + return device; + +failed: + + FreeDevice(device); + + return NULL; +} + +static NvBool GrabOwnership(struct NvKmsKapiDevice *device) +{ + struct NvKmsGrabOwnershipParams paramsGrab = { }; + + if (device->hKmsDevice == 0x0) { + return NV_TRUE; + } + + paramsGrab.request.deviceHandle = device->hKmsDevice; + + return nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_GRAB_OWNERSHIP, + ¶msGrab, sizeof(paramsGrab)); + +} + +static void ReleaseOwnership(struct NvKmsKapiDevice *device) +{ + struct NvKmsReleaseOwnershipParams paramsRelease = { }; + + if (device->hKmsDevice == 0x0) { + return; + } + + paramsRelease.request.deviceHandle = device->hKmsDevice; + + nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_RELEASE_OWNERSHIP, + ¶msRelease, sizeof(paramsRelease)); +} + +static NvBool DeclareEventInterest +( + const struct NvKmsKapiDevice *device, + const NvU32 interestMask +) +{ + struct NvKmsDeclareEventInterestParams kmsEventParams = { }; + + if (device->hKmsDevice == 0x0 || device->eventCallback == NULL) { + return NV_TRUE; + } + + kmsEventParams.request.interestMask = + interestMask & NVKMS_KAPI_SUPPORTED_EVENTS_MASK; + + return nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_DECLARE_EVENT_INTEREST, + &kmsEventParams, sizeof(kmsEventParams)); +} + +static NvBool GetDeviceResourcesInfo +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDeviceResourcesInfo *info +) +{ + struct NvKmsQueryDispParams paramsDisp = { }; + NvBool status = NV_FALSE; + + NvU32 i; + + nvkms_memset(info, 0, sizeof(*info)); + + info->caps.hasVideoMemory = !device->isSOC; + + if (device->hKmsDevice == 0x0) { + info->caps.pitchAlignment = 0x1; + return NV_TRUE; + } + + paramsDisp.request.deviceHandle = device->hKmsDevice; + paramsDisp.request.dispHandle = device->hKmsDisp; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DISP, + ¶msDisp, sizeof(paramsDisp)); + + if (!status) + { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query display engine information"); + + goto done; + } + + info->numHeads = device->numHeads; + + ct_assert(sizeof(info->numLayers) == sizeof(device->numLayers)); + nvkms_memcpy(info->numLayers, device->numLayers, sizeof(device->numLayers)); + + ct_assert(ARRAY_LEN(info->connectorHandles) >= + ARRAY_LEN(paramsDisp.reply.connectorHandles)); + + info->numConnectors = paramsDisp.reply.numConnectors; + + for (i = 0; i < paramsDisp.reply.numConnectors; i++) { + info->connectorHandles[i] = paramsDisp.reply.connectorHandles[i]; + } + + { + const struct NvKmsCompositionCapabilities *pCaps = + &device->caps.cursorCompositionCaps; + + info->caps.validCursorCompositionModes = + pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE]. 
+static NvBool GetDisplays +( + struct NvKmsKapiDevice *device, + NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles +) +{ + struct NvKmsQueryDispParams paramsDisp = { }; + NvBool status = NV_FALSE; + + NVDpyId dpyId; + + if (device->hKmsDevice == 0x0) { + *numDisplays = 0; + return NV_TRUE; + } + + paramsDisp.request.deviceHandle = device->hKmsDevice; + paramsDisp.request.dispHandle = device->hKmsDisp; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DISP, + &paramsDisp, sizeof(paramsDisp)); + + if (!status) + { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query display engine information"); + + return NV_FALSE; + } + + if (*numDisplays == 0) { + goto done; + } + + if (*numDisplays < nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys)) { + nvKmsKapiLogDebug( + "Size of display handle array is less than number of displays"); + goto done; + } + + FOR_ALL_DPY_IDS(dpyId, paramsDisp.reply.validDpys) { + *(displayHandles++) = nvDpyIdToNvU32(dpyId); + } + +done: + + *numDisplays = nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys); + + return NV_TRUE; +}
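+ +/* + * Query the static (probe-time) properties of a connector: its physical + * index, the mask of heads that can drive it, and its signal format and + * connector type. + */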
+static NvBool GetConnectorInfo +( + struct NvKmsKapiDevice *device, + NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info +) +{ + struct NvKmsQueryConnectorStaticDataParams paramsConnector = { }; + NvBool status = NV_FALSE; + + if (device == NULL || info == NULL) { + goto done; + } + + paramsConnector.request.deviceHandle = device->hKmsDevice; + paramsConnector.request.dispHandle = device->hKmsDisp; + paramsConnector.request.connectorHandle = connector; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, + &paramsConnector, sizeof(paramsConnector)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query static data of connector 0x%08x", + connector); + + goto done; + } + + info->handle = connector; + + info->physicalIndex = paramsConnector.reply.physicalIndex; + + info->headMask = paramsConnector.reply.headMask; + + info->signalFormat = paramsConnector.reply.signalFormat; + + info->type = paramsConnector.reply.type; + +done: + + return status; +} + +static NvBool GetStaticDisplayInfo +( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info +) +{ + struct NvKmsQueryDpyStaticDataParams paramsDpyStatic = { }; + NvBool status = NV_FALSE; + + if (device == NULL || info == NULL) { + goto done; + } + + /* Query static data of display */ + + paramsDpyStatic.request.deviceHandle = device->hKmsDevice; + paramsDpyStatic.request.dispHandle = device->hKmsDisp; + + paramsDpyStatic.request.dpyId = nvNvU32ToDpyId(display); + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, + &paramsDpyStatic, sizeof(paramsDpyStatic)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query static data of dpy 0x%08x", + display); + + goto done; + } + + info->handle = display; + + info->connectorHandle = paramsDpyStatic.reply.connectorHandle; + + ct_assert(sizeof(info->dpAddress) == + sizeof(paramsDpyStatic.reply.dpAddress)); + + nvkms_memcpy(info->dpAddress, + paramsDpyStatic.reply.dpAddress, + sizeof(paramsDpyStatic.reply.dpAddress)); + info->dpAddress[sizeof(paramsDpyStatic.reply.dpAddress) - 1] = '\0'; + + info->internal = paramsDpyStatic.reply.mobileInternal; + +done: + + return status; +} + +static NvBool GetDynamicDisplayInfo( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDynamicDisplayParams *params) +{ + struct NvKmsQueryDpyDynamicDataParams *pParamsDpyDynamic = NULL; + NvBool status = NV_FALSE; + + if (device == NULL || params == NULL) { + goto done; + } + + pParamsDpyDynamic = nvKmsKapiCalloc(1, sizeof(*pParamsDpyDynamic)); + + if (pParamsDpyDynamic == NULL) { + goto done; + } + + pParamsDpyDynamic->request.deviceHandle = device->hKmsDevice; + pParamsDpyDynamic->request.dispHandle = device->hKmsDisp; + + pParamsDpyDynamic->request.dpyId = nvNvU32ToDpyId(params->handle); + + if (params->overrideEdid) { + ct_assert(sizeof(params->edid.buffer) == + sizeof(pParamsDpyDynamic->reply.edid.buffer)); + nvkms_memcpy( + pParamsDpyDynamic->request.edid.buffer, + params->edid.buffer, + sizeof(pParamsDpyDynamic->request.edid.buffer)); + + pParamsDpyDynamic->request.edid.bufferSize = params->edid.bufferSize; + + pParamsDpyDynamic->request.overrideEdid = NV_TRUE; + } + + pParamsDpyDynamic->request.forceConnected = params->forceConnected; + + pParamsDpyDynamic->request.forceDisconnected = params->forceDisconnected; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, + pParamsDpyDynamic, sizeof(*pParamsDpyDynamic)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query dynamic data of dpy 0x%08x", + params->handle); + + goto done; + } + + params->connected = pParamsDpyDynamic->reply.connected; + + if (pParamsDpyDynamic->reply.connected && !params->overrideEdid) { + + nvkms_memcpy( + params->edid.buffer, + pParamsDpyDynamic->reply.edid.buffer, + sizeof(params->edid.buffer)); + + params->edid.bufferSize = pParamsDpyDynamic->reply.edid.bufferSize; + } + +done: + + if (pParamsDpyDynamic != NULL) { + nvKmsKapiFree(pParamsDpyDynamic); + } + + return status; +} + +static void FreeMemory +( + struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory +) +{ + if (device == NULL || memory == NULL) { + return; + } + + if (memory->hRmHandle != 0x0) { + NvU32 ret; + + ret =
nvRmApiFree(device->hRmClient, + device->hRmDevice, + memory->hRmHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to free RM memory object 0x%08x allocated for " + "NvKmsKapiMemory 0x%p", + memory->hRmHandle, memory); + } + + nvKmsKapiFreeRmHandle(device, memory->hRmHandle); + } + + nvKmsKapiFree(memory); +} + +static struct NvKmsKapiMemory *AllocMemoryObjectAndHandle( + struct NvKmsKapiDevice *device, + NvU32 *handleOut +) +{ + struct NvKmsKapiMemory *memory; + + /* Allocate the container object */ + + memory = nvKmsKapiCalloc(1, sizeof(*memory)); + + if (memory == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate memory for NVKMS memory object on " + "NvKmsKapiDevice 0x%p", + device); + return NULL; + } + + /* Generate RM handle for memory object */ + + *handleOut = nvKmsKapiGenerateRmHandle(device); + + if (*handleOut == 0x0) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to allocate RM handle for memory"); + nvKmsKapiFree(memory); + return NULL; + } + + return memory; +} + +static struct NvKmsKapiMemory* AllocateVideoMemory +( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + enum NvKmsKapiAllocationType type, + NvU64 size, + NvU8 *compressible +) +{ + struct NvKmsKapiMemory *memory = NULL; + NvU32 hRmHandle; + + memory = AllocMemoryObjectAndHandle(device, &hRmHandle); + + if (!memory) { + return NULL; + } + + if (!nvKmsKapiAllocateVideoMemory(device, + hRmHandle, + layout, + size, + type, + compressible)) { + nvKmsKapiFreeRmHandle(device, hRmHandle); + FreeMemory(device, memory); + return NULL; + } + + memory->hRmHandle = hRmHandle; + memory->size = size; + memory->surfaceParams.layout = layout; + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + memory->surfaceParams.blockLinear.genericMemory = NV_TRUE; + } + + return memory; +} + +static struct NvKmsKapiMemory* AllocateSystemMemory +( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + enum NvKmsKapiAllocationType type, + NvU64 size, + NvU8 *compressible +) +{ + struct NvKmsKapiMemory *memory = NULL; + NvU32 hRmHandle; + + memory = AllocMemoryObjectAndHandle(device, &hRmHandle); + + if (!memory) { + return NULL; + } + + if (!nvKmsKapiAllocateSystemMemory(device, + hRmHandle, + layout, + size, + type, + compressible)) { + nvKmsKapiFreeRmHandle(device, hRmHandle); + FreeMemory(device, memory); + return NULL; + } + + memory->hRmHandle = hRmHandle; + memory->size = size; + memory->surfaceParams.layout = layout; + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + memory->surfaceParams.blockLinear.genericMemory = NV_TRUE; + } + + return memory; +} + +static struct NvKmsKapiMemory* ImportMemory +( + struct NvKmsKapiDevice *device, + NvU64 memorySize, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize +) +{ + struct NvKmsKapiPrivImportMemoryParams nvKmsParams, *pNvKmsParams = NULL; + NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { }; + struct NvKmsKapiMemory *memory = NULL; + NvU32 hMemory; + NvU32 ret; + int status; + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(nvKmsParams)) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameter size mismatch - " + "expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(nvKmsParams), nvKmsParamsSize); + return NULL; + } + + /* + * Use a heap allocation as the destination pointer passed to + * nvkms_copyin; stack allocations created within core NVKMS may not + * be recognizable to the Linux kernel's 
CONFIG_HARDENED_USERCOPY + * checker, triggering false errors. But then save the result to a + * variable on the stack, so that we can free the heap memory + * immediately and not worry about its lifetime. + */ + + pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams)); + + if (pNvKmsParams == NULL) { + nvKmsKapiLogDebug("Failed to allocate memory for ImportMemory"); + return NULL; + } + + status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams)); + + nvKmsParams = *pNvKmsParams; + + nvKmsKapiFree(pNvKmsParams); + + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameters could not be read from " + "userspace"); + return NULL; + } + + memory = AllocMemoryObjectAndHandle(device, &hMemory); + + if (!memory) { + return NULL; + } + + importParams.fd = nvKmsParams.memFd; + importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + importParams.object.data.rmObject.hDevice = device->hRmDevice; + importParams.object.data.rmObject.hParent = device->hRmDevice; + importParams.object.data.rmObject.hObject = hMemory; + + ret = nvRmApiControl(device->hRmClient, + device->hRmClient, + NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD, + &importParams, + sizeof(importParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to import RM memory object (%d) of size %llu bytes", + nvKmsParams.memFd, memorySize); + + nvKmsKapiFreeRmHandle(device, hMemory); + goto failed; + } + + memory->hRmHandle = hMemory; + memory->size = memorySize; + memory->surfaceParams = nvKmsParams.surfaceParams; + + return memory; + +failed: + + FreeMemory(device, memory); + + return NULL; +} + +static struct NvKmsKapiMemory* DupMemory +( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiDevice *srcDevice, + const struct NvKmsKapiMemory *srcMemory +) +{ + struct NvKmsKapiMemory *memory; + NvU32 hMemory; + NvU32 ret; + + memory = AllocMemoryObjectAndHandle(device, &hMemory); + + if (!memory) { + return NULL; + } + + ret = nvRmApiDupObject(device->hRmClient, + device->hRmDevice, + hMemory, + srcDevice->hRmClient, + srcMemory->hRmHandle, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to dup NVKMS memory object 0x%p (0x%08x, 0x%08x) " + "of size %llu bytes", + srcMemory, srcDevice->hRmClient, srcMemory->hRmHandle, + srcMemory->size); + + nvKmsKapiFreeRmHandle(device, hMemory); + goto failed; + } + + memory->hRmHandle = hMemory; + memory->size = srcMemory->size; + memory->surfaceParams = srcMemory->surfaceParams; + + return memory; + +failed: + FreeMemory(device, memory); + + return NULL; +} + +static NvBool ExportMemory +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize +) +{ + struct NvKmsKapiPrivExportMemoryParams nvKmsParams, *pNvKmsParams = NULL; + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { }; + int status; + NvU32 ret; + + if (device == NULL || memory == NULL) { + nvKmsKapiLogDebug( + "Invalid device or memory parameter while exporting memory"); + return NV_FALSE; + } + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(nvKmsParams)) { + nvKmsKapiLogDebug( + "NVKMS private memory export parameter size mismatch - " + "expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(nvKmsParams), nvKmsParamsSize); + return NV_FALSE; + } + + /* + * Use a heap allocation as the destination pointer passed to + * nvkms_copyin; stack 
allocations created within core NVKMS may not + * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY + * checker, triggering false errors. But then save the result to a + * variable on the stack, so that we can free the heap memory + * immediately and not worry about its lifetime. + */ + + pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams)); + + if (pNvKmsParams == NULL) { + nvKmsKapiLogDebug("Failed to allocate scratch memory for ExportMemory"); + return NV_FALSE; + } + + status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams)); + + nvKmsParams = *pNvKmsParams; + nvKmsKapiFree(pNvKmsParams); + + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS private memory export parameters could not be read from " + "userspace"); + return NV_FALSE; + } + + exportParams.fd = nvKmsParams.memFd; + exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + exportParams.object.data.rmObject.hDevice = device->hRmDevice; + exportParams.object.data.rmObject.hParent = device->hRmDevice; + exportParams.object.data.rmObject.hObject = memory->hRmHandle; + + ret = nvRmApiControl(device->hRmClient, + device->hRmClient, + NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD, + &exportParams, + sizeof(exportParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to export RM memory object of size %llu bytes " + "to (%d)", memory->size, nvKmsParams.memFd); + return NV_FALSE; + } + + return NV_TRUE; +} + +static struct NvKmsKapiMemory* +GetSystemMemoryHandleFromDmaBufSgtHelper(struct NvKmsKapiDevice *device, + NvU32 descriptorType, + NvP64 descriptor, + NvU32 limit) +{ + NvU32 ret; + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS memAllocParams = {0}; + struct NvKmsKapiMemory *memory = NULL; + NvU32 hRmHandle; + + memory = AllocMemoryObjectAndHandle(device, &hRmHandle); + + if (!memory) { + return NULL; + } + + memAllocParams.type = NVOS32_TYPE_PRIMARY; + memAllocParams.descriptorType = descriptorType; + memAllocParams.descriptor = descriptor; + memAllocParams.limit = limit; + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, memAllocParams.attr); + + memAllocParams.attr2 = + FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, memAllocParams.attr2); + + /* dmabuf import is currently only used for ISO memory. 
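+ * ISO here means isochronous (display scanout) traffic, so the coherency + * attribute below is chosen to match how the display engine reads over the + * bus: write-combined when the ISO path is not I/O coherent, write-back + * when it is.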
*/ + if (!device->isoIOCoherencyModes.coherent) { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, + memAllocParams.attr); + } + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + &memAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "nvRmApiAlloc failed with error code 0x%08x", + ret); + nvKmsKapiFreeRmHandle(device, hRmHandle); + FreeMemory(device, memory); + return NULL; + } + + memory->hRmHandle = hRmHandle; + memory->size = limit + 1; + memory->surfaceParams.layout = NvKmsSurfaceMemoryLayoutPitch; + + return memory; +} + +static struct NvKmsKapiMemory* +GetSystemMemoryHandleFromSgt(struct NvKmsKapiDevice *device, + NvP64 sgt, + NvP64 gem, + NvU32 limit) +{ + NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS params = { + .sgt = sgt, + .gem = gem + }; + + return GetSystemMemoryHandleFromDmaBufSgtHelper( + device, NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR, &params, limit); +} + +static struct NvKmsKapiMemory* +GetSystemMemoryHandleFromDmaBuf(struct NvKmsKapiDevice *device, + NvP64 dmaBuf, + NvU32 limit) +{ + return GetSystemMemoryHandleFromDmaBufSgtHelper( + device, NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR, dmaBuf, limit); +} + +static NvBool RmGc6BlockerRefCntAction(const struct NvKmsKapiDevice *device, + NvU32 action) +{ + NV_STATUS status; + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { 0 }; + + nvAssert((action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC) || + (action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC)); + + params.action = action; + + status = nvRmApiControl(device->hRmClient, + device->hRmSubDevice, + NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT, + &params, + sizeof(params)); + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to modify GC6 blocker refcount for 0x%x, status: 0x%x", + device->hRmSubDevice, status); + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvBool RmGc6BlockerRefCntInc(const struct NvKmsKapiDevice *device) +{ + return RmGc6BlockerRefCntAction( + device, + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC); +} + +static NvBool RmGc6BlockerRefCntDec(const struct NvKmsKapiDevice *device) +{ + return RmGc6BlockerRefCntAction( + device, + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC); +} + +static NvBool GetMemoryPages +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 **pPages, + NvU32 *pNumPages +) +{ + NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS paramsGetNumPages = {}; + NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS paramsGetPages = {}; + NvU64 *pages; + NV_STATUS status; + + if (device == NULL || memory == NULL) { + return NV_FALSE; + } + + status = nvRmApiControl(device->hRmClient, + memory->hRmHandle, + NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES, + &paramsGetNumPages, + sizeof(paramsGetNumPages)); + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug(device, + "Failed to get number of physical allocation pages for RM " + "memory object 0x%x", memory->hRmHandle); + return NV_FALSE; + } + + if (!paramsGetNumPages.numPages) { + return NV_FALSE; + } + + pages = nvKmsKapiCalloc(paramsGetNumPages.numPages, sizeof(*pages)); + if (!pages) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate memory"); + return NV_FALSE; + } + + paramsGetPages.pPages = NV_PTR_TO_NvP64(pages); + paramsGetPages.numPages = paramsGetNumPages.numPages;
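+ + /* + * Second call: with the page count known, fetch the physical page + * addresses themselves into the caller-owned array. + */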
+ status = nvRmApiControl(device->hRmClient, + memory->hRmHandle, + NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES, + &paramsGetPages, + sizeof(paramsGetPages)); + if (status != NV_OK) { + nvKmsKapiFree(pages); + nvKmsKapiLogDeviceDebug(device, + "Failed to get physical allocation pages for RM " + "memory object 0x%x", memory->hRmHandle); + return NV_FALSE; + } + + nvAssert(paramsGetPages.numPages == paramsGetNumPages.numPages); + + *pPages = pages; + *pNumPages = paramsGetPages.numPages; + + return NV_TRUE; +} + +static void FreeMemoryPages +( + NvU64 *pPages +) +{ + nvKmsKapiFree(pPages); +} + +static NvBool MapMemory +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + void **ppLinearAddress +) +{ + NV_STATUS status; + NvU32 flags = 0; + + if (device == NULL || memory == NULL) { + return NV_FALSE; + } + + switch (type) { + case NVKMS_KAPI_MAPPING_TYPE_USER: + /* + * Usermode clients can't be trusted not to access mappings while + * the GPU is in GC6. + * + * TODO: Revoke/restore mappings rather than blocking GC6 + */ + if (!RmGc6BlockerRefCntInc(device)) { + return NV_FALSE; + } + flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER); + break; + case NVKMS_KAPI_MAPPING_TYPE_KERNEL: + /* + * Kernel clients should ensure on their own that the GPU isn't in + * GC6 before making accesses to mapped vidmem surfaces. + */ + break; + } + + status = nvRmApiMapMemory( + device->hRmClient, + device->hRmSubDevice, + memory->hRmHandle, + 0, + memory->size, + ppLinearAddress, + flags); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to map RM memory object 0x%x allocated for NvKmsKapiMemory 0x%p", + memory->hRmHandle, memory); + if (type == NVKMS_KAPI_MAPPING_TYPE_USER) { + RmGc6BlockerRefCntDec(device); // XXX Can't handle failure. + } + return NV_FALSE; + } + + return NV_TRUE; +} + +static void UnmapMemory +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + const void *pLinearAddress +) +{ + NV_STATUS status; + NvU32 flags = 0; + + if (device == NULL || memory == NULL) { + return; + } + + switch (type) { + case NVKMS_KAPI_MAPPING_TYPE_USER: + flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER); + break; + case NVKMS_KAPI_MAPPING_TYPE_KERNEL: + break; + } + + status = + nvRmApiUnmapMemory(device->hRmClient, + device->hRmSubDevice, + memory->hRmHandle, + pLinearAddress, + flags); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to unmap RM memory object 0x%x allocated for NvKmsKapiMemory 0x%p", + memory->hRmHandle, memory); + } + + if (type == NVKMS_KAPI_MAPPING_TYPE_USER) { + RmGc6BlockerRefCntDec(device); // XXX Can't handle failure. + } +}
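+ +/* + * Validate the per-plane parameters of a surface about to be registered and + * normalize each plane's pitch: NVKMS expects the pitch of a block-linear + * plane in units of 64-byte-wide gobs (hence the >> 6 below), and the pitch + * of a pitch-linear plane in bytes. + */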
+static NvBool GetSurfaceParams( + struct NvKmsKapiCreateSurfaceParams *params, + NvU32 *pNumPlanes, + enum NvKmsSurfaceMemoryLayout *pLayout, + NvU32 *pLog2GobsPerBlockY, + NvU32 pitch[]) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(params->format); + enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch; + NvU32 log2GobsPerBlockY = 0; + NvU32 i; + + if (pFormatInfo->numPlanes == 0) + { + nvKmsKapiLogDebug("Unknown surface format"); + return NV_FALSE; + } + + for (i = 0; i < pFormatInfo->numPlanes; i++) { + struct NvKmsKapiMemory *memory = + params->planes[i].memory; + + if (memory == NULL) { + return NV_FALSE; + } + + pitch[i] = params->planes[i].pitch; + + if (i == 0) { + if (params->explicit_layout) { + layout = params->layout; + } else { + layout = memory->surfaceParams.layout; + } + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + if (params->explicit_layout) { + log2GobsPerBlockY = params->log2GobsPerBlockY; + } else { + log2GobsPerBlockY = + memory->surfaceParams.blockLinear.log2GobsPerBlock.y; + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + log2GobsPerBlockY = 0; + break; + + default: + nvKmsKapiLogDebug("Invalid surface layout: %u", layout); + return NV_FALSE; + } + } else { + if (!params->explicit_layout) { + if (layout != memory->surfaceParams.layout) { + nvKmsKapiLogDebug("All planes are not of the same layout"); + return NV_FALSE; + } + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear && + log2GobsPerBlockY != + memory->surfaceParams.blockLinear.log2GobsPerBlock.y) { + + nvKmsKapiLogDebug( + "All planes do not have the same blocklinear parameters"); + return NV_FALSE; + } + } + } + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + if (params->explicit_layout) { + if (pitch[i] & 63) { + nvKmsKapiLogDebug( + "Invalid block-linear pitch alignment: %u", pitch[i]); + return NV_FALSE; + } + + pitch[i] = pitch[i] >> 6; + } else { + /* + * The caller (nvidia-drm) is not blocklinear-aware, so the + * passed-in pitch cannot accurately reflect block information. + * Override the pitch with what was specified when the surface + * was imported.
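+ * (surfaceParams, including pitchInBlocks, was captured from the + * driver-private import parameters in ImportMemory(), or copied over by + * DupMemory().)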
*/ + pitch[i] = memory->surfaceParams.blockLinear.pitchInBlocks; + } + } else { + pitch[i] = params->planes[i].pitch; + } + + } + + *pNumPlanes = pFormatInfo->numPlanes; + *pLayout = layout; + *pLog2GobsPerBlockY = log2GobsPerBlockY; + + return NV_TRUE; +} +static struct NvKmsKapiSurface* CreateSurface +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiCreateSurfaceParams *params +) +{ + struct NvKmsRegisterSurfaceParams paramsReg = { }; + NvBool status; + + struct NvKmsKapiSurface *surface = NULL; + + enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch; + NvU32 log2GobsPerBlockY = 0; + NvU32 numPlanes = 0; + NvU32 pitch[NVKMS_MAX_PLANES_PER_SURFACE] = { 0 }; + NvU32 i; + + if (!GetSurfaceParams(params, + &numPlanes, + &layout, + &log2GobsPerBlockY, + pitch)) + { + goto failed; + } + + surface = nvKmsKapiCalloc(1, sizeof(*surface)); + + if (surface == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate memory for NVKMS surface object on " + "NvKmsKapiDevice 0x%p", + device); + goto failed; + } + + if (device->hKmsDevice == 0x0) { + goto done; + } + + /* Create NVKMS surface */ + + paramsReg.request.deviceHandle = device->hKmsDevice; + + paramsReg.request.useFd = FALSE; + paramsReg.request.rmClient = device->hRmClient; + + paramsReg.request.widthInPixels = params->width; + paramsReg.request.heightInPixels = params->height; + + paramsReg.request.format = params->format; + + paramsReg.request.layout = layout; + paramsReg.request.log2GobsPerBlockY = log2GobsPerBlockY; + + for (i = 0; i < numPlanes; i++) { + struct NvKmsKapiMemory *memory = + params->planes[i].memory; + + paramsReg.request.planes[i].u.rmObject = memory->hRmHandle; + paramsReg.request.planes[i].rmObjectSizeInBytes = memory->size; + paramsReg.request.planes[i].offset = params->planes[i].offset; + paramsReg.request.planes[i].pitch = pitch[i]; + } + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_REGISTER_SURFACE, + &paramsReg, sizeof(paramsReg)); + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to register NVKMS surface of dimensions %ux%u " + "and %s format", + params->width, + params->height, + nvKmsSurfaceMemoryFormatToString(params->format)); + + goto failed; + } + + surface->hKmsHandle = paramsReg.reply.surfaceHandle; + +done: + return surface; + +failed: + nvKmsKapiFree(surface); + + return NULL; +} + +static void DestroySurface +( + struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface +) +{ + struct NvKmsUnregisterSurfaceParams paramsUnreg = { }; + NvBool status; + + if (device->hKmsDevice == 0x0) { + goto done; + } + + paramsUnreg.request.deviceHandle = device->hKmsDevice; + paramsUnreg.request.surfaceHandle = surface->hKmsHandle; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_UNREGISTER_SURFACE, + &paramsUnreg, sizeof(paramsUnreg)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to unregister NVKMS surface registered for " + "NvKmsKapiSurface 0x%p", + surface); + } + +done: + nvKmsKapiFree(surface); +} + +/* + * Helper function to convert NvKmsMode to NvKmsKapiDisplayMode. + */ +static void NvKmsModeToKapi +( + const struct NvKmsMode *kmsMode, + struct NvKmsKapiDisplayMode *mode +) +{ + const NvModeTimings *timings = &kmsMode->timings; + + nvkms_memset(mode, 0, sizeof(*mode)); + + mode->timings.refreshRate = timings->RRx1k; + mode->timings.pixelClockHz = timings->pixelClockHz; + mode->timings.hVisible = timings->hVisible; + mode->timings.hSyncStart = timings->hSyncStart; + mode->timings.hSyncEnd = timings->hSyncEnd; + mode->timings.hTotal = timings->hTotal; + mode->timings.hSkew = timings->hSkew; + mode->timings.vVisible = timings->vVisible; + mode->timings.vSyncStart = timings->vSyncStart; + mode->timings.vSyncEnd = timings->vSyncEnd; + mode->timings.vTotal = timings->vTotal; + + mode->timings.flags.interlaced = timings->interlaced; + mode->timings.flags.doubleScan = timings->doubleScan; + mode->timings.flags.hSyncPos = timings->hSyncPos; + mode->timings.flags.hSyncNeg = timings->hSyncNeg; + mode->timings.flags.vSyncPos = timings->vSyncPos; + mode->timings.flags.vSyncNeg = timings->vSyncNeg; + + mode->timings.widthMM = timings->sizeMM.w; + mode->timings.heightMM = timings->sizeMM.h; + + ct_assert(sizeof(mode->name) == sizeof(kmsMode->name)); + + nvkms_memcpy(mode->name, kmsMode->name, sizeof(mode->name)); +} + +static void InitNvKmsModeValidationParams( + const struct NvKmsKapiDevice *device, + struct NvKmsModeValidationParams *params) +{ + /* + * A KAPI client's mode-timings structure may not have a field like + * RRx1k, so the RRx1k value computed during the conversion + * KAPI client mode-timings + * -> NvKmsKapiDisplayMode -> NvModeTimings + * is not guaranteed to match the value derived from the EDID; such a + * mismatch could cause a modeset to fail. + * + * The RRx1k field does not affect the hardware mode-timing values, + * so override the RRx1k check. + * + * XXX NVKMS TODO: Bug 200156338 is filed to delete NvModeTimings::RRx1k + * if possible. + */ + params->overrides = NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK; +}
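+ +/* + * GetDisplayMode() iterates over the validated modes of a display: it returns + * 1 while more modes follow the given index, 0 for the last mode, and -1 on + * error. A hypothetical caller loop (illustration only, not part of this + * patch): + * + * for (i = 0; ; i++) { + * int r = GetDisplayMode(dev, display, i, &mode, &valid, &preferred); + * if (r < 0) break; // error + * if (valid) ConsiderMode(&mode); // ConsiderMode() is hypothetical + * if (r == 0) break; // that was the last index + * } + */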
+static int GetDisplayMode +( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, NvU32 modeIndex, + struct NvKmsKapiDisplayMode *mode, NvBool *valid, + NvBool *preferredMode +) +{ + struct NvKmsValidateModeIndexParams paramsValidate = { }; + NvBool status; + + if (device == NULL) { + return -1; + } + + paramsValidate.request.deviceHandle = device->hKmsDevice; + paramsValidate.request.dispHandle = device->hKmsDisp; + + paramsValidate.request.dpyId = nvNvU32ToDpyId(display); + + InitNvKmsModeValidationParams(device, + &paramsValidate.request.modeValidation); + + paramsValidate.request.modeIndex = modeIndex; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_VALIDATE_MODE_INDEX, + &paramsValidate, sizeof(paramsValidate)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to get validated mode index 0x%x for NvKmsKapiDisplay 0x%08x", + modeIndex, display); + return -1; + } + + if (mode != NULL) { + NvKmsModeToKapi(&paramsValidate.reply.mode, mode); + } + + if (valid != NULL) { + *valid = paramsValidate.reply.valid; + } + + if (preferredMode != NULL) { + *preferredMode = paramsValidate.reply.preferredMode; + } + + return paramsValidate.reply.end ? 0 : 1; +} + +/* + * Helper function to convert NvKmsKapiDisplayMode to NvKmsMode.
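+ * (The inverse of NvKmsModeToKapi() above; both are plain field-by-field + * copies with no unit conversion: refresh rates stay in units of 1/1000 Hz.)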
*/ +static void NvKmsKapiDisplayModeToKapi +( + const struct NvKmsKapiDisplayMode *mode, + struct NvKmsMode *kmsMode +) +{ + NvModeTimings *timings = &kmsMode->timings; + + nvkms_memset(kmsMode, 0, sizeof(*kmsMode)); + + nvkms_memcpy(kmsMode->name, mode->name, sizeof(mode->name)); + + timings->RRx1k = mode->timings.refreshRate; + timings->pixelClockHz = mode->timings.pixelClockHz; + timings->hVisible = mode->timings.hVisible; + timings->hSyncStart = mode->timings.hSyncStart; + timings->hSyncEnd = mode->timings.hSyncEnd; + timings->hTotal = mode->timings.hTotal; + timings->hSkew = mode->timings.hSkew; + timings->vVisible = mode->timings.vVisible; + timings->vSyncStart = mode->timings.vSyncStart; + timings->vSyncEnd = mode->timings.vSyncEnd; + timings->vTotal = mode->timings.vTotal; + + timings->interlaced = mode->timings.flags.interlaced; + timings->doubleScan = mode->timings.flags.doubleScan; + timings->hSyncPos = mode->timings.flags.hSyncPos; + timings->hSyncNeg = mode->timings.flags.hSyncNeg; + timings->vSyncPos = mode->timings.flags.vSyncPos; + timings->vSyncNeg = mode->timings.flags.vSyncNeg; + + timings->sizeMM.w = mode->timings.widthMM; + timings->sizeMM.h = mode->timings.heightMM; +} + +static NvBool ValidateDisplayMode +( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode +) +{ + struct NvKmsValidateModeParams paramsValidate; + NvBool status; + + if (device == NULL) { + return NV_FALSE; + } + + nvkms_memset(&paramsValidate, 0, sizeof(paramsValidate)); + + paramsValidate.request.deviceHandle = device->hKmsDevice; + paramsValidate.request.dispHandle = device->hKmsDisp; + + paramsValidate.request.dpyId = nvNvU32ToDpyId(display); + + InitNvKmsModeValidationParams(device, + &paramsValidate.request.modeValidation); + + NvKmsKapiDisplayModeToKapi(mode, &paramsValidate.request.mode); + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_VALIDATE_MODE, + &paramsValidate, sizeof(paramsValidate)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to get validated mode %ux%u@%uHz for NvKmsKapiDisplay 0x%08x of " + "NvKmsKapiDevice 0x%p", + mode->timings.hVisible, mode->timings.vVisible, + mode->timings.refreshRate/1000, display, + device); + return NV_FALSE; + } + + return paramsValidate.reply.valid; +} + +static NvBool AssignSyncObjectConfig( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerConfig *pLayerConfig, + struct NvKmsChannelSyncObjects *pSyncObject, + NvBool bFromKmsSetMode) +{ + if (!device->supportsSyncpts || bFromKmsSetMode) { + if (pLayerConfig->syncptParams.preSyncptSpecified || + pLayerConfig->syncptParams.postSyncptRequested) { + return NV_FALSE; + } + } + + pSyncObject->useSyncpt = FALSE; + + if (pLayerConfig->syncptParams.preSyncptSpecified) { + pSyncObject->useSyncpt = TRUE; + + pSyncObject->u.syncpts.pre.type = NVKMS_SYNCPT_TYPE_RAW; + pSyncObject->u.syncpts.pre.u.raw.id = pLayerConfig->syncptParams.preSyncptId; + pSyncObject->u.syncpts.pre.u.raw.value = pLayerConfig->syncptParams.preSyncptValue; + } + + if (pLayerConfig->syncptParams.postSyncptRequested) { + pSyncObject->useSyncpt = TRUE; + + pSyncObject->u.syncpts.requestedPostType = NVKMS_SYNCPT_TYPE_FD; + } + return NV_TRUE; +} + +static void AssignHDRMetadataConfig( + const struct NvKmsKapiLayerConfig *layerConfig, + const NvU32 layer, + struct NvKmsFlipCommonParams *params) +{ + if (layerConfig->hdrMetadataSpecified) { + params->layer[layer].hdr.enabled = TRUE; + params->layer[layer].hdr.specified = TRUE; +
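+ /* The client-supplied HDR static metadata (mastering-display and + * light-level data) is passed through to NVKMS unmodified. */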
params->layer[layer].hdr.staticMetadata = layerConfig->hdrMetadata; + } else { + params->layer[layer].hdr.enabled = FALSE; + params->layer[layer].hdr.specified = TRUE; + } +} + +static void NvKmsKapiCursorConfigToKms( + const struct NvKmsKapiCursorRequestedConfig *requestedConfig, + struct NvKmsFlipCommonParams *params, + NvBool bFromKmsSetMode) +{ + if (requestedConfig->flags.surfaceChanged || bFromKmsSetMode) { + params->cursor.imageSpecified = NV_TRUE; + + if (requestedConfig->surface != NULL) { + params->cursor.image.surfaceHandle[NVKMS_LEFT] = + requestedConfig->surface->hKmsHandle; + } + + params->cursor.image.cursorCompParams.colorKeySelect = + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE; + params->cursor.image.cursorCompParams.blendingMode[1] = + requestedConfig->compParams.compMode; + params->cursor.image.cursorCompParams.surfaceAlpha = + requestedConfig->compParams.surfaceAlpha; + } + + if (requestedConfig->flags.dstXYChanged || bFromKmsSetMode) { + params->cursor.position.x = requestedConfig->dstX; + params->cursor.position.y = requestedConfig->dstY; + + params->cursor.positionSpecified = NV_TRUE; + } +} + +static NvBool NvKmsKapiOverlayLayerConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, + const NvU32 layer, + const NvU32 head, + struct NvKmsFlipCommonParams *params, + NvBool commit, + NvBool bFromKmsSetMode) +{ + NvBool ret = NV_FALSE; + const struct NvKmsKapiLayerConfig *layerConfig = + &layerRequestedConfig->config; + + if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) { + params->layer[layer].syncObjects.specified = NV_TRUE; + params->layer[layer].completionNotifier.specified = NV_TRUE; + params->layer[layer].surface.specified = NV_TRUE; + + if (layerConfig->surface != NULL) { + params->layer[layer].surface.handle[NVKMS_LEFT] = + layerConfig->surface->hKmsHandle; + } + + params->layer[layer].surface.rrParams = + layerConfig->rrParams; + + params->layer[layer].compositionParams.val.colorKeySelect = + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE; + params->layer[layer].compositionParams.val.blendingMode[1] = + layerConfig->compParams.compMode; + params->layer[layer].compositionParams.val.surfaceAlpha = + layerConfig->compParams.surfaceAlpha; + params->layer[layer].compositionParams.specified = TRUE; + params->layer[layer].minPresentInterval = + layerConfig->minPresentInterval; + } + + params->layer[layer].sizeIn.val.width = layerConfig->srcWidth; + params->layer[layer].sizeIn.val.height = layerConfig->srcHeight; + params->layer[layer].sizeIn.specified = TRUE; + + params->layer[layer].sizeOut.val.width = layerConfig->dstWidth; + params->layer[layer].sizeOut.val.height = layerConfig->dstHeight; + params->layer[layer].sizeOut.specified = TRUE; + + if (layerRequestedConfig->flags.dstXYChanged || bFromKmsSetMode) { + params->layer[layer].outputPosition.val.x = layerConfig->dstX; + params->layer[layer].outputPosition.val.y = layerConfig->dstY; + + params->layer[layer].outputPosition.specified = NV_TRUE; + } + + params->layer[layer].colorspace.val = layerConfig->inputColorSpace; + params->layer[layer].colorspace.specified = TRUE; + + AssignHDRMetadataConfig(layerConfig, layer, params); + + if (commit) { + NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX( + device->layerState[head][layer]. + currFlipNotifierIndex); + + if (layerConfig->surface != NULL) { + NvU32 nextIndexOffsetInBytes = + NVKMS_KAPI_NOTIFIER_OFFSET(head, + layer, nextIndex); + + params->layer[layer].completionNotifier.val. 
+ surface.surfaceHandle = device->notifier.hKmsHandle; + + params->layer[layer].completionNotifier.val. + surface.format = device->notifier.format; + + params->layer[layer].completionNotifier.val. + surface.offsetInWords = nextIndexOffsetInBytes >> 2; + + params->layer[layer].completionNotifier.val.awaken = NV_TRUE; + } + + ret = AssignSyncObjectConfig(device, + layerConfig, + &params->layer[layer].syncObjects.val, + bFromKmsSetMode); + if (ret == NV_FALSE) { + return ret; + } + + /* + * XXX Should this be done after the commit? + * What if the commit fails? + * + * Commits are not expected to fail in the KAPI layer; only validated + * configurations are expected to be committed. + */ + device->layerState[head][layer]. + currFlipNotifierIndex = nextIndex; + } + + return NV_TRUE; +} + +static NvBool NvKmsKapiPrimaryLayerConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, + const NvU32 head, + struct NvKmsFlipCommonParams *params, + NvBool commit, + NvBool bFromKmsSetMode) +{ + NvBool ret = NV_FALSE; + const struct NvKmsKapiLayerConfig *layerConfig = + &layerRequestedConfig->config; + + NvBool changed = FALSE; + + if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) { + params->layer[NVKMS_MAIN_LAYER].surface.specified = NV_TRUE; + params->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = NV_TRUE; + params->layer[NVKMS_MAIN_LAYER].syncObjects.specified = NV_TRUE; + + params->layer[NVKMS_MAIN_LAYER].minPresentInterval = + layerConfig->minPresentInterval; + params->layer[NVKMS_MAIN_LAYER].tearing = layerConfig->tearing; + params->layer[NVKMS_MAIN_LAYER].surface.rrParams = layerConfig->rrParams; + + if (layerConfig->surface != NULL) { + params->layer[NVKMS_MAIN_LAYER].surface.handle[0] = + layerConfig->surface->hKmsHandle; + + if (params->layer[NVKMS_MAIN_LAYER].surface.handle[0] != 0) { + params->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = layerConfig->srcWidth; + params->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = layerConfig->srcHeight; + params->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; + + params->layer[NVKMS_MAIN_LAYER].sizeOut.val.width = layerConfig->dstWidth; + params->layer[NVKMS_MAIN_LAYER].sizeOut.val.height = layerConfig->dstHeight; + params->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; + } + } + + changed = TRUE; + } + + if (layerRequestedConfig->flags.srcXYChanged || bFromKmsSetMode) { + params->viewPortIn.point.x = layerConfig->srcX; + params->viewPortIn.point.y = layerConfig->srcY; + + params->viewPortIn.specified = NV_TRUE; + + changed = TRUE; + } + + params->layer[NVKMS_MAIN_LAYER].colorspace.val = layerConfig->inputColorSpace; + params->layer[NVKMS_MAIN_LAYER].colorspace.specified = TRUE; + + AssignHDRMetadataConfig(layerConfig, NVKMS_MAIN_LAYER, params); + + if (commit && changed) { + NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX( + device->layerState[head][NVKMS_MAIN_LAYER]. + currFlipNotifierIndex); + + if (layerConfig->surface != NULL) { + NvU32 nextIndexOffsetInBytes = + NVKMS_KAPI_NOTIFIER_OFFSET(head, + NVKMS_MAIN_LAYER, nextIndex); + + params->layer[NVKMS_MAIN_LAYER].completionNotifier. + val.surface.surfaceHandle = device->notifier.hKmsHandle; + + params->layer[NVKMS_MAIN_LAYER].completionNotifier. + val.surface.format = device->notifier.format; + + params->layer[NVKMS_MAIN_LAYER].completionNotifier. + val.surface.offsetInWords = nextIndexOffsetInBytes >> 2; + + params->layer[NVKMS_MAIN_LAYER].completionNotifier.val.awaken = NV_TRUE; + }
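+ + /* Notifier offsets are programmed in units of 4-byte words, hence the + * >> 2 conversion from the byte offset above. */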
+ ret = AssignSyncObjectConfig(device, + layerConfig, + &params->layer[NVKMS_MAIN_LAYER].syncObjects.val, + bFromKmsSetMode); + if (ret == NV_FALSE) { + return ret; + } + + /* + * XXX Should this be done after the commit? + * What if the commit fails? + * + * Commits are not expected to fail in the KAPI layer; only validated + * configurations are expected to be committed. + */ + device->layerState[head][NVKMS_MAIN_LAYER]. + currFlipNotifierIndex = nextIndex; + } + + return NV_TRUE; +} + +static NvBool NvKmsKapiLayerConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, + const NvU32 layer, + const NvU32 head, + struct NvKmsFlipCommonParams *params, + NvBool commit, + NvBool bFromKmsSetMode) +{ + if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) { + return NvKmsKapiPrimaryLayerConfigToKms(device, + layerRequestedConfig, + head, + params, + commit, + bFromKmsSetMode); + + } + + return NvKmsKapiOverlayLayerConfigToKms(device, + layerRequestedConfig, + layer, + head, + params, + commit, + bFromKmsSetMode); +} + +static NvBool GetOutputTransferFunction( + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig, + enum NvKmsOutputTf *tf) +{ + NvBool found = NV_FALSE; + NvU32 layer; + + *tf = NVKMS_OUTPUT_TF_NONE; + + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer]; + const struct NvKmsKapiLayerConfig *layerConfig = + &layerRequestedConfig->config; + + if (layerConfig->hdrMetadataSpecified) { + if (!found) { + *tf = layerConfig->tf; + found = NV_TRUE; + } else if (*tf != layerConfig->tf) { + nvKmsKapiLogDebug( + "Output transfer function should be the same for all layers on a head"); + return NV_FALSE; + } + } + } + + return NV_TRUE; +} + +/* + * Helper function to convert NvKmsKapiRequestedModeSetConfig + * to NvKmsSetModeParams.
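+ * Only heads set in requestedConfig->headsMask are translated; heads that + * drive no displays, and (when committing) inactive heads, are skipped.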
*/ +static NvBool NvKmsKapiRequestedModeSetConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsSetModeParams *params, + NvBool commit) +{ + NvU32 dispIdx = device->dispIdx; + NvU32 head; + + nvkms_memset(params, 0, sizeof(*params)); + + params->request.commit = commit; + params->request.deviceHandle = device->hKmsDevice; + params->request.requestedDispsBitMask = 1 << dispIdx; + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + struct NvKmsSetModeOneHeadRequest *paramsHead; + enum NvKmsOutputTf tf; + NvU32 layer; + NvU32 i; + + if ((requestedConfig->headsMask & (1 << head)) == 0x0) { + continue; + } + + params->request.disp[dispIdx].requestedHeadsBitMask |= 1 << head; + + if (headModeSetConfig->numDisplays == 0) { + continue; + } + + if (params->request.commit && !headModeSetConfig->bActive) { + continue; + } + + paramsHead = &params->request.disp[dispIdx].head[head]; + + InitNvKmsModeValidationParams(device, + &paramsHead->modeValidationParams); + + for (i = 0; i < headModeSetConfig->numDisplays; i++) { + paramsHead->dpyIdList = nvAddDpyIdToDpyIdList( + nvNvU32ToDpyId(headModeSetConfig->displays[i]), + paramsHead->dpyIdList); + } + + NvKmsKapiDisplayModeToKapi(&headModeSetConfig->mode, &paramsHead->mode); + + NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig, + &paramsHead->flip, + NV_TRUE /* bFromKmsSetMode */); + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer]; + + if (!NvKmsKapiLayerConfigToKms(device, + layerRequestedConfig, + layer, + head, + &paramsHead->flip, + commit, + NV_TRUE /* bFromKmsSetMode */)) { + return NV_FALSE; + } + } + + if (!GetOutputTransferFunction(headRequestedConfig, &tf)) { + return NV_FALSE; + } + + paramsHead->flip.tf.val = tf; + paramsHead->flip.tf.specified = NV_TRUE; + + paramsHead->viewPortSizeIn.width = + headModeSetConfig->mode.timings.hVisible; + paramsHead->viewPortSizeIn.height = + headModeSetConfig->mode.timings.vVisible; + } + + return NV_TRUE; +} + + +static NvBool KmsSetMode( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + const NvBool commit) +{ + struct NvKmsSetModeParams *params = NULL; + NvBool status = NV_FALSE; + + params = nvKmsKapiCalloc(1, sizeof(*params)); + + if (params == NULL) { + goto done; + } + + if (!NvKmsKapiRequestedModeSetConfigToKms(device, + requestedConfig, + params, + commit)) { + goto done; + } + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_SET_MODE, + params, sizeof(*params)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_SET_MODE ioctl failed"); + goto done; + } + + if (params->reply.status != NVKMS_SET_MODE_STATUS_SUCCESS) + { + int i; + + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_SET_MODE failed! Status:\n"); + + nvKmsKapiLogDeviceDebug( + device, + " top-level status: %d\n", params->reply.status); + + nvKmsKapiLogDeviceDebug( + device, + " disp0 status: %d\n", params->reply.disp[0].status); + + for (i = 0; i < ARRAY_LEN(params->reply.disp[0].head); i++) + { + nvKmsKapiLogDeviceDebug( + device, + " head%d status: %d\n", + i, params->reply.disp[0].head[i].status); + } + + status = NV_FALSE; + } + +done: + + if (params != NULL) { + nvKmsKapiFree(params); + } + + return status; +}
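+ +/* + * A head participates in a flip only if it is requested in headsMask, drives + * at least one display, and, when committing, is active. + */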
Status:\n"); + + nvKmsKapiLogDeviceDebug( + device, + " top-level status: %d\n", params->reply.status); + + nvKmsKapiLogDeviceDebug( + device, + " disp0 status: %d\n", params->reply.disp[0].status); + + for (i = 0; i < ARRAY_LEN(params->reply.disp[0].head); i++) + { + nvKmsKapiLogDeviceDebug( + device, + " head%d status: %d\n", + i, params->reply.disp[0].head[i].status); + } + + status = NV_FALSE; + } + +done: + + if (params != NULL) { + nvKmsKapiFree(params); + } + + return status; +} + +static NvBool IsHeadConfigValid( + const struct NvKmsFlipParams *params, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig, + NvU32 head) +{ + if ((requestedConfig->headsMask & (1 << head)) == 0x0) { + return NV_FALSE; + } + + if (headModeSetConfig->numDisplays == 0) { + return NV_FALSE; + } + + if (params->request.commit && !headModeSetConfig->bActive) { + return NV_FALSE; + } + return NV_TRUE; +} + +static NvBool KmsFlip( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit) +{ + NvBool bChanged = NV_FALSE; + struct NvKmsFlipParams *params = NULL; + NvBool status = NV_TRUE; + NvU32 i; + + params = nvKmsKapiCalloc(1, sizeof(*params)); + + if (params == NULL) { + return NV_FALSE; + } + + params->request.deviceHandle = device->hKmsDevice; + params->request.commit = commit; + + for (i = 0; i < ARRAY_LEN(params->request.sd); i++) { + struct NvKmsFlipRequestOneSubDevice *sdParams = ¶ms->request.sd[i]; + NvU32 head; + + if ((device->subDeviceMask & (1 << i)) == 0x0) { + continue; + } + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + enum NvKmsOutputTf tf; + + struct NvKmsFlipCommonParams *flipParams = &sdParams->head[head]; + + NvU32 layer; + + if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, head)) { + continue; + } + + sdParams->requestedHeadsBitMask |= 1 << head; + + NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig, + flipParams, + NV_FALSE /* bFromKmsSetMode */); + + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + + const struct NvKmsKapiLayerRequestedConfig + *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer]; + + status = NvKmsKapiLayerConfigToKms(device, + layerRequestedConfig, + layer, + head, + flipParams, + commit, + NV_FALSE /* bFromKmsSetMode */); + + if (status != NV_TRUE) { + goto done; + } + + bChanged = NV_TRUE; + } + + status = GetOutputTransferFunction(headRequestedConfig, &tf); + if (status != NV_TRUE) { + goto done; + } + + flipParams->tf.val = tf; + flipParams->tf.specified = NV_TRUE; + } + } + + if (!bChanged) { + goto done; + } + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_FLIP, + params, sizeof(*params)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_FLIP ioctl failed"); + goto done; + } + + if (!bChanged || !commit) { + goto done; + } + + /*! 
+ for (i = 0; i < ARRAY_LEN(params->request.sd); i++) { + + struct NvKmsFlipReplyOneSubDevice *sdReplyParams = &params->reply.sd[i]; + + NvU32 head; + + if ((device->subDeviceMask & (1 << i)) == 0x0) { + continue; + } + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); + head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + + struct NvKmsKapiHeadReplyConfig *headReplyConfig = + &replyConfig->headReplyConfig[head]; + + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + + struct NvKmsFlipCommonReplyOneHead *flipParams = &sdReplyParams->head[head]; + + NvU32 layer; + + if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, head)) { + continue; + } + + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + + const struct NvKmsKapiLayerConfig *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer].config; + + struct NvKmsKapiLayerReplyConfig *layerReplyConfig = + &headReplyConfig->layerReplyConfig[layer]; + + /*! Initialize explicitly to -1, as 0 is a valid file descriptor. */ + layerReplyConfig->postSyncptFd = -1; + if (layerRequestedConfig->syncptParams.postSyncptRequested) { + layerReplyConfig->postSyncptFd = + flipParams->layer[layer].postSyncpt.u.fd; + } + } + } + } + +done: + + nvKmsKapiFree(params); + + return status; +} + +static NvBool ApplyModeSetConfig( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit) +{ + NvBool bRequiredModeset = NV_FALSE; + NvU32 head; + + if (device == NULL || requestedConfig == NULL) { + return NV_FALSE; + } + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + + const struct NvKmsKapiLayerRequestedConfig *primaryLayerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[NVKMS_KAPI_LAYER_PRIMARY_IDX]; + + if ((requestedConfig->headsMask & (1 << head)) == 0x0) { + continue; + } + + /* + * The source width/height of the primary layer represent the + * ViewPortIn dimensions, and its destination X, Y, width, and height + * represent the ViewPortOut dimensions. Applying a change to either + * requires a full modeset. + */ + + bRequiredModeset = + primaryLayerRequestedConfig->flags.srcWHChanged || + primaryLayerRequestedConfig->flags.dstXYChanged || + primaryLayerRequestedConfig->flags.dstWHChanged || + headRequestedConfig->flags.activeChanged || + headRequestedConfig->flags.displaysChanged || + headRequestedConfig->flags.modeChanged; + + /* + * The NVKMS flip ioctl cannot validate the flip configuration for an + * inactive head, so use the modeset ioctl if the configuration + * contains any such head.
+ */ + if (!commit && + headModeSetConfig->numDisplays != 0 && !headModeSetConfig->bActive) { + bRequiredModeset = TRUE; + } + + if (bRequiredModeset) { + break; + } + } + + if (bRequiredModeset) { + return KmsSetMode(device, requestedConfig, commit); + } + + return KmsFlip(device, requestedConfig, replyConfig, commit); +} + +void nvKmsKapiHandleEventQueueChange +( + struct NvKmsKapiDevice *device +) +{ + if (device == NULL) { + return; + } + + /* + * If the callback is NULL, event interest declaration should be + * rejected, and no events would be reported. + */ + nvAssert(device->eventCallback != NULL); + + do + { + struct NvKmsGetNextEventParams kmsEventParams = { }; + struct NvKmsKapiEvent kapiEvent = { }; + NvBool err = NV_FALSE; + + if (!nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_GET_NEXT_EVENT, + &kmsEventParams, sizeof(kmsEventParams))) { + break; + } + + if (!kmsEventParams.reply.valid) { + break; + } + + kapiEvent.type = kmsEventParams.reply.event.eventType; + + kapiEvent.device = device; + kapiEvent.privateData = device->privateData; + + switch (kmsEventParams.reply.event.eventType) { + case NVKMS_EVENT_TYPE_DPY_CHANGED: + kapiEvent.u.displayChanged.display = + nvDpyIdToNvU32(kmsEventParams. + reply.event.u.dpyChanged.dpyId); + break; + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + kapiEvent.u.dynamicDisplayConnected.display = + nvDpyIdToNvU32(kmsEventParams. + reply.event.u.dynamicDpyConnected.dpyId); + break; + case NVKMS_EVENT_TYPE_FLIP_OCCURRED: + kapiEvent.u.flipOccurred.head = + kmsEventParams.reply.event.u.flipOccurred.head; + kapiEvent.u.flipOccurred.layer = + kmsEventParams.reply.event.u.flipOccurred.layer; + break; + default: + continue; + } + + if (err) { + nvKmsKapiLogDeviceDebug( + device, + "Error in conversion from " + "NvKmsGetNextEventParams to NvKmsKapiEvent"); + continue; + } + + device->eventCallback(&kapiEvent); + + } while(1); +} + +/* + * Helper function to convert NvKmsQueryDpyCRC32Reply to NvKmsKapiDpyCRC32. 
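+ * Each of the three CRCs (output, raster generator, compositor) carries + * its own 'supported' flag, which is copied along with the value.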
*/ +static void NvKmsCrcsToKapi +( + const struct NvKmsQueryDpyCRC32Reply *crcs, + struct NvKmsKapiCrcs *kmsCrcs +) +{ + kmsCrcs->outputCrc32.value = crcs->outputCrc32.value; + kmsCrcs->outputCrc32.supported = crcs->outputCrc32.supported; + kmsCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value; + kmsCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported; + kmsCrcs->compositorCrc32.value = crcs->compositorCrc32.value; + kmsCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported; +} + +static NvBool GetCRC32 +( + struct NvKmsKapiDevice *device, + NvU32 head, + struct NvKmsKapiCrcs *crc32 +) +{ + struct NvKmsQueryDpyCRC32Params params = { }; + NvBool status; + + if (device->hKmsDevice == 0x0) { + return NV_TRUE; + } + + params.request.deviceHandle = device->hKmsDevice; + params.request.dispHandle = device->hKmsDisp; + params.request.head = head; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DPY_CRC32, + &params, sizeof(params)); + + if (!status) { + nvKmsKapiLogDeviceDebug(device, "NVKMS QueryDpyCRC32Data failed."); + return NV_FALSE; + } + NvKmsCrcsToKapi(&params.reply, crc32); + return NV_TRUE; +} + +NvBool nvKmsKapiGetFunctionsTableInternal +( + struct NvKmsKapiFunctionsTable *funcsTable +) +{ + if (funcsTable == NULL) { + return NV_FALSE; + } + + if (nvkms_strcmp(funcsTable->versionString, NV_VERSION_STRING) != 0) { + funcsTable->versionString = NV_VERSION_STRING; + return NV_FALSE; + } + + funcsTable->systemInfo.bAllowWriteCombining = + nvkms_allow_write_combining(); + + funcsTable->enumerateGpus = EnumerateGpus; + + funcsTable->allocateDevice = AllocateDevice; + funcsTable->freeDevice = FreeDevice; + + funcsTable->grabOwnership = GrabOwnership; + funcsTable->releaseOwnership = ReleaseOwnership; + + funcsTable->declareEventInterest = DeclareEventInterest; + + funcsTable->getDeviceResourcesInfo = GetDeviceResourcesInfo; + funcsTable->getDisplays = GetDisplays; + funcsTable->getConnectorInfo = GetConnectorInfo; + + funcsTable->getStaticDisplayInfo = GetStaticDisplayInfo; + funcsTable->getDynamicDisplayInfo = GetDynamicDisplayInfo; + + funcsTable->allocateVideoMemory = AllocateVideoMemory; + funcsTable->allocateSystemMemory = AllocateSystemMemory; + funcsTable->importMemory = ImportMemory; + funcsTable->dupMemory = DupMemory; + funcsTable->exportMemory = ExportMemory; + funcsTable->freeMemory = FreeMemory; + funcsTable->getSystemMemoryHandleFromSgt = GetSystemMemoryHandleFromSgt; + funcsTable->getSystemMemoryHandleFromDmaBuf = + GetSystemMemoryHandleFromDmaBuf; + + funcsTable->mapMemory = MapMemory; + funcsTable->unmapMemory = UnmapMemory; + + funcsTable->createSurface = CreateSurface; + funcsTable->destroySurface = DestroySurface; + + funcsTable->getDisplayMode = GetDisplayMode; + funcsTable->validateDisplayMode = ValidateDisplayMode; + + funcsTable->applyModeSetConfig = ApplyModeSetConfig; + + funcsTable->allocateChannelEvent = nvKmsKapiAllocateChannelEvent; + funcsTable->freeChannelEvent = nvKmsKapiFreeChannelEvent; + + funcsTable->getCRC32 = GetCRC32; + + funcsTable->getMemoryPages = GetMemoryPages; + funcsTable->freeMemoryPages = FreeMemoryPages; + + return NV_TRUE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c new file mode 100644 index 0000000..55901cf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c @@ -0,0 +1,132 @@ +/* + * 
SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-format.h" +#include "nv_common_utils.h" +#include "nvctassert.h" + +#include + +#define RGB_ENTRY(_format, _depth, _bytesPerPixel) \ + [NvKmsSurfaceMemoryFormat##_format] = { \ + .format = NvKmsSurfaceMemoryFormat##_format, \ + .name = #_format, \ + .depth = _depth, \ + .isYUV = NV_FALSE, \ + .numPlanes = 1, \ + { \ + .rgb = { \ + .bytesPerPixel = _bytesPerPixel, \ + .bitsPerPixel = _bytesPerPixel * 8, \ + }, \ + }, \ + } + +#define YUV_ENTRY(_format, \ + _depth, \ + _numPlanes, \ + _depthPerComponent, \ + _storageBitsPerComponent, \ + _horizChromaDecimationFactor, \ + _vertChromaDecimationFactor) \ + [NvKmsSurfaceMemoryFormat##_format] = { \ + .format = NvKmsSurfaceMemoryFormat##_format, \ + .name = #_format, \ + .depth = _depth, \ + .isYUV = NV_TRUE, \ + .numPlanes = _numPlanes, \ + { \ + .yuv = { \ + .depthPerComponent = _depthPerComponent, \ + .storageBitsPerComponent = _storageBitsPerComponent, \ + .horizChromaDecimationFactor = _horizChromaDecimationFactor, \ + .vertChromaDecimationFactor = _vertChromaDecimationFactor, \ + }, \ + }, \ + } + +static const NvKmsSurfaceMemoryFormatInfo nvKmsEmptyFormatInfo; + +/* + * For 10/12-bit YUV formats, each component is packed in a 16-bit container in + * memory, and fetched by display HW as such. 
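+ *
+ * For example, in the table below the Y10___U10V10_N420 entry has
+ * depthPerComponent = 10 but storageBitsPerComponent = 16 for exactly this
+ * reason.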
+ */ +static const NvKmsSurfaceMemoryFormatInfo nvKmsSurfaceMemoryFormatInfo[] = { + RGB_ENTRY(I8, 8, 1), + RGB_ENTRY(A1R5G5B5, 16, 2), + RGB_ENTRY(X1R5G5B5, 15, 2), + RGB_ENTRY(R5G6B5, 16, 2), + RGB_ENTRY(A8R8G8B8, 32, 4), + RGB_ENTRY(X8R8G8B8, 24, 4), + RGB_ENTRY(A2B10G10R10, 32, 4), + RGB_ENTRY(X2B10G10R10, 30, 4), + RGB_ENTRY(A8B8G8R8, 32, 4), + RGB_ENTRY(X8B8G8R8, 24, 4), + RGB_ENTRY(RF16GF16BF16AF16, 64, 8), + RGB_ENTRY(R16G16B16A16, 64, 8), + RGB_ENTRY(RF32GF32BF32AF32, 128, 16), + YUV_ENTRY(Y8_U8__Y8_V8_N422, 16, 1, 8, 8, 2, 1), + YUV_ENTRY(U8_Y8__V8_Y8_N422, 16, 1, 8, 8, 2, 1), + YUV_ENTRY(Y8___U8V8_N444, 24, 2, 8, 8, 1, 1), + YUV_ENTRY(Y8___V8U8_N444, 24, 2, 8, 8, 1, 1), + YUV_ENTRY(Y8___U8V8_N422, 16, 2, 8, 8, 2, 1), + YUV_ENTRY(Y8___V8U8_N422, 16, 2, 8, 8, 2, 1), + YUV_ENTRY(Y8___U8V8_N420, 12, 2, 8, 8, 2, 2), + YUV_ENTRY(Y8___V8U8_N420, 12, 2, 8, 8, 2, 2), + YUV_ENTRY(Y10___U10V10_N444, 30, 2, 10, 16, 1, 1), + YUV_ENTRY(Y10___V10U10_N444, 30, 2, 10, 16, 1, 1), + YUV_ENTRY(Y10___U10V10_N422, 20, 2, 10, 16, 2, 1), + YUV_ENTRY(Y10___V10U10_N422, 20, 2, 10, 16, 2, 1), + YUV_ENTRY(Y10___U10V10_N420, 15, 2, 10, 16, 2, 2), + YUV_ENTRY(Y10___V10U10_N420, 15, 2, 10, 16, 2, 2), + YUV_ENTRY(Y12___U12V12_N444, 36, 2, 12, 16, 1, 1), + YUV_ENTRY(Y12___V12U12_N444, 36, 2, 12, 16, 1, 1), + YUV_ENTRY(Y12___U12V12_N422, 24, 2, 12, 16, 2, 1), + YUV_ENTRY(Y12___V12U12_N422, 24, 2, 12, 16, 2, 1), + YUV_ENTRY(Y12___U12V12_N420, 18, 2, 12, 16, 2, 2), + YUV_ENTRY(Y12___V12U12_N420, 18, 2, 12, 16, 2, 2), + YUV_ENTRY(Y8___U8___V8_N444, 24, 3, 8, 8, 1, 1), + YUV_ENTRY(Y8___U8___V8_N420, 12, 3, 8, 8, 2, 2), +}; + +ct_assert(ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo) == + (NvKmsSurfaceMemoryFormatMax + 1)); + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format) +{ + if (format >= ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo)) { + return &nvKmsEmptyFormatInfo; + } + + return &nvKmsSurfaceMemoryFormatInfo[format]; +} + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + return (pFormatInfo != NULL) ? pFormatInfo->name : NULL; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c new file mode 100644 index 0000000..928fde8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c @@ -0,0 +1,377 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include +#include /* NV_DISP_BASE_NOTIFIER_1, NV_DISP_NOTIFICATION_2 */ +#include /* NV_DISP_NOTIFIER */ + +/* + * HW will never write 1 to lower 32bits of timestamp + */ +#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID 1 + +/* + * Higher 32bits of timestamp will be 0 only during first ~4sec of + * boot. So for practical purposes, we can consider 0 as invalid. + */ +#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID 0 + +static void GetNotifierTimeStamp(volatile const NvU32 *notif, + NvU32 timeStampLoIdx, + NvU32 timeStampHiIdx, + struct nvKmsParsedNotifier *out) +{ + NvU32 lo, hi; + NvU32 pollCount = 0; + + /* + * Caller of ParseNotifier() is expected to poll for notifier + * status to become BEGUN/FINISHED for valid timestamp. + */ + if (out->status == NVKMS_NOTIFIER_STATUS_NOT_BEGUN) { + return; + } + + /* + * HW does 4B writes to notifier, so poll till both timestampLo + * and timestampHi bytes become valid. + */ + do { + lo = notif[timeStampLoIdx]; + hi = notif[timeStampHiIdx]; + + if ((lo != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID) && + (hi != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID)) { + out->timeStamp = (NvU64)lo | ((NvU64)hi << 32); + out->timeStampValid = NV_TRUE; + break; + } + + if (++pollCount >= 100) { + break; + } + } while (1); +} + +static void ResetNotifierLegacy(NvBool overlay, volatile void *in) +{ + volatile NvU32 *notif = in; + + if (overlay) { + notif[NV_DISP_NOTIFICATION_2_INFO16_3] = + DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; + } else { + notif[NV_DISP_BASE_NOTIFIER_1__0] = + DRF_DEF(_DISP, _BASE_NOTIFIER_1__0, _STATUS, _NOT_BEGUN); + } +} + +static void ResetNotifierFourWord(volatile void *in) +{ + volatile NvU32 *notif = in; + + notif[NV_DISP_NOTIFICATION_2_INFO16_3] = + DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; +} + +static void ResetNotifierFourWordNVDisplay(volatile void *in) +{ + volatile NvU32 *notif = in; + + notif[NV_DISP_NOTIFIER__0] = + DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFIER__2] = + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; +} + +void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base) +{ + const NvU32 sizeInBytes = nvKmsSizeOfNotifier(format, overlay); + void *notif = + (void *)((char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ResetNotifierLegacy(overlay, notif); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ResetNotifierFourWord(notif); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ResetNotifierFourWordNVDisplay(notif); + break; + } +} + +static void ParseNotifierLegacy(NvBool overlay, volatile const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + + if (overlay) { + NvU32 notif3; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif3 = notif[NV_DISP_NOTIFICATION_2_INFO16_3]; + + switch(DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, notif3)) { + case NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN: + out->status = 
NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, notif3); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFICATION_2_TIME_STAMP_0, + NV_DISP_NOTIFICATION_2_TIME_STAMP_1, + out); + } else { + NvU32 notif0; + + /* There's a timestamp available in this notifier, but it's a weird + * 14-bit "audit timestamp" that's not useful for us. */ + out->timeStampValid = NV_FALSE; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif0 = notif[NV_DISP_BASE_NOTIFIER_1__0]; + + switch(DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _STATUS, notif0)) { + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _PRESENTATION_COUNT, notif0); + } +} + +static void ParseNotifierFourWord(const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + NvU32 notif3; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif3 = notif[NV_DISP_NOTIFICATION_2_INFO16_3]; + + switch(DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, notif3)) { + case NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, notif3); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFICATION_2_TIME_STAMP_0, + NV_DISP_NOTIFICATION_2_TIME_STAMP_1, + out); +} + +static void ParseNotifierFourWordNVDisplay(const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + NvU32 notif0; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif0 = notif[NV_DISP_NOTIFIER__0]; + + switch(DRF_VAL(_DISP, _NOTIFIER__0, _STATUS, notif0)) { + case NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFIER__0_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFIER__0_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFIER__0, _PRESENT_COUNT, notif0); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFIER__2, + NV_DISP_NOTIFIER__3, + out); +} + +void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, const void *base, + struct nvKmsParsedNotifier *out) +{ + const NvU32 sizeInBytes = nvKmsSizeOfNotifier(format, overlay); + const void *notif = + (const void *)((const char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ParseNotifierLegacy(overlay, notif, out); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ParseNotifierFourWord(notif, out); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ParseNotifierFourWordNVDisplay(notif, out); + 
break; + } +} + +NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format) +{ + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + return 0; + case NVKMS_NISO_FORMAT_FOUR_WORD: + return NV_DISP_NOTIFICATION_2_INFO32_2; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return NV_DISP_NOTIFIER__0; + } + + return 0; +} + +static void ResetSemaphoreLegacy(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + *sema = payload; +} + +static void ResetSemaphoreFourWord(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + sema[NV_DISP_NOTIFICATION_2_INFO32_2] = payload; +} + +static void ResetSemaphoreFourWordNVDisplay(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + sema[NV_DISP_NOTIFIER__0] = payload; +} + +void nvKmsResetSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, void *base, + NvU32 payload) +{ + const NvU32 sizeInBytes = nvKmsSizeOfSemaphore(format); + void *sema = + (void *)((char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ResetSemaphoreLegacy(sema, payload); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ResetSemaphoreFourWord(sema, payload); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ResetSemaphoreFourWordNVDisplay(sema, payload); + break; + } +} + +static NvU32 ParseSemaphoreLegacy(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return *sema; +} + +static NvU32 ParseSemaphoreFourWord(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return sema[NV_DISP_NOTIFICATION_2_INFO32_2]; +} + +static NvU32 ParseSemaphoreFourWordNVDisplay(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return sema[NV_DISP_NOTIFIER__0]; +} + +void nvKmsParseSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, const void *base, + struct nvKmsParsedSemaphore *out) +{ + const NvU32 sizeInBytes = nvKmsSizeOfSemaphore(format); + const void *sema = + (const void *)((const char *)base + (sizeInBytes * index)); + NvU32 payload = 0; + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + payload = ParseSemaphoreLegacy(sema); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + payload = ParseSemaphoreFourWord(sema); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + payload = ParseSemaphoreFourWordNVDisplay(sema); + break; + } + + out->payload = payload; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h new file mode 100644 index 0000000..91a9a85 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h @@ -0,0 +1,330 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * Define the entry points which the NVKMS kernel interface layer
+ * provides to core NVKMS.
+ */
+
+#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_)
+#define _NVIDIA_MODESET_OS_INTERFACE_H_
+
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/stddef.h> /* size_t */
+#else
+#include <stddef.h>       /* size_t */
+#endif
+#include "nvtypes.h" /* NvU8 */
+
+#include "nvkms.h"
+#include "nv_stdarg.h"
+
+enum NvKmsSyncPtOp {
+    NVKMS_SYNCPT_OP_ALLOC,
+    NVKMS_SYNCPT_OP_GET,
+    NVKMS_SYNCPT_OP_PUT,
+    NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH,
+    NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD,
+    NVKMS_SYNCPT_OP_READ_MINVAL,
+};
+
+typedef struct {
+
+    struct {
+        const char *syncpt_name; /* in */
+        NvU32 id;                /* out */
+    } alloc;
+
+    struct {
+        NvU32 id; /* in */
+    } put;
+
+    struct {
+        NvS32 fd;     /* in */
+        NvU32 id;     /* out */
+        NvU32 thresh; /* out */
+    } fd_to_id_and_thresh;
+
+    struct {
+        NvU32 id;     /* in */
+        NvU32 thresh; /* in */
+        NvS32 fd;     /* out */
+    } id_and_thresh_to_fd;
+
+    struct {
+        NvU32 id;     /* in */
+        NvU32 minval; /* out */
+    } read_minval;
+} NvKmsSyncPtOpParams;
+
+
+void nvkms_call_rm    (void *ops);
+void* nvkms_alloc     (size_t size,
+                       NvBool zero);
+void nvkms_free       (void *ptr,
+                       size_t size);
+void* nvkms_memset    (void *ptr,
+                       NvU8 c,
+                       size_t size);
+void* nvkms_memcpy    (void *dest,
+                       const void *src,
+                       size_t n);
+void* nvkms_memmove   (void *dest,
+                       const void *src,
+                       size_t n);
+int nvkms_memcmp      (const void *s1,
+                       const void *s2,
+                       size_t n);
+size_t nvkms_strlen   (const char *s);
+int nvkms_strcmp      (const char *s1,
+                       const char *s2);
+char* nvkms_strncpy   (char *dest,
+                       const char *src,
+                       size_t n);
+void nvkms_usleep     (NvU64 usec);
+NvU64 nvkms_get_usec  (void);
+int nvkms_copyin      (void *kptr,
+                       NvU64 uaddr,
+                       size_t n);
+int nvkms_copyout     (NvU64 uaddr,
+                       const void *kptr,
+                       size_t n);
+void nvkms_yield      (void);
+void nvkms_dump_stack (void);
+NvBool nvkms_syncpt_op(enum NvKmsSyncPtOp op,
+                       NvKmsSyncPtOpParams *params);
+int nvkms_snprintf    (char *str,
+                       size_t size,
+                       const char *format, ...)
+    __attribute__((format (printf, 3, 4)));
+
+int nvkms_vsnprintf   (char *str,
+                       size_t size,
+                       const char *format,
+                       va_list ap);
+
+#define NVKMS_LOG_LEVEL_INFO 0
+#define NVKMS_LOG_LEVEL_WARN 1
+#define NVKMS_LOG_LEVEL_ERROR 2
+
+void nvkms_log        (const int level,
+                       const char *gpuPrefix,
+                       const char *msg);
+
+/*!
+ * Refcounted pointer to an object that may be freed while references still
+ * exist.
+ *
+ * This structure is intended to be used for nvkms timers to refer to objects
+ * that may be freed while timers with references to the object are still
+ * pending.
+ *
+ * When the owner of an nvkms_ref_ptr is freed, the teardown code should call
+ * nvkms_free_ref_ptr(). That marks the pointer as invalid so that later calls
+ * to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than
+ * the pointer originally passed to nvkms_alloc_ref_ptr().
+ */
+struct nvkms_ref_ptr;
+
+/*!
+ * Allocate and initialize a ref_ptr.
+ * + * The pointer stored in the ref_ptr is initialized to ptr, and its refcount is + * initialized to 1. + */ +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr); + +/*! + * Clear a ref_ptr. + * + * This function sets the pointer stored in the ref_ptr to NULL and drops the + * reference created by nvkms_alloc_ref_ptr(). This function should be called + * when the object pointed to by the ref_ptr is freed. + * + * A caller should make sure that no code that can call nvkms_inc_ref() can + * execute after nvkms_free_ref_ptr() is called. + */ +void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Increment the refcount of a ref_ptr. + * + * This function should be used when a pointer to the ref_ptr is stored + * somewhere. For example, when the ref_ptr is used as the argument to + * nvkms_alloc_timer. + * + * This may be called outside of the nvkms_lock, for example by an RM callback. + */ +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Decrement the refcount of a ref_ptr and extract the embedded pointer. + * + * This should be used by code that needs to atomically determine whether the + * object pointed to by the ref_ptr still exists. To prevent the object from + * being destroyed while the current thread is executing, this should be called + * from inside the nvkms_lock. + */ +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr); + +typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32); +typedef struct nvkms_timer_t nvkms_timer_handle_t; + +/*! + * Schedule a callback function to be called in the future. + * + * The callback function 'proc' will be called with the arguments + * 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now. + * If usec==0, the callback will be scheduled to be called as soon as + * possible. + * + * The callback function is guaranteed to be called back with the + * nvkms_lock held, and in process context. + * + * Returns an opaque handle, nvkms_timer_handle_t*, or NULL on + * failure. If non-NULL, the caller is responsible for caching the + * handle and eventually calling nvkms_free_timer() to free the + * memory. + * + * The nvkms_lock may be held when nvkms_alloc_timer() is called, but + * the nvkms_lock is not required. + */ +nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec); + +/*! + * Schedule a callback function to be called in the future. + * + * This function is like nvkms_alloc_timer() except that instead of returning a + * pointer to a structure that the caller should free later, the timer will free + * itself after executing the callback function. This is only intended for + * cases where the caller cannot cache the nvkms_alloc_timer() return value. + */ +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec); + +/*! + * Free the nvkms_timer_t object. If the callback function has not + * yet been called, freeing the nvkms_timer_handle_t will guarantee + * that it is not called. + * + * The nvkms_lock must be held when calling nvkms_free_timer(). + */ +void nvkms_free_timer (nvkms_timer_handle_t *handle); + + + +/*! + * Notify the NVKMS kernel interface that the event queue has changed. + * + * \param[in] pOpenKernel This indicates the file descriptor + * ("per-open") of the client whose event queue + * has been updated. This is the pointer + * passed by the kernel interface to nvKmsOpen(). 
+ * \param[in] eventsAvailable If TRUE, a new event has been added to the + * event queue. If FALSE, the last event has + * been removed from the event queue. + */ +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable); + + +/*! + * Get the "per-open" data (the pointer returned by nvKmsOpen()) + * associated with this fd. + */ +void* nvkms_get_per_open_data(int fd); + + +/*! + * Raise and lower the reference count of the specified GPU. + */ +NvBool nvkms_open_gpu(NvU32 gpuId); +void nvkms_close_gpu(NvU32 gpuId); + + +/*! + * Enumerate nvidia gpus. + */ + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info); + +/*! + * Availability of write combining support for video memory. + */ + +NvBool nvkms_allow_write_combining(void); + +/*! + * Checks whether the fd is associated with an nvidia character device. + */ +NvBool nvkms_fd_is_nvidia_chardev(int fd); + +/*! + * NVKMS interface for kernel space NVKMS clients like KAPI + */ + +struct nvkms_per_open; + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +); + +void nvkms_close_from_kapi(struct nvkms_per_open *popen); + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * APIs for locking. + */ + +typedef struct nvkms_sema_t nvkms_sema_handle_t; + +nvkms_sema_handle_t* + nvkms_sema_alloc (void); +void nvkms_sema_free (nvkms_sema_handle_t *sema); +void nvkms_sema_down (nvkms_sema_handle_t *sema); +void nvkms_sema_up (nvkms_sema_handle_t *sema); + +/*! + * APIs to register/unregister backlight device. + */ +struct nvkms_backlight_device; + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness); + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd); + +#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h new file mode 100644 index 0000000..1276186 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef __NV_KMS_H__
+#define __NV_KMS_H__
+
+#include "nvtypes.h"
+#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#include <linux/stddef.h> /* size_t */
+#else
+#include <stddef.h>       /* size_t */
+#endif
+
+#include "nvkms-kapi.h"
+
+typedef struct nvkms_per_open nvkms_per_open_handle_t;
+
+typedef void nvkms_procfs_out_string_func_t(void *data,
+                                            const char *str);
+
+typedef void nvkms_procfs_proc_t(void *data,
+                                 char *buffer, size_t size,
+                                 nvkms_procfs_out_string_func_t *outString);
+
+typedef struct {
+    const char *name;
+    nvkms_procfs_proc_t *func;
+} nvkms_procfs_file_t;
+
+enum NvKmsClientType {
+    NVKMS_CLIENT_USER_SPACE,
+    NVKMS_CLIENT_KERNEL_SPACE,
+};
+
+NvBool nvKmsIoctl(
+    void *pOpenVoid,
+    NvU32 cmd,
+    NvU64 paramsAddress,
+    const size_t paramSize);
+
+void nvKmsClose(void *pOpenVoid);
+
+void* nvKmsOpen(
+    NvU32 pid,
+    enum NvKmsClientType clientType,
+    nvkms_per_open_handle_t *pOpenKernel);
+
+NvBool nvKmsModuleLoad(void);
+
+void nvKmsModuleUnload(void);
+
+void nvKmsSuspend(NvU32 gpuId);
+void nvKmsResume(NvU32 gpuId);
+
+void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles);
+
+void nvKmsKapiHandleEventQueueChange
+(
+    struct NvKmsKapiDevice *device
+);
+
+NvBool nvKmsKapiGetFunctionsTableInternal
+(
+    struct NvKmsKapiFunctionsTable *funcsTable
+);
+
+NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
+NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
+
+#endif /* __NV_KMS_H__ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
new file mode 100644
index 0000000..91368b6
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
@@ -0,0 +1,546 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+// This file implements the event sink class, which the DisplayPort library
+// uses to notify the driver of display devices being connected or
+// disconnected.
+ +#include "dp/nvdp-connector-event-sink.h" + +#include "nvdp-connector-event-sink.hpp" + +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-utils.h" +#include "nvkms-vrr.h" + +#include "nvkms-attributes.h" +#include "nvkms-private.h" + +namespace nvkmsDisplayPort { + +ConnectorEventSink::ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo) + : pConnectorEvo(pConnectorEvo) +{ +} + +static NVDpyEvoPtr FindDpyByDevice(NVConnectorEvoPtr pConnectorEvo, + DisplayPort::Device *device) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice && + pDpyEvo->dp.pDpLibDevice->device == device) { + return pDpyEvo; + } + } + } + return NULL; +} + +// Looks for a display that matches the given DP device from +// the list of disconnected dpys. +static NVDpyEvoPtr FindMatchingDisconnectedDpy(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDPLibDevicePtr pDpLibDevice) +{ + NVDpyEvoPtr pDpyEvo; + + // A match is simply that the display appears on the same connector. + // DP MST devices are matched by topology address in nvGetDPMSTDpy. + const NVDpyIdList dpyIdList = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + if (!pDpyEvo->dp.pDpLibDevice || !pDpyEvo->dp.pDpLibDevice->isPlugged) { + return pDpyEvo; + } + } + return NULL; +} + +const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device) +{ + DisplayPort::GUID guid; + + if (!device) { + return NULL; + } + + guid = device->getGUID(); + if (!guid.isGuidZero()) { + static DisplayPort::GUID::StringBuffer sb; + guid.toString(sb); + return sb; + } + + return NULL; +} + +bool nvDPGetDeviceGUID(DisplayPort::Device *device, + NvU8 guidData[DPCD_GUID_SIZE]) +{ + DisplayPort::GUID guid; + + if (!device) { + return false; + } + + guid = device->getGUID(); + if (guid.isGuidZero()) { + return false; + } + + nvkms_memcpy((void*)guidData, (void*)guid.data, sizeof(guid.data)); + + return true; +} + + +static const char *DPGetDevicePortStr(DisplayPort::Device *device, + bool skipLeadingZero) +{ + DisplayPort::Address addr; + + if (!device) { + return NULL; + } + + addr = device->getTopologyAddress(); + if (addr.size() > 0) { + static DisplayPort::Address::StringBuffer sb; + addr.toString(sb, skipLeadingZero); + return sb; + } + + return NULL; +} + + +static void nvDPPrintDeviceInfo(NVConnectorEvoPtr pConnectorEvo, + DisplayPort::Device *device) +{ +#if defined(DEBUG) + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + const char *connectorType; + unsigned major, minor; + const char *tmp; + + device->getDpcdRevision(&major, &minor); + + switch (device->getConnectorType()) { + case DisplayPort::connectorDisplayPort: + connectorType = "DisplayPort"; + break; + + case DisplayPort::connectorHDMI: + connectorType = "HDMI"; + break; + + case DisplayPort::connectorDVI: + connectorType = "DVI"; + break; + + case DisplayPort::connectorVGA: + connectorType = "VGA"; + break; + + default: + connectorType = "unknown"; + break; + } + + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%s-%d: new DisplayPort %d.%d device detected", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex, major, minor); + tmp = DPGetDevicePortStr(device, false /* skipLeadingZero */); + if (tmp) { + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Address: %s", tmp); + } + tmp = nvDPGetDeviceGUIDStr(device); + if (tmp) { + nvEvoLogDisp(pDispEvo, 
EVO_LOG_INFO, + " GUID: {%s}", tmp); + } + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Connector: %s", connectorType); + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Video: %s", device->isVideoSink() ? "yes" : "no"); + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Audio: %s", device->isAudioSink() ? "yes" : "no"); +#endif +} + +static void nvDPAddDeviceToActiveGroup(NVDpyEvoPtr pDpyEvo) +{ + const NVDPLibConnectorRec *pDpLibConnector = + pDpyEvo->pConnectorEvo->pDpLibConnector; + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 head; + + // If the device is being driven by the firmware group, then we're just + // tracking it so that it can be shut down by the modeset path, and we + // don't have any timing information for it. + + if (pDpLibConnector->headInFirmware) { + return; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpLibConnector->dpyIdList[head])) { + pDpLibConnector->pGroup[head]->insert( + pDpyEvo->dp.pDpLibDevice->device); + break; + } + } +} + +// when we get this event, the DP lib has done link training and the +// EDID has been read (by the DP lib) +void ConnectorEventSink::newDevice(DisplayPort::Device *device) +{ + NVDPLibDevicePtr pDpLibDevice = NULL; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo = NULL; + NvBool dynamicDpyCreated = FALSE; + + // XXX [VM DP MST] Current POR requires we also check/handle: + // - More than 64 DP dpys on a connector = print error. + // - More than 127 dpys on a system = print error. + + nvDPPrintDeviceInfo(pConnectorEvo, device); + + // Only add video sink devices. + if (!device->isVideoSink()) { + return; + } + + // Protect against redundant newDevices() + pDpyEvo = FindDpyByDevice(pConnectorEvo, device); + if (pDpyEvo) { + nvAssert(!"Got (redundant) DP Lib newDevice() on known display, " + "ignoring."); + return; + } + + pDpLibDevice = (NVDPLibDevicePtr)nvCalloc(1, sizeof(*pDpLibDevice)); + if (!pDpLibDevice) { + goto fail; + } + + nvAssert(!device->getOwningGroup()); + + // XXX For DP MST, we'll want to handle dynamic display IDs. For now, + // use the connector's display ID. + pDpLibDevice->device = device; + + if (device->isMultistream()) { + // Get a dynamic pDpy for this device based on its bus topology path. + // This will create one if it doesn't exist. + pDpyEvo = nvGetDPMSTDpyEvo( + pConnectorEvo, + DPGetDevicePortStr(device, true /* skipLeadingZero */), + &dynamicDpyCreated); + + } else { + // Look for a (previously) disconnected pDpy that matches this device. + pDpyEvo = FindMatchingDisconnectedDpy(pDispEvo, pConnectorEvo, + pDpLibDevice); + } + + if (!pDpyEvo) { + goto fail; + } + + nvAssert(pDpyEvo->pConnectorEvo == pConnectorEvo); + + // At this point, the pDpy should no longer be tracking a DP lib device. + if (pDpyEvo->dp.pDpLibDevice) { + nvAssert(!"DP Lib should have already called lostDevice() for this DP " + "device"); + + // Call lost device ourselves, if the DP lib calls this again later, + // we'll ignore it then. + lostDevice(pDpyEvo->dp.pDpLibDevice->device); + } + + nvAssert(device->isPlugged()); + + pDpLibDevice->isPlugged = TRUE; + pDpyEvo->dp.pDpLibDevice = pDpLibDevice; + + // If there's an active group that this pDpy is supposed to be a member of, + // insert it now. 
+ nvDPAddDeviceToActiveGroup(pDpyEvo); + + if (dynamicDpyCreated) { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED); + } + + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + + return; + + fail: + nvAssert(pDpyEvo == NULL); + nvFree(pDpLibDevice); +} + +void ConnectorEventSink::lostDevice(DisplayPort::Device *device) +{ + NVDpyEvoPtr pDpyEvo; + + // Ignore non-video sink devices. + if (!device->isVideoSink()) { + return; + } + + pDpyEvo = FindDpyByDevice(pConnectorEvo, device); + if (!pDpyEvo) { + nvAssert(!"Got DP Lib lostDevice() on unknown display."); + return; + } + + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + nvAssert(pDpLibDevice != NULL); + + if (pDpyEvo->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) { + device->resetVrrEnablement(); + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_NONE; + } + + if (device->getOwningGroup()) { + device->getOwningGroup()->remove(device); + } + + if (pDpLibDevice->isPlugged) { + pDpLibDevice->isPlugged = FALSE; + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + + if (device->isMultistream()) { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED); + } + + pDpyEvo->dp.pDpLibDevice = NULL; + nvFree(pDpLibDevice); +} + +void ConnectorEventSink::notifyMustDisconnect(DisplayPort::Group *grp) +{ +} + +// notifyDetectComplete() is called when DP Library has done a full detect on +// the topology. There is no one-to-one relationship between a long pulse to +// a detectCompleted. +void ConnectorEventSink::notifyDetectComplete() +{ + pConnectorEvo->detectComplete = TRUE; + + // XXX[DP MST] potentially use this call to notify NV-CONTROL of topology + // change; + + // issue: not as current as new/lostDevice and may pose sync issues, but + // less chatty. +} + +void ConnectorEventSink::bandwidthChangeNotification(DisplayPort::Device *dev, + bool isComplianceMode) +{ + nvDPLibUpdateDpyLinkConfiguration(FindDpyByDevice(pConnectorEvo, dev)); +} + +void ConnectorEventSink::notifyZombieStateChange(DisplayPort::Device *dev, + bool zombied) +{ + NVDpyEvoPtr pDpyEvo = FindDpyByDevice(pConnectorEvo, dev); + NvBool sendEvent = FALSE; + + if (pDpyEvo == NULL) { + return; + } + + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + if (zombied) { + dev->getOwningGroup()->remove(dev); + + if (pDpLibDevice->isPlugged && !dev->isPlugged()) { + pDpLibDevice->isPlugged = FALSE; + sendEvent = TRUE; + } + + } else { + if (!pDpLibDevice->isPlugged && dev->isPlugged()) { + pDpLibDevice->isPlugged = TRUE; + sendEvent = TRUE; + } + + nvDPAddDeviceToActiveGroup(pDpyEvo); + } + + if (sendEvent) { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } +} + +void ConnectorEventSink::notifyCableOkStateChange(DisplayPort::Device *dev, + bool cableOk) +{ +} + +void ConnectorEventSink::notifyHDCPCapDone(DisplayPort::Device *dev, + bool hdcpCap) +{ +} + +void ConnectorEventSink::notifyMCCSEvent(DisplayPort::Device *dev) +{ +} + +}; // namespace nvkmsDisplayPort + +// The functions below are exported to the rest of nvkms. Declare them outside +// of the 'nvkmsDisplayPort' namespace. Their prototypes in +// nvdp-connector-event-sink.h are declared as extern "C". 
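+//
+// Illustrative sketch, not part of this change: the expectation is that
+// nvdp-connector-event-sink.h wraps these prototypes roughly as follows so
+// that C callers can link against the C++ definitions below (the exact
+// header contents are an assumption here):
+//
+//   #ifdef __cplusplus
+//   extern "C" {
+//   #endif
+//
+//   NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo);
+//   void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head,
+//                               NvBool enable);
+//   void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo);
+//
+//   #ifdef __cplusplus
+//   }
+//   #endif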
+ +NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo) +{ + nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)); + + return ((pDpyEvo->dp.pDpLibDevice != NULL) && + pDpyEvo->dp.pDpLibDevice->isPlugged); +} + +// Adaptive-Sync is enabled/disabled by setting the MSA_TIMING_PAR_IGNORE_EN +// bit in the DOWNSPREAD_CTRL register (DP spec 1.4a appendix K) +void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head, + NvBool enable) +{ + const NVConnectorEvoRec *pConnectorEvo = + pDispEvo->headState[head].pConnectorEvo; + NVDPLibConnectorPtr pDpLibConnector = pConnectorEvo->pDpLibConnector; + DisplayPort::Group *pGroup = pDpLibConnector->pGroup[head]; + DisplayPort::Device *dev; + + for (dev = pGroup->enumDevices(0); dev != NULL; + dev = pGroup->enumDevices(dev)) { + dev->setIgnoreMSAEnable(enable); + } +} + +// Read the link configuration from the connector and stores it in the pDpy so +// it can be sent to clients via NV-CONTROL. Also generate events if the values +// change. +void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo) +{ + if (!pDpyEvo) { + return; + } + + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + DisplayPort::Device *dev = pDpLibDevice ? pDpLibDevice->device : NULL; + DisplayPort::Connector *connector = + pDpyEvo->pConnectorEvo->pDpLibConnector->connector; + unsigned laneCount; + NvU64 linkRate; + enum NvKmsDpyAttributeDisplayportConnectorTypeValue connectorType; + NvBool sinkIsAudioCapable; + + if (!dev || !pDpLibDevice->isPlugged) { + linkRate = 0; + laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + sinkIsAudioCapable = FALSE; + } else { + // XXX[AGP]: Can the path down to a single device have a different link + // configuration from the connector itself? + connector->getCurrentLinkConfig(laneCount, linkRate); + + // The DisplayPort library multiplies the link rate enum value by + // 27000000. Convert back to NV-CONTROL's defines. + linkRate /= 27000000; + + switch (pDpLibDevice->device->getConnectorType()) { + case DisplayPort::connectorDisplayPort: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DISPLAYPORT; + break; + case DisplayPort::connectorHDMI: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_HDMI; + break; + case DisplayPort::connectorDVI: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DVI; + break; + case DisplayPort::connectorVGA: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_VGA; + break; + default: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + break; + } + + sinkIsAudioCapable = pDpLibDevice->device->isAudioSink(); + } + + // The DisplayPort library reports a disabled link as 0 lanes. NV-CONTROL, + // for historical reasons, uses a setting of "1 lane @ disabled" for a + // disabled link, so translate to that. + if (laneCount == 0) { + linkRate = 0; + laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + } + + // Update pDpy and send events if anything changed. 
+ if (laneCount != pDpyEvo->dp.laneCount) { + pDpyEvo->dp.laneCount = laneCount; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE, + nvRMLaneCountToNvKms(laneCount)); + } + + if (linkRate != pDpyEvo->dp.linkRate) { + pDpyEvo->dp.linkRate = linkRate; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE, + linkRate); + } + + if (connectorType != pDpyEvo->dp.connectorType) { + pDpyEvo->dp.connectorType = connectorType; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE, + connectorType); + } + + if (sinkIsAudioCapable != pDpyEvo->dp.sinkIsAudioCapable) { + pDpyEvo->dp.sinkIsAudioCapable = sinkIsAudioCapable; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE, + sinkIsAudioCapable); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp new file mode 100644 index 0000000..167c2e8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVDP_CONNECTOR_EVENT_SINK_HPP__ +#define __NVDP_CONNECTOR_EVENT_SINK_HPP__ + +#include +#include + +#include "nvdp-evo-interface.hpp" + + +namespace nvkmsDisplayPort +{ + +class ConnectorEventSink : public DisplayPort::Object, + public DisplayPort::Connector::EventSink +{ +private: + const NVConnectorEvoPtr pConnectorEvo; + +public: + ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo); + + // From DisplayPort::Connector::EventSink + virtual void newDevice(DisplayPort::Device *dev); + virtual void lostDevice(DisplayPort::Device *dev); + virtual void notifyMustDisconnect(DisplayPort::Group *grp); + virtual void notifyDetectComplete(); + virtual void bandwidthChangeNotification(DisplayPort::Device *dev, bool isComplianceMode); + virtual void notifyZombieStateChange(DisplayPort::Device *dev, bool zombied); + virtual void notifyCableOkStateChange(DisplayPort::Device *dev, bool cableOk); + virtual void notifyHDCPCapDone(DisplayPort::Device *dev, bool hdcpCap); + virtual void notifyMCCSEvent(DisplayPort::Device *dev); +}; + +const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device); +bool nvDPGetDeviceGUID(DisplayPort::Device *device, NvU8 guid[DPCD_GUID_SIZE]); + +}; // namespace nvkmsDisplayPort + +struct _nv_dplibconnector { + DisplayPort::Connector *connector; + nvkmsDisplayPort::EvoInterface *evoInterface; + nvkmsDisplayPort::ConnectorEventSink *evtSink; + DisplayPort::MainLink *mainLink; + DisplayPort::AuxBus *auxBus; + + NvBool isActive; + + // The VBIOS head is actively driving this connector. + bool headInFirmware; + NVConnectorEvoRec *pConnectorEvo; + // Per-head DpLib group, allocated at the time of connector creation: + // In case of multi-streaming, multiple heads can be attached to single + // DP connector driving distinct DP streams. + DisplayPort::Group *pGroup[NVKMS_MAX_HEADS_PER_DISP]; + NVDpyIdList dpyIdList[NVKMS_MAX_HEADS_PER_DISP]; + // Attached heads bitmask + NvU32 headMask; + + // Connection status plugged/unplugged; gets initialized by + // Connector::resume() and gets updated by + // Connector::notifyLongPulse(). + NvBool plugged; +}; + +struct _nv_dplibdevice { + DisplayPort::Device *device; + NvBool isPlugged; +}; + +struct __nv_dplibmodesetstate { + NVDpyIdList dpyIdList; + DisplayPort::DpModesetParams modesetParams; +}; + +#endif // __NVDP_CONNECTOR_EVENT_SINK_HPP__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp new file mode 100644 index 0000000..4791736 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp @@ -0,0 +1,1008 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp/nvdp-connector.h" +#include "nvdp-timer.hpp" +#include "nvdp-connector-event-sink.hpp" +#include "dp/nvdp-connector-event-sink.h" +#include "dp/nvdp-timer.h" + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-modeset.h" +#include "nvkms-modeset-types.h" +#include "nvkms-utils.h" +#include "nvkms-rmapi.h" + +#include + +// Loop over all display devices attached to a connector. +// Connector::enumDevices(NULL) returns the first device, and then +// enumDevices(previous) returns each subsequent device. +#define for_each_device(connector, dev) \ + for (DisplayPort::Device *(dev) = NULL; ((dev) = (connector)->enumDevices(dev)); ) + +NVDPLibConnectorPtr nvDPCreateConnector(NVConnectorEvoPtr pConnectorEvo) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + DisplayPort::Timer *pTimer = &pDevEvo->dpTimer->timer; + NVDPLibConnectorPtr pNVDpLibConnector = + (NVDPLibConnectorPtr) nvCalloc(1, sizeof(*pNVDpLibConnector)); + + if (!pNVDpLibConnector) { + return NULL; + } + + pNVDpLibConnector->pConnectorEvo = pConnectorEvo; + + // Create the EVO interface object. + pNVDpLibConnector->evoInterface = + new nvkmsDisplayPort::EvoInterface(pConnectorEvo); + if (!pNVDpLibConnector->evoInterface) { + goto fail; + } + + // Create the event sink object. + pNVDpLibConnector->evtSink = + new nvkmsDisplayPort::ConnectorEventSink(pConnectorEvo); + if (!pNVDpLibConnector->evtSink) { + goto fail; + } + + // Create the MainLink object. + pNVDpLibConnector->mainLink = + DisplayPort::MakeEvoMainLink(pNVDpLibConnector->evoInterface, pTimer); + if (!pNVDpLibConnector->mainLink) { + goto fail; + } + + // Create the AuxBus object. + pNVDpLibConnector->auxBus = + DisplayPort::MakeEvoAuxBus(pNVDpLibConnector->evoInterface, pTimer); + if (!pNVDpLibConnector->auxBus) { + goto fail; + } + + pNVDpLibConnector->connector = + DisplayPort::createConnector(pNVDpLibConnector->mainLink, + pNVDpLibConnector->auxBus, + pTimer, + pNVDpLibConnector->evtSink); + if (!pNVDpLibConnector->connector) { + goto fail; + } + + pNVDpLibConnector->connector->setPolicyAssessLinkSafely(TRUE); + + return pNVDpLibConnector; + + fail: + nvDPDestroyConnector(pNVDpLibConnector); + return NULL; +} + +void nvDPNotifyLongPulse(NVConnectorEvoPtr pConnectorEvo, + NvBool connected) +{ + NVDPLibConnectorPtr pNVDpLibConnector = pConnectorEvo->pDpLibConnector; + DisplayPort::Connector *c = pNVDpLibConnector->connector; + + pNVDpLibConnector->plugged = connected; + + if (connected && !nvAssignSOREvo(pConnectorEvo, 0 /* sorExcludeMask */)) { + // DPLib takes care of skipping LT on unassigned SOR Display. 
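+        //
+        // Intentionally fall through: notifyLongPulse() is still delivered
+        // below even when SOR assignment fails, so the library can track the
+        // connector state.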
+    }
+
+    c->notifyLongPulse(connected);
+
+}
+
+void nvDPNotifyShortPulse(NVDPLibConnectorPtr pNVDpLibConnector)
+{
+    DisplayPort::Connector *c = pNVDpLibConnector->connector;
+
+    c->notifyShortPulse();
+}
+
+void nvDPDestroyConnector(NVDPLibConnectorPtr pNVDpLibConnector)
+{
+    if (!pNVDpLibConnector) return;
+
+    if (pNVDpLibConnector->connector) {
+        pNVDpLibConnector->connector->destroy();
+    }
+    if (pNVDpLibConnector->auxBus) {
+        delete pNVDpLibConnector->auxBus;
+    }
+    if (pNVDpLibConnector->mainLink) {
+        delete pNVDpLibConnector->mainLink;
+    }
+    if (pNVDpLibConnector->evoInterface) {
+        delete pNVDpLibConnector->evoInterface;
+    }
+    if (pNVDpLibConnector->evtSink) {
+        delete pNVDpLibConnector->evtSink;
+    }
+
+    nvFree(pNVDpLibConnector);
+}
+
+NvBool nvDPIsLinkAwaitingTransition(NVConnectorEvoPtr pConnectorEvo)
+{
+    if (nvConnectorUsesDPLib(pConnectorEvo)) {
+        DisplayPort::Connector *c = pConnectorEvo->pDpLibConnector->connector;
+        return c->isLinkAwaitingTransition();
+    }
+
+    return FALSE;
+}
+
+/*
+ * Start DisplayPort mode validation on all connectors on a disp.
+ */
+void nvDPBeginValidation(NVDispEvoPtr pDispEvo)
+{
+    NVConnectorEvoPtr pConnectorEvo;
+
+    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+        if (nvConnectorUsesDPLib(pConnectorEvo)) {
+            pConnectorEvo->pDpLibConnector->connector->beginCompoundQuery();
+        }
+    }
+}
+
+/*!
+ * Create a new DisplayPort group and populate it with the devices specified by
+ * dpyIdList. For MST groups, this allocates a dynamic RM display ID.
+ * Otherwise, it uses the connector's display ID.
+ */
+static DisplayPort::Group* CreateGroup(
+    const NVDPLibConnectorRec *pDpLibConnector,
+    const NVDpyIdList dpyIdList)
+{
+    NVDpyEvoPtr pDpyEvo;
+    DisplayPort::Group *pGroup = NULL;
+
+    pGroup = pDpLibConnector->connector->newGroup();
+    if (pGroup == NULL) {
+        return NULL;
+    }
+
+    // Populate the group
+    FOR_ALL_EVO_DPYS(pDpyEvo,
+                     dpyIdList, pDpLibConnector->pConnectorEvo->pDispEvo) {
+        if (pDpyEvo->dp.pDpLibDevice) {
+            pGroup->insert(pDpyEvo->dp.pDpLibDevice->device);
+        }
+    }
+
+    return pGroup;
+}
+
+/*!
+ * Return the bits per pixel for the given pixel depth and color space.
+ *
+ * \param[in] pixelDepth nvKmsPixelDepth value
+ * \param[in] colorSpace Current color space of the dpy
+ *
+ * \return The bits per pixel implied by this pixel depth
+ */
+static NvU32 GetSORBpp(
+    const enum nvKmsPixelDepth pixelDepth,
+    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace)
+{
+    NvU32 bpc = nvPixelDepthToBitsPerComponent(pixelDepth);
+    if (bpc == 0) {
+        nvAssert(!"Unrecognized SOR pixel depth");
+        /* XXX Assume lowest ? */
+        bpc = 6;
+    }
+
+    /*
+     * In YUV420, HW is programmed with RGB color space and full color range.
+     * The color space conversion and color range compression happen in a
+     * headSurface composite shader.
+     *
+     * XXX Add support for
+     * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP.
+     */
+    nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 ||
+             colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 ||
+             colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB);
+
+    /* For RGB/YCbCr444, each pixel is always 3 components. For YCbCr/YUV420,
+     * we currently always scan out from the headSurface as RGB. */
+    return bpc * 3;
+}
+
+/* XXX Instead of tracking pixelDepth, you should track bpc and calculate bpp
+ * from bpc + colorSpace.
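+ * For example, GetSORBpp() above derives bpp as bpc * 3 for all of the
+ * supported color spaces (8 bpc -> 24 bpp, 10 bpc -> 30 bpp), so bpc plus
+ * the color space is enough to reconstruct bpp.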
*/ +static NvU32 GetBpc( + const enum nvKmsPixelDepth pixelDepth, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace) +{ + NvU32 bpc = nvPixelDepthToBitsPerComponent(pixelDepth); + if (bpc == 0) { + nvAssert(!"Unrecognized SOR pixel depth"); + /* XXX Assume lowest ? */ + return 6; + } + + /* + * In YUV420, HW is programmed with RGB color space and full color range. + * The color space conversion and color range compression happen in a + * headSurface composite shader. + * + * XXX Add support for + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP. + */ + nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB); + + return bpc; +} + +static void SetDPMSATiming(const NVDispEvoRec *pDispEvo, + const NvU32 displayId, + NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *msaParams, + const NVHwModeTimingsEvo *pTimings) +{ + nvkms_memset(msaParams, 0, sizeof(*msaParams)); + + /* + * Fill in displayId and subDeviceInstance unconditionally. + * From CL#27980662, dplib started passing the client provided displayId + * to RM for setting MSA properties. + * Default value of displayId is 0, leading to RMControl failure in + * the displayport library. + */ + msaParams->subDeviceInstance = pDispEvo->displayOwner; + msaParams->displayId = displayId; + + if ((pTimings->yuv420Mode == NV_YUV420_MODE_SW) && displayId != 0) { + NV0073_CTRL_DP_MSA_PROPERTIES_MASK *featureMask = &msaParams->featureMask; + NV0073_CTRL_DP_MSA_PROPERTIES_VALUES *featureValues = &msaParams->featureValues; + + msaParams->bEnableMSA = 1; + msaParams->bCacheMsaOverrideForNextModeset = 1; + featureMask->bRasterTotalHorizontal = true; + featureMask->bActiveStartHorizontal = true; + featureMask->bSurfaceTotalHorizontal = true; + featureMask->bSyncWidthHorizontal = true; + featureValues->rasterTotalHorizontal = 2 * pTimings->rasterSize.x; + featureValues->activeStartHorizontal = 2 * (pTimings->rasterBlankEnd.x + 1); + featureValues->surfaceTotalHorizontal = 2 * nvEvoVisibleWidth(pTimings); + featureValues->syncWidthHorizontal = 2 * (pTimings->rasterSyncEnd.x + 1); + } +} + +static void InitDpModesetParams( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + DisplayPort::DpModesetParams *pParams) +{ + pParams->modesetInfo.pixelClockHz = pTimings->pixelClock * 1000; + pParams->modesetInfo.rasterWidth = pTimings->rasterSize.x; + pParams->modesetInfo.rasterHeight = pTimings->rasterSize.y; + pParams->modesetInfo.rasterBlankStartX = pTimings->rasterBlankStart.x; + pParams->modesetInfo.rasterBlankEndX = pTimings->rasterBlankEnd.x; + pParams->modesetInfo.surfaceWidth = nvEvoVisibleWidth(pTimings); + pParams->modesetInfo.surfaceHeight = nvEvoVisibleHeight(pTimings); + + pParams->modesetInfo.depth = + GetSORBpp(pTimings->pixelDepth, colorSpace); + + pParams->modesetInfo.bitsPerComponent = + GetBpc(pTimings->pixelDepth, colorSpace); + + pParams->colorFormat = dpColorFormat_Unknown; + switch (colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + /* HW YUV420 mode is only supported for HDMI, not DP */ + nvAssert(pTimings->yuv420Mode == NV_YUV420_MODE_SW); + pParams->modesetInfo.pixelClockHz *= 2; + pParams->colorFormat = dpColorFormat_YCbCr420; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + pParams->colorFormat = 
dpColorFormat_YCbCr444;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+        pParams->colorFormat = dpColorFormat_YCbCr422;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+        pParams->colorFormat = dpColorFormat_RGB;
+        break;
+    }
+
+    pParams->headIndex = head;
+
+    SetDPMSATiming(pDispEvo, displayId, &pParams->msaparams, pTimings);
+}
+
+NVDPLibModesetStatePtr nvDPLibCreateModesetState(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 head,
+    const NvU32 displayId,
+    const NVDpyIdList dpyIdList,
+    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
+    NVHwModeTimingsEvo *pTimings)
+{
+    bool found = false;
+    const NVDPLibConnectorRec *pDpLibConnector = NULL;
+    const NVDpyEvoRec *pDpyEvo;
+    NVDPLibModesetStatePtr pDpLibModesetState = NULL;
+
+    FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) {
+        if (!found) {
+            pDpLibConnector = pDpyEvo->pConnectorEvo->pDpLibConnector;
+            found = true;
+        } else if (pDpLibConnector != pDpyEvo->pConnectorEvo->pDpLibConnector) {
+            /* All dpys must belong to the same DP connector */
+            return NULL;
+        }
+    }
+
+    /* Do nothing if the dpys are not driven by a DP connector */
+    if (pDpLibConnector == NULL) {
+        return NULL;
+    }
+
+    pDpLibModesetState =
+        (NVDPLibModesetStatePtr) nvCalloc(1, sizeof(*pDpLibModesetState));
+    if (pDpLibModesetState == NULL) {
+        return NULL;
+    }
+
+    InitDpModesetParams(pDispEvo,
+                        head,
+                        displayId,
+                        pTimings,
+                        colorSpace,
+                        &pDpLibModesetState->modesetParams);
+    if (pTimings->dpDsc.enable) {
+        pDpLibModesetState->modesetParams.modesetInfo.bEnableDsc = true;
+
+        /*
+         * If DSC is enabled, override the normal pixel depth with the
+         * target bpp rate of the DSC encoder: the rate at which it is
+         * going to output the compressed stream.
+         */
+        pDpLibModesetState->modesetParams.modesetInfo.depth =
+            pTimings->dpDsc.bitsPerPixelX16;
+    }
+    pDpLibModesetState->dpyIdList = dpyIdList;
+
+    return pDpLibModesetState;
+}
+
+void nvDPLibFreeModesetState(NVDPLibModesetStatePtr pDpLibModesetState)
+{
+    nvFree(pDpLibModesetState);
+}
+
+/*
+ * Validate the mode for a given NVHwModeTimingsEvo + dpyIdList. This
+ * function should be called for each head, and must be called between
+ * nvDPBeginValidation and nvDPEndValidation.
+ *
+ * If validation fails, this function returns FALSE. You must still call
+ * nvDPEndValidation even if an individual head fails.
+ *
+ * If validation succeeds, the DSC fields within pTimings are updated with what
+ * is returned by compoundQueryAttach().
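+ *
+ * An illustrative validation sequence (names as defined in this file;
+ * nvDPValidateModeForDpyEvo() below follows the same pattern for a
+ * single dpy):
+ *
+ *     NvBool ok = TRUE;
+ *     nvDPBeginValidation(pDispEvo);
+ *     ok &= nvDPLibValidateTimings(pDispEvo, head, displayId, dpyIdList,
+ *                                  colorSpace, pModeValidationParams,
+ *                                  pTimings);          // once per head
+ *     ok &= nvDPEndValidation(pDispEvo);               // even on failure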
+ */
+NvBool nvDPLibValidateTimings(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 head,
+    const NvU32 displayId,
+    const NVDpyIdList dpyIdList,
+    const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
+    const struct NvKmsModeValidationParams *pModeValidationParams,
+    NVHwModeTimingsEvo *pTimings)
+{
+    const NVDpyEvoRec *pDpyEvo;
+    const NVDPLibConnectorRec *pDpLibConnector = NULL;
+    bool found = false;
+
+    DisplayPort::Group *pGroup = NULL;
+    DisplayPort::DscOutParams *pDscOutParams = NULL;
+    DisplayPort::DpModesetParams *pModesetParams = NULL;
+    DisplayPort::DscParams dpDscParams;
+    NvBool ret = FALSE;
+
+    FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) {
+        if (!found) {
+            pDpLibConnector = pDpyEvo->pConnectorEvo->pDpLibConnector;
+            found = true;
+        } else if (pDpLibConnector != pDpyEvo->pConnectorEvo->pDpLibConnector) {
+            /* All dpys must belong to the same DP connector */
+            return FALSE;
+        }
+    }
+
+    /* Do nothing if the dpys are not driven by a DP connector */
+    if (pDpLibConnector == NULL) {
+        return TRUE;
+    }
+
+    pGroup = CreateGroup(pDpLibConnector, dpyIdList);
+    if (pGroup == NULL) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to create a DisplayPort group");
+        goto done;
+    }
+
+    pDscOutParams =
+        (DisplayPort::DscOutParams*) nvCalloc(1, sizeof(*pDscOutParams));
+    if (pDscOutParams == NULL) {
+        goto done;
+    }
+
+    pModesetParams =
+        (DisplayPort::DpModesetParams*) nvCalloc(1, sizeof(*pModesetParams));
+    if (pModesetParams == NULL) {
+        goto done;
+    }
+
+    InitDpModesetParams(pDispEvo,
+                        head,
+                        displayId,
+                        pTimings,
+                        colorSpace,
+                        pModesetParams);
+
+    dpDscParams.bCheckWithDsc = true;
+    dpDscParams.forceDsc = pModeValidationParams->forceDsc ?
+        DisplayPort::DSC_FORCE_ENABLE :
+        DisplayPort::DSC_DEFAULT;
+    dpDscParams.bitsPerPixelX16 =
+        pModeValidationParams->dscOverrideBitsPerPixelX16;
+    dpDscParams.pDscOutParams = pDscOutParams;
+
+    ret = pDpLibConnector->connector->compoundQueryAttach(
+        pGroup, *pModesetParams,
+        &dpDscParams);
+
+    if (ret) {
+        pTimings->dpDsc.enable = dpDscParams.bEnableDsc;
+        pTimings->dpDsc.bitsPerPixelX16 = dpDscParams.bitsPerPixelX16;
+
+        ct_assert(sizeof(pTimings->dpDsc.pps) == sizeof(pDscOutParams->PPS));
+
+        nvkms_memcpy(pTimings->dpDsc.pps,
+                     pDscOutParams->PPS, sizeof(pTimings->dpDsc.pps));
+    }
+
+done:
+    nvFree(pDscOutParams);
+    nvFree(pModesetParams);
+    if (pGroup != NULL) {
+        pGroup->destroy();
+    }
+    return ret;
+}
+
+/*
+ * Finishes DisplayPort mode validation. Returns TRUE if the complete
+ * configuration is possible, and FALSE if it can't be achieved.
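+ *
+ * Note that endCompoundQuery() is issued to every DPLib connector on the
+ * disp, even if an earlier nvDPLibValidateTimings() call failed, so the
+ * library's compound-query state is always unwound.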
+ */ +NvBool nvDPEndValidation(NVDispEvoPtr pDispEvo) +{ + NvBool ret = TRUE; + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + DisplayPort::Connector *connector = + pConnectorEvo->pDpLibConnector->connector; + + /* endCompoundQuery() must be called for all dp connectors */ + ret = connector->endCompoundQuery() && ret; + } + } + + return ret; +} + +NvBool nvDPValidateModeForDpyEvo( + const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const struct NvKmsModeValidationParams *pModeValidationParams, + NVHwModeTimingsEvo *pTimings) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + + nvAssert(nvConnectorUsesDPLib(pConnectorEvo)); + + DisplayPort::Connector *connector = + pConnectorEvo->pDpLibConnector->connector; + + connector->beginCompoundQuery(); + NvBool ret = nvDPLibValidateTimings(pDpyEvo->pDispEvo, + 0 /* head */, + 0 /* displayId */, + nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id), + colorSpace, + pModeValidationParams, + pTimings); + connector->endCompoundQuery(); + + return ret; +} + +/* + * Notify the DisplayPort library that a given mode is about to be set on a + * given head. The configuration for this head must have previously been + * validated by a call to nvDPLibValidateTimings. + */ +static +void NotifyAttachBegin(NVDPLibConnectorPtr pDpLibConnector, + const NvU32 head, + const NVDPLibModesetStateRec *pDpLibModesetState) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const DisplayPort::DpModesetParams *pParams = + &pDpLibModesetState->modesetParams; + const NVDpyEvoRec *pDpyEvo = NULL; + + /* Insert active dpys into group */ + pDpLibConnector->dpyIdList[head] = pDpLibModesetState->dpyIdList; + FOR_ALL_EVO_DPYS(pDpyEvo, pDpLibConnector->dpyIdList[head], pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice) { + pDpLibConnector->pGroup[head]->insert( + pDpyEvo->dp.pDpLibDevice->device); + } + } + + pDpLibConnector->connector->notifyAttachBegin( + pDpLibConnector->pGroup[head], + *pParams); +} + +/* + * Notify the DisplayPort library that a modeset on a head begun by + * nvDPNotifyAttachBegin is finished. + */ +static void NotifyAttachEnd(NVDPLibConnectorPtr pDpLibConnector, NvU32 head) +{ + pDpLibConnector->connector->notifyAttachEnd(false); + pDpLibConnector->headMask |= NVBIT(head); +} + +/* + * Notify the DisplayPort library that the given head driving displays on this + * connector is about to be shut down. + */ +static void NotifyDetachBegin(NVDPLibConnectorPtr pDpLibConnector, const NvU32 head) +{ + /* + * The firmware group is the VBIOS monitor group the DP Library manages + * internally. In notifyDetachBegin(NULL), the NULL defaults to firmware + * group. + */ + pDpLibConnector->connector->notifyDetachBegin( + pDpLibConnector->headInFirmware ? + NULL : pDpLibConnector->pGroup[head]); +} + +/* + * Notify the DisplayPort library that the driver has finished shutting down a + * head that was previously driving this connector. 
+ */
+static void NotifyDetachEnd(NVDPLibConnectorPtr pDpLibConnector, const NvU32 head)
+{
+    pDpLibConnector->connector->notifyDetachEnd();
+
+    if (!pDpLibConnector->headInFirmware) {
+        const NVConnectorEvoRec *pConnectorEvo =
+            pDpLibConnector->pConnectorEvo;
+        const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo;
+        const NVDpyEvoRec *pDpyEvo;
+
+        /* Empty the inactive group */
+        FOR_ALL_EVO_DPYS(pDpyEvo, pDpLibConnector->dpyIdList[head], pDispEvo) {
+            if (pDpyEvo->dp.pDpLibDevice) {
+                pDpLibConnector->pGroup[head]->remove(
+                    pDpyEvo->dp.pDpLibDevice->device);
+            }
+        }
+        pDpLibConnector->dpyIdList[head] = nvEmptyDpyIdList();
+    } else {
+        nvAssert(pDpLibConnector->pGroup[head]->enumDevices(0) == NULL);
+        pDpLibConnector->headInFirmware = false;
+    }
+
+    pDpLibConnector->headMask &= ~NVBIT(head);
+}
+
+/*
+ * Handle the DP stream programming that must be done before committing the
+ * MODESET update. This function should be called, before the commit, for
+ * each DpLib connector affected by a change in head-connector attachment.
+ */
+void nvDPPreSetMode(NVDPLibConnectorPtr pDpLibConnector,
+                    const NVEvoModesetUpdateState *pModesetUpdateState)
+{
+    const NVConnectorEvoRec *pConnectorEvo =
+        pDpLibConnector->pConnectorEvo;
+    NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo;
+    const NvU32 oldHeadMask = pDpLibConnector->headMask;
+    const NvU32 newHeadMask =
+        nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+
+    for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+
+        if ((newHeadMask & NVBIT(head)) != 0x0 &&
+            (oldHeadMask & NVBIT(head)) == 0x0) {
+
+            NotifyAttachBegin(pDpLibConnector,
+                              head,
+                              pModesetUpdateState->pDpLibModesetState[head]);
+
+        } else if ((newHeadMask & NVBIT(head)) == 0x0 &&
+                   (oldHeadMask & NVBIT(head)) != 0x0) {
+
+            NotifyDetachBegin(pDpLibConnector, head);
+
+        }
+    }
+}
+
+/*
+ * Handle the DP stream programming that must be done after committing the
+ * MODESET update. This function should be called, after the commit, for
+ * each DpLib connector affected by a change in head-connector attachment.
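+ *
+ * Together with nvDPPreSetMode() above, this brackets the EVO commit; an
+ * illustrative sequence for one affected connector (the commit itself is
+ * performed elsewhere in nvkms):
+ *
+ *     nvDPPreSetMode(pDpLibConnector, pModesetUpdateState); // Notify*Begin
+ *     ... commit the EVO modeset update ...
+ *     nvDPPostSetMode(pDpLibConnector);                     // Notify*End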
+ */
+void nvDPPostSetMode(NVDPLibConnectorPtr pDpLibConnector)
+{
+    const NVConnectorEvoRec *pConnectorEvo =
+        pDpLibConnector->pConnectorEvo;
+    const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo;
+    const NvU32 oldHeadMask = pDpLibConnector->headMask;
+    const NvU32 newHeadMask =
+        nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+
+    for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+
+        if ((newHeadMask & NVBIT(head)) != 0x0 &&
+            (oldHeadMask & NVBIT(head)) == 0x0) {
+
+            NotifyAttachEnd(pDpLibConnector, head);
+
+        } else if ((newHeadMask & NVBIT(head)) == 0x0 &&
+                   (oldHeadMask & NVBIT(head)) != 0x0) {
+
+            NotifyDetachEnd(pDpLibConnector, head);
+
+        }
+    }
+
+    /*
+     * Update the DisplayPort link information for all displays on the DpLib
+     * connector.
+     */
+    if (newHeadMask != oldHeadMask) {
+        NVDpyEvoPtr pDpyEvo;
+
+        FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) {
+            if (pDpyEvo->pConnectorEvo->pDpLibConnector == pDpLibConnector) {
+                nvDPLibUpdateDpyLinkConfiguration(pDpyEvo);
+            }
+        }
+    }
+}
+
+void nvDPPause(NVDPLibConnectorPtr pNVDpLibConnector)
+{
+    DisplayPort::Connector *connector = pNVDpLibConnector->connector;
+    const NVConnectorEvoRec *pConnectorEvo = pNVDpLibConnector->pConnectorEvo;
+    const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo;
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+
+    if (!pNVDpLibConnector->isActive) {
+        return;
+    }
+
+    if (pDevEvo->skipConsoleRestore && pNVDpLibConnector->headMask != 0) {
+        /* Clear vbios DisplayPort RAD scratch registers, see bug 200471345 */
+
+        nvAssert(nvPopCount32(pNVDpLibConnector->headMask) == 1);
+        nvAssert(connector->isDp11ProtocolForced());
+
+        NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS params = {0};
+
+        params.subDeviceInstance = pDispEvo->displayOwner;
+        params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+
+        nvAssert(pConnectorEvo->or.protocol ==
+                     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ||
+                 pConnectorEvo->or.protocol ==
+                     NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B);
+
+        params.dpLink = pConnectorEvo->or.protocol ==
+            NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ? 0 : 1;
+        params.sorIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo);
+
+        NvU32 ret = nvRmApiControl(
+            nvEvoGlobal.clientHandle,
+            pDevEvo->displayCommonHandle,
+            NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG,
+            &params,
+            sizeof(params));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDispDebug(
+                pDispEvo,
+                EVO_LOG_ERROR,
+                "NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG "
+                "failed, error code 0x%x",
+                ret);
+        }
+    }
+
+    /* Before pausing DpLib, destroy the per-head groups and clear the head
+     * bitmask */
+    for (NvU32 head = 0; head < ARRAY_LEN(pNVDpLibConnector->pGroup); head++) {
+        pNVDpLibConnector->pGroup[head]->destroy();
+    }
+    pNVDpLibConnector->headMask = 0x0;
+
+    connector->pause();
+
+    pNVDpLibConnector->isActive = false;
+}
+
+/*!
+ * Determine which head, if any, is driving this connector.
+ */
+static NvU32 GetFirmwareHead(NVConnectorEvoPtr pConnectorEvo)
+{
+    NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo);
+
+    if (orIndex == NV_INVALID_OR ||
+        pConnectorEvo->or.ownerHeadMask[orIndex] == 0) {
+        return NV_INVALID_HEAD;
+    }
+
+    return BIT_IDX_32(pConnectorEvo->or.ownerHeadMask[orIndex]);
+}
+
+/*!
+ * Determine whether an active connector shares an OR with this connector.
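+ *
+ * Two connectors are considered to share an OR when their 'or.mask' fields
+ * intersect, i.e. (pOtherConnectorEvo->or.mask & pConnectorEvo->or.mask) is
+ * nonzero; nvDPResume() below passes the result to the DP library as the
+ * firmwareLinkHandsOff argument of resume().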
+ */ +static bool ConnectorIsSharedWithActiveOR(NVConnectorEvoPtr pConnectorEvo) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVConnectorEvoPtr pOtherConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pOtherConnectorEvo, pDispEvo) { + if (pOtherConnectorEvo != pConnectorEvo && + nvIsConnectorActiveEvo(pOtherConnectorEvo) && + (pOtherConnectorEvo->or.mask & pConnectorEvo->or.mask) != 0x0) { + return true; + } + } + + return false; +} + +NvBool nvDPResume(NVDPLibConnectorPtr pNVDpLibConnector, NvBool plugged) +{ + NVConnectorEvoRec *pConnectorEvo = + pNVDpLibConnector->pConnectorEvo; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + DisplayPort::Connector *c = pNVDpLibConnector->connector; + const unsigned int firmwareHead = GetFirmwareHead(pConnectorEvo); + const bool firmwareLinkHandsOff = ConnectorIsSharedWithActiveOR(pConnectorEvo); + bool dpyIdIsDynamic = false; + /* By default allow MST */ + bool allowMST = true; + + if (firmwareHead != NV_INVALID_HEAD) { + NVDpyId firmwareDpyId = nvInvalidDpyId(); + + pNVDpLibConnector->headInFirmware = true; + pNVDpLibConnector->headMask = NVBIT(firmwareHead); + + // Use the first displayId in the boot display list. + // + // TODO: What should we do if more than one dpy ID is listed for a boot + // display? + nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[firmwareHead]) == 1); + firmwareDpyId = + nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), + pDispEvo->vbiosDpyConfig[firmwareHead]); + + dpyIdIsDynamic = !nvDpyIdsAreEqual(firmwareDpyId, + pConnectorEvo->displayId); + + /* Do not allow MST if firmware driving DP connector in SST mode */ + if (!dpyIdIsDynamic) { + allowMST = false; + } + } + + pConnectorEvo->detectComplete = FALSE; + + pNVDpLibConnector->plugged = plugged; + if (plugged && !pNVDpLibConnector->headInFirmware) { + NvBool ret = nvAssignSOREvo(pConnectorEvo, 0 /* sorExcludeMask */); + + nvAssert(ret); + if (!ret) { + // DP lib skips LT for unassigned SOR. + } + } + + c->resume(firmwareLinkHandsOff, + pNVDpLibConnector->headInFirmware, + plugged, + false /* isUefiSystem */, + firmwareHead, + dpyIdIsDynamic /* bFirmwareLinkUseMultistream */, + true /* bDisableVbiosScratchRegisterUpdate, bug 200471345 */, + allowMST); + + for (NvU32 head = 0; head < ARRAY_LEN(pNVDpLibConnector->pGroup); head++) { + pNVDpLibConnector->pGroup[head] = + pNVDpLibConnector->connector->newGroup(); + + if (pNVDpLibConnector->pGroup[head] == NULL) { + for (NvU32 i = 0; i < head; i++) { + pNVDpLibConnector->pGroup[i]->destroy(); + } + goto failed; + } + } + + pNVDpLibConnector->isActive = true; + return TRUE; + +failed: + pNVDpLibConnector->connector->pause(); + return FALSE; +} + +void nvDPSetAllowMultiStreamingOneConnector( + NVDPLibConnectorPtr pDpLibConnector, + NvBool allowMST) +{ + NVConnectorEvoRec *pConnectorEvo = + pDpLibConnector->pConnectorEvo; + + if (pDpLibConnector->connector->getAllowMultiStreaming() == allowMST) { + return; + } + + /* + * If there is change in MST capability and DPlib re-runs device detection + * routine for plugged sink. Reset 'pConnectorEvo->detectComplete' only for + * MST capable sinks, in order to track completion of that fresh detection + * routine. 
+ */
+    if (pDpLibConnector->plugged &&
+        pDpLibConnector->connector->getSinkMultiStreamCap()) {
+        pConnectorEvo->detectComplete = FALSE;
+    }
+    pDpLibConnector->connector->setAllowMultiStreaming(allowMST);
+}
+
+static NvBool IsDpSinkMstCapableForceSst(const NVDispEvoRec *pDispEvo,
+                                         const NvU32 head)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo;
+
+    if (pConnectorEvo == NULL ||
+        pConnectorEvo->pDpLibConnector == NULL) {
+        return FALSE;
+    }
+
+    DisplayPort::Connector *c =
+        pConnectorEvo->pDpLibConnector->connector;
+
+    return (c->getSinkMultiStreamCap() && !c->getAllowMultiStreaming());
+}
+
+static NvBool IsDpLinkTransitionWaitingForHeadShutDown(
+    const NVDispEvoRec *pDispEvo,
+    const NvU32 head)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    return pHeadState->pConnectorEvo &&
+           nvDPIsLinkAwaitingTransition(pHeadState->pConnectorEvo);
+}
+
+void nvDPSetAllowMultiStreaming(NVDevEvoPtr pDevEvo, NvBool allowMST)
+{
+    NvBool needUpdate = FALSE;
+    NVDispEvoPtr pDispEvo;
+    NvU32 dispIndex;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        NVConnectorEvoPtr pConnectorEvo;
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            NVDPLibConnectorPtr pDpLibConnector =
+                pConnectorEvo->pDpLibConnector;
+            if (pDpLibConnector &&
+                pDpLibConnector->connector->getAllowMultiStreaming()
+                    != allowMST) {
+                needUpdate = TRUE;
+            }
+        }
+    }
+
+    if (!needUpdate) {
+        return;
+    }
+
+    nvShutDownHeads(pDevEvo, IsDpSinkMstCapableForceSst);
+
+    /*
+     * The heads driving MST-capable sinks in forced-SST mode are now shut
+     * down, so MST can safely be allowed on all DisplayPort connectors, in
+     * compliance with the DP 1.2 specification.
+     *
+     * Section 5.4 and table 2-75 (in section 2.9.3.1) of the DisplayPort 1.2
+     * specification do not allow enabling/disabling the MST mode of a sink
+     * while an active stream is being transmitted (see the description of
+     * CL#25551338).
+     */
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        NVConnectorEvoPtr pConnectorEvo;
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            if (!pConnectorEvo->pDpLibConnector) {
+                continue;
+            }
+            nvDPSetAllowMultiStreamingOneConnector(
+                pConnectorEvo->pDpLibConnector,
+                allowMST);
+        }
+    }
+
+    /* Shut down all DisplayPort heads that need to transition to/from SST. */
+    nvShutDownHeads(pDevEvo,
+                    IsDpLinkTransitionWaitingForHeadShutDown);
+
+    /*
+     * Handle any pending timers the DP library scheduled to notify us
+     * about changes in the connected device list.
+     */
+    nvDPFireExpiredTimers(pDevEvo);
+}
+
+enum NVDpLinkMode nvDPGetActiveLinkMode(NVDPLibConnectorPtr pDpLibConnector)
+{
+    DisplayPort::LinkConfiguration linkConfig =
+        pDpLibConnector->connector->getActiveLinkConfig();
+    if (linkConfig.lanes == 0) {
+        return NV_DP_LINK_MODE_OFF;
+    }
+    return linkConfig.multistream ? NV_DP_LINK_MODE_MST :
+           NV_DP_LINK_MODE_SST;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp
new file mode 100644
index 0000000..e24e7a2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp
@@ -0,0 +1,148 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp/nvdp-device.h" +#include "nvdp-connector-event-sink.hpp" +#include "dp/nvdp-connector-event-sink.h" + +#include "nvkms-types.h" +#include "nvkms-rm.h" +#include "nvkms-dpy.h" + +#include "nvctassert.h" + +void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!pDpyEvo->dp.pDpLibDevice) { + return; + } + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + DisplayPort::Device *device = pDpyEvo->dp.pDpLibDevice->device; + + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + device->setPanelPowerParams(on, on); + + /* + * WAR: Some monitors clear the MSA_TIMING_PAR_IGNORE_EN bit in the + * DOWNSPREAD_CTRL DPCD register after changing power state, which will + * cause the monitor to fail to restore the image after powering back on + * while VRR flipping. To work around this, re-enable Adaptive-Sync + * immediately after powering on. 
(Bug 200488547) + */ + if (nvDpyIsAdaptiveSync(pDpyEvo) && on) { + NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDPLibConnectorPtr pDpLibConnector = pConnectorEvo->pDpLibConnector; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpLibConnector->dpyIdList[head]) && + (pDispEvo->headState[head].timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE)) { + nvDPLibSetAdaptiveSync(pDispEvo, head, TRUE); + break; + } + } + } +} + +unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo) +{ + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + if (!pDpLibDevice) { + return 0; + } + + return pDpLibDevice->device->getEDIDSize(); +} + +NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size) +{ + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + if (!pDpLibDevice) { + return FALSE; + } + + return pDpLibDevice->device->getEDID((char *)buffer, size); +} + +void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo) +{ + NVDPLibDevicePtr pDpLibDevice; + const char *str; + + nvkms_memset(&pDpyEvo->dp.guid, 0, sizeof(pDpyEvo->dp.guid)); + + ct_assert(sizeof(pDpyEvo->dp.guid.buffer) == DPCD_GUID_SIZE); + + if (!nvDpyUsesDPLib(pDpyEvo)) { + return; + } + + pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + if (!pDpLibDevice) { + return; + } + + pDpyEvo->dp.guid.valid = + nvkmsDisplayPort::nvDPGetDeviceGUID(pDpLibDevice->device, + pDpyEvo->dp.guid.buffer) == true; + if (!pDpyEvo->dp.guid.valid) { + return; + } + + str = nvkmsDisplayPort::nvDPGetDeviceGUIDStr(pDpLibDevice->device); + if (str != NULL) { + nvkms_strncpy(pDpyEvo->dp.guid.str, str, sizeof(pDpyEvo->dp.guid.str)); + } else { + pDpyEvo->dp.guid.valid = FALSE; + } +} + +// Perform a fake lostDevice during device teardown. This function is called by +// DpyFree before it deletes a pDpy. +void nvDPDpyFree(NVDpyEvoPtr pDpyEvo) +{ + if (!nvDpyUsesDPLib(pDpyEvo)) { + return; + } + + if (!pDpyEvo->dp.pDpLibDevice) { + return; + } + + DisplayPort::Device *device = pDpyEvo->dp.pDpLibDevice->device; + + pDpyEvo->pConnectorEvo->pDpLibConnector->evtSink->lostDevice(device); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp new file mode 100644 index 0000000..70dda08 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp @@ -0,0 +1,149 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+// This file implements the EVO RM interface used by the DisplayPort library.
+
+#include "nvkms-utils.h"
+
+#include "nvdp-evo-interface.hpp"
+
+#include "nvkms-rmapi.h"
+
+namespace nvkmsDisplayPort {
+
+EvoInterface::EvoInterface(NVConnectorEvoPtr pConnectorEvo)
+    : pConnectorEvo(pConnectorEvo)
+{
+}
+
+NvU32 EvoInterface::rmControl0073(NvU32 command, void * params,
+                                  NvU32 paramSize)
+{
+    NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
+
+    return nvRmApiControl(nvEvoGlobal.clientHandle,
+                          pDevEvo->displayCommonHandle,
+                          command,
+                          params,
+                          paramSize);
+}
+
+NvU32 EvoInterface::rmControl5070(NvU32 command, void * params,
+                                  NvU32 paramSize)
+{
+    NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
+
+    return nvRmApiControl(nvEvoGlobal.clientHandle,
+                          pDevEvo->displayHandle,
+                          command,
+                          params,
+                          paramSize);
+}
+
+/*!
+ * Look up the value of a particular key in the DisplayPort-specific registry
+ * corresponding to this connector. These values are provided at device
+ * allocation time, copied from the client request during nvAllocDevEvo().
+ *
+ * \param[in]  key  The name of the key to look up.
+ *
+ * \return  The unsigned 32-bit value set for the key, or 0 if the key is
+ *          not set.
+ */
+NvU32 EvoInterface::getRegkeyValue(const char *key)
+{
+    NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo;
+    NvU32 val;
+    NvBool found = nvGetRegkeyValue(pDevEvo, key, &val);
+
+    if (found) {
+        return val;
+    } else {
+        return 0;
+    }
+}
+
+bool EvoInterface::isInbandStereoSignalingSupported()
+{
+    return FALSE;
+}
+
+NvU32 EvoInterface::getSubdeviceIndex()
+{
+    return pConnectorEvo->pDispEvo->displayOwner;
+}
+
+NvU32 EvoInterface::getDisplayId()
+{
+    return nvDpyIdToNvU32(pConnectorEvo->displayId);
+}
+
+NvU32 EvoInterface::getSorIndex()
+{
+    return nvEvoConnectorGetPrimaryOr(pConnectorEvo);
+}
+
+NvU32 EvoInterface::getLinkIndex()
+{
+    switch (pConnectorEvo->or.protocol) {
+    case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+        return 0;
+    case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+        return 1;
+    }
+
+    nvAssert(!"Unrecognized DP protocol");
+    return -1;
+}
+
+NvU32 EvoInterface::monitorDenylistInfo(
+    NvU32 manufId, NvU32 productId,
+    DisplayPort::DpMonitorDenylistData *pDenylistData)
+{
+    //
+    // WAR for the Sharp internal (eDP) panels in certain Toshiba/Dell
+    // systems: override the optimal link configuration to HBR2.
+    //
+    // HBR2 is required to drive 4K resolution, and is supported by the
+    // DP 1.2 and later specifications. The panel advertises itself as
+    // DP 1.2 capable, but it does not have the ESI address space; this
+    // violates the specification, so inside the DP library we downgrade
+    // the DPCD revision to 1.1. With this downgrade in DPCD version, the
+    // link rate would also get downgraded to HBR.
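+    //
+    // The 0x14 written below appears to follow the DPCD LINK_BW_SET
+    // encoding, in units of 0.27 Gbps per lane: 0x14 = 20 * 0.27 Gbps
+    // = 5.4 Gbps, i.e. HBR2 (0x0A would be HBR, 0x06 RBR).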
+ // + if (manufId == 0x104d && + (productId == 0x1414 || productId == 0x1430)) { + + NvU32 warFlags = DisplayPort::DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG; + + pDenylistData->dpOverrideOptimalLinkConfig.linkRate = 0x14; // HBR2 + pDenylistData->dpOverrideOptimalLinkConfig.laneCount = laneCount_4; // 4 lanes + + return warFlags; + } + + return 0; +} + +}; // namespace nvkmsDisplayPort diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp new file mode 100644 index 0000000..114aaa4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVDP_EVO_INTERFACE_HPP__ +#define __NVDP_EVO_INTERFACE_HPP__ + +#include +#include +#include + +namespace nvkmsDisplayPort +{ + +class EvoInterface : public DisplayPort::Object, + public DisplayPort::EvoInterface +{ +public: + const NVConnectorEvoPtr pConnectorEvo; + + EvoInterface(NVConnectorEvoPtr pConnectorEvo); + + // Functions inherited from DisplayPort::EvoInterface + virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize); + virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize); + + virtual void disconnectHead(unsigned head) { + nvAssert(!"disconnectHead should never be called"); + } + virtual void reattachHead(unsigned head) { + nvAssert(!"reattachHead should never be called"); + } + + virtual NvU32 getSubdeviceIndex(); + virtual NvU32 getDisplayId(); + virtual NvU32 getSorIndex(); + virtual NvU32 getLinkIndex(); + virtual NvU32 getRegkeyValue(const char *key); + virtual bool isInbandStereoSignalingSupported(); + + virtual NvU32 monitorDenylistInfo( + NvU32 manufId, + NvU32 productId, + DisplayPort::DpMonitorDenylistData *pDenylistData); +}; + +}; // namespace nvkmsDisplayPort + +#endif // __NVDP_EVO_INTERFACE_HPP__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp new file mode 100644 index 0000000..371483f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* DisplayPort management routines */ + +#include + +#include "nvkms-utils.h" + +#include "dp_hostimp.h" + +void *dpMalloc(NvLength sz) +{ + return nvAlloc(sz); +} + +void dpFree(void *p) +{ + nvFree(p); +} + +void dpPrint(const char *format, ...) +{ + va_list ap; + va_start(ap, format); + nvVEvoLog(EVO_LOG_INFO, NV_INVALID_GPU_LOG_INDEX, format, ap); + va_end(ap); +} + +void dpDebugBreakpoint(void) +{ + nvAssert(!"DisplayPort library debug breakpoint"); +} + +#if NV_DP_ASSERT_ENABLED +void dpAssert(const char *expression, const char *file, + const char *function, int line) +{ + nvDebugAssert(expression, file, function, line); +} +#endif + +void dpTraceEvent(NV_DP_TRACING_EVENT event, + NV_DP_TRACING_PRIORITY priority, NvU32 numArgs, ...) 
+{
+    // To support DPlib tracing, implement this function.
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp
new file mode 100644
index 0000000..bc4d2e2
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp
@@ -0,0 +1,146 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+// This file implements the timer callback mechanism for the DisplayPort
+// library.
+
+#include "nvkms-types.h"
+
+#include "dp/nvdp-timer.h"
+#include "nvdp-timer.hpp"
+
+namespace nvkmsDisplayPort {
+    Timer::Callback::Callback(DisplayPort::List *pList,
+                              NVDevEvoPtr pDevEvo,
+                              DisplayPort::RawTimer::Callback *dpCallback,
+                              int ms)
+        : dpCallback(dpCallback),
+          ref_ptr(pDevEvo->ref_ptr),
+          handle(nvkms_alloc_timer(onTimerFired, this, 0, ms * 1000)),
+          expireTimeUs(nvkms_get_usec() + ms * 1000)
+    {
+        if (!allocFailed()) {
+            pList->insertFront(this);
+            nvkms_inc_ref(ref_ptr);
+        }
+    }
+
+    Timer::Callback::~Callback()
+    {
+        nvkms_free_timer(handle);
+    }
+
+    bool Timer::Callback::allocFailed() const
+    {
+        return handle == NULL;
+    }
+
+    bool Timer::Callback::isExpired(NvU64 timeNowUs) const
+    {
+        return timeNowUs >= expireTimeUs;
+    }
+
+    void Timer::Callback::onTimerFired(void *data, NvU32 dataU32)
+    {
+        Timer::Callback *cb = static_cast<Timer::Callback *>(data);
+        cb->onTimerFired();
+    }
+
+    void Timer::Callback::onTimerFired()
+    {
+        if (nvkms_dec_ref(ref_ptr)) {
+            dpCallback->expired();
+        }
+        delete this;
+    }
+
+    void Timer::Callback::fireIfExpired(NvU64 timeNowUs)
+    {
+        if (isExpired(timeNowUs)) {
+            onTimerFired();
+        }
+    }
+
+    Timer::Timer(NVDevEvoPtr pDevEvo)
+        : pDevEvo(pDevEvo)
+    {
+    }
+
+    void Timer::queueCallback(DisplayPort::RawTimer::Callback *dpCallback, int ms)
+    {
+        Callback *cb = new Callback(&timerList, pDevEvo, dpCallback, ms);
+        nvAssert(cb && !cb->allocFailed());
+        if (!cb || cb->allocFailed()) {
+            delete cb;
+            return;
+        }
+    }
+
+    NvU64 Timer::getTimeUs()
+    {
+        return nvkms_get_usec();
+    }
+
+    void Timer::sleep(int ms)
+    {
+        nvkms_usleep(ms * 1000);
+    }
+
+    void Timer::fireExpiredTimers()
+    {
+        const NvU64 timeNowUs = getTimeUs();
+        DisplayPort::ListElement *pElem = timerList.begin();
+        DisplayPort::ListElement *pNext;
+
+        while (pElem != timerList.end()) {
+            Callback *cb = static_cast<Callback *>(pElem);
+            pNext = pElem->next;
+
+            cb->fireIfExpired(timeNowUs);
+
+            pElem = pNext;
+        }
+    }
+
+}; // namespace nvkmsDisplayPort
+
+NvBool nvDPTimersPending(void)
+{
+    return FALSE;
+}
+
+NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo)
+{
+    NVDPLibTimerPtr pTimer = new _nv_dplibtimer(pDevEvo);
+    return pTimer;
+}
+
+void nvDPFreeTimer(NVDPLibTimerPtr pTimer)
+{
+    delete pTimer;
+}
+
+void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo)
+{
+    pDevEvo->dpTimer->rawTimer.fireExpiredTimers();
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp
new file mode 100644
index 0000000..125739e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp
@@ -0,0 +1,93 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVDP_TIMER_HPP__
+#define __NVDP_TIMER_HPP__
+
+#include
+#include
+#include
+
+namespace nvkmsDisplayPort
+{
+
+class Timer : public DisplayPort::RawTimer
+{
+    NVDevEvoPtr pDevEvo;
+    DisplayPort::List timerList;
+
+    class Callback : public DisplayPort::ListElement {
+        DisplayPort::RawTimer::Callback *dpCallback;
+        // ref_ptr to the pDevEvo
+        nvkms_ref_ptr *ref_ptr;
+        nvkms_timer_handle_t *handle;
+        NvU64 expireTimeUs;
+
+        static void onTimerFired(void *data, NvU32 dataU32);
+        void onTimerFired();
+
+    public:
+        // Construct an NVKMS timer callback. Since exceptions cannot be used
+        // in NVKMS code, callers must call Callback::allocFailed() to query
+        // whether the constructor succeeded.
+        //
+        // Scheduling a callback bumps the refcount on the corresponding
+        // pDevEvo, so that a device isn't freed until all pending callbacks
+        // have fired.
+        Callback(DisplayPort::List *pList,
+                 NVDevEvoPtr pDevEvo,
+                 DisplayPort::RawTimer::Callback *dpCallback,
+                 int ms);
+        ~Callback();
+
+        // Returns TRUE if the constructor failed.
+        bool allocFailed() const;
+        // Returns TRUE if the timer is ready to fire.
+        bool isExpired(NvU64 timeNowUs) const;
+        // Fire the timer if it's ready.
+        // NOTE: If the timer fires, this deletes it.
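+        // Because firing deletes the element, callers iterating the timer
+        // list must capture the next pointer before calling this, as
+        // Timer::fireExpiredTimers() does in nvdp-timer.cpp above.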
+ void fireIfExpired(NvU64 timeNowUs); + }; +public: + Timer(NVDevEvoPtr pDevEvo); + + virtual void queueCallback(DisplayPort::RawTimer::Callback *cb, int ms); + virtual NvU64 getTimeUs(); + virtual void sleep(int ms); + + void fireExpiredTimers(); +}; + +}; // namespace nvkmsDisplayPort + +struct _nv_dplibtimer : public DisplayPort::Object { + nvkmsDisplayPort::Timer rawTimer; + DisplayPort::Timer timer; + + _nv_dplibtimer(NVDevEvoPtr pDevEvo) + : rawTimer(pDevEvo), timer(&rawTimer) + { + } +}; + +#endif // __NVDP_TIMER_HPP__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c new file mode 100644 index 0000000..e8ae3db --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c @@ -0,0 +1,2818 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-evo-states.h" + +static NvBool EvoLockStateFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateNoLock(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool 
EvoLockStateSliLastSecondary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, 
const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateVrr(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); + +static NvBool EvoLockStateFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if 
+
+static NvBool EvoLockStateFrameLockClientManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockClientManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockClientPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateNoLock;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServer(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateNoLock;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServerHouseSync(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServer;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServerHouseSyncManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServerHouseSyncPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServerManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServer;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServerManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateFrameLockServerPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateFrameLockServer;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_SLI_PRIMARY:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_SLI_LAST_SECONDARY:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_UNLOCK_HEADS:
+        if (!queryOnly) {
+            nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateNoLock;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_SLI_SECONDARY:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockClient(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockClientManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockClientPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServer(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServerHouseSync(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServerManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsFrameLockServerPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateLockHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateLockHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateNoLock(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_SLI_PRIMARY:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimary;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_SLI_LAST_SECONDARY:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliLastSecondary;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateFrameLockServer;
+        }
+        return TRUE;
+
+    case NV_EVO_LOCK_HEADS:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ENABLE_VRR:
+        if (!queryOnly) {
+            nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateVrr;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_SLI_SECONDARY:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliSecondary;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliLastSecondary(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliLastSecondaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_SLI:
+        if (!queryOnly) {
+            nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateNoLock;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliLastSecondaryFrameLockClient(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliLastSecondary;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliLastSecondaryLockHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_SLI:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClient(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimary(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_SLI:
+        if (!queryOnly) {
+            nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateNoLock;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryFrameLockClient(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimary;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryFrameLockClientPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimary;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryFrameLockRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimary;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryFrameLockServer(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSync;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimary;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSync(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSync;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryFrameLockServerPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimary;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_SLI:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClient(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServer(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockExtraClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockExtraClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef;
+            } else {
+                pEvoSubDev->frameLockExtraClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef;
+        }
+        return TRUE;
+
+    case NV_EVO_ADD_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            pEvoSubDev->frameLockSliProxyClients++;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_SERVER:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_REF:
+        if (!queryOnly) {
+            if (!pEvoSubDev->frameLockSliProxyClients) {
+                nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads);
+                pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer;
+            } else {
+                pEvoSubDev->frameLockSliProxyClients--;
+            }
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliSecondary(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliSecondaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliSecondaryFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_SLI:
+        if (!queryOnly) {
+            nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateNoLock;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliSecondaryFrameLockClient(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliSecondary;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliSecondaryLockHeads(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClient;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_SLI:
+        if (!queryOnly) {
+            nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateLockHeads;
+        }
+        return TRUE;
+
+    default:
+        return FALSE;
+    }
+}
+
+static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClient(
+    NVDispEvoPtr pDispEvo,
+    NVEvoSubDevPtr pEvoSubDev,
+    NVEvoLockAction action,
+    const NvU32 *pHeads
+)
+{
+    NvBool queryOnly = pHeads == NULL;
+
+    switch (action) {
+
+    case NV_EVO_ADD_FRAME_LOCK_CLIENT:
+        if (!queryOnly) {
+            nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads);
+            pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads;
+        }
+        return TRUE;
+
+    case NV_EVO_REM_FRAME_LOCK_CLIENT:
+        if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) {
+            return FALSE;
+        }
+        if (!queryOnly) {
+            nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads);
pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateVrr( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_DISABLE_VRR: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +void nvEvoStateStartNoLock( + NVEvoSubDevPtr pEvoSubDev +) +{ + pEvoSubDev->scanLockState = EvoLockStateNoLock; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c new file mode 100644 index 0000000..d38a184 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-types.h" +#include "nvkms-3dvision.h" + +void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 head) +{ + return; +} + +void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo) +{ + return; +} + +NvBool +nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + return FALSE; +} + +void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo) +{ + return; +} + +void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + return; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c new file mode 100644 index 0000000..a04f257 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c @@ -0,0 +1,1354 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-attributes.h" +#include "nvkms-dpy.h" +#include "nvkms-framelock.h" +#include "nvkms-vrr.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvos.h" + +#include // NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_* + +/*! + * Set the current backlight brightness for the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose backlight brightness + * should be assigned. + * \param[in] brightness The backlight brightness value to program + * + * \return TRUE if backlight brightness is available for this pDpyEvo, + * otherwise FALSE. 
+ */ +static NvBool DpySetBacklightBrightness(NVDpyEvoRec *pDpyEvo, NvS64 brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (!pDpyEvo->hasBacklightBrightness) { + return FALSE; + } + + if (brightness > NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE) { + return FALSE; + } + + if (brightness < NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE) { + return FALSE; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.brightness = brightness; + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + return (ret == NVOS_STATUS_SUCCESS); +} + +/*! + * Query the current backlight brightness for the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose backlight brightness + * should be queried. + * \param[out] pBrightness The backlight brightness value + * + * \return TRUE if backlight brightness is available for this pDpyEvo, + * otherwise FALSE. + */ +static NvBool DpyGetBacklightBrightness(const NVDpyEvoRec *pDpyEvo, + NvS64 *pBrightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + nvAssert(params.brightness <= NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE); + + *pBrightness = params.brightness; + + return TRUE; +} + +/*! + * Populate NvKmsAttributeValidValuesCommonReply for backlight brightness. + * + * \param[in] pDpyEvo The display device whose backlight brightness + * should be queried. + * \param[out] pValidValues The ValidValues structure to populate. + * + * \return TRUE if backlight brightness is available for this pDpy, + * otherwise FALSE. + */ +static NvBool DpyGetBacklightBrightnessValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!pDpyEvo->hasBacklightBrightness) { + return FALSE; + } + + pValidValues->type = NV_KMS_ATTRIBUTE_TYPE_RANGE; + + pValidValues->u.range.min = NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE; + pValidValues->u.range.max = NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE; + + return TRUE; +} + +/*! + * Query RM for the current scanline of the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose scanline + * should be queried. + * \param[out] pScanLine The scanline value. + * + * \return TRUE if the scanline could be queried for this pDpyEvo, + * otherwise FALSE. + */ +static NvBool GetScanLine(const NVDpyEvoRec *pDpyEvo, NvS64 *pScanLine) +{ + NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + /* XXX[2Heads1OR] Get scanline of the primary hardware head.
*/ + const NvU32 head = pDpyEvo->apiHead; + + if (head == NV_INVALID_HEAD) { + return FALSE; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.head = head; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE, + &params, sizeof(params)); + + if (ret == NVOS_STATUS_SUCCESS) { + *pScanLine = params.currentScanline; + return TRUE; + } + + return FALSE; +} + +/*! + * Retrieve the current head of the given pDpyEvo. + * + * \param[in] pDpyEvo The display device whose head + * should be queried. + * \param[out] pHead The head value. + * + * \return TRUE. If there is no valid head, pHead is assigned + * NV_INVALID_HEAD. + */ +static NvBool GetHead(const NVDpyEvoRec *pDpyEvo, NvS64 *pHead) +{ + *pHead = (NvS64)pDpyEvo->apiHead; + return TRUE; +} + +static NvBool DitherConfigurationAllowed(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + return pDevEvo->hal->caps.supportedDitheringModes != 0; +} + +static void SetDitheringCommon(NVDpyEvoPtr pDpyEvo) +{ + NVEvoUpdateState updateState = { }; + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + enum nvKmsPixelDepth pixelDepth; + NvU32 head; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead); + pixelDepth = pDispEvo->headState[head].timings.pixelDepth; +#if defined(DEBUG) + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvAssert(pixelDepth == pDispEvo->headState[head].timings.pixelDepth); + } +#endif + + nvChooseDitheringEvo(pConnectorEvo, + pixelDepth, + &pDpyEvo->requestedDithering, + &pApiHeadState->attributes.dithering); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvSetDitheringEvo(pDispEvo, + head, + &pApiHeadState->attributes.dithering, + &updateState); + } + + nvEvoUpdateAndKickOff(pDpyEvo->pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); +} + +/*! + * Assigns dithering on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDithering(NVDpyEvoRec *pDpyEvo, NvS64 dithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + switch (dithering) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.state = dithering; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDithering(const NVDpyEvoRec *pDpyEvo, NvS64 *pDithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDithering = pDpyEvo->requestedDithering.state; + + return TRUE; +} + +static NvBool GetDitheringGenericValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + return DitherConfigurationAllowed(pDpyEvo); +} + +/*! + * Assigns ditheringMode on all dpys driven by pDpyEvo's head.
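+ * + * For example, NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL is + * accepted only if the corresponding bit, (1 << ditheringMode), is set in + * pDevEvo->hal->caps.supportedDitheringModes; unsupported modes are + * rejected below without modifying the requested state.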
+ */ +static NvBool SetDitheringMode(NVDpyEvoRec *pDpyEvo, NvS64 ditheringMode) +{ + NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 mask = (1 << ditheringMode); + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + if (!(mask & pDevEvo->hal->caps.supportedDitheringModes)) { + return FALSE; + } + + switch (ditheringMode) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.mode = ditheringMode; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDitheringMode(const NVDpyEvoRec *pDpyEvo, + NvS64 *pDitheringMode) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDitheringMode = pDpyEvo->requestedDithering.mode; + + return TRUE; +} + +static NvBool GetDitheringModeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = + pDevEvo->hal->caps.supportedDitheringModes; + + return TRUE; +} + +/*! + * Assigns ditheringDepth on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDitheringDepth(NVDpyEvoRec *pDpyEvo, NvS64 ditheringDepth) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + switch (ditheringDepth) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.depth = ditheringDepth; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDitheringDepth(const NVDpyEvoRec *pDpyEvo, + NvS64 *pDitheringDepth) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDitheringDepth = pDpyEvo->requestedDithering.depth; + + return TRUE; +} + +static NvBool GetCurrentDithering(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDithering = pDpyEvo->currentAttributes.dithering.enabled; + + return TRUE; +} + +static NvBool GetCurrentDitheringMode(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDitheringMode) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDitheringMode = + pDpyEvo->currentAttributes.dithering.mode; + + return TRUE; +} + +static NvBool GetCurrentDitheringDepth(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDitheringDepth) +{ + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDitheringDepth = + pDpyEvo->currentAttributes.dithering.depth; + + return TRUE; +} + +static NvBool DigitalVibranceAvailable(const NVDpyEvoRec *pDpyEvo) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + if (!pDevEvo->hal->caps.supportsDigitalVibrance) { + return FALSE; + } + + return TRUE; +} + +/*! + * Assigns dvc on all dpys driven by pDpyEvo's head. 
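+ * + * Note that out-of-range values are clamped rather than rejected: for + * example, a request above NV_EVO_DVC_MAX simply programs NV_EVO_DVC_MAX.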
+ */ +static NvBool SetDigitalVibrance(NVDpyEvoRec *pDpyEvo, NvS64 dvc) +{ + NVEvoUpdateState updateState = { }; + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + NvU32 head; + + if ((pDpyEvo->apiHead == NV_INVALID_HEAD) || + !DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + dvc = NV_MAX(dvc, NV_EVO_DVC_MIN); + dvc = NV_MIN(dvc, NV_EVO_DVC_MAX); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvSetDVCEvo(pDispEvo, head, dvc, &updateState); + } + + nvEvoUpdateAndKickOff(pDpyEvo->pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + + pApiHeadState->attributes.dvc = dvc; + + return TRUE; +} + +static NvBool GetDigitalVibrance(const NVDpyEvoRec *pDpyEvo, NvS64 *pDvc) +{ + if (!DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + + *pDvc = pDpyEvo->currentAttributes.dvc; + + return TRUE; +} + +static NvBool GetDigitalVibranceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = NV_EVO_DVC_MIN; + pValidValues->u.range.max = NV_EVO_DVC_MAX; + + return TRUE; +} + +static NvBool ImageSharpeningAvailable(const NVDpyEvoRec *pDpyEvo) +{ + if (!pDpyEvo->pDispEvo->pDevEvo->hal->caps.supportsImageSharpening) { + return FALSE; + } + + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + return pDpyEvo->currentAttributes.imageSharpening.available; +} + +/*! + * Assigns imageSharpening on all dpys driven by pDpyEvo's head. 
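+ * + * As with digital vibrance, the value is clamped to + * [NV_EVO_IMAGE_SHARPENING_MIN, NV_EVO_IMAGE_SHARPENING_MAX] and then + * programmed on every hardware head in pApiHeadState->hwHeadsMask.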
+ */ +static NvBool SetImageSharpening(NVDpyEvoRec *pDpyEvo, NvS64 imageSharpening) +{ + NVEvoUpdateState updateState = { }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + NvU32 head; + + if ((pDpyEvo->apiHead == NV_INVALID_HEAD) || + !ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + imageSharpening = NV_MAX(imageSharpening, NV_EVO_IMAGE_SHARPENING_MIN); + imageSharpening = NV_MIN(imageSharpening, NV_EVO_IMAGE_SHARPENING_MAX); + + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvSetImageSharpeningEvo(pDispEvo, head, imageSharpening, &updateState); + } + + nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + + pApiHeadState->attributes.imageSharpening.value = imageSharpening; + + return TRUE; +} + +static NvBool GetImageSharpening(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpening) +{ + if (!ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + + *pImageSharpening = pDpyEvo->currentAttributes.imageSharpening.value; + + return TRUE; +} + +static NvBool GetImageSharpeningValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = NV_EVO_IMAGE_SHARPENING_MIN; + pValidValues->u.range.max = NV_EVO_IMAGE_SHARPENING_MAX; + + return TRUE; +} + +static NvBool GetImageSharpeningAvailable(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpeningAvailable) +{ + *pImageSharpeningAvailable = ImageSharpeningAvailable(pDpyEvo); + + return TRUE; +} + +static NvBool GetImageSharpeningDefault(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpeningDefault) +{ + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + *pImageSharpeningDefault = NV_EVO_IMAGE_SHARPENING_DEFAULT; + + return TRUE; +} + +static NvBool ColorSpaceAndRangeAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return ((pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + (pDpyEvo->pConnectorEvo->signalFormat != + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI)); +} + +/*! + * Send infoFrame with new color{Space,Range}. + */ +static void DpyPostColorSpaceOrRangeSetEvo(NVDpyEvoPtr pDpyEvo) +{ + NVEvoUpdateState updateState = { }; + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDispApiHeadStateEvoRec *pApiHeadState; + enum nvKmsPixelDepth pixelDepth; + enum NvYuv420Mode yuv420Mode; + NvU32 head; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return; + } + pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead]; + + nvAssert((pApiHeadState->hwHeadsMask) != 0x0 && + (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys))); + + head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead); + pixelDepth = pDispEvo->headState[head].timings.pixelDepth; + yuv420Mode = pDispEvo->headState[head].timings.yuv420Mode; +#if defined(DEBUG) + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvAssert(pixelDepth == pDispEvo->headState[head].timings.pixelDepth); + nvAssert(yuv420Mode == pDispEvo->headState[head].timings.yuv420Mode); + } +#endif + + /* + * Choose current colorSpace and colorRange based on the current mode + * timings and the requested color space and range. 
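+ * For example, a dpy whose requested color space is RGB but whose current + * mode uses YUV420 ends up with a current color space of YCbCr420; this is + * why the "current" attribute can differ from the "requested" one. + *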
+ */ + nvChooseCurrentColorSpaceAndRangeEvo(pixelDepth, + yuv420Mode, + pDpyEvo->requestedColorSpace, + pDpyEvo->requestedColorRange, + &pApiHeadState->attributes.colorSpace, + &pApiHeadState->attributes.colorRange); + + /* Update hardware's current colorSpace and colorRange */ + FOR_EACH_EVO_HW_HEAD_IN_MASK(pApiHeadState->hwHeadsMask, head) { + nvUpdateCurrentHardwareColorSpaceAndRangeEvo(pDispEvo, + head, + pApiHeadState->attributes.colorSpace, + pApiHeadState->attributes.colorRange, + &updateState); + } + + /* Update InfoFrames as needed. */ + nvUpdateInfoFrames(pDpyEvo); + + // Kick off + nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState, TRUE /* releaseElv */); + + // XXX DisplayPort sets color format. +} + +static NvU32 DpyGetValidColorSpaces(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 val = (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB); + + if (pDpyEvo->pConnectorEvo->colorSpaceCaps.ycbcr422Capable && + pDpyEvo->colorSpaceCaps.ycbcr422Capable) { + val |= (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422); + } + + if (pDpyEvo->pConnectorEvo->colorSpaceCaps.ycbcr444Capable && + pDpyEvo->colorSpaceCaps.ycbcr444Capable) { + val |= (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444); + } + + return val; +} + +NvBool nvDpyValidateColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + NvU32 validMask = DpyGetValidColorSpaces(pDpyEvo); + + if (!ColorSpaceAndRangeAvailable(pDpyEvo) || !(validMask & (1 << value))) { + return FALSE; + } + + return TRUE; +} + +static NvBool SetRequestedColorSpace(NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + if (!nvDpyValidateColorSpace(pDpyEvo, value)) { + return FALSE; + } + + pDpyEvo->requestedColorSpace = value; + + DpyPostColorSpaceOrRangeSetEvo(pDpyEvo); + + return TRUE; +} + +static NvBool GetCurrentColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.colorSpace; + + return TRUE; +} + +static NvBool GetRequestedColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->requestedColorSpace; + + return TRUE; +} + +static NvBool GetCurrentColorSpaceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = DpyGetValidColorSpaces(pDpyEvo); + + /* + * The current color space may be YUV420 depending on the current mode. + * Rather than determine whether this pDpy is capable of driving any + * YUV420 modes, just assume this is always a valid current color space. 
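+ * + * As an illustration: a DFP whose connector and EDID both report YCbCr444 + * support returns the RGB and YCbCr444 bits from DpyGetValidColorSpaces(), + * and the YCbCr420 bit is then added unconditionally below.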
+ */ + pValidValues->u.bits.ints |= + (1 << NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420); + + return TRUE; +} + +static NvBool GetRequestedColorSpaceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = DpyGetValidColorSpaces(pDpyEvo); + + return TRUE; +} + +static NvBool SetRequestedColorRange(NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + pDpyEvo->requestedColorRange = value; + + DpyPostColorSpaceOrRangeSetEvo(pDpyEvo); + + return TRUE; +} + +static NvBool GetCurrentColorRange(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.colorRange; + + return TRUE; +} + +static NvBool GetRequestedColorRange(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->requestedColorRange; + + return TRUE; +} + +static NvBool GetColorRangeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + /* + * The preferred color range may always select between full or limited + * range, but the actual resulting color range depends on the current + * color space. Both color ranges are always valid values for both + * preferred and current color range attributes. + */ + pValidValues->u.bits.ints = (1 << NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL) | + (1 << NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED); + + return TRUE; +} + +static NvBool DigitalSignalAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP; +} + +static NvBool GetDigitalSignal(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DigitalSignalAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.digitalSignal; + + return TRUE; +} + +static NvBool GetDigitalSignalValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalSignalAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool DigitalLinkTypeAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return (nvDpyEvoIsActive(pDpyEvo) && + (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP)); +} + +static NvBool GetDigitalLinkType(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DigitalLinkTypeAvailable(pDpyEvo)) { + return FALSE; + } + + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + *pValue = nvRMLaneCountToNvKms(pDpyEvo->dp.laneCount); + } else { + const NVHwModeTimingsEvo *pTimings; + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + /* + * XXX[2Heads1OR] Track DIGITAL_LINK_TYPE as per head + * attributes set which get mirrored into NVDpyEvoRec::attributes + * after modeset. + */ + const NvU32 head = pDpyEvo->apiHead; + + if (head == NV_INVALID_HEAD) { + return FALSE; + } + + pTimings = &pDispEvo->headState[head].timings; + + *pValue = nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings) ? 
+ NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL : + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE; + } + + return TRUE; +} + +static NvBool GetDigitalLinkTypeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalLinkTypeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool DisplayportLinkRateAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return ((pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)); +} + +static NvBool GetDisplayportLinkRate(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.linkRate; + + return TRUE; +} + +static NvBool GetDisplayportLinkRateValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetDisplayportConnectorType(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.connectorType; + + return TRUE; +} + +static NvBool GetDisplayportConnectorTypeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetDisplayportIsMultistream(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = nvDpyEvoIsDPMST(pDpyEvo); + + return TRUE; +} + +static NvBool GetDisplayportIsMultistreamValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_BOOLEAN); + + return TRUE; +} + +static NvBool GetDisplayportSinkIsAudioCapable(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.sinkIsAudioCapable; + + return TRUE; +} + +static NvBool GetDisplayportSinkIsAudioCapableValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_BOOLEAN); + + return TRUE; +} + +NvS64 nvRMLaneCountToNvKms(NvU32 rmLaneCount) +{ + switch (rmLaneCount) { + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_0: + // fallthrough + default: + nvAssert(!"Unexpected DisplayPort lane configuration!"); + // fallthrough + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE; + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_2: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL; + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_4: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_QUAD; + } +} + +static NvBool SetStereoEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NvBool enable = !!value; + + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return FALSE; + } + + /* XXX[2Heads1OR] 
Broadcast the stereo setting to the hardware heads. */ + return nvSetStereoEvo(pDpyEvo->pDispEvo, pDpyEvo->apiHead, enable); +} + +static NvBool GetStereoEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (pDpyEvo->apiHead == NV_INVALID_HEAD) { + return FALSE; + } + + /* XXX[2Heads1OR] Loop over hardware heads to determine stereo status. */ + *pValue = !!nvGetStereoEvo(pDpyEvo->pDispEvo, pDpyEvo->apiHead); + + return TRUE; +} + +static NvBool GetVrrMinRefreshRate(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + return FALSE; +} + +static NvBool GetVrrMinRefreshRateValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + return FALSE; +} + +static const struct { + NvBool (*set)(NVDpyEvoPtr pDpyEvo, NvS64 value); + NvBool (*get)(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue); + NvBool (*getValidValues)( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + enum NvKmsAttributeType type; +} DpyAttributesDispatchTable[] = { + [NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS] = { + .set = DpySetBacklightBrightness, + .get = DpyGetBacklightBrightness, + .getValidValues = DpyGetBacklightBrightnessValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_SCANLINE] = { + .set = NULL, + .get = GetScanLine, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_HEAD] = { + .set = NULL, + .get = GetHead, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING] = { + .set = SetDithering, + .get = GetDithering, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE] = { + .set = SetDitheringMode, + .get = GetDitheringMode, + .getValidValues = GetDitheringModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH] = { + .set = SetDitheringDepth, + .get = GetDitheringDepth, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING] = { + .set = NULL, + .get = GetCurrentDithering, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE] = { + .set = NULL, + .get = GetCurrentDitheringMode, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH] = { + .set = NULL, + .get = GetCurrentDitheringDepth, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE] = { + .set = SetDigitalVibrance, + .get = GetDigitalVibrance, + .getValidValues = GetDigitalVibranceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING] = { + .set = SetImageSharpening, + .get = GetImageSharpening, + .getValidValues = GetImageSharpeningValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE] = { + .set = NULL, + .get = GetImageSharpeningAvailable, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_DEFAULT] = { + .set = NULL, + .get = GetImageSharpeningDefault, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE] = { + .set = 
SetRequestedColorSpace, + .get = GetRequestedColorSpace, + .getValidValues = GetRequestedColorSpaceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE] = { + .set = NULL, + .get = GetCurrentColorSpace, + .getValidValues = GetCurrentColorSpaceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE] = { + .set = SetRequestedColorRange, + .get = GetRequestedColorRange, + .getValidValues = GetColorRangeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE] = { + .set = NULL, + .get = GetCurrentColorRange, + .getValidValues = GetColorRangeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL] = { + .set = NULL, + .get = GetDigitalSignal, + .getValidValues = GetDigitalSignalValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE] = { + .set = NULL, + .get = GetDigitalLinkType, + .getValidValues = GetDigitalLinkTypeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE] = { + .set = NULL, + .get = GetDisplayportLinkRate, + .getValidValues = GetDisplayportLinkRateValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE] = { + .set = NULL, + .get = GetDisplayportConnectorType, + .getValidValues = GetDisplayportConnectorTypeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_IS_MULTISTREAM] = { + .set = NULL, + .get = GetDisplayportIsMultistream, + .getValidValues = GetDisplayportIsMultistreamValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE] = { + .set = NULL, + .get = GetDisplayportSinkIsAudioCapable, + .getValidValues = GetDisplayportSinkIsAudioCapableValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG] = { + .set = nvSetFrameLockDisplayConfigEvo, + .get = nvGetFrameLockDisplayConfigEvo, + .getValidValues = nvGetFrameLockDisplayConfigValidValuesEvo, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_RASTER_LOCK] = { + .set = NULL, + .get = nvQueryRasterLockEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_UPDATE_FLIPLOCK] = { + .set = nvSetFlipLockEvo, + .get = nvGetFlipLockEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_UPDATE_STEREO] = { + .set = SetStereoEvo, + .get = GetStereoEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_DPMS] = { + .set = nvRmSetDpmsEvo, + .get = NULL, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE] = { + .set = NULL, + .get = GetVrrMinRefreshRate, + .getValidValues = GetVrrMinRefreshRateValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, +}; + +/*! + * Set pParams->attribute to pParams->value on the given dpy. 
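+ * + * Illustrative flow (hypothetical request values): with + * pParams->request.attribute = NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE and + * pParams->request.value = 512, the call is dispatched to + * SetDigitalVibrance() through DpyAttributesDispatchTable, and the current + * attributes of every dpy cloned on the same api head are then refreshed.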
+ */ +NvBool nvSetDpyAttributeEvo(NVDpyEvoPtr pDpyEvo, + struct NvKmsSetDpyAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + if (DpyAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + if (!DpyAttributesDispatchTable[index].set(pDpyEvo, + pParams->request.value)) { + return FALSE; + } + + if (pDpyEvo->apiHead != NV_INVALID_HEAD) { + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDpyEvoRec *pClonedDpyEvo; + + /* + * The current attributes state should be consistent across all cloned + * dpys. + * + * XXX[2Heads1OR] Optimize this loop in a follow-on code change once + * the apiHead -> pDpyEvo mapping is implemented. + */ + FOR_ALL_EVO_DPYS(pClonedDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pClonedDpyEvo->apiHead != pDpyEvo->apiHead) { + continue; + } + nvDpyUpdateCurrentAttributes(pClonedDpyEvo); + } + } else { + nvDpyUpdateCurrentAttributes(pDpyEvo); + } + + return TRUE; +} + +/*! + * Get the value of pParams->attribute on the given dpy. + */ +NvBool nvGetDpyAttributeEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + if (DpyAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return DpyAttributesDispatchTable[index].get(pDpyEvo, + &pParams->reply.value); +} + +/*! + * Get the valid values of pParams->attribute on the given dpy. + */ +NvBool nvGetDpyAttributeValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeValidValuesParams *pParams) +{ + NvU32 index = pParams->request.attribute; + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (DpyAttributesDispatchTable[index].get != NULL); + pReply->writable = (DpyAttributesDispatchTable[index].set != NULL); + + pReply->type = DpyAttributesDispatchTable[index].type; + + /* + * The getValidValues function provides three important things: + * - If type==Range, then assigns reply::u::range. + * - If type==IntBits, then assigns reply::u::bits::ints. + * - If the attribute is not currently available, returns FALSE. + * If the getValidValues function is NULL, assume the attribute is + * available. The type must not be something that requires assigning + * to reply::u. + */ + if (DpyAttributesDispatchTable[index].getValidValues == NULL) { + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_INTBITS); + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_RANGE); + return TRUE; + } + + return DpyAttributesDispatchTable[index].getValidValues(pDpyEvo, pReply); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c new file mode 100644 index 0000000..28e52d9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c @@ -0,0 +1,876 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-console-restore.h" +#include "nvkms-dpy.h" +#include "nvkms-flip.h" +#include "nvkms-modepool.h" +#include "nvkms-modeset.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-rm.h" +#include "nvkms-utils.h" + +#include "dp/nvdp-connector.h" + +/*! + * Find the first valid mode of given dimensions (width and height) that passes + * IMP at boot clocks. If input dimensions are not given then return the first + * valid mode that passes IMP at boot clocks. + */ +static NvBool FindMode(NVDpyEvoPtr pDpyEvo, + const enum NvKmsSurfaceMemoryFormat format, + const NvU32 width, + const NvU32 height, + struct NvKmsMode *pModeOut) +{ + NvU32 index = 0; + + while (TRUE) { + struct NvKmsValidateModeIndexParams params = { }; + + params.request.dpyId = pDpyEvo->id; + params.request.modeIndex = index++; + params.request.modeValidation.overrides = NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS; + + nvValidateModeIndex(pDpyEvo, &params.request, &params.reply); + + if (params.reply.end) { + break; + } + + if (!params.reply.valid) { + continue; + } + + if (!(NVBIT64(format) & + params.reply.modeUsage.layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats)) { + continue; + } + + if (height != 0 && height != params.reply.mode.timings.vVisible) { + continue; + } + + if (width != 0 && width != params.reply.mode.timings.hVisible) { + continue; + } + + *pModeOut = params.reply.mode; + return TRUE; + } + + return FALSE; +} + +/*! + * Make sure pDispEvo->connectedDpys is up to date. + * + * Do this by querying the dpy dynamic data for all dpys. The results aren't + * actually important, but querying the dynamic data has the side effect of + * updating pDispEvo->connectedDpys.
+ */ +static NVDpyIdList UpdateConnectedDpys(NVDispEvoPtr pDispEvo) +{ + NVDpyEvoPtr pDpyEvo; + struct NvKmsQueryDpyDynamicDataParams *pParams = + nvCalloc(1, sizeof(*pParams)); + + if (!pParams) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN, + "Failed to allocate NvKmsQueryDpyDynamicDataParams"); + return pDispEvo->connectedDisplays; + } + + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + nvkms_memset(pParams, 0, sizeof(*pParams)); + nvDpyGetDynamicData(pDpyEvo, pParams); + } + + nvFree(pParams); + + return pDispEvo->connectedDisplays; +} + +static void FlipBaseToNull(NVDevEvoPtr pDevEvo) +{ + struct NvKmsFlipParams *pParams = nvCalloc(1, sizeof(*pParams)); + struct NvKmsFlipRequest *pRequest; + NvU32 sd; + NVDispEvoPtr pDispEvo; + NvBool ret = TRUE; + + if (!pParams) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Failed to allocate flip parameters for console restore base flip " + "to NULL"); + return; + } + + pRequest = &pParams->request; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + struct NvKmsFlipRequestOneSubDevice *pRequestSd = + &pRequest->sd[sd]; + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + struct NvKmsFlipCommonParams *pRequestHead = + &pRequestSd->head[head]; + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + pRequestSd->requestedHeadsBitMask |= NVBIT(head); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + pRequestHead->layer[layer].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping to a NULL surface. + pRequestHead->layer[layer].compositionParams.specified = TRUE; + pRequestHead->layer[layer].completionNotifier.specified = TRUE; + pRequestHead->layer[layer].syncObjects.specified = TRUE; + } + + pRequest->commit = TRUE; + } + } + + // If no heads require changes, there's nothing to do.
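+ // (pRequest->commit was set above only if at least one active head had + // its layers flipped to the NULL surface.)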
+ if (pRequest->commit) { + ret = nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, pRequest, + &pParams->reply, FALSE /* skipUpdate */, + FALSE /* allowFlipLock */); + } + nvFree(pParams); + + if (!ret) { + nvAssert(!"Console restore failed to flip base to NULL"); + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + NvBool stoppedBase; + ret = nvRMIdleBaseChannel(pDevEvo, head, sd, &stoppedBase); + if (!ret) { + nvAssert(!"Console restore failed to idle base"); + } + } + } +} + +static NvBool InitModeOneHeadRequest( + NVDpyEvoRec *pDpyEvo, + NVSurfaceEvoPtr pSurfaceEvo, + const struct NvKmsMode *pOverrideMode, + const struct NvKmsSize *pOverrideViewPortSizeIn, + const struct NvKmsPoint *pOverrideViewPortPointIn, + const NvU32 head, + struct NvKmsSetModeOneHeadRequest *pRequestHead) +{ + + struct NvKmsFlipCommonParams *pFlip = &pRequestHead->flip; + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 layer; + + if (pOverrideMode != NULL) { + pRequestHead->mode = *pOverrideMode; + } else { + if (!FindMode(pDpyEvo, + pSurfaceEvo->format, + 0 /* Ignore mode width */, + 0 /* Ignore mode height */, + &pRequestHead->mode)) { + return FALSE; + } + } + + pRequestHead->dpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id); + pRequestHead->modeValidationParams.overrides = + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS; + if (pOverrideViewPortSizeIn != NULL) { + pRequestHead->viewPortSizeIn = *pOverrideViewPortSizeIn; + } else { + pRequestHead->viewPortSizeIn.width = pSurfaceEvo->widthInPixels; + pRequestHead->viewPortSizeIn.height = pSurfaceEvo->heightInPixels; + } + + pFlip->viewPortIn.specified = TRUE; + if (pOverrideViewPortPointIn != NULL) { + pFlip->viewPortIn.point = *pOverrideViewPortPointIn; + } + pFlip->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_LEFT] = + pDevEvo->fbConsoleSurfaceHandle; + + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = pSurfaceEvo->widthInPixels; + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = pSurfaceEvo->heightInPixels; + + pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; + pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.val = + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val; + + /* Disable other layers except Main */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + + if (layer == NVKMS_MAIN_LAYER) { + pFlip->layer[layer].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX; + pFlip->layer[layer].csc.specified = TRUE; + } + pFlip->layer[layer].surface.specified = TRUE; + + pFlip->layer[layer].completionNotifier.specified = TRUE; + pFlip->layer[layer].syncObjects.specified = TRUE; + pFlip->layer[layer].compositionParams.specified = TRUE; + } + + // Disable other features. 
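+ // Marking the cursor image and LUTs "specified" while leaving their + // values at the caller's zero-initialized defaults effectively disables + // them for the console mode.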
+ pFlip->cursor.imageSpecified = TRUE; + pRequestHead->lut.input.specified = TRUE; + pRequestHead->lut.output.specified = TRUE; + pRequestHead->lut.synchronous = TRUE; + pRequestHead->allowGsync = FALSE; + pRequestHead->allowAdaptiveSync = + NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED; + + return TRUE; +} + +static NvBool +ConstructModeOneHeadRequestForOneDpy(NVDpyEvoRec *pDpyEvo, + NVSurfaceEvoPtr pSurfaceEvo, + struct NvKmsSetModeParams *pParams, + const NvU32 dispIndex, + NvU32 *pAvailableHeadsMask) +{ + NvBool ret = FALSE; + const NvU32 possibleHeads = *pAvailableHeadsMask & + pDpyEvo->pConnectorEvo->validHeadMask; + + if (possibleHeads == 0 || pDpyEvo->isVrHmd) { + goto done; + } + + const NvU32 head = BIT_IDX_32(LOWESTBIT(possibleHeads)); + + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + struct NvKmsSetModeRequest *pRequest = &pParams->request; + struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + if (!InitModeOneHeadRequest(pDpyEvo, + pSurfaceEvo, + NULL /* Use default Mode */, + NULL /* Use default ViewPortSizeIn */, + NULL /* Use default ViewPortPointIn */, + head, + pRequestHead)) { + goto done; + } + + nvAssert(!pRequestHead->viewPortOutSpecified); + nvAssert(!pRequest->commit); + + while (!nvSetDispModeEvo(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pRequest, + &pParams->reply, + TRUE /* bypassComposition */, + FALSE /* doRasterLock */)) { + /* + * If validation is failing even after disabling scaling then leave + * this dpy inactive. + */ + if (pRequestHead->viewPortOutSpecified) { + nvkms_memset(pRequestHead, 0, sizeof(*pRequestHead)); + goto done; + } + + /* Disable scaling and try again */ + pRequestHead->viewPortOut = (struct NvKmsRect) { + .height = pRequestHead->viewPortSizeIn.height, + .width = pRequestHead->viewPortSizeIn.width, + .x = 0, + .y = 0, + }; + pRequestHead->viewPortOutSpecified = TRUE; + } + + *pAvailableHeadsMask &= ~NVBIT(head); + + ret = TRUE; + +done: + + return ret; +} + +typedef struct _TiledDisplayInfo { + NVDpyIdList detectedDpysList; + NvBool isDetectComplete; + NvBool isCapToScaleSingleTile; +} TiledDisplayInfo; + +/* + * Detect Tiled-display of topology-id described in given pDisplayIdInfo. + * + * Loop over all given dpys from the candidateConnectedDpys list, looking for a + * matching topology-id. Add dpys with a matching topology-id to the + * detectedTiledDisplayDpysList list. Mark Tiled-Display detection complete if + * exactly the expected number of tiles is found. + */ +static NvBool DetectTiledDisplay(const NVDispEvoRec *pDispEvo, + const NVT_DISPLAYID_INFO *pDisplayIdInfo, + const NVDpyIdList candidateConnectedDpys, + TiledDisplayInfo *pTiledDisplayInfo) +{ + const NVT_TILEDDISPLAY_TOPOLOGY_ID nullTileDisplayTopoId = { 0 }; + const NVDpyEvoRec *pDpyEvo; + const NvU32 numTiles = pDisplayIdInfo->tile_topology.row * + pDisplayIdInfo->tile_topology.col; + const NvU32 numTilesMask = NVBIT(numTiles) - 1; + NvU32 detectedTilesCount = 0; + NvU32 detectedTilesMask = 0; + + NVDpyIdList detectedTiledDisplayDpysList = nvEmptyDpyIdList(); + + /* + * If parsed edid is valid and tile_topology_id is non-zero then the dpy + * is considered a valid tile of a tiled display. + * + * The 'tile_topology_id' is a triplet of ids consisting of vendor_id, + * product_id, and serial_number.
The DisplayId specification does not + * clearly define an invalid 'tile_topology_id', but here the + * tile_topology_id is considered invalid only if all three ids are zero, + * which is consistent with other protocols like RandR1.2 'The tile group + * identifier'. + */ + if (!nvkms_memcmp(&pDisplayIdInfo->tile_topology_id, + &nullTileDisplayTopoId, sizeof(nullTileDisplayTopoId))) { + return FALSE; + } + + /* + * Reject a Tiled-Display that consists of multiple physical display + * enclosures or requires bezel configuration. + */ + if (!pDisplayIdInfo->tile_capability.bSingleEnclosure || + pDisplayIdInfo->tile_capability.bHasBezelInfo) { + return FALSE; + } + + /* + * Reject a Tiled-Display which has more than 4 horizontal or vertical + * tiles. + */ + if (pDisplayIdInfo->tile_topology.row <= 0 || + pDisplayIdInfo->tile_topology.col <= 0 || + pDisplayIdInfo->tile_topology.row > 4 || + pDisplayIdInfo->tile_topology.col > 4) { + return FALSE; + } + + FOR_ALL_EVO_DPYS(pDpyEvo, candidateConnectedDpys, pDispEvo) { + const NVT_EDID_INFO *pEdidInfo = &pDpyEvo->parsedEdid.info; + const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo = + &pEdidInfo->ext_displayid; + + if (!pDpyEvo->parsedEdid.valid) { + continue; + } + + if (nvkms_memcmp(&pDisplayIdInfo->tile_topology_id, + &pDpyDisplayIdInfo->tile_topology_id, + sizeof(pDpyDisplayIdInfo->tile_topology_id))) { + continue; + } + + /* + * Tiled-Display Topology: + * + * |-----------col + * + * ___ +------------+------------+... + * | | (x=0,y=0) | (x=1,y=0) | + * | | | | + * | | | | + * | +------------+------------+ + * row | (x=0,y=1) | (x=1,y=1) | + * | | | + * | | | + * +------------+------------+ + * . + * . + * . + */ + if (pDpyDisplayIdInfo->tile_topology.row != + pDisplayIdInfo->tile_topology.row) { + continue; + } + + if (pDpyDisplayIdInfo->tile_topology.col != + pDisplayIdInfo->tile_topology.col) { + continue; + } + + if (pDpyDisplayIdInfo->tile_location.x >= + pDpyDisplayIdInfo->tile_topology.col) { + continue; + } + + if (pDpyDisplayIdInfo->tile_location.y >= + pDpyDisplayIdInfo->tile_topology.row) { + continue; + } + + nvAssert(pDpyDisplayIdInfo->tile_capability.single_tile_behavior == + pDisplayIdInfo->tile_capability.single_tile_behavior); + + detectedTiledDisplayDpysList = + nvAddDpyIdToDpyIdList(pDpyEvo->id, detectedTiledDisplayDpysList); + + detectedTilesMask |= NVBIT((pDpyDisplayIdInfo->tile_location.y * + pDpyDisplayIdInfo->tile_topology.col) + + (pDpyDisplayIdInfo->tile_location.x)); + detectedTilesCount++; + } + + pTiledDisplayInfo->detectedDpysList = detectedTiledDisplayDpysList; + + if (detectedTilesCount != numTiles || detectedTilesMask != numTilesMask) { + pTiledDisplayInfo->isDetectComplete = FALSE; + } else { + pTiledDisplayInfo->isDetectComplete = TRUE; + } + + pTiledDisplayInfo->isCapToScaleSingleTile = + pDisplayIdInfo->tile_capability.single_tile_behavior == + NVT_SINGLE_TILE_BEHAVIOR_SCALE; + + return TRUE; +} + +/* Construct modeset request for given Tiled-display */ +static NvBool +ConstructModeRequestForTiledDisplay(const NVDispEvoRec *pDispEvo, + NVSurfaceEvoPtr pSurfaceEvo, + struct NvKmsSetModeParams *pParams, + const NvU32 dispIndex, + NVDpyIdList tiledDisplayDpysList, + NvU32 *pAvailableHeadsMask) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + /* + * Get an arbitrary dpy from tiledDisplayDpysList, + * to extract Tiled-Display information which should be the same across all + * tiles.
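+ * + * As an illustration of the viewport split below: a 2x2 tiled display + * scanning out a 3840x2160 console surface gives each tile a 1920x1080 + * viewPortSizeIn, with the tile at location (x=1, y=0) sourcing its + * pixels from point (1920, 0).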
+ */ + NVDpyEvoRec *pArbitraryDpyEvo = + nvGetOneArbitraryDpyEvo(tiledDisplayDpysList, pDispEvo); + const NVT_DISPLAYID_INFO *pPrimaryDisplayIdInfo = + &pArbitraryDpyEvo->parsedEdid.info.ext_displayid; + const NvU32 numRows = pPrimaryDisplayIdInfo->tile_topology.row; + const NvU32 numColumns = pPrimaryDisplayIdInfo->tile_topology.col; + /* + * Split entire input viewport across all tiles of Tiled-Display. + */ + const struct NvKmsSize viewPortSizeIn = { + .width = (pSurfaceEvo->widthInPixels / numColumns), + .height = (pSurfaceEvo->heightInPixels / numRows), + }; + struct NvKmsSetModeRequest *pRequest = &pParams->request; + struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + NvU32 firstClaimedHead = NV_INVALID_HEAD; + NvU32 claimedHeadMask = 0x0; + NVDpyEvoRec *pDpyEvo; + NvU32 head; + + /* + * Return failure if there are not enough heads available to construct the + * modeset request for the Tiled-Display. + */ + if (nvPopCount32(*pAvailableHeadsMask) < + nvCountDpyIdsInDpyIdList(tiledDisplayDpysList)) { + return FALSE; + } + + /* + * Return failure if the input viewport cannot be split evenly across + * tiles. + */ + if ((pSurfaceEvo->widthInPixels % numColumns != 0) || + (pSurfaceEvo->heightInPixels % numRows != 0)) { + return FALSE; + } + + FOR_ALL_EVO_DPYS(pDpyEvo, tiledDisplayDpysList, pDispEvo) { + const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo = + &pDpyEvo->parsedEdid.info.ext_displayid; + const struct NvKmsPoint viewPortPointIn = { + .x = pDpyDisplayIdInfo->tile_location.x * viewPortSizeIn.width, + .y = pDpyDisplayIdInfo->tile_location.y * viewPortSizeIn.height + }; + const NvU32 possibleHeads = *pAvailableHeadsMask & + pDpyEvo->pConnectorEvo->validHeadMask & + ~claimedHeadMask; + + if (possibleHeads == 0 || pDpyEvo->isVrHmd) { + goto failed; + } + + const NvU32 head = BIT_IDX_32(LOWESTBIT(possibleHeads)); + struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + struct NvKmsMode mode; + + if (firstClaimedHead == NV_INVALID_HEAD) { + /* + * Find mode of native dimensions reported in Tiled-Display + * information.
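+ * For the 2x2 example above, this would search for a 1920x1080 mode, + * since each tile reports its own native resolution in its DisplayID + * tile block.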
+             */
+            if (!FindMode(pDpyEvo,
+                          pSurfaceEvo->format,
+                          pPrimaryDisplayIdInfo->native_resolution.width,
+                          pPrimaryDisplayIdInfo->native_resolution.height,
+                          &mode)) {
+                goto failed;
+            }
+
+            firstClaimedHead = head;
+        } else {
+            /* All tiles should support the same set of modes */
+            mode = pRequestDisp->head[firstClaimedHead].mode;
+        }
+
+        claimedHeadMask |= NVBIT(head);
+
+        if (!InitModeOneHeadRequest(pDpyEvo,
+                                    pSurfaceEvo,
+                                    &mode,
+                                    &viewPortSizeIn,
+                                    &viewPortPointIn,
+                                    head,
+                                    pRequestHead)) {
+            goto failed;
+        }
+    }
+
+    nvAssert(!pRequest->commit);
+
+    if (!nvSetDispModeEvo(pDevEvo,
+                          pDevEvo->pNvKmsOpenDev,
+                          pRequest,
+                          &pParams->reply,
+                          TRUE /* bypassComposition */,
+                          FALSE /* doRasterLock */)) {
+        goto failed;
+    }
+    *pAvailableHeadsMask &= ~claimedHeadMask;
+
+    return TRUE;
+
+failed:
+
+    for (head = 0; head < ARRAY_LEN(pRequestDisp->head); head++) {
+        if ((NVBIT(head) & claimedHeadMask) == 0x0) {
+            continue;
+        }
+        nvkms_memset(&pRequestDisp->head[head],
+                     0,
+                     sizeof(pRequestDisp->head[head]));
+    }
+
+    return FALSE;
+}
+
+static NvBool isDpMSTModeActiveOnAnyConnector(NVDevEvoPtr pDevEvo)
+{
+    NvU32 i;
+    NVDispEvoPtr pDispEvo;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
+        NvU32 head;
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            const NVDispHeadStateEvoRec *pHeadState =
+                &pDispEvo->headState[head];
+            const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo;
+
+            if ((pConnectorEvo != NULL) &&
+                nvConnectorUsesDPLib(pConnectorEvo)) {
+                const enum NVDpLinkMode activeLinkMode =
+                    nvDPGetActiveLinkMode(pConnectorEvo->pDpLibConnector);
+
+                nvAssert(activeLinkMode != NV_DP_LINK_MODE_OFF);
+
+                if (activeLinkMode == NV_DP_LINK_MODE_MST) {
+                    return TRUE;
+                }
+            }
+        }
+    }
+
+    return FALSE;
+}
+
+/*!
+ * Attempt to restore the console.
+ *
+ * If a framebuffer console surface was successfully imported from RM, then use
+ * the core channel to set a mode that displays it.
+ *
+ * Enable as many heads as possible in a clone configuration: the first pass
+ * covers the connected boot dpys, and the second pass covers the remaining
+ * connected dpys. For each dpy:
+ *
+ * 1. Populate a modeset request to enable that dpy.
+ *
+ * 2. Validate the modeset request; if validation fails, disable scaling and
+ * retry. If validation still fails with scaling disabled, do not enable
+ * that dpy.
+ *
+ * If console restore succeeds, set pDevEvo->skipConsoleRestore to skip
+ * deallocating the core channel and triggering RM's console restore code.
+ */
+NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST)
+{
+    NvBool ret = FALSE;
+    NvU32 dispIndex;
+    NVDispEvoPtr pDispEvo;
+    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles =
+        nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev);
+    NVSurfaceEvoPtr pSurfaceEvo =
+        nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles,
+                                     pDevEvo->fbConsoleSurfaceHandle);
+    struct NvKmsSetModeParams *params;
+
+    /*
+     * If this function fails to restore the console, NVKMS frees and
+     * reallocates the core channel to attempt the console restore using
+     * Resman. The core channel reallocation may also fail, and
+     * nvEvoRestoreConsole() may then get called again from nvFreeDevEvo()
+     * when the client frees the NVKMS device.
+     *
+     * If nvEvoRestoreConsole() gets called after the core channel
+     * allocation/reallocation failure then do nothing and return
+     * early.
+     */
+    if (pDevEvo->displayHandle == 0x0) {
+        goto done;
+    }
+
+    /*
+     * If any DP-MST mode is active on any connector of this device but
+     * DP-MST is disallowed, force console-restore.
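+     * Otherwise, skipping the restore here would leave the disallowed MST
+     * streams driving the heads.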
+     */
+    if (pDevEvo->skipConsoleRestore &&
+        !allowMST && isDpMSTModeActiveOnAnyConnector(pDevEvo)) {
+        pDevEvo->skipConsoleRestore = FALSE;
+    }
+
+    if (pDevEvo->skipConsoleRestore) {
+        ret = TRUE;
+        goto done;
+    }
+
+    if (!pSurfaceEvo) {
+        // No console surface to restore.
+        goto done;
+    }
+
+    FlipBaseToNull(pDevEvo);
+
+    params = nvPreallocGet(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE,
+                           sizeof(*params));
+    nvkms_memset(params, 0, sizeof(*params));
+
+    nvDPSetAllowMultiStreaming(pDevEvo, allowMST);
+
+    // Construct the request.
+    //
+    // To start with, try to enable as many connected dpys as possible,
+    // preferring boot displays first.
+    struct NvKmsSetModeRequest *pRequest = &params->request;
+    NvBool foundDpysConfigForConsoleRestore = FALSE;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+        NvU32 availableHeadsMask = NVBIT(pDevEvo->numHeads) - 1;
+        NVDpyIdList connectedDpys = UpdateConnectedDpys(pDispEvo);
+        const NVDpyIdList connectedBootDpys =
+            nvIntersectDpyIdListAndDpyIdList(connectedDpys,
+                                             pDispEvo->bootDisplays);
+        struct NvKmsSetModeOneDispRequest *pRequestDisp =
+            &pRequest->disp[dispIndex];
+        int pass;
+
+        pRequest->requestedDispsBitMask |= NVBIT(dispIndex);
+        pRequestDisp->requestedHeadsBitMask = availableHeadsMask;
+
+        // Only enable heads on the subdevice that actually contains the
+        // console.
+        if (dispIndex != pDevEvo->vtFbInfo.subDeviceInstance) {
+            continue;
+        }
+
+        NVDpyIdList handledDpysList = nvEmptyDpyIdList();
+
+        for (pass = 0; pass < 2; pass++) {
+            NVDpyIdList candidateDpys;
+            NVDpyEvoPtr pDpyEvo;
+
+            if (availableHeadsMask == 0) {
+                break;
+            }
+
+            if (pass == 0) {
+                candidateDpys = connectedBootDpys;
+            } else {
+                candidateDpys = nvDpyIdListMinusDpyIdList(connectedDpys,
+                                                          connectedBootDpys);
+            }
+
+            FOR_ALL_EVO_DPYS(pDpyEvo, candidateDpys, pDispEvo) {
+                NvBool isTiledDisplayFound = FALSE;
+                TiledDisplayInfo tiledDisplayInfo = { };
+                NvBool isTiledDisplayEnable = FALSE;
+                const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo =
+                    pDpyEvo->parsedEdid.valid ?
+                    &pDpyEvo->parsedEdid.info.ext_displayid : NULL;
+                NvBool done = FALSE;
+
+                if (availableHeadsMask == 0) {
+                    break;
+                }
+
+                if (nvDpyIdIsInDpyIdList(pDpyEvo->id,
+                                         handledDpysList)) {
+                    continue;
+                }
+
+                isTiledDisplayFound =
+                    pDpyDisplayIdInfo != NULL &&
+                    DetectTiledDisplay(pDispEvo,
+                                       pDpyDisplayIdInfo,
+                                       nvDpyIdListMinusDpyIdList(
+                                           connectedDpys, handledDpysList),
+                                       &tiledDisplayInfo);
+
+                /*
+                 * Construct a modeset request for a Tiled-Display that does
+                 * not have the capability to scale a single tile's input
+                 * across the entire display. If that fails, fall back to
+                 * constructing a modeset request for this single dpy.
+                 */
+
+                if (isTiledDisplayFound &&
+                    tiledDisplayInfo.isDetectComplete &&
+                    !tiledDisplayInfo.isCapToScaleSingleTile) {
+
+                    done = ConstructModeRequestForTiledDisplay(
+                        pDispEvo,
+                        pSurfaceEvo,
+                        params,
+                        dispIndex,
+                        tiledDisplayInfo.detectedDpysList,
+                        &availableHeadsMask);
+                    isTiledDisplayEnable = done;
+                }
+
+                /*
+                 * If the Tiled-Display has the capability to scale a single
+                 * tile's input across the entire display, then for console
+                 * restore it is sufficient to light up any single tile and
+                 * ignore the remaining tiles.
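+                 * (NVT_SINGLE_TILE_BEHAVIOR_SCALE indicates that the sink
+                 * itself scales one tile's input across the whole display,
+                 * so a single head is enough.)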
+                 */
+
+                if (!done ||
+                    !isTiledDisplayFound ||
+                    !tiledDisplayInfo.isDetectComplete ||
+                    tiledDisplayInfo.isCapToScaleSingleTile) {
+
+                    done = ConstructModeOneHeadRequestForOneDpy(
+                        pDpyEvo,
+                        pSurfaceEvo,
+                        params,
+                        dispIndex,
+                        &availableHeadsMask);
+                    isTiledDisplayEnable =
+                        done && tiledDisplayInfo.isCapToScaleSingleTile;
+                }
+
+                handledDpysList =
+                    nvAddDpyIdToDpyIdList(pDpyEvo->id, handledDpysList);
+
+                if (isTiledDisplayEnable) {
+                    handledDpysList = nvAddDpyIdListToDpyIdList(
+                        tiledDisplayInfo.detectedDpysList,
+                        handledDpysList);
+                }
+
+                foundDpysConfigForConsoleRestore =
+                    foundDpysConfigForConsoleRestore || done;
+
+            }
+        }
+    }
+
+    /*
+     * Disable all (flip/raster) locks: dirty locking state left behind in
+     * the hardware by the NVKMS console restore causes XID errors and
+     * engine hangs on the next modeset, because NVKMS does not read back
+     * the existing display hardware state at initialization time.
+     */
+
+    if (foundDpysConfigForConsoleRestore) {
+        pRequest->commit = TRUE;
+
+        ret = nvSetDispModeEvo(pDevEvo,
+                               pDevEvo->pNvKmsOpenDev,
+                               pRequest,
+                               &params->reply,
+                               TRUE /* bypassComposition */,
+                               FALSE /* doRasterLock */);
+    }
+
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE);
+
+done:
+    nvkms_free_timer(pDevEvo->consoleRestoreTimer);
+    pDevEvo->consoleRestoreTimer = NULL;
+
+    /* If console restore failed then simply shut down all heads */
+    if (!ret) {
+        nvShutDownHeads(pDevEvo, NULL /* pTestFunc, shut down all heads */);
+    }
+
+    // If restoring the console from here succeeded, then skip triggering RM's
+    // console restore.
+    pDevEvo->skipConsoleRestore = ret;
+    return ret;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c
new file mode 100644
index 0000000..e5a5928
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c
@@ -0,0 +1,399 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* this source file contains routines for setting and moving the cursor.
+ * NV50 specific */
+
+#include "nvkms-cursor.h"
+#include "nvkms-types.h"
+#include "nvkms-dma.h"
+#include "nvkms-utils.h"
+#include "nvkms-rm.h"
+#include "nvkms-evo.h"
+#include "nvkms-vrr.h"
+#include "nvkms-surface.h"
+#include "nvkms-flip.h"
+
+#include "nvkms-rmapi.h"
+
+#include <class/cl917a.h> /* sizeof(GK104DispCursorControlPio) */
+
+#include <ctrl/ctrl5070/ctrl5070chnc.h> /* NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS */
+
+/*!
+ * Get the NVSurfaceEvoPtrs described by NvKmsSetCursorImageCommonParams.
+ *
+ * Look up the surfaces described by NvKmsSetCursorImageCommonParams,
+ * and check that the surfaces are valid for use by cursor on the
+ * given pDevEvo.
+ *
+ * \param[in]  pDevEvo       The device on which the cursor image will be set.
+ * \param[in]  pParams       The parameter structure indicating the surfaces.
+ * \param[out] pSurfaceEvos  The array of surfaces to be assigned.
+ *
+ * \return  If the parameters are valid, return TRUE and assign
+ *          pSurfaceEvos. Otherwise, return FALSE.
+ */
+NvBool nvGetCursorImageSurfaces(
+    const NVDevEvoRec *pDevEvo,
+    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
+    const struct NvKmsSetCursorImageCommonParams *pParams,
+    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES])
+{
+    NvU32 eye;
+
+    nvkms_memset(pSurfaceEvos, 0, sizeof(NVSurfaceEvoRec *) * NVKMS_MAX_EYES);
+
+    /* XXX NVKMS TODO: add support for stereo cursor */
+    nvAssert(pParams->surfaceHandle[NVKMS_RIGHT] == 0);
+
+    for (eye = 0; eye < ARRAY_LEN(pParams->surfaceHandle); eye++) {
+        if (pParams->surfaceHandle[eye] != 0) {
+            NVSurfaceEvoPtr pSurfaceEvo = NULL;
+            pSurfaceEvo =
+                nvEvoGetSurfaceFromHandle(pDevEvo,
+                                          pOpenDevSurfaceHandles,
+                                          pParams->surfaceHandle[eye],
+                                          NV_EVO_CHANNEL_MASK_CURSOR_ALL);
+            if ((pSurfaceEvo == NULL) ||
+                (pSurfaceEvo->isoType != NVKMS_MEMORY_ISO)) {
+                return FALSE;
+            }
+
+            pSurfaceEvos[eye] = pSurfaceEvo;
+        }
+    }
+
+    return TRUE;
+}
+
+static void
+SetCursorImage(NVDispEvoPtr pDispEvo,
+               const NvU32 head,
+               NVSurfaceEvoRec *pSurfaceEvoNew,
+               const struct NvKmsCompositionParams *pCursorCompParams)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVEvoUpdateState updateState = { };
+    const NvU32 sd = pDispEvo->displayOwner;
+    NvBool changed = FALSE;
+
+    NVSurfaceEvoPtr pSurfaceEvoOld =
+        pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo;
+
+    if (pSurfaceEvoNew != NULL &&
+        nvkms_memcmp(pCursorCompParams,
+                     &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams,
+                     sizeof(*pCursorCompParams)) != 0) {
+        pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams =
+            *pCursorCompParams;
+        changed = TRUE;
+    }
+
+    if (pSurfaceEvoNew != pSurfaceEvoOld) {
+
+        if (pSurfaceEvoNew != NULL) {
+            nvEvoIncrementSurfaceRefCnts(pSurfaceEvoNew);
+        }
+
+        if (pSurfaceEvoOld) {
+            nvEvoDecrementSurfaceRefCnts(pSurfaceEvoOld);
+        }
+
+        pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo = pSurfaceEvoNew;
+        changed = TRUE;
+    }
+
+    if (changed) {
+        nvPushEvoSubDevMaskDisp(pDispEvo);
+        pDevEvo->hal->SetCursorImage(
+            pDevEvo,
+            head,
+            pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo,
+            &updateState,
+            &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams);
+        nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState,
+                              TRUE /* releaseElv */);
+        nvPopEvoSubDevMask(pDevEvo);
+    }
+
+    if (pSurfaceEvoNew) {
+        nvTriggerVrrUnstallSetCursorImage(pDispEvo, changed);
+    }
+}
+
+static NvBool
+FlipCursorImage(NVDispEvoPtr pDispEvo,
+                const struct NvKmsPerOpenDev *pOpenDevice,
+                NvU32 head,
+                const struct NvKmsSetCursorImageCommonParams *pImageParams)
+{
+    const NvU32 sd = pDispEvo->displayOwner;
+    NvBool ret;
+    struct NvKmsFlipParams *pFlipParams;
+    struct NvKmsFlipRequest *pFlipRequest;
+
+    pFlipParams = nvCalloc(1, sizeof(*pFlipParams));
+    if (pFlipParams == NULL) {
+        return FALSE;
+    }
+
+    pFlipRequest = &pFlipParams->request;
+
+    pFlipRequest->sd[sd].head[head] = (struct NvKmsFlipCommonParams) {
+        .cursor = {
+            .image = *pImageParams,
+            .imageSpecified = TRUE,
+        },
+    };
+
+    pFlipRequest->sd[sd].requestedHeadsBitMask = NVBIT(head);
+
+    pFlipRequest->commit = TRUE;
+
+    ret = nvFlipEvo(pDispEvo->pDevEvo,
+                    pOpenDevice,
+                    pFlipRequest,
+                    &pFlipParams->reply,
+                    FALSE /* skipUpdate */,
+                    FALSE /* allowFlipLock */);
+
+    nvFree(pFlipParams);
+
+    return ret;
+}
+
+NvBool nvSetCursorImage(
+    NVDispEvoPtr pDispEvo,
+    const struct NvKmsPerOpenDev *pOpenDevice,
+    const NVEvoApiHandlesRec *pOpenDevSurfaceHandles,
+    NvU32 head,
+    const struct NvKmsSetCursorImageCommonParams *pParams)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES];
+    NVSurfaceEvoPtr pSurfaceEvoNew;
+    NvBool flipCursorImage = FALSE;
+
+    if (!nvGetCursorImageSurfaces(pDevEvo, pOpenDevSurfaceHandles,
+                                  pParams, pSurfaceEvos)) {
+        return FALSE;
+    }
+
+    pSurfaceEvoNew = pSurfaceEvos[NVKMS_LEFT];
+
+    /*
+     * Use a flip to apply or remove the workaround for hardware bug 2052012
+     */
+    if (NV5070_CTRL_SYSTEM_GET_CAP(
+            pDevEvo->capsBits,
+            NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH)) {
+        const NvU32 sd = pDispEvo->displayOwner;
+
+        NVSurfaceEvoPtr pSurfaceEvoOld =
+            pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo;
+
+        if ((pSurfaceEvoOld != pSurfaceEvoNew) &&
+            (pSurfaceEvoOld == NULL || pSurfaceEvoNew == NULL)) {
+            flipCursorImage = TRUE;
+        }
+    }
+
+    if (flipCursorImage) {
+        return FlipCursorImage(pDispEvo,
+                               pOpenDevice, head, pParams);
+    }
+
+    SetCursorImage(pDispEvo,
+                   head,
+                   pSurfaceEvoNew,
+                   &pParams->cursorCompParams);
+    return TRUE;
+}
+
+void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo,
+                             NvU32 head, NvS16 x, NvS16 y)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+
+    pDevEvo->cursorHal->MoveCursor(pDevEvo, sd, head, x, y);
+
+    /* If the cursor is visible, trigger VRR unstall to display the
+     * cursor at the new position */
+    if (pEvoSubDev->headState[head].cursor.pSurfaceEvo) {
+        nvTriggerVrrUnstallMoveCursor(pDispEvo);
+    }
+}
+
+void nvEvoMoveCursor(NVDispEvoPtr pDispEvo, NvU32 head,
+                     const struct NvKmsMoveCursorCommonParams *pParams)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+
+    /* XXX NVKMS TODO: validate x,y against current viewport in?
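+     * For now the position is passed straight through to the cursor
+     * hardware unvalidated.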
*/ + + pDevEvo->gpus[sd].headState[head].cursor.x = pParams->x; + pDevEvo->gpus[sd].headState[head].cursor.y = pParams->y; + + nvEvoMoveCursorInternal(pDispEvo, + head, pParams->x, pParams->y); +} + +// Allocate and map cursor position PIO channels +NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS PioChannelAllocParams = { 0 }; + NVDispEvoPtr pDispEvo; + NvU32 sd; + + PioChannelAllocParams.channelInstance = head; + // No notifiers in cursor channel + PioChannelAllocParams.hObjectNotify = 0; + pDevEvo->cursorHandle[head] = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pDevEvo->cursorHandle[head], + pDevEvo->cursorHal->klass, + &PioChannelAllocParams) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate CURSOR PIO for head %d", + head); + nvFreeCursorEvo(pDevEvo); + return FALSE; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + void *pPioDisplayChannel; + NvU32 status; + + status = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pDevEvo->cursorHandle[head], + 0, + sizeof(GK104DispCursorControlPio), + &pPioDisplayChannel, + 0); + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to map CURSOR PIO for head %d", + head); + nvFreeCursorEvo(pDevEvo); + return FALSE; + } + pEvoSubDev->cursorPio[head] = pPioDisplayChannel; + } + } + + return TRUE; +} + +// Free and unmap Cursor PIO Channels +void nvFreeCursorEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispEvoPtr pDispEvo; + NvU32 sd; + NvU32 status; + + if (pDevEvo->cursorHandle[head] == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 status; + + if (pEvoSubDev->cursorPio[head] == NULL) { + continue; + } + + status = nvRmApiUnmapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pDevEvo->cursorHandle[head], + pEvoSubDev->cursorPio[head], + 0); + + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to unmap cursor channel memory"); + } + pEvoSubDev->cursorPio[head] = NULL; + } + + status = nvRmApiFree( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pDevEvo->cursorHandle[head]); + + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to tear down Cursor channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->cursorHandle[head]); + + pDevEvo->cursorHandle[head] = 0; + } +} + +extern NVEvoCursorHAL nvEvoCursor91; +extern NVEvoCursorHAL nvEvoCursorC3; +extern NVEvoCursorHAL nvEvoCursorC5; +extern NVEvoCursorHAL nvEvoCursorC6; + +enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo) +{ + static const NVEvoCursorHALPtr cursorTable[] = { + &nvEvoCursor91, + &nvEvoCursorC3, + &nvEvoCursorC5, + &nvEvoCursorC6, + }; + + int i; + + for (i = 0; i < ARRAY_LEN(cursorTable); i++) { + if (nvRmEvoClassListCheck(pDevEvo, cursorTable[i]->klass)) { + + pDevEvo->cursorHal = cursorTable[i]; + + return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + } + } + + return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c
new file mode 100644
index 0000000..30ba3a8
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c
@@ -0,0 +1,50 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-types.h"
+
+#include <class/cl917a.h>
+
+static void MoveCursor90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                         NvS16 x, NvS16 y)
+{
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+    GK104DispCursorControlPio *pEvoCursorControl =
+        pEvoSubDev->cursorPio[head];
+
+    pEvoCursorControl->SetCursorHotSpotPointsOut[NVKMS_LEFT] =
+        DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _X, x) |
+        DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _Y, y);
+
+    pEvoCursorControl->Update =
+        DRF_DEF(917A, _UPDATE, _INTERLOCK_WITH_CORE, _DISABLE);
+}
+
+NVEvoCursorHAL nvEvoCursor91 = {
+    NV917A_CURSOR_CHANNEL_PIO,              /* klass */
+    MoveCursor90,                           /* MoveCursor */
+    NULL,                                   /* ReleaseElv */
+    {                                       /* caps */
+        256,                                /* maxSize */
+    },
+};
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c
new file mode 100644
index 0000000..354f138
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c
@@ -0,0 +1,114 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-types.h"
+#include "nvkms-cursor.h"
+
+#include <class/clc37a.h>
+#include <class/clc57a.h>
+#include <class/clc67a.h>
+
+static void WaitForFreeSpace(NVDevEvoPtr pDevEvo,
+                             NVC37ADispCursorImmControlPio *pEvoCursorControl)
+{
+    /*
+     * Wait for Free to be non-zero, indicating there is space to push a method.
+     * The only case where Free is expected to be zero is when display
+     * frontend (FE) hardware is processing a previous method.
+     * 0.1 seconds should be more than enough time to wait for that.
+     */
+    NvU64 startTime = 0;
+    const NvU64 timeout = 100000; /* 0.1 seconds */
+
+    do {
+        if (pEvoCursorControl->Free != 0) {
+            return;
+        }
+
+        if (nvExceedsTimeoutUSec(&startTime, timeout)) {
+            break;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                     "Timed out waiting for cursor PIO space");
+}
+
+static void MoveCursorC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                         NvS16 x, NvS16 y)
+{
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+    NVC37ADispCursorImmControlPio *pEvoCursorControl =
+        pEvoSubDev->cursorPio[head];
+
+    WaitForFreeSpace(pDevEvo, pEvoCursorControl);
+    pEvoCursorControl->SetCursorHotSpotPointOut[0] =
+        DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _X, x) |
+        DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _Y, y);
+
+    WaitForFreeSpace(pDevEvo, pEvoCursorControl);
+    pEvoCursorControl->Update =
+        DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE);
+}
+
+static void ReleaseElvC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head)
+{
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+    NVC37ADispCursorImmControlPio *pEvoCursorControl =
+        pEvoSubDev->cursorPio[head];
+
+    WaitForFreeSpace(pDevEvo, pEvoCursorControl);
+    pEvoCursorControl->Update =
+        DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE) |
+        DRF_DEF(C37A, _UPDATE, _RELEASE_ELV, _TRUE);
+}
+
+NVEvoCursorHAL nvEvoCursorC3 = {
+    NVC37A_CURSOR_IMM_CHANNEL_PIO,          /* klass */
+    MoveCursorC3,                           /* MoveCursor */
+    ReleaseElvC3,                           /* ReleaseElv */
+    {                                       /* caps */
+        256,                                /* maxSize */
+    },
+};
+
+NVEvoCursorHAL nvEvoCursorC5 = {
+    NVC57A_CURSOR_IMM_CHANNEL_PIO,          /* klass */
+    MoveCursorC3,                           /* MoveCursor */
+    ReleaseElvC3,                           /* ReleaseElv */
+    {                                       /* caps */
+        256,                                /* maxSize */
+    },
+};
+
+NVEvoCursorHAL nvEvoCursorC6 = {
+    NVC67A_CURSOR_IMM_CHANNEL_PIO,          /* klass */
+    MoveCursorC3,                           /* MoveCursor */
+    ReleaseElvC3,                           /* ReleaseElv */
+    {                                       /* caps */
+        256,                                /* maxSize */
+    },
+};
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c
new file mode 100644
index 0000000..c6a3c1f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c
@@ -0,0 +1,484 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-types.h"
+
+#include "nvkms-dma.h"
+#include "nvkms-utils.h"
+#include "nvkms-rmapi.h"
+#include "class/cl917d.h" // NV917DDispControlDma, NV917D_DMA_*
+#include <ctrl/ctrl0080/ctrl0080dma.h> // NV0080_CTRL_CMD_DMA_FLUSH
+#include "nvos.h"
+
+#define NV_DMA_PUSHER_CHASE_PAD 5
+#define NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC 3000000 // 3 seconds
+
+static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset);
+
+void nvDmaKickoffEvo(NVEvoChannelPtr pChannel)
+{
+    NVDmaBufferEvoPtr p = &pChannel->pb;
+    NvU32 putOffset = (NvU32)((char *)p->buffer - (char *)p->base);
+
+    if (p->put_offset == putOffset) {
+        return;
+    }
+
+    EvoCoreKickoff(p, putOffset);
+}
+
+static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset)
+{
+    NVEvoDmaPtr pDma = &push_buffer->dma;
+    int i;
+
+    nvAssert(putOffset % 4 == 0);
+    nvAssert(putOffset <= push_buffer->offset_max);
+
+    /* If needed, copy the chunk to be kicked off into each GPU's FB */
+    if (pDma->isBar1Mapping) {
+        NVDevEvoPtr pDevEvo = push_buffer->pDevEvo;
+        int sd;
+
+        NV0080_CTRL_DMA_FLUSH_PARAMS flushParams = { 0 };
+        NvU32 ret;
+
+        NvU32 *endAddress;
+
+        if (putOffset < push_buffer->put_offset) {
+            /* If we've wrapped, copy to the end of the pushbuffer */
+            nvAssert(putOffset == 0);
+            endAddress = push_buffer->base + push_buffer->offset_max /
+                         sizeof(NvU32);
+        } else {
+            endAddress = push_buffer->buffer;
+        }
+
+        for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+            NvU32 startOffset = push_buffer->put_offset / sizeof(NvU32);
+
+            NvU32 *src = push_buffer->base;
+            NvU32 *dst = pDma->subDeviceAddress[sd];
+
+            nvAssert(dst != NULL);
+
+            src += startOffset;
+            dst += startOffset;
+            while (src < endAddress) {
+                *dst++ = *src++;
+            }
+        }
+
+        /*
+         * Finally, tell RM to flush so that the data actually lands in FB
+         * before telling the GPU to fetch it.
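+         * (The CPU memory barrier below serves the same purpose for the
+         * sysmem case, ordering the pushbuffer stores ahead of the PUT
+         * pointer update.)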
+         */
+        flushParams.targetUnit = DRF_DEF(0080_CTRL_DMA, _FLUSH_TARGET,
+                                         _UNIT_FB, _ENABLE);
+
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             pDevEvo->deviceHandle,
+                             NV0080_CTRL_CMD_DMA_FLUSH,
+                             &flushParams, sizeof(flushParams));
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvAssert(!"NV0080_CTRL_CMD_DMA_FLUSH failed");
+        }
+    }
+
+#if NVCPU_IS_X86_64
+    __asm__ __volatile__ ("sfence\n\t" : : : "memory");
+#elif NVCPU_IS_FAMILY_ARM
+    __asm__ __volatile__ ("dsb sy\n\t" : : : "memory");
+#endif
+
+    /* Kick off all push buffers */
+    push_buffer->put_offset = putOffset;
+    for (i = 0; i < push_buffer->num_channels; i++) {
+        void *pControl = push_buffer->control[i];
+        nvDmaStorePioMethod(pControl, NV917D_PUT, putOffset);
+    }
+}
+
+/* Read GET from an EVO core channel */
+static NvU32 EvoCoreReadGet(NVDmaBufferEvoPtr push_buffer, int sd)
+{
+    void *pControl = push_buffer->control[sd];
+    return nvDmaLoadPioMethod(pControl, NV917D_GET);
+}
+
+/* Read GET for all devices and return the minimum or maximum */
+static NvU32 EvoReadGetOffset(NVDmaBufferEvoPtr push_buffer, NvBool minimum)
+{
+    int i;
+    NvU32 get, bestGet = 0;
+    NvS32 distanceToPut, minmaxDistanceToPut = (minimum ?
+                                                0 :
+                                                (push_buffer->dma.limit + 1));
+
+    if (push_buffer->num_channels <= 1) {
+        return EvoCoreReadGet(push_buffer, 0);
+    }
+
+    for (i = 0; i < push_buffer->num_channels; i++) {
+        get = EvoCoreReadGet(push_buffer, i);
+
+        /* Compute distance to put, accounting for wraps */
+        distanceToPut = push_buffer->put_offset - get;
+        if (distanceToPut < 0)
+            distanceToPut += push_buffer->dma.limit + 1;
+
+        /* Accumulate the min/max distance to PUT and the corresponding GET. */
+        if ((minimum && (distanceToPut >= minmaxDistanceToPut)) ||
+            (!minimum && (distanceToPut <= minmaxDistanceToPut))) {
+            minmaxDistanceToPut = distanceToPut;
+            bestGet = get;
+        }
+    }
+    return bestGet;
+}
+
+void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count)
+{
+    NVDmaBufferEvoPtr push_buffer = &pChannel->pb;
+    NvU32 getOffset;
+    NvU32 putOffset;
+    NvU64 startTime = 0;
+    const NvU64 timeout = 5000000; /* 5 seconds */
+
+    putOffset = (NvU32) ((char *)push_buffer->buffer -
+                         (char *)push_buffer->base);
+
+    if (putOffset >= push_buffer->offset_max) {
+        *(push_buffer->buffer) = 0x20000000;
+        push_buffer->buffer = push_buffer->base;
+        nvDmaKickoffEvo(pChannel);
+        putOffset = 0;
+    }
+
+    while (1) {
+        getOffset = EvoReadGetOffset(push_buffer, TRUE);
+
+        if (putOffset >= getOffset) {
+            push_buffer->fifo_free_count =
+                (push_buffer->offset_max - putOffset) >> 2;
+
+            if (push_buffer->fifo_free_count <= count) {
+                if (getOffset) {
+                    *(push_buffer->buffer) = 0x20000000;
+                    push_buffer->buffer = push_buffer->base;
+                    nvDmaKickoffEvo(pChannel);
+                    putOffset = 0;
+                }
+                else if (putOffset != push_buffer->put_offset) {
+                    nvDmaKickoffEvo(pChannel);
+                    // Put offset will have changed if a tail was inserted.
+                    putOffset = push_buffer->put_offset;
+                }
+            }
+        }
+        else {
+            getOffset = (getOffset > push_buffer->offset_max) ?
+                        push_buffer->offset_max : getOffset;
+
+            if ((putOffset + (NV_DMA_PUSHER_CHASE_PAD * 4)) >= getOffset)
+                push_buffer->fifo_free_count = 0;
+            else
+                push_buffer->fifo_free_count =
+                    ((getOffset - putOffset) >> 2) - 1;
+        }
+        if (push_buffer->fifo_free_count > count) {
+            break;
+        }
+
+        /*
+         * If we have been waiting too long, print an error message. There
+         * isn't much we can do as currently structured, so just reset
+         * startTime.
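+         * Resetting startTime rate-limits the message to once per timeout
+         * interval while we continue to wait.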
+ */ + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvEvoLogDev(push_buffer->pDevEvo, EVO_LOG_ERROR, + "Error while waiting for GPU progress: " + "0x%08x:%d %d:%d:%d:%d", + pChannel->hwclass, pChannel->instance, + count, push_buffer->fifo_free_count, getOffset, putOffset); + startTime = 0; + } + + nvkms_yield(); + } +} + +static inline void EvoWriteNotifier(volatile NvU32 *pNotifier, NvU32 value) +{ + /* + * Note that we don't need to flush to vidmem here; any subsequent GPU + * write will always be triggered by kicking off pushbuffer methods, + * which will perform a general FB flush. This does assume that the + * pushbuffer and its associated notifier surfaces are either both in + * sysmem or both in vidmem, however. + */ + + *pNotifier = value; +} + +/* Write the EVO core notifier at the given offset to the given value. */ +void nvWriteEvoCoreNotifier( + const NVDispEvoRec *pDispEvo, + NvU32 offset, + NvU32 value) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NVEvoDmaPtr pSubChannel = &pDevEvo->core->notifiersDma[sd]; + volatile NvU32 *pNotifiers = pSubChannel->subDeviceAddress[sd]; + + EvoWriteNotifier(pNotifiers + offset, value); +} + +static NvBool EvoCheckNotifier(const NVDispEvoRec *pDispEvo, + NvU32 offset, NvU32 done_base_bit, + NvU32 done_extent_bit, NvU32 done_value, + NvBool wait) +{ + const NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoDmaPtr pSubChannel = &pDevEvo->core->notifiersDma[sd]; + NVDmaBufferEvoPtr p = &pDevEvo->core->pb; + volatile NvU32 *pNotifier; + NvU64 startTime = 0; + + pNotifier = pSubChannel->subDeviceAddress[sd]; + + nvAssert(pNotifier != NULL); + pNotifier += offset; + + // While the completion notifier is not set to done_true + do { + const NvU32 val = *pNotifier; + const NvU32 done_mask = DRF_SHIFTMASK(done_extent_bit:done_base_bit); + const NvU32 done_val = done_value << done_base_bit; + + if ((val & done_mask) == done_val) { + return TRUE; + } + + if (!wait) { + return FALSE; + } + + if (!nvIsEmulationEvo(pDevEvo) && + nvExceedsTimeoutUSec( + &startTime, + NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC) && + (p->put_offset == EvoCoreReadGet(p, sd))) + { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Lost display notification (%d:0x%08x); " + "continuing.", sd, val); + EvoWriteNotifier(pNotifier, done_value << done_base_bit); + return TRUE; + } + + nvkms_yield(); + } while (TRUE); +} + +/* + * Used by NV_EVO_WAIT_FOR_NOTIFIER() and NV_EVO_WAIT_FOR_CAPS_NOTIFIER() + */ +void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset, + NvU32 done_base_bit, NvU32 done_extent_bit, + NvU32 done_value) +{ + EvoCheckNotifier(pDispEvo, offset, + done_base_bit, done_extent_bit, done_value, TRUE); +} + +/* + * Used by the EVO HAL IsNotifierComplete functions. Returns TRUE if the + * notifier is complete. 
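+ *
+ * For example, with done_base_bit == 0, done_extent_bit == 1 and
+ * done_value == 3, this waits until bits [1:0] of the notifier dword at
+ * 'offset' read back as 3.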
+ */ +NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo, NvU32 offset, + NvU32 done_base_bit, NvU32 done_extent_bit, + NvU32 done_value) +{ + return EvoCheckNotifier(pDispEvo, + offset, done_base_bit, done_extent_bit, + done_value, FALSE); +} + +void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask) +{ + NVDmaBufferEvoPtr p = &pChannel->pb; + + nvAssert(!nvDmaSubDevMaskMatchesCurrent(pChannel, mask)); + + p->currentSubDevMask = mask; + + ASSERT_DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask); + + if (p->fifo_free_count <= 1) { + nvEvoMakeRoom(pChannel, 1); + } + + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _DMA, _OPCODE, _SET_SUBDEVICE_MASK) | + DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask)); + p->fifo_free_count -= 1; +} + +/*! + * Reads CRC values from the notifier. + * + * This function will attempt to read in the first 'entry_count' CRC notifier + * entries that HW generated. The actual number of entries that are read may + * be less. + * + * \param[in] pCRC32Notifier Pointer to the CRC notifier memory. + * \param[in] entry_stride Stride of a single CRC notifier entry + * \param[in] entry_count Expected count of notifier entries to read + * \param[in] status_offset Offset for Status flags header in CRC notifier + * \param[in] field_count Number of fields to read from each CRC notifier + * entry. + * \param[in] flag_count Number of flags to read from the Status Header + * \param[in out] field_info Specifies the offset/base/extent info for each field. + * Each 'field_info' contains an output array for + * storing 'entry_count' field values. + * \param[in] flag_info Specifies the base/extent info for each flag. + * Each 'flag_info' contains a 'flag_type' for + * addressing error cases related to the flags. + * + * \return Returns the MIN(count, entry_count) of successfully + * read entries. 
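+ *
+ * For example, a notifier whose count flag lives in bits [15:8] of the
+ * status dword would be described with flag_base_bit == 8 and
+ * flag_extent_bit == 15; the value extracted from that bit range is then
+ * clamped to 'entry_count'.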
+ */
+NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+                             NvU32 entry_stride,
+                             NvU32 entry_count,
+                             NvU32 status_offset,
+                             NvU32 field_count,
+                             NvU32 flag_count,
+                             const CRC32NotifierEntryRec *field_info,
+                             const CRC32NotifierEntryFlags *flag_info)
+{
+    NvU32 count = 0;
+    NvU32 i, j, k;
+
+    nvAssert(pCRC32Notifier != NULL);
+
+    // Iterate over flags (unique at start of the CRC32Notifier Struct)
+    for (k = 0; k < flag_count; k++) {
+        CRC32NotifierEntryFlags info = flag_info[k];
+        volatile NvU32 *pFlag = pCRC32Notifier + status_offset;
+        NvU32 flag_mask =
+            DRF_SHIFTMASK((info.flag_extent_bit):(info.flag_base_bit));
+        NvU32 flag = (*pFlag & flag_mask) >> info.flag_base_bit;
+
+        switch (info.flag_type)
+        {
+            case NVEvoCrc32NotifierFlagCount:
+                count = flag;
+                // entry_count is max of each field_frame_values[i] array
+                if (count > entry_count) {
+                    nvEvoLog(EVO_LOG_WARN, "Too many CRC32 generated entries "
+                             "(%d expected; %d found)", entry_count, count);
+                    count = entry_count;
+                }
+                break;
+
+            case NVEvoCrc32NotifierFlagCrcOverflow:
+                if (flag) {
+                    count = 0;
+                    nvEvoLog(EVO_LOG_ERROR, "CRC overflow occurred: "
+                             "CRC values could not be processed fast enough.\n"
+                             "Failing flag index in status_info array: %d",
+                             k);
+
+                    return count;
+                }
+                break;
+        }
+    }
+
+    // Iterate over each collection of fields, for count pairs of values
+    for (i = 0; i < count; i++) {
+        for (j = 0; j < field_count; j++) {
+            CRC32NotifierEntryRec info = field_info[j];
+            volatile NvU32 *pEntry = pCRC32Notifier + info.field_offset;
+            NvU32 field_mask =
+                DRF_SHIFTMASK((info.field_extent_bit):(info.field_base_bit));
+
+            info.field_frame_values[i].value =
+                (*pEntry & field_mask) >> info.field_base_bit;
+            info.field_frame_values[i].supported = TRUE;
+        }
+        pCRC32Notifier += entry_stride;
+    }
+
+    return count;
+}
+
+void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+                             NvU32 offset,
+                             NvU32 reset_base_bit,
+                             NvU32 reset_value)
+{
+    const NvU32 reset_val = reset_value << reset_base_bit;
+
+    nvAssert(pCRC32Notifier != NULL);
+    pCRC32Notifier += offset;
+
+    EvoWriteNotifier(pCRC32Notifier, reset_val);
+}
+
+NvBool nvEvoWaitForCRC32Notifier(volatile NvU32 *pCRC32Notifier,
+                                 NvU32 offset,
+                                 NvU32 done_base_bit,
+                                 NvU32 done_extent_bit,
+                                 NvU32 done_value)
+{
+    const NvU32 done_mask = DRF_SHIFTMASK(done_extent_bit:done_base_bit);
+    const NvU32 done_val = done_value << done_base_bit;
+    NvU64 startTime = 0;
+
+    nvAssert(pCRC32Notifier != NULL);
+    pCRC32Notifier += offset;
+
+    do {
+        const NvU32 status = *pCRC32Notifier;
+
+        if ((status & done_mask) == done_val) {
+            return TRUE;
+        }
+
+        if (nvExceedsTimeoutUSec(
+                &startTime,
+                NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC)) {
+            return FALSE;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    return FALSE;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c
new file mode 100644
index 0000000..9667c31
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c
@@ -0,0 +1,2846 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "dp/nvdp-device.h"
+#include "dp/nvdp-connector-event-sink.h"
+
+#include "nvkms-evo.h"
+#include "nvkms-dpy.h"
+#include "nvkms-hdmi.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-types.h"
+#include "nvkms-attributes.h"
+#include "nvkms-utils.h"
+#include "nvkms-3dvision.h"
+
+#include "nv_mode_timings_utils.h"
+
+#include "nvkms-api.h"
+#include "nvkms-private.h"
+
+#include "nvos.h"
+#include "timing/dpsdp.h"
+
+#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_*
+
+#define TMDS_SINGLE_LINK_PCLK_MAX 165000
+#define TMDS_DUAL_LINK_PCLK_MAX   330000
+
+static void DpyGetDynamicDfpProperties(
+    NVDpyEvoPtr pDpyEvo,
+    const NvBool disableACPIBrightnessHotkeys);
+
+static NVEvoPassiveDpDongleType
+DpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo,
+                          NvU32 *passiveDpDongleMaxPclkKHz);
+static void
+CreateParsedEdidFromNVT_TIMING(NVT_TIMING *pTimings,
+                               NvU8 bpc,
+                               NVParsedEdidEvoPtr pParsedEdid);
+
+static NvBool ReadEdidFromDP        (const NVDpyEvoRec *pDpyEvo,
+                                     NVEdidPtr pEdid);
+static NvBool ReadEdidFromResman    (const NVDpyEvoRec *pDpyEvo,
+                                     NVEdidPtr pEdid,
+                                     NvKmsEdidReadMode readMode);
+static NvBool ValidateEdid          (const NVDpyEvoRec *pDpyEvo,
+                                     NVEdidPtr pEdid,
+                                     NVEvoInfoStringPtr pInfoString,
+                                     const NvBool ignoreEdidChecksum);
+static void LogEdid                 (NVDpyEvoPtr pDpyEvo,
+                                     NVEvoInfoStringPtr pInfoString);
+static void ClearEdid               (NVDpyEvoPtr pDpyEvo);
+static void ClearCustomEdid         (const NVDpyEvoRec *pDpyEvo);
+static void WriteEdidToResman       (const NVDpyEvoRec *pDpyEvo,
+                                     const NVEdidRec *pEdid);
+static void PatchAndParseEdid       (const NVDpyEvoRec *pDpyEvo,
+                                     NVEdidPtr pEdid,
+                                     NVParsedEdidEvoPtr,
+                                     NVEvoInfoStringPtr pInfoString);
+static void ReadAndApplyEdidEvo     (NVDpyEvoPtr pDpyEvo,
+                                     struct NvKmsQueryDpyDynamicDataParams *pParams);
+static NvBool GetFixedModeTimings   (NVDpyEvoPtr pDpyEvo);
+static NvBool ReadDSITimingsFromResman (const NVDpyEvoRec *pDpyEvo,
+                                        NVT_TIMING *pTimings,
+                                        NvU8 *pBpc);
+static void AssignDpyEvoName        (NVDpyEvoPtr pDpyEvo);
+
+static NvBool IsConnectorTMDS       (NVConnectorEvoPtr);
+
+
+static void DpyDisconnectEvo(NVDpyEvoPtr pDpyEvo)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+
+    pDispEvo->connectedDisplays =
+        nvDpyIdListMinusDpyId(pDispEvo->connectedDisplays, pDpyEvo->id);
+
+    ClearEdid(pDpyEvo);
+}
+
+static NvBool DpyConnectEvo(
+    NVDpyEvoPtr pDpyEvo,
+    struct NvKmsQueryDpyDynamicDataParams *pParams)
+{
+    NVDispEvoPtr
pDispEvo = pDpyEvo->pDispEvo; + + pDispEvo->connectedDisplays = + nvAddDpyIdToDpyIdList(pDpyEvo->id, pDispEvo->connectedDisplays); + + DpyGetDynamicDfpProperties(pDpyEvo, pParams->request.disableACPIBrightnessHotkeys); + nvDPGetDpyGUID(pDpyEvo); + + if ((pDpyEvo->pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) || + nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) { + return GetFixedModeTimings(pDpyEvo); + } else { + ReadAndApplyEdidEvo(pDpyEvo, pParams); + } + + nvUpdateInfoFrames(pDpyEvo); + + return TRUE; +} + +/* + * DpyAssignColorSpaceCaps() - parse both the CEA-861 extension block and + * the EDID 1.4 block to determine YCbCr422/444 capability. + */ +static void DpyAssignColorSpaceCaps(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ycbr422_cap = FALSE; + NvBool ycbr444_cap = FALSE; + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + + /* check for edid YCbCr422/YCbCr444 capability */ + if (pParsedEdid->valid) { + NvBool haveCEA861Block = + (pParsedEdid->info.ext861.revision != NVT_CEA861_REV_NONE); + if (haveCEA861Block) { + ycbr422_cap = !!(pParsedEdid->info.ext861.basic_caps & + NVT_CEA861_CAP_YCbCr_422); + ycbr444_cap = !!(pParsedEdid->info.ext861.basic_caps & + NVT_CEA861_CAP_YCbCr_444); + } + /* check EDID 1.4 base block */ + if (pParsedEdid->info.version == 0x104 && + pParsedEdid->info.input.isDigital) { + NvBool edid14_ycbr422 = + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_422; + NvBool edid14_ycbr444 = + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_444; + if (haveCEA861Block && ycbr422_cap != edid14_ycbr422) { + nvEvoLogInfoString(pInfoString, + "%s EDID inconsistency: the EDID 1.4 base block %s " + "YCbCr 4:2:2 support, but the CEA-861 extension block " + "%s. Assuming YCbCr 4:2:2 is supported.", + pDpyEvo->name, + edid14_ycbr422 ? "indicates" : "does not indicate", + ycbr422_cap ? "does" : "does not"); + } + if (edid14_ycbr422) { + ycbr422_cap = TRUE; + } + if (haveCEA861Block && ycbr444_cap != edid14_ycbr444) { + nvEvoLogInfoString(pInfoString, + "%s EDID inconsistency: the EDID 1.4 base block %s " + "YCbCr 4:4:4 support, but the CEA-861 extension block " + "%s. Assuming YCbCr 4:4:4 is supported.", + pDpyEvo->name, + edid14_ycbr444 ? "indicates" : "does not indicate", + ycbr444_cap ? "does" : "does not"); + } + if (edid14_ycbr444) { + ycbr444_cap = TRUE; + } + } + } + pDpyEvo->colorSpaceCaps.ycbcr422Capable = ycbr422_cap; + pDpyEvo->colorSpaceCaps.ycbcr444Capable = ycbr444_cap; +} + + + +static NvBool GetEdidOverride( + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NVEdidRec *pEdid) +{ + if ((pRequest == NULL) || + !pRequest->overrideEdid || + (pRequest->edid.bufferSize == 0) || + (pRequest->edid.bufferSize > sizeof(pRequest->edid.buffer))) { + return FALSE; + } + + pEdid->buffer = nvAlloc(pRequest->edid.bufferSize); + + if (pEdid->buffer == NULL) { + return FALSE; + } + + nvkms_memcpy(pEdid->buffer, pRequest->edid.buffer, pRequest->edid.bufferSize); + + pEdid->length = pRequest->edid.bufferSize; + + return TRUE; +} + +/*! + * Query resman for the EDID for the pDpyEvo, then parse the EDID into usable + * data. Do not modify the pDpyEvoRec. 
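+ *
+ * On success, the caller owns any pEdid->buffer and *ppParsedEdid
+ * allocations made here and is responsible for freeing them.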
+ */
+
+NvBool nvDpyReadAndParseEdidEvo(
+    const NVDpyEvoRec *pDpyEvo,
+    const struct NvKmsQueryDpyDynamicDataRequest *pRequest,
+    NvKmsEdidReadMode readMode,
+    NVEdidRec *pEdid,
+    NVParsedEdidEvoPtr *ppParsedEdid,
+    NVEvoInfoStringPtr pInfoString)
+{
+    NvBool ignoreEdid = FALSE;
+    NvBool ignoreEdidChecksum = FALSE;
+
+    if (pRequest != NULL) {
+        ignoreEdid = pRequest->ignoreEdid;
+        ignoreEdidChecksum = pRequest->ignoreEdidChecksum;
+    }
+
+    nvkms_memset(pEdid, 0, sizeof(*pEdid));
+
+    /* Just return an empty EDID if requested. */
+    if (ignoreEdid) {
+        return TRUE;
+    }
+
+    /* Load any custom EDID (or see if the DP lib has an EDID) */
+    ClearCustomEdid(pDpyEvo);
+
+    if ((pRequest && GetEdidOverride(pRequest, pEdid)) ||
+        ReadEdidFromDP(pDpyEvo, pEdid)) {
+        /* XXX [VSM] Write, clear and re-read the EDID to/from RM here to make
+         * sure RM and X agree on the final EDID bits. Once RM no longer
+         * parses the EDID, we can avoid doing this for DP devices.
+         *
+         * If it's a DisplayPort 1.2 multistream device then don't bother trying
+         * to ping-pong the EDID through RM.
+         */
+        if (nvDpyEvoIsDPMST(pDpyEvo)) {
+            goto validateEdid;
+        }
+
+        WriteEdidToResman(pDpyEvo, pEdid);
+
+        nvFree(pEdid->buffer);
+        pEdid->buffer = NULL;
+        pEdid->length = 0;
+    }
+
+    if (!ReadEdidFromResman(pDpyEvo, pEdid, readMode)) {
+        goto fail;
+    }
+
+validateEdid:
+    /* Validate the EDID */
+    if (!ValidateEdid(pDpyEvo, pEdid, pInfoString, ignoreEdidChecksum)) {
+        goto fail;
+    }
+
+    *ppParsedEdid = nvCalloc(1, sizeof(**ppParsedEdid));
+    if (*ppParsedEdid == NULL) {
+        goto fail;
+    }
+    /* Parse the EDID. Note this may *change* the EDID bytes. */
+    PatchAndParseEdid(pDpyEvo, pEdid, *ppParsedEdid, pInfoString);
+
+    return TRUE;
+
+fail:
+
+    /* We failed to read a valid EDID. Free any EDID buffer allocated above. */
+    nvFree(pEdid->buffer);
+    pEdid->buffer = NULL;
+    pEdid->length = 0;
+
+    return FALSE;
+}
+
+static void AssignIsVrHmd(NVDpyEvoRec *pDpyEvo)
+{
+    NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS params = { };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    pDpyEvo->isVrHmd = FALSE;
+
+    if (!pDpyEvo->parsedEdid.valid) {
+        return;
+    }
+
+    params.manufacturerID = pDpyEvo->parsedEdid.info.manuf_id;
+    params.productID = pDpyEvo->parsedEdid.info.product_id;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to query VR headset for %s", pDpyEvo->name);
+        return;
+    }
+
+    /*
+     * bIsDirectmode indicates any monitor that by default shouldn't be part of
+     * a desktop (VR headset, touch panel, etc.). But it's close enough for our
+     * usage of isVrHmd.
+ */ + pDpyEvo->isVrHmd = params.bIsDirectmode; +} + +static NvBool EdidHasChanged( + const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid, + const NVParsedEdidEvoRec *pParsedEdid) +{ + /* Compare EDID bytes */ + if (pEdid->length != pDpyEvo->edid.length || + nvkms_memcmp(pEdid->buffer, pDpyEvo->edid.buffer, pEdid->length) != 0) { + return TRUE; + } + + /* Compare parsed data */ + if (pParsedEdid != NULL) { + if (nvkms_memcmp(pParsedEdid, &pDpyEvo->parsedEdid, + sizeof(*pParsedEdid)) != 0) { + return TRUE; + } + } else if (pDpyEvo->parsedEdid.valid) { + return TRUE; + } + + return FALSE; +} + +static void ApplyNewEdid( + NVDpyEvoPtr pDpyEvo, + const NVEdidRec *pEdid, + const NVParsedEdidEvoRec *pParsedEdid, + NVEvoInfoStringPtr pInfoString) +{ + if (pDpyEvo->edid.buffer != NULL) { + nvFree(pDpyEvo->edid.buffer); + } + pDpyEvo->edid.buffer = pEdid->buffer; + pDpyEvo->edid.length = pEdid->length; + + if (pParsedEdid != NULL) { + nvkms_memcpy(&pDpyEvo->parsedEdid, pParsedEdid, + sizeof(pDpyEvo->parsedEdid)); + } else { + nvkms_memset(&pDpyEvo->parsedEdid, 0, sizeof(pDpyEvo->parsedEdid)); + } + + /* + * Regenerate the dpy's name, because the parsed EDID monitorName + * may have changed. + */ + AssignDpyEvoName(pDpyEvo); + + /* Write information about the parsed EDID to the infoString. */ + LogEdid(pDpyEvo, pInfoString); + + if (pDpyEvo->parsedEdid.valid) { + /* + * check 3D Vision capability + */ + nvDpyCheck3DVisionCapsEvo(pDpyEvo); + + /* + * Check HDMI VRR capability + */ + nvDpyUpdateHdmiVRRCaps(pDpyEvo); + } + + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + DpyAssignColorSpaceCaps(pDpyEvo, pInfoString); + } + + nvUpdateHdmiCaps(pDpyEvo); + + nvDpyProbeMaxPixelClock(pDpyEvo); + + AssignIsVrHmd(pDpyEvo); +} + +/* + * ReadDSITimingsFromResman() - Obtains modetimings for a DSI connector, + * passing it into pTimings + */ +static NvBool ReadDSITimingsFromResman( + const NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings, + NvU8 *pBpc) +{ + NvU32 ret; + NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS dsiModeTimingParams = { 0 }; + + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + dsiModeTimingParams.subDeviceInstance = pDispEvo->displayOwner; + + /* + * Currently displayId must be hardcoded to 0 to receive timings from RM. + * Once the corresponding DCB support is added for DSI, this hack will be + * removed and NVKMS will use the actual displayId instead. 
+ */ + dsiModeTimingParams.displayId = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING, + &dsiModeTimingParams, sizeof(dsiModeTimingParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read DSI mode timings for display device %s", + pDpyEvo->name); + return FALSE; + } + + // Converts refresh (Hz) into appropriate units for rr1k (units of 0.001Hz) + pTimings->etc.rrx1k = dsiModeTimingParams.refresh * 1000; + pTimings->HVisible = dsiModeTimingParams.hActive; + pTimings->HFrontPorch = dsiModeTimingParams.hFrontPorch; + pTimings->HSyncWidth = dsiModeTimingParams.hSyncWidth; + pTimings->HTotal = dsiModeTimingParams.hActive + + dsiModeTimingParams.hFrontPorch + + dsiModeTimingParams.hSyncWidth + + dsiModeTimingParams.hBackPorch; + + pTimings->VVisible = dsiModeTimingParams.vActive; + pTimings->VFrontPorch = dsiModeTimingParams.vFrontPorch; + pTimings->VSyncWidth = dsiModeTimingParams.vSyncWidth; + pTimings->VTotal = dsiModeTimingParams.vActive + + dsiModeTimingParams.vFrontPorch + + dsiModeTimingParams.vSyncWidth + + dsiModeTimingParams.vBackPorch; + + pTimings->pclk = HzToKHz(dsiModeTimingParams.pclkHz) / 10; + + // DSI only supports RGB444 + *pBpc = dsiModeTimingParams.bpp / 3; + + return TRUE; +} + +static NvBool ReadDPSerializerTimings( + const NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings, + NvU8 *pBpc) +{ + NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS timingParams = { }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + timingParams.subDeviceInstance = pDispEvo->displayOwner; + timingParams.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + timingParams.stream = pDpyEvo->dp.serializerStreamIndex; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_FIXED_MODE_TIMING, + &timingParams, sizeof(timingParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read fixed mode timings for display device %s", + pDpyEvo->name); + return FALSE; + } + + if (!timingParams.valid) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Fixed mode timings are invalid for display device %s", + pDpyEvo->name); + return FALSE; + } + + nvkms_memset(pTimings, 0, sizeof(NVT_TIMING)); + + pTimings->HVisible = timingParams.hActive; + pTimings->HFrontPorch = timingParams.hFrontPorch; + pTimings->HSyncWidth = timingParams.hSyncWidth; + pTimings->HTotal = timingParams.hActive + timingParams.hFrontPorch + + timingParams.hSyncWidth + timingParams.hBackPorch; + + pTimings->VVisible = timingParams.vActive; + pTimings->VFrontPorch = timingParams.vFrontPorch; + pTimings->VSyncWidth = timingParams.vSyncWidth; + pTimings->VTotal = timingParams.vActive + timingParams.vFrontPorch + + timingParams.vSyncWidth + timingParams.vBackPorch; + + pTimings->pclk = timingParams.pclkKHz / 10; + pTimings->etc.rrx1k = timingParams.rrx1k; + + *pBpc = 0; + + return TRUE; +} + +static NvBool GetFixedModeTimings( + NVDpyEvoPtr pDpyEvo) +{ + NVT_TIMING timings = { }; + NvBool ret = FALSE; + NvU8 bpc; + + if (pDpyEvo->pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + ret = ReadDSITimingsFromResman(pDpyEvo, &timings, &bpc); + } else if (nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) { + ret = ReadDPSerializerTimings(pDpyEvo, &timings, &bpc); + } + + if (!ret) { + return ret; + } + + CreateParsedEdidFromNVT_TIMING(&timings, bpc, 
&pDpyEvo->parsedEdid);
+
+    AssignDpyEvoName(pDpyEvo);
+    nvDpyProbeMaxPixelClock(pDpyEvo);
+
+    return TRUE;
+}
+
+static void ReadAndApplyEdidEvo(
+    NVDpyEvoPtr pDpyEvo,
+    struct NvKmsQueryDpyDynamicDataParams *pParams)
+{
+    const struct NvKmsQueryDpyDynamicDataRequest *pRequest = NULL;
+    NVEdidRec edid = {NULL, 0};
+    NVParsedEdidEvoPtr pParsedEdid = NULL;
+    NVEvoInfoStringRec infoString;
+    NvBool readSuccess;
+
+    if (pParams != NULL) {
+        nvInitInfoString(&infoString, pParams->reply.edid.infoString,
+                         sizeof(pParams->reply.edid.infoString));
+        pRequest = &pParams->request;
+    } else {
+        nvInitInfoString(&infoString, NULL, 0);
+    }
+
+    readSuccess = nvDpyReadAndParseEdidEvo(pDpyEvo, pRequest,
+                                           NVKMS_EDID_READ_MODE_DEFAULT,
+                                           &edid, &pParsedEdid, &infoString);
+
+    if (pParams != NULL) {
+        pParams->reply.edid.valid = readSuccess;
+    }
+
+    if (EdidHasChanged(pDpyEvo, &edid, pParsedEdid)) {
+        /*
+         * Do not plumb pRequest into ApplyNewEdid(). This helps ensure that
+         * its operation is purely a function of the EDID and parsed EDID data,
+         * which means that if we get into this function again with the same
+         * EDID and parsed EDID data, we can safely skip ApplyNewEdid() without
+         * worrying that this request has different parameters (like CustomEdid
+         * or mode validation overrides).
+         */
+        ApplyNewEdid(pDpyEvo, &edid, pParsedEdid, &infoString);
+    } else {
+        nvFree(edid.buffer);
+    }
+    nvFree(pParsedEdid);
+}
+
+
+/*!
+ * Get the maximum allowed pixel clock for pDpyEvo.
+ *
+ * This depends on the following conditions:
+ *
+ * - The RM's returned value is sufficient for non-TMDS connectors
+ * - For HDMI, the SOR capabilities exceed the RM's returned value to allow
+ *   for HDMI 1.4 modes that exceed 165MHz on a single link, or
+ *   for HDMI 2.1 modes if the source and sink are capable of FRL
+ * - For DVI, the user is allowed to set an option to exceed the 165MHz
+ *   per-TMDS limit if the SOR capabilities allow it
+ * - Contrary to the above, passive DP->DVI and DP->HDMI dongles have their
+ *   own limits
+ */
+void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo)
+{
+    NvU32 ret;
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
+    NvU32 displayOwner = pDispEvo->displayOwner;
+    NVEvoPassiveDpDongleType passiveDpDongleType;
+    NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS params = { 0 };
+    NvU32 passiveDpDongleMaxPclkKHz;
+
+    /* First, get the RM-reported value. */
+
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+    params.subDeviceInstance = pDispEvo->displayOwner;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failure reading maximum pixel clock value "
+                     "for display device %s.", pDpyEvo->name);
+        pDpyEvo->maxPixelClockKHz = 100000;
+        pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz;
+        return;
+    }
+
+    pDpyEvo->maxPixelClockKHz = params.orPclkLimit;
+    pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz;
+
+    /*
+     * The RM's returned max pclk value is sufficient for non-TMDS
+     * connectors
+     */
+    if (!IsConnectorTMDS(pConnectorEvo)) {
+        return;
+    }
+
+    /*
+     * The RM returns a 165MHz max pclk for single link TMDS and 330MHz
+     * max pclk for dual link TMDS.
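+     * (Those figures are the per-TMDS-link limits mandated by the DVI
+     * specification.)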
We can exceed that in the + * following cases: + * + * - HDMI 1.4a 4Kx2K and 1080p60hz frame packed stereo modes + * require a 297MHz single TMDS link pixel clock, and HDMI 2.0 + * allows an even higher pixel clock. + * - While the DVI spec mandates a limit of 165MHz per TMDS link, + * since certain GPUs and certain displays support DVI + * connections at higher pixel clocks, we allow users to + * override this limit to allow validation of higher maximum + * pixel clocks over DVI. + */ + if (pDevEvo->gpus != NULL) { + + NVEvoSorCaps *sorCaps = pDevEvo->gpus[displayOwner].capabilities.sor; + NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + /* + * With the SOR crossbar, pConnectorEvo->or.mask is unknown, + * and may change at modeset time. Use the caps of SOR 0 + * for validation. + */ + orIndex = 0; + } + + if (nvDpyIsHdmiEvo(pDpyEvo)) { + pDpyEvo->maxPixelClockKHz = + pDpyEvo->maxSingleLinkPixelClockKHz = + sorCaps[orIndex].maxTMDSClkKHz; + + nvkms_memset(&pDpyEvo->hdmi.srcCaps, 0, sizeof(pDpyEvo->hdmi.srcCaps)); + nvkms_memset(&pDpyEvo->hdmi.sinkCaps, 0, sizeof(pDpyEvo->hdmi.sinkCaps)); + + if (pDevEvo->hal->caps.supportsHDMIFRL) { + /* + * This function is called multiple times for each pDpyEvo: + * - Once when the dpy is created + * - Once when the dpy is connected + * - Once when the dpy is disconnected + * In the first and third cases, we don't yet have an EDID so + * we don't know if the sink supports HDMI FRL. Assume it + * doesn't, since if we try to set a mode anyway there won't be + * a sink to do link training with. + */ + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.hdmiForumInfo.max_FRL_Rate) { + /* + * An SOR needs to be assigned temporarily to do FRL + * training. + * Since the only other SORs in use at the moment (if any) + * are those driving heads, we don't need to exclude RM + * from selecting any SOR, so an sorExcludeMask of 0 is + * appropriate. + */ + if (nvAssignSOREvo(pConnectorEvo, 0)) { + if (nvHdmiFrlAssessLink(pDpyEvo)) { + + /* + * Note that although we "assessed" the link above, + * the maximum pixel clock set here doesn't take + * that into account -- it's the maximum the GPU + * hardware is capable of on the most capable link, + * mostly for reporting purposes. + * + * The calculation for if a given mode can fit in + * the assessed FRL configuration is complex and + * depends on things like the amount of blanking, + * rather than a simple pclk cutoff. So, we query + * the hdmi library when validating each individual + * mode, when we know actual timings. + */ + pDpyEvo->maxPixelClockKHz = + /* + * This comes from the Windows display driver: + * (4 lanes * 12Gb per lane * + * FRL encoding i.e 16/18) / 1K + */ + ((4 * 12 * 1000 * 1000 * 16) / 18); + } + } + } + } + } else { + /* + * Connector and SOR both must be capable to drive dual-TMDS + * resolutions. + */ + NvBool bDualTMDS = sorCaps[orIndex].dualTMDS && + FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _LINK, _DUAL, + pDpyEvo->pConnectorEvo->dfpInfo); + + pDpyEvo->maxPixelClockKHz = (bDualTMDS ? + TMDS_DUAL_LINK_PCLK_MAX : + TMDS_SINGLE_LINK_PCLK_MAX); + + pDpyEvo->maxSingleLinkPixelClockKHz = TMDS_SINGLE_LINK_PCLK_MAX; + + if (pDpyEvo->allowDVISpecPClkOverride) { + pDpyEvo->maxPixelClockKHz = sorCaps[orIndex].maxTMDSClkKHz * + (bDualTMDS ? 
2 : 1);
+ pDpyEvo->maxSingleLinkPixelClockKHz =
+ sorCaps[orIndex].maxTMDSClkKHz;
+ }
+ }
+ }
+
+ /*
+ * Passive DP->DVI and DP->HDMI dongles may have a limit more
+ * restrictive than the one described above. Check whether one of
+ * these dongles is in use, and override the limit accordingly.
+ */
+ passiveDpDongleType = DpyGetPassiveDpDongleType(pDpyEvo,
+ &passiveDpDongleMaxPclkKHz);
+
+ if (passiveDpDongleType != NV_EVO_PASSIVE_DP_DONGLE_UNUSED) {
+ pDpyEvo->maxPixelClockKHz = NV_MIN(passiveDpDongleMaxPclkKHz,
+ pDpyEvo->maxPixelClockKHz);
+ pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz;
+ }
+}
+
+static void DpyGetDynamicDfpProperties(
+ NVDpyEvoPtr pDpyEvo,
+ const NvBool disableACPIBrightnessHotkeys)
+{
+ struct NvKmsGetDpyAttributeParams params;
+
+ if (disableACPIBrightnessHotkeys) {
+ return;
+ }
+
+ nvkms_memset(&params, 0, sizeof(params));
+ params.request.attribute = NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS;
+
+ pDpyEvo->hasBacklightBrightness =
+ nvGetDpyAttributeEvo(pDpyEvo, &params);
+}
+
+/*
+ * DpyGetStaticDfpProperties() - get static DFP properties: whether the
+ * DFP is HDMI capable and whether it is an internal panel
+ */
+
+static void DpyGetStaticDfpProperties(NVDpyEvoPtr pDpyEvo)
+{
+ NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
+
+ if (pConnectorEvo->legacyType != NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) {
+ return;
+ }
+
+ if (nvDpyEvoIsDPMST(pDpyEvo)) {
+ // None of this stuff can be queried directly for dynamic DP MST
+ // displays.
+ // XXX DP MST: Should we fill in these fields somehow anyway?
+ return;
+ }
+
+ pDpyEvo->internal = FALSE;
+ pDpyEvo->hdmiCapable = FALSE;
+
+ if (pConnectorEvo->dfpInfo == 0x0) {
+ return;
+ }
+
+ /* Check if the connected DFP is HDMI capable */
+
+ if (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _HDMI_CAPABLE, _TRUE,
+ pConnectorEvo->dfpInfo)) {
+ pDpyEvo->hdmiCapable = TRUE;
+ }
+
+ pDpyEvo->internal = nvConnectorIsInternal(pDpyEvo->pConnectorEvo);
+}
+
+/*!
+ * Return true if the connector is single or dual link TMDS (not CRT, not DP).
+ */
+static NvBool IsConnectorTMDS(NVConnectorEvoPtr pConnectorEvo)
+{
+ NvU32 protocol = pConnectorEvo->or.protocol;
+ return ((pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+ ((protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A) ||
+ (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B) ||
+ (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS)));
+}
+
+/*!
+ * Query RM for the passive DisplayPort dongle type; this can influence
+ * the maximum pixel clock allowed on that display.
+ */
+static NVEvoPassiveDpDongleType
+DpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo,
+ NvU32 *passiveDpDongleMaxPclkKHz)
+{
+ NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS params = { 0 };
+ NvU32 ret;
+ NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo;
+ NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+ NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+ NVEvoPassiveDpDongleType passiveDpDongleType =
+ NV_EVO_PASSIVE_DP_DONGLE_UNUSED;
+
+ // The rmcontrol below fails if we try querying the dongle info on
+ // non-TMDS connectors. 
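+ // (Passive DP->DVI and DP->HDMI dongles convert the DP stream to TMDS
+ // signaling, so the dongle query is only meaningful for connectors
+ // that can drive TMDS.)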
+ if (!IsConnectorTMDS(pConnectorEvo)) {
+ return passiveDpDongleType;
+ }
+
+ params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+ params.subDeviceInstance = pDispEvo->displayOwner;
+ params.flags = 0;
+
+ ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+ pDevEvo->displayCommonHandle,
+ NV0073_CTRL_CMD_DFP_GET_DISPLAYPORT_DONGLE_INFO,
+ &params, sizeof(params));
+
+ if (ret != NVOS_STATUS_SUCCESS) {
+ nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+ "Failure reading DP dongle info "
+ "for display device %s.", pDpyEvo->name);
+ return passiveDpDongleType;
+ }
+
+ if (FLD_TEST_DRF(0073_CTRL_DFP,
+ _GET_DISPLAYPORT_DONGLE_INFO_FLAGS,
+ _ATTACHED, _TRUE, params.flags))
+ {
+ if (FLD_TEST_DRF(0073_CTRL_DFP,
+ _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, _TYPE, _DP2DVI,
+ params.flags)) {
+
+ passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2DVI;
+
+ if (passiveDpDongleMaxPclkKHz) {
+ *passiveDpDongleMaxPclkKHz = TMDS_SINGLE_LINK_PCLK_MAX;
+ }
+ } else if (FLD_TEST_DRF(0073_CTRL_DFP,
+ _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, _TYPE, _DP2HDMI,
+ params.flags)) {
+ if (FLD_TEST_DRF(0073_CTRL_DFP,
+ _GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE, _TYPE, _1,
+ params.flags)) {
+
+ passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_1;
+
+ if (passiveDpDongleMaxPclkKHz) {
+ *passiveDpDongleMaxPclkKHz = params.maxTmdsClkRateHz / 1000;
+ }
+ } else if (FLD_TEST_DRF(0073_CTRL_DFP,
+ _GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE, _TYPE, _2,
+ params.flags)) {
+
+ passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_2;
+
+ if (passiveDpDongleMaxPclkKHz) {
+ *passiveDpDongleMaxPclkKHz = params.maxTmdsClkRateHz / 1000;
+ }
+ }
+ // Other dongle types, such as the LFH_DVI (DMS59-DVI) and LFH_VGA
+ // (DMS59-VGA) breakout dongles, are treated as native connections,
+ // so no passiveDpDongleType is tracked for them here.
+ }
+ }
+
+ return passiveDpDongleType;
+}
+
+
+/*!
+ * Validate an NVKMS client-specified NvKmsModeValidationFrequencyRanges.
+ */
+static NvBool ValidateFrequencyRanges(
+ const struct NvKmsModeValidationFrequencyRanges *pRanges)
+{
+ NvU32 i;
+
+ if (pRanges->numRanges >= ARRAY_LEN(pRanges->range)) {
+ return FALSE;
+ }
+
+ for (i = 0; i < pRanges->numRanges; i++) {
+ if (pRanges->range[i].high < pRanges->range[i].low) {
+ return FALSE;
+ }
+ if (pRanges->range[i].high == 0) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+
+static void DpySetValidSyncsHelper(
+ struct NvKmsModeValidationFrequencyRanges *pRanges,
+ const NVParsedEdidEvoRec *pParsedEdid,
+ NvBool isHorizSync, NvBool ignoreEdidSource)
+{
+ NvBool found = FALSE;
+ NvU32 edidMin = 0, edidMax = 0;
+
+ if (pParsedEdid->valid) {
+ if (isHorizSync) {
+ edidMin = pParsedEdid->limits.min_h_rate_hz;
+ edidMax = pParsedEdid->limits.max_h_rate_hz;
+ } else {
+ edidMin = pParsedEdid->limits.min_v_rate_hzx1k;
+ edidMax = pParsedEdid->limits.max_v_rate_hzx1k;
+ }
+ }
+
+ /* If the client-specified ranges are invalid, clear them. */
+
+ if ((pRanges->source ==
+ NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID) ||
+ (pRanges->source ==
+ NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID)) {
+
+ if (!ValidateFrequencyRanges(pRanges)) {
+ nvkms_memset(pRanges, 0, sizeof(*pRanges));
+ pRanges->source = NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_NONE;
+ }
+ }
+
+ /* Use CLIENT_BEFORE_EDID, if provided. */
+
+ if (pRanges->source ==
+ NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID) {
+ found = TRUE;
+ }
+
+ /*
+ * Otherwise, if EDID-reported sync ranges are available, use
+ * those. 
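+ * (These limits come from the EDID's range-limit information, parsed
+ * earlier into pParsedEdid->limits.)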
+ */ + if (!found && + !ignoreEdidSource && + (edidMin != 0) && (edidMax != 0)) { + + pRanges->numRanges = 1; + pRanges->range[0].low = edidMin; + pRanges->range[0].high = edidMax; + pRanges->source = NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID; + found = TRUE; + } + + /* + * Otherwise, use CLIENT_AFTER_EDID, if available. + */ + if (!found && + (pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID)) { + found = TRUE; + } + + /* + * Finally, fall back to conservative defaults if we could not + * find anything else; this will validate 1024x768 @ 60Hz. + */ + if (!found) { + + pRanges->numRanges = 1; + + if (isHorizSync) { + pRanges->range[0].low = 28000; + pRanges->range[0].high = 55000; + } else { + pRanges->range[0].low = 43000; + pRanges->range[0].high = 72000; + } + + pRanges->source = + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CONSERVATIVE_DEFAULTS; + } +} + + +/*! + * Assign NvKmsModeValidationValidSyncs + * + * Assign the HorizSync and VertRefresh ranges in + * NvKmsModeValidationValidSyncs. The priority order is: + * + * (1) Any HorizSync and VertRefresh provided by the client that + * overrides the EDID (CLIENT_BEFORE_EDID). + * (2) Valid range information from the EDID. + * (3) Any HorizSync and VertRefresh specified by the client as a + * fallback for the EDID (CLIENT_AFTER_EDID). + * (4) Conservative builtin defaults. + * + * HorizSync and VertRefresh can come from different sources. (1) and + * (3) are provided through pValidSyncs. (2) and (4) get written to + * pValidSyncs. + * + * \param[in] pDpy The dpy whose EDID will be used. + * \param[in,out] pValidSyncs This is initialized by the client, and + * will be updated based on the frequency + * range priority described above. + */ +void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsModeValidationValidSyncs *pValidSyncs) +{ + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + + DpySetValidSyncsHelper(&pValidSyncs->horizSyncHz, + pParsedEdid, + TRUE, /* isHorizSync */ + pValidSyncs->ignoreEdidSource); + + DpySetValidSyncsHelper(&pValidSyncs->vertRefreshHz1k, + pParsedEdid, + FALSE, /* isHorizSync */ + pValidSyncs->ignoreEdidSource); +} + + +/* + * ReadEdidFromDP() - query the EDID for the specified display device from the + * DP lib. 
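+ *
+ * Returns TRUE and stores a freshly allocated buffer in pEdid on success;
+ * on failure, any partially allocated buffer is freed and pEdid is left
+ * untouched.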
+ */ + +static NvBool ReadEdidFromDP(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NvU8 *pNewEdid = NULL; + int newEdidLength; + + if (!nvDpyUsesDPLib(pDpyEvo)) { + return FALSE; + } + + /* get size and allocate space for the EDID data */ + newEdidLength = nvDPGetEDIDSize(pDpyEvo); + if (newEdidLength == 0) { + goto fail; + } + + pNewEdid = nvCalloc(newEdidLength, 1); + + if (pNewEdid == NULL) { + goto fail; + } + + if (!nvDPGetEDID(pDpyEvo, pNewEdid, newEdidLength)) { + goto fail; + } + + pEdid->buffer = pNewEdid; + pEdid->length = newEdidLength; + + return TRUE; + + fail: + + nvFree(pNewEdid); + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read EDID for display device %s", + pDpyEvo->name); + return FALSE; + +} // ReadEdidFromDP() + + + +/* + * ReadEdidFromResman() - query the EDID for the specified display device + */ + +static NvBool ReadEdidFromResman(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NvKmsEdidReadMode readMode) +{ + NvU32 ret; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *getEdidParams; + int retryEdidReadCount = 0; + NvBool success = FALSE; + + if (nvDpyEvoIsDPMST(pDpyEvo)) { + // RM doesn't track this device, so leave the EDID alone. + return TRUE; + } + + getEdidParams = nvCalloc(sizeof(*getEdidParams), 1); + if (getEdidParams == NULL) { + goto done; + } + + query_edid: + + getEdidParams->subDeviceInstance = pDispEvo->displayOwner; + getEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + getEdidParams->flags = NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO; + + if (readMode == NVKMS_EDID_READ_MODE_ACPI) { + getEdidParams->flags |= DRF_DEF(0073_CTRL_SPECIFIC, _GET_EDID_FLAGS, + _DISPMUX_READ_MODE, _ACPI); + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, + getEdidParams, sizeof(*getEdidParams)); + + if ((ret != NVOS_STATUS_SUCCESS) || (getEdidParams->bufferSize <= 0)) { + /* WAR for Bug 777646: retry reading the EDID on error for DP + * devices to avoid possible TDR assertion in the RM. + * + * XXX This should be moved to the DP library. + */ + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) && + (retryEdidReadCount < NV_DP_READ_EDID_RETRIES)) { + retryEdidReadCount++; + + nvkms_usleep(NV_DP_REREAD_EDID_DELAY_USEC); + + goto query_edid; + } + goto done; + } + + pEdid->buffer = nvCalloc(getEdidParams->bufferSize, 1); + + if (pEdid->buffer == NULL) { + goto done; + } + + nvkms_memcpy(pEdid->buffer, &getEdidParams->edidBuffer, + getEdidParams->bufferSize); + pEdid->length = getEdidParams->bufferSize; + + success = TRUE; + + done: + + nvFree(getEdidParams); + + if (!success) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read EDID for display device %s", + pDpyEvo->name); + } + + return success; +} // ReadEdidFromResman() + + +/* + * Check if the EDID meets basic validation criteria. 
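+ *
+ * Each problem found is logged to pInfoString; the return value reflects
+ * only the validation errors that are not explicitly ignored below.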
+ */
+static NvBool ValidateEdid(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid,
+ NVEvoInfoStringPtr pInfoString,
+ const NvBool ignoreEdidChecksum)
+{
+ NvU32 status, tmpStatus;
+ int errorCount = 0;
+
+ status = NvTiming_EDIDValidationMask(pEdid->buffer, pEdid->length, TRUE);
+ tmpStatus = status;
+
+ if (status == 0) {
+ return TRUE;
+ }
+
+ nvEvoLogInfoString(pInfoString,
+ "The EDID read for display device %s is invalid:",
+ pDpyEvo->name);
+
+ /*
+ * Warn about every error we know about, masking it out of tmpStatus, then
+ * warn about an unknown error if there are still any bits remaining in
+ * tmpStatus.
+ */
+ if (status &
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION)) {
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID has an unrecognized version.");
+ tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION);
+ errorCount++;
+ }
+
+ if (status &
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE)) {
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID is too short.");
+ tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE);
+ errorCount++;
+ }
+
+ if (status &
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM)) {
+ /*
+ * XXX NVKMS TODO: massage wording to not reference X
+ * configuration option.
+ */
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID has a bad checksum. %s",
+ ignoreEdidChecksum ? "This error will be ignored. Note "
+ "that an EDID with a bad checksum could indicate a "
+ "corrupt EDID. A corrupt EDID may have mode timings "
+ "beyond the capabilities of your display, and could "
+ "damage your hardware. Please use with care." :
+ "The \"IgnoreEDIDChecksum\" X configuration option may "
+ "be used to attempt using mode timings in this EDID in "
+ "spite of this error. A corrupt EDID may have mode "
+ "timings beyond the capabilities of your display, and "
+ "could damage your hardware. Please use with care.");
+ tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM);
+ errorCount++;
+ }
+
+ if (status &
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT)) {
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID has a bad range limit.");
+ tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT);
+ errorCount++;
+ }
+
+ if (status &
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD)) {
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID has a bad detailed timing descriptor.");
+ tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD);
+ errorCount++;
+ }
+
+ if (status &
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD)) {
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID has an extension block with a bad detailed "
+ "timing descriptor.");
+ tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD);
+ errorCount++;
+ }
+
+ if (status &
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT)) {
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID extension block is invalid.");
+ tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT);
+ errorCount++;
+ }
+
+ if (tmpStatus) {
+ nvEvoLogInfoString(pInfoString,
+ "- The EDID has an unrecognized error.");
+ errorCount++;
+ }
+
+ /*
+ * Unset the bits for errors we don't care about (invalid DTDs in the
+ * extension block, or checksum errors if ignoreEdidChecksum is in use),
+ * then return TRUE only if no errors we care about remain. 
+ */
+ if (ignoreEdidChecksum) {
+ status &= ~(NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM));
+ }
+
+ if (status ==
+ NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD)) {
+ /*
+ * If the only problem with the EDID is invalid DTDs in the extension
+ * block, don't reject the EDID; those timings can be safely skipped in
+ * NvTiming_ParseEDIDInfo()/parse861ExtDetailedTiming()
+ */
+ nvEvoLogInfoString(pInfoString,
+ "This bad detailed timing descriptor will be ignored.");
+ }
+
+ status &= ~(NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD));
+
+ return (status == 0);
+}
+
+static const char *GetColorDepthBpc(NVT_COLORDEPTH colorDepth)
+{
+ static char buffer[32];
+ NVEvoInfoStringRec infoString;
+ NvBool first = TRUE;
+ int i;
+
+ struct {
+ NvBool val;
+ int bpc;
+ } table[] = {
+ { colorDepth.bpc.bpc6, 6 },
+ { colorDepth.bpc.bpc8, 8 },
+ { colorDepth.bpc.bpc10, 10 },
+ { colorDepth.bpc.bpc12, 12 },
+ { colorDepth.bpc.bpc14, 14 },
+ { colorDepth.bpc.bpc16, 16 },
+ };
+
+ nvInitInfoString(&infoString, buffer, sizeof(buffer));
+
+ buffer[0] = '\0';
+
+ for (i = 0; i < ARRAY_LEN(table); i++) {
+ if (table[i].val) {
+ nvEvoLogInfoStringRaw(&infoString, "%s%d",
+ first ? "" : ", ",
+ table[i].bpc);
+ first = FALSE;
+ }
+ }
+
+ return buffer;
+}
+
+
+/*
+ * Log information about the EDID.
+ */
+
+static void LogEdid(NVDpyEvoPtr pDpyEvo, NVEvoInfoStringPtr pInfoString)
+{
+ int k;
+ NVParsedEdidEvoPtr pParsedEdid;
+
+ static const struct {
+ NVT_TIMING_TYPE type;
+ const char *name;
+ } mode_type_table[] = {
+ { NVT_TYPE_DMT, "Display Monitor Timings" },
+ { NVT_TYPE_GTF, "Generalized Timing Formula Timings" },
+ { NVT_TYPE_ASPR, "ASPR Timings" },
+ { NVT_TYPE_NTSC_TV, "NTSC Timings" },
+ { NVT_TYPE_PAL_TV, "PAL Timings" },
+ { NVT_TYPE_CVT, "Coordinated Video Timings" },
+ { NVT_TYPE_CVT_RB, "Reduced Blanking Coordinated Video Timings" },
+ { NVT_TYPE_CUST, "Customized Timings" },
+ { NVT_TYPE_EDID_STD, "Standard Timings" },
+ { NVT_TYPE_EDID_DTD, "Detailed Timings" },
+ { NVT_TYPE_EDID_CVT, "Coordinated Video Timings" },
+ { NVT_TYPE_EDID_EST, "Established Timings" },
+ { NVT_TYPE_EDID_861ST, "CEA-861B Timings" },
+ { NVT_TYPE_NV_PREDEFINED, "Predefined Timings" },
+ { NVT_TYPE_DMT_RB, "Reduced Blanking Display Monitor Timings" },
+ { NVT_TYPE_EDID_EXT_DTD, "Extension Block Detailed Timings" },
+ { NVT_TYPE_SDTV, "SDTV Timings" },
+ { NVT_TYPE_HDTV, "HDTV Timings" },
+ { NVT_TYPE_SMPTE, "SMPTE Timings" },
+ { NVT_TYPE_EDID_VTB_EXT, "VTB Extension Timings" },
+ { NVT_TYPE_EDID_VTB_EXT_STD, "VTB Extension Standard Timings" },
+ { NVT_TYPE_EDID_VTB_EXT_DTD, "VTB Extension Detailed Timings" },
+ { NVT_TYPE_EDID_VTB_EXT_CVT, "VTB Extension CVT Timings" },
+ { NVT_TYPE_HDMI_STEREO, "HDMI Stereo Timings" },
+ { NVT_TYPE_DISPLAYID_1, "DisplayID Type 1 Timings" },
+ { NVT_TYPE_DISPLAYID_2, "DisplayID Type 2 Timings" },
+ { NVT_TYPE_HDMI_EXT, "HDMI Extended Resolution Timings" },
+ { NVT_TYPE_CUST_AUTO, "Customized Auto Timings" },
+ { NVT_TYPE_CUST_MANUAL, "Customized Manual Timings" },
+ { NVT_TYPE_CVT_RB_2, "Reduced Blanking Coordinated Video Timings, v2" },
+ { NVT_TYPE_DMT_RB_2, "Display Monitor Timings, V2" },
+ { NVT_TYPE_DISPLAYID_7, "DisplayID Type 7 Timings" },
+ { NVT_TYPE_DISPLAYID_8, "DisplayID Type 8 Timings" },
+ { NVT_TYPE_DISPLAYID_9, "DisplayID Type 9 Timings" },
+ { NVT_TYPE_DISPLAYID_10, "DisplayID Type 10 Timings" },
+ { NVT_TYPE_CVT_RB_3, "Reduced Blanking Coordinated Video Timings, v3" },
+ };
+
+ /*
+ * Trigger a warning if new NVT_TIMING_TYPE values are added
+ * without updating this function.
+ *
+ * If a warning is produced about unhandled enum in the below
+ * switch statement, please update both the switch statement and
+ * mode_type_table[], or contact the sw-nvkms email alias.
+ */
+ if (pDpyEvo->parsedEdid.valid) {
+ for (k = 0; k < pDpyEvo->parsedEdid.info.total_timings; k++) {
+ NvU32 status = pDpyEvo->parsedEdid.info.timing[k].etc.status;
+ NVT_TIMING_TYPE type = NVT_GET_TIMING_STATUS_TYPE(status);
+
+ switch (type) {
+ case NVT_TYPE_DMT:
+ case NVT_TYPE_GTF:
+ case NVT_TYPE_ASPR:
+ case NVT_TYPE_NTSC_TV:
+ case NVT_TYPE_PAL_TV:
+ case NVT_TYPE_CVT:
+ case NVT_TYPE_CVT_RB:
+ case NVT_TYPE_CUST:
+ case NVT_TYPE_EDID_DTD:
+ case NVT_TYPE_EDID_STD:
+ case NVT_TYPE_EDID_EST:
+ case NVT_TYPE_EDID_CVT:
+ case NVT_TYPE_EDID_861ST:
+ case NVT_TYPE_NV_PREDEFINED:
+ case NVT_TYPE_DMT_RB:
+ case NVT_TYPE_EDID_EXT_DTD:
+ case NVT_TYPE_SDTV:
+ case NVT_TYPE_HDTV:
+ case NVT_TYPE_SMPTE:
+ case NVT_TYPE_EDID_VTB_EXT:
+ case NVT_TYPE_EDID_VTB_EXT_STD:
+ case NVT_TYPE_EDID_VTB_EXT_DTD:
+ case NVT_TYPE_EDID_VTB_EXT_CVT:
+ case NVT_TYPE_HDMI_STEREO:
+ case NVT_TYPE_DISPLAYID_1:
+ case NVT_TYPE_DISPLAYID_2:
+ case NVT_TYPE_HDMI_EXT:
+ case NVT_TYPE_CUST_AUTO:
+ case NVT_TYPE_CUST_MANUAL:
+ case NVT_TYPE_CVT_RB_2:
+ case NVT_TYPE_DMT_RB_2:
+ case NVT_TYPE_DISPLAYID_7:
+ case NVT_TYPE_DISPLAYID_8:
+ case NVT_TYPE_DISPLAYID_9:
+ case NVT_TYPE_DISPLAYID_10:
+ case NVT_TYPE_CVT_RB_3:
+ break;
+ }
+ break;
+ }
+ }
+
+ nvEvoLogInfoString(pInfoString, "");
+ nvEvoLogInfoString(pInfoString,
+ "--- EDID for %s ---", pDpyEvo->name);
+
+ if (!pDpyEvo->parsedEdid.valid) {
+ nvEvoLogInfoString(pInfoString, "");
+ nvEvoLogInfoString(pInfoString, "No EDID Available.");
+ nvEvoLogInfoString(pInfoString, "");
+ goto done;
+ }
+
+ pParsedEdid = &pDpyEvo->parsedEdid;
+
+ nvEvoLogInfoString(pInfoString,
+ "EDID Version : %d.%d",
+ pParsedEdid->info.version >> 8,
+ pParsedEdid->info.version & 0xff);
+
+ nvEvoLogInfoString(pInfoString,
+ "Manufacturer : %s",
+ pParsedEdid->info.manuf_name);
+
+ nvEvoLogInfoString(pInfoString,
+ "Monitor Name : %s",
+ pParsedEdid->monitorName);
+
+ nvEvoLogInfoString(pInfoString,
+ "Product ID : 0x%04x",
+ pParsedEdid->info.product_id);
+
+ nvEvoLogInfoString(pInfoString,
+ "32-bit Serial Number : 0x%08x",
+ pParsedEdid->info.serial_number);
+
+ nvEvoLogInfoString(pInfoString,
+ "Serial Number String : %s",
+ pParsedEdid->serialNumberString);
+
+ nvEvoLogInfoString(pInfoString,
+ "Manufacture Date : %d, week %d",
+ pParsedEdid->info.year,
+ pParsedEdid->info.week);
+
+ /*
+ * despite the name feature_ver_1_3, the below features are
+ * reported on all EDID versions
+ */
+ nvEvoLogInfoString(pInfoString,
+ "DPMS Capabilities :%s%s%s",
+ pParsedEdid->info.u.feature_ver_1_3.support_standby ?
+ " Standby" : "",
+ pParsedEdid->info.u.feature_ver_1_3.support_suspend ?
+ " Suspend" : "",
+ pParsedEdid->info.u.feature_ver_1_3.support_active_off ?
+ " Active Off" : "");
+
+ nvEvoLogInfoString(pInfoString,
+ "Input Type : %s",
+ pParsedEdid->info.input.isDigital ?
+ "Digital" : "Analog");
+
+ nvEvoLogInfoString(pInfoString,
+ "Prefer first detailed timing : %s",
+ pParsedEdid->info.u.feature_ver_1_3.preferred_timing_is_native ?
+ "Yes" : "No");
+
+ if (pParsedEdid->info.version == NVT_EDID_VER_1_3) {
+ nvEvoLogInfoString(pInfoString,
+ "Supports GTF : %s",
+ pParsedEdid->info.u.feature_ver_1_3.support_gtf ? 
+ "Yes" : "No"); + } + + if (pParsedEdid->info.version >= NVT_EDID_VER_1_4) { + NvBool continuousFrequency = FALSE; + if (pParsedEdid->info.input.isDigital) { + continuousFrequency = + pParsedEdid->info.u.feature_ver_1_4_digital.continuous_frequency; + } else { + continuousFrequency = + pParsedEdid->info.u.feature_ver_1_4_analog.continuous_frequency; + } + + nvEvoLogInfoString(pInfoString, + "Supports Continuous Frequency: %s", + continuousFrequency ? "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + "EDID 1.4 YCbCr 422 support : %s", + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_422 + ? "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + "EDID 1.4 YCbCr 444 support : %s", + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_444 + ? "Yes" : "No"); + } + + nvEvoLogInfoString(pInfoString, + "Maximum Image Size : %d mm x %d mm", + pParsedEdid->info.screen_size_x * 10, /* screen_size_* is in cm */ + pParsedEdid->info.screen_size_y * 10); + + nvEvoLogInfoString(pInfoString, + "Valid HSync Range : " + NV_FMT_DIV_1000_POINT_1 + " kHz - " NV_FMT_DIV_1000_POINT_1 " kHz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.min_h_rate_hz), + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_h_rate_hz)); + + nvEvoLogInfoString(pInfoString, + "Valid VRefresh Range : " + NV_FMT_DIV_1000_POINT_1 " Hz - " + NV_FMT_DIV_1000_POINT_1 " Hz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.min_v_rate_hzx1k), + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_v_rate_hzx1k)); + + nvEvoLogInfoString(pInfoString, + "EDID maximum pixel clock : " + NV_FMT_DIV_1000_POINT_1 " MHz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_pclk_10khz * 10)); + + if (pParsedEdid->info.nvdaVsdbInfo.valid) { + nvEvoLogInfoString(pInfoString, + "G-Sync capable : %s", + pParsedEdid->info.nvdaVsdbInfo.vrrData.v1.supportsVrr + ? "Yes" : "No"); + nvEvoLogInfoString(pInfoString, + "G-Sync minimum refresh rate : %d Hz", + pParsedEdid->info.nvdaVsdbInfo.vrrData.v1.minRefreshRate); + } + + nvLogEdidCea861InfoEvo(pDpyEvo, pInfoString); + + if (pParsedEdid->info.input.isDigital && + pParsedEdid->info.version >= NVT_EDID_VER_1_4) { + nvEvoLogInfoString(pInfoString, + "EDID bits per component : %d", + pParsedEdid->info.input.u.digital.bpc); + } + + /* print the tiled display extension block, if present */ + if (pParsedEdid->info.ext_displayid.tile_topology_id.vendor_id) { + const NVT_DISPLAYID_INFO *tile = &pParsedEdid->info.ext_displayid; + const char *tmp; + + nvEvoLogInfoString(pInfoString, + "Tiled display information :"); + nvEvoLogInfoString(pInfoString, + " Revision : %d", + tile->tiled_display_revision); + nvEvoLogInfoString(pInfoString, + " Single Enclosure : %s", + tile->tile_capability.bSingleEnclosure ? 
+ "Yes" : "No");
+
+ tmp = "Unknown";
+ switch (tile->tile_capability.multi_tile_behavior) {
+ case NVT_MULTI_TILE_BEHAVIOR_OTHER:
+ tmp = "Other";
+ break;
+ case NVT_MULTI_TILE_BEHAVIOR_SOURCE_DRIVEN:
+ tmp = "Source-driven";
+ break;
+ }
+ nvEvoLogInfoString(pInfoString,
+ " Multi-tile Behavior : %s", tmp);
+
+ tmp = "Unknown";
+ switch (tile->tile_capability.single_tile_behavior) {
+ case NVT_SINGLE_TILE_BEHAVIOR_OTHER:
+ tmp = "Other";
+ break;
+ case NVT_SINGLE_TILE_BEHAVIOR_SOURCE_DRIVEN:
+ tmp = "Source-driven";
+ break;
+ case NVT_SINGLE_TILE_BEHAVIOR_SCALE:
+ tmp = "Scale";
+ break;
+ case NVT_SINGLE_TILE_BEHAVIOR_CLONE:
+ tmp = "Clone";
+ break;
+ }
+ nvEvoLogInfoString(pInfoString,
+ " Single-tile Behavior : %s", tmp);
+ nvEvoLogInfoString(pInfoString,
+ " Topology : %d row%s, %d column%s",
+ tile->tile_topology.row,
+ (tile->tile_topology.row == 1) ? "" : "s",
+ tile->tile_topology.col,
+ (tile->tile_topology.col == 1) ? "" : "s");
+ nvEvoLogInfoString(pInfoString,
+ " Location : (%d,%d)",
+ tile->tile_location.x, tile->tile_location.y);
+ nvEvoLogInfoString(pInfoString,
+ " Native Resolution : %dx%d",
+ tile->native_resolution.width,
+ tile->native_resolution.height);
+ if (tile->tile_capability.bHasBezelInfo) {
+ nvEvoLogInfoString(pInfoString,
+ " Bezel Information :");
+ nvEvoLogInfoString(pInfoString,
+ " Pixel Density : %d",
+ tile->bezel_info.pixel_density);
+ nvEvoLogInfoString(pInfoString,
+ " Top : %d",
+ tile->bezel_info.top);
+ nvEvoLogInfoString(pInfoString,
+ " Bottom : %d",
+ tile->bezel_info.bottom);
+ nvEvoLogInfoString(pInfoString,
+ " Left : %d",
+ tile->bezel_info.left);
+ nvEvoLogInfoString(pInfoString,
+ " Right : %d",
+ tile->bezel_info.right);
+ }
+ nvEvoLogInfoString(pInfoString,
+ " Vendor ID : 0x%x",
+ tile->tile_topology_id.vendor_id);
+ nvEvoLogInfoString(pInfoString,
+ " Product ID : 0x%x",
+ tile->tile_topology_id.product_id);
+ nvEvoLogInfoString(pInfoString,
+ " Serial Number : 0x%x",
+ tile->tile_topology_id.serial_number);
+ }
+
+ for (k = 0; k < ARRAY_LEN(mode_type_table); k++) {
+
+ int i;
+
+ /* scan through the ModeList to find a mode of the current type */
+
+ for (i = 0; i < pParsedEdid->info.total_timings; i++) {
+ NVT_TIMING *pTiming = &pParsedEdid->info.timing[i];
+ if (mode_type_table[k].type ==
+ NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status)) {
+ break;
+ }
+ }
+
+ /* if there are none of this type, skip to the next mode type */
+
+ if (i == pParsedEdid->info.total_timings) {
+ continue;
+ }
+
+ nvEvoLogInfoString(pInfoString, "");
+ nvEvoLogInfoString(pInfoString, "%s:", mode_type_table[k].name);
+
+ for (i = 0; i < pParsedEdid->info.total_timings; i++) {
+
+ NVT_TIMING *pTiming = &pParsedEdid->info.timing[i];
+ NVT_TIMING_TYPE type =
+ NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status);
+ int vScale = 1;
+
+ if (mode_type_table[k].type != type) {
+ continue;
+ }
+
+ if ((type == NVT_TYPE_EDID_EST) ||
+ (type == NVT_TYPE_EDID_STD)) {
+
+ nvEvoLogInfoString(pInfoString,
+ " %-4d x %-4d @ %d Hz",
+ NV_NVT_TIMING_HVISIBLE(pTiming),
+ NV_NVT_TIMING_VVISIBLE(pTiming),
+ pTiming->etc.rr);
+ continue;
+ }
+
+ if (pTiming->interlaced) {
+ vScale = 2;
+ }
+
+ nvEvoLogInfoString(pInfoString,
+ " %-4d x %-4d @ %d Hz",
+ NV_NVT_TIMING_HVISIBLE(pTiming),
+ NV_NVT_TIMING_VVISIBLE(pTiming),
+ pTiming->etc.rr);
+
+ nvEvoLogInfoString(pInfoString,
+ " Pixel Clock : "
+ NV_FMT_DIV_1000_POINT_2 " MHz",
+ NV_VA_DIV_1000_POINT_2(pTiming->pclk
+ * 10));
+
+ nvEvoLogInfoString(pInfoString,
+ " HRes, HSyncStart : %d, %d",
+ pTiming->HVisible,
+ 
pTiming->HVisible + + pTiming->HFrontPorch); + + nvEvoLogInfoString(pInfoString, + " HSyncEnd, HTotal : %d, %d", + pTiming->HVisible + + pTiming->HFrontPorch + + pTiming->HSyncWidth, + pTiming->HTotal); + + nvEvoLogInfoString(pInfoString, + " VRes, VSyncStart : %d, %d", + pTiming->VVisible * vScale, + (pTiming->VVisible + + pTiming->VFrontPorch) * vScale); + + nvEvoLogInfoString(pInfoString, + " VSyncEnd, VTotal : %d, %d", + (pTiming->VVisible + + pTiming->VFrontPorch + + pTiming->VSyncWidth) * vScale, + pTiming->VTotal * vScale); + + nvEvoLogInfoString(pInfoString, + " H/V Polarity : %s/%s", + (pTiming->HSyncPol == NVT_H_SYNC_NEGATIVE) ? + "-" : "+", + (pTiming->VSyncPol == NVT_V_SYNC_NEGATIVE) ? + "-" : "+"); + + if (pTiming->interlaced) { + nvEvoLogInfoString(pInfoString, + " Interlaced : yes"); + } + if (pTiming->etc.flag & NVT_FLAG_NV_DOUBLE_SCAN_TIMING) { + nvEvoLogInfoString(pInfoString, + " Double Scanned : yes"); + } + + if (type == NVT_TYPE_EDID_861ST) { + nvEvoLogInfoString(pInfoString, + " CEA Format : %d", + NVT_GET_CEA_FORMAT(pTiming->etc.status)); + } + + if (NV_NVT_TIMING_HAS_ASPECT_RATIO(pTiming)) { + nvEvoLogInfoString(pInfoString, + " Aspect Ratio : %d:%d", + NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming), + NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming)); + } + + if (NV_NVT_TIMING_HAS_IMAGE_SIZE(pTiming)) { + nvEvoLogInfoString(pInfoString, + " Image Size : %d mm x %d mm", + NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming), + NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs)) { + nvEvoLogInfoString(pInfoString, + " RGB 444 bpcs : %s", + GetColorDepthBpc(pTiming->etc.rgb444)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv444.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 444 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv444)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv422.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 422 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv422)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv420.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 420 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv420)); + } + } // i + } // k + + nvEvoLogInfoString(pInfoString, ""); + + done: + nvEvoLogInfoString(pInfoString, + "--- End of EDID for %s ---", pDpyEvo->name); + nvEvoLogInfoString(pInfoString, ""); +} + + + +/* + * Clear the EDID and related fields in the display device data + * structure. + */ + +static void ClearEdid(NVDpyEvoPtr pDpyEvo) +{ + NVEdidRec edid = { }; + NVEvoInfoStringRec infoString; + nvInitInfoString(&infoString, NULL, 0); + + if (EdidHasChanged(pDpyEvo, &edid, NULL)) { + ApplyNewEdid(pDpyEvo, &edid, NULL, &infoString); + } +} + + + +/* + * ClearCustomEdid() - send an empty custom EDID to RM; this is to + * clear out any stale state in RM about custom EDIDs that we may have + * told RM about previous runs of X. + */ + +static void ClearCustomEdid(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *setEdidParams; + + if (nvDpyEvoIsDPMST(pDpyEvo)) { + // RM doesn't track this device, so leave the EDID alone. 
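+ // (Dynamic DP MST devices read their EDIDs through the DP library
+ // rather than through RM, so there is no RM EDID state to clear.)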
+ return; + } + + setEdidParams = nvCalloc(sizeof(*setEdidParams), 1); + if (setEdidParams == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to clear custom EDID for display device %s", + pDpyEvo->name); + return; + } + + setEdidParams->subDeviceInstance = pDispEvo->displayOwner; + setEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + setEdidParams->bufferSize = 0; + + /* ignore the NvRmControl() return value */ + + (void) nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2, + setEdidParams, sizeof(*setEdidParams)); + + nvFree(setEdidParams); +} // ClearCustomEdid() + + + +/* + * WriteEdidToResman() - send a custom EDID to RM. + */ + +static void WriteEdidToResman(const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *setEdidParams = NULL; + NvU32 status = NVOS_STATUS_ERROR_OPERATING_SYSTEM; + + if (pEdid->length > sizeof(setEdidParams->edidBuffer)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "EDID for display device %s is too long for NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2", + pDpyEvo->name); + goto done; + } + + setEdidParams = nvCalloc(sizeof(*setEdidParams), 1); + if (setEdidParams == NULL) { + goto done; + } + + setEdidParams->subDeviceInstance = pDispEvo->displayOwner; + setEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + nvkms_memcpy(&setEdidParams->edidBuffer, pEdid->buffer, pEdid->length); + setEdidParams->bufferSize = pEdid->length; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2, + setEdidParams, sizeof(*setEdidParams)); + +done: + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failure processing EDID for display device " + "%s.", pDpyEvo->name); + } + + nvFree(setEdidParams); +} // WriteEdidToResman() + + +/* + * NvTiming_ParseEDIDInfo() will ignore some modes that are blatantly + * wrong, so we need to apply any patching to the EDID bytes before + * parsing the EDID. + */ +static void PrePatchEdid(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NVEvoInfoStringPtr pInfoString) +{ + NvU8 *pEdidData = pEdid->buffer; + + if (pEdid->buffer == NULL || pEdid->length < 128) { + return; + } + + /* + * Work around bug 628240: some AUO flat panels have invalid + * native modes where HSyncEnd is larger than HTotal, putting the + * end of the sync pulse several columns into the active region of + * the next frame. 
AUO confirmed these corrected timings: + * + * "1366x768" 69.30 1366 1398 1422 1432 768 771 775 806 -hsync -vsync + */ + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP && + pEdidData[0x36] == 0x26 && + pEdidData[0x37] == 0x1b && + pEdidData[0x38] == 0x56 && + pEdidData[0x39] == 0x47 && + pEdidData[0x3a] == 0x50 && + pEdidData[0x3b] == 0x00 && + pEdidData[0x3c] == 0x26 && + pEdidData[0x3d] == 0x30 && + pEdidData[0x3e] == 0x30 && + pEdidData[0x3f] == 0x20 && + pEdidData[0x40] == 0x34 && + pEdidData[0x41] == 0x00 && + pEdidData[0x42] == 0x58 && + pEdidData[0x43] == 0xc1 && + pEdidData[0x44] == 0x10 && + pEdidData[0x45] == 0x00 && + pEdidData[0x46] == 0x00 && + pEdidData[0x47] == 0x18 && + pEdidData[0x7f] == 0x2e) { + + pEdidData[0x36] = 0x12; + pEdidData[0x37] = 0x1b; + pEdidData[0x38] = 0x56; + pEdidData[0x39] = 0x42; + pEdidData[0x3a] = 0x50; + pEdidData[0x3b] = 0x00; + pEdidData[0x3c] = 0x26; + pEdidData[0x3d] = 0x30; + pEdidData[0x3e] = 0x20; + pEdidData[0x3f] = 0x18; + pEdidData[0x40] = 0x34; + pEdidData[0x41] = 0x00; + pEdidData[0x42] = 0x58; + pEdidData[0x43] = 0xc1; + pEdidData[0x44] = 0x10; + pEdidData[0x45] = 0x00; + pEdidData[0x46] = 0x00; + pEdidData[0x47] = 0x18; + pEdidData[0x7f] = 0x5f; + + nvEvoLogInfoString(pInfoString, "Fixed invalid mode for 1366x768"); + } +} + +/* + * CreateParsedEdidFromNVT_TIMING() - Puts modetiming data from RM into an EDID format + */ +static void CreateParsedEdidFromNVT_TIMING( + NVT_TIMING *pTimings, + NvU8 bpc, + NVParsedEdidEvoPtr pParsedEdid) +{ + nvkms_memset(pParsedEdid, 0, sizeof(*pParsedEdid)); + pParsedEdid->info.total_timings = 1; + nvkms_memcpy(&pParsedEdid->info.timing[0], pTimings, sizeof(*pTimings)); + pParsedEdid->info.timing[0].etc.status = NVT_STATUS_CUST; + pParsedEdid->info.u.feature_ver_1_4_digital.continuous_frequency = FALSE; + pParsedEdid->info.version = NVT_EDID_VER_1_4; + pParsedEdid->info.input.isDigital = TRUE; + pParsedEdid->info.input.u.digital.bpc = bpc; + pParsedEdid->limits.min_h_rate_hz = 1; + pParsedEdid->limits.min_v_rate_hzx1k = 1; + pParsedEdid->limits.max_h_rate_hz = NV_U32_MAX; + pParsedEdid->limits.max_v_rate_hzx1k = NV_U32_MAX; + pParsedEdid->valid = TRUE; +} + +/* + * PatchAndParseEdid() - use the nvtiming library to parse the EDID data. The + * EDID data provided in the 'pEdid' argument may be patched or modified. 
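+ * On success, pParsedEdid->valid is set, and the EDID buffer may be
+ * shrunk to the actual size reported by the parsed EDID.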
+ */ + +static void PatchAndParseEdid( + const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NVParsedEdidEvoPtr pParsedEdid, + NVEvoInfoStringPtr pInfoString) +{ + int i; + NVT_STATUS status; + NvU32 edidSize; + + if (pEdid->buffer == NULL || pEdid->length == 0) { + return; + } + + nvkms_memset(pParsedEdid, 0, sizeof(*pParsedEdid)); + + PrePatchEdid(pDpyEvo, pEdid, pInfoString); + + /* parse the majority of information from the EDID */ + + status = NvTiming_ParseEDIDInfo(pEdid->buffer, pEdid->length, + &pParsedEdid->info); + + if (status != NVT_STATUS_SUCCESS) { + return; + } + + /* interpret the frequency range limits from the EDID */ + + NvTiming_CalculateEDIDLimits(&pParsedEdid->info, &pParsedEdid->limits); + + /* get the user-friendly monitor name */ + + NvTiming_GetMonitorName(&pParsedEdid->info, + (NvU8 *) &pParsedEdid->monitorName); + nvAssert(pParsedEdid->monitorName[0] != '\0'); + + /* find the serial number string */ + + pParsedEdid->serialNumberString[0] = '\0'; + + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) { + if (pParsedEdid->info.ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DPSN) { + nvkms_strncpy( + pParsedEdid->serialNumberString, + (const char *)pParsedEdid->info.ldd[i].u.serial_number.str, + sizeof(pParsedEdid->serialNumberString)); + pParsedEdid->serialNumberString[ + sizeof(pParsedEdid->serialNumberString) - 1] = '\0'; + break; + } + } + + + for (i = 0; i < pParsedEdid->info.total_timings; i++) { + NVT_TIMING *pTiming = &pParsedEdid->info.timing[i]; + + /* patch up RRx1k for 640x480@60Hz */ + + if (IsEdid640x480_60_NVT_TIMING(pTiming)) { + pTiming->etc.rrx1k = 59940; + } + + /* + * Invalidate modes that require pixel repetition (i.e., modes + * that don't support Pixel Repetition 0). See bug 1459376. + */ + + nvAssert(pTiming->etc.rep != 0); + + if ((pTiming->etc.rep & NVBIT(0)) == 0) { + pTiming->etc.status = 0; + } + } + + pParsedEdid->valid = TRUE; + + /* resize the EDID buffer, if necessary */ + + edidSize = NVT_EDID_ACTUAL_SIZE(&pParsedEdid->info); + + if (edidSize < pEdid->length) { + NvU8 *pEdidData = nvAlloc(edidSize); + + if (pEdidData != NULL) { + nvkms_memcpy(pEdidData, pEdid->buffer, edidSize); + + nvFree(pEdid->buffer); + + pEdid->buffer = pEdidData; + pEdid->length = edidSize; + } + } +} + + +/*! + * Assign NVDpyEvoRec::name. 
+ * + * The name has the form: + * + * "edidName (typeName-N.dpAddress)" + * + * If edidName is unavailable, then it, and the parentheses are omitted: + * + * "typeName-N.dpAddress" + * "typeName-N" + * + * if dpAddress is unavailable, then the ".dpAddress" is omitted: + * + * "edidName (typeName-N)" + * "typeName-N" + */ +static void AssignDpyEvoName(NVDpyEvoPtr pDpyEvo) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + const char *edidName = ""; + const char *openParen = ""; + const char *closeParen = ""; + const char *dpAddress = ""; + const char *dpAddressSeparator = ""; + + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.monitorName[0] != '\0') { + edidName = pDpyEvo->parsedEdid.monitorName; + openParen = " ("; + closeParen = ")"; + } + + if (pDpyEvo->dp.addressString != NULL) { + dpAddress = pDpyEvo->dp.addressString; + dpAddressSeparator = "."; + } + + nvkms_snprintf(pDpyEvo->name, sizeof(pDpyEvo->name), + "%s%s%s%s%s%s", + edidName, + openParen, + pConnectorEvo->name, + dpAddressSeparator, + dpAddress, + closeParen); + + pDpyEvo->name[sizeof(pDpyEvo->name) - 1] = '\0'; +} + +enum NvKmsDpyAttributeDigitalSignalValue +nvGetDefaultDpyAttributeDigitalSignalValue(const NVConnectorEvoRec *pConnectorEvo) +{ + enum NvKmsDpyAttributeDigitalSignalValue signal = + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS; + + if (pConnectorEvo->legacyType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DISPLAYPORT; + } else { + nvAssert((pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) || + (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI)); + + if (pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM) { + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS; + } else if (pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI) { + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DSI; + } else { + // May be later changed to HDMI_FRL at modeset time. + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS; + } + } + } + + return signal; +} + +NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDpyId dpyId, const char *dpAddress) +{ + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = nvCalloc(1, sizeof(*pDpyEvo)); + + if (pDpyEvo == NULL) { + return NULL; + } + + pDpyEvo->pDispEvo = pDispEvo; + pDpyEvo->pConnectorEvo = pConnectorEvo; + pDpyEvo->apiHead = NV_INVALID_HEAD; + pDpyEvo->id = dpyId; + + nvListAdd(&pDpyEvo->dpyListEntry, &pDispEvo->dpyList); + + if (dpAddress) { + pDpyEvo->dp.addressString = nvStrDup(dpAddress); + pDispEvo->displayPortMSTIds = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->displayPortMSTIds); + + if (!nvConnectorIsDPSerializer(pConnectorEvo)) { + pDispEvo->dynamicDpyIds = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->dynamicDpyIds); + } + } + + AssignDpyEvoName(pDpyEvo); + + nvDpyProbeMaxPixelClock(pDpyEvo); + + pDpyEvo->requestedDithering.state = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO; + pDpyEvo->requestedDithering.mode = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO; + pDpyEvo->requestedDithering.depth = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO; + + // Initialize DP link rate and lane count to sane values. + // This is normally done in nvDPLibUpdateDpyLinkConfiguration, + // but do it here as well in case we query flat panel properties for + // screenless DP devices. 
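+ // These are conservative placeholder values; nvDPLibUpdateDpyLinkConfiguration
+ // replaces them with the trained link configuration once a device is connected.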
+ if (nvConnectorUsesDPLib(pConnectorEvo)) { + pDpyEvo->dp.linkRate = 0; + pDpyEvo->dp.laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + pDpyEvo->dp.connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + pDpyEvo->dp.sinkIsAudioCapable = FALSE; + } + + pDpyEvo->requestedColorSpace = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB; + pDpyEvo->requestedColorRange = + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + + pDpyEvo->currentAttributes = NV_EVO_DEFAULT_ATTRIBUTES_SET; + pDpyEvo->currentAttributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pConnectorEvo); + + DpyGetStaticDfpProperties(pDpyEvo); + + return pDpyEvo; +} + + +void nvFreeDpyEvo(NVDispEvoPtr pDispEvo, NVDpyEvoPtr pDpyEvo) +{ + DpyDisconnectEvo(pDpyEvo); + + // Let the DP library host implementation handle deleting a pDpy as if the + // library had notified it of a lost device. + nvDPDpyFree(pDpyEvo); + nvAssert(!pDpyEvo->dp.pDpLibDevice); + + pDispEvo->validDisplays = + nvDpyIdListMinusDpyId(pDispEvo->validDisplays, pDpyEvo->id); + + pDispEvo->displayPortMSTIds = + nvDpyIdListMinusDpyId(pDispEvo->displayPortMSTIds, pDpyEvo->id); + pDispEvo->dynamicDpyIds = + nvDpyIdListMinusDpyId(pDispEvo->dynamicDpyIds, pDpyEvo->id); + + nvListDel(&pDpyEvo->dpyListEntry); + + nvFree(pDpyEvo->dp.addressString); + nvFree(pDpyEvo); +} + + +/*! + * Return the pConnectorEvo associated with the given (static) display ID. + * + * XXX[DP] not valid for DP monitors, the connector will be known before + * initialization so this will not be needed. + * + * \param[in] pDisp The pDisp on which to search for the pConnector. + * \param[in] dpyId The ID of the connector to search for. + * + * \return The pConnectorEvo from pDisp that matches the ID, or NULL if + * no connector is found. + */ +NVConnectorEvoPtr nvGetConnectorFromDisp(NVDispEvoPtr pDispEvo, NVDpyId dpyId) +{ + NVConnectorEvoPtr pConnectorEvo; + + nvAssert(nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvDpyIdsAreEqual(dpyId, pConnectorEvo->displayId)) { + return pConnectorEvo; + } + } + + nvAssert(!"Failed to find pDpy's connector!"); + return NULL; +} + +/* + * Construct the DP 1.3 YUV420 infoframe, and toggle it on or off based on + * whether or not YUV420 mode is in use. + */ +static void UpdateDpInfoFrames(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVAttributesSetEvoRec *pAttributesSet) +{ + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + + if (pAttributesSet->colorSpace == + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) { + + // DPSDP_DP_VSC_SDP_DESCRIPTOR has a (dataSize, hb, db) layout, while + // NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS.aPacket needs to contain + // (hb, db) without dataSize, so this makes sdp->hb align with aPacket. 
DPSDP_DP_VSC_SDP_DESCRIPTOR *sdp =
+ (DPSDP_DP_VSC_SDP_DESCRIPTOR *)(params.aPacket -
+ offsetof(DPSDP_DP_VSC_SDP_DESCRIPTOR, hb));
+
+ nvAssert((void *)&sdp->hb == (void *)params.aPacket);
+
+ // Header
+ // Per DP1.3 spec
+ sdp->hb.hb0 = 0;
+ sdp->hb.hb1 = SDP_PACKET_TYPE_VSC;
+ sdp->hb.revisionNumber = SDP_VSC_REVNUM_STEREO_PSR2_COLOR;
+ sdp->hb.numValidDataBytes = SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR;
+
+ sdp->db.stereoInterface = 0;
+ sdp->db.psrState = 0;
+ sdp->db.contentType = SDP_VSC_CONTENT_TYPE_GRAPHICS;
+ sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR420;
+ sdp->db.colorimetryFormat = SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709;
+
+ switch (pTimings->pixelDepth) {
+ case NVKMS_PIXEL_DEPTH_30_444:
+ sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_10BPC;
+ break;
+ case NVKMS_PIXEL_DEPTH_24_444:
+ sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_8BPC;
+ break;
+ default:
+ nvAssert(!"Invalid pixelDepth value");
+ break;
+ }
+
+ switch (pAttributesSet->colorRange) {
+ case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL:
+ sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_VESA;
+ break;
+ case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED:
+ sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_CEA;
+ break;
+ default:
+ nvAssert(!"Invalid colorRange value");
+ break;
+ }
+
+ params.packetSize = sizeof(sdp->hb) + sdp->hb.numValidDataBytes;
+
+ params.transmitControl =
+ DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) |
+ DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _OTHER_FRAME, _DISABLE) |
+ DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _SINGLE_FRAME, _DISABLE) |
+ DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ON_HBLANK, _DISABLE);
+ } else {
+ params.transmitControl =
+ DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _NO);
+ }
+
+ ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+ pDevEvo->displayCommonHandle,
+ NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET,
+ &params,
+ sizeof(params));
+
+ if (ret != NVOS_STATUS_SUCCESS) {
+ nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET failed");
+ }
+}
+
+void nvUpdateInfoFrames(NVDpyEvoRec *pDpyEvo)
+{
+ const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;
+ const NVDispApiHeadStateEvoRec *pApiHeadState;
+ NvU32 head;
+
+ if (pDpyEvo->apiHead == NV_INVALID_HEAD) {
+ return;
+ }
+ pApiHeadState = &pDispEvo->apiHeadState[pDpyEvo->apiHead];
+
+ nvAssert((pApiHeadState->hwHeadsMask) != 0x0 &&
+ (nvDpyIdIsInDpyIdList(pDpyEvo->id, pApiHeadState->activeDpys)));
+
+ head = nvGetPrimaryHwHead(pDispEvo, pDpyEvo->apiHead);
+
+ nvAssert(head != NV_INVALID_HEAD);
+
+ if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) {
+ UpdateDpInfoFrames(pDispEvo, head, &pApiHeadState->attributes);
+ } else {
+ nvUpdateHdmiInfoFrames(pDispEvo,
+ head,
+ &pApiHeadState->attributes,
+ &pApiHeadState->infoFrame,
+ pDpyEvo);
+ }
+}
+
+/*!
+ * nvDpyRequiresDualLinkEvo() - Returns whether or not the given mode exceeds
+ * the maximum single TMDS link pixel clock.
+ *
+ * \param[in] pDpyEvo display to check the maximum single link pixel clock
+ *
+ * \param[in] pTimings mode timings to check pixel clock
+ *
+ * \return TRUE if pixel clock exceeds display's maximum single link pixel
+ * clock
+ */
+NvBool nvDpyRequiresDualLinkEvo(const NVDpyEvoRec *pDpyEvo,
+ const NVHwModeTimingsEvo *pTimings)
+{
+ // Dual link HDMI is not possible.
+ nvAssert(!(nvDpyIsHdmiEvo(pDpyEvo) &&
+ (pTimings->pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz)));
+ return (pTimings->pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz);
+}
+
+/*! 
+ * Return the NVDpyEvoPtr that corresponds to the given dpyId, on the + * given NVDispEvoPtr, or NULL if no matching NVDpyEvoPtr can be + * found. + */ +NVDpyEvoPtr nvGetDpyEvoFromDispEvo(const NVDispEvoRec *pDispEvo, NVDpyId dpyId) +{ + NVDpyEvoPtr pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, nvAddDpyIdToEmptyDpyIdList(dpyId), pDispEvo) { + return pDpyEvo; + } + + return NULL; +} + +/* + * Find or create a pDpy with a given root connector and topology path. + */ +NVDpyEvoPtr nvGetDPMSTDpyEvo(NVConnectorEvoPtr pConnectorEvo, + const char *address, NvBool *pDynamicDpyCreated) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo = NULL, pTmpDpyEvo; + NVDpyId dpyId; + + // Look for a pDpyEvo on pConnectorEvo whose dp address matches. + FOR_ALL_EVO_DPYS(pTmpDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pTmpDpyEvo->pConnectorEvo != pConnectorEvo) { + continue; + } + if (pTmpDpyEvo->dp.addressString == NULL) { + continue; + } + if (nvkms_strcmp(pTmpDpyEvo->dp.addressString, address) == 0) { + pDpyEvo = pTmpDpyEvo; + goto done; + } + } + + // Find a display ID that is not used on this GPU. + dpyId = nvNewDpyId(pDispEvo->validDisplays); + if (nvDpyIdIsInvalid(dpyId)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to allocate a display ID for device %s.%s", + pConnectorEvo->name, + address); + goto done; + } + + // Create a new pDpy for this address. + pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo, dpyId, address); + if (pDpyEvo == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to create a display device object for %s-%u.%s", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex, + address); + goto done; + } + + pDispEvo->validDisplays = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->validDisplays); + + *pDynamicDpyCreated = TRUE; + +done: + return pDpyEvo; +} + +/*! + * Return a string with a comma-separated list of dpy names, for all + * dpys in dpyIdList. + * + * If there are no dpys in the dpyIdList, return "none". + * + * The string is dynamically allocated and should be freed by the caller. + * + * Return NULL if an allocation failure occurs. 
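+ * (The list is built in two passes: the first pass sizes the buffer,
+ * the second fills it in.)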
+ */ +char *nvGetDpyIdListStringEvo(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList) +{ + NVDpyEvoPtr pDpyEvo; + char *listString = NULL; + NvU32 lengths[NV_DPY_ID_MAX_DPYS_IN_LIST]; + NvU32 totalLength = 0; + NvU32 currentOffset; + NvU32 index; + + index = 0; + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + + nvAssert(index < ARRAY_LEN(lengths)); + + lengths[index] = nvkms_strlen(pDpyEvo->name); + + totalLength += lengths[index]; + + if (index != 0) { + totalLength += 2; /* nvkms_strlen(", ") */ + } + + index++; + } + + totalLength += 1; /* for nul terminator */ + + if (index == 0) { + return nvStrDup("none"); + } + + listString = nvAlloc(totalLength); + + if (listString == NULL) { + return NULL; + } + + index = 0; + currentOffset = 0; + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + + if (index != 0) { + listString[currentOffset] = ','; + listString[currentOffset+1] = ' '; + currentOffset += 2; + } + + nvkms_memcpy(listString + currentOffset, pDpyEvo->name, lengths[index]); + + currentOffset += lengths[index]; + + index++; + } + + listString[currentOffset] = '\0'; + currentOffset += 1; + + nvAssert(currentOffset == totalLength); + + return listString; +} + +NvBool nvDpyGetDynamicData( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + const struct NvKmsQueryDpyDynamicDataRequest *pRequest = &pParams->request; + struct NvKmsQueryDpyDynamicDataReply *pReply = &pParams->reply; + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDpyIdList connectedList; + NVDpyIdList oneDpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id); + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + /* + * Check for the connection state of the dpy. + * + * For DP MST, we need to honor the current DPlib state; if a DP + * MST monitor is physically connected but forceDisconnected, its + * hotplug events won't get serviced and DPlib will complain + * loudly. This doesn't apply to DP serializer (which is not managed + * by DPLib) since we don't need to do any topology/branch detection, + * and we can honor force{Connected,Disconnected} in MST & SST mode. + * + * Otherwise, allow the client to override detection. + * + * Otherwise, honor the current DPlib state. + * + * If we're using a DP serializer connector in MST mode, don't expose any + * SST displays as connected. In all other cases, assume that everything + * is connected since the serializer connector has a fixed topology. + * + * Lastly, call RM to check if the dpy is connected. + */ + + if (nvDpyEvoIsDPMST(pDpyEvo) && + nvConnectorUsesDPLib(pConnectorEvo)) { + /* honor DP MST connectedness */ + connectedList = nvDPLibDpyIsConnected(pDpyEvo) ? + oneDpyIdList : nvEmptyDpyIdList(); + } else if (pRequest->forceConnected) { + connectedList = oneDpyIdList; + } else if (pRequest->forceDisconnected) { + connectedList = nvEmptyDpyIdList(); + } else if (nvConnectorUsesDPLib(pConnectorEvo)) { + connectedList = nvDPLibDpyIsConnected(pDpyEvo) ? 
+ oneDpyIdList : nvEmptyDpyIdList(); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + if (pConnectorEvo->dpSerializerCaps.supportsMST && + !nvDpyEvoIsDPMST(pDpyEvo)) { + connectedList = nvEmptyDpyIdList(); + } else { + connectedList = oneDpyIdList; + } + } else { + connectedList = nvRmGetConnectedDpys(pDispEvo, oneDpyIdList); + } + + pDpyEvo->dp.inbandStereoSignaling = pRequest->dpInbandStereoSignaling; + + /* + * XXX NVKMS TODO: once NVKMS is in the kernel and + * nvAllocCoreChannelEvo() is guaranteed to happen before + * nvDpyGetDynamicData(), pass allowDVISpecPClkOverride through to + * nvDpyProbeMaxPixelClock() rather than cache it. + */ + pDpyEvo->allowDVISpecPClkOverride = pRequest->allowDVISpecPClkOverride; + + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, connectedList)) { + if (!DpyConnectEvo(pDpyEvo, pParams)) { + return FALSE; + } + } else { + DpyDisconnectEvo(pDpyEvo); + } + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPLibUpdateDpyLinkConfiguration(pDpyEvo); + } + + ct_assert(sizeof(pDpyEvo->name) == sizeof(pReply->name)); + + nvkms_memcpy(pReply->name, pDpyEvo->name, sizeof(pDpyEvo->name)); + + if (pDpyEvo->parsedEdid.valid) { + pReply->physicalDimensions.heightInCM = + pDpyEvo->parsedEdid.info.screen_size_y; + pReply->physicalDimensions.widthInCM = + pDpyEvo->parsedEdid.info.screen_size_x; + } + + /* + * XXX NVKMS TODO: until NVKMS is in the kernel and + * nvAllocCoreChannelEvo() is guaranteed to happen before + * nvDpyGetDynamicData(), pDpyEvo->maxPixelClockKHz could change + * later after the assignment here. + */ + pReply->maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; + + pReply->connected = + nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->connectedDisplays); + + pReply->isVirtualRealityHeadMountedDisplay = pDpyEvo->isVrHmd; + + pReply->vrrType = pDpyEvo->vrr.type; + + pReply->stereo3DVision.supported = pDpyEvo->stereo3DVision.supported; + pReply->stereo3DVision.isDLP = pDpyEvo->stereo3DVision.isDLP; + pReply->stereo3DVision.isAegis = pDpyEvo->stereo3DVision.isAegis; + pReply->stereo3DVision.subType = pDpyEvo->stereo3DVision.subType; + + pReply->dp.guid.valid = pDpyEvo->dp.guid.valid; + + ct_assert(sizeof(pReply->dp.guid.buffer) == + sizeof(pDpyEvo->dp.guid.buffer)); + nvkms_memcpy(pReply->dp.guid.buffer, pDpyEvo->dp.guid.buffer, + sizeof(pDpyEvo->dp.guid.buffer)); + + ct_assert(sizeof(pReply->dp.guid.str) == sizeof(pDpyEvo->dp.guid.str)); + nvkms_memcpy(pReply->dp.guid.str, pDpyEvo->dp.guid.str, + sizeof(pDpyEvo->dp.guid.str)); + + if (pDpyEvo->edid.length > sizeof(pReply->edid.buffer)) { + nvAssert(!"EDID larger than can be returned in NVKMS API"); + return FALSE; + } + + if (pDpyEvo->edid.length > 0) { + pReply->edid.bufferSize = pDpyEvo->edid.length; + nvkms_memcpy(pReply->edid.buffer, pDpyEvo->edid.buffer, pDpyEvo->edid.length); + } + + return TRUE; +} + +void nvDpyUpdateCurrentAttributes(NVDpyEvoRec *pDpyEvo) +{ + NVAttributesSetEvoRec newAttributes = pDpyEvo->currentAttributes; + + if (pDpyEvo->apiHead != NV_INVALID_HEAD) { + newAttributes = + pDpyEvo->pDispEvo->apiHeadState[pDpyEvo->apiHead].attributes; + } else { + newAttributes.dithering.enabled = FALSE; + newAttributes.dithering.depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE; + newAttributes.dithering.mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE; + newAttributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pDpyEvo->pConnectorEvo); + } + + if (newAttributes.colorSpace != + pDpyEvo->currentAttributes.colorSpace) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + 
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE, + newAttributes.colorSpace); + } + + if (newAttributes.colorRange != + pDpyEvo->currentAttributes.colorRange) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE, + newAttributes.colorRange); + } + + if (newAttributes.dithering.enabled != + pDpyEvo->currentAttributes.dithering.enabled) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING, + newAttributes.dithering.enabled); + } + + if (newAttributes.dithering.depth != + pDpyEvo->currentAttributes.dithering.depth) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH, + newAttributes.dithering.depth); + } + + if (newAttributes.dithering.mode != + pDpyEvo->currentAttributes.dithering.mode) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE, + newAttributes.dithering.mode); + } + + if (newAttributes.imageSharpening.available != + pDpyEvo->currentAttributes.imageSharpening.available) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE, + newAttributes.imageSharpening.available); + } + + if (newAttributes.digitalSignal != + pDpyEvo->currentAttributes.digitalSignal) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL, + newAttributes.digitalSignal); + } + + pDpyEvo->currentAttributes = newAttributes; +} + +// Returns TRUE if this display is capable of Adaptive-Sync +NvBool nvDpyIsAdaptiveSync(const NVDpyEvoRec *pDpyEvo) +{ + return FALSE; +} + +// Returns TRUE if this display is in the Adaptive-Sync defaultlist +NvBool nvDpyIsAdaptiveSyncDefaultlisted(const NVParsedEdidEvoRec *pParsedEdid) +{ + return FALSE; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c new file mode 100644 index 0000000..a2285ba --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c @@ -0,0 +1,207 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvos.h" +#include "dp/nvdp-connector.h" +#include "nvkms-event.h" +#include "nvkms-rm.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-private.h" +#include "nvkms-evo.h" + +/* + * Handle a display device hotplug event. + * + * What "hotplug" means is unclear, but it could mean any of the following: + * - A display device is plugged in. + * - A display device is unlugged. + * - A display device was unplugged and then plugged back in. + * - A display device was plugged in and then unplugged. + * - An already connected display device is turned on. + * - An already connected display device is turned off. + * - A DisplayPort device needs its link status and RX Capabilities fields + * read and may need to be retrained ("long" hotplug event, > 2ms). + * + * DisplayPort "short" hotplug events, which are between 0.25ms and 2ms, are + * handled separately by nvHandleDPIRQEventDeferredWork below. + */ + +void +nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS hotplugParams = { 0 }; + NvU32 ret; + NVDpyIdList hotplugged, unplugged, tmpUnplugged, changed; + NVDpyIdList connectedDisplays; + NVDpyEvoPtr pDpyEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + // Get the hotplug state. + hotplugParams.subDeviceInstance = pDispEvo->displayOwner; + + if ((ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE, + &hotplugParams, + sizeof(hotplugParams))) + != NVOS_STATUS_SUCCESS) { + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "Failed to determine which " + "devices were hotplugged: 0x%x\n", ret); + return; + } + + /* + * Work around an RM bug in hotplug notification when the GPU is in + * GC6. In this case, the RM will notify us of a hotplug event, but + * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE returns both + * hotPlugMask and hotUnplugMask as 0. + * Bug 200528641 tracks finding a root cause. Until that bug is + * fixed, call NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE to get the + * full list of connected dpys and construct hotplugged and + * unplugged lists from that if we encounter this case. + */ + if ((hotplugParams.hotPlugMask == 0) && + (hotplugParams.hotUnplugMask == 0)) { + const NVDpyIdList updatedDisplayList = nvRmGetConnectedDpys(pDispEvo, + pDispEvo->connectorIds); + hotplugged = nvDpyIdListMinusDpyIdList(updatedDisplayList, + pDispEvo->connectedDisplays); + unplugged = nvDpyIdListMinusDpyIdList(pDispEvo->connectedDisplays, + updatedDisplayList); + } else { + hotplugged = nvNvU32ToDpyIdList(hotplugParams.hotPlugMask); + unplugged = nvNvU32ToDpyIdList(hotplugParams.hotUnplugMask); + } + + // The RM only reports the latest plug/unplug status of each dpy. + nvAssert(nvDpyIdListIsEmpty(nvIntersectDpyIdListAndDpyIdList(hotplugged, + unplugged))); + nvAssert(nvDpyIdListIsASubSetofDpyIdList(hotplugged, + pDispEvo->connectorIds)); + nvAssert(nvDpyIdListIsASubSetofDpyIdList(unplugged, + pDispEvo->connectorIds)); + + connectedDisplays = pDispEvo->connectedDisplays; + + // Ignore non-DP devices that were reported as unplugged while already + // disconnected. 
+ tmpUnplugged = nvEmptyDpyIdList(); + FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo) || + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) { + + tmpUnplugged = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, tmpUnplugged); + } + } + unplugged = tmpUnplugged; + + // Non-DP devices that were disconnected and connected again should generate an + // unplug / plug pair. + FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (!nvConnectorUsesDPLib(pConnectorEvo) && + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) { + + unplugged = nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, unplugged); + } + } + +#if defined(DEBUG) + if (!nvDpyIdListIsEmpty(hotplugged)) { + char *str = nvGetDpyIdListStringEvo(pDispEvo, hotplugged); + nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO, + "Received display hotplug event: %s", + nvSafeString(str, "unknown")); + nvFree(str); + } + if (!nvDpyIdListIsEmpty(unplugged)) { + char *str = nvGetDpyIdListStringEvo(pDispEvo, unplugged); + nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO, + "Received display unplug event: %s", + nvSafeString(str, "unknown")); + nvFree(str); + } +#endif /* DEBUG */ + + // First, the OR configuration of the connector should not change, but + // re-query it to make sure. + changed = nvAddDpyIdListToDpyIdList(hotplugged, unplugged); + FOR_ALL_EVO_DPYS(pDpyEvo, changed, pDispEvo) { + nvRmGetConnectorORInfo(pDpyEvo->pConnectorEvo, TRUE); + } + + // Next, disconnect devices that are in the unplug mask. + FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyLongPulse(pConnectorEvo, FALSE); + } else { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + } + + // Finally, connect devices that are in the plug mask. + FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyLongPulse(pConnectorEvo, TRUE); + } else { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + } +} + +void +nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + + // XXX[AGP]: ReceiveDPIRQEvent throws away the DisplayID of the device that + // caused the event, so for now we have to poll all of the connected DP + // devices to see which ones need attention. When RM is fixed, this can be + // improved. + + NVConnectorEvoPtr pConnectorEvo; + + // Notify all connectors which are using DP lib. For DP Serializer connector, + // HPD_IRQ indicates loss of clock/sync, so re-train the link. + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyShortPulse(pConnectorEvo->pDpLibConnector); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + nvDPSerializerHandleDPIRQ(pDispEvo, pConnectorEvo); + } + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c new file mode 100644 index 0000000..aa58608 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c @@ -0,0 +1,7370 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-types.h" + +#include "nvkms-evo-states.h" +#include "dp/nvdp-connector.h" +#include "nvkms-console-restore.h" +#include "nvkms-rm.h" +#include "nvkms-dpy.h" +#include "nvkms-cursor.h" +#include "nvkms-hal.h" +#include "nvkms-hdmi.h" +#include "nvkms-modepool.h" +#include "nvkms-evo.h" +#include "nvkms-flip.h" +#include "nvkms-dma.h" +#include "nvkms-framelock.h" +#include "nvkms-utils.h" +#include "nvkms-lut.h" +#include "nvkms-modeset.h" +#include "nvkms-prealloc.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" +#include "nvkms-vrr.h" +#include "nvkms-ioctl.h" + +#include "nvctassert.h" + +#include <ctrl/ctrl0073/ctrl0073dfp.h> // NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS +#include <ctrl/ctrl0073/ctrl0073system.h> // NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH +#include <ctrl/ctrl0080/ctrl0080gpu.h> // NV0080_CTRL_CMD_GPU_* +#include <ctrl/ctrl0080/ctrl0080unix.h> // NV0080_CTRL_OS_UNIX_VT_SWITCH_* +#include <ctrl/ctrl30f1.h> // NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_* +#include <ctrl/ctrl5070/ctrl5070chnc.h> // NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS +#include <ctrl/ctrl5070/ctrl5070system.h> // NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 +#include <ctrl/ctrl5070/ctrl5070or.h> // NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE +#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_DP_CTRL + +#include "nvkms.h" +#include "nvkms-private.h" +#include "nvos.h" + +#include "displayport/dpcd.h" + +#define EVO_RASTER_LOCK 1 +#define EVO_FLIP_LOCK 2 + +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_HEAD 7:0 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT 8:8 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_DISABLE 0 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_ENABLE 1 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT 9:9 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_DISABLE 0 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_ENABLE 1 + +/* + * This struct is used to describe a single set of GPUs to lock together by + * GetRasterLockTopologies(). + * It is initialized to pDispEvoOrder[i] == NULL, and when filled in NULL is + * used as a terminator. 
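+ * + * Illustrative traversal (a sketch; FinishModesetOneTopology() below walks + * the array the same way): + * + * for (i = 0; i < NVKMS_MAX_SUBDEVICES && topo->pDispEvoOrder[i]; i++) { + * NVDispEvoPtr pDispEvo = topo->pDispEvoOrder[i]; + * ... + * }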
+ */ +typedef struct { + NVDispEvoPtr pDispEvoOrder[NVKMS_MAX_SUBDEVICES]; +} RasterLockTopology; + + +static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, NvU32 head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState); +static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0, + NVDispEvoPtr pDispEvo1, NvU32 head1, + NVEvoLockPin *serverPin, NVEvoLockPin *clientPin); +static NvBool EvoWaitForLock(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvU32 type); +static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head, + NVEvoUpdateState *updateState); + +static void SetRefClk(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvBool external, + NVEvoUpdateState *updateState); +static void UnlockRasterLockGroup(NVDevEvoPtr pDevEvo); +static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action); +static void FinishModesetOneTopology(RasterLockTopology *topo); + +static void SyncEvoLockState(void); +static void UpdateEvoLockState(void); + +static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo, + const NvU32 head, const NvU32 data, + const NvU64 usec); + +NVEvoGlobal nvEvoGlobal = { + .clientHandle = 0, + .frameLockList = NV_LIST_INIT(&nvEvoGlobal.frameLockList), + .devList = NV_LIST_INIT(&nvEvoGlobal.devList), +#if defined(DEBUG) + .debugMemoryAllocationList = + NV_LIST_INIT(&nvEvoGlobal.debugMemoryAllocationList), +#endif /* DEBUG */ +}; + +/* + * The dummy infoString should be used in paths that take an + * NVEvoInfoStringPtr where we don't need to log to a + * string. By setting the 's' field to NULL, nothing will be printed + * to the infoString buffer. + */ +NVEvoInfoStringRec dummyInfoString = { + .length = 0, + .totalLength = 0, + .s = NULL, +}; + +/*! + * Return the NVDevEvoPtr, if any, that matches deviceId. + */ +NVDevEvoPtr nvFindDevEvoByDeviceId(NvU32 deviceId) +{ + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + if (pDevEvo->usesTegraDevice && + (deviceId == NVKMS_DEVICE_ID_TEGRA)) { + return pDevEvo; + } else if (pDevEvo->deviceId == deviceId) { + return pDevEvo; + } + }; + + return NULL; +} + +/*! + * Find the first unused gpuLogIndex. + */ +NvU8 nvGetGpuLogIndex(void) +{ + NVDevEvoPtr pDevEvo; + NvU8 gpuLogIndex = 0; + + tryAgain: + FOR_ALL_EVO_DEVS(pDevEvo) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pDevEvo->pSubDevices[sd] == NULL) { + continue; + } + if (gpuLogIndex == pDevEvo->pSubDevices[sd]->gpuLogIndex) { + gpuLogIndex++; + if (gpuLogIndex == 0xFF) { + nvAssert(!"Too many GPUs"); + return NV_INVALID_GPU_LOG_INDEX; + } + goto tryAgain; + } + } + } + + return gpuLogIndex; +} + +/*! + * Return whether there are active heads on this pDispEvo. + */ +static NvBool HasActiveHeads(NVDispEvoPtr pDispEvo) +{ + return nvGetActiveHeadMask(pDispEvo) != 0; +} + +static void BlankHeadEvo(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + struct NvKmsCompositionParams emptyCursorCompParams = { }; + + /* + * If core channel surface is supported, ->SetSurface() + * disables Lut along with core channel surface. Otherwise need to disable + * Lut explicitly. 
+ */ + if (!pDevEvo->hal->caps.supportsCoreChannelSurface) { + pDevEvo->hal->SetLUTContextDma(pDispEvo, + head, + NULL /* pSurfEvo */, + FALSE /* baseLutEnabled */, + FALSE /* outputLutEnabled */, + updateState, + pHeadState->bypassComposition); + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + + pDevEvo->hal->SetCursorImage(pDevEvo, + head, + NULL /* pSurfaceEvo */, + updateState, + &emptyCursorCompParams); + + { + NVFlipChannelEvoHwState hwState = { { 0 } }; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + pDevEvo->hal->Flip(pDevEvo, + pDevEvo->head[head].layer[layer], + &hwState, + updateState, + FALSE /* bypassComposition */); + } + } + + nvPopEvoSubDevMask(pDevEvo); +} + +void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + nvAssert(orIndex != NV_INVALID_OR); + nvAssert(pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)); + + pConnectorEvo->or.ownerHeadMask[orIndex] &= ~NVBIT(head); + + /* Disable the palette, cursor, and ISO ctxDma on this head. */ + BlankHeadEvo(pDispEvo, head, updateState); + + // Only tear down the actual output for SLI primary. + nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner); + + pDevEvo->hal->ORSetControl(pDevEvo, + pConnectorEvo, + pTimings->protocol, + orIndex, + pConnectorEvo->or.ownerHeadMask[orIndex], + updateState); + + /* + * Tell RM that no DisplayID is associated with this head anymore. + */ + pDevEvo->hal->HeadSetDisplayId(pDevEvo, head, 0x0, updateState); + + nvPopEvoSubDevMask(pDevEvo); + + pModesetUpdateState->connectorIds = + nvAddDpyIdToDpyIdList(pHeadState->pConnectorEvo->displayId, + pModesetUpdateState->connectorIds); +} + +void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo, + const NvU32 head, + NVDPLibModesetStatePtr pDpLibModesetState, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + NvU32 i; + + nvAssert(orIndex != NV_INVALID_OR); + nvAssert(!(pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head))); + nvAssert(pHeadState->activeRmId != 0); + + FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.ownerHeadMask[orIndex]) { + nvAssert(pTimings->protocol == + pDispEvo->headState[i].timings.protocol); + } FOR_EACH_INDEX_IN_MASK_END; + + pConnectorEvo->or.ownerHeadMask[orIndex] |= NVBIT(head); + + // Only set up the actual output for SLI primary. + nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner); + + pDevEvo->hal->ORSetControl(pDevEvo, + pConnectorEvo, + pTimings->protocol, + orIndex, + pConnectorEvo->or.ownerHeadMask[orIndex], + updateState); + + + /* Tell RM which DisplayID is associated with the head. 
*/ + pDevEvo->hal->HeadSetDisplayId(pDevEvo, + head, pHeadState->activeRmId, + updateState); + + nvPopEvoSubDevMask(pDevEvo); + + pModesetUpdateState->connectorIds = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, + pModesetUpdateState->connectorIds); + pModesetUpdateState->pDpLibModesetState[head] = pDpLibModesetState; +} + +void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NvU16 x, + NvU16 y, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[pDispEvo->displayOwner].headState[head]; + + pSdHeadState->viewPortPointIn.x = x; + pSdHeadState->viewPortPointIn.y = y; + + EvoSetViewportPointIn(pDispEvo, head, x, y, updateState); +} + +// +// Sets the Update method, which causes all the other methods in the PB (pushbuffer) to take effect. +// +static void EvoUpdateAndKickOffWithNotifier( + const NVDispEvoRec *pDispEvo, + NvBool notify, + NvBool sync, int notifier, + NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + // Calling code should reject operations that send updates while the console + // is active. + nvAssert(!pDevEvo->coreInitMethodsPending); + + // It doesn't make sense to request sync without requesting a notifier. + nvAssert(!sync || notify); + + if (notify) { + // Clear the completion notifier. + pDevEvo->hal->InitCompNotifier(pDispEvo, notifier); + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetNotifier(pDevEvo, notify, sync, notifier, + updateState); + pDevEvo->hal->Update(pDevEvo, updateState, releaseElv); + nvPopEvoSubDevMask(pDevEvo); + + // Wait for completion. + if (sync) { + pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier); + } + + if (notify) { + const NVDispEvoRec *pDispEvoTmp; + NVEvoUpdateState coreUpdateState = { }; + NvU32 sd; + + // To work around HW bug 1945716 and to prevent subsequent core updates + // from triggering unwanted notifier writes, set the core channel + // completion notifier control and context DMA disables when + // notification is not requested. + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetNotifier(pDevEvo, + FALSE /* notify */, + FALSE /* awaken */, + 0 /* notifier */, + &coreUpdateState); + nvPopEvoSubDevMask(pDevEvo); + + // SetCoreNotifier is only expected to push core channel methods. + FOR_ALL_EVO_DISPLAYS(pDispEvoTmp, sd, pDevEvo) { + if (pDispEvoTmp == pDispEvo) { + nvAssert(coreUpdateState.subdev[sd].channelMask == + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE)); + } else { + nvAssert(coreUpdateState.subdev[sd].channelMask == 0x0); + } + } + + // We don't really need to kick off here, but might as well to keep the + // state cache up to date. Note that we intentionally don't use + // pDevEvo->hal->Update since we don't want another Update. + nvDmaKickoffEvo(pDevEvo->core); + } + + return; +} + +void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync, + NVEvoUpdateState *updateState, NvBool releaseElv) +{ + EvoUpdateAndKickOffWithNotifier(pDispEvo, sync, sync, 0, updateState, + releaseElv); +} + +void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + // IMP pre-modeset + pDevEvo->hal->PrePostIMP(pDispEvo, TRUE /* isPre */); + + // Do the update + nvEvoUpdateAndKickOff(pDispEvo, TRUE, updateState, TRUE /* releaseElv */); + + // IMP post-modeset + pDevEvo->hal->PrePostIMP(pDispEvo, FALSE /* isPre */); +} + +/*! 
+ * Tell RM not to expect anything other than a stall lock change during the next + * update. + */ +void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvBool isVrr, + NvBool enable) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS params = { }; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + nvAssert(!pTimings->interlaced && !pTimings->doubleScan); + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.bArmLWSV = enable; + params.bVrrState = isVrr; + params.vActive = nvEvoVisibleHeight(pTimings); + params.vfp = pTimings->rasterSize.y - + pTimings->rasterBlankStart.y; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR, + &params, sizeof(params)) + != NVOS_STATUS_SUCCESS) { + nvAssert(!"ARM_LIGHTWEIGHT_SUPERVISOR failed"); + } +} + +/* + * Convert from NVHwModeTimingsEvoPtr to NvModeTimingsPtr. + * + * Note that converting from NvModeTimingsPtr to + * NVHwModeTimingsEvoPtr (via + * ConstructHwModeTimingsFromNvModeTimings()) and converting back from + * NVHwModeTimingsEvoPtr to NvModeTimingsPtr (via + * nvConstructNvModeTimingsFromHwModeTimings()) can lose precision in + * the case of interlaced modes due to the division by 2. This + * function should only be used for reporting purposes. + */ + +void +nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings, + NvModeTimingsPtr pModeTimings) +{ + NvU32 rasterBlankEndY, rasterSyncEndY; + + if (!pTimings || !pModeTimings) { + nvAssert(!"Null params"); + return; + } + + pModeTimings->pixelClockHz = KHzToHz(pTimings->pixelClock); + pModeTimings->hVisible = nvEvoVisibleWidth(pTimings); + pModeTimings->hSyncStart = pTimings->rasterSize.x - + pTimings->rasterBlankEnd.x - 1; + pModeTimings->hSyncEnd = pTimings->rasterSize.x - + pTimings->rasterBlankEnd.x + + pTimings->rasterSyncEnd.x; + pModeTimings->hTotal = pTimings->rasterSize.x; + pModeTimings->vVisible = nvEvoVisibleHeight(pTimings); + rasterBlankEndY = pTimings->rasterBlankEnd.y + 1; + rasterSyncEndY = pTimings->rasterSyncEnd.y + 1; + + if (pTimings->interlaced) { + rasterBlankEndY *= 2; + rasterSyncEndY *= 2; + } + + /* + * The real pixel clock and width values for modes using YUV 420 emulation + * are half of the incoming values parsed from the EDID. This conversion is + * performed here, so NvModeTimings will have the user-visible (full width) + * values, and NVHwModeTimingsEvo will have the real (half width) values. 
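+ * + * For example (illustrative numbers): a 3840x2160@60 HDMI mode driven as + * SW YUV 4:2:0 has hardware timings with hVisible = 1920 and a 297000 KHz + * pixel clock; the doubling below reports the user-visible 3840-wide mode + * with a 594000 KHz pixel clock.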
+ */ + if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) { + pModeTimings->pixelClockHz *= 2; + pModeTimings->hVisible *= 2; + pModeTimings->hSyncStart *= 2; + pModeTimings->hSyncEnd *= 2; + pModeTimings->hTotal *= 2; + } + + pModeTimings->vSyncStart = pTimings->rasterSize.y - rasterBlankEndY; + pModeTimings->vSyncEnd = pTimings->rasterSize.y - rasterBlankEndY + + rasterSyncEndY; + pModeTimings->vTotal = pTimings->rasterSize.y; + pModeTimings->interlaced = pTimings->interlaced; + pModeTimings->doubleScan = pTimings->doubleScan; + pModeTimings->hSyncNeg = pTimings->hSyncPol; + pModeTimings->hSyncPos = !pTimings->hSyncPol; + pModeTimings->vSyncNeg = pTimings->vSyncPol; + pModeTimings->vSyncPos = !pTimings->vSyncPol; + pModeTimings->RRx1k = (pModeTimings->pixelClockHz / + (pModeTimings->hTotal * + pModeTimings->vTotal)); + + if (pModeTimings->doubleScan) { + pModeTimings->vVisible /= 2; + pModeTimings->vSyncStart /= 2; + pModeTimings->vSyncEnd /= 2; + pModeTimings->vTotal /= 2; + } + + pModeTimings->hdmi3D = pTimings->hdmi3D; + pModeTimings->yuv420Mode = pTimings->yuv420Mode; +} + + + +/* + * Tweak pTimings to be compatible with gsync. + */ + +static void TweakTimingsForGsync(const NVDpyEvoRec *pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString, + const enum NvKmsStereoMode stereo) +{ + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS gsyncOptTimingParams = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NvModeTimings modeTimings; + NvU32 ret; + + /* + * if 3D Vision Stereo is enabled, do not actually + * tweak the modetimings; WAR for bug 692266 + */ + + if (nvIs3DVisionStereoEvo(stereo)) { + + nvEvoLogInfoString(pInfoString, + "Not adjusting mode timings of %s for Quadro Sync " + "compatibility since 3D Vision Stereo is enabled.", + pDpyEvo->name); + return; + } + + gsyncOptTimingParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + + gsyncOptTimingParams.output = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR; + gsyncOptTimingParams.adjust = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP; + + } else if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + + gsyncOptTimingParams.output = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC; + gsyncOptTimingParams.adjust = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT; + } + + gsyncOptTimingParams.pixelClockHz = KHzToHz(pTimings->pixelClock); + + if (pTimings->interlaced) { + gsyncOptTimingParams.structure = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED; + } else { + gsyncOptTimingParams.structure = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE; + } + + gsyncOptTimingParams.hDeltaStep = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS; + gsyncOptTimingParams.vDeltaStep = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS; + gsyncOptTimingParams.hDeltaMax = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS; + gsyncOptTimingParams.vDeltaMax = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS; + + gsyncOptTimingParams.hSyncEnd = pTimings->rasterSyncEnd.x + 1; + gsyncOptTimingParams.hBlankEnd = pTimings->rasterBlankEnd.x + 1; + gsyncOptTimingParams.hBlankStart = pTimings->rasterBlankStart.x + 1; + gsyncOptTimingParams.hTotal = pTimings->rasterSize.x; + + gsyncOptTimingParams.vSyncEnd = pTimings->rasterSyncEnd.y + 1; + gsyncOptTimingParams.vBlankEnd = 
pTimings->rasterBlankEnd.y + 1; + gsyncOptTimingParams.vBlankStart = pTimings->rasterBlankStart.y + 1; + gsyncOptTimingParams.vTotal = pTimings->rasterSize.y; + + gsyncOptTimingParams.vInterlacedBlankEnd = pTimings->rasterVertBlank2End; + gsyncOptTimingParams.vInterlacedBlankStart = + pTimings->rasterVertBlank2Start; + + switch (pTimings->protocol) { + case NVKMS_PROTOCOL_DAC_RGB: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT; + break; + case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC: + nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle external TMDS."); + // fallthrough + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B; + break; + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_HDMI_FRL: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL; + break; + case NVKMS_PROTOCOL_DSI: + nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle DSI."); + return; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING, + &gsyncOptTimingParams, + sizeof(gsyncOptTimingParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to convert to Quadro Sync safe timing"); + /* do not apply the timings returned by RM if the call failed */ + return; + } + + nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings); + + nvEvoLogInfoString(pInfoString, + "Adjusting Mode Timings for Quadro Sync Compatibility"); + nvEvoLogInfoString(pInfoString, " Old Timings:"); + nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings); + + pTimings->rasterSyncEnd.x = gsyncOptTimingParams.hSyncEnd - 1; + pTimings->rasterSyncEnd.y = gsyncOptTimingParams.vSyncEnd - 1; + pTimings->rasterBlankEnd.x = gsyncOptTimingParams.hBlankEnd - 1; + pTimings->rasterBlankEnd.y = gsyncOptTimingParams.vBlankEnd - 1; + pTimings->rasterBlankStart.x = gsyncOptTimingParams.hBlankStart - 1; + pTimings->rasterBlankStart.y = gsyncOptTimingParams.vBlankStart - 1; + pTimings->rasterSize.x = gsyncOptTimingParams.hTotal; + pTimings->rasterSize.y = gsyncOptTimingParams.vTotal; + + if (gsyncOptTimingParams.structure == + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED) { + pTimings->rasterVertBlank2Start = + gsyncOptTimingParams.vInterlacedBlankStart; + pTimings->rasterVertBlank2End = + gsyncOptTimingParams.vInterlacedBlankEnd; + } + + pTimings->pixelClock = HzToKHz(gsyncOptTimingParams.pixelClockHz); // Hz to KHz + + nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings); + + nvEvoLogInfoString(pInfoString, " New Timings:"); + nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings); +} + + + +/*! + * Check whether rasterlock is possible between the two sets of rastertimings. 
+ * Note that we don't compare viewports, but I don't believe the viewport size + * affects whether it is possible to rasterlock. + */ + +static NvBool RasterLockPossible(const NVHwModeTimingsEvo *pTimings1, + const NVHwModeTimingsEvo *pTimings2) +{ + return ((pTimings1->rasterSize.x == pTimings2->rasterSize.x) && + (pTimings1->rasterSize.y == pTimings2->rasterSize.y) && + (pTimings1->rasterSyncEnd.x == pTimings2->rasterSyncEnd.x) && + (pTimings1->rasterSyncEnd.y == pTimings2->rasterSyncEnd.y) && + (pTimings1->rasterBlankEnd.x == pTimings2->rasterBlankEnd.x) && + (pTimings1->rasterBlankEnd.y == pTimings2->rasterBlankEnd.y) && + (pTimings1->rasterBlankStart.x == pTimings2->rasterBlankStart.x) && + (pTimings1->rasterBlankStart.y == pTimings2->rasterBlankStart.y) && + (pTimings1->rasterVertBlank2Start == + pTimings2->rasterVertBlank2Start) && + (pTimings1->rasterVertBlank2End == + pTimings2->rasterVertBlank2End) && + (pTimings1->pixelClock == pTimings2->pixelClock) && + (pTimings1->hSyncPol == pTimings2->hSyncPol) && + (pTimings1->vSyncPol == pTimings2->vSyncPol) && + (pTimings1->interlaced == pTimings2->interlaced) && + (pTimings1->doubleScan == pTimings2->doubleScan)); + +} + +/*! + * Fill the overscan color struct to be passed to SetRasterParams based on + * whether or not SW yuv420 is enabled. + * + * \param[out] pOverscanColor The overscan color struct to be filled + * \param[in] yuv420 Whether or not SW yuv420 is enabled + */ +static void SetOverscanColor(NVEvoColorPtr pOverscanColor, NvBool yuv420) +{ + // Black in RGB format. + // If we're using an emulated YUV 4:2:0 mode, set the equivalent in + // YUV ITU-R BT.709 (64/64/512). + if (yuv420) { + pOverscanColor->red = 64; + pOverscanColor->green = 64; + pOverscanColor->blue = 512; + } else { + pOverscanColor->red = 0; + pOverscanColor->green = 0; + pOverscanColor->blue = 0; + } + +#if defined(DEBUG) + // Override the overscan color to red in debug builds. + // XXX This will look different for YUV 4:2:0 + pOverscanColor->red = 1023; + pOverscanColor->green = 0; + pOverscanColor->blue = 0; +#endif +} + +/* + * Send the raster timings for the pDpyEvo to EVO. + */ +void nvEvoSetTimings(NVDispEvoPtr pDispEvo, + const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NVEvoColorRec overscanColor; + + nvPushEvoSubDevMaskDisp(pDispEvo); + SetOverscanColor(&overscanColor, (pTimings->yuv420Mode == + NV_YUV420_MODE_SW)); + + pDevEvo->hal->SetRasterParams(pDevEvo, head, + pTimings, &overscanColor, updateState); + + // Set the head parameters + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].interlaced = + pTimings->interlaced; + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hdmi3D = + pTimings->hdmi3D; + + /* + * Current HW does not support the combination of HW YUV420 and DSC. + * HW YUV420 is currently only supported with HDMI, so we should never see + * the combination of DP DSC and HW YUV420. + * The combination of HDMI FRL DSC and HW YUV420 should be disallowed by + * the HDMI library. 
+ */ + nvAssert(!((pTimings->yuv420Mode == NV_YUV420_MODE_HW) && + (pTimings->dpDsc.enable || + pTimings->hdmiFrlConfig.dscInfo.bEnableDSC))); + + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 = + (pTimings->yuv420Mode == NV_YUV420_MODE_HW); + + EvoUpdateHeadParams(pDispEvo, head, updateState); + + pDevEvo->hal->SetDscParams(pDispEvo, head, pTimings); + + nvPopEvoSubDevMask(pDevEvo); +} + +/* + * growTopologies() - Increase the size of the provided raster lock topology by + * 1. + * + * This involves incrementing *numTopologies, reallocating the topos array, and + * initializing the new entry. + */ +static RasterLockTopology *growTopologies(RasterLockTopology *topos, + unsigned int *numTopologies) +{ + RasterLockTopology *newTopos, *topo; + unsigned int i, numTopos; + + numTopos = *numTopologies; + + numTopos++; + newTopos = nvRealloc(topos, numTopos * sizeof(RasterLockTopology)); + if (!newTopos) { + nvFree(topos); + return NULL; + } + + topo = &newTopos[numTopos - 1]; + + for (i = 0; i < NVKMS_MAX_SUBDEVICES; i++) { + topo->pDispEvoOrder[i] = NULL; + } + + *numTopologies = numTopos; + + return newTopos; + +} /* growTopologies() */ + +/* + * GetRasterLockTopologies() - Determine which GPUs to consider for locking (or + * unlocking) displays. This is one of the following: + * 1. SLI video bridge order, if SLI is enabled; + * 2. A single GPU, + * in that order. + * + * Note that we still go through the same codepaths for the last degenerate + * case, in order to potentially lock heads on the same GPU together. + */ +static RasterLockTopology *GetRasterLockTopologies(NVDevEvoPtr pDevEvo, + unsigned int *numTopologies) +{ + unsigned int i; + RasterLockTopology *topos = NULL; + + *numTopologies = 0; + + if (pDevEvo->numSubDevices > 1 && pDevEvo->sli.bridge.present) { + NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS params = { 0 }; + NvU32 ret; + + /* In SLI, with a video bridge. Get the video bridge order from RM. */ + + if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER, + &params, sizeof(params))) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "NvRmControl(GET_VIDLINK_ORDER) failed; " + "ret: %d\n", ret); + return NULL; + } + + if (params.ConnectionCount > 0) { + RasterLockTopology *topo; + topos = growTopologies(topos, numTopologies); + + if (!topos) { + return NULL; + } + + topo = &topos[*numTopologies - 1]; + + /* + * For some reason this interface returns a mask instead of an + * index, so we have to convert + */ + for (i = 0; i < pDevEvo->numSubDevices; i++) { + NvU32 subDeviceMask = params.Order[i]; + NvU32 sd = 0; + + nvAssert(nvPopCount32(subDeviceMask) == 1); + + if (!subDeviceMask) continue; + + while (!(subDeviceMask & (1 << sd))) sd++; + + nvAssert(sd < NVKMS_MAX_SUBDEVICES); + nvAssert(pDevEvo->pDispEvo[sd] != NULL); + + /* SLI Mosaic. */ + topo->pDispEvoOrder[i] = pDevEvo->pDispEvo[sd]; + } + } + } else { + /* Single GPU or bridgeless SLI */ + + NVDispEvoPtr pDispEvo; + unsigned int sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + RasterLockTopology *topo; + topos = growTopologies(topos, numTopologies); + + if (!topos) { + return NULL; + } + + topo = &topos[*numTopologies - 1]; + + topo->pDispEvoOrder[0] = pDispEvo; + } + } + + return topos; + +} // GetRasterLockTopologies() + +/* + * ApplyLockActionIfPossible() - Check if the given action is a valid + * transition for this pEvoSubDev's state, and apply it if so. 
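+ * The transition is first probed by calling scanLockState() with a NULL + * head list; only if the state machine accepts it is it committed with the + * full list of active heads (the triggering head first, terminated by + * NV_INVALID_HEAD).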
+ * Return TRUE if any hardware state needs to be updated, FALSE o.w. + */ +static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action) +{ + NvBool changed = FALSE; + NvU32 head; + + if (!pEvoSubDev) { + return FALSE; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + action, NULL)) { + NvU32 otherHead; + unsigned int i = 0; + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1]; + + pHeads[i++] = head; + for (otherHead = 0; otherHead < NVKMS_MAX_HEADS_PER_DISP; + otherHead++) { + if (!nvHeadIsActive(pDispEvo, otherHead)) { + continue; + } + if (otherHead == head) { + continue; + } + + pHeads[i++] = otherHead; + } + nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads); + + /* + * scanLockState transitions (such as nvEvoLockHWStateLockHeads) + * will update headControlAssy values for all heads, so we should + * update flipLock and flipLockPin for all heads as well. + */ + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[pHeads[i]]; + /* + * Reset the fliplock pin, if it's not in use for framelock, + * and unregister our use of the fliplock pin + */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, + pHeads[i])) { + pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + } + pEvoSubDev->flipLockPinSetForSliHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForSliHeadMask, + pHeads[i]); + + /* + * Disable fliplock, if it's not in use for framelock, and + * unregister our need for fliplock to be enabled + */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, + pHeads[i])) { + pHC->flipLock = FALSE; + } + pEvoSubDev->flipLockEnabledForSliHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForSliHeadMask, + pHeads[i]); + } + + changed = TRUE; + } + } + + return changed; + +} // ApplyLockActionIfPossible() + + +/* + * UnlockRasterLockGroup() - Unlock all GPUs in the rasterlock group associated + * with the given device. + */ + +static void UnlockRasterLockGroup(NVDevEvoPtr pDevEvo) { + RasterLockTopology *topos, *topo; + unsigned int numTopos; + NvBool changed = FALSE; + + topos = GetRasterLockTopologies(pDevEvo, &numTopos); + if (!topos) { + return; + } + + for (topo = topos; topo < topos + numTopos; topo++) { + int maxDisps = 0, i; + + for (i = 0; i < NVKMS_MAX_SUBDEVICES && topo->pDispEvoOrder[i]; i++) { + maxDisps = i; + } + + for (i = maxDisps; i >= 0; i--) { + NVDispEvoPtr pDispEvo = topo->pDispEvoOrder[i]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 sd = pDispEvo->displayOwner; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* We want to evaluate all of these, so don't use || */ + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_DISABLE_VRR); + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_REM_SLI); + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_UNLOCK_HEADS); + + /* Finally, update the hardware if anything has changed */ + if (changed) { + UpdateEvoLockState(); + changed = FALSE; + } + + pEvoSubDev->flipLockProhibitedHeadMask = 0x0; + } + } + + /* Disable any SLI video bridge features we may have enabled for locking. 
*/ + pDevEvo->sli.bridge.powerNeededForRasterLock = FALSE; + nvEvoUpdateSliVideoBridge(pDevEvo); + + nvFree(topos); + +} // UnlockRasterLockGroup() + +void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + nvAssert(!nvHeadIsActive(pDispEvo, head)); + } + } +} + +/*! + * Disable locking-related state. + */ +static void DisableLockState(NVDevEvoPtr pDevEvo, + NvU32 *dispNeedsUpdate, + NVEvoUpdateState *updateState) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + *dispNeedsUpdate = 0; + + /* Disable flip lock. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + NvU32 flipLockEnable = 0; + NvBool needsUpdate; + + if (!nvUpdateFlipLockEvoOneHead(pDispEvo, head, + &flipLockEnable, TRUE /* set */, + &needsUpdate, + updateState)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Unable to update fliplock"); + } + + if (needsUpdate) { + *dispNeedsUpdate |= (1 << dispIndex); + } + } + } + + /* Disable raster lock. */ + + UnlockRasterLockGroup(pDevEvo); + + /* Reset the EVO locking state machine. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvEvoStateStartNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]); + } +} + +void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo, NvU32 *dispNeedsEarlyUpdate, + NVEvoUpdateState *updateState) +{ + DisableLockState(pDevEvo, dispNeedsEarlyUpdate, updateState); +} + +/*! + * Set up raster lock between GPUs, if applicable. + */ +void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock) +{ + RasterLockTopology *topos, *topo; + unsigned int numTopos; + + /* + * Always unlock everything on this rasterlock group to begin with a clean + * slate. We'll relock below, if possible. + */ + + UnlockRasterLockGroup(pDevEvo); + + if (!doRasterLock) { + return; + } + + topos = GetRasterLockTopologies(pDevEvo, &numTopos); + if (!topos) { + return; + } + + for (topo = topos; topo < topos + numTopos; topo++) { + FinishModesetOneTopology(topo); + } + + nvFree(topos); + +} + +static NvBool EnableVrr(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NvBool ret; + + SyncEvoLockState(); + + ret = pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, NV_EVO_ENABLE_VRR, + pHeads); + if (!ret) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to enable VRR frame lock"); + return FALSE; + } + + UpdateEvoLockState(); + + return TRUE; +} + +/*! + * Updates the hardware based on software needs tracked in pDevEvo->sli.bridge. + * Call this function after changing any of those needs variables. + */ +void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo) +{ + NV0080_CTRL_GPU_SET_VIDLINK_PARAMS params = { 0 }; + const NvBool enable = pDevEvo->sli.bridge.powerNeededForRasterLock; + NvU32 status; + + if (pDevEvo->sli.bridge.powered == enable) { + return; + } + + if (enable) { + /* SLI should be prohibited earlier if no bridge is present. */ + nvAssert(pDevEvo->sli.bridge.present); + } + + params.enable = enable ? 
+ NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE : + NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_SET_VIDLINK, + &params, sizeof(params)); + if (status != NV_OK) { + nvAssert(!"NV0080_CTRL_CMD_GPU_SET_VIDLINK failed"); + } + + pDevEvo->sli.bridge.powered = enable; +} + +/* + * FinishModesetOneTopology() - Set up raster lock between GPUs, if applicable, + * for one RasterLockTopology. Called in a loop from nvEvoLockStatePostModeset(). + */ + +static void FinishModesetOneTopology(RasterLockTopology *topo) +{ + NVDispEvoPtr *pDispEvoOrder = topo->pDispEvoOrder; + NvU32 numUsedGpus = 0; + const NVHwModeTimingsEvo *pPrevTimings = NULL; + NvBool headInUse[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]; + NvBool lockPossible = TRUE, foundUnused = FALSE; + NvBool vrrInUse = FALSE; + NvBool flipLockPossible = TRUE; + unsigned int i, j; + NvU8 allowFlipLockGroup = 0; + + /* + * First, look for devices with VRR enabled. If we find any, go into the + * special VRR framelock mode and don't try to rasterlock any other heads. + */ + for (i = 0; i < NVKMS_MAX_SUBDEVICES && pDispEvoOrder[i]; i++) { + NVDispEvoPtr pDispEvo = pDispEvoOrder[i]; + NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 vrrHeads[NVKMS_MAX_HEADS_PER_DISP + 1]; + unsigned int numVrrHeads = 0; + NvU32 head; + + if (!pDevEvo->gpus || !pDevEvo->vrr.enabled) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head) && + (pDispEvo->headState[head].timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE)) { + vrrHeads[numVrrHeads++] = head; + } + } + + if (numVrrHeads > 0) { + vrrHeads[numVrrHeads] = NV_INVALID_HEAD; + if (EnableVrr(pDispEvo, &pDevEvo->gpus[sd], vrrHeads)) { + vrrInUse = TRUE; + } + } + } + + if (vrrInUse) { + return; + } + + nvkms_memset(headInUse, 0, sizeof(headInUse)); + + /* + * Next, figure out if we can perform locking and which GPUs/heads we can + * use. For now, only attempt locking if all heads on the device have + * compatible timings and are consecutive in the video bridge order. + */ + for (i = 0; i < NVKMS_MAX_SUBDEVICES && pDispEvoOrder[i]; i++) { + NVDispEvoPtr pDispEvo = pDispEvoOrder[i]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + + /* + * We can't lock if there is an unused GPU between two used GPUs on the + * video bridge chain. + * We must check if pDevEvo->gpus is NULL in case we haven't been + * through AllocDeviceObject for this pDev (yet?). + */ + if (!HasActiveHeads(pDispEvo) || + !pDevEvo->gpus) { + foundUnused = TRUE; + continue; + } else { + if (foundUnused) { + lockPossible = FALSE; + break; + } + + numUsedGpus++; + } + + /* + * Compare modetimings for each active display with the previous one we + * looked at. If any of them don't match, punt on locking. + */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + /* + * Only flip lock if all of the heads are in the same + * allowFlipLockGroup. 
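+ * (The first active head seen seeds allowFlipLockGroup; a mismatch on any + * later head disqualifies fliplock for the entire topology.)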
+ */ + if (allowFlipLockGroup == 0) { + allowFlipLockGroup = pHeadState->allowFlipLockGroup; + } else if (allowFlipLockGroup != pHeadState->allowFlipLockGroup) { + flipLockPossible = FALSE; + } + + if (pPrevTimings && + !RasterLockPossible(pTimings, pPrevTimings)) { + lockPossible = FALSE; + goto exitHeadLoop; + } + + headInUse[i][head] = TRUE; + + pPrevTimings = pTimings; + } + +exitHeadLoop: + if (!lockPossible) { + break; + } + } + + if (!lockPossible) { + return; + } + + /* + * Finally, actually set up locking: go through the video bridge order + * setting it up. + */ + for (i = 0; i < NVKMS_MAX_SUBDEVICES && pDispEvoOrder[i]; i++) { + NVDispEvoPtr pDispEvo = pDispEvoOrder[i]; + NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head[NVKMS_MAX_HEADS_PER_DISP + 1]; + unsigned int usedHeads = 0; + NvBool headsLocked = FALSE, gpusLocked = FALSE; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* If we're past the end of the chain, we're done. */ + if (i == numUsedGpus) { + break; + } + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i][j]) { + + head[usedHeads] = j; + + usedHeads++; + } + } + head[usedHeads] = NV_INVALID_HEAD; + + nvAssert(head[0] != NV_INVALID_HEAD); + + /* First lock the heads together, if we have enough heads */ + if (usedHeads > 1) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + + if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_HEADS, + head)) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Unable to lock heads"); + } else { + headsLocked = TRUE; + } + } + + /* Then set up cross-GPU locking, if we have enough active GPUs */ + if (numUsedGpus > 1) { + NVEvoLockAction action; + NVEvoLockPin *pServerPin = &pDevEvo->gpus[sd].sliServerLockPin; + NVEvoLockPin *pClientPin = &pDevEvo->gpus[sd].sliClientLockPin; + + *pServerPin = NV_EVO_LOCK_PIN_ERROR; + *pClientPin = NV_EVO_LOCK_PIN_ERROR; + + if (i == 0) { + action = NV_EVO_ADD_SLI_PRIMARY; + } else { + if (i == (numUsedGpus - 1)) { + action = NV_EVO_ADD_SLI_LAST_SECONDARY; + } else { + action = NV_EVO_ADD_SLI_SECONDARY; + } + } + + if (action == NV_EVO_ADD_SLI_PRIMARY || + action == NV_EVO_ADD_SLI_SECONDARY) { + /* Find pin for server to next */ + NVDispEvoPtr pDispEvoNext = pDispEvoOrder[i + 1]; + NvU32 headNext = 0; + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i + 1][j]) { + headNext = j; + break; + } + } + + GetRasterLockPin(pDispEvo, head[0], + pDispEvoNext, headNext, + pServerPin, NULL); + } + + if (action == NV_EVO_ADD_SLI_SECONDARY || + action == NV_EVO_ADD_SLI_LAST_SECONDARY) { + + /* Find pin for client to prev */ + NVDispEvoPtr pDispEvoPrev = pDispEvoOrder[i - 1]; + NvU32 headPrev = 0; + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i - 1][j]) { + headPrev = j; + break; + } + } + + GetRasterLockPin(pDispEvo, head[0], + pDispEvoPrev, headPrev, + NULL, pClientPin); + } + + /* + * Normally, the scanlock state machine can determine the client + * lockout window most appropriate for the given configuration. + * However, if we are driving pixels over the DR bus (rather than + * driving a monitor directly via an OR), then the RM programs the + * VPLL with a multiplier that is double the rate of the DR primary. + * This can be inexact, so we may need to crash lock more often than + * when the VPLL settings are identical; not doing so may cause + * rasterlock to fail. 
Frequent crash locking when driving pixels + * over the DR bus is okay, since they are cleaned up before being + * sent to a non-DR OR. + */ + pDevEvo->gpus[sd].forceZeroClientLockoutWindow = + (sd != pDispEvo->displayOwner); + + if (!pDevEvo->gpus[sd].scanLockState(pDispEvo, &pDevEvo->gpus[sd], + action, head)) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Unable to set up SLI locking"); + } else { + gpusLocked = TRUE; + } + } + + /* + * On certain GPUs, we need to enable the video bridge (MIO pads) when + * enabling rasterlock. Note that we don't disable in this function, + * so if gpusLocked is true for any iteration of these loops, this bit + * will be on. + */ + if (gpusLocked && NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER)) { + pDevEvo->sli.bridge.powerNeededForRasterLock = TRUE; + nvEvoUpdateSliVideoBridge(pDevEvo); + } + + /* If anything changed, update the hardware */ + if (headsLocked || gpusLocked) { + + UpdateEvoLockState(); + + /* + * Enable fliplock, if we can + * + * XXX this should arguably be done in the state machine proper. + * However, in order to guarantee that we get rasterlock before + * attempting fliplock (and to be symmetric with framelock, which + * turns on and off fliplock from GLS), do it here for now. + */ + if (gpusLocked && flipLockPossible) { + NVEvoUpdateState updateState = { }; + + /* + * Before turning on flip lock, we're supposed to wait for + * raster lock sync. The update performed in + * UpdateEvoLockState() to kick off and apply the rasterlock + * params must be synchronous as EVO reports lock success if + * locking isn't enabled, so we could race through the + * WaitForLock check below otherwise. + */ + + for (j = 0; j < usedHeads; j++) { + NvU32 tmpHead = head[j]; + + NVEvoLockPin pin = + nvEvoGetPinForSignal(pDispEvo, &pDevEvo->gpus[sd], + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + /* Wait for the raster lock to sync in.. */ + if (pin == NV_EVO_LOCK_PIN_ERROR || + !EvoWaitForLock(pDevEvo, sd, tmpHead, EVO_RASTER_LOCK)) { + flipLockPossible = FALSE; + break; + } + + /* + * Enable fliplock, and register that we've enabled + * fliplock for SLI to ensure it doesn't get disabled + * later. + */ + pDevEvo->gpus[sd].headControl[tmpHead].flipLockPin = pin; + pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask = + HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask, tmpHead); + + pDevEvo->gpus[sd].headControl[tmpHead].flipLock = TRUE; + pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask = + HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask, tmpHead); + + EvoUpdateHeadParams(pDispEvo, tmpHead, &updateState); + } + + /* + * This must be synchronous as EVO reports lock success if + * locking isn't enabled, so we could race through the + * WaitForLock check below otherwise. + */ + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + + /* + * Wait for flip lock sync. I'm not sure this is really + * necessary, but the docs say to do this before attempting any + * flips in the base channel. 
+ */ + for (j = 0; j < usedHeads; j++) { + if (flipLockPossible && + !EvoWaitForLock(pDevEvo, sd, head[j], EVO_FLIP_LOCK)) { + flipLockPossible = FALSE; + break; + } + } + } + } + } + +} /* FinishModesetOneTopology() */ + +NvBool nvSetUsageBoundsEvo( + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + NvBool needCoreUpdate; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + needCoreUpdate = pDevEvo->hal->SetUsageBounds(pDevEvo, sd, head, pUsage, + updateState); + + nvPopEvoSubDevMask(pDevEvo); + + pDevEvo->gpus[sd].headState[head].usage = *pUsage; + + return needCoreUpdate; +} + +void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState) +{ + pDevEvo->gpus[sd].headState[head]. + disableMidFrameAndDWCFWatermark = !enable; + + if (pDevEvo->hal->EnableMidFrameAndDWCFWatermark == NULL) { + nvEvoLogDev(pDevEvo, + EVO_LOG_ERROR, + "EnableMidFrameAndDWCFWatermark() is not defined"); + return; + } + + pDevEvo->hal->EnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + enable, + pUpdateState); +} + + +/*! + * Choose current colorSpace and colorRange based on the current mode timings + * and the requested color space and range, and notify clients of any changes. + * + * This needs to be called during a modeset when YUV420 mode may have been + * enabled or disabled, as well as when the requested color space or range have + * changed. + * + * RGB/YUV would be selected for DFP, only RGB would be selected for CRT and + * only YUV would be selected for TV. + * + * If SW YUV420 mode is enabled, EVO HW is programmed with default (RGB color + * space, FULL color range) values, and the real values are used in a + * headSurface composite shader. + */ +void nvChooseCurrentColorSpaceAndRangeEvo( + enum nvKmsPixelDepth pixelDepth, + enum NvYuv420Mode yuv420Mode, + const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace, + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace, + enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange) +{ + enum NvKmsDpyAttributeCurrentColorSpaceValue newColorSpace = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + enum NvKmsDpyAttributeColorRangeValue newColorRange = + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + + /* At depth 18 only RGB and full range are allowed */ + if (pixelDepth == NVKMS_PIXEL_DEPTH_18_444) { + goto done; + } + + /* + * If the current mode timing requires YUV420 compression, we override the + * requested color space with YUV420. + */ + if (yuv420Mode != NV_YUV420_MODE_NONE) { + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420; + } else { + /* + * Note this is an assignment between different enum types. Checking the + * value of requested colorSpace and then assigning the value to current + * colorSpace, to avoid warnings about cross-enum assignment. 
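         *
         * For example, the requested value
         * NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444 is mapped to
         * the distinct enum value
         * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 in the switch
         * below, rather than being assigned across enum types directly.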
+ */ + switch (requestedColorSpace) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB: + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422: + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444: + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444; + break; + default: + nvAssert(!"Invalid Requested ColorSpace"); + } + } + + /* Only limited color range is allowed in YUV colorimetry. */ + if ((newColorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) || + (newColorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) || + (newColorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) { + newColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED; + } else { + newColorRange = requestedColorRange; + } + +done: + *pCurrentColorSpace = newColorSpace; + *pCurrentColorRange = newColorRange; +} + +void nvUpdateCurrentHardwareColorSpaceAndRangeEvo( + NVDispEvoPtr pDispEvo, + const NvU32 head, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const enum NvKmsDpyAttributeColorRangeValue colorRange, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + + nvAssert(pConnectorEvo != NULL); + + // In SW YUV420 mode, HW is programmed with RGB color space and full color + // range. The color space conversion and color range compression happen + // in a headSurface composite shader. + if ((pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) && + (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) { + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL; + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB; + } else { + + // Set default colorimetry to RGB and default color range to full + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL; + + // Set color format + switch (colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr444; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr422; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr420; + break; + default: + nvAssert(!"unrecognized colorSpace"); + } + + switch (pConnectorEvo->legacyType) { + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP: + // program HW with RGB/YCbCr + switch (colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + if (nvEvoIsHDQualityVideoTimings(&pHeadState->timings)) { + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_709; + } else { + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_601; + } + break; + default: + nvAssert(!"unrecognized colorSpace"); + } + break; + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT: + // colorSpace 
isn't used for DEVICE_TYPE_CRT and + // hence should be set to the "unchanged" value + // (i.e. the default - RGB) + nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB); + + // program HW with RGB only + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + break; + default: + nvAssert(!"ERROR: invalid pDpyEvo->type"); + } + + // Only advertise YCbCr444 or YCbCr422 when the corresponding + // colorSpaceCaps is TRUE. + if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) && + !pConnectorEvo->colorSpaceCaps.ycbcr444Capable) { + nvAssert(!"!pConnectorEvo->colorSpaceCaps.ycbcr444Capable"); + } + + if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) && + !pConnectorEvo->colorSpaceCaps.ycbcr422Capable) { + nvAssert(!"!pConnectorEvo->colorSpaceCaps.ycbcr422Capable"); + } + + switch (colorRange) { + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL: + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL; + break; + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED: + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED; + break; + default: + nvAssert(!"Invalid colorRange"); + break; + } + } + + // In YUV colorimetry, only limited color range is allowed. + nvAssert(!((pHeadState->procAmp.colorimetry != NVT_COLORIMETRY_RGB) && + (pHeadState->procAmp.colorRange != NVT_COLOR_RANGE_LIMITED))); + + // Limited color range is not allowed with 18bpp mode + nvAssert(!((pHeadState->timings.pixelDepth == NVKMS_PIXEL_DEPTH_18_444) && + (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED))); + + nvPushEvoSubDevMaskDisp(pDispEvo); + + // Set the procamp head method + pDevEvo->hal->SetProcAmp(pDispEvo, head, pUpdateState); + + // Clean up + nvPopEvoSubDevMask(pDevEvo); +} + +void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NvBool colorSpaceOverride = FALSE; + + /* + * Determine whether or not this dpy will need its color space + * overridden. + * + * This is currently only used for DP 1.3 YUV420 mode, where the + * HW's normal support for carrying color space information + * together with the frame is insufficient. + */ + if ((pTimings->yuv420Mode == NV_YUV420_MODE_SW) && + nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) { + + nvAssert(pDispEvo->pDevEvo->caps.supportsDP13); + colorSpaceOverride = TRUE; + } + + // Only set up the actual output for SLI primary. 
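    //
    // The push/pop pair below follows the usual NVKMS idiom for scoping EVO
    // methods to particular subdevices; a minimal sketch of the idiom
    // (illustrative only; SomeHalMethod is hypothetical):
    //
    //     nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); // methods now reach only sd
    //     pDevEvo->hal->SomeHalMethod(pDevEvo, ...);
    //     nvPopEvoSubDevMask(pDevEvo);             // restore the previous mask
    //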
+    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
+
+    pDevEvo->hal->HeadSetControlOR(pDevEvo, head, pTimings,
+                                   colorSpaceOverride,
+                                   pUpdateState);
+
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+static const struct {
+    NvU32 algo;
+    enum NvKmsDpyAttributeCurrentDitheringModeValue nvKmsDitherMode;
+} ditherModeTable[] = {
+    { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2,
+      NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 },
+    { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2,
+      NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 },
+    { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL,
+      NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL },
+    { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN,
+      NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE }
+};
+
+static const struct {
+    NvU32 type;
+    enum NvKmsDpyAttributeCurrentDitheringDepthValue nvKmsDitherDepth;
+} ditherDepthTable[] = {
+    { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS,
+      NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS },
+    { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS,
+      NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS },
+    { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF,
+      NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE }
+};
+
+/*!
+ * Choose dithering based on the requested dithering config
+ * NVConnectorEvo::or::dither.
+ */
+void nvChooseDitheringEvo(
+    const NVConnectorEvoRec *pConnectorEvo,
+    const enum nvKmsPixelDepth pixelDepth,
+    const NVDpyAttributeRequestedDitheringConfig *pReqDithering,
+    NVDpyAttributeCurrentDitheringConfig *pCurrDithering)
+{
+    NvU32 i;
+    NVDpyAttributeCurrentDitheringConfig currDithering = {
+        .enabled = FALSE,
+        .mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE,
+        .depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE,
+    };
+
+    currDithering.enabled = (pConnectorEvo->or.ditherType !=
+                             NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF);
+
+    for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
+        if (ditherDepthTable[i].type == pConnectorEvo->or.ditherType) {
+            currDithering.depth = ditherDepthTable[i].nvKmsDitherDepth;
+            break;
+        }
+    }
+
+    for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
+        if (ditherModeTable[i].algo == pConnectorEvo->or.ditherAlgo) {
+            currDithering.mode = ditherModeTable[i].nvKmsDitherMode;
+            break;
+        }
+    }
+
+    switch (pReqDithering->state) {
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED:
+        currDithering.enabled = TRUE;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED:
+        currDithering.enabled = FALSE;
+        break;
+    default:
+        nvAssert(!"Unknown Dithering configuration");
+        // Fall through
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO:
+        /*
+         * Leave it initialized based on the value of
+         * NVDpyEvoRec::or::dither::init::enabled.
+         */
+        break;
+    }
+
+    switch (pReqDithering->depth) {
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS:
+        currDithering.depth =
+            NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS:
+        currDithering.depth =
+            NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
+        break;
+    default:
+        nvAssert(!"Unknown Dithering Depth");
+        // Fall through
+    case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO:
+        /*
+         * Leave it initialized based on the value of
+         * NVDpyEvoRec::or::dither::init::type.
+         */
+        break;
+    }
+
+
+    if (nvConnectorUsesDPLib(pConnectorEvo) &&
+        (pReqDithering->state !=
+         NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED)) {
+        NvU32 lutBits = 11;
+
+        /* If we are using a DisplayPort panel with bandwidth constraints
+         * that lower the color depth, consider that while applying
+         * dithering effects.
+         */
+        NvU32 dpBits = nvPixelDepthToBitsPerComponent(pixelDepth);
+        if (dpBits == 0) {
+            nvAssert(!"Unknown dpBits");
+            dpBits = 8;
+        }
+
+        /*
+         * If fewer than 8 DP bits are available, dither. Ideally we'd
+         * dither from lutBits > 10 to 10 bpc, but EVO doesn't have an
+         * option for that.
+         *
+         * XXX TODO: nvdisplay can dither to 10 bpc.
+         */
+        if ((dpBits <= 8) && (lutBits > dpBits)) {
+            if (pReqDithering->state ==
+                NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO) {
+                currDithering.enabled = TRUE;
+            }
+        }
+
+        if (pReqDithering->depth ==
+            NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO) {
+            if (dpBits <= 6) {
+                currDithering.depth =
+                    NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS;
+            } else if (dpBits <= 8) {
+                currDithering.depth =
+                    NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS;
+            }
+        }
+    }
+
+    if (currDithering.enabled) {
+        switch (pReqDithering->mode) {
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL:
+            currDithering.mode =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2:
+            currDithering.mode =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2;
+            break;
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2:
+            currDithering.mode =
+                NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2;
+            break;
+        default:
+            nvAssert(!"Unknown Dithering Mode");
+            // Fall through
+        case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO:
+            /*
+             * Leave it initialized based on the value of
+             * NVDpyEvoRec::or::dither::init::algo.
+             */
+            break;
+        }
+    } else {
+        currDithering.depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE;
+        currDithering.mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE;
+    }
+
+    *pCurrDithering = currDithering;
+}
+
+void nvSetDitheringEvo(
+    NVDispEvoPtr pDispEvo,
+    const NvU32 head,
+    const NVDpyAttributeCurrentDitheringConfig *pCurrDithering,
+    NVEvoUpdateState *pUpdateState)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvU32 i;
+    NvU32 algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN;
+    NvU32 type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF;
+    NvU32 enabled = pCurrDithering->enabled;
+
+    for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) {
+        if (ditherModeTable[i].nvKmsDitherMode == pCurrDithering->mode) {
+            algo = ditherModeTable[i].algo;
+            break;
+        }
+    }
+    nvAssert(i < ARRAY_LEN(ditherModeTable));
+
+    for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) {
+        if (ditherDepthTable[i].nvKmsDitherDepth == pCurrDithering->depth) {
+            type = ditherDepthTable[i].type;
+            break;
+        }
+    }
+    nvAssert(i < ARRAY_LEN(ditherDepthTable));
+
+    /*
+     * Make sure algo is a recognizable value that we will be able to program
+     * in hardware.
+     */
+    if (algo == NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN) {
+        algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2;
+    }
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+    pDevEvo->hal->SetDither(pDispEvo, head, enabled, type, algo,
+                            pUpdateState);
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+/*
+ * HeadCanStereoLock() - Return whether or not this head can use stereo lock
+ * mode. This can only be called from UpdateEvoLockState, when the pending
+ * interlaced/locked values are still in the head control assembly structure.
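+ *
+ * Reading the predicate below: a head qualifies only when it is
+ * non-interlaced and at least one scan lock (server or client) is engaged,
+ * e.g.
+ *
+ *     interlaced = FALSE, serverLock = NV_EVO_RASTER_LOCK          -> TRUE
+ *     interlaced = TRUE,  serverLock = NV_EVO_RASTER_LOCK          -> FALSE
+ *     interlaced = FALSE, serverLock = clientLock = NV_EVO_NO_LOCK -> FALSE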
+ */
+static NvBool HeadCanStereoLock(NVDevEvoPtr pDevEvo, int sd, int head)
+{
+    NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head];
+
+    return (!pHC->interlaced &&
+            ((pHC->serverLock != NV_EVO_NO_LOCK) ||
+             (pHC->clientLock != NV_EVO_NO_LOCK)));
+}
+
+/*
+ * SetStereoLockMode() - For stereo lock mode, we need to notify
+ * the gsync board that this GPU requires stereo lock mode.
+ */
+static NvBool SetStereoLockMode(NVDispEvoPtr pDispEvo, NvBool stereoLocked)
+{
+    NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS
+        statusParams = { 0 };
+    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
+
+    if (!pFrameLockEvo ||
+        ((pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060) &&
+         (pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061))) {
+        return TRUE;
+    }
+
+    statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
+    statusParams.enable = stereoLocked ? 1 : 0;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pFrameLockEvo->device,
+                       NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE,
+                       &statusParams,
+                       sizeof(statusParams)) != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed to set stereo lock mode");
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*
+ * SyncEvoLockState()
+ *
+ * Set the Assembly state based on the current Armed state. This should be
+ * called before transitioning between states in the EVO state machine.
+ */
+static void SyncEvoLockState(void)
+{
+    NVDispEvoPtr pDispEvo;
+    unsigned int sd;
+    NVDevEvoPtr pDevEvo;
+
+    FOR_ALL_EVO_DEVS(pDevEvo) {
+
+        if (!pDevEvo->gpus) {
+            continue;
+        }
+
+        if (pDevEvo->displayHandle == 0) {
+            continue;
+        }
+
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+            NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+            NvU32 updateHeadMask = nvGetActiveHeadMask(pDispEvo);
+            unsigned int head;
+
+            /* Update the cached HEAD_SET_CONTROL EVO method state */
+            FOR_ALL_HEADS(head, updateHeadMask) {
+                pEvoSubDev->headControlAssy[head] =
+                    pEvoSubDev->headControl[head];
+
+                /*
+                 * The following are probably not necessary, since no other
+                 * code touches them (as opposed to headControl above, which
+                 * is updated beyond the scope of the state machine). But
+                 * update them here anyway to be consistent.
+                 */
+                pEvoSubDev->frameLockClientMaskAssy =
+                    pEvoSubDev->frameLockClientMaskArmed;
+                pEvoSubDev->frameLockServerMaskAssy =
+                    pEvoSubDev->frameLockServerMaskArmed;
+                pEvoSubDev->frameLockExtRefClkMaskAssy =
+                    pEvoSubDev->frameLockExtRefClkMaskArmed;
+            }
+        }
+    }
+}
+
+/*
+ * Determine a unique index for the given (pDevEvo, sd) tuple.
+ * This is used to index into an array of size NV_MAX_DEVICES.
+ *
+ * It would be more straightforward to use a two-dimensional array of
+ * NV_MAX_DEVICES x NV_MAX_SUBDEVICES and index by (devIndex, sd), but
+ * that makes the array too large to fit on the stack. This is safe because
+ * we should only ever have at most NV_MAX_DEVICES GPUs in the system
+ * total, although at any given time they may be split into many single-GPU
+ * devices or a small number of many-GPU SLI devices.
+ */
+static NvU32 GpuIndex(const NVDevEvoRec *pDevEvo, NvU32 sd)
+{
+    const NVDevEvoRec *pDevEvoIter;
+    NvU32 index = 0;
+
+    nvAssert(sd < pDevEvo->numSubDevices);
+
+    FOR_ALL_EVO_DEVS(pDevEvoIter) {
+        if (pDevEvoIter == pDevEvo) {
+            index += sd;
+            nvAssert(index < NV_MAX_DEVICES);
+            return index;
+        }
+        index += pDevEvoIter->numSubDevices;
+    }
+
+    nvAssert(!"Failed to look up GPU index");
+    return 0;
+}
+
+/*!
+ * Get the current refresh rate for the heads in headMask, in 0.0001 Hz units.
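+ *
+ * As a worked example (illustrative 1080p-ish numbers, not taken from this
+ * change): pixelClock = 148500 (in 1000/s units), rasterSize = 2200 x 1125,
+ * no doubleScan, no interlace:
+ *
+ *     148500 * 10000000 / (2200 * 1125) = 600000
+ *
+ * i.e. 60.0000 Hz expressed in 0.0001 Hz units.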
+ * All heads in headMask are expected to have the same refresh rate.
+ */
+static NvU32 GetRefreshRate10kHz(const NVDispEvoRec *pDispEvo, NvU32 headMask)
+{
+    const NVHwModeTimingsEvo *pTimings = NULL;
+    NvU32 head;
+
+    FOR_ALL_HEADS(head, headMask) {
+        const NVDispHeadStateEvoRec *pHeadState =
+            &pDispEvo->headState[head];
+
+        if (head >= pDispEvo->pDevEvo->numHeads ||
+            pHeadState->activeRmId == 0x0) {
+            continue;
+        }
+
+        if (pTimings == NULL) {
+            pTimings = &pHeadState->timings;
+        } else {
+            nvAssert(pTimings->rasterSize.x ==
+                     pHeadState->timings.rasterSize.x);
+            nvAssert(pTimings->rasterSize.y ==
+                     pHeadState->timings.rasterSize.y);
+            nvAssert(pTimings->doubleScan == pHeadState->timings.doubleScan);
+            nvAssert(pTimings->interlaced == pHeadState->timings.interlaced);
+            nvAssert(pTimings->pixelClock == pHeadState->timings.pixelClock);
+        }
+    }
+
+    if (pTimings == NULL) {
+        return 0;
+    }
+
+    /*
+     * pTimings->pixelClock is in 1000/s units; we want 0.0001/s units, so
+     * factor = 1000 / 0.0001 = 10000000.
+     */
+    NvU32 factor = 10000000;
+    NvU32 totalPixels = pTimings->rasterSize.x * pTimings->rasterSize.y;
+
+    if (pTimings->doubleScan) factor /= 2;
+    if (pTimings->interlaced) factor *= 2;
+
+    return axb_div_c(pTimings->pixelClock, factor, totalPixels);
+}
+
+/*!
+ * Return the mask of active RM display IDs for the heads in headMask.
+ */
+static NvU32 HeadMaskToActiveRmIdMask(const NVDispEvoRec *pDispEvo,
+                                      const NvU32 headMask)
+{
+    NvU32 head;
+    NvU32 rmDisplayMask = 0;
+
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        if ((NVBIT(head) & headMask) != 0x0) {
+            rmDisplayMask |=
+                pDispEvo->headState[head].activeRmId;
+        }
+    }
+
+    return rmDisplayMask;
+}
+
+static NvBool FramelockSetControlSync(NVDispEvoPtr pDispEvo, const NvU32 headMask,
+                                      NvBool server)
+{
+    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
+    NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS gsyncSetControlSyncParams = { 0 };
+    NvU32 ret;
+
+    /* There can only be one server.
*/ + + nvAssert(!server || (nvPopCount32(headMask) == 1)); + + gsyncSetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncSetControlSyncParams.master = server; + gsyncSetControlSyncParams.displays = + HeadMaskToActiveRmIdMask(pDispEvo, headMask); + + if (gsyncSetControlSyncParams.displays == 0x0) { + return FALSE; + } + + gsyncSetControlSyncParams.refresh = GetRefreshRate10kHz(pDispEvo, headMask); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC, + &gsyncSetControlSyncParams, + sizeof(gsyncSetControlSyncParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask, + NvBool server) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS + gsyncSetControlUnsyncParams = { 0 }; + NvU32 ret; + + gsyncSetControlUnsyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncSetControlUnsyncParams.master = server; + gsyncSetControlUnsyncParams.displays = + HeadMaskToActiveRmIdMask(pDispEvo, headMask); + + if (gsyncSetControlUnsyncParams.displays == 0x0) { + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC, + &gsyncSetControlUnsyncParams, + sizeof(gsyncSetControlUnsyncParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +/* + * UpdateEvoLockState() + * + * Update the hardware based on the Assembly state, if it is different from the + * current Armed state. This should be called after transitioning through + * states in the EVO state machine to propagate all of the necessary values to + * HW. + */ +static void UpdateEvoLockState(void) +{ + NVDispEvoPtr pDispEvo; + NVFrameLockEvoPtr pFrameLockEvo; + unsigned int sd; + NVDevEvoPtr pDevEvo; + NvBool ret; + enum { + FIRST_ITERATION, + DISABLE_UNNEEDED_CLIENTS = FIRST_ITERATION, + DISABLE_UNNEEDED_SERVER, + COMPUTE_HOUSE_SYNC, + UPDATE_HOUSE_SYNC, + ENABLE_SERVER, + ENABLE_CLIENTS, + LAST_ITERATION = ENABLE_CLIENTS, + } iteration; + struct { + unsigned char disableServer:1; + unsigned char disableClient:1; + unsigned char enableServer:1; + unsigned char enableClient:1; + } cache[NV_MAX_DEVICES][NVKMS_MAX_HEADS_PER_DISP]; + + nvkms_memset(cache, 0, sizeof(cache)); + + /* XXX NVKMS TODO: idle base channel, first? */ + + /* + * Stereo lock mode is enabled if all heads are either raster locked or + * frame locked, and if all heads are not using interlaced mode. + */ + FOR_ALL_EVO_DEVS(pDevEvo) { + if (!pDevEvo->gpus) { + continue; + } + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvBool gpuCanStereoLock = TRUE; + NvBool testedOneHead = FALSE; + + /* + * If at least one head is not locked or driving an interlaced + * mode, then no heads on this GPU will use stereo lock mode. + */ + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head]; + + if (!nvHeadIsActive(pDispEvo, head) || + ((pHC->serverLock == NV_EVO_NO_LOCK) && + (pHC->clientLock == NV_EVO_NO_LOCK))) { + /* + * If the heads aren't scan locked then we should skip + * them as if they aren't connected. NOTE this + * conservative approach means that we won't disable + * StereoLockMode when frameLock is turned off. This + * should be harmless. 
+                     */
+                    continue;
+                }
+                testedOneHead = TRUE;
+                if (!HeadCanStereoLock(pDevEvo, sd, head)) {
+                    gpuCanStereoLock = FALSE;
+                }
+            }
+            /*
+             * Don't set StereoLockMode for screenless GPUs. As above, we'll
+             * also count heads that can't stereoLock as unconnected.
+             */
+            if (!testedOneHead) {
+                continue;
+            }
+
+            /*
+             * Notify the framelock board whether or not we will use stereo
+             * lock mode. If that fails, don't enable stereo lock mode on
+             * the GPU.
+             */
+            if (!SetStereoLockMode(pDispEvo, gpuCanStereoLock)) {
+                gpuCanStereoLock = FALSE;
+            }
+
+            /*
+             * Cache whether or not we can use stereo lock mode, so we know
+             * whether or not to enable stereo lock mode on the GPU during
+             * SetHeadControl.
+             */
+            for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+                if (nvHeadIsActive(pDispEvo, head)) {
+                    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+                    pEvoSubDev->headControlAssy[head].stereoLocked =
+                        gpuCanStereoLock;
+                }
+            }
+        }
+    }
+
+    /*
+     * Go through every GPU on the system, making its framelock state match the
+     * assembly state that we've saved.
+     *
+     * We do this in six steps, in order to keep the overall system state sane
+     * throughout:
+     * 1. Disable any clients we no longer need
+     * 2. Disable any server we no longer need
+     * 3. Compute which framelock devices need house sync
+     * 4. Update framelock devices with new house sync info
+     * 5. Enable the new server
+     * 6. Enable the new clients
+     */
+    for (iteration = FIRST_ITERATION;
+         iteration <= LAST_ITERATION;
+         iteration++) {
+
+        if (iteration == COMPUTE_HOUSE_SYNC) {
+            /* First, clear assy state */
+            FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) {
+                pFrameLockEvo->houseSyncAssy = FALSE;
+            }
+        }
+
+        if (iteration == UPDATE_HOUSE_SYNC) {
+            FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) {
+                /*
+                 * Since nvFrameLockSetUseHouseSyncEvo sets house sync
+                 * output mode in addition to house sync input mode and
+                 * input polarity, this needs to be done unconditionally,
+                 * even if a house sync state transition hasn't occurred.
+                 */
+                if (!nvFrameLockSetUseHouseSyncEvo(
+                        pFrameLockEvo, pFrameLockEvo->houseSyncAssy)) {
+                    nvAssert(!"Setting house sync failed");
+                } else {
+                    pFrameLockEvo->houseSyncArmed =
+                        pFrameLockEvo->houseSyncAssy;
+                }
+            }
+
+            continue;
+        }
+
+        FOR_ALL_EVO_DEVS(pDevEvo) {
+
+            if (!pDevEvo->gpus) {
+                continue;
+            }
+
+            if (pDevEvo->displayHandle == 0) {
+                /*
+                 * This may happen during init, when setting initial modes on
+                 * one device while other devices have not yet been allocated.
+                 * Skip these devices for now; we'll come back later when
+                 * they've been brought up.
+ */ + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvBool server = FALSE; + NvU32 needsEnableMask = 0, needsDisableMask = 0; + unsigned int head; + + switch (iteration) { + case COMPUTE_HOUSE_SYNC: + /* Accumulate house sync across pDisps */ + if (pEvoSubDev->frameLockHouseSync) { + pDispEvo->pFrameLockEvo->houseSyncAssy = TRUE; + } + break; + case DISABLE_UNNEEDED_CLIENTS: + needsDisableMask = pEvoSubDev->frameLockClientMaskArmed & + ~pEvoSubDev->frameLockClientMaskAssy; + server = FALSE; + break; + case DISABLE_UNNEEDED_SERVER: + needsDisableMask = pEvoSubDev->frameLockServerMaskArmed & + ~pEvoSubDev->frameLockServerMaskAssy; + server = TRUE; + break; + case ENABLE_SERVER: + needsEnableMask = pEvoSubDev->frameLockServerMaskAssy & + ~pEvoSubDev->frameLockServerMaskArmed; + server = TRUE; + break; + case ENABLE_CLIENTS: + needsEnableMask = pEvoSubDev->frameLockClientMaskAssy & + ~pEvoSubDev->frameLockClientMaskArmed; + server = FALSE; + break; + case UPDATE_HOUSE_SYNC: + nvAssert(!"Shouldn't reach here"); + break; + } + + if (needsDisableMask) { + ret = nvFramelockSetControlUnsyncEvo(pDispEvo, + needsDisableMask, + server); + nvAssert(ret); + + if (ret) { + if (server) { + pEvoSubDev->frameLockServerMaskArmed &= + ~needsDisableMask; + + FOR_ALL_HEADS(head, needsDisableMask) { + cache[GpuIndex(pDevEvo, sd)][head].disableServer = TRUE; + } + } else { + pEvoSubDev->frameLockClientMaskArmed &= + ~needsDisableMask; + + FOR_ALL_HEADS(head, needsDisableMask) { + cache[GpuIndex(pDevEvo, sd)][head].disableClient = TRUE; + } + } + } + } + if (needsEnableMask) { + ret = FramelockSetControlSync(pDispEvo, + needsEnableMask, + server); + + nvAssert(ret); + + if (ret) { + if (server) { + pEvoSubDev->frameLockServerMaskArmed |= + needsEnableMask; + + FOR_ALL_HEADS(head, needsEnableMask) { + cache[GpuIndex(pDevEvo, sd)][head].enableServer = TRUE; + } + } else { + pEvoSubDev->frameLockClientMaskArmed |= + needsEnableMask; + + FOR_ALL_HEADS(head, needsEnableMask) { + cache[GpuIndex(pDevEvo, sd)][head].enableClient = TRUE; + } + } + } + } + + /* After the above process, we should have "promoted" assy + * to armed */ + if (iteration == LAST_ITERATION) { + nvAssert(pEvoSubDev->frameLockServerMaskArmed == + pEvoSubDev->frameLockServerMaskAssy); + nvAssert(pEvoSubDev->frameLockClientMaskArmed == + pEvoSubDev->frameLockClientMaskAssy); + } + } + } + } + + /* + * Update the EVO HW state. 
Make this a separate set of loops to not + * confuse the one above + */ + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 extRefClkMaskAssy, extRefClkUpdateMask; + NvU32 possibleHeadMask; + NvBool refClkChanged[NVKMS_MAX_HEADS_PER_DISP] = { FALSE }; + unsigned int head; + + extRefClkMaskAssy = pEvoSubDev->frameLockExtRefClkMaskAssy; + + /* Set the external reference clock, if different */ + extRefClkUpdateMask = extRefClkMaskAssy ^ + pEvoSubDev->frameLockExtRefClkMaskArmed; + + FOR_ALL_HEADS(head, extRefClkUpdateMask) { + NvBool extRefClkNeeded = + !!(extRefClkMaskAssy & (1 << head)); + + SetRefClk(pDevEvo, sd, head, extRefClkNeeded, &updateState); + refClkChanged[head] = TRUE; + + /* Update armed state for this head */ + pEvoSubDev->frameLockExtRefClkMaskArmed = + (pEvoSubDev->frameLockExtRefClkMaskArmed & + (~(1 << head))) | + (extRefClkMaskAssy & (1 << head)); + } + /* After the above process, the armed state should match + * assembly state */ + nvAssert(extRefClkMaskAssy == + pEvoSubDev->frameLockExtRefClkMaskArmed); + + /* Update the HEAD_SET_CONTROL EVO method state */ + + possibleHeadMask = nvGetActiveHeadMask(pDispEvo); + + FOR_ALL_HEADS(head, possibleHeadMask) { + if (nvkms_memcmp(&pEvoSubDev->headControl[head], + &pEvoSubDev->headControlAssy[head], + sizeof(NVEvoHeadControl))) { + + nvPushEvoSubDevMask(pDevEvo, 1 << sd); + + pEvoSubDev->headControl[head] = + pEvoSubDev->headControlAssy[head]; + pDevEvo->hal->SetHeadControl(pDevEvo, sd, head, + &updateState); + needUpdate = TRUE; + + nvPopEvoSubDevMask(pDevEvo); + } else if (refClkChanged[head]) { + needUpdate = TRUE; + } + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + } + } + + /* + * Inform GLS of framelock changes. It uses this information to do things + * like enable fake stereo to get stereo sync when stereo apps start + * without flickering the displays. 
+ */ + for (iteration = FIRST_ITERATION; + iteration <= LAST_ITERATION; + iteration++) { + + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NvBool sendEvent = FALSE; + NvBool enable = FALSE, server = FALSE; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + switch (iteration) { + case DISABLE_UNNEEDED_CLIENTS: + if (cache[GpuIndex(pDevEvo, sd)][head].disableClient) { + enable = FALSE; + server = FALSE; + sendEvent = TRUE; + } + break; + case DISABLE_UNNEEDED_SERVER: + if (cache[GpuIndex(pDevEvo, sd)][head].disableServer) { + enable = FALSE; + server = TRUE; + sendEvent = TRUE; + } + break; + case ENABLE_SERVER: + if (cache[GpuIndex(pDevEvo, sd)][head].enableServer) { + enable = TRUE; + server = TRUE; + sendEvent = TRUE; + } + break; + case ENABLE_CLIENTS: + if (cache[GpuIndex(pDevEvo, sd)][head].enableClient) { + enable = TRUE; + server = FALSE; + sendEvent = TRUE; + } + break; + case UPDATE_HOUSE_SYNC: + case COMPUTE_HOUSE_SYNC: + sendEvent = FALSE; + break; + } + + if (sendEvent) { + nvUpdateGLSFramelock(pDispEvo, head, enable, server); + } + } + } + } + } +} + +/* + * For every head in the headMask on pDispEvo, construct a prioritized + * list of heads and call into the EVO locking state machine to + * perform the given transition. + * + * Return the list of heads that actually succeeded. + */ +static NvU32 applyActionForHeads(NVDispEvoPtr pDispEvo, + const NvU32 headMask, + NVEvoLockAction action) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NvU32 appliedHeadMask = 0; + NvU32 head; + + FOR_ALL_HEADS(head, headMask) { + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1]; + unsigned int i = 0; + NvU32 tmpHead, usedHeadMask = 0; + + /* Fill in the array starting with this head, then with the others in + * the list, and finally any other active heads */ + pHeads[i++] = head; + usedHeadMask |= (1 << head); + + FOR_ALL_HEADS(tmpHead, headMask) { + if (usedHeadMask & (1 << tmpHead)) { + continue; + } + pHeads[i++] = tmpHead; + usedHeadMask |= (1 << tmpHead); + } + + for (tmpHead = 0; tmpHead < NVKMS_MAX_HEADS_PER_DISP; tmpHead++) { + if (!nvHeadIsActive(pDispEvo, tmpHead)) { + continue; + } + if (usedHeadMask & (1 << tmpHead)) { + continue; + } + pHeads[i++] = tmpHead; + usedHeadMask |= (1 << tmpHead); + } + + nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads)) { + appliedHeadMask |= (1 << head); + } + } + + return appliedHeadMask; +} + +// +// Set up raster lock and frame lock for external frame lock +// + +NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 serverHead = nvGetFramelockServerHead(pDispEvo); + NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo); + NvU32 appliedHeadMask; + NvU32 activeClientHeadsMask; + NvBool useHouseSync = FALSE; + NvU32 head; + + nvAssert(pDispEvo->framelock.currentServerHead == NV_INVALID_HEAD); + nvAssert(pDispEvo->framelock.currentClientHeadsMask == 0x0); + + if (serverHead != NV_INVALID_HEAD && + (pFrameLockEvo->houseSyncMode == + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT)) { + + NvS64 houseSync; + + /* + * Only use house sync if present. + * XXX what happens when house sync is unplugged? 
why not enable it + * now and let the FPGA decide? + */ + if (!nvFrameLockGetStatusEvo(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + &houseSync)) { + return FALSE; + } + + useHouseSync = (houseSync != 0); + } + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* Enable the server */ + if ((serverHead != NV_INVALID_HEAD) && + nvHeadIsActive(pDispEvo, serverHead)) { + NvU32 serverHeadMask; + + serverHeadMask = (1 << serverHead); + appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_ADD_FRAME_LOCK_SERVER); + + nvAssert(appliedHeadMask == serverHeadMask); + pDispEvo->framelock.currentServerHead = serverHead; + + /* Enable house sync, if requested */ + if (useHouseSync) { + appliedHeadMask = + applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC); + + if (appliedHeadMask == serverHeadMask) { + pDispEvo->framelock.currentHouseSync = TRUE; + } + } + } + + /* Enable the clients */ + activeClientHeadsMask = 0; + FOR_ALL_HEADS(head, clientHeadsMask) { + if (nvHeadIsActive(pDispEvo, head)) { + activeClientHeadsMask |= (1 << head); + } + } + appliedHeadMask = applyActionForHeads(pDispEvo, activeClientHeadsMask, + NV_EVO_ADD_FRAME_LOCK_CLIENT); + + nvAssert(appliedHeadMask == activeClientHeadsMask); + pDispEvo->framelock.currentClientHeadsMask = activeClientHeadsMask; + + /* Finally, update the hardware */ + UpdateEvoLockState(); + + return TRUE; +} + +// +// Disable raster lock and frame lock +// + +NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo) +{ + NvU32 serverHead = nvGetFramelockServerHead(pDispEvo); + NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo); + NvU32 activeClientHeadsMask; + NvU32 appliedHeadMask; + NvU32 head; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* Disable the clients */ + activeClientHeadsMask = 0; + FOR_ALL_HEADS(head, clientHeadsMask) { + if (nvHeadIsActive(pDispEvo, head)) { + activeClientHeadsMask |= (1 << head); + } + } + appliedHeadMask = applyActionForHeads(pDispEvo, + activeClientHeadsMask, + NV_EVO_REM_FRAME_LOCK_CLIENT); + + nvAssert(appliedHeadMask == activeClientHeadsMask); + pDispEvo->framelock.currentClientHeadsMask &= ~activeClientHeadsMask; + + /* Disable house sync */ + if (serverHead != NV_INVALID_HEAD && + nvHeadIsActive(pDispEvo, serverHead)) { + NvU32 serverHeadMask = (1 << serverHead); + + if (pDispEvo->framelock.currentHouseSync) { + appliedHeadMask = + applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC); + + nvAssert(appliedHeadMask == serverHeadMask); + pDispEvo->framelock.currentHouseSync = FALSE; + } + + /* Disable the server */ + appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_REM_FRAME_LOCK_SERVER); + nvAssert(appliedHeadMask == serverHeadMask); + if (appliedHeadMask == serverHeadMask) { + pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD; + } + } + + /* Finally, update the hardware */ + UpdateEvoLockState(); + + return TRUE; +} + +// +// Enable/Disable External Reference Clock Sync +// +// This function is used by frame lock to make the GPU sync to +// the external device's reference clock. 
+// +static void SetRefClk(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvBool external, + NVEvoUpdateState *updateState) +{ + nvPushEvoSubDevMask(pDevEvo, 1 << sd); + + pDevEvo->hal->SetHeadRefClk(pDevEvo, head, external, updateState); + + nvPopEvoSubDevMask(pDevEvo); +} + + +// +// Query raster lock state +// + +NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev; + /* + * XXX[2Heads1OR] Loop over hardware heads to determine if this api-head + * is rasterlocked with any other api-head. + */ + const NvU32 head = pDpyEvo->apiHead; + NVEvoHeadControlPtr pHC; + + if ((head == NV_INVALID_HEAD) || (pDevEvo->gpus == NULL)) { + return FALSE; + } + + pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + pHC = &pEvoSubDev->headControl[head]; + + *val = pHC->serverLock == NV_EVO_RASTER_LOCK || + pHC->clientLock == NV_EVO_RASTER_LOCK; + + return TRUE; +} + +/* + * Return the surface format usage bounds that NVKMS will program for the + * requested format. + * + * For an RGB XBPP format, this function will return a bitmask of all RGB YBPP + * formats, where Y <= X. + * + * For a YUV format, this function will return a bitmask of all YUV formats + * that: + * - Have the same number of planes as the requested format + * - Have the same chroma decimation factors as the requested format + * - Have the same or lower effective fetch bpp as the requested format + * + * For example, if the requested format is YUV420 12-bit SP, this function will + * include all YUV420 8/10/12-bit SP formats. + */ +NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound( + const enum NvKmsSurfaceMemoryFormat format, + NvU64 supportedFormatsCapMask) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + NvU64 supportedFormatsUsageBound = 0; + NvU8 formatIdx; + + FOR_EACH_INDEX_IN_MASK(64, formatIdx, supportedFormatsCapMask) { + + const NvKmsSurfaceMemoryFormatInfo *pOtherFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(formatIdx); + + if ((pFormatInfo->isYUV != pOtherFormatInfo->isYUV) || + (pFormatInfo->numPlanes != pOtherFormatInfo->numPlanes)) { + continue; + } + + if (pFormatInfo->isYUV) { + if ((pFormatInfo->yuv.horizChromaDecimationFactor != + pOtherFormatInfo->yuv.horizChromaDecimationFactor) || + (pFormatInfo->yuv.vertChromaDecimationFactor != + pOtherFormatInfo->yuv.vertChromaDecimationFactor) || + (pFormatInfo->yuv.depthPerComponent < + pOtherFormatInfo->yuv.depthPerComponent)) { + continue; + } + } else { + if (pFormatInfo->rgb.bitsPerPixel < + pOtherFormatInfo->rgb.bitsPerPixel) { + continue; + } + } + + supportedFormatsUsageBound |= NVBIT64(formatIdx); + + } FOR_EACH_INDEX_IN_MASK_END; + + return supportedFormatsUsageBound; +} + +// +// Enable or disable flip lock (or query state) +// + +NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU32 *val, NvBool set, + NvBool *needsEarlyUpdate, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (needsEarlyUpdate) { + *needsEarlyUpdate = FALSE; + } + + if (set) { + // make sure we're dealing with a bool + NvBool setVal = !!*val; + + if (setVal ^ pHC->flipLock) { + NvBool isMethodPending; + + if (!pDevEvo->hal-> + IsChannelMethodPending(pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + pDispEvo->displayOwner, 
+ &isMethodPending) || + isMethodPending) { + nvAssert(!"Base channel not idle"); + return FALSE; + } + + if (setVal) { + // make sure flip lock is not prohibited and raster lock is enabled + if ((pHC->serverLock == NV_EVO_NO_LOCK && + pHC->clientLock == NV_EVO_NO_LOCK) || + HEAD_MASK_QUERY(pEvoSubDev->flipLockProhibitedHeadMask, + head)) { + return FALSE; + } + pHC->flipLock = TRUE; + } else { + /* Only actually disable fliplock if it's not needed for SLI */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask, + head)) { + pHC->flipLock = FALSE; + + /* + * When disabling fliplock during a modeset, the core + * channel needs to be updated before issuing further + * base flips. Notify the caller that fliplock has + * been disabled in the core channel's assembly state, + * and needs to be committed before issuing non-fliplocked + * base flips. + */ + if (needsEarlyUpdate) { + *needsEarlyUpdate = TRUE; + } + } + } + + EvoUpdateHeadParams(pDispEvo, head, updateState); + } + + /* Remember if we currently need fliplock enabled for framelock */ + pEvoSubDev->flipLockEnabledForFrameLockHeadMask = + setVal ? + HEAD_MASK_SET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head) : + HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head); + } + + /* + * XXX should the query return the cached "enabled for framelock" state + * instead? + */ + *val = pHC->flipLock; + + + return TRUE; +} + + +static NvBool UpdateFlipLock50(const NVDpyEvoRec *pDpyEvo, + NvU32 *val, NvBool set) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + /* + * XXX[2Heads1OR] Loop over hardware heads to determine is this api-head + * is rasterlocked with any other api-head and flip lock is not prohibited + * on its corresponding hardware heads. + */ + const NvU32 head = pDpyEvo->apiHead; + NVEvoUpdateState updateState = { }; + NvBool ret; + + if (head == NV_INVALID_HEAD) { + return FALSE; + } + + ret = nvUpdateFlipLockEvoOneHead(pDispEvo, head, val, set, + NULL /* needsEarlyUpdate */, + &updateState); + + if (set && ret) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + + return ret; +} + +NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NvU32 val32 = !!value; + return UpdateFlipLock50(pDpyEvo, &val32, TRUE /* set */); +} + +NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + NvBool ret; + NvU32 val32 = 0; + ret = UpdateFlipLock50(pDpyEvo, &val32, FALSE /* set */); + + if (ret) { + *pValue = !!val32; + } + + return ret; +} + +static void ProhibitFlipLock50(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = NULL; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, + head)) { + nvAssert(!"Can not prohibit flip lock " + "because it is already enabled for frame lock"); + continue; + } + + pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock) { + needUpdate = TRUE; + + pHC->flipLock = FALSE; + EvoUpdateHeadParams(pDispEvo, head, &updateState); + } + + pEvoSubDev->flipLockProhibitedHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockProhibitedHeadMask, head); + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } +} + +static void 
AllowFlipLock50(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = NULL; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + pHC = &pEvoSubDev->headControl[head]; + + if (!pHC->flipLock && + HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask, + head)) { + needUpdate = TRUE; + + nvAssert(pHC->serverLock != NV_EVO_NO_LOCK || + pHC->clientLock != NV_EVO_NO_LOCK); + + pHC->flipLock = TRUE; + EvoUpdateHeadParams(pDispEvo, head, &updateState); + } + + pEvoSubDev->flipLockProhibitedHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockProhibitedHeadMask, head); + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } +} + +NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value) +{ + if (value == 0) { + ProhibitFlipLock50(pDispEvo); + } else { + AllowFlipLock50(pDispEvo); + } + return TRUE; +} + +/*! + * Enable or disable stereo. + * + * XXX SLI+Stereo For now, just set stereo on the display owner. + */ +NvBool nvSetStereoEvo( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvBool enable) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC; + NVEvoLockPin pin; + + nvAssert(head != NV_INVALID_HEAD); + + pHC = &pEvoSubDev->headControl[head]; + pin = NV_EVO_LOCK_PIN_INTERNAL(head); + + // make sure we're dealing with a bool + NvBool stereo = !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin); + + if (enable ^ stereo) { + NVEvoUpdateState updateState = { }; + + if (enable) { + NvU32 otherHead; + NvU32 signalPin; + + // If any other head is already driving stereo, fail + for (otherHead = 0; otherHead < NVKMS_MAX_HEADS_PER_DISP; + otherHead++) { + if (!nvHeadIsActive(pDispEvo, otherHead)) { + continue; + } + if (head == otherHead) { + continue; + } + + const NVEvoHeadControl *pOtherHC = + &pEvoSubDev->headControl[otherHead]; + + if (!NV_EVO_LOCK_PIN_IS_INTERNAL(pOtherHC->stereoPin)) { + return FALSE; + } + } + + signalPin = nvEvoGetPinForSignal(pDispEvo, + pEvoSubDev, + NV_EVO_LOCK_SIGNAL_STEREO); + if (signalPin != NV_EVO_LOCK_PIN_ERROR) { + pin = signalPin; + } + } + + pHC->stereoPin = pin; + + EvoUpdateHeadParams(pDispEvo, head, &updateState); + + // Make method take effect. + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + + return TRUE; +} + +/*! + * Query stereo state. + * + * XXX SLI+Stereo For now, just get stereo on the display owner. 
+ */ +NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC; + + nvAssert(head != NV_INVALID_HEAD); + + pHC = &pEvoSubDev->headControl[head]; + + return !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin); +} + +void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetViewportInOut(pDevEvo, head, + pViewPort, pViewPort, pViewPort, + updateState); + nvPopEvoSubDevMask(pDevEvo); + + /* + * Specify safe default values of 0 for viewPortPointIn x and y; these + * may be changed when panning out of band of a modeset. + */ + EvoSetViewportPointIn(pDispEvo, head, 0 /* x */, 0 /* y */, updateState); +} + + + +static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetViewportPointIn(pDevEvo, head, x, y, updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +static inline NvU32 LUTNotifierForHead(const NvU32 head) +{ + nvAssert(head != NV_INVALID_HEAD); + return 1 + head; +} + +//****************************************************************************** +// +// Function: EvoUpdateCurrentPalette +// +// Description: Setting the palette +// +// Arguments: +// +// Return Value: None. +// +//****************************************************************************** +void nvEvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, + NvU32 head, NvBool kickOff) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const int dispIndex = pDispEvo->displayOwner; + NvU8 lutIndex = pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + NVEvoUpdateState updateState = { }; + + pDevEvo->hal->SetLUTContextDma( + pDispEvo, + head, + pDevEvo->lut.head[head].LUT[lutIndex], + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled, + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled, + &updateState, + pHeadState->bypassComposition); + + /* + * EVO2 does not set LUT context DMA if the core channel + * doesn't have a scanout surface set, in that case there is no update + * state to kickoff. + */ + if (kickOff && !nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + // Clear the completion notifier and kick off an update. Wait for it + // here if NV_CTRL_SYNCHRONOUS_PALETTE_UPDATES is enabled. Otherwise, + // don't wait for the notifier -- it'll be checked the next time a LUT + // change request comes in. 
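+        //
+        // Deferred-wait sketch (assumption: a later LUT update consumes the
+        // flag set below before pushing new LUT methods):
+        //
+        //     if (pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate) {
+        //         /* wait on LUTNotifierForHead(head), then clear the flag */
+        //     }
+        //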
+ EvoUpdateAndKickOffWithNotifier(pDispEvo, + TRUE, /* notify */ + FALSE, /* sync */ + LUTNotifierForHead(head), + &updateState, + TRUE /* releaseElv */); + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate = TRUE; + } +} + +static void UpdateMaxPixelClock(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NVDpyEvoPtr pDpyEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + nvDpyProbeMaxPixelClock(pDpyEvo); + } + } +} + +static NvBool AllocEvoSubDevs(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 sd; + + pDevEvo->gpus = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoSubDevRec)); + + if (pDevEvo->gpus == NULL) { + return FALSE; + } + + /* Assign the pDispEvo for each evoSubDevice */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + pDevEvo->gpus[sd].pDispEvo = pDispEvo; + } + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + nvAssert(pDevEvo->gpus[sd].pDispEvo != NULL); + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head; + + pDevEvo->gpus[sd].subDeviceInstance = sd; + // Initialize the lock state. + nvEvoStateStartNoLock(pEvoSubDev); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pSdHeadState->layer); i++) { + pSdHeadState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX; + } + } + } + + return TRUE; +} + +static NvBool ValidateConnectorTypes(const NVDevEvoRec *pDevEvo) +{ + const NVDispEvoRec *pDispEvo; + const NVConnectorEvoRec *pConnectorEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const NVEvoSubDevRec *pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + const NVEvoCapabilities *pEvoCaps = &pEvoSubDev->capabilities; + const NVEvoMiscCaps *pMiscCaps = &pEvoCaps->misc; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!pMiscCaps->supportsDSI && + pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "DSI connectors are unsupported!"); + return FALSE; + } + } + } + return TRUE; +} + +/*! + * Allocate the EVO core channel. + * + * This function trivially succeeds if the core channel is already allocated. + */ +NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo) +{ + NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { }; + NvU32 ret; + NvBool bRet; + NVDispEvoRec *pDispEvo; + NvU32 dispIndex; + NvU32 head; + + /* Do nothing if the display was already allocated */ + if (pDevEvo->displayHandle != 0) { + return TRUE; + } + + if (!AllocEvoSubDevs(pDevEvo)) { + goto failed; + } + + // Disallow GC6 in anticipation of touching GPU/displays. + if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) { + goto failed; + } + + /* Query console FB info, and save the result into pDevEvo->vtFbInfo. + * This is done at device allocation time. + * nvRmImportFbConsoleMemory will import the surface for console restore by + * nvEvoRestoreConsole if the surface format is compatible. + * Else, console restore will cause core channel realloc, telling RM to + * restore the console via nvRmVTSwitch. 
+ */ + if (!nvRmGetVTFBInfo(pDevEvo)) { + goto failed; + } + + if (!nvRmVTSwitch(pDevEvo, + NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE)) { + goto failed; + } + + /* Evo object (parent of all other NV50 display stuff) */ + nvAssert(nvRmEvoClassListCheck(pDevEvo, pDevEvo->dispClass)); + pDevEvo->displayHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayHandle, + pDevEvo->dispClass, + NULL); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to initialize display engine: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto failed; + } + + /* Get the display caps bits */ + + ct_assert(sizeof(pDevEvo->capsBits) == sizeof(capsParams.capsTbl)); + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &capsParams, sizeof(capsParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine display capabilities"); + goto failed; + } + nvkms_memcpy(pDevEvo->capsBits, capsParams.capsTbl, + sizeof(pDevEvo->capsBits)); + + // Evo core channel. Allocated once, shared per GPU + if (!nvRMSetupEvoCoreChannel(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine core DMA push buffer"); + goto failed; + } + + pDevEvo->coreInitMethodsPending = TRUE; + + bRet = pDevEvo->hal->GetCapabilities(pDevEvo); + + if (!bRet) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query display engine capability bits."); + goto failed; + } + + /* + * XXX NVKMS TODO: if the EVO core channel is allocated (and + * capability notifier queried) before any nvDpyConnectEvo(), then + * we won't need to update the pixelClock here. + */ + UpdateMaxPixelClock(pDevEvo); + + if (pDevEvo->numWindows > 0) { + int win; + + if (!nvRMAllocateWindowChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine window channels"); + goto failed; + } + + for (win = 0; win < pDevEvo->numWindows; win++) { + const NvU32 head = pDevEvo->headForWindow[win]; + + if (head == NV_INVALID_HEAD) { + continue; + } + + pDevEvo->head[head].layer[pDevEvo->head[head].numLayers] = + pDevEvo->window[win]; + pDevEvo->head[head].numLayers++; + } + } else { + // Allocate the base channels + if (!nvRMAllocateBaseChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine base channels"); + goto failed; + } + + // Allocate the overlay channels + if (!nvRMAllocateOverlayChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine overlay channels"); + goto failed; + } + + /* Map base and overlay channels onto main and overlay layers. */ + for (head = 0; head < pDevEvo->numHeads; head++) { + nvAssert(pDevEvo->base[head] != NULL && pDevEvo->overlay[head] != NULL); + + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER] = pDevEvo->base[head]; + pDevEvo->head[head].layer[NVKMS_OVERLAY_LAYER] = pDevEvo->overlay[head]; + pDevEvo->head[head].numLayers = 2; + } + } + + // Allocate and map the cursor controls for all heads + bRet = nvAllocCursorEvo(pDevEvo); + if (!bRet) { + goto failed; + } + + if (!nvAllocLutSurfacesEvo(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate memory for the display color lookup table."); + goto failed; + } + + // Resume the DisplayPort library's control of the device. 
+ if (!nvRmResumeDP(pDevEvo)) { + nvEvoLogDev( + pDevEvo, + EVO_LOG_ERROR, + "Failed to initialize DisplayPort sub-system."); + goto failed; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvRmRegisterBacklight(pDispEvo); + } + + // Allow GC6 if no heads are active. + if (nvAllHeadsInactive(pDevEvo)) { + if (!nvRmSetGc6Allowed(pDevEvo, TRUE)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "No head is active, but failed to allow GC6"); + } + } + + return TRUE; + +failed: + nvFreeCoreChannelEvo(pDevEvo); + + return FALSE; +} + +/*! + * Clear the pConnectorEvo->or.mask tracking. + */ +static void ClearSORAssignmentsOneDisp(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + + nvAssert(NV0073_CTRL_SYSTEM_GET_CAP(pDispEvo->pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + pConnectorEvo->or.mask = 0x0; + } +} + +/*! + * Update pConnectorEvo->or.mask from the list given to us by RM. + */ +static void RefreshSORAssignments(NVDispEvoPtr pDispEvo, + const NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams) +{ + NVConnectorEvoPtr pConnectorEvo; + + ClearSORAssignmentsOneDisp(pDispEvo); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + NvU32 sorIndex; + + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + for (sorIndex = 0; + sorIndex < ARRAY_LEN(pParams->sorAssignList) && + sorIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask); + sorIndex++) { + if ((pParams->sorAssignList[sorIndex] & displayId) == displayId) { + pConnectorEvo->or.mask |= NVBIT(sorIndex); + } + } + } +} + +/* + * Ask RM to assign an SOR for the given connector. + * + * Note that this assignment may be temporary. This function will always call + * RM, and unless the connector is currently in use (i.e., being driven by a + * head), a previously-assigned SOR may be reused. + * + * The RM will either: + * a) return an SOR that's already assigned/attached to this connector, or + * b) pick a new "unused" SOR, assign and attach it to this connector, and + * return that -- where "unused" means both not being actively driven by a + * head and not in the "exclude mask" argument. + * The "exclude mask" is useful if we need to assign multiple SORs up front + * before activating heads to drive them. + * + * For example, if head 0 is currently actively scanning out to SOR 0 and we + * are doing a modeset to activate currently-inactive heads 1 and 2: + * 1. nvkms calls RM for nvAssignSOREvo(pConnectorForHead1, 0); + * RM returns any SOR other than 0 (say 3) + * 2. nvkms calls RM for nvAssignSOREvo(pConnectorForHead2, (1 << 3)); + * RM returns any SOR other than 0 and 3 (say 1) + * 3. At this point nvkms can push methods and UPDATE to enable heads 1 and 2 + * to drive SORs 3 and 1. + * In the example above, the sorExcludeMask == (1 << 3) at step 2 is important + * to ensure that RM doesn't reuse the SOR 3 from step 1. It won't reuse SOR 0 + * because it's in use by head 0. + * + * If an SOR is only needed temporarily (e.g., to do link training to "assess" + * a DisplayPort or HDMI FRL link), then sorExcludeMask should be 0 -- any SOR + * that's not actively used by a head can be used, and as soon as nvkms + * finishes the "assessment", the SOR is again eligible for reuse. 
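+ *
+ * As an illustrative sketch (hypothetical caller and variable names, not
+ * code from this file), the two-step assignment described above would be:
+ *
+ *     NvU32 excludeMask = 0;
+ *
+ *     if (nvAssignSOREvo(pConnectorForHead1, excludeMask)) {
+ *         excludeMask |= pConnectorForHead1->or.mask;
+ *     }
+ *     nvAssignSOREvo(pConnectorForHead2, excludeMask);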
+ * + * Because of the potential for SOR reuse, nvAssignSOREvo() will always call + * RefreshSORAssignments() to update pConnectorEvo->or.mask on *every* + * connector after calling NV0073_CTRL_CMD_DFP_ASSIGN_SOR for *any* connector. + */ +NvBool nvAssignSOREvo(NVConnectorEvoPtr pConnectorEvo, NvU32 sorExcludeMask) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NvU32 displayId = 0x0; + + NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = { 0 }; + NvU32 ret; + + /* + * Skip assigning an SOR for non-SOR connectors or if an SOR is already + * assigned. + */ + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + return TRUE; + } + + if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + return TRUE; + } + + /* Mode-set is not possible without SOR */ + nvAssert(!nvIsConnectorActiveEvo(pConnectorEvo)); + + displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + params.sorExcludeMask = sorExcludeMask; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_ASSIGN_SOR, + &params, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + RefreshSORAssignments(pDispEvo, &params); + nvAssert(pConnectorEvo->or.mask != 0); + + return TRUE; +} + +void nvRestoreSORAssigmentsEvo(NVDevEvoRec *pDevEvo) +{ + NVDispEvoRec *pDispEvo; + NvU32 dispIndex; + + if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + return; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const NVConnectorEvoRec * + sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS] = { }; + const NVConnectorEvoRec *pConnectorEvo; + NvU32 sorIndex; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NvU32 i; + + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.mask) { + /* + * RM populates the same SOR index into more than one connector + * if they are DCC partners; this check makes sure the SOR + * assignment happens only for a single connector. The SOR + * assignment call before modeset/dp-link-training makes sure + * the assignment happens for the correct connector. + */ + if (sorAssignList[i] != NULL) { + continue; + } + sorAssignList[i] = pConnectorEvo; + } FOR_EACH_INDEX_IN_MASK_END + } + + for (sorIndex = 0; sorIndex < ARRAY_LEN(sorAssignList); sorIndex++) { + if (sorAssignList[sorIndex] == NULL) { + continue; + } + + NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = { + .subDeviceInstance = pDispEvo->displayOwner, + .displayId = nvDpyIdToNvU32(sorAssignList[sorIndex]->displayId), + .sorExcludeMask = ~NVBIT(sorIndex), + }; + NvU32 ret; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_ASSIGN_SOR, + &params, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, + EVO_LOG_ERROR, + "Failed to restore SOR-%u -> %s assignment.", + sorIndex, sorAssignList[sorIndex]->name); + } else { + RefreshSORAssignments(pDispEvo, &params); + } + } + } +} + +/*! + * Free the EVO core channel. + * + * This function does nothing if the core channel was already free.
+ */ +void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvU32 head; + + nvEvoCancelPostFlipIMPTimer(pDevEvo); + nvCancelVrrFrameReleaseTimers(pDevEvo); + + nvCancelLowerDispBandwidthTimer(pDevEvo); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvRmUnregisterBacklight(pDispEvo); + + nvAssert(pDevEvo->skipConsoleRestore || + nvDpyIdListIsEmpty(nvActiveDpysOnDispEvo(pDispEvo))); + } + + // Pause the DisplayPort library's control of the device. + nvRmPauseDP(pDevEvo); + + nvFreeLutSurfacesEvo(pDevEvo); + + // Unmap and free the cursor controls for all heads + nvFreeCursorEvo(pDevEvo); + + // TODO: Unregister all surfaces registered with this device. + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + nvRmEvoFreePreSyncpt(pDevEvo, pDevEvo->head[head].layer[layer]); + pDevEvo->head[head].layer[layer] = NULL; + } + pDevEvo->head[head].numLayers = 0; + } + + nvRMFreeWindowChannels(pDevEvo); + nvRMFreeOverlayChannels(pDevEvo); + nvRMFreeBaseChannels(pDevEvo); + + nvRMFreeEvoCoreChannel(pDevEvo); + + if (pDevEvo->displayHandle != 0) { + if (nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayHandle) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to tear down Disp"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->displayHandle); + pDevEvo->displayHandle = 0; + + if (!pDevEvo->skipConsoleRestore) { + nvRmVTSwitch(pDevEvo, + NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE); + } else { + nvRmVTSwitch(pDevEvo, + NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED); + } + } + + // No longer possible that NVKMS is driving any displays, allow GC6. + nvRmSetGc6Allowed(pDevEvo, TRUE); + + nvFree(pDevEvo->gpus); + pDevEvo->gpus = NULL; +} + + +#define ASSIGN_PIN(_pPin, _pin) \ + do { \ + ct_assert(NV_IS_UNSIGNED((_pin))); \ + if ((_pPin)) { \ + if ((_pin) >= NV_EVO_NUM_LOCK_PIN_CAPS) { \ + return FALSE; \ + } \ + *(_pPin) = (_pin); \ + } \ + } while (0) + +static NvBool QueryFrameLockHeaderPins(const NVDispEvoRec *pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NvU32 *pFrameLockPin, + NvU32 *pRasterLockPin, + NvU32 *pFlipLockPin) +{ + NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS params = { }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS, + &params, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to query framelock header pins"); + return FALSE; + } + + ASSIGN_PIN(pFrameLockPin, params.frameLockPin); + ASSIGN_PIN(pRasterLockPin, params.rasterLockPin); + ASSIGN_PIN(pFlipLockPin, params.flipLockPin); + + return TRUE; +} + +// Gets the lock pin dedicated for a given signal and returns the corresponding method +NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockSignal signal) +{ + NVEvoLockPinCaps *caps = pEvoSubDev->capabilities.pin; + NvU32 pin; + + switch (signal) { + + case NV_EVO_LOCK_SIGNAL_RASTER_LOCK: + if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev, + NULL, &pin, NULL)) { + break; + } + + if (!caps[pin].scanLock) break; + + return NV_EVO_LOCK_PIN_0 + pin; + + case NV_EVO_LOCK_SIGNAL_FRAME_LOCK: + if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev, + &pin, NULL, NULL)) { + break; + } + + if (!caps[pin].scanLock)
break; + + return NV_EVO_LOCK_PIN_0 + pin; + + case NV_EVO_LOCK_SIGNAL_FLIP_LOCK: + if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev, + NULL, NULL, &pin) || + !caps[pin].flipLock) { + // If the query from RM fails (or returns a bogus pin), fall + // back to an alternate mechanism. This may happen on boards + // with no framelock header. Look in the capabilities for the + // pin that has the requested capability. + for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) { + if (caps[pin].flipLock) + break; + } + + if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) { + // Not found + break; + } + } + + if (!caps[pin].flipLock) { + break; + } + + return NV_EVO_LOCK_PIN_0 + pin; + + case NV_EVO_LOCK_SIGNAL_STEREO: + // Look in the capabilities for the pin that has the requested capability + for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) { + if (caps[pin].stereo) + break; + } + + if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) break; + + return NV_EVO_LOCK_PIN_0 + pin; + + default: + nvAssert(!"Unknown signal type"); + break; + } + + // Pin not found + return NV_EVO_LOCK_PIN_ERROR; +} + +void nvSetDVCEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvS32 dvc, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + nvAssert(dvc >= NV_EVO_DVC_MIN); + nvAssert(dvc <= NV_EVO_DVC_MAX); + + // The HW range is from -2048 to +2047. + // Negative values are not used; they distort the colors. + // Values from 0 to 1023 grey the colors out. + // We use 0 to 2047, with 1024 as the default. + dvc += 1024; + nvAssert(dvc >= 0); + pHeadState->procAmp.satCos = dvc; + + // In SW YUV420 mode, HW is programmed with default DVC. The DVC is handled + // in a headSurface composite shader. + if (pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) { + pHeadState->procAmp.satCos = 1024; + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetProcAmp(pDispEvo, head, updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head, + NvU32 value, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + /* + * Evo values range from -128 to 127, with a default of 0; + * negative values sharpen. Control panel values range from + * 0 (less sharp) to 255 (more sharp). + */ + value = 127 - value; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetOutputScaler(pDispEvo, head, value, updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +NvBool nvLayerSetPositionEvo( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetLayerPositionRequest *pRequest) +{ + NVDispEvoPtr pDispEvo; + NvU32 sd; + + /* + * We need this call to not modify any state if it will fail, so we + * first verify that all relevant layers support output positioning, + * then go back through the layers to actually modify the relevant + * state.
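+ *
+ * In sketch form (hypothetical helper names), the check-then-commit
+ * idiom used below is:
+ *
+ *     if (!ValidateAllRequestedLayers(pRequest))  // pass 1: check only
+ *         return FALSE;                           // nothing modified yet
+ *     ApplyAllRequestedLayers(pRequest);          // pass 2: commit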
+ */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + + if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NvU32 layer; + + if ((pRequest->disp[sd].requestedHeadsBitMask & + NVBIT(head)) == 0) { + continue; + } + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NvS16 x = pRequest->disp[sd].head[head].layerPosition[layer].x; + const NvS16 y = pRequest->disp[sd].head[head].layerPosition[layer].y; + + if ((pRequest->disp[sd].head[head].requestedLayerBitMask & + NVBIT(layer)) == 0x0) { + continue; + } + + /* + * Error out if a requested layer does not support position + * updates and the requested position is not (0, 0). + */ + if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode && + (x != 0 || y != 0)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Layer %d does not support " + "position updates.", layer); + return FALSE; + } + } + } + } + + /* Checks in above block passed, so make the requested changes. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + + if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoUpdateState updateState = { }; + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvU32 layer; + + if ((pRequest->disp[sd].requestedHeadsBitMask & + NVBIT(head)) == 0) { + continue; + } + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NvS16 x = pRequest->disp[sd].head[head].layerPosition[layer].x; + const NvS16 y = pRequest->disp[sd].head[head].layerPosition[layer].y; + + if ((pRequest->disp[sd].head[head].requestedLayerBitMask & + NVBIT(layer)) == 0x0) { + continue; + } + + if ((pSdHeadState->layer[layer].outputPosition.x != x) || + (pSdHeadState->layer[layer].outputPosition.y != y)) { + + NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer]; + + pSdHeadState->layer[layer].outputPosition.x = x; + pSdHeadState->layer[layer].outputPosition.y = y; + + pDevEvo->hal->SetImmPointOut(pDevEvo, pChannel, sd, + &updateState, x, y); + } + } + + pDevEvo->hal->Update(pDevEvo, &updateState, TRUE /* releaseElv */); + } + } + + return TRUE; +} + +/* + * nvConstructHwModeTimingsImpCheckEvo() - perform an IMP check on the + * given raster timings and viewport during the + * nvConstructHwModeTimingsEvo path. If IMP fails, we try multiple + * times, each time scaling back the usage bounds until we find a + * configuration IMP will accept, or until we can't scale back any + * further. If this fails, mark the viewport as invalid. 
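+ *
+ * In outline (a sketch only; the concrete downgrade steps live in the
+ * table-driven DownGradeMetaModeUsageBounds() machinery later in this
+ * file):
+ *
+ *     while (IMP rejects the configuration) {
+ *         if (nothing is left to downgrade)
+ *             return FALSE;
+ *         scale back one usage bound and retry;
+ *     }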
+ */ + +NvBool nvConstructHwModeTimingsImpCheckEvo( + const NVConnectorEvoRec *pConnectorEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + const int head) +{ + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + NvBool requireBootClocks = !!(pParams->overrides & + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS); + NvU32 ret; + + /* bypass this checking if the user disabled IMP */ + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0) { + return TRUE; + } + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + timingsParams[head].pConnectorEvo = pConnectorEvo; + timingsParams[head].activeRmId = + nvRmAllocDisplayId( + pConnectorEvo->pDispEvo, + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId)); + if (timingsParams[head].activeRmId == 0x0) { + return FALSE; + } + timingsParams[head].pTimings = pTimings; + timingsParams[head].pUsage = &pTimings->viewPort.guaranteedUsage; + + ret = nvValidateImpOneDispDowngrade(pConnectorEvo->pDispEvo, timingsParams, + requireBootClocks, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE, + /* downgradePossibleHeadsBitMask */ + (NVBIT(NVKMS_MAX_HEADS_PER_DISP) - 1UL)); + if (!ret) { + nvEvoLogInfoString(pInfoString, + "ViewPort %dx%d exceeds hardware capabilities.", + pTimings->viewPort.out.width, + pTimings->viewPort.out.height); + } + + nvRmFreeDisplayId(pConnectorEvo->pDispEvo, timingsParams[head].activeRmId); + + return ret; +} + +/* + * Convert from NvModeTimings values to NVHwModeTimingsEvo. + */ + +static void +ConstructHwModeTimingsFromNvModeTimings(const NvModeTimings *pModeTimings, + NVHwModeTimingsEvoPtr pTimings) +{ + NvU32 hBlankStart; + NvU32 vBlankStart; + NvU32 hBlankEnd; + NvU32 vBlankEnd; + NvU32 hSyncWidth; + NvU32 vSyncWidth; + NvU32 vTotalAdjustment = 0; + + NvModeTimings modeTimings; + + modeTimings = *pModeTimings; + + if (modeTimings.doubleScan) { + modeTimings.vVisible *= 2; + modeTimings.vSyncStart *= 2; + modeTimings.vSyncEnd *= 2; + modeTimings.vTotal *= 2; + } + + /* + * The real pixel clock and width values for modes using YUV 420 emulation + * are half of the incoming values parsed from the EDID. This conversion is + * performed here, so NvModeTimings will have the user-visible (full width) + * values, and NVHwModeTimingsEvo will have the real (half width) values. + * + * HW YUV 420 requires setting the full width mode timings, which are then + * converted in HW. RM will recognize YUV420 mode is in use and halve + * these values for IMP. + * + * In either case, only modes with even width are allowed in YUV 420 mode. 
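+ *
+ * Worked example (mode values assumed): a 3840x2160 SW YUV 420 mode with
+ * hTotal = 4400 and pixelClockHz = 594000000 is halved below to
+ * hVisible = 1920, hTotal = 2200 and pixelClockHz = 297000000, while the
+ * vertical values are left untouched.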
+ */ + if (modeTimings.yuv420Mode != NV_YUV420_MODE_NONE) { + nvAssert(((modeTimings.pixelClockHz & 1) == 0) && + ((modeTimings.hVisible & 1) == 0) && + ((modeTimings.hSyncStart & 1) == 0) && + ((modeTimings.hSyncEnd & 1) == 0) && + ((modeTimings.hTotal & 1) == 0) && + ((modeTimings.vVisible & 1) == 0)); + if (modeTimings.yuv420Mode == NV_YUV420_MODE_SW) { + modeTimings.pixelClockHz /= 2; + modeTimings.hVisible /= 2; + modeTimings.hSyncStart /= 2; + modeTimings.hSyncEnd /= 2; + modeTimings.hTotal /= 2; + } + } + + pTimings->hSyncPol = modeTimings.hSyncNeg; + pTimings->vSyncPol = modeTimings.vSyncNeg; + pTimings->interlaced = modeTimings.interlaced; + pTimings->doubleScan = modeTimings.doubleScan; + + /* pTimings->pixelClock is in KHz but modeTimings.pixelClockHz is in Hz */ + + pTimings->pixelClock = HzToKHz(modeTimings.pixelClockHz); + + /* + * assign total width, height; note that when the raster timings + * are interlaced, we need to make sure SetRasterSize.Height is + * odd, per EVO's mfs file + */ + + if (pTimings->interlaced) vTotalAdjustment = 1; + + pTimings->rasterSize.x = modeTimings.hTotal; + pTimings->rasterSize.y = modeTimings.vTotal | vTotalAdjustment; + + /* + * A bit of EVO quirkiness: the hw increases the blank/sync values + * by one, so we need to offset by subtracting one. + * + * In other words, the h/w inserts one extra sync line/pixel, thus + * incrementing the raster params by one. The number of blank + * lines/pixels we get is true to what we ask for. Note the hw + * does not increase the TotalImageSize by one, so we don't need to + * adjust SetRasterSize. + * + * This is slightly unintuitive. Per Evo's specs, the blankEnd + * comes before blankStart, as defined below. BlankStart: the last + * pixel/line at the end of the h/v active area. BlankEnd: the + * last pixel/line at the end of the h/v blanking. + * + * Also: note that in the below computations, we divide by two for + * interlaced modes *before* subtracting; see bug 263622. + */ + + hBlankStart = modeTimings.hVisible + + (modeTimings.hTotal - modeTimings.hSyncStart); + + vBlankStart = modeTimings.vVisible + + (modeTimings.vTotal - modeTimings.vSyncStart); + + hBlankEnd = (modeTimings.hTotal - modeTimings.hSyncStart); + vBlankEnd = (modeTimings.vTotal - modeTimings.vSyncStart); + + hSyncWidth = (modeTimings.hSyncEnd - modeTimings.hSyncStart); + vSyncWidth = (modeTimings.vSyncEnd - modeTimings.vSyncStart); + + if (pTimings->interlaced) { + vBlankStart /= 2; + vBlankEnd /= 2; + vSyncWidth /= 2; + } + + pTimings->rasterSyncEnd.x = hSyncWidth - 1; + pTimings->rasterSyncEnd.y = vSyncWidth - 1; + pTimings->rasterBlankStart.x = hBlankStart - 1; + pTimings->rasterBlankStart.y = vBlankStart - 1; + pTimings->rasterBlankEnd.x = hBlankEnd - 1; + pTimings->rasterBlankEnd.y = vBlankEnd - 1; + + /* assign rasterVertBlank2 */ + + if (pTimings->interlaced) { + const NvU32 firstFieldHeight = modeTimings.vTotal / 2; + + pTimings->rasterVertBlank2Start = firstFieldHeight + vBlankStart - 1; + pTimings->rasterVertBlank2End = firstFieldHeight + vBlankEnd - 1; + } else { + pTimings->rasterVertBlank2Start = 0; + pTimings->rasterVertBlank2End = 0; + } + + pTimings->hdmi3D = modeTimings.hdmi3D; + pTimings->yuv420Mode = modeTimings.yuv420Mode; +} + + + +/* + * Adjust the HwModeTimings as necessary to meet dual link dvi + * requirements; returns TRUE if the timings were successfully + * modified; returns FALSE if the timings cannot be made valid for + * dual link dvi.
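+ *
+ * Worked example (register values assumed): with rasterSyncEnd.x = 31,
+ * rasterBlankEnd.x = 110 and rasterBlankStart.x = 1030, blankEnd is even,
+ * so both blank values are decremented to 109 and 1029 and the active
+ * region starts on an even pixel; had blankEnd been 32 (adjacent to
+ * syncEnd), both would have been incremented instead.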
+ */ +static NvBool ApplyDualLinkRequirements(const NVDpyEvoRec *pDpyEvo, + const struct + NvKmsModeValidationParams *pParams, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + int adjust; + + nvAssert(pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP); + + if (pTimings->protocol != NVKMS_PROTOCOL_SOR_DUAL_TMDS) { + return TRUE; + } + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK) != 0) { + return TRUE; + } + + /* extract the fields we will need below */ + + /* + * hTotal must be even for dual link dvi; we won't try to patch + * the htotal size; just give up if it isn't even + */ + + if ((pTimings->rasterSize.x % 2) != 0) { + nvEvoLogInfoString(pInfoString, + "Horizontal Total (%d) must be even for dual link DVI mode timings.", + pTimings->rasterSize.x); + return FALSE; + } + + /* + * RASTER_BLANK_END_X must be odd, so that the active region + * starts on the following (even) pixel; if it is odd, we are + * already done + */ + + if ((pTimings->rasterBlankEnd.x % 2) == 1) return TRUE; + + /* + * RASTER_BLANK_END_X is even, so we need to adjust both + * RASTER_BLANK_END_X and RASTER_BLANK_START_X by one; we'll first + * try to subtract one pixel from both + */ + + adjust = -1; + + /* + * if RASTER_BLANK_END_X cannot be made smaller (would collide + * with hSyncEnd), see if it would be safe to instead add one to + * RASTER_BLANK_END_X and RASTER_BLANK_START_X + */ + + if (pTimings->rasterBlankEnd.x <= pTimings->rasterSyncEnd.x + 1) { + if (pTimings->rasterBlankStart.x + 1 >= pTimings->rasterSize.x) { + nvEvoLogInfoString(pInfoString, + "Cannot adjust mode timings for dual link DVI requirements."); + return FALSE; + } + adjust = 1; + } + + pTimings->rasterBlankEnd.x += adjust; + pTimings->rasterBlankStart.x += adjust; + + nvEvoLogInfoString(pInfoString, + "Adjusted mode timings for dual link DVI requirements."); + + return TRUE; +} + +/* Query the HDMI 2.1 FRL configuration, if applicable. */ +static NvBool QueryHdmiFrlConfig(const NVDpyEvoRec *pDpyEvo, + const struct + NvKmsModeValidationParams *pParams, + const NvModeTimings *pModeTimings, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + /* TODO: apply any overrides from ModeValidationParams. */ + if (!nvHdmiFrlQueryConfig(pDpyEvo, + pModeTimings, + pTimings, + pParams)) { + nvEvoLogInfoString(pInfoString, + "Unable to determine HDMI 2.1 Fixed Rate Link configuration."); + return FALSE; + } + + return TRUE; +} + +void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsScalingUsageBounds *pScaling) +{ + pScaling->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + pScaling->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + pScaling->vTaps = pDevEvo->hal->caps.minScalerTaps; + pScaling->vUpscalingAllowed = FALSE; +} + +/* + * Check if the provided number of vertical taps is possible based on the + * capabilities: the lineStore (the smaller of inWidth and outWidth) must + * not exceed the maximum pixels for the desired taps; see bug 241014 + */ +static NvBool IsVTapsPossible(const NVEvoScalerCaps *pScalerCaps, + NvU32 inWidth, NvU32 outWidth, + NVEvoScalerTaps nTaps) +{ + const NvU32 lineStore = NV_MIN(inWidth, outWidth); + NvU32 maxPixels = pScalerCaps->taps[nTaps].maxPixelsVTaps; + + return lineStore <= maxPixels; +} + +/*! + * Compute the scale factor and check against the maximum. 
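+ *
+ * Effectively: factor = ceil(in * 1024 / out). For example (values
+ * assumed), in = 1200 and out = 600 give factor = 2048, i.e. a 2x
+ * downscale in the *1024 fixed-point convention used here.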
+ * + * param[in] max Max scale factor to check against (* 1024) + * param[in] in Input width or height + * param[in] out Output width or height + * param[out] pFactor Output scale factor (* 1024) + */ +static NvBool ComputeScalingFactor(NvU32 max, + NvU16 in, NvU16 out, + NvU16 *pFactor) +{ + /* Use a 32-bit temporary to prevent overflow */ + NvU32 tmp; + + /* Add (out - 1) to round up */ + tmp = ((in * 1024) + (out - 1)) / out; + + /* Check against scaling limits. */ + if (tmp > max) { + return FALSE; + } + + *pFactor = tmp; + return TRUE; +} + +/*! + * Compute scaling factors based on in/out dimensions. + * Used by IMP and when programming viewport and window parameters in HW. + * + * The 'maxScaleFactor' values are defined by nvdClass_01.mfs as: + * SizeIn/SizeOut * 1024 + */ +NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps, + struct NvKmsScalingUsageBounds *out) +{ + const NVEvoScalerTapsCaps *pTapsCaps = NULL; + + out->vTaps = vTaps; + + /* Start with default values (1.0) */ + out->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + out->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + + if (outHeight > inHeight) { + out->vUpscalingAllowed = TRUE; + } else if (outHeight < inHeight) { + out->vUpscalingAllowed = FALSE; + + pTapsCaps = &pScalerCaps->taps[vTaps]; + if (!ComputeScalingFactor(pTapsCaps->maxVDownscaleFactor, + inHeight, outHeight, + &out->maxVDownscaleFactor)) { + return FALSE; + } + } + + if (outWidth < inWidth) { + pTapsCaps = &pScalerCaps->taps[hTaps]; + if (!ComputeScalingFactor(pTapsCaps->maxHDownscaleFactor, + inWidth, outWidth, + &out->maxHDownscaleFactor)) { + return FALSE; + } + } + + return TRUE; +} + +NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NvBool doubleScan, + NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut) +{ + NVEvoScalerTaps hTaps, vTaps; + NvBool setHTaps = (outWidth != inWidth); + NvBool setVTaps = (outHeight != inHeight); + + /* + * Select the taps filtering; we select the highest taps allowed with our + * scaling configuration. + * + * Note if requiresScalingTapsInBothDimensions is true and if we are + * scaling in *either* dimension, then we need to program > 1 taps + * in *both* dimensions. + */ + if ((setHTaps || setVTaps) && + pDevEvo->hal->caps.requiresScalingTapsInBothDimensions) { + setHTaps = TRUE; + setVTaps = TRUE; + } + + /* + * Horizontal taps: if not scaling, then no filtering; otherwise, set the + * maximum filtering, because htaps shouldn't have any constraints (unlike + * vtaps... see below). + */ + if (setHTaps) { + /* + * XXX dispClass_01.mfs says: "For text and desktop scaling, the 2 tap + * bilinear frequently looks better than the 8 tap filter which is more + * optimized for video type scaling." Once we determine how best to + * expose configuration of taps, we should choose how to indicate that 8 + * or 5 taps is the maximum. + * + * For now, we'll start with 2 taps as the default, but may end up + * picking a higher taps value if the required H downscaling factor + * isn't possible with 2 taps. 
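+ *
+ * For example (cap values assumed): downscaling 3840 -> 960 needs a
+ * factor of 4096 (4x); if the 2-tap maxHDownscaleFactor only allows
+ * 2048 (2x), the loop below keeps increasing the taps until it finds a
+ * value whose cap admits 4096, and fails if none does.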
+ */ + NvBool hTapsFound = FALSE; + + for (hTaps = NV_EVO_SCALER_2TAPS; + hTaps <= NV_EVO_SCALER_TAPS_MAX; + hTaps++) { + NvU16 hFactor; + + if (!ComputeScalingFactor( + pScalerCaps->taps[hTaps].maxHDownscaleFactor, + inWidth, outWidth, + &hFactor)) { + continue; + } + + hTapsFound = TRUE; + break; + } + + if (!hTapsFound) { + return FALSE; + } + } else { + hTaps = pDevEvo->hal->caps.minScalerTaps; + } + + /* + * Vertical taps: if scaling, set the maximum valid filtering, otherwise, no + * filtering. + */ + if (setVTaps) { + /* + * Select the maximum vertical taps based on the capabilities. + * + * For doublescan modes, limit to 2 taps to reduce blurriness. We really + * want plain old line doubling, but EVO doesn't support that. + */ + if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_5TAPS) && + !doubleScan) { + vTaps = NV_EVO_SCALER_5TAPS; + } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_3TAPS) && + !doubleScan) { + vTaps = NV_EVO_SCALER_3TAPS; + } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_2TAPS)) { + vTaps = NV_EVO_SCALER_2TAPS; + } else { + return FALSE; + } + } else { + vTaps = pDevEvo->hal->caps.minScalerTaps; + } + + *hTapsOut = hTaps; + *vTapsOut = vTaps; + + return TRUE; +} + +/* + * Check that ViewPortIn does not exceed hardware limits and compute vTaps and + * hTaps based on configured ViewPortIn/Out scaling if possible given scaler + * capabilities. + */ +NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort; + const NvU32 inWidth = pViewPort->in.width; + const NvU32 inHeight = pViewPort->in.height; + const NvU32 outWidth = pViewPort->out.width; + const NvU32 outHeight = pViewPort->out.height; + const NvBool scaling = (outWidth != inWidth) || (outHeight != inHeight); + NVEvoScalerTaps hTaps, vTaps; + + /* + * As per the MFS, there is a restriction for the width and height + * of ViewPortIn and ViewPortOut + */ + if (inWidth > 8192 || inHeight > 8192 || + outWidth > 8192 || outHeight > 8192) { + nvEvoLogInfoString(pInfoString, + "Viewport dimensions exceed hardware capabilities"); + return FALSE; + } + + if (!nvAssignScalerTaps(pDevEvo, pScalerCaps, inWidth, inHeight, outWidth, outHeight, + pTimings->doubleScan, &hTaps, &vTaps)) { + nvEvoLogInfoString(pInfoString, + "Unable to configure scaling from %dx%d to %dx%d (exceeds filtering capabilities)", + inWidth, inHeight, + outWidth, outHeight); + return FALSE; + } + + /* + * If this is an interlaced mode but we don't have scaling + * configured, check that the width will fit in the 2-tap vertical + * LineStoreSize; this is an EVO requirement for interlaced + * rasters + */ + if (pTimings->interlaced && !scaling) { + /* !scaling means widths should be same */ + nvAssert(outWidth == inWidth); + + if (outWidth > pScalerCaps->taps[NV_EVO_SCALER_2TAPS].maxPixelsVTaps) { + nvEvoLogInfoString(pInfoString, + "Interlaced mode requires filtering, but line width (%d) exceeds filtering capabilities", + outWidth); + return FALSE; + } + + /* hTaps and vTaps should have been set to minScalerTaps above */ + nvAssert(hTaps == pDevEvo->hal->caps.minScalerTaps); + nvAssert(vTaps == pDevEvo->hal->caps.minScalerTaps); + } + + pViewPort->hTaps = hTaps; + pViewPort->vTaps = vTaps; + return TRUE; +} + +static void AssignGuaranteedSOCBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsUsageBounds *pGuaranteed) 
+{ + NvU32 layer; + + pGuaranteed->layer[NVKMS_MAIN_LAYER].usable = TRUE; + pGuaranteed->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats = + nvEvoGetFormatsWithEqualOrLowerUsageBound( + NvKmsSurfaceMemoryFormatA8R8G8B8, + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats); + nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[NVKMS_MAIN_LAYER].scaling); + + for (layer = 1; layer < ARRAY_LEN(pGuaranteed->layer); layer++) { + pGuaranteed->layer[layer].usable = FALSE; + nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[layer].scaling); + } +} + +/* + * Initialize the given NvKmsUsageBounds. Ask for everything supported by the HW + * by default. Later, based on what IMP says, we will scale back as needed. + */ +void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo, + NVHwModeViewPortEvo *pViewPort) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + struct NvKmsUsageBounds *pPossible = &pViewPort->possibleUsage; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pPossible->layer); i++) { + struct NvKmsScalingUsageBounds *pScaling = &pPossible->layer[i].scaling; + + pPossible->layer[i].usable = TRUE; + pPossible->layer[i].supportedSurfaceMemoryFormats = + pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats; + nvInitScalingUsageBounds(pDevEvo, pScaling); + + /* Scaling is not currently supported for the main layer. Bug 3488083 */ + if (i != NVKMS_MAIN_LAYER && pDevEvo->hal->GetWindowScalingCaps) { + const NVEvoScalerCaps *pScalerCaps = + pDevEvo->hal->GetWindowScalingCaps(pDevEvo); + int j; + + for (j = NV_EVO_SCALER_TAPS_MAX; j >= NV_EVO_SCALER_TAPS_MIN; j--) { + const NVEvoScalerTapsCaps *pTapsCaps = &pScalerCaps->taps[j]; + + if ((pTapsCaps->maxVDownscaleFactor == 0) && + (pTapsCaps->maxHDownscaleFactor == 0)) { + continue; + } + + pScaling->maxVDownscaleFactor = pTapsCaps->maxVDownscaleFactor; + pScaling->maxHDownscaleFactor = pTapsCaps->maxHDownscaleFactor; + pScaling->vTaps = j; + pScaling->vUpscalingAllowed = (pTapsCaps->maxPixelsVTaps > 0); + break; + } + } + } + + if (pDevEvo->isSOCDisplay) { + AssignGuaranteedSOCBounds(pDevEvo, &pViewPort->guaranteedUsage); + } else { + pViewPort->guaranteedUsage = *pPossible; + } +} + +/* + * ConstructHwModeTimingsViewPort() - determine the ViewPortOut size + * + * ViewPortIn (specified by inWidth, inHeight) selects the pixels to + * extract from the scanout surface; ViewPortOut positions those + * pixels within the raster timings. + * + * If the configuration is not possible, pViewPort->valid will be set + * to false; otherwise, pViewPort->valid will be set to true. + */ + +static NvBool +ConstructHwModeTimingsViewPort(const NVDispEvoRec *pDispEvo, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut) +{ + NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort; + NvU32 outWidth, outHeight; + const NvU32 hVisible = nvEvoVisibleWidth(pTimings); + const NvU32 vVisible = nvEvoVisibleHeight(pTimings); + + /* the ViewPortOut should default to the raster size */ + + outWidth = hVisible; + outHeight = vVisible; + + pViewPort->out.xAdjust = 0; + pViewPort->out.yAdjust = 0; + pViewPort->out.width = outWidth; + pViewPort->out.height = outHeight; + + /* + * If custom viewPortOut or viewPortIn were specified, do basic + * validation and then assign them to pViewPort. We'll do more + * extensive checking of these values as part of IMP. 
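+ *
+ * (Worked example, values assumed: a 1280-wide viewPortOut at x = 0 on
+ * a 1920-wide raster yields xAdjust = 0 - (1920 - 1280) / 2 = -320,
+ * placing the viewport 320 pixels left of center, i.e. flush with the
+ * left edge; the relative interpretation is spelled out next.)
+ *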
Note that + * pViewPort->out.[xy]Adjust are relative to viewPortOut centered + * within the raster timings, but pViewPortOut->[xy]1 are relative + * to 0,0. + */ + if (pViewPortOut) { + NvS16 offset; + struct NvKmsRect viewPortOut = *pViewPortOut; + + /* + * When converting from user viewport out to hardware raster timings, + * double in the vertical dimension + */ + if (pTimings->doubleScan) { + viewPortOut.y *= 2; + viewPortOut.height *= 2; + } + + /* + * The client-specified viewPortOut is in "full" horizontal space for + * SW YUV420 modes. Convert to "half" horizontal space (matching + * NVHwModeTimingsEvo and viewPortIn). + */ + if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) { + viewPortOut.x /= 2; + viewPortOut.width /= 2; + } + + if (A_plus_B_greater_than_C_U16(viewPortOut.x, + viewPortOut.width, + hVisible)) { + return FALSE; + } + + if (A_plus_B_greater_than_C_U16(viewPortOut.y, + viewPortOut.height, + vVisible)) { + return FALSE; + } + + offset = (hVisible - viewPortOut.width) / 2 * -1; + pViewPort->out.xAdjust = offset + viewPortOut.x; + + offset = (vVisible - viewPortOut.height) / 2 * -1; + pViewPort->out.yAdjust = offset + viewPortOut.y; + + pViewPort->out.width = viewPortOut.width; + pViewPort->out.height = viewPortOut.height; + } + + if (pViewPortSizeIn) { + if (pViewPortSizeIn->width <= 0) { + return FALSE; + } + if (pViewPortSizeIn->height <= 0) { + return FALSE; + } + + pViewPort->in.width = pViewPortSizeIn->width; + pViewPort->in.height = pViewPortSizeIn->height; + } else { + pViewPort->in.width = pViewPort->out.width; + pViewPort->in.height = pViewPort->out.height; + + /* When deriving viewportIn from viewportOut, halve the height for + * doubleScan */ + if (pTimings->doubleScan) { + pViewPort->in.height /= 2; + } + } + + nvAssignDefaultUsageBounds(pDispEvo, &pTimings->viewPort); + + return TRUE; +} + + + +/* + * nvGetDfpProtocol()- determine the protocol to use on the given pDpy + * with the given pTimings; assigns pTimings->protocol. + */ + +NvBool nvGetDfpProtocol(const NVDpyEvoRec *pDpyEvo, + NVHwModeTimingsEvoPtr pTimings) +{ + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + const NvU32 rmProtocol = pConnectorEvo->or.protocol; + enum nvKmsTimingsProtocol timingsProtocol; + + nvAssert(pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP); + + if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + /* Override protocol if this mode requires HDMI FRL. */ + if (pTimings->hdmiFrlConfig.frlRate != HDMI_FRL_DATA_RATE_NONE) { + nvAssert(nvDpyIsHdmiEvo(pDpyEvo)); + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A || + rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B); + timingsProtocol = NVKMS_PROTOCOL_SOR_HDMI_FRL; + } else { + switch (rmProtocol) { + default: + nvAssert(!"unrecognized SOR RM protocol"); + return FALSE; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: + if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) { + return FALSE; + } + timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: + if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) { + return FALSE; + } + timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: + /* + * Override dual/single link TMDS protocol if necessary. + * XXX might be nice to give a way for users to override the + * SingleLink/DualLink decision. 
+ * + * TMDS_A: "use A side of the link" + * TMDS_B: "use B side of the link" + */ + if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) { + timingsProtocol = NVKMS_PROTOCOL_SOR_DUAL_TMDS; + } else { + timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A; + } + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + timingsProtocol = NVKMS_PROTOCOL_SOR_DP_A; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + timingsProtocol = NVKMS_PROTOCOL_SOR_DP_B; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM: + timingsProtocol = NVKMS_PROTOCOL_SOR_LVDS_CUSTOM; + break; + } + } + } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) { + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC); + timingsProtocol = NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC; + } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI) { + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI); + timingsProtocol = NVKMS_PROTOCOL_DSI; + } else { + nvAssert(!"Unknown OR type"); + return FALSE; + } + + pTimings->protocol = timingsProtocol; + + return TRUE; + +} + + + +/* + * ConstructHwModeTimingsEvoCrt() - construct EVO hardware timings to + * drive a CRT, given the mode timings in pMt + */ + +static NvBool +ConstructHwModeTimingsEvoCrt(const NVConnectorEvoRec *pConnectorEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings); + + /* assign the protocol; we expect DACs to have RGB protocol */ + + nvAssert(pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT); + + pTimings->protocol = NVKMS_PROTOCOL_DAC_RGB; + + /* assign scaling fields */ + + return ConstructHwModeTimingsViewPort(pConnectorEvo->pDispEvo, pTimings, + pInfoString, pViewPortSizeIn, + pViewPortOut); +} + + +/*! + * Construct EVO hardware timings to drive a digital protocol (TMDS, + * DP, etc). + * + * \param[in] pDpy The display device for which to build timings. + * \param[in] pModeTimings The hw-neutral description of the timings. + * \param[out] pTimings The EVO-specific modetimings. + * + * \return TRUE if the EVO modetimings could be built; FALSE if failure. + */ +static NvBool ConstructHwModeTimingsEvoDfp(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + const struct + NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ret; + + ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings); + + ret = QueryHdmiFrlConfig(pDpyEvo, pParams, + pModeTimings, pTimings, + pInfoString); + + if (!ret) { + return FALSE; + } + + ret = nvGetDfpProtocol(pDpyEvo, pTimings); + + if (!ret) { + return FALSE; + } + + ret = ApplyDualLinkRequirements(pDpyEvo, pParams, pTimings, pInfoString); + + if (!ret) { + return FALSE; + } + + return ConstructHwModeTimingsViewPort(pDpyEvo->pDispEvo, pTimings, + pInfoString, pViewPortSizeIn, + pViewPortOut); +} + +NvBool nvDowngradeHwModeTimingsDpPixelDepthEvo( + NVHwModeTimingsEvoPtr pTimings, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace) +{ + /* + * In YUV420, HW is programmed with RGB color space and full color range. + * The color space conversion and color range compression happen in a + * headSurface composite shader. 
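+ *
+ * The downgrade order is fixed: 30 bpp -> 24 bpp -> 18 bpp, then fail.
+ * nvDPValidateModeEvo() below drives this in a retry loop, roughly
+ * (the tryAgain loop there, rewritten as a while for illustration):
+ *
+ *     while (!nvDPValidateModeForDpyEvo(pDpyEvo, colorSpace, pParams,
+ *                                       pTimings)) {
+ *         if (!nvDowngradeHwModeTimingsDpPixelDepthEvo(pTimings,
+ *                                                      colorSpace)) {
+ *             return FALSE;
+ *         }
+ *     }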
+ * + * XXX Add support for + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP. + */ + nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB); + + switch (pTimings->pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + /* Cannot downgrade pixelDepth further. */ + return FALSE; + + case NVKMS_PIXEL_DEPTH_24_444: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_18_444; + break; + case NVKMS_PIXEL_DEPTH_30_444: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_24_444; + break; + } + + return TRUE; +} + +/* + * nvDPValidateModeEvo() - For DP devices handled by the DP lib, check DP + * bandwidth and pick the best possible/supported pixel depth to use for + * the given mode timings. + */ + +NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams) +{ + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + /* XXX Add support for + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP. */ + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace = + (pTimings->yuv420Mode != NV_YUV420_MODE_NONE) ? + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 : + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + + /* Only do this for DP devices. */ + if (!nvConnectorUsesDPLib(pConnectorEvo)) { + return TRUE; + } + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0) { + return TRUE; + } + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + nvAssert(pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR); + + nvAssert(pTimings->pixelDepth == NVKMS_PIXEL_DEPTH_30_444 || + pTimings->pixelDepth == NVKMS_PIXEL_DEPTH_24_444 || + pTimings->pixelDepth == NVKMS_PIXEL_DEPTH_18_444); + + tryAgain: + + if (!nvDPValidateModeForDpyEvo(pDpyEvo, colorSpace, pParams, pTimings)) { + if (nvDowngradeHwModeTimingsDpPixelDepthEvo(pTimings, colorSpace)) { + goto tryAgain; + } + /* + * Cannot downgrade pixelDepth further -- + * this mode is not possible on this DP link, so fail. 
+ */ + + return FALSE; + } + + return TRUE; +} + + + +/* + * Construct the hardware values to program EVO for the specified + * NVModeTimings + */ + +NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams + *pParams, + NVEvoInfoStringPtr pInfoString) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NvBool ret; + + /* assign the pTimings values */ + + if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + ret = ConstructHwModeTimingsEvoDfp(pDpyEvo, + &pKmsMode->timings, + pViewPortSizeIn, pViewPortOut, + pTimings, pParams, pInfoString); + } else if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + ret = ConstructHwModeTimingsEvoCrt(pConnectorEvo, + &pKmsMode->timings, + pViewPortSizeIn, pViewPortOut, + pTimings, pInfoString); + } else { + nvAssert(!"Invalid pDpyEvo->type"); + return FALSE; + } + + if (!ret) return FALSE; + + /* tweak the raster timings for gsync */ + + if (pDpyEvo->pDispEvo->pFrameLockEvo) { + // if this fails, the timing remains untweaked, which just means + // that the mode may not work well with frame lock + TweakTimingsForGsync(pDpyEvo, pTimings, pInfoString, pParams->stereoMode); + } + + /* Defaults, should match EVO displayClass_02.mfs values for _DEFAULT */ + if (pConnectorEvo->legacyType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_30_444; + } else if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + + if (pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + switch (pDpyEvo->parsedEdid.info.input.u.digital.bpc) { + case 10: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_30_444; + break; + case 6: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_18_444; + break; + default: + nvAssert(!"Invalid Pixel Depth for DSI"); + // fall through + case 8: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_24_444; + break; + } + } else if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + // Pick displayport pixel depths for raster timings. + // Start off picking best possible depth based on monitor caps + // If the monitor doesn't have an EDID version 1.4 or higher, assume + // it's 8. 
+ // For monitor with EDID version 1.4 or higher: + // if bpc >= 10 configure pixel depth to 30bpp + // if bpc is 8 or undefined configure pixel depth to 24bpp + // if bpc is 6 use 18bpp as pixel depth + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.input.isDigital && + pDpyEvo->parsedEdid.info.version >= NVT_EDID_VER_1_4) { + switch (pDpyEvo->parsedEdid.info.input.u.digital.bpc) { + case 16: + case 14: + case 12: + case 10: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_30_444; + break; + case 8: + case 0: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_24_444; + break; + case 6: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_18_444; + break; + default: + nvAssert(!"Invalid EDID bit depth for DP"); + return FALSE; + } + } else { + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_24_444; + } + } else { + /* TMDS default */ + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_24_444; + } + } + + pTimings->stereo.mode = pParams->stereoMode; + pTimings->stereo.isAegis = pDpyEvo->stereo3DVision.isAegis; + + return TRUE; +} + +static NvBool DowngradeViewPortTaps(const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NVEvoScalerTaps srcTaps, + NVEvoScalerTaps dstTaps, + NvBool isVert, + NVEvoScalerTaps *pTaps) +{ + const NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps; + NvBool dstPossible; + + if (isVert) { + dstPossible = IsVTapsPossible(pScalerCaps, pViewPort->in.width, + pViewPort->out.width, dstTaps); + } else { + dstPossible = pScalerCaps->taps[dstTaps].maxHDownscaleFactor > 0; + } + + if (*pTaps >= srcTaps && dstPossible) { + *pTaps = dstTaps; + return TRUE; + } + + return FALSE; +} + +/* Downgrade the htaps from 8 to 5 */ +static NvBool DowngradeViewPortHTaps8(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_8TAPS, + NV_EVO_SCALER_5TAPS, + FALSE /* isVert */, + &pViewPort->hTaps); +} + +/* Downgrade the htaps from 5 to 2 */ +static NvBool DowngradeViewPortHTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_5TAPS, + NV_EVO_SCALER_2TAPS, + FALSE /* isVert */, + &pViewPort->hTaps); +} + +/* Downgrade the vtaps from 5 to 3 */ +static NvBool DowngradeViewPortVTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_5TAPS, + NV_EVO_SCALER_3TAPS, + TRUE /* isVert */, + &pViewPort->vTaps); +} + +/* Downgrade the vtaps from 3 to 2 */ +static NvBool DowngradeViewPortVTaps3(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_3TAPS, + NV_EVO_SCALER_2TAPS, + TRUE /* isVert */, + &pViewPort->vTaps); +} + +static NvBool +DowngradeLayerDownscaleFactor(NVHwModeViewPortEvoPtr pViewPort, + const NvU32 layer, + NvU16 srcFactor, + NvU16 dstFactor, + NvU16 *pFactor) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + + if (!pUsage->layer[layer].usable) { + return FALSE; + } + + if (*pFactor == srcFactor) { + *pFactor = dstFactor; + return TRUE; + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor4X(const NVDevEvoRec *pDevEvo, + const NvU32 
head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_4X, + NV_EVO_SCALE_FACTOR_3X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor3X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_3X, + NV_EVO_SCALE_FACTOR_2X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor2X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_2X, + NV_EVO_SCALE_FACTOR_1X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerHDownscaleFactor4X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_4X, + NV_EVO_SCALE_FACTOR_3X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerHDownscaleFactor3X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_3X, + NV_EVO_SCALE_FACTOR_2X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerHDownscaleFactor2X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_2X, + NV_EVO_SCALE_FACTOR_1X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +/* Downgrade the vtaps from 5 to 2 */ +static NvBool DowngradeLayerVTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + 
NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pUsage->layer[layer].scaling; + + if (!pUsage->layer[layer].usable) { + continue; + } + + if (pScaling->vTaps == NV_EVO_SCALER_5TAPS) { + pScaling->vTaps = NV_EVO_SCALER_2TAPS; + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerVUpscaling(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pUsage->layer[layer].scaling; + + if (!pUsage->layer[layer].usable) { + continue; + } + + if (pScaling->vUpscalingAllowed) { + pScaling->vUpscalingAllowed = FALSE; + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeViewPortOverlayFormats( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (layer == NVKMS_MAIN_LAYER || !pUsage->layer[layer].usable) { + continue; + } + + if (pUsage->layer[layer].supportedSurfaceMemoryFormats & + removeSurfaceMemoryFormats) { + pUsage->layer[layer].supportedSurfaceMemoryFormats &= + ~removeSurfaceMemoryFormats; + if (pUsage->layer[layer].supportedSurfaceMemoryFormats == 0) { + pUsage->layer[layer].usable = FALSE; + } + + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeViewPortBaseFormats( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + + if (!pUsage->layer[NVKMS_MAIN_LAYER].usable) { + return FALSE; + } + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + removeSurfaceMemoryFormats) { + pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &= + ~removeSurfaceMemoryFormats; + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats == 0) { + pUsage->layer[NVKMS_MAIN_LAYER].usable = FALSE; + } + + return TRUE; + } + + return FALSE; +} + +typedef NvBool (*DowngradeViewPortFuncPtr)(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats); + +/* + * Try to downgrade the usage bounds of the viewports, keeping the + * viewports roughly equal in capability; we do this from + * ValidateMetaMode50() when IMP rejects the mode. Return TRUE if we + * were able to downgrade something; return FALSE if there was nothing + * left to downgrade. 
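+ *
+ * Each call makes at most one downgrade: the downgradeFuncs[] table
+ * below is walked in order, and the first entry that can still reduce
+ * something on one of the downgradable heads wins. A sketch of the
+ * caller's retry loop (hypothetical IMP predicate name):
+ *
+ *     while (!ImpAccepts(pDispEvo, timingsParams)) {
+ *         if (!DownGradeMetaModeUsageBounds(pDevEvo, timingsParams,
+ *                                           headsMask)) {
+ *             return FALSE;  // nothing left to give up
+ *         }
+ *     }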
+ */ + +static NvBool DownGradeMetaModeUsageBounds( + const NVDevEvoRec *pDevEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvU32 downgradePossibleHeadsBitMask) +{ + static const struct { + DowngradeViewPortFuncPtr downgradeFunc; + NvU64 removeSurfaceMemoryFormats; + } downgradeFuncs[] = { + { DowngradeLayerVDownscaleFactor4X, + 0 }, + { DowngradeLayerHDownscaleFactor4X, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP }, + { DowngradeLayerVDownscaleFactor3X, + 0 }, + { DowngradeLayerHDownscaleFactor3X, + 0 }, + { DowngradeViewPortVTaps5, + 0 }, + { DowngradeViewPortVTaps3, + 0 }, + { DowngradeViewPortHTaps8, + 0 }, + { DowngradeViewPortHTaps5, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP }, + { DowngradeLayerVTaps5, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP }, + { DowngradeLayerVDownscaleFactor2X, + 0 }, + { DowngradeLayerHDownscaleFactor2X, + 0 }, + { DowngradeLayerVUpscaling, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP }, + }; + int i; + + // XXX assume the heads have equal capabilities + // XXX assume the gpus have equal capabilities + + const NVEvoHeadCaps *pHeadCaps = + &pDevEvo->gpus[0].capabilities.head[0]; + + + for (i = 0; i < ARRAY_LEN(downgradeFuncs); i++) { + int head; + FOR_ALL_HEADS(head, downgradePossibleHeadsBitMask) { + if (timingsParams[head].pTimings == NULL) { + continue; + } + + if (downgradeFuncs[i].downgradeFunc( + pDevEvo, + head, + pHeadCaps, + &timingsParams[head].pTimings->viewPort, + downgradeFuncs[i].removeSurfaceMemoryFormats)) { + return TRUE; + } + } + } + + /* 
Nothing else to downgrade */
+    return FALSE;
+}
+
+NvBool nvAllocateDisplayBandwidth(
+    NVDispEvoPtr pDispEvo,
+    NvU32 newIsoBandwidthKBPS,
+    NvU32 newDramFloorKBPS)
+{
+    NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS params = { };
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    if (!pDevEvo->isSOCDisplay) {
+        return TRUE;
+    }
+
+    params.subDeviceInstance = 0;
+    params.averageBandwidthKBPS = newIsoBandwidthKBPS;
+    params.floorBandwidthKBPS = newDramFloorKBPS;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH,
+                         &params, sizeof(params));
+    if (ret != NV_OK) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Failed to allocate %u KBPS Iso and %u KBPS Dram",
+                    newIsoBandwidthKBPS, newDramFloorKBPS);
+        return FALSE;
+    }
+
+    pDispEvo->isoBandwidthKBPS = newIsoBandwidthKBPS;
+    pDispEvo->dramFloorKBPS = newDramFloorKBPS;
+
+    return TRUE;
+}
+
+static void AssignNVEvoIsModePossibleDispInput(
+    NVDispEvoPtr pDispEvo,
+    const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP],
+    NvBool requireBootClocks,
+    NVEvoReallocateBandwidthMode reallocBandwidth,
+    NVEvoIsModePossibleDispInput *pImpInput)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 head;
+    NvU32 nextSorIndex = 0;
+
+    nvkms_memset(pImpInput, 0, sizeof(*pImpInput));
+
+    pImpInput->requireBootClocks = requireBootClocks;
+    pImpInput->reallocBandwidth = reallocBandwidth;
+
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        const NVConnectorEvoRec *pConnectorEvo =
+            timingsParams[head].pConnectorEvo;
+        NvU32 otherHead = 0;
+
+        nvAssert((timingsParams[head].pTimings == NULL) ==
+                 (timingsParams[head].pConnectorEvo == NULL));
+
+        pImpInput->head[head].orIndex = NV_INVALID_OR;
+
+        if (timingsParams[head].pTimings == NULL) {
+            continue;
+        }
+
+        pImpInput->head[head].pTimings = timingsParams[head].pTimings;
+        pImpInput->head[head].displayId = timingsParams[head].activeRmId;
+        pImpInput->head[head].orType = pConnectorEvo->or.type;
+        pImpInput->head[head].pUsage = timingsParams[head].pUsage;
+
+        if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
+                NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) ||
+            pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+
+            nvAssert(pConnectorEvo->or.mask != 0x0);
+
+            pImpInput->head[head].orIndex =
+                nvEvoConnectorGetPrimaryOr(pConnectorEvo);
+            continue;
+        }
+
+        /*
+         * If more than one head is attached to the same connector, then make
+         * sure that all of them use the same SOR index.
+         */
+        for (otherHead = 0; otherHead < head; otherHead++) {
+            if (timingsParams[otherHead].pConnectorEvo == pConnectorEvo) {
+                pImpInput->head[head].orIndex = pImpInput->head[otherHead].orIndex;
+                break;
+            }
+        }
+
+        /*
+         * On GPUs with a full crossbar, the SORs are equally capable, so just
+         * use the next unused SOR.
+         *
+         * We assume there are as many SORs as there are heads.
+         */
+        if (pImpInput->head[head].orIndex == NV_INVALID_OR) {
+            pImpInput->head[head].orIndex = nextSorIndex;
+            nextSorIndex++;
+        }
+    }
+}
+
+/*!
+ * Validate the described disp configuration through IMP.
+ *
+ * \param[in]     pDispEvo         The disp of the dpyIdList.
+ *
+ * \param[in,out] timingsParams[]  The proposed configuration to use on each head
+ *                                 includes -
+
+ *                                 pConnectorEvo -
+ *                                     The proposed connector to drive on each head.
+ *
+ *                                 activeRmId -
+ *                                     The display ID that we use to talk to RM
+ *                                     about the dpy(s) on each head.
+ * + * pTimings - + * The proposed timings to use on each head; + * note the usage bounds within pTimings + * may be altered by this function. + * + * depth - + * The depth of the buffer to be displayed on + * each head. + * \param[in] requireBootClocks + * Only validate modes that will work at P8 + * clocks. + * + * \param[in] reallocBandwidth + * Try to allocate the required display + * bandwidth if IMP passes. + * + * \param[out] pMinIsoBandwidthKBPS + * The ISO bandwidth that's required for the + * proposed disp configuration only. This value + * doesn't take the current display state into + * account. + * + * \param[out] pMinDramFloorKBPS + * The DRAM floor that's required for the + * proposed disp configuration only. This value + * doesn't take the current display state into + * account. + * + * \return Return TRUE if the proposed disp configuration is + * considered valid for IMP purposes. + */ +NvBool nvValidateImpOneDisp( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 *pMinIsoBandwidthKBPS, + NvU32 *pMinDramFloorKBPS) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoIsModePossibleDispInput impInput = { }; + NVEvoIsModePossibleDispOutput impOutput = { }; + NvU32 newIsoBandwidthKBPS, newDramFloorKBPS; + NvBool needToRealloc = FALSE; + + AssignNVEvoIsModePossibleDispInput(pDispEvo, + timingsParams, requireBootClocks, + reallocBandwidth, + &impInput); + + pDevEvo->hal->IsModePossible(pDispEvo, &impInput, &impOutput); + if (!impOutput.possible) { + return FALSE; + } + + switch (reallocBandwidth) { + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE: + needToRealloc = (impOutput.minRequiredBandwidthKBPS > pDispEvo->isoBandwidthKBPS) || + (impOutput.floorBandwidthKBPS > pDispEvo->dramFloorKBPS); + newIsoBandwidthKBPS = + NV_MAX(pDispEvo->isoBandwidthKBPS, impOutput.minRequiredBandwidthKBPS); + newDramFloorKBPS = + NV_MAX(pDispEvo->dramFloorKBPS, impOutput.floorBandwidthKBPS); + + break; + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST: + needToRealloc = (impOutput.minRequiredBandwidthKBPS != pDispEvo->isoBandwidthKBPS) || + (impOutput.floorBandwidthKBPS != pDispEvo->dramFloorKBPS); + newIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS; + newDramFloorKBPS = impOutput.floorBandwidthKBPS; + + break; + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE: + default: + break; + } + + if (needToRealloc) { + if (!nvAllocateDisplayBandwidth(pDispEvo, + newIsoBandwidthKBPS, + newDramFloorKBPS)) { + return FALSE; + } + } + + if (pMinIsoBandwidthKBPS != NULL) { + *pMinIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS; + } + + if (pMinDramFloorKBPS != NULL) { + *pMinDramFloorKBPS = impOutput.floorBandwidthKBPS; + } + + return TRUE; +} + +NvBool nvValidateImpOneDispDowngrade( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 downgradePossibleHeadsBitMask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvBool impPassed = FALSE; + + do { + impPassed = nvValidateImpOneDisp(pDispEvo, + timingsParams, + requireBootClocks, + reallocBandwidth, + NULL /* pMinIsoBandwidthKBPS */, + NULL /* pMinDramFloorKBPS */); + if (impPassed) { + break; + } + } while (DownGradeMetaModeUsageBounds(pDevEvo, timingsParams, + downgradePossibleHeadsBitMask)); + + if (impPassed && !pDevEvo->isSOCDisplay) { + NvU32 head; + + for (head = 0; head < 
pDevEvo->numHeads; head++) {
+            if (timingsParams[head].pTimings != NULL) {
+                timingsParams[head].pTimings->viewPort.possibleUsage =
+                    timingsParams[head].pTimings->viewPort.guaranteedUsage;
+            }
+        }
+    }
+
+    return impPassed;
+}
+
+/*
+ * Return TRUE iff this display can be configured as a framelock
+ * server given the current modetimings/framelock configuration, FALSE
+ * o.w.
+ */
+
+NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo)
+{
+
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
+
+    return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
+                                     NV_EVO_ADD_FRAME_LOCK_SERVER,
+                                     NULL);
+}
+
+/*
+ * Return TRUE iff this display can be configured as a framelock client
+ * given the current modetimings/framelock configuration, FALSE o.w.
+ */
+
+NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
+
+    return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev,
+                                     NV_EVO_ADD_FRAME_LOCK_CLIENT,
+                                     NULL);
+}
+
+
+/*
+ * FrameLockSli() - Helper function for nvEvoRefFrameLockSli() and
+ * nvEvoUnRefFrameLockSli(), which are hooked into the EVO locking state
+ * machine via custom rules.  This function will find the GPU acting as the
+ * given GPU's SLI primary and perform the NV_EVO_{ADD,REM}_FRAME_LOCK_REF
+ * action to increment or decrement the refcount on that GPU.
+ * If queryOnly, it passes NULL to perform a query without affecting state;
+ * otherwise, it also figures out which heads to pass into the EVO state
+ * machine.
+ */
+
+static NvBool FrameLockSli(NVDevEvoPtr pDevEvo,
+                           NvU32 action,
+                           NvBool queryOnly)
+{
+    RasterLockTopology *topos;
+    NVEvoSubDevPtr pEvoSubDev;
+    NVDispEvoPtr pDispEvo;
+    unsigned int numTopos;
+
+    topos = GetRasterLockTopologies(pDevEvo, &numTopos);
+    if (!topos) {
+        return FALSE;
+    }
+
+    nvAssert(numTopos == 1);
+    if (numTopos != 1) {
+        nvFree(topos);
+        return FALSE;
+    }
+
+    /* Want to be framelock server */
+    pDispEvo = topos[0].pDispEvoOrder[0];
+
+    nvFree(topos);
+
+    if (!pDispEvo) {
+        return FALSE;
+    }
+
+    nvAssert(pDevEvo == pDispEvo->pDevEvo);
+
+    pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner];
+
+    if (queryOnly) {
+        return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, NULL);
+    } else {
+        NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1];
+        NvU32 i = 0;
+        NvU32 head;
+
+        for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+            if (nvHeadIsActive(pDispEvo, head)) {
+                pHeads[i++] = head;
+            }
+        }
+        nvAssert(i > 0 && i <= NVKMS_MAX_HEADS_PER_DISP);
+        pHeads[i] = NV_INVALID_HEAD;
+
+        return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action,
+                                         pHeads);
+    }
+}
+
+
+/*
+ * nvEvoRefFrameLockSli() - Attempt to set up framelock on the GPU's SLI
+ * primary.  Hooked into EVO state machine via custom rules.
+ * If pHeads is NULL, only perform a query.
+ */
+
+NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo,
+                            NVEvoSubDevPtr pEvoSubDev,
+                            const NvU32 *pHeads)
+{
+    return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_ADD_FRAME_LOCK_REF,
+                        pHeads == NULL);
+
+} /* nvEvoRefFrameLockSli */
+
+
+/*
+ * nvEvoUnRefFrameLockSli() - Attempt to clean up framelock on the GPU's SLI
+ * primary.  Hooked into EVO state machine via custom rules.
+ * If pHeads is NULL, only perform a query.
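+ * (FrameLockSli() translates a NULL pHeads into a queryOnly call, so no
+ * locking state is modified in that case.)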
+ */
+
+NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo,
+                              NVEvoSubDevPtr pEvoSubDev,
+                              const NvU32 *pHeads)
+{
+    return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_REM_FRAME_LOCK_REF,
+                        pHeads == NULL);
+
+} /* nvEvoUnRefFrameLockSli */
+
+
+/*
+ * GetRasterLockPin() - Ask RM which lockpin to use in order to configure GPU0
+ * to be a server or client of GPU1, where GPUn is represented by the duple
+ * (pDispn, headn) (or NV_EVO_LOCK_PIN_ERROR if the two cannot be locked).
+ */
+static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0,
+                             NVDispEvoPtr pDispEvo1, NvU32 head1,
+                             NVEvoLockPin *serverPin, NVEvoLockPin *clientPin)
+{
+    NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS params = { };
+    NvU32 displayHandle0 = pDispEvo0->pDevEvo->displayHandle;
+    NvU32 displayHandle1 = pDispEvo1->pDevEvo->displayHandle;
+    NvU32 ret;
+
+    params.base.subdeviceIndex = pDispEvo0->displayOwner;
+    params.head = head0;
+
+    params.peer.hDisplay = displayHandle1;
+    params.peer.subdeviceIndex = pDispEvo1->displayOwner;
+    params.peer.head = head1;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         displayHandle0,
+                         NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDispDebug(pDispEvo0, EVO_LOG_ERROR,
+                          "stateless lockpin query failed; ret: 0x%x", ret);
+        if (serverPin) *serverPin = NV_EVO_LOCK_PIN_ERROR;
+        if (clientPin) *clientPin = NV_EVO_LOCK_PIN_ERROR;
+        return;
+    }
+
+    if (serverPin) {
+        if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
+                         _MASTER_SCAN_LOCK_CONNECTED, _NO,
+                         params.masterScanLock)) {
+            *serverPin = NV_EVO_LOCK_PIN_ERROR;
+        } else {
+            int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
+                              _MASTER_SCAN_LOCK_PIN,
+                              params.masterScanLock);
+            *serverPin = NV_EVO_LOCK_PIN_0 + pin;
+        }
+    }
+
+    if (clientPin) {
+        if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
+                         _SLAVE_SCAN_LOCK_CONNECTED, _NO,
+                         params.slaveScanLock)) {
+            *clientPin = NV_EVO_LOCK_PIN_ERROR;
+        } else {
+            int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
+                              _SLAVE_SCAN_LOCK_PIN,
+                              params.slaveScanLock);
+            *clientPin = NV_EVO_LOCK_PIN_0 + pin;
+        }
+    }
+} /* GetRasterLockPin */
+
+static NvU32
+UpdateLUTTimer(NVDispEvoPtr pDispEvo, const NvU32 head, NvBool baseLutEnabled,
+               NvBool outputLutEnabled)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const int dispIndex = pDispEvo->displayOwner;
+    const int numLUTs = ARRAY_LEN(pDevEvo->lut.head[head].LUT);
+
+    if (!pDevEvo->hal->IsCompNotifierComplete(pDispEvo,
+                                              LUTNotifierForHead(head))) {
+        // If the notifier is still pending, then the previous update is still
+        // pending and further LUT changes should continue to go into the third
+        // buffer.  Reschedule the timer for another 10 ms.
+        return 10;
+    }
+
+    // Update the current LUT index and kick off an update.
+    pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex++;
+    pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex %= numLUTs;
+    pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = baseLutEnabled;
+    pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = outputLutEnabled;
+
+    nvEvoUpdateCurrentPalette(pDispEvo, head, TRUE);
+
+    // Return 0 to cancel the timer.
+ return 0; +} + +static void UpdateLUTTimerNVKMS(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + const NvU32 head = DRF_VAL(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, + dataU32); + const NvBool baseLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, + _BASE_LUT, _ENABLE, dataU32); + const NvBool outputLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, + _OUTPUT_LUT, _ENABLE, dataU32); + NvU32 ret = UpdateLUTTimer(pDispEvo, head, baseLutEnabled, + outputLutEnabled); + + if (ret != 0) { + ScheduleLutUpdate(pDispEvo, head, dataU32, ret * 1000); + } +} + +static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo, + const NvU32 head, const NvU32 data, + const NvU64 usec) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + /* Cancel previous update */ + nvCancelLutUpdateEvo(pDispEvo, head); + + /* schedule a new timer */ + pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer = + nvkms_alloc_timer(UpdateLUTTimerNVKMS, + pDispEvo, data, + usec); +} + +/* + * The gamma ramp, if specified, has a 16-bit range. Convert it to EVO's 14-bit + * shifted range and zero out the low 3 bits for bug 813188. + */ +static inline NvU16 GammaToEvo(NvU16 gamma) +{ + return ((gamma >> 2) & ~7) + 24576; +} + +static NVEvoLutDataRec *GetNewLutBuffer( + const NVDispEvoRec *pDispEvo, + NvU32 head, + const struct NvKmsSetLutCommonParams *pParams) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoLutDataRec *pLUTBuffer = NULL; + + // XXX NVKMS TODO: If only input or output are specified and the other one + // is enabled in the hardware, this will zero out the one not specified. In + // practice it isn't a problem today because the X driver always specifies + // both, but we should fix this once we start always using the base channel, + // where we have a separate base LUT ctxdma. + // + // This is also a problem if a partial update of the input LUT is attempted + // (i.e. start != 0 or end != numberOfLutEntries-1). + // + // Filed bug: 2042919 to track removing this TODO. + + pLUTBuffer = nvCalloc(1, sizeof(*pLUTBuffer)); + + if (pLUTBuffer == NULL) { + goto done; + } + + if (pParams->input.specified && pParams->input.end != 0) { + const struct NvKmsLutRamps *pRamps = + nvKmsNvU64ToPointer(pParams->input.pRamps); + const NvU16 *red = pRamps->red; + const NvU16 *green = pRamps->green; + const NvU16 *blue = pRamps->blue; + + nvAssert(pRamps != NULL); + + // Update our shadow copy of the LUT. + pDevEvo->hal->FillLUTSurface(pLUTBuffer->base, + red, green, blue, + pParams->input.end + 1, + pParams->input.depth); + } + + if (pParams->output.specified && pParams->output.enabled) { + const struct NvKmsLutRamps *pRamps = + nvKmsNvU64ToPointer(pParams->output.pRamps); + int i; + + nvAssert(pRamps != NULL); + + if (pDevEvo->hal->caps.hasUnorm16OLUT) { + for (i = 0; i < 1024; i++) { + // Copy the client's 16-bit ramp directly to the LUT buffer. + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Red = pRamps->red[i]; + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Green = pRamps->green[i]; + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = pRamps->blue[i]; + } + + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1024] = + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1023]; + } else { + for (i = 0; i < 1024; i++) { + // Convert from the client's 16-bit range to the EVO 14-bit shifted + // range. 
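+                // For example, GammaToEvo() maps 0x0000 to 24576 (0x6000)
+                // and 0xFFFF to 40952 (0x9FF8), so every converted value
+                // falls in [0x6000, 0x9FF8] with the low 3 bits clear.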
+ pLUTBuffer->output[i].Red = GammaToEvo(pRamps->red[i]); + pLUTBuffer->output[i].Green = GammaToEvo(pRamps->green[i]); + pLUTBuffer->output[i].Blue = GammaToEvo(pRamps->blue[i]); + } + + pLUTBuffer->output[1024] = pLUTBuffer->output[1023]; + } + } + + /* fall through */ + +done: + return pLUTBuffer; +} + + +/* + * Update the head's LUT with the given colors. + * + * The color LUT is triple-buffered. + * + * curLUTIndex indicates the buffer currently being updated. What the other + * two buffers are used for depends on whether the previous update has + * completed. If not (case 1): + * curLUTIndex + 1 (mod 3): currently being displayed + * curLUTIndex + 2 (mod 3): will be displayed at next vblank + * If so (case 2): + * curLUTIndex + 1 (mod 3): unused + * curLUTIndex + 2 (mod 3): currently being displayed + * + * In case 1, just update the current buffer and kick off a timer to submit the + * update from i+2 to i. If more LUT changes come in before the first update + * happens, kill the timer and start a new one. + * + * In case 2, kill the timer if it still hasn't gone off, update buffer i, and + * kick off an update. No new timer needs to be scheduled. + */ + +void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 head, NvBool kickoff, + const struct NvKmsSetLutCommonParams *pParams) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + const int curLUT = pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + const NvBool waitForPreviousUpdate = + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate; + const int numLUTs = ARRAY_LEN(pDevEvo->lut.head[head].LUT); + const int lutToFill = (curLUT + 1) % numLUTs; + NVLutSurfaceEvoPtr pSurfEvo = pDevEvo->lut.head[head].LUT[lutToFill]; + NvBool baseLutEnabled = + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled ; + NvBool outputLutEnabled = + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled; + + if (!pParams->input.specified && !pParams->output.specified) { + return; + } + + if (pParams->input.specified) { + baseLutEnabled = (pParams->input.end != 0); + } + + if (pParams->output.specified) { + outputLutEnabled = pParams->output.enabled; + } + + nvAssert(pSurfEvo != NULL); + + if ((pParams->input.specified && pParams->input.end != 0) || + (pParams->output.specified && pParams->output.enabled)) { + NVEvoLutDataRec *pLUTBuffer = GetNewLutBuffer(pDispEvo, head, pParams); + + if (pLUTBuffer == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "LUT Allocation failure; skipping LUT update"); + return; + } + + // Fill in the new LUT buffer. + nvUploadDataToLutSurfaceEvo(pSurfEvo, pLUTBuffer, pDispEvo); + + nvFree(pLUTBuffer); + } + + /* Kill a pending timer */ + nvCancelLutUpdateEvo(pDispEvo, head); + + if (!kickoff) { + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = baseLutEnabled; + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = outputLutEnabled; + pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex = lutToFill; + return; + } + + // See if we can just fill the next LUT buffer and kick off an update now. + // We can do that if this is the very first update, or if the previous + // update is complete, or if we need to guarantee that this update + // is synchronous. + NvBool previousUpdateComplete = + pDevEvo->hal->IsCompNotifierComplete(pDispEvo, + LUTNotifierForHead(head)); + if (!waitForPreviousUpdate || previousUpdateComplete || + pParams->synchronous) { + // Kick off an update now. 
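+        // That is: make the buffer we just filled current and program the
+        // palette immediately, rather than deferring to the timer path in
+        // the else branch below.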
pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex = lutToFill;
+        pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = baseLutEnabled;
+        pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = outputLutEnabled;
+        nvEvoUpdateCurrentPalette(pDispEvo, head, TRUE);
+
+        // If this LUT update is synchronous, then sync before returning.
+        if (pParams->synchronous &&
+            pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate) {
+
+            pDevEvo->hal->WaitForCompNotifier(pDispEvo,
+                                              LUTNotifierForHead(head));
+            pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate =
+                FALSE;
+        }
+    } else {
+        // Schedule a timer to kick off an update later.
+        // XXX 5 ms is a guess.  We could probably look at this pDpy's refresh
+        // rate to come up with a more reasonable estimate.
+        NvU32 dataU32 = DRF_NUM(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, head);
+
+        nvAssert((head & ~0xff) == 0);
+
+        if (baseLutEnabled) {
+            dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _BASE_LUT,
+                               _ENABLE);
+        }
+
+        if (outputLutEnabled) {
+            dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _OUTPUT_LUT,
+                               _ENABLE);
+        }
+
+        ScheduleLutUpdate(pDispEvo, head, dataU32, 5 * 1000);
+    }
+}
+
+NvBool nvValidateSetLutCommonParams(
+    const NVDevEvoRec *pDevEvo,
+    const struct NvKmsSetLutCommonParams *pParams)
+{
+    NvU32 maxSize = 0;
+
+    if (pParams->output.specified && pParams->output.enabled) {
+        if (pParams->output.pRamps == 0) {
+            return FALSE;
+        }
+    }
+
+    if (!pParams->input.specified || pParams->input.end == 0) {
+        return TRUE;
+    }
+
+    if (pParams->input.pRamps == 0) {
+        return FALSE;
+    }
+
+    switch (pParams->input.depth) {
+        case 8:  maxSize = 256;  break;
+        case 15: maxSize = 32;   break;
+        case 16: maxSize = 64;   break;
+        case 24: maxSize = 256;  break;
+        case 30: maxSize = 1024; break;
+        default: return FALSE;
+    }
+
+    nvAssert(maxSize <= NVKMS_LUT_ARRAY_SIZE);
+
+    /* Currently, the implementation assumes start==0. */
+    if (pParams->input.start != 0) {
+        return FALSE;
+    }
+
+    if (pParams->input.end >= maxSize) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+static NvU32 GetSwapLockoutWindowUs(NVDispEvoPtr pDispEvo)
+{
+    NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS params = { 0 };
+    NvU32 ret;
+
+    nvAssert(pDispEvo->pFrameLockEvo != NULL);
+
+    ret = nvRmApiControl(
+            nvEvoGlobal.clientHandle,
+            pDispEvo->pFrameLockEvo->device,
+            NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW,
+            &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW failed");
+    }
+
+    return params.tSwapRdyHi;
+}
+
+static NvU32 CalculateSwapLockoutStartP2060(NVDispEvoPtr pDispEvo,
+                                            const NvU32 head,
+                                            const NvU32 tSwapRdyHiUs)
+{
+    const NVHwModeTimingsEvo *pTimings;
+
+    nvAssert(head != NV_INVALID_HEAD);
+    nvAssert(nvHeadIsActive(pDispEvo, head));
+
+    pTimings = &pDispEvo->headState[head].timings;
+
+    /*
+     * SWAP_LOCKOUT_START = Vtotal * TswapRdyHi * Refresh_Rate
+     *
+     *                    = Vtotal * TswapRdyHi * (pclk / (Vtotal * Htotal))
+     *                    = TswapRdyHi * (pclk / Htotal)
+     *                    = TswapRdyHiUs * 1e-6 * pclk / Htotal
+     *                    = TswapRdyHiUs * pclk / (Htotal * 1000000)
+     *                    = TswapRdyHiUs * (pclkKhz * 1000) / (Htotal * 1000000)
+     *                    = TswapRdyHiUs * pclkKhz / (Htotal * 1000)
+     *
+     * Since SWAP_LOCKOUT_START must be higher than LSR_MIN_TIME, round this
+     * result up to the nearest integer.
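+     *
+     * As a purely illustrative example (hypothetical numbers, not taken
+     * from this change): tSwapRdyHiUs = 100, pclkKhz = 148500 and
+     * Htotal = 2200 give 100 * 148500 / (2200 * 1000) = 6.75, which is
+     * rounded up to 7.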
+     */
+
+    return NV_ROUNDUP_DIV(tSwapRdyHiUs * pTimings->pixelClock,
+                          pTimings->rasterSize.x * 1000);
+}
+
+/**
+ * Override the swap lockout start value on heads on this pDisp, or restore the
+ * default value.
+ *
+ * This is called before (with isPre == TRUE) and after (with isPre == FALSE)
+ * swap barriers are enabled on the G-Sync board.  In order to satisfy certain
+ * timing criteria, we need to set a special value for SWAP_LOCKOUT_START for
+ * the duration of swap barriers being enabled.
+ */
+void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo,
+                               NvBool enable, NvBool isPre)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 tSwapRdyHiUs = 0;
+    NvU32 head;
+
+    if ((isPre && !enable) || (!isPre && enable)) {
+        return;
+    }
+
+    if (enable) {
+        tSwapRdyHiUs = GetSwapLockoutWindowUs(pDispEvo);
+    }
+
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS params = { };
+        NvU32 ret;
+
+        if (!nvHeadIsActive(pDispEvo, head)) {
+            continue;
+        }
+
+        params.maxSwapLockoutSkew =
+            NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT;
+
+        if (enable) {
+            params.swapLockoutStart =
+                CalculateSwapLockoutStartP2060(pDispEvo, head, tSwapRdyHiUs);
+        } else {
+            params.swapLockoutStart =
+                NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT;
+        }
+
+        params.head = head;
+
+        params.base.subdeviceIndex = pDispEvo->displayOwner;
+
+        ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                             pDevEvo->displayHandle,
+                             NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP,
+                             &params,
+                             sizeof(params));
+
+        if (ret != NVOS_STATUS_SUCCESS) {
+            nvAssert(!"NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP failed");
+        }
+    }
+}
+
+/*!
+ * Release a reference to a pDevEvo
+ *
+ * If the refcount of the device drops to 0, this frees the device.
+ *
+ * \return TRUE if the device was freed, FALSE otherwise.
+ */
+NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo)
+{
+    if (pDevEvo == NULL) {
+        return FALSE;
+    }
+
+    pDevEvo->allocRefCnt--;
+
+    if (pDevEvo->allocRefCnt > 0) {
+        return FALSE;
+    }
+
+    if (pDevEvo->pNvKmsOpenDev != NULL) {
+        /*
+         * DP-MST allows more than one head/stream to be attached to a single
+         * DP connector, and there is no way to convey that DP-MST
+         * configuration to the next driver load; therefore disallow DP-MST.
+ */ + nvEvoRestoreConsole(pDevEvo, FALSE /* allowMST */); + + nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, + pDevEvo->fbConsoleSurfaceHandle, + TRUE /* skipUpdate */); + pDevEvo->fbConsoleSurfaceHandle = 0; + } + + nvFreeCoreChannelEvo(pDevEvo); + + nvTeardownHdmiLibrary(pDevEvo); + + nvFreePerOpenDev(nvEvoGlobal.nvKmsPerOpen, pDevEvo->pNvKmsOpenDev); + + nvFreeFrameLocksEvo(pDevEvo); + + if (pDevEvo->hal) { + pDevEvo->hal->FreeRmCtrlObject(pDevEvo); + } + + nvRmDestroyDisplays(pDevEvo); + + nvkms_free_timer(pDevEvo->consoleRestoreTimer); + pDevEvo->consoleRestoreTimer = NULL; + + nvPreallocFree(pDevEvo); + + nvRmFreeDeviceEvo(pDevEvo); + + nvListDel(&pDevEvo->devListEntry); + + nvkms_free_ref_ptr(pDevEvo->ref_ptr); + + nvFree(pDevEvo); + return TRUE; +} + +NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest, + enum NvKmsAllocDeviceStatus *pStatus) +{ + NVDevEvoPtr pDevEvo = NULL; + enum NvKmsAllocDeviceStatus status = + NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + NvU32 i; + + nvAssert(nvFindDevEvoByDeviceId(pRequest->deviceId) == NULL); + + pDevEvo = nvCalloc(1, sizeof(*pDevEvo)); + + if (pDevEvo == NULL) { + goto done; + } + + pDevEvo->allocRefCnt = 1; + + pDevEvo->gpuLogIndex = NV_INVALID_GPU_LOG_INDEX; + + pDevEvo->gc6Allowed = TRUE; + + nvListAppend(&pDevEvo->devListEntry, &nvEvoGlobal.devList); + + pDevEvo->ref_ptr = nvkms_alloc_ref_ptr(pDevEvo); + if (!pDevEvo->ref_ptr) { + goto done; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) { + pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->headForWindow); i++) { + pDevEvo->headForWindow[i] = NV_INVALID_HEAD; + } + + if (!nvRmAllocDeviceEvo(pDevEvo, pRequest)) { + goto done; + } + + status = nvAssignEvoCaps(pDevEvo); + + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + goto done; + } + + if (!nvPreallocAlloc(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + /* + * Copy the registry keys from the alloc device request to the device. + * + * This needs to be set before nvRmAllocDisplays, because nvRmAllocDisplays + * will initialize DP lib which may read registry keys that we want to + * allow clients to override. + */ + ct_assert(ARRAY_LEN(pRequest->registryKeys) == + ARRAY_LEN(pDevEvo->registryKeys)); + ct_assert(ARRAY_LEN(pRequest->registryKeys[0].name) == + ARRAY_LEN(pDevEvo->registryKeys[0].name)); + + for (i = 0; i < ARRAY_LEN(pRequest->registryKeys); i++) { + const size_t nameLen = sizeof(pDevEvo->registryKeys[i].name); + nvkms_memcpy(pDevEvo->registryKeys[i].name, + pRequest->registryKeys[i].name, + nameLen); + pDevEvo->registryKeys[i].name[nameLen - 1] = '\0'; + pDevEvo->registryKeys[i].value = pRequest->registryKeys[i].value; + } + + status = nvRmAllocDisplays(pDevEvo); + + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + goto done; + } + + nvAllocFrameLocksEvo(pDevEvo); + + if (!pDevEvo->hal->AllocRmCtrlObject(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + if (!nvAllocCoreChannelEvo(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED; + goto done; + } + + pDevEvo->pNvKmsOpenDev = nvAllocPerOpenDev(nvEvoGlobal.nvKmsPerOpen, + pDevEvo, TRUE /* isPrivileged */); + if (!pDevEvo->pNvKmsOpenDev) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + + /* + * Import the framebuffer console, if there is one, + * as a surface we can flip to. 
+ */ + nvRmImportFbConsoleMemory(pDevEvo); + + /* + * This check must be placed after nvAllocCoreChannelEvo() since it depends + * on the HW capabilities that are read in that function. + */ + if (!ValidateConnectorTypes(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + if (!nvInitHdmiLibrary(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + nvRmMuxInit(pDevEvo); + + status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + + /* fall through */ + +done: + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + nvFreeDevEvo(pDevEvo); + pDevEvo = NULL; + } + + *pStatus = status; + + return pDevEvo; +} + + +// How long before we time out waiting for lock? +// In microseconds. +#define LOCK_TIMEOUT 5000000 + +// +// EvoWaitForLock() +// Wait for raster or flip lock to complete +// Note that we use pDev and subdevice here instead of pDisp since this is used +// per-subdev in SLI (including the pDispEvo->numSubDevices > 1 case). +// +static NvBool EvoWaitForLock(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvU32 type) +{ + NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS status = { }; + NvU32 ret; + NvU64 startTime = 0; + + nvAssert(type == EVO_RASTER_LOCK || type == EVO_FLIP_LOCK); + + if ((type == EVO_FLIP_LOCK) && + !pDevEvo->hal->caps.supportsFlipLockRGStatus) { + return TRUE; + } + + status.head = head; + status.base.subdeviceIndex = sd; + status.scanLocked = NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO; + status.flipLocked = NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO; + + // Just keep looping until we get what we want. + do { + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_RG_STATUS, + &status, + sizeof(status)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to read SLI lock status"); + return FALSE; + } + + if ((type == EVO_RASTER_LOCK) && + (status.scanLocked == + NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES)) { + break; + } + if ((type == EVO_FLIP_LOCK) && + (status.flipLocked == + NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES)) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, LOCK_TIMEOUT)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "SLI lock timeout exceeded (type %d)", type); + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + // Once we've exited from the various loops above, we should be locked + // as requested. + return TRUE; +} + +// +// EvoUpdateHeadParams() +// Send GPUs HeadParams updates; accounts for SLI. 
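+// The nvPushEvoSubDevMaskDisp()/nvPopEvoSubDevMask() pair below makes the
+// SetHeadControl methods reach every subdevice driving this pDisp.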
+//
+static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head,
+                                NVEvoUpdateState *updateState)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    nvPushEvoSubDevMaskDisp(pDispEvo);
+
+    pDevEvo->hal->SetHeadControl(pDevEvo, pDispEvo->displayOwner, head, updateState);
+
+    nvPopEvoSubDevMask(pDevEvo);
+}
+
+//
+// nvReadCRC32Evo()
+// Returns the last CRC32 value
+//
+NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head,
+                      CRC32NotifierCrcOut *crcOut)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVHwModeTimingsEvo *pTimings = &pHeadState->timings;
+    NVEvoDmaPtr dma = NULL;
+    NVConnectorEvoPtr pConnectorEvo = NULL;
+    NVEvoUpdateState updateState = { };
+    NvU32 numCRC32 = 0;
+    NvBool res = TRUE;
+    NvBool found = FALSE;
+    NvU32 ret;
+
+    // Look up the head connector
+    nvListForEachEntry(pConnectorEvo,
+                       &pDispEvo->connectorList,
+                       connectorListEntry) {
+        NvU32 activeHeadMask =
+            nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+        if (activeHeadMask & NVBIT(head)) {
+            found = TRUE;
+            break;
+        }
+    }
+
+    if (!found) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "Unable to find active connector for head %d", head);
+        return FALSE;
+    }
+
+    // Allocate a temporary DMA notifier
+    dma = nvCalloc(1, sizeof(NVEvoDma));
+    if ((dma == NULL) ||
+        !nvRmAllocEvoDma(pDevEvo,
+                         dma,
+                         NV_DMA_EVO_NOTIFIER_SIZE - 1,
+                         DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER),
+                         1 << pDispEvo->displayOwner)) {
+        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                    "CRC32 notifier DMA allocation failed");
+        nvFree(dma);
+        return FALSE;
+    }
+
+    // Bind the CRC32 notifier ctxDma
+    ret = nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core, dma->ctxHandle);
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to bind display engine CRC32 notify context "
+                         "DMA: 0x%x (%s)", ret, nvstatusToString(ret));
+        res = FALSE;
+        goto done;
+    }
+
+    // Only set up the actual output for SLI primary.
+    nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner);
+
+    /* CRC notifiers are similar to completion notifiers, but work slightly
+     * differently:
+     *
+     * 1. In order to start CRC generation for a head, we need to:
+     *
+     *    - Point an EVO head at a block of memory with
+     *      HEAD_SET_CONTEXT_DMA_CRC(head)
+     *
+     *    - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to select
+     *      what output we want to capture CRC values from, and kick off a
+     *      core channel update (this already generates a CRC value for the
+     *      last scanout buffer)
+     *
+     *    ----> hal->StartCRC32Capture()
+     *
+     * 2. From 1) on, a new CRC value is generated per vblank and written to
+     *    an incrementing entry in the CRC notifier.  With pre-nvdisplay chips,
+     *    a CRC notifier can hold up to 256 entries.  Once filled up, new CRC
+     *    values are discarded.  In either case, we are only interested in the
+     *    last CRC32 value.
+     *
+     * 3. In order to stop CRC generation, we need to perform the inverse
+     *    operation of 1):
+     *
+     *    - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to
+     *      unselect all outputs we were capturing CRC values from.
+     *
+     *    - Unset the CRC context DMA with HEAD_SET_CONTEXT_DMA_CRC(head)
+     *
+     *    ----> hal->StopCRC32Capture()
+     *
+     * 4. From 3) on, it is safe to wait for the CRC notifier and query all
+     *    entries.
+ * + * ----> hal->QueryCRC32() + */ + pDevEvo->hal->StartCRC32Capture(pDevEvo, + dma, + pConnectorEvo, + pTimings->protocol, + nvEvoConnectorGetPrimaryOr(pConnectorEvo), + head, + pDispEvo->displayOwner, + &updateState); + + // This update should generate one CRC value. + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */); + + pDevEvo->hal->StopCRC32Capture(pDevEvo, + head, + &updateState); + + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */); + + if (!pDevEvo->hal->QueryCRC32(pDevEvo, + dma, + pDispEvo->displayOwner, + 1, + crcOut, + &numCRC32) || + (numCRC32 == 0)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query all CRC32 values"); + } + + nvPopEvoSubDevMask(pDevEvo); + +done: + // Clean-up + nvRmFreeEvoDma(pDevEvo, dma); + nvFree(dma); + + return res; +} + +NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo) +{ + NvU32 activeSorMask = 0; + NvU32 head; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVConnectorEvoPtr pConnectorEvo = + pDispEvo->headState[head].pConnectorEvo; + + if (pConnectorEvo != NULL && + pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + nvAssert(pConnectorEvo->or.mask != 0x0); + + activeSorMask |= pConnectorEvo->or.mask; + } + } + + return activeSorMask; +} + +NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo, + const NvU32 sd, + NVEvoChannelPtr pChannel, + NvU64 *pStartTime, + const NvU32 timeout) +{ + do + { + NvBool isMethodPending = TRUE; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pChannel, + sd, + &isMethodPending) && !isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(pStartTime, timeout)) { + return FALSE; + } + + nvkms_yield(); + } while (TRUE); + + return TRUE; +} + +static NvU32 SetSORFlushMode(NVDevEvoPtr pDevEvo, + NvU32 sorNumber, + NvU32 headMask, + NvBool enable) +{ + NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params = { }; + + params.base.subdeviceIndex = 0; + params.sorNumber = sorNumber; + params.headMask = headMask; + params.bEnable = enable; + + return nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, + ¶ms, sizeof(params)); +} + +static void DPSerializerLinkTrain(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NvBool enableLink, + NvBool reTrain) +{ + const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + const NvU32 sorNumber = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + NvBool force = NV_FALSE; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + /* + * The NV0073_CTRL_DP_DATA_SET_{LANE_COUNT, LINK_BW} defines are the same + * as the actual DPCD values. As such, we can directly assign the + * dpSerializerCaps here. 
+ */ + NvBool isMST = pConnectorEvo->dpSerializerCaps.supportsMST; + NvU32 linkBW = pConnectorEvo->dpSerializerCaps.maxLinkBW; + NvU32 laneCount = pConnectorEvo->dpSerializerCaps.maxLaneCount; + + nvAssert(nvConnectorIsDPSerializer(pConnectorEvo)); + + if (sorNumber == NV_INVALID_OR) { + return; + } + + if (reTrain) { + if (!pConnectorEvo->dpSerializerEnabled) { + nvEvoLogDev(pDevEvo, EVO_LOG_INFO, + "Received expected HPD_IRQ during serializer shutdown"); + return; + } + } else if (enableLink) { + pConnectorEvo->dpSerializerEnabled = NV_TRUE; + } else { + linkBW = 0; + laneCount = NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0; + pConnectorEvo->dpSerializerEnabled = NV_FALSE; + } + + if (isMST) { + NvU32 dpcdData = 0; + + dpcdData = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, dpcdData); + dpcdData = + FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, dpcdData); + if (!nvWriteDPCDReg(pConnectorEvo, NV_DPCD_MSTM_CTRL, dpcdData)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to enable MST DPCD"); + return; + } + } + + /* + * We cannot perform link training while the OR has an attached head + * since we would be changing the OR clocks and link frequency while + * it's actively encoding pixels, and this could lead to FIFO overflow/ + * underflow issues. Instead, the recommended, safe sequence is to enter + * flush mode first, re-train the link, and exit flush mode after. + */ + if (reTrain) { + if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_TRUE) != + NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to enter flush mode"); + return; + } + } + + do { + NvU32 dpCtrlData = 0; + NvU32 dpCtrlCmd = 0; + NV0073_CTRL_DP_CTRL_PARAMS dpCtrlParams = { }; + + dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) | + DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE) | + DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE); + + if (isMST) { + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM); + } + + if (force) { + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION); + } + + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW, + linkBW, dpCtrlData); + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, + laneCount, dpCtrlData); + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET, + NV0073_CTRL_DP_DATA_TARGET_SINK, + dpCtrlData); + + dpCtrlParams.subDeviceInstance = pDispEvo->displayOwner; + dpCtrlParams.displayId = displayId; + dpCtrlParams.cmd = dpCtrlCmd; + dpCtrlParams.data = dpCtrlData; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_CTRL, + &dpCtrlParams, sizeof(dpCtrlParams)) == NVOS_STATUS_SUCCESS) { + break; + } + + if (force) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Fake link training failed"); + break; + } + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Link training failed"); + + /* + * XXX Force the link config on the GPU side to avoid hanging the display + * pipe during modeset. Eventually, we need to figure out how to deal + * with/report these kinds of LT failures. 
+         */
+        force = NV_TRUE;
+
+    } while (NV_TRUE);
+
+    if (reTrain) {
+        if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_FALSE) !=
+            NVOS_STATUS_SUCCESS) {
+            nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                        "Failed to exit flush mode");
+        }
+    }
+}
+
+void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo,
+                               NVConnectorEvoPtr pConnectorEvo)
+{
+    DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
+                          NV_TRUE /* enableLink */,
+                          NV_TRUE /* reTrain */);
+}
+
+void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo,
+                              NVConnectorEvoPtr pConnectorEvo)
+{
+    const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+
+    if (!pConnectorEvo->dpSerializerEnabled && (headMask != 0)) {
+        DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
+                              NV_TRUE /* enableLink */,
+                              NV_FALSE /* reTrain */);
+    }
+}
+
+void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo,
+                               NVConnectorEvoPtr pConnectorEvo)
+{
+    const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo);
+
+    if (pConnectorEvo->dpSerializerEnabled && (headMask == 0)) {
+        DPSerializerLinkTrain(pDispEvo, pConnectorEvo,
+                              NV_FALSE /* enableLink */,
+                              NV_FALSE /* reTrain */);
+    }
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c
new file mode 100644
index 0000000..3037e54
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c
@@ -0,0 +1,539 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file contains implementations of the EVO HAL methods for display class
+ * 1.x, found in the Tesla and Fermi 1 (GF10x) chips.
+ */
+
+#include "nvkms-types.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-evo1.h"
+#include "nvkms-prealloc.h"
+#include "nvkms-utils.h"
+
+#include <ctrl/ctrl5070/ctrl5070chnc.h> // NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS
+
+/*!
+ * Initialize head-independent IMP param fields.
+ *
+ * Initializes an NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS structure.
+ * IMP users should call this once, followed by per-head calls to
+ * AssignPerHeadImpParams().
+ *
+ * \param[in]  pImp  A pointer to a param structure.
+ */
+static void InitImpParams(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp)
+{
+    int i;
+
+    nvkms_memset(pImp, 0, sizeof(*pImp));
+
+    /* Initialize to not possible.
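+     * RM overwrites this field in its reply; callers only treat an explicit
+     * YES as success, so a failed control call fails closed.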
*/ + pImp->IsPossible = NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_NO; + + /* Set all heads to inactive. */ + for (i = 0; i < NV5070_CTRL_CMD_MAX_HEADS; i++) { + pImp->Head[i].HeadActive = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_NO; + } + + /* Set all ORs to no owner. */ + for (i = 0; i < NV5070_CTRL_CMD_MAX_DACS; i++) { + pImp->Dac[i].owner = NV5070_CTRL_CMD_OR_OWNER_NONE; + } + + pImp->bUseSorOwnerMask = TRUE; + for (i = 0; i < NV5070_CTRL_CMD_MAX_SORS; i++) { + pImp->Sor[i].ownerMask = NV5070_CTRL_CMD_SOR_OWNER_MASK_NONE; + } + + for (i = 0; i < NV5070_CTRL_CMD_MAX_PIORS; i++) { + pImp->Pior[i].owner = NV5070_CTRL_CMD_OR_OWNER_NONE; + } +} + +/*! + * Initialize head-specific IMP param fields. + * + * Initialize the portion of the NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS + * structure that applies to a specific head, and the OR driven by + * that head. + * + * The param structure should be initialized by InitImpParams() + * before calling this per-head function. + * + * \param[out] pImp The param structure to initialize. + * \param[in] pTimings The rastering timings and viewport configuration. + * \param[in] pUsage The usage bounds that will be used for this head. + * \param[in] head The number of the head that will be driven. + * \param[in] orNumber The number of the OR driven by the head. + * \param[in] orType The type of the OR driven by the head. + */ +static void AssignPerHeadImpParams(const NVDevEvoRec *pDevEvo, + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp, + const NVHwModeTimingsEvo *pTimings, + const struct NvKmsUsageBounds *pUsage, + const int head, + const int orNumber, + const int orType) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + NvU64 overlayFormats = 0; + NvU32 protocol; + + nvkms_memset(&pImp->Head[head], 0, sizeof(pImp->Head[head])); + + nvAssert(head < NV5070_CTRL_CMD_MAX_HEADS); + pImp->Head[head].HeadActive = TRUE; + + nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE || + orNumber != NV_INVALID_OR); + + /* raster timings */ + + pImp->Head[head].PixelClock.Frequency = pTimings->pixelClock; + + pImp->Head[head].PixelClock.Adj1000Div1001 = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_NO; + + pImp->Head[head].RasterSize.Width = pTimings->rasterSize.x; + pImp->Head[head].RasterSize.Height = pTimings->rasterSize.y; + pImp->Head[head].RasterBlankStart.X = pTimings->rasterBlankStart.x; + pImp->Head[head].RasterBlankStart.Y = pTimings->rasterBlankStart.y; + pImp->Head[head].RasterBlankEnd.X = pTimings->rasterBlankEnd.x; + pImp->Head[head].RasterBlankEnd.Y = pTimings->rasterBlankEnd.y; + pImp->Head[head].RasterVertBlank2.YStart = pTimings->rasterVertBlank2Start; + pImp->Head[head].RasterVertBlank2.YEnd = pTimings->rasterVertBlank2End; + pImp->Head[head].Control.Structure = + pTimings->interlaced ? 
+ NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_INTERLACED : + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_PROGRESSIVE; + + if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_DAC) { + nvAssert(orNumber < ARRAY_LEN(pImp->Dac)); + nvAssert(pImp->Dac[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE); + pImp->Dac[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head); + nvAssert(pTimings->protocol == NVKMS_PROTOCOL_DAC_RGB); + pImp->Dac[orNumber].protocol = NV5070_CTRL_CMD_DAC_PROTOCOL_RGB_CRT; + } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + nvAssert(orNumber < ARRAY_LEN(pImp->Sor)); + pImp->Sor[orNumber].ownerMask |= NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(head); + switch (pTimings->protocol) { + default: + nvAssert(!"Unknown protocol"); + /* fall through */ + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_B; + break; + } + pImp->Sor[orNumber].protocol = protocol; + pImp->Sor[orNumber].pixelReplicateMode = + NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF; + } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) { + nvAssert(orNumber < ARRAY_LEN(pImp->Pior)); + nvAssert(pImp->Pior[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE); + pImp->Pior[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head); + switch (pTimings->protocol) { + default: + nvAssert(!"Unknown protocol"); + /* fall through */ + case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC: + protocol = NV5070_CTRL_CMD_PIOR_PROTOCOL_EXT_TMDS_ENC; + break; + } + pImp->Pior[orNumber].protocol = protocol; + } else { + nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE); + } + + /* viewport out */ + + pImp->Head[head].OutputScaler.VerticalTaps = + NVEvoScalerTapsToNum(pViewPort->vTaps); + + pImp->Head[head].OutputScaler.HorizontalTaps = + NVEvoScalerTapsToNum(pViewPort->hTaps); + + pImp->Head[head].ViewportSizeOut.Width = pViewPort->out.width; + pImp->Head[head].ViewportSizeOut.Height = pViewPort->out.height; + + pImp->Head[head].ViewportSizeOutMin.Width = + pImp->Head[head].ViewportSizeOut.Width; + + pImp->Head[head].ViewportSizeOutMin.Height = + pImp->Head[head].ViewportSizeOut.Height; + + pImp->Head[head].ViewportSizeOutMax.Width = + pImp->Head[head].ViewportSizeOut.Width; + + pImp->Head[head].ViewportSizeOutMax.Height = + pImp->Head[head].ViewportSizeOut.Height; + + /* viewport in */ + + pImp->Head[head].ViewportSizeIn.Width = pViewPort->in.width; + pImp->Head[head].ViewportSizeIn.Height = pViewPort->in.height; + + /* + * The actual format doesn't really matter, since RM just + * converts it back to bits per pixel for its IMP calculation anyway. The + * hardware doesn't have a "usage bound" for core -- changing the format + * of the core surface will always incur a supervisor interrupt and rerun + * IMP (XXX if we change the core surface as part of a flip to one of a + * different depth, should we force the pre/post IMP update path?). + * + * EVO2 hal uses surfaces of the same format in the core and base channels, + * see needToReprogramCoreSurface() in nvkms-evo2.c. 
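+     *
+     * For example, a usable main layer bound to RGB_PACKED4BPP formats is
+     * reported below simply as A8R8G8B8.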
+ */ + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_R5G6B5; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_I8; + } else { /* default to RGB 4BPP */ + nvAssert(!"Unknown core format"); + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } + } else { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } + + pImp->Head[head].Params.SuperSample = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X1AA; + + /* base usage bounds */ + + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + pImp->Head[head].BaseUsageBounds.Usable = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_YES; + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_32; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_16; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_8; + } else { /* default to RGB 8BPP */ + nvAssert(!"Unknown base channel usage bound format"); + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64; + } + + pImp->Head[head].BaseUsageBounds.SuperSample = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X1AA; + } else { + pImp->Head[head].BaseUsageBounds.Usable = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_NO; + } + + /* overlay usage bounds */ + + pImp->Head[head].OverlayUsageBounds.Usable = + pUsage->layer[NVKMS_OVERLAY_LAYER].usable + ? NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_YES + : NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_NO; + + overlayFormats = pUsage->layer[NVKMS_OVERLAY_LAYER].usable ? 
+ pUsage->layer[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats : + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP; + + if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32; + } else if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_16; + } else { + nvAssert(!"Unknown overlay channel usage bound format"); + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32; + } + + /* pixel depth */ + + switch (pTimings->pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + break; + case NVKMS_PIXEL_DEPTH_24_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + break; + case NVKMS_PIXEL_DEPTH_30_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + break; + } +} + +void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp)); + NvBool result = FALSE; + NvU32 head; + NvU32 ret; + + InitImpParams(pImp); + + pImp->RequestedOperation = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (pInput->head[head].pTimings == NULL) { + continue; + } + + AssignPerHeadImpParams(pDevEvo, pImp, + pInput->head[head].pTimings, + pInput->head[head].pUsage, + head, + pInput->head[head].orIndex, + pInput->head[head].orType); + } + + pImp->base.subdeviceIndex = pDispEvo->displayOwner; + + if (pInput->requireBootClocks) { + // XXX TODO: IMP requires lock pin information if pstate information is + // requested. For now, just assume no locking. + pImp->MinPState = NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + pImp->Head[head].displayId[0] = pInput->head[head].displayId; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_IS_MODE_POSSIBLE, + pImp, sizeof(*pImp)); + + if (ret != NV_OK || !pImp->IsPossible || + (pInput->requireBootClocks && + // P8 = "boot clocks" + (pImp->MinPState < NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P8 && + // XXX TODO: With PStates 3.0, only a "v-pstate" is returned in + // impParams.minPerfLevel. We need to correlate that with "boot + // clocks" somehow. 
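+ // (P-states are numbered so that lower values mean higher clocks, so a
+ // MinPState below P8 indicates the mode needs more than boot clocks.)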
+ pImp->MinPState != NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_UNDEFINED))) {
+ goto done;
+ }
+
+ result = TRUE;
+
+done:
+ nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS);
+ pOutput->possible = result;
+}
+
+void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre)
+{
+ NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+ NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp =
+ nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp));
+ NvU32 ret;
+
+ if (isPre) {
+ /*
+ * Sync the core channel for pre-modeset IMP to ensure that the state
+ * cache reflects all of the methods we've pushed.
+ */
+ ret = nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
+ if (!ret) {
+ nvAssert(!"nvRMSyncEvoChannel failed during PreModesetIMP");
+ }
+ }
+
+ nvkms_memset(pImp, 0, sizeof(*pImp));
+
+ pImp->RequestedOperation = isPre ?
+ NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC :
+ NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC;
+
+ pImp->base.subdeviceIndex = pDispEvo->displayOwner;
+
+ ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+ pDevEvo->displayHandle,
+ NV5070_CTRL_CMD_IS_MODE_POSSIBLE,
+ pImp, sizeof(*pImp));
+ if ((ret != NVOS_STATUS_SUCCESS) || !pImp->IsPossible) {
+ nvAssert(!"NV5070_CTRL_CMD_IS_MODE_POSSIBLE failed");
+ }
+
+ nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS);
+}
+
+/*!
+ * Return the value to use for HEAD_SET_STORAGE_PITCH.
+ *
+ * Per dispClass_02.mfs, the HEAD_SET_STORAGE_PITCH "units are blocks
+ * if the layout is BLOCKLINEAR, the units are multiples of 256 bytes
+ * if the layout is PITCH."
+ *
+ * \return Returns 0 if the pitch is invalid. Otherwise returns the
+ * HEAD_SET_STORAGE_PITCH value.
+ */
+NvU32 nvEvoGetHeadSetStoragePitchValue(const NVDevEvoRec *pDevEvo,
+ enum NvKmsSurfaceMemoryLayout layout,
+ NvU32 pitch)
+{
+ if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) {
+ /* pitch is already in units of blocks; nothing else needed. */
+ } else {
+ /* pitch is in units of bytes, and must be aligned to 0x100. */
+ if ((pitch & 0xFF) != 0) {
+ return 0;
+ }
+
+ pitch >>= 8;
+ }
+
+ if (pitch > pDevEvo->caps.maxPitchValue) {
+ return 0;
+ }
+
+ return pitch;
+}
+
+static NvBool GetChannelState(NVDevEvoPtr pDevEvo,
+ NVEvoChannelPtr pChan,
+ NvU32 sd,
+ NvU32 *result)
+{
+ NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { };
+ NvU32 ret;
+
+ info.base.subdeviceIndex = sd;
+ info.channelClass = pChan->hwclass;
+ info.channelInstance = pChan->instance;
+
+ ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+ pDevEvo->displayHandle,
+ NV5070_CTRL_CMD_GET_CHANNEL_INFO,
+ &info, sizeof(info));
+ if (ret != NVOS_STATUS_SUCCESS) {
+ nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+ "Failed to query display engine channel state: 0x%08x:%d:%d:0x%08x",
+ pChan->hwclass, pChan->instance, sd, ret);
+ return FALSE;
+ }
+
+ *result = info.channelState;
+
+ return TRUE;
+}
+
+NvBool nvEvo1IsChannelIdle(NVDevEvoPtr pDevEvo,
+ NVEvoChannelPtr pChan,
+ NvU32 sd,
+ NvBool *result)
+{
+ NvU32 channelState;
+
+ if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) {
+ return FALSE;
+ }
+
+ *result = (channelState == NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE);
+
+ return TRUE;
+}
+
+/*
+ * The result is FALSE if the EVO channel state matches NO_METHOD_PENDING or
+ * UNCONNECTED, and TRUE otherwise.
+ *
+ * NO_METHOD_PENDING is a mask for EMPTY | WRTIDLE | IDLE.
+ *
+ * If NVKMS hasn't grabbed the channel, it can be seen as UNCONNECTED.
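+ *
+ * For example, a channel reporting WRTIDLE matches the NO_METHOD_PENDING
+ * mask, so *result below is set to FALSE (no methods pending).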
+ */ +NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvU32 channelState; + + if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) { + return FALSE; + } + + *result = !(channelState & + (NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING | + NV5070_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED)); + + return TRUE; +} + +void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + nvAssert(!pTimings->dpDsc.enable); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c new file mode 100644 index 0000000..b19d496 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c @@ -0,0 +1,3850 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains implementations of the EVO HAL methods for display class + * 2.x. 
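+ *
+ * (The base, core, and overlay channels used here are classes 917C, 917D
+ * and 917E, plus the 927D/947D/977D core channel variants.)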
+ */ + +#include "nvkms-dma.h" +#include "nvkms-types.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" + +#include "nvkms-evo.h" +#include "nvkms-evo1.h" + +#include + +#include // NV5070_NOTIFICATION_STATUS + +#include // NV917C_BASE_CHANNEL_DMA +#include // GK104DispOverlayImmControlPio +#include // NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE +#include // NV917D_HEAD_SET_HDMI_CTRL +#include // NV917E_OVERLAY_CHANNEL_DMA +#include // NV917C_SET_SPARE_{PRE,POST}_UPDATE_TRAP + +#include // NV917D_CORE_CHANNEL_DMA +#include // NV917D_NOTIFIER_CRC +#include // NV927D_CORE_CHANNEL_DMA +#include // NV977D_CORE_CHANNEL_DMA +#include // NV947D_CORE_CHANNEL_DMA +#include + +#include // NV5070_CTRL_CMD_STOP_BASE_PARAMS + +ct_assert(NV_EVO_LOCK_PIN_0 > + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1); +ct_assert(NV_EVO_LOCK_PIN_0 > + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1); + +/** Number of CRCs supported by hardware on NV917D hardware (Comp and SF/SOR) */ +#define NV_EVO2_NUM_CRC_FIELDS 2 + +/** Flags read from CRCNotifier on NV917D hardware (Comp, SF/SOR Ovf and count) */ +#define NV_EVO2_NUM_CRC_FLAGS 3 + +#define NV_EVO2_SUPPORTED_DITHERING_MODES \ + ((1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL)) + +#define NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES \ + ((1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) + +static void +EvoSetCursorImage(NVDevEvoPtr pDevEvo, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams); + +static void +EvoPushSetLUTContextDmaMethodsForOneSd(NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 ctxdma, + NvBool enableBaseLut, + const NvBool enableOutputLut, + NVEvoUpdateState *updateState); +static void +EvoPushUpdateComposition(NVDevEvoPtr pDevEvo, + const int head, + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +static void InitChannelCaps90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel) +{ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) { + static const NVEvoChannelCaps OverlayCaps = { + /* + * Overlay supports timestamp flips on class 9x7e, but error checks + * that it doesn't exceed 61 bits. + */ + .validTimeStampBits = 61, + /* The size of the legacy overlay notifier format. */ + .legacyNotifierFormatSizeBytes = NV_DISP_NOTIFICATION_2_SIZEOF, + /* Overlay does not support tearing/immediate flips. */ + .tearingFlips = FALSE, + .vrrTearingFlips = FALSE, + /* Overlay does not support per-eye stereo flips. */ + .perEyeStereoFlips = FALSE, + }; + + pChannel->caps = OverlayCaps; + } + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) { + static const NVEvoChannelCaps BaseCaps = { + /* + * Base supports timestamp flips on class 9x7c, but error checks + * that it doesn't exceed 61 bits. + */ + .validTimeStampBits = 61, + /* The size of the legacy base format. */ + .legacyNotifierFormatSizeBytes = NV_DISP_BASE_NOTIFIER_1_SIZEOF, + /* Base supports tearing/immediate flips. */ + .tearingFlips = TRUE, + /* Some 9x7c classes support VRR; may be overridden at runtime. 
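+ * Default to FALSE here; it is set to TRUE below for class 917c and up.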
*/ + .vrrTearingFlips = FALSE, + /* Base supports per-eye stereo flips. */ + .perEyeStereoFlips = TRUE, + }; + + pChannel->caps = BaseCaps; + + /* Base supports VRR tearing flips for class 917c and up. */ + if (pChannel->hwclass >= NV917C_BASE_CHANNEL_DMA) { + pChannel->caps.vrrTearingFlips = TRUE; + } + } +} + +static void EvoInitChannel90(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + + InitChannelCaps90(pDevEvo, pChannel); + + /* Set up core channel state. */ + if (isCore) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DEFAULT_BASE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _RED, 0) | + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _GREEN, 0) | + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _BLUE, 0)); + } + } + + /* Set up base channel state. */ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) { + NvU32 head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd]->baseComp[head].initialized = FALSE; + } + + // For now we only support USE_CORE_LUT mode, but sending this method every + // flip causes an error check to fire for tearing flips even if the LUT mode + // isn't changing. So instead, program it here. ApplyBaseFlipOverrides() + // will force the first flip to be non-tearing. + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_BASE_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_BASE_LUT_LO, _ENABLE, + _USE_CORE_LUT)); + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_OUTPUT_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _ENABLE, + _USE_CORE_LUT)); + } + + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) { + NvU32 head = NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(pChannel->channelMask); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd]->overlayComp[head].initialized = FALSE; + } + } +} + +static void EvoInitWindowMapping90(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + /* Fixed window mapping on EVO 2 -- nothing to do. */ +} + +/* + * These values are the same between all overlay + * (7E_SURFACE_SET_PARAMS_FORMAT_) EVO classes. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
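+ *
+ * (I8 and R5G6B5 also map to 0 below: the overlay channel has no
+ * equivalent formats, so they are treated as unsupported.)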
+ */ +static NvU32 EvoOverlayFormatFromKmsFormat91(enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return 0; + case NvKmsSurfaceMemoryFormatR5G6B5: + return 0; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + return NV917E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +static void EvoSetRasterParams90(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 overscanColor = + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _RED, pOverscanColor->red) | + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _GRN, pOverscanColor->green) | + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _BLU, pOverscanColor->blue); + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX[AGP]: These methods are sequential and could use an incrementing + // method, but it's not clear if there's a bug in EVO that causes corruption + // sometimes. Play it safe and send methods with count=1. 
+ + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OVERSCAN_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, overscanColor); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_SIZE, _WIDTH, pTimings->rasterSize.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_SIZE, _HEIGHT, pTimings->rasterSize.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_SYNC_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_SYNC_END, _X, pTimings->rasterSyncEnd.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_SYNC_END, _Y, pTimings->rasterSyncEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_BLANK_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_END, _X, pTimings->rasterBlankEnd.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_END, _Y, pTimings->rasterBlankEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_BLANK_START(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_START, _X, pTimings->rasterBlankStart.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_START, _Y, pTimings->rasterBlankStart.y)); + + if (pTimings->interlaced) { + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_VERT_BLANK2(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_VERT_BLANK2, _YSTART, + pTimings->rasterVertBlank2Start) | + DRF_NUM(917D, _HEAD_SET_RASTER_VERT_BLANK2, _YEND, + pTimings->rasterVertBlank2End)); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _MODE, _CLK_CUSTOM) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _NOT_DRIVER, _FALSE) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _ENABLE_HOPPING, _FALSE) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING_MODE, _VBLANK)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _ADJ1000DIV1001,_FALSE)); +} + +/* + * Wrapper for EvoSetRasterParams90 which additionally sends the HDMI 3D + * control methods. 
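+ *
+ * (For HDMI 3D frame packing, this programs HEAD_SET_HDMI_CTRL and the
+ * VACTIVE_SPACE_COLOR scanned out between the two eyes' images.)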
+ */ +static void EvoSetRasterParams91(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hdmiStereoCtrl = + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _STEREO3D_STRUCTURE, _FRAME_PACKED) | + DRF_NUM(917D, _HEAD_SET_HDMI_CTRL, _HDMI_VIC, 0); + + EvoSetRasterParams90(pDevEvo, head, + pTimings, + pOverscanColor, updateState); + + if (pTimings->hdmi3D) { + hdmiStereoCtrl |= + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _STEREO3D); + } else { + hdmiStereoCtrl |= + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _NORMAL); + } + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_VACTIVE_SPACE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _RED_CR, 0) | +#if defined(DEBUG) + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _GRN_Y, 512) | +#else + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _GRN_Y, 0) | +#endif + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _BLU_CB, 0)); + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_HDMI_CTRL(head), 1); + nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl); +} + +static void EvoSetProcAmp90(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // These NVT defines match the HEAD_SET_PROCAMP ones. + ct_assert(NVT_COLORIMETRY_RGB == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB); + ct_assert(NVT_COLORIMETRY_YUV_601 == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601); + ct_assert(NVT_COLORIMETRY_YUV_709 == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709); + ct_assert(NVT_COLOR_RANGE_FULL == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE); + ct_assert(NVT_COLOR_RANGE_LIMITED == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE); + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(917D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(917D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PROCAMP, _COLOR_SPACE, + pHeadState->procAmp.colorimetry) | + DRF_DEF(917D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _AUTO) | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _SAT_COS, + pHeadState->procAmp.satCos) | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) | + dynRange | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION, + pHeadState->procAmp.colorRange)); +} + +static void EvoSetHeadControl90(NVDevEvoPtr pDevEvo, int sd, int head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + /* + * NOTE: This function should only push state to the hardware based on data + * in the pHC. If not, then we may miss updates due to the memcmp of the + * HeadControl structure in UpdateEvoLockState(). 
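+ *
+ * (UpdateEvoLockState() only re-sends head control state when the cached
+ * structure changes, so state not captured in pHC could silently diverge
+ * from the hardware.)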
+ */ + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + NvU32 data = 0, pin; + NvU32 serverLockMode, clientLockMode; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHC->serverLock) { + case NV_EVO_NO_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid server lock mode"); + return; + } + + switch (pHC->clientLock) { + case NV_EVO_NO_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid client lock mode"); + return; + } + + // Convert head control state to EVO method values. + if (pHC->interlaced) { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _STRUCTURE, _INTERLACED); + } else { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _STRUCTURE, _PROGRESSIVE); + } + + nvAssert(pHC->serverLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->clientLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->flipLockPin != NV_EVO_LOCK_PIN_ERROR); + + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->serverLockPin)) { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + /* + * dispClass_02.mfs says: + * "master lock pin, if internal, must be set to the corresponding + * internal pin for that head" (error check #12) + * (Note that this is only enforced when scanlock master is enabled) + */ + nvAssert(pHC->serverLock == NV_EVO_NO_LOCK || pin == head); + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_MODE, serverLockMode); + + if (clientLockMode == NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK) { + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->clientLockPin)) { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_MODE, clientLockMode); + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCKOUT_WINDOW, + pHC->clientLockoutWindow); + + /* + * Interlaced with stereo lock mode is not supported. + * + * We always enable stereo lock when it's available and either framelock + * or rasterlock is in use. 
+ */ + if (pHC->stereoLocked) { + nvAssert(!pHC->interlaced); + + if (pHC->serverLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(927D, _HEAD_SET_CONTROL, _MASTER_STEREO_LOCK_MODE, + NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE); + } + if (pHC->clientLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(927D, _HEAD_SET_CONTROL, _SLAVE_STEREO_LOCK_MODE, + NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE); + } + } + + /* + * Changing the flip lock pin induces a head shutdown. We want to avoid + * this in two cases: + * + * 1) When transitioning from the UEFI console, the flip lock pin is + * currently set to UNSPECIFIED, so we want to preserve that setting + * if possible to avoid an unnecessary flicker. + * + * 2) While framelock is enabled, we need to avoid head shutdown when + * transitioning to and from fliplock to guarantee no loss of stereo + * sync. + * + * To guarantee stereo sync while also avoiding unnecessary flicker when + * transitioning from UEFI, we'll set the flip lock pin to UNSPECIFIED + * unless fliplock, frame lock, or raster lock are enabled. Enabling + * framelock may induce one head shutdown when transitioning away from + * UNSPECIFIED, but then enabling/disabling fliplock after that will + * have no effect on the fliplock pin. + */ + if (!pHC->flipLock && + (pHC->serverLock == NV_EVO_NO_LOCK) && + (pHC->clientLock == NV_EVO_NO_LOCK)) { + + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->flipLockPin)) { + pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(pin)); + } else { + pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(pin)); + } + if (pHC->flipLock) { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _FLIP_LOCK, _ENABLE); + } + + nvAssert(pHC->stereoPin != NV_EVO_LOCK_PIN_ERROR); + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin)) { + /* + * dispClass_02.mfs says: + * "stereo pin, if internal, must be set to the corresponding internal + * pin for that head" (error check #14) + * So just ignore which pin we selected; no sense in wasting cycles + * keeping track of it + */ + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _STEREO_PIN, + NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(head)); + } else { + pin = pHC->stereoPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _STEREO_PIN, + NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(pin)); + } + + // Send the method. + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_LOCK_CHAIN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917D, _HEAD_SET_LOCK_CHAIN, _POSITION, + pHC->lockChainPosition)); +} + +static void EvoSetHeadRefClk90(NVDevEvoPtr pDevEvo, int head, NvBool external, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, external ? 
+ DRF_DEF(907D, _HEAD_SET_SW_SPARE_A_CODE, _VPLL_REF, _GSYNC) : + DRF_DEF(907D, _HEAD_SET_SW_SPARE_A_CODE, _VPLL_REF, _NO_PREF)); +} + +static void EvoDACSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_DAC_RGB); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_DAC_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _DAC_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(917D, _DAC_SET_CONTROL, _PROTOCOL, _RGB_CRT)); +} + +static void EvoSORSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hwProtocol = 0; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(orIndex != NV_INVALID_OR); + + if (headMask != 0) { + switch (protocol) { + default: + nvAssert(!"unexpected protocol"); + /* fallthrough */ + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B; + break; + } + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_SOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_NUM(917D, _SOR_SET_CONTROL, _PROTOCOL, hwProtocol) | + DRF_DEF(917D, _SOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE) | + DRF_DEF(917D, _SOR_SET_CONTROL, _PIXEL_REPLICATE_MODE, _OFF)); +} + +static void EvoPIORSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_PIOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _PIOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(917D, _PIOR_SET_CONTROL, _PROTOCOL, _EXT_TMDS_ENC) | + DRF_DEF(917D, _PIOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE)); +} + +static NvU32 EvoGetPixelDepth90(const enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + case NVKMS_PIXEL_DEPTH_24_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + case NVKMS_PIXEL_DEPTH_30_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + 
} + nvAssert(!"Unexpected pixel depth"); + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; +} + +static void EvoHeadSetControlOR90(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + const NvU32 hwPixelDepth = EvoGetPixelDepth90(pTimings->pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _ACTIVE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + (colorSpaceOverride ? + (DRF_DEF(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE)) | + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth)); +} + +static void EvoORSetControl90(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + EvoDACSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + EvoSORSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + EvoPIORSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } +} + +static void EvoHeadSetDisplayId90(NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DISPLAY_ID(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, displayId); +} + +static NvBool EvoSetUsageBounds90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVEvoSubDevHeadStateRec *pCurrentFlipState = + &pDevEvo->gpus[sd].headState[head]; + const struct NvKmsUsageBounds *pCurrentUsage = + &pCurrentFlipState->usage; + NvU64 overlayFormats = 0; + NvU32 baseUsage = 0, overlayUsage = 0; + const NVSurfaceEvoRec *pCurrentBaseSurf = + pCurrentFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + const NVSurfaceEvoRec *pCurrentOverlaySurf = 
+ pCurrentFlipState->layer[NVKMS_OVERLAY_LAYER].pSurfaceEvo[NVKMS_LEFT]; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (UsageBoundsEqual(pCurrentUsage, pUsage)) { + return FALSE; + } + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* + * Make sure to interlock usage bounds update with the base and overlay + * channel updates, otherwise it ends up violating hardware error check for + * the base/overlay channel blocking. + * + * // check for blocking violations + * for (vlt_index = 0; vlt_index < NV_CHIP_DISP_TOTAL_HEADS_PRESENT_por; vlt_index++) { + * if ((wir_BlockBase[vlt_index] == TRUE) + * && (wir_BaseQuiescent[vlt_index] == FALSE) + * && ((ecv_GlobalHeadConnected[vlt_index] == TRUE) || (pri_ErrcheckWhenDisconnected == TRUE))) + * throw (vlt_index << NV_DISP_CORE_STATE_ERROR_HEAD_INDEX_SHIFT) | NV_DISP_CORE_STATE_ERROR_001; + * } + * + * for (vlt_index = 0; vlt_index < NV_CHIP_DISP_TOTAL_HEADS_PRESENT_por; vlt_index++) { + * if ((wir_BlockOverlay[vlt_index] == TRUE) + * && (wir_OverlayQuiescent[vlt_index] == FALSE) + * && ((ecv_GlobalHeadConnected[vlt_index] == TRUE) || (pri_ErrcheckWhenDisconnected == TRUE))) + * throw (vlt_index << NV_DISP_CORE_STATE_ERROR_HEAD_INDEX_SHIFT) | NV_DISP_CORE_STATE_ERROR_002; + */ + + if (pCurrentBaseSurf != NULL && + !nvEvoLayerUsageBoundsEqual(pUsage, pCurrentUsage, NVKMS_MAIN_LAYER)) { + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->base[head]); + } + + if (pCurrentOverlaySurf != NULL && + !nvEvoLayerUsageBoundsEqual(pUsage, pCurrentUsage, NVKMS_OVERLAY_LAYER)) { + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->overlay[head]); + } + + + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, _USABLE, + _TRUE); + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_64); + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_32); + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_8); + } else { + nvAssert(!"Unexpected base pixel depth"); + return FALSE; + } + + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _SUPER_SAMPLE, _X1_AA); + } + + overlayUsage |= pUsage->layer[NVKMS_OVERLAY_LAYER].usable ? + DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, _USABLE, _TRUE) : + DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, _USABLE, _FALSE); + + overlayFormats = pUsage->layer[NVKMS_OVERLAY_LAYER].usable ? 
+ pUsage->layer[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats : + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP; + + if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_32); + } else if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } else { + nvAssert(!"Unsupported overlay depth"); + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(head), 2); + nvDmaSetEvoMethodData(pChannel, baseUsage); + nvDmaSetEvoMethodData(pChannel, overlayUsage); + + return TRUE; +} + +static void EvoSetNotifierMethods90(NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + NvBool notify, + NvBool awaken, + NvU32 notifier) +{ + ASSERT_DRF_NUM(917D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier); + + if (notify) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, + NV917D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, + _SET_CONTEXT_DMA_NOTIFIER, + _HANDLE, + pDevEvo->core->notifiersDma[sd].ctxHandle)); + nvPopEvoSubDevMask(pDevEvo); + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, + NV917D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SET_CONTEXT_DMA_NOTIFIER, _HANDLE, 0)); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier) | + (awaken ? + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE_AWAKEN) : + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE)) | + (notify ? 
+ DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _NOTIFY, _ENABLE) : + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _NOTIFY, _DISABLE))); +} + +static void UpdateCore9x(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask) +{ + NvU32 head, value = 0; + + ct_assert(NV_EVO_CHANNEL_MASK_BASE__SIZE == + NV_EVO_CHANNEL_MASK_OVERLAY__SIZE); + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE, + interlockChannelMask)) { + value |= DRF_IDX_DEF(917D, _UPDATE, + _INTERLOCK_WITH_BASE, head, _ENABLE); + } + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE, + interlockChannelMask)) { + value |= DRF_IDX_DEF(917D, _UPDATE, + _INTERLOCK_WITH_OVERLAY, head, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateBase91(NVEvoChannelPtr pChannel, + NvBool interlockWithCore, + NvBool vrrTearing) +{ + NvU32 updateValue = 0; + NvU32 trapParam = 0; + + if (interlockWithCore) { + updateValue |= DRF_DEF(917C, _UPDATE, _INTERLOCK_WITH_CORE, _ENABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SPARE_PRE_UPDATE_TRAP, 1); + nvDmaSetEvoMethodData(pChannel, trapParam); + + nvDmaSetStartEvoMethod(pChannel, NV917C_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, updateValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SPARE_POST_UPDATE_TRAP, 1); + nvDmaSetEvoMethodData(pChannel, trapParam); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateOverlay9x(NVEvoChannelPtr pChannel, + NvBool interlockWithCore) +{ + NvU32 value = 0; + + if (interlockWithCore) { + value |= DRF_DEF(917E, _UPDATE, _INTERLOCK_WITH_CORE, _ENABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaKickoffEvo(pChannel); +} + +static void EvoUpdate91(NVDevEvoPtr pDevEvo, + const NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NvU32 sd; + NVEvoChannelMask fliplockedBaseChannels[NVKMS_MAX_SUBDEVICES] = { }; + NvBool updateAllFliplockedBaseChannels = FALSE; + + /* + * Multiple 'base + core channel interlocked' updates can create deadlock + * if heads are flip locked. + * + * For example - if head-0 and head-1 are flip locked and you initiate two + * 'base + core channel interlocked' updates separately for each of + * the head then that creates deadlock: + * + * + * +--------+ +--------+ +--------+ + * | BASE-0 | | CORE | | BASE-1 | + * +--------+ +--------+ +--------+ + * | | | | | | + * | | | | | | + * +--------+------+--------+ | | + * | INTERLOCKED | | | + * | UPDATE-0 | | | + * +--------+------+--------+ | | + * | Base | | Core | | | + * <...| update |<.... | Update | | | + * : | for | | for | | | + * : | head-0 | | head-0 | | | + * : +--------+------+--------+ | | + * : | | | ^ | | | + * : | | | : | | | + * : +--------+ | : | | | + * : | : | | | + * : +---(----+------+--------+ + * : | : INTERLOCKED | + * : | : UPDATE-1 | + * : +--------+------+--------+ + * V | Core | | Base | + * : | update |<.... | Update | + * : | for | | for |<... + * : | head-1 | | head-1 | : + * : +--------+------+--------+ : + * : | | | | ^ + * : +--------+ +--------+ : + * : : + * V...................>............................> + * + * ^ + * | + * | + * [ BASE-0 and BASE-1 are fliplocked ] + * + * Here you can follow the dotted arrow line and see how deadlock + * has been formed. The dotted arrow line indicates the execution + * dependency of the one update onto another, e.g. 
the core update + * for head-1 can't get executed unless the core update for head-0 + * gets executed. + * + * To prevent this deadlock, initiate the base channel updates for all flip + * locked heads if update state contains 'base + core channel interlocked' + * for the flip locked head. + */ + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoChannelMask updateChannelMask = updateState->subdev[sd].channelMask; + NVEvoChannelMask interlockChannelMask = + updateChannelMask & ~updateState->subdev[sd].noCoreInterlockMask; + NvU32 head; + + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE); + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock) { + fliplockedBaseChannels[sd] |= thisMask; + } + + /* + * If this update is updating only one base channel without any core + * interlock, in that case, we don't need to also update all flip + * locked base channels. + */ + if (NV_EVO_CHANNEL_MASK_POPCOUNT(interlockChannelMask) <= 1 && + !FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + continue; + } + + if ((updateChannelMask & thisMask) != 0x0 && pHC->flipLock) { + updateAllFliplockedBaseChannels = TRUE; + } + } + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoChannelMask updateChannelMask = updateState->subdev[sd].channelMask | + (updateAllFliplockedBaseChannels ? fliplockedBaseChannels[sd] : 0x0); + NVEvoChannelMask interlockChannelMask = + updateChannelMask & ~updateState->subdev[sd].noCoreInterlockMask; + NvBool interlockWithCore = FALSE; + const NvU32 subDeviceMask = (1 << sd); + NvU32 head; + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + if (NV_EVO_CHANNEL_MASK_POPCOUNT(interlockChannelMask) > 1) { + /* We can only interlock updates if core is included. 
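+ * The per-channel UPDATE methods only provide INTERLOCK_WITH_CORE (for
+ * base and overlay) and INTERLOCK_WITH_BASE/_OVERLAY (for core), so any
+ * multi-channel interlock must include the core channel.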
*/ + nvAssert(!FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateState->subdev[sd].noCoreInterlockMask)); + updateChannelMask |= DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE); + interlockChannelMask |= + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE); + interlockWithCore = TRUE; + } + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + UpdateCore9x(pDevEvo->core, updateChannelMask); + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_OVERLAY__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE); + if (updateChannelMask & thisMask) { + NvBool thisInterlockWithCore = interlockWithCore && + (interlockChannelMask & thisMask); + UpdateOverlay9x(pDevEvo->overlay[head], + thisInterlockWithCore); + } + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE); + if (updateChannelMask & thisMask) { + NvBool thisInterlockWithCore = interlockWithCore && + (interlockChannelMask & thisMask); + NvBool vrrTearing = + updateState->subdev[sd].base[head].vrrTearing; + + UpdateBase91(pDevEvo->base[head], + thisInterlockWithCore, vrrTearing); + } + } + + nvPopEvoSubDevMask(pDevEvo); + } +} + +static void EvoSetNotifier90(NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + EvoSetNotifierMethods90(pDevEvo, pDevEvo->core, notify, awaken, notifier); +} + +/* + * Returns the data for the SET_STORAGE method. The method data + * format is the same between classes 90[CDE]. + */ +static NvU32 EvoComputeSetStorage90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + NvU32 setStorage; + + NvU32 pitch = nvEvoGetHeadSetStoragePitchValue( + pDevEvo, + pSurfaceEvo->layout, + pSurfaceEvo->planes[0].pitch); + nvAssert(pitch != 0); + + if (pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + // 1 block = 1 * X Gobs; 1 Gob = 64B * 4Lines; X = 1 << + // blockHeightLog2Gobs + NvU32 blockHeight = pSurfaceEvo->log2GobsPerBlockY; + + setStorage = DRF_NUM(917D, _HEAD_SET_STORAGE, _BLOCK_HEIGHT, blockHeight) | + DRF_DEF(917D, _HEAD_SET_STORAGE, _MEMORY_LAYOUT, _BLOCKLINEAR); + } else { + setStorage = DRF_DEF(917D, _HEAD_SET_STORAGE, _MEMORY_LAYOUT, _PITCH); + } + + ASSERT_DRF_NUM(917D, _HEAD_SET_STORAGE, _PITCH, pitch); + setStorage |= DRF_NUM(917D, _HEAD_SET_STORAGE, _PITCH, pitch); + + return setStorage; +} + +static void SetCscMatrix(NVEvoChannelPtr pChannel, NvU32 method, + const struct NvKmsCscMatrix *matrix, + NvU32 extraFirstWordBits) +{ + int y; + + // The _COEFF fields are the same across all of the methods on all + // channels. + ct_assert(DRF_SHIFTMASK(NV917C_SET_CSC_RED2RED_COEFF) == + DRF_SHIFTMASK(NV917D_HEAD_SET_CSC_RED2RED_COEFF)); + ct_assert(DRF_SHIFTMASK(NV917C_SET_CSC_RED2RED_COEFF) == + DRF_SHIFTMASK(NV917E_SET_CSC_RED2RED_COEFF)); + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. 
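+ // (Unlike ASSERT_DRF_NUM, DRF_NUM masks the value to the field width
+ // rather than asserting.)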
+ NvU32 val = DRF_NUM(917C, _SET_CSC_RED2RED, _COEFF, + matrix->m[y][x]); + + if (x == 0 && y == 0) { + val |= extraFirstWordBits; + } + + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, val); + + method += 4; + } + } +} + +/* + * These values are the same between all base + * (_SURFACE_SET_PARAMS_FORMAT_) and core (_HEAD_SET_PARAMS_FORMAT_) + * EVO classes. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. + */ +static NvU32 nvHwFormatFromKmsFormat90( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return NV917D_HEAD_SET_PARAMS_FORMAT_I8; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NV917D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatR5G6B5: + return NV917D_HEAD_SET_PARAMS_FORMAT_R5G6B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NV917D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + return NV917D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NV917D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + return NV917D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +static void EvoSetSurface(NVDevEvoPtr pDevEvo, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsCscMatrix *pCscMatrix, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 sd; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) { + /* + * The EVO2 ->SetCursorImage() function programs cursor image surface + * only if NVEvoSubDeviceRec::pCoreChannelSurface is non-null. + */ + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] = pSurfaceEvo; + } FOR_EACH_SUBDEV_IN_MASK_END + + if (!pSurfaceEvo) { + // Disable surface scanout on this head. It will scan out the default + // base color instead. 
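+ // (The default base color is programmed to black in EvoInitChannel90.)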
+ nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMAS_ISO(head), 1); + nvDmaSetEvoMethodData(pChannel, 0); + return; + } + + nvAssert(pSurfaceEvo->planes[0].ctxDma); + + // XXX[AGP]: These methods are sequential, but sending them with a single + // count=7 method header sometimes causes EVO to throw an IsoViolation + // exception. + + // Set the surface parameters. + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OFFSET(head), 1); + nvDmaSetEvoMethodData(pChannel, + nvCtxDmaOffsetFromBytes(pSurfaceEvo->planes[0].offset)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_SIZE, _WIDTH, pSurfaceEvo->widthInPixels) | + DRF_NUM(917D, _HEAD_SET_SIZE, _HEIGHT, pSurfaceEvo->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_STORAGE(head), 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pSurfaceEvo)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PARAMS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PARAMS, _FORMAT, + nvHwFormatFromKmsFormat90(pSurfaceEvo->format)) | + DRF_DEF(917D, _HEAD_SET_PARAMS, _SUPER_SAMPLE, _X1_AA) | + DRF_DEF(917D, _HEAD_SET_PARAMS, _GAMMA, _LINEAR)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMAS_ISO(head), 1); + nvDmaSetEvoMethodData(pChannel, pSurfaceEvo->planes[0].ctxDma); + + /* NULL => don't change the CSC. */ + if (pCscMatrix) { + SetCscMatrix(pChannel, NV917D_HEAD_SET_CSC_RED2RED(head), pCscMatrix, 0); + } +} + +static void +EvoPushSetCoreSurfaceMethodsForOneSd(NVDevEvoRec *pDevEvo, + const NvU32 sd, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsCscMatrix *pCscMatrix, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + const NVFlipCursorEvoHwState *pSdCursorState = &pSdHeadState->cursor; + + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const int dispIndex = pDispEvo->displayOwner; + NvU8 curLutIndex = pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + + NvBool enableOutputLut = + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled; + NvBool enableBaseLut = + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled; + + NVLutSurfaceEvoPtr pCurLutSurfEvo = + pDevEvo->lut.head[head].LUT[curLutIndex]; + NvU32 lutCtxdma = pCurLutSurfEvo != NULL ? + pCurLutSurfEvo->dispCtxDma : 0x0; + + if (pSurfaceEvo == NULL || pCurLutSurfEvo == NULL) { + enableOutputLut = FALSE; + enableBaseLut = FALSE; + lutCtxdma = 0x0; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + EvoSetSurface(pDevEvo, head, pSurfaceEvo, pCscMatrix, updateState); + + EvoSetCursorImage(pDevEvo, + head, + pSurfaceEvo != NULL ? + pSdCursorState->pSurfaceEvo : NULL, + updateState, + &pSdCursorState->cursorCompParams); + + /* + * EvoPushSetLUTContextDmaMethodsForOneSd() force enables base + * Lut if core scanout surface depth is 8. 
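+ * (I8 core scanout is palette-indexed, so a base LUT is needed even if
+ * the client did not request one.)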
+ */ + EvoPushSetLUTContextDmaMethodsForOneSd( + pDevEvo, sd, head, lutCtxdma, enableBaseLut, enableOutputLut, + updateState); + + nvPopEvoSubDevMask(pDevEvo); +} + +static void +FlipBase90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + int eye; + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + NvU32 value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917C, _SET_NOTIFIER_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917C, _SET_NOTIFIER_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917C, _SET_NOTIFIER_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + value = FLD_SET_DRF_NUM(917C, _SET_NOTIFIER_CONTROL, _MODE, + pHwState->completionNotifier.awaken ? + NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN : + NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE, value); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + /* program semaphore */ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + NvU32 value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917C, _SET_SEMAPHORE_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917C, _SET_SEMAPHORE_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917C, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) { + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT]); + + // Disable base on this head. 
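+        /*
+         * Disabling base below tears down both eyes' ISO context DMAs,
+         * clears present control, and hands the CSC back to the core
+         * channel.  Sketch of the ownership handoff expressed through
+         * the _SET_CSC_RED2RED _OWNER field:
+         *
+         *     base disabled -> _OWNER = _CORE   (written below)
+         *     base enabled  -> _OWNER = _BASE   (via SetCscMatrix later)
+         */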
+ nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(0), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(1), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CSC_RED2RED, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _CORE)); + return; + } + + NvU32 presentControl = + DRF_NUM(917C, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval); + + if (pHwState->tearing) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _IMMEDIATE, presentControl); + /* + * This avoids an invalid state exception: + * + * if ((SetPresentControl.BeginMode != NON_TEARING) && + * (SetPresentControl.BeginMode != AT_FRAME) + * && (wir_InterlockWithCore == ENABLE)) + * throw NV_DISP_BASE_STATE_ERROR_001; + */ + nvDisableCoreInterlockUpdateState(pDevEvo, updateState, pChannel); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _NON_TEARING, presentControl); + } + + if (pHwState->pSurfaceEvo[NVKMS_RIGHT]) { + if (pHwState->perEyeStereoFlip) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _STEREO_FLIP_MODE, _AT_ANY_FRAME, + presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _STEREO_FLIP_MODE, _PAIR_FLIP, + presentControl); + } + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _MODE, _STEREO, presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _MODE, _MONO, presentControl); + } + + // If we have a non-zero timestamp we need to enable timestamp mode + if (pHwState->timeStamp == 0) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, _DISABLE, presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, _ENABLE, presentControl); + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_TIMESTAMP_ORIGIN_LO, 2); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, presentControl); + + SetCscMatrix(pChannel, NV917C_SET_CSC_RED2RED, &pHwState->cscMatrix, + DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _BASE)); + + // Set the surface parameters. 
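+    /*
+     * Both eyes must share size, storage and format; the asserts below
+     * enforce that.  Sketch of the per-eye programming: each eye gets its
+     * own offset and ISO ctxdma, and a mono flip leaves the right eye's
+     * ctxdma at 0:
+     *
+     *     ctxdma = pSurfaceEvo[eye] ? pSurfaceEvo[eye]->planes[0].ctxDma : 0;
+     *     offset = pSurfaceEvo[eye] ? pSurfaceEvo[eye]->planes[0].offset : 0;
+     */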
+ FOR_ALL_EYES(eye) { + NvU32 ctxdma = 0; + NvU64 offset = 0; + + if (pHwState->pSurfaceEvo[eye]) { + ctxdma = pHwState->pSurfaceEvo[eye]->planes[0].ctxDma; + offset = pHwState->pSurfaceEvo[eye]->planes[0].offset; + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_OFFSET(0, eye), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_OFFSET, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(eye), 1); + nvDmaSetEvoMethodData(pChannel, ctxdma); + } + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, widthInPixels); + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, heightInPixels); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_SIZE(0), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_SIZE, _WIDTH, + pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels) | + DRF_NUM(917C, _SURFACE_SET_SIZE, _HEIGHT, + pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels)); + + nvAssert(pHwState->sizeIn.width == pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels); + nvAssert(pHwState->sizeIn.height == pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels); + nvAssert(pHwState->sizeIn.width == pHwState->sizeOut.width); + nvAssert(pHwState->sizeIn.height == pHwState->sizeOut.height); + + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT] || + (EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_LEFT]) == + EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_RIGHT]))); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_STORAGE(0), 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_LEFT])); + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, format); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_PARAMS(0), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_PARAMS, _FORMAT, + nvHwFormatFromKmsFormat90(pHwState->pSurfaceEvo[NVKMS_LEFT]->format)) | + DRF_DEF(917C, _SURFACE_SET_PARAMS, _SUPER_SAMPLE, _X1_AA) | + DRF_DEF(917C, _SURFACE_SET_PARAMS, _GAMMA, _LINEAR)); + + nvAssert(pHwState->inputLut.pLutSurfaceEvo == NULL); +} + +static void +FlipOverlay90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvBool *pInterlockwithCore) +{ + const NvU32 head = + NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT]; + NvU32 value; + NvU32 sd; + + /* Overlay class 917E can't do stereo */ + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT]); + + /* + * The NVKMS driver enforces these conditions on its clients: 1) enable a + * core-surface before enabling an overlay-surface, 2) disable an + * overlay-surface before disabling a core-surface. + * + * Updates to enable/disable a core and an overlay surface execute + * separately and are not interlocked. To avoid a race condition between a + * core and an overlay channel, detect an overlay channel update which is + * enabling/disabling an overlay-surface and interlock that update with a + * core channel update. + * + * This makes sure that an update to disable an overlay-surface interlocked + * with a core channel and a follow-on update to disable the core-surface + * will wait for the previous overlay flip to complete. It also makes sure + * that an update to enable an overlay-surface will wait for the previous + * core channel flip to complete. 
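+ *
+ * Sketch of the transition test applied below, where 'prev' is the
+ * per-subdevice cached overlay ISO ctxdma (0 means the overlay was
+ * disabled):
+ *
+ *     NvBool enabling  = (prev == 0) && (pSurfaceEvo != NULL);
+ *     NvBool disabling = (prev != 0) && (pSurfaceEvo == NULL);
+ *     if (enabling || disabling) {
+ *         *pInterlockwithCore = TRUE;
+ *     }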
+ */
+
+    FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) {
+        NvU32 prevCtxDmaIso =
+            pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head];
+
+        if ((prevCtxDmaIso != 0x0 && pSurfaceEvo == NULL) ||
+            (prevCtxDmaIso == 0x0 && pSurfaceEvo != NULL)) {
+            *pInterlockwithCore = TRUE;
+        }
+
+        if (pSurfaceEvo != NULL) {
+            pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head] =
+                pSurfaceEvo->planes[0].ctxDma;
+            pDevEvo->pSubDevices[sd]->overlaySurfFormat[head] = pSurfaceEvo->format;
+
+        } else {
+            pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head] = 0x0;
+        }
+    } FOR_EACH_SUBDEV_IN_MASK_END
+
+    /* program notifier */
+
+    if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) {
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_NOTIFIER, 1);
+        nvDmaSetEvoMethodData(pChannel, 0);
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_NOTIFIER_CONTROL, 1);
+        nvDmaSetEvoMethodData(pChannel, 0);
+
+    } else {
+        const NVFlipNIsoSurfaceEvoHwState *pNIso =
+            &pHwState->completionNotifier.surface;
+        value = 0;
+
+        if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) {
+            value = FLD_SET_DRF(917E, _SET_NOTIFIER_CONTROL, _FORMAT,
+                                _LEGACY, value);
+        } else {
+            value = FLD_SET_DRF(917E, _SET_NOTIFIER_CONTROL, _FORMAT,
+                                _FOUR_WORD, value);
+        }
+
+        value = FLD_SET_DRF_NUM(917E, _SET_NOTIFIER_CONTROL, _OFFSET,
+                                pNIso->offsetInWords, value);
+
+        value = FLD_SET_DRF_NUM(917E, _SET_NOTIFIER_CONTROL, _MODE,
+                                pHwState->completionNotifier.awaken ?
+                                    NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN :
+                                    NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE, value);
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_NOTIFIER, 1);
+        nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma);
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_NOTIFIER_CONTROL, 1);
+        nvDmaSetEvoMethodData(pChannel, value);
+    }
+
+    /* program semaphore */
+    nvAssertSameSemaphoreSurface(pHwState);
+
+    if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) {
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_SEMAPHORE, 1);
+        nvDmaSetEvoMethodData(pChannel, 0);
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_CONTROL, 1);
+        nvDmaSetEvoMethodData(pChannel, 0);
+
+    } else {
+        const NVFlipNIsoSurfaceEvoHwState *pNIso =
+            &pHwState->syncObject.u.semaphores.acquireSurface;
+        value = 0;
+
+        if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) {
+            value = FLD_SET_DRF(917E, _SET_SEMAPHORE_CONTROL, _FORMAT,
+                                _LEGACY, value);
+        } else {
+            value = FLD_SET_DRF(917E, _SET_SEMAPHORE_CONTROL, _FORMAT,
+                                _FOUR_WORD, value);
+        }
+
+        value = FLD_SET_DRF_NUM(917E, _SET_SEMAPHORE_CONTROL, _OFFSET,
+                                pNIso->offsetInWords, value);
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_SEMAPHORE, 1);
+        nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma);
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_ACQUIRE, 1);
+        nvDmaSetEvoMethodData(pChannel,
+            pHwState->syncObject.u.semaphores.acquireValue);
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_RELEASE, 1);
+        nvDmaSetEvoMethodData(pChannel,
+            pHwState->syncObject.u.semaphores.releaseValue);
+
+        nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_CONTROL, 1);
+        nvDmaSetEvoMethodData(pChannel, value);
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NV917E_SET_TIMESTAMP_ORIGIN_LO, 2);
+    nvDmaSetEvoMethodData(pChannel, 0);
+    nvDmaSetEvoMethodData(pChannel, 0);
+
+    nvDmaSetStartEvoMethod(pChannel, NV917E_SET_UPDATE_TIMESTAMP_LO, 2);
+    nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp));
+    nvDmaSetEvoMethodData(pChannel,
NvU64_HI32(pHwState->timeStamp)); + + if (pHwState->timeStamp == 0) { + value = NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP; + } else { + value = NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP; + } + nvAssert(!pHwState->tearing); + nvAssert(!pHwState->vrrTearing); + nvAssert(!pHwState->perEyeStereoFlip); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_PRESENT_CONTROL, _BEGIN_MODE, value) | + DRF_NUM(917E, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval)); + + if (!pSurfaceEvo) { + // Disable overlay on this head. + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, 0); + return; + } + + nvAssert(pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SIZE_IN, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_SIZE_IN, _WIDTH, pHwState->sizeIn.width) | + DRF_NUM(917E, _SET_SIZE_IN, _HEIGHT, pHwState->sizeIn.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SIZE_OUT, 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917E, _SET_SIZE_OUT, _WIDTH, + pHwState->sizeOut.width)); + + // Set the surface parameters. + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_OFFSET(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_OFFSET, _ORIGIN, + nvCtxDmaOffsetFromBytes(pSurfaceEvo->planes[0].offset))); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_SIZE, _WIDTH, pSurfaceEvo->widthInPixels) | + DRF_NUM(917E, _SURFACE_SET_SIZE, _HEIGHT, pSurfaceEvo->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_STORAGE, 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pSurfaceEvo)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_PARAMS, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_PARAMS, _FORMAT, + EvoOverlayFormatFromKmsFormat91(pSurfaceEvo->format)) | + DRF_DEF(917E, _SURFACE_SET_PARAMS, _COLOR_SPACE, _RGB)); + + SetCscMatrix(pChannel, NV917E_SET_CSC_RED2RED, &pHwState->cscMatrix, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, pSurfaceEvo->planes[0].ctxDma); + + nvAssert(pHwState->inputLut.pLutSurfaceEvo == NULL); +} + +static NvBool +needToReprogramCoreSurface(NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const NVSurfaceEvoRec *pNewSurfaceEvo) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const int dispIndex = pDispEvo->displayOwner; + NvBool enableBaseLut = + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled; + + const NVSurfaceEvoRec *pCurrCoreSurfaceEvo = + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]; + const NvBool currIsBaseSurfSpecified = + pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head]; + const NvU32 currHeightInPixels = pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->heightInPixels : 0; + const NvU32 currWidthInPixels = pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->widthInPixels : 0; + const enum NvKmsSurfaceMemoryFormat currFormat = + pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->format : NvKmsSurfaceMemoryFormatI8; + + const NvBool newIsBaseSurfSpecified = pNewSurfaceEvo != NULL; + const NvU32 newHeightInPixels = pNewSurfaceEvo != NULL ? + pNewSurfaceEvo->heightInPixels : 0; + const NvU32 newWidthInPixels = pNewSurfaceEvo != NULL ? 
+        pNewSurfaceEvo->widthInPixels : 0;
+    const enum NvKmsSurfaceMemoryFormat newFormat = pNewSurfaceEvo != NULL ?
+        pNewSurfaceEvo->format : NvKmsSurfaceMemoryFormatI8;
+
+    /* If base channel flips from NULL to non-NULL surface or vice-versa */
+    if (currIsBaseSurfSpecified != newIsBaseSurfSpecified) {
+        return TRUE;
+    }
+
+    /*
+     * Reprogram the core surface if the current and new base surfaces have
+     * a different size or format. The format check is needed to enable or
+     * disable the input LUT when the client has not explicitly enabled or
+     * disabled it and the base surface is flipping to, or away from, the
+     * I8 format.
+     */
+    if (newIsBaseSurfSpecified) {
+
+        if (newWidthInPixels != currWidthInPixels ||
+            newHeightInPixels != currHeightInPixels) {
+            return TRUE;
+        }
+
+        if (!enableBaseLut &&
+            newFormat != currFormat &&
+            (currFormat == NvKmsSurfaceMemoryFormatI8 ||
+             newFormat == NvKmsSurfaceMemoryFormatI8)) {
+            return TRUE;
+        }
+    }
+
+    return !currIsBaseSurfSpecified;
+}
+
+static void
+EvoPushUpdateCompositionIfNeeded(NVDevEvoPtr pDevEvo,
+                                 const NvU32 sd,
+                                 NVEvoChannelPtr pChannel,
+                                 const NVFlipChannelEvoHwState *pHwState,
+                                 NVEvoUpdateState *updateState,
+                                 NvBool bypassComposition)
+{
+    const NVSurfaceEvoRec *pNewSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT];
+    NvBool updateComposition = FALSE;
+    const NVFlipChannelEvoHwState *pBaseHwState = NULL;
+    const NVFlipChannelEvoHwState *pOverlayHwState = NULL;
+    NvU32 head = NV_INVALID_HEAD;
+
+    if (pNewSurfaceEvo == NULL) {
+        return;
+    }
+
+    /*
+     * Re-program the composition parameters if this is the first layer
+     * update, if the color key selection method changed, or if the layer
+     * is using source color keying and the color key changed.
+     */
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) {
+        head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask);
+        pOverlayHwState =
+            &pDevEvo->gpus[sd].headState[head].layer[NVKMS_OVERLAY_LAYER];
+        pBaseHwState = pHwState;
+
+        if ((!pDevEvo->pSubDevices[sd]->baseComp[head].initialized) ||
+
+            (pHwState->composition.colorKeySelect !=
+             pDevEvo->pSubDevices[sd]->baseComp[head].colorKeySelect) ||
+
+            ((pHwState->composition.colorKeySelect ==
+              NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) &&
+             (pNewSurfaceEvo->format !=
+              pDevEvo->pSubDevices[sd]->baseSurfFormat[head] ||
+              nvkms_memcmp(&pHwState->composition.colorKey,
+                           &pDevEvo->pSubDevices[sd]->baseComp[head].colorKey,
+                           sizeof(pHwState->composition.colorKey)) != 0))) {
+
+            pDevEvo->pSubDevices[sd]->baseComp[head].initialized = TRUE;
+            pDevEvo->pSubDevices[sd]->baseComp[head].colorKeySelect =
+                pHwState->composition.colorKeySelect;
+            pDevEvo->pSubDevices[sd]->baseComp[head].colorKey =
+                pHwState->composition.colorKey;
+            updateComposition = TRUE;
+        }
+    }
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) {
+        head = NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(pChannel->channelMask);
+        pBaseHwState =
+            &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER];
+        pOverlayHwState = pHwState;
+
+        if ((!pDevEvo->pSubDevices[sd]->overlayComp[head].initialized) ||
+
+            (pHwState->composition.colorKeySelect !=
+             pDevEvo->pSubDevices[sd]->overlayComp[head].colorKeySelect) ||
+
+            ((pHwState->composition.colorKeySelect ==
+              NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) &&
+             (pNewSurfaceEvo->format !=
+              pDevEvo->pSubDevices[sd]->overlaySurfFormat[head] ||
+              nvkms_memcmp(&pHwState->composition.colorKey,
+                           &pDevEvo->pSubDevices[sd]->overlayComp[head].colorKey,
+                           sizeof(pHwState->composition.colorKey)) != 0))) {
+
+            pDevEvo->pSubDevices[sd]->overlayComp[head].initialized = TRUE;
+            pDevEvo->pSubDevices[sd]->overlayComp[head].colorKeySelect =
+                pHwState->composition.colorKeySelect;
+            pDevEvo->pSubDevices[sd]->overlayComp[head].colorKey =
+                pHwState->composition.colorKey;
+            updateComposition = TRUE;
+        }
+    }
+
+    if (updateComposition) {
+        nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
+        EvoPushUpdateComposition(pDevEvo, head, pBaseHwState, pOverlayHwState,
+                                 updateState, bypassComposition);
+        nvPopEvoSubDevMask(pDevEvo);
+    }
+}
+
+static void EvoFlip90(NVDevEvoPtr pDevEvo,
+                      NVEvoChannelPtr pChannel,
+                      const NVFlipChannelEvoHwState *pHwState,
+                      NVEvoUpdateState *updateState,
+                      NvBool bypassComposition)
+{
+    NvU32 sd;
+
+    FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) {
+        EvoPushUpdateCompositionIfNeeded(pDevEvo, sd, pChannel, pHwState,
+                                         updateState, bypassComposition);
+    } FOR_EACH_SUBDEV_IN_MASK_END
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) {
+        const NvU32 head =
+            NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask);
+
+        FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) {
+            if (needToReprogramCoreSurface(
+                    pDevEvo,
+                    sd,
+                    head,
+                    pHwState->pSurfaceEvo[NVKMS_LEFT])) {
+                const struct NvKmsCscMatrix zeroCscMatrix = { };
+
+                nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
+                EvoPushSetCoreSurfaceMethodsForOneSd(pDevEvo, sd, head,
+                    pHwState->pSurfaceEvo[NVKMS_LEFT],
+                    &zeroCscMatrix, updateState);
+                nvPopEvoSubDevMask(pDevEvo);
+            }
+
+            if (pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) {
+                pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head] = TRUE;
+                pDevEvo->pSubDevices[sd]->baseSurfFormat[head] =
+                    pHwState->pSurfaceEvo[NVKMS_LEFT]->format;
+            } else {
+                pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head] = FALSE;
+            }
+        } FOR_EACH_SUBDEV_IN_MASK_END
+
+        FlipBase90(pDevEvo, pChannel, pHwState, updateState);
+
+        if (pHwState->vrrTearing) {
+            int head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask);
+            NvU32 sd, subDeviceMask = nvPeekEvoSubDevMask(pDevEvo);
+
+            for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+                if (subDeviceMask & (1 << sd)) {
+                    updateState->subdev[sd].base[head].vrrTearing = TRUE;
+                }
+            }
+        }
+    } else if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) {
+        NvBool interlockWithCore = FALSE;
+
+        FlipOverlay90(pDevEvo, pChannel, pHwState, &interlockWithCore);
+
+        if (interlockWithCore) {
+            nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core);
+        }
+    } else {
+        nvAssert(!"Unknown channel mask in EvoFlip90");
+    }
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+}
+
+static void EvoFlipTransitionWAR90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                                   const NVEvoSubDevHeadStateRec *pSdHeadState,
+                                   const NVFlipEvoHwState *pFlipState,
+                                   NVEvoUpdateState *updateState)
+{
+    /* Nothing to do pre-Turing */
+}
+
+/*!
+ * Pack the given abstract color key into a key and mask as required
+ * by the display engine.
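+ *
+ * Example (sketch): keying on pure green in X8R8G8B8 packs as
+ *
+ *     NVColorKey key = { 0 };
+ *     NvU32 value, mask;
+ *     key.matchG = TRUE;
+ *     key.g = 0xff;
+ *     EvoPackColorKey91(NvKmsSurfaceMemoryFormatX8R8G8B8, key,
+ *                       &value, &mask);
+ *
+ * after which value == 0x0000ff00 and mask == 0x0000ff00.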
+ * + * \param[in] format NVKMS format for the input surface + * \param[in] key NVKMS representation of a color key + * \param[out] pValue NV857E_SET_KEY_COLOR_COLOR value + * \param[out] pMask NV857E_SET_KEY_COLOR_MASK value + */ +static void EvoPackColorKey91(enum NvKmsSurfaceMemoryFormat format, + const NVColorKey key, + NvU32 *pValue, NvU32 *pMask) +{ + NvU32 value = 0, mask = 0; + switch (format) { + case NvKmsSurfaceMemoryFormatR5G6B5: + if (key.matchR) { + mask |= 0x1f << 11; + value |= (key.r & 0x1f) << 11; + } + if (key.matchG) { + mask |= 0x3f << 5; + value |= (key.g & 0x3f) << 5; + } + if (key.matchB) { + mask |= 0x1f << 0; + value |= (key.b & 0x1f) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + if (key.matchA) { + mask |= 0x1 << 15; + value |= (key.a & 0x1) << 15; + } + if (key.matchR) { + mask |= 0x1f << 10; + value |= (key.r & 0x1f) << 10; + } + if (key.matchG) { + mask |= 0x1f << 5; + value |= (key.g & 0x1f) << 5; + } + if (key.matchB) { + mask |= 0x1f << 0; + value |= (key.b & 0x1f) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + if (key.matchA) { + /* Only one bit of alpha is handled by the hw. */ + mask |= 0x1 << 31; + value |= (key.a ? 1:0) << 31; + } + if (key.matchR) { + mask |= 0xff << 16; + value |= (key.r & 0xff) << 16; + } + if (key.matchG) { + mask |= 0xff << 8; + value |= (key.g & 0xff) << 8; + } + if (key.matchB) { + mask |= 0xff << 0; + value |= (key.b & 0xff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + if (key.matchA) { + /* Only one bit of alpha is handled by the hw. */ + mask |= 0x1 << 31; + value |= (key.a ? 1:0) << 31; + } + if (key.matchB) { + mask |= 0xff << 16; + value |= (key.b & 0xff) << 16; + } + if (key.matchG) { + mask |= 0xff << 8; + value |= (key.g & 0xff) << 8; + } + if (key.matchR) { + mask |= 0xff << 0; + value |= (key.r & 0xff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + if (key.matchA) { + /* Only one bit of alpha is handled by the hw. */ + mask |= 0x1 << 31; + value |= (key.a ? 
1:0) << 31; + } + if (key.matchB) { + mask |= 0x3ff << 20; + value |= (key.b & 0x3ff) << 20; + } + if (key.matchG) { + mask |= 0x3ff << 10; + value |= (key.g & 0x3ff) << 10; + } + if (key.matchR) { + mask |= 0x3ff << 0; + value |= (key.r & 0x3ff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + nvAssert(!"Unhandled format in nvEvo1PackColorKey"); + break; + } + + *pMask = mask; + *pValue = value; +} + +static NvBool EvoOverlayCompositionControlFromNvKmsCompositionParams( + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NvU32 *pMode, + NvU32 *pColorKeyValue, + NvU32 *pColorKeyMask) +{ + const struct NvKmsCompositionParams *pBaseCompParams = + &pBaseHwState->composition; + const struct NvKmsCompositionParams *pOverlayCompParams = + &pOverlayHwState->composition; + + switch (pOverlayCompParams->colorKeySelect) { + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE: + if (pOverlayCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) { + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE; + *pColorKeyValue = *pColorKeyMask = 0; + } else { + return FALSE; + } + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC: + if ((pOverlayCompParams->blendingMode[0] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT)) { + + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING; + + if (pOverlayHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + EvoPackColorKey91(pOverlayHwState->pSurfaceEvo[NVKMS_LEFT]->format, + pOverlayCompParams->colorKey, + pColorKeyValue, + pColorKeyMask); + } else { + *pColorKeyValue = *pColorKeyMask = 0; + } + + } else { + return FALSE; + } + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST: + if ((pBaseCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) { + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE; + *pColorKeyValue = *pColorKeyMask = 0; + } else if ((pBaseCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) && + (pOverlayCompParams->blendingMode[0] == + 
NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT)) { + + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING; + + if (pBaseHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + EvoPackColorKey91(pBaseHwState->pSurfaceEvo[NVKMS_LEFT]->format, + pBaseCompParams->colorKey, + pColorKeyValue, + pColorKeyMask); + } else { + *pColorKeyValue = *pColorKeyMask = 0; + } + + } else { + return FALSE; + } + break; + default: + return FALSE; + } + + return TRUE; +} + +static void +EvoPushUpdateComposition(NVDevEvoPtr pDevEvo, + const int head, + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + + /* Composition is always programmed through the overlay channel. */ + NVEvoChannelPtr pChannel = pDevEvo->overlay[head]; + NvU32 colorKeyValue = 0, colorKeyMask = 0; + NvU32 compositionModeValue = 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (!EvoOverlayCompositionControlFromNvKmsCompositionParams( + pBaseHwState, pOverlayHwState, + &compositionModeValue, + &colorKeyValue, + &colorKeyMask)) { + /* + * composition mode is validated during + * nvUpdateFlipEvoHwState(), so it should always be valid when + * we get here. + */ + nvAssert(!"Invalid composition params"); + return; + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, compositionModeValue); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_KEY_COLOR_LO, 2); + nvDmaSetEvoMethodData(pChannel, colorKeyValue); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_KEY_MASK_LO, 2); + nvDmaSetEvoMethodData(pChannel, colorKeyMask); + nvDmaSetEvoMethodData(pChannel, 0); +} + +/* + * The LUT entries in INDEX_1025_UNITY_RANGE have 16 bits, with the + * black value at 24576, and the white at 49151. Since the effective + * range is 16384, we treat this as a 14-bit LUT. However, we need to + * clear the low 3 bits to WAR hardware bug 813188. This gives us + * 14-bit LUT values, but only 11 bits of precision. + */ +static inline NvU16 ColorToLUTEntry(NvU16 val) +{ + const NvU16 val14bit = val >> 2; + return (val14bit & ~7) + 24576; +} + +/* In INDEX_1025_UNITY_RANGE, the LUT indices for color depths with less + * than 10 bpc are the indices you'd have in 257-entry mode multiplied + * by four. So, you under-replicate all but the two least significant bits. + * Since when is EVO supposed to make sense? 
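+ *
+ * Concretely, a sketch of the index math used below:
+ *
+ *     lutIndex = PALETTE_DEPTH_SHIFT(i, 5) << 2;   depth 15 (R, G, B)
+ *     lutIndex = PALETTE_DEPTH_SHIFT(i, 6) << 2;   depth 16 (G only)
+ *     lutIndex = i << 2;                           depth 8 and 24
+ *     lutIndex = i;                                depth 30 (10 bpc)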
+ */ +static void +EvoFillLUTSurface90(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth) +{ + int i, lutIndex; + + switch (depth) { + case 15: + for (i = 0; i < nColorMapEntries; i++) { + lutIndex = PALETTE_DEPTH_SHIFT(i, 5) << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + break; + case 16: + for (i = 0; i < nColorMapEntries; i++) { + pLUTBuffer[PALETTE_DEPTH_SHIFT(i, 6) << 2].Green = ColorToLUTEntry(green[i]); + if (i < 32) { + lutIndex = PALETTE_DEPTH_SHIFT(i, 5) << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + } + break; + case 8: + case 24: + for (i = 0; i < nColorMapEntries; i++) { + lutIndex = i << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + break; + case 30: + for (i = 0; i < nColorMapEntries; i++) { + pLUTBuffer[i].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[i].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[i].Blue = ColorToLUTEntry(blue[i]); + } + break; + default: + nvAssert(!"invalid depth"); + return; + } +} + +static void +EvoPushSetLUTContextDmaMethodsForOneSd(NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 ctxdma, + NvBool enableBaseLut, + const NvBool enableOutputLut, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU64 offset; + const NVSurfaceEvoRec *pCoreSurfaceEvo = + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]; + const NvBool surfaceDepth8 = (pCoreSurfaceEvo != NULL) ? + (pCoreSurfaceEvo->format == NvKmsSurfaceMemoryFormatI8) : FALSE; + + nvAssert(nvPeekEvoSubDevMask(pDevEvo) == NVBIT(sd)); + + // Depth 8 requires the base LUT to be enabled. + if (ctxdma && !enableBaseLut && surfaceDepth8) { + // TODO: Is this still required? Callers should specify the LUT at + // modeset time now. + enableBaseLut = TRUE; + } + + nvAssert(ctxdma || (!enableBaseLut && !enableOutputLut)); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Program the base LUT */ + + offset = offsetof(NVEvoLutDataRec, base); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_LO(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _MODE, _INDEX_1025_UNITY_RANGE) | + (enableBaseLut ? DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _ENABLE) : + DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _DISABLE)) | + DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_HI(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_BASE_LUT_HI, _ORIGIN, offset >> 8)); + + /* Program the output LUT */ + + offset = offsetof(NVEvoLutDataRec, output); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OUTPUT_LUT_LO(head), 1); + nvDmaSetEvoMethodData(pChannel, + (enableOutputLut ? 
DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _ENABLE, _ENABLE) : + DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _ENABLE, _DISABLE)) | + DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _MODE, _INTERPOLATE_1025_UNITY_RANGE) | + DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OUTPUT_LUT_HI(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OUTPUT_LUT_HI, _ORIGIN, offset >> 8)); + + /* Set the ctxdma that's used by both LUTs */ + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma)); + + /* + * Use this backdoor to disable "wide pipe" underreplication during + * expansion of color components into the display pipe. Underreplication + * of a non-zero 8-bit color to more than 8 bits causes lookups to fall + * between LUT entries in a 256-entry LUT, which we don't want. See bug + * 734919 for details. + * The "wide pipe" may also cause scanout of 8-bit data to an 8-bit OR to + * not be a straight passthrough (bug 895401). + */ + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _WIDE_PIPE_CRC, _DISABLE)); +} + +static void EvoSetLUTContextDma90(const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + + const NvBool coreChannelCtxDmaNonNull = + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] != NULL; + const NvU32 ctxdma = (pLutSurfEvo != NULL) ? pLutSurfEvo->dispCtxDma : 0; + + /* + * If the core channel doesn't have a scanout surface set, then setting the + * LUT context DMA will cause an exception. 
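+     *
+     * Hence the guard below (sketch): skip pushing the LUT methods
+     * entirely,
+     *
+     *     if (!coreChannelCtxDmaNonNull && ctxdma) {
+     *         return;
+     *     }
+     *
+     * and rely on a later core-surface update to program the LUT.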
+ */ + if (!coreChannelCtxDmaNonNull && ctxdma) { + return; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoPushSetLUTContextDmaMethodsForOneSd( + pDevEvo, sd, head, ctxdma, enableBaseLut, enableOutputLut, + updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +#define NV_EVO2_CAP_GET_PIN(cl, n, pEvoCaps, word, name, idx, pCaps) \ + (pEvoCaps)->pin[(idx)].flipLock = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _FLIP_LOCK, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); \ + (pEvoCaps)->pin[(idx)].stereo = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _STEREO, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); \ + (pEvoCaps)->pin[(idx)].scanLock = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _SCAN_LOCK, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); + +/* Take the max of MAX_PIXELS_t_TAP422 and MAX_PIXELS_t_TAP444 */ +#define NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, x, t, pCaps) \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_##t##TAPS].maxPixelsVTaps = \ + NV_MAX(REF_VAL(NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x##_MAX_PIXELS##t##TAP422, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x]), \ + REF_VAL(NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x##_MAX_PIXELS##t##TAP444, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x])) + +#define NV_EVO2_CAP_GET_HEAD(cl, n, pEvoCaps, i, x, y, z, pCaps) \ + (pEvoCaps)->head[(i)].usable = TRUE; \ + (pEvoCaps)->head[(i)].scalerCaps.present = TRUE; \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, x, 5, pCaps); \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, y, 3, pCaps); \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, z, 2, pCaps); \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_8TAPS].maxHDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_5TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_3TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_2TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_2TAPS].maxHDownscaleFactor = NV_U16_MAX; + +#define NV_EVO2_CAP_GET_SOR(cl, n, pEvoCaps, i, x, y, pCaps) \ + (pEvoCaps)->sor[(i)].dualTMDS = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_CAP_SOR##i##_##x, \ + _DUAL_TMDS, _TRUE, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_SOR##i##_##x]); \ + (pEvoCaps)->sor[(i)].maxTMDSClkKHz = \ + DRF_VAL(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_CAP_SOR##i##_##y, _TMDS_LVDS_CLK_MAX, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_SOR##i##_##y]) * 10000; + +static void EvoParseCapabilityNotifier3(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + // Lock pins + // These magic numbers (5, 6, _A, etc.) are token-pasted into the + // NV917D_CORE_NOTIFIER_3_* macros and can't be autogenerated by the + // preprocessor. Architecture appears to have no plans to ever fix this. 
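+    /*
+     * Sketch of what one invocation below expands to after token pasting
+     * (first lock pin, capabilities word 5):
+     *
+     *     pEvoCaps->pin[0x0].flipLock =
+     *         FLD_TEST_DRF(917D_CORE_NOTIFIER_3, _CAPABILITIES_5,
+     *                      _LOCK_PIN0USAGE, _FLIP_LOCK,
+     *                      pCaps[NV917D_CORE_NOTIFIER_3_CAPABILITIES_5]);
+     */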
+ NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 0, 0x0, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 1, 0x1, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 2, 0x2, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 3, 0x3, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 4, 0x4, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 5, 0x5, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 6, 0x6, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 7, 0x7, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, 8, 0x8, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, 9, 0x9, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _A, 0xa, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _B, 0xb, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _C, 0xc, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _D, 0xd, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _E, 0xe, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _F, 0xf, pCaps); + + // Miscellaneous capabilities + pEvoCaps->misc.supportsInterlaced = TRUE; + pEvoCaps->misc.supportsSemiPlanar = FALSE; + pEvoCaps->misc.supportsPlanar = FALSE; + pEvoCaps->misc.supportsDSI = FALSE; + + // Heads + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 0, 53, 54, 55, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 1, 61, 62, 63, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 2, 69, 70, 71, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 3, 77, 78, 79, pCaps); + + // SORs + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 0, 20, 21, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 1, 22, 23, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 2, 24, 25, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 3, 26, 27, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 4, 28, 29, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 5, 30, 31, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 6, 32, 33, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 7, 34, 35, pCaps); + + // Don't need any PIOR caps currently. +} + +static NvBool EvoGetCapabilities90(NVDevEvoPtr pDevEvo) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVDispEvoPtr pDispEvo; + unsigned int i, sd; + struct NvKmsRRParams rrParams = { NVKMS_ROTATION_0, FALSE, FALSE }; + NvU8 layer; + + nvAssert(nvPeekEvoSubDevMask(pDevEvo) == SUBDEVICE_MASK_ALL); + + /* Main layer position and size updates are not supported on EVO. */ + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportsWindowMode = + (layer != NVKMS_MAIN_LAYER); + } + + pDevEvo->caps.cursorCompositionCaps = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES, + }, + }, + } + }; + + /* Base doesn't support any composition with underlying layers. 
*/ + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + }, + }; + + pDevEvo->caps.layerCaps[NVKMS_OVERLAY_LAYER].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + }, + }; + + pDevEvo->caps.validLayerRRTransforms |= + NVBIT(NvKmsRRParamsToCapBit(&rrParams)); + + for (i = NvKmsSurfaceMemoryFormatMin; + i <= NvKmsSurfaceMemoryFormatMax; + i++) { + if (nvHwFormatFromKmsFormat90(i) != 0) { + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + + if (EvoOverlayFormatFromKmsFormat91(i) != 0) { + pDevEvo->caps.layerCaps[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + } + + EvoSetNotifierMethods90(pDevEvo, + pChannel, + TRUE /* notify */, + TRUE /* awaken */, + 0 /* notifier */); + + /* Initialize the capability notifiers. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + nvWriteEvoCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_CAPABILITIES_4, + DRF_DEF(917D_CORE_NOTIFIER_3, _CAPABILITIES_4, _DONE, _FALSE)); + } + + /* Tell the hardware to fill in the notifier. 
*/ + nvDmaSetStartEvoMethod(pChannel, NV917D_GET_CAPABILITIES, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaKickoffEvo(pChannel); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NVEvoSubDevPtr pEvoSubDev; + volatile NvU32 *pCaps; + + nvEvoWaitForCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_CAPABILITIES_4, + DRF_BASE(NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE), + NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE); + + pEvoSubDev = &pDevEvo->gpus[sd]; + pCaps = pDevEvo->core->notifiersDma[sd].subDeviceAddress[sd]; + + nvkms_memset(&pEvoSubDev->capabilities, 0, + sizeof(pEvoSubDev->capabilities)); + EvoParseCapabilityNotifier3(&pEvoSubDev->capabilities, pCaps); + } + + /* Reset notifier state so it isn't on for future updates */ + EvoSetNotifierMethods90(pDevEvo, + pChannel, + FALSE /* notify */, + FALSE /* awaken */, + 0 /* notifier */); + nvDmaKickoffEvo(pChannel); + + return TRUE; +} + +static void EvoSetViewportPointIn90(NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // Set the input viewport point + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_POINT_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_IN, _X, x) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_IN, _Y, y)); +} + +static void EvoSetOutputScaler90(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort; + NvU32 setControlOutputScaler = 0; + NvU32 vTapsHw = 0, hTapsHw = 0; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + switch (pViewPort->vTaps) { + case NV_EVO_SCALER_5TAPS: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5; + break; + case NV_EVO_SCALER_3TAPS: + // XXX TAPS_3_ADAPTIVE instead? 
--> I think only allowed with interlaced + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3; + break; + case NV_EVO_SCALER_2TAPS: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2; + break; + case NV_EVO_SCALER_8TAPS: + nvAssert(!"Unknown pHeadState->vTaps"); + // fall through + case NV_EVO_SCALER_1TAP: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1; + break; + } + switch (pViewPort->hTaps) { + case NV_EVO_SCALER_8TAPS: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8; + break; + case NV_EVO_SCALER_2TAPS: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + break; + case NV_EVO_SCALER_5TAPS: + case NV_EVO_SCALER_3TAPS: + nvAssert(!"Unknown pHeadState->hTaps"); + // fall through + case NV_EVO_SCALER_1TAP: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1; + break; + } + setControlOutputScaler = + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _HORIZONTAL_TAPS, + hTapsHw) | + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _VERTICAL_TAPS, + vTapsHw); + + if (nvIsImageSharpeningAvailable(&pHeadState->timings.viewPort)) { + setControlOutputScaler = + FLD_SET_DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, + _HRESPONSE_BIAS, imageSharpeningValue, + setControlOutputScaler); + + setControlOutputScaler = + FLD_SET_DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, + _VRESPONSE_BIAS, imageSharpeningValue, + setControlOutputScaler); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER(head), 1); + nvDmaSetEvoMethodData(pChannel, setControlOutputScaler); +} + +static void EvoSetViewportInOut90(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* The input viewport shouldn't vary. 
*/ + nvAssert(pViewPortMin->in.width == pViewPort->in.width); + nvAssert(pViewPortMax->in.width == pViewPort->in.width); + nvAssert(pViewPortMin->in.height == pViewPort->in.height); + nvAssert(pViewPortMax->in.height == pViewPort->in.height); + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_IN, _WIDTH, pViewPort->in.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_IN, _HEIGHT, pViewPort->in.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_X, pViewPort->out.xAdjust) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_Y, pViewPort->out.yAdjust)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT, _WIDTH, pViewPort->out.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT, _HEIGHT, pViewPort->out.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MIN, _WIDTH, pViewPortMin->out.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MIN, _HEIGHT, pViewPortMin->out.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MAX, _WIDTH, pViewPortMax->out.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MAX, _HEIGHT, pViewPortMax->out.height)); + +} + + +/*! + * Compute the 917D_HEAD_SET_CONTROL_CURSOR method value. + * + * This function also validates that the given NVSurfaceEvoRec can be + * used as a cursor image. + * + * Pre-nvdisplay core channel classes have the same layout of the + * *7D_HEAD_SET_CONTROL_CURSOR method value. + + * + * \param[in] pDevEvo The device on which the cursor will be programmed. + * \param[in] pSurfaceEvo The surface to be used as the cursor image. + * \param[out] pValue The 917D_HEAD_SET_CONTROL_CURSOR method value. + + * \return If TRUE, the surface can be used as a cursor image, and + * pValue contains the method value. If FALSE, the surface + * cannot be used as a cursor image. + */ +NvBool nvEvoGetHeadSetControlCursorValue90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue) +{ + NvU32 value = 0; + + if (pSurfaceEvo == NULL) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _DISABLE); + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + goto done; + } else { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _ENABLE); + } + + /* The cursor must always be pitch. */ + + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + /* + * The only supported cursor image memory format is A8R8G8B8. + */ + if (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA8R8G8B8) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + } else { + return FALSE; + } + + /* + * The cursor only supports a few image sizes. 
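+     * 32x32 and 64x64 are always accepted; 128x128 and 256x256 are
+     * accepted only when the cursor hardware reports a sufficient
+     * caps.maxSize.  Sketch:
+     *
+     *     64x64                          -> _SIZE, _W64_H64
+     *     256x256 (caps.maxSize >= 256)  -> _SIZE, _W256_H256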
+ */ + if ((pSurfaceEvo->widthInPixels == 32) && + (pSurfaceEvo->heightInPixels == 32)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W32_H32); + } else if ((pSurfaceEvo->widthInPixels == 64) && + (pSurfaceEvo->heightInPixels == 64)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W64_H64); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 128) && + (pSurfaceEvo->widthInPixels == 128) && + (pSurfaceEvo->heightInPixels == 128)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W128_H128); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 256) && + (pSurfaceEvo->widthInPixels == 256) && + (pSurfaceEvo->heightInPixels == 256)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W256_H256); + } else { + return FALSE; + } + + /* + * Hard code the cursor hotspot. + */ + value |= DRF_NUM(927D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_Y, 0); + value |= DRF_NUM(927D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_X, 0); + +done: + + if (pValue != NULL) { + *pValue = value; + } + + return TRUE; +} + +static void EvoSetCursorImage(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 ctxdma = pSurfaceEvo ? pSurfaceEvo->planes[0].ctxDma : 0; + const NvU64 offset = pSurfaceEvo ? pSurfaceEvo->planes[0].offset : 0; + NvU32 headSetControlCursorValue = 0; + NvBool ret; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(pCursorCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE); + nvAssert(NVBIT(pCursorCompParams->blendingMode[1]) & + NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES); + nvAssert(!pSurfaceEvo || ctxdma); + + ret = nvEvoGetHeadSetControlCursorValue90(pDevEvo, pSurfaceEvo, + &headSetControlCursorValue); + /* + * The caller should have already validated the surface, so there + * shouldn't be a failure. + */ + if (!ret) { + nvAssert(!"Could not construct HEAD_SET_CONTROL_CURSOR value"); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_PRESENT_CONTROL_CURSOR, _MODE, _MONO)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OFFSETS_CURSOR(head, 0), 4); + // The cursor has its own context DMA. + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OFFSETS_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OFFSETS_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMAS_CURSOR, _HANDLE, ctxdma)); + // Always set the right cursor context DMA. + // HW will just ignore this if it is not in stereo cursor mode. 
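+    /*
+     * The count=4 burst starting at HEAD_SET_OFFSETS_CURSOR above is
+     * assumed to cover, in method order: left-eye offset, right-eye
+     * offset, left-eye cursor ctxdma, and the right-eye cursor ctxdma
+     * written below; the data pushed matches that layout.
+     */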
+ nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMAS_CURSOR, _HANDLE, ctxdma)); + + switch (pCursorCompParams->blendingMode[1]) { + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + headSetControlCursorValue |= + DRF_DEF(917D, _HEAD_SET_CONTROL_CURSOR, _COMPOSITION, _ALPHA_BLEND); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + headSetControlCursorValue |= + DRF_DEF(917D, _HEAD_SET_CONTROL_CURSOR, _COMPOSITION, _PREMULT_ALPHA_BLEND); + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "%s: composition mode %d not supported for cursor", + __func__, pCursorCompParams->blendingMode[1]); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, headSetControlCursorValue); +} + +static void EvoSetCursorImage91(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!((nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)))) { + continue; + } + + /* + * Set up the cursor surface: a cursor surface is allowed only if + * there's a non-NULL ISO ctxdma. + */ + if (pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] == NULL && + pSurfaceEvo != NULL) { + continue; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoSetCursorImage(pDevEvo, + head, + pSurfaceEvo, + updateState, + pCursorCompParams); + nvPopEvoSubDevMask(pDevEvo); + } +} + +static NvBool EvoValidateCursorSurface90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + return nvEvoGetHeadSetControlCursorValue90(pDevEvo, pSurfaceEvo, NULL); +} + +/* + * The 'sourceFetchRect' parameter is ignored by this function because there are + * no format-dependent restrictions for the source fetch rectangle on EVO. 
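+ *
+ * Typical use (sketch; hwFormatOut may be NULL when the caller only
+ * needs a yes/no answer):
+ *
+ *     NvU32 hwFormat;
+ *     if (!EvoValidateWindowFormat90(format, NULL, &hwFormat)) {
+ *         return FALSE;
+ *     }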
+ */ +static NvBool EvoValidateWindowFormat90( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + const NvU32 hwFormat = nvHwFormatFromKmsFormat90(format); + + if (hwFormat == 0) { + return FALSE; + } + + if (hwFormatOut != NULL) { + *hwFormatOut = hwFormat; + } + + return TRUE; +} + +static void EvoInitCompNotifier3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvWriteEvoCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_DEF(917D_CORE_NOTIFIER_3, _COMPLETION_0, _DONE, _FALSE)); +} + +static NvBool EvoIsCompNotifierComplete3(NVDispEvoPtr pDispEvo, int idx) { + return nvEvoIsCoreNotifierComplete(pDispEvo, + NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_BASE(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE); +} + +static void EvoWaitForCompNotifier3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvEvoWaitForCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_BASE(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE); +} + +static void EvoSetDither91(NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ditherControl; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enabled) { + ditherControl = DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _ENABLE); + + switch (type) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _BITS, _DITHER_TO_6_BITS); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _BITS, _DITHER_TO_8_BITS); + break; + default: + nvAssert(!"Unknown ditherType"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF: + ditherControl = NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE; + break; + } + + } else { + ditherControl = DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _DISABLE); + } + + switch (algo) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_ERR_ACC); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _TEMPORAL); + break; + default: + nvAssert(!"Unknown DitherAlgo"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN: + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_ERR_ACC); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DITHER_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, ditherControl); +} + +static void EvoSetStallLock94(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if 
(enable) {
+        nvDmaSetStartEvoMethod(pChannel, NV947D_HEAD_SET_STALL_LOCK(head), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _ENABLE, _TRUE) |
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _MODE, _ONE_SHOT) |
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, _UNSPECIFIED) |
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _LINE_LOCK));
+    } else {
+        nvDmaSetStartEvoMethod(pChannel, NV947D_HEAD_SET_STALL_LOCK(head), 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _ENABLE, _FALSE));
+    }
+}
+
+static NvBool ForceIdleBaseChannel(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd)
+{
+    NV5070_CTRL_CMD_STOP_BASE_PARAMS stopParams = { };
+    NvNotification *pNotifyData = pChannel->notifiersDma[sd].subDeviceAddress[sd];
+    NvU64 startTime = 0;
+    const NvU32 timeout = 2000000; // 2 seconds
+    NvU32 ret;
+
+    nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0);
+
+    pNotifyData->status = NV5070_NOTIFICATION_STATUS_IN_PROGRESS;
+
+    stopParams.base.subdeviceIndex = sd;
+    stopParams.channelInstance = pChannel->instance;
+    stopParams.notifyMode = NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE;
+    stopParams.hNotifierCtxDma = pChannel->notifiersDma[sd].ctxHandle;
+    stopParams.offset = 0;
+    stopParams.hEvent = 0;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_STOP_BASE,
+                         &stopParams, sizeof(stopParams));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"STOP_BASE failed");
+        return FALSE;
+    }
+
+    do {
+        if (pNotifyData->status == NV5070_NOTIFICATION_STATUS_DONE_SUCCESS) {
+            break;
+        }
+
+        if (nvExceedsTimeoutUSec(&startTime, timeout)) {
+            nvAssert(!"STOP_BASE timed out");
+            return FALSE;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    return TRUE;
+}
+
+static NvBool ForceIdleOverlayChannel(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd)
+{
+    NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS stopParams = { };
+    NvNotification *pNotifyData = pChannel->notifiersDma[sd].subDeviceAddress[sd];
+    NvU64 startTime = 0;
+    const NvU32 timeout = 2000000; // 2 seconds
+    NvU32 ret;
+
+    nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0);
+
+    pNotifyData->status = NV5070_NOTIFICATION_STATUS_IN_PROGRESS;
+
+    stopParams.base.subdeviceIndex = sd;
+    stopParams.channelInstance = pChannel->instance;
+    stopParams.notifyMode = NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE;
+    stopParams.hNotifierCtxDma = pChannel->notifiersDma[sd].ctxHandle;
+    stopParams.offset = 0;
+    stopParams.hEvent = 0;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_STOP_OVERLAY,
+                         &stopParams, sizeof(stopParams));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"STOP_OVERLAY failed");
+        return FALSE;
+    }
+
+    do {
+        if (pNotifyData->status == NV5070_NOTIFICATION_STATUS_DONE_SUCCESS) {
+            break;
+        }
+
+        if (nvExceedsTimeoutUSec(&startTime, timeout)) {
+            nvAssert(!"STOP_OVERLAY timed out");
+            return FALSE;
+        }
+
+        nvkms_yield();
+
+    } while (TRUE);
+
+    return TRUE;
+}
+
+static NvBool EvoForceIdleSatelliteChannel90(
+    NVDevEvoPtr pDevEvo,
+    const NVEvoIdleChannelState *idleChannelState)
+{
+    NvU32 head, sd;
+    NvBool ret = TRUE;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        /*
+         * Forcing a channel to be idle is currently only implemented for
+         * base and overlay.
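+         * Any other channel type present in the requested mask causes the
+         * check below to return FALSE.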
+         */
+        if ((idleChannelState->subdev[sd].channelMask &
+             ~(NV_EVO_CHANNEL_MASK_BASE_ALL |
+               NV_EVO_CHANNEL_MASK_OVERLAY_ALL)) != 0) {
+
+            nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                "Forcing channel idle only implemented for base and overlay");
+            return FALSE;
+        }
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            const NVEvoChannelMask thisBaseMask =
+                DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE);
+            const NVEvoChannelMask thisOverlayMask =
+                DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE);
+
+            if (idleChannelState->subdev[sd].channelMask &
+                thisBaseMask) {
+
+                NVEvoChannelPtr pBaseChannel = pDevEvo->base[head];
+
+                if (!ForceIdleBaseChannel(pDevEvo, pBaseChannel, sd)) {
+                    ret = FALSE;
+                }
+            }
+
+            if (idleChannelState->subdev[sd].channelMask &
+                thisOverlayMask) {
+
+                NVEvoChannelPtr pOverlayChannel = pDevEvo->overlay[head];
+
+                if (!ForceIdleOverlayChannel(pDevEvo, pOverlayChannel, sd)) {
+                    ret = FALSE;
+                }
+            }
+        }
+    }
+
+    return ret;
+}
+
+static NvBool EvoAllocRmCtrlObject90(NVDevEvoPtr pDevEvo)
+{
+    /* Nothing to do for pre-nvdisplay */
+    return TRUE;
+}
+
+static void EvoFreeRmCtrlObject90(NVDevEvoPtr pDevEvo)
+{
+    /* Nothing to do for pre-nvdisplay */
+}
+
+static void EvoSetImmPointOut91(NVDevEvoPtr pDevEvo,
+                                NVEvoChannelPtr pChannel,
+                                NvU32 sd,
+                                NVEvoUpdateState *updateState,
+                                NvU16 x, NvU16 y)
+{
+    GK104DispOverlayImmControlPio *pOverlayImm =
+        pChannel->imm.u.pio->control[sd];
+
+    /* The only immediate channel we have is overlay. */
+    nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0);
+    nvAssert(pChannel->imm.type == NV_EVO_IMM_CHANNEL_PIO);
+    nvAssert(pOverlayImm != NULL);
+
+    /* Left eye */
+    pOverlayImm->SetPointsOut[0] =
+        DRF_NUM(917B, _SET_POINTS_OUT, _X, x) |
+        DRF_NUM(917B, _SET_POINTS_OUT, _Y, y);
+
+    pOverlayImm->Update =
+        DRF_DEF(917B, _UPDATE, _INTERLOCK_WITH_CORE, _DISABLE);
+}
+
+static void EvoStartHeadCRC32Capture90(NVDevEvoPtr pDevEvo,
+                                       NVEvoDmaPtr pDma,
+                                       NVConnectorEvoPtr pConnectorEvo,
+                                       const enum nvKmsTimingsProtocol protocol,
+                                       const NvU32 orIndex,
+                                       NvU32 head,
+                                       NvU32 sd,
+                                       NVEvoUpdateState *updateState)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NvU32 dmaCtx = pDma->ctxHandle;
+    NvU32 orOutput = 0;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    switch (pConnectorEvo->or.type) {
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:
+            orOutput =
+                NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(orIndex);
+            break;
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+            if (protocol == NVKMS_PROTOCOL_SOR_DP_A ||
+                protocol == NVKMS_PROTOCOL_SOR_DP_B) {
+                orOutput =
+                    NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(head);
+            } else {
+                orOutput =
+                    NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(orIndex);
+            }
+            break;
+        case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR:
+            orOutput =
+                NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(orIndex);
+            break;
+        default:
+            nvAssert(!"Invalid pConnectorEvo->or.type");
+            break;
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_CRC(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_CRC, _HANDLE, dmaCtx));
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, orOutput) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, _CORE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _TIMESTAMP_MODE, _FALSE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _FLIPLOCK_MODE, _FALSE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE));
+
+    /* Reset the CRC notifier */
+    nvEvoResetCRC32Notifier(pDma->subDeviceAddress[sd],
+                            NV917D_NOTIFIER_CRC_1_STATUS_0,
+                            DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE),
+                            NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE);
+}
+
+static void EvoStopHeadCRC32Capture90(NVDevEvoPtr pDevEvo,
+                                      NvU32 head,
+                                      NVEvoUpdateState *updateState)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_CRC(head), 1);
+    nvDmaSetEvoMethodData(pChannel, 0);
+
+    nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, _CORE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _TIMESTAMP_MODE, _FALSE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _FLIPLOCK_MODE, _FALSE) |
+        DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE));
+}
+
+/*!
+ * Queries the current head's CRC notifier and returns its values if
+ * successful.
+ *
+ * First waits for the hardware to finish writing to the CRC32 notifier, then
+ * reads the Compositor and SF/OR CRCs for numCRC32 frames. The CRC fields in
+ * the input array crc32 should be zero-initialized by the caller.
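+ *
+ * For example, with entry_count == 2, the frame 0 CRCs are read from
+ * CRC_ENTRY0_3 (compositor) and CRC_ENTRY0_4 (primary output), and the
+ * frame 1 CRCs from those offsets advanced by entry_stride
+ * (NV917D_NOTIFIER_CRC_1_CRC_ENTRY1_8 - NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4).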
+ *
+ * \param[in]  pDevEvo      NVKMS device pointer
+ * \param[in]  pDma         Pointer to DMA-mapped memory
+ * \param[in]  sd           Subdevice index
+ * \param[in]  entry_count  Number of independent frames to read CRCs from
+ * \param[out] crc32        Contains pointers to CRC output arrays
+ * \param[out] numCRC32     Number of CRC frames successfully read from DMA
+ *
+ * \return  TRUE if the CRCs were successfully read from DMA, otherwise FALSE
+ */
+static NvBool EvoQueryHeadCRC32_90(NVDevEvoPtr pDevEvo,
+                                   NVEvoDmaPtr pDma,
+                                   NvU32 sd,
+                                   NvU32 entry_count,
+                                   CRC32NotifierCrcOut *crc32,
+                                   NvU32 *numCRC32)
+{
+    volatile NvU32 *pCRC32Notifier = pDma->subDeviceAddress[sd];
+    const NvU32 entry_stride =
+        NV917D_NOTIFIER_CRC_1_CRC_ENTRY1_8 - NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4;
+    // Define how many/which variables to read from each CRCNotifierEntry struct
+    const CRC32NotifierEntryRec field_info[NV_EVO2_NUM_CRC_FIELDS] = {
+        {
+            .field_offset = NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3,
+            .field_base_bit =
+                DRF_BASE(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC),
+            .field_extent_bit =
+                DRF_EXTENT(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC),
+            .field_frame_values = crc32->compositorCrc32
+        },
+        {
+            .field_offset = NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4,
+            .field_base_bit =
+                DRF_BASE(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC),
+            .field_extent_bit =
+                DRF_EXTENT(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC),
+            .field_frame_values = crc32->outputCrc32
+        }
+    };
+    const CRC32NotifierEntryFlags flag_info[NV_EVO2_NUM_CRC_FLAGS] = {
+        {
+            .flag_base_bit =
+                DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT),
+            .flag_extent_bit =
+                DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT),
+            .flag_type = NVEvoCrc32NotifierFlagCount
+        },
+        {
+            .flag_base_bit =
+                DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW),
+            .flag_extent_bit =
+                DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW),
+            .flag_type = NVEvoCrc32NotifierFlagCrcOverflow
+        },
+        {
+            .flag_base_bit =
+                DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW),
+            .flag_extent_bit =
+                DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW),
+            .flag_type = NVEvoCrc32NotifierFlagCrcOverflow
+        }
+    };
+
+    if (!nvEvoWaitForCRC32Notifier(pCRC32Notifier,
+                                   NV917D_NOTIFIER_CRC_1_STATUS_0,
+                                   DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE),
+                                   DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE),
+                                   NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_TRUE)) {
+        return FALSE;
+    }
+
+    *numCRC32 = nvEvoReadCRC32Notifier(pCRC32Notifier,
+                                       entry_stride,
+                                       entry_count,
+                                       NV917D_NOTIFIER_CRC_1_STATUS_0, /* Status offset */
+                                       NV_EVO2_NUM_CRC_FIELDS,
+                                       NV_EVO2_NUM_CRC_FLAGS,
+                                       field_info,
+                                       flag_info);
+
+    nvEvoResetCRC32Notifier(pCRC32Notifier,
+                            NV917D_NOTIFIER_CRC_1_STATUS_0,
+                            DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE),
+                            NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE);
+
+    return TRUE;
+}
+
+static void EvoGetScanLine90(const NVDispEvoRec *pDispEvo,
+                             const NvU32 head,
+                             NvU16 *pScanLine,
+                             NvBool *pInBlankingPeriod)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    const NvU32 sd = pDispEvo->displayOwner;
+    const void *pDma = pDevEvo->base[head]->pb.control[sd];
+    NvU32 scanLine = nvDmaLoadPioMethod(pDma, NV917C_GET_SCANLINE);
+
+    /*
+     * This method immediately returns the value of the scanline currently
+     * being read by the DMI. This method is a channel method so it operates
+     * completely asynchronously from the processing of methods in the
+     * pushbuffer. A negative value indicates that the DMI is in vertical
+     * blanking.
Note that this is a PIO method that executes immediately. The
+     * coding of this value is as follows:
+     * If Line[15] == 0 (positive value)
+     *   then Line[14:0] is the post-aa resolved line currently being read by
+     *   the DMI.
+     * If Line[15] == 1 (negative value)
+     *   then Line[14:0] is the number of microseconds remaining in the
+     *   vertical blanking interval.
+     * Examples:
+     *   Line = 0x0192 - DMI is reading line 402 of the current buffer.
+     *   Line = 0x8023 - DMI is 35 uS from the end of vertical blanking.
+     */
+
+    if ((scanLine & NVBIT(15)) == 0) {
+        *pInBlankingPeriod = FALSE;
+        *pScanLine = scanLine & DRF_MASK(14:0);
+    } else {
+        *pInBlankingPeriod = TRUE;
+    }
+}
+
+static NvU32 EvoGetActiveViewportOffset94(NVDispEvoRec *pDispEvo, NvU32 head)
+{
+    NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS params = { };
+    NvU32 ret;
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+
+    params.base.subdeviceIndex = pDispEvo->displayOwner;
+    params.head = head;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE,
+                         &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to query active viewport offset");
+    }
+
+    return params.activeViewportBase;
+}
+
+static void
+EvoClearSurfaceUsage91(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo)
+{
+    NvU32 sd;
+    NvBool kickOff = FALSE;
+    NVEvoUpdateState updateState = { };
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        NvU32 head;
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            const struct NvKmsCscMatrix zeroCscMatrix = { };
+            const NVEvoSubDevHeadStateRec *pSdHeadState =
+                &pDevEvo->gpus[sd].headState[head];
+
+            /*
+             * If the given surface is being used in the background for the
+             * core surface programming (to satisfy EVO hardware
+             * constraints), clear that usage and reuse the client-specified
+             * base surface for the core channel programming.
+             */
+            if (pSurfaceEvo !=
+                pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]) {
+                continue;
+            }
+
+            nvPushEvoSubDevMask(pDevEvo, NVBIT(sd));
+            EvoPushSetCoreSurfaceMethodsForOneSd(pDevEvo, sd, head,
+                pSdHeadState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT],
+                &zeroCscMatrix, &updateState);
+            nvPopEvoSubDevMask(pDevEvo);
+            kickOff = TRUE;
+        }
+    }
+
+    if (kickOff) {
+        EvoUpdate91(pDevEvo, &updateState, TRUE /* releaseElv */);
+    }
+}
+
+static NvBool EvoComputeWindowScalingTaps91(const NVDevEvoRec *pDevEvo,
+                                            const NVEvoChannel *pChannel,
+                                            NVFlipChannelEvoHwState *pHwState)
+{
+    /* Window scaling isn't supported on EVO.
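+     * sizeIn must match sizeOut exactly; any viewport scaling is done by
+     * the core channel's output scaler (see SetOutputScaler) instead.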
+     */
+    if ((pHwState->sizeIn.width != pHwState->sizeOut.width) ||
+        (pHwState->sizeIn.height != pHwState->sizeOut.height))
+    {
+        return FALSE;
+    }
+
+    pHwState->hTaps = NV_EVO_SCALER_1TAP;
+    pHwState->vTaps = NV_EVO_SCALER_1TAP;
+
+    return TRUE;
+}
+
+static NvU32 GetAccelerators(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd)
+{
+    NV5070_CTRL_GET_ACCL_PARAMS params = { };
+    NvU32 ret;
+
+    params.base.subdeviceIndex = sd;
+    params.channelClass = pChannel->hwclass;
+    nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL);
+    params.channelInstance = pChannel->instance;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_GET_ACCL,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to retrieve accelerators");
+        return 0;
+    }
+
+    return params.accelerators;
+}
+
+static NvBool SetAccelerators(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd,
+    NvU32 accelerators,
+    NvU32 accelMask)
+{
+    NV5070_CTRL_SET_ACCL_PARAMS params = { };
+    NvU32 ret;
+
+    params.base.subdeviceIndex = sd;
+    params.channelClass = pChannel->hwclass;
+    nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL);
+    params.channelInstance = pChannel->instance;
+    params.accelerators = accelerators;
+    params.accelMask = accelMask;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NV5070_CTRL_CMD_SET_ACCL,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to set accelerators");
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+static void EvoAccelerateChannel91(NVDevEvoPtr pDevEvo,
+                                   NVEvoChannelPtr pChannel,
+                                   const NvU32 sd,
+                                   NvU32 *pOldAccelerators)
+{
+    /* Start with a conservative set of accelerators; may need to add more
+     * later. */
+    const NvU32 accelMask =
+        NV5070_CTRL_ACCL_IGNORE_PI |
+        NV5070_CTRL_ACCL_SKIP_SEMA |
+        NV5070_CTRL_ACCL_IGNORE_FLIPLOCK;
+
+    *pOldAccelerators = GetAccelerators(pDevEvo, pChannel, sd);
+
+    /* Accelerate the base channel. */
+    if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, accelMask)) {
+        nvAssert(!"Failed to set accelerators");
+    }
+}
+
+static void EvoResetChannelAccelerators91(NVDevEvoPtr pDevEvo,
+                                          NVEvoChannelPtr pChannel,
+                                          const NvU32 sd,
+                                          NvU32 oldAccelerators)
+{
+    /* Use the same accelerator mask that EvoAccelerateChannel91() set. */
+    const NvU32 accelMask =
+        NV5070_CTRL_ACCL_IGNORE_PI |
+        NV5070_CTRL_ACCL_SKIP_SEMA |
+        NV5070_CTRL_ACCL_IGNORE_FLIPLOCK;
+
+    /* Restore the base channel's previous accelerators.
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccelerators, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +NVEvoHAL nvEvo94 = { + EvoSetRasterParams91, /* SetRasterParams */ + EvoSetProcAmp90, /* SetProcAmp */ + EvoSetHeadControl90, /* SetHeadControl */ + EvoSetHeadRefClk90, /* SetHeadRefClk */ + EvoHeadSetControlOR90, /* HeadSetControlOR */ + EvoORSetControl90, /* ORSetControl */ + EvoHeadSetDisplayId90, /* HeadSetDisplayId */ + EvoSetUsageBounds90, /* SetUsageBounds */ + EvoUpdate91, /* Update */ + nvEvo1IsModePossible, /* IsModePossible */ + nvEvo1PrePostIMP, /* PrePostIMP */ + EvoSetNotifier90, /* SetNotifier */ + EvoGetCapabilities90, /* GetCapabilities */ + EvoFlip90, /* Flip */ + EvoFlipTransitionWAR90, /* FlipTransitionWAR */ + EvoFillLUTSurface90, /* FillLUTSurface */ + EvoSetLUTContextDma90, /* SetLUTContextDma */ + EvoSetOutputScaler90, /* SetOutputScaler */ + EvoSetViewportPointIn90, /* SetViewportPointIn */ + EvoSetViewportInOut90, /* SetViewportInOut */ + EvoSetCursorImage91, /* SetCursorImage */ + EvoValidateCursorSurface90, /* ValidateCursorSurface */ + EvoValidateWindowFormat90, /* ValidateWindowFormat */ + EvoInitCompNotifier3, /* InitCompNotifier */ + EvoIsCompNotifierComplete3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifier3, /* WaitForCompNotifier */ + EvoSetDither91, /* SetDither */ + EvoSetStallLock94, /* SetStallLock */ + NULL, /* SetDisplayRate */ + EvoInitChannel90, /* InitChannel */ + NULL, /* InitDefaultLut */ + EvoInitWindowMapping90, /* InitWindowMapping */ + nvEvo1IsChannelIdle, /* IsChannelIdle */ + nvEvo1IsChannelMethodPending, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannel91, /* AccelerateChannel */ + EvoResetChannelAccelerators91, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObject90, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObject90, /* FreeRmCtrlObject */ + EvoSetImmPointOut91, /* SetImmPointOut */ + EvoStartHeadCRC32Capture90, /* StartCRC32Capture */ + EvoStopHeadCRC32Capture90, /* StopCRC32Capture */ + EvoQueryHeadCRC32_90, /* QueryCRC32 */ + EvoGetScanLine90, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + nvEvo1SetDscParams, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffset94, /* GetActiveViewportOffset */ + EvoClearSurfaceUsage91, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTaps91, /* ComputeWindowScalingTaps */ + NULL, /* GetWindowScalingCaps */ + { /* caps */ + FALSE, /* supportsNonInterlockedUsageBoundsUpdate */ + FALSE, /* supportsDisplayRate */ + TRUE, /* supportsFlipLockRGStatus */ + FALSE, /* needDefaultLutSurface */ + FALSE, /* hasUnorm10OLUT */ + TRUE, /* supportsDigitalVibrance */ + TRUE, /* supportsImageSharpening */ + FALSE, /* supportsHDMIVRR */ + TRUE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + TRUE, /* supportsCoreLut */ + FALSE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + TRUE, /* requiresScalingTapsInBothDimensions */ + NV_EVO2_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_1TAP, /* minScalerTaps */ + }, +}; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c
new file mode 100644
index 0000000..21e346a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c
@@ -0,0 +1,6965 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file contains implementations of the EVO HAL methods for display class
+ * 3.x (also known as "nvdisplay").
+ */
+
+#include "nvkms-dma.h"
+#include "nvkms-types.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-surface.h"
+#include "nvkms-softfloat.h"
+#include "nvkms-evo.h"
+#include "nvkms-evo1.h"
+#include "nvkms-modeset-types.h"
+#include "nvkms-prealloc.h"
+#include "nv-float.h"
+
+#include <nvmisc.h>
+
+#include <class/clc372sw.h> // NVC372_DISPLAY_SW
+#include <class/clc373.h> // NVC373_DISP_CAPABILITIES
+#include <class/clc37b.h> // NVC37B_WINDOW_IMM_CHANNEL_DMA
+#include <class/clc37d.h> // NVC37D_CORE_CHANNEL_DMA
+#include <class/clc37dcrcnotif.h> // NVC37D_NOTIFIER_CRC
+#include <class/clc37dswspare.h> // NVC37D_HEAD_SET_SW_SPARE_*
+#include <class/clc37e.h> // NVC37E_WINDOW_CHANNEL_DMA
+#include <class/clc573.h> // NVC573_DISP_CAPABILITIES
+#include <class/clc57d.h> // NVC57D_CORE_CHANNEL_DMA
+#include <class/clc57e.h> // NVC57E_WINDOW_CHANNEL_DMA
+#include <class/clc57esw.h>
+#include <class/clc673.h> // NVC673_DISP_CAPABILITIES
+#include <class/clc67d.h> // NVC67D_CORE_CHANNEL_DMA
+#include <class/clc67e.h> // NVC67E_WINDOW_CHANNEL_DMA
+
+#include <ctrl/ctrlc370/ctrlc370chnc.h>
+#include <ctrl/ctrlc370/ctrlc370rg.h>
+#include <ctrl/ctrlc372/ctrlc372chnc.h>
+
+/** Number of CRC fields supported by NVC37D hardware (SF/SOR, Comp, RG) */
+#define NV_EVO3_NUM_CRC_FIELDS 3
+
+/** Number of CRC flags supported by NVC37D hardware (SF/SOR, Comp, and RG overflow, plus Count) */
+#define NV_EVO3_NUM_CRC_FLAGS 4
+
+static NvBool EvoIsChannelIdleC3(NVDevEvoPtr pDevEvo,
+                                 NVEvoChannelPtr pChan,
+                                 NvU32 sd,
+                                 NvBool *result);
+
+static void SetCsc00MatrixC5(NVEvoChannelPtr pChannel,
+                             const struct NvKmsCscMatrix *matrix);
+static void SetCsc11MatrixC5(NVEvoChannelPtr pChannel,
+                             const struct NvKmsCscMatrix *matrix);
+static void
+UpdateCompositionC3(NVDevEvoPtr pDevEvo,
+                    NVEvoChannelPtr pChannel,
+                    const struct NvKmsCompositionParams *pCompParams,
+                    NVEvoUpdateState *updateState);
+static void
+UpdateCompositionC5(NVDevEvoPtr pDevEvo,
+                    NVEvoChannelPtr pChannel,
+                    const struct NvKmsCompositionParams *pCompParams,
+                    NVEvoUpdateState *updateState,
+                    NvBool bypassComposition);
+
+ct_assert(NV_EVO_LOCK_PIN_0 >
+          NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1);
+
+/* nvdisplay has a maximum of 2 eyes and 3 planes per surface */
+ct_assert((NVKMS_MAX_EYES * NVKMS_MAX_PLANES_PER_SURFACE) == 6);
+
+#define NV_EVO3_SUPPORTED_DITHERING_MODES \
+    ((1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO)        | \
+     (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2) | \
+     (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2)  | \
+     (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL))
+
+#define NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES \
+    ((1 << NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)                    | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA)         | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)             | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA) | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA))
+
+/* Windows support all composition modes. */
+#define NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES \
+    ((1 << NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)                    | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT)               | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA)         | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)             | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA) | \
+     (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA))
+
+#define NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3 \
+    (DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_LUT, _USAGE_1025)      | \
+     DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2)  | \
+     DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))
+
+#define NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5 \
+    (DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _ILUT_ALLOWED, _TRUE) | \
+     DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _TMO_LUT_ALLOWED, _FALSE))
+
+static inline NvU8 EyeAndPlaneToCtxDmaIdx(const NvU8 eye, const NvU8 plane)
+{
+    /*
+     * See the definition of the SetContextDmaIso and SetOffset methods in the
+     * relevant nvdClass_01.mfs file to see how these method array indices are
+     * mapped.
+     */
+    nvAssert((eye < NVKMS_MAX_EYES) && (plane < NVKMS_MAX_PLANES_PER_SURFACE));
+
+    return eye + (plane << 1);
+}
+
+static void InitChannelCapsC3(NVDevEvoPtr pDevEvo,
+                              NVEvoChannelPtr pChannel)
+{
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0) {
+        static const NVEvoChannelCaps WindowCaps = {
+            /*
+             * Window classes always support timestamp flips, and allow full
+             * use of the 64-bit timestamp value.
+             */
+            .validTimeStampBits = 64,
+            /* Window classes always support tearing flips. */
+            .tearingFlips = TRUE,
+            .vrrTearingFlips = TRUE,
+            /* Window classes support per-eye stereo flips. */
+            .perEyeStereoFlips = TRUE,
+        };
+
+        pChannel->caps = WindowCaps;
+    }
+}
+
+// HW supports ratio = 1, 2 (downscaling), 4 (downscaling)
+#define NUM_SCALER_RATIOS 3
+
+// There are 16 phases stored in the matrix, but HW can derive the values of
+// phase +16 and -16 from phase 0. Therefore, SW loads the phase +16/-16
+// coeff values in phase 0.
+#define NUM_TAPS5_COEFF_PHASES 16
+
+// There are 5 coefficient values per phase (or matrix row), but SW doesn't need
+// to upload c2. So, the value here is set to 4.
+#define NUM_TAPS5_COEFF_VALUES 4
+
+// The coefficient values are obtained from bug 1953108 comment 10
+// Per MFS: However since all 5 coefficients have to add up to 1.0, only 4 need to be specified, and
+// HW can derive the missing one. The center coefficient is the one that is left out, so
+// if the 5 taps need weights (c0, c1, c2, c3, c4) then only (c0, c1, c3, c4) are stored,
+// and c2 is calculated by HW.
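+// (For reference, InitTaps5ScalerCoefficientsC5() below uploads each value at
+// index = (ratio << 6) | (phase << 2) | coeffIdx; e.g., ratio index 1,
+// phase 3, c1 lands at index (1 << 6) | (3 << 2) | 1 = 77.)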
+// Phase 0 is the center phase and the corresponding filter kernel is symmetrical: +// c0=c4, c1=c3 --> only c0 and c1 need to be stored. +// Phase 16 (and -16) is the edge phase and the corresponding filter kernels are: +// (0, c0, c1, c1, c0) for phase +16 +// (c0, c1, c1, c0, 0) for phase -16 +// The difference between +16 and -16 is automatically handled by HW. The table only needs +// to store c0 and c1 for either case. +// Therefore, based on MFS above, the matrix below contains the values loaded to HW. +// Real Phase 0 is commented for easy reference. +// Also, phase 16 values (last row) are commented, but its C0,C1 values are loaded in row 0/phase 0. +static const NvU32 scalerTaps5Coeff[NUM_SCALER_RATIOS][NUM_TAPS5_COEFF_PHASES][NUM_TAPS5_COEFF_VALUES] = +{ + // ratio = 1 + {{ 0 , 0 , -16 , 144}, // real phase 0:{ 0, 0, /*256,*/ 0, 0 }, + { 0 , -5 , /*255,*/ 5 , 0}, + { 0 , -9 , /*254,*/ 11 , 0}, + { -1 , -12 , /*251,*/ 18 , -1}, + { -1 , -15 , /*248,*/ 25 , -1}, + { -1 , -18 , /*243,*/ 33 , -2}, + { -2 , -20 , /*238,*/ 42 , -3}, + { -2 , -21 , /*232,*/ 51 , -3}, + { -3 , -22 , /*225,*/ 60 , -5}, + { -3 , -22 , /*217,*/ 70 , -6}, + { -4 , -22 , /*208,*/ 81 , -7}, + { -4 , -22 , /*199,*/ 91 , -9}, + { -5 , -21 , /*190,*/ 102 , -10}, + { -5 , -20 , /*180,*/ 113 , -12}, + { -5 , -19 , /*169,*/ 125 , -13}, + { -6 , -18 , /*158,*/ 136 , -15} + // real phase 16: { 0 , -16 , 144, 144 , -16 } + }, + // ratio = 2 + {{ 3, 60 , 20 , 108 }, // real phase 0: {3 , 60 , 130 , 60 , 3 }, + { 3 , 57 , /*130,*/ 63 , 4 }, + { 2 , 54 , /*130,*/ 66 , 4 }, + { 2 , 51 , /*129,*/ 69 , 5 }, + { 2 , 48 , /*128,*/ 72 , 6 }, + { 1 , 45 , /*128,*/ 75 , 7 }, + { 1 , 43 , /*127,*/ 78 , 7 }, + { 1 , 40 , /*125,*/ 81 , 8 }, + { 1 , 37 , /*124,*/ 84 , 9 }, + { 0 , 35 , /*122,*/ 88 , 10 }, + { 0 , 33 , /*121,*/ 91 , 12 }, + { 0 , 30 , /*119,*/ 94 , 13 }, + { 0 , 28 , /*117,*/ 97 , 14 }, + { 0 , 26 , /*115,*/ 99 , 16 }, + { 0 , 24 , /*112,*/ 102 , 17 }, + { 0 , 22 , /*110,*/ 105 , 19 }, + // real phase 16:{0 , 20 , 108 , 108 , 20 }, + }, + // ratio = 4 + {{ 4 , 62 , 23 , 105 }, // real phase 0: {4 , 62 , 124 , 62 , 4 , + { 4 , 59 , /*124,*/ 64 , 5 }, + { 3 , 56 , /*124,*/ 67 , 6 }, + { 3 , 53 , /*123,*/ 70 , 7 }, + { 2 , 51 , /*123,*/ 73 , 8 }, + { 2 , 48 , /*122,*/ 76 , 8 }, + { 2 , 45 , /*121,*/ 79 , 9 }, + { 1 , 43 , /*120,*/ 81 , 10 }, + { 1 , 40 , /*119,*/ 84 , 12 }, + { 1 , 38 , /*117,*/ 87 , 13 }, + { 1 , 36 , /*116,*/ 90 , 14 }, + { 0 , 34 , /*114,*/ 92 , 15 }, + { 0 , 31 , /*113,*/ 95 , 17 }, + { 0 , 29 , /*111,*/ 97 , 18 }, + { 0 , 27 , /*109,*/ 100 , 20 }, + { 0 , 25 , /*107,*/ 102 , 22 }, + // real phase 16: {0 , 23 , 105 , 105 , 23 }, + } +}; + +static void InitScalerCoefficientsPrecomp5(NVEvoChannelPtr pChannel, + NvU32 coeff, NvU32 index) +{ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_INPUT_SCALER_COEFF_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_INPUT_SCALER_COEFF_VALUE, _DATA, coeff) | + DRF_NUM(C57E, _SET_INPUT_SCALER_COEFF_VALUE, _INDEX, index)); +} + +static void InitScalerCoefficientsPostcomp5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 coeff, NvU32 index) +{ + NvU32 h; + + for (h = 0; h < pDevEvo->numHeads; h++) { + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(h), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _DATA, coeff) | + DRF_NUM(C57D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _INDEX, index)); + } +} + +static void InitTaps5ScalerCoefficientsC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr 
pChannel, + NvBool isPrecomp) +{ + NvU8 ratio; + + if (isPrecomp) { + const NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + const NVEvoScalerCaps *pScalerCaps = &pWinCaps->scalerCaps; + + if (!pScalerCaps->present) { + return; + } + } + + for (ratio = 0; ratio < NUM_SCALER_RATIOS; ratio++) { + NvU8 phase; + for (phase = 0; phase < NUM_TAPS5_COEFF_PHASES; phase++) { + NvU8 coeffIdx; + for (coeffIdx = 0; coeffIdx < NUM_TAPS5_COEFF_VALUES; coeffIdx++) { + NvU32 coeff = scalerTaps5Coeff[ratio][phase][coeffIdx]; + NvU32 index = ratio << 6 | phase << 2 | coeffIdx; + + if (isPrecomp) { + InitScalerCoefficientsPrecomp5(pChannel, coeff, index); + } else { + InitScalerCoefficientsPostcomp5(pDevEvo, + pChannel, coeff, index); + } + } + } + } +} + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix Rec709RGBToLMS = {{ + { 0x4bb8, 0x9f84, 0x14c8, 0 }, + { 0x27fc, 0xba2c, 0x1dd4, 0 }, + { 0x8fc, 0x2818, 0xcef0, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix LMSToRec709RGB = {{ + { 0x62c48, 0x1aadf4, 0x25a8, 0 }, + { 0x1ead18, 0x28f64, 0x1fc390, 0 }, + { 0x1ffd00, 0x1fbc34, 0x146c4, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix LMSToRec2020RGB = {{ + { 0x36fc0, 0x1d7e54, 0x11e0, 0 }, + { 0x1f3584, 0x1fbc8, 0x1fcebc, 0 }, + { 0x1ff964, 0x1fe6a4, 0x11ff4, 0 }, +}}; + +/* + * The two arrays below specify the PQ OETF transfer function that's used to + * convert from linear LMS FP16 to PQ encoded L'M'S' fixed-point. + */ +static const NvU32 OetfPQ512SegSizesLog2[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, + 5, +}; + +static const NvU16 OetfPQ512Entries[] = { + 0x0000, 0x000C, 0x0014, 0x001C, 0x0028, 0x003C, 0x005C, 0x008C, 0x00D0, 0x0134, 0x0184, 0x01C8, 0x0238, 0x029C, 0x033C, 0x03C4, + 0x043C, 0x04A4, 0x0504, 0x0560, 0x0600, 0x0690, 0x0714, 0x078C, 0x07FC, 0x0864, 0x08C8, 0x0924, 0x0980, 0x09D4, 0x0A24, 0x0A70, + 0x0B04, 0x0B90, 0x0C10, 0x0C88, 0x0CFC, 0x0D68, 0x0DD4, 0x0E38, 0x0EF4, 0x0FA4, 0x1048, 0x10E4, 0x1174, 0x1200, 0x1284, 0x1304, + 0x13F4, 0x14D0, 0x159C, 0x165C, 0x1714, 0x17C0, 0x1864, 0x1900, 0x1A28, 0x1B34, 0x1C30, 0x1D1C, 0x1DFC, 0x1ECC, 0x1F94, 0x2050, + 0x2104, 0x21B0, 0x2258, 0x22F8, 0x2390, 0x2424, 0x24B4, 0x2540, 0x25C4, 0x2648, 0x26C4, 0x2740, 0x27B8, 0x282C, 0x289C, 0x290C, + 0x29E0, 0x2AAC, 0x2B70, 0x2C2C, 0x2CE0, 0x2D90, 0x2E38, 0x2ED8, 0x2F74, 0x300C, 0x30A0, 0x3130, 0x31BC, 0x3244, 0x32C8, 0x3348, + 0x3440, 0x352C, 0x360C, 0x36E4, 0x37B4, 0x387C, 0x393C, 0x39F8, 0x3AA8, 0x3B58, 0x3C00, 0x3CA4, 0x3D44, 0x3DDC, 0x3E74, 0x3F04, + 0x401C, 0x4128, 0x4228, 0x431C, 0x4408, 0x44E8, 0x45C4, 0x4694, 0x475C, 0x4820, 0x48DC, 0x4994, 0x4A48, 0x4AF4, 0x4B9C, 0x4C3C, + 0x4D78, 0x4EA0, 0x4FBC, 0x50CC, 0x51D0, 0x52CC, 0x53BC, 0x54A0, 0x5580, 0x5658, 0x5728, 0x57F0, 0x58B4, 0x5974, 0x5A2C, 0x5ADC, + 0x5C34, 0x5D7C, 0x5EB4, 0x5FDC, 0x60F4, 0x6204, 0x630C, 0x6404, 0x64F8, 0x65E0, 0x66C4, 0x679C, 0x6870, 0x693C, 0x6A04, 0x6AC4, + 0x6C38, 0x6D94, 0x6EE4, 0x7020, 0x7150, 0x7274, 0x738C, 0x7498, 0x7598, 0x7694, 0x7784, 0x786C, 0x794C, 0x7A24, 0x7AF8, 0x7BC4, + 0x7D50, 0x7EC4, 0x8024, 0x8174, 0x82B4, 0x83E8, 0x850C, 0x8628, 0x8738, 0x883C, 0x8938, 0x8A2C, 0x8B18, 0x8BFC, 0x8CD8, 0x8DB0, + 0x8F4C, 0x90D0, 
0x9240, 0x939C, 0x94EC, 0x962C, 0x975C, 0x9880, 0x999C, 0x9AAC, 0x9BB0, 0x9CAC, 0x9DA0, 0x9E8C, 0x9F70, 0xA04C, + 0xA1F4, 0xA384, 0xA500, 0xA664, 0xA7BC, 0xA904, 0xAA3C, 0xAB6C, 0xAC8C, 0xADA0, 0xAEAC, 0xAFAC, 0xB0A4, 0xB194, 0xB27C, 0xB360, + 0xB510, 0xB6A4, 0xB824, 0xB994, 0xBAF0, 0xBC3C, 0xBD78, 0xBEA8, 0xBFCC, 0xC0E4, 0xC1F0, 0xC2F4, 0xC3F0, 0xC4E4, 0xC5CC, 0xC6B0, + 0xC78C, 0xC860, 0xC930, 0xC9F8, 0xCABC, 0xCB7C, 0xCC38, 0xCCEC, 0xCD9C, 0xCE48, 0xCEF0, 0xCF94, 0xD034, 0xD0D4, 0xD16C, 0xD200, + 0xD294, 0xD324, 0xD3B4, 0xD43C, 0xD4C4, 0xD54C, 0xD5CC, 0xD650, 0xD6CC, 0xD748, 0xD7C4, 0xD83C, 0xD8B0, 0xD924, 0xD994, 0xDA08, + 0xDAE0, 0xDBB4, 0xDC84, 0xDD4C, 0xDE10, 0xDECC, 0xDF84, 0xE038, 0xE0E8, 0xE194, 0xE238, 0xE2DC, 0xE37C, 0xE418, 0xE4B0, 0xE544, + 0xE5D4, 0xE664, 0xE6F0, 0xE778, 0xE800, 0xE884, 0xE904, 0xE984, 0xEA00, 0xEA7C, 0xEAF4, 0xEB68, 0xEBDC, 0xEC50, 0xECC0, 0xED30, + 0xEE08, 0xEED8, 0xEFA4, 0xF068, 0xF128, 0xF1E4, 0xF298, 0xF348, 0xF3F4, 0xF49C, 0xF540, 0xF5E0, 0xF67C, 0xF714, 0xF7A8, 0xF83C, + 0xF8CC, 0xF958, 0xF9E0, 0xFA68, 0xFAEC, 0xFB6C, 0xFBE8, 0xFC64, 0xFCE0, 0xFD58, 0xFDCC, 0xFE40, 0xFEB4, 0xFF24, 0xFF90, 0xFFFC, +}; + +/* + * The two arrays below specify the PQ EOTF transfer function that's used to + * convert from PQ encoded L'M'S' fixed-point to linear LMS FP16. This transfer + * function is the inverse of the OETF curve. + */ +static const NvU32 EotfPQ512SegSizesLog2[] = { + 6, 6, 4, 4, 4, 3, 4, 3, 3, 3, 2, 2, 2, 3, 3, 2, + 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 6, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1, 2, + 2, 1, 1, 2, 2, 2, 2, 1, 2, 1, 1, 2, 1, 4, 2, 2, +}; + +static const NvU16 EotfPQ512Entries[] = { + 0x0000, 0x0001, 0x0003, 0x0005, 0x0008, 0x000C, 0x0011, 0x0016, 0x001B, 0x0022, 0x0028, 0x002F, 0x0037, 0x003F, 0x0048, 0x0051, + 0x005A, 0x0064, 0x006F, 0x007A, 0x0085, 0x0091, 0x009E, 0x00AB, 0x00B8, 0x00C6, 0x00D4, 0x00E3, 0x00F3, 0x0102, 0x0113, 0x0123, + 0x0135, 0x0146, 0x0158, 0x016B, 0x017E, 0x0192, 0x01A6, 0x01BB, 0x01D0, 0x01E5, 0x01FC, 0x0212, 0x0229, 0x0241, 0x0259, 0x0272, + 0x028B, 0x02A4, 0x02BE, 0x02D9, 0x02F4, 0x0310, 0x032C, 0x0349, 0x0366, 0x0384, 0x03A2, 0x03C1, 0x03E0, 0x0400, 0x0421, 0x0442, + 0x0463, 0x0485, 0x04A8, 0x04CB, 0x04EF, 0x0513, 0x0538, 0x055D, 0x0583, 0x05AA, 0x05D1, 0x05F9, 0x0621, 0x064A, 0x0673, 0x069D, + 0x06C7, 0x06F3, 0x071E, 0x074B, 0x0777, 0x07A5, 0x07D3, 0x0801, 0x0819, 0x0830, 0x0849, 0x0861, 0x087A, 0x0893, 0x08AD, 0x08C7, + 0x08E1, 0x08FB, 0x0916, 0x0931, 0x094C, 0x0968, 0x0984, 0x09A0, 0x09BD, 0x09DA, 0x09F7, 0x0A15, 0x0A33, 0x0A51, 0x0A70, 0x0A8F, + 0x0AAE, 0x0ACE, 0x0AEE, 0x0B0E, 0x0B2F, 0x0B50, 0x0B71, 0x0B93, 0x0BB5, 0x0BD7, 0x0BFA, 0x0C0F, 0x0C20, 0x0C32, 0x0C44, 0x0C56, + 0x0C69, 0x0CB5, 0x0D03, 0x0D55, 0x0DA9, 0x0E01, 0x0E5B, 0x0EB9, 0x0F1B, 0x0F7F, 0x0FE7, 0x1029, 0x1061, 0x109A, 0x10D5, 0x1111, + 0x1150, 0x1190, 0x11D3, 0x1217, 0x125E, 0x12A6, 0x12F0, 0x133D, 0x138B, 0x13DC, 0x1417, 0x1442, 0x146D, 0x149A, 0x14C8, 0x14F7, + 0x1527, 0x1558, 0x158B, 0x15BF, 0x15F4, 0x162A, 0x1662, 0x169B, 0x16D5, 0x1711, 0x174E, 0x178C, 0x17CC, 0x1806, 0x1828, 0x184A, + 0x186D, 0x18B4, 0x18FF, 0x194D, 0x199E, 0x19F3, 0x1A4B, 0x1AA7, 0x1B06, 0x1B37, 0x1B69, 0x1B9B, 0x1BCF, 0x1C02, 0x1C1D, 0x1C38, + 0x1C54, 0x1C70, 0x1C8D, 0x1CAB, 0x1CC9, 0x1CE7, 0x1D06, 0x1D26, 0x1D46, 0x1D88, 0x1DCC, 0x1E13, 0x1E5C, 0x1EA8, 0x1EF6, 0x1F47, + 0x1F9A, 0x1FF1, 0x2025, 0x2053, 0x2082, 0x20B3, 0x20E6, 0x211A, 0x214F, 0x2187, 0x21C0, 0x21FA, 0x2237, 0x2275, 0x22B5, 0x22F7, + 0x233B, 0x23C9, 0x2430, 0x247F, 0x24D3, 0x252B, 0x2589, 
0x25EB, 0x2653, 0x26C1, 0x2734, 0x27AD, 0x2817, 0x2838, 0x285A, 0x287C, + 0x28A0, 0x28C5, 0x28EA, 0x2911, 0x2938, 0x2960, 0x298A, 0x29B4, 0x29DF, 0x2A0C, 0x2A39, 0x2A68, 0x2A98, 0x2AFA, 0x2B62, 0x2BCE, + 0x2C20, 0x2C5B, 0x2C99, 0x2CDA, 0x2D1E, 0x2D65, 0x2DB0, 0x2DFD, 0x2E4E, 0x2EA3, 0x2EFC, 0x2F58, 0x2FB8, 0x300E, 0x3043, 0x307A, + 0x30B3, 0x30D0, 0x30EE, 0x310D, 0x312C, 0x314C, 0x316D, 0x318E, 0x31B0, 0x31D3, 0x31F6, 0x321A, 0x323F, 0x3265, 0x328B, 0x32B2, + 0x32DA, 0x332D, 0x3383, 0x33DC, 0x341D, 0x344D, 0x347F, 0x34B4, 0x34EA, 0x3523, 0x355E, 0x359B, 0x35DB, 0x361D, 0x3662, 0x36A9, + 0x36F3, 0x3740, 0x3791, 0x37E4, 0x381D, 0x384A, 0x3879, 0x38A9, 0x38DB, 0x3910, 0x3946, 0x397E, 0x39B8, 0x39F5, 0x3A34, 0x3A75, + 0x3AB9, 0x3AFF, 0x3B48, 0x3B94, 0x3BE2, 0x3C1A, 0x3C44, 0x3C70, 0x3C9D, 0x3CA0, 0x3CA3, 0x3CA6, 0x3CA9, 0x3CAC, 0x3CAF, 0x3CB1, + 0x3CB4, 0x3CB7, 0x3CBA, 0x3CBD, 0x3CC0, 0x3CC3, 0x3CC6, 0x3CC9, 0x3CCC, 0x3CCF, 0x3CD2, 0x3CD5, 0x3CD8, 0x3CDB, 0x3CDE, 0x3CE1, + 0x3CE4, 0x3CE7, 0x3CEA, 0x3CEE, 0x3CF1, 0x3CF4, 0x3CF7, 0x3CFA, 0x3CFD, 0x3D00, 0x3D03, 0x3D06, 0x3D09, 0x3D0D, 0x3D10, 0x3D13, + 0x3D16, 0x3D19, 0x3D1C, 0x3D20, 0x3D23, 0x3D26, 0x3D29, 0x3D2C, 0x3D30, 0x3D33, 0x3D36, 0x3D39, 0x3D3D, 0x3D40, 0x3D43, 0x3D46, + 0x3D4A, 0x3D4D, 0x3D50, 0x3D54, 0x3D57, 0x3D5A, 0x3D5D, 0x3D61, 0x3D64, 0x3D9B, 0x3DD3, 0x3E0D, 0x3E4A, 0x3E89, 0x3ECA, 0x3F0E, + 0x3F54, 0x3F9C, 0x3FE8, 0x401B, 0x4043, 0x406D, 0x4099, 0x40C6, 0x40F4, 0x4124, 0x4156, 0x418A, 0x41C0, 0x41F8, 0x4232, 0x426D, + 0x42AB, 0x42EB, 0x432E, 0x4373, 0x43BA, 0x4428, 0x4479, 0x44D0, 0x452D, 0x4591, 0x45FC, 0x466F, 0x46EB, 0x472C, 0x476F, 0x47B5, + 0x47FE, 0x4824, 0x484B, 0x4874, 0x489D, 0x48F5, 0x4954, 0x4986, 0x49B9, 0x49EF, 0x4A26, 0x4A5F, 0x4A9B, 0x4AD9, 0x4B19, 0x4B9F, + 0x4C18, 0x4C66, 0x4CBA, 0x4CE6, 0x4D13, 0x4D43, 0x4D74, 0x4DA7, 0x4DDC, 0x4E12, 0x4E4B, 0x4E86, 0x4EC3, 0x4F02, 0x4F44, 0x4F88, + 0x4FCE, 0x500C, 0x5032, 0x5082, 0x50D8, 0x5106, 0x5135, 0x5166, 0x5199, 0x5205, 0x5278, 0x52F5, 0x537C, 0x53C3, 0x5406, 0x542D, + 0x5454, 0x54A9, 0x5503, 0x550F, 0x551B, 0x5527, 0x5533, 0x5540, 0x554C, 0x5559, 0x5565, 0x5572, 0x557F, 0x558C, 0x5599, 0x55A7, + 0x55B4, 0x55C1, 0x55CF, 0x5607, 0x5641, 0x567E, 0x56BC, 0x56FE, 0x5741, 0x5788, 0x57D1, +}; + +static void InitCsc0LUT(NVEvoChannelPtr pChannel, + const NvU32 *pSegmentSizes, NvU32 numSegmentSizes, + const NvU16 *pLUTEntries, NvU32 numEntries) +{ + NvU32 i; + + for (i = 0; i < numSegmentSizes; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_SEGMENT_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC0LUT_SEGMENT_SIZE, _IDX, i) | + DRF_NUM(C57E, _SET_CSC0LUT_SEGMENT_SIZE, _VALUE, pSegmentSizes[i])); + } + + for (i = 0; i < numEntries; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_ENTRY, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC0LUT_ENTRY, _IDX, i) | + DRF_NUM(C57E, _SET_CSC0LUT_ENTRY, _VALUE, pLUTEntries[i])); + } +} + +static void InitCsc1LUT(NVEvoChannelPtr pChannel, + const NvU32 *pSegmentSizes, NvU32 numSegmentSizes, + const NvU16 *pLUTEntries, NvU32 numEntries) +{ + NvU32 i; + + for (i = 0; i < numSegmentSizes; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_SEGMENT_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC1LUT_SEGMENT_SIZE, _IDX, i) | + DRF_NUM(C57E, _SET_CSC1LUT_SEGMENT_SIZE, _VALUE, pSegmentSizes[i])); + } + + for (i = 0; i < numEntries; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_ENTRY, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, 
_SET_CSC1LUT_ENTRY, _IDX, i) | + DRF_NUM(C57E, _SET_CSC1LUT_ENTRY, _VALUE, pLUTEntries[i])); + } +} + +static void ConfigureCsc0C5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvBool enable) +{ + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + struct NvKmsCscMatrix matrix = { }; + NvU32 lutData = 0; + NvU32 csc01Data = 0; + + if (!pWinCaps->csc0MatricesPresent) { + return; + } + + if (enable) { + matrix = Rec709RGBToLMS; + + lutData |= DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _ENABLE, _ENABLE); + + csc01Data |= DRF_DEF(C57E, _SET_CSC01CONTROL, _ENABLE, _ENABLE); + } else { + matrix = NVKMS_IDENTITY_CSC_MATRIX; + + lutData |= DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _ENABLE, _DISABLE); + + csc01Data |= DRF_DEF(C57E, _SET_CSC01CONTROL, _ENABLE, _DISABLE); + } + + /* Linear RGB FP16 -> Linear LMS FP16 */ + SetCsc00MatrixC5(pChannel, &matrix); + + /* Linear LMS FP16 -> PQ encoded L'M'S' fixed-point */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, lutData); + + /* + * PQ encoded L'M'S' fixed-point -> ICtCp + * + * Note that we're converting between fixed colorspaces, so the default HW + * coefficients are sufficient. + */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC01CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, csc01Data); +} + +static void ConfigureCsc1C5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvBool enable) +{ + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + struct NvKmsCscMatrix matrix = { }; + NvU32 lutData = 0; + NvU32 csc10Data = 0; + const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NvU32 head = pDevEvo->headForWindow[win]; + + if (!pWinCaps->csc1MatricesPresent || (head == NV_INVALID_HEAD)) { + return; + } + + if (enable) { + const NvU32 sdMask = nvPeekEvoSubDevMask(pDevEvo); + const NvU32 sd = (sdMask == 0) ? 0 : __builtin_ffs(sdMask) - 1; + NVDispHeadStateEvoRec *pHeadState; + + /* + * All callers of this path should push a single sd on the stack, + * so that ffs(sdMask) is safe. + */ + nvAssert(nvPopCount32(sdMask) == 1); + + pHeadState = &pDevEvo->pDispEvo[sd]->headState[head]; + + if ((pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020RGB) || + (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020YCC)) { + matrix = LMSToRec2020RGB; + } else { + matrix = LMSToRec709RGB; + } + + lutData |= DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _ENABLE, _ENABLE); + + csc10Data |= DRF_DEF(C57E, _SET_CSC10CONTROL, _ENABLE, _ENABLE); + } else { + matrix = NVKMS_IDENTITY_CSC_MATRIX; + + lutData |= DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _ENABLE, _DISABLE); + + csc10Data |= DRF_DEF(C57E, _SET_CSC10CONTROL, _ENABLE, _DISABLE); + } + + /* + * ICtCp -> PQ encoded L'M'S' fixed-point + * + * Note that we're converting between fixed colorspaces, so the default HW + * coefficients are sufficient. 
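+     * This is the inverse of the CSC01 step programmed in ConfigureCsc0C5()
+     * above.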
+ */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC10CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, csc10Data); + + /* PQ encoded L'M'S' fixed-point -> Linear LMS FP16 */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, lutData); + + /* Linear LMS FP16 -> Linear RGB FP16 */ + SetCsc11MatrixC5(pChannel, &matrix); +} + +static void InitDesktopColorC3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DESKTOP_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _RED, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _GREEN, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _BLUE, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _ALPHA, 255)); + } +} + +static void InitDesktopColorC5(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _ALPHA, 255) | + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _RED, 0)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _GREEN, 0) | + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _BLUE, 0)); + } +} + +static void EvoInitChannel3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + InitChannelCapsC3(pDevEvo, pChannel); +} + +static void EvoInitChannelC3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + + EvoInitChannel3(pDevEvo, pChannel); + + if (isCore) { + InitDesktopColorC3(pDevEvo, pChannel); + } +} + +static void EvoInitChannelC5(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + const NvBool isWindow = + ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0); + + EvoInitChannel3(pDevEvo, pChannel); + + if (isCore) { + InitTaps5ScalerCoefficientsC5(pDevEvo, pChannel, FALSE); + InitDesktopColorC5(pDevEvo, pChannel); + } else if (isWindow) { + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + NvU32 csc0SizesLen = ARRAY_LEN(OetfPQ512SegSizesLog2); + NvU32 csc0EntriesLen = ARRAY_LEN(OetfPQ512Entries); + NvU32 csc1SizesLen = ARRAY_LEN(EotfPQ512SegSizesLog2); + NvU32 csc1EntriesLen = ARRAY_LEN(EotfPQ512Entries); + + InitTaps5ScalerCoefficientsC5(pDevEvo, pChannel, TRUE); + + if (pWinCaps->cscLUTsPresent) { + InitCsc0LUT(pChannel, + OetfPQ512SegSizesLog2, csc0SizesLen, + OetfPQ512Entries, csc0EntriesLen); + InitCsc1LUT(pChannel, + EotfPQ512SegSizesLog2, csc1SizesLen, + EotfPQ512Entries, csc1EntriesLen); + } + } +} + +static const NvU32 IdentityFMTMatrix[12] = { + 0x00010000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00010000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00010000, 0x00000000 +}; + +/* + * TODO: The full set of FMT matrices needs to be generated for each RGB and YUV + * encoding. For now, I'm using the matrix below to convert all YUV input + * formats to pipe native. 
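+ * Until per-encoding matrices are generated, YUV sources that are not
+ * Rec. 709 8bpc will be decoded with approximate coefficients.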
+ */ +static const NvU32 YCbCrRec709_8bpcFMTMatrix[12] = { + 0x0001ccb7, 0x00012b3c, 0x00000000, 0x001f06f1, + 0x001f770c, 0x00012b3c, 0x001fc933, 0x00004d2d, + 0x00000000, 0x00012b3c, 0x00021edd, 0x001eddde +}; + +static const NvU32* EvoGetFMTMatrixC5( + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + if (pFormatInfo->isYUV) { + return YCbCrRec709_8bpcFMTMatrix; + } else { + return IdentityFMTMatrix; + } +} + +static void EvoSetFMTMatrixC5( + NVEvoChannelPtr pChannel, const enum NvKmsSurfaceMemoryFormat format) +{ + const NvU32 *matrix = EvoGetFMTMatrixC5(format); + NvU32 method = NVC57E_SET_FMT_COEFFICIENT_C00; + int i; + + for (i = 0; i < 12; i++) { + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, matrix[i]); + + method += 4; + } +} + +static void EvoInitDefaultLutC5(NVDevEvoPtr pDevEvo) +{ + NVLutSurfaceEvoPtr pLut = pDevEvo->lut.defaultLut; + NvU16 sd; + + nvAssert(pLut); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoLutDataRec *pData = pLut->subDeviceAddress[sd]; + NvU16 i; + + ct_assert(NV_NUM_EVO_LUT_ENTRIES == 1025); + for (i = 0; i < 1024; i++) { + // nvdisplay 3 uses FP16 entries in the ILUT. + pData->base[NV_LUT_VSS_HEADER_SIZE + i].Red = + pData->base[NV_LUT_VSS_HEADER_SIZE + i].Green = + pData->base[NV_LUT_VSS_HEADER_SIZE + i].Blue = nvUnorm10ToFp16(i).v; + + // nvdisplay 3 uses 16-bit fixed-point entries in the OLUT. + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Red = + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Green = + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = (i << (16 - 10)); + } + + pData->base[NV_LUT_VSS_HEADER_SIZE + 1024] = pData->base[NV_LUT_VSS_HEADER_SIZE + 1023]; + pData->output[NV_LUT_VSS_HEADER_SIZE + 1024] = pData->output[NV_LUT_VSS_HEADER_SIZE + 1023]; + } +} + +static void EvoInitWindowMapping3(NVDevEvoPtr pDevEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win, sd; + + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Bind each window to a head. On GV100, there is a fixed mapping. */ + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 head = pDevEvo->headForWindow[win]; + + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_CONTROL(win), 1); + if ((head == NV_INVALID_HEAD) || (head >= pDevEvo->numHeads)) { + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _WINDOW_SET_CONTROL, _OWNER, + NVC37D_WINDOW_SET_CONTROL_OWNER_NONE)); + } else { + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _WINDOW_SET_CONTROL, _OWNER, head)); + } + } + + pModesetUpdateState->windowMappingChanged = FALSE; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *pCoreDma = pDevEvo->pSubDevices[sd]->pCoreDma; + /* + * Short timeout (100ms) because we don't expect display to be very + * busy at this point (it should at most be processing methods from + * InitChannel()). 
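+         * The PIO state-cache reads below must not race against methods
+         * that are still being processed, hence the idle wait first.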
+ */ + const NvU32 timeout = 100000; + NvU64 startTime = 0; + + if (!((nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)))) { + continue; + } + + /* This core channel must be idle before reading state cache */ + do { + NvBool isIdle = NV_FALSE; + if (!EvoIsChannelIdleC3(pDevEvo, pChannel, sd, &isIdle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, "EvoIsChannelIdleC3() failed!"); + } + if (isIdle) { + break; + } + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Timed out waiting for core channel idle."); + break; + } + } while (TRUE); + + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 data = nvDmaLoadPioMethod(pCoreDma, NVC37D_WINDOW_SET_CONTROL(win)); + + if (DRF_VAL(C37D, + _WINDOW_SET_CONTROL, _OWNER, data) != + pDevEvo->headForWindow[win]) { + + pModesetUpdateState->windowMappingChanged = TRUE; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDisableCoreInterlockUpdateState(pDevEvo, + updateState, + pDevEvo->window[win]); + nvPopEvoSubDevMask(pDevEvo); + } + } + } +} + +static void EvoInitWindowMappingC3(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win; + + nvPushEvoSubDevMaskDisp(pDispEvo); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoInitWindowMapping3(pDevEvo, + pModesetUpdateState); + + // Set window usage bounds + for (win = 0; win < pDevEvo->numWindows; win++) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + /* XXXnvdisplay: window scaling */ + nvDmaSetEvoMethodData(pChannel, NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3); + } + nvPopEvoSubDevMask(pDevEvo); +} + +static void EvoInitWindowMappingC5(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win; + + nvPushEvoSubDevMaskDisp(pDispEvo); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoInitWindowMapping3(pDevEvo, + pModesetUpdateState); + + // Set window usage bounds + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 bounds = NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5; + + bounds |= + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + nvDmaSetEvoMethodData(pChannel, bounds); + } + nvPopEvoSubDevMask(pDevEvo); +} + +static NvBool ComputeMinFrameIdle( + const NVHwModeTimingsEvo *pTimings, + NvU16 *pLeadingRasterLines, + NvU16 *pTrailingRasterLines) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + + /* + * leadingRasterLines defines the number of lines between the start of the + * frame (vsync) and the start of the active region. This includes Vsync, + * Vertical Back Porch, and the top part of the overscan border. The + * minimum value is 2 because vsync and VBP must be at least 1 line each. + * + * trailingRasterLines defines the number of lines between the end of the + * active region and the end of the frame. This includes the bottom part + * of the overscan border and the Vertical Front Porch. 
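+     *
+     * As a hypothetical example: a 1080p timing with a 5-line vsync,
+     * 36-line vertical back porch, and 4-line vertical front porch
+     * (rasterSize.y == 1125, rasterBlankEnd.y == 40, no overscan or
+     * yAdjust) yields leadingRasterLines == 41 and
+     * trailingRasterLines == 1125 - (41 + 1080) == 4.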
+ */ + const NvU32 activeHeight = (pTimings->rasterBlankStart.y - + pTimings->rasterBlankEnd.y); + /* This is how it's done in dispClassNVD20CoreUpdateErrorChecks_hls.c */ + const NvU32 overscan = (activeHeight / 2) - (pViewPort->out.height / 2); + + /* + * The +1 is justified by this comment in the error check: + * + * If the value is 1, that means there are 2 lines of vblank (lines 0 and + * 1) before active. That is why the uLeadingBorder equation needs +1; + */ + const NvU32 leadingRasterLines = + pTimings->rasterBlankEnd.y + overscan + pViewPort->out.yAdjust + 1; + const NvU32 trailingRasterLines = + pTimings->rasterSize.y - (leadingRasterLines + pViewPort->out.height); + + /* nvdClass_01.mfs says: "The minimum value is 2 because vsync and VBP must + * be at least 1 line each." */ + if (leadingRasterLines < 2) { + return FALSE; + } + + *pLeadingRasterLines = leadingRasterLines; + *pTrailingRasterLines = trailingRasterLines; + + return TRUE; +} + +static void EvoSetRasterParamsC3(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + /* XXXnvdisplay: Convert these for YCbCr, as necessary */ + NvU32 overscanColor = + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _RED_CR, pOverscanColor->red) | + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _GREEN_Y, pOverscanColor->green) | + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _BLUE_CB, pOverscanColor->blue); + NvU32 hdmiStereoCtrl; + NvU16 minFrameIdleLeadingRasterLines, minFrameIdleTrailingRasterLines; + NvBool ret; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX[AGP]: These methods are sequential and could use an incrementing + // method, but it's not clear if there's a bug in EVO that causes corruption + // sometimes. Play it safe and send methods with count=1. + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_OVERSCAN_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, overscanColor); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_SIZE, _WIDTH, pTimings->rasterSize.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_SIZE, _HEIGHT, pTimings->rasterSize.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_SYNC_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_SYNC_END, _X, pTimings->rasterSyncEnd.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_SYNC_END, _Y, pTimings->rasterSyncEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_BLANK_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_END, _X, pTimings->rasterBlankEnd.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_END, _Y, pTimings->rasterBlankEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_BLANK_START(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_START, _X, pTimings->rasterBlankStart.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_START, _Y, pTimings->rasterBlankStart.y)); + + ret = ComputeMinFrameIdle(pTimings, + &minFrameIdleLeadingRasterLines, + &minFrameIdleTrailingRasterLines); + if (!ret) { + /* This should have been ensured by IMP in AssignPerHeadImpParams. */ + nvAssert(ret); + /* In case a mode validation override was used to skip IMP, program the + * default values. This may still cause a hardware exception. 
*/ + minFrameIdleLeadingRasterLines = 2; + minFrameIdleTrailingRasterLines = 1; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_MIN_FRAME_IDLE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_MIN_FRAME_IDLE, _LEADING_RASTER_LINES, + minFrameIdleLeadingRasterLines) | + DRF_NUM(C37D, _HEAD_SET_MIN_FRAME_IDLE, _TRAILING_RASTER_LINES, + minFrameIdleTrailingRasterLines)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _NOT_DRIVER, _FALSE) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING, _DISABLE) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING_MODE, _VBLANK)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _RED_CR, 0) | +#if defined(DEBUG) + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 512) | +#else + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 0) | +#endif + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _BLUE_CB, 0)); + + hdmiStereoCtrl = DRF_NUM(C37D, _HEAD_SET_HDMI_CTRL, _HDMI_VIC, 0); + if (pTimings->hdmi3D) { + hdmiStereoCtrl = + FLD_SET_DRF(C37D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _STEREO3D, hdmiStereoCtrl); + } else { + hdmiStereoCtrl = + FLD_SET_DRF(C37D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _NORMAL, hdmiStereoCtrl); + } + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_HDMI_CTRL(head), 1); + nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl); +} + +static void EvoSetProcAmpC3(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // These NVT defines match the HEAD_SET_PROCAMP ones. 
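+    // The ct_assert()s below verify that equivalence at build time, which is
+    // what lets procAmp.colorimetry and procAmp.colorRange be programmed into
+    // the _COLOR_SPACE and _RANGE_COMPRESSION fields with no translation.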
+ ct_assert(NVT_COLORIMETRY_RGB == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB); + ct_assert(NVT_COLORIMETRY_YUV_601 == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601); + ct_assert(NVT_COLORIMETRY_YUV_709 == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709); + /* XXXnvdisplay add REC2020 */ + ct_assert(NVT_COLOR_RANGE_FULL == NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE); + ct_assert(NVT_COLOR_RANGE_LIMITED == NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE); + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(C37D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(C37D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _COLOR_SPACE, + pHeadState->procAmp.colorimetry) | + DRF_DEF(C37D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _DISABLE) | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _SAT_COS, + pHeadState->procAmp.satCos) | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) | + dynRange | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION, + pHeadState->procAmp.colorRange) | + DRF_DEF(C37D, _HEAD_SET_PROCAMP, _BLACK_LEVEL, _GRAPHICS)); +} + +static const struct NvKmsCscMatrix RGBToFullRangeYCbCrRec709Matrix = {{ + { 0x8000, 0x1f8bbc, 0x1ff444, 0x8000 }, + { 0x366c, 0xb718, 0x127c, 0 }, + { 0x1fe2ac, 0x1f9d54, 0x8000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToFullRangeYCbCrRec601Matrix = {{ + { 0x8000, 0x1f94d0, 0x1feb30, 0x8000 }, + { 0x4c8c, 0x9644, 0x1d30, 0 }, + { 0x1fd4cc, 0x1fab34, 0x8000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec2020Matrix = {{ + { 0x7000, 0x1f9900, 0x1ff700, 0x8000 }, + { 0x3988, 0x947c, 0xcfc, 0x1000 }, + { 0x1fe0b8, 0x1faf44, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec709Matrix = {{ + { 0x7000, 0x1f9a44, 0x1ff5bc, 0x8000 }, + { 0x2e90, 0x9ca4, 0xfd0, 0x1000 }, + { 0x1fe654, 0x1fa9a8, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec601Matrix = {{ + { 0x7000, 0x1fa234, 0x1fedc8, 0x8000 }, + { 0x417c, 0x8090, 0x18f8, 0x1000 }, + { 0x1fda34, 0x1fb5cc, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeRGB = {{ + { 0xdb04, 0, 0, 0x1000 }, + { 0, 0xdb04, 0, 0x1000 }, + { 0, 0, 0xdb04, 0x1000 }, +}}; + +/*! + * Return the appropriate OCSC1 matrix for the requested color range and + * colorimetry, or NULL if the OCSC1 should be disabled. + */ +static const struct NvKmsCscMatrix* EvoGetOCsc1MatrixC5(const NVDispHeadStateEvoRec *pHeadState) +{ + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + switch (pHeadState->procAmp.colorimetry) { + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + // No OCSC1 needed. 
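+            // Full-range RGB needs no conversion, so returning NULL here
+            // leaves the OCSC1 disabled in EvoSetOCsc1C5().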
+ return NULL; + case NVT_COLORIMETRY_YUV_601: + return &RGBToFullRangeYCbCrRec601Matrix; + case NVT_COLORIMETRY_YUV_709: + return &RGBToFullRangeYCbCrRec709Matrix; + default: + nvAssert(!"Unexpected colorimetry"); + return NULL; + } + } else { + switch (pHeadState->procAmp.colorimetry) { + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + return &RGBToLimitedRangeRGB; + case NVT_COLORIMETRY_YUV_601: + return &RGBToLimitedRangeYCbCrRec601Matrix; + case NVT_COLORIMETRY_YUV_709: + return &RGBToLimitedRangeYCbCrRec709Matrix; + case NVT_COLORIMETRY_BT2020YCC: + return &RGBToLimitedRangeYCbCrRec2020Matrix; + default: + nvAssert(!"Unexpected colorimetry"); + return NULL; + } + } +} + +struct EvoClampRangeC5 { + NvU32 green, red_blue; +}; + +/*! + * Return the output clamping ranges for the requested color range and + * colorimetry. + */ +static struct EvoClampRangeC5 +EvoGetOCsc1ClampRange(const NVDispHeadStateEvoRec *pHeadState) +{ + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x0) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xFFF), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x0) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xFFF), + }; + } else { + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unexpected colorimetry"); + // fall through + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xEB0), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xEB0), + }; + case NVT_COLORIMETRY_YUV_601: + case NVT_COLORIMETRY_YUV_709: + case NVT_COLORIMETRY_BT2020YCC: + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xEB0), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xF00), + }; + } + } +} + + +static void EvoSetOCsc1C5(NVDispEvoPtr pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const struct NvKmsCscMatrix *matrix = EvoGetOCsc1MatrixC5(pHeadState); + struct EvoClampRangeC5 clamp = EvoGetOCsc1ClampRange(pHeadState); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CLAMP_RANGE_GREEN(head), 1); + nvDmaSetEvoMethodData(pChannel, clamp.green); + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE(head), 1); + nvDmaSetEvoMethodData(pChannel, clamp.red_blue); + + if (matrix) { + int x, y; + NvU32 method = NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00(head); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _ENABLE)); + + for (y = 0; y < 3; y++) { + for (x = 0; x < 4; x++) { + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, matrix->m[y][x]); + + method += 4; + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _DISABLE)); + } +} + +/* + * 1.402 
1.0 0.0 + * -0.714136 1.0 -0.344136 + * 0.0 1.0 1.772 + */ +static const struct NvKmsMatrix CrYCb601toRGBMatrix = { { + { 0x3fb374bc, 0x3f800000, 0x00000000 }, + { 0xbf36d19e, 0x3f800000, 0xbeb03298 }, + { 0x00000000, 0x3f800000, 0x3fe2d0e5 } +} }; + +/* + * 1.5748 1.0 0.0 + * -0.468124 1.0 -0.187324 + * 0.0 1.0 1.8556 + */ +static const struct NvKmsMatrix CrYCb709toRGBMatrix = { { + { 0x3fc9930c, 0x3f800000, 0x00000000 }, + { 0xbeefadf3, 0x3f800000, 0xbe3fd1dd }, + { 0x00000000, 0x3f800000, 0x3fed844d } +} }; + +/* + * 0.5 -0.418688 -0.081312 + * 0.299 0.587 0.114 + * -0.168736 -0.331264 0.5 + */ +static const struct NvKmsMatrix RGBtoCrYCb601Matrix = { { + { 0x3f000000, 0xbed65e46, 0xbda686e8 }, + { 0x3e991687, 0x3f1645a2, 0x3de978d5 }, + { 0xbe2cc921, 0xbea99b6f, 0x3f000000 } +} }; + +/* + * 0.5 -0.45415 -0.04585 + * 0.21260 0.71520 0.07220 + * -0.11457 -0.38543 0.5 + */ +static const struct NvKmsMatrix RGBtoCrYCb709Matrix = { { + { 0x3f000000, 0xbee88659, 0xbd3bcd36 }, + { 0x3e59b3d0, 0x3f371759, 0x3d93dd98 }, + { 0xbdeaa3ad, 0xbec55715, 0x3f000000 } +} }; + +/* + * Converts FP32 to fixed point S5.14 coefficient format + */ +static inline NvU32 cscCoefConvertS514(float32_t x) +{ + /* more concisely, (NvS32)floor(x * 65536.0 + 2.0) */ + const NvS32 y = f32_to_i32(f32_mulAdd(x, + NvU32viewAsF32(NV_FLOAT_65536), + NvU32viewAsF32(NV_FLOAT_TWO)), + softfloat_round_min, FALSE); + return (NvU32)(0x001ffffc & clamp_S32(y, -0x100000, 0xfffff)); +} + +/* + * Sets up the OCSC0 matrix coefficients, used to perform saturation + * adjustment. + * + * The pipeline operates in FP16 RGB, however this adjustment must be + * performed in CrYCb. Therefore, we multiply the saturation + * adjustment matrix by the appropriate color space conversion + * matrix. The specific color space used depends on the colorimetry of + * the final output. Then we multiply by its inverse to convert back + * to RGB. Finally, we convert the coefficients to S5.14 fixed point + * format. + * + * The OCSC0 matrix will be enabled later in EvoSetLUTContextDmaC5 if + * and only if we also enable the OLUT as required by the + * specification. 
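+ *
+ * Conceptually, the three nvMultiply3x4Matrix() calls below compose, applied
+ * right to left to each RGB pixel:
+ *
+ *   ocsc0Matrix = CrYCbtoRGBMatrix x satHueMatrix x RGBtoCrYCbMatrix
+ *
+ * where satHueMatrix scales the two chroma channels by satCos and leaves
+ * luma alone.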
+ */ +static void EvoSetOCsc0C5(NVDispEvoPtr pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + const float32_t zeroF32 = NvU32viewAsF32(NV_FLOAT_ZERO); + const float32_t oneF32 = NvU32viewAsF32(NV_FLOAT_ONE); + /* divide satCos by the default setting of 1024 */ + const float32_t satCos = f32_div(i32_to_f32(pHeadState->procAmp.satCos), + NvU32viewAsF32(NV_FLOAT_1024)); + const struct NvKmsMatrixF32 satHueMatrix = { { + { satCos, zeroF32, zeroF32 }, + { zeroF32, oneF32, zeroF32 }, + { zeroF32, zeroF32, satCos } + } }; + struct NvKms3x4MatrixF32 ocsc0Matrix = { { + { oneF32, zeroF32, zeroF32, zeroF32 }, + { zeroF32, oneF32, zeroF32, zeroF32 }, + { zeroF32, zeroF32, oneF32, zeroF32 } + } }; + + struct NvKmsMatrixF32 CrYCbtoRGBMatrix; + struct NvKmsMatrixF32 RGBtoCrYCbMatrix; + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unexpected colorimetry"); + /* fallthrough */ + case NVT_COLORIMETRY_RGB: + /* fallthrough; for RGB output, perform saturation adjustment in YUV709 */ + case NVT_COLORIMETRY_YUV_709: + CrYCbtoRGBMatrix = NvKmsMatrixToNvKmsMatrixF32(CrYCb709toRGBMatrix); + RGBtoCrYCbMatrix = NvKmsMatrixToNvKmsMatrixF32(RGBtoCrYCb709Matrix); + break; + case NVT_COLORIMETRY_YUV_601: + CrYCbtoRGBMatrix = NvKmsMatrixToNvKmsMatrixF32(CrYCb601toRGBMatrix); + RGBtoCrYCbMatrix = NvKmsMatrixToNvKmsMatrixF32(RGBtoCrYCb601Matrix); + break; + } + + ocsc0Matrix = nvMultiply3x4Matrix(&RGBtoCrYCbMatrix, &ocsc0Matrix); + ocsc0Matrix = nvMultiply3x4Matrix(&satHueMatrix, &ocsc0Matrix); + ocsc0Matrix = nvMultiply3x4Matrix(&CrYCbtoRGBMatrix, &ocsc0Matrix); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00(head), 12); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C00, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C01, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C02, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C03, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C10, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C11, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C12, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C13, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C20, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C21, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C22, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C23, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][3]))); +} + +static void EvoSetProcAmpC5(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + 
NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange, chromaLpf, chromaDownV; + NvU32 colorimetry; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unrecognized colorimetry"); + // fall through + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _RGB); + break; + case NVT_COLORIMETRY_YUV_601: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_601); + break; + case NVT_COLORIMETRY_YUV_709: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_709); + break; + case NVT_COLORIMETRY_BT2020YCC: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_2020); + break; + } + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + /* + * NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V is only defined in NVC67D, but + * it is an unused bit in NVC57D_HEAD_SET_PROCAMP, and YUV420 should only + * be set on >=nvdisplay 4.0, so it's okay to set it here. + */ + if (pHeadState->procAmp.colorFormat == NVT_COLOR_FORMAT_YCbCr420) { + chromaLpf = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _ENABLE); + chromaDownV = DRF_DEF(C67D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _ENABLE); + } else { + chromaLpf = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _DISABLE); + chromaDownV = DRF_DEF(C67D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _DISABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + colorimetry | dynRange | chromaLpf | chromaDownV); + + EvoSetOCsc0C5(pDispEvo, head); + EvoSetOCsc1C5(pDispEvo, head); +} + +/* + * With nvdisplay, external fliplock pins are controlled via a headless + * SetControl method, unlike previous EVO display implementations which + * specified this information in the per-head HeadSetControl method. This + * function loops over all of the core nvkms HeadControl data structures to + * determine which pins should be enabled in the SetControl method. It should + * be called any time the HeadControl data structures are updated. + */ +static void SetControl(NVDevEvoPtr pDevEvo, int sd) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 data = 0; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + if (pHC->flipLock && !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->flipLockPin)) { + NvU32 pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_0; + data = FLD_IDX_SET_DRF(C37D, _SET_CONTROL, _FLIP_LOCK_PIN, + pin, _ENABLE, data); + } + } + + /* + * GV100 HW bug 2062029 WAR + * + * GV100 always holds the external fliplock line low as if + * NVC37D_SET_CONTROL_FLIP_LOCK_PIN was enabled. To work around this, + * the GV100 VBIOS initializes the fliplock GPIOs to be software + * controlled (forced off). The following rmctrl needs to be called to + * switch HW control of the fliplock GPIOs back on whenever external + * fliplock is enabled. 
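+     *
+     * This is why the control call below runs on every SetControl(), with
+     * bEnable simply mirroring whether any external fliplock pin was enabled
+     * in 'data' above.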
+ */
+    {
+        NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS params = { };
+
+        params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance;
+        params.bEnable = (data != 0);
+
+        if (nvRmApiControl(
+                nvEvoGlobal.clientHandle,
+                pDevEvo->displayHandle,
+                NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR,
+                &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
+            nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, "Failed to override fliplock GPIO");
+        }
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_CONTROL, 1);
+    nvDmaSetEvoMethodData(pChannel, data);
+}
+
+static void EvoSetHeadControlC3(NVDevEvoPtr pDevEvo, int sd, int head,
+                                NVEvoUpdateState *updateState)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+    /*
+     * NOTE: This function should only push state to the hardware based on data
+     * in the pHC. If not, then we may miss updates due to the memcmp of the
+     * HeadControl structure in UpdateEvoLockState().
+     */
+    NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head];
+    NvU32 data = 0, pin;
+    NvU32 serverLockMode, clientLockMode;
+
+    /* These methods should only apply to a single subdevice */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    switch (pHC->serverLock) {
+    case NV_EVO_NO_LOCK:
+        serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK;
+        break;
+    case NV_EVO_FRAME_LOCK:
+        serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK;
+        break;
+    case NV_EVO_RASTER_LOCK:
+        serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK;
+        break;
+    default:
+        nvAssert(!"Invalid server lock mode");
+        return;
+    }
+
+    switch (pHC->clientLock) {
+    case NV_EVO_NO_LOCK:
+        clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK;
+        break;
+    case NV_EVO_FRAME_LOCK:
+        clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK;
+        break;
+    case NV_EVO_RASTER_LOCK:
+        clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK;
+        break;
+    default:
+        nvAssert(!"Invalid client lock mode");
+        return;
+    }
+
+    // Convert head control state to EVO method values.
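+    // Everything below ORs fields into a single HEAD_SET_CONTROL word: scan
+    // structure, master/slave lock pins and modes, the slave lockout window,
+    // stereo lock modes, the stereo pin, the HDMI 3D structure, and the
+    // YUV420 packer bit.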
+ nvAssert(!pHC->interlaced); + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STRUCTURE, _PROGRESSIVE); + + nvAssert(pHC->serverLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->clientLockPin != NV_EVO_LOCK_PIN_ERROR); + + if (serverLockMode == NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->serverLockPin)) { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + /* + * nvdClass_01.mfs says: + * "master lock pin, if internal, must be set to the corresponding + * internal pin for that head" (error check #12) + */ + nvAssert(pin == head); + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_MODE, serverLockMode); + + if (clientLockMode == NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->clientLockPin)) { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_MODE, clientLockMode); + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCKOUT_WINDOW, + pHC->clientLockoutWindow); + + /* + * We always enable stereo lock when it's available and either framelock + * or rasterlock is in use. + */ + if (pHC->stereoLocked) { + if (pHC->serverLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_STEREO_LOCK_MODE, + NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE); + } + if (pHC->clientLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_STEREO_LOCK_MODE, + NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE); + } + } + + nvAssert(pHC->stereoPin != NV_EVO_LOCK_PIN_ERROR); + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin)) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO_PIN, _LOCK_PIN_NONE); + } else { + pin = pHC->stereoPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _STEREO_PIN, + NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(pin)); + } + + if (pHC->hdmi3D) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _FRAME_PACKED); + } else { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _NORMAL); + } + + /* + * NVC67D_HEAD_SET_CONTROL_YUV420PACKER is only defined in NVC67D, but + * it is an unused bit in NVC37D_HEAD_SET_CONTROL, and YUV420 should only + * be set on >=nvdisplay 4.0, so it's okay to set it here. + */ + if (pHC->hwYuv420) { + data |= DRF_DEF(C67D, _HEAD_SET_CONTROL, _YUV420PACKER, _ENABLE); + } else { + data |= DRF_DEF(C67D, _HEAD_SET_CONTROL, _YUV420PACKER, _DISABLE); + } + + // Send the HeadSetControl method. 
+ nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + SetControl(pDevEvo, sd); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_LOCK_CHAIN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _HEAD_SET_LOCK_CHAIN, _POSITION, + pHC->lockChainPosition)); +} + +static void EvoSetHeadRefClkC3(NVDevEvoPtr pDevEvo, int head, NvBool external, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 sd; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + if (external) { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A_CODE, + _VPLL_REF, + _QSYNC, + pDevEvo->gpus[sd].setSwSpareA[head]); + } else { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A_CODE, + _VPLL_REF, + _NO_PREF, + pDevEvo->gpus[sd].setSwSpareA[head]); + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, pDevEvo->gpus[sd].setSwSpareA[head]); + nvPopEvoSubDevMask(pDevEvo); + } + } +} + +static void EvoSORSetControlC3(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hwProtocol = 0; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(orIndex != NV_INVALID_OR); + + if (headMask != 0) { + switch (protocol) { + default: + nvAssert(!"Unknown SOR protocol"); + /* Fall through */ + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B; + break; + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_HDMI_FRL: + hwProtocol = NVC67D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL; + break; + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_NUM(C37D, _SOR_SET_CONTROL, _PROTOCOL, hwProtocol) | + DRF_DEF(C37D, _SOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE) | + DRF_DEF(C37D, _SOR_SET_CONTROL, _PIXEL_REPLICATE_MODE, _OFF)); +} + +static NvU32 EvoGetPixelDepthC3(const enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + case NVKMS_PIXEL_DEPTH_24_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + case NVKMS_PIXEL_DEPTH_30_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + } + nvAssert(!"Unexpected pixel depth"); + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; +} + +static void 
EvoPIORSetControlC3(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_PIOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _PIOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(C37D, _PIOR_SET_CONTROL, _PROTOCOL, _EXT_TMDS_ENC) | + DRF_DEF(C37D, _PIOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE)); +} + +static void EvoDSISetControlC6(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + /* Only Head 0 can be used to drive DSI output on Orin */ + nvAssert((headMask == 0x0) || (headMask == 0x1)); + /* Only one DSI engine exists on Orin */ + nvAssert(orIndex == 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_DSI); + } + + nvDmaSetStartEvoMethod(pChannel, NVC67D_DSI_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _DSI_SET_CONTROL, _OWNER_MASK, headMask)); +} + +static void EvoORSetControlC3Helper(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + EvoSORSetControlC3(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + EvoPIORSetControlC3(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + /* No DAC support on nvdisplay. Fall through. 
*/ + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } +} + +static void EvoORSetControlC3(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + EvoORSetControlC3Helper(pConnectorEvo, protocol, orIndex, headMask); +} + +static void EvoORSetControlC6(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI: + EvoDSISetControlC6(pConnectorEvo, protocol, orIndex, headMask); + break; + default: + EvoORSetControlC3Helper(pConnectorEvo, protocol, orIndex, headMask); + break; + } +} + +static void EvoHeadSetControlORC3(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 hwPixelDepth = EvoGetPixelDepthC3(pTimings->pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _COMPLETE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth) | + (colorSpaceOverride ? + (DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE))); +} + +static void EvoHeadSetControlORC5(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 hwPixelDepth = EvoGetPixelDepthC3(pTimings->pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _COMPLETE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? 
+ DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + DRF_NUM(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth) | + (colorSpaceOverride ? + (DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE)) | + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _EXT_PACKET_WIN, _NONE)); +} + +static void EvoHeadSetDisplayIdC3(NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_ID(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, displayId); +} + +static void SetFormatUsageBoundsOneWindow3(NVDevEvoPtr pDevEvo, NvU32 window, + const NvU64 supportedFormats, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 value = 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED1BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED2BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED4BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED8BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_PACKED422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR420, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR444, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR420, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR444, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_PLANAR444, _TRUE, value); + } + if (supportedFormats & 
NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420) {
+        value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS,
+                            _YUV_PLANAR420, _TRUE, value);
+    }
+
+    if (supportedFormats != 0 && value == 0) {
+        nvAssert(!"Unknown depth in SetFormatUsageBoundsOneWindow");
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(window), 1);
+    nvDmaSetEvoMethodData(pChannel, value);
+}
+
+static inline NvU32 GetMaxPixelsFetchedPerLine(NvU16 inWidth,
+                                               NvU16 maxHDownscaleFactor)
+{
+    /*
+     * Volta should be:
+     * (((SetViewportSizeIn.Width + 6) * SetMaxInputScaleFactor.Horizontal + 1023 ) >> 10 ) + 6
+     *
+     * Turing should be:
+     * (((SetViewportSizeIn.Width + 6) * SetMaxInputScaleFactor.Horizontal + 1023 ) >> 10 ) + 8
+     *
+     * Ampere, which adds "overfetch" to support tiled-display and
+     * 2-head-1-OR use cases without visual artefacts at head boundaries:
+     * (((SetViewportSizeIn.Width + 14) * SetMaxInputScaleFactor.Horizontal + 1023) >> 10) + 8
+     *
+     * We don't have to be super-precise when programming
+     * maxPixelsFetchedPerLine, so return a realistic worst-case value.
+     */
+    return (((inWidth + 14) * maxHDownscaleFactor + 1023) >> 10) + 8;
+}
+
+static void SetScalingUsageBoundsOneWindow5(
+    NVDevEvoPtr pDevEvo, NvU32 window,
+    const struct NvKmsScalingUsageBounds *pScaling,
+    const NVHwModeViewPortEvo *pViewPort,
+    NVEvoUpdateState *updateState)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NvU32 setWindowUsageBounds = NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5;
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    nvDmaSetStartEvoMethod(pChannel,
+                           NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(window), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C57D, _WINDOW_SET_MAX_INPUT_SCALE_FACTOR, _HORIZONTAL,
+                pScaling->maxHDownscaleFactor) |
+        DRF_NUM(C57D, _WINDOW_SET_MAX_INPUT_SCALE_FACTOR, _VERTICAL,
+                pScaling->maxVDownscaleFactor));
+
+    setWindowUsageBounds |=
+        (DRF_NUM(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _MAX_PIXELS_FETCHED_PER_LINE,
+                 GetMaxPixelsFetchedPerLine(pViewPort->in.width,
+                                            pScaling->maxHDownscaleFactor))) |
+        (pScaling->vTaps >= NV_EVO_SCALER_5TAPS ?
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_5) :
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2)) |
+        (pScaling->vUpscalingAllowed ?
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) :
+            DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE));
+    nvDmaSetStartEvoMethod(pChannel,
+                           NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(window), 1);
+    nvDmaSetEvoMethodData(pChannel, setWindowUsageBounds);
+}
+
+static NvBool EvoSetUsageBounds3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head,
+                                 const struct NvKmsUsageBounds *pUsage,
+                                 NVEvoUpdateState *updateState)
+{
+    const struct NvKmsUsageBounds *pCurrentUsage =
+        &pDevEvo->gpus[sd].headState[head].usage;
+    /* Return FALSE if a core channel UPDATE isn't actually needed.
*/ + NvBool needCoreUpdate = FALSE; + NvU32 layer; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NvU64 currentFormats = 0; + NvU64 targetFormats = 0; + + if (pCurrentUsage->layer[layer].usable) { + currentFormats = + pCurrentUsage->layer[layer].supportedSurfaceMemoryFormats; + } + + if (pUsage->layer[layer].usable) { + targetFormats = pUsage->layer[layer].supportedSurfaceMemoryFormats; + } + + if (targetFormats == currentFormats) { + continue; + } + + SetFormatUsageBoundsOneWindow3(pDevEvo, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + targetFormats, + updateState); + needCoreUpdate = TRUE; + } + + return needCoreUpdate; +} + +static NvBool EvoSetUsageBoundsC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + return EvoSetUsageBounds3(pDevEvo, sd, head, pUsage, updateState); +} + +static NvBool EvoSetUsageBoundsC5(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + const struct NvKmsUsageBounds *pCurrentUsage = + &pDevEvo->gpus[sd].headState[head].usage; + NvBool needCoreUpdate; + NvU32 layer; + + needCoreUpdate = EvoSetUsageBounds3(pDevEvo, sd, head, pUsage, updateState); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!nvEvoScalingUsageBoundsEqual(&pCurrentUsage->layer[layer].scaling, + &pUsage->layer[layer].scaling)) { + const NVHwModeViewPortEvo *pViewPort = + &pDevEvo->gpus[sd].pDispEvo->headState[head].timings.viewPort; + + SetScalingUsageBoundsOneWindow5( + pDevEvo, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + &pUsage->layer[layer].scaling, + pViewPort, + updateState); + needCoreUpdate = TRUE; + } + } + + return needCoreUpdate; +} + +static void EvoSetNotifierC3(NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // To work around HW BUG 1945716, set the core channel completion notifier + // context DMA to 0 when notification is not requested. + if (notify) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, + NVC37D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, + _SET_CONTEXT_DMA_NOTIFIER, + _HANDLE, + pDevEvo->core->notifiersDma[sd].ctxHandle)); + nvPopEvoSubDevMask(pDevEvo); + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, + NVC37D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SET_CONTEXT_DMA_NOTIFIER, _HANDLE, 0)); + } + + /* + * XXXnvdisplay: Note that nvdClass_01.mfs says: + * "The units of the offset are 16 bytes.", while dispClass_02.mfs says: + * "The units of the offset are 32 bit words." + * The "legacy" 32-bit notifier format is no longer supported. This will + * have to be exposed to upper layers. + */ + ASSERT_DRF_NUM(C37D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier) | + (awaken ? 
+ DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE_AWAKEN) : + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE)) | + (notify ? + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _NOTIFY, _ENABLE) : + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _NOTIFY, _DISABLE))); +} + +static void UpdateCoreC3(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask, + NvU32 flipLockPin, + NvBool releaseElv) +{ + NvU32 head, interlockFlags = 0; + NvU32 window, windowInterlockFlags = 0; + NvU32 update = DRF_NUM(C37D, _UPDATE, _FLIP_LOCK_PIN, flipLockPin); + + update |= releaseElv ? DRF_DEF(C37D, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + for (head = 0; head < NV_EVO_CHANNEL_MASK_CURSOR__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _CURSOR, head, _ENABLE, + interlockChannelMask)) { + interlockFlags |= + DRF_IDX_DEF(C37D, _SET_INTERLOCK_FLAGS, + _INTERLOCK_WITH_CURSOR, head, _ENABLE); + } + } + + for (window = 0; window < NV_EVO_CHANNEL_MASK_WINDOW__SIZE; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + interlockChannelMask)) { + windowInterlockFlags |= + DRF_IDX_DEF(C37D, _SET_WINDOW_INTERLOCK_FLAGS, + _INTERLOCK_WITH_WINDOW, window, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, interlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_WINDOW_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, windowInterlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, update); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateWindowIMM(NVEvoChannelPtr pChannel, + NVEvoChannelMask winImmChannelMask, + NVEvoChannelMask winImmInterlockMask, + NvBool releaseElv) +{ + nvAssert((winImmChannelMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + nvAssert((winImmInterlockMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + + if ((winImmChannelMask & pChannel->channelMask) != 0) { + NvU32 updateImm = 0; + + if ((winImmInterlockMask & pChannel->channelMask) != 0) { + updateImm |= DRF_DEF(C37B, _UPDATE, _INTERLOCK_WITH_WINDOW, _ENABLE); + } else { + updateImm |= DRF_DEF(C37B, _UPDATE, _INTERLOCK_WITH_WINDOW, _DISABLE); + } + updateImm |= releaseElv ? DRF_DEF(C37B, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + nvDmaSetStartEvoMethod(pChannel->imm.u.dma, NVC37B_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel->imm.u.dma, updateImm); + nvDmaKickoffEvo(pChannel->imm.u.dma); + } +} + +static void UpdateWindowC3(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask, + NVEvoChannelMask winImmChannelMask, + NVEvoChannelMask winImmInterlockMask, + NvBool transitionWAR, + NvU32 flipLockPin, + NvBool releaseElv) +{ + NvU32 head, interlockFlags = 0; + NvU32 window, windowInterlockFlags = 0; + NvU32 update = DRF_NUM(C37E, _UPDATE, _FLIP_LOCK_PIN, flipLockPin); + + update |= releaseElv ? DRF_DEF(C37E, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + if ((winImmInterlockMask & pChannel->channelMask) != 0) { + /* + * We expect winImmChannelMask to always be a superset of + * winImmInterlockMask. We should never interlock with a window + * immediate channel if we're not also going to kick off that + * window immediate channel. + */ + nvAssert((winImmChannelMask & pChannel->channelMask) != 0); + + update |= DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, _ENABLE); + } else { + update |= DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, _DISABLE); + } + + // Nothing currently requires updating a window channel without releasing + // ELV. 
+    nvAssert(releaseElv);
+
+    if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE,
+                       interlockChannelMask)) {
+        interlockFlags |=
+            DRF_DEF(C37E, _SET_INTERLOCK_FLAGS, _INTERLOCK_WITH_CORE, _ENABLE);
+    }
+
+    for (head = 0; head < NV_EVO_CHANNEL_MASK_CURSOR__SIZE; head++) {
+        if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _CURSOR, head, _ENABLE,
+                               interlockChannelMask)) {
+            interlockFlags |=
+                DRF_IDX_DEF(C37E, _SET_INTERLOCK_FLAGS,
+                            _INTERLOCK_WITH_CURSOR, head, _ENABLE);
+        }
+    }
+
+    for (window = 0; window < NV_EVO_CHANNEL_MASK_WINDOW__SIZE; window++) {
+        if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE,
+                               interlockChannelMask)) {
+            windowInterlockFlags |=
+                DRF_IDX_DEF(C37E, _SET_WINDOW_INTERLOCK_FLAGS,
+                            _INTERLOCK_WITH_WINDOW, window, _ENABLE);
+        }
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_INTERLOCK_FLAGS, 1);
+    nvDmaSetEvoMethodData(pChannel, interlockFlags);
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_WINDOW_INTERLOCK_FLAGS, 1);
+    nvDmaSetEvoMethodData(pChannel, windowInterlockFlags);
+
+    /*
+     * If we determined that this update will transition from NULL to non-NULL
+     * ctxdma or vice-versa, bookend this update method with software methods
+     * to notify RM to apply a workaround for hardware bug 2193096.
+     */
+    if (transitionWAR) {
+        nvDmaSetStartEvoMethod(pChannel, NVC57E_SW_SET_MCLK_SWITCH, 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_DEF(C57E, _SW_SET_MCLK_SWITCH, _ENABLE, _FALSE));
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37E_UPDATE, 1);
+    nvDmaSetEvoMethodData(pChannel, update);
+
+    if (transitionWAR) {
+        nvDmaSetStartEvoMethod(pChannel, NVC57E_SW_SET_MCLK_SWITCH, 1);
+        nvDmaSetEvoMethodData(pChannel,
+            DRF_DEF(C57E, _SW_SET_MCLK_SWITCH, _ENABLE, _TRUE));
+    }
+
+    UpdateWindowIMM(pChannel, winImmChannelMask,
+                    winImmInterlockMask, releaseElv);
+
+    nvDmaKickoffEvo(pChannel);
+}
+
+/*!
+ * This function finds any fliplocked channels in the current update and pushes
+ * flips for them, setting the appropriate fliplock pin and interlock masks.
+ *
+ * All of this complexity is here to support the case where multiple heads on a
+ * single GPU are fliplocked together, but flip requests come in for only a
+ * subset of those heads at a time (e.g., separate X screens on a single GPU).
+ * Unlike previous hardware, we're required to interlock all channels which are
+ * part of a fliplock update, instead of just using fliplock across heads.
+ *
+ * There are two scenarios:
+ * a) All fliplocked channels on this GPU are already part of this update. In
+ *    that case we just need to set the appropriate fliplock pin for each, and
+ *    we're done -- they're already interlocked.
+ * b) Some fliplocked channels are not part of this update. We still need to
+ *    set them in the interlock mask, but it's dangerous to interlock with any
+ *    channels *not* in the fliplock group; as an example:
+ *    With two separate X screens on a single GPU, each driving one monitor,
+ *    fliplocked together, if we get a flip request for screen 0/head 0 that
+ *    interlocks core and base, then a second flip request for screen 1/head 1
+ *    that interlocks core and base, we would end up programming one flip on
+ *    the window on head 0, one flip on the window on head 1, and two flips in
+ *    the core channel. The second core channel flip would never complete
+ *    since it would be waiting for an interlock with the other window
+ *    channels.
+ * + * To handle this case we pull the fliplocked channels out of this update + * and update them interlocked with all fliplocked channels (including those + * that aren't in this update), then proceed with a normal interlocked + * update excluding the fliplocked channels. + * + * \return Channel mask of channels which were handled by this function. + * Channels in this mask should be considered done and have no + * further updates pushed. No other channels should be + * interlocked with them. + */ +static NVEvoChannelMask ProcessFlipLockUpdates( + NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 *pFlipLockPin, + const NVEvoUpdateState *updateState) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head, window; + /* Channels that are part of this update which need to be fliplocked. */ + NVEvoChannelMask flipLockUpdateMask = 0; + /* All channels on this subdevice which are fliplocked. */ + NVEvoChannelMask flipLockAllMask = 0; + /* Channels which this function has handled and do not need further + * processing. */ + NVEvoChannelMask handledMask = 0; + NVEvoLockPin pin = NV_EVO_LOCK_PIN_ERROR; + NvU32 hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + + /* First check if any of the fliplock-qualifying channels are actually + * fliplocked, and determine which pin they're using. */ + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock) { + /* Convert the head index to a window index (two windows per head, + * one "base" and one "overlay"; we only fliplock "base") */ + NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, head * 2, _ENABLE); + if (updateState->subdev[sd].flipLockQualifyingMask & windowMask) { + if (flipLockUpdateMask == 0) { + pin = pHC->flipLockPin; + } else { + /* For now, we only support kicking off a single fliplock + * group as part of a single update call. */ + nvAssert(pin == pHC->flipLockPin); + } + flipLockUpdateMask |= windowMask; + } + } + } + + /* If we don't have any fliplocked updates, then we're done. */ + if (flipLockUpdateMask == 0) { + goto done; + } + + /* + * Gather all of the channels on this GPU which are part of this fliplock + * group (some of which may not be part of this update). + */ + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock && (pHC->flipLockPin == pin)) { + NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, head * 2, _ENABLE); + flipLockAllMask |= windowMask; + } + } + + /* Convert the pin to a hardware enum. */ + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pin)) { + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 + + (pin - NV_EVO_LOCK_PIN_INTERNAL_0); + } else { + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(pin - NV_EVO_LOCK_PIN_0); + } + + /* If we're updating all of the fliplocked channels in this update, we can + * interlock with other channels as normal. */ + if (flipLockUpdateMask == flipLockAllMask) { + goto done; + } + + /* + * Kick off each of our update channels, using the full fliplock mask and + * hwPin calculated above. 
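+     *
+     * Windows outside flipLockUpdateMask still get their window-immediate
+     * updates kicked off via UpdateWindowIMM() so pending immediate flips
+     * are not stranded; afterwards, hwPin is reset to _LOCK_PIN_NONE so the
+     * remaining (non-fliplocked) channels update normally.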
+ */ + nvAssert((flipLockUpdateMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + for (window = 0; window < pDevEvo->numWindows; window++) { + const NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE); + NVEvoChannelMask winImmChannelMask = + updateState->subdev[sd].winImmChannelMask; + NVEvoChannelMask winImmInterlockMask = + updateState->subdev[sd].winImmInterlockMask; + if (flipLockUpdateMask & windowMask) { + const NvBool transitionWAR = + (updateState->subdev[sd].flipTransitionWAR & windowMask) != 0; + UpdateWindowC3(pDevEvo->window[window], + flipLockAllMask, + winImmChannelMask, + winImmInterlockMask, + transitionWAR, + hwPin, TRUE /* releaseElv */); + } else { + UpdateWindowIMM(pDevEvo->window[window], winImmChannelMask, + winImmInterlockMask, TRUE /* releaseElv */); + } + } + handledMask = flipLockUpdateMask; + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + +done: + *pFlipLockPin = hwPin; + return handledMask; +} + +static void EvoUpdateC3(NVDevEvoPtr pDevEvo, + const NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NvU32 sd, window; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoChannelMask updateChannelMask = + updateState->subdev[sd].channelMask; + const NVEvoChannelMask noCoreInterlockMask = + updateState->subdev[sd].noCoreInterlockMask; + NVEvoChannelMask coreInterlockMask = + updateChannelMask & ~noCoreInterlockMask; + const NvU32 subDeviceMask = (1 << sd); + NvU32 flipLockPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + if (updateState->subdev[sd].flipLockQualifyingMask) { + NVEvoChannelMask handledChannels = 0; + + nvAssert((updateState->subdev[sd].flipLockQualifyingMask & + ~updateChannelMask) == 0); + nvAssert((updateState->subdev[sd].flipLockQualifyingMask & + updateState->subdev[sd].noCoreInterlockMask) == 0); + + handledChannels = + ProcessFlipLockUpdates(pDevEvo, sd, &flipLockPin, updateState); + + updateChannelMask &= ~handledChannels; + coreInterlockMask &= ~handledChannels; + } + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + const NVEvoChannelMask thisInterlockMask = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + coreInterlockMask) ? coreInterlockMask : 0; + UpdateCoreC3(pDevEvo->core, thisInterlockMask, flipLockPin, + releaseElv); + } + + for (window = 0; window < pDevEvo->numWindows; window++) { + const NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE); + NVEvoChannelMask winImmChannelMask = + updateState->subdev[sd].winImmChannelMask; + NVEvoChannelMask winImmInterlockMask = + updateState->subdev[sd].winImmInterlockMask; + if (updateChannelMask & windowMask) { + const NvBool transitionWAR = + (updateState->subdev[sd].flipTransitionWAR & windowMask) != 0; + NVEvoChannelMask thisInterlockMask = + FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + coreInterlockMask) ? coreInterlockMask : 0; + UpdateWindowC3(pDevEvo->window[window], + thisInterlockMask, + winImmChannelMask, + winImmInterlockMask, + transitionWAR, + flipLockPin, + releaseElv); + } else { + UpdateWindowIMM(pDevEvo->window[window], winImmChannelMask, + winImmInterlockMask, releaseElv); + } + } + + nvPopEvoSubDevMask(pDevEvo); + } +} + +/*! + * Initialize head-specific IMP param fields. + * + * Initialize the NVC372_CTRL_IMP_HEAD for the specific head. + * + * \param[out] pImpHead The param structure to initialize. 
+ * \param[in] pTimings The rastering timings and viewport configuration. + * \param[in] head The number of the head that will be driven. + * + * \return FALSE iff the parameters aren't even legal for HW. + */ +static NvBool AssignPerHeadImpParams(NVC372_CTRL_IMP_HEAD *pImpHead, + const NVHwModeTimingsEvo *pTimings, + const int head, + const NVEvoScalerCaps *pScalerCaps) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + struct NvKmsScalingUsageBounds scalingUsageBounds = { }; + + pImpHead->headIndex = head; + + /* raster timings */ + + pImpHead->maxPixelClkKHz = pTimings->pixelClock; + + pImpHead->rasterSize.width = pTimings->rasterSize.x; + pImpHead->rasterSize.height = pTimings->rasterSize.y; + pImpHead->rasterBlankStart.X = pTimings->rasterBlankStart.x; + pImpHead->rasterBlankStart.Y = pTimings->rasterBlankStart.y; + pImpHead->rasterBlankEnd.X = pTimings->rasterBlankEnd.x; + pImpHead->rasterBlankEnd.Y = pTimings->rasterBlankEnd.y; + pImpHead->rasterVertBlank2.yStart = pTimings->rasterVertBlank2Start; + pImpHead->rasterVertBlank2.yEnd = pTimings->rasterVertBlank2End; + + /* XXX TODO: Fill in correct scanlock information (only needed for + * MIN_VPSTATE). */ + pImpHead->control.masterLockMode = NV_DISP_LOCK_MODE_NO_LOCK; + pImpHead->control.masterLockPin = NV_DISP_LOCK_PIN_UNSPECIFIED; + pImpHead->control.slaveLockMode = NV_DISP_LOCK_MODE_NO_LOCK; + pImpHead->control.slaveLockPin = NV_DISP_LOCK_PIN_UNSPECIFIED; + + if (!nvComputeScalingUsageBounds(pScalerCaps, + pViewPort->in.width, pViewPort->in.height, + pViewPort->out.width, pViewPort->out.height, + pViewPort->hTaps, pViewPort->vTaps, + &scalingUsageBounds)) { + return FALSE; + } + pImpHead->bUpscalingAllowedV = scalingUsageBounds.vUpscalingAllowed; + pImpHead->maxDownscaleFactorV = scalingUsageBounds.maxVDownscaleFactor; + pImpHead->maxDownscaleFactorH = scalingUsageBounds.maxHDownscaleFactor; + pImpHead->outputScalerVerticalTaps = + NVEvoScalerTapsToNum(scalingUsageBounds.vTaps); + + if (!ComputeMinFrameIdle(pTimings, + &pImpHead->minFrameIdle.leadingRasterLines, + &pImpHead->minFrameIdle.trailingRasterLines)) { + return FALSE; + } + + /* Assume we'll need the full 1025-entry output LUT. */ + pImpHead->lut = NVC372_CTRL_IMP_LUT_USAGE_1025; + + /* Cursor width, in units of 32 pixels. Assume we use the maximum size. */ + pImpHead->cursorSize32p = 256 / 32; + + pImpHead->bEnableDsc = pTimings->hdmiFrlConfig.dscInfo.bEnableDSC || + pTimings->dpDsc.enable; + + return TRUE; +} + +/*! + * Initialize window-specific IMP param fields. + * + * Initialize the NVC372_CTRL_IMP_WINDOW for the specific window. + * + * \param[out] pImpWindow The param structure to initialize. + * \param[in] pViewPort The viewport configuration for the head that + * the window is bound to. + * \param[in] supportedFormats The surface memory formats that can be + * supported on this window. + * \param[in] window The number of the window. + * \param[in] head The number of the head that the window is + * bound to. 
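+ * \param[in]  pScaling         The scaling usage bounds for the window.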
+ */ +static void AssignPerWindowImpParams(NVC372_CTRL_IMP_WINDOW *pImpWindow, + const NVHwModeViewPortEvo *pViewPort, + const NvU64 supportedFormats, + const struct NvKmsScalingUsageBounds *pScaling, + const int window, + const int head) +{ + pImpWindow->windowIndex = window; + pImpWindow->owningHead = head; + + pImpWindow->formatUsageBound = 0; + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_1_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_2_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_4_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_8_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_PACKED_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_420; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_420; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_YUV_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_YUV_PLANAR_420; + } + + if (pImpWindow->formatUsageBound == 0) { + nvAssert(!"Unknown format in AssignPerWindowImpParams"); + } + + pImpWindow->maxPixelsFetchedPerLine = + GetMaxPixelsFetchedPerLine(pViewPort->in.width, + pScaling->maxHDownscaleFactor); + + pImpWindow->maxDownscaleFactorH = pScaling->maxHDownscaleFactor; + pImpWindow->maxDownscaleFactorV = pScaling->maxVDownscaleFactor; + pImpWindow->bUpscalingAllowedV = pScaling->vUpscalingAllowed; + pImpWindow->inputScalerVerticalTaps = + NVEvoScalerTapsToNum(pScaling->vTaps); + + /* Assume we need a full 1025-entry window (input) LUT and no tone-mapping + * output (TMO) LUT. 
*/ + pImpWindow->lut = NVC372_CTRL_IMP_LUT_USAGE_1025; + pImpWindow->tmoLut = NVC372_CTRL_IMP_LUT_USAGE_NONE; +} + +static void +EvoIsModePossibleC3(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVEvoCapabilitiesPtr pEvoCaps = &pDevEvo->gpus[0].capabilities; + NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pImp = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp)); + NvBool result = FALSE; + NvU32 head; + NvU32 ret; + + nvkms_memset(pImp, 0, sizeof(*pImp)); + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVHwModeTimingsEvo *pTimings = pInput->head[head].pTimings; + const struct NvKmsUsageBounds *pUsage = pInput->head[head].pUsage; + const NVHwModeViewPortEvo *pViewPort; + NvU8 impHeadIndex; + NvU32 layer; + + if (pTimings == NULL) { + continue; + } + + pViewPort = &pTimings->viewPort; + + impHeadIndex = pImp->numHeads; + pImp->numHeads++; + nvAssert(impHeadIndex < NVC372_CTRL_MAX_POSSIBLE_HEADS); + + if (!AssignPerHeadImpParams(&pImp->head[impHeadIndex], + pTimings, + head, + &pEvoCaps->head[head].scalerCaps)) { + goto done; + } + + /* XXXnvdisplay: This assumes a fixed window<->head mapping */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pUsage->layer[layer].usable) { + continue; + } + + nvAssert(pImp->numWindows < NVC372_CTRL_MAX_POSSIBLE_WINDOWS); + + AssignPerWindowImpParams( + &pImp->window[pImp->numWindows], + pViewPort, + pUsage->layer[layer].supportedSurfaceMemoryFormats, + &pUsage->layer[layer].scaling, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + head); + + pImp->numWindows++; + } + } + + pImp->base.subdeviceIndex = pDispEvo->displayOwner; + + /* XXXnvdisplay: Set bUseCachedPerfState? */ + + /* + * Set NEED_MIN_VPSTATE if reallocBandwidth != NONE. RM-IMP will only + * output the min required display bandwidth values if NEED_MIN_VPSTATE + * is set. + */ + if (pInput->requireBootClocks || + (pInput->reallocBandwidth != NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE)) { + // XXX TODO: IMP requires lock pin information if pstate information is + // requested. For now, just assume no locking. + pImp->options = NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->rmCtrlHandle, + NVC372_CTRL_CMD_IS_MODE_POSSIBLE, + pImp, sizeof(*pImp)); + + // XXXnvdisplay TODO: check pImp->minImpVPState if + // pInput->requireBootClocks is true? + if (ret != NV_OK || !pImp->bIsPossible) { + goto done; + } + + result = TRUE; + +done: + pOutput->possible = result; + if (pOutput->possible) { + pOutput->minRequiredBandwidthKBPS = pImp->minRequiredBandwidthKBPS; + pOutput->floorBandwidthKBPS = pImp->floorBandwidthKBPS; + } + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS); +} + +static void EvoPrePostIMPC3(NVDispEvoPtr pDispEvo, NvBool isPre) +{ + /* Nothing to do on nvdisplay -- pre/post IMP calls are not required. 
*/ +} + +static void +EvoFlipC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +static NvBool IsCscMatrixIdentity(const struct NvKmsCscMatrix *matrix) +{ + const struct NvKmsCscMatrix identity = NVKMS_IDENTITY_CSC_MATRIX; + + int y; + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + if (matrix->m[y][x] != identity.m[y][x]) { + return FALSE; + } + } + } + + return TRUE; +} + +/* + * Returns TRUE iff the CSC should be enabled (i.e., the matrix is not the + * identity matrix). + */ +static NvBool SetCscMatrixC3(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + NvU32 method = NVC37E_SET_CSC_RED2RED; + int y; + + if (IsCscMatrixIdentity(matrix)) { + return FALSE; + } + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. + NvU32 val = DRF_NUM(C37E, _SET_CSC_RED2RED, _COEFF, + matrix->m[y][x]); + + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, val); + + method += 4; + } + } + + return TRUE; +} + +static void SetCscMatrixC5Wrapper(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix, + NvU32 coeffMethod, NvU32 controlMethod, + NvU32 enableMethodData, + NvU32 disableMethodData) +{ + int y; + + if (IsCscMatrixIdentity(matrix)) { + nvDmaSetStartEvoMethod(pChannel, controlMethod, 1); + nvDmaSetEvoMethodData(pChannel, disableMethodData); + return; + } + + nvDmaSetStartEvoMethod(pChannel, controlMethod, 1); + nvDmaSetEvoMethodData(pChannel, enableMethodData); + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. + // + // Note that it doesn't matter whether we use the CSC00 or CSC11 + // methods to truncate since they're identical. + NvU32 val = DRF_NUM(C57E, _SET_CSC00COEFFICIENT_C00, _VALUE, + matrix->m[y][x]); + + nvDmaSetStartEvoMethod(pChannel, coeffMethod, 1); + nvDmaSetEvoMethodData(pChannel, val); + + coeffMethod += 4; + } + } +} + +static void SetCsc00MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC00COEFFICIENT_C00, NVC57E_SET_CSC00CONTROL, + DRF_DEF(C57E, _SET_CSC00CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC00CONTROL, _ENABLE, _DISABLE)); +} + +static void SetCsc11MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC11COEFFICIENT_C00, NVC57E_SET_CSC11CONTROL, + DRF_DEF(C57E, _SET_CSC11CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC11CONTROL, _ENABLE, _DISABLE)); +} + +/* + * WAR for GV100 HW bug 1978592: + * + * Timestamped flips allow SW to specify the earliest time that the next UPDATE + * will complete. Due to a HW bug, GV100 waits for the timestamp in the ARMED + * state (i.e. the timestamps that were pushed in the previous UPDATE) instead + * of the timestamp in the ASSEMBLY state (the time we want to postpone this + * flip until). + * + * This WAR inserts an additional UPDATE to push the timestamp from ASSEMBLY to + * ARMED while changing no other state, so the following normal UPDATE can + * wait for the correct timestamp. 
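+ *
+ * In other words, the extra UPDATE only moves the ASSEMBLY-state timestamp
+ * into ARMED without presenting a new frame; the normal UPDATE that follows
+ * can then wait on the intended timestamp.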
+ * + * This update needs to have the following characteristics: + * + * - MIN_PRESENT_INTERVAL 0 + * - TIMESTAMP_MODE _ENABLE + * - All other SET_PRESENT_CONTROL fields unmodified from previous UPDATE + * - SET_UPDATE_TIMESTAMP (target timestamp) + * - RELEASE_ELV _FALSE + * - Non-interlocked + * - Non-fliplocked + */ +static void +InsertAdditionalTimestampFlip(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + NvU32 presentControl = pChannel->oldPresentControl; + + /* This hardware bug is only present on GV100 which uses window + * class C37E. */ + nvAssert(pChannel->hwclass == NVC37E_WINDOW_CHANNEL_DMA); + + nvAssert(pHwState->timeStamp != 0); + + /* + * Update the necessary fields in SET_PRESENT_CONTROL without modifying + * the existing values by using the cached SET_PRESENT_CONTROL values + * from the previous update. + * + * Note that BEGIN_MODE must not be changed here; even though BEGIN_MODE + * may currently be NON_TEARING, a NON_TEARING + MIN_PRESENT_INTERVAL 0 + * flip will be correctly collapsed with the surrounding + * MIN_PRESENT_INTERVAL 1 flips. If we were to change BEGIN_MODE to + * IMMEDIATE, this would cause an additional delay due to the transition + * from NON_TEARING to IMMEDIATE. + */ + presentControl = FLD_SET_DRF_NUM(C37E, _SET_PRESENT_CONTROL, + _MIN_PRESENT_INTERVAL, + 0, presentControl); + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, + _ENABLE, presentControl); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, presentControl); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + // Issue non-interlocked, non-fliplocked, non-ReleaseElv UPDATE + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, + NVC37E_SET_WINDOW_INTERLOCK_FLAGS, + 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37E, _UPDATE, _RELEASE_ELV, _FALSE) | + DRF_NUM(C37E, _UPDATE, _FLIP_LOCK_PIN, + NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE) | + DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, + _DISABLE)); +} + +static void +EvoProgramSemaphore3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState) +{ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + + nvAssert(pNIso->format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pNIso->offsetInWords % 4) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + 
nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37E, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords / 4)); + } +} + +/*! + * On Tegra, syncpts are used for synchronization between SW and HW, + * and also across HW engines. Since NvDisplay 4.0 only natively + * understands semaphores, there's a SHIM layer in the memory subsystem + * that will convert semaphore acquires/releases into corresponding + * syncpoint reads/writes. As such, each syncpoint is mapped to an + * underlying 'dummy' semaphore surface, and the methods for these surfaces + * need to be programmed as if they were real memory-backed semaphores. + */ + +static void +EvoProgramSemaphore6(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState) +{ + NvU32 hCtxDma, offset, acqMode, relMode, value; + const NVFlipNIsoSurfaceEvoHwState *pNIso; + + /*! Program Acq-only semaphore */ + hCtxDma = offset = acqMode = relMode = value = 0; + if (pHwState->syncObject.usingSyncpt) { + hCtxDma = pHwState->syncObject.u.syncpts.preCtxDma; + offset = 0; + acqMode = DRF_DEF(C67E, _SET_ACQ_SEMAPHORE_CONTROL, _ACQ_MODE, _CGEQ); + value = pHwState->syncObject.u.syncpts.preValue; + } else { + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo != NULL) { + pNIso = &pHwState->syncObject.u.semaphores.acquireSurface; + hCtxDma = pNIso->pSurfaceEvo->planes[0].ctxDma; + offset = pNIso->offsetInWords / 4; + acqMode = DRF_DEF(C67E, _SET_ACQ_SEMAPHORE_CONTROL, _ACQ_MODE, _EQ); + value = pHwState->syncObject.u.semaphores.acquireValue; + } + } + + /*! set ctx dma handle */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_CONTEXT_DMA_ACQ, _SEMAPHORE_HANDLE, hCtxDma)); + /*! set semaphore control and acq mode */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_ACQ_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, offset | acqMode); + /*! set semaphore value */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_ACQ_SEMAPHORE_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_ACQ_SEMAPHORE_VALUE, _VALUE, value)); + + /*! Program Rel-only semaphore */ + hCtxDma = offset = acqMode = relMode = value = 0; + if (pHwState->syncObject.usingSyncpt) { + hCtxDma = pHwState->syncObject.u.syncpts.postCtxDma; + offset = 0; + acqMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _SKIP_ACQ, _TRUE); + relMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _REL_MODE, _WRITE); + value = pHwState->syncObject.u.syncpts.postValue; + /*! increase local max val as well */ + if (hCtxDma != 0) { + pChannel->postSyncpt.syncptMaxVal++; + } + } else { + if (pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) { + pNIso = &pHwState->syncObject.u.semaphores.releaseSurface; + hCtxDma = pNIso->pSurfaceEvo->planes[0].ctxDma; + offset = pNIso->offsetInWords / 4; + acqMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _SKIP_ACQ, _TRUE); + relMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _REL_MODE, _WRITE); + value = pHwState->syncObject.u.semaphores.releaseValue; + } + } + + /*! 
set ctx dma handle */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_CONTEXT_DMA_SEMAPHORE, _HANDLE, hCtxDma)); + /*! set semaphore control and acq-rel mode */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, offset | acqMode | relMode); + /*! set semaphore value */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_SEMAPHORE_RELEASE, _VALUE, value)); +} + +static NvBool +EvoFlipC3Common(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo; + NvU32 presentControl, eye; + NvU32 storage; + NvU8 planeIndex; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + NvU32 value = 0; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvAssert(pNIso->format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pNIso->offsetInWords % 4) == 0); + + value = FLD_SET_DRF_NUM(C37E, _SET_NOTIFIER_CONTROL, _OFFSET, + pNIso->offsetInWords / 4, value); + + if (pHwState->completionNotifier.awaken) { + value = FLD_SET_DRF(C37E, _SET_NOTIFIER_CONTROL, _MODE, + _WRITE_AWAKEN, value); + } else { + value = FLD_SET_DRF(C37E, _SET_NOTIFIER_CONTROL, _MODE, + _WRITE, value); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) { + // Disable this window, and set all its ctxdma entries to NULL. 
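+        // Writing a handle of 0 to each SET_CONTEXT_DMA_ISO slot unbinds
+        // any previously-bound surface; EvoFlipC3Common() then returns
+        // FALSE so callers skip the surface-specific methods below.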
+ for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + const NvU8 ctxDmaIdx = EyeAndPlaneToCtxDmaIdx(eye, planeIndex); + nvDmaSetStartEvoMethod(pChannel, + NVC37E_SET_CONTEXT_DMA_ISO(ctxDmaIdx), + 1); + nvDmaSetEvoMethodData(pChannel, 0); + } + } + + return FALSE; + } + + presentControl = DRF_NUM(C37E, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval); + + if (pHwState->timeStamp != 0) { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE, + _ENABLE, presentControl); + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE, + _DISABLE, presentControl); + } + + if (pHwState->tearing) { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _IMMEDIATE, presentControl); + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _NON_TEARING, presentControl); + } + + if (pHwState->pSurfaceEvo[NVKMS_RIGHT]) { + if (pHwState->perEyeStereoFlip) { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE, + _AT_ANY_FRAME, presentControl); + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE, + _PAIR_FLIP, presentControl); + } + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE, + _MONO, presentControl); + } + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, presentControl); + + /* + * GV100 timestamped flips need a duplicate update which only changes + * TIMESTAMP_MODE and MIN_PRESENT_INTERVAL fields in SET_PRESENT_CONTROL; + * to allow updating these fields without changing anything else in + * SET_PRESENT_CONTROL, cache the values we sent in previous flips here. + * (bug 1990958) + */ + pChannel->oldPresentControl = presentControl; + + /* Set the surface parameters. 
*/ + FOR_ALL_EYES(eye) { + const NVSurfaceEvoRec *pSurfaceEvoPerEye = pHwState->pSurfaceEvo[eye]; + NvU8 numSurfacePlanes = 0; + + if (pSurfaceEvoPerEye != NULL) { + pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvoPerEye->format); + numSurfacePlanes = pFormatInfo->numPlanes; + } + + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + NvU32 ctxdma = 0; + NvU64 offset = 0; + const NvU8 ctxDmaIdx = EyeAndPlaneToCtxDmaIdx(eye, planeIndex); + + if (planeIndex < numSurfacePlanes) { + ctxdma = pSurfaceEvoPerEye->planes[planeIndex].ctxDma; + offset = pSurfaceEvoPerEye->planes[planeIndex].offset; + } + + nvDmaSetStartEvoMethod(pChannel, + NVC37E_SET_CONTEXT_DMA_ISO(ctxDmaIdx), 1); + nvDmaSetEvoMethodData(pChannel, ctxdma); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_OFFSET(ctxDmaIdx), 1); + nvDmaSetEvoMethodData(pChannel, nvCtxDmaOffsetFromBytes(offset)); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE, _WIDTH, pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels) | + DRF_NUM(C37E, _SET_SIZE, _HEIGHT, pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE_IN, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE_IN, _WIDTH, pHwState->sizeIn.width) | + DRF_NUM(C37E, _SET_SIZE_IN, _HEIGHT, pHwState->sizeIn.height)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE_OUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE_OUT, _WIDTH, pHwState->sizeOut.width) | + DRF_NUM(C37E, _SET_SIZE_OUT, _HEIGHT, pHwState->sizeOut.height)); + + /* XXX nvdisplay: enforce pitch/BL layout are consistent between eyes at a + * higher level */ + + storage = 0; + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->layout == + NvKmsSurfaceMemoryLayoutBlockLinear) { + const NvU32 blockHeight = pHwState->pSurfaceEvo[NVKMS_LEFT]->log2GobsPerBlockY; + storage |= DRF_NUM(C37E, _SET_STORAGE, _BLOCK_HEIGHT, blockHeight); + if (pDevEvo->hal->caps.supportsSetStorageMemoryLayout) { + storage |= DRF_DEF(C37E, _SET_STORAGE, _MEMORY_LAYOUT, _BLOCKLINEAR); + } + } else if (pDevEvo->hal->caps.supportsSetStorageMemoryLayout) { + storage |= DRF_DEF(C37E, _SET_STORAGE, _MEMORY_LAYOUT, _PITCH); + } + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_STORAGE, 1); + nvDmaSetEvoMethodData(pChannel, storage); + + pFormatInfo = nvKmsGetSurfaceMemoryFormatInfo( + pHwState->pSurfaceEvo[NVKMS_LEFT]->format); + + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + NvU32 pitch; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PLANAR_STORAGE(planeIndex), + 1); + + if (planeIndex >= pFormatInfo->numPlanes) { + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, 0)); + continue; + } + + /* + * Per nvdClass_01.mfs, the HEAD_SET_STORAGE_PITCH "units are blocks + * if the layout is BLOCKLINEAR, the units are multiples of 64 bytes + * if the layout is PITCH." + */ + pitch = pHwState->pSurfaceEvo[NVKMS_LEFT]->planes[planeIndex].pitch; + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->layout == + NvKmsSurfaceMemoryLayoutBlockLinear) { + /* pitch is already in units of blocks; no conversion needed. 
*/ + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, pitch)); + } else { + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pitch & 63) == 0); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, pitch >> 6)); + } + } + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, format); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + return TRUE; +} + +/* + * This function returns TRUE if precomp needs to swap the U and V components to + * support the given input surface format. For all such formats, + * SetParams.SwapUV needs to be enabled. + * + * Due to the "feature" described in bug 1640117, there's a mismatch in the + * ihub<->precomp interface: + * - For all Yx___UxVx_N444 and Yx___UxVx_N422 formats, ihub will fetch and send + * the V sample as the first chroma byte, and the U sample as the second byte. + * However, precomp expects the U sample as the first byte, and the V sample + * as the second byte. + * - For all Yx___VxUx_N420 formats, ihub will fetch and send the U sample as + * the first chroma byte, and the V sample as the second byte. + * However, precomp expects the V sample as the first byte, and the U sample + * as the second byte. + * + * In the above explanation, note that ihub simply fetches and sends the chroma + * bytes in the same order that they're packed in memory. + */ +static NvBool IsSurfaceFormatUVSwapped( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + return TRUE; + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return FALSE; + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return FALSE; + } + + return FALSE; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C370 (Volta) NVDISPLAY class. 
+ * + * Volta supports YUV422 packed, but this function excludes the corresponding + * mappings because the required programming support hasn't been added to NVKMS + * yet. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. + */ +static NvU32 nvHwFormatFromKmsFormatC3( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return NVC37E_SET_PARAMS_FORMAT_I8; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NVC37E_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatR5G6B5: + return NVC37E_SET_PARAMS_FORMAT_R5G6B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + return NVC37E_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NVC37E_SET_PARAMS_FORMAT_X8R8G8B8; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + return NVC37E_SET_PARAMS_FORMAT_A8B8G8R8; + case NvKmsSurfaceMemoryFormatX8B8G8R8: + return NVC37E_SET_PARAMS_FORMAT_X8B8G8R8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NVC37E_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + return NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C570 (Turing) NVDISPLAY class. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
+ */ +static NvU32 nvHwFormatFromKmsFormatC5( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + return NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422; + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + return NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422; + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + return NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444; + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + return NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422; + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + return NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420; + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + return NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444; + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + return NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422; + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + return NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420; + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + return NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444; + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + return NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422; + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + return NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420; + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return nvHwFormatFromKmsFormatC3(format); + } + + return 0; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C670 (Orin and Ampere) NVDISPLAY class. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
+ */
+static NvU32 nvHwFormatFromKmsFormatC6(
+    const enum NvKmsSurfaceMemoryFormat format)
+{
+    switch (format) {
+    case NvKmsSurfaceMemoryFormatY8___U8___V8_N444:
+        return NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N444;
+    case NvKmsSurfaceMemoryFormatY8___U8___V8_N420:
+        return NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N420;
+    case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422:
+    case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422:
+    case NvKmsSurfaceMemoryFormatY8___U8V8_N444:
+    case NvKmsSurfaceMemoryFormatY8___V8U8_N444:
+    case NvKmsSurfaceMemoryFormatY8___U8V8_N422:
+    case NvKmsSurfaceMemoryFormatY8___V8U8_N422:
+    case NvKmsSurfaceMemoryFormatY8___U8V8_N420:
+    case NvKmsSurfaceMemoryFormatY8___V8U8_N420:
+    case NvKmsSurfaceMemoryFormatY10___U10V10_N444:
+    case NvKmsSurfaceMemoryFormatY10___V10U10_N444:
+    case NvKmsSurfaceMemoryFormatY10___U10V10_N422:
+    case NvKmsSurfaceMemoryFormatY10___V10U10_N422:
+    case NvKmsSurfaceMemoryFormatY10___U10V10_N420:
+    case NvKmsSurfaceMemoryFormatY10___V10U10_N420:
+    case NvKmsSurfaceMemoryFormatY12___U12V12_N444:
+    case NvKmsSurfaceMemoryFormatY12___V12U12_N444:
+    case NvKmsSurfaceMemoryFormatY12___U12V12_N422:
+    case NvKmsSurfaceMemoryFormatY12___V12U12_N422:
+    case NvKmsSurfaceMemoryFormatY12___U12V12_N420:
+    case NvKmsSurfaceMemoryFormatY12___V12U12_N420:
+    case NvKmsSurfaceMemoryFormatI8:
+    case NvKmsSurfaceMemoryFormatA1R5G5B5:
+    case NvKmsSurfaceMemoryFormatX1R5G5B5:
+    case NvKmsSurfaceMemoryFormatR5G6B5:
+    case NvKmsSurfaceMemoryFormatA8R8G8B8:
+    case NvKmsSurfaceMemoryFormatX8R8G8B8:
+    case NvKmsSurfaceMemoryFormatA8B8G8R8:
+    case NvKmsSurfaceMemoryFormatX8B8G8R8:
+    case NvKmsSurfaceMemoryFormatA2B10G10R10:
+    case NvKmsSurfaceMemoryFormatX2B10G10R10:
+    case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16:
+    case NvKmsSurfaceMemoryFormatR16G16B16A16:
+    case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32:
+        return nvHwFormatFromKmsFormatC5(format);
+    }
+
+    return 0;
+}
+
+static
+NVLutSurfaceEvoPtr EvoGetLutSurface3(NVDevEvoPtr pDevEvo,
+                                     NVEvoChannelPtr pChannel,
+                                     const NVFlipChannelEvoHwState *pHwState)
+{
+    NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
+    NvU32 head = pDevEvo->headForWindow[win];
+    NvBool found = FALSE;
+    NvU32 dispIndex = 0;
+    NvU32 sd;
+
+    if ((pHwState->pSurfaceEvo[NVKMS_LEFT] == NULL) ||
+        (head == NV_INVALID_HEAD)) {
+        return NULL;
+    }
+
+    /* Input LUT is explicitly enabled by the client */
+    if (pHwState->inputLut.pLutSurfaceEvo != NULL) {
+        return pHwState->inputLut.pLutSurfaceEvo;
+    }
+
+    /*
+     * For everything but I8 surfaces, we can just use the specified
+     * LUT, even if it's NULL.
+     * For I8 surfaces, we can only use the specified surface if it's
+     * non-NULL (an input LUT is required).
+     */
+    if (pHwState->pSurfaceEvo[NVKMS_LEFT]->format !=
+        NvKmsSurfaceMemoryFormatI8) {
+        return NULL;
+    }
+
+    /*
+     * The rest of the function is to handle the I8 case where no input
+     * LUT was specified: look up the LUT to use from the device.
+     */
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) {
+            if (found) {
+                nvAssert(dispIndex == pDevEvo->gpus[sd].pDispEvo->displayOwner);
+            } else {
+                dispIndex = pDevEvo->gpus[sd].pDispEvo->displayOwner;
+                found = TRUE;
+            }
+        }
+    }
+
+    nvAssert(found);
+
+    /*
+     * It is not allowed to change the input LUT on immediate flips. The
+     * higher-level code should make sure to disable tearing if there is a
+     * change in the surface format; curLUTIndex does not change until the
+     * next EvoSetLUTContextDma3() call, which also makes sure to disable
+     * tearing.
+ */ + const NvU32 lutIndex = + pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + + return pDevEvo->lut.head[head].LUT[lutIndex]; +} + +static void +EvoFlipC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvBool enableCSC, swapUV, flip3Return; + enum NvKmsSurfaceMemoryFormat format; + NVLutSurfaceEvoPtr pLutSurfaceEvo = + EvoGetLutSurface3(pDevEvo, pChannel, pHwState); + + if (pHwState->timeStamp != 0) { + InsertAdditionalTimestampFlip(pDevEvo, pChannel, pHwState, + updateState); + } + + flip3Return = EvoFlipC3Common(pDevEvo, pChannel, pHwState, updateState); + + /* program semaphore */ + EvoProgramSemaphore3(pDevEvo, pChannel, pHwState); + + if (!flip3Return) { + return; + } + + format = pHwState->pSurfaceEvo[NVKMS_LEFT]->format; + + enableCSC = SetCscMatrixC3(pChannel, &pHwState->cscMatrix); + swapUV = IsSurfaceFormatUVSwapped(format); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PARAMS, 1); + nvDmaSetEvoMethodData(pChannel, + (enableCSC ? DRF_DEF(C37E, _SET_PARAMS, _CSC, _ENABLE) : + DRF_DEF(C37E, _SET_PARAMS, _CSC, _DISABLE)) | + DRF_NUM(C37E, _SET_PARAMS, _FORMAT, nvHwFormatFromKmsFormatC3(format)) | + (swapUV ? DRF_DEF(C37E, _SET_PARAMS, _SWAP_UV, _ENABLE) : + DRF_DEF(C37E, _SET_PARAMS, _SWAP_UV, _DISABLE)) | + DRF_DEF(C37E, _SET_PARAMS, _UNDERREPLICATE, _DISABLE)); + + if (pLutSurfaceEvo) { + const NvU32 ctxDma = pLutSurfaceEvo->dispCtxDma; + const NvU32 origin = offsetof(NVEvoLutDataRec, base); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTROL_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _SIZE, _SIZE_1025) | + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _RANGE, _UNITY) | + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _OUTPUT_MODE, _INDEX)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_OFFSET_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_OFFSET_INPUT_LUT, _ORIGIN, origin)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_CONTEXT_DMA_INPUT_LUT, _HANDLE, ctxDma)); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } + + UpdateCompositionC3(pDevEvo, pChannel, + &pHwState->composition, updateState); +} + +static void +EvoFlipC5Common(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + enum NvKmsSurfaceMemoryFormat format; + NvBool swapUV; + NvU32 hTaps, vTaps; + NvBool scaling = FALSE; + NVLutSurfaceEvoPtr pLutSurfaceEvo = + EvoGetLutSurface3(pDevEvo, pChannel, pHwState); + + if (!EvoFlipC3Common(pDevEvo, pChannel, pHwState, updateState)) { + return; + } + + format = pHwState->pSurfaceEvo[NVKMS_LEFT]->format; + + swapUV = IsSurfaceFormatUVSwapped(format); + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_PARAMS, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_PARAMS, _FORMAT, nvHwFormatFromKmsFormatC6(format)) | + (swapUV ? DRF_DEF(C57E, _SET_PARAMS, _SWAP_UV, _ENABLE) : + DRF_DEF(C57E, _SET_PARAMS, _SWAP_UV, _DISABLE))); + + /* + * In nvdisplay 2, there was a fixed-function block in the precomp FMT + * module that was responsible for YUV->RGB conversion. + * + * In nvdisplay 3, that fixed-function block no longer exists. 
+ * In its place, there's a generic 3x4 S5.16 coefficient matrix that SW must + * explicitly configure to convert the input surface format to the internal + * RGB pipe native format. + */ + EvoSetFMTMatrixC5(pChannel, format); + + vTaps = (pHwState->vTaps >= NV_EVO_SCALER_5TAPS) ? + NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 : + NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2; + hTaps = (pHwState->hTaps >= NV_EVO_SCALER_5TAPS) ? + NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 : + NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTROL_INPUT_SCALER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CONTROL_INPUT_SCALER, _VERTICAL_TAPS, vTaps) | + DRF_NUM(C57E, _SET_CONTROL_INPUT_SCALER, _HORIZONTAL_TAPS, hTaps)); + + scaling = (pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height); + nvAssert(!(scaling && bypassComposition)); + + /* + * If scaling, enable the CSC0 and CSC1 pipelines so that we can scale in + * the non-linear ICtCp domain. + * + * If no scaling, just use CSC11 to convert from the input gamut to the + * output (panel) gamut, and disable everything else. + */ + if (scaling) { + ConfigureCsc0C5(pDevEvo, pChannel, TRUE); + ConfigureCsc1C5(pDevEvo, pChannel, TRUE); + } else { + ConfigureCsc0C5(pDevEvo, pChannel, FALSE); + ConfigureCsc1C5(pDevEvo, pChannel, FALSE); + + SetCsc11MatrixC5(pChannel, &pHwState->cscMatrix); + } + + // In nvdisplay 3, an ILUT is required to convert the input surface to FP16, + // unless the surface being displayed is already FP16 to begin with. + if (format == NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 || bypassComposition) { + pLutSurfaceEvo = NULL; + } else if (!pLutSurfaceEvo) { + pLutSurfaceEvo = pDevEvo->lut.defaultLut; + } + + if (pLutSurfaceEvo) { + const NvU32 ctxDma = pLutSurfaceEvo->dispCtxDma; + const NvU32 origin = offsetof(NVEvoLutDataRec, base); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_ILUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SET_ILUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_ILUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_ILUT_CONTROL, _MODE, _DIRECT10) | + DRF_NUM(C57E, _SET_ILUT_CONTROL, _SIZE, NV_LUT_VSS_HEADER_SIZE + + NV_NUM_EVO_LUT_ENTRIES)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_OFFSET_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_OFFSET_ILUT, _ORIGIN, origin)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTEXT_DMA_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CONTEXT_DMA_ILUT, _HANDLE, ctxDma)); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTEXT_DMA_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } + + UpdateCompositionC5(pDevEvo, pChannel, + &pHwState->composition, updateState, + bypassComposition); +} + +static void +EvoFlipC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + EvoFlipC5Common(pDevEvo, pChannel, pHwState, updateState, bypassComposition); + + /* Work around bug 2117571: whenever the tearing mode is changing, send a + * software method to notify RM. 
*/ + if (pHwState->tearing != pChannel->oldTearingMode) { + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + NvU32 head = pDevEvo->headForWindow[win]; + + if (head != NV_INVALID_HEAD) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_WINDOWS_NOTIFY_RM, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE_CHANGE, _TRUE) | + DRF_NUM(C57E, _WINDOWS_NOTIFY_RM, _ASSOCIATED_HEAD, head) | + (pHwState->tearing ? + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE, _OFF) : + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE, _ON))); + } + + pChannel->oldTearingMode = pHwState->tearing; + } + + /* program semaphore */ + EvoProgramSemaphore3(pDevEvo, pChannel, pHwState); +} + +static void +EvoFlipC6(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvBool fromTop = TRUE; + NvBool fromLeft = TRUE; + + NvU32 vDirVal = 0; + NvU32 hDirVal = 0; + + switch (pHwState->rrParams.rotation) { + case NVKMS_ROTATION_90: + case NVKMS_ROTATION_270: + nvAssert(!"Invalid rotation requested."); + /* Fall-through */ + case NVKMS_ROTATION_0: + break; + case NVKMS_ROTATION_180: + fromTop = FALSE; + fromLeft = FALSE; + break; + } + + if (pHwState->rrParams.reflectionX) { + fromLeft = !fromLeft; + } + if (pHwState->rrParams.reflectionY) { + fromTop = !fromTop; + } + + vDirVal = (fromTop ? + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _VERTICAL_DIRECTION, _FROM_TOP) : + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _VERTICAL_DIRECTION, _FROM_BOTTOM)); + hDirVal = (fromLeft ? + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _HORIZONTAL_DIRECTION, _FROM_LEFT) : + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _HORIZONTAL_DIRECTION, _FROM_RIGHT)); + + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SCAN_DIRECTION, 1); + nvDmaSetEvoMethodData(pChannel, vDirVal | hDirVal); + + EvoFlipC5Common(pDevEvo, pChannel, pHwState, updateState, bypassComposition); + + /* program semaphore */ + EvoProgramSemaphore6(pDevEvo, pChannel, pHwState); +} + +static void UpdateComposition(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + /* smaller => closer to front */ + NvU32 depth, + NvU32 colorKeySelect, + NvU32 constantAlpha, + NvU32 compositionFactorSelect, + const NVColorKey key, + NVEvoUpdateState *updateState) +{ + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_COMPOSITION_CONTROL, _COLOR_KEY_SELECT, colorKeySelect) | + DRF_NUM(C37E, _SET_COMPOSITION_CONTROL, _DEPTH, depth)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_CONSTANT_ALPHA, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_COMPOSITION_CONSTANT_ALPHA, _K1, constantAlpha)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_FACTOR_SELECT, 1); + nvDmaSetEvoMethodData(pChannel, compositionFactorSelect); + +#define UPDATE_COMPONENT(_COMP, _C, _c) \ + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_KEY_##_COMP, 1); \ + if (key.match##_C) { \ + nvDmaSetEvoMethodData(pChannel, \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MIN, key._c) | \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MAX, key._c)); \ + } else { \ + nvDmaSetEvoMethodData(pChannel, \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MIN, 0) | \ + DRF_SHIFTMASK(NVC37E_SET_KEY_##_COMP##_MAX)); \ + } + + if (colorKeySelect != + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE) { + UPDATE_COMPONENT(ALPHA, A, a); + UPDATE_COMPONENT(RED_CR, R, r); + UPDATE_COMPONENT(GREEN_Y, G, g); + 
UPDATE_COMPONENT(BLUE_CB, B, b); + } + +#undef UPDATE_COMPONENT +} + +static void EvoFlipTransitionWARC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState) +{ + /* Nothing to do for Volta */ +} + +/* + * Hardware bug 2193096 requires that we send special software methods around + * a window channel update that transitions from NULL ctxdma to non-NULL or + * vice versa. Below we compare the current hardware state in pSdHeadState + * against the state to be pushed in this update in pFlipState, and add any + * window(s) that qualify to the 'flipTransitionWAR' mask in the updateState. + */ +static void EvoFlipTransitionWARC5(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NvBool enabledPrev = + pSdHeadState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL; + const NvBool enabledNext = + pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL; + + if (enabledPrev != enabledNext) { + /* XXX TODO: dynamic window assignment */ + const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask); + updateState->subdev[sd].flipTransitionWAR |= + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, win, _ENABLE); + + nvAssert(pFlipState->dirty.layer[layer]); + } + } +} + +static void EvoFlipTransitionWARC6(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState) +{ + /* Nothing to do for Orin/Ampere for now */ +} + +static void +UpdateCompositionC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState) +{ + NvU32 colorKeySelect; + NvU32 compositionFactorSelect = 0; + NvU32 constantAlpha = 0; + NvU32 match; + + switch (pCompParams->colorKeySelect) { + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE: + colorKeySelect = + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE; + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC: + colorKeySelect = + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC; + + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST: + colorKeySelect = + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST; + + break; + default: + nvAssert(!"Invalid color key select"); + return; + } + + /* Match and nomatch pixels should not use alpha blending mode at once. */ + nvAssert((colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) || + (!NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[0])) || + (!NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[1]))); + + /* + * Match and nomatch pixels should not use blending mode PREMULT_ALPHA, + * NON_PREMULT_ALPHA, PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA + * at once. 
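+     *
+     * (Each of those four modes programs the single per-window constant
+     * alpha K1 -- 255 for the non-surface variants, surfaceAlpha for the
+     * surface variants -- so the match and no-match settings could
+     * otherwise conflict.)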
+ */ + nvAssert(pCompParams->blendingMode[0] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE || + pCompParams->blendingMode[0] == NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT || + pCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE || + pCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT); + + for (match = 0; match <= 1; match++) { + switch (pCompParams->blendingMode[match]) { + case NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE: + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _ONE) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _ZERO); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _ONE) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _ZERO); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT: + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _ZERO) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _ONE); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _ZERO) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _ONE); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + constantAlpha = 255; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + constantAlpha = 255; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA: + constantAlpha = pCompParams->surfaceAlpha; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA: + constantAlpha = pCompParams->surfaceAlpha; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + 
compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + default: + nvAssert(!"Invalid blend mode"); + return; + } + } + + UpdateComposition(pDevEvo, + pChannel, + pCompParams->depth, + colorKeySelect, + constantAlpha, + compositionFactorSelect, + pCompParams->colorKey, + updateState); +} + +static void EvoBypassCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NVEvoUpdateState *updateState) +{ + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SET_COMPOSITION_CONTROL, _BYPASS, _ENABLE)); +} + +static void +UpdateCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + if (bypassComposition) { + EvoBypassCompositionC5(pDevEvo, pChannel, updateState); + } else { + UpdateCompositionC3(pDevEvo, pChannel, pCompParams, updateState); + } +} + +/* + * The LUT entries in INDEX_1025_UNITY_RANGE have 16 bits, with the + * black value at 24576, and the white at 49151. Since the effective + * range is 16384, we treat this as a 14-bit LUT. However, we need to + * clear the low 3 bits to WAR hardware bug 813188. This gives us + * 14-bit LUT values, but only 11 bits of precision. + * XXXnvdisplay: Bug 813188 is supposed to be fixed on NVDisplay; can we expose + * more precision? + */ +static inline NvU16 ColorToLUTEntry(NvU16 val) +{ + const NvU16 val14bit = val >> 2; + return (val14bit & ~7) + 24576; +} + +/* + * Unlike earlier EVO implementations, the INDEX mode of the input LUT on + * NVDisplay is straightforward: the value of the input component is expanded + * to the LUT size by simply shifting left by the difference between the LUT + * index width and the component width. We do the same, here, to select the + * right LUT entry to fill. 
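+ *
+ * For example, an 8-bit component with this 10-bit LUT index is shifted
+ * left by 2 bits, so component value 255 selects entry 1020 of the
+ * 1025-entry LUT.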
+ */
+static inline NvU32 GetLUTIndex(int i, int componentSize)
+{
+    return i << (10 - componentSize);
+}
+
+static void
+EvoFillLUTSurfaceC3(NVEvoLutEntryRec *pLUTBuffer,
+                    const NvU16 *red,
+                    const NvU16 *green,
+                    const NvU16 *blue,
+                    int nColorMapEntries, int depth)
+{
+    int i;
+    NvU32 rSize, gSize, bSize;
+
+    switch (depth) {
+    case 15:
+        rSize = gSize = bSize = 5;
+        break;
+    case 16:
+        rSize = bSize = 5;
+        gSize = 6;
+        break;
+    case 8:
+    case 24:
+        rSize = gSize = bSize = 8;
+        break;
+    case 30:
+        rSize = gSize = bSize = 10;
+        break;
+    default:
+        nvAssert(!"invalid depth");
+        return;
+    }
+
+    for (i = 0; i < nColorMapEntries; i++) {
+        if (i < (1 << rSize)) {
+            pLUTBuffer[GetLUTIndex(i, rSize)].Red = ColorToLUTEntry(red[i]);
+        }
+        if (i < (1 << gSize)) {
+            pLUTBuffer[GetLUTIndex(i, gSize)].Green = ColorToLUTEntry(green[i]);
+        }
+        if (i < (1 << bSize)) {
+            pLUTBuffer[GetLUTIndex(i, bSize)].Blue = ColorToLUTEntry(blue[i]);
+        }
+    }
+}
+
+static inline float16_t ColorToFp16(NvU16 val, float32_t maxf)
+{
+    return nvUnormToFp16(val, maxf);
+}
+
+static void
+EvoFillLUTSurfaceC5(NVEvoLutEntryRec *pLUTBuffer,
+                    const NvU16 *red,
+                    const NvU16 *green,
+                    const NvU16 *blue,
+                    int nColorMapEntries, int depth)
+{
+    int i;
+    NvU32 rSize, gSize, bSize;
+    const float32_t maxf = ui32_to_f32(0xffff);
+
+    switch (depth) {
+    case 15:
+        rSize = gSize = bSize = 5;
+        break;
+    case 16:
+        rSize = bSize = 5;
+        gSize = 6;
+        break;
+    case 8:
+    case 24:
+        rSize = gSize = bSize = 8;
+        break;
+    case 30:
+        rSize = gSize = bSize = 10;
+        break;
+    default:
+        nvAssert(!"invalid depth");
+        return;
+    }
+
+    // Skip the VSS header
+    pLUTBuffer += NV_LUT_VSS_HEADER_SIZE;
+
+    for (i = 0; i < nColorMapEntries; i++) {
+        if (i < (1 << rSize)) {
+            pLUTBuffer[GetLUTIndex(i, rSize)].Red =
+                ColorToFp16(red[i], maxf).v;
+        }
+        if (i < (1 << gSize)) {
+            pLUTBuffer[GetLUTIndex(i, gSize)].Green =
+                ColorToFp16(green[i], maxf).v;
+        }
+        if (i < (1 << bSize)) {
+            pLUTBuffer[GetLUTIndex(i, bSize)].Blue =
+                ColorToFp16(blue[i], maxf).v;
+        }
+    }
+}
+
+static void EvoSetLUTContextDma3(NVDevEvoPtr pDevEvo,
+                                 const int head,
+                                 NVLutSurfaceEvoPtr pLutSurfEvo,
+                                 NvBool enableBaseLut,
+                                 NVEvoUpdateState *updateState,
+                                 NvBool bypassComposition)
+{
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    NvU32 sd;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    /*
+     * For Window semaphores and Notifiers, the general rule of thumb is that
+     * the current semaphore/notifier will be released if the address for the
+     * semaphore/notifier changes (via context DMA change or offset change).
+     * This allows SW to push updates in the window channel that change other
+     * methods, but do not cause the semaphore or notifier to be released. This
+     * makes it possible to reprogram the window channel with a new input LUT
+     * without releasing the semaphore.
+     *
+     * Note that higher-level code will use core channel notifiers to
+     * synchronize these LUT updates, but that's okay because EvoUpdateC3()
+     * will interlock the core and window channel(s) updates together.
+     */
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) {
+            NVEvoChannelPtr pChannel = pDevEvo->window[head << 1];
+            NVEvoSubDevHeadStateRec *pSdHeadState =
+                &pDevEvo->gpus[sd].headState[head];
+            NVFlipChannelEvoHwState *pMainFlipState =
+                &pSdHeadState->layer[NVKMS_MAIN_LAYER];
+            NVLutSurfaceEvoPtr pInputLutSurfEvo = enableBaseLut ?
+ pLutSurfEvo : NULL; + + if (pMainFlipState->inputLut.pLutSurfaceEvo == pInputLutSurfEvo) { + continue; + } + + pMainFlipState->inputLut.pLutSurfaceEvo = pInputLutSurfEvo; + /* It is not allowed to change the input LUT on immediate flips. */ + pMainFlipState->tearing = FALSE; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + pDevEvo->hal->Flip(pDevEvo, pChannel, pMainFlipState, updateState, + bypassComposition); + + nvPopEvoSubDevMask(pDevEvo); + } + } +} + +static void EvoSetLUTContextDmaC3(const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvU32 ctxdma = (pLutSurfEvo != NULL) ? pLutSurfEvo->dispCtxDma : 0; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU64 offset; + + nvAssert(ctxdma || (!enableBaseLut && !enableOutputLut)); + + nvPushEvoSubDevMaskDisp(pDispEvo); + + EvoSetLUTContextDma3(pDevEvo, + head, + pLutSurfEvo, + enableBaseLut, + updateState, + bypassComposition); + + /* Program the output LUT */ + + offset = offsetof(NVEvoLutDataRec, output); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _SIZE, _SIZE_1025) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _RANGE, _UNITY) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _OUTPUT_MODE, _INTERPOLATE)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_OUTPUT_LUT, _ORIGIN, offset >> 8)); + + /* Set the ctxdma for the output LUT */ + + if (!enableOutputLut) { + /* Class C37D has no separate enable flag. */ + ctxdma = 0; + } + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_OUTPUT_LUT, _HANDLE, ctxdma)); + + nvPopEvoSubDevMask(pDevEvo); +} + +static void EvoSetLUTContextDmaC5(const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvU32 ctxdma = (pLutSurfEvo != NULL) ? 
pLutSurfEvo->dispCtxDma : 0; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU64 offset; + + nvAssert(ctxdma || (!enableBaseLut && !enableOutputLut)); + + nvPushEvoSubDevMaskDisp(pDispEvo); + + EvoSetLUTContextDma3(pDevEvo, + head, + pLutSurfEvo, + enableBaseLut, + updateState, + bypassComposition); + + /* Program the output LUT */ + + offset = offsetof(NVEvoLutDataRec, output); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OLUT_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _MODE, _DIRECT10) | + DRF_NUM(C57D, _HEAD_SET_OLUT_CONTROL, _SIZE, NV_LUT_VSS_HEADER_SIZE + + NV_NUM_EVO_LUT_ENTRIES)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OFFSET_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_OFFSET_OLUT, _ORIGIN, offset >> 8)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(head), 1); + nvDmaSetEvoMethodData(pChannel, 0xffffffff); + + /* Set the ctxdma for the output LUT */ + + if (bypassComposition) { + ctxdma = 0; + + /* if we're not enabling the OLUT, OCSC0 also needs to be disabled */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C57D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _DISABLE)); + } else if (!enableOutputLut) { + /* Use the default OLUT if the client didn't provide one */ + ctxdma = pDevEvo->lut.defaultLut->dispCtxDma; + } + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_CONTEXT_DMA_OLUT, _HANDLE, ctxdma)); + + if (!bypassComposition) { + /* only enable OCSC0 after enabling the OLUT */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C57D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _ENABLE)); + } + + nvPopEvoSubDevMask(pDevEvo); +} + +static inline NvU32 ReadCapReg(volatile const NvU32 *pCaps, NvU32 offset) +{ + /* Offsets are in bytes, but the array has dword-sized elements. 
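+     * A byte offset of 0x10, for example, reads pCaps[4].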
*/
+    return pCaps[offset / sizeof(NvU32)];
+}
+
+static NvBool QueryStereoPinC3(NVDevEvoPtr pDevEvo,
+                               NVEvoSubDevPtr pEvoSubDev,
+                               NvU32 *pStereoPin)
+{
+    NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS params = { };
+
+    params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pDevEvo->displayHandle,
+                       NVC370_CTRL_CMD_GET_LOCKPINS_CAPS,
+                       &params, sizeof(params)) != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to query stereo pin");
+        return FALSE;
+    }
+
+    if ((params.stereoPin >= NV_EVO_NUM_LOCK_PIN_CAPS) ||
+        (params.stereoPin == NVC370_CTRL_GET_LOCKPINS_CAPS_STEREO_PIN_NONE)) {
+        return FALSE;
+    } else {
+        *pStereoPin = params.stereoPin;
+        return TRUE;
+    }
+}
+
+static void EvoParseCapabilityNotifier3(NVDevEvoPtr pDevEvo,
+                                        NVEvoSubDevPtr pEvoSubDev,
+                                        volatile const NvU32 *pCaps)
+{
+    NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities;
+    const NvU32 sysCap = ReadCapReg(pCaps, NVC373_SYS_CAP);
+    const NvU32 sysCapB = ReadCapReg(pCaps, NVC373_SYS_CAPB);
+    NvU32 i, stereoPin;
+    NvU32 layer;
+
+    pDevEvo->caps.cursorCompositionCaps =
+        (struct NvKmsCompositionCapabilities) {
+            .supportedColorKeySelects =
+                NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE),
+
+            .colorKeySelect = {
+                [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = {
+                    .supportedBlendModes = {
+                        [1] = NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES,
+                    },
+                },
+            }
+        };
+
+    for (layer = 0;
+         layer < ARRAY_LEN(pDevEvo->caps.layerCaps); layer++) {
+        pDevEvo->caps.layerCaps[layer].composition =
+            (struct NvKmsCompositionCapabilities) {
+                .supportedColorKeySelects =
+                    NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) |
+                    NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) |
+                    NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST),
+
+                .colorKeySelect = {
+                    [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = {
+                        .supportedBlendModes = {
+                            [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES,
+                        },
+                    },
+
+                    [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = {
+                        .supportedBlendModes = {
+                            [0] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES,
+                            [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES,
+                        },
+                    },
+
+                    [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST] = {
+                        .supportedBlendModes = {
+                            [0] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES,
+                            [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES,
+                        },
+                    },
+                },
+            };
+    }
+
+    /*
+     * Previous EVO display implementations exposed capabilities for lock pins,
+     * detailing which pin(s) could be used for which functions. The idea was
+     * that it didn't make sense to try to drive a stereo pin with a fliplock
+     * signal (for example), so the pin associated with the stereo function was
+     * marked as stereo-capable but not any other function; attempting to use a
+     * non-stereo-capable pin for stereo or vice-versa would result in an error.
+     *
+     * With nvdisplay, the meaning of lock pins was changed such that they no
+     * longer have a shared namespace. So stereo lockpin 0 is not the same as
+     * fliplock lockpin 0 and neither is the same as scanlock lockpin 0. With
+     * this scheme, there is no way to specify a pin that is incapable of a
+     * given function, so the entire capabilities mechanism was removed.
+     *
+     * However, the pins chosen for HEAD_SET_CONTROL still need to match the
+     * pins selected for each function in the VBIOS DCB. Fliplock and scanlock
+     * query this information through
+     * NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS. Stereo is handled
+     * here, using NVC370_CTRL_CMD_GET_LOCKPINS_CAPS.
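+     *
+     * The stereo pin queried below is recorded in pEvoCaps->pin[]; fliplock
+     * and scanlock are simply marked usable on every pin, since each
+     * function now has its own lockpin namespace.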
+ */ + + for (i = 0; i < NV_EVO_NUM_LOCK_PIN_CAPS; i++) { + pEvoCaps->pin[i].flipLock = TRUE; + pEvoCaps->pin[i].scanLock = TRUE; + } + + if (QueryStereoPinC3(pDevEvo, pEvoSubDev, &stereoPin)) { + pEvoCaps->pin[stereoPin].stereo = TRUE; + } + + // Miscellaneous capabilities + // NVDisplay does not support interlaced modes. + pEvoCaps->misc.supportsInterlaced = FALSE; + + // Heads + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC373_HEAD_CAPA__SIZE_1); + for (i = 0; i < NVC373_HEAD_CAPA__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + pHeadCaps->usable = + FLD_IDX_TEST_DRF(C373, _SYS_CAP, _HEAD_EXISTS, i, _YES, sysCap); + } + + // SORs + ct_assert(ARRAY_LEN(pEvoCaps->sor) >= NVC373_SOR_CAP__SIZE_1); + for (i = 0; i < NVC373_SOR_CAP__SIZE_1; i++) { + NVEvoSorCaps *pSorCaps = &pEvoCaps->sor[i]; + + NvBool sorUsable = + FLD_IDX_TEST_DRF(C373, _SYS_CAP, _SOR_EXISTS, i, _YES, sysCap); + + /* XXXnvdisplay: add SOR caps: max DP clk, ... */ + if (sorUsable) { + const NvU32 sorCap = ReadCapReg(pCaps, NVC373_SOR_CAP(i)); + pSorCaps->dualTMDS = + FLD_TEST_DRF(C373, _SOR_CAP, _DUAL_TMDS, _TRUE, sorCap); + + /* + * Assume that all SORs are equally capable, and that all SORs + * support HDMI FRL if the display class supports it. (If this + * assert fires, we may need to rework SOR assignment for such HDMI + * sinks.) + * + * Although HDMI_FRL is only defined for class C6, classes C3 and + * C5 don't use that bit in the SOR_CAP register so it should + * always be 0 on those chips. + */ + nvAssert(!!FLD_TEST_DRF(C673, _SOR_CAP, _HDMI_FRL, _TRUE, sorCap) == + !!pDevEvo->hal->caps.supportsHDMIFRL); + + pSorCaps->maxTMDSClkKHz = + DRF_VAL(C373, _SOR_CLK_CAP, _TMDS_MAX, + ReadCapReg(pCaps, NVC373_SOR_CLK_CAP(i))) * 10000; + } + } + + // Don't need any PIOR caps currently. + + // Windows + ct_assert(ARRAY_LEN(pEvoCaps->window) >= NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1); + for (i = 0; i < NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1; i++) { + NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[i]; + + pWinCaps->usable = + FLD_IDX_TEST_DRF(C373, _SYS_CAPB, _WINDOW_EXISTS, i, _YES, sysCapB); + } +} + +static void EvoParseCapabilityNotifierC3(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NvU32 i; + + // Miscellaneous capabilities + pEvoCaps->misc.supportsSemiPlanar = FALSE; + pEvoCaps->misc.supportsPlanar = FALSE; + pEvoCaps->misc.supportsDSI = FALSE; + + // Heads + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC373_HEAD_CAPA__SIZE_1); + for (i = 0; i < NVC373_HEAD_CAPA__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + /* XXXnvdisplay: add caps for hsat, ocsc, lut */ + if (pHeadCaps->usable) { + NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps; + + pScalerCaps->present = + FLD_TEST_DRF(C373, _HEAD_CAPA, _SCALER, _TRUE, + ReadCapReg(pCaps, NVC373_HEAD_CAPA(i))); + if (pScalerCaps->present) { + NVEvoScalerTapsCaps *pTapsCaps; + NvU32 tmp; + + /* + * Note that some of these may be zero (e.g., only 2-tap 444 + * mode is supported on GV100, so the rest are all zero. + * + * Downscaling by more than 2x in either direction is not + * allowed by state error check for both horizontal and + * vertical 2-tap scaling. + * + * Downscaling by more than 4x in either direction is not + * allowed by argument error check (and state error check) for + * 5-tap scaling. 
+ * + * 5-tap scaling is not implemented on GV100, though, so we + * should never see numTaps == 5 on GV100, and we can just use a + * max of 2 here all the time. + */ + + /* 2-tap capabilities */ + tmp = ReadCapReg(pCaps, NVC373_HEAD_CAPD(i)); + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS]; + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxPixelsVTaps = + NV_MAX(DRF_VAL(C373, _HEAD_CAPD, _MAX_PIXELS_2TAP422, tmp), + DRF_VAL(C373, _HEAD_CAPD, _MAX_PIXELS_2TAP444, tmp)); + + /* + * Note that there is a capability register for 1TAP, but there + * doesn't appear to be a way to select 1-tap scaling in the + * channel methods, so don't bother reading it for now. + */ + } + } + } +} + +static void EvoParsePrecompScalerCaps5(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + int i; + + for (i = 0; i < NVC573_SYS_CAPB_WINDOW_EXISTS__SIZE_1; i++) { + NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[i]; + NVEvoScalerCaps *pScalerCaps = &pWinCaps->scalerCaps; + NVEvoScalerTapsCaps *pTapsCaps; + NvU32 capA = ReadCapReg(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPA(i)); + NvU32 capD, capF; + NvBool csc00Present = FALSE, csc01Present = FALSE; + NvBool csc0LUTPresent = FALSE, csc1LUTPresent = FALSE; + NvBool csc10Present = FALSE, csc11Present = FALSE; + + csc00Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC00_PRESENT, _TRUE, capA); + csc01Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC01_PRESENT, _TRUE, capA); + pWinCaps->csc0MatricesPresent = (csc00Present && csc01Present); + + csc0LUTPresent = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC0LUT_PRESENT, _TRUE, capA); + csc1LUTPresent = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC1LUT_PRESENT, _TRUE, capA); + pWinCaps->cscLUTsPresent = (csc0LUTPresent && csc1LUTPresent); + + csc10Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC10_PRESENT, _TRUE, capA); + csc11Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC11_PRESENT, _TRUE, capA); + pWinCaps->csc1MatricesPresent = (csc10Present && csc11Present); + + pScalerCaps->present = + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, _SCLR_PRESENT, + _TRUE, capA); + if (pScalerCaps->present) { + capD = ReadCapReg(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPD(i)); + capF = ReadCapReg(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPF(i)); + + /* 5-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_5TAPS]; + if (FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPD, + _SCLR_VS_MAX_SCALE_FACTOR, _4X, capD)) { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + if (FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPD, + _SCLR_HS_MAX_SCALE_FACTOR, _4X, capD)) { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _PRECOMP_WIN_PIPE_HDR_CAPF, + _VSCLR_MAX_PIXELS_5TAP, capF); + + /* 2-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS]; + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _PRECOMP_WIN_PIPE_HDR_CAPF, _VSCLR_MAX_PIXELS_2TAP, + capF); + } + } +} + +static void EvoParseCapabilityNotifierC5C6Common(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + NvU32 i; + NvBool postcompScalingSupported = FALSE; + 
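+    /*
+     * Parse the per-head postcomp scaler caps first; the precomp window
+     * scaler caps are only parsed further below if no usable head has a
+     * postcomp scaler.
+     */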
+    // Heads
+    ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1);
+    for (i = 0; i < NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1; i++) {
+        NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i];
+
+        if (pHeadCaps->usable) {
+            NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps;
+            NVEvoScalerTapsCaps *pTapsCaps;
+            NvU32 capA = ReadCapReg(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPA(i));
+            NvU32 capC, capD;
+
+            pScalerCaps->present =
+                FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPA, _SCLR_PRESENT,
+                             _TRUE, capA);
+            if (pScalerCaps->present) {
+                postcompScalingSupported = TRUE;
+
+                capC = ReadCapReg(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPC(i));
+                capD = ReadCapReg(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPD(i));
+
+                /*
+                 * Note that some of these may be zero.
+                 *
+                 * XXXnvdisplay: what about POSTCOMP_HEAD_HDR_CAPC_SCLR_*?
+                 */
+
+                /* 5-tap capabilities */
+                pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_5TAPS];
+                if (FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPC,
+                                 _SCLR_VS_MAX_SCALE_FACTOR, _4X, capC)) {
+                    pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_4X;
+                } else {
+                    pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X;
+                }
+
+                if (FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPC,
+                                 _SCLR_HS_MAX_SCALE_FACTOR, _4X, capC)) {
+                    pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_4X;
+                } else {
+                    pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X;
+                }
+
+                pTapsCaps->maxPixelsVTaps =
+                    DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPD,
+                            _VSCLR_MAX_PIXELS_5TAP, capD);
+
+                /* 2-tap capabilities */
+                pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS];
+                pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X;
+                pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X;
+                pTapsCaps->maxPixelsVTaps =
+                    DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPD,
+                            _VSCLR_MAX_PIXELS_2TAP, capD);
+            }
+
+#if defined(NV_DEBUG)
+            NvU32 unitWidth = DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPA,
+                                      _UNIT_WIDTH, capA);
+
+            // EvoInitChannelC5 assumes 16-bit fixed-point.
+            nvAssert(unitWidth == 16);
+#endif
+        }
+    }
+
+    /*
+     * To keep the design simple, NVKMS will expose support for precomp scaling
+     * iff postcomp scaling isn't supported. This means that on chips which have
+     * both precomp and postcomp scalers (e.g., Turing), NVKMS will only report
+     * that postcomp scaling is supported.
+     */
+    if (!postcompScalingSupported) {
+        EvoParsePrecompScalerCaps5(pEvoCaps, pCaps);
+    }
+
+    // XXXnvdisplay3: add SOR caps for DP over USB
+}
+
+static void EvoParseCapabilityNotifierC5(NVDevEvoPtr pDevEvo,
+                                         NVEvoSubDevPtr pEvoSubDev,
+                                         volatile const NvU32 *pCaps)
+{
+    NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities;
+
+    // Miscellaneous capabilities
+
+    /*
+     * On Turing, the NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR bit actually
+     * reports whether IHUB supports YUV _semi-planar_ formats.
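+     * Class C6 below reports semi-planar and planar support through
+     * separate IHUB capability bits instead.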
+ */ + pEvoCaps->misc.supportsSemiPlanar = + FLD_TEST_DRF(C573, _IHUB_COMMON_CAPA, _SUPPORT_PLANAR, _TRUE, + ReadCapReg(pCaps, NVC573_IHUB_COMMON_CAPA)); + pEvoCaps->misc.supportsDSI = FALSE; + + EvoParseCapabilityNotifierC5C6Common(pEvoCaps, pCaps); +} + +static void EvoParseCapabilityNotifierC6(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NvU32 capC = ReadCapReg(pCaps, NVC673_IHUB_COMMON_CAPC); + NvU32 i; + + // Miscellaneous capabilities + + pEvoCaps->misc.supportsPlanar = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPA, _SUPPORT_PLANAR, _TRUE, + ReadCapReg(pCaps, NVC673_IHUB_COMMON_CAPA)); + + pEvoCaps->misc.supportsSemiPlanar = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPC, _SUPPORT_SEMI_PLANAR, _TRUE, capC); + + pEvoCaps->misc.supportsHVFlip = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPC, _SUPPORT_HOR_VER_FLIP, _TRUE, capC); + + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1); + + // DSI is currently supported on just Orin, which has only 1 DSI engine (DSI0). + pEvoCaps->misc.supportsDSI = + FLD_TEST_DRF(C673, _SYS_CAP, _DSI0_EXISTS, _YES, + ReadCapReg(pCaps, NVC673_SYS_CAP)); + + for (i = 0; i < NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + if (pHeadCaps->usable) { + NvU32 capA = ReadCapReg(pCaps, NVC673_POSTCOMP_HEAD_HDR_CAPA(i)); + NvBool hclpfPresent = + FLD_TEST_DRF(C673, _POSTCOMP_HEAD_HDR_CAPA, _HCLPF_PRESENT, + _TRUE, capA); + NvBool vfilterPresent = + FLD_TEST_DRF(C673, _POSTCOMP_HEAD_HDR_CAPA, _VFILTER_PRESENT, + _TRUE, capA); + + pHeadCaps->supportsHDMIYUV420HW = hclpfPresent && vfilterPresent; + } + } + + EvoParseCapabilityNotifierC5C6Common(pEvoCaps, pCaps); +} + +static NvU32 UsableWindowCount(const NVEvoCapabilities *pEvoCaps) +{ + NvU32 i, count = 0; + NvBool foundUnusable = FALSE; + + for (i = 0; i < ARRAY_LEN(pEvoCaps->window); i++) { + if (pEvoCaps->window[i].usable) { + count++; + /* Assert that usable windows are contiguous starting from 0. */ + if (foundUnusable) { + nvAssert(!foundUnusable); + } + } else { + foundUnusable = TRUE; + } + } + + return count; +} + +typedef typeof(EvoParseCapabilityNotifierC3) parse_caps_t; +typedef typeof(nvHwFormatFromKmsFormatC3) get_hw_fmt_t; + +static NvBool EvoGetCapabilities3(NVDevEvoPtr pDevEvo, + parse_caps_t *pParse, + get_hw_fmt_t *pGetHwFmt, + NvU32 hwclass, + size_t length) +{ + NvU32 capsHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + NVDispEvoPtr pDispEvo; + NvU32 sd; + NvU32 status; + NvBool ret = FALSE; + NvBool first = TRUE; + NvBool supportsSemiPlanar = TRUE; + NvBool supportsPlanar = TRUE; + NvBool supportsHVFlip = TRUE; + unsigned int i; + enum NvKmsRotation curRotation; + NvBool reflectionX; + NvBool reflectionY; + NvU32 win; + NvU8 layer; + + /* With nvdisplay, capabilities are exposed in a separate object. 
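+     * The caps object (e.g., NVC373_DISP_CAPABILITIES) is allocated and
+     * mapped below, parsed once per subdevice, and then freed.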
*/ + status = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + capsHandle, + hwclass, NULL); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to allocate caps object"); + goto free_handle; + } + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportsWindowMode = TRUE; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + void *ptr; + + status = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + capsHandle, + 0, + length, + &ptr, + 0); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to map caps memory"); + goto free_object; + } + + nvkms_memset(&pEvoSubDev->capabilities, 0, + sizeof(pEvoSubDev->capabilities)); + + EvoParseCapabilityNotifier3(pDevEvo, pEvoSubDev, ptr); + pParse(pDevEvo, pEvoSubDev, ptr); + + status = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + capsHandle, ptr, 0); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to unmap caps memory"); + } + + if (first) { + pDevEvo->numWindows = + UsableWindowCount(&pEvoSubDev->capabilities); + first = FALSE; + } else { + /* Assert that each subdevice has the same number of windows. */ + nvAssert(pDevEvo->numWindows == + UsableWindowCount(&pEvoSubDev->capabilities)); + } + + /* + * Expose YUV semi-planar iff all of the disps belonging to pDevEvo + * support it. + */ + supportsSemiPlanar &= + pEvoSubDev->capabilities.misc.supportsSemiPlanar; + + /* + * Expose YUV planar iff all of the disps belonging to pDevEvo + * support it. + */ + supportsPlanar &= + pEvoSubDev->capabilities.misc.supportsPlanar; + + supportsHVFlip &= + pEvoSubDev->capabilities.misc.supportsHVFlip; + } + + /* + * On Volta, only WINDOWs (2N) and (2N + 1) can be attached to HEAD N. + * This is a HW restriction that's documented in the MFS. + * However, starting Turing, display HW supports flexible window + * mapping, which means that SW can freely attach any window to any + * head. + * + * On Orin VDK R4, for example, there's currently one usable head + * (HEAD 0), and 8 total usable windows (WINDOWs 0-7). + * This configuration is currently causing issues since this function + * assumes that the available number of heads and windows strictly + * abides by the fixed mapping enforced on Volta. + * + * This window mapping init sequence will eventually be updated to + * support flexible window mapping. But, until that happens, there + * should at least be a sanity check to make sure that the head that the + * current window maps to - per the Volta restriction - is actually + * available. 
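+     *
+     * With the fixed Volta-style mapping used below,
+     * NVC37D_WINDOW_MAPPED_TO_HEAD(win) is effectively (win / 2): windows
+     * 0 and 1 map to head 0, windows 2 and 3 to head 1, and so on.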
+ */ + + for (win = 0; win < pDevEvo->numWindows; win++) { + pDevEvo->headForWindow[win] = NVC37D_WINDOW_MAPPED_TO_HEAD(win); + nvAssert(pDevEvo->headForWindow[win] < pDevEvo->numHeads); + } + + for (i = NvKmsSurfaceMemoryFormatMin; + i <= NvKmsSurfaceMemoryFormatMax; + i++) { + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(i); + + if ((pFormatInfo->numPlanes == 2 && !supportsSemiPlanar) || + (pFormatInfo->numPlanes == 3 && !supportsPlanar)) { + continue; + } + + if (pGetHwFmt(i) != 0) { + NvU8 layer; + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + } + } + + for (reflectionX = FALSE; + reflectionX <= TRUE; + reflectionX++) { + + for (reflectionY = FALSE; + reflectionY <= TRUE; + reflectionY++) { + + for (curRotation = NVKMS_ROTATION_MIN; + curRotation <= NVKMS_ROTATION_MAX; + curRotation++) { + struct NvKmsRRParams rrParams = { curRotation, + reflectionX, + reflectionY }; + NvU8 bitPosition; + + if ((reflectionX || reflectionY) && !supportsHVFlip) { + continue; + } + + if (curRotation == NVKMS_ROTATION_180 && !supportsHVFlip) { + continue; + } + + /* + * Skipping over rotations by 90 and 270 degrees + * because these rotations require support for + * SCAN_COLUMN rotation, which hasn't been added + * to NVKMS yet. + */ + if (curRotation == NVKMS_ROTATION_90 || + curRotation == NVKMS_ROTATION_270) { + continue; + } + + bitPosition = NvKmsRRParamsToCapBit(&rrParams); + pDevEvo->caps.validLayerRRTransforms |= NVBIT(bitPosition); + } + } + } + + ret = TRUE; + +free_object: + status = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + capsHandle); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to free caps object"); + } + +free_handle: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, capsHandle); + + return ret; +} + +static NvBool EvoGetCapabilitiesC3(NVDevEvoPtr pDevEvo) +{ + return EvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC3, + nvHwFormatFromKmsFormatC3, + NVC373_DISP_CAPABILITIES, + sizeof(_NvC373DispCapabilities)); +} + +static NvBool EvoGetCapabilitiesC5(NVDevEvoPtr pDevEvo) +{ + return EvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC5, + nvHwFormatFromKmsFormatC5, + NVC573_DISP_CAPABILITIES, + sizeof(_NvC573DispCapabilities)); +} + +static NvBool EvoGetCapabilitiesC6(NVDevEvoPtr pDevEvo) +{ + return EvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC6, + nvHwFormatFromKmsFormatC6, + NVC673_DISP_CAPABILITIES, + sizeof(_NvC673DispCapabilities)); +} + +static void EvoSetViewportPointInC3(NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Set the input viewport point */ + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_POINT_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_IN, _X, x) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_IN, _Y, y)); + /* XXXnvdisplay set ViewportValidPointIn to configure overfetch */ +} + +static void EvoSetOutputScalerC3(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = 
&pDispEvo->headState[head];
+    const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort;
+
+    /* These methods should only apply to a single pDpyEvo */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    NvU32 vTaps = pViewPort->vTaps > NV_EVO_SCALER_2TAPS ?
+        NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 :
+        NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2;
+    NvU32 hTaps = pViewPort->hTaps > NV_EVO_SCALER_2TAPS ?
+        NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 :
+        NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2;
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _VERTICAL_TAPS, vTaps) |
+        DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _HORIZONTAL_TAPS, hTaps));
+}
+
+static NvBool EvoSetViewportInOut3(NVDevEvoPtr pDevEvo, const int head,
+                                   const NVHwModeViewPortEvo *pViewPortMin,
+                                   const NVHwModeViewPortEvo *pViewPort,
+                                   const NVHwModeViewPortEvo *pViewPortMax,
+                                   NVEvoUpdateState *updateState,
+                                   NvU32 setWindowUsageBounds)
+{
+    const NVEvoCapabilitiesPtr pEvoCaps = &pDevEvo->gpus[0].capabilities;
+    NVEvoChannelPtr pChannel = pDevEvo->core;
+    struct NvKmsScalingUsageBounds scalingUsageBounds = { };
+    NvU32 win;
+
+    /* These methods should only apply to a single pDpy */
+    nvAssert(pDevEvo->subDevMaskStackDepth > 0);
+
+    nvUpdateUpdateState(pDevEvo, updateState, pChannel);
+
+    /* The input viewport shouldn't vary. */
+    nvAssert(pViewPortMin->in.width == pViewPort->in.width);
+    nvAssert(pViewPortMax->in.width == pViewPort->in.width);
+    nvAssert(pViewPortMin->in.height == pViewPort->in.height);
+    nvAssert(pViewPortMax->in.height == pViewPort->in.height);
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_IN, _WIDTH, pViewPort->in.width) |
+        DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_IN, _HEIGHT, pViewPort->in.height));
+    /* XXXnvdisplay set ViewportValidSizeIn to configure overfetch */
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_X, pViewPort->out.xAdjust) |
+        DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_Y, pViewPort->out.yAdjust));
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_OUT, _WIDTH, pViewPort->out.width) |
+        DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_OUT, _HEIGHT, pViewPort->out.height));
+
+    /* XXXnvdisplay deal with pViewPortMin, pViewPortMax */
+
+    if (!nvComputeScalingUsageBounds(&pEvoCaps->head[head].scalerCaps,
+                                     pViewPort->in.width, pViewPort->in.height,
+                                     pViewPort->out.width, pViewPort->out.height,
+                                     pViewPort->hTaps, pViewPort->vTaps,
+                                     &scalingUsageBounds)) {
+        /* Should have been rejected by validation */
+        nvAssert(!"Attempt to program invalid viewport");
+    }
+
+    nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(head), 1);
+    nvDmaSetEvoMethodData(pChannel,
+        DRF_NUM(C37D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _HORIZONTAL,
+                scalingUsageBounds.maxHDownscaleFactor) |
+        DRF_NUM(C37D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _VERTICAL,
+                scalingUsageBounds.maxVDownscaleFactor));
+
+    /*
+     * Program MAX_PIXELS_FETCHED_PER_LINE window usage bounds
+     * for each window that's attached to the head.
+ * + * Precomp will clip the post-scaled window to the input viewport, reverse-scale + * this cropped size back to the input surface domain, and isohub will fetch + * this cropped size. This function assumes that there's no window scaling yet, + * so the MAX_PIXELS_FETCHED_PER_LINE will be bounded by the input viewport + * width. SetScalingUsageBoundsOneWindow5() will take care of updating + * MAX_PIXELS_FETCHED_PER_LINE, if window scaling is enabled later. + */ + setWindowUsageBounds |= + DRF_NUM(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _MAX_PIXELS_FETCHED_PER_LINE, + GetMaxPixelsFetchedPerLine(pViewPort->in.width, + NV_EVO_SCALE_FACTOR_1X)); + + for (win = 0; win < pDevEvo->numWindows; win++) { + if (head != pDevEvo->headForWindow[win]) { + continue; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + nvDmaSetEvoMethodData(pChannel, setWindowUsageBounds); + } + + return scalingUsageBounds.vUpscalingAllowed; +} + +static void EvoSetViewportInOutC3(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvBool verticalUpscalingAllowed = + EvoSetViewportInOut3(pDevEvo, head, pViewPortMin, pViewPort, + pViewPortMax, updateState, + NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _CURSOR, _USAGE_W256_H256) | + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_LUT, _USAGE_1025) | + (verticalUpscalingAllowed ? + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))); +} + +static void EvoSetViewportInOutC5(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 setWindowUsageBounds = + (NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5 | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE)); + NvU32 verticalUpscalingAllowed = + EvoSetViewportInOut3(pDevEvo, head, pViewPortMin, pViewPort, + pViewPortMax, updateState, setWindowUsageBounds); + + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _CURSOR, _USAGE_W256_H256) | + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OLUT_ALLOWED, _TRUE) | + /* Despite the generic name of this field, it's specific to vertical taps. */ + (pViewPort->vTaps > NV_EVO_SCALER_2TAPS ? + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_5) : + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_2)) | + (verticalUpscalingAllowed ? + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))); +} + +/*! + * Compute the C37D_HEAD_SET_CONTROL_CURSOR method value. + * + * This function also validates that the given NVSurfaceEvoRec can be + * used as a cursor image. + + * + * \param[in] pDevEvo The device on which the cursor will be programmed. 
+ * \param[in] pSurfaceEvo The surface to be used as the cursor image. + * \param[out] pValue The C37D_HEAD_SET_CONTROL_CURSOR method value. + + * \return If TRUE, the surface can be used as a cursor image, and + * pValue contains the method value. If FALSE, the surface + * cannot be used as a cursor image. + */ +static NvBool EvoGetHeadSetControlCursorValueC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue) +{ + NvU32 value = 0; + + if (pSurfaceEvo == NULL) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _DISABLE); + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + goto done; + } else { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _ENABLE); + } + + /* The cursor must always be pitch. */ + + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + /* + * The only supported cursor image memory format is A8R8G8B8. + */ + if (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA8R8G8B8) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + } else { + return FALSE; + } + + /* + * The cursor only supports a few image sizes. + */ + if ((pSurfaceEvo->widthInPixels == 32) && + (pSurfaceEvo->heightInPixels == 32)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W32_H32); + } else if ((pSurfaceEvo->widthInPixels == 64) && + (pSurfaceEvo->heightInPixels == 64)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W64_H64); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 128) && + (pSurfaceEvo->widthInPixels == 128) && + (pSurfaceEvo->heightInPixels == 128)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W128_H128); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 256) && + (pSurfaceEvo->widthInPixels == 256) && + (pSurfaceEvo->heightInPixels == 256)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W256_H256); + } else { + return FALSE; + } + + /* + * Hard code the cursor hotspot. + */ + value |= DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_Y, 0); + value |= DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_X, 0); + + // XXXnvdisplay: Add support for cursor de-gamma. + +done: + + if (pValue != NULL) { + *pValue = value; + } + + return TRUE; +} + +static void EvoSetCursorImageC3(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 ctxdma = pSurfaceEvo ? pSurfaceEvo->planes[0].ctxDma : 0; + const NvU64 offset = pSurfaceEvo ? pSurfaceEvo->planes[0].offset : 0; + NvU32 headSetControlCursorValue = 0; + NvBool ret; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + nvAssert(pCursorCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE); + nvAssert(NVBIT(pCursorCompParams->blendingMode[1]) & + NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES); + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvAssert(!pSurfaceEvo || ctxdma); + + ret = EvoGetHeadSetControlCursorValueC3(pDevEvo, pSurfaceEvo, + &headSetControlCursorValue); + /* + * The caller should have already validated the surface, so there + * shouldn't be a failure. 
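+     * (EvoValidateCursorSurfaceC3() below runs the same check at
+     * validation time.)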
+ */ + if (!ret) { + nvAssert(!"Could not construct HEAD_SET_CONTROL_CURSOR value"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_PRESENT_CONTROL_CURSOR, _MODE, _MONO)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(head, 0), 4); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CURSOR, _HANDLE, ctxdma)); + // Always set the right cursor context DMA. + // HW will just ignore this if it is not in stereo cursor mode. + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CURSOR, _HANDLE, ctxdma)); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, headSetControlCursorValue); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(head), 1); + switch (pCursorCompParams->blendingMode[1]) { + case NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _ZERO) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "%s: composition mode %d not supported for 
cursor", + __func__, pCursorCompParams->blendingMode[1]); + break; + } +} + +static NvBool EvoValidateCursorSurfaceC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + return EvoGetHeadSetControlCursorValueC3(pDevEvo, pSurfaceEvo, NULL); +} + +static NvBool ValidateWindowFormatSourceRectC3( + const struct NvKmsRect *sourceFetchRect, + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + /* + * sourceFetchRect represents the dimensions of the source fetch rectangle. + * If YUV crop and scaler overfetch are supported, it is up to the caller to + * provide the correct dimensions (e.g., ValidSizeIn/ValidPointIn vs. + * SizeIn/PointIn). + * + * For all YUV formats, the position and size of the fetch rectangle must be + * even in the horizontal direction. + * + * For YUV420 formats, there is an additional restriction that the position + * and size of the fetch rectangle must be even in the vertical direction as + * well. + */ + if (pFormatInfo->isYUV) { + if (((sourceFetchRect->x & 1) != 0) || + (sourceFetchRect->width & 1) != 0) { + return FALSE; + } + + if (pFormatInfo->yuv.vertChromaDecimationFactor > 1) { + if (((sourceFetchRect->y & 1) != 0) || + (sourceFetchRect->height & 1) != 0) { + return FALSE; + } + } + } + + return TRUE; +} + +typedef typeof(ValidateWindowFormatSourceRectC3) val_src_rect_t; + +static NvBool EvoValidateWindowFormatWrapper( + const enum NvKmsSurfaceMemoryFormat format, + get_hw_fmt_t *pGetHwFmt, + const struct NvKmsRect *sourceFetchRect, + val_src_rect_t *pValSrcRect, + NvU32 *hwFormatOut) +{ + const NvU32 hwFormat = pGetHwFmt(format); + + if (hwFormat == 0) { + return FALSE; + } + + if (hwFormatOut != NULL) { + *hwFormatOut = hwFormat; + } + + /* + * If sourceFetchRect is NULL, this function is only responsible for + * verifying whether the given NvKmsSurfaceMemoryFormat has a corresponding + * HW format. + */ + if (sourceFetchRect == NULL) { + return TRUE; + } + + return pValSrcRect(sourceFetchRect, format); +} + +static NvBool EvoValidateWindowFormatC3( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC3, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvBool EvoValidateWindowFormatC5( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC5, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvBool EvoValidateWindowFormatC6( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC6, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvU32 OffsetForNotifier(int idx) +{ + /* NVDisplay notifiers are always the 16-byte variety. We only care about + * the NV_DISP_NOTIFIER__0 dword which contains the status. 
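+     * With 16-byte notifiers, notifier idx thus starts at dword (idx * 4),
+     * and OffsetForNotifier(idx) points at its NV_DISP_NOTIFIER__0 dword.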
*/ + NvU32 base = idx * (NV_DISP_NOTIFIER_SIZEOF / sizeof(NvU32)); + return base + NV_DISP_NOTIFIER__0; +} + +static void EvoInitCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvWriteEvoCoreNotifier(pDispEvo, OffsetForNotifier(idx), + DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN)); +} + +static NvBool EvoIsCompNotifierCompleteC3(NVDispEvoPtr pDispEvo, int idx) { + return nvEvoIsCoreNotifierComplete(pDispEvo, OffsetForNotifier(idx), + DRF_BASE(NV_DISP_NOTIFIER__0_STATUS), + DRF_EXTENT(NV_DISP_NOTIFIER__0_STATUS), + NV_DISP_NOTIFIER__0_STATUS_FINISHED); +} + +static void EvoWaitForCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvEvoWaitForCoreNotifier(pDispEvo, OffsetForNotifier(idx), + DRF_BASE(NV_DISP_NOTIFIER__0_STATUS), + DRF_EXTENT(NV_DISP_NOTIFIER__0_STATUS), + NV_DISP_NOTIFIER__0_STATUS_FINISHED); +} + +static void EvoSetDitherC3(NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ditherControl; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enabled) { + ditherControl = DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _ENABLE); + + switch (type) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_6_BITS); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_8_BITS); + break; + /* XXXnvdisplay: Support DITHER_TO_{10,12}_BITS (see also bug 1729668). */ + default: + nvAssert(!"Unknown ditherType"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF: + ditherControl = NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE; + break; + } + + } else { + ditherControl = DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _DISABLE); + } + + switch (algo) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_ERR_ACC); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _TEMPORAL); + break; + default: + nvAssert(!"Unknown DitherAlgo"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN: + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_ERR_ACC); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DITHER_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, ditherControl); +} + +static void EvoSetDisplayRateC3(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, + NVEvoUpdateState *updateState, + NvU32 timeoutMicroseconds) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enable) { + timeoutMicroseconds = + NV_MIN(timeoutMicroseconds, + DRF_MASK(NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_RATE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, 
_ONE_SHOT) | + DRF_NUM(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH_INTERVAL, + timeoutMicroseconds) | + (timeoutMicroseconds == 0 ? + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _DISABLE) : + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _ENABLE))); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_RATE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, _CONTINUOUS)); + } +} + +static void EvoSetStallLockC3(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enable) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_STALL_LOCK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _ENABLE, _TRUE) | + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _MODE, _ONE_SHOT) | + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, _LOCK_PIN_NONE) | + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _LINE_LOCK)); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_STALL_LOCK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _ENABLE, _FALSE)); + } +} + +static NvBool GetChannelState(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvU32 *result) +{ + NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { }; + NvU32 ret; + + info.base.subdeviceIndex = sd; + info.channelClass = pChan->hwclass; + info.channelInstance = pChan->instance; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_GET_CHANNEL_INFO, + &info, sizeof(info)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query display engine channel state: 0x%08x:%d:%d:0x%08x", + pChan->hwclass, pChan->instance, sd, ret); + return FALSE; + } + + *result = info.channelState; + + return TRUE; +} + +static NvBool EvoIsChannelIdleC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvU32 channelState; + + if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) { + return FALSE; + } + + *result = (channelState == NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE); + + return TRUE; +} + +static NvBool EvoIsChannelMethodPendingC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvBool tmpResult; + + /* With C370, Idle and NoMethodPending are equivalent. */ + ct_assert(NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE == + NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING); + + if (!EvoIsChannelIdleC3(pDevEvo, pChan, sd, &tmpResult)) { + return FALSE; + } + + *result = !tmpResult; + + return TRUE; +} + +static NvBool EvoAllocRmCtrlObjectC3(NVDevEvoPtr pDevEvo) +{ + const NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + /* Note that this object is not at all related to the GF100_DISP_SW (9072) + * or NV50_DISPLAY_SW (5072) objects, despite their similarity in name. 
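+     * The handle is stashed in pDevEvo->rmCtrlHandle and used for
+     * NVC372-class display rmctrls later on.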
*/
+    NvU32 status = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                                pDevEvo->deviceHandle,
+                                handle,
+                                NVC372_DISPLAY_SW, NULL);
+    if (status != NVOS_STATUS_SUCCESS) {
+        nvAssert(!"Failed to allocate nvdisplay rmctrl object");
+        goto fail;
+    }
+
+    pDevEvo->rmCtrlHandle = handle;
+
+    return TRUE;
+
+fail:
+    nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
+    return FALSE;
+}
+
+static NvU32 GetAccelerators(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd)
+{
+    NVC370_CTRL_GET_ACCL_PARAMS params = { };
+    NvU32 ret;
+
+    params.base.subdeviceIndex = sd;
+    params.channelClass = pChannel->hwclass;
+    nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL);
+    params.channelInstance =
+        NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NVC370_CTRL_CMD_GET_ACCL,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to retrieve accelerators");
+        return 0;
+    }
+
+    return params.accelerators;
+}
+
+static NvBool SetAccelerators(
+    NVDevEvoPtr pDevEvo,
+    NVEvoChannelPtr pChannel,
+    NvU32 sd,
+    NvU32 accelerators,
+    NvU32 accelMask)
+{
+    NVC370_CTRL_SET_ACCL_PARAMS params = { };
+    NvU32 ret;
+
+    params.base.subdeviceIndex = sd;
+    params.channelClass = pChannel->hwclass;
+    nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL);
+    params.channelInstance =
+        NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask);
+    params.accelerators = accelerators;
+    params.accelMask = accelMask;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayHandle,
+                         NVC370_CTRL_CMD_SET_ACCL,
+                         &params, sizeof(params));
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR,
+                         "Failed to set accelerators");
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+static void EvoAccelerateChannelC3(NVDevEvoPtr pDevEvo,
+                                   NVEvoChannelPtr pChannel,
+                                   const NvU32 sd,
+                                   NvU32 *pOldAccelerators)
+{
+    /* Start with a conservative set of accelerators; may need to add more
+     * later. */
+    const NvU32 accelMask =
+        NVC370_CTRL_ACCL_IGNORE_PI |
+        NVC370_CTRL_ACCL_SKIP_SEMA;
+
+    *pOldAccelerators = GetAccelerators(pDevEvo, pChannel, sd);
+
+    /* Accelerate window channel. */
+    if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, accelMask)) {
+        nvAssert(!"Failed to set accelerators");
+    }
+}
+
+static void EvoResetChannelAcceleratorsC3(NVDevEvoPtr pDevEvo,
+                                          NVEvoChannelPtr pChannel,
+                                          const NvU32 sd,
+                                          NvU32 oldAccelerators)
+{
+    /* Use the same conservative accelerator mask that
+     * EvoAccelerateChannelC3() applied. */
+    const NvU32 accelMask =
+        NVC370_CTRL_ACCL_IGNORE_PI |
+        NVC370_CTRL_ACCL_SKIP_SEMA;
+
+    /* Restore the window channel accelerators saved by
+     * EvoAccelerateChannelC3().
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccelerators, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +static void ForceFlipToNull( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState) +{ + NVFlipChannelEvoHwState hwState = { }; + const NvU32 subDeviceMask = (1 << sd); + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + pDevEvo->hal->Flip(pDevEvo, pChannel, &hwState, updateState, + FALSE /* bypassComposition */); + + nvPopEvoSubDevMask(pDevEvo); +} + +static NvBool PollForChannelIdle( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + const NvU32 timeout = 2000000; // 2 seconds + NvU64 startTime = 0; + NvBool isMethodPending = TRUE; + + do { + if (!EvoIsChannelMethodPendingC3(pDevEvo, pChannel, sd, + &isMethodPending)) { + break; + } + + if (!isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +/*! + * This function emulates the behavior of the STOP_BASE/STOP_OVERLAY RM control + * calls for pre-EVO hardware. + * + * STOP_BASE/STOP_OVERLAY will apply hardware channel accelerators, push + * methods via the debug interface to NULL context DMAs, and wait for the + * channel to go idle (which means the surface programmed into the core channel + * will become visible). + * + * If we asked RM to do the same thing for the window channel that is emulating + * the base channel on nvdisplay, the display would just go black: there's no + * surface in the core channel, so NULLing the context DMA in the window + * channel will disable both "core" and "base". + * + * So instead, similar functionality is implemented here: we apply + * accelerators, push methods to flip to core, and wait for the channel to + * idle. + */ +typedef struct { + struct { + NvU32 accelerators; + NvBool overridden; + } window[NVKMS_MAX_WINDOWS_PER_DISP]; +} EvoIdleChannelAcceleratorState; + +static NvBool EvoForceIdleSatelliteChannelsWithAccel( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState, + const NvU32 accelMask) +{ + NvU32 sd, window; + NVEvoUpdateState updateState = { }; + NvBool ret = FALSE; + + EvoIdleChannelAcceleratorState *pAcceleratorState = nvCalloc( + pDevEvo->numSubDevices, sizeof(EvoIdleChannelAcceleratorState)); + + if (!pAcceleratorState) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to alloc accelerator state"); + return FALSE; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + /* + * Forcing a channel to be idle is currently only implemented for window + * channels. + */ + if ((idleChannelState->subdev[sd].channelMask & + ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0) { + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Forcing non-window channel idle not implemented"); + goto done; + } + + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, + _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + /* Save old window channel accelerators. */ + NvU32 oldAccel = GetAccelerators(pDevEvo, pChannel, sd); + + pAcceleratorState[sd].window[window].accelerators = + oldAccel; + + /* Accelerate window channel. 
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, + accelMask)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set accelerators"); + goto done; + } + pAcceleratorState[sd].window[window].overridden = TRUE; + + /* Push a flip to null in this channel. */ + ForceFlipToNull(pDevEvo, pChannel, sd, &updateState); + } + } + } + + /* Push one update for all of the flips programmed above. */ + EvoUpdateC3(pDevEvo, &updateState, TRUE /* releaseElv */); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + /* Wait for the flips to complete. */ + if (!PollForChannelIdle(pDevEvo, pChannel, sd)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Timed out while idling base channel"); + goto done; + } + } + } + } + + ret = TRUE; + +done: + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + const NvU32 oldAccel = + pAcceleratorState[sd].window[window].accelerators; + + if (!pAcceleratorState[sd].window[window].overridden) { + continue; + } + + /* Restore window channel accelerators. */ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccel, + accelMask)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to restore accelerators"); + } + } + } + } + + nvFree(pAcceleratorState); + return ret; +} + +static NvBool EvoForceIdleSatelliteChannelC3( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState) +{ + /* Start with a conservative set of accelerators; may need to add more + * later. 
 */ + const NvU32 accelMask = + NVC370_CTRL_ACCL_IGNORE_PI | + NVC370_CTRL_ACCL_SKIP_SEMA; + + return EvoForceIdleSatelliteChannelsWithAccel(pDevEvo, + idleChannelState, + accelMask); +} + +static NvBool EvoForceIdleSatelliteChannelIgnoreLockC3( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState) +{ + const NvU32 accelMask = + NVC370_CTRL_ACCL_IGNORE_PI | + NVC370_CTRL_ACCL_SKIP_SEMA | + NVC370_CTRL_ACCL_IGNORE_FLIPLOCK | + NVC370_CTRL_ACCL_IGNORE_INTERLOCK; + + return EvoForceIdleSatelliteChannelsWithAccel(pDevEvo, + idleChannelState, + accelMask); +} + +static void EvoFreeRmCtrlObjectC3(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->rmCtrlHandle) { + NvU32 status; + + status = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->rmCtrlHandle); + + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to free nvdisplay rmctrl object"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->rmCtrlHandle); + pDevEvo->rmCtrlHandle = 0; + } +} + +static void EvoSetImmPointOutC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y) +{ + NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma; + + nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0); + nvAssert(pChannel->imm.type == NV_EVO_IMM_CHANNEL_DMA); + + nvDmaSetStartEvoMethod(pImmChannel, + NVC37B_SET_POINT_OUT(0 /* Left eye */), 1); + + nvDmaSetEvoMethodData(pImmChannel, + DRF_NUM(C37B, _SET_POINT_OUT, _X, x) | + DRF_NUM(C37B, _SET_POINT_OUT, _Y, y)); + + nvWinImmChannelUpdateState(pDevEvo, updateState, pChannel); +} + +static void EvoStartHeadCRC32CaptureC3(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NVConnectorEvoPtr pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + NvU32 head, + NvU32 sd, + NVEvoUpdateState *updateState) +{ + const NvU32 winChannel = head << 1; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 dmaCtx = pDma->ctxHandle; + NvU32 orOutput = 0; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + /* The window channel should fit in + * NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL */ + nvAssert(winChannel < DRF_MASK(NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL)); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + if (protocol == NVKMS_PROTOCOL_SOR_DP_A || + protocol == NVKMS_PROTOCOL_SOR_DP_B) { + orOutput = NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF; + } else { + orOutput = + NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(orIndex); + } + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + orOutput = + NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(orIndex); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + /* No DAC support on nvdisplay. Fall through. 
 */ + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CRC, _HANDLE, dmaCtx)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, orOutput) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) | + DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, winChannel) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE)); + + /* Reset the CRC notifier */ + nvEvoResetCRC32Notifier(pDma->subDeviceAddress[sd], + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE); +} + +static void EvoStopHeadCRC32CaptureC3(NVDevEvoPtr pDevEvo, + NvU32 head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, _NONE) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) | + DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, 0) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE)); +} + +/*! + * Queries the current head's CRC Notifier and returns values if successful + * + * First waits for hardware to finish writing to the CRC32Notifier, + * and performs a read of the Compositor, SF/OR CRCs, + * and the RG CRC in numCRC32 frames. + * Crc fields in the input array crc32 should be calloc'd to 0s. 
 + * + * \param[in] pDevEvo NVKMS device pointer + * \param[in] pDma Pointer to DMA-mapped memory + * \param[in] sd Subdevice index + * \param[in] entry_count Number of independent frames to read CRCs from + * \param[out] crc32 Contains pointers to CRC output arrays + * \param[out] numCRC32 Number of CRC frames successfully read from DMA + * + * \return Returns TRUE if it was able to successfully read CRCs from DMA, + * otherwise FALSE + */ +static NvBool EvoQueryHeadCRC32_C3(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32, + NvU32 *numCRC32) +{ + volatile NvU32 *pCRC32Notifier = pDma->subDeviceAddress[sd]; + const NvU32 entry_stride = + NVC37D_NOTIFIER_CRC_CRC_ENTRY1_21 - NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13; + // Define how many/which variables to read from each CRCNotifierEntry struct + const CRC32NotifierEntryRec field_info[NV_EVO3_NUM_CRC_FIELDS] = { + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC), + .field_frame_values = crc32->compositorCrc32, + }, + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC), + .field_frame_values = crc32->rasterGeneratorCrc32, + }, + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC), + .field_frame_values = crc32->outputCrc32 + } + }; + + const CRC32NotifierEntryFlags flag_info[NV_EVO3_NUM_CRC_FLAGS] = { + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_COUNT), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_COUNT), + .flag_type = NVEvoCrc32NotifierFlagCount + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + } + }; + + if (!nvEvoWaitForCRC32Notifier(pCRC32Notifier, + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE)) { + return FALSE; + } + + *numCRC32 = nvEvoReadCRC32Notifier(pCRC32Notifier, + entry_stride, + entry_count, + NVC37D_NOTIFIER_CRC_STATUS_0, /* Status offset */ + NV_EVO3_NUM_CRC_FIELDS, + NV_EVO3_NUM_CRC_FLAGS, + field_info, + flag_info); + + nvEvoResetCRC32Notifier(pCRC32Notifier, + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE); + + return TRUE; +} + +static void EvoGetScanLineC3(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + 
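+ /* + * Each head drives a fixed pair of windows, so the head's first window is + * (head * 2); the same head-to-window mapping is used by the CRC + * controlling-channel and active-viewport code in this file. As the code + * below shows, the NVC37E_GET_LINE PIO read packs an in-blanking flag into + * bit 15 and the current scanline into bits 14:0, roughly: + * + * NvU32 v = nvDmaLoadPioMethod(pDma, NVC37E_GET_LINE); + * NvBool inBlanking = (v & NVBIT(15)) != 0; + * NvU16 line = v & DRF_MASK(14:0); + */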
const NvU32 window = head << 1; + void *pDma = pDevEvo->window[window]->pb.control[sd]; + const NvU32 scanLine = nvDmaLoadPioMethod(pDma, NVC37E_GET_LINE); + + if ((scanLine & NVBIT(15)) == 0) { + *pInBlankingPeriod = FALSE; + *pScanLine = scanLine & DRF_MASK(14:0); + } else { + *pInBlankingPeriod = TRUE; + } +} + +/* + * This method configures and programs the RG Core Semaphores. Default behavior + * is to continuously trigger on the specified rasterline when enabled. + */ +static void +EvoConfigureVblankSyncObjectC6(const NVDevEvoPtr pDevEvo, + const NvU16 rasterLine, + const NvU32 head, + const NvU32 semaphoreIndex, + const NvU32 hCtxDma, + NVEvoUpdateState* pUpdateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* + * Populate the NVEvoUpdateState for the caller. The Update State contains + * a mask of which display channels need to be updated. + */ + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + /* + * Tell HW what ctxdma entry to use to look up actual RG semaphore surface. + * If hCtxDma is 0, HW will disable the semaphore. + */ + nvDmaSetStartEvoMethod(pChannel, + NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(head, semaphoreIndex), + 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE, _HANDLE, hCtxDma)); + + if (hCtxDma == 0) { + /* Disabling semaphore so no configuration necessary. */ + return; + } + + /* + * Configure the semaphore with the following: + * Set OFFSET to 0 (default). + * Set PAYLOAD_SIZE to 32bits (default). + * Set REL_MODE to WRITE (default). + * Set RUN_MODE to CONTINUOUS. + * Set RASTER_LINE to start of Vblank: Vsync + Vbp + Vactive. + * + * Note that all these options together fit in 32bits, and that all 32 bits + * must be written each time any given option changes. + * + * The actual payload value doesn't currently matter since this RG + * semaphore will be mapped to a syncpt for now. Each HW-issued payload + * write is converted to a single syncpt increment irrespective of what the + * actual semaphore payload value is. 
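 + * + * With RUN_MODE set to CONTINUOUS and RASTER_LINE at the start of vblank, + * the release (and therefore the syncpt increment) is expected to fire + * once per frame.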
+ */ + nvDmaSetStartEvoMethod(pChannel, + NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(head, semaphoreIndex), + 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _OFFSET, 0) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _PAYLOAD_SIZE, + _PAYLOAD_32BIT) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _REL_MODE, + _WRITE) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RUN_MODE, + _CONTINUOUS) | + DRF_NUM(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RASTER_LINE, + rasterLine)); +} + +static void EvoSetHdmiFrlDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + const HDMI_FRL_CONFIG *pFrl = &pTimings->hdmiFrlConfig; + NvU32 bpc, flatnessDetThresh; + NvU32 i; + + nvAssert(pDispEvo->pDevEvo->hal->caps.supportsHDMIFRL && + pFrl->frlRate != HDMI_FRL_DATA_RATE_NONE && + pFrl->dscInfo.bEnableDSC); + + bpc = nvPixelDepthToBitsPerComponent(pTimings->pixelDepth); + if (bpc < 8) { + nvAssert(bpc >= 8); + bpc = 8; + } + flatnessDetThresh = (2 << (bpc - 8)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _MODE, _SINGLE) | /* TODO DUAL for 2Head1OR */ + DRF_NUM(C67D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _ENABLE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VBLANK) | + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + /* MFS says "For FRL DSC CVTEM, it should be 0x21 (136bytes)." */ + DRF_NUM(C67D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x21)); + + /* The loop below assumes the methods are tightly packed. 
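 + * That is, DATA0..DATA31 are assumed to occupy consecutive 4-byte method + * offsets, so the 32 dwords written here carry the full 128-byte DSC PPS; + * the ct_asserts below check that assumption.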
*/ + ct_assert(ARRAY_LEN(pFrl->dscInfo.pps) == 32); + ct_assert((NVC67D_HEAD_SET_DSC_PPS_DATA1(0) - NVC67D_HEAD_SET_DSC_PPS_DATA0(0)) == 4); + ct_assert((NVC67D_HEAD_SET_DSC_PPS_DATA31(0) - NVC67D_HEAD_SET_DSC_PPS_DATA0(0)) == (31 * 4)); + for (i = 0; i < ARRAY_LEN(pFrl->dscInfo.pps); i++) { + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4), 1); + nvDmaSetEvoMethodData(pChannel, pFrl->dscInfo.pps[i]); + } + + /* Byte 0 must be 0x7f, the rest are don't care (will be filled in by HW) */ + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x7f)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCACTIVE, _BYTES, pFrl->dscInfo.dscHActiveBytes) | + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCACTIVE, _TRI_BYTES, pFrl->dscInfo.dscHActiveTriBytes)); + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_HDMI_DSC_HCBLANK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCBLANK, _WIDTH, pFrl->dscInfo.dscHBlankTriBytes)); +} + +static void EvoSetDpDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + NvU32 flatnessDetThresh; + NvU32 i; + + nvAssert(pTimings->dpDsc.enable); + + // XXX: I'm pretty sure that this is wrong. + // BitsPerPixelx16 is something like (24 * 16) = 384, and 2 << (384 - 8) is + // an insanely large number. + flatnessDetThresh = (2 << (pTimings->dpDsc.bitsPerPixelX16 - 8)); /* ??? */ + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _MODE, _SINGLE) | /* TODO DUAL for 2Head1OR */ + DRF_NUM(C57D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _DISABLE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _TRUE)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VSYNC) | + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x1F /* 32 PPS Dwords - 1 = 31 */)); + + +#define NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS \ + (((NVC57D_HEAD_SET_DSC_PPS_DATA31(0) - NVC57D_HEAD_SET_DSC_PPS_DATA0(0)) / 4) + 1) + + ct_assert(NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS <= ARRAY_LEN(pTimings->dpDsc.pps)); + + for (i = 0; i < NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS; i++) { + nvDmaSetStartEvoMethod(pChannel,(NVC57D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4)), 1); + nvDmaSetEvoMethodData(pChannel, pTimings->dpDsc.pps[i]); + } + + /* + * In case of DP, PPS is sent using the SDP over the Main-Link + * during the vertical blanking interval. The PPS SDP header is defined + * in DP 1.4 specification under section 2.2.5.9.1. 
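 + * The header bytes programmed below follow that layout: HB0 = SDP ID + * (0x00), HB1 = SDP type (0x10 for PPS), HB2 = number of payload data + * bytes minus one (0x7f for the 128-byte PPS), and HB3 is reserved.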
 + */ + + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x00) | /* SDP ID = 0x0 */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE1, 0x10) | /* SDP Type = 0x10 */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE2, 0x7f) | /* Number of payload data bytes - 1 = 0x7F */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE3, 0x00)); /* Reserved */ +} + +static void EvoSetDscParamsC5(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + + if (pTimings->hdmiFrlConfig.frlRate != HDMI_FRL_DATA_RATE_NONE) { + if (pTimings->hdmiFrlConfig.dscInfo.bEnableDSC) { + EvoSetHdmiFrlDscParams(pDispEvo, head, pTimings); + } + } else if (pTimings->dpDsc.enable) { + EvoSetDpDscParams(pDispEvo, head, pTimings); + } else { + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + + /* Disable DSC function */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _ENABLE, _FALSE)); + + /* Disable PPS SDP (Secondary-Data Packet), DP won't send out PPS SDP */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _FALSE)); + } + +} + +static void +EvoEnableMidFrameAndDWCFWatermarkC5(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + if (enable) { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A, + _DISABLE_MID_FRAME_AND_DWCF_WATERMARK, + _FALSE, + pDevEvo->gpus[sd].setSwSpareA[head]); + } else { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A, + _DISABLE_MID_FRAME_AND_DWCF_WATERMARK, + _TRUE, + pDevEvo->gpus[sd].setSwSpareA[head]); + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, pDevEvo->gpus[sd].setSwSpareA[head]); + + nvPopEvoSubDevMask(pDevEvo); +} + +static NvU32 EvoGetActiveViewportOffsetC3(NVDispEvoRec *pDispEvo, NvU32 head) +{ + NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS params = { }; + NvU32 ret; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.base.subdeviceIndex = pDispEvo->displayOwner; + params.windowIndex = head << 1; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->rmCtrlHandle, + NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to query active viewport offset"); + } + + return params.activeViewportPointIn.y; +} + +static NvBool EvoComputeWindowScalingTapsC3(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState) +{ + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NVEvoScalerCaps *pScalerCaps = + &pDevEvo->gpus[0].capabilities.window[win].scalerCaps; + + if (!nvAssignScalerTaps(pDevEvo, + pScalerCaps, + pHwState->sizeIn.width, pHwState->sizeIn.height, + pHwState->sizeOut.width, pHwState->sizeOut.height, + FALSE /* doubleScan */, + &pHwState->hTaps, &pHwState->vTaps)) { + return FALSE; + } + + return TRUE; +} + +static NvBool EvoComputeWindowScalingTapsC5(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + 
NVFlipChannelEvoHwState *pHwState) +{ + if (!EvoComputeWindowScalingTapsC3(pDevEvo, pChannel, pHwState)) { + return FALSE; + } + + /* + * If scaling is enabled, CSC11 will be used by NVKMS to convert from + * linear FP16 LMS to linear FP16 RGB. As such, the user-supplied precomp + * CSC can't be programmed into CSC11 in this case. + */ + if ((pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height)) { + if (!IsCscMatrixIdentity(&pHwState->cscMatrix)) { + return FALSE; + } + } + + return TRUE; +} + +static inline const NVEvoScalerCaps* +EvoGetWindowScalingCapsC3(const NVDevEvoRec *pDevEvo) +{ + /* + * Use window 0 by default. This should be fine for now since precomp + * scaling will only be enabled on Orin, and all windows have the same + * capabilities on Orin. + * + * The mapping in this function can be updated if/when precomp scaling + * support is extended to other display architectures. + */ + return &pDevEvo->gpus[0].capabilities.window[0].scalerCaps; +} + + +NVEvoHAL nvEvoC3 = { + EvoSetRasterParamsC3, /* SetRasterParams */ + EvoSetProcAmpC3, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC3, /* HeadSetControlOR */ + EvoORSetControlC3, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + EvoSetUsageBoundsC3, /* SetUsageBounds */ + EvoUpdateC3, /* Update */ + EvoIsModePossibleC3, /* IsModePossible */ + EvoPrePostIMPC3, /* PrePostIMP */ + EvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC3, /* GetCapabilities */ + EvoFlipC3, /* Flip */ + EvoFlipTransitionWARC3, /* FlipTransitionWAR */ + EvoFillLUTSurfaceC3, /* FillLUTSurface */ + EvoSetLUTContextDmaC3, /* SetLUTContextDma */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC3, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + EvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC3, /* ValidateWindowFormat */ + EvoInitCompNotifierC3, /* InitCompNotifier */ + EvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC3, /* InitChannel */ + NULL, /* InitDefaultLut */ + EvoInitWindowMappingC3, /* InitWindowMapping */ + EvoIsChannelIdleC3, /* IsChannelIdle */ + EvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannelC3, /* AccelerateChannel */ + EvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + EvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + EvoQueryHeadCRC32_C3, /* QueryCRC32 */ + EvoGetScanLineC3, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + nvEvo1SetDscParams, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTapsC3, /* ComputeWindowScalingTaps */ + EvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + { /* caps */ + TRUE, /* 
supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + FALSE, /* needDefaultLutSurface */ + FALSE, /* hasUnorm10OLUT */ + TRUE, /* supportsDigitalVibrance */ + FALSE, /* supportsImageSharpening */ + FALSE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + }, +}; + +NVEvoHAL nvEvoC5 = { + EvoSetRasterParamsC3, /* SetRasterParams */ + EvoSetProcAmpC5, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC5, /* HeadSetControlOR */ + EvoORSetControlC3, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + EvoSetUsageBoundsC5, /* SetUsageBounds */ + EvoUpdateC3, /* Update */ + EvoIsModePossibleC3, /* IsModePossible */ + EvoPrePostIMPC3, /* PrePostIMP */ + EvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC5, /* GetCapabilities */ + EvoFlipC5, /* Flip */ + EvoFlipTransitionWARC5, /* FlipTransitionWAR */ + EvoFillLUTSurfaceC5, /* FillLUTSurface */ + EvoSetLUTContextDmaC5, /* SetLUTContextDma */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC5, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + EvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC5, /* ValidateWindowFormat */ + EvoInitCompNotifierC3, /* InitCompNotifier */ + EvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC5, /* InitChannel */ + EvoInitDefaultLutC5, /* InitDefaultLut */ + EvoInitWindowMappingC5, /* InitWindowMapping */ + EvoIsChannelIdleC3, /* IsChannelIdle */ + EvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannelC3, /* AccelerateChannel */ + EvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + EvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + EvoQueryHeadCRC32_C3, /* QueryCRC32 */ + EvoGetScanLineC3, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + EvoSetDscParamsC5, /* SetDscParams */ + EvoEnableMidFrameAndDWCFWatermarkC5, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTapsC5, /* ComputeWindowScalingTaps */ + EvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + TRUE, /* needDefaultLutSurface */ + TRUE, /* hasUnorm10OLUT */ + TRUE, /* 
supportsDigitalVibrance */ + FALSE, /* supportsImageSharpening */ + TRUE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + }, +}; + +NVEvoHAL nvEvoC6 = { + EvoSetRasterParamsC3, /* SetRasterParams */ + EvoSetProcAmpC5, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC5, /* HeadSetControlOR */ + EvoORSetControlC6, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + EvoSetUsageBoundsC5, /* SetUsageBounds */ + EvoUpdateC3, /* Update */ + EvoIsModePossibleC3, /* IsModePossible */ + EvoPrePostIMPC3, /* PrePostIMP */ + EvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC6, /* GetCapabilities */ + EvoFlipC6, /* Flip */ + EvoFlipTransitionWARC6, /* FlipTransitionWAR */ + EvoFillLUTSurfaceC5, /* FillLUTSurface */ + EvoSetLUTContextDmaC5, /* SetLUTContextDma */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC5, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + EvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC6, /* ValidateWindowFormat */ + EvoInitCompNotifierC3, /* InitCompNotifier */ + EvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC5, /* InitChannel */ + EvoInitDefaultLutC5, /* InitDefaultLut */ + EvoInitWindowMappingC5, /* InitWindowMapping */ + EvoIsChannelIdleC3, /* IsChannelIdle */ + EvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannelC3, /* AccelerateChannel */ + EvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + EvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + EvoQueryHeadCRC32_C3, /* QueryCRC32 */ + EvoGetScanLineC3, /* GetScanLine */ + EvoConfigureVblankSyncObjectC6, /* ConfigureVblankSyncObject */ + EvoSetDscParamsC5, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTapsC5, /* ComputeWindowScalingTaps */ + EvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + TRUE, /* needDefaultLutSurface */ + TRUE, /* hasUnorm10OLUT */ + TRUE, /* supportsDigitalVibrance */ + FALSE, /* supportsImageSharpening */ + TRUE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + TRUE, /* supportsHDMIFRL */ + FALSE, /* 
supportsSetStorageMemoryLayout */ + TRUE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + TRUE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + }, +}; diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c new file mode 100644 index 0000000..1bdafba --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c @@ -0,0 +1,2888 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-dma.h" +#include "nvkms-evo.h" +#include "nvkms-flip.h" +#include "nvkms-flip-workarea.h" +#include "nvkms-surface.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-rm.h" +#include "nvkms-vrr.h" +#include "nvkms-cursor.h" +#include "nvkms-types.h" + +#include "nvkms-sync.h" + +static void SchedulePostFlipIMPTimer(NVDevEvoPtr pDevEvo); + +// The EVO .mfs file defines the maximum minPresentInterval to be 8. +#define NV_MAX_SWAP_INTERVAL 8 + +/*! + * Assign the elements in an NVSurfaceEvoPtr[NVKMS_MAX_EYES] array. + * + * Use NVEvoApiHandlesRec to translate an + * NvKmsSurfaceHandle[NVKMS_MAX_EYES] array into an + * NVSurfaceEvoPtr[NVKMS_MAX_EYES] array. + * + * \param[in] pOpenDevSurfaceHandles The client's surfaces. + * \param[in] surfaceHandles The handles naming surfaces. + * \param[out] pSurfaceEvos The surface pointers. + * + * \return Return TRUE if all surfaceHandles could be successfully + * translated into pSurfaceEvos. Otherwise, return FALSE. 
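 + * + * Each non-zero handle must resolve to an ISO surface (isoType + * NVKMS_MEMORY_ISO); a handle of 0 leaves the corresponding eye's + * surface pointer NULL.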
+ */ +static NvBool AssignSurfaceArray( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES], + const NVEvoChannelMask channelMask, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]) +{ + NvU32 eye; + + nvkms_memset(pSurfaceEvos, 0, sizeof(NVSurfaceEvoRec *) * NVKMS_MAX_EYES); + + for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + if (surfaceHandles[eye] != 0) { + pSurfaceEvos[eye] = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandles[eye], + channelMask); + if ((pSurfaceEvos[eye] == NULL) || + (pSurfaceEvos[eye]->isoType != NVKMS_MEMORY_ISO)) { + return FALSE; + } + } + } + return TRUE; +} + + +/*! + * Assign the NVFlipNIsoSurfaceEvoHwState. + * + * Use the given NvKmsNIsoSurface to populate the + * NVFlipNIsoSurfaceEvoHwState. Validate that NvKmsNIsoSurface + * description is legitimate. + * + * \param[in] pDevEvo The device where the surface will be used. + * \param[in] pOpenDevSurfaceHandles The client's surfaces. + * \param[in] pParamsNIso The client's description of the NISO surface. + * \param[in] notifier Whether the NISO surface is a notifier. + * \param[in] pChannel The channel where the surface will be used. + * \param[out] pNIsoState The NVKMS presentation of the NISO surface. + * + * \return Return TRUE if the NVFlipNIsoSurfaceEvoHwState could be + * assigned and validated. Otherwise, return FALSE and leave + * the NVFlipNIsoSurfaceEvoHwState untouched. + */ +static NvBool AssignNIsoEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsNIsoSurface *pParamsNIso, + const NvBool notifier, /* TRUE=notifier; FALSE=semaphore */ + const NVEvoChannel *pChannel, + NVFlipNIsoSurfaceEvoHwState *pNIsoState) +{ + NVSurfaceEvoPtr pSurfaceEvo; + NvU32 elementSizeInBytes = 0, offsetInBytes, maxBytes; + + nvAssert(pParamsNIso->surfaceHandle != 0); + + pSurfaceEvo = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + pParamsNIso->surfaceHandle, + pChannel->channelMask); + if (pSurfaceEvo == NULL) { + return FALSE; + } + + /* Attempt to validate the surface: */ + + /* Only pitch surfaces can be used */ + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + if (pSurfaceEvo->isoType != NVKMS_MEMORY_NISO) { + return FALSE; + } + + if ((pParamsNIso->format != NVKMS_NISO_FORMAT_FOUR_WORD) && + (pParamsNIso->format != NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY) && + (pParamsNIso->format != NVKMS_NISO_FORMAT_LEGACY)) { + return FALSE; + } + + if ((pDevEvo->caps.validNIsoFormatMask & + (1 << pParamsNIso->format)) == 0) { + return FALSE; + } + + /* Check that the item fits within the surface. */ + switch (pParamsNIso->format) { + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + elementSizeInBytes = 16; + break; + case NVKMS_NISO_FORMAT_LEGACY: + if (notifier) { + /* Legacy notifier size depends on the channel. */ + elementSizeInBytes = pChannel->caps.legacyNotifierFormatSizeBytes; + } else { + /* Legacy semaphores are always 4 bytes. */ + elementSizeInBytes = 4; + } + break; + } + +#if defined(DEBUG) + /* Assert that the size calculated by nvkms-sync library is the same as the + * one we derived from channel caps above. 
*/ + if (notifier) { + NvBool overlay = !!(pChannel->channelMask & + NV_EVO_CHANNEL_MASK_OVERLAY_ALL); + NvU32 libSize = nvKmsSizeOfNotifier(pParamsNIso->format, overlay); + nvAssert(libSize == elementSizeInBytes); + } else { + nvAssert(nvKmsSizeOfSemaphore(pParamsNIso->format) == elementSizeInBytes); + } +#endif + /* + * offsetInWords is an NvU16 and offsetInBytes is an NvU32, so + * neither of the expressions: + * offsetInWords * 4 + * offsetInBytes + elementSizeInBytes + * should ever overflow. + */ + + ct_assert(sizeof(pParamsNIso->offsetInWords) == 2); + + offsetInBytes = ((NvU32)pParamsNIso->offsetInWords) * 4; + + /* + * Compute the upper extent of the NISO element within the surface. + */ + + maxBytes = offsetInBytes + elementSizeInBytes; + + if (maxBytes > pSurfaceEvo->planes[0].rmObjectSizeInBytes) { + return FALSE; + } + + /* EVO expects the NISO element to fit within a 4k page. */ + + if (maxBytes > 4096) { + return FALSE; + } + + /* + * XXX NVKMS TODO: Check that the surface is in vidmem if + * NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY + */ + + pNIsoState->pSurfaceEvo = pSurfaceEvo; + pNIsoState->format = pParamsNIso->format; + pNIsoState->offsetInWords = pParamsNIso->offsetInWords; + + return TRUE; +} + + +static NvBool AssignCompletionNotifierEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsCompletionNotifierDescription *pParamsNotif, + const NVEvoChannel *pChannel, + NVFlipCompletionNotifierEvoHwState *pNotif) +{ + NvBool ret; + + nvkms_memset(pNotif, 0, sizeof(*pNotif)); + + /* If no surface is specified, we should not use a notifier. */ + if (pParamsNotif->surface.surfaceHandle == 0) { + return TRUE; + } + + ret = AssignNIsoEvoHwState(pDevEvo, + pOpenDevSurfaceHandles, + &pParamsNotif->surface, + TRUE, /* notifier */ + pChannel, + &pNotif->surface); + if (ret) { + pNotif->awaken = pParamsNotif->awaken; + } + + return ret; +} + +static NvBool AssignSemaphoreEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NVEvoChannel *pChannel, + const NvU32 sd, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject) +{ + NvBool ret; + + nvAssert(!pChannelSyncObjects->useSyncpt); + + nvkms_memset(pFlipSyncObject, 0, sizeof(*pFlipSyncObject)); + + if (!pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore) { + /*! acquire and release sema surface needs to be same */ + if (pChannelSyncObjects->u.semaphores.acquire.surface.surfaceHandle != + pChannelSyncObjects->u.semaphores.release.surface.surfaceHandle) { + return FALSE; + } + if (pChannelSyncObjects->u.semaphores.acquire.surface.format != + pChannelSyncObjects->u.semaphores.release.surface.format) { + return FALSE; + } + if (pChannelSyncObjects->u.semaphores.acquire.surface.offsetInWords != + pChannelSyncObjects->u.semaphores.release.surface.offsetInWords) { + return FALSE; + } + } + + /*! If no surface is specified, we should not use a semaphore.*/ + if (pChannelSyncObjects->u.semaphores.acquire.surface.surfaceHandle != 0) { + + ret = AssignNIsoEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pChannelSyncObjects->u.semaphores.acquire.surface, + FALSE, /* notifier */ + pChannel, + &pFlipSyncObject->u.semaphores.acquireSurface); + if (ret) { + pFlipSyncObject->u.semaphores.acquireValue = + pChannelSyncObjects->u.semaphores.acquire.value; + } else { + return ret; + } + } + + /*! 
If no surface is specified, we should not use a semaphore.*/ + if (pChannelSyncObjects->u.semaphores.release.surface.surfaceHandle != 0) { + + ret = AssignNIsoEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pChannelSyncObjects->u.semaphores.release.surface, + FALSE, /* notifier */ + pChannel, + &pFlipSyncObject->u.semaphores.releaseSurface); + if (ret) { + pFlipSyncObject->u.semaphores.releaseValue = + pChannelSyncObjects->u.semaphores.release.value; + } else { + return ret; + } + } + + return TRUE; +} + +static NvBool AssignPreSyncptEvoHwState( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject) +{ + NvBool ret, bFound = FALSE; + NvU32 id = 0; + NvU32 hSyncptCtxDma, hSyncpt; + NvU32 value; + enum NvKmsSyncptType preType; + + nvAssert(pDevEvo->pAllSyncptUsedInCurrentFlip != NULL); + + nvAssert(pChannelSyncObjects->useSyncpt); + + preType = pChannelSyncObjects->u.syncpts.pre.type; + + if (preType == NVKMS_SYNCPT_TYPE_NONE) { + return TRUE; + } + + if (preType == NVKMS_SYNCPT_TYPE_FD) { + /*! Get id from fd using nvhost API */ + NvKmsSyncPtOpParams params = { }; + params.fd_to_id_and_thresh.fd = + pChannelSyncObjects->u.syncpts.pre.u.fd; + ret = nvkms_syncpt_op(NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH, + &params); + if (!ret) { + return FALSE; + } + id = params.fd_to_id_and_thresh.id; + value = params.fd_to_id_and_thresh.thresh; + } else { + id = pChannelSyncObjects->u.syncpts.pre.u.raw.id; + value = pChannelSyncObjects->u.syncpts.pre.u.raw.value; + } + if (id >= NV_SYNCPT_GLOBAL_TABLE_LENGTH) { + return FALSE; + } + /*! use id value to check the global table */ + bFound = (pDevEvo->preSyncptTable[id].hCtxDma != 0); + if (bFound == FALSE) { + /*! Register - allocate and bind ctxdma to syncpt*/ + ret = nvRmEvoAllocAndBindSyncpt(pDevEvo, + pChannel, + id, + &hSyncpt, + &hSyncptCtxDma); + if (!ret) { + nvAssert(!"Failed to register pre-syncpt"); + return FALSE; + } + + /*! Fill the Entry in Global Table */ + pDevEvo->preSyncptTable[id].hCtxDma = hSyncptCtxDma; + pDevEvo->preSyncptTable[id].hSyncpt = hSyncpt; + pDevEvo->preSyncptTable[id].channelMask |= pChannel->channelMask; + pDevEvo->pAllSyncptUsedInCurrentFlip[id] = NV_TRUE; + pDevEvo->preSyncptTable[id].id = id; + } else { + /*! + * syncpt found, just bind the context dma of this syncpt + * to the window if it is not already. + */ + if ((pDevEvo->preSyncptTable[id].channelMask & + pChannel->channelMask) == 0) { + + ret = nvRmEvoBindDispContextDMA(pDevEvo, + pChannel, + pDevEvo->preSyncptTable[id].hCtxDma); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to bind pre-syncpt with ctxdma"); + return FALSE; + } + pDevEvo->preSyncptTable[id].channelMask |= pChannel->channelMask; + pDevEvo->pAllSyncptUsedInCurrentFlip[id] = NV_TRUE; + /*! hSyncpt already allocated for id*/ + } + } + /*! Fill pre-syncpt related information in hardware state */ + pFlipSyncObject->u.syncpts.preCtxDma = pDevEvo->preSyncptTable[id].hCtxDma; + pFlipSyncObject->u.syncpts.preValue = value; + pFlipSyncObject->usingSyncpt = TRUE; + + return TRUE; +} + +static NvBool AssignPostSyncptEvoHwState( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject) +{ + enum NvKmsSyncptType postType; + NvU32 threshold; + + nvAssert(pChannelSyncObjects->useSyncpt); + + postType = pChannelSyncObjects->u.syncpts.requestedPostType; + + /*! 
 + * It is possible that syncpt is mentioned but post-syncpt + * is not specified (case where only pre-syncpt used) + */ + if (postType == NVKMS_SYNCPT_TYPE_NONE) { + return TRUE; + } + + /*! return threshold to caller but increase only when programming hw */ + threshold = pChannel->postSyncpt.syncptMaxVal + 1; + + /*! each channel associated with one post-syncpt */ + pFlipSyncObject->u.syncpts.postCtxDma = pChannel->postSyncpt.hCtxDma; + pFlipSyncObject->u.syncpts.postValue = threshold; + + pFlipSyncObject->usingSyncpt = TRUE; + + return TRUE; +} + +void nvFillPostSyncptReplyOneChannel( + NVEvoChannel *pChannel, + enum NvKmsSyncptType postType, + struct NvKmsSyncpt *postSyncpt, + const NVFlipSyncObjectEvoHwState *pHwSyncObject) +{ + if (postType == NVKMS_SYNCPT_TYPE_RAW) { + postSyncpt->u.raw.id = pChannel->postSyncpt.id; + postSyncpt->u.raw.value = pHwSyncObject->u.syncpts.postValue; + postSyncpt->type = NVKMS_SYNCPT_TYPE_RAW; + } else if (postType == NVKMS_SYNCPT_TYPE_FD) { + NvBool ret = TRUE; + NvKmsSyncPtOpParams params = { }; + params.id_and_thresh_to_fd.id = pChannel->postSyncpt.id; + params.id_and_thresh_to_fd.thresh = + pHwSyncObject->u.syncpts.postValue; + + ret = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD, &params); + if (!ret) { + nvAssert(!"Failed syncpt op ID_AND_THRESH_TO_FD"); + return; + } + postSyncpt->u.fd = params.id_and_thresh_to_fd.fd; + postSyncpt->type = NVKMS_SYNCPT_TYPE_FD; + } +} + +static void FillPostSyncptReply( + NVDevEvoRec *pDevEvo, + NvU32 sd, + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice, + struct NvKmsFlipReplyOneSubDevice *pReplyOneSubDevice, + const struct NvKmsFlipWorkArea *pWorkArea) +{ + NvU32 head; + + /*! check for valid config */ + if (!pDevEvo->supportsSyncpts) { + return; + } + + for (head = 0; head < ARRAY_LEN(pRequestOneSubDevice->head); head++) { + const struct NvKmsFlipCommonParams *pRequestParams = + &pRequestOneSubDevice->head[head]; + struct NvKmsFlipCommonReplyOneHead *pReplyParams = + &pReplyOneSubDevice->head[head]; + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pRequestParams->layer[layer].syncObjects.specified || + !pRequestParams->layer[layer].syncObjects.val.useSyncpt) { + continue; + } + + nvFillPostSyncptReplyOneChannel( + pDevEvo->head[head].layer[layer], + pRequestParams->layer[layer].syncObjects.val.u.syncpts.requestedPostType, + &pReplyParams->layer[layer].postSyncpt, + &pFlipState->layer[layer].syncObject); + } + } +} + +NvBool nvHandleSyncptRegistration( + NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + NvBool ret = TRUE; + NvU32 layer; + + if (!pDevEvo->supportsSyncpts) { + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pParams->layer[layer].syncObjects.specified && + pParams->layer[layer].syncObjects.val.useSyncpt) { + return FALSE; + } + } + + return TRUE; + } + + pDevEvo->pAllSyncptUsedInCurrentFlip = + nvCalloc(1, sizeof(NvBool) * NV_SYNCPT_GLOBAL_TABLE_LENGTH); + if (pDevEvo->pAllSyncptUsedInCurrentFlip == NULL) { + return FALSE; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pParams->layer[layer].syncObjects.specified || + !pParams->layer[layer].syncObjects.val.useSyncpt) { + continue; + } + + nvkms_memset(&pFlipState->layer[layer].syncObject, + 0, + sizeof(pFlipState->layer[layer].syncObject)); + + ret = AssignPreSyncptEvoHwState(pDevEvo, 
+ pDevEvo->head[head].layer[layer], + &pParams->layer[layer].syncObjects.val, + &pFlipState->layer[layer].syncObject); + if (!ret) { + nvAssert(!"Failed to store hw state for layer pre-syncpt"); + goto done; + } + + ret = AssignPostSyncptEvoHwState(pDevEvo, + pDevEvo->head[head].layer[layer], + &pParams->layer[layer].syncObjects.val, + &pFlipState->layer[layer].syncObject); + if (!ret) { + nvAssert(!"Failed to store hw state for layer post-syncpt"); + goto done; + } + } + +done: + nvFree(pDevEvo->pAllSyncptUsedInCurrentFlip); + pDevEvo->pAllSyncptUsedInCurrentFlip = NULL; + return ret; +} + + +void nvClearFlipEvoHwState( + NVFlipEvoHwState *pFlipState) +{ + NvU32 i; + + nvkms_memset(pFlipState, 0, sizeof(*pFlipState)); + + for (i = 0; i < ARRAY_LEN(pFlipState->layer); i++) { + pFlipState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX; + } +} + +/*! + * Initialize NVFlipEvoHwState with a current snapshot from headState. + */ +void nvInitFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipEvoHwState *pFlipState) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const NVEvoSubDevHeadStateRec *pSdHeadState; + NvU32 i; + + nvClearFlipEvoHwState(pFlipState); + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + pSdHeadState = &pDevEvo->gpus[sd].headState[head]; + + pFlipState->viewPortPointIn = pSdHeadState->viewPortPointIn; + pFlipState->cursor = pSdHeadState->cursor; + + ct_assert(ARRAY_LEN(pFlipState->layer) == ARRAY_LEN(pSdHeadState->layer)); + + for (i = 0; i < ARRAY_LEN(pFlipState->layer); i++) { + pFlipState->layer[i] = pSdHeadState->layer[i]; + } + + // pFlipState->usage describes the usage bounds that will be necessary after + // this flip is complete. Initialize it using pSdHeadState->targetUsage, + // which describes the usage bounds that will be required just before this + // flip occurs, rather than pSdHeadState->usage, which describes the usage + // bounds currently programmed into the hardware. + // + // pSdHeadState->usage may have higher bounds than pSdHeadState->targetUsage + // if TryLoweringUsageBounds has not yet noticed that a satellite channel is + // no longer in use, or a flip to NULL in a satellite channel is pending but + // has not yet occurred. + pFlipState->usage = pSdHeadState->targetUsage; + + pFlipState->disableMidFrameAndDWCFWatermark = + pSdHeadState->targetDisableMidFrameAndDWCFWatermark; +} + + +static NvBool IsLayerDirty(const struct NvKmsFlipCommonParams *pParams, + const NvU32 layer) +{ + return pParams->layer[layer].surface.specified || + pParams->layer[layer].sizeIn.specified || + pParams->layer[layer].sizeOut.specified || + pParams->layer[layer].outputPosition.specified || + pParams->layer[layer].completionNotifier.specified || + pParams->layer[layer].syncObjects.specified || + pParams->layer[layer].compositionParams.specified || + pParams->layer[layer].csc.specified; +} + +/*! + * Check whether the flipPermissions for pOpenDev allow the flipping + * requested by NvKmsFlipCommonParams. 
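 + * + * A layer may be flipped if the client's flip permissions include that + * layer, or if the client holds modeset permissions for the disp+head + * (which implies all layers); changing viewPortIn requires flip + * permission on every layer of the head.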
+ */ +static NvBool CheckFlipPermissions( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams) +{ + const int dispIndex = pDevEvo->gpus[sd].pDispEvo->displayOwner; + const struct NvKmsFlipPermissions *pFlipPermissions = + nvGetFlipPermissionsFromOpenDev(pOpenDev); + const struct NvKmsModesetPermissions *pModesetPermissions = + nvGetModesetPermissionsFromOpenDev(pOpenDev); + const NvU8 allLayersMask = NVBIT(pDevEvo->head[head].numLayers) - 1; + NvU8 layerMask = 0; + NvU32 layer; + + nvAssert(pOpenDev != NULL); + nvAssert(pFlipPermissions != NULL); + nvAssert(pModesetPermissions != NULL); + + layerMask = pFlipPermissions->disp[dispIndex].head[head].layerMask; + + /* + * If the client has modeset permissions for this disp+head, allow + * the client to also perform flips on any layer. + */ + if (!nvDpyIdListIsEmpty(pModesetPermissions->disp[dispIndex]. + head[head].dpyIdList)) { + layerMask = allLayersMask; + } + + /* Changing viewPortIn requires permission to alter all layers. */ + + if (pParams->viewPortIn.specified && (layerMask != allLayersMask)) { + return FALSE; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (IsLayerDirty(pParams, layer) && ((layerMask & NVBIT(layer)) == 0)) { + return FALSE; + } + } + + return TRUE; +} + +/*! + * Determine whether a base channel flip requires a non-tearing present mode. + * + * EVO requires a non-tearing flip when certain parameters are changing. See + * NV_DISP_BASE_STATE_ERROR_052 in dispClass024XBaseUpdateErrorChecks.mfs. + */ +static NvBool FlipRequiresNonTearingMode( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVFlipChannelEvoHwState *pOld, + const NVFlipChannelEvoHwState *pNew) +{ + // TODO: Do we need to care about the right eye here? The error check + // doesn't. + const NVSurfaceEvoRec *pOldSurf = pOld->pSurfaceEvo[NVKMS_LEFT]; + const NVSurfaceEvoRec *pNewSurf = pNew->pSurfaceEvo[NVKMS_LEFT]; + NvU32 oldHwFormat = 0, newHwFormat = 0; + + if (pOldSurf == NULL || pNewSurf == NULL) { + return TRUE; + } + + // If these functions actually return FALSE at this point, then something is + // really wrong... + if (!pDevEvo->hal->ValidateWindowFormat( + pOldSurf->format, NULL, &oldHwFormat)) { + nvAssert(FALSE); + } + + if (!pDevEvo->hal->ValidateWindowFormat( + pNewSurf->format, NULL, &newHwFormat)) { + nvAssert(FALSE); + } + + // Commented entries are things checked in the .mfs that are not yet + // supported in NVKMS. + return // SuperSample + oldHwFormat != newHwFormat || + // Gamma + // Layout (i.e. frame, field1, or field2) + pOldSurf->widthInPixels != pNewSurf->widthInPixels || + pOldSurf->heightInPixels != pNewSurf->heightInPixels || + pOldSurf->layout != pNewSurf->layout; + // UseGainOfs + // NewBaseLut -- USE_CORE_LUT is programmed in InitChannel* + // NewOutputLut +} + + +/*! + * Apply flip overrides if necessary. + * + * 1. Override swap intervals for VRR. + * 2. If the flip is changing certain parameters, override the tearing mode. + */ +static NvBool ApplyBaseFlipOverrides( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipChannelEvoHwState *pNew, + NvBool allowVrr) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const NVFlipChannelEvoHwState *pOld = + &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER]; + + // Apply VRR swap interval overrides. 
+    //
+    // Note that this applies the overrides whenever the client requests VRR and
+    // VRR is enabled, regardless of whether actually activating it later
+    // succeeds.
+    if (allowVrr) {
+        if (!nvHeadIsActive(pDispEvo, head)) {
+            //
+            // XXX If VRR is allowed then the modeset should have happened
+            // before the base channel flip; currently we don't know how to
+            // do a modeset and program the base channel for VRR at the same
+            // time. This should be revisited as part of bug 1731279.
+            //
+            return FALSE;
+        }
+        nvApplyVrrBaseFlipOverrides(pDevEvo->gpus[sd].pDispEvo, head,
+                                    pOld, pNew);
+    }
+
+    if (!nvHeadIsActive(pDispEvo, head)) {
+        //
+        // This is possible when a modeset and a base flip happen at the same
+        // time; the tearing parameter does not make sense in that case, so
+        // it is disabled.
+        //
+        pNew->tearing = FALSE;
+    } else {
+        // Force non-tearing mode if EVO requires it.
+        if (FlipRequiresNonTearingMode(pDevEvo, head, pOld, pNew)) {
+            pNew->tearing = FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+static NvBool ValidateScalingUsageBounds(
+    const struct NvKmsScalingUsageBounds *pS,
+    const struct NvKmsScalingUsageBounds *pMaxS)
+{
+    return (pS->maxVDownscaleFactor <= pMaxS->maxVDownscaleFactor) &&
+           (pS->maxHDownscaleFactor <= pMaxS->maxHDownscaleFactor) &&
+           (pS->vTaps <= pMaxS->vTaps) &&
+           (!pS->vUpscalingAllowed || pMaxS->vUpscalingAllowed);
+}
+
+/*!
+ * Validate the requested usage bounds against the specified maximums.
+ */
+static NvBool ValidateUsageBounds(
+    const NVDevEvoRec *pDevEvo,
+    const NvU32 head,
+    const struct NvKmsUsageBounds *pUsage,
+    const struct NvKmsUsageBounds *pGuaranteedUsage)
+{
+    NvU32 i;
+
+    for (i = 0; i < pDevEvo->head[head].numLayers; i++) {
+        const NvU64 supportedSurfaceFormatsUnion =
+            pUsage->layer[i].supportedSurfaceMemoryFormats |
+            pGuaranteedUsage->layer[i].supportedSurfaceMemoryFormats;
+
+        if ((pUsage->layer[i].usable && !pGuaranteedUsage->layer[i].usable) ||
+            (supportedSurfaceFormatsUnion !=
+             pGuaranteedUsage->layer[i].supportedSurfaceMemoryFormats) ||
+            !ValidateScalingUsageBounds(&pUsage->layer[i].scaling,
+                                        &pGuaranteedUsage->layer[i].scaling)) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Assign pFlipState->usage.
+ */
+static NvBool AssignUsageBounds(
+    const NVDevEvoRec *pDevEvo,
+    const NvU32 head,
+    NVFlipEvoHwState *pFlipState)
+{
+    struct NvKmsUsageBounds *pUsage = &pFlipState->usage;
+    int i;
+
+    for (i = 0; i < pDevEvo->head[head].numLayers; i++) {
+        const NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i];
+
+        nvInitScalingUsageBounds(pDevEvo, &pUsage->layer[i].scaling);
+
+        if (pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]) {
+            pUsage->layer[i].usable = TRUE;
+            pUsage->layer[i].supportedSurfaceMemoryFormats =
+                nvEvoGetFormatsWithEqualOrLowerUsageBound(
+                    pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]->format,
+                    pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats);
+
+            /* Scaling is not currently supported for the main layer.
Bug 3488083 */ + if (i != NVKMS_MAIN_LAYER && pDevEvo->hal->GetWindowScalingCaps) { + const NVEvoScalerCaps *pScalerCaps = + pDevEvo->hal->GetWindowScalingCaps(pDevEvo); + + if (!nvComputeScalingUsageBounds(pScalerCaps, + pLayerFlipState->sizeIn.width, + pLayerFlipState->sizeIn.height, + pLayerFlipState->sizeOut.width, + pLayerFlipState->sizeOut.height, + pLayerFlipState->hTaps, + pLayerFlipState->vTaps, + &pUsage->layer[i].scaling)) { + return FALSE; + } + } + } else { + pUsage->layer[i].usable = FALSE; + pUsage->layer[i].supportedSurfaceMemoryFormats = 0; + } + } + + return TRUE; +} + +static NvBool OverrideUsageBounds(const NVDevEvoRec *pDevEvo, + NVFlipEvoHwState *pFlipState, + const struct NvKmsFlipCommonParams *pParams, + NvU32 sd, + NvU32 head, + const struct NvKmsUsageBounds *pPossibleUsage) +{ + NvU32 i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + const struct NvKmsScalingUsageBounds *pPossibleScaling = + &pPossibleUsage->layer[i].scaling; + struct NvKmsScalingUsageBounds *pTargetScaling = + &pFlipState->usage.layer[i].scaling; + NvU16 possibleV = pPossibleScaling->maxVDownscaleFactor; + NvU16 possibleH = pPossibleScaling->maxHDownscaleFactor; + NvU16 targetV = pTargetScaling->maxVDownscaleFactor; + NvU16 targetH = pTargetScaling->maxHDownscaleFactor; + + if (!pFlipState->usage.layer[i].usable) { + continue; + } + + if (pParams->layer[i].maxDownscaleFactors.specified) { + NvU16 requestedV = pParams->layer[i].maxDownscaleFactors.vertical; + NvU16 requestedH = pParams->layer[i].maxDownscaleFactors.horizontal; + + if ((requestedV < targetV) || (requestedH < targetH)) { + return FALSE; + } + + if ((requestedV > possibleV) || (requestedH > possibleH)) { + return FALSE; + } + + pTargetScaling->maxVDownscaleFactor = requestedV; + pTargetScaling->maxHDownscaleFactor = requestedH; + } else { + /* + * Calculate max H/V downscale factor by quantizing the range. + * + * E.g., + * max H/V downscale factor supported by HW is 4x for 5-tap and 2x + * for 2-tap mode. If 5-tap mode is required, the target usage bound + * that nvkms will attempt to program will either allow up to 2x + * downscaling, or up to 4x downscaling. If 2-tap mode is required, + * the target usage bound that NVKMS will attempt to program will + * allow up to 2x downscaling. Example: to downscale from 4096x2160 + * -> 2731x864 in 5-tap mode, NVKMS would specify up to 2x for the + * H downscale bound (required is 1.5x), and up to 4x for the V + * downscale bound (required is 2.5x). 
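+             *
+             * Restated in terms of the code below: a required factor above
+             * 1X is snapped up to possibleMid = 1X + (possible - 1X) / 2
+             * when it fits, and to the full possible factor otherwise.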
+             */
+            if (targetV > NV_EVO_SCALE_FACTOR_1X) {
+                const NvU16 possibleMid =
+                    NV_EVO_SCALE_FACTOR_1X + ((possibleV - NV_EVO_SCALE_FACTOR_1X) / 2);
+
+                if (targetV <= possibleMid) {
+                    pTargetScaling->maxVDownscaleFactor = possibleMid;
+                } else {
+                    pTargetScaling->maxVDownscaleFactor = possibleV;
+                }
+            }
+
+            if (targetH > NV_EVO_SCALE_FACTOR_1X) {
+                const NvU16 possibleMid =
+                    NV_EVO_SCALE_FACTOR_1X + ((possibleH - NV_EVO_SCALE_FACTOR_1X) / 2);
+
+                if (targetH <= possibleMid) {
+                    pTargetScaling->maxHDownscaleFactor = possibleMid;
+                } else {
+                    pTargetScaling->maxHDownscaleFactor = possibleH;
+                }
+            }
+        }
+
+        pTargetScaling->vTaps = pPossibleScaling->vTaps;
+        pTargetScaling->vUpscalingAllowed = pPossibleScaling->vUpscalingAllowed;
+    }
+
+    return TRUE;
+}
+
+static NvBool FlipTimeStampValidForChannel(
+    const NVEvoChannel *pChannel,
+    NvU64 timeStamp)
+{
+    if (pChannel->caps.validTimeStampBits < 64) {
+        const NvU64 validTimeStampMask =
+            NVBIT64(pChannel->caps.validTimeStampBits) - 1;
+        if ((timeStamp & ~validTimeStampMask) != 0) {
+            return FALSE;
+        }
+    }
+    return TRUE;
+}
+
+static NvBool ValidatePerLayerCompParams(
+    const struct NvKmsCompositionParams *pCompParams,
+    const struct NvKmsCompositionCapabilities *pCaps,
+    NVSurfaceEvoPtr pSurfaceEvo)
+{
+    const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = (pSurfaceEvo != NULL) ?
+        nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format) : NULL;
+    const enum NvKmsCompositionColorKeySelect colorKeySelect =
+        pCompParams->colorKeySelect;
+    NvU32 match;
+
+    if ((pCaps->supportedColorKeySelects & NVBIT(colorKeySelect)) == 0x0) {
+        return FALSE;
+    }
+
+    NVKMS_COMPOSITION_FOR_MATCH_BITS(colorKeySelect, match) {
+        if ((pCaps->colorKeySelect[colorKeySelect].supportedBlendModes[match] &
+             NVBIT(pCompParams->blendingMode[match])) == 0x0) {
+            return FALSE;
+        }
+
+        switch (pCompParams->blendingMode[match]) {
+        case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA:
+        case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA:
+            if (pCompParams->surfaceAlpha != 0) {
+                return FALSE;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+
+    /* Match and no-match pixels must not both use an alpha blending mode. */
+    if ((colorKeySelect != NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) &&
+        (NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[0])) &&
+        (NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[1]))) {
+        return FALSE;
+    }
+
+    /*
+     * If the surface is NULL, no further validation is required: the
+     * composition parameters do not take effect if the surface is NULL.
+     */
+    if (pFormatInfo == NULL || pFormatInfo->isYUV) {
+        return TRUE;
+    }
+
+    /* Disable color keying for 8 Bpp surfaces.
*/ + if ((colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) || + (colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST)) { + + if (pFormatInfo->rgb.bytesPerPixel > 4) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool UpdateLayerFlipEvoHwStateCommon( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 layer, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + const NVEvoChannel *pChannel = pDevEvo->head[head].layer[layer]; + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer]; + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pOpenDev); + NvBool ret; + + if (pParams->layer[layer].surface.specified) { + ret = AssignSurfaceArray(pDevEvo, + pOpenDevSurfaceHandles, + pParams->layer[layer].surface.handle, + pChannel->channelMask, + pHwState->pSurfaceEvo); + if (!ret) { + return FALSE; + } + + /* + * Verify the (rotation, reflectionX, reflectionY) is a + * combination currently supported. + */ + if ((NVBIT(NvKmsRRParamsToCapBit(&pParams->layer[layer].surface.rrParams)) & + pDevEvo->caps.validLayerRRTransforms) == 0) { + return FALSE; + } + pHwState->rrParams = pParams->layer[layer].surface.rrParams; + + } + + /* Verify the timestamp is in the valid range for this channel. */ + if (!FlipTimeStampValidForChannel(pChannel, + pParams->layer[layer].timeStamp)) { + return FALSE; + } + pHwState->timeStamp = pParams->layer[layer].timeStamp; + + /*! + * The NVKMS_SYNCPT_TYPE* types are handled earlier in the flip path (in + * the function nvHandleSyncptRegistration) + */ + if (pParams->layer[layer].syncObjects.specified && + !pParams->layer[layer].syncObjects.val.useSyncpt) { + + if (pParams->layer[layer].syncObjects.val.u.semaphores.acquire.surface.surfaceHandle != 0 || + pParams->layer[layer].syncObjects.val.u.semaphores.release.surface.surfaceHandle != 0) { + if (pParams->layer[layer].skipPendingFlips) { + return FALSE; + } + } + + ret = AssignSemaphoreEvoHwState(pDevEvo, + pOpenDevSurfaceHandles, + pChannel, + sd, + &pParams->layer[layer].syncObjects.val, + &pHwState->syncObject); + if (!ret) { + return FALSE; + } + } + + if (pHwState->pSurfaceEvo[NVKMS_LEFT]) { + pHwState->minPresentInterval = + pParams->layer[layer].minPresentInterval; + } else { + /* The hardware requires that MPI be 0 when disabled. */ + pHwState->minPresentInterval = 0; + } + + if (pParams->layer[layer].sizeIn.specified) { + pHwState->sizeIn = pParams->layer[layer].sizeIn.val; + } + + if (pParams->layer[layer].sizeOut.specified) { + pHwState->sizeOut = pParams->layer[layer].sizeOut.val; + } + + /* + * If supportsWindowMode = TRUE, the sizeIn/sizeOut dimensions can be + * any arbitrary (valid) values. + * + * If supportsWindowMode = FALSE (legacy EVO main layer), the sizeIn + * /sizeOut dimensions must match the size of the surface for that layer. + * + * Note that if sizeIn/Out dimensions are invalid i.e. with a width or + * height of zero, this will be rejected by a call to + * ValidateFlipChannelEvoHwState() later in the code path. + * + * Note that if scaling is unsupported, i.e. that sizeIn cannot differ + * from sizeOut, then any unsupported configurations will be caught by the + * ComputeWindowScalingTaps() call later on in this function. 
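+     *
+     * Illustrative example: a legacy EVO main layer scanning out a
+     * 1920x1080 surface must use sizeIn == sizeOut == 1920x1080, while a
+     * layer with supportsWindowMode may request, e.g., sizeIn 1920x1080
+     * with sizeOut 960x540, subject to the scaling checks below.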
+ */ + if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode && + (pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL)) { + const NVSurfaceEvoRec *pSurfaceEvo = + pHwState->pSurfaceEvo[NVKMS_LEFT]; + + if ((pHwState->sizeIn.width != pSurfaceEvo->widthInPixels) || + (pHwState->sizeIn.height != pSurfaceEvo->heightInPixels)) { + return FALSE; + } + + if ((pHwState->sizeOut.width != pSurfaceEvo->widthInPixels) || + (pHwState->sizeOut.height != pSurfaceEvo->heightInPixels)) { + return FALSE; + } + } + + /* + * Allow the client to specify non-origin outputPosition only if the + * layer supports window mode. + * + * If window mode is unsupported but the client specifies non-origin + * outputPosition, return FALSE. + */ + if (pDevEvo->caps.layerCaps[layer].supportsWindowMode) { + if (pParams->layer[layer].outputPosition.specified) { + const NvS16 x = pParams->layer[layer].outputPosition.val.x; + const NvS16 y = pParams->layer[layer].outputPosition.val.y; + if ((pHwState->outputPosition.x != x) || + (pHwState->outputPosition.y != y)) { + pHwState->outputPosition.x = x; + pHwState->outputPosition.y = y; + pFlipState->dirty.layerPosition[layer] = TRUE; + } + } + } else if (pParams->layer[layer].outputPosition.specified && + ((pParams->layer[layer].outputPosition.val.x != 0) || + (pParams->layer[layer].outputPosition.val.y != 0))) { + return FALSE; + } + + if (pParams->layer[layer].compositionParams.specified) { + pHwState->composition = + pParams->layer[layer].compositionParams.val; + } + + if (pHwState->composition.depth == 0) { + pHwState->composition.depth = + NVKMS_MAX_LAYERS_PER_HEAD - layer; + } + + /* XXX Move ValidatePerLayerCompParams() call to nvValidateFlipEvoHwState() */ + if (!ValidatePerLayerCompParams( + &pHwState->composition, + &pDevEvo->caps.layerCaps[layer].composition, + pHwState->pSurfaceEvo[NVKMS_LEFT])) { + return FALSE; + } + + if (!pDevEvo->hal->ComputeWindowScalingTaps(pDevEvo, + pChannel, + pHwState)) { + return FALSE; + } + + if (pParams->layer[layer].completionNotifier.specified) { + ret = AssignCompletionNotifierEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pParams->layer[layer].completionNotifier.val, + pChannel, + &pFlipState->layer[layer].completionNotifier); + if (!ret) { + return FALSE; + } + } + + pFlipState->dirty.layer[layer] = TRUE; + + return TRUE; +} + +static NvBool UpdateMainLayerFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState, + NvBool allowVrr) +{ + const NVEvoChannel *pChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[NVKMS_MAIN_LAYER]; + + if (!IsLayerDirty(pParams, NVKMS_MAIN_LAYER)) { + return TRUE; + } + + if (!UpdateLayerFlipEvoHwStateCommon(pOpenDev, pDevEvo, sd, head, + NVKMS_MAIN_LAYER, + pParams, pFlipState)) { + return FALSE; + } + + if (pParams->layer[NVKMS_MAIN_LAYER].csc.specified) { + if (pParams->layer[NVKMS_MAIN_LAYER].csc.useMain) { + return FALSE; + } else { + pHwState->cscMatrix = pParams->layer[NVKMS_MAIN_LAYER].csc.matrix; + } + } + + if (pParams->layer[NVKMS_MAIN_LAYER].surface.specified) { + if (pParams->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip && + !pChannel->caps.perEyeStereoFlips) { + return FALSE; + } + + pHwState->perEyeStereoFlip = + pParams->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip; + } + + if (pParams->layer[NVKMS_MAIN_LAYER].tearing && !pChannel->caps.tearingFlips) { + return FALSE; + } + + // EVO will throw 
an invalid argument exception if + // minPresentInterval is too large, or if tearing is enabled and + // it's not zero. + if (pParams->layer[NVKMS_MAIN_LAYER].minPresentInterval > NV_MAX_SWAP_INTERVAL || + (pParams->layer[NVKMS_MAIN_LAYER].tearing && + pParams->layer[NVKMS_MAIN_LAYER].minPresentInterval != 0)) { + return FALSE; + } + + pHwState->tearing = pParams->layer[NVKMS_MAIN_LAYER].tearing; + + if (!ApplyBaseFlipOverrides(pDevEvo, + sd, head, &pFlipState->layer[NVKMS_MAIN_LAYER], + allowVrr)) { + return FALSE; + } + + return TRUE; +} + +static NvBool UpdateCursorLayerFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pOpenDev); + + if (pParams->cursor.imageSpecified) { + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES] = { }; + + if (!nvGetCursorImageSurfaces(pDevEvo, + pOpenDevSurfaceHandles, + &pParams->cursor.image, + pSurfaceEvos)) { + return FALSE; + } + + /* XXX NVKMS TODO: add support for stereo cursor */ + if (pSurfaceEvos[NVKMS_RIGHT] != NULL) { + return FALSE; + } + + pFlipState->cursor.pSurfaceEvo = pSurfaceEvos[NVKMS_LEFT]; + + if (pFlipState->cursor.pSurfaceEvo != NULL) { + if (!ValidatePerLayerCompParams(&pParams->cursor.image.cursorCompParams, + &pDevEvo->caps.cursorCompositionCaps, + pFlipState->cursor.pSurfaceEvo)) { + return FALSE; + } + + pFlipState->cursor.cursorCompParams = + pParams->cursor.image.cursorCompParams; + } + + pFlipState->dirty.cursorSurface = TRUE; + } + + if (pParams->cursor.positionSpecified) { + pFlipState->cursor.x = pParams->cursor.position.x; + pFlipState->cursor.y = pParams->cursor.position.y; + + pFlipState->dirty.cursorPosition = TRUE; + } + + return TRUE; +} + +static NvBool UpdateOverlayLayerFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 layer, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer]; + + nvAssert(layer != NVKMS_MAIN_LAYER); + + if (!IsLayerDirty(pParams, layer)) { + return TRUE; + } + + if (pParams->layer[layer].skipPendingFlips || + pParams->layer[layer].perEyeStereoFlip) { + return FALSE; + } + + if (!UpdateLayerFlipEvoHwStateCommon(pOpenDev, pDevEvo, sd, head, layer, + pParams, pFlipState)) { + return FALSE; + } + + if (pParams->layer[layer].csc.specified) { + if (pParams->layer[layer].csc.useMain) { + if (pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]) { + pHwState->cscMatrix = + pFlipState->layer[NVKMS_MAIN_LAYER].cscMatrix; + } + } else { + pHwState->cscMatrix = pParams->layer[layer].csc.matrix; + } + } + + return TRUE; +} + +/*! + * Update the NVFlipEvoHwState, using NvKmsFlipCommonParams. + * + * Propagate the requested configuration from NvKmsFlipCommonParams to + * NVFlipEvoHwState, performing steps such as translating from + * NvKmsSurfaceHandle to NVSurfaceEvoRecs. Validate the NvKmsFlipCommonParams + * parameters, but defer more general validation of the resulting + * NVFlipEvoHwState until nvValidateFlipEvoHwState(), which callers must call + * separately. + * + * The NVFlipEvoHwState should first be initialized by calling + * nvInitFlipEvoHwState(). 
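+ *
+ * A typical calling sequence, as an illustrative sketch (nvFlipEvo() is
+ * the real caller; the variable names here are placeholders):
+ *
+ *   nvInitFlipEvoHwState(pDevEvo, sd, head, pFlipState);
+ *   if (nvUpdateFlipEvoHwState(pOpenDev, pDevEvo, sd, head, pParams,
+ *                              pFlipState, allowVrr, pPossibleUsage) &&
+ *       nvValidateFlipEvoHwState(pDevEvo, head, pTimings, pFlipState)) {
+ *       // commit the validated state
+ *       nvFlipEvoOneHead(pDevEvo, sd, head, pFlipState, allowFlipLock,
+ *                        &updateState);
+ *   }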
+ *
+ * No NVKMS hardware or software state should be altered here, because
+ * this function is used before we have decided to commit the proposed
+ * NVFlipEvoHwState to hardware.
+ *
+ * \param[in]  pOpenDev        The pOpenDev of the client doing the flip.
+ * \param[in]  pDevEvo         The device on which the surface image will be set.
+ * \param[in]  sd              The subdevice for the flip, as specified by the
+ *                             client.
+ * \param[in]  head            The head for the flip, as specified by the client.
+ * \param[in]  pParams         The requested flip, NvKmsFlipCommonParams.
+ * \param[in,out] pFlipState   The resulting NVFlipEvoHwState.
+ * \param[in]  allowVrr        Whether VRR flipping should be allowed.
+ * \param[in]  pPossibleUsage  Possible usage.
+ *
+ * \return If pFlipState could be updated, return TRUE.
+ *         Otherwise, return FALSE.
+ */
+NvBool nvUpdateFlipEvoHwState(
+    const struct NvKmsPerOpenDev *pOpenDev,
+    const NVDevEvoRec *pDevEvo,
+    const NvU32 sd,
+    const NvU32 head,
+    const struct NvKmsFlipCommonParams *pParams,
+    NVFlipEvoHwState *pFlipState,
+    NvBool allowVrr,
+    const struct NvKmsUsageBounds *pPossibleUsage)
+{
+    NvU32 layer;
+
+    if (!CheckFlipPermissions(pOpenDev, pDevEvo, sd, head, pParams)) {
+        return FALSE;
+    }
+
+    if (pParams->viewPortIn.specified) {
+        pFlipState->dirty.viewPortPointIn = TRUE;
+        pFlipState->viewPortPointIn = pParams->viewPortIn.point;
+    }
+
+    if (!UpdateCursorLayerFlipEvoHwState(pOpenDev, pDevEvo, pParams,
+                                         pFlipState)) {
+        return FALSE;
+    }
+
+    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+        if (layer == NVKMS_MAIN_LAYER) {
+            if (!UpdateMainLayerFlipEvoHwState(pOpenDev, pDevEvo, sd, head,
+                                               pParams, pFlipState, allowVrr)) {
+                return FALSE;
+            }
+            continue;
+        }
+
+        if (!UpdateOverlayLayerFlipEvoHwState(pOpenDev, pDevEvo, sd, head,
+                                              layer, pParams, pFlipState)) {
+            return FALSE;
+        }
+    }
+
+    if (!AssignUsageBounds(pDevEvo, head, pFlipState)) {
+        return FALSE;
+    }
+
+    if (!OverrideUsageBounds(pDevEvo, pFlipState, pParams, sd, head,
+                             pPossibleUsage)) {
+        return FALSE;
+    }
+
+
+    /*
+     * If there is an active cursor or cropped window (overlay) without a
+     * full-screen window (base/core), then NVKMS is supposed to disable the
+     * MidFrame/DWCF watermark.
+     */
+
+    pFlipState->disableMidFrameAndDWCFWatermark = FALSE;
+
+    if (NV5070_CTRL_SYSTEM_GET_CAP(
+            pDevEvo->capsBits,
+            NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH) &&
+        !pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]) {
+
+        if (pFlipState->cursor.pSurfaceEvo != NULL) {
+            pFlipState->disableMidFrameAndDWCFWatermark = TRUE;
+        } else {
+            NvU32 layer;
+
+            /*
+             * XXX TODO: Check the output size of the overlay in order
+             * to determine if it will be fullscreen or not.
+             */
+            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                if (layer != NVKMS_MAIN_LAYER &&
+                    pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL) {
+                    pFlipState->disableMidFrameAndDWCFWatermark = TRUE;
+                    break;
+                }
+            }
+        }
+    }
+
+    return TRUE;
+}
+
+/*
+ * If the surface is NULL (i.e., no image will be shown), check that various
+ * other elements are NULL as well. If the surface is not NULL, verify that
+ * sizeIn/Out have nonzero values.
+ */
+inline static NvBool ValidateFlipChannelEvoHwState(
+    const NVFlipChannelEvoHwState *pState)
+{
+    if (pState->pSurfaceEvo[NVKMS_LEFT] != NULL) {
+        /* Verify sizes are valid.
*/ + if ((pState->sizeIn.width == 0) || (pState->sizeIn.height == 0) || + (pState->sizeOut.width == 0) || (pState->sizeOut.height == 0)) { + return FALSE; + } + + return TRUE; + } + + if (pState->completionNotifier.surface.pSurfaceEvo != NULL) { + return FALSE; + } + + if (!pState->syncObject.usingSyncpt) { + if (pState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo != NULL) { + return FALSE; + } + + if (pState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool ValidateSurfaceSize( + const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsRect *sourceFetchRect) +{ + NvU8 planeIndex; + + if ((pSurfaceEvo->widthInPixels > pDevEvo->caps.maxWidthInPixels) || + (pSurfaceEvo->heightInPixels > pDevEvo->caps.maxHeight)) { + return FALSE; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + NvU64 planePitch = pSurfaceEvo->planes[planeIndex].pitch; + + /* + * Convert planePitch to units of bytes if it's currently specified in + * units of blocks. Each block is 64-bytes wide. + */ + if (pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + planePitch <<= NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH; + } + + if (planePitch > pDevEvo->caps.maxWidthInBytes) { + return FALSE; + } + } + + if (!pDevEvo->hal->ValidateWindowFormat(pSurfaceEvo->format, + sourceFetchRect, + NULL)) { + return FALSE; + } + + return TRUE; +} + +static NvBool +ValidateMainFlipChannelEvoHwState(const NVDevEvoRec *pDevEvo, + const NVFlipChannelEvoHwState *pHwState, + const NVHwModeTimingsEvo *pTimings, + struct NvKmsPoint viewPortPointIn) +{ + NvU32 eye; + const NVSurfaceEvoRec *pFirstSurfaceEvo = NULL; + + /* + * This struct represents the source fetch rectangle for a given surface, + * and will be populated later as such. This function doesn't explicitly set + * sourceFetchRect.{x,y} because NVKMS currently doesn't support programming + * source fetch offsets, so the init value of 0 should be fine for both of + * these fields. + */ + struct NvKmsRect sourceFetchRect = {0}; + + if (!ValidateFlipChannelEvoHwState(pHwState)) { + return FALSE; + } + + for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[eye]; + + if (pSurfaceEvo == NULL) { + continue; + } + + if (pFirstSurfaceEvo == NULL) { + pFirstSurfaceEvo = pSurfaceEvo; + } else if (pSurfaceEvo->widthInPixels != + pFirstSurfaceEvo->widthInPixels || + pSurfaceEvo->heightInPixels != + pFirstSurfaceEvo->heightInPixels) { + return FALSE; + } + + sourceFetchRect.width = pHwState->sizeIn.width; + sourceFetchRect.height = pHwState->sizeIn.height; + + if (!ValidateSurfaceSize(pDevEvo, pSurfaceEvo, &sourceFetchRect)) { + return FALSE; + } + + /* The use of A_plus_B_greater_than_C_U16 is only valid if these + * fit in a U16 */ + nvAssert(pSurfaceEvo->widthInPixels <= NV_U16_MAX); + nvAssert(pSurfaceEvo->heightInPixels <= NV_U16_MAX); + /* And the checks above in ValidateSurfaceSize should have + * guaranteed that. */ + nvAssert(pDevEvo->caps.maxWidthInPixels <= NV_U16_MAX); + nvAssert(pDevEvo->caps.maxHeight <= NV_U16_MAX); + + /* + * Validate that the requested viewport parameters fit within the + * specified surface, unless the main layer is allowed to be smaller + * than the viewport. 
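+         *
+         * Illustrative arithmetic: with viewPortPointIn.x == 100 and a
+         * viewPort.in.width of 1920, the surface must be at least
+         * 100 + 1920 == 2020 pixels wide, or the check below fails.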
+         */
+        if (!pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportsWindowMode) {
+            if (A_plus_B_greater_than_C_U16(viewPortPointIn.x,
+                                            pTimings->viewPort.in.width,
+                                            pSurfaceEvo->widthInPixels)) {
+                return FALSE;
+            }
+
+            if (A_plus_B_greater_than_C_U16(viewPortPointIn.y,
+                                            pTimings->viewPort.in.height,
+                                            pSurfaceEvo->heightInPixels)) {
+                return FALSE;
+            }
+        }
+    }
+
+    return TRUE;
+}
+
+static NvBool
+ValidateOverlayFlipChannelEvoHwState(const NVDevEvoRec *pDevEvo,
+                                     const NvU32 head,
+                                     const NvU32 layer,
+                                     const NVFlipChannelEvoHwState *pHwState)
+{
+    const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT];
+
+    /*
+     * This struct represents the source fetch rectangle for a given surface,
+     * and will be populated later as such. This function doesn't explicitly set
+     * sourceFetchRect.{x,y} because NVKMS currently doesn't support programming
+     * source fetch offsets, so the init value of 0 should be fine for both of
+     * these fields.
+     */
+    struct NvKmsRect sourceFetchRect = {0};
+
+    nvAssert(layer != NVKMS_MAIN_LAYER);
+
+    if (!ValidateFlipChannelEvoHwState(pHwState)) {
+        return FALSE;
+    }
+
+    if (pSurfaceEvo == NULL) {
+        return TRUE;
+    }
+
+    sourceFetchRect.width = pHwState->sizeIn.width;
+    sourceFetchRect.height = pHwState->sizeIn.height;
+
+    if (!ValidateSurfaceSize(pDevEvo, pSurfaceEvo, &sourceFetchRect)) {
+        return FALSE;
+    }
+
+    /* Validate input size against surface size. */
+    if (pHwState->sizeIn.width > pSurfaceEvo->widthInPixels ||
+        pHwState->sizeIn.height > pSurfaceEvo->heightInPixels) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Perform validation of the given NVFlipEvoHwState.
+ */
+NvBool nvValidateFlipEvoHwState(
+    const NVDevEvoRec *pDevEvo,
+    const NvU32 head,
+    const NVHwModeTimingsEvo *pTimings,
+    const NVFlipEvoHwState *pFlipState)
+{
+    NvU32 layer;
+
+    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+
+        if (pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL) {
+            NvU32 tmpLayer;
+
+            /* Depth should be different for each of the layers owned by the head */
+            for (tmpLayer = 0; tmpLayer < pDevEvo->head[head].numLayers; tmpLayer++) {
+                if (pFlipState->layer[tmpLayer].pSurfaceEvo[NVKMS_LEFT] == NULL) {
+                    continue;
+                }
+
+                if ((tmpLayer != layer) &&
+                    (pFlipState->layer[tmpLayer].composition.depth ==
+                     pFlipState->layer[layer].composition.depth)) {
+                    return FALSE;
+                }
+            }
+
+            /* Depth of the main layer should be the greatest one */
+            if ((pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT] != NULL) &&
+                (pFlipState->layer[layer].composition.depth >
+                 pFlipState->layer[NVKMS_MAIN_LAYER].composition.depth)) {
+                return FALSE;
+            }
+        }
+
+        if (layer == NVKMS_MAIN_LAYER) {
+            if (!ValidateMainFlipChannelEvoHwState(pDevEvo,
+                                                   &pFlipState->layer[layer],
+                                                   pTimings,
+                                                   pFlipState->viewPortPointIn)) {
+                return FALSE;
+            }
+            continue;
+        }
+
+        if (pFlipState->dirty.layer[layer] &&
+            !ValidateOverlayFlipChannelEvoHwState(pDevEvo,
+                                                  head,
+                                                  layer,
+                                                  &pFlipState->layer[layer])) {
+            return FALSE;
+        }
+    }
+
+    /* XXX NVKMS TODO: validate cursor x,y against current viewport in? */
+
+    return ValidateUsageBounds(pDevEvo,
+                               head,
+                               &pFlipState->usage,
+                               &pTimings->viewPort.possibleUsage);
+}
+
+
+/*!
+ * Validate that an overlay is enabled only with a valid core scanout surface.
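+ *
+ * E.g., a flip request that leaves an overlay layer's surface non-NULL
+ * while the main layer's scanout surface is NULL (or is being flipped to
+ * NULL) is rejected.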
+ */
+static NvBool ValidatePerDispState(
+    const NVDevEvoRec *pDevEvo,
+    const struct NvKmsFlipWorkArea *pWorkArea)
+{
+    const NVDispEvoRec *pDispEvo;
+    NvU32 sd;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        NvU32 head;
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            const NVEvoSubDevHeadStateRec *pSdHeadState =
+                &pDevEvo->gpus[pDispEvo->displayOwner].headState[head];
+            const NVSurfaceEvoRec *pMainScanoutSurface =
+                pSdHeadState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+            const NVFlipEvoHwState *pFlipState =
+                &pWorkArea->sd[sd].head[head].newState;
+            NvU32 layer;
+
+            if (pFlipState->dirty.layer[NVKMS_MAIN_LAYER]) {
+                pMainScanoutSurface =
+                    pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+            }
+
+            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                if (layer == NVKMS_MAIN_LAYER) {
+                    continue;
+                }
+
+                if (pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL &&
+                    pMainScanoutSurface == NULL) {
+                    return FALSE;
+                }
+            }
+        }
+    }
+
+    return TRUE;
+}
+
+/*
+ * Record in the updateState that the given channel needs interlocked
+ * window immediate updates.
+ */
+static void UpdateWinImmInterlockState(NVDevEvoPtr pDevEvo,
+                                       NVEvoUpdateState *updateState,
+                                       const NVEvoChannel *pChannel)
+{
+    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (subDevMask & (1 << sd)) {
+            updateState->subdev[sd].winImmInterlockMask |=
+                pChannel->channelMask;
+        }
+    }
+}
+
+/*!
+ * Record in the updateState that the given channel's methods are eligible for
+ * flip locking.
+ */
+static void UpdateUpdateFlipLockState(NVDevEvoPtr pDevEvo,
+                                      NVEvoUpdateState *updateState,
+                                      const NVEvoChannel *pChannel)
+{
+    const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo);
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        if (subDevMask & (1 << sd)) {
+            updateState->subdev[sd].flipLockQualifyingMask |=
+                pChannel->channelMask;
+        }
+    }
+}
+
+/*!
+ * Program a flip on all requested layers on the specified head.
+ *
+ * This also updates pDevEvo->gpus[sd].headState[head], caching what was
+ * programmed.
+ *
+ * \param[in,out] pDevEvo       The device on which the flip should be
+ *                              performed.
+ * \param[in]     sd            The subdevice for the flip.
+ * \param[in]     head          The head on which the flip should be performed.
+ * \param[in]     pFlipState    The description of how to update each layer.
+ * \param[in]     allowFlipLock Whether main layer flips may qualify for
+ *                              flip lock.
+ * \param[in,out] updateState   Indicates which channels require UPDATEs
+ */
+void nvFlipEvoOneHead(
+    NVDevEvoPtr pDevEvo,
+    const NvU32 sd,
+    const NvU32 head,
+    const NVFlipEvoHwState *pFlipState,
+    NvBool allowFlipLock,
+    NVEvoUpdateState *updateState)
+{
+    const NvU32 subDeviceMask = NVBIT(sd);
+    const NVDispHeadStateEvoRec *pHeadState =
+        &pDevEvo->gpus[sd].pDispEvo->headState[head];
+    NvBool bypassComposition = pHeadState->bypassComposition;
+    NVEvoSubDevHeadStateRec *pSdHeadState =
+        &pDevEvo->gpus[sd].headState[head];
+    NvU32 layer;
+
+    /*
+     * Provide the pre-update hardware state (in pSdHeadState) and the new
+     * target state (pFlipState) to the HAL implementation so that it has the
+     * information it needs to implement the workaround for hardware bug
+     * 2193096, which requires special logic on transitions between NULL and
+     * non-NULL ctxdmas (and vice versa).
+     */
+    pDevEvo->hal->FlipTransitionWAR(pDevEvo, sd, head,
+                                    pSdHeadState, pFlipState,
+                                    updateState);
+
+    /*
+     * Promote the software state first, such that the hardware programming
+     * paths below see the new state atomically.
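+     *
+     * (I.e., pSdHeadState is updated in full before any of the
+     * pDevEvo->hal programming calls below read from it.)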
+ */ + if (pFlipState->dirty.viewPortPointIn) { + pSdHeadState->viewPortPointIn = pFlipState->viewPortPointIn; + } + + if (pFlipState->dirty.cursorSurface || pFlipState->dirty.cursorPosition) { + pSdHeadState->cursor = pFlipState->cursor; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipState->dirty.layer[layer]) { + pSdHeadState->layer[layer] = pFlipState->layer[layer]; + } + } + + if (pFlipState->dirty.viewPortPointIn) { + nvSetViewPortPointInEvo(pDevEvo->gpus[sd].pDispEvo, + head, + pFlipState->viewPortPointIn.x, + pFlipState->viewPortPointIn.y, + updateState); + } + + if (pFlipState->dirty.cursorSurface) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + pDevEvo->hal->SetCursorImage(pDevEvo, + head, + pSdHeadState->cursor.pSurfaceEvo, + updateState, + &pSdHeadState->cursor.cursorCompParams); + nvPopEvoSubDevMask(pDevEvo); + } + + if (pFlipState->dirty.cursorPosition) { + nvEvoMoveCursorInternal(pDevEvo->gpus[sd].pDispEvo, + head, + pFlipState->cursor.x, + pFlipState->cursor.y); + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pFlipState->dirty.layer[layer]) { + continue; + } + + if (pFlipState->dirty.layerPosition[layer]) { + /* Ensure position updates are supported on this layer. */ + nvAssert(pDevEvo->caps.layerCaps[layer].supportsWindowMode); + + pDevEvo->hal->SetImmPointOut(pDevEvo, + pDevEvo->head[head].layer[layer], + sd, + updateState, + pFlipState->layer[layer].outputPosition.x, + pFlipState->layer[layer].outputPosition.y); + + if (pDevEvo->hal->caps.supportsSynchronizedOverlayPositionUpdate) { + UpdateWinImmInterlockState(pDevEvo, updateState, + pDevEvo->head[head].layer[layer]); + } + } + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + pDevEvo->hal->Flip(pDevEvo, + pDevEvo->head[head].layer[layer], + &pFlipState->layer[layer], + updateState, + bypassComposition); + if (layer == NVKMS_MAIN_LAYER && allowFlipLock) { + UpdateUpdateFlipLockState(pDevEvo, updateState, + pDevEvo->head[head].layer[layer]); + } + nvPopEvoSubDevMask(pDevEvo); + } + + pSdHeadState->targetUsage = pFlipState->usage; + + pSdHeadState->targetDisableMidFrameAndDWCFWatermark = + pFlipState->disableMidFrameAndDWCFWatermark; +} + +static void ChangeSurfaceFlipRefCount( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo, + NvBool increase) +{ + if (pSurfaceEvo != NULL) { + if (increase) { + nvEvoIncrementSurfaceRefCnts(pSurfaceEvo); + } else { + nvEvoDecrementSurfaceRefCnts(pSurfaceEvo); + } + } +} + +void nvUpdateSurfacesFlipRefCount( + NVDevEvoPtr pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState, + NvBool increase) +{ + NvU32 i; + + ChangeSurfaceFlipRefCount( + pDevEvo, + pFlipState->cursor.pSurfaceEvo, + increase); + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i]; + + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->pSurfaceEvo[NVKMS_LEFT], + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT], + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->completionNotifier.surface.pSurfaceEvo, + increase); + + if (!pLayerFlipState->syncObject.usingSyncpt) { + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, + increase); + } + } +} + +static void UnionScalingUsageBounds( + const struct NvKmsScalingUsageBounds *a, + 
const struct NvKmsScalingUsageBounds *b, + struct NvKmsScalingUsageBounds *ret) +{ + ret->maxVDownscaleFactor = NV_MAX(a->maxVDownscaleFactor, + b->maxVDownscaleFactor); + ret->maxHDownscaleFactor = NV_MAX(a->maxHDownscaleFactor, + b->maxHDownscaleFactor); + ret->vTaps = NV_MAX(a->vTaps, b->vTaps); + ret->vUpscalingAllowed = a->vUpscalingAllowed || b->vUpscalingAllowed; +} + +struct NvKmsUsageBounds nvUnionUsageBounds( + const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b) +{ + struct NvKmsUsageBounds ret; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(a->layer); i++) { + nvAssert(a->layer[i].usable == + !!a->layer[i].supportedSurfaceMemoryFormats); + nvAssert(b->layer[i].usable == + !!b->layer[i].supportedSurfaceMemoryFormats); + + ret.layer[i].usable = a->layer[i].usable || b->layer[i].usable; + + ret.layer[i].supportedSurfaceMemoryFormats = + a->layer[i].supportedSurfaceMemoryFormats | + b->layer[i].supportedSurfaceMemoryFormats; + + UnionScalingUsageBounds(&a->layer[i].scaling, + &b->layer[i].scaling, + &ret.layer[i].scaling); + } + + return ret; +} + +NvBool UsageBoundsEqual( + const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b) +{ + NvU32 layer; + + for (layer = 0; layer < ARRAY_LEN(a->layer); layer++) { + if (!nvEvoLayerUsageBoundsEqual(a, b, layer)) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool AllocatePreFlipBandwidth(NVDevEvoPtr pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea) +{ + NVValidateImpOneDispHeadParamsRec *timingsParams = NULL; + struct NvKmsUsageBounds *currentAndNew = NULL; + struct NvKmsUsageBounds *guaranteedAndCurrent = NULL; + NVDispEvoPtr pDispEvo; + NvU32 head; + NvBool recheckIMP = FALSE; + NvBool ret = TRUE; + + if (!pDevEvo->isSOCDisplay) { + return TRUE; + } + + timingsParams = + nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*timingsParams)); + if (timingsParams == NULL) { + return FALSE; + } + + currentAndNew = + nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*currentAndNew)); + if (currentAndNew == NULL) { + nvFree(timingsParams); + return FALSE; + } + + guaranteedAndCurrent = + nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*guaranteedAndCurrent)); + if (guaranteedAndCurrent == NULL) { + nvFree(timingsParams); + nvFree(currentAndNew); + return FALSE; + } + + pDispEvo = pDevEvo->pDispEvo[0]; + + // SOC Display never has more than one disp + nvAssert(pDevEvo->nDispEvo == 1); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const struct NvKmsUsageBounds *pCurrent = + &pDevEvo->gpus[0].headState[head].preallocatedUsage; + const struct NvKmsUsageBounds *pNew = + &pWorkArea->sd[0].head[head].newState.usage; + + if (pHeadState->activeRmId == 0) { + continue; + } + + timingsParams[head].pConnectorEvo = pHeadState->pConnectorEvo; + timingsParams[head].activeRmId = pHeadState->activeRmId; + timingsParams[head].pTimings = &pHeadState->timings; + + currentAndNew[head] = nvUnionUsageBounds(pCurrent, pNew); + guaranteedAndCurrent[head] = nvUnionUsageBounds( + &pHeadState->timings.viewPort.guaranteedUsage, + pCurrent); + + if (!ValidateUsageBounds(pDevEvo, + head, + pNew, + &guaranteedAndCurrent[head])) { + recheckIMP = TRUE; + } + + guaranteedAndCurrent[head] = + nvUnionUsageBounds(&guaranteedAndCurrent[head], pNew); + timingsParams[head].pUsage = &guaranteedAndCurrent[head]; + } + + if (recheckIMP) { + ret = nvValidateImpOneDisp(pDispEvo, timingsParams, + FALSE /* requireBootClocks */, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE, + NULL /* pMinIsoBandwidthKBPS */, + 
NULL /* pMinDramFloorKBPS */);
+        if (ret) {
+            for (head = 0; head < pDevEvo->numHeads; head++) {
+                pDevEvo->gpus[0].headState[head].preallocatedUsage =
+                    currentAndNew[head];
+            }
+        }
+    }
+
+    nvFree(timingsParams);
+    nvFree(currentAndNew);
+    nvFree(guaranteedAndCurrent);
+
+    if (ret) {
+        nvScheduleLowerDispBandwidthTimer(pDevEvo);
+    }
+
+    return ret;
+}
+
+/*!
+ * If a satellite channel is active, pre-NVDisplay hardware does not allow
+ * changing its usage bounds in a non-interlocked update. The
+ * nvSetUsageBoundsEvo() code path for pre-NVDisplay hardware interlocks the
+ * satellite channels with the usage bounds update. This makes it essential to
+ * poll for the NO_METHOD_PENDING state of the satellite channels; otherwise,
+ * the blocking pre-flip IMP update will also get stuck.
+ *
+ * It is not possible to interlock flip-locked satellite channels with the core
+ * channel usage bounds update; in that case, reject the flip. If a channel is
+ * flip-locked, do not allow the client to change any surface usage bounds
+ * parameters without deactivating the channel first.
+ */
+static NvBool PrepareToDoPreFlipIMP(NVDevEvoPtr pDevEvo,
+                                    struct NvKmsFlipWorkArea *pWorkArea)
+{
+    NvU64 startTime = 0;
+    NvU32 timeout = 2000000; /* 2 seconds */
+    NvU32 sd;
+
+    for (sd = 0; sd < pDevEvo->numSubDevices; sd++) {
+        NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd];
+        NvU32 head;
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            NVEvoHeadControlPtr pHC =
+                &pEvoSubDev->headControl[head];
+            const NVEvoSubDevHeadStateRec *pCurrentFlipState =
+                &pDevEvo->gpus[sd].headState[head];
+            const NVSurfaceEvoRec *pCurrentBaseSurf =
+                pCurrentFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+            const struct NvKmsUsageBounds *pCurrentUsage =
+                &pCurrentFlipState->usage;
+
+            NVFlipEvoHwState *pNewFlipState =
+                &pWorkArea->sd[sd].head[head].newState;
+            const NVSurfaceEvoRec *pNewBaseSurf =
+                pNewFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+            struct NvKmsUsageBounds *pNewUsage =
+                &pNewFlipState->usage;
+
+            struct NvKmsUsageBounds *pPreFlipUsage =
+                &pWorkArea->sd[sd].head[head].preFlipUsage;
+
+            NvU32 layer;
+
+            *pPreFlipUsage = nvUnionUsageBounds(pNewUsage,
+                                                pCurrentUsage);
+
+            if (pDevEvo->hal->caps.supportsNonInterlockedUsageBoundsUpdate) {
+                /*
+                 * NVDisplay does not interlock the satellite channel
+                 * with its usage bounds update.
+                 */
+                continue;
+            }
+
+            /*
+             * If head is flip-locked then do not change usage
+             * bounds while base channel is active.
+             */
+            if (pHC->flipLock &&
+                /* If the base channel is active before and after flip then
+                 * current and new base usage bounds should be same. */
+                ((pNewBaseSurf != NULL &&
+                  pCurrentBaseSurf != NULL &&
+                  !nvEvoLayerUsageBoundsEqual(pCurrentUsage,
+                                              pNewUsage, NVKMS_MAIN_LAYER)) ||
+                 /* If the base channel is active before flip then current and
+                  * preflip base usage bounds should be same. */
+                 (pCurrentBaseSurf != NULL &&
+                  !nvEvoLayerUsageBoundsEqual(pCurrentUsage,
+                                              pPreFlipUsage, NVKMS_MAIN_LAYER)))) {
+                return FALSE;
+            }
+
+            /*
+             * Poll for NO_METHOD_PENDING state if usage
+             * bounds of the channel are changed.
+             */
+            for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                if (!nvEvoLayerUsageBoundsEqual(pCurrentUsage,
+                                                pPreFlipUsage, layer) &&
+                    !nvEvoPollForNoMethodPending(pDevEvo,
+                                                 sd,
+                                                 pDevEvo->head[head].layer[layer],
+                                                 &startTime,
+                                                 timeout)) {
+                    return FALSE;
+                }
+            }
+        }
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Tasks that need to be performed before triggering the flip should all be
+ * done here.
+ * + * If necessary, raise usage bounds and/or disable MidFrameAndDWCFWatermark + * (bug 200508242) in the core channel and do an IMP update. + * + * Note that this function only raises usage bounds and/or disables + * MidFrameAndDWCFWatermark, never lowers usage bounds and/or enables + * MidFrameAndDWCFWatermark. This allows it to run before queuing a flip even + * if there are still pending flips in a base channel. + */ +static void PreFlipIMP(NVDevEvoPtr pDevEvo, + const struct NvKmsFlipWorkArea *pWorkArea) +{ + NvU32 head, sd; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoUpdateState updateState = { }; + NvBool update = FALSE; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + const NVFlipEvoHwState *pNewState = + &pWorkArea->sd[sd].head[head].newState; + const struct NvKmsUsageBounds *pPreFlipUsage = + &pWorkArea->sd[sd].head[head].preFlipUsage; + struct NvKmsUsageBounds *pCurrentUsage = + &pDevEvo->gpus[sd].headState[head].usage; + + if (!UsageBoundsEqual(pCurrentUsage, pPreFlipUsage)) { + update |= nvSetUsageBoundsEvo(pDevEvo, sd, head, + pPreFlipUsage, &updateState); + } + + if (!pDevEvo->gpus[sd]. + headState[head].disableMidFrameAndDWCFWatermark && + pNewState->disableMidFrameAndDWCFWatermark) { + + nvEnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + FALSE /* enable */, + &updateState); + update = TRUE; + } + } + + if (update) { + nvDoIMPUpdateEvo(pDispEvo, &updateState); + } + } +} + +static void LowerDispBandwidth(void *dataPtr, NvU32 dataU32) +{ + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + struct NvKmsUsageBounds *guaranteedAndCurrent; + NVDevEvoPtr pDevEvo = dataPtr; + NVDispEvoPtr pDispEvo; + NvU32 head; + NvBool ret; + + guaranteedAndCurrent = + nvCalloc(1, sizeof(*guaranteedAndCurrent) * NVKMS_MAX_HEADS_PER_DISP); + if (guaranteedAndCurrent == NULL) { + nvAssert(guaranteedAndCurrent != NULL); + return; + } + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + pDispEvo = pDevEvo->pDispEvo[0]; + + // SOC Display never has more than one disp + nvAssert(pDevEvo->nDispEvo == 1); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const struct NvKmsUsageBounds *pGuaranteed = + &pHeadState->timings.viewPort.guaranteedUsage; + const struct NvKmsUsageBounds *pCurrent = + &pDevEvo->gpus[0].headState[head].usage; + + if (pHeadState->activeRmId == 0) { + continue; + } + + timingsParams[head].pConnectorEvo = pHeadState->pConnectorEvo; + timingsParams[head].activeRmId = pHeadState->activeRmId; + timingsParams[head].pTimings = &pHeadState->timings; + + guaranteedAndCurrent[head] = nvUnionUsageBounds(pGuaranteed, pCurrent); + timingsParams[head].pUsage = &guaranteedAndCurrent[head]; + } + + ret = nvValidateImpOneDisp(pDispEvo, timingsParams, + FALSE /* requireBootClocks */, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST, + NULL /* pMinIsoBandwidthKBPS */, + NULL /* pMinDramFloorKBPS */); + if (ret) { + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->gpus[0].headState[head].preallocatedUsage = + pDevEvo->gpus[0].headState[head].usage; + } + } + + nvAssert(ret); + + nvFree(guaranteedAndCurrent); +} + +void nvCancelLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo) +{ + nvkms_free_timer(pDevEvo->lowerDispBandwidthTimer); + pDevEvo->lowerDispBandwidthTimer = NULL; +} + +void nvScheduleLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo) +{ + nvAssert(pDevEvo->isSOCDisplay); + + nvCancelLowerDispBandwidthTimer(pDevEvo); + + 
pDevEvo->lowerDispBandwidthTimer = + nvkms_alloc_timer(LowerDispBandwidth, + pDevEvo, + 0, /* dataU32 */ + 30000000 /* 30 seconds */); +} + +/*! + * Check whether the core, base, and overlay channels are idle (i.e. no methods + * pending in the corresponding pushbuffer) and lower the usage bounds if + * possible. + */ +static NvBool TryLoweringUsageBoundsOneHead(NVDevEvoPtr pDevEvo, NvU32 sd, + NvU32 head, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pHeadState = + &pDevEvo->gpus[sd].headState[head]; + const struct NvKmsUsageBounds *pCurrent = &pHeadState->usage; + const struct NvKmsUsageBounds *pTarget = &pHeadState->targetUsage; + struct NvKmsUsageBounds newUsage = *pCurrent; + NvBool changed = FALSE; + NvBool scheduleLater = FALSE; + int i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + if (pCurrent->layer[i].usable && !pTarget->layer[i].usable) { + NvBool isMethodPending; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pDevEvo->head[head].layer[i], + sd, + &isMethodPending) && !isMethodPending) { + newUsage.layer[i] = pTarget->layer[i]; + changed = TRUE; + } else { + scheduleLater = TRUE; + } + } else if ((pCurrent->layer[i].usable && pTarget->layer[i].usable) && + ((pCurrent->layer[i].supportedSurfaceMemoryFormats != + pTarget->layer[i].supportedSurfaceMemoryFormats) || + (!nvEvoScalingUsageBoundsEqual(&pCurrent->layer[i].scaling, + &pTarget->layer[i].scaling)))) { + NvBool isMethodPending; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pDevEvo->head[head].layer[i], + sd, + &isMethodPending) && !isMethodPending) { + newUsage.layer[i] = pTarget->layer[i]; + changed = TRUE; + } else { + scheduleLater = TRUE; + } + } + } + + if (scheduleLater) { + SchedulePostFlipIMPTimer(pDevEvo); + } + + if (changed) { + changed = nvSetUsageBoundsEvo(pDevEvo, sd, head, &newUsage, + updateState); + } + + return changed; +} + +static NvBool +TryEnablingMidFrameAndDWCFWatermarkOneHead(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvBool changed = FALSE; + + if (pHeadState->disableMidFrameAndDWCFWatermark && + !pHeadState->targetDisableMidFrameAndDWCFWatermark) { + + NvBool isIdle; + + if (pDevEvo->hal->IsChannelIdle(pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + &isIdle) && isIdle) { + nvEnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + TRUE /* enable */, + updateState); + changed = TRUE; + } else { + // Schedule another timer to try again later. 
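+            // (The main-layer channel was not yet idle, so the watermark
+            // cannot safely be re-enabled now; the timer retries after
+            // pending methods have drained.)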
+            SchedulePostFlipIMPTimer(pDevEvo);
+        }
+    }
+
+    return changed;
+}
+
+static void
+TryToDoPostFlipIMP(void *dataPtr, NvU32 dataU32)
+{
+    NVDevEvoPtr pDevEvo = dataPtr;
+    NvU32 head, sd;
+    NVDispEvoPtr pDispEvo;
+
+    pDevEvo->postFlipIMPTimer = NULL;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        NVEvoUpdateState updateState = { };
+        NvBool update = FALSE;
+
+        for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+            if (TryLoweringUsageBoundsOneHead(pDevEvo, sd, head,
+                                              &updateState)) {
+                update = TRUE;
+            }
+
+            if (TryEnablingMidFrameAndDWCFWatermarkOneHead(
+                    pDevEvo,
+                    sd,
+                    head,
+                    &updateState)) {
+                update = TRUE;
+            }
+        }
+
+        if (update) {
+            nvDoIMPUpdateEvo(pDispEvo, &updateState);
+        }
+    }
+}
+
+static void SchedulePostFlipIMPTimer(NVDevEvoPtr pDevEvo)
+{
+    if (!pDevEvo->postFlipIMPTimer) {
+        pDevEvo->postFlipIMPTimer =
+            nvkms_alloc_timer(
+                TryToDoPostFlipIMP,
+                pDevEvo,
+                0, /* dataU32 */
+                10000000 /* 10 seconds */);
+    }
+}
+
+void nvEvoCancelPostFlipIMPTimer(NVDevEvoPtr pDevEvo)
+{
+    nvkms_free_timer(pDevEvo->postFlipIMPTimer);
+    pDevEvo->postFlipIMPTimer = NULL;
+}
+
+/*!
+ * If necessary, schedule a timer to see if usage bounds can be lowered.
+ */
+static void SchedulePostFlipIMP(NVDevEvoPtr pDevEvo)
+{
+    NvU32 head, sd;
+    NVDispEvoPtr pDispEvo;
+
+    // If a timer is already scheduled, do nothing.
+    if (pDevEvo->postFlipIMPTimer) {
+        return;
+    }
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+            const NVEvoSubDevHeadStateRec *pHeadState =
+                &pDevEvo->gpus[sd].headState[head];
+
+            if (!UsageBoundsEqual(&pHeadState->usage,
+                                  &pHeadState->targetUsage) ||
+                (pHeadState->disableMidFrameAndDWCFWatermark !=
+                 pHeadState->targetDisableMidFrameAndDWCFWatermark)) {
+
+                SchedulePostFlipIMPTimer(pDevEvo);
+                return;
+            }
+        }
+    }
+}
+
+/*!
+ * Program a flip on all requested layers on all requested heads on
+ * all requested disps in NvKmsFlipRequest.
+ *
+ * \param[in]  skipUpdate    Update software state tracking, but don't kick
+ *                           off or perform an UPDATE.
+ *
+ *                           Note that this should be used only when the
+ *                           satellite channels (including the cursor) are
+ *                           disabled -- only the core channel should be
+ *                           displaying anything, and only the core surface
+ *                           should be specified in a skipUpdate flip.
+ * \param[in]  allowFlipLock Whether this update should use fliplocked base
+ *                           flips. This is used on nvdisplay to set the
+ *                           interlock mask to include all fliplocked
+ *                           channels if necessary. This should currently
+ *                           only be set when this flip was initiated
+ *                           through NVKMS_IOCTL_FLIP.
+ */
+NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
+                 const struct NvKmsPerOpenDev *pOpenDev,
+                 const struct NvKmsFlipRequest *request,
+                 struct NvKmsFlipReply *reply,
+                 NvBool skipUpdate,
+                 NvBool allowFlipLock)
+{
+    NvU32 head, sd;
+    NvU32 requestedHeadCount, activeHeadCount, dirtyBaseChannelCount;
+    NvBool ret = FALSE;
+    NvBool changed = FALSE;
+    NvBool allowVrr = request->allowVrr;
+    NVDispEvoPtr pDispEvo;
+    struct NvKmsFlipWorkArea *pWorkArea =
+        nvPreallocGet(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA,
+                      sizeof(*pWorkArea));
+
+    nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea));
+
+    /*
+     * Do not execute NVKMS_IOCTL_FLIP if the display channel has not yet
+     * been transitioned from vbios to driver. A modeset is required to
+     * transition the display channel from vbios to driver.
+     *
+     * The NVKMS client should do a modeset before initiating
+     * NVKMS_IOCTL_FLIP requests.
+ */ + if (pDevEvo->coreInitMethodsPending) { + goto done; + } + + /* + * Initialize the work area. Note we take two snapshots of the + * current headState: newState and oldState. newState will + * describe the new configuration. After that is applied, we will + * refer to oldState to identify any surfaces that are no longer + * in use. + */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (head = 0; head < ARRAY_LEN(pWorkArea->sd[sd].head); head++) { + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].newState); + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].oldState); + } + } + + + /*! + * Count active and requested heads so we can make a decision about VRR + * and register syncpts if specified. + */ + requestedHeadCount = activeHeadCount = dirtyBaseChannelCount = 0; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice = + &request->sd[sd]; + + for (head = 0; head < ARRAY_LEN(pRequestOneSubDevice->head); head++) { + const NvBool headActive = nvHeadIsActive(pDispEvo, head); + + if (headActive) { + activeHeadCount++; + } + + if (NVBIT(head) & pRequestOneSubDevice->requestedHeadsBitMask) { + requestedHeadCount++; + } + + if (headActive) { + if (!nvHandleSyncptRegistration( + pDevEvo, + head, + &pRequestOneSubDevice->head[head], + &pWorkArea->sd[sd].head[head].newState)) { + goto done; + } + } + } + } + + /* Deactivate VRR if only a subset of the heads are requested */ + if (requestedHeadCount != activeHeadCount) { + allowVrr = FALSE; + } + + /* Validate the flip parameters and update the work area. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice = + &request->sd[sd]; + + for (head = 0; head < ARRAY_LEN(pRequestOneSubDevice->head); head++) { + const NVDispHeadStateEvoRec *pHeadState; + const NvBool headActive = nvHeadIsActive(pDispEvo, head); + + if (!(NVBIT(head) & pRequestOneSubDevice->requestedHeadsBitMask)) { + continue; + } + + if (!headActive) { + goto done; + } + + pHeadState = &pDispEvo->headState[head]; + + if (!nvUpdateFlipEvoHwState( + pOpenDev, + pDevEvo, + sd, + head, + &pRequestOneSubDevice->head[head], + &pWorkArea->sd[sd].head[head].newState, + allowVrr, + &pHeadState->timings.viewPort.possibleUsage)) { + goto done; + } + + if (pWorkArea->sd[sd].head[head].newState.dirty.layer[NVKMS_MAIN_LAYER]) { + dirtyBaseChannelCount++; + } + + if (!nvValidateFlipEvoHwState( + pDevEvo, + head, + &pHeadState->timings, + &pWorkArea->sd[sd].head[head].newState)) { + goto done; + } + + pWorkArea->sd[sd].changed = TRUE; + changed = TRUE; + } + } + + /* Deactivate VRR if only a subset of the heads are being flipped */ + if (dirtyBaseChannelCount != activeHeadCount) { + allowVrr = FALSE; + } + + if (!ValidatePerDispState(pDevEvo, pWorkArea)) { + goto done; + } + + /* If nothing changed, fail. */ + + if (!changed) { + goto done; + } + + ret = AllocatePreFlipBandwidth(pDevEvo, pWorkArea); + if (!ret) { + goto done; + } + + if (!request->commit) { + ret = NV_TRUE; + goto done; + } + + if (!PrepareToDoPreFlipIMP(pDevEvo, pWorkArea)) { + goto done; + } + + /* + * At this point, something changed on at least one head of one + * subdevice, and has been validated. Apply the request to our + * hardware and software state. We must not fail beyond this + * point. 
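+     *
+     * (Beyond this point surface reference counts are raised and per-head
+     * software state is overwritten, so failing midway would leave
+     * software and hardware state inconsistent with no way to roll back.)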
+ */ + + ret = TRUE; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + // Increase refCnt of surfaces used AFTER flip + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].newState, + NV_TRUE); + } + } + + PreFlipIMP(pDevEvo, pWorkArea); + + /* Apply NvKmsFlipRequest::allowVrr only if a base channel has become dirty */ + if (dirtyBaseChannelCount > 0) { + nvSetVrrActive(pDevEvo, allowVrr); + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice = + &request->sd[sd]; + + NVEvoUpdateState updateState = { }; + + if (!pWorkArea->sd[sd].changed) { + continue; + } + + pDispEvo = pDevEvo->gpus[sd].pDispEvo; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + const struct NvKmsFlipCommonParams *pParams = + &pRequestOneSubDevice->head[head]; + + if (pParams->layer[NVKMS_MAIN_LAYER].skipPendingFlips && + pFlipState->dirty.layer[NVKMS_MAIN_LAYER] && + !skipUpdate) { + pDevEvo->hal->AccelerateChannel( + pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + &pWorkArea->sd[sd].head[head].oldAccelerators); + pWorkArea->sd[sd].head[head].accelerated = TRUE; + } + + nvFlipEvoOneHead(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].newState, + allowFlipLock, + &updateState); + } + + if (!skipUpdate) { + pDevEvo->hal->Update(pDevEvo, &updateState, TRUE /* releaseElv */); + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + // Decrease refCnt of surfaces used BEFORE the flip + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].oldState, + NV_FALSE); + } + + FillPostSyncptReply(pDevEvo, + sd, + &request->sd[sd], + &reply->sd[sd], + pWorkArea); + + } + + { + NvU64 startTime = 0; + const NvU32 timeout = 2000000; /* 2 seconds */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (!pWorkArea->sd[sd].head[head].accelerated) { + continue; + } + + if (!nvEvoPollForNoMethodPending(pDevEvo, + sd, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + &startTime, + timeout)) { + nvAssert(!"Timed out while idling base channel"); + } + + pDevEvo->hal->ResetChannelAccelerators( + pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + pWorkArea->sd[sd].head[head].oldAccelerators); + } + } + } + + if (dirtyBaseChannelCount > 0) { + nvSetNextVrrFlipTypeAndIndex(pDevEvo, reply); + } else { + // TODO Schedule vrr unstall; per-disp/per-device? + } + + if (!skipUpdate) { + // Note that usage bounds are not lowered here, because the flip + // queued by this function may not occur until later. Instead, schedule + // a timer for later to check if the usage bounds can be lowered. 
+        SchedulePostFlipIMP(pDevEvo);
+
+        pDevEvo->skipConsoleRestore = FALSE;
+    }
+
+    /* fall through */
+
+done:
+
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA);
+
+    return ret;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c
new file mode 100644
index 0000000..fc7bc5c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c
@@ -0,0 +1,2217 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-framelock.h"
+#include "nvkms-dpy.h"
+#include "nvkms-utils.h"
+#include "nvkms-evo.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+
+#include "nvkms-private.h" /* nvSendDpyAttributeChangedEventEvo() */
+
+#include <class/cl30f1.h>
+#include <ctrl/ctrl0000/ctrl0000gsync.h> /* NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS */
+#include <ctrl/ctrl30f1.h>
+#include "nvos.h"
+
+static NvBool FrameLockUseHouseSyncGetSupport(NVFrameLockEvoPtr pFrameLockEvo,
+                                              NvU32 *val);
+static NvBool FrameLockSetPolarity(
+    NVFrameLockEvoPtr pFrameLockEvo,
+    enum NvKmsFrameLockAttributePolarityValue val);
+static NvBool HouseSyncOutputModeUsable(const NVFrameLockEvoRec *pFrameLockEvo);
+
+/*!
+ * Handle framelock sync gain/loss events triggered from resman.
+ *
+ * When RM sends an event notification that's handled by FrameLockEvent,
+ * that function schedules a timer to service that event notification.
+ * These timers may be serviced out of order, though; we may receive a
+ * SYNC_LOSS event followed by a SYNC_GAIN event, but our scheduled
+ * callbacks may be called in the reverse order.
+ *
+ * Since we can't trust that events were serviced in order, this function
+ * responds to every sync gain or loss event by querying the actual
+ * sync status across all GPUs from RM and updating our cached sync status
+ * and notifying clients if necessary.
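+ *
+ * Put differently (sketch only, not code from this file): instead of the
+ * order-sensitive
+ *
+ *     onSyncGain() { cached = TRUE;  }
+ *     onSyncLoss() { cached = FALSE; }
+ *
+ * every notification collapses to
+ *
+ *     onSyncEvent() { cached = queryRmSyncStatus(); notifyIfChanged(); }
+ *
+ * which yields the same final state regardless of delivery order.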
+ */ +static void +FrameLockHandleSyncEvent(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + NvBool syncReadyCurrent = FALSE; + NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS statusParams = { 0 }; + + statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC, + &statusParams, + sizeof(statusParams)) != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to query gsync status after event"); + } else { + if (statusParams.bTiming && statusParams.bSyncReady) { + syncReadyCurrent = TRUE; + } + } + + // Update syncReadyGpuMask for consistency with non-NVKMS path, although + // it is currently unused. + if (syncReadyCurrent) { + pFrameLockEvo->syncReadyGpuMask |= (1 << connectorIndex); + } else { + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + } + + if (syncReadyCurrent != pFrameLockEvo->syncReadyLast) { + pFrameLockEvo->syncReadyLast = syncReadyCurrent; + nvSendFrameLockAttributeChangedEventEvo( + pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + pFrameLockEvo->syncReadyLast); + } +} + +/*! + * Receive framelock events from resman. + * + * This function is registered as a kernel callback function from + * resman. + * + * However, it is called with resman's context (alternate stack, + * resman locks held, etc). Schedule deferred work, so that we can + * process the event without resman's encumbrances. + */ +static void FrameLockEvent(void *arg, void *pEventDataVoid, + NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + static nvkms_timer_proc_t *callbackTable[] = { + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(0)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(1)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(2)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(3)] = FrameLockHandleSyncEvent, + + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(0)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(1)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(2)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(3)] = FrameLockHandleSyncEvent, + }; + + const NvNotification *pNotifyData = pEventDataVoid; + NvU32 notifyIndex; + + /* callbackTable[] assumes at most four connectors per gsync */ + ct_assert(NV30F1_GSYNC_CONNECTOR_COUNT == 4); + + if (pNotifyData == NULL) { + nvAssert(!"Invalid pNotifyData from resman"); + return; + } + + notifyIndex = pNotifyData->info32; + + if ((notifyIndex >= ARRAY_LEN(callbackTable)) || + (callbackTable[notifyIndex] == NULL)) { + nvAssert(!"Invalid notifyIndex from resman"); + return; + } + + (void) nvkms_alloc_timer_with_ref_ptr( + callbackTable[notifyIndex], /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* unused */ + 0); /* timeout (i.e., service as soon as possible) */ +} + +/*! + * Free all events and handles allocated in FrameLockCreateEvents(). 
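+ *
+ * (Each event below holds two resources: the RM event object and the
+ * Unix RM handle naming it; both are released here, mirroring the
+ * paired allocations in FrameLockCreateEvents().)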
+ */ +static void FrameLockDestroyEvents(NVDispEvoPtr pDispEvo) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + unsigned int i; + + if (pFrameLockEvo == NULL) { + return; + } + + for (i = 0; i < NV_FRAMELOCK_NUM_EVENTS; i++) { + if (pDispEvo->framelock.gsyncEvent[i].handle) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + pDispEvo->framelock.gsyncEvent[i].handle); + nvFreeUnixRmHandle(&pDispEvo->pDevEvo->handleAllocator, + pDispEvo->framelock.gsyncEvent[i].handle); + pDispEvo->framelock.gsyncEvent[i].handle = 0; + } + } +} + +/*! + * Allocate and configure all events and handles associated with them. + */ +static NvBool FrameLockCreateEvents(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + const NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + unsigned int i; + + if (pDispEvo->pFrameLockEvo == NULL) { + return TRUE; + } + + nvAssert(connectorIndex < NV30F1_GSYNC_CONNECTOR_COUNT); + + /* We should only get here on hardware that has per-connector events */ + nvAssert(!(pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_PRIMARY_CONNECTOR_EVENT)); + + for (i = 0; i < NV_FRAMELOCK_NUM_EVENTS; i++) { + NvU32 notifier; + NvBool ret; + + switch (i) { + case NV_FRAMELOCK_SYNC_LOSS: + notifier = NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(connectorIndex); + break; + case NV_FRAMELOCK_SYNC_GAIN: + notifier = NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(connectorIndex); + break; + default: + nvAssert(!"Unknown gsync event index"); + continue; + } + + pDispEvo->framelock.gsyncEvent[i].handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ret = TRUE; + + if (!nvRmRegisterCallback(pDevEvo, + &pDispEvo->framelock.gsyncEvent[i].callback, + pDispEvo->ref_ptr, + pFrameLockEvo->device, + pDispEvo->framelock.gsyncEvent[i].handle, + FrameLockEvent, + notifier)) { + ret = FALSE; + } + + if (!ret) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to register for framelock event %d", i); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDispEvo->framelock.gsyncEvent[i].handle); + pDispEvo->framelock.gsyncEvent[i].handle = 0; + goto noEvents; + } + } + + return TRUE; + +noEvents: + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failed to register for framelock events"); + + FrameLockDestroyEvents(pDispEvo); + + return FALSE; +} + +/*! + * Bind a pSubDev to a pFrameLock. + */ +static void BindGpuToFrameLock(NVDevEvoPtr pDevEvo, + const NvU32 gpuId, + NVFrameLockEvoPtr pFrameLockEvo, + NvU32 connectorIndex) +{ + NVDispEvoPtr pDispEvo; + unsigned int dispIndex; + + if (pFrameLockEvo->nGpuIds >= ARRAY_LEN(pFrameLockEvo->gpuIds)) { + return; + } + + pFrameLockEvo->gpuIds[pFrameLockEvo->nGpuIds] = gpuId; + pFrameLockEvo->nGpuIds++; + + /* + * If a disp exists for this subdevice, wire it up. + * Note that this should not happen for SLI non-display-owners. + */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + if (nvGpuIdOfDispEvo(pDispEvo) != gpuId) { + continue; + } + + pDispEvo->pFrameLockEvo = pFrameLockEvo; + + pDispEvo->framelock.connectorIndex = connectorIndex; + + pFrameLockEvo->connectedGpuMask |= (1 << connectorIndex); + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + + /* Set up stereo synchronization events */ + FrameLockCreateEvents(pDispEvo); + } +} + +/*! + * Break the binding of pSubDev and pDisp to pFrameLock that we + * created in BindGpuToFrameLock(). 
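+ *
+ * (Note the gpuIds[] bookkeeping below: an entry is removed by shifting
+ * the array tail down one slot, so removing index 1 from {a, b, c}
+ * yields {a, c} and nGpuIds drops from 3 to 2.)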
+ */ +static void UnbindGpuFromFrameLock(NVDevEvoPtr pDevEvo, + const NvU32 gpuId, + NVFrameLockEvoPtr pFrameLockEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int dispIndex; + unsigned int gpu, j; + + for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) { + if (pFrameLockEvo->gpuIds[gpu] == gpuId) { + break; + } + } + + if (gpu == pFrameLockEvo->nGpuIds) { + return; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + const NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + + if (nvGpuIdOfDispEvo(pDispEvo) != gpuId) { + continue; + } + + FrameLockDestroyEvents(pDispEvo); + + pFrameLockEvo->connectedGpuMask &= ~(1 << connectorIndex); + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + + pDispEvo->framelock.connectorIndex = 0; + + pDispEvo->pFrameLockEvo = NULL; + } + + for (j = gpu; j < (pFrameLockEvo->nGpuIds - 1); j++) { + pFrameLockEvo->gpuIds[j] = pFrameLockEvo->gpuIds[j+1]; + } + + pFrameLockEvo->nGpuIds--; +} + +/*! + * Find the NVFrameLockEvoPtr with the specified gsyncId. + */ +static NVFrameLockEvoPtr FindFrameLock(NvU32 gsyncId) +{ + NVFrameLockEvoPtr pFrameLockEvo; + + FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) { + if (pFrameLockEvo->gsyncId == gsyncId) { + return pFrameLockEvo; + } + } + + return NULL; +} + +/*! + * Return whether the NVDevEvoPtr contains a GPU with the specified gpuId. + */ +static NvBool GpuIdInDevEvo(NVDevEvoPtr pDevEvo, NvU32 gpuId) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pDevEvo->pSubDevices[sd]->gpuId == gpuId) { + return TRUE; + } + } + + return FALSE; +} + +/*! + * Free the pFrameLock object. + */ +static void FreeFrameLockEvo(NVDevEvoPtr pDevEvo, + NVFrameLockEvoPtr pFrameLockEvo) +{ + if (pFrameLockEvo == NULL) { + return; + } + + if (pFrameLockEvo->device != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pFrameLockEvo->device); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pFrameLockEvo->device); + pFrameLockEvo->device = 0; + } + + nvAssert(pFrameLockEvo->nGpuIds == 0); + + nvListDel(&pFrameLockEvo->frameLockListEntry); + + nvFree(pFrameLockEvo); +} + +/*! + * Allocate and initialize a new pFrameLock object. 
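+ *
+ * (For orientation: the shape below is nvCalloc, RM object allocation,
+ * capability query, then list append, with every failure branching to a
+ * single "fail:" label. FreeFrameLockEvo() must therefore tolerate a
+ * partially initialized object, which is why it checks device != 0.)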
+ */ +static NVFrameLockEvoPtr AllocFrameLockEvo(NVDevEvoPtr pDevEvo, + int instance, NvU32 gsyncId) +{ + NV30F1_ALLOC_PARAMETERS gsyncAllocParams = { 0 }; + NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS gsyncGetCapsParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo; + + nvAssert(FindFrameLock(gsyncId) == NULL); + + pFrameLockEvo = nvCalloc(1, sizeof(NVFrameLockEvoRec)); + + if (pFrameLockEvo == NULL) { + return NULL; + } + + nvListInit(&pFrameLockEvo->frameLockListEntry); + + pFrameLockEvo->device = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + gsyncAllocParams.gsyncInstance = instance; + + /* allocate a framelock object for the framelock device */ + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30_GSYNC, + &gsyncAllocParams) != NVOS_STATUS_SUCCESS) { + pFrameLockEvo->device = 0; + goto fail; + } + + /* Store unique frame lock device ID */ + pFrameLockEvo->gsyncId = gsyncId; + pFrameLockEvo->houseSyncUseable = 0; + pFrameLockEvo->nGpuIds = 0; + + /* Initialize the state for the framelock board */ + pFrameLockEvo->polarity = NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE; + pFrameLockEvo->syncDelay = 0; + pFrameLockEvo->syncInterval = 0; + pFrameLockEvo->videoMode = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO; + pFrameLockEvo->testMode = FALSE; + pFrameLockEvo->houseSyncMode = + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED; + + /* Query the framelock revision information */ + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CAPS, + &gsyncGetCapsParams, + sizeof(gsyncGetCapsParams)) + != NVOS_STATUS_SUCCESS) { + goto fail; + } + + /* Check if the Quadro Sync card has a firmware + * version compatible with the GPUs connected to it. + */ + pDevEvo->badFramelockFirmware = gsyncGetCapsParams.isFirmwareRevMismatch; + if (gsyncGetCapsParams.isFirmwareRevMismatch) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "The firmware on this Quadro Sync " + "card is not compatible with the GPUs connected to it." + " Please visit " + " " + "for instructions on installing the correct firmware."); + goto fail; + } + + /* gsyncGetCapsParams.revId has the framelock board id in the high 4 bits + * and the FPGA revision in the low 4 bits. This is preserved here for + * legacy clients, but we expose the full board ID (e.g. 0x358, 0x2060, + * 0x2061) and firmware version individually, so clients can more easily + * distinguish P2061 ("Quadro Sync II") from P2060 and P358 + * ("Quadro Sync"). 
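+     *
+     * For example, a legacy client can still decode the packed value as
+     *
+     *     boardId = (revId >> 4) & 0xF;    -- board id, high 4 bits
+     *     fpgaRev =  revId       & 0xF;    -- FPGA revision, low 4 bits
+     *
+     * while new clients should prefer the explicit fields assigned below.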
+ */ + + pFrameLockEvo->fpgaIdAndRevision = gsyncGetCapsParams.revId; + pFrameLockEvo->boardId = gsyncGetCapsParams.boardId; + pFrameLockEvo->firmwareMajorVersion = gsyncGetCapsParams.revision; + pFrameLockEvo->firmwareMinorVersion = gsyncGetCapsParams.extendedRevision; + pFrameLockEvo->caps = gsyncGetCapsParams.capFlags; + pFrameLockEvo->maxSyncSkew = gsyncGetCapsParams.maxSyncSkew; + pFrameLockEvo->syncSkewResolution = gsyncGetCapsParams.syncSkewResolution; + pFrameLockEvo->maxSyncInterval = gsyncGetCapsParams.maxSyncInterval; + pFrameLockEvo->videoModeReadOnly = !!(gsyncGetCapsParams.capFlags & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_GET_VIDEO_MODE); + + /* Determine if house sync is selectable on this frame lock device */ + if (!FrameLockUseHouseSyncGetSupport(pFrameLockEvo, + &pFrameLockEvo->houseSyncUseable)) { + pFrameLockEvo->houseSyncUseable = FALSE; + } + + pFrameLockEvo->houseSyncModeValidValues = + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED); + + if (pFrameLockEvo->houseSyncUseable) { + pFrameLockEvo->houseSyncModeValidValues |= + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT); + } + + if (HouseSyncOutputModeUsable(pFrameLockEvo)) { + pFrameLockEvo->houseSyncModeValidValues |= + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT); + } + + /* Add frame lock device to global list. */ + nvListAppend(&pFrameLockEvo->frameLockListEntry, &nvEvoGlobal.frameLockList); + + return pFrameLockEvo; + +fail: + + FreeFrameLockEvo(pDevEvo, pFrameLockEvo); + return NULL; +} + + +static void BindFrameLockToDevEvo(NVFrameLockEvoPtr pFrameLockEvo, + NVDevEvoPtr pDevEvo) +{ + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS gsyncTopologyParams = { }; + int i; + + /* find out which gpus are attached to which connectors */ + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY, + &gsyncTopologyParams, + sizeof(gsyncTopologyParams)) + != NVOS_STATUS_SUCCESS) { + return; + } + + /* Bind corresponding GPUs to the Frame Lock device */ + for (i = 0; i < ARRAY_LEN(gsyncTopologyParams.gpus); i++) { + + NvU32 connectorIndex; + const NvU32 gpuId = gsyncTopologyParams.gpus[i].gpuId; + + if (gpuId == NV30F1_CTRL_GPU_INVALID_ID) { + continue; + } + + if (!GpuIdInDevEvo(pDevEvo, gpuId)) { + continue; + } + + /* + * Connector type of _NONE means we sync through a proxy GPU, + * which we do not support. + */ + if (gsyncTopologyParams.gpus[i].connector == + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_NONE) { + continue; + } + /* + * gsyncTopologyParams.gpus[i].connector is an enumerated + * type; convert it to a 0-based index + */ + nvAssert(gsyncTopologyParams.gpus[i].connector < + (NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE + + NV30F1_GSYNC_CONNECTOR_COUNT)); + connectorIndex = gsyncTopologyParams.gpus[i].connector - + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE; + + BindGpuToFrameLock(pDevEvo, gpuId, pFrameLockEvo, connectorIndex); + } +} + +static void UnBindFrameLockFromDevEvo(NVFrameLockEvoPtr pFrameLockEvo, + NVDevEvoPtr pDevEvo) +{ + int i; + + /* + * Loop through GPUs from highest to lowest, because + * UnbindGpuFromFrameLock() may remove gpuIds[i]. + */ + for (i = pFrameLockEvo->nGpuIds - 1; i >= 0; i--) { + const NvU32 gpuId = pFrameLockEvo->gpuIds[i]; + + if (!GpuIdInDevEvo(pDevEvo, gpuId)) { + continue; + } + + UnbindGpuFromFrameLock(pDevEvo, gpuId, pFrameLockEvo); + } +} + + +/*! + * Find all of the available framelock devices. 
+ *
+ * Framelock devices can only be recognized by resman after an RM
+ * client has attached a GPU that the framelock device is connected
+ * to. So, subsequent calls to this function may find additional
+ * framelock devices.
+ *
+ * Allocate framelock objects for all the newly found framelock devices.
+ */
+void nvAllocFrameLocksEvo(NVDevEvoPtr pDevEvo)
+{
+    NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS attachedGsyncParams = { };
+    int i;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       nvEvoGlobal.clientHandle,
+                       NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS,
+                       &attachedGsyncParams, sizeof(attachedGsyncParams))
+        != NVOS_STATUS_SUCCESS) {
+        return;
+    }
+
+    for (i = 0; i < ARRAY_LEN(attachedGsyncParams.gsyncIds); i++) {
+
+        NVFrameLockEvoPtr pFrameLockEvo;
+
+        if (attachedGsyncParams.gsyncIds[i] == NV0000_CTRL_GSYNC_INVALID_ID) {
+            continue;
+        }
+
+        pFrameLockEvo = FindFrameLock(attachedGsyncParams.gsyncIds[i]);
+
+        if (pFrameLockEvo == NULL) {
+            pFrameLockEvo = AllocFrameLockEvo(pDevEvo, i,
+                                              attachedGsyncParams.gsyncIds[i]);
+        }
+
+        if (pFrameLockEvo == NULL) {
+            continue;
+        }
+
+        BindFrameLockToDevEvo(pFrameLockEvo, pDevEvo);
+    }
+}
+
+/*!
+ * Free any framelock devices connected to any GPU on this pDevEvo.
+ */
+
+void nvFreeFrameLocksEvo(NVDevEvoPtr pDevEvo)
+{
+    NVFrameLockEvoPtr pFrameLockEvo, pFrameLockEvoTmp;
+
+    /* Destroy the pFrameLockEvos */
+    nvListForEachEntry_safe(pFrameLockEvo, pFrameLockEvoTmp,
+                            &nvEvoGlobal.frameLockList, frameLockListEntry) {
+
+        UnBindFrameLockFromDevEvo(pFrameLockEvo, pDevEvo);
+
+        if (pFrameLockEvo->nGpuIds == 0) {
+            FreeFrameLockEvo(pDevEvo, pFrameLockEvo);
+        }
+    }
+}
+
+/*!
+ * Determine if this framelock device supports user selection of house
+ * sync; assign val appropriately. Returns TRUE if the attribute was
+ * successfully queried.
+ */
+static NvBool FrameLockUseHouseSyncGetSupport(NVFrameLockEvoPtr pFrameLockEvo,
+                                              NvU32 *val)
+{
+    NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS
+        gsyncGetControlParamsParams = { 0 };
+    NvU32 ret;
+
+    if (!val) return FALSE;
+
+    gsyncGetControlParamsParams.which =
+        NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_USE_HOUSE;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pFrameLockEvo->device,
+                         NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS,
+                         &gsyncGetControlParamsParams,
+                         sizeof(gsyncGetControlParamsParams));
+
+    /* If we can query Use House Sync, then it is available */
+    *val = (ret == NVOS_STATUS_SUCCESS) ? TRUE : FALSE;
+
+    return *val;
+}
+
+
+/*!
+ * Return whether or not this framelock device supports house sync
+ * output mode.
+ *
+ * House sync output mode is currently only available on P2061
+ * (Quadro Sync II).
+ */
+static NvBool HouseSyncOutputModeUsable(const NVFrameLockEvoRec *pFrameLockEvo)
+{
+    return (pFrameLockEvo->houseSyncUseable &&
+            (pFrameLockEvo->boardId ==
+             NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061));
+}
+
+
+/*!
+ * Enable or disable house sync output mode in the framelock board.
+ */
+static NvBool FrameLockSetHouseSyncOutputMode(NVFrameLockEvoPtr pFrameLockEvo,
+                                              NvBool enable)
+{
+    NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS
+        gsyncSetHouseSyncModeParams = { 0 };
+    NvU32 ret;
+    NvU8 houseSyncMode = enable ? 
NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_OUTPUT : + NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_INPUT; + + nvAssert(HouseSyncOutputModeUsable(pFrameLockEvo)); + + gsyncSetHouseSyncModeParams.houseSyncMode = houseSyncMode; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE, + &gsyncSetHouseSyncModeParams, + sizeof(gsyncSetHouseSyncModeParams)); + + return (ret == NVOS_STATUS_SUCCESS); +} + + +/*! + * Set the framelock to use the house sync if val is TRUE, otherwise + * set the framelock to use external sync. Returns FALSE if the + * assignment failed. + */ +NvBool nvFrameLockSetUseHouseSyncEvo(NVFrameLockEvoPtr pFrameLockEvo, NvU32 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + NvBool houseSyncOutputMode = FALSE; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE; + + gsyncSetControlParamsParams.useHouseSync = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + if (HouseSyncOutputModeUsable(pFrameLockEvo)) { + + NvS64 houseSyncInputPresent; + NvBool allowHouseSyncOutput = FALSE; + + if (nvFrameLockGetStatusEvo(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + &houseSyncInputPresent)) { + if (houseSyncInputPresent == 0) { + allowHouseSyncOutput = TRUE; + } + } + + if (!val && allowHouseSyncOutput && + (pFrameLockEvo->houseSyncMode == + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT)) { + + houseSyncOutputMode = TRUE; + } + + if (!FrameLockSetHouseSyncOutputMode(pFrameLockEvo, houseSyncOutputMode)) { + return FALSE; + } + } + + /* + * House sync polarity is required to be rising edge if house sync is not + * in use. + * + * In addition, house sync polarity has no effect when house sync output + * mode is in use. + */ + if (val && !houseSyncOutputMode) { + return FrameLockSetPolarity(pFrameLockEvo, pFrameLockEvo->polarity); + } else { + return FrameLockSetPolarity(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE); + } +} + +/*! + * Set the polarity according to val; val is interpreted as an + * NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY value. Returns FALSE if the + * assignment failed. + */ +static NvBool FrameLockSetPolarity( + NVFrameLockEvoPtr pFrameLockEvo, + enum NvKmsFrameLockAttributePolarityValue val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + NvU32 polarity; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY; + + switch (val) { + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_RISING_EDGE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_FALLING_EDGE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_BOTH_EDGES; + break; + + default: + return FALSE; + } + + gsyncSetControlParamsParams.syncPolarity = polarity; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + return TRUE; +} + +/*! 
+ * Set the sync delay to the value given in val. Returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->syncDelay upon success. + */ +static NvBool FrameLockSetSyncDelay(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + if (val > pFrameLockEvo->maxSyncSkew) return FALSE; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW; + + gsyncSetControlParamsParams.syncSkew = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->syncDelay = val; + + return TRUE; +} + +/*! + * Set the sync interval to the value given in val. Returns FALSE if + * the assignment failed. Assigns pFrameLockEvo->syncInterval upon + * success. + */ +static NvBool FrameLockSetSyncInterval(NVFrameLockEvoPtr pFrameLockEvo, + NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC; + + gsyncSetControlParamsParams.nSync = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->syncInterval = val; + + return TRUE; +} + +/*! + * Query the status of the values that are acquired through the + * GET_STATUS_SYNC command, and assign the value to val. Returns + * FALSE if the query failed or if attr is not one of the currently + * handled attributes. + */ +static NvBool FrameLockGetStatusSync(const NVDispEvoRec *pDispEvo, NvS64 *val, + enum NvKmsDispAttribute nvKmsAttribute) +{ + NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS gsyncGetStatusSyncParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + + gsyncGetStatusSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC, + &gsyncGetStatusSyncParams, + sizeof(gsyncGetStatusSyncParams)) + != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + switch (nvKmsAttribute) + { + + case NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC: + *val = (gsyncGetStatusSyncParams.bTiming && + gsyncGetStatusSyncParams.bStereoSync && + gsyncGetStatusSyncParams.bSyncReady); + break; + + case NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING: + *val = gsyncGetStatusSyncParams.bTiming ? TRUE : FALSE; + break; + + default: + return FALSE; + } + + return TRUE; +} + +/*! + * Return the sync rate. + */ +static NvS64 FrameLockInterpretSyncRate(const NVFrameLockEvoRec *pFrameLockEvo, + NvS64 val) +{ + /* Only show decimal places if they are accurate. The queried + value provides 4 decimal places */ + if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_2DPS) { + // only two are valid + val -= (val % 100); + } else if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_3DPS) { + // only three are valid + val -= (val % 10); + } else if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_4DPS) { + // all four are valid, nothing to do + } + return val; +} + +/*! 
+ * Query the status of one of the values that are acquired through the + * GET_STATUS command, and assign the value to val. Returns FALSE if + * the query failed or if attr is not one of the currently handled + * attributes. + */ +NvBool nvFrameLockGetStatusEvo(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS gsyncGetStatusParams = { 0 }; + + switch (attribute) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_INPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_INPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE: + gsyncGetStatusParams.which = + NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC_INCOMING; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED: + gsyncGetStatusParams.which = + NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_ETHERNET | + NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_ETHERNET; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE: + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_REFRESH; + break; + + default: + return FALSE; + } + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS, + &gsyncGetStatusParams, + sizeof(gsyncGetStatusParams)) + != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + switch (attribute) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS: + *val = gsyncGetStatusParams.bPort0Input ? + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT : + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS: + *val = gsyncGetStatusParams.bPort1Input ? + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT : + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS: + *val = gsyncGetStatusParams.bHouseSync; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY: + *val = gsyncGetStatusParams.bSyncReady; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED: + *val = 0x0; + if (gsyncGetStatusParams.bPort0Ethernet) + *val |= NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT0; + if (gsyncGetStatusParams.bPort1Ethernet) + *val |= NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT1; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE: + *val = + FrameLockInterpretSyncRate(pFrameLockEvo, + gsyncGetStatusParams.houseSyncIncoming); + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE: + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4: + *val = FrameLockInterpretSyncRate(pFrameLockEvo, + gsyncGetStatusParams.refresh); + if (attribute == NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE) { + /* _STATUS_REFRESH is in Hz/10000, _SYNC_RATE is Hz/1000 */ + *val /= 10; + } + break; + + default: + return FALSE; + } + + return TRUE; +} + +/*! + * [en|dis]able syncing of the GPU to the FrameLock board for the + * display mask associated with that gpu. val controls whether we are + * enabling or disabling. 
+ */
+static NvBool FrameLockSetEnable(NVDispEvoPtr pDispEvo, NvS64 val)
+{
+    if (val) {
+
+        /* XXX NVKMS TODO: address the following:
+
+           In Xinerama a single app has a channel on each gpu. Before
+           framelock is enabled the first time per X server, vblanks
+           are not synchronized, so if a swap grouped app is started
+           before framelock is enabled the channels get unstalled at
+           different times, and it's likely that one display will be
+           armed while the other is not. When framelock is enabled in
+           this state, we'll deadlock because suddenly the armed display
+           is waiting on the unarmed display to unstall, and the unarmed
+           display cannot arm. Prevent this by idling all channels */
+
+        return nvEnableFrameLockEvo(pDispEvo);
+    } else {
+        return nvDisableFrameLockEvo(pDispEvo);
+    }
+}
+
+static NvBool FrameLockSetWatchdog(NVFrameLockEvoPtr pFrameLockEvo, NvU32 val)
+{
+    NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS
+        gsyncSetControlWatchdogParams = { 0 };
+    NvU32 ret;
+
+    gsyncSetControlWatchdogParams.enable = val;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pFrameLockEvo->device,
+                         NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG,
+                         &gsyncSetControlWatchdogParams,
+                         sizeof(gsyncSetControlWatchdogParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) return FALSE;
+
+    return TRUE;
+}
+
+
+/*!
+ * For the given display, determine if it can be set as a frame lock
+ * server.
+ */
+static NvBool FrameLockDpyCanBeServer(const NVDpyEvoRec *pDpyEvo)
+{
+    NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS gsyncGetControlSyncParams = { 0 };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo;
+    /* XXX[2Heads1OR] Get the primary hardware head. */
+    const NvU32 head = pDpyEvo->apiHead;
+    const NVDispHeadStateEvoRec *pHeadState;
+    NvU32 ret;
+
+    nvAssert(head != NV_INVALID_HEAD);
+    nvAssert(pDispEvo);
+    nvAssert(pDispEvo->pFrameLockEvo);
+
+    pHeadState = &pDispEvo->headState[head];
+    nvAssert(pHeadState->activeRmId);
+
+    /* If already a server, assume it can be a server. */
+    if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) {
+        return TRUE;
+    }
+
+    gsyncGetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo);
+    gsyncGetControlSyncParams.displays = pHeadState->activeRmId;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pFrameLockEvo->device,
+                         NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SYNC,
+                         &gsyncGetControlSyncParams,
+                         sizeof(gsyncGetControlSyncParams));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    if (gsyncGetControlSyncParams.master &&
+        nvFrameLockServerPossibleEvo(pDpyEvo)) {
+        return TRUE;
+    }
+
+    return FALSE;
+}
+
+
+/*!
+ * For the given display, determine if it can be set as a frame lock
+ * client.
+ */
+static NvBool FrameLockDpyCanBeClient(const NVDpyEvoRec *pDpyEvo)
+{
+    NVDispEvoPtr pDispEvo;
+
+    nvAssert(pDpyEvo->pDispEvo);
+    nvAssert(pDpyEvo->pDispEvo->pFrameLockEvo);
+    nvAssert(nvDpyEvoIsActive(pDpyEvo));
+
+    pDispEvo = pDpyEvo->pDispEvo;
+
+    /* If already a client, assume it can be a client. */
+    if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) {
+        return TRUE;
+    }
+
+    /* Otherwise, see if we can make it a client. */
+    return nvFrameLockClientPossibleEvo(pDpyEvo);
+}
+
+
+/*!
+ * [en|dis]able test mode (based on the value of val). Returns FALSE
+ * if changing the test mode failed. Assigns pFrameLockEvo->testMode
+ * upon success.
+ */ +static NvBool FrameLockSetTestMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS + gsyncSetControlTestingParams = { 0 }; + NvU32 ret; + + gsyncSetControlTestingParams.bEmitTestSignal = (val == TRUE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING, + &gsyncSetControlTestingParams, + sizeof(gsyncSetControlTestingParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->testMode = val; + + return TRUE; +} + + +/*! + * Set the video mode according to val; returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->videoMode upon success. + */ +static NvBool FrameLockSetVideoMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE; + + switch (val) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_TTL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_BI_LEVEL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV; + break; + + default: + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->videoMode = val; + + return TRUE; +} + + +/*! + * Enable or disable the swap ready connection through the gsync + * connector. This should be called when we bind the swap barrier. + */ +static NvBool SetSwapBarrier(NVDispEvoPtr pDispEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS + gsyncSetSwapBarrierParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 ret; + NvBool enable = !!val; + + if (!pFrameLockEvo) return FALSE; + + nvSetSwapBarrierNotifyEvo(pDispEvo, enable, TRUE /* isPre */); + + gsyncSetSwapBarrierParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncSetSwapBarrierParams.enable = enable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER, + &gsyncSetSwapBarrierParams, + sizeof(gsyncSetSwapBarrierParams)); + + nvSetSwapBarrierNotifyEvo(pDispEvo, enable, FALSE /* isPre */); + + return (ret == NVOS_STATUS_SUCCESS); +} + + +/*! + * Flush all of our known framelock SW state out to the HW, to make + * sure both are in sync. This should be called any time we get the + * HW back from outside control (e.g., starting X or coming back from + * a VT switch). 
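+ *
+ * (Sketch of the order below: replay the cached house sync, sync delay,
+ * sync interval, video mode, and test mode settings; then force the
+ * watchdog, swap barrier, and server/client sync off, since sync is
+ * expected to be disabled here. Individual failures are accumulated
+ * into the return value rather than aborting the remaining steps.)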
+ */ +static NvBool ResetHardwareOneDisp(NVDispEvoPtr pDispEvo, NvS64 value) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 activeHeadsMask; + NvBool ret = TRUE; + + if (!pDispEvo->pFrameLockEvo || !value) { + /* Nothing to do */ + return TRUE; + } + + /* We should never get here when framelock is enabled */ + if (pDispEvo->framelock.syncEnabled) { + nvAssert(!"Attempted to reset framelock HW while framelock is enabled"); + return FALSE; + } + + /* (Re-)set the HW state to match the SW state */ + if (!nvFrameLockSetUseHouseSyncEvo(pFrameLockEvo, + pFrameLockEvo->houseSyncArmed)) { + ret = FALSE; + } + if (!FrameLockSetSyncDelay(pFrameLockEvo, pFrameLockEvo->syncDelay)) { + ret = FALSE; + } + if (!FrameLockSetSyncInterval(pFrameLockEvo, pFrameLockEvo->syncInterval)) { + ret = FALSE; + } + if (!FrameLockSetVideoMode(pFrameLockEvo, pFrameLockEvo->videoMode)) { + ret = FALSE; + } + if (!FrameLockSetTestMode(pFrameLockEvo, pFrameLockEvo->testMode)) { + ret = FALSE; + } + + /* Since (we think) sync is disabled, these should always be disabled */ + if (!FrameLockSetWatchdog(pFrameLockEvo, FALSE)) { + ret = FALSE; + } + if (!SetSwapBarrier(pDispEvo, FALSE)) { + ret = FALSE; + } + + /* Disable both server and client lock for all heads */ + activeHeadsMask = nvGetActiveHeadMask(pDispEvo); + + if (!nvFramelockSetControlUnsyncEvo(pDispEvo, activeHeadsMask, TRUE)) { + ret = FALSE; + } + if (!nvFramelockSetControlUnsyncEvo(pDispEvo, activeHeadsMask, FALSE)) { + ret = FALSE; + } + + return ret; +} + + +/*! + * Returns the allowable configurations for the given display device. + * The device must be enabled to advertise server/client + * configuration. + */ +static unsigned int FrameLockGetValidDpyConfig(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int valid = + (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED)); + + if (!pDpyEvo || !nvDpyEvoIsActive(pDpyEvo)) { + goto done; + } + + pDispEvo = pDpyEvo->pDispEvo; + + if (!pDispEvo || !pDispEvo->pFrameLockEvo) { + goto done; + } + + /* Check if display can be a server */ + + if (FrameLockDpyCanBeServer(pDpyEvo)) { + valid |= (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER)); + } + + /* Check if display can be a client */ + + if (FrameLockDpyCanBeClient(pDpyEvo)) { + valid |= (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT)); + } + + done: + + return valid; +} + +static NvBool GetFrameLock(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + *val = (pDispEvo->pFrameLockEvo) ? 
1 : 0; + return TRUE; +} + +static NvBool SetFrameLockPolarity(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if ((val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE) && + (val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE) && + (val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES)) { + return FALSE; + } + + pFrameLockEvo->polarity = val; + + return TRUE; +} + +static NvBool GetFrameLockPolarity(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->polarity; + + return TRUE; +} + +static NvBool GetFrameLockSyncDelay(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->syncDelay; + + return TRUE; +} + +static NvBool GetFrameLockSyncDelayValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = 0; + pValidValues->u.range.max = pFrameLockEvo->maxSyncSkew; + + return TRUE; +} + +static NvBool SetHouseSyncMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if ((val < 0) || (val > 31)) { + return FALSE; + } + + if ((pFrameLockEvo->houseSyncModeValidValues & NVBIT(val)) == 0) { + return FALSE; + } + + pFrameLockEvo->houseSyncMode = val; + + return TRUE; +} + +static NvBool GetHouseSyncMode(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + if (!pFrameLockEvo->houseSyncUseable) return FALSE; + + *val = pFrameLockEvo->houseSyncMode; + + return TRUE; +} + +static NvBool GetHouseSyncModeValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!pFrameLockEvo->houseSyncUseable) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = pFrameLockEvo->houseSyncModeValidValues; + + return TRUE; +} + +static NvBool GetFrameLockSyncInterval(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->syncInterval; + + return TRUE; +} + +static NvBool GetFrameLockSyncIntervalValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = 0; + pValidValues->u.range.max = pFrameLockEvo->maxSyncInterval; + + return TRUE; +} + +static NvBool SetFrameLockSync(NVDispEvoRec *pDispEvo, NvS64 val) +{ + NvBool a, b; + + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* If we are already enabled or already disabled, we're done. */ + if (val == pDispEvo->framelock.syncEnabled) return TRUE; + + /* Something must be set to enable/disable */ + if (nvDpyIdIsInvalid(pDispEvo->framelock.server) && + nvDpyIdListIsEmpty(pDispEvo->framelock.clients)) return FALSE; + + /* If we're disabling and test mode is currently enabled, disable it */ + if (!val && + !nvDpyIdIsInvalid(pDispEvo->framelock.server) && + pDispEvo->pFrameLockEvo->testMode) { + + FrameLockSetTestMode(pDispEvo->pFrameLockEvo, FALSE); + } + + /* + * It is important to set syncEnabled before calling FrameLockSetEnable. + * FrameLockSetEnable may call into GLS which may call back into the + * driver to query if framelock is enabled, which checks this field. 
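+     *
+     * That is, the ordering below is deliberate:
+     *
+     *     pDispEvo->framelock.syncEnabled = val;  -- 1: publish new state
+     *     FrameLockSetEnable(pDispEvo, val);      -- 2: may re-enter and
+     *                                                   read syncEnabled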
+ */ + pDispEvo->framelock.syncEnabled = val; + + a = FrameLockSetEnable(pDispEvo, val); + b = FrameLockSetWatchdog(pDispEvo->pFrameLockEvo, val); + + /* + * Since RM doesn't send a SYNC_READY event on sync disable through nvctrl, + * send it here. + */ + if (!val && a && b) { + nvSendFrameLockAttributeChangedEventEvo( + pDispEvo->pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + FALSE); + pDispEvo->pFrameLockEvo->syncReadyLast = val; + } + + return (a && b); +} + +static NvBool GetFrameLockSync(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* return the cached state */ + + *val = ((pDispEvo->framelock.currentServerHead != NV_INVALID_HEAD) || + (pDispEvo->framelock.currentClientHeadsMask != 0x0)); + + return TRUE; +} + +static NvBool GetFrameLockSyncReady(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + /* return the cached state */ + + *val = pFrameLockEvo->syncReadyLast; + + return TRUE; +} + +static NvBool GetFrameLockStereoSync(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + return FrameLockGetStatusSync(pDispEvo, val, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC); +} + +static NvBool GetFrameLockTiming(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + return FrameLockGetStatusSync(pDispEvo, val, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING); +} + +static NvBool SetFrameLockTestSignal(NVDispEvoRec *pDispEvo, NvS64 val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* The test signal can only be emitted if the GPU is the server + * and framelock is enabled. + */ + + if (!nvDpyIdIsInvalid(pDispEvo->framelock.server) && + pDispEvo->framelock.syncEnabled) { + return FrameLockSetTestMode(pDispEvo->pFrameLockEvo, val); + } + + return FALSE; +} + +static NvBool GetFrameLockTestSignal(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo || + nvDpyIdIsInvalid(pDispEvo->framelock.server)) { + return FALSE; + } + + *val = pDispEvo->pFrameLockEvo->testMode; + + return TRUE; +} + +static NvBool SetFrameLockVideoMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if (pFrameLockEvo->videoModeReadOnly) { + return FALSE; + } + + return FrameLockSetVideoMode(pFrameLockEvo, val); +} + +static NvBool GetFrameLockVideoMode(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->videoMode; + + return TRUE; +} + +static NvBool GetFrameLockVideoModeValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO; + pValidValues->u.range.max = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL; + + if (pFrameLockEvo->videoModeReadOnly) { + pValidValues->writable = FALSE; + } + + return TRUE; +} + +static NvBool GetFrameLockFpgaRevision(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->fpgaIdAndRevision; + + return TRUE; +} + +static NvBool GetFrameLockFirmwareMajorVersion( + const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->firmwareMajorVersion; + + return TRUE; +} + +static NvBool GetFrameLockFirmwareMinorVersion( + const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute 
attribute,
+                                        NvS64 *val)
+{
+    *val = pFrameLockEvo->firmwareMinorVersion;
+
+    return TRUE;
+}
+
+static NvBool GetFrameLockBoardId(const NVFrameLockEvoRec *pFrameLockEvo,
+                                  enum NvKmsFrameLockAttribute attribute,
+                                  NvS64 *val)
+{
+    *val = pFrameLockEvo->boardId;
+
+    return TRUE;
+}
+
+static NvBool GetFrameLockFpgaRevisionUnsupported(
+    NVDispEvoPtr pDispEvo,
+    NvS64 *val)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    *val = pDevEvo->badFramelockFirmware;
+
+    return TRUE;
+}
+
+static NvBool GetFrameLockSyncDelayResolution(
+    const NVFrameLockEvoRec *pFrameLockEvo,
+    enum NvKmsFrameLockAttribute attribute,
+    NvS64 *val)
+{
+    *val = pFrameLockEvo->syncSkewResolution;
+
+    return TRUE;
+}
+
+NvBool nvSetFrameLockDisplayConfigEvo(NVDpyEvoRec *pDpyEvo, NvS64 val)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    unsigned int valid;
+    NvBool removeFromClients = FALSE;
+    NvBool removeFromServer = FALSE;
+
+    if (!pDispEvo || !pDispEvo->pFrameLockEvo) {
+        return FALSE;
+    }
+
+    /* Only set the config when framelock is disabled */
+
+    if (pDispEvo->framelock.syncEnabled) {
+        return FALSE;
+    }
+
+    valid = FrameLockGetValidDpyConfig(pDpyEvo);
+
+    /* Display device cannot be set as such */
+    if (!((1 << val) & valid)) {
+        return FALSE;
+    }
+
+    switch (val) {
+
+    case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER:
+        if (!nvDpyIdIsInvalid(pDispEvo->framelock.server) &&
+            !nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) {
+            NVDpyEvoPtr pOtherDpyEvo;
+
+            pOtherDpyEvo =
+                nvGetDpyEvoFromDispEvo(pDispEvo, pDispEvo->framelock.server);
+            if (pOtherDpyEvo) {
+                nvSendDpyAttributeChangedEventEvo(
+                    pOtherDpyEvo,
+                    NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG,
+                    NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED);
+            }
+        }
+        pDispEvo->framelock.server = pDpyEvo->id;
+        removeFromClients = TRUE;
+        break;
+
+    case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT:
+        pDispEvo->framelock.clients =
+            nvAddDpyIdToDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients);
+        removeFromServer = TRUE;
+        break;
+
+    case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED:
+        removeFromClients = TRUE;
+        removeFromServer = TRUE;
+        break;
+
+    default:
+        return FALSE;
+    }
+
+    if (removeFromClients) {
+        if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) {
+            pDispEvo->framelock.clients =
+                nvDpyIdListMinusDpyId(pDispEvo->framelock.clients, pDpyEvo->id);
+        }
+    }
+
+    if (removeFromServer) {
+        if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) {
+            pDispEvo->framelock.server = nvInvalidDpyId();
+        }
+    }
+
+    return TRUE;
+}
+
+NvBool nvGetFrameLockDisplayConfigEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+
+    if (!pDispEvo || !pDispEvo->pFrameLockEvo) {
+        return FALSE;
+    }
+
+    if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) {
+        *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER;
+    } else if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) {
+        *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT;
+    } else {
+        *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED;
+    }
+
+    return TRUE;
+}
+
+NvBool nvGetFrameLockDisplayConfigValidValuesEvo(
+    const NVDpyEvoRec *pDpyEvo,
+    struct NvKmsAttributeValidValuesCommonReply *pValidValues)
+{
+    if (pDpyEvo->pDispEvo->pFrameLockEvo == NULL) {
+        return FALSE;
+    }
+
+    nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS);
+
+    pValidValues->u.bits.ints = FrameLockGetValidDpyConfig(pDpyEvo);
+
+    return TRUE;
+}
+
+static const struct {
+    NvBool (*set)(NVDispEvoPtr pDispEvo, NvS64 value);
+    NvBool (*get)(NVDispEvoPtr pDispEvo, NvS64 *pValue);
+    enum NvKmsAttributeType type;
+}
DispAttributesDispatchTable[] = { + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK] = { + .set = NULL, + .get = GetFrameLock, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SYNC] = { + .set = SetFrameLockSync, + .get = GetFrameLockSync, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED] = { + .set = NULL, + .get = GetFrameLockFpgaRevisionUnsupported, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC] = { + .set = NULL, + .get = GetFrameLockStereoSync, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING] = { + .set = NULL, + .get = GetFrameLockTiming, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TEST_SIGNAL] = { + .set = SetFrameLockTestSignal, + .get = GetFrameLockTestSignal, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_RESET] = { + .set = ResetHardwareOneDisp, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SET_SWAP_BARRIER] = { + .set = SetSwapBarrier, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_ALLOW_FLIPLOCK] = { + .set = nvAllowFlipLockEvo, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG] = { + .set = NULL, + .get = nvRmQueryDpAuxLog, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, +}; + + +/*! + * Set pParams->attribute to pParams->value on the given disp. + */ +NvBool nvSetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsSetDispAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + if (DispAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + return DispAttributesDispatchTable[index].set(pDispEvo, + pParams->request.value); +} + + +/*! + * Get the value of pParams->attribute on the given disp. + */ +NvBool nvGetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsGetDispAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + if (DispAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return DispAttributesDispatchTable[index].get(pDispEvo, + &pParams->reply.value); +} + + +/*! + * Get the valid values of pParams->attribute on the given disp. + */ +NvBool nvGetDispAttributeValidValuesEvo( + const NVDispEvoRec *pDispEvo, + struct NvKmsGetDispAttributeValidValuesParams *pParams) +{ + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + /* + * FRAMELOCK and GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED + * can be queried without a pFrameLockEvo; all other + * attributes require a pFrameLockEvo. 
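+     *
+     * (The dispatch-table pattern shared by all three entry points is,
+     * in sketch form:
+     *
+     *     if (attr >= ARRAY_LEN(table) || table[attr].fn == NULL)
+     *         return FALSE;
+     *     return table[attr].fn(...);
+     *
+     * so an attribute is "supported" exactly when its slot is non-NULL.)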
+ */ + if (((pParams->request.attribute != NV_KMS_DISP_ATTRIBUTE_FRAMELOCK) && + (pParams->request.attribute != + NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED)) && + (pDispEvo->pFrameLockEvo == NULL)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (DispAttributesDispatchTable[index].get != NULL); + pReply->writable = (DispAttributesDispatchTable[index].set != NULL); + + pReply->type = DispAttributesDispatchTable[index].type; + + return TRUE; +} + + +static const struct { + NvBool (*set)(NVFrameLockEvoPtr pFrameLockEvo, NvS64 value); + NvBool (*get)(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, NvS64 *pValue); + NvBool (*getValidValues)( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + enum NvKmsAttributeType type; +} FrameLockAttributesDispatchTable[] = { + [NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY] = { + .set = SetFrameLockPolarity, + .get = GetFrameLockPolarity, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BITMASK, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY] = { + .set = FrameLockSetSyncDelay, + .get = GetFrameLockSyncDelay, + .getValidValues = GetFrameLockSyncDelayValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE] = { + .set = SetHouseSyncMode, + .get = GetHouseSyncMode, + .getValidValues = GetHouseSyncModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_INTERVAL] = { + .set = FrameLockSetSyncInterval, + .get = GetFrameLockSyncInterval, + .getValidValues = GetFrameLockSyncIntervalValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY] = { + .set = NULL, + .get = GetFrameLockSyncReady, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE] = { + .set = SetFrameLockVideoMode, + .get = GetFrameLockVideoMode, + .getValidValues = GetFrameLockVideoModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FPGA_REVISION] = { + .set = NULL, + .get = GetFrameLockFpgaRevision, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MAJOR_VERSION] = { + .set = NULL, + .get = GetFrameLockFirmwareMajorVersion, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MINOR_VERSION] = { + .set = NULL, + .get = GetFrameLockFirmwareMinorVersion, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_BOARD_ID] = { + .set = NULL, + .get = GetFrameLockBoardId, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY_RESOLUTION] = { + .set = NULL, + .get = GetFrameLockSyncDelayResolution, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED] = { + 
        .set = NULL,
+        .get = nvFrameLockGetStatusEvo,
+        .getValidValues = NULL,
+        .type = NV_KMS_ATTRIBUTE_TYPE_BITMASK,
+    },
+    [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE] = {
+        .set = NULL,
+        .get = nvFrameLockGetStatusEvo,
+        .getValidValues = NULL,
+        .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER,
+    },
+    [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4] = {
+        .set = NULL,
+        .get = nvFrameLockGetStatusEvo,
+        .getValidValues = NULL,
+        .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER,
+    },
+    [NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE] = {
+        .set = NULL,
+        .get = nvFrameLockGetStatusEvo,
+        .getValidValues = NULL,
+        .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER,
+    },
+};
+
+NvBool nvSetFrameLockAttributeEvo(
+    NVFrameLockEvoRec *pFrameLockEvo,
+    const struct NvKmsSetFrameLockAttributeParams *pParams)
+{
+    NvU32 index = pParams->request.attribute;
+
+    if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) {
+        return FALSE;
+    }
+
+    if (FrameLockAttributesDispatchTable[index].set == NULL) {
+        return FALSE;
+    }
+
+    if ((FrameLockAttributesDispatchTable[index].type ==
+         NV_KMS_ATTRIBUTE_TYPE_BOOLEAN) &&
+        (pParams->request.value != TRUE) &&
+        (pParams->request.value != FALSE)) {
+        return FALSE;
+    }
+
+    return FrameLockAttributesDispatchTable[index].set(pFrameLockEvo,
+                                                       pParams->request.value);
+}
+
+NvBool nvGetFrameLockAttributeEvo(
+    const NVFrameLockEvoRec *pFrameLockEvo,
+    struct NvKmsGetFrameLockAttributeParams *pParams)
+{
+    NvU32 index = pParams->request.attribute;
+
+    if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) {
+        return FALSE;
+    }
+
+    if (FrameLockAttributesDispatchTable[index].get == NULL) {
+        return FALSE;
+    }
+
+    return FrameLockAttributesDispatchTable[index].get(pFrameLockEvo,
+                                                       pParams->request.attribute,
+                                                       &pParams->reply.value);
+}
+
+NvBool nvGetFrameLockAttributeValidValuesEvo(
+    const NVFrameLockEvoRec *pFrameLockEvo,
+    struct NvKmsGetFrameLockAttributeValidValuesParams *pParams)
+{
+    struct NvKmsAttributeValidValuesCommonReply *pReply =
+        &pParams->reply.common;
+    NvU32 index = pParams->request.attribute;
+
+    if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) {
+        return FALSE;
+    }
+
+    nvkms_memset(pReply, 0, sizeof(*pReply));
+
+    pReply->readable = (FrameLockAttributesDispatchTable[index].get != NULL);
+    pReply->writable = (FrameLockAttributesDispatchTable[index].set != NULL);
+
+    pReply->type = FrameLockAttributesDispatchTable[index].type;
+
+    /*
+     * The getValidValues function provides two important things:
+     * - If type==Range, then it assigns reply::u::range.
+     * - If the attribute is not currently available, it returns FALSE.
+     * If the getValidValues function is NULL, assume the attribute is
+     * available. The type must not be something that requires assigning
+     * to reply::u.
+     */
+    if (FrameLockAttributesDispatchTable[index].getValidValues == NULL) {
+        nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_RANGE);
+        return TRUE;
+    }
+
+    return FrameLockAttributesDispatchTable[index].getValidValues(
+        pFrameLockEvo, pReply);
+}
+
+NvU32 nvGetFramelockServerHead(const NVDispEvoRec *pDispEvo)
+{
+    const NVDpyEvoRec *pDpyEvo =
+        nvGetDpyEvoFromDispEvo(pDispEvo, pDispEvo->framelock.server);
+    /* XXX[2Heads1OR] Get the primary hardware head. */
+    return (pDpyEvo != NULL) ? pDpyEvo->apiHead : NV_INVALID_HEAD;
+}
+
+NvU32 nvGetFramelockClientHeadsMask(const NVDispEvoRec *pDispEvo)
+{
+    NvU32 headsMask = 0x0;
+    const NVDpyEvoRec *pDpyEvo;
+
+    /*
+     * XXX[2Heads1OR] Translate api-head -> hardware-heads, and make sure to
+     * include the secondary hardware-head of the server dpy.
+ */
+    FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->framelock.clients, pDispEvo) {
+        if (pDpyEvo->apiHead == NV_INVALID_HEAD) {
+            continue;
+        }
+        headsMask |= NVBIT(pDpyEvo->apiHead);
+    }
+    return headsMask;
+}
+
+void nvUpdateGLSFramelock(const NVDispEvoRec *pDispEvo, const NvU32 head,
+                          const NvBool enable, const NvBool server)
+{
+    NVDpyEvoRec *pDpyEvo;
+    NvS64 value = enable | (server << 1);
+
+    /*
+     * XXX[2Heads1OR] Optimize this loop in a follow-on code change, once
+     * the apiHead -> pDpyEvo mapping is implemented.
+     */
+    FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) {
+        /* XXX[2Heads1OR] Get the primary hardware head. */
+        if (pDpyEvo->apiHead != head) {
+            continue;
+        }
+
+        nvSendDpyAttributeChangedEventEvo(pDpyEvo,
+                                          NV_KMS_DPY_ATTRIBUTE_UPDATE_GLS_FRAMELOCK,
+                                          value);
+    }
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c
new file mode 100644
index 0000000..e9f07ac
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c
@@ -0,0 +1,214 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvkms-types.h" +#include "nvkms-cursor.h" +#include "nvkms-hal.h" +#include "nvkms-rm.h" + + + +#include "class/cl9470.h" // NV9470_DISPLAY +#include "class/cl9570.h" // NV9570_DISPLAY +#include "class/cl9770.h" // NV9770_DISPLAY +#include "class/cl9870.h" // NV9870_DISPLAY +#include "class/clc370.h" // NVC370_DISPLAY +#include "class/clc570.h" // NVC570_DISPLAY +#include "class/clc670.h" // NVC670_DISPLAY + +#include "class/cl947d.h" // NV947D_CORE_CHANNEL_DMA +#include "class/cl957d.h" // NV957D_CORE_CHANNEL_DMA +#include "class/cl977d.h" // NV977D_CORE_CHANNEL_DMA +#include "class/cl987d.h" // NV987D_CORE_CHANNEL_DMA +#include "class/clc37d.h" // NVC37D_CORE_CHANNEL_DMA +#include "class/clc37e.h" // NVC37E_WINDOW_CHANNEL_DMA +#include "class/clc57d.h" // NVC57D_CORE_CHANNEL_DMA +#include "class/clc57e.h" // NVC57E_WINDOW_CHANNEL_DMA +#include "class/clc67d.h" // NVC67D_CORE_CHANNEL_DMA +#include "class/clc67e.h" // NVC67E_WINDOW_CHANNEL_DMA + +extern NVEvoHAL nvEvo94; +extern NVEvoHAL nvEvoC3; +extern NVEvoHAL nvEvoC5; +extern NVEvoHAL nvEvoC6; + +enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo) +{ +#define ENTRY(_classPrefix, \ + _pEvoHal, \ + _supportsInbandStereoSignaling, \ + _supportsDP13, \ + _supportsHDMI20, \ + _inputLutAppliesToBase, \ + _genericPageKind, \ + _validNIsoFormatMask, \ + _maxPitch, \ + _maxWidthInBytes, \ + _maxWidthInPixels, \ + _maxHeight, \ + _coreChannelDmaArmedOffset, \ + _dmaArmedSize) \ + { \ + .class = NV ## _classPrefix ## 70_DISPLAY, \ + .pEvoHal = _pEvoHal, \ + .coreChannelDma = { \ + .coreChannelClass = \ + NV ## _classPrefix ## 7D_CORE_CHANNEL_DMA, \ + .dmaArmedSize = _dmaArmedSize, \ + .dmaArmedOffset = \ + _coreChannelDmaArmedOffset, \ + }, \ + .evoCaps = { \ + .supportsDP13 = _supportsDP13, \ + .supportsInbandStereoSignaling = \ + _supportsInbandStereoSignaling, \ + .supportsHDMI20 = _supportsHDMI20, \ + .validNIsoFormatMask = _validNIsoFormatMask, \ + .inputLutAppliesToBase = _inputLutAppliesToBase, \ + .maxPitchValue = _maxPitch, \ + .maxWidthInBytes = _maxWidthInBytes, \ + .maxWidthInPixels = _maxWidthInPixels, \ + .maxHeight = _maxHeight, \ + .genericPageKind = _genericPageKind, \ + .maxRasterWidth = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_WIDTH), \ + .maxRasterHeight = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_HEIGHT),\ + } \ + } + +#define EVO_CORE_CHANNEL_DMA_ARMED_OFFSET 0x0 + +#define EVO_CORE_CHANNEL_DMA_ARMED_SIZE 0x1000 + + +/* Pre-NVDisplay EVO entries */ +#define ENTRY_EVO(_classPrefix, ...) \ + ENTRY(_classPrefix, __VA_ARGS__, \ + ((1 << NVKMS_NISO_FORMAT_LEGACY) | \ + (1 << NVKMS_NISO_FORMAT_FOUR_WORD)), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH) * \ + NVKMS_BLOCK_LINEAR_GOB_WIDTH, \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_WIDTH), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_HEIGHT), \ + EVO_CORE_CHANNEL_DMA_ARMED_OFFSET, \ + EVO_CORE_CHANNEL_DMA_ARMED_SIZE) + + +/* + * The file + * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_display_withoffset.ref.txt + * defines: + * + * #define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE 0x00680000 + * #define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE (0x00680000+32768) + * + * The NVD_CORE_CHANNEL_DMA_ARMED_OFFSET is calculated as + * (NV_UDISP_FE_CHN_ARMED_BASEADR_CORE - NV_UDISP_FE_CHN_ASSY_BASEADR_CORE). 
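+ * Substituting the values above: (0x00680000 + 32768) - 0x00680000 =
+ * 32768 = 0x8000.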
+ */
+#define NVD_CORE_CHANNEL_DMA_ARMED_OFFSET 0x8000
+
+/*
+ * From the above in dev_display_withoffset.ref.txt, ARMED is the upper
+ * 32k of the core channel's 64k space.
+ */
+#define NVD_CORE_CHANNEL_DMA_ARMED_SIZE 0x8000
+
+
+/*
+ * The file
+ * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/turing/tu104/dev_mmu.ref.txt
+ * defines:
+ *
+ * #define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x06
+ *
+ * The file
+ * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt
+ * defines:
+ *
+ * #define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe
+ *
+ * These correspond to the "generic" page kind used for non-compressed single-
+ * sample blocklinear color images on Turing+ and pre-Turing GPUs respectively.
+ * This is the only blocklinear memory layout display ever cares about.
+ */
+#define TURING_GENERIC_KIND 0x06
+#define FERMI_GENERIC_KIND 0xfe
+
+
+/* NVDisplay and later entries */
+#define ENTRY_NVD(_coreClassPrefix, _windowClassPrefix, ...) \
+    ENTRY(_coreClassPrefix, __VA_ARGS__, \
+          (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY), \
+          DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_PLANAR_STORAGE_PITCH), \
+          DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_PLANAR_STORAGE_PITCH) * \
+          NVKMS_BLOCK_LINEAR_GOB_WIDTH, \
+          DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_SIZE_IN_WIDTH), \
+          DRF_MASK(NV ## _windowClassPrefix ## 7E_SET_SIZE_IN_HEIGHT), \
+          NVD_CORE_CHANNEL_DMA_ARMED_OFFSET, \
+          NVD_CORE_CHANNEL_DMA_ARMED_SIZE)
+
+    static const struct {
+        NvU32 class;
+        const NVEvoHAL *pEvoHal;
+        const NVEvoCoreChannelDmaRec coreChannelDma;
+        const NVEvoCapsRec evoCaps;
+    } dispTable[] = {
+        /*
+         * genericPageKind ---------------------------+
+         * inputLutAppliesToBase ------------------+  |
+         * supportsHDMI20 ----------------------+  |  |
+         * supportsDP13 --------------------+   |  |  |
+         * inbandStereoSignaling --------+  |   |  |  |
+         * pEvoHal ------------+         |  |   |  |  |
+         * windowClassPrefix  |          |  |   |  |  |
+         * classPrefix    |   |          |  |   |  |  |
+         *            |   |   |          |  |   |  |  |
+         */
+        ENTRY_NVD(C6, C6, &nvEvoC6,      1, 1,  1, 0, TURING_GENERIC_KIND),
+        ENTRY_NVD(C5, C5, &nvEvoC5,      1, 1,  1, 0, TURING_GENERIC_KIND),
+        ENTRY_NVD(C3, C3, &nvEvoC3,      1, 1,  1, 0, FERMI_GENERIC_KIND),
+        ENTRY_EVO(98,     &nvEvo94,      1, 1,  1, 1, FERMI_GENERIC_KIND),
+        ENTRY_EVO(97,     &nvEvo94,      1, 1,  1, 1, FERMI_GENERIC_KIND),
+        ENTRY_EVO(95,     &nvEvo94,      1, 0,  1, 1, FERMI_GENERIC_KIND),
+        ENTRY_EVO(94,     &nvEvo94,      1, 0,  0, 1, FERMI_GENERIC_KIND),
+    };
+
+    int i;
+
+    for (i = 0; i < ARRAY_LEN(dispTable); i++) {
+        if (nvRmEvoClassListCheck(pDevEvo, dispTable[i].class)) {
+            pDevEvo->hal = dispTable[i].pEvoHal;
+            pDevEvo->dispClass = dispTable[i].class;
+            pDevEvo->caps = dispTable[i].evoCaps;
+
+            pDevEvo->coreChannelDma = dispTable[i].coreChannelDma;
+            nvAssert(nvRmEvoClassListCheck(
+                        pDevEvo,
+                        pDevEvo->coreChannelDma.coreChannelClass));
+
+            return nvInitDispHalCursorEvo(pDevEvo);
+        }
+    }
+
+    return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c
new file mode 100644
index 0000000..6f4fecf
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c
@@ -0,0 +1,2047 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2007 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This source file contains code for enabling HDMI audio.
+ */
+
+
+#include "nvkms-dpy.h"
+#include "nvkms-hdmi.h"
+#include "nvkms-evo.h"
+#include "nvkms-modepool.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-utils.h"
+#include "nvkms-vrr.h"
+#include "dp/nvdp-connector.h"
+
+#include "hdmi_spec.h"
+#include "nvos.h"
+
+#include <ctrl/ctrl0073/ctrl0073dfp.h>      // NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS
+#include <ctrl/ctrl0073/ctrl0073dp.h>       // NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM
+#include <ctrl/ctrl0073/ctrl0073specific.h> // NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS
+#include <ctrl/ctrl2080/ctrl2080unix.h>     // NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER
+
+#include <nvhdmipkt.h>
+
+#define CAP_HDMI_SUPPORT_GPU 0x00000001
+#define CAP_HDMI_SUPPORT_MONITOR 0x00000002
+
+static inline const NVT_EDID_CEA861_INFO *GetExt861(const NVParsedEdidEvoRec *pParsedEdid,
+                                                    int extIndex)
+{
+    if (!pParsedEdid->valid || extIndex > 1) {
+        return NULL;
+    }
+
+    return (extIndex == 0) ? &pParsedEdid->info.ext861 :
+                             &pParsedEdid->info.ext861_2;
+}
+
+/*
+ * CalculateVideoInfoFrameColorFormat() - calculate colorspace,
+ * colorimetry and colorrange for video infoframe.
+ */
+static void CalculateVideoInfoFrameColorFormat(
+    const NVAttributesSetEvoRec *pAttributesSet,
+    const NvU32 hdTimings,
+    NVT_VIDEO_INFOFRAME_CTRL *pCtrl,
+    NVT_EDID_INFO *pEdidInfo)
+{
+    NvBool sinkSupportsRGBQuantizationOverride = FALSE;
+
+    // sets video infoframe colorspace (RGB/YUV).
+    switch (pAttributesSet->colorSpace) {
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB:
+        pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_RGB;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422:
+        pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444:
+        pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444;
+        break;
+    case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420:
+        pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr420;
+        break;
+    default:
+        nvAssert(!"Invalid colorSpace value");
+        break;
+    }
+
+    // sets video infoframe colorimetry.
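+    // HD timings use BT.709 and SD timings use BT.601 for YCbCr 4:2:2/4:4:4;
+    // YCbCr 4:2:0 always uses BT.709.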
+ switch (pAttributesSet->colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pCtrl->colorimetry = NVT_COLORIMETRY_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + if (hdTimings) { + pCtrl->colorimetry = NVT_COLORIMETRY_YUV_709; + } else { + pCtrl->colorimetry = NVT_COLORIMETRY_YUV_601; + } + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + pCtrl->colorimetry = NVT_COLORIMETRY_YUV_709; + break; + default: + nvAssert(!"Invalid colorSpace value"); + break; + } + + // sets video infoframe colorrange. + switch (pAttributesSet->colorRange) { + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL: + pCtrl->rgb_quantization_range = + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_FULL_RANGE; + break; + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED: + pCtrl->rgb_quantization_range = + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE; + break; + default: + nvAssert(!"Invalid colorRange value"); + break; + } + + if (pEdidInfo != NULL) { + sinkSupportsRGBQuantizationOverride = (pEdidInfo->ext861.valid.VCDB && + ((pEdidInfo->ext861.video_capability & NVT_CEA861_VCDB_QS_MASK) >> + NVT_CEA861_VCDB_QS_SHIFT) != 0); + } + + // For RGB pixel encoding, explicitly set quantization bits in AVI Infoframe only + // if sink supports selectable RGB quantization range in VCDB of EDID. Or else + // set default range for the transmitted video format. + // (HDMI 2.0 spec section 7.3.1) + if ((pAttributesSet->colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB) && + !sinkSupportsRGBQuantizationOverride) { + pCtrl->rgb_quantization_range = NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_DEFAULT; + } + + // Only limited color range is allowed with YUV444 or YUV422 color spaces + nvAssert(!(((pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422) || + (pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444)) && + (pCtrl->rgb_quantization_range != + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE))); +} + +/* + * GetHDMISupportCap() - find the HDMI capabilities of + * the gpu and the display device. + */ + +static NvU32 GetHDMISupportCap(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 hdmiCap = 0; + int extIndex; + + if (pDpyEvo->hdmiCapable) { + hdmiCap |= CAP_HDMI_SUPPORT_GPU; + } + + for (extIndex = 0; TRUE; extIndex++) { + + int vsdbIndex; + const NVT_EDID_CEA861_INFO *pExt861 = + GetExt861(&pDpyEvo->parsedEdid, extIndex); + + if (pExt861 == NULL) { + break; + } + + if (pExt861->revision <= NVT_CEA861_REV_ORIGINAL) { + continue; + } + + for (vsdbIndex = 0; vsdbIndex < pExt861->total_vsdb; vsdbIndex++) { + if (pExt861->vsdb[vsdbIndex].ieee_id == NVT_CEA861_HDMI_IEEE_ID) { + hdmiCap |= CAP_HDMI_SUPPORT_MONITOR; + return hdmiCap; + } + } + } + + return hdmiCap; +} + +/*! + * Return whether the GPU supports HDMI and the display is connected + * via HDMI. + */ +NvBool nvDpyIsHdmiEvo(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 hdmiCap; + + hdmiCap = GetHDMISupportCap(pDpyEvo); + + return ((hdmiCap & CAP_HDMI_SUPPORT_GPU) && + (hdmiCap & CAP_HDMI_SUPPORT_MONITOR)); +} + +/*! + * Updates the display's HDMI 2.0 capabilities to the RM. 
+ */ +void nvUpdateHdmiCaps(NVDpyEvoPtr pDpyEvo) +{ + NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS params = { 0 }; + NVParsedEdidEvoPtr pParsedEdid = &pDpyEvo->parsedEdid; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (!pDevEvo->caps.supportsHDMI20 || + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.caps = 0; + + /* + * nvUpdateHdmiCaps() gets called on dpy's connect/disconnect events + * to set/clear capabilities, clear capabilities if parsed edid + * is not valid. + */ + if (pParsedEdid->valid) { + const NVT_HDMI_FORUM_INFO *pHdmiInfo = &pParsedEdid->info.hdmiForumInfo; + if (pHdmiInfo->scdc_present) { + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _SCDC_SUPPORTED, _TRUE); + } + + if (pHdmiInfo->max_TMDS_char_rate > 0) { + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _GT_340MHZ_CLOCK_SUPPORTED, _TRUE); + } + + if (pHdmiInfo->lte_340Mcsc_scramble) { + if (!pHdmiInfo->scdc_present) { + nvEvoLogDisp(pDispEvo, + EVO_LOG_WARN, + "EDID inconsistency: SCDC is not present in EDID, but EDID requests 340Mcsc scrambling."); + } + + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _LTE_340MHZ_SCRAMBLING_SUPPORTED, _TRUE); + } + + /* HDMI Fixed-rate link information */ + if (pDevEvo->hal->caps.supportsHDMIFRL) { + nvAssert((pHdmiInfo->max_FRL_Rate & + ~DRF_MASK(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED)) == 0); + params.caps |= DRF_NUM(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _MAX_FRL_RATE_SUPPORTED, + pHdmiInfo->max_FRL_Rate); + + if (pHdmiInfo->dsc_1p2) { + nvAssert((pHdmiInfo->dsc_1p2 & + ~DRF_MASK(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED)) == 0); + params.caps |= DRF_NUM(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _DSC_MAX_FRL_RATE_SUPPORTED, + pHdmiInfo->dsc_1p2); + params.caps |= DRF_DEF(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _DSC_12_SUPPORTED, _TRUE); + } + } + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS failed"); + } +} + +/* + * HdmiSendEnable() - Used to signal RM to enable various hdmi components + * such as audio engine. + */ + +static void HdmiSendEnable(NVDpyEvoPtr pDpyEvo, NvBool hdmiEnable) +{ + NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDpyEvo->pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.enable = hdmiEnable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE failed"); + } +} + +/* + * SendInfoFrame() - Send infoframe to the hardware through the hdmipkt + * library. 
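+ *
+ * The buffer handed to the library is laid out as: byte 0 holds the raw
+ * HDMI packet type (hdmi_pktType_*), followed by the rest of the infoframe
+ * header, then, if a checksum was requested in transmitControl, one
+ * checksum byte (PB0), and finally the payload.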
+ */ + +static void SendInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NVHDMIPKT_TC transmitControl, + NVT_INFOFRAME_HEADER *pInfoFrameHeader, + NvU32 infoframeSize) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVHDMIPKT_TYPE hdmiLibType; + NVHDMIPKT_RESULT ret; + NvU8 *infoframe = NULL; + NvU8 hdmiPacketType, checksum; + NvU32 i; + NvU8 *pPayload; + size_t headerSize; + NvBool needChecksum = + (transmitControl & DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)); + + /* + * The 'type' the timing library writes into the NVT_INFOFRAME_HEADER + * structure is not the same as the protocol values that hardware expects + * to see in the real packet header; those are defined in the + * HDMI_PACKET_TYPE enums (hdmi_pktType_*) from hdmi_spec.h; use those + * to fill in the first byte of the packet. It's *also* not the type that + * the HDMI library expects to see in its NvHdmiPkt_PacketWrite call; those + * are NVHDMIPKT_TYPE_*. Determine both below. + */ + switch (pInfoFrameHeader->type) { + default: + nvAssert(0); + return; + case NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET: + hdmiLibType = NVHDMIPKT_TYPE_GENERIC; + hdmiPacketType = hdmi_pktType_ExtendedMetadata; + break; + case NVT_INFOFRAME_TYPE_VIDEO: + hdmiLibType = NVHDMIPKT_TYPE_AVI_INFOFRAME; + hdmiPacketType = hdmi_pktType_AviInfoFrame; + break; + case NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC: + hdmiLibType = NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME; + hdmiPacketType = hdmi_pktType_VendorSpecInfoFrame; + break; + } + + /* + * These structures are weird. The NVT_VIDEO_INFOFRAME, + * NVT_VENDOR_SPECIFIC_INFOFRAME, NVT_EXTENDED_METADATA_PACKET_INFOFRAME, + * etc structures are *kind of* what we want to send to the hdmipkt library, + * except the type in the header is different, and a single checksum byte + * may need to be inserted *between* the header and the payload (requiring + * us to allocate a buffer one byte larger). + */ + infoframe = nvAlloc(infoframeSize + (needChecksum ? sizeof(checksum) : 0)); + if (infoframe == NULL) { + return; + } + + /* + * The fields and size of NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER + * match with those of NVT_INFOFRAME_HEADER at the time of writing, but + * nvtiming.h declares them separately. To be safe, special case + * NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET. + */ + if (pInfoFrameHeader->type == NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET) { + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *pExtMetadataHeader = + (NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *) pInfoFrameHeader; + + pPayload = (NvU8 *)(pExtMetadataHeader + 1); + headerSize = sizeof(NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER); + } else { + pPayload = (NvU8 *)(pInfoFrameHeader + 1); + headerSize = sizeof(NVT_INFOFRAME_HEADER); + } + + infoframe[0] = hdmiPacketType; + nvkms_memcpy(&infoframe[1], &((NvU8*) pInfoFrameHeader)[1], headerSize - 1); + + if (needChecksum) { + /* PB0: checksum */ + checksum = 0; + infoframe[headerSize] = 0; + for (i = 0; i < infoframeSize + sizeof(checksum); i++) { + checksum += infoframe[i]; + } + infoframe[headerSize] = ~checksum + 1; + } + + /* copy the payload, starting after the 3-byte header and checksum */ + nvkms_memcpy(&infoframe[headerSize + (needChecksum ? 
sizeof(checksum) : 0)], + pPayload, infoframeSize - headerSize /* payload size */); + + ret = NvHdmiPkt_PacketWrite(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + head, + hdmiLibType, + transmitControl, + infoframeSize, + infoframe); + + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + } + + nvFree(infoframe); +} + +/* + * SendVideoInfoFrame() - Construct video infoframe using provided EDID and call + * SendInfoFrame() to send it to RM. + */ +static void SendVideoInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVAttributesSetEvoRec *pAttributesSet, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVT_EDID_INFO *pEdidInfo) +{ + NvBool hdTimings = pInfoFrameState->hdTimings; + NVT_VIDEO_INFOFRAME_CTRL videoCtrl = pInfoFrameState->videoCtrl; + NVT_VIDEO_INFOFRAME VideoInfoFrame; + NVT_STATUS status; + + + CalculateVideoInfoFrameColorFormat(pAttributesSet, hdTimings, &videoCtrl, pEdidInfo); + + status = NvTiming_ConstructVideoInfoframe(pEdidInfo, + &videoCtrl, + NULL, &VideoInfoFrame); + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Video InfoFrame"); + return; + } + + SendInfoFrame(pDispEvo, + head, + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME, + (NVT_INFOFRAME_HEADER *) &VideoInfoFrame, + sizeof(VideoInfoFrame)); +} + +/* + * SendHDMIVendorSpecificInfoFrame() - Construct vendor specific infoframe + * using provided EDID and call SendInfoFrame() to send it to RM. + */ + +static void +SendHDMIVendorSpecificInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVT_EDID_INFO *pEdidInfo) +{ + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL vendorCtrl = pInfoFrameState->vendorCtrl; + NVT_VENDOR_SPECIFIC_INFOFRAME vendorInfoFrame; + NVT_STATUS status; + + status = NvTiming_ConstructVendorSpecificInfoframe(pEdidInfo, + &vendorCtrl, + &vendorInfoFrame); + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Vendor Specific InfoFrame"); + return; + } + + SendInfoFrame(pDispEvo, + head, + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME, + &vendorInfoFrame.Header, + sizeof(vendorInfoFrame)); +} + +/* + * Send video and 3D InfoFrames for HDMI. 
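+ *
+ * The AVI (video) infoframe carries the color format, colorimetry, and
+ * quantization range; the HDMI vendor specific infoframe carries the 3D
+ * and extended resolution (HDMI VIC) signaling.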
+ */ +void nvUpdateHdmiInfoFrames(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVAttributesSetEvoRec *pAttributesSet, + const NVDispHeadInfoFrameStateEvoRec *pInfoFrameState, + NVDpyEvoRec *pDpyEvo) +{ + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return; + } + + if (!pDpyEvo->parsedEdid.valid) { + nvEvoLogDispDebug( + pDispEvo, EVO_LOG_WARN, + "No EDID: cannot construct video/vendor-specific info frame"); + return; + } + + SendVideoInfoFrame(pDispEvo, + head, + pAttributesSet, + pInfoFrameState, + &pDpyEvo->parsedEdid.info); + + SendHDMIVendorSpecificInfoFrame(pDispEvo, + head, + pInfoFrameState, + &pDpyEvo->parsedEdid.info); +} + +static void SetDpAudioMute(const NVDispEvoRec *pDispEvo, + const NvU32 displayId, const NvBool mute) +{ + NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + params.mute = mute; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "NvRmControl" + "(NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM) failed" + "return status = %d...", ret); + } +} + +static void SetDpAudioEnable(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + /* Mute audio stream before disabling it */ + if (!enable) { + SetDpAudioMute(pDispEvo, pHeadState->activeRmId, TRUE); + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.enable = enable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "%s: Failed to %s DisplayPort audio stream-%u", + pConnectorEvo->name, + enable ? 
"enable" : "disable", + head); + } + + /* Unmute audio stream after enabling it */ + if (enable) { + SetDpAudioMute(pDispEvo, pHeadState->activeRmId, FALSE); + } +} + +static void EnableHdmiAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + static const NvU8 InfoframeMutePacket[] = { + pktType_GeneralControl, 0, 0, HDMI_GENCTRL_PACKET_MUTE_ENABLE, 0, 0, 0, 0, + 0, 0 + }; + static const NvU8 InfoframeUnMutePacket[] = { + pktType_GeneralControl, 0, 0, HDMI_GENCTRL_PACKET_MUTE_DISABLE, 0, 0, 0, 0, + 0, 0 + }; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.transmitControl = + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _OTHER_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _SINGLE_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ON_HBLANK, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _VIDEO_FMT, _SW_CONTROLLED) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _RESERVED_LEGACY_MODE, _NO); + + params.packetSize = sizeof(InfoframeMutePacket); + + nvAssert(sizeof(InfoframeMutePacket) == sizeof(InfoframeUnMutePacket)); + + nvkms_memcpy(params.aPacket, + enable ? InfoframeUnMutePacket : InfoframeMutePacket, + params.packetSize); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET failed"); + } +} + +static const NVT_EDID_CEA861_INFO *GetMaxSampleRateExtBlock( + const NVParsedEdidEvoRec *pParsedEdid, + NvU32 *pMaxFreqSupported) +{ + const NVT_EDID_CEA861_INFO *pExt861 = NULL; + int extIndex; + int i; + + *pMaxFreqSupported = 0; + + for (extIndex = 0; TRUE; extIndex++) { + + NvU8 sampleRateMask = 0; + const NVT_EDID_CEA861_INFO *pTmpExt861 = + GetExt861(pParsedEdid, extIndex); + NvU32 maxFreqSupported = 0; + + if (pTmpExt861 == NULL) { + break; + } + + if (pTmpExt861->revision == NVT_CEA861_REV_NONE) { + continue; + } + + /* loop through all SAD to find out the max supported rate */ + for (i = 0; i < NVT_CEA861_AUDIO_MAX_DESCRIPTOR; i++) { + + const NvU8 byte1 = pTmpExt861->audio[i].byte1; + const NvU8 byte2 = pTmpExt861->audio[i].byte2; + + if (byte1 == 0) { + break; + } + + if ((byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK) > sampleRateMask) { + sampleRateMask = byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK; + } + } + + if (sampleRateMask != 0) { + /* get the highest bit index */ + for (i = 7; i >= 1; i--) { + if ((1<<(i-1)) & sampleRateMask) { + maxFreqSupported = i; + break; + } + } + } else if (pTmpExt861->basic_caps & NVT_CEA861_CAP_BASIC_AUDIO) { + /* + * no short audio descriptor found, try the basic cap + * Uncompressed, two channel, digital audio. Exact parameters are + * determined by the interface specification used with CEA-861-D + * (e.g., 2 channel IEC 60958 LPCM, 32, 44.1, and 48 kHz + * sampling rates, 16 bits/sample). 
+ */ + maxFreqSupported = + NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ; + } + + if (maxFreqSupported > *pMaxFreqSupported) { + *pMaxFreqSupported = maxFreqSupported; + pExt861 = pTmpExt861; + } + } + + return pExt861; +} + +/* + * Search a CEA-861 block for a Vendor Specific Data Block + * with an IEEE "HDMI Licensing, LLC" OUI. + * + * If found, returns VSDB_DATA * to Vendor Specific Data Block + * If !found, returns NULL + */ +static const VSDB_DATA *GetVsdb(const NVT_EDID_CEA861_INFO *pExt861) +{ + const VSDB_DATA *pVsdb = NULL; + + for (int i = 0; i < pExt861->total_vsdb; i++) { + if (pExt861->vsdb[i].ieee_id == NVT_CEA861_HDMI_IEEE_ID) { + pVsdb = &pExt861->vsdb[i]; + break; + } + } + return pVsdb; +} + +static NvBool FillELDBuffer(const NvU32 displayId, + const NvBool isDisplayPort, + const NVParsedEdidEvoRec *pParsedEdid, + NVEldEvoRec *pEld, + NvU32 *pMaxFreqSupported) +{ + const NVT_EDID_CEA861_INFO *pExt861; + NvU32 SADCount, monitorNameLen; + NvU8 name[NVT_EDID_LDD_PAYLOAD_SIZE + 1]; + NVT_STATUS status; + NvU32 i; + NvU8 EldSAI = 0; + NvU8 EldAudSynchDelay = 0; + const VSDB_DATA *pVsdb; + + pExt861 = GetMaxSampleRateExtBlock(pParsedEdid, pMaxFreqSupported); + + if (pExt861 == NULL) { + return FALSE; + } + + /* ELD header block: offset 0: ELD_Ver */ + pEld->buffer[0] = NVT_ELD_VER_2 << 3; + + /* Baseline block: offset 4: CEA_EDID_Ver */ + pEld->buffer[4] = pExt861->revision << 5; + + /* offset 5: SAD_Count */ + SADCount = 0; + while (SADCount < NVT_CEA861_AUDIO_MAX_DESCRIPTOR && + pExt861->audio[SADCount].byte1 != 0) { + SADCount++; + } + pEld->buffer[5] = SADCount << 4; + + /* offset 5: Conn_Type */ + if (isDisplayPort) { + pEld->buffer[5] |= NVT_ELD_CONN_TYPE_DP << 2; + } else { + pEld->buffer[5] |= NVT_ELD_CONN_TYPE_HDMI << 2; + } + + /* offset 5 b0: HDCP; always 0 for now */ + + pVsdb = GetVsdb(pExt861); + /* offset 5 b1=1 if Supports_AI; always 0 for DP */ + if ((!isDisplayPort) && + (pVsdb != NULL) && + (pVsdb->vendor_data_size > 2)) { + EldSAI = pVsdb->vendor_data[2]; + EldSAI >>= 7; + } + pEld->buffer[5] |= EldSAI << 1; + + /* offset 6: Aud_Synch_delay in units of 2 msec */ + if ((pVsdb != NULL) && + (pVsdb->vendor_data_size > 6)) { + EldAudSynchDelay = pVsdb->vendor_data[6]; + } + pEld->buffer[6] = EldAudSynchDelay; + + /* offset 7: speaker allocation multiple allocation is not supported in ELD */ + pEld->buffer[7] = pExt861->speaker[0].byte1; + + /* + * offset 8 ~ 15: port ID; nobody knows what port ID is, so far DD/RM/Audio + * all agree to fill it with display Id. + */ + pEld->buffer[8] = displayId & 0xff; + pEld->buffer[9] = (displayId >> 8) & 0xff; + pEld->buffer[10] = (displayId >> 16) & 0xff; + pEld->buffer[11] = (displayId >> 24) & 0xff; + + /* offset 16 ~ 17: manufacturer name */ + pEld->buffer[16] = pParsedEdid->info.manuf_id & 0xff; + pEld->buffer[17] = pParsedEdid->info.manuf_id >> 8; + /* offset 18 ~ 19: product code */ + pEld->buffer[18] = pParsedEdid->info.product_id & 0xff; + pEld->buffer[19] = (pParsedEdid->info.product_id >> 8) & 0xff; + + /* + * offset 20 ~ 20 + MNL - 1: monitor name string (MNL - Monitor Name + * Length) + */ + + monitorNameLen = 0; + + status = NvTiming_GetProductName(&pParsedEdid->info, name, sizeof(name)); + + if (status == NVT_STATUS_SUCCESS) { + /* + * NvTiming_GetProductName() returns a nul-terminated string, but the + * string in the EDID is terminated with 0x0A and padded with 0x20. + * Put back these special characters. 
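+ * For example, the 8-character name "ACME LCD" is stored in the 13-byte
+ * descriptor payload as "ACME LCD" followed by 0x0A and four 0x20 pad
+ * bytes; the loop below recreates exactly that layout.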
+ */
+        NvBool pastTerminator = FALSE;
+        NvU32 i;
+
+        for (i = 0; i < NVT_EDID_LDD_PAYLOAD_SIZE; i++) {
+            if (pastTerminator) {
+                name[i] = 0x20;
+            }
+            if (name[i] == '\0') {
+                name[i] = 0x0A;
+                pastTerminator = TRUE;
+            }
+        }
+
+        monitorNameLen = NVT_EDID_LDD_PAYLOAD_SIZE;
+        pEld->buffer[4] |= NVT_EDID_LDD_PAYLOAD_SIZE;
+        nvkms_memcpy(&pEld->buffer[20], name,
+                     NVT_EDID_LDD_PAYLOAD_SIZE);
+    }
+
+    /* offset 20 + MNL ~ 20 + MNL + (3 * SAD_Count) - 1 : CEA_SADs */
+    if (SADCount) {
+        const size_t sadSize = SADCount * sizeof(NVT_3BYTES);
+        const size_t bufferSize = sizeof(pEld->buffer) - monitorNameLen - 20;
+        const size_t copySize = NV_MIN(bufferSize, sadSize);
+        nvAssert(copySize == sadSize);
+
+        nvkms_memcpy(&pEld->buffer[20 + monitorNameLen],
+                     &pExt861->audio[0], copySize);
+    }
+
+    /*
+     * The reserved section is not used yet.
+     * offset 20 + MNL + (3 * SAD_Count) ~ 4 + Baseline_ELD_Len * 4 - 1;
+     */
+
+    /* Baseline block size in DWORD */
+    i = (16 + monitorNameLen + SADCount * sizeof(NVT_3BYTES) +
+         sizeof(NvU32) - 1) / sizeof(NvU32);
+    pEld->buffer[2] = (NvU8)i;
+
+    /* Update the entire ELD space */
+    pEld->size = NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER;
+
+    return TRUE;
+}
+
+void nvHdmiDpConstructHeadAudioState(const NvU32 displayId,
+                                     const NVDpyEvoRec *pDpyEvo,
+                                     NVDispHeadAudioStateEvoRec *pAudioState)
+{
+    nvkms_memset(pAudioState, 0, sizeof(*pAudioState));
+
+    /*
+     * CRT and DSI digital flat panels do not support audio. If
+     * (supported == FALSE), nvHdmiDpEnableDisableAudio() does nothing.
+     */
+    if (pDpyEvo->pConnectorEvo->legacyType !=
+        NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP ||
+        pDpyEvo->pConnectorEvo->signalFormat ==
+        NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) {
+        return;
+    }
+
+    /*
+     * DP/TMDS digital flat panels support audio, but do not enable audio
+     * on eDP and DVI displays. Some eDP panels go blank when audio is
+     * enabled, and DVI monitors do not support audio.
+     *
+     * If (supported == TRUE) and (enabled == FALSE) then
+     * nvHdmiDpEnableDisableAudio() makes sure to keep audio disabled for
+     * a given head.
+     */
+    pAudioState->supported = TRUE;
+
+    if ((nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) &&
+         pDpyEvo->internal) ||
+        (!nvDpyIsHdmiEvo(pDpyEvo) &&
+         !nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo))) {
+        return;
+    }
+
+    if (FillELDBuffer(displayId,
+                      nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo),
+                      &pDpyEvo->parsedEdid,
+                      &pAudioState->eld,
+                      &pAudioState->maxFreqSupported)) {
+        pAudioState->isAudioOverHdmi = nvDpyIsHdmiEvo(pDpyEvo);
+        pAudioState->enabled = TRUE;
+    }
+}
+
+/*
+ * Returns the audio device entry of the connector which should be attached
+ * to the given head. Returns NONE if the head is inactive.
+ *
+ * Each connector (SOR) supports four audio device entries, from 0 to 3,
+ * which can drive four independent audio streams. Any head can be attached to
+ * any audio device entry.
+ *
+ * Before audio-over-dp-mst support, by default the 0th device entry was
+ * used when a given head was driving a DP-SST/HDMI/DVI display. This
+ * function preserves that behavior. In the case of DP-MST, multiple heads
+ * are attached to a single connector. In that case this function returns
+ * a device entry equal to the given head index.
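+ * For example, a DP-MST stream driven by head 2 uses DEVICE_ENTRY_2, while
+ * a DP-SST, HDMI, or DVI stream always uses DEVICE_ENTRY_0.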
+ */
+static NvU32 GetAudioDeviceEntry(const NVDispEvoRec *pDispEvo, const NvU32 head)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    const NVConnectorEvoRec *pConnectorEvo =
+                             pHeadState->pConnectorEvo;
+
+    if (pConnectorEvo == NULL) {
+        return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE;
+    }
+
+    ct_assert(NV_MAX_AUDIO_DEVICE_ENTRIES == NVKMS_MAX_HEADS_PER_DISP);
+
+    if (nvConnectorUsesDPLib(pConnectorEvo) &&
+        (nvDPGetActiveLinkMode(pConnectorEvo->pDpLibConnector) ==
+         NV_DP_LINK_MODE_MST)) {
+        return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0 + head;
+    }
+
+    return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0;
+}
+
+/*!
+ * Send EDID-Like-Data (ELD) to RM.
+ *
+ * ELD should be updated under the following situations:
+ *
+ * 1. Power on reset
+ * 2. Pre modeset
+ * 3. HotPlug / Post modeset
+ *
+ * Apart from ELD, also update the following control flags:
+ *
+ *   isPD   - Present Detect, indicates if the monitor is attached
+ *   isELDV - indicates if the ELD is Valid
+ *
+ * The values of isPD and isELDV should be:
+ *
+ *   NV_ELD_POWER_ON_RESET : isPD = 0, isELDV = 0
+ *   NV_ELD_PRE_MODESET    : isPD = 1, isELDV = 0
+ *   NV_ELD_POST_MODESET   : isPD = 1, isELDV = 1
+ *
+ * The initial ELD case of each audio device entry in hardware is unknown.
+ * Fortunately, NVConnectorEvoRec::audioDevEldCase[] is zero-initialized,
+ * which means each audioDevEldCase[] array element will have initial
+ * value NV_ELD_PRE_MODESET=0.
+ *
+ * That ensures that nvRemoveUnusedHdmiDpAudioDevice(), during
+ * the first modeset, will reset all unused audio device entries to
+ * NV_ELD_POWER_ON_RESET.
+ *
+ * \param[in]  pDispEvo          The disp of the displayId
+ * \param[in]  pConnectorEvo     The connector whose audio device entry is
+ *                               updated.
+ * \param[in]  displayId         The display device whose ELD should be
+ *                               updated. This should be
+ *                               NVDispHeadStateEvoRec::activeRmId in case
+ *                               of NV_ELD_PRE_MODESET and
+ *                               NV_ELD_POST_MODESET, otherwise it should be
+ *                               NVConnectorEvoRec::displayId.
+ * \param[in]  deviceEntry       The device entry of the connector.
+ * \param[in]  maxFreqSupported  The maximum audio sample rate supported by
+ *                               the monitor.
+ * \param[in]  pEld              The ELD buffer extracted from the parsed
+ *                               EDID.
+ * \param[in]  eldCase           The condition that requires updating the ELD.
+ */ + +static void RmSetELDAudioCaps( + const NVDispEvoRec *pDispEvo, + NVConnectorEvoRec *pConnectorEvo, + const NvU32 displayId, + const NvU32 deviceEntry, + const NvU32 maxFreqSupported, const NVEldEvoRec *pEld, + const NvEldCase eldCase) +{ + NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS params = { 0 }; + NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS audio_power_params = { 0 }; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvBool isPD, isELDV; + NvU32 ret; + + pConnectorEvo->audioDevEldCase[deviceEntry] = eldCase; + + /* setup the ctrl flag */ + switch(eldCase) { + case NV_ELD_POWER_ON_RESET : + isPD = isELDV = FALSE; + break; + case NV_ELD_PRE_MODESET : + isPD = TRUE; + isELDV = FALSE; + break; + case NV_ELD_POST_MODESET : + isPD = isELDV = TRUE; + break; + default : + return; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.deviceEntry = deviceEntry; + params.displayId = displayId; + + if (isELDV) { + if (pEld->size == 0) { + isPD = isELDV = FALSE; + } else { + ct_assert(sizeof(params.bufferELD) == sizeof(pEld->buffer)); + + nvkms_memcpy(params.bufferELD, pEld->buffer, sizeof(pEld->buffer)); + params.numELDSize = pEld->size; + + params.maxFreqSupported = maxFreqSupported; + } + } else { + params.numELDSize = 0; + } + + params.ctrl = + DRF_NUM(0073_CTRL, _DFP_ELD_AUDIO_CAPS, _CTRL_PD, isPD)| + DRF_NUM(0073_CTRL, _DFP_ELD_AUDIO_CAPS, _CTRL_ELDV, isELDV); + + /* + * ELD information won't be populated to GPU HDA controller driver if + * HDA controller is in suspended state. + * Issue NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER RM control call for + * bringing the HDA controller in active state before writing ELD. Once ELD + * data is written, then HDA controller can again go into suspended state. + */ + audio_power_params.bEnter = FALSE; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle, + NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER, + &audio_power_params, sizeof(audio_power_params)); + + if (ret != NVOS_STATUS_SUCCESS) + nvAssert(!"NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER failed"); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "NvRmControl" + "(NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS) failed" + "return status = %d...", ret); + } + + audio_power_params.bEnter = TRUE; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle, + NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER, + &audio_power_params, sizeof(audio_power_params)); + + if (ret != NVOS_STATUS_SUCCESS) + nvAssert(!"NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER failed"); + +} + +void nvHdmiDpEnableDisableAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + const NvU32 deviceEntry = GetAudioDeviceEntry(pDispEvo, head); + + /* + * We should only reach this function for active heads, and therefore + * pConnectorEvo and deviceEntry are valid. 
+ */ + nvAssert((pHeadState->pConnectorEvo != NULL) && + (deviceEntry != NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE)); + + if (!pHeadState->audio.supported) { + return; + } + + if (!pHeadState->audio.enabled) { + + if (enable) { + /* Make sure to remove corresponding audio device */ + RmSetELDAudioCaps(pDispEvo, + pConnectorEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + deviceEntry, + 0 /* maxFreqSupported */, + NULL /* pEld */, + NV_ELD_POWER_ON_RESET); + } else { + /* Do nothing. The audio device is already in the disabled state. */ + } + + return; + } + + /* Invalidate ELD buffer before disabling audio */ + if (!enable) { + RmSetELDAudioCaps(pDispEvo, + pConnectorEvo, + pHeadState->activeRmId, + deviceEntry, + 0 /* maxFreqSupported */, + NULL /* pEld */, + NV_ELD_PRE_MODESET); + } + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + SetDpAudioEnable(pDispEvo, head, enable); + } + + if (pHeadState->audio.isAudioOverHdmi) { + EnableHdmiAudio(pDispEvo, head, enable); + } + + /* Populate ELD buffer after enabling audio */ + if (enable) { + RmSetELDAudioCaps(pDispEvo, + pConnectorEvo, + pHeadState->activeRmId, + deviceEntry, + pHeadState->audio.maxFreqSupported, + &pHeadState->audio.eld, + NV_ELD_POST_MODESET); + } +} + +/* + * Report HDMI capabilities to RM before modeset. + */ +void nvDpyUpdateHdmiPreModesetEvo(NVDpyEvoPtr pDpyEvo) +{ + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return; + } + + HdmiSendEnable(pDpyEvo, TRUE); +} + +/* + * Parse HDMI 2.1 VRR capabilities from the EDID and GPU. + */ +void nvDpyUpdateHdmiVRRCaps(NVDpyEvoPtr pDpyEvo) +{ +} + +void nvRemoveUnusedHdmiDpAudioDevice(const NVDispEvoRec *pDispEvo) +{ + NVConnectorEvoRec *pConnectorEvo; + const NvU32 activeSorMask = nvGetActiveSorMask(pDispEvo); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NvU32 deviceEntry; + + // Only connectors with assigned SORs can have audio. + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR || + pConnectorEvo->or.mask == 0x0) { + continue; + } + + // Check whether an active pConnectorEvo shares an SOR with this one. + // + // This is a workaround for the fact that + // NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS takes a displayId rather than + // an SOR index. See bug 1953489. + if (nvIsConnectorActiveEvo(pConnectorEvo) && + (pConnectorEvo->or.mask & activeSorMask) != 0x0) { + continue; + } + + for (deviceEntry = 0; + deviceEntry < NV_MAX_AUDIO_DEVICE_ENTRIES; + deviceEntry++) { + + /* + * Skip if the audio device is enabled (ELD case is set to + * NV_ELD_POST_MODESET by nvHdmiDpEnableDisableAudio()), or if the + * audio device is already disabled (ELD case is set to + * NV_ELD_POWER_ON_RESET). + */ + if ((pConnectorEvo->audioDevEldCase[deviceEntry] == + NV_ELD_POST_MODESET) || + (pConnectorEvo->audioDevEldCase[deviceEntry] == + NV_ELD_POWER_ON_RESET)) { + continue; + } + + RmSetELDAudioCaps(pDispEvo, + pConnectorEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + deviceEntry, + 0 /* maxFreqSupported */, + NULL /* pEld */, + NV_ELD_POWER_ON_RESET); + } + } +} + +/* + * Find the name of the given audio format, as described in the + * CEA-861 specification's description of byte 1 in the Audio + * Descriptor Block. hasSampleSize and hasMaxBitRate (i.e., how to + * interpret byte 3 of the Audio Descriptor Block) are a function of + * audio format, so set them as a side effect of interpreting the + * audio format. + * + * Note the return value is a const char * and should not be freed. 
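+ * A format code without an entry in the table below yields an empty string
+ * with both flags cleared.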
+ */ +static const char *GetCea861AudioFormatInfo(NvU8 format, + NvBool *hasSampleSize, + NvBool *hasMaxBitRate) +{ + static const struct { + NvU8 format; + NvBool hasSampleSize : 1; + NvBool hasMaxBitRate : 1; + const char *name; + } audioFormatTable[] = { + { NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM, TRUE, FALSE, "PCM" }, + { NVT_CEA861_AUDIO_FORMAT_AC3, FALSE, TRUE, "AC-3" }, + { NVT_CEA861_AUDIO_FORMAT_MPEG1, FALSE, TRUE, "MPEG-1" }, + { NVT_CEA861_AUDIO_FORMAT_MP3, FALSE, TRUE, "MP3" }, + { NVT_CEA861_AUDIO_FORMAT_MPEG2, FALSE, TRUE, "MPEG-2" }, + { NVT_CEA861_AUDIO_FORMAT_AAC, FALSE, TRUE, "AAC" }, + { NVT_CEA861_AUDIO_FORMAT_DTS, FALSE, TRUE, "DTS" }, + { NVT_CEA861_AUDIO_FORMAT_ATRAC, FALSE, TRUE, "ATRAC" }, + { NVT_CEA861_AUDIO_FORMAT_ONE_BIT, FALSE, FALSE, "DSD" }, + { NVT_CEA861_AUDIO_FORMAT_DDP, FALSE, FALSE, "E-AC-3" }, + { NVT_CEA861_AUDIO_FORMAT_DTS_HD, FALSE, FALSE, "DTS-HD" }, + { NVT_CEA861_AUDIO_FORMAT_MAT, FALSE, FALSE, "MLP" }, + { NVT_CEA861_AUDIO_FORMAT_DST, FALSE, FALSE, "DSP" }, + { NVT_CEA861_AUDIO_FORMAT_WMA_PRO, FALSE, FALSE, "WMA Pro" }, + }; + + int i; + + *hasSampleSize = FALSE; + *hasMaxBitRate = FALSE; + + for (i = 0; i < ARRAY_LEN(audioFormatTable); i++) { + if (format != audioFormatTable[i].format) { + continue; + } + + *hasSampleSize = audioFormatTable[i].hasSampleSize; + *hasMaxBitRate = audioFormatTable[i].hasMaxBitRate; + + return audioFormatTable[i].name; + } + + return ""; +} + + +/* + * Build a string description of the list of sample Rates, as + * described in the CEA-861 specification's description of byte 2 in + * the Audio Descriptor Block. + * + * Note the return value is a static char * and will be overwritten in + * subsequent calls to this function. + */ +static const char *GetCea861AudioSampleRateString(NvU8 sampleRates) +{ + static const struct { + NvU8 rate; + const char *name; + } sampleRateTable[] = { + { NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ, "32KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ, "44KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ, "48KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ, "88KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ, "96KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ,"176KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ,"192KHz" }, + }; + + static char sampleRateString[64]; + + NvBool first = TRUE; + int i; + char *s; + int ret, bytesLeft = sizeof(sampleRateString); + + sampleRateString[0] = '\0'; + s = sampleRateString; + + for (i = 0; i < ARRAY_LEN(sampleRateTable); i++) { + if (sampleRates & sampleRateTable[i].rate) { + if (first) { + first = FALSE; + } else { + ret = nvkms_snprintf(s, bytesLeft, ", "); + s += ret; + bytesLeft -= ret; + } + ret = nvkms_snprintf(s, bytesLeft, "%s", sampleRateTable[i].name); + s += ret; + bytesLeft -= ret; + } + } + + nvAssert(bytesLeft >= 0); + + return sampleRateString; +} + + +/* + * Build a string description of the list of sample sizes, as + * described in the CEA-861 specification's description of byte 3 in + * the Audio Descriptor Block. + * + * Note the return value is a static char * and will be overwritten in + * subsequent calls to this function. 
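+ * The 64-byte buffer is ample: the three size names plus ", " separators
+ * need fewer than 30 bytes, including the terminator.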
+ */ +static const char *GetCea861AudioSampleSizeString(NvU8 sampleSizes) +{ + static const struct { + NvU8 bit; + const char *name; + } sampleSizeTable[] = { + { NVT_CEA861_AUDIO_SAMPLE_SIZE_16BIT, "16-bits" }, + { NVT_CEA861_AUDIO_SAMPLE_SIZE_20BIT, "20-bits" }, + { NVT_CEA861_AUDIO_SAMPLE_SIZE_24BIT, "24-bits" }, + }; + + static char sampleSizeString[64]; + + NvBool first = TRUE; + int i; + char *s; + int ret, bytesLeft = sizeof(sampleSizeString); + + sampleSizeString[0] = '\0'; + s = sampleSizeString; + + for (i = 0; i < ARRAY_LEN(sampleSizeTable); i++) { + if (sampleSizes & sampleSizeTable[i].bit) { + if (first) { + first = FALSE; + } else { + ret = nvkms_snprintf(s, bytesLeft, ", "); + s += ret; + bytesLeft -= ret; + } + ret = nvkms_snprintf(s, bytesLeft, "%s", sampleSizeTable[i].name); + s += ret; + bytesLeft -= ret; + } + } + + nvAssert(bytesLeft >= 0); + + return sampleSizeString; +} + + +/* + * Log the speaker allocation data block, as described in the CEA-861 + * specification. + */ +static void LogEdidCea861SpeakerAllocationData(NVEvoInfoStringPtr pInfoString, + NvU8 speaker) +{ + if ((speaker & NVT_CEA861_SPEAKER_ALLOC_MASK) == 0) { + return; + } + + nvEvoLogInfoString(pInfoString, + " Speaker Allocation Data :"); + + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FL_FR) { + nvEvoLogInfoString(pInfoString, + " Front Left + Front Right"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_LFE) { + nvEvoLogInfoString(pInfoString, + " Low Frequency Effect"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FC) { + nvEvoLogInfoString(pInfoString, + " Front Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RL_RR) { + nvEvoLogInfoString(pInfoString, + " Rear Left + Rear Right"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RC) { + nvEvoLogInfoString(pInfoString, + " Rear Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FLC_FRC) { + nvEvoLogInfoString(pInfoString, + " Front Left Center + Front Right Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RLC_RRC) { + nvEvoLogInfoString(pInfoString, + " Rear Left Center + Rear Right Center"); + } +} + + +static void LogEdidCea861Info(NVEvoInfoStringPtr pInfoString, + const NVT_EDID_CEA861_INFO *pExt861) +{ + int vsdbIndex; + int audioIndex; + + nvEvoLogInfoString(pInfoString, + " CEA-861 revision : %d\n", + pExt861->revision); + + /* + * IEEE vendor registration IDs are tracked here: + * http://standards.ieee.org/develop/regauth/oui/oui.txt + */ + for (vsdbIndex = 0; vsdbIndex < pExt861->total_vsdb; vsdbIndex++) { + const NvU32 ieeeId = pExt861->vsdb[vsdbIndex].ieee_id; + nvEvoLogInfoString(pInfoString, + " IEEE Vendor Registration ID: %02x-%02x-%02x", + (ieeeId >> 16) & 0xFF, + (ieeeId >> 8) & 0xFF, + ieeeId & 0xFF); + } + + nvEvoLogInfoString(pInfoString, + " Supports YCbCr 4:4:4 : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_YCbCr_444) ? + "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + " Supports YCbCr 4:2:2 : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_YCbCr_422) ? + "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + " Supports Basic Audio : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_BASIC_AUDIO) ? 
+ "Yes" : "No"); + + for (audioIndex = 0; audioIndex < ARRAY_LEN(pExt861->audio); audioIndex++) { + + NvU32 byte1, byte2, byte3; + NvU8 format; + NvU8 maxChannels; + NvU8 sampleRates; + const char *formatString; + NvBool hasSampleSize; + NvBool hasMaxBitRate; + + byte1 = pExt861->audio[audioIndex].byte1; + byte2 = pExt861->audio[audioIndex].byte2; + byte3 = pExt861->audio[audioIndex].byte3; + + if (byte1 == 0) { + break; + } + + nvEvoLogInfoString(pInfoString, + " Audio Descriptor : %d", audioIndex); + + /* + * byte 1 contains the Audio Format and the maximum number + * of channels + */ + + format = ((byte1 & NVT_CEA861_AUDIO_FORMAT_MASK) >> + NVT_CEA861_AUDIO_FORMAT_SHIFT); + + formatString = GetCea861AudioFormatInfo(format, + &hasSampleSize, + &hasMaxBitRate); + + maxChannels = (byte1 & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) + 1; + + /* byte 2 contains the sample rates */ + + sampleRates = (byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK); + + /* + * byte 3 varies, depending on Audio Format; interpret + * using hasSampleSize and hasMaxBitRate + */ + + nvEvoLogInfoString(pInfoString, + " Audio Format : %s", formatString); + nvEvoLogInfoString(pInfoString, + " Maximum Channels : %d", maxChannels); + nvEvoLogInfoString(pInfoString, + " Sample Rates : %s", + GetCea861AudioSampleRateString(sampleRates)); + if (hasSampleSize) { + nvEvoLogInfoString(pInfoString, + " Sample Sizes : %s", + GetCea861AudioSampleSizeString(byte3)); + } + if (hasMaxBitRate) { + nvEvoLogInfoString(pInfoString, + " Maximum Bit Rate : %d kHz", + byte3 * 8); + } + } + + LogEdidCea861SpeakerAllocationData(pInfoString, pExt861->speaker[0].byte1); +} + +void nvLogEdidCea861InfoEvo(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + int extIndex; + + for (extIndex = 0; TRUE; extIndex++) { + const NVT_EDID_CEA861_INFO *pExt861 = + GetExt861(&pDpyEvo->parsedEdid, extIndex); + + if (pExt861 == NULL) { + break; + } + + if (pExt861->revision == NVT_CEA861_REV_NONE) { + continue; + } + + nvEvoLogInfoString(pInfoString, + "CEA-861 extension block # : %d\n", extIndex); + + LogEdidCea861Info(pInfoString, pExt861); + } +} + +/* + * HDMI 2.0 4K@60hz uncompressed RGB 4:4:4 (6G mode) is allowed if: + * + * - The GPU supports it. + * - The EDID and NVT_TIMING indicate the monitor supports it, or + * this check is overridden. + */ +NvBool nvHdmi204k60HzRGB444Allowed(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + const NvBool gpuSupports444 = pDevEvo->caps.supportsHDMI20; + + const NvBool overrideMonitorCheck = ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_HDMI2_CHECK) != 0); + + const NvBool monitorSupports444 = + (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs) && + (pParsedEdid->info.hdmiForumInfo.max_TMDS_char_rate > 0)); + + nvAssert(pParsedEdid->valid); + + return (gpuSupports444 && + (overrideMonitorCheck || monitorSupports444)); +} + +/* + * Enable or disable HDMI 2.1 VRR infoframes. The HDMI 2.1 VRR infoframe must + * be enabled before the first extended vblank after enabling VRR, or the + * display will blank. 
+ */ +void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable) +{ + NVT_EXTENDED_METADATA_PACKET_INFOFRAME empInfoFrame; + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL empCtrl; + NVHDMIPKT_TC transmitControl; + NVT_STATUS status; + + nvkms_memset(&empCtrl, NVT_INFOFRAME_CTRL_DONTCARE, + sizeof(empCtrl)); + + empCtrl.EnableVRR = enable; + + status = NvTiming_ConstructExtendedMetadataPacketInfoframe(&empCtrl, + &empInfoFrame); + + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Extended Metadata Packet InfoFrame"); + return; + } + + transmitControl = + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS); + + // Transmit the enable packet every frame, but only transmit the + // disable packet once. + if (enable) { + transmitControl |= DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _DIS); + } else { + transmitControl |= DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN); + } + + SendInfoFrame(pDispEvo, + head, + transmitControl, + (NVT_INFOFRAME_HEADER *) &empInfoFrame, + sizeof(empInfoFrame)); +} + +/* + * The HDMI library calls this function during initialization to ask the + * implementation to allocate and map a NV*71_DISP_SF_USER object. The + * appropriate class, mapping size, and subdevice ID are provided. A handle is + * generated here and passed back to the library; the same handle is provided + * in the symmetric HdmiLibRmFreeMemoryMap() function so we don't have to save + * a copy of it in nvkms's data structures. + */ +static NvBool HdmiLibRmGetMemoryMap( + NvHdmiPkt_CBHandle handle, + NvU32 dispSfUserClassId, + NvU32 dispSfUserSize, + NvU32 sd, + NvU32 *pDispSfHandle, + void **pPtr) +{ + NVDevEvoRec *pDevEvo = handle; + void *ptr = NULL; + NvU32 ret; + NvU32 dispSfHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (dispSfHandle == 0) { + return FALSE; + } + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + dispSfUserClassId, + NULL); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + return FALSE; + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + 0, + dispSfUserSize, + &ptr, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + return FALSE; + } + + *pDispSfHandle = dispSfHandle; + *pPtr = ptr; + + return TRUE; +} + +static void HdmiLibRmFreeMemoryMap( + NvHdmiPkt_CBHandle handle, + NvU32 sd, + NvU32 dispSfHandle, + void *ptr) +{ + NVDevEvoRec *pDevEvo = handle; + NvU32 ret; + + if (ptr != NULL) { + nvAssert(dispSfHandle != 0); + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + ptr, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(ret == NVOS_STATUS_SUCCESS); + } + } + + if (dispSfHandle) { + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(ret == NVOS_STATUS_SUCCESS); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + } +} + +/* Wrapper around RmControl for 0073 (NV04_DISPLAY_COMMON) object. 
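+ * Note that the subDevice argument is ignored: the control call is issued
+ * against the device-wide displayCommonHandle, and any per-subdevice
+ * addressing is presumably carried in the control parameters themselves.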
 */
+static NvBool HdmiLibRmDispControl(
+    NvHdmiPkt_CBHandle handle,
+    NvU32 subDevice,
+    NvU32 cmd,
+    void *pParams,
+    NvU32 paramSize)
+{
+    NVDevEvoRec *pDevEvo = handle;
+    NvU32 ret;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         cmd,
+                         pParams,
+                         paramSize);
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+static void HdmiLibAcquireMutex(
+    NvHdmiPkt_CBHandle handle)
+{
+    /* The HDMI library only executes when nvkms calls it, and nvkms will only
+     * call it while holding the nvkms lock. So there is no concurrency to
+     * protect against with this mutex. */
+}
+
+static void HdmiLibReleaseMutex(
+    NvHdmiPkt_CBHandle handle)
+{
+}
+
+static void *HdmiLibMalloc(NvHdmiPkt_CBHandle handle, NvLength len)
+{
+    return nvAlloc(len);
+}
+
+static void HdmiLibFree(NvHdmiPkt_CBHandle handle, void *p)
+{
+    nvFree(p);
+}
+
+static void HdmiLibPrint(
+    NvHdmiPkt_CBHandle handle,
+    const char *format, ...)
+{
+    NVDevEvoRec *pDevEvo = handle;
+
+    va_list ap;
+    va_start(ap, format);
+    /* The HDMI library doesn't have log levels, but currently only logs in
+     * debug builds. It's pretty chatty (e.g., it prints "Initialize Success"
+     * when it inits), so hardcode it to INFO level for now. */
+    nvVEvoLog(EVO_LOG_INFO, pDevEvo->gpuLogIndex, format, ap);
+    va_end(ap);
+}
+
+static void HdmiLibAssert(
+    NvHdmiPkt_CBHandle handle,
+    NvBool expr)
+{
+    /*
+     * This interface isn't the best... I hope you have a kernel debugger if
+     * this fires, because the file and line number will always be this one.
+     */
+    nvAssert(expr);
+}
+
+static const NVHDMIPKT_CALLBACK HdmiLibCallbacks =
+{
+    .rmGetMemoryMap = HdmiLibRmGetMemoryMap,
+    .rmFreeMemoryMap = HdmiLibRmFreeMemoryMap,
+    .rmDispControl2 = HdmiLibRmDispControl,
+    .acquireMutex = HdmiLibAcquireMutex,
+    .releaseMutex = HdmiLibReleaseMutex,
+    .setTimeout = NULL, /* optional */
+    .checkTimeout = NULL, /* optional */
+    .malloc = HdmiLibMalloc,
+    .free = HdmiLibFree,
+    .print = HdmiLibPrint,
+    .assert = HdmiLibAssert,
+};
+
+void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo)
+{
+    NvHdmiPkt_DestroyLibrary(pDevEvo->hdmiLib.handle);
+}
+
+NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo)
+{
+    pDevEvo->hdmiLib.handle =
+        NvHdmiPkt_InitializeLibrary(pDevEvo->dispClass,
+                                    pDevEvo->numSubDevices,
+                                    pDevEvo, // callback handle
+                                    &HdmiLibCallbacks,
+                                    0,     // not used because we set
+                                    NULL); // NVHDMIPKT_RM_CALLS_INTERNAL=0
+
+    if (pDevEvo->hdmiLib.handle == NVHDMIPKT_INVALID_HANDLE) {
+        pDevEvo->hdmiLib.handle = NULL;
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*
+ * Call the HDMI library to "assess" the link. This basically does link
+ * training to see what the maximum lane configuration is. We do this when the
+ * monitor is connected after reading the EDID, so we can validate modes
+ * against the link capabilities.
+ *
+ * Returns true if the link was assessed to be capable of FRL, and false
+ * otherwise.
+ */
+NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NVHDMIPKT_RESULT ret;
+    const NvU32 displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+
+    /* HDMI dpys are never dynamic dpys, so their connector should have a
+     * valid dpyId.
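+     *
+     * Usage sketch (hypothetical caller, for illustration only):
+     *
+     *     if (nvHdmiFrlAssessLink(pDpyEvo)) {
+     *         // srcCaps/sinkCaps are now cached on pDpyEvo->hdmi and
+     *         // modes that require FRL can be validated against them
+     *     }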
*/ + nvAssert(displayId != 0); + nvAssert(pDpyEvo->parsedEdid.valid); + + ret = NvHdmi_AssessLinkCapabilities(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + displayId, + &pDpyEvo->parsedEdid.info, + &pDpyEvo->hdmi.srcCaps, + &pDpyEvo->hdmi.sinkCaps); + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + return FALSE; + } + + return pDpyEvo->hdmi.sinkCaps.linkMaxFRLRate != HDMI_FRL_DATA_RATE_NONE; +} + +/* Determine if HDMI FRL is needed to drive the given timings on the given dpy. */ +static NvBool TimingsNeedFRL(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + /* Can't use FRL if the display hardware doesn't support it */ + if (!pDevEvo->hal->caps.supportsHDMIFRL) { + return FALSE; + } + + /* Can only use FRL for HDMI devices. */ + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return FALSE; + } + + /* Can only use FRL if the HDMI sink supports it. */ + if (!pDpyEvo->parsedEdid.valid || + !pDpyEvo->parsedEdid.info.hdmiForumInfo.max_FRL_Rate) { + return FALSE; + } + + /* + * For HDMI, maxSingleLinkPixelClockKHz is the maximum non-FRL rate. + * If the rate is higher than that, try to use FRL for the mode. + */ + return pTimings->pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz; +} + +NvBool nvHdmiFrlQueryConfig( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + NVHwModeTimingsEvo *pHwTimings, + const struct NvKmsModeValidationParams *pValidationParams) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + HDMI_VIDEO_TRANSPORT_INFO videoTransportInfo = { }; + HDMI_QUERY_FRL_CLIENT_CONTROL clientControl = { }; + const NVT_TIMING *pNvtTiming; + NVT_TIMING nvtTiming = { }; + NVHDMIPKT_RESULT ret; + + if (!TimingsNeedFRL(pDpyEvo, pHwTimings)) { + return TRUE; + } + + /* See if we can find an NVT_TIMING for this mode from the EDID. */ + pNvtTiming = nvFindEdidNVT_TIMING(pDpyEvo, pModeTimings, pValidationParams); + + if (pNvtTiming == NULL) { + /* + * No luck finding this mode in the EDID. + * + * Construct enough of an NVT_TIMING for the hdmi library, based on the + * pHwTimings mode. + * + * The HDMI library's hdmiQueryFRLConfigC671 uses: + * - pVidTransInfo->pTiming->pclk + * - pVidTransInfo->pTiming->HTotal + * - pVidTransInfo->pTiming->HVisible + * - pVidTransInfo->pTiming->VVisible + * + * This is also used, although we don't have a CEA format so we just + * set it to 0: + * - NVT_GET_CEA_FORMAT(pVidTransInfo->pTiming->etc.status) + */ + + /* Convert from KHz to 10KHz; round up for the purposes of determining a + * minimum FRL rate. */ + nvtTiming.pclk = (pHwTimings->pixelClock + 9) / 10; + nvtTiming.HVisible = pHwTimings->rasterBlankStart.x - + pHwTimings->rasterBlankEnd.x; + nvtTiming.HTotal = pHwTimings->rasterSize.x; + nvtTiming.VVisible = pHwTimings->rasterBlankStart.y - + pHwTimings->rasterBlankEnd.y; + nvtTiming.etc.status = 0; + + pNvtTiming = &nvtTiming; + } + + videoTransportInfo.pTiming = pNvtTiming; + /* + * pTimings->pixelDepth isn't assigned yet at this point in mode + * validation, so we can't use that. + * This matches the non-DP default assigned later in + * nvConstructHwModeTimingsEvo(). + * + * TODO: we should select a higher depth by default and downgrade if not + * possible. + */ + videoTransportInfo.bpc = HDMI_BPC8; + /* TODO: support YUV/YCbCr 444 and 422 packing modes. 
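+     * For now, the switch below produces only two packings: RGB for
+     * non-YUV420 modes and YCbCr420 for HW YUV420 modes.
+     *
+     * As a worked example of the pclk conversion above: a 593407 kHz
+     * pixel clock becomes (593407 + 9) / 10 = 59341 in 10 kHz units,
+     * i.e. the value is rounded up, never down.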
*/ + switch (pModeTimings->yuv420Mode) { + case NV_YUV420_MODE_NONE: + videoTransportInfo.packing = HDMI_PIXEL_PACKING_RGB; + break; + case NV_YUV420_MODE_SW: + /* + * Don't bother implementing this with FRL. + * HDMI FRL and HW YUV420 support were both added in nvdisplay 4.0 + * hardware, so if the hardware supports FRL it should support + * YUV420_MODE_HW. + */ + return FALSE; + case NV_YUV420_MODE_HW: + videoTransportInfo.packing = HDMI_PIXEL_PACKING_YCbCr420; + break; + } + /* TODO: implement 2head1or+FRL */ + videoTransportInfo.bDualHeadMode = FALSE; + + clientControl.option = HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY; + + if (pValidationParams->forceDsc) { + clientControl.enableDSC = TRUE; + } + if (pValidationParams->dscOverrideBitsPerPixelX16 != 0) { + clientControl.forceBppx16 = TRUE; + clientControl.bitsPerPixelX16 = + pValidationParams->dscOverrideBitsPerPixelX16; + } + + ret = NvHdmi_QueryFRLConfig(pDevEvo->hdmiLib.handle, + &videoTransportInfo, + &clientControl, + &pDpyEvo->hdmi.srcCaps, + &pDpyEvo->hdmi.sinkCaps, + &pHwTimings->hdmiFrlConfig); + + return ret == NVHDMIPKT_SUCCESS; +} + +void nvHdmiFrlClearConfig(NVDispEvoRec *pDispEvo, NvU32 activeRmId) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + NVHDMIPKT_RESULT ret; + ret = NvHdmi_ClearFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, activeRmId); + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + } +} + +void nvHdmiFrlSetConfig(NVDispEvoRec *pDispEvo, NvU32 head) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + HDMI_FRL_CONFIG *pFrlConfig = &pHeadState->timings.hdmiFrlConfig; + NVHDMIPKT_RESULT ret; + NvU32 retries = 0; + const NvU32 MAX_RETRIES = 5; + + if (pFrlConfig->frlRate == HDMI_FRL_DATA_RATE_NONE) { + return; + } + + nvAssert(pHeadState->activeRmId != 0); + + do { + ret = NvHdmi_SetFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + NV_FALSE /* bFakeLt */, + pFrlConfig); + } while (ret != NVHDMIPKT_SUCCESS && retries++ < MAX_RETRIES); + + if (ret != NVHDMIPKT_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI FRL link training failed."); + + /* + * Link training failed even after retrying. Since link training + * happens after we've already committed to a modeset and failing is + * not an option, try one last time with the 'bFakeLt' parameter + * set, which should enable enough of the display hardware to + * prevent hangs when we attempt to drive the OR with + * PROTOCOL_HDMI_FRL. + */ + ret = NvHdmi_SetFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + NV_TRUE /* bFakeLt */, + pFrlConfig); + + if (ret != NVHDMIPKT_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "HDMI FRL fallback link training failed."); + } + } + + if (retries != 0) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN, + "HDMI FRL link training retried %d times.", + retries); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c new file mode 100644 index 0000000..184d7ce --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c @@ -0,0 +1,1125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * nvkms-hw-states.c - Defines how to set up EVO hardware for the given usage. + * Used by the EVO state machines in nv_evo_states.c. + */ + +#include "nvkms-types.h" +#include "nvkms-framelock.h" +#include "nvkms-evo-states.h" + +/* + * Listed below are the different locking topologies for scan lock + * + * ------ (raster lock) + * ====== (frame lock) + * + * Config NoLock: No locking relationship between the heads + * + * +--------+ +------------+ + * | Head A | | Heads B... | + * +--------+ +------------+ + * + * + * Config LockHeads: Supports raster lock across two or more heads. + * + * +--------+ +--------+ + * | Head A | -- Internal -+--> | Head B | + * +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Config FrameLockClient: Supports frame lock clients across GPUs/systems, + * one head per GPU + * + * +--------+ +-------------+ + * | Gsync | ==============> | Head A/B... | + * +--------+ +-------------+ + * ^ | + * +-------- External ----------+ + * + * + * Config FrameLockServer: Same as above, but generates timing for the + * frame lock network + * + * +--------+ +-------------+ + * | Gsync | | Head A/B... | + * +--------+ +-------------+ + * ^ | + * +-------------- External ---+ + * + * + * Config FrameLockClientManyHeads: Support frame lock across GPUs/systems, + * two or more heads per GPU + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | =====> | Head A | == Internal =+==> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +==> | Head C | + * | +--------+ + * . ... + * + * Config FrameLockServerManyHeads: Same as above, only this head + * is driving timing for the frame lock network. + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | | Head A | == Internal =+==> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +==> | Head C | + * | +--------+ + * . ... + * + * Config LockHeadsFrameLockClient: Frame lock enabled on one head of a + * GPU where two or more heads are raster-locked. + * Config LockHeadsFrameLockClientManyHeads: Same, but two or more heads are + * enabled. 
+ * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | =====> | Head A | -- Internal -+--> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Config LockHeadsFrameLockServer: Frame lock enabled on one head of a GPU + * where two or more heads are raster-locked, and this head is driving timing + * for the frame lock network. + * Config LockHeadsFrameLockServerManyHeads: Same, but one head is frame + * lock server and the others are frame lock clients. + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | | Head A | -- Internal -+--> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Configs SliPrimary, SliSecondary, SliLastSecondary: Supports SLI. + * + * +-----------------+ + * +--- | Head A, subdev0 | + * | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config LockHeadsSli{Primary,Secondary,LastSecondary}: Supports SLI with two + * or more heads rasterlocked (primary or any secondary, independently). + * + * +-----------------+ +---------------------+ + * +--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * The SliSecondary states also come in a FrameLockClient variant; this means that + * they have framelock enabled in the RM (for reporting purposes; they still + * get their sync from the SLI primary). + * + * + * Config SliPrimaryFrameLockClient: Supports frame lock across GPU + * groups/systems with SLI + * + * + * +===============================+ + * I V + * +-------+ +-----------------+ + * | Gsync | <-----------+--- | Head A, subdev0 | + * +-------+ | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryFrameLockServer: Same as above, only this SLI head drives + * timing for the frame lock network. + * + * +-------+ +-----------------+ + * | Gsync | <-----------+--- | Head A, subdev0 | + * +-------+ | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryLockHeadsFrameLockClient: Supports frame lock across GPU + * groups/systems with SLI, with two or more heads on a GPU rasterlocked + * together. 
+ * + * + * +======================+ + * I V + * +-------+ +-----------------+ +---------------------+ + * | Gsync | <--+--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * +-------+ | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryLockHeadsFrameLockServer: Same as above, only this SLI head + * drives timing for the frame lock network. + * + * +-------+ +-----------------+ +---------------------+ + * | Gsync | <--+--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * +-------+ | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Note that for the SLI and framelock topologies we set the external fliplock + * pin. Changing the pin causes a raster reset for some reason, so we want to + * change the pin here, prior to enabling flip lock. + */ + +NvBool nvEvoLockHWStateNoLock(NVDispEvoPtr pDispEvo, NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Disable scan lock on this head */ + pHC->serverLock = NV_EVO_NO_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + pHC->clientLock = NV_EVO_NO_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + pHC->clientLockoutWindow = 0; + + /* Reset the flip lock pin to internal, if not needed for SLI */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForSliHeadMask, head)) { + pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + } + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + /* Disable framelock */ + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockExtRefClkMaskAssy &= ~(1 << head); + + /* Reset SLI state */ + pEvoSubDev->sliRasterLockServerMask &= ~(1 << head); + pEvoSubDev->sliRasterLockClientMask &= ~(1 << head); + + pHC->lockChainPosition = 0; + } + + pEvoSubDev->frameLockHouseSync = FALSE; + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i, serverHead = 0; + + nvAssert(pHeads != NULL && + pHeads[0] != NV_INVALID_HEAD && + pHeads[1] != NV_INVALID_HEAD); + + /* First, disable all scan locking */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Make the first head a raster lock server on the internal pin */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + } else { + /* Make all the other heads raster lock clients on the internal pin */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = 
NV_EVO_LOCK_PIN_INTERNAL(serverHead); + pHC->clientLockoutWindow = 2; + } + } + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) + +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up for the FRAME_LOCK_SERVER state */ + if (!nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + /* Additionally enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* disable all scan locking */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + /* Enable the first head as a raster lock server on the external pin */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + + /* Set up the first head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockClientManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up as a frame lock server with two heads */ + if (!nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + /* Additionally enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServerManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = 
nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i, serverHead = 0; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Disable all scan lock */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Make the first head a frame lock server on the internal pin. + * The first head is guaranteed to be framelock server or one of + * the requested framelock clients here + */ + nvAssert(nvIsFramelockableHead(pDispEvo, head)); + + pHC->serverLock = NV_EVO_FRAME_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + /* If two or more heads are framelocked, but at least one head + * cannot be framelocked with the others, that head will be in + * the list of pDpys, but must not be framelocked, so skip it. + */ + + if (!nvIsFramelockableHead(pDispEvo, head)) { + continue; + } + if (i == 1) { + /* Make the second head a raster lock server on the external pin */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + } + /* Make all nonzero heads a frame lock client on the internal pin */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(serverHead); + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + /* Set up all heads to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + } + + return TRUE; +} +NvBool nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeadsFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i, serverHead = 0; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Disable all scan lock */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Make the first head a raster lock server on the internal pin */ + if (i == 0) { + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + if (i == 1) { + /* Make the second head a raster lock server on the external pin */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + } + + /* Make all nonzero 
heads raster lock clients on the internal pin */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(serverHead); + pHC->clientLockoutWindow = 2; + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + /* Set up all heads to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up for the LOCK_HEADS_FRAME_LOCK_SERVER state */ + if (!nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + /* Additionally, enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +static void SetLockChainPosition(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoHeadControlPtr pHC) +{ + if (pDispEvo->displayOwner == pEvoSubDev->subDeviceInstance) { + /* + * When we own display (even if subDeviceInstance != 0), set + * lockChainPosition of 0, since we are actually scanning out pixels + * (this is the case for all SLI Mosaic and non-Mosaic display owners). 
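+         *
+         * Worked example (illustrative): in a three-GPU chain where
+         * subdevice 0 owns the display, subdevice 0 takes position 0
+         * here, while subdevices 1 and 2 take positions 1 and 2 in the
+         * else branch below.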
+ */ + pHC->lockChainPosition = 0; + } else { + /* + * If we don't own display, just assume the video bridge chain is + * linear + */ + pHC->lockChainPosition = pEvoSubDev->subDeviceInstance; + } +} + +NvBool nvEvoLockHWStateSliPrimary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + + pEvoSubDev->sliRasterLockServerMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = pEvoSubDev->sliServerLockPin; + unsigned int i; + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* LockHeads sets up server lock on the first head, client lock on the rest */ + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads != NULL && + pHeads[0] != NV_INVALID_HEAD && + pHeads[1] != NV_INVALID_HEAD); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* + * The first head is configured as rasterlock server on its + * internal pin. It serves as the server for everything else on + * this GPU, as well as (indirectly though another head) everything + * in the SLI group. + */ + pEvoSubDev->sliRasterLockServerMask |= 1 << head; + } else { + if (i == 1) { + /* + * The first rasterlock client on this GPU also serves as server + * for the rest of the SLI device + */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + } + + /* All of these heads should inherit extrefclk from the server */ + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + } + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin serverPin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 
0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || + serverPin == NV_EVO_LOCK_PIN_ERROR || + flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Server lock to be consumed by GPUs further down the chain */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = serverPin; + + /* Client lock to sync to GPUs further up the chain */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + if (!nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[0]; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Only set up client lock; no more GPUs to consume server lock */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + if (!nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin serverPin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 
0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || serverPin == NV_EVO_LOCK_PIN_ERROR || + flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* + * first head (chosen arbitrarily): server lock to be consumed by + * GPUs further down the chain + */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = serverPin; + } + + /* + * Client lock all heads to the external SLI pin. Note that we cannot + * client lock one head and set up internal locking for the other + * because of bug 405996. + */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + if (!nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[i]; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* + * Client lock all heads to the external SLI pin. Note that we cannot + * client lock one head and set up internal locking for the other + * because of bug 405996. 
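+         *
+         * Unlike the non-"Last" secondary states, no server lock is
+         * programmed in this loop: this GPU ends the video bridge
+         * chain, so there is no GPU further down to consume a server
+         * lock signal.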
+ */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + if (!nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[i]; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (flPin == NV_EVO_LOCK_PIN_ERROR || + !nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + if (!nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Enable first head as framelock client */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + } + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (flPin == 
NV_EVO_LOCK_PIN_ERROR || + !nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + if (!nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Enable first head as framelock client */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + } + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c new file mode 100644 index 0000000..3e623a6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c @@ -0,0 +1,391 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-lut.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-dma.h"
+#include "nvkms-utils.h"
+#include "nvos.h"
+
+#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */
+
+static void FreeLutSurfaceEvoInVidmem(NVLutSurfaceEvoPtr pSurfEvo)
+{
+    NVDevEvoPtr pDevEvo;
+
+    if (pSurfEvo == NULL) {
+        return;
+    }
+
+    pDevEvo = pSurfEvo->pDevEvo;
+
+    nvRmEvoUnMapVideoMemory(pDevEvo, pSurfEvo->handle,
+                            pSurfEvo->subDeviceAddress);
+
+    /* Free display context dmas for the surface, if any */
+    nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma);
+
+    /* Free the surface */
+    if (pSurfEvo->handle) {
+        NvU32 result;
+
+        result = nvRmApiFree(nvEvoGlobal.clientHandle,
+                             pDevEvo->deviceHandle, pSurfEvo->handle);
+        if (result != NVOS_STATUS_SUCCESS) {
+            nvAssert(!"Freeing LUT surface failed");
+        }
+
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
+                           pSurfEvo->handle);
+        pSurfEvo->handle = 0;
+    }
+
+    nvFree(pSurfEvo);
+}
+
+static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
+{
+    NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
+    NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
+    NvU32 attr = 0, attr2 = 0;
+    NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
+                       NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
+    NvU64 size = 0, alignment = 4096;
+
+    NVLutSurfaceEvoPtr pSurfEvo;
+
+    pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
+    if (pSurfEvo == NULL) {
+        return NULL;
+    }
+
+    pSurfEvo->pDevEvo = pDevEvo;
+
+    size = (sizeof(NVEvoLutDataRec) + 63) & ~63;
+
+    pSurfEvo->size = size;
+
+    pSurfEvo->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
+
+    if (pSurfEvo->handle == 0) {
+        goto fail;
+    }
+
+    attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr);
+    attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, attr2);
+
+    alignment = NV_MAX(alignment, NV_EVO_SURFACE_ALIGNMENT);
+    if (alignment != 0) {
+        allocFlags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
+    }
+
+    memAllocParams.owner = NVKMS_RM_HEAP_ID;
+    memAllocParams.type = NVOS32_TYPE_IMAGE;
+    memAllocParams.size = size;
+    memAllocParams.attr = attr;
+    memAllocParams.attr2 = attr2;
+    memAllocParams.flags = allocFlags;
+    memAllocParams.alignment = alignment;
+
+    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
+                       pDevEvo->deviceHandle,
+                       pSurfEvo->handle,
+                       NV01_MEMORY_LOCAL_USER,
+                       &memAllocParams);
+
+    /* If we failed the allocation above, abort */
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
+        pSurfEvo->handle = 0;
+
+        goto fail;
+    }
+
+    /* Allocate a display context dma */
+    pSurfEvo->dispCtxDma =
+        nvRmEvoAllocateAndBindDispContextDMA(pDevEvo,
+                                             pSurfEvo->handle,
+                                             NvKmsSurfaceMemoryLayoutPitch,
+                                             pSurfEvo->size - 1);
+
+    if (!pSurfEvo->dispCtxDma) {
+        goto fail;
+    }
+
+    /* Map the surface for the CPU */
+    if (!nvRmEvoMapVideoMemory(pSurfEvo->pDevEvo,
+                               pSurfEvo->handle, pSurfEvo->size,
+                               pSurfEvo->subDeviceAddress,
+                               SUBDEVICE_MASK_ALL)) {
+        goto fail;
+    }
+
+    return pSurfEvo;
+
+ fail:
+    /* An error occurred -- free the surface */
+    FreeLutSurfaceEvoInVidmem(pSurfEvo);
+
+
return NULL; + +} + +static void FreeLutSurfaceEvoInSysmem(NVLutSurfaceEvoPtr pSurfEvo) +{ + NVDevEvoPtr pDevEvo; + + if (pSurfEvo == NULL) { + return; + } + + pDevEvo = pSurfEvo->pDevEvo; + + /* Free display context dmas for the surface, if any */ + nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma); + + /* Free the surface */ + if (pSurfEvo->handle) { + NvU32 result; + + if (pSurfEvo->subDeviceAddress[0] != NULL) { + /* + * SOC display devices should only have one subdevice + * (and therefore it is safe to unmap only subDeviceAddress[0]) + * for reasons described in AllocLutSurfaceEvoInSysmem + */ + nvAssert(pDevEvo->numSubDevices == 1); + + result = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfEvo->handle, + pSurfEvo->subDeviceAddress[0], + 0); + if (result != NVOS_STATUS_SUCCESS) { + nvAssert(!"Unmapping LUT surface failed"); + } + pSurfEvo->subDeviceAddress[0] = NULL; + } + + result = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pSurfEvo->handle); + if (result != NVOS_STATUS_SUCCESS) { + nvAssert(!"Freeing LUT surface failed"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle); + } + + nvFree(pSurfEvo); +} + +static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo) +{ + NvU32 memoryHandle = 0; + void *pBase = NULL; + NvU64 size = 0; + NVLutSurfaceEvoPtr pSurfEvo; + + pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo)); + if (pSurfEvo == NULL) { + return NULL; + } + + pSurfEvo->pDevEvo = pDevEvo; + + size = (sizeof(NVEvoLutDataRec) + 63) & ~63; + + pSurfEvo->size = size; + + memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (memoryHandle == 0) { + goto fail; + } + + /* Allocate the LUT memory from sysmem */ + if (!nvRmAllocSysmem(pDevEvo, memoryHandle, NULL, &pBase, size, + NVKMS_MEMORY_ISO)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to allocate LUT memory from sysmem"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + goto fail; + } + + pSurfEvo->handle = memoryHandle; + + /* Allocate and bind a display context dma */ + pSurfEvo->dispCtxDma = + nvRmEvoAllocateAndBindDispContextDMA(pDevEvo, + pSurfEvo->handle, + NvKmsSurfaceMemoryLayoutPitch, + pSurfEvo->size - 1); + if (!pSurfEvo->dispCtxDma) { + goto fail; + } + + /* + * AllocLutSurfaceEvoInSysmem() will only be called if + * pDevEvo->requiresAllAllocationsInSysmem is TRUE. NVKMS will only set this + * cap bit for SOC display devices, and these devices should only have one + * subdevice. 
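+     * (Hence only subDeviceAddress[0] is populated here, and
+     * FreeLutSurfaceEvoInSysmem() correspondingly unmaps only
+     * subDeviceAddress[0].)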
+ */ + nvAssert(pDevEvo->numSubDevices == 1); + pSurfEvo->subDeviceAddress[0] = pBase; + + return pSurfEvo; + + fail: + /* An error occurred -- free the surface */ + FreeLutSurfaceEvoInSysmem(pSurfEvo); + + return NULL; +} + +static void FreeLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo) +{ + NVDevEvoPtr pDevEvo; + + if (pSurfEvo == NULL) { + return; + } + + pDevEvo = pSurfEvo->pDevEvo; + + if (pDevEvo->requiresAllAllocationsInSysmem) { + FreeLutSurfaceEvoInSysmem(pSurfEvo); + } else { + FreeLutSurfaceEvoInVidmem(pSurfEvo); + } +} + +static NVLutSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->requiresAllAllocationsInSysmem) { + return AllocLutSurfaceEvoInSysmem(pDevEvo); + } else { + return AllocLutSurfaceEvoInVidmem(pDevEvo); + } +} + +NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, dispIndex, i; + + for (head = 0; head < pDevEvo->numHeads; head++) { + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.head[head].LUT); i++) { + pDevEvo->lut.head[head].LUT[i] = AllocLutSurfaceEvo(pDevEvo); + + if (pDevEvo->lut.head[head].LUT[i] == NULL) { + nvFreeLutSurfacesEvo(pDevEvo); + return FALSE; + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + // No palette has been loaded yet, so disable the LUT. + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate = FALSE; + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = FALSE; + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = FALSE; + } + } + + if (pDevEvo->hal->caps.needDefaultLutSurface) { + pDevEvo->lut.defaultLut = AllocLutSurfaceEvo(pDevEvo); + if (pDevEvo->lut.defaultLut == NULL) { + nvFreeLutSurfacesEvo(pDevEvo); + return FALSE; + } + + pDevEvo->hal->InitDefaultLut(pDevEvo); + } + + return TRUE; +} + +void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head, i, dispIndex; + NVDispEvoPtr pDispEvo; + + /* Cancel any queued LUT update timers */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + nvCancelLutUpdateEvo(pDispEvo, head); + } + } + + /* wait for any outstanding LUT updates before freeing the surface */ + if (pDevEvo->core) { + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + } + + if (pDevEvo->lut.defaultLut != NULL) { + FreeLutSurfaceEvo(pDevEvo->lut.defaultLut); + pDevEvo->lut.defaultLut = NULL; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.head[head].LUT); i++) { + if (pDevEvo->lut.head[head].LUT[i] != NULL) { + FreeLutSurfaceEvo(pDevEvo->lut.head[head].LUT[i]); + pDevEvo->lut.head[head].LUT[i] = NULL; + } + } + } +} + +void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo, + const NVEvoLutDataRec *pLUTBuffer, + NVDispEvoPtr pDispEvo) +{ + const NvU32* data = (const NvU32*)pLUTBuffer; + size_t size = sizeof(*pLUTBuffer); + const int sd = pDispEvo->displayOwner; + NvU32 *dst; + const NvU32 *src; + int dword; + + if (pSurfEvo == NULL) { + nvAssert(pSurfEvo); + return; + } + + nvAssert(pSurfEvo->subDeviceAddress[sd]); + + /* The size to copy should not be larger than the surface. */ + nvAssert(size <= pSurfEvo->size); + + /* The source, destination, and size should be 4-byte aligned. 
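+     * Copying in whole 32-bit words rather than with a byte-wise copy is
+     * presumably deliberate, since the destination may be a CPU mapping of
+     * vidmem; the asserts below guarantee that the word-at-a-time copy in
+     * the loop that follows is well-formed.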
*/ + nvAssert((((NvUPtr)data) & 0x3) == 0); + nvAssert((((NvUPtr)pSurfEvo->subDeviceAddress[sd]) & 0x3) == 0); + nvAssert((size % 4) == 0); + + src = data; + dst = (NvU32*)pSurfEvo->subDeviceAddress[sd]; + + for (dword = 0; dword < (size/4); dword++) { + *(dst++) = *(src++); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c new file mode 100644 index 0000000..3b32f94 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c @@ -0,0 +1,1986 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-modepool.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-hdmi.h" +#include "nvkms-utils.h" +#include "nvkms-3dvision.h" +#include "nvkms-evo.h" +#include "nvkms-ioctl.h" + +#include "nv_mode_timings_utils.h" +#include "nv_vasprintf.h" + +#include "nvkms-prealloc.h" + +#include "nvkms-api.h" + +typedef struct { + enum NvKmsModeSource source; + NvBool patchedStereoTimings; +} EvoValidateModeFlags; + +static NvBool +ValidateModeIndexEdid(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex); +static NvBool +ValidateModeIndexVesa(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex); + +static void LogModeValidationEnd(const NVDispEvoRec *pDispEvo, + NVEvoInfoStringPtr pInfoString, + const char *failureReasonFormat, ...) 
+ __attribute__ ((format (printf, 3, 4))); + +static NvBool ConstructModeTimingsMetaData( + NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsMode *pKmsMode, + EvoValidateModeFlags *pFlags, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState); + +static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs, + struct NvKmsUsageBounds *pModeUsage); + +#define NV_MAX_MODE_NAME_LEN 64 +#define NV_MAX_MODE_DESCRIPTION_LEN 128 + +/* A single frequency, at its longest, will have the format: "aaa.bbb" */ +#define NV_MAX_FREQUENCY_STRING_LEN 8 + +/* A range element, at its longest, will have the format: "aaa.bbb-ccc.ddd, " */ +#define NV_MAX_RANGE_ELEMENT_STRING_LEN 18 +#define NV_MAX_RANGE_STRING_LEN \ + (NV_MAX_RANGE_ELEMENT_STRING_LEN * NVKMS_MAX_VALID_SYNC_RANGES) + + +void +nvValidateModeIndex(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeIndexRequest *pRequest, + struct NvKmsValidateModeIndexReply *pReply) +{ + const struct NvKmsModeValidationParams *pParams = &pRequest->modeValidation; + const NvU32 requestedModeIndex = pRequest->modeIndex; + NVEvoInfoStringRec infoString; + NvU32 currentModeIndex = 0; + NvBool done; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + nvInitInfoString(&infoString, nvKmsNvU64ToPointer(pRequest->pInfoString), + pRequest->infoStringSize); + + done = ValidateModeIndexEdid(pDpyEvo, pParams, pReply, &infoString, + requestedModeIndex, ¤tModeIndex); + if (done) { + goto out; + } + + done = ValidateModeIndexVesa(pDpyEvo, pParams, pReply, &infoString, + requestedModeIndex, ¤tModeIndex); + if (done) { + goto out; + } + + pReply->end = 1; + return; + +out: + if (pRequest->infoStringSize > 0) { + /* Add 1 for the final '\0' */ + nvAssert((infoString.length + 1) <= pRequest->infoStringSize); + pReply->infoStringLenWritten = infoString.length + 1; + } +} + + +void +nvValidateModeEvo(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeRequest *pRequest, + struct NvKmsValidateModeReply *pReply) +{ + NVEvoInfoStringRec infoString; + struct NvKmsMode kmsMode = { + .timings = pRequest->mode.timings, + }; + EvoValidateModeFlags evoFlags; + NVDispHeadInfoFrameStateEvoRec dummyInfoFrameState; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + if (!ConstructModeTimingsMetaData(pDpyEvo, + &pRequest->modeValidation, + &kmsMode, + &evoFlags, + &dummyInfoFrameState)) { + pReply->valid = FALSE; + return; + } + + nvInitInfoString(&infoString, nvKmsNvU64ToPointer(pRequest->pInfoString), + pRequest->infoStringSize); + + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &evoFlags, + &pRequest->modeValidation, + &infoString, + &pReply->validSyncs, + &pReply->modeUsage); + + if (infoString.length > 0) { + /* Add 1 for the final '\0' */ + nvAssert((infoString.length + 1) <= pRequest->infoStringSize); + pReply->infoStringLenWritten = infoString.length + 1; + } +} + + +/*! + * Determine whether this mode is HDMI 3D by checking the HDMI 3D + * support map parsed from the CEA-861 EDID extension. + * + * Currently only frame packed 3D modes are supported, as we rely on + * Kepler's HW support for this mode. + */ +static NvBool GetHdmi3DValue(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + /* This should only be used in paths where we have a valid parsed EDID. 
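+ *
+ * The check below requires all of the following: the client requested
+ * NVKMS_STEREO_HDMI_3D, the timing is a CEA-861 timing (so it carries
+ * a VIC), the dpy reports HDMI 3D support, and the EDID's HDMI 3D
+ * support map lists that VIC with the frame-packed structure bit set.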
*/ + + nvAssert(pDpyEvo->parsedEdid.valid); + + if ((pParams->stereoMode == NVKMS_STEREO_HDMI_3D) && + (NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == + NVT_TYPE_EDID_861ST) && + nvDpyEvoSupportsHdmi3D(pDpyEvo)) { + + const NVT_EDID_INFO *pInfo = &pDpyEvo->parsedEdid.info; + int i; + + for (i = 0; i < pInfo->Hdmi3Dsupport.total; i++) { + HDMI3DDETAILS hdmi3DMap = pInfo->Hdmi3Dsupport.map[i]; + NvU32 vic = NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status); + if ((vic == hdmi3DMap.Vic) && + (hdmi3DMap.StereoStructureMask & + NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK)) { + return TRUE; + } + } + } + + return FALSE; +} + +/* + * For Kepler HW HDMI 1.4 frame packed stereo, HW combines two flips + * into a single top-down double-height frame, and it needs a + * doubled refresh rate to accommodate this. + */ +static void UpdateNvModeTimingsForHdmi3D(NvModeTimings *pModeTimings, + NvBool enableHdmi3D) +{ + if (enableHdmi3D) { + pModeTimings->pixelClockHz *= 2; + pModeTimings->RRx1k *= 2; + } else { + nvAssert((pModeTimings->pixelClockHz % 2) == 0); + pModeTimings->pixelClockHz /= 2; + + nvAssert((pModeTimings->RRx1k % 2) == 0); + pModeTimings->RRx1k /= 2; + } +} + +/* + * DP 1.3 decimated YUV 4:2:0 mode is required if: + * + * - The GPU and monitor both support it. + * - Either the monitor doesn't support RGB 4:4:4 scanout of this mode, or + * the user prefers YUV 4:2:0 scanout when possible. + */ +static NvBool DpYuv420Required(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + const NvBool monitorSupports444 = + IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs); + + if (!pDevEvo->caps.supportsDP13) { + // The GPU doesn't support YUV420. + return FALSE; + } + + if (monitorSupports444) { + // The GPU and monitor both support YUV420 and RGB444; use RGB444 + // by default, but allow the user to prefer YUV420 mode in this + // decision. + return pParams->preferYUV420; + } else { + // The GPU and monitor both support YUV420, and the monitor doesn't + // support RGB444, so we have to fall back to YUV420. + return TRUE; + } +} + +/* + * Return whether this mode requires SW, HW, or no YUV 4:2:0 compression given + * this GPU, display, connector type, and user preference. + */ +static enum NvYuv420Mode GetYUV420Value( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + if (!IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv420.bpcs) || + ((pTiming->HSyncWidth & 1) != 0) || + ((pTiming->HFrontPorch & 1) != 0) || + ((pTiming->HVisible & 1) != 0) || + ((pTiming->HTotal & 1) != 0) || + ((pTiming->VVisible & 1) != 0)) { + // If this mode doesn't support YUV420, then the GPU caps or + // user preference doesn't matter. + return NV_YUV420_MODE_NONE; + } else if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + if (DpYuv420Required(pDpyEvo, pParams, pTiming)) { + return NV_YUV420_MODE_SW; + } else { + return NV_YUV420_MODE_NONE; + } + } else if (nvDpyIsHdmiEvo(pDpyEvo)) { + /* + * YUV 4:2:0 compression is necessary for HDMI 2.0 4K@60hz modes + * unless the GPU and display both support HDMI 2.0 4K@60hz + * uncompressed RGB 4:4:4 (6G mode). A mode validation override + * may be used to allow RGB 4:4:4 mode if the GPU supports it + * even if the display doesn't claim support in the EDID. 
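+ *
+ * For example (figures for illustration): CEA-861 3840x2160@60Hz has
+ * a 594 MHz pixel clock, beyond the 340 MHz HDMI 1.4 TMDS limit; YUV
+ * 4:2:0 halves the transmitted rate to 297 MHz, at the cost of chroma
+ * resolution.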
+ */ + if (!nvHdmi204k60HzRGB444Allowed(pDpyEvo, pParams, pTiming) || + pParams->preferYUV420) { + + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + // XXX assume the heads have equal capabilities + // XXX assume the gpus have equal capabilities + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[0]; + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[0]; + + if (pHeadCaps->supportsHDMIYUV420HW) { + return NV_YUV420_MODE_HW; + } else { + return NV_YUV420_MODE_SW; + } + } else { + return NV_YUV420_MODE_NONE; + } + } else { + return NV_YUV420_MODE_NONE; + } +} + + +/*! + * Scan through the EDID-specified modes, counting each one. If the + * count reaches requestedModeIndex, then validate that mode. + * + * \param[in] pDpyEvo The dpy whose EDID's modes are considered. + * \param[in] pParams The NvKmsModeValidationParams. + * \param[out] pReply The NvKmsValidateModeIndexReply; if we found + * requestedModeIndex, pReply->valid will store if + * the mode was valid. + * \param[in] requestedModeIndex The index of the mode we are looking for. + * \param[in,out] pCurrentModeIndex A running total of the number of modes + * we have considered. This will be incremented + * by the number of modes considered. + * + * \return If we found the mode with index == requestedModeIndex, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool +ValidateModeIndexEdid(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex) +{ + const char *description; + int i; + NvBool is3DVisionStereo = nvIs3DVisionStereoEvo(pParams->stereoMode); + + /* if no EDID, we have nothing to do here */ + + if (!pDpyEvo->parsedEdid.valid) { + return FALSE; + } + + /* Scan through all EDID modes. */ + + for (i = 0; i < pDpyEvo->parsedEdid.info.total_timings; i++) { + + NVT_TIMING timing = pDpyEvo->parsedEdid.info.timing[i]; + EvoValidateModeFlags flags; + struct NvKmsMode kmsMode = { }; + + /* Skip this mode if it was marked invalid by nvtiming. */ + + if (timing.etc.status == 0) { + continue; + } + + /* + * If *pCurrentModeIndex matches requestedModeIndex, then + * validate the mode. Otherwise, go on to the next mode. + */ + if (*pCurrentModeIndex != requestedModeIndex) { + (*pCurrentModeIndex)++; + continue; + } + + nvkms_memset(&flags, 0, sizeof(flags)); + flags.source = NvKmsModeSourceEdid; + + /* patch the mode for 3DVision */ + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + nvPatch3DVisionModeTimingsEvo(&timing, pDpyEvo, pInfoString)) { + flags.patchedStereoTimings = TRUE; + } + + if ((NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) == + NVT_TYPE_EDID_861ST) && + (NVT_GET_CEA_FORMAT(timing.etc.status) > 0) && + (timing.etc.name[0] != '\0')) { + description = (const char *) timing.etc.name; + } else { + description = NULL; + } + + /* convert from the EDID's NVT_TIMING to NvModeTimings */ + + NVT_TIMINGtoNvModeTimings(&timing, &kmsMode.timings); + + /* + * Determine whether this mode is a HDMI 3D by checking the HDMI 3D + * support map parsed from the CEA-861 EDID extension. + * + * Currently only frame packed 3D modes are supported, as we rely on + * Kepler's HW support for this mode. 
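+ *
+ * When hdmi3D is set, UpdateNvModeTimingsForHdmi3D() below doubles
+ * pixelClockHz and RRx1k; e.g. a 1920x1080@60Hz CEA mode (148.5 MHz,
+ * RRx1k == 60000) would be reported as 297 MHz with RRx1k == 120000,
+ * matching the double-height frame-packed scanout.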
+ */
+        kmsMode.timings.hdmi3D = GetHdmi3DValue(pDpyEvo, pParams, &timing);
+
+        if (kmsMode.timings.hdmi3D) {
+            UpdateNvModeTimingsForHdmi3D(&kmsMode.timings, TRUE);
+        }
+
+        kmsMode.timings.yuv420Mode = GetYUV420Value(pDpyEvo, pParams, &timing);
+
+        /* validate the mode */
+
+        pReply->valid = ValidateMode(pDpyEvo,
+                                     &kmsMode,
+                                     &flags,
+                                     pParams,
+                                     pInfoString,
+                                     &pReply->validSyncs,
+                                     &pReply->modeUsage);
+        /*
+         * if this is a detailed timing, then flag it as such; this
+         * will be used later when searching for the AutoSelect mode
+         */
+
+        if (NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) ==
+            NVT_TYPE_EDID_DTD) {
+
+            /*
+             * if the EDID indicates that the first detailed timing is
+             * preferred, then flag it as such; this will be used
+             * later when searching for the AutoSelect mode
+             *
+             * Note that the sequence number counts from 1
+             */
+
+            if ((pDpyEvo->parsedEdid.info.u.feature_ver_1_3.preferred_timing_is_native) &&
+                NVT_GET_TIMING_STATUS_SEQ(timing.etc.status) == 1) {
+
+                pReply->preferredMode = TRUE;
+            }
+        }
+
+        /*
+         * If the NVT_TIMING was patched for 3DVision above, then the
+         * NvModeTimings generated from it, when passed to
+         * nvFindEdidNVT_TIMING() during nvValidateModeForModeset(),
+         * won't match the original EDID NVT_TIMING. Rebuild
+         * NvModeTimings based on the original (non-3DVision-patched)
+         * NVT_TIMING from the EDID, and return that to the client.
+         * When the NvModeTimings is passed to
+         * nvValidateModeForModeset(), the 3DVision patching will be
+         * performed again.
+         */
+        if (flags.patchedStereoTimings) {
+            enum NvYuv420Mode yuv420Mode = kmsMode.timings.yuv420Mode;
+            NvBool hdmi3D = kmsMode.timings.hdmi3D;
+
+            NVT_TIMINGtoNvModeTimings(&pDpyEvo->parsedEdid.info.timing[i],
+                                      &kmsMode.timings);
+            kmsMode.timings.yuv420Mode = yuv420Mode;
+            kmsMode.timings.hdmi3D = hdmi3D;
+
+            if (hdmi3D) {
+                UpdateNvModeTimingsForHdmi3D(&kmsMode.timings, TRUE);
+            }
+        }
+
+        pReply->mode.timings = kmsMode.timings;
+        pReply->source = NvKmsModeSourceEdid;
+
+        if (description != NULL) {
+            nvAssert(nvkms_strlen(description) < sizeof(pReply->description));
+            nvkms_strncpy(pReply->description, description,
+                          sizeof(pReply->description));
+            pReply->description[sizeof(pReply->description) - 1] = '\0';
+        }
+
+        nvBuildModeName(kmsMode.timings.hVisible, kmsMode.timings.vVisible,
+                        pReply->mode.name, sizeof(pReply->mode.name));
+        return TRUE;
+    }
+
+    /* No matching mode found.
*/ + return FALSE; +} + + +// NOTE: does not include timings for 848x480, 1280x768, 1360x768, +// 1400x1050, 1440x900, 1680x1050, 1920x1200 + +static const NvModeTimings VesaModesTable[] = { + /* + * { RRx1k, PClkHz; + * hVisible, hSyncStart, hSyncEnd, hTotal, + * hSkew, + * vVisible, vSyncStart, vSyncEnd, vTotal, + * { widthMM, heightMM }, + * interlaced, doubleScan, + * hSyncPos, hSyncNeg, vSyncPos, vSyncNeg, hdmi3D, yuv420 }, + */ + + // VESA Standard 640x350 @ 85Hz + { 85080, 31500000, + 640, 672, 736, 832, + 0, + 350, 382, 385, 445, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x400 @ 85Hz + { 85080, 31500000, + 640, 672, 736, 832, + 0, + 400, 401, 404, 445, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 720x400 @ 85Hz + { 85039, 35500000, + 720, 756, 828, 936, + 0, + 400, 401, 404, 446, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // Industry Standard 640x480 @ 60Hz + { 59940, 25175000, + 640, 656, 752, 800, + 0, + 480, 490, 492, 525, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x480 @ 72Hz + { 72809, 31500000, + 640, 664, 704, 832, + 0, + 480, 489, 492, 520, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x480 @ 75Hz + { 75000, 31500000, + 640, 656, 720, 840, + 0, + 480, 481, 484, 500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x480 @ 85Hz + { 85008, 36000000, + 640, 696, 752, 832, + 0, + 480, 481, 484, 509, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 56Hz + { 56250, 36000000, + 800, 824, 896, 1024, + 0, + 600, 601, 603, 625, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 60Hz + { 60317, 40000000, + 800, 840, 968, 1056, + 0, + 600, 601, 605, 628, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 72Hz + { 72188, 50000000, + 800, 856, 976, 1040, + 0, + 600, 637, 643, 666, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 75Hz + { 75000, 49500000, + 800, 816, 896, 1056, + 0, + 600, 601, 604, 625, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 85Hz + { 85137, 56300000, + 800, 832, 896, 1048, + 0, + 600, 601, 604, 631, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1024x768i @ 87Hz + { 86958, 44900000, + 1024, 1032, 1208, 1264, + 0, + 768, 768, 776, 817, + { 0, 0 }, + TRUE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 60Hz + { 60004, 65000000, + 1024, 1048, 1184, 1344, + 0, + 768, 771, 777, 806, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 70Hz + { 70069, 75000000, + 1024, 1048, 1184, 1328, + 0, + 768, 771, 777, 806, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 75Hz + { 75029, 78750000, + 1024, 1040, 1136, 1312, + 0, + 768, 769, 772, 800, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 85Hz + { 84997, 94500000, + 1024, 1072, 1168, 1376, + 0, + 768, 769, 772, 808, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1152x864 @ 75Hz + { 75000, 
108000000, + 1152, 1216, 1344, 1600, + 0, + 864, 865, 868, 900, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x960 @ 60Hz + { 60000, 108000000, + 1280, 1376, 1488, 1800, + 0, + 960, 961, 964, 1000, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x960 @ 85Hz + { 85002, 148500000, + 1280, 1344, 1504, 1728, + 0, + 960, 961, 964, 1011, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x1024 @ 60Hz + { 60020, 108000000, + 1280, 1328, 1440, 1688, + 0, + 1024, 1025, 1028, 1066, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x1024 @ 75Hz + { 75025, 135000000, + 1280, 1296, 1440, 1688, + 0, + 1024, 1025, 1028, 1066, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x1024 @ 85Hz + { 85024, 157500000, + 1280, 1344, 1504, 1728, + 0, + 1024, 1025, 1028, 1072, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 60Hz + { 60000, 162000000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 65Hz + { 65000, 175500000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 70Hz + { 70000, 189000000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 75Hz + { 75000, 202500000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 85Hz + { 85000, 229500000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1792x1344 @ 60Hz + { 60014, 204800000, + 1792, 1920, 2120, 2448, + 0, + 1344, 1345, 1348, 1394, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1792x1344 @ 75Hz + { 74997, 261000000, + 1792, 1888, 2104, 2456, + 0, + 1344, 1345, 1348, 1417, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1856x1392 @ 60Hz + { 60009, 218300000, + 1856, 1952, 2176, 2528, + 0, + 1392, 1393, 1396, 1439, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1856x1392 @ 75Hz + { 75000, 288000000, + 1856, 1984, 2208, 2560, + 0, + 1392, 1393, 1396, 1500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1920x1440 @ 60Hz + { 60000, 234000000, + 1920, 2048, 2256, 2600, + 0, + 1440, 1441, 1444, 1500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1920x1440 @ 75Hz + { 75000, 297000000, + 1920, 2064, 2288, 2640, + 0, + 1440, 1441, 1444, 1500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, +}; + + +/*! + * Scan through the VESA Standard modes, counting each one. If the + * count reaches requestedModeIndex, then validate that mode. + * + * \param[in] pDpyEvo The dpy for whom the modes are considered. + * \param[in] pParams The NvKmsModeValidationParams. 
+ * \param[out] pReply The NvKmsValidateModeIndexReply; if we found + * requestedModeIndex, pReply->valid will store if + * the mode was valid. + * \param[in] requestedModeIndex The index of the mode we are looking for. + * \param[in,out] pCurrentModeIndex A running total of the number of modes + * we have considered. This will be incremented + * by the number of modes considered. + * + * \return If we found the mode with index == requestedModeIndex, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool +ValidateModeIndexVesa(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex) +{ + int i; + + for (i = 0; i < ARRAY_LEN(VesaModesTable); i++) { + struct NvKmsMode kmsMode = { }; + EvoValidateModeFlags flags; + + /* + * If *pCurrentModeIndex matches requestedModeIndex, then + * validate the mode. Otherwise, go on to the next mode. + */ + if (*pCurrentModeIndex != requestedModeIndex) { + (*pCurrentModeIndex)++; + continue; + } + + kmsMode.timings = VesaModesTable[i]; + + nvkms_memset(&flags, 0, sizeof(flags)); + flags.source = NvKmsModeSourceVesa; + + /* is this mode valid? */ + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &flags, + pParams, + pInfoString, + &pReply->validSyncs, + &pReply->modeUsage); + + pReply->mode.timings = kmsMode.timings; + pReply->source = NvKmsModeSourceVesa; + + nvBuildModeName(VesaModesTable[i].hVisible, + VesaModesTable[i].vVisible, + pReply->mode.name, sizeof(pReply->mode.name)); + return TRUE; + } + + /* No matching mode found. */ + return FALSE; +} + + +/*! + * Return if the given NvModeTimings match any entry in VesaModesTable[]. + */ +static NvBool IsVesaMode(const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams) +{ + int i; + + for (i = 0; i < ARRAY_LEN(VesaModesTable); i++) { + if (NvModeTimingsMatch(&VesaModesTable[i], pModeTimings, + TRUE /* ignoreSizeMM */, + ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK) != 0x0) + /* ignoreRRx1k */)) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Write to pInfoString with information about the current mode that + * we are validating; called from the beginning of ValidateMode(); + * LogModeValidationEnd() should be called at the end of + * ValidateMode() to report whether the mode was validated. + */ + +static void LogModeValidationBegin(NVEvoInfoStringPtr pInfoString, + const NvModeTimings *pModeTimings, + const char *modeName) +{ + nvEvoLogInfoString(pInfoString, "%d x %d @ %d Hz%s", + pModeTimings->hVisible, + pModeTimings->vVisible, + NV_U32_KHZ_TO_HZ(pModeTimings->RRx1k), + pModeTimings->hdmi3D ? " (HDMI 3D)" : ""); + + nvEvoLogModeValidationModeTimings(pInfoString, pModeTimings); +} + + +/*! + * Append to pInfoString with any mode validation failure. + */ +static void LogModeValidationEnd(const NVDispEvoRec *pDispEvo, + NVEvoInfoStringPtr pInfoString, + const char *failureReasonFormat, ...) +{ + /* expand any varargs, and print the mode validation result */ + + if (failureReasonFormat) { + char *buf; + NV_VSNPRINTF(buf, failureReasonFormat); + nvEvoLogInfoString(pInfoString, + "Mode is rejected: %s.", + buf ? buf : "Unknown failure"); + nvFree(buf); + } +} + + +/*! + * Print mode timings to the NVEvoInfoStringPtr. 
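+ *
+ * For a 1920x1080@60Hz CEA mode, the output produced by the format
+ * strings below would look roughly like (spacing approximate):
+ *
+ *   Pixel Clock : 148.50 MHz
+ *   HRes, HSyncStart : 1920, 2008
+ *   HSyncEnd, HTotal : 2052, 2200
+ *   VRes, VSyncStart : 1080, 1084
+ *   VSyncEnd, VTotal : 1089, 1125
+ *   Sync Polarity : +H +V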
+ */ +void nvEvoLogModeValidationModeTimings(NVEvoInfoStringPtr + pInfoString, + const NvModeTimings *pModeTimings) +{ + const char *extra; + NvU32 hdmi3DPixelClock = HzToKHz(pModeTimings->pixelClockHz); + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + nvEvoLogInfoString(pInfoString, " Pixel Clock : " + NV_FMT_DIV_1000_POINT_2 " MHz%s", + NV_VA_DIV_1000_POINT_2(hdmi3DPixelClock), + pModeTimings->hdmi3D ? " (HDMI 3D)" : ""); + + nvEvoLogInfoString(pInfoString, " HRes, HSyncStart : %4d, %4d", + pModeTimings->hVisible, + pModeTimings->hSyncStart); + + nvEvoLogInfoString(pInfoString, " HSyncEnd, HTotal : %4d, %4d", + pModeTimings->hSyncEnd, + pModeTimings->hTotal); + + nvEvoLogInfoString(pInfoString, " VRes, VSyncStart : %4d, %4d", + pModeTimings->vVisible, + pModeTimings->vSyncStart); + + nvEvoLogInfoString(pInfoString, " VSyncEnd, VTotal : %4d, %4d", + pModeTimings->vSyncEnd, + pModeTimings->vTotal); + + nvEvoLogInfoString(pInfoString, " Sync Polarity : %s%s%s%s", + pModeTimings->hSyncPos ? "+H " : "", + pModeTimings->hSyncNeg ? "-H " : "", + pModeTimings->vSyncPos ? "+V " : "", + pModeTimings->vSyncNeg ? "-V " : ""); + + if (pModeTimings->interlaced && pModeTimings->doubleScan) { + extra = "Interlace DoubleScan"; + } else if (pModeTimings->interlaced) { + extra = "Interlace"; + } else if (pModeTimings->doubleScan) { + extra = "DoubleScan"; + } else { + extra = NULL; + } + + if (extra) { + nvEvoLogInfoString(pInfoString, " Extra : %s", extra); + } +} + + +/*! + * Adjust the given value by the given percentage, using integer math. + * + * The 'percentage' argument is multiplied by 100 by the caller. E.g., + * + * percentage=50 ==> 50% + * percentage=110 ==> 110% + * + * So, divide by 100.0: + * + * value * percentage / 100 + */ +static NvU32 Percentage(const NvU32 value, const NvU32 percentage) +{ + return axb_div_c(value, percentage, 100); +} + +/*! + * Write the given frequency to the given buffer. + * + * The frequency value is assumed to have been multiplied by 1000, + * such that 'value % 1000' gives the fractional part, and value/1000 + * gives the integer part. + * + * The buffer is assumed to be (at least) NV_MAX_FREQUENCY_STRING_LEN + * bytes long. + * + * Note that to meet the size assumptions made in the + * NV_MAX_FREQUENCY_STRING_LEN definition, the integer portion of the + * frquency value is clamped to 3 digits. + */ +static int +FrequencyToString(const NvU32 value, char *buffer) +{ + int n = nvkms_snprintf(buffer, NV_MAX_FREQUENCY_STRING_LEN, + "%d.%03d", + /* mod 1000, to limit to 3 digits */ + (value / 1000) % 1000, + value % 1000); + + buffer[NV_MAX_FREQUENCY_STRING_LEN - 1] = '\0'; + + return n; +} + +/*! + * Write the given NvKmsModeValidationFrequencyRanges to the given buffer. 
+ */ +static void +RangesToString(const struct NvKmsModeValidationFrequencyRanges *pRanges, + char buffer[NV_MAX_RANGE_STRING_LEN]) +{ + char *s; + int i, n; + + s = buffer; + + for (i = 0; i < pRanges->numRanges; i++) { + if (pRanges->range[i].high == pRanges->range[i].low) { + s += FrequencyToString(pRanges->range[i].high, s); + } else { + char highString[NV_MAX_FREQUENCY_STRING_LEN]; + char lowString[NV_MAX_FREQUENCY_STRING_LEN]; + + FrequencyToString(pRanges->range[i].high, highString); + FrequencyToString(pRanges->range[i].low, lowString); + + n = buffer + NV_MAX_RANGE_STRING_LEN - s; + s += nvkms_snprintf(s, n, "%s-%s", lowString, highString); + } + + if (i < (pRanges->numRanges - 1)) { + n = buffer + NV_MAX_RANGE_STRING_LEN - s; + s += nvkms_snprintf(s, n, ", "); + } + } + + buffer[NV_MAX_RANGE_STRING_LEN - 1] = '\0'; +} + +static NvBool ValidateModeTimings( + NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs) +{ + int i; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 overrides = pParams->overrides; + const NvBool is3DVisionStereo = nvIs3DVisionStereoEvo(pParams->stereoMode); + const char *modeName = pKmsMode->name; + const NvModeTimings *pModeTimings = &pKmsMode->timings; + char localModeName[NV_MAX_MODE_NAME_LEN]; + + if (modeName[0] == '\0') { + nvBuildModeName(pModeTimings->hVisible, pModeTimings->vVisible, + localModeName, sizeof(localModeName)); + modeName = localModeName; + } + + /* Compute the validSyncs to use during validation. */ + + *pValidSyncs = pParams->validSyncs; + nvDpySetValidSyncsEvo(pDpyEvo, pValidSyncs); + + if (pModeTimings->interlaced) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + if (!pEvoSubDev->capabilities.misc.supportsInterlaced) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Interlaced modes are not supported on this GPU"); + return FALSE; + } + } + + if ((flags->source != NvKmsModeSourceEdid) && + (overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_EDID_MODES) == 0) { + + NvBool continuousFrequency = TRUE; + + /* + * EDID 1.3 defines the "GTF Supported" flag like this: + * + * If this bit is set to 1, the display supports timings based + * on the GTF standard. + * + * We interpret this to mean that if the bit is not set, then + * the display device only supports modes listed in the EDID. + */ + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.info.version == NVT_EDID_VER_1_3)) { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_3.support_gtf; + } + + /* + * EDID 1.4 Release A, Revision 2; Note 5 in section 3.6.4: + * + * If bit 0 is set to 0, then the display is non-continuous + * frequency (multi-mode) and is only specified to accept the + * video timing formats that are listed in BASE EDID and + * certain EXTENSION Blocks. 
+ */ + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.info.version >= NVT_EDID_VER_1_4)) { + if (pDpyEvo->parsedEdid.info.input.isDigital) { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_4_digital.continuous_frequency; + } else { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_4_analog.continuous_frequency; + } + } + + if (!continuousFrequency) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Only EDID-provided modes are allowed on %s (continuous frequency modes not allowed)", + pDpyEvo->name); + return FALSE; + } + + /* + * By default, we only allow EDID modes when driving digital + * protocol. + */ + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.input.isDigital) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Only EDID-provided modes are allowed on %s", + pDpyEvo->name); + return FALSE; + } + } + + /* Throw out modes that will break downstream assumptions */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_TOTAL_SIZE_CHECK) == 0) { + + if (pModeTimings->hVisible > pModeTimings->hSyncStart) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's visible horizontal size (%d) exceeds the horizontal sync start (%d)", + pModeTimings->hVisible, + pModeTimings->hSyncStart); + return FALSE; + } + + if (pModeTimings->hSyncStart > pModeTimings->hSyncEnd) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's horizontal sync start (%d) exceeds the horizontal sync end (%d)", + pModeTimings->hSyncStart, + pModeTimings->hSyncEnd); + return FALSE; + } + + if (pModeTimings->hSyncEnd > pModeTimings->hTotal) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's horizontal sync end (%d) exceeds the horizontal total size (%d)", + pModeTimings->hSyncEnd, + pModeTimings->hTotal); + return FALSE; + } + + if (pModeTimings->vVisible > pModeTimings->vSyncStart) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's visible vertical size (%d) exceeds the vertical sync start (%d)", + pModeTimings->vVisible, + pModeTimings->vSyncStart); + return FALSE; + } + + if (pModeTimings->vSyncStart > pModeTimings->vSyncEnd) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's vertical sync start (%d) exceeds the vertical sync end (%d)", + pModeTimings->vSyncStart, + pModeTimings->vSyncEnd); + return FALSE; + } + + if (pModeTimings->vSyncEnd > pModeTimings->vTotal) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's vertical sync end (%d) exceeds the vertical total size (%d)", + pModeTimings->vSyncEnd, + pModeTimings->vTotal); + return FALSE; + } + } + + /* reject modes with too high pclk */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0) { + + NvU32 maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; + NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); + if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) { + realPixelClock /= 2; + } + + if (realPixelClock > maxPixelClockKHz) { + NvU32 hdmi3DPixelClock = realPixelClock; + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + (realPixelClock - maxPixelClockKHz < 5000)) { + + nvAssert(!pModeTimings->hdmi3D); + + nvEvoLogInfoString(pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than Display Device maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", + NV_VA_DIV_1000_POINT_1(realPixelClock), + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + + } else { + + LogModeValidationEnd(pDispEvo, 
pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for Display Device (Max: " NV_FMT_DIV_1000_POINT_1 " MHz)", + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + return FALSE; + } + } + } + + /* check against the EDID's max pclk */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_EDID_MAX_PCLK_CHECK) == 0) { + + NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); + if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) { + realPixelClock /= 2; + } + + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.limits.max_pclk_10khz != 0) && + (realPixelClock > + (pDpyEvo->parsedEdid.limits.max_pclk_10khz * 10))) { + + NvU32 hdmi3DPixelClock = realPixelClock; + NvU32 maxPixelClockKHz = pDpyEvo->parsedEdid.limits.max_pclk_10khz * 10; + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + /* + * If this mode came from the EDID, then something is odd + * (see bug 336963); print a warning, but continue + */ + + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + (realPixelClock - maxPixelClockKHz < 5000)) { + + nvAssert(!pModeTimings->hdmi3D); + + nvEvoLogInfoString(pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than EDID specified maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", + NV_VA_DIV_1000_POINT_1(realPixelClock), + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + + } else if ((flags->source == NvKmsModeSourceEdid) && + ((overrides & + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) { + nvEvoLogInfoString(pInfoString, + "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; " + "however, the EDID's reported maximum PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) would exclude this mode's PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s); " + "ignoring EDID maximum PixelClock check for mode \"%s\".", + pDpyEvo->name, modeName, + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz), + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + modeName); + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for EDID (EDID Max: " NV_FMT_DIV_1000_POINT_1" MHz)", + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + return FALSE; + } + } + } + + /* check the mode against the max size */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_SIZE_CHECK) == 0) { + + const NvU32 maxHeight = pDevEvo->caps.maxRasterHeight; + const NvU32 maxWidth = pDevEvo->caps.maxRasterWidth; + + NvU16 realHTotal = pModeTimings->hTotal; + if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) { + realHTotal /= 2; + } + + // With YUV420 modes, we want to use the real half-width hTotal + // for validation, but report the full-width value in the log. 
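+        // For instance, an emulated (NV_YUV420_MODE_SW) 3840x2160 mode
+        // with hTotal 4400 is validated as realHTotal == 2200, matching
+        // the half-width raster the hardware actually scans out.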
+ if ((realHTotal > maxWidth) || + (pModeTimings->vTotal > maxHeight)) { + + LogModeValidationEnd(pDispEvo, pInfoString, + "Mode total size (%u x %u), with visible size (%u x %u), larger than maximum size (%u x %u)", + pModeTimings->hTotal, + pModeTimings->vTotal, + pModeTimings->hVisible, + pModeTimings->vVisible, + maxWidth, maxHeight); + return FALSE; + } + } + + /* check against the frequency information */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_HORIZ_SYNC_CHECK) == 0) { + if (pValidSyncs->horizSyncHz.numRanges > 0) { + NvU32 hSync = axb_div_c(pModeTimings->pixelClockHz, 1, + pModeTimings->hTotal); + + for (i = 0; i < pValidSyncs->horizSyncHz.numRanges; i++) { + NvU32 low = pValidSyncs->horizSyncHz.range[i].low; + NvU32 high = pValidSyncs->horizSyncHz.range[i].high; + if ((hSync > Percentage(low, 99)) && + (hSync < Percentage(high, 101))) { + break; + } + } + + /* + * Now see whether we ran out of sync ranges without + * finding a match + */ + + if (i == pValidSyncs->horizSyncHz.numRanges) { + + char rangeString[NV_MAX_RANGE_STRING_LEN]; + char hSyncString[NV_MAX_FREQUENCY_STRING_LEN]; + + RangesToString(&pValidSyncs->horizSyncHz, rangeString); + FrequencyToString(hSync, hSyncString); + + /* + * If this mode came from the EDID and the valid + * HorizSync ranges (which excluded this timing) also + * came from the EDID, then something is odd (see bug + * 336963); print a warning, but continue. + */ + + if ((flags->source == NvKmsModeSourceEdid) && + (pValidSyncs->horizSyncHz.source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID) && + ((overrides & + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) { + + nvEvoLogInfoString(pInfoString, + "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; " + "however, the EDID's valid HorizSync range (%s kHz) would exclude this mode's HorizSync (%s kHz); " + "ignoring HorizSync check for mode \"%s\".", + pDpyEvo->name, modeName, + rangeString, hSyncString, modeName); + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "HorizSync (%s kHz) out of range (%s kHz)", hSyncString, rangeString); + return FALSE; + } + } + } + } + + if ((overrides & NVKMS_MODE_VALIDATION_NO_VERT_REFRESH_CHECK) == 0) { + + if (pValidSyncs->vertRefreshHz1k.numRanges > 0) { + + /* + * note: we expect RRx1k to be field rate for interlaced + * modes, (undoubled) frame rate for doublescan modes, and + * (doubled) frame rate for HDMI 3D modes. + */ + NvU32 vRefresh = pModeTimings->RRx1k; + + for (i = 0; i < pValidSyncs->vertRefreshHz1k.numRanges; i++) { + NvU32 low = pValidSyncs->vertRefreshHz1k.range[i].low; + NvU32 high = pValidSyncs->vertRefreshHz1k.range[i].high; + + if ((vRefresh > Percentage(low, 99)) && + (vRefresh < Percentage(high, 101))) { + break; + } + } + + /* + * Now see whether we ran out of refresh ranges without + * finding a match + */ + + if (i == pValidSyncs->vertRefreshHz1k.numRanges) { + + char rangeString[NV_MAX_RANGE_STRING_LEN]; + char vRefreshString[NV_MAX_FREQUENCY_STRING_LEN]; + + if (pModeTimings->hdmi3D) { + vRefresh /= 2; + } + + RangesToString(&pValidSyncs->vertRefreshHz1k, + rangeString); + FrequencyToString(vRefresh, vRefreshString); + + /* + * If this mode came from the EDID and the valid + * VertRefresh ranges (which excluded this timing) + * also came from the EDID, then something is odd (see + * bug 336963); print a warning, but continue. 
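+                 *
+                 * Note the 1% slack in the sync-range comparisons
+                 * above: with Percentage(low, 99) and
+                 * Percentage(high, 101), a nominal 60.000 Hz endpoint
+                 * admits values strictly between 59.400 Hz and
+                 * 60.600 Hz.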
+                 */
+
+                if ((flags->source == NvKmsModeSourceEdid) &&
+                    (pValidSyncs->vertRefreshHz1k.source ==
+                     NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID) &&
+                    ((overrides &
+                      NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) {
+
+                    nvEvoLogInfoString(pInfoString,
+                        "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; "
+                        "however, the EDID's valid VertRefresh range (%s Hz) would exclude this mode's VertRefresh (%s Hz%s); "
+                        "ignoring VertRefresh check for mode \"%s\".",
+                        pDpyEvo->name, modeName,
+                        rangeString, vRefreshString,
+                        pModeTimings->hdmi3D ? ", doubled for HDMI 3D" : "",
+                        modeName);
+                } else {
+
+                    LogModeValidationEnd(pDispEvo, pInfoString,
+                        "VertRefresh (%s Hz%s) out of range (%s Hz)", vRefreshString,
+                        pModeTimings->hdmi3D ? ", doubled for HDMI 3D" : "",
+                        rangeString);
+                    return FALSE;
+                }
+            }
+        }
+    }
+
+    /*
+     * If 3D Vision Stereo is enabled, and the pDpy requires patched
+     * stereo modetimings, and these modetimings are not patched, then
+     * reject the mode, unless the mode validation override "AllowNon3DVModes"
+     * has been set.
+     */
+
+    if ((overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_3DVISION_MODES) == 0) {
+        if (is3DVisionStereo &&
+            pDpyEvo->stereo3DVision.requiresModetimingPatching &&
+            !flags->patchedStereoTimings) {
+            LogModeValidationEnd(pDispEvo, pInfoString,
+                                 "Mode not compatible with 3D Vision Stereo");
+            return FALSE;
+        }
+    }
+
+    /*
+     * If HDMI 3D is enabled and supported, reject non-HDMI 3D modes unless the
+     * mode validation override "AllowNonHDMI3DModes" has been set.
+     */
+    if (((overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_HDMI3D_MODES) == 0) &&
+        (pParams->stereoMode == NVKMS_STEREO_HDMI_3D) &&
+        nvDpyEvoSupportsHdmi3D(pDpyEvo) &&
+        !pModeTimings->hdmi3D) {
+        LogModeValidationEnd(pDispEvo, pInfoString,
+                             "Mode not compatible with HDMI 3D");
+        return FALSE;
+    }
+
+    if (pModeTimings->hdmi3D && pModeTimings->interlaced) {
+        LogModeValidationEnd(pDispEvo, pInfoString,
+                             "Interlaced frame packed HDMI 3D modes are not supported.");
+        return FALSE;
+    }
+
+    if (pModeTimings->interlaced &&
+        nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) &&
+        (overrides & NVKMS_MODE_VALIDATION_ALLOW_DP_INTERLACED) == 0) {
+        LogModeValidationEnd(pDispEvo, pInfoString,
+                             "Interlaced modes are not supported over DisplayPort");
+        return FALSE;
+    }
+
+    if (pModeTimings->interlaced &&
+        (overrides & NVKMS_MODE_VALIDATION_NO_INTERLACED_MODES)) {
+        LogModeValidationEnd(pDispEvo, pInfoString,
+                             "Interlaced modes are not allowed");
+        return FALSE;
+    }
+
+    if (pModeTimings->interlaced &&
+        pParams->stereoMode != NVKMS_STEREO_DISABLED) {
+        LogModeValidationEnd(pDispEvo, pInfoString,
+                             "Interlaced modes are not allowed with stereo");
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*
+ * Log to the InfoString with information about this
+ * particular ViewPort.
+ */
+
+static void LogViewPort(NVEvoInfoStringPtr pInfoString,
+                        const NVHwModeTimingsEvo *pTimings)
+{
+    const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort;
+    const struct NvKmsRect viewPortOut = nvEvoViewPortOutClientView(pTimings);
+
+    /* print the viewport name, size, and taps */
+
+    nvEvoLogInfoString(pInfoString,
+                       "Viewport %dx%d+%d+%d",
+                       viewPortOut.width,
+                       viewPortOut.height,
+                       viewPortOut.x,
+                       viewPortOut.y);
+
+    nvEvoLogInfoString(pInfoString,
+                       " Horizontal Taps %d",
+                       NVEvoScalerTapsToNum(pViewPort->hTaps));
+
+    nvEvoLogInfoString(pInfoString,
+                       " Vertical Taps %d",
+                       NVEvoScalerTapsToNum(pViewPort->vTaps));
+}
+
+/*
+ * Validate pModeTimings for use on pDpy.
If the mode is valid, use + * pDev->disp.ConstructHwModeTimings() to assign pHwModeTimings and + * return TRUE. + */ +static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs, + struct NvKmsUsageBounds *pModeUsage) +{ + const char *modeName = pKmsMode->name; + const NvModeTimings *pModeTimings = &pKmsMode->timings; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + char localModeName[NV_MAX_MODE_NAME_LEN]; + + NVHwModeTimingsEvo *pTimingsEvo = + nvPreallocGet(pDevEvo, + PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS, + sizeof(*pTimingsEvo)); + + NvBool ret = FALSE; + + if (modeName[0] == '\0') { + nvBuildModeName(pModeTimings->hVisible, pModeTimings->vVisible, + localModeName, sizeof(localModeName)); + modeName = localModeName; + } + + /* Initialize the EVO hwModeTimings structure */ + + nvkms_memset(pTimingsEvo, 0, sizeof(*pTimingsEvo)); + + /* begin logging of ModeValidation for this mode */ + + LogModeValidationBegin(pInfoString, pModeTimings, modeName); + + if (!ValidateModeTimings(pDpyEvo, pKmsMode, flags, pParams, + pInfoString, pValidSyncs)) { + goto done; + } + + /* + * we made it past the rest of mode validation; now construct the + * hw modetimings to use for this mode; we do this here so that we + * can report any failures as part of the mode validation + * reporting. + * + * XXX For certain modes like doublescan, interlaced, and YUV 4:2:0 + * emulated mode, the timings stored in the pTimingsEvo constructed + * here are different than the timings in pModeTimings used for validation + * earlier in this function. + * + * In certain cases (like pclk validation for YUV 4:2:0 modes, which store + * a doubled pclk in pModeTimings and the real pclk in pTimingsEvo) we + * want to use the pTimingsEvo value for validation in this function. + * It may make sense to restructure this function so pTimingsEvo + * construction happens earlier, then the pTimingsEvo values are used + * for the remaining validation. + */ + + if (!nvConstructHwModeTimingsEvo(pDpyEvo, + pKmsMode, + NULL, /* pViewPortSizeIn */ + NULL, /* pViewPortOut */ + pTimingsEvo, + pParams, + pInfoString)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Unable to construct hardware-specific mode " + "timings"); + goto done; + } + + if (!nvDPValidateModeEvo(pDpyEvo, pTimingsEvo, pParams)) { + LogModeValidationEnd(pDispEvo, + pInfoString, "DP Bandwidth check failed"); + goto done; + } + + /* + * Check ViewPortIn dimensions and ensure valid h/vTaps can be assigned. + */ + if (!nvValidateHwModeTimingsViewPort(pDevEvo, + /* XXX assume the gpus have equal capabilities */ + &pDevEvo->gpus[0].capabilities.head[0].scalerCaps, + pTimingsEvo, pInfoString)) { + goto done; + } + + + /* Run the raster timings through IMP checking. */ + + if (!nvConstructHwModeTimingsImpCheckEvo(pDpyEvo->pConnectorEvo, + pTimingsEvo, pParams, pInfoString, + 0 /* head */)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "GPU extended capability check failed"); + goto done; + } + + /* Log modevalidation information about the viewport. */ + + LogViewPort(pInfoString, pTimingsEvo); + + /* Copy out the usage bounds that passed validation */ + + nvkms_memcpy(pModeUsage, &pTimingsEvo->viewPort.possibleUsage, sizeof(*pModeUsage)); + + /* Whew, if we got this far, the mode is valid. 
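+     *
+     * To recap the order of checks in this function: raster timing
+     * sanity (ValidateModeTimings), hardware timing construction
+     * (nvConstructHwModeTimingsEvo), DP bandwidth
+     * (nvDPValidateModeEvo), viewport/scaler taps
+     * (nvValidateHwModeTimingsViewPort), and finally IMP
+     * (nvConstructHwModeTimingsImpCheckEvo).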
+     */
+
+    LogModeValidationEnd(pDispEvo, pInfoString, NULL);
+
+    ret = TRUE;
+
+done:
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS);
+
+    return ret;
+}
+
+
+/*!
+ * Return whether the given NVT_TIMING and NvModeTimings match.
+ */
+static NvBool NVT_TIMINGmatchesNvModeTimings
+(
+    const NVT_TIMING *pTiming,
+    const NvModeTimings *pModeTimings,
+    const struct NvKmsModeValidationParams *pParams
+)
+{
+    NvModeTimings tmpModeTimings;
+
+    NVT_TIMINGtoNvModeTimings(pTiming, &tmpModeTimings);
+
+    return NvModeTimingsMatch(&tmpModeTimings, pModeTimings,
+                              TRUE /* ignoreSizeMM */,
+                              ((pParams->overrides &
+                                NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK) != 0x0)
+                              /* ignoreRRx1k */);
+}
+
+
+/*!
+ * Find the NVT_TIMING from the dpy's EDID that matches the pModeTimings.
+ */
+const NVT_TIMING *nvFindEdidNVT_TIMING
+(
+    const NVDpyEvoRec *pDpyEvo,
+    const NvModeTimings *pModeTimings,
+    const struct NvKmsModeValidationParams *pParams
+)
+{
+    NvModeTimings tmpModeTimings;
+    int i;
+
+    if (!pDpyEvo->parsedEdid.valid) {
+        return NULL;
+    }
+
+    tmpModeTimings = *pModeTimings;
+
+    /*
+     * Revert any modeTimings modifications that were done for hdmi3D
+     * in ValidateModeIndexEdid(), so that the modeTimings can be
+     * compared with the NVT_TIMINGs in the parsed EDID.
+     */
+    if (tmpModeTimings.hdmi3D) {
+        UpdateNvModeTimingsForHdmi3D(&tmpModeTimings, FALSE);
+    }
+
+    /*
+     * The NVT_TIMINGs we compare against below won't have hdmi3D or
+     * yuv420 set; clear those flags in tmpModeTimings so that we can
+     * do a more meaningful comparison.
+     */
+    tmpModeTimings.hdmi3D = FALSE;
+    tmpModeTimings.yuv420Mode = NV_YUV420_MODE_NONE;
+
+    for (i = 0; i < pDpyEvo->parsedEdid.info.total_timings; i++) {
+        const NVT_TIMING *pTiming = &pDpyEvo->parsedEdid.info.timing[i];
+        if (NVT_TIMINGmatchesNvModeTimings(pTiming, &tmpModeTimings, pParams) &&
+            /*
+             * Only consider the mode a match if the yuv420
+             * configuration of pTiming would match pModeTimings.
+             */
+            (pModeTimings->yuv420Mode ==
+             GetYUV420Value(pDpyEvo, pParams, pTiming))) {
+            return pTiming;
+        }
+    }
+
+    return NULL;
+}
+
+/*!
+ * Construct mode-timing's meta data required for mode validation
+ * logic. This meta data involves EvoValidateModeFlags, patched stereo
+ * vision timings, etc.
+ *
+ * \param[in]     pDpyEvo         The dpy for whom the mode is considered.
+ * \param[in]     pParams         The NvKmsModeValidationParams.
+ * \param[in,out] pKmsMode        The NVKMS mode to be considered.
+ * \param[out]    pFlags          The EvoValidateModeFlags.
+ * \param[out]    pInfoFrameState The InfoFrame control state.
+ *
+ * \return Return TRUE on success with patched mode timings,
+ *         EvoValidateModeFlags and infoFrame controls etc.; otherwise
+ *         returns FALSE.
+ */ +static NvBool ConstructModeTimingsMetaData( + NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsMode *pKmsMode, + EvoValidateModeFlags *pFlags, + NVDispHeadInfoFrameStateEvoRec *pInfoFrameState) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + EvoValidateModeFlags flags = { 0 }; + NVT_VIDEO_INFOFRAME_CTRL *pVideoInfoFrameCtrl = NULL; + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pVendorInfoFrameCtrl = NULL; + NvModeTimings modeTimings = pKmsMode->timings; + const NVT_TIMING *pTiming; + + if (pInfoFrameState != NULL) { + pVideoInfoFrameCtrl = &pInfoFrameState->videoCtrl; + pVendorInfoFrameCtrl = &pInfoFrameState->vendorCtrl; + + nvkms_memset(pVideoInfoFrameCtrl, NVT_INFOFRAME_CTRL_DONTCARE, + sizeof(*pVideoInfoFrameCtrl)); + + nvkms_memset(pVendorInfoFrameCtrl, NVT_INFOFRAME_CTRL_DONTCARE, + sizeof(*pVendorInfoFrameCtrl)); + } + + flags.source = NvKmsModeSourceUnknown; + + /* Is this an EDID mode? */ + pTiming = nvFindEdidNVT_TIMING(pDpyEvo, &modeTimings, pParams); + + if (pTiming != NULL) { + NVT_TIMING timing = *pTiming; + const NvBool is3DVisionStereo = + nvIs3DVisionStereoEvo(pParams->stereoMode); + + flags.source = NvKmsModeSourceEdid; + + /* Patch the mode for 3DVision. */ + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + nvPatch3DVisionModeTimingsEvo(&timing, pDpyEvo, + &dummyInfoString)) { + flags.patchedStereoTimings = TRUE; + + /* + * Replace the client's modeTimings with the version + * patched for 3DVision stereo. + */ + NVT_TIMINGtoNvModeTimings(&timing, &modeTimings); + + /* Restore the yuv420 and hdmi3D flags from the client's mode. */ + modeTimings.yuv420Mode = pKmsMode->timings.yuv420Mode; + modeTimings.hdmi3D = pKmsMode->timings.hdmi3D; + + /* Re-apply adjustments for hdmi3D. */ + if (modeTimings.hdmi3D) { + UpdateNvModeTimingsForHdmi3D(&modeTimings, TRUE); + } + + } + + /* Validate yuv420. */ + if (modeTimings.yuv420Mode != + GetYUV420Value(pDpyEvo, pParams, &timing)) { + return FALSE; + } + + /* Validate hdmi3D. */ + if (modeTimings.hdmi3D != GetHdmi3DValue(pDpyEvo, pParams, &timing)) { + return FALSE; + } + + if (pParams->stereoMode == NVKMS_STEREO_HDMI_3D) { + if (!nvDpyEvoSupportsHdmi3D(pDpyEvo)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI 3D mode is selected, but " + "HDMI 3D is not supported by %s; HDMI 3D may not function " + "properly. This might happen if no EDID is available for " + "%s, if the display is not connected over HDMI, or if the " + "display does not support HDMI 3D.", pDpyEvo->name, + pDpyEvo->name); + } else if (!modeTimings.hdmi3D) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI 3D mode is selected, but the " + "currently selected mode is incompatible with HDMI 3D. " + "HDMI 3D will be disabled."); + } + } + + /* + * Compute the infoFrame control; this will be assigned to + * pTimingsEvo after ValidateMode has written to it. + */ + if (nvDpyIsHdmiEvo(pDpyEvo)) { + NvTiming_ConstructVideoInfoframeCtrl(&timing, pVideoInfoFrameCtrl); + + if (pVendorInfoFrameCtrl != NULL) { + // Currently hardcoded to send infoframe necessary for HDMI 1.4a 4kx2k extended modes. 
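+                // (For reference: HDMI 1.4a defines HDMI_VIC codes 1-4
+                // for 3840x2160 @ 30/25/24 Hz and 4096x2160 @ 24 Hz;
+                // for NVT_TYPE_HDMI_EXT timings,
+                // NVT_GET_TIMING_STATUS_SEQ() yields that code, as used
+                // below.)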
+                if (NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) == NVT_TYPE_HDMI_EXT) {
+                    pVendorInfoFrameCtrl->Enable = 1;
+                    pVendorInfoFrameCtrl->HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT;
+                    pVendorInfoFrameCtrl->HDMI_VIC = NVT_GET_TIMING_STATUS_SEQ(timing.etc.status);
+                    pVendorInfoFrameCtrl->ThreeDStruc = NVT_HDMI_VS_BYTE5_HDMI_3DS_NA;
+                    pVendorInfoFrameCtrl->ThreeDDetail = NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_NA;
+                    pVendorInfoFrameCtrl->MetadataPresent = 0;
+                    pVendorInfoFrameCtrl->MetadataType = NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_NA;
+                } else {
+                    pVendorInfoFrameCtrl->Enable = 0;
+                }
+            }
+        }
+
+        goto done;
+    }
+
+    /* Otherwise, is this a VESA mode? */
+
+    if (IsVesaMode(&modeTimings, pParams)) {
+        flags.source = NvKmsModeSourceVesa;
+        goto done;
+    }
+
+    /*
+     * Otherwise, this must be a user-specified mode; no metadata changes
+     * are needed.
+     */
+
+done:
+    *pFlags = flags;
+    pKmsMode->timings = modeTimings;
+
+    return TRUE;
+}
+
+/*!
+ * Validate the NvKmsMode.
+ *
+ * \param[in]  pDpyEvo     The dpy for whom the mode is considered.
+ * \param[in]  pParams     The NvKmsModeValidationParams.
+ * \param[in]  pKmsMode    The mode to be considered.
+ * \param[out] pTimingsEvo The EVO mode timings to be programmed in hardware.
+ *
+ * \return If the mode is valid, return TRUE and populate pTimingsEvo.
+ *         If the mode is not valid, return FALSE.
+ */
+NvBool nvValidateModeForModeset(NVDpyEvoRec *pDpyEvo,
+                                const struct NvKmsModeValidationParams *pParams,
+                                const struct NvKmsMode *pKmsMode,
+                                const struct NvKmsSize *pViewPortSizeIn,
+                                const struct NvKmsRect *pViewPortOut,
+                                NVHwModeTimingsEvo *pTimingsEvo,
+                                NVDispHeadInfoFrameStateEvoRec *pInfoFrameState)
+{
+    EvoValidateModeFlags flags;
+    struct NvKmsMode kmsMode = *pKmsMode;
+    struct NvKmsModeValidationValidSyncs dummyValidSyncs;
+
+    nvkms_memset(pTimingsEvo, 0, sizeof(*pTimingsEvo));
+
+    if (!ConstructModeTimingsMetaData(pDpyEvo,
+                                      pParams,
+                                      &kmsMode,
+                                      &flags,
+                                      pInfoFrameState)) {
+        return FALSE;
+    }
+
+    if (!ValidateModeTimings(pDpyEvo,
+                             &kmsMode,
+                             &flags,
+                             pParams,
+                             &dummyInfoString,
+                             &dummyValidSyncs)) {
+        return FALSE;
+    }
+
+    if (!nvConstructHwModeTimingsEvo(pDpyEvo,
+                                     &kmsMode,
+                                     pViewPortSizeIn,
+                                     pViewPortOut,
+                                     pTimingsEvo,
+                                     pParams,
+                                     &dummyInfoString)) {
+        return FALSE;
+    }
+
+    return TRUE;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c
new file mode 100644
index 0000000..6a1ce23
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c
@@ -0,0 +1,2864 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * The EVO modeset sequence is structured to minimize changes to the + * hardware from one modeset to the next, and to minimize the number + * of UPDATE methods that are programmed. + * + * Software state is tracked in three different structures: + * + * (1) NVDispEvo::headState - This is the NVKMS record of what has + * been programmed in the hardware, for all heads on the disp. + * + * (2) NvKmsSetModeRequest - This is the NVKMS client's description of + * what changes are requested. Note that clients can just request to + * change specific heads on specific disps. Other heads/disps should + * retain their current configuration across the modeset. + * + * (3) NVProposedModeSetHwState - This describes the hardware state + * that is desired at the end of the modeset. It is assigned by + * considering the current state (NVDispEvo::headState) and applying + * any client-requested changes (NvKmsSetModeRequest). + * + * The intended flow is: + * + * - Assign NVProposedModeSetHwState, given NVDispEvo::headState and + * NvKmsSetModeRequest, noting which heads are changing. + * - Check whether the proposed state is valid, and fail the modeset + * if anything about the proposed configuration is invalid. + * + * NOTE: Nothing before this point in the sequence should alter NVKMS + * software state, or program hardware. Also, to the extent + * possible, we should avoid failing the modeset after this point in + * the sequence, because this is when we start altering software and + * hardware state. + * + * - Notify RM that the modeset is starting. + * - Reset the EVO locking state machine. + * - For each disp: + * - For each head: + * - Shut down newly unused heads + * - For each head: + * - Apply the requested changes. + * - Send evo UPDATE method + * - For each head: + * - Perform post-UPDATE work + * - Update the EVO locking state machine. + * - Notify RM that the modeset is complete. + * - Populate the reply structure returned to the NVKMS client. + * + * + * TODO: + * - Would it be worthwhile to centralize SOR (re)assignment, disp-wide, + * in ApplyProposedModeSetHwStateOneDisp() between the calls to + * ApplyProposedModeSetHwStateOneHeadShutDown() and + * ApplyProposedModeSetHwStateOneHeadPreUpdate()? + */ + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-rm.h" +#include "nvkms-hdmi.h" +#include "nvkms-flip.h" +#include "nvkms-3dvision.h" +#include "nvkms-modepool.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-vrr.h" +#include "nvkms-lut.h" + +#include "dp/nvdp-connector.h" + +#include "nvkms-api.h" + +#include "nvkms-modeset.h" +#include "nvkms-modeset-types.h" +#include "nvkms-modeset-workarea.h" +#include "nvkms-attributes.h" + +/*! + * Get an allowFlipLockGroup value that is not yet used by pProposed. + * + * Scan through pProposed to find all currently used + * allowFlipLockGroup values, then pick the first allowFlipLockGroup + * value that is not used. 
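+ *
+ * For example, if groups 1 and 3 are already in use,
+ * allowFlipLockGroupMask is 0b101; the lowest set bit of its inverse is
+ * bit 1, so BIT_IDX_64(LOWESTBIT(~mask)) yields 1, which is returned as
+ * the 1-based group value 2.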
+ */
+static NvU8 GetAvailableAllowFlipLockGroupValue(
+    const NVProposedModeSetHwState *pProposed)
+{
+    NvU32 dispIndex;
+    NvU64 allowFlipLockGroupMask = 0;
+    NvU8 allowFlipLockGroup;
+
+    /*
+     * Find all of the used allowFlipLockGroup values.  Note that
+     * allowFlipLockGroup is 1-based (so that 0 can represent no
+     * assigned allowFlipLockGroup).  Shift to 0-based, to store in
+     * allowFlipLockGroupMask.
+     */
+    for (dispIndex = 0; dispIndex < ARRAY_LEN(pProposed->disp); dispIndex++) {
+        NvU32 head;
+        for (head = 0;
+             head < ARRAY_LEN(pProposed->disp[dispIndex].head);
+             head++) {
+
+            const NVProposedModeSetHwStateOneHead *pProposedHead =
+                &pProposed->disp[dispIndex].head[head];
+
+            if (pProposedHead->allowFlipLockGroup != 0) {
+                nvAssert(pProposedHead->allowFlipLockGroup <= 64);
+                allowFlipLockGroupMask |=
+                    NVBIT64(pProposedHead->allowFlipLockGroup - 1);
+            }
+        }
+    }
+
+    /* Find the first available allowFlipLockGroup value. */
+    allowFlipLockGroupMask = ~allowFlipLockGroupMask;
+    if (allowFlipLockGroupMask == 0) {
+        /*
+         * For this to be zero, pProposed would need to already
+         * have 64 unique allowFlipLockGroup values; 64 unique
+         * flipLock groups is highly unlikely.
+         */
+        nvAssert(!"allowFlipLockGroupMask is too small");
+        return 0;
+    }
+
+    allowFlipLockGroup = BIT_IDX_64(LOWESTBIT(allowFlipLockGroupMask));
+
+    /* Shift allowFlipLockGroup back to 1-based. */
+
+    return allowFlipLockGroup + 1;
+}
+
+
+/*!
+ * Get the NVHwModeTimingsEvo for the mode requested by the client.
+ *
+ * NvKmsSetModeOneHeadRequest::mode specifies mode timings in a
+ * hardware-neutral format, along with mode validation parameters and
+ * the dpyIdList on which to set the mode.  Validate the requested
+ * mode and compute NVHwModeTimingsEvo for it.
+ *
+ * \param[in]  pDispEvo        The disp of the dpyIdList and head.
+ * \param[in]  pRequestHead    The mode, mode validation parameters, dpyIdList,
+ *                             and head requested by the client.
+ * \param[out] pTimings        The mode timings to program in the hardware.
+ * \param[out] pInfoFrameState The infoframe state computed for the mode.
+ *
+ * \return     Return TRUE if the requested mode is valid and pTimings
+ *             could be assigned.  Otherwise, return FALSE.
+ */
+static NvBool
+GetHwModeTimings(const NVDispEvoRec *pDispEvo,
+                 const struct NvKmsSetModeOneHeadRequest *pRequestHead,
+                 NVHwModeTimingsEvo *pTimings,
+                 NVDispHeadInfoFrameStateEvoRec *pInfoFrameState)
+{
+    NVDpyEvoPtr pDpyEvo;
+
+    if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) {
+        return TRUE;
+    }
+
+    pDpyEvo = nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo);
+
+    if (pDpyEvo == NULL) {
+        return FALSE;
+    }
+
+    return nvValidateModeForModeset(pDpyEvo,
+                                    &pRequestHead->modeValidationParams,
+                                    &pRequestHead->mode,
+                                    &pRequestHead->viewPortSizeIn,
+                                    pRequestHead->viewPortOutSpecified ?
+                                        &pRequestHead->viewPortOut : NULL,
+                                    pTimings,
+                                    pInfoFrameState);
+}
+
+static NvBool ApplySyncptRegistration(
+    NVDevEvoRec *pDevEvo,
+    NvU32 head,
+    const struct NvKmsFlipCommonParams *pParams,
+    NVFlipEvoHwState *pFlipState)
+{
+    NvU32 layer;
+
+    if (!pDevEvo->supportsSyncpts) {
+        return TRUE;
+    }
+
+    /*!
+     * The modeset path should not request a pre-syncpt: a modeset updates
+     * all of the Core and Window method state together and waits for the
+     * Core completion notifier to signal, so it would not make progress.
+     * If any of the Window channels is waiting for a semaphore acquire,
+     * then this will stall the Core notifier as well, since the Core and
+     * Window channels are interlocked.
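+     *
+     * The loop below therefore fails the modeset if any layer's flip
+     * request specifies a pre-syncpt, and otherwise defers the actual
+     * registration to nvHandleSyncptRegistration().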
+ */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pParams->layer[layer].syncObjects.specified && + pParams->layer[layer].syncObjects.val.useSyncpt && + pParams->layer[layer].syncObjects.val.u.syncpts.pre.type != + NVKMS_SYNCPT_TYPE_NONE) { + nvAssert(!"Failing as pre-syncpt requested in modeset!"); + return FALSE; + } + } + + return nvHandleSyncptRegistration(pDevEvo, + head, + pParams, + pFlipState); +} + +static NvBool +GetColorSpaceAndColorRange( + const NVDispEvoPtr pDispEvo, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVProposedModeSetHwStateOneHead *pProposedHead) +{ + enum NvKmsDpyAttributeColorRangeValue requestedColorRange; + enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace; + NVDpyEvoRec *pOneArbitraryDpyEvo = + nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + + if (pRequestHead->colorSpaceSpecified) { + const NVDpyEvoRec *pDpyEvo; + + /* + * There could be multiple DPYs driven by this head. For each DPY, + * validate that the requested colorspace and color range is valid. + */ + FOR_ALL_EVO_DPYS(pDpyEvo, pRequestHead->dpyIdList, pDispEvo) { + if (!nvDpyValidateColorSpace(pDpyEvo, pRequestHead->colorSpace)) { + return FALSE; + } + } + + requestedColorSpace = pRequestHead->colorSpace; + } else { + requestedColorSpace = pOneArbitraryDpyEvo->requestedColorSpace; + } + + if (pRequestHead->colorRangeSpecified) { + requestedColorRange = pRequestHead->colorRange; + } else { + requestedColorRange = pOneArbitraryDpyEvo->requestedColorRange; + } + + /* + * Choose current colorSpace and colorRange based on the current mode + * timings and the requested color space and range. + */ + nvChooseCurrentColorSpaceAndRangeEvo(pProposedHead->timings.pixelDepth, + pProposedHead->timings.yuv420Mode, + requestedColorSpace, + requestedColorRange, + &pProposedHead->attributes.colorSpace, + &pProposedHead->attributes.colorRange); + /* + * When colorspace is specified in modeset request, it should + * match the proposed colorspace. + */ + if (pRequestHead->colorSpaceSpecified) { + NvBool ret = FALSE; + switch (pProposedHead->attributes.colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB); + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422); + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444); + break; + default: + break; + } + if (!ret) { + return ret; + } + } + + /* + * When color range is specified in modeset request, it should + * match the proposed color range. + */ + if (pRequestHead->colorRangeSpecified && + (pProposedHead->attributes.colorRange != pRequestHead->colorRange)) { + return FALSE; + } + + return TRUE; +} + +/*! + * Assign the NVProposedModeSetHwState structure. + * + * Use the current hardware state, and the requested changes in + * pRequest, to determine what the desired resulting hardware + * configuration for the device should be. + * + * \param[in] pDevEvo The device whose hardware state is to be changed. + * \param[in] pOpenDev The pOpenDev of the client doing the modeset. + * \param[in] pRequest The requested changes to apply to the hardware state. 
+ * \param[out] pReply The reply structure for the client; if we cannot + * apply some portion of pRequest, set the + * corresponding status field in pReply to a + * non-SUCCESS value. + * \param[out] pProposed The proposed resulting hardware state. + * + * \return If the requested changes could be applied to pProposed, + * return TRUE. If the requested changes could not be applied + * to pProposed, set the corresponding status field in pReply + * to a non-SUCCESS value and return FALSE. + */ +static NvBool +AssignProposedModeSetHwState(NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + NVProposedModeSetHwState *pProposed, + NvBool modesetOwnerChanged) +{ + NvU32 sd; + NVDispEvoPtr pDispEvo; + NvBool ret = TRUE; + NvU8 allowFlipLockGroup = 0; + + /* Initialize pProposed with the current hardware configuration. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + + const NVDispHeadStateEvoRec *pHeadState; + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposed->disp[sd].head[head]; + const NvU32 apiHead = nvHardwareHeadToApiHead(head); + + /* + * Case of invalid hardware head is handled inside + * nvInitFlipEvoHwState(). + */ + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pProposed->sd[sd].head[head].flip); + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + pHeadState = &pDispEvo->headState[head]; + + pProposedHead->timings = pHeadState->timings; + pProposedHead->dpyIdList = + pDispEvo->apiHeadState[apiHead].activeDpys; + pProposedHead->pConnectorEvo = pHeadState->pConnectorEvo; + pProposedHead->activeRmId = pHeadState->activeRmId; + pProposedHead->allowFlipLockGroup = pHeadState->allowFlipLockGroup; + pProposedHead->modeValidationParams = + pHeadState->modeValidationParams; + pProposedHead->attributes = + pDispEvo->apiHeadState[apiHead].attributes; + pProposedHead->changed = FALSE; + pProposedHead->hs10bpcHint = pHeadState->hs10bpcHint; + pProposedHead->audio = pHeadState->audio; + pProposedHead->infoFrame = + pDispEvo->apiHeadState[apiHead].infoFrame; + } + } + + /* Update pProposed with the requested changes from the client. */ + + if (pOpenDev == pDevEvo->modesetOwner || pOpenDev == pDevEvo->pNvKmsOpenDev) { + pProposed->allowHeadSurfaceInNvKms = pRequest->allowHeadSurfaceInNvKms; + } else { + pProposed->allowHeadSurfaceInNvKms = pDevEvo->allowHeadSurfaceInNvKms; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[sd]; + NvBool shutDownAllHeads = FALSE; + NvU32 head; + + if ((pRequest->requestedDispsBitMask & (1 << sd)) == 0) { + if (modesetOwnerChanged) { + shutDownAllHeads = TRUE; + } else { + continue; + } + } + + NVProposedModeSetHwStateOneDisp *pProposedDisp = + &pProposed->disp[sd]; + + pDispEvo = pDevEvo->pDispEvo[sd]; + + for (head = 0; head < pDevEvo->numHeads; head++) { + + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + NVDpyIdList newDpyIdList; + NvBool clearAndContinue = FALSE; + + if ((pRequestDisp->requestedHeadsBitMask & (1 << head)) == 0 || + shutDownAllHeads) { + if (modesetOwnerChanged) { + /* + * If the modeset owner is changing, implicitly shut down + * other heads not included in requestedHeadsBitMask. 
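+                     *
+                     * Assigning an empty dpyIdList below is what triggers the
+                     * implicit shut-down: the head then takes the same path
+                     * as an explicitly disabled head.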
+                     */
+                    newDpyIdList = nvEmptyDpyIdList();
+                } else {
+                    /*
+                     * Otherwise, just leave the head alone so it keeps its
+                     * current configuration.
+                     */
+                    continue;
+                }
+            } else {
+                newDpyIdList = pRequestHead->dpyIdList;
+            }
+
+            /*
+             * If newDpyIdList is empty, or no valid dpy is found in
+             * newDpyIdList, then the head should be disabled.
+             * Clear pProposedHead, so that no state leaks into the new
+             * configuration.
+             */
+            if (nvDpyIdListIsEmpty(newDpyIdList)) {
+                clearAndContinue = TRUE;
+            } else {
+                const NVDpyEvoRec *pDpyEvo =
+                    nvGetOneArbitraryDpyEvo(newDpyIdList, pDispEvo);
+                if (pDpyEvo != NULL) {
+                    pProposedHead->pConnectorEvo = pDpyEvo->pConnectorEvo;
+                    pProposedHead->changed = TRUE;
+                } else {
+                    clearAndContinue = TRUE;
+                }
+            }
+
+
+            if (clearAndContinue) {
+                nvkms_memset(pProposedHead, 0, sizeof(*pProposedHead));
+                pProposedHead->changed = TRUE;
+                continue;
+            }
+
+            pProposedHead->dpyIdList = newDpyIdList;
+            pProposedHead->activeRmId =
+                nvRmAllocDisplayId(pDispEvo, pProposedHead->dpyIdList);
+            if (pProposedHead->activeRmId == 0x0) {
+                /* XXX Need separate error code? */
+                pReply->disp[sd].head[head].status =
+                    NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY;
+                ret = FALSE;
+                continue;
+            }
+
+            /* Verify that the requested dpys are valid on this head. */
+            if ((pProposedHead->pConnectorEvo->validHeadMask & NVBIT(head)) == 0) {
+                pReply->disp[sd].head[head].status =
+                    NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY;
+                ret = FALSE;
+                continue;
+            }
+
+            /*
+             * Get the requested modetimings for this head.  If that
+             * fails, record in the reply that getting the mode
+             * failed.  In the case of failure, continue to the next
+             * head so that if additional heads fail, we can report
+             * more complete failure information to the client.
+             */
+            if (!GetHwModeTimings(pDispEvo, pRequestHead,
+                                  &pProposedHead->timings, &pProposedHead->infoFrame)) {
+                pReply->disp[sd].head[head].status =
+                    NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE;
+                ret = FALSE;
+                continue;
+            }
+            pProposedHead->infoFrame.hdTimings =
+                nvEvoIsHDQualityVideoTimings(&pProposedHead->timings);
+
+            pProposedHead->allowFlipLockGroup = 0;
+            pProposedHead->modeValidationParams =
+                pRequestHead->modeValidationParams;
+            pProposedHead->allowGsync = pRequestHead->allowGsync;
+            pProposedHead->allowAdaptiveSync = pRequestHead->allowAdaptiveSync;
+            pProposedHead->vrrOverrideMinRefreshRate =
+                pRequestHead->vrrOverrideMinRefreshRate;
+
+            if (!GetColorSpaceAndColorRange(pDispEvo, pRequestHead, pProposedHead)) {
+                pReply->disp[sd].head[head].status =
+                    NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE;
+                ret = FALSE;
+                continue;
+            }
+
+            pProposedHead->attributes.digitalSignal =
+                nvGetDefaultDpyAttributeDigitalSignalValue(pProposedHead->pConnectorEvo);
+            if (pProposedHead->timings.hdmiFrlConfig.frlRate !=
+                HDMI_FRL_DATA_RATE_NONE) {
+                nvAssert(pProposedHead->attributes.digitalSignal ==
+                         NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS);
+                pProposedHead->attributes.digitalSignal =
+                    NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_HDMI_FRL;
+            }
+
+            {
+                NVDpyEvoRec *pDpyEvo =
+                    nvGetOneArbitraryDpyEvo(pProposedHead->dpyIdList,
+                                            pDispEvo);
+
+                pProposedHead->attributes.dvc =
+                    pDpyEvo->currentAttributes.dvc;
+
+                /* Image sharpening is available when scaling is enabled. */
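+                /*
+                 * nvIsImageSharpeningAvailable() derives availability from
+                 * the scaling configuration in the proposed viewport.
+                 */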
+                pProposedHead->attributes.imageSharpening.available =
+                    nvIsImageSharpeningAvailable(&pProposedHead->timings.viewPort);
+                pProposedHead->attributes.imageSharpening.value =
+                    pDpyEvo->currentAttributes.imageSharpening.value;
+            }
+
+            /*
+             * modesetOwnerChanged implies that there was a modeset
+             * ownership change since the last modeset.  If the input/output
+             * LUTs are not specified by the new modeset owner, then keep
+             * them disabled by default.
+             */
+            if (modesetOwnerChanged) {
+                pProposedHead->lut = pRequestHead->lut;
+
+                if (!pRequestHead->lut.input.specified) {
+                    pProposedHead->lut.input.specified = TRUE;
+                    pProposedHead->lut.input.end = 0;
+                }
+
+                if (!pRequestHead->lut.output.specified) {
+                    pProposedHead->lut.output.specified = TRUE;
+                    pProposedHead->lut.output.enabled = FALSE;
+                }
+            } else if (pRequestHead->lut.input.specified) {
+                pProposedHead->lut = pRequestHead->lut;
+            } else {
+                pProposedHead->lut.input.specified = FALSE;
+            }
+
+            NVFlipEvoHwState *pFlip =
+                &pProposed->sd[sd].head[head].flip;
+
+            /*
+             * Clear the flipStates of all layers:
+             *
+             * The current flipState of the main layer may still contain
+             * old surfaces (e.g., headSurface) that are no longer
+             * desirable or compatible with the new modeset
+             * configuration.
+             *
+             * Function ApplyProposedModeSetHwStateOneHeadShutDown() clears
+             * pSdHeadState and disables all layers.  It is not possible to
+             * re-apply the existing flipStates because hardware releases
+             * semaphores when layers get disabled; this results in a stuck
+             * channel if you re-apply the existing flipState which has
+             * the old semaphore values.
+             */
+
+            nvClearFlipEvoHwState(pFlip);
+
+            if (pRequest->commit) {
+                NvU32 layer;
+
+                for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
+                    pFlip->dirty.layer[layer] = TRUE;
+                }
+            }
+
+            if (!ApplySyncptRegistration(
+                    pDevEvo,
+                    head,
+                    &pRequest->disp[sd].head[head].flip,
+                    pFlip)) {
+                pReply->disp[sd].head[head].status =
+                    NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP;
+                ret = FALSE;
+                continue; /* next head */
+            }
+            if (!nvUpdateFlipEvoHwState(pOpenDev,
+                                        pDevEvo,
+                                        sd,
+                                        head,
+                                        &pRequestHead->flip,
+                                        pFlip,
+                                        FALSE /* allowVrr */,
+                                        &pProposedHead->timings.viewPort.possibleUsage)) {
+                pReply->disp[sd].head[head].status =
+                    NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP;
+                ret = FALSE;
+                continue; /* next head */
+            }
+
+            /*
+             * If the modeset is flipping to a depth 30 surface, record this as
+             * a hint to headSurface, so it can also allocate its surfaces at
+             * depth 30.
+             */
+            {
+                const NVSurfaceEvoRec *pSurfaceEvo =
+                    pFlip->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT];
+
+                pProposedHead->hs10bpcHint =
+                    (pSurfaceEvo != NULL) &&
+                    (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA2B10G10R10 ||
+                     pSurfaceEvo->format == NvKmsSurfaceMemoryFormatX2B10G10R10);
+            }
+
+            /*
+             * The EVO3 hal simulates USE_CORE_LUT behavior.
+             * The NVDisplay window channel allows the input LUT to change
+             * on immediate flips; therefore, force-disable tearing
+             * if a LUT is specified.
+             *
+             * XXX NVKMS TODO: Implement separate input LUT programming for
+             * base and overlay layers and remove this code block.
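+             *
+             * Until then, a modeset that specifies a LUT forces the main
+             * layer flip programmed below to be non-tearing.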
+ */ + if ((pRequestHead->lut.input.specified || + pRequestHead->lut.output.specified) && + !pDevEvo->hal->caps.supportsCoreLut) { + pFlip->layer[NVKMS_MAIN_LAYER].tearing = FALSE; + } + + /* Construct audio state */ + { + NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedHead->dpyIdList, + pDispEvo); + + nvHdmiDpConstructHeadAudioState(pProposedHead->activeRmId, + pDpyEvo, &pProposedHead->audio); + } + } /* head */ + } /* pDispEvo */ + + /* Assign allowFlipLockGroup for the heads specified in the request. */ + + allowFlipLockGroup = GetAvailableAllowFlipLockGroupValue(pProposed); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NvU32 head; + + if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) { + continue; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequest->disp[sd].head[head]; + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposed->disp[sd].head[head]; + + if ((pRequest->disp[sd].requestedHeadsBitMask & + NVBIT(head)) == 0) { + continue; + } + + if (pRequestHead->allowFlipLock) { + pProposedHead->allowFlipLockGroup = allowFlipLockGroup; + } + } + } + + return ret; +} + + +/*! + * Validate the proposed configuration on the specified disp using IMP. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposed The requested configuration. + * \param[in] pProposedDisp The requested configuration for this disp. + * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposedDisp passes IMP, return TRUE. Otherwise, + * return FALSE. + */ +static NvBool +ValidateProposedModeSetHwStateOneDispImp(NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState + *pProposed, + NVProposedModeSetHwStateOneDisp + *pProposedDisp, + NVModeSetWorkArea *pWorkArea) +{ + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + NvBool skipImpCheck = TRUE, requireBootClocks = FALSE; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head, downgradePossibleHeadsBitMask = 0; + NVEvoReallocateBandwidthMode reallocBandwidth = pDevEvo->isSOCDisplay ? + NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE : + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE; + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + const NvBool skipImpCheckThisHead = + (pProposedHead->modeValidationParams.overrides & + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0; + + const NvBool requireBootClocksThisHead = + (pProposedHead->modeValidationParams.overrides & + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS) != 0; + + /* + * Don't try to downgrade heads which are not marked as changed. + * This could lead to unchanged/not-requested heads hogging all + * the disp bandwidth and preventing otherwise possible modesets, + * but it fixes the cases where we could have downgraded unchanged/ + * not-requested heads without NVKMS clients knowing about it. + * Even if we add some mechanism through the modeset reply to notify + * clients about such a change, not all clients might be in a position + * to handle it. This seems to be a fair trade-off for Orin, as by + * default all heads are initialized with minimal usage bounds. 
+         */
+        if (pProposedHead->changed) {
+            downgradePossibleHeadsBitMask |= NVBIT(head);
+        }
+
+        if (pProposedHead->pConnectorEvo == NULL) {
+            continue;
+        }
+
+        timingsParams[head].pConnectorEvo = pProposedHead->pConnectorEvo;
+        timingsParams[head].activeRmId = pProposedHead->activeRmId;
+        timingsParams[head].pTimings = &pProposedHead->timings;
+        timingsParams[head].pUsage =
+            &pProposedHead->timings.viewPort.guaranteedUsage;
+
+        skipImpCheck = skipImpCheck && skipImpCheckThisHead;
+        requireBootClocks = requireBootClocks || requireBootClocksThisHead;
+    }
+
+    if (skipImpCheck &&
+        reallocBandwidth == NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE) {
+        return TRUE;
+    }
+
+    if (!nvValidateImpOneDispDowngrade(pDispEvo, timingsParams,
+                                       requireBootClocks,
+                                       reallocBandwidth,
+                                       downgradePossibleHeadsBitMask)) {
+        return FALSE;
+    }
+
+    if (pDevEvo->isSOCDisplay) {
+        NvBool ret;
+        struct NvKmsUsageBounds *guaranteedAndProposed =
+            nvCalloc(1, sizeof(*guaranteedAndProposed) *
+                     NVKMS_MAX_HEADS_PER_DISP);
+        if (guaranteedAndProposed == NULL) {
+            return FALSE;
+        }
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            NVProposedModeSetHwStateOneHead *pProposedHead =
+                &pProposedDisp->head[head];
+            const struct NvKmsUsageBounds *pProposedUsage;
+
+            if (pProposedHead->pConnectorEvo == NULL) {
+                continue;
+            }
+
+            if (pProposedHead->changed) {
+                pProposedUsage = &pProposed->sd[0].head[head].flip.usage;
+            } else {
+                pProposedUsage =
+                    &pDevEvo->gpus[0].headState[head].preallocatedUsage;
+            }
+
+            guaranteedAndProposed[head] = nvUnionUsageBounds(
+                &pProposedHead->timings.viewPort.guaranteedUsage,
+                pProposedUsage);
+            timingsParams[head].pUsage = &guaranteedAndProposed[head];
+        }
+
+        ret = nvValidateImpOneDisp(pDispEvo, timingsParams,
+                                   requireBootClocks,
+                                   reallocBandwidth,
+                                   &pWorkArea->postModesetIsoBandwidthKBPS,
+                                   &pWorkArea->postModesetDramFloorKBPS);
+
+        nvFree(guaranteedAndProposed);
+
+        if (!ret) {
+            return FALSE;
+        }
+
+        nvScheduleLowerDispBandwidthTimer(pDevEvo);
+    }
+
+    return TRUE;
+}
+
+static NvBool SkipDisplayPortBandwidthCheck(
+    const NVProposedModeSetHwStateOneHead *pProposedHead)
+{
+    return (pProposedHead->modeValidationParams.overrides &
+            NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0;
+}
+
+static NvBool DowngradeDpPixelDepth(
+    NVDispEvoPtr pDispEvo,
+    NVProposedModeSetHwStateOneDisp *pProposedDisp,
+    const NVConnectorEvoRec *pConnectorEvo)
+{
+    NvU32 head;
+
+    /*
+     * In the DP-MST case, many heads can share the same connector and DP
+     * bandwidth; therefore, it is necessary to validate and downgrade the
+     * DP pixel depth across all heads sharing the same connector before
+     * retrying.
+     */
+    for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+        NVProposedModeSetHwStateOneHead *pProposedHead =
+            &pProposedDisp->head[head];
+        NVHwModeTimingsEvoPtr pTimings = &pProposedHead->timings;
+
+        if (SkipDisplayPortBandwidthCheck(pProposedHead)) {
+            continue;
+        }
+
+        if ((pProposedHead->pConnectorEvo == pConnectorEvo) &&
+            nvDowngradeHwModeTimingsDpPixelDepthEvo(
+                pTimings,
+                pProposedHead->attributes.colorSpace)) {
+            return TRUE;
+        }
+    }
+
+    return FALSE;
+}
+
+/*!
+ * Validate the DisplayPort bandwidth of the proposed disp configuration.
+ *
+ * \param[in]  pDispEvo      The disp to which pProposedDisp is to be applied.
+ * \param[in]  pProposedDisp The requested configuration.
+ *
+ * \return     If pProposedDisp passes the DP bandwidth check, return
+ *             TRUE.  Otherwise, return FALSE.
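+ *
+ * Validation is iterative: whenever nvDPLibValidateTimings() rejects a
+ * head, DowngradeDpPixelDepth() lowers the pixel depth of every head
+ * sharing that connector, and validation of the whole disp is retried,
+ * until it passes or no further downgrade is possible.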
+ */ +static NvBool ValidateProposedModeSetHwStateOneDispDPlib( + NVDispEvoPtr pDispEvo, + NVProposedModeSetHwStateOneDisp *pProposedDisp) +{ + NvU32 head; + NvBool bResult = TRUE, bTryAgain = FALSE; + + +tryAgain: + + bTryAgain = FALSE; + bResult = TRUE; + + nvDPBeginValidation(pDispEvo); + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + NVHwModeTimingsEvoPtr pTimings = &pProposedHead->timings; + + if ((pProposedHead->pConnectorEvo == NULL) || + SkipDisplayPortBandwidthCheck(pProposedHead)) { + continue; + } + + bResult = !!nvDPLibValidateTimings(pDispEvo, + head, + pProposedHead->activeRmId, + pProposedHead->dpyIdList, + pProposedHead->attributes.colorSpace, + &pProposedHead->modeValidationParams, + pTimings); + + if (!bResult) { + if (DowngradeDpPixelDepth(pDispEvo, + pProposedDisp, + pProposedHead->pConnectorEvo)) { + bTryAgain = TRUE; + } + + /* + * Cannot downgrade pixelDepth further -- + * This proposed mode-set is not possible on this DP link, so fail. + */ + + break; + } + } + + bResult = !!nvDPEndValidation(pDispEvo) && bResult; + + if (bTryAgain) { + goto tryAgain; + } + + if (bResult) { + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + if ((pProposedHead->pConnectorEvo == NULL) || + !nvConnectorUsesDPLib(pProposedHead->pConnectorEvo)) { + pProposedHead->pDpLibModesetState = NULL; + continue; + } + + pProposedHead->pDpLibModesetState = + nvDPLibCreateModesetState(pDispEvo, + head, + pProposedHead->activeRmId, + pProposedHead->dpyIdList, + pProposedHead->attributes.colorSpace, + &pProposedHead->timings); + if (pProposedHead->pDpLibModesetState == NULL) { + return FALSE; + } + } + } + + return bResult; +} + +static void VBlankCallbackDeferredWork(void *dataPtr, NvU32 data32) +{ + NVVBlankCallbackPtr pVBlankCallbackTmp = NULL; + NVVBlankCallbackPtr pVBlankCallback = NULL; + NVDispEvoPtr pDispEvo = dataPtr; + NvU32 head = data32; + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + nvListForEachEntry_safe(pVBlankCallback, + pVBlankCallbackTmp, + &pDispEvo->headState[head].vblankCallbackList, + vblankCallbackListEntry) { + pVBlankCallback->pCallback(pDispEvo, head, pVBlankCallback); + } +} + +static void VBlankCallback(void *pParam1, void *pParam2) +{ + const NvU32 head = (NvU32)(NvUPtr)pParam2; + + (void) nvkms_alloc_timer_with_ref_ptr( + VBlankCallbackDeferredWork, + pParam1, /* ref_ptr to pDispEvo */ + head, /* dataU32 */ + 0); /* timeout: schedule the work immediately */ +} + +/*! + * Validate the proposed configuration on the specified disp. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposedDisp The requested configuration. + * \param[out] pReplyDisp The reply structure for the client. + * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposedDisp is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReplyDisp to non-SUCCESS, + * and return FALSE. 
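+ *
+ * The checks performed below, in order: the requested connectors can be
+ * driven simultaneously, no dpy is used by multiple heads, the flipping
+ * state is valid, ViewPortIn and scaler taps are achievable, the
+ * configuration fits DisplayPort bandwidth, dithering is chosen from the
+ * finalized pixel depth, and IMP passes.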
+ */
+static NvBool
+ValidateProposedModeSetHwStateOneDisp(
+    NVDispEvoPtr pDispEvo,
+    const NVProposedModeSetHwState *pProposed,
+    NVProposedModeSetHwStateOneDisp *pProposedDisp,
+    struct NvKmsSetModeOneDispReply *pReplyDisp,
+    NVModeSetWorkArea *pWorkArea)
+{
+    const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NVDpyIdList dpyIdList;
+    NvU32 head;
+
+    /*
+     * Check that the requested configuration of connectors can be
+     * driven simultaneously.
+     */
+    dpyIdList = nvEmptyDpyIdList();
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        NVDpyEvoPtr pDpyEvo;
+        FOR_ALL_EVO_DPYS(pDpyEvo,
+                         pProposedDisp->head[head].dpyIdList, pDispEvo) {
+            dpyIdList = nvAddDpyIdToDpyIdList(pDpyEvo->pConnectorEvo->displayId,
+                                              dpyIdList);
+        }
+    }
+
+    if (!nvRmIsPossibleToActivateDpyIdList(pDispEvo, dpyIdList)) {
+        pReplyDisp->status = NVKMS_SET_MODE_ONE_DISP_STATUS_INCOMPATIBLE_DPYS;
+        return FALSE;
+    }
+
+    /*
+     * Check that no dpyId is used by multiple heads.
+     */
+    dpyIdList = nvEmptyDpyIdList();
+    for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+        /*
+         * Intersect the proposed dpys for this head with the
+         * accumulated list of dpys for this disp; if the intersection
+         * is not empty, a dpy is proposed to be used on multiple
+         * heads.
+         */
+        NVDpyIdList proposedDpyIdList =
+            pProposedDisp->head[head].dpyIdList;
+        NVDpyIdList intersectedDpyIdList =
+            nvIntersectDpyIdListAndDpyIdList(dpyIdList, proposedDpyIdList);
+
+        if (!nvDpyIdListIsEmpty(intersectedDpyIdList)) {
+            pReplyDisp->status = NVKMS_SET_MODE_ONE_DISP_STATUS_DUPLICATE_DPYS;
+            return FALSE;
+        }
+
+        dpyIdList = nvAddDpyIdListToDpyIdList(dpyIdList, proposedDpyIdList);
+    }
+
+    /*
+     * Check that the requested flipping state is valid.
+     */
+
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+
+        if (!pProposedDisp->head[head].changed) {
+            continue;
+        }
+
+        if (nvDpyIdListIsEmpty(pProposedDisp->head[head].dpyIdList)) {
+            continue;
+        }
+
+        if (!nvValidateFlipEvoHwState(
+                pDevEvo,
+                head,
+                &pProposedDisp->head[head].timings,
+                &pProposed->sd[pDispEvo->displayOwner].head[head].flip)) {
+            pReplyDisp->head[head].status =
+                NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP;
+            return FALSE;
+        }
+    }
+
+    /*
+     * Check ViewPortIn dimensions and ensure valid h/vTaps can be assigned.
+     */
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+
+        /* XXX assume the gpus have equal capabilities */
+        const NVEvoScalerCaps *pScalerCaps =
+            &pDevEvo->gpus[0].capabilities.head[head].scalerCaps;
+        const NVHwModeTimingsEvoPtr pTimings = &pProposedDisp->head[head].timings;
+
+        if (!nvValidateHwModeTimingsViewPort(pDevEvo, pScalerCaps, pTimings,
+                                             &dummyInfoString)) {
+            pReplyDisp->head[head].status =
+                NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE;
+            return FALSE;
+        }
+    }
+
+    /*
+     * Check that the configuration fits DisplayPort bandwidth constraints.
+     */
+    if (!ValidateProposedModeSetHwStateOneDispDPlib(pDispEvo, pProposedDisp)) {
+        pReplyDisp->status =
+            NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_DISPLAY_PORT_BANDWIDTH_CHECK;
+        return FALSE;
+    }
+
+    /*
+     * The pixelDepth value, which is required to choose the dithering
+     * configuration, is finalized as part of the DisplayPort bandwidth
+     * validation.
+     */
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+        NVProposedModeSetHwStateOneHead *pProposedHead =
+            &pProposedDisp->head[head];
+        NVDpyEvoRec *pDpyEvo =
+            nvGetOneArbitraryDpyEvo(pProposedHead->dpyIdList,
+                                    pDispEvo);
+
+        if (!pProposedHead->changed || (pDpyEvo == NULL)) {
+            continue;
+        }
+
+        nvChooseDitheringEvo(pDpyEvo->pConnectorEvo,
+                             pProposedHead->timings.pixelDepth,
+                             &pDpyEvo->requestedDithering,
+                             &pProposedHead->attributes.dithering);
+    }
+
+    /*
+     * Check that the configuration passes IMP.
+     */
+    if (!ValidateProposedModeSetHwStateOneDispImp(pDispEvo, pProposed,
+                                                  pProposedDisp, pWorkArea)) {
+        pReplyDisp->status =
+            NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_EXTENDED_GPU_CAPABILITIES_CHECK;
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Validate the proposed configuration.
+ *
+ * \param[in]  pDevEvo   The device to which pProposed is to be applied.
+ * \param[in]  pProposed The requested configuration.
+ * \param[out] pReply    The reply structure for the client.
+ * \param[out] pWorkArea The scratch space for the current modeset request.
+ *
+ * \return     If pProposed is valid, return TRUE.  Otherwise, set the
+ *             appropriate status fields in pReply to non-SUCCESS,
+ *             and return FALSE.
+ */
+static NvBool
+ValidateProposedModeSetHwState(NVDevEvoPtr pDevEvo,
+                               NVProposedModeSetHwState *pProposed,
+                               struct NvKmsSetModeReply *pReply,
+                               NVModeSetWorkArea *pWorkArea)
+{
+    NVDispEvoPtr pDispEvo;
+    NvU32 dispIndex;
+    NvBool ret = FALSE;
+    NVProposedModeSetHwState *pActual =
+        nvPreallocGet(pDevEvo, PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE,
+                      sizeof(*pActual));
+
+    /*
+     * Copy the proposed modeset to a scratch area.  During the process below,
+     * we may modify some parts of the timings.  If all validation succeeds,
+     * then we'll copy the modified version back out; if not, we don't want to
+     * touch the input.
+     */
+    nvkms_memcpy(pActual, pProposed, sizeof(*pProposed));
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+
+        NVProposedModeSetHwStateOneDisp *pProposedDisp =
+            &pActual->disp[dispIndex];
+        struct NvKmsSetModeOneDispReply *pReplyDisp;
+
+        pReplyDisp = &pReply->disp[dispIndex];
+
+        if (!ValidateProposedModeSetHwStateOneDisp(pDispEvo,
+                                                   pActual,
+                                                   pProposedDisp,
+                                                   pReplyDisp,
+                                                   pWorkArea)) {
+            goto done;
+        }
+    }
+
+    nvkms_memcpy(pProposed, pActual, sizeof(*pProposed));
+    ret = TRUE;
+
+done:
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE);
+
+    return ret;
+}
+
+/*!
+ * Ensure there is an SOR assigned for this pConnectorEvo, for use by
+ * the pending modeset.
+ *
+ * In DP-MST, multiple heads may use the same pConnectorEvo, and they
+ * should use the same SOR.
+ *
+ * When we call nvAssignSOREvo(), we have to tell RM which SORs have
+ * already been assigned and need to be excluded from consideration for
+ * the new SOR assignment request.
+ */
+static void AssignSor(NVModeSetWorkArea *pWorkArea,
+                      NVConnectorEvoPtr pConnectorEvo)
+{
+    const NvU32 sd = pConnectorEvo->pDispEvo->displayOwner;
+
+    if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+        // Nothing to do!
+        return;
+    }
+
+    /* If an OR has already been assigned for this connector, we are done. */
+    if (nvDpyIdIsInDpyIdList(
+            pConnectorEvo->displayId,
+            pWorkArea->sd[sd].sorAssignedConnectorsList)) {
+        nvAssert(pConnectorEvo->or.mask != 0x0);
+        return;
+    }
+
+    /*
+     * We keep a record of all the SORs assigned for this modeset, so that
+     * it can be used as the sorExcludeMask argument to
+     * nvAssignSOREvo().
+     */
+    if (nvAssignSOREvo(
+            pConnectorEvo,
+            pWorkArea->sd[sd].assignedSorMask /* sorExcludeMask */)) {
+        nvAssert(pConnectorEvo->or.mask != 0x0);
+
+        pWorkArea->sd[sd].sorAssignedConnectorsList =
+            nvAddDpyIdToDpyIdList(
+                pConnectorEvo->displayId,
+                pWorkArea->sd[sd].sorAssignedConnectorsList);
+        pWorkArea->sd[sd].assignedSorMask |= pConnectorEvo->or.mask;
+    } else {
+        nvAssert(!"Failed to assign SOR, this failure might cause hang!");
+    }
+}
+
+static void AssignProposedUsageOneHead(
+    NVDevEvoPtr pDevEvo,
+    const NVProposedModeSetHwState *pProposed,
+    NvU32 head)
+{
+    const NVProposedModeSetHwStateOneHead *pProposedHead =
+        &pProposed->disp[0].head[head];
+    const NVProposedModeSetHwStateOneSubDev *pProposedSd =
+        &pProposed->sd[0];
+
+    if (!pDevEvo->isSOCDisplay || (pProposedHead->pConnectorEvo == NULL)) {
+        return;
+    }
+
+    pDevEvo->gpus[0].headState[head].preallocatedUsage =
+        pProposedSd->head[head].flip.usage;
+}
+
+static NvBool IsProposedModeSetHwStateOneDispIncompatibleWithDpy
+(
+    NVDispEvoPtr pDispEvo,
+    const NVProposedModeSetHwStateOneDisp *pProposedDisp,
+    const NVConnectorEvoRec *pConnectorEvo
+)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 head;
+
+    for (head = 0; head < pDevEvo->numHeads; head++) {
+
+        const NVProposedModeSetHwStateOneHead *pProposedHead =
+            &pProposedDisp->head[head];
+
+        if (!pProposedHead->changed) {
+            continue;
+        }
+
+        /*
+         * DDC partners are incompatible with each other; only one should be
+         * active at a time.
+         */
+        if ((pProposedHead->pConnectorEvo != NULL) &&
+            nvDpyIdIsInDpyIdList(pProposedHead->pConnectorEvo->displayId,
+                                 pConnectorEvo->ddcPartnerDpyIdsList)) {
+            return TRUE;
+        }
+    }
+
+    return FALSE;
+}
+
+static void
+KickoffModesetUpdateState(
+    NVDispEvoPtr pDispEvo,
+    NVEvoModesetUpdateState *modesetUpdateState)
+{
+    if (!nvDpyIdListIsEmpty(modesetUpdateState->connectorIds)) {
+        NVConnectorEvoRec *pConnectorEvo;
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId,
+                                      modesetUpdateState->connectorIds)) {
+                continue;
+            }
+
+            if (pConnectorEvo->pDpLibConnector != NULL) {
+                nvDPPreSetMode(pConnectorEvo->pDpLibConnector,
+                               modesetUpdateState);
+            } else if (nvConnectorIsDPSerializer(pConnectorEvo)) {
+                nvDPSerializerPreSetMode(pDispEvo, pConnectorEvo);
+            }
+        }
+    }
+
+    nvDoIMPUpdateEvo(pDispEvo,
+                     &modesetUpdateState->updateState);
+
+    if (!nvDpyIdListIsEmpty(modesetUpdateState->connectorIds)) {
+        NVConnectorEvoRec *pConnectorEvo;
+
+        FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+            if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId,
+                                      modesetUpdateState->connectorIds)) {
+                continue;
+            }
+
+            if (pConnectorEvo->pDpLibConnector != NULL) {
+                nvDPPostSetMode(pConnectorEvo->pDpLibConnector);
+            } else if (nvConnectorIsDPSerializer(pConnectorEvo)) {
+                nvDPSerializerPostSetMode(pDispEvo, pConnectorEvo);
+            }
+        }
+    }
+
+    *modesetUpdateState =
+        (NVEvoModesetUpdateState) { };
+}
+
+/*!
+ * Determine if the display devices driven by this head are incompatible with
+ * the newly activated display devices.
+ */
+static NvBool
+IsProposedModeSetHwStateOneHeadIncompatible(
+    NVDispEvoPtr pDispEvo,
+    NvU32 head,
+    const NVProposedModeSetHwStateOneDisp *pProposedDisp)
+{
+    const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head];
+    NvBool isIncompatible;
+
+    /*
+     * DisplayPort devices require an EVO update when detaching the head
+     * from the SOR, because DPlib performs link-training to power down
+     * the link.  So, always consider DisplayPort as incompatible.
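+     *
+     * Beyond that, any newly activated display that is a DDC partner of
+     * this head's connector also makes the head incompatible, per
+     * IsProposedModeSetHwStateOneDispIncompatibleWithDpy().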
+ */ + + isIncompatible = + nvConnectorUsesDPLib(pHeadState->pConnectorEvo) || + IsProposedModeSetHwStateOneDispIncompatibleWithDpy(pDispEvo, + pProposedDisp, + pHeadState->pConnectorEvo); + + return isIncompatible; +} + +static void DisableActiveCoreRGSyncObjects(NVDevEvoPtr pDevEvo, + NVDispHeadStateEvoPtr pHeadState, + NvU32 head, + NVEvoUpdateState *pUpdateState) +{ + for (int i = 0; i < pHeadState->numVblankSyncObjectsCreated; i++) { + if (pHeadState->vblankSyncObjects[i].enabled) { + /* hCtxDma of 0 indicates Disable. */ + pDevEvo->hal->ConfigureVblankSyncObject( + pDevEvo, + 0, /* rasterLine */ + head, + i, + 0, /* hCtxDma */ + pUpdateState); + pHeadState->vblankSyncObjects[i].enabled = FALSE; + } + } +} + +/*! + * Send methods to shut down a head + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] head The head to consider. + * \param[in] pProposedDisp The requested configuration of the display + * \param[in/out] modesetUpdateState Structure tracking channels which need to + * be updated/kicked off + */ +static void +ApplyProposedModeSetHwStateOneHeadShutDown( + NVDispEvoPtr pDispEvo, + NvU32 head, + const + NVProposedModeSetHwStateOneDisp + *pProposedDisp, + NVModeSetWorkArea *pWorkArea) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoPtr pHeadState; + NVDpyEvoPtr pDpyEvo; + const NvU32 sd = pDispEvo->displayOwner; + NvU32 apiHead = nvHardwareHeadToApiHead(head); + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + + /* + * If nothing changed about this head's configuration, then we + * should not shut it down. + */ + if (!pProposedDisp->head[head].changed) { + return; + } + + /* + * Otherwise, go through the shutdown process for any head that + * changed. If NVProposedModeSetHwStateOneHead::dpyIdList is + * empty, then we'll leave it shut down. If it is non-empty, then + * ApplyProposedModeSetHwStateOneHead{Pre,Post}Update() will + * update the head with its new configuration. + */ + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + pHeadState = &pDispEvo->headState[head]; + pDpyEvo = nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + + /* + * Identify and disable any active core RG sync objects. + * + * Note: the disable occurs at the hardware level; this intentionally does + * not clear the software state tracking the existence of these sync + * objects, which will be re-enabled at the hardware level in + * ApplyProposedModeSetHwStateOneHeadPreUpdate(), if the given head will be + * active after the modeset. + */ + DisableActiveCoreRGSyncObjects(pDevEvo, pHeadState, head, + &pWorkArea->modesetUpdateState.updateState); + + nvDisable3DVisionAegis(pDpyEvo); + + nvHdmiDpEnableDisableAudio(pDispEvo, head, FALSE /* enable */); + + /* Cancel any pending LUT updates. */ + nvCancelLutUpdateEvo(pDispEvo, head); + + nvEvoDetachConnector(pHeadState->pConnectorEvo, head, &pWorkArea->modesetUpdateState); + + /* Clear software shadow state. 
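+     *
+     * This covers the active dpys, timings, activeRmId (saved in the work
+     * area and freed after the modeset completes), flip-lock group, audio
+     * state, and mode validation parameters.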
*/ + + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList = + nvAddDpyIdListToDpyIdList( + pApiHeadState->activeDpys, + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList); + pApiHeadState->activeDpys = nvEmptyDpyIdList(); + pHeadState->pConnectorEvo = NULL; + + pHeadState->bypassComposition = FALSE; + nvkms_memset(&pHeadState->timings, 0, sizeof(pHeadState->timings)); + + /* Track old activeRmId and free it after end modeset */ + pWorkArea->sd[pDispEvo->displayOwner].head[head].oldActiveRmId = + pHeadState->activeRmId; + pHeadState->activeRmId = 0; + + pHeadState->allowFlipLockGroup = 0; + + nvkms_memset(&pHeadState->audio, 0, sizeof(pHeadState->audio)); + + nvkms_memset(&pHeadState->modeValidationParams, 0, + sizeof(pHeadState->modeValidationParams)); + + nvkms_memset(&pDevEvo->gpus[sd].headState[head], 0, + sizeof(pDevEvo->gpus[sd].headState[head])); + + pDpyEvo->apiHead = NV_INVALID_HEAD; +} + +static void +ApplyProposedModeSetHwStateOneDispFlip( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + const NvU32 sd = pDispEvo->displayOwner; + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedHead->changed) { + continue; + } + + /* Check for disabled heads. */ + if (pProposedHead->pConnectorEvo == NULL) { + continue; + } + + nvSetUsageBoundsEvo(pDevEvo, sd, head, + &pProposed->sd[sd].head[head].flip.usage, + pUpdateState); + + nvFlipEvoOneHead(pDevEvo, sd, head, + &pProposed->sd[sd].head[head].flip, + FALSE /* allowFlipLock */, + pUpdateState); + } +} + +static void ReenableActiveCoreRGSyncObjects(NVDevEvoPtr pDevEvo, + NVDispHeadStateEvoPtr pHeadState, + NvU32 head, + NVEvoUpdateState *pUpdateState) +{ + for (int i = 0; i < pHeadState->numVblankSyncObjectsCreated; i++) { + if (pHeadState->vblankSyncObjects[i].inUse) { + pDevEvo->hal->ConfigureVblankSyncObject( + pDevEvo, + pHeadState->timings.rasterBlankStart.y, + head, + i, + pHeadState->vblankSyncObjects[i].evoSyncpt.hCtxDma, + pUpdateState); + + pHeadState->vblankSyncObjects[i].enabled = TRUE; + } + } +} + +/*! + * Update the heads to be modified on this disp. + * + * This should update the ASSY state of the head, but not trigger an + * UPDATE method. + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] head The head to consider. + * \param[in] pProposedHead The requested configuration of the head. + * \param[in,out] updateState Indicates which channels require UPDATEs + * \param[in] bypassComposition + * On Turing and newer, enable display + * composition pipeline bypass mode. 
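+ *
+ * Pre-update work covers SOR assignment, LUT programming, timings,
+ * dithering, OR control, colorspace/range, connector attach, viewports,
+ * image sharpening, DVC, HDMI FRL configuration, and re-enabling any
+ * core RG sync objects against the new raster timings.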
+ */ +static void +ApplyProposedModeSetHwStateOneHeadPreUpdate( + NVDispEvoPtr pDispEvo, + NvU32 head, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVModeSetWorkArea *pWorkArea, + NvBool bypassComposition) +{ + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + NVEvoModesetUpdateState *pModesetUpdateState = &pWorkArea->modesetUpdateState; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVDispHeadStateEvoPtr pHeadState; + NVDpyEvoPtr pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedHead->dpyIdList, pDispEvo); + NvU32 apiHead = nvHardwareHeadToApiHead(head); + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedHead->changed) { + return; + } + + /* Check for disabled heads. */ + + if (pProposedHead->pConnectorEvo == NULL) { + /* + * ApplyProposedModeSetHwStateOneHeadShutDown() should have + * already been called for this head. + */ + nvAssert(!nvHeadIsActive(pDispEvo, head)); + return; + } + + if (pDpyEvo == NULL) { + nvAssert(!"Invalid pDpyEvo"); + return; + } + + pDpyEvo->apiHead = apiHead; + + AssignSor(pWorkArea, pProposedHead->pConnectorEvo); + + nvDpyUpdateHdmiPreModesetEvo(pDpyEvo); + + pHeadState = &pDispEvo->headState[head]; + + pHeadState->bypassComposition = bypassComposition; + + pHeadState->activeRmId = pProposedHead->activeRmId; + + /* + * Cache the list of active pDpys for this head, as well as the + * mode timings. + */ + pApiHeadState->activeDpys = pProposedHead->dpyIdList; + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList = + nvAddDpyIdListToDpyIdList( + pApiHeadState->activeDpys, + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList); + + nvAssert(pDpyEvo->pConnectorEvo == pProposedHead->pConnectorEvo); + pHeadState->pConnectorEvo = pProposedHead->pConnectorEvo; + + pHeadState->timings = pProposedHead->timings; + + pHeadState->audio = pProposedHead->audio; + pApiHeadState->infoFrame = pProposedHead->infoFrame; + + AssignProposedUsageOneHead(pDispEvo->pDevEvo, pProposed, head); + + nvSendHwModeTimingsToAegisEvo(pDispEvo, head); + + /* Set LUT settings */ + nvEvoSetLut(pDispEvo, head, FALSE /* kickoff */, &pProposedHead->lut); + + /* Update current LUT to hardware */ + nvEvoUpdateCurrentPalette(pDispEvo, head, FALSE /* kickoff */); + + nvEvoSetTimings(pDispEvo, head, updateState); + + nvSetDitheringEvo(pDispEvo, + head, + &pProposedHead->attributes.dithering, + updateState); + + nvEvoHeadSetControlOR(pDispEvo, head, updateState); + + /* Update hardware's current colorSpace and colorRange */ + nvUpdateCurrentHardwareColorSpaceAndRangeEvo(pDispEvo, + head, + pProposedHead->attributes.colorSpace, + pProposedHead->attributes.colorRange, + updateState); + + nvEvoAttachConnector(pProposedHead->pConnectorEvo, + head, + pProposedHead->pDpLibModesetState, + pModesetUpdateState); + + nvSetViewPortsEvo(pDispEvo, head, updateState); + + nvSetImageSharpeningEvo( + pDispEvo, + head, + pProposedHead->attributes.imageSharpening.value, + updateState); + + + nvSetDVCEvo(pDispEvo, head, + pProposedHead->attributes.dvc, + updateState); + + + nvHdmiFrlSetConfig(pDispEvo, head); + + /* + * Re-enable any active sync objects, configuring them in accordance with + * the new timings. 
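+     * ConfigureVblankSyncObject() is reprogrammed with the new timings'
+     * rasterBlankStart.y as the raster line, so each sync object tracks
+     * the new mode.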
+ */ + ReenableActiveCoreRGSyncObjects(pDispEvo->pDevEvo, pHeadState, head, + updateState); + + pApiHeadState->attributes = pProposedHead->attributes; +} + + +/*! + * Update the heads to be modified on this disp. + * + * PreUpdate() will have already been called on this head, and an + * UPDATE method sent. + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] head The head to consider. + * \param[in] pProposedHead The requested configuration of the head. + */ +static void +ApplyProposedModeSetHwStateOneHeadPostUpdate(NVDispEvoPtr pDispEvo, + NvU32 head, + const + NVProposedModeSetHwStateOneHead + *pProposedHead) +{ + NVDispHeadStateEvoRec *pHeadState; + NVDpyEvoRec *pDpyEvo; + NvU32 apiHead = nvHardwareHeadToApiHead(head); + NVDispApiHeadStateEvoRec *pApiHeadState = &pDispEvo->apiHeadState[apiHead]; + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedHead->changed) { + return; + } + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + /* + * Cache configuration state in the headState, so that + * AssignProposedModeSetHwState() can preserve the configuration + * if this head is left alone in the next NvKmsSetModeRequest. + */ + pHeadState = &pDispEvo->headState[head]; + + pDpyEvo = nvGetOneArbitraryDpyEvo(pApiHeadState->activeDpys, pDispEvo); + nvAssert(pDpyEvo != NULL); + + pHeadState->allowFlipLockGroup = pProposedHead->allowFlipLockGroup; + pHeadState->modeValidationParams = pProposedHead->modeValidationParams; + pHeadState->hs10bpcHint = pProposedHead->hs10bpcHint; + + nvUpdateInfoFrames(pDpyEvo); + + /* Perform 3D vision authentication */ + nv3DVisionAuthenticationEvo(pDispEvo, head); + + nvHdmiDpEnableDisableAudio(pDispEvo, head, TRUE /* enable */); +} + +/* + * Shut down all heads that are incompatible with pProposedDisp. This + * requires doing an update immediately. + */ +static void +KickoffProposedModeSetHwStateIncompatibleHeadsShutDown( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVModeSetWorkArea *pWorkArea) +{ + NvU32 head; + NvBool foundIncompatibleHead = FALSE; + NvU32 clearHdmiFrlActiveRmId[NVKMS_MAX_HEADS_PER_DISP] = { }; + NVDpyIdList proposedActiveConnectorsList = nvEmptyDpyIdList(); + NVDpyIdList currActiveConnectorsList = nvEmptyDpyIdList(); + NVDpyIdList proposedInactiveConnectorList, unionOfActiveConnectorList; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVDpyId activeConnectorId = + (pDispEvo->headState[head].pConnectorEvo != NULL) ? + pDispEvo->headState[head].pConnectorEvo->displayId : + nvInvalidDpyId(); + NVDpyId proposedConnectorId = + (pProposedDisp->head[head].pConnectorEvo != NULL) ? 
+            pProposedDisp->head[head].pConnectorEvo->displayId :
+            nvInvalidDpyId();
+
+        currActiveConnectorsList =
+            nvAddDpyIdToDpyIdList(activeConnectorId,
+                                  currActiveConnectorsList);
+
+        proposedActiveConnectorsList =
+            nvAddDpyIdToDpyIdList(proposedConnectorId,
+                                  proposedActiveConnectorsList);
+    }
+
+    proposedInactiveConnectorList =
+        nvDpyIdListMinusDpyIdList(currActiveConnectorsList,
+                                  proposedActiveConnectorsList);
+    unionOfActiveConnectorList =
+        nvAddDpyIdListToDpyIdList(proposedActiveConnectorsList,
+                                  currActiveConnectorsList);
+
+    for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+        NvBool thisHeadIncompatible = FALSE;
+        const NVConnectorEvoRec *pCurrConnectorEvo =
+            pDispEvo->headState[head].pConnectorEvo;
+
+        if (!pProposedDisp->head[head].changed || !nvHeadIsActive(pDispEvo, head)) {
+            continue;
+        }
+
+        /*
+         * If the number of currently active connectors plus the proposed
+         * active connectors is greater than the number of heads, then the
+         * modeset is at risk of running out of SORs.  This is because the
+         * number of connectors > the number of SORs >= the number of heads.
+         *
+         * An SOR assignment failure during modeset causes display engine
+         * and/or kernel panics.
+         *
+         * In this situation, mark all the connectors which will not be
+         * active after the modeset as incompatible, and shut them down
+         * before triggering the modeset on all the active connectors.
+         */
+        if (nvCountDpyIdsInDpyIdList(unionOfActiveConnectorList) >
+            pDispEvo->pDevEvo->numHeads &&
+            nvDpyIdIsInDpyIdList(pCurrConnectorEvo->displayId,
+                                 proposedInactiveConnectorList)) {
+            thisHeadIncompatible = TRUE;
+        }
+
+        /* If the *new* timings are FRL, then we need to shut down the head. */
+        if (pProposedDisp->head[head].timings.hdmiFrlConfig.frlRate !=
+            HDMI_FRL_DATA_RATE_NONE) {
+            thisHeadIncompatible = TRUE;
+        }
+
+        /* If the *old* timings are FRL, then we need to shut down the head and
+         * clear the FRL config. */
+        if (pDispEvo->headState[head].timings.hdmiFrlConfig.frlRate !=
+            HDMI_FRL_DATA_RATE_NONE) {
+            thisHeadIncompatible = TRUE;
+            /* Cache the activeRmId since it will be cleared below, but
+             * we don't want to actually call into the HDMI library until
+             * afterwards. */
+            clearHdmiFrlActiveRmId[head] = pDispEvo->headState[head].activeRmId;
+        }
+
+        if (IsProposedModeSetHwStateOneHeadIncompatible(pDispEvo,
+                                                        head,
+                                                        pProposedDisp)) {
+            thisHeadIncompatible = TRUE;
+        }
+
+        if (!thisHeadIncompatible) {
+            continue;
+        }
+
+        ApplyProposedModeSetHwStateOneHeadShutDown(
+            pDispEvo,
+            head,
+            pProposedDisp,
+            pWorkArea);
+
+        foundIncompatibleHead = TRUE;
+    }
+
+    /* Submit UPDATE method and kick off, to shut down incompatible heads. */
+    if (foundIncompatibleHead) {
+        KickoffModesetUpdateState(pDispEvo, &pWorkArea->modesetUpdateState);
+        for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+            if (clearHdmiFrlActiveRmId[head] == 0) {
+                continue;
+            }
+            nvHdmiFrlClearConfig(pDispEvo, clearHdmiFrlActiveRmId[head]);
+        }
+    }
+}
+
+static void
+KickoffProposedModeSetHwState(
+    NVDispEvoPtr pDispEvo,
+    const NVProposedModeSetHwState *pProposed,
+    const NVProposedModeSetHwStateOneDisp *pProposedDisp,
+    const NvBool bypassComposition,
+    NVModeSetWorkArea *pWorkArea)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NVEvoModesetUpdateState *pModesetUpdateState = &pWorkArea->modesetUpdateState;
+    /*
+     * If there is a change in window ownership, decouple window channel flips
+     * and the core channel update that performs a modeset.
+ * + * This allows window channel flips to be instead interlocked with the core + * channel update that sets the window usage bounds, avoiding window + * invalid usage exceptions. + * + * See comment about NVDisplay error code 37, in + * function EvoInitWindowMapping3(). + */ + const NvBool decoupleFlipUpdates = + pModesetUpdateState->windowMappingChanged; + NvU32 head; + + /* Send methods to shut down any other unused heads, but don't update yet. */ + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + ApplyProposedModeSetHwStateOneHeadShutDown( + pDispEvo, + head, + pProposedDisp, + pWorkArea); + } + + /* Apply pre-UPDATE modifications for any enabled heads. */ + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + ApplyProposedModeSetHwStateOneHeadPreUpdate( + pDispEvo, + head, + pProposed, + pProposedDisp, + pWorkArea, + bypassComposition); + } + + if (!decoupleFlipUpdates) { + /* Merge modeset and flip state updates together */ + ApplyProposedModeSetHwStateOneDispFlip( + pDispEvo, + pProposed, + pProposedDisp, + &pModesetUpdateState->updateState); + } + + /* Submit UPDATE method and kick off. */ + KickoffModesetUpdateState(pDispEvo, + pModesetUpdateState); + + if (decoupleFlipUpdates) { + NVEvoUpdateState flipUpdateState = { }; + + ApplyProposedModeSetHwStateOneDispFlip( + pDispEvo, + pProposed, + pProposedDisp, + &flipUpdateState); + + pDevEvo->hal->Update(pDevEvo, + &flipUpdateState, + TRUE /* releaseElv */); + } + + nvRemoveUnusedHdmiDpAudioDevice(pDispEvo); + + /* Apply post-UPDATE modifications for any enabled heads. */ + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + ApplyProposedModeSetHwStateOneHeadPostUpdate( + pDispEvo, + head, + pProposedHead); + } +} + +static void AllocatePostModesetDispBandwidth(NVDispEvoPtr pDispEvo, + NVModeSetWorkArea *pWorkArea) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU64 maxFrameTimeUsec = 0ULL; + NvU32 head; + + if (!pDevEvo->isSOCDisplay) { + return; + } + + if ((pDispEvo->isoBandwidthKBPS == pWorkArea->postModesetIsoBandwidthKBPS) && + (pDispEvo->dramFloorKBPS == pWorkArea->postModesetDramFloorKBPS)) { + return; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU64 curFrameTimeUsec = 0ULL; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + curFrameTimeUsec = nvEvoFrametimeUsFromTimings(&pDispEvo->headState[head].timings); + maxFrameTimeUsec = NV_MAX(maxFrameTimeUsec, curFrameTimeUsec); + } + + nvkms_usleep(maxFrameTimeUsec * 2); + + if (!nvAllocateDisplayBandwidth(pDispEvo, + pWorkArea->postModesetIsoBandwidthKBPS, + pWorkArea->postModesetDramFloorKBPS)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Unexpectedly failed to program post-modeset bandwidth!"); + } +} + +/*! + * Update the disp with the modifications described in pProposedDisp. + * + * \param[in] pDispEvo The disp to be modified. + * \param[in] pProposedDisp The requested configuration of the disp. + * \param[in] pWorkArea Preallocated scratch memory. + * \param[in] updateCoreFirst If true, avoid interlock with core: kick off + * the core channel and wait for a notifier + * before the rest of the channels for this update. + * \param[in] bypassComposition + * On Turing and newer, enable display composition + * pipeline bypass mode. + * + * This function is not allowed to fail. 
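+ *
+ * The sequence below: record the old flip state, initialize the window
+ * mapping if this is the first core update, pin the DRAM floor (Orin SOC
+ * display only), shut down incompatible heads with an immediate kickoff,
+ * apply and kick off the proposed state, rebalance bandwidth via
+ * AllocatePostModesetDispBandwidth(), and finally update surface flip
+ * reference counts.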
+ */
+static void
+ApplyProposedModeSetHwStateOneDisp(
+ NVDispEvoPtr pDispEvo,
+ const NVProposedModeSetHwState *pProposed,
+ const NVProposedModeSetHwStateOneDisp *pProposedDisp,
+ NVModeSetWorkArea *pWorkArea,
+ NvBool updateCoreFirst,
+ NvBool bypassComposition)
+{
+ NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+ NvU32 head;
+ const NvU32 sd = pDispEvo->displayOwner;
+
+ nvkms_memset(&pWorkArea->modesetUpdateState, 0,
+ sizeof(pWorkArea->modesetUpdateState));
+
+ /* Record the current flip state. */
+ for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) {
+ nvInitFlipEvoHwState(pDevEvo, sd, head,
+ &pWorkArea->sd[sd].head[head].oldState);
+ }
+
+ if (updateCoreFirst) {
+ /* If this is the first core update, initialize the window -> head
+ * mapping.
+ *
+ * The HAL's ->InitWindowMapping() sets
+ * NVModesetUpdateState::windowMappingChanged to TRUE if there is
+ * any change in window ownership/assignment. This is necessary on
+ * GV100+ because of a series of unfortunate requirements.
+ *
+ * NVDisplay has two requirements that we need to honor:
+ *
+ * 1. You can't move a window from one head to another while the head
+ * is active.
+ * 2. You can't change window assignments in an update that's
+ * interlocked with the corresponding window channel.
+ *
+ * GV100 imposes a third requirement:
+ *
+ * 3. You can't change window assignment for a head while it is
+ * active, but it's okay to assign windows in the same update that
+ * activates a head.
+ *
+ * If there is a change in window assignment, the task of not
+ * interlocking core and respective window channels will be handled by
+ * NVEvoUpdateState::subdev[]::noCoreInterlockMask.
+ * ->InitWindowMapping() will set 'noCoreInterlockMask' and ->Update()
+ * will take care not to interlock the window channels specified in the
+ * mask with the core channel.
+ *
+ * The GOP driver and NVKMS assign window channels in the same way:
+ * window channels 2n and 2n+1 are guaranteed to get assigned
+ * to head n.
+ */
+ pDevEvo->hal->InitWindowMapping(pDispEvo, &pWorkArea->modesetUpdateState);
+ }
+
+ /*
+ * Temporarily lock to the max DRAM frequency to prevent mclk switch events
+ * from being requested. Display can't tolerate mclk switch events during
+ * modeset transitions. This max DRAM floor will be released after the Core
+ * notifier signals post-modeset in the AllocatePostModesetDispBandwidth()
+ * call below. This only needs to be done for Orin SOC display.
+ */
+ if (!nvAllocateDisplayBandwidth(pDispEvo,
+ pDispEvo->isoBandwidthKBPS,
+ NV_U32_MAX)) {
+ nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+ "Unexpectedly failed to lock to max DRAM pre-modeset!");
+ }
+
+ KickoffProposedModeSetHwStateIncompatibleHeadsShutDown(
+ pDispEvo,
+ pProposedDisp,
+ pWorkArea);
+
+ KickoffProposedModeSetHwState(
+ pDispEvo,
+ pProposed,
+ pProposedDisp,
+ bypassComposition,
+ pWorkArea);
+
+ /*
+ * AllocatePostModesetDispBandwidth() waits for 2 frames to make sure that
+ * the final IMP arbitration settings have been programmed by the post-SV3
+ * worker thread in RM. Once these settings have taken effect, it's safe to
+ * release the max DRAM floor that was previously requested, and to program
+ * the ISO bandwidth that's required for the new mode. This only needs to
+ * be done for Orin SOC display.
+ */
+ AllocatePostModesetDispBandwidth(pDispEvo, pWorkArea);
+
+ /*
+ * Record the new flip state, then generate any flip events, and update
+ * surface reference counts.
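+ * (The NV_TRUE/NV_FALSE arguments to nvUpdateSurfacesFlipRefCount() below
+ * presumably take references for surfaces in the new state and release
+ * them for surfaces in the old state, respectively.)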
+ */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + nvInitFlipEvoHwState( + pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].newState); + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].newState, + NV_TRUE); + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].oldState, + NV_FALSE); + } +} + + +/*! + * Initialize the pReply structure. + * + * Mark all of the heads and disps as successful. During the process + * of assigning and validating the proposed configuration, heads with + * invalid requested configuration will have their reply status field + * changed to a non-success value. + * + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. + */ +static void +InitializeReply(const NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->status = NVKMS_SET_MODE_STATUS_SUCCESS; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NvU32 head; + + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_SUCCESS; + + for (head = 0; head < pDevEvo->numHeads; head++) { + + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS; + } + } +} + + +/*! + * Validate the client-provided NvKmsSetModeRequest. + * + * Check basic validity of NvKmsSetModeRequest: e.g., that + * requestedDispsBitMask and requestedHeadsBitMask do not exceed the + * disps or heads of the pDevEvo. + * + * \param[in] pDevEvo The device that is to be modified. + * \param[in] pOpenDev The pOpenDev of the client doing the modeset. + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. + + * \return If pRequest is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReply to non-SUCCESS, + * and return FALSE. + */ +static NvBool +ValidateRequest(const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply) +{ + NvU32 dispIndex, head; + NvBool ret = TRUE; + + const struct NvKmsModesetPermissions *pPermissions = + nvGetModesetPermissionsFromOpenDev(pOpenDev); + + nvAssert(pOpenDev != NULL); + nvAssert(pPermissions != NULL); + + /* Check for invalid disps in requestedDispsBitMask. */ + if (nvHasBitAboveMax(pRequest->requestedDispsBitMask, + NVKMS_MAX_SUBDEVICES)) { + pReply->status = NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK; + ret = FALSE; + } + + for (dispIndex = 0; dispIndex < NVKMS_MAX_SUBDEVICES; dispIndex++) { + + if ((pRequest->requestedDispsBitMask & (1 << dispIndex)) == 0) { + continue; + } + + if (dispIndex >= pDevEvo->nDispEvo) { + pReply->status = + NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK; + ret = FALSE; + continue; + } + + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + + /* Check for invalid heads in requestedHeadsBitMask. 
*/ + if (nvHasBitAboveMax(pRequestDisp->requestedHeadsBitMask, + NVKMS_MAX_HEADS_PER_DISP)) { + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK; + ret = FALSE; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + + if ((pRequestDisp->requestedHeadsBitMask & (1 << head)) == 0) { + continue; + } + + if (head >= pDevEvo->numHeads) { + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK; + ret = FALSE; + continue; + } + + const NVDpyIdList permDpyIdList = + pPermissions->disp[dispIndex].head[head].dpyIdList; + + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + + /* + * Does the client have permission to touch this head at + * all? + */ + if (pRequest->commit && nvDpyIdListIsEmpty(permDpyIdList)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS; + ret = FALSE; + continue; + } + + /* + * pRequestHead->dpyIdList == EMPTY means the head is + * being shut down: no more to do for validation. + */ + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + continue; + } + + /* + * Does the client have permission to drive this dpyIdList + * with this head? + */ + if (pRequest->commit && + !nvDpyIdListIsASubSetofDpyIdList(pRequestHead->dpyIdList, + permDpyIdList)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS; + ret = FALSE; + continue; + } + + /* + * Are all requested dpys in the list of valid dpys for this disp? + */ + if (!nvDpyIdListIsASubSetofDpyIdList( + pRequestHead->dpyIdList, + pDevEvo->pDispEvo[dispIndex]->validDisplays)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY; + ret = FALSE; + continue; + } + + if (!nvValidateSetLutCommonParams(pDevEvo, &pRequestHead->lut)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_LUT; + ret = FALSE; + continue; + } + } + } + + return ret; +} + +static void FillPostSyncptReplyForModeset( + const NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pFlipRequest, + struct NvKmsFlipCommonReplyOneHead *pFlipReply, + const NVFlipEvoHwState *pFlipState) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipRequest->layer[layer].syncObjects.specified && + pFlipRequest->layer[layer].syncObjects.val.useSyncpt) { + nvFillPostSyncptReplyOneChannel( + pDevEvo->head[head].layer[layer], + pFlipRequest->layer[layer].syncObjects.val.u.syncpts.requestedPostType, + &pFlipReply->layer[layer].postSyncpt, + &pFlipState->layer[layer].syncObject); + } + } +} + +/*! + * Assign the NvKmsSetModeReply structure. + * + * After a modeset was successfully completed, update the pReply with + * information about the modeset that the client may need. + * + * \param[in] pDevEvo The device that was modified. + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. 
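+ * \param[in] pWorkArea The scratch memory used during the modeset; the
+ * per-head post-syncpt state reported in the reply
+ * is read from its sd[].head[].newState records.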
+ */ +static void +AssignReplySuccess(const NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + const NVModeSetWorkArea *pWorkArea) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NvU32 head; + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + + if ((pRequest->requestedDispsBitMask & (1 << dispIndex)) == 0) { + continue; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + struct NvKmsSetModeOneHeadReply *pReplyHead = + &pReply->disp[dispIndex].head[head]; + + if ((pRequestDisp->requestedHeadsBitMask & (1 << head)) == 0) { + continue; + } + + pReplyHead->status = NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS; + + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + pReplyHead->activeRmId = 0; + } else { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + pReplyHead->activeRmId = pHeadState->activeRmId; + pReplyHead->possibleUsage = pHeadState->timings.viewPort.possibleUsage; + pReplyHead->guaranteedUsage = pHeadState->timings.viewPort.guaranteedUsage; + pReplyHead->usingHeadSurface = + (pDispEvo->pHsChannel[head] != NULL); + pReplyHead->vrrEnabled = + (pDispEvo->headState[head].timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE); + } + FillPostSyncptReplyForModeset( + pDevEvo, + head, + &pRequestHead->flip, + &pReplyHead->flipReply, + &pWorkArea->sd[dispIndex].head[head].newState); + } + } +} + + +/*! + * Call RM to notify that a modeset is impending, or that the modeset has + * completed. + * + * \param[in] pDevEvo The device to modify. + * \param[in] pProposed The proposed resulting hardware state. + * \param[in] beginOrEnd Whether this is a begin call or an end call. + */ +static void +BeginEndModeset(NVDevEvoPtr pDevEvo, + const NVProposedModeSetHwState *pProposed, + enum NvKmsBeginEndModeset beginOrEnd) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head, dpyMask = 0; + + /* Compute dpyMask: take all the dpyIds on this dispIndex. */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + dpyMask |= + nvDpyIdListToNvU32(pProposed->disp[dispIndex].head[head].dpyIdList); + } + + nvRmBeginEndModeset(pDispEvo, beginOrEnd, dpyMask); + } +} + +/*! + * Idle all of the satellite channels. + * + * XXX NVKMS: use interlocked UPDATEs, instead, so that we don't + * have to busy-wait on the CPU. + * + * XXX NVKMS: we should idle all channels, not just base. + */ +static NvBool IdleAllSatelliteChannels(NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + NvBool unused; + if (!nvRMIdleBaseChannel(pDevEvo, head, sd, &unused)) { + return FALSE; + } + } + } + + return TRUE; +} + +/*! + * Helper function to validate the proposed mode + */ +static NvBool +IsProposedModeSetValid(NVDevEvoPtr pDevEvo, + struct NvKmsSetModeReply *pReply, + const struct NvKmsPerOpenDev *pOpenDev, + NVProposedModeSetHwState *pProposed, + const struct NvKmsSetModeRequest *pRequest, + NVModeSetWorkArea *pWorkArea) +{ + return ValidateProposedModeSetHwState(pDevEvo, pProposed, pReply, + pWorkArea); +} + +/*! + * Perform a modeset across the heads on the disps of the device. 
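+ * (This is the entry point for both external client requests and internal
+ * modesets: nvShutDownHeads() below, for example, calls it with the
+ * pNvKmsOpenDev client.)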
+ *
+ * See the comments at the top of this source file for a description
+ * of the flow performed by this function.
+ *
+ * \param[in,out] pDevEvo The device to be modified.
+ * \param[in] pOpenDev The pOpenDev of the client doing the modeset.
+ * \param[in] pRequest The client's requested configuration changes.
+ * \param[out] pReply The reply to the client.
+ * \param[in] bypassComposition
+ * On Turing and higher, enable composition pipeline
+ * bypass mode.
+ * \param[in] doRasterLock
+ * Rasterlock heads in the post-modeset routine.
+ *
+ * \return Return TRUE if the modeset was successful. Otherwise,
+ * return FALSE. If the modeset was not successful,
+ * the state of the hardware and software should not
+ * have been changed.
+ */
+NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo,
+ const struct NvKmsPerOpenDev *pOpenDev,
+ const struct NvKmsSetModeRequest *pRequest,
+ struct NvKmsSetModeReply *pReply,
+ NvBool bypassComposition,
+ NvBool doRasterLock)
+{
+ NvBool ret = FALSE;
+ NVProposedModeSetHwState *pProposed =
+ nvPreallocGet(pDevEvo, PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE,
+ sizeof(*pProposed));
+ NvU32 dispIndex;
+ NVDispEvoPtr pDispEvo;
+ NvU32 dispNeedsEarlyUpdate;
+ NvBool updateCoreFirst = FALSE;
+
+ /*
+ * We should shut down unused heads and not inherit the previous modeset
+ * state as part of this modeset if:
+ * - The requesting client is the internal NVKMS client (i.e.,
+ * this is a console restore modeset), or
+ * - 'modesetOwnerChanged' is recorded in the device;
+ * i.e., there was a modeset ownership change since the last
+ * modeset.
+ */
+ const NvBool modesetOwnerChanged =
+ (pOpenDev == pDevEvo->pNvKmsOpenDev) ? TRUE :
+ pDevEvo->modesetOwnerChanged;
+
+ NVModeSetWorkArea *pWorkArea =
+ nvPreallocGet(pDevEvo, PREALLOC_TYPE_MODE_SET_WORK_AREA,
+ sizeof(*pWorkArea));
+
+ nvkms_memset(pProposed, 0, sizeof(*pProposed));
+ nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea));
+
+ nvAssert(pOpenDev != NULL);
+
+ InitializeReply(pDevEvo, pRequest, pReply);
+
+ if (!ValidateRequest(pDevEvo, pOpenDev, pRequest, pReply)) {
+ goto done;
+ }
+
+ /* Disallow GC6 in anticipation of touching GPU/displays. */
+ if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) {
+ goto done;
+ }
+
+ if (!AssignProposedModeSetHwState(pDevEvo, pOpenDev,
+ pRequest, pReply, pProposed,
+ modesetOwnerChanged)) {
+ goto done;
+ }
+
+ if (!IsProposedModeSetValid(pDevEvo, pReply, pOpenDev, pProposed,
+ pRequest, pWorkArea)) {
+ goto done;
+ }
+
+ /* The requested configuration is valid. */
+
+ ret = TRUE;
+
+ if (!pRequest->commit) {
+ goto done;
+ }
+
+ /* All satellite channels must be idle. */
+
+ if (!IdleAllSatelliteChannels(pDevEvo)) {
+ ret = FALSE;
+ goto done;
+ }
+
+ /* From this point, we should not fail. */
+
+ /*
+ * Disable stereo pin during console restore or modeset owner changes.
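+ * (Presumably so that the incoming modeset owner does not inherit a
+ * stereo pin state that it did not request.)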
+ */
+ if (modesetOwnerChanged) {
+ NvU32 sd;
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+ NvU32 head;
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ if (!nvHeadIsActive(pDispEvo, head)) {
+ continue;
+ }
+ nvSetStereoEvo(pDispEvo, head, FALSE);
+ }
+ }
+ }
+
+ nvEvoCancelPostFlipIMPTimer(pDevEvo);
+
+ BeginEndModeset(pDevEvo, pProposed, BEGIN_MODESET);
+
+ nvEvoLockStatePreModeset(pDevEvo, &dispNeedsEarlyUpdate, &pWorkArea->earlyUpdateState);
+
+ nvDisableVrr(pDevEvo);
+
+ updateCoreFirst = pDevEvo->coreInitMethodsPending;
+ pDevEvo->coreInitMethodsPending = FALSE;
+
+ /*
+ * If the core channel has assembly state that needs to be committed
+ * before proceeding through the rest of the modeset, kick it off here.
+ * This is used to disable fliplock before issuing base flips
+ * in ApplyProposedModeSetHwStateOneDisp.
+ *
+ * XXX This violates the assumption (guarded by
+ * pDevEvo->coreInitMethodsPending) that we aren't kicking
+ * off until after the assembly core channel state (which we don't
+ * want to commit) has already been overwritten below and made safe
+ * for kickoff. Because of this, dispNeedsEarlyUpdate should only be set
+ * when it is safe to kick off the existing core channel assembly
+ * state immediately. Currently it is only set when the call
+ * to nvEvoLockStatePreModeset() above disabled fliplock, at which
+ * point there should be no invalid state remaining in the
+ * core channel assembly.
+ */
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ if (dispNeedsEarlyUpdate & (1 << dispIndex)) {
+ nvEvoUpdateAndKickOff(pDispEvo, TRUE, &pWorkArea->earlyUpdateState,
+ TRUE /* releaseElv */);
+ }
+ }
+
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ ApplyProposedModeSetHwStateOneDisp(pDispEvo,
+ pProposed,
+ &pProposed->disp[dispIndex],
+ pWorkArea,
+ updateCoreFirst,
+ bypassComposition);
+ }
+
+ nvEnableVrr(pDevEvo, pRequest);
+
+ /*
+ * Cache whether HS in NVKMS is allowed, so we can make consistent
+ * decisions for future partial updates from non-modeset owners.
+ */
+ pDevEvo->allowHeadSurfaceInNvKms = pProposed->allowHeadSurfaceInNvKms;
+
+ nvEvoLockStatePostModeset(pDevEvo, doRasterLock);
+
+ BeginEndModeset(pDevEvo, pProposed, END_MODESET);
+
+ AssignReplySuccess(pDevEvo, pRequest, pReply, pWorkArea);
+
+ pDevEvo->skipConsoleRestore = FALSE;
+
+ /*
+ * If this was a pNvKmsOpenDev-initiated modeset, force the next modeset to
+ * shut down all unused heads and not to inherit any state from this
+ * modeset. That will prevent a regular client from inheriting
+ * pNvKmsOpenDev modeset state.
+ */
+ pDevEvo->modesetOwnerChanged =
+ (pOpenDev == pDevEvo->pNvKmsOpenDev) ? TRUE : FALSE;
+
+ /* fall through */
+done:
+
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ NvU32 head;
+
+ /*
+ * In case of successful commit, update current attribute values and
+ * free old display IDs.
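+ * (On failure, or when the request was not committed, the else branch
+ * below instead frees the newly allocated RM display IDs for changed
+ * heads, leaving the current configuration untouched.)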
+ */
+ if (pRequest->commit && ret) {
+ NVDpyEvoRec *pDpyEvo;
+
+ FOR_ALL_EVO_DPYS(pDpyEvo,
+ pWorkArea->sd[dispIndex].changedDpyIdList,
+ pDispEvo) {
+ nvDpyUpdateCurrentAttributes(pDpyEvo);
+ }
+
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ if (pWorkArea->sd[dispIndex].head[head].oldActiveRmId != 0x0) {
+ nvRmFreeDisplayId(
+ pDispEvo,
+ pWorkArea->sd[dispIndex].head[head].oldActiveRmId);
+ }
+ }
+ } else {
+ /* Otherwise, free newly allocated RM display IDs for changed heads */
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ NVProposedModeSetHwStateOneHead *pProposedHead =
+ &pProposed->disp[dispIndex].head[head];
+
+ if (!pProposedHead->changed || pProposedHead->activeRmId == 0x0) {
+ continue;
+ }
+ nvRmFreeDisplayId(pDispEvo, pProposedHead->activeRmId);
+ }
+ }
+
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ NVProposedModeSetHwStateOneHead *pProposedHead =
+ &pProposed->disp[dispIndex].head[head];
+ nvDPLibFreeModesetState(pProposedHead->pDpLibModesetState);
+ }
+ }
+
+ /* If all heads are shut down, allow GC6. */
+ if (nvAllHeadsInactive(pDevEvo)) {
+ nvRmSetGc6Allowed(pDevEvo, TRUE);
+ }
+
+ nvPreallocRelease(pDevEvo, PREALLOC_TYPE_MODE_SET_WORK_AREA);
+ nvPreallocRelease(pDevEvo, PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE);
+ return ret;
+}
+
+/*!
+ * Register a callback to activate when vblank is reached on a given head.
+ *
+ * \param[in,out] pDispEvo The display engine to register the callback on.
+ * \param[in] head The head to register the callback on.
+ * \param[in] pCallback The function to call when vblank is reached on the
+ * provided pDispEvo+head combination.
+ * \param[in] pUserData A pointer to caller-provided custom data.
+ *
+ * \return Returns a pointer to an NVVBlankCallbackRec structure if the
+ * registration was successful. Otherwise, returns NULL.
+ */
+NVVBlankCallbackPtr nvRegisterVBlankCallback(NVDispEvoPtr pDispEvo,
+ NvU32 head,
+ NVVBlankCallbackProc pCallback,
+ void *pUserData)
+{
+ NVVBlankCallbackPtr pVBlankCallback = NULL;
+
+ pVBlankCallback = nvCalloc(1, sizeof(*pVBlankCallback));
+ if (pVBlankCallback == NULL) {
+ return NULL;
+ }
+
+ pVBlankCallback->pCallback = pCallback;
+ pVBlankCallback->pUserData = pUserData;
+
+ nvListAppend(&pVBlankCallback->vblankCallbackListEntry,
+ &pDispEvo->headState[head].vblankCallbackList);
+
+ // If this is the first entry in the list, register the vblank callback
+ if (pDispEvo->headState[head].rmVBlankCallbackHandle == 0) {
+
+ pDispEvo->headState[head].rmVBlankCallbackHandle =
+ nvRmAddVBlankCallback(pDispEvo,
+ head,
+ VBlankCallback);
+ }
+ return pVBlankCallback;
+}
+
+/*!
+ * Un-register a vblank callback for a given head.
+ *
+ * \param[in,out] pDispEvo The display engine to un-register the callback from.
+ * \param[in] head The head to un-register the callback from.
+ * \param[in] pCallback A pointer to the NVVBlankCallbackRec to un-register.
+ *
+ */
+void nvUnregisterVBlankCallback(NVDispEvoPtr pDispEvo,
+ NvU32 head,
+ NVVBlankCallbackPtr pCallback)
+{
+ nvListDel(&pCallback->vblankCallbackListEntry);
+ nvFree(pCallback);
+
+ // If there are no more callbacks, disable the RM-level callback
+ if (nvListIsEmpty(&pDispEvo->headState[head].vblankCallbackList)) {
+ nvRmRemoveVBlankCallback(pDispEvo,
+ pDispEvo->headState[head].rmVBlankCallbackHandle);
+
+ pDispEvo->headState[head].rmVBlankCallbackHandle = 0;
+ }
+}
+
+/*!
+ * Perform a modeset that disables some or all heads.
+ *
+ * \param[in] pDevEvo The device to shut down.
+ * \param[in] pTestFunc A pointer to a test function identifying the heads
+ * targeted to be shut down. If NULL, shut down
+ * all heads.
+ */
+void nvShutDownHeads(NVDevEvoPtr pDevEvo, NVShutDownHeadsTestFunc pTestFunc)
+{
+ if (pDevEvo->displayHandle != 0) {
+ struct NvKmsSetModeParams *params =
+ nvPreallocGet(pDevEvo, PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE,
+ sizeof(*params));
+ struct NvKmsSetModeRequest *req = NULL;
+ NVDispEvoPtr pDispEvo;
+ NvU32 dispIndex;
+ NvBool dirty = FALSE;
+
+ nvkms_memset(params, 0, sizeof(*params));
+ req = &params->request;
+
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ NvU32 head;
+
+ req->requestedDispsBitMask |= NVBIT(dispIndex);
+ for (head = 0; head < pDevEvo->numHeads; head++) {
+ /*
+ * XXX pTestFunc isn't honored by nvSetDispModeEvo()'s
+ * modesetOwnerChanged logic.
+ */
+ if (pTestFunc && !pTestFunc(pDispEvo, head)) {
+ continue;
+ }
+
+ dirty = TRUE;
+ req->disp[dispIndex].requestedHeadsBitMask |= NVBIT(head);
+ }
+ }
+
+ if (dirty) {
+ req->commit = TRUE;
+
+ /*
+ * XXX TODO: The coreInitMethodsPending flag indicates that the
+ * init_no_update methods which were pushed to the hardware during
+ * core channel allocation are still pending; this means it is the
+ * first modeset after boot and the boot display/heads are still
+ * active. In theory, we could shut down only the heads which
+ * satisfy the pTestFunc() test, but this fails because other heads
+ * active at boot do not have mode timing information populated
+ * during MarkConnectorBootHeadActive(), so nvSetDispModeEvo() tries
+ * to program invalid modes on those heads.
+ *
+ * For now, just shut down all heads if any head satisfies the
+ * pTestFunc() test.
+ */
+ if (pDevEvo->coreInitMethodsPending) {
+ FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+ req->disp[dispIndex].requestedHeadsBitMask |=
+ NVBIT(pDevEvo->numHeads) - 1;
+ }
+ }
+
+ nvSetDispModeEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, req,
+ &params->reply, FALSE /* bypassComposition */,
+ TRUE /* doRasterLock */);
+ }
+
+ nvPreallocRelease(pDevEvo, PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE);
+ }
+
+ if (pTestFunc == NULL) {
+ nvAssertAllDpysAreInactive(pDevEvo);
+ }
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c
new file mode 100644
index 0000000..7d3a1f7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c
@@ -0,0 +1,146 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-types.h" +#include "nvkms-flip-workarea.h" +#include "nvkms-modeset-types.h" +#include "nvkms-modeset-workarea.h" +#include "nvkms-prealloc.h" +#include "nvkms-utils.h" + +#include "nvkms-api.h" + +#include + +static size_t GetSizeForType(NVDevEvoPtr pDevEvo, enum NVPreallocType type) +{ + switch (type) { + case PREALLOC_TYPE_IMP_PARAMS: + return pDevEvo->hal->caps.impStructSize; + case PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE: /* fall through */ + case PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE: + return sizeof(struct NvKmsSetModeParams); + case PREALLOC_TYPE_MODE_SET_WORK_AREA: + return sizeof(NVModeSetWorkArea); + case PREALLOC_TYPE_FLIP_WORK_AREA: + return sizeof(struct NvKmsFlipWorkArea); + case PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE: /* fallthrough */ + case PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE: + return sizeof(NVProposedModeSetHwState); + case PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS: + return sizeof(NVHwModeTimingsEvo); + case PREALLOC_TYPE_MAX: + /* Not a real option, but added for -Wswitch-enum */ + break; + } + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Unknown prealloc type %d in GetSizeForType.", type); + + return 0; +} + +void *nvPreallocGet( + NVDevEvoPtr pDevEvo, + enum NVPreallocType type, + size_t sizeCheck) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + size_t size = GetSizeForType(pDevEvo, type); + + if (size != sizeCheck) { + nvAssert(size == sizeCheck); + return NULL; + } + + if ((pPrealloc->used[type / 8] & NVBIT(type % 8)) != 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d already used in nvPreallocGet.", type); + return NULL; + } + + /* Since these are preallocated, they should not be NULL. 
*/ + if (pPrealloc->ptr[type] == NULL) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d NULL in nvPreallocGet.", type); + } + + pPrealloc->used[type / 8] |= NVBIT(type % 8); + + return pPrealloc->ptr[type]; +} + +void nvPreallocRelease( + NVDevEvoPtr pDevEvo, + enum NVPreallocType type) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + + if ((pPrealloc->used[type / 8] & NVBIT(type % 8)) == 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d not used in nvPreallocRelease.", type); + } + + pPrealloc->used[type / 8] &= ~(NvU8)NVBIT(type % 8); +} + +NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + NvU32 type; + + for (type = 0; type < PREALLOC_TYPE_MAX; type++) { + size_t size = GetSizeForType(pDevEvo, type); + if (size == 0) { + goto fail; + } + pPrealloc->ptr[type] = nvAlloc(size); + if (pPrealloc->ptr[type] == NULL) { + goto fail; + } + } + + nvkms_memset(pPrealloc->used, 0, sizeof(pPrealloc->used)); + + return TRUE; + +fail: + nvPreallocFree(pDevEvo); + return FALSE; +} + +void nvPreallocFree(NVDevEvoPtr pDevEvo) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + NvU32 type; + + for (type = 0; type < PREALLOC_TYPE_MAX; type++) { + if ((pDevEvo->prealloc.used[type / 8] & NVBIT(type % 8)) != 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d still used in nvPreallocFree.", type); + } + + nvFree(pPrealloc->ptr[type]); + pPrealloc->ptr[type] = NULL; + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c new file mode 100644 index 0000000..1b3283a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c @@ -0,0 +1,5426 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + + +#include "dp/nvdp-connector.h" +#include "dp/nvdp-timer.h" +#include "dp/nvdp-device.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "g_nvkms-evo-states.h" +#include "nvkms-event.h" +#include "nvkms-dpy.h" +#include "nvkms-types.h" +#include "nvkms-evo.h" +#include "nvkms-dma.h" +#include "nvkms-utils.h" +#include "nvkms-private.h" +#include "nvkms-modeset.h" +#include "nvkms-surface.h" +#include "nvkms-vrr.h" + +#include "class/cl0002.h" /* NV01_CONTEXT_DMA */ +#include "class/cl00c3.h" /* NV01_MEMORY_SYNCPOINT */ +#include "class/cl0005.h" /* NV01_EVENT */ + +#include /* NV04_DISPLAY_COMMON */ +#include /* NV01_MEMORY_SYSTEM */ +#include /* NV01_MEMORY_FRAMEBUFFER_CONSOLE */ +#include /* NV01_DEVICE_0 */ +#include /* NV01_MEMORY_LOCAL_USER */ +#include /* NV20_SUBDEVICE_0 */ + +#include "class/clc37b.h" /* NVC37B_WINDOW_IMM_CHANNEL_DMA */ +#include "class/clc37e.h" /* NVC37E_WINDOW_CHANNEL_DMA */ +#include "class/clc57b.h" /* NVC57B_WINDOW_IMM_CHANNEL_DMA */ +#include "class/clc57e.h" /* NVC57E_WINDOW_CHANNEL_DMA */ +#include "class/clc67b.h" /* NVC67B_WINDOW_IMM_CHANNEL_DMA */ +#include "class/clc67e.h" /* NVC67E_WINDOW_CHANNEL_DMA */ + +#include "class/cl917b.h" /* NV917B_OVERLAY_IMM_CHANNEL_PIO */ + +#include "class/cl927c.h" /* NV927C_BASE_CHANNEL_DMA */ + +#include "class/cl917e.h" /* NV917E_OVERLAY_CHANNEL_DMA */ + +#include /* NV0000_CTRL_GPU_* */ +#include /* NV0002_CTRL_CMD_BIND_CONTEXTDMA */ +#include /* NV0073_CTRL_CMD_DFP_GET_INFO */ +#include /* NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID */ +#include /* NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO */ +#include /* NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED */ +#include /* NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER */ +#include /* NV0080_CTRL_CMD_GR_GET_CAPS_V2 */ +#include /* NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH */ +#include /* NV2080_CTRL_CMD_BIOS_GET_NBSI */ +#include /* NV2080_CTRL_CMD_BUS_GET_INFO */ +#include /* NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION */ +#include /* NV2080_CTRL_CMD_GPU_GET_SW_FEATURES */ +#include /* NV2080_CTRL_CMD_TIMER_GET_TIME */ +#include /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */ +#include /* NV5070_CTRL_CMD_SET_RMFREE_FLAGS */ +#include /* NV5070_CTRL_CMD_SET_DAC_PWR */ + +#include "nvos.h" + +#include "displayport/dpcd.h" + +#define NVKMS_SYNCPT_ID_INVALID (0xFFFFFFFF) + +static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId); + +static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel); + +static NvBool QueryGpuCapabilities(NVDevEvoPtr pDevEvo) +{ + NvBool ctxDmaCoherentAllowedDev = FALSE; + NvBool ctxDmaNonCoherentAllowedDev = FALSE; + NvU32 ret, sd; + + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS idInfoParams = { 0 }; + + pDevEvo->isHeadSurfaceSupported = FALSE; + + pDevEvo->validResamplingMethodMask = + NVBIT(NVKMS_RESAMPLING_METHOD_BILINEAR) | + NVBIT(NVKMS_RESAMPLING_METHOD_NEAREST); + + /* ctxDma{,Non}CoherentAllowed */ + + /* simulationType */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS simParams = { 0 }; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO, + &simParams, + sizeof(simParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + simParams.type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; + } + if (sd == 0) { + pDevEvo->simulationType = simParams.type; + } + nvAssert(pDevEvo->simulationType == simParams.type); + } + + /* mobile */ + + idInfoParams.gpuId = pDevEvo->pSubDevices[0]->gpuId; + + ret = 
nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + &idInfoParams, sizeof(idInfoParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + pDevEvo->mobile = FALSE; + pDevEvo->isSOCDisplay = FALSE; + } else { + pDevEvo->mobile = + FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE, + idInfoParams.gpuFlags); + + pDevEvo->isSOCDisplay = + FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE, + idInfoParams.gpuFlags); + } + + /* TODO: This cap bit should be queried from RM */ + pDevEvo->requiresAllAllocationsInSysmem = pDevEvo->isSOCDisplay; + + /* ctxDma{,Non}CoherentAllowed */ + + if (!pDevEvo->isSOCDisplay) { + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NV2080_CTRL_BUS_GET_INFO_PARAMS busParams = { 0 }; + struct { + NV2080_CTRL_BUS_INFO coherentFlags; + NV2080_CTRL_BUS_INFO nonCoherentFlags; + } busInfoList = { { 0 } }; + + NvBool ctxDmaCoherentAllowed; + NvBool ctxDmaNonCoherentAllowed; + + busInfoList.coherentFlags.index = + NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS; + busInfoList.nonCoherentFlags.index = + NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS; + + busParams.busInfoListSize = + sizeof(busInfoList) / sizeof(busInfoList.coherentFlags); + busParams.busInfoList = NV_PTR_TO_NvP64(&busInfoList); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_BUS_GET_INFO, + &busParams, sizeof(busParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + ctxDmaCoherentAllowed = + FLD_TEST_DRF(2080_CTRL_BUS_INFO, _COHERENT_DMA_FLAGS, + _CTXDMA, _TRUE, busInfoList.coherentFlags.data); + + ctxDmaNonCoherentAllowed = + FLD_TEST_DRF(2080_CTRL_BUS_INFO, _NONCOHERENT_DMA_FLAGS, + _CTXDMA, _TRUE, busInfoList.nonCoherentFlags.data); + + if (sd == 0) { + ctxDmaCoherentAllowedDev = ctxDmaCoherentAllowed; + ctxDmaNonCoherentAllowedDev = ctxDmaNonCoherentAllowed; + } else { + ctxDmaCoherentAllowedDev = + ctxDmaCoherentAllowedDev && ctxDmaCoherentAllowed; + ctxDmaNonCoherentAllowedDev = + ctxDmaNonCoherentAllowedDev && ctxDmaNonCoherentAllowed; + } + } + nvAssert(ctxDmaCoherentAllowedDev || ctxDmaNonCoherentAllowedDev); + + if (ctxDmaCoherentAllowedDev) { + pDevEvo->isoIOCoherencyModes.coherent = TRUE; + pDevEvo->nisoIOCoherencyModes.coherent = TRUE; + } + + if (ctxDmaNonCoherentAllowedDev) { + pDevEvo->isoIOCoherencyModes.noncoherent = TRUE; + pDevEvo->nisoIOCoherencyModes.noncoherent = TRUE; + } + } else { + /* + * On SOC display, NISO requests are IO-coherent and ISO + * requests are non-coherent. 
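+ * (Editorial gloss, not from RM documentation: ISO refers to isochronous
+ * display scanout traffic, NISO to non-isochronous traffic such as
+ * notifiers and semaphores.)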
+ */ + pDevEvo->isoIOCoherencyModes.noncoherent = TRUE; + pDevEvo->nisoIOCoherencyModes.coherent = TRUE; + } + + pDevEvo->supportsSyncpts = + nvRmEvoClassListCheck(pDevEvo, NV01_MEMORY_SYNCPOINT); + + return TRUE; +} + + +static void FreeDisplay(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + + if (pDispEvo == NULL) { + return; + } + + for (head = 0; head < ARRAY_LEN(pDispEvo->pSwapGroup); head++) { + nvAssert(pDispEvo->pSwapGroup[head] == NULL); + nvAssert(nvListIsEmpty(&pDispEvo->headState[head].vblankCallbackList)); + } + + nvAssert(nvListIsEmpty(&pDispEvo->dpyList)); + + nvkms_free_ref_ptr(pDispEvo->ref_ptr); + + nvFree(pDispEvo); +} + + +static inline NVDispEvoPtr AllocDisplay(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + NVDispEvoPtr pDispEvo = nvCalloc(1, sizeof(NVDispEvoRec)); + + if (pDispEvo == NULL) { + goto fail; + } + + pDispEvo->pDevEvo = pDevEvo; + + nvListInit(&pDispEvo->dpyList); + nvListInit(&pDispEvo->connectorList); + + pDispEvo->framelock.server = nvInvalidDpyId(); + pDispEvo->framelock.clients = nvEmptyDpyIdList(); + pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD; + + for (head = 0; head < ARRAY_LEN(pDispEvo->headState); head++) { + NvU32 apiHead = nvHardwareHeadToApiHead(head); + + pDispEvo->apiHeadState[apiHead].activeDpys = nvEmptyDpyIdList(); + pDispEvo->apiHeadState[apiHead].hwHeadsMask = NVBIT(head); + pDispEvo->apiHeadState[apiHead].attributes = + NV_EVO_DEFAULT_ATTRIBUTES_SET; + + nvListInit(&pDispEvo->headState[head].vblankCallbackList); + } + + pDispEvo->ref_ptr = nvkms_alloc_ref_ptr(pDispEvo); + if (!pDispEvo->ref_ptr) { + goto fail; + } + + return pDispEvo; + +fail: + FreeDisplay(pDispEvo); + + return NULL; +} + + +static void FreeDisplays(NVDevEvoPtr pDevEvo) +{ + unsigned int sd; + + for (sd = 0; sd < pDevEvo->nDispEvo; sd++) { + FreeDisplay(pDevEvo->pDispEvo[sd]); + pDevEvo->pDispEvo[sd] = NULL; + } + pDevEvo->nDispEvo = 0; +} + + +/*! + * Allocate the NVDispRecs for the given pDev. + * + * \param[in,out] pDev The device for which to allocate Displays. + */ +static NvBool AllocDisplays(NVDevEvoPtr pDevEvo) +{ + unsigned int sd; + + nvAssert(pDevEvo->nDispEvo == 0); + + pDevEvo->nDispEvo = pDevEvo->numSubDevices; + + for (sd = 0; sd < pDevEvo->nDispEvo; sd++) { + NVDispEvoPtr pDispEvo = AllocDisplay(pDevEvo); + + if (pDispEvo == NULL) { + goto fail; + } + + pDevEvo->pDispEvo[sd] = pDispEvo; + + pDispEvo->displayOwner = sd; + + pDispEvo->gpuLogIndex = pDevEvo->pSubDevices[sd]->gpuLogIndex; + } + + return TRUE; + +fail: + FreeDisplays(pDevEvo); + return FALSE; +} + +/* + * Get the (id) list of all supported display devices for this pDisp. + */ +static NvBool ProbeValidDisplays(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS getSupportedParams = { 0 }; + NvU32 ret; + + pDispEvo->connectorIds = nvEmptyDpyIdList(); + pDispEvo->displayPortMSTIds = nvEmptyDpyIdList(); + pDispEvo->dynamicDpyIds = nvEmptyDpyIdList(); + pDispEvo->validDisplays = nvEmptyDpyIdList(); + + getSupportedParams.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, + &getSupportedParams, sizeof(getSupportedParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get supported display device(s)"); + } else { + NVDpyIdList dpyIdList; + NVDpyId dpyId; + + // Grab only the static ids from the list. 
Dynamic ids are + // used to communicate with devices that are connected to + // a connector that has a static id. + dpyIdList = nvNvU32ToDpyIdList(getSupportedParams.displayMask); + + FOR_ALL_DPY_IDS(dpyId, dpyIdList) { + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS getOrInfoParams = { 0 }; + getOrInfoParams.subDeviceInstance = pDispEvo->displayOwner; + getOrInfoParams.displayId = nvDpyIdToNvU32(dpyId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &getOrInfoParams, + sizeof(getOrInfoParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get supported display device(s)"); + } else { + if (!getOrInfoParams.bIsDispDynamic) { + pDispEvo->connectorIds = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->connectorIds); + } + } + } + } + + pDispEvo->validDisplays = pDispEvo->connectorIds; + + return TRUE; +} + +/*! + * Return TRUE if every pDispEvo on this pDevEvo has an empty validDisplays. + */ +static NvBool NoValidDisplays(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + if (!nvDpyIdListIsEmpty(pDispEvo->validDisplays)) { + return FALSE; + } + } + + return TRUE; +} + + +/* + * Find the NvKmsConnectorSignalFormat for the pConnectorEvo. + */ +static NvKmsConnectorSignalFormat +GetSignalFormat(const NVConnectorEvoRec *pConnectorEvo) +{ + // SignalFormat represents a weird combination of our OR type and protocol. + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + switch (pConnectorEvo->or.protocol) { + default: + nvAssert(!"Unexpected OR protocol for DAC"); + // fall through + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT: + return NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA; + } + + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + switch (pConnectorEvo->or.protocol) { + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM: + return NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS; + + default: + nvAssert(!"Unexpected OR protocol for SOR"); + // fall through + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: + return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS; + + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + return NVKMS_CONNECTOR_SIGNAL_FORMAT_DP; + } + + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + switch (pConnectorEvo->or.protocol) { + default: + nvAssert(!"Unexpected OR protocol for PIOR"); + // fall through + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC: + return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS; + } + + case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI: + switch (pConnectorEvo->or.protocol) { + default: + nvAssert(!"Unexpected OR protocol for DSI"); + // fall through + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI: + return NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI; + } + + default: + nvAssert(!"Unexpected OR type"); + return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN; + } + + return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN; +} + + +static NvU32 GetDfpInfo(const NVConnectorEvoRec *pConnectorEvo) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NV0073_CTRL_DFP_GET_INFO_PARAMS params = { 0 }; + NvU32 ret; + + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + return 0x0; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + ret = 
nvRmApiControl(nvEvoGlobal.clientHandle,
+ pDevEvo->displayCommonHandle,
+ NV0073_CTRL_CMD_DFP_GET_INFO,
+ &params,
+ sizeof(params));
+
+ if (ret != NVOS_STATUS_SUCCESS) {
+ nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, "Failed to query DFP info");
+ return 0x0;
+ }
+
+ return params.flags;
+}
+
+typedef struct _AllocConnectorDispDataRec {
+ NvU32 dfpIndex;
+ NvU32 crtIndex;
+ NvU32 typeIndices[NVKMS_CONNECTOR_TYPE_MAX + 1];
+} AllocConnectorDispDataRec;
+
+/*!
+ * Query and setup information for a connector.
+ */
+static NvBool AllocConnector(
+ NVDispEvoPtr pDispEvo,
+ NVDpyId dpyId,
+ AllocConnectorDispDataRec *pAllocConnectorDispData)
+{
+ NVConnectorEvoPtr pConnectorEvo = NULL;
+ NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+ NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS params = { 0 };
+ NvU32 ret;
+ NvBool isDP;
+
+ pConnectorEvo = nvCalloc(1, sizeof(*pConnectorEvo));
+
+ if (pConnectorEvo == NULL) {
+ return FALSE;
+ }
+
+ pConnectorEvo->pDispEvo = pDispEvo;
+ pConnectorEvo->displayId = dpyId;
+ pConnectorEvo->type = NVKMS_CONNECTOR_TYPE_UNKNOWN;
+ pConnectorEvo->physicalIndex = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
+ pConnectorEvo->physicalLocation = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
+ /* Query the output resource configuration */
+ nvRmGetConnectorORInfo(pConnectorEvo, FALSE);
+
+ isDP =
+ (pConnectorEvo->or.type ==
+ NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+ (pConnectorEvo->or.protocol ==
+ NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ||
+ pConnectorEvo->or.protocol ==
+ NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B);
+
+ /* Determine the connector type. */
+
+ params.subDeviceInstance = pDispEvo->displayOwner;
+ params.displayId = nvDpyIdToNvU32(dpyId);
+
+ ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+ pDevEvo->displayCommonHandle,
+ NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA,
+ &params,
+ sizeof(params));
+
+ if (ret != NVOS_STATUS_SUCCESS) {
+ nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+ "Failed to determine connector type for connector "
+ NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
+ goto fail;
+ } else {
+
+ static const struct {
+ NvU32 type0073;
+ NvKmsConnectorType typeNvKms;
+ } connectorTypeTable[] = {
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT,
+ NVKMS_CONNECTOR_TYPE_DP },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C,
+ NVKMS_CONNECTOR_TYPE_USBC },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT,
+ NVKMS_CONNECTOR_TYPE_DP },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT,
+ NVKMS_CONNECTOR_TYPE_DP },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1,
+ NVKMS_CONNECTOR_TYPE_DP },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2,
+ NVKMS_CONNECTOR_TYPE_DP },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN,
+ NVKMS_CONNECTOR_TYPE_VGA },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO,
+ NVKMS_CONNECTOR_TYPE_DVI_I },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE,
+ NVKMS_CONNECTOR_TYPE_DVI_I },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I,
+ NVKMS_CONNECTOR_TYPE_DVI_I },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D,
+ NVKMS_CONNECTOR_TYPE_DVI_D },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC,
+ NVKMS_CONNECTOR_TYPE_ADC },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1,
+ NVKMS_CONNECTOR_TYPE_DVI_I },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2,
+ NVKMS_CONNECTOR_TYPE_DVI_I },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG,
+ NVKMS_CONNECTOR_TYPE_LVDS },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM,
+ NVKMS_CONNECTOR_TYPE_LVDS },
+ { 
NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A,
+ NVKMS_CONNECTOR_TYPE_HDMI },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI,
+ NVKMS_CONNECTOR_TYPE_HDMI },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD,
+ NVKMS_CONNECTOR_TYPE_UNKNOWN },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI,
+ NVKMS_CONNECTOR_TYPE_DSI },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER,
+ NVKMS_CONNECTOR_TYPE_DP_SERIALIZER },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN,
+ NVKMS_CONNECTOR_TYPE_UNKNOWN },
+ { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN,
+ NVKMS_CONNECTOR_TYPE_UNKNOWN },
+ };
+
+ int i, j;
+
+ for (i = 0; i < params.count; i++) {
+ for (j = 0; j < ARRAY_LEN(connectorTypeTable); j++) {
+ if (connectorTypeTable[j].type0073 == params.data[i].type) {
+ if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) {
+ pConnectorEvo->type = connectorTypeTable[j].typeNvKms;
+ } else {
+ /*
+ * The only cases where we should see
+ * params.count > 1 (and thus attempt to
+ * assign pConnectorEvo->type multiple times)
+ * should be where all the
+ * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_*
+ * values map to the same NvKmsConnectorType.
+ */
+ nvAssert(pConnectorEvo->type ==
+ connectorTypeTable[j].typeNvKms);
+ }
+ break;
+ }
+ }
+ if (j == ARRAY_LEN(connectorTypeTable)) {
+ nvAssert(!"Unhandled connector type!");
+ }
+
+ if (i == 0) {
+ pConnectorEvo->physicalIndex = params.data[i].index;
+ pConnectorEvo->physicalLocation = params.data[i].location;
+ } else {
+ nvAssert(pConnectorEvo->physicalIndex == params.data[i].index);
+ nvAssert(pConnectorEvo->physicalLocation ==
+ params.data[i].location);
+ }
+ }
+
+ pConnectorEvo->ddcPartnerDpyIdsList = nvNvU32ToDpyIdList(params.DDCPartners);
+ }
+
+ /* If the connector type is unknown, ignore this connector. */
+ if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) {
+ nvFree(pConnectorEvo);
+ return TRUE;
+ }
+
+ /*
+ * Ignore connectors that use DP protocol, but don't have a
+ * DP-compatible type.
+ */
+ if (isDP &&
+ ((pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_DP) &&
+ !nvConnectorIsDPSerializer(pConnectorEvo) &&
+ (pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_USBC))) {
+ nvFree(pConnectorEvo);
+ return TRUE;
+ }
+
+ /*
+ * Bind connector to the DP lib if DP capable. Serializer
+ * connector is not managed by DP lib.
+ */
+ if (isDP &&
+ !nvConnectorIsDPSerializer(pConnectorEvo)) {
+ pConnectorEvo->pDpLibConnector = nvDPCreateConnector(pConnectorEvo);
+ if (!pConnectorEvo->pDpLibConnector) {
+ nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+ "Failed to initialize DisplayPort support for "
+ NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
+ goto fail;
+ }
+ }
+
+ pConnectorEvo->signalFormat = GetSignalFormat(pConnectorEvo);
+
+ pConnectorEvo->dfpInfo = GetDfpInfo(pConnectorEvo);
+
+ /*
+ * Change-list 6909651 disabled YCbCr* color spaces for DisplayPort; it
+ * says:
+ * "Disable anything other than RGB for DisplayPort; on FERMI at least,
+ * there are problems YCbCr* on DP (but not HDMI), since the limited range
+ * must be implemented by EVO HW, and not the LUT (per EVO error checks)."
+ *
+ * TODO: Investigate whether YCbCr* color spaces are allowed for
+ * DisplayPort on Kepler and onward (also take DP-MST into consideration).
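+ * (The dfpInfo flags tested below come from NV0073_CTRL_CMD_DFP_GET_INFO,
+ * cached by GetDfpInfo() above.)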
+ */ + if (!nvConnectorUsesDPLib(pConnectorEvo)) { + /* check for color space (YCbCr422, YCbCr444) capability of GPU */ + if (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS_FORMAT, _YCBCR422_CAPABLE, _TRUE, + pConnectorEvo->dfpInfo)) { + pConnectorEvo->colorSpaceCaps.ycbcr422Capable = TRUE; + } + + if (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS_FORMAT, _YCBCR444_CAPABLE, _TRUE, + pConnectorEvo->dfpInfo)) { + pConnectorEvo->colorSpaceCaps.ycbcr444Capable = TRUE; + } + } else { + pConnectorEvo->colorSpaceCaps.ycbcr422Capable = FALSE; + pConnectorEvo->colorSpaceCaps.ycbcr444Capable = FALSE; + } + + if (pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + nvAssert(pDevEvo->numHeads >= 1); + // DSI supports only HEAD0 assignment + pConnectorEvo->validHeadMask = 0x1; + + if (pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_DSI) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Mismatch between connector type and signal format for DSI!"); + goto fail; + } + } else { + pConnectorEvo->validHeadMask = (1 << pDevEvo->numHeads) - 1; + } + + /* Assign connector indices. */ + + pConnectorEvo->legacyType = + GetLegacyConnectorType(pDispEvo, pConnectorEvo->displayId); + + switch (pConnectorEvo->legacyType) { + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT: + pConnectorEvo->legacyTypeIndex = + pAllocConnectorDispData->crtIndex++; + break; + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP: + pConnectorEvo->legacyTypeIndex = + pAllocConnectorDispData->dfpIndex++; + break; + default: + nvAssert(!"Unknown connector type"); + break; + } + + nvAssert(pConnectorEvo->type < + ARRAY_LEN(pAllocConnectorDispData->typeIndices)); + pConnectorEvo->typeIndex = + pAllocConnectorDispData->typeIndices[pConnectorEvo->type]++; + + nvListAppend(&pConnectorEvo->connectorListEntry, &pDispEvo->connectorList); + + nvkms_snprintf(pConnectorEvo->name, sizeof(pConnectorEvo->name), "%s-%u", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex); + + return TRUE; + +fail: + nvFree(pConnectorEvo); + return FALSE; +} + + +static void FreeConnectors(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo, pConnectorEvoNext; + + nvListForEachEntry_safe(pConnectorEvo, pConnectorEvoNext, + &pDispEvo->connectorList, connectorListEntry) { + // Unbind DP lib from the connector + nvDPDestroyConnector(pConnectorEvo->pDpLibConnector); + pConnectorEvo->pDpLibConnector = NULL; + nvListDel(&pConnectorEvo->connectorListEntry); + nvFree(pConnectorEvo); + } +} + + +/*! + * Allocate and initialize the connector structs for the given pDisp. + * + * NOTE: Each Display ID in pDispEvo->connectorIds (aka the + * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED mask) is a possible display + * connection to the GPU which is static after boot. + */ +static NvBool AllocConnectors(NVDispEvoPtr pDispEvo) +{ + NVDpyId dpyId; + NVConnectorEvoPtr pConnectorEvo; + AllocConnectorDispDataRec allocConnectorDispData = { }; + + nvAssert(nvListIsEmpty(&pDispEvo->connectorList)); + + if (nvDpyIdListIsEmpty(pDispEvo->connectorIds)) { + /* Allow boards with no connectors */ + return TRUE; + } + + /* Allocate the connectors */ + FOR_ALL_DPY_IDS(dpyId, pDispEvo->connectorIds) { + if (!AllocConnector(pDispEvo, dpyId, &allocConnectorDispData)) { + goto fail; + } + } + + /* + * Reassign pDispEvo->connectorIds, to exclude any connectors ignored above: + * AllocConnector() may return TRUE but not actually create a pConnectorEvo + * for some connectors reported by resman. 
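+ * (Connectors with an unknown type, and DP-protocol connectors without a
+ * DP-compatible connector type, are skipped in exactly that way above.)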
+ */ + pDispEvo->connectorIds = nvEmptyDpyIdList(); + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + pDispEvo->connectorIds = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, + pDispEvo->connectorIds); + } + + pDispEvo->validDisplays = pDispEvo->connectorIds; + + return TRUE; + + fail: + FreeConnectors(pDispEvo); + return FALSE; +} + + +/*! + * Query the number of heads and save the result in pDevEvo->numHeads. + * + * Query the number of heads on each pDisp of the pDev and limit to + * the minimum across all pDisps. Query the headMask on each pDisp + * and take the intersection across pDisps. Limit the number of heads + * to the number of bits in the headMask. + * + * \param[in,out] pDev This is the device pointer; the pDisps within + * it are used to query per-GPU information. + * The result is written to pDevEvo->numHeads. + * + * \return Return TRUE if numHeads could be correctly assigned; + * return FALSE if numHeads could not be queried. + */ +static NvBool ProbeHeadCount(NVDevEvoPtr pDevEvo) +{ + NvU32 numHeads = 0, headMask = 0; + int sd, head, numBits; + NVDispEvoPtr pDispEvo; + NvU32 ret; + + pDevEvo->numHeads = 0; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS numHeadsParams = { 0 }; + NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS headMaskParams = { 0 }; + + numHeadsParams.subDeviceInstance = sd; + numHeadsParams.flags = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, + &numHeadsParams, sizeof(numHeadsParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get the number of heads"); + return FALSE; + } + + if (numHeadsParams.numHeads == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No heads found on board!"); + return FALSE; + } + + if (numHeads == 0) { + numHeads = numHeadsParams.numHeads; + } else { + if (numHeads != numHeadsParams.numHeads) { + NvU32 minNumHeads = + NV_MIN(numHeads, numHeadsParams.numHeads); + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Unexpected numbers of heads " + "(%d, %d); clamping to %d", + numHeads, numHeadsParams.numHeads, minNumHeads); + numHeads = minNumHeads; + } + } + + headMaskParams.subDeviceInstance = sd; + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, + &headMaskParams, sizeof(headMaskParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get head configuration"); + return FALSE; + } + + if (headMask == 0) { + headMask = headMaskParams.headMask; + } else { + if (headMask != headMaskParams.headMask) { + NvU32 intersectedHeadMask = + headMask & headMaskParams.headMask; + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Unexpected head configurations " + "(0x%02x, 0x%02x); limiting to 0x%02x", + headMask, headMaskParams.headMask, + intersectedHeadMask); + headMask = intersectedHeadMask; + } + } + } + + /* clamp numHeads to the number of bits in headMask */ + + numBits = nvPopCount32(headMask); + + /* for now, we only support headMask when it is tightly packed at 0 */ + + for (head = 0; head < numBits; head++) { + if ((headMask & (1 << head)) == 0) { + NvU32 modifiedHeadMask = (1 << head) - 1; + + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "The head configuration (0x%02x) " + "is unexpected; limiting to 0x%02x", headMask, + modifiedHeadMask); + + headMask = modifiedHeadMask; + numBits = head; + break; + } + } + + /* headMask should never increase numHeads */ + + if (numBits 
> numHeads) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "The head configuration (0x%02x) " + "is inconsistent with the number of heads (%d)", + headMask, numHeads); + } else if (numBits < numHeads) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Correcting number of heads for " + "current head configuration (0x%02x)", headMask); + numHeads = numBits; + } + + pDevEvo->numHeads = numHeads; + + return TRUE; +} + +/*! + * Set a pConnectorEvo's software state based on the boot head assignment. + */ +static void MarkConnectorBootHeadActive(NVDispEvoPtr pDispEvo, NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDpyId displayId, rootPortId; + NVDpyEvoPtr pDpyEvo; + NVConnectorEvoPtr pConnectorEvo; + NVDispHeadStateEvoPtr pHeadState; + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 }; + NvU32 ret; + NvU32 apiHead = nvHardwareHeadToApiHead(head); + + // Use the first displayId in the boot display list. + // + // TODO: What should we do if more than one dpy ID is listed for a boot + // display? + nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[head]) == 1); + displayId = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), + pDispEvo->vbiosDpyConfig[head]); + + // The displayId reported by RM could be a dynamic one. Find the root port + // for this ID. + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(displayId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + return; + } + + if (params.bIsDispDynamic) { + rootPortId = nvNvU32ToDpyId(params.rootPortId); + } else { + rootPortId = displayId; + } + + pConnectorEvo = nvGetConnectorFromDisp(pDispEvo, rootPortId); + if (!pConnectorEvo) { + return; + } + + if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + + nvAssert(params.index != NV_INVALID_OR); + if (params.index == NV_INVALID_OR) { + // If RM reported that a head is driving this dpyId, then there + // should be an SOR assigned. However, due to a bug in the way + // PDB_PROP_GPU_DISABLE_VGA_CONSOLE_RESTORATION_ON_RESUME is + // handled, RM can report an "active" head with no SOR assigned on + // certain specific GPUs. If that happens, just treat the head as + // disabled. See bug 1692425. + pDispEvo->vbiosDpyConfig[head] = nvEmptyDpyIdList(); + return; + } else { + // Track the SOR assignment for this connector. See the comment in + // nvRmGetConnectorORInfo() for why this is deferred until now. + nvAssert(pConnectorEvo->or.mask == 0x0); + pConnectorEvo->or.mask |= NVBIT(params.index); + } + } + nvAssert((pConnectorEvo->or.mask & NVBIT(params.index)) != 0x0); + + // Use the pDpyEvo for the connector, since we may not have one for + // display id if it's a dynamic one. + pDpyEvo = nvGetDpyEvoFromDispEvo(pDispEvo, pConnectorEvo->displayId); + + pHeadState = &pDispEvo->headState[head]; + + nvAssert(pDpyEvo->apiHead == NV_INVALID_HEAD); + nvAssert(!nvHeadIsActive(pDispEvo, head)); + + pDpyEvo->apiHead = nvHardwareHeadToApiHead(head); + + pDispEvo->apiHeadState[apiHead].activeDpys = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + pHeadState->pConnectorEvo = pConnectorEvo; + pHeadState->activeRmId = nvDpyIdToNvU32(displayId); + + // Track the assigned head. + pConnectorEvo->or.ownerHeadMask[params.index] |= NVBIT(head); + + nvEvoStateStartNoLock(&pDispEvo->pDevEvo->gpus[pDispEvo->displayOwner]); +} + +/*!
+ * Query the vbios assignment of heads to display devices, and cache + * in pDispEvo->vbiosDpyConfig for later use by nvDPResume(). + * + * \param[in,out] pDisp This is the GPU display pointer; the result is + * written to pDispEvo->vbiosDpyConfig + */ +static void GetVbiosHeadAssignmentOneDisp(NVDispEvoPtr pDispEvo) +{ + unsigned int head; + NvU32 ret = NVOS_STATUS_ERROR_GENERIC; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + nvkms_memset(&pDispEvo->vbiosDpyConfig, 0, + sizeof(pDispEvo->vbiosDpyConfig)); + + /* if there is no display, there is no origDpyConfig */ + + nvAssert(pDevEvo->displayCommonHandle != 0); + + /* + * get the vbios assignment of heads within the GPU, so that + * later when we do head assignment, we can try to preserve the + * existing assignment; see bug 208072 + */ + + for (head = 0; head < pDevEvo->numHeads; head++) { + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS activeDpysParams = { 0 }; + + activeDpysParams.subDeviceInstance = pDispEvo->displayOwner; + activeDpysParams.head = head; + /* + * We want to check for active displays set by any low-level software + * such as VBIOS, not just those set by an RM client + */ + activeDpysParams.flags = + DRF_DEF(0073, _CTRL_SYSTEM_GET_ACTIVE_FLAGS, _CLIENT, _DISABLE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, + &activeDpysParams, sizeof(activeDpysParams)); + + if (ret == NVOS_STATUS_SUCCESS) { + // XXX TODO: If this is a dynamic display ID, it's not necessarily + // correlated with the NVDpyId we'll assign to a dynamic pDpyEvo + // later. We should instead store this as an NvU32 and assign it as + // the activeRmId for a dynamic pDpyEvo that DPLib reports as being + // driven by the firmware group. See bug 1656584. + pDispEvo->vbiosDpyConfig[head] = + nvNvU32ToDpyIdList(activeDpysParams.displayId); + if (activeDpysParams.displayId != 0) { + MarkConnectorBootHeadActive(pDispEvo, head); + } + } + + nvAssert(ret == NVOS_STATUS_SUCCESS); + } +} + +static void GetVbiosHeadAssignment(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + GetVbiosHeadAssignmentOneDisp(pDispEvo); + } +} + +/*! + * Query the boot display device(s). + */ +static void ProbeBootDisplays(NVDispEvoPtr pDispEvo) +{ + NvU32 ret; + NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS bootParams = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + pDispEvo->bootDisplays = nvEmptyDpyIdList(); + + bootParams.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS, + &bootParams, sizeof(bootParams)); + + if (ret == NVOS_STATUS_SUCCESS) { + pDispEvo->bootDisplays = + nvNvU32ToDpyIdList(bootParams.bootDisplayMask); + } +} + +/*! + * Query the 0073 display common object capabilities. 
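+ *
+ * The caps are cached in pDevEvo->commonCapsBits and tested later with
+ * NV0073_CTRL_SYSTEM_GET_CAP(); a minimal sketch of that pattern, as it
+ * appears elsewhere in this file:
+ *
+ *     if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
+ *             NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
+ *         // SOR crossbar present; OR assignments are resolved later
+ *     }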
+ */ +static NvBool ProbeDisplayCommonCaps(NVDevEvoPtr pDevEvo) +{ + NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { }; + NvU32 ret; + + ct_assert(sizeof(pDevEvo->commonCapsBits) == sizeof(capsParams.capsTbl)); + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &capsParams, sizeof(capsParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine display common capabilities"); + return FALSE; + } + nvkms_memcpy(pDevEvo->commonCapsBits, capsParams.capsTbl, + sizeof(pDevEvo->commonCapsBits)); + + return TRUE; +} + +static NvBool ReadDPCDReg(NVConnectorEvoPtr pConnectorEvo, + NvU32 dpcdAddr, + NvU8 *dpcdData) +{ + NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { }; + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX); + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ); + + params.addr = dpcdAddr; + + /* Requested size is 0-based */ + params.size = 0; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_AUXCH_CTRL, + &params, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "AUX read failed for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + if (params.size != 1U) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "AUX read returned 0 bytes for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + *dpcdData = params.data[0]; + + return TRUE; +} + +NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo, + NvU32 dpcdAddr, + NvU8 dpcdData) +{ + NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { }; + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX); + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE); + + params.addr = dpcdAddr; + params.data[0] = dpcdData; + + /* Requested size is 0-based */ + params.size = 0; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_AUXCH_CTRL, + &params, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "AUX write failed for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + if (params.size != 1U) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Wrote 0 bytes for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + return TRUE; +} + +static NvBool ReadDPSerializerCaps(NVConnectorEvoPtr pConnectorEvo) +{ + NVDpyIdList oneDpyIdList = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + NVDpyIdList connectedList; + NvU8 dpcdData = 0; + + /* + * This call will not only confirm that the DP serializer is connected, but + * will also power on the corresponding DPAUX pads if the serializer is + * detected via NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE. The DPAUX pads + * need to be enabled for the DPCD reads below.
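+ *
+ * (A sketch of the read pattern that follows, using the helpers above;
+ * each ReadDPCDReg() call transfers exactly one byte over AUX, since
+ * the requested size is 0-based:)
+ *
+ *     NvU8 dpcdData = 0;
+ *     if (ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LINK_BANDWIDTH,
+ *                     &dpcdData)) {
+ *         // dpcdData now holds the sink's MAX_LINK_BANDWIDTH byte
+ *     }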
+ */ + connectedList = nvRmGetConnectedDpys(pConnectorEvo->pDispEvo, oneDpyIdList); + if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedList)) { + nvEvoLogDev(pConnectorEvo->pDispEvo->pDevEvo, EVO_LOG_ERROR, + "Serializer connector %s is not currently connected!", + pConnectorEvo->name); + return FALSE; + } + + if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LINK_BANDWIDTH, &dpcdData)) { + return FALSE; + } + pConnectorEvo->dpSerializerCaps.maxLinkBW = + DRF_VAL(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, dpcdData); + + if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LANE_COUNT, &dpcdData)) { + return FALSE; + } + pConnectorEvo->dpSerializerCaps.maxLaneCount = + DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, dpcdData); + + if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MSTM, &dpcdData)) { + return FALSE; + } + pConnectorEvo->dpSerializerCaps.supportsMST = + FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, dpcdData); + + return TRUE; +} + +static NvBool AllocDPSerializerDpys(NVConnectorEvoPtr pConnectorEvo) +{ + NvBool supportsMST; + NvU32 numHeads; + NvU32 i; + + if (!nvConnectorIsDPSerializer(pConnectorEvo)) { + return TRUE; + } + + if (!ReadDPSerializerCaps(pConnectorEvo)) { + return FALSE; + } + + supportsMST = pConnectorEvo->dpSerializerCaps.supportsMST; + numHeads = pConnectorEvo->pDispEvo->pDevEvo->numHeads; + for (i = 0; i < numHeads && supportsMST; i++) { + NVDpyEvoPtr pDpyEvo = NULL; + NvBool dynamicDpyCreated = FALSE; + char address[5] = { }; + + nvkms_snprintf(address, sizeof(address), "0.%d", i + 1); + pDpyEvo = nvGetDPMSTDpyEvo(pConnectorEvo, address, + &dynamicDpyCreated); + if ((pDpyEvo == NULL) || !dynamicDpyCreated) { + return FALSE; + } + + pDpyEvo->dp.serializerStreamIndex = i; + } + + return TRUE; +} + +/*! + * + */ +static NvBool AllocDpys(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + + // At this point, there should be no DisplayPort multistream devices. + nvAssert(nvDpyIdListsAreEqual(pDispEvo->validDisplays, + pDispEvo->connectorIds)); + nvAssert(nvDpyIdListIsEmpty(pDispEvo->displayPortMSTIds)); + nvAssert(nvDpyIdListIsEmpty(pDispEvo->dynamicDpyIds)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo, + pConnectorEvo->displayId, NULL); + + if (pDpyEvo == NULL) { + nvAssert(!"Failed to allocate pDpy"); + return FALSE; + } + + if (!AllocDPSerializerDpys(pConnectorEvo)) { + nvAssert(!"Failed to allocate non DPLib managed dpys"); + return FALSE; + } + } + + return TRUE; +} + +static void FreeDpys(NVDispEvoPtr pDispEvo) +{ + NVDpyEvoPtr pDpyEvo, pDpyEvoTmp; + + nvListForEachEntry_safe(pDpyEvo, pDpyEvoTmp, + &pDispEvo->dpyList, dpyListEntry) { + nvFreeDpyEvo(pDispEvo, pDpyEvo); + } +} + + +/*! + * Receive hotplug notification from resman. + * + * This function is registered as the kernel callback function from + * resman when an NV2080_NOTIFIERS_HOTPLUG event is generated. + * + * However, this function is called with resman's context (alternate + * stack, resman locks held, etc). Schedule deferred work, so that we + * can process the hotplug event without resman's encumbrances. 
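+ *
+ * The deferral is a zero-timeout timer; the pattern used by the
+ * callbacks below is simply:
+ *
+ *     (void) nvkms_alloc_timer_with_ref_ptr(
+ *         nvHandleHotplugEventDeferredWork, // callback
+ *         arg,   // ref_ptr to the pDispEvo
+ *         0,     // dataU32
+ *         0);    // timeout of 0: run as soon as possible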
+ */ +static void ReceiveHotplugEvent(void *arg, void *pEventDataVoid, NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + (void) nvkms_alloc_timer_with_ref_ptr( + nvHandleHotplugEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* dataU32 */ + 0); +} + +static void ReceiveDPIRQEvent(void *arg, void *pEventDataVoid, NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + // XXX The displayId of the connector that generated the event should be + // available here somewhere. We should figure out how to find that and + // plumb it through to nvHandleDPIRQEventDeferredWork. + (void) nvkms_alloc_timer_with_ref_ptr( + nvHandleDPIRQEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* dataU32 */ + 0); +} + +NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo, + NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + struct nvkms_ref_ptr *ref_ptr, + NvU32 parentHandle, + NvU32 eventHandle, + Callback5ArgVoidReturn func, + NvU32 event) +{ + NV0005_ALLOC_PARAMETERS allocEventParams = { 0 }; + + cb->func = func; + cb->arg = ref_ptr; + + allocEventParams.hParentClient = nvEvoGlobal.clientHandle; + allocEventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + allocEventParams.notifyIndex = event; + allocEventParams.data = NV_PTR_TO_NvP64(cb); + + return nvRmApiAlloc(nvEvoGlobal.clientHandle, + parentHandle, + eventHandle, + NV01_EVENT_KERNEL_CALLBACK_EX, + &allocEventParams) + == NVOS_STATUS_SUCCESS; +} + +static NvBool RegisterDispCallback(NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + NVDispEvoPtr pDispEvo, + NvU32 handle, + Callback5ArgVoidReturn func, + NvU32 event) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + return nvRmRegisterCallback(pDevEvo, cb, pDispEvo->ref_ptr, subDevice, + handle, func, event); +} + +enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int sd; + enum NvKmsAllocDeviceStatus status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + NvU32 totalDispNumSubDevices = 0; + + pDevEvo->sli.bridge.present = FALSE; + + if (!QueryGpuCapabilities(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query GPU capabilities"); + goto fail; + } + + if (pDevEvo->supportsSyncpts) { + pDevEvo->preSyncptTable = + nvCalloc(1, sizeof(NVEvoSyncpt) * NV_SYNCPT_GLOBAL_TABLE_LENGTH); + if (pDevEvo->preSyncptTable == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate memory for pre-syncpt table"); + goto fail; + } + } + + if (!AllocDisplays(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate displays"); + goto fail; + } + + /* allocate the display common object for this device */ + + if (nvRmEvoClassListCheck(pDevEvo, NV04_DISPLAY_COMMON)) { + + pDevEvo->displayCommonHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayCommonHandle, + NV04_DISPLAY_COMMON, NULL) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to initialize the display " + "subsystem for the NVIDIA graphics device!"); + goto fail; + + } + } else { + /* + * Not supporting NV04_DISPLAY_COMMON is expected in some + * configurations: e.g., GF117 (an Optimus-only or "coproc" GPU), + * emulation netlists. Fail with "no hardware". 
+ */ + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + if (!ProbeDisplayCommonCaps(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + if (!ProbeHeadCount(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + if (!ProbeValidDisplays(pDispEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + /* Keep track of connectors per pDisp and bind to DP lib if capable */ + if (!AllocConnectors(pDispEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + } + + /* + * If there are no valid display devices, fail with "no hardware". + */ + if (NoValidDisplays(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + /* + * The number of numSubDevices across disps should equal the + * device's numSubDevices. + */ + totalDispNumSubDevices = 0; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + totalDispNumSubDevices++; + } + + if (totalDispNumSubDevices != pDevEvo->numSubDevices) { + nvAssert(!"Number of disps' subdevices does not match device's"); + } + + /* + * Allocate an NV event for each pDispEvo on the corresponding + * subDevice, tied to the pDevEvo's OS event. + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { }; + NvU32 subDevice, ret; + + subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + pDispEvo->hotplugEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!RegisterDispCallback(&pDispEvo->rmHotplugCallback, pDispEvo, + pDispEvo->hotplugEventHandle, + ReceiveHotplugEvent, + NV2080_NOTIFIERS_HOTPLUG)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register display hotplug event"); + } + + // Enable hotplug notifications from this subdevice. + setEventParams.event = NV2080_NOTIFIERS_HOTPLUG; + setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle, + subDevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &setEventParams, + sizeof(setEventParams))) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register display hotplug " + "handler: 0x%x\n", ret); + } + } + + // Allocate a handler for the DisplayPort "IRQ" event, which is signaled + // when there's a short interruption in the hotplug detect line. + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { }; + NvU32 subDevice, ret; + + subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + pDispEvo->DPIRQEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!RegisterDispCallback(&pDispEvo->rmDPIRQCallback, pDispEvo, + pDispEvo->DPIRQEventHandle, ReceiveDPIRQEvent, + NV2080_NOTIFIERS_DP_IRQ)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register DisplayPort interrupt event"); + } + + // Enable DP IRQ notifications from this subdevice. 
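+        // As with the hotplug event above, _ACTION_REPEAT keeps the
+        // notification armed after each delivery, rather than one-shot.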
+ setEventParams.event = NV2080_NOTIFIERS_DP_IRQ; + setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle, + subDevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &setEventParams, + sizeof(setEventParams))) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register DisplayPort interrupt " + "handler: 0x%x\n", ret); + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + ProbeBootDisplays(pDispEvo); + + if (!AllocDpys(pDispEvo)) { + goto fail; + } + + } + + nvAllocVrrEvo(pDevEvo); + + return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + +fail: + nvRmDestroyDisplays(pDevEvo); + return status; +} + + +void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo) +{ + NvU32 ret; + NVDispEvoPtr pDispEvo; + int dispIndex; + NvS64 tmp; + + nvFreeVrrEvo(pDevEvo); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + // Before freeing anything, dump anything left in the RM's DisplayPort + // AUX channel log. + if (pDispEvo->dpAuxLoggingEnabled) { + do { + ret = nvRmQueryDpAuxLog(pDispEvo, &tmp); + } while (ret && tmp); + } + + // Free the DisplayPort IRQ event. + if (pDispEvo->DPIRQEventHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDispEvo->DPIRQEventHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDispEvo->DPIRQEventHandle); + pDispEvo->DPIRQEventHandle = 0; + } + + // Free the hotplug event. + /* + * XXX I wish I could cancel anything scheduled by + * ReceiveHotplugEvent() and ReceiveDPIRQEvent() for this pDispEvo... + */ + if (pDispEvo->hotplugEventHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDispEvo->hotplugEventHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDispEvo->hotplugEventHandle); + pDispEvo->hotplugEventHandle = 0; + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + FreeDpys(pDispEvo); + FreeConnectors(pDispEvo); + } + + FreeDisplays(pDevEvo); + + if (pDevEvo->supportsSyncpts) { + nvFree(pDevEvo->preSyncptTable); + } + + if (pDevEvo->displayCommonHandle != 0) { + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayCommonHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Free(displayCommonHandle) failed"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->displayCommonHandle); + pDevEvo->displayCommonHandle = 0; + } +} + + +/*! + * Allocate a display ID that we use to talk to RM about the dpy(s) on + * head. + * + * \param[in] pDisp The display system on which to allocate the ID. + * \param[in] dpyList The list of dpys. + * + * \return The display ID, or 0 on failure.
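+ *
+ * Hypothetical caller sketch (illustration only): a dynamically
+ * allocated ID must eventually be released with nvRmFreeDisplayId(),
+ * which is a no-op for the static connector IDs returned in the
+ * non-MST case:
+ *
+ *     NvU32 rmId = nvRmAllocDisplayId(pDispEvo, dpyList);
+ *     if (rmId != 0) {
+ *         // ... drive the dpy(s) using rmId ...
+ *         nvRmFreeDisplayId(pDispEvo, rmId);
+ *     }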
+ */ +NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = { 0 }; + const NVDpyEvoRec *pDpyEvo; + const NVConnectorEvoRec *pConnectorEvo = NULL; + NvBool isDPMST = NV_FALSE; + NvU32 ret; + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyList, pDispEvo) { + if (pConnectorEvo == NULL) { + /* First DPY from list, assign pConnectorEvo and isDPMST variable */ + pConnectorEvo = pDpyEvo->pConnectorEvo; + isDPMST = nvDpyEvoIsDPMST(pDpyEvo); + } + + if (pConnectorEvo != pDpyEvo->pConnectorEvo || + isDPMST != nvDpyEvoIsDPMST(pDpyEvo)) { + return 0; + } + } + + nvAssert(nvConnectorUsesDPLib(pConnectorEvo) || !isDPMST); + + if (!isDPMST) { + /* For non-MST dpy(s), simply return static display ID of connector */ + return nvDpyIdToNvU32(pConnectorEvo->displayId); + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, + &params, sizeof(params)); + + if (ret == NVOS_STATUS_SUCCESS) { + return params.displayIdAssigned; + } else { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failed to allocate display resource."); + } + + return 0; +} + + +/*! + * Send DISPLAY_CHANGE to resman. + * + * This should be called before and after each mode change, with the display + * mask describing the NEW display configuration. + */ +void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo, + enum NvKmsBeginEndModeset beginOrEnd, + NvU32 mask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS bracketParams = { }; + NvU32 ret; + + bracketParams.subDeviceInstance = pDispEvo->displayOwner; + bracketParams.newDevices = mask; + bracketParams.properties = 0; /* this is currently unused */ + switch (beginOrEnd) { + case BEGIN_MODESET: + bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START; + break; + case END_MODESET: + bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END; + break; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE, + &bracketParams, + sizeof(bracketParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE"); + } +} + + +/*! + * Free an RM display ID, if it was allocated dynamically. + * + * This function frees a display ID if it was allocated by + * nvRmAllocDisplayId. If the display ID is static, this function does + * nothing. + * + * From ctrl0073dp.h: You must not call this function while either the ARM + * or ASSEMBLY state cache refers to this display-id. The head must not be + * attached. + * + * \param[in] pDisp The display system on which to free the ID. + * \param[in] displayId The display ID to free. + */ +void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 displayId) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = { 0 }; + NVDpyId dpyId = nvNvU32ToDpyId(displayId); + NvU32 ret; + + /* Do nothing if display ID is static one!
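+     * A static ID is one listed in pDispEvo->connectorIds, i.e. one that
+     * was never handed out by NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID.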
*/ + if (nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)) { + return; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to relinquish display resource."); + } +} + + +/*! + * Query Resman for the (broad) display device type. + */ +static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS params = { 0 }; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(dpyId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_TYPE, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failure getting specific display device type."); + return NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN; + } + + nvAssert((params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) || + (params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP)); + + return params.displayType; +} + + +/*! + * Query RM for the current OR properties of the given connector. + * + * If 'assertOnly' is TRUE, this function will only assert that the OR + * configuration has not changed. + */ +void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 }; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &params, + sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to determine output resource properties."); + + if (assertOnly) { + return; + } + pConnectorEvo->or.type = NV0073_CTRL_SPECIFIC_OR_TYPE_DAC; + pConnectorEvo->or.mask = 0; + pConnectorEvo->or.protocol = + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT; + pConnectorEvo->or.ditherType = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF; + pConnectorEvo->or.ditherAlgo = + NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN; + pConnectorEvo->or.location = NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP; + + return; + } + + if (!assertOnly) { + pConnectorEvo->or.type = params.type; + if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) && + params.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + // For the SOR crossbar, RM may report that multiple displayIDs own + // the same SOR. For example, it may report SOR 2 for both the + // DisplayPort and TMDS halves of a physical connector even though + // they have separate displayIds. + // + // All we really need to know is which SOR is assigned to the boot + // display, so we defer the query to MarkConnectorBootHeadActive().
+ pConnectorEvo->or.mask = 0x0; + } else { + pConnectorEvo->or.mask = NVBIT(params.index); + } + pConnectorEvo->or.protocol = params.protocol; + pConnectorEvo->or.ditherType = params.ditherType; + pConnectorEvo->or.ditherAlgo = params.ditherAlgo; + pConnectorEvo->or.location = params.location; + } else { + nvAssert(pConnectorEvo->or.type == params.type); + nvAssert((pConnectorEvo->or.mask & NVBIT(params.index)) != 0x0); + nvAssert(pConnectorEvo->or.protocol == params.protocol); + nvAssert(pConnectorEvo->or.ditherType == params.ditherType); + nvAssert(pConnectorEvo->or.ditherAlgo == params.ditherAlgo); + nvAssert(pConnectorEvo->or.location == params.location); + } +} + +/*! + * Query connector state, and retry if necessary. + */ +NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo, + NVDpyIdList dpyIdList) +{ + NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayMask = nvDpyIdListToNvU32(dpyIdList); + params.flags = + (DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_METHOD,_DEFAULT) | + DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_DDC,_DEFAULT) | + DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_LOAD,_DEFAULT)); + + do { + params.retryTimeMs = 0; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, + &params, + sizeof(params)); + + if (ret == NVOS_STATUS_ERROR_NOT_READY && + params.retryTimeMs == 0) { + // Work around bug 970351: RM returns a zero retry time on platforms + // where the display driver is in user space. Use a conservative + // default. This code can be removed once this call is fixed in RM. + params.retryTimeMs = 20; + } + + if (params.retryTimeMs > 0) { + nvkms_usleep(params.retryTimeMs * 1000); + } else { + nvkms_yield(); + } + } while(params.retryTimeMs > 0); + + if (ret == NVOS_STATUS_SUCCESS) { + return nvNvU32ToDpyIdList(params.displayMask); + } else { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed detecting connected display devices"); + return nvEmptyDpyIdList(); + } +} + +/*! + * Notify the DP library that we are ready to proceed after a suspend/boot, and + * that it should initialize and start handling events. + */ +NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + NVDpyIdList connectedIdsList = + nvRmGetConnectedDpys(pDispEvo, pDispEvo->connectorIds); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NvBool plugged = + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedIdsList); + + if (!pConnectorEvo->pDpLibConnector) { + continue; + } + + if (!nvDPResume(pConnectorEvo->pDpLibConnector, plugged)) { + goto failed; + } + } + } + + return TRUE; + +failed: + nvRmPauseDP(pDevEvo); + return FALSE; +} + + +void nvRmPauseDP(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPPause(pConnectorEvo->pDpLibConnector); + } + } + } +} + + +/*! + * This function is called whenever the DPMS level changes. On a CRT, + * you set the DPMS level by (dis/en)abling the hsync and vsync + * signals: + * + * Hsync Vsync Mode + * ===== ===== ==== + * 1 1 Normal (on).
+ * 0 1 Standby -- RGB guns off, power supply on, tube filaments + * energized, (screen saver mode). + * 1 0 Suspend -- RGB guns off, power supply off, tube filaments + * energized. + * 0 0 Power off -- small auxiliary circuit stays on to monitor the + * hsync/vsync signals to know when to wake up. + */ +NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (nvDpyUsesDPLib(pDpyEvo)) { + nvDPDeviceSetPowerState(pDpyEvo, + (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON)); + return TRUE; + } else if (pDpyEvo->pConnectorEvo->legacyType != + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS powerParams = { 0 }; + + powerParams.subDeviceInstance = pDispEvo->displayOwner; + powerParams.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + + powerParams.powerState = (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON) ? + NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON : + NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER, + &powerParams, + sizeof(powerParams)); + + return (ret == NVOS_STATUS_SUCCESS); + } else { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS powerParams = { { 0 }, 0 }; + + powerParams.base.subdeviceIndex = pDispEvo->displayOwner; + if (pConnectorEvo->or.mask == 0x0) { + nvAssert(pConnectorEvo->or.mask != 0x0); + return FALSE; + } + powerParams.orNumber = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + switch (value) { + case NV_KMS_DPY_ATTRIBUTE_DPMS_ON: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE); + break; + case NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE); + break; + case NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO); + break; + case NV_KMS_DPY_ATTRIBUTE_DPMS_OFF: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO); + break; + default: + return FALSE; + } + // XXX These could probably be disabled too, in the DPMS_OFF case. 
+ powerParams.normalData = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_DATA, _ENABLE); + powerParams.normalPower = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_PWR, _ON); + + powerParams.flags = + DRF_DEF(5070, _CTRL_CMD_SET_DAC_PWR_FLAGS, _SPECIFIED_NORMAL, _YES); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_DAC_PWR, + &powerParams, + sizeof(powerParams)); + + return (ret == NVOS_STATUS_SUCCESS); + } +} + + +NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + NvU32 *ctxDmaFlags, void **ppBase, NvU64 size, + NvKmsMemoryIsoType isoType) +{ + NvU32 ret; + NvBool bufferAllocated = FALSE; + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + const NvKmsDispIOCoherencyModes *pIOCoherencyModes; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + + memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + memAllocParams.size = size; + + if (isoType == NVKMS_MEMORY_NISO) { + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES); + + pIOCoherencyModes = &pDevEvo->nisoIOCoherencyModes; + } else { + pIOCoherencyModes = &pDevEvo->isoIOCoherencyModes; + } + + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS) | + DRF_DEF(OS32, _ATTR, _FORMAT, _PITCH); + + if (pIOCoherencyModes->noncoherent) { + // Model (3) + // - allocate USWC system memory + // - allocate ctx dma with NVOS03_FLAGS_CACHE_SNOOP_DISABLE + // - to sync CPU and GPU, flush CPU WC buffer + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, + memAllocParams.attr); + + ret = nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + + if (ret == NVOS_STATUS_SUCCESS) { + bufferAllocated = TRUE; + if (ctxDmaFlags) { + *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _DISABLE); + } + } else { + bufferAllocated = FALSE; + } + + } + + if (!bufferAllocated && pIOCoherencyModes->coherent) { + // Model (2b): Similar to existing PCI model + // - allocate cached (or USWC) system memory + // - allocate ctx DMA with NVOS03_FLAGS_CACHE_SNOOP_ENABLE + // ... + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, + memAllocParams.attr); + + ret = nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + + if (ret == NVOS_STATUS_SUCCESS) { + bufferAllocated = TRUE; + if (ctxDmaFlags) { + *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _ENABLE); + } + } else { + bufferAllocated = FALSE; + } + } + + if (bufferAllocated) { + ret = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + 0, /* offset */ + size, + ppBase, + 0 /* flags */); + + if (ret != NVOS_STATUS_SUCCESS) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle); + + bufferAllocated = FALSE; + } + } + + return bufferAllocated; +} + + +/*****************************************************************************/ +/* Alloc memory and a context dma, following the rules dictated by the + DMA coherence flags. 
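+
+   A condensed view of the sysmem paths implemented by nvRmAllocSysmem()
+   above (a summary of the code, not additional behavior):
+
+     noncoherent: USWC memory, ctxdma with NVOS03_FLAGS_CACHE_SNOOP_DISABLE
+     coherent:    write-back memory, ctxdma with NVOS03_FLAGS_CACHE_SNOOP_ENABLE
+
+   The paths are tried in that order; the first allocation that succeeds
+   is then mapped into the CPU's address space with nvRmApiMapMemory().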
*/ +/*****************************************************************************/ + +NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma, + NvU64 limit, NvU32 ctxDmaFlags, NvU32 subDeviceMask) +{ + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { }; + NvBool bufferAllocated = FALSE; + NvU32 memoryHandle = 0; + void *pBase = NULL; + + NvBool needBar1Mapping = FALSE; + + NvU32 ctxDmaHandle = 0; + NvU32 localCtxDmaFlags = ctxDmaFlags | + DRF_DEF(OS03, _FLAGS, _ACCESS, _READ_WRITE) | + DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + + NvU32 ret; + + nvkms_memset(pDma, 0, sizeof(*pDma)); + + memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + /* + * On certain GPUs (GF100, GF104) there exists a hardware bug that forces + * us to put display NISO surfaces (pushbuffer, semaphores, notifiers + * accessed by EVO) in vidmem instead of sysmem. See bug 632241 for + * details. + */ + if (NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY)) { + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.type = NVOS32_TYPE_DMA; + memAllocParams.size = limit + 1; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) | + DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + /* We can't fall back to any of the sysmem options below, due to + * the nature of the HW bug forcing us to use vidmem. */ + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to allocate video memory for display"); + return FALSE; + } + + limit = memAllocParams.size - 1; + + /* We'll access these surfaces through IFB */ + pBase = NULL; + + bufferAllocated = TRUE; + needBar1Mapping = TRUE; + } + + if (!bufferAllocated) { + /* + * Setting NVKMS_MEMORY_NISO since nvRmAllocEvoDma() is currently only + * called to allocate pushbuffer and notifier memory. + */ + bufferAllocated = nvRmAllocSysmem(pDevEvo, memoryHandle, + &localCtxDmaFlags, &pBase, limit + 1, + NVKMS_MEMORY_NISO); + } + + if (!bufferAllocated) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unable to allocate DMA memory"); + + return FALSE; + } + + ctxDmaHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + // Create a ctxdma for this allocation. 
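+    // The context DMA spans the whole buffer (offset 0 through 'limit'),
+    // with the READ_WRITE access and hash-table flags already folded into
+    // localCtxDmaFlags above.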
+ ctxdmaParams.hMemory = memoryHandle; + ctxdmaParams.flags = localCtxDmaFlags; + ctxdmaParams.offset = 0; + ctxdmaParams.limit = limit; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + ctxDmaHandle, + NV01_CONTEXT_DMA, + &ctxdmaParams); + + if (ret != NVOS_STATUS_SUCCESS) { + if (pBase != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + pBase, + 0); + } + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, memoryHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, ctxDmaHandle); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate a DMA context"); + + return FALSE; + } + + pDma->memoryHandle = memoryHandle; + + pDma->ctxHandle = ctxDmaHandle; + + pDma->limit = limit; + + if (needBar1Mapping) { + NvBool result; + + result = nvRmEvoMapVideoMemory(pDevEvo, memoryHandle, limit + 1, + pDma->subDeviceAddress, subDeviceMask); + + if (!result) { + nvRmFreeEvoDma(pDevEvo, pDma); + return FALSE; + } + } else { + int sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (((1 << sd) & subDeviceMask) == 0) { + continue; + } + + pDma->subDeviceAddress[sd] = pBase; + } + } + pDma->isBar1Mapping = needBar1Mapping; + + return TRUE; +} + +void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma) +{ + NvU32 ret; + + if (pDma->ctxHandle != 0) { + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pDma->ctxHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA context"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->ctxHandle); + pDma->ctxHandle = 0; + } + + if (pDma->memoryHandle != 0) { + if (pDma->isBar1Mapping) { + nvRmEvoUnMapVideoMemory(pDevEvo, pDma->memoryHandle, + pDma->subDeviceAddress); + } else { + int sd = 0; + NvBool addressMapped = TRUE; + + /* If pDma->subDeviceAddress[sd] is non-NULL for multiple subdevices, + * assume they are the same. Unmap only one but set all of them to + * NULL. This matches the logic in nvRmAllocEvoDma(). 
+ */ + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (addressMapped && pDma->subDeviceAddress[sd] != NULL) { + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDma->memoryHandle, + pDma->subDeviceAddress[sd], + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to unmap memory"); + } + + addressMapped = FALSE; + } + + pDma->subDeviceAddress[sd] = NULL; + } + } + + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pDma->memoryHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA memory"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->memoryHandle); + pDma->memoryHandle = 0; + + pDma->limit = 0; + + nvkms_memset(pDma->subDeviceAddress, 0, sizeof(pDma->subDeviceAddress)); + } +} + +static void +CompletionNotifierEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVEvoChannelPtr pChannel = dataPtr; + + nvSendFlipOccurredEventEvo(pChannel->pb.pDevEvo, pChannel->channelMask); +} + +static void CompletionNotifierEvent(void *arg, void *pEventDataVoid, + NvU32 hEvent, NvU32 Data, NV_STATUS Status) +{ + (void) nvkms_alloc_timer_with_ref_ptr( + CompletionNotifierEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pChannel) */ + 0, /* dataU32 */ + 0); /* timeout: schedule the work immediately */ +} + +/*****************************************************************************/ +/* RmAllocEvoChannel () + * Allocates the EVO channel and associated notifier surfaces and ctxdmas. + * Takes how big the DMA controls are (varies by class of channel) and which + * class to allocate. + */ +/*****************************************************************************/ +static NVEvoChannelPtr +RmAllocEvoChannel(NVDevEvoPtr pDevEvo, + NVEvoChannelMask channelMask, + NvV32 instance, NvU32 class) +{ + NVEvoChannelPtr pChannel = NULL; + NVDmaBufferEvoPtr buffer = NULL; + int sd; + NvU32 ret; + + /* One 4k page is enough to map PUT and GET */ + const NvU64 dmaControlLen = 0x1000; + + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(channelMask) == 1); + + /* Allocate the channel data structure */ + pChannel = nvCalloc(1, sizeof(*pChannel)); + + if (pChannel == NULL) { + goto fail; + } + + buffer = &pChannel->pb; + + pChannel->hwclass = class; + pChannel->instance = instance; + pChannel->channelMask = channelMask; + + pChannel->notifiersDma = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoDma)); + + if (pChannel->notifiersDma == NULL) { + goto fail; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoDmaPtr pNotifiersDma = &pChannel->notifiersDma[sd]; + + void *pDmaDisplayChannel = NULL; + + // Allocation of the notifiers + if (!nvRmAllocEvoDma(pDevEvo, pNotifiersDma, + NV_DMA_EVO_NOTIFIER_SIZE - 1, + DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER), + 1 << sd)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Notifier DMA allocation failed"); + + goto fail; + } + + nvAssert(pNotifiersDma->subDeviceAddress[sd] != NULL); + + // Only allocate memory for one pushbuffer. 
+ // All subdevices will share (via subdevice mask) + if (sd == 0) { + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS ChannelAllocParams = { 0 }; + + NvU64 limit = NV_DMA_EVO_PUSH_BUFFER_SIZE - 1; + NVEvoDmaPtr pDma = &buffer->dma; + + // Allocation of the push buffer + if (!nvRmAllocEvoDma(pDevEvo, pDma, limit, 0, SUBDEVICE_MASK_ALL)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer DMA allocation failed"); + + goto fail; + } + + if (!pDma->isBar1Mapping) { + buffer->base = pDma->subDeviceAddress[0]; + } else { + /* + * Allocate memory for a shadow copy in sysmem that we'll copy + * to vidmem via BAR1 at kickoff time. + */ + buffer->base = nvCalloc(buffer->dma.limit + 1, 1); + if (buffer->base == NULL) { + goto fail; + } + } + + buffer->channel_handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + // Channel instance (always 0 for CORE - head number otherwise) + ChannelAllocParams.channelInstance = instance; + // PB CtxDMA Handle + ChannelAllocParams.hObjectBuffer = buffer->dma.ctxHandle; + // Initial offset within the PB + ChannelAllocParams.offset = 0; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + buffer->channel_handle, + class, + &ChannelAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer channel allocation failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + buffer->channel_handle); + buffer->channel_handle = 0; + + goto fail; + } + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + buffer->channel_handle, + 0, + dmaControlLen, + &pDmaDisplayChannel, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer DMA mapping failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto fail; + } + + buffer->control[sd] = pDmaDisplayChannel; + } + + /* Initialize the rest of the required push buffer information */ + buffer->buffer = buffer->base; + buffer->end = (NvU32 *)((char *)buffer->base + + NV_DMA_EVO_PUSH_BUFFER_SIZE - 8); + + /* + * Due to hardware bug 235044, we can not use the last 12 dwords of the + * core channel pushbuffer. Adjust offset_max appropriately. + * + * This bug is fixed in Volta and newer, so this workaround can be removed + * when Pascal support is dropped. See bug 3116066. 
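+     *
+     * (NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE below presumably reserves exactly
+     * those 12 dwords; fifo_free_count is counted in dwords, hence the
+     * ">> 2".)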
+ */ + buffer->offset_max = NV_DMA_EVO_PUSH_BUFFER_SIZE - + NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE; + buffer->fifo_free_count = (buffer->offset_max >> 2) - 2; + buffer->put_offset = 0; + buffer->num_channels = pDevEvo->numSubDevices; + buffer->pDevEvo = pDevEvo; + buffer->currentSubDevMask = SUBDEVICE_MASK_ALL; + + if (!FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, channelMask)) { + pChannel->ref_ptr = nvkms_alloc_ref_ptr(pChannel); + + if (pChannel->ref_ptr == NULL) { + goto fail; + } + + pChannel->completionNotifierEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!nvRmRegisterCallback(pDevEvo, + &pChannel->completionNotifierEventCallback, + pChannel->ref_ptr, + pChannel->pb.channel_handle, + pChannel->completionNotifierEventHandle, + CompletionNotifierEvent, + 0)) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->completionNotifierEventHandle); + pChannel->completionNotifierEventHandle = 0; + goto fail; + } + } + + pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE; + + pDevEvo->hal->InitChannel(pDevEvo, pChannel); + + return pChannel; + +fail: + + RmFreeEvoChannel(pDevEvo, pChannel); + + return NULL; +} + +static void FreeImmediateChannelPio(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NVEvoPioChannel *pPio = pChannel->imm.u.pio; + int sd; + + nvAssert(pPio != NULL); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (!pPio->control[sd]) { + continue; + } + + if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pPio->handle, + pPio->control[sd], + 0)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to unmap immediate channel"); + } + pPio->control[sd] = NULL; + } + + if (pPio->handle) { + if (nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pPio->handle)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, "Failed to free immediate channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pPio->handle); + pPio->handle = 0; + } + + nvFree(pPio); + pChannel->imm.u.pio = NULL; +} + +static void FreeImmediateChannelDma(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma; + + RmFreeEvoChannel(pDevEvo, pImmChannel); + pChannel->imm.u.dma = NULL; +} + +static void FreeImmediateChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + switch (pChannel->imm.type) { + case NV_EVO_IMM_CHANNEL_NONE: + return; + case NV_EVO_IMM_CHANNEL_PIO: + FreeImmediateChannelPio(pDevEvo, pChannel); + break; + case NV_EVO_IMM_CHANNEL_DMA: + FreeImmediateChannelDma(pDevEvo, pChannel); + break; + } + pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE; +} + +/*****************************************************************************/ +/* RmFreeEvoChannel () + * Frees all of the stuff allocated in RmAllocEvoChannel */ +/*****************************************************************************/ +static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + int sd; + + if (pChannel == NULL) { + return; + } + + FreeImmediateChannel(pDevEvo, pChannel); + + if (pChannel->completionNotifierEventHandle != 0) { + + nvRmApiFree(nvEvoGlobal.clientHandle, + pChannel->pb.channel_handle, + pChannel->completionNotifierEventHandle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->completionNotifierEventHandle); + + pChannel->completionNotifierEventHandle = 0; + } + + nvkms_free_ref_ptr(pChannel->ref_ptr); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pChannel->pb.control[sd]) { + if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + 
pDevEvo->pSubDevices[sd]->handle, + pChannel->pb.channel_handle, + pChannel->pb.control[sd], + 0) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to unmap display engine channel memory"); + } + pChannel->pb.control[sd] = NULL; + } + } + + if (pChannel->pb.channel_handle != 0) { + // If NVKMS restored the console successfully, tell RM to leave the + // channels allocated to avoid shutting down the heads we just + // enabled. + // + // On EVO, only leave the core and base channels allocated. The + // other satellite channels shouldn't be active at the console. + // + // On nvdisplay, one or more window channels are also needed. Rather + // than try to figure out which ones are needed, just leave them all + // alone. + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + const NvBool isBase = + (pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0; + const NvBool isWindow = + (pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0; + if ((isCore || isBase || isWindow) && pDevEvo->skipConsoleRestore) { + NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS params = { }; + + params.base.subdeviceIndex = pDevEvo->vtFbInfo.subDeviceInstance; + params.flags = NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_RMFREE_FLAGS, + &params, sizeof(params)) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set the PRESERVE_HW flag"); + } + } + + if (nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pChannel->pb.channel_handle) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to tear down display engine channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->pb.channel_handle); + pChannel->pb.channel_handle = 0; + } + + if (pChannel->pb.dma.isBar1Mapping) { + /* Pushbuffer is in vidmem. Free shadow copy.
*/ + nvFree(pChannel->pb.base); + pChannel->pb.base = NULL; + } + + nvRmFreeEvoDma(pDevEvo, &pChannel->pb.dma); + + if (pChannel->notifiersDma) { + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + nvRmFreeEvoDma(pDevEvo, &pChannel->notifiersDma[sd]); + } + } + + nvFree(pChannel->notifiersDma); + pChannel->notifiersDma = NULL; + + nvFree(pChannel); +} + +static NvBool +AllocImmediateChannelPio(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 class, + NvU32 instance, + NvU32 mapSize) +{ + NVEvoPioChannel *pPio = NULL; + NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS params = { 0 }; + NvU32 sd; + + pPio = nvCalloc(1, sizeof(*pPio)); + + if (!pPio) { + return FALSE; + } + + pChannel->imm.type = NV_EVO_IMM_CHANNEL_PIO; + pChannel->imm.u.pio = pPio; + + params.channelInstance = instance; + + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + handle, + class, + &params) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate immediate channel %d", instance); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + return FALSE; + } + + pPio->handle = handle; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *pImm = NULL; + + if (nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pPio->handle, + 0, + mapSize, + &pImm, + 0) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to map immediate channel %d/%d", + sd, instance); + return FALSE; + } + + pPio->control[sd] = pImm; + } + + return TRUE; +} + +static NvBool +AllocImmediateChannelDma(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 immClass) +{ + NVEvoChannelPtr pImmChannel = RmAllocEvoChannel( + pDevEvo, + DRF_DEF64(_EVO, _CHANNEL_MASK, _WINDOW_IMM, _ENABLE), + pChannel->instance, immClass); + + if (!pImmChannel) { + return FALSE; + } + + pChannel->imm.type = NV_EVO_IMM_CHANNEL_DMA; + pChannel->imm.u.dma = pImmChannel; + + return TRUE; +} + +NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo) +{ + int i; + NvU32 baseClass = 0; + NvU32 head; + + static const NvU32 baseChannelDmaClasses[] = { + NV927C_BASE_CHANNEL_DMA, + }; + + for (i = 0; i < ARRAY_LEN(baseChannelDmaClasses); i++) { + if (nvRmEvoClassListCheck(pDevEvo, baseChannelDmaClasses[i])) { + baseClass = baseChannelDmaClasses[i]; + break; + } + } + + if (!baseClass) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported base display class"); + return FALSE; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->base[head] = RmAllocEvoChannel( + pDevEvo, + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE), + head, baseClass); + + if (!pDevEvo->base[head]) { + return FALSE; + } + } + + return TRUE; +} + +NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 immMapSize; + NvU32 head; + + if (!nvRmEvoClassListCheck(pDevEvo, + NV917E_OVERLAY_CHANNEL_DMA)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unsupported overlay display class"); + return FALSE; + } + + nvAssert(nvRmEvoClassListCheck(pDevEvo, NV917B_OVERLAY_IMM_CHANNEL_PIO)); + + /* + * EvoSetImmPointOut91() will interpret the PIO mapping as a pointer + * to GK104DispOverlayImmControlPio and access the SetPointsOut and + * Update fields, which is safe as long as SetPointsOut and Update are + * at consistent offsets.
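+     *
+     * (Those offsets are checked by the offsetof() assertions just below,
+     * and immMapSize is sized so the PIO mapping covers both registers.)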
+ */ + nvAssert(offsetof(GK104DispOverlayImmControlPio, SetPointsOut) == + NV917B_SET_POINTS_OUT(NVKMS_LEFT)); + nvAssert(offsetof(GK104DispOverlayImmControlPio, Update) == + NV917B_UPDATE); + immMapSize = + NV_MAX(NV917B_SET_POINTS_OUT(NVKMS_LEFT), NV917B_UPDATE) + sizeof(NvV32); + + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->overlay[head] = RmAllocEvoChannel( + pDevEvo, + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE), + head, NV917E_OVERLAY_CHANNEL_DMA); + + if (!pDevEvo->overlay[head]) { + return FALSE; + } + + if (!AllocImmediateChannelPio(pDevEvo, pDevEvo->overlay[head], + NV917B_OVERLAY_IMM_CHANNEL_PIO, head, immMapSize)) { + return FALSE; + } + } + + return TRUE; +} + +/*! + * This allocates a syncpt per channel. This syncpt is dedicated + * to this channel. As NVKMS only supports syncpoints for SOC devices, + * in which there's only one device/sub-device/disp, sd can be 0. + */ +static NvBool AllocSyncpt(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel, + NVEvoSyncpt *pEvoSyncptOut) +{ + NvU32 hSyncptCtxDma, hSyncpt, id; + NvKmsSyncPtOpParams params = { }; + NvBool result; + + if (!pDevEvo->supportsSyncpts) { + return FALSE; + } + + /*! Set syncpt id to invalid to avoid un-intended Free */ + pEvoSyncptOut->id = NVKMS_SYNCPT_ID_INVALID; + + /* + * HW engine on Orin is called HOST1X, all syncpts are in internal RAM of + * HOST1X. + * OP_ALLOC calls into HOST1X driver and allocs a syncpt resource. + */ + params.alloc.syncpt_name = "nvkms-fence"; + result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ALLOC, &params); + if (!result) { + return FALSE; + } + id = params.alloc.id; + + /* Post syncpt max val is tracked locally. Init the value here. */ + params.read_minval.id = id; + result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_READ_MINVAL, &params); + if (!result) { + goto failed; + } + + result = nvRmEvoAllocAndBindSyncpt(pDevEvo, pChannel, id, + &hSyncpt, &hSyncptCtxDma); + if (!result) { + goto failed; + } + + /*! Populate syncpt values to return. */ + pEvoSyncptOut->id = id; + pEvoSyncptOut->hCtxDma = hSyncptCtxDma; + pEvoSyncptOut->hSyncpt = hSyncpt; + pEvoSyncptOut->channelMask = pChannel->channelMask; + pEvoSyncptOut->syncptMaxVal = params.read_minval.minval; + + return TRUE; + +failed: + /*!
Put the syncpt back, since the operation failed. */ + params.put.id = id; + nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params); + return FALSE; +} + +static NvBool AllocPostSyncptPerChannel(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel) +{ + if (!pDevEvo->supportsSyncpts) { + return TRUE; + } + + return AllocSyncpt(pDevEvo, pChannel, &pChannel->postSyncpt); +} + +NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo) +{ + int index; + NvU32 window; + + static const struct { + NvU32 windowClass; + NvU32 immClass; + } windowChannelClasses[] = { + { NVC67E_WINDOW_CHANNEL_DMA, + NVC67B_WINDOW_IMM_CHANNEL_DMA }, + { NVC57E_WINDOW_CHANNEL_DMA, + NVC57B_WINDOW_IMM_CHANNEL_DMA }, + { NVC37E_WINDOW_CHANNEL_DMA, + NVC37B_WINDOW_IMM_CHANNEL_DMA }, + }, *c = NULL; + + for (index = 0; index < ARRAY_LEN(windowChannelClasses); index++) { + if (nvRmEvoClassListCheck(pDevEvo, + windowChannelClasses[index].windowClass)) { + + c = &windowChannelClasses[index]; + + nvAssert(nvRmEvoClassListCheck(pDevEvo, c->immClass)); + break; + } + } + + if (index >= ARRAY_LEN(windowChannelClasses)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported window display class"); + return FALSE; + } + + nvAssert(pDevEvo->numWindows <= ARRAY_LEN(pDevEvo->window)); + for (window = 0; window < pDevEvo->numWindows; window++) { + pDevEvo->window[window] = RmAllocEvoChannel( + pDevEvo, + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE), + window, c->windowClass); + + if (!pDevEvo->window[window]) { + return FALSE; + } + + if (!AllocImmediateChannelDma(pDevEvo, pDevEvo->window[window], + c->immClass)) { + return FALSE; + } + + if (!AllocPostSyncptPerChannel(pDevEvo, + pDevEvo->window[window])) { + return FALSE; + } + } + + return TRUE; +} + +static void EvoFreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 ret; + + if (!pDevEvo->pSubDevices[sd]->pCoreDma) { + continue; + } + + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pChannel->pb.channel_handle, + pDevEvo->pSubDevices[sd]->pCoreDma, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug( + pDevEvo, + EVO_LOG_ERROR, + "Failed to unmap NVDisplay core channel memory mapping for ARMed values"); + } + pDevEvo->pSubDevices[sd]->pCoreDma = NULL; + } + + RmFreeEvoChannel(pDevEvo, pChannel); +} + +static NVEvoChannel* EvoAllocateCoreChannel(NVDevEvoRec *pDevEvo) +{ + NVEvoChannel *pChannel; + NvU32 sd; + + pChannel = + RmAllocEvoChannel(pDevEvo, + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE), + 0, + pDevEvo->coreChannelDma.coreChannelClass); + + if (pChannel == NULL) { + goto failed; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pChannel->pb.channel_handle, + pDevEvo->coreChannelDma.dmaArmedOffset, + pDevEvo->coreChannelDma.dmaArmedSize, + (void**)&pDevEvo->pSubDevices[sd]->pCoreDma, + DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev( + pDevEvo, + EVO_LOG_ERROR, + "Core channel memory mapping for ARMed values failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto failed; + } + } + + return pChannel; + +failed: + if (pChannel != NULL) { + EvoFreeCoreChannel(pDevEvo, pChannel); + } + return NULL; +} + +/* Pre-allocate the vblank syncpts and store them in NVDispHeadStateEvoRec.
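+ * + * A consumer sketch (illustrative; all names below are from this file) of + * walking the per-head results once allocation has run: + * + * NVDispHeadStateEvoPtr pHS = &pDispEvo->headState[head]; + * NvU32 i; + * for (i = 0; i < pHS->numVblankSyncObjectsCreated; i++) { + * NVEvoSyncpt *pSyncpt = &pHS->vblankSyncObjects[i].evoSyncpt; + * ... + * }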
*/ +static void AllocCoreRGSyncpts(NVDevEvoPtr pDevEvo) +{ + + NVDispEvoPtr pDispEvo = NULL; + NvU32 syncptIdx = 0; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. */ + nvAssert(pDevEvo->nDispEvo == 1); + pDispEvo = pDevEvo->pDispEvo[0]; + + /* Initialize all heads' vblank sync object counts to zero. */ + for (int i = 0; i < pDevEvo->numHeads; i++) { + pDispEvo->headState[i].numVblankSyncObjectsCreated = 0; + } + + /* For each core RG syncpt index: */ + for (syncptIdx = 0; syncptIdx < NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD; + syncptIdx++) { + /* For each Head: */ + for (int i = 0; i < pDevEvo->numHeads; i++) { + NvBool result = FALSE; + NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[i]; + + result = + AllocSyncpt(pDevEvo, pDevEvo->core, + &pHeadState->vblankSyncObjects[syncptIdx].evoSyncpt); + if (!result) { + /* + * Stop trying to allocate more syncpts if none are + * available. + */ + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "Failed to allocate Core RG Syncpoint at index %d " + "on Head %d.", syncptIdx, i); + return; + } + + /* Populate the index of the syncpt in the NVVblankSyncObjectRec. */ + pHeadState->vblankSyncObjects[syncptIdx].index = syncptIdx; + /* Update the count. */ + pHeadState->numVblankSyncObjectsCreated = syncptIdx + 1; + } + } +} + +NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo) +{ + NvU32 sd; + + pDevEvo->core = EvoAllocateCoreChannel(pDevEvo); + if (!pDevEvo->core) { + return FALSE; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + // Bind the core notifier ctxDma + NvU32 ret = + nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core, + pDevEvo->core->notifiersDma[sd].ctxHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to bind display engine notify context DMA: 0x%x (%s)", + ret, nvstatusToString(ret)); + nvRMFreeEvoCoreChannel(pDevEvo); + return FALSE; + } + } + + AllocCoreRGSyncpts(pDevEvo); + + nvInitEvoSubDevMask(pDevEvo); + + /* + * XXX NVKMS TODO: Enable core channel event generation; see bug + * 1671139. + */ + + // Query the VBIOS head assignments. Note that this has to happen after the + // core channel is allocated or else RM will return incorrect information + // about dynamic display IDs it allocates for the boot display on DP MST + // devices. + GetVbiosHeadAssignment(pDevEvo); + + return TRUE; +} + +void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + RmFreeEvoChannel(pDevEvo, pDevEvo->base[head]); + pDevEvo->base[head] = NULL; + } +} + +void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + RmFreeEvoChannel(pDevEvo, pDevEvo->overlay[head]); + pDevEvo->overlay[head] = NULL; + } +} + +void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 window; + + for (window = 0; window < pDevEvo->numWindows; window++) { + nvRmEvoFreeSyncpt(pDevEvo, &pDevEvo->window[window]->postSyncpt); + RmFreeEvoChannel(pDevEvo, pDevEvo->window[window]); + pDevEvo->window[window] = NULL; + } +} + +/* Frees the Core RG Syncpts. */ +static void FreeCoreRGSyncpts(NVDevEvoPtr pDevEvo) +{ + + NVDispEvoPtr pDispEvo = NULL; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. 
*/ + nvAssert(pDevEvo->nDispEvo == 1); + pDispEvo = pDevEvo->pDispEvo[0]; + /* For each Head: */ + for (int i = 0; i < pDevEvo->numHeads; i++) { + /* Free all core RG syncpts. */ + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[i]; + for (int j = 0; j < pHeadState->numVblankSyncObjectsCreated; j++) { + nvRmEvoFreeSyncpt(pDevEvo, + &pHeadState->vblankSyncObjects[j].evoSyncpt); + } + pHeadState->numVblankSyncObjectsCreated = 0; + } +} + +void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo) +{ + FreeCoreRGSyncpts(pDevEvo); + + if (pDevEvo->core != NULL) { + EvoFreeCoreChannel(pDevEvo, pDevEvo->core); + pDevEvo->core = NULL; + } +} + +/* Poll for an EVO channel on a particular subdevice to process all its methods */ +static NvBool SyncOneEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvU32 errorToken) +{ + NvBool isMethodPending; + NvU64 startTime = 0; + const NvU32 timeout = 2000000; // microseconds + + do { + if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo, pChan, + sd, &isMethodPending)) { + return FALSE; + } + + if (!isMethodPending) { + break; + } + + if (!nvIsEmulationEvo(pDevEvo)) { + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Idling display engine timed out: 0x%08x:%d:%d:%d", + pChan->hwclass, pChan->instance, + sd, errorToken); + return FALSE; + } + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +/* Sync an EVO channel on all subdevices */ +NvBool nvRMSyncEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 errorToken) +{ + NvBool ret = TRUE; + + if (pChannel) { + NvU32 sd; + + nvDmaKickoffEvo(pChannel); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!SyncOneEvoChannel(pDevEvo, pChannel, sd, errorToken)) { + ret = FALSE; + } + } + } + + return ret; +} + + +/* + * Wait for the requested base channel to be idle (no methods pending), and + * call STOP_BASE if the wait times out. + * + * stoppedBase will be TRUE if calling STOP_BASE was necessary and + * successful. + */ +NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd, + NvBool *stoppedBase) +{ + NVEvoChannelPtr pMainLayerChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NvU64 startTime = 0; + NvBool idleTimedOut = FALSE; + const NvU32 timeout = 2000000; // 2 seconds + NvBool isMethodPending = TRUE; + NvBool ret = TRUE; + + *stoppedBase = FALSE; + + do { + if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo, + pMainLayerChannel, + sd, + &isMethodPending)) { + break; + } + + if (!isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + idleTimedOut = TRUE; + break; + } + + nvkms_yield(); + + } while (TRUE); + + if (idleTimedOut) { + NVEvoIdleChannelState idleChannelState = { }; + + idleChannelState.subdev[sd].channelMask |= pMainLayerChannel->channelMask; + ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, &idleChannelState); + + *stoppedBase = ret; + } + + return ret; +} + + +NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID) +{ + const NvU32 *classes = pDevEvo->supportedClasses; + + int i; + + nvAssert(pDevEvo->numClasses > 0); + + for (i = 0; i < pDevEvo->numClasses; i++) { + if (classes[i] == classID) { + return TRUE; + } + } + + return FALSE; +} + +/*! + * This API is used to register a syncpt object with RM. + * It involves: + * 1. Allocate a new NV01_MEMORY_SYNCPOINT syncpt object. + * 2. Allocate a new ctxdma descriptor for the syncpt object. + * 3. Bind the ctxdma entry to the channel.
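+ * + * Illustrative usage (a sketch with error handling elided; on success the + * two out-parameters hold valid RM handles): + * + * NvU32 hSyncpt, hCtxDma; + * if (nvRmEvoAllocAndBindSyncpt(pDevEvo, pChannel, id, + * &hSyncpt, &hCtxDma)) { + * ... use hSyncpt and hCtxDma; free both on teardown ... + * }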
+ */ +NvBool nvRmEvoAllocAndBindSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + NvU32 id, + NvU32 *pSyncptHandle, + NvU32 *pSyncptCtxDmaHandle) +{ + NvU32 ret = FALSE; + + NvU32 hSyncpt; + NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS syncptAllocParams = {0}; + + NvU32 hSyncptCtxDma; + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxDmaParams = {0}; + + /*! Alloc SYNC Object */ + syncptAllocParams.syncpointId = id; + + hSyncpt = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (hSyncpt == 0) { + goto skipEverythingAndFail; + } + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + hSyncpt, + NV01_MEMORY_SYNCPOINT, + &syncptAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to allocate syncpt object"); + goto cleanHandleAndFail; + } + + /*! Alloc Context DMA descriptor for syncpt object */ + ctxDmaParams.hMemory = hSyncpt; + ctxDmaParams.flags = DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + ctxDmaParams.offset = 0; + ctxDmaParams.limit = 65535; /* 64K-1 */ + + hSyncptCtxDma = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (hSyncptCtxDma == 0) { + goto cleanSyncptHandleAndFail; + } + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + hSyncptCtxDma, + NV01_CONTEXT_DMA, + &ctxDmaParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to allocate context dma object"); + goto cleanCtxdmaSyncptHandleAndFail; + } + + /*! Bind Context DMA to syncpt Object */ + ret = nvRmEvoBindDispContextDMA(pDevEvo, pChannel, hSyncptCtxDma); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to bind context dma object"); + goto cleanEverythingAndFail; + } + + *pSyncptHandle = hSyncpt; + *pSyncptCtxDmaHandle = hSyncptCtxDma; + + return TRUE; + +cleanEverythingAndFail: + nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle, hSyncptCtxDma); + +cleanCtxdmaSyncptHandleAndFail: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hSyncptCtxDma); + +cleanSyncptHandleAndFail: + nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle, hSyncpt); + +cleanHandleAndFail: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hSyncpt); + +skipEverythingAndFail: + return FALSE; +} + +static void FreeSyncptHandle( + NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pSyncpt) +{ + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSyncpt->hSyncpt); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSyncpt->hSyncpt); + pSyncpt->hSyncpt = 0; + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSyncpt->hCtxDma); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSyncpt->hCtxDma); + pSyncpt->hCtxDma = 0; +} + +/*! + * This API is used to unregister syncpt objects from the given channel. + * It searches the global table, clears this channel's bit from each + * entry, and frees any entry that no channel references anymore. + */ +void nvRmEvoFreePreSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel) +{ + NvU32 i; + NvBool isChannelIdle = NV_FALSE; + + if (pChannel == NULL) { + return; + } + + if (!pDevEvo->supportsSyncpts) { + return; + } + + if (pChannel->channelMask == 0) { + return; + } + + pDevEvo->hal->IsChannelIdle( + pDevEvo, pChannel, 0, &isChannelIdle); + + if (isChannelIdle == NV_FALSE) { + return; + } + + /*! Find pre-syncpt and free it */ + for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) { + + pDevEvo->preSyncptTable[i].channelMask &= ~pChannel->channelMask; + if (pDevEvo->preSyncptTable[i].channelMask == 0 && + pDevEvo->preSyncptTable[i].hCtxDma != 0) { + + /*!
Free handles */ + FreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]); + } + } +} + +static NvBool GarbageCollectSyncptHelperOneChannel( + NVDevEvoRec *pDevEvo, + NvU32 sd, + NVEvoChannel *pChannel, + NVEvoSyncpt *pSyncpt, + NVEvoChannelMask *pIdledChannelMask) +{ + NvBool isChannelIdle = FALSE; + + if ((pChannel->channelMask & pSyncpt->channelMask) == 0) { + return TRUE; + } + + if ((*pIdledChannelMask) & pChannel->channelMask) { + goto done; + } + + /*! Check whether channel is idle. */ + pDevEvo->hal->IsChannelIdle(pDevEvo, pChannel, sd, &isChannelIdle); + + if (!isChannelIdle) { + return FALSE; + } + + /*! record idle channel mask to use in next check */ + *pIdledChannelMask |= pChannel->channelMask; + +done: + pSyncpt->channelMask &= ~pChannel->channelMask; + return TRUE; +} + +static NvBool GarbageCollectSyncptHelperOneSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pSyncpt, + NVEvoChannelMask *pIdledChannelMask) +{ + NvBool ret = TRUE; + NvU32 head, sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + /*! + * If a given channel isn't idle, continue to check if this syncpt + * is used on other channels which may be idle. + */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!GarbageCollectSyncptHelperOneChannel( + pDevEvo, + sd, + pDevEvo->head[head].layer[layer], + pSyncpt, + &pIdledChannelMask[sd])) { + ret = FALSE; + } + } + } + } + + return ret; +} + +/*! + * This API is used to unregister the given syncpt object. + */ +void nvRmEvoFreeSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pEvoSyncpt) +{ + if ((pEvoSyncpt == NULL) || !pDevEvo->supportsSyncpts || + (pEvoSyncpt->id == NVKMS_SYNCPT_ID_INVALID)) { + return; + } + + /*! Release the syncpt id reference back to nvhost */ + NvKmsSyncPtOpParams params = { }; + params.put.id = pEvoSyncpt->id; + nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, &params); + + /*! Free handles */ + FreeSyncptHandle(pDevEvo, pEvoSyncpt); +} + +/*! + * This API tries to find unused syncpts and unregisters them. + * It searches the global table and frees each syncpt once all channels + * using it are idle, taking care that the syncpt is not part of the + * current flip. + */ +NvBool nvRmGarbageCollectSyncpts( + NVDevEvoRec *pDevEvo) +{ + NvU32 i; + NvBool freedSyncpt = FALSE; + NVEvoChannelMask idledChannelMask[NVKMS_MAX_SUBDEVICES] = { 0 }; + + if (!pDevEvo->supportsSyncpts) { + return FALSE; + } + + for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) { + + NvBool allLayersIdle = NV_TRUE; + + if (pDevEvo->pAllSyncptUsedInCurrentFlip != NULL) { + if (pDevEvo->pAllSyncptUsedInCurrentFlip[i]) { + /*! syncpt is part of current flip, so skip it */ + continue; + } + } + + if (pDevEvo->preSyncptTable[i].hCtxDma == 0) { + /*! syncpt isn't registered, so skip it */ + continue; + } + + allLayersIdle = GarbageCollectSyncptHelperOneSyncpt( + pDevEvo, + &pDevEvo->preSyncptTable[i], + idledChannelMask); + + if (allLayersIdle) { + /*! Free handles */ + FreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]); + freedSyncpt = TRUE; + } + } + + return freedSyncpt; +} + +NvU32 nvRmEvoBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 hCtxDma) +{ + NV0002_CTRL_BIND_CONTEXTDMA_PARAMS params = { }; + NvU32 ret; + NvBool retryOnlyOnce = TRUE; + + params.hChannel = pChannel->pb.channel_handle; + +retryOnce: + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + hCtxDma, + NV0002_CTRL_CMD_BIND_CONTEXTDMA, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + /*!
+ * syncpts (lazily freed) occupy space in the disp ctxDma hash + * table, and therefore may cause bind ctxDma to fail. + * Free any unused syncpts and try again. + */ + if (retryOnlyOnce) { + /*! try to free syncpt only once */ + if (nvRmGarbageCollectSyncpts(pDevEvo)) { + retryOnlyOnce = FALSE; + goto retryOnce; + } + } + } + return ret; +} + + +NvU32 nvRmEvoAllocateAndBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NvU32 hMemory, + const enum NvKmsSurfaceMemoryLayout layout, + NvU64 limit) +{ + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { }; + NvU32 hDispCtxDma; + NvU32 flags = DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + NvU32 ret; + int h; + + /* each surface to be displayed needs its own ctx dma. */ + nvAssert(pDevEvo->displayHandle != 0); + + nvAssert(pDevEvo->core); + nvAssert(pDevEvo->core->pb.channel_handle); + + nvAssert(hMemory); + nvAssert(limit); + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _BL); + break; + case NvKmsSurfaceMemoryLayoutPitch: + flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _PITCH); + break; + } + + hDispCtxDma = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ctxdmaParams.hMemory = hMemory; + ctxdmaParams.flags = flags; + ctxdmaParams.offset = 0; + ctxdmaParams.limit = limit; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + hDispCtxDma, + NV01_CONTEXT_DMA, + &ctxdmaParams); + + if (ret != NVOS_STATUS_SUCCESS) { + goto cleanup_this_handle_and_fail; + } + + ret = nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core, hDispCtxDma); + + if (ret != NVOS_STATUS_SUCCESS) { + goto free_this_handle_and_fail; + } + + for (h = 0; h < pDevEvo->numHeads; h++) { + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[h].numLayers; layer++) { + if (pDevEvo->head[h].layer[layer]) { + nvAssert(pDevEvo->head[h].layer[layer]->pb.channel_handle); + + ret = nvRmEvoBindDispContextDMA(pDevEvo, + pDevEvo->head[h].layer[layer], + hDispCtxDma); + + if (ret != NVOS_STATUS_SUCCESS) { + goto free_this_handle_and_fail; + } + } + } + } + + return hDispCtxDma; + +free_this_handle_and_fail: + + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, hDispCtxDma); + + /* Fall through */ +cleanup_this_handle_and_fail: + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hDispCtxDma); + + return 0; +} + +void nvRmEvoFreeDispContextDMA(NVDevEvoPtr pDevEvo, + NvU32 *hDispCtxDma) +{ + if (*hDispCtxDma) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, *hDispCtxDma); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, *hDispCtxDma); + *hDispCtxDma = 0; + } +} + +void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]) +{ + unsigned int sd; + NvU32 ret; + + if (memoryHandle == 0) { + return; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDeviceAddress[sd] != NULL) { + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + memoryHandle, + subDeviceAddress[sd], + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"UnmapMemory() failed"); + } + } + + subDeviceAddress[sd] = NULL; + } +} + +NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, NvU64 size, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES], + NvU32 subDeviceMask) +{ + NvU32 ret; + + unsigned int sd; + + nvkms_memset(subDeviceAddress, 0, sizeof(void*) * NVKMS_MAX_SUBDEVICES); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *address = NULL; + + if (((1 << sd) & subDeviceMask) == 0) 
{ + continue; + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + memoryHandle, + 0, + size, + &address, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvRmEvoUnMapVideoMemory(pDevEvo, memoryHandle, subDeviceAddress); + return FALSE; + } + subDeviceAddress[sd] = address; + } + return TRUE; +} + +static NvBool GetClassList(NVDevEvoPtr pDevEvo) +{ + NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS classListParams = { 0 }; + NvU32 ret; + + classListParams.numClasses = 0; + classListParams.classList = NvP64_NULL; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classListParams, sizeof(classListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + pDevEvo->supportedClasses = + nvCalloc(classListParams.numClasses, sizeof(NvU32)); + + if (pDevEvo->supportedClasses == NULL) { + return FALSE; + } + + classListParams.classList = NV_PTR_TO_NvP64(pDevEvo->supportedClasses); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classListParams, sizeof(classListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFree(pDevEvo->supportedClasses); + pDevEvo->supportedClasses = NULL; + return FALSE; + } + + pDevEvo->numClasses = classListParams.numClasses; + + return TRUE; +} + +static NvBool GetEngineListOneSubDevice(NVDevEvoPtr pDevEvo, NvU32 sd) +{ + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS engineListParams = { 0 }; + NvU32 ret; + NVSubDeviceEvoPtr pSubDevice = pDevEvo->pSubDevices[sd]; + size_t length; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_ENGINES_V2, + &engineListParams, sizeof(engineListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + if (engineListParams.engineCount == 0) { + return TRUE; + } + + length = engineListParams.engineCount * sizeof(NvU32); + + pSubDevice->supportedEngines = nvAlloc(length); + + if (pSubDevice->supportedEngines == NULL) { + return FALSE; + } + + nvkms_memcpy(pSubDevice->supportedEngines, + engineListParams.engineList, + length); + pSubDevice->numEngines = engineListParams.engineCount; + + return TRUE; +} + +static NvBool GetEngineList(NVDevEvoPtr pDevEvo) +{ + int sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!GetEngineListOneSubDevice(pDevEvo, sd)) { + return FALSE; + } + } + + return TRUE; +} + +static void FreeSubDevice(NVDevEvoPtr pDevEvo, NVSubDeviceEvoPtr pSubDevice) +{ + if (pSubDevice == NULL) { + return; + } + + if (pSubDevice->handle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSubDevice->handle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle); + } + + if (pSubDevice->gpuString[0] != '\0') { + nvEvoLogDebug(EVO_LOG_INFO, "Freed %s", pSubDevice->gpuString); + } + + nvFree(pSubDevice->supportedEngines); + + nvFree(pSubDevice); +} + +static NVSubDeviceEvoPtr AllocSubDevice(NVDevEvoPtr pDevEvo, const NvU32 sd) +{ + NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 }; + NV2080_CTRL_GPU_GET_ID_PARAMS getIdParams = { 0 }; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidParams = NULL; + NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS pciInfoParams = { 0 }; + NvU32 ret; + const char *uuid; + + NVSubDeviceEvoPtr pSubDevice = nvCalloc(1, sizeof(*pSubDevice)); + + if (pSubDevice == NULL) { + goto failure; + } + + pSubDevice->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + subdevAllocParams.subDeviceId = sd; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + 
pDevEvo->deviceHandle, + pSubDevice->handle, + NV20_SUBDEVICE_0, + &subdevAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize subDevice"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle); + pSubDevice->handle = 0; + goto failure; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_ID, + &getIdParams, + sizeof(getIdParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to identify GPU"); + goto failure; + } + + pSubDevice->gpuId = getIdParams.gpuId; + + /* Query the UUID for the gpuString. */ + + pGidParams = nvCalloc(1, sizeof(*pGidParams)); + + if (pGidParams == NULL) { + goto failure; + } + + pGidParams->flags = + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_GID_INFO, + pGidParams, + sizeof(*pGidParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + /* If the query failed, make sure the UUID is cleared out. */ + nvkms_memset(pGidParams, 0, sizeof(*pGidParams)); + } + + /* Query the PCI bus address for the gpuString. */ + + pciInfoParams.gpuId = pSubDevice->gpuId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_PCI_INFO, + &pciInfoParams, sizeof(pciInfoParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + /* If the query failed, make sure the PCI bus address is cleared out. */ + nvkms_memset(&pciInfoParams, 0, sizeof(pciInfoParams)); + } + + pSubDevice->gpuLogIndex = nvGetGpuLogIndex(); + + /* + * Create the gpuString, using this example format: + * GPU:0 (GPU-af2422f5-2719-29de-567f-ac899cf458c4) @ PCI:0000:01:00.0 + */ + if ((pGidParams->data[0] == '\0') || (pGidParams->length == 0)) { + uuid = ""; + } else { + uuid = (const char *) pGidParams->data; + } + + nvkms_snprintf(pSubDevice->gpuString, sizeof(pSubDevice->gpuString), + "GPU:%d (%s) @ PCI:%04x:%02x:%02x.0", + pSubDevice->gpuLogIndex, uuid, + pciInfoParams.domain, + pciInfoParams.bus, + pciInfoParams.slot); + + pSubDevice->gpuString[sizeof(pSubDevice->gpuString) - 1] = '\0'; + + nvEvoLogDebug(EVO_LOG_INFO, "Allocated %s", pSubDevice->gpuString); + nvFree(pGidParams); + + return pSubDevice; + +failure: + FreeSubDevice(pDevEvo, pSubDevice); + nvFree(pGidParams); + + return NULL; +} + +static void CloseDevice(NVDevEvoPtr pDevEvo) +{ + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) { + const NvU32 gpuId = pDevEvo->openedGpuIds[i]; + + if (gpuId == NV0000_CTRL_GPU_INVALID_ID) { + break; + } + + nvkms_close_gpu(gpuId); + pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + } +} + +static NvBool OpenTegraDevice(NVDevEvoPtr pDevEvo) +{ + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 }; + nv_gpu_info_t *gpu_info = NULL; + NvU32 ret, gpu_count = 0; + + nvAssert(pDevEvo->deviceId == NVKMS_DEVICE_ID_TEGRA); + + gpu_info = nvAlloc(NV_MAX_GPUS * sizeof(*gpu_info)); + if (gpu_info == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate GPU IDs array"); + goto fail; + } + + gpu_count = nvkms_enumerate_gpus(gpu_info); + if (gpu_count == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No NVIDIA GPUs found"); + goto fail; + } + + if (gpu_count != 1) { + // XXX If the system has both Tegra/iGPU and dGPU, it is not + // guaranteed to find the Tegra, so fail.
+ nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "More than one NVIDIA GPU found " + "in a Tegra configuration where only Tegra is expected."); + goto fail; + } + + if (!nvkms_open_gpu(gpu_info[0].gpu_id)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU"); + goto fail; + } + + pDevEvo->openedGpuIds[0] = gpu_info[0].gpu_id; + params.gpuId = gpu_info[0].gpu_id; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID"); + goto fail; + } + + pDevEvo->deviceId = params.deviceInstance; + + nvFree(gpu_info); + return TRUE; + +fail: + nvFree(gpu_info); + CloseDevice(pDevEvo); + return FALSE; +} + +static NvBool OpenDevice(NVDevEvoPtr pDevEvo) +{ + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS idParams = { }; + NvU32 ret, i, gpuIdIndex = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS, + &idParams, sizeof(idParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query attached GPUs"); + goto fail; + } + + ct_assert(ARRAY_LEN(pDevEvo->openedGpuIds) >= ARRAY_LEN(idParams.gpuIds)); + + for (i = 0; i < ARRAY_LEN(idParams.gpuIds); i++) { + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 }; + const NvU32 gpuId = idParams.gpuIds[i]; + + if (gpuId == NV0000_CTRL_GPU_INVALID_ID) { + break; + } + + nvAssert(pDevEvo->openedGpuIds[gpuIdIndex] == + NV0000_CTRL_GPU_INVALID_ID); + + params.gpuId = gpuId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID"); + goto fail; + } + + if (pDevEvo->deviceId != params.deviceInstance) { + continue; + } + + if (!nvkms_open_gpu(gpuId)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU"); + goto fail; + } + + pDevEvo->openedGpuIds[gpuIdIndex++] = gpuId; + } + + return TRUE; + +fail: + CloseDevice(pDevEvo); + return FALSE; +} + +static void NonStallInterruptCallback( + void *arg, + void *pEventDataVoid, + NvU32 hEvent, + NvU32 data, + NV_STATUS status) +{ + /* + * We are called within resman's altstack and locks. Schedule a separate + * callback to execute with the nvkms_lock. + * + * XXX It might be nice to use a lighter-weight lock here to check if any + * requests are pending in any NvKmsDeferredRequestFifo before scheduling + * nvKmsServiceNonStallInterrupt(). 
+ */ + + (void) nvkms_alloc_timer_with_ref_ptr( + nvKmsServiceNonStallInterrupt, /* callback */ + arg, /* argument (this is a ref_ptr to a pDevEvo) */ + 0, /* dataU32 */ + 0); /* usec */ +} + +static void UnregisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->nonStallInterrupt.handle != 0) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS + eventNotificationParams = { 0 }; + + eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD; + eventNotificationParams.action = + NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &eventNotificationParams, + sizeof(eventNotificationParams)); + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->nonStallInterrupt.handle); + } + + pDevEvo->nonStallInterrupt.handle = 0; +} + +static NvBool RegisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo) +{ + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS eventNotificationParams = { 0 }; + + pDevEvo->nonStallInterrupt.handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!nvRmRegisterCallback(pDevEvo, + &pDevEvo->nonStallInterrupt.callback, + pDevEvo->ref_ptr, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle, + NonStallInterruptCallback, + NV2080_NOTIFIERS_FIFO_EVENT_MTHD | + NV01_EVENT_NONSTALL_INTR)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to register nonstall interrupt callback"); + goto failure_free_handle; + } + + // Setup event notification + eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD; + eventNotificationParams.action = + NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &eventNotificationParams, + sizeof(eventNotificationParams)) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set nonstall interrupt notification"); + goto failure_free_callback_and_handle; + } + + return TRUE; + +failure_free_callback_and_handle: + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle); +failure_free_handle: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->nonStallInterrupt.handle); + pDevEvo->nonStallInterrupt.handle = 0; + return FALSE; +} + +NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest) +{ + NV0080_ALLOC_PARAMETERS allocParams = { 0 }; + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 }; + NvU32 ret, sd; + + if (nvEvoGlobal.clientHandle == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Client handle not initialized"); + goto failure; + } + + /* + * RM deviceIds should be within [0,NV_MAX_DEVICES); check + * that the client provided a value in range, and add one when + * using deviceId as the per-device unique identifier in the + * RM handle allocator: the identifier is expected to be != 0. 
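+ * + * For example, a request with deviceId 0 passes identifier 1 to + * nvInitUnixRmHandleAllocator(), satisfying the != 0 expectation.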
+ */ + + if ((pRequest->deviceId >= NV_MAX_DEVICES) && + (pRequest->deviceId != NVKMS_DEVICE_ID_TEGRA)) { + goto failure; + } + + pDevEvo->dpTimer = nvDPAllocTimer(pDevEvo); + if (!pDevEvo->dpTimer) { + goto failure; + } + + if (!nvInitUnixRmHandleAllocator(&pDevEvo->handleAllocator, + nvEvoGlobal.clientHandle, + pRequest->deviceId + 1)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize handles"); + goto failure; + } + + pDevEvo->deviceHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + pDevEvo->deviceId = pRequest->deviceId; + pDevEvo->sli.mosaic = pRequest->sliMosaic; + + if (pRequest->deviceId == NVKMS_DEVICE_ID_TEGRA) { + /* + * On Tegra, NVKMS client is not desktop RM client, so + * enumerate and open first GPU. + */ + if (!OpenTegraDevice(pDevEvo)) { + goto failure; + } + + pDevEvo->usesTegraDevice = TRUE; + } else if (!OpenDevice(pDevEvo)) { + goto failure; + } + + allocParams.deviceId = pDevEvo->deviceId; + + /* Give NVKMS a private GPU virtual address space. */ + allocParams.hClientShare = nvEvoGlobal.clientHandle; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV01_DEVICE_0, + &allocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize device"); + goto failure; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES, + &getNumSubDevicesParams, + sizeof(getNumSubDevicesParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine number of GPUs"); + goto failure; + } + + ct_assert(NVKMS_MAX_SUBDEVICES == NV_MAX_SUBDEVICES); + if ((getNumSubDevicesParams.numSubDevices == 0) || + (getNumSubDevicesParams.numSubDevices > + ARRAY_LEN(pDevEvo->pSubDevices))) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported number of GPUs: %d", + getNumSubDevicesParams.numSubDevices); + goto failure; + } + + pDevEvo->numSubDevices = getNumSubDevicesParams.numSubDevices; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd] = AllocSubDevice(pDevEvo, sd); + if (pDevEvo->pSubDevices[sd] == NULL) { + goto failure; + } + } + + pDevEvo->gpuLogIndex = pDevEvo->pSubDevices[0]->gpuLogIndex; + + if (!GetClassList(pDevEvo) || !GetEngineList(pDevEvo)) { + goto failure; + } + + if (!RegisterNonStallInterruptCallback(pDevEvo)) { + goto failure; + } + + return TRUE; + +failure: + nvRmFreeDeviceEvo(pDevEvo); + return FALSE; +} + +void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 sd; + + UnregisterNonStallInterruptCallback(pDevEvo); + + nvFree(pDevEvo->supportedClasses); + pDevEvo->supportedClasses = NULL; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + FreeSubDevice(pDevEvo, pDevEvo->pSubDevices[sd]); + pDevEvo->pSubDevices[sd] = NULL; + } + + if (pDevEvo->deviceHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->deviceHandle); + pDevEvo->deviceHandle = 0; + } + + nvTearDownUnixRmHandleAllocator(&pDevEvo->handleAllocator); + + nvDPFreeTimer(pDevEvo->dpTimer); + pDevEvo->dpTimer = NULL; + + CloseDevice(pDevEvo); +} + + +/*! + * Determine whether all the dpys in the dpyIdList can be activated together. + * + * \param[in] pDispEvo The disp on which we search for a head. + * \param[in] dpyIdList The connectors to test. + * + * \return Return TRUE if all dpys can be driven simultaneously. 
+ */ +NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS mapParams = { 0 }; + NvU32 ret = 0; + + /* Trivially accept an empty dpyIdList. */ + + if (nvDpyIdListIsEmpty(dpyIdList)) { + return TRUE; + } + + /* don't even try if EVO isn't initialized (e.g. during a VT switch) */ + + if (!pDevEvo->gpus) { + return FALSE; + } + + /* build a mask of all the displays to use */ + + mapParams.subDeviceInstance = pDispEvo->displayOwner; + + mapParams.displayMask = nvDpyIdListToNvU32(dpyIdList); + + /* ask RM for the head routing */ + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP, + &mapParams, + sizeof(mapParams)); + + if ((ret != NVOS_STATUS_SUCCESS) || (mapParams.displayMask == 0)) { + char *dpyIdListStr = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "The requested configuration of display devices " + "(%s) is not supported on this GPU.", + nvSafeString(dpyIdListStr, "unknown")); + nvFree(dpyIdListStr); + + return FALSE; + } + + /* make sure we got everything we asked for */ + + if (mapParams.displayMask != nvDpyIdListToNvU32(dpyIdList)) { + char *requestedDpyIdListStr; + char *returnedDpyIdListStr; + + requestedDpyIdListStr = + nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + + returnedDpyIdListStr = + nvGetDpyIdListStringEvo(pDispEvo, + nvNvU32ToDpyIdList(mapParams.displayMask)); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "The requested configuration of display devices " + "(%s) is not supported on this GPU; " + "%s is recommended instead.", + nvSafeString(requestedDpyIdListStr, "unknown"), + nvSafeString(returnedDpyIdListStr, "unknown")); + + nvFree(requestedDpyIdListStr); + nvFree(returnedDpyIdListStr); + + return FALSE; + } + + return TRUE; +} + + +/*! + * Tell the RM to save or restore the console VT state. + * + * \param[in] cmd The command indicating which action RM should take. + * + * \return TRUE on success, FALSE on failure. + */ +NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd) +{ + NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS params = { 0 }; + NvU32 ret; + + params.cmd = cmd; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo) +{ + NvU32 ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO, + &pDevEvo->vtFbInfo, sizeof(pDevEvo->vtFbInfo)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +/*! + * Import the current framebuffer console memory, for later use with NVKMS-based + * console restore. + * + * Note this relies on pDevEvo->vtFbInfo, populated by nvRmGetVTFBInfo() after + * nvRmVTSwitch() saves the console state. + * + * There are several cases in which NVKMS cannot perform console restore: + * + * - Anything other than linear frame buffer consoles (i.e., VGA text modes, + * Non-linear or paletted graphical modes, etc). For those, resman cannot + * query the framebuffer dimensions from the kernel, + * NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE returns empty fbInfo + * params, and consequently pDevEvo->vtFbInfo.width == 0. + * + * - Linear frame buffer console with an unaligned pitch.
In this case, + * nvEvoRegisterSurface() will fail: it has to ensure the surface registration + * satisfies the EVO method interface requirement that PITCH surfaces are + * multiples of 256 bytes. Consequently, pDevEvo->fbConsoleSurfaceHandle will + * be 0. + * + * - Depth 8 frame buffer consoles: these are color index, and cannot be + * supported by NVKMS console restore because they require the VGA palette, + * which exists in special RAM in the VGA core, so we can't name it with a + * ctxdma that we can feed into EVO's LUT. The pFbInfo->depth switch below + * will reject depth 8. + */ +void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo) +{ + NvU32 ret; + struct NvKmsRegisterSurfaceParams registration = { }; + const NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pFbInfo = &pDevEvo->vtFbInfo; + NvHandle hMemory; + + nvAssert(pDevEvo->fbConsoleSurfaceHandle == 0); + + if (pFbInfo->width == 0) { + // No console memory to map. + return; + } + + switch (pFbInfo->depth) { + case 15: + registration.request.format = NvKmsSurfaceMemoryFormatX1R5G5B5; + break; + case 16: + registration.request.format = NvKmsSurfaceMemoryFormatR5G6B5; + break; + case 32: + // That's a lie, it's really depth 24. Fall through. + case 24: + registration.request.format = NvKmsSurfaceMemoryFormatX8R8G8B8; + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Unsupported framebuffer console depth %d", + pFbInfo->depth); + return; + } + + hMemory = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (hMemory == 0) { + return; + } + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + hMemory, + NV01_MEMORY_FRAMEBUFFER_CONSOLE, + NULL); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Failed to map framebuffer console memory"); + goto done; + } + + registration.request.useFd = FALSE; + registration.request.rmClient = nvEvoGlobal.clientHandle; + registration.request.widthInPixels = pFbInfo->width; + registration.request.heightInPixels = pFbInfo->height; + registration.request.layout = NvKmsSurfaceMemoryLayoutPitch; + + registration.request.planes[0].u.rmObject = hMemory; + registration.request.planes[0].pitch = pFbInfo->pitch; + registration.request.planes[0].rmObjectSizeInBytes = + (NvU64) pFbInfo->height * (NvU64) pFbInfo->pitch; + + nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, &registration, + NvHsMapPermissionsNone); + + pDevEvo->fbConsoleSurfaceHandle = registration.reply.surfaceHandle; + + // nvEvoRegisterSurface dups the handle, so we can free the one we just + // imported. + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + hMemory); +done: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hMemory); +} + +NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue) +{ + *pValue = FALSE; + return TRUE; +} + + +/*! + * Return the GPU's current PTIMER, or 0 if the query fails.
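+ * + * The returned value is in nanoseconds (params.time_nsec, as reported by + * NV2080_CTRL_CMD_TIMER_GET_TIME).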
+ */ +NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo) +{ + const NvU32 sd = 0; + NV2080_CTRL_TIMER_GET_TIME_PARAMS params; + + NvU32 ret; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_TIMER_GET_TIME, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDebug(EVO_LOG_ERROR, "Failed to query GPU time, ret = %d", ret); + return 0; + } + + return params.time_nsec; +} + +NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed) +{ + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { }; + NvU32 sd; + + if (allowed == pDevEvo->gc6Allowed) { + return TRUE; + } + + params.action = allowed ? NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC : + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + // XXX This is catastrophic, is there a good way to unravel? + nvEvoLogDevDebug( + pDevEvo, EVO_LOG_ERROR, + "Failed to modify GC6 blocker refcount, sd = %d, ret = %x", + sd, ret); + return FALSE; + } + } + + pDevEvo->gc6Allowed = allowed; + + /* + * If we are just now disallowing GC6, it's possible that we previously + * entered GC6 and invalidated display channel state. Re-initialize it here + * to ensure that future modesets are successful. + */ + if (!allowed && pDevEvo->core) { + NvU32 channelIdx; + + pDevEvo->hal->InitChannel(pDevEvo, pDevEvo->core); + pDevEvo->coreInitMethodsPending = TRUE; + + for (channelIdx = 0; channelIdx < pDevEvo->numHeads; channelIdx++) { + // XXX We should InitChannel() for all per-head channels when coming + // out of GC6. + pDevEvo->hal->InitChannel( + pDevEvo, pDevEvo->head[channelIdx].layer[NVKMS_MAIN_LAYER]); + } + } + + return TRUE; +} + +/*! + * Register an RM callback function for the RG line 1 interrupt. + * + * \param[in] pDispEvo The display on which to allocate the callback + * + * \param[in] head The head on which to allocate the callback + * + * \param[in] pCallback The callback function pointer to be registered + * + * \return Handle to callback object on success, 0 on failure. This same + * handle must be used to unregister the callback. + */ +NvU32 nvRmAddRgLine1Callback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallback) +{ + NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS rgLineParams = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + NvU32 ret; + + rgLineParams.subDeviceInstance = pDispEvo->displayOwner; + rgLineParams.head = head; + rgLineParams.rgLineNum = 1; + rgLineParams.pCallbkFn = pCallback; + + /* + * This object only takes a single pointer, but we want it to operate + * on a {pDispEvo,head} tuple, and we want to allocate the callback + * in NVKMS using a ref_ptr to allow for the pDispEvo being freed while + * callbacks are outstanding; so we bundle pDispEvo->ref_ptr and head + * into a single value here before passing it to RM, then decouple them + * in the RM callback function before allocating the NVKMS timer callback. + * + * This works because pDispEvo->ref_ptr will never have the lowest 2 + * bits set, and RM doesn't do anything with pCallbkParams aside from + * passing it back to the callback function.
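+ * + * An unpacking sketch for the callback side (illustrative; the actual + * callback implementation is outside this file): + * + * const NvUPtr v = (NvUPtr)pCallbkParams; + * void *pDispRefPtr = (void *)(v & ~(NvUPtr)3); + * const NvU32 head = (NvU32)(v & 3);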
+ */ + nvAssert(!((NvUPtr)pDispEvo->ref_ptr & head)); + rgLineParams.pCallbkParams = (void*)((NvUPtr)pDispEvo->ref_ptr | head); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + handle, + NV0092_RG_LINE_CALLBACK, + &rgLineParams); + + if (ret == NVOS_STATUS_SUCCESS) { + return handle; + } else { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to enable RG line interrupt, ret: %d", ret); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + return 0; + } +} + +/*! + * Unregister an RM callback function previously registered with + * nvRmAddRgLine1Callback. + * + * \param[in] pDispEvo The display on which to unregister the + * callback + * + * \param[in] callbackObjectHandle Handle to the previously allocated + * callback object + */ +void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (callbackObjectHandle == 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to disable RG line interrupt, obj handle 0"); + return; + } + + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + callbackObjectHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to disable RG line interrupt, ret: %d", ret); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, callbackObjectHandle); +} + +/*! + * Register an RM callback function for the VBlank interrupt. + * + * \param[in] pDispEvo The display on which to allocate the callback + * + * \param[in] head The head on which to allocate the callback + * + * \param[in] pCallback The callback function pointer to be registered + * + * \return Handle to callback object on success, 0 on failure. This same + * handle must be used to unregister the callback. + */ +NvU32 nvRmAddVBlankCallback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + OSVBLANKCALLBACKPROC pCallback) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NvU32 ret; + NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS params = { + .pProc = pCallback, + .LogicalHead = head, + .pParm1 = pDispEvo->ref_ptr, + + /* + * The callback's second argument is a pointer, which is large enough to + * store the head number. + */ + .pParm2 = (void *)(NvUPtr)head, + }; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + handle, + NV9010_VBLANK_CALLBACK, + &params); + + if (ret == NVOS_STATUS_SUCCESS) { + return handle; + } else { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to enable VBlank callback, ret: %d", ret); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + return 0; + } +} + +/*! + * Unregister an RM callback function previously registered with + * nvRmAddVBlankCallback.
+ * + * \param[in] pDispEvo The display on which to unregister the + * callback + * + * \param[in] callbackObjectHandle Handle to the previously allocated + * callback object + */ +void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle) +{ + const NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (callbackObjectHandle == 0) { + // already removed + return; + } + + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + callbackObjectHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to disable VBlank callback, ret: %d", ret); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, callbackObjectHandle); +} + +/*! + * Initialize the dynamic display mux on supported systems. + * + * \param[in] pDpyEvo The dpy on which to initialize the mux. + */ +static void MuxInit(const NVDpyEvoRec *pDpyEvo) +{ + NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + + if (pDpyEvo->internal) { + /* Attempt to get the EDID from ACPI. This is required for internal + * displays only, as the internal mux initialization requires data + * from the internal panel's EDID, while the external mux can be + * initialized in the absence of a display, in which case there is + * obviously no EDID present. The EDID read is done via ACPI, in + * order to accommodate mux initialization while the internal panel + * is disconnected from the GPU. */ + + /* Map with hard-coded data for systems known to support dynamic mux + * switching. This is a poor-man's alternative to the WDDM driver's + * CDisplayMgr::NVInitializeACPIToDeviceMaskMap() */ + NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS acpiMap = { + .mapTable = { + {.acpiId = 0x8001a420, .displayId = 0x1000, .dodIndex = 0}, + } + }; + NVEdidRec edid = { }; + NVParsedEdidEvoRec *pParsedEdid = NULL; + NVEvoInfoStringRec infoString; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING, + &acpiMap, sizeof(acpiMap)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDebug(EVO_LOG_ERROR, "Failed to set ACPI ID map."); + return; + } + + nvInitInfoString(&infoString, NULL, 0); + + /* Retrieve the internal panel's EDID from ACPI */ + if (!nvDpyReadAndParseEdidEvo(pDpyEvo, NULL, + NVKMS_EDID_READ_MODE_ACPI, + &edid, &pParsedEdid, + &infoString)) { + /* EDID read is expected to fail on non-dynamic-mux systems. 
*/ + goto edid_done; + } + + if (edid.length == 0 || pParsedEdid == NULL || !pParsedEdid->valid) { + goto edid_done; + } + + params.manfId = pParsedEdid->info.manuf_id; + params.productId = pParsedEdid->info.product_id; + +edid_done: + nvFree(edid.buffer); + nvFree(pParsedEdid); + + /* Internal mux initialization will fail without manfId/productId */ + if (!params.manfId || !params.productId) { + return; + } + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_INIT_MUX_DATA, + &params, + sizeof(params)); + + if (ret == NVOS_STATUS_SUCCESS) { + pDispEvo->muxDisplays = nvAddDpyIdToDpyIdList(pDpyEvo->id, + pDispEvo->muxDisplays); + } else { + nvEvoLogDebug(EVO_LOG_ERROR, "Failed to initialize mux on %s.", + pDpyEvo->name); + } +} + +static NVDpyIdList GetValidMuxDpys(NVDispEvoPtr pDispEvo) +{ + NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS params = { 0 }; + + params.subDeviceInstance = pDispEvo->displayOwner; + + nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX, + &params, sizeof(params)); + + return nvNvU32ToDpyIdList(params.muxDisplayMask); +} + +void nvRmMuxInit(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NVDpyIdList validMuxDpys = GetValidMuxDpys(pDispEvo); + NVDpyEvoPtr pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, validMuxDpys, pDispEvo) { + MuxInit(pDpyEvo); + } + } +} + +/*! + * Perform mux pre-switch operations + * + * \param[in] pDpyEvo The Dpy of the target mux + * \param[in] state The target mux state + * + * \return TRUE on success; FALSE on failure + */ +NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state) +{ + NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo; + NVDevEvoPtr pDevEvo; + NvU32 ret; + + pDispEvo = pDpyEvo->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) { + return FALSE; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO); + + if (state == MUX_STATE_DISCRETE) { + params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU; + } else if (state == MUX_STATE_INTEGRATED) { + params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU; + } else { + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS, + &params, sizeof(params)); + + nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPre status %d", ret); + + return ret == NVOS_STATUS_SUCCESS; +} + +/*!
+ * Perform mux switch operation + * + * \param[in] pDpyEvo The Dpy of the target mux + * \param[in] state The target mux state + * + * \return TRUE on success; FALSE on failure + */ +NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state) +{ + NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo; + NVDevEvoPtr pDevEvo; + NvU32 ret; + + pDispEvo = pDpyEvo->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) { + return FALSE; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + + if (state == MUX_STATE_DISCRETE) { + params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU; + } else if (state == MUX_STATE_INTEGRATED) { + params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU; + } else { + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX, + &params, sizeof(params)); + + nvEvoLogDebug(EVO_LOG_INFO, "RmMuxSwitch status %d", ret); + + /* + * Force link training after waiting for the DP AUX link to settle. + * The delay duration comes from DFP_MUX_AUX_SETTLE_DELAY_MS_DEFAULT + * in drivers/resman/kernel/inc/dfpmux.h. + */ + nvkms_usleep(100000); + + if (pDpyEvo->internal && state == MUX_STATE_DISCRETE) { + nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)); + nvDPNotifyShortPulse(pDpyEvo->pConnectorEvo->pDpLibConnector); + nvDPFireExpiredTimers(pDevEvo); + } + + return ret == NVOS_STATUS_SUCCESS; +} + +/*! + * Perform mux post-switch operations + * + * \param[in] pDpyEvo The Dpy of the target mux + * \param[in] state The target mux state + * + * \return TRUE on success; FALSE on failure + */ +NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state) +{ + NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo; + NVDevEvoPtr pDevEvo; + NvU32 ret; + + pDispEvo = pDpyEvo->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) { + return FALSE; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO); + + if (state == MUX_STATE_DISCRETE) { + params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU; + } else if (state == MUX_STATE_INTEGRATED) { + params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU; + } else { + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS, + &params, sizeof(params)); + + nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPost status %d", ret); + + return ret == NVOS_STATUS_SUCCESS; +} + +/*! + * Query the current state of a dynamic mux + * + * \param[in] pDpyEvo The Dpy of the target mux whose state is to be queried + * + * \return Mux state (either MUX_STATE_INTEGRATED or MUX_STATE_DISCRETE) on + * success; MUX_STATE_UNKNOWN on failure.
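+ * + * The state returned here can be used to choose the target of a subsequent + * switch; a full switch is expected to sequence the three calls above + * roughly as follows (a sketch; the actual caller ordering lives outside + * this file): + * + * if (nvRmMuxPre(pDpyEvo, state) && nvRmMuxSwitch(pDpyEvo, state)) { + * (void) nvRmMuxPost(pDpyEvo, state); + * }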
+ */ +NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo) +{ + NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo; + NVDevEvoPtr pDevEvo; + + pDispEvo = pDpyEvo->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) { + return MUX_STATE_UNKNOWN; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + + if (NVOS_STATUS_SUCCESS == nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS, + &params, sizeof(params))) { + if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _INTEGRATED_GPU, + params.muxStatus)) { + return MUX_STATE_INTEGRATED; + } + if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _DISCRETE_GPU, + params.muxStatus)) { + return MUX_STATE_DISCRETE; + } + } + + return MUX_STATE_UNKNOWN; +} + +void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS dispParams = { 0 }; + NvU32 displayMask, displayId; + NvU32 brightness; + + nvAssert(pDispEvo->backlightDevice == NULL); + + dispParams.subDeviceInstance = pDispEvo->displayOwner; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS, + &dispParams, sizeof(dispParams)) != NV_OK) { + return; + } + + /* Find a display with a backlight */ + displayMask = dispParams.availableInternalDisplaysMask; + for (; displayMask; displayMask &= ~LOWESTBIT(displayMask)) + { + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NV_STATUS status; + + displayId = LOWESTBIT(displayMask); + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + if (status == NV_OK) + { + brightness = params.brightness; + break; + } + } + + if (displayMask == 0) + { + /* No internal display has a backlight */ + return; + } + + pDispEvo->backlightDevice = nvkms_register_backlight( + pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId, + displayId, pDispEvo, + brightness); +} + +void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo) +{ + if (pDispEvo->backlightDevice != NULL) { + nvkms_unregister_backlight(pDispEvo->backlightDevice); + } + pDispEvo->backlightDevice = NULL; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c new file mode 100644 index 0000000..ab3a9f8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c @@ -0,0 +1,260 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-kernel-rmapi-ops.h" +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-rmapi.h" + +NvU32 nvRmApiAlloc( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_ALLOC; + + ops.params.alloc.hRoot = hClient; + ops.params.alloc.hObjectParent = hParent; + ops.params.alloc.hObjectNew = hObject; + ops.params.alloc.hClass = hClass; + ops.params.alloc.pAllocParms = NV_PTR_TO_NvP64(pAllocParams); + + nvkms_call_rm(&ops); + + return ops.params.alloc.status; +} + +NvU32 nvRmApiAllocMemory64( + NvU32 hClient, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV01_ALLOC_MEMORY; + + ops.params.allocMemory64.hRoot = hClient; + ops.params.allocMemory64.hObjectParent = hParent; + ops.params.allocMemory64.hObjectNew = hMemory; + ops.params.allocMemory64.hClass = hClass; + ops.params.allocMemory64.flags = flags; + ops.params.allocMemory64.pMemory = NV_PTR_TO_NvP64(*ppAddress); + ops.params.allocMemory64.limit = *pLimit; + + nvkms_call_rm(&ops); + + *pLimit = ops.params.allocMemory64.limit; + *ppAddress = NvP64_VALUE(ops.params.allocMemory64.pMemory); + + return ops.params.allocMemory64.status; +} + +NvU32 nvRmApiControl( + NvU32 hClient, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_CONTROL; + + ops.params.control.hClient = hClient; + ops.params.control.hObject = hObject; + ops.params.control.cmd = cmd; + ops.params.control.params = NV_PTR_TO_NvP64(pParams); + ops.params.control.paramsSize = paramsSize; + + nvkms_call_rm(&ops); + + return ops.params.control.status; +} + +NvU32 nvRmApiDupObject( + NvU32 hClient, + NvU32 hParent, + NvU32 hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_DUP_OBJECT; + + ops.params.dupObject.hClient = hClient; + ops.params.dupObject.hParent = hParent; + ops.params.dupObject.hObject = hObjectDest; + ops.params.dupObject.hClientSrc = hClientSrc; + ops.params.dupObject.hObjectSrc = hObjectSrc; + ops.params.dupObject.flags = flags; + + nvkms_call_rm(&ops); + + return ops.params.dupObject.status; +} + +NvU32 nvRmApiFree( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject) +{ + 
nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV01_FREE; + + ops.params.free.hRoot = hClient; + ops.params.free.hObjectParent = hParent; + ops.params.free.hObjectOld = hObject; + + nvkms_call_rm(&ops); + + return ops.params.free.status; +} + +NvU32 nvRmApiVidHeapControl( + void *pVidHeapControlParams) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + NVOS32_PARAMETERS *pParams = pVidHeapControlParams; + + ops.op = NV04_VID_HEAP_CONTROL; + + ops.params.pVidHeapControl = pParams; + + nvkms_call_rm(&ops); + + return pParams->status; +} + +NvU32 nvRmApiMapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_MAP_MEMORY; + + ops.params.mapMemory.hClient = hClient; + ops.params.mapMemory.hDevice = hDevice; + ops.params.mapMemory.hMemory = hMemory; + ops.params.mapMemory.offset = offset; + ops.params.mapMemory.length = length; + ops.params.mapMemory.flags = flags; + + nvkms_call_rm(&ops); + + *ppLinearAddress = NvP64_VALUE(ops.params.mapMemory.pLinearAddress); + + return ops.params.mapMemory.status; +} + +NvU32 nvRmApiUnmapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + const void *pLinearAddress, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_UNMAP_MEMORY; + + ops.params.unmapMemory.hClient = hClient; + ops.params.unmapMemory.hDevice = hDevice; + ops.params.unmapMemory.hMemory = hMemory; + ops.params.unmapMemory.pLinearAddress = NV_PTR_TO_NvP64(pLinearAddress); + ops.params.unmapMemory.flags = flags; + + nvkms_call_rm(&ops); + + return ops.params.unmapMemory.status; +} + +NvU32 nvRmApiMapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_MAP_MEMORY_DMA; + + ops.params.mapMemoryDma.hClient = hClient; + ops.params.mapMemoryDma.hDevice = hDevice; + ops.params.mapMemoryDma.hDma = hDma; + ops.params.mapMemoryDma.hMemory = hMemory; + ops.params.mapMemoryDma.offset = offset; + ops.params.mapMemoryDma.length = length; + ops.params.mapMemoryDma.flags = flags; + ops.params.mapMemoryDma.dmaOffset = *pDmaOffset; + + nvkms_call_rm(&ops); + + *pDmaOffset = ops.params.mapMemoryDma.dmaOffset; + + return ops.params.mapMemoryDma.status; +} + +NvU32 nvRmApiUnmapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_UNMAP_MEMORY_DMA; + + ops.params.unmapMemoryDma.hClient = hClient; + ops.params.unmapMemoryDma.hDevice = hDevice; + ops.params.unmapMemoryDma.hDma = hDma; + ops.params.unmapMemoryDma.hMemory = hMemory; + ops.params.unmapMemoryDma.flags = flags; + ops.params.unmapMemoryDma.dmaOffset = dmaOffset; + + nvkms_call_rm(&ops); + + return ops.params.unmapMemoryDma.status; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c new file mode 100644 index 0000000..3d78bb2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c @@ -0,0 +1,1259 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-surface.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-flip.h" +#include "nvkms-private.h" +#include "nvos.h" + +// NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD +#include "ctrl/ctrl0000/ctrl0000unix.h" + +/* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */ +#include "class/cl0071.h" + +static void FreeSurfaceEvoStruct(NVSurfaceEvoPtr pSurfaceEvo) +{ + if (pSurfaceEvo == NULL) { + return; + } + + nvAssert(!nvSurfaceEvoInAnyOpens(pSurfaceEvo)); + + nvAssert(pSurfaceEvo->structRefCnt == 0); + nvAssert(pSurfaceEvo->rmRefCnt == 0); + + nvFree(pSurfaceEvo); +} + +static void FreeSurfaceEvoRm(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo) +{ + NvU64 structRefCnt; + NvU32 firstPlaneRmHandle; + NvU8 planeIndex; + + if ((pDevEvo == NULL) || (pSurfaceEvo == NULL)) { + return; + } + + nvAssert(pSurfaceEvo->rmRefCnt == 0); + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + nvRmEvoFreeDispContextDMA(pDevEvo, + &pSurfaceEvo->planes[planeIndex].ctxDma); + } + + firstPlaneRmHandle = pSurfaceEvo->planes[0].rmHandle; + + if (firstPlaneRmHandle != 0) { + + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (pSurfaceEvo->cpuAddress[sd] != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + firstPlaneRmHandle, + pSurfaceEvo->cpuAddress[sd], + 0); + pSurfaceEvo->cpuAddress[sd] = NULL; + } + } + + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + if (pSurfaceEvo->planes[planeIndex].rmHandle == 0) { + break; + } + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfaceEvo->planes[planeIndex].rmHandle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSurfaceEvo->planes[planeIndex].rmHandle); + + pSurfaceEvo->planes[planeIndex].rmHandle = 0; + } + + /* + * The surface is now an orphan: clear the pSurfaceEvo, for + * everything other than its structRefCnt. The only operation + * that can be done on it is unregistration. 
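+ *
+ * At this point rmRefCnt is 0, so GetSurfaceFromHandle() will treat the
+ * surface as an orphan and refuse to return it; the structure itself is
+ * freed once the last reference is dropped via
+ * nvEvoDecrementSurfaceStructRefCnt().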
+ */ + structRefCnt = pSurfaceEvo->structRefCnt; + nvkms_memset(pSurfaceEvo, 0, sizeof(*pSurfaceEvo)); + pSurfaceEvo->structRefCnt = structRefCnt; +} + +void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)); + + pSurfaceEvo->structRefCnt++; +} + +void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(pSurfaceEvo->structRefCnt >= 1); + pSurfaceEvo->structRefCnt--; + + if (pSurfaceEvo->structRefCnt == 0) { + FreeSurfaceEvoStruct(pSurfaceEvo); + } +} + +static NvBool ValidatePlaneProperties( + NVDevEvoPtr pDevEvo, + const struct NvKmsRegisterSurfaceRequest *pRequest) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(pRequest->format); + NvU8 planeIndex; + + /* + * Reject all registration requests for multi-planar NISO surfaces. + * This is a non-sensical request. + */ + if ((pRequest->isoType == NVKMS_MEMORY_NISO) && + (pFormatInfo->numPlanes > 1)) { + return FALSE; + } + + for (planeIndex = 0; planeIndex < pFormatInfo->numPlanes; planeIndex++) { + + const NvU64 planeOffset = pRequest->planes[planeIndex].offset; + NvU64 planePitch = pRequest->planes[planeIndex].pitch; + NvU64 rmObjectSizeInBytes = + pRequest->planes[planeIndex].rmObjectSizeInBytes; + NvU64 widthInBytes; + NvU64 planeSizeInBytes; + NvU32 planeEffectiveLines = pRequest->heightInPixels; + NvU32 widthInPixels = pRequest->widthInPixels; + + if ((planePitch == 0U) || (rmObjectSizeInBytes == 0U)) + { + nvEvoLog(EVO_LOG_ERROR, "Invalid request parameters, planePitch or rmObjectSizeInBytes, passed during surface registration"); + return FALSE; + } + + if ((pRequest->isoType == NVKMS_MEMORY_ISO) && + ((planeEffectiveLines == 0U) || (widthInPixels == 0U))) + { + nvEvoLog(EVO_LOG_ERROR, "Invalid request parameters, heightInPixels or widthInPixels, passed during surface registration for ISO surfaces"); + return FALSE; + } + + /* The offset must be 1KB-aligned. */ + if ((planeOffset & + ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1)) != 0) { + return FALSE; + } + + /* + * Convert planePitch to units of bytes if it's currently specified in + * units of blocks. Each block is 64-bytes wide. + */ + if (pRequest->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + planePitch <<= NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH; + } + + /* + * Convert width to bytes. + */ + widthInBytes = widthInPixels; + + if (pFormatInfo->isYUV) { + NvU8 divisor = 1; + NvU8 bytesPerBlock = pFormatInfo->yuv.storageBitsPerComponent >> 3; + + switch (pFormatInfo->numPlanes) { + case 3: + /* planar */ + if (planeIndex > 0) { + divisor = pFormatInfo->yuv.horizChromaDecimationFactor; + } + break; + + case 2: + /* semi-planar */ + if (planeIndex > 0) { + divisor = pFormatInfo->yuv.horizChromaDecimationFactor; + bytesPerBlock *= 2; + } + break; + + case 1: + /* 4:2:2 packed */ + bytesPerBlock *= 2; + } + + widthInBytes *= bytesPerBlock; + /* Dimensions of decimated planes of odd-width YUV surfaces are + * supposed to be rounded up */ + widthInBytes = (widthInBytes + (divisor - 1)) / divisor; + } else { + widthInBytes *= pFormatInfo->rgb.bytesPerPixel; + } + + /* + * Check that an entire line of pixels will fit in the pitch value + * specified. + */ + if (widthInBytes > planePitch) { + return FALSE; + } + + /* + * Check that the entire memory region occupied by this plane falls + * within the size of the underlying memory allocation. + * + * Force planeEffectiveLines to be even before dividing by + * vertChromaDecimationFactor. 
The height of the source fetch rectangle + * must be even anyway if there's vertical decimation. + */ + if (planeIndex != 0 && pFormatInfo->isYUV && + pFormatInfo->yuv.vertChromaDecimationFactor > 1) { + planeEffectiveLines = planeEffectiveLines & ~(0x1); + planeEffectiveLines /= pFormatInfo->yuv.vertChromaDecimationFactor; + } + + planeSizeInBytes = planeEffectiveLines * planePitch; + + if ((pRequest->isoType == NVKMS_MEMORY_ISO) && + (planeSizeInBytes == 0U)) + { + nvEvoLog(EVO_LOG_ERROR, "Plane size calculated during ISO surface registration is 0"); + return FALSE; + } + + if ((planeSizeInBytes > rmObjectSizeInBytes) || + (planeOffset > (rmObjectSizeInBytes - planeSizeInBytes))) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool ValidateRegisterSurfaceRequest( + NVDevEvoPtr pDevEvo, + const struct NvKmsRegisterSurfaceRequest *pRequest) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(pRequest->format); + + /* + * The purpose of this check is to make sure the given format is valid and not + * some garbage number. It exists to check for format validity in the case + * where noDisplayHardwareAccess is TRUE. + */ + if (pFormatInfo->depth == 0) { + return FALSE; + } + + /* + * NvKmsSurfaceMemoryFormat has a few formats that we will never display. + * Head surface has several formats it wants to texture from but we won't + * (and can't) display surfaces with those formats. We should reject any + * attempt to register a surface that is marked for display and uses one of + * those formats. + */ + if (!pRequest->noDisplayHardwareAccess) { + /* + * This isn't a perfect check since we can't predict which channel this + * surface will be used on, but we should definitely reject a format if + * it isn't usable on any channel. + */ + NvBool usableOnAnyChannel = FALSE; + NvU8 layer; + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + + if (NVBIT64(pRequest->format) & + pDevEvo->caps.layerCaps[layer].supportedSurfaceMemoryFormats) { + usableOnAnyChannel = TRUE; + break; + } + } + + if (!usableOnAnyChannel) { + return FALSE; + } + + if (!pDevEvo->hal->ValidateWindowFormat(pRequest->format, NULL, NULL)) { + return FALSE; + } + } + + if (!ValidatePlaneProperties(pDevEvo, pRequest)) { + return FALSE; + } + + /* XXX Validate surface properties. */ + + return TRUE; +} + + +void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsRegisterSurfaceParams *pParams, + enum NvHsMapPermissions hsMapPermissions) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + const struct NvKmsRegisterSurfaceRequest *pRequest = &pParams->request; + NVSurfaceEvoPtr pSurfaceEvo = NULL; + NvKmsSurfaceHandle surfaceHandle = 0; + NvU32 result; + NvU8 planeIndex; + NvBool nisoMemory = (pRequest->isoType == NVKMS_MEMORY_NISO); + + /* + * HeadSurface needs a CPU mapping of surfaces containing semaphores, in + * order to check, from the CPU, if a semaphore-interlocked flip is ready.
+ */ + const NvBool needCpuMapping = nisoMemory && pDevEvo->isHeadSurfaceSupported; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + if (!ValidateRegisterSurfaceRequest(pDevEvo, pRequest)) { + goto fail; + } + + pSurfaceEvo = nvCalloc(1, sizeof(*pSurfaceEvo)); + + if (pSurfaceEvo == NULL) { + goto fail; + } + + pSurfaceEvo->format = pRequest->format; + + surfaceHandle = nvEvoCreateApiHandle(pOpenDevSurfaceHandles, pSurfaceEvo); + + if (surfaceHandle == 0) { + goto fail; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + const NvU32 planeRmHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (planeRmHandle == 0) { + goto fail; + } + + pSurfaceEvo->planes[planeIndex].rmHandle = planeRmHandle; + + if (pRequest->useFd) { + /* + * On T234, the 'fd' provided is allocated outside of RM whereas on + * dGPU it is allocated by RM. So we check whether the fd is associated + * with an nvidia character device, and if it is, then we consider that + * it belongs to RM. Based on whether it belongs to RM or not we need + * to call different mechanisms to import it. + */ + if (nvkms_fd_is_nvidia_chardev(pRequest->planes[planeIndex].u.fd)) { + NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { }; + importParams.fd = pRequest->planes[planeIndex].u.fd; + importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + importParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle; + importParams.object.data.rmObject.hParent = pDevEvo->deviceHandle; + importParams.object.data.rmObject.hObject = planeRmHandle; + + result = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD, + &importParams, + sizeof(importParams)); + } else { + /* + * If 'fd' doesn't belong to resman, assume that it is allocated by + * some other dmabuf allocator (like nvmap). + */ + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS allocParams = { }; + + allocParams.type = NVOS32_TYPE_IMAGE; + allocParams.descriptor = + (NvP64)(NvU64)(pRequest->planes[planeIndex].u.fd); + allocParams.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE; + allocParams.limit = pRequest->planes[planeIndex].rmObjectSizeInBytes - 1; + + allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, + allocParams.attr); + allocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, + _NO, allocParams.attr2); + + /* + * The NVKMS client performing the import doesn't know what the original + * CPU cache attributes are, so assume WRITE_BACK since we only need RM to + * IOVA map the memory into display's address space and the CPU cache + * attributes shouldn't really matter in this case. + */ + allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_BACK, allocParams.attr); + allocParams.flags = NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED; + + switch (pRequest->layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + allocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + allocParams.attr); + break; + + case NvKmsSurfaceMemoryLayoutPitch: + allocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + allocParams.attr); + break; + + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Unknown layout"); + goto fail; + } + + if (nisoMemory) { + allocParams.attr2 = + FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, + allocParams.attr2); + } + + result = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + planeRmHandle, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + &allocParams); + + /* + * Bug 200614156.
RM doesn't support mapping osdesc objects into CPU's + * address space. + */ + nvAssert(!needCpuMapping); + } + } else { + /* + * If 'useFd' is not specified, the (rmClient, rmObject) tuple from + * the request is an object in the caller's RM client space. + * Call RM to dup the memory into nvkms's RM client. + */ + result = nvRmApiDupObject(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + planeRmHandle, + pRequest->rmClient, + pRequest->planes[planeIndex].u.rmObject, + 0); + } + + if (result != NVOS_STATUS_SUCCESS) { + goto fail; + } + + /* XXX Validate sizeInBytes: can we query the surface size from RM? */ + + if (!pRequest->noDisplayHardwareAccess) { + + const NvU32 planeCtxDma = + nvRmEvoAllocateAndBindDispContextDMA( + pDevEvo, + planeRmHandle, + pRequest->layout, + pRequest->planes[planeIndex].rmObjectSizeInBytes - 1); + if (!planeCtxDma) { + goto fail; + } + + pSurfaceEvo->planes[planeIndex].ctxDma = planeCtxDma; + } + + pSurfaceEvo->planes[planeIndex].pitch = + pRequest->planes[planeIndex].pitch; + pSurfaceEvo->planes[planeIndex].offset = + pRequest->planes[planeIndex].offset; + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes = + pRequest->planes[planeIndex].rmObjectSizeInBytes; + } + + pSurfaceEvo->requireCtxDma = !pRequest->noDisplayHardwareAccess; + + /* + * Map the first plane of the surface only into the CPU's address space. + * This is the only valid plane since we would have already rejected + * multi-planar semaphore requests earlier. + */ + if (needCpuMapping) { + + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + result = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pSurfaceEvo->planes[0].rmHandle, + 0, + pRequest->planes[0].rmObjectSizeInBytes, + (void **) &pSurfaceEvo->cpuAddress[sd], + 0); + + if (result != NVOS_STATUS_SUCCESS) { + goto fail; + } + } + } + + pSurfaceEvo->widthInPixels = pRequest->widthInPixels; + pSurfaceEvo->heightInPixels = pRequest->heightInPixels; + pSurfaceEvo->layout = pRequest->layout; + pSurfaceEvo->log2GobsPerBlockY = pRequest->log2GobsPerBlockY; + pSurfaceEvo->isoType = pRequest->isoType; + + pSurfaceEvo->rmRefCnt = 1; + pSurfaceEvo->structRefCnt = 1; + + pSurfaceEvo->owner.pOpenDev = pOpenDev; + pSurfaceEvo->owner.surfaceHandle = surfaceHandle; + + pParams->reply.surfaceHandle = surfaceHandle; + + return; + +fail: + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo); + FreeSurfaceEvoStruct(pSurfaceEvo); +} + +/* Temporary storage used by ClearSurfaceUsage{Collect,Apply}. */ +struct ClearSurfaceUsageCache { + struct { + struct { + NvBool flipToNull : 1; + NvBool flipSemaphoreToNull : 1; + + NvBool needToIdle : 1; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; + + NvBool flipCursorToNull : 1; + } head[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]; +}; + +/* + * Search for heads where the surfaces are used, and populate the structure + * pointed to by 'pCache' to indicate which channels need to be updated.
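+ *
+ * A typical use, as in nvEvoUnregisterSurface() below, is to collect
+ * usage into a zero-initialized cache and then apply it:
+ *
+ *   struct ClearSurfaceUsageCache cache = { };
+ *   ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache);
+ *   ClearSurfaceUsageApply(pDevEvo, &cache, skipUpdate);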
+ */ +static void +ClearSurfaceUsageCollect(NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo, + struct ClearSurfaceUsageCache *pCache) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + const NVFlipChannelEvoHwState *pMainFlipState = + &pSdHeadState->layer[NVKMS_MAIN_LAYER]; + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + /* + * XXX NVKMS TODO: flip across heads/subdevices for all scenarios + * that are flip locked. + */ + + if (!pMainFlipState->syncObject.usingSyncpt && + (pSurfaceEvo == pMainFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo || + pSurfaceEvo == pMainFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo)) { + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipSemaphoreToNull = TRUE; + } + + if (pSurfaceEvo == pMainFlipState->pSurfaceEvo[NVKMS_LEFT] || + pSurfaceEvo == pMainFlipState->pSurfaceEvo[NVKMS_RIGHT] || + pSurfaceEvo == pMainFlipState->completionNotifier.surface.pSurfaceEvo) { + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull = TRUE; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NVFlipChannelEvoHwState *pLayerFlipState = + &pSdHeadState->layer[layer]; + + if (layer == NVKMS_MAIN_LAYER) { + continue; + } + + if (pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_LEFT] || + pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT] || + pSurfaceEvo == pLayerFlipState->completionNotifier.surface.pSurfaceEvo || + (!pLayerFlipState->syncObject.usingSyncpt && + (pSurfaceEvo == pLayerFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo || + pSurfaceEvo == pLayerFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo))) { + pCache->head[sd][head].layer[layer].flipToNull = TRUE; + } + + /* + * EVO requires that, when flipping the base channel (aka main layer) to + * NULL, overlay channel is also flipped to NULL. + */ + if (pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull && + (pLayerFlipState->pSurfaceEvo[NVKMS_LEFT] != NULL || + pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT] != NULL)) { + pCache->head[sd][head].layer[layer].flipToNull = TRUE; + } + } + + if (pSurfaceEvo == pSdHeadState->cursor.pSurfaceEvo) { + pCache->head[sd][head].flipCursorToNull = TRUE; + } + } + } +} + +/* + * Do the hard work to babysit the hardware to ensure that any channels which + * need clearing have actually done so before proceeding to free memory and + * remove ctxdmas from the hash table. + * + * This is achieved in several steps: + * 1. Issue a flip of any overlay layer to NULL -- these are processed + * separately since using one Flip request would interlock them, potentially + * exacerbating stuck channels by getting other channels stuck too. + * Pre-NVDisplay requires that, when flipping the core channel to NULL, + * all satellite channels are also flipped to NULL. The EVO2 hal takes care + * to enable/disable the core surface along with the base surface, + * therefore flip overlay to NULL before base. + * 2. Issue a flip of any main layer to NULL + * 3. Wait for any base/overlay layer that we expect to be idle to actually + * be idle. If they don't idle in a timely fashion, apply accelerators to + * forcibly idle any problematic channels. + * 4. Issue a flip of any core channels to NULL. 
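+ *
+ * Steps 1 and 2 are deliberately issued as separate flip requests: a
+ * single combined request would interlock the channels with each other,
+ * which is exactly what this function is trying to avoid when some of
+ * them may already be stuck.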
+ */ +static void +ClearSurfaceUsageApply(NVDevEvoPtr pDevEvo, + struct ClearSurfaceUsageCache *pCache, + NvBool skipUpdate) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, sd; + NvBool found = FALSE; + struct NvKmsFlipRequest *request = nvCalloc(1, sizeof(*request)); + + if (request == NULL) { + nvAssert(!"Failed to allocate memory"); + return; + } + + /* 1. Issue a flip of any overlay layer to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + struct NvKmsFlipCommonParams *pRequestOneHead = + &request->sd[sd].head[head]; + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + + if (layer == NVKMS_MAIN_LAYER) { + continue; + } + + if (pCache->head[sd][head].layer[layer].flipToNull) { + pRequestOneHead->layer[layer].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestOneHead->layer[layer].compositionParams.specified = TRUE; + pRequestOneHead->layer[layer].syncObjects.specified = TRUE; + pRequestOneHead->layer[layer].completionNotifier.specified = TRUE; + + request->sd[sd].requestedHeadsBitMask |= NVBIT(head); + found = TRUE; + + pCache->head[sd][head].layer[layer].needToIdle = TRUE; + } + } + } + } + + if (found) { + request->commit = NV_TRUE; + + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, request, NULL, skipUpdate, + FALSE /* allowFlipLock */); + + nvkms_memset(request, 0, sizeof(*request)); + found = FALSE; + } + + /* + * No need to idle the overlay layer before flipping the main channel to + * NULL, because the FlipOverlay90() function in the EVO2 hal makes sure + * that the overlay's flip to NULL is always interlocked with the core + * channel and the base (main layer) channel's flip to NULL can proceed only + * after completion of the overlay's flip to NULL (the base channel's flip + * to NULL interlocks with the core channel's flip to NULL). + */ + + /* 2. Issue a flip of any main layer to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + struct NvKmsFlipCommonParams *pRequestOneHead = + &request->sd[sd].head[head]; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull || + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipSemaphoreToNull) { + + if (pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull) { + pRequestOneHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE; + + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].needToIdle = TRUE; + } + + /* XXX arguably we should also idle for this case, but we + * don't currently have a way to do so without also + * clearing the ISO surface */ + pRequestOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE; + pRequestOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE; + + request->sd[sd].requestedHeadsBitMask |= NVBIT(head); + found = TRUE; + } + } + } + + if (found) { + request->commit = NV_TRUE; + + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, request, NULL, skipUpdate, + FALSE /* allowFlipLock */); + + nvkms_memset(request, 0, sizeof(*request)); + found = FALSE; + } + + /* + * 3. Wait for any base/overlay layer that we expect to be idle to actually + * be idle. 
If they don't idle in a timely fashion, apply accelerators to + * forcibly idle any problematic channels. + */ + if (!skipUpdate) { + NvU64 startTime = 0; + const NvU32 timeout = 500000; // .5 seconds + NvBool allIdle; + + do { + allIdle = TRUE; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NvBool isMethodPending; + + if (!pCache->head[sd][head].layer[layer].needToIdle) { + continue; + } + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, pDevEvo->head[head].layer[layer], sd, + &isMethodPending) && + isMethodPending) { + + allIdle = FALSE; + } else { + /* This has been completed, no need to keep trying */ + pCache->head[sd][head].layer[layer].needToIdle = FALSE; + } + } + } + } + + if (!allIdle) { + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + break; + } + nvkms_yield(); + } + } while (!allIdle); + + /* If we timed out above, force things to be idle. */ + if (!allIdle) { + NVEvoIdleChannelState idleChannelState = { }; + NvBool tryToForceIdle = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pCache->head[sd][head].layer[layer].needToIdle) { + idleChannelState.subdev[sd].channelMask |= + pDevEvo->head[head].layer[layer]->channelMask; + tryToForceIdle = TRUE; + } + } + } + } + + if (tryToForceIdle) { + NvBool ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, &idleChannelState); + if (!ret) { + nvAssert(ret); + } + } + } + } + + /* 4. Issue a flip of any core channels to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + struct NvKmsFlipCommonParams *pRequestOneHead = + &request->sd[sd].head[head]; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (pCache->head[sd][head].flipCursorToNull) { + pRequestOneHead->cursor.imageSpecified = TRUE; + request->sd[sd].requestedHeadsBitMask |= NVBIT(head); + found = TRUE; + } + } + } + + if (found) { + request->commit = NV_TRUE; + + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, request, NULL, skipUpdate, + FALSE /* allowFlipLock */); + } + + nvFree(request); +} + +/* + * This function unregisters/releases all of the surface handles remaining for + * the given pOpenDev. + * + * It duplicates some functionality of nvEvoUnregisterSurface() and + * nvEvoReleaseSurface(), but with an important difference: it processes the + * "clear surface usage" step for all surfaces up front, and only once that is + * complete it proceeds with freeing the surfaces. + * + * In practice, this makes teardown much smoother than invoking those functions + * individually for each surface, particularly in the case that the hardware is + * stuck and needs accelerators. Consider the case where a client has + * registered several surfaces, and is flipping between two of them, and the + * hardware is stuck on a semaphore acquire that will never complete with + * several frames pending in the pushbuffer. 
If the first surface processed + * by nvEvoUnregisterSurface() happens to be the current "back buffer" (i.e., + * not the most recently pushed surface to be displayed), then + * nvEvoUnregisterSurface() will call ClearSurfaceUsage(), but it will find no + * channels to clear, and will proceed with nvEvoDecrementSurfaceRefCnts() + * which will call nvRMSyncEvoChannel() to drain any outstanding methods. Due + * to the stalled semaphore, nvRMSyncEvoChannel() will stall for 2 seconds, + * time out along with a nasty message to the kernel log, then we'll free the + * surface and remove its entry from the display hash table anyway. And that + * may happen several times until we finally call nvEvoUnregisterSurface() on + * the surface which is the most recently requested flip, where + * ClearSurfaceUsage() will finally get a chance to tear down the channel + * forcefully by using accelerators to skip the semaphore acquire. But, some + * of the methods which were outstanding and now get processed may reference a + * ctxdma which was already freed, triggering nasty Xid messages. + * + * By gathering up all the channels we can to find which ones to clear first, + * we have a much higher chance of avoiding these timeouts. + */ +void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVEvoApiHandlesRec *pOpenDevSurfaceHandles) +{ + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + struct ClearSurfaceUsageCache cache = { }; + NvBool needApply = FALSE; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles, + pSurfaceEvo, surfaceHandle) { + + if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + /* + * If something besides the owner has an rmRefCnt reference, + * the surface might be in use by EVO; flip to NULL to attempt + * to free it. + */ + if (pSurfaceEvo->rmRefCnt > 1) { + ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); + needApply = TRUE; + } + } + } + + if (needApply) { + ClearSurfaceUsageApply(pDevEvo, &cache, FALSE); + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles, + pSurfaceEvo, surfaceHandle) { + const NvBool isOwner = + nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle); + + /* Remove the handle from the calling client's namespace. */ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + if (isOwner) { + nvEvoDecrementSurfaceRefCnts(pSurfaceEvo); + } else { + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); + } + } + +} + +void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle, + NvBool skipUpdate) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + NVSurfaceEvoPtr pSurfaceEvo; + + pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + surfaceHandle); + if (pSurfaceEvo == NULL) { + return; + } + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Surface unregister attempted by non-owner; " + "non-owners must release the surface."); + return; + } + + /* + * If something besides the owner has an rmRefCnt reference, + * the surface might be in use by EVO; flip to NULL to attempt + * to free it. + */ + if (pSurfaceEvo->rmRefCnt > 1) { + struct ClearSurfaceUsageCache cache = { }; + + ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); + ClearSurfaceUsageApply(pDevEvo, &cache, skipUpdate); + } + + /* Remove the handle from the calling client's namespace. 
*/ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvEvoDecrementSurfaceRefCnts(pSurfaceEvo); +} + +void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + NVSurfaceEvoPtr pSurfaceEvo; + + pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + surfaceHandle); + if (pSurfaceEvo == NULL) { + return; + } + + if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Surface release attempted by owner; " + "owners must unregister the surface."); + return; + } + + /* Remove the handle from the calling client's namespace. */ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); +} + +void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)); + + pSurfaceEvo->rmRefCnt++; + pSurfaceEvo->structRefCnt++; +} + +void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(pSurfaceEvo->rmRefCnt >= 1); + pSurfaceEvo->rmRefCnt--; + + if (pSurfaceEvo->rmRefCnt == 0) { + NVDevEvoPtr pDevEvo = + nvGetDevEvoFromOpenDev(pSurfaceEvo->owner.pOpenDev); + + /* + * Don't sync if this surface was registered as not requiring display + * hardware access, to WAR timeouts that result from OGL unregistering + * a deferred request fifo causing a sync here that may timeout if + * GLS hasn't had the opportunity to release semaphores with pending + * flips. (Bug 2050970) + */ + if (pSurfaceEvo->requireCtxDma) { + /* + * XXX NVKMS TODO + * Make the sync more efficient: we only need to sync if the + * in-flight methods flip away from this surface. + */ + NvU32 head; + + /* + * If the core channel is no longer allocated, we don't need to + * sync. This assumes the channels are allocated/deallocated + * together. 
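+ *
+ * The nvRMSyncEvoChannel() calls below drain any methods still pending
+ * in the core and per-layer channels, so that no in-flight method can
+ * still reference this surface once FreeSurfaceEvoRm() releases its
+ * ctxdmas and RM handles.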
+ */ + if (pDevEvo->core) { + + if (pDevEvo->hal->ClearSurfaceUsage != NULL) { + pDevEvo->hal->ClearSurfaceUsage(pDevEvo, pSurfaceEvo); + } + + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pChannel = + pDevEvo->head[head].layer[layer]; + + nvRMSyncEvoChannel(pDevEvo, pChannel, __LINE__); + } + } + } + } + + FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo); + } + + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); +} + +NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo) +{ + return ((pSurfaceEvo->rmRefCnt == NV_U64_MAX) || + (pSurfaceEvo->structRefCnt == NV_U64_MAX)); +} + +static NVSurfaceEvoPtr GetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NVEvoChannelMask channelMask, + const NvBool requireCtxDma) +{ + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvAssert(requireCtxDma || !channelMask); + + if (pSurfaceEvo == NULL) { + return NULL; + } + + if (pSurfaceEvo->rmRefCnt == 0) { /* orphan */ + return NULL; + } + + if (requireCtxDma && !pSurfaceEvo->requireCtxDma) { + return NULL; + } + + /* Validate that the surface can be used as a cursor image */ + if ((channelMask & + NV_EVO_CHANNEL_MASK_CURSOR_ALL) && + !pDevEvo->hal->ValidateCursorSurface(pDevEvo, pSurfaceEvo)) { + return NULL; + } + + /* + * XXX If !requireCtxDma, fetched surfaces aren't going to be accessed by + * the display hardware, so they shouldn't need to be checked by + * nvEvoGetHeadSetStoragePitchValue(). These surfaces will be used as a + * texture by the 3d engine. But previously all surfaces were checked by + * nvEvoGetHeadSetStoragePitchValue() at registration time, and we don't + * know if nvEvoGetHeadSetStoragePitchValue() was protecting us from any + * surface dimensions that could cause trouble for the 3d engine. + */ + if ((channelMask & ~NV_EVO_CHANNEL_MASK_CURSOR_ALL) || !requireCtxDma) { + NvU8 planeIndex; + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + if (nvEvoGetHeadSetStoragePitchValue( + pDevEvo, + pSurfaceEvo->layout, + pSurfaceEvo->planes[planeIndex].pitch) == 0) { + return NULL; + } + } + } + + return pSurfaceEvo; +} + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NVEvoChannelMask channelMask) +{ + return GetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandle, + channelMask, + TRUE /* requireCtxDma */); +} + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoCtxDmaOk( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle) +{ + return GetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandle, 0x0 /* channelMask */, + FALSE /* requireCtxDma */); +} + +/*! + * Create a deferred request fifo, using the specified pSurfaceEvo. + */ +NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo) +{ + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvU32 ret; + + if (pSurfaceEvo->planes[0].rmObjectSizeInBytes < + sizeof(struct NvKmsDeferredRequestFifo)) { + return NULL; + } + + /* + * XXX validate that the surface is in sysmem; can we query that from + * resman? 
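+ *
+ * Whatever the answer, the size check above at least guarantees that
+ * the mapping created below is large enough to back a complete
+ * struct NvKmsDeferredRequestFifo.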
+ */ + + pDeferredRequestFifo = nvCalloc(1, sizeof(*pDeferredRequestFifo)); + + if (pDeferredRequestFifo == NULL) { + return NULL; + } + + /* Get a CPU mapping of the surface. */ + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfaceEvo->planes[0].rmHandle, + 0, + sizeof(*pDeferredRequestFifo->fifo), + (void **) &pDeferredRequestFifo->fifo, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFree(pDeferredRequestFifo); + return NULL; + } + + pDeferredRequestFifo->pSurfaceEvo = pSurfaceEvo; + + nvEvoIncrementSurfaceRefCnts(pSurfaceEvo); + + return pDeferredRequestFifo; +} + +/*! + * Free the deferred request fifo. + */ +void nvEvoUnregisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo) +{ + nvAssert(pDeferredRequestFifo->fifo != NULL); + nvAssert(pDeferredRequestFifo->pSurfaceEvo != NULL); + + nvRmApiUnmapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDeferredRequestFifo->pSurfaceEvo->planes[0].rmHandle, + pDeferredRequestFifo->fifo, + 0); + + nvEvoDecrementSurfaceRefCnts(pDeferredRequestFifo->pSurfaceEvo); + + nvFree(pDeferredRequestFifo); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c new file mode 100644 index 0000000..648af0c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c @@ -0,0 +1,796 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-utils.h" +#include "nvkms-types.h" +#include "nv_mode_timings_utils.h" +#include "nv_vasprintf.h" + +#include "nv_list.h" /* for nv_container_of() */ + +void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex, + const char *fmt, va_list ap) +{ + char *msg, prefix[10]; + const char *gpuPrefix = ""; + int level; + + switch (logType) { + default: + case EVO_LOG_INFO: level = NVKMS_LOG_LEVEL_INFO; break; + case EVO_LOG_WARN: level = NVKMS_LOG_LEVEL_WARN; break; + case EVO_LOG_ERROR: level = NVKMS_LOG_LEVEL_ERROR; break; + } + + msg = nv_vasprintf(fmt, ap); + if (msg == NULL) { + return; + } + + if (gpuLogIndex != NV_INVALID_GPU_LOG_INDEX) { + nvkms_snprintf(prefix, sizeof(prefix), "GPU:%d: ", gpuLogIndex); + gpuPrefix = prefix; + } + + nvkms_log(level, gpuPrefix, msg); + + nvFree(msg); +} + +void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLog(NVEvoLogType logType, const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + va_end(ap); +} + +#if defined(DEBUG) + +void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + va_end(ap); +} + +void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +#endif /* DEBUG */ + + +/*! + * Initialize the given NVEvoInfoString. + * + * Point the infoString at the specified character array. + */ +void nvInitInfoString(NVEvoInfoStringPtr pInfoString, + char *s, NvU16 totalLength) +{ + nvkms_memset(pInfoString, 0, sizeof(*pInfoString)); + pInfoString->s = s; + pInfoString->totalLength = totalLength; +} + + +/*! + * Append the text, described by 'format' and 'ap', to the infoString. + */ +static void LogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, va_list ap) +{ + char *s; + size_t size = pInfoString->totalLength - pInfoString->length; + int ret; + + if (pInfoString->s == NULL) { + return; + } + if (size <= 1) { + nvAssert(!"pInfoString too small"); + return; + } + + s = pInfoString->s + pInfoString->length; + + ret = nvkms_vsnprintf(s, size, format, ap); + + if (ret > 0) { + pInfoString->length += NV_MIN((size_t)ret, size - 1); + } + + /* + * If ret is larger than size, then we may need to increase + * totalLength to support logging everything that we are trying to + * log to this buffer. + */ + nvAssert(ret <= size); + + nvAssert(pInfoString->length < pInfoString->totalLength); + pInfoString->s[pInfoString->length] = '\0'; +} + + +/*! + * Append to the infoString, without any additions. + */ +void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + LogInfoString(pInfoString, format, ap); + va_end(ap); +} + + +/*! + * Append to the infoString, appending a newline. 
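+ *
+ * Illustrative sketch (the buffer and the value being logged are
+ * hypothetical, and the Rec type name assumes the usual NVKMS
+ * Rec/Ptr typedef convention):
+ *
+ *   char buf[256];
+ *   NVEvoInfoStringRec infoString;
+ *   nvInitInfoString(&infoString, buf, sizeof(buf));
+ *   nvEvoLogInfoString(&infoString, "width: %d", width);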
+ */ +void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + LogInfoString(pInfoString, format, ap); + va_end(ap); + + nvEvoLogInfoStringRaw(pInfoString, "\n"); +} + + +/*! + * The NVEvoApiHandlesRec-related functions below are used to manage + * sets of NvKms API handles. For the various NvKms objects (e.g., + * devices, disps, connectors, surfaces) clients will specify the + * object by handle, and NVKMS will look up the corresponding object. + * + * We store a pointer to the object in a dynamically allocated array, + * and use the handle to look up the pointer in the array. + * + * Note that handles are 1-based (valid handles are in the range + * [1,numPointers], and 0 is an invalid handle), while indices to the + * corresponding pointers are 0-based (valid indices are in the range + * [0,numPointers-1]). Subtract 1 from the handle to get the index + * for the pointer. + */ + +/*! + * Increase the size of the NVEvoApiHandles::pointers array. + * + * Reallocate the pointers array, increasing by defaultSize. + * Initialize the new region of memory. + */ +static NvBool GrowApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles) +{ + NvU32 newNumPointers = + pEvoApiHandles->numPointers + pEvoApiHandles->defaultSize; + size_t oldSize = pEvoApiHandles->numPointers * sizeof(void *); + size_t newSize = newNumPointers * sizeof(void *); + void **newPointers; + + /* Check for wrap in the newNumPointers computation. */ + if (newSize <= oldSize) { + return FALSE; + } + + newPointers = nvRealloc(pEvoApiHandles->pointers, newSize); + + if (newPointers == NULL) { + return FALSE; + } + + nvkms_memset(&newPointers[pEvoApiHandles->numPointers], 0, newSize - oldSize); + + pEvoApiHandles->pointers = newPointers; + pEvoApiHandles->numPointers = newNumPointers; + + return TRUE; +} + + +/*! + * Attempt to shrink the NVEvoApiHandles::pointers array. + * + * If high elements in the array are unused, reduce the array size in + * multiples of defaultSize. + */ +static void ShrinkApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles) +{ + NvU32 index; + NvU32 newNumPointers; + void **newPointers; + + /* If the array is already as small as it can be, we are done. */ + + if (pEvoApiHandles->numPointers == pEvoApiHandles->defaultSize) { + return; + } + + /* Find the highest non-empty element. */ + + for (index = pEvoApiHandles->numPointers - 1; index > 0; index--) { + if (pEvoApiHandles->pointers[index] != NULL) { + break; + } + } + + /* + * Compute the new array size by rounding index up to the next + * multiple of defaultSize. + */ + newNumPointers = ((index / pEvoApiHandles->defaultSize) + 1) * + pEvoApiHandles->defaultSize; + + /* If the array is already that size, we are done. */ + + if (pEvoApiHandles->numPointers == newNumPointers) { + return; + } + + newPointers = + nvRealloc(pEvoApiHandles->pointers, newNumPointers * sizeof(void *)); + + if (newPointers != NULL) { + pEvoApiHandles->pointers = newPointers; + pEvoApiHandles->numPointers = newNumPointers; + } +} + + +/*! + * Return true if 'pointer' is already present in pEvoApiHandles + */ +NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer) +{ + NvU32 index; + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] == pointer) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Create an NvKms API handle. 
+ * + * Create an available handle from pEvoApiHandles, and associate + * 'pointer' with the handle. + */ +NvKmsGenericHandle +nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, void *pointer) +{ + NvU32 index; + + if (pointer == NULL) { + return 0; + } + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] == NULL) { + goto availableIndex; + } + } + + /* + * Otherwise, there are no free elements in the pointers array: + * grow the array and try again. + */ + if (!GrowApiHandlesPointersArray(pEvoApiHandles)) { + return 0; + } + + /* fall through */ + +availableIndex: + + nvAssert(index < pEvoApiHandles->numPointers); + nvAssert(pEvoApiHandles->pointers[index] == NULL); + + pEvoApiHandles->pointers[index] = pointer; + + return index + 1; +} + + +/*! + * Retrieve a pointer that maps to an NvKms API handle. + * + * Return the pointer that nvEvoCreateApiHandle() associated with 'handle'. + */ +void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle handle) +{ + NvU32 index; + + if (handle == 0) { + return NULL; + } + + index = handle - 1; + + if (index >= pEvoApiHandles->numPointers) { + return NULL; + } + + return pEvoApiHandles->pointers[index]; +} + + +/*! + * Retrieve a pointer that maps to the next NvKms API handle. + * + * This is intended to be used by the + * FOR_ALL_POINTERS_IN_EVO_API_HANDLES() macro. On the first + * iteration, *pHandle == 0, and this will return the first pointer it + * finds in the pointer array. The returned *pHandle will be the + * location to begin searching on the next iteration, and so on. + * + * Once there are no more non-zero elements in the pointer array, + * return NULL. + */ +void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle *pHandle) +{ + NvU32 index = *pHandle; + + for (; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] != NULL) { + *pHandle = index + 1; + return pEvoApiHandles->pointers[index]; + } + } + + return NULL; +} + + +/*! + * Remove an NvKms API handle. + * + * Clear the 'handle' entry, and its corresponding pointer, from pEvoApiHandles. + */ +void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + NvKmsGenericHandle handle) +{ + NvU32 index; + + if (handle == 0) { + return; + } + + index = handle - 1; + + if (index >= pEvoApiHandles->numPointers) { + return; + } + + pEvoApiHandles->pointers[index] = NULL; + + ShrinkApiHandlesPointersArray(pEvoApiHandles); +} + + +/* Only used in nvAssert, so only build into debug builds to avoid never-used + * warnings */ +#if defined(DEBUG) +/*! + * Return the number of non-NULL pointers in the pointer array. + */ +static NvU32 +CountApiHandles(const NVEvoApiHandlesRec *pEvoApiHandles) +{ + NvU32 index, count = 0; + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] != NULL) { + count++; + } + } + + return count; +} +#endif /* DEBUG */ + + +/*! + * Initialize the NVEvoApiHandlesRec. + * + * This should be called before any + * nvEvo{Create,GetPointerFrom,Destroy}ApiHandle() calls on this + * pEvoApiHandles. + * + * The pointer array for the pEvoApiHandles will be managed in + * multiples of 'defaultSize'. 
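+ *
+ * A minimal lifecycle sketch (the defaultSize of 16 and pObject are
+ * arbitrary placeholders):
+ *
+ *   NVEvoApiHandlesRec apiHandles;
+ *   nvEvoInitApiHandles(&apiHandles, 16);
+ *   NvKmsGenericHandle handle =
+ *       nvEvoCreateApiHandle(&apiHandles, pObject);
+ *   void *ptr = nvEvoGetPointerFromApiHandle(&apiHandles, handle);
+ *   nvEvoDestroyApiHandle(&apiHandles, handle);
+ *   nvEvoDestroyApiHandles(&apiHandles);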
+ */ +NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles, NvU32 defaultSize) +{ + nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles)); + + pEvoApiHandles->defaultSize = defaultSize; + + return GrowApiHandlesPointersArray(pEvoApiHandles); +} + + +/*! + * Free the NVEvoApiHandlesPtr resources. + */ +void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles) +{ + nvAssert(CountApiHandles(pEvoApiHandles) == 0); + + nvFree(pEvoApiHandles->pointers); + + nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles)); +} + +NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return 6; + case NVKMS_PIXEL_DEPTH_24_444: + return 8; + case NVKMS_PIXEL_DEPTH_30_444: + return 10; + } + nvAssert(!"Unknown NVKMS_PIXEL_DEPTH"); + return 0; +} + +/* Import function required by nvBuildModeName() */ + +int nvBuildModeNameSnprintf(char *str, size_t size, const char *format, ...) +{ + va_list ap; + int ret; + + va_start(ap, format); + ret = nvkms_vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +/* Import functions required by nv_vasprintf() */ + +void *nv_vasprintf_alloc(size_t size) +{ + return nvAlloc(size); +} + +void nv_vasprintf_free(void *ptr) +{ + nvFree(ptr); +} + +int nv_vasprintf_vsnprintf(char *str, size_t size, + const char *format, va_list ap) +{ + return nvkms_vsnprintf(str, size, format, ap); +} + +/* + * Track the size of each allocation, so that it can be passed to + * nvkms_free(). + */ +typedef struct { + size_t size; /* includes sizeof(nvkms_memory_info_t) */ + char data[] __attribute__((aligned(8))); +} nvkms_memory_info_t; + +void *nvInternalAlloc(size_t size, const NvBool zero) +{ + size_t totalSize = size + sizeof(nvkms_memory_info_t); + nvkms_memory_info_t *p; + + if (totalSize < size) { /* overflow in the above addition */ + return NULL; + } + + p = nvkms_alloc(totalSize, zero); + + if (p == NULL) { + return NULL; + } + + p->size = totalSize; + + return p->data; +} + +void *nvInternalRealloc(void *ptr, size_t size) +{ + nvkms_memory_info_t *p = NULL; + void *newptr; + + if (ptr == NULL) { + /* realloc with a ptr of NULL is equivalent to alloc. */ + return nvInternalAlloc(size, FALSE); + } + + if (size == 0) { + /* realloc with a size of 0 is equivalent to free. */ + nvInternalFree(ptr); + return NULL; + } + + p = nv_container_of(ptr, nvkms_memory_info_t, data); + + newptr = nvInternalAlloc(size, FALSE); + + if (newptr != NULL) { + size_t oldsize = p->size - sizeof(nvkms_memory_info_t); + size_t copysize = (size < oldsize) ? size : oldsize; + nvkms_memcpy(newptr, ptr, copysize); + nvInternalFree(ptr); + } + + return newptr; +} + +void nvInternalFree(void *ptr) +{ + nvkms_memory_info_t *p; + + if (ptr == NULL) { + return; + } + + p = nv_container_of(ptr, nvkms_memory_info_t, data); + + nvkms_free(p, p->size); +} + +char *nvInternalStrDup(const char *str) +{ + size_t len; + char *newstr; + + if (str == NULL) { + return NULL; + } + + len = nvkms_strlen(str) + 1; + + newstr = nvInternalAlloc(len, FALSE); + + if (newstr == NULL) { + return NULL; + } + + nvkms_memcpy(newstr, str, len); + + return newstr; +} + +/*! + * Look up the value of a key in the set of registry keys provided at device + * allocation time, copied from the client request during nvAllocDevEvo(). + * + * \param[in] pDevEvo The device with regkeys to be checked. + * + * \param[in] key The name of the key to look up. + * + * \param[out] val The value of the key, if the key was specified. 
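+ *
+ * For example (hypothetical key name; the available keys are whatever
+ * the client supplied in its allocation request):
+ *
+ *     NvU32 val;
+ *
+ *     if (!nvGetRegkeyValue(pDevEvo, "SomeKey", &val)) {
+ *         val = 0; /* key absent: fall back to a default */
+ *     }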
+ * + * \return Whether the key was specified in the registry. + */ +NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo, + const char *key, NvU32 *val) +{ + int i; + + for (i = 0; i < ARRAY_LEN(pDevEvo->registryKeys); i++) { + if (nvkms_strcmp(key, pDevEvo->registryKeys[i].name) == 0) { + *val = pDevEvo->registryKeys[i].value; + return TRUE; + } + } + + return FALSE; +} + +#if defined(DEBUG) + +#include "nv_memory_tracker.h" + +void *nvDebugAlloc(size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedAlloc(&nvEvoGlobal.debugMemoryAllocationList, + size, line, file); +} + +void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedCalloc(&nvEvoGlobal.debugMemoryAllocationList, + nmemb, size, line, file); +} + +void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedRealloc(&nvEvoGlobal.debugMemoryAllocationList, + ptr, size, line, file); +} + +void nvDebugFree(void *ptr) +{ + nvMemoryTrackerTrackedFree(ptr); +} + +char *nvDebugStrDup(const char *str, int line, const char *file) +{ + size_t size = nvkms_strlen(str); + char *newStr = nvDebugAlloc(size + 1, line, file); + + if (newStr == NULL) { + return NULL; + } + + nvkms_memcpy(newStr, str, size); + newStr[size] = '\0'; + + return newStr; +} + +void nvReportUnfreedAllocations(void) +{ + nvMemoryTrackerPrintUnfreedAllocations( + &nvEvoGlobal.debugMemoryAllocationList); +} + +void nvMemoryTrackerPrintf(const char *format, ...) +{ + va_list ap; + va_start(ap, format); + nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, format, ap); + va_end(ap); +} + +void *nvMemoryTrackerAlloc(size_t size) +{ + return nvkms_alloc(size, FALSE); +} + +void nvMemoryTrackerFree(void *ptr, size_t size) +{ + nvkms_free(ptr, size); +} + +void nvMemoryTrackerMemset(void *s, int c, size_t n) +{ + nvkms_memset(s, c, n); +} + +void nvMemoryTrackerMemcpy(void *dest, const void *src, size_t n) +{ + nvkms_memcpy(dest, src, n); +} + +#endif /* DEBUG */ + +/* + * The C++ displayPort library source code introduces a reference to + * __cxa_pure_virtual. This should never actually get called, so + * simply assert. + */ +void __cxa_pure_virtual(void); + +void __cxa_pure_virtual(void) +{ + nvAssert(!"Pure virtual function called"); +} + +/* Import functions required by unix_rm_handle */ + +#if defined(DEBUG) + +void nvUnixRmHandleDebugAssert(const char *expString, + const char *filenameString, + const char *funcString, + const unsigned lineNumber) +{ + nvDebugAssert(expString, filenameString, funcString, lineNumber); +} + +void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...) 
+{
+
+    va_list ap;
+    va_start(ap, fmt);
+
+    /* skip verbose messages */
+    if (level < NV_UNIX_RM_HANDLE_DEBUG_VERBOSE) {
+        nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, fmt, ap);
+    }
+
+    va_end(ap);
+}
+
+#endif /* DEBUG */
+
+void *nvUnixRmHandleReallocMem(void *oldPtr, NvLength newSize)
+{
+    return nvRealloc(oldPtr, newSize);
+}
+
+void nvUnixRmHandleFreeMem(void *ptr)
+{
+    nvFree(ptr);
+}
+
+/* Import functions required by nv_assert */
+
+#if defined(DEBUG)
+
+void nvDebugAssert(const char *expString, const char *filenameString,
+                   const char *funcString, const unsigned int lineNumber)
+{
+    nvEvoLog(EVO_LOG_WARN, "NVKMS Assert @%s:%d:%s(): '%s'",
+             filenameString, lineNumber, funcString, expString);
+}
+
+#endif /* DEBUG */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c
new file mode 100644
index 0000000..72b12aa
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c
@@ -0,0 +1,177 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-dma.h"
+#include "nvkms-evo.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-vrr.h"
+#include "dp/nvdp-connector-event-sink.h"
+#include "nvkms-hdmi.h"
+#include "nvkms-dpy.h"
+
+#include <ctrl/ctrl0000/ctrl0000unix.h> /* NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD */
+
+/*!
+ * Allocate the VRR semaphore surface.
+ *
+ * Only one array of VRR semaphores is needed per "head group", which for our
+ * purposes means a pDevEvo. This array is allocated when the device is
+ * initialized and kept around for the lifetime of the pDevEvo.
+ */
+void nvAllocVrrEvo(NVDevEvoPtr pDevEvo)
+{
+    NvU32 handle;
+    NvU64 size = NVKMS_VRR_SEMAPHORE_SURFACE_SIZE;
+
+    /* On GPUs that support the HEAD_SET_DISPLAY_RATE method (nvdisplay), we
+     * don't need a VRR semaphore surface.
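+     * Older hardware instead relies on this semaphore surface: a small
+     * system-memory allocation (NVKMS_VRR_SEMAPHORE_SURFACE_SIZE bytes)
+     * created below, which clients can reach through the file descriptor
+     * exported by nvExportVrrSemaphoreSurface().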
*/ + if (pDevEvo->hal->caps.supportsDisplayRate) { + return; + } + + handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmAllocSysmem(pDevEvo, handle, NULL, &pDevEvo->vrr.pSemaphores, + size, NVKMS_MEMORY_NISO)) { + pDevEvo->vrr.semaphoreHandle = handle; + } else { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate G-SYNC semaphore memory"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + } +} + +void nvFreeVrrEvo(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->vrr.semaphoreHandle != 0) { + if (pDevEvo->vrr.pSemaphores != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->vrr.semaphoreHandle, + pDevEvo->vrr.pSemaphores, + 0); + pDevEvo->vrr.pSemaphores = NULL; + } + nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle, + pDevEvo->vrr.semaphoreHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->vrr.semaphoreHandle); + pDevEvo->vrr.semaphoreHandle = 0; + } +} + +NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd) +{ + // Export the memory as an FD. + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { }; + const NvU32 hMemory = pDevEvo->vrr.semaphoreHandle; + NvU32 status; + + if (hMemory == 0) { + return FALSE; + } + + exportParams.fd = fd; + exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + exportParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle; + exportParams.object.data.rmObject.hObject = hMemory; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD, + &exportParams, sizeof(exportParams)); + + return status == NVOS_STATUS_SUCCESS; +} + +NvBool nvDispSupportsVrr( + const NVDispEvoRec *pDispEvo) +{ + return FALSE; +} + +void nvDisableVrr(NVDevEvoPtr pDevEvo) +{ + return; +} + +void nvGetDpyMinRefreshRateValidValues( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyVRRType vrrType, + const NvU32 edidTimeoutMicroseconds, + NvU32 *minMinRefreshRate, + NvU32 *maxMinRefreshRate) +{ + return; +} + +void nvEnableVrr( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetModeRequest *pRequest) +{ + return; +} + +void nvSetVrrActive( + NVDevEvoPtr pDevEvo, + NvBool active) +{ + return; +} + +void nvApplyVrrBaseFlipOverrides( + const NVDispEvoRec *pDispEvo, + NvU32 head, + const NVFlipChannelEvoHwState *pOld, + NVFlipChannelEvoHwState *pNew) +{ + return; +} + +void nvCancelVrrFrameReleaseTimers( + NVDevEvoPtr pDevEvo) +{ + return; +} + +void nvSetNextVrrFlipTypeAndIndex( + NVDevEvoPtr pDevEvo, + struct NvKmsFlipReply *reply) +{ + return; +} + +void nvTriggerVrrUnstallMoveCursor( + NVDispEvoPtr pDispEvo) +{ + return; +} + +void nvTriggerVrrUnstallSetCursorImage( + NVDispEvoPtr pDispEvo, + NvBool ctxDmaChanged) +{ + return; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c new file mode 100644 index 0000000..1ef4182 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c @@ -0,0 +1,5036 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms.h"
+#include "nvkms-private.h"
+#include "nvkms-api.h"
+
+#include "nvkms-types.h"
+#include "nvkms-utils.h"
+#include "nvkms-console-restore.h"
+#include "nvkms-dpy.h"
+#include "nvkms-dma.h"
+#include "nvkms-evo.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvkms-modepool.h"
+#include "nvkms-modeset.h"
+#include "nvkms-attributes.h"
+#include "nvkms-framelock.h"
+#include "nvkms-surface.h"
+#include "nvkms-3dvision.h"
+#include "nvkms-ioctl.h"
+#include "nvkms-cursor.h" /* nvSetCursorImage, nvEvoMoveCursor */
+#include "nvkms-flip.h" /* nvFlipEvo */
+#include "nvkms-vrr.h"
+
+#include "dp/nvdp-connector.h"
+
+#include "nvUnixVersion.h" /* NV_VERSION_STRING */
+#include <class/cl0000.h> /* NV01_NULL_OBJECT/NV01_ROOT */
+
+#include "nv_list.h"
+
+
+/*! \file
+ *
+ * This source file implements the API of NVKMS, built around open,
+ * close, and ioctl file operations.
+ *
+ * An NvKmsPerOpen is stored "per-open"; all API handles are specific
+ * to a per-open instance. The NvKmsPerOpen is allocated during each
+ * nvKmsOpen() call, and freed during the corresponding nvKmsClose()
+ * call.
+ *
+ * An NvKmsPerOpenDev stores the API handles for the device and all
+ * the disps and connectors on the device. It is allocated during
+ * nvKmsIoctl(ALLOC_DEVICE), and freed during nvKmsIoctl(FREE_DEVICE).
+ */
+
+
+/*
+ * When the NVKMS device file is opened, the per-open structure could
+ * be used for one of several actions, denoted by its "type". The
+ * per-open type starts as Undefined. The per-open's first use
+ * defines its type. Once the type transitions from Undefined to
+ * anything, it can never transition to any other type.
+ */
+enum NvKmsPerOpenType {
+    /*
+     * The per-open is used for making ioctl calls to make requests of
+     * NVKMS.
+     */
+    NvKmsPerOpenTypeIoctl,
+
+    /*
+     * The per-open is used for granting access to an NVKMS registered
+     * surface.
+     */
+    NvKmsPerOpenTypeGrantSurface,
+
+    /*
+     * The per-open is used for granting permissions.
+     */
+    NvKmsPerOpenTypeGrantPermissions,
+
+    /*
+     * The per-open is used for granting access to a swap group.
+     */
+    NvKmsPerOpenTypeGrantSwapGroup,
+
+    /*
+     * The per-open is used to unicast a specific event.
+     */
+    NvKmsPerOpenTypeUnicastEvent,
+
+    /*
+     * The per-open is currently undefined (this is the initial
+     * state).
+ */ + NvKmsPerOpenTypeUndefined, +}; + +enum NvKmsUnicastEventType { + /* Used by: + * NVKMS_IOCTL_JOIN_SWAP_GROUP */ + NvKmsUnicastEventTypeDeferredRequest, + + /* Undefined, this indicates the unicast fd is available for use. */ + NvKmsUnicastEventTypeUndefined, +}; + +struct NvKmsPerOpenConnector { + NVConnectorEvoPtr pConnectorEvo; + NvKmsConnectorHandle nvKmsApiHandle; +}; + +struct NvKmsPerOpenFrameLock { + NVFrameLockEvoPtr pFrameLockEvo; + int refCnt; + NvKmsFrameLockHandle nvKmsApiHandle; +}; + +struct NvKmsPerOpenDisp { + NVDispEvoPtr pDispEvo; + NvKmsDispHandle nvKmsApiHandle; + NvKmsFrameLockHandle frameLockHandle; + NVEvoApiHandlesRec connectorHandles; + struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP]; + NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsPerOpenDev { + NVDevEvoPtr pDevEvo; + NvKmsDeviceHandle nvKmsApiHandle; + NVEvoApiHandlesRec dispHandles; + NVEvoApiHandlesRec surfaceHandles; + struct NvKmsFlipPermissions flipPermissions; + struct NvKmsModesetPermissions modesetPermissions; + struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES]; + NvBool isPrivileged; + NVEvoApiHandlesRec deferredRequestFifoHandles; +}; + +struct NvKmsPerOpenEventListEntry { + NVListRec eventListEntry; + struct NvKmsEvent event; +}; + +struct NvKmsPerOpen { + nvkms_per_open_handle_t *pOpenKernel; + NvU32 pid; + enum NvKmsClientType clientType; + NVListRec perOpenListEntry; + NVListRec perOpenIoctlListEntry; + enum NvKmsPerOpenType type; + + union { + struct { + NVListRec eventList; + NvU32 eventInterestMask; + NVEvoApiHandlesRec devHandles; + NVEvoApiHandlesRec frameLockHandles; + } ioctl; + + struct { + NVSurfaceEvoPtr pSurfaceEvo; + } grantSurface; + + struct { + NVDevEvoPtr pDevEvo; + NVSwapGroupPtr pSwapGroup; + } grantSwapGroup; + + struct { + NVDevEvoPtr pDevEvo; + struct NvKmsPermissions permissions; + } grantPermissions; + + struct { + /* + * A unicast event NvKmsPerOpen is assigned to an object, so that + * that object can generate events on the unicast event. Store a + * pointer to that object, so that we can clear the pointer when the + * unicast event NvKmsPerOpen is closed. + */ + enum NvKmsUnicastEventType type; + union { + struct { + NVDeferredRequestFifoPtr pDeferredRequestFifo; + } deferred; + } e; + } unicastEvent; + }; +}; + +static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo); +static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo); + +static NVListRec perOpenList = NV_LIST_INIT(&perOpenList); +static NVListRec perOpenIoctlList = NV_LIST_INIT(&perOpenIoctlList); + +/*! + * Check if there is an NvKmsPerOpenDev on this NvKmsPerOpen that has + * the specified deviceId. + */ +static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, NvU32 deviceId) +{ + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + if (pOpenDev->pDevEvo->usesTegraDevice && + (deviceId == NVKMS_DEVICE_ID_TEGRA)) { + return TRUE; + } else if (pOpenDev->pDevEvo->deviceId == deviceId) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Get the NvKmsPerOpenDev described by NvKmsPerOpen + deviceHandle. 
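+ *
+ * Returns NULL if deviceHandle does not name a device on this per-open.
+ * The ioctl entry points below typically use it as (sketch):
+ *
+ *     struct NvKmsPerOpenDev *pOpenDev =
+ *         GetPerOpenDev(pOpen, pParams->request.deviceHandle);
+ *
+ *     if (pOpenDev == NULL) {
+ *         return FALSE;
+ *     }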
+ */ +static struct NvKmsPerOpenDev *GetPerOpenDev( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle) +{ + if (pOpen == NULL) { + return NULL; + } + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle); +} + + +/*! + * Get the NvKmsPerOpenDev and NvKmsPerOpenDisp described by + * NvKmsPerOpen + deviceHandle + dispHandle. + */ +static NvBool GetPerOpenDevAndDisp( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + struct NvKmsPerOpenDev **ppOpenDev, + struct NvKmsPerOpenDisp **ppOpenDisp) +{ + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, + dispHandle); + + if (pOpenDisp == NULL) { + return FALSE; + } + + *ppOpenDev = pOpenDev; + *ppOpenDisp = pOpenDisp; + + return TRUE; +} + + +/*! + * Get the NvKmsPerOpenDisp described by NvKmsPerOpen + deviceHandle + + * dispHandle. + */ +static struct NvKmsPerOpenDisp *GetPerOpenDisp( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle) +{ + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + return NULL; + } + + return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); +} + + +/*! + * Get the NvKmsPerOpenConnector described by NvKmsPerOpen + + * deviceHandle + dispHandle + connectorHandle. + */ +static struct NvKmsPerOpenConnector *GetPerOpenConnector( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NvKmsConnectorHandle connectorHandle) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return NULL; + } + + return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles, + connectorHandle); +} + + +/*! + * Get the NVDpyEvoRec described by NvKmsPerOpen + deviceHandle + + * dispHandle + dpyId. + */ +static NVDpyEvoRec *GetPerOpenDpy( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NVDpyId dpyId) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return NULL; + } + + return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId); +} + + +/*! + * Get the NvKmsPerOpenFrameLock described by pOpen + frameLockHandle. + */ +static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock( + const struct NvKmsPerOpen *pOpen, + NvKmsFrameLockHandle frameLockHandle) +{ + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + frameLockHandle); +} + + +/*! + * Free the NvKmsPerOpenFrameLock associated with this NvKmsPerOpenDisp. + * + * Multiple disps can be assigned to the same framelock object, so + * NvKmsPerOpenFrameLock is reference counted: the object is freed + * once all NvKmsPerOpenDisps remove their reference to it. + * + * \param[in,out] pOpen The per-open data, to which the + * NvKmsPerOpenFrameLock is assigned. + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding + * NvKmsPerOpenFrameLock should be freed. 
+ */ +static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDisp *pOpenDisp) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpenFrameLock = + nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenDisp->frameLockHandle); + if (pOpenFrameLock == NULL) { + return; + } + + pOpenDisp->frameLockHandle = 0; + + pOpenFrameLock->refCnt--; + + if (pOpenFrameLock->refCnt != 0) { + return; + } + + nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock->nvKmsApiHandle); + nvFree(pOpenFrameLock); +} + + +/*! + * Allocate and initialize an NvKmsPerOpenFrameLock. + * + * If the disp described by the specified NvKmsPerOpenDisp has a + * framelock object, allocate an NvKmsPerOpenFrameLock for it. + * + * Multiple disps can be assigned to the same framelock object, so + * NvKmsPerOpenFrameLock is reference counted: we first look to see if + * an NvKmsPerOpenFrameLock for this disp's framelock object already + * exists. If so, we increment its reference count. Otherwise, we + * allocate a new NvKmsPerOpenFrameLock. + * + * \param[in,out] pOpen The per-open data, to which the + * new NvKmsPerOpenFrameLock should be assigned. + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding + * NvKmsPerOpenFrameLock should be allocated. + */ +static NvBool AllocPerOpenFrameLock( + struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDisp *pOpenDisp) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvKmsGenericHandle handle; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pFrameLockEvo == NULL) { + return TRUE; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock, handle) { + if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { + goto done; + } + } + + pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock)); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pOpenFrameLock->pFrameLockEvo = pFrameLockEvo; + pOpenFrameLock->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock); + + if (pOpenFrameLock->nvKmsApiHandle == 0) { + nvFree(pOpenFrameLock); + return FALSE; + } + +done: + pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle; + pOpenFrameLock->refCnt++; + return TRUE; +} + + +/*! + * Get the NvKmsConnectorHandle that corresponds to the given + * NVConnectorEvoRec on the NvKmsPerOpen + deviceHandle + dispHandle. + */ +static NvKmsConnectorHandle ConnectorEvoToConnectorHandle( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NVConnectorEvoRec *pConnectorEvo) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + struct NvKmsPerOpenConnector *pOpenConnector; + NvKmsGenericHandle connector; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return 0; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, + pOpenConnector, connector) { + if (pOpenConnector->pConnectorEvo == pConnectorEvo) { + return pOpenConnector->nvKmsApiHandle; + } + } + + return 0; +} + + +/*! + * Get the NvKmsDeviceHandle and NvKmsDispHandle that corresponds to + * the given NVDispEvoRec on the NvKmsPerOpen. 
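+ *
+ * This is the inverse of the handle lookups above: given a pDispEvo
+ * known to the core, recover the handles this client uses to name it
+ * (e.g., when describing an event back to the client in its own terms).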
+ */ +static NvBool DispEvoToDevAndDispHandles( + const struct NvKmsPerOpen *pOpen, + const NVDispEvoRec *pDispEvo, + NvKmsDeviceHandle *pDeviceHandle, + NvKmsDispHandle *pDispHandle) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + if (pOpenDev->pDevEvo != pDevEvo) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + if (pOpenDisp->pDispEvo != pDispEvo) { + continue; + } + + *pDeviceHandle = pOpenDev->nvKmsApiHandle; + *pDispHandle = pOpenDisp->nvKmsApiHandle; + + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Get the NvKmsPerOpenDev that corresponds to the given NVDevEvoRec + * on the NvKmsPerOpen. + */ +static struct NvKmsPerOpenDev *DevEvoToOpenDev( + const struct NvKmsPerOpen *pOpen, + const NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + if (pOpenDev->pDevEvo == pDevEvo) { + return pOpenDev; + } + } + + return NULL; +} + + +/*! + * Get the NvKmsFrameLockHandle that corresponds to the given + * NVFrameLockEvoRec on the NvKmsPerOpen. + */ +static NvBool FrameLockEvoToFrameLockHandle( + const struct NvKmsPerOpen *pOpen, + const NVFrameLockEvoRec *pFrameLockEvo, + NvKmsFrameLockHandle *pFrameLockHandle) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NvKmsGenericHandle handle; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock, handle) { + + if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { + *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle; + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Clear the specified NvKmsPerOpenConnector. + * + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the + * NvKmsPerOpenConnector is assigned. + * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to be cleared. + */ +static void ClearPerOpenConnector( + struct NvKmsPerOpenDisp *pOpenDisp, + struct NvKmsPerOpenConnector *pOpenConnector) +{ + nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles, + pOpenConnector->nvKmsApiHandle); + nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector)); +} + + +/*! + * Initialize an NvKmsPerOpenConnector. + * + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the + * NvKmsPerOpenConnector is assigned. + * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to initialize. + * \param[in] pConnectorEvo The connector that the NvKmsPerOpenConnector + * corresponds to. + * + * \return If the NvKmsPerOpenConnector is successfully initialized, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool InitPerOpenConnector( + struct NvKmsPerOpenDisp *pOpenDisp, + struct NvKmsPerOpenConnector *pOpenConnector, + NVConnectorEvoPtr pConnectorEvo) +{ + pOpenConnector->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector); + + if (pOpenConnector->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenConnector->pConnectorEvo = pConnectorEvo; + + return TRUE; + +fail: + ClearPerOpenConnector(pOpenDisp, pOpenConnector); + return FALSE; +} + +/*! + * Clear the specified NvKmsPerOpenDisp. 
+ *
+ * \param[in,out] pOpen      The per-open data to which the NvKmsPerOpenDev
+ *                           is assigned.
+ * \param[in,out] pOpenDev   The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
+ *                           is assigned.
+ * \param[in,out] pOpenDisp  The NvKmsPerOpenDisp to be cleared.
+ */
+static void ClearPerOpenDisp(
+    struct NvKmsPerOpen *pOpen,
+    struct NvKmsPerOpenDev *pOpenDev,
+    struct NvKmsPerOpenDisp *pOpenDisp)
+{
+    struct NvKmsPerOpenConnector *pOpenConnector;
+    NvKmsGenericHandle connector;
+
+    FreePerOpenFrameLock(pOpen, pOpenDisp);
+
+    FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles,
+                                        pOpenConnector, connector) {
+        ClearPerOpenConnector(pOpenDisp, pOpenConnector);
+    }
+
+    /* Destroy the API handle structures. */
+    nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles);
+
+    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
+        nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]);
+    }
+
+    nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle);
+
+    nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp));
+}
+
+
+/*!
+ * Initialize an NvKmsPerOpenDisp.
+ *
+ * \param[in,out] pOpen      The per-open data to which the NvKmsPerOpenDev
+ *                           is assigned.
+ * \param[in,out] pOpenDev   The NvKmsPerOpenDev to which the NvKmsPerOpenDisp
+ *                           is assigned.
+ * \param[in,out] pOpenDisp  The NvKmsPerOpenDisp to initialize.
+ * \param[in]     pDispEvo   The disp that the NvKmsPerOpenDisp corresponds to.
+ *
+ * \return If the NvKmsPerOpenDisp is successfully initialized, return TRUE.
+ *         Otherwise, return FALSE.
+ */
+static NvBool InitPerOpenDisp(
+    struct NvKmsPerOpen *pOpen,
+    struct NvKmsPerOpenDev *pOpenDev,
+    struct NvKmsPerOpenDisp *pOpenDisp,
+    NVDispEvoPtr pDispEvo)
+{
+    NVConnectorEvoPtr pConnectorEvo;
+    NvU32 connector;
+
+    pOpenDisp->nvKmsApiHandle =
+        nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp);
+
+    if (pOpenDisp->nvKmsApiHandle == 0) {
+        goto fail;
+    }
+
+    pOpenDisp->pDispEvo = pDispEvo;
+
+    if (nvListCount(&pDispEvo->connectorList) >=
+        ARRAY_LEN(pOpenDisp->connector)) {
+        nvAssert(!"More connectors on this disp than NVKMS can handle.");
+        goto fail;
+    }
+
+    if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles,
+                             ARRAY_LEN(pOpenDisp->connector))) {
+        goto fail;
+    }
+
+    connector = 0;
+    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
+        if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector],
+                                  pConnectorEvo)) {
+            goto fail;
+        }
+        connector++;
+    }
+
+    /* Initialize the vblankSyncObjectHandles for each head. */
+    for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) {
+        if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i],
+                                 NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) {
+            goto fail;
+        }
+    }
+
+    if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) {
+        goto fail;
+    }
+
+    return TRUE;
+
+fail:
+    ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp);
+    return FALSE;
+}
+
+/*!
+ * Check that the NvKmsPermissions make sense.
+ */
+static NvBool ValidateNvKmsPermissions(
+    const NVDevEvoRec *pDevEvo,
+    const struct NvKmsPermissions *pPermissions)
+{
+    if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) {
+        NvU32 d, h;
+
+        for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) {
+            for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) {
+
+                NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask;
+
+                if (layerMask == 0) {
+                    continue;
+                }
+
+                if (nvHasBitAboveMax(layerMask, pDevEvo->head[h].numLayers)) {
+                    return FALSE;
+                }
+
+                /*
+                 * If the above blocks didn't 'continue', then there
+                 * are permissions specified for this disp+head. Is
+                 * the specified disp+head in range for the current
+                 * configuration?
+                 */
+                if (d >= pDevEvo->nDispEvo) {
+                    return FALSE;
+                }
+
+                if (h >= pDevEvo->numHeads) {
+                    return FALSE;
+                }
+            }
+        }
+    } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) {
+        NvU32 d, h;
+
+        for (d = 0; d < ARRAY_LEN(pPermissions->modeset.disp); d++) {
+            for (h = 0; h < ARRAY_LEN(pPermissions->modeset.disp[d].head); h++) {
+
+                NVDpyIdList dpyIdList =
+                    pPermissions->modeset.disp[d].head[h].dpyIdList;
+
+                if (nvDpyIdListIsEmpty(dpyIdList)) {
+                    continue;
+                }
+
+                /*
+                 * If the above blocks didn't 'continue', then there
+                 * are permissions specified for this disp+head. Is
+                 * the specified disp+head in range for the current
+                 * configuration?
+                 */
+                if (d >= pDevEvo->nDispEvo) {
+                    return FALSE;
+                }
+
+                if (h >= pDevEvo->numHeads) {
+                    return FALSE;
+                }
+            }
+        }
+    } else {
+        return FALSE;
+    }
+
+    return TRUE;
+}
+
+/*!
+ * Assign pPermissions with the maximum permissions possible for
+ * the pDevEvo.
+ */
+static void AssignFullNvKmsFlipPermissions(
+    const NVDevEvoRec *pDevEvo,
+    struct NvKmsFlipPermissions *pPermissions)
+{
+    NvU32 dispIndex, head;
+
+    nvkms_memset(pPermissions, 0, sizeof(*pPermissions));
+
+    for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) {
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            pPermissions->disp[dispIndex].head[head].layerMask =
+                NVBIT(pDevEvo->head[head].numLayers) - 1;
+        }
+    }
+}
+
+static void AssignFullNvKmsModesetPermissions(
+    const NVDevEvoRec *pDevEvo,
+    struct NvKmsModesetPermissions *pPermissions)
+{
+    NvU32 dispIndex, head;
+
+    nvkms_memset(pPermissions, 0, sizeof(*pPermissions));
+
+    for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) {
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            pPermissions->disp[dispIndex].head[head].dpyIdList =
+                nvAllDpyIdList();
+        }
+    }
+}
+
+/*!
+ * Set the modeset owner to pOpenDev.
+ *
+ * \param pOpenDev  The per-open device structure for the new modeset owner.
+ * \return FALSE if there was already a modeset owner. TRUE otherwise.
+ */
+static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev)
+{
+    NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo;
+
+    if (pDevEvo->modesetOwner == pOpenDev) {
+        return TRUE;
+    }
+
+    if (pDevEvo->modesetOwner != NULL) {
+        return FALSE;
+    }
+
+    /*
+     * If claiming modeset ownership, undo any SST forcing imposed by
+     * console restore.
+     */
+    if (pOpenDev != pDevEvo->pNvKmsOpenDev) {
+        nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
+    }
+
+    pDevEvo->modesetOwner = pOpenDev;
+
+    AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions);
+    AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions);
+
+    pDevEvo->modesetOwnerChanged = TRUE;
+
+    return TRUE;
+}
+
+
+/*!
+ * Clear permissions on the specified device for all NvKmsPerOpens.
+ *
+ * For NvKmsPerOpen::type==Ioctl, clear the permissions, except for the
+ * specified pOpenDevExclude.
+ *
+ * For NvKmsPerOpen::type==GrantPermissions, clear
+ * NvKmsPerOpen::grantPermissions and reset NvKmsPerOpen::type to
+ * Undefined.
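+ *
+ * This is called, for example, when modeset ownership is released (see
+ * ReleaseModesetOwnership() below), so that permissions granted by the
+ * previous modeset owner do not outlive its ownership.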
+ */ +static void RevokePermissionsInternal( + const NvU32 typeBitmask, + const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDevExclude) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) && + (pOpen->grantPermissions.pDevEvo == pDevEvo) && + (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) { + nvkms_memset(&pOpen->grantPermissions, 0, + sizeof(pOpen->grantPermissions)); + pOpen->type = NvKmsPerOpenTypeUndefined; + } + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenDev *pOpenDev = + DevEvoToOpenDev(pOpen, pDevEvo); + + if (pOpenDev == NULL) { + continue; + } + + if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) { + continue; + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) { + nvkms_memset(&pOpenDev->flipPermissions, 0, + sizeof(pOpenDev->flipPermissions)); + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) { + nvkms_memset(&pOpenDev->modesetPermissions, 0, + sizeof(pOpenDev->modesetPermissions)); + } + } + } +} + +static void ReallocCoreChannel(NVDevEvoRec *pDevEvo) +{ + if (nvAllocCoreChannelEvo(pDevEvo)) { + nvDPSetAllowMultiStreaming(pDevEvo, FALSE /* allowMST */); + AllocSurfaceCtxDmasForAllOpens(pDevEvo); + } +} + +static void RestoreConsole(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->isSOCDisplay) + return; + + pDevEvo->modesetOwnerChanged = TRUE; + + // Try to issue a modeset and flip to the framebuffer console surface. + if (!nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) { + // If that didn't work, free the core channel to trigger RM's console + // restore code. + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + nvFreeCoreChannelEvo(pDevEvo); + + // Reallocate the core channel right after freeing it. This makes sure + // that it's allocated and ready right away if another NVKMS client is + // started. + ReallocCoreChannel(pDevEvo); + } +} + +/*! + * Release modeset ownership previously set by GrabModesetOwnership + * + * \param pOpenDev The per-open device structure relinquishing modeset + * ownership. + * \return FALSE if pOpenDev is not the modeset owner, TRUE otherwise. + */ +static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + if (pDevEvo->modesetOwner != pOpenDev) { + // Only the current owner can release ownership. + return FALSE; + } + + pDevEvo->modesetOwner = NULL; + pDevEvo->handleConsoleHotplugs = TRUE; + + RestoreConsole(pDevEvo); + RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET), + pDevEvo, NULL /* pOpenDevExclude */); + return TRUE; +} + +/*! + * Free the specified NvKmsPerOpenDev. + * + * \param[in,out] pOpen The per-open data, to which the + * NvKmsPerOpenDev is assigned. + * \param[in,out] pOpenDev The NvKmsPerOpenDev to free. 
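+ *
+ * This releases, in order: the per-open device's surface handles, each
+ * NvKmsPerOpenDisp (along with its connector and framelock handles),
+ * the disp and device handle structures, and finally the deferred
+ * request FIFO handles.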
+ */ +void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + return; + } + + nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); + } + + nvEvoDestroyApiHandles(&pOpenDev->dispHandles); + + nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle); + + nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles); + + nvFree(pOpenDev); +} + + +/*! + * Allocate and initialize an NvKmsPerOpenDev. + * + * \param[in,out] pOpen The per-open data, to which the + * new NvKmsPerOpenDev should be assigned. + * \param[in] pDevEvo The device to which the new NvKmsPerOpenDev + * corresponds. + * \param[in] isPrivileged The NvKmsPerOpenDev is privileged which can + * do modeset anytime. + * + * \return On success, return a pointer to the new NvKmsPerOpenDev. + * On failure, return NULL. + */ +struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, + NVDevEvoPtr pDevEvo, NvBool isPrivileged) +{ + struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev)); + NVDispEvoPtr pDispEvo; + NvU32 disp; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + goto fail; + } + + pOpenDev->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev); + + if (pOpenDev->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenDev->pDevEvo = pDevEvo; + + if (!nvEvoInitApiHandles(&pOpenDev->dispHandles, + ARRAY_LEN(pOpenDev->disp))) { + goto fail; + } + + if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) { + nvAssert(!"More disps on this device than NVKMS can handle."); + goto fail; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) { + if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) { + goto fail; + } + } + + if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) { + goto fail; + } + + pOpenDev->isPrivileged = isPrivileged; + if (pOpenDev->isPrivileged) { + AssignFullNvKmsFlipPermissions(pDevEvo, + &pOpenDev->flipPermissions); + AssignFullNvKmsModesetPermissions(pOpenDev->pDevEvo, + &pOpenDev->modesetPermissions); + } + + if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) { + goto fail; + } + + return pOpenDev; + +fail: + nvFreePerOpenDev(pOpen, pOpenDev); + return NULL; +} + + +/*! + * Assign NvKmsPerOpen::type. + * + * This succeeds only if NvKmsPerOpen::type is Undefined, or already + * has the requested type and allowRedundantAssignment is TRUE. + */ +static NvBool AssignNvKmsPerOpenType(struct NvKmsPerOpen *pOpen, + enum NvKmsPerOpenType type, + NvBool allowRedundantAssignment) +{ + if ((pOpen->type == type) && allowRedundantAssignment) { + return TRUE; + } + + if (pOpen->type != NvKmsPerOpenTypeUndefined) { + return FALSE; + } + + switch (type) { + case NvKmsPerOpenTypeIoctl: + nvListInit(&pOpen->ioctl.eventList); + + if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) { + return FALSE; + } + + if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) { + nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); + return FALSE; + } + + nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList); + break; + + case NvKmsPerOpenTypeGrantSurface: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeGrantSwapGroup: + /* Nothing to do, here. 
*/ + break; + + case NvKmsPerOpenTypeGrantPermissions: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUnicastEvent: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUndefined: + nvAssert(!"unexpected NvKmsPerOpenType"); + break; + } + + pOpen->type = type; + return TRUE; +} + +/*! + * Allocate the specified device. + */ +static NvBool AllocDevice(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsAllocDeviceParams *pParams = pParamsVoid; + NVDevEvoPtr pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvU32 disp, head; + NvU8 layer; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH; + return FALSE; + } + + /* + * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times + * on the same device with the same fd. + */ + if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + + pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId); + + if (pDevEvo == NULL) { + pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status); + if (pDevEvo == NULL) { + return FALSE; + } + } else { + if (!pParams->request.tryInferSliMosaicFromExistingDevice && + (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + + if (pDevEvo->usesTegraDevice && + (pParams->request.deviceId != NVKMS_DEVICE_ID_TEGRA)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + pDevEvo->allocRefCnt++; + } + + pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */); + + if (pOpenDev == NULL) { + nvFreeDevEvo(pDevEvo); + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + return FALSE; + } + + /* Beyond this point, the function cannot fail. 
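+     * The version check, duplicate-device check, device allocation, and
+     * per-open device allocation above have all succeeded; the remainder
+     * only records the console hotplug preference and copies device
+     * capabilities into the reply.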
*/ + + if (pParams->request.enableConsoleHotplugHandling) { + pDevEvo->handleConsoleHotplugs = TRUE; + } + + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.subDeviceMask = + NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices); + pParams->reply.numHeads = pDevEvo->numHeads; + pParams->reply.numDisps = pDevEvo->nDispEvo; + + ct_assert(ARRAY_LEN(pParams->reply.dispHandles) == + ARRAY_LEN(pOpenDev->disp)); + + for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) { + pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle; + } + + pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase; + + ct_assert(ARRAY_LEN(pParams->reply.layerCaps) == + ARRAY_LEN(pDevEvo->caps.layerCaps)); + + for (head = 0; head < pDevEvo->numHeads; head++) { + pParams->reply.numLayers[head] = pDevEvo->head[head].numLayers; + } + + for (layer = 0; + layer < ARRAY_LEN(pParams->reply.layerCaps); + layer++) { + pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer]; + } + + pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT; + pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate; + + pParams->reply.nIsoSurfacesInVidmemOnly = + !!NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY); + + pParams->reply.requiresAllAllocationsInSysmem = + pDevEvo->requiresAllAllocationsInSysmem; + pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported; + + pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask; + + pParams->reply.maxWidthInBytes = pDevEvo->caps.maxWidthInBytes; + pParams->reply.maxWidthInPixels = pDevEvo->caps.maxWidthInPixels; + pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight; + pParams->reply.cursorCompositionCaps = pDevEvo->caps.cursorCompositionCaps; + pParams->reply.genericPageKind = pDevEvo->caps.genericPageKind; + + pParams->reply.maxCursorSize = pDevEvo->cursorHal->caps.maxSize; + + pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms; + + pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes; + pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes; + + /* + * TODO: Replace the isSOCDisplay check with a RM query. Bug 3689635. + */ + pParams->reply.displayIsGpuL2Coherent = !pDevEvo->isSOCDisplay; + + pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts; + + pParams->reply.supportsIndependentAcqRelSemaphore = + pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore; + + pParams->reply.supportsVblankSyncObjects = + pDevEvo->hal->caps.supportsVblankSyncObjects; + + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + + return TRUE; +} + +static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle handle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, + handle) { + + nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); + + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, + pDeferredRequestFifo); + } +} + +/* + * Forward declaration since this function is used by + * DisableRemainingVblankSyncObjects(). 
+ */ +static void DisableAndCleanVblankSyncObject(struct NvKmsPerOpenDisp *pOpenDisp, + NvU32 head, + NVVblankSyncObjectRec *pVblankSyncObject, + NVEvoUpdateState *pUpdateState, + NvKmsVblankSyncObjectHandle handle); + +static void DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + NVVblankSyncObjectRec *pVblankSyncObject; + NvKmsVblankSyncObjectHandle handle; + NvU32 head = 0; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + return; + } + + /* For each pOpenDisp: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + /* + * A single update state can handle changes across multiple heads on a + * given Disp. + */ + NVEvoUpdateState updateState = { }; + + /* For each head: */ + for (head = 0; head < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); head++) { + NVEvoApiHandlesRec *pHandles = + &pOpenDisp->vblankSyncObjectHandles[head]; + + /* For each still-active vblank sync object: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, + pVblankSyncObject, handle) { + DisableAndCleanVblankSyncObject(pOpenDisp, head, + pVblankSyncObject, + &updateState, + handle); + } + } + + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() calls (inherent in + * DisableAndCleanVblankSyncObject()) above. This will set up + * and wait for a notification that the hardware execution + * has completed. + */ + nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, + TRUE); + } + } +} + +static void FreeDeviceReference(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + /* Disable all client-owned vblank sync objects that still exist. */ + DisableRemainingVblankSyncObjects(pOpen, pOpenDev); + + UnregisterDeferredRequestFifos(pOpenDev); + + nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev, + &pOpenDev->surfaceHandles); + + if (!nvFreeDevEvo(pOpenDev->pDevEvo)) { + // If this pOpenDev is the modeset owner, implicitly release it. Does + // nothing if this pOpenDev is not the modeset owner. + // + // If nvFreeDevEvo() freed the device, then it also implicitly released + // ownership. + ReleaseModesetOwnership(pOpenDev); + + nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev); + } + + nvFreePerOpenDev(pOpen, pOpenDev); +} + +/*! + * Free the specified device. + */ +static NvBool FreeDevice(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsFreeDeviceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + FreeDeviceReference(pOpen, pOpenDev); + + return TRUE; +} + + +/*! + * Get the disp data. This information should remain static for the + * lifetime of the disp. 
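+ *
+ * Dynamic displays are deliberately excluded from the validDpys reply
+ * field (see below); per-connector dynamic state is reported by
+ * QueryConnectorDynamicData() instead.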
+ */
+static NvBool QueryDisp(struct NvKmsPerOpen *pOpen,
+                        void *pParamsVoid)
+{
+    struct NvKmsQueryDispParams *pParams = pParamsVoid;
+    struct NvKmsPerOpenDisp *pOpenDisp;
+    const NVEvoSubDeviceRec *pSubDevice;
+    NVDispEvoPtr pDispEvo;
+    NvU32 connector;
+
+    pOpenDisp = GetPerOpenDisp(pOpen,
+                               pParams->request.deviceHandle,
+                               pParams->request.dispHandle);
+    if (pOpenDisp == NULL) {
+        return FALSE;
+    }
+
+    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
+
+    pDispEvo = pOpenDisp->pDispEvo;
+
+    pParams->reply.displayOwner = pDispEvo->displayOwner;
+    pParams->reply.subDeviceMask = nvDispSubDevMaskEvo(pDispEvo);
+    // Don't include dynamic displays in validDpys. The data returned here is
+    // supposed to be static for the lifetime of the pDispEvo.
+    pParams->reply.validDpys =
+        nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays,
+                                  pDispEvo->dynamicDpyIds);
+    pParams->reply.bootDpys = pDispEvo->bootDisplays;
+    pParams->reply.muxDpys = pDispEvo->muxDisplays;
+    pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle;
+    pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList);
+
+    ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) ==
+              ARRAY_LEN(pOpenDisp->connector));
+
+    for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles);
+         connector++) {
+        pParams->reply.connectorHandles[connector] =
+            pOpenDisp->connector[connector].nvKmsApiHandle;
+    }
+
+    pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner];
+    if (pSubDevice != NULL) {
+        ct_assert(sizeof(pParams->reply.gpuString) >=
+                  sizeof(pSubDevice->gpuString));
+        nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString,
+                     sizeof(pSubDevice->gpuString));
+    }
+
+    return TRUE;
+}
+
+
+/*!
+ * Get the connector static data. This information should remain static for the
+ * lifetime of the connector.
+ */
+static NvBool QueryConnectorStaticData(struct NvKmsPerOpen *pOpen,
+                                       void *pParamsVoid)
+{
+    struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid;
+    struct NvKmsPerOpenConnector *pOpenConnector;
+    NVConnectorEvoPtr pConnectorEvo;
+
+    pOpenConnector = GetPerOpenConnector(pOpen,
+                                         pParams->request.deviceHandle,
+                                         pParams->request.dispHandle,
+                                         pParams->request.connectorHandle);
+    if (pOpenConnector == NULL) {
+        return FALSE;
+    }
+
+    nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply));
+
+    pConnectorEvo = pOpenConnector->pConnectorEvo;
+
+    pParams->reply.dpyId = pConnectorEvo->displayId;
+    pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) ||
+                          nvConnectorIsDPSerializer(pConnectorEvo);
+    pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex;
+    pParams->reply.type = pConnectorEvo->type;
+    pParams->reply.typeIndex = pConnectorEvo->typeIndex;
+    pParams->reply.signalFormat = pConnectorEvo->signalFormat;
+    pParams->reply.physicalIndex = pConnectorEvo->physicalIndex;
+    pParams->reply.physicalLocation = pConnectorEvo->physicalLocation;
+    pParams->reply.headMask = pConnectorEvo->validHeadMask;
+
+    pParams->reply.isLvds =
+        (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+        (pConnectorEvo->or.protocol ==
+         NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM);
+
+    pParams->reply.locationOnChip = (pConnectorEvo->or.location ==
+                                     NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP);
+    return TRUE;
+}
+
+
+/*!
+ * Get the connector dynamic data. This information reflects changes to
+ * the connector over time (e.g. for DisplayPort MST devices).
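+ *
+ * Note that detectComplete is only meaningful for connectors driven by
+ * the DisplayPort library; for all other connectors, detection is
+ * reported as complete unconditionally (see below).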
+ */ +static NvBool QueryConnectorDynamicData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid; + struct NvKmsPerOpenConnector *pOpenConnector; + NVConnectorEvoPtr pConnectorEvo; + NVDispEvoPtr pDispEvo; + NVDpyEvoPtr pDpyEvo; + + pOpenConnector = GetPerOpenConnector(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.connectorHandle); + if (pOpenConnector == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pConnectorEvo = pOpenConnector->pConnectorEvo; + pDispEvo = pConnectorEvo->pDispEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + pParams->reply.detectComplete = pConnectorEvo->detectComplete; + } else { + pParams->reply.detectComplete = TRUE; + } + + // Find the dynamic dpys on this connector. + pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList(); + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) { + if (pDpyEvo->pConnectorEvo == pConnectorEvo) { + pParams->reply.dynamicDpyIdList = + nvAddDpyIdToDpyIdList(pDpyEvo->id, + pParams->reply.dynamicDpyIdList); + } + } + + return TRUE; +} + + +/*! + * Get the static data for the specified dpy. This information should + * remain static for the lifetime of the dpy. + */ +static NvBool QueryDpyStaticData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pParams->reply.connectorHandle = + ConnectorEvoToConnectorHandle(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pDpyEvo->pConnectorEvo); + /* + * All pConnectorEvos should have corresponding pOpenConnectors, + * so we should always be able to find the NvKmsConnectorHandle. + */ + nvAssert(pParams->reply.connectorHandle != 0); + + pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType; + + if (pDpyEvo->dp.addressString != NULL) { + const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1; + nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString, + NV_MIN(sizeof(pParams->reply.dpAddress), len)); + pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0'; + } + + pParams->reply.mobileInternal = pDpyEvo->internal; + pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo); + + return TRUE; +} + + +/*! + * Get the dynamic data for the specified dpy. This information can + * change when a hotplug occurs. + */ +static NvBool QueryDpyDynamicData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvDpyGetDynamicData(pDpyEvo, pParams); +} + +/* Store a copy of the user's infoString pointer, so we can copy out to it when + * we're done. */ +struct InfoStringExtraUserStateCommon +{ + NvU64 userInfoString; +}; + +/* + * Allocate a kernel buffer to populate the infoString which will be copied out + * to userspace upon completion. 
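+ *
+ * The caller's user-space pointer is stashed in pExtra->userInfoString,
+ * and *ppInfoString is redirected to the kernel buffer so that the mode
+ * validation code can write through it directly; InfoStringDoneUserCommon()
+ * later copies the result back out and frees the buffer.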
+ */ +static NvBool InfoStringPrepUserCommon( + NvU32 infoStringSize, + NvU64 *ppInfoString, + struct InfoStringExtraUserStateCommon *pExtra) +{ + char *kernelInfoString = NULL; + + if (infoStringSize == 0) { + *ppInfoString = 0; + return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) { + return FALSE; + } + + if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) { + return FALSE; + } + + kernelInfoString = nvCalloc(1, infoStringSize); + if (kernelInfoString == NULL) { + return FALSE; + } + + pExtra->userInfoString = *ppInfoString; + *ppInfoString = nvKmsPointerToNvU64(kernelInfoString); + + return TRUE; +} + +/* + * Copy the infoString out to userspace and free the kernel-internal buffer. + */ +static NvBool InfoStringDoneUserCommon( + NvU32 infoStringSize, + NvU64 pInfoString, + NvU32 *infoStringLenWritten, + struct InfoStringExtraUserStateCommon *pExtra) +{ + char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString); + int status; + NvBool ret; + + if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) { + ret = TRUE; + goto done; + } + + nvAssert(*infoStringLenWritten <= infoStringSize); + + status = nvkms_copyout(pExtra->userInfoString, + kernelInfoString, + *infoStringLenWritten); + if (status == 0) { + ret = TRUE; + } else { + ret = FALSE; + *infoStringLenWritten = 0; + } + +done: + nvFree(kernelInfoString); + + return ret; +} + +struct NvKmsValidateModeIndexExtraUserState +{ + struct InfoStringExtraUserStateCommon common; +}; + +static NvBool ValidateModeIndexPrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringPrepUserCommon( + pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); +} + +static NvBool ValidateModeIndexDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, + pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, + &pExtra->common); +} + +/*! + * Validate the requested mode. 
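+ *
+ * An illustrative (hypothetical) user-space setup for the optional info
+ * string; the PrepUser/DoneUser hooks above handle the kernel side.
+ * infoStringSize must not exceed
+ * NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH, as checked in
+ * InfoStringPrepUserCommon():
+ *
+ *   char info[1024];
+ *   struct NvKmsValidateModeIndexParams params = { };
+ *
+ *   params.request.infoStringSize = sizeof(info);
+ *   params.request.pInfoString    = (NvU64)(uintptr_t)info;
+ *   // ... fill in deviceHandle, dispHandle, dpyId, and the mode index ...
+ *   // On success, params.reply.infoStringLenWritten bytes of validation
+ *   // detail have been copied into info[].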
+ */ +static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply); + + return TRUE; +} + +struct NvKmsValidateModeExtraUserState +{ + struct InfoStringExtraUserStateCommon common; +}; + +static NvBool ValidateModePrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringPrepUserCommon( + pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); +} + +static NvBool ValidateModeDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, + pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, + &pExtra->common); +} + +/*! + * Validate the requested mode. + */ +static NvBool ValidateMode(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply); + + return TRUE; +} + +static NvBool +CopyInOneLut(NvU64 pRampsUser, struct NvKmsLutRamps **ppRampsKernel) +{ + struct NvKmsLutRamps *pRampsKernel = NULL; + int status; + + if (pRampsUser == 0) { + return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(pRampsUser)) { + return FALSE; + } + + pRampsKernel = nvAlloc(sizeof(*pRampsKernel)); + if (!pRampsKernel) { + return FALSE; + } + + status = nvkms_copyin((char *)pRampsKernel, pRampsUser, + sizeof(*pRampsKernel)); + if (status != 0) { + nvFree(pRampsKernel); + return FALSE; + } + + *ppRampsKernel = pRampsKernel; + + return TRUE; +} + +static NvBool +CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) +{ + struct NvKmsLutRamps *pInputRamps = NULL; + struct NvKmsLutRamps *pOutputRamps = NULL; + + if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) { + goto fail; + } + if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) { + goto fail; + } + + pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps); + pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps); + + return TRUE; + +fail: + nvFree(pInputRamps); + nvFree(pOutputRamps); + return FALSE; +} + +static void +FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) +{ + struct NvKmsLutRamps *pInputRamps = + nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps); + struct NvKmsLutRamps *pOutputRamps = + nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps); + + nvFree(pInputRamps); + nvFree(pOutputRamps); +} + +/* No extra user state needed for SetMode; although we lose the user pointers + * for the LUT ramps after copying them in, that's okay because we don't need + * to copy them back out again. */ +struct NvKmsSetModeExtraUserState +{ +}; + +/*! + * Copy in any data referenced by pointer for the SetMode request. 
Currently + * this is only the LUT ramps. + */ +static NvBool SetModePrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, head, dispFailed, headFailed; + + /* Iterate over all of the common LUT ramp pointers embedded in the SetMode + * request, and copy in each one. */ + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (head = 0; head < ARRAY_LEN(pReq->disp[disp].head); head++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[head].lut; + + if (!CopyInLutParams(pCommonLutParams)) { + /* Remember how far we got through these loops before we + * failed, so that we can undo everything up to this point. */ + dispFailed = disp; + headFailed = head; + goto fail; + } + } + } + + return TRUE; + +fail: + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (head = 0; head < ARRAY_LEN(pReq->disp[disp].head); head++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[head].lut; + + if (disp > dispFailed || + (disp == dispFailed && head >= headFailed)) { + break; + } + + FreeCopiedInLutParams(pCommonLutParams); + } + } + + return FALSE; +} + +/*! + * Free buffers allocated in SetModePrepUser. + */ +static NvBool SetModeDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, head; + + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (head = 0; head < ARRAY_LEN(pReq->disp[disp].head); head++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[head].lut; + + FreeCopiedInLutParams(pCommonLutParams); + } + } + + return TRUE; +} + +/*! + * Perform a modeset on the device. + */ +static NvBool SetMode(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev, + &pParams->request, &pParams->reply, + FALSE /* bypassComposition */, + TRUE /* doRasterLock */); +} + +static inline NvBool nvHsIoctlSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 head, + const struct NvKmsSetCursorImageCommonParams *pParams) +{ + return nvSetCursorImage(pDispEvo, + pOpenDevice, + pOpenDevSurfaceHandles, + head, + pParams); +} + +/*! + * Set the cursor image. + */ +static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetCursorImageParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + return nvHsIoctlSetCursorImage(pDispEvo, + pOpenDev, + &pOpenDev->surfaceHandles, + pParams->request.head, + &pParams->request.common); +} + +static inline NvBool nvHsIoctlMoveCursor( + NVDispEvoPtr pDispEvo, + NvU32 head, + const struct NvKmsMoveCursorCommonParams *pParams) +{ + nvEvoMoveCursor(pDispEvo, head, pParams); + return TRUE; +} + +/*! 
+ * Change the cursor position. + */ +static NvBool MoveCursor(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsMoveCursorParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + return nvHsIoctlMoveCursor(pDispEvo, + pParams->request.head, + &pParams->request.common); +} + +/* No extra user state needed for SetLut; although we lose the user pointers + * for the LUT ramps after copying them in, that's okay because we don't need + * to copy them back out again. */ +struct NvKmsSetLutExtraUserState +{ +}; + +/*! + * Copy in any data referenced by pointer for the SetLut request. Currently + * this is only the LUT ramps. + */ +static NvBool SetLutPrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; + + return CopyInLutParams(pCommonLutParams); +} + +/*! + * Free buffers allocated in SetLutPrepUser. + */ +static NvBool SetLutDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; + + FreeCopiedInLutParams(pCommonLutParams); + + return TRUE; +} + +/*! + * Set the LUT on the specified head. + */ +static NvBool SetLut(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, + &pParams->request.common)) { + return FALSE; + } + + nvEvoSetLut(pDispEvo, + pParams->request.head, TRUE /* kickoff */, + &pParams->request.common); + + return TRUE; +} + + +/*! + * Return whether the specified head is idle. + */ +static NvBool IdleBaseChannelCheckIdleOneHead( + NVDispEvoPtr pDispEvo, + NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + { + NVEvoChannelPtr pMainLayerChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NvBool isMethodPending = FALSE; + NvBool ret; + + ret = pDevEvo->hal->IsChannelMethodPending(pDevEvo, pMainLayerChannel, + pDispEvo->displayOwner, &isMethodPending); + return !ret || !isMethodPending; + } +} + +/*! + * Return whether all heads described in pRequest are idle. + * + * Note that we loop over all requested heads, rather than return FALSE once we + * find the first non-idle head, because checking for idle has side effects: in + * headSurface, checking for idle gives the headSurface flip queue the + * opportunity to proceed another frame. 
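+ *
+ * In other words, the loop is deliberately exhaustive; a sketch of the
+ * anti-pattern this avoids:
+ *
+ *   // WRONG: returning on the first busy head would deny the remaining
+ *   // heads the side effects of their idle checks.
+ *   if (!IdleBaseChannelCheckIdleOneHead(pDispEvo, head)) {
+ *       return FALSE;
+ *   }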
+ */ +static NvBool IdleBaseChannelCheckIdle( + NVDevEvoPtr pDevEvo, + const struct NvKmsIdleBaseChannelRequest *pRequest, + struct NvKmsIdleBaseChannelReply *pReply) +{ + NvU32 head, sd; + NVDispEvoPtr pDispEvo; + NvBool allIdle = TRUE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + NvBool idle; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if ((pRequest->subDevicesPerHead[head] & NVBIT(sd)) == 0) { + continue; + } + + idle = IdleBaseChannelCheckIdleOneHead(pDispEvo, head); + + if (!idle) { + pReply->stopSubDevicesPerHead[head] |= NVBIT(sd); + } + allIdle = allIdle && idle; + } + } + + return allIdle; +} + +/*! + * Idle all requested heads. + * + * First, wait for the heads to idle naturally. If a timeout is exceeded, then + * force the non-idle heads to idle, and record these in pReply. + */ +static NvBool IdleBaseChannelAll( + NVDevEvoPtr pDevEvo, + const struct NvKmsIdleBaseChannelRequest *pRequest, + struct NvKmsIdleBaseChannelReply *pReply) +{ + NvU64 startTime = 0; + + /* + * Each element in subDevicesPerHead[] must be large enough to hold one bit + * per subdevice. + */ + ct_assert(NVKMS_MAX_SUBDEVICES <= + (sizeof(pRequest->subDevicesPerHead[0]) * 8)); + + /* Loop until all head,sd pairs are idle, or we time out. */ + do { + const NvU32 timeout = 2000000; /* 2 seconds */ + + + /* + * Clear the pReply data, + * IdleBaseChannelCheckIdle() will fill it afresh. + */ + nvkms_memset(pReply, 0, sizeof(*pReply)); + + /* If all heads are idle, we are done. */ + if (IdleBaseChannelCheckIdle(pDevEvo, pRequest, pReply)) { + return TRUE; + } + + /* Break out of the loop if we exceed the timeout. */ + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + break; + } + + /* At least one head is not idle; yield, and try again. */ + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + + +/*! + * Wait for the requested base channels to be idle, returning whether + * stopping the base channels was necessary. + */ +static NvBool IdleBaseChannel(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsIdleBaseChannelParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only the modesetOwner can idle base. */ + + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + return FALSE; + } + + return IdleBaseChannelAll(pOpenDev->pDevEvo, + &pParams->request, &pParams->reply); +} + + +static inline NvBool nvHsIoctlFlip( + NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipRequest *pRequest, + struct NvKmsFlipReply *pReply) +{ + return nvFlipEvo(pOpenDev->pDevEvo, + pOpenDev, + pRequest, + pReply, + FALSE /* skipUpdate */, + TRUE /* allowFlipLock */); +} + +/*! + * Flip the specified head. + */ +static NvBool Flip(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsFlipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + return nvHsIoctlFlip(pOpenDev->pDevEvo, pOpenDev, + &pParams->request, &pParams->reply); +} + + +/*! + * Record whether this client is interested in the specified dynamic + * dpy. + */ +static NvBool DeclareDynamicDpyInterest(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + /* XXX NVKMS TODO: implement me. */ + + return TRUE; +} + + +/*! 
+ * Register a surface with the specified per-open + device. + */ +static NvBool RegisterSurface(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsRegisterSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + /* + * Only allow userspace clients to specify memory objects by FD. + * This prevents clients from specifying (hClient, hObject) tuples that + * really belong to other clients. + */ + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE && + !pParams->request.useFd) { + return FALSE; + } + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoRegisterSurface(pOpenDev->pDevEvo, pOpenDev, pParams, + NvHsMapPermissionsReadOnly); + return TRUE; +} + + +/*! + * Unregister a surface from the specified per-open + device. + */ +static NvBool UnregisterSurface(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsUnregisterSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoUnregisterSurface(pOpenDev->pDevEvo, pOpenDev, + pParams->request.surfaceHandle, + FALSE /* skipUpdate */); + return TRUE; +} + + +/*! + * Associate a surface with the NvKmsPerOpen specified by + * NvKmsGrantSurfaceParams::request::fd. + */ +static NvBool GrantSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrantSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSurfaceEvoPtr pSurfaceEvo; + struct NvKmsPerOpen *pOpenFd; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pSurfaceEvo = + nvEvoGetSurfaceFromHandleNoCtxDmaOk(pOpenDev->pDevEvo, + &pOpenDev->surfaceHandles, + pParams->request.surfaceHandle); + if (pSurfaceEvo == NULL) { + return FALSE; + } + + if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) { + return FALSE; + } + + /* Only the owner of the surface can grant it to other clients. */ + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, + pParams->request.surfaceHandle)) { + return FALSE; + } + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (!AssignNvKmsPerOpenType( + pOpenFd, NvKmsPerOpenTypeGrantSurface, FALSE)) { + return FALSE; + } + + nvEvoIncrementSurfaceStructRefCnt(pSurfaceEvo); + pOpenFd->grantSurface.pSurfaceEvo = pSurfaceEvo; + + return TRUE; +} + + +/*! + * Retrieve the surface and device associated with + * NvKmsAcquireSurfaceParams::request::fd, and give the client an + * NvKmsSurfaceHandle to the surface. + */ +static NvBool AcquireSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsAcquireSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pOpenFd; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsSurfaceHandle surfaceHandle = 0; + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeGrantSurface) { + return FALSE; + } + + nvAssert(pOpenFd->grantSurface.pSurfaceEvo != NULL); + + if (pOpenFd->grantSurface.pSurfaceEvo->rmRefCnt == 0) { /* orphan */ + return FALSE; + } + + if (nvEvoSurfaceRefCntsTooLarge(pOpenFd->grantSurface.pSurfaceEvo)) { + return FALSE; + } + + /* Since the surface isn't orphaned, it should have an owner, with a + * pOpenDev and a pDevEvo. Get the pOpenDev for the acquiring client that + * matches the owner's pDevEvo. 
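+ *
+ * For context, the grant/acquire handshake between two clients is,
+ * schematically (how the fd travels between processes is up to the
+ * clients, e.g. over a UNIX domain socket):
+ *
+ *   granter:  GrantSurface(deviceHandle, surfaceHandle, fd)
+ *   granter:  send fd to the acquiring client
+ *   acquirer: AcquireSurface(fd) -> (deviceHandle, surfaceHandle)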
*/ + nvAssert(pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo != NULL); + pOpenDev = DevEvoToOpenDev(pOpen, + pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo); + + if (pOpenDev == NULL) { + return FALSE; + } + + surfaceHandle = + nvEvoCreateApiHandle(&pOpenDev->surfaceHandles, + pOpenFd->grantSurface.pSurfaceEvo); + + if (surfaceHandle == 0) { + return FALSE; + } + + nvEvoIncrementSurfaceStructRefCnt(pOpenFd->grantSurface.pSurfaceEvo); + + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.surfaceHandle = surfaceHandle; + + return TRUE; +} + +static NvBool ReleaseSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsReleaseSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoReleaseSurface(pOpenDev->pDevEvo, pOpenDev, + pParams->request.surfaceHandle); + return TRUE; +} + + +/*! + * Change the value of the specified attribute. + */ +static NvBool SetDpyAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetDpyAttributeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvSetDpyAttributeEvo(pDpyEvo, pParams); +} + + +/*! + * Get the value of the specified attribute. + */ +static NvBool GetDpyAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDpyAttributeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvGetDpyAttributeEvo(pDpyEvo, pParams); +} + + +/*! + * Get the valid values of the specified attribute. + */ +static NvBool GetDpyAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDpyAttributeValidValuesParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvGetDpyAttributeValidValuesEvo(pDpyEvo, pParams); +} + + +/*! + * Set the value of the specified attribute. + */ +static NvBool SetDispAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetDispAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvSetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get the value of the specified attribute. + */ +static NvBool GetDispAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDispAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvGetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get the valid values of the specified attribute. 
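+ *
+ * A typical (hypothetical) client pattern is to query the valid values
+ * first and only then attempt to set the attribute:
+ *
+ *   GetDispAttributeValidValues(attribute) -> validValues
+ *   if (the desired value is permitted by validValues)
+ *       SetDispAttribute(attribute, value)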
+ */ +static NvBool GetDispAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDispAttributeValidValuesParams *pParams = pParamsVoid; + + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvGetDispAttributeValidValuesEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get information about the specified framelock device. + */ +static NvBool QueryFrameLock(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryFrameLockParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + NvU32 gpu; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + ct_assert(ARRAY_LEN(pFrameLockEvo->gpuIds) <= + ARRAY_LEN(pParams->reply.gpuIds)); + + for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) { + pParams->reply.gpuIds[gpu] = pFrameLockEvo->gpuIds[gpu]; + } + + return TRUE; +} + + +static NvBool SetFrameLockAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetFrameLockAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NVFrameLockEvoRec *pFrameLockEvo; + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvSetFrameLockAttributeEvo(pFrameLockEvo, pParams); +} + + +static NvBool GetFrameLockAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetFrameLockAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvGetFrameLockAttributeEvo(pFrameLockEvo, pParams); +} + + +static NvBool GetFrameLockAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvGetFrameLockAttributeValidValuesEvo(pFrameLockEvo, pParams); +} + + +/*! + * Pop the next event off of the client's event queue. 
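+ *
+ * An illustrative event loop (nvkms_ioctl() and HandleEvent() are
+ * hypothetical wrapper names): once the fd polls readable, drain the
+ * queue until reply.valid goes FALSE:
+ *
+ *   struct NvKmsGetNextEventParams params;
+ *
+ *   do {
+ *       if (!nvkms_ioctl(fd, NVKMS_IOCTL_GET_NEXT_EVENT,
+ *                        &params, sizeof(params))) {
+ *           break;
+ *       }
+ *       if (params.reply.valid) {
+ *           HandleEvent(&params.reply.event);
+ *       }
+ *   } while (params.reply.valid);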
+ */ +static NvBool GetNextEvent(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetNextEventParams *pParams = pParamsVoid; + struct NvKmsPerOpenEventListEntry *pEntry; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (nvListIsEmpty(&pOpen->ioctl.eventList)) { + pParams->reply.valid = FALSE; + return TRUE; + } + + pEntry = nvListFirstEntry(&pOpen->ioctl.eventList, + struct NvKmsPerOpenEventListEntry, + eventListEntry); + + pParams->reply.valid = TRUE; + pParams->reply.event = pEntry->event; + + nvListDel(&pEntry->eventListEntry); + + nvFree(pEntry); + + if (nvListIsEmpty(&pOpen->ioctl.eventList)) { + nvkms_event_queue_changed(pOpen->pOpenKernel, FALSE); + } + + return TRUE; +} + + +/*! + * Record the client's event interest for the specified device. + */ +static NvBool DeclareEventInterest(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsDeclareEventInterestParams *pParams = pParamsVoid; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpen->ioctl.eventInterestMask = pParams->request.interestMask; + + return TRUE; +} + +static NvBool ClearUnicastEvent(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsClearUnicastEventParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pOpenFd = NULL; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpenFd = nvkms_get_per_open_data(pParams->request.unicastEventFd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeUnicastEvent) { + return FALSE; + } + + nvkms_event_queue_changed(pOpenFd->pOpenKernel, FALSE); + + return TRUE; +} + +static NvBool SetLayerPosition(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetLayerPositionParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* XXX NVKMS HEADSURFACE TODO: intercept */ + + return nvLayerSetPositionEvo(pOpenDev->pDevEvo, &pParams->request); +} + +static NvBool GrabOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrabOwnershipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + // The only kind of ownership right now is modeset ownership. + return GrabModesetOwnership(pOpenDev); +} + +static NvBool ReleaseOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsReleaseOwnershipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + // The only kind of ownership right now is modeset ownership. + return ReleaseModesetOwnership(pOpenDev); +} + +static NvBool GrantPermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrantPermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpen *pOpenFd; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only the modesetOwner can grant permissions. 
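+ * The check below compares the device's recorded modesetOwner against
+ * this client's pOpenDev; requests from non-owners simply fail.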
*/ + + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + return FALSE; + } + + if (!ValidateNvKmsPermissions(pOpenDev->pDevEvo, + &pParams->request.permissions)) { + return FALSE; + } + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (!AssignNvKmsPerOpenType( + pOpenFd, NvKmsPerOpenTypeGrantPermissions, FALSE)) { + return FALSE; + } + + pOpenFd->grantPermissions.permissions = pParams->request.permissions; + + pOpenFd->grantPermissions.pDevEvo = pOpenDev->pDevEvo; + + return TRUE; +} + +static NvBool AcquirePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsAcquirePermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpen *pOpenFd; + const struct NvKmsPermissions *pPermissionsNew; + enum NvKmsPermissionsType type; + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeGrantPermissions) { + return FALSE; + } + + pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantPermissions.pDevEvo); + + if (pOpenDev == NULL) { + return FALSE; + } + + type = pOpenFd->grantPermissions.permissions.type; + + pPermissionsNew = &pOpenFd->grantPermissions.permissions; + + if (type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pOpenDev->flipPermissions.disp); d++) { + for (h = 0; h < ARRAY_LEN(pOpenDev->flipPermissions. + disp[d].head); h++) { + pOpenDev->flipPermissions.disp[d].head[h].layerMask |= + pPermissionsNew->flip.disp[d].head[h].layerMask; + } + } + + pParams->reply.permissions.flip = pOpenDev->flipPermissions; + + } else if (type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pOpenDev->modesetPermissions.disp); d++) { + for (h = 0; h < ARRAY_LEN(pOpenDev->modesetPermissions. + disp[d].head); h++) { + pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList = + nvAddDpyIdListToDpyIdList( + pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList, + pPermissionsNew->modeset.disp[d].head[h].dpyIdList); + } + } + + pParams->reply.permissions.modeset = pOpenDev->modesetPermissions; + + } else { + /* + * GrantPermissions() should ensure that + * pOpenFd->grantPermissions.permissions.type is always valid. + */ + nvAssert(!"AcquirePermissions validation failure"); + return FALSE; + } + + pParams->reply.permissions.type = type; + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + + return TRUE; +} + +static NvBool RevokePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsRevokePermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev = + GetPerOpenDev(pOpen, pParams->request.deviceHandle); + const NvU32 validBitmask = + NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only the modeset owner can revoke permissions. */ + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + return FALSE; + } + + /* Reject invalid bitmasks. */ + + if ((pParams->request.permissionsTypeBitmask & ~validBitmask) != 0) { + return FALSE; + } + + /* Revoke permissions for everyone except the caller. 
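+ *
+ * An illustrative request that revokes both supported permission types
+ * at once:
+ *
+ *   params.request.permissionsTypeBitmask =
+ *       NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
+ *       NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET);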
*/ + + RevokePermissionsInternal(pParams->request.permissionsTypeBitmask, + pOpenDev->pDevEvo, + pOpenDev /* pOpenDevExclude */); + return TRUE; +} + +static NvBool RegisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsRegisterDeferredRequestFifoParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSurfaceEvoPtr pSurfaceEvo; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsDeferredRequestFifoHandle handle; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pSurfaceEvo = nvEvoGetSurfaceFromHandleNoCtxDmaOk( + pOpenDev->pDevEvo, + &pOpenDev->surfaceHandles, + pParams->request.surfaceHandle); + + if (pSurfaceEvo == NULL) { + return FALSE; + } + + /* + * WAR Bug 2050970: If a surface is unregistered and it wasn't registered + * with NvKmsRegisterSurfaceRequest::noDisplayHardwareAccess, then the call + * to nvRMSyncEvoChannel() in nvEvoDecrementSurfaceRefCnts() may hang + * if any flips in flight acquire on semaphore releases that haven't + * occurred yet. + * + * Since a ctxdma is not necessary for the deferred request fifo surface, + * we work around this by forcing all surfaces that will be registered as + * a deferred request fifo to be registered with + * noDisplayHardwareAccess==TRUE, then skip the idle in + * nvEvoDecrementSurfaceRefCnts() for these surfaces. + */ + if (pSurfaceEvo->requireCtxDma) { + return FALSE; + } + + pDeferredRequestFifo = + nvEvoRegisterDeferredRequestFifo(pOpenDev->pDevEvo, pSurfaceEvo); + + if (pDeferredRequestFifo == NULL) { + return FALSE; + } + + handle = nvEvoCreateApiHandle(&pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo); + + if (handle == 0) { + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, + pDeferredRequestFifo); + return FALSE; + } + + pParams->reply.deferredRequestFifoHandle = handle; + + return TRUE; +} + +static NvBool UnregisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsUnregisterDeferredRequestFifoParams *pParams = pParamsVoid; + NvKmsDeferredRequestFifoHandle handle = + pParams->request.deferredRequestFifoHandle; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + struct NvKmsPerOpenDev *pOpenDev = + GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pDeferredRequestFifo = + nvEvoGetPointerFromApiHandle( + &pOpenDev->deferredRequestFifoHandles, handle); + + if (pDeferredRequestFifo == NULL) { + return FALSE; + } + + nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); + + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, pDeferredRequestFifo); + + return TRUE; +} + +/*! + * Get the CRC32 data for the specified dpy. + */ +static NvBool QueryDpyCRC32(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyCRC32Params *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + CRC32NotifierCrcOut crcOut; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + // Only the current owner can query CRC32 values. 
+ return FALSE;
+ }
+
+ pDispEvo = pOpenDisp->pDispEvo;
+
+ if (!nvHeadIsActive(pDispEvo, pParams->request.head)) {
+ return FALSE;
+ }
+
+ nvkms_memset(&(pParams->reply), 0, sizeof(pParams->reply));
+
+ // Since we will only read one frame of CRCs, point at the single set of
+ // values in the reply struct.
+ crcOut.rasterGeneratorCrc32 = &(pParams->reply.rasterGeneratorCrc32);
+ crcOut.compositorCrc32 = &(pParams->reply.compositorCrc32);
+ crcOut.outputCrc32 = &(pParams->reply.outputCrc32);
+
+ if (!nvReadCRC32Evo(pDispEvo, pParams->request.head, &crcOut)) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static NvBool SwitchMux(
+ struct NvKmsPerOpen *pOpen,
+ void *pParamsVoid)
+{
+ struct NvKmsSwitchMuxParams *pParams = pParamsVoid;
+ const struct NvKmsSwitchMuxRequest *r = &pParams->request;
+ NVDpyEvoPtr pDpyEvo;
+ NVDevEvoPtr pDevEvo;
+
+ pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
+ if (pDpyEvo == NULL) {
+ return FALSE;
+ }
+
+ pDevEvo = pDpyEvo->pDispEvo->pDevEvo;
+ if (pDevEvo->modesetOwner != GetPerOpenDev(pOpen, r->deviceHandle)) {
+ return FALSE;
+ }
+
+ switch (pParams->request.operation) {
+ case NVKMS_SWITCH_MUX_PRE:
+ return nvRmMuxPre(pDpyEvo, r->state);
+ case NVKMS_SWITCH_MUX:
+ return nvRmMuxSwitch(pDpyEvo, r->state);
+ case NVKMS_SWITCH_MUX_POST:
+ return nvRmMuxPost(pDpyEvo, r->state);
+ default:
+ return FALSE;
+ }
+}
+
+static NvBool GetMuxState(
+ struct NvKmsPerOpen *pOpen,
+ void *pParamsVoid)
+{
+ struct NvKmsGetMuxStateParams *pParams = pParamsVoid;
+ const struct NvKmsGetMuxStateRequest *r = &pParams->request;
+ NVDpyEvoPtr pDpyEvo;
+
+ pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId);
+ if (pDpyEvo == NULL) {
+ return FALSE;
+ }
+
+ pParams->reply.state = nvRmMuxState(pDpyEvo);
+
+ return pParams->reply.state != MUX_STATE_GET;
+}
+
+static NvBool ExportVrrSemaphoreSurface(
+ struct NvKmsPerOpen *pOpen,
+ void *pParamsVoid)
+{
+ struct NvKmsExportVrrSemaphoreSurfaceParams *pParams = pParamsVoid;
+ const struct NvKmsExportVrrSemaphoreSurfaceRequest *req = &pParams->request;
+ const struct NvKmsPerOpenDev *pOpenDev =
+ GetPerOpenDev(pOpen, pParams->request.deviceHandle);
+
+ if (pOpenDev == NULL) {
+ return FALSE;
+ }
+
+ return nvExportVrrSemaphoreSurface(pOpenDev->pDevEvo, req->memFd);
+}
+
+static NvBool EnableVblankSyncObject(
+ struct NvKmsPerOpen *pOpen,
+ void *pParamsVoid)
+{
+ struct NvKmsEnableVblankSyncObjectParams *pParams = pParamsVoid;
+ struct NvKmsPerOpenDisp* pOpenDisp = NULL;
+ NVDispHeadStateEvoRec *pHeadState = NULL;
+ NVDevEvoPtr pDevEvo = NULL;
+ NvKmsVblankSyncObjectHandle vblankHandle = 0;
+ int freeVblankSyncObjectIdx = 0;
+ NvU32 head = pParams->request.head;
+ NVVblankSyncObjectRec *vblankSyncObjects = NULL;
+ NVDispEvoPtr pDispEvo = NULL;
+ NVEvoUpdateState updateState = { };
+
+ /* Obtain the Head State. */
+ pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle,
+ pParams->request.dispHandle);
+ if (pOpenDisp == NULL) {
+ nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp.");
+ return FALSE;
+ }
+
+ pDispEvo = pOpenDisp->pDispEvo;
+ pDevEvo = pDispEvo->pDevEvo;
+
+ /* Ensure Vblank Sync Object API is supported on this chip. */
+ if (!pDevEvo->supportsSyncpts ||
+ !pDevEvo->hal->caps.supportsVblankSyncObjects) {
+ nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not "
+ "supported on this chip.");
+ return FALSE;
+ }
+
+ /* Validate requested head because it comes from user input.
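+ * The bound is ARRAY_LEN(pDispEvo->headState), i.e. the largest head
+ * index the disp can represent, not just the currently-active heads.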
*/ + if (head >= ARRAY_LEN(pDispEvo->headState)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", head); + return FALSE; + } + pHeadState = &pDispEvo->headState[head]; + vblankSyncObjects = pHeadState->vblankSyncObjects; + pDevEvo = pDispEvo->pDevEvo; + + /* + * Find the available sync object. Sync Objects with handle=0 are not in + * use. + */ + for (freeVblankSyncObjectIdx = 0; + freeVblankSyncObjectIdx < pHeadState->numVblankSyncObjectsCreated; + freeVblankSyncObjectIdx++) { + if (!vblankSyncObjects[freeVblankSyncObjectIdx].inUse) { + break; + } + } + if (freeVblankSyncObjectIdx == pHeadState->numVblankSyncObjectsCreated) { + return FALSE; + } + + /* Save the created vblank handle if it is valid. */ + vblankHandle = + nvEvoCreateApiHandle(&pOpenDisp->vblankSyncObjectHandles[head], + &vblankSyncObjects[freeVblankSyncObjectIdx]); + if (vblankHandle == 0) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to create vblank handle."); + return FALSE; + } + + if (nvHeadIsActive(pDispEvo, head)) { + /* + * Instruct the hardware to enable a semaphore corresponding to this + * syncpt. The Update State will be populated. + */ + pDevEvo->hal->ConfigureVblankSyncObject( + pDevEvo, + pHeadState->timings.rasterBlankStart.y, + head, + freeVblankSyncObjectIdx, + vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.hCtxDma, + &updateState); + + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() call above. This will set up and wait for a + * notification that the hardware execution actually completed. + */ + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE); + + vblankSyncObjects[freeVblankSyncObjectIdx].enabled = TRUE; + } + + /* Populate the vblankSyncObjects array. */ + vblankSyncObjects[freeVblankSyncObjectIdx].inUse = TRUE; + + /* Populate the reply field. */ + pParams->reply.vblankHandle = vblankHandle; + /* Note: the syncpt ID is NOT the same as the vblank handle. */ + pParams->reply.syncptId = + pHeadState->vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.id; + + return TRUE; +} + +static void DisableAndCleanVblankSyncObject(struct NvKmsPerOpenDisp *pOpenDisp, + NvU32 head, + NVVblankSyncObjectRec *pVblankSyncObject, + NVEvoUpdateState *pUpdateState, + NvKmsVblankSyncObjectHandle handle) +{ + NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo; + + if (nvHeadIsActive(pDispEvo, head)) { + /* + * Instruct the hardware to disable the semaphore corresponding to this + * syncpt. The Update State will be populated. + * + * Note: Using dummy zero value for rasterLine because the disable + * codepath in ConfigureVblankSyncObject() does not use that argument. + */ + pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject(pDispEvo->pDevEvo, + 0, /* rasterLine */ + head, + pVblankSyncObject->index, + 0, /* hCtxDma */ + pUpdateState); + /* + * Note: it is the caller's responsibility to call + * nvEvoUpdateAndKickOff(). + */ + } + + pVblankSyncObject->inUse = FALSE; + pVblankSyncObject->enabled = FALSE; + + /* Remove the handle from the map. 
*/ + nvEvoDestroyApiHandle(&pOpenDisp->vblankSyncObjectHandles[head], handle); +} + +static NvBool DisableVblankSyncObject( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsDisableVblankSyncObjectParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp* pOpenDisp = + GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + NVVblankSyncObjectRec *pVblankSyncObject = NULL; + NvU32 head = pParams->request.head; + NVDevEvoPtr pDevEvo = NULL; + NVEvoUpdateState updateState = { }; + + if (pOpenDisp == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp."); + return FALSE; + } + + pDevEvo = pOpenDisp->pDispEvo->pDevEvo; + + /* Ensure Vblank Sync Object API is supported on this chip. */ + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not " + "supported on this chip."); + return FALSE; + } + + /* Validate requested head because it comes from user input. */ + if (head >= ARRAY_LEN(pOpenDisp->pDispEvo->headState)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", head); + return FALSE; + } + + /* Mark the indicated object as free. */ + pVblankSyncObject = + nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSyncObjectHandles[head], + pParams->request.vblankHandle); + if (pVblankSyncObject == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "unable to find object with provided " + "handle."); + return FALSE; + } + + DisableAndCleanVblankSyncObject(pOpenDisp, head, pVblankSyncObject, + &updateState, pParams->request.vblankHandle); + + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() call inside of the + * DisableAndCleanVblankSyncObject() call above. This will set up and + * wait for a notification that the hardware execution has completed. + */ + nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, TRUE); + } + + return TRUE; +} + +/*! + * Perform the ioctl operation requested by the client. + * + * \param[in,out] pOpenVoid The per-open data, allocated by + * nvKmsOpen(). + * \param[in] cmdOpaque The NVKMS_IOCTL_ operation to perform. + * \param[in,out] paramsAddress A pointer, in the client process's + * address space, to the parameter + * structure. This is cmd-specific. + * \param[in] paramSize The client-specified size of the params. + * + * \return Return TRUE if the ioctl operation was successfully + * performed. Otherwise, return FALSE. 
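+ *
+ * For a user-space client, the per-command flow below is, schematically:
+ *
+ *   copyin(request) -> prepUser() (optional) -> proc()
+ *       -> doneUser() (optional) -> copyout(reply)
+ *
+ * Kernel-space clients skip the copies; their params structure is used
+ * in place.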
+ */ +NvBool nvKmsIoctl( + void *pOpenVoid, + const NvU32 cmdOpaque, + const NvU64 paramsAddress, + const size_t paramSize) +{ + static const struct { + + NvBool (*proc)(struct NvKmsPerOpen *pOpen, void *pParamsVoid); + NvBool (*prepUser)(void *pParamsVoid, void *pExtraStateVoid); + NvBool (*doneUser)(void *pParamsVoid, void *pExtraStateVoid); + const size_t paramSize; + /* Size of extra state tracked for user parameters */ + const size_t extraSize; + + const size_t requestSize; + const size_t requestOffset; + + const size_t replySize; + const size_t replyOffset; + + } dispatch[] = { + +#define _ENTRY_WITH_USER(_cmd, _func, _prepUser, _doneUser, _extraSize) \ + [_cmd] = { \ + .proc = _func, \ + .prepUser = _prepUser, \ + .doneUser = _doneUser, \ + .paramSize = sizeof(struct NvKms##_func##Params), \ + .requestSize = sizeof(struct NvKms##_func##Request), \ + .requestOffset = offsetof(struct NvKms##_func##Params, request), \ + .replySize = sizeof(struct NvKms##_func##Reply), \ + .replyOffset = offsetof(struct NvKms##_func##Params, reply), \ + .extraSize = _extraSize, \ + } + +#define ENTRY(_cmd, _func) \ + _ENTRY_WITH_USER(_cmd, _func, NULL, NULL, 0) + +#define ENTRY_CUSTOM_USER(_cmd, _func) \ + _ENTRY_WITH_USER(_cmd, _func, \ + _func##PrepUser, _func##DoneUser, \ + sizeof(struct NvKms##_func##ExtraUserState)) + + ENTRY(NVKMS_IOCTL_ALLOC_DEVICE, AllocDevice), + ENTRY(NVKMS_IOCTL_FREE_DEVICE, FreeDevice), + ENTRY(NVKMS_IOCTL_QUERY_DISP, QueryDisp), + ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, QueryConnectorStaticData), + ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, QueryConnectorDynamicData), + ENTRY(NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, QueryDpyStaticData), + ENTRY(NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, QueryDpyDynamicData), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE_INDEX, ValidateModeIndex), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE, ValidateMode), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_MODE, SetMode), + ENTRY(NVKMS_IOCTL_SET_CURSOR_IMAGE, SetCursorImage), + ENTRY(NVKMS_IOCTL_MOVE_CURSOR, MoveCursor), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_LUT, SetLut), + ENTRY(NVKMS_IOCTL_IDLE_BASE_CHANNEL, IdleBaseChannel), + ENTRY(NVKMS_IOCTL_FLIP, Flip), + ENTRY(NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, + DeclareDynamicDpyInterest), + ENTRY(NVKMS_IOCTL_REGISTER_SURFACE, RegisterSurface), + ENTRY(NVKMS_IOCTL_UNREGISTER_SURFACE, UnregisterSurface), + ENTRY(NVKMS_IOCTL_GRANT_SURFACE, GrantSurface), + ENTRY(NVKMS_IOCTL_ACQUIRE_SURFACE, AcquireSurface), + ENTRY(NVKMS_IOCTL_RELEASE_SURFACE, ReleaseSurface), + ENTRY(NVKMS_IOCTL_SET_DPY_ATTRIBUTE, SetDpyAttribute), + ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE, GetDpyAttribute), + ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, + GetDpyAttributeValidValues), + ENTRY(NVKMS_IOCTL_SET_DISP_ATTRIBUTE, SetDispAttribute), + ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE, GetDispAttribute), + ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES, + GetDispAttributeValidValues), + ENTRY(NVKMS_IOCTL_QUERY_FRAMELOCK, QueryFrameLock), + ENTRY(NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, SetFrameLockAttribute), + ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, GetFrameLockAttribute), + ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES, + GetFrameLockAttributeValidValues), + ENTRY(NVKMS_IOCTL_GET_NEXT_EVENT, GetNextEvent), + ENTRY(NVKMS_IOCTL_DECLARE_EVENT_INTEREST, DeclareEventInterest), + ENTRY(NVKMS_IOCTL_CLEAR_UNICAST_EVENT, ClearUnicastEvent), + ENTRY(NVKMS_IOCTL_SET_LAYER_POSITION, SetLayerPosition), + ENTRY(NVKMS_IOCTL_GRAB_OWNERSHIP, GrabOwnership), + ENTRY(NVKMS_IOCTL_RELEASE_OWNERSHIP, 
ReleaseOwnership), + ENTRY(NVKMS_IOCTL_GRANT_PERMISSIONS, GrantPermissions), + ENTRY(NVKMS_IOCTL_ACQUIRE_PERMISSIONS, AcquirePermissions), + ENTRY(NVKMS_IOCTL_REVOKE_PERMISSIONS, RevokePermissions), + ENTRY(NVKMS_IOCTL_QUERY_DPY_CRC32, QueryDpyCRC32), + ENTRY(NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO, + RegisterDeferredRequestFifo), + ENTRY(NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO, + UnregisterDeferredRequestFifo), + ENTRY(NVKMS_IOCTL_SWITCH_MUX, SwitchMux), + ENTRY(NVKMS_IOCTL_GET_MUX_STATE, GetMuxState), + ENTRY(NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, ExportVrrSemaphoreSurface), + ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, EnableVblankSyncObject), + ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, DisableVblankSyncObject), + }; + + struct NvKmsPerOpen *pOpen = pOpenVoid; + void *pParamsKernelPointer; + NvBool ret; + enum NvKmsIoctlCommand cmd = cmdOpaque; + void *pExtraUserState = NULL; + + if (!AssignNvKmsPerOpenType(pOpen, NvKmsPerOpenTypeIoctl, TRUE)) { + return FALSE; + } + + if (cmd >= ARRAY_LEN(dispatch)) { + return FALSE; + } + + if (dispatch[cmd].proc == NULL) { + return FALSE; + } + + if (paramSize != dispatch[cmd].paramSize) { + return FALSE; + } + + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { + pParamsKernelPointer = nvCalloc(1, paramSize + dispatch[cmd].extraSize); + if (pParamsKernelPointer == NULL) { + return FALSE; + } + + if (dispatch[cmd].requestSize > 0) { + int status = + nvkms_copyin((char *) pParamsKernelPointer + + dispatch[cmd].requestOffset, + paramsAddress + dispatch[cmd].requestOffset, + dispatch[cmd].requestSize); + if (status != 0) { + nvFree(pParamsKernelPointer); + return FALSE; + } + } + + if (dispatch[cmd].prepUser) { + pExtraUserState = (char *)pParamsKernelPointer + paramSize; + + if (!dispatch[cmd].prepUser(pParamsKernelPointer, + pExtraUserState)) { + nvFree(pParamsKernelPointer); + return FALSE; + } + } + } else { + pParamsKernelPointer = nvKmsNvU64ToPointer(paramsAddress); + } + + ret = dispatch[cmd].proc(pOpen, pParamsKernelPointer); + + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { + + if (dispatch[cmd].doneUser) { + pExtraUserState = (char *)pParamsKernelPointer + paramSize; + + if (!dispatch[cmd].doneUser(pParamsKernelPointer, + pExtraUserState)) { + ret = FALSE; + } + } + + if (dispatch[cmd].replySize > 0) { + int status = + nvkms_copyout(paramsAddress + dispatch[cmd].replyOffset, + (char *) pParamsKernelPointer + + dispatch[cmd].replyOffset, + dispatch[cmd].replySize); + if (status != 0) { + ret = FALSE; + } + } + + nvFree(pParamsKernelPointer); + } + + return ret; +} + + +/*! + * Close callback. + * + * \param[in,out] pOpenVoid The per-open data, allocated by nvKmsOpen(). + */ +void nvKmsClose(void *pOpenVoid) +{ + struct NvKmsPerOpen *pOpen = pOpenVoid; + + if (pOpen == NULL) { + return; + } + + /* + * First remove the pOpen from global tracking. Otherwise, assertions can + * fail in the free paths below -- the assertions check that the object + * being freed is not tracked by any pOpen. 
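+ * (The nvListDel() below detaches perOpenListEntry before any of the
+ * type-specific teardown runs.)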
+ */ + nvListDel(&pOpen->perOpenListEntry); + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenEventListEntry *pEntry, *pEntryTmp; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + FreeDeviceReference(pOpen, pOpenDev); + } + + nvEvoDestroyApiHandles(&pOpen->ioctl.frameLockHandles); + + nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); + + nvListForEachEntry_safe(pEntry, pEntryTmp, + &pOpen->ioctl.eventList, eventListEntry) { + nvListDel(&pEntry->eventListEntry); + nvFree(pEntry); + } + + nvListDel(&pOpen->perOpenIoctlListEntry); + } + + if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { + nvAssert(pOpen->grantSurface.pSurfaceEvo != NULL); + nvEvoDecrementSurfaceStructRefCnt(pOpen->grantSurface.pSurfaceEvo); + } + + if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { + nvAssert(pOpen->grantSwapGroup.pSwapGroup != NULL); + } + + if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { + nvRemoveUnicastEvent(pOpen); + } + + nvFree(pOpen); +} + + +/*! + * Open callback. + * + * Allocate, initialize, and return an opaque pointer to an NvKmsPerOpen. + * + * \return If successful, return an NvKmsPerOpen pointer. Otherwise, + * return NULL. + */ +void *nvKmsOpen( + NvU32 pid, + enum NvKmsClientType clientType, + nvkms_per_open_handle_t *pOpenKernel) +{ + struct NvKmsPerOpen *pOpen = nvCalloc(1, sizeof(*pOpen)); + + if (pOpen == NULL) { + goto fail; + } + + pOpen->pid = pid; + pOpen->clientType = clientType; + pOpen->type = NvKmsPerOpenTypeUndefined; + pOpen->pOpenKernel = pOpenKernel; + + nvListAppend(&pOpen->perOpenListEntry, &perOpenList); + + return pOpen; + +fail: + nvKmsClose(pOpen); + return NULL; +} + +extern const char *const pNV_KMS_ID; + +#if NVKMS_PROCFS_ENABLE + +static const char *ProcFsPerOpenTypeString( + enum NvKmsPerOpenType type) +{ + switch (type) { + case NvKmsPerOpenTypeIoctl: return "ioctl"; + case NvKmsPerOpenTypeGrantSurface: return "grantSurface"; + case NvKmsPerOpenTypeGrantSwapGroup: return "grantSwapGroup"; + case NvKmsPerOpenTypeGrantPermissions: return "grantPermissions"; + case NvKmsPerOpenTypeUnicastEvent: return "unicastEvent"; + case NvKmsPerOpenTypeUndefined: return "undefined"; + } + + return "unknown"; +} + +static const char *ProcFsUnicastEventTypeString( + enum NvKmsUnicastEventType type) +{ + switch (type) { + case NvKmsUnicastEventTypeDeferredRequest: return "DeferredRequest"; + case NvKmsUnicastEventTypeUndefined: return "undefined"; + } + + return "unknown"; +} + +static const char *ProcFsPerOpenClientTypeString( + enum NvKmsClientType clientType) +{ + switch (clientType) { + case NVKMS_CLIENT_USER_SPACE: return "user-space"; + case NVKMS_CLIENT_KERNEL_SPACE: return "kernel-space"; + } + + return "unknown"; +} + +static const char *ProcFsPermissionsTypeString( + enum NvKmsPermissionsType permissionsType) +{ + switch (permissionsType) { + case NV_KMS_PERMISSIONS_TYPE_FLIPPING: return "flipping"; + case NV_KMS_PERMISSIONS_TYPE_MODESET: return "modeset"; + } + + return "unknown"; +} + +static void +ProcFsPrintClients( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + NVEvoInfoStringRec infoString; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + const char *extra = ""; + + nvInitInfoString(&infoString, buffer, size); + + if (pOpen == nvEvoGlobal.nvKmsPerOpen) { + extra = " (NVKMS-internal client)"; + } + + nvEvoLogInfoString(&infoString, + "Client 
(pOpen) : %p", pOpen); + nvEvoLogInfoString(&infoString, + " pid : %d%s", pOpen->pid, extra); + nvEvoLogInfoString(&infoString, + " clientType : %s", + ProcFsPerOpenClientTypeString(pOpen->clientType)); + nvEvoLogInfoString(&infoString, + " type : %s", + ProcFsPerOpenTypeString(pOpen->type)); + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + nvEvoLogInfoString(&infoString, + " NvKmsDeviceHandle : %d", deviceHandle); + } + + } else if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { + + NVSurfaceEvoPtr pSurfaceEvo = pOpen->grantSurface.pSurfaceEvo; + + nvEvoLogInfoString(&infoString, + " pSurfaceEvo : %p", pSurfaceEvo); + + } else if (pOpen->type == NvKmsPerOpenTypeGrantPermissions) { + + NVDevEvoPtr pDevEvo = pOpen->grantPermissions.pDevEvo; + const struct NvKmsPermissions *pPerms = + &pOpen->grantPermissions.permissions; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + + nvEvoLogInfoString(&infoString, + " PermissionsType : %s", + ProcFsPermissionsTypeString(pPerms->type)); + + if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { + + const NvU8 layerMask = + pPerms->flip.disp[d].head[h].layerMask; + + if (layerMask == 0) { + continue; + } + + nvEvoLogInfoString(&infoString, + " disp:%02d, head:%02d : 0x%08x", d, h, + layerMask); + } + } + } else if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { + + NVDpyIdList dpyIdList = + pPerms->modeset.disp[d].head[h].dpyIdList; + NVDispEvoPtr pDispEvo; + char *dpys; + + if (nvDpyIdListIsEmpty(dpyIdList)) { + continue; + } + + pDispEvo = pDevEvo->pDispEvo[d]; + + dpys = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + + if (dpys == NULL) { + continue; + } + + nvEvoLogInfoString(&infoString, + " disp:%02d, head:%02d : %s", d, h, dpys); + + nvFree(dpys); + } + } + } + } else if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { + + NVDevEvoPtr pDevEvo = pOpen->grantSwapGroup.pDevEvo; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + nvEvoLogInfoString(&infoString, + " pSwapGroup : %p", + pOpen->grantSwapGroup.pSwapGroup); + + } else if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { + nvEvoLogInfoString(&infoString, + " unicastEvent type : %s", + ProcFsUnicastEventTypeString(pOpen->unicastEvent.type)); + switch(pOpen->unicastEvent.type) { + case NvKmsUnicastEventTypeDeferredRequest: + nvEvoLogInfoString(&infoString, + " pDeferredRequestFifo : %p", + pOpen->unicastEvent.e.deferred.pDeferredRequestFifo); + break; + default: + break; + } + } + + nvEvoLogInfoString(&infoString, ""); + outString(data, buffer); + } +} + +static void PrintSurfacePlanes( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + NvU8 planeIndex; + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + nvEvoLogInfoString(pInfoString, + "plane[%u] disp ctxDma:0x%08x pitch:%u offset:%" NvU64_fmtu + " rmObjectSizeInBytes:%" NvU64_fmtu, + planeIndex, + 
pSurfaceEvo->planes[planeIndex].ctxDma, + pSurfaceEvo->planes[planeIndex].pitch, + pSurfaceEvo->planes[planeIndex].offset, + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes); + } +} + +static void PrintSurfaceClients( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo, + const NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pTmpSurfaceEvo; + + if (pOpenDev->pDevEvo != pDevEvo) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pTmpSurfaceEvo, surfaceHandle) { + if (pTmpSurfaceEvo != pSurfaceEvo) { + continue; + } + + nvEvoLogInfoString(pInfoString, + " pOpen : %p", pOpen); + nvEvoLogInfoString(pInfoString, + " pOpenDev : %p", pOpenDev); + nvEvoLogInfoString(pInfoString, + " NvKmsSurfaceHandle : %d", surfaceHandle); + } + } + } +} + +static void PrintSurface( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo, + const NVDevEvoRec *pDevEvo) +{ + NvU32 sd; + + nvEvoLogInfoString(pInfoString, + "pSurfaceEvo : %p", pSurfaceEvo); + nvEvoLogInfoString(pInfoString, + " pDevEvo (deviceId:%02d) : %p", pDevEvo->deviceId, pDevEvo); + nvEvoLogInfoString(pInfoString, + " owner : " + "pOpenDev:%p, NvKmsSurfaceHandle:%d", + pSurfaceEvo->owner.pOpenDev, + pSurfaceEvo->owner.surfaceHandle); + nvEvoLogInfoString(pInfoString, + " {width,height}InPixels : %d x %d", + pSurfaceEvo->widthInPixels, + pSurfaceEvo->heightInPixels); + nvEvoLogInfoString(pInfoString, + " misc : " + "log2GobsPerBlockY:%d", + pSurfaceEvo->log2GobsPerBlockY); + nvEvoLogInfoString(pInfoString, + " memory : layout:%s format:%s", + NvKmsSurfaceMemoryLayoutToString(pSurfaceEvo->layout), + nvKmsSurfaceMemoryFormatToString(pSurfaceEvo->format)); + nvEvoLogInfoString(pInfoString, + " refCnts : " + "rmRefCnt:%" NvU64_fmtx" structRefCnt:%" NvU64_fmtx, + pSurfaceEvo->rmRefCnt, + pSurfaceEvo->structRefCnt); + + PrintSurfacePlanes(pInfoString, pSurfaceEvo); + + nvEvoLogInfoString(pInfoString, + " clients :"); + + PrintSurfaceClients(pInfoString, pSurfaceEvo, pDevEvo); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pSurfaceEvo->cpuAddress[sd] != NULL) { + nvEvoLogInfoString(pInfoString, + " cpuAddress[%02d] : %p", + sd, pSurfaceEvo->cpuAddress[sd]); + } + } + + nvEvoLogInfoString(pInfoString, ""); +} + +static void +ProcFsPrintSurfaces( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + NVEvoInfoStringRec infoString; + NvU32 i; + + for (i = 0; i < 2; i++) { + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, + surfaceHandle) { + /* + * Because clients can grant surfaces between each + * other, a pSurfaceEvo could be in multiple clients' + * lists. So, we loop over all surfaces on all clients + * twice: the first time we print unique surfaces and set + * 'procFsFlag' to recognize duplicates. The second time, + * we clear 'procFsFlag'. 
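+ *
+ * As a hypothetical illustration: if clients A and B both hold
+ * handles to surface S, pass one prints S when it is first
+ * reached through A and sets S->procFsFlag, so reaching S again
+ * through B is skipped; pass two clears the flag so that the
+ * next read of this procfs file starts from a clean slate.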
+ */ + if (i == 0) { + if (pSurfaceEvo->procFsFlag) { + continue; + } + + nvInitInfoString(&infoString, buffer, size); + PrintSurface(&infoString, pSurfaceEvo, + pOpenDev->pDevEvo); + outString(data, buffer); + + pSurfaceEvo->procFsFlag = TRUE; + } else { + pSurfaceEvo->procFsFlag = FALSE; + } + } + } + } + } +} + +static const char *SwapGroupPerEyeStereoString(const NvU32 request) +{ + const NvU32 value = + DRF_VAL(KMS, _DEFERRED_REQUEST, + _SWAP_GROUP_READY_PER_EYE_STEREO, request); + + switch (value) { + + case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR: + return "PerPair"; + case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE: + return "PerEye"; + } + + return "Unknown"; +} + +static void ProcFsPrintOneDeferredRequestFifo( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString, + const NVDeferredRequestFifoRec *pDeferredRequestFifo, + const struct NvKmsPerOpen *pOpen, + const struct NvKmsPerOpenDev *pOpenDev, + const NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle) +{ + NVEvoInfoStringRec infoString; + + const struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; + NvU32 i, prevI; + + nvInitInfoString(&infoString, buffer, size); + + nvEvoLogInfoString(&infoString, + "pDeferredRequestFifo : %p", pDeferredRequestFifo); + + nvEvoLogInfoString(&infoString, + " Client (pOpen) : %p", pOpen); + + nvEvoLogInfoString(&infoString, + " pOpenDev : %p", pOpenDev); + + nvEvoLogInfoString(&infoString, + " pSurfaceEvo : %p", pDeferredRequestFifo->pSurfaceEvo); + + nvEvoLogInfoString(&infoString, + " NvKms...RequestFifoHandle : %d", deferredRequestFifoHandle); + + if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) { + + nvEvoLogInfoString(&infoString, + " swapGroup :"); + nvEvoLogInfoString(&infoString, + " pSwapGroup : %p", + pDeferredRequestFifo->swapGroup.pSwapGroup); + nvEvoLogInfoString(&infoString, + " pOpenUnicastEvent : %p", + pDeferredRequestFifo->swapGroup.pOpenUnicastEvent); + nvEvoLogInfoString(&infoString, + " ready : %d", + pDeferredRequestFifo->swapGroup.ready); + nvEvoLogInfoString(&infoString, + " semaphoreIndex : 0x%02x", + pDeferredRequestFifo->swapGroup.semaphoreIndex); + } + + nvEvoLogInfoString(&infoString, + " put : %d", fifo->put); + + nvEvoLogInfoString(&infoString, + " get : %d", fifo->get); + + outString(data, buffer); + + for (i = 0; i < ARRAY_LEN(fifo->request); i++) { + + const NvU32 request = fifo->request[i]; + const NvU32 opcode = DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); + const NvU32 semaphoreIndex = + DRF_VAL(KMS, _DEFERRED_REQUEST, _SEMAPHORE_INDEX, request); + + switch (opcode) { + + case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: + break; + + case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY: + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " request[0x%02x] : " + "opcode:SWAP_GROUP_READY, semaphoreIndex:0x%02x, " + "perEyeStereo:%s", + i, semaphoreIndex, + SwapGroupPerEyeStereoString(request)); + outString(data, buffer); + break; + + default: + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " request[0x%02x] : opcode:INVALID", i); + outString(data, buffer); + break; + } + } + + /* + * Print the fifo->semaphore[] array, but collapse multiple lines with + * duplicate values. + * + * To collapse duplicates, loop over all semaphore[] elements. If the + * current element is the same as semaphore[prev], continue. If they + * differ, print the value in semaphore[prev .. i-1], and update prev. 
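+ *
+ * A hypothetical worked example: if semaphore[0x00..0x02] all hold
+ * 0x00000001 and semaphore[0x03] holds 0x00000002, the loop below
+ * prints:
+ *
+ *   semaphore[0x00..0x02] : 0x00000001
+ *   semaphore[0x03] : 0x00000002
+ *
+ * Note the loop deliberately iterates one index past the end of the
+ * array so that the final run is flushed; the i != ARRAY_LEN() test
+ * guards against reading out of bounds on that last iteration.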
+ */ + prevI = 0; + + for (i = 1; i <= ARRAY_LEN(fifo->semaphore); i++) { + + const NvU32 prevValue = fifo->semaphore[prevI].data[0]; + + if (i != ARRAY_LEN(fifo->semaphore)) { + const NvU32 currentValue = fifo->semaphore[i].data[0]; + + /* + * If the value in this element matches the previous element, don't + * print anything, yet. + */ + if (currentValue == prevValue) { + continue; + } + } + + nvInitInfoString(&infoString, buffer, size); + + if (prevI == (i - 1)) { + nvEvoLogInfoString(&infoString, + " semaphore[0x%02x] : 0x%08x", + prevI, prevValue); + } else { + nvEvoLogInfoString(&infoString, + " semaphore[0x%02x..0x%02x] : 0x%08x", + prevI, i - 1, prevValue); + } + + outString(data, buffer); + + prevI = i; + } + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, ""); + outString(data, buffer); +} + +static void +ProcFsPrintDeferredRequestFifos( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle devHandle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpen->ioctl.devHandles, + pOpenDev, devHandle) { + + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle fifoHandle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, fifoHandle) { + + ProcFsPrintOneDeferredRequestFifo( + data, buffer, size, outString, + pDeferredRequestFifo, + pOpen, + pOpenDev, + fifoHandle); + } + } + } +} + +static void +ProcFsPrintDpyCrcs( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + NVDevEvoPtr pDevEvo; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, head; + NVEvoInfoStringRec infoString; + + FOR_ALL_EVO_DEVS(pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + "pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + outString(data, buffer); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " pDispEvo (dispIndex:%02d) : %p", + dispIndex, pDispEvo); + outString(data, buffer); + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + struct NvKmsDpyCRC32 compCrc; + struct NvKmsDpyCRC32 rgCrc; + struct NvKmsDpyCRC32 outputCrc; + CRC32NotifierCrcOut crcOut; + crcOut.compositorCrc32 = &compCrc; + crcOut.rasterGeneratorCrc32 = &rgCrc; + crcOut.outputCrc32 = &outputCrc; + + if (pHeadState->pConnectorEvo == NULL) { + continue; + } + + nvInitInfoString(&infoString, buffer, size); + if (nvReadCRC32Evo(pDispEvo, head, &crcOut)) { + nvEvoLogInfoString(&infoString, + " head %d :", + head); + if (compCrc.supported) { + nvEvoLogInfoString(&infoString, + " compositor CRC : 0x%08x", + compCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " compositor CRC : unsupported"); + } + if (rgCrc.supported) { + nvEvoLogInfoString(&infoString, + " raster generator CRC : 0x%08x", + rgCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " raster generator CRC : unsupported"); + } + if (outputCrc.supported) { + nvEvoLogInfoString(&infoString, + " output CRC : 0x%08x", + outputCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " output CRC : unsupported"); + } + } else { + nvEvoLogInfoString(&infoString, + " head %d : error", + head); + } + outString(data, buffer); 
+ } + } + } +} + +#endif /* NVKMS_PROCFS_ENABLE */ + +void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles) +{ +#if NVKMS_PROCFS_ENABLE + static const nvkms_procfs_file_t procFiles[] = { + { "clients", ProcFsPrintClients }, + { "surfaces", ProcFsPrintSurfaces }, + { "deferred-request-fifos", ProcFsPrintDeferredRequestFifos }, + { "crcs", ProcFsPrintDpyCrcs }, + { NULL, NULL }, + }; + + *ppProcFiles = procFiles; +#else + *ppProcFiles = NULL; +#endif +} + +static void FreeGlobalState(void) +{ + nvKmsClose(nvEvoGlobal.nvKmsPerOpen); + nvEvoGlobal.nvKmsPerOpen = NULL; + + if (nvEvoGlobal.clientHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle); + nvEvoGlobal.clientHandle = 0; + } +} + +NvBool nvKmsModuleLoad(void) +{ + NvU32 ret = NVOS_STATUS_ERROR_GENERIC; + + nvEvoLog(EVO_LOG_INFO, "Loading %s", pNV_KMS_ID); + + ret = nvRmApiAlloc(NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &nvEvoGlobal.clientHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLog(EVO_LOG_ERROR, "Failed to initialize client"); + goto fail; + } + + nvEvoGlobal.nvKmsPerOpen = nvKmsOpen(0, NVKMS_CLIENT_KERNEL_SPACE, NULL); + if (!nvEvoGlobal.nvKmsPerOpen) { + nvEvoLog(EVO_LOG_ERROR, "Failed to initialize internal modeset client"); + goto fail; + } + + if (!AssignNvKmsPerOpenType(nvEvoGlobal.nvKmsPerOpen, + NvKmsPerOpenTypeIoctl, FALSE)) { + goto fail; + } + + return TRUE; +fail: + FreeGlobalState(); + + return FALSE; +} + + +void nvKmsModuleUnload(void) +{ + FreeGlobalState(); + + nvAssert(nvListIsEmpty(&nvEvoGlobal.frameLockList)); + nvAssert(nvListIsEmpty(&nvEvoGlobal.devList)); +#if defined(DEBUG) + nvReportUnfreedAllocations(); +#endif + nvEvoLog(EVO_LOG_INFO, "Unloading"); +} + + +static void SendEvent(struct NvKmsPerOpen *pOpen, + const struct NvKmsEvent *pEvent) +{ + struct NvKmsPerOpenEventListEntry *pEntry = nvAlloc(sizeof(*pEntry)); + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pEntry == NULL) { + return; + } + + pEntry->event = *pEvent; + nvListAppend(&pEntry->eventListEntry, &pOpen->ioctl.eventList); + + nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE); +} + +static void ConsoleRestoreTimerFired(void *dataPtr, NvU32 dataU32) +{ + NVDevEvoPtr pDevEvo = dataPtr; + + if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) { + pDevEvo->skipConsoleRestore = FALSE; + nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */); + } +} + +/*! + * Generate a dpy event. + * + * \param[in] pDpyEvo The dpy for which the event should be generated. + * \param[in] eventType The NVKMS_EVENT_TYPE_ + * \param[in] attribute The NvKmsDpyAttribute; only used for + * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED. + * \param[in] value The NvKmsDpyAttribute value; only used for + * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED. 
+ */ +static void SendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, + const NvU32 eventType, + const enum NvKmsDpyAttribute attribute, + const NvS64 value) +{ + struct NvKmsPerOpen *pOpen; + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + + if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo, + &deviceHandle, &dispHandle)) { + continue; + } + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + event.eventType = eventType; + + switch (eventType) { + + case NVKMS_EVENT_TYPE_DPY_CHANGED: + event.u.dpyChanged.deviceHandle = deviceHandle; + event.u.dpyChanged.dispHandle = dispHandle; + event.u.dpyChanged.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + event.u.dynamicDpyConnected.deviceHandle = deviceHandle; + event.u.dynamicDpyConnected.dispHandle = dispHandle; + event.u.dynamicDpyConnected.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED: + event.u.dynamicDpyDisconnected.deviceHandle = deviceHandle; + event.u.dynamicDpyDisconnected.dispHandle = dispHandle; + event.u.dynamicDpyDisconnected.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED: + event.u.dpyAttributeChanged.deviceHandle = deviceHandle; + event.u.dpyAttributeChanged.dispHandle = dispHandle; + event.u.dpyAttributeChanged.dpyId = pDpyEvo->id; + event.u.dpyAttributeChanged.attribute = attribute; + event.u.dpyAttributeChanged.value = value; + break; + + default: + nvAssert(!"Bad eventType"); + return; + } + + SendEvent(pOpen, &event); + } + + if (eventType == NVKMS_EVENT_TYPE_DPY_CHANGED) { + NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) { + nvkms_free_timer(pDevEvo->consoleRestoreTimer); + pDevEvo->consoleRestoreTimer = + nvkms_alloc_timer(ConsoleRestoreTimerFired, pDevEvo, 0, 500); + } + } +} + +void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType) +{ + nvAssert(eventType != NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED); + SendDpyEventEvo(pDpyEvo, eventType, + 0 /* attribute (unused) */, + 0 /* value (unused) */ ); +} + +void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttribute attribute, + const NvS64 value) +{ + SendDpyEventEvo(pDpyEvo, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + attribute, value); +} + +void nvSendFrameLockAttributeChangedEventEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + const enum NvKmsFrameLockAttribute attribute, + const NvS64 value) +{ + struct NvKmsPerOpen *pOpen; + const NvU32 eventType = NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsFrameLockHandle frameLockHandle; + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + if (!FrameLockEvoToFrameLockHandle(pOpen, pFrameLockEvo, + &frameLockHandle)) { + continue; + } + + event.eventType = eventType; + event.u.frameLockAttributeChanged.frameLockHandle = frameLockHandle; + event.u.frameLockAttributeChanged.attribute = attribute; + event.u.frameLockAttributeChanged.value = value; + + SendEvent(pOpen, &event); + } +} + + +void nvSendFlipOccurredEventEvo( + const NVDevEvoRec *pDevEvo, + NVEvoChannelMask channelMask) +{ + struct NvKmsPerOpen *pOpen; + const NvU32 eventType = 
NVKMS_EVENT_TYPE_FLIP_OCCURRED; + const NvU32 dispIndex = 0; /* XXX NVKMS TODO: need disp-scope in event */ + const NVDispEvoRec *pDispEvo = pDevEvo->pDispEvo[dispIndex]; + NvU32 head, layer; + + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(channelMask) == 1); + + for (head = 0; head < pDevEvo->numHeads; head++) { + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pDevEvo->head[head].layer[layer]->channelMask == channelMask) { + break; + } + } + + if (layer < pDevEvo->head[head].numLayers) { + break; + } + } + + if (head >= pDevEvo->numHeads) { + nvAssert(!"Bad channelMask"); + return; + } + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + + struct NvKmsPerOpenDev *pOpenDev; + const struct NvKmsFlipPermissions *pFlipPermissions; + + pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + if (pOpenDev == NULL) { + continue; + } + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + pFlipPermissions = &pOpenDev->flipPermissions; + + if ((pFlipPermissions->disp[dispIndex].head[head].layerMask & + NVBIT(layer)) == 0x0) { + continue; + } + + if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo, + &deviceHandle, &dispHandle)) { + continue; + } + + event.eventType = eventType; + event.u.flipOccurred.deviceHandle = deviceHandle; + event.u.flipOccurred.dispHandle = dispHandle; + event.u.flipOccurred.head = head; + event.u.flipOccurred.layer = layer; + + SendEvent(pOpen, &event); + } +} + +void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen) +{ + if (pOpen == NULL) { + return; + } + + nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent); + nvAssert(pOpen->unicastEvent.type != NvKmsUnicastEventTypeUndefined); + + nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE); +} + +void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen) +{ + NVDeferredRequestFifoPtr pDeferredRequestFifo; + + if (pOpen == NULL) { + return; + } + + nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent); + + switch(pOpen->unicastEvent.type) + { + case NvKmsUnicastEventTypeDeferredRequest: + pDeferredRequestFifo = + pOpen->unicastEvent.e.deferred.pDeferredRequestFifo; + + pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = NULL; + pOpen->unicastEvent.e.deferred.pDeferredRequestFifo = NULL; + break; + default: + nvAssert(!"Invalid Unicast Event Type!"); + break; + } + + pOpen->unicastEvent.type = NvKmsUnicastEventTypeUndefined; +} + +static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, surfaceHandle) { + + NvU8 planeIndex; + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + continue; + } + + if (!pSurfaceEvo->requireCtxDma) { + nvAssert(pSurfaceEvo->planes[0].ctxDma == 0); + continue; + } + + /* + * Orphan surfaces should not get this far: they should + * fail the owner check above. 
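+ *
+ * (An orphan surface is, for example, one whose owner unregistered
+ * it while another client still held a granted reference: its RM
+ * resources are gone, so no ctx dma should be reallocated for it.)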
+ */ + nvAssert(pSurfaceEvo->rmRefCnt > 0); + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + pSurfaceEvo->planes[planeIndex].ctxDma = + nvRmEvoAllocateAndBindDispContextDMA( + pDevEvo, + pSurfaceEvo->planes[planeIndex].rmHandle, + pSurfaceEvo->layout, + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes - 1); + if (!pSurfaceEvo->planes[planeIndex].ctxDma) { + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + nvAssert(!"Failed to re-allocate surface ctx dma"); + return; + } + } + } + } +} + + +static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, surfaceHandle) { + + NvU8 planeIndex; + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + continue; + } + + /* + * Orphan surfaces should not get this far: they should + * fail the owner check above. + */ + nvAssert(pSurfaceEvo->rmRefCnt > 0); + + if (!pSurfaceEvo->requireCtxDma) { + nvAssert(pSurfaceEvo->planes[0].ctxDma == 0); + continue; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + nvRmEvoFreeDispContextDMA( + pDevEvo, + &pSurfaceEvo->planes[planeIndex].ctxDma); + } + } + } +} + +#if defined(DEBUG) +NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + + NvKmsGenericHandle surfaceHandleUnused; + NVSurfaceEvoPtr pSurfaceEvoTmp; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvoTmp, + surfaceHandleUnused) { + if (pSurfaceEvoTmp == pSurfaceEvo) { + return TRUE; + } + } + } + } else if ((pOpen->type == NvKmsPerOpenTypeGrantSurface) && + (pOpen->grantSurface.pSurfaceEvo == pSurfaceEvo)) { + return TRUE; + } + } + + return FALSE; +} +#endif + +NVDevEvoPtr nvGetDevEvoFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return pOpenDev->pDevEvo; +} + +const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return &pOpenDev->flipPermissions; +} + +const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return &pOpenDev->modesetPermissions; +} + +NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev( + struct NvKmsPerOpenDev *pOpenDev) +{ + if (pOpenDev == NULL) { + return NULL; + } + + return &pOpenDev->surfaceHandles; +} + +const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst( + const struct NvKmsPerOpenDev *pOpenDev) +{ + if (pOpenDev == NULL) { + return NULL; + } + + return &pOpenDev->surfaceHandles; +} + +static int suspendCounter = 0; + +/* + * Suspend NVKMS. + * + * This function is called by RM once per GPU, but NVKMS just counts the number + * of suspend calls so that it can deallocate the core channels on the first + * call to suspend(), and reallocate them on the last call to resume(). 
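+ *
+ * As a concrete example, on a two-GPU system RM calls suspend()
+ * twice: the heads are shut down and the core channels freed only on
+ * the first call (while suspendCounter is still zero), and resume()
+ * reallocates them only on its second call, when the counter drops
+ * back to zero.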
+ */ +void nvKmsSuspend(NvU32 gpuId) +{ + if (suspendCounter == 0) { + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Suspending"); + + /* + * Shut down all heads and skip console restore. + * + * This works around an RM bug where it fails to train DisplayPort + * links during resume if the system was suspended while heads were + * active. + * + * XXX TODO bug 1850734: In addition to fixing the above + * RM bug, NVKMS should clear pDispEvo head and connector state + * that becomes stale after suspend. Shutting the heads down here + * clears the relevant state explicitly. + */ + nvShutDownHeads(pDevEvo, + NULL /* pTestFunc, shut down all heads */); + pDevEvo->skipConsoleRestore = TRUE; + + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + + nvFreeCoreChannelEvo(pDevEvo); + } + } + + suspendCounter++; +} + +void nvKmsResume(NvU32 gpuId) +{ + suspendCounter--; + + if (suspendCounter == 0) { + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming"); + + nvRestoreSORAssigmentsEvo(pDevEvo); + + ReallocCoreChannel(pDevEvo); + + if (pDevEvo->modesetOwner == NULL) { + // Hardware state was lost, so we need to force a console + // restore. + pDevEvo->skipConsoleRestore = TRUE; + RestoreConsole(pDevEvo); + } + } + } +} + +static void ServiceOneDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo) +{ + struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; + NvU32 get, put; + + nvAssert(fifo != NULL); + + get = fifo->get; + put = fifo->put; + + if (put == get) { + return; + } + + if ((get >= ARRAY_LEN(fifo->request)) || + (put >= ARRAY_LEN(fifo->request))) { + return; + } + + while (get != put) { + + const NvU32 request = fifo->request[get]; + const NvU32 opcode = + DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); + + switch (opcode) { + + case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: + break; + + default: + nvAssert(!"Invalid NVKMS deferred request opcode"); + break; + } + + get = (get + 1) % ARRAY_LEN(fifo->request); + } + + fifo->get = put; +} + +/*! + * Respond to a non-stall interrupt. 
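+ *
+ * This walks every ioctl client's deferred request FIFOs and services
+ * any pending entries (see ServiceOneDeferredRequestFifo() above).
+ *
+ * A hypothetical client-side sketch of the ring protocol being
+ * serviced here (the Enqueue() helper is illustrative only, not part
+ * of NVKMS; the field names are from struct NvKmsDeferredRequestFifo):
+ *
+ *   static void Enqueue(struct NvKmsDeferredRequestFifo *fifo, NvU32 req)
+ *   {
+ *       NvU32 put = fifo->put;
+ *
+ *       // A real client must also check that the ring is not full,
+ *       // i.e. that advancing 'put' would not collide with 'get'.
+ *       fifo->request[put] = req;
+ *       fifo->put = (put + 1) % ARRAY_LEN(fifo->request);
+ *
+ *       // ...then trigger a non-stall interrupt so that NVKMS
+ *       // eventually lands in nvKmsServiceNonStallInterrupt().
+ *   }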
+ */ +void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32) +{ + NVDevEvoPtr pDevEvo = dataPtr; + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle handle; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, + handle) { + + ServiceOneDeferredRequestFifo(pDevEvo, pDeferredRequestFifo); + } + } + +} + +NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NV_STATUS status = NV_ERR_INVALID_STATE; + NVDispEvoRec *pDispEvo = drv_priv; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = display_id; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + if (status == NV_OK) { + *brightness = params.brightness; + } + + return status == NV_OK; +} + +NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NV_STATUS status = NV_ERR_INVALID_STATE; + NVDispEvoRec *pDispEvo = drv_priv; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = display_id; + params.brightness = brightness; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, + &params, sizeof(params)); + + return status == NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/srcs.mk b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/srcs.mk new file mode 100644 index 0000000..8754b44 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/srcs.mk @@ -0,0 +1,181 @@ +SRCS ?= +SRCS_CXX ?= + +SRCS += ../common/shared/nvstatus/nvstatus.c +SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c +SRCS += ../common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c +SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c +SRCS += ../common/softfloat/source/8086-SSE/softfloat_raiseFlags.c +SRCS += ../common/softfloat/source/f32_add.c +SRCS += ../common/softfloat/source/f32_div.c +SRCS += ../common/softfloat/source/f32_eq.c +SRCS += ../common/softfloat/source/f32_eq_signaling.c +SRCS += ../common/softfloat/source/f32_isSignalingNaN.c +SRCS += ../common/softfloat/source/f32_le.c +SRCS += ../common/softfloat/source/f32_le_quiet.c +SRCS += ../common/softfloat/source/f32_lt.c +SRCS += ../common/softfloat/source/f32_lt_quiet.c +SRCS += ../common/softfloat/source/f32_mul.c +SRCS += ../common/softfloat/source/f32_mulAdd.c +SRCS += ../common/softfloat/source/f32_rem.c +SRCS += ../common/softfloat/source/f32_roundToInt.c +SRCS += ../common/softfloat/source/f32_sqrt.c +SRCS += ../common/softfloat/source/f32_sub.c +SRCS += ../common/softfloat/source/f32_to_f16.c +SRCS += ../common/softfloat/source/f32_to_f64.c +SRCS += 
../common/softfloat/source/f32_to_i32.c +SRCS += ../common/softfloat/source/f32_to_i32_r_minMag.c +SRCS += ../common/softfloat/source/f32_to_i64.c +SRCS += ../common/softfloat/source/f32_to_i64_r_minMag.c +SRCS += ../common/softfloat/source/f32_to_ui32.c +SRCS += ../common/softfloat/source/f32_to_ui32_r_minMag.c +SRCS += ../common/softfloat/source/f32_to_ui64.c +SRCS += ../common/softfloat/source/f32_to_ui64_r_minMag.c +SRCS += ../common/softfloat/source/f64_add.c +SRCS += ../common/softfloat/source/f64_div.c +SRCS += ../common/softfloat/source/f64_eq.c +SRCS += ../common/softfloat/source/f64_eq_signaling.c +SRCS += ../common/softfloat/source/f64_isSignalingNaN.c +SRCS += ../common/softfloat/source/f64_le.c +SRCS += ../common/softfloat/source/f64_le_quiet.c +SRCS += ../common/softfloat/source/f64_lt.c +SRCS += ../common/softfloat/source/f64_lt_quiet.c +SRCS += ../common/softfloat/source/f64_mul.c +SRCS += ../common/softfloat/source/f64_mulAdd.c +SRCS += ../common/softfloat/source/f64_rem.c +SRCS += ../common/softfloat/source/f64_roundToInt.c +SRCS += ../common/softfloat/source/f64_sqrt.c +SRCS += ../common/softfloat/source/f64_sub.c +SRCS += ../common/softfloat/source/f64_to_f32.c +SRCS += ../common/softfloat/source/f64_to_i32.c +SRCS += ../common/softfloat/source/f64_to_i32_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_i64.c +SRCS += ../common/softfloat/source/f64_to_i64_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_ui32.c +SRCS += ../common/softfloat/source/f64_to_ui32_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_ui64.c +SRCS += ../common/softfloat/source/f64_to_ui64_r_minMag.c +SRCS += ../common/softfloat/source/i32_to_f32.c +SRCS += ../common/softfloat/source/i32_to_f64.c +SRCS += ../common/softfloat/source/i64_to_f32.c +SRCS += ../common/softfloat/source/i64_to_f64.c +SRCS += ../common/softfloat/source/s_addMagsF32.c +SRCS += ../common/softfloat/source/s_addMagsF64.c +SRCS += ../common/softfloat/source/s_approxRecipSqrt32_1.c +SRCS += ../common/softfloat/source/s_approxRecipSqrt_1Ks.c +SRCS += ../common/softfloat/source/s_countLeadingZeros64.c +SRCS += ../common/softfloat/source/s_countLeadingZeros8.c +SRCS += ../common/softfloat/source/s_mul64To128.c +SRCS += ../common/softfloat/source/s_mulAddF32.c +SRCS += ../common/softfloat/source/s_mulAddF64.c +SRCS += ../common/softfloat/source/s_normRoundPackToF32.c +SRCS += ../common/softfloat/source/s_normRoundPackToF64.c +SRCS += ../common/softfloat/source/s_normSubnormalF32Sig.c +SRCS += ../common/softfloat/source/s_normSubnormalF64Sig.c +SRCS += ../common/softfloat/source/s_roundPackToF16.c +SRCS += ../common/softfloat/source/s_roundPackToF32.c +SRCS += ../common/softfloat/source/s_roundPackToF64.c +SRCS += ../common/softfloat/source/s_roundToI32.c +SRCS += ../common/softfloat/source/s_roundToI64.c +SRCS += ../common/softfloat/source/s_roundToUI32.c +SRCS += ../common/softfloat/source/s_roundToUI64.c +SRCS += ../common/softfloat/source/s_shiftRightJam128.c +SRCS += ../common/softfloat/source/s_subMagsF32.c +SRCS += ../common/softfloat/source/s_subMagsF64.c +SRCS += ../common/softfloat/source/softfloat_state.c +SRCS += ../common/softfloat/source/ui32_to_f32.c +SRCS += ../common/softfloat/source/ui32_to_f64.c +SRCS += ../common/softfloat/source/ui64_to_f32.c +SRCS += ../common/softfloat/source/ui64_to_f64.c +SRCS_CXX += ../common/displayport/src/dp_auxretry.cpp +SRCS_CXX += ../common/displayport/src/dp_bitstream.cpp +SRCS_CXX += ../common/displayport/src/dp_buffer.cpp +SRCS_CXX += 
../common/displayport/src/dp_configcaps.cpp +SRCS_CXX += ../common/displayport/src/dp_connectorimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_crc.cpp +SRCS_CXX += ../common/displayport/src/dp_deviceimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_discovery.cpp +SRCS_CXX += ../common/displayport/src/dp_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_evoadapter.cpp +SRCS_CXX += ../common/displayport/src/dp_groupimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_guid.cpp +SRCS_CXX += ../common/displayport/src/dp_list.cpp +SRCS_CXX += ../common/displayport/src/dp_merger.cpp +SRCS_CXX += ../common/displayport/src/dp_messagecodings.cpp +SRCS_CXX += ../common/displayport/src/dp_messageheader.cpp +SRCS_CXX += ../common/displayport/src/dp_messages.cpp +SRCS_CXX += ../common/displayport/src/dp_mst_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_splitter.cpp +SRCS_CXX += ../common/displayport/src/dp_sst_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_timer.cpp +SRCS_CXX += ../common/displayport/src/dp_vrr.cpp +SRCS_CXX += ../common/displayport/src/dp_wardatabase.cpp +SRCS_CXX += ../common/displayport/src/dp_watermark.cpp +SRCS_CXX += ../common/displayport/src/dptestutil/dp_testmessage.cpp +SRCS += ../common/modeset/hdmipacket/nvhdmipkt.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_0073.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9171.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9271.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9471.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9571.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C371.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C671.c +SRCS += ../common/modeset/timing/nvt_cvt.c +SRCS += ../common/modeset/timing/nvt_displayid20.c +SRCS += ../common/modeset/timing/nvt_dmt.c +SRCS += ../common/modeset/timing/nvt_dsc_pps.c +SRCS += ../common/modeset/timing/nvt_edid.c +SRCS += ../common/modeset/timing/nvt_edidext_861.c +SRCS += ../common/modeset/timing/nvt_edidext_displayid.c +SRCS += ../common/modeset/timing/nvt_edidext_displayid20.c +SRCS += ../common/modeset/timing/nvt_gtf.c +SRCS += ../common/modeset/timing/nvt_tv.c +SRCS += ../common/modeset/timing/nvt_util.c +SRCS += ../common/unix/common/utils/nv_memory_tracker.c +SRCS += ../common/unix/common/utils/nv_mode_timings_utils.c +SRCS += ../common/unix/common/utils/nv_vasprintf.c +SRCS += ../common/unix/common/utils/unix_rm_handle.c +SRCS += kapi/src/nvkms-kapi-channelevent.c +SRCS += kapi/src/nvkms-kapi-notifiers.c +SRCS += kapi/src/nvkms-kapi.c +SRCS += lib/nvkms-format.c +SRCS += lib/nvkms-sync.c +SRCS_CXX += src/dp/nvdp-connector-event-sink.cpp +SRCS_CXX += src/dp/nvdp-connector.cpp +SRCS_CXX += src/dp/nvdp-device.cpp +SRCS_CXX += src/dp/nvdp-evo-interface.cpp +SRCS_CXX += src/dp/nvdp-host.cpp +SRCS_CXX += src/dp/nvdp-timer.cpp +SRCS += src/g_nvkms-evo-states.c +SRCS += src/nvkms-3dvision.c +SRCS += src/nvkms-attributes.c +SRCS += src/nvkms-console-restore.c +SRCS += src/nvkms-cursor.c +SRCS += src/nvkms-cursor2.c +SRCS += src/nvkms-cursor3.c +SRCS += src/nvkms-dma.c +SRCS += src/nvkms-dpy.c +SRCS += src/nvkms-event.c +SRCS += src/nvkms-evo.c +SRCS += src/nvkms-evo1.c +SRCS += src/nvkms-evo2.c +SRCS += src/nvkms-evo3.c +SRCS += src/nvkms-flip.c +SRCS += src/nvkms-framelock.c +SRCS += src/nvkms-hal.c +SRCS += src/nvkms-hdmi.c +SRCS += src/nvkms-hw-states.c +SRCS += src/nvkms-lut.c +SRCS += src/nvkms-modepool.c +SRCS += src/nvkms-modeset.c +SRCS += src/nvkms-prealloc.c +SRCS += src/nvkms-rm.c +SRCS += src/nvkms-rmapi-dgpu.c +SRCS += src/nvkms-surface.c 
+SRCS += src/nvkms-utils.c +SRCS += src/nvkms-vrr.c +SRCS += src/nvkms.c diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile new file mode 100644 index 0000000..88945c1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile @@ -0,0 +1,179 @@ +########################################################################### +# Makefile for nv-kernel.o +########################################################################### + +NV_MODULE_LOGGING_NAME ?= nvidia + +VERSION_MK_DIR = ../../ + +include ../../utils.mk + +include srcs.mk + +# The source files for nv-kernel.o are all SRCS and SRCS_CXX defined in srcs.mk, +# and the NVIDIA ID string +ALL_SRCS = $(SRCS) $(SRCS_CXX) +ALL_SRCS += $(NVIDSTRING) + +SRC_COMMON = ../common +CONDITIONAL_CFLAGS := + +CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h + +CFLAGS += -I kernel/inc +CFLAGS += -I interface +CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc +CFLAGS += -I arch/nvalloc/common/inc +CFLAGS += -I arch/nvalloc/common/inc/deprecated +CFLAGS += -I arch/nvalloc/unix/include +CFLAGS += -I inc +CFLAGS += -I inc/os +CFLAGS += -I $(SRC_COMMON)/shared/inc +CFLAGS += -I $(SRC_COMMON)/shared/msgq/inc +CFLAGS += -I $(SRC_COMMON)/inc + +CFLAGS += -I $(SRC_COMMON)/inc/swref/published + +CFLAGS += -I generated +CFLAGS += -I $(SRC_COMMON)/nvswitch/kernel/inc +CFLAGS += -I $(SRC_COMMON)/nvswitch/interface +CFLAGS += -I $(SRC_COMMON)/nvswitch/common/inc/ +CFLAGS += -I $(SRC_COMMON)/inc/displayport +CFLAGS += -I $(SRC_COMMON)/nvlink/interface/ +CFLAGS += -I src/mm/uvm/interface +CFLAGS += -I $(SRC_COMMON)/cyclestats +CFLAGS += -I inc/libraries +CFLAGS += -I src/libraries +CFLAGS += -I inc/kernel +CFLAGS += -I inc/physical + +# XXX TODO: review which of these we need for the build + +CFLAGS += -Werror-implicit-function-declaration +CFLAGS += -Wwrite-strings +#CFLAGS += -Wformat +#CFLAGS += -Wreturn-type +#CFLAGS += -Wswitch +#CFLAGS += -Wno-multichar +#CFLAGS += -Wno-unused-local-typedefs +#CFLAGS += -Wchar-subscripts +#CFLAGS += -Wparentheses +#CFLAGS += -Wpointer-arith +#CFLAGS += -Wstack-usage=3584 +#CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-overflow=1) +#CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-truncation=0) +CFLAGS += -fno-common +CFLAGS += -ffreestanding +CFLAGS += -fno-stack-protector + +ifeq ($(TARGET_ARCH),x86_64) + CFLAGS += -msoft-float + CFLAGS += -mno-red-zone + CFLAGS += -mcmodel=kernel + CFLAGS += -mno-mmx + CFLAGS += -mno-sse + CFLAGS += -mno-sse2 + CFLAGS += -mno-3dnow +endif + +ifeq ($(TARGET_ARCH),aarch64) + CFLAGS += -mgeneral-regs-only + CFLAGS += -march=armv8-a + CFLAGS += -mstrict-align + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mno-outline-atomics) +endif + +#CFLAGS += -ffunction-sections +#CFLAGS += -fdata-sections +#CFLAGS += -DDEVELOP +CFLAGS += -fno-pic + +CFLAGS += -DGL_EXPERT +CFLAGS += -DNVPMAPI +CFLAGS += -DNVCONFIG_PROFILE=unix_global_internal_profile +CFLAGS += -D_LANGUAGE_C +CFLAGS += -D__NO_CTYPE +CFLAGS += -DNVRM +CFLAGS += -DLOCK_VAL_ENABLED=0 +CFLAGS += -DPORT_ATOMIC_64_BIT_SUPPORTED=1 +CFLAGS += -DPORT_IS_KERNEL_BUILD=1 +CFLAGS += -DPORT_IS_CHECKED_BUILD=1 +CFLAGS += -DPORT_MODULE_atomic=1 +CFLAGS += -DPORT_MODULE_core=1 +CFLAGS += -DPORT_MODULE_cpu=1 +CFLAGS += -DPORT_MODULE_crypto=1 +CFLAGS += -DPORT_MODULE_debug=1 +CFLAGS += -DPORT_MODULE_memory=1 +CFLAGS += -DPORT_MODULE_safe=1 +CFLAGS += -DPORT_MODULE_string=1 +CFLAGS += -DPORT_MODULE_sync=1 +CFLAGS += -DPORT_MODULE_thread=1 +CFLAGS += 
-DPORT_MODULE_util=1 +CFLAGS += -DPORT_MODULE_example=0 +CFLAGS += -DPORT_MODULE_mmio=0 +CFLAGS += -DPORT_MODULE_time=0 +CFLAGS += -DRS_STANDALONE=0 +CFLAGS += -DRS_STANDALONE_TEST=0 +CFLAGS += -DRS_COMPATABILITY_MODE=1 +CFLAGS += -DRS_PROVIDES_API_STATE=0 +CFLAGS += -DNV_CONTAINERS_NO_TEMPLATES + +CFLAGS += -DNV_PRINTF_STRINGS_ALLOWED=1 +CFLAGS += -DNV_ASSERT_FAILED_USES_STRINGS=1 +CFLAGS += -DPORT_ASSERT_FAILED_USES_STRINGS=1 + +ifeq ($(DEBUG),1) + CFLAGS += -gsplit-dwarf +endif + +# Define how to perform dead code elimination: place each symbol in its own +# section at compile time, and garbage collect unreachable sections at link +# time. exports_link_command.txt tells the linker which symbols need to be +# exported from nv-kernel.o so the linker can determine which symbols are +# unreachable. +CFLAGS += -ffunction-sections +CFLAGS += -fdata-sections +NV_KERNEL_O_LDFLAGS += --gc-sections +EXPORTS_LINK_COMMAND = exports_link_command.txt + +CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fcf-protection=none) + +ifeq ($(TARGET_ARCH),x86_64) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern) +endif + +CFLAGS += $(CONDITIONAL_CFLAGS) + +CC_ONLY_CFLAGS += --std=gnu11 +CXX_ONLY_CFLAGS += --std=gnu++11 + +OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS)) + +# Define how to generate the NVIDIA ID string +$(eval $(call GENERATE_NVIDSTRING, \ + NVRM_ID, \ + UNIX Open Kernel Module, $(OBJS))) + +# Define how to build each object file from the corresponding source file. +$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src)))) + +NV_KERNEL_O = $(OUTPUTDIR)/nv-kernel.o + +.PHONY: all clean +all: $(NV_KERNEL_O) + +LINKER_SCRIPT = nv-kernel.ld + +$(NV_KERNEL_O): $(OBJS) $(EXPORTS_LINK_COMMAND) $(LINKER_SCRIPT) + $(call quiet_cmd,LD) \ + $(NV_KERNEL_O_LDFLAGS) \ + -T $(LINKER_SCRIPT) \ + -r -o $(NV_KERNEL_O) $(OBJS) @$(EXPORTS_LINK_COMMAND) + $(call quiet_cmd,OBJCOPY) \ + --localize-symbol=memset \ + --localize-symbol=memcpy \ + $@ + +clean: + $(RM) -rf $(OUTPUTDIR) diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h new file mode 100644 index 0000000..1ff7df6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file nvrangetypes.h + * @brief Range types and operator macros + * @note #include a header to define NvUxx and NvSxx before sourcing this file. + */ + +#ifndef _NVRANGETYPES_H_ +#define _NVRANGETYPES_H_ + + +// +// Define range types by convention +// +#define __NV_DEFINE_RANGE_TYPE(T) \ +typedef struct NvRange ## T \ +{ \ + Nv ## T min; \ + Nv ## T max; \ +} NvRange ## T; + + +__NV_DEFINE_RANGE_TYPE(U64) // NvRangeU64 +__NV_DEFINE_RANGE_TYPE(S64) // NvRangeS64 +__NV_DEFINE_RANGE_TYPE(U32) // NvRangeU32 +__NV_DEFINE_RANGE_TYPE(S32) // NvRangeS32 +__NV_DEFINE_RANGE_TYPE(U16) // NvRangeU16 +__NV_DEFINE_RANGE_TYPE(S16) // NvRangeS16 +__NV_DEFINE_RANGE_TYPE(U8) // NvRangeU8 +__NV_DEFINE_RANGE_TYPE(S8) // NvRangeS8 + + +// +// Operator macros +// +// Macros are named xxx_RANGE (rather than xxx_RANGEU32, etc.) since they work +// properly on ranges with any number of bits, signed or unsigned. +// + +#define NV_EQUAL_RANGE(r1, r2) ((r1).min == (r2).min && (r1).max == (r2).max) +#define NV_EMPTY_INCLUSIVE_RANGE(r) ((r).min > (r).max) +#define NV_EMPTY_EXCLUSIVE_RANGE(r) ((r).min + 1 > (r).max - 1) +#define NV_WITHIN_INCLUSIVE_RANGE(r, x) ((r).min <= (x) && (x) <= (r).max) +#define NV_WITHIN_EXCLUSIVE_RANGE(r, x) ((r).min < (x) && (x) < (r).max) +#define NV_IS_SUBSET_RANGE(r1, r2) ((r1).min >= (r2).min && (r2).max >= (r1).max) +#define NV_IS_SUPERSET_RANGE(r1, r2) ((r1).min <= (r2).min && (r2).max <= (r1).max) +#define NV_CENTER_OF_RANGE(r) ((r).min / 2 + ((r).max + 1) / 2) // Avoid overflow and rounding anomalies. +#define NV_IS_OVERLAPPING_RANGE(r1, r2) \ + (NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).min) || \ + NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).max)) + +#define NV_DISTANCE_FROM_RANGE(r, x) ((x) < (r).min? (r).min - (x): ((x) > (r).max? (x) - (r).max: 0)) +#define NV_VALUE_WITHIN_INCLUSIVE_RANGE(r, x) ((x) < (r).min? (r).min : ((x) > (r).max? (r).max : (x))) +#define NV_VALUE_WITHIN_EXCLUSIVE_RANGE(r, x) ((x) <= (r).min? (r).min + 1 : ((x) >= (r).max? 
(r).max - 1 : (x))) + +#define NV_INIT_RANGE(r, x, y) \ +do \ +{ \ + (r).min = (x); \ + (r).max = (y); \ +} while(0) + +#define NV_ASSIGN_DELTA_RANGE(r, x, d) \ +do \ +{ \ + (r).min = (x) - (d); \ + (r).max = (x) + (d); \ +} while(0) + +#define NV_ASSIGN_INTERSECTION_RANGE(r1, r2) \ +do \ +{ \ + if ((r1).min < (r2).min) \ + (r1).min = (r2).min; \ + if ((r1).max > (r2).max) \ + (r1).max = (r2).max; \ +} while(0) + +#define NV_ASSIGN_UNION_RANGE(r1, r2) \ +do \ +{ \ + if ((r1).min > (r2).min) \ + (r1).min = (r2).min; \ + if ((r1).max < (r2).max) \ + (r1).max = (r2).max; \ +} while(0) + +#define NV_MULTIPLY_RANGE(r, x) \ +do \ +{ \ + (r).min *= (x); \ + (r).max *= (x); \ +} while(0) + +#define NV_DIVIDE_FLOOR_RANGE(r, x) \ +do \ +{ \ + (r).min /= (x); \ + (r).max /= (x); \ +} while(0) + +#define NV_DIVIDE_CEILING_RANGE(r, x) \ +do \ +{ \ + (r).min = ((r).min + (x) - 1) / (x); \ + (r).max = ((r).max + (x) - 1) / (x); \ +} while(0) + +#define NV_DIVIDE_ROUND_RANGE(r, x) \ +do \ +{ \ + (r).min = ((r).min + (x) / 2) / (x); \ + (r).max = ((r).max + (x) / 2) / (x); \ +} while(0) + +#define NV_DIVIDE_WIDE_RANGE(r, x) \ +do \ +{ \ + (r).min /= (x); \ + (r).max = ((r).max + (x) - 1) / (x); \ +} while(0) + +#define NV_DIVIDE_NARROW_RANGE(r, x) \ +do \ +{ \ + (r).min = ((r).min + (x) - 1) / (x); \ + (r).max /= (x); \ +} while(0) + +#endif // _NVRANGETYPES_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h new file mode 100644 index 0000000..35bbf7c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_CAPS_H_ +#define _NV_CAPS_H_ + +#include "nv-kernel-interface-api.h" + +/* + * Opaque OS-specific struct; on Linux, this has member + * 'struct proc_dir_entry'. + */ +typedef struct nv_cap nv_cap_t; + +/* + * Creates directory named "capabilities" under the provided path. 
+ * + * @param[in] path Absolute path + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_init(const char *path); + +/* + * Creates capability directory entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability directory's name + * @param[in] mode Capability directory's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Creates capability file entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability file's name + * @param[in] mode Capability file's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Destroys capability entry + * + * @param[in] cap Capability entry + */ +void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap); + +/* + * Validates and duplicates the provided file descriptor + * + * @param[in] cap Capability entry + * @param[in] fd File descriptor to be validated + * + * Returns duplicate fd upon success. Otherwise, returns -1. + */ +int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd); + +/* + * Closes file descriptor + * + * This function should be used to close duplicate file descriptors + * returned by nv_cap_validate_and_dup_fd. + * + * @param[in] fd File descriptor to be closed + * + */ +void NV_API_CALL nv_cap_close_fd(int fd); + +#endif /* _NV_CAPS_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h new file mode 100644 index 0000000..a8c0c0a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_GPU_INFO_H_ +#define _NV_GPU_INFO_H_ + +typedef struct { + NvU32 gpu_id; + + struct { + NvU32 domain; + NvU8 bus, slot, function; + } pci_info; + + /* + * opaque OS-specific pointer; on Linux, this is a pointer to the + * 'struct device' for the GPU. 
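+ *
+ * (A kernel-space consumer would typically recover it with something
+ * like the following, where 'gpu_info' is a hypothetical
+ * nv_gpu_info_t pointer:
+ *
+ *     struct device *dev = gpu_info->os_device_ptr;
+ * )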
+ */ + void *os_device_ptr; +} nv_gpu_info_t; + +#define NV_MAX_GPUS 32 + +#endif /* _NV_GPU_INFO_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h new file mode 100644 index 0000000..cb0b6a2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_NUMBERS_H +#define NV_IOCTL_NUMBERS_H + +/* NOTE: using an ioctl() number > 55 will overflow! */ +#define NV_IOCTL_MAGIC 'F' +#define NV_IOCTL_BASE 200 +#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0) +#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1) +#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6) +#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7) +#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9) +#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10) +#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11) +#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12) +#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13) +#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14) +#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17) + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h new file mode 100644 index 0000000..ffd1dee --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_H +#define NV_IOCTL_H + +#include <nv-ioctl-numbers.h> +#include <nvtypes.h> + +typedef struct { + NvU32 domain; /* PCI domain number */ + NvU8 bus; /* PCI bus number */ + NvU8 slot; /* PCI slot number */ + NvU8 function; /* PCI function number */ + NvU16 vendor_id; /* PCI vendor ID */ + NvU16 device_id; /* PCI device ID */ +} nv_pci_info_t; + +/* + * ioctl()'s with parameter structures too large for the + * _IOC cmd layout use the nv_ioctl_xfer_t structure + * and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual + * size and user argument pointer into the RM, which + * will then copy it to/from kernel space in separate steps. + */ +typedef struct nv_ioctl_xfer +{ + NvU32 cmd; + NvU32 size; + NvP64 ptr NV_ALIGN_BYTES(8); +} nv_ioctl_xfer_t; + +typedef struct nv_ioctl_card_info +{ + NvBool valid; + nv_pci_info_t pci_info; /* PCI config information */ + NvU32 gpu_id; + NvU16 interrupt_line; + NvU64 reg_address NV_ALIGN_BYTES(8); + NvU64 reg_size NV_ALIGN_BYTES(8); + NvU64 fb_address NV_ALIGN_BYTES(8); + NvU64 fb_size NV_ALIGN_BYTES(8); + NvU32 minor_number; + NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */ +} nv_ioctl_card_info_t; + +/* alloc event */ +typedef struct nv_ioctl_alloc_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_alloc_os_event_t; + +/* free event */ +typedef struct nv_ioctl_free_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_free_os_event_t; + +/* status code */ +typedef struct nv_ioctl_status_code +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU32 status; +} nv_ioctl_status_code_t; + +/* check version string */ +#define NV_RM_API_VERSION_STRING_LENGTH 64 + +typedef struct nv_ioctl_rm_api_version +{ + NvU32 cmd; + NvU32 reply; + char versionString[NV_RM_API_VERSION_STRING_LENGTH]; +} nv_ioctl_rm_api_version_t; + +#define NV_RM_API_VERSION_CMD_STRICT 0 +#define NV_RM_API_VERSION_CMD_RELAXED '1' +#define NV_RM_API_VERSION_CMD_OVERRIDE '2' + +#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0 +#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1 + +typedef struct nv_ioctl_query_device_intr +{ + NvU32 intrStatus NV_ALIGN_BYTES(4); + NvU32 status; +} nv_ioctl_query_device_intr; + +/* system parameters that the kernel driver may use for configuration */ +typedef struct nv_ioctl_sys_params +{ + NvU64 memblock_size NV_ALIGN_BYTES(8); +} nv_ioctl_sys_params_t; + +typedef struct 
nv_ioctl_register_fd +{ + int ctl_fd; +} nv_ioctl_register_fd_t; + +#define NV_DMABUF_EXPORT_MAX_HANDLES 128 + +typedef struct nv_ioctl_export_to_dma_buf_fd +{ + int fd; + NvHandle hClient; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NvU64 totalSize NV_ALIGN_BYTES(8); + NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES]; + NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU32 status; +} nv_ioctl_export_to_dma_buf_fd_t; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h new file mode 100644 index 0000000..f44799b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_RMAPI_OPS_H_ +#define _NV_KERNEL_RMAPI_OPS_H_ + +/* + * Define the RMAPI provided to kernel-level RM clients. + * + * Kernel-level RM clients should populate nvidia_kernel_rmapi_ops_t + * by assigning nvidia_kernel_rmapi_ops_t::op and the corresponding + * parameter structure in nvidia_kernel_rmapi_ops_t's params union. + * Then, pass a pointer to the nvidia_kernel_rmapi_ops_t to + * rm_kernel_rmapi_op(). + */ + +#include "nvtypes.h" +#include "nvos.h" + +typedef struct { + NvU32 op; /* One of the NV0[14]_XXXX operations listed below. 
*/ + + union { + NVOS00_PARAMETERS free; /* NV01_FREE */ + NVOS02_PARAMETERS allocMemory64; /* NV01_ALLOC_MEMORY */ + NVOS21_PARAMETERS alloc; /* NV04_ALLOC */ + NVOS32_PARAMETERS *pVidHeapControl; /* NV04_VID_HEAP_CONTROL */ + NVOS33_PARAMETERS mapMemory; /* NV04_MAP_MEMORY */ + NVOS34_PARAMETERS unmapMemory; /* NV04_UNMAP_MEMORY */ + NVOS39_PARAMETERS allocContextDma2; /* NV04_ALLOC_CONTEXT_DMA */ + NVOS46_PARAMETERS mapMemoryDma; /* NV04_MAP_MEMORY_DMA */ + NVOS47_PARAMETERS unmapMemoryDma; /* NV04_UNMAP_MEMORY_DMA */ + NVOS49_PARAMETERS bindContextDma; /* NV04_BIND_CONTEXT_DMA */ + NVOS54_PARAMETERS control; /* NV04_CONTROL*/ + NVOS55_PARAMETERS dupObject; /* NV04_DUP_OBJECT */ + NVOS57_PARAMETERS share; /* NV04_SHARE */ + NVOS61_PARAMETERS addVblankCallback; /* NV04_ADD_VBLANK_CALLBACK */ + } params; +} nvidia_kernel_rmapi_ops_t; + +#endif /* _NV_KERNEL_RMAPI_OPS_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h new file mode 100644 index 0000000..f03d263 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h @@ -0,0 +1,367 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef _NV_PRIV_H_
+#define _NV_PRIV_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d))
+#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d))
+#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d))
+
+#define NV_PRIV_REG_RD08(b,o) ((b)->Reg008[(o)/1])
+#define NV_PRIV_REG_RD16(b,o) ((b)->Reg016[(o)/2])
+#define NV_PRIV_REG_RD32(b,o) ((b)->Reg032[(o)/4])
+
+struct OBJGPU;
+
+typedef struct
+{
+    NvBool      baseValid;
+    VGAADDRDESC base;
+    NvBool      workspaceBaseValid;
+    VGAADDRDESC workspaceBase;
+    NvU32       vesaMode;
+} nv_vga_t;
+
+/*
+* device state during Power Management
+*/
+typedef struct nv_pm_state_s
+{
+    NvU32  IntrEn;
+    NvBool InHibernate;
+} nv_pm_state_t;
+
+/*
+* data structure for the UNIX workqueues
+*/
+typedef struct nv_work_item_s
+{
+    NvU32 flags;
+    NvU32 gpuInstance;
+    union
+    {
+        OSWorkItemFunction *pGpuFunction;
+        OSSystemWorkItemFunction *pSystemFunction;
+    } func;
+    void *pData;
+} nv_work_item_t;
+
+#define NV_WORK_ITEM_FLAGS_NONE           0x0
+#define NV_WORK_ITEM_FLAGS_REQUIRES_GPU   0x1
+#define NV_WORK_ITEM_FLAGS_DONT_FREE_DATA 0x2
+
+/*
+ * pseudo-registry data structure
+ */
+
+typedef enum
+{
+    NV_REGISTRY_ENTRY_TYPE_UNKNOWN = 0,
+    NV_REGISTRY_ENTRY_TYPE_DWORD,
+    NV_REGISTRY_ENTRY_TYPE_BINARY,
+    NV_REGISTRY_ENTRY_TYPE_STRING
+} nv_reg_type_t;
+
+typedef struct nv_reg_entry_s
+{
+    char *regParmStr;
+    NvU32 type;
+    NvU32 data;   // used when type == NV_REGISTRY_ENTRY_TYPE_DWORD
+    NvU8 *pdata;  // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
+    NvU32 len;    // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING}
+    struct nv_reg_entry_s *next;
+} nv_reg_entry_t;
+
+#define INVALID_DISP_ID 0xFFFFFFFF
+#define MAX_DISP_ID_PER_ADAPTER 0x2
+
+typedef struct nv_i2c_adapter_entry_s
+{
+    void  *pOsAdapter;
+    NvU32  port;
+    NvU32  displayId[MAX_DISP_ID_PER_ADAPTER];
+} nv_i2c_adapter_entry_t;
+
+#define NV_INIT_FLAG_HAL            0x0001
+#define NV_INIT_FLAG_HAL_COMPONENTS 0x0002
+#define NV_INIT_FLAG_GPU_STATE      0x0004
+#define NV_INIT_FLAG_GPU_STATE_LOAD 0x0008
+#define NV_INIT_FLAG_FIFO_WATCHDOG  0x0010
+#define NV_INIT_FLAG_CORE_LOGIC     0x0020
+#define NV_INIT_FLAG_GPUMGR_ATTACH  0x0040
+#define NV_INIT_FLAG_PUBLIC_I2C     0x0080
+#define NV_INIT_FLAG_SCALABILITY    0x0100
+#define NV_INIT_FLAG_DMA            0x0200
+
+#define MAX_I2C_ADAPTERS NV402C_CTRL_NUM_I2C_PORTS
+
+/*
+ * GPU dynamic power state machine.
+ *
+ * The GPU is in exactly one of these states at a time. Only certain state
+ * transitions are valid, as documented by the DAGs below.
+ *
+ * When in "instant idle" or COARSE mode:
+ *
+ *                      +----------------------+
+ *                      v                      |
+ *   +---------+     +----------------+     +--------+
+ *   | UNKNOWN | --> | IDLE_INDICATED | --> | IN_USE |
+ *   +---------+     +----------------+     +--------+
+ *
+ * The transition from UNKNOWN to IDLE_INDICATED happens in
+ * rm_init_dynamic_power_management().
+ *
+ * Thereafter, transitions from IDLE_INDICATED to IN_USE happen when
+ * os_ref_dynamic_power() is called and the refcount transitions from 0 to 1;
+ * transitions from IN_USE to IDLE_INDICATED happen when
+ * os_unref_dynamic_power() is called and the refcount transitions from 1 to 0.
+ * Note that only calls to os_(un)ref_dynamic_power() with the mode == COARSE
+ * are considered in this mode; calls with mode == FINE are ignored. Since
+ * COARSE calls are placed only in rm_init_adapter/rm_shutdown_adapter, the GPU
+ * effectively stays in the IN_USE state any time any client has initialized
+ * it.
+ *
+ *
+ * When in "deferred idle" or FINE mode, the valid transitions are:
+ *
+ *     UNKNOWN        --> IDLE_INDICATED  (rm_init_dynamic_power_management())
+ *     IDLE_INDICATED --> IN_USE          (os_ref_dynamic_power())
+ *     IDLE_INDICATED --> IDLE_INSTANT    (RmForceGpuNotIdle())
+ *     IN_USE         --> IDLE_INSTANT    (refcount drops from 1 to 0)
+ *     IDLE_INSTANT   --> IN_USE          (os_ref_dynamic_power())
+ *     IDLE_INSTANT   --> IDLE_SUSTAINED  (idle precondition check passes)
+ *     IDLE_SUSTAINED --> IN_USE          (os_ref_dynamic_power())
+ *     IDLE_SUSTAINED --> IDLE_INSTANT    (preconditions no longer hold)
+ *     IDLE_SUSTAINED --> IDLE_INDICATED  (RmIndicateIdle() work item)
+ *
+ * As before, the transition from UNKNOWN to IDLE_INDICATED happens in
+ * rm_init_dynamic_power_management(). This is not ideal: it means the GPU may
+ * be powered down immediately upon loading the RM module, even if
+ * rm_init_adapter() is going to be called soon thereafter. However, we can't
+ * rely on deferred idle callbacks yet, since those currently rely on core RM
+ * being initialized.
+ *
+ * At the beginning of rm_init_adapter(), the GPU transitions to the IN_USE
+ * state; during the rm_init_adapter() sequence,
+ * RmInitDeferredDynamicPowerManagement() will be called which will schedule
+ * timer callbacks and set the "deferred_idle_enabled" boolean.
+ *
+ * While in "deferred idle" mode, one of the callbacks
+ * timerCallbackForIdlePreConditions(), timerCallbackToIndicateIdle(), or
+ * RmIndicateIdle() should be scheduled when in the states:
+ * - IN_USE
+ * - IDLE_INSTANT
+ * - IDLE_SUSTAINED
+ * Note that since we may transition from IN_USE to IDLE_INSTANT rapidly (e.g.,
+ * for a series of RM calls), we don't attempt to schedule the callbacks and
+ * cancel them on each of these transitions. The
+ * timerCallbackForIdlePreConditions() callback will simply exit early if in
+ * the IN_USE state.
+ *
+ * As before, the GPU will remain in the IN_USE state until
+ * os_unref_dynamic_power() is called and the count transitions from 1 to 0
+ * (calls with mode == FINE are honored in this mode, and these transitions
+ * can happen frequently). When the refcount reaches 0, rather than going
+ * directly to the IDLE_INDICATED state, it transitions to the IDLE_INSTANT
+ * state.
+ *
+ * Then, when the next timerCallbackForIdlePreConditions() callback executes,
+ * if all preconditions are met, the state will transition to IDLE_SUSTAINED.
+ *
+ * If, when in the IDLE_SUSTAINED state, os_ref_dynamic_power() is called, the
+ * GPU will transition back to the IN_USE state and, once it next becomes
+ * idle, return to the IDLE_INSTANT state. This ensures that there is a
+ * suitable delay between any activity that requires bumping the refcount and
+ * indicating idleness.
+ *
+ * If the timerCallbackForIdlePreConditions() callback executes again and the
+ * GPU is still in the IDLE_SUSTAINED state, userspace mappings will be revoked
+ * and the timerCallbackToIndicateIdle() callback will be scheduled.
+ *
+ * If, before the timerCallbackToIndicateIdle() callback executes, either
+ * os_ref_dynamic_power() is called or a mapping which has been revoked is
+ * accessed (which triggers the RmForceGpuNotIdle() callback), the GPU will
+ * transition back to the IN_USE or IDLE_INSTANT state, respectively.
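+ *
+ * For illustration only, the refcount-driven transitions described above
+ * amount to logic of roughly this shape (a hypothetical sketch, not the
+ * actual implementation):
+ *
+ *     // os_ref_dynamic_power() with mode == FINE:
+ *     if (++refcount == 1)
+ *         state = NV_DYNAMIC_POWER_STATE_IN_USE;
+ *
+ *     // os_unref_dynamic_power() with mode == FINE, deferred idle enabled:
+ *     if (--refcount == 0)
+ *         state = NV_DYNAMIC_POWER_STATE_IDLE_INSTANT; // not IDLE_INDICATED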
+ * + * Then, when the timerCallbackToIndicateIdle() callback executes, if all + * mappings are still revoked, and the GPU is still in the IDLE_SUSTAINED + * state, and all GPU idleness preconditions remain satisfied, the + * RmIndicateIdle() work item will be enqueued. (Else, the GPU will transition + * back to the IDLE_INSTANT state and the callback for preconditions is + * scheduled again.) + * + * Finally, once the RmIndicateIdle() work item is called, if all of the same + * conditions still hold, the state will transition to IDLE_INDICATED. No + * callbacks will be scheduled from here; the callbacks for preconditions + * should be re-scheduled when transitioning out of the IDLE_INDICATED state. + * + * Once in the IDLE_INDICATED state, the kernel is free to call the RM to + * perform the GC6 entry sequence then turn off power to the GPU (although it + * may not, if the audio function is being used for example). + * + * There are two paths to exit the IDLE_INDICATED state: + * (a) If os_ref_dynamic_power() is called, in which case it transitions + * directly to the IN_USE state; + * (b) If RmForceGpuNotIdle() is called, in which case it transitions back to + * the IDLE_INSTANT state. + */ +typedef enum +{ + NV_DYNAMIC_POWER_STATE_UNKNOWN = 0, + + NV_DYNAMIC_POWER_STATE_IN_USE, + + NV_DYNAMIC_POWER_STATE_IDLE_INSTANT, + NV_DYNAMIC_POWER_STATE_IDLE_SUSTAINED, + NV_DYNAMIC_POWER_STATE_IDLE_INDICATED, +} nv_dynamic_power_state_t; + +typedef struct nv_dynamic_power_s +{ + /* + * mode is read without the mutex -- should be read-only outside of + * rm_init_dynamic_power_management, called during probe only. + */ + nv_dynamic_power_mode_t mode; + /* + * Whether to indicate idle immediately when the refcount reaches 0, or + * only go to the IDLE_INSTANT state, and expect timer callbacks to + * transition through IDLE_SUSTAINED -> IDLE_INDICATED. + */ + NvBool deferred_idle_enabled; + + nv_dynamic_power_state_t state; + NvS32 refcount; + + /* + * A word on lock ordering. These locks must be taken in the order: + * + * RM API lock > this dynamic_power mutex > RM GPUs lock + * + * Skipping any of those locks is fine (if they aren't required to protect + * whatever state is being accessed or modified), so long as the order is + * not violated. + */ + PORT_MUTEX *mutex; + + /* + * callback handles for deferred dynamic power management. + */ + NvP64 idle_precondition_check_event; + NvP64 indicate_idle_event; + NvBool idle_precondition_check_callback_scheduled; + + /* + * callback handle for kernel initiated gc6 entry/exit. + * these will be protected by the gpu lock. + */ + NvP64 remove_idle_holdoff; + NvBool b_idle_holdoff; + + /* + * flag set if the platform does not support fine grain dynamic power + * management. + */ + NvBool b_fine_not_supported; + + /* + * Counter to track clients disallowing GCOFF. + */ + NvU32 clients_gcoff_disallow_refcount; + + /* + * Maximum FB allocation size which can be saved in system memory + * while doing GCOFF based dynamic PM. + */ + NvU64 gcoff_max_fb_size; + + /* + * NVreg_DynamicPowerManagement regkey value set by the user + */ + NvU32 dynamic_power_regkey; +} nv_dynamic_power_t; + +typedef struct +{ + OBJGPU *pGpu; + + NvU32 pmc_boot_0; + + nv_vga_t vga; + + NvU32 flags; + NvU32 status; + + nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS]; + + void *pVbiosCopy; + NvU32 vbiosSize; + + nv_pm_state_t pm_state; + + nv_reg_entry_t *pRegistry; + + nv_dynamic_power_t dynamic_power; + + /* Flag to check if the GPU needs 4K page isolation. 
*/
+    NvBool b_4k_page_isolation_required;
+
+    /* Flag to check if GPU mobile config is enabled */
+    NvBool b_mobile_config_enabled;
+
+    /* Flag to check if S0ix-based power management is enabled. */
+    NvBool s0ix_pm_enabled;
+
+    /*
+     * Maximum FB allocation size which can be saved in system memory
+     * during system suspend with S0ix-based power management.
+     */
+    NvU64 s0ix_gcoff_max_fb_size;
+
+    NvU32 pmc_boot_42;
+} nv_priv_t;
+
+#define NV_SET_NV_PRIV(nv,p) ((nv)->priv = (p))
+#define NV_GET_NV_PRIV(nv) ((nv) ? (nv)->priv : NULL)
+
+/*
+ * Make sure that your stack has taken the API lock before using this macro.
+ */
+#define NV_GET_NV_PRIV_PGPU(nv) \
+    (NV_GET_NV_PRIV(nv) ? ((nv_priv_t *)NV_GET_NV_PRIV(nv))->pGpu : NULL)
+
+#endif // _NV_PRIV_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h
new file mode 100644
index 0000000..9429385
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h
@@ -0,0 +1,927 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _RM_REG_H_
+#define _RM_REG_H_
+
+#include "nvtypes.h"
+
+/*
+ * use NV_REG_STRING to stringify a registry key when using that registry key
+ */
+
+#define __NV_REG_STRING(regkey) #regkey
+#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
+
+/*
+ * use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify the
+ * definition of registry keys in the kernel module source code.
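+ *
+ * For example (illustrative only, using a hypothetical key "Foo"):
+ *
+ *     NV_DEFINE_REG_ENTRY(Foo, 1);       // defines NvU32 NVreg_Foo = 1;
+ *     NV_DEFINE_PARAMS_TABLE_ENTRY(Foo)  // yields { "Foo", &NVreg_Foo }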
+ */
+
+#define __NV_REG_VAR(regkey) NVreg_##regkey
+
+#if defined(NV_MODULE_PARAMETER)
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
+    static NvU32 __NV_REG_VAR(regkey) = (default_value)
+#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
+    NvU32 __NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#if defined(NV_MODULE_STRING_PARAMETER)
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value); \
+    NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
+#else
+#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
+    char *__NV_REG_VAR(regkey) = (default_value)
+#endif
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
+
+/*
+ * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of
+ * the regkey and the name of the module parameter. When using this macro, the
+ * name of the parameter is passed to the extra "parameter" argument, and it is
+ * this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
+ */
+
+#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
+    { NV_REG_STRING(regkey), &__NV_REG_VAR(parameter) }
+
+/*
+ *----------------- registry key definitions--------------------------
+ */
+
+/*
+ * Option: ModifyDeviceFiles
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will verify the validity
+ * of the NVIDIA device files in /dev and attempt to dynamically modify
+ * and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
+ * driver to touch the device files, you can use this registry key.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
+ * capability driver. Furthermore, the NVIDIA capability driver provides a
+ * modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
+ * this module parameter per device file.
+ *
+ * Possible Values:
+ *  0 = disable dynamic device file management
+ *  1 = enable dynamic device file management (default)
+ */
+
+#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
+#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
+
+/*
+ * Option: DeviceFileUID
+ *
+ * Description:
+ *
+ * This registry key specifies the UID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default UID is 0 ('root').
+ */
+
+#define __NV_DEVICE_FILE_UID DeviceFileUID
+#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
+
+/*
+ * Option: DeviceFileGID
+ *
+ * Description:
+ *
+ * This registry key specifies the GID assigned to the NVIDIA device files
+ * created and/or modified by the NVIDIA driver when dynamic device file
+ * management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default GID is 0 ('root').
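+ *
+ * For example (illustrative): booting with the module parameters
+ * NVreg_DeviceFileGID=44 NVreg_DeviceFileMode=0660 would restrict the device
+ * files to GID 44 (commonly the "video" group), assuming dynamic device file
+ * management is enabled.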
+ */
+
+#define __NV_DEVICE_FILE_GID DeviceFileGID
+#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
+
+/*
+ * Option: DeviceFileMode
+ *
+ * Description:
+ *
+ * This registry key specifies the device file mode assigned to the NVIDIA
+ * device files created and/or modified by the NVIDIA driver when dynamic
+ * device file management is enabled.
+ *
+ * This module parameter is only honored by the NVIDIA GPU driver.
+ *
+ * The default mode is 0666 (octal, rw-rw-rw-).
+ */
+
+#define __NV_DEVICE_FILE_MODE DeviceFileMode
+#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
+
+/*
+ * Option: ResmanDebugLevel
+ *
+ * Default value: ~0
+ */
+
+#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
+#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
+
+/*
+ * Option: RmLogonRC
+ *
+ * Default value: 1
+ */
+
+#define __NV_RM_LOGON_RC RmLogonRC
+#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
+
+/*
+ * Option: InitializeSystemMemoryAllocations
+ *
+ * Description:
+ *
+ * The NVIDIA Linux driver normally clears system memory it allocates
+ * for use with GPUs or within the driver stack. This is to ensure
+ * that potentially sensitive data is not rendered accessible by
+ * arbitrary user applications.
+ *
+ * Owners of single-user systems or similar trusted configurations may
+ * choose to disable the aforementioned clears using this option and
+ * potentially improve performance.
+ *
+ * Possible values:
+ *
+ *  1 = zero out system memory allocations (default)
+ *  0 = do not perform memory clears
+ */
+
+#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    InitializeSystemMemoryAllocations
+#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
+
+/*
+ * Option: RegistryDwords
+ *
+ * Description:
+ *
+ * This option accepts a semicolon-separated list of key=value pairs. Each
+ * key name is checked against the table of static options; if a match is
+ * found, the static option value is overridden, but invalid options remain
+ * invalid. Pairs that do not match an entry in the static option table
+ * are passed on to the RM directly.
+ *
+ * Format:
+ *
+ *  NVreg_RegistryDwords="<key=value>;<key=value>;..."
+ */
+
+#define __NV_REGISTRY_DWORDS RegistryDwords
+#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
+
+/*
+ * Option: RegistryDwordsPerDevice
+ *
+ * Description:
+ *
+ * This option allows specifying registry keys per GPU device, providing
+ * control of the registry at GPU-level granularity. It accepts a semicolon-
+ * separated list of key=value pairs. The first key=value pair MUST be
+ * "pci=DDDD:BB:DD.F;", where DDDD is the domain, BB is the bus ID, DD is the
+ * device slot number, and F is the function. This PCI BDF is used to identify
+ * which GPU to assign the registry keys that follow.
+ * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
+ * found, then all the registry keys that follow are skipped, until the next
+ * valid PCI identifier "pci=DDDD:BB:DD.F;" is found. The following are the
+ * valid formats for the value of the "pci" string:
+ * 1) bus:slot            : Domain and function default to 0.
+ * 2) domain:bus:slot     : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI dev id string.
+ *
+ * For each of the registry keys that follow, the key name is checked against
+ * the table of static options; if a match is found, the static option value
+ * is overridden, but invalid options remain invalid. Pairs that do not match
+ * an entry in the static option table are passed on to the RM directly.
+ *
+ * Format:
+ *
+ *  NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;...; \
+ *                                 pci=DDDD:BB:DD.F;<key=value>;...;"
+ */
+
+#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
+#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
+
+#define __NV_RM_MSG RmMsg
+#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
+
+/*
+ * Option: UsePageAttributeTable
+ *
+ * Description:
+ *
+ * Enable/disable use of the page attribute table (PAT) available in
+ * modern x86/x86-64 processors to set the effective memory type of memory
+ * mappings to write-combining (WC).
+ *
+ * If this option is enabled, an x86 processor with PAT support is present,
+ * and the host system's Linux kernel did not configure one of the PAT entries
+ * to indicate the WC memory type, then the driver will change the second
+ * entry in the PAT from its default (write-through (WT)) to WC at module load
+ * time. If the kernel did update one of the PAT entries, the driver will
+ * not modify the PAT.
+ *
+ * In both cases, the driver will honor attempts to map memory with the WC
+ * memory type by selecting the appropriate PAT entry using the correct
+ * set of PTE flags.
+ *
+ * Possible values:
+ *
+ * ~0 = use the NVIDIA driver's default logic (default)
+ *  1 = enable use of the PAT for WC mappings.
+ *  0 = disable use of the PAT for WC mappings.
+ */
+
+#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
+#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
+
+/*
+ * Option: EnableMSI
+ *
+ * Description:
+ *
+ * When this option is enabled and the host kernel supports the MSI feature,
+ * the NVIDIA driver will enable the PCI-E MSI capability of GPUs that support
+ * this feature, instead of using PCI-E wired interrupts.
+ *
+ * Possible Values:
+ *
+ *  0 = disable MSI interrupt
+ *  1 = enable MSI interrupt (default)
+ *
+ */
+
+#define __NV_ENABLE_MSI EnableMSI
+#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
+
+/*
+ * Option: EnablePCIeGen3
+ *
+ * Description:
+ *
+ * Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
+ * when configured on SandyBridge E desktop platforms, NVIDIA feels that
+ * delivering a reliable, high-quality experience is not currently possible in
+ * PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
+ * NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
+ * option to enable PCIe Gen3 support.
+ *
+ * This is completely unsupported!
+ *
+ * Possible Values:
+ *
+ *  0: disable PCIe Gen3 support (default)
+ *  1: enable PCIe Gen3 support
+ */
+
+#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
+#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
+
+/*
+ * Option: MemoryPoolSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the size of the
+ * memory pool, given as a multiple of 1 GB, created on VMware ESXi to
+ * satisfy any system memory allocations requested by the NVIDIA kernel
+ * module.
+ */
+
+#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
+#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
+
+/*
+ * Option: KMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size of
+ * the heap memory space reserved for kmalloc operations, given as a multiple
+ * of 1 MB. The heap is created on VMware ESXi to satisfy any system memory
+ * allocations requested by the NVIDIA kernel module.
+ */
+
+#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
+#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: VMallocHeapMaxSize
+ *
+ * Description:
+ *
+ * When set to a non-zero value, this option specifies the maximum size of
+ * the heap memory space reserved for vmalloc operations, given as a multiple
+ * of 1 MB. The heap is created on VMware ESXi to satisfy any system memory
+ * allocations requested by the NVIDIA kernel module.
+ */
+
+#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
+#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
+
+/*
+ * Option: IgnoreMMIOCheck
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will ignore the
+ * MMIO limit check during device probe on the VMware ESXi kernel. This is
+ * typically necessary when the VMware ESXi MMIO limit differs between a
+ * base version and its updates. Customers using updates can set this regkey
+ * to avoid probe failures.
+ */
+
+#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
+#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
+
+/*
+ * Option: TCEBypassMode
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will attempt to setup
+ * all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
+ * the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
+ * necessary for CUDA applications in which large system memory mappings may
+ * exceed the default TCE remapping capacity when operated in non-bypass mode.
+ *
+ * This option has no effect on non-POWER platforms.
+ *
+ * Possible Values:
+ *
+ *  0: system default TCE mode on all GPUs
+ *  1: enable TCE bypass mode on all GPUs
+ *  2: disable TCE bypass mode on all GPUs
+ */
+#define __NV_TCE_BYPASS_MODE TCEBypassMode
+#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
+
+#define NV_TCE_BYPASS_MODE_DEFAULT 0
+#define NV_TCE_BYPASS_MODE_ENABLE  1
+#define NV_TCE_BYPASS_MODE_DISABLE 2
+
+/*
+ * Option: pci
+ *
+ * Description:
+ *
+ * On Unix platforms, a per-GPU registry key can be specified as:
+ * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;...",
+ * where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
+ * The "pci" key identifies what follows as the PCI BDF of the GPU to
+ * which the subsequent registry keys are to be applied.
+ *
+ * This define is not used on non-UNIX platforms.
+ *
+ * Possible Formats for value:
+ *
+ * 1) bus:slot            : Domain and function default to 0.
+ * 2) domain:bus:slot     : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI BDF identifier string.
+ */
+#define __NV_PCI_DEVICE_BDF pci
+#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
+
+/*
+ * Option: EnableStreamMemOPs
+ *
+ * Description:
+ *
+ * When this option is enabled, the CUDA driver will enable support for
+ * CUDA Stream Memory Operations in user-mode applications, which so far
+ * must remain disabled by default due to limited support in developer
+ * tools.
+ *
+ * Note: this is treated as a hint. MemOPs may still be left disabled by the
+ * CUDA driver for other reasons.
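+ *
+ * For example (illustrative): NVreg_EnableStreamMemOPs=1 opts in, subject to
+ * the hint semantics described above.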
+ *
+ * Possible Values:
+ *
+ *  0 = disable feature (default)
+ *  1 = enable feature
+ */
+#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
+#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
+
+/*
+ * Option: EnableUserNUMAManagement
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will require the
+ * user-mode NVIDIA Persistence daemon to manage the onlining and offlining
+ * of its NUMA device memory.
+ *
+ * This option has no effect on platforms that do not support onlining
+ * device memory to a NUMA node (this feature is only supported on certain
+ * POWER9 systems).
+ *
+ * Possible Values:
+ *
+ *  0: disable user-mode NUMA management
+ *  1: enable user-mode NUMA management (default)
+ */
+#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
+#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
+
+/*
+ * Option: GpuBlacklist
+ *
+ * Description:
+ *
+ * This option accepts a list of blacklisted GPUs, separated by commas, that
+ * cannot be attached or used. Each blacklisted GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs. This regkey is deprecated and will be removed in the future. Use
+ * NV_REG_EXCLUDED_GPUS instead.
+ */
+#define __NV_GPU_BLACKLIST GpuBlacklist
+#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
+
+/*
+ * Option: ExcludedGpus
+ *
+ * Description:
+ *
+ * This option accepts a list of excluded GPUs, separated by commas, that
+ * cannot be attached or used. Each excluded GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs.
+ */
+#define __NV_EXCLUDED_GPUS ExcludedGpus
+#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
+
+/*
+ * Option: NvLinkDisable
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will not attempt to
+ * initialize or train NVLink connections for any GPUs. A system reboot is
+ * required for changes to take effect.
+ *
+ * This option has no effect if no GPUs support NVLink.
+ *
+ * Possible Values:
+ *
+ *  0: Do not disable NVLink (default)
+ *  1: Disable NVLink
+ */
+#define __NV_NVLINK_DISABLE NvLinkDisable
+#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
+
+/*
+ * Option: RestrictProfilingToAdminUsers
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will prevent users
+ * without administrative access (i.e., the CAP_SYS_ADMIN capability) from
+ * using GPU performance counters.
+ *
+ * Possible Values:
+ *
+ *  0: Do not restrict GPU counters (default)
+ *  1: Restrict GPU counters to system administrators only
+ */
+
+#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
+#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
+#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
+
+/*
+ * Option: TemporaryFilePath
+ *
+ * Description:
+ *
+ * When specified, this option changes the location in which the
+ * NVIDIA kernel module will create unnamed temporary files (e.g. to
+ * save the contents of video memory in). The indicated path must
+ * be a directory. By default, temporary files are created in /tmp.
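+ *
+ * For example (illustrative): NVreg_TemporaryFilePath=/var/tmp selects
+ * /var/tmp instead of the /tmp default.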
+ */
+#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
+#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
+
+/*
+ * Option: PreserveVideoMemoryAllocations
+ *
+ * If enabled, this option prompts the NVIDIA kernel module to save and
+ * restore all video memory allocations across system power management
+ * cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
+ * only select allocations are preserved.
+ *
+ * Possible Values:
+ *
+ *  0: Preserve only select video memory allocations (default)
+ *  1: Preserve all video memory allocations
+ */
+#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
+#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
+    NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
+
+/*
+ * Option: EnableS0ixPowerManagement
+ *
+ * When this option is enabled, the NVIDIA driver will use S0ix-based
+ * power management for system suspend/resume, if both the platform and
+ * the GPU support S0ix.
+ *
+ * During system suspend, if S0ix is enabled and
+ * video memory usage is above the threshold configured by
+ * 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
+ * in self-refresh mode while the rest of the GPU is powered down.
+ *
+ * Otherwise, the driver will copy video memory contents to system memory
+ * and power off the video memory along with the GPU.
+ *
+ * Possible Values:
+ *
+ *  0: Disable S0ix based power management (default)
+ *  1: Enable S0ix based power management
+ */
+
+#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
+#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
+    NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
+
+/*
+ * Option: S0ixPowerManagementVideoMemoryThreshold
+ *
+ * This option controls the threshold that the NVIDIA driver will use during
+ * S0ix-based system power management.
+ *
+ * When S0ix is enabled and the system is suspended, the driver will
+ * compare the amount of video memory in use with this threshold,
+ * to decide whether to keep video memory in self-refresh or copy video
+ * memory content to system memory.
+ *
+ * See the 'EnableS0ixPowerManagement' option.
+ *
+ * Values are expressed in Megabytes (1048576 bytes).
+ *
+ * The default value for this option is 256 MB.
+ *
+ */
+#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    S0ixPowerManagementVideoMemoryThreshold
+#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
+
+/*
+ * Option: DynamicPowerManagement
+ *
+ * This option controls how aggressively the NVIDIA kernel module will manage
+ * GPU power through kernel interfaces.
+ *
+ * Possible Values:
+ *
+ *  0: Never allow the GPU to be powered down.
+ *  1: Power down the GPU when it is not initialized.
+ *  2: Power down the GPU after it has been inactive for some time.
+ *  3: (Default) Power down the GPU after a period of inactivity (i.e.,
+ *     mode 2) on Ampere or later notebooks. Otherwise, do not power down
+ *     the GPU.
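+ *
+ * For example (illustrative): NVreg_DynamicPowerManagement=2 requests
+ * fine-grained power management unconditionally, rather than only on the
+ * Ampere-or-later notebooks that mode 3 would cover.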
+ */
+#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
+    NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
+
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER   0
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE  1
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE    2
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
+
+/*
+ * Option: DynamicPowerManagementVideoMemoryThreshold
+ *
+ * This option controls the threshold that the NVIDIA driver will use
+ * when selecting the dynamic power management scheme.
+ *
+ * When the driver detects that the GPU is idle, it will compare the amount
+ * of video memory in use with this threshold.
+ *
+ * If the current video memory usage is less than the threshold, the
+ * driver may preserve video memory contents in system memory and power off
+ * the video memory along with the GPU itself, if supported. Otherwise,
+ * the video memory will be kept in self-refresh mode while powering down
+ * the rest of the GPU, if supported.
+ *
+ * Values are expressed in Megabytes (1048576 bytes).
+ *
+ * If the requested value is greater than 200MB (the default), then it
+ * will be capped to 200MB.
+ */
+#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    DynamicPowerManagementVideoMemoryThreshold
+#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
+    NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
+
+/*
+ * Option: RegisterPCIDriver
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will register with the
+ * PCI subsystem.
+ *
+ * Possible values:
+ *
+ *  1 - register as PCI driver (default)
+ *  0 - do not register as PCI driver
+ */
+
+#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
+#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
+
+/*
+ * Option: EnablePCIERelaxedOrderingMode
+ *
+ * Description:
+ *
+ * When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
+ * be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
+ * every device to set the relaxed ordering bit to 1 in all outbound MWr
+ * transaction-layer packets. This is equivalent to setting the regkey to
+ * FORCE_ENABLE as a non-per-device registry key.
+ *
+ * Possible values:
+ *  0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
+ *  1 - Enable PCIe TLP relaxed ordering bit-setting
+ */
+#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
+#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
+    NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
+
+/*
+ * Option: EnableGpuFirmware
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA driver will enable use of GPU
+ * firmware.
+ *
+ * Possible mode values:
+ *  0 - Do not enable GPU firmware
+ *  1 - Enable GPU firmware
+ *  2 - (Default) Use the default enablement policy for GPU firmware
+ *
+ * Setting this to anything other than 2 will alter driver firmware-
+ * enablement policies, possibly disabling GPU firmware where it would
+ * have otherwise been enabled by default.
+ *
+ * If this key is set globally for the system, the driver may still attempt
+ * to apply some policies to maintain uniform firmware modes across all
+ * GPUs. This may result in the driver failing initialization on some GPUs
+ * to maintain such a policy.
+ *
+ * If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
+ * will attempt to honor whatever configuration is specified without applying
+ * additional policies.
This may also result in failed GPU initializations if
+ * the configuration is not possible (for example if the firmware is missing
+ * from the filesystem, or the GPU is not capable).
+ *
+ * Policy bits:
+ *
+ * POLICY_ALLOW_FALLBACK:
+ * The normal behavior is to fail GPU initialization if this registry
+ * entry is set in such a way that results in an invalid configuration. If
+ * instead the user would like the driver to automatically fall back to
+ * initializing the failing GPU with firmware disabled, then this bit can
+ * be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
+ * Note that this can result in a mixed mode configuration (ex: GPU0 has
+ * firmware enabled, but GPU1 does not).
+ *
+ */
+
+#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
+#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK             0x0000000F
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED         0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED          0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT          0x00000002
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK           0x000000F0
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE         0x00000012
+#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE         0xFFFFFFFF
+
+/*
+ * Option: EnableGpuFirmwareLogs
+ *
+ * When this option is enabled, the NVIDIA driver will send GPU firmware logs
+ * to the system log, when possible.
+ *
+ * Possible values:
+ *  0 - Do not send GPU firmware logs to the system log
+ *  1 - Enable sending of GPU firmware logs to the system log
+ *  2 - (Default) Enable sending of GPU firmware logs to the system log for
+ *      the debug kernel driver build only
+ */
+#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE          0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE           0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG  0x00000002
+
+/*
+ * Option: EnableDbgBreakpoint
+ *
+ * When this option is set to a non-zero value, and the kernel is configured
+ * appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
+ * INT3 on x86_64), assumed to be caught by an attached debugger.
+ *
+ * When this option is set to the value zero (the default), assertions within
+ * resman will print to the system log, but no CPU breakpoint will be triggered.
+ */
+#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
+
+
+/*
+ * Option: OpenRmEnableUnsupportedGpus
+ *
+ * Open nvidia.ko support for features beyond what is used on Data Center GPUs
+ * is still fairly immature, so for now require users to opt into use of open
+ * nvidia.ko with a special registry key, if not on a Data Center GPU.
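+ *
+ * For example (illustrative): NVreg_OpenRmEnableUnsupportedGpus=1 opts in
+ * on GPUs outside the Data Center line.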
+ */ + +#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS) +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE + +/* + * Option: NVreg_DmaRemapPeerMmio + * + * Description: + * + * When this option is enabled, the NVIDIA driver will use device driver + * APIs provided by the Linux kernel for DMA-remapping part of a device's + * MMIO region to another device, creating e.g., IOMMU mappings as necessary. + * When this option is disabled, the NVIDIA driver will instead only apply a + * fixed offset, which may be zero, to CPU physical addresses to produce the + * DMA address for the peer's MMIO region, and no IOMMU mappings will be + * created. + * + * This option only affects peer MMIO DMA mappings, and not system memory + * mappings. + * + * Possible Values: + * 0 = disable dynamic DMA remapping of peer MMIO regions + * 1 = enable dynamic DMA remapping of peer MMIO regions (default) + */ +#define __NV_DMA_REMAP_PEER_MMIO DmaRemapPeerMmio +#define NV_DMA_REMAP_PEER_MMIO NV_REG_STRING(__NV_DMA_REMAP_PEER_MMIO) +#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000 +#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001 + +#if defined(NV_DEFINE_REGISTRY_KEY_TABLE) + +/* + *---------registry key parameter declarations-------------- + */ + +NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0); +NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666); +NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1); +NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1); +NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0); +NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1); +NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0); +NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG); +NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT); + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0); + +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL); 
+NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL); +NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE); + +/* + *----------------registry database definition---------------------- + */ + +/* + * You can enable any of the registry options disabled by default by + * editing their respective entries in the table below. The last field + * determines if the option is considered valid - in order for the + * changes to take effect, you need to recompile and reload the NVIDIA + * kernel module. + */ +nv_parm_t nv_parms[] = { + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY, + __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO), + {NULL, NULL} +}; + +#elif defined(NVRM) + +extern nv_parm_t nv_parms[]; + +#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */ + +#endif /* _RM_REG_H_ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h new file mode 100644 index 0000000..02b0156 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
+#define _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
+
+#include <nvos.h>
+
+/*
+ * This is a wrapper for NVOS02_PARAMETERS with a file descriptor
+ */
+
+typedef struct
+{
+    NVOS02_PARAMETERS params;
+    int fd;
+} nv_ioctl_nvos02_parameters_with_fd;
+
+/*
+ * This is a wrapper for NVOS33_PARAMETERS with a file descriptor
+ */
+typedef struct
+{
+    NVOS33_PARAMETERS params;
+    int fd;
+} nv_ioctl_nvos33_parameters_with_fd;
+
+#endif // _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h
new file mode 100644
index 0000000..b71d5bf
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h
@@ -0,0 +1,1091 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + + +#ifndef _NV_H_ +#define _NV_H_ + + + +#include + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__) + #include // NULL +#elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include // NULL +#else + #include // NULL +#endif + +#include +#include "nv_stdarg.h" +#include +#include +#include + +extern nv_cap_t *nvidia_caps_root; + +extern const NvBool nv_is_rm_firmware_supported_os; + +#include +#include + +#include + +/* NVIDIA's reserved major character device number (Linux). */ +#define NV_MAJOR_DEVICE_NUMBER 195 + +#define GPU_UUID_LEN (16) + +/* + * Buffer size for an ASCII UUID: We need 2 digits per byte, plus space + * for "GPU", 5 dashes, and '\0' termination: + */ +#define GPU_UUID_ASCII_LEN (GPU_UUID_LEN * 2 + 9) + +/* + * #define an absolute maximum used as a sanity check for the + * NV_ESC_IOCTL_XFER_CMD ioctl() size argument. + */ +#define NV_ABSOLUTE_MAX_IOCTL_SIZE 16384 + +/* + * Solaris provides no more than 8 bits for the argument size in + * the ioctl() command encoding; make sure we don't exceed this + * limit. + */ +#define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 1 : -1) +#define __NV_IOWR(nr, type) ({ \ + typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)]; \ + _IOWR(NV_IOCTL_MAGIC, (nr), type); \ +}) + +#define NV_PCI_DEV_FMT "%04x:%02x:%02x.%x" +#define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \ + (nv)->pci_info.slot, (nv)->pci_info.function + +#define NV_RM_DEVICE_INTR_ADDRESS 0x100 + +/*! + * @brief The order of the display clocks in the below defined enum + * should be synced with below mapping array and macro. + * All four should be updated simultaneously in case + * of removal or addition of clocks in below order. + * Also, TEGRASOC_WHICH_CLK_MAX is used in various places + * in below mentioned files. 
+ * arch/nvalloc/unix/Linux/nv-linux.h + * + * arch/nvalloc/unix/src/os.c + * dispClkMapRmToOsArr[] = {...}; + * + * arch/nvalloc/unix/Linux/nv-clk.c + * osMapClk[] = {...}; + * + */ +typedef enum _TEGRASOC_WHICH_CLK +{ + TEGRASOC_WHICH_CLK_NVDISPLAYHUB, + TEGRASOC_WHICH_CLK_NVDISPLAY_DISP, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0, + TEGRASOC_WHICH_CLK_NVDISPLAY_P1, + TEGRASOC_WHICH_CLK_DPAUX0, + TEGRASOC_WHICH_CLK_FUSE, + TEGRASOC_WHICH_CLK_DSIPLL_VCO, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_VCO, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB, + TEGRASOC_WHICH_CLK_SPPLL0_DIV10, + TEGRASOC_WHICH_CLK_SPPLL0_DIV25, + TEGRASOC_WHICH_CLK_SPPLL0_DIV27, + TEGRASOC_WHICH_CLK_SPPLL1_VCO, + TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL1_DIV27, + TEGRASOC_WHICH_CLK_VPLL0_REF, + TEGRASOC_WHICH_CLK_VPLL0, + TEGRASOC_WHICH_CLK_VPLL1, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF, + TEGRASOC_WHICH_CLK_RG0, + TEGRASOC_WHICH_CLK_RG1, + TEGRASOC_WHICH_CLK_DISPPLL, + TEGRASOC_WHICH_CLK_DISPHUBPLL, + TEGRASOC_WHICH_CLK_DSI_LP, + TEGRASOC_WHICH_CLK_DSI_CORE, + TEGRASOC_WHICH_CLK_DSI_PIXEL, + TEGRASOC_WHICH_CLK_PRE_SOR0, + TEGRASOC_WHICH_CLK_PRE_SOR1, + TEGRASOC_WHICH_CLK_DP_LINK_REF, + TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M, + TEGRASOC_WHICH_CLK_RG0_M, + TEGRASOC_WHICH_CLK_RG1_M, + TEGRASOC_WHICH_CLK_SOR0_M, + TEGRASOC_WHICH_CLK_SOR1_M, + TEGRASOC_WHICH_CLK_PLLHUB, + TEGRASOC_WHICH_CLK_SOR0, + TEGRASOC_WHICH_CLK_SOR1, + TEGRASOC_WHICH_CLK_SOR_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SF0, + TEGRASOC_WHICH_CLK_SF0, + TEGRASOC_WHICH_CLK_SF1, + TEGRASOC_WHICH_CLK_DSI_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SOR0_REF, + TEGRASOC_WHICH_CLK_PRE_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR0_PLL_REF, + TEGRASOC_WHICH_CLK_SOR1_PLL_REF, + TEGRASOC_WHICH_CLK_SOR0_REF, + TEGRASOC_WHICH_CLK_SOR1_REF, + TEGRASOC_WHICH_CLK_OSC, + TEGRASOC_WHICH_CLK_DSC, + TEGRASOC_WHICH_CLK_MAUD, + TEGRASOC_WHICH_CLK_AZA_2XBIT, + TEGRASOC_WHICH_CLK_AZA_BIT, + TEGRASOC_WHICH_CLK_MIPI_CAL, + TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL, + TEGRASOC_WHICH_CLK_SOR0_DIV, + TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only. 
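+    // Illustrative boundary check in a hypothetical caller:
+    //     if (clk >= TEGRASOC_WHICH_CLK_MAX)
+    //         return NV_ERR_INVALID_ARGUMENT;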
+
+#ifdef NVRM
+
+extern const char *pNVRM_ID;
+
+/*
+ * ptr arithmetic convenience
+ */
+
+typedef union
+{
+    volatile NvV8 Reg008[1];
+    volatile NvV16 Reg016[1];
+    volatile NvV32 Reg032[1];
+} nv_hwreg_t, * nv_phwreg_t;
+
+
+#define NVRM_PCICFG_NUM_BARS           6
+#define NVRM_PCICFG_BAR_OFFSET(i)      (0x10 + (i) * 4)
+#define NVRM_PCICFG_BAR_REQTYPE_MASK   0x00000001
+#define NVRM_PCICFG_BAR_REQTYPE_MEMORY 0x00000000
+#define NVRM_PCICFG_BAR_MEMTYPE_MASK   0x00000006
+#define NVRM_PCICFG_BAR_MEMTYPE_64BIT  0x00000004
+#define NVRM_PCICFG_BAR_ADDR_MASK      0xfffffff0
+
+#define NVRM_PCICFG_NUM_DWORDS         16
+
+#define NV_GPU_NUM_BARS                3
+#define NV_GPU_BAR_INDEX_REGS          0
+#define NV_GPU_BAR_INDEX_FB            1
+#define NV_GPU_BAR_INDEX_IMEM          2
+
+typedef struct
+{
+    NvU64 cpu_address;
+    NvU64 size;
+    NvU32 offset;
+    NvU32 *map;
+    nv_phwreg_t map_u;
+} nv_aperture_t;
+
+typedef struct
+{
+    char *name;
+    NvU32 *data;
+} nv_parm_t;
+
+#define NV_RM_PAGE_SHIFT 12
+#define NV_RM_PAGE_SIZE  (1 << NV_RM_PAGE_SHIFT)
+#define NV_RM_PAGE_MASK  (NV_RM_PAGE_SIZE - 1)
+
+#define NV_RM_TO_OS_PAGE_SHIFT  (os_page_shift - NV_RM_PAGE_SHIFT)
+#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT)
+#define NV_RM_PAGES_TO_OS_PAGES(count) \
+    ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
+     ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
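+
+/*
+ * Worked example (illustrative): with 4 KiB RM pages and 64 KiB OS pages
+ * (os_page_shift == 16), NV_RM_TO_OS_PAGE_SHIFT is 4, so:
+ *
+ *     NV_RM_PAGES_PER_OS_PAGE     == 16
+ *     NV_RM_PAGES_TO_OS_PAGES(16) == 1
+ *     NV_RM_PAGES_TO_OS_PAGES(17) == 2   // a partial OS page rounds up
+ */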
+
+#if defined(NVCPU_X86_64)
+#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3)
+#else
+#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2)
+#endif
+
+typedef struct nvidia_stack_s
+{
+    NvU32 size;
+    void *top;
+    NvU8 stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16)));
+} nvidia_stack_t;
+
+/*
+ * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t
+ */
+typedef nvidia_stack_t nv_stack_t;
+
+typedef struct nv_file_private_t nv_file_private_t;
+
+/*
+ * This is a wrapper for UNIX events. Unlike the events that will be
+ * returned to clients, it includes kernel-specific data, such as the
+ * file pointer.
+ */
+typedef struct nv_event_s
+{
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 index;
+    NvU32 info32;
+    NvU16 info16;
+    nv_file_private_t *nvfp;  /* per file-descriptor data pointer */
+    NvU32 fd;
+    NvBool active;            /* whether the event should be signaled */
+    NvU32 refcount;           /* count of associated RM events */
+    struct nv_event_s *next;
+} nv_event_t;
+
+typedef struct nv_kern_mapping_s
+{
+    void *addr;
+    NvU64 size;
+    NvU32 modeFlag;
+    struct nv_kern_mapping_s *next;
+} nv_kern_mapping_t;
+
+typedef struct nv_usermap_access_params_s
+{
+    NvU64 addr;
+    NvU64 size;
+    NvU64 offset;
+    NvU64 *page_array;
+    NvU64 num_pages;
+    NvU64 mmap_start;
+    NvU64 mmap_size;
+    NvU64 access_start;
+    NvU64 access_size;
+    NvU64 remap_prot_extra;
+    NvBool contig;
+    NvU32 caching;
+} nv_usermap_access_params_t;
+
+/*
+ * Stores the mapping context for each mapping.
+ */
+typedef struct nv_alloc_mapping_context_s {
+    void *alloc;
+    NvU64 page_index;
+    NvU64 *page_array;
+    NvU64 num_pages;
+    NvU64 mmap_start;
+    NvU64 mmap_size;
+    NvU64 access_start;
+    NvU64 access_size;
+    NvU64 remap_prot_extra;
+    NvU32 prot;
+    NvBool valid;
+    NvU32 caching;
+} nv_alloc_mapping_context_t;
+
+typedef enum
+{
+    NV_SOC_IRQ_DISPLAY_TYPE = 0x1,
+    NV_SOC_IRQ_DPAUX_TYPE,
+    NV_SOC_IRQ_GPIO_TYPE,
+    NV_SOC_IRQ_HDACODEC_TYPE,
+    NV_SOC_IRQ_INVALID_TYPE
+} nv_soc_irq_type_t;
+
+/*
+ * Stores the interrupt number, interrupt type, and associated private data.
+ */
+typedef struct nv_soc_irq_info_s {
+    NvU32 irq_num;
+    nv_soc_irq_type_t irq_type;
+    NvBool bh_pending;
+    union {
+        NvU32 gpio_num;
+        NvU32 dpaux_instance;
+    } irq_data;
+} nv_soc_irq_info_t;
+
+#define NV_MAX_SOC_IRQS              6
+#define NV_MAX_DPAUX_NUM_DEVICES     4
+#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING
+
+#define NV_IGPU_LEGACY_STALL_IRQ  70
+#define NV_IGPU_MAX_STALL_IRQS    3
+#define NV_IGPU_MAX_NONSTALL_IRQS 1
+
+/*
+ * per device state
+ */
+
+/* DMA-capable device data, defined by kernel interface layer */
+typedef struct nv_dma_device nv_dma_device_t;
+
+typedef struct nv_state_t
+{
+    void *priv;     /* private data */
+    void *os_state; /* os-specific device state */
+
+    int flags;
+
+    /* PCI config info */
+    nv_pci_info_t pci_info;
+    NvU16 subsystem_id;
+    NvU16 subsystem_vendor;
+    NvU32 gpu_id;
+    NvU32 iovaspace_id;
+    struct
+    {
+        NvBool valid;
+        NvU8 uuid[GPU_UUID_LEN];
+    } nv_uuid_cache;
+    void *handle;
+
+    NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS];
+
+    /* physical characteristics */
+    nv_aperture_t bars[NV_GPU_NUM_BARS];
+    nv_aperture_t *regs;
+    nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES];
+    nv_aperture_t *hdacodec_regs;
+    nv_aperture_t *mipical_regs;
+    nv_aperture_t *fb, ud;
+    nv_aperture_t *simregs;
+    nv_aperture_t *emc_regs;
+
+    NvU32 num_dpaux_instance;
+    NvU32 interrupt_line;
+    NvU32 dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES];
+    nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS];
+    NvS32 current_soc_irq;
+    NvU32 num_soc_irqs;
+    NvU32 hdacodec_irq;
+    NvU8 *soc_dcb_blob;
+    NvU32 soc_dcb_size;
+    NvU32 disp_sw_soc_chip_id;
+
+    NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
+    NvU32 igpu_nonstall_irq;
+    NvU32 num_stall_irqs;
+    NvU64 dma_mask;
+
+    NvBool primary_vga;
+
+    NvU32 sim_env;
+
+    NvU32 rc_timer_enabled;
+
+    /* list of events allocated for this device */
+    nv_event_t *event_list;
+
+    /* lock to protect event_list */
+    void *event_spinlock;
+
+    nv_kern_mapping_t *kern_mappings;
+
+    /* Kernel interface DMA device data */
+    nv_dma_device_t *dma_dev;
+    nv_dma_device_t *niso_dma_dev;
+
+    /*
+     * Per-GPU queue. The actual queue object is usually allocated in the
+     * arch-specific parent structure (e.g. nv_linux_state_t), and this
+     * pointer just points to it.
+     */
+    struct os_work_queue *queue;
+
+    /* For loading RM as a firmware (DCE or GSP) client */
+    NvBool request_firmware;                /* request firmware from the OS */
+    NvBool request_fw_client_rm;            /* attempt to init RM as a FW client */
+    NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */
+    NvBool enable_firmware_logs;            /* attempt to enable firmware log decoding/printing */
+
+    /* Variable to track if nvidia_remove has been called */
+    NvBool removed;
+
+    NvBool console_device;
+
+    /* Variable to track if the GPU is an external GPU */
+    NvBool is_external_gpu;
+
+    /* Variable to track if the regkey PreserveVideoMemoryAllocations is set */
+    NvBool preserve_vidmem_allocations;
+
+    /* Variable to force allocation of 32-bit addressable memory */
+    NvBool force_dma32_alloc;
+
+    /* Variable to track if the device has entered a dynamic power state */
+    NvBool dynamic_power_entered;
+
+    /* PCI power state should be D0 during system suspend */
+    NvBool d0_state_in_suspend;
+
+    /* Current cyclestats client and context */
+    NvU32 profiler_owner;
+    void *profiler_context;
+
+    /*
+     * RMAPI objects to use in the OS layer to talk to core RM.
+     *
+     * Note that we only need to store one subdevice handle: in SLI, we will
+     * have a separate nv_state_t per physical GPU.
+     */
+    struct {
+        NvHandle hClient;
+        NvHandle hDevice;
+        NvHandle hSubDevice;
+        NvHandle hI2C;
+        NvHandle hDisp;
+    } rmapi;
+
+    /* Bool to check if the ISO IOMMU is enabled */
+    NvBool iso_iommu_present;
+
+    /* Bool to check if dma-buf is supported */
+    NvBool dma_buf_supported;
+
+    NvBool printed_openrm_enable_unsupported_gpus_error;
+
+    /* Check if the NVPCF DSM function is implemented under the NVPCF or GPU device scope */
+    NvBool nvpcf_dsm_in_gpu_scope;
+
+} nv_state_t;
+
+// These defines need to be in sync with the defines in system.h
+#define OS_TYPE_LINUX   0x1
+#define OS_TYPE_FREEBSD 0x2
+#define OS_TYPE_SUNOS   0x3
+#define OS_TYPE_VMWARE  0x4
+
+struct nv_file_private_t
+{
+    NvHandle *handles;
+    NvU16 maxHandles;
+    NvU32 deviceInstance;
+    NvU8 metadata[64];
+
+    nv_file_private_t *ctl_nvfp;
+    void *ctl_nvfp_priv;
+};
+
+// Forward declare the gpu ops structures
+typedef struct gpuSession *nvgpuSessionHandle_t;
+typedef struct gpuDevice *nvgpuDeviceHandle_t;
+typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t;
+typedef struct gpuChannel *nvgpuChannelHandle_t;
+typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t;
+typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t;
+typedef struct UvmGpuCaps_tag *nvgpuCaps_t;
+typedef struct UvmGpuCopyEnginesCaps_tag *nvgpuCesCaps_t;
+typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t;
+typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t;
+typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t;
+typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t;
+typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t;
+typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t;
+typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t;
+typedef struct UvmGpuAccessCntrConfig_tag *nvgpuAccessCntrConfig_t;
+typedef struct UvmGpuInfo_tag nvgpuInfo_t;
+typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t;
+typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t;
+typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t;
+typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t;
+typedef struct UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t;
+typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t;
+typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t;
+typedef struct UvmGpuChannelResourceBindParams_tag *nvgpuChannelResourceBindParams_t;
+typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocParams_t;
+typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t;
+typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t;
+typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64);
+typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64);
+
+/*
+ * flags
+ */
+
+#define NV_FLAG_OPEN                0x0001
+#define NV_FLAG_EXCLUDE             0x0002
+#define NV_FLAG_CONTROL             0x0004
+// Unused                           0x0008
+#define NV_FLAG_SOC_DISPLAY         0x0010
+#define NV_FLAG_USES_MSI            0x0020
+#define NV_FLAG_USES_MSIX           0x0040
+#define NV_FLAG_PASSTHRU            0x0080
+#define NV_FLAG_SUSPENDED           0x0100
+#define NV_FLAG_SOC_IGPU            0x0200
+// Unused                           0x0400
+#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
+#define NV_FLAG_IN_RECOVERY         0x1000
+// Unused                           0x2000
+#define NV_FLAG_UNBIND_LOCK         0x4000
+/* To be set when the GPU is not present on the bus, to help device teardown */
+#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000
+
+typedef enum
+{
+    NV_PM_ACTION_HIBERNATE,
+    NV_PM_ACTION_STANDBY,
+    NV_PM_ACTION_RESUME
+} nv_pm_action_t;
+
+typedef enum
+{
+    NV_PM_ACTION_DEPTH_DEFAULT,
+    NV_PM_ACTION_DEPTH_MODESET,
+    NV_PM_ACTION_DEPTH_UVM
+} nv_pm_action_depth_t;
+
+typedef enum
+{
+    NV_DYNAMIC_PM_NEVER,
+    NV_DYNAMIC_PM_COARSE,
+    NV_DYNAMIC_PM_FINE
+} nv_dynamic_power_mode_t;
+
+typedef enum
+{
+    NV_POWER_STATE_IN_HIBERNATE,
+    NV_POWER_STATE_IN_STANDBY,
+    NV_POWER_STATE_RUNNING
+} nv_power_state_t;
+
+typedef enum
+{
+    NV_FIRMWARE_GSP,
+    NV_FIRMWARE_GSP_LOG
+} nv_firmware_t;
+
+#define NV_PRIMARY_VGA(nv)      ((nv)->primary_vga)
+
+#define NV_IS_CTL_DEVICE(nv)    ((nv)->flags & NV_FLAG_CONTROL)
+#define NV_IS_SOC_DISPLAY_DEVICE(nv) \
+        ((nv)->flags & NV_FLAG_SOC_DISPLAY)
+
+#define NV_IS_SOC_IGPU_DEVICE(nv) \
+        ((nv)->flags & NV_FLAG_SOC_IGPU)
+
+#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
+        (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
+
+#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
+        ((nv)->iso_iommu_present)
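+
+/*
+ * Illustrative sketch (not part of the original header): the NV_IS_*()
+ * helpers above are plain bitmask tests on nv_state_t::flags, e.g.
+ *
+ *     if (NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv))
+ *         return;   // device is gone; skip hardware accesses
+ */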
+
+/*
+ * NVIDIA ACPI event ID to be passed into the core NVIDIA driver for
+ * AC/DC events.
+ */
+#define NV_SYSTEM_ACPI_BATTERY_POWER_EVENT  0x8002
+
+/*
+ * GPU add/remove events
+ */
+#define NV_SYSTEM_GPU_ADD_EVENT     0x9001
+#define NV_SYSTEM_GPU_REMOVE_EVENT  0x9002
+
+/*
+ * NVIDIA ACPI sub-event IDs (event types) to be passed to the core
+ * NVIDIA driver for ACPI events.
+ */
+#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT 0
+#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC         0
+#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY    1
+#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED    0
+#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED      1
+
+#define NV_ACPI_NVIF_HANDLE_PRESENT 0x01
+#define NV_ACPI_DSM_HANDLE_PRESENT  0x02
+#define NV_ACPI_WMMX_HANDLE_PRESENT 0x04
+
+#define NV_EVAL_ACPI_METHOD_NVIF    0x01
+#define NV_EVAL_ACPI_METHOD_WMMX    0x02
+
+#define NV_I2C_CMD_READ              1
+#define NV_I2C_CMD_WRITE             2
+#define NV_I2C_CMD_SMBUS_READ        3
+#define NV_I2C_CMD_SMBUS_WRITE       4
+#define NV_I2C_CMD_SMBUS_QUICK_WRITE 5
+#define NV_I2C_CMD_SMBUS_QUICK_READ  6
+#define NV_I2C_CMD_SMBUS_BLOCK_READ  7
+#define NV_I2C_CMD_SMBUS_BLOCK_WRITE 8
+
+// Flags needed by OSAllocPagesNode
+#define NV_ALLOC_PAGES_NODE_NONE         0x0
+#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1
+
+/*
+** where we hide our nv_state_t * ...
+*/
+#define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p))
+#define NV_GET_NV_STATE(pGpu) \
+    (nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL)
+
+#define IS_REG_OFFSET(nv, offset, length) \
+    (((offset) >= (nv)->regs->cpu_address) && \
+     (((offset) + ((length)-1)) <= \
+      (nv)->regs->cpu_address + ((nv)->regs->size-1)))
+
+#define IS_FB_OFFSET(nv, offset, length) \
+    (((nv)->fb) && ((offset) >= (nv)->fb->cpu_address) && \
+     (((offset) + ((length)-1)) <= (nv)->fb->cpu_address + ((nv)->fb->size-1)))
+
+#define IS_UD_OFFSET(nv, offset, length) \
+    (((nv)->ud.cpu_address != 0) && ((nv)->ud.size != 0) && \
+     ((offset) >= (nv)->ud.cpu_address) && \
+     (((offset) + ((length)-1)) <= (nv)->ud.cpu_address + ((nv)->ud.size-1)))
+
+#define IS_IMEM_OFFSET(nv, offset, length) \
+    (((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) && \
+     ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) && \
+     ((offset) >= (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) && \
+     (((offset) + ((length) - 1)) <= \
+      (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + \
+      ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size - 1)))
+
+#define NV_RM_MAX_MSIX_LINES 8
+
+#define NV_MAX_ISR_DELAY_US 20000
+#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000)
+
+#define NV_TIMERCMP(a, b, CMP) \
+    (((a)->tv_sec == (b)->tv_sec) ? \
+        ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec))
+
+#define NV_TIMERADD(a, b, result)                       \
+    {                                                   \
+        (result)->tv_sec = (a)->tv_sec + (b)->tv_sec;   \
+        (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
+        if ((result)->tv_usec >= 1000000)               \
+        {                                               \
+            ++(result)->tv_sec;                         \
+            (result)->tv_usec -= 1000000;               \
+        }                                               \
+    }
+
+#define NV_TIMERSUB(a, b, result)                       \
+    {                                                   \
+        (result)->tv_sec = (a)->tv_sec - (b)->tv_sec;   \
+        (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
+        if ((result)->tv_usec < 0)                      \
+        {                                               \
+            --(result)->tv_sec;                         \
+            (result)->tv_usec += 1000000;               \
+        }                                               \
+    }
+
+#define NV_TIMEVAL_TO_US(tv) ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec)
+
+#ifndef NV_ALIGN_UP
+#define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1))
+#endif
+#ifndef NV_ALIGN_DOWN
+#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
+#endif
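+
+/*
+ * Worked examples (illustrative). NV_ALIGN_UP()/NV_ALIGN_DOWN() assume a
+ * power-of-two granularity:
+ *
+ *     NV_ALIGN_UP(5000, 4096)   == 8192
+ *     NV_ALIGN_DOWN(5000, 4096) == 4096
+ *
+ * NV_TIMERADD() normalizes tv_usec, so adding 0.7s and 0.6s yields
+ * tv_sec == 1, tv_usec == 300000, and NV_TIMEVAL_TO_US() of that result
+ * is 1300000.
+ */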
+
+/*
+ * driver internal interfaces
+ */
+
+/*
+ * ---------------------------------------------------------------------------
+ *
+ * Function prototypes for the UNIX specific OS interface.
+ *
+ * ---------------------------------------------------------------------------
+ */
+
+NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *);
+void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **);
+NV_STATUS NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *);
+NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **);
+NV_STATUS NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *);
+NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32);
+
+NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64);
+NvU64 NV_API_CALL nv_get_user_phys_address (NvU64);
+nv_state_t* NV_API_CALL nv_get_adapter_state (NvU32, NvU8, NvU8);
+nv_state_t* NV_API_CALL nv_get_ctl_state (void);
+
+void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32);
+
+NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
+NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvU64 *, void **);
+NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *);
+
+NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **);
+void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **);
+
+NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **);
+void NV_API_CALL nv_unregister_peer_io_mem (nv_state_t *, void *);
+
+struct sg_table;
+
+NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *);
+void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *);
+NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **);
+void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *);
+
+NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
+NV_STATUS NV_API_CALL nv_dma_map_pages (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **);
+NV_STATUS NV_API_CALL nv_dma_unmap_pages (nv_dma_device_t *, NvU64, NvU64 *, void **);
+
+NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
+NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **);
+
+NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *);
+void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64);
+
+NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *);
+void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64);
+
+void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *);
+void NV_API_CALL nv_dma_enable_nvlink (nv_dma_device_t *);
+
+NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *);
+NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *);
+
+void NV_API_CALL nv_post_event (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool);
+NvS32 NV_API_CALL nv_get_event (nv_file_private_t *, nv_event_t *, NvU32 *);
+
+void* NV_API_CALL nv_i2c_add_adapter (nv_state_t *, NvU32);
+void NV_API_CALL nv_i2c_del_adapter (nv_state_t *, void *);
+
+void NV_API_CALL nv_acpi_methods_init (NvU32 *);
+void NV_API_CALL nv_acpi_methods_uninit (void);
+
+NV_STATUS NV_API_CALL nv_acpi_method (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
+NV_STATUS NV_API_CALL nv_acpi_dsm_method (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
+NV_STATUS NV_API_CALL nv_acpi_ddc_method (nv_state_t *, void *, NvU32 *, NvBool);
+NV_STATUS NV_API_CALL nv_acpi_dod_method (nv_state_t *, NvU32 *, NvU32 *);
+NV_STATUS NV_API_CALL nv_acpi_rom_method (nv_state_t *, NvU32 *, NvU32 *);
+NV_STATUS NV_API_CALL nv_acpi_get_powersource (NvU32 *);
+NvBool NV_API_CALL nv_acpi_is_battery_present (void);
+
+NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32, const char *);
+
+NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list);
+
+NvU64 NV_API_CALL nv_get_dma_start_address (nv_state_t *);
+NV_STATUS NV_API_CALL nv_set_primary_vga_status (nv_state_t *);
+NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *);
+NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *);
+
+NvBool NV_API_CALL nv_is_rm_firmware_active (nv_state_t *);
+const void* NV_API_CALL nv_get_firmware (nv_state_t *, nv_firmware_t, const void **, NvU32 *);
+void NV_API_CALL nv_put_firmware (const void *);
+
+nv_file_private_t* NV_API_CALL nv_get_file_private (NvS32, NvBool, void **);
+void NV_API_CALL nv_put_file_private (void *);
+
+NV_STATUS NV_API_CALL nv_get_device_memory_config (nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
+
+NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info (nv_state_t *, NvU64 *, NvU64 *, void**);
+NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode (nv_state_t *nv, NvBool *mode);
+
+void NV_API_CALL nv_wait_for_ibmnpu_rsync (nv_state_t *nv);
+
+void NV_API_CALL nv_ibmnpu_cache_flush_range (nv_state_t *nv, NvU64, NvU64);
+
+void NV_API_CALL nv_p2p_free_platform_data (void *data);
+
+#if defined(NVCPU_PPC64LE)
+NV_STATUS NV_API_CALL nv_get_nvlink_line_rate (nv_state_t *, NvU32 *);
+#endif
+
+NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *);
+void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *);
+void NV_API_CALL nv_release_mmap_lock (nv_state_t *);
+NvBool NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *);
+void NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool);
+
+NV_STATUS NV_API_CALL nv_indicate_idle (nv_state_t *);
+NV_STATUS NV_API_CALL nv_indicate_not_idle (nv_state_t *);
+void NV_API_CALL nv_idle_holdoff (nv_state_t *);
+
+NvBool NV_API_CALL nv_dynamic_power_available (nv_state_t *);
+void NV_API_CALL nv_audio_dynamic_power (nv_state_t *);
+
+void NV_API_CALL nv_control_soc_irqs (nv_state_t *, NvBool bEnable);
+NV_STATUS NV_API_CALL nv_get_current_irq_priv_data (nv_state_t *, NvU32 *);
+
+NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*);
+int NV_API_CALL nv_cap_drv_init (void);
+void NV_API_CALL nv_cap_drv_exit (void);
+NvBool NV_API_CALL nv_is_gpu_accessible (nv_state_t *);
+NvBool NV_API_CALL nv_match_gpu_os_info (nv_state_t *, void *);
+
+NvU32 NV_API_CALL nv_get_os_type (void);
+
+void NV_API_CALL nv_get_updated_emu_seg (NvU32 *start, NvU32 *end);
+
+struct dma_buf;
+typedef struct nv_dma_buf nv_dma_buf_t;
+struct drm_gem_object;
+
+NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
+void NV_API_CALL nv_dma_release_sgt (struct sg_table *, struct drm_gem_object *);
+NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvU32 *, struct sg_table **, nv_dma_buf_t **);
+NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvU32 *, struct sg_table **, nv_dma_buf_t **);
+void NV_API_CALL nv_dma_release_dma_buf (nv_dma_buf_t *);
+
+void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *);
+
+NvBool NV_API_CALL nv_platform_supports_s0ix (void);
+NvBool NV_API_CALL nv_s2idle_pm_configured (void);
+
+NvBool NV_API_CALL nv_is_chassis_notebook (void);
+void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv);
+void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv);
+
+typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *);
+
+NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *);
+NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *);
+
+NV_STATUS NV_API_CALL nv_i2c_transfer (nv_state_t *, NvU32, NvU8, nv_i2c_msg_t *, int);
+void NV_API_CALL nv_i2c_unregister_clients (nv_state_t *);
+NV_STATUS NV_API_CALL nv_i2c_bus_status (nv_state_t *, NvU32, NvS32 *, NvS32 *);
+NV_STATUS NV_API_CALL nv_clk_get_handles (nv_state_t *);
+void NV_API_CALL nv_clk_clear_handles (nv_state_t *);
+NV_STATUS NV_API_CALL nv_enable_clk (nv_state_t *, TEGRASOC_WHICH_CLK);
+NvBool NV_API_CALL nv_is_clk_enabled (nv_state_t *, TEGRASOC_WHICH_CLK);
+void NV_API_CALL nv_disable_clk (nv_state_t *, TEGRASOC_WHICH_CLK);
+NV_STATUS NV_API_CALL nv_get_curr_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
+NV_STATUS NV_API_CALL nv_get_max_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
+NV_STATUS NV_API_CALL nv_get_min_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
+NV_STATUS NV_API_CALL nv_set_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32);
+NV_STATUS NV_API_CALL nv_set_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK);
+NV_STATUS NV_API_CALL nv_get_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK*);
+NV_STATUS NV_API_CALL nv_soc_device_reset (nv_state_t *);
+NV_STATUS NV_API_CALL nv_imp_get_import_data (TEGRA_IMP_IMPORT_DATA *);
+NV_STATUS NV_API_CALL nv_imp_enable_disable_rfl (nv_state_t *nv, NvBool bEnable);
+NV_STATUS NV_API_CALL nv_imp_icc_set_bw (nv_state_t *nv, NvU32 avg_bw_kbps, NvU32 floor_bw_kbps);
+NV_STATUS NV_API_CALL nv_soc_pm_powergate (nv_state_t *);
+NV_STATUS NV_API_CALL nv_soc_pm_unpowergate (nv_state_t *);
+NV_STATUS NV_API_CALL nv_gpio_get_pin_state (nv_state_t *, NvU32, NvU32 *);
+void NV_API_CALL nv_gpio_set_pin_state (nv_state_t *, NvU32, NvU32);
+NV_STATUS NV_API_CALL nv_gpio_set_pin_direction (nv_state_t *, NvU32, NvU32);
+NV_STATUS NV_API_CALL nv_gpio_get_pin_direction (nv_state_t *, NvU32, NvU32 *);
+NV_STATUS NV_API_CALL nv_gpio_get_pin_number (nv_state_t *, NvU32, NvU32 *);
+NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status (nv_state_t *, NvU32, NvU32);
+NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt (nv_state_t *, NvU32, NvU32);
+NV_STATUS NV_API_CALL nv_get_num_dpaux_instances (nv_state_t *nv, NvU32 *num_instances);
+NV_STATUS NV_API_CALL nv_get_tegra_brightness_level (nv_state_t *, NvU32 *);
+NV_STATUS NV_API_CALL nv_set_tegra_brightness_level (nv_state_t *, NvU32);
+NV_STATUS NV_API_CALL nv_get_syncpoint_aperture (NvU32, NvU64 *, NvU64 *, NvU32 *);
+NvU32 NV_API_CALL nv_tegra_get_rm_interface_type (NvU32);
+NV_STATUS NV_API_CALL nv_tegra_dce_register_ipc_client (NvU32, void *, nvTegraDceClientIpcCallback, NvU32 *);
+NV_STATUS NV_API_CALL nv_tegra_dce_client_ipc_send_recv (NvU32, void *, NvU32);
+NV_STATUS NV_API_CALL nv_tegra_dce_unregister_ipc_client (NvU32);
+NV_STATUS NV_API_CALL nv_dsi_parse_panel_props (nv_state_t *, void *);
+NvBool NV_API_CALL nv_dsi_is_panel_connected (nv_state_t *);
+NV_STATUS NV_API_CALL nv_dsi_panel_enable (nv_state_t *, void *);
+NV_STATUS NV_API_CALL nv_dsi_panel_reset (nv_state_t *, void *);
+void NV_API_CALL nv_dsi_panel_disable (nv_state_t *, void *);
+void NV_API_CALL nv_dsi_panel_cleanup (nv_state_t *, void *);
+NV_STATUS NV_API_CALL nv_soc_mipi_cal_reset (nv_state_t *);
+NvU32 NV_API_CALL nv_soc_fuse_register_read (NvU32 addr);
+
+/*
+ * ---------------------------------------------------------------------------
+ *
+ * Function prototypes for the Resource Manager interface.
+ *
+ * ---------------------------------------------------------------------------
+ */
+
+NvBool NV_API_CALL rm_init_rm (nvidia_stack_t *);
+void NV_API_CALL rm_shutdown_rm (nvidia_stack_t *);
+NvBool NV_API_CALL rm_init_private_state (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_free_private_state (nvidia_stack_t *, nv_state_t *);
+NvBool NV_API_CALL rm_init_adapter (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_disable_adapter (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_shutdown_adapter (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_exclude_adapter (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_acquire_api_lock (nvidia_stack_t *);
+NV_STATUS NV_API_CALL rm_release_api_lock (nvidia_stack_t *);
+NV_STATUS NV_API_CALL rm_acquire_gpu_lock (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_release_gpu_lock (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock (nvidia_stack_t *);
+NV_STATUS NV_API_CALL rm_release_all_gpus_lock (nvidia_stack_t *);
+NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
+NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *);
+void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
+NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_save_low_res_mode (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_get_vbios_version (nvidia_stack_t *, nv_state_t *, char *);
+char* NV_API_CALL rm_get_gpu_uuid (nvidia_stack_t *, nv_state_t *);
+const NvU8* NV_API_CALL rm_get_gpu_uuid_raw (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_set_rm_firmware_requested (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_get_firmware_version (nvidia_stack_t *, nv_state_t *, char *, NvLength);
+void NV_API_CALL rm_cleanup_file_private (nvidia_stack_t *, nv_state_t *, nv_file_private_t *);
+void NV_API_CALL rm_unbind_lock (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_read_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *);
+NV_STATUS NV_API_CALL rm_write_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32);
+NV_STATUS NV_API_CALL rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32);
+NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32);
+void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *);
+char* NV_API_CALL rm_remove_spaces (const char *);
+char* NV_API_CALL rm_string_token (char **, const char);
+
+NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *);
+const char* NV_API_CALL rm_get_device_name (NvU16, NvU16, NvU16);
+
+NV_STATUS NV_API_CALL rm_is_supported_device (nvidia_stack_t *, nv_state_t *);
+NvBool NV_API_CALL rm_is_supported_pci_device (NvU8 pci_class,
+                                               NvU8 pci_subclass,
+                                               NvU16 vendor,
+                                               NvU16 device,
+                                               NvU16 subsystem_vendor,
+                                               NvU16 subsystem_device,
+                                               NvBool print_legacy_warning);
+
+void NV_API_CALL rm_i2c_remove_adapters (nvidia_stack_t *, nv_state_t *);
+NvBool NV_API_CALL rm_i2c_is_smbus_capable (nvidia_stack_t *, nv_state_t *, void *);
+NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, NvU8, NvU8, NvU8, NvU32, NvU8 *);
+
+NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU32);
+
+NV_STATUS NV_API_CALL rm_system_event (nvidia_stack_t *, NvU32, NvU32);
+
+void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *);
+NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
+NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64);
+NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
+NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
+NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *);
+NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
+NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
+NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent (nvidia_stack_t *, void *, void *);
+NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU32, NvU32, NvU64 *, void **);
+NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *);
+void NV_API_CALL rm_dma_buf_undup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
+NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, NvU64 *);
+NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64);
+NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **);
+void NV_API_CALL rm_dma_buf_put_client_and_device (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
+NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
+
+void NV_API_CALL rm_kernel_rmapi_op (nvidia_stack_t *sp, void *ops_cmd);
+NvBool NV_API_CALL rm_get_device_remove_flag (nvidia_stack_t *sp, NvU32 gpu_id);
+NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults (nvidia_stack_t *, nv_state_t *, NvU32 *);
+NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked (nvidia_stack_t *, nv_state_t *, NvU32 *);
+NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults (nvidia_stack_t *, nv_state_t *, NvU32 *);
+NvBool NV_API_CALL rm_gpu_need_4k_page_isolation (nv_state_t *);
+NvBool NV_API_CALL rm_is_chipset_io_coherent (nv_stack_t *);
+NvBool NV_API_CALL rm_init_event_locks (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_destroy_event_locks (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_get_gpu_numa_info (nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *);
+NV_STATUS NV_API_CALL rm_gpu_numa_online (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_gpu_numa_offline (nvidia_stack_t *, nv_state_t *);
+NvBool NV_API_CALL rm_is_device_sequestered (nvidia_stack_t *, nv_state_t *);
+void NV_API_CALL rm_check_for_gpu_surprise_removal (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_set_external_kernel_client_count (nvidia_stack_t *, nv_state_t *, NvBool);
+NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup (nvidia_stack_t *, nv_state_t *);
+NvBool NV_API_CALL rm_is_iommu_needed_for_sriov (nvidia_stack_t *, nv_state_t *);
+NvBool NV_API_CALL rm_disable_iomap_wc (void);
+
+void NV_API_CALL rm_init_dynamic_power_management (nvidia_stack_t *, nv_state_t *, NvBool);
+void NV_API_CALL rm_cleanup_dynamic_power_management (nvidia_stack_t *, nv_state_t *);
+NV_STATUS NV_API_CALL rm_ref_dynamic_power (nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
+void NV_API_CALL rm_unref_dynamic_power (nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
+NV_STATUS NV_API_CALL rm_transition_dynamic_power (nvidia_stack_t *, nv_state_t *, NvBool);
+const char* NV_API_CALL rm_get_vidmem_power_status (nvidia_stack_t *, nv_state_t *);
+const char* NV_API_CALL rm_get_dynamic_power_management_status (nvidia_stack_t *, nv_state_t *);
+const char* NV_API_CALL rm_get_gpu_gcx_support (nvidia_stack_t *, nv_state_t *, NvBool);
+
+void NV_API_CALL rm_acpi_notify (nvidia_stack_t *, nv_state_t *, NvU32);
+NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits (nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *);
+
+/* vGPU VFIO specific functions */
+NV_STATUS NV_API_CALL nv_vgpu_create_request (nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
+NV_STATUS NV_API_CALL nv_vgpu_delete (nvidia_stack_t *, const NvU8 *, NvU16);
+NV_STATUS NV_API_CALL nv_vgpu_get_type_ids (nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 **, NvBool);
+NV_STATUS NV_API_CALL nv_vgpu_get_type_info (nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
+NV_STATUS NV_API_CALL nv_vgpu_get_bar_info (nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *);
+NV_STATUS NV_API_CALL nv_vgpu_start (nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
+NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap (nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
+NV_STATUS NV_API_CALL nv_vgpu_process_vf_info (nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
+NV_STATUS NV_API_CALL nv_vgpu_update_request (nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
+NV_STATUS NV_API_CALL nv_gpu_bind_event (nvidia_stack_t *);
+
+NV_STATUS NV_API_CALL nv_get_usermap_access_params (nv_state_t*, nv_usermap_access_params_t*);
+nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type (nv_state_t*);
+void NV_API_CALL nv_flush_coherent_cpu_cache_range (nv_state_t *nv, NvU64 cpu_virtual, NvU64 size);
+
+/* Callbacks should occur roughly every 10ms. */
+#define NV_SNAPSHOT_TIMER_HZ 100
+void NV_API_CALL nv_start_snapshot_timer (void (*snapshot_callback)(void *context));
+void NV_API_CALL nv_flush_snapshot_timer (void);
+void NV_API_CALL nv_stop_snapshot_timer (void);
+
+static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv)
+{
+    return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL;
+}
+
+/* nanosecond-resolution timer callback structure */
+typedef struct nv_nano_timer nv_nano_timer_t;
+
+/* nano timer functions */
+void NV_API_CALL nv_create_nano_timer (nv_state_t *, void *pTmrEvent, nv_nano_timer_t **);
+void NV_API_CALL nv_start_nano_timer (nv_state_t *nv, nv_nano_timer_t *, NvU64 timens);
+NV_STATUS NV_API_CALL rm_run_nano_timer_callback (nvidia_stack_t *, nv_state_t *, void *pTmrEvent);
+void NV_API_CALL nv_cancel_nano_timer (nv_state_t *, nv_nano_timer_t *);
+void NV_API_CALL nv_destroy_nano_timer (nv_state_t *nv, nv_nano_timer_t *);
+
+#if defined(NVCPU_X86_64)
+
+static inline NvU64 nv_rdtsc(void)
+{
+    NvU64 val;
+    __asm__ __volatile__ ("rdtsc            \t\n"
+                          "shlq $0x20,%%rdx \t\n"
+                          "orq %%rdx,%%rax  \t\n"
+                          : "=A" (val));
+    return val;
+}
+
+#endif
+
+#endif /* NVRM */
+
+static inline int nv_count_bits(NvU64 word)
+{
+    NvU64 bits;
+
+    bits = (word & 0x5555555555555555ULL) + ((word >> 1) & 0x5555555555555555ULL);
+    bits = (bits & 0x3333333333333333ULL) + ((bits >> 2) & 0x3333333333333333ULL);
+    bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >> 4) & 0x0f0f0f0f0f0f0f0fULL);
+    bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >> 8) & 0x00ff00ff00ff00ffULL);
+    bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL);
+    bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL);
+
+    return (int)(bits);
+}
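+
+/*
+ * The routine above is a branch-free population count (Hamming weight):
+ * each step sums adjacent bit groups of doubling width (1, 2, 4, ...,
+ * 32 bits) until the full 64-bit sum remains. For example (illustrative),
+ * nv_count_bits(0xF0ULL) == 4 and nv_count_bits(~0ULL) == 64.
+ */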
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h
new file mode 100644
index 0000000..629af5a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h
@@ -0,0 +1,54 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NV_ESCAPE_H_INCLUDED
+#define NV_ESCAPE_H_INCLUDED
+
+#define NV_ESC_RM_ALLOC_MEMORY               0x27
+#define NV_ESC_RM_ALLOC_OBJECT               0x28
+#define NV_ESC_RM_FREE                       0x29
+#define NV_ESC_RM_CONTROL                    0x2A
+#define NV_ESC_RM_ALLOC                      0x2B
+#define NV_ESC_RM_CONFIG_GET                 0x32
+#define NV_ESC_RM_CONFIG_SET                 0x33
+#define NV_ESC_RM_DUP_OBJECT                 0x34
+#define NV_ESC_RM_SHARE                      0x35
+#define NV_ESC_RM_CONFIG_GET_EX              0x37
+#define NV_ESC_RM_CONFIG_SET_EX              0x38
+#define NV_ESC_RM_I2C_ACCESS                 0x39
+#define NV_ESC_RM_IDLE_CHANNELS              0x41
+#define NV_ESC_RM_VID_HEAP_CONTROL           0x4A
+#define NV_ESC_RM_ACCESS_REGISTRY            0x4D
+#define NV_ESC_RM_MAP_MEMORY                 0x4E
+#define NV_ESC_RM_UNMAP_MEMORY               0x4F
+#define NV_ESC_RM_GET_EVENT_DATA             0x52
+#define NV_ESC_RM_ALLOC_CONTEXT_DMA2         0x54
+#define NV_ESC_RM_ADD_VBLANK_CALLBACK        0x56
+#define NV_ESC_RM_MAP_MEMORY_DMA             0x57
+#define NV_ESC_RM_UNMAP_MEMORY_DMA           0x58
+#define NV_ESC_RM_BIND_CONTEXT_DMA           0x59
+#define NV_ESC_RM_EXPORT_OBJECT_TO_FD        0x5C
+#define NV_ESC_RM_IMPORT_OBJECT_FROM_FD      0x5D
+#define NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 0x5E
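+
+/*
+ * Illustrative sketch (not part of the original header): these escape
+ * numbers supply the `nr` argument of the ioctl encoding macros in nv.h,
+ * e.g. for a hypothetical parameter struct:
+ *
+ *     unsigned int request = __NV_IOWR(NV_ESC_RM_ALLOC, my_alloc_params_t);
+ */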
* +* * +\***************************************************************************/ + +#include +#include +#include "nv_stdarg.h" +#include +#include +#include + + + +typedef struct +{ + NvU32 os_major_version; + NvU32 os_minor_version; + NvU32 os_build_number; + const char * os_build_version_str; + const char * os_build_date_plus_str; +}os_version_info; + +/* Each OS defines its own version of this opaque type */ +struct os_work_queue; + +/* Each OS defines its own version of this opaque type */ +typedef struct os_wait_queue os_wait_queue; + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for OS interface. + * + * --------------------------------------------------------------------------- + */ + +NvU64 NV_API_CALL os_get_num_phys_pages (void); +NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64); +void NV_API_CALL os_free_mem (void *); +NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *); +NvU64 NV_API_CALL os_get_current_tick (void); +NvU64 NV_API_CALL os_get_current_tick_hr (void); +NvU64 NV_API_CALL os_get_tick_resolution (void); +NV_STATUS NV_API_CALL os_delay (NvU32); +NV_STATUS NV_API_CALL os_delay_us (NvU32); +NvU64 NV_API_CALL os_get_cpu_frequency (void); +NvU32 NV_API_CALL os_get_current_process (void); +void NV_API_CALL os_get_current_process_name (char *, NvU32); +NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *); +char* NV_API_CALL os_string_copy (char *, const char *); +NvU32 NV_API_CALL os_string_length (const char *); +NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32); +NvS32 NV_API_CALL os_string_compare (const char *, const char *); +NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...); +NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list); +void NV_API_CALL os_log_error (const char *, va_list); +void* NV_API_CALL os_mem_copy (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32); +void* NV_API_CALL os_mem_set (void *, NvU8, NvU32); +NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32); +void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *); +NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8); +NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16); +NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32); +NvBool NV_API_CALL os_pci_remove_supported (void); +void NV_API_CALL os_pci_remove (void *); +void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32); +void NV_API_CALL os_unmap_kernel_space (void *, NvU64); +void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **); +void NV_API_CALL os_unmap_user_space (void *, NvU64, void *); +NV_STATUS NV_API_CALL os_flush_cpu_cache (void); +NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void); +NV_STATUS NV_API_CALL os_flush_user_cache (void); +void NV_API_CALL os_flush_cpu_write_combine_buffer(void); +NvU8 NV_API_CALL os_io_read_byte (NvU32); +NvU16 NV_API_CALL os_io_read_word (NvU32); +NvU32 NV_API_CALL os_io_read_dword (NvU32); +void NV_API_CALL os_io_write_byte (NvU32, NvU8); +void NV_API_CALL os_io_write_word (NvU32, NvU16); +void NV_API_CALL os_io_write_dword (NvU32, NvU32); +NvBool NV_API_CALL 
+NvBool NV_API_CALL os_is_administrator (void);
+NvBool NV_API_CALL os_allow_priority_override (void);
+void NV_API_CALL os_dbg_init (void);
+void NV_API_CALL os_dbg_breakpoint (void);
+void NV_API_CALL os_dbg_set_level (NvU32);
+NvU32 NV_API_CALL os_get_cpu_count (void);
+NvU32 NV_API_CALL os_get_cpu_number (void);
+void NV_API_CALL os_disable_console_access (void);
+void NV_API_CALL os_enable_console_access (void);
+NV_STATUS NV_API_CALL os_registry_init (void);
+NV_STATUS NV_API_CALL os_schedule (void);
+NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
+void NV_API_CALL os_free_spinlock (void *);
+NvU64 NV_API_CALL os_acquire_spinlock (void *);
+void NV_API_CALL os_release_spinlock (void *, NvU64);
+NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
+NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *);
+NV_STATUS NV_API_CALL os_alloc_mutex (void **);
+void NV_API_CALL os_free_mutex (void *);
+NV_STATUS NV_API_CALL os_acquire_mutex (void *);
+NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
+void NV_API_CALL os_release_mutex (void *);
+void* NV_API_CALL os_alloc_semaphore (NvU32);
+void NV_API_CALL os_free_semaphore (void *);
+NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
+NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
+NV_STATUS NV_API_CALL os_release_semaphore (void *);
+NvBool NV_API_CALL os_semaphore_may_sleep (void);
+NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
+NvBool NV_API_CALL os_is_isr (void);
+NvBool NV_API_CALL os_pat_supported (void);
+void NV_API_CALL os_dump_stack (void);
+NvBool NV_API_CALL os_is_efi_enabled (void);
+NvBool NV_API_CALL os_is_xen_dom0 (void);
+NvBool NV_API_CALL os_is_vgx_hyper (void);
+NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
+NvBool NV_API_CALL os_is_grid_supported (void);
+NvU32 NV_API_CALL os_get_grid_csp_support (void);
+void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
+void NV_API_CALL os_bug_check (NvU32, const char *);
+NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
+NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
+NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
+NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
+NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
+NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
+NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
+void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
+void NV_API_CALL os_delete_record_for_crashLog (void *);
+NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
+NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
+NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
+NV_STATUS NV_API_CALL os_get_page (NvU64 address);
+NV_STATUS NV_API_CALL os_put_page (NvU64 address);
+NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
+NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
+void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
+NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
+NV_STATUS NV_API_CALL os_open_temporary_file (void **);
+void NV_API_CALL os_close_file (void *);
+NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
+NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
+NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
+NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
+NvBool NV_API_CALL os_is_nvswitch_present (void);
+void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
+NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
+void NV_API_CALL os_free_wait_queue (os_wait_queue *);
+void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
+void NV_API_CALL os_wait_interruptible (os_wait_queue *);
+void NV_API_CALL os_wake_up (os_wait_queue *);
+nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
+nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
+nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
+void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
+int NV_API_CALL os_nv_cap_validate_and_dup_fd (const nv_cap_t *, int);
+void NV_API_CALL os_nv_cap_close_fd (int);
+
+NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *);
+
+extern NvU32 os_page_size;
+extern NvU64 os_page_mask;
+extern NvU8 os_page_shift;
+extern NvU32 os_sev_status;
+extern NvBool os_sev_enabled;
+extern NvBool os_dma_buf_enabled;
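+
+/*
+ * Illustrative helper (not part of the original interface): a typical
+ * error-checked usage pattern for the allocation routines declared above.
+ */
+static inline NV_STATUS os_example_alloc_zeroed(void **pp, NvU32 size)
+{
+    NV_STATUS status = os_alloc_mem(pp, (NvU64)size);
+    if (status == NV_OK)
+        os_mem_set(*pp, 0, size);   // os_mem_set() takes an NvU32 length
+    return status;
+}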
+
+/*
+ * ---------------------------------------------------------------------------
+ *
+ * Debug macros.
+ *
+ * ---------------------------------------------------------------------------
+ */
+
+#define NV_DBG_INFO       0x0
+#define NV_DBG_SETUP      0x1
+#define NV_DBG_USERERRORS 0x2
+#define NV_DBG_WARNINGS   0x3
+#define NV_DBG_ERRORS     0x4
+
+
+void NV_API_CALL out_string(const char *str);
+int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
+
+#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \
+    nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__)
+
+#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \
+    nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status)
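+
+/*
+ * Example (illustrative): NV_DEV_PRINTF() prefixes the message with the
+ * GPU's PCI identifier, so
+ *
+ *     NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "init failed\n");
+ *
+ * logs "NVRM: GPU 0000:01:00.0: init failed" for a GPU at that address.
+ */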
+
+/*
+ * Fields for the os_lock_user_pages flags parameter
+ */
+#define NV_LOCK_USER_PAGES_FLAGS_WRITE     0:0
+#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO  0x00000000
+#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001
+
+// NV OS Tegra platform type defines
+#define NV_OS_TEGRA_PLATFORM_SIM     0
+#define NV_OS_TEGRA_PLATFORM_FPGA    1
+#define NV_OS_TEGRA_PLATFORM_SILICON 2
+
+#endif /* OS_INTERFACE_H */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h
new file mode 100644
index 0000000..37df06f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _OS_CUSTOM_H_
+#define _OS_CUSTOM_H_
+
+/*!
+ * @file os_custom.h
+ * @brief OS module specific definitions for this OS
+ */
+
+#include
+#include
+
+// File modes, added for NVIDIA capabilities.
+#define OS_RUSR 00400 // read permission, owner
+#define OS_WUSR 00200 // write permission, owner
+#define OS_XUSR 00100 // execute/search permission, owner
+#define OS_RWXU (OS_RUSR | OS_WUSR | OS_XUSR) // read, write, execute/search, owner
+#define OS_RGRP 00040 // read permission, group
+#define OS_WGRP 00020 // write permission, group
+#define OS_XGRP 00010 // execute/search permission, group
+#define OS_RWXG (OS_RGRP | OS_WGRP | OS_XGRP) // read, write, execute/search, group
+#define OS_ROTH 00004 // read permission, other
+#define OS_WOTH 00002 // write permission, other
+#define OS_XOTH 00001 // execute/search permission, other
+#define OS_RWXO (OS_ROTH | OS_WOTH | OS_XOTH) // read, write, execute/search, other
+#define OS_RUGO (OS_RUSR | OS_RGRP | OS_ROTH)
+#define OS_WUGO (OS_WUSR | OS_WGRP | OS_WOTH)
+#define OS_XUGO (OS_XUSR | OS_XGRP | OS_XOTH)
+
+// Trigger for collecting GPU state for later extraction.
+NV_STATUS RmLogGpuCrash(OBJGPU *);
+
+// This is a callback function in the miniport.
+// The argument is a device extension, and must be cast as such to be useful.
+typedef void (*MINIPORT_CALLBACK)(void*);
+
+NV_STATUS osPackageRegistry(OBJGPU *pGpu, PACKED_REGISTRY_TABLE *, NvU32 *);
+
+#endif // _OS_CUSTOM_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h
new file mode 100644
index 0000000..484351e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h
@@ -0,0 +1,192 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _OSAPI_H_
+#define _OSAPI_H_
+
+#include "core/system.h"
+#include "gpu/gpu.h"
+
+#include <os-interface.h> // NV_DBG_ERRORS
+#include <nv.h>
+#include <nv-reg.h>
+
+#if defined(__use_altstack__)
+#if defined(QA_BUILD)
+//---------------------------------------------------------------------------
+//
+// 32 bit debug marker values.
+//
+//---------------------------------------------------------------------------
+
+#define NV_MARKER1 (NvU32)(('M' << 24) | ('R' << 16) | ('V' << 8) | 'N')
+#define NV_MARKER2 (NvU32)(('N' << 24) | ('V' << 16) | ('R' << 8) | 'M')
+
+//
+// The two macros below implement a simple alternate stack usage sanity
+// check for QA_BUILD RM builds. NV_ALTSTACK_WRITE_MARKERS() fills
+// altstacks with NV_MARKER1, which enables NV_ALTSTACK_CHECK_MARKERS()
+// to determine the stack usage fairly reliably by looking for the
+// first clobbered marker. If more than 7/8 of the alternate stack was
+// used, NV_ALTSTACK_CHECK_MARKERS() prints an error and asserts.
+//
+#define NV_ALTSTACK_WRITE_MARKERS(sp)                      \
+{                                                          \
+    NvU32 i, *stack = (void *)(sp)->stack;                 \
+    for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++)     \
+        stack[i] = NV_MARKER1;                             \
+}
+
+#define NV_ALTSTACK_CHECK_MARKERS(sp)                                       \
+{                                                                           \
+    NvU32 i, *stack = (void *)(sp)->stack;                                  \
+    for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++)                      \
+    {                                                                       \
+        if (stack[i] != NV_MARKER1)                                         \
+            break;                                                          \
+    }                                                                       \
+    if ((i * sizeof(NvU32)) < ((sp)->size / 8))                             \
+    {                                                                       \
+        nv_printf(NV_DBG_ERRORS, "NVRM: altstack: used %d of %d bytes!\n",  \
+                  ((sp)->size - (i * sizeof(NvU32))), (sp)->size);          \
+        NV_ASSERT_PRECOMP((i * sizeof(NvU32)) >= ((sp)->size / 8));         \
+    }                                                                       \
+}
+#else
+#define NV_ALTSTACK_WRITE_MARKERS(sp)
+#define NV_ALTSTACK_CHECK_MARKERS(sp)
+#endif
+
+#if defined(NVCPU_X86_64)
+#define NV_ENTER_RM_RUNTIME(sp,fp)                                          \
+{                                                                           \
+    NV_ALTSTACK_WRITE_MARKERS(sp);                                          \
+    __asm__ __volatile__ ("movq %%rbp,%0" : "=r" (fp)); /* save %rbp */     \
+    __asm__ __volatile__ ("movq %0,%%rbp" :: "r" ((sp)->top));              \
+}
+
+#define NV_EXIT_RM_RUNTIME(sp,fp)                                           \
+{                                                                           \
+    register void *__rbp __asm__ ("rbp");                                   \
+    if (__rbp != (sp)->top)                                                 \
+    {                                                                       \
+        nv_printf(NV_DBG_ERRORS, "NVRM: detected corrupted runtime stack!\n"); \
+        NV_ASSERT_PRECOMP(__rbp == (sp)->top);                              \
+    }                                                                       \
+    NV_ALTSTACK_CHECK_MARKERS(sp);                                          \
+    __asm__ __volatile__ ("movq %0,%%rbp" :: "r" (fp)); /* restore %rbp */  \
+}
+#else
+#error "gcc \"altstacks\" support is not implemented on this platform!"
+#endif
+#else
+#define NV_ENTER_RM_RUNTIME(sp,fp) { (void)sp; (void)fp; }
+#define NV_EXIT_RM_RUNTIME(sp,fp)
+#endif
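+
+/*
+ * Usage sketch (illustrative): driver entry points bracket calls into core
+ * RM with the macros above so that RM runs on the preallocated alternate
+ * stack:
+ *
+ *     void *fp;
+ *     NV_ENTER_RM_RUNTIME(sp, fp);
+ *     // ... call into core RM ...
+ *     NV_EXIT_RM_RUNTIME(sp, fp);
+ */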
+#endif +#else +#define NV_ENTER_RM_RUNTIME(sp,fp) { (void)sp; (void)fp; } +#define NV_EXIT_RM_RUNTIME(sp,fp) +#endif + +void RmShutdownRm (void); + +NvBool RmInitPrivateState (nv_state_t *); +void RmFreePrivateState (nv_state_t *); + +NvBool RmInitAdapter (nv_state_t *); +NvBool RmPartiallyInitAdapter (nv_state_t *); +void RmShutdownAdapter (nv_state_t *); +void RmDisableAdapter (nv_state_t *); +void RmPartiallyDisableAdapter(nv_state_t *); +NV_STATUS RmGetAdapterStatus (nv_state_t *, NvU32 *); +NV_STATUS RmExcludeAdapter (nv_state_t *); + +NvBool RmGpuHasIOSpaceEnabled (nv_state_t *); + +void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *); +NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); + +NV_STATUS RmAllocOsEvent (NvHandle, nv_file_private_t *, NvU32); +NV_STATUS RmFreeOsEvent (NvHandle, NvU32); + +void RmI2cAddGpuPorts(nv_state_t *); + +NV_STATUS RmInitX86EmuState(OBJGPU *); +void RmFreeX86EmuState(OBJGPU *); +NV_STATUS RmSystemEvent(nv_state_t *, NvU32, NvU32); + +const NvU8 *RmGetGpuUuidRaw(nv_state_t *); + +NV_STATUS nv_vbios_call(OBJGPU *, NvU32 *, NvU32 *); + +int amd_adv_spec_cache_feature(OBJOS *); +int amd_msr_c0011022_incompatible(OBJOS *); + +NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *); + +NV_STATUS rm_alloc_os_event (NvHandle, nv_file_private_t *, NvU32); +NV_STATUS rm_free_os_event (NvHandle, NvU32); +NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *); +void rm_client_free_os_events (NvHandle); + +NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32); +NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *); + +NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *); + +// registry management +NV_STATUS RmInitRegistry (void); +NV_STATUS RmDestroyRegistry (nv_state_t *); + +NV_STATUS RmWriteRegistryDword (nv_state_t *, const char *, NvU32 ); +NV_STATUS RmReadRegistryDword (nv_state_t *, const char *, NvU32 *); +NV_STATUS RmWriteRegistryString (nv_state_t *, const char *, const char *, NvU32); +NV_STATUS RmReadRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32 *); +NV_STATUS RmWriteRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS RmReadRegistryString (nv_state_t *, const char *, NvU8 *, NvU32 *); + +NV_STATUS RmPackageRegistry (nv_state_t *, PACKED_REGISTRY_TABLE *, NvU32 *); + +NvBool RmIsNvifFunctionSupported(NvU32, NvU32); +void RmInitAcpiMethods (OBJOS *, OBJSYS *, OBJGPU *); +void RmUnInitAcpiMethods (OBJSYS *); + +void RmInflateOsToRmPageArray (RmPhysAddr *, NvU64); +void RmDeflateRmToOsPageArray (RmPhysAddr *, NvU64); + +void RmInitS0ixPowerManagement (nv_state_t *); +void RmInitDeferredDynamicPowerManagement (nv_state_t *); +void RmDestroyDeferredDynamicPowerManagement(nv_state_t *); + +NV_STATUS os_ref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t); +void os_unref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t); +void RmHandleDisplayChange (nvidia_stack_t *, nv_state_t *); +void RmUpdateGc6ConsoleRefCount (nv_state_t *, NvBool); + +NvBool rm_get_uefi_console_status (nv_state_t *); +NvU64 rm_get_uefi_console_size (nv_state_t *, NvU64 *); + +RM_API *RmUnixRmApiPrologue (nv_state_t *, THREAD_STATE_NODE *, NvU32 module); +void RmUnixRmApiEpilogue (nv_state_t *, THREAD_STATE_NODE *); + +static inline NvBool rm_is_system_notebook(void) +{ + return (nv_is_chassis_notebook() || nv_acpi_is_battery_present()); +} + 
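+//
+// Illustrative sketch only, not part of the original change: a caller of the
+// altstack macros above typically saves the frame pointer in a local,
+// switches onto the per-thread alternate stack around the RM entry point,
+// and restores it on the way out. The entry-point name and body below are
+// hypothetical; only NV_ENTER_RM_RUNTIME/NV_EXIT_RM_RUNTIME, nvidia_stack_t,
+// and RmDisableAdapter() come from this patch.
+//
+//     void example_rm_entry(nvidia_stack_t *sp, nv_state_t *nv)
+//     {
+//         void *fp; // frame pointer saved by NV_ENTER_RM_RUNTIME
+//
+//         NV_ENTER_RM_RUNTIME(sp, fp); // saves %rbp in fp, switches to sp->top
+//         RmDisableAdapter(nv);        // RM work now runs on the altstack
+//         NV_EXIT_RM_RUNTIME(sp, fp);  // sanity-checks the stack, restores %rbp
+//     }
+//
+// On QA builds the macros additionally fill and verify the NV_MARKER1
+// pattern to estimate how much of the alternate stack was consumed.
+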
+#endif // _OSAPI_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h new file mode 100644 index 0000000..7afbf63 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef OSFUNCS_H +#define OSFUNCS_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Declarations for the Operating System Specific Functions. * +* * +\***************************************************************************/ + +#include + +OSQueueWorkItem osQueueWorkItem; +OSQueueWorkItemWithFlags osQueueWorkItemWithFlags; +OSQueueSystemWorkItem osQueueSystemWorkItem; +OSDbgBreakpointEnabled osDbgBreakpointEnabled; + +void* osGetStereoDongleInterface(void); + +OSCallACPI_DSM osCallACPI_DSM; +OSCallACPI_DDC osCallACPI_DDC; +OSCallACPI_NVHG_ROM osCallACPI_NVHG_ROM; +OSCallACPI_DOD osCallACPI_DOD; +OSCallACPI_MXDS osCallACPI_MXDS; +OSCallACPI_MXDM osCallACPI_MXDM; + +#if defined(NVCPU_X86_64) +OSnv_rdcr4 nv_rdcr4; +NvU64 nv_rdcr3(OBJOS *); +OSnv_cpuid nv_cpuid; +#endif + +#endif // OSFUNCS_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h new file mode 100644 index 0000000..1330ad8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMOBJEXPORTIMPORT_H_ +#define _RMOBJEXPORTIMPORT_H_ + +#include "nvstatus.h" + +typedef NvHandle RmObjExportHandle; + +NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject, + RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance); + +void RmFreeObjExportHandle(RmObjExportHandle hObject); + +NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent, + NvHandle *phDstObject, RmObjExportHandle hSrcObject, + NvU8 *pObjectType); + +NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance); +#endif // _RMOBJEXPORTIMPORT_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c new file mode 100644 index 0000000..b262789 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c @@ -0,0 +1,859 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + +//***************************** Module Header ********************************** +// +// This code is linked into the resource manager proper. 
It receives the +// ioctl from the resource manager's customer, unbundles the args and +// calls the correct resman routines. +// +//****************************************************************************** + +#include +#include +#include +#include +#include +#include +#include + +#include +#include // NV01_ROOT +#include // NV01_ROOT_NON_PRIV +#include // NV01_EVENT +#include // NV01_MEMORY_SYSTEM +#include // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +#define NV_CTL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) == 0) \ + { \ + rmStatus = NV_ERR_INVALID_ARGUMENT; \ + goto done; \ + } \ +} + +#define NV_ACTUAL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) != 0) \ + { \ + rmStatus = NV_ERR_INVALID_ARGUMENT; \ + goto done; \ + } \ +} + +static NvBool RmIsDeviceRefNeeded(NVOS54_PARAMETERS *pApi, NvS32 *fd) +{ + switch(pApi->cmd) + { + default: + *fd = -1; + return NV_FALSE; + } +} + +// only return errors through pApi->status +static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo) +{ + NV_STATUS rmStatus; + NvBool writable; + NvU32 flags = 0; + NvU64 allocSize, pageCount, *pPteArray = NULL; + void *pDescriptor, *pPageArray = NULL; + + pDescriptor = NvP64_VALUE(pApi->data.AllocOsDesc.descriptor); + if (((NvUPtr)pDescriptor & ~os_page_mask) != 0) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto done; + } + + // Check to prevent an NvU64 overflow + if ((pApi->data.AllocOsDesc.limit + 1) == 0) + { + rmStatus = NV_ERR_INVALID_LIMIT; + goto done; + } + + allocSize = (pApi->data.AllocOsDesc.limit + 1); + pageCount = (1 + ((allocSize - 1) / os_page_size)); + + writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2); + flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags); + rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags); + if (rmStatus == NV_OK) + { + pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray; + pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY; + } + else if (rmStatus == NV_ERR_INVALID_ADDRESS) + { + rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount, + &pPteArray, &pPageArray); + if (rmStatus == NV_OK) + { + if (pPageArray != NULL) + { + pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray; + pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY; + } + else if (pPteArray != NULL) + { + pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray; + pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY; + } + else + { + NV_ASSERT_FAILED("unknown memory import type"); + rmStatus = NV_ERR_NOT_SUPPORTED; + } + } + } + if (rmStatus != NV_OK) + goto done; + + Nv04VidHeapControlWithSecInfo(pApi, secInfo); + + if (pApi->status != NV_OK) + { + switch (pApi->data.AllocOsDesc.descriptorType) + { + default: + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY: + os_unlock_user_pages(pageCount, pPageArray); + break; + } + } + +done: + if (rmStatus != NV_OK) + pApi->status = rmStatus; +} + +// only return errors through pApi->status +static void RmAllocOsDescriptor(NVOS02_PARAMETERS *pApi, API_SECURITY_INFO secInfo) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 flags, attr, attr2; + NVOS32_PARAMETERS *pVidHeapParams; + + if (!FLD_TEST_DRF(OS02, _FLAGS, _LOCATION, _PCI, pApi->flags) || + !FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, pApi->flags)) + { + rmStatus = NV_ERR_INVALID_FLAGS; + goto done; + } + + attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + + if 
(FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, pApi->flags) || + FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, pApi->flags)) + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr); + } + else if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, pApi->flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr); + else { + rmStatus = NV_ERR_INVALID_FLAGS; + goto done; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pApi->flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); + else + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr); + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, pApi->flags)) + attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); + else + attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + pVidHeapParams = portMemAllocNonPaged(sizeof(NVOS32_PARAMETERS)); + if (pVidHeapParams == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + goto done; + } + portMemSet(pVidHeapParams, 0, sizeof(NVOS32_PARAMETERS)); + + pVidHeapParams->hRoot = pApi->hRoot; + pVidHeapParams->hObjectParent = pApi->hObjectParent; + pVidHeapParams->function = NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR; + + flags = (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED | + NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED); + + if (DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) + attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2); + + // currently CPU-RO memory implies GPU-RO as well + if (DRF_VAL(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, pApi->flags) || + DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) + attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2); + + pVidHeapParams->data.AllocOsDesc.hMemory = pApi->hObjectNew; + pVidHeapParams->data.AllocOsDesc.flags = flags; + pVidHeapParams->data.AllocOsDesc.attr = attr; + pVidHeapParams->data.AllocOsDesc.attr2 = attr2; + pVidHeapParams->data.AllocOsDesc.descriptor = pApi->pMemory; + pVidHeapParams->data.AllocOsDesc.limit = pApi->limit; + pVidHeapParams->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS; + + RmCreateOsDescriptor(pVidHeapParams, secInfo); + + pApi->status = pVidHeapParams->status; + + portMemFree(pVidHeapParams); + +done: + if (rmStatus != NV_OK) + pApi->status = rmStatus; +} + +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hRoot) == NV_OFFSETOF(NVOS64_PARAMETERS, hRoot)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectParent) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectParent)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectNew) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectNew)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hClass) == NV_OFFSETOF(NVOS64_PARAMETERS, hClass)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, pAllocParms) == NV_OFFSETOF(NVOS64_PARAMETERS, pAllocParms)); + +NV_STATUS RmIoctl( + nv_state_t *nv, + nv_file_private_t *nvfp, + NvU32 cmd, + void *data, + NvU32 dataSize +) +{ + NV_STATUS rmStatus = NV_ERR_GENERIC; + API_SECURITY_INFO secInfo = { }; + + secInfo.privLevel = osIsAdministrator() ? 
RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + secInfo.paramLocation = PARAM_LOCATION_USER; + secInfo.pProcessToken = NULL; + secInfo.gpuOsInfo = NULL; + secInfo.clientOSInfo = nvfp->ctl_nvfp; + if (secInfo.clientOSInfo == NULL) + secInfo.clientOSInfo = nvfp; + + switch (cmd) + { + case NV_ESC_RM_ALLOC_MEMORY: + { + nv_ioctl_nvos02_parameters_with_fd *pApi; + NVOS02_PARAMETERS *pParms; + + pApi = data; + pParms = &pApi->params; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(nv_ioctl_nvos02_parameters_with_fd)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (pParms->hClass == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) + RmAllocOsDescriptor(pParms, secInfo); + else + { + NvU32 flags = pParms->flags; + + Nv01AllocMemoryWithSecInfo(pParms, secInfo); + + // + // If the system memory is going to be mapped immediately, + // create the mmap context for it now. + // + if ((pParms->hClass == NV01_MEMORY_SYSTEM) && + (!FLD_TEST_DRF(OS02, _FLAGS, _ALLOC, _NONE, flags)) && + (!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) && + (pParms->status == NV_OK)) + { + if (rm_create_mmap_context(nv, pParms->hRoot, + pParms->hObjectParent, pParms->hObjectNew, + pParms->pMemory, pParms->limit + 1, 0, + NV_MEMORY_DEFAULT, + pApi->fd) != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "could not create mmap context for %p\n", + NvP64_VALUE(pParms->pMemory)); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + } + + break; + } + + case NV_ESC_RM_ALLOC_OBJECT: + { + NVOS05_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS05_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv01AllocObjectWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_ALLOC: + { + NVOS21_PARAMETERS *pApi = data; + NVOS64_PARAMETERS *pApiAccess = data; + NvBool bAccessApi = (dataSize == sizeof(NVOS64_PARAMETERS)); + + if ((dataSize != sizeof(NVOS21_PARAMETERS)) && + (dataSize != sizeof(NVOS64_PARAMETERS))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + switch (pApi->hClass) + { + case NV01_ROOT: + case NV01_ROOT_CLIENT: + case NV01_ROOT_NON_PRIV: + { + NV_CTL_DEVICE_ONLY(nv); + + // Force userspace client allocations to be the _CLIENT class. 
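+                //
+                // (Illustrative note, not part of the original change: any
+                // user-space allocation naming NV01_ROOT or
+                // NV01_ROOT_NON_PRIV is aliased here, so the object the
+                // client actually receives is of class NV01_ROOT_CLIENT.)
+                //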
+ pApi->hClass = NV01_ROOT_CLIENT; + break; + } + case NV01_EVENT: + case NV01_EVENT_OS_EVENT: + case NV01_EVENT_KERNEL_CALLBACK: + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + break; + } + default: + { + NV_CTL_DEVICE_ONLY(nv); + break; + } + } + + if (!bAccessApi) + { + Nv04AllocWithSecInfo(pApi, secInfo); + } + else + { + Nv04AllocWithAccessSecInfo(pApiAccess, secInfo); + } + + break; + } + + case NV_ESC_RM_FREE: + { + NVOS00_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS00_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv01FreeWithSecInfo(pApi, secInfo); + + if (pApi->status == NV_OK && + pApi->hObjectOld == pApi->hRoot) + { + rm_client_free_os_events(pApi->hRoot); + } + + break; + } + + case NV_ESC_RM_VID_HEAP_CONTROL: + { + NVOS32_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS32_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (pApi->function == NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR) + RmCreateOsDescriptor(pApi, secInfo); + else + Nv04VidHeapControlWithSecInfo(pApi, secInfo); + + break; + } + + case NV_ESC_RM_I2C_ACCESS: + { + NVOS_I2C_ACCESS_PARAMS *pApi = data; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS_I2C_ACCESS_PARAMS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04I2CAccessWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_IDLE_CHANNELS: + { + NVOS30_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS30_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04IdleChannelsWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_MAP_MEMORY: + { + nv_ioctl_nvos33_parameters_with_fd *pApi; + NVOS33_PARAMETERS *pParms; + + pApi = data; + pParms = &pApi->params; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(nv_ioctl_nvos33_parameters_with_fd)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // Don't allow userspace to override the caching type + pParms->flags = FLD_SET_DRF(OS33, _FLAGS, _CACHING_TYPE, _DEFAULT, pParms->flags); + Nv04MapMemoryWithSecInfo(pParms, secInfo); + + if (pParms->status == NV_OK) + { + pParms->status = rm_create_mmap_context(nv, pParms->hClient, + pParms->hDevice, pParms->hMemory, + pParms->pLinearAddress, pParms->length, + pParms->offset, + DRF_VAL(OS33, _FLAGS, _CACHING_TYPE, pParms->flags), + pApi->fd); + if (pParms->status != NV_OK) + { + NVOS34_PARAMETERS params; + portMemSet(¶ms, 0, sizeof(NVOS34_PARAMETERS)); + params.hClient = pParms->hClient; + params.hDevice = pParms->hDevice; + params.hMemory = pParms->hMemory; + params.pLinearAddress = pParms->pLinearAddress; + params.flags = pParms->flags; + Nv04UnmapMemoryWithSecInfo(¶ms, secInfo); + } + } + break; + } + + case NV_ESC_RM_UNMAP_MEMORY: + { + NVOS34_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS34_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04UnmapMemoryWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_ACCESS_REGISTRY: + { + NVOS38_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS38_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->status = rm_access_registry(pApi->hClient, + pApi->hObject, + pApi->AccessType, + pApi->pDevNode, + pApi->DevNodeLength, + pApi->pParmStr, + pApi->ParmStrLength, + pApi->pBinaryData, + &pApi->BinaryDataLength, + &pApi->Data, + &pApi->Entry); + break; + } + + case 
NV_ESC_RM_ALLOC_CONTEXT_DMA2: + { + NVOS39_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS39_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04AllocContextDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_BIND_CONTEXT_DMA: + { + NVOS49_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS49_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04BindContextDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_MAP_MEMORY_DMA: + { + NVOS46_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS46_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04MapMemoryDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_UNMAP_MEMORY_DMA: + { + NVOS47_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS47_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04UnmapMemoryDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_DUP_OBJECT: + { + NVOS55_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS55_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04DupObjectWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_SHARE: + { + NVOS57_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS57_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04ShareWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_ALLOC_OS_EVENT: + { + nv_ioctl_alloc_os_event_t *pApi = data; + + if (dataSize != sizeof(nv_ioctl_alloc_os_event_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->Status = rm_alloc_os_event(pApi->hClient, + nvfp, + pApi->fd); + break; + } + + case NV_ESC_FREE_OS_EVENT: + { + nv_ioctl_free_os_event_t *pApi = data; + + if (dataSize != sizeof(nv_ioctl_free_os_event_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd); + break; + } + + case NV_ESC_RM_GET_EVENT_DATA: + { + NVOS41_PARAMETERS *pApi = data; + + if (dataSize != sizeof(NVOS41_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->status = rm_get_event_data(nvfp, + pApi->pEvent, + &pApi->MoreEvents); + break; + } + + case NV_ESC_STATUS_CODE: + { + nv_state_t *pNv; + nv_ioctl_status_code_t *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(nv_ioctl_status_code_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pNv = nv_get_adapter_state(pApi->domain, pApi->bus, pApi->slot); + if (pNv == NULL) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + rmStatus = rm_get_adapter_status(pNv, &pApi->status); + + if (rmStatus != NV_OK) + goto done; + + break; + } + + case NV_ESC_RM_CONTROL: + { + NVOS54_PARAMETERS *pApi = data; + void *priv = NULL; + nv_file_private_t *nvfp; + NvS32 fd; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS54_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (RmIsDeviceRefNeeded(pApi, &fd)) + { + nvfp = nv_get_file_private(fd, NV_FALSE, &priv); + if (nvfp == NULL) + { + rmStatus = NV_ERR_INVALID_DEVICE; + goto done; + } + + secInfo.gpuOsInfo = priv; + } + + Nv04ControlWithSecInfo(pApi, secInfo); + + if ((pApi->status != NV_OK) && (priv != NULL)) + { + nv_put_file_private(priv); + + secInfo.gpuOsInfo = NULL; + } + + break; + } + + case 
NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO: + { + NVOS56_PARAMETERS *pApi = data; + void *pOldCpuAddress; + void *pNewCpuAddress; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS56_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pOldCpuAddress = NvP64_VALUE(pApi->pOldCpuAddress); + pNewCpuAddress = NvP64_VALUE(pApi->pNewCpuAddress); + + pApi->status = rm_update_device_mapping_info(pApi->hClient, + pApi->hDevice, + pApi->hMemory, + pOldCpuAddress, + pNewCpuAddress); + break; + } + + case NV_ESC_REGISTER_FD: + { + nv_ioctl_register_fd_t *params = data; + void *priv = NULL; + nv_file_private_t *ctl_nvfp; + + if (dataSize != sizeof(nv_ioctl_register_fd_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus != NV_OK) + goto done; + + // If there is already a ctl fd registered on this nvfp, fail. + if (nvfp->ctl_nvfp != NULL) + { + // UNLOCK: release API lock + rmApiLockRelease(); + rmStatus = NV_ERR_INVALID_STATE; + goto done; + } + + // + // Note that this call is valid for both "actual" devices and ctrl + // devices. In particular, NV_ESC_ALLOC_OS_EVENT can be used with + // both types of devices. + // But, the ctl_fd passed in should always correspond to a control FD. + // + ctl_nvfp = nv_get_file_private(params->ctl_fd, + NV_TRUE, /* require ctl fd */ + &priv); + if (ctl_nvfp == NULL) + { + // UNLOCK: release API lock + rmApiLockRelease(); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // Disallow self-referential links, and disallow links to FDs that + // themselves have a link. + if ((ctl_nvfp == nvfp) || (ctl_nvfp->ctl_nvfp != NULL)) + { + nv_put_file_private(priv); + // UNLOCK: release API lock + rmApiLockRelease(); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // + // nvfp->ctl_nvfp is read outside the lock, so set it atomically. + // Note that once set, this can never be removed until the fd + // associated with nvfp is closed. We hold on to 'priv' until the + // fd is closed, too, to ensure that the fd associated with + // ctl_nvfp remains valid. + // + portAtomicSetSize(&nvfp->ctl_nvfp, ctl_nvfp); + nvfp->ctl_nvfp_priv = priv; + + // UNLOCK: release API lock + rmApiLockRelease(); + + // NOTE: nv_put_file_private(priv) is not called here. It MUST be + // called during cleanup of this nvfp. + rmStatus = NV_OK; + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "unknown NVRM ioctl command: 0x%x\n", cmd); + goto done; + } + } + + rmStatus = NV_OK; +done: + + return rmStatus; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c new file mode 100644 index 0000000..f1ed289 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + return NV_OK; +} + +void NV_API_CALL rm_init_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv, + NvBool bPr3AcpiMethodPresent +) +{ +} + +void NV_API_CALL rm_cleanup_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +NV_STATUS NV_API_CALL rm_ref_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ + return NV_OK; +} + +void NV_API_CALL rm_unref_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ +} + +NV_STATUS NV_API_CALL rm_transition_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + NvBool bEnter +) +{ + return NV_OK; +} + +const char* NV_API_CALL rm_get_vidmem_power_status( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return "?"; +} + +const char* NV_API_CALL rm_get_dynamic_power_management_status( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return "?"; +} + +const char* NV_API_CALL rm_get_gpu_gcx_support( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvBool bGcxTypeGC6 +) +{ + return "?"; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams +) +{ + return NV_OK; +} + +void +RmUpdateGc6ConsoleRefCount +( + nv_state_t *nv, + NvBool bIncrease +) +{ +} + +void +RmInitS0ixPowerManagement +( + nv_state_t *nv +) +{ +} + +void +RmInitDeferredDynamicPowerManagement +( + nv_state_t *nv +) +{ +} + +void +RmDestroyDeferredDynamicPowerManagement +( + nv_state_t *nv +) +{ +} + +void RmHandleDisplayChange +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +NV_STATUS +os_ref_dynamic_power +( + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ + return NV_OK; +} + +void +os_unref_dynamic_power +( + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ +} + +NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *limitRated, + NvU32 
*limitCurr +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +deviceCtrlCmdOsUnixVTSwitch_IMPL +( + Device *pDevice, + NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice, + NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_save_low_res_mode( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool NV_API_CALL rm_isr( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *NeedBottomHalf +) +{ + *NeedBottomHalf = NV_FALSE; + return NV_TRUE; +} + +void NV_API_CALL rm_isr_bh( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ +} + +void NV_API_CALL rm_isr_bh_unlocked( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ +} + +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *faultsCopied +) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *faultsCopied +) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *faultsCopied +) +{ + return NV_OK; +} + +NvBool NV_API_CALL rm_is_chipset_io_coherent +( + nvidia_stack_t *sp +) +{ + return NV_FALSE; +} + +NvBool NV_API_CALL rm_disable_iomap_wc(void) +{ + return NV_FALSE; +} + +NV_STATUS RmInitX86EmuState(OBJGPU *pGpu) +{ + return NV_OK; +} + +void RmFreeX86EmuState(OBJGPU *pGpu) +{ +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c new file mode 100644 index 0000000..3c1037c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +void* memset(void* s, int c, NvUPtr n) +{ + return os_mem_set(s, (NvU8)c, (NvU32)n); +} + +void* memcpy(void* dest, const void* src, NvUPtr n) +{ + return os_mem_copy(dest, src, (NvU32)n); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c new file mode 100644 index 0000000..25398d4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c @@ -0,0 +1,150 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvstatus.h" +#include "os/os.h" +#include "nv.h" +#include "nv-hypervisor.h" + +HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void) +{ + return OS_HYPERVISOR_UNKNOWN; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_type_ids( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvU32 *numVgpuTypes, + NvU32 **vgpuTypeIds, + NvBool isVirtfn +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_process_vf_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvU8 cmd, + NvU32 domain, + NvU8 bus, + NvU8 slot, + NvU8 function, + NvBool isMdevAttached, + void *vf_pci_info +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_type_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvU32 vgpuTypeId, + char *buffer, + int type_info, + NvU8 devfn +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_create_request( + nvidia_stack_t *sp, + nv_state_t *pNv, + const NvU8 *pMdevUuid, + NvU32 vgpuTypeId, + NvU16 *vgpuId, + NvU32 gpuPciBdf +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_update_request( + nvidia_stack_t *sp , + const NvU8 *pMdevUuid, + VGPU_DEVICE_STATE deviceState, + NvU64 *offsets, + NvU64 *sizes, + const char *configParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap( + nvidia_stack_t *sp , + nv_state_t *pNv, + const NvU8 *pMdevUuid, + NvU64 **offsets, + NvU64 **sizes, + NvU32 *numAreas +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_gpu_bind_event( + nvidia_stack_t *sp +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_start( + nvidia_stack_t *sp, + const NvU8 *pMdevUuid, + void *waitQueue, + NvS32 *returnStatus, + NvU8 *vmName, + NvU32 qemuPid +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_delete( + nvidia_stack_t *sp, + const NvU8 *pMdevUuid, + NvU16 vgpuId +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_bar_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + const NvU8 *pMdevUuid, + NvU64 *size, + NvU32 regionIndex, + void *pVgpuVfioRef +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void initVGXSpecificRegistry(OBJGPU *pGpu) +{} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c new file mode 100644 index 0000000..64572f6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c @@ -0,0 +1,4908 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include // NV device driver interface +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "gpu/gpu.h" +#include + +#include "nverror.h" + +#include "mem_mgr/io_vaspace.h" +#include +#include "gpu/mem_mgr/mem_desc.h" +#include "core/thread_state.h" +#include +#include +#include +#include +#include "virtualization/hypervisor/hypervisor.h" +#include "rmobjexportimport.h" +#include +#include "rmapi/rs_utils.h" +#include "rmapi/client_resource.h" +#include "os/dce_rm_client_ipc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + + + +extern const char *ppOsBugCheckBugcodeStr[]; + + +ct_assert(NV_RM_PAGE_SIZE == RM_PAGE_SIZE); +ct_assert(NV_RM_PAGE_MASK == RM_PAGE_MASK); +ct_assert(NV_RM_PAGE_SHIFT == RM_PAGE_SHIFT); + +typedef struct +{ + NvU32 euid; + NvU32 pid; +} TOKEN_USER, *PTOKEN_USER; + +struct OS_RM_CAPS +{ + NvU32 count; + + // This should be the last element + nv_cap_t **caps; +}; + +NvBool osIsRaisedIRQL() +{ + return (!os_semaphore_may_sleep()); +} + +NvBool osIsISR() +{ + return os_is_isr(); +} + +NV_STATUS osGetDriverBlock +( + OS_GPU_INFO *pOsGpuInfo, + OS_DRIVER_BLOCK *pBlock +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osGetCurrentTick(NvU64 *pTimeInNs) +{ + *pTimeInNs = os_get_current_tick(); + return NV_OK; +} + +NvU64 osGetTickResolution(void) +{ + return os_get_tick_resolution(); +} + +NV_STATUS osGetPerformanceCounter(NvU64 *pTimeInNs) +{ + *pTimeInNs = os_get_current_tick_hr(); + return NV_OK; +} + +NV_STATUS osGetCurrentTime( + NvU32 *pSeconds, + NvU32 *pMicroSeconds +) +{ + return os_get_current_time(pSeconds, pMicroSeconds); +} + +/*! + * @brief Get timestamp for logging. + * + * Everything that logs a time stamp should use this routine for consistency. + * + * The returned value is OS dependent. We want the time stamp to use + * KeQueryPerformanceCounter on Windows so it matches the DirectX timestamps. + * Linux uses microseconds since 1970 (osGetCurrentTime), since matching DirectX + * is not a priority. + * + * osGetTimestampFreq returns the frequency required to decode the time stamps. + * + * @returns system dependent timestamp. + */ +NvU64 osGetTimestamp(void) +{ + NvU32 sec = 0; + NvU32 usec = 0; + osGetCurrentTime(&sec, &usec); + return (NvU64)sec * 1000000 + usec; +} + +/*! + * @brief Get timestamp frequency. + * + * Timestamps are OS dependent. This call returns the frequency + * required to decode them. + * + * @returns Timestamp frequency. For example, 1000000 for MHz. 
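+ *
+ * A return value of 1000000 means timestamps advance at 1 MHz, i.e. with
+ * microsecond resolution, which matches osGetTimestamp() above. An
+ * illustrative decode (not part of the original change), using two
+ * osGetTimestamp() samples:
+ *
+ *     NvU64 t0 = osGetTimestamp();
+ *     // ... timed work ...
+ *     NvU64 t1 = osGetTimestamp();
+ *     NvU64 elapsedSeconds = (t1 - t0) / osGetTimestampFreq();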
+ */ +NvU64 osGetTimestampFreq(void) +{ + return 1000000; +} + +NV_STATUS osDelay(NvU32 milliseconds) +{ + return os_delay(milliseconds); +} + +NV_STATUS osDelayUs(NvU32 microseconds) +{ + return os_delay_us(microseconds); +} + +NV_STATUS osDelayNs(NvU32 nanoseconds) +{ + NvU32 microseconds = NV_MAX(1, (nanoseconds / 1000)); + return os_delay_us(microseconds); +} + +NvU32 osGetCpuFrequency(void) +{ + /* convert os_get_cpu_frequency()'s return value from Hz to MHz */ + return ((NvU32)(os_get_cpu_frequency() / 1000000ULL)); +} + +void* osPciInitHandle( + NvU32 Domain, + NvU8 Bus, + NvU8 Slot, + NvU8 Function, + NvU16 *pVendor, + NvU16 *pDevice +) +{ + // + // Check if the BDF is for a GPU that's already been attached, for which + // we should already have a handle cached. This won't catch devices that + // have been probed but not yet attached, but that shouldn't be a common + // occurrence. + // + // More importantly, having this check here means we don't need to check + // a global list of devices in the kernel interface layer, which could + // have the implication of taking another lock, causing hairy lock + // ordering issues. + // + if (Function == 0) + { + OBJGPU *pGpu = gpumgrGetGpuFromBusInfo(Domain, Bus, Slot); + if (pGpu != NULL) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + if (pVendor) *pVendor = nv->pci_info.vendor_id; + if (pDevice) *pDevice = nv->pci_info.device_id; + return nv->handle; + } + } + + return os_pci_init_handle(Domain, Bus, Slot, Function, pVendor, pDevice); +} + +NvU8 osPciReadByte( + void *pHandle, + NvU32 Offset +) +{ + NvU8 val; + os_pci_read_byte(pHandle, Offset, &val); + return val; +} + +NvU16 osPciReadWord( + void *pHandle, + NvU32 Offset +) +{ + NvU16 val; + os_pci_read_word(pHandle, Offset, &val); + return val; +} + +NvU32 osPciReadDword( + void *pHandle, + NvU32 Offset +) +{ + NvU32 val; + os_pci_read_dword(pHandle, Offset, &val); + return val; +} + +void osPciWriteByte( + void *pHandle, + NvU32 Offset, + NvU8 Value +) +{ + os_pci_write_byte(pHandle, Offset, Value); +} + +void osPciWriteWord( + void *pHandle, + NvU32 Offset, + NvU16 Value +) +{ + os_pci_write_word(pHandle, Offset, Value); +} + +void osPciWriteDword( + void *pHandle, + NvU32 Offset, + NvU32 Value +) +{ + os_pci_write_dword(pHandle, Offset, Value); +} + +void* osMapKernelSpace( + RmPhysAddr Start, + NvU64 Size, + NvU32 Mode, + NvU32 Protect +) +{ + NvU64 offset; + NvU8 *ptr; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return NULL; + } + + offset = (Start & ~os_page_mask); + Start &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + + ptr = os_map_kernel_space(Start, Size, Mode); + if (ptr != NULL) + return (ptr + offset); + + return NULL; +} + +void osUnmapKernelSpace( + void *pAddress, + NvU64 Size +) +{ + NvU64 offset; + NvUPtr ptr = (NvUPtr)pAddress; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return; + } + + offset = (ptr & ~os_page_mask); + ptr &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + os_unmap_kernel_space((void *)ptr, Size); +} + +void* osMapIOSpace( + RmPhysAddr Start, + NvU64 Size, + void ** pData, + NvU32 User, + NvU32 Mode, + NvU32 Protect +) +{ + + NvU64 offset; + NvU8 *addr; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return NULL; + } + + offset = (Start & ~os_page_mask); + Start &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + + if (User) + addr = os_map_user_space(Start, Size, Mode, Protect, pData); + else + addr = os_map_kernel_space(Start, Size, Mode); + if (addr != 
NULL) + return (addr + offset); + + return addr; +} + +void osUnmapIOSpace( + void *pAddress, + NvU64 Size, + void *pData, + NvU32 User +) +{ + NvU64 offset; + NvUPtr addr = (NvUPtr)pAddress; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return; + } + + offset = (addr & ~os_page_mask); + addr &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + + if (User) + os_unmap_user_space((void *)addr, Size, pData); + else + os_unmap_kernel_space((void *)addr, Size); +} + +NV_STATUS osGetNumMemoryPages +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 *pNumPages +) +{ + void *pAllocPrivate = NULL; + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (pAllocPrivate == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + return nv_get_num_phys_pages(pAllocPrivate, pNumPages); +} + +NV_STATUS osGetMemoryPages +( + MEMORY_DESCRIPTOR *pMemDesc, + void *pPages, + NvU32 *pNumPages +) +{ + void *pAllocPrivate = NULL; + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (pAllocPrivate == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + return nv_get_phys_pages(pAllocPrivate, pPages, pNumPages); +} + +NV_STATUS osMapSystemMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Length, + NvBool Kernel, + NvU32 Protect, + NvP64 *ppAddress, + NvP64 *ppPrivate +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + + RmPhysAddr userAddress; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NV_STATUS rmStatus = NV_OK; + void *pAllocPrivate = NULL; + void *pAddress; + void *pPrivate = NULL; + NvU64 pageIndex; + NvU32 pageOffset; + + *ppAddress = NvP64_NULL; + *ppPrivate = NvP64_NULL; + + if ((Offset + Length) < Length) + return NV_ERR_INVALID_ARGUMENT; + if ((Offset + Length) > pMemDesc->Size) + return NV_ERR_INVALID_ARGUMENT; + + pageIndex = (Offset >> os_page_shift); + pageOffset = (Offset & ~os_page_mask); + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (!pAllocPrivate) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + if (Kernel) + { + pAddress = nv_alloc_kernel_mapping(nv, pAllocPrivate, + pageIndex, pageOffset, Length, &pPrivate); + if (pAddress == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to create system memory kernel mapping!\n"); + rmStatus = NV_ERR_GENERIC; + } + else + { + *ppAddress = NV_PTR_TO_NvP64(pAddress); + *ppPrivate = NV_PTR_TO_NvP64(pPrivate); + } + } + else + { + rmStatus = nv_alloc_user_mapping(nv, pAllocPrivate, + pageIndex, pageOffset, Length, Protect, &userAddress, + &pPrivate); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to create system memory user mapping!\n"); + } + else + { + *ppAddress = (NvP64)(userAddress); + *ppPrivate = NV_PTR_TO_NvP64(pPrivate); + } + } + + return rmStatus; +} + +void osUnmapSystemMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvU32 ProcessId, + NvP64 pAddress, + NvP64 pPrivate +) +{ + NV_STATUS status; + void *pAllocPrivate = memdescGetMemData(pMemDesc); + OBJGPU *pGpu = pMemDesc->pGpu; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (Kernel) + { + status = nv_free_kernel_mapping(nv, pAllocPrivate, NvP64_VALUE(pAddress), + NvP64_VALUE(pPrivate)); + } + else + { + status = nv_free_user_mapping(nv, pAllocPrivate, (NvU64)pAddress, + NvP64_VALUE(pPrivate)); + } + + if (pGpu != NULL && + pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) && + memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM && + pAllocPrivate != NULL) + { + nv_unregister_phys_pages(nv, 
pAllocPrivate); + memdescSetMemData(pMemDesc, NULL, NULL); + } + + NV_ASSERT(status == NV_OK); +} + +void osIoWriteByte( + NvU32 Address, + NvU8 Value +) +{ + os_io_write_byte(Address, Value); +} + +NvU16 osIoReadWord( + NvU32 Address +) +{ + return os_io_read_word(Address); +} + +void osIoWriteWord( + NvU32 Address, + NvU16 Value +) +{ + os_io_write_word(Address, Value); +} + +NvU8 osIoReadByte( + NvU32 Address +) +{ + return os_io_read_byte(Address); +} + +NvBool osIsAdministrator(void) +{ + return os_is_administrator(); +} + +NvBool osAllowPriorityOverride(void) +{ + return os_allow_priority_override(); +} + +NvU32 osGetCurrentProcess(void) +{ + return os_get_current_process(); +} + +void osGetCurrentProcessName(char *ProcName, NvU32 Length) +{ + return os_get_current_process_name(ProcName, Length); +} + +NV_STATUS osGetCurrentThread(OS_THREAD_HANDLE *pThreadId) +{ + NV_STATUS rmStatus; + NvU64 threadId = 0; + + if (pThreadId == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = os_get_current_thread(&threadId); + if (rmStatus == NV_OK) + { + *pThreadId = threadId; + } + else + { + *pThreadId = 0; + } + + return rmStatus; +} + +NV_STATUS osAttachToProcess(void** ppProcessInfo, NvU32 ProcessId) +{ + // + // This function is used by RmUnmapMemory() to attach to the + // process for which a given device memory mapping was + // created, in order to be able to unmap it. On Linux/UNIX + // platforms, we can't "attach" to a random process, but + // since we don't create/destroy user mappings in the RM, we + // don't need to, either. + // + // Report success to the caller to keep RmUnmapMemory() from + // failing, and memory from being leaked as a result. + // + *ppProcessInfo = NULL; + return NV_OK; +} + +void osDetachFromProcess(void* pProcessInfo) +{ + // stub + return; +} + +NvBool osDbgBreakpointEnabled(void) +{ + return NV_TRUE; +} + +NV_STATUS osAcquireRmSema(void *pSema) +{ + return NV_OK; +} + +NV_STATUS osCondAcquireRmSema(void *pSema) +{ + return NV_OK; +} + +NvU32 osReleaseRmSema(void *pSema, OBJGPU *pDpcGpu) +{ + return NV_SEMA_RELEASE_SUCCEED; +} + +void osSpinLoop(void) +{ +} + +NV_STATUS osSchedule(void) +{ + return os_schedule(); +} + +NV_STATUS osQueueWorkItemWithFlags( + OBJGPU *pGpu, + OSWorkItemFunction pFunction, + void *pParams, + NvU32 flags +) +{ + nv_work_item_t *pWi; + nv_state_t *nv; + NV_STATUS status; + + pWi = portMemAllocNonPaged(sizeof(nv_work_item_t)); + + if (NULL == pWi) + { + return NV_ERR_NO_MEMORY; + } + + pWi->flags = NV_WORK_ITEM_FLAGS_REQUIRES_GPU; + if (flags & OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS) + pWi->flags |= NV_WORK_ITEM_FLAGS_DONT_FREE_DATA; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY; + + pWi->gpuInstance = gpuGetInstance(pGpu); + pWi->func.pGpuFunction = pFunction; + pWi->pData = pParams; + nv = NV_GET_NV_STATE(pGpu); + + status = os_queue_work_item(nv ? 
nv->queue : NULL, pWi); + + if (NV_OK != status) + { + portMemFree((void *)pWi); + } + + return status; +} + +NV_STATUS osQueueWorkItem( + OBJGPU *pGpu, + OSWorkItemFunction pFunction, + void *pParams +) +{ + return osQueueWorkItemWithFlags(pGpu, pFunction, pParams, OS_QUEUE_WORKITEM_FLAGS_NONE); +} + +NV_STATUS osQueueSystemWorkItem( + OSSystemWorkItemFunction pFunction, + void *pParams +) +{ + nv_work_item_t *pWi; + NV_STATUS status; + + pWi = portMemAllocNonPaged(sizeof(nv_work_item_t)); + + if (NULL == pWi) + { + return NV_ERR_NO_MEMORY; + } + + pWi->flags = NV_WORK_ITEM_FLAGS_NONE; + pWi->func.pSystemFunction = pFunction; + pWi->pData = pParams; + + status = os_queue_work_item(NULL, pWi); + + if (NV_OK != status) + { + portMemFree((void *)pWi); + } + + return status; +} + +void osQueueMMUFaultHandler(OBJGPU *pGpu) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + nv_schedule_uvm_isr(nv); +} + +static inline nv_dma_device_t* osGetDmaDeviceForMemDesc( + OS_GPU_INFO *pOsGpuInfo, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return (pOsGpuInfo->niso_dma_dev != NULL) && + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) ? + pOsGpuInfo->niso_dma_dev : pOsGpuInfo->dma_dev; +} + +NV_STATUS osDmaMapPages( + OS_GPU_INFO *pOsGpuInfo, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return nv_dma_map_pages( + osGetDmaDeviceForMemDesc(pOsGpuInfo, pMemDesc), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetPteArray(pMemDesc, AT_CPU), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + NULL); +} + +NV_STATUS osDmaUnmapPages( + OS_GPU_INFO *pOsGpuInfo, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return nv_dma_unmap_pages( + pOsGpuInfo->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetPteArray(pMemDesc, AT_CPU), + NULL); +} + +// +// Set the DMA address size for the given GPU +// +// This is a global device setting and care would need to be taken if it was to +// be modified outside of GPU initialization. At least on Linux other drivers, +// like UVM, might be requesting its own DMA mappings for the same GPU after +// the GPU has been initialized. 
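+//
+// Illustrative sketch only, not part of the original change: a typical
+// caller would set this once while bringing up the GPU, before any other
+// driver requests DMA mappings, e.g.
+//
+//     osDmaSetAddressSize(pOsGpuInfo, dmaAddrBits);
+//
+// where dmaAddrBits is a hypothetical, GPU-specific DMA address width.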
+// +void osDmaSetAddressSize( + OS_GPU_INFO *pOsGpuInfo, + NvU32 bits +) +{ + nv_set_dma_address_size(pOsGpuInfo, bits); +} + +NV_STATUS osAllocPagesInternal( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu = pMemDesc->pGpu; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + void *pMemData; + NV_STATUS status; + + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + + NV_ASSERT_OR_RETURN(pMemDesc->PageCount > 0, NV_ERR_INVALID_ARGUMENT); + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + status = nv_alias_pages( + NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + memdescGetGuestId(pMemDesc), + memdescGetPteArray(pMemDesc, AT_CPU), + &pMemData); + } + else + { + NvBool unencrypted = 0; + + if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE))) + nv->force_dma32_alloc = NV_TRUE; + + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) && + (pMemDesc->_addressSpace == ADDR_SYSMEM) && + !NV_SOC_IS_ISO_IOMMU_PRESENT(nv)) + { + NV_PRINTF(LEVEL_INFO, "Forcing physically contiguous flags\n"); + memdescSetContiguity(pMemDesc, AT_CPU, NV_TRUE); + } + + status = nv_alloc_pages( + NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + pSys->getProperty(pSys, + PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + unencrypted, + memdescGetPteArray(pMemDesc, AT_CPU), + &pMemData); + + if (nv && nv->force_dma32_alloc) + nv->force_dma32_alloc = NV_FALSE; + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. + // + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + memdescSetMemData(pMemDesc, pMemData, NULL); + + return status; +} + +void osFreePagesInternal( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS rmStatus; + + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + rmStatus = nv_free_pages(NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + memdescGetMemData(pMemDesc)); + NV_ASSERT(rmStatus == NV_OK); +} + +NV_STATUS osLockMem( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Not supported on this OS. + DBG_BREAKPOINT(); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osUnlockMem( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Not supported on this OS. + DBG_BREAKPOINT(); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osMapPciMemoryUser( + OS_GPU_INFO *pOsGpuInfo, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + NvP64 *pVirtualAddress, + NvP64 *pPriv, + NvU32 modeFlag +) +{ + void *addr; + void *priv = NULL; + + addr = osMapIOSpace(busAddress, length, &priv, NV_TRUE, modeFlag, Protect); + + *pPriv = NV_PTR_TO_NvP64(priv); + *pVirtualAddress = NV_PTR_TO_NvP64(addr); + + return (addr != NULL) ? 
NV_OK : NV_ERR_GENERIC; +} + +void osUnmapPciMemoryUser( + OS_GPU_INFO *pOsGpuInfo, + NvP64 virtualAddress, + NvU64 length, + NvP64 pPriv +) +{ + void *addr, *priv; + + addr = NvP64_VALUE(virtualAddress); + priv = NvP64_VALUE(pPriv); + + osUnmapIOSpace(addr, length, priv, NV_TRUE); +} + +NV_STATUS osMapPciMemoryKernelOld +( + OBJGPU *pGpu, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + void **pVirtualAddress, + NvU32 modeFlag +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_kern_mapping_t *mapping; + + if (pVirtualAddress == NULL) + return NV_ERR_GENERIC; + + *pVirtualAddress = os_map_kernel_space(busAddress, length, modeFlag); + if (*pVirtualAddress == NULL) + return NV_ERR_GENERIC; + + mapping = portMemAllocNonPaged(sizeof(nv_kern_mapping_t)); + if (NULL == mapping) + { + os_unmap_kernel_space(*pVirtualAddress, length); + *pVirtualAddress = 0; + return NV_ERR_GENERIC; + } + + mapping->addr = *pVirtualAddress; + mapping->size = length; + mapping->modeFlag = modeFlag; + + mapping->next = nv->kern_mappings; + nv->kern_mappings = mapping; + + return NV_OK; +} + +NV_STATUS osMapPciMemoryKernel64 +( + OBJGPU *pGpu, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + NvP64 *pVirtualAddress, + NvU32 modeFlag +) +{ + void *tmppVirtualAddress = NvP64_VALUE(pVirtualAddress); + NV_STATUS rc; + + rc = osMapPciMemoryKernelOld(pGpu, + busAddress, + length, + Protect, + &tmppVirtualAddress, + modeFlag); + + *pVirtualAddress = NV_PTR_TO_NvP64(tmppVirtualAddress); + + return rc; +} + +void osUnmapPciMemoryKernelOld +( + OBJGPU *pGpu, + void* virtualAddress +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_kern_mapping_t *mapping, *tmp; + + // this can happen, for example, during a call to RmShutdownAdapter() + // from a failed RmInitAdapter() + if (virtualAddress == NULL) + { + return; + } + + tmp = mapping = nv->kern_mappings; + while (mapping) + { + if (mapping->addr == virtualAddress) + { + if (mapping == nv->kern_mappings) + { + nv->kern_mappings = mapping->next; + } + else + { + tmp->next = mapping->next; + } + + os_unmap_kernel_space(mapping->addr, mapping->size); + + portMemFree(mapping); + return; + } + tmp = mapping; + mapping = mapping->next; + } + + DBG_BREAKPOINT(); +} + +void osUnmapPciMemoryKernel64 +( + OBJGPU *pGpu, + NvP64 virtualAddress +) +{ + osUnmapPciMemoryKernelOld(pGpu, NvP64_VALUE(virtualAddress)); +} + +NV_STATUS osMapGPU( + OBJGPU *pGpu, + RS_PRIV_LEVEL privLevel, + NvU64 offset, + NvU64 length, + NvU32 Protect, + NvP64 *pAddress, + NvP64 *pPriv +) +{ + NV_STATUS rmStatus = NV_OK; + + if (privLevel >= RS_PRIV_LEVEL_KERNEL) + { + if (!portSafeAddU64((NvUPtr)pGpu->deviceMappings[0].gpuNvAddr, offset, (NvU64*)pAddress)) + { + rmStatus = NV_ERR_INVALID_LIMIT; + } + } + else + { + RmPhysAddr busAddress; + if (!portSafeAddU64(pGpu->busInfo.gpuPhysAddr, offset, &busAddress)) + { + rmStatus = NV_ERR_INVALID_LIMIT; + } + else + { + rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo, + busAddress, + length, + Protect, + pAddress, + pPriv, + NV_FALSE); + } + } + + return rmStatus; +} + +void osUnmapGPU( + OS_GPU_INFO *pOsGpuInfo, + RS_PRIV_LEVEL privLevel, + NvP64 address, + NvU64 length, + NvP64 priv +) +{ + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + osUnmapPciMemoryUser(pOsGpuInfo, address, length, priv); + } +} + +NV_STATUS osDeviceClassToDeviceName( + NvU32 deviceInstance, + NvU8 *szName +) +{ + return NV_ERR_GENERIC; +} + +static void postEvent( + nv_event_t *event, + NvU32 hEvent, + NvU32 notifyIndex, + NvU32 info32, + NvU16 info16, + NvBool dataValid +) +{ + 
nv_state_t *nv = nv_get_ctl_state(); + portSyncSpinlockAcquire(nv->event_spinlock); + if (event->active) + nv_post_event(event, hEvent, notifyIndex, + info32, info16, dataValid); + portSyncSpinlockRelease(nv->event_spinlock); +} + +NvU32 osSetEvent +( + OBJGPU *pGpu, + NvP64 eventID +) +{ + nv_event_t *event = NvP64_VALUE(eventID); + postEvent(event, 0, 0, 0, 0, NV_FALSE); + return 1; +} + +NV_STATUS osNotifyEvent( + OBJGPU *pGpu, + PEVENTNOTIFICATION NotifyEvent, + NvU32 Method, + NvU32 Data, + NV_STATUS Status +) +{ + NV_STATUS rmStatus = NV_OK; + + // notify the event + switch (NotifyEvent->NotifyType) + { + case NV01_EVENT_OS_EVENT: + { + nv_event_t *event = NvP64_VALUE(NotifyEvent->Data); + postEvent(event, + NotifyEvent->hEvent, + NotifyEvent->NotifyIndex, + 0, 0, + NotifyEvent->bEventDataRequired); + break; + } + + // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX. + case NV01_EVENT_KERNEL_CALLBACK: + { + MINIPORT_CALLBACK callBackToMiniport = + (MINIPORT_CALLBACK)NvP64_VALUE(NotifyEvent->Data); + + // perform a direct callback to the miniport + if (callBackToMiniport) + callBackToMiniport(NV_GET_NV_STATE(pGpu)); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(NotifyEvent->Data); + + // passes two arguments (arg, params) to the kernel callback instead of one (arg). + if (kc && kc->func) + { + kc->func(kc->arg, NULL, NotifyEvent->hEvent, Data, Status); + } + break; + } + + + default: + { + rmStatus = NV_ERR_GENERIC; + break; + } + } + + return rmStatus; + +} // end of osNotifyEvent() + +// Allow CPL Events to be callback or events +NV_STATUS osEventNotification +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + void * pEventData, + NvU32 eventDataSize +) +{ + return osEventNotificationWithInfo(pGpu, pNotifyEvent, notifyIndex, 0, 0, + pEventData, eventDataSize); +} + +NV_STATUS osEventNotificationWithInfo +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + NvU32 info32, + NvU16 info16, + void * pEventData, + NvU32 eventDataSize +) +{ + NV_STATUS rmStatus = NV_OK; + + // walk this object's event list and find any matches for this specific notify + for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next) + { + // notifyIndex must match if request isn't for all + if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) && + (pNotifyEvent->NotifyIndex != notifyIndex)) + { + continue; + } + + switch (pNotifyEvent->NotifyType) + { + case NV_EVENT_BUFFER_BIND: + case NV01_EVENT_WIN32_EVENT: + { + nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data); + postEvent(event, + pNotifyEvent->hEvent, + pNotifyEvent->NotifyIndex, + info32, info16, + pNotifyEvent->bEventDataRequired); + break; + } + + // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX. 
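+            //
+            // For reference, the EX case below invokes the registered
+            // NVOS10_EVENT_KERNEL_CALLBACK_EX as:
+            //
+            //     kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK);
+            //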
+ case NV01_EVENT_KERNEL_CALLBACK: + { + MINIPORT_CALLBACK callBackToMiniport = + (MINIPORT_CALLBACK)NvP64_VALUE(pNotifyEvent->Data); + + // perform a direct callback to the miniport + if (callBackToMiniport) + callBackToMiniport(NV_GET_NV_STATE(pGpu)); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data); + + if (kc && kc->func) + { + kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK); + } + break; + } + + default: + break; + } + } + + return rmStatus; +} + +// Allow CPL Events to be callback or events +NV_STATUS osObjectEventNotification +( + NvHandle hClient, + NvHandle hObject, + NvU32 hClass, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + void *pEventData, + NvU32 eventDataSize +) +{ + NV_STATUS rmStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "%s()\n", __FUNCTION__); + // walk this object's event list and find any matches for this specific notify + for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next) + { + // notifyIndex must match if request isn't for all + if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) && + (pNotifyEvent->NotifyIndex != notifyIndex)) + { + continue; + } + + switch (pNotifyEvent->NotifyType) + { + case NV01_EVENT_OS_EVENT: + { + nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data); + postEvent(event, + pNotifyEvent->hEvent, + pNotifyEvent->NotifyIndex, + 0, 0, + pNotifyEvent->bEventDataRequired); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data); + + if (kc && kc->func) + { + kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK); + } + break; + } + + default: + break; + } + } + + return rmStatus; +} + +NV_STATUS osReferenceObjectCount(void *pEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event = pEvent; + + portSyncSpinlockAcquire(nv->event_spinlock); + ++event->refcount; + portSyncSpinlockRelease(nv->event_spinlock); + return NV_OK; +} + +NV_STATUS osDereferenceObjectCount(void *pOSEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event = pOSEvent; + + portSyncSpinlockAcquire(nv->event_spinlock); + NV_ASSERT(event->refcount > 0); + --event->refcount; + // If event->refcount == 0 but event->active is true, the client + // has not yet freed the OS event. free_os_event will free its + // memory when they do, or else when the client itself is freed. + if (event->refcount == 0 && !event->active) + portMemFree(event); + portSyncSpinlockRelease(nv->event_spinlock); + + return NV_OK; +} + +NV_STATUS osUserHandleToKernelPtr(NvHandle hClient, NvP64 hEvent, NvP64 *pEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + NvU32 fd = (NvU64)hEvent; + NV_STATUS result; + + portSyncSpinlockAcquire(nv->event_spinlock); + nv_event_t *e = nv->event_list; + while (e != NULL) + { + if (e->fd == fd && e->hParent == hClient) + break; + e = e->next; + } + + if (e != NULL) + { + ++e->refcount; + *pEvent = NV_PTR_TO_NvP64(e); + result = NV_OK; + } + else + result = NV_ERR_OBJECT_NOT_FOUND; + portSyncSpinlockRelease(nv->event_spinlock); + + return result; +} + +NV_STATUS osFlushCpuCache(void) +{ + return os_flush_cpu_cache_all(); +} + +void osFlushCpuWriteCombineBuffer(void) +{ + os_flush_cpu_write_combine_buffer(); +} + + +// +// Evict GPU memory range from the CPU caches. +// +// On some platforms (e.g. P9+V100), the CPU can coherently cache GPU memory +// and RM takes advantage of that. 
Most everything is handled transparently, +// but there are two exceptions that require explicitly flushing any CPU cache +// lines of GPU memory. These are: +// +// 1) Flushing memory backing ACR regions before they get locked. +// +// Otherwise the cache could get flushed while the regions are locked causing a +// region violation physical fault. See more details in +// acrFlushRegionsFromGpuCoherentCpuCache_IMPL(). +// +// 2) Flushing all of FB before GPU reset (NVLink going down) +// +// Leaving cache entries on the CPU causes fatal errors when the CPU tries +// flushing them later while the link is down. See more details in +// nvlinkStatePostUnload_IMPL(). +// +void osFlushGpuCoherentCpuCacheRange +( + OS_GPU_INFO *pOsGpuInfo, + NvU64 cpuVirtual, + NvU64 size +) +{ + nv_flush_coherent_cpu_cache_range(pOsGpuInfo, cpuVirtual, size); +} + +void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pFormat == NULL) || (*pFormat == '\0')) + { + return; + } + + rmStatus = nv_log_error(nv, num, pFormat, arglist); + NV_ASSERT(rmStatus == NV_OK); +} + +void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...) +{ + va_list arglist; + va_start(arglist, pFormat); + osErrorLogV(pGpu, num, pFormat, arglist); + va_end(arglist); +} + +NvU32 +osPollHotkeyState +( + OBJGPU *pGpu +) +{ + return 0; +} + +void osDevWriteReg008( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV8 thisValue +) +{ + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR08(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +void osDevWriteReg016( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV16 thisValue +) +{ + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR16(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +void osDevWriteReg032( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV32 thisValue +) +{ + NvBool vgpuHandled = NV_FALSE; + + if (vgpuHandled) + { + return; + } + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR32(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +NvU8 osDevReadReg008( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU8 retval = 0; + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD08(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NvU16 osDevReadReg016( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU16 retval = 0; + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD16(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NvU32 osDevReadReg032( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU32 retval = 0; + NvBool vgpuHandled = NV_FALSE; + + if (vgpuHandled) + { + return retval; + } + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD32(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NV_STATUS osReadRegistryDwordBase( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 *Data +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return 
RmReadRegistryDword(nv, regParmStr, Data); +} + +NV_STATUS osWriteRegistryDword( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 Data +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmWriteRegistryDword(nv, regParmStr, Data); +} + +NV_STATUS osReadRegistryBinary( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 *cbLen +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryBinary(nv, regParmStr, Data, cbLen); +} + +NV_STATUS osWriteRegistryBinary( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmWriteRegistryBinary(nv, regParmStr, Data, cbLen); +} + +NV_STATUS osWriteRegistryVolatile( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryVolatile +( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryVolatileSize +( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryStringBase( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *buffer, + NvU32 *pBufferLength +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryString(nv, regParmStr, buffer, pBufferLength); +} + +NvU32 osGetCpuCount() +{ + return os_get_cpu_count(); // Total number of logical CPUs. +} + +NvU32 osGetCurrentProcessorNumber(void) +{ + return os_get_cpu_number(); +} + +void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *pFlags) +{ + NvU32 gpuMode = gpuGetMode(pGpu); + + NV_ASSERT((NV_GPU_MODE_GRAPHICS_MODE == gpuMode) || + (NV_GPU_MODE_COMPUTE_MODE == gpuMode)); + + { + switch (gpuMode) + { + default: + case NV_GPU_MODE_GRAPHICS_MODE: + *pTimeoutUs = 4 * 1000000; + break; + + case NV_GPU_MODE_COMPUTE_MODE: + *pTimeoutUs = 30 * 1000000; + break; + } + } + + *pFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + + *pScale = 1; + if (IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)) + { + *pScale = 60; // 1s -> 1m + } + + return; +} + +void osFlushLog() +{ + // Not implemented +} + +static NvU32 _osGetTegraPlatform(void) +{ + NV_STATUS status; + NvU32 mode; + + status = os_get_tegra_platform(&mode); + if (status != NV_ERR_NOT_SUPPORTED) + { + return mode; + } + + return NV_OS_TEGRA_PLATFORM_SILICON; +} + +NvU32 osGetSimulationMode(void) +{ + NvU32 mode; + + switch (_osGetTegraPlatform()) + { + case NV_OS_TEGRA_PLATFORM_SIM: + mode = NV_SIM_MODE_CMODEL; + break; + case NV_OS_TEGRA_PLATFORM_FPGA: + mode = NV_SIM_MODE_TEGRA_FPGA; + break; + case NV_OS_TEGRA_PLATFORM_SILICON: + default: + mode = NV_SIM_MODE_HARDWARE; + break; + } + + return mode; +} + +NV_STATUS +cliresCtrlCmdOsUnixFlushUserCache_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams +) +{ + Memory *pMemory; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 start, end; + NvBool bInvalidateOnly; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(RES_GET_CLIENT(pRmCliRes), + pAddressSpaceParams->hObject, + &pMemory)); + + pMemDesc = pMemory->pMemDesc; + + if (memdescGetAddressSpace(pMemDesc) != ADDR_SYSMEM) + { + NV_PRINTF(LEVEL_ERROR, "%s: wrong address space %d\n", + __FUNCTION__, memdescGetAddressSpace(pMemDesc)); + return NV_ERR_INVALID_COMMAND; + } + + if (memdescGetCpuCacheAttrib(pMemDesc) != NV_MEMORY_CACHED) + { + NV_PRINTF(LEVEL_ERROR, "%s: wrong caching type %d\n", + __FUNCTION__, memdescGetCpuCacheAttrib(pMemDesc)); + return 
NV_ERR_INVALID_COMMAND; + } + + start = pAddressSpaceParams->offset; + end = start + pAddressSpaceParams->length; + + switch(pAddressSpaceParams->cacheOps) + { + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE: + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH: + bInvalidateOnly = NV_FALSE; + break; + + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE: + bInvalidateOnly = NV_TRUE; + break; + + default: + NV_PRINTF(LEVEL_ERROR, "%s: cacheOps not specified\n", __FUNCTION__); + return NV_ERR_INVALID_COMMAND; + } + + if ((end - start) > pMemory->Length) + { + NV_PRINTF(LEVEL_ERROR, + "%s: end address 0x%llx exceeded buffer length: 0x%llx\n", + __FUNCTION__, end, pMemory->Length); + return NV_ERR_INVALID_LIMIT; + } + + if (bInvalidateOnly) + { + // + // XXX: this seems fishy - I'm not sure if invalidating by the kernel + // VA only as nv_dma_cache_invalidate() does here is sufficient for + // this control call. + // pAddressSpaceParams->internalOnly is expected to be the RM client + // VA for this control call; if we wanted to invalidate the user VA we + // could do so using that. + // + // For I/O coherent platforms this won't actually do anything. + // On non-I/O-coherent platforms, there's no need to do a second + // invalidation after the full flush. + // + nv_state_t *nv = NV_GET_NV_STATE(pMemDesc->pGpu); + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + PIOVAMAPPING pIovaMapping = memdescGetIommuMap(pMemDesc, nv->iovaspace_id); + // + // This should only be called for devices that map memory descriptors + // through the nv-dma library, where the memory descriptor data + // contains all the kernel-specific context we need for the + // invalidation. + // + // (These checks match those in osIovaUnmap() leading up to + // nv_dma_unmap_alloc()). 
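+            //
+            // In other words, the invalidate path requires a live IOVA mapping
+            // with nv-dma private data, and it rejects guest-allocated and
+            // peer I/O memory, since those do not carry the nv-dma context
+            // needed here.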
+ // + if (pIovaMapping == NULL || + pIovaMapping->pOsData == NULL || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED) || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + nv_dma_cache_invalidate(nv->dma_dev, pIovaMapping->pOsData); + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + } + else + { + return os_flush_user_cache(); + } + + return NV_OK; +} + +static NV_STATUS +_initializeExportObjectFd +( + nv_file_private_t *nvfp, + NvHandle hClient, + NvHandle hDevice, + NvU16 maxObjects, + NvU8 *metadata +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + Device *pDevice; + NvU32 deviceInstance = NV_MAX_DEVICES; + + if (nvfp->handles != NULL) + { + return NV_ERR_STATE_IN_USE; + } + + if (hDevice != 0) + { + status = serverutilGetResourceRef(hClient, hDevice, &pResourceRef); + if (status != NV_OK) + { + return status; + } + + pDevice = dynamicCast(pResourceRef->pResource, Device); + if (pDevice == NULL) + { + return NV_ERR_INVALID_PARAMETER; + } + + deviceInstance = pDevice->deviceInst; + } + + NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void **)&nvfp->handles, + sizeof(nvfp->handles[0]) * maxObjects)); + + os_mem_set(nvfp->handles, 0, + sizeof(nvfp->handles[0]) * maxObjects); + + nvfp->maxHandles = maxObjects; + nvfp->deviceInstance = deviceInstance; + + if (metadata != NULL) + { + os_mem_copy(nvfp->metadata, metadata, sizeof(nvfp->metadata)); + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdOsUnixExportObjectToFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmObjExportHandle hExportHandle = 0; + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + /* + * This flag is intended to be implemented entirely in the rmapi library in + * userspace, we should never encounter it here. + */ + if (FLD_TEST_DRF(0000_CTRL, _OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS, + _EMPTY_FD, _TRUE, pParams->flags)) + { + return NV_ERR_INVALID_PARAMETER; + } + + if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM || + pParams->fd == -1) + { + return NV_ERR_INVALID_PARAMETER; + } + + status = RmExportObject(hClient, + pParams->object.data.rmObject.hObject, + &hExportHandle, NULL); + if (status != NV_OK) + { + goto done; + } + NV_ASSERT(hExportHandle != 0); + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, + pParams->object.data.rmObject.hDevice, + 1, NULL); + if (status != NV_OK) + { + goto done; + } + + nvfp->handles[0] = hExportHandle; + +done: + + if (status != NV_OK && hExportHandle != 0) + { + RmFreeObjExportHandle(hExportHandle); + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +// This control call has been deprecated. It will be deleted soon. 
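+// Note that cliresCtrlCmdOsUnixExportObjectsToFd_IMPL below performs the same
+// first-use FD setup through _initializeExportObjectFd().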
+NV_STATUS +cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams +) +{ + NV_STATUS status; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + + ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata)); + + if (pParams->maxObjects == 0) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice, + pParams->maxObjects, pParams->metadata); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixExportObjectsToFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmObjExportHandle *pExportHandle; + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + NvU32 i; + NvU32 deviceInstance; + NvU32 result; + NvHandle *exportHandles = NULL; + NvBool bFdSetup = NV_FALSE; + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata)); + + /* Setup export FD if not done */ + if (nvfp->handles == NULL) + { + if (pParams->maxObjects == 0) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice, + pParams->maxObjects, + pParams->metadata); + if (status != NV_OK) + { + goto done; + } + + bFdSetup = NV_TRUE; + } + + if ((nvfp->handles == NULL) || + (pParams->numObjects > + NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) || + (result > nvfp->maxHandles)) + { + status = NV_ERR_OUT_OF_RANGE; + goto done; + } + + status = os_alloc_mem((void **)&exportHandles, + sizeof(exportHandles[0]) * + pParams->numObjects); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; i < pParams->numObjects; i++) + { + exportHandles[i] = 0; + + if (pParams->objects[i] == 0) + { + continue; + } + + status = RmExportObject(hClient, + pParams->objects[i], + &exportHandles[i], + &deviceInstance); + if (status != NV_OK) + { + goto done; + } + + NV_ASSERT(exportHandles[i] != 0); + + if (deviceInstance != nvfp->deviceInstance) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + } + + for (i = 0; i < pParams->numObjects; i++) + { + pExportHandle = &nvfp->handles[i + pParams->index]; + + // If the handle already exists in this position, free it + if (*pExportHandle != 0) + { + RmFreeObjExportHandle(*pExportHandle); + *pExportHandle = 0; + } + + *pExportHandle = exportHandles[i]; + } + +done: + + if ((status != NV_OK) && (exportHandles != NULL)) + { + for (i = 0; i < pParams->numObjects; i++) + { + if (exportHandles[i] != 0) + { + RmFreeObjExportHandle(exportHandles[i]); + } + } + } + + if (exportHandles != NULL) + { + os_free_mem(exportHandles); + } + + if ((status != NV_OK) && bFdSetup) + { + os_free_mem(nvfp->handles); + nvfp->handles = NULL; + nvfp->maxHandles = 0; + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS 
+cliresCtrlCmdOsUnixImportObjectFromFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM || + pParams->fd == -1) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if ((nvfp->handles == NULL) || (nvfp->handles[0] == 0) || + (nvfp->maxHandles > 1)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = RmImportObject(hClient, + pParams->object.data.rmObject.hParent, + &pParams->object.data.rmObject.hObject, + nvfp->handles[0], NULL); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + NvU32 i = 0; + RmObjExportHandle hImportHandle; + NvU32 result; + RM_API *pRmApi; + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if ((nvfp->handles == NULL) || + (pParams->numObjects > + NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) || + (result > nvfp->maxHandles)) + { + status = NV_ERR_OUT_OF_RANGE; + goto done; + } + + for (i = 0; i < pParams->numObjects; i++) + { + hImportHandle = nvfp->handles[i + pParams->index]; + + /* Nothing to import, just continue */ + if (hImportHandle == 0) + { + pParams->objectTypes[i] = \ + NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_NONE; + continue; + } + + status = RmImportObject(hClient, + pParams->hParent, + &pParams->objects[i], + hImportHandle, + &pParams->objectTypes[i]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s: Unable to import handle (%x, %x, %x)\n", + __FUNCTION__, pParams->hParent, pParams->objects[i], hImportHandle); + goto done; + } + } + +done: + + if (status != NV_OK) + { + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + while (i > 0) + { + i--; + + if (pParams->objects[i] != 0) + { + pRmApi->Free(pRmApi, hClient, pParams->objects[i]); + } + } + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams +) +{ + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + if (pParams->fd < 0) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (nvfp->handles == NULL) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + pParams->maxObjects = nvfp->maxHandles; + pParams->deviceInstance = nvfp->deviceInstance; + os_mem_copy(pParams->metadata, nvfp->metadata, sizeof(nvfp->metadata)); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return 
status;
+}
+
+/*!
+ * osCallACPI_DSM
+ *
+ * @brief Handles OS specific _DSM method function calls.
+ *
+ * @param[in]     pGpu               : OBJGPU pointer
+ * @param[in]     acpiDsmFunction    : ACPI DSM function
+ * @param[in]     acpiDsmSubFunction : ACPI DSM subfunction
+ * @param[in,out] pInOut : In/out buffer; the caller should make sure the buffer is large enough.
+ * @param[in,out] pSize  : When input, size of data that the caller wants to read, in bytes.
+ *                         When output, size of valid data in pInOut, in bytes.
+ */
+NV_STATUS osCallACPI_DSM
+(
+    OBJGPU *pGpu,
+    ACPI_DSM_FUNCTION acpiDsmFunction,
+    NvU32 acpiDsmSubFunction,
+    NvU32 *pInOut,
+    NvU16 *pSize
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS osCallACPI_DOD
+(
+    OBJGPU *pGpu,
+    NvU32 *pOut,
+    NvU32 *pSize
+)
+{
+    NV_STATUS rmStatus;
+    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
+
+    if ((pOut == NULL) || (pSize == NULL))
+    {
+        return NV_ERR_INVALID_POINTER;
+    }
+
+    rmStatus = nv_acpi_dod_method(nv, pOut, pSize);
+
+    return rmStatus;
+}
+
+//
+// osCallACPI_DDC
+//
+// Handles OS specific _DDC method function calls. _DDC is used to get the
+// EDID from the SBIOS.
+//
+NV_STATUS osCallACPI_DDC
+(
+    OBJGPU *pGpu,
+    NvU32 ulAcpiId,
+    NvU8 *pOutData,
+    NvU32 *pOutSize,
+    NvBool bReadMultiBlock
+)
+{
+    NV_STATUS rmStatus;
+
+    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
+
+    if ((pOutData == NULL) || (pOutSize == NULL))
+    {
+        return NV_ERR_INVALID_POINTER;
+    }
+
+    portMemSet(pOutData, 0, *pOutSize);
+
+    rmStatus = nv_acpi_ddc_method(nv, pOutData, pOutSize, bReadMultiBlock);
+
+    return rmStatus;
+}
+
+//
+// osCallACPI_NVHG_ROM
+//
+// Makes an ACPI call into the SBIOS with the ROM method to get a display
+// device's ROM data.
+//
+NV_STATUS osCallACPI_NVHG_ROM
+(
+    OBJGPU *pGpu,
+    NvU32 *pInData,
+    NvU32 *pOutData
+)
+{
+    NV_STATUS rmStatus;
+    nv_state_t *nv = NV_GET_NV_STATE(pGpu);
+
+    if ((pOutData == NULL) || (pInData == NULL))
+    {
+        return NV_ERR_INVALID_POINTER;
+    }
+
+    if (pInData[1] > ROM_METHOD_MAX_RETURN_BUFFER_SIZE)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    rmStatus = nv_acpi_rom_method(nv, pInData, pOutData);
+
+    return rmStatus;
+}
+
+void osInitSystemStaticConfig(SYS_STATIC_CONFIG *pConfig)
+{
+    pConfig->bIsNotebook = rm_is_system_notebook();
+    pConfig->osType = nv_get_os_type();
+    pConfig->osSevStatus = os_sev_status;
+    pConfig->bOsSevEnabled = os_sev_enabled;
+}
+
+NvU32 osApiLockAcquireConfigureFlags(NvU32 flags)
+{
+    return flags;
+}
+
+NV_STATUS osGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLocksRelease)
+{
+    return NV_SEMA_RELEASE_FAILED;
+}
+
+void osSyncWithRmDestroy()
+{
+}
+
+void osSyncWithGpuDestroy(NvBool bEntry)
+{
+}
+
+void osModifyGpuSwStatePersistence
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvBool bEnable
+)
+{
+    if (bEnable)
+    {
+        pOsGpuInfo->flags |= NV_FLAG_PERSISTENT_SW_STATE;
+    }
+    else
+    {
+        pOsGpuInfo->flags &= ~NV_FLAG_PERSISTENT_SW_STATE;
+    }
+}
+
+NV_STATUS
+osSystemGetBatteryDrain(NvS32 *pChargeRate)
+{
+    NV_PRINTF(LEVEL_WARNING, "%s: Platform not supported!\n", __FUNCTION__);
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS
+osPexRecoveryCallback
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    OS_PEX_RECOVERY_STATUS Status
+)
+{
+    NV_ASSERT_FAILED("Not supported");
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+//
+// osCallACPI_MXDS
+//
+// Handles OS specific MXDS function call.
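+//
+// Both osCallACPI_MXDS and osCallACPI_MXDM below resolve through
+// nv_acpi_mux_method(), passing the ACPI method name as a string, e.g.:
+//
+//     rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDS");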
+// +NV_STATUS osCallACPI_MXDS +( + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 *pInOut +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (pInOut == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDS"); + + return rmStatus; +} + +// +//osCallACPI_MXDM +// +//Handles OS specific MXDM function call. +// +NV_STATUS osCallACPI_MXDM +( + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 *pInOut +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (pInOut == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDM"); + + return rmStatus; +} + +NV_STATUS osGetVersionDump(void * pVoid) +{ + return NV_OK; +} + +NV_STATUS osGetVersion(NvU32 *majorVer, NvU32 *minorVer, NvU32 *buildNum, NvU16 *unusedPatchVersion, NvU16 *unusedProductType) +{ + os_version_info osVersionInfo; + NV_STATUS rmStatus; + + portMemSet(&osVersionInfo, 0, sizeof(osVersionInfo)); + + rmStatus = os_get_version_info(&osVersionInfo); + if (rmStatus == NV_OK) + { + if (majorVer) + *majorVer = osVersionInfo.os_major_version; + if (minorVer) + *minorVer = osVersionInfo.os_minor_version; + if (buildNum) + *buildNum = osVersionInfo.os_build_number; + } + + return rmStatus; +} + +NV_STATUS +osGetSystemCpuLogicalCoreCounts +( + NvU32 *pCpuCoreCount +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetSystemCpuC0AndAPerfCounters +( + NvU32 coreIndex, + POS_CPU_CORE_PERF_COUNTERS pCpuPerfData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void +osEnableCpuPerformanceCounters +( + OBJOS *pOS +) +{ + NV_ASSERT_FAILED("Not supported"); + return; +} + +NV_STATUS +osCpuDpcObjInit +( + void **ppCpuDpcObj, + OBJGPU *pGpu, + NvU32 coreCount +) +{ + NV_ASSERT_FAILED("Not supported"); + return NV_ERR_NOT_SUPPORTED; +} + +void +osCpuDpcObjQueue +( + void **ppCpuDpcObj, + NvU32 coreCount, + POS_CPU_CORE_PERF_COUNTERS pCpuPerfData +) +{ + NV_ASSERT_FAILED("Not supported"); +} + +void +osCpuDpcObjFree +( + void **ppCpuDpcObj +) +{ + NV_ASSERT_FAILED("Not supported"); +} + +NV_STATUS +osGetCarveoutInfo +( + NvU64 *pAddr, + NvU64 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetVPRInfo +( + NvU64 *pAddr, + NvU64 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osAllocInVPR +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetGenCarveout +( + NvU64 *pAddr, + NvU64 *pSize, + NvU32 id, + NvU64 align +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osI2CClosePorts +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 numPorts +) +{ + nv_i2c_unregister_clients(pOsGpuInfo); + return NV_OK; +} + +NV_STATUS +osI2CTransfer +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + nv_i2c_msg_t *nv_i2c_msgs, + NvU32 count +) +{ + return nv_i2c_transfer(NV_GET_NV_STATE(pGpu), Port, Address, + nv_i2c_msgs, count); +} + +NV_STATUS +osTegraI2CGetBusState +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 port, + NvS32 *scl, + NvS32 *sda +) +{ + return nv_i2c_bus_status(pOsGpuInfo, port, scl, sda); +} + +NV_STATUS +osReadI2CBufferDirect +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + void *pOutputBuffer, + NvU32 OutputSize, + void *pInputBuffer, + NvU32 InputSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osWriteI2CBufferDirect +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + void *pOutputBuffer0, + NvU32 OutputSize0, + void *pOutputBuffer1, + NvU32 OutputSize1 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGC6PowerControl +( + OBJGPU *pGpu, + NvU32 cmd, + NvU32 *pOut 
+) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Map memory into an IOVA space according to the given mapping info. + * + * @param[in] pIovaMapping IOVA mapping info + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +osIovaMap +( + PIOVAMAPPING pIovaMapping +) +{ + OBJGPU *pGpu; + nv_state_t *nv, *peer; + NV_STATUS status; + RmPhysAddr base; + NvBool bIsBar0; + PMEMORY_DESCRIPTOR pRootMemDesc; + NvBool bIsFbOffset = NV_FALSE; + NvBool bIsIndirectPeerMapping = NV_FALSE; + NvBool bIsContig; + NV_ADDRESS_SPACE addressSpace; + NvU32 osPageCount; + + if (pIovaMapping == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pRootMemDesc = memdescGetRootMemDesc(pIovaMapping->pPhysMemDesc, NULL); + addressSpace = memdescGetAddressSpace(pIovaMapping->pPhysMemDesc); + if (gpumgrCheckIndirectPeer(pGpu, pRootMemDesc->pGpu) && + (addressSpace == ADDR_FBMEM)) + { + bIsIndirectPeerMapping = NV_TRUE; + } + + if ((addressSpace != ADDR_SYSMEM) && !bIsIndirectPeerMapping) + { + NV_PRINTF(LEVEL_INFO, + "%s passed memory descriptor in an unsupported address space (%s)\n", + __FUNCTION__, + memdescGetApertureString(memdescGetAddressSpace(pIovaMapping->pPhysMemDesc))); + return NV_ERR_NOT_SUPPORTED; + } + + // + // For guest-allocated memory, we don't actually want to do any remapping, + // since the physical address is already the DMA address to be used by the + // GPU. + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + return NV_OK; + } + + nv = NV_GET_NV_STATE(pGpu); + + // + // Intercept peer IO type memory. These are contiguous allocations, so no + // need to adjust pages. + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + NV_ASSERT(memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU)); + + status = nv_dma_map_mmio(nv->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount), + &pIovaMapping->iovaArray[0]); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to map peer IO mem (status = 0x%x)\n", + __FUNCTION__, status); + } + + return status; + } + + // + // We need to check against the "root" GPU, e.g., the GPU that owns this + // allocation. If we're trying to map one of its BARs for a peer, we need + // to handle it differently because it wouldn't have gone through our system + // memory page allocation paths, obviously, and wouldn't have alloc private + // data associated with it. 
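+    //
+    // The mapping logic below therefore splits three ways: ordinary system
+    // memory (and indirect-peer FB) goes through nv_dma_map_alloc(), a peer
+    // GPU's BAR0 or FB goes through nv_dma_map_peer(), and mapping a GPU's
+    // own BAR to itself is rejected.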
+ // + peer = NV_GET_NV_STATE(pRootMemDesc->pGpu); + bIsContig = memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU); + if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig) + { + RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + base = memdescGetPhysAddr(pIovaMapping->pPhysMemDesc, AT_CPU, 0); + bIsBar0 = IS_REG_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size); + + bIsFbOffset = IS_FB_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size); + + // + // For indirect peers bIsFbOffset should be NV_TRUE + // TODO:IS_FB_OFFSET macro is currently broken for P9 systems + // Bug 2010857 tracks fixing this + // +#if defined(NVCPU_PPC64LE) + KernelMemorySystem *pRootKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pRootMemDesc->pGpu); + if (bIsIndirectPeerMapping) + { + NvU64 atsBase = base + pRootKernelMemorySystem->coherentCpuFbBase; + if ((atsBase >= pRootKernelMemorySystem->coherentCpuFbBase) && + (atsBase + pIovaMapping->pPhysMemDesc->Size <= + pRootKernelMemorySystem->coherentCpuFbEnd)) + { + bIsFbOffset = NV_TRUE; + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + } +#endif + + void *pPriv = memdescGetMemData(pIovaMapping->pPhysMemDesc); + osPageCount = NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount); + + if (!bIsBar0 && !bIsFbOffset) + { + if (pPriv == NULL) + { + return NV_ERR_INVALID_STATE; + } + } + + // + // TODO: When ISO SMMU is not present, dma mapping of ISO memory causes crash during + // __clean_dcache_area_poc. We are allocating ISO memory as contiguous when + // SMMU is not present using dma_alloc_coherent, skip dma mapping + // of ISO memory to unblock Tegra Display in AV+L. Bug 200765629 + // + if (!NV_SOC_IS_ISO_IOMMU_PRESENT(nv) && + !memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO)) + { + NV_PRINTF(LEVEL_INFO, "%s: Skip memdescMapIommu mapping\n", __FUNCTION__); + return NV_OK; + } + + if (!bIsBar0 && (!bIsFbOffset || bIsIndirectPeerMapping)) + { + status = nv_dma_map_alloc( + osGetDmaDeviceForMemDesc(nv, pIovaMapping->pPhysMemDesc), + osPageCount, + &pIovaMapping->iovaArray[0], + bIsContig, &pPriv); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to map allocation (status = 0x%x)\n", + __FUNCTION__, status); + return status; + } + + pIovaMapping->pOsData = pPriv; + } + else if (peer != nv) + { + status = nv_dma_map_peer(nv->dma_dev, peer->dma_dev, bIsBar0 ? 0 : 1, + osPageCount, &pIovaMapping->iovaArray[0]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "%s: failed to map peer (base = 0x%llx, status = 0x%x)\n", + __FUNCTION__, base, status); + return status; + } + + // + // pOsData must be NULL to distinguish a peer DMA mapping from a + // system memory mapping in osIovaUnmap(), so make sure to set it + // accordingly here. + // + pIovaMapping->pOsData = NULL; + } + else + { + NV_PRINTF(LEVEL_INFO, "cannot map a GPU's BAR to itself\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. + // + if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig) + { + RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + return NV_OK; +} + +/*! + * @brief Unmap memory from an IOVA space according to the given mapping info. + * + * This mapping info must have been previously mapped by osIovaMap(). 
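+ *
+ * A minimal pairing sketch (assumed usage; pMapping is a hypothetical caller
+ * variable):
+ *
+ *     if (osIovaMap(pMapping) == NV_OK)
+ *     {
+ *         // ... use pMapping->iovaArray ...
+ *         osIovaUnmap(pMapping);
+ *     }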
+ *
+ * @param[in] pIovaMapping IOVA mapping info
+ *
+ */
+void
+osIovaUnmap
+(
+    PIOVAMAPPING pIovaMapping
+)
+{
+    OBJGPU *pGpu;
+    nv_state_t *nv;
+    void *pPriv;
+    NV_STATUS status;
+
+    if (pIovaMapping == NULL)
+    {
+        return;
+    }
+
+    pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId);
+    if (pGpu == NULL)
+    {
+        return;
+    }
+
+    //
+    // For guest-allocated memory, we never actually remapped the memory, so we
+    // shouldn't try to unmap it here.
+    //
+    if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED))
+    {
+        return;
+    }
+
+    nv = NV_GET_NV_STATE(pGpu);
+
+    if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM))
+    {
+        nv_dma_unmap_mmio(nv->dma_dev,
+            NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
+            pIovaMapping->iovaArray[0]);
+
+        return;
+    }
+
+    //
+    // TODO: Formalize the interface with the OS layers so we can use a common
+    // definition of OS_IOVA_MAPPING_DATA.
+    //
+    pPriv = (void *)pIovaMapping->pOsData;
+
+    if (NV_RM_PAGE_SIZE < os_page_size &&
+        !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU))
+    {
+        RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0],
+                                 pIovaMapping->pPhysMemDesc->PageCount);
+    }
+
+    if (pPriv != NULL)
+    {
+        status = nv_dma_unmap_alloc(nv->dma_dev,
+            NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
+            &pIovaMapping->iovaArray[0], &pPriv);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "%s: failed to unmap allocation (status = 0x%x)\n",
+                      __FUNCTION__, status);
+        }
+    }
+    else
+    {
+        nv_dma_unmap_peer(nv->dma_dev,
+            NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount),
+            pIovaMapping->iovaArray[0]);
+    }
+
+    //
+    // If the OS layer doesn't think in RM page size, we need to inflate the
+    // PTE array back into RM pages.
+    //
+    if (NV_RM_PAGE_SIZE < os_page_size &&
+        !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU))
+    {
+        RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0],
+                                 pIovaMapping->pPhysMemDesc->PageCount);
+    }
+
+    pIovaMapping->pOsData = NULL;
+}
+
+/*!
+ * @brief Set the GPU rail voltage in a Tegra SoC. Currently not supported.
+ *
+ * @param[in]  pGpu          GPU object pointer
+ * @param[in]  reqVoltageuV  Rail voltage requested, in uV
+ * @param[out] pSetVoltageuV Rail voltage set, in uV
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osSetGpuRailVoltage
+(
+    OBJGPU *pGpu,
+    NvU32 reqVoltageuV,
+    NvU32 *pSetVoltageuV
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Get the GPU rail voltage in a Tegra SoC. Currently not supported.
+ *
+ * @param[in]  pGpu       GPU object pointer
+ * @param[out] pVoltageuV Rail voltage, in uV
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osGetGpuRailVoltage
+(
+    OBJGPU *pGpu,
+    NvU32 *pVoltageuV
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Bring down the system in a controlled manner on known error conditions.
+ *
+ * @param[in] bugCode Error code / reason.
+ */
+void osBugCheck(NvU32 bugCode)
+{
+    if (bugCode > OS_BUG_CHECK_BUGCODE_LAST)
+    {
+        bugCode = OS_BUG_CHECK_BUGCODE_UNKNOWN;
+    }
+
+    os_bug_check(bugCode, ppOsBugCheckBugcodeStr[bugCode]);
+}
+
+/*!
+ * @brief Perform an action at assertion failure.
+ */
+void osAssertFailed(void)
+{
+    os_dump_stack();
+}
+
+/*!
+ * @brief Get the GPU chip info: Speedo and IDDQ values.
+ *
+ * @param[in]  pGpu         GPU object pointer
+ * @param[out] pGpuSpeedoHv Pointer to GPU Speedo value at high voltage corner.
+ * @param[out] pGpuSpeedoLv Pointer to GPU Speedo value at low voltage corner.
+ * @param[out] pGpuIddq     Pointer to GPU Iddq value
+ * @param[out] pChipSkuId   SKU ID for the chip
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osGetChipInfo
+(
+    OBJGPU *pGpu,
+    NvU32 *pGpuSpeedoHv,
+    NvU32 *pGpuSpeedoLv,
+    NvU32 *pGpuIddq,
+    NvU32 *pChipSkuId
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*
+ * @brief Get the GPU rail voltage info (i.e. min, max, and step size) in a Tegra SoC.
+ *
+ * @param[in]  pGpu           GPU object pointer
+ * @param[out] pMinVoltageuV  Minimum voltage supported on the rail, in microvolts
+ * @param[out] pMaxVoltageuV  Maximum voltage supported on the rail, in microvolts
+ * @param[out] pStepVoltageuV Voltage step size supported on the rail, in microvolts
+ *
+ * @return NV_ERR_NOT_SUPPORTED
+ */
+NV_STATUS
+osGetGpuRailVoltageInfo
+(
+    OBJGPU *pGpu,
+    NvU32 *pMinVoltageuV,
+    NvU32 *pMaxVoltageuV,
+    NvU32 *pStepVoltageuV
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Get the current opaque security token.
+ *
+ * For Linux, the security token is the effective UID (EUID) and process ID of
+ * the calling process.
+ *
+ * Note: This function allocates memory for the token. The onus is on the calling
+ * function to free the memory associated with the token once it is done with it.
+ *
+ * @return Pointer to the security token.
+ */
+PSECURITY_TOKEN
+osGetSecurityToken()
+{
+    NV_STATUS rmStatus;
+    TOKEN_USER *pTokenUser;
+
+    pTokenUser = portMemAllocNonPaged(sizeof(TOKEN_USER));
+    if (pTokenUser == NULL)
+    {
+        return NULL;
+    }
+    rmStatus = os_get_euid(&pTokenUser->euid);
+    if (rmStatus != NV_OK)
+    {
+        portMemFree(pTokenUser);
+        return NULL;
+    }
+
+    pTokenUser->pid = os_get_current_process();
+
+    return (PSECURITY_TOKEN)pTokenUser;
+}
+
+PUID_TOKEN
+osGetCurrentUidToken(void)
+{
+    NV_STATUS rmStatus;
+    NvU32 *pUidToken;
+
+    pUidToken = portMemAllocNonPaged(sizeof(NvU32));
+    if (pUidToken == NULL)
+    {
+        return NULL;
+    }
+
+    rmStatus = os_get_euid(pUidToken);
+    if (rmStatus != NV_OK)
+    {
+        portMemFree(pUidToken);
+        return NULL;
+    }
+
+    return (PUID_TOKEN)pUidToken;
+}
+
+/*!
+ * @brief Interface function to validate the token for the current client
+ *
+ * This function takes two tokens as parameters, validates them, and checks
+ * whether either the PID or the EUID from the client database matches the
+ * current PID or EUID.
+ *
+ * @param[in] pClientSecurityToken  security token cached in the client db
+ * @param[in] pCurrentSecurityToken security token of the current client
+ * @return NV_OK if the validation is successful
+ *         NV_ERR_INVALID_CLIENT if the tokens do not match
+ *         NV_ERR_INVALID_POINTER if the tokens are invalid
+ */
+NV_STATUS
+osValidateClientTokens
+(
+    PSECURITY_TOKEN pClientSecurityToken,
+    PSECURITY_TOKEN pCurrentSecurityToken
+)
+{
+    PTOKEN_USER pClientTokenUser = (PTOKEN_USER)pClientSecurityToken;
+    PTOKEN_USER pCurrentTokenUser = (PTOKEN_USER)pCurrentSecurityToken;
+
+    NV_ASSERT_OR_RETURN((pClientTokenUser != NULL), NV_ERR_INVALID_POINTER);
+    NV_ASSERT_OR_RETURN((pCurrentTokenUser != NULL), NV_ERR_INVALID_POINTER);
+
+    if ((pClientTokenUser->euid != pCurrentTokenUser->euid) &&
+        (pClientTokenUser->pid != pCurrentTokenUser->pid))
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "NVRM: %s: Current security token doesn't match the one in the client database. "
+                  "Current EUID: %d, PID: %d; Client DB EUID: %d, PID: %d\n",
+                  __FUNCTION__, pCurrentTokenUser->euid, pCurrentTokenUser->pid,
+                  pClientTokenUser->euid, pClientTokenUser->pid);
+        return NV_ERR_INVALID_CLIENT;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Interface function to compare the tokens for two clients
+ *
+ * This function takes two tokens as parameters, validates them, and checks
+ * whether the EUIDs of the two tokens match.
+ *
+ * @param[in] pUidToken1 Token to compare
+ * @param[in] pUidToken2 Token to compare
+ * @return NV_TRUE if the tokens match
+ *         NV_FALSE if the tokens do not match
+ */
+NvBool
+osUidTokensEqual
+(
+    PUID_TOKEN pUidToken1,
+    PUID_TOKEN pUidToken2
+)
+{
+    NvU32 *pTokenUser1 = (NvU32*)pUidToken1;
+    NvU32 *pTokenUser2 = (NvU32*)pUidToken2;
+
+    NV_ASSERT_OR_RETURN((pTokenUser1 != NULL), NV_FALSE);
+    NV_ASSERT_OR_RETURN((pTokenUser2 != NULL), NV_FALSE);
+
+    if (*pTokenUser1 != *pTokenUser2)
+    {
+        return NV_FALSE;
+    }
+
+    return NV_TRUE;
+}
+
+NvBool
+osRemoveGpuSupported
+(
+    void
+)
+{
+    return os_pci_remove_supported();
+}
+
+/*
+ * @brief Get the address ranges assigned to local or peer GPUs on a system that
+ * supports hardware address translation services (ATS) over NVLink/C2C.
+ *
+ * @note
+ * - All address values are in the System Physical Address (SPA) space
+ * - Targets can either be "Local" (bIsPeer=False) or for a specified "Peer"
+ *   (bIsPeer=True, peerIndex=#) GPU
+ * - Target address and mask values have a specified bit width, and represent
+ *   the higher order bits above the target address granularity
+ *
+ * @param[in]  pGpu         GPU object pointer
+ * @param[out] pAddrSysPhys Pointer to hold SPA
+ * @param[out] pAddrWidth   Address range width value pointer
+ * @param[out] pMask        Mask value pointer
+ * @param[out] pMaskWidth   Mask width value pointer
+ * @param[in]  bIsPeer      NV_TRUE if this is a peer, local GPU otherwise
+ * @param[in]  peerIndex    Peer index
+ *
+ * @return NV_OK or NV_ERR_NOT_SUPPORTED
+ *
+ * A return value of NV_ERR_NOT_SUPPORTED for the local GPU would
+ * indicate that the system does not support ATS over NVLink/C2C
+ */
+NV_STATUS
+osGetAtsTargetAddressRange
+(
+    OBJGPU *pGpu,
+    NvU64 *pAddrSysPhys,
+    NvU32 *pAddrWidth,
+    NvU32 *pMask,
+    NvU32 *pMaskWidth,
+    NvBool bIsPeer,
+    NvU32 peerIndex
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*
+ * @brief Get the physical address in the CPU address map and the NUMA node ID
+ * of the GPU memory.
+ *
+ * @note
+ * - The physical address is the System Physical Address (SPA) in baremetal/host
+ *   and the Intermediate Physical Address (IPA) or Guest Physical Address (GPA)
+ *   inside a VM.
+ *
+ * @param[in]  pGpu      GPU object pointer
+ * @param[out] pAddrPhys Pointer to hold the physical address of FB in the
+ *                       CPU address map
+ * @param[out] pNodeId   NUMA node ID of the respective GPU memory
+ *
+ * @return NV_OK or NV_ERR_NOT_SUPPORTED
+ *
+ */
+NV_STATUS
+osGetFbNumaInfo
+(
+    OBJGPU *pGpu,
+    NvU64 *pAddrPhys,
+    NvS32 *pNodeId
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+
+/*
+ * @brief Verif-only function to get the chiplib overrides for link connection
+ * state for all C2C links.
+ * + * If chiplib overrides exist, each link can either be enabled (1) or disabled (0) + * + * @param[in] pGpu GPU object pointer + * @param[in] maxLinks Size of pLinkConnection array + * @param[out] pLinkConnection array of pLinkConnection values to be populated by MODS + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED (no overrides available) + */ +NV_STATUS +osGetForcedC2CConnection +( + OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS +osGetSmbiosTableInfo +( + const NvU8 *pMappedAddr, + NvU64 *pBaseAddr, + NvU64 *pLength, + NvU64 *pNumSubTypes, + NvU32 *pVersion +) +{ + *pBaseAddr = 0; + *pLength = 0; + *pNumSubTypes = 0; + *pVersion = 0; + + if (portMemCmp(pMappedAddr, "_SM3_", 5) == 0) + { + *pVersion = (pMappedAddr[7] << 8) | pMappedAddr[8]; + portMemCopy(pLength, 4, pMappedAddr + 12, 4); + portMemCopy(pBaseAddr, 8, pMappedAddr + 16, 8); + + *pNumSubTypes = *pLength / 4; + + return NV_OK; + } + + if (portMemCmp(pMappedAddr, "_SM_", 4) == 0) + { + *pVersion = (pMappedAddr[6] << 8) | pMappedAddr[7]; + + pMappedAddr += 16; + + if (portMemCmp(pMappedAddr, "_DMI_", 5) == 0) + { + portMemCopy(pLength, 2, pMappedAddr + 6, 2); + portMemCopy(pBaseAddr, 4, pMappedAddr + 8, 4); + portMemCopy(pNumSubTypes, 2, pMappedAddr + 12, 2); + + if (!*pVersion) + *pVersion = (pMappedAddr[14] & 0xF0) << 4 | + (pMappedAddr[14] & 0x0F); + + return NV_OK; + } + } + + return NV_ERR_INVALID_ADDRESS; +} + + +/* + * @brief Function to export SMBIOS table. Also, maps table in kernel-space. + * + * @param[out] ppBaseVAddr Base virtual address of SMBIOS table. + * @param[out] pLength Size of SMBIOS table. + * @param[out] pNumSubTypes Count of structures (types) embedded in + * the SMBIOS table. + * @param[out] pVersion SMBIOS version + * + * @return NV_OK, NV_ERR_INSUFFICIENT_RESOURCES or NV_ERR_INVALID_ADDRESS + * or errors from OS layer + */ +NV_STATUS +osGetSmbiosTable +( + void **ppBaseVAddr, + NvU64 *pLength, + NvU64 *pNumSubTypes, + NvU32 *pVersion +) +{ + NV_STATUS status = NV_OK; + NvU64 physSmbiosAddr = ~0ull; + void *pMappedAddr = NULL; + NvU64 basePAddr = 0; + + if (!NVCPU_IS_X86_64) + { + return NV_ERR_NOT_SUPPORTED; + } + + status = os_get_smbios_header(&physSmbiosAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "%s: Failed query SMBIOS table with error: %x \n", + __FUNCTION__, status); + return status; + } + + NV_ASSERT(physSmbiosAddr != ~0ull); + + pMappedAddr = osMapKernelSpace(physSmbiosAddr, + os_page_size, + NV_MEMORY_CACHED, + NV_PROTECT_READ_WRITE); + if (!pMappedAddr) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + status = osGetSmbiosTableInfo(pMappedAddr, + &basePAddr, + pLength, + pNumSubTypes, + pVersion); + + osUnmapKernelSpace(pMappedAddr, os_page_size); + + if (status != NV_OK) + { + return status; + } + + *ppBaseVAddr = osMapKernelSpace(basePAddr, + *pLength, + NV_MEMORY_CACHED, + NV_PROTECT_READ_WRITE); + if (!*ppBaseVAddr) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +/* + * @brief Function to free SMBIOS table mappings + * + * @param[in] pBaseVAddr Base virtual address of SMBIOS table. + * @param[in] length Size of SMBIOS table. + * + */ +void +osPutSmbiosTable +( + void *pBaseVAddr, + NvU64 length +) +{ + osUnmapKernelSpace(pBaseVAddr, length); +} + +NV_STATUS +osGetAcpiRsdpFromUefi +( + NvU32 *pRsdpAddr +) +{ + return os_get_acpi_rsdp_from_uefi(pRsdpAddr); +} + +/* + * @brief Returns NV_TRUE if NvSwitch device is present in the system. 
+ */ +NvBool +osIsNvswitchPresent +( + void +) +{ + return os_is_nvswitch_present(); +} + +/* + * @brief Function to add crashlog buffer entry. + * + * @param[in] pBuffer virt_addr of nvlog buffer + * @param[in] length size of nvlog buffer + */ +void +osAddRecordForCrashLog +( + void *pBuffer, + NvU32 length +) +{ + os_add_record_for_crashLog(pBuffer, length); +} + +/* + * @brief Function to delete crashlog buffer entry. + * + * @param[in] pBuffer virt_addr of nvlog buffer + */ +void +osDeleteRecordForCrashLog +( + void *pBuffer +) +{ + os_delete_record_for_crashLog(pBuffer); +} + +/* + * @brief Queries the sysfs interface to get memblock size + * @param[out] memblock_size Pointer to the memblock_size + */ +NV_STATUS +osNumaMemblockSize +( + NvU64 *memblock_size +) +{ + return os_numa_memblock_size(memblock_size); +} + +NvBool +osNumaOnliningEnabled +( + OS_GPU_INFO *pOsGpuInfo +) +{ + NvS32 numaNodeId = NV0000_CTRL_NO_NUMA_NODE; + + // + // Note that this numaNodeId value fetched from Linux layer might not be + // accurate since it is possible to overwrite it with regkey on some configs + // + if (nv_get_device_memory_config(pOsGpuInfo, NULL, NULL, NULL, + &numaNodeId) != NV_OK) + { + return NV_FALSE; + } + + return (numaNodeId != NV0000_CTRL_NO_NUMA_NODE); +} + +/* + * @brief Function to call NUMA allocation entry. + * + * @param[in] nid NUMA node id + * @param[in] size Allocation size + * @param[in] flag Allocation flags + * @param[out] pAddress Ptr to the allocated physical address + */ +NV_STATUS +osAllocPagesNode +( + NvS32 nid, + NvLength size, + NvU32 flag, + NvU64 *pAddress +) +{ + NV_STATUS status = NV_OK; + NvU32 localFlag = NV_ALLOC_PAGES_NODE_NONE; + + if (pAddress == NULL || nid < 0 || size > NV_U32_MAX) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Translate the flags + if (flag & OS_ALLOC_PAGES_NODE_SKIP_RECLAIM) + { + localFlag |= NV_ALLOC_PAGES_NODE_SKIP_RECLAIM; + } + + status = os_alloc_pages_node(nid, (NvU32)size, localFlag, pAddress); + return status; +} + +NV_STATUS +osAllocAcquirePage +( + NvU64 pAddress +) +{ + os_get_page(pAddress); + return NV_OK; +} + +NV_STATUS +osAllocReleasePage +( + NvU64 pAddress +) +{ + os_put_page(pAddress); + return NV_OK; +} + +/* + * @brief Function to return refcount on a page + * @param[in] address The physical address of the page + */ +NvU32 +osGetPageRefcount +( + NvU64 pAddress +) +{ + return os_get_page_refcount(pAddress); +} + +/* + * @brief Function to return the number of tail pages if the address is + * referring to a compound page; For non-compound pages, 1 is returned. + * @param[in] address The physical address of the page + */ +NvU32 +osCountTailPages +( + NvU64 pAddress +) +{ + return os_count_tail_pages(pAddress); +} + +/* + * @brief Upon success, gets NPU register address range. + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[out] pBase base (physical) of NPU register address range + * @param[out] pSize size of NPU register address range + */ +NV_STATUS +osGetIbmnpuGenregInfo +( + OS_GPU_INFO *pOsGpuInfo, + NvU64 *pBase, + NvU64 *pSize +) +{ + return nv_get_ibmnpu_genreg_info(pOsGpuInfo, pBase, pSize, NULL); +} + +/* + * @brief Upon success, gets NPU's relaxed ordering mode. 
+ *
+ * @param[in]  pOsGpuInfo  OS specific GPU information pointer
+ * @param[out] pMode       Relaxed ordering mode
+ */
+NV_STATUS
+osGetIbmnpuRelaxedOrderingMode
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvBool *pMode
+)
+{
+    return nv_get_ibmnpu_relaxed_ordering_mode(pOsGpuInfo, pMode);
+}
+
+/*
+ * @brief Waits for NVLink HW flush on an NPU associated with a GPU.
+ *
+ * @param[in] pOsGpuInfo  OS specific GPU information pointer
+ */
+void
+osWaitForIbmnpuRsync
+(
+    OS_GPU_INFO *pOsGpuInfo
+)
+{
+    nv_wait_for_ibmnpu_rsync(pOsGpuInfo);
+}
+
+NvU32
+osGetPageSize(void)
+{
+    return os_page_size;
+}
+
+/*
+ * @brief Opens a new temporary file for reading and writing
+ *
+ * @param[out] ppFile  Double pointer through which the file handle is returned
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osOpenTemporaryFile
+(
+    void **ppFile
+)
+{
+    return os_open_temporary_file(ppFile);
+}
+
+/*
+ * @brief Closes the specified temporary file
+ *
+ * @param[in] pFile  Pointer to file
+ *
+ * @returns void
+ */
+void
+osCloseFile
+(
+    void *pFile
+)
+{
+    os_close_file(pFile);
+}
+
+/*
+ * @brief Writes the buffer to the specified file at the given offset
+ *
+ * @param[in] pFile    Pointer to file (void *)
+ * @param[in] pBuffer  Pointer to buffer from which to copy
+ * @param[in] size     Size of the copy
+ * @param[in] offset   Offset in the file
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osWriteToFile
+(
+    void *pFile,
+    NvU8 *pBuffer,
+    NvU64 size,
+    NvU64 offset
+)
+{
+    return os_write_file(pFile, pBuffer, size, offset);
+}
+
+/*
+ * @brief Reads from the specified file at the given offset
+ *
+ * @param[in] pFile    Pointer to file (void *)
+ * @param[in] pBuffer  Pointer to buffer to which the data is copied
+ * @param[in] size     Size of the copy
+ * @param[in] offset   Offset in the file
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osReadFromFile
+(
+    void *pFile,
+    NvU8 *pBuffer,
+    NvU64 size,
+    NvU64 offset
+)
+{
+    return os_read_file(pFile, pBuffer, size, offset);
+}
+
+/*
+ * @brief Unregisters caps from the capability framework.
+ *        The function assumes that the caps are allocated and stored in
+ *        hierarchical order. If they aren't, the OS (Linux kernel) will warn
+ *        and leak the caps.
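+ *        Concretely (illustrative): caps[] holds parents at lower indices
+ *        (e.g. caps[0] = a directory, caps[1] = a file inside it), which is
+ *        why the loop below destroys entries from count-1 down to 0.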
+ * + * @param[in] pOsRmCaps caps of interest + */ +void +osRmCapUnregister +( + OS_RM_CAPS **ppOsRmCaps +) +{ + OS_RM_CAPS *pOsRmCaps = *ppOsRmCaps; + NvS32 i; + + if (pOsRmCaps == NULL) + { + return; + } + + for (i = pOsRmCaps->count - 1; i >= 0; i--) + { + if (pOsRmCaps->caps[i] != NULL) + { + os_nv_cap_destroy_entry(pOsRmCaps->caps[i]); + } + } + + os_free_mem(pOsRmCaps->caps); + os_free_mem(pOsRmCaps); + + *ppOsRmCaps = NULL; +} + +static NV_STATUS +_allocOsRmCaps +( + OS_RM_CAPS **ppOsRmCaps, + NvU32 count +) +{ + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + + *ppOsRmCaps = NULL; + + status = os_alloc_mem((void**)&pOsRmCaps, sizeof(OS_RM_CAPS)); + if (status != NV_OK) + return status; + + pOsRmCaps->count = count; + + status = os_alloc_mem((void**)&pOsRmCaps->caps, sizeof(pOsRmCaps->caps[0]) * count); + if (status != NV_OK) + { + os_free_mem(pOsRmCaps); + return status; + } + + os_mem_set(pOsRmCaps->caps, 0, sizeof(pOsRmCaps->caps[0]) * count); + + *ppOsRmCaps = pOsRmCaps; + return NV_OK; +} + +#define OS_RM_CAP_GPU_DIR 0 +#define OS_RM_CAP_GPU_MIG_DIR 1 +#define OS_RM_CAP_GPU_COUNT 2 + +/* + * @brief Registers OBJGPU with the capability framework. + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[out] ppOsRmCaps GPU OS specific capabilities pointer + */ +NV_STATUS +osRmCapRegisterGpu +( + OS_GPU_INFO *pOsGpuInfo, + OS_RM_CAPS **ppOsRmCaps +) +{ + NvU32 minor = nv_get_dev_minor(pOsGpuInfo); + char name[16]; + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + nv_cap_t *parent; + nv_cap_t *cap; + + // Return success on the unsupported platforms. + if (nvidia_caps_root == NULL) + { + return NV_OK; + } + + if (*ppOsRmCaps != NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_GPU_COUNT); + if (status != NV_OK) + return status; + + *ppOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "gpu%u", minor); + name[sizeof(name) - 1] = '\0'; + parent = nvidia_caps_root; + + cap = os_nv_cap_create_dir_entry(parent, name, (OS_RUGO | OS_XUGO)); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup gpu%u directory\n", minor); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_GPU_DIR] = cap; + parent = cap; + + // TODO: Bug 2679591: Add MIG directory only if SMC is enabled. + // For now, always add "mig" directory. + cap = os_nv_cap_create_dir_entry(parent, "mig", (OS_RUGO | OS_XUGO)); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup mig directory\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppOsRmCaps); + + return status; +} + +#define OS_RM_CAP_SMC_PART_DIR 0 +#define OS_RM_CAP_SMC_PART_ACCESS_FILE 1 +#define OS_RM_CAP_SMC_PART_COUNT 2 + +/* + * @brief Registers SMC partition (a.k.a. GPU instance) with the capability + * framework + * + * @param[in] pGpuOsRmCaps GPU OS specific capabilities pointer + * @param[out] ppPartitionOsRmCaps OS specific capabilities pointer for SMC partition + * @param[in] swizzId SMC partition swizz ID + */ +NV_STATUS +osRmCapRegisterSmcPartition +( + OS_RM_CAPS *pGpuOsRmCaps, + OS_RM_CAPS **ppPartitionOsRmCaps, + NvU32 swizzId +) +{ + char name[16]; + NV_STATUS status; + nv_cap_t *parent; + nv_cap_t *cap; + OS_RM_CAPS *pOsRmCaps; + + // Return success as there is nothing to do. 
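+    // (Illustrative note: when caps are present, the success path below
+    // creates gi<swizzId>/access under the gpu<minor>/mig directory
+    // registered in osRmCapRegisterGpu() above.)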
+ if (pGpuOsRmCaps == NULL) + { + return NV_OK; + } + + if (*ppPartitionOsRmCaps != NULL || swizzId >= NV_U32_MAX) + { + return NV_ERR_INVALID_ARGUMENT; + } + + parent = pGpuOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR]; + if (parent == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_PART_COUNT); + if (status != NV_OK) + return status; + + *ppPartitionOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "gi%u", swizzId); + name[sizeof(name) - 1] = '\0'; + + cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup gi%u directory\n", + swizzId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR] = cap; + parent = cap; + + cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n", + swizzId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_PART_ACCESS_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppPartitionOsRmCaps); + + return status; +} + +#define OS_RM_CAP_SMC_EXEC_PART_DIR 0 +#define OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE 1 +#define OS_RM_CAP_SMC_EXEC_PART_COUNT 2 + +/* + * @brief Registers SMC execution partition (a.k.a. compute instance) with the + * capability framework + * + * @param[in] pPartitionOsRmCaps OS specific capabilities pointer for SMC partition + * @param[out] ppExecPartitionOsRmCaps OS specific capabilities pointer for SMC execution partition + * @param[in] execPartitionId SMC execution partition ID + */ +NV_STATUS +osRmCapRegisterSmcExecutionPartition +( + OS_RM_CAPS *pPartitionOsRmCaps, + OS_RM_CAPS **ppExecPartitionOsRmCaps, + NvU32 execPartitionId +) +{ + char name[16]; + NV_STATUS status; + nv_cap_t *parent; + nv_cap_t *cap; + OS_RM_CAPS *pOsRmCaps; + + // Return success as there is nothing to do. 
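+    // (Illustrative note: this mirrors osRmCapRegisterSmcPartition() one
+    // level deeper, adding ci<execPartitionId>/access under the partition's
+    // gi<swizzId> directory.)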
+ if (pPartitionOsRmCaps == NULL) + { + return NV_OK; + } + + if ((*ppExecPartitionOsRmCaps != NULL) || (execPartitionId >= NV_U32_MAX)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + parent = pPartitionOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR]; + if (parent == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_EXEC_PART_COUNT); + if (status != NV_OK) + { + return status; + } + + *ppExecPartitionOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "ci%u", execPartitionId); + name[sizeof(name) - 1] = '\0'; + + cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup ci%u directory\n", + execPartitionId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_DIR] = cap; + parent = cap; + + cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n", + execPartitionId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppExecPartitionOsRmCaps); + + return status; +} + +/* + * @brief Release the acquired capability + * + * @param[in] dupedCapDescriptor descriptor to be released + */ +void +osRmCapRelease +( + NvU64 dupedCapDescriptor +) +{ + if (dupedCapDescriptor == NV_U64_MAX) + { + return; + } + + os_nv_cap_close_fd((int)dupedCapDescriptor); +} + +#define OS_RM_CAP_SYS_MIG_DIR 0 +#define OS_RM_CAP_SYS_SMC_CONFIG_FILE 1 +#define OS_RM_CAP_SYS_SMC_MONITOR_FILE 2 +#define OS_RM_CAP_SYS_COUNT 3 + +NV_STATUS +osRmCapRegisterSys +( + OS_RM_CAPS **ppOsRmCaps +) +{ + nv_cap_t **ppCaps; + nv_cap_t *parent; + nv_cap_t *cap; + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + + if (nvidia_caps_root == NULL) + return NV_ERR_NOT_SUPPORTED; + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SYS_COUNT); + if (status != NV_OK) + return status; + + *ppOsRmCaps = pOsRmCaps; + + ppCaps = pOsRmCaps->caps; + + parent = os_nv_cap_create_dir_entry(nvidia_caps_root, "mig", OS_RUGO | OS_XUGO); + if (parent == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig directory\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_MIG_DIR] = parent; + + cap = os_nv_cap_create_file_entry(parent, "config", OS_RUSR); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig config file\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_SMC_CONFIG_FILE] = cap; + + cap = os_nv_cap_create_file_entry(parent, "monitor", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig monitor file\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_SMC_MONITOR_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppOsRmCaps); + return status; +} + +/* + * @brief Acquire the requested capability + * + * @param[in] pOsRmCaps opaque pointer to the caps. + * @param[in] rmCap the capability to be acquired. + * @param[in] capDescriptor descriptor to be used for validation + * @param[out] dupedCapDescriptor returns duplicated descriptor if validation + * is successful + * + * Note: On Linux, duplicating fd is helpful to let administrators know about + * the capability users. See https://linux.die.net/man/8/lsof usage. 
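+ *
+ * Illustrative call sequence (a sketch, not code from this patch):
+ *
+ *   NvU64 duped;
+ *   osRmCapInitDescriptor(&duped);
+ *   if (osRmCapAcquire(pOsRmCaps, NV_RM_CAP_SMC_PARTITION_ACCESS,
+ *                      capDescriptor, &duped) == NV_OK)
+ *   {
+ *       // ... use the duplicated descriptor ...
+ *       osRmCapRelease(duped);
+ *   }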
+ */ +NV_STATUS +osRmCapAcquire +( + OS_RM_CAPS *pOsRmCaps, + NvU32 rmCap, + NvU64 capDescriptor, + NvU64 *dupedCapDescriptor +) +{ + nv_cap_t *cap; + int fd = (int)capDescriptor; + int duped_fd; + NvU32 index; + NV_STATUS status; + + *dupedCapDescriptor = NV_U64_MAX; + + switch (rmCap) + { + case NV_RM_CAP_SMC_PARTITION_ACCESS: + { + index = OS_RM_CAP_SMC_PART_ACCESS_FILE; + break; + } + case NV_RM_CAP_EXT_FABRIC_MGMT: + { + status = nv_acquire_fabric_mgmt_cap(fd, &duped_fd); + if (status != NV_OK) + { + return status; + } + + goto done; + } + case NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS: + { + index = OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE; + break; + } + case NV_RM_CAP_SYS_SMC_CONFIG: + { + index = OS_RM_CAP_SYS_SMC_CONFIG_FILE; + break; + } + case NV_RM_CAP_SYS_SMC_MONITOR: + { + index = OS_RM_CAP_SYS_SMC_MONITOR_FILE; + break; + } + default: + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + if (pOsRmCaps == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (index >= pOsRmCaps->count) + { + return NV_ERR_INVALID_ARGUMENT; + } + + cap = pOsRmCaps->caps[index]; + + duped_fd = os_nv_cap_validate_and_dup_fd(cap, fd); + if (duped_fd < 0) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + +done: + *dupedCapDescriptor = duped_fd; + + return NV_OK; +} + +/* + * @brief Initializes capability descriptor + * + * @param[out] pCapDescriptor descriptor to be used + * + */ +void +osRmCapInitDescriptor +( + NvU64 *pCapDescriptor +) +{ + *pCapDescriptor = NV_U64_MAX; +} + +/* + * @brief Generates random bytes which can be used as a universally unique + * identifier. + * + * @param[out] pBytes Array of random bytes + * @param[in] numBytes Size of the array + */ +NV_STATUS +osGetRandomBytes +( + NvU8 *pBytes, + NvU16 numBytes +) +{ + os_get_random_bytes(pBytes, numBytes); + + return NV_OK; +} + +/* + * @brief Allocate wait queue + * + * @param[out] ppWq Wait queue + */ +NV_STATUS +osAllocWaitQueue +( + OS_WAIT_QUEUE **ppWq +) +{ + return os_alloc_wait_queue(ppWq); +} + +/* + * @brief Free wait queue + * + * @param[in] pWq Wait queue + */ +void +osFreeWaitQueue +( + OS_WAIT_QUEUE *pWq +) +{ + os_free_wait_queue(pWq); +} + +/* + * @brief Put thread to uninterruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWaitUninterruptible +( + OS_WAIT_QUEUE *pWq +) +{ + os_wait_uninterruptible(pWq); +} + +/* + * @brief Put thread to interruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWaitInterruptible +( + OS_WAIT_QUEUE *pWq +) +{ + os_wait_interruptible(pWq); +} + +/* + * @brief Wake up thread from uninterruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWakeUp +( + OS_WAIT_QUEUE *pWq +) +{ + os_wake_up(pWq); +} + +NV_STATUS +osReadPFPciConfigInVF +( + NvU32 addr, + NvU32 *data +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Callback function to notify RM when unix layer receives an event + * + * This function is basically a wrapper to call the Core RM layer and is + * being called from DCE KMD when an event is received from DCE RM. 
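+ * The body follows the usual pattern for entering RM from an external
+ * callback context: threadStateInit(), acquire the RM locks, dispatch to
+ * dceclientHandleAsyncRpcCallback(), then unwind in reverse order.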
+ * + * @param[in] handle handle allocated for corresponding IPC type with DCE + * @param[in] interfaceType RM IPC interface type + * @param[in] length length of the message passed from DCE + * @param[in] data any specific data if present + * @param[in] usrCtx any specific user context if present + * + * @returns void + */ +static void +NV_API_CALL +osTegraDceClientIpcCallback +( + NvU32 handle, + NvU32 interfaceType, + NvU32 length, + void *data, + void *usrCtx +) +{ + THREAD_STATE_NODE threadState; + NvU32 rmInterfaceType = nv_tegra_get_rm_interface_type(interfaceType); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (rmLocksAcquireAll(RM_LOCK_MODULES_KERNEL_RM_EVENTS) == NV_OK) + { + dceclientHandleAsyncRpcCallback(handle, rmInterfaceType, length, data, usrCtx); + + rmLocksReleaseAll(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); +} + +/*! + * @brief Performs IPC Client registration with DCE + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[in] interfaceType RM IPC interface type + * @param[in] usrCtx any specific user context if present + * @param[out] clientId unique ID registered with DCE for IPC + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraDceRegisterIpcClient +( + NvU32 interfaceType, + void *usrCtx, + NvU32 *clientId +) +{ + if (interfaceType == DCE_CLIENT_RM_IPC_TYPE_SYNC) + return nv_tegra_dce_register_ipc_client(interfaceType, usrCtx, NULL, clientId); + else if (interfaceType == DCE_CLIENT_RM_IPC_TYPE_EVENT) + return nv_tegra_dce_register_ipc_client(interfaceType, usrCtx, osTegraDceClientIpcCallback, clientId); + else + return NV_ERR_INVALID_ARGUMENT; +} + +/*! + * @brief Performs IPC Client destroy with DCE + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[in] clientId unique ID registered with DCE for IPC + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraDceUnregisterIpcClient +( + NvU32 clientId +) +{ + return nv_tegra_dce_unregister_ipc_client(clientId); +} + +/*! + * @brief Performs IPC Send/Receive to/from DCE + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[in] clientId unique ID registered with DCE KMD for corresponding IPC type + * @param[in] msg structure to hold dce ipc message info + * @param[in] msgLength length of the message + * + * @returns NV_OK if successful, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraDceClientIpcSendRecv +( + NvU32 clientId, + void *msg, + NvU32 msgLength +) +{ + return nv_tegra_dce_client_ipc_send_recv(clientId, msg, msgLength); +} + +/*! + * @brief Returns IMP-relevant data collected from other modules + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[out] pTegraImpImportData Structure to receive the data + * + * @returns NV_OK if successful, + * NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is + * too small, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. 
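+ *
+ * Illustrative use (a sketch; assumes TEGRA_IMP_IMPORT_DATA may be
+ * stack-allocated and zero-initialized by the caller):
+ *
+ *   TEGRA_IMP_IMPORT_DATA impData;
+ *   portMemSet(&impData, 0, sizeof(impData));
+ *   if (osTegraSocGetImpImportData(&impData) != NV_OK)
+ *   {
+ *       // fall back to worst-case IMP assumptions
+ *   }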
+ */
+NV_STATUS
+osTegraSocGetImpImportData
+(
+    TEGRA_IMP_IMPORT_DATA *pTegraImpImportData
+)
+{
+    return nv_imp_get_import_data(pTegraImpImportData);
+}
+
+/*!
+ * @brief Tells BPMP whether or not RFL is valid
+ *
+ * Display HW generates an ok_to_switch signal which asserts when mempool
+ * occupancy is high enough to be able to turn off memory long enough to
+ * execute a dramclk frequency switch without underflowing display output.
+ * ok_to_switch drives the RFL ("request for latency") signal in the memory
+ * unit, and the switch sequencer waits for this signal to go active before
+ * starting a dramclk switch. However, if the signal is not valid (e.g., if
+ * display HW or SW has not been initialized yet), the switch sequencer
+ * ignores the signal. This API tells BPMP whether or not the signal is valid.
+ *
+ * @param[in] pOsGpuInfo  Per GPU Linux state
+ * @param[in] bEnable     True if RFL will be valid; false if invalid
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
+ *          NV_ERR_GENERIC if some other kind of error occurred.
+ */
+NV_STATUS
+osTegraSocEnableDisableRfl
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvBool bEnable
+)
+{
+    return nv_imp_enable_disable_rfl(pOsGpuInfo, bEnable);
+}
+
+/*!
+ * @brief Allocates a specified amount of ISO memory bandwidth for display
+ *
+ * floorBandwidthKBPS is the minimum required (i.e., floor) dramclk frequency
+ * multiplied by the width of the pipe over which the display data will
+ * travel. (It is understood that the bandwidth calculated by multiplying the
+ * clock frequency by the pipe width will not be realistically achievable, due
+ * to overhead in the memory subsystem. The infrastructure will not actually
+ * use the bandwidth value, except to reverse the calculation to get the
+ * required dramclk frequency. For example, a 2,000,000 kHz dramclk floor
+ * across a 4-byte pipe would be requested as floorBandwidthKBPS =
+ * 2,000,000 * 4 = 8,000,000.)
+ *
+ * This function is basically a wrapper to call the unix/linux layer.
+ *
+ * @param[in] pOsGpuInfo           OS specific GPU information pointer
+ * @param[in] averageBandwidthKBPS Amount of ISO memory bandwidth requested
+ * @param[in] floorBandwidthKBPS   Min required dramclk freq * pipe width
+ *
+ * @returns NV_OK if successful,
+ *          NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is
+ *          too high, and bandwidth cannot be allocated,
+ *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
+ *          NV_ERR_GENERIC if some other kind of error occurred.
+ */
+NV_STATUS
+osTegraAllocateDisplayBandwidth
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32 averageBandwidthKBPS,
+    NvU32 floorBandwidthKBPS
+)
+{
+    return nv_imp_icc_set_bw(pOsGpuInfo,
+                             averageBandwidthKBPS,
+                             floorBandwidthKBPS);
+}
+
+/*!
+ * @brief Creates or sets up a platform specific nanosecond-resolution timer
+ *
+ * @param[in]     pOsGpuInfo  OS specific GPU information pointer
+ * @param[in]     pTmrEvent   Pointer to timer event information
+ * @param[in,out] pTimer      Pointer to hold the high resolution timer object
+ */
+NV_STATUS
+osCreateNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTmrEvent,
+    void **pTimer
+)
+{
+    nv_create_nano_timer(pOsGpuInfo, pTmrEvent, (nv_nano_timer_t **)pTimer);
+    return NV_OK;
+}
+
+/*!
+ * @brief Starts a platform specific nanosecond-resolution timer
+ *
+ * @param[in] pOsGpuInfo  OS specific GPU information pointer
+ * @param[in] pTimer      Pointer to the high resolution timer object
+ * @param[in] timeNs      Time in nanoseconds
+ */
+NV_STATUS
+osStartNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTimer,
+    NvU64 timeNs
+)
+{
+    nv_start_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer, timeNs);
+    return NV_OK;
+}
+
+/*!
+ * @brief Cancels a platform specific nanosecond-resolution timer
+ *
+ * @param[in] pOsGpuInfo  OS specific GPU information pointer
+ * @param[in] pTimer      Pointer to the timer object
+ */
+NV_STATUS
+osCancelNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTimer
+)
+{
+    nv_cancel_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
+    return NV_OK;
+}
+
+/*!
+ * @brief Destroys and cancels a platform specific nanosecond-resolution timer
+ *
+ * @param[in] pOsGpuInfo  OS specific GPU information pointer
+ * @param[in] pTimer      Pointer to the timer object
+ */
+NV_STATUS
+osDestroyNanoTimer
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    void *pTimer
+)
+{
+    nv_destroy_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
+    return NV_OK;
+}
+
+/*!
+ * @brief Get the number of dpAux instances.
+ *        This is a wrapper function for the unix/linux layer.
+ *
+ * @param[in]  pOsGpuInfo     OS specific GPU information pointer
+ * @param[out] pNumInstances  Number of dpAux instances
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osGetTegraNumDpAuxInstances
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32 *pNumInstances
+)
+{
+    return nv_get_num_dpaux_instances(pOsGpuInfo, pNumInstances);
+}
+
+/*
+ * @brief Return the private data of the current IRQ.
+ *        This is a wrapper function for the unix/linux layer.
+ *
+ * @param[in]  pOsGpuInfo  OS specific GPU information pointer
+ * @param[out] pPrivData   Private data of the current IRQ
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osGetCurrentIrqPrivData
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32 *pPrivData
+)
+{
+    return nv_get_current_irq_priv_data(pOsGpuInfo, pPrivData);
+}
+
+/*!
+ * @brief Get the brightness level.
+ *        This is a wrapper function for the unix/linux layer.
+ *
+ * @param[in]  pOsGpuInfo  OS specific GPU information pointer
+ * @param[out] brightness  Pointer to the brightness level
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osGetTegraBrightnessLevel
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32 *brightness
+)
+{
+    return nv_get_tegra_brightness_level(pOsGpuInfo, brightness);
+}
+
+/*!
+ * @brief Set the brightness level.
+ *        This is a wrapper function for the unix/linux layer.
+ *
+ * @param[in] pOsGpuInfo  OS specific GPU information pointer
+ * @param[in] brightness  Brightness level
+ *
+ * @returns NV_STATUS, NV_OK if success,
+ *          NV_ERR_GENERIC, if error
+ *          NV_ERR_NOT_SUPPORTED, for unsupported platforms
+ */
+NV_STATUS
+osSetTegraBrightnessLevel
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32 brightness
+)
+{
+    return nv_set_tegra_brightness_level(pOsGpuInfo, brightness);
+}
+
+/* @brief Gets syncpoint aperture information
+ *
+ * @param[in]  pOsGpuInfo   OS specific GPU information pointer
+ * @param[in]  syncpointId  Syncpoint ID of interest
+ * @param[out] physAddr     Physical address of the syncpoint aperture
+ * @param[out] limit        Size limit of the aperture
+ * @param[out] offset       Offset of the syncpoint within the aperture
+ */
+NV_STATUS
+osGetSyncpointAperture
+(
+    OS_GPU_INFO *pOsGpuInfo,
+    NvU32 syncpointId,
+    NvU64 *physAddr,
+    NvU64 *limit,
+    NvU32 *offset
+)
+{
+    return nv_get_syncpoint_aperture(syncpointId, physAddr, limit, offset);
+}
+
+/*!
+ * @brief Check whether the GPU is accessible
+ *
+ * @param[in] pGpu  GPU object pointer
+ *
+ * @returns NvBool: NV_TRUE if the GPU is accessible,
+ *          NV_FALSE otherwise
+ */
+NvBool
+osIsGpuAccessible
+(
+    OBJGPU *pGpu
+)
+{
+    return nv_is_gpu_accessible(NV_GET_NV_STATE(pGpu));
+}
+
+/*!
+ * @brief Check whether the GPU OS info matches
+ *
+ * @param[in] pGpu     GPU object pointer
+ * @param[in] pOsInfo  GPU OS info pointer
+ *
+ * @returns NvBool: NV_TRUE if matched, NV_FALSE otherwise
+ */
+NvBool
+osMatchGpuOsInfo
+(
+    OBJGPU *pGpu,
+    void *pOsInfo
+)
+{
+    return nv_match_gpu_os_info(NV_GET_NV_STATE(pGpu), pOsInfo);
+}
+
+/*!
+ * @brief Release GPU OS info.
+ *
+ * @param[in] pOsInfo  GPU OS info pointer
+ *
+ * @returns void
+ */
+void
+osReleaseGpuOsInfo
+(
+    void *pOsInfo
+)
+{
+    nv_put_file_private(pOsInfo);
+}
+
+NvBool
+osDmabufIsSupported(void)
+{
+    return os_dma_buf_enabled;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c
new file mode 100644
index 0000000..d53ae0a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c
@@ -0,0 +1,4442 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include   // Declares RmInitRm().
+#include "gpu/gpu.h" +#include + +#include +#include +#include +#include "kernel/gpu/mem_mgr/mem_mgr.h" + +#include +#include + +#include +#include +#include +#include +#include + +#include "rmapi/exports.h" +#include "rmapi/rmapi_utils.h" +#include "rmapi/rs_utils.h" +#include "rmapi/resource_fwd_decls.h" +#include +#include +#include "nv-reg.h" +#include "core/hal_mgr.h" +#include "gpu/device/device.h" + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/gpu_uuid.h" + +#include "ctrl/ctrl0000/ctrl0000system.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "ctrl/ctrl0073/ctrl0073system.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl402c.h" + +#include "g_nv_name_released.h" // released chip entries from nvChipAutoFlags.h + +#include + +// +// Helper function which can be called before doing any RM control +// This function: +// +// a. Performs threadStateInit(). +// b. Acquires API lock. +// c. Checks if RMAPI client handle is valid (i.e. RM is initialized) and +// returns early if RMAPI client handle is invalid. +// d. Increments the dynamic power refcount. If GPU is in RTD3 suspended +// state, then it will wake-up the GPU. +// e. Returns the RMAPI interface handle. +// +// This function should be called only when caller doesn't have acquired API +// lock. Caller needs to use RmUnixRmApiEpilogue() after RM control, if +// RmUnixRmApiPrologue() is successful. +// +RM_API *RmUnixRmApiPrologue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode, NvU32 module) +{ + threadStateInit(pThreadNode, THREAD_STATE_FLAGS_NONE); + + if ((rmApiLockAcquire(API_LOCK_FLAGS_NONE, module)) == NV_OK) + { + if ((pNv->rmapi.hClient != 0) && + (os_ref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE) == NV_OK)) + { + return rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + rmApiLockRelease(); + } + + threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE); + + return NULL; +} + +// +// Helper function which can be called after doing RM control, if +// caller has used RmUnixRmApiPrologue() helper function. This function: +// +// a. Decrements the dynamic power refcount. +// b. Release API lock. +// c. Performs threadStateFree(). +// +void RmUnixRmApiEpilogue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode) +{ + os_unref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE); + rmApiLockRelease(); + threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE); +} + +NvBool RmGpuHasIOSpaceEnabled(nv_state_t * nv) +{ + NvU16 val; + NvBool has_io; + os_pci_read_word(nv->handle, NV_CONFIG_PCI_NV_1, &val); + has_io = FLD_TEST_DRF(_CONFIG, _PCI_NV_1, _IO_SPACE, _ENABLED, val); + return has_io; +} + +// This is a stub function for unix +void osHandleDeferredRecovery( + OBJGPU *pGpu +) +{ + +} + +// This is a stub function for unix +NvBool osIsSwPreInitOnly +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_FALSE; +} + +const NvU8 * RmGetGpuUuidRaw( + nv_state_t *pNv +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + NvU32 gidFlags; + NvBool isApiLockTaken = NV_FALSE; + + if (pNv->nv_uuid_cache.valid) + goto done; + + // + // PBI is not present in simulation and the loop inside + // pciPbiReadUuid takes up considerable amount of time in + // simulation environment during RM load. 
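+    // The PBI path is therefore skipped here, with the status forced to
+    // NV_ERR_NOT_SUPPORTED so that the SHA-1 GID query further below acts
+    // as the fallback.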
+ // + if (pGpu && IS_SIMULATION(pGpu)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + } + rmStatus = NV_ERR_NOT_SUPPORTED; + + if (rmStatus == NV_OK) + { + rmStatus = gpumgrSetUuid(pNv->gpu_id, pNv->nv_uuid_cache.uuid); + if (rmStatus != NV_OK) + { + return NULL; + } + + pNv->nv_uuid_cache.valid = NV_TRUE; + goto done; + } + else if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + nv_printf(NV_DBG_INFO, + "NVRM: PBI is not supported for GPU " NV_PCI_DEV_FMT "\n", + NV_PCI_DEV_FMT_ARGS(pNv)); + } + + gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1) + | DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY); + + if (!rmApiLockIsOwner()) + { + rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU); + if (rmStatus != NV_OK) + { + return NULL; + } + + isApiLockTaken = NV_TRUE; + } + + if (pGpu == NULL) + { + if (isApiLockTaken == NV_TRUE) + { + rmApiLockRelease(); + } + + return NULL; + } + + rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags); + if (isApiLockTaken == NV_TRUE) + { + rmApiLockRelease(); + } + + if (rmStatus != NV_OK) + return NULL; + + if (!pGpu->gpuUuid.isInitialized) + return NULL; + + // copy the uuid from the OBJGPU uuid cache + os_mem_copy(pNv->nv_uuid_cache.uuid, pGpu->gpuUuid.uuid, GPU_UUID_LEN); + pNv->nv_uuid_cache.valid = NV_TRUE; + +done: + return pNv->nv_uuid_cache.uuid; +} + +static NV_STATUS RmGpuUuidRawToString( + const NvU8 *pGidData, + char *pBuf, + NvU32 bufLen +) +{ + NvU8 *pGidString; + NvU32 GidStrlen; + NV_STATUS rmStatus; + NvU32 gidFlags; + + gidFlags = DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1); + + rmStatus = transformGidToUserFriendlyString(pGidData, RM_SHA1_GID_SIZE, + &pGidString, &GidStrlen, + gidFlags); + if (rmStatus != NV_OK) + return rmStatus; + + if (bufLen >= GidStrlen) + portMemCopy(pBuf, bufLen, pGidString, GidStrlen); + else + rmStatus = NV_ERR_BUFFER_TOO_SMALL; + + portMemFree((void *)pGidString); + + return rmStatus; +} + +// This function should be called with the API and GPU locks already acquired. +NV_STATUS +RmLogGpuCrash(OBJGPU *pGpu) +{ + NV_STATUS status = NV_OK; + NvBool bGpuIsLost, bGpuIsConnected; + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Re-evaluate whether or not the GPU is accessible. This could be called + // from a recovery context where the OS has re-enabled MMIO for the device. + // This happens during EEH processing on IBM Power + Linux, and marking + // the device as connected again will allow rcdbAddRmGpuDump() to collect + // more GPU state. 
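+    // The check below re-reads NV_PMC_BOOT_0 and compares it against the
+    // value previously cached in nv_priv_t; a matching read implies MMIO is
+    // live again.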
+ // + bGpuIsLost = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST); + bGpuIsConnected = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED); + if (!bGpuIsConnected || bGpuIsLost) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 pmcBoot0 = NV_PRIV_REG_RD32(nv->regs->map_u, NV_PMC_BOOT_0); + if (pmcBoot0 == nvp->pmc_boot_0) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, NV_FALSE); + } + } + + // Restore the disconnected properties, if they were reset + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, bGpuIsConnected); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, bGpuIsLost); + + // Restore persistence mode to the way it was prior to the crash + osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, + pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE)); + + return status; +} + +static void free_os_event_under_lock(nv_event_t *event) +{ + event->active = NV_FALSE; + + // If refcount > 0, event will be freed by osDereferenceObjectCount + // when the last associated RM event is freed. + if (event->refcount == 0) + { + portMemFree(event); + } +} + +static void free_os_events( + nv_file_private_t *nvfp, + NvHandle client +) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t **pprev; + + portSyncSpinlockAcquire(nv->event_spinlock); + + pprev = &nv->event_list; + while (*pprev != NULL) + { + nv_event_t *cur = *pprev; + // + // XXX We must be called from either rm_client_free_os_events() or + // RmFreeUnusedClients() for this to work. + // + if ((cur->hParent == client) || (cur->nvfp == nvfp)) + { + *pprev = cur->next; + free_os_event_under_lock(cur); + } + else + { + pprev = &cur->next; + } + } + + portSyncSpinlockRelease(nv->event_spinlock); +} + +void rm_client_free_os_events( + NvHandle client +) +{ + free_os_events(NULL, client); +} + +void RmFreeUnusedClients( + nv_state_t *nv, + nv_file_private_t *nvfp +) +{ + NvU32 *pClientList; + NvU32 numClients, i; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // + // The 'nvfp' pointer uniquely identifies an open instance in kernel space + // and the kernel interface layer guarantees that we are not called before + // the associated nvfp descriptor is closed. We can thus safely free + // abandoned clients with matching 'nvfp' pointers. + // + status = rmapiGetClientHandlesFromOSInfo(nvfp, &pClientList, &numClients); + if (status != NV_OK) + { + numClients = 0; + } + + for (i = 0; i < numClients; ++i) + { + NV_PRINTF(LEVEL_INFO, "freeing abandoned client 0x%x\n", + pClientList[i]); + + } + + if (numClients != 0) + { + pRmApi->FreeClientList(pRmApi, pClientList, numClients); + + portMemFree(pClientList); + } + + // Clean up any remaining events using this nvfp. + free_os_events(nvfp, 0); +} + +static void RmUnbindLock( + nv_state_t *nv +) +{ + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((pGpu == NULL) || (gpuGetUserClientCount(pGpu) == 0)) + { + nv->flags |= NV_FLAG_UNBIND_LOCK; + } +} + +static NV_STATUS allocate_os_event( + NvHandle hParent, + nv_file_private_t *nvfp, + NvU32 fd +) +{ + nv_state_t *nv = nv_get_ctl_state(); + NvU32 status = NV_OK; + nv_event_t *event; + + nv_event_t *new_event = portMemAllocNonPaged(sizeof(nv_event_t)); + if (new_event == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portSyncSpinlockAcquire(nv->event_spinlock); + for (event = nv->event_list; event; event = event->next) + { + // Only one event may be associated with a given fd. 
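+        // Reject the duplicate before linking the new event into the list.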
+ if (event->hParent == hParent && event->fd == fd) + { + status = NV_ERR_INVALID_ARGUMENT; + portSyncSpinlockRelease(nv->event_spinlock); + goto done; + } + } + + new_event->next = nv->event_list; + nv->event_list = new_event; + portSyncSpinlockRelease(nv->event_spinlock); + +done: + if (status == NV_OK) + { + new_event->hParent = hParent; + new_event->nvfp = nvfp; + new_event->fd = fd; + new_event->active = NV_TRUE; + new_event->refcount = 0; + + NV_PRINTF(LEVEL_INFO, "allocated OS event:\n"); + NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd); + } + else + { + portMemFree(new_event); + } + + return status; +} + +NV_STATUS RmAllocOsEvent( + NvHandle hParent, + nv_file_private_t *nvfp, + NvU32 fd +) +{ + if (NV_OK != allocate_os_event(hParent, nvfp, fd)) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + return NV_OK; +} + +static NV_STATUS free_os_event( + NvHandle hParent, + NvU32 fd +) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event, *tmp; + NV_STATUS result; + + portSyncSpinlockAcquire(nv->event_spinlock); + tmp = event = nv->event_list; + while (event) + { + if ((event->fd == fd) && (event->hParent == hParent)) + { + if (event == nv->event_list) + nv->event_list = event->next; + else + tmp->next = event->next; + break; + } + tmp = event; + event = event->next; + } + + if (event != NULL) + { + free_os_event_under_lock(event); + result = NV_OK; + } + else + result = NV_ERR_INVALID_EVENT; + portSyncSpinlockRelease(nv->event_spinlock); + + if (result == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "freed OS event:\n"); + NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to find OS event:\n"); + NV_PRINTF(LEVEL_ERROR, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_ERROR, " fd: %d\n", fd); + } + + return result; +} + +NV_STATUS RmFreeOsEvent( + NvHandle hParent, + NvU32 fd +) +{ + if (NV_OK != free_os_event(hParent, fd)) + { + return NV_ERR_INVALID_EVENT; + } + return NV_OK; +} + +static void RmExecuteWorkItem( + void *pWorkItem +) +{ + nv_work_item_t *pWi = (nv_work_item_t *)pWorkItem; + NvU32 gpuMask; + NvU32 releaseLocks = 0; + + if (!(pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) && + ((pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY))) + { + // Requesting one of the GPU locks without providing a GPU instance + NV_ASSERT(0); + goto done; + } + + // Get locks requested by workitem + if (NV_OK != workItemLocksAcquire(pWi->gpuInstance, pWi->flags, + &releaseLocks, &gpuMask)) + { + goto done; + } + + // Some work items may not require a valid GPU instance + if (pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) + { + pWi->func.pGpuFunction(pWi->gpuInstance, pWi->pData); + } + else + { + pWi->func.pSystemFunction(pWi->pData); + } + + // Release any locks taken + workItemLocksRelease(releaseLocks, gpuMask); + +done: + if ((pWi->pData != NULL) && + !(pWi->flags & NV_WORK_ITEM_FLAGS_DONT_FREE_DATA)) + { + portMemFree(pWi->pData); + } + + portMemFree((void *)pWi); +} + +static NV_STATUS RmGetEventData( + nv_file_private_t *nvfp, + NvP64 pEvent, + NvU32 *MoreEvents, + NvBool bUserModeArgs +) +{ + NV_STATUS RmStatus; + NvUnixEvent *pKernelEvent = NULL; + nv_event_t nv_event; + RMAPI_PARAM_COPY paramCopy; + 
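+    // RMAPI_PARAM_COPY below acts as a bounce buffer for the client's
+    // NvUnixEvent: rmapiParamsAcquire() copies/maps it in, and
+    // rmapiParamsRelease() copies the result back out as needed.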
+ RmStatus = nv_get_event(nvfp, &nv_event, MoreEvents); + if (RmStatus != NV_OK) + return NV_ERR_OPERATING_SYSTEM; + + // setup for access to client's parameters + RMAPI_PARAM_COPY_INIT(paramCopy, pKernelEvent, pEvent, 1, sizeof(NvUnixEvent)); + RmStatus = rmapiParamsAcquire(¶mCopy, bUserModeArgs); + if (RmStatus != NV_OK) + return NV_ERR_OPERATING_SYSTEM; + + pKernelEvent->hObject = nv_event.hObject; + pKernelEvent->NotifyIndex = nv_event.index; + pKernelEvent->info32 = nv_event.info32; + pKernelEvent->info16 = nv_event.info16; + + // release client buffer access, with copyout as needed + if (rmapiParamsRelease(¶mCopy) != NV_OK) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static NV_STATUS RmAccessRegistry( + NvHandle hClient, + NvHandle hObject, + NvU32 AccessType, + NvP64 clientDevNodeAddress, + NvU32 DevNodeLength, + NvP64 clientParmStrAddress, + NvU32 ParmStrLength, + NvP64 clientBinaryDataAddress, + NvU32 *pBinaryDataLength, + NvU32 *Data, + NvU32 *Entry +) +{ + NvU32 gpuMask = 0, gpuInstance = 0; + OBJGPU *pGpu; + NvBool isDevice = NV_FALSE; + NV_STATUS RmStatus = NV_ERR_OPERATING_SYSTEM; + RsClient *pClient; + Device *pDevice; + Subdevice *pSubdevice; + + RMAPI_PARAM_COPY devNodeParamCopy; + NvU8 *tmpDevNode = NULL; + NvU32 copyOutDevNodeLength = 0; + + RMAPI_PARAM_COPY parmStrParamCopy; + char *tmpParmStr = NULL; + NvU32 copyOutParmStrLength = 0; + + RMAPI_PARAM_COPY binaryDataParamCopy; + NvU8 *tmpBinaryData = NULL; + NvU32 BinaryDataLength = 0; + NvU32 copyOutBinaryDataLength = 0; + + if (NV_OK != serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &pClient)) + return NV_ERR_INVALID_CLIENT; + + if (hClient == hObject) + { + pGpu = NULL; + } + else + { + RmStatus = deviceGetByHandle(pClient, hObject, &pDevice); + if (RmStatus != NV_OK) + { + RmStatus = subdeviceGetByHandle(pClient, hObject, &pSubdevice); + if (RmStatus != NV_OK) + goto done; + + RmStatus = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (RmStatus != NV_OK) + return RmStatus; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + pGpu = GPU_RES_GET_GPU(pSubdevice); + } + else + { + RmStatus = rmGpuGroupLockAcquire(pDevice->deviceInst, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (RmStatus != NV_OK) + return RmStatus; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + pGpu = GPU_RES_GET_GPU(pDevice); + isDevice = NV_TRUE; + } + } + + if (pBinaryDataLength) + { + BinaryDataLength = *pBinaryDataLength; + } + + // a passed-in devNode + if (DevNodeLength) + { + // the passed-in DevNodeLength does not account for '\0' + DevNodeLength++; + + if (DevNodeLength > NVOS38_MAX_REGISTRY_STRING_LENGTH) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's DevNode + RMAPI_PARAM_COPY_INIT(devNodeParamCopy, tmpDevNode, clientDevNodeAddress, DevNodeLength, 1); + devNodeParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + RmStatus = rmapiParamsAcquire(&devNodeParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + // a passed-in parmStr + if (ParmStrLength) + { + // the passed-in ParmStrLength does not account for '\0' + ParmStrLength++; + + if (ParmStrLength > NVOS38_MAX_REGISTRY_STRING_LENGTH) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's parmStr + RMAPI_PARAM_COPY_INIT(parmStrParamCopy, tmpParmStr, clientParmStrAddress, ParmStrLength, 1); + 
parmStrParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + RmStatus = rmapiParamsAcquire(&parmStrParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + if ((AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) || + (AccessType == NVOS38_ACCESS_TYPE_WRITE_BINARY)) + { + if ((BinaryDataLength > NVOS38_MAX_REGISTRY_BINARY_LENGTH) || + (BinaryDataLength == 0)) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's binaryData + RMAPI_PARAM_COPY_INIT(binaryDataParamCopy, tmpBinaryData, clientBinaryDataAddress, BinaryDataLength, 1); + if (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) + binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + RmStatus = rmapiParamsAcquire(&binaryDataParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + switch (AccessType) + { + case NVOS38_ACCESS_TYPE_READ_DWORD: + RmStatus = osReadRegistryDword(pGpu, + tmpParmStr, Data); + break; + + case NVOS38_ACCESS_TYPE_WRITE_DWORD: + if (isDevice && osIsAdministrator()) + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + RmStatus = osWriteRegistryDword(pGpu, + tmpParmStr, *Data); + + if (RmStatus != NV_OK) + goto done; + } + break; + } + + RmStatus = osWriteRegistryDword(pGpu, + tmpParmStr, *Data); + break; + + case NVOS38_ACCESS_TYPE_READ_BINARY: + RmStatus = osReadRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, &BinaryDataLength); + + if (RmStatus != NV_OK) + { + goto done; + } + + if (BinaryDataLength) + copyOutBinaryDataLength = BinaryDataLength; + + break; + + case NVOS38_ACCESS_TYPE_WRITE_BINARY: + if (isDevice && osIsAdministrator()) + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + RmStatus = osWriteRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, + BinaryDataLength); + + if (RmStatus != NV_OK) + goto done; + } + break; + } + + RmStatus = osWriteRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, + BinaryDataLength); + break; + + default: + RmStatus = NV_ERR_INVALID_ACCESS_TYPE; + } + + done: + if (gpuMask != 0) + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + + if (tmpDevNode != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutDevNodeLength == 0)) + devNodeParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + devNodeParamCopy.paramsSize = copyOutDevNodeLength; + if (NV_OK != rmapiParamsRelease(&devNodeParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + } + if (tmpParmStr != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutParmStrLength == 0)) + parmStrParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + parmStrParamCopy.paramsSize = copyOutParmStrLength; + if (NV_OK != rmapiParamsRelease(&parmStrParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + } + if (tmpBinaryData != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutBinaryDataLength == 0)) + binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + binaryDataParamCopy.paramsSize = copyOutBinaryDataLength; + if (NV_OK != rmapiParamsRelease(&binaryDataParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + *pBinaryDataLength = copyOutBinaryDataLength; + } + + serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClient); + return RmStatus; +} + +static NV_STATUS RmUpdateDeviceMappingInfo( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMappable, + void *pOldCpuAddress, + void 
*pNewCpuAddress +) +{ + NV_STATUS status; + RsClient *pClient; + RsResourceRef *pMappableRef; + RsCpuMapping *pCpuMapping; + Device *pDevice; + Subdevice *pSubdevice; + NvU32 gpuMask = 0; + + status = serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + if (status != NV_OK) + { + status = subdeviceGetByHandle(pClient, hDevice, &pSubdevice); + if (status != NV_OK) + goto done; + + status = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (status != NV_OK) + goto done; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + else + { + status = rmGpuGroupLockAcquire(pDevice->deviceInst, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (status != NV_OK) + goto done; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + } + + status = clientGetResourceRef(pClient, hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + if ((objDynamicCastById(pMappableRef->pResource, classId(Memory)) == NULL) && + (objDynamicCastById(pMappableRef->pResource, classId(KernelChannel)) == NULL)) + { + status = NV_ERR_INVALID_OBJECT_HANDLE; + goto done; + } + + status = refFindCpuMappingWithFilter(pMappableRef, + NV_PTR_TO_NvP64(pOldCpuAddress), + serverutilMappingFilterCurrentUserProc, + &pCpuMapping); + if (status != NV_OK) + goto done; + + pCpuMapping->pLinearAddress = NV_PTR_TO_NvP64(pNewCpuAddress); + +done: + + if (gpuMask != 0) + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + + serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClient); + return status; +} + +static NV_STATUS RmPerformVersionCheck( + void *pData, + NvU32 dataSize +) +{ + nv_ioctl_rm_api_version_t *pParams; + char clientCh, rmCh; + const char *rmStr = NV_VERSION_STRING; + NvBool relaxed = NV_FALSE; + NvU32 i; + + if (dataSize != sizeof(nv_ioctl_rm_api_version_t)) + return NV_ERR_INVALID_ARGUMENT; + + pParams = pData; + + // + // write the reply value, so that the client knows we recognized + // the request + // + pParams->reply = NV_RM_API_VERSION_REPLY_RECOGNIZED; + + // + // the client requested to override the version check; just return + // success. + // + if (pParams->cmd == NV_RM_API_VERSION_CMD_OVERRIDE) + { + return NV_OK; + } + + // + // the client requested relaxed version checking; we will only + // compare the strings until the first decimal point. + // + if (pParams->cmd == NV_RM_API_VERSION_CMD_RELAXED) + { + relaxed = NV_TRUE; + } + + // + // rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within + // NV_RM_API_VERSION_STRING_LENGTH, so that: + // + // (1) If the versions don't match, we can return rmStr in + // pParams->versionString. + // (2) The below loop is guaranteed to not overrun rmStr. 
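+    //
+    //     (os_string_length() excludes the terminating NUL, hence the +1 in
+    //     the length check below.)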
+ // + if ((os_string_length(rmStr) + 1) > NV_RM_API_VERSION_STRING_LENGTH) + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + for (i = 0; i < NV_RM_API_VERSION_STRING_LENGTH; i++) + { + clientCh = pParams->versionString[i]; + rmCh = rmStr[i]; + + // + // fail if the current character is not the same + // + if (clientCh != rmCh) + { + break; + } + + // + // if relaxed matching was requested, succeed when we find the + // first decimal point + // + if ((relaxed) && (clientCh == '.')) + { + return NV_OK; + } + + // + // we found the end of the strings: succeed + // + if (clientCh == '\0') + { + return NV_OK; + } + } + + // + // the version strings did not match: print an error message and + // copy the RM's version string into pParams->versionString, so + // that the client can report the mismatch; explicitly NULL + // terminate the client's string, since we cannot trust it + // + pParams->versionString[NV_RM_API_VERSION_STRING_LENGTH - 1] = '\0'; + + nv_printf(NV_DBG_ERRORS, + "NVRM: API mismatch: the client has the version %s, but\n" + "NVRM: this kernel module has the version %s. Please\n" + "NVRM: make sure that this kernel module and all NVIDIA driver\n" + "NVRM: components have the same version.\n", + pParams->versionString, NV_VERSION_STRING); + + os_string_copy(pParams->versionString, rmStr); + + return NV_ERR_GENERIC; +} + +NV_STATUS RmSystemEvent( + nv_state_t *pNv, + NvU32 event_type, + NvU32 event_val +) +{ + NV_STATUS rmStatus = NV_OK; + NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + switch (event_type) + { + case NV_SYSTEM_ACPI_BATTERY_POWER_EVENT: + { + Nv2080PowerEventNotification powerParams; + portMemSet(&powerParams, 0, sizeof(powerParams)); + powerParams.bSwitchToAC = NV_TRUE; + powerParams.bGPUCapabilityChanged = NV_FALSE; + powerParams.displayMaskAffected = 0; + + params.eventType = NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE; + if (event_val == NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY) + { + params.eventData = NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_BATTERY; + powerParams.bSwitchToAC = NV_FALSE; + } + else if (event_val == NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC) + { + params.eventData = NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_AC; + powerParams.bSwitchToAC = NV_TRUE; + } + else + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + rmStatus = pRmApi->Control(pRmApi, + pNv->rmapi.hClient, + pNv->rmapi.hClient, + NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT, + (void *)¶ms, + sizeof(NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS)); + + // + // TODO: bug 2812848 Investigate if we can use system event + // or if we can broadcast NV2080_NOTIFIERS_POWER_EVENT for all GPUs + // + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POWER_EVENT, + &powerParams, sizeof(powerParams), 0, 0); + } + break; + } + default: + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + + return rmStatus; +} + +/*! + * @brief Deal with D-notifier events to apply a performance + * level based on the requested auxiliary power-state. + * Read confluence page "D-Notifiers on Linux" for more details. + * + * @param[in] pGpu OBJGPU pointer. + * @param[in] event_type NvU32 Event type. 
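+ *
+ * (The ACPI_NOTIFY_POWER_LEVEL_D1..D5 cases of rm_acpi_notify() below all
+ * funnel into this handler; the body is a stub in this source release.)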
+ */ +static void RmHandleDNotifierEvent( + nv_state_t *pNv, + NvU32 event_type +) +{ +} + +static NV_STATUS +RmDmabufVerifyMemHandle( + OBJGPU *pGpu, + NvHandle hSrcClient, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + void *pGpuInstanceInfo +) +{ + NV_STATUS status; + RsClient *pClient = NULL; + RsResourceRef *pSrcMemoryRef = NULL; + Memory *pSrcMemory = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hSrcClient, &pClient)); + + status = clientGetResourceRef(pClient, hMemory, &pSrcMemoryRef); + if (status != NV_OK) + { + return status; + } + + pSrcMemory = dynamicCast(pSrcMemoryRef->pResource, Memory); + if (pSrcMemory == NULL) + { + return NV_ERR_INVALID_OBJECT; + } + + pMemDesc = pSrcMemory->pMemDesc; + + // Check if hMemory belongs to the same pGpu + if ((pMemDesc->pGpu != pGpu) && + (pSrcMemory->pGpu != pGpu)) + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // Offset and size must be aligned to OS page-size + if (!NV_IS_ALIGNED64(offset, os_page_size) || + !NV_IS_ALIGNED64(size, os_page_size)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Only supported for vidmem handles + if (memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((size == 0) || + (size > memdescGetSize(pMemDesc)) || + (offset > (memdescGetSize(pMemDesc) - size))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +static NV_STATUS +RmDmabufGetClientAndDevice( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubdevice, + void **ppGpuInstanceInfo +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + *phClient = pMemoryManager->hClient; + *phDevice = pMemoryManager->hDevice; + *phSubdevice = pMemoryManager->hSubdevice; + *ppGpuInstanceInfo = NULL; + + return NV_OK; +} + +static void +RmDmabufPutClientAndDevice( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo +) +{ +} + +/* + * --------------------------------------------------------------------------- + * + * The routines below are part of the interface between the kernel interface + * layer and the kernel-agnostic portions of the resource manager. 
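+ *
+ * Each rm_*() entry point below follows the same shape: NV_ENTER_RM_RUNTIME
+ * switches onto the RM runtime stack, threadStateInit() (where needed) sets
+ * up thread state, the appropriate API and/or GPU locks are taken, the
+ * corresponding Rm*() worker runs, and everything is unwound in reverse
+ * order.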
+ * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_private_state( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NvBool retval; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + retval = RmInitPrivateState(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_free_private_state( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmFreePrivateState(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_init_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + NvBool retval = NV_FALSE; + void *fp; + NvBool bEnabled; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_DEVICE_INIT); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK) + { + if (!((gpumgrQueryGpuDrainState(pNv->gpu_id, &bEnabled, NULL) == NV_OK) + && bEnabled)) + { + if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + retval = RmPartiallyInitAdapter(pNv); + } + else + { + retval = RmInitAdapter(pNv); + } + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_disable_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue)); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + RmPartiallyDisableAdapter(pNv); + } + else + { + RmDisableAdapter(pNv); + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue)); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_shutdown_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + RmShutdownAdapter(pNv); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_exclude_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + rmStatus = RmExcludeAdapter(pNv); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_acquire_api_lock( + nvidia_stack_t *sp +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_release_api_lock( + nvidia_stack_t *sp +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + 
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // UNLOCK: release API lock
+    rmApiLockRelease();
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL rm_acquire_gpu_lock(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // LOCK: acquire GPU lock
+    rmStatus = rmDeviceGpuLocksAcquire(NV_GET_NV_PRIV_PGPU(nv),
+                                       GPUS_LOCK_FLAGS_NONE,
+                                       RM_LOCK_MODULES_OSAPI);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL rm_release_gpu_lock(
+    nvidia_stack_t *sp,
+    nv_state_t *nv
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // UNLOCK: release GPU lock
+    rmDeviceGpuLocksRelease(NV_GET_NV_PRIV_PGPU(nv), GPUS_LOCK_FLAGS_NONE, NULL);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return NV_OK;
+}
+
+NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock(
+    nvidia_stack_t *sp
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // LOCK: acquire all GPUs lock
+    rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL rm_release_all_gpus_lock(
+    nvidia_stack_t *sp
+)
+{
+    THREAD_STATE_NODE threadState;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    // UNLOCK: release all GPUs lock
+    rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    NV_EXIT_RM_RUNTIME(sp,fp);
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Handle ACPI_NOTIFY_GPS_STATUS_CHANGE event.
+ *
+ * This function is called when the SBIOS triggers a GPS STATUS_CHANGE
+ * event; it issues the RM control call NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL
+ * to initialize the GPS data from the SBIOS.
+ */
+static void RmHandleGPSStatusChange
+(
+    nv_state_t *pNv
+)
+{
+}
+
+/*!
+ * @brief Function to handle device specific ACPI events.
+ *
+ * @param[in]   sp          nvidia_stack_t pointer.
+ * @param[in]   nv          nv_state_t pointer.
+ * @param[in]   event_type  NvU32 Event type.
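+ *
+ * @note Only the display hot-plug probe (ACPI_VIDEO_NOTIFY_PROBE), GPS
+ *       status change, and D1-D5 power-level notifications are handled
+ *       here; any other event is logged and triggers an assert.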
+ */ +void NV_API_CALL rm_acpi_notify( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 event_type +) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + + switch (event_type) + { + case ACPI_VIDEO_NOTIFY_PROBE: + { + THREAD_STATE_NODE threadState; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + RmHandleDisplayChange(sp, nv); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + break; + } + + case ACPI_NOTIFY_GPS_STATUS_CHANGE: + RmHandleGPSStatusChange(nv); + break; + + case ACPI_NOTIFY_POWER_LEVEL_D1: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D2: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D3: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D4: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D5: + RmHandleDNotifierEvent(nv, event_type); + break; + + default: + NV_PRINTF(LEVEL_INFO, "No support for 0x%x event\n", event_type); + NV_ASSERT(0); + break; + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +static void nv_align_mmap_offset_length( + nv_usermap_access_params_t *nvuap) +{ + NvU64 page_size = os_page_size; + NvU64 end = nvuap->size + (nvuap->addr & (page_size - 1)); + + nvuap->mmap_start = NV_ALIGN_DOWN(nvuap->addr, page_size); + nvuap->mmap_size = NV_ALIGN_UP(end, page_size); + nvuap->offset = NV_ALIGN_DOWN(nvuap->offset, page_size); +} + +static inline NV_STATUS RmGetArrayMinMax( + NvU64 *array, + NvU64 count, + NvU64 *min, + NvU64 *max +) +{ + NvU64 i; + + if (array == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *min = array[0]; + *max = array[0]; + + if (count == 1) + return NV_OK; + + for (i = 1; i < count; i++) + { + if (array[i] > *max) + *max = array[i]; + + if (array[i] < *min) + *min = array[i]; + } + + return NV_OK; +} + +static NV_STATUS RmSetUserMapAccessRange( + nv_usermap_access_params_t *nvuap +) +{ + NV_STATUS status = NV_OK; + + if (nvuap->contig) + { + nvuap->access_start = nvuap->mmap_start; + nvuap->access_size = nvuap->mmap_size; + } + else + { + NvU64 highest_address_mapped; + NvU64 lowest_address_mapped; + + status = RmGetArrayMinMax(nvuap->page_array, nvuap->num_pages, + &lowest_address_mapped, + &highest_address_mapped); + if (status != NV_OK) + { + return status; + } + + nvuap->access_start = lowest_address_mapped; + nvuap->access_size = (highest_address_mapped + os_page_size) - lowest_address_mapped; + } + + return status; +} + +static NV_STATUS RmGetAllocPrivate(NvU32, NvU32, NvU64, NvU64, NvU32 *, void **, + NvU64 *); +static NV_STATUS RmValidateMmapRequest(nv_state_t *, NvU64, NvU64, NvU32 *); + +/* Must be called with the API lock and the GPU locks */ +static NV_STATUS RmCreateMmapContextLocked( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 address, + NvU64 size, + NvU64 offset, + NvU32 cachingType, + NvU32 fd +) +{ + NV_STATUS status = NV_OK; + void *pAllocPriv = NULL; + OBJGPU *pGpu = NULL; + NvBool bCoherentAtsCpuOffset = NV_FALSE; + nv_state_t *pNv = NULL; + NvU64 addr = (NvU64)address; + NvU32 prot = 0; + NvU64 pageIndex = 0; + nv_usermap_access_params_t *nvuap = NULL; + NvBool bClientMap = (hClient == hDevice); + + if (!bClientMap) + { + if (CliSetGpuContext(hClient, hDevice, &pGpu, NULL) != NV_OK) + { + NvU32 tmp; + if (CliSetSubDeviceContext(hClient, hDevice, &tmp, &pGpu) != NV_OK) + { + // + // If this mapping isn't for a GPU then we don't need to + // create a context for it. 
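+                // (status is still NV_OK at this point, so the caller
+                // treats the request as handled without an mmap context.)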
+ // + return status; + } + } + } + + status = os_alloc_mem((void**)&nvuap, sizeof(nv_usermap_access_params_t)); + if (status != NV_OK) + { + return status; + } + + portMemSet(nvuap, 0, sizeof(nv_usermap_access_params_t)); + nvuap->addr = addr; + nvuap->size = size; + nvuap->offset = offset; + nvuap->caching = cachingType; + + // + // Assume the allocation is contiguous until RmGetMmapPteArray + // determines otherwise. + // + nvuap->contig = NV_TRUE; + nv_align_mmap_offset_length(nvuap); + + if (pGpu != NULL) + { + pNv = NV_GET_NV_STATE(pGpu); + } + + // + // If no device is given, or the address isn't in the given device's BARs, + // validate this as a system memory mapping and associate it with the + // control device. + // + if ((pNv == NULL) || + (!IS_REG_OFFSET(pNv, addr, size) && + !IS_FB_OFFSET(pNv, addr, size) && + !bCoherentAtsCpuOffset && + !IS_IMEM_OFFSET(pNv, addr, size))) + { + pNv = nv_get_ctl_state(); + + // + // Validate the mapping request by looking up the underlying sysmem + // allocation. + // + status = RmGetAllocPrivate(hClient, hMemory, addr, size, &prot, &pAllocPriv, + &pageIndex); + + if (status != NV_OK) + { + goto done; + } + } + else + { + + if (RmSetUserMapAccessRange(nvuap) != NV_OK) + { + goto done; + } + + status = nv_get_usermap_access_params(pNv, nvuap); + if (status != NV_OK) + { + goto done; + } + + // Validate the mapping request for BAR's. + status = RmValidateMmapRequest(pNv, nvuap->access_start, + nvuap->access_size, &prot); + if (status != NV_OK) + { + goto done; + } + } + + status = nv_add_mapping_context_to_file(pNv, nvuap, prot, pAllocPriv, + pageIndex, fd); + +done: + os_free_mem(nvuap); + return status; +} + +// TODO: Bug 1802250: [uvm8] Use an alt stack in all functions in unix/src/osapi.c +NV_STATUS rm_create_mmap_context( + nv_state_t *pNv, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 address, + NvU64 size, + NvU64 offset, + NvU32 cachingType, + NvU32 fd +) +{ + NV_STATUS rmStatus = NV_OK; + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + RmClient *pClient; + + if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClient)) + return NV_ERR_INVALID_CLIENT; + + if (pClient->ProcID != osGetCurrentProcess()) + { + rmStatus = NV_ERR_INVALID_CLIENT; + } + // LOCK: acquire GPUs lock + else if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + rmStatus = RmCreateMmapContextLocked(hClient, hDevice, hMemory, + address, size, offset, cachingType, fd); + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + serverutilReleaseClient(LOCK_ACCESS_READ, pClient); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return rmStatus; +} + +static NV_STATUS RmGetAllocPrivate( + NvU32 hClient, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 *pProtection, + void **ppPrivate, + NvU64 *pPageIndex +) +{ + RmClient *pClient; + NV_STATUS rmStatus; + PMEMORY_DESCRIPTOR pMemDesc; + NvU32 pageOffset; + NvU64 pageCount; + RsResourceRef *pResourceRef; + RmResource *pRmResource; + void *pMemData; + NvBool bPeerIoMem; + NvBool bReadOnlyMem; + *pProtection = NV_PROTECT_READ_WRITE; + *ppPrivate = NULL; + + pageOffset = (offset & ~os_page_mask); + offset &= os_page_mask; + + NV_ASSERT_OR_RETURN(rmApiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClient)) + return NV_ERR_INVALID_CLIENT; + + rmStatus = 
clientGetResourceRef(staticCast(pClient, RsClient), hMemory, &pResourceRef); + if (rmStatus != NV_OK) + goto done; + + pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + if (!pRmResource) + { + rmStatus = NV_ERR_INVALID_OBJECT; + goto done; + } + + rmStatus = rmresGetMemoryMappingDescriptor(pRmResource, &pMemDesc); + if (rmStatus != NV_OK) + goto done; + + bReadOnlyMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY); + bPeerIoMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM); + + if (!(pMemDesc->Allocated || bPeerIoMem)) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_SYSMEM: + break; + default: + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + pMemData = memdescGetMemData(pMemDesc); + if (pMemData == NULL) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + rmStatus = os_match_mmap_offset(pMemData, offset, pPageIndex); + if (rmStatus != NV_OK) + goto done; + + pageCount = ((pageOffset + length) / os_page_size); + pageCount += (*pPageIndex + (((pageOffset + length) % os_page_size) ? 1 : 0)); + + if (pageCount > NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (bReadOnlyMem) + *pProtection = NV_PROTECT_READABLE; + + *ppPrivate = pMemData; + +done: + serverutilReleaseClient(LOCK_ACCESS_READ, pClient); + + return rmStatus; +} + +static NV_STATUS RmValidateMmapRequest( + nv_state_t *pNv, + NvU64 offset, + NvU64 length, + NvU32 *pProtection +) +{ + NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS params = { 0 }; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS status; + + if (osIsAdministrator()) + { + *pProtection = NV_PROTECT_READ_WRITE; + return NV_OK; + } + + params.addressStart = offset; + params.addressLength = length; + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, + pNv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST, + ¶ms, sizeof(params)); + + if (status == NV_OK) + { + *pProtection = params.protection; + } + + return status; +} + +NV_STATUS rm_get_adapter_status( + nv_state_t *pNv, + NvU32 *pStatus +) +{ + NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM; + + // LOCK: acquire API lock + if (rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + rmStatus = RmGetAdapterStatus(pNv, pStatus); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return rmStatus; +} + +NvBool NV_API_CALL rm_init_rm( + nvidia_stack_t *sp +) +{ + NvBool retval; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + retval = RmInitRm(); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_shutdown_rm( + nvidia_stack_t *sp +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmShutdownRm(); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_init_event_locks( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + NvBool ret; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pNv->event_spinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + ret = (pNv->event_spinlock != NULL); + + NV_EXIT_RM_RUNTIME(sp,fp); + return ret; +} + +void NV_API_CALL rm_destroy_event_locks( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (pNv && pNv->event_spinlock) + portSyncSpinlockDestroy(pNv->event_spinlock); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_get_vbios_version( + nvidia_stack_t *sp, + nv_state_t *pNv, + char *vbiosString +) +{ + *vbiosString = '\0'; +} + 
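+/*
+ * Several helpers in this file issue RM control calls through the
+ * internal RMAPI interface. The recurring shape, sketched here with a
+ * hypothetical control command NVXXXX_CTRL_CMD_DO_THING and its
+ * parameter struct (illustrative only):
+ *
+ *     NVXXXX_CTRL_DO_THING_PARAMS params = { 0 };
+ *     RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+ *     NV_STATUS status;
+ *
+ *     params.someField = someValue;
+ *     status = pRmApi->Control(pRmApi, nv->rmapi.hClient,
+ *                              nv->rmapi.hSubDevice,
+ *                              NVXXXX_CTRL_CMD_DO_THING,
+ *                              &params, sizeof(params));
+ *
+ * RmValidateMmapRequest() above is a concrete instance of this pattern.
+ */
+
+//
+// User-channel stop/restart is not implemented in this source; the two
+// entry points below are stubs that return NV_ERR_NOT_SUPPORTED.
+//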
+NV_STATUS NV_API_CALL rm_stop_user_channels( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_restart_user_channels( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_ioctl( + nvidia_stack_t *sp, + nv_state_t *pNv, + nv_file_private_t *nvfp, + NvU32 Command, + void *pData, + NvU32 dataSize +) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +void NV_API_CALL rm_cleanup_file_private( + nvidia_stack_t *sp, + nv_state_t *pNv, + nv_file_private_t *nvfp +) +{ + THREAD_STATE_NODE threadState; + void *fp; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + RM_API_CONTEXT rmApiContext = {0}; + NvU32 i; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + threadStateSetTimeoutOverride(&threadState, 10 * 1000); + + if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK) + return; + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + // Unref any object which was exported on this file. + if (nvfp->handles != NULL) + { + for (i = 0; i < nvfp->maxHandles; i++) + { + if (nvfp->handles[i] == 0) + { + continue; + } + + RmFreeObjExportHandle(nvfp->handles[i]); + nvfp->handles[i] = 0; + } + + os_free_mem(nvfp->handles); + nvfp->handles = NULL; + nvfp->maxHandles = 0; + } + + // Free any RM clients associated with this file. + RmFreeUnusedClients(pNv, nvfp); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + rmapiEpilogue(pRmApi, &rmApiContext); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + if (nvfp->ctl_nvfp != NULL) + { + nv_put_file_private(nvfp->ctl_nvfp_priv); + nvfp->ctl_nvfp = NULL; + nvfp->ctl_nvfp_priv = NULL; + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_unbind_lock( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + RmUnbindLock(pNv); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS rm_alloc_os_event( + NvHandle hClient, + nv_file_private_t *nvfp, + NvU32 fd +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + RmStatus = RmAllocOsEvent(hClient, nvfp, fd); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS rm_free_os_event( + NvHandle hClient, + NvU32 fd +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + RmStatus = RmFreeOsEvent(hClient, fd); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS rm_get_event_data( + nv_file_private_t *nvfp, + NvP64 pEvent, + NvU32 *MoreEvents +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK) + { 
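+        // Fetch the next pending OS event for this file; *MoreEvents
+        // tells the caller whether further events remain queued.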
+ RmStatus = RmGetEventData(nvfp, pEvent, MoreEvents, NV_TRUE); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_read_registry_dword( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU32 *Data +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + // + // We can be called from different contexts: + // + // 1) early initialization without device state. + // 2) from outside the RM API (without the lock held) + // + // In context 1)the API lock is not needed and + // in context 2), it needs to be acquired. + // + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + // Skipping the NULL check as osReadRegistryDword takes care of it. + RmStatus = osReadRegistryDword(pGpu, regParmStr, Data); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_dword( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU32 Data +) +{ + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + RmStatus = RmWriteRegistryDword(nv, regParmStr, Data); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_binary( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + RmStatus = RmWriteRegistryBinary(nv, regParmStr, Data, cbLen); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_string( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + const char *string, + NvU32 stringLength +) +{ + NV_STATUS rmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + rmStatus = RmWriteRegistryString(nv, regParmStr, string, (stringLength + 1)); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +static NvBool NV_API_CALL rm_is_space(const char ch) +{ + // + // return true if it is a: + // ' ' : (space - decimal 32.) 
+ // '\t' : (TAB - decimal 9) + // 'LF' : (Line feed, new line - decimal 10) + // 'VT' : (Vertical TAB - decimal 11) + // 'FF' : (Form feed, new page - decimal 12) + // '\r' : (carriage return - decimal 13) + // + return ((ch == ' ') || ((ch >= '\t') && (ch <= '\r'))); +} + +char* NV_API_CALL rm_remove_spaces(const char *in) +{ + unsigned int len = os_string_length(in) + 1; + const char *in_ptr; + char *out, *out_ptr; + + if (os_alloc_mem((void **)&out, len) != NV_OK) + return NULL; + + in_ptr = in; + out_ptr = out; + + while (*in_ptr != '\0') + { + if (!rm_is_space(*in_ptr)) + *out_ptr++ = *in_ptr; + in_ptr++; + } + *out_ptr = '\0'; + + return out; +} + +char* NV_API_CALL rm_string_token(char **strp, const char delim) +{ + char *s, *token; + + if ((strp == NULL) || (*strp == NULL)) + return NULL; + + s = token = *strp; + *strp = NULL; + + for (; *s != '\0'; s++) { + if (*s == delim) { + *s = '\0'; + *strp = ++s; + break; + } + } + + return token; +} + +// Parse string passed in NVRM as module parameter. +void NV_API_CALL rm_parse_option_string(nvidia_stack_t *sp, const char *nvRegistryDwords) +{ + unsigned int i; + nv_parm_t *entry; + char *option_string = NULL; + char *ptr, *token; + char *name, *value; + NvU32 data; + + if (nvRegistryDwords != NULL) + { + if ((option_string = rm_remove_spaces(nvRegistryDwords)) == NULL) + { + return; + } + + ptr = option_string; + + while ((token = rm_string_token(&ptr, ';')) != NULL) + { + if (!(name = rm_string_token(&token, '=')) || !os_string_length(name)) + { + continue; + } + + if (!(value = rm_string_token(&token, '=')) || !os_string_length(value)) + { + continue; + } + + if (rm_string_token(&token, '=') != NULL) + { + continue; + } + + data = os_strtoul(value, NULL, 0); + + for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) + { + if (os_string_compare(entry->name, name) == 0) + break; + } + + if (!entry->name) + rm_write_registry_dword(sp, NULL, name, data); + else + *entry->data = data; + } + + // Free the memory allocated by rm_remove_spaces() + os_free_mem(option_string); + } +} + +NV_STATUS NV_API_CALL rm_run_rc_callback( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + OBJGPU *pGpu; + void *fp; + + /* make sure our timer's not still running when it shouldn't be */ + if (nv == NULL) + return NV_ERR_GENERIC; + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (pGpu == NULL) + return NV_ERR_GENERIC; + + if (nv->rc_timer_enabled == 0) + return NV_ERR_GENERIC; + + if (!FULL_GPU_SANITY_CHECK(pGpu)) + { + return NV_ERR_GENERIC; + } + + NV_ENTER_RM_RUNTIME(sp,fp); + + osRun1HzCallbacksNow(pGpu); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return NV_OK; +} + +static NV_STATUS RmRunNanoTimerCallback( + OBJGPU *pGpu, + void *pTmrEvent +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + POBJTMR pTmr = GPU_GET_TIMER(pGpu); + THREAD_STATE_NODE threadState; + NV_STATUS status = NV_OK; + + // LOCK: try to acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK) + { + return status; + } + + if ((status = osCondAcquireRmSema(pSys->pSema)) != NV_OK) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + return status; + } + + threadStateInitISRAndDeferredIntHandler(&threadState, pGpu, + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER); + + // Call timer event service + status = tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, (PTMR_EVENT)pTmrEvent); + + // Out of conflicting thread + threadStateFreeISRAndDeferredIntHandler(&threadState, + pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER); + + 
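+    // Unwind in reverse acquisition order: the semaphore is dropped
+    // first, then the GPU locks.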
osReleaseRmSema(pSys->pSema, NULL); + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu); + + return status; +} + +NV_STATUS NV_API_CALL rm_run_nano_timer_callback +( + nvidia_stack_t *sp, + nv_state_t *nv, + void *pTmrEvent +) +{ + NV_STATUS status; + OBJGPU *pGpu = NULL; + void *fp; + + if (nv == NULL) + return NV_ERR_GENERIC; + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (pGpu == NULL) + return NV_ERR_GENERIC; + + if (!FULL_GPU_SANITY_CHECK(pGpu)) + { + return NV_ERR_GENERIC; + } + + NV_ENTER_RM_RUNTIME(sp,fp); + + status = RmRunNanoTimerCallback(pGpu, pTmrEvent); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + +void NV_API_CALL rm_execute_work_item( + nvidia_stack_t *sp, + void *pNvWorkItem +) +{ + void *fp; + THREAD_STATE_NODE threadState; + + NV_ENTER_RM_RUNTIME(sp, fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + RmExecuteWorkItem(pNvWorkItem); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp, fp); +} + +const char* NV_API_CALL rm_get_device_name( + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device +) +{ + unsigned int i; + const char *tmpName = NULL; + + for (i = 0; i < NV_ARRAY_ELEMENTS(sChipsReleased); i++) + { + // if the device ID doesn't match, go to the next entry + if (device != sChipsReleased[i].devID) + { + continue; + } + + // if the entry has 0 for the subsystem IDs, then the device + // ID match is sufficient, but continue scanning through + // sChipsReleased[] in case there is a subsystem ID match later + // in the table + if (sChipsReleased[i].subSystemVendorID == 0 && + sChipsReleased[i].subSystemID == 0) + { + tmpName = sChipsReleased[i].name; + continue; + } + + if (subsystem_vendor == sChipsReleased[i].subSystemVendorID && + subsystem_device == sChipsReleased[i].subSystemID) + { + tmpName = sChipsReleased[i].name; + break; + } + } + + return (tmpName != NULL) ? tmpName : "Unknown"; +} + +NV_STATUS rm_access_registry( + NvHandle hClient, + NvHandle hObject, + NvU32 AccessType, + NvP64 clientDevNodeAddress, + NvU32 DevNodeLength, + NvP64 clientParmStrAddress, + NvU32 ParmStrLength, + NvP64 clientBinaryDataAddress, + NvU32 *pBinaryDataLength, + NvU32 *Data, + NvU32 *Entry +) +{ + NV_STATUS RmStatus; + NvBool bReadOnly = (AccessType == NVOS38_ACCESS_TYPE_READ_DWORD) || + (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY); + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(bReadOnly ? 
RMAPI_LOCK_FLAGS_READ : RMAPI_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + RmStatus = RmAccessRegistry(hClient, + hObject, + AccessType, + clientDevNodeAddress, + DevNodeLength, + clientParmStrAddress, + ParmStrLength, + clientBinaryDataAddress, + pBinaryDataLength, + Data, + Entry); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS rm_update_device_mapping_info( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pOldCpuAddress, + void *pNewCpuAddress +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK) + { + RmStatus = RmUpdateDeviceMappingInfo(hClient, + hDevice, + hMemory, + pOldCpuAddress, + pNewCpuAddress); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +static NvBool NV_API_CALL rm_is_legacy_device( + NvU16 device_id, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_warning +) +{ + return NV_FALSE; +} + +static NvBool NV_API_CALL rm_is_legacy_arch( + NvU32 pmc_boot_0, + NvU32 pmc_boot_42 +) +{ + NvBool legacy = NV_FALSE; + + return legacy; +} + +NV_STATUS NV_API_CALL rm_is_supported_device( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJSYS *pSys; + POBJHALMGR pHalMgr; + GPUHWREG *reg_mapping; + NvU32 myHalPublicID; + void *fp; + NvU32 pmc_boot_0; + NvU32 pmc_boot_42; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pSys = SYS_GET_INSTANCE(); + pHalMgr = SYS_GET_HALMGR(pSys); + + reg_mapping = osMapKernelSpace(pNv->regs->cpu_address, + os_page_size, + NV_MEMORY_UNCACHED, + NV_PROTECT_READABLE); + + if (reg_mapping == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to map registers!\n"); + rmStatus = NV_ERR_OPERATING_SYSTEM; + goto threadfree; + } + pmc_boot_0 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_0); + pmc_boot_42 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_42); + + osUnmapKernelSpace(reg_mapping, os_page_size); + + if ((pmc_boot_0 == 0xFFFFFFFF) && (pmc_boot_42 == 0xFFFFFFFF)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x\n" + "NVRM: (PCI ID: %04x:%04x) installed in this system has\n" + "NVRM: fallen off the bus and is not responding to commands.\n", + pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot, + pNv->pci_info.function, pNv->pci_info.vendor_id, + pNv->pci_info.device_id); + rmStatus = NV_ERR_GPU_IS_LOST; + goto threadfree; + } + + /* + * For legacy architectures, rm_is_legacy_arch() prints "legacy" message. + * We do not want to print "unsupported" message for legacy architectures + * to avoid confusion. Also, the probe should not continue for legacy + * architectures. Hence, we set rmStatus to NV_ERR_NOT_SUPPORTED and + * goto threadfree. 
+ */ + if (rm_is_legacy_arch(pmc_boot_0, pmc_boot_42)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto threadfree; + } + + rmStatus = halmgrGetHalForGpu(pHalMgr, pmc_boot_0, pmc_boot_42, &myHalPublicID); + + if (rmStatus != NV_OK) + goto print_unsupported; + + goto threadfree; + +print_unsupported: + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x (PCI ID: %04x:%04x)\n" + "NVRM: installed in this system is not supported by the\n" + "NVRM: NVIDIA %s driver release.\n" + "NVRM: Please see 'Appendix A - Supported NVIDIA GPU Products'\n" + "NVRM: in this release's README, available on the operating system\n" + "NVRM: specific graphics driver download page at www.nvidia.com.\n", + pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot, + pNv->pci_info.function, pNv->pci_info.vendor_id, + pNv->pci_info.device_id, NV_VERSION_STRING); + +threadfree: + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NvBool NV_API_CALL rm_is_supported_pci_device( + NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning +) +{ + const NvU16 nv_pci_vendor_id = 0x10DE; + const NvU16 nv_pci_id_riva_tnt = 0x0020; + const NvU8 nv_pci_class_display = 0x03; + const NvU8 nv_pci_subclass_display_vga = 0x00; + const NvU8 nv_pci_subclass_display_3d = 0x02; + + if (pci_class != nv_pci_class_display) + { + return NV_FALSE; + } + + if ((pci_subclass != nv_pci_subclass_display_vga) && + (pci_subclass != nv_pci_subclass_display_3d)) + { + return NV_FALSE; + } + + if (vendor != nv_pci_vendor_id) + { + return NV_FALSE; + } + + if (device < nv_pci_id_riva_tnt) + { + return NV_FALSE; + } + + if (rm_is_legacy_device( + device, + subsystem_vendor, + subsystem_device, + print_legacy_warning)) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +/* + * Performs the I2C transfers which are related with DP AUX channel + */ +static NV_STATUS RmDpAuxI2CTransfer +( + nv_state_t *pNv, + NvU32 displayId, + NvU8 addr, + NvU32 len, + NvU8 *pData, + NvBool bWrite +) +{ + NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS *pParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS status; + + if (len > NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE) + { + NV_PRINTF(LEVEL_ERROR, + "%s: requested I2C transfer length %u is greater than maximum supported length %u\n", + __FUNCTION__, len, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE); + return NV_ERR_NOT_SUPPORTED; + } + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pParams, 0, sizeof(*pParams)); + + pParams->subDeviceInstance = 0; + pParams->displayId = displayId; + pParams->addr = addr; + pParams->size = len; + pParams->bWrite = bWrite; + + if (bWrite) + { + portMemCopy(pParams->data, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE, + pData, len); + } + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp, + NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL, + pParams, sizeof(*pParams)); + + if ((status == NV_OK) && !bWrite) + { + portMemCopy(pData, len, pParams->data, pParams->size); + } + + portMemFree(pParams); + + return status; +} + +/* + * Performs the I2C transfers which are not related with DP AUX channel + */ +static NV_STATUS RmNonDPAuxI2CTransfer +( + nv_state_t *pNv, + NvU8 portId, + NvU8 type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +) +{ + NV402C_CTRL_I2C_TRANSACTION_PARAMS *params; 
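+    // params is heap-allocated and then filled in below according to the
+    // requested command type.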
+ RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS rmStatus = NV_OK; + + params = portMemAllocNonPaged(sizeof(*params)); + if (params == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(params, 0, sizeof(*params)); + + params->portId = portId; + // precondition our address (our stack requires this) + params->deviceAddress = addr << 1; + + switch (type) + { + case NV_I2C_CMD_WRITE: + params->transData.i2cBlockData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW; + params->transData.i2cBlockData.messageLength = len; + params->transData.i2cBlockData.pMessage = pData; + break; + + case NV_I2C_CMD_SMBUS_WRITE: + params->transData.smbusByteData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_SMBUS_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW; + params->transData.smbusByteData.message = pData[0]; + params->transData.smbusByteData.registerAddress = command; + break; + + case NV_I2C_CMD_SMBUS_BLOCK_WRITE: + params->transData.smbusBlockData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_SMBUS_BLOCK_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW; + params->transData.smbusBlockData.registerAddress = command; + params->transData.smbusBlockData.messageLength = len; + params->transData.smbusBlockData.pMessage = pData; + break; + + case NV_I2C_CMD_SMBUS_QUICK_WRITE: + params->transData.smbusQuickData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_SMBUS_QUICK_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW; + break; + + default: + portMemFree(params); + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient, + pNv->rmapi.hI2C, + NV402C_CTRL_CMD_I2C_TRANSACTION, + params, sizeof(*params)); + + // + // For NV_I2C_CMD_SMBUS_READ, copy the read data to original + // data buffer. 
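+    // (Single-byte reads come back in transData.smbusByteData.message
+    // rather than through a caller-supplied pMessage pointer.)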
+ // + if (rmStatus == NV_OK && type == NV_I2C_CMD_SMBUS_READ) + { + pData[0] = params->transData.smbusByteData.message; + } + + portMemFree(params); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_i2c_transfer( + nvidia_stack_t *sp, + nv_state_t *pNv, + void *pI2cAdapter, + NvU8 type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +) +{ + THREAD_STATE_NODE threadState; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = NULL; + NvBool unlockApi = NV_FALSE; + NvBool unlockGpu = NV_FALSE; + NvU32 x; + void *fp; + NvU32 numDispId = 0; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C) + { + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto finish; + + unlockApi = NV_TRUE; + + // LOCK: acquire GPUs lock + if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto finish; + + unlockGpu = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (!pGpu) + { + rmStatus = NV_ERR_GENERIC; + goto finish; + } + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter) + { + break; + } + } + + if (x == MAX_I2C_ADAPTERS) + { + rmStatus = NV_ERR_GENERIC; + goto finish; + } + + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + NvU32 displayId = pNvp->i2c_adapters[x].displayId[numDispId]; + + if (displayId == INVALID_DISP_ID) + { + continue; + } + + // Handle i2c-over-DpAux adapters separately from regular i2c adapters + if (displayId == 0) + { + rmStatus = RmNonDPAuxI2CTransfer(pNv, pNvp->i2c_adapters[x].port, + type, addr, command, len, pData); + } + else + { + if ((type != NV_I2C_CMD_READ) && (type != NV_I2C_CMD_WRITE)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto semafinish; + } + + rmStatus = RmDpAuxI2CTransfer(pNv, displayId, addr, len, pData, + type == NV_I2C_CMD_WRITE); + } +semafinish: + if (rmStatus == NV_OK) + { + break; + } + } + +finish: + if (unlockGpu) + { + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + if (unlockApi) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +static void rm_i2c_add_adapter( + nv_state_t *pNv, + NvU32 port, + NvU32 displayId +) +{ + NvU32 y, free; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NvU32 numDispId = 0; + + for (y = 0, free = MAX_I2C_ADAPTERS; y < MAX_I2C_ADAPTERS; y++) + { + if (pNvp->i2c_adapters[y].pOsAdapter == NULL) + { + // Only find the first free entry, and ignore the rest + if (free == MAX_I2C_ADAPTERS) + { + free = y; + } + } + else if (pNvp->i2c_adapters[y].port == port) + { + break; + } + } + + if (y < MAX_I2C_ADAPTERS) + { + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + if (pNvp->i2c_adapters[y].displayId[numDispId] == INVALID_DISP_ID) + { + pNvp->i2c_adapters[y].displayId[numDispId] = displayId; + break; + } + else + { + NV_PRINTF(LEVEL_INFO, + "%s: adapter already exists (port=0x%x, displayId=0x%x)\n", + __FUNCTION__, port, + pNvp->i2c_adapters[y].displayId[numDispId]); + } + } + + if (numDispId == MAX_DISP_ID_PER_ADAPTER) + { + NV_PRINTF(LEVEL_ERROR, + "%s: no more free display Id entries in adapter\n", + __FUNCTION__); + } + + return; + } + + if (free == MAX_I2C_ADAPTERS) + { + NV_PRINTF(LEVEL_ERROR, "%s: no more free adapter entries exist\n", + 
__FUNCTION__); + return; + } + + pNvp->i2c_adapters[free].pOsAdapter = nv_i2c_add_adapter(pNv, port); + pNvp->i2c_adapters[free].port = port; + // When port is added, numDispId will be 0. + pNvp->i2c_adapters[free].displayId[numDispId] = displayId; +} + +void RmI2cAddGpuPorts(nv_state_t * pNv) +{ + NvU32 x = 0; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + NvU32 displayMask; + NV_STATUS status; + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS systemGetSupportedParams = { 0 }; + + // Make displayId as Invalid. + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + NvU32 numDispId; + + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID; + } + } + + // First, set up the regular i2c adapters - one per i2c port + if (pNv->rmapi.hI2C != 0) + { + NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS i2cPortInfoParams = { 0 }; + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hI2C, + NV402C_CTRL_CMD_I2C_GET_PORT_INFO, + &i2cPortInfoParams, sizeof(i2cPortInfoParams)); + + if (status == NV_OK) + { + for (x = 0; x < NV_ARRAY_ELEMENTS(i2cPortInfoParams.info); x++) + { + // + // Check if this port is implemented and RM I2C framework has + // validated this port. Only limited amount of ports can + // be added to the OS framework. + // + if (FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _IMPLEMENTED, + _YES, i2cPortInfoParams.info[x]) && + FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _VALID, + _YES, i2cPortInfoParams.info[x])) + { + rm_i2c_add_adapter(pNv, x, 0); + } + } + } + } + + // + // Now set up the i2c-over-DpAux adapters - one per DP OD + // + // 1. Perform NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS RM control which + // will return the mask for all the display ID's. + // 2. Loop for all the display ID's and do + // NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO RM control call. For each + // output resource, check for the following requirements: + // a. It must be DisplayPort. + // b. It must be internal to the GPU (ie, not on the board) + // c. It must be directly connected to the physical connector (ie, no DP + // 1.2 multistream ODs). + // 3. Perform NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID RM control for + // getting the I2C port data. + // + // With these restrictions, we should only end up with at most one OD + // per DP connector. 
+    //
+
+    if (pNv->rmapi.hDisp == 0)
+    {
+        return;
+    }
+
+    systemGetSupportedParams.subDeviceInstance = 0;
+    status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp,
+                             NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED,
+                             &systemGetSupportedParams, sizeof(systemGetSupportedParams));
+
+    if (status != NV_OK)
+    {
+        return;
+    }
+
+    for (displayMask = systemGetSupportedParams.displayMask;
+         displayMask != 0;
+         displayMask &= ~LOWESTBIT(displayMask))
+    {
+        NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS orInfoParams = { 0 };
+        NvU32 displayId = LOWESTBIT(displayMask);
+
+        orInfoParams.subDeviceInstance = 0;
+        orInfoParams.displayId = displayId;
+
+        status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp,
+                                 NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+                                 &orInfoParams, sizeof(orInfoParams));
+
+        if ((status == NV_OK) &&
+            (orInfoParams.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+            ((orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) ||
+             (orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B)) &&
+            (orInfoParams.location == NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP) &&
+            (!orInfoParams.bIsDispDynamic))
+        {
+            NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS i2cPortIdParams = { 0 };
+
+            i2cPortIdParams.subDeviceInstance = 0;
+            i2cPortIdParams.displayId = displayId;
+
+            status = pRmApi->Control(pRmApi,
+                                     pNv->rmapi.hClient,
+                                     pNv->rmapi.hDisp,
+                                     NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID,
+                                     &i2cPortIdParams,
+                                     sizeof(i2cPortIdParams));
+
+            if ((status == NV_OK) &&
+                (i2cPortIdParams.ddcPortId != NV0073_CTRL_SPECIFIC_I2C_PORT_NONE))
+            {
+                rm_i2c_add_adapter(pNv, i2cPortIdParams.ddcPortId - 1, displayId);
+            }
+        }
+    }
+}
+
+void NV_API_CALL rm_i2c_remove_adapters(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv
+)
+{
+    //
+    // Cycle through all adapter entries: first unregister each adapter
+    // from the kernel, then clear its entry in the driver's i2c adapter
+    // list once that completes. This should only be used at module exit
+    // time; otherwise it could fail to remove some of the kernel adapters,
+    // and subsequent transfer requests would result in crashes.
+ // + NvU32 x = 0; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NvU32 numDispId; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + if (pNvp->i2c_adapters[x].pOsAdapter != NULL) + { + nv_i2c_del_adapter(pNv, pNvp->i2c_adapters[x].pOsAdapter); + + pNvp->i2c_adapters[x].pOsAdapter = NULL; + pNvp->i2c_adapters[x].port = 0; + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID; + } + } + } +} + +NvBool NV_API_CALL rm_i2c_is_smbus_capable( + nvidia_stack_t *sp, + nv_state_t *pNv, + void *pI2cAdapter +) +{ + THREAD_STATE_NODE threadState; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = NULL; + NvBool unlock = NV_FALSE; + NvU32 x; + NvBool ret = NV_FALSE; + void *fp; + NvU32 numDispId = 0; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C) + { + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto semafinish; + + unlock = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (!pGpu) + { + goto semafinish; + } + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter) + { + break; + } + } + + if (x == MAX_I2C_ADAPTERS) + { + goto semafinish; + } + + // we do not support smbus functions on i2c-over-DPAUX + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + if (pNvp->i2c_adapters[x].displayId[numDispId] == 0x0) + { + ret = NV_TRUE; + } + } + +semafinish: + if (unlock) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return ret; +} + +NV_STATUS NV_API_CALL rm_perform_version_check( + nvidia_stack_t *sp, + void *pData, + NvU32 dataSize +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + rmStatus = RmPerformVersionCheck(pData, dataSize); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_system_event( + nvidia_stack_t *sp, + NvU32 event_type, + NvU32 event_val +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + nv_state_t *nv; + OBJGPU *pGpu = gpumgrGetGpu(0);// Grab the first GPU + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + if (pGpu != NULL) + { + nv = NV_GET_NV_STATE(pGpu); + if ((rmStatus = os_ref_dynamic_power(nv, NV_DYNAMIC_PM_FINE)) == + NV_OK) + { + // LOCK: acquire GPU lock + if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == + NV_OK) + { + rmStatus = RmSystemEvent(nv, event_type, event_val); + + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + os_unref_dynamic_power(nv, NV_DYNAMIC_PM_FINE); + } + // UNLOCK: release API lock + rmApiLockRelease(); + } + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages( + nvidia_stack_t *sp, + nv_dma_device_t *peer, + NvU8 *pGpuUuid, + NvU32 pageSize, + NvU32 pageCount, + NvU64 *pDmaAddresses, + void **ppPriv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + 
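+//
+// Like rm_p2p_dma_map_pages() above, the remaining rm_p2p_*() entry
+// points are stubbed out in this source and unconditionally return
+// NV_ERR_NOT_SUPPORTED.
+//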
+NV_STATUS NV_API_CALL rm_p2p_get_gpu_info( + nvidia_stack_t *sp, + NvU64 gpuVirtualAddress, + NvU64 length, + NvU8 **ppGpuUuid, + void **ppGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent( + nvidia_stack_t *sp, + NvU64 gpuVirtualAddress, + NvU64 length, + void **p2pObject, + NvU64 *pPhysicalAddresses, + NvU32 *pEntries, + void *pPlatformData, + void *pGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_get_pages( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 gpuVirtualAddress, + NvU64 length, + NvU64 *pPhysicalAddresses, + NvU32 *pWreqMbH, + NvU32 *pRreqMbH, + NvU32 *pEntries, + NvU8 **ppGpuUuid, + void *pPlatformData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_register_callback( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU64 gpuVirtualAddress, + NvU64 length, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent( + nvidia_stack_t *sp, + void *p2pObject, + void *pKey +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_p2p_put_pages( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 gpuVirtualAddress, + void *pKey +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +char* NV_API_CALL rm_get_gpu_uuid( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + NV_STATUS rmStatus; + const NvU8 *pGid; + OBJGPU *pGpu; + char *pGidString; + + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // Allocate space for the ASCII string + rmStatus = os_alloc_mem((void **)&pGidString, GPU_UUID_ASCII_LEN); + if (rmStatus != NV_OK) + { + pGidString = NULL; + goto done; + } + + // Get the raw UUID; note the pGid is cached, so we do not need to free it + pGid = RmGetGpuUuidRaw(nv); + + if (pGid != NULL) + { + // Convert the raw UUID to ASCII + rmStatus = RmGpuUuidRawToString(pGid, pGidString, GPU_UUID_ASCII_LEN); + if (rmStatus != NV_OK) + { + os_free_mem(pGidString); + pGidString = NULL; + } + } + else + { + const char *pTmpString; + + // No raw GID, but we still return a string + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if (rmStatus == NV_ERR_NOT_SUPPORTED && pGpu != NULL && + pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + pTmpString = "N/A"; + else + pTmpString = "GPU-???????\?-???\?-???\?-???\?-????????????"; + + portStringCopy(pGidString, GPU_UUID_ASCII_LEN, pTmpString, + portStringLength(pTmpString) + 1); + } + +done: + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return pGidString; +} + +// +// This function will return the UUID in the binary format +// +const NvU8 * NV_API_CALL rm_get_gpu_uuid_raw( + nvidia_stack_t *sp, + nv_state_t *nv) +{ + THREAD_STATE_NODE threadState; + void *fp; + const NvU8 *pGid; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGid = RmGetGpuUuidRaw(nv); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return pGid; +} + +static void rm_set_firmware_logs( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + NV_STATUS status; + NvU32 data; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + status = RmReadRegistryDword(nv, NV_REG_ENABLE_GPU_FIRMWARE_LOGS, &data); + if (status == NV_OK) + { + if ((data == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE) +#if defined(DEBUG) || defined(DEVELOP) + || (data == 
NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG) +#endif + ) + { + nv->enable_firmware_logs = NV_TRUE; + } + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_set_rm_firmware_requested( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + nv->request_firmware = NV_TRUE; + nv->allow_fallback_to_monolithic_rm = NV_FALSE; + + // Check if we want firmware logs + if (nv->request_firmware) + rm_set_firmware_logs(sp, nv); +} + +// +// This function will be called by nv_procfs_read_gpu_info(). +// nv_procfs_read_gpu_info() will not print the 'GPU Firmware:' field at +// all if the 'version' string is empty. +// +// If GSP is enabled (firmware was requested), this function needs to return +// the firmware version or "NA" in case of any errors. +// +// If GSP is not enabled (firmware was not requested), this function needs to +// return the empty string, regardless of error cases. +// +void NV_API_CALL rm_get_firmware_version( + nvidia_stack_t *sp, + nv_state_t *nv, + char *version, + NvLength version_length +) +{ + NV2080_CTRL_GSP_GET_FEATURES_PARAMS params = { 0 }; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus = NV_OK; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_GPU); + if (pRmApi != NULL) + { + rmStatus = pRmApi->Control(pRmApi, + nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GSP_GET_FEATURES, + ¶ms, + sizeof(params)); + + RmUnixRmApiEpilogue(nv, &threadState); + } + else + { + rmStatus = NV_ERR_INVALID_STATE; + } + + if (rmStatus != NV_OK) + { + if (RMCFG_FEATURE_GSP_CLIENT_RM && nv->request_firmware) + { + const char *pTmpString = "N/A"; + portStringCopy(version, version_length, pTmpString, portStringLength(pTmpString) + 1); + } + NV_PRINTF(LEVEL_INFO, + "%s: Failed to query gpu build versions, status=0x%x\n", + __FUNCTION__, + rmStatus); + goto finish; + } + portMemCopy(version, version_length, params.firmwareVersion, sizeof(params.firmwareVersion)); + +finish: + NV_EXIT_RM_RUNTIME(sp,fp); +} + +// +// disable GPU SW state persistence +// + +void NV_API_CALL rm_disable_gpu_state_persistence(nvidia_stack_t *sp, nv_state_t *nv) +{ + THREAD_STATE_NODE threadState; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, NV_FALSE); + osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, NV_FALSE); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_log_gpu_crash( + nv_stack_t *sp, + nv_state_t *nv +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if ((status = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((pGpu != NULL) && + ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK)) + { + status = RmLogGpuCrash(pGpu); + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + +void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd) +{ + nvidia_kernel_rmapi_ops_t *ops = ops_cmd; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + switch (ops->op) + { + case NV01_FREE: + Nv01FreeKernel(&ops->params.free); + break; + + case 
NV01_ALLOC_MEMORY:
+            Nv01AllocMemoryKernel(&ops->params.allocMemory64);
+            break;
+
+        case NV04_ALLOC:
+            Nv04AllocKernel(&ops->params.alloc);
+            break;
+
+        case NV04_VID_HEAP_CONTROL:
+            Nv04VidHeapControlKernel(ops->params.pVidHeapControl);
+            break;
+
+        case NV04_MAP_MEMORY:
+            Nv04MapMemoryKernel(&ops->params.mapMemory);
+            break;
+
+        case NV04_UNMAP_MEMORY:
+            Nv04UnmapMemoryKernel(&ops->params.unmapMemory);
+            break;
+
+        case NV04_ALLOC_CONTEXT_DMA:
+            Nv04AllocContextDmaKernel(&ops->params.allocContextDma2);
+            break;
+
+        case NV04_MAP_MEMORY_DMA:
+            Nv04MapMemoryDmaKernel(&ops->params.mapMemoryDma);
+            break;
+
+        case NV04_UNMAP_MEMORY_DMA:
+            Nv04UnmapMemoryDmaKernel(&ops->params.unmapMemoryDma);
+            break;
+
+        case NV04_BIND_CONTEXT_DMA:
+            Nv04BindContextDmaKernel(&ops->params.bindContextDma);
+            break;
+
+        case NV04_CONTROL:
+            Nv04ControlKernel(&ops->params.control);
+            break;
+
+        case NV04_DUP_OBJECT:
+            Nv04DupObjectKernel(&ops->params.dupObject);
+            break;
+
+        case NV04_SHARE:
+            Nv04ShareKernel(&ops->params.share);
+            break;
+
+        case NV04_ADD_VBLANK_CALLBACK:
+            Nv04AddVblankCallbackKernel(&ops->params.addVblankCallback);
+            break;
+    }
+
+    NV_EXIT_RM_RUNTIME(sp,fp);
+}
+
+//
+// ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) initialization
+//
+void RmInitAcpiMethods(OBJOS *pOS, OBJSYS *pSys, OBJGPU *pGpu)
+{
+    NvU32 handlesPresent;
+
+    if (pSys->getProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE))
+        return;
+
+    nv_acpi_methods_init(&handlesPresent);
+
+}
+
+//
+// ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) teardown
+//
+void RmUnInitAcpiMethods(OBJSYS *pSys)
+{
+    pSys->setProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE, NV_FALSE);
+
+    nv_acpi_methods_uninit();
+}
+
+//
+// Converts an array of OS page addresses to an array of RM page addresses.
+// This assumes that:
+// (1) The pteArray is at least pageCount entries large,
+// (2) The pageCount is given in RM pages, and
+// (3) The OS page entries start at index 0.
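+//
+// For example (hypothetical sizes), with 64K OS pages and 4K RM pages,
+// NV_RM_TO_OS_PAGE_SHIFT would be 4: RM page i maps to OS page (i >> 4)
+// at byte offset (i & 15) * 4K within that OS page.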
+// +void RmInflateOsToRmPageArray(RmPhysAddr *pteArray, NvU64 pageCount) +{ + NvUPtr osPageIdx, osPageOffset; + NvU64 i; + + // + // We can do the translation in place by moving backwards, since there + // will always be more RM pages than OS pages + // + for (i = pageCount - 1; i != NV_U64_MAX; i--) + { + osPageIdx = i >> NV_RM_TO_OS_PAGE_SHIFT; + osPageOffset = (i & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) * + NV_RM_PAGE_SIZE; + pteArray[i] = pteArray[osPageIdx] + osPageOffset; + } +} + +void RmDeflateRmToOsPageArray(RmPhysAddr *pteArray, NvU64 pageCount) +{ + NvU64 i; + + for (i = 0; i < NV_RM_PAGES_TO_OS_PAGES(pageCount); i++) + { + pteArray[i] = pteArray[(i << NV_RM_TO_OS_PAGE_SHIFT)]; + } + + // Zero out the rest of the addresses, which are now invalid + portMemSet(pteArray + i, 0, sizeof(*pteArray) * (pageCount - i)); +} + +NvBool NV_API_CALL +rm_get_device_remove_flag +( + nvidia_stack_t * sp, + NvU32 gpu_id +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NvBool bRemove; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (gpumgrQueryGpuDrainState(gpu_id, NULL, &bRemove) != NV_OK) + { + bRemove = NV_FALSE; + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + return bRemove; +} + +NvBool NV_API_CALL +rm_gpu_need_4k_page_isolation +( + nv_state_t *nv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + return nvp->b_4k_page_isolation_required; +} + +NV_STATUS NV_API_CALL rm_get_gpu_numa_info( + nvidia_stack_t *sp, + nv_state_t *nv, + NvS32 *pNid, + NvU64 *pNumaMemAddr, + NvU64 *pNumaMemSize, + NvU64 *pOfflineAddresses, + NvU32 *pOfflineAddressesCount +) +{ + *pNid = NV0000_CTRL_NO_NUMA_NODE; + *pNumaMemAddr = 0; + *pNumaMemSize = 0; + *pOfflineAddressesCount = 0; + + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_numa_online( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + + +NV_STATUS NV_API_CALL rm_gpu_numa_offline( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// +// A device is considered "sequestered" if it has drain state enabled for it. +// The kernel interface layer can use this to check the drain state of a device +// in paths outside of initialization, e.g., when clients attempt to reference +// count the device. +// +NvBool NV_API_CALL rm_is_device_sequestered( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NvBool bDrain = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU) == NV_OK) + { + // + // If gpumgrQueryGpuDrainState succeeds, bDrain will be set as needed. + // If gpumgrQueryGpuDrainState fails, bDrain will stay false; we assume + // that if core RM can't tell us the drain state, it must not be + // attached and the "sequestered" question is not relevant. + // + (void) gpumgrQueryGpuDrainState(pNv->gpu_id, &bDrain, NULL); + + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + return bDrain; +} + +void NV_API_CALL rm_check_for_gpu_surprise_removal( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NV_STATUS rmStatus; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock. 
+ if ((rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GPU)) == NV_OK) + { + osHandleGpuLost(pGpu); + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release api lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvBool bIncr +) +{ + THREAD_STATE_NODE threadState; + void *fp; + OBJGPU *pGpu; + NV_STATUS rmStatus = NV_OK; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (pGpu != NULL) + { + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU); + if (rmStatus == NV_OK) + { + rmStatus = gpuSetExternalKernelClientCount(pGpu, bIncr); + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NvBool rm_get_uefi_console_status( + nv_state_t *nv +) +{ + NvU16 fbWidth, fbHeight, fbDepth, fbPitch; + NvU64 fbSize; + NvU64 fbBaseAddress = 0; + NvBool bConsoleDevice = NV_FALSE; + + // os_get_screen_info() will return dimensions and an address for + // any fbdev driver (e.g., efifb, vesafb, etc). To find if this is a + // UEFI console check the fbBaseAddress: if it was set up by the EFI GOP + // driver, it will point into BAR1 (FB); if it was set up by the VBIOS, + // it will point to BAR2 + 16MB. + os_get_screen_info(&fbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, + nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address, + nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000); + + fbSize = fbHeight * fbPitch; + + bConsoleDevice = (fbSize != 0); + + return bConsoleDevice; +} + +NvU64 rm_get_uefi_console_size( + nv_state_t *nv, + NvU64 *pFbBaseAddress +) +{ + NvU16 fbWidth, fbHeight, fbDepth, fbPitch; + NvU64 fbSize; + + fbSize = fbWidth = fbHeight = fbDepth = fbPitch = 0; + + // os_get_screen_info() will return dimensions and an address for + // any fbdev driver (e.g., efifb, vesafb, etc). To find if this is a + // UEFI console check the fbBaseAddress: if it was set up by the EFI GOP + // driver, it will point into BAR1 (FB); if it was set up by the VBIOS, + // it will point to BAR2 + 16MB. + os_get_screen_info(pFbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, + nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address, + nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000); + + fbSize = fbHeight * fbPitch; + + return fbSize; +} + +/* + * IOMMU needs to be present on the server to support SR-IOV vGPU, unless + * we have SR-IOV enabled for remote GPU. + */ + +NvBool NV_API_CALL rm_is_iommu_needed_for_sriov( + nvidia_stack_t *sp, + nv_state_t * nv +) +{ + OBJGPU *pGpu; + NvU32 data; + NvBool ret = NV_TRUE; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_REMOTE_GPU, &data) == NV_OK) + { + if (data == NV_REG_STR_RM_REMOTE_GPU_ENABLE) + ret = NV_FALSE; + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return ret; +} + +// +// Verifies the handle, offset and size and dups hMemory. +// Must be called with API lock and GPU lock held. 
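+// +// Note: if the dup under hDevice fails with NV_ERR_INVALID_OBJECT_PARENT, +// the implementation below retries the dup under hSubdevice before +// reporting failure to the caller.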
+// +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hSrcClient, + NvHandle hDstClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + NvHandle *phMemoryDuped +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJGPU *pGpu; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + NV_ASSERT(rmApiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + rmStatus = RmDmabufVerifyMemHandle(pGpu, hSrcClient, hMemory, + offset, size, pGpuInstanceInfo); + if (rmStatus == NV_OK) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemoryDuped = 0; + + rmStatus = pRmApi->DupObject(pRmApi, + hDstClient, + hDevice, + &hMemoryDuped, + hSrcClient, + hMemory, + 0); + if (rmStatus == NV_OK) + { + *phMemoryDuped = hMemoryDuped; + } + else if (rmStatus == NV_ERR_INVALID_OBJECT_PARENT) + { + hMemoryDuped = 0; + + // If duping under Device fails, try duping under Subdevice + rmStatus = pRmApi->DupObject(pRmApi, + hDstClient, + hSubdevice, + &hMemoryDuped, + hSrcClient, + hMemory, + 0); + if (rmStatus == NV_OK) + { + *phMemoryDuped = hMemoryDuped; + } + } + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +// +// Frees dup'd hMemory. +// Must be called with API lock and GPU lock held. +// +void NV_API_CALL rm_dma_buf_undup_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory +) +{ + THREAD_STATE_NODE threadState; + RM_API *pRmApi; + OBJGPU *pGpu; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + NV_ASSERT(rmApiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + pRmApi->Free(pRmApi, hClient, hMemory); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +// +// Maps a handle to BAR1. +// Must be called with API lock and GPU lock held. +// +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + NvU64 *pBar1Va +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// +// Unmaps a handle from BAR1. +// Must be called with API lock and GPU lock held. 
+// +NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory, + NvU64 size, + NvU64 bar1Va +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubdevice, + void **ppGpuInstanceInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + rmStatus = RmDmabufGetClientAndDevice(pGpu, hClient, phClient, phDevice, + phSubdevice, ppGpuInstanceInfo); + + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +void NV_API_CALL rm_dma_buf_put_client_and_device( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + RmDmabufPutClientAndDevice(pGpu, hClient, hDevice, hSubdevice, + pGpuInstanceInfo); + + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + NV_ASSERT_OK(rmStatus); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c new file mode 100644 index 0000000..098c825 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c @@ -0,0 +1,1633 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/************************************************************************************************************** +* +* Description: +* UNIX-general, device-independent initialization code for +* the resource manager. +* +* +**************************************************************************************************************/ + +#include +#include // NV device driver interface +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "gpu/gpu.h" +#include +#include "nverror.h" +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "gpu_mgr/gpu_db.h" +#include +#include +#include +#include + +#include +// RMCONFIG: need definition of REGISTER_ALL_HALS() +#include "g_hal_register.h" + +typedef enum +{ + RM_INIT_OK, + + /* general os errors */ + RM_INIT_REG_SETUP_FAILED = 0x10, + RM_INIT_SYS_ENVIRONMENT_FAILED, + + /* gpu errors */ + RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED = 0x20, + RM_INIT_GPU_GPUMGR_CREATE_DEV_FAILED, + RM_INIT_GPU_GPUMGR_ATTACH_GPU_FAILED, + RM_INIT_GPU_PRE_INIT_FAILED, + RM_INIT_GPU_STATE_INIT_FAILED, + RM_INIT_GPU_LOAD_FAILED, + RM_INIT_GPU_UNIVERSAL_VALIDATION_FAILED, + RM_INIT_GPU_DMA_CONFIGURATION_FAILED, + + /* vbios errors */ + RM_INIT_VBIOS_FAILED = 0x30, + RM_INIT_VBIOS_POST_FAILED, + RM_INIT_VBIOS_X86EMU_FAILED, + + /* scalability errors */ + RM_INIT_SCALABILITY_FAILED = 0x40, + + /* general core rm errors */ + RM_INIT_WATCHDOG_FAILED, + RM_FIFO_GET_UD_BAR1_MAP_INFO_FAILED, + RM_GPUDB_REGISTER_FAILED, + + RM_INIT_ALLOC_RMAPI_FAILED, + RM_INIT_GPUINFO_WITH_RMAPI_FAILED, + + /* rm firmware errors */ + RM_INIT_FIRMWARE_POLICY_FAILED = 0x60, + RM_INIT_FIRMWARE_FETCH_FAILED, + RM_INIT_FIRMWARE_VALIDATION_FAILED, + RM_INIT_FIRMWARE_INIT_FAILED, + + RM_INIT_MAX_FAILURES +} rm_init_status; + +typedef rm_init_status RM_INIT_STATUS; + +typedef struct { + RM_INIT_STATUS initStatus; + NV_STATUS rmStatus; + NvU32 line; +} UNIX_STATUS; + +#define INIT_UNIX_STATUS { RM_INIT_OK, NV_OK, 0 } +#define RM_INIT_SUCCESS(init) ((init) == RM_INIT_OK) + +#define RM_SET_ERROR(status, err) { (status).initStatus = (err); \ + (status).line = __LINE__; } + + +// +// GPU architectures support DMA addressing up to a certain address width, +// above which all other bits in any given DMA address must not vary +// (e.g., all 0). This value is the minimum of the DMA addressing +// capabilities, in number of physical address bits, for all supported +// GPU architectures. +// +#define NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH 36 + +static inline NvU64 nv_encode_pci_info(nv_pci_info_t *pci_info) +{ + return gpuEncodeDomainBusDevice(pci_info->domain, pci_info->bus, pci_info->slot); +} + +static inline NvU32 nv_generate_id_from_pci_info(nv_pci_info_t *pci_info) +{ + return gpuGenerate32BitId(pci_info->domain, pci_info->bus, pci_info->slot); +} + +static inline void nv_os_map_kernel_space(nv_state_t *nv, nv_aperture_t *aperture) +{ + NV_ASSERT(aperture->map == NULL); + + // let's start off assuming a standard device and map the registers + // normally. 
It is unfortunate to hard-code the register size here, but we don't + // want to fail trying to map all of a multi-devices' register space + aperture->map = osMapKernelSpace(aperture->cpu_address, + aperture->size, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + aperture->map_u = (nv_phwreg_t)aperture->map; +} + +// local prototypes +static void initUnixSpecificRegistry(OBJGPU *); + +NvBool osRmInitRm(OBJOS *pOS) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU64 system_memory_size = (NvU64)-1; + + NV_PRINTF(LEVEL_INFO, "init rm\n"); + + if (os_is_efi_enabled()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_IS_UEFI, NV_TRUE); + } + + // have to init this before the debug subsystem, which will + // try to check the value of ResmanDebugLevel + RmInitRegistry(); + + // init the debug subsystem if necessary + os_dbg_init(); + nvDbgInitRmMsg(NULL); + + // Force nvlog reinit since module params are now available + NVLOG_UPDATE(); + + // Register all supported hals + if (REGISTER_ALL_HALS() != NV_OK) + { + RmDestroyRegistry(NULL); + return NV_FALSE; + } + + system_memory_size = NV_RM_PAGES_PER_OS_PAGE * os_get_num_phys_pages(); + + // if known, relay the number of system memory pages (in terms of RM page + // size) to the RM; this is needed for e.g. TurboCache parts. + if (system_memory_size != (NvU64)-1) + pOS->SystemMemorySize = system_memory_size; + + // Setup any ThreadState defaults + threadStateInitSetupFlags(THREAD_STATE_SETUP_FLAGS_ENABLED | + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED | + THREAD_STATE_SETUP_FLAGS_SLI_LOGIC_ENABLED | + THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED); + + return NV_TRUE; +} + +void RmShutdownRm(void) +{ + NV_PRINTF(LEVEL_INFO, "shutdown rm\n"); + + RmDestroyRegistry(NULL); + + // Free objects created with RmInitRm, including the system object + RmDestroyRm(); +} + +// +// osAttachGpu +// +// This routine is used as a callback by the gpumgrAttachGpu +// interface to allow os-dependent code to set up any state +// before engine construction begins. +// +NV_STATUS osAttachGpu( + OBJGPU *pGpu, + void *pOsGpuInfo +) +{ + nv_state_t *nv = (nv_state_t *)pOsGpuInfo; + nv_priv_t *nvp; + + nvp = NV_GET_NV_PRIV(nv); + + nvp->pGpu = pGpu; + + NV_SET_NV_STATE(pGpu, (void *)nv); + + initUnixSpecificRegistry(pGpu); + + // Assign default values to Registry keys for VGX + if (os_is_vgx_hyper()) + { + initVGXSpecificRegistry(pGpu); + } + + return NV_OK; +} + +NV_STATUS osDpcAttachGpu( + OBJGPU *pGpu, + void *pOsGpuInfo +) +{ + return NV_OK; // Nothing to do for unix +} + +void osDpcDetachGpu( + OBJGPU *pGpu +) +{ + return; // Nothing to do for unix +} + +NV_STATUS +osHandleGpuLost +( + OBJGPU *pGpu +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 pmc_boot_0; + + // Determine if we've already run the handler + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED)) + { + return NV_OK; + } + + pmc_boot_0 = NV_PRIV_REG_RD32(nv->regs->map_u, NV_PMC_BOOT_0); + if (pmc_boot_0 != nvp->pmc_boot_0) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfoParams; + NV_STATUS status; + + // + // This doesn't support PEX Reset and Recovery yet. + // This will help to prevent accessing registers of a GPU + // which has fallen off the bus. 
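+ // +// (Detection idiom, for reference: MMIO reads from a surprise-removed + // PCIe device typically complete as all 1s, so the NV_PMC_BOOT_0 value + // read back above no longer matches the nvp->pmc_boot_0 cached at + // attach time, which is what the enclosing check keys off.)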
+ // + nvErrorLog_va((void *)pGpu, ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS, + "GPU has fallen off the bus."); + + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "GPU has fallen off the bus.\n"); + + pBoardInfoParams = portMemAllocNonPaged(sizeof(*pBoardInfoParams)); + if (pBoardInfoParams != NULL) + { + portMemSet(pBoardInfoParams, 0, sizeof(*pBoardInfoParams)); + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO, + pBoardInfoParams, + sizeof(*pBoardInfoParams)); + if (status == NV_OK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "GPU serial number is %s.\n", + pBoardInfoParams->serialNumber); + } + + portMemFree(pBoardInfoParams); + } + + gpuSetDisconnectedProperties(pGpu); + + // Trigger the OS's PCI recovery mechanism + if (nv_pci_trigger_recovery(nv) != NV_OK) + { + // + // Initiate a crash dump immediately, since the OS doesn't appear + // to have a mechanism wired up for attempted recovery. + // + (void) RmLogGpuCrash(pGpu); + } + else + { + // + // Make the SW state stick around until the recovery can start, but + // don't change the PDB property: this is only used to report to + // clients whether or not persistence mode is enabled, and we'll + // need it after the recovery callbacks to restore the correct + // persistence mode for the GPU. + // + osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, NV_TRUE); + } + + DBG_BREAKPOINT(); + } + + return NV_OK; +} + +/* + * Initialize the required GPU information by doing RMAPI control calls + * and store the same in the UNIX specific data structures. + */ +static NV_STATUS +RmInitGpuInfoWithRmApi +( + OBJGPU *pGpu +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams = { 0 }; + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status != NV_OK) + { + return status; + } + + pGpuInfoParams = portMemAllocNonPaged(sizeof(*pGpuInfoParams)); + if (pGpuInfoParams == NULL) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + return NV_ERR_NO_MEMORY; + } + + + portMemSet(pGpuInfoParams, 0, sizeof(*pGpuInfoParams)); + + pGpuInfoParams->gpuInfoListSize = 3; + pGpuInfoParams->gpuInfoList[0].index = NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED; + pGpuInfoParams->gpuInfoList[1].index = NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED; + pGpuInfoParams->gpuInfoList[2].index = NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY; + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GPU_GET_INFO_V2, + pGpuInfoParams, sizeof(*pGpuInfoParams)); + + if (status == NV_OK) + { + nvp->b_4k_page_isolation_required = + (pGpuInfoParams->gpuInfoList[0].data == + NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_YES); + nvp->b_mobile_config_enabled = + (pGpuInfoParams->gpuInfoList[1].data == + NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_YES); + nv->dma_buf_supported = + (pGpuInfoParams->gpuInfoList[2].data == + NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES); + } + + portMemFree(pGpuInfoParams); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + return status; +} + +static void RmSetSocDispDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvAddr = (GPUHWREG*) nv->regs->map; + 
gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvPAddr = nv->regs->cpu_address; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvLength = (NvU32) nv->regs->size; +} + +static void RmSetSocDpauxDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void RmSetSocHdacodecDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void RmSetSocMipiCalDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_MIPICAL].gpuNvAddr = (GPUHWREG*) nv->mipical_regs->map; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_MIPICAL].gpuNvPAddr = nv->mipical_regs->cpu_address; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_MIPICAL].gpuNvLength = nv->mipical_regs->size; +} + +static void +osInitNvMapping( + nv_state_t *nv, + NvU32 *pDeviceReference, + UNIX_STATUS *status +) +{ + OBJGPU *pGpu; + OBJSYS *pSys = SYS_GET_INSTANCE(); + GPUATTACHARG *gpuAttachArg; + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 deviceInstance; + NvU32 data = 0; + + NV_PRINTF(LEVEL_INFO, "osInitNvMapping:\n"); + + // allocate the next available gpu device number + status->rmStatus = gpumgrAllocGpuInstance(pDeviceReference); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot get valid gpu instance\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + return; + } + + // RM_BASIC_LOCK_MODEL: allocate GPU lock + status->rmStatus = rmGpuLockAlloc(*pDeviceReference); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** cannot allocate GPU lock\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + return; + } + + // attach default single-entry broadcast device for this gpu + status->rmStatus = gpumgrCreateDevice(&deviceInstance, NVBIT(*pDeviceReference), NULL); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot attach bc gpu\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_CREATE_DEV_FAILED); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + + // init attach state + gpuAttachArg = portMemAllocNonPaged(sizeof(GPUATTACHARG)); + if (gpuAttachArg == NULL) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot allocate gpuAttachArg\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + + portMemSet(gpuAttachArg, 0, sizeof(GPUATTACHARG)); + + if (NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + gpuAttachArg->socDeviceArgs.specified = NV_TRUE; + + RmSetSocDispDeviceMappings(gpuAttachArg, nv); + + RmSetSocDpauxDeviceMappings(gpuAttachArg, nv); + + RmSetSocHdacodecDeviceMappings(gpuAttachArg, nv); + + RmSetSocMipiCalDeviceMappings(gpuAttachArg, nv); + + gpuAttachArg->socDeviceArgs.socChipId0 = nv->disp_sw_soc_chip_id; + + gpuAttachArg->socDeviceArgs.iovaspaceId = nv->iovaspace_id; + } + else + { + gpuAttachArg->fbPhysAddr = nv->fb->cpu_address; + gpuAttachArg->fbBaseAddr = (GPUHWREG*) 0; // not mapped + gpuAttachArg->devPhysAddr = nv->regs->cpu_address; + gpuAttachArg->regBaseAddr = (GPUHWREG*) nv->regs->map; + gpuAttachArg->intLine = 0; // don't know yet + gpuAttachArg->instPhysAddr = nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address; + gpuAttachArg->instBaseAddr = (GPUHWREG*) 0; // not mapped + + gpuAttachArg->regLength = nv->regs->size; + gpuAttachArg->fbLength = nv->fb->size; + 
gpuAttachArg->instLength = nv->bars[NV_GPU_BAR_INDEX_IMEM].size; + + gpuAttachArg->iovaspaceId = nv->iovaspace_id; + } + + // + // we need this to check if we are running on virtual GPU + // in gpuBindHal function later. + // + gpuAttachArg->nvDomainBusDeviceFunc = nv_encode_pci_info(&nv->pci_info); + + gpuAttachArg->bRequestFwClientRm = nv->request_fw_client_rm; + + gpuAttachArg->pOsAttachArg = (void *)nv; + + // use gpu manager to attach gpu + status->rmStatus = gpumgrAttachGpu(*pDeviceReference, gpuAttachArg); + portMemFree(gpuAttachArg); + if (status->rmStatus != NV_OK) + { + gpumgrDestroyDevice(deviceInstance); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ATTACH_GPU_FAILED); + NV_PRINTF(LEVEL_ERROR, "*** Cannot attach gpu\n"); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + nvp->flags |= NV_INIT_FLAG_GPUMGR_ATTACH; + + pGpu = gpumgrGetGpu(*pDeviceReference); + + sysInitRegistryOverrides(pSys); + + sysApplyLockingPolicy(pSys); + + pGpu->busInfo.IntLine = nv->interrupt_line; + pGpu->dmaStartAddress = (RmPhysAddr)nv_get_dma_start_address(nv); + if (nv->fb != NULL) + { + pGpu->registerAccess.gpuFbAddr = (GPUHWREG*) nv->fb->map; + pGpu->busInfo.gpuPhysFbAddr = nv->fb->cpu_address; + } + + // set default parent gpu + gpumgrSetParentGPU(pGpu, pGpu); + + NV_PRINTF(LEVEL_INFO, "device instance : %d\n", *pDeviceReference); + NV_PRINTF(LEVEL_INFO, "NV regs using linear address : 0x%p\n", + pGpu->deviceMappings[SOC_DEV_MAPPING_DISP].gpuNvAddr); + NV_PRINTF(LEVEL_INFO, + "NV fb using linear address : 0x%p\n", pGpu->registerAccess.gpuFbAddr); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_FALSE); + + if (!os_is_vgx_hyper()) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE); + } + + if ((osReadRegistryDword(NULL, + NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, + &data) == NV_OK) && data) + { + + nv->preserve_vidmem_allocations = NV_TRUE; + } +} + +static inline void +RmSetDeviceDmaAddressSize( + nv_state_t *nv, + NvU8 numDmaAddressBits +) +{ + nv_set_dma_address_size(nv, numDmaAddressBits); +} + +static NV_STATUS +RmInitDeviceDma( + nv_state_t *nv +) +{ + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + POBJVMM pVmm = SYS_GET_VMM(pSys); + POBJVASPACE pIOVAS; + NV_STATUS status = vmmCreateVaspace(pVmm, IO_VASPACE_A, + nv->iovaspace_id, 0, 0ULL, ~0ULL, + 0ULL, 0ULL, + NULL, VASPACE_FLAGS_ENABLE_VMM, + &pIOVAS); + if (status != NV_OK) + { + return status; + } + } + + return NV_OK; +} + +static void +RmTeardownDeviceDma( + nv_state_t *nv +) +{ + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + POBJVMM pVmm = SYS_GET_VMM(pSys); + POBJVASPACE pIOVAS; + + if (NV_OK == vmmGetVaspaceFromId(pVmm, nv->iovaspace_id, IO_VASPACE_A, &pIOVAS)) + { + vmmDestroyVaspace(pVmm, pIOVAS); + } + } +} + +static void +RmInitNvDevice( + NvU32 deviceReference, + UNIX_STATUS *status +) +{ + // set the device context + OBJGPU *pGpu = gpumgrGetGpu(deviceReference); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + NV_PRINTF(LEVEL_INFO, "RmInitNvDevice:\n"); + + NV_PRINTF(LEVEL_INFO, + "device instance : 0x%08x\n", deviceReference); + + // initialize all engines -- calls back osInitMapping() + status->rmStatus = gpumgrStatePreInitGpu(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot pre-initialize the device\n"); + 
RM_SET_ERROR(*status, RM_INIT_GPU_PRE_INIT_FAILED); + return; + } + + RmSetDeviceDmaAddressSize(nv, gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM)); + + os_disable_console_access(); + + status->rmStatus = gpumgrStateInitGpu(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_STATE_INIT_FAILED); + os_enable_console_access(); + return; + } + nvp->flags |= NV_INIT_FLAG_GPU_STATE; + + status->rmStatus = gpumgrStateLoadGpu(pGpu, GPU_STATE_DEFAULT); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "*** Cannot load state into the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_LOAD_FAILED); + os_enable_console_access(); + return; + } + nvp->flags |= NV_INIT_FLAG_GPU_STATE_LOAD; + + os_enable_console_access(); + + status->rmStatus = gpuPerformUniversalValidation_HAL(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Failed universal validation\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_UNIVERSAL_VALIDATION_FAILED); + return; + } + + return; +} + +static void RmTeardownDpauxRegisters( + nv_state_t *nv +) +{ +} + +static void RmTeardownHdacodecRegisters( + nv_state_t *nv +) +{ +} + +static void RmTeardownMipiCalRegisters( + nv_state_t *nv +) +{ + if (nv->mipical_regs && nv->mipical_regs->map) + { + osUnmapKernelSpace(nv->mipical_regs->map, + nv->mipical_regs->size); + nv->mipical_regs->map = NULL; + } +} + +static NV_STATUS +RmTeardownRegisters( + nv_state_t *nv +) +{ + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "Tearing down registers\n"); + + if (nv->regs && nv->regs->map) + { + osUnmapKernelSpace(nv->regs->map, nv->regs->size); + nv->regs->map = 0; + nv->regs->map_u = NULL; + } + + RmTeardownDpauxRegisters(nv); + + RmTeardownHdacodecRegisters(nv); + + RmTeardownMipiCalRegisters(nv); + + return NV_OK; +} + +static NV_STATUS +RmSetupDpauxRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static NV_STATUS +RmSetupHdacodecRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static NV_STATUS +RmSetupMipiCalRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + if (nv->mipical_regs != NULL) + { + nv_os_map_kernel_space(nv, nv->mipical_regs); + if (nv->mipical_regs->map == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to map mipical registers!!\n"); + RM_SET_ERROR(*status, RM_INIT_REG_SETUP_FAILED); + status->rmStatus = NV_ERR_OPERATING_SYSTEM; + return NV_ERR_GENERIC; + } + } + + if (nv->mipical_regs != NULL) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " MIPICAL: " NvP64_fmt " " NvP64_fmt " 0x%p\n", + nv->mipical_regs->cpu_address, nv->mipical_regs->size, nv->mipical_regs->map); + } + + return NV_OK; +} + +static void +RmSetupRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + NV_STATUS ret; + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmSetupRegisters for 0x%x:0x%x\n", + nv->pci_info.vendor_id, nv->pci_info.device_id); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "pci config info:\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " registers look like: " NvP64_fmt " " NvP64_fmt, + nv->regs->cpu_address, nv->regs->size); + + if (nv->fb != NULL) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " fb looks like: " NvP64_fmt " " NvP64_fmt, + nv->fb->cpu_address, nv->fb->size); + } + + { + nv_os_map_kernel_space(nv, nv->regs); + } + + if (nv->regs->map == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to map regs registers!!\n"); + RM_SET_ERROR(*status, RM_INIT_REG_SETUP_FAILED); + status->rmStatus = NV_ERR_OPERATING_SYSTEM; + return; + } + 
NV_DEV_PRINTF(NV_DBG_SETUP, nv, "Successfully mapped framebuffer and registers\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "final mappings:\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " regs: " NvP64_fmt " " NvP64_fmt " 0x%p\n", + nv->regs->cpu_address, nv->regs->size, nv->regs->map); + + ret = RmSetupDpauxRegisters(nv, status); + if (ret != NV_OK) + goto err_unmap_disp_regs; + + ret = RmSetupHdacodecRegisters(nv, status); + if (ret != NV_OK) + { + RmTeardownDpauxRegisters(nv); + goto err_unmap_disp_regs; + } + + ret = RmSetupMipiCalRegisters(nv, status); + if (ret != NV_OK) + { + RmTeardownHdacodecRegisters(nv); + RmTeardownDpauxRegisters(nv); + goto err_unmap_disp_regs; + } + + return; + +err_unmap_disp_regs: + if (nv->regs && nv->regs->map) + { + osUnmapKernelSpace(nv->regs->map, nv->regs->size); + nv->regs->map = 0; + } + + return; +} + +NvBool RmInitPrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp; + NvU32 gpuId; + NvU32 pmc_boot_0 = 0; + NvU32 pmc_boot_42 = 0; + + NV_SET_NV_PRIV(pNv, NULL); + + if (!NV_IS_SOC_DISPLAY_DEVICE(pNv) && !NV_IS_SOC_IGPU_DEVICE(pNv)) + { + pNv->regs->map_u = os_map_kernel_space(pNv->regs->cpu_address, + os_page_size, + NV_MEMORY_UNCACHED); + if (pNv->regs->map_u == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to map GPU registers (DISABLE_INTERRUPTS).\n"); + return NV_FALSE; + } + + pmc_boot_0 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_0); + pmc_boot_42 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_42); + + os_unmap_kernel_space(pNv->regs->map_u, os_page_size); + pNv->regs->map_u = NULL; + } + + if (os_alloc_mem((void **)&nvp, sizeof(*nvp)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate private device state.\n"); + return NV_FALSE; + } + + gpuId = nv_generate_id_from_pci_info(&pNv->pci_info); + + if (gpumgrRegisterGpuId(gpuId, nv_encode_pci_info(&pNv->pci_info)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to register GPU with GPU manager.\n"); + os_free_mem(nvp); + return NV_FALSE; + } + + pNv->gpu_id = gpuId; + + pNv->iovaspace_id = nv_requires_dma_remap(pNv) ? gpuId : + NV_IOVA_DOMAIN_NONE; + + // + // Set up a reasonable default DMA address size, based on the minimum + // possible on currently supported GPUs. + // + RmSetDeviceDmaAddressSize(pNv, NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH); + + os_mem_set(nvp, 0, sizeof(*nvp)); + nvp->status = NV_ERR_INVALID_STATE; + nvp->pmc_boot_0 = pmc_boot_0; + nvp->pmc_boot_42 = pmc_boot_42; + NV_SET_NV_PRIV(pNv, nvp); + + return NV_TRUE; +} + +void RmClearPrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(pNv); + NvU32 status; + void *pVbiosCopy = NULL; + void *pRegistryCopy = NULL; + NvU32 vbiosSize; + nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS]; + nv_dynamic_power_t dynamicPowerCopy; + NvU32 x = 0; + NvU32 pmc_boot_0, pmc_boot_42; + + // + // Do not clear private state after GPU resets, it is used while + // recovering the GPU. Only clear the pGpu pointer, which is + // restored during next initialization cycle. 
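+ // For reference, the save/restore sequence below preserves: status, the + // cached VBIOS image and size, the registry copy, the dynamic power + // state, the PMC_BOOT_0/PMC_BOOT_42 values, and the i2c adapter table; + // everything else in nv_priv_t is zeroed by the portMemSet().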
+ // + if (pNv->flags & NV_FLAG_IN_RECOVERY) + { + nvp->pGpu = NULL; + } + + status = nvp->status; + pVbiosCopy = nvp->pVbiosCopy; + vbiosSize = nvp->vbiosSize; + pRegistryCopy = nvp->pRegistry; + dynamicPowerCopy = nvp->dynamic_power; + pmc_boot_0 = nvp->pmc_boot_0; + pmc_boot_42 = nvp->pmc_boot_42; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + i2c_adapters[x] = nvp->i2c_adapters[x]; + } + + portMemSet(nvp, 0, sizeof(nv_priv_t)); + + nvp->status = status; + nvp->pVbiosCopy = pVbiosCopy; + nvp->vbiosSize = vbiosSize; + nvp->pRegistry = pRegistryCopy; + nvp->dynamic_power = dynamicPowerCopy; + nvp->pmc_boot_0 = pmc_boot_0; + nvp->pmc_boot_42 = pmc_boot_42; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + nvp->i2c_adapters[x] = i2c_adapters[x]; + } + + nvp->flags |= NV_INIT_FLAG_PUBLIC_I2C; +} + +void RmFreePrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(pNv); + + gpumgrUnregisterGpuId(pNv->gpu_id); + + RmDestroyRegistry(pNv); + + if (nvp != NULL) + { + portMemFree(nvp->pVbiosCopy); + os_free_mem(nvp); + } + + NV_SET_NV_PRIV(pNv, NULL); +} + +NvBool RmPartiallyInitAdapter( + nv_state_t *nv +) +{ + NV_PRINTF(LEVEL_INFO, "%s: %04x:%02x:%02x.0\n", __FUNCTION__, + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot); + + nv_start_rc_timer(nv); + + return NV_TRUE; +} + +static NV_STATUS +RmInitX86Emu( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + PORT_UNREFERENCED_VARIABLE(nv); + +#if NVCPU_IS_X86_64 + status = RmInitX86EmuState(pGpu); +#else + // We don't expect a "primary VGA" adapter on non-amd64 platforms + NV_ASSERT(!NV_PRIMARY_VGA(nv)); +#endif + + return status; +} + +static NV_STATUS RmRegisterGpudb( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus; + const NvU8 *pGid; + nv_state_t *pNv = NV_GET_NV_STATE(pGpu); + + pGid = RmGetGpuUuidRaw(pNv); + if (pGid == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get UUID\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + rmStatus = gpudbRegisterGpu(pGid, &pGpu->gpuClData.upstreamPort.addr, + pGpu->busInfo.nvDomainBusDeviceFunc); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to register GPU with GPU data base\n"); + } + + return rmStatus; +} + +static void RmUnixFreeRmApi( + nv_state_t *nv +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if (nv->rmapi.hClient != 0) + { + pRmApi->Free(pRmApi, nv->rmapi.hClient, nv->rmapi.hClient); + } + + portMemSet(&nv->rmapi, 0, sizeof(nv->rmapi)); +} + +static NvBool RmUnixAllocRmApi( + nv_state_t *nv, + NvU32 deviceId +) +{ + NV0080_ALLOC_PARAMETERS deviceParams = { 0 }; + NV2080_ALLOC_PARAMETERS subDeviceParams = { 0 }; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + portMemSet(&nv->rmapi, 0, sizeof(nv->rmapi)); + + if (pRmApi->AllocWithHandle( + pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &nv->rmapi.hClient) != NV_OK) + { + goto fail; + } + + // + // Any call to rmapiDelPendingDevices() will internally delete the UNIX OS + // layer RMAPI handles. Set this flag to preserve these handles. These + // handles will be freed explicitly by RmUnixFreeRmApi(). 
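+ // +// Handle hierarchy allocated below (sketch): hClient (NV01_ROOT) owns + // hDevice (NV01_DEVICE_0); hDevice owns hSubDevice (NV20_SUBDEVICE_0) and + // hDisp (NV04_DISPLAY_COMMON); hSubDevice owns hI2C (NV40_I2C). hI2C and + // hDisp are optional and left as 0 when their class is unavailable.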
+ // + if (!rmclientSetClientFlagsByHandle(nv->rmapi.hClient, + RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT)) + { + goto fail; + } + + deviceParams.deviceId = deviceId; + + if (pRmApi->Alloc( + pRmApi, + nv->rmapi.hClient, + nv->rmapi.hClient, + &nv->rmapi.hDevice, + NV01_DEVICE_0, + &deviceParams) != NV_OK) + { + goto fail; + } + + subDeviceParams.subDeviceId = 0; + + if (pRmApi->Alloc( + pRmApi, + nv->rmapi.hClient, + nv->rmapi.hDevice, + &nv->rmapi.hSubDevice, + NV20_SUBDEVICE_0, + &subDeviceParams) != NV_OK) + { + goto fail; + } + + // + // The NV40_I2C allocation is expected to fail if it is disabled + // with RM config. + // + if (pRmApi->Alloc( + pRmApi, + nv->rmapi.hClient, + nv->rmapi.hSubDevice, + &nv->rmapi.hI2C, + NV40_I2C, + NULL) != NV_OK) + { + nv->rmapi.hI2C = 0; + } + + // + // The NV04_DISPLAY_COMMON allocation is expected to fail on displayless + // systems. The nv->rmapi.hDisp value needs to be checked before doing + // display-related control calls. + // + if (pRmApi->Alloc( + pRmApi, + nv->rmapi.hClient, + nv->rmapi.hDevice, + &nv->rmapi.hDisp, + NV04_DISPLAY_COMMON, + NULL) != NV_OK) + { + nv->rmapi.hDisp = 0; + } + + return NV_TRUE; + +fail: + RmUnixFreeRmApi(nv); + return NV_FALSE; +} + +NvBool RmInitAdapter( + nv_state_t *nv +) +{ + NvU32 devicereference = 0; + UNIX_STATUS status = INIT_UNIX_STATUS; + nv_priv_t *nvp; + NvBool retVal = NV_FALSE; + OBJSYS *pSys; + OBJGPU *pGpu = NULL; + OBJOS *pOS; + KernelDisplay *pKernelDisplay; + const void *gspFwHandle = NULL; + const void *gspFwLogHandle = NULL; + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmInitAdapter\n"); + + nv->flags &= ~NV_FLAG_PASSTHRU; + + RmSetupRegisters(nv, &status); + if (! RM_INIT_SUCCESS(status.initStatus) ) + goto failed; + + nvp = NV_GET_NV_PRIV(nv); + nvp->status = NV_ERR_OPERATING_SYSTEM; + + status.rmStatus = RmInitDeviceDma(nv); + if (status.rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot configure the device for DMA\n"); + RM_SET_ERROR(status, RM_INIT_GPU_DMA_CONFIGURATION_FAILED); + goto shutdown; + } + + nvp->flags |= NV_INIT_FLAG_DMA; + + pSys = SYS_GET_INSTANCE(); + + // + // WAR: If the below UEFI property is set, display RM will attempt to read + // the state cache during RM init in order to retrieve a snapshot of the + // display state that the UEFI driver has already programmed. On Orin + // (T234D), the UEFI boot flow is being enabled on Linux, but our UEFI + // driver doesn't have any display support right now. As such, our UEFI + // driver won't allocate any of the display channels, which means that RM + // will attempt to read the state cache for uninitialized channels. WAR this + // issue by un-setting the below UEFI property for now. + // + // JIRA task TDS-5094 tracks adding display support to the UEFI driver. + // + if (NV_IS_SOC_DISPLAY_DEVICE(nv)) { + pSys->setProperty(pSys, PDB_PROP_SYS_IS_UEFI, NV_FALSE); + } + + // + // Get firmware from the OS, if requested, and decide if RM will run as a + // firmware client. + // + if (nv->request_firmware) + { + nv->request_fw_client_rm = NV_TRUE; + } + + // initialize the RM device register mapping + osInitNvMapping(nv, &devicereference, &status); + if (! RM_INIT_SUCCESS(status.initStatus) ) + { + switch (status.rmStatus) + { + case NV_ERR_NOT_SUPPORTED: + nvp->status = NV_ERR_NOT_SUPPORTED; + break; + } + NV_PRINTF(LEVEL_ERROR, + "osInitNvMapping failed, bailing out of RmInitAdapter\n"); + goto shutdown; + } + + // + // now we can have a pdev for the first time... 
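+ // (gpumgrGetGpu() below returns the OBJGPU instance attached in + // osInitNvMapping() above; from here on, per-device RM state is + // reachable through pGpu.)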
+ // + pGpu = gpumgrGetGpu(devicereference); + + pOS = SYS_GET_OS(pSys); + + RmInitAcpiMethods(pOS, pSys, pGpu); + + if (IS_GSP_CLIENT(pGpu) && IsT234D(pGpu)) + { + status.rmStatus = dceclientDceRmInit(pGpu, GPU_GET_DCECLIENTRM(pGpu), NV_TRUE); + if (status.rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot initialize DCE firmware RM\n"); + RM_SET_ERROR(status, RM_INIT_FIRMWARE_INIT_FAILED); + goto shutdown; + } + } + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay != NULL) + { + kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, NV_TRUE); + } + + if (IS_PASSTHRU(pGpu)) + nv->flags |= NV_FLAG_PASSTHRU; + + // finally, initialize the device + RmInitNvDevice(devicereference, &status); + if (! RM_INIT_SUCCESS(status.initStatus) ) + { + NV_PRINTF(LEVEL_ERROR, + "RmInitNvDevice failed, bailing out of RmInitAdapter\n"); + switch (status.rmStatus) + { + case NV_ERR_INSUFFICIENT_POWER: + nvp->status = NV_ERR_INSUFFICIENT_POWER; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "GPU does not have the necessary power cables connected.\n"); + break; + } + goto shutdown; + } + + // LOCK: acquire GPUs lock + status.rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + if (status.rmStatus != NV_OK) + { + goto shutdown; + } + + status.rmStatus = osVerifySystemEnvironment(pGpu); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_SYS_ENVIRONMENT_FAILED); + switch (status.rmStatus) + { + case NV_ERR_IRQ_NOT_FIRING: + nvp->status = NV_ERR_IRQ_NOT_FIRING; + break; + } + NV_PRINTF(LEVEL_ERROR, "RmVerifySystemEnvironment failed, bailing!\n"); + goto shutdown; + } + + nv_start_rc_timer(nv); + + nvp->status = NV_OK; + + if (!RmUnixAllocRmApi(nv, devicereference)) { + RM_SET_ERROR(status, RM_INIT_ALLOC_RMAPI_FAILED); + status.rmStatus = NV_ERR_GENERIC; + goto shutdown; + } + + status.rmStatus = RmInitGpuInfoWithRmApi(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_GPUINFO_WITH_RMAPI_FAILED); + goto shutdown; + } + + status.rmStatus = RmInitX86Emu(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_VBIOS_X86EMU_FAILED); + NV_PRINTF(LEVEL_ERROR, + "RmInitX86Emu failed, bailing out of RmInitAdapter\n"); + goto shutdown; + } + + // i2c only on master device?? + RmI2cAddGpuPorts(nv); + nvp->flags |= NV_INIT_FLAG_PUBLIC_I2C; + + nv->flags &= ~NV_FLAG_IN_RECOVERY; + + pOS->setProperty(pOS, PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED, NV_TRUE); + + RmInitS0ixPowerManagement(nv); + RmInitDeferredDynamicPowerManagement(nv); + + if (!NV_IS_SOC_DISPLAY_DEVICE(nv) && !NV_IS_SOC_IGPU_DEVICE(nv)) + { + status.rmStatus = RmRegisterGpudb(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_GPUDB_REGISTER_FAILED); + goto shutdown; + } + } + + if (nvp->b_mobile_config_enabled) + { + NvU32 ac_plugged = 0; + if (nv_acpi_get_powersource(&ac_plugged) == NV_OK) + { + // LOCK: acquire GPU lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_NONE) == NV_OK) + { + // + // As we have already acquired the API Lock here, we are + // calling RmSystemEvent directly instead of rm_system_event. 
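+ // (rm_system_event() is the external entry point and presumably + // re-acquires the API lock on entry, which would deadlock against + // the lock already held on this path.)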
+ // + RmSystemEvent(nv, NV_SYSTEM_ACPI_BATTERY_POWER_EVENT, !ac_plugged); + + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + } + } + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmInitAdapter succeeded!\n"); + + retVal = NV_TRUE; + goto done; + + shutdown: + nv->flags &= ~NV_FLAG_IN_RECOVERY; + + // call ShutdownAdapter to undo anything we've done above + RmShutdownAdapter(nv); + + failed: + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "RmInitAdapter failed! (0x%x:0x%x:%d)\n", + status.initStatus, status.rmStatus, status.line); + +done: + nv_put_firmware(gspFwHandle); + nv_put_firmware(gspFwLogHandle); + + return retVal; +} + +void RmShutdownAdapter( + nv_state_t *nv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + NV_STATUS rmStatus; + + if ((pGpu != NULL) && (nvp->flags & NV_INIT_FLAG_GPUMGR_ATTACH)) + { + NvU32 gpuInstance = gpuGetInstance(pGpu); + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + OBJSYS *pSys = SYS_GET_INSTANCE(); + + RmUnixFreeRmApi(nv); + + nv->ud.cpu_address = 0; + nv->ud.size = 0; + + // LOCK: acquire GPUs lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + RmDestroyDeferredDynamicPowerManagement(nv); + + gpuFreeEventHandle(pGpu); + + rmapiSetDelPendingClientResourcesFromGpuMask(NVBIT(gpuInstance)); + rmapiDelPendingDevices(NVBIT(gpuInstance)); + + os_disable_console_access(); + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) + { + rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + NV_ASSERT(rmStatus == NV_OK); + } + + if (IS_GSP_CLIENT(pGpu) && IsT234D(pGpu)) + { + rmStatus = dceclientDceRmInit(pGpu, GPU_GET_DCECLIENTRM(pGpu), NV_FALSE); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "DCE firmware RM Shutdown failure\n"); + } + } + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE) + { + rmStatus = gpuStateDestroy(pGpu); + NV_ASSERT(rmStatus == NV_OK); + } + + os_enable_console_access(); + + //if (nvp->flags & NV_INIT_FLAG_HAL) + // destroyHal(pDev); + +#if NVCPU_IS_X86_64 + RmFreeX86EmuState(pGpu); +#endif + + gpumgrDetachGpu(gpuInstance); + gpumgrDestroyDevice(deviceInstance); + + if (nvp->flags & NV_INIT_FLAG_DMA) + { + RmTeardownDeviceDma(nv); + } + + RmClearPrivateState(nv); + + RmUnInitAcpiMethods(pSys); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(deviceInstance); + } + } + else + { + RmClearPrivateState(nv); + } + + RmTeardownRegisters(nv); +} + +void RmPartiallyDisableAdapter( + nv_state_t *nv +) +{ + NV_PRINTF(LEVEL_INFO, "%s: RM is in SW Persistence mode\n", __FUNCTION__); + + nv_stop_rc_timer(nv); +} + +void RmDisableAdapter( + nv_state_t *nv +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + NvU32 gpuMask; + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY, NV_TRUE); + nv->flags |= NV_FLAG_IN_RECOVERY; + } + + // + // Free the client allocated resources. + // + // This needs to happen prior to tearing down SLI state when SLI is enabled. + // + // Note this doesn't free RM internal resource allocations. Those are + // freed during (gpumgrUpdateSLIConfig->...->)gpuStateUnload. + // + // We need to free resources for all GPUs linked in a group as + // gpumgrUpdateSLIConfig will teardown GPU state for the entire set. 
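+ // +// gpuMask is a bitmask of GPU instances: for a hypothetical two-way group + // built from instances 0 and 2 it would be NVBIT(0) | NVBIT(2), so the + // two calls below sweep pending client resources for both GPUs in one + // pass.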
+ // + gpuMask = gpumgrGetGpuMask(pGpu); + + rmapiSetDelPendingClientResourcesFromGpuMask(gpuMask); + rmapiDelPendingDevices(gpuMask); + + // LOCK: acquire GPUs lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + nv_stop_rc_timer(nv); + + os_disable_console_access(); + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) + { + rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + NV_ASSERT(rmStatus == NV_OK); + nvp->flags &= ~NV_INIT_FLAG_GPU_STATE_LOAD; + } + + os_enable_console_access(); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } +} + +NV_STATUS RmGetAdapterStatus( + nv_state_t *pNv, + NvU32 *pStatus +) +{ + // + // This status is determined in RmInitAdapter(); the glue layer + // requests it when the adapter failed to initialize to learn + // more about the error condition. This is currently limited to + // osVerifySystemEnvironment() failures. + // + nv_priv_t *nvp; + + nvp = NV_GET_NV_PRIV(pNv); + if (nvp == NULL) + { + return NV_ERR_INVALID_STATE; + } + + *pStatus = nvp->status; + return NV_OK; +} + +static void initUnixSpecificRegistry( + OBJGPU *pGpu +) +{ + // By default, enable GPU reset on Unix + osWriteRegistryDword(pGpu, "RMSecBusResetEnable", 1); + osWriteRegistryDword(pGpu, "RMForcePcieConfigSave", 1); + +} + +void +osRemoveGpu( + NvU32 domain, + NvU8 bus, + NvU8 device +) +{ + void *handle; + + handle = os_pci_init_handle(domain, bus, device, 0, NULL, NULL); + if (handle != NULL) + { + os_pci_remove(handle); + } +} + +NV_STATUS RmExcludeAdapter( + nv_state_t *nv +) +{ + return NV_ERR_NOT_SUPPORTED; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c new file mode 100644 index 0000000..f6489d2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c @@ -0,0 +1,1016 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************* OS Memory Descriptor APIs *****************************\ +* * +* This contains routines to create and destroy OS memory descriptors * +* * +****************************************************************************/ + +#include // NV device driver interface +#include +#include +#include +#include +#include +#include + +static NV_STATUS osCreateOsDescriptorFromPageArray(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void **); +static void osDestroyOsDescriptorPageArray(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromIoMemory(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromIoMemory(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromPhysAddr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromPhysAddr(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromFileHandle(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static NV_STATUS osCreateOsDescriptorFromDmaBufPtr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromDmaBuf(PMEMORY_DESCRIPTOR); +static NV_STATUS osCreateOsDescriptorFromSgtPtr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromSgt(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCheckGpuBarsOverlapAddrRange(NvRangeU64 addrRange); + +NV_STATUS +osCreateMemFromOsDescriptor +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + NvU32 descriptorType, + RS_PRIV_LEVEL privilegeLevel +) +{ + RmClient* pClient; + NV_STATUS rmStatus; + void *pPrivate; + + if ((pDescriptor == NvP64_NULL) || + (*pLimit == 0) || + (serverutilGetClientUnderLock(hClient, &pClient) != NV_OK)) + { + return NV_ERR_INVALID_PARAM_STRUCT; + } + + // + // For the sake of simplicity, unmatched RM and OS page + // sizes are not currently supported in this path, except for + // PPC64LE and aarch64. + // + // Also, the nvmap handle is sent which can be any random number so + // the virtual address alignment sanity check can't be done here. + // + if (!NVCPU_IS_PPC64LE && + !NVCPU_IS_AARCH64 && + (NV_RM_PAGE_SIZE != os_page_size)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // The two checks below use cached privilege because they + // concern the privilege level of the client, and not the + // privilege level of the calling context which may be + // overridden to KERNEL at some internal callsites. + // + + // + // The RM cannot obtain a table of physical addresses + // for a kernel virtual address range on all of + // the supported UNIX platforms. Since this path is + // not really compelling for kernel allocations on any + // of those platforms, it is not supported. + // For UVM, they could have pre-allocated sysmem to register + // with RM so we put in an exception for that case. + // + if ((rmclientGetCachedPrivilege(pClient) >= RS_PRIV_LEVEL_KERNEL) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR and + // NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR can only be utilized by kernel space + // rm-clients. 
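+ // +// Rough privilege summary for the descriptor types handled below: kernel + // rm-clients may use OS_PHYS_ADDR, OS_FILE_HANDLE, OS_DMA_BUF_PTR and + // OS_SGT_PTR; user rm-clients may use OS_PAGE_ARRAY, OS_IO_MEMORY and + // OS_FILE_HANDLE; VIRTUAL_ADDRESS is rejected for everyone.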
+ // + if ((rmclientGetCachedPrivilege(pClient) < RS_PRIV_LEVEL_KERNEL) && + ((descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR) || + (descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR))) + { + return NV_ERR_NOT_SUPPORTED; + } + + switch (descriptorType) + { + case NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS: + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR: + if (privilegeLevel < RS_PRIV_LEVEL_KERNEL) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + } + rmStatus = osCreateOsDescriptorFromPhysAddr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY: + rmStatus = osCreateOsDescriptorFromIoMemory(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY: + rmStatus = osCreateOsDescriptorFromPageArray(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE: + rmStatus = osCreateOsDescriptorFromFileHandle(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR: + rmStatus = osCreateOsDescriptorFromDmaBufPtr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR: + rmStatus = osCreateOsDescriptorFromSgtPtr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + default: + rmStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + return rmStatus; +} + +static NV_STATUS +osCreateMemdescFromPages +( + OBJGPU *pGpu, + NvU64 size, + NvU32 flags, + NvU32 cacheType, + MEMORY_DESCRIPTOR **ppMemDesc, + void *pImportPriv, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + NvU32 gpuCachedFlags; + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0, + NV_MEMORY_NONCONTIGUOUS, ADDR_SYSMEM, + cacheType, memdescFlags); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags)) + gpuCachedFlags = NV_MEMORY_CACHED; + else + gpuCachedFlags = NV_MEMORY_UNCACHED; + + pMemDesc = *ppMemDesc; + rmStatus = nv_register_user_pages(NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetPteArray(pMemDesc, AT_CPU), pImportPriv, + ppPrivate); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. + // + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback needs to unpin the pages, but that will already be done + // as part of failure handling further up the stack if memdescMapIommu() + // fails. So we only set up the priv-data cleanup callback once we're sure + // this call will succeed. 
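+ // +// For reference, the resulting order of operations is: + // (1) memdescSetMemData(pMemDesc, *ppPrivate, NULL) -- no destructor yet; + // (2) memdescMapIommu() -- on failure, deflate/unregister/destroy here; + // (3) only after this function returns NV_OK does the caller install the + // real cleanup callback (see osCreateOsDescriptorFromPageArray).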
+ // + memdescSetMemData(pMemDesc, *ppPrivate, NULL); + + rmStatus = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + if (rmStatus != NV_OK) + { + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_user_pages(NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + NULL /* import_priv */, ppPrivate); + memdescDestroy(pMemDesc); + return rmStatus; + } + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromPageArray +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + + *ppPrivate = NvP64_VALUE(pDescriptor); + + // + // Since the only type of memory permitted in this path + // is anonymous user memory, certain restrictions + // apply for the allocation flags: + // + // 1) anonymous memory is write-back cacheable, hence + // the _COHERENCY flag must match. + // + // 2) the RM has no control over the location of the + // associated pages in memory and thus cannot + // honor requests for contiguous memory. + // + // These restrictions are enforced here to avoid subtle + // bugs later on. + // + if ((!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, flags) && + !FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, flags)) || + FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + rmStatus = osCreateMemdescFromPages(pGpu, (*pLimit + 1), flags, + NV_MEMORY_CACHED, ppMemDesc, + NULL /* pImportPriv */, ppPrivate); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(*ppMemDesc, memdescGetMemData(*ppMemDesc), + osDestroyOsDescriptorPageArray); + + return NV_OK; +} + +/*! + * @brief Checks if the given address range overlaps with the BARs for any of + * the GPUs. 
+ */ +static NV_STATUS +osCheckGpuBarsOverlapAddrRange +( + NvRangeU64 addrRange +) +{ + NvRangeU64 gpuPhysAddrRange; + NvRangeU64 gpuPhysFbAddrRange; + NvRangeU64 gpuPhysInstAddrRange; + NvU32 gpuInstance; + OBJGPU *pGpu; + NvU32 gpuMask; + NV_STATUS rmStatus; + + rmStatus = gpumgrGetGpuAttachInfo(NULL, &gpuMask); + NV_ASSERT_OR_RETURN(rmStatus == NV_OK, rmStatus); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + NV_INIT_RANGE(gpuPhysFbAddrRange, gpumgrGetGpuPhysFbAddr(pGpu), + gpumgrGetGpuPhysFbAddr(pGpu) + pGpu->fbLength -1); + + NV_INIT_RANGE(gpuPhysAddrRange, pGpu->busInfo.gpuPhysAddr, + pGpu->busInfo.gpuPhysAddr + pGpu->deviceMappings[0].gpuNvLength -1); + + NV_INIT_RANGE(gpuPhysInstAddrRange, pGpu->busInfo.gpuPhysInstAddr, + pGpu->busInfo.gpuPhysInstAddr + pGpu->instLength -1); + + if (NV_IS_OVERLAPPING_RANGE(gpuPhysFbAddrRange, addrRange) || + NV_IS_OVERLAPPING_RANGE(gpuPhysAddrRange, addrRange) || + NV_IS_OVERLAPPING_RANGE(gpuPhysInstAddrRange, addrRange)) + { + return NV_ERR_INVALID_ADDRESS; + } + } + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromIoMemory +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + NvU32 gpuCachedFlags; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 *pPteArray; + NvRangeU64 physAddrRange; + NvU64 *base = 0; + NvBool bAllowMmap; + + // + // Unlike the page array path, this one deals exclusively + // with I/O memory, which is expected to be contiguous + // physically, and which may only be accessed with uncached + // transactions. + // + if (!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, flags) || + !FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + // + // _PEER_MAP_OVERRIDE flag is controlled by the RM and not the client. + // + // RM will set the _PEER_MAP_OVERRIDE_REQUIRED flag itself for IO memory + // memory imported with RmVidHeapControl. + // + if (FLD_TEST_DRF(OS02, _FLAGS, _PEER_MAP_OVERRIDE, _REQUIRED, flags)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + bAllowMmap = !FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NEVER_MAP, flags); + + base = (void *)(NvUPtr)pDescriptor; + + // + // There is an architectural deadlock scenario involved when full-duplex P2P + // enabled over BAR1. See #3 in the description of bug 1571948 which explains + // the classic deadlock. So, make sure to error out usermode's memory + // registration if a memory range falls within any of the available GPU's + // BAR window. 
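The BAR-overlap test above treats NvRangeU64 as an inclusive [min, max] interval (note the `base + length - 1` arithmetic when the ranges are built). Assuming that convention, NV_IS_OVERLAPPING_RANGE reduces to the classic interval test, sketched standalone here with illustrative names:

#include <stdint.h>
#include <stdio.h>

struct range_u64 { uint64_t min, max; };   /* inclusive bounds assumed */

static int ranges_overlap(struct range_u64 a, struct range_u64 b)
{
    /* Two inclusive intervals are disjoint exactly when one ends before
     * the other begins; overlap is the negation of that. */
    return !(a.max < b.min || b.max < a.min);
}

int main(void)
{
    struct range_u64 bar = { 0xf0000000ull, 0xf7ffffffull }; /* pretend BAR */
    struct range_u64 req = { 0xf7ffffffull, 0xf8000fffull }; /* touches end */

    printf("%d\n", ranges_overlap(bar, req));   /* 1: one shared byte    */
    req.min = 0xf8000000ull;
    printf("%d\n", ranges_overlap(bar, req));   /* 0: adjacent, disjoint */
    return 0;
}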
+ // + physAddrRange.min = *base; + physAddrRange.max = *base + *pLimit; + + rmStatus = osCheckGpuBarsOverlapAddrRange(physAddrRange); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): phys range 0x%016llx-0x%016llx overlaps with GPU BARs", + __FUNCTION__, physAddrRange.min, physAddrRange.max); + return rmStatus; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, (*pLimit + 1), 0, + NV_MEMORY_CONTIGUOUS, ADDR_SYSMEM, + NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): error %d while attempting to create the MMIO mapping\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + pMemDesc = *ppMemDesc; + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags)) + gpuCachedFlags = NV_MEMORY_CACHED; + else + gpuCachedFlags = NV_MEMORY_UNCACHED; + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM, NV_TRUE); + + pPteArray = memdescGetPteArray(pMemDesc, AT_CPU); + pPteArray[0] = *base; + + *ppPrivate = NULL; + + if (bAllowMmap) + { + rmStatus = nv_register_peer_io_mem(NV_GET_NV_STATE(pGpu), pPteArray, + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + ppPrivate); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + } + + memdescSetMemData(pMemDesc, *ppPrivate, NULL); + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback needs to unpin the pages, but that will already be done + // as part of failure handling further up the stack if memdescMapIommu() + // fails. So we only set up the priv-data cleanup callback once we're sure + // this call will succeed. + // + rmStatus = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + if (rmStatus != NV_OK) + { + if (*ppPrivate != NULL) + { + nv_unregister_peer_io_mem(NV_GET_NV_STATE(pGpu), *ppPrivate); + } + memdescDestroy(pMemDesc); + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(pMemDesc, memdescGetMemData(pMemDesc), + osDestroyOsDescriptorFromIoMemory); + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromPhysAddr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 *pPteArray; + NvU64 base = 0; + NvU32 cache_type = NV_MEMORY_CACHED; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + + // Currently only work with contiguous sysmem allocations + if (!FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_TYPE_SYNCPOINT, _APERTURE, flags)) + { + // Syncpoint memory is uncached, DMA mapping needs to skip CPU sync. 
+ cache_type = NV_MEMORY_UNCACHED; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + base = (NvU64)pDescriptor; + rmStatus = memdescCreate(ppMemDesc, pGpu, (*pLimit + 1), 0, + NV_MEMORY_CONTIGUOUS, ADDR_SYSMEM, + cache_type, memdescFlags); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): error %d while creating memdesc for kernel memory\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + pMemDesc = *ppMemDesc; + + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + pPteArray = memdescGetPteArray(pMemDesc, AT_CPU); + pPteArray[0] = base; + + *ppPrivate = NULL; + rmStatus = nv_register_phys_pages(NV_GET_NV_STATE(pGpu), pPteArray, + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetCpuCacheAttrib(pMemDesc), + ppPrivate); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + memdescSetMemData(pMemDesc, *ppPrivate, + osDestroyOsDescriptorFromPhysAddr); + + return NV_OK; +} + +static NV_STATUS +_createMemdescFromDmaBufSgtHelper +( + OBJGPU *pGpu, + NvU32 flags, + void *pImportPriv, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate, + MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 cacheType = NV_MEMORY_UNCACHED; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + NvU32 gpuCachedFlags; + + NV_ASSERT((pMemDataReleaseCallback == osDestroyOsDescriptorFromDmaBuf) || + (pMemDataReleaseCallback == osDestroyOsDescriptorFromSgt)); + + if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE, flags)) + { + cacheType = NV_MEMORY_WRITECOMBINED; + } + else if (!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, flags)) + { + cacheType = NV_MEMORY_CACHED; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0, + NV_MEMORY_NONCONTIGUOUS, ADDR_SYSMEM, + cacheType, memdescFlags); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags)) + { + gpuCachedFlags = NV_MEMORY_CACHED; + } + else + { + gpuCachedFlags = NV_MEMORY_UNCACHED; + } + + pMemDesc = *ppMemDesc; + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + *ppPrivate = NULL; + rmStatus = nv_register_sgt(NV_GET_NV_STATE(pGpu), + memdescGetPteArray(pMemDesc, AT_CPU), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetCpuCacheAttrib(pMemDesc), + ppPrivate, + pImportSgt, + pImportPriv); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. 
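When RM pages are smaller than OS pages, each OS PTE must expand into several RM PTEs, as the comment below on inflating the PTE array describes. A plausible standalone model of that expansion, assuming 4 KiB RM pages, 64 KiB OS pages, and in-place growth of the same array (the actual RmInflateOsToRmPageArray may differ in detail): walking backwards lets every OS entry be read before its slot is overwritten.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RM_PAGE_SIZE  4096u
#define OS_PAGE_SIZE 65536u                        /* e.g. 64K-page kernels */
#define RM_PER_OS    (OS_PAGE_SIZE / RM_PAGE_SIZE)

/* ptes[] holds os_count addresses on entry and os_count * RM_PER_OS
 * addresses on exit; the array must have room for the inflated form. */
static void inflate_os_to_rm(uint64_t *ptes, size_t os_count)
{
    for (size_t i = os_count * RM_PER_OS; i-- > 0; )
        ptes[i] = ptes[i / RM_PER_OS] + (i % RM_PER_OS) * RM_PAGE_SIZE;
}

int main(void)
{
    uint64_t ptes[2 * RM_PER_OS] = { 0x100000, 0x300000 };
    inflate_os_to_rm(ptes, 2);
    printf("0x%" PRIx64 " 0x%" PRIx64 "\n", ptes[0], ptes[16]); /* 0x100000 0x300000 */
    printf("0x%" PRIx64 "\n", ptes[1]);                         /* 0x101000 */
    return 0;
}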
+ // + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + memdescSetMemData(*ppMemDesc, *ppPrivate, NULL); + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback does teardown that will already be done as part of failure + // handling further up the stack if memdescMapIommu() fails. So we only + // setup the priv-data cleanup callback once we're sure this call will + // succeed. + // + rmStatus = memdescMapIommu(*ppMemDesc, pGpu->busInfo.iovaspaceId); + if (rmStatus != NV_OK) + { + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + if (*ppPrivate != NULL) + { + nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt, + (void **) &pImportPriv, *ppPrivate); + } + memdescDestroy(pMemDesc); + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(*ppMemDesc, *ppPrivate, pMemDataReleaseCallback); + + return rmStatus; +} + +static NV_STATUS +_createMemdescFromDmaBuf +( + OBJGPU *pGpu, + NvU32 flags, + nv_dma_buf_t *pImportPriv, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = + _createMemdescFromDmaBufSgtHelper(pGpu, flags, pImportPriv, pImportSgt, + size, ppMemDesc, ppPrivate, + osDestroyOsDescriptorFromDmaBuf); + if (rmStatus != NV_OK) + { + nv_dma_release_dma_buf(pImportPriv); + } + + return rmStatus; +} + +static NV_STATUS +_createMemdescFromSgt +( + OBJGPU *pGpu, + NvU32 flags, + struct drm_gem_object *pImportPrivGem, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = + _createMemdescFromDmaBufSgtHelper(pGpu, flags, pImportPrivGem, + pImportSgt, size, ppMemDesc, + ppPrivate, + osDestroyOsDescriptorFromSgt); + if (rmStatus != NV_OK) + { + nv_dma_release_sgt(pImportSgt, pImportPrivGem); + } + + return rmStatus; +} + +static nv_dma_device_t *GetDmaDeviceForImport +( + nv_state_t *nv, + NvU32 flags +) +{ + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags) && + (nv->niso_dma_dev != NULL)) + { + return nv->niso_dma_dev; + } + else + { + return nv->dma_dev; + } +} + +static NV_STATUS +osCreateOsDescriptorFromFileHandle +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_dma_device_t *dma_dev = NULL; + NvU32 size = 0; + nv_dma_buf_t *pImportPriv = NULL; + struct sg_table *pImportSgt = NULL; + NvS32 fd; + + fd = (NvS32)((NvU64)pDescriptor); + if ((NvU64)fd != (NvU64)pDescriptor) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): fd must fit within a signed 32-bit integer!\n", + __FUNCTION__); + return NV_ERR_INVALID_ARGUMENT; + } + + dma_dev = GetDmaDeviceForImport(nv, flags); + rmStatus = nv_dma_import_from_fd(dma_dev, fd, &size, + &pImportSgt, &pImportPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import fd!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromDmaBuf(pGpu, flags, pImportPriv, + pImportSgt, + size, ppMemDesc, ppPrivate); +} + +static NV_STATUS 
+osCreateOsDescriptorFromSgtPtr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS *params = + (NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS*)((NvUPtr) pDescriptor); + + struct sg_table *sgt = params->sgt; + struct drm_gem_object *gem = params->gem; + + rmStatus = nv_dma_import_sgt(nv->dma_dev, sgt, gem); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import sgt!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromSgt(pGpu, flags, gem, sgt, + (*pLimit + 1), ppMemDesc, ppPrivate); +} + +static NV_STATUS +osCreateOsDescriptorFromDmaBufPtr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_dma_device_t *dma_dev = NULL; + NvU32 size = 0; + nv_dma_buf_t *pImportPriv = NULL; + struct sg_table *pImportSgt = NULL; + void *dmaBuf = (void*)((NvUPtr)pDescriptor); + + dma_dev = GetDmaDeviceForImport(nv, flags); + rmStatus = nv_dma_import_dma_buf(dma_dev, dmaBuf, &size, + &pImportSgt, &pImportPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import dma_buf!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromDmaBuf(pGpu, flags, pImportPriv, + pImportSgt, + size, ppMemDesc, ppPrivate); +} + +static void +osDestroyOsDescriptorFromPhysAddr +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate; + + pPrivate = memdescGetMemData(pMemDesc); + NV_ASSERT(pPrivate != NULL); + + nv_unregister_phys_pages(NV_GET_NV_STATE(pGpu), pPrivate); +} + +static void +osDestroyOsDescriptorFromIoMemory +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + if (pPrivate == NULL) + { + return; + } + + nv_unregister_peer_io_mem(NV_GET_NV_STATE(pGpu), pPrivate); +} + +static void +osDestroyOsDescriptorPageArray +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NvU64 osPageCount = NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount); + NV_STATUS status; + void *pPrivate; + + pPrivate = memdescGetMemData(pMemDesc); + + NV_ASSERT(pPrivate != NULL); + + // + // TODO: Bug 1811006: Notably skip any IOMMU mapping management as the + // pMemDesc->pGpu might have been torn down already and the pGpu passed in + // doesn't necessarily have IOMMU mappings. For now just allow memdescDestroy() + // to clean up whatever is there (this may not work correctly either if any + // of the IOMMU mappings have outlasted their VASPACEs). This should + // be cleaned up once the fix for bug 1811006 is known. 
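Stepping back to the file-handle import path above (osCreateOsDescriptorFromFileHandle): the fd check there is a narrow-then-widen round trip, rejecting any 64-bit descriptor that does not survive conversion to a signed 32-bit fd. A standalone sketch of the same test, with invented names:

#include <stdint.h>
#include <stdio.h>

/* Returns 0 and stores the fd if desc round-trips through int32_t,
 * -1 otherwise -- the same comparison as
 * (NvU64)fd != (NvU64)pDescriptor in the driver code. */
static int descriptor_to_fd(uint64_t desc, int32_t *fd_out)
{
    int32_t fd = (int32_t)desc;
    if ((uint64_t)fd != desc)
        return -1;
    *fd_out = fd;
    return 0;
}

int main(void)
{
    int32_t fd;
    printf("%d\n", descriptor_to_fd(7, &fd));             /*  0, fd = 7          */
    printf("%d\n", descriptor_to_fd(0xffffffffull, &fd)); /* -1: would read as -1 */
    printf("%d\n", descriptor_to_fd(1ull << 32, &fd));    /* -1: truncates       */
    return 0;
}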
+ // + + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_user_pages(NV_GET_NV_STATE(pGpu), osPageCount, + NULL /* import_priv */, &pPrivate); + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_FOREIGN_PAGE) == NV_FALSE) + { + status = os_unlock_user_pages(osPageCount, pPrivate); + NV_ASSERT(status == NV_OK); + } + else + { + os_free_mem(pPrivate); + } +} + +static void +osDestroyOsDescriptorFromDmaBuf +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + struct sg_table *pImportSgt; + void *pImportPriv; + + /* + * Unmap IOMMU now or we will get a kernel crash when it is unmapped after + * pImportSgt is freed. + */ + memdescUnmapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt, + &pImportPriv, pPrivate); + + /* + * pImportSgt doesn't need to be passed to nv_dma_release_dma_buf() because + * the DMA-BUF associated with pImportPriv already has a reference to the + * SGT. + */ + + nv_dma_release_dma_buf(pImportPriv); +} + +static void +osDestroyOsDescriptorFromSgt +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + struct sg_table *pImportSgt; + struct drm_gem_object *pImportPrivGem; + + NV_ASSERT(pPrivate != NULL); + + /* + * Unmap IOMMU now or we will get a kernel crash when it is unmapped after + * pImportSgt is freed. + */ + memdescUnmapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt, + (void **) &pImportPrivGem, pPrivate); + + nv_dma_release_sgt(pImportSgt, pImportPrivGem); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c new file mode 100644 index 0000000..7d70602 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Routines ***************************\ +* * +* Fills in os specific function pointers for the Unix OS object. * +* * +\***************************************************************************/ + +#include +#include + +static void initOSSpecificFunctionPointers(OBJOS *); +static void initMiscOSFunctionPointers(OBJOS *); +static void initUnixOSFunctionPointers(OBJOS *); +static void initOSSpecificProperties(OBJOS *); + +void +osInitObjOS(OBJOS *pOS) +{ + initOSSpecificFunctionPointers(pOS); + initOSSpecificProperties(pOS); +} + +static void +initOSSpecificFunctionPointers(OBJOS *pOS) +{ + initMiscOSFunctionPointers(pOS); + initUnixOSFunctionPointers(pOS); +} + +static void +initMiscOSFunctionPointers(OBJOS *pOS) +{ + pOS->osQueueWorkItem = osQueueWorkItem; + pOS->osQueueWorkItemWithFlags = osQueueWorkItemWithFlags; + pOS->osQueueSystemWorkItem = osQueueSystemWorkItem; +} + +static void +initUnixOSFunctionPointers(OBJOS *pOS) +{ +#if defined(NVCPU_X86_64) + pOS->osNv_rdcr4 = nv_rdcr4; + pOS->osNv_cpuid = nv_cpuid; +#endif + + pOS->osCallACPI_DSM = osCallACPI_DSM; + pOS->osCallACPI_DDC = osCallACPI_DDC; + pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM; + pOS->osCallACPI_DOD = osCallACPI_DOD; + pOS->osCallACPI_MXDM = osCallACPI_MXDM; + pOS->osCallACPI_MXDS = osCallACPI_MXDS; + + pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled; +} + +static void +initOSSpecificProperties +( + OBJOS *pOS +) +{ + pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE); + pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE); + pOS->setProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET, NV_TRUE); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c new file mode 100644 index 0000000..36df48f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+
+#include
+#include
+#include
+
+static NV_STATUS
+RmPowerManagementInternalTegra(
+    OBJGPU *pGpu,
+    nv_pm_action_t pmAction
+)
+{
+    //
+    // Default to NV_OK. There may be cases where resman is loaded, but
+    // no devices are allocated (we're still at the console). In these
+    // cases, it's fine to let the system do whatever it wants.
+    //
+    NV_STATUS rmStatus = NV_OK;
+
+    if (pGpu)
+    {
+        nv_state_t *nv = NV_GET_NV_STATE(pGpu);
+        nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
+
+        switch (pmAction)
+        {
+            case NV_PM_ACTION_HIBERNATE:
+                nvp->pm_state.InHibernate = NV_TRUE;
+                // Fall through: hibernate takes the same state-unload path
+                // as standby here, and the assignment below overrides
+                // InHibernate so that the resume handler (which only
+                // services the !InHibernate case) reloads the GPU.
+            case NV_PM_ACTION_STANDBY:
+                nvp->pm_state.InHibernate = NV_FALSE;
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_TRUE);
+                rmStatus = gpuStateUnload(pGpu,
+                    IS_GPU_GC6_STATE_ENTERING(pGpu) ?
+                        GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION | GPU_STATE_FLAGS_GC6_TRANSITION :
+                        GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
+                pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_TRUE);
+                break;
+
+            case NV_PM_ACTION_RESUME:
+                if (!nvp->pm_state.InHibernate)
+                {
+                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH, NV_TRUE);
+                    rmStatus = gpuStateLoad(pGpu,
+                        IS_GPU_GC6_STATE_ENTERING(pGpu) ?
+                            GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION | GPU_STATE_FLAGS_GC6_TRANSITION :
+                            GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
+                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_FALSE);
+                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_FALSE);
+                    pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH, NV_FALSE);
+                }
+                break;
+
+            default:
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                break;
+        }
+
+    }
+
+    return rmStatus;
+}
+
+NV_STATUS NV_API_CALL rm_power_management(
+    nvidia_stack_t *sp,
+    nv_state_t *pNv,
+    nv_pm_action_t pmAction
+)
+{
+    THREAD_STATE_NODE threadState;
+    NV_STATUS rmStatus = NV_OK;
+    void *fp;
+
+    NV_ENTER_RM_RUNTIME(sp,fp);
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+
+    NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
+
+    // LOCK: acquire API lock
+    if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
+    {
+        OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
+
+        if (pGpu != NULL)
+        {
+            // LOCK: acquire GPUs lock
+            if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
+            {
+                rmStatus = RmPowerManagementInternalTegra(pGpu, pmAction);
+
+                //
+                // RmPowerManagementInternalTegra() is most likely to fail due to
+                // gpuStateUnload() failures deep in the RM's GPU power
+                // management paths. However, those paths make no
+                // attempt to unwind in case of errors. Rather, they
+                // soldier on and simply report an error at the very end.
+                // GPU software state meanwhile will indicate the GPU
+                // has been suspended.
+                //
+                // Sadly, in case of an error during suspend/hibernate,
+                // the only path forward here is to attempt to resume the
+                // GPU, accepting that the odds of success will vary.
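The recovery policy described in the comment above and implemented just below is a "roll forward" pattern: on a failed suspend, immediately attempt a resume rather than trying to unwind, and report the original failure. A minimal standalone sketch, with invented names:

#include <stdio.h>

enum pm_action { PM_STANDBY, PM_RESUME };

static int transition(enum pm_action action)
{
    if (action == PM_STANDBY)
        return -1;                 /* pretend state-unload failed midway */
    puts("resumed");
    return 0;
}

/* Report the original failure, but first try to bring the device back up;
 * the resume attempt is best-effort and its status is deliberately dropped. */
static int power_management(enum pm_action action)
{
    int rc = transition(action);
    if (rc != 0 && action != PM_RESUME)
        (void)transition(PM_RESUME);
    return rc;
}

int main(void)
{
    int rc = power_management(PM_STANDBY);
    printf("suspend status: %d\n", rc);    /* -1, but a resume was attempted */
    return 0;
}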
+ // + if (rmStatus != NV_OK && pmAction != NV_PM_ACTION_RESUME) + { + RmPowerManagementInternalTegra(pGpu, NV_PM_ACTION_RESUME); + } + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + } + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue)); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c new file mode 100644 index 0000000..32462da --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c @@ -0,0 +1,524 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#if defined(DEBUG_REGISTRY) +#define DBG_REG_PRINTF(a, ...) \ + NV_PRINTF(LEVEL_INFO, a, ##__VA_ARGS__) +#else +#define DBG_REG_PRINTF(a, ...) 
+#endif + +static NvS32 stringCaseCompare( + const char *string1, + const char *string2 +) +{ + NvU8 c1, c2; + + do + { + c1 = *string1, c2 = *string2; + if (c1 >= 'A' && c1 <= 'Z') + c1 += ('a' - 'A'); + if (c2 >= 'A' && c2 <= 'Z') + c2 += ('a' - 'A'); + string1++, string2++; + } + while ((c1 == c2) && (c1 != '\0')); + + return (c1 - c2); +} + +static nv_reg_entry_t *the_registry = NULL; + +static nv_reg_entry_t* regCreateNewRegistryKey( + nv_state_t *nv, + const char *regParmStr +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *new_reg = NULL; + char *new_ParmStr = NULL; + NvU32 parm_size; + + if (regParmStr == NULL) + { + DBG_BREAKPOINT(); + return NULL; + } + + new_reg = portMemAllocNonPaged(sizeof(nv_reg_entry_t)); + if (NULL == new_reg) + { + NV_PRINTF(LEVEL_ERROR, "failed to grow registry\n"); + return NULL; + } + + portMemSet(new_reg, 0, sizeof(nv_reg_entry_t)); + + if (regParmStr != NULL) + { + parm_size = (portStringLength(regParmStr) + 1); + new_ParmStr = portMemAllocNonPaged(parm_size); + if (NULL == new_ParmStr) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate registry param string\n"); + portMemFree(new_reg); + return NULL; + } + + NV_ASSERT(parm_size <= NVOS38_MAX_REGISTRY_STRING_LENGTH); + + if (portMemCopy(new_ParmStr, parm_size, regParmStr, parm_size) == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to copy registry param string\n"); + portMemFree(new_ParmStr); + portMemFree(new_reg); + return NULL; + } + } + + new_reg->regParmStr = new_ParmStr; + new_reg->type = NV_REGISTRY_ENTRY_TYPE_UNKNOWN; + + if (nvp != NULL) + { + new_reg->next = nvp->pRegistry; + nvp->pRegistry = new_reg; + DBG_REG_PRINTF("local registry now at 0x%p\n", nvp->pRegistry); + } + else + { + new_reg->next = the_registry; + the_registry = new_reg; + DBG_REG_PRINTF("global registry now at 0x%p\n", the_registry); + } + + return new_reg; +} + +static NV_STATUS regFreeEntry(nv_reg_entry_t *tmp) +{ + portMemFree(tmp->regParmStr); + tmp->regParmStr = NULL; + { + portMemFree(tmp->pdata); + tmp->pdata = NULL; + tmp->len = 0; + } + portMemFree(tmp); + + return NV_OK; +} + +static nv_reg_entry_t* regFindRegistryEntry( + nv_state_t *nv, + const char *regParmStr, + NvU32 type, + NvBool *bGlobalEntry +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *tmp; + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + if (nvp != NULL) + { + tmp = nvp->pRegistry; + DBG_REG_PRINTF(" local registry at 0x%p\n", tmp); + + while ((tmp != NULL) && (tmp->regParmStr != NULL)) + { + DBG_REG_PRINTF(" Testing against %s\n", + tmp->regParmStr); + if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) && + (type == tmp->type)) + { + DBG_REG_PRINTF(" found a match!\n"); + if (bGlobalEntry) + *bGlobalEntry = NV_FALSE; + return tmp; + } + tmp = tmp->next; + } + } + + tmp = the_registry; + DBG_REG_PRINTF(" global registry at 0x%p\n", tmp); + + while ((tmp != NULL) && (tmp->regParmStr != NULL)) + { + DBG_REG_PRINTF(" Testing against %s\n", + tmp->regParmStr); + if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) && + (type == tmp->type)) + { + DBG_REG_PRINTF(" found a match!\n"); + if (bGlobalEntry) + *bGlobalEntry = NV_TRUE; + return tmp; + } + tmp = tmp->next; + } + + DBG_REG_PRINTF(" no match\n"); + return NULL; +} + +NV_STATUS RmWriteRegistryDword( + nv_state_t *nv, + const char *regParmStr, + NvU32 Data +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if (regParmStr == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s -> 0x%x\n", __FUNCTION__, regParmStr, Data); + + tmp 
= regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_DWORD, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + tmp->data = Data; + + if (stringCaseCompare(regParmStr, "ResmanDebugLevel") == 0) + { + os_dbg_set_level(Data); + } + + return NV_OK; + } + + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + return NV_ERR_GENERIC; + + tmp->type = NV_REGISTRY_ENTRY_TYPE_DWORD; + tmp->data = Data; + + return NV_OK; +} + +NV_STATUS RmReadRegistryDword( + nv_state_t *nv, + const char *regParmStr, + NvU32 *Data +) +{ + nv_reg_entry_t *tmp; + + if ((regParmStr == NULL) || (Data == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_DWORD, NULL); + if (tmp == NULL) + { + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, NULL); + if ((tmp != NULL) && (tmp->len >= sizeof(NvU32))) + { + *Data = *(NvU32 *)tmp->pdata; + } + else + { + DBG_REG_PRINTF(" not found\n"); + return NV_ERR_GENERIC; + } + } + else + { + *Data = tmp->data; + } + + DBG_REG_PRINTF(" found in the_registry: 0x%x\n", *Data); + + return NV_OK; +} + +NV_STATUS RmReadRegistryBinary( + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 *cbLen +) +{ + nv_reg_entry_t *tmp; + NV_STATUS status; + + if ((regParmStr == NULL) || (Data == NULL) || (cbLen == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, NULL); + if (tmp == NULL) + { + DBG_REG_PRINTF(" not found\n"); + return NV_ERR_GENERIC; + } + + DBG_REG_PRINTF(" found\n"); + + if (*cbLen >= tmp->len) + { + portMemCopy((NvU8 *)Data, *cbLen, (NvU8 *)tmp->pdata, tmp->len); + *cbLen = tmp->len; + status = NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "buffer (length: %u) is too small (data length: %u)\n", + *cbLen, tmp->len); + status = NV_ERR_GENERIC; + } + + return status; +} + +NV_STATUS RmWriteRegistryBinary( + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if ((regParmStr == NULL) || (Data == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + if (tmp->pdata != NULL) + { + portMemFree(tmp->pdata); + tmp->pdata = NULL; + tmp->len = 0; + } + } + else + { + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to create binary registry entry\n"); + return NV_ERR_GENERIC; + } + } + + tmp->pdata = portMemAllocNonPaged(cbLen); + if (NULL == tmp->pdata) + { + NV_PRINTF(LEVEL_ERROR, "failed to write binary registry entry\n"); + return NV_ERR_GENERIC; + } + + tmp->type = NV_REGISTRY_ENTRY_TYPE_BINARY; + tmp->len = cbLen; + portMemCopy((NvU8 *)tmp->pdata, tmp->len, (NvU8 *)Data, cbLen); + + return NV_OK; +} + +NV_STATUS 
RmWriteRegistryString( + nv_state_t *nv, + const char *regParmStr, + const char *buffer, + NvU32 bufferLength +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if ((regParmStr == NULL) || (buffer == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_STRING, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + if (tmp->pdata != NULL) + { + portMemFree(tmp->pdata); + tmp->len = 0; + tmp->pdata = NULL; + } + } + else + { + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate a string registry entry!\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + + tmp->pdata = portMemAllocNonPaged(bufferLength); + if (tmp->pdata == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to write a string registry entry!\n"); + return NV_ERR_NO_MEMORY; + } + + tmp->type = NV_REGISTRY_ENTRY_TYPE_STRING; + tmp->len = bufferLength; + portMemCopy((void *)tmp->pdata, tmp->len, buffer, (bufferLength - 1)); + tmp->pdata[bufferLength-1] = '\0'; + + return NV_OK; +} + +NV_STATUS RmReadRegistryString( + nv_state_t *nv, + const char *regParmStr, + NvU8 *buffer, + NvU32 *pBufferLength +) +{ + NvU32 bufferLength; + nv_reg_entry_t *tmp; + + if ((regParmStr == NULL) || (buffer == NULL) || (pBufferLength == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + bufferLength = *pBufferLength; + *pBufferLength = 0; + *buffer = '\0'; + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_STRING, NULL); + if (tmp == NULL) + { + return NV_ERR_GENERIC; + } + + if (bufferLength >= tmp->len) + { + portMemCopy((void *)buffer, bufferLength, (void *)tmp->pdata, tmp->len); + *pBufferLength = tmp->len; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "buffer (length: %u) is too small (data length: %u)\n", + bufferLength, tmp->len); + return NV_ERR_BUFFER_TOO_SMALL; + } + + return NV_OK; +} + +NV_STATUS RmInitRegistry(void) +{ + NV_STATUS rmStatus; + + rmStatus = os_registry_init(); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to initialize the OS registry!\n"); + } + + return rmStatus; +} + +NV_STATUS RmDestroyRegistry(nv_state_t *nv) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *tmp; + + if (nvp != NULL) + { + tmp = nvp->pRegistry; + nvp->pRegistry = NULL; + } + else + { + tmp = the_registry; + the_registry = NULL; + } + + while (tmp != NULL) + { + nv_reg_entry_t *entry = tmp; + tmp = tmp->next; + regFreeEntry(entry); + } + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c new file mode 100644 index 0000000..75691ea --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c @@ -0,0 +1,628 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ *
+ * @brief Provides RmExportObject, RmImportObject, RmFreeObjExportHandle and
+ *        RmGetExportObjectInfo interfaces:
+ *
+ *        These interfaces allow rm clients to export their objects into
+ *        a unique RmObjExportHandle which another rm client can
+ *        import, even if the source rm client gets destroyed.
+ *
+ *        RM's device instance may get destroyed asynchronously, in which
+ *        case exported objects residing on that device instance also get
+ *        destroyed. Such an object can no longer be imported, but the
+ *        RmObjExportHandle into which it had been exported remains valid;
+ *        it simply never resolves to another object.
+ *
+ *        There are no init/fini routines; it is the responsibility of the
+ *        rest of RM's ecosystem to make sure that all RmObjExportHandles get
+ *        freed during driver unload.
+ *
+ *        The API lock is expected to be held before calling into
+ *        rmobjexportimport.c; do not hold the GPU or any other lock.
+ */
+
+#include "rmobjexportimport.h"
+#include "nvlimits.h"
+#include "gpu/device/device.h"
+
+#include "containers/map.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/rs_utils.h"
+
+#include "class/cl0080.h"
+#include "class/cl2080.h"
+#include
+#include
+
+//
+// A reference to an RmObjExportHandle
+// generated by function RmGenerateObjExportHandle().
+//
+typedef struct
+{
+    NvU32 deviceInstance;
+} RmObjExportHandleRef;
+MAKE_MAP(RmObjExportHandleMap, RmObjExportHandleRef);
+
+//
+// Memory allocator
+//
+PORT_MEM_ALLOCATOR *pMemAllocator;
+
+//
+// Map RmObjExportHandle -> RmObjExportHandleRef
+//
+RmObjExportHandleMap objExportHandleMap;
+
+//
+// Rm client to use to dup an object exported to RmObjExportHandle. The minimal
+// requirement for duping is to have a device object allocated. This rm client
+// behaves like any other external rm client and gets no special handling.
+//
+// We keep this rm client just like any other external rm client: if
+// gpu(s)/device gets powered-down/uninitialized, rm objects allocated by
+// external rm clients and located on that gpu(s)/device get freed (the
+// os-layer does that). That way, code in this file doesn't need to worry
+// about freeing exported objects located on that gpu(s)/device.
+//
+NvHandle hObjExportRmClient;
+
+//
+// Tracker for device and subdevice handles. For now only one subdevice
+// (instance 0) is supported per device.
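The statics above (the dup client, allocator, and handle map), together with the usage counter declared just below, are managed with lazy, reference-counted setup: the first reference allocates the shared state, the last release frees it. A standalone sketch of the idiom (single-threaded, as the API lock serializes callers here; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static unsigned long refcount;
static int *shared_state;            /* stands in for the dup client + map */

static int module_ref(void)
{
    if (refcount++ != 0)
        return 0;                    /* already set up by an earlier ref */

    shared_state = malloc(sizeof *shared_state);
    if (shared_state == NULL) {
        refcount--;                  /* failed first ref: roll the count back */
        return -1;
    }
    *shared_state = 0;
    return 0;
}

static void module_unref(void)
{
    if (--refcount != 0)
        return;                      /* other users remain */
    free(shared_state);              /* last reference: tear everything down */
    shared_state = NULL;
}

int main(void)
{
    if (module_ref() != 0) return 1;
    if (module_ref() != 0) return 1;
    module_unref();                  /* state survives: refcount still 1 */
    module_unref();                  /* state freed here */
    printf("refcount back to %lu\n", refcount);
    return 0;
}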
+// +typedef struct +{ + NvHandle hRmDevice; + NvHandle hRmSubDevice; +} RmObjExportDevice; + +RmObjExportDevice objExportDevice[NV_MAX_DEVICES]; + +// +// Usage reference counter for static object in this file like rm client used to +// dup an exported object, memory allocator, map etc. +// +NvU64 objExportImportRefCount; + +// +// Static functions for internal use to code in this file. +// +static NV_STATUS RmRefObjExportImport (void); +static void RmUnrefObjExportImport (void); + +static RmObjExportHandle RmGenerateObjExportHandle (NvU32 deviceInstance); +static NV_STATUS RmUnrefObjExportHandle (RmObjExportHandle hObject); + +// +// Free the RmObjExportHandle. +// +static NV_STATUS RmUnrefObjExportHandle(RmObjExportHandle hObject) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + RmObjExportHandleRef *pHandleRef = + mapFind(&objExportHandleMap, hObject); + + if (pHandleRef == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (pRmApi->Free(pRmApi, + hObjExportRmClient, + (NvHandle)mapKey(&objExportHandleMap, pHandleRef)) != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Exported object trying to free was zombie in %s\n", + __FUNCTION__); + } + + mapRemove(&objExportHandleMap, pHandleRef); + + return NV_OK; +} + +// +// Generate unique RmObjExportHandle. +// +static RmObjExportHandle RmGenerateObjExportHandle(NvU32 deviceInstance) +{ + // + // The object export handle belongs to range of 0 to + // (MAX_OBJ_EXPORT_HANDLES - 1). + // + // Handle 0 is considered as invalid object handle, this function generates + // handle from range of 1 to (MAX_OBJ_EXPORT_HANDLES - 1). + // + #define MAX_OBJ_EXPORT_HANDLES 0x80000 + + static NvHandle hObjExportHandleNext = 1; + + RmObjExportHandle hStartHandle = hObjExportHandleNext; + RmObjExportHandle hObject = 0; + + do + { + RmObjExportHandleRef *pHandleRef; + + hObject = hObjExportHandleNext++; + /* Reset hObjExportHandleNext to next valid handle */ + if (hObjExportHandleNext == MAX_OBJ_EXPORT_HANDLES) { + hObjExportHandleNext = 1; + } + + pHandleRef = mapFind(&objExportHandleMap, hObject); + + if (hObject != hObjExportRmClient && pHandleRef == NULL) + { + break; + } + else + { + hObject = 0; + } + + } while(hObjExportHandleNext != hStartHandle); + + if (hObject != 0) + { + RmObjExportHandleRef *pHandleRef = + mapInsertNew(&objExportHandleMap, hObject); + + if (pHandleRef != NULL) + { + pHandleRef->deviceInstance = deviceInstance; + } + else + { + hObject = 0; + } + } + + return hObject; +} + +// +// Validate that the given hObject is not one of our internally used handles. +// +// Note that mapFind(&objExportHandleMap, hObject) could still fail; that is the +// caller's responsibility. +// +static NvBool RmValidateHandleAgainstInternalHandles(RmObjExportHandle hObject) +{ + NvU32 i; + + // + // No external RmObjExportHandle could be valid if hObjExportRmClient has + // not been allocated yet, or if it is equal to any of the handles used + // internally by code in this file. + // + if (objExportImportRefCount == 0 || hObjExportRmClient == 0 || + hObject == hObjExportRmClient) + { + return NV_FALSE; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++) + { + if (objExportDevice[i].hRmDevice != 0 && + (hObject == objExportDevice[i].hRmDevice || + hObject == objExportDevice[i].hRmSubDevice)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +// +// Increment reference count of static objects internally +// used by code in this file. 
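RmGenerateObjExportHandle() above scans forward from a rolling cursor, wrapping past the reserved invalid handle 0, until it finds a free slot or the search returns to its starting point. The same allocator shape, reduced to a runnable sketch with a tiny table standing in for the handle map:

#include <stdbool.h>
#include <stdio.h>

#define MAX_HANDLES 8u      /* tiny stand-in for MAX_OBJ_EXPORT_HANDLES */

static bool in_use[MAX_HANDLES];   /* stands in for the mapFind() lookup */
static unsigned next_handle = 1;   /* handle 0 is reserved as invalid    */

static unsigned alloc_handle(void)
{
    unsigned start = next_handle;

    do {
        unsigned h = next_handle++;
        if (next_handle == MAX_HANDLES)
            next_handle = 1;             /* wrap, skipping the reserved 0 */
        if (!in_use[h]) {
            in_use[h] = true;
            return h;
        }
    } while (next_handle != start);

    return 0;                            /* namespace exhausted */
}

int main(void)
{
    for (unsigned i = 0; i < MAX_HANDLES; i++)
        printf("%u ", alloc_handle());   /* 1..7, then 0 (exhausted) */
    putchar('\n');
    return 0;
}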
+// +static NV_STATUS RmRefObjExportImport(void) +{ + NV_STATUS rmStatus = NV_OK; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if ((objExportImportRefCount++) != 0) + { + NV_ASSERT(hObjExportRmClient != 0); + NV_ASSERT(pMemAllocator != NULL); + return NV_OK; + } + + rmStatus = pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &hObjExportRmClient); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc root in %s\n", __FUNCTION__); + goto failed; + } + + pMemAllocator = portMemAllocatorCreateNonPaged(); + + if (pMemAllocator == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to alloc memory allocator in %s\n", + __FUNCTION__); + goto failed; + } + + mapInit(&objExportHandleMap, pMemAllocator); + + return NV_OK; + +failed: + + RmUnrefObjExportImport(); + + return rmStatus; +} + +// +// Decrement reference count of static objects internally used by code in this +// file, and free them if reference count reaches to zero. +// +static void RmUnrefObjExportImport(void) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if ((--objExportImportRefCount) != 0) + { + return; + } + + if (pMemAllocator != NULL) + { + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++) + { + if (objExportDevice[i].hRmDevice != 0) + { + RmUnrefObjExportHandle(objExportDevice[i].hRmSubDevice); + objExportDevice[i].hRmSubDevice = 0; + RmUnrefObjExportHandle(objExportDevice[i].hRmDevice); + objExportDevice[i].hRmDevice = 0; + } + } + + mapDestroy(&objExportHandleMap); + + portMemAllocatorRelease(pMemAllocator); + pMemAllocator = NULL; + } + + if (hObjExportRmClient != 0) + { + NV_STATUS rmStatus = pRmApi->Free(pRmApi, + hObjExportRmClient, + hObjExportRmClient); + + NV_ASSERT(rmStatus == NV_OK); + hObjExportRmClient = 0; + } +} + +NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject, + RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance) +{ + RmObjExportHandle hDstObject; + NvU32 deviceInstance = NV_MAX_DEVICES; + NvHandle hTmpObject; + NvBool bClientAsDstParent = NV_FALSE; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if (pDstObject == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Find the device instance on which the rm object exists. + // + hTmpObject = hSrcObject; + do + { + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hSrcClient, hTmpObject, &pResourceRef); + if (status != NV_OK) + return status; + + Device *pDevice = dynamicCast(pResourceRef->pResource, Device); + if (pDevice != NULL) + { + deviceInstance = pDevice->deviceInst; + break; + } + + hTmpObject = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0; + } while (hTmpObject != 0); + + // If a memory object is not parented by a device, use client as a parent. + if ((hTmpObject == 0) || (deviceInstance >= NV_MAX_DEVICES)) + { + bClientAsDstParent = NV_TRUE; + } + + status = RmRefObjExportImport(); + + if (status != NV_OK) + { + return status; + } + + if (!bClientAsDstParent && + ((objExportDevice[deviceInstance].hRmDevice == 0) || + serverutilValidateNewResourceHandle(hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice))) + { + // + // Device object has not been created or it got destroyed in the + // teardown path of device instance destruction; allocate a fresh device + // object. 
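Earlier in RmExportObject(), the owning device is found by walking the resource's parent chain until a Device node is seen, falling back to the client as the dup parent when no device exists. That traversal, reduced to a standalone sketch with invented node types:

#include <stdio.h>
#include <stddef.h>

struct res_node {
    const char      *name;
    int              device_inst;   /* >= 0 only on Device nodes */
    struct res_node *parent;
};

/* Walk toward the root; the first Device ancestor determines the instance.
 * Returns -1 when no Device is found, mirroring the bClientAsDstParent
 * fallback above. */
static int find_device_instance(const struct res_node *n)
{
    for (; n != NULL; n = n->parent)
        if (n->device_inst >= 0)
            return n->device_inst;
    return -1;
}

int main(void)
{
    struct res_node client = { "client", -1, NULL    };
    struct res_node device = { "device",  2, &client };
    struct res_node memory = { "memory", -1, &device };

    printf("%d\n", find_device_instance(&memory));  /*  2 */
    printf("%d\n", find_device_instance(&client));  /* -1 */
    return 0;
}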
+ // + NV0080_ALLOC_PARAMETERS params; + NV2080_ALLOC_PARAMETERS subdevParams; + + if (objExportDevice[deviceInstance].hRmDevice == 0) + { + NV_ASSERT(objExportDevice[deviceInstance].hRmSubDevice == 0); + + objExportDevice[deviceInstance].hRmDevice = + RmGenerateObjExportHandle(deviceInstance); + objExportDevice[deviceInstance].hRmSubDevice = + RmGenerateObjExportHandle(deviceInstance); + + if (objExportDevice[deviceInstance].hRmDevice == 0 || + objExportDevice[deviceInstance].hRmSubDevice == 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handles in %s\n", + __FUNCTION__); + + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + portMemSet(¶ms, 0, sizeof(NV0080_ALLOC_PARAMETERS)); + + params.deviceId = deviceInstance; + + status = pRmApi->AllocWithHandle(pRmApi, + hObjExportRmClient, + hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice, + NV01_DEVICE_0, + ¶ms); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc device in %s\n", + __FUNCTION__); + goto done; + } + + portMemSet(&subdevParams, 0, sizeof(NV2080_ALLOC_PARAMETERS)); + + subdevParams.subDeviceId = 0; + + status = pRmApi->AllocWithHandle(pRmApi, + hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice, + objExportDevice[deviceInstance].hRmSubDevice, + NV20_SUBDEVICE_0, + &subdevParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc subdevice in %s\n", + __FUNCTION__); + + (void) pRmApi->Free(pRmApi, hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice); + goto done; + } + } + + hDstObject = RmGenerateObjExportHandle(deviceInstance); + + if (hDstObject == 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handle in %s\n", + __FUNCTION__); + status = NV_ERR_NO_MEMORY; + goto done; + } + + // If duping under device handle fails, try subdevice handle. + status = pRmApi->DupObject(pRmApi, + hObjExportRmClient, + bClientAsDstParent ? hObjExportRmClient : + objExportDevice[deviceInstance].hRmDevice, + &hDstObject, + hSrcClient, + hSrcObject, + 0 /* flags */); + if (status != NV_OK) + { + if (!bClientAsDstParent && (status == NV_ERR_INVALID_OBJECT_PARENT)) + { + NV_PRINTF(LEVEL_INFO, + "pRmApi->DupObject(Dev, failed due to invalid parent in %s." 
+ " Now attempting DupObject with Subdev handle.\n", + __FUNCTION__); + + status = pRmApi->DupObject(pRmApi, + hObjExportRmClient, + objExportDevice[deviceInstance].hRmSubDevice, + &hDstObject, + hSrcClient, + hSrcObject, + 0 /* flags */); + if (status != NV_OK) + { + RmUnrefObjExportHandle(hDstObject); + + NV_PRINTF(LEVEL_ERROR, + "pRmApi->DupObject(Subdev, failed with error code 0x%x in %s\n", + status, __FUNCTION__); + goto done; + } + } + else + { + RmUnrefObjExportHandle(hDstObject); + + NV_PRINTF(LEVEL_ERROR, + "pRmApi->DupObject(Dev, failed with error code 0x%x in %s\n", + status, __FUNCTION__); + goto done; + } + } + + if (pDeviceInstance != NULL) + { + *pDeviceInstance = deviceInstance; + } + + *pDstObject = hDstObject; + +done: + if (status != NV_OK) + { + RmUnrefObjExportImport(); + } + + return status; +} + +void RmFreeObjExportHandle(RmObjExportHandle hObject) +{ + if (!RmValidateHandleAgainstInternalHandles(hObject)) + { + NV_PRINTF(LEVEL_ERROR, "Invalid handle to exported object in %s\n", + __FUNCTION__); + return; + } + + RmUnrefObjExportHandle(hObject); + + RmUnrefObjExportImport(); +} + +NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent, + NvHandle *phDstObject, RmObjExportHandle hSrcObject, + NvU8 *pObjectType) +{ + NV_STATUS status; + NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if (!RmValidateHandleAgainstInternalHandles(hSrcObject)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (mapFind(&objExportHandleMap, hSrcObject) == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pObjectType != NULL) + { + params.hObject = hSrcObject; + params.mapFlags = 0; + params.addrSpaceType = \ + NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + + status = pRmApi->Control(pRmApi, hObjExportRmClient, hObjExportRmClient, + NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE, + ¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GET_ADDR_SPACE_TYPE failed with error code 0x%x in %s\n", + status, __FUNCTION__); + return status; + } + + switch (params.addrSpaceType) + { + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM; + break; + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM; + break; + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC; + break; + default: + NV_ASSERT_OK_OR_RETURN(NV_ERR_INVALID_ARGUMENT); + } + } + + status = pRmApi->DupObject(pRmApi, hDstClient, hDstParent, phDstObject, + hObjExportRmClient, hSrcObject, + 0 /* flags */); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "pRmApi->DupObject(pRmApi, failed with error code 0x%x in %s\n", + status, __FUNCTION__); + return status; + } + + return NV_OK; +} + +NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance) +{ + RmObjExportHandleRef *pHandleRef = NULL; + + if (!RmValidateHandleAgainstInternalHandles(hSrcObject)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pHandleRef = mapFind(&objExportHandleMap, hSrcObject); + if (pHandleRef == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + *deviceInstance = pHandleRef->deviceInstance; + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/exports_link_command.txt b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/exports_link_command.txt new file mode 100644 index 
0000000..c77f379 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/exports_link_command.txt @@ -0,0 +1,103 @@ +--undefined=rm_disable_adapter +--undefined=rm_execute_work_item +--undefined=rm_free_os_event +--undefined=rm_free_private_state +--undefined=rm_cleanup_file_private +--undefined=rm_unbind_lock +--undefined=rm_get_device_name +--undefined=rm_get_vbios_version +--undefined=rm_get_gpu_uuid +--undefined=rm_get_gpu_uuid_raw +--undefined=rm_set_rm_firmware_requested +--undefined=rm_get_firmware_version +--undefined=rm_i2c_remove_adapters +--undefined=rm_i2c_is_smbus_capable +--undefined=rm_i2c_transfer +--undefined=rm_init_adapter +--undefined=rm_init_private_state +--undefined=rm_init_rm +--undefined=rm_ioctl +--undefined=rm_is_supported_device +--undefined=rm_is_supported_pci_device +--undefined=rm_isr +--undefined=rm_isr_bh +--undefined=rm_isr_bh_unlocked +--undefined=rm_perform_version_check +--undefined=rm_power_management +--undefined=rm_stop_user_channels +--undefined=rm_restart_user_channels +--undefined=rm_read_registry_dword +--undefined=rm_run_rc_callback +--undefined=rm_run_nano_timer_callback +--undefined=rm_save_low_res_mode +--undefined=rm_shutdown_adapter +--undefined=rm_exclude_adapter +--undefined=rm_acquire_api_lock +--undefined=rm_release_api_lock +--undefined=rm_acquire_gpu_lock +--undefined=rm_release_gpu_lock +--undefined=rm_acquire_all_gpus_lock +--undefined=rm_release_all_gpus_lock +--undefined=rm_shutdown_rm +--undefined=rm_system_event +--undefined=rm_write_registry_binary +--undefined=rm_write_registry_dword +--undefined=rm_write_registry_string +--undefined=rm_parse_option_string +--undefined=rm_remove_spaces +--undefined=rm_string_token +--undefined=rm_disable_gpu_state_persistence +--undefined=pNVRM_ID +--undefined=rm_p2p_get_pages +--undefined=rm_p2p_get_pages_persistent +--undefined=rm_p2p_get_gpu_info +--undefined=rm_p2p_register_callback +--undefined=rm_p2p_put_pages +--undefined=rm_p2p_put_pages_persistent +--undefined=rm_p2p_dma_map_pages +--undefined=rm_dma_buf_dup_mem_handle +--undefined=rm_dma_buf_undup_mem_handle +--undefined=rm_dma_buf_map_mem_handle +--undefined=rm_dma_buf_unmap_mem_handle +--undefined=rm_dma_buf_get_client_and_device +--undefined=rm_dma_buf_put_client_and_device +--undefined=rm_log_gpu_crash +--undefined=rm_kernel_rmapi_op +--undefined=nv_get_hypervisor_type +--undefined=rm_gpu_copy_mmu_faults +--undefined=rm_gpu_handle_mmu_faults +--undefined=rm_gpu_copy_mmu_faults_unlocked +--undefined=rm_gpu_need_4k_page_isolation +--undefined=rm_is_chipset_io_coherent +--undefined=rm_get_device_remove_flag +--undefined=rm_init_event_locks +--undefined=rm_destroy_event_locks +--undefined=rm_get_gpu_numa_info +--undefined=rm_gpu_numa_online +--undefined=rm_gpu_numa_offline +--undefined=rm_is_device_sequestered +--undefined=nv_vgpu_create_request +--undefined=nv_vgpu_delete +--undefined=nv_vgpu_get_bar_info +--undefined=nv_vgpu_start +--undefined=nv_vgpu_get_type_ids +--undefined=nv_vgpu_get_type_info +--undefined=nv_vgpu_get_sparse_mmap +--undefined=nv_vgpu_update_request +--undefined=nv_vgpu_process_vf_info +--undefined=nv_gpu_bind_event +--undefined=rm_check_for_gpu_surprise_removal +--undefined=rm_set_external_kernel_client_count +--undefined=rm_schedule_gpu_wakeup +--undefined=rm_init_dynamic_power_management +--undefined=rm_cleanup_dynamic_power_management +--undefined=rm_ref_dynamic_power +--undefined=rm_unref_dynamic_power +--undefined=rm_transition_dynamic_power +--undefined=rm_get_vidmem_power_status 
+--undefined=rm_acpi_notify
+--undefined=rm_get_dynamic_power_management_status
+--undefined=rm_get_gpu_gcx_support
+--undefined=rm_is_iommu_needed_for_sriov
+--undefined=rm_disable_iomap_wc
+--undefined=rm_get_clientnvpcf_power_limits
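(How this list is consumed, under assumptions: each line is a GNU ld
"--undefined=SYMBOL" option, which forces SYMBOL to be entered as undefined so
the object that defines it is kept when the resident RM image is relocatably
linked, preserving these OS-interface entry points. A file of options like
this is plausibly passed via the linker's @file response-file syntax, e.g.
"ld -r @exports_link_command.txt -o nv-kernel.o <objects>"; the exact
invocation lives elsewhere in the build and is assumed here.)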
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h
new file mode 100644
index 0000000..2e0d3b1
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h
@@ -0,0 +1,215 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * g_allclasses.h
+ *
+ * Pull in all class headers or class number declarations.
+ * The class list is generated by chip-config from Classes.pm
+ *
+ * NOTE: this file may be included multiple times
+ *
+ */
+
+#if defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER)
+
+#include <class/cl0000.h> // NV01_ROOT
+#include <class/cl0001.h> // NV01_ROOT_NON_PRIV
+#include <class/cl0041.h> // NV01_ROOT_CLIENT
+#include <class/cl0020.h> // NV0020_GPU_MANAGEMENT
+#include <class/cl0080.h> // NV01_DEVICE_0
+#include <class/cl2080.h> // NV20_SUBDEVICE_0
+#include <class/cl2081.h> // NV2081_BINAPI
+#include <class/cl2082.h> // NV2082_BINAPI_PRIVILEGED
+#include <class/cl0002.h> // NV01_CONTEXT_DMA
+#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
+#include <class/cl00c3.h> // NV01_MEMORY_SYNCPOINT
+#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
+#include <class/cl00f2.h> // IO_VASPACE_A
+#include <class/cl0005.h> // NV01_EVENT
+#include <class/cl0078.h> // NV01_EVENT_KERNEL_CALLBACK
+#include <class/cl0079.h> // NV01_EVENT_OS_EVENT
+#include <class/cl007e.h> // NV01_EVENT_KERNEL_CALLBACK_EX
+#include <class/clc372.h> // NVC372_DISPLAY_SW
+#include <class/clc673.h> // NVC673_DISP_CAPABILITIES
+#include <class/cl0073.h> // NV04_DISPLAY_COMMON
+#include <class/clc670.h> // NVC670_DISPLAY
+#include <class/clc671.h> // NVC671_DISP_SF_USER
+#include <class/clc67a.h> // NVC67A_CURSOR_IMM_CHANNEL_PIO
+#include <class/clc67b.h> // NVC67B_WINDOW_IMM_CHANNEL_DMA
+#include <class/clc67d.h> // NVC67D_CORE_CHANNEL_DMA
+#include <class/clc67e.h> // NVC67E_WINDOW_CHANNEL_DMA
+#include <class/clc77f.h> // NVC77F_ANY_CHANNEL_DMA
+#include <class/cl90ec.h> // GF100_HDACODEC
+
+
+#else // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER)
+
+
+#ifndef NV01_ROOT
+#define NV01_ROOT (0x00000000)
+#endif
+#ifndef NV1_ROOT
+#define NV1_ROOT (0x00000000) // alias
+#endif
+#ifndef NV01_NULL_OBJECT
+#define NV01_NULL_OBJECT (0x00000000) // alias
+#endif
+#ifndef NV1_NULL_OBJECT
+#define NV1_NULL_OBJECT (0x00000000) // alias
+#endif
+
+#ifndef NV01_ROOT_NON_PRIV
+#define NV01_ROOT_NON_PRIV (0x00000001)
+#endif
+#ifndef NV1_ROOT_NON_PRIV
+#define NV1_ROOT_NON_PRIV (0x00000001) // alias
+#endif
+
+#ifndef NV01_ROOT_CLIENT
+#define NV01_ROOT_CLIENT (0x00000041)
+#endif
+
+#ifndef NV0020_GPU_MANAGEMENT
+#define NV0020_GPU_MANAGEMENT (0x00000020)
+#endif
+
+#ifndef NV01_DEVICE_0
+#define NV01_DEVICE_0 (0x00000080)
+#endif
+
+#ifndef NV20_SUBDEVICE_0
+#define NV20_SUBDEVICE_0 (0x00002080)
+#endif
+
+#ifndef NV2081_BINAPI
+#define NV2081_BINAPI (0x00002081)
+#endif
+
+#ifndef NV2082_BINAPI_PRIVILEGED
+#define NV2082_BINAPI_PRIVILEGED (0x00002082)
+#endif
+
+#ifndef NV01_CONTEXT_DMA
+#define NV01_CONTEXT_DMA (0x00000002)
+#endif
+
+#ifndef NV01_MEMORY_SYSTEM
+#define NV01_MEMORY_SYSTEM (0x0000003e)
+#endif
+#ifndef NV1_MEMORY_SYSTEM
+#define NV1_MEMORY_SYSTEM (0x0000003e) // alias
+#endif
+
+#ifndef NV01_MEMORY_SYNCPOINT
+#define NV01_MEMORY_SYNCPOINT (0x000000c3)
+#endif
+
+#ifndef NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
+#define NV01_MEMORY_SYSTEM_OS_DESCRIPTOR (0x00000071)
+#endif
+
+#ifndef IO_VASPACE_A
+#define IO_VASPACE_A (0x000000f2)
+#endif
+
+#ifndef NV01_EVENT
+#define NV01_EVENT (0x00000005)
+#endif
+#ifndef NV1_EVENT
+#define NV1_EVENT (0x00000005) // alias
+#endif
+
+#ifndef NV01_EVENT_KERNEL_CALLBACK
+#define NV01_EVENT_KERNEL_CALLBACK (0x00000078)
+#endif
+#ifndef NV1_EVENT_KERNEL_CALLBACK
+#define NV1_EVENT_KERNEL_CALLBACK (0x00000078) // alias
+#endif
+
+#ifndef NV01_EVENT_OS_EVENT
+#define NV01_EVENT_OS_EVENT (0x00000079)
+#endif
+#ifndef NV1_EVENT_OS_EVENT
+#define NV1_EVENT_OS_EVENT (0x00000079) // alias
+#endif
+#ifndef NV01_EVENT_WIN32_EVENT
+#define NV01_EVENT_WIN32_EVENT (0x00000079) // alias
+#endif
+#ifndef NV1_EVENT_WIN32_EVENT
+#define NV1_EVENT_WIN32_EVENT (0x00000079) // alias
+#endif
+
+#ifndef NV01_EVENT_KERNEL_CALLBACK_EX
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+#endif
+#ifndef NV1_EVENT_KERNEL_CALLBACK_EX
+#define NV1_EVENT_KERNEL_CALLBACK_EX (0x0000007e) // alias
+#endif
+
+#ifndef NVC372_DISPLAY_SW
+#define NVC372_DISPLAY_SW (0x0000c372)
+#endif + +#ifndef NVC673_DISP_CAPABILITIES +#define NVC673_DISP_CAPABILITIES (0x0000c673) +#endif + +#ifndef NV04_DISPLAY_COMMON +#define NV04_DISPLAY_COMMON (0x00000073) +#endif + +#ifndef NVC670_DISPLAY +#define NVC670_DISPLAY (0x0000c670) +#endif + +#ifndef NVC671_DISP_SF_USER +#define NVC671_DISP_SF_USER (0x0000c671) +#endif + +#ifndef NVC67A_CURSOR_IMM_CHANNEL_PIO +#define NVC67A_CURSOR_IMM_CHANNEL_PIO (0x0000c67a) +#endif + +#ifndef NVC67B_WINDOW_IMM_CHANNEL_DMA +#define NVC67B_WINDOW_IMM_CHANNEL_DMA (0x0000c67b) +#endif + +#ifndef NVC67D_CORE_CHANNEL_DMA +#define NVC67D_CORE_CHANNEL_DMA (0x0000c67d) +#endif + +#ifndef NVC67E_WINDOW_CHANNEL_DMA +#define NVC67E_WINDOW_CHANNEL_DMA (0x0000c67e) +#endif + +#ifndef NVC77F_ANY_CHANNEL_DMA +#define NVC77F_ANY_CHANNEL_DMA (0x0000c77f) +#endif + +#ifndef GF100_HDACODEC +#define GF100_HDACODEC (0x000090ec) +#endif + + +#endif // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER) diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c new file mode 100644 index 0000000..d641c7e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c @@ -0,0 +1,659 @@ +#define NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_binary_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb7a47c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_BinaryApi(BinaryApi*); +void __nvoc_init_funcTable_BinaryApi(BinaryApi*); +NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_BinaryApi(BinaryApi*); +void __nvoc_dtor_BinaryApi(BinaryApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_BinaryApi = { + /*pClassDef=*/ &__nvoc_class_def_BinaryApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_BinaryApi_BinaryApi, + &__nvoc_rtti_BinaryApi_GpuResource, + &__nvoc_rtti_BinaryApi_RmResource, + &__nvoc_rtti_BinaryApi_RmResourceCommon, + &__nvoc_rtti_BinaryApi_RsResource, + &__nvoc_rtti_BinaryApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(BinaryApi), + /*classId=*/ classId(BinaryApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "BinaryApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApi, + /*pCastInfo=*/ &__nvoc_castinfo_BinaryApi, + /*pExportInfo=*/ &__nvoc_export_info_BinaryApi +}; + +static NV_STATUS __nvoc_thunk_BinaryApi_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return binapiControl((struct BinaryApi *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_binapiShareCallback(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiUnmap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemInterMapParams(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiGetMapAddrSpace(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_binapiGetInternalObjectHandle(struct BinaryApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiControlFilter(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char 
*)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_binapiAddAdditionalDependants(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_binapiGetRefCount(struct BinaryApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiCheckMemInterUnmap(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiMapTo(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiControl_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_binapiCanCopy(struct BinaryApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiInternalControlForward(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_binapiPreDestruct(struct BinaryApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiUnmapFrom(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_binapiControl_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiControlLookup(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiMap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT 
*pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_binapiAccessCallback(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_BinaryApi(BinaryApi *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_BinaryApi(BinaryApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail_GpuResource; + __nvoc_init_dataField_BinaryApi(pThis); + + status = __nvoc_binapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail__init; + goto __nvoc_ctor_BinaryApi_exit; // Success + +__nvoc_ctor_BinaryApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_BinaryApi_fail_GpuResource: +__nvoc_ctor_BinaryApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_BinaryApi_1(BinaryApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__binapiControl__ = &binapiControl_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_BinaryApi_gpuresControl; + + pThis->__binapiShareCallback__ = &__nvoc_thunk_GpuResource_binapiShareCallback; + + pThis->__binapiUnmap__ = &__nvoc_thunk_GpuResource_binapiUnmap; + + pThis->__binapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiGetMemInterMapParams; + + pThis->__binapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor; + + pThis->__binapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiGetMapAddrSpace; + + pThis->__binapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiGetInternalObjectHandle; + + pThis->__binapiControlFilter__ = &__nvoc_thunk_RsResource_binapiControlFilter; + + pThis->__binapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiAddAdditionalDependants; + + pThis->__binapiGetRefCount__ = &__nvoc_thunk_RsResource_binapiGetRefCount; + + pThis->__binapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiCheckMemInterUnmap; + + pThis->__binapiMapTo__ = &__nvoc_thunk_RsResource_binapiMapTo; + + pThis->__binapiControl_Prologue__ = &__nvoc_thunk_RmResource_binapiControl_Prologue; + + pThis->__binapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize; + + pThis->__binapiCanCopy__ = &__nvoc_thunk_RsResource_binapiCanCopy; + + pThis->__binapiInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiInternalControlForward; + + pThis->__binapiPreDestruct__ = &__nvoc_thunk_RsResource_binapiPreDestruct; + + 
pThis->__binapiUnmapFrom__ = &__nvoc_thunk_RsResource_binapiUnmapFrom; + + pThis->__binapiControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiControl_Epilogue; + + pThis->__binapiControlLookup__ = &__nvoc_thunk_RsResource_binapiControlLookup; + + pThis->__binapiMap__ = &__nvoc_thunk_GpuResource_binapiMap; + + pThis->__binapiAccessCallback__ = &__nvoc_thunk_RmResource_binapiAccessCallback; +} + +void __nvoc_init_funcTable_BinaryApi(BinaryApi *pThis) { + __nvoc_init_funcTable_BinaryApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_BinaryApi(BinaryApi *pThis) { + pThis->__nvoc_pbase_BinaryApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_BinaryApi(pThis); +} + +NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + BinaryApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(BinaryApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(BinaryApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_BinaryApi(pThis); + status = __nvoc_ctor_BinaryApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_BinaryApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_BinaryApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_BinaryApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x1c0579 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi; + +void 
__nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged*); +void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged*); +NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged*); +void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged = { + /*pClassDef=*/ &__nvoc_class_def_BinaryApiPrivileged, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApiPrivileged, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApi = { + /*pClassDef=*/ &__nvoc_class_def_BinaryApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApiPrivileged = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged, + &__nvoc_rtti_BinaryApiPrivileged_BinaryApi, + &__nvoc_rtti_BinaryApiPrivileged_GpuResource, + &__nvoc_rtti_BinaryApiPrivileged_RmResource, + &__nvoc_rtti_BinaryApiPrivileged_RmResourceCommon, + &__nvoc_rtti_BinaryApiPrivileged_RsResource, + &__nvoc_rtti_BinaryApiPrivileged_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged = +{ + /*classInfo=*/ { + /*size=*/ sizeof(BinaryApiPrivileged), + /*classId=*/ classId(BinaryApiPrivileged), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "BinaryApiPrivileged", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApiPrivileged, + /*pCastInfo=*/ &__nvoc_castinfo_BinaryApiPrivileged, + /*pExportInfo=*/ &__nvoc_export_info_BinaryApiPrivileged +}; + +static NV_STATUS __nvoc_thunk_BinaryApiPrivileged_binapiControl(struct BinaryApi *pResource, struct 
CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return binapiprivControl((struct BinaryApiPrivileged *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApiPrivileged_BinaryApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_binapiprivShareCallback(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivUnmap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemInterMapParams(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle(struct BinaryApiPrivileged *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlFilter(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_binapiprivAddAdditionalDependants(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_binapiprivGetRefCount(struct BinaryApiPrivileged *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS 
__nvoc_thunk_RsResource_binapiprivMapTo(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivControl_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_binapiprivCanCopy(struct BinaryApiPrivileged *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivInternalControlForward(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_binapiprivPreDestruct(struct BinaryApiPrivileged *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiprivUnmapFrom(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_binapiprivControl_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlLookup(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivMap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_binapiprivAccessCallback(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged 
= +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_BinaryApi(BinaryApi*); +void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_BinaryApi(&pThis->__nvoc_base_BinaryApi, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi; + __nvoc_init_dataField_BinaryApiPrivileged(pThis); + + status = __nvoc_binapiprivConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail__init; + goto __nvoc_ctor_BinaryApiPrivileged_exit; // Success + +__nvoc_ctor_BinaryApiPrivileged_fail__init: + __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi); +__nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi: +__nvoc_ctor_BinaryApiPrivileged_exit: + + return status; +} + +static void __nvoc_init_funcTable_BinaryApiPrivileged_1(BinaryApiPrivileged *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__binapiprivControl__ = &binapiprivControl_IMPL; + + pThis->__nvoc_base_BinaryApi.__binapiControl__ = &__nvoc_thunk_BinaryApiPrivileged_binapiControl; + + pThis->__binapiprivShareCallback__ = &__nvoc_thunk_GpuResource_binapiprivShareCallback; + + pThis->__binapiprivUnmap__ = &__nvoc_thunk_GpuResource_binapiprivUnmap; + + pThis->__binapiprivGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiprivGetMemInterMapParams; + + pThis->__binapiprivGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor; + + pThis->__binapiprivGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace; + + pThis->__binapiprivGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle; + + pThis->__binapiprivControlFilter__ = &__nvoc_thunk_RsResource_binapiprivControlFilter; + + pThis->__binapiprivAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiprivAddAdditionalDependants; + + pThis->__binapiprivGetRefCount__ = &__nvoc_thunk_RsResource_binapiprivGetRefCount; + + pThis->__binapiprivCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap; + + pThis->__binapiprivMapTo__ = &__nvoc_thunk_RsResource_binapiprivMapTo; + + pThis->__binapiprivControl_Prologue__ = &__nvoc_thunk_RmResource_binapiprivControl_Prologue; + + pThis->__binapiprivGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize; + + pThis->__binapiprivCanCopy__ = &__nvoc_thunk_RsResource_binapiprivCanCopy; + + pThis->__binapiprivInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiprivInternalControlForward; + + pThis->__binapiprivPreDestruct__ = &__nvoc_thunk_RsResource_binapiprivPreDestruct; + + pThis->__binapiprivUnmapFrom__ = &__nvoc_thunk_RsResource_binapiprivUnmapFrom; + + pThis->__binapiprivControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiprivControl_Epilogue; + + pThis->__binapiprivControlLookup__ = &__nvoc_thunk_RsResource_binapiprivControlLookup; + + pThis->__binapiprivMap__ = &__nvoc_thunk_GpuResource_binapiprivMap; + + pThis->__binapiprivAccessCallback__ = 
&__nvoc_thunk_RmResource_binapiprivAccessCallback; +} + +void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + __nvoc_init_funcTable_BinaryApiPrivileged_1(pThis); +} + +void __nvoc_init_BinaryApi(BinaryApi*); +void __nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + pThis->__nvoc_pbase_BinaryApiPrivileged = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_BinaryApi = &pThis->__nvoc_base_BinaryApi; + __nvoc_init_BinaryApi(&pThis->__nvoc_base_BinaryApi); + __nvoc_init_funcTable_BinaryApiPrivileged(pThis); +} + +NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + BinaryApiPrivileged *pThis; + + pThis = portMemAllocNonPaged(sizeof(BinaryApiPrivileged)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(BinaryApiPrivileged)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApiPrivileged); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_BinaryApiPrivileged(pThis); + status = __nvoc_ctor_BinaryApiPrivileged(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_BinaryApiPrivileged_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_BinaryApiPrivileged_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_BinaryApiPrivileged(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h new file mode 100644 index 0000000..c4758ca --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h @@ -0,0 +1,416 @@ +#ifndef _G_BINARY_API_NVOC_H_ +#define _G_BINARY_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 
2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_binary_api_nvoc.h" + +#ifndef BINARY_API_H +#define BINARY_API_H + +#include "core/core.h" +#include "rmapi/resource.h" +#include "gpu/gpu_resource.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" + +#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct BinaryApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct BinaryApi *__nvoc_pbase_BinaryApi; + NV_STATUS (*__binapiControl__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__binapiShareCallback__)(struct BinaryApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__binapiUnmap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__binapiGetMemInterMapParams__)(struct BinaryApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__binapiGetMemoryMappingDescriptor__)(struct BinaryApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__binapiGetMapAddrSpace__)(struct BinaryApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__binapiGetInternalObjectHandle__)(struct BinaryApi *); + NV_STATUS (*__binapiControlFilter__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__binapiAddAdditionalDependants__)(struct RsClient *, struct BinaryApi *, RsResourceRef *); + NvU32 (*__binapiGetRefCount__)(struct BinaryApi *); + NV_STATUS (*__binapiCheckMemInterUnmap__)(struct BinaryApi *, NvBool); + NV_STATUS (*__binapiMapTo__)(struct BinaryApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__binapiControl_Prologue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiGetRegBaseOffsetAndSize__)(struct BinaryApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__binapiCanCopy__)(struct BinaryApi *); + NV_STATUS (*__binapiInternalControlForward__)(struct BinaryApi *, NvU32, void *, NvU32); + void (*__binapiPreDestruct__)(struct BinaryApi *); + NV_STATUS 
(*__binapiUnmapFrom__)(struct BinaryApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__binapiControl_Epilogue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiControlLookup__)(struct BinaryApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__binapiMap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__binapiAccessCallback__)(struct BinaryApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_BinaryApi_TYPEDEF__ +#define __NVOC_CLASS_BinaryApi_TYPEDEF__ +typedef struct BinaryApi BinaryApi; +#endif /* __NVOC_CLASS_BinaryApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApi +#define __nvoc_class_id_BinaryApi 0xb7a47c +#endif /* __nvoc_class_id_BinaryApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi; + +#define __staticCast_BinaryApi(pThis) \ + ((pThis)->__nvoc_pbase_BinaryApi) + +#ifdef __nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApi(pThis) ((BinaryApi*)NULL) +#else //__nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApi(pThis) \ + ((BinaryApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApi))) +#endif //__nvoc_binary_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_BinaryApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_BinaryApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define binapiControl(pResource, pCallContext, pParams) binapiControl_DISPATCH(pResource, pCallContext, pParams) +#define binapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define binapiUnmap(pGpuResource, pCallContext, pCpuMapping) binapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define binapiGetMemInterMapParams(pRmResource, pParams) binapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define binapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define binapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define binapiGetInternalObjectHandle(pGpuResource) binapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define binapiControlFilter(pResource, pCallContext, pParams) binapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define binapiAddAdditionalDependants(pClient, pResource, pReference) binapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define binapiGetRefCount(pResource) binapiGetRefCount_DISPATCH(pResource) +#define binapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define binapiMapTo(pResource, pParams) binapiMapTo_DISPATCH(pResource, pParams) +#define binapiControl_Prologue(pResource, pCallContext, pParams) binapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define binapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) 
+#define binapiCanCopy(pResource) binapiCanCopy_DISPATCH(pResource) +#define binapiInternalControlForward(pGpuResource, command, pParams, size) binapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define binapiPreDestruct(pResource) binapiPreDestruct_DISPATCH(pResource) +#define binapiUnmapFrom(pResource, pParams) binapiUnmapFrom_DISPATCH(pResource, pParams) +#define binapiControl_Epilogue(pResource, pCallContext, pParams) binapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define binapiControlLookup(pResource, pParams, ppEntry) binapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define binapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define binapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS binapiControl_IMPL(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS binapiControl_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiControl__(pResource, pCallContext, pParams); +} + +static inline NvBool binapiShareCallback_DISPATCH(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__binapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS binapiUnmap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS binapiGetMemInterMapParams_DISPATCH(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__binapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS binapiGetMemoryMappingDescriptor_DISPATCH(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__binapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS binapiGetMapAddrSpace_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__binapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle binapiGetInternalObjectHandle_DISPATCH(struct BinaryApi *pGpuResource) { + return pGpuResource->__binapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS binapiControlFilter_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void binapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) { + pResource->__binapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 binapiGetRefCount_DISPATCH(struct BinaryApi *pResource) { + return pResource->__binapiGetRefCount__(pResource); +} + +static inline NV_STATUS binapiCheckMemInterUnmap_DISPATCH(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__binapiCheckMemInterUnmap__(pRmResource, 
bSubdeviceHandleProvided); +} + +static inline NV_STATUS binapiMapTo_DISPATCH(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__binapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS binapiControl_Prologue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__binapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool binapiCanCopy_DISPATCH(struct BinaryApi *pResource) { + return pResource->__binapiCanCopy__(pResource); +} + +static inline NV_STATUS binapiInternalControlForward_DISPATCH(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__binapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void binapiPreDestruct_DISPATCH(struct BinaryApi *pResource) { + pResource->__binapiPreDestruct__(pResource); +} + +static inline NV_STATUS binapiUnmapFrom_DISPATCH(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__binapiUnmapFrom__(pResource, pParams); +} + +static inline void binapiControl_Epilogue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__binapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiControlLookup_DISPATCH(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__binapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS binapiMap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool binapiAccessCallback_DISPATCH(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__binapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS binapiConstruct_IMPL(struct BinaryApi *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_binapiConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct BinaryApiPrivileged { + const struct NVOC_RTTI *__nvoc_rtti; + struct BinaryApi __nvoc_base_BinaryApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct BinaryApi *__nvoc_pbase_BinaryApi; + struct BinaryApiPrivileged *__nvoc_pbase_BinaryApiPrivileged; + NV_STATUS (*__binapiprivControl__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__binapiprivShareCallback__)(struct 
BinaryApiPrivileged *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__binapiprivUnmap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__binapiprivGetMemInterMapParams__)(struct BinaryApiPrivileged *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__binapiprivGetMemoryMappingDescriptor__)(struct BinaryApiPrivileged *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__binapiprivGetMapAddrSpace__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__binapiprivGetInternalObjectHandle__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivControlFilter__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__binapiprivAddAdditionalDependants__)(struct RsClient *, struct BinaryApiPrivileged *, RsResourceRef *); + NvU32 (*__binapiprivGetRefCount__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivCheckMemInterUnmap__)(struct BinaryApiPrivileged *, NvBool); + NV_STATUS (*__binapiprivMapTo__)(struct BinaryApiPrivileged *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__binapiprivControl_Prologue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiprivGetRegBaseOffsetAndSize__)(struct BinaryApiPrivileged *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__binapiprivCanCopy__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivInternalControlForward__)(struct BinaryApiPrivileged *, NvU32, void *, NvU32); + void (*__binapiprivPreDestruct__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivUnmapFrom__)(struct BinaryApiPrivileged *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__binapiprivControl_Epilogue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiprivControlLookup__)(struct BinaryApiPrivileged *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__binapiprivMap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__binapiprivAccessCallback__)(struct BinaryApiPrivileged *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +#define __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +typedef struct BinaryApiPrivileged BinaryApiPrivileged; +#endif /* __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApiPrivileged +#define __nvoc_class_id_BinaryApiPrivileged 0x1c0579 +#endif /* __nvoc_class_id_BinaryApiPrivileged */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged; + +#define __staticCast_BinaryApiPrivileged(pThis) \ + ((pThis)->__nvoc_pbase_BinaryApiPrivileged) + +#ifdef __nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApiPrivileged(pThis) ((BinaryApiPrivileged*)NULL) +#else //__nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApiPrivileged(pThis) \ + ((BinaryApiPrivileged*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApiPrivileged))) +#endif //__nvoc_binary_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_BinaryApiPrivileged(ppNewObj, pParent, createFlags, 
arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_BinaryApiPrivileged((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define binapiprivControl(pResource, pCallContext, pParams) binapiprivControl_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiprivShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define binapiprivUnmap(pGpuResource, pCallContext, pCpuMapping) binapiprivUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define binapiprivGetMemInterMapParams(pRmResource, pParams) binapiprivGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define binapiprivGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiprivGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define binapiprivGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiprivGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define binapiprivGetInternalObjectHandle(pGpuResource) binapiprivGetInternalObjectHandle_DISPATCH(pGpuResource) +#define binapiprivControlFilter(pResource, pCallContext, pParams) binapiprivControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivAddAdditionalDependants(pClient, pResource, pReference) binapiprivAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define binapiprivGetRefCount(pResource) binapiprivGetRefCount_DISPATCH(pResource) +#define binapiprivCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiprivCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define binapiprivMapTo(pResource, pParams) binapiprivMapTo_DISPATCH(pResource, pParams) +#define binapiprivControl_Prologue(pResource, pCallContext, pParams) binapiprivControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiprivGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define binapiprivCanCopy(pResource) binapiprivCanCopy_DISPATCH(pResource) +#define binapiprivInternalControlForward(pGpuResource, command, pParams, size) binapiprivInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define binapiprivPreDestruct(pResource) binapiprivPreDestruct_DISPATCH(pResource) +#define binapiprivUnmapFrom(pResource, pParams) binapiprivUnmapFrom_DISPATCH(pResource, pParams) +#define binapiprivControl_Epilogue(pResource, pCallContext, pParams) binapiprivControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivControlLookup(pResource, pParams, ppEntry) binapiprivControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define binapiprivMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiprivMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define binapiprivAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiprivAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS binapiprivControl_IMPL(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS binapiprivControl_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiprivControl__(pResource, pCallContext, pParams); +} + +static inline NvBool binapiprivShareCallback_DISPATCH(struct 
BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__binapiprivShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS binapiprivUnmap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiprivUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS binapiprivGetMemInterMapParams_DISPATCH(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__binapiprivGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS binapiprivGetMemoryMappingDescriptor_DISPATCH(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__binapiprivGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS binapiprivGetMapAddrSpace_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__binapiprivGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle binapiprivGetInternalObjectHandle_DISPATCH(struct BinaryApiPrivileged *pGpuResource) { + return pGpuResource->__binapiprivGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS binapiprivControlFilter_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiprivControlFilter__(pResource, pCallContext, pParams); +} + +static inline void binapiprivAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) { + pResource->__binapiprivAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 binapiprivGetRefCount_DISPATCH(struct BinaryApiPrivileged *pResource) { + return pResource->__binapiprivGetRefCount__(pResource); +} + +static inline NV_STATUS binapiprivCheckMemInterUnmap_DISPATCH(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__binapiprivCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS binapiprivMapTo_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__binapiprivMapTo__(pResource, pParams); +} + +static inline NV_STATUS binapiprivControl_Prologue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiprivControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiprivGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__binapiprivGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool binapiprivCanCopy_DISPATCH(struct BinaryApiPrivileged *pResource) { + return pResource->__binapiprivCanCopy__(pResource); +} + +static inline NV_STATUS binapiprivInternalControlForward_DISPATCH(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__binapiprivInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void 
binapiprivPreDestruct_DISPATCH(struct BinaryApiPrivileged *pResource) { + pResource->__binapiprivPreDestruct__(pResource); +} + +static inline NV_STATUS binapiprivUnmapFrom_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__binapiprivUnmapFrom__(pResource, pParams); +} + +static inline void binapiprivControl_Epilogue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__binapiprivControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiprivControlLookup_DISPATCH(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__binapiprivControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS binapiprivMap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiprivMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool binapiprivAccessCallback_DISPATCH(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__binapiprivAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS binapiprivConstruct_IMPL(struct BinaryApiPrivileged *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_binapiprivConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiprivConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_BINARY_API_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h new file mode 100644 index 0000000..7fdb068 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h @@ -0,0 +1,3 @@ + +#include "g_chips2halspec_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c new file mode 100644 index 0000000..74f3a3f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c @@ -0,0 +1,45 @@ +#define NVOC_CHIPS2HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_chips2halspec_nvoc.h" + +void __nvoc_init_halspec_ChipHal(ChipHal *pChipHal, NvU32 arch, NvU32 impl, NvU32 hidrev) +{ + // T234D + if(arch == 0x0 && impl == 0x0 && hidrev == 0x235) + { + pChipHal->__nvoc_HalVarIdx = 80; + } +} + +void __nvoc_init_halspec_RmVariantHal(RmVariantHal *pRmVariantHal, RM_RUNTIME_VARIANT rmVariant) +{ + // PF_KERNEL_ONLY + if(rmVariant == 0x2) + { + pRmVariantHal->__nvoc_HalVarIdx = 1; + } +} + +void __nvoc_init_halspec_DispIpHal(DispIpHal *pDispIpHal, NvU32 ipver) +{ + // DISPv0402 + if(ipver == 0x4020000) + { + pDispIpHal->__nvoc_HalVarIdx = 12; + } +} + +void __nvoc_init_halspec_DpuIpHal(DpuIpHal *pDpuIpHal, NvU32 ipver) +{ + // DPUv0000 + if(ipver == 0x0) + { + 
pDpuIpHal->__nvoc_HalVarIdx = 5; + } +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h new file mode 100644 index 0000000..91ac183 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h @@ -0,0 +1,118 @@ +#ifndef _G_CHIPS2HALSPEC_NVOC_H_ +#define _G_CHIPS2HALSPEC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "g_chips2halspec_nvoc.h" + +#ifndef _CHIPS_2_HALSPEC_H_ +#define _CHIPS_2_HALSPEC_H_ + +#include "nvtypes.h" +#include "rmconfig.h" + +// Several WARs that are only visible to the NVOC compiler + +#define GPUHAL_ARCH(x) NV_PMC_BOOT_0_ARCHITECTURE_##x +#define GPUHAL_IMPL(x) NV_PMC_BOOT_0_IMPLEMENTATION_##x + +// Create the alias 'group' to provide a concise syntax +#define group variant_group + +// Use in a hal block to indicate that the function isn't wired to any enabled chips +#define __disabled__ false + +struct ChipHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct ChipHal ChipHal; +void __nvoc_init_halspec_ChipHal(ChipHal*, NvU32, NvU32, NvU32); + +/* + * RM Runtime Variant Halspec + * + * One group of Hal Variants that presents two perspectives: + * + * Operating Environment Perspective: VF / PF / UCODE + * VF | PF | UCODE = true + * VF & PF & UCODE = false + * + * VF : RM is running in a VGPU Guest environment. Equivalent to IS_VIRTUAL(pGpu) + * PF : RM is running on Host/Baremetal in a standard PCIE environment + * UCODE : RM is running on a microcontroller + * + * Functionality-Based Perspective: KERNEL_ONLY / PHYSICAL_ONLY / MONOLITHIC + * KERNEL_ONLY | PHYSICAL_ONLY | MONOLITHIC = true + * KERNEL_ONLY & PHYSICAL_ONLY & MONOLITHIC = false + * + * KERNEL_ONLY : RM does not own the HW. The physical part is offloaded to Ucode. + * PHYSICAL_ONLY : RM owns the HW but does not expose services to RM Clients + * MONOLITHIC : RM owns both the interface to the client and the underlying HW. 
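+ * + * Editorial illustration, not part of the generated header: given the + * RM_RUNTIME_VARIANT enum defined below and the generated initializer + * __nvoc_init_halspec_RmVariantHal() in g_chips2halspec_nvoc.c above, a + * PF_KERNEL_ONLY configuration resolves its variant index as: + * + * RmVariantHal variant; + * __nvoc_init_halspec_RmVariantHal(&variant, RM_RUNTIME_VARIANT_PF_KERNEL_ONLY); + * // variant.__nvoc_HalVarIdx is now 1, the PF_KERNEL_ONLY slot + * + * The halspec stores only this index; the variant reasoning described here is + * resolved by the NVOC compiler when the code is generated.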
 + * + * Note: GSP Client (IS_GSP_CLIENT(pGpu)) maps to "PF_KERNEL_ONLY" + * DCE Client maps to "PF_KERNEL_ONLY & T234D" + * + * + * HAL Variants + * +--------+ +----------------+ + * | VF | <-----| VF |--+ + * +--------+ +----------------+ | +---------------+ + * |--> | KERNEL_ONLY | + * +----------------+ | +---------------+ + * +--| PF_KERNEL_ONLY |--+ + * +--------+ | +----------------+ + * | PF | <--| + * +--------+ | +----------------+ +---------------+ + * +--| PF_MONOLITHIC |-----> | MONOLITHIC | + * +----------------+ +---------------+ + * + * +--------+ +----------------+ +---------------+ + * | UCODE | <-----| UCODE |-----> | PHYSICAL_ONLY | + * +--------+ +----------------+ +---------------+ + * + * */ +typedef enum _RM_RUNTIME_VARIANT { + RM_RUNTIME_VARIANT_VF = 1, + RM_RUNTIME_VARIANT_PF_KERNEL_ONLY = 2, + RM_RUNTIME_VARIANT_PF_MONOLITHIC = 3, + RM_RUNTIME_VARIANT_UCODE = 4, +} RM_RUNTIME_VARIANT; + +struct RmVariantHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct RmVariantHal RmVariantHal; +void __nvoc_init_halspec_RmVariantHal(RmVariantHal*, RM_RUNTIME_VARIANT); + +/* DISP IP versions */ +struct DispIpHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct DispIpHal DispIpHal; +void __nvoc_init_halspec_DispIpHal(DispIpHal*, NvU32); + +/* The 'delete' rules for DispIpHal and ChipHal */ +// delete DISPv0402 & ~T234D; +// delete ~DISPv0402 & T234D; + + +/* DPU IP versions */ +struct DpuIpHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct DpuIpHal DpuIpHal; +void __nvoc_init_halspec_DpuIpHal(DpuIpHal*, NvU32); + +/* The 'delete' rules for DpuIpHal and ChipHal */ + + +#undef group +#endif /* _CHIPS_2_HALSPEC_H_ */ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CHIPS2HALSPEC_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c new file mode 100644 index 0000000..99ed939 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c @@ -0,0 +1,385 @@ +#define NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_client_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x21d236 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_UserInfo(UserInfo*); +void __nvoc_init_funcTable_UserInfo(UserInfo*); +NV_STATUS __nvoc_ctor_UserInfo(UserInfo*); +void __nvoc_init_dataField_UserInfo(UserInfo*); +void __nvoc_dtor_UserInfo(UserInfo*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo; + +static const struct NVOC_RTTI __nvoc_rtti_UserInfo_UserInfo = { + /*pClassDef=*/ &__nvoc_class_def_UserInfo, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UserInfo, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserInfo_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserInfo, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserInfo_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserInfo, __nvoc_base_RsShared), +}; + +static const struct 
NVOC_CASTINFO __nvoc_castinfo_UserInfo = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_UserInfo_UserInfo, + &__nvoc_rtti_UserInfo_RsShared, + &__nvoc_rtti_UserInfo_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo = +{ + /*classInfo=*/ { + /*size=*/ sizeof(UserInfo), + /*classId=*/ classId(UserInfo), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "UserInfo", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UserInfo, + /*pCastInfo=*/ &__nvoc_castinfo_UserInfo, + /*pExportInfo=*/ &__nvoc_export_info_UserInfo +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_UserInfo(UserInfo *pThis) { + __nvoc_userinfoDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_UserInfo(UserInfo *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_UserInfo(UserInfo *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail_RsShared; + __nvoc_init_dataField_UserInfo(pThis); + + status = __nvoc_userinfoConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail__init; + goto __nvoc_ctor_UserInfo_exit; // Success + +__nvoc_ctor_UserInfo_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_UserInfo_fail_RsShared: +__nvoc_ctor_UserInfo_exit: + + return status; +} + +static void __nvoc_init_funcTable_UserInfo_1(UserInfo *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_UserInfo(UserInfo *pThis) { + __nvoc_init_funcTable_UserInfo_1(pThis); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_UserInfo(UserInfo *pThis) { + pThis->__nvoc_pbase_UserInfo = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_UserInfo(pThis); +} + +NV_STATUS __nvoc_objCreate_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + UserInfo *pThis; + + pThis = portMemAllocNonPaged(sizeof(UserInfo)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(UserInfo)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_UserInfo); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_UserInfo(pThis); + status = __nvoc_ctor_UserInfo(pThis); + if (status != NV_OK) goto __nvoc_objCreate_UserInfo_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_UserInfo_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_UserInfo(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb23d83 = 1; 
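+// Editorial note, not part of the generated file: one uniquely named symbol is +// emitted per NVOC class id, so two classes that accidentally shared an id +// would define the same symbol twice and fail the DEBUG link, catching the +// collision at build time. 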
+#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +void __nvoc_init_RmClient(RmClient*); +void __nvoc_init_funcTable_RmClient(RmClient*); +NV_STATUS __nvoc_ctor_RmClient(RmClient*, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RmClient(RmClient*); +void __nvoc_dtor_RmClient(RmClient*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient; + +static const struct NVOC_RTTI __nvoc_rtti_RmClient_RmClient = { + /*pClassDef=*/ &__nvoc_class_def_RmClient, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClient, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClient_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClient_RsClient = { + /*pClassDef=*/ &__nvoc_class_def_RsClient, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmClient = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_RmClient_RmClient, + &__nvoc_rtti_RmClient_RsClient, + &__nvoc_rtti_RmClient_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmClient), + /*classId=*/ classId(RmClient), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmClient", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClient, + /*pCastInfo=*/ &__nvoc_castinfo_RmClient, + /*pExportInfo=*/ &__nvoc_export_info_RmClient +}; + +static NV_STATUS __nvoc_thunk_RmClient_clientValidate(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return rmclientValidate((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pSecInfo); +} + +static NV_STATUS __nvoc_thunk_RmClient_clientFreeResource(struct RsClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return rmclientFreeResource((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pServer, pParams); +} + +static NV_STATUS __nvoc_thunk_RmClient_clientInterMap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return rmclientInterMap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pMappableRef, pParams); +} + +static void __nvoc_thunk_RmClient_clientInterUnmap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + rmclientInterUnmap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pParams); +} + +static NV_STATUS __nvoc_thunk_RmClient_clientPostProcessPendingFreeList(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return rmclientPostProcessPendingFreeList((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), ppFirstLowPriRef); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientDestructResourceRef(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) { + return clientDestructResourceRef((struct RsClient 
*)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pServer, pResourceRef); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientValidateNewResourceHandle(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) { + return clientValidateNewResourceHandle((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), hResource, bRestrict); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientShareResource(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return clientShareResource((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pSharePolicy, pCallContext); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientUnmapMemory(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return clientUnmapMemory((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsClient(RsClient*); +void __nvoc_dtor_RmClient(RmClient *pThis) { + __nvoc_rmclientDestruct(pThis); + __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmClient(RmClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsClient(RsClient* , struct PORT_MEM_ALLOCATOR *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmClient(RmClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsClient(&pThis->__nvoc_base_RsClient, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClient_fail_RsClient; + __nvoc_init_dataField_RmClient(pThis); + + status = __nvoc_rmclientConstruct(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClient_fail__init; + goto __nvoc_ctor_RmClient_exit; // Success + +__nvoc_ctor_RmClient_fail__init: + __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient); +__nvoc_ctor_RmClient_fail_RsClient: +__nvoc_ctor_RmClient_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmClient_1(RmClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__rmclientValidate__ = &rmclientValidate_IMPL; + + pThis->__rmclientFreeResource__ = &rmclientFreeResource_IMPL; + + pThis->__rmclientInterMap__ = &rmclientInterMap_IMPL; + + pThis->__rmclientInterUnmap__ = &rmclientInterUnmap_IMPL; + + pThis->__rmclientPostProcessPendingFreeList__ = &rmclientPostProcessPendingFreeList_IMPL; + + pThis->__nvoc_base_RsClient.__clientValidate__ = &__nvoc_thunk_RmClient_clientValidate; + + pThis->__nvoc_base_RsClient.__clientFreeResource__ = &__nvoc_thunk_RmClient_clientFreeResource; + + pThis->__nvoc_base_RsClient.__clientInterMap__ = &__nvoc_thunk_RmClient_clientInterMap; + + pThis->__nvoc_base_RsClient.__clientInterUnmap__ = &__nvoc_thunk_RmClient_clientInterUnmap; + + pThis->__nvoc_base_RsClient.__clientPostProcessPendingFreeList__ = &__nvoc_thunk_RmClient_clientPostProcessPendingFreeList; + + pThis->__rmclientDestructResourceRef__ = &__nvoc_thunk_RsClient_rmclientDestructResourceRef; + + pThis->__rmclientValidateNewResourceHandle__ = 
&__nvoc_thunk_RsClient_rmclientValidateNewResourceHandle; + + pThis->__rmclientShareResource__ = &__nvoc_thunk_RsClient_rmclientShareResource; + + pThis->__rmclientUnmapMemory__ = &__nvoc_thunk_RsClient_rmclientUnmapMemory; +} + +void __nvoc_init_funcTable_RmClient(RmClient *pThis) { + __nvoc_init_funcTable_RmClient_1(pThis); +} + +void __nvoc_init_RsClient(RsClient*); +void __nvoc_init_RmClient(RmClient *pThis) { + pThis->__nvoc_pbase_RmClient = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClient.__nvoc_base_Object; + pThis->__nvoc_pbase_RsClient = &pThis->__nvoc_base_RsClient; + __nvoc_init_RsClient(&pThis->__nvoc_base_RsClient); + __nvoc_init_funcTable_RmClient(pThis); +} + +NV_STATUS __nvoc_objCreate_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RmClient *pThis; + + pThis = portMemAllocNonPaged(sizeof(RmClient)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RmClient)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmClient); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsClient.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsClient.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RmClient(pThis); + status = __nvoc_ctor_RmClient(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RmClient_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RmClient_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct PORT_MEM_ALLOCATOR * arg_pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmClient(ppThis, pParent, createFlags, arg_pAllocator, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h new file mode 100644 index 0000000..499e23a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h @@ -0,0 +1,323 @@ +#ifndef _G_CLIENT_NVOC_H_ +#define _G_CLIENT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_client_nvoc.h" + +#ifndef _CLIENT_H_ +#define _CLIENT_H_ + +#include "ctrl/ctrl0000/ctrl0000proc.h" // NV_PROC_NAME_MAX_LENGTH +#include "containers/btree.h" +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_client.h" +#include "rmapi/resource.h" +#include "rmapi/event.h" +#include "nvsecurityinfo.h" + +// event information definitions +typedef struct _def_client_system_event_info CLI_SYSTEM_EVENT_INFO, *PCLI_SYSTEM_EVENT_INFO; + +/** + * This ref-counted object is shared by all clients that were registered under + * the same user and is used to identify clients from the same user. + */ +#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct UserInfo { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsShared __nvoc_base_RsShared; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct UserInfo *__nvoc_pbase_UserInfo; + PUID_TOKEN pUidToken; +}; + +#ifndef __NVOC_CLASS_UserInfo_TYPEDEF__ +#define __NVOC_CLASS_UserInfo_TYPEDEF__ +typedef struct UserInfo UserInfo; +#endif /* __NVOC_CLASS_UserInfo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserInfo +#define __nvoc_class_id_UserInfo 0x21d236 +#endif /* __nvoc_class_id_UserInfo */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo; + +#define __staticCast_UserInfo(pThis) \ + ((pThis)->__nvoc_pbase_UserInfo) + +#ifdef __nvoc_client_h_disabled +#define __dynamicCast_UserInfo(pThis) ((UserInfo*)NULL) +#else //__nvoc_client_h_disabled +#define __dynamicCast_UserInfo(pThis) \ + ((UserInfo*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(UserInfo))) +#endif //__nvoc_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_UserInfo(UserInfo**, Dynamic*, NvU32); +#define __objCreate_UserInfo(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_UserInfo((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS userinfoConstruct_IMPL(struct UserInfo *arg_pUserInfo); +#define __nvoc_userinfoConstruct(arg_pUserInfo) userinfoConstruct_IMPL(arg_pUserInfo) +void userinfoDestruct_IMPL(struct UserInfo *pUserInfo); +#define __nvoc_userinfoDestruct(pUserInfo) userinfoDestruct_IMPL(pUserInfo) +#undef PRIVATE_FIELD + + +// Flags for RmClient +#define RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT 0x00000001 +#define RMAPI_CLIENT_FLAG_DELETE_PENDING 0x00000002 + +// Values for client debugger state +#define RMAPI_CLIENT_DEBUGGER_STATE_NOT_SET 0x00000000 +#define RMAPI_CLIENT_DEBUGGER_STATE_COMPUTE_ACTIVE 0x00000001 +#define RMAPI_CLIENT_DEBUGGER_STATE_DEBUG_ACTIVE 0x00000002 + +#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmClient { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsClient __nvoc_base_RsClient; + struct Object *__nvoc_pbase_Object; + struct RsClient *__nvoc_pbase_RsClient; + struct RmClient 
*__nvoc_pbase_RmClient; + NV_STATUS (*__rmclientValidate__)(struct RmClient *, const API_SECURITY_INFO *); + NV_STATUS (*__rmclientFreeResource__)(struct RmClient *, struct RsServer *, struct RS_RES_FREE_PARAMS_INTERNAL *); + NV_STATUS (*__rmclientInterMap__)(struct RmClient *, struct RsResourceRef *, struct RsResourceRef *, struct RS_INTER_MAP_PARAMS *); + void (*__rmclientInterUnmap__)(struct RmClient *, struct RsResourceRef *, struct RS_INTER_UNMAP_PARAMS *); + NV_STATUS (*__rmclientPostProcessPendingFreeList__)(struct RmClient *, struct RsResourceRef **); + NV_STATUS (*__rmclientDestructResourceRef__)(struct RmClient *, RsServer *, struct RsResourceRef *); + NV_STATUS (*__rmclientValidateNewResourceHandle__)(struct RmClient *, NvHandle, NvBool); + NV_STATUS (*__rmclientShareResource__)(struct RmClient *, struct RsResourceRef *, RS_SHARE_POLICY *, struct CALL_CONTEXT *); + NV_STATUS (*__rmclientUnmapMemory__)(struct RmClient *, struct RsResourceRef *, struct RS_LOCK_INFO *, struct RsCpuMapping **, API_SECURITY_INFO *); + RS_PRIV_LEVEL cachedPrivilege; + NvBool bIsRootNonPriv; + NvU32 ProcID; + NvU32 SubProcessID; + char SubProcessName[100]; + NvBool bIsSubProcessDisabled; + NvU32 Flags; + NvU32 ClientDebuggerState; + void *pOSInfo; + char name[100]; + CLI_SYSTEM_EVENT_INFO CliSysEventInfo; + PSECURITY_TOKEN pSecurityToken; + struct UserInfo *pUserInfo; + NvBool bIsClientVirtualMode; + PNODE pCliSyncGpuBoostTree; +}; + +#ifndef __NVOC_CLASS_RmClient_TYPEDEF__ +#define __NVOC_CLASS_RmClient_TYPEDEF__ +typedef struct RmClient RmClient; +#endif /* __NVOC_CLASS_RmClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClient +#define __nvoc_class_id_RmClient 0xb23d83 +#endif /* __nvoc_class_id_RmClient */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient; + +#define __staticCast_RmClient(pThis) \ + ((pThis)->__nvoc_pbase_RmClient) + +#ifdef __nvoc_client_h_disabled +#define __dynamicCast_RmClient(pThis) ((RmClient*)NULL) +#else //__nvoc_client_h_disabled +#define __dynamicCast_RmClient(pThis) \ + ((RmClient*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClient))) +#endif //__nvoc_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmClient(RmClient**, Dynamic*, NvU32, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RmClient(ppNewObj, pParent, createFlags, arg_pAllocator, arg_pParams) \ + __nvoc_objCreate_RmClient((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pAllocator, arg_pParams) + +#define rmclientValidate(pClient, pSecInfo) rmclientValidate_DISPATCH(pClient, pSecInfo) +#define rmclientFreeResource(pClient, pServer, pParams) rmclientFreeResource_DISPATCH(pClient, pServer, pParams) +#define rmclientInterMap(pClient, pMapperRef, pMappableRef, pParams) rmclientInterMap_DISPATCH(pClient, pMapperRef, pMappableRef, pParams) +#define rmclientInterUnmap(pClient, pMapperRef, pParams) rmclientInterUnmap_DISPATCH(pClient, pMapperRef, pParams) +#define rmclientPostProcessPendingFreeList(pClient, ppFirstLowPriRef) rmclientPostProcessPendingFreeList_DISPATCH(pClient, ppFirstLowPriRef) +#define rmclientDestructResourceRef(pClient, pServer, pResourceRef) rmclientDestructResourceRef_DISPATCH(pClient, pServer, pResourceRef) +#define rmclientValidateNewResourceHandle(pClient, hResource, bRestrict) rmclientValidateNewResourceHandle_DISPATCH(pClient, hResource, bRestrict) +#define rmclientShareResource(pClient, 
pResourceRef, pSharePolicy, pCallContext) rmclientShareResource_DISPATCH(pClient, pResourceRef, pSharePolicy, pCallContext) +#define rmclientUnmapMemory(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) rmclientUnmapMemory_DISPATCH(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) +NV_STATUS rmclientValidate_IMPL(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo); + +static inline NV_STATUS rmclientValidate_DISPATCH(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return pClient->__rmclientValidate__(pClient, pSecInfo); +} + +NV_STATUS rmclientFreeResource_IMPL(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS rmclientFreeResource_DISPATCH(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return pClient->__rmclientFreeResource__(pClient, pServer, pParams); +} + +NV_STATUS rmclientInterMap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS rmclientInterMap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return pClient->__rmclientInterMap__(pClient, pMapperRef, pMappableRef, pParams); +} + +void rmclientInterUnmap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams); + +static inline void rmclientInterUnmap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + pClient->__rmclientInterUnmap__(pClient, pMapperRef, pParams); +} + +NV_STATUS rmclientPostProcessPendingFreeList_IMPL(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef); + +static inline NV_STATUS rmclientPostProcessPendingFreeList_DISPATCH(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return pClient->__rmclientPostProcessPendingFreeList__(pClient, ppFirstLowPriRef); +} + +static inline NV_STATUS rmclientDestructResourceRef_DISPATCH(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) { + return pClient->__rmclientDestructResourceRef__(pClient, pServer, pResourceRef); +} + +static inline NV_STATUS rmclientValidateNewResourceHandle_DISPATCH(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) { + return pClient->__rmclientValidateNewResourceHandle__(pClient, hResource, bRestrict); +} + +static inline NV_STATUS rmclientShareResource_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return pClient->__rmclientShareResource__(pClient, pResourceRef, pSharePolicy, pCallContext); +} + +static inline NV_STATUS rmclientUnmapMemory_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return pClient->__rmclientUnmapMemory__(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +NV_STATUS rmclientConstruct_IMPL(struct RmClient *arg_pClient, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_rmclientConstruct(arg_pClient, arg_pAllocator, arg_pParams) rmclientConstruct_IMPL(arg_pClient, arg_pAllocator, arg_pParams) +void rmclientDestruct_IMPL(struct RmClient *pClient); +#define __nvoc_rmclientDestruct(pClient) 
rmclientDestruct_IMPL(pClient) +RS_PRIV_LEVEL rmclientGetCachedPrivilege_IMPL(struct RmClient *pClient); +#ifdef __nvoc_client_h_disabled +static inline RS_PRIV_LEVEL rmclientGetCachedPrivilege(struct RmClient *pClient) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + RS_PRIV_LEVEL ret; + portMemSet(&ret, 0, sizeof(RS_PRIV_LEVEL)); + return ret; +} +#else //__nvoc_client_h_disabled +#define rmclientGetCachedPrivilege(pClient) rmclientGetCachedPrivilege_IMPL(pClient) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsAdmin_IMPL(struct RmClient *pClient, RS_PRIV_LEVEL privLevel); +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsAdmin(struct RmClient *pClient, RS_PRIV_LEVEL privLevel) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsAdmin(pClient, privLevel) rmclientIsAdmin_IMPL(pClient, privLevel) +#endif //__nvoc_client_h_disabled + +void rmclientSetClientFlags_IMPL(struct RmClient *pClient, NvU32 clientFlags); +#ifdef __nvoc_client_h_disabled +static inline void rmclientSetClientFlags(struct RmClient *pClient, NvU32 clientFlags) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); +} +#else //__nvoc_client_h_disabled +#define rmclientSetClientFlags(pClient, clientFlags) rmclientSetClientFlags_IMPL(pClient, clientFlags) +#endif //__nvoc_client_h_disabled + +void *rmclientGetSecurityToken_IMPL(struct RmClient *pClient); +#ifdef __nvoc_client_h_disabled +static inline void *rmclientGetSecurityToken(struct RmClient *pClient) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NULL; +} +#else //__nvoc_client_h_disabled +#define rmclientGetSecurityToken(pClient) rmclientGetSecurityToken_IMPL(pClient) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsCapableOrAdmin_IMPL(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel); +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsCapableOrAdmin(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsCapableOrAdmin(pClient, capability, privLevel) rmclientIsCapableOrAdmin_IMPL(pClient, capability, privLevel) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsCapable_IMPL(struct RmClient *pClient, NvU32 capability); +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsCapable(struct RmClient *pClient, NvU32 capability) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsCapable(pClient, capability) rmclientIsCapable_IMPL(pClient, capability) +#endif //__nvoc_client_h_disabled + +#undef PRIVATE_FIELD + + +MAKE_LIST(RmClientList, RmClient*); +extern RmClientList g_clientListBehindGpusLock; +MAKE_LIST(UserInfoList, UserInfo*); +extern UserInfoList g_userInfoList; + + +// +// Convenience rmclientXxxByHandle util functions. Ideally, code operates on +// pClient directly instead of hClient, but these are provided for +// compatibility with hClient-heavy code. 
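+// +// Editorial sketch, not part of the generated header: each ByHandle helper is +// expected to resolve hClient to its RmClient and forward to the pClient +// variant, roughly: +// +// RmClient *pClient = serverutilGetClientUnderLock(hClient); // assumed lookup helper +// NvBool bAdmin = (pClient != NULL) && rmclientIsAdmin(pClient, privLevel); +// +// which is roughly what rmclientIsAdminByHandle(hClient, privLevel) below +// would return. 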
+// +RS_PRIV_LEVEL rmclientGetCachedPrivilegeByHandle(NvHandle hClient); +NvBool rmclientIsAdminByHandle(NvHandle hClient, RS_PRIV_LEVEL privLevel); +NvBool rmclientSetClientFlagsByHandle(NvHandle hClient, NvU32 clientFlags); +void rmclientPromoteDebuggerStateByHandle(NvHandle hClient, NvU32 newMinimumState); +void *rmclientGetSecurityTokenByHandle(NvHandle hClient); +NV_STATUS rmclientUserClientSecurityCheckByHandle(NvHandle hClient, const API_SECURITY_INFO *pSecInfo); +NvBool rmclientIsCapableOrAdminByHandle(NvHandle hClient, NvU32 capability, RS_PRIV_LEVEL privLevel); +NvBool rmclientIsCapableByHandle(NvHandle hClient, NvU32 capability); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CLIENT_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c new file mode 100644 index 0000000..1458006 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c @@ -0,0 +1,1269 @@ +#define NVOC_CLIENT_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_client_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x37a701 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_RmClientResource(RmClientResource*); +void __nvoc_init_funcTable_RmClientResource(RmClientResource*); +NV_STATUS __nvoc_ctor_RmClientResource(RmClientResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RmClientResource(RmClientResource*); +void __nvoc_dtor_RmClientResource(RmClientResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClientResource; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RmClientResource = { + /*pClassDef=*/ &__nvoc_class_def_RmClientResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClientResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RsClientResource = { + /*pClassDef=*/ &__nvoc_class_def_RsClientResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmClientResource = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_RmClientResource_RmClientResource, + &__nvoc_rtti_RmClientResource_Notifier, + &__nvoc_rtti_RmClientResource_INotifier, + &__nvoc_rtti_RmClientResource_RmResourceCommon, + &__nvoc_rtti_RmClientResource_RsClientResource, + &__nvoc_rtti_RmClientResource_RsResource, + &__nvoc_rtti_RmClientResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmClientResource), + /*classId=*/ classId(RmClientResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmClientResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClientResource, + /*pCastInfo=*/ &__nvoc_castinfo_RmClientResource, + /*pExportInfo=*/ &__nvoc_export_info_RmClientResource +}; + +static NvBool __nvoc_thunk_RmClientResource_resAccessCallback(struct RsResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return cliresAccessCallback((struct RmClientResource *)(((unsigned char *)pRmCliRes) - __nvoc_rtti_RmClientResource_RsResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NvBool __nvoc_thunk_RmClientResource_resShareCallback(struct RsResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return cliresShareCallback((struct RmClientResource *)(((unsigned char *)pRmCliRes) - __nvoc_rtti_RmClientResource_RsResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControl(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresUnmap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresMapTo(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_Notifier_cliresSetNotificationShare(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControlFilter(struct RmClientResource *pResource, struct CALL_CONTEXT 
*pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_cliresAddAdditionalDependants(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_cliresGetRefCount(struct RmClientResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_cliresUnregisterEvent(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_cliresCanCopy(struct RmClientResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControl_Prologue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl_Prologue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_cliresPreDestruct(struct RmClientResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresUnmapFrom(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_cliresGetNotificationListPtr(struct RmClientResource *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset)); +} + +static void __nvoc_thunk_RsResource_cliresControl_Epilogue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + resControl_Epilogue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_cliresGetNotificationShare(struct RmClientResource *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControlLookup(struct RmClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresMap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, 
RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_cliresGetOrAllocNotifShare(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClientResource[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetCpuInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x102u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetCpuInfo" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemSetMemorySize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x107u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemSetMemorySize" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetClassList_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x108u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetClassList" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemNotifyEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x110u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemNotifyEvent" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x121u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemDebugCtrlRmMsg" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
cliresCtrlCmdSystemGetPrivilegedStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x135u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetPrivilegedStatus" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetFabricStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x136u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetFabricStatus" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetRmInstanceId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x139u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetRmInstanceId" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13cu, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemSyncExternalFabricMgmt" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13du, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetClientDatabaseInfo" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetBuildVersionV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13eu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetBuildVersionV2" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetFeatures_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1f0u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetFeatures" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetAttachedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x201u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetAttachedIds" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetIdInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x202u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetIdInfo" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetInitStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x203u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetInitStatus" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetDeviceIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x204u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetDeviceIds" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetIdInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x205u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetIdInfoV2" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetProbedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x214u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetProbedIds" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAttachIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x215u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_ATTACH_IDS_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAttachIds" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuDetachIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x216u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_DETACH_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuDetachIds" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetPciInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x21bu, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetPciInfo" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetSvmSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x240u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetSvmSize" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetUuidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x274u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetUuidInfo" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetUuidFromGpuId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x275u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetUuidFromGpuId" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuModifyGpuDrainState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x278u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuModifyGpuDrainState" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuQueryGpuDrainState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x279u, + /*paramSize=*/ 
sizeof(NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuQueryGpuDrainState" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetMemOpEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x27bu, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetMemOpEnable" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuDisableNvlinkInit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x281u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuDisableNvlinkInit" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdLegacyConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x282u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdLegacyConfig" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGsyncGetAttachedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x301u, + /*paramSize=*/ sizeof(NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGsyncGetAttachedIds" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGsyncGetIdInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x302u, + /*paramSize=*/ sizeof(NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGsyncGetIdInfo" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdEventSetNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x501u, + /*paramSize=*/ sizeof(NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdEventSetNotification" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdEventGetSystemEventStatus_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x502u, + /*paramSize=*/ sizeof(NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdEventGetSystemEventStatus" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSetSubProcessID_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x901u, + /*paramSize=*/ sizeof(NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSetSubProcessID" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x902u, + /*paramSize=*/ sizeof(NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdDisableSubProcessUserdIsolation" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetAddrSpaceType_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd01u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetAddrSpaceType" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetHandleInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd02u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetHandleInfo" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetAccessRights_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd03u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetAccessRights" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientSetInheritedSharePolicy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd04u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"cliresCtrlCmdClientSetInheritedSharePolicy" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetChildHandle_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd05u, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetChildHandle" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientShareObject_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd06u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientShareObject" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixFlushUserCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d02u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixFlushUserCache" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixExportObjectToFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d05u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixExportObjectToFd" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixImportObjectFromFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d06u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixImportObjectFromFd" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d08u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixGetExportObjectInfo" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, 
+ /*methodId=*/ 0x3d0au, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixCreateExportObjectFd" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixExportObjectsToFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0bu, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixExportObjectsToFd" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0cu, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixImportObjectsFromFd" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClientResource = +{ + /*numEntries=*/ 48, + /*pExportEntries=*/ __nvoc_exported_method_def_RmClientResource +}; + +void __nvoc_dtor_RsClientResource(RsClientResource*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_RmClientResource(RmClientResource *pThis) { + __nvoc_cliresDestruct(pThis); + __nvoc_dtor_RsClientResource(&pThis->__nvoc_base_RsClientResource); + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmClientResource(RmClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon* ); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_RmClientResource(RmClientResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsClientResource(&pThis->__nvoc_base_RsClientResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_RsClientResource; + status = __nvoc_ctor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_RmResourceCommon; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_Notifier; + __nvoc_init_dataField_RmClientResource(pThis); + + status = __nvoc_cliresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail__init; + goto __nvoc_ctor_RmClientResource_exit; // Success + +__nvoc_ctor_RmClientResource_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_RmClientResource_fail_Notifier: + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); 
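+    /*
+     * The fall-through labels form the failure unwind: entering at the
+     * point of failure destroys exactly the bases constructed so far, in
+     * reverse construction order, before status is returned.
+     */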
+__nvoc_ctor_RmClientResource_fail_RmResourceCommon: + __nvoc_dtor_RsClientResource(&pThis->__nvoc_base_RsClientResource); +__nvoc_ctor_RmClientResource_fail_RsClientResource: +__nvoc_ctor_RmClientResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmClientResource_1(RmClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__cliresAccessCallback__ = &cliresAccessCallback_IMPL; + + pThis->__cliresShareCallback__ = &cliresShareCallback_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdSystemGetCpuInfo__ = &cliresCtrlCmdSystemGetCpuInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetFeatures__ = &cliresCtrlCmdSystemGetFeatures_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdSystemGetBuildVersionV2__ = &cliresCtrlCmdSystemGetBuildVersionV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__cliresCtrlCmdSystemSetMemorySize__ = &cliresCtrlCmdSystemSetMemorySize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetClassList__ = &cliresCtrlCmdSystemGetClassList_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemNotifyEvent__ = &cliresCtrlCmdSystemNotifyEvent_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemDebugCtrlRmMsg__ = &cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetPrivilegedStatus__ = &cliresCtrlCmdSystemGetPrivilegedStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdSystemGetFabricStatus__ = &cliresCtrlCmdSystemGetFabricStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdSystemGetRmInstanceId__ = &cliresCtrlCmdSystemGetRmInstanceId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__cliresCtrlCmdSystemGetClientDatabaseInfo__ = &cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdClientGetAddrSpaceType__ = &cliresCtrlCmdClientGetAddrSpaceType_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientGetHandleInfo__ = &cliresCtrlCmdClientGetHandleInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientGetAccessRights__ = &cliresCtrlCmdClientGetAccessRights_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientSetInheritedSharePolicy__ = &cliresCtrlCmdClientSetInheritedSharePolicy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientShareObject__ = &cliresCtrlCmdClientShareObject_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientGetChildHandle__ = &cliresCtrlCmdClientGetChildHandle_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdGpuGetAttachedIds__ = &cliresCtrlCmdGpuGetAttachedIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdGpuGetIdInfo__ = &cliresCtrlCmdGpuGetIdInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdGpuGetIdInfoV2__ = &cliresCtrlCmdGpuGetIdInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + 
pThis->__cliresCtrlCmdGpuGetInitStatus__ = &cliresCtrlCmdGpuGetInitStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdGpuGetDeviceIds__ = &cliresCtrlCmdGpuGetDeviceIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdGpuGetProbedIds__ = &cliresCtrlCmdGpuGetProbedIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdGpuAttachIds__ = &cliresCtrlCmdGpuAttachIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdGpuDetachIds__ = &cliresCtrlCmdGpuDetachIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuGetSvmSize__ = &cliresCtrlCmdGpuGetSvmSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdGpuGetPciInfo__ = &cliresCtrlCmdGpuGetPciInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuGetUuidInfo__ = &cliresCtrlCmdGpuGetUuidInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuGetUuidFromGpuId__ = &cliresCtrlCmdGpuGetUuidFromGpuId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__cliresCtrlCmdGpuModifyGpuDrainState__ = &cliresCtrlCmdGpuModifyGpuDrainState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdGpuQueryGpuDrainState__ = &cliresCtrlCmdGpuQueryGpuDrainState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdGpuGetMemOpEnable__ = &cliresCtrlCmdGpuGetMemOpEnable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdGpuDisableNvlinkInit__ = &cliresCtrlCmdGpuDisableNvlinkInit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdLegacyConfig__ = &cliresCtrlCmdLegacyConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGsyncGetAttachedIds__ = &cliresCtrlCmdGsyncGetAttachedIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGsyncGetIdInfo__ = &cliresCtrlCmdGsyncGetIdInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdEventSetNotification__ = &cliresCtrlCmdEventSetNotification_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdEventGetSystemEventStatus__ = &cliresCtrlCmdEventGetSystemEventStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixExportObjectToFd__ = &cliresCtrlCmdOsUnixExportObjectToFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixImportObjectFromFd__ = &cliresCtrlCmdOsUnixImportObjectFromFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixGetExportObjectInfo__ = &cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixCreateExportObjectFd__ = &cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixExportObjectsToFd__ = &cliresCtrlCmdOsUnixExportObjectsToFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixImportObjectsFromFd__ = &cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + 
pThis->__cliresCtrlCmdOsUnixFlushUserCache__ = &cliresCtrlCmdOsUnixFlushUserCache_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSetSubProcessID__ = &cliresCtrlCmdSetSubProcessID_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdDisableSubProcessUserdIsolation__ = &cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__cliresCtrlCmdSystemSyncExternalFabricMgmt__ = &cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL; +#endif + + pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__resAccessCallback__ = &__nvoc_thunk_RmClientResource_resAccessCallback; + + pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__resShareCallback__ = &__nvoc_thunk_RmClientResource_resShareCallback; + + pThis->__cliresControl__ = &__nvoc_thunk_RsResource_cliresControl; + + pThis->__cliresUnmap__ = &__nvoc_thunk_RsResource_cliresUnmap; + + pThis->__cliresMapTo__ = &__nvoc_thunk_RsResource_cliresMapTo; + + pThis->__cliresSetNotificationShare__ = &__nvoc_thunk_Notifier_cliresSetNotificationShare; + + pThis->__cliresControlFilter__ = &__nvoc_thunk_RsResource_cliresControlFilter; + + pThis->__cliresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_cliresAddAdditionalDependants; + + pThis->__cliresGetRefCount__ = &__nvoc_thunk_RsResource_cliresGetRefCount; + + pThis->__cliresUnregisterEvent__ = &__nvoc_thunk_Notifier_cliresUnregisterEvent; + + pThis->__cliresCanCopy__ = &__nvoc_thunk_RsResource_cliresCanCopy; + + pThis->__cliresControl_Prologue__ = &__nvoc_thunk_RsResource_cliresControl_Prologue; + + pThis->__cliresPreDestruct__ = &__nvoc_thunk_RsResource_cliresPreDestruct; + + pThis->__cliresUnmapFrom__ = &__nvoc_thunk_RsResource_cliresUnmapFrom; + + pThis->__cliresGetNotificationListPtr__ = &__nvoc_thunk_Notifier_cliresGetNotificationListPtr; + + pThis->__cliresControl_Epilogue__ = &__nvoc_thunk_RsResource_cliresControl_Epilogue; + + pThis->__cliresGetNotificationShare__ = &__nvoc_thunk_Notifier_cliresGetNotificationShare; + + pThis->__cliresControlLookup__ = &__nvoc_thunk_RsResource_cliresControlLookup; + + pThis->__cliresMap__ = &__nvoc_thunk_RsResource_cliresMap; + + pThis->__cliresGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_cliresGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_RmClientResource(RmClientResource *pThis) { + __nvoc_init_funcTable_RmClientResource_1(pThis); +} + +void __nvoc_init_RsClientResource(RsClientResource*); +void __nvoc_init_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_RmClientResource(RmClientResource *pThis) { + pThis->__nvoc_pbase_RmClientResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RsClientResource = &pThis->__nvoc_base_RsClientResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_RsClientResource(&pThis->__nvoc_base_RsClientResource); + __nvoc_init_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_RmClientResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource 
**ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RmClientResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(RmClientResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RmClientResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmClientResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RmClientResource(pThis); + status = __nvoc_ctor_RmClientResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RmClientResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RmClientResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmClientResource(RmClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmClientResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h new file mode 100644 index 0000000..b15641b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h @@ -0,0 +1,635 @@ +#ifndef _G_CLIENT_RESOURCE_NVOC_H_ +#define _G_CLIENT_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_client_resource_nvoc.h" + +#ifndef _CLIENT_RESOURCE_H_ +#define _CLIENT_RESOURCE_H_ + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_client.h" +#include "rmapi/resource.h" +#include "rmapi/event.h" +#include "rmapi/control.h" + +#include "ctrl/ctrl0000/ctrl0000gpu.h" +#include "ctrl/ctrl0000/ctrl0000gpuacct.h" +#include "ctrl/ctrl0000/ctrl0000gsync.h" +#include "ctrl/ctrl0000/ctrl0000diag.h" +#include "ctrl/ctrl0000/ctrl0000event.h" +#include "ctrl/ctrl0000/ctrl0000nvd.h" +#include "ctrl/ctrl0000/ctrl0000proc.h" +#include "ctrl/ctrl0000/ctrl0000syncgpuboost.h" +#include "ctrl/ctrl0000/ctrl0000gspc.h" +#include "ctrl/ctrl0000/ctrl0000vgpu.h" +#include "ctrl/ctrl0000/ctrl0000client.h" + +/* include appropriate os-specific command header */ +#if defined(NV_UNIX) || defined(NV_QNX) +#include "ctrl/ctrl0000/ctrl0000unix.h" +#endif + +#ifdef NVOC_CLIENT_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmClientResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsClientResource __nvoc_base_RsClientResource; + struct RmResourceCommon __nvoc_base_RmResourceCommon; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RsClientResource *__nvoc_pbase_RsClientResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct RmClientResource *__nvoc_pbase_RmClientResource; + NvBool (*__cliresAccessCallback__)(struct RmClientResource *, struct RsClient *, void *, RsAccessRight); + NvBool (*__cliresShareCallback__)(struct RmClientResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__cliresCtrlCmdSystemGetCpuInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetFeatures__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetBuildVersionV2__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemSetMemorySize__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetClassList__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemNotifyEvent__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemDebugCtrlRmMsg__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetPrivilegedStatus__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetFabricStatus__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetRmInstanceId__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetClientDatabaseInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetAddrSpaceType__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetHandleInfo__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetAccessRights__)(struct 
RmClientResource *, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientSetInheritedSharePolicy__)(struct RmClientResource *, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientShareObject__)(struct RmClientResource *, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetChildHandle__)(struct RmClientResource *, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetAttachedIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetIdInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetIdInfoV2__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetInitStatus__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetDeviceIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetProbedIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuAttachIds__)(struct RmClientResource *, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuDetachIds__)(struct RmClientResource *, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetSvmSize__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetPciInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetUuidInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetUuidFromGpuId__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuModifyGpuDrainState__)(struct RmClientResource *, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuQueryGpuDrainState__)(struct RmClientResource *, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetMemOpEnable__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuDisableNvlinkInit__)(struct RmClientResource *, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *); + NV_STATUS (*__cliresCtrlCmdLegacyConfig__)(struct RmClientResource *, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGsyncGetAttachedIds__)(struct RmClientResource *, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGsyncGetIdInfo__)(struct RmClientResource *, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdEventSetNotification__)(struct RmClientResource *, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *); + NV_STATUS (*__cliresCtrlCmdEventGetSystemEventStatus__)(struct RmClientResource *, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixExportObjectToFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectFromFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixGetExportObjectInfo__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixCreateExportObjectFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *); + NV_STATUS 
(*__cliresCtrlCmdOsUnixExportObjectsToFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectsFromFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixFlushUserCache__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSetSubProcessID__)(struct RmClientResource *, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *); + NV_STATUS (*__cliresCtrlCmdDisableSubProcessUserdIsolation__)(struct RmClientResource *, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemSyncExternalFabricMgmt__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *); + NV_STATUS (*__cliresControl__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__cliresUnmap__)(struct RmClientResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__cliresMapTo__)(struct RmClientResource *, RS_RES_MAP_TO_PARAMS *); + void (*__cliresSetNotificationShare__)(struct RmClientResource *, struct NotifShare *); + NV_STATUS (*__cliresControlFilter__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__cliresAddAdditionalDependants__)(struct RsClient *, struct RmClientResource *, RsResourceRef *); + NvU32 (*__cliresGetRefCount__)(struct RmClientResource *); + NV_STATUS (*__cliresUnregisterEvent__)(struct RmClientResource *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__cliresCanCopy__)(struct RmClientResource *); + NV_STATUS (*__cliresControl_Prologue__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__cliresPreDestruct__)(struct RmClientResource *); + NV_STATUS (*__cliresUnmapFrom__)(struct RmClientResource *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__cliresGetNotificationListPtr__)(struct RmClientResource *); + void (*__cliresControl_Epilogue__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__cliresGetNotificationShare__)(struct RmClientResource *); + NV_STATUS (*__cliresControlLookup__)(struct RmClientResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__cliresMap__)(struct RmClientResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__cliresGetOrAllocNotifShare__)(struct RmClientResource *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_RmClientResource_TYPEDEF__ +#define __NVOC_CLASS_RmClientResource_TYPEDEF__ +typedef struct RmClientResource RmClientResource; +#endif /* __NVOC_CLASS_RmClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClientResource +#define __nvoc_class_id_RmClientResource 0x37a701 +#endif /* __nvoc_class_id_RmClientResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource; + +#define __staticCast_RmClientResource(pThis) \ + ((pThis)->__nvoc_pbase_RmClientResource) + +#ifdef __nvoc_client_resource_h_disabled +#define __dynamicCast_RmClientResource(pThis) ((RmClientResource*)NULL) +#else //__nvoc_client_resource_h_disabled +#define __dynamicCast_RmClientResource(pThis) \ + ((RmClientResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClientResource))) +#endif //__nvoc_client_resource_h_disabled + + +NV_STATUS 
__nvoc_objCreateDynamic_RmClientResource(RmClientResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RmClientResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RmClientResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define cliresAccessCallback(pRmCliRes, pInvokingClient, pAllocParams, accessRight) cliresAccessCallback_DISPATCH(pRmCliRes, pInvokingClient, pAllocParams, accessRight) +#define cliresShareCallback(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy) cliresShareCallback_DISPATCH(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy) +#define cliresCtrlCmdSystemGetCpuInfo(pRmCliRes, pCpuInfoParams) cliresCtrlCmdSystemGetCpuInfo_DISPATCH(pRmCliRes, pCpuInfoParams) +#define cliresCtrlCmdSystemGetFeatures(pRmCliRes, pParams) cliresCtrlCmdSystemGetFeatures_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetBuildVersionV2(pRmCliRes, pParams) cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemSetMemorySize(pRmCliRes, pParams) cliresCtrlCmdSystemSetMemorySize_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetClassList(pRmCliRes, pParams) cliresCtrlCmdSystemGetClassList_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemNotifyEvent(pRmCliRes, pParams) cliresCtrlCmdSystemNotifyEvent_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemDebugCtrlRmMsg(pRmCliRes, pParams) cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetPrivilegedStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetFabricStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetFabricStatus_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetRmInstanceId(pRmCliRes, pRmInstanceIdParams) cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(pRmCliRes, pRmInstanceIdParams) +#define cliresCtrlCmdSystemGetClientDatabaseInfo(pRmCliRes, pParams) cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientGetAddrSpaceType(pRmCliRes, pParams) cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientGetHandleInfo(pRmCliRes, pParams) cliresCtrlCmdClientGetHandleInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientGetAccessRights(pRmCliRes, pParams) cliresCtrlCmdClientGetAccessRights_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientSetInheritedSharePolicy(pRmCliRes, pParams) cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientShareObject(pRmCliRes, pParams) cliresCtrlCmdClientShareObject_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientGetChildHandle(pRmCliRes, pParams) cliresCtrlCmdClientGetChildHandle_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuGetAttachedIds(pRmCliRes, pGpuAttachedIds) cliresCtrlCmdGpuGetAttachedIds_DISPATCH(pRmCliRes, pGpuAttachedIds) +#define cliresCtrlCmdGpuGetIdInfo(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfo_DISPATCH(pRmCliRes, pGpuIdInfoParams) +#define cliresCtrlCmdGpuGetIdInfoV2(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(pRmCliRes, pGpuIdInfoParams) +#define cliresCtrlCmdGpuGetInitStatus(pRmCliRes, pGpuInitStatusParams) cliresCtrlCmdGpuGetInitStatus_DISPATCH(pRmCliRes, 
pGpuInitStatusParams) +#define cliresCtrlCmdGpuGetDeviceIds(pRmCliRes, pDeviceIdsParams) cliresCtrlCmdGpuGetDeviceIds_DISPATCH(pRmCliRes, pDeviceIdsParams) +#define cliresCtrlCmdGpuGetProbedIds(pRmCliRes, pGpuProbedIds) cliresCtrlCmdGpuGetProbedIds_DISPATCH(pRmCliRes, pGpuProbedIds) +#define cliresCtrlCmdGpuAttachIds(pRmCliRes, pGpuAttachIds) cliresCtrlCmdGpuAttachIds_DISPATCH(pRmCliRes, pGpuAttachIds) +#define cliresCtrlCmdGpuDetachIds(pRmCliRes, pGpuDetachIds) cliresCtrlCmdGpuDetachIds_DISPATCH(pRmCliRes, pGpuDetachIds) +#define cliresCtrlCmdGpuGetSvmSize(pRmCliRes, pSvmSizeGetParams) cliresCtrlCmdGpuGetSvmSize_DISPATCH(pRmCliRes, pSvmSizeGetParams) +#define cliresCtrlCmdGpuGetPciInfo(pRmCliRes, pPciInfoParams) cliresCtrlCmdGpuGetPciInfo_DISPATCH(pRmCliRes, pPciInfoParams) +#define cliresCtrlCmdGpuGetUuidInfo(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuGetUuidFromGpuId(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuModifyGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuQueryGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuGetMemOpEnable(pRmCliRes, pMemOpEnableParams) cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(pRmCliRes, pMemOpEnableParams) +#define cliresCtrlCmdGpuDisableNvlinkInit(pRmCliRes, pParams) cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdLegacyConfig(pRmCliRes, pParams) cliresCtrlCmdLegacyConfig_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGsyncGetAttachedIds(pRmCliRes, pGsyncAttachedIds) cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(pRmCliRes, pGsyncAttachedIds) +#define cliresCtrlCmdGsyncGetIdInfo(pRmCliRes, pGsyncIdInfoParams) cliresCtrlCmdGsyncGetIdInfo_DISPATCH(pRmCliRes, pGsyncIdInfoParams) +#define cliresCtrlCmdEventSetNotification(pRmCliRes, pEventSetNotificationParams) cliresCtrlCmdEventSetNotification_DISPATCH(pRmCliRes, pEventSetNotificationParams) +#define cliresCtrlCmdEventGetSystemEventStatus(pRmCliRes, pSystemEventStatusParams) cliresCtrlCmdEventGetSystemEventStatus_DISPATCH(pRmCliRes, pSystemEventStatusParams) +#define cliresCtrlCmdOsUnixExportObjectToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixImportObjectFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixGetExportObjectInfo(pRmCliRes, pParams) cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixCreateExportObjectFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixExportObjectsToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixImportObjectsFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixFlushUserCache(pRmCliRes, pAddressSpaceParams) cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(pRmCliRes, pAddressSpaceParams) +#define cliresCtrlCmdSetSubProcessID(pRmCliRes, pParams) cliresCtrlCmdSetSubProcessID_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdDisableSubProcessUserdIsolation(pRmCliRes, pParams) cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(pRmCliRes, pParams) +#define 
cliresCtrlCmdSystemSyncExternalFabricMgmt(pRmCliRes, pExtFabricMgmtParams) cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(pRmCliRes, pExtFabricMgmtParams) +#define cliresControl(pResource, pCallContext, pParams) cliresControl_DISPATCH(pResource, pCallContext, pParams) +#define cliresUnmap(pResource, pCallContext, pCpuMapping) cliresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define cliresMapTo(pResource, pParams) cliresMapTo_DISPATCH(pResource, pParams) +#define cliresSetNotificationShare(pNotifier, pNotifShare) cliresSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define cliresControlFilter(pResource, pCallContext, pParams) cliresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define cliresAddAdditionalDependants(pClient, pResource, pReference) cliresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define cliresGetRefCount(pResource) cliresGetRefCount_DISPATCH(pResource) +#define cliresUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) cliresUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define cliresCanCopy(pResource) cliresCanCopy_DISPATCH(pResource) +#define cliresControl_Prologue(pResource, pCallContext, pParams) cliresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define cliresPreDestruct(pResource) cliresPreDestruct_DISPATCH(pResource) +#define cliresUnmapFrom(pResource, pParams) cliresUnmapFrom_DISPATCH(pResource, pParams) +#define cliresGetNotificationListPtr(pNotifier) cliresGetNotificationListPtr_DISPATCH(pNotifier) +#define cliresControl_Epilogue(pResource, pCallContext, pParams) cliresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define cliresGetNotificationShare(pNotifier) cliresGetNotificationShare_DISPATCH(pNotifier) +#define cliresControlLookup(pResource, pParams, ppEntry) cliresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define cliresMap(pResource, pCallContext, pParams, pCpuMapping) cliresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define cliresGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) cliresGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NvBool cliresAccessCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +static inline NvBool cliresAccessCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pRmCliRes->__cliresAccessCallback__(pRmCliRes, pInvokingClient, pAllocParams, accessRight); +} + +NvBool cliresShareCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool cliresShareCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pRmCliRes->__cliresShareCallback__(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS cliresCtrlCmdSystemGetCpuInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetCpuInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetCpuInfo__(pRmCliRes, pCpuInfoParams); +} + 
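+/*
+ * Each control below comes as an _IMPL/_DISPATCH pair: the _DISPATCH inline
+ * performs virtual dispatch through the per-object function pointer installed
+ * by __nvoc_init_funcTable_RmClientResource_1(), which normally points at the
+ * corresponding _IMPL. A hypothetical caller goes through the macro defined
+ * earlier in this header, e.g.:
+ *
+ *     NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS params = { 0 };
+ *     NV_STATUS status = cliresCtrlCmdSystemGetFeatures(pRmCliRes, &params);
+ *     // expands to cliresCtrlCmdSystemGetFeatures_DISPATCH(pRmCliRes, &params),
+ *     // which calls pRmCliRes->__cliresCtrlCmdSystemGetFeatures__(pRmCliRes, &params)
+ */
+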
+NV_STATUS cliresCtrlCmdSystemGetFeatures_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemGetFeatures_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemGetFeatures__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemGetBuildVersionV2__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemSetMemorySize_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemSetMemorySize_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemSetMemorySize__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemGetClassList_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemGetClassList_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemGetClassList__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemNotifyEvent_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemNotifyEvent_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemNotifyEvent__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemDebugCtrlRmMsg__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemGetPrivilegedStatus__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemGetFabricStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemGetFabricStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemGetFabricStatus__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemGetRmInstanceId__(pRmCliRes, pRmInstanceIdParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemGetClientDatabaseInfo__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdClientGetAddrSpaceType__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdClientGetHandleInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdClientGetHandleInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdClientGetHandleInfo__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdClientGetAccessRights_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdClientGetAccessRights_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdClientGetAccessRights__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdClientSetInheritedSharePolicy__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdClientShareObject_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdClientShareObject_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdClientShareObject__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdClientGetChildHandle_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdClientGetChildHandle_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdClientGetChildHandle__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetAttachedIds__(pRmCliRes, pGpuAttachedIds);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetIdInfo__(pRmCliRes, pGpuIdInfoParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetIdInfoV2__(pRmCliRes, pGpuIdInfoParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetInitStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetInitStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetInitStatus__(pRmCliRes, pGpuInitStatusParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetDeviceIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetDeviceIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetDeviceIds__(pRmCliRes, pDeviceIdsParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetProbedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetProbedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetProbedIds__(pRmCliRes, pGpuProbedIds);
+}
+
+NV_STATUS cliresCtrlCmdGpuAttachIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds);
+
+static inline NV_STATUS cliresCtrlCmdGpuAttachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds) {
+    return pRmCliRes->__cliresCtrlCmdGpuAttachIds__(pRmCliRes, pGpuAttachIds);
+}
+
+NV_STATUS cliresCtrlCmdGpuDetachIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds);
+
+static inline NV_STATUS cliresCtrlCmdGpuDetachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds) {
+    return pRmCliRes->__cliresCtrlCmdGpuDetachIds__(pRmCliRes, pGpuDetachIds);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetSvmSize_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetSvmSize_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetSvmSize__(pRmCliRes, pSvmSizeGetParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetPciInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetPciInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetPciInfo__(pRmCliRes, pPciInfoParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetUuidInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetUuidInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetUuidInfo__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetUuidFromGpuId__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuModifyGpuDrainState__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuQueryGpuDrainState__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuGetMemOpEnable__(pRmCliRes, pMemOpEnableParams);
+}
+
+NV_STATUS cliresCtrlCmdGpuDisableNvlinkInit_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdGpuDisableNvlinkInit__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdLegacyConfig_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdLegacyConfig_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdLegacyConfig__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds);
+
+static inline NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds) {
+    return pRmCliRes->__cliresCtrlCmdGsyncGetAttachedIds__(pRmCliRes, pGsyncAttachedIds);
+}
+
+NV_STATUS cliresCtrlCmdGsyncGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams);
+
+static inline NV_STATUS cliresCtrlCmdGsyncGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams) {
+    return pRmCliRes->__cliresCtrlCmdGsyncGetIdInfo__(pRmCliRes, pGsyncIdInfoParams);
+}
+
+NV_STATUS cliresCtrlCmdEventSetNotification_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams);
+
+static inline NV_STATUS cliresCtrlCmdEventSetNotification_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams) {
+    return pRmCliRes->__cliresCtrlCmdEventSetNotification__(pRmCliRes, pEventSetNotificationParams);
+}
+
+NV_STATUS cliresCtrlCmdEventGetSystemEventStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams);
+
+static inline NV_STATUS cliresCtrlCmdEventGetSystemEventStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams) {
+    return pRmCliRes->__cliresCtrlCmdEventGetSystemEventStatus__(pRmCliRes, pSystemEventStatusParams);
+}
+
+NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectToFd__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdOsUnixImportObjectFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectFromFd__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdOsUnixGetExportObjectInfo__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdOsUnixCreateExportObjectFd__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectsToFd__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectsFromFd__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams);
+
+static inline NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams) {
+    return pRmCliRes->__cliresCtrlCmdOsUnixFlushUserCache__(pRmCliRes, pAddressSpaceParams);
+}
+
+NV_STATUS cliresCtrlCmdSetSubProcessID_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdSetSubProcessID_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdSetSubProcessID__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams);
+
+static inline NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams) {
+    return pRmCliRes->__cliresCtrlCmdDisableSubProcessUserdIsolation__(pRmCliRes, pParams);
+}
+
+NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams);
+
+static inline NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams) {
+    return pRmCliRes->__cliresCtrlCmdSystemSyncExternalFabricMgmt__(pRmCliRes, pExtFabricMgmtParams);
+}
+
+static inline NV_STATUS cliresControl_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__cliresControl__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS cliresUnmap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return pResource->__cliresUnmap__(pResource, pCallContext, pCpuMapping);
+}
+
+static inline NV_STATUS cliresMapTo_DISPATCH(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return pResource->__cliresMapTo__(pResource, pParams);
+}
+
+static inline void cliresSetNotificationShare_DISPATCH(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare) {
+    pNotifier->__cliresSetNotificationShare__(pNotifier, pNotifShare);
+}
+
+static inline NV_STATUS cliresControlFilter_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__cliresControlFilter__(pResource, pCallContext, pParams);
+}
+
+static inline void cliresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference) {
+    pResource->__cliresAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
+static inline NvU32 cliresGetRefCount_DISPATCH(struct RmClientResource *pResource) {
+    return pResource->__cliresGetRefCount__(pResource);
+}
+
+static inline NV_STATUS cliresUnregisterEvent_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return pNotifier->__cliresUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+static inline NvBool cliresCanCopy_DISPATCH(struct RmClientResource *pResource) {
+    return pResource->__cliresCanCopy__(pResource);
+}
+
+static inline NV_STATUS cliresControl_Prologue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__cliresControl_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void cliresPreDestruct_DISPATCH(struct RmClientResource *pResource) {
+    pResource->__cliresPreDestruct__(pResource);
+}
+
+static inline NV_STATUS cliresUnmapFrom_DISPATCH(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return pResource->__cliresUnmapFrom__(pResource, pParams);
+}
+
+static inline PEVENTNOTIFICATION *cliresGetNotificationListPtr_DISPATCH(struct RmClientResource *pNotifier) {
+    return pNotifier->__cliresGetNotificationListPtr__(pNotifier);
+}
+
+static inline void cliresControl_Epilogue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    pResource->__cliresControl_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline struct NotifShare *cliresGetNotificationShare_DISPATCH(struct RmClientResource *pNotifier) {
+    return pNotifier->__cliresGetNotificationShare__(pNotifier);
+}
+
+static inline NV_STATUS cliresControlLookup_DISPATCH(struct RmClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
+    return pResource->__cliresControlLookup__(pResource, pParams, ppEntry);
+}
+
+static inline NV_STATUS cliresMap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return pResource->__cliresMap__(pResource, pCallContext, pParams, pCpuMapping);
+}
+
+static inline NV_STATUS cliresGetOrAllocNotifShare_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return pNotifier->__cliresGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+NV_STATUS cliresConstruct_IMPL(struct RmClientResource *arg_pRmCliRes, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __nvoc_cliresConstruct(arg_pRmCliRes, arg_pCallContext, arg_pParams) cliresConstruct_IMPL(arg_pRmCliRes, arg_pCallContext, arg_pParams)
+void cliresDestruct_IMPL(struct RmClientResource *pRmCliRes);
+#define __nvoc_cliresDestruct(pRmCliRes) cliresDestruct_IMPL(pRmCliRes)
+#undef PRIVATE_FIELD
+
+
+NV_STATUS CliGetSystemP2pCaps(NvU32 *gpuIds,
+                              NvU32 gpuCount,
+                              NvU32 *p2pCaps,
+                              NvU32 *p2pOptimalReadCEs,
+                              NvU32 *p2pOptimalWriteCEs,
+                              NvU8 *p2pCapsStatus,
+                              NvU32 *pBusPeerIds);
+
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_CLIENT_RESOURCE_NVOC_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c
new file mode 100644
index 0000000..f686355
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c
@@ -0,0 +1,427 @@
+#define NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_context_dma_nvoc.h"
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0x88441b = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
+
+void __nvoc_init_ContextDma(ContextDma*);
+void __nvoc_init_funcTable_ContextDma(ContextDma*);
+NV_STATUS __nvoc_ctor_ContextDma(ContextDma*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
+void __nvoc_init_dataField_ContextDma(ContextDma*);
+void __nvoc_dtor_ContextDma(ContextDma*);
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma;
+
+static const struct NVOC_RTTI __nvoc_rtti_ContextDma_ContextDma = {
+    /*pClassDef=*/ &__nvoc_class_def_ContextDma,
+    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ContextDma,
+    /*offset=*/ 0,
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Object = {
+    /*pClassDef=*/ &__nvoc_class_def_Object,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RsResource = {
+    /*pClassDef=*/ &__nvoc_class_def_RsResource,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResourceCommon = {
+    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResource = {
+    /*pClassDef=*/ &__nvoc_class_def_RmResource,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_ContextDma_INotifier = {
+    /*pClassDef=*/ &__nvoc_class_def_INotifier,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier.__nvoc_base_INotifier),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Notifier = {
+    /*pClassDef=*/ &__nvoc_class_def_Notifier,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier),
+};
+
+static const struct NVOC_CASTINFO __nvoc_castinfo_ContextDma = {
+    /*numRelatives=*/ 7,
+    /*relatives=*/ {
+        &__nvoc_rtti_ContextDma_ContextDma,
+        &__nvoc_rtti_ContextDma_Notifier,
+        &__nvoc_rtti_ContextDma_INotifier,
+        &__nvoc_rtti_ContextDma_RmResource,
+        &__nvoc_rtti_ContextDma_RmResourceCommon,
+        &__nvoc_rtti_ContextDma_RsResource,
+        &__nvoc_rtti_ContextDma_Object,
+    },
+};
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(ContextDma),
+        /*classId=*/ classId(ContextDma),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "ContextDma",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ContextDma,
+    /*pCastInfo=*/ &__nvoc_castinfo_ContextDma,
+    /*pExportInfo=*/ &__nvoc_export_info_ContextDma
+};
+
+static NV_STATUS __nvoc_thunk_ContextDma_resMapTo(struct RsResource *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) {
+    return ctxdmaMapTo((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams);
+}
+
+static NV_STATUS __nvoc_thunk_ContextDma_resUnmapFrom(struct RsResource *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return ctxdmaUnmapFrom((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams);
+}
+
+static NvBool __nvoc_thunk_RmResource_ctxdmaShareCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), bSubdeviceHandleProvided);
+}
+
+static NvBool __nvoc_thunk_RmResource_ctxdmaAccessCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), ppMemDesc);
+}
+
+static void __nvoc_thunk_Notifier_ctxdmaSetNotificationShare(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), pNotifShare);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControl(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlFilter(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams);
+}
+
+static NvU32 __nvoc_thunk_RsResource_ctxdmaGetRefCount(struct ContextDma *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_Notifier_ctxdmaUnregisterEvent(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_ctxdmaUnmap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pCpuMapping);
+}
+
+static NvBool __nvoc_thunk_RsResource_ctxdmaCanCopy(struct ContextDma *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_ctxdmaControl_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams);
+}
+
+static void __nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pReference);
+}
+
+static void __nvoc_thunk_RsResource_ctxdmaPreDestruct(struct ContextDma *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
+}
+
+static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr(struct ContextDma *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset));
+}
+
+static void __nvoc_thunk_RmResource_ctxdmaControl_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams);
+}
+
+static struct NotifShare *__nvoc_thunk_Notifier_ctxdmaGetNotificationShare(struct ContextDma *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlLookup(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
+    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pParams, ppEntry);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_ctxdmaMap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams, pCpuMapping);
+}
+
+static NV_STATUS __nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
+#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
+#endif
+
+static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ContextDma[] =
+{
+    { /* [0] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUpdateContextdma_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+        /*flags=*/ 0x0u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x20101u,
+        /*paramSize=*/ sizeof(NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "ctxdmaCtrlCmdUpdateContextdma"
+#endif
+    },
+    { /* [1] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdBindContextdma_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+        /*flags=*/ 0x10u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x20102u,
+        /*paramSize=*/ sizeof(NV0002_CTRL_BIND_CONTEXTDMA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "ctxdmaCtrlCmdBindContextdma"
+#endif
+    },
+    { /* [2] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+        /*pFunc=*/ (void (*)(void)) NULL,
+#else
+        /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUnbindContextdma_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+        /*flags=*/ 0x10u,
+        /*accessRight=*/0x0u,
+        /*methodId=*/ 0x20103u,
+        /*paramSize=*/ sizeof(NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS),
+        /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*func=*/ "ctxdmaCtrlCmdUnbindContextdma"
+#endif
+    },
+
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma =
+{
+    /*numEntries=*/ 3,
+    /*pExportEntries=*/ __nvoc_exported_method_def_ContextDma
+};
+
+void __nvoc_dtor_RmResource(RmResource*);
+void __nvoc_dtor_Notifier(Notifier*);
+void __nvoc_dtor_ContextDma(ContextDma *pThis) {
+    __nvoc_ctxdmaDestruct(pThis);
+    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_ContextDma(ContextDma *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
+NV_STATUS __nvoc_ctor_ContextDma(ContextDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_RmResource;
+    status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_Notifier;
+    __nvoc_init_dataField_ContextDma(pThis);
+
+    status = __nvoc_ctxdmaConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail__init;
+    goto __nvoc_ctor_ContextDma_exit; // Success
+
+__nvoc_ctor_ContextDma_fail__init:
+    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+__nvoc_ctor_ContextDma_fail_Notifier:
+    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+__nvoc_ctor_ContextDma_fail_RmResource:
+__nvoc_ctor_ContextDma_exit:
+
+    return status;
+}
+
+static void __nvoc_init_funcTable_ContextDma_1(ContextDma *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    pThis->__ctxdmaValidate__ = &ctxdmaValidate_IMPL;
+
+    pThis->__ctxdmaGetKernelVA__ = &ctxdmaGetKernelVA_IMPL;
+
+    pThis->__ctxdmaMapTo__ = &ctxdmaMapTo_IMPL;
+
+    pThis->__ctxdmaUnmapFrom__ = &ctxdmaUnmapFrom_IMPL;
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+    pThis->__ctxdmaCtrlCmdUpdateContextdma__ = &ctxdmaCtrlCmdUpdateContextdma_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+    pThis->__ctxdmaCtrlCmdBindContextdma__ = &ctxdmaCtrlCmdBindContextdma_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+    pThis->__ctxdmaCtrlCmdUnbindContextdma__ = &ctxdmaCtrlCmdUnbindContextdma_IMPL;
+#endif
+
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMapTo__ = &__nvoc_thunk_ContextDma_resMapTo;
+
+    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmapFrom__ = &__nvoc_thunk_ContextDma_resUnmapFrom;
+
+    pThis->__ctxdmaShareCallback__ = &__nvoc_thunk_RmResource_ctxdmaShareCallback;
+
+    pThis->__ctxdmaCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap;
+
+    pThis->__ctxdmaAccessCallback__ = &__nvoc_thunk_RmResource_ctxdmaAccessCallback;
+
+    pThis->__ctxdmaGetMemInterMapParams__ = &__nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams;
+
+    pThis->__ctxdmaGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor;
+
+    pThis->__ctxdmaSetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaSetNotificationShare;
+
+    pThis->__ctxdmaControl__ = &__nvoc_thunk_RsResource_ctxdmaControl;
+
+    pThis->__ctxdmaControlFilter__ = &__nvoc_thunk_RsResource_ctxdmaControlFilter;
+
+    pThis->__ctxdmaGetRefCount__ = &__nvoc_thunk_RsResource_ctxdmaGetRefCount;
+
+    pThis->__ctxdmaUnregisterEvent__ = &__nvoc_thunk_Notifier_ctxdmaUnregisterEvent;
+
+    pThis->__ctxdmaUnmap__ = &__nvoc_thunk_RsResource_ctxdmaUnmap;
+
+    pThis->__ctxdmaCanCopy__ = &__nvoc_thunk_RsResource_ctxdmaCanCopy;
+
+    pThis->__ctxdmaControl_Prologue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Prologue;
+
+    pThis->__ctxdmaAddAdditionalDependants__ = &__nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants;
+
+    pThis->__ctxdmaPreDestruct__ = &__nvoc_thunk_RsResource_ctxdmaPreDestruct;
+
+    pThis->__ctxdmaGetNotificationListPtr__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr;
+
+    pThis->__ctxdmaControl_Epilogue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Epilogue;
+
+    pThis->__ctxdmaGetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationShare;
+
+    pThis->__ctxdmaControlLookup__ = &__nvoc_thunk_RsResource_ctxdmaControlLookup;
+
+    pThis->__ctxdmaMap__ = &__nvoc_thunk_RsResource_ctxdmaMap;
+
+    pThis->__ctxdmaGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare;
+}
+
+void __nvoc_init_funcTable_ContextDma(ContextDma *pThis) {
+    __nvoc_init_funcTable_ContextDma_1(pThis);
+}
+
+void __nvoc_init_RmResource(RmResource*);
+void __nvoc_init_Notifier(Notifier*);
+void __nvoc_init_ContextDma(ContextDma *pThis) {
+    pThis->__nvoc_pbase_ContextDma = pThis;
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier;
+    pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier;
+    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
+    __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier);
+    __nvoc_init_funcTable_ContextDma(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status;
+    Object *pParentObj;
+    ContextDma *pThis;
+
+    pThis = portMemAllocNonPaged(sizeof(ContextDma));
+    if (pThis == NULL) return NV_ERR_NO_MEMORY;
+
+    portMemSet(pThis, 0, sizeof(ContextDma));
+
+    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ContextDma);
+
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init_ContextDma(pThis);
+    status = __nvoc_ctor_ContextDma(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_ContextDma_cleanup;
+
+    *ppThis = pThis;
+    return NV_OK;
+
+__nvoc_objCreate_ContextDma_cleanup:
+    // do not call destructors here since the constructor already called them
+    portMemFree(pThis);
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_ContextDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h
new file mode 100644
index 0000000..77e92cd
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h
@@ -0,0 +1,356 @@
+#ifndef _G_CONTEXT_DMA_NVOC_H_
+#define _G_CONTEXT_DMA_NVOC_H_
+#include "nvoc/runtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_context_dma_nvoc.h"
+
+#ifndef CONTEXT_DMA_H
+#define CONTEXT_DMA_H
+
+#include "core/core.h"
+#include "gpu/mem_mgr/mem_desc.h"
+#include "rmapi/resource.h"
+#include "rmapi/event.h"
+#include "ctrl/ctrl0002.h"
+#include "rmapi/control.h" // for macro RMCTRL_EXPORT etc.
+#include "nvlimits.h"
+
+struct Device;
+
+#ifndef __NVOC_CLASS_Device_TYPEDEF__
+#define __NVOC_CLASS_Device_TYPEDEF__
+typedef struct Device Device;
+#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Device
+#define __nvoc_class_id_Device 0xe0ac20
+#endif /* __nvoc_class_id_Device */
+
+
+struct Memory;
+
+#ifndef __NVOC_CLASS_Memory_TYPEDEF__
+#define __NVOC_CLASS_Memory_TYPEDEF__
+typedef struct Memory Memory;
+#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Memory
+#define __nvoc_class_id_Memory 0x4789f2
+#endif /* __nvoc_class_id_Memory */
+
+
+
+/*!
+ * RM internal class representing NV01_CONTEXT_DMA
+ */
+#ifdef NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct ContextDma {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct RmResource __nvoc_base_RmResource;
+    struct Notifier __nvoc_base_Notifier;
+    struct Object *__nvoc_pbase_Object;
+    struct RsResource *__nvoc_pbase_RsResource;
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
+    struct RmResource *__nvoc_pbase_RmResource;
+    struct INotifier *__nvoc_pbase_INotifier;
+    struct Notifier *__nvoc_pbase_Notifier;
+    struct ContextDma *__nvoc_pbase_ContextDma;
+    NV_STATUS (*__ctxdmaValidate__)(struct ContextDma *, NvU64, NvU64);
+    NV_STATUS (*__ctxdmaGetKernelVA__)(struct ContextDma *, NvU64, NvU64, void **, NvU32);
+    NV_STATUS (*__ctxdmaMapTo__)(struct ContextDma *, struct RS_RES_MAP_TO_PARAMS *);
+    NV_STATUS (*__ctxdmaUnmapFrom__)(struct ContextDma *, struct RS_RES_UNMAP_FROM_PARAMS *);
+    NV_STATUS (*__ctxdmaCtrlCmdUpdateContextdma__)(struct ContextDma *, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *);
+    NV_STATUS (*__ctxdmaCtrlCmdBindContextdma__)(struct ContextDma *, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *);
+    NV_STATUS (*__ctxdmaCtrlCmdUnbindContextdma__)(struct ContextDma *, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *);
+    NvBool (*__ctxdmaShareCallback__)(struct ContextDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
+    NV_STATUS (*__ctxdmaCheckMemInterUnmap__)(struct ContextDma *, NvBool);
+    NvBool (*__ctxdmaAccessCallback__)(struct ContextDma *, struct RsClient *, void *, RsAccessRight);
+    NV_STATUS (*__ctxdmaGetMemInterMapParams__)(struct ContextDma *, RMRES_MEM_INTER_MAP_PARAMS *);
+    NV_STATUS (*__ctxdmaGetMemoryMappingDescriptor__)(struct ContextDma *, struct MEMORY_DESCRIPTOR **);
+    void (*__ctxdmaSetNotificationShare__)(struct ContextDma *, struct NotifShare *);
+    NV_STATUS (*__ctxdmaControl__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NV_STATUS (*__ctxdmaControlFilter__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NvU32 (*__ctxdmaGetRefCount__)(struct ContextDma *);
+    NV_STATUS (*__ctxdmaUnregisterEvent__)(struct ContextDma *, NvHandle, NvHandle, NvHandle, NvHandle);
+    NV_STATUS (*__ctxdmaUnmap__)(struct ContextDma *, struct CALL_CONTEXT *, RsCpuMapping *);
+    NvBool (*__ctxdmaCanCopy__)(struct ContextDma *);
+    NV_STATUS (*__ctxdmaControl_Prologue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    void (*__ctxdmaAddAdditionalDependants__)(struct RsClient *, struct ContextDma *, RsResourceRef *);
+    void (*__ctxdmaPreDestruct__)(struct ContextDma *);
+    PEVENTNOTIFICATION *(*__ctxdmaGetNotificationListPtr__)(struct ContextDma *);
+    void (*__ctxdmaControl_Epilogue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    struct NotifShare *(*__ctxdmaGetNotificationShare__)(struct ContextDma *);
+    NV_STATUS (*__ctxdmaControlLookup__)(struct ContextDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
+    NV_STATUS (*__ctxdmaMap__)(struct ContextDma *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
+    NV_STATUS (*__ctxdmaGetOrAllocNotifShare__)(struct ContextDma *, NvHandle, NvHandle, struct NotifShare **);
+    NvU32 Class;
+    NvU32 Flags;
+    NvBool bReadOnly;
+    NvU32 CacheSnoop;
+    NvU32 Type;
+    NvU64 Limit;
+    NV_ADDRESS_SPACE AddressSpace;
+    NvBool bUnicast;
+    void *KernelVAddr[8];
+    void *KernelPriv;
+    NvU64 FbAperture[8];
+    NvU64 FbApertureLen[8];
+    struct Memory *pMemory;
+    struct MEMORY_DESCRIPTOR *pMemDesc;
+    NvU32 Instance[8];
+    NvU32 InstRefCount[8];
+    struct OBJGPU *pGpu;
+    struct Device *pDevice;
+};
+
+#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__
+#define __NVOC_CLASS_ContextDma_TYPEDEF__
+typedef struct ContextDma ContextDma;
+#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_ContextDma
+#define __nvoc_class_id_ContextDma 0x88441b
+#endif /* __nvoc_class_id_ContextDma */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma;
+
+#define __staticCast_ContextDma(pThis) \
+    ((pThis)->__nvoc_pbase_ContextDma)
+
+#ifdef __nvoc_context_dma_h_disabled
+#define __dynamicCast_ContextDma(pThis) ((ContextDma*)NULL)
+#else //__nvoc_context_dma_h_disabled
+#define __dynamicCast_ContextDma(pThis) \
+    ((ContextDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ContextDma)))
+#endif //__nvoc_context_dma_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_ContextDma(ContextDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
+#define __objCreate_ContextDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+    __nvoc_objCreate_ContextDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+#define ctxdmaValidate(pContextDma, start, len) ctxdmaValidate_DISPATCH(pContextDma, start, len)
+#define ctxdmaGetKernelVA(pContextDma, start, len, arg0, VA_idx) ctxdmaGetKernelVA_DISPATCH(pContextDma, start, len, arg0, VA_idx)
+#define ctxdmaMapTo(pContextDma, pParams) ctxdmaMapTo_DISPATCH(pContextDma, pParams)
+#define ctxdmaUnmapFrom(pContextDma, pParams) ctxdmaUnmapFrom_DISPATCH(pContextDma, pParams)
+#define ctxdmaCtrlCmdUpdateContextdma(pContextDma, pUpdateCtxtDmaParams) ctxdmaCtrlCmdUpdateContextdma_DISPATCH(pContextDma, pUpdateCtxtDmaParams)
+#define ctxdmaCtrlCmdBindContextdma(pContextDma, pBindCtxtDmaParams) ctxdmaCtrlCmdBindContextdma_DISPATCH(pContextDma, pBindCtxtDmaParams)
+#define ctxdmaCtrlCmdUnbindContextdma(pContextDma, pUnbindCtxtDmaParams) ctxdmaCtrlCmdUnbindContextdma_DISPATCH(pContextDma, pUnbindCtxtDmaParams)
+#define ctxdmaShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) ctxdmaShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
+#define ctxdmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) ctxdmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
+#define ctxdmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) ctxdmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
+#define ctxdmaGetMemInterMapParams(pRmResource, pParams) ctxdmaGetMemInterMapParams_DISPATCH(pRmResource, pParams)
+#define ctxdmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) ctxdmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
+#define ctxdmaSetNotificationShare(pNotifier, pNotifShare) ctxdmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
+#define ctxdmaControl(pResource, pCallContext, pParams) ctxdmaControl_DISPATCH(pResource, pCallContext, pParams)
+#define ctxdmaControlFilter(pResource, pCallContext, pParams) ctxdmaControlFilter_DISPATCH(pResource, pCallContext, pParams)
+#define ctxdmaGetRefCount(pResource) ctxdmaGetRefCount_DISPATCH(pResource)
+#define ctxdmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) ctxdmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
+#define ctxdmaUnmap(pResource, pCallContext, pCpuMapping) ctxdmaUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
+#define ctxdmaCanCopy(pResource) ctxdmaCanCopy_DISPATCH(pResource)
+#define ctxdmaControl_Prologue(pResource, pCallContext, pParams) ctxdmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define ctxdmaAddAdditionalDependants(pClient, pResource, pReference) ctxdmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
+#define ctxdmaPreDestruct(pResource) ctxdmaPreDestruct_DISPATCH(pResource)
+#define ctxdmaGetNotificationListPtr(pNotifier) ctxdmaGetNotificationListPtr_DISPATCH(pNotifier)
+#define ctxdmaControl_Epilogue(pResource, pCallContext, pParams) ctxdmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define ctxdmaGetNotificationShare(pNotifier) ctxdmaGetNotificationShare_DISPATCH(pNotifier)
+#define ctxdmaControlLookup(pResource, pParams, ppEntry) ctxdmaControlLookup_DISPATCH(pResource, pParams, ppEntry)
+#define ctxdmaMap(pResource, pCallContext, pParams, pCpuMapping) ctxdmaMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
+#define ctxdmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) ctxdmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
+NV_STATUS ctxdmaValidate_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len);
+
+static inline NV_STATUS ctxdmaValidate_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len) {
+    return pContextDma->__ctxdmaValidate__(pContextDma, start, len);
+}
+
+NV_STATUS ctxdmaGetKernelVA_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx);
+
+static inline NV_STATUS ctxdmaGetKernelVA_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx) {
+    return pContextDma->__ctxdmaGetKernelVA__(pContextDma, start, len, arg0, VA_idx);
+}
+
+NV_STATUS ctxdmaMapTo_IMPL(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams);
+
+static inline NV_STATUS ctxdmaMapTo_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) {
+    return pContextDma->__ctxdmaMapTo__(pContextDma, pParams);
+}
+
+NV_STATUS ctxdmaUnmapFrom_IMPL(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams);
+
+static inline NV_STATUS ctxdmaUnmapFrom_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return pContextDma->__ctxdmaUnmapFrom__(pContextDma, pParams);
+}
+
+NV_STATUS ctxdmaCtrlCmdUpdateContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams);
+
+static inline NV_STATUS ctxdmaCtrlCmdUpdateContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams) {
+    return pContextDma->__ctxdmaCtrlCmdUpdateContextdma__(pContextDma, pUpdateCtxtDmaParams);
+}
+
+NV_STATUS ctxdmaCtrlCmdBindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams);
+
+static inline NV_STATUS ctxdmaCtrlCmdBindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams) {
+    return pContextDma->__ctxdmaCtrlCmdBindContextdma__(pContextDma, pBindCtxtDmaParams);
+}
+
+NV_STATUS ctxdmaCtrlCmdUnbindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams);
+
+static inline NV_STATUS ctxdmaCtrlCmdUnbindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams) {
+    return pContextDma->__ctxdmaCtrlCmdUnbindContextdma__(pContextDma, pUnbindCtxtDmaParams);
+}
+
+static inline NvBool ctxdmaShareCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return pResource->__ctxdmaShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static inline NV_STATUS ctxdmaCheckMemInterUnmap_DISPATCH(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return pRmResource->__ctxdmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
+}
+
+static inline NvBool ctxdmaAccessCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return pResource->__ctxdmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
+}
+
+static inline NV_STATUS ctxdmaGetMemInterMapParams_DISPATCH(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return pRmResource->__ctxdmaGetMemInterMapParams__(pRmResource, pParams);
+}
+
+static inline NV_STATUS ctxdmaGetMemoryMappingDescriptor_DISPATCH(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return pRmResource->__ctxdmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
+}
+
+static inline void ctxdmaSetNotificationShare_DISPATCH(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) {
+    pNotifier->__ctxdmaSetNotificationShare__(pNotifier, pNotifShare);
+}
+
+static inline NV_STATUS ctxdmaControl_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__ctxdmaControl__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS ctxdmaControlFilter_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__ctxdmaControlFilter__(pResource, pCallContext, pParams);
+}
+
+static inline NvU32 ctxdmaGetRefCount_DISPATCH(struct ContextDma *pResource) {
+    return pResource->__ctxdmaGetRefCount__(pResource);
+}
+
+static inline NV_STATUS ctxdmaUnregisterEvent_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return pNotifier->__ctxdmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+static inline NV_STATUS ctxdmaUnmap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return pResource->__ctxdmaUnmap__(pResource, pCallContext, pCpuMapping);
+}
+
+static inline NvBool ctxdmaCanCopy_DISPATCH(struct ContextDma *pResource) {
+    return pResource->__ctxdmaCanCopy__(pResource);
+}
+
+static inline NV_STATUS ctxdmaControl_Prologue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__ctxdmaControl_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline void ctxdmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) {
+    pResource->__ctxdmaAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
+static inline void ctxdmaPreDestruct_DISPATCH(struct ContextDma *pResource) {
+    pResource->__ctxdmaPreDestruct__(pResource);
pResource->__ctxdmaPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *ctxdmaGetNotificationListPtr_DISPATCH(struct ContextDma *pNotifier) { + return pNotifier->__ctxdmaGetNotificationListPtr__(pNotifier); +} + +static inline void ctxdmaControl_Epilogue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__ctxdmaControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline struct NotifShare *ctxdmaGetNotificationShare_DISPATCH(struct ContextDma *pNotifier) { + return pNotifier->__ctxdmaGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS ctxdmaControlLookup_DISPATCH(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__ctxdmaControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS ctxdmaMap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__ctxdmaMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS ctxdmaGetOrAllocNotifShare_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__ctxdmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS ctxdmaConstruct_IMPL(struct ContextDma *arg_pCtxdma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_ctxdmaConstruct(arg_pCtxdma, arg_pCallContext, arg_pParams) ctxdmaConstruct_IMPL(arg_pCtxdma, arg_pCallContext, arg_pParams) +void ctxdmaDestruct_IMPL(struct ContextDma *pCtxdma); +#define __nvoc_ctxdmaDestruct(pCtxdma) ctxdmaDestruct_IMPL(pCtxdma) +NvBool ctxdmaIsBound_IMPL(struct ContextDma *pContextDma); +#ifdef __nvoc_context_dma_h_disabled +static inline NvBool ctxdmaIsBound(struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("ContextDma was disabled!"); + return NV_FALSE; +} +#else //__nvoc_context_dma_h_disabled +#define ctxdmaIsBound(pContextDma) ctxdmaIsBound_IMPL(pContextDma) +#endif //__nvoc_context_dma_h_disabled + +NV_STATUS ctxdmaGetByHandle_IMPL(struct RsClient *pClient, NvHandle hContextDma, struct ContextDma **arg0); +#define ctxdmaGetByHandle(pClient, hContextDma, arg0) ctxdmaGetByHandle_IMPL(pClient, hContextDma, arg0) +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +#if RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS == 1 + +/** + * @warning This function is deprecated! Please use ctxdmaGetByHandle. 
+ */ +NV_STATUS CliGetContextDma(NvHandle hClient, NvHandle hContextDma, struct ContextDma **); + +#endif + +#endif /* CONTEXT_DMA_H */ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CONTEXT_DMA_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c new file mode 100644 index 0000000..9a3213e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c @@ -0,0 +1,286 @@ +#define NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_dce_client_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x61649c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_OBJDCECLIENTRM(OBJDCECLIENTRM*); +void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM*); +NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM*); +void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM*); +void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJDCECLIENTRM; + +static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_OBJDCECLIENTRM = { + /*pClassDef=*/ &__nvoc_class_def_OBJDCECLIENTRM, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJDCECLIENTRM, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJDCECLIENTRM = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJDCECLIENTRM_OBJDCECLIENTRM, + &__nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE, + &__nvoc_rtti_OBJDCECLIENTRM_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJDCECLIENTRM), + /*classId=*/ classId(OBJDCECLIENTRM), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJDCECLIENTRM", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJDCECLIENTRM, + /*pCastInfo=*/ &__nvoc_castinfo_OBJDCECLIENTRM, + /*pExportInfo=*/ &__nvoc_export_info_OBJDCECLIENTRM +}; + +static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateConstructEngine(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, ENGDESCRIPTOR arg2) { + return dceclientConstructEngine(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2); +} + +static void __nvoc_thunk_OBJDCECLIENTRM_engstateStateDestroy(struct OBJGPU *arg0, struct OBJENGSTATE *arg1) { + dceclientStateDestroy(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateStateLoad(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return dceclientStateLoad(arg0, 
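+ /* The cast below (and in each __nvoc_thunk_OBJDCECLIENTRM_* above)
+  * recovers the derived object from a base-class pointer by subtracting
+  * the OBJENGSTATE byte offset recorded in the class RTTI. */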
(struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateStateUnload(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return dceclientStateUnload(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientReconcileTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStateInitLocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreLoad(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePostUnload(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreUnload(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStateInitUnlocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_dceclientInitMissing(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreInitLocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientGetTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientCompareTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void 
__nvoc_thunk_OBJENGSTATE_dceclientFreeTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePostLoad(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientAllocTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientSetTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_dceclientIsPresent(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJDCECLIENTRM = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE; + __nvoc_init_dataField_OBJDCECLIENTRM(pThis); + goto __nvoc_ctor_OBJDCECLIENTRM_exit; // Success + +__nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE: +__nvoc_ctor_OBJDCECLIENTRM_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJDCECLIENTRM_1(OBJDCECLIENTRM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dceclientConstructEngine__ = &dceclientConstructEngine_IMPL; + + pThis->__dceclientStateDestroy__ = &dceclientStateDestroy_IMPL; + + pThis->__dceclientStateLoad__ = &dceclientStateLoad_IMPL; + + pThis->__dceclientStateUnload__ = &dceclientStateUnload_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateDestroy; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateUnload; + + pThis->__dceclientReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientReconcileTunableState; + + pThis->__dceclientStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStateInitLocked; + + pThis->__dceclientStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreLoad; + + pThis->__dceclientStatePostUnload__ = 
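+ /* Vtable wiring note: methods this class overrides (ConstructEngine,
+  * StateDestroy, StateLoad, StateUnload) point at the local *_IMPL
+  * functions and the matching OBJENGSTATE base slots are repointed at
+  * derived-cast thunks; every method inherited unchanged, including the
+  * assignment continued below, points back at an OBJENGSTATE thunk that
+  * up-casts and calls the base implementation. */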
&__nvoc_thunk_OBJENGSTATE_dceclientStatePostUnload; + + pThis->__dceclientStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreUnload; + + pThis->__dceclientStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStateInitUnlocked; + + pThis->__dceclientInitMissing__ = &__nvoc_thunk_OBJENGSTATE_dceclientInitMissing; + + pThis->__dceclientStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreInitLocked; + + pThis->__dceclientStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked; + + pThis->__dceclientGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientGetTunableState; + + pThis->__dceclientCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientCompareTunableState; + + pThis->__dceclientFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientFreeTunableState; + + pThis->__dceclientStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePostLoad; + + pThis->__dceclientAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientAllocTunableState; + + pThis->__dceclientSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientSetTunableState; + + pThis->__dceclientIsPresent__ = &__nvoc_thunk_OBJENGSTATE_dceclientIsPresent; +} + +void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + __nvoc_init_funcTable_OBJDCECLIENTRM_1(pThis); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) { + pThis->__nvoc_pbase_OBJDCECLIENTRM = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_OBJDCECLIENTRM(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJDCECLIENTRM *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJDCECLIENTRM)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJDCECLIENTRM)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJDCECLIENTRM); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJDCECLIENTRM(pThis); + status = __nvoc_ctor_OBJDCECLIENTRM(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJDCECLIENTRM_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJDCECLIENTRM_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJDCECLIENTRM(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h new file mode 100644 index 0000000..8c0f2a6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h @@ -0,0 +1,377 @@ +#ifndef _G_DCE_CLIENT_NVOC_H_ +#define _G_DCE_CLIENT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: 
Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_dce_client_nvoc.h"
+
+#ifndef _DCE_CLIENT_H_
+#define _DCE_CLIENT_H_
+
+/*!
+ * @file dce_client.h
+ * @brief Provides definitions for all DceClient data structures and interfaces.
+ */
+
+#include "gpu/eng_state.h"
+#include "core/core.h"
+#include "objrpc.h"
+#include "os/dce_rm_client_ipc.h"
+#include "class/cl0000.h"
+#include "class/cl0080.h"
+#include "class/cl2080.h"
+#include "class/cl0073.h"
+#include "class/cl0005.h"
+#include "class/clc372sw.h"
+#include "ctrl/ctrl0073/ctrl0073dp.h"
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 hClass;
+    NV0000_ALLOC_PARAMETERS rootAllocParams;
+    NvBool valid;
+} ROOT;
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 hClass;
+    NV0080_ALLOC_PARAMETERS deviceAllocParams;
+    NvBool valid;
+} DEVICE;
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 hClass;
+    NV2080_ALLOC_PARAMETERS subdeviceAllocParams;
+    NvBool valid;
+} SUBDEVICE;
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 hClass;
+    NVOS21_PARAMETERS displayCommonAllocParams;
+    NvBool valid;
+} DISPLAY_COMMON;
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 hClass;
+    NVOS21_PARAMETERS displaySWAllocParams;
+    NvBool valid;
+} DISPLAY_SW;
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hParent;
+    NvHandle hObject;
+    NvU32 hClass;
+    NV0005_ALLOC_PARAMETERS displaySWEventAllocParams;
+    NvBool valid;
+} DISPLAY_SW_EVENT;
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hObject;
+    NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams;
+    NvBool valid;
+} DISPLAY_HPD_CTRL;
+
+typedef struct
+{
+    NvHandle hClient;
+    NvHandle hObject;
+    NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS setManualParams;
+    NvBool valid;
+} DISPLAY_DP_SET_MANUAL;
+
+/*!
+ * Maximum number of RM clients
+ */
+#define MAX_RM_CLIENTS 5
+
+/*!
+ * Temporary alias of DceClient to OBJDCECLIENTRM
+ */
+#define DceClient OBJDCECLIENTRM
+
+/*!
+ * Defines the structure used to contain all generic information related to
+ * the DceClient.
+ */ +#ifdef NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJDCECLIENTRM { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct OBJDCECLIENTRM *__nvoc_pbase_OBJDCECLIENTRM; + NV_STATUS (*__dceclientConstructEngine__)(struct OBJGPU *, struct OBJDCECLIENTRM *, ENGDESCRIPTOR); + void (*__dceclientStateDestroy__)(struct OBJGPU *, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientStateLoad__)(struct OBJGPU *, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStateUnload__)(struct OBJGPU *, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientReconcileTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NV_STATUS (*__dceclientStateInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientStatePreLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStatePostUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStatePreUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStateInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *); + void (*__dceclientInitMissing__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientStatePreInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientStatePreInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientGetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NV_STATUS (*__dceclientCompareTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *, void *); + void (*__dceclientFreeTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NV_STATUS (*__dceclientStatePostLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientAllocTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void **); + NV_STATUS (*__dceclientSetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NvBool (*__dceclientIsPresent__)(POBJGPU, struct OBJDCECLIENTRM *); + struct OBJRPC *pRpc; + NvU32 clientId[2]; +}; + +#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +typedef struct OBJDCECLIENTRM OBJDCECLIENTRM; +#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCECLIENTRM +#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c +#endif /* __nvoc_class_id_OBJDCECLIENTRM */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM; + +#define __staticCast_OBJDCECLIENTRM(pThis) \ + ((pThis)->__nvoc_pbase_OBJDCECLIENTRM) + +#ifdef __nvoc_dce_client_h_disabled +#define __dynamicCast_OBJDCECLIENTRM(pThis) ((OBJDCECLIENTRM*)NULL) +#else //__nvoc_dce_client_h_disabled +#define __dynamicCast_OBJDCECLIENTRM(pThis) \ + ((OBJDCECLIENTRM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJDCECLIENTRM))) +#endif //__nvoc_dce_client_h_disabled + +#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
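+/*
+ * Callers use the unprefixed names #defined below (dceclientStateLoad and
+ * friends), which expand to the *_DISPATCH inlines and indirect through
+ * the per-object function pointers initialized in g_dce_client_nvoc.c.
+ * A minimal call sketch; pGpu and pDceClient are hypothetical, and the
+ * flags argument shown is illustrative only:
+ *
+ *     NV_STATUS status = dceclientStateLoad(pGpu, pDceClient, 0);
+ *     // expands to pDceClient->__dceclientStateLoad__(pGpu, pDceClient, 0)
+ *     if (status != NV_OK)
+ *         return status;
+ */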
+#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32); +#define __objCreate_OBJDCECLIENTRM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJDCECLIENTRM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define dceclientConstructEngine(arg0, arg1, arg2) dceclientConstructEngine_DISPATCH(arg0, arg1, arg2) +#define dceclientStateDestroy(arg0, arg1) dceclientStateDestroy_DISPATCH(arg0, arg1) +#define dceclientStateLoad(arg0, arg1, arg2) dceclientStateLoad_DISPATCH(arg0, arg1, arg2) +#define dceclientStateUnload(arg0, arg1, arg2) dceclientStateUnload_DISPATCH(arg0, arg1, arg2) +#define dceclientReconcileTunableState(pGpu, pEngstate, pTunableState) dceclientReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientStateInitLocked(pGpu, pEngstate) dceclientStateInitLocked_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreLoad(pGpu, pEngstate, arg0) dceclientStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStatePostUnload(pGpu, pEngstate, arg0) dceclientStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStatePreUnload(pGpu, pEngstate, arg0) dceclientStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStateInitUnlocked(pGpu, pEngstate) dceclientStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dceclientInitMissing(pGpu, pEngstate) dceclientInitMissing_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreInitLocked(pGpu, pEngstate) dceclientStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreInitUnlocked(pGpu, pEngstate) dceclientStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dceclientGetTunableState(pGpu, pEngstate, pTunableState) dceclientGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) dceclientCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define dceclientFreeTunableState(pGpu, pEngstate, pTunableState) dceclientFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientStatePostLoad(pGpu, pEngstate, arg0) dceclientStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientAllocTunableState(pGpu, pEngstate, ppTunableState) dceclientAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define dceclientSetTunableState(pGpu, pEngstate, pTunableState) dceclientSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientIsPresent(pGpu, pEngstate) dceclientIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS dceclientConstructEngine_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2); + +static inline NV_STATUS dceclientConstructEngine_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2) { + return arg1->__dceclientConstructEngine__(arg0, arg1, arg2); +} + +void dceclientStateDestroy_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1); + +static inline void dceclientStateDestroy_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) { + arg1->__dceclientStateDestroy__(arg0, arg1); +} + +NV_STATUS dceclientStateLoad_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2); + +static inline NV_STATUS dceclientStateLoad_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2) { + return arg1->__dceclientStateLoad__(arg0, arg1, arg2); +} + +NV_STATUS 
dceclientStateUnload_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2); + +static inline NV_STATUS dceclientStateUnload_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2) { + return arg1->__dceclientStateUnload__(arg0, arg1, arg2); +} + +static inline NV_STATUS dceclientReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return pEngstate->__dceclientReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dceclientStateInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void dceclientInitMissing_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + pEngstate->__dceclientInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return pEngstate->__dceclientGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dceclientCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__dceclientCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void dceclientFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + pEngstate->__dceclientFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dceclientStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void **ppTunableState) { + return pEngstate->__dceclientAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS dceclientSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return pEngstate->__dceclientSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool dceclientIsPresent_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientIsPresent__(pGpu, pEngstate); +} + +NV_STATUS dceclientInitRpcInfra_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM 
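+ /* The non-virtual helpers declared from here down come in two flavors
+  * selected by __nvoc_dce_client_h_disabled: when the header is compiled
+  * out, each name resolves to an inline stub that fires
+  * NV_ASSERT_FAILED_PRECOMP and returns NV_ERR_NOT_SUPPORTED (or returns
+  * void); otherwise it is #defined straight to the *_IMPL function. */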
*arg1); +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientInitRpcInfra(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientInitRpcInfra(arg0, arg1) dceclientInitRpcInfra_IMPL(arg0, arg1) +#endif //__nvoc_dce_client_h_disabled + +void dceclientDeinitRpcInfra_IMPL(struct OBJDCECLIENTRM *arg0); +#ifdef __nvoc_dce_client_h_disabled +static inline void dceclientDeinitRpcInfra(struct OBJDCECLIENTRM *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); +} +#else //__nvoc_dce_client_h_disabled +#define dceclientDeinitRpcInfra(arg0) dceclientDeinitRpcInfra_IMPL(arg0) +#endif //__nvoc_dce_client_h_disabled + +NV_STATUS dceclientDceRmInit_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2); +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientDceRmInit(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientDceRmInit(arg0, arg1, arg2) dceclientDceRmInit_IMPL(arg0, arg1, arg2) +#endif //__nvoc_dce_client_h_disabled + +NV_STATUS dceclientSendRpc_IMPL(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2); +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientSendRpc(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientSendRpc(arg0, arg1, arg2) dceclientSendRpc_IMPL(arg0, arg1, arg2) +#endif //__nvoc_dce_client_h_disabled + +#undef PRIVATE_FIELD + + +NV_STATUS rpcRmApiControl_dce(RM_API *pRmApi, + NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParamStructPtr, + NvU32 paramsSize); +NV_STATUS rpcRmApiAlloc_dce(RM_API *pRmApi, + NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, + void *pAllocParams); +NV_STATUS rpcRmApiDupObject_dce(RM_API *pRmApi, NvHandle hClient, + NvHandle hParent, NvHandle *phObject, NvHandle hClientSrc, + NvHandle hObjectSrc, NvU32 flags); +NV_STATUS rpcRmApiFree_dce(RM_API *pRmApi, NvHandle hClient, NvHandle hObject); +NV_STATUS rpcDceRmInit_dce(RM_API *pRmApi, NvBool bInit); +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DCE_CLIENT_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c new file mode 100644 index 0000000..240c337 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c @@ -0,0 +1,550 @@ +#define NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_device_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe0ac20 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void 
__nvoc_init_Device(Device*); +void __nvoc_init_funcTable_Device(Device*); +NV_STATUS __nvoc_ctor_Device(Device*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Device(Device*); +void __nvoc_dtor_Device(Device*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Device; + +static const struct NVOC_RTTI __nvoc_rtti_Device_Device = { + /*pClassDef=*/ &__nvoc_class_def_Device, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Device, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Device = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_Device_Device, + &__nvoc_rtti_Device_GpuResource, + &__nvoc_rtti_Device_RmResource, + &__nvoc_rtti_Device_RmResourceCommon, + &__nvoc_rtti_Device_RsResource, + &__nvoc_rtti_Device_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Device = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Device), + /*classId=*/ classId(Device), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Device", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Device, + /*pCastInfo=*/ &__nvoc_castinfo_Device, + /*pExportInfo=*/ &__nvoc_export_info_Device +}; + +static NV_STATUS __nvoc_thunk_Device_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return deviceControl((struct Device *)(((unsigned char *)pResource) - __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Device_gpuresInternalControlForward(struct GpuResource *pDevice, NvU32 command, void *pParams, NvU32 size) { + return deviceInternalControlForward((struct Device *)(((unsigned char *)pDevice) - __nvoc_rtti_Device_GpuResource.offset), command, pParams, size); +} + +static NvBool __nvoc_thunk_GpuResource_deviceShareCallback(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS 
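+ /* Inheritance glue for Device: the two __nvoc_thunk_Device_gpures*
+  * thunks above subtract the GpuResource offset to route base-class calls
+  * into deviceControl/deviceInternalControlForward, while the
+  * __nvoc_thunk_GpuResource/RmResource/RsResource_device* thunks below
+  * add the ancestor's offset so Device* calls reach the inherited
+  * implementations. */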
__nvoc_thunk_GpuResource_deviceUnmap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemInterMapParams(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_deviceGetMapAddrSpace(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_deviceGetInternalObjectHandle(struct Device *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceControlFilter(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_deviceAddAdditionalDependants(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_deviceGetRefCount(struct Device *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceCheckMemInterUnmap(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceMapTo(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceControl_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_deviceCanCopy(struct Device *pResource) { + return 
resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_devicePreDestruct(struct Device *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceUnmapFrom(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_deviceControl_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceControlLookup(struct Device *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_deviceMap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_deviceAccessCallback(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800201u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetClasslist" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetNumSubdevices_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800280u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetNumSubdevices" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*flags=*/ 0x5u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800287u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS), + /*pClassInfo=*/ 
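+ /* Each __nvoc_exported_method_def_Device entry binds one NV0080 control
+  * command to its handler: methodId carries the control command value
+  * (0x800201 in entry [0] pairs with deviceCtrlCmdGpuGetClasslist),
+  * paramSize is checked against the caller's parameter buffer, and the
+  * flags word both gates compilation via
+  * NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG and encodes access policy bits
+  * whose meaning is defined outside this file. */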
&(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuModifyGpuSwStatePersistence" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800288u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuQueryGpuSwStatePersistence" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetVirtualizationMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800289u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetVirtualizationMode" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslistV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800292u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetClasslistV2" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800293u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetFindSubDeviceHandle" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetBrandCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*flags=*/ 0x211u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800294u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetBrandCaps" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800296u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuSetVgpuVfBar1Size" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTSwitch_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*flags=*/ 0x1u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x801e01u, + /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdOsUnixVTSwitch" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTGetFBInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*flags=*/ 0x1u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801e02u, + /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Device = +{ + /*numEntries=*/ 11, + /*pExportEntries=*/ __nvoc_exported_method_def_Device +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Device(Device *pThis) { + __nvoc_deviceDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Device(Device *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Device(Device *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Device_fail_GpuResource; + __nvoc_init_dataField_Device(pThis); + + status = __nvoc_deviceConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Device_fail__init; + goto __nvoc_ctor_Device_exit; // Success + +__nvoc_ctor_Device_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_Device_fail_GpuResource: +__nvoc_ctor_Device_exit: + + return status; +} + +static void __nvoc_init_funcTable_Device_1(Device *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__deviceControl__ = &deviceControl_IMPL; + + pThis->__deviceInternalControlForward__ = &deviceInternalControlForward_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + pThis->__deviceCtrlCmdGpuGetClasslist__ = &deviceCtrlCmdGpuGetClasslist_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + pThis->__deviceCtrlCmdGpuGetClasslistV2__ = &deviceCtrlCmdGpuGetClasslistV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__deviceCtrlCmdGpuGetNumSubdevices__ = &deviceCtrlCmdGpuGetNumSubdevices_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + pThis->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__ = &deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__ = &deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdGpuGetVirtualizationMode__ = &deviceCtrlCmdGpuGetVirtualizationMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__deviceCtrlCmdGpuSetVgpuVfBar1Size__ = &deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + pThis->__deviceCtrlCmdGpuGetBrandCaps__ = &deviceCtrlCmdGpuGetBrandCaps_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__deviceCtrlCmdGpuGetFindSubDeviceHandle__ = &deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + pThis->__deviceCtrlCmdOsUnixVTSwitch__ = &deviceCtrlCmdOsUnixVTSwitch_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + pThis->__deviceCtrlCmdOsUnixVTGetFBInfo__ = &deviceCtrlCmdOsUnixVTGetFBInfo_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_Device_gpuresControl; + + pThis->__nvoc_base_GpuResource.__gpuresInternalControlForward__ = &__nvoc_thunk_Device_gpuresInternalControlForward; + + pThis->__deviceShareCallback__ = &__nvoc_thunk_GpuResource_deviceShareCallback; + + pThis->__deviceUnmap__ = &__nvoc_thunk_GpuResource_deviceUnmap; + + pThis->__deviceGetMemInterMapParams__ = &__nvoc_thunk_RmResource_deviceGetMemInterMapParams; + + pThis->__deviceGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor; + + pThis->__deviceGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_deviceGetMapAddrSpace; + + pThis->__deviceGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_deviceGetInternalObjectHandle; + + pThis->__deviceControlFilter__ = &__nvoc_thunk_RsResource_deviceControlFilter; + + pThis->__deviceAddAdditionalDependants__ = &__nvoc_thunk_RsResource_deviceAddAdditionalDependants; + + pThis->__deviceGetRefCount__ = &__nvoc_thunk_RsResource_deviceGetRefCount; + + pThis->__deviceCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_deviceCheckMemInterUnmap; + + pThis->__deviceMapTo__ = &__nvoc_thunk_RsResource_deviceMapTo; + + pThis->__deviceControl_Prologue__ = &__nvoc_thunk_RmResource_deviceControl_Prologue; + + pThis->__deviceGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize; + + pThis->__deviceCanCopy__ = &__nvoc_thunk_RsResource_deviceCanCopy; + + pThis->__devicePreDestruct__ = &__nvoc_thunk_RsResource_devicePreDestruct; + + pThis->__deviceUnmapFrom__ = &__nvoc_thunk_RsResource_deviceUnmapFrom; + + pThis->__deviceControl_Epilogue__ = &__nvoc_thunk_RmResource_deviceControl_Epilogue; + + pThis->__deviceControlLookup__ = &__nvoc_thunk_RsResource_deviceControlLookup; + + pThis->__deviceMap__ = &__nvoc_thunk_GpuResource_deviceMap; + + pThis->__deviceAccessCallback__ = &__nvoc_thunk_RmResource_deviceAccessCallback; +} + +void __nvoc_init_funcTable_Device(Device *pThis) { + __nvoc_init_funcTable_Device_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Device(Device *pThis) { + pThis->__nvoc_pbase_Device = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_Device(pThis); +} + +NV_STATUS __nvoc_objCreate_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Device *pThis; + + pThis = portMemAllocNonPaged(sizeof(Device)); + if (pThis == NULL) return 
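+ /* Standard NVOC creation sequence: allocate and zero the object, install
+  * RTTI, attach to the parent Object unless
+  * NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY is set, initialize the
+  * vtables, then run the constructor chain, freeing on failure. A
+  * hypothetical call sketch, assuming pParent, pCallContext and
+  * pAllocParams are already in hand:
+  *
+  *     Device *pDevice = NULL;
+  *     NV_STATUS status = __nvoc_objCreate_Device(&pDevice, pParent, 0,
+  *                                                pCallContext, pAllocParams);
+  */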
NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Device)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Device); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Device(pThis); + status = __nvoc_ctor_Device(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Device_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Device_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Device(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h new file mode 100644 index 0000000..18ddf44 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h @@ -0,0 +1,466 @@ +#ifndef _G_DEVICE_NVOC_H_ +#define _G_DEVICE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_device_nvoc.h" + +#ifndef _DEVICE_H_ +#define _DEVICE_H_ + +#include "core/core.h" + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "nvoc/utility.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" +#include "containers/btree.h" + +#include "gpu/gpu_resource.h" +#include "mem_mgr/vaspace.h" + +#include "ctrl/ctrl0080.h" // rmcontrol params + +// Forward declaration +struct HOST_VGPU_DEVICE; +struct OBJVASPACE; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + + + +// TODO: Remove this after adding KERNEL_HOST_VGPU_DEVICE +typedef struct HOST_VGPU_DEVICE KERNEL_HOST_VGPU_DEVICE; + +/** + * A device consists of one or more GPUs. Devices provide broadcast + * semantics; that is, operations involving a device are applied to all GPUs + * in the device. + */ +#ifdef NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Device { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct Device *__nvoc_pbase_Device; + NV_STATUS (*__deviceControl__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__deviceInternalControlForward__)(struct Device *, NvU32, void *, NvU32); + NV_STATUS (*__deviceCtrlCmdGpuGetClasslist__)(struct Device *, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetClasslistV2__)(struct Device *, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetNumSubdevices__)(struct Device *, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuModifyGpuSwStatePersistence__)(struct Device *, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuQueryGpuSwStatePersistence__)(struct Device *, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetVirtualizationMode__)(struct Device *, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuSetVgpuVfBar1Size__)(struct Device *, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetBrandCaps__)(struct Device *, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetFindSubDeviceHandle__)(struct Device *, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *); + NV_STATUS (*__deviceCtrlCmdOsUnixVTSwitch__)(struct Device *, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *); + NV_STATUS (*__deviceCtrlCmdOsUnixVTGetFBInfo__)(struct Device *, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *); + NvBool (*__deviceShareCallback__)(struct Device *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__deviceUnmap__)(struct Device *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__deviceGetMemInterMapParams__)(struct Device *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__deviceGetMemoryMappingDescriptor__)(struct Device *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__deviceGetMapAddrSpace__)(struct Device *, struct CALL_CONTEXT 
*, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__deviceGetInternalObjectHandle__)(struct Device *); + NV_STATUS (*__deviceControlFilter__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__deviceAddAdditionalDependants__)(struct RsClient *, struct Device *, RsResourceRef *); + NvU32 (*__deviceGetRefCount__)(struct Device *); + NV_STATUS (*__deviceCheckMemInterUnmap__)(struct Device *, NvBool); + NV_STATUS (*__deviceMapTo__)(struct Device *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__deviceControl_Prologue__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__deviceGetRegBaseOffsetAndSize__)(struct Device *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__deviceCanCopy__)(struct Device *); + void (*__devicePreDestruct__)(struct Device *); + NV_STATUS (*__deviceUnmapFrom__)(struct Device *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__deviceControl_Epilogue__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__deviceControlLookup__)(struct Device *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__deviceMap__)(struct Device *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__deviceAccessCallback__)(struct Device *, struct RsClient *, void *, RsAccessRight); + NvU32 deviceInst; + NvU32 PerfReqCnt; + PNODE DevMemoryTable; + NvBool bSliGpuBoostSyncActivate; + NvBool bPerfOptpActive; + NvU32 nPerfOptpRefCnt; + NvU32 nCudaLimitRefCnt; + struct OBJVASPACE *pVASpace; + NvHandle hClientShare; + NvHandle hTargetClient; + NvHandle hTargetDevice; + NvU32 deviceAllocFlags; + NvU32 deviceInternalAllocFlags; + NvU64 vaStartInternal; + NvU64 vaLimitInternal; + NvU64 vaSize; + NvU32 vaMode; + struct HOST_VGPU_DEVICE *pHostVgpuDevice; + KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice; +}; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device; + +#define __staticCast_Device(pThis) \ + ((pThis)->__nvoc_pbase_Device) + +#ifdef __nvoc_device_h_disabled +#define __dynamicCast_Device(pThis) ((Device*)NULL) +#else //__nvoc_device_h_disabled +#define __dynamicCast_Device(pThis) \ + ((Device*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Device))) +#endif //__nvoc_device_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Device(Device**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Device(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Device((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define deviceControl(pResource, pCallContext, pParams) deviceControl_DISPATCH(pResource, pCallContext, pParams) +#define deviceInternalControlForward(pDevice, command, pParams, size) deviceInternalControlForward_DISPATCH(pDevice, command, pParams, size) +#define deviceCtrlCmdGpuGetClasslist(pDevice, pClassListParams) deviceCtrlCmdGpuGetClasslist_DISPATCH(pDevice, pClassListParams) +#define deviceCtrlCmdGpuGetClasslistV2(pDevice, pParams) deviceCtrlCmdGpuGetClasslistV2_DISPATCH(pDevice, pParams) 
+#define deviceCtrlCmdGpuGetNumSubdevices(pDevice, pSubDeviceCountParams) deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(pDevice, pSubDeviceCountParams) +#define deviceCtrlCmdGpuModifyGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuQueryGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetVirtualizationMode(pDevice, pParams) deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuSetVgpuVfBar1Size(pDevice, pParams) deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetBrandCaps(pDevice, pParams) deviceCtrlCmdGpuGetBrandCaps_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetFindSubDeviceHandle(pDevice, pParams) deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdOsUnixVTSwitch(pDevice, pParams) deviceCtrlCmdOsUnixVTSwitch_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdOsUnixVTGetFBInfo(pDevice, pParams) deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(pDevice, pParams) +#define deviceShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) deviceShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define deviceUnmap(pGpuResource, pCallContext, pCpuMapping) deviceUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define deviceGetMemInterMapParams(pRmResource, pParams) deviceGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define deviceGetMemoryMappingDescriptor(pRmResource, ppMemDesc) deviceGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define deviceGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) deviceGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define deviceGetInternalObjectHandle(pGpuResource) deviceGetInternalObjectHandle_DISPATCH(pGpuResource) +#define deviceControlFilter(pResource, pCallContext, pParams) deviceControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define deviceAddAdditionalDependants(pClient, pResource, pReference) deviceAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define deviceGetRefCount(pResource) deviceGetRefCount_DISPATCH(pResource) +#define deviceCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) deviceCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define deviceMapTo(pResource, pParams) deviceMapTo_DISPATCH(pResource, pParams) +#define deviceControl_Prologue(pResource, pCallContext, pParams) deviceControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define deviceGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) deviceGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define deviceCanCopy(pResource) deviceCanCopy_DISPATCH(pResource) +#define devicePreDestruct(pResource) devicePreDestruct_DISPATCH(pResource) +#define deviceUnmapFrom(pResource, pParams) deviceUnmapFrom_DISPATCH(pResource, pParams) +#define deviceControl_Epilogue(pResource, pCallContext, pParams) deviceControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define deviceControlLookup(pResource, pParams, ppEntry) deviceControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define deviceMap(pGpuResource, pCallContext, pParams, pCpuMapping) deviceMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define deviceAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) 
deviceAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS deviceControl_IMPL(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS deviceControl_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__deviceControl__(pResource, pCallContext, pParams); +} + +NV_STATUS deviceInternalControlForward_IMPL(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size); + +static inline NV_STATUS deviceInternalControlForward_DISPATCH(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size) { + return pDevice->__deviceInternalControlForward__(pDevice, command, pParams, size); +} + +NV_STATUS deviceCtrlCmdGpuGetClasslist_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetClasslist_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams) { + return pDevice->__deviceCtrlCmdGpuGetClasslist__(pDevice, pClassListParams); +} + +NV_STATUS deviceCtrlCmdGpuGetClasslistV2_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetClasslistV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetClasslistV2__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams) { + return pDevice->__deviceCtrlCmdGpuGetNumSubdevices__(pDevice, pSubDeviceCountParams); +} + +NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetVirtualizationMode__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuSetVgpuVfBar1Size__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetBrandCaps_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams); + 
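+// Note on the generated dispatch pattern (descriptive comment, not part of the original generator output): each exported control method comes as a concrete _IMPL plus a thin _DISPATCH wrapper; the wrapper indirects through the per-object function pointer that __nvoc_init_funcTable_Device() aims at the _IMPL (or at a base-class thunk), so the bound implementation can vary per object. +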
+static inline NV_STATUS deviceCtrlCmdGpuGetBrandCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetBrandCaps__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams) { + return pDevice->__deviceCtrlCmdGpuGetFindSubDeviceHandle__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdOsUnixVTSwitch_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdOsUnixVTSwitch_DISPATCH(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdOsUnixVTSwitch__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdOsUnixVTGetFBInfo__(pDevice, pParams); +} + +static inline NvBool deviceShareCallback_DISPATCH(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__deviceShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS deviceUnmap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__deviceUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS deviceGetMemInterMapParams_DISPATCH(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__deviceGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS deviceGetMemoryMappingDescriptor_DISPATCH(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__deviceGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS deviceGetMapAddrSpace_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__deviceGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle deviceGetInternalObjectHandle_DISPATCH(struct Device *pGpuResource) { + return pGpuResource->__deviceGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS deviceControlFilter_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__deviceControlFilter__(pResource, pCallContext, pParams); +} + +static inline void deviceAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) { + pResource->__deviceAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 deviceGetRefCount_DISPATCH(struct Device *pResource) { + return pResource->__deviceGetRefCount__(pResource); +} + +static inline NV_STATUS deviceCheckMemInterUnmap_DISPATCH(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__deviceCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS deviceMapTo_DISPATCH(struct Device 
*pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__deviceMapTo__(pResource, pParams); +} + +static inline NV_STATUS deviceControl_Prologue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__deviceControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS deviceGetRegBaseOffsetAndSize_DISPATCH(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__deviceGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool deviceCanCopy_DISPATCH(struct Device *pResource) { + return pResource->__deviceCanCopy__(pResource); +} + +static inline void devicePreDestruct_DISPATCH(struct Device *pResource) { + pResource->__devicePreDestruct__(pResource); +} + +static inline NV_STATUS deviceUnmapFrom_DISPATCH(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__deviceUnmapFrom__(pResource, pParams); +} + +static inline void deviceControl_Epilogue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__deviceControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS deviceControlLookup_DISPATCH(struct Device *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__deviceControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS deviceMap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__deviceMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool deviceAccessCallback_DISPATCH(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__deviceAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS deviceSetDefaultVASpace(struct Device *pDevice, NvHandle hVASpace) { + return NV_OK; +} + +NV_STATUS deviceConstruct_IMPL(struct Device *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_deviceConstruct(arg_pResource, arg_pCallContext, arg_pParams) deviceConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void deviceDestruct_IMPL(struct Device *pResource); +#define __nvoc_deviceDestruct(pResource) deviceDestruct_IMPL(pResource) +NV_STATUS deviceInit_IMPL(struct Device *pDevice, struct CALL_CONTEXT *pCallContext, NvHandle hClient, NvHandle hDevice, NvU32 deviceInst, NvHandle hClientShare, NvHandle hTargetClient, NvHandle hTargetDevice, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 allocFlags, NvU32 vaMode); +#ifdef __nvoc_device_h_disabled +static inline NV_STATUS deviceInit(struct Device *pDevice, struct CALL_CONTEXT *pCallContext, NvHandle hClient, NvHandle hDevice, NvU32 deviceInst, NvHandle hClientShare, NvHandle hTargetClient, NvHandle hTargetDevice, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 allocFlags, NvU32 vaMode) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_device_h_disabled +#define deviceInit(pDevice, pCallContext, hClient, hDevice, deviceInst, hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, 
allocFlags, vaMode) deviceInit_IMPL(pDevice, pCallContext, hClient, hDevice, deviceInst, hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, allocFlags, vaMode) +#endif //__nvoc_device_h_disabled + +NV_STATUS deviceGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDevice, struct Device **ppDevice); +#define deviceGetByHandle(pClient, hDevice, ppDevice) deviceGetByHandle_IMPL(pClient, hDevice, ppDevice) +NV_STATUS deviceGetByInstance_IMPL(struct RsClient *pClient, NvU32 deviceInstance, struct Device **ppDevice); +#define deviceGetByInstance(pClient, deviceInstance, ppDevice) deviceGetByInstance_IMPL(pClient, deviceInstance, ppDevice) +NV_STATUS deviceGetByGpu_IMPL(struct RsClient *pClient, struct OBJGPU *pGpu, NvBool bAnyInGroup, struct Device **ppDevice); +#define deviceGetByGpu(pClient, pGpu, bAnyInGroup, ppDevice) deviceGetByGpu_IMPL(pClient, pGpu, bAnyInGroup, ppDevice) +NV_STATUS deviceGetDefaultVASpace_IMPL(struct Device *pDevice, struct OBJVASPACE **ppVAS); +#ifdef __nvoc_device_h_disabled +static inline NV_STATUS deviceGetDefaultVASpace(struct Device *pDevice, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_device_h_disabled +#define deviceGetDefaultVASpace(pDevice, ppVAS) deviceGetDefaultVASpace_IMPL(pDevice, ppVAS) +#endif //__nvoc_device_h_disabled + +NV_STATUS deviceSetClientShare_IMPL(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags); +#ifdef __nvoc_device_h_disabled +static inline NV_STATUS deviceSetClientShare(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_device_h_disabled +#define deviceSetClientShare(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags) deviceSetClientShare_IMPL(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags) +#endif //__nvoc_device_h_disabled + +void deviceRemoveFromClientShare_IMPL(struct Device *pDevice); +#ifdef __nvoc_device_h_disabled +static inline void deviceRemoveFromClientShare(struct Device *pDevice) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); +} +#else //__nvoc_device_h_disabled +#define deviceRemoveFromClientShare(pDevice) deviceRemoveFromClientShare_IMPL(pDevice) +#endif //__nvoc_device_h_disabled + +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +/** + * WARNING: This function is deprecated! Please use deviceGetByHandle. + */ +struct Device *CliGetDeviceInfo(NvHandle, NvHandle); + +/** + * WARNING: This function is deprecated and its use is *strongly* discouraged + * (especially for new code!). + * + * From the function name (CliSetGpuContext), it appears to be a simple accessor, + * but it violates expectations by modifying the SLI BC threadstate (via calls to + * GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully managed + * by the caller. + * + * Instead of using this routine, please use deviceGetByHandle, then call + * GPU_RES_GET_GPU, GPU_RES_GET_GPUGRP, and GPU_RES_SET_THREAD_BC_STATE as needed.
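+ * + * For example, a minimal sketch of the recommended replacement (pClient and + * hDevice stand in for whatever client and device handles the caller already + * holds; the error handling is illustrative only): + * + *   Device *pDevice; + *   NV_STATUS status = deviceGetByHandle(pClient, hDevice, &pDevice); + *   if (status != NV_OK) + *       return status; + * + *   OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + *   GPU_RES_SET_THREAD_BC_STATE(pDevice);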
+ * + * Note that GPU_RES_GET_GPU supports returning a pGpu for pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use the following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS CliSetGpuContext(NvHandle, NvHandle, OBJGPU **, struct OBJGPUGRP **); + +/** + * WARNING: This function is deprecated! Please use gpuGetByRef(). + */ +OBJGPU *CliGetGpuFromContext(RsResourceRef *pContextRef, NvBool *pbBroadcast); + +/** + * WARNING: This function is deprecated! Please use gpuGetByHandle(). + */ +OBJGPU *CliGetGpuFromHandle(NvHandle hClient, NvHandle hResource, NvBool *pbBroadcast); + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DEVICE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c new file mode 100644 index 0000000..3a417eb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c @@ -0,0 +1,329 @@ +#define NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_capabilities_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x99db3e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_DispCapabilities(DispCapabilities*); +void __nvoc_init_funcTable_DispCapabilities(DispCapabilities*); +NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispCapabilities(DispCapabilities*); +void __nvoc_dtor_DispCapabilities(DispCapabilities*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_DispCapabilities = { + /*pClassDef=*/ &__nvoc_class_def_DispCapabilities, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCapabilities, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, +
/*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispCapabilities = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_DispCapabilities_DispCapabilities, + &__nvoc_rtti_DispCapabilities_GpuResource, + &__nvoc_rtti_DispCapabilities_RmResource, + &__nvoc_rtti_DispCapabilities_RmResourceCommon, + &__nvoc_rtti_DispCapabilities_RsResource, + &__nvoc_rtti_DispCapabilities_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispCapabilities), + /*classId=*/ classId(DispCapabilities), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispCapabilities", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCapabilities, + /*pCastInfo=*/ &__nvoc_castinfo_DispCapabilities, + /*pExportInfo=*/ &__nvoc_export_info_DispCapabilities +}; + +static NV_STATUS __nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispcapGetRegBaseOffsetAndSize((struct DispCapabilities *)(((unsigned char *)pDispCapabilities) - __nvoc_rtti_DispCapabilities_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_dispcapShareCallback(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapControl(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapUnmap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemInterMapParams(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), ppMemDesc); +} + +static NV_STATUS 
__nvoc_thunk_GpuResource_dispcapGetMapAddrSpace(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle(struct DispCapabilities *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapControlFilter(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_dispcapAddAdditionalDependants(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_dispcapGetRefCount(struct DispCapabilities *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapCheckMemInterUnmap(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapMapTo(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapControl_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_dispcapCanCopy(struct DispCapabilities *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapInternalControlForward(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_dispcapPreDestruct(struct DispCapabilities *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapUnmapFrom(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispcapControl_Epilogue(struct DispCapabilities 
*pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapControlLookup(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapMap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispcapAccessCallback(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_DispCapabilities(DispCapabilities *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispCapabilities(DispCapabilities *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail_GpuResource; + __nvoc_init_dataField_DispCapabilities(pThis); + + status = __nvoc_dispcapConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail__init; + goto __nvoc_ctor_DispCapabilities_exit; // Success + +__nvoc_ctor_DispCapabilities_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DispCapabilities_fail_GpuResource: +__nvoc_ctor_DispCapabilities_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispCapabilities_1(DispCapabilities *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispcapGetRegBaseOffsetAndSize__ = &dispcapGetRegBaseOffsetAndSize_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize; + + pThis->__dispcapShareCallback__ = &__nvoc_thunk_GpuResource_dispcapShareCallback; + + pThis->__dispcapControl__ = &__nvoc_thunk_GpuResource_dispcapControl; + + pThis->__dispcapUnmap__ = &__nvoc_thunk_GpuResource_dispcapUnmap; + + pThis->__dispcapGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispcapGetMemInterMapParams; + + pThis->__dispcapGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor; + + pThis->__dispcapGetMapAddrSpace__ = 
&__nvoc_thunk_GpuResource_dispcapGetMapAddrSpace; + + pThis->__dispcapGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle; + + pThis->__dispcapControlFilter__ = &__nvoc_thunk_RsResource_dispcapControlFilter; + + pThis->__dispcapAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispcapAddAdditionalDependants; + + pThis->__dispcapGetRefCount__ = &__nvoc_thunk_RsResource_dispcapGetRefCount; + + pThis->__dispcapCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispcapCheckMemInterUnmap; + + pThis->__dispcapMapTo__ = &__nvoc_thunk_RsResource_dispcapMapTo; + + pThis->__dispcapControl_Prologue__ = &__nvoc_thunk_RmResource_dispcapControl_Prologue; + + pThis->__dispcapCanCopy__ = &__nvoc_thunk_RsResource_dispcapCanCopy; + + pThis->__dispcapInternalControlForward__ = &__nvoc_thunk_GpuResource_dispcapInternalControlForward; + + pThis->__dispcapPreDestruct__ = &__nvoc_thunk_RsResource_dispcapPreDestruct; + + pThis->__dispcapUnmapFrom__ = &__nvoc_thunk_RsResource_dispcapUnmapFrom; + + pThis->__dispcapControl_Epilogue__ = &__nvoc_thunk_RmResource_dispcapControl_Epilogue; + + pThis->__dispcapControlLookup__ = &__nvoc_thunk_RsResource_dispcapControlLookup; + + pThis->__dispcapMap__ = &__nvoc_thunk_GpuResource_dispcapMap; + + pThis->__dispcapAccessCallback__ = &__nvoc_thunk_RmResource_dispcapAccessCallback; +} + +void __nvoc_init_funcTable_DispCapabilities(DispCapabilities *pThis) { + __nvoc_init_funcTable_DispCapabilities_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_DispCapabilities(DispCapabilities *pThis) { + pThis->__nvoc_pbase_DispCapabilities = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_DispCapabilities(pThis); +} + +NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispCapabilities *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispCapabilities)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispCapabilities)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispCapabilities); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispCapabilities(pThis); + status = __nvoc_ctor_DispCapabilities(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispCapabilities_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispCapabilities_cleanup: + // do not call destructors here since the constructor already called them + 
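+    // (descriptive note: on this failure path the partially-built object has already been torn down; __nvoc_ctor_DispCapabilities unwinds any constructed base classes, e.g. via __nvoc_dtor_GpuResource, before returning an error, so only the raw allocation is freed here)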
portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispCapabilities(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h new file mode 100644 index 0000000..185980a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h @@ -0,0 +1,239 @@ +#ifndef _G_DISP_CAPABILITIES_NVOC_H_ +#define _G_DISP_CAPABILITIES_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispCapabilities class. +* +******************************************************************************/ + +#include "g_disp_capabilities_nvoc.h" + +#ifndef DISP_CAPABILITIES_H +#define DISP_CAPABILITIES_H + +#include "gpu/gpu_resource.h" + +/*! 
+ * RM internal class representing NVXXXX_DISP_CAPABILITIES + */ +#ifdef NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispCapabilities { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct DispCapabilities *__nvoc_pbase_DispCapabilities; + NV_STATUS (*__dispcapGetRegBaseOffsetAndSize__)(struct DispCapabilities *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__dispcapShareCallback__)(struct DispCapabilities *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispcapControl__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispcapUnmap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispcapGetMemInterMapParams__)(struct DispCapabilities *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispcapGetMemoryMappingDescriptor__)(struct DispCapabilities *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispcapGetMapAddrSpace__)(struct DispCapabilities *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__dispcapGetInternalObjectHandle__)(struct DispCapabilities *); + NV_STATUS (*__dispcapControlFilter__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__dispcapAddAdditionalDependants__)(struct RsClient *, struct DispCapabilities *, RsResourceRef *); + NvU32 (*__dispcapGetRefCount__)(struct DispCapabilities *); + NV_STATUS (*__dispcapCheckMemInterUnmap__)(struct DispCapabilities *, NvBool); + NV_STATUS (*__dispcapMapTo__)(struct DispCapabilities *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispcapControl_Prologue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispcapCanCopy__)(struct DispCapabilities *); + NV_STATUS (*__dispcapInternalControlForward__)(struct DispCapabilities *, NvU32, void *, NvU32); + void (*__dispcapPreDestruct__)(struct DispCapabilities *); + NV_STATUS (*__dispcapUnmapFrom__)(struct DispCapabilities *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispcapControl_Epilogue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispcapControlLookup__)(struct DispCapabilities *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispcapMap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispcapAccessCallback__)(struct DispCapabilities *, struct RsClient *, void *, RsAccessRight); + NvU32 ControlOffset; + NvU32 ControlLength; +}; + +#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__ +#define __NVOC_CLASS_DispCapabilities_TYPEDEF__ +typedef struct DispCapabilities DispCapabilities; +#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCapabilities +#define __nvoc_class_id_DispCapabilities 0x99db3e +#endif /* __nvoc_class_id_DispCapabilities */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities; + +#define __staticCast_DispCapabilities(pThis) \ + ((pThis)->__nvoc_pbase_DispCapabilities) + +#ifdef __nvoc_disp_capabilities_h_disabled +#define 
__dynamicCast_DispCapabilities(pThis) ((DispCapabilities*)NULL) +#else //__nvoc_disp_capabilities_h_disabled +#define __dynamicCast_DispCapabilities(pThis) \ + ((DispCapabilities*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCapabilities))) +#endif //__nvoc_disp_capabilities_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispCapabilities(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispCapabilities((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispcapGetRegBaseOffsetAndSize(pDispCapabilities, pGpu, pOffset, pSize) dispcapGetRegBaseOffsetAndSize_DISPATCH(pDispCapabilities, pGpu, pOffset, pSize) +#define dispcapShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispcapShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispcapControl(pGpuResource, pCallContext, pParams) dispcapControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispcapUnmap(pGpuResource, pCallContext, pCpuMapping) dispcapUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispcapGetMemInterMapParams(pRmResource, pParams) dispcapGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispcapGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispcapGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispcapGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispcapGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispcapGetInternalObjectHandle(pGpuResource) dispcapGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispcapControlFilter(pResource, pCallContext, pParams) dispcapControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispcapAddAdditionalDependants(pClient, pResource, pReference) dispcapAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispcapGetRefCount(pResource) dispcapGetRefCount_DISPATCH(pResource) +#define dispcapCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcapCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispcapMapTo(pResource, pParams) dispcapMapTo_DISPATCH(pResource, pParams) +#define dispcapControl_Prologue(pResource, pCallContext, pParams) dispcapControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapCanCopy(pResource) dispcapCanCopy_DISPATCH(pResource) +#define dispcapInternalControlForward(pGpuResource, command, pParams, size) dispcapInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispcapPreDestruct(pResource) dispcapPreDestruct_DISPATCH(pResource) +#define dispcapUnmapFrom(pResource, pParams) dispcapUnmapFrom_DISPATCH(pResource, pParams) +#define dispcapControl_Epilogue(pResource, pCallContext, pParams) dispcapControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapControlLookup(pResource, pParams, ppEntry) dispcapControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispcapMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispcapMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispcapAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcapAccessCallback_DISPATCH(pResource, 
pInvokingClient, pAllocParams, accessRight) +NV_STATUS dispcapGetRegBaseOffsetAndSize_IMPL(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS dispcapGetRegBaseOffsetAndSize_DISPATCH(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispCapabilities->__dispcapGetRegBaseOffsetAndSize__(pDispCapabilities, pGpu, pOffset, pSize); +} + +static inline NvBool dispcapShareCallback_DISPATCH(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispcapShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispcapControl_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispcapControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispcapUnmap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispcapUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispcapGetMemInterMapParams_DISPATCH(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispcapGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispcapGetMemoryMappingDescriptor_DISPATCH(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispcapGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispcapGetMapAddrSpace_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispcapGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle dispcapGetInternalObjectHandle_DISPATCH(struct DispCapabilities *pGpuResource) { + return pGpuResource->__dispcapGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispcapControlFilter_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispcapControlFilter__(pResource, pCallContext, pParams); +} + +static inline void dispcapAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) { + pResource->__dispcapAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 dispcapGetRefCount_DISPATCH(struct DispCapabilities *pResource) { + return pResource->__dispcapGetRefCount__(pResource); +} + +static inline NV_STATUS dispcapCheckMemInterUnmap_DISPATCH(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispcapCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispcapMapTo_DISPATCH(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispcapMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispcapControl_Prologue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispcapControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool 
dispcapCanCopy_DISPATCH(struct DispCapabilities *pResource) { + return pResource->__dispcapCanCopy__(pResource); +} + +static inline NV_STATUS dispcapInternalControlForward_DISPATCH(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispcapInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void dispcapPreDestruct_DISPATCH(struct DispCapabilities *pResource) { + pResource->__dispcapPreDestruct__(pResource); +} + +static inline NV_STATUS dispcapUnmapFrom_DISPATCH(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispcapUnmapFrom__(pResource, pParams); +} + +static inline void dispcapControl_Epilogue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispcapControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispcapControlLookup_DISPATCH(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispcapControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispcapMap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispcapMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispcapAccessCallback_DISPATCH(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispcapAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispcapConstruct_IMPL(struct DispCapabilities *arg_pDispCapabilities, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispcapConstruct(arg_pDispCapabilities, arg_pCallContext, arg_pParams) dispcapConstruct_IMPL(arg_pDispCapabilities, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_CAPABILITIES_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_CAPABILITIES_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c new file mode 100644 index 0000000..e925b3c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c @@ -0,0 +1,1146 @@ +#define NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_channel_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xbd2ff3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_DispChannel(DispChannel*); +void 
__nvoc_init_funcTable_DispChannel(DispChannel*); +NV_STATUS __nvoc_ctor_DispChannel(DispChannel*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma); +void __nvoc_init_dataField_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannel(DispChannel*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannel; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_DispChannel = { + /*pClassDef=*/ &__nvoc_class_def_DispChannel, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannel, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispChannel = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispChannel_DispChannel, + &__nvoc_rtti_DispChannel_Notifier, + &__nvoc_rtti_DispChannel_INotifier, + &__nvoc_rtti_DispChannel_GpuResource, + &__nvoc_rtti_DispChannel_RmResource, + &__nvoc_rtti_DispChannel_RmResourceCommon, + &__nvoc_rtti_DispChannel_RsResource, + &__nvoc_rtti_DispChannel_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispChannel), + /*classId=*/ classId(DispChannel), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispChannel", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannel, + /*pCastInfo=*/ &__nvoc_castinfo_DispChannel, + /*pExportInfo=*/ &__nvoc_export_info_DispChannel +}; + +static NV_STATUS __nvoc_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *)pDispChannel) - __nvoc_rtti_DispChannel_GpuResource.offset), pGpu, 
pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_dispchnShareCallback(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnMapTo(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnGetOrAllocNotifShare(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnCheckMemInterUnmap(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannel_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnGetMapAddrSpace(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_dispchnSetNotificationShare(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_dispchnGetRefCount(struct DispChannel *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchnAddAdditionalDependants(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnControl_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnInternalControlForward(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnUnmapFrom(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispchnControl_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnControlLookup(struct DispChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_dispchnGetInternalObjectHandle(struct DispChannel *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnControl(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnUnmap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnGetMemInterMapParams(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannel_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnGetMemoryMappingDescriptor(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannel_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnControlFilter(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnUnregisterEvent(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_dispchnCanCopy(struct DispChannel *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchnPreDestruct(struct DispChannel *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispchnGetNotificationListPtr(struct DispChannel *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispchnGetNotificationShare(struct DispChannel *pNotifier) { + return 
notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnMap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispchnAccessCallback(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannel = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_DispChannel(DispChannel *pThis) { + __nvoc_dispchnDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispChannel(DispChannel *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_DispChannel(DispChannel *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail_Notifier; + __nvoc_init_dataField_DispChannel(pThis); + + status = __nvoc_dispchnConstruct(pThis, arg_pCallContext, arg_pParams, arg_isDma); + if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail__init; + goto __nvoc_ctor_DispChannel_exit; // Success + +__nvoc_ctor_DispChannel_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_DispChannel_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DispChannel_fail_GpuResource: +__nvoc_ctor_DispChannel_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispChannel_1(DispChannel *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispchnGetRegBaseOffsetAndSize__ = &dispchnGetRegBaseOffsetAndSize_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize; + + pThis->__dispchnShareCallback__ = &__nvoc_thunk_GpuResource_dispchnShareCallback; + + pThis->__dispchnMapTo__ = &__nvoc_thunk_RsResource_dispchnMapTo; + + pThis->__dispchnGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispchnGetOrAllocNotifShare; + + pThis->__dispchnCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispchnCheckMemInterUnmap; + + pThis->__dispchnGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispchnGetMapAddrSpace; + + pThis->__dispchnSetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnSetNotificationShare; + + pThis->__dispchnGetRefCount__ = &__nvoc_thunk_RsResource_dispchnGetRefCount; + + 
pThis->__dispchnAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispchnAddAdditionalDependants; + + pThis->__dispchnControl_Prologue__ = &__nvoc_thunk_RmResource_dispchnControl_Prologue; + + pThis->__dispchnInternalControlForward__ = &__nvoc_thunk_GpuResource_dispchnInternalControlForward; + + pThis->__dispchnUnmapFrom__ = &__nvoc_thunk_RsResource_dispchnUnmapFrom; + + pThis->__dispchnControl_Epilogue__ = &__nvoc_thunk_RmResource_dispchnControl_Epilogue; + + pThis->__dispchnControlLookup__ = &__nvoc_thunk_RsResource_dispchnControlLookup; + + pThis->__dispchnGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispchnGetInternalObjectHandle; + + pThis->__dispchnControl__ = &__nvoc_thunk_GpuResource_dispchnControl; + + pThis->__dispchnUnmap__ = &__nvoc_thunk_GpuResource_dispchnUnmap; + + pThis->__dispchnGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispchnGetMemInterMapParams; + + pThis->__dispchnGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispchnGetMemoryMappingDescriptor; + + pThis->__dispchnControlFilter__ = &__nvoc_thunk_RsResource_dispchnControlFilter; + + pThis->__dispchnUnregisterEvent__ = &__nvoc_thunk_Notifier_dispchnUnregisterEvent; + + pThis->__dispchnCanCopy__ = &__nvoc_thunk_RsResource_dispchnCanCopy; + + pThis->__dispchnPreDestruct__ = &__nvoc_thunk_RsResource_dispchnPreDestruct; + + pThis->__dispchnGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispchnGetNotificationListPtr; + + pThis->__dispchnGetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnGetNotificationShare; + + pThis->__dispchnMap__ = &__nvoc_thunk_GpuResource_dispchnMap; + + pThis->__dispchnAccessCallback__ = &__nvoc_thunk_RmResource_dispchnAccessCallback; +} + +void __nvoc_init_funcTable_DispChannel(DispChannel *pThis) { + __nvoc_init_funcTable_DispChannel_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_DispChannel(DispChannel *pThis) { + pThis->__nvoc_pbase_DispChannel = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_DispChannel(pThis); +} + +NV_STATUS __nvoc_objCreate_DispChannel(DispChannel **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma) { + NV_STATUS status; + Object *pParentObj; + DispChannel *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispChannel)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispChannel)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispChannel); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispChannel(pThis); + status = __nvoc_ctor_DispChannel(pThis, arg_pCallContext, arg_pParams, arg_isDma); + if (status != NV_OK) goto __nvoc_objCreate_DispChannel_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispChannel_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + NvU32 arg_isDma = va_arg(args, NvU32); + + status = __nvoc_objCreate_DispChannel(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x10dec3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +void __nvoc_init_DispChannelPio(DispChannelPio*); +void __nvoc_init_funcTable_DispChannelPio(DispChannelPio*); +NV_STATUS __nvoc_ctor_DispChannelPio(DispChannelPio*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispChannelPio(DispChannelPio*); +void __nvoc_dtor_DispChannelPio(DispChannelPio*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannelPio; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_DispChannelPio = { + /*pClassDef=*/ &__nvoc_class_def_DispChannelPio, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannelPio, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_DispChannel = { + /*pClassDef=*/ &__nvoc_class_def_DispChannel, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispChannelPio = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_DispChannelPio_DispChannelPio, + &__nvoc_rtti_DispChannelPio_DispChannel, + &__nvoc_rtti_DispChannelPio_Notifier, + &__nvoc_rtti_DispChannelPio_INotifier, + &__nvoc_rtti_DispChannelPio_GpuResource, + &__nvoc_rtti_DispChannelPio_RmResource, + &__nvoc_rtti_DispChannelPio_RmResourceCommon, + &__nvoc_rtti_DispChannelPio_RsResource, + &__nvoc_rtti_DispChannelPio_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispChannelPio), + /*classId=*/ classId(DispChannelPio), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispChannelPio", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannelPio, + /*pCastInfo=*/ &__nvoc_castinfo_DispChannelPio, + /*pExportInfo=*/ &__nvoc_export_info_DispChannelPio +}; + +static NvBool __nvoc_thunk_GpuResource_dispchnpioShareCallback(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioMapTo(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnpioGetOrAllocNotifShare(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioCheckMemInterUnmap(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioGetMapAddrSpace(struct 
DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_dispchnpioSetNotificationShare(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_dispchnpioGetRefCount(struct DispChannelPio *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchnpioAddAdditionalDependants(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioControl_Prologue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_DispChannel_dispchnpioGetRegBaseOffsetAndSize(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *)pDispChannel) + __nvoc_rtti_DispChannelPio_DispChannel.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioInternalControlForward(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioUnmapFrom(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispchnpioControl_Epilogue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioControlLookup(struct DispChannelPio *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_dispchnpioGetInternalObjectHandle(struct DispChannelPio *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioControl(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioUnmap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioGetMemInterMapParams(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioGetMemoryMappingDescriptor(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioControlFilter(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnpioUnregisterEvent(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_dispchnpioCanCopy(struct DispChannelPio *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchnpioPreDestruct(struct DispChannelPio *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispchnpioGetNotificationListPtr(struct DispChannelPio *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispchnpioGetNotificationShare(struct DispChannelPio *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioMap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispchnpioAccessCallback(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO 
__nvoc_export_info_DispChannelPio = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannelPio(DispChannelPio *pThis) { + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispChannelPio(DispChannelPio *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispChannel(DispChannel* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, NvU32); +NV_STATUS __nvoc_ctor_DispChannelPio(DispChannelPio *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispChannel(&pThis->__nvoc_base_DispChannel, arg_pCallContext, arg_pParams, ((NvBool)(0 != 0))); + if (status != NV_OK) goto __nvoc_ctor_DispChannelPio_fail_DispChannel; + __nvoc_init_dataField_DispChannelPio(pThis); + + status = __nvoc_dispchnpioConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispChannelPio_fail__init; + goto __nvoc_ctor_DispChannelPio_exit; // Success + +__nvoc_ctor_DispChannelPio_fail__init: + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); +__nvoc_ctor_DispChannelPio_fail_DispChannel: +__nvoc_ctor_DispChannelPio_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispChannelPio_1(DispChannelPio *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispchnpioShareCallback__ = &__nvoc_thunk_GpuResource_dispchnpioShareCallback; + + pThis->__dispchnpioMapTo__ = &__nvoc_thunk_RsResource_dispchnpioMapTo; + + pThis->__dispchnpioGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispchnpioGetOrAllocNotifShare; + + pThis->__dispchnpioCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispchnpioCheckMemInterUnmap; + + pThis->__dispchnpioGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispchnpioGetMapAddrSpace; + + pThis->__dispchnpioSetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnpioSetNotificationShare; + + pThis->__dispchnpioGetRefCount__ = &__nvoc_thunk_RsResource_dispchnpioGetRefCount; + + pThis->__dispchnpioAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispchnpioAddAdditionalDependants; + + pThis->__dispchnpioControl_Prologue__ = &__nvoc_thunk_RmResource_dispchnpioControl_Prologue; + + pThis->__dispchnpioGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispChannel_dispchnpioGetRegBaseOffsetAndSize; + + pThis->__dispchnpioInternalControlForward__ = &__nvoc_thunk_GpuResource_dispchnpioInternalControlForward; + + pThis->__dispchnpioUnmapFrom__ = &__nvoc_thunk_RsResource_dispchnpioUnmapFrom; + + pThis->__dispchnpioControl_Epilogue__ = &__nvoc_thunk_RmResource_dispchnpioControl_Epilogue; + + pThis->__dispchnpioControlLookup__ = &__nvoc_thunk_RsResource_dispchnpioControlLookup; + + pThis->__dispchnpioGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispchnpioGetInternalObjectHandle; + + pThis->__dispchnpioControl__ = &__nvoc_thunk_GpuResource_dispchnpioControl; + + pThis->__dispchnpioUnmap__ = &__nvoc_thunk_GpuResource_dispchnpioUnmap; + + pThis->__dispchnpioGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispchnpioGetMemInterMapParams; + + pThis->__dispchnpioGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispchnpioGetMemoryMappingDescriptor; + + pThis->__dispchnpioControlFilter__ = &__nvoc_thunk_RsResource_dispchnpioControlFilter; + + pThis->__dispchnpioUnregisterEvent__ = &__nvoc_thunk_Notifier_dispchnpioUnregisterEvent; + + pThis->__dispchnpioCanCopy__ = 
&__nvoc_thunk_RsResource_dispchnpioCanCopy; + + pThis->__dispchnpioPreDestruct__ = &__nvoc_thunk_RsResource_dispchnpioPreDestruct; + + pThis->__dispchnpioGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispchnpioGetNotificationListPtr; + + pThis->__dispchnpioGetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnpioGetNotificationShare; + + pThis->__dispchnpioMap__ = &__nvoc_thunk_GpuResource_dispchnpioMap; + + pThis->__dispchnpioAccessCallback__ = &__nvoc_thunk_RmResource_dispchnpioAccessCallback; +} + +void __nvoc_init_funcTable_DispChannelPio(DispChannelPio *pThis) { + __nvoc_init_funcTable_DispChannelPio_1(pThis); +} + +void __nvoc_init_DispChannel(DispChannel*); +void __nvoc_init_DispChannelPio(DispChannelPio *pThis) { + pThis->__nvoc_pbase_DispChannelPio = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DispChannel = &pThis->__nvoc_base_DispChannel; + __nvoc_init_DispChannel(&pThis->__nvoc_base_DispChannel); + __nvoc_init_funcTable_DispChannelPio(pThis); +} + +NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispChannelPio *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispChannelPio)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispChannelPio)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispChannelPio); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispChannelPio(pThis); + status = __nvoc_ctor_DispChannelPio(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispChannelPio_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispChannelPio_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispChannelPio(ppThis, pParent, createFlags, 
arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xfe3d2e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +void __nvoc_init_DispChannelDma(DispChannelDma*); +void __nvoc_init_funcTable_DispChannelDma(DispChannelDma*); +NV_STATUS __nvoc_ctor_DispChannelDma(DispChannelDma*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispChannelDma(DispChannelDma*); +void __nvoc_dtor_DispChannelDma(DispChannelDma*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannelDma; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_DispChannelDma = { + /*pClassDef=*/ &__nvoc_class_def_DispChannelDma, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannelDma, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_DispChannel = { + /*pClassDef=*/ &__nvoc_class_def_DispChannel, + /*dtor=*/ &__nvoc_destructFromBase, + 
/*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispChannelDma = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_DispChannelDma_DispChannelDma, + &__nvoc_rtti_DispChannelDma_DispChannel, + &__nvoc_rtti_DispChannelDma_Notifier, + &__nvoc_rtti_DispChannelDma_INotifier, + &__nvoc_rtti_DispChannelDma_GpuResource, + &__nvoc_rtti_DispChannelDma_RmResource, + &__nvoc_rtti_DispChannelDma_RmResourceCommon, + &__nvoc_rtti_DispChannelDma_RsResource, + &__nvoc_rtti_DispChannelDma_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispChannelDma), + /*classId=*/ classId(DispChannelDma), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispChannelDma", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannelDma, + /*pCastInfo=*/ &__nvoc_castinfo_DispChannelDma, + /*pExportInfo=*/ &__nvoc_export_info_DispChannelDma +}; + +static NvBool __nvoc_thunk_GpuResource_dispchndmaShareCallback(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchndmaMapTo(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchndmaGetOrAllocNotifShare(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchndmaCheckMemInterUnmap(struct DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaGetMapAddrSpace(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_dispchndmaSetNotificationShare(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_dispchndmaGetRefCount(struct DispChannelDma *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchndmaAddAdditionalDependants(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), 
pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchndmaControl_Prologue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_DispChannel_dispchndmaGetRegBaseOffsetAndSize(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *)pDispChannel) + __nvoc_rtti_DispChannelDma_DispChannel.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaInternalControlForward(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchndmaUnmapFrom(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispchndmaControl_Epilogue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchndmaControlLookup(struct DispChannelDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_dispchndmaGetInternalObjectHandle(struct DispChannelDma *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaControl(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaUnmap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchndmaGetMemInterMapParams(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchndmaGetMemoryMappingDescriptor(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), ppMemDesc); +} + +static NV_STATUS 
__nvoc_thunk_RsResource_dispchndmaControlFilter(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchndmaUnregisterEvent(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_dispchndmaCanCopy(struct DispChannelDma *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchndmaPreDestruct(struct DispChannelDma *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispchndmaGetNotificationListPtr(struct DispChannelDma *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispchndmaGetNotificationShare(struct DispChannelDma *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaMap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispchndmaAccessCallback(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannelDma = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannelDma(DispChannelDma *pThis) { + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispChannelDma(DispChannelDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispChannel(DispChannel* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, NvU32); +NV_STATUS __nvoc_ctor_DispChannelDma(DispChannelDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispChannel(&pThis->__nvoc_base_DispChannel, arg_pCallContext, arg_pParams, ((NvBool)(0 == 0))); + if (status != NV_OK) goto __nvoc_ctor_DispChannelDma_fail_DispChannel; + __nvoc_init_dataField_DispChannelDma(pThis); + + status = __nvoc_dispchndmaConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispChannelDma_fail__init; + goto __nvoc_ctor_DispChannelDma_exit; // 
Success + +__nvoc_ctor_DispChannelDma_fail__init: + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); +__nvoc_ctor_DispChannelDma_fail_DispChannel: +__nvoc_ctor_DispChannelDma_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispChannelDma_1(DispChannelDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispchndmaShareCallback__ = &__nvoc_thunk_GpuResource_dispchndmaShareCallback; + + pThis->__dispchndmaMapTo__ = &__nvoc_thunk_RsResource_dispchndmaMapTo; + + pThis->__dispchndmaGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispchndmaGetOrAllocNotifShare; + + pThis->__dispchndmaCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispchndmaCheckMemInterUnmap; + + pThis->__dispchndmaGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispchndmaGetMapAddrSpace; + + pThis->__dispchndmaSetNotificationShare__ = &__nvoc_thunk_Notifier_dispchndmaSetNotificationShare; + + pThis->__dispchndmaGetRefCount__ = &__nvoc_thunk_RsResource_dispchndmaGetRefCount; + + pThis->__dispchndmaAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispchndmaAddAdditionalDependants; + + pThis->__dispchndmaControl_Prologue__ = &__nvoc_thunk_RmResource_dispchndmaControl_Prologue; + + pThis->__dispchndmaGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispChannel_dispchndmaGetRegBaseOffsetAndSize; + + pThis->__dispchndmaInternalControlForward__ = &__nvoc_thunk_GpuResource_dispchndmaInternalControlForward; + + pThis->__dispchndmaUnmapFrom__ = &__nvoc_thunk_RsResource_dispchndmaUnmapFrom; + + pThis->__dispchndmaControl_Epilogue__ = &__nvoc_thunk_RmResource_dispchndmaControl_Epilogue; + + pThis->__dispchndmaControlLookup__ = &__nvoc_thunk_RsResource_dispchndmaControlLookup; + + pThis->__dispchndmaGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispchndmaGetInternalObjectHandle; + + pThis->__dispchndmaControl__ = &__nvoc_thunk_GpuResource_dispchndmaControl; + + pThis->__dispchndmaUnmap__ = &__nvoc_thunk_GpuResource_dispchndmaUnmap; + + pThis->__dispchndmaGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispchndmaGetMemInterMapParams; + + pThis->__dispchndmaGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispchndmaGetMemoryMappingDescriptor; + + pThis->__dispchndmaControlFilter__ = &__nvoc_thunk_RsResource_dispchndmaControlFilter; + + pThis->__dispchndmaUnregisterEvent__ = &__nvoc_thunk_Notifier_dispchndmaUnregisterEvent; + + pThis->__dispchndmaCanCopy__ = &__nvoc_thunk_RsResource_dispchndmaCanCopy; + + pThis->__dispchndmaPreDestruct__ = &__nvoc_thunk_RsResource_dispchndmaPreDestruct; + + pThis->__dispchndmaGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispchndmaGetNotificationListPtr; + + pThis->__dispchndmaGetNotificationShare__ = &__nvoc_thunk_Notifier_dispchndmaGetNotificationShare; + + pThis->__dispchndmaMap__ = &__nvoc_thunk_GpuResource_dispchndmaMap; + + pThis->__dispchndmaAccessCallback__ = &__nvoc_thunk_RmResource_dispchndmaAccessCallback; +} + +void __nvoc_init_funcTable_DispChannelDma(DispChannelDma *pThis) { + __nvoc_init_funcTable_DispChannelDma_1(pThis); +} + +void __nvoc_init_DispChannel(DispChannel*); +void __nvoc_init_DispChannelDma(DispChannelDma *pThis) { + pThis->__nvoc_pbase_DispChannelDma = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = 
&pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DispChannel = &pThis->__nvoc_base_DispChannel; + __nvoc_init_DispChannel(&pThis->__nvoc_base_DispChannel); + __nvoc_init_funcTable_DispChannelDma(pThis); +} + +NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispChannelDma *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispChannelDma)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispChannelDma)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispChannelDma); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispChannelDma(pThis); + status = __nvoc_ctor_DispChannelDma(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispChannelDma_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispChannelDma_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispChannelDma(DispChannelDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispChannelDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h new file mode 100644 index 0000000..6b9091c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h @@ -0,0 +1,776 @@ +#ifndef _G_DISP_CHANNEL_NVOC_H_ +#define _G_DISP_CHANNEL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispChannel and its derived classes. +* +******************************************************************************/ + +#include "g_disp_channel_nvoc.h" + +#ifndef DISP_CHANNEL_H +#define DISP_CHANNEL_H + +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + +struct DispObject; + +#ifndef __NVOC_CLASS_DispObject_TYPEDEF__ +#define __NVOC_CLASS_DispObject_TYPEDEF__ +typedef struct DispObject DispObject; +#endif /* __NVOC_CLASS_DispObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispObject +#define __nvoc_class_id_DispObject 0x999839 +#endif /* __nvoc_class_id_DispObject */ + + + +/*! 
+ * Base class for display channels + */ +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispChannel { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DispChannel *__nvoc_pbase_DispChannel; + NV_STATUS (*__dispchnGetRegBaseOffsetAndSize__)(struct DispChannel *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__dispchnShareCallback__)(struct DispChannel *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispchnMapTo__)(struct DispChannel *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispchnGetOrAllocNotifShare__)(struct DispChannel *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__dispchnCheckMemInterUnmap__)(struct DispChannel *, NvBool); + NV_STATUS (*__dispchnGetMapAddrSpace__)(struct DispChannel *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__dispchnSetNotificationShare__)(struct DispChannel *, struct NotifShare *); + NvU32 (*__dispchnGetRefCount__)(struct DispChannel *); + void (*__dispchnAddAdditionalDependants__)(struct RsClient *, struct DispChannel *, RsResourceRef *); + NV_STATUS (*__dispchnControl_Prologue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnInternalControlForward__)(struct DispChannel *, NvU32, void *, NvU32); + NV_STATUS (*__dispchnUnmapFrom__)(struct DispChannel *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispchnControl_Epilogue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnControlLookup__)(struct DispChannel *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__dispchnGetInternalObjectHandle__)(struct DispChannel *); + NV_STATUS (*__dispchnControl__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnUnmap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispchnGetMemInterMapParams__)(struct DispChannel *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispchnGetMemoryMappingDescriptor__)(struct DispChannel *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispchnControlFilter__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnUnregisterEvent__)(struct DispChannel *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__dispchnCanCopy__)(struct DispChannel *); + void (*__dispchnPreDestruct__)(struct DispChannel *); + PEVENTNOTIFICATION *(*__dispchnGetNotificationListPtr__)(struct DispChannel *); + struct NotifShare *(*__dispchnGetNotificationShare__)(struct DispChannel *); + NV_STATUS (*__dispchnMap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispchnAccessCallback__)(struct DispChannel *, struct RsClient *, void *, RsAccessRight); + struct DispObject *pDispObject; + NvU32 DispClass; + NvU32 InstanceNumber; + NvP64 pControl; + NvP64 pPriv; + NvU32 ControlOffset; + NvU32 ControlLength; + NvBool bIsDma; +}; + 
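+/*
+ * Illustrative sketch, not part of the generated header: the function
+ * pointers above form the NVOC vtable for DispChannel, and each of the
+ * dispchn*() convenience macros below expands to a *_DISPATCH() inline
+ * that calls through the matching slot.  __nvoc_init_funcTable_*() points
+ * each slot at either a local implementation or a thunk into a base
+ * class, so a virtual call amounts to the hand-written dispatch sketched
+ * here (pChannel, pCtx, and pParams are hypothetical names):
+ *
+ *   NV_STATUS callControl(struct DispChannel *pChannel,
+ *                         struct CALL_CONTEXT *pCtx,
+ *                         struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams)
+ *   {
+ *       // indirect call through the per-object vtable slot
+ *       return pChannel->__dispchnControl__(pChannel, pCtx, pParams);
+ *   }
+ */
+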
+#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +#define __staticCast_DispChannel(pThis) \ + ((pThis)->__nvoc_pbase_DispChannel) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannel(pThis) ((DispChannel*)NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannel(pThis) \ + ((DispChannel*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannel))) +#endif //__nvoc_disp_channel_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannel(DispChannel**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma); +#define __objCreate_DispChannel(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma) \ + __nvoc_objCreate_DispChannel((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams, arg_isDma) + +#define dispchnGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchnShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchnMapTo(pResource, pParams) dispchnMapTo_DISPATCH(pResource, pParams) +#define dispchnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define dispchnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchnGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchnSetNotificationShare(pNotifier, pNotifShare) dispchnSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchnGetRefCount(pResource) dispchnGetRefCount_DISPATCH(pResource) +#define dispchnAddAdditionalDependants(pClient, pResource, pReference) dispchnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchnControl_Prologue(pResource, pCallContext, pParams) dispchnControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnInternalControlForward(pGpuResource, command, pParams, size) dispchnInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchnUnmapFrom(pResource, pParams) dispchnUnmapFrom_DISPATCH(pResource, pParams) +#define dispchnControl_Epilogue(pResource, pCallContext, pParams) dispchnControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnControlLookup(pResource, pParams, ppEntry) dispchnControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispchnGetInternalObjectHandle(pGpuResource) dispchnGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchnControl(pGpuResource, pCallContext, pParams) dispchnControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchnUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define 
dispchnGetMemInterMapParams(pRmResource, pParams) dispchnGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchnControlFilter(pResource, pCallContext, pParams) dispchnControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchnCanCopy(pResource) dispchnCanCopy_DISPATCH(pResource) +#define dispchnPreDestruct(pResource) dispchnPreDestruct_DISPATCH(pResource) +#define dispchnGetNotificationListPtr(pNotifier) dispchnGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchnGetNotificationShare(pNotifier) dispchnGetNotificationShare_DISPATCH(pNotifier) +#define dispchnMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS dispchnGetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS dispchnGetRegBaseOffsetAndSize_DISPATCH(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__dispchnGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NvBool dispchnShareCallback_DISPATCH(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispchnShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchnMapTo_DISPATCH(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispchnMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchnGetOrAllocNotifShare_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispchnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS dispchnCheckMemInterUnmap_DISPATCH(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispchnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchnGetMapAddrSpace_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispchnGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void dispchnSetNotificationShare_DISPATCH(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispchnSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 dispchnGetRefCount_DISPATCH(struct DispChannel *pResource) { + return pResource->__dispchnGetRefCount__(pResource); +} + +static inline void dispchnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) { + pResource->__dispchnAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS dispchnControl_Prologue_DISPATCH(struct DispChannel *pResource, 
struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchnControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnInternalControlForward_DISPATCH(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispchnInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS dispchnUnmapFrom_DISPATCH(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispchnUnmapFrom__(pResource, pParams); +} + +static inline void dispchnControl_Epilogue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispchnControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnControlLookup_DISPATCH(struct DispChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispchnControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle dispchnGetInternalObjectHandle_DISPATCH(struct DispChannel *pGpuResource) { + return pGpuResource->__dispchnGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispchnControl_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispchnControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnUnmap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispchnGetMemInterMapParams_DISPATCH(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispchnGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchnGetMemoryMappingDescriptor_DISPATCH(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispchnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchnControlFilter_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchnControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnUnregisterEvent_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispchnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool dispchnCanCopy_DISPATCH(struct DispChannel *pResource) { + return pResource->__dispchnCanCopy__(pResource); +} + +static inline void dispchnPreDestruct_DISPATCH(struct DispChannel *pResource) { + pResource->__dispchnPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *dispchnGetNotificationListPtr_DISPATCH(struct DispChannel *pNotifier) { + return pNotifier->__dispchnGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispchnGetNotificationShare_DISPATCH(struct DispChannel *pNotifier) { + return pNotifier->__dispchnGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispchnMap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS 
*pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispchnAccessCallback_DISPATCH(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispchnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispchnConstruct_IMPL(struct DispChannel *arg_pDispChannel, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, NvU32 arg_isDma); +#define __nvoc_dispchnConstruct(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) dispchnConstruct_IMPL(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) +void dispchnDestruct_IMPL(struct DispChannel *pDispChannel); +#define __nvoc_dispchnDestruct(pDispChannel) dispchnDestruct_IMPL(pDispChannel) +void dispchnSetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu); +#ifdef __nvoc_disp_channel_h_disabled +static inline void dispchnSetRegBaseOffsetAndSize(struct DispChannel *pDispChannel, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu) dispchnSetRegBaseOffsetAndSize_IMPL(pDispChannel, pGpu) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnGrabChannel_IMPL(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms); +#ifdef __nvoc_disp_channel_h_disabled +static inline NV_STATUS dispchnGrabChannel(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnGrabChannel(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms) dispchnGrabChannel_IMPL(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnBindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel); +#define dispchnBindCtx(pGpu, pContextDma, hDispChannel) dispchnBindCtx_IMPL(pGpu, pContextDma, hDispChannel) +NV_STATUS dispchnUnbindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel); +#define dispchnUnbindCtx(pGpu, pContextDma, hDispChannel) dispchnUnbindCtx_IMPL(pGpu, pContextDma, hDispChannel) +void dispchnUnbindCtxFromAllChannels_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma); +#define dispchnUnbindCtxFromAllChannels(pGpu, pContextDma) dispchnUnbindCtxFromAllChannels_IMPL(pGpu, pContextDma) +void dispchnUnbindAllCtx_IMPL(struct OBJGPU *pGpu, struct DispChannel *pDispChannel); +#ifdef __nvoc_disp_channel_h_disabled +static inline void dispchnUnbindAllCtx(struct OBJGPU *pGpu, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnUnbindAllCtx(pGpu, pDispChannel) dispchnUnbindAllCtx_IMPL(pGpu, pDispChannel) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDisplayChannel, struct DispChannel **ppDispChannel); +#define dispchnGetByHandle(pClient, hDisplayChannel, ppDispChannel) dispchnGetByHandle_IMPL(pClient, hDisplayChannel, ppDispChannel) +#undef PRIVATE_FIELD + + +/*! 
+ * RM internal class representing XXX_XXX_CHANNEL_PIO + */ +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispChannelPio { + const struct NVOC_RTTI *__nvoc_rtti; + struct DispChannel __nvoc_base_DispChannel; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DispChannel *__nvoc_pbase_DispChannel; + struct DispChannelPio *__nvoc_pbase_DispChannelPio; + NvBool (*__dispchnpioShareCallback__)(struct DispChannelPio *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispchnpioMapTo__)(struct DispChannelPio *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispchnpioGetOrAllocNotifShare__)(struct DispChannelPio *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__dispchnpioCheckMemInterUnmap__)(struct DispChannelPio *, NvBool); + NV_STATUS (*__dispchnpioGetMapAddrSpace__)(struct DispChannelPio *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__dispchnpioSetNotificationShare__)(struct DispChannelPio *, struct NotifShare *); + NvU32 (*__dispchnpioGetRefCount__)(struct DispChannelPio *); + void (*__dispchnpioAddAdditionalDependants__)(struct RsClient *, struct DispChannelPio *, RsResourceRef *); + NV_STATUS (*__dispchnpioControl_Prologue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioGetRegBaseOffsetAndSize__)(struct DispChannelPio *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__dispchnpioInternalControlForward__)(struct DispChannelPio *, NvU32, void *, NvU32); + NV_STATUS (*__dispchnpioUnmapFrom__)(struct DispChannelPio *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispchnpioControl_Epilogue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioControlLookup__)(struct DispChannelPio *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__dispchnpioGetInternalObjectHandle__)(struct DispChannelPio *); + NV_STATUS (*__dispchnpioControl__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioUnmap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispchnpioGetMemInterMapParams__)(struct DispChannelPio *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispchnpioGetMemoryMappingDescriptor__)(struct DispChannelPio *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispchnpioControlFilter__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioUnregisterEvent__)(struct DispChannelPio *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__dispchnpioCanCopy__)(struct DispChannelPio *); + void (*__dispchnpioPreDestruct__)(struct DispChannelPio *); + PEVENTNOTIFICATION *(*__dispchnpioGetNotificationListPtr__)(struct DispChannelPio *); + struct NotifShare *(*__dispchnpioGetNotificationShare__)(struct DispChannelPio *); + NV_STATUS (*__dispchnpioMap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispchnpioAccessCallback__)(struct DispChannelPio *, struct RsClient *, void *, 
RsAccessRight); +}; + +#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__ +#define __NVOC_CLASS_DispChannelPio_TYPEDEF__ +typedef struct DispChannelPio DispChannelPio; +#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelPio +#define __nvoc_class_id_DispChannelPio 0x10dec3 +#endif /* __nvoc_class_id_DispChannelPio */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio; + +#define __staticCast_DispChannelPio(pThis) \ + ((pThis)->__nvoc_pbase_DispChannelPio) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelPio(pThis) ((DispChannelPio*)NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelPio(pThis) \ + ((DispChannelPio*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelPio))) +#endif //__nvoc_disp_channel_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispChannelPio(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispChannelPio((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispchnpioShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnpioShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchnpioMapTo(pResource, pParams) dispchnpioMapTo_DISPATCH(pResource, pParams) +#define dispchnpioGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnpioGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define dispchnpioCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnpioCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchnpioGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnpioGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchnpioSetNotificationShare(pNotifier, pNotifShare) dispchnpioSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchnpioGetRefCount(pResource) dispchnpioGetRefCount_DISPATCH(pResource) +#define dispchnpioAddAdditionalDependants(pClient, pResource, pReference) dispchnpioAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchnpioControl_Prologue(pResource, pCallContext, pParams) dispchnpioControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnpioGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchnpioInternalControlForward(pGpuResource, command, pParams, size) dispchnpioInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchnpioUnmapFrom(pResource, pParams) dispchnpioUnmapFrom_DISPATCH(pResource, pParams) +#define dispchnpioControl_Epilogue(pResource, pCallContext, pParams) dispchnpioControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioControlLookup(pResource, pParams, ppEntry) dispchnpioControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispchnpioGetInternalObjectHandle(pGpuResource) dispchnpioGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchnpioControl(pGpuResource, pCallContext, pParams) dispchnpioControl_DISPATCH(pGpuResource, pCallContext, 
pParams) +#define dispchnpioUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnpioUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispchnpioGetMemInterMapParams(pRmResource, pParams) dispchnpioGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchnpioGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnpioGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchnpioControlFilter(pResource, pCallContext, pParams) dispchnpioControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnpioUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchnpioCanCopy(pResource) dispchnpioCanCopy_DISPATCH(pResource) +#define dispchnpioPreDestruct(pResource) dispchnpioPreDestruct_DISPATCH(pResource) +#define dispchnpioGetNotificationListPtr(pNotifier) dispchnpioGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchnpioGetNotificationShare(pNotifier) dispchnpioGetNotificationShare_DISPATCH(pNotifier) +#define dispchnpioMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnpioMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchnpioAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnpioAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool dispchnpioShareCallback_DISPATCH(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispchnpioShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchnpioMapTo_DISPATCH(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispchnpioMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchnpioGetOrAllocNotifShare_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispchnpioGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS dispchnpioCheckMemInterUnmap_DISPATCH(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispchnpioCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchnpioGetMapAddrSpace_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispchnpioGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void dispchnpioSetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispchnpioSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 dispchnpioGetRefCount_DISPATCH(struct DispChannelPio *pResource) { + return pResource->__dispchnpioGetRefCount__(pResource); +} + +static inline void dispchnpioAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) { + pResource->__dispchnpioAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS dispchnpioControl_Prologue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
pResource->__dispchnpioControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__dispchnpioGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispchnpioInternalControlForward_DISPATCH(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispchnpioInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS dispchnpioUnmapFrom_DISPATCH(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispchnpioUnmapFrom__(pResource, pParams); +} + +static inline void dispchnpioControl_Epilogue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispchnpioControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioControlLookup_DISPATCH(struct DispChannelPio *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispchnpioControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle dispchnpioGetInternalObjectHandle_DISPATCH(struct DispChannelPio *pGpuResource) { + return pGpuResource->__dispchnpioGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispchnpioControl_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispchnpioControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioUnmap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnpioUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispchnpioGetMemInterMapParams_DISPATCH(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispchnpioGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchnpioGetMemoryMappingDescriptor_DISPATCH(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispchnpioGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchnpioControlFilter_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchnpioControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioUnregisterEvent_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispchnpioUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool dispchnpioCanCopy_DISPATCH(struct DispChannelPio *pResource) { + return pResource->__dispchnpioCanCopy__(pResource); +} + +static inline void dispchnpioPreDestruct_DISPATCH(struct DispChannelPio *pResource) { + pResource->__dispchnpioPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *dispchnpioGetNotificationListPtr_DISPATCH(struct DispChannelPio *pNotifier) { + return pNotifier->__dispchnpioGetNotificationListPtr__(pNotifier); +} + +static inline 
struct NotifShare *dispchnpioGetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier) { + return pNotifier->__dispchnpioGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispchnpioMap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnpioMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispchnpioAccessCallback_DISPATCH(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispchnpioAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispchnpioConstruct_IMPL(struct DispChannelPio *arg_pDispChannelPio, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispchnpioConstruct(arg_pDispChannelPio, arg_pCallContext, arg_pParams) dispchnpioConstruct_IMPL(arg_pDispChannelPio, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_XXX_CHANNEL_DMA + */ +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispChannelDma { + const struct NVOC_RTTI *__nvoc_rtti; + struct DispChannel __nvoc_base_DispChannel; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DispChannel *__nvoc_pbase_DispChannel; + struct DispChannelDma *__nvoc_pbase_DispChannelDma; + NvBool (*__dispchndmaShareCallback__)(struct DispChannelDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispchndmaMapTo__)(struct DispChannelDma *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispchndmaGetOrAllocNotifShare__)(struct DispChannelDma *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__dispchndmaCheckMemInterUnmap__)(struct DispChannelDma *, NvBool); + NV_STATUS (*__dispchndmaGetMapAddrSpace__)(struct DispChannelDma *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__dispchndmaSetNotificationShare__)(struct DispChannelDma *, struct NotifShare *); + NvU32 (*__dispchndmaGetRefCount__)(struct DispChannelDma *); + void (*__dispchndmaAddAdditionalDependants__)(struct RsClient *, struct DispChannelDma *, RsResourceRef *); + NV_STATUS (*__dispchndmaControl_Prologue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaGetRegBaseOffsetAndSize__)(struct DispChannelDma *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__dispchndmaInternalControlForward__)(struct DispChannelDma *, NvU32, void *, NvU32); + NV_STATUS (*__dispchndmaUnmapFrom__)(struct DispChannelDma *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispchndmaControl_Epilogue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaControlLookup__)(struct DispChannelDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__dispchndmaGetInternalObjectHandle__)(struct DispChannelDma *); + NV_STATUS (*__dispchndmaControl__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaUnmap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispchndmaGetMemInterMapParams__)(struct DispChannelDma *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispchndmaGetMemoryMappingDescriptor__)(struct DispChannelDma *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispchndmaControlFilter__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaUnregisterEvent__)(struct DispChannelDma *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__dispchndmaCanCopy__)(struct DispChannelDma *); + void (*__dispchndmaPreDestruct__)(struct DispChannelDma *); + PEVENTNOTIFICATION *(*__dispchndmaGetNotificationListPtr__)(struct DispChannelDma *); + struct NotifShare *(*__dispchndmaGetNotificationShare__)(struct DispChannelDma *); + NV_STATUS (*__dispchndmaMap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispchndmaAccessCallback__)(struct DispChannelDma *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__ +#define __NVOC_CLASS_DispChannelDma_TYPEDEF__ +typedef struct DispChannelDma DispChannelDma; +#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelDma +#define __nvoc_class_id_DispChannelDma 0xfe3d2e +#endif /* __nvoc_class_id_DispChannelDma */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma; + +#define __staticCast_DispChannelDma(pThis) \ + ((pThis)->__nvoc_pbase_DispChannelDma) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelDma(pThis) ((DispChannelDma*)NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelDma(pThis) \ + ((DispChannelDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelDma))) +#endif //__nvoc_disp_channel_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispChannelDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispChannelDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispchndmaShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchndmaShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchndmaMapTo(pResource, pParams) dispchndmaMapTo_DISPATCH(pResource, pParams) +#define dispchndmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchndmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define dispchndmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchndmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchndmaGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchndmaGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchndmaSetNotificationShare(pNotifier, pNotifShare) dispchndmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchndmaGetRefCount(pResource) dispchndmaGetRefCount_DISPATCH(pResource) +#define dispchndmaAddAdditionalDependants(pClient, pResource, pReference) 
dispchndmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchndmaControl_Prologue(pResource, pCallContext, pParams) dispchndmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchndmaGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchndmaInternalControlForward(pGpuResource, command, pParams, size) dispchndmaInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchndmaUnmapFrom(pResource, pParams) dispchndmaUnmapFrom_DISPATCH(pResource, pParams) +#define dispchndmaControl_Epilogue(pResource, pCallContext, pParams) dispchndmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaControlLookup(pResource, pParams, ppEntry) dispchndmaControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispchndmaGetInternalObjectHandle(pGpuResource) dispchndmaGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchndmaControl(pGpuResource, pCallContext, pParams) dispchndmaControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchndmaUnmap(pGpuResource, pCallContext, pCpuMapping) dispchndmaUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispchndmaGetMemInterMapParams(pRmResource, pParams) dispchndmaGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchndmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchndmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchndmaControlFilter(pResource, pCallContext, pParams) dispchndmaControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchndmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchndmaCanCopy(pResource) dispchndmaCanCopy_DISPATCH(pResource) +#define dispchndmaPreDestruct(pResource) dispchndmaPreDestruct_DISPATCH(pResource) +#define dispchndmaGetNotificationListPtr(pNotifier) dispchndmaGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchndmaGetNotificationShare(pNotifier) dispchndmaGetNotificationShare_DISPATCH(pNotifier) +#define dispchndmaMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchndmaMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchndmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchndmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool dispchndmaShareCallback_DISPATCH(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispchndmaShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchndmaMapTo_DISPATCH(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispchndmaMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchndmaGetOrAllocNotifShare_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispchndmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS dispchndmaCheckMemInterUnmap_DISPATCH(struct DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return 
pRmResource->__dispchndmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchndmaGetMapAddrSpace_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispchndmaGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void dispchndmaSetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispchndmaSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 dispchndmaGetRefCount_DISPATCH(struct DispChannelDma *pResource) { + return pResource->__dispchndmaGetRefCount__(pResource); +} + +static inline void dispchndmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) { + pResource->__dispchndmaAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS dispchndmaControl_Prologue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchndmaControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__dispchndmaGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispchndmaInternalControlForward_DISPATCH(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispchndmaInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS dispchndmaUnmapFrom_DISPATCH(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispchndmaUnmapFrom__(pResource, pParams); +} + +static inline void dispchndmaControl_Epilogue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispchndmaControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaControlLookup_DISPATCH(struct DispChannelDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispchndmaControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle dispchndmaGetInternalObjectHandle_DISPATCH(struct DispChannelDma *pGpuResource) { + return pGpuResource->__dispchndmaGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispchndmaControl_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispchndmaControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaUnmap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchndmaUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispchndmaGetMemInterMapParams_DISPATCH(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispchndmaGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchndmaGetMemoryMappingDescriptor_DISPATCH(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR 
**ppMemDesc) { + return pRmResource->__dispchndmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchndmaControlFilter_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchndmaControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaUnregisterEvent_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispchndmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool dispchndmaCanCopy_DISPATCH(struct DispChannelDma *pResource) { + return pResource->__dispchndmaCanCopy__(pResource); +} + +static inline void dispchndmaPreDestruct_DISPATCH(struct DispChannelDma *pResource) { + pResource->__dispchndmaPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *dispchndmaGetNotificationListPtr_DISPATCH(struct DispChannelDma *pNotifier) { + return pNotifier->__dispchndmaGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispchndmaGetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier) { + return pNotifier->__dispchndmaGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispchndmaMap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchndmaMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispchndmaAccessCallback_DISPATCH(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispchndmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispchndmaConstruct_IMPL(struct DispChannelDma *arg_pDispChannelDma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispchndmaConstruct(arg_pDispChannelDma, arg_pCallContext, arg_pParams) dispchndmaConstruct_IMPL(arg_pDispChannelDma, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_CHANNEL_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_CHANNEL_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c new file mode 100644 index 0000000..8d6cb58 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c @@ -0,0 +1,169 @@ +#define NVOC_DISP_INST_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_inst_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8223e2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* ); +void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory*, 
RmHalspecOwner* ); +void __nvoc_dtor_DisplayInstanceMemory(DisplayInstanceMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayInstanceMemory; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayInstanceMemory_DisplayInstanceMemory = { + /*pClassDef=*/ &__nvoc_class_def_DisplayInstanceMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DisplayInstanceMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayInstanceMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayInstanceMemory, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DisplayInstanceMemory = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_DisplayInstanceMemory_DisplayInstanceMemory, + &__nvoc_rtti_DisplayInstanceMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DisplayInstanceMemory), + /*classId=*/ classId(DisplayInstanceMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DisplayInstanceMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DisplayInstanceMemory, + /*pCastInfo=*/ &__nvoc_castinfo_DisplayInstanceMemory, + /*pExportInfo=*/ &__nvoc_export_info_DisplayInstanceMemory +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayInstanceMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_DisplayInstanceMemory(DisplayInstanceMemory *pThis) { + __nvoc_instmemDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail_Object; + __nvoc_init_dataField_DisplayInstanceMemory(pThis, pRmhalspecowner); + + status = __nvoc_instmemConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail__init; + goto __nvoc_ctor_DisplayInstanceMemory_exit; // Success + +__nvoc_ctor_DisplayInstanceMemory_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_DisplayInstanceMemory_fail_Object: +__nvoc_ctor_DisplayInstanceMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_DisplayInstanceMemory_1(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); +} + +void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + 
__nvoc_init_funcTable_DisplayInstanceMemory_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DisplayInstanceMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_DisplayInstanceMemory(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + DisplayInstanceMemory *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DisplayInstanceMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DisplayInstanceMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DisplayInstanceMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DisplayInstanceMemory(pThis, pRmhalspecowner); + status = __nvoc_ctor_DisplayInstanceMemory(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_DisplayInstanceMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DisplayInstanceMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_DisplayInstanceMemory(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h new file mode 100644 index 0000000..a2348bb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h @@ -0,0 +1,358 @@ +#ifndef _G_DISP_INST_MEM_NVOC_H_ +#define _G_DISP_INST_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_disp_inst_mem_nvoc.h" + +#ifndef DISPLAY_INSTANCE_MEMORY_H +#define DISPLAY_INSTANCE_MEMORY_H + +/* ------------------------ Includes --------------------------------------- */ +#include "nvtypes.h" +#include "nvoc/utility.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/mem_mgr/mem_desc.h" + +/* ------------------------ Forward Declaration ---------------------------- */ +typedef struct OBJEHEAP OBJEHEAP; +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + + +/* ------------------------ Macros & Defines ------------------------------- */ +#define KERNEL_DISPLAY_GET_INST_MEM(p) ((p)->pInst) +#define DISP_INST_MEM_ALIGN 0x10000 + +/* ------------------------ Types definitions ------------------------------ */ +/*! + * A software hash table entry + */ +typedef struct +{ + struct ContextDma *pContextDma; + struct DispChannel *pDispChannel; +} SW_HASH_TABLE_ENTRY; + +#ifdef NVOC_DISP_INST_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DisplayInstanceMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct DisplayInstanceMemory *__nvoc_pbase_DisplayInstanceMemory; + NV_ADDRESS_SPACE instMemAddrSpace; + NvU32 instMemAttr; + NvU64 instMemBase; + NvU32 instMemSize; + MEMORY_DESCRIPTOR *pAllocedInstMemDesc; + MEMORY_DESCRIPTOR *pInstMemDesc; + void *pInstMem; + NvU32 nHashTableEntries; + NvU32 hashTableBaseAddr; + SW_HASH_TABLE_ENTRY *pHashTable; + OBJEHEAP *pInstHeap; +}; + +#ifndef __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +#define __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +typedef struct DisplayInstanceMemory DisplayInstanceMemory; +#endif /* __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayInstanceMemory +#define __nvoc_class_id_DisplayInstanceMemory 0x8223e2 +#endif /* __nvoc_class_id_DisplayInstanceMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory; + +#define __staticCast_DisplayInstanceMemory(pThis) \ + ((pThis)->__nvoc_pbase_DisplayInstanceMemory) + +#ifdef __nvoc_disp_inst_mem_h_disabled +#define __dynamicCast_DisplayInstanceMemory(pThis) ((DisplayInstanceMemory*)NULL) +#else //__nvoc_disp_inst_mem_h_disabled +#define __dynamicCast_DisplayInstanceMemory(pThis) \ + ((DisplayInstanceMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DisplayInstanceMemory))) +#endif //__nvoc_disp_inst_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32, va_list); + 
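+/*
+ * Illustrative usage sketch (not part of the generated interface), using the
+ * cast macros defined above and the creation macro declared below.  The
+ * parent must sit under an RmHalspecOwner, otherwise creation fails with
+ * NV_ERR_INVALID_ARGUMENT; pKernelDisplay here is a hypothetical parent:
+ *
+ *   DisplayInstanceMemory *pInstMem = NULL;
+ *   NV_STATUS status = __objCreate_DisplayInstanceMemory(&pInstMem, pKernelDisplay, 0);
+ *   if (status == NV_OK)
+ *   {
+ *       Dynamic *pDynamic = staticCast(pInstMem, Dynamic);   // unchecked up-cast
+ *       DisplayInstanceMemory *pCheck =
+ *           dynamicCast(pDynamic, DisplayInstanceMemory);    // RTTI-checked down-cast
+ *   }
+ */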
+NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32); +#define __objCreate_DisplayInstanceMemory(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_DisplayInstanceMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +void instmemGetSize_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemGetSize(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemGetSize(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize_v03_00(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemGetSize_HAL(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) + +NvU32 instmemGetHashTableBaseAddr_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NvU32 instmemGetHashTableBaseAddr(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return 0; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemGetHashTableBaseAddr(pGpu, pInstMem) instmemGetHashTableBaseAddr_v03_00(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemGetHashTableBaseAddr_HAL(pGpu, pInstMem) instmemGetHashTableBaseAddr(pGpu, pInstMem) + +NvBool instmemIsValid_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NvBool instmemIsValid(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_FALSE; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemIsValid(pGpu, pInstMem, offset) instmemIsValid_v03_00(pGpu, pInstMem, offset) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemIsValid_HAL(pGpu, pInstMem, offset) instmemIsValid(pGpu, pInstMem, offset) + +NvU32 instmemGenerateHashTableData_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NvU32 instmemGenerateHashTableData(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return 0; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemGenerateHashTableData(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData_v03_00(pGpu, pInstMem, hClient, offset, dispChannelNum) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemGenerateHashTableData_HAL(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData(pGpu, pInstMem, hClient, offset, dispChannelNum) + +NV_STATUS instmemHashFunc_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemHashFunc(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory 
was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemHashFunc(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc_v03_00(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemHashFunc_HAL(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) + +NV_STATUS instmemCommitContextDma_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemCommitContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemCommitContextDma(pGpu, pInstMem, pContextDma) instmemCommitContextDma_v03_00(pGpu, pInstMem, pContextDma) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemCommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemCommitContextDma(pGpu, pInstMem, pContextDma) + +static inline void instmemDecommitContextDma_b3696a(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + return; +} + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemDecommitContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemDecommitContextDma(pGpu, pInstMem, pContextDma) instmemDecommitContextDma_b3696a(pGpu, pInstMem, pContextDma) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemDecommitContextDma(pGpu, pInstMem, pContextDma) + +NV_STATUS instmemUpdateContextDma_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo); + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemUpdateContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUpdateContextDma(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma_v03_00(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemUpdateContextDma_HAL(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) + +NV_STATUS instmemConstruct_IMPL(struct DisplayInstanceMemory *arg_pInstMem); +#define __nvoc_instmemConstruct(arg_pInstMem) instmemConstruct_IMPL(arg_pInstMem) +void instmemDestruct_IMPL(struct DisplayInstanceMemory *pInstMem); +#define __nvoc_instmemDestruct(pInstMem) instmemDestruct_IMPL(pInstMem) +NV_STATUS instmemStateInitLocked_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateInitLocked(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + 
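+    /*
+     * Stub pattern used throughout this header: when the class is compiled
+     * out (__nvoc_disp_inst_mem_h_disabled), each instmem* macro resolves to
+     * an inline stub like this one, which asserts and returns a benign
+     * default (NV_ERR_NOT_SUPPORTED, 0, or NV_FALSE) instead of dispatching
+     * to the _IMPL / _v03_00 implementation.
+     */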
NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateInitLocked(pGpu, pInstMem) instmemStateInitLocked_IMPL(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemStateDestroy_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemStateDestroy(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateDestroy(pGpu, pInstMem) instmemStateDestroy_IMPL(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemStateLoad_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateLoad(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateLoad(pGpu, pInstMem, flags) instmemStateLoad_IMPL(pGpu, pInstMem, flags) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemStateUnload_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateUnload(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateUnload(pGpu, pInstMem, flags) instmemStateUnload_IMPL(pGpu, pInstMem, flags) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemSetMemory_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemSetMemory(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemSetMemory(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize) instmemSetMemory_IMPL(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemBindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemBindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemBindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemUnbindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS 
instmemUnbindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemUnbindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemUnbindContextDmaFromAllChannels_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemUnbindContextDmaFromAllChannels(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUnbindContextDmaFromAllChannels(pGpu, pInstMem, pContextDma) instmemUnbindContextDmaFromAllChannels_IMPL(pGpu, pInstMem, pContextDma) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemUnbindDispChannelContextDmas_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct DispChannel *pDispChannel); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemUnbindDispChannelContextDmas(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUnbindDispChannelContextDmas(pGpu, pInstMem, pDispChannel) instmemUnbindDispChannelContextDmas_IMPL(pGpu, pInstMem, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemReserveContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *offset); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemReserveContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *offset) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemReserveContextDma(pGpu, pInstMem, offset) instmemReserveContextDma_IMPL(pGpu, pInstMem, offset) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemFreeContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemFreeContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemFreeContextDma(pGpu, pInstMem, offset) instmemFreeContextDma_IMPL(pGpu, pInstMem, offset) +#endif //__nvoc_disp_inst_mem_h_disabled + +#undef PRIVATE_FIELD + + +#endif // DISPLAY_INSTANCE_MEMORY_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_INST_MEM_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c new file mode 100644 index 0000000..70d76dd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c @@ -0,0 +1,4087 @@ +#define NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include 
"utils/nvassert.h" +#include "g_disp_objs_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe9980c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DisplayApi(DisplayApi*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_dtor_DisplayApi(DisplayApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayApi; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DisplayApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DisplayApi = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_DisplayApi_DisplayApi, + &__nvoc_rtti_DisplayApi_Notifier, + &__nvoc_rtti_DisplayApi_INotifier, + &__nvoc_rtti_DisplayApi_RmResource, + &__nvoc_rtti_DisplayApi_RmResourceCommon, + &__nvoc_rtti_DisplayApi_RsResource, + &__nvoc_rtti_DisplayApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DisplayApi), + /*classId=*/ classId(DisplayApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DisplayApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DisplayApi, + /*pCastInfo=*/ &__nvoc_castinfo_DisplayApi, + /*pExportInfo=*/ &__nvoc_export_info_DisplayApi +}; + +static 
NV_STATUS __nvoc_thunk_DisplayApi_resControl(struct RsResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) - __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) - __nvoc_rtti_DisplayApi_RmResource.offset), pCallContext, pRsParams); +} + +static void __nvoc_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) - __nvoc_rtti_DisplayApi_RmResource.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DisplayApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DisplayApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DisplayApi_RmResource.offset), ppMemDesc); +} + +static void __nvoc_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, 
NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NvBool __nvoc_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiControlLookup(struct DisplayApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_DisplayApi(DisplayApi *pThis) { + 
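+    /*
+     * The class's own destruct hook runs first, then the base-class
+     * destructors in declaration order; only the constructor's failure path
+     * below unwinds the bases in strictly reverse order via its goto chain.
+     */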
__nvoc_dispapiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail_RmResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail_Notifier; + __nvoc_init_dataField_DisplayApi(pThis, pRmhalspecowner); + + status = __nvoc_dispapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail__init; + goto __nvoc_ctor_DisplayApi_exit; // Success + +__nvoc_ctor_DisplayApi_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_DisplayApi_fail_Notifier: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_DisplayApi_fail_RmResource: +__nvoc_ctor_DisplayApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_DisplayApi_1(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__dispapiControl__ = &dispapiControl_IMPL; + + pThis->__dispapiControl_Prologue__ = &dispapiControl_Prologue_IMPL; + + pThis->__dispapiControl_Epilogue__ = &dispapiControl_Epilogue_IMPL; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_DisplayApi_resControl; + + pThis->__nvoc_base_RmResource.__rmresControl_Prologue__ = &__nvoc_thunk_DisplayApi_rmresControl_Prologue; + + pThis->__nvoc_base_RmResource.__rmresControl_Epilogue__ = &__nvoc_thunk_DisplayApi_rmresControl_Epilogue; + + pThis->__dispapiShareCallback__ = &__nvoc_thunk_RmResource_dispapiShareCallback; + + pThis->__dispapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispapiCheckMemInterUnmap; + + pThis->__dispapiAccessCallback__ = &__nvoc_thunk_RmResource_dispapiAccessCallback; + + pThis->__dispapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispapiGetMemInterMapParams; + + pThis->__dispapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispapiGetMemoryMappingDescriptor; + + pThis->__dispapiSetNotificationShare__ = &__nvoc_thunk_Notifier_dispapiSetNotificationShare; + + pThis->__dispapiControlFilter__ = &__nvoc_thunk_RsResource_dispapiControlFilter; + + pThis->__dispapiGetRefCount__ = 
&__nvoc_thunk_RsResource_dispapiGetRefCount; + + pThis->__dispapiUnregisterEvent__ = &__nvoc_thunk_Notifier_dispapiUnregisterEvent; + + pThis->__dispapiUnmap__ = &__nvoc_thunk_RsResource_dispapiUnmap; + + pThis->__dispapiCanCopy__ = &__nvoc_thunk_RsResource_dispapiCanCopy; + + pThis->__dispapiMapTo__ = &__nvoc_thunk_RsResource_dispapiMapTo; + + pThis->__dispapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispapiAddAdditionalDependants; + + pThis->__dispapiPreDestruct__ = &__nvoc_thunk_RsResource_dispapiPreDestruct; + + pThis->__dispapiUnmapFrom__ = &__nvoc_thunk_RsResource_dispapiUnmapFrom; + + pThis->__dispapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispapiGetNotificationListPtr; + + pThis->__dispapiGetNotificationShare__ = &__nvoc_thunk_Notifier_dispapiGetNotificationShare; + + pThis->__dispapiControlLookup__ = &__nvoc_thunk_RsResource_dispapiControlLookup; + + pThis->__dispapiMap__ = &__nvoc_thunk_RsResource_dispapiMap; + + pThis->__dispapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispapiGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DisplayApi_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DisplayApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_DisplayApi(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DisplayApi(DisplayApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DisplayApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DisplayApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DisplayApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DisplayApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DisplayApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_DisplayApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DisplayApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DisplayApi_cleanup: + // do not call destructors here since the constructor already called them + 
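+    // (the constructor's goto chain already unwound Notifier and RmResource
+    // on failure, so only the top-level allocation is released here)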
portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DisplayApi(DisplayApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DisplayApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x999839 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +void __nvoc_init_DispObject(DispObject*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DispObject(DispObject*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_DispObject(DispObject*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispObject(DispObject*, RmHalspecOwner* ); +void __nvoc_dtor_DispObject(DispObject*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispObject; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_DispObject = { + /*pClassDef=*/ &__nvoc_class_def_DispObject, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispObject, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ 
NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispObject = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispObject_DispObject, + &__nvoc_rtti_DispObject_DisplayApi, + &__nvoc_rtti_DispObject_Notifier, + &__nvoc_rtti_DispObject_INotifier, + &__nvoc_rtti_DispObject_RmResource, + &__nvoc_rtti_DispObject_RmResourceCommon, + &__nvoc_rtti_DispObject_RsResource, + &__nvoc_rtti_DispObject_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispObject), + /*classId=*/ classId(DispObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispObject, + /*pCastInfo=*/ &__nvoc_castinfo_DispObject, + /*pExportInfo=*/ &__nvoc_export_info_DispObject +}; + +static NvBool __nvoc_thunk_RmResource_dispobjShareCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispobjControl(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispObject_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_dispobjAccessCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispobjGetMemInterMapParams(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispObject_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispobjGetMemoryMappingDescriptor(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispObject_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispobjCheckMemInterUnmap(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispObject_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_dispobjSetNotificationShare(struct DispObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjControlFilter(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispobjGetRefCount(struct DispObject *pResource) { + return 
resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispobjUnregisterEvent(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjUnmap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispobjControl_Prologue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispObject_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_dispobjCanCopy(struct DispObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjMapTo(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispobjAddAdditionalDependants(struct RsClient *pClient, struct DispObject *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispobjPreDestruct(struct DispObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjUnmapFrom(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispobjGetNotificationListPtr(struct DispObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_dispobjControl_Epilogue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispObject_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispobjGetNotificationShare(struct DispObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjControlLookup(struct DispObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS 
__nvoc_thunk_RsResource_dispobjMap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispobjGetOrAllocNotifShare(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispObject[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRmFreeFlags_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700117u, + /*paramSize=*/ sizeof(NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRmFreeFlags" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdIMPSetGetParameter_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700118u, + /*paramSize=*/ sizeof(NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdIMPSetGetParameter" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700202u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgStatus" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgUnderflowProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700203u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgUnderflowProp" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRgUnderflowProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700204u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRgUnderflowProp" +#endif + }, + { /* [5] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgFliplockProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700205u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgFliplockProp" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRgFliplockProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700206u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRgFliplockProp" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgConnectedLockpin_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700207u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgConnectedLockpin" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020au, + /*paramSize=*/ sizeof(NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgConnectedLockpinStateless" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgScanLine_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020cu, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgScanLine" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetSorSeqCtl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700301u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetSorSeqCtl" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorSeqCtl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700302u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorSeqCtl" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSeqProgSpeed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700305u, + /*paramSize=*/ sizeof(NV5070_CTRL_SEQ_PROG_SPEED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSeqProgSpeed" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetSorPwm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700420u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetSorPwm" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorPwm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700421u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorPwm" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetSorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700422u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetSorOpMode" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700423u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorOpMode" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorFlushMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700457u, + /*paramSize=*/ sizeof(NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorFlushMode" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSystemGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700709u, + /*paramSize=*/ sizeof(NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS), 
+ /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSystemGetCapsV2" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdEventSetTrigger_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700902u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdEventSetTrigger" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispObject = +{ + /*numEntries=*/ 20, + /*pExportEntries=*/ __nvoc_exported_method_def_DispObject +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispObject(DispObject *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispObject_fail_DisplayApi; + __nvoc_init_dataField_DispObject(pThis, pRmhalspecowner); + + status = __nvoc_dispobjConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispObject_fail__init; + goto __nvoc_ctor_DispObject_exit; // Success + +__nvoc_ctor_DispObject_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispObject_fail_DisplayApi: +__nvoc_ctor_DispObject_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispObject_1(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdSetRmFreeFlags__ = &dispobjCtrlCmdSetRmFreeFlags_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdIMPSetGetParameter__ = &dispobjCtrlCmdIMPSetGetParameter_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispobjCtrlCmdGetRgStatus__ = &dispobjCtrlCmdGetRgStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgUnderflowProp__ = &dispobjCtrlCmdGetRgUnderflowProp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetRgUnderflowProp__ = &dispobjCtrlCmdSetRgUnderflowProp_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgFliplockProp__ = &dispobjCtrlCmdGetRgFliplockProp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdSetRgFliplockProp__ = &dispobjCtrlCmdSetRgFliplockProp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgConnectedLockpin__ = &dispobjCtrlCmdGetRgConnectedLockpin_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdGetRgConnectedLockpinStateless__ = &dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgScanLine__ = &dispobjCtrlCmdGetRgScanLine_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetSorSeqCtl__ = &dispobjCtrlCmdGetSorSeqCtl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetSorSeqCtl__ = &dispobjCtrlCmdSetSorSeqCtl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSeqProgSpeed__ = &dispobjCtrlCmdSeqProgSpeed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispobjCtrlCmdGetSorPwm__ = &dispobjCtrlCmdGetSorPwm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispobjCtrlCmdSetSorPwm__ = &dispobjCtrlCmdSetSorPwm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispobjCtrlCmdGetSorOpMode__ = &dispobjCtrlCmdGetSorOpMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispobjCtrlCmdSetSorOpMode__ = &dispobjCtrlCmdSetSorOpMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispobjCtrlCmdSetSorFlushMode__ = &dispobjCtrlCmdSetSorFlushMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSystemGetCapsV2__ = &dispobjCtrlCmdSystemGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__dispobjCtrlCmdEventSetTrigger__ = &dispobjCtrlCmdEventSetTrigger_IMPL; +#endif + + pThis->__dispobjShareCallback__ = &__nvoc_thunk_RmResource_dispobjShareCallback; + + pThis->__dispobjControl__ = &__nvoc_thunk_DisplayApi_dispobjControl; + + pThis->__dispobjAccessCallback__ = &__nvoc_thunk_RmResource_dispobjAccessCallback; + + pThis->__dispobjGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispobjGetMemInterMapParams; + + pThis->__dispobjGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispobjGetMemoryMappingDescriptor; + + pThis->__dispobjCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispobjCheckMemInterUnmap; + + pThis->__dispobjSetNotificationShare__ = &__nvoc_thunk_Notifier_dispobjSetNotificationShare; + + pThis->__dispobjControlFilter__ = &__nvoc_thunk_RsResource_dispobjControlFilter; + + pThis->__dispobjGetRefCount__ = &__nvoc_thunk_RsResource_dispobjGetRefCount; + + pThis->__dispobjUnregisterEvent__ = &__nvoc_thunk_Notifier_dispobjUnregisterEvent; + + pThis->__dispobjUnmap__ = &__nvoc_thunk_RsResource_dispobjUnmap; + + pThis->__dispobjControl_Prologue__ = &__nvoc_thunk_DisplayApi_dispobjControl_Prologue; + + pThis->__dispobjCanCopy__ = &__nvoc_thunk_RsResource_dispobjCanCopy; + + pThis->__dispobjMapTo__ = &__nvoc_thunk_RsResource_dispobjMapTo; + + pThis->__dispobjAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispobjAddAdditionalDependants; + + pThis->__dispobjPreDestruct__ = &__nvoc_thunk_RsResource_dispobjPreDestruct; + + 
pThis->__dispobjUnmapFrom__ = &__nvoc_thunk_RsResource_dispobjUnmapFrom; + + pThis->__dispobjGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispobjGetNotificationListPtr; + + pThis->__dispobjControl_Epilogue__ = &__nvoc_thunk_DisplayApi_dispobjControl_Epilogue; + + pThis->__dispobjGetNotificationShare__ = &__nvoc_thunk_Notifier_dispobjGetNotificationShare; + + pThis->__dispobjControlLookup__ = &__nvoc_thunk_RsResource_dispobjControlLookup; + + pThis->__dispobjMap__ = &__nvoc_thunk_RsResource_dispobjMap; + + pThis->__dispobjGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispobjGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DispObject_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DispObject = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; + __nvoc_init_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + __nvoc_init_funcTable_DispObject(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DispObject(DispObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DispObject)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispObject)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispObject); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DispObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispObject_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispObject_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispObject(DispObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct 
CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x36aa0b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +void __nvoc_init_NvDispApi(NvDispApi*, RmHalspecOwner* ); +void __nvoc_init_funcTable_NvDispApi(NvDispApi*); +NV_STATUS __nvoc_ctor_NvDispApi(NvDispApi*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_NvDispApi(NvDispApi*); +void __nvoc_dtor_NvDispApi(NvDispApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NvDispApi; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_NvDispApi = { + /*pClassDef=*/ &__nvoc_class_def_NvDispApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NvDispApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi), +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_NvDispApi_DispObject = { + /*pClassDef=*/ &__nvoc_class_def_DispObject, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_NvDispApi = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_NvDispApi_NvDispApi, + &__nvoc_rtti_NvDispApi_DispObject, + &__nvoc_rtti_NvDispApi_DisplayApi, + &__nvoc_rtti_NvDispApi_Notifier, + &__nvoc_rtti_NvDispApi_INotifier, + &__nvoc_rtti_NvDispApi_RmResource, + &__nvoc_rtti_NvDispApi_RmResourceCommon, + &__nvoc_rtti_NvDispApi_RsResource, + &__nvoc_rtti_NvDispApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NvDispApi), + /*classId=*/ classId(NvDispApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NvDispApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NvDispApi, + /*pCastInfo=*/ &__nvoc_castinfo_NvDispApi, + /*pExportInfo=*/ &__nvoc_export_info_NvDispApi +}; + +static NvBool __nvoc_thunk_RmResource_nvdispapiShareCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_nvdispapiControl(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_NvDispApi_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_nvdispapiAccessCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdispapiGetMemInterMapParams(struct NvDispApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvDispApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdispapiGetMemoryMappingDescriptor(struct NvDispApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvDispApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdispapiCheckMemInterUnmap(struct NvDispApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvDispApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_nvdispapiSetNotificationShare(struct NvDispApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiControlFilter(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_NvDispApi_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_nvdispapiGetRefCount(struct NvDispApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvdispapiUnregisterEvent(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiUnmap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_nvdispapiControl_Prologue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_NvDispApi_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_nvdispapiCanCopy(struct NvDispApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiMapTo(struct NvDispApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_nvdispapiAddAdditionalDependants(struct RsClient *pClient, struct NvDispApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_nvdispapiPreDestruct(struct NvDispApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiUnmapFrom(struct NvDispApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_nvdispapiGetNotificationListPtr(struct NvDispApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_nvdispapiControl_Epilogue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_NvDispApi_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_nvdispapiGetNotificationShare(struct NvDispApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiControlLookup(struct NvDispApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + 
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiMap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvdispapiGetOrAllocNotifShare(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_NvDispApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdIdleChannel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700101u, + /*paramSize=*/ sizeof(NVC370_CTRL_IDLE_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdIdleChannel" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetAccl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700102u, + /*paramSize=*/ sizeof(NVC370_CTRL_SET_ACCL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetAccl" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetAccl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700103u, + /*paramSize=*/ sizeof(NVC370_CTRL_GET_ACCL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetAccl" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetChannelInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700104u, + /*paramSize=*/ sizeof(NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetChannelInfo" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetLockpinsCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700201u, + /*paramSize=*/ sizeof(NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"nvdispapiCtrlCmdGetLockpinsCaps" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700202u, + /*paramSize=*/ sizeof(NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetSwaprdyGpioWar" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700602u, + /*paramSize=*/ sizeof(NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NvDispApi = +{ + /*numEntries=*/ 7, + /*pExportEntries=*/ __nvoc_exported_method_def_NvDispApi +}; + +void __nvoc_dtor_DispObject(DispObject*); +void __nvoc_dtor_NvDispApi(NvDispApi *pThis) { + __nvoc_dtor_DispObject(&pThis->__nvoc_base_DispObject); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NvDispApi(NvDispApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispObject(DispObject* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_NvDispApi(NvDispApi *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispObject(&pThis->__nvoc_base_DispObject, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvDispApi_fail_DispObject; + __nvoc_init_dataField_NvDispApi(pThis); + + status = __nvoc_nvdispapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvDispApi_fail__init; + goto __nvoc_ctor_NvDispApi_exit; // Success + +__nvoc_ctor_NvDispApi_fail__init: + __nvoc_dtor_DispObject(&pThis->__nvoc_base_DispObject); +__nvoc_ctor_NvDispApi_fail_DispObject: +__nvoc_ctor_NvDispApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_NvDispApi_1(NvDispApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdIdleChannel__ = &nvdispapiCtrlCmdIdleChannel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdSetAccl__ = &nvdispapiCtrlCmdSetAccl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdGetAccl__ = &nvdispapiCtrlCmdGetAccl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__nvdispapiCtrlCmdGetChannelInfo__ = &nvdispapiCtrlCmdGetChannelInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__nvdispapiCtrlCmdSetSwaprdyGpioWar__ = &nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdGetLockpinsCaps__ = &nvdispapiCtrlCmdGetLockpinsCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + 
pThis->__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__ = &nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL; +#endif + + pThis->__nvdispapiShareCallback__ = &__nvoc_thunk_RmResource_nvdispapiShareCallback; + + pThis->__nvdispapiControl__ = &__nvoc_thunk_DisplayApi_nvdispapiControl; + + pThis->__nvdispapiAccessCallback__ = &__nvoc_thunk_RmResource_nvdispapiAccessCallback; + + pThis->__nvdispapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_nvdispapiGetMemInterMapParams; + + pThis->__nvdispapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_nvdispapiGetMemoryMappingDescriptor; + + pThis->__nvdispapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_nvdispapiCheckMemInterUnmap; + + pThis->__nvdispapiSetNotificationShare__ = &__nvoc_thunk_Notifier_nvdispapiSetNotificationShare; + + pThis->__nvdispapiControlFilter__ = &__nvoc_thunk_RsResource_nvdispapiControlFilter; + + pThis->__nvdispapiGetRefCount__ = &__nvoc_thunk_RsResource_nvdispapiGetRefCount; + + pThis->__nvdispapiUnregisterEvent__ = &__nvoc_thunk_Notifier_nvdispapiUnregisterEvent; + + pThis->__nvdispapiUnmap__ = &__nvoc_thunk_RsResource_nvdispapiUnmap; + + pThis->__nvdispapiControl_Prologue__ = &__nvoc_thunk_DisplayApi_nvdispapiControl_Prologue; + + pThis->__nvdispapiCanCopy__ = &__nvoc_thunk_RsResource_nvdispapiCanCopy; + + pThis->__nvdispapiMapTo__ = &__nvoc_thunk_RsResource_nvdispapiMapTo; + + pThis->__nvdispapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_nvdispapiAddAdditionalDependants; + + pThis->__nvdispapiPreDestruct__ = &__nvoc_thunk_RsResource_nvdispapiPreDestruct; + + pThis->__nvdispapiUnmapFrom__ = &__nvoc_thunk_RsResource_nvdispapiUnmapFrom; + + pThis->__nvdispapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_nvdispapiGetNotificationListPtr; + + pThis->__nvdispapiControl_Epilogue__ = &__nvoc_thunk_DisplayApi_nvdispapiControl_Epilogue; + + pThis->__nvdispapiGetNotificationShare__ = &__nvoc_thunk_Notifier_nvdispapiGetNotificationShare; + + pThis->__nvdispapiControlLookup__ = &__nvoc_thunk_RsResource_nvdispapiControlLookup; + + pThis->__nvdispapiMap__ = &__nvoc_thunk_RsResource_nvdispapiMap; + + pThis->__nvdispapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_nvdispapiGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_NvDispApi(NvDispApi *pThis) { + __nvoc_init_funcTable_NvDispApi_1(pThis); +} + +void __nvoc_init_DispObject(DispObject*, RmHalspecOwner* ); +void __nvoc_init_NvDispApi(NvDispApi *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_NvDispApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi; + pThis->__nvoc_pbase_DispObject = &pThis->__nvoc_base_DispObject; + __nvoc_init_DispObject(&pThis->__nvoc_base_DispObject, pRmhalspecowner); + 
__nvoc_init_funcTable_NvDispApi(pThis); +} + +NV_STATUS __nvoc_objCreate_NvDispApi(NvDispApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + NvDispApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(NvDispApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(NvDispApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NvDispApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_NvDispApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_NvDispApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_NvDispApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NvDispApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NvDispApi(NvDispApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_NvDispApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6aa5e2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +void __nvoc_init_DispSwObj(DispSwObj*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DispSwObj(DispSwObj*); +NV_STATUS __nvoc_ctor_DispSwObj(DispSwObj*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispSwObj(DispSwObj*); +void __nvoc_dtor_DispSwObj(DispSwObj*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSwObj; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_DispSwObj = { + /*pClassDef=*/ &__nvoc_class_def_DispSwObj, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSwObj, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, 
__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispSwObj = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispSwObj_DispSwObj, + &__nvoc_rtti_DispSwObj_DisplayApi, + &__nvoc_rtti_DispSwObj_Notifier, + &__nvoc_rtti_DispSwObj_INotifier, + &__nvoc_rtti_DispSwObj_RmResource, + &__nvoc_rtti_DispSwObj_RmResourceCommon, + &__nvoc_rtti_DispSwObj_RsResource, + &__nvoc_rtti_DispSwObj_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispSwObj), + /*classId=*/ classId(DispSwObj), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispSwObj", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSwObj, + /*pCastInfo=*/ &__nvoc_castinfo_DispSwObj, + /*pExportInfo=*/ &__nvoc_export_info_DispSwObj +}; + +static NvBool __nvoc_thunk_RmResource_dispswobjShareCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispswobjControl(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispSwObj_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_dispswobjAccessCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS 
__nvoc_thunk_RmResource_dispswobjGetMemInterMapParams(struct DispSwObj *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSwObj_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispswobjGetMemoryMappingDescriptor(struct DispSwObj *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSwObj_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispswobjCheckMemInterUnmap(struct DispSwObj *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSwObj_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_dispswobjSetNotificationShare(struct DispSwObj *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjControlFilter(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispswobjGetRefCount(struct DispSwObj *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispswobjUnregisterEvent(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjUnmap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispswobjControl_Prologue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispSwObj_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_dispswobjCanCopy(struct DispSwObj *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjMapTo(struct DispSwObj *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispswobjAddAdditionalDependants(struct RsClient *pClient, struct DispSwObj *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispswobjPreDestruct(struct DispSwObj *pResource) { + 
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjUnmapFrom(struct DispSwObj *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispswobjGetNotificationListPtr(struct DispSwObj *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_dispswobjControl_Epilogue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispSwObj_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispswobjGetNotificationShare(struct DispSwObj *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjControlLookup(struct DispSwObj *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjMap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispswobjGetOrAllocNotifShare(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispSwObj[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdIsModePossible_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720101u, + /*paramSize=*/ sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdIsModePossible" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdIsModePossibleOrSettings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720102u, + /*paramSize=*/ sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdIsModePossibleOrSettings" +#endif + }, + { /* [2] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720103u, + /*paramSize=*/ sizeof(NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdVideoAdaptiveRefreshRate" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdGetActiveViewportPointIn_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*flags=*/ 0x211u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720104u, + /*paramSize=*/ sizeof(NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdGetActiveViewportPointIn" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSwObj = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_DispSwObj +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispSwObj(DispSwObj *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispSwObj(DispSwObj *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispSwObj(DispSwObj *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSwObj_fail_DisplayApi; + __nvoc_init_dataField_DispSwObj(pThis); + + status = __nvoc_dispswobjConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSwObj_fail__init; + goto __nvoc_ctor_DispSwObj_exit; // Success + +__nvoc_ctor_DispSwObj_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispSwObj_fail_DisplayApi: +__nvoc_ctor_DispSwObj_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispSwObj_1(DispSwObj *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispswobjCtrlCmdIsModePossible__ = &dispswobjCtrlCmdIsModePossible_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispswobjCtrlCmdIsModePossibleOrSettings__ = &dispswobjCtrlCmdIsModePossibleOrSettings_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispswobjCtrlCmdVideoAdaptiveRefreshRate__ = &dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + pThis->__dispswobjCtrlCmdGetActiveViewportPointIn__ = &dispswobjCtrlCmdGetActiveViewportPointIn_IMPL; +#endif + + pThis->__dispswobjShareCallback__ = &__nvoc_thunk_RmResource_dispswobjShareCallback; + + pThis->__dispswobjControl__ = &__nvoc_thunk_DisplayApi_dispswobjControl; + + pThis->__dispswobjAccessCallback__ = &__nvoc_thunk_RmResource_dispswobjAccessCallback; + + pThis->__dispswobjGetMemInterMapParams__ = 
&__nvoc_thunk_RmResource_dispswobjGetMemInterMapParams; + + pThis->__dispswobjGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispswobjGetMemoryMappingDescriptor; + + pThis->__dispswobjCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispswobjCheckMemInterUnmap; + + pThis->__dispswobjSetNotificationShare__ = &__nvoc_thunk_Notifier_dispswobjSetNotificationShare; + + pThis->__dispswobjControlFilter__ = &__nvoc_thunk_RsResource_dispswobjControlFilter; + + pThis->__dispswobjGetRefCount__ = &__nvoc_thunk_RsResource_dispswobjGetRefCount; + + pThis->__dispswobjUnregisterEvent__ = &__nvoc_thunk_Notifier_dispswobjUnregisterEvent; + + pThis->__dispswobjUnmap__ = &__nvoc_thunk_RsResource_dispswobjUnmap; + + pThis->__dispswobjControl_Prologue__ = &__nvoc_thunk_DisplayApi_dispswobjControl_Prologue; + + pThis->__dispswobjCanCopy__ = &__nvoc_thunk_RsResource_dispswobjCanCopy; + + pThis->__dispswobjMapTo__ = &__nvoc_thunk_RsResource_dispswobjMapTo; + + pThis->__dispswobjAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispswobjAddAdditionalDependants; + + pThis->__dispswobjPreDestruct__ = &__nvoc_thunk_RsResource_dispswobjPreDestruct; + + pThis->__dispswobjUnmapFrom__ = &__nvoc_thunk_RsResource_dispswobjUnmapFrom; + + pThis->__dispswobjGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispswobjGetNotificationListPtr; + + pThis->__dispswobjControl_Epilogue__ = &__nvoc_thunk_DisplayApi_dispswobjControl_Epilogue; + + pThis->__dispswobjGetNotificationShare__ = &__nvoc_thunk_Notifier_dispswobjGetNotificationShare; + + pThis->__dispswobjControlLookup__ = &__nvoc_thunk_RsResource_dispswobjControlLookup; + + pThis->__dispswobjMap__ = &__nvoc_thunk_RsResource_dispswobjMap; + + pThis->__dispswobjGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispswobjGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DispSwObj(DispSwObj *pThis) { + __nvoc_init_funcTable_DispSwObj_1(pThis); +} + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_DispSwObj(DispSwObj *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DispSwObj = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; + __nvoc_init_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + __nvoc_init_funcTable_DispSwObj(pThis); +} + +NV_STATUS __nvoc_objCreate_DispSwObj(DispSwObj **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispSwObj *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DispSwObj)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispSwObj)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispSwObj); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = 
dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DispSwObj(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispSwObj(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispSwObj_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispSwObj_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispSwObj(DispSwObj **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispSwObj(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x41f4f2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +void __nvoc_init_DispCommon(DispCommon*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DispCommon(DispCommon*); +NV_STATUS __nvoc_ctor_DispCommon(DispCommon*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispCommon(DispCommon*); +void __nvoc_dtor_DispCommon(DispCommon*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCommon; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_DispCommon = { + /*pClassDef=*/ &__nvoc_class_def_DispCommon, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCommon, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + 
/*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispCommon = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispCommon_DispCommon, + &__nvoc_rtti_DispCommon_DisplayApi, + &__nvoc_rtti_DispCommon_Notifier, + &__nvoc_rtti_DispCommon_INotifier, + &__nvoc_rtti_DispCommon_RmResource, + &__nvoc_rtti_DispCommon_RmResourceCommon, + &__nvoc_rtti_DispCommon_RsResource, + &__nvoc_rtti_DispCommon_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispCommon), + /*classId=*/ classId(DispCommon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispCommon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCommon, + /*pCastInfo=*/ &__nvoc_castinfo_DispCommon, + /*pExportInfo=*/ &__nvoc_export_info_DispCommon +}; + +static NvBool __nvoc_thunk_RmResource_dispcmnShareCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispcmnControl(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispCommon_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_dispcmnAccessCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcmnGetMemInterMapParams(struct DispCommon *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCommon_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcmnGetMemoryMappingDescriptor(struct DispCommon *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCommon_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcmnCheckMemInterUnmap(struct DispCommon *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned 
char *)pRmResource) + __nvoc_rtti_DispCommon_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_dispcmnSetNotificationShare(struct DispCommon *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnControlFilter(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispcmnGetRefCount(struct DispCommon *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispcmnUnregisterEvent(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnUnmap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispcmnControl_Prologue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispCommon_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_dispcmnCanCopy(struct DispCommon *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnMapTo(struct DispCommon *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispcmnAddAdditionalDependants(struct RsClient *pClient, struct DispCommon *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispcmnPreDestruct(struct DispCommon *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnUnmapFrom(struct DispCommon *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispcmnGetNotificationListPtr(struct DispCommon *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_dispcmnControl_Epilogue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispCommon_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispcmnGetNotificationShare(struct DispCommon *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnControlLookup(struct DispCommon *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnMap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispcmnGetOrAllocNotifShare(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispCommon[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetNumHeads_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730102u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetNumHeads" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetScanline_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730108u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetScanline" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetSuppported_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730120u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetSuppported" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetConnectState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 
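+ /* Each entry of __nvoc_exported_method_def_DispCommon binds one NV0073 display
+  * control command to its _IMPL handler: methodId is the control command
+  * identifier a client passes through the resource server, paramSize is the
+  * expected size of that command's parameter struct, and flags feed
+  * NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG so a build can compile the method out
+  * (its pFunc then degrades to NULL). In effect dispatch is a methodId lookup
+  * plus a paramSize check before pFunc is invoked; the lookup itself lives in
+  * the resource-server code, not in this table. */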
0x730122u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetConnectState" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730123u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugConfig" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730125u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHeadRoutingMap" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetActive_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730126u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetActive" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730138u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetCapsV2" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetBootDisplays_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730166u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetBootDisplays" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73017bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugUnplugState" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void 
(*)(void)) dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73017eu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemArmLightweightSupervisor" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730184u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemConfigVrrPstateSwitch" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730190u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemQueryDisplayIdsWithMux" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730196u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemAllocateDisplayBandwidth" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730197u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugEventConfig" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730198u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemSetHotplugEventConfig" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73019cu, + /*paramSize=*/ 
sizeof(NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemCheckSidebandI2cSupport" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetI2cPortid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730211u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetI2cPortid" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetType_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730240u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetType" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificFakeDevice_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730243u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificFakeDevice" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetEdidV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730245u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetEdidV2" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetEdidV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730246u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetEdidV2" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetConnectorData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730250u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetConnectorData" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730273u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiEnable" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificCtrlHdmi_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730274u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificCtrlHdmi" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730287u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetAllHeadMask" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetOdPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730288u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetOdPacket" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730289u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetOdPacketCtrl" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetPclkLimit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028au, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetPclkLimit" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificOrGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
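+ /* The method-name strings in these entries (like the "DispCommon" class name
+  * earlier) are compiled in only when NV_PRINTF_STRINGS_ALLOWED is set;
+  * presumably the guard exists so string-stripped builds can drop them and
+  * shrink the binary. */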
"dispcmnCtrlCmdSpecificOrGetInfo" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730293u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiSinkCaps" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetMonitorPower_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730295u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetMonitorPower" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73029au, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetRegionalCrcs_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a0u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetRegionalCrcs" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a1u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificApplyEdidOverrideV2" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a2u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdmiGpuCaps" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a6u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdmiScdcData" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a7u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificIsDirectmodeDisplay" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a8u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a9u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetSharedGenericPacket" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302aau, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificAcquireSharedGenericPacket" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302abu, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificReleaseSharedGenericPacket" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificDispI2cReadWrite_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302acu, + /*paramSize=*/ 
sizeof(NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificDispI2cReadWrite" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730401u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdInternalGetHotplugUnplugState" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731140u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetInfo" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731142u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetDisplayportDongleInfo" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731144u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpSetEldAudioCaps" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73114eu, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpUpdateDynamicDfpCache" +#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSetAudioEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731150u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpSetAudioEnable" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
dispcmnCtrlCmdDfpAssignSor_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731152u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpAssignSor" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetPadlinkMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731153u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetPadlinkMask" +#endif + }, + { /* [51] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731156u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpConfigTwoHeadOneOr" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpDscCrcControl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731157u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpDscCrcControl" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpInitMuxData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731158u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpInitMuxData" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731166u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetDsiModeTiming" +#endif + }, + { /* [55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x206u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetFixedModeTiming_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x206u) + /*flags=*/ 0x206u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731172u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetFixedModeTiming" +#endif + }, + { /* [56] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpAuxchCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731341u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_AUXCH_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpAuxchCtrl" +#endif + }, + { /* [57] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpAuxchSetSema_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731342u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpAuxchSetSema" +#endif + }, + { /* [58] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731343u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpCtrl" +#endif + }, + { /* [59] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetLaneData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731345u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_LANE_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetLaneData" +#endif + }, + { /* [60] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetLaneData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731346u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_LANE_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetLaneData" +#endif + }, + { /* [61] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTestpattern_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731347u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetTestpattern" +#endif + }, + { /* [62] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731351u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data" +#endif + }, + { /* [63] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731352u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data" +#endif + }, + { /* [64] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpMainLinkCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731356u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpMainLinkCtrl" +#endif + }, + { /* [65] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetAudioMuteStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731359u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetAudioMuteStream" +#endif + }, + { /* [66] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73135bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpTopologyAllocateDisplayId" +#endif + }, + { /* [67] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73135cu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpTopologyFreeDisplayId" +#endif + }, + { /* [68] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetLinkConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731360u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetLinkConfig" +#endif + }, + { /* [69] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetEDPData_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731361u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_EDP_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetEDPData" +#endif + }, + { /* [70] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731362u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigStream" +#endif + }, + { /* [71] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetRateGov_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731363u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetRateGov" +#endif + }, + { /* [72] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetManualDisplayPort_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731365u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetManualDisplayPort" +#endif + }, + { /* [73] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSendACT_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731367u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSendACT" +#endif + }, + { /* [74] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731369u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetCaps" +#endif + }, + { /* [75] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetMSAProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136au, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetMSAProperties" +#endif + }, + { /* [76] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void 
(*)(void)) dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGenerateFakeInterrupt" +#endif + }, + { /* [77] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigRadScratchReg_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136cu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigRadScratchReg" +#endif + }, + { /* [78] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136eu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigSingleHeadMultiStream" +#endif + }, + { /* [79] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTriggerSelect_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136fu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetTriggerSelect" +#endif + }, + { /* [80] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTriggerAll_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731370u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetTriggerAll" +#endif + }, + { /* [81] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetAuxLogData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731373u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetAuxLogData" +#endif + }, + { /* [82] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731377u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigIndexedLinkRates" +#endif + }, + { /* [83] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731378u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetStereoMSAProperties" +#endif + }, + { /* [84] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigureFec_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137au, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigureFec" +#endif + }, + { /* [85] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigMacroPad_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigMacroPad" +#endif + }, + { /* [86] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetGenericInfoframe_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137eu, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetGenericInfoframe" +#endif + }, + { /* [87] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetMsaAttributes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137fu, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetMsaAttributes" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCommon = +{ + /*numEntries=*/ 88, + /*pExportEntries=*/ __nvoc_exported_method_def_DispCommon +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispCommon(DispCommon *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispCommon(DispCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispCommon(DispCommon *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = 
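+ /* Construction runs base-first: the DisplayApi base (and through it RmResource,
+  * RsResource, Notifier, and Object) is fully constructed before
+  * __nvoc_dispcmnConstruct runs for DispCommon itself. The goto labels below
+  * unwind in reverse order, so a failure in the leaf constructor destroys only
+  * the bases that were already built. */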
__nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCommon_fail_DisplayApi; + __nvoc_init_dataField_DispCommon(pThis); + + status = __nvoc_dispcmnConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCommon_fail__init; + goto __nvoc_ctor_DispCommon_exit; // Success + +__nvoc_ctor_DispCommon_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispCommon_fail_DisplayApi: +__nvoc_ctor_DispCommon_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispCommon_1(DispCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetCapsV2__ = &dispcmnCtrlCmdSystemGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSystemGetNumHeads__ = &dispcmnCtrlCmdSystemGetNumHeads_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetScanline__ = &dispcmnCtrlCmdSystemGetScanline_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSystemGetSuppported__ = &dispcmnCtrlCmdSystemGetSuppported_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetConnectState__ = &dispcmnCtrlCmdSystemGetConnectState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__dispcmnCtrlCmdSystemGetHotplugUnplugState__ = &dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__dispcmnCtrlCmdInternalGetHotplugUnplugState__ = &dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetHeadRoutingMap__ = &dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetActive__ = &dispcmnCtrlCmdSystemGetActive_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetBootDisplays__ = &dispcmnCtrlCmdSystemGetBootDisplays_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__ = &dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__ = &dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__ = &dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetHotplugConfig__ = &dispcmnCtrlCmdSystemGetHotplugConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetHotplugEventConfig__ = &dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemSetHotplugEventConfig__ = &dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSystemArmLightweightSupervisor__ = &dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__ = 
&dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSpecificGetType__ = &dispcmnCtrlCmdSpecificGetType_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetEdidV2__ = &dispcmnCtrlCmdSpecificGetEdidV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetEdidV2__ = &dispcmnCtrlCmdSpecificSetEdidV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificFakeDevice__ = &dispcmnCtrlCmdSpecificFakeDevice_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetConnectorData__ = &dispcmnCtrlCmdSpecificGetConnectorData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiEnable__ = &dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificCtrlHdmi__ = &dispcmnCtrlCmdSpecificCtrlHdmi_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetAllHeadMask__ = &dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetOdPacket__ = &dispcmnCtrlCmdSpecificSetOdPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__ = &dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetSharedGenericPacket__ = &dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__ = &dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetOdPacketCtrl__ = &dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSpecificOrGetInfo__ = &dispcmnCtrlCmdSpecificOrGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificGetPclkLimit__ = &dispcmnCtrlCmdSpecificGetPclkLimit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__ = &dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetMonitorPower__ = &dispcmnCtrlCmdSpecificSetMonitorPower_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__ = &dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__ = &dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetI2cPortid__ = &dispcmnCtrlCmdSpecificGetI2cPortid_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__ = &dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificGetHdmiScdcData__ = &dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL; +#endif + 
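+ /* This initializer mirrors the exported-method table above: each handler is
+  * installed in the per-object function table only when the same flag word
+  * survives NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG, so disabling a method at
+  * build time removes both its export entry and its dispatch pointer here. */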
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__ = &dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__ = &dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificDispI2cReadWrite__ = &dispcmnCtrlCmdSpecificDispI2cReadWrite_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdDfpGetInfo__ = &dispcmnCtrlCmdDfpGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__ = &dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdDfpSetEldAudioCaps__ = &dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpSetAudioEnable__ = &dispcmnCtrlCmdDfpSetAudioEnable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__ = &dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpAssignSor__ = &dispcmnCtrlCmdDfpAssignSor_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpDscCrcControl__ = &dispcmnCtrlCmdDfpDscCrcControl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpInitMuxData__ = &dispcmnCtrlCmdDfpInitMuxData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpGetDsiModeTiming__ = &dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__ = &dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpGetPadlinkMask__ = &dispcmnCtrlCmdDfpGetPadlinkMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x206u) + pThis->__dispcmnCtrlCmdDfpGetFixedModeTiming__ = &dispcmnCtrlCmdDfpGetFixedModeTiming_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpAuxchCtrl__ = &dispcmnCtrlCmdDpAuxchCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpAuxchSetSema__ = &dispcmnCtrlCmdDpAuxchSetSema_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpCtrl__ = &dispcmnCtrlCmdDpCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetLaneData__ = &dispcmnCtrlCmdDpGetLaneData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetLaneData__ = &dispcmnCtrlCmdDpSetLaneData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetTestpattern__ = &dispcmnCtrlCmdDpSetTestpattern_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpMainLinkCtrl__ = &dispcmnCtrlCmdDpMainLinkCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdDpSetAudioMuteStream__ = &dispcmnCtrlCmdDpSetAudioMuteStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetLinkConfig__ = 
&dispcmnCtrlCmdDpGetLinkConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetEDPData__ = &dispcmnCtrlCmdDpGetEDPData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpTopologyAllocateDisplayId__ = &dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpTopologyFreeDisplayId__ = &dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigStream__ = &dispcmnCtrlCmdDpConfigStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__ = &dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetRateGov__ = &dispcmnCtrlCmdDpSetRateGov_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSendACT__ = &dispcmnCtrlCmdDpSendACT_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetManualDisplayPort__ = &dispcmnCtrlCmdDpSetManualDisplayPort_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdDpGetCaps__ = &dispcmnCtrlCmdDpGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetMSAProperties__ = &dispcmnCtrlCmdDpSetMSAProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetStereoMSAProperties__ = &dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__dispcmnCtrlCmdDpGenerateFakeInterrupt__ = &dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigRadScratchReg__ = &dispcmnCtrlCmdDpConfigRadScratchReg_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetTriggerSelect__ = &dispcmnCtrlCmdDpSetTriggerSelect_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetTriggerAll__ = &dispcmnCtrlCmdDpSetTriggerAll_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetAuxLogData__ = &dispcmnCtrlCmdDpGetAuxLogData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigIndexedLinkRates__ = &dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigureFec__ = &dispcmnCtrlCmdDpConfigureFec_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetGenericInfoframe__ = &dispcmnCtrlCmdDpGetGenericInfoframe_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetMsaAttributes__ = &dispcmnCtrlCmdDpGetMsaAttributes_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigMacroPad__ = &dispcmnCtrlCmdDpConfigMacroPad_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__ = &dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__ = 
&dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetRegionalCrcs__ = &dispcmnCtrlCmdSpecificGetRegionalCrcs_IMPL; +#endif + + pThis->__dispcmnShareCallback__ = &__nvoc_thunk_RmResource_dispcmnShareCallback; + + pThis->__dispcmnControl__ = &__nvoc_thunk_DisplayApi_dispcmnControl; + + pThis->__dispcmnAccessCallback__ = &__nvoc_thunk_RmResource_dispcmnAccessCallback; + + pThis->__dispcmnGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispcmnGetMemInterMapParams; + + pThis->__dispcmnGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispcmnGetMemoryMappingDescriptor; + + pThis->__dispcmnCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispcmnCheckMemInterUnmap; + + pThis->__dispcmnSetNotificationShare__ = &__nvoc_thunk_Notifier_dispcmnSetNotificationShare; + + pThis->__dispcmnControlFilter__ = &__nvoc_thunk_RsResource_dispcmnControlFilter; + + pThis->__dispcmnGetRefCount__ = &__nvoc_thunk_RsResource_dispcmnGetRefCount; + + pThis->__dispcmnUnregisterEvent__ = &__nvoc_thunk_Notifier_dispcmnUnregisterEvent; + + pThis->__dispcmnUnmap__ = &__nvoc_thunk_RsResource_dispcmnUnmap; + + pThis->__dispcmnControl_Prologue__ = &__nvoc_thunk_DisplayApi_dispcmnControl_Prologue; + + pThis->__dispcmnCanCopy__ = &__nvoc_thunk_RsResource_dispcmnCanCopy; + + pThis->__dispcmnMapTo__ = &__nvoc_thunk_RsResource_dispcmnMapTo; + + pThis->__dispcmnAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispcmnAddAdditionalDependants; + + pThis->__dispcmnPreDestruct__ = &__nvoc_thunk_RsResource_dispcmnPreDestruct; + + pThis->__dispcmnUnmapFrom__ = &__nvoc_thunk_RsResource_dispcmnUnmapFrom; + + pThis->__dispcmnGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispcmnGetNotificationListPtr; + + pThis->__dispcmnControl_Epilogue__ = &__nvoc_thunk_DisplayApi_dispcmnControl_Epilogue; + + pThis->__dispcmnGetNotificationShare__ = &__nvoc_thunk_Notifier_dispcmnGetNotificationShare; + + pThis->__dispcmnControlLookup__ = &__nvoc_thunk_RsResource_dispcmnControlLookup; + + pThis->__dispcmnMap__ = &__nvoc_thunk_RsResource_dispcmnMap; + + pThis->__dispcmnGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispcmnGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DispCommon(DispCommon *pThis) { + __nvoc_init_funcTable_DispCommon_1(pThis); +} + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_DispCommon(DispCommon *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DispCommon = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; + __nvoc_init_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + __nvoc_init_funcTable_DispCommon(pThis); +} + +NV_STATUS __nvoc_objCreate_DispCommon(DispCommon **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * 
arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispCommon *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DispCommon)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispCommon)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispCommon); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DispCommon(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispCommon(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispCommon_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispCommon_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispCommon(DispCommon **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispCommon(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h new file mode 100644 index 0000000..62baf22 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h @@ -0,0 +1,2140 @@ +#ifndef _G_DISP_OBJS_NVOC_H_ +#define _G_DISP_OBJS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** +* +* Description: +* This file contains functions for managing the display - both the Disp and +* DispCommon entries and their contents (DispChannelList and DispDmaControlList) +* +******************************************************************************/ + +#include "g_disp_objs_nvoc.h" + +#ifndef DISP_OBJS_H +#define DISP_OBJS_H + +#include "rmapi/event.h" +#include "rmapi/resource.h" + +#include "gpu/gpu_halspec.h" + +/* + * On T234, RM runs in kernel mode, and kernel-mode RM does not allow usermode + * clients such as MODS to issue control calls that are marked as + * KERNEL_PRIVILEGED. + * We therefore define a new macro, DISPLAY_PRIVILEGED (i.e. PRIVILEGED), for + * Tegra and mark the control calls MODS needs with it, so that MODS running as + * root can still issue them. dGPUs keep their existing privilege level, so + * their current behaviour is unchanged. + */ +#define DISPLAY_PRIVILEGED PRIVILEGED + +#include "ctrl/ctrl0073.h" +#include "ctrl/ctrl5070/ctrl5070event.h" +#include "ctrl/ctrl5070/ctrl5070or.h" +#include "ctrl/ctrl5070/ctrl5070seq.h" +#include "ctrl/ctrl5070/ctrl5070system.h" +#include "ctrl/ctrlc370/ctrlc370chnc.h" +#include "ctrl/ctrlc370/ctrlc370event.h" +#include "ctrl/ctrlc370/ctrlc370rg.h" +#include "ctrl/ctrlc370/ctrlc370verif.h" +#include "ctrl/ctrlc372/ctrlc372base.h" +#include "ctrl/ctrlc372/ctrlc372chnc.h" + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +struct OBJGPU; +struct Device; +struct Memory; +struct RsResource; +struct RmResource; +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + + +#define DISPAPI_GET_GPU(pDispRes) staticCast(pDispRes, DisplayApi)->pGpuInRmctrl + +#define DISPAPI_GET_GPUGRP(pDispRes) staticCast(pDispRes, DisplayApi)->pGpuGrp + +/*!
+ * Base class for many of display's RsResource subclasses + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DisplayApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + NV_STATUS (*__dispapiControl__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispapiControl_Prologue__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__dispapiControl_Epilogue__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispapiShareCallback__)(struct DisplayApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispapiCheckMemInterUnmap__)(struct DisplayApi *, NvBool); + NvBool (*__dispapiAccessCallback__)(struct DisplayApi *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispapiGetMemInterMapParams__)(struct DisplayApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispapiGetMemoryMappingDescriptor__)(struct DisplayApi *, struct MEMORY_DESCRIPTOR **); + void (*__dispapiSetNotificationShare__)(struct DisplayApi *, struct NotifShare *); + NV_STATUS (*__dispapiControlFilter__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispapiGetRefCount__)(struct DisplayApi *); + NV_STATUS (*__dispapiUnregisterEvent__)(struct DisplayApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispapiUnmap__)(struct DisplayApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NvBool (*__dispapiCanCopy__)(struct DisplayApi *); + NV_STATUS (*__dispapiMapTo__)(struct DisplayApi *, RS_RES_MAP_TO_PARAMS *); + void (*__dispapiAddAdditionalDependants__)(struct RsClient *, struct DisplayApi *, RsResourceRef *); + void (*__dispapiPreDestruct__)(struct DisplayApi *); + NV_STATUS (*__dispapiUnmapFrom__)(struct DisplayApi *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispapiGetNotificationListPtr__)(struct DisplayApi *); + struct NotifShare *(*__dispapiGetNotificationShare__)(struct DisplayApi *); + NV_STATUS (*__dispapiControlLookup__)(struct DisplayApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispapiMap__)(struct DisplayApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispapiGetOrAllocNotifShare__)(struct DisplayApi *, NvHandle, NvHandle, struct NotifShare **); + struct OBJGPU *pGpuInRmctrl; + struct OBJGPUGRP *pGpuGrp; + NvBool bBcResource; + NvU32 *pNotifyActions[8]; + NvU32 numNotifiers; + NvHandle hNotifierMemory; + struct Memory *pNotifierMemory; +}; + +#ifndef __NVOC_CLASS_DisplayApi_TYPEDEF__ +#define __NVOC_CLASS_DisplayApi_TYPEDEF__ +typedef struct DisplayApi DisplayApi; +#endif /* __NVOC_CLASS_DisplayApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayApi +#define __nvoc_class_id_DisplayApi 0xe9980c +#endif /* __nvoc_class_id_DisplayApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +#define __staticCast_DisplayApi(pThis) \ + ((pThis)->__nvoc_pbase_DisplayApi) + 
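+// Per-class cast helpers: __staticCast_DisplayApi resolves through the
+// precomputed base-class pointer, while __dynamicCast_DisplayApi (below) is an
+// RTTI-checked cast that degrades to NULL when this header's classes are
+// compiled out (__nvoc_disp_objs_h_disabled).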
+#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DisplayApi(pThis) ((DisplayApi*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DisplayApi(pThis) \ + ((DisplayApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DisplayApi))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DisplayApi(DisplayApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DisplayApi(DisplayApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DisplayApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DisplayApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispapiControl(pDisplayApi, pCallContext, pParams) dispapiControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispapiControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispapiControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispapiControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispapiControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispapiGetMemInterMapParams(pRmResource, pParams) dispapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispapiSetNotificationShare(pNotifier, pNotifShare) dispapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispapiControlFilter(pResource, pCallContext, pParams) dispapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispapiGetRefCount(pResource) dispapiGetRefCount_DISPATCH(pResource) +#define dispapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispapiUnmap(pResource, pCallContext, pCpuMapping) dispapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispapiCanCopy(pResource) dispapiCanCopy_DISPATCH(pResource) +#define dispapiMapTo(pResource, pParams) dispapiMapTo_DISPATCH(pResource, pParams) +#define dispapiAddAdditionalDependants(pClient, pResource, pReference) dispapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispapiPreDestruct(pResource) dispapiPreDestruct_DISPATCH(pResource) +#define dispapiUnmapFrom(pResource, pParams) dispapiUnmapFrom_DISPATCH(pResource, pParams) +#define dispapiGetNotificationListPtr(pNotifier) dispapiGetNotificationListPtr_DISPATCH(pNotifier) +#define dispapiGetNotificationShare(pNotifier) dispapiGetNotificationShare_DISPATCH(pNotifier) +#define dispapiControlLookup(pResource, pParams, ppEntry) dispapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispapiMap(pResource, pCallContext, pParams, pCpuMapping) dispapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispapiGetOrAllocNotifShare(pNotifier, hNotifierClient, 
hNotifierResource, ppNotifShare) dispapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispapiSetUnicastAndSynchronize_KERNEL(struct DisplayApi *pDisplayApi, struct OBJGPUGRP *pGpuGroup, struct OBJGPU **ppGpu, NvU32 subDeviceInstance); + +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispapiSetUnicastAndSynchronize(struct DisplayApi *pDisplayApi, struct OBJGPUGRP *pGpuGroup, struct OBJGPU **ppGpu, NvU32 subDeviceInstance) { + NV_ASSERT_FAILED_PRECOMP("DisplayApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispapiSetUnicastAndSynchronize(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) dispapiSetUnicastAndSynchronize_KERNEL(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) +#endif //__nvoc_disp_objs_h_disabled + +#define dispapiSetUnicastAndSynchronize_HAL(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) dispapiSetUnicastAndSynchronize(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) + +NV_STATUS dispapiControl_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS dispapiControl_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispapiControl__(pDisplayApi, pCallContext, pParams); +} + +NV_STATUS dispapiControl_Prologue_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); + +static inline NV_STATUS dispapiControl_Prologue_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispapiControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +void dispapiControl_Epilogue_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); + +static inline void dispapiControl_Epilogue_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispapiControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispapiShareCallback_DISPATCH(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispapiCheckMemInterUnmap_DISPATCH(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NvBool dispapiAccessCallback_DISPATCH(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispapiGetMemInterMapParams_DISPATCH(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispapiGetMemoryMappingDescriptor_DISPATCH(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline void 
dispapiSetNotificationShare_DISPATCH(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispapiControlFilter_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispapiGetRefCount_DISPATCH(struct DisplayApi *pResource) { + return pResource->__dispapiGetRefCount__(pResource); +} + +static inline NV_STATUS dispapiUnregisterEvent_DISPATCH(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispapiUnmap_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispapiCanCopy_DISPATCH(struct DisplayApi *pResource) { + return pResource->__dispapiCanCopy__(pResource); +} + +static inline NV_STATUS dispapiMapTo_DISPATCH(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispapiMapTo__(pResource, pParams); +} + +static inline void dispapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference) { + pResource->__dispapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispapiPreDestruct_DISPATCH(struct DisplayApi *pResource) { + pResource->__dispapiPreDestruct__(pResource); +} + +static inline NV_STATUS dispapiUnmapFrom_DISPATCH(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispapiUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispapiGetNotificationListPtr_DISPATCH(struct DisplayApi *pNotifier) { + return pNotifier->__dispapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispapiGetNotificationShare_DISPATCH(struct DisplayApi *pNotifier) { + return pNotifier->__dispapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispapiControlLookup_DISPATCH(struct DisplayApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispapiMap_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispapiGetOrAllocNotifShare_DISPATCH(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispapiConstruct_IMPL(struct DisplayApi *arg_pDisplayApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispapiConstruct(arg_pDisplayApi, arg_pCallContext, arg_pParams) dispapiConstruct_IMPL(arg_pDisplayApi, arg_pCallContext, arg_pParams) +void dispapiDestruct_IMPL(struct DisplayApi *pDisplayApi); +#define __nvoc_dispapiDestruct(pDisplayApi) 
dispapiDestruct_IMPL(pDisplayApi) +NV_STATUS dispapiCtrlCmdEventSetNotification_IMPL(struct DisplayApi *pDisplayApi, NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams); +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispapiCtrlCmdEventSetNotification(struct DisplayApi *pDisplayApi, NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams) { + NV_ASSERT_FAILED_PRECOMP("DisplayApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispapiCtrlCmdEventSetNotification(pDisplayApi, pSetEventParams) dispapiCtrlCmdEventSetNotification_IMPL(pDisplayApi, pSetEventParams) +#endif //__nvoc_disp_objs_h_disabled + +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY. Parent for all other display + * resources (channels, etc). Allocated under a device or subdevice. + * + * Only one instance of this class is allowed per-GPU. Multi-instance restrictions + * are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispObject { + const struct NVOC_RTTI *__nvoc_rtti; + struct DisplayApi __nvoc_base_DisplayApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispObject *__nvoc_pbase_DispObject; + NV_STATUS (*__dispobjCtrlCmdSetRmFreeFlags__)(struct DispObject *, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdIMPSetGetParameter__)(struct DispObject *, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgStatus__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgUnderflowProp__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetRgUnderflowProp__)(struct DispObject *, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgFliplockProp__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetRgFliplockProp__)(struct DispObject *, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgConnectedLockpin__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgConnectedLockpinStateless__)(struct DispObject *, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgScanLine__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetSorSeqCtl__)(struct DispObject *, NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetSorSeqCtl__)(struct DispObject *, NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSeqProgSpeed__)(struct DispObject *, NV5070_CTRL_SEQ_PROG_SPEED_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetSorPwm__)(struct DispObject *, NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetSorPwm__)(struct DispObject *, NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetSorOpMode__)(struct DispObject *, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetSorOpMode__)(struct DispObject *, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *); + 
NV_STATUS (*__dispobjCtrlCmdSetSorFlushMode__)(struct DispObject *, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSystemGetCapsV2__)(struct DispObject *, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdEventSetTrigger__)(struct DispObject *); + NvBool (*__dispobjShareCallback__)(struct DispObject *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispobjControl__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispobjAccessCallback__)(struct DispObject *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispobjGetMemInterMapParams__)(struct DispObject *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispobjGetMemoryMappingDescriptor__)(struct DispObject *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispobjCheckMemInterUnmap__)(struct DispObject *, NvBool); + void (*__dispobjSetNotificationShare__)(struct DispObject *, struct NotifShare *); + NV_STATUS (*__dispobjControlFilter__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispobjGetRefCount__)(struct DispObject *); + NV_STATUS (*__dispobjUnregisterEvent__)(struct DispObject *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispobjUnmap__)(struct DispObject *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__dispobjControl_Prologue__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispobjCanCopy__)(struct DispObject *); + NV_STATUS (*__dispobjMapTo__)(struct DispObject *, RS_RES_MAP_TO_PARAMS *); + void (*__dispobjAddAdditionalDependants__)(struct RsClient *, struct DispObject *, RsResourceRef *); + void (*__dispobjPreDestruct__)(struct DispObject *); + NV_STATUS (*__dispobjUnmapFrom__)(struct DispObject *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispobjGetNotificationListPtr__)(struct DispObject *); + void (*__dispobjControl_Epilogue__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__dispobjGetNotificationShare__)(struct DispObject *); + NV_STATUS (*__dispobjControlLookup__)(struct DispObject *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispobjMap__)(struct DispObject *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispobjGetOrAllocNotifShare__)(struct DispObject *, NvHandle, NvHandle, struct NotifShare **); + NvU32 rmFreeFlags; +}; + +#ifndef __NVOC_CLASS_DispObject_TYPEDEF__ +#define __NVOC_CLASS_DispObject_TYPEDEF__ +typedef struct DispObject DispObject; +#endif /* __NVOC_CLASS_DispObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispObject +#define __nvoc_class_id_DispObject 0x999839 +#endif /* __nvoc_class_id_DispObject */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +#define __staticCast_DispObject(pThis) \ + ((pThis)->__nvoc_pbase_DispObject) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispObject(pThis) ((DispObject*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispObject(pThis) \ + ((DispObject*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispObject))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispObject(DispObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispObject(DispObject**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * 
arg_pParams); +#define __objCreate_DispObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispobjCtrlCmdSetRmFreeFlags(pDispObject, pParams) dispobjCtrlCmdSetRmFreeFlags_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdIMPSetGetParameter(pDispObject, pImpSetGetParams) dispobjCtrlCmdIMPSetGetParameter_DISPATCH(pDispObject, pImpSetGetParams) +#define dispobjCtrlCmdGetRgStatus(pDispObject, pParams) dispobjCtrlCmdGetRgStatus_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgUnderflowProp(pDispObject, pParams) dispobjCtrlCmdGetRgUnderflowProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetRgUnderflowProp(pDispObject, pParams) dispobjCtrlCmdSetRgUnderflowProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgFliplockProp(pDispObject, pParams) dispobjCtrlCmdGetRgFliplockProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetRgFliplockProp(pDispObject, pParams) dispobjCtrlCmdSetRgFliplockProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgConnectedLockpin(pDispObject, pParams) dispobjCtrlCmdGetRgConnectedLockpin_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgConnectedLockpinStateless(pDispObject, pParams) dispobjCtrlCmdGetRgConnectedLockpinStateless_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgScanLine(pDispObject, pParams) dispobjCtrlCmdGetRgScanLine_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetSorSeqCtl(pDispObject, pParams) dispobjCtrlCmdGetSorSeqCtl_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorSeqCtl(pDispObject, pParams) dispobjCtrlCmdSetSorSeqCtl_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSeqProgSpeed(pDispObject, pParams) dispobjCtrlCmdSeqProgSpeed_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetSorPwm(pDispObject, pParams) dispobjCtrlCmdGetSorPwm_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorPwm(pDispObject, pParams) dispobjCtrlCmdSetSorPwm_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetSorOpMode(pDispObject, pParams) dispobjCtrlCmdGetSorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorOpMode(pDispObject, pParams) dispobjCtrlCmdSetSorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorFlushMode(pDispObject, pParams) dispobjCtrlCmdSetSorFlushMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSystemGetCapsV2(pDispObject, pCapsParams) dispobjCtrlCmdSystemGetCapsV2_DISPATCH(pDispObject, pCapsParams) +#define dispobjCtrlCmdEventSetTrigger(pDispObject) dispobjCtrlCmdEventSetTrigger_DISPATCH(pDispObject) +#define dispobjShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispobjShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispobjControl(pDisplayApi, pCallContext, pParams) dispobjControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispobjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispobjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispobjGetMemInterMapParams(pRmResource, pParams) dispobjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispobjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispobjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispobjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispobjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define 
dispobjSetNotificationShare(pNotifier, pNotifShare) dispobjSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispobjControlFilter(pResource, pCallContext, pParams) dispobjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispobjGetRefCount(pResource) dispobjGetRefCount_DISPATCH(pResource) +#define dispobjUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispobjUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispobjUnmap(pResource, pCallContext, pCpuMapping) dispobjUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispobjControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispobjControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispobjCanCopy(pResource) dispobjCanCopy_DISPATCH(pResource) +#define dispobjMapTo(pResource, pParams) dispobjMapTo_DISPATCH(pResource, pParams) +#define dispobjAddAdditionalDependants(pClient, pResource, pReference) dispobjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispobjPreDestruct(pResource) dispobjPreDestruct_DISPATCH(pResource) +#define dispobjUnmapFrom(pResource, pParams) dispobjUnmapFrom_DISPATCH(pResource, pParams) +#define dispobjGetNotificationListPtr(pNotifier) dispobjGetNotificationListPtr_DISPATCH(pNotifier) +#define dispobjControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispobjControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispobjGetNotificationShare(pNotifier) dispobjGetNotificationShare_DISPATCH(pNotifier) +#define dispobjControlLookup(pResource, pParams, ppEntry) dispobjControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispobjMap(pResource, pCallContext, pParams, pCpuMapping) dispobjMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispobjGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispobjGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispobjConstructHal_IMPL(struct DispObject *pDispObject, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispobjConstructHal(struct DispObject *pDispObject, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjConstructHal(pDispObject, pCallContext, pParams) dispobjConstructHal_IMPL(pDispObject, pCallContext, pParams) +#endif //__nvoc_disp_objs_h_disabled + +#define dispobjConstructHal_HAL(pDispObject, pCallContext, pParams) dispobjConstructHal(pDispObject, pCallContext, pParams) + +NV_STATUS dispobjCtrlCmdSetRmFreeFlags_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetRmFreeFlags_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRmFreeFlags__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdIMPSetGetParameter_IMPL(struct DispObject *pDispObject, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *pImpSetGetParams); + +static inline NV_STATUS dispobjCtrlCmdIMPSetGetParameter_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *pImpSetGetParams) { + return pDispObject->__dispobjCtrlCmdIMPSetGetParameter__(pDispObject, pImpSetGetParams); 
+} + +NV_STATUS dispobjCtrlCmdGetRgStatus_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgStatus_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgStatus__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgUnderflowProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgUnderflowProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgUnderflowProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetRgUnderflowProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetRgUnderflowProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRgUnderflowProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgFliplockProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgFliplockProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgFliplockProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetRgFliplockProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetRgFliplockProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRgFliplockProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgConnectedLockpin_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgConnectedLockpin_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgConnectedLockpin__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL(struct DispObject *pDispObject, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgConnectedLockpinStateless_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgConnectedLockpinStateless__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgScanLine_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgScanLine_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgScanLine__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetSorSeqCtl_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetSorSeqCtl_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetSorSeqCtl__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorSeqCtl_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS *pParams); + +static inline 
NV_STATUS dispobjCtrlCmdSetSorSeqCtl_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorSeqCtl__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSeqProgSpeed_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SEQ_PROG_SPEED_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSeqProgSpeed_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SEQ_PROG_SPEED_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSeqProgSpeed__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetSorPwm_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetSorPwm_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetSorPwm__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorPwm_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetSorPwm_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorPwm__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetSorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetSorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetSorOpMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetSorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorOpMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorFlushMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetSorFlushMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorFlushMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSystemGetCapsV2_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams); + +static inline NV_STATUS dispobjCtrlCmdSystemGetCapsV2_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams) { + return pDispObject->__dispobjCtrlCmdSystemGetCapsV2__(pDispObject, pCapsParams); +} + +NV_STATUS dispobjCtrlCmdEventSetTrigger_IMPL(struct DispObject *pDispObject); + +static inline NV_STATUS dispobjCtrlCmdEventSetTrigger_DISPATCH(struct DispObject *pDispObject) { + return pDispObject->__dispobjCtrlCmdEventSetTrigger__(pDispObject); +} + +static inline NvBool dispobjShareCallback_DISPATCH(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispobjShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispobjControl_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispobjControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool dispobjAccessCallback_DISPATCH(struct DispObject *pResource, struct RsClient 
*pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispobjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispobjGetMemInterMapParams_DISPATCH(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispobjGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispobjGetMemoryMappingDescriptor_DISPATCH(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispobjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispobjCheckMemInterUnmap_DISPATCH(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispobjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void dispobjSetNotificationShare_DISPATCH(struct DispObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispobjSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispobjControlFilter_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispobjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispobjGetRefCount_DISPATCH(struct DispObject *pResource) { + return pResource->__dispobjGetRefCount__(pResource); +} + +static inline NV_STATUS dispobjUnregisterEvent_DISPATCH(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispobjUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispobjUnmap_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispobjUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispobjControl_Prologue_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispobjControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispobjCanCopy_DISPATCH(struct DispObject *pResource) { + return pResource->__dispobjCanCopy__(pResource); +} + +static inline NV_STATUS dispobjMapTo_DISPATCH(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispobjMapTo__(pResource, pParams); +} + +static inline void dispobjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispObject *pResource, RsResourceRef *pReference) { + pResource->__dispobjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispobjPreDestruct_DISPATCH(struct DispObject *pResource) { + pResource->__dispobjPreDestruct__(pResource); +} + +static inline NV_STATUS dispobjUnmapFrom_DISPATCH(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispobjUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispobjGetNotificationListPtr_DISPATCH(struct DispObject *pNotifier) { + return pNotifier->__dispobjGetNotificationListPtr__(pNotifier); +} + +static inline void dispobjControl_Epilogue_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispobjControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct 
NotifShare *dispobjGetNotificationShare_DISPATCH(struct DispObject *pNotifier) { + return pNotifier->__dispobjGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispobjControlLookup_DISPATCH(struct DispObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispobjControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispobjMap_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispobjMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispobjGetOrAllocNotifShare_DISPATCH(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispobjGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispobjConstruct_IMPL(struct DispObject *arg_pDispObject, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispobjConstruct(arg_pDispObject, arg_pCallContext, arg_pParams) dispobjConstruct_IMPL(arg_pDispObject, arg_pCallContext, arg_pParams) +NV_STATUS dispobjGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDispObject, struct DispObject **ppDispObject); +#define dispobjGetByHandle(pClient, hDispObject, ppDispObject) dispobjGetByHandle_IMPL(pClient, hDispObject, ppDispObject) +NV_STATUS dispobjGetByDevice_IMPL(struct RsClient *pClient, struct Device *pDevice, struct DispObject **ppDispObject); +#define dispobjGetByDevice(pClient, pDevice, ppDispObject) dispobjGetByDevice_IMPL(pClient, pDevice, ppDispObject) +void dispobjClearRmFreeFlags_IMPL(struct DispObject *pDispObject); +#ifdef __nvoc_disp_objs_h_disabled +static inline void dispobjClearRmFreeFlags(struct DispObject *pDispObject) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjClearRmFreeFlags(pDispObject) dispobjClearRmFreeFlags_IMPL(pDispObject) +#endif //__nvoc_disp_objs_h_disabled + +NvBool dispobjGetRmFreeFlags_IMPL(struct DispObject *pDispObject); +#ifdef __nvoc_disp_objs_h_disabled +static inline NvBool dispobjGetRmFreeFlags(struct DispObject *pDispObject) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); + return NV_FALSE; +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjGetRmFreeFlags(pDispObject) dispobjGetRmFreeFlags_IMPL(pDispObject) +#endif //__nvoc_disp_objs_h_disabled + +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing NvDisp's XXX_DISPLAY (C370, C570...etc). Parent for + * all other display resources (channels, etc). Allocated under a device or subdevice. + * + * Only one instance of this class is allowed per-GPU. 
Multi-instance restrictions + * are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NvDispApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct DispObject __nvoc_base_DispObject; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispObject *__nvoc_pbase_DispObject; + struct NvDispApi *__nvoc_pbase_NvDispApi; + NV_STATUS (*__nvdispapiCtrlCmdIdleChannel__)(struct NvDispApi *, NVC370_CTRL_IDLE_CHANNEL_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdSetAccl__)(struct NvDispApi *, NVC370_CTRL_SET_ACCL_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdGetAccl__)(struct NvDispApi *, NVC370_CTRL_GET_ACCL_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdGetChannelInfo__)(struct NvDispApi *, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdSetSwaprdyGpioWar__)(struct NvDispApi *, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdGetLockpinsCaps__)(struct NvDispApi *, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__)(struct NvDispApi *, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *); + NvBool (*__nvdispapiShareCallback__)(struct NvDispApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__nvdispapiControl__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__nvdispapiAccessCallback__)(struct NvDispApi *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__nvdispapiGetMemInterMapParams__)(struct NvDispApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__nvdispapiGetMemoryMappingDescriptor__)(struct NvDispApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__nvdispapiCheckMemInterUnmap__)(struct NvDispApi *, NvBool); + void (*__nvdispapiSetNotificationShare__)(struct NvDispApi *, struct NotifShare *); + NV_STATUS (*__nvdispapiControlFilter__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__nvdispapiGetRefCount__)(struct NvDispApi *); + NV_STATUS (*__nvdispapiUnregisterEvent__)(struct NvDispApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__nvdispapiUnmap__)(struct NvDispApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__nvdispapiControl_Prologue__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__nvdispapiCanCopy__)(struct NvDispApi *); + NV_STATUS (*__nvdispapiMapTo__)(struct NvDispApi *, RS_RES_MAP_TO_PARAMS *); + void (*__nvdispapiAddAdditionalDependants__)(struct RsClient *, struct NvDispApi *, RsResourceRef *); + void (*__nvdispapiPreDestruct__)(struct NvDispApi *); + NV_STATUS (*__nvdispapiUnmapFrom__)(struct NvDispApi *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__nvdispapiGetNotificationListPtr__)(struct NvDispApi *); + void (*__nvdispapiControl_Epilogue__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__nvdispapiGetNotificationShare__)(struct NvDispApi *); + NV_STATUS (*__nvdispapiControlLookup__)(struct NvDispApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + 
NV_STATUS (*__nvdispapiMap__)(struct NvDispApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__nvdispapiGetOrAllocNotifShare__)(struct NvDispApi *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_NvDispApi_TYPEDEF__ +#define __NVOC_CLASS_NvDispApi_TYPEDEF__ +typedef struct NvDispApi NvDispApi; +#endif /* __NVOC_CLASS_NvDispApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDispApi +#define __nvoc_class_id_NvDispApi 0x36aa0b +#endif /* __nvoc_class_id_NvDispApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi; + +#define __staticCast_NvDispApi(pThis) \ + ((pThis)->__nvoc_pbase_NvDispApi) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_NvDispApi(pThis) ((NvDispApi*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_NvDispApi(pThis) \ + ((NvDispApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvDispApi))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_NvDispApi(NvDispApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NvDispApi(NvDispApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_NvDispApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_NvDispApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define nvdispapiCtrlCmdIdleChannel(pNvDispApi, pParams) nvdispapiCtrlCmdIdleChannel_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetAccl(pNvDispApi, pParams) nvdispapiCtrlCmdSetAccl_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetAccl(pNvDispApi, pParams) nvdispapiCtrlCmdGetAccl_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetChannelInfo(pNvDispApi, pParams) nvdispapiCtrlCmdGetChannelInfo_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetSwaprdyGpioWar(pNvDispApi, pParams) nvdispapiCtrlCmdSetSwaprdyGpioWar_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetLockpinsCaps(pNvDispApi, pParams) nvdispapiCtrlCmdGetLockpinsCaps_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides(pNvDispApi, pParams) nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_DISPATCH(pNvDispApi, pParams) +#define nvdispapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) nvdispapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define nvdispapiControl(pDisplayApi, pCallContext, pParams) nvdispapiControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define nvdispapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) nvdispapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define nvdispapiGetMemInterMapParams(pRmResource, pParams) nvdispapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define nvdispapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) nvdispapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define nvdispapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) nvdispapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define nvdispapiSetNotificationShare(pNotifier, pNotifShare) nvdispapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define nvdispapiControlFilter(pResource, pCallContext, pParams) nvdispapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define nvdispapiGetRefCount(pResource) nvdispapiGetRefCount_DISPATCH(pResource) +#define 
nvdispapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) nvdispapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define nvdispapiUnmap(pResource, pCallContext, pCpuMapping) nvdispapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define nvdispapiControl_Prologue(pDisplayApi, pCallContext, pRsParams) nvdispapiControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define nvdispapiCanCopy(pResource) nvdispapiCanCopy_DISPATCH(pResource) +#define nvdispapiMapTo(pResource, pParams) nvdispapiMapTo_DISPATCH(pResource, pParams) +#define nvdispapiAddAdditionalDependants(pClient, pResource, pReference) nvdispapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define nvdispapiPreDestruct(pResource) nvdispapiPreDestruct_DISPATCH(pResource) +#define nvdispapiUnmapFrom(pResource, pParams) nvdispapiUnmapFrom_DISPATCH(pResource, pParams) +#define nvdispapiGetNotificationListPtr(pNotifier) nvdispapiGetNotificationListPtr_DISPATCH(pNotifier) +#define nvdispapiControl_Epilogue(pDisplayApi, pCallContext, pRsParams) nvdispapiControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define nvdispapiGetNotificationShare(pNotifier) nvdispapiGetNotificationShare_DISPATCH(pNotifier) +#define nvdispapiControlLookup(pResource, pParams, ppEntry) nvdispapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define nvdispapiMap(pResource, pCallContext, pParams, pCpuMapping) nvdispapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define nvdispapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) nvdispapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS nvdispapiCtrlCmdIdleChannel_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_IDLE_CHANNEL_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdIdleChannel_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_IDLE_CHANNEL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdIdleChannel__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdSetAccl_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_ACCL_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdSetAccl_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_ACCL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetAccl__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdGetAccl_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_ACCL_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdGetAccl_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_ACCL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetAccl__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdGetChannelInfo_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdGetChannelInfo_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetChannelInfo__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdSetSwaprdyGpioWar_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetSwaprdyGpioWar__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdGetLockpinsCaps_IMPL(struct NvDispApi *pNvDispApi, 
NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdGetLockpinsCaps_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetLockpinsCaps__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__(pNvDispApi, pParams); +} + +static inline NvBool nvdispapiShareCallback_DISPATCH(struct NvDispApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvdispapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS nvdispapiControl_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__nvdispapiControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool nvdispapiAccessCallback_DISPATCH(struct NvDispApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvdispapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS nvdispapiGetMemInterMapParams_DISPATCH(struct NvDispApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvdispapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS nvdispapiGetMemoryMappingDescriptor_DISPATCH(struct NvDispApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvdispapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS nvdispapiCheckMemInterUnmap_DISPATCH(struct NvDispApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvdispapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void nvdispapiSetNotificationShare_DISPATCH(struct NvDispApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvdispapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS nvdispapiControlFilter_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvdispapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 nvdispapiGetRefCount_DISPATCH(struct NvDispApi *pResource) { + return pResource->__nvdispapiGetRefCount__(pResource); +} + +static inline NV_STATUS nvdispapiUnregisterEvent_DISPATCH(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvdispapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS nvdispapiUnmap_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvdispapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS nvdispapiControl_Prologue_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return 
pDisplayApi->__nvdispapiControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool nvdispapiCanCopy_DISPATCH(struct NvDispApi *pResource) { + return pResource->__nvdispapiCanCopy__(pResource); +} + +static inline NV_STATUS nvdispapiMapTo_DISPATCH(struct NvDispApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvdispapiMapTo__(pResource, pParams); +} + +static inline void nvdispapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct NvDispApi *pResource, RsResourceRef *pReference) { + pResource->__nvdispapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void nvdispapiPreDestruct_DISPATCH(struct NvDispApi *pResource) { + pResource->__nvdispapiPreDestruct__(pResource); +} + +static inline NV_STATUS nvdispapiUnmapFrom_DISPATCH(struct NvDispApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvdispapiUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *nvdispapiGetNotificationListPtr_DISPATCH(struct NvDispApi *pNotifier) { + return pNotifier->__nvdispapiGetNotificationListPtr__(pNotifier); +} + +static inline void nvdispapiControl_Epilogue_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__nvdispapiControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct NotifShare *nvdispapiGetNotificationShare_DISPATCH(struct NvDispApi *pNotifier) { + return pNotifier->__nvdispapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS nvdispapiControlLookup_DISPATCH(struct NvDispApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__nvdispapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS nvdispapiMap_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvdispapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS nvdispapiGetOrAllocNotifShare_DISPATCH(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvdispapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS nvdispapiConstruct_IMPL(struct NvDispApi *arg_pNvdispApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_nvdispapiConstruct(arg_pNvdispApi, arg_pCallContext, arg_pParams) nvdispapiConstruct_IMPL(arg_pNvdispApi, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY_SW + * + * With NvDisplay, classes are divided into HW & SW classes: the HW class provides + * the register/method interface, while the SW class provides rmctrls. Clients can use + * multiple SW classes on a chip, but only one HW class. NVC372_DISPLAY_SW is the SW + * class for NvDisplay-family chips.
+ * + * Multi-instance restrictions are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispSwObj { + const struct NVOC_RTTI *__nvoc_rtti; + struct DisplayApi __nvoc_base_DisplayApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispSwObj *__nvoc_pbase_DispSwObj; + NV_STATUS (*__dispswobjCtrlCmdIsModePossible__)(struct DispSwObj *, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *); + NV_STATUS (*__dispswobjCtrlCmdIsModePossibleOrSettings__)(struct DispSwObj *, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *); + NV_STATUS (*__dispswobjCtrlCmdVideoAdaptiveRefreshRate__)(struct DispSwObj *, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *); + NV_STATUS (*__dispswobjCtrlCmdGetActiveViewportPointIn__)(struct DispSwObj *, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *); + NvBool (*__dispswobjShareCallback__)(struct DispSwObj *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispswobjControl__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispswobjAccessCallback__)(struct DispSwObj *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispswobjGetMemInterMapParams__)(struct DispSwObj *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispswobjGetMemoryMappingDescriptor__)(struct DispSwObj *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispswobjCheckMemInterUnmap__)(struct DispSwObj *, NvBool); + void (*__dispswobjSetNotificationShare__)(struct DispSwObj *, struct NotifShare *); + NV_STATUS (*__dispswobjControlFilter__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispswobjGetRefCount__)(struct DispSwObj *); + NV_STATUS (*__dispswobjUnregisterEvent__)(struct DispSwObj *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispswobjUnmap__)(struct DispSwObj *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__dispswobjControl_Prologue__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispswobjCanCopy__)(struct DispSwObj *); + NV_STATUS (*__dispswobjMapTo__)(struct DispSwObj *, RS_RES_MAP_TO_PARAMS *); + void (*__dispswobjAddAdditionalDependants__)(struct RsClient *, struct DispSwObj *, RsResourceRef *); + void (*__dispswobjPreDestruct__)(struct DispSwObj *); + NV_STATUS (*__dispswobjUnmapFrom__)(struct DispSwObj *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispswobjGetNotificationListPtr__)(struct DispSwObj *); + void (*__dispswobjControl_Epilogue__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__dispswobjGetNotificationShare__)(struct DispSwObj *); + NV_STATUS (*__dispswobjControlLookup__)(struct DispSwObj *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispswobjMap__)(struct DispSwObj *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispswobjGetOrAllocNotifShare__)(struct DispSwObj *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_DispSwObj_TYPEDEF__ +#define __NVOC_CLASS_DispSwObj_TYPEDEF__ +typedef 
struct DispSwObj DispSwObj; +#endif /* __NVOC_CLASS_DispSwObj_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObj +#define __nvoc_class_id_DispSwObj 0x6aa5e2 +#endif /* __nvoc_class_id_DispSwObj */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj; + +#define __staticCast_DispSwObj(pThis) \ + ((pThis)->__nvoc_pbase_DispSwObj) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispSwObj(pThis) ((DispSwObj*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispSwObj(pThis) \ + ((DispSwObj*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSwObj))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispSwObj(DispSwObj**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispSwObj(DispSwObj**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispSwObj(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispSwObj((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispswobjCtrlCmdIsModePossible(pDispSwObj, pParams) dispswobjCtrlCmdIsModePossible_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdIsModePossibleOrSettings(pDispSwObj, pParams) dispswobjCtrlCmdIsModePossibleOrSettings_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdVideoAdaptiveRefreshRate(pDispSwObj, pParams) dispswobjCtrlCmdVideoAdaptiveRefreshRate_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdGetActiveViewportPointIn(pDispSwObj, pParams) dispswobjCtrlCmdGetActiveViewportPointIn_DISPATCH(pDispSwObj, pParams) +#define dispswobjShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispswobjShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispswobjControl(pDisplayApi, pCallContext, pParams) dispswobjControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispswobjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispswobjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispswobjGetMemInterMapParams(pRmResource, pParams) dispswobjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispswobjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispswobjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispswobjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispswobjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispswobjSetNotificationShare(pNotifier, pNotifShare) dispswobjSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispswobjControlFilter(pResource, pCallContext, pParams) dispswobjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispswobjGetRefCount(pResource) dispswobjGetRefCount_DISPATCH(pResource) +#define dispswobjUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispswobjUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispswobjUnmap(pResource, pCallContext, pCpuMapping) dispswobjUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispswobjControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispswobjControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispswobjCanCopy(pResource) dispswobjCanCopy_DISPATCH(pResource) +#define dispswobjMapTo(pResource, pParams) dispswobjMapTo_DISPATCH(pResource, pParams) +#define 
dispswobjAddAdditionalDependants(pClient, pResource, pReference) dispswobjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispswobjPreDestruct(pResource) dispswobjPreDestruct_DISPATCH(pResource) +#define dispswobjUnmapFrom(pResource, pParams) dispswobjUnmapFrom_DISPATCH(pResource, pParams) +#define dispswobjGetNotificationListPtr(pNotifier) dispswobjGetNotificationListPtr_DISPATCH(pNotifier) +#define dispswobjControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispswobjControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispswobjGetNotificationShare(pNotifier) dispswobjGetNotificationShare_DISPATCH(pNotifier) +#define dispswobjControlLookup(pResource, pParams, ppEntry) dispswobjControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispswobjMap(pResource, pCallContext, pParams, pCpuMapping) dispswobjMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispswobjGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispswobjGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispswobjCtrlCmdIsModePossible_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdIsModePossible_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdIsModePossible__(pDispSwObj, pParams); +} + +NV_STATUS dispswobjCtrlCmdIsModePossibleOrSettings_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdIsModePossibleOrSettings_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdIsModePossibleOrSettings__(pDispSwObj, pParams); +} + +NV_STATUS dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdVideoAdaptiveRefreshRate_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdVideoAdaptiveRefreshRate__(pDispSwObj, pParams); +} + +NV_STATUS dispswobjCtrlCmdGetActiveViewportPointIn_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdGetActiveViewportPointIn_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdGetActiveViewportPointIn__(pDispSwObj, pParams); +} + +static inline NvBool dispswobjShareCallback_DISPATCH(struct DispSwObj *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispswobjShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispswobjControl_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispswobjControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool dispswobjAccessCallback_DISPATCH(struct DispSwObj *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispswobjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS 
dispswobjGetMemInterMapParams_DISPATCH(struct DispSwObj *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispswobjGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispswobjGetMemoryMappingDescriptor_DISPATCH(struct DispSwObj *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispswobjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispswobjCheckMemInterUnmap_DISPATCH(struct DispSwObj *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispswobjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void dispswobjSetNotificationShare_DISPATCH(struct DispSwObj *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispswobjSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispswobjControlFilter_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispswobjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispswobjGetRefCount_DISPATCH(struct DispSwObj *pResource) { + return pResource->__dispswobjGetRefCount__(pResource); +} + +static inline NV_STATUS dispswobjUnregisterEvent_DISPATCH(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispswobjUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispswobjUnmap_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispswobjUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispswobjControl_Prologue_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispswobjControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispswobjCanCopy_DISPATCH(struct DispSwObj *pResource) { + return pResource->__dispswobjCanCopy__(pResource); +} + +static inline NV_STATUS dispswobjMapTo_DISPATCH(struct DispSwObj *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispswobjMapTo__(pResource, pParams); +} + +static inline void dispswobjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispSwObj *pResource, RsResourceRef *pReference) { + pResource->__dispswobjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispswobjPreDestruct_DISPATCH(struct DispSwObj *pResource) { + pResource->__dispswobjPreDestruct__(pResource); +} + +static inline NV_STATUS dispswobjUnmapFrom_DISPATCH(struct DispSwObj *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispswobjUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispswobjGetNotificationListPtr_DISPATCH(struct DispSwObj *pNotifier) { + return pNotifier->__dispswobjGetNotificationListPtr__(pNotifier); +} + +static inline void dispswobjControl_Epilogue_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispswobjControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct NotifShare *dispswobjGetNotificationShare_DISPATCH(struct DispSwObj *pNotifier) { + return pNotifier->__dispswobjGetNotificationShare__(pNotifier); +} + 
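+/*
+ * Editorial note, not NVOC output: the *_DISPATCH wrappers above implement
+ * C-style virtual dispatch. Each NVOC object carries one function pointer per
+ * virtual method (the __dispswobj...__ fields in struct DispSwObj), the inline
+ * _DISPATCH wrapper forwards through that pointer, and the public macro names
+ * the wrapper. A hypothetical caller holding a constructed DispSwObj would use
+ * it as sketched below; the block is guarded with #if 0 as illustration only.
+ */
+#if 0
+static void exampleDispSwObjDispatch(struct DispSwObj *pDispSwObj)
+{
+    /* Expands to dispswobjCanCopy_DISPATCH(pDispSwObj), which calls
+     * pDispSwObj->__dispswobjCanCopy__(pDispSwObj). */
+    NvBool bCanCopy = dispswobjCanCopy(pDispSwObj);
+    (void)bCanCopy;
+}
+#endif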
+static inline NV_STATUS dispswobjControlLookup_DISPATCH(struct DispSwObj *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispswobjControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispswobjMap_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispswobjMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispswobjGetOrAllocNotifShare_DISPATCH(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispswobjGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispswobjConstruct_IMPL(struct DispSwObj *arg_pDispSwObj, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispswobjConstruct(arg_pDispSwObj, arg_pCallContext, arg_pParams) dispswobjConstruct_IMPL(arg_pDispSwObj, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY_COMMON (class id: 0x0073) + * + * Only one instance of this class is allowed per-GPU. Multi-instance restrictions + * are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispCommon { + const struct NVOC_RTTI *__nvoc_rtti; + struct DisplayApi __nvoc_base_DisplayApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispCommon *__nvoc_pbase_DispCommon; + NV_STATUS (*__dispcmnCtrlCmdSystemGetCapsV2__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetNumHeads__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetScanline__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetSuppported__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetConnectState__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugUnplugState__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdInternalGetHotplugUnplugState__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHeadRoutingMap__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetActive__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetBootDisplays__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__)(struct DispCommon *, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__)(struct DispCommon *, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *); + NV_STATUS 
(*__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__)(struct DispCommon *, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugConfig__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugEventConfig__)(struct DispCommon *, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemSetHotplugEventConfig__)(struct DispCommon *, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemArmLightweightSupervisor__)(struct DispCommon *, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__)(struct DispCommon *, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetType__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetEdidV2__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetEdidV2__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificFakeDevice__)(struct DispCommon *, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetConnectorData__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiEnable__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificCtrlHdmi__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetAllHeadMask__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetOdPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetSharedGenericPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetOdPacketCtrl__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificOrGetInfo__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetPclkLimit__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetMonitorPower__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetI2cPortid__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdmiScdcData__)(struct DispCommon 
*, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificDispI2cReadWrite__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetInfo__)(struct DispCommon *, NV0073_CTRL_DFP_GET_INFO_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__)(struct DispCommon *, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpSetEldAudioCaps__)(struct DispCommon *, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpSetAudioEnable__)(struct DispCommon *, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__)(struct DispCommon *, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpAssignSor__)(struct DispCommon *, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpDscCrcControl__)(struct DispCommon *, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpInitMuxData__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetDsiModeTiming__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__)(struct DispCommon *, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetPadlinkMask__)(struct DispCommon *, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetFixedModeTiming__)(struct DispCommon *, NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpAuxchCtrl__)(struct DispCommon *, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpAuxchSetSema__)(struct DispCommon *, NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpCtrl__)(struct DispCommon *, NV0073_CTRL_DP_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetLaneData__)(struct DispCommon *, NV0073_CTRL_DP_LANE_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetLaneData__)(struct DispCommon *, NV0073_CTRL_DP_LANE_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetTestpattern__)(struct DispCommon *, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpMainLinkCtrl__)(struct DispCommon *, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetAudioMuteStream__)(struct DispCommon *, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetLinkConfig__)(struct DispCommon *, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetEDPData__)(struct DispCommon *, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpTopologyAllocateDisplayId__)(struct DispCommon *, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpTopologyFreeDisplayId__)(struct DispCommon *, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigStream__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *); + NV_STATUS 
(*__dispcmnCtrlCmdDpSetRateGov__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSendACT__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetManualDisplayPort__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetCaps__)(struct DispCommon *, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetMSAProperties__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetStereoMSAProperties__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGenerateFakeInterrupt__)(struct DispCommon *, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigRadScratchReg__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetTriggerSelect__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetTriggerAll__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetAuxLogData__)(struct DispCommon *, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigIndexedLinkRates__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigureFec__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetGenericInfoframe__)(struct DispCommon *, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetMsaAttributes__)(struct DispCommon *, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigMacroPad__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__)(struct DispCommon *, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__)(struct DispCommon *, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetRegionalCrcs__)(struct DispCommon *, NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS *); + NvBool (*__dispcmnShareCallback__)(struct DispCommon *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispcmnControl__)(struct DispCommon *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispcmnAccessCallback__)(struct DispCommon *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispcmnGetMemInterMapParams__)(struct DispCommon *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispcmnGetMemoryMappingDescriptor__)(struct DispCommon *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispcmnCheckMemInterUnmap__)(struct DispCommon *, NvBool); + void (*__dispcmnSetNotificationShare__)(struct DispCommon *, struct NotifShare *); + NV_STATUS (*__dispcmnControlFilter__)(struct DispCommon *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispcmnGetRefCount__)(struct DispCommon *); + NV_STATUS (*__dispcmnUnregisterEvent__)(struct DispCommon *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispcmnUnmap__)(struct DispCommon *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__dispcmnControl_Prologue__)(struct DispCommon *, struct 
CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispcmnCanCopy__)(struct DispCommon *); + NV_STATUS (*__dispcmnMapTo__)(struct DispCommon *, RS_RES_MAP_TO_PARAMS *); + void (*__dispcmnAddAdditionalDependants__)(struct RsClient *, struct DispCommon *, RsResourceRef *); + void (*__dispcmnPreDestruct__)(struct DispCommon *); + NV_STATUS (*__dispcmnUnmapFrom__)(struct DispCommon *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispcmnGetNotificationListPtr__)(struct DispCommon *); + void (*__dispcmnControl_Epilogue__)(struct DispCommon *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__dispcmnGetNotificationShare__)(struct DispCommon *); + NV_STATUS (*__dispcmnControlLookup__)(struct DispCommon *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispcmnMap__)(struct DispCommon *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispcmnGetOrAllocNotifShare__)(struct DispCommon *, NvHandle, NvHandle, struct NotifShare **); + NvU32 hotPlugMaskToBeReported; + NvU32 hotUnplugMaskToBeReported; +}; + +#ifndef __NVOC_CLASS_DispCommon_TYPEDEF__ +#define __NVOC_CLASS_DispCommon_TYPEDEF__ +typedef struct DispCommon DispCommon; +#endif /* __NVOC_CLASS_DispCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCommon +#define __nvoc_class_id_DispCommon 0x41f4f2 +#endif /* __nvoc_class_id_DispCommon */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon; + +#define __staticCast_DispCommon(pThis) \ + ((pThis)->__nvoc_pbase_DispCommon) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispCommon(pThis) ((DispCommon*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispCommon(pThis) \ + ((DispCommon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCommon))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispCommon(DispCommon**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispCommon(DispCommon**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispCommon(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispCommon((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispcmnCtrlCmdSystemGetCapsV2(pDispCommon, pCapsParams) dispcmnCtrlCmdSystemGetCapsV2_DISPATCH(pDispCommon, pCapsParams) +#define dispcmnCtrlCmdSystemGetNumHeads(pDispCommon, pNumHeadsParams) dispcmnCtrlCmdSystemGetNumHeads_DISPATCH(pDispCommon, pNumHeadsParams) +#define dispcmnCtrlCmdSystemGetScanline(pDispCommon, pScanlineParams) dispcmnCtrlCmdSystemGetScanline_DISPATCH(pDispCommon, pScanlineParams) +#define dispcmnCtrlCmdSystemGetSuppported(pDispCommon, pSupportedParams) dispcmnCtrlCmdSystemGetSuppported_DISPATCH(pDispCommon, pSupportedParams) +#define dispcmnCtrlCmdSystemGetConnectState(pDispCommon, pConnectParams) dispcmnCtrlCmdSystemGetConnectState_DISPATCH(pDispCommon, pConnectParams) +#define dispcmnCtrlCmdSystemGetHotplugUnplugState(pDispCommon, pHotplugParams) dispcmnCtrlCmdSystemGetHotplugUnplugState_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdInternalGetHotplugUnplugState(pDispCommon, pHotplugParams) dispcmnCtrlCmdInternalGetHotplugUnplugState_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdSystemGetHeadRoutingMap(pDispCommon, pMapParams) dispcmnCtrlCmdSystemGetHeadRoutingMap_DISPATCH(pDispCommon, 
pMapParams) +#define dispcmnCtrlCmdSystemGetActive(pDispCommon, pActiveParams) dispcmnCtrlCmdSystemGetActive_DISPATCH(pDispCommon, pActiveParams) +#define dispcmnCtrlCmdSystemGetBootDisplays(pDispCommon, pParams) dispcmnCtrlCmdSystemGetBootDisplays_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemQueryDisplayIdsWithMux(pDispCommon, pParams) dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemCheckSidebandI2cSupport(pDispCommon, pParams) dispcmnCtrlCmdSystemCheckSidebandI2cSupport_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemAllocateDisplayBandwidth(pDispCommon, pParams) dispcmnCtrlCmdSystemAllocateDisplayBandwidth_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemGetHotplugConfig(pDispCommon, pHotplugParams) dispcmnCtrlCmdSystemGetHotplugConfig_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdSystemGetHotplugEventConfig(pDispCommon, pParams) dispcmnCtrlCmdSystemGetHotplugEventConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemSetHotplugEventConfig(pDispCommon, pParams) dispcmnCtrlCmdSystemSetHotplugEventConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemArmLightweightSupervisor(pDispCommon, pParams) dispcmnCtrlCmdSystemArmLightweightSupervisor_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemConfigVrrPstateSwitch(pDispCommon, pParams) dispcmnCtrlCmdSystemConfigVrrPstateSwitch_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetType(pDispCommon, pDisplayTypeParams) dispcmnCtrlCmdSpecificGetType_DISPATCH(pDispCommon, pDisplayTypeParams) +#define dispcmnCtrlCmdSpecificGetEdidV2(pDispCommon, pEdidParams) dispcmnCtrlCmdSpecificGetEdidV2_DISPATCH(pDispCommon, pEdidParams) +#define dispcmnCtrlCmdSpecificSetEdidV2(pDispCommon, pEdidParams) dispcmnCtrlCmdSpecificSetEdidV2_DISPATCH(pDispCommon, pEdidParams) +#define dispcmnCtrlCmdSpecificFakeDevice(pDispCommon, pTestParams) dispcmnCtrlCmdSpecificFakeDevice_DISPATCH(pDispCommon, pTestParams) +#define dispcmnCtrlCmdSpecificGetConnectorData(pDispCommon, pConnectorParams) dispcmnCtrlCmdSpecificGetConnectorData_DISPATCH(pDispCommon, pConnectorParams) +#define dispcmnCtrlCmdSpecificSetHdmiEnable(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiEnable_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificCtrlHdmi(pDispCommon, pParams) dispcmnCtrlCmdSpecificCtrlHdmi_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetAllHeadMask(pDispCommon, pAllHeadMaskParams) dispcmnCtrlCmdSpecificGetAllHeadMask_DISPATCH(pDispCommon, pAllHeadMaskParams) +#define dispcmnCtrlCmdSpecificSetOdPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetOdPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificAcquireSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificReleaseSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetOdPacketCtrl(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetOdPacketCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificOrGetInfo(pDispCommon, pParams) dispcmnCtrlCmdSpecificOrGetInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetPclkLimit(pDispCommon, pParams) 
dispcmnCtrlCmdSpecificGetPclkLimit_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetHdmiSinkCaps(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiSinkCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetMonitorPower(pDispCommon, setMonitorPowerParams) dispcmnCtrlCmdSpecificSetMonitorPower_DISPATCH(pDispCommon, setMonitorPowerParams) +#define dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificApplyEdidOverrideV2(pDispCommon, pEdidOverrideParams) dispcmnCtrlCmdSpecificApplyEdidOverrideV2_DISPATCH(pDispCommon, pEdidOverrideParams) +#define dispcmnCtrlCmdSpecificGetI2cPortid(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetI2cPortid_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdmiGpuCaps(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdmiGpuCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdmiScdcData(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdmiScdcData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificIsDirectmodeDisplay(pDispCommon, pParams) dispcmnCtrlCmdSpecificIsDirectmodeDisplay_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificDispI2cReadWrite(pDispCommon, pParams) dispcmnCtrlCmdSpecificDispI2cReadWrite_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetInfo(pDispCommon, pParams) dispcmnCtrlCmdDfpGetInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetDisplayportDongleInfo(pDispCommon, pParams) dispcmnCtrlCmdDfpGetDisplayportDongleInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpSetEldAudioCaps(pDispCommon, pEldAudioCapsParams) dispcmnCtrlCmdDfpSetEldAudioCaps_DISPATCH(pDispCommon, pEldAudioCapsParams) +#define dispcmnCtrlCmdDfpSetAudioEnable(pDispCommon, pParams) dispcmnCtrlCmdDfpSetAudioEnable_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpUpdateDynamicDfpCache(pDispCommon, pParams) dispcmnCtrlCmdDfpUpdateDynamicDfpCache_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpAssignSor(pDispCommon, pParams) dispcmnCtrlCmdDfpAssignSor_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpDscCrcControl(pDispCommon, pParams) dispcmnCtrlCmdDfpDscCrcControl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpInitMuxData(pDispCommon, pParams) dispcmnCtrlCmdDfpInitMuxData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetDsiModeTiming(pDispCommon, pParams) dispcmnCtrlCmdDfpGetDsiModeTiming_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpConfigTwoHeadOneOr(pDispCommon, pParams) dispcmnCtrlCmdDfpConfigTwoHeadOneOr_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetPadlinkMask(pDispCommon, pParams) dispcmnCtrlCmdDfpGetPadlinkMask_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetFixedModeTiming(pDispCommon, pParams) dispcmnCtrlCmdDfpGetFixedModeTiming_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpAuxchCtrl(pDispCommon, pAuxchCtrlParams) dispcmnCtrlCmdDpAuxchCtrl_DISPATCH(pDispCommon, pAuxchCtrlParams) +#define dispcmnCtrlCmdDpAuxchSetSema(pDispCommon, pSemaParams) dispcmnCtrlCmdDpAuxchSetSema_DISPATCH(pDispCommon, pSemaParams) +#define dispcmnCtrlCmdDpCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetLaneData(pDispCommon, pParams) 
dispcmnCtrlCmdDpGetLaneData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetLaneData(pDispCommon, pParams) dispcmnCtrlCmdDpSetLaneData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetTestpattern(pDispCommon, pParams) dispcmnCtrlCmdDpSetTestpattern_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpMainLinkCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpMainLinkCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetAudioMuteStream(pDispCommon, pParams) dispcmnCtrlCmdDpSetAudioMuteStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetLinkConfig(pDispCommon, pParams) dispcmnCtrlCmdDpGetLinkConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetEDPData(pDispCommon, pParams) dispcmnCtrlCmdDpGetEDPData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpTopologyAllocateDisplayId(pDispCommon, pParams) dispcmnCtrlCmdDpTopologyAllocateDisplayId_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpTopologyFreeDisplayId(pDispCommon, pParams) dispcmnCtrlCmdDpTopologyFreeDisplayId_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigStream(pDispCommon, pParams) dispcmnCtrlCmdDpConfigStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigSingleHeadMultiStream(pDispCommon, pParams) dispcmnCtrlCmdDpConfigSingleHeadMultiStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetRateGov(pDispCommon, pParams) dispcmnCtrlCmdDpSetRateGov_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSendACT(pDispCommon, pParams) dispcmnCtrlCmdDpSendACT_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetManualDisplayPort(pDispCommon, pParams) dispcmnCtrlCmdDpSetManualDisplayPort_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetCaps(pDispCommon, pParams) dispcmnCtrlCmdDpGetCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetMSAProperties(pDispCommon, pParams) dispcmnCtrlCmdDpSetMSAProperties_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetStereoMSAProperties(pDispCommon, pParams) dispcmnCtrlCmdDpSetStereoMSAProperties_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGenerateFakeInterrupt(pDispCommon, pParams) dispcmnCtrlCmdDpGenerateFakeInterrupt_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigRadScratchReg(pDispCommon, pParams) dispcmnCtrlCmdDpConfigRadScratchReg_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetTriggerSelect(pDispCommon, pTriggerSelectParams) dispcmnCtrlCmdDpSetTriggerSelect_DISPATCH(pDispCommon, pTriggerSelectParams) +#define dispcmnCtrlCmdDpSetTriggerAll(pDispCommon, pTriggerAllParams) dispcmnCtrlCmdDpSetTriggerAll_DISPATCH(pDispCommon, pTriggerAllParams) +#define dispcmnCtrlCmdDpGetAuxLogData(pDispCommon, pDpAuxBufferWrapper) dispcmnCtrlCmdDpGetAuxLogData_DISPATCH(pDispCommon, pDpAuxBufferWrapper) +#define dispcmnCtrlCmdDpConfigIndexedLinkRates(pDispCommon, pParams) dispcmnCtrlCmdDpConfigIndexedLinkRates_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigureFec(pDispCommon, pParams) dispcmnCtrlCmdDpConfigureFec_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetGenericInfoframe(pDispCommon, pParams) dispcmnCtrlCmdDpGetGenericInfoframe_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetMsaAttributes(pDispCommon, pParams) dispcmnCtrlCmdDpGetMsaAttributes_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigMacroPad(pDispCommon, pParams) dispcmnCtrlCmdDpConfigMacroPad_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data(pDispCommon, pParams) 
dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data(pDispCommon, pParams) dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetRegionalCrcs(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetRegionalCrcs_DISPATCH(pDispCommon, pParams) +#define dispcmnShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispcmnShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispcmnControl(pDisplayApi, pCallContext, pParams) dispcmnControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispcmnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcmnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispcmnGetMemInterMapParams(pRmResource, pParams) dispcmnGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispcmnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispcmnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispcmnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcmnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispcmnSetNotificationShare(pNotifier, pNotifShare) dispcmnSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispcmnControlFilter(pResource, pCallContext, pParams) dispcmnControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispcmnGetRefCount(pResource) dispcmnGetRefCount_DISPATCH(pResource) +#define dispcmnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispcmnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispcmnUnmap(pResource, pCallContext, pCpuMapping) dispcmnUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispcmnControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispcmnControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispcmnCanCopy(pResource) dispcmnCanCopy_DISPATCH(pResource) +#define dispcmnMapTo(pResource, pParams) dispcmnMapTo_DISPATCH(pResource, pParams) +#define dispcmnAddAdditionalDependants(pClient, pResource, pReference) dispcmnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispcmnPreDestruct(pResource) dispcmnPreDestruct_DISPATCH(pResource) +#define dispcmnUnmapFrom(pResource, pParams) dispcmnUnmapFrom_DISPATCH(pResource, pParams) +#define dispcmnGetNotificationListPtr(pNotifier) dispcmnGetNotificationListPtr_DISPATCH(pNotifier) +#define dispcmnControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispcmnControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispcmnGetNotificationShare(pNotifier) dispcmnGetNotificationShare_DISPATCH(pNotifier) +#define dispcmnControlLookup(pResource, pParams, ppEntry) dispcmnControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispcmnMap(pResource, pCallContext, pParams, pCpuMapping) dispcmnMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispcmnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispcmnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispcmnCtrlCmdSystemGetCapsV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetCapsV2_DISPATCH(struct DispCommon *pDispCommon, 
NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetCapsV2__(pDispCommon, pCapsParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetNumHeads_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *pNumHeadsParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetNumHeads_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *pNumHeadsParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetNumHeads__(pDispCommon, pNumHeadsParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetScanline_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *pScanlineParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetScanline_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *pScanlineParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetScanline__(pDispCommon, pScanlineParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetSuppported_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *pSupportedParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetSuppported_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *pSupportedParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetSuppported__(pDispCommon, pSupportedParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetConnectState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *pConnectParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetConnectState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *pConnectParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetConnectState__(pDispCommon, pConnectParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugUnplugState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugUnplugState__(pDispCommon, pHotplugParams); +} + +NV_STATUS dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams); + +static inline NV_STATUS dispcmnCtrlCmdInternalGetHotplugUnplugState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdInternalGetHotplugUnplugState__(pDispCommon, pHotplugParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *pMapParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHeadRoutingMap_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *pMapParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHeadRoutingMap__(pDispCommon, pMapParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetActive_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *pActiveParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetActive_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *pActiveParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetActive__(pDispCommon, pActiveParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetBootDisplays_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *pParams); + 
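+/*
+ * Pattern note (editorial sketch, hypothetical names): each control in this
+ * header is declared in three parts -- an _IMPL prototype for the real
+ * implementation, a _DISPATCH inline that indirects through the object's
+ * function-pointer field, and a convenience macro (defined above) that hides
+ * the indirection. In miniature, for an imaginary control "Foo" taking an
+ * imaginary FOO_PARAMS:
+ *
+ *     NV_STATUS dispcmnCtrlCmdFoo_IMPL(struct DispCommon *p, FOO_PARAMS *pp);
+ *
+ *     static inline NV_STATUS dispcmnCtrlCmdFoo_DISPATCH(struct DispCommon *p, FOO_PARAMS *pp) {
+ *         return p->__dispcmnCtrlCmdFoo__(p, pp);  // pointer installed at object init
+ *     }
+ *
+ *     #define dispcmnCtrlCmdFoo(p, pp) dispcmnCtrlCmdFoo_DISPATCH(p, pp)
+ */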
+static inline NV_STATUS dispcmnCtrlCmdSystemGetBootDisplays_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetBootDisplays__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemCheckSidebandI2cSupport_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemAllocateDisplayBandwidth_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *pHotplugParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugConfig__(pDispCommon, pHotplugParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugEventConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugEventConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemSetHotplugEventConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemSetHotplugEventConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemArmLightweightSupervisor_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemArmLightweightSupervisor__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemConfigVrrPstateSwitch_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS 
*pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetType_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *pDisplayTypeParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetType_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *pDisplayTypeParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetType__(pDispCommon, pDisplayTypeParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetEdidV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetEdidV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetEdidV2__(pDispCommon, pEdidParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetEdidV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *pEdidParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetEdidV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *pEdidParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetEdidV2__(pDispCommon, pEdidParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificFakeDevice_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *pTestParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificFakeDevice_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *pTestParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificFakeDevice__(pDispCommon, pTestParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetConnectorData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *pConnectorParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetConnectorData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *pConnectorParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetConnectorData__(pDispCommon, pConnectorParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiEnable_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiEnable__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificCtrlHdmi_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificCtrlHdmi_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificCtrlHdmi__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *pAllHeadMaskParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetAllHeadMask_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *pAllHeadMaskParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetAllHeadMask__(pDispCommon, pAllHeadMaskParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetOdPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetOdPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *pParams) { 
+ return pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacket__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetSharedGenericPacket__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetOdPacketCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacketCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificOrGetInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificOrGetInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificOrGetInfo__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetPclkLimit_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetPclkLimit_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetPclkLimit__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiSinkCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetMonitorPower_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *setMonitorPowerParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetMonitorPower_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *setMonitorPowerParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetMonitorPower__(pDispCommon, setMonitorPowerParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL(struct DispCommon *pDispCommon, 
NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificApplyEdidOverrideV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__(pDispCommon, pEdidOverrideParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetI2cPortid_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetI2cPortid_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetI2cPortid__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdmiGpuCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdmiScdcData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiScdcData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificIsDirectmodeDisplay_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificDispI2cReadWrite_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificDispI2cReadWrite_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISP_I2C_READ_WRITE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificDispI2cReadWrite__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_INFO_PARAMS *pParams) { + return 
pDispCommon->__dispcmnCtrlCmdDfpGetInfo__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetDisplayportDongleInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *pEldAudioCapsParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpSetEldAudioCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *pEldAudioCapsParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSetEldAudioCaps__(pDispCommon, pEldAudioCapsParams); +} + +NV_STATUS dispcmnCtrlCmdDfpSetAudioEnable_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpSetAudioEnable_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSetAudioEnable__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpUpdateDynamicDfpCache_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpAssignSor_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpAssignSor_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpAssignSor__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpDscCrcControl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpDscCrcControl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpDscCrcControl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpInitMuxData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpInitMuxData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpInitMuxData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetDsiModeTiming_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetDsiModeTiming__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpConfigTwoHeadOneOr_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__(pDispCommon, pParams); +} + +NV_STATUS 
dispcmnCtrlCmdDfpGetPadlinkMask_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetPadlinkMask_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetPadlinkMask__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetFixedModeTiming_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetFixedModeTiming_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_FIXED_MODE_TIMING_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetFixedModeTiming__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpAuxchCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *pAuxchCtrlParams); + +static inline NV_STATUS dispcmnCtrlCmdDpAuxchCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *pAuxchCtrlParams) { + return pDispCommon->__dispcmnCtrlCmdDpAuxchCtrl__(pDispCommon, pAuxchCtrlParams); +} + +NV_STATUS dispcmnCtrlCmdDpAuxchSetSema_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS *pSemaParams); + +static inline NV_STATUS dispcmnCtrlCmdDpAuxchSetSema_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS *pSemaParams) { + return pDispCommon->__dispcmnCtrlCmdDpAuxchSetSema__(pDispCommon, pSemaParams); +} + +NV_STATUS dispcmnCtrlCmdDpCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetLaneData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetLaneData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetLaneData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetLaneData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetLaneData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetLaneData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetTestpattern_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetTestpattern_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTestpattern__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpMainLinkCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpMainLinkCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpMainLinkCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetAudioMuteStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetAudioMuteStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *pParams) { + return 
pDispCommon->__dispcmnCtrlCmdDpSetAudioMuteStream__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetLinkConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetLinkConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetLinkConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetEDPData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetEDPData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetEDPData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpTopologyAllocateDisplayId_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpTopologyAllocateDisplayId__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpTopologyFreeDisplayId_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpTopologyFreeDisplayId__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigStream__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigSingleHeadMultiStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetRateGov_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetRateGov_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetRateGov__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSendACT_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSendACT_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSendACT__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetManualDisplayPort_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetManualDisplayPort_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetManualDisplayPort__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetCaps_IMPL(struct 
DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetCaps__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetMSAProperties_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetMSAProperties_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetMSAProperties__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetStereoMSAProperties_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetStereoMSAProperties__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGenerateFakeInterrupt_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGenerateFakeInterrupt__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigRadScratchReg_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigRadScratchReg_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigRadScratchReg__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetTriggerSelect_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *pTriggerSelectParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetTriggerSelect_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *pTriggerSelectParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTriggerSelect__(pDispCommon, pTriggerSelectParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetTriggerAll_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *pTriggerAllParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetTriggerAll_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *pTriggerAllParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTriggerAll__(pDispCommon, pTriggerAllParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetAuxLogData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pDpAuxBufferWrapper); + +static inline NV_STATUS dispcmnCtrlCmdDpGetAuxLogData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pDpAuxBufferWrapper) { + return pDispCommon->__dispcmnCtrlCmdDpGetAuxLogData__(pDispCommon, pDpAuxBufferWrapper); +} + +NV_STATUS dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigIndexedLinkRates_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigIndexedLinkRates__(pDispCommon, pParams); +} + 
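+/*
+ * Usage sketch (hypothetical caller, not NVOC-generated output): a control is
+ * reached through its convenience macro, which expands to the _DISPATCH inline
+ * and forwards through the per-object function pointer that the class's
+ * func-table initializer wires to the _IMPL (the same wiring is visible for
+ * DispSfUser in __nvoc_init_funcTable_DispSfUser_1 further down). Assuming a
+ * valid DispCommon pointer obtained elsewhere -- pDispCommon below is an
+ * assumption of the example -- a DisplayPort link-config query reads:
+ *
+ *     NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS linkCfg = { 0 };
+ *     // Request fields (e.g. which display to query) would be filled in here.
+ *     NV_STATUS status = dispcmnCtrlCmdDpGetLinkConfig(pDispCommon, &linkCfg);
+ *     // Expands to dispcmnCtrlCmdDpGetLinkConfig_DISPATCH(pDispCommon, &linkCfg),
+ *     // i.e. pDispCommon->__dispcmnCtrlCmdDpGetLinkConfig__(pDispCommon, &linkCfg).
+ *     if (status != NV_OK)
+ *         return status;
+ */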
+NV_STATUS dispcmnCtrlCmdDpConfigureFec_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigureFec_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigureFec__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetGenericInfoframe_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetGenericInfoframe_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetGenericInfoframe__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetMsaAttributes_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetMsaAttributes_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetMsaAttributes__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigMacroPad_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigMacroPad_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigMacroPad__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetRegionalCrcs_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetRegionalCrcs_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetRegionalCrcs__(pDispCommon, pParams); +} + +static inline NvBool dispcmnShareCallback_DISPATCH(struct DispCommon *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispcmnShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispcmnControl_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispcmnControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool dispcmnAccessCallback_DISPATCH(struct DispCommon *pResource, struct RsClient 
*pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispcmnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispcmnGetMemInterMapParams_DISPATCH(struct DispCommon *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispcmnGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispcmnGetMemoryMappingDescriptor_DISPATCH(struct DispCommon *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispcmnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispcmnCheckMemInterUnmap_DISPATCH(struct DispCommon *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispcmnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void dispcmnSetNotificationShare_DISPATCH(struct DispCommon *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispcmnSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispcmnControlFilter_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispcmnControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispcmnGetRefCount_DISPATCH(struct DispCommon *pResource) { + return pResource->__dispcmnGetRefCount__(pResource); +} + +static inline NV_STATUS dispcmnUnregisterEvent_DISPATCH(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispcmnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispcmnUnmap_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispcmnUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispcmnControl_Prologue_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispcmnControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispcmnCanCopy_DISPATCH(struct DispCommon *pResource) { + return pResource->__dispcmnCanCopy__(pResource); +} + +static inline NV_STATUS dispcmnMapTo_DISPATCH(struct DispCommon *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispcmnMapTo__(pResource, pParams); +} + +static inline void dispcmnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCommon *pResource, RsResourceRef *pReference) { + pResource->__dispcmnAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispcmnPreDestruct_DISPATCH(struct DispCommon *pResource) { + pResource->__dispcmnPreDestruct__(pResource); +} + +static inline NV_STATUS dispcmnUnmapFrom_DISPATCH(struct DispCommon *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispcmnUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispcmnGetNotificationListPtr_DISPATCH(struct DispCommon *pNotifier) { + return pNotifier->__dispcmnGetNotificationListPtr__(pNotifier); +} + +static inline void dispcmnControl_Epilogue_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispcmnControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct 
NotifShare *dispcmnGetNotificationShare_DISPATCH(struct DispCommon *pNotifier) { + return pNotifier->__dispcmnGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispcmnControlLookup_DISPATCH(struct DispCommon *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispcmnControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispcmnMap_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispcmnMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispcmnGetOrAllocNotifShare_DISPATCH(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispcmnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispcmnConstruct_IMPL(struct DispCommon *arg_pDispCommon, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispcmnConstruct(arg_pDispCommon, arg_pCallContext, arg_pParams) dispcmnConstruct_IMPL(arg_pDispCommon, arg_pCallContext, arg_pParams) +NV_STATUS dispcmnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDispCommon, struct DispCommon **ppDispCommon); +#define dispcmnGetByHandle(pClient, hDispCommon, ppDispCommon) dispcmnGetByHandle_IMPL(pClient, hDispCommon, ppDispCommon) +void dispcmnGetByDevice_IMPL(struct RsClient *pClient, NvHandle hDevice, struct DispCommon **ppDispCommon); +#define dispcmnGetByDevice(pClient, hDevice, ppDispCommon) dispcmnGetByDevice_IMPL(pClient, hDevice, ppDispCommon) +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use dispchnGetByHandle. + */ +NV_STATUS CliFindDispChannelInfo(NvHandle, NvHandle, struct DispChannel **ppDispChannel, NvHandle*); + +/** + * @warning This function is deprecated! Please use dispcmnGetByHandle. + */ +NvBool CliGetDispCommonInfo(NvHandle, NvHandle, struct DisplayApi **); + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. + */ +NvBool CliGetDispInfo(NvHandle, NvHandle, struct DisplayApi **); + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. 
+ */ +struct DisplayApi *CliGetDispFromDispHandle(NvHandle hClient, NvHandle hDisp); + +#endif // DISP_OBJS_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_OBJS_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c new file mode 100644 index 0000000..c7c3a33 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c @@ -0,0 +1,329 @@ +#define NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_sf_user_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xba7439 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_DispSfUser(DispSfUser*); +void __nvoc_init_funcTable_DispSfUser(DispSfUser*); +NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispSfUser(DispSfUser*); +void __nvoc_dtor_DispSfUser(DispSfUser*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSfUser; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_DispSfUser = { + /*pClassDef=*/ &__nvoc_class_def_DispSfUser, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSfUser, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispSfUser = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_DispSfUser_DispSfUser, + &__nvoc_rtti_DispSfUser_GpuResource, + &__nvoc_rtti_DispSfUser_RmResource, + &__nvoc_rtti_DispSfUser_RmResourceCommon, + &__nvoc_rtti_DispSfUser_RsResource, + &__nvoc_rtti_DispSfUser_Object, + }, +}; + +const struct 
NVOC_CLASS_DEF __nvoc_class_def_DispSfUser = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispSfUser), + /*classId=*/ classId(DispSfUser), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispSfUser", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSfUser, + /*pCastInfo=*/ &__nvoc_castinfo_DispSfUser, + /*pExportInfo=*/ &__nvoc_export_info_DispSfUser +}; + +static NV_STATUS __nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispsfGetRegBaseOffsetAndSize((struct DispSfUser *)(((unsigned char *)pDispSfUser) - __nvoc_rtti_DispSfUser_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_dispsfShareCallback(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfControl(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfUnmap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemInterMapParams(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfGetMapAddrSpace(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle(struct DispSfUser *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfControlFilter(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_dispsfAddAdditionalDependants(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_DispSfUser_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_dispsfGetRefCount(struct DispSfUser *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfCheckMemInterUnmap(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfMapTo(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfControl_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_dispsfCanCopy(struct DispSfUser *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfInternalControlForward(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_dispsfPreDestruct(struct DispSfUser *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfUnmapFrom(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispsfControl_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfControlLookup(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfMap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispsfAccessCallback(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO 
__nvoc_export_info_DispSfUser = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_DispSfUser(DispSfUser *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispSfUser(DispSfUser *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail_GpuResource; + __nvoc_init_dataField_DispSfUser(pThis); + + status = __nvoc_dispsfConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail__init; + goto __nvoc_ctor_DispSfUser_exit; // Success + +__nvoc_ctor_DispSfUser_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DispSfUser_fail_GpuResource: +__nvoc_ctor_DispSfUser_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispSfUser_1(DispSfUser *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispsfGetRegBaseOffsetAndSize__ = &dispsfGetRegBaseOffsetAndSize_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize; + + pThis->__dispsfShareCallback__ = &__nvoc_thunk_GpuResource_dispsfShareCallback; + + pThis->__dispsfControl__ = &__nvoc_thunk_GpuResource_dispsfControl; + + pThis->__dispsfUnmap__ = &__nvoc_thunk_GpuResource_dispsfUnmap; + + pThis->__dispsfGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispsfGetMemInterMapParams; + + pThis->__dispsfGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor; + + pThis->__dispsfGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispsfGetMapAddrSpace; + + pThis->__dispsfGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle; + + pThis->__dispsfControlFilter__ = &__nvoc_thunk_RsResource_dispsfControlFilter; + + pThis->__dispsfAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispsfAddAdditionalDependants; + + pThis->__dispsfGetRefCount__ = &__nvoc_thunk_RsResource_dispsfGetRefCount; + + pThis->__dispsfCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispsfCheckMemInterUnmap; + + pThis->__dispsfMapTo__ = &__nvoc_thunk_RsResource_dispsfMapTo; + + pThis->__dispsfControl_Prologue__ = &__nvoc_thunk_RmResource_dispsfControl_Prologue; + + pThis->__dispsfCanCopy__ = &__nvoc_thunk_RsResource_dispsfCanCopy; + + pThis->__dispsfInternalControlForward__ = &__nvoc_thunk_GpuResource_dispsfInternalControlForward; + + pThis->__dispsfPreDestruct__ = &__nvoc_thunk_RsResource_dispsfPreDestruct; + + pThis->__dispsfUnmapFrom__ = &__nvoc_thunk_RsResource_dispsfUnmapFrom; + + pThis->__dispsfControl_Epilogue__ = &__nvoc_thunk_RmResource_dispsfControl_Epilogue; + + pThis->__dispsfControlLookup__ = &__nvoc_thunk_RsResource_dispsfControlLookup; + + pThis->__dispsfMap__ = &__nvoc_thunk_GpuResource_dispsfMap; + + pThis->__dispsfAccessCallback__ = &__nvoc_thunk_RmResource_dispsfAccessCallback; +} + +void __nvoc_init_funcTable_DispSfUser(DispSfUser *pThis) { + __nvoc_init_funcTable_DispSfUser_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void 
__nvoc_init_DispSfUser(DispSfUser *pThis) { + pThis->__nvoc_pbase_DispSfUser = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_DispSfUser(pThis); +} + +NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispSfUser *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispSfUser)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispSfUser)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispSfUser); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispSfUser(pThis); + status = __nvoc_ctor_DispSfUser(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispSfUser_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispSfUser_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispSfUser(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h new file mode 100644 index 0000000..0baf74e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h @@ -0,0 +1,239 @@ +#ifndef _G_DISP_SF_USER_NVOC_H_ +#define _G_DISP_SF_USER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispSfUser class. +* +******************************************************************************/ + +#include "g_disp_sf_user_nvoc.h" + +#ifndef DISP_SF_USER_H +#define DISP_SF_USER_H + +#include "gpu/gpu_resource.h" + +/*! + * RM internal class representing NVXXXX_DISP_SF_USER + */ +#ifdef NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispSfUser { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct DispSfUser *__nvoc_pbase_DispSfUser; + NV_STATUS (*__dispsfGetRegBaseOffsetAndSize__)(struct DispSfUser *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__dispsfShareCallback__)(struct DispSfUser *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispsfControl__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispsfUnmap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispsfGetMemInterMapParams__)(struct DispSfUser *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispsfGetMemoryMappingDescriptor__)(struct DispSfUser *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispsfGetMapAddrSpace__)(struct DispSfUser *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__dispsfGetInternalObjectHandle__)(struct DispSfUser *); + NV_STATUS (*__dispsfControlFilter__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__dispsfAddAdditionalDependants__)(struct RsClient *, struct DispSfUser *, RsResourceRef *); + NvU32 (*__dispsfGetRefCount__)(struct DispSfUser *); + NV_STATUS (*__dispsfCheckMemInterUnmap__)(struct DispSfUser *, NvBool); + NV_STATUS (*__dispsfMapTo__)(struct DispSfUser *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispsfControl_Prologue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispsfCanCopy__)(struct DispSfUser *); + NV_STATUS 
(*__dispsfInternalControlForward__)(struct DispSfUser *, NvU32, void *, NvU32); + void (*__dispsfPreDestruct__)(struct DispSfUser *); + NV_STATUS (*__dispsfUnmapFrom__)(struct DispSfUser *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispsfControl_Epilogue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispsfControlLookup__)(struct DispSfUser *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispsfMap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispsfAccessCallback__)(struct DispSfUser *, struct RsClient *, void *, RsAccessRight); + NvU32 ControlOffset; + NvU32 ControlLength; +}; + +#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__ +#define __NVOC_CLASS_DispSfUser_TYPEDEF__ +typedef struct DispSfUser DispSfUser; +#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSfUser +#define __nvoc_class_id_DispSfUser 0xba7439 +#endif /* __nvoc_class_id_DispSfUser */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser; + +#define __staticCast_DispSfUser(pThis) \ + ((pThis)->__nvoc_pbase_DispSfUser) + +#ifdef __nvoc_disp_sf_user_h_disabled +#define __dynamicCast_DispSfUser(pThis) ((DispSfUser*)NULL) +#else //__nvoc_disp_sf_user_h_disabled +#define __dynamicCast_DispSfUser(pThis) \ + ((DispSfUser*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSfUser))) +#endif //__nvoc_disp_sf_user_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispSfUser(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispSfUser((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispsfGetRegBaseOffsetAndSize(pDispSfUser, pGpu, pOffset, pSize) dispsfGetRegBaseOffsetAndSize_DISPATCH(pDispSfUser, pGpu, pOffset, pSize) +#define dispsfShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispsfShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispsfControl(pGpuResource, pCallContext, pParams) dispsfControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispsfUnmap(pGpuResource, pCallContext, pCpuMapping) dispsfUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispsfGetMemInterMapParams(pRmResource, pParams) dispsfGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispsfGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispsfGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispsfGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispsfGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispsfGetInternalObjectHandle(pGpuResource) dispsfGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispsfControlFilter(pResource, pCallContext, pParams) dispsfControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispsfAddAdditionalDependants(pClient, pResource, pReference) dispsfAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispsfGetRefCount(pResource) dispsfGetRefCount_DISPATCH(pResource) +#define dispsfCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispsfCheckMemInterUnmap_DISPATCH(pRmResource, 
bSubdeviceHandleProvided) +#define dispsfMapTo(pResource, pParams) dispsfMapTo_DISPATCH(pResource, pParams) +#define dispsfControl_Prologue(pResource, pCallContext, pParams) dispsfControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfCanCopy(pResource) dispsfCanCopy_DISPATCH(pResource) +#define dispsfInternalControlForward(pGpuResource, command, pParams, size) dispsfInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispsfPreDestruct(pResource) dispsfPreDestruct_DISPATCH(pResource) +#define dispsfUnmapFrom(pResource, pParams) dispsfUnmapFrom_DISPATCH(pResource, pParams) +#define dispsfControl_Epilogue(pResource, pCallContext, pParams) dispsfControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfControlLookup(pResource, pParams, ppEntry) dispsfControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispsfMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispsfMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispsfAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispsfAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS dispsfGetRegBaseOffsetAndSize_IMPL(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS dispsfGetRegBaseOffsetAndSize_DISPATCH(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispSfUser->__dispsfGetRegBaseOffsetAndSize__(pDispSfUser, pGpu, pOffset, pSize); +} + +static inline NvBool dispsfShareCallback_DISPATCH(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispsfShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispsfControl_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispsfControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispsfUnmap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispsfUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispsfGetMemInterMapParams_DISPATCH(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispsfGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispsfGetMemoryMappingDescriptor_DISPATCH(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispsfGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispsfGetMapAddrSpace_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispsfGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle dispsfGetInternalObjectHandle_DISPATCH(struct DispSfUser *pGpuResource) { + return pGpuResource->__dispsfGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispsfControlFilter_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispsfControlFilter__(pResource, pCallContext, pParams); +} + +static inline void dispsfAddAdditionalDependants_DISPATCH(struct 
RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) { + pResource->__dispsfAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 dispsfGetRefCount_DISPATCH(struct DispSfUser *pResource) { + return pResource->__dispsfGetRefCount__(pResource); +} + +static inline NV_STATUS dispsfCheckMemInterUnmap_DISPATCH(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispsfCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispsfMapTo_DISPATCH(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispsfMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispsfControl_Prologue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispsfControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispsfCanCopy_DISPATCH(struct DispSfUser *pResource) { + return pResource->__dispsfCanCopy__(pResource); +} + +static inline NV_STATUS dispsfInternalControlForward_DISPATCH(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispsfInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void dispsfPreDestruct_DISPATCH(struct DispSfUser *pResource) { + pResource->__dispsfPreDestruct__(pResource); +} + +static inline NV_STATUS dispsfUnmapFrom_DISPATCH(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispsfUnmapFrom__(pResource, pParams); +} + +static inline void dispsfControl_Epilogue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispsfControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispsfControlLookup_DISPATCH(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispsfControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispsfMap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispsfMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispsfAccessCallback_DISPATCH(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispsfAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispsfConstruct_IMPL(struct DispSfUser *arg_pDispSfUser, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispsfConstruct(arg_pDispSfUser, arg_pCallContext, arg_pParams) dispsfConstruct_IMPL(arg_pDispSfUser, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_SF_USER_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_SF_USER_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h new file mode 100644 index 0000000..df1e83a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h @@ -0,0 +1,1518 @@ +#ifndef _G_ENG_DESC_NVOC_H_ +#define _G_ENG_DESC_NVOC_H_ +#include 
"nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_eng_desc_nvoc.h" + +#ifndef _ENG_DESC_H_ +#define _ENG_DESC_H_ + +#include "core/core.h" + +// +// Engine descriptors +// +// An ENGDESCRIPTOR carries both an NVOC_CLASS_ID and an instance ID. For example, +// to specify the engine CE1, use MKENGDESC(classId(OBJCE), 1). +// +#define ENGDESC_CLASS 31:8 +#define ENGDESC_INST 7:0 + +#define MKENGDESC(class, inst) ((((NvU32)(class)) << SF_SHIFT(ENGDESC_CLASS)) | \ + ((inst ) << SF_SHIFT(ENGDESC_INST ))) + +#define ENGDESC_FIELD(desc, field) (((desc) >> SF_SHIFT(ENGDESC ## field)) & \ + SF_MASK(ENGDESC ## field)) + +typedef NvU32 ENGDESCRIPTOR, *PENGDESCRIPTOR; + + +// +// Class declarations to get classIds for use with ENGDESCRIPTOR +// +struct OBJINVALID; + +#ifndef __NVOC_CLASS_OBJINVALID_TYPEDEF__ +#define __NVOC_CLASS_OBJINVALID_TYPEDEF__ +typedef struct OBJINVALID OBJINVALID; +#endif /* __NVOC_CLASS_OBJINVALID_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINVALID +#define __nvoc_class_id_OBJINVALID 0xb33b15 +#endif /* __nvoc_class_id_OBJINVALID */ + + // classId only. 
Not a real class +struct OBJSWENG; + +#ifndef __NVOC_CLASS_OBJSWENG_TYPEDEF__ +#define __NVOC_CLASS_OBJSWENG_TYPEDEF__ +typedef struct OBJSWENG OBJSWENG; +#endif /* __NVOC_CLASS_OBJSWENG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSWENG +#define __nvoc_class_id_OBJSWENG 0x95a6f5 +#endif /* __nvoc_class_id_OBJSWENG */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct Falcon; + +#ifndef __NVOC_CLASS_Falcon_TYPEDEF__ +#define __NVOC_CLASS_Falcon_TYPEDEF__ +typedef struct Falcon Falcon; +#endif /* __NVOC_CLASS_Falcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Falcon +#define __nvoc_class_id_Falcon 0xdc5264 +#endif /* __nvoc_class_id_Falcon */ + + +struct OBJMC; + +#ifndef __NVOC_CLASS_OBJMC_TYPEDEF__ +#define __NVOC_CLASS_OBJMC_TYPEDEF__ +typedef struct OBJMC OBJMC; +#endif /* __NVOC_CLASS_OBJMC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJMC +#define __nvoc_class_id_OBJMC 0x9aad0e +#endif /* __nvoc_class_id_OBJMC */ + + +struct KernelMc; + +#ifndef __NVOC_CLASS_KernelMc_TYPEDEF__ +#define __NVOC_CLASS_KernelMc_TYPEDEF__ +typedef struct KernelMc KernelMc; +#endif /* __NVOC_CLASS_KernelMc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMc +#define __nvoc_class_id_KernelMc 0x3827ff +#endif /* __nvoc_class_id_KernelMc */ + + +struct PrivRing; + +#ifndef __NVOC_CLASS_PrivRing_TYPEDEF__ +#define __NVOC_CLASS_PrivRing_TYPEDEF__ +typedef struct PrivRing PrivRing; +#endif /* __NVOC_CLASS_PrivRing_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PrivRing +#define __nvoc_class_id_PrivRing 0x4c57c4 +#endif /* __nvoc_class_id_PrivRing */ + + +struct SwIntr; + +#ifndef __NVOC_CLASS_SwIntr_TYPEDEF__ +#define __NVOC_CLASS_SwIntr_TYPEDEF__ +typedef struct SwIntr SwIntr; +#endif /* __NVOC_CLASS_SwIntr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SwIntr +#define __nvoc_class_id_SwIntr 0x5ca633 +#endif /* __nvoc_class_id_SwIntr */ + + +struct MemorySystem; + +#ifndef __NVOC_CLASS_MemorySystem_TYPEDEF__ +#define __NVOC_CLASS_MemorySystem_TYPEDEF__ +typedef struct MemorySystem MemorySystem; +#endif /* __NVOC_CLASS_MemorySystem_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemorySystem +#define __nvoc_class_id_MemorySystem 0x174e21 +#endif /* __nvoc_class_id_MemorySystem */ + + +struct KernelMemorySystem; + +#ifndef __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +#define __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +typedef struct KernelMemorySystem KernelMemorySystem; +#endif /* __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMemorySystem +#define __nvoc_class_id_KernelMemorySystem 0x7faff1 +#endif /* __nvoc_class_id_KernelMemorySystem */ + + +struct MemoryManager; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + + +struct OBJFBFLCN; + +#ifndef __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ +#define __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ +typedef struct OBJFBFLCN OBJFBFLCN; +#endif /* __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFBFLCN +#define __nvoc_class_id_OBJFBFLCN 0x8a20bf +#endif /* __nvoc_class_id_OBJFBFLCN */ + + +struct OBJHSHUBMANAGER; + +#ifndef __NVOC_CLASS_OBJHSHUBMANAGER_TYPEDEF__ +#define 
__NVOC_CLASS_OBJHSHUBMANAGER_TYPEDEF__ +typedef struct OBJHSHUBMANAGER OBJHSHUBMANAGER; +#endif /* __NVOC_CLASS_OBJHSHUBMANAGER_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHSHUBMANAGER +#define __nvoc_class_id_OBJHSHUBMANAGER 0xbb32b9 +#endif /* __nvoc_class_id_OBJHSHUBMANAGER */ + + +struct Hshub; + +#ifndef __NVOC_CLASS_Hshub_TYPEDEF__ +#define __NVOC_CLASS_Hshub_TYPEDEF__ +typedef struct Hshub Hshub; +#endif /* __NVOC_CLASS_Hshub_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hshub +#define __nvoc_class_id_Hshub 0x5b3331 +#endif /* __nvoc_class_id_Hshub */ + + +struct OBJTMR; + +#ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__ +#define __NVOC_CLASS_OBJTMR_TYPEDEF__ +typedef struct OBJTMR OBJTMR; +#endif /* __NVOC_CLASS_OBJTMR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTMR +#define __nvoc_class_id_OBJTMR 0x9ddede +#endif /* __nvoc_class_id_OBJTMR */ + + +struct VirtMemAllocator; + +#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +typedef struct VirtMemAllocator VirtMemAllocator; +#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtMemAllocator +#define __nvoc_class_id_VirtMemAllocator 0x899e48 +#endif /* __nvoc_class_id_VirtMemAllocator */ + + +struct Graphics; + +#ifndef __NVOC_CLASS_Graphics_TYPEDEF__ +#define __NVOC_CLASS_Graphics_TYPEDEF__ +typedef struct Graphics Graphics; +#endif /* __NVOC_CLASS_Graphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Graphics +#define __nvoc_class_id_Graphics 0xd334df +#endif /* __nvoc_class_id_Graphics */ + + +struct OBJGR; + +#ifndef __NVOC_CLASS_OBJGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGR_TYPEDEF__ +typedef struct OBJGR OBJGR; +#endif /* __NVOC_CLASS_OBJGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGR +#define __nvoc_class_id_OBJGR 0xb0940a +#endif /* __nvoc_class_id_OBJGR */ + + // classId only. Not a real class. 
Bug 200664045 +struct GraphicsManager; + +#ifndef __NVOC_CLASS_GraphicsManager_TYPEDEF__ +#define __NVOC_CLASS_GraphicsManager_TYPEDEF__ +typedef struct GraphicsManager GraphicsManager; +#endif /* __NVOC_CLASS_GraphicsManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GraphicsManager +#define __nvoc_class_id_GraphicsManager 0x2f465a +#endif /* __nvoc_class_id_GraphicsManager */ + + +struct KernelGraphicsManager; + +#ifndef __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +typedef struct KernelGraphicsManager KernelGraphicsManager; +#endif /* __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsManager +#define __nvoc_class_id_KernelGraphicsManager 0xd22179 +#endif /* __nvoc_class_id_KernelGraphicsManager */ + + +struct MIGManager; + +#ifndef __NVOC_CLASS_MIGManager_TYPEDEF__ +#define __NVOC_CLASS_MIGManager_TYPEDEF__ +typedef struct MIGManager MIGManager; +#endif /* __NVOC_CLASS_MIGManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGManager +#define __nvoc_class_id_MIGManager 0xfd75d0 +#endif /* __nvoc_class_id_MIGManager */ + + +struct KernelMIGManager; + +#ifndef __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +#define __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +typedef struct KernelMIGManager KernelMIGManager; +#endif /* __NVOC_CLASS_KernelMIGManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMIGManager +#define __nvoc_class_id_KernelMIGManager 0x01c1bf +#endif /* __nvoc_class_id_KernelMIGManager */ + + +struct SMDebugger; + +#ifndef __NVOC_CLASS_SMDebugger_TYPEDEF__ +#define __NVOC_CLASS_SMDebugger_TYPEDEF__ +typedef struct SMDebugger SMDebugger; +#endif /* __NVOC_CLASS_SMDebugger_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SMDebugger +#define __nvoc_class_id_SMDebugger 0x12018b +#endif /* __nvoc_class_id_SMDebugger */ + + +struct KernelGraphics; + +#ifndef __NVOC_CLASS_KernelGraphics_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphics_TYPEDEF__ +typedef struct KernelGraphics KernelGraphics; +#endif /* __NVOC_CLASS_KernelGraphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphics +#define __nvoc_class_id_KernelGraphics 0xea3fa9 +#endif /* __nvoc_class_id_KernelGraphics */ + + +struct KernelFifo; + +#ifndef __NVOC_CLASS_KernelFifo_TYPEDEF__ +#define __NVOC_CLASS_KernelFifo_TYPEDEF__ +typedef struct KernelFifo KernelFifo; +#endif /* __NVOC_CLASS_KernelFifo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFifo +#define __nvoc_class_id_KernelFifo 0xf3e155 +#endif /* __nvoc_class_id_KernelFifo */ + + +struct OBJFIFO; + +#ifndef __NVOC_CLASS_OBJFIFO_TYPEDEF__ +#define __NVOC_CLASS_OBJFIFO_TYPEDEF__ +typedef struct OBJFIFO OBJFIFO; +#endif /* __NVOC_CLASS_OBJFIFO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFIFO +#define __nvoc_class_id_OBJFIFO 0xb02365 +#endif /* __nvoc_class_id_OBJFIFO */ + + +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + +struct OBJBUS; + +#ifndef __NVOC_CLASS_OBJBUS_TYPEDEF__ +#define __NVOC_CLASS_OBJBUS_TYPEDEF__ +typedef struct OBJBUS OBJBUS; +#endif /* __NVOC_CLASS_OBJBUS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBUS +#define __nvoc_class_id_OBJBUS 0xcc4c31 +#endif /* __nvoc_class_id_OBJBUS */ + + +struct KernelBus; + +#ifndef __NVOC_CLASS_KernelBus_TYPEDEF__ +#define __NVOC_CLASS_KernelBus_TYPEDEF__ +typedef struct KernelBus KernelBus; +#endif /* 
__NVOC_CLASS_KernelBus_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBus +#define __nvoc_class_id_KernelBus 0xd2ac57 +#endif /* __nvoc_class_id_KernelBus */ + + +struct OBJINFOROM; + +#ifndef __NVOC_CLASS_OBJINFOROM_TYPEDEF__ +#define __NVOC_CLASS_OBJINFOROM_TYPEDEF__ +typedef struct OBJINFOROM OBJINFOROM; +#endif /* __NVOC_CLASS_OBJINFOROM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINFOROM +#define __nvoc_class_id_OBJINFOROM 0x0e1639 +#endif /* __nvoc_class_id_OBJINFOROM */ + + +struct Perf; + +#ifndef __NVOC_CLASS_Perf_TYPEDEF__ +#define __NVOC_CLASS_Perf_TYPEDEF__ +typedef struct Perf Perf; +#endif /* __NVOC_CLASS_Perf_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Perf +#define __nvoc_class_id_Perf 0xed0b65 +#endif /* __nvoc_class_id_Perf */ + + +struct KernelPerf; + +#ifndef __NVOC_CLASS_KernelPerf_TYPEDEF__ +#define __NVOC_CLASS_KernelPerf_TYPEDEF__ +typedef struct KernelPerf KernelPerf; +#endif /* __NVOC_CLASS_KernelPerf_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPerf +#define __nvoc_class_id_KernelPerf 0xc53a57 +#endif /* __nvoc_class_id_KernelPerf */ + + +struct OBJBIF; + +#ifndef __NVOC_CLASS_OBJBIF_TYPEDEF__ +#define __NVOC_CLASS_OBJBIF_TYPEDEF__ +typedef struct OBJBIF OBJBIF; +#endif /* __NVOC_CLASS_OBJBIF_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBIF +#define __nvoc_class_id_OBJBIF 0xd1c956 +#endif /* __nvoc_class_id_OBJBIF */ + + +struct KernelBif; + +#ifndef __NVOC_CLASS_KernelBif_TYPEDEF__ +#define __NVOC_CLASS_KernelBif_TYPEDEF__ +typedef struct KernelBif KernelBif; +#endif /* __NVOC_CLASS_KernelBif_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBif +#define __nvoc_class_id_KernelBif 0xdbe523 +#endif /* __nvoc_class_id_KernelBif */ + + +struct OBJSF; + +#ifndef __NVOC_CLASS_OBJSF_TYPEDEF__ +#define __NVOC_CLASS_OBJSF_TYPEDEF__ +typedef struct OBJSF OBJSF; +#endif /* __NVOC_CLASS_OBJSF_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSF +#define __nvoc_class_id_OBJSF 0x0bd720 +#endif /* __nvoc_class_id_OBJSF */ + + +struct OBJGPIO; + +#ifndef __NVOC_CLASS_OBJGPIO_TYPEDEF__ +#define __NVOC_CLASS_OBJGPIO_TYPEDEF__ +typedef struct OBJGPIO OBJGPIO; +#endif /* __NVOC_CLASS_OBJGPIO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPIO +#define __nvoc_class_id_OBJGPIO 0x05c7b5 +#endif /* __nvoc_class_id_OBJGPIO */ + + +struct ClockManager; + +#ifndef __NVOC_CLASS_ClockManager_TYPEDEF__ +#define __NVOC_CLASS_ClockManager_TYPEDEF__ +typedef struct ClockManager ClockManager; +#endif /* __NVOC_CLASS_ClockManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ClockManager +#define __nvoc_class_id_ClockManager 0xbcadd3 +#endif /* __nvoc_class_id_ClockManager */ + + +struct KernelDisplay; + +#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__ +#define __NVOC_CLASS_KernelDisplay_TYPEDEF__ +typedef struct KernelDisplay KernelDisplay; +#endif /* __NVOC_CLASS_KernelDisplay_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelDisplay +#define __nvoc_class_id_KernelDisplay 0x55952e +#endif /* __nvoc_class_id_KernelDisplay */ + + +struct OBJDISP; + +#ifndef __NVOC_CLASS_OBJDISP_TYPEDEF__ +#define __NVOC_CLASS_OBJDISP_TYPEDEF__ +typedef struct OBJDISP OBJDISP; +#endif /* __NVOC_CLASS_OBJDISP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISP +#define __nvoc_class_id_OBJDISP 0xd1755e +#endif /* __nvoc_class_id_OBJDISP */ + + +struct OBJDPU; + +#ifndef __NVOC_CLASS_OBJDPU_TYPEDEF__ +#define __NVOC_CLASS_OBJDPU_TYPEDEF__ +typedef struct OBJDPU OBJDPU; +#endif /* __NVOC_CLASS_OBJDPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDPU +#define __nvoc_class_id_OBJDPU 0x23486d +#endif /* __nvoc_class_id_OBJDPU */ + + +struct OBJFAN; + +#ifndef 
__NVOC_CLASS_OBJFAN_TYPEDEF__ +#define __NVOC_CLASS_OBJFAN_TYPEDEF__ +typedef struct OBJFAN OBJFAN; +#endif /* __NVOC_CLASS_OBJFAN_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFAN +#define __nvoc_class_id_OBJFAN 0xda9ade +#endif /* __nvoc_class_id_OBJFAN */ + + +struct DisplayInstanceMemory; + +#ifndef __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +#define __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +typedef struct DisplayInstanceMemory DisplayInstanceMemory; +#endif /* __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayInstanceMemory +#define __nvoc_class_id_DisplayInstanceMemory 0x8223e2 +#endif /* __nvoc_class_id_DisplayInstanceMemory */ + + +struct KernelHead; + +#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__ +#define __NVOC_CLASS_KernelHead_TYPEDEF__ +typedef struct KernelHead KernelHead; +#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelHead +#define __nvoc_class_id_KernelHead 0x0145e6 +#endif /* __nvoc_class_id_KernelHead */ + + +struct OBJVOLT; + +#ifndef __NVOC_CLASS_OBJVOLT_TYPEDEF__ +#define __NVOC_CLASS_OBJVOLT_TYPEDEF__ +typedef struct OBJVOLT OBJVOLT; +#endif /* __NVOC_CLASS_OBJVOLT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVOLT +#define __nvoc_class_id_OBJVOLT 0xa68120 +#endif /* __nvoc_class_id_OBJVOLT */ + + +struct Intr; + +#ifndef __NVOC_CLASS_Intr_TYPEDEF__ +#define __NVOC_CLASS_Intr_TYPEDEF__ +typedef struct Intr Intr; +#endif /* __NVOC_CLASS_Intr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Intr +#define __nvoc_class_id_Intr 0xc06e44 +#endif /* __nvoc_class_id_Intr */ + + +struct OBJHDA; + +#ifndef __NVOC_CLASS_OBJHDA_TYPEDEF__ +#define __NVOC_CLASS_OBJHDA_TYPEDEF__ +typedef struct OBJHDA OBJHDA; +#endif /* __NVOC_CLASS_OBJHDA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDA +#define __nvoc_class_id_OBJHDA 0xd3bfb4 +#endif /* __nvoc_class_id_OBJHDA */ + + +struct OBJI2C; + +#ifndef __NVOC_CLASS_OBJI2C_TYPEDEF__ +#define __NVOC_CLASS_OBJI2C_TYPEDEF__ +typedef struct OBJI2C OBJI2C; +#endif /* __NVOC_CLASS_OBJI2C_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJI2C +#define __nvoc_class_id_OBJI2C 0x2bc374 +#endif /* __nvoc_class_id_OBJI2C */ + + +struct KernelRc; + +#ifndef __NVOC_CLASS_KernelRc_TYPEDEF__ +#define __NVOC_CLASS_KernelRc_TYPEDEF__ +typedef struct KernelRc KernelRc; +#endif /* __NVOC_CLASS_KernelRc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelRc +#define __nvoc_class_id_KernelRc 0x4888db +#endif /* __nvoc_class_id_KernelRc */ + + +struct OBJRC; + +#ifndef __NVOC_CLASS_OBJRC_TYPEDEF__ +#define __NVOC_CLASS_OBJRC_TYPEDEF__ +typedef struct OBJRC OBJRC; +#endif /* __NVOC_CLASS_OBJRC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJRC +#define __nvoc_class_id_OBJRC 0x42d150 +#endif /* __nvoc_class_id_OBJRC */ + + +struct OBJSOR; + +#ifndef __NVOC_CLASS_OBJSOR_TYPEDEF__ +#define __NVOC_CLASS_OBJSOR_TYPEDEF__ +typedef struct OBJSOR OBJSOR; +#endif /* __NVOC_CLASS_OBJSOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSOR +#define __nvoc_class_id_OBJSOR 0x5ccbfa +#endif /* __nvoc_class_id_OBJSOR */ + + +struct OBJDAC; + +#ifndef __NVOC_CLASS_OBJDAC_TYPEDEF__ +#define __NVOC_CLASS_OBJDAC_TYPEDEF__ +typedef struct OBJDAC OBJDAC; +#endif /* __NVOC_CLASS_OBJDAC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDAC +#define __nvoc_class_id_OBJDAC 0x4b1802 +#endif /* __nvoc_class_id_OBJDAC */ + + +struct OBJPIOR; + +#ifndef __NVOC_CLASS_OBJPIOR_TYPEDEF__ +#define __NVOC_CLASS_OBJPIOR_TYPEDEF__ +typedef struct OBJPIOR OBJPIOR; +#endif /* __NVOC_CLASS_OBJPIOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPIOR +#define __nvoc_class_id_OBJPIOR 0x0128a3 
+#endif /* __nvoc_class_id_OBJPIOR */ + + +struct OBJHEAD; + +#ifndef __NVOC_CLASS_OBJHEAD_TYPEDEF__ +#define __NVOC_CLASS_OBJHEAD_TYPEDEF__ +typedef struct OBJHEAD OBJHEAD; +#endif /* __NVOC_CLASS_OBJHEAD_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHEAD +#define __nvoc_class_id_OBJHEAD 0x74dd86 +#endif /* __nvoc_class_id_OBJHEAD */ + + +struct OBJVGA; + +#ifndef __NVOC_CLASS_OBJVGA_TYPEDEF__ +#define __NVOC_CLASS_OBJVGA_TYPEDEF__ +typedef struct OBJVGA OBJVGA; +#endif /* __NVOC_CLASS_OBJVGA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVGA +#define __nvoc_class_id_OBJVGA 0x84e0bc +#endif /* __nvoc_class_id_OBJVGA */ + + +struct OBJSTEREO; + +#ifndef __NVOC_CLASS_OBJSTEREO_TYPEDEF__ +#define __NVOC_CLASS_OBJSTEREO_TYPEDEF__ +typedef struct OBJSTEREO OBJSTEREO; +#endif /* __NVOC_CLASS_OBJSTEREO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSTEREO +#define __nvoc_class_id_OBJSTEREO 0x9fd931 +#endif /* __nvoc_class_id_OBJSTEREO */ + + +struct OBJOR; + +#ifndef __NVOC_CLASS_OBJOR_TYPEDEF__ +#define __NVOC_CLASS_OBJOR_TYPEDEF__ +typedef struct OBJOR OBJOR; +#endif /* __NVOC_CLASS_OBJOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOR +#define __nvoc_class_id_OBJOR 0x215d6b +#endif /* __nvoc_class_id_OBJOR */ + + +struct OBJBSP; + +#ifndef __NVOC_CLASS_OBJBSP_TYPEDEF__ +#define __NVOC_CLASS_OBJBSP_TYPEDEF__ +typedef struct OBJBSP OBJBSP; +#endif /* __NVOC_CLASS_OBJBSP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBSP +#define __nvoc_class_id_OBJBSP 0x8f99e1 +#endif /* __nvoc_class_id_OBJBSP */ + + +struct OBJCIPHER; + +#ifndef __NVOC_CLASS_OBJCIPHER_TYPEDEF__ +#define __NVOC_CLASS_OBJCIPHER_TYPEDEF__ +typedef struct OBJCIPHER OBJCIPHER; +#endif /* __NVOC_CLASS_OBJCIPHER_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCIPHER +#define __nvoc_class_id_OBJCIPHER 0x8dd911 +#endif /* __nvoc_class_id_OBJCIPHER */ + + +struct OBJFUSE; + +#ifndef __NVOC_CLASS_OBJFUSE_TYPEDEF__ +#define __NVOC_CLASS_OBJFUSE_TYPEDEF__ +typedef struct OBJFUSE OBJFUSE; +#endif /* __NVOC_CLASS_OBJFUSE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFUSE +#define __nvoc_class_id_OBJFUSE 0x95ba71 +#endif /* __nvoc_class_id_OBJFUSE */ + + +struct OBJHDCP; + +#ifndef __NVOC_CLASS_OBJHDCP_TYPEDEF__ +#define __NVOC_CLASS_OBJHDCP_TYPEDEF__ +typedef struct OBJHDCP OBJHDCP; +#endif /* __NVOC_CLASS_OBJHDCP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDCP +#define __nvoc_class_id_OBJHDCP 0x426d44 +#endif /* __nvoc_class_id_OBJHDCP */ + + +struct OBJHDMI; + +#ifndef __NVOC_CLASS_OBJHDMI_TYPEDEF__ +#define __NVOC_CLASS_OBJHDMI_TYPEDEF__ +typedef struct OBJHDMI OBJHDMI; +#endif /* __NVOC_CLASS_OBJHDMI_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDMI +#define __nvoc_class_id_OBJHDMI 0x2213b6 +#endif /* __nvoc_class_id_OBJHDMI */ + + +struct Therm; + +#ifndef __NVOC_CLASS_Therm_TYPEDEF__ +#define __NVOC_CLASS_Therm_TYPEDEF__ +typedef struct Therm Therm; +#endif /* __NVOC_CLASS_Therm_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Therm +#define __nvoc_class_id_Therm 0x6c1e56 +#endif /* __nvoc_class_id_Therm */ + + +struct OBJSEQ; + +#ifndef __NVOC_CLASS_OBJSEQ_TYPEDEF__ +#define __NVOC_CLASS_OBJSEQ_TYPEDEF__ +typedef struct OBJSEQ OBJSEQ; +#endif /* __NVOC_CLASS_OBJSEQ_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSEQ +#define __nvoc_class_id_OBJSEQ 0x45da4a +#endif /* __nvoc_class_id_OBJSEQ */ + + +struct OBJDPAUX; + +#ifndef __NVOC_CLASS_OBJDPAUX_TYPEDEF__ +#define __NVOC_CLASS_OBJDPAUX_TYPEDEF__ +typedef struct OBJDPAUX OBJDPAUX; +#endif /* __NVOC_CLASS_OBJDPAUX_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDPAUX +#define __nvoc_class_id_OBJDPAUX 0xfd2ab9 +#endif /* 
__nvoc_class_id_OBJDPAUX */ + + +struct Pmu; + +#ifndef __NVOC_CLASS_Pmu_TYPEDEF__ +#define __NVOC_CLASS_Pmu_TYPEDEF__ +typedef struct Pmu Pmu; +#endif /* __NVOC_CLASS_Pmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Pmu +#define __nvoc_class_id_Pmu 0xf3d722 +#endif /* __nvoc_class_id_Pmu */ + + +struct KernelPmu; + +#ifndef __NVOC_CLASS_KernelPmu_TYPEDEF__ +#define __NVOC_CLASS_KernelPmu_TYPEDEF__ +typedef struct KernelPmu KernelPmu; +#endif /* __NVOC_CLASS_KernelPmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPmu +#define __nvoc_class_id_KernelPmu 0xab9d7d +#endif /* __nvoc_class_id_KernelPmu */ + + +struct Lpwr; + +#ifndef __NVOC_CLASS_Lpwr_TYPEDEF__ +#define __NVOC_CLASS_Lpwr_TYPEDEF__ +typedef struct Lpwr Lpwr; +#endif /* __NVOC_CLASS_Lpwr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Lpwr +#define __nvoc_class_id_Lpwr 0x112230 +#endif /* __nvoc_class_id_Lpwr */ + + +struct OBJISOHUB; + +#ifndef __NVOC_CLASS_OBJISOHUB_TYPEDEF__ +#define __NVOC_CLASS_OBJISOHUB_TYPEDEF__ +typedef struct OBJISOHUB OBJISOHUB; +#endif /* __NVOC_CLASS_OBJISOHUB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJISOHUB +#define __nvoc_class_id_OBJISOHUB 0x7c5e0d +#endif /* __nvoc_class_id_OBJISOHUB */ + + +struct Pmgr; + +#ifndef __NVOC_CLASS_Pmgr_TYPEDEF__ +#define __NVOC_CLASS_Pmgr_TYPEDEF__ +typedef struct Pmgr Pmgr; +#endif /* __NVOC_CLASS_Pmgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Pmgr +#define __nvoc_class_id_Pmgr 0x894574 +#endif /* __nvoc_class_id_Pmgr */ + + +struct OBJHDACODEC; + +#ifndef __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ +#define __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ +typedef struct OBJHDACODEC OBJHDACODEC; +#endif /* __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDACODEC +#define __nvoc_class_id_OBJHDACODEC 0xa576e2 +#endif /* __nvoc_class_id_OBJHDACODEC */ + + +struct Spi; + +#ifndef __NVOC_CLASS_Spi_TYPEDEF__ +#define __NVOC_CLASS_Spi_TYPEDEF__ +typedef struct Spi Spi; +#endif /* __NVOC_CLASS_Spi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Spi +#define __nvoc_class_id_Spi 0x824313 +#endif /* __nvoc_class_id_Spi */ + + +struct OBJUVM; + +#ifndef __NVOC_CLASS_OBJUVM_TYPEDEF__ +#define __NVOC_CLASS_OBJUVM_TYPEDEF__ +typedef struct OBJUVM OBJUVM; +#endif /* __NVOC_CLASS_OBJUVM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJUVM +#define __nvoc_class_id_OBJUVM 0xf9a17d +#endif /* __nvoc_class_id_OBJUVM */ + + +struct OBJSEC2; + +#ifndef __NVOC_CLASS_OBJSEC2_TYPEDEF__ +#define __NVOC_CLASS_OBJSEC2_TYPEDEF__ +typedef struct OBJSEC2 OBJSEC2; +#endif /* __NVOC_CLASS_OBJSEC2_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSEC2 +#define __nvoc_class_id_OBJSEC2 0x28c408 +#endif /* __nvoc_class_id_OBJSEC2 */ + + +struct OBJPMS; + +#ifndef __NVOC_CLASS_OBJPMS_TYPEDEF__ +#define __NVOC_CLASS_OBJPMS_TYPEDEF__ +typedef struct OBJPMS OBJPMS; +#endif /* __NVOC_CLASS_OBJPMS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPMS +#define __nvoc_class_id_OBJPMS 0x9e3810 +#endif /* __nvoc_class_id_OBJPMS */ + + +struct OBJENGSTATE; + +#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +typedef struct OBJENGSTATE OBJENGSTATE; +#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJENGSTATE +#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6 +#endif /* __nvoc_class_id_OBJENGSTATE */ + + +struct OBJLSFM; + +#ifndef __NVOC_CLASS_OBJLSFM_TYPEDEF__ +#define __NVOC_CLASS_OBJLSFM_TYPEDEF__ +typedef struct OBJLSFM OBJLSFM; +#endif /* __NVOC_CLASS_OBJLSFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJLSFM +#define __nvoc_class_id_OBJLSFM 0x9a25e4 +#endif /* __nvoc_class_id_OBJLSFM */ + + 
+struct OBJACR; + +#ifndef __NVOC_CLASS_OBJACR_TYPEDEF__ +#define __NVOC_CLASS_OBJACR_TYPEDEF__ +typedef struct OBJACR OBJACR; +#endif /* __NVOC_CLASS_OBJACR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJACR +#define __nvoc_class_id_OBJACR 0xdb32a1 +#endif /* __nvoc_class_id_OBJACR */ + + +struct OBJGPULOG; + +#ifndef __NVOC_CLASS_OBJGPULOG_TYPEDEF__ +#define __NVOC_CLASS_OBJGPULOG_TYPEDEF__ +typedef struct OBJGPULOG OBJGPULOG; +#endif /* __NVOC_CLASS_OBJGPULOG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPULOG +#define __nvoc_class_id_OBJGPULOG 0xdd19be +#endif /* __nvoc_class_id_OBJGPULOG */ + + +struct KernelNvlink; + +#ifndef __NVOC_CLASS_KernelNvlink_TYPEDEF__ +#define __NVOC_CLASS_KernelNvlink_TYPEDEF__ +typedef struct KernelNvlink KernelNvlink; +#endif /* __NVOC_CLASS_KernelNvlink_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelNvlink +#define __nvoc_class_id_KernelNvlink 0xce6818 +#endif /* __nvoc_class_id_KernelNvlink */ + + +struct Nvlink; + +#ifndef __NVOC_CLASS_Nvlink_TYPEDEF__ +#define __NVOC_CLASS_Nvlink_TYPEDEF__ +typedef struct Nvlink Nvlink; +#endif /* __NVOC_CLASS_Nvlink_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Nvlink +#define __nvoc_class_id_Nvlink 0x790a3c +#endif /* __nvoc_class_id_Nvlink */ + + +struct OBJHWPM; + +#ifndef __NVOC_CLASS_OBJHWPM_TYPEDEF__ +#define __NVOC_CLASS_OBJHWPM_TYPEDEF__ +typedef struct OBJHWPM OBJHWPM; +#endif /* __NVOC_CLASS_OBJHWPM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHWPM +#define __nvoc_class_id_OBJHWPM 0x97e43b +#endif /* __nvoc_class_id_OBJHWPM */ + + +struct OBJGPUMON; + +#ifndef __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +typedef struct OBJGPUMON OBJGPUMON; +#endif /* __NVOC_CLASS_OBJGPUMON_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUMON +#define __nvoc_class_id_OBJGPUMON 0x2b424b +#endif /* __nvoc_class_id_OBJGPUMON */ + + +struct OBJGRIDDISPLAYLESS; + +#ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +#define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS; +#endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS +#define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a +#endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */ + + +struct FECS; + +#ifndef __NVOC_CLASS_FECS_TYPEDEF__ +#define __NVOC_CLASS_FECS_TYPEDEF__ +typedef struct FECS FECS; +#endif /* __NVOC_CLASS_FECS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FECS +#define __nvoc_class_id_FECS 0x5ee8dc +#endif /* __nvoc_class_id_FECS */ + + +struct GPCCS; + +#ifndef __NVOC_CLASS_GPCCS_TYPEDEF__ +#define __NVOC_CLASS_GPCCS_TYPEDEF__ +typedef struct GPCCS GPCCS; +#endif /* __NVOC_CLASS_GPCCS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GPCCS +#define __nvoc_class_id_GPCCS 0x4781e8 +#endif /* __nvoc_class_id_GPCCS */ + + +struct OBJCE; + +#ifndef __NVOC_CLASS_OBJCE_TYPEDEF__ +#define __NVOC_CLASS_OBJCE_TYPEDEF__ +typedef struct OBJCE OBJCE; +#endif /* __NVOC_CLASS_OBJCE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCE +#define __nvoc_class_id_OBJCE 0x793ceb +#endif /* __nvoc_class_id_OBJCE */ + + +struct KernelCE; + +#ifndef __NVOC_CLASS_KernelCE_TYPEDEF__ +#define __NVOC_CLASS_KernelCE_TYPEDEF__ +typedef struct KernelCE KernelCE; +#endif /* __NVOC_CLASS_KernelCE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCE +#define __nvoc_class_id_KernelCE 0x242aca +#endif /* __nvoc_class_id_KernelCE */ + + +struct OBJMSENC; + +#ifndef __NVOC_CLASS_OBJMSENC_TYPEDEF__ +#define __NVOC_CLASS_OBJMSENC_TYPEDEF__ +typedef struct OBJMSENC OBJMSENC; +#endif /* __NVOC_CLASS_OBJMSENC_TYPEDEF__ */ + 
+#ifndef __nvoc_class_id_OBJMSENC +#define __nvoc_class_id_OBJMSENC 0xe97b6c +#endif /* __nvoc_class_id_OBJMSENC */ + + +struct OBJNVJPG; + +#ifndef __NVOC_CLASS_OBJNVJPG_TYPEDEF__ +#define __NVOC_CLASS_OBJNVJPG_TYPEDEF__ +typedef struct OBJNVJPG OBJNVJPG; +#endif /* __NVOC_CLASS_OBJNVJPG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJNVJPG +#define __nvoc_class_id_OBJNVJPG 0x2b3a54 +#endif /* __nvoc_class_id_OBJNVJPG */ + + +struct OBJFAS; + +#ifndef __NVOC_CLASS_OBJFAS_TYPEDEF__ +#define __NVOC_CLASS_OBJFAS_TYPEDEF__ +typedef struct OBJFAS OBJFAS; +#endif /* __NVOC_CLASS_OBJFAS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFAS +#define __nvoc_class_id_OBJFAS 0x4ddf48 +#endif /* __nvoc_class_id_OBJFAS */ + + +struct OBJVMMU; + +#ifndef __NVOC_CLASS_OBJVMMU_TYPEDEF__ +#define __NVOC_CLASS_OBJVMMU_TYPEDEF__ +typedef struct OBJVMMU OBJVMMU; +#endif /* __NVOC_CLASS_OBJVMMU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMMU +#define __nvoc_class_id_OBJVMMU 0xdf8918 +#endif /* __nvoc_class_id_OBJVMMU */ + + +struct Gsp; + +#ifndef __NVOC_CLASS_Gsp_TYPEDEF__ +#define __NVOC_CLASS_Gsp_TYPEDEF__ +typedef struct Gsp Gsp; +#endif /* __NVOC_CLASS_Gsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Gsp +#define __nvoc_class_id_Gsp 0xda3de4 +#endif /* __nvoc_class_id_Gsp */ + + +struct OBJFSP; + +#ifndef __NVOC_CLASS_OBJFSP_TYPEDEF__ +#define __NVOC_CLASS_OBJFSP_TYPEDEF__ +typedef struct OBJFSP OBJFSP; +#endif /* __NVOC_CLASS_OBJFSP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFSP +#define __nvoc_class_id_OBJFSP 0xd39158 +#endif /* __nvoc_class_id_OBJFSP */ + + +struct KernelFsp; + +#ifndef __NVOC_CLASS_KernelFsp_TYPEDEF__ +#define __NVOC_CLASS_KernelFsp_TYPEDEF__ +typedef struct KernelFsp KernelFsp; +#endif /* __NVOC_CLASS_KernelFsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFsp +#define __nvoc_class_id_KernelFsp 0x87fb96 +#endif /* __nvoc_class_id_KernelFsp */ + + +struct OBJOFA; + +#ifndef __NVOC_CLASS_OBJOFA_TYPEDEF__ +#define __NVOC_CLASS_OBJOFA_TYPEDEF__ +typedef struct OBJOFA OBJOFA; +#endif /* __NVOC_CLASS_OBJOFA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOFA +#define __nvoc_class_id_OBJOFA 0xdd7bab +#endif /* __nvoc_class_id_OBJOFA */ + + +struct KernelIoctrl; + +#ifndef __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +#define __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +typedef struct KernelIoctrl KernelIoctrl; +#endif /* __NVOC_CLASS_KernelIoctrl_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelIoctrl +#define __nvoc_class_id_KernelIoctrl 0x880c7d +#endif /* __nvoc_class_id_KernelIoctrl */ + + +struct Ioctrl; + +#ifndef __NVOC_CLASS_Ioctrl_TYPEDEF__ +#define __NVOC_CLASS_Ioctrl_TYPEDEF__ +typedef struct Ioctrl Ioctrl; +#endif /* __NVOC_CLASS_Ioctrl_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Ioctrl +#define __nvoc_class_id_Ioctrl 0x11ce10 +#endif /* __nvoc_class_id_Ioctrl */ + + +struct KernelNvdec; + +#ifndef __NVOC_CLASS_KernelNvdec_TYPEDEF__ +#define __NVOC_CLASS_KernelNvdec_TYPEDEF__ +typedef struct KernelNvdec KernelNvdec; +#endif /* __NVOC_CLASS_KernelNvdec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelNvdec +#define __nvoc_class_id_KernelNvdec 0xaba9df +#endif /* __nvoc_class_id_KernelNvdec */ + + +struct KernelSec2; + +#ifndef __NVOC_CLASS_KernelSec2_TYPEDEF__ +#define __NVOC_CLASS_KernelSec2_TYPEDEF__ +typedef struct KernelSec2 KernelSec2; +#endif /* __NVOC_CLASS_KernelSec2_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSec2 +#define __nvoc_class_id_KernelSec2 0x2f36c9 +#endif /* __nvoc_class_id_KernelSec2 */ + + +struct KernelGsp; + +#ifndef __NVOC_CLASS_KernelGsp_TYPEDEF__ +#define __NVOC_CLASS_KernelGsp_TYPEDEF__ +typedef 
struct KernelGsp KernelGsp; +#endif /* __NVOC_CLASS_KernelGsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGsp +#define __nvoc_class_id_KernelGsp 0x311d4e +#endif /* __nvoc_class_id_KernelGsp */ + + +struct OBJDCECLIENTRM; + +#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +typedef struct OBJDCECLIENTRM OBJDCECLIENTRM; +#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCECLIENTRM +#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c +#endif /* __nvoc_class_id_OBJDCECLIENTRM */ + + +struct OBJDISPMACRO; + +#ifndef __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ +#define __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ +typedef struct OBJDISPMACRO OBJDISPMACRO; +#endif /* __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISPMACRO +#define __nvoc_class_id_OBJDISPMACRO 0xa1cad2 +#endif /* __nvoc_class_id_OBJDISPMACRO */ + + +struct OBJNNE; + +#ifndef __NVOC_CLASS_OBJNNE_TYPEDEF__ +#define __NVOC_CLASS_OBJNNE_TYPEDEF__ +typedef struct OBJNNE OBJNNE; +#endif /* __NVOC_CLASS_OBJNNE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJNNE +#define __nvoc_class_id_OBJNNE 0xc7f0f8 +#endif /* __nvoc_class_id_OBJNNE */ + + +struct Smbpbi; + +#ifndef __NVOC_CLASS_Smbpbi_TYPEDEF__ +#define __NVOC_CLASS_Smbpbi_TYPEDEF__ +typedef struct Smbpbi Smbpbi; +#endif /* __NVOC_CLASS_Smbpbi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Smbpbi +#define __nvoc_class_id_Smbpbi 0x884e68 +#endif /* __nvoc_class_id_Smbpbi */ + + +struct OBJDSI; + +#ifndef __NVOC_CLASS_OBJDSI_TYPEDEF__ +#define __NVOC_CLASS_OBJDSI_TYPEDEF__ +typedef struct OBJDSI OBJDSI; +#endif /* __NVOC_CLASS_OBJDSI_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDSI +#define __nvoc_class_id_OBJDSI 0x2e9a64 +#endif /* __nvoc_class_id_OBJDSI */ + + +struct OBJDCB; + +#ifndef __NVOC_CLASS_OBJDCB_TYPEDEF__ +#define __NVOC_CLASS_OBJDCB_TYPEDEF__ +typedef struct OBJDCB OBJDCB; +#endif /* __NVOC_CLASS_OBJDCB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCB +#define __nvoc_class_id_OBJDCB 0xf931d4 +#endif /* __nvoc_class_id_OBJDCB */ + + +struct KernelGmmu; + +#ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__ +#define __NVOC_CLASS_KernelGmmu_TYPEDEF__ +typedef struct KernelGmmu KernelGmmu; +#endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGmmu +#define __nvoc_class_id_KernelGmmu 0x29362f +#endif /* __nvoc_class_id_KernelGmmu */ + + +struct OBJGMMU; + +#ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__ +#define __NVOC_CLASS_OBJGMMU_TYPEDEF__ +typedef struct OBJGMMU OBJGMMU; +#endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGMMU +#define __nvoc_class_id_OBJGMMU 0xd7a41d +#endif /* __nvoc_class_id_OBJGMMU */ + + +// +// Engine tags to be used by both RM/HAL to reference specific engines. +// +// These values are used in the engine descriptor table +// as well as in the class descriptor table. 
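//
// A minimal worked example (editorial sketch, not part of this change; it
// assumes SF_SHIFT/SF_MASK from the core headers are the usual bit-range
// shift/mask helpers): with ENGDESC_CLASS at bits 31:8 and ENGDESC_INST at
// bits 7:0, a descriptor for the second copy engine decomposes as
//
//     ENGDESCRIPTOR desc = MKENGDESC(classId(OBJCE), 1);  // same as ENG_CE(1) below
//     NvU32 cls  = ENGDESC_FIELD(desc, _CLASS);           // == classId(OBJCE)
//     NvU32 inst = ENGDESC_FIELD(desc, _INST);            // == 1
//
// The ENG_* tags that follow are instance 0 of each class; the indexed
// ENG_CE(x)/ENG_GR(x)/... macros near the end of this header vary the
// instance field and pair with the IS_*/GET_*_IDX helpers to test a
// descriptor's class and extract its instance.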
+// +#define ENG_INVALID MKENGDESC(classId(OBJINVALID), 0) +#define ENG_SW MKENGDESC(classId(OBJSWENG), 0) +#define ENG_GPU MKENGDESC(classId(OBJGPU), 0) +#define ENG_FLCN MKENGDESC(classId(Falcon), 0) +#define ENG_MC MKENGDESC(classId(OBJMC), 0) +#define ENG_KERNEL_MC MKENGDESC(classId(KernelMc), 0) +#define ENG_PRIV_RING MKENGDESC(classId(PrivRing), 0) +#define ENG_SW_INTR MKENGDESC(classId(SwIntr), 0) +#define ENG_MEMORY_SYSTEM MKENGDESC(classId(MemorySystem), 0) +#define ENG_KERNEL_MEMORY_SYSTEM MKENGDESC(classId(KernelMemorySystem), 0) +#define ENG_MEMORY_MANAGER MKENGDESC(classId(MemoryManager), 0) +#define ENG_FBFLCN MKENGDESC(classId(OBJFBFLCN), 0) +#define ENG_TMR MKENGDESC(classId(OBJTMR), 0) +#define ENG_DMA MKENGDESC(classId(VirtMemAllocator), 0) +#define ENG_KERNEL_FIFO MKENGDESC(classId(KernelFifo), 0) +#define ENG_FIFO MKENGDESC(classId(OBJFIFO), 0) +#define ENG_OS MKENGDESC(classId(OBJOS), 0) +#define ENG_BUS MKENGDESC(classId(OBJBUS), 0) +#define ENG_KERNEL_BUS MKENGDESC(classId(KernelBus), 0) +#define ENG_INFOROM MKENGDESC(classId(OBJINFOROM), 0) +#define ENG_PERF MKENGDESC(classId(Perf), 0) +#define ENG_KERNEL_PERF MKENGDESC(classId(KernelPerf), 0) +#define ENG_BIF MKENGDESC(classId(OBJBIF), 0) +#define ENG_KERNEL_BIF MKENGDESC(classId(KernelBif), 0) +#define ENG_HSHUBMANAGER MKENGDESC(classId(OBJHSHUBMANAGER), 0) +#define ENG_SF MKENGDESC(classId(OBJSF), 0) +#define ENG_GPIO MKENGDESC(classId(OBJGPIO), 0) +#define ENG_CLK MKENGDESC(classId(ClockManager), 0) +#define ENG_KERNEL_DISPLAY MKENGDESC(classId(KernelDisplay), 0) +#define ENG_DISP MKENGDESC(classId(OBJDISP), 0) +#define ENG_DPU MKENGDESC(classId(OBJDPU), 0) +#define ENG_FAN MKENGDESC(classId(OBJFAN), 0) +#define ENG_INST MKENGDESC(classId(DisplayInstanceMemory), 0) +#define ENG_KERNEL_HEAD MKENGDESC(classId(KernelHead), 0) +#define ENG_VOLT MKENGDESC(classId(OBJVOLT), 0) +#define ENG_INTR MKENGDESC(classId(Intr), 0) +#define ENG_HDA MKENGDESC(classId(OBJHDA), 0) +#define ENG_I2C MKENGDESC(classId(OBJI2C), 0) +#define ENG_KERNEL_RC MKENGDESC(classId(KernelRc), 0) +#define ENG_RC MKENGDESC(classId(OBJRC), 0) +#define ENG_SOR MKENGDESC(classId(OBJSOR), 0) +#define ENG_DAC MKENGDESC(classId(OBJDAC), 0) +#define ENG_PIOR MKENGDESC(classId(OBJPIOR), 0) +#define ENG_HEAD MKENGDESC(classId(OBJHEAD), 0) +#define ENG_VGA MKENGDESC(classId(OBJVGA), 0) +#define ENG_STEREO MKENGDESC(classId(OBJSTEREO), 0) +#define ENG_OR MKENGDESC(classId(OBJOR), 0) +#define ENG_BSP MKENGDESC(classId(OBJBSP), 0) +#define ENG_CIPHER MKENGDESC(classId(OBJCIPHER), 0) +#define ENG_FUSE MKENGDESC(classId(OBJFUSE), 0) +#define ENG_HDCP MKENGDESC(classId(OBJHDCP), 0) +#define ENG_HDMI MKENGDESC(classId(OBJHDMI), 0) +#define ENG_THERM MKENGDESC(classId(Therm), 0) +#define ENG_SEQ MKENGDESC(classId(OBJSEQ), 0) +#define ENG_DPAUX MKENGDESC(classId(OBJDPAUX), 0) +#define ENG_PMU MKENGDESC(classId(Pmu), 0) +#define ENG_KERNEL_PMU MKENGDESC(classId(KernelPmu), 0) +#define ENG_LPWR MKENGDESC(classId(Lpwr), 0) +#define ENG_ISOHUB MKENGDESC(classId(OBJISOHUB), 0) +#define ENG_PMGR MKENGDESC(classId(Pmgr), 0) +#define ENG_HDACODEC MKENGDESC(classId(OBJHDACODEC), 0) +#define ENG_SPI MKENGDESC(classId(Spi), 0) +#define ENG_UVM MKENGDESC(classId(OBJUVM), 0) +#define ENG_SEC2 MKENGDESC(classId(OBJSEC2), 0) +#define ENG_PMS MKENGDESC(classId(OBJPMS), 0) +#define ENG_ENGSTATE MKENGDESC(classId(OBJENGSTATE), 0) +#define ENG_LSFM MKENGDESC(classId(OBJLSFM), 0) +#define ENG_ACR MKENGDESC(classId(OBJACR), 0) +#define ENG_GPULOG MKENGDESC(classId(OBJGPULOG), 0)
+#define ENG_NVLINK MKENGDESC(classId(Nvlink), 0) +#define ENG_HWPM MKENGDESC(classId(OBJHWPM), 0) +#define ENG_GPUMON MKENGDESC(classId(OBJGPUMON), 0) +#define ENG_GRIDDISPLAYLESS MKENGDESC(classId(OBJGRIDDISPLAYLESS), 0) +#define ENG_VMMU MKENGDESC(classId(OBJVMMU), 0) +#define ENG_NVJPG MKENGDESC(classId(OBJNVJPG), 0) +#define ENG_GSP MKENGDESC(classId(Gsp), 0) +#define ENG_FSP MKENGDESC(classId(OBJFSP), 0) +#define ENG_KERNEL_FSP MKENGDESC(classId(KernelFsp), 0) +#define ENG_OFA MKENGDESC(classId(OBJOFA), 0) +#define ENG_KERNEL_GSP MKENGDESC(classId(KernelGsp), 0) +#define ENG_KERNEL_NVDEC MKENGDESC(classId(KernelNvdec), 0) +#define ENG_KERNEL_SEC2 MKENGDESC(classId(KernelSec2), 0) +#define ENG_DISPMACRO MKENGDESC(classId(OBJDISPMACRO), 0) +#define ENG_NNE MKENGDESC(classId(OBJNNE), 0) +#define ENG_SMBPBI MKENGDESC(classId(Smbpbi), 0) +#define ENG_DSI MKENGDESC(classId(OBJDSI), 0) +#define ENG_DCECLIENTRM MKENGDESC(classId(OBJDCECLIENTRM), 0) +#define ENG_DCB MKENGDESC(classId(OBJDCB), 0) +#define ENG_KERNEL_NVLINK MKENGDESC(classId(KernelNvlink), 0) +#define ENG_GMMU MKENGDESC(classId(OBJGMMU), 0) +#define ENG_KERNEL_GMMU MKENGDESC(classId(KernelGmmu), 0) + +// Indexed CE engine tag reference +#define ENG_CE(x) MKENGDESC(classId(OBJCE), x) +#define ENG_CE__SIZE_1 10 +#define IS_CE(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJCE)) +#define GET_CE_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed MSENC Engine Tag Reference +#define ENG_MSENC(x) MKENGDESC(classId(OBJMSENC), x) +#define ENG_MSENC__SIZE_1 3 +#define IS_MSENC(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJMSENC)) +#define GET_MSENC_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed BSP/NVDEC Engine Tag Reference +#define ENG_NVDEC(x) MKENGDESC(classId(OBJBSP), x) +#define ENG_NVDEC__SIZE_1 5 +#define IS_NVDEC(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJBSP)) +#define GET_NVDEC_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed GR engine tag reference +#define ENG_GR(x) MKENGDESC(classId(Graphics), x) +#define ENG_GR__SIZE_1 8 +#define IS_GR(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Graphics)) +#define GET_GR_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed NVJPEG Engine Tag Reference +#define ENG_NVJPEG(x) MKENGDESC(classId(OBJNVJPG), x) +#define ENG_NVJPEG__SIZE_1 1 +#define IS_NVJPEG(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJNVJPG)) +#define GET_NVJPEG_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed FECS engine tag reference +#define ENG_FECS(x) MKENGDESC(classId(FECS), x) +#define ENG_FECS__SIZE_1 8 +#define IS_FECS(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(FECS)) +#define GET_FECS_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed GPCCS engine tag reference +#define ENG_GPCCS(x) MKENGDESC(classId(GPCCS), x) +#define ENG_GPCCS__SIZE_1 8 +#define IS_GPCCS(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(GPCCS)) +#define GET_GPCCS_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed IOCTRL engine tag reference +#define ENG_IOCTRL(x) MKENGDESC(classId(Ioctrl), x) +#define ENG_IOCTRL__SIZE_1 3 +#define IS_IOCTRL(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Ioctrl)) +#define GET_IOCTRL_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed HSHUB engine tag reference +#define ENG_HSHUB(x) MKENGDESC(classId(Hshub), x) +#define ENG_HSHUB__SIZE_1 5 +#define IS_HSHUB(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Hshub)) +#define GET_HSHUB_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed IOCTRL 
engine tag reference +#define ENG_KERNEL_IOCTRL(x) MKENGDESC(classId(KernelIoctrl), x) +#define ENG_KERNEL_IOCTRL__SIZE_1 3 +#define IS_KERNEL_IOCTRL(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(KernelIoctrl)) +#define GET_KERNEL_IOCTRL_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +#endif // _ENG_DESC_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_ENG_DESC_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c new file mode 100644 index 0000000..6ed668a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c @@ -0,0 +1,189 @@ +#define NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_eng_state_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7a7ed6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE*); +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJENGSTATE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJENGSTATE, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJENGSTATE = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJENGSTATE_OBJENGSTATE, + &__nvoc_rtti_OBJENGSTATE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJENGSTATE), + /*classId=*/ classId(OBJENGSTATE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJENGSTATE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJENGSTATE, + /*pCastInfo=*/ &__nvoc_castinfo_OBJENGSTATE, + /*pExportInfo=*/ &__nvoc_export_info_OBJENGSTATE +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE *pThis) { + __nvoc_engstateDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJENGSTATE_fail_Object; + __nvoc_init_dataField_OBJENGSTATE(pThis); + goto __nvoc_ctor_OBJENGSTATE_exit; // Success + +__nvoc_ctor_OBJENGSTATE_fail_Object: +__nvoc_ctor_OBJENGSTATE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJENGSTATE_1(OBJENGSTATE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + 
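// Note (editorial comment, not part of this change): each assignment below
// fills one virtual-method slot of the OBJENGSTATE function table with the
// base-class _IMPL routine. A derived engine's own funcTable init may later
// overwrite individual slots, in the same way the DispSfUser table earlier
// in this patch points a GpuResource slot at a __nvoc_thunk_DispSfUser_*
// override.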
pThis->__engstateConstructEngine__ = &engstateConstructEngine_IMPL; + + pThis->__engstateInitMissing__ = &engstateInitMissing_IMPL; + + pThis->__engstateStatePreInitLocked__ = &engstateStatePreInitLocked_IMPL; + + pThis->__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL; + + pThis->__engstateStateInitLocked__ = &engstateStateInitLocked_IMPL; + + pThis->__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL; + + pThis->__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL; + + pThis->__engstateStateLoad__ = &engstateStateLoad_IMPL; + + pThis->__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL; + + pThis->__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL; + + pThis->__engstateStateUnload__ = &engstateStateUnload_IMPL; + + pThis->__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL; + + pThis->__engstateStateDestroy__ = &engstateStateDestroy_IMPL; + + pThis->__engstateAllocTunableState__ = &engstateAllocTunableState_IMPL; + + pThis->__engstateFreeTunableState__ = &engstateFreeTunableState_IMPL; + + pThis->__engstateGetTunableState__ = &engstateGetTunableState_IMPL; + + pThis->__engstateSetTunableState__ = &engstateSetTunableState_IMPL; + + pThis->__engstateReconcileTunableState__ = &engstateReconcileTunableState_IMPL; + + pThis->__engstateCompareTunableState__ = &engstateCompareTunableState_IMPL; + + pThis->__engstateIsPresent__ = &engstateIsPresent_IMPL; +} + +void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE *pThis) { + __nvoc_init_funcTable_OBJENGSTATE_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJENGSTATE(OBJENGSTATE *pThis) { + pThis->__nvoc_pbase_OBJENGSTATE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJENGSTATE(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJENGSTATE *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJENGSTATE)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJENGSTATE)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJENGSTATE); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJENGSTATE(pThis); + status = __nvoc_ctor_OBJENGSTATE(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJENGSTATE_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJENGSTATE_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJENGSTATE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h new file mode 100644 index 0000000..f9853bd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h @@ -0,0 +1,385 @@ +#ifndef _G_ENG_STATE_NVOC_H_ +#define _G_ENG_STATE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * 
SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_eng_state_nvoc.h" + +#ifndef _ENG_STATE_H_ +#define _ENG_STATE_H_ + +/*! + * @file eng_state.h + * @brief Provides definitions for all OBJENGSTATE data structures and interfaces. + */ + +#include "core/core.h" +#include "gpu/eng_desc.h" + +typedef enum ENGSTATE_STATE +{ + ENGSTATE_STATE_UNDEFINED = 0, + ENGSTATE_STATE_CONSTRUCT, + ENGSTATE_STATE_PRE_INIT, + ENGSTATE_STATE_INIT, + ENGSTATE_STATE_PRE_LOAD, + ENGSTATE_STATE_LOAD, + ENGSTATE_STATE_POST_LOAD, + ENGSTATE_STATE_PRE_UNLOAD, + ENGSTATE_STATE_UNLOAD, + ENGSTATE_STATE_POST_UNLOAD, + ENGSTATE_STATE_DESTROY, + ENGSTATE_STATE_COUNT // Keep this last +} ENGSTATE_STATE; + +// Stats data stored for every state transition +typedef struct ENGSTATE_STATS +{ + NvS32 memoryAllocCount; + NvS32 memoryAllocSize; + NvU32 transitionTimeUs; +} ENGSTATE_STATS; + +// Temporary transition data, not stored +typedef struct ENGSTATE_TRANSITION_DATA +{ + NvS64 memoryAllocCount; + NvS64 memoryAllocSize; + NvU64 transitionStartTimeNs; +} ENGSTATE_TRANSITION_DATA; + +typedef struct OBJENGSTATE *POBJENGSTATE; + +#define ENG_GET_FIFO(p) (engstateGetFifo(staticCast((p), OBJENGSTATE))) +#define ENG_GET_ENG_DESC(p) (staticCast((p), OBJENGSTATE)->engDesc) + + +/*! + * Defines the structure used to contain all generic information related to + * the OBJENGSTATE. 
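+ *
+ * The __engstateXxx__ members are NVOC virtual slots filled in by
+ * __nvoc_init_funcTable_OBJENGSTATE(); engDesc identifies the engine this
+ * state object represents, and stats[] holds one ENGSTATE_STATS entry per
+ * ENGSTATE_STATE (11 entries, matching ENGSTATE_STATE_COUNT).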
+ */ +#ifdef NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJENGSTATE { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + NV_STATUS (*__engstateConstructEngine__)(POBJGPU, POBJENGSTATE, ENGDESCRIPTOR); + void (*__engstateInitMissing__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStatePreInitLocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStatePreInitUnlocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStateInitLocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStateInitUnlocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStatePreLoad__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStateLoad__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStatePostLoad__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStatePreUnload__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStateUnload__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStatePostUnload__)(POBJGPU, POBJENGSTATE, NvU32); + void (*__engstateStateDestroy__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateAllocTunableState__)(POBJGPU, POBJENGSTATE, void **); + void (*__engstateFreeTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateGetTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateSetTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateReconcileTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateCompareTunableState__)(POBJGPU, POBJENGSTATE, void *, void *); + NvBool (*__engstateIsPresent__)(POBJGPU, POBJENGSTATE); + NvBool PDB_PROP_ENGSTATE_IS_MISSING; + ENGDESCRIPTOR engDesc; + void *pOriginalTunableState; + struct OBJGPU *pGpu; + ENGSTATE_STATE currentState; + ENGSTATE_STATS stats[11]; + char name[100]; +}; + +#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +typedef struct OBJENGSTATE OBJENGSTATE; +#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJENGSTATE +#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6 +#endif /* __nvoc_class_id_OBJENGSTATE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +#define __staticCast_OBJENGSTATE(pThis) \ + ((pThis)->__nvoc_pbase_OBJENGSTATE) + +#ifdef __nvoc_eng_state_h_disabled +#define __dynamicCast_OBJENGSTATE(pThis) ((OBJENGSTATE*)NULL) +#else //__nvoc_eng_state_h_disabled +#define __dynamicCast_OBJENGSTATE(pThis) \ + ((OBJENGSTATE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJENGSTATE))) +#endif //__nvoc_eng_state_h_disabled + +#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_CAST +#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32); +#define __objCreate_OBJENGSTATE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJENGSTATE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define engstateConstructEngine(pGpu, pEngstate, arg0) engstateConstructEngine_DISPATCH(pGpu, pEngstate, arg0) +#define engstateInitMissing(pGpu, pEngstate) engstateInitMissing_DISPATCH(pGpu, pEngstate) +#define engstateStatePreInitLocked(pGpu, pEngstate) engstateStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define engstateStatePreInitUnlocked(pGpu, pEngstate) 
engstateStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define engstateStateInitLocked(pGpu, pEngstate) engstateStateInitLocked_DISPATCH(pGpu, pEngstate) +#define engstateStateInitUnlocked(pGpu, pEngstate) engstateStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define engstateStatePreLoad(pGpu, pEngstate, arg0) engstateStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStateLoad(pGpu, pEngstate, arg0) engstateStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStatePostLoad(pGpu, pEngstate, arg0) engstateStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStatePreUnload(pGpu, pEngstate, arg0) engstateStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStateUnload(pGpu, pEngstate, arg0) engstateStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStatePostUnload(pGpu, pEngstate, arg0) engstateStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStateDestroy(pGpu, pEngstate) engstateStateDestroy_DISPATCH(pGpu, pEngstate) +#define engstateAllocTunableState(pGpu, pEngstate, ppTunableState) engstateAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define engstateFreeTunableState(pGpu, pEngstate, pTunableState) engstateFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateGetTunableState(pGpu, pEngstate, pTunableState) engstateGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateSetTunableState(pGpu, pEngstate, pTunableState) engstateSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateReconcileTunableState(pGpu, pEngstate, pTunableState) engstateReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) engstateCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define engstateIsPresent(pGpu, pEngstate) engstateIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS engstateConstructEngine_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0); + +static inline NV_STATUS engstateConstructEngine_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0) { + return pEngstate->__engstateConstructEngine__(pGpu, pEngstate, arg0); +} + +void engstateInitMissing_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline void engstateInitMissing_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + pEngstate->__engstateInitMissing__(pGpu, pEngstate); +} + +NV_STATUS engstateStatePreInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStatePreInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStatePreInitLocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStatePreInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStatePreInitUnlocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStateInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStateInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStateInitLocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStateInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStateInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStateInitUnlocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStatePreLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static 
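+// Each virtual follows the same shape: a concrete *_IMPL prototype plus a
+// *_DISPATCH inline that indirects through the object's function table, so
+// overrides installed at init time take effect without a C++ vtable.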
inline NV_STATUS engstateStatePreLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePreLoad__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStateLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStateLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStateLoad__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStatePostLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStatePostLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePostLoad__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStatePreUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStatePreUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePreUnload__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStateUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStateUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStateUnload__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStatePostUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStatePostUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePostUnload__(pGpu, pEngstate, arg0); +} + +void engstateStateDestroy_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline void engstateStateDestroy_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + pEngstate->__engstateStateDestroy__(pGpu, pEngstate); +} + +NV_STATUS engstateAllocTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState); + +static inline NV_STATUS engstateAllocTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState) { + return pEngstate->__engstateAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +void engstateFreeTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline void engstateFreeTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + pEngstate->__engstateFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateGetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline NV_STATUS engstateGetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + return pEngstate->__engstateGetTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateSetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline NV_STATUS engstateSetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + return pEngstate->__engstateSetTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateReconcileTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline NV_STATUS engstateReconcileTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + return pEngstate->__engstateReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateCompareTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void *pTunables2); + +static inline NV_STATUS engstateCompareTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void 
*pTunables2) { + return pEngstate->__engstateCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +NvBool engstateIsPresent_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NvBool engstateIsPresent_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateIsPresent__(pGpu, pEngstate); +} + +NV_STATUS engstateConstructBase_IMPL(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2); +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateConstructBase(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateConstructBase(arg0, arg1, arg2) engstateConstructBase_IMPL(arg0, arg1, arg2) +#endif //__nvoc_eng_state_h_disabled + +void engstateLogStateTransitionPre_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2); +#ifdef __nvoc_eng_state_h_disabled +static inline void engstateLogStateTransitionPre(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); +} +#else //__nvoc_eng_state_h_disabled +#define engstateLogStateTransitionPre(arg0, arg1, arg2) engstateLogStateTransitionPre_IMPL(arg0, arg1, arg2) +#endif //__nvoc_eng_state_h_disabled + +void engstateLogStateTransitionPost_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2); +#ifdef __nvoc_eng_state_h_disabled +static inline void engstateLogStateTransitionPost(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); +} +#else //__nvoc_eng_state_h_disabled +#define engstateLogStateTransitionPost(arg0, arg1, arg2) engstateLogStateTransitionPost_IMPL(arg0, arg1, arg2) +#endif //__nvoc_eng_state_h_disabled + +const char *engstateGetName_IMPL(struct OBJENGSTATE *arg0); +#ifdef __nvoc_eng_state_h_disabled +static inline const char *engstateGetName(struct OBJENGSTATE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NULL; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetName(arg0) engstateGetName_IMPL(arg0) +#endif //__nvoc_eng_state_h_disabled + +void engstateDestruct_IMPL(POBJENGSTATE pEngstate); +#define __nvoc_engstateDestruct(pEngstate) engstateDestruct_IMPL(pEngstate) +NV_STATUS engstateStatePreInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateStatePreInit(POBJGPU pGpu, POBJENGSTATE pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateStatePreInit(pGpu, pEngstate) engstateStatePreInit_IMPL(pGpu, pEngstate) +#endif //__nvoc_eng_state_h_disabled + +NV_STATUS engstateStateInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateStateInit(POBJGPU pGpu, POBJENGSTATE pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateStateInit(pGpu, pEngstate) engstateStateInit_IMPL(pGpu, pEngstate) +#endif //__nvoc_eng_state_h_disabled + +ENGDESCRIPTOR engstateGetDescriptor_IMPL(POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline ENGDESCRIPTOR engstateGetDescriptor(POBJENGSTATE pEngstate) { + 
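+    //
+    // __nvoc_eng_state_h_disabled stub: assert, then hand back a zeroed
+    // ENGDESCRIPTOR so callers see a deterministic value rather than
+    // uninitialized stack memory.
+    //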
NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + ENGDESCRIPTOR ret; + portMemSet(&ret, 0, sizeof(ENGDESCRIPTOR)); + return ret; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetDescriptor(pEngstate) engstateGetDescriptor_IMPL(pEngstate) +#endif //__nvoc_eng_state_h_disabled + +struct OBJFIFO *engstateGetFifo_IMPL(POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline struct OBJFIFO *engstateGetFifo(POBJENGSTATE pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NULL; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetFifo(pEngstate) engstateGetFifo_IMPL(pEngstate) +#endif //__nvoc_eng_state_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _ENG_STATE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_ENG_STATE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c new file mode 100644 index 0000000..7ac02b9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c @@ -0,0 +1,379 @@ +#define NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_event_buffer_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x63502b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_EventBuffer(EventBuffer*); +void __nvoc_init_funcTable_EventBuffer(EventBuffer*); +NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_EventBuffer(EventBuffer*); +void __nvoc_dtor_EventBuffer(EventBuffer*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_EventBuffer; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_EventBuffer = { + /*pClassDef=*/ &__nvoc_class_def_EventBuffer, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_EventBuffer, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_EventBuffer = { + /*numRelatives=*/ 5, + /*relatives=*/ 
{ + &__nvoc_rtti_EventBuffer_EventBuffer, + &__nvoc_rtti_EventBuffer_RmResource, + &__nvoc_rtti_EventBuffer_RmResourceCommon, + &__nvoc_rtti_EventBuffer_RsResource, + &__nvoc_rtti_EventBuffer_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer = +{ + /*classInfo=*/ { + /*size=*/ sizeof(EventBuffer), + /*classId=*/ classId(EventBuffer), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "EventBuffer", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_EventBuffer, + /*pCastInfo=*/ &__nvoc_castinfo_EventBuffer, + /*pExportInfo=*/ &__nvoc_export_info_EventBuffer +}; + +static NvBool __nvoc_thunk_RmResource_eventbufferShareCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferControl(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemInterMapParams(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_eventbufferGetRefCount(struct EventBuffer *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlFilter(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_eventbufferAddAdditionalDependants(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferControl_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT 
*pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_eventbufferCanCopy(struct EventBuffer *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferMapTo(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_eventbufferPreDestruct(struct EventBuffer *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmapFrom(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_eventbufferControl_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlLookup(struct EventBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferMap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_eventbufferAccessCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_EventBuffer[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdEnableEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0101u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdEnableEvent" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdUpdateGet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + 
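+        //
+        // methodId appears to use the usual RM control encoding: the class
+        // id (0x90cd, the command space covered by ctrl/ctrl90cd.h) in the
+        // high 16 bits and the command index in the low bits.
+        //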
/*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0102u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdUpdateGet" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdFlush_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0104u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdFlush" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0105u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdPostTelemetryEvent" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_EventBuffer = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_EventBuffer +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_EventBuffer(EventBuffer *pThis) { + __nvoc_eventbufferDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_EventBuffer(EventBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail_RmResource; + __nvoc_init_dataField_EventBuffer(pThis); + + status = __nvoc_eventbufferConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail__init; + goto __nvoc_ctor_EventBuffer_exit; // Success + +__nvoc_ctor_EventBuffer_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_EventBuffer_fail_RmResource: +__nvoc_ctor_EventBuffer_exit: + + return status; +} + +static void __nvoc_init_funcTable_EventBuffer_1(EventBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__eventbuffertBufferCtrlCmdEnableEvent__ = &eventbuffertBufferCtrlCmdEnableEvent_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__eventbuffertBufferCtrlCmdUpdateGet__ = &eventbuffertBufferCtrlCmdUpdateGet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__eventbuffertBufferCtrlCmdFlush__ = &eventbuffertBufferCtrlCmdFlush_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__eventbuffertBufferCtrlCmdPostTelemetryEvent__ = &eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL; +#endif + + pThis->__eventbufferShareCallback__ = &__nvoc_thunk_RmResource_eventbufferShareCallback; + + 
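+    //
+    // Inherited resource methods are wired to __nvoc_thunk_* wrappers. Each
+    // thunk adds the offset recorded in the matching __nvoc_rtti entry to
+    // the EventBuffer pointer before forwarding, handing the RsResource /
+    // RmResource implementation a correctly based object pointer.
+    //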
pThis->__eventbufferCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap; + + pThis->__eventbufferControl__ = &__nvoc_thunk_RsResource_eventbufferControl; + + pThis->__eventbufferGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventbufferGetMemInterMapParams; + + pThis->__eventbufferGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor; + + pThis->__eventbufferGetRefCount__ = &__nvoc_thunk_RsResource_eventbufferGetRefCount; + + pThis->__eventbufferControlFilter__ = &__nvoc_thunk_RsResource_eventbufferControlFilter; + + pThis->__eventbufferAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventbufferAddAdditionalDependants; + + pThis->__eventbufferUnmap__ = &__nvoc_thunk_RsResource_eventbufferUnmap; + + pThis->__eventbufferControl_Prologue__ = &__nvoc_thunk_RmResource_eventbufferControl_Prologue; + + pThis->__eventbufferCanCopy__ = &__nvoc_thunk_RsResource_eventbufferCanCopy; + + pThis->__eventbufferMapTo__ = &__nvoc_thunk_RsResource_eventbufferMapTo; + + pThis->__eventbufferPreDestruct__ = &__nvoc_thunk_RsResource_eventbufferPreDestruct; + + pThis->__eventbufferUnmapFrom__ = &__nvoc_thunk_RsResource_eventbufferUnmapFrom; + + pThis->__eventbufferControl_Epilogue__ = &__nvoc_thunk_RmResource_eventbufferControl_Epilogue; + + pThis->__eventbufferControlLookup__ = &__nvoc_thunk_RsResource_eventbufferControlLookup; + + pThis->__eventbufferMap__ = &__nvoc_thunk_RsResource_eventbufferMap; + + pThis->__eventbufferAccessCallback__ = &__nvoc_thunk_RmResource_eventbufferAccessCallback; +} + +void __nvoc_init_funcTable_EventBuffer(EventBuffer *pThis) { + __nvoc_init_funcTable_EventBuffer_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_EventBuffer(EventBuffer *pThis) { + pThis->__nvoc_pbase_EventBuffer = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_EventBuffer(pThis); +} + +NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + EventBuffer *pThis; + + pThis = portMemAllocNonPaged(sizeof(EventBuffer)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(EventBuffer)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_EventBuffer); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_EventBuffer(pThis); + status = __nvoc_ctor_EventBuffer(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_EventBuffer_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_EventBuffer_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS 
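+//
+// Dynamic-creation entry point: pulls the per-class constructor arguments
+// (CALL_CONTEXT and allocation params) back out of the va_list and forwards
+// to the typed __nvoc_objCreate_EventBuffer().
+//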
__nvoc_objCreateDynamic_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_EventBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h new file mode 100644 index 0000000..6a7e12c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h @@ -0,0 +1,288 @@ +#ifndef _G_EVENT_BUFFER_NVOC_H_ +#define _G_EVENT_BUFFER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_event_buffer_nvoc.h" + +#ifndef _EVENT_BUFFER_H_ +#define _EVENT_BUFFER_H_ + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "rmapi/event.h" +#include "rmapi/resource.h" +#include "ctrl/ctrl90cd.h" +#include "eventbufferproducer.h" + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +typedef struct +{ + // + // Addr: user RO address + // Priv: return cookie to be passed to unmap + // + NvP64 headerAddr; + NvP64 headerPriv; + NvP64 recordBuffAddr; + NvP64 recordBuffPriv; + NvP64 vardataBuffAddr; + NvP64 vardataBuffPriv; +} EVENT_BUFFER_MAP_INFO; + +// This class shares buffers between kernel and usermode +#ifdef NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct EventBuffer { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct EventBuffer *__nvoc_pbase_EventBuffer; + NV_STATUS (*__eventbuffertBufferCtrlCmdEnableEvent__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *); + NV_STATUS (*__eventbuffertBufferCtrlCmdUpdateGet__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *); + NV_STATUS (*__eventbuffertBufferCtrlCmdFlush__)(struct EventBuffer *); + NV_STATUS (*__eventbuffertBufferCtrlCmdPostTelemetryEvent__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *); + NvBool (*__eventbufferShareCallback__)(struct EventBuffer *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__eventbufferCheckMemInterUnmap__)(struct EventBuffer *, NvBool); + NV_STATUS (*__eventbufferControl__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__eventbufferGetMemInterMapParams__)(struct EventBuffer *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__eventbufferGetMemoryMappingDescriptor__)(struct EventBuffer *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__eventbufferGetRefCount__)(struct EventBuffer *); + NV_STATUS (*__eventbufferControlFilter__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__eventbufferAddAdditionalDependants__)(struct RsClient *, struct EventBuffer *, RsResourceRef *); + NV_STATUS (*__eventbufferUnmap__)(struct EventBuffer *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__eventbufferControl_Prologue__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__eventbufferCanCopy__)(struct EventBuffer *); + NV_STATUS (*__eventbufferMapTo__)(struct EventBuffer *, RS_RES_MAP_TO_PARAMS *); + void (*__eventbufferPreDestruct__)(struct EventBuffer *); + NV_STATUS (*__eventbufferUnmapFrom__)(struct EventBuffer *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__eventbufferControl_Epilogue__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__eventbufferControlLookup__)(struct EventBuffer *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__eventbufferMap__)(struct EventBuffer *, struct 
CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__eventbufferAccessCallback__)(struct EventBuffer *, struct RsClient *, void *, RsAccessRight); + struct MEMORY_DESCRIPTOR *pHeaderDesc; + struct MEMORY_DESCRIPTOR *pRecordBufDesc; + struct MEMORY_DESCRIPTOR *pVardataBufDesc; + NvHandle hSubDevice; + NvU32 subDeviceInst; + EVENT_BUFFER_MAP_INFO kernelMapInfo; + EVENT_BUFFER_MAP_INFO clientMapInfo; + NvHandle hClient; + NvU16 seqNo; + NvBool bNotifyPending; + PEVENTNOTIFICATION pListeners; + EVENT_BUFFER_PRODUCER_INFO producerInfo; + struct Memory *pHeader; + struct Memory *pRecord; + struct Memory *pVardata; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle hInternalSubdevice; + NvHandle hInternalHeader; + NvHandle hInternalBuffer; +}; + +#ifndef __NVOC_CLASS_EventBuffer_TYPEDEF__ +#define __NVOC_CLASS_EventBuffer_TYPEDEF__ +typedef struct EventBuffer EventBuffer; +#endif /* __NVOC_CLASS_EventBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_EventBuffer +#define __nvoc_class_id_EventBuffer 0x63502b +#endif /* __nvoc_class_id_EventBuffer */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer; + +#define __staticCast_EventBuffer(pThis) \ + ((pThis)->__nvoc_pbase_EventBuffer) + +#ifdef __nvoc_event_buffer_h_disabled +#define __dynamicCast_EventBuffer(pThis) ((EventBuffer*)NULL) +#else //__nvoc_event_buffer_h_disabled +#define __dynamicCast_EventBuffer(pThis) \ + ((EventBuffer*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(EventBuffer))) +#endif //__nvoc_event_buffer_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_EventBuffer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_EventBuffer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define eventbuffertBufferCtrlCmdEnableEvent(pEventBuffer, pEnableParams) eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(pEventBuffer, pEnableParams) +#define eventbuffertBufferCtrlCmdUpdateGet(pEventBuffer, pUpdateParams) eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(pEventBuffer, pUpdateParams) +#define eventbuffertBufferCtrlCmdFlush(pEventBuffer) eventbuffertBufferCtrlCmdFlush_DISPATCH(pEventBuffer) +#define eventbuffertBufferCtrlCmdPostTelemetryEvent(pEventBuffer, pPostTelemetryEvent) eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(pEventBuffer, pPostTelemetryEvent) +#define eventbufferShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventbufferShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define eventbufferCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventbufferCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define eventbufferControl(pResource, pCallContext, pParams) eventbufferControl_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferGetMemInterMapParams(pRmResource, pParams) eventbufferGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define eventbufferGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventbufferGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define eventbufferGetRefCount(pResource) eventbufferGetRefCount_DISPATCH(pResource) +#define eventbufferControlFilter(pResource, pCallContext, pParams) eventbufferControlFilter_DISPATCH(pResource, pCallContext, 
pParams) +#define eventbufferAddAdditionalDependants(pClient, pResource, pReference) eventbufferAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define eventbufferUnmap(pResource, pCallContext, pCpuMapping) eventbufferUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define eventbufferControl_Prologue(pResource, pCallContext, pParams) eventbufferControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferCanCopy(pResource) eventbufferCanCopy_DISPATCH(pResource) +#define eventbufferMapTo(pResource, pParams) eventbufferMapTo_DISPATCH(pResource, pParams) +#define eventbufferPreDestruct(pResource) eventbufferPreDestruct_DISPATCH(pResource) +#define eventbufferUnmapFrom(pResource, pParams) eventbufferUnmapFrom_DISPATCH(pResource, pParams) +#define eventbufferControl_Epilogue(pResource, pCallContext, pParams) eventbufferControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferControlLookup(pResource, pParams, ppEntry) eventbufferControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define eventbufferMap(pResource, pCallContext, pParams, pCpuMapping) eventbufferMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define eventbufferAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventbufferAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams); + +static inline NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams) { + return pEventBuffer->__eventbuffertBufferCtrlCmdEnableEvent__(pEventBuffer, pEnableParams); +} + +NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams); + +static inline NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams) { + return pEventBuffer->__eventbuffertBufferCtrlCmdUpdateGet__(pEventBuffer, pUpdateParams); +} + +NV_STATUS eventbuffertBufferCtrlCmdFlush_IMPL(struct EventBuffer *pEventBuffer); + +static inline NV_STATUS eventbuffertBufferCtrlCmdFlush_DISPATCH(struct EventBuffer *pEventBuffer) { + return pEventBuffer->__eventbuffertBufferCtrlCmdFlush__(pEventBuffer); +} + +NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent); + +static inline NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent) { + return pEventBuffer->__eventbuffertBufferCtrlCmdPostTelemetryEvent__(pEventBuffer, pPostTelemetryEvent); +} + +static inline NvBool eventbufferShareCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__eventbufferShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS eventbufferCheckMemInterUnmap_DISPATCH(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__eventbufferCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS eventbufferControl_DISPATCH(struct EventBuffer *pResource, 
struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventbufferControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventbufferGetMemInterMapParams_DISPATCH(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__eventbufferGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS eventbufferGetMemoryMappingDescriptor_DISPATCH(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__eventbufferGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 eventbufferGetRefCount_DISPATCH(struct EventBuffer *pResource) { + return pResource->__eventbufferGetRefCount__(pResource); +} + +static inline NV_STATUS eventbufferControlFilter_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventbufferControlFilter__(pResource, pCallContext, pParams); +} + +static inline void eventbufferAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) { + pResource->__eventbufferAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS eventbufferUnmap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__eventbufferUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS eventbufferControl_Prologue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventbufferControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool eventbufferCanCopy_DISPATCH(struct EventBuffer *pResource) { + return pResource->__eventbufferCanCopy__(pResource); +} + +static inline NV_STATUS eventbufferMapTo_DISPATCH(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__eventbufferMapTo__(pResource, pParams); +} + +static inline void eventbufferPreDestruct_DISPATCH(struct EventBuffer *pResource) { + pResource->__eventbufferPreDestruct__(pResource); +} + +static inline NV_STATUS eventbufferUnmapFrom_DISPATCH(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__eventbufferUnmapFrom__(pResource, pParams); +} + +static inline void eventbufferControl_Epilogue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__eventbufferControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventbufferControlLookup_DISPATCH(struct EventBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__eventbufferControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS eventbufferMap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__eventbufferMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool eventbufferAccessCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__eventbufferAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS 
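+//
+// eventbufferConstruct_IMPL and eventbufferDestruct_IMPL are implemented
+// outside this generated header; the __nvoc_eventbufferConstruct/Destruct
+// macros below are what the generated ctor/dtor chain in
+// g_event_buffer_nvoc.c invokes.
+//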
eventbufferConstruct_IMPL(struct EventBuffer *arg_pEventBuffer, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_eventbufferConstruct(arg_pEventBuffer, arg_pCallContext, arg_pParams) eventbufferConstruct_IMPL(arg_pEventBuffer, arg_pCallContext, arg_pParams) +void eventbufferDestruct_IMPL(struct EventBuffer *pEventBuffer); +#define __nvoc_eventbufferDestruct(pEventBuffer) eventbufferDestruct_IMPL(pEventBuffer) +#undef PRIVATE_FIELD + + +NV_STATUS eventBufferAdd(struct EventBuffer *pEventBuffer, void* pEventData, NvU32 recordType, NvBool* bNotify, NvP64 *pHandle); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_EVENT_BUFFER_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c new file mode 100644 index 0000000..570e4f2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c @@ -0,0 +1,692 @@ +#define NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_event_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xd5f150 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_NotifShare(NotifShare*); +void __nvoc_init_funcTable_NotifShare(NotifShare*); +NV_STATUS __nvoc_ctor_NotifShare(NotifShare*); +void __nvoc_init_dataField_NotifShare(NotifShare*); +void __nvoc_dtor_NotifShare(NotifShare*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare; + +static const struct NVOC_RTTI __nvoc_rtti_NotifShare_NotifShare = { + /*pClassDef=*/ &__nvoc_class_def_NotifShare, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NotifShare, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NotifShare_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NotifShare, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NotifShare_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NotifShare, __nvoc_base_RsShared), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_NotifShare = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_NotifShare_NotifShare, + &__nvoc_rtti_NotifShare_RsShared, + &__nvoc_rtti_NotifShare_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NotifShare), + /*classId=*/ classId(NotifShare), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NotifShare", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NotifShare, + /*pCastInfo=*/ &__nvoc_castinfo_NotifShare, + /*pExportInfo=*/ &__nvoc_export_info_NotifShare +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_NotifShare(NotifShare *pThis) { + __nvoc_shrnotifDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void 
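+//
+// NotifShare derives from RsShared rather than RmResource, so one
+// notification bookkeeping object can be reference-counted and shared by
+// several resources; the ctor/dtor and RTTI boilerplate otherwise follows
+// the same pattern as the classes above.
+//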
__nvoc_init_dataField_NotifShare(NotifShare *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_NotifShare(NotifShare *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail_RsShared; + __nvoc_init_dataField_NotifShare(pThis); + + status = __nvoc_shrnotifConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail__init; + goto __nvoc_ctor_NotifShare_exit; // Success + +__nvoc_ctor_NotifShare_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_NotifShare_fail_RsShared: +__nvoc_ctor_NotifShare_exit: + + return status; +} + +static void __nvoc_init_funcTable_NotifShare_1(NotifShare *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_NotifShare(NotifShare *pThis) { + __nvoc_init_funcTable_NotifShare_1(pThis); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_NotifShare(NotifShare *pThis) { + pThis->__nvoc_pbase_NotifShare = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_NotifShare(pThis); +} + +NV_STATUS __nvoc_objCreate_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + NotifShare *pThis; + + pThis = portMemAllocNonPaged(sizeof(NotifShare)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(NotifShare)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NotifShare); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_NotifShare(pThis); + status = __nvoc_ctor_NotifShare(pThis); + if (status != NV_OK) goto __nvoc_objCreate_NotifShare_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NotifShare_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_NotifShare(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xa4ecfc = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_Event(Event*); +void __nvoc_init_funcTable_Event(Event*); +NV_STATUS __nvoc_ctor_Event(Event*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Event(Event*); +void __nvoc_dtor_Event(Event*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Event; + +static const struct NVOC_RTTI __nvoc_rtti_Event_Event = { + /*pClassDef=*/ &__nvoc_class_def_Event, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Event, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI 
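/*
 * [Editor's sketch -- not part of the patch] The block above is the standard
 * NVOC creation boilerplate: allocate, zero-fill, wire RTTI and the function
 * tables, then run the constructor chain (base RsShared first, then
 * shrnotifConstruct). A caller would typically use it as below;
 * exampleCreateNotifShare is a hypothetical helper, and objDelete() is
 * assumed to be the matching NVOC teardown entry point.
 */
static NV_STATUS exampleCreateNotifShare(void)
{
    NotifShare *pShare = NULL;

    /* Runs the full init + ctor sequence shown above. */
    NV_STATUS status = __nvoc_objCreate_NotifShare(&pShare, NULL /* no parent */, 0);
    if (status != NV_OK)
        return status;

    /* ... use pShare ... */

    objDelete(pShare); /* assumed teardown; invokes __nvoc_dtor_NotifShare */
    return NV_OK;
}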
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0xa4ecfc = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+
+void __nvoc_init_Event(Event*);
+void __nvoc_init_funcTable_Event(Event*);
+NV_STATUS __nvoc_ctor_Event(Event*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
+void __nvoc_init_dataField_Event(Event*);
+void __nvoc_dtor_Event(Event*);
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Event;
+
+static const struct NVOC_RTTI __nvoc_rtti_Event_Event = {
+    /*pClassDef=*/ &__nvoc_class_def_Event,
+    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Event,
+    /*offset=*/ 0,
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_Event_Object = {
+    /*pClassDef=*/ &__nvoc_class_def_Object,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_Event_RsResource = {
+    /*pClassDef=*/ &__nvoc_class_def_RsResource,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_Event_RmResourceCommon = {
+    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_Event_RmResource = {
+    /*pClassDef=*/ &__nvoc_class_def_RmResource,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource),
+};
+
+static const struct NVOC_CASTINFO __nvoc_castinfo_Event = {
+    /*numRelatives=*/ 5,
+    /*relatives=*/ {
+        &__nvoc_rtti_Event_Event,
+        &__nvoc_rtti_Event_RmResource,
+        &__nvoc_rtti_Event_RmResourceCommon,
+        &__nvoc_rtti_Event_RsResource,
+        &__nvoc_rtti_Event_Object,
+    },
+};
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_Event =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(Event),
+        /*classId=*/ classId(Event),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "Event",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Event,
+    /*pCastInfo=*/ &__nvoc_castinfo_Event,
+    /*pExportInfo=*/ &__nvoc_export_info_Event
+};
+
+static NvBool __nvoc_thunk_RmResource_eventShareCallback(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_eventCheckMemInterUnmap(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), bSubdeviceHandleProvided);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_eventControl(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_eventGetMemInterMapParams(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), ppMemDesc);
+}
+
+static NvU32 __nvoc_thunk_RsResource_eventGetRefCount(struct Event *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_eventControlFilter(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams);
+}
+
+static void __nvoc_thunk_RsResource_eventAddAdditionalDependants(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pReference);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_eventUnmap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pCpuMapping);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_eventControl_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams);
+}
+
+static NvBool __nvoc_thunk_RsResource_eventCanCopy(struct Event *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_eventMapTo(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams);
+}
+
+static void __nvoc_thunk_RsResource_eventPreDestruct(struct Event *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_eventUnmapFrom(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams);
+}
+
+static void __nvoc_thunk_RmResource_eventControl_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_eventControlLookup(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
+    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams, ppEntry);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_eventMap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams, pCpuMapping);
+}
+
+static NvBool __nvoc_thunk_RmResource_eventAccessCallback(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
+}
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_Event =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_RmResource(RmResource*);
+void __nvoc_dtor_Event(Event *pThis) {
+    __nvoc_eventDestruct(pThis);
+    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_Event(Event *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Event(Event *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_Event_fail_RmResource;
+    __nvoc_init_dataField_Event(pThis);
+
+    status = __nvoc_eventConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_Event_fail__init;
+    goto __nvoc_ctor_Event_exit; // Success
+
+__nvoc_ctor_Event_fail__init:
+    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
+__nvoc_ctor_Event_fail_RmResource:
+__nvoc_ctor_Event_exit:
+
+    return status;
+}
+
+static void __nvoc_init_funcTable_Event_1(Event *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    pThis->__eventShareCallback__ = &__nvoc_thunk_RmResource_eventShareCallback;
+
+    pThis->__eventCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventCheckMemInterUnmap;
+
+    pThis->__eventControl__ = &__nvoc_thunk_RsResource_eventControl;
+
+    pThis->__eventGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventGetMemInterMapParams;
+
+    pThis->__eventGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor;
+
+    pThis->__eventGetRefCount__ = &__nvoc_thunk_RsResource_eventGetRefCount;
+
+    pThis->__eventControlFilter__ = &__nvoc_thunk_RsResource_eventControlFilter;
+
+    pThis->__eventAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventAddAdditionalDependants;
+
+    pThis->__eventUnmap__ = &__nvoc_thunk_RsResource_eventUnmap;
+
+    pThis->__eventControl_Prologue__ = &__nvoc_thunk_RmResource_eventControl_Prologue;
+
+    pThis->__eventCanCopy__ = &__nvoc_thunk_RsResource_eventCanCopy;
+
+    pThis->__eventMapTo__ = &__nvoc_thunk_RsResource_eventMapTo;
+
+    pThis->__eventPreDestruct__ = &__nvoc_thunk_RsResource_eventPreDestruct;
+
+    pThis->__eventUnmapFrom__ = &__nvoc_thunk_RsResource_eventUnmapFrom;
+
+    pThis->__eventControl_Epilogue__ = &__nvoc_thunk_RmResource_eventControl_Epilogue;
+
+    pThis->__eventControlLookup__ = &__nvoc_thunk_RsResource_eventControlLookup;
+
+    pThis->__eventMap__ = &__nvoc_thunk_RsResource_eventMap;
+
+    pThis->__eventAccessCallback__ = &__nvoc_thunk_RmResource_eventAccessCallback;
+}
+
+void __nvoc_init_funcTable_Event(Event *pThis) {
+    __nvoc_init_funcTable_Event_1(pThis);
+}
+
+void __nvoc_init_RmResource(RmResource*);
+void __nvoc_init_Event(Event *pThis) {
+    pThis->__nvoc_pbase_Event = pThis;
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
+    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
+    __nvoc_init_funcTable_Event(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status;
+    Object *pParentObj;
+    Event *pThis;
+
+    pThis = portMemAllocNonPaged(sizeof(Event));
+    if (pThis == NULL) return NV_ERR_NO_MEMORY;
+
+    portMemSet(pThis, 0, sizeof(Event));
+
+    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Event);
+
+    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+    {
+        pParentObj = dynamicCast(pParent, Object);
+        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+    }
+    else
+    {
+        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+    }
+
+    __nvoc_init_Event(pThis);
+    status = __nvoc_ctor_Event(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_objCreate_Event_cleanup;
+
+    *ppThis = pThis;
+    return NV_OK;
+
+__nvoc_objCreate_Event_cleanup:
+    // do not call destructors here since the constructor already called them
+    portMemFree(pThis);
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+    status = __nvoc_objCreate_Event(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+    return status;
+}
+
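/*
 * [Editor's sketch -- not part of the patch] Every __nvoc_thunk_* above is the
 * same trick: the derived pointer is adjusted by a precomputed offset to reach
 * the embedded base object, then the base implementation is called. A
 * self-contained illustration with hypothetical ExampleBase/ExampleDerived
 * types (the real code stores the offset in its NVOC_RTTI tables via
 * NV_OFFSETOF):
 */
#include <stddef.h>

struct ExampleBase    { int refCount; };
struct ExampleDerived { struct ExampleBase base; int extra; };

static int exampleBaseGetRefCount(struct ExampleBase *pBase) { return pBase->refCount; }

/* Mirrors __nvoc_thunk_RsResource_eventGetRefCount: upcast by +offset, call base. */
static int exampleDerivedGetRefCount(struct ExampleDerived *pDerived)
{
    const size_t offset = offsetof(struct ExampleDerived, base);
    return exampleBaseGetRefCount((struct ExampleBase *)((unsigned char *)pDerived + offset));
}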
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0xf8f965 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
+
+void __nvoc_init_INotifier(INotifier*);
+void __nvoc_init_funcTable_INotifier(INotifier*);
+NV_STATUS __nvoc_ctor_INotifier(INotifier*, struct CALL_CONTEXT * arg_pCallContext);
+void __nvoc_init_dataField_INotifier(INotifier*);
+void __nvoc_dtor_INotifier(INotifier*);
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier;
+
+static const struct NVOC_RTTI __nvoc_rtti_INotifier_INotifier = {
+    /*pClassDef=*/ &__nvoc_class_def_INotifier,
+    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_INotifier,
+    /*offset=*/ 0,
+};
+
+static const struct NVOC_CASTINFO __nvoc_castinfo_INotifier = {
+    /*numRelatives=*/ 1,
+    /*relatives=*/ {
+        &__nvoc_rtti_INotifier_INotifier,
+    },
+};
+
+// Not instantiable because it's not derived from class "Object"
+// Not instantiable because it's an abstract class with following pure virtual functions:
+//  inotifyGetNotificationListPtr
+//  inotifySetNotificationShare
+//  inotifyGetNotificationShare
+//  inotifyUnregisterEvent
+//  inotifyGetOrAllocNotifShare
+const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(INotifier),
+        /*classId=*/ classId(INotifier),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "INotifier",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
+    /*pCastInfo=*/ &__nvoc_castinfo_INotifier,
+    /*pExportInfo=*/ &__nvoc_export_info_INotifier
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_INotifier(INotifier *pThis) {
+    __nvoc_inotifyDestruct(pThis);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_INotifier(INotifier *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_INotifier(INotifier *pThis, struct CALL_CONTEXT * arg_pCallContext) {
+    NV_STATUS status = NV_OK;
+    __nvoc_init_dataField_INotifier(pThis);
+
+    status = __nvoc_inotifyConstruct(pThis, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_INotifier_fail__init;
+    goto __nvoc_ctor_INotifier_exit; // Success
+
+__nvoc_ctor_INotifier_fail__init:
+__nvoc_ctor_INotifier_exit:
+
+    return status;
+}
+
+static void __nvoc_init_funcTable_INotifier_1(INotifier *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    pThis->__inotifyGetNotificationListPtr__ = NULL;
+
+    pThis->__inotifySetNotificationShare__ = NULL;
+
+    pThis->__inotifyGetNotificationShare__ = NULL;
+
+    pThis->__inotifyUnregisterEvent__ = NULL;
+
+    pThis->__inotifyGetOrAllocNotifShare__ = NULL;
+}
+
+void __nvoc_init_funcTable_INotifier(INotifier *pThis) {
+    __nvoc_init_funcTable_INotifier_1(pThis);
+}
+
+void __nvoc_init_INotifier(INotifier *pThis) {
+    pThis->__nvoc_pbase_INotifier = pThis;
+    __nvoc_init_funcTable_INotifier(pThis);
+}
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0xa8683b = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
+
+void __nvoc_init_Notifier(Notifier*);
+void __nvoc_init_funcTable_Notifier(Notifier*);
+NV_STATUS __nvoc_ctor_Notifier(Notifier*, struct CALL_CONTEXT * arg_pCallContext);
+void __nvoc_init_dataField_Notifier(Notifier*);
+void __nvoc_dtor_Notifier(Notifier*);
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier;
+
+static const struct NVOC_RTTI __nvoc_rtti_Notifier_Notifier = {
+    /*pClassDef=*/ &__nvoc_class_def_Notifier,
+    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Notifier,
+    /*offset=*/ 0,
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_Notifier_INotifier = {
+    /*pClassDef=*/ &__nvoc_class_def_INotifier,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(Notifier, __nvoc_base_INotifier),
+};
+
+static const struct NVOC_CASTINFO __nvoc_castinfo_Notifier = {
+    /*numRelatives=*/ 2,
+    /*relatives=*/ {
+        &__nvoc_rtti_Notifier_Notifier,
+        &__nvoc_rtti_Notifier_INotifier,
+    },
+};
+
+// Not instantiable because it's not derived from class "Object"
+const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(Notifier),
+        /*classId=*/ classId(Notifier),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "Notifier",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
+    /*pCastInfo=*/ &__nvoc_castinfo_Notifier,
+    /*pExportInfo=*/ &__nvoc_export_info_Notifier
+};
+
+static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier) {
+    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset));
+}
+
+static struct NotifShare *__nvoc_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier) {
+    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset));
+}
+
+static void __nvoc_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare) {
+    notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), pNotifShare);
+}
+
+static NV_STATUS __nvoc_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+static NV_STATUS __nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_INotifier(INotifier*);
+void __nvoc_dtor_Notifier(Notifier *pThis) {
+    __nvoc_notifyDestruct(pThis);
+    __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_Notifier(Notifier *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_INotifier(INotifier* , struct CALL_CONTEXT *);
+NV_STATUS __nvoc_ctor_Notifier(Notifier *pThis, struct CALL_CONTEXT * arg_pCallContext) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_INotifier(&pThis->__nvoc_base_INotifier, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_Notifier_fail_INotifier;
+    __nvoc_init_dataField_Notifier(pThis);
+
+    status = __nvoc_notifyConstruct(pThis, arg_pCallContext);
+    if (status != NV_OK) goto __nvoc_ctor_Notifier_fail__init;
+    goto __nvoc_ctor_Notifier_exit; // Success
+
+__nvoc_ctor_Notifier_fail__init:
+    __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier);
+__nvoc_ctor_Notifier_fail_INotifier:
+__nvoc_ctor_Notifier_exit:
+
+    return status;
+}
+
+static void __nvoc_init_funcTable_Notifier_1(Notifier *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    pThis->__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL;
+
+    pThis->__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL;
+
+    pThis->__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL;
+
+    pThis->__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL;
+
+    pThis->__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL;
+
+    pThis->__nvoc_base_INotifier.__inotifyGetNotificationListPtr__ = &__nvoc_thunk_Notifier_inotifyGetNotificationListPtr;
+
+    pThis->__nvoc_base_INotifier.__inotifyGetNotificationShare__ = &__nvoc_thunk_Notifier_inotifyGetNotificationShare;
+
+    pThis->__nvoc_base_INotifier.__inotifySetNotificationShare__ = &__nvoc_thunk_Notifier_inotifySetNotificationShare;
+
+    pThis->__nvoc_base_INotifier.__inotifyUnregisterEvent__ = &__nvoc_thunk_Notifier_inotifyUnregisterEvent;
+
+    pThis->__nvoc_base_INotifier.__inotifyGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare;
+}
+
+void __nvoc_init_funcTable_Notifier(Notifier *pThis) {
+    __nvoc_init_funcTable_Notifier_1(pThis);
+}
+
+void __nvoc_init_INotifier(INotifier*);
+void __nvoc_init_Notifier(Notifier *pThis) {
+    pThis->__nvoc_pbase_Notifier = pThis;
+    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_INotifier;
+    __nvoc_init_INotifier(&pThis->__nvoc_base_INotifier);
+    __nvoc_init_funcTable_Notifier(pThis);
+}
+
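/*
 * [Editor's sketch -- not part of the patch] The Notifier func-table wiring
 * above is two-way: Notifier's own slots point at the notify*_IMPL functions,
 * while the embedded INotifier's pure-virtual slots are filled with thunks
 * that subtract the base offset to recover the containing Notifier. So a
 * caller holding only the interface still reaches the concrete implementation
 * (depends on g_event_nvoc.h below; exampleGetShare is a hypothetical helper):
 */
static struct NotifShare *exampleGetShare(struct Notifier *pNotifier)
{
    /* Upcast to the embedded interface... */
    struct INotifier *pINotifier = &pNotifier->__nvoc_base_INotifier;

    /* ...and the virtual call resolves to
     * __nvoc_thunk_Notifier_inotifyGetNotificationShare, which lands in
     * notifyGetNotificationShare_IMPL. */
    return inotifyGetNotificationShare(pINotifier);
}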
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h
new file mode 100644
index 0000000..ab8d3a0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h
@@ -0,0 +1,529 @@
+#ifndef _G_EVENT_NVOC_H_
+#define _G_EVENT_NVOC_H_
+#include "nvoc/runtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "g_event_nvoc.h"
+
+#ifndef _EVENT_H_
+#define _EVENT_H_
+
+#include "class/cl0000.h" // NV0000_NOTIFIERS_MAXCOUNT
+
+#include "resserv/resserv.h"
+#include "nvoc/prelude.h"
+#include "resserv/rs_server.h"
+#include "rmapi/resource.h"
+
+typedef struct _def_system_event_queue SYSTEM_EVENTS_QUEUE;
+
+struct EVENTNOTIFICATION
+{
+    NvHandle hEventClient;
+    NvHandle hEvent;
+    NvU32 subdeviceInst;
+    NvU32 NotifyIndex;             // NVnnnn_NOTIFIERS_xyz
+    NvU32 NotifyType;              // Event class. NV01_EVENT_OS_EVENT for example.
+    NvBool bUserOsEventHandle;     // Event was allocated from user app.
+    NvBool bBroadcastEvent;        // Wait for all subdevices before sending event.
+    NvBool bClientRM;              // Event was allocated from client RM.
+    NvBool bSubdeviceSpecificEvent; // SubdeviceSpecificValue is valid.
+    NvU32 SubdeviceSpecificValue;  // NV0005_NOTIFY_INDEX_SUBDEVICE
+    NvBool bEventDataRequired;     // nv_post_event allocates memory for Data.
+    NvBool bNonStallIntrEvent;
+    NvU32 NotifyTriggerCount;      // Used with bBroadcastEvent.
+    NvP64 Data;
+    struct EVENTNOTIFICATION *Next;
+};
+typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION;
+
+struct INotifier;
+
+#ifndef __NVOC_CLASS_INotifier_TYPEDEF__
+#define __NVOC_CLASS_INotifier_TYPEDEF__
+typedef struct INotifier INotifier;
+#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_INotifier
+#define __nvoc_class_id_INotifier 0xf8f965
+#endif /* __nvoc_class_id_INotifier */
+
+
+
+#define NV_SYSTEM_EVENT_QUEUE_SIZE 16
+struct _def_system_event_queue
+{
+    NvU32 Head;
+    NvU32 Tail;
+    struct event_queue
+    {
+        NvU32 event;
+        NvU32 status;
+    } EventQueue[NV_SYSTEM_EVENT_QUEUE_SIZE];
+};
+
+struct _def_client_system_event_info
+{
+    SYSTEM_EVENTS_QUEUE systemEventsQueue;
+    NvU32 notifyActions[NV0000_NOTIFIERS_MAXCOUNT];
+};
+
+/**
+ * This class represents data that is shared between one notifier and any
+ * events that are registered with the notifier.
+ *
+ * Instances of this class are ref-counted and will be kept alive until
+ * the notifier and all of its events have been freed.
+ */
+#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct NotifShare {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct RsShared __nvoc_base_RsShared;
+    struct Object *__nvoc_pbase_Object;
+    struct RsShared *__nvoc_pbase_RsShared;
+    struct NotifShare *__nvoc_pbase_NotifShare;
+    struct INotifier *pNotifier;
+    NvHandle hNotifierClient;
+    NvHandle hNotifierResource;
+    EVENTNOTIFICATION *pEventList;
+};
+
+#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__
+#define __NVOC_CLASS_NotifShare_TYPEDEF__
+typedef struct NotifShare NotifShare;
+#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_NotifShare
+#define __nvoc_class_id_NotifShare 0xd5f150
+#endif /* __nvoc_class_id_NotifShare */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare;
+
+#define __staticCast_NotifShare(pThis) \
+    ((pThis)->__nvoc_pbase_NotifShare)
+
+#ifdef __nvoc_event_h_disabled
+#define __dynamicCast_NotifShare(pThis) ((NotifShare*)NULL)
+#else //__nvoc_event_h_disabled
+#define __dynamicCast_NotifShare(pThis) \
+    ((NotifShare*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NotifShare)))
+#endif //__nvoc_event_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_NotifShare(NotifShare**, Dynamic*, NvU32);
+#define __objCreate_NotifShare(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_NotifShare((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+NV_STATUS shrnotifConstruct_IMPL(struct NotifShare *arg_pNotifShare);
+#define __nvoc_shrnotifConstruct(arg_pNotifShare) shrnotifConstruct_IMPL(arg_pNotifShare)
+void shrnotifDestruct_IMPL(struct NotifShare *pNotifShare);
+#define __nvoc_shrnotifDestruct(pNotifShare) shrnotifDestruct_IMPL(pNotifShare)
+#undef PRIVATE_FIELD
+
+
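/*
 * [Editor's sketch -- not part of the patch] EVENTNOTIFICATION records form an
 * intrusive singly linked list (the Next field) anchored at
 * NotifShare::pEventList, so walking a share's registered events looks like
 * this (exampleCountNotifications is a hypothetical helper):
 */
static NvU32 exampleCountNotifications(struct NotifShare *pShare)
{
    NvU32 count = 0;
    PEVENTNOTIFICATION pEvent;

    for (pEvent = pShare->pEventList; pEvent != NULL; pEvent = pEvent->Next)
        count++;

    return count;
}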
+/**
+ * This class represents event notification consumers
+ */
+#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct Event {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct RmResource __nvoc_base_RmResource;
+    struct Object *__nvoc_pbase_Object;
+    struct RsResource *__nvoc_pbase_RsResource;
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
+    struct RmResource *__nvoc_pbase_RmResource;
+    struct Event *__nvoc_pbase_Event;
+    NvBool (*__eventShareCallback__)(struct Event *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
+    NV_STATUS (*__eventCheckMemInterUnmap__)(struct Event *, NvBool);
+    NV_STATUS (*__eventControl__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NV_STATUS (*__eventGetMemInterMapParams__)(struct Event *, RMRES_MEM_INTER_MAP_PARAMS *);
+    NV_STATUS (*__eventGetMemoryMappingDescriptor__)(struct Event *, struct MEMORY_DESCRIPTOR **);
+    NvU32 (*__eventGetRefCount__)(struct Event *);
+    NV_STATUS (*__eventControlFilter__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    void (*__eventAddAdditionalDependants__)(struct RsClient *, struct Event *, RsResourceRef *);
+    NV_STATUS (*__eventUnmap__)(struct Event *, struct CALL_CONTEXT *, RsCpuMapping *);
+    NV_STATUS (*__eventControl_Prologue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NvBool (*__eventCanCopy__)(struct Event *);
+    NV_STATUS (*__eventMapTo__)(struct Event *, RS_RES_MAP_TO_PARAMS *);
+    void (*__eventPreDestruct__)(struct Event *);
+    NV_STATUS (*__eventUnmapFrom__)(struct Event *, RS_RES_UNMAP_FROM_PARAMS *);
+    void (*__eventControl_Epilogue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NV_STATUS (*__eventControlLookup__)(struct Event *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
+    NV_STATUS (*__eventMap__)(struct Event *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
+    NvBool (*__eventAccessCallback__)(struct Event *, struct RsClient *, void *, RsAccessRight);
+    struct NotifShare *pNotifierShare;
+    NvHandle hNotifierClient;
+    NvHandle hNotifierResource;
+    NvHandle hEvent;
+};
+
+#ifndef __NVOC_CLASS_Event_TYPEDEF__
+#define __NVOC_CLASS_Event_TYPEDEF__
+typedef struct Event Event;
+#endif /* __NVOC_CLASS_Event_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Event
+#define __nvoc_class_id_Event 0xa4ecfc
+#endif /* __nvoc_class_id_Event */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event;
+
+#define __staticCast_Event(pThis) \
+    ((pThis)->__nvoc_pbase_Event)
+
+#ifdef __nvoc_event_h_disabled
+#define __dynamicCast_Event(pThis) ((Event*)NULL)
+#else //__nvoc_event_h_disabled
+#define __dynamicCast_Event(pThis) \
+    ((Event*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Event)))
+#endif //__nvoc_event_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_Event(Event**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_Event(Event**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
+#define __objCreate_Event(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+    __nvoc_objCreate_Event((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+#define eventShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
+#define eventCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
+#define eventControl(pResource, pCallContext, pParams) eventControl_DISPATCH(pResource, pCallContext, pParams)
+#define eventGetMemInterMapParams(pRmResource, pParams) eventGetMemInterMapParams_DISPATCH(pRmResource, pParams)
+#define eventGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
+#define eventGetRefCount(pResource) eventGetRefCount_DISPATCH(pResource)
+#define eventControlFilter(pResource, pCallContext, pParams) eventControlFilter_DISPATCH(pResource, pCallContext, pParams)
+#define eventAddAdditionalDependants(pClient, pResource, pReference) eventAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
+#define eventUnmap(pResource, pCallContext, pCpuMapping) eventUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
+#define eventControl_Prologue(pResource, pCallContext, pParams) eventControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define eventCanCopy(pResource) eventCanCopy_DISPATCH(pResource)
+#define eventMapTo(pResource, pParams) eventMapTo_DISPATCH(pResource, pParams)
+#define eventPreDestruct(pResource) eventPreDestruct_DISPATCH(pResource)
+#define eventUnmapFrom(pResource, pParams) eventUnmapFrom_DISPATCH(pResource, pParams)
+#define eventControl_Epilogue(pResource, pCallContext, pParams) eventControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define eventControlLookup(pResource, pParams, ppEntry) eventControlLookup_DISPATCH(pResource, pParams, ppEntry)
+#define eventMap(pResource, pCallContext, pParams, pCpuMapping) eventMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
+#define eventAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
+static inline NvBool eventShareCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return pResource->__eventShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static inline NV_STATUS eventCheckMemInterUnmap_DISPATCH(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return pRmResource->__eventCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
+}
+
+static inline NV_STATUS eventControl_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__eventControl__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS eventGetMemInterMapParams_DISPATCH(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return pRmResource->__eventGetMemInterMapParams__(pRmResource, pParams);
+}
+
+static inline NV_STATUS eventGetMemoryMappingDescriptor_DISPATCH(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return pRmResource->__eventGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
+}
+
+static inline NvU32 eventGetRefCount_DISPATCH(struct Event *pResource) {
+    return pResource->__eventGetRefCount__(pResource);
+}
+
+static inline NV_STATUS eventControlFilter_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__eventControlFilter__(pResource, pCallContext, pParams);
+}
+
+static inline void eventAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) {
+    pResource->__eventAddAdditionalDependants__(pClient, pResource, pReference);
+}
+
+static inline NV_STATUS eventUnmap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
+    return pResource->__eventUnmap__(pResource, pCallContext, pCpuMapping);
+}
+
+static inline NV_STATUS eventControl_Prologue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return pResource->__eventControl_Prologue__(pResource, pCallContext, pParams);
+}
+
+static inline NvBool eventCanCopy_DISPATCH(struct Event *pResource) {
+    return pResource->__eventCanCopy__(pResource);
+}
+
+static inline NV_STATUS eventMapTo_DISPATCH(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return pResource->__eventMapTo__(pResource, pParams);
+}
+
+static inline void eventPreDestruct_DISPATCH(struct Event *pResource) {
+    pResource->__eventPreDestruct__(pResource);
+}
+
+static inline NV_STATUS eventUnmapFrom_DISPATCH(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return pResource->__eventUnmapFrom__(pResource, pParams);
+}
+
+static inline void eventControl_Epilogue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    pResource->__eventControl_Epilogue__(pResource, pCallContext, pParams);
+}
+
+static inline NV_STATUS eventControlLookup_DISPATCH(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
+    return pResource->__eventControlLookup__(pResource, pParams, ppEntry);
+}
+
+static inline NV_STATUS eventMap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
+    return pResource->__eventMap__(pResource, pCallContext, pParams, pCpuMapping);
+}
+
+static inline NvBool eventAccessCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return pResource->__eventAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
+}
+
+NV_STATUS eventConstruct_IMPL(struct Event *arg_pEvent, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __nvoc_eventConstruct(arg_pEvent, arg_pCallContext, arg_pParams) eventConstruct_IMPL(arg_pEvent, arg_pCallContext, arg_pParams)
+void eventDestruct_IMPL(struct Event *pEvent);
+#define __nvoc_eventDestruct(pEvent) eventDestruct_IMPL(pEvent)
+NV_STATUS eventInit_IMPL(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification);
+#ifdef __nvoc_event_h_disabled
+static inline NV_STATUS eventInit(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification) {
+    NV_ASSERT_FAILED_PRECOMP("Event was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_event_h_disabled
+#define eventInit(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification) eventInit_IMPL(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification)
+#endif //__nvoc_event_h_disabled
+
+#undef PRIVATE_FIELD
+
+
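/*
 * [Editor's sketch -- not part of the patch] The macros and _DISPATCH inlines
 * above give every virtual a three-step call path; for example
 * (exampleRefCount is a hypothetical helper):
 */
static NvU32 exampleRefCount(struct Event *pEvent)
{
    /* 1) eventGetRefCount() expands to eventGetRefCount_DISPATCH();
     * 2) the inline reads the pEvent->__eventGetRefCount__ slot;
     * 3) that slot holds __nvoc_thunk_RsResource_eventGetRefCount, which
     *    upcasts to the embedded RsResource and calls resGetRefCount. */
    return eventGetRefCount(pEvent);
}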
+/**
+ * Mix-in interface for resources that send notifications to events
+ */
+#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct INotifier {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct INotifier *__nvoc_pbase_INotifier;
+    PEVENTNOTIFICATION *(*__inotifyGetNotificationListPtr__)(struct INotifier *);
+    void (*__inotifySetNotificationShare__)(struct INotifier *, struct NotifShare *);
+    struct NotifShare *(*__inotifyGetNotificationShare__)(struct INotifier *);
+    NV_STATUS (*__inotifyUnregisterEvent__)(struct INotifier *, NvHandle, NvHandle, NvHandle, NvHandle);
+    NV_STATUS (*__inotifyGetOrAllocNotifShare__)(struct INotifier *, NvHandle, NvHandle, struct NotifShare **);
+};
+
+#ifndef __NVOC_CLASS_INotifier_TYPEDEF__
+#define __NVOC_CLASS_INotifier_TYPEDEF__
+typedef struct INotifier INotifier;
+#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_INotifier
+#define __nvoc_class_id_INotifier 0xf8f965
+#endif /* __nvoc_class_id_INotifier */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;
+
+#define __staticCast_INotifier(pThis) \
+    ((pThis)->__nvoc_pbase_INotifier)
+
+#ifdef __nvoc_event_h_disabled
+#define __dynamicCast_INotifier(pThis) ((INotifier*)NULL)
+#else //__nvoc_event_h_disabled
+#define __dynamicCast_INotifier(pThis) \
+    ((INotifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(INotifier)))
+#endif //__nvoc_event_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_INotifier(INotifier**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_INotifier(INotifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext);
+#define __objCreate_INotifier(ppNewObj, pParent, createFlags, arg_pCallContext) \
+    __nvoc_objCreate_INotifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext)
+
+#define inotifyGetNotificationListPtr(pNotifier) inotifyGetNotificationListPtr_DISPATCH(pNotifier)
+#define inotifySetNotificationShare(pNotifier, pNotifShare) inotifySetNotificationShare_DISPATCH(pNotifier, pNotifShare)
+#define inotifyGetNotificationShare(pNotifier) inotifyGetNotificationShare_DISPATCH(pNotifier)
+#define inotifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) inotifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
+#define inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) inotifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
+static inline PEVENTNOTIFICATION *inotifyGetNotificationListPtr_DISPATCH(struct INotifier *pNotifier) {
+    return pNotifier->__inotifyGetNotificationListPtr__(pNotifier);
+}
+
+static inline void inotifySetNotificationShare_DISPATCH(struct INotifier *pNotifier, struct NotifShare *pNotifShare) {
+    pNotifier->__inotifySetNotificationShare__(pNotifier, pNotifShare);
+}
+
+static inline struct NotifShare *inotifyGetNotificationShare_DISPATCH(struct INotifier *pNotifier) {
+    return pNotifier->__inotifyGetNotificationShare__(pNotifier);
+}
+
+static inline NV_STATUS inotifyUnregisterEvent_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return pNotifier->__inotifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+static inline NV_STATUS inotifyGetOrAllocNotifShare_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return pNotifier->__inotifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+NV_STATUS inotifyConstruct_IMPL(struct INotifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext);
+#define __nvoc_inotifyConstruct(arg_pNotifier, arg_pCallContext) inotifyConstruct_IMPL(arg_pNotifier, arg_pCallContext)
+void inotifyDestruct_IMPL(struct INotifier *pNotifier);
+#define __nvoc_inotifyDestruct(pNotifier) inotifyDestruct_IMPL(pNotifier)
+PEVENTNOTIFICATION inotifyGetNotificationList_IMPL(struct INotifier *pNotifier);
+#ifdef __nvoc_event_h_disabled
+static inline PEVENTNOTIFICATION inotifyGetNotificationList(struct INotifier *pNotifier) {
+    NV_ASSERT_FAILED_PRECOMP("INotifier was disabled!");
+    return NULL;
+}
+#else //__nvoc_event_h_disabled
+#define inotifyGetNotificationList(pNotifier) inotifyGetNotificationList_IMPL(pNotifier)
+#endif //__nvoc_event_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+/**
+ * Basic implementation for event notification mix-in
+ */
+#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct Notifier {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct INotifier __nvoc_base_INotifier;
+    struct INotifier *__nvoc_pbase_INotifier;
+    struct Notifier *__nvoc_pbase_Notifier;
+    PEVENTNOTIFICATION *(*__notifyGetNotificationListPtr__)(struct Notifier *);
+    struct NotifShare *(*__notifyGetNotificationShare__)(struct Notifier *);
+    void (*__notifySetNotificationShare__)(struct Notifier *, struct NotifShare *);
+    NV_STATUS (*__notifyUnregisterEvent__)(struct Notifier *, NvHandle, NvHandle, NvHandle, NvHandle);
+    NV_STATUS (*__notifyGetOrAllocNotifShare__)(struct Notifier *, NvHandle, NvHandle, struct NotifShare **);
+    struct NotifShare *pNotifierShare;
+};
+
+#ifndef __NVOC_CLASS_Notifier_TYPEDEF__
+#define __NVOC_CLASS_Notifier_TYPEDEF__
+typedef struct Notifier Notifier;
+#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Notifier
+#define __nvoc_class_id_Notifier 0xa8683b
+#endif /* __nvoc_class_id_Notifier */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;
+
+#define __staticCast_Notifier(pThis) \
+    ((pThis)->__nvoc_pbase_Notifier)
+
+#ifdef __nvoc_event_h_disabled
+#define __dynamicCast_Notifier(pThis) ((Notifier*)NULL)
+#else //__nvoc_event_h_disabled
+#define __dynamicCast_Notifier(pThis) \
+    ((Notifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Notifier)))
+#endif //__nvoc_event_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_Notifier(Notifier**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_Notifier(Notifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext);
+#define __objCreate_Notifier(ppNewObj, pParent, createFlags, arg_pCallContext) \
+    __nvoc_objCreate_Notifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext)
+
+#define notifyGetNotificationListPtr(pNotifier) notifyGetNotificationListPtr_DISPATCH(pNotifier)
+#define notifyGetNotificationShare(pNotifier) notifyGetNotificationShare_DISPATCH(pNotifier)
+#define notifySetNotificationShare(pNotifier, pNotifShare) notifySetNotificationShare_DISPATCH(pNotifier, pNotifShare)
+#define notifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) notifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
+#define notifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) notifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
+PEVENTNOTIFICATION *notifyGetNotificationListPtr_IMPL(struct Notifier *pNotifier);
+
+static inline PEVENTNOTIFICATION *notifyGetNotificationListPtr_DISPATCH(struct Notifier *pNotifier) {
+    return pNotifier->__notifyGetNotificationListPtr__(pNotifier);
+}
+
+struct NotifShare *notifyGetNotificationShare_IMPL(struct Notifier *pNotifier);
+
+static inline struct NotifShare *notifyGetNotificationShare_DISPATCH(struct Notifier *pNotifier) {
+    return pNotifier->__notifyGetNotificationShare__(pNotifier);
+}
+
+void notifySetNotificationShare_IMPL(struct Notifier *pNotifier, struct NotifShare *pNotifShare);
+
+static inline void notifySetNotificationShare_DISPATCH(struct Notifier *pNotifier, struct NotifShare *pNotifShare) {
+    pNotifier->__notifySetNotificationShare__(pNotifier, pNotifShare);
+}
+
+NV_STATUS notifyUnregisterEvent_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent);
+
+static inline NV_STATUS notifyUnregisterEvent_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
+    return pNotifier->__notifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
+}
+
+NV_STATUS notifyGetOrAllocNotifShare_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare);
+
+static inline NV_STATUS notifyGetOrAllocNotifShare_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
+    return pNotifier->__notifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
+}
+
+NV_STATUS notifyConstruct_IMPL(struct Notifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext);
+#define __nvoc_notifyConstruct(arg_pNotifier, arg_pCallContext) notifyConstruct_IMPL(arg_pNotifier, arg_pCallContext)
+void notifyDestruct_IMPL(struct Notifier *pNotifier);
+#define __nvoc_notifyDestruct(pNotifier) notifyDestruct_IMPL(pNotifier)
+#undef PRIVATE_FIELD
+
+
+void CliAddSystemEvent(NvU32, NvU32);
+NvBool CliDelObjectEvents(NvHandle hClient, NvHandle hObject);
+NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent);
+NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject,
+                                      struct INotifier **ppNotifier,
+                                      PEVENTNOTIFICATION **pppEventNotification);
+
+NV_STATUS registerEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvP64, NvBool);
+NV_STATUS unregisterEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle);
+NV_STATUS unregisterEventNotificationWithData(PEVENTNOTIFICATION *, NvHandle, NvHandle, NvHandle, NvBool, NvP64);
+NV_STATUS bindEventNotificationToSubdevice(PEVENTNOTIFICATION, NvHandle, NvU32);
+NV_STATUS engineNonStallIntrNotify(OBJGPU *, NvU32);
+NV_STATUS notifyEvents(OBJGPU*, EVENTNOTIFICATION*, NvU32, NvU32, NvU32, NV_STATUS, NvU32);
+NV_STATUS engineNonStallIntrNotifyEvent(OBJGPU *, NvU32, NvHandle);
+
+#endif // _EVENT_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_EVENT_NVOC_H_
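/*
 * [Editor's sketch -- not part of the patch] Using the plain C helpers
 * declared at the end of the header above: fetch the notification list hung
 * off an object and walk it. exampleCountObjectEvents is a hypothetical
 * helper; the handle values are caller-supplied.
 */
static NV_STATUS exampleCountObjectEvents(NvHandle hClient, NvHandle hObject, NvU32 *pCount)
{
    struct INotifier *pNotifier = NULL;
    PEVENTNOTIFICATION *ppList = NULL;
    PEVENTNOTIFICATION pEvent;

    NV_STATUS status = CliGetEventNotificationList(hClient, hObject, &pNotifier, &ppList);
    if (status != NV_OK)
        return status;

    *pCount = 0;
    for (pEvent = *ppList; pEvent != NULL; pEvent = pEvent->Next)
        (*pCount)++;

    return NV_OK;
}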
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c
new file mode 100644
index 0000000..c658d34
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c
@@ -0,0 +1,334 @@
+#define NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_generic_engine_nvoc.h"
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0x4bc329 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
+
+void __nvoc_init_GenericEngineApi(GenericEngineApi*);
+void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi*);
+NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
+void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi*);
+void __nvoc_dtor_GenericEngineApi(GenericEngineApi*);
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi;
+
+static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GenericEngineApi = {
+    /*pClassDef=*/ &__nvoc_class_def_GenericEngineApi,
+    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GenericEngineApi,
+    /*offset=*/ 0,
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_Object = {
+    /*pClassDef=*/ &__nvoc_class_def_Object,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RsResource = {
+    /*pClassDef=*/ &__nvoc_class_def_RsResource,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResourceCommon = {
+    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResource = {
+    /*pClassDef=*/ &__nvoc_class_def_RmResource,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GpuResource = {
+    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource),
+};
+
+static const struct NVOC_CASTINFO __nvoc_castinfo_GenericEngineApi = {
+    /*numRelatives=*/ 6,
+    /*relatives=*/ {
+        &__nvoc_rtti_GenericEngineApi_GenericEngineApi,
+        &__nvoc_rtti_GenericEngineApi_GpuResource,
+        &__nvoc_rtti_GenericEngineApi_RmResource,
+        &__nvoc_rtti_GenericEngineApi_RmResourceCommon,
+        &__nvoc_rtti_GenericEngineApi_RsResource,
+        &__nvoc_rtti_GenericEngineApi_Object,
+    },
+};
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(GenericEngineApi),
+        /*classId=*/ classId(GenericEngineApi),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "GenericEngineApi",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GenericEngineApi,
+    /*pCastInfo=*/ &__nvoc_castinfo_GenericEngineApi,
+    /*pExportInfo=*/ &__nvoc_export_info_GenericEngineApi
+};
+
+static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresMap(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
+    return genapiMap((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams, pCpuMapping);
+}
+
+static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
+    return genapiGetMapAddrSpace((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
+}
+
+static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresControl(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return genapiControl((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams);
+}
+
+static NvBool __nvoc_thunk_GpuResource_genapiShareCallback(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
+    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+static NV_STATUS __nvoc_thunk_GpuResource_genapiUnmap(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
+    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pCpuMapping);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemInterMapParams(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
+    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
+    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), ppMemDesc);
+}
+
+static NvHandle __nvoc_thunk_GpuResource_genapiGetInternalObjectHandle(struct GenericEngineApi *pGpuResource) {
+    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_genapiControlFilter(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pCallContext, pParams);
+}
+
+static void __nvoc_thunk_RsResource_genapiAddAdditionalDependants(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) {
+    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pReference);
+}
+
+static NvU32 __nvoc_thunk_RsResource_genapiGetRefCount(struct GenericEngineApi *pResource) {
+    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_genapiCheckMemInterUnmap(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) {
+    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), bSubdeviceHandleProvided);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_genapiMapTo(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
+    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RmResource_genapiControl_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams);
+}
+
+static NV_STATUS __nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
+    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pGpu, pOffset, pSize);
+}
+
+static NvBool __nvoc_thunk_RsResource_genapiCanCopy(struct GenericEngineApi *pResource) {
+    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_GpuResource_genapiInternalControlForward(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
+    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), command, pParams, size);
+}
+
+static void __nvoc_thunk_RsResource_genapiPreDestruct(struct GenericEngineApi *pResource) {
+    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_genapiUnmapFrom(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
+    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams);
+}
+
+static void __nvoc_thunk_RmResource_genapiControl_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams);
+}
+
+static NV_STATUS __nvoc_thunk_RsResource_genapiControlLookup(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
+    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams, ppEntry);
+}
+
+static NvBool __nvoc_thunk_RmResource_genapiAccessCallback(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
+}
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_GenericEngineApi(GenericEngineApi *pThis) {
+    __nvoc_genapiDestruct(pThis);
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail_GpuResource;
+    __nvoc_init_dataField_GenericEngineApi(pThis);
+
+    status = __nvoc_genapiConstruct(pThis, arg_pCallContext, arg_pParams);
+    if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail__init;
+    goto __nvoc_ctor_GenericEngineApi_exit; // Success
+
+__nvoc_ctor_GenericEngineApi_fail__init:
+    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_GenericEngineApi_fail_GpuResource:
+__nvoc_ctor_GenericEngineApi_exit:
+
+    return status;
+}
+
+static void __nvoc_init_funcTable_GenericEngineApi_1(GenericEngineApi *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+
+    pThis->__genapiMap__ = &genapiMap_IMPL;
+
+    pThis->__genapiGetMapAddrSpace__ = &genapiGetMapAddrSpace_IMPL;
+
+    pThis->__genapiControl__ = &genapiControl_IMPL;
+
+    pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_GenericEngineApi_gpuresMap;
+
+    pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace;
+
+    pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_GenericEngineApi_gpuresControl;
+
+    pThis->__genapiShareCallback__ = &__nvoc_thunk_GpuResource_genapiShareCallback;
+
+    pThis->__genapiUnmap__ = &__nvoc_thunk_GpuResource_genapiUnmap;
+
+    pThis->__genapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_genapiGetMemInterMapParams;
+
+    pThis->__genapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor;
+
+    pThis->__genapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_genapiGetInternalObjectHandle;
+
+    pThis->__genapiControlFilter__ = &__nvoc_thunk_RsResource_genapiControlFilter;
+
+    pThis->__genapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_genapiAddAdditionalDependants;
+
+    pThis->__genapiGetRefCount__ = &__nvoc_thunk_RsResource_genapiGetRefCount;
+
+    pThis->__genapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_genapiCheckMemInterUnmap;
+
+    pThis->__genapiMapTo__ = &__nvoc_thunk_RsResource_genapiMapTo;
+
+    pThis->__genapiControl_Prologue__ = &__nvoc_thunk_RmResource_genapiControl_Prologue;
+
+    pThis->__genapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize;
+
+    pThis->__genapiCanCopy__ = &__nvoc_thunk_RsResource_genapiCanCopy;
+
+    pThis->__genapiInternalControlForward__ = &__nvoc_thunk_GpuResource_genapiInternalControlForward;
+
+    pThis->__genapiPreDestruct__ = &__nvoc_thunk_RsResource_genapiPreDestruct;
+
+    pThis->__genapiUnmapFrom__ = &__nvoc_thunk_RsResource_genapiUnmapFrom;
+
+    pThis->__genapiControl_Epilogue__ = &__nvoc_thunk_RmResource_genapiControl_Epilogue;
+
+    pThis->__genapiControlLookup__ = &__nvoc_thunk_RsResource_genapiControlLookup;
+
+    pThis->__genapiAccessCallback__ = &__nvoc_thunk_RmResource_genapiAccessCallback;
+}
+
+void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi *pThis) {
+    __nvoc_init_funcTable_GenericEngineApi_1(pThis);
+}
+
+void __nvoc_init_GpuResource(GpuResource*);
+void __nvoc_init_GenericEngineApi(GenericEngineApi *pThis) {
+    pThis->__nvoc_pbase_GenericEngineApi = pThis;
+    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
+    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
+    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
+    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
+    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
+    __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
+    __nvoc_init_funcTable_GenericEngineApi(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+    NV_STATUS status;
+    Object *pParentObj;
+    GenericEngineApi *pThis;
+
+    pThis = portMemAllocNonPaged(sizeof(GenericEngineApi));
+    if (pThis == NULL) return NV_ERR_NO_MEMORY;
+
+    portMemSet(pThis, 0, sizeof(GenericEngineApi));
+
+    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GenericEngineApi);
+
+    if (pParent != NULL && !(createFlags & 
NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GenericEngineApi(pThis); + status = __nvoc_ctor_GenericEngineApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GenericEngineApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GenericEngineApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GenericEngineApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h new file mode 100644 index 0000000..0d6059b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h @@ -0,0 +1,237 @@ +#ifndef _G_GENERIC_ENGINE_NVOC_H_ +#define _G_GENERIC_ENGINE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_generic_engine_nvoc.h" + +#ifndef _GENERICENGINEAPI_H_ +#define _GENERICENGINEAPI_H_ + +#include "gpu/gpu_resource.h" + +/*! + * RM internal class providing a generic engine API to RM clients (e.g.: + * GF100_SUBDEVICE_GRAPHICS and GF100_SUBDEVICE_FB). Classes are primarily used + * for exposing BAR0 mappings and controls. 
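+ *
+ * Calls made through the genapi* macros below are virtual in NVOC terms:
+ * genapiControl(pGenApi, pCallContext, pParams), for example, expands to
+ * genapiControl_DISPATCH(), which invokes the per-object function pointer
+ * pGenApi->__genapiControl__ installed by
+ * __nvoc_init_funcTable_GenericEngineApi() (genapiControl_IMPL for this
+ * class); operations inherited from GpuResource/RmResource/RsResource are
+ * routed back to the base classes through the __nvoc_thunk_* adapters,
+ * which rebase the object pointer by the NVOC_RTTI offset of the base.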
+ */ +#ifdef NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GenericEngineApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct GenericEngineApi *__nvoc_pbase_GenericEngineApi; + NV_STATUS (*__genapiMap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__genapiGetMapAddrSpace__)(struct GenericEngineApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__genapiControl__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__genapiShareCallback__)(struct GenericEngineApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__genapiUnmap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__genapiGetMemInterMapParams__)(struct GenericEngineApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__genapiGetMemoryMappingDescriptor__)(struct GenericEngineApi *, struct MEMORY_DESCRIPTOR **); + NvHandle (*__genapiGetInternalObjectHandle__)(struct GenericEngineApi *); + NV_STATUS (*__genapiControlFilter__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__genapiAddAdditionalDependants__)(struct RsClient *, struct GenericEngineApi *, RsResourceRef *); + NvU32 (*__genapiGetRefCount__)(struct GenericEngineApi *); + NV_STATUS (*__genapiCheckMemInterUnmap__)(struct GenericEngineApi *, NvBool); + NV_STATUS (*__genapiMapTo__)(struct GenericEngineApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__genapiControl_Prologue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__genapiGetRegBaseOffsetAndSize__)(struct GenericEngineApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__genapiCanCopy__)(struct GenericEngineApi *); + NV_STATUS (*__genapiInternalControlForward__)(struct GenericEngineApi *, NvU32, void *, NvU32); + void (*__genapiPreDestruct__)(struct GenericEngineApi *); + NV_STATUS (*__genapiUnmapFrom__)(struct GenericEngineApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__genapiControl_Epilogue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__genapiControlLookup__)(struct GenericEngineApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvBool (*__genapiAccessCallback__)(struct GenericEngineApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +#define __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +typedef struct GenericEngineApi GenericEngineApi; +#endif /* __NVOC_CLASS_GenericEngineApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericEngineApi +#define __nvoc_class_id_GenericEngineApi 0x4bc329 +#endif /* __nvoc_class_id_GenericEngineApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi; + +#define __staticCast_GenericEngineApi(pThis) \ + ((pThis)->__nvoc_pbase_GenericEngineApi) + +#ifdef __nvoc_generic_engine_h_disabled +#define __dynamicCast_GenericEngineApi(pThis) ((GenericEngineApi*)NULL) +#else //__nvoc_generic_engine_h_disabled +#define 
__dynamicCast_GenericEngineApi(pThis) \ + ((GenericEngineApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GenericEngineApi))) +#endif //__nvoc_generic_engine_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_GenericEngineApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GenericEngineApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define genapiMap(pGenericEngineApi, pCallContext, pParams, pCpuMapping) genapiMap_DISPATCH(pGenericEngineApi, pCallContext, pParams, pCpuMapping) +#define genapiGetMapAddrSpace(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) genapiGetMapAddrSpace_DISPATCH(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) +#define genapiControl(pGenericEngineApi, pCallContext, pParams) genapiControl_DISPATCH(pGenericEngineApi, pCallContext, pParams) +#define genapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) genapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define genapiUnmap(pGpuResource, pCallContext, pCpuMapping) genapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define genapiGetMemInterMapParams(pRmResource, pParams) genapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define genapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) genapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define genapiGetInternalObjectHandle(pGpuResource) genapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define genapiControlFilter(pResource, pCallContext, pParams) genapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define genapiAddAdditionalDependants(pClient, pResource, pReference) genapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define genapiGetRefCount(pResource) genapiGetRefCount_DISPATCH(pResource) +#define genapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) genapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define genapiMapTo(pResource, pParams) genapiMapTo_DISPATCH(pResource, pParams) +#define genapiControl_Prologue(pResource, pCallContext, pParams) genapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define genapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) genapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define genapiCanCopy(pResource) genapiCanCopy_DISPATCH(pResource) +#define genapiInternalControlForward(pGpuResource, command, pParams, size) genapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define genapiPreDestruct(pResource) genapiPreDestruct_DISPATCH(pResource) +#define genapiUnmapFrom(pResource, pParams) genapiUnmapFrom_DISPATCH(pResource, pParams) +#define genapiControl_Epilogue(pResource, pCallContext, pParams) genapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define genapiControlLookup(pResource, pParams, ppEntry) genapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define genapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) genapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS genapiMap_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT 
*pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS genapiMap_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGenericEngineApi->__genapiMap__(pGenericEngineApi, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS genapiGetMapAddrSpace_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS genapiGetMapAddrSpace_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGenericEngineApi->__genapiGetMapAddrSpace__(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS genapiControl_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS genapiControl_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGenericEngineApi->__genapiControl__(pGenericEngineApi, pCallContext, pParams); +} + +static inline NvBool genapiShareCallback_DISPATCH(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__genapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS genapiUnmap_DISPATCH(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__genapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS genapiGetMemInterMapParams_DISPATCH(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__genapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS genapiGetMemoryMappingDescriptor_DISPATCH(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__genapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvHandle genapiGetInternalObjectHandle_DISPATCH(struct GenericEngineApi *pGpuResource) { + return pGpuResource->__genapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS genapiControlFilter_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__genapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void genapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) { + pResource->__genapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 genapiGetRefCount_DISPATCH(struct GenericEngineApi *pResource) { + return pResource->__genapiGetRefCount__(pResource); +} + +static inline NV_STATUS genapiCheckMemInterUnmap_DISPATCH(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__genapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS genapiMapTo_DISPATCH(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__genapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS 
genapiControl_Prologue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__genapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS genapiGetRegBaseOffsetAndSize_DISPATCH(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__genapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool genapiCanCopy_DISPATCH(struct GenericEngineApi *pResource) { + return pResource->__genapiCanCopy__(pResource); +} + +static inline NV_STATUS genapiInternalControlForward_DISPATCH(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__genapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void genapiPreDestruct_DISPATCH(struct GenericEngineApi *pResource) { + pResource->__genapiPreDestruct__(pResource); +} + +static inline NV_STATUS genapiUnmapFrom_DISPATCH(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__genapiUnmapFrom__(pResource, pParams); +} + +static inline void genapiControl_Epilogue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__genapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS genapiControlLookup_DISPATCH(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__genapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvBool genapiAccessCallback_DISPATCH(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__genapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS genapiConstruct_IMPL(struct GenericEngineApi *arg_pGenericEngineApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_genapiConstruct(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) genapiConstruct_IMPL(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) +void genapiDestruct_IMPL(struct GenericEngineApi *pGenericEngineApi); +#define __nvoc_genapiDestruct(pGenericEngineApi) genapiDestruct_IMPL(pGenericEngineApi) +#undef PRIVATE_FIELD + + +#endif // _GENERICENGINEAPI_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GENERIC_ENGINE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c new file mode 100644 index 0000000..76fc7e7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_T234D(POBJGPU pGpu, NvU32 *pNumClassDescriptors) +{ + static const CLASSDESCRIPTOR halT234DClassDescriptorList[] = { + { GF100_HDACODEC, ENG_HDACODEC }, + { NV01_MEMORY_SYNCPOINT, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC670_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC77F_ANY_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + }; + + #define HALT234D_NUM_CLASS_DESCS (sizeof(halT234DClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + #define HALT234D_NUM_CLASSES 16 + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALT234D_NUM_CLASSES); + + *pNumClassDescriptors = HALT234D_NUM_CLASS_DESCS; + return halT234DClassDescriptorList; +} + + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c new file mode 100644 index 0000000..b1d3150 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_db_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xcdd250 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_GpuDb(GpuDb*); +void __nvoc_init_funcTable_GpuDb(GpuDb*); +NV_STATUS __nvoc_ctor_GpuDb(GpuDb*); +void __nvoc_init_dataField_GpuDb(GpuDb*); +void __nvoc_dtor_GpuDb(GpuDb*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb; + +static const struct NVOC_RTTI __nvoc_rtti_GpuDb_GpuDb = { + /*pClassDef=*/ &__nvoc_class_def_GpuDb, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuDb, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuDb_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + 
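+ // Base-class RTTI entries use the shared __nvoc_destructFromBase
+ // trampoline; only the leaf entry above points at the class's own
+ // destructor.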
/*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuDb, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GpuDb = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_GpuDb_GpuDb, + &__nvoc_rtti_GpuDb_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuDb), + /*classId=*/ classId(GpuDb), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuDb", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuDb, + /*pCastInfo=*/ &__nvoc_castinfo_GpuDb, + /*pExportInfo=*/ &__nvoc_export_info_GpuDb +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_GpuDb(GpuDb *pThis) { + __nvoc_gpudbDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuDb(GpuDb *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_GpuDb(GpuDb *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail_Object; + __nvoc_init_dataField_GpuDb(pThis); + + status = __nvoc_gpudbConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail__init; + goto __nvoc_ctor_GpuDb_exit; // Success + +__nvoc_ctor_GpuDb_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_GpuDb_fail_Object: +__nvoc_ctor_GpuDb_exit: + + return status; +} + +static void __nvoc_init_funcTable_GpuDb_1(GpuDb *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_GpuDb(GpuDb *pThis) { + __nvoc_init_funcTable_GpuDb_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_GpuDb(GpuDb *pThis) { + pThis->__nvoc_pbase_GpuDb = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_GpuDb(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + GpuDb *pThis; + + pThis = portMemAllocNonPaged(sizeof(GpuDb)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GpuDb)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuDb); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GpuDb(pThis); + status = __nvoc_ctor_GpuDb(pThis); + if (status != NV_OK) goto __nvoc_objCreate_GpuDb_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GpuDb_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_GpuDb(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h new file mode 100644 index 0000000..3013f66 --- /dev/null +++ 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h @@ -0,0 +1,154 @@ +#ifndef _G_GPU_DB_NVOC_H_ +#define _G_GPU_DB_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_db_nvoc.h" + +#ifndef GPU_DB_H +#define GPU_DB_H + +#include "core/core.h" +#include "containers/list.h" +#include "gpu/gpu_uuid.h" + +typedef struct NBADDR NBADDR; + +// **************************************************************************** +// Type Definitions +// **************************************************************************** +// +// The GPU database object is used to encapsulate the GPUINFO +// + +/*! + * @brief Compute policy data for a GPU + * Saved policy information for a GPU that can be retrieved later + */ +typedef struct GPU_COMPUTE_POLICY_INFO +{ + // + // Timeslice config for channels/TSG's on a runlist. The timeslice configs + // are restricted to four levels : default, short, medium and long. + // + NvU32 timeslice; + // Future policies to be added here +} GPU_COMPUTE_POLICY_INFO; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 device; + NvU8 function; + NvBool bValid; +} PCI_PORT_INFO; + +#define GPUDB_CLK_PROP_TOP_POLS_COUNT 1 + +/*! 
+ * @brief Clock Propagation Topology Policies control data + */ +typedef struct +{ + NvU8 chosenIdx[GPUDB_CLK_PROP_TOP_POLS_COUNT]; +} GPU_CLK_PROP_TOP_POLS_CONTROL; + +typedef struct +{ + NvU8 uuid[RM_SHA1_GID_SIZE]; + PCI_PORT_INFO pciPortInfo; + PCI_PORT_INFO upstreamPciPortInfo; + GPU_COMPUTE_POLICY_INFO policyInfo; + NvBool bShutdownState; + GPU_CLK_PROP_TOP_POLS_CONTROL clkPropTopPolsControl; +} GPU_INFO_LIST_NODE, *PGPU_INFO_LIST_NODE; + +MAKE_LIST(GpuInfoList, GPU_INFO_LIST_NODE); + +#ifdef NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GpuDb { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct GpuDb *__nvoc_pbase_GpuDb; + GpuInfoList gpuList; + PORT_MUTEX *pLock; +}; + +#ifndef __NVOC_CLASS_GpuDb_TYPEDEF__ +#define __NVOC_CLASS_GpuDb_TYPEDEF__ +typedef struct GpuDb GpuDb; +#endif /* __NVOC_CLASS_GpuDb_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuDb +#define __nvoc_class_id_GpuDb 0xcdd250 +#endif /* __nvoc_class_id_GpuDb */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb; + +#define __staticCast_GpuDb(pThis) \ + ((pThis)->__nvoc_pbase_GpuDb) + +#ifdef __nvoc_gpu_db_h_disabled +#define __dynamicCast_GpuDb(pThis) ((GpuDb*)NULL) +#else //__nvoc_gpu_db_h_disabled +#define __dynamicCast_GpuDb(pThis) \ + ((GpuDb*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuDb))) +#endif //__nvoc_gpu_db_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuDb(GpuDb**, Dynamic*, NvU32); +#define __objCreate_GpuDb(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_GpuDb((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS gpudbConstruct_IMPL(struct GpuDb *arg_pGpuDb); +#define __nvoc_gpudbConstruct(arg_pGpuDb) gpudbConstruct_IMPL(arg_pGpuDb) +void gpudbDestruct_IMPL(struct GpuDb *pGpuDb); +#define __nvoc_gpudbDestruct(pGpuDb) gpudbDestruct_IMPL(pGpuDb) +#undef PRIVATE_FIELD + + +NV_STATUS gpudbRegisterGpu(const NvU8 *pUuid, const NBADDR *pUpstreamPortPciInfo, NvU64 pciInfo); +NV_STATUS gpudbSetGpuComputePolicyConfig(const NvU8 *uuid, NvU32 policyType, GPU_COMPUTE_POLICY_INFO *policyInfo); +NV_STATUS gpudbGetGpuComputePolicyConfigs(const NvU8 *uuid, GPU_COMPUTE_POLICY_INFO *policyInfo); +NV_STATUS gpudbSetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl); +NV_STATUS gpudbGetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl); +NV_STATUS gpudbSetShutdownState(const NvU8 *pUuid); +#endif // GPU_DB_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_DB_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c new file mode 100644 index 0000000..3e792c4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c @@ -0,0 +1,148 @@ +#define NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_group_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe40531 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void 
__nvoc_init_OBJGPUGRP(OBJGPUGRP*); +void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP*); +NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP*); +void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP*); +void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_OBJGPUGRP = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPUGRP, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUGRP, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPUGRP, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUGRP = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJGPUGRP_OBJGPUGRP, + &__nvoc_rtti_OBJGPUGRP_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUGRP), + /*classId=*/ classId(OBJGPUGRP), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUGRP", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUGRP, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPUGRP, + /*pExportInfo=*/ &__nvoc_export_info_OBJGPUGRP +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUGRP_fail_Object; + __nvoc_init_dataField_OBJGPUGRP(pThis); + goto __nvoc_ctor_OBJGPUGRP_exit; // Success + +__nvoc_ctor_OBJGPUGRP_fail_Object: +__nvoc_ctor_OBJGPUGRP_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPUGRP_1(OBJGPUGRP *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP *pThis) { + __nvoc_init_funcTable_OBJGPUGRP_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJGPUGRP(OBJGPUGRP *pThis) { + pThis->__nvoc_pbase_OBJGPUGRP = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJGPUGRP(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJGPUGRP *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGPUGRP)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPUGRP)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUGRP); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGPUGRP(pThis); + status = __nvoc_ctor_OBJGPUGRP(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUGRP_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGPUGRP_cleanup: + // do not call destructors here since the constructor already called them 
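+ // (for OBJGPUGRP the only failure point is __nvoc_ctor_Object, so
+ // nothing constructed is left to unwind; just release the allocation)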
+ portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUGRP(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h new file mode 100644 index 0000000..dca3585 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h @@ -0,0 +1,308 @@ +#ifndef _G_GPU_GROUP_NVOC_H_ +#define _G_GPU_GROUP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_group_nvoc.h" + +#ifndef GPU_GROUP_H +#define GPU_GROUP_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for GPUGRP Object. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/object.h" +#include "nvlimits.h" + +struct OBJVASPACE; +struct OBJGPU; + +/*! + * @brief Specialization of @ref FOR_EACH_INDEX_IN_MASK for looping + * over each GPU in an instance bitmask and processing the GPU in + * unicast mode. + * + * @note This macro is constructed to handle 'continue' and 'break' + * statements but not 'return.' Do NOT return directly from the loop - + * use status variable and 'break' to safely abort. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * @param[in,out] pGpu Local GPU variable to use. + * @param[in] mask GPU instance bitmask. 
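+ * @param[in]  pSys  OBJSYS pointer; accepted for call-site uniformity
+ *                   but not referenced by the macro body.
+ *
+ * Illustrative usage (a sketch; doPerGpuWork and gpuMask are placeholder
+ * names, not part of this header):
+ *
+ *     OBJGPU   *pGpu   = NULL;
+ *     NV_STATUS status = NV_OK;
+ *     FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, gpuMask)
+ *     {
+ *         status = doPerGpuWork(pGpu);  // runs unicast: BC forced off
+ *         if (status != NV_OK)
+ *             break;                    // 'break' is safe; 'return' is not
+ *     }
+ *     FOR_EACH_GPU_IN_MASK_UC_END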
+ */ +#define FOR_EACH_GPU_IN_MASK_UC(maskWidth, pSys, pGpu, mask) \ +{ \ + NvU32 gpuInstance; \ + NvBool bOrigBcState = NV_FALSE; \ + NvBool bEntryBcState = NV_FALSE; \ + OBJGPU *pEntryGpu = pGpu; \ + pGpu = NULL; \ + if (pEntryGpu != NULL) \ + { \ + bEntryBcState = gpumgrGetBcEnabledStatus(pEntryGpu); \ + } \ + FOR_EACH_INDEX_IN_MASK(maskWidth, gpuInstance, mask) \ + { \ + if (NULL != pGpu) /* continue */ \ + { \ + gpumgrSetBcEnabledStatus(pGpu, bOrigBcState); \ + } \ + pGpu = gpumgrGetGpu(gpuInstance); \ + if (pGpu == NULL) \ + { /* We should never hit this assert */ \ + NV_ASSERT(0); /* But it occurs very rarely */ \ + continue; /* It needs to be debugged */ \ + } \ + bOrigBcState = gpumgrGetBcEnabledStatus(pGpu); \ + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); \ + +#define FOR_EACH_GPU_IN_MASK_UC_END \ + } \ + FOR_EACH_INDEX_IN_MASK_END \ + if (NULL != pGpu) /* break */ \ + { \ + gpumgrSetBcEnabledStatus(pGpu, bOrigBcState); \ + pGpu = NULL; \ + } \ + if (pEntryGpu != NULL) \ + { \ + NV_ASSERT(bEntryBcState == gpumgrGetBcEnabledStatus(pEntryGpu));\ + pGpu = pEntryGpu; \ + } \ +} + +typedef struct _def_vid_link_node +{ + /*! + * GPU instance for this node + */ + NvU32 gpuInstance; + /*! + * DrPort that receives data from Child GPU + */ + NvU32 ParentDrPort; + /*! + * DrPort that sources data to a Parent GPU + */ + NvU32 ChildDrPort; +} SLILINKNODE; + +typedef struct OBJGPUGRP *POBJGPUGRP; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + + + +#ifdef NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJGPUGRP { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJGPUGRP *__nvoc_pbase_OBJGPUGRP; + NvU32 gpuMask; + NvU32 gpuSliLinkMask; + NvU32 linkingGpuMask; + NvU32 attachedGpuMaskAtLinking; + SLILINKNODE SliLinkOrder[8]; + NvU32 ConnectionCount; + NvU32 flags; + NvU32 displayFlags; + NvBool bcEnabled; + struct OBJGPU *parentGpu; + struct OBJVASPACE *pGlobalVASpace; +}; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP; + +#define __staticCast_OBJGPUGRP(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPUGRP) + +#ifdef __nvoc_gpu_group_h_disabled +#define __dynamicCast_OBJGPUGRP(pThis) ((OBJGPUGRP*)NULL) +#else //__nvoc_gpu_group_h_disabled +#define __dynamicCast_OBJGPUGRP(pThis) \ + ((OBJGPUGRP*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUGRP))) +#endif //__nvoc_gpu_group_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32); +#define __objCreate_OBJGPUGRP(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGPUGRP((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS gpugrpCreate_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpCreate(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) { + 
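+ // __nvoc_gpu_group_h_disabled stub: assert on use and report
+ // NV_ERR_NOT_SUPPORTED (the same pattern applies to every gpugrp*
+ // accessor below).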
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpCreate(pGpuGrp, gpuMask) gpugrpCreate_IMPL(pGpuGrp, gpuMask) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpDestroy_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpDestroy(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpDestroy(pGpuGrp) gpugrpDestroy_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +NvU32 gpugrpGetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline NvU32 gpugrpGetGpuMask(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return 0; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGpuMask(pGpuGrp) gpugrpGetGpuMask_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask); +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetGpuMask(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetGpuMask(pGpuGrp, gpuMask) gpugrpSetGpuMask_IMPL(pGpuGrp, gpuMask) +#endif //__nvoc_gpu_group_h_disabled + +NvBool gpugrpGetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline NvBool gpugrpGetBcEnabledState(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetBcEnabledState(pGpuGrp) gpugrpGetBcEnabledState_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp, NvBool bcState); +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetBcEnabledState(struct OBJGPUGRP *pGpuGrp, NvBool bcState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetBcEnabledState(pGpuGrp, bcState) gpugrpSetBcEnabledState_IMPL(pGpuGrp, bcState) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu); +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetParentGpu(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetParentGpu(pGpuGrp, pParentGpu) gpugrpSetParentGpu_IMPL(pGpuGrp, pParentGpu) +#endif //__nvoc_gpu_group_h_disabled + +struct OBJGPU *gpugrpGetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline struct OBJGPU *gpugrpGetParentGpu(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NULL; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetParentGpu(pGpuGrp) gpugrpGetParentGpu_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpCreateGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpCreateGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS) { + 
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpCreateGlobalVASpace(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) gpugrpCreateGlobalVASpace_IMPL(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpDestroyGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpDestroyGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpDestroyGlobalVASpace(pGpuGrp, pGpu) gpugrpDestroyGlobalVASpace_IMPL(pGpuGrp, pGpu) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpGetGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpGetGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGlobalVASpace(pGpuGrp, ppGlobalVAS) gpugrpGetGlobalVASpace_IMPL(pGpuGrp, ppGlobalVAS) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpGetGpuFromSubDeviceInstance_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpGetGpuFromSubDeviceInstance(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGpuFromSubDeviceInstance(pGpuGrp, subDeviceInst, ppGpu) gpugrpGetGpuFromSubDeviceInstance_IMPL(pGpuGrp, subDeviceInst, ppGpu) +#endif //__nvoc_gpu_group_h_disabled + +#undef PRIVATE_FIELD + + +#endif // GPU_GROUP_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_GROUP_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c new file mode 100644 index 0000000..90b0c90 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c @@ -0,0 +1,97 @@ +#define NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_halspec_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x34a6d6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +void __nvoc_init_RmHalspecOwner(RmHalspecOwner*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner*); +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner; + +static const struct NVOC_RTTI __nvoc_rtti_RmHalspecOwner_RmHalspecOwner = { + /*pClassDef=*/ &__nvoc_class_def_RmHalspecOwner, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmHalspecOwner, + /*offset=*/ 0, 
+}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmHalspecOwner = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_RmHalspecOwner_RmHalspecOwner, + }, +}; + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmHalspecOwner), + /*classId=*/ classId(RmHalspecOwner), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmHalspecOwner", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_RmHalspecOwner, + /*pExportInfo=*/ &__nvoc_export_info_RmHalspecOwner +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_RmHalspecOwner(pThis); + goto __nvoc_ctor_RmHalspecOwner_exit; // Success + +__nvoc_ctor_RmHalspecOwner_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmHalspecOwner_1(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner *pThis) { + __nvoc_init_funcTable_RmHalspecOwner_1(pThis); +} + +void __nvoc_init_RmHalspecOwner(RmHalspecOwner *pThis, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver) { + pThis->__nvoc_pbase_RmHalspecOwner = pThis; + __nvoc_init_halspec_ChipHal(&pThis->chipHal, ChipHal_arch, ChipHal_impl, ChipHal_hidrev); + __nvoc_init_halspec_RmVariantHal(&pThis->rmVariantHal, RmVariantHal_rmVariant); + __nvoc_init_halspec_DispIpHal(&pThis->dispIpHal, DispIpHal_ipver); + __nvoc_init_funcTable_RmHalspecOwner(pThis); +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h new file mode 100644 index 0000000..8b7d5d3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h @@ -0,0 +1,91 @@ +#ifndef _G_GPU_HALSPEC_NVOC_H_ +#define _G_GPU_HALSPEC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_gpu_halspec_nvoc.h" + +#ifndef GPU_HALSPEC_H +#define GPU_HALSPEC_H + +#include "g_chips2halspec.h" // NVOC halspec, generated by rmconfig.pl + +#ifdef NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmHalspecOwner { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; + struct ChipHal chipHal; + struct RmVariantHal rmVariantHal; + struct DispIpHal dispIpHal; +}; + +#ifndef __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ +#define __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ +typedef struct RmHalspecOwner RmHalspecOwner; +#endif /* __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmHalspecOwner +#define __nvoc_class_id_RmHalspecOwner 0x34a6d6 +#endif /* __nvoc_class_id_RmHalspecOwner */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +#define __staticCast_RmHalspecOwner(pThis) \ + ((pThis)->__nvoc_pbase_RmHalspecOwner) + +#ifdef __nvoc_gpu_halspec_h_disabled +#define __dynamicCast_RmHalspecOwner(pThis) ((RmHalspecOwner*)NULL) +#else //__nvoc_gpu_halspec_h_disabled +#define __dynamicCast_RmHalspecOwner(pThis) \ + ((RmHalspecOwner*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmHalspecOwner))) +#endif //__nvoc_gpu_halspec_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +#define __objCreate_RmHalspecOwner(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver) \ + __nvoc_objCreate_RmHalspecOwner((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver) + +#undef PRIVATE_FIELD + + +#endif // GPU_HALSPEC_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_HALSPEC_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c new file mode 100644 index 0000000..2e30ec5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c @@ -0,0 +1,322 @@ +#define NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_mgmt_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x376305 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_GpuManagementApi(GpuManagementApi*); +void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi*); +NV_STATUS 
__nvoc_ctor_GpuManagementApi(GpuManagementApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi*); +void __nvoc_dtor_GpuManagementApi(GpuManagementApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_GpuManagementApi = { + /*pClassDef=*/ &__nvoc_class_def_GpuManagementApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuManagementApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GpuManagementApi = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_GpuManagementApi_GpuManagementApi, + &__nvoc_rtti_GpuManagementApi_RmResource, + &__nvoc_rtti_GpuManagementApi_RmResourceCommon, + &__nvoc_rtti_GpuManagementApi_RsResource, + &__nvoc_rtti_GpuManagementApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuManagementApi), + /*classId=*/ classId(GpuManagementApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuManagementApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuManagementApi, + /*pCastInfo=*/ &__nvoc_castinfo_GpuManagementApi, + /*pExportInfo=*/ &__nvoc_export_info_GpuManagementApi +}; + +static NvBool __nvoc_thunk_RmResource_gpumgmtapiShareCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControl(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams(struct GpuManagementApi 
*pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_gpumgmtapiGetRefCount(struct GpuManagementApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlFilter(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiControl_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_gpumgmtapiCanCopy(struct GpuManagementApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMapTo(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_gpumgmtapiPreDestruct(struct GpuManagementApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmapFrom(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlLookup(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, 
const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_gpumgmtapiAccessCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GpuManagementApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gpumgmtapiCtrlCmdSetShutdownState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x200101u, + /*paramSize=*/ sizeof(NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GpuManagementApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gpumgmtapiCtrlCmdSetShutdownState" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_GpuManagementApi +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_GpuManagementApi(GpuManagementApi *pThis) { + __nvoc_gpumgmtapiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail_RmResource; + __nvoc_init_dataField_GpuManagementApi(pThis); + + status = __nvoc_gpumgmtapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail__init; + goto __nvoc_ctor_GpuManagementApi_exit; // Success + +__nvoc_ctor_GpuManagementApi_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_GpuManagementApi_fail_RmResource: +__nvoc_ctor_GpuManagementApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_GpuManagementApi_1(GpuManagementApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__gpumgmtapiCtrlCmdSetShutdownState__ = &gpumgmtapiCtrlCmdSetShutdownState_IMPL; +#endif + + pThis->__gpumgmtapiShareCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiShareCallback; + + pThis->__gpumgmtapiCheckMemInterUnmap__ = 
&__nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap; + + pThis->__gpumgmtapiControl__ = &__nvoc_thunk_RsResource_gpumgmtapiControl; + + pThis->__gpumgmtapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams; + + pThis->__gpumgmtapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor; + + pThis->__gpumgmtapiGetRefCount__ = &__nvoc_thunk_RsResource_gpumgmtapiGetRefCount; + + pThis->__gpumgmtapiControlFilter__ = &__nvoc_thunk_RsResource_gpumgmtapiControlFilter; + + pThis->__gpumgmtapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants; + + pThis->__gpumgmtapiUnmap__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmap; + + pThis->__gpumgmtapiControl_Prologue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Prologue; + + pThis->__gpumgmtapiCanCopy__ = &__nvoc_thunk_RsResource_gpumgmtapiCanCopy; + + pThis->__gpumgmtapiMapTo__ = &__nvoc_thunk_RsResource_gpumgmtapiMapTo; + + pThis->__gpumgmtapiPreDestruct__ = &__nvoc_thunk_RsResource_gpumgmtapiPreDestruct; + + pThis->__gpumgmtapiUnmapFrom__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmapFrom; + + pThis->__gpumgmtapiControl_Epilogue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue; + + pThis->__gpumgmtapiControlLookup__ = &__nvoc_thunk_RsResource_gpumgmtapiControlLookup; + + pThis->__gpumgmtapiMap__ = &__nvoc_thunk_RsResource_gpumgmtapiMap; + + pThis->__gpumgmtapiAccessCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiAccessCallback; +} + +void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi *pThis) { + __nvoc_init_funcTable_GpuManagementApi_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_GpuManagementApi(GpuManagementApi *pThis) { + pThis->__nvoc_pbase_GpuManagementApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_GpuManagementApi(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + GpuManagementApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(GpuManagementApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GpuManagementApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuManagementApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GpuManagementApi(pThis); + status = __nvoc_ctor_GpuManagementApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GpuManagementApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GpuManagementApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS 
__nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GpuManagementApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h new file mode 100644 index 0000000..72e3eba --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h @@ -0,0 +1,221 @@ +#ifndef _G_GPU_MGMT_API_NVOC_H_ +#define _G_GPU_MGMT_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_gpu_mgmt_api_nvoc.h" + +#ifndef GPU_MGMT_API_H +#define GPU_MGMT_API_H + +#include "rmapi/resource.h" +#include "ctrl/ctrl0020.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +// +// GpuManagementApi class information +// +// This is a global GPU class that will help us route IOCTLs to probed +// and persistent GPU state +// + +#ifdef NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GpuManagementApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuManagementApi *__nvoc_pbase_GpuManagementApi; + NV_STATUS (*__gpumgmtapiCtrlCmdSetShutdownState__)(struct GpuManagementApi *, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *); + NvBool (*__gpumgmtapiShareCallback__)(struct GpuManagementApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__gpumgmtapiCheckMemInterUnmap__)(struct GpuManagementApi *, NvBool); + NV_STATUS (*__gpumgmtapiControl__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpumgmtapiGetMemInterMapParams__)(struct GpuManagementApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__gpumgmtapiGetMemoryMappingDescriptor__)(struct GpuManagementApi *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__gpumgmtapiGetRefCount__)(struct GpuManagementApi *); + NV_STATUS (*__gpumgmtapiControlFilter__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__gpumgmtapiAddAdditionalDependants__)(struct RsClient *, struct GpuManagementApi *, RsResourceRef *); + NV_STATUS (*__gpumgmtapiUnmap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__gpumgmtapiControl_Prologue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__gpumgmtapiCanCopy__)(struct GpuManagementApi *); + NV_STATUS (*__gpumgmtapiMapTo__)(struct GpuManagementApi *, RS_RES_MAP_TO_PARAMS *); + void (*__gpumgmtapiPreDestruct__)(struct GpuManagementApi *); + NV_STATUS (*__gpumgmtapiUnmapFrom__)(struct GpuManagementApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__gpumgmtapiControl_Epilogue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpumgmtapiControlLookup__)(struct GpuManagementApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__gpumgmtapiMap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__gpumgmtapiAccessCallback__)(struct GpuManagementApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +#define __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +typedef struct GpuManagementApi GpuManagementApi; +#endif /* __NVOC_CLASS_GpuManagementApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuManagementApi +#define __nvoc_class_id_GpuManagementApi 0x376305 +#endif /* __nvoc_class_id_GpuManagementApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi; + +#define __staticCast_GpuManagementApi(pThis) \
((pThis)->__nvoc_pbase_GpuManagementApi) + +#ifdef __nvoc_gpu_mgmt_api_h_disabled +#define __dynamicCast_GpuManagementApi(pThis) ((GpuManagementApi*)NULL) +#else //__nvoc_gpu_mgmt_api_h_disabled +#define __dynamicCast_GpuManagementApi(pThis) \ + ((GpuManagementApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuManagementApi))) +#endif //__nvoc_gpu_mgmt_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_GpuManagementApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GpuManagementApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define gpumgmtapiCtrlCmdSetShutdownState(pGpuMgmt, pParams) gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(pGpuMgmt, pParams) +#define gpumgmtapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) gpumgmtapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define gpumgmtapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpumgmtapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define gpumgmtapiControl(pResource, pCallContext, pParams) gpumgmtapiControl_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiGetMemInterMapParams(pRmResource, pParams) gpumgmtapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define gpumgmtapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define gpumgmtapiGetRefCount(pResource) gpumgmtapiGetRefCount_DISPATCH(pResource) +#define gpumgmtapiControlFilter(pResource, pCallContext, pParams) gpumgmtapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiAddAdditionalDependants(pClient, pResource, pReference) gpumgmtapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define gpumgmtapiUnmap(pResource, pCallContext, pCpuMapping) gpumgmtapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define gpumgmtapiControl_Prologue(pResource, pCallContext, pParams) gpumgmtapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiCanCopy(pResource) gpumgmtapiCanCopy_DISPATCH(pResource) +#define gpumgmtapiMapTo(pResource, pParams) gpumgmtapiMapTo_DISPATCH(pResource, pParams) +#define gpumgmtapiPreDestruct(pResource) gpumgmtapiPreDestruct_DISPATCH(pResource) +#define gpumgmtapiUnmapFrom(pResource, pParams) gpumgmtapiUnmapFrom_DISPATCH(pResource, pParams) +#define gpumgmtapiControl_Epilogue(pResource, pCallContext, pParams) gpumgmtapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiControlLookup(pResource, pParams, ppEntry) gpumgmtapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define gpumgmtapiMap(pResource, pCallContext, pParams, pCpuMapping) gpumgmtapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define gpumgmtapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpumgmtapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_IMPL(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams); + +static inline NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(struct GpuManagementApi *pGpuMgmt, 
NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams) { + return pGpuMgmt->__gpumgmtapiCtrlCmdSetShutdownState__(pGpuMgmt, pParams); +} + +static inline NvBool gpumgmtapiShareCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__gpumgmtapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS gpumgmtapiCheckMemInterUnmap_DISPATCH(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__gpumgmtapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS gpumgmtapiControl_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpumgmtapiControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpumgmtapiGetMemInterMapParams_DISPATCH(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__gpumgmtapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__gpumgmtapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 gpumgmtapiGetRefCount_DISPATCH(struct GpuManagementApi *pResource) { + return pResource->__gpumgmtapiGetRefCount__(pResource); +} + +static inline NV_STATUS gpumgmtapiControlFilter_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpumgmtapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void gpumgmtapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) { + pResource->__gpumgmtapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS gpumgmtapiUnmap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__gpumgmtapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS gpumgmtapiControl_Prologue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpumgmtapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool gpumgmtapiCanCopy_DISPATCH(struct GpuManagementApi *pResource) { + return pResource->__gpumgmtapiCanCopy__(pResource); +} + +static inline NV_STATUS gpumgmtapiMapTo_DISPATCH(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__gpumgmtapiMapTo__(pResource, pParams); +} + +static inline void gpumgmtapiPreDestruct_DISPATCH(struct GpuManagementApi *pResource) { + pResource->__gpumgmtapiPreDestruct__(pResource); +} + +static inline NV_STATUS gpumgmtapiUnmapFrom_DISPATCH(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__gpumgmtapiUnmapFrom__(pResource, pParams); +} + +static inline void gpumgmtapiControl_Epilogue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__gpumgmtapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS 
gpumgmtapiControlLookup_DISPATCH(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__gpumgmtapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS gpumgmtapiMap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__gpumgmtapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool gpumgmtapiAccessCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__gpumgmtapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS gpumgmtapiConstruct_IMPL(struct GpuManagementApi *arg_pGpuMgmt, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_gpumgmtapiConstruct(arg_pGpuMgmt, arg_pCallContext, arg_pParams) gpumgmtapiConstruct_IMPL(arg_pGpuMgmt, arg_pCallContext, arg_pParams) +void gpumgmtapiDestruct_IMPL(struct GpuManagementApi *pGpuMgmt); +#define __nvoc_gpumgmtapiDestruct(pGpuMgmt) gpumgmtapiDestruct_IMPL(pGpuMgmt) +#undef PRIVATE_FIELD + + +#endif // GPU_MGMT_API_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_MGMT_API_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c new file mode 100644 index 0000000..8f939ac --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xcf1b25 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR*); +NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMGR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_OBJGPUMGR = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPUMGR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUMGR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPUMGR, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUMGR = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJGPUMGR_OBJGPUMGR, + &__nvoc_rtti_OBJGPUMGR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUMGR), + /*classId=*/ classId(OBJGPUMGR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUMGR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUMGR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPUMGR, + /*pExportInfo=*/ &__nvoc_export_info_OBJGPUMGR +}; + +const struct NVOC_EXPORT_INFO 
__nvoc_export_info_OBJGPUMGR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR *pThis) { + __nvoc_gpumgrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail_Object; + __nvoc_init_dataField_OBJGPUMGR(pThis); + + status = __nvoc_gpumgrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail__init; + goto __nvoc_ctor_OBJGPUMGR_exit; // Success + +__nvoc_ctor_OBJGPUMGR_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJGPUMGR_fail_Object: +__nvoc_ctor_OBJGPUMGR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPUMGR_1(OBJGPUMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR *pThis) { + __nvoc_init_funcTable_OBJGPUMGR_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJGPUMGR(OBJGPUMGR *pThis) { + pThis->__nvoc_pbase_OBJGPUMGR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJGPUMGR(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJGPUMGR *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGPUMGR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPUMGR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUMGR); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGPUMGR(pThis); + status = __nvoc_ctor_OBJGPUMGR(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUMGR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGPUMGR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUMGR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h new file mode 100644 index 0000000..2786a55 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h @@ -0,0 +1,425 @@ +#ifndef _G_GPU_MGR_NVOC_H_ +#define _G_GPU_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_mgr_nvoc.h" + +#ifndef _GPUMGR_H_ +#define _GPUMGR_H_ + +// +// GPU Manager Defines and Structures +// + +struct OBJGPU; +#include "core/core.h" +#include "core/system.h" +#include "nvlimits.h" +#include "gpu_mgr/gpu_group.h" +#include "gpu/gpu_uuid.h" +#include "gpu/gpu_device_mapping.h" +#include "gpu/gpu_access.h" +#include "ctrl/ctrl0000/ctrl0000gpu.h" +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "nvoc/utility.h" +#include "nv_firmware_types.h" + +#include "class/cl2080.h" // NV2080_ENGINE_TYPE_* + +#include "utils/nvbitvector.h" +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); + +#define GPUMGR_MAX_GPU_INSTANCES 8 +#define GPUMGR_MAX_COMPUTE_INSTANCES 8 + +MAKE_BITVECTOR(ENGTYPE_BIT_VECTOR, NV2080_ENGINE_TYPE_LAST); +typedef ENGTYPE_BIT_VECTOR *PENGTYPE_BIT_VECTOR; + +// +// Terminology: +// GPU -> entity sitting on the bus +// Device -> broadcast semantics; maps to one or more GPUs +// Subdevice -> unicast semantics; maps to a single GPU +// + + +//////////////////////////////////////////////////////////////////////////////// +// DO NOT ADD NEW STUBS HERE // +//////////////////////////////////////////////////////////////////////////////// +#define gpumgrGetGpuLinkCount(deviceInstance) ((NvU32) 0) +#define gpumgrGetSliLinkOutputMaskFromGpu(pGpu) ((NvU32) 0) +#define gpumgrGetVidLinkOutputMaskFromGpu(pGpu) ((NvU32) 0) +#define gpumgrGetSliLinkOrderCount(pGpu) ((NvU32) 0) +#define gpumgrGetSliLinkConnectionCount(pGpu) ((NvU32) 0) +#define gpumgrGetSLIConfig(gpuInstance, onlyWithSliLink) ((NvU32) 0) +#define gpumgrDisableVidLink(pGpu, head, max_dr_port) +#define gpumgrGetGpuVidLinkMaxPixelClock(pGpu, pMaxPclkMhz) (NV_ERR_NOT_SUPPORTED) +#define gpumgrPinsetToPinsetTableIndex(pinset, pPinsetIndex) (NV_ERR_NOT_SUPPORTED) +#define gpumgrGetBcEnabledStatus(g) (NV_FALSE) +#define gpumgrGetBcEnabledStatusEx(g, t) (NV_FALSE) +#define gpumgrSetBcEnabledStatus(g, b) do { NvBool b2 = b; (void)b2; } while (0) +#define gpumgrSLILoopReentrancy(pGpu, l, r, i, pFuncStr) +#define gpumgrSLILoopReentrancyPop(pGpu) ((NvU32)0) +#define gpumgrSLILoopReentrancyPush(pGpu, sliLoopReentrancy) do { NvU32 x = sliLoopReentrancy; (void)x; } while(0) + + +typedef struct +{ + NvU32 gpuId; + NvU64 gpuDomainBusDevice; + NvBool bInitAttempted; + NvBool bDrainState; // no new client connections to this GPU + NvBool bRemoveIdle; // remove this GPU once it's 
idle (detached) + NvBool bExcluded; // this gpu is marked as excluded; do not use + NvBool bUuidValid; // cached uuid is valid + NvBool bSkipHwNvlinkDisable; //skip HW registers configuration for disabled links + NvU32 initDisabledNvlinksMask; + NV_STATUS initStatus; + NvU8 uuid[RM_SHA1_GID_SIZE]; + OS_RM_CAPS *pOsRmCaps; // "Opaque" pointer to os-specific capabilities +} PROBEDGPU; + +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_FLIPS 11:4 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME 12:12 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_INVALID 0x0000000 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_VALID 0x0000001 + +/*! + * Structure for tracking resources allocated for saving primary GPU's VBIOS + * state. This is used for TDR/fullchip reset recovery. The GPU object gets + * destroyed, so the data belongs here. + */ +typedef struct _def_gpumgr_save_vbios_state +{ + RmPhysAddr vgaWorkspaceVidMemBase; //__nvoc_pbase_OBJGPUMGR) + +#ifdef __nvoc_gpu_mgr_h_disabled +#define __dynamicCast_OBJGPUMGR(pThis) ((OBJGPUMGR*)NULL) +#else //__nvoc_gpu_mgr_h_disabled +#define __dynamicCast_OBJGPUMGR(pThis) \ + ((OBJGPUMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUMGR))) +#endif //__nvoc_gpu_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32); +#define __objCreate_OBJGPUMGR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGPUMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +static inline void gpumgrAddSystemNvlinkTopo(NvU64 DomainBusDevice) { + return; +} + +static inline NvBool gpumgrGetSystemNvlinkTopo(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams) { + return ((NvBool)(0 != 0)); +} + +static inline void gpumgrUpdateSystemNvlinkTopo(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams) { + return; +} + +static inline NV_STATUS gpumgrSetGpuInitDisabledNvlinks(NvU32 gpuId, NvU32 mask, NvBool bSkipHwNvlinkDisable) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS gpumgrGetGpuInitDisabledNvlinks(NvU32 gpuId, NvU32 *pMask, NvBool *pbSkipHwNvlinkDisable) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NvBool gpumgrCheckIndirectPeer(struct OBJGPU *pGpu, struct OBJGPU *pRemoteGpu) { + return ((NvBool)(0 != 0)); +} + +static inline void gpumgrAddSystemMIGInstanceTopo(NvU64 domainBusDevice) { + return; +} + +static inline NvBool gpumgrGetSystemMIGInstanceTopo(NvU64 domainBusDevice, struct GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY **ppTopoParams) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool gpumgrIsSystemMIGEnabled(NvU64 domainBusDevice) { + return ((NvBool)(0 != 0)); +} + +static inline void gpumgrSetSystemMIGEnabled(NvU64 domainBusDevice, NvBool bMIGEnabled) { + return; +} + +static inline void gpumgrUnregisterRmCapsForMIGGI(NvU64 gpuDomainBusDevice) { + return; +} + +static inline void gpumgrUpdateBoardId(struct OBJGPU *arg0) { + return; +} + +static inline void gpumgrServiceInterrupts(NvU32 arg0, MC_ENGINE_BITVECTOR *arg1, NvBool arg2) { + return; +} + +NV_STATUS gpumgrConstruct_IMPL(struct OBJGPUMGR *arg_); +#define __nvoc_gpumgrConstruct(arg_) gpumgrConstruct_IMPL(arg_) +void gpumgrDestruct_IMPL(struct OBJGPUMGR *arg0); +#define __nvoc_gpumgrDestruct(arg0) gpumgrDestruct_IMPL(arg0) +#undef PRIVATE_FIELD + + +typedef struct { + NvBool specified; // Set this flag when using this struct + NvBool bIsIGPU; // Set this flag for iGPU + + DEVICE_MAPPING deviceMapping[DEVICE_INDEX_MAX]; // 
Register Aperture mapping + NvU32 socChipId0; // Chip ID used for HAL binding + NvU32 iovaspaceId; // SMMU client ID +} SOCGPUATTACHARG; + +// +// Packages up system/bus state for attach process. +// +typedef struct GPUATTACHARG +{ + GPUHWREG *regBaseAddr; + GPUHWREG *fbBaseAddr; + GPUHWREG *instBaseAddr; + RmPhysAddr devPhysAddr; + RmPhysAddr fbPhysAddr; + RmPhysAddr instPhysAddr; + RmPhysAddr ioPhysAddr; + NvU64 nvDomainBusDeviceFunc; + NvU32 regLength; + NvU64 fbLength; + NvU32 instLength; + NvU32 intLine; + void *pOsAttachArg; + NvBool bIsSOC; + NvU32 socDeviceCount; + DEVICE_MAPPING socDeviceMappings[GPU_MAX_DEVICE_MAPPINGS]; + NvU32 socId; + NvU32 socSubId; + NvU32 socChipId0; + NvU32 iovaspaceId; + NvBool bRequestFwClientRm; + + // + // The SOC-specific fields above are legacy fields that were added for + // ARCH MODS iGPU verification. There is a plan to deprecate these fields as + // part of an effort to clean up the existing iGPU code in RM. + // + // Starting with T234D+, the SOCGPUATTACHARG field below will be used to + // pass the required attach info for a single SOC device from the RM OS + // layer to core RM. + // + SOCGPUATTACHARG socDeviceArgs; +} GPUATTACHARG; + +NV_STATUS gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask); +NV_STATUS gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *); +NV_STATUS gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice); +NV_STATUS gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *); +NV_STATUS gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *); +NV_STATUS gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *); +void gpumgrSetGpuId(OBJGPU*, NvU32 gpuId); +NV_STATUS gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *); +void gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status); +OBJGPU* gpumgrGetGpuFromId(NvU32 gpuId); +OBJGPU* gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags); +OBJGPU* gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device); +NvU32 gpumgrGetDefaultPrimaryGpu(NvU32 gpuMask); +NV_STATUS gpumgrAllocGpuInstance(NvU32 *pDeviceInstance); +NV_STATUS gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice); +NV_STATUS gpumgrUnregisterGpuId(NvU32 gpuId); +NV_STATUS gpumgrExcludeGpuId(NvU32 gpuId); +NV_STATUS gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid); +NV_STATUS gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags); +NV_STATUS gpumgrAttachGpu(NvU32 deviceInstance, GPUATTACHARG *); +NV_STATUS gpumgrDetachGpu(NvU32 deviceInstance); +OBJGPU* gpumgrGetNextGpu(NvU32 gpuMask, NvU32 *pStartIndex); +NV_STATUS gpumgrStatePreInitGpu(OBJGPU*); +NV_STATUS gpumgrStateInitGpu(OBJGPU*); +NV_STATUS gpumgrStateLoadGpu(OBJGPU*, NvU32); +NV_STATUS gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance); +NV_STATUS gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal); +NV_STATUS gpumgrDestroyDevice(NvU32 deviceInstance); +NvU32 gpumgrGetDeviceInstanceMask(void); +NvU32 gpumgrGetDeviceGpuMask(NvU32 deviceInstance); +NV_STATUS gpumgrIsDeviceInstanceValid(NvU32 deviceInstance); +NvU32 gpumgrGetPrimaryForDevice(NvU32 deviceInstance); +NvBool gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance); +NvBool gpumgrIsDeviceEnabled(NvU32 deviceInstance); +NvU32 gpumgrGetGpuMask(OBJGPU *pGpu); +OBJGPU* gpumgrGetGpu(NvU32 deviceInstance); +OBJGPU* gpumgrGetSomeGpu(void); +NvU32 gpumgrGetSubDeviceCount(NvU32 gpuMask); +NvU32 gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu); +NvU32 gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu); 
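+
+// ----------------------------------------------------------------------------
+// Editorial sketch (not part of the generated header): the canonical idiom
+// for walking every GPU behind a broadcast device using the iterators
+// declared above. forEachGpuExample() and perGpuWork() are hypothetical
+// names used purely for illustration.
+//
+//     static void forEachGpuExample(NvU32 deviceInstance)
+//     {
+//         NvU32   gpuMask  = gpumgrGetDeviceGpuMask(deviceInstance);
+//         NvU32   gpuIndex = 0;
+//         OBJGPU *pGpu;
+//
+//         // gpumgrGetNextGpu() returns the GPU for the next bit set in
+//         // gpuMask, advancing gpuIndex as it goes; it returns NULL once
+//         // the mask is exhausted.
+//         while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL)
+//         {
+//             perGpuWork(pGpu); // hypothetical per-GPU helper
+//         }
+//     }
+// ----------------------------------------------------------------------------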
+NvU32 gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu); +OBJGPU* gpumgrGetParentGPU(OBJGPU *pGpu); +void gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu); +NvBool gpumgrIsGpuDisplayParent(OBJGPU*); +OBJGPU* gpumgrGetDisplayParent(OBJGPU*); +NV_STATUS gpumgrGetGpuLockAndDrPorts(OBJGPU*, OBJGPU*, NvU32 *, NvU32 *); +NV_STATUS gpumgrGetBootPrimary(OBJGPU **ppGpu); +OBJGPU* gpumgrGetMGpu(void); +RmPhysAddr gpumgrGetGpuPhysFbAddr(OBJGPU*); +OBJGPU* gpumgrGetGpuFromSubDeviceInst(NvU32, NvU32); +NV_STATUS gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask); +NV_STATUS gpumgrRemoveDeviceInstanceFromGpus(NvU32 gpuMask); +NV_STATUS gpumgrConstructGpuGrpObject(struct OBJGPUMGR *pGpuMgr, NvU32 gpuMask, struct OBJGPUGRP **ppGpuGrp); +struct OBJGPUGRP* gpumgrGetGpuGrpFromGpu(OBJGPU *pGpu); +struct OBJGPUGRP* gpumgrGetGpuGrpFromInstance(NvU32 gpugrpInstance); +NV_STATUS gpumgrModifyGpuDrainState(NvU32 gpuId, NvBool bEnable, NvBool bRemove, NvBool bLinkDisable); +NV_STATUS gpumgrQueryGpuDrainState(NvU32 gpuId, NvBool *pBEnable, NvBool *pBRemove); +NvBool gpumgrIsGpuPointerValid(OBJGPU *pGpu); +NvU32 gpumgrGetGrpMaskFromGpuInst(NvU32 gpuInst); +void gpumgrAddDeviceMaskToGpuInstTable(NvU32 gpuMask); +void gpumgrClearDeviceMaskFromGpuInstTable(NvU32 gpuMask); +NvBool gpumgrSetGpuAcquire(OBJGPU *pGpu); +void gpumgrSetGpuRelease(void); +NvU8 gpumgrGetGpuBridgeType(void); + +// +// gpumgrIsSubDeviceCountOne +// +static NV_INLINE NvBool +gpumgrIsSubDeviceCountOne(NvU32 gpuMask) +{ + // + // A fast version of gpumgrGetSubDeviceCount(gpumask) == 1. + // Make sure it returns 0 for gpuMask==0, just like gpumgrGetSubDeviceCount(0)!!! + // + return gpuMask != 0 && (gpuMask&(gpuMask-1)) == 0; +} + +// +// gpumgrIsParentGPU +// +static NV_INLINE NvBool +gpumgrIsParentGPU(OBJGPU *pGpu) +{ + return gpumgrGetParentGPU(pGpu) == pGpu; +} + +#endif // _GPUMGR_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_MGR_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c new file mode 100644 index 0000000..263d6f4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c @@ -0,0 +1,433 @@ +#define NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7ef3cb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +void __nvoc_init_OBJGPU(OBJGPU*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +void __nvoc_init_funcTable_OBJGPU(OBJGPU*); +NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU*, NvU32 arg_gpuInstance); +void __nvoc_init_dataField_OBJGPU(OBJGPU*); +void __nvoc_dtor_OBJGPU(OBJGPU*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJGPU = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPU, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPU, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_RmHalspecOwner = { + /*pClassDef=*/ &__nvoc_class_def_RmHalspecOwner, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_RmHalspecOwner), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJTRACEABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_OBJTRACEABLE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPU = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_OBJGPU_OBJGPU, + &__nvoc_rtti_OBJGPU_OBJTRACEABLE, + &__nvoc_rtti_OBJGPU_RmHalspecOwner, + &__nvoc_rtti_OBJGPU_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPU), + /*classId=*/ classId(OBJGPU), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPU", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPU, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPU, + /*pExportInfo=*/ &__nvoc_export_info_OBJGPU +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJGPU(OBJGPU *pThis) { + __nvoc_gpuDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) { + ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + pThis->setProperty(pThis, PDB_PROP_GPU_IS_CONNECTED, ((NvBool)(0 == 0))); + + // NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_IGPU + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_IGPU, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_ATS_SUPPORTED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_ATS_SUPPORTED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_ZERO_FB + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_ZERO_FB, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE, ((NvBool)(0 != 0))); 
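+        //
+        // Editorial note (not NVOC-generated output): each halspec guard in
+        // this function is a word/bit membership test on the variant index
+        // chosen at init time. A check such as
+        //
+        //     ((chipHal_HalVarIdx >> 5) == 2UL) &&
+        //         ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL)
+        //
+        // splits the index into a 32-bit word select (idx >> 5) and a bit
+        // select (idx & 0x1f), so it matches variant index 80 (word 2,
+        // bit 16), which the generator annotates as ChipHal: T234D.
+        //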
+ } + + // NVOC Property Hal field -- PDB_PROP_GPU_MIG_SUPPORTED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_SUPPORTED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_IS_COT_ENABLED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_IS_COT_ENABLED, ((NvBool)(0 != 0))); + } + + pThis->boardId = ~0; + + pThis->deviceInstance = 32; + + // Hal field -- isVirtual + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->isVirtual = ((NvBool)(0 != 0)); + } + + // Hal field -- isGspClient + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->isGspClient = ((NvBool)(0 == 0)); + } + else if (0) + { + } + + pThis->bIsDebugModeEnabled = ((NvBool)(0 != 0)); + + pThis->numOfMclkLockRequests = 0U; + + pThis->bUseRegisterAccessMap = !(0); + + pThis->boardInfo = ((void *)0); + + // Hal field -- bUnifiedMemorySpaceEnabled + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bUnifiedMemorySpaceEnabled = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bUnifiedMemorySpaceEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bWarBug200577889SriovHeavyEnabled + pThis->bWarBug200577889SriovHeavyEnabled = ((NvBool)(0 != 0)); + + // Hal field -- bNeed4kPageIsolation + if (0) + { + } + // default + else + { + pThis->bNeed4kPageIsolation = ((NvBool)(0 != 0)); + } + + // Hal field -- bInstLoc47bitPaWar + if (0) + { + } + // default + else + { + pThis->bInstLoc47bitPaWar = ((NvBool)(0 != 0)); + } + + // Hal field -- bIsBarPteInSysmemSupported + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bIsBarPteInSysmemSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bIsBarPteInSysmemSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bClientRmAllocatedCtxBuffer + if (0) + { + } + // default + else + { + pThis->bClientRmAllocatedCtxBuffer = ((NvBool)(0 != 0)); + } + + // Hal field -- bVidmemPreservationBrokenBug3172217 + if (0) + { + } + // default + else + { + pThis->bVidmemPreservationBrokenBug3172217 = ((NvBool)(0 != 0)); + } + + // Hal field -- bInstanceMemoryAlwaysCached + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bInstanceMemoryAlwaysCached = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bInstanceMemoryAlwaysCached = ((NvBool)(0 != 0)); + } + + pThis->bIsGeforce = ((NvBool)(0 == 0)); + + // Hal field -- bComputePolicyTimesliceSupported + if (0) + { + } + // default + else + { + pThis->bComputePolicyTimesliceSupported = ((NvBool)(0 != 0)); + } +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* ); +NV_STATUS 
__nvoc_ctor_OBJGPU(OBJGPU *pThis, NvU32 arg_gpuInstance) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_Object; + status = __nvoc_ctor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_RmHalspecOwner; + status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_OBJTRACEABLE; + __nvoc_init_dataField_OBJGPU(pThis); + + status = __nvoc_gpuConstruct(pThis, arg_gpuInstance); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail__init; + goto __nvoc_ctor_OBJGPU_exit; // Success + +__nvoc_ctor_OBJGPU_fail__init: + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); +__nvoc_ctor_OBJGPU_fail_OBJTRACEABLE: + __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); +__nvoc_ctor_OBJGPU_fail_RmHalspecOwner: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJGPU_fail_Object: +__nvoc_ctor_OBJGPU_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) { + ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +void __nvoc_init_funcTable_OBJGPU(OBJGPU *pThis) { + __nvoc_init_funcTable_OBJGPU_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RmHalspecOwner(RmHalspecOwner*, NvU32, NvU32, NvU32, RM_RUNTIME_VARIANT, NvU32); +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_OBJGPU(OBJGPU *pThis, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver) { + pThis->__nvoc_pbase_OBJGPU = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + pThis->__nvoc_pbase_RmHalspecOwner = &pThis->__nvoc_base_RmHalspecOwner; + pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver); + __nvoc_init_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + __nvoc_init_funcTable_OBJGPU(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance) { + NV_STATUS status; + Object *pParentObj; + OBJGPU *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGPU)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPU)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPU); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGPU(pThis, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, 
DispIpHal_ipver);
+    status = __nvoc_ctor_OBJGPU(pThis, arg_gpuInstance);
+    if (status != NV_OK) goto __nvoc_objCreate_OBJGPU_cleanup;
+
+    *ppThis = pThis;
+    return NV_OK;
+
+__nvoc_objCreate_OBJGPU_cleanup:
+    // do not call destructors here since the constructor already called them
+    portMemFree(pThis);
+    return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+    NV_STATUS status;
+    NvU32 ChipHal_arch = va_arg(args, NvU32);
+    NvU32 ChipHal_impl = va_arg(args, NvU32);
+    NvU32 ChipHal_hidrev = va_arg(args, NvU32);
+    RM_RUNTIME_VARIANT RmVariantHal_rmVariant = va_arg(args, RM_RUNTIME_VARIANT);
+    NvU32 DispIpHal_ipver = va_arg(args, NvU32);
+    NvU32 arg_gpuInstance = va_arg(args, NvU32);
+
+    status = __nvoc_objCreate_OBJGPU(ppThis, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance);
+
+    return status;
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h
new file mode 100644
index 0000000..7671085
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h
@@ -0,0 +1,3188 @@
+#ifndef _G_GPU_NVOC_H_
+#define _G_GPU_NVOC_H_
+#include "nvoc/runtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "g_gpu_nvoc.h"
+
+#ifndef _OBJGPU_H_
+#define _OBJGPU_H_
+
+/*!
+ * @file
+ * @brief Resource Manager Defines and Structures: Defines and structures used for the GPU Object.
+ */
+
+/*!
+ *
+ * Forward declaration of SEQSCRIPT - it lives here because it is used by many
+ * clients and we don't want objseq.h to have to be included everywhere.
+ * See NVCR 12827752
+ *
+ */
+typedef struct _SEQSCRIPT SEQSCRIPT, *PSEQSCRIPT;
+
+typedef struct GPUATTACHARG GPUATTACHARG;
+
+/*
+ * WARNING -- Avoid including headers in gpu.h
+ * A change in gpu.h, or in any header it includes, triggers recompilation of most RM
+ * files in an incremental build. We should keep the list of included headers as short as
+ * possible.
+ * In particular, a GPU child module should not have its object header included here.
+ * A child module generally includes the header of its parent.
+ * A child module header included by the parent module affects all the sibling modules.
+ */
+#include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS (from hal)
+#include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_CMD_INTERNAL_MAX_BSPS/NVENCS
+#include "ctrl/ctrl2080/ctrl2080nvd.h"
+#include "class/cl2080.h"
+#include "class/cl90cd.h"
+
+#include "nvlimits.h"
+#include "utils/nv_enum.h"
+
+#include "gpu/gpu_timeout.h"
+#include "gpu/gpu_access.h"
+
+#include "platform/acpi_common.h"
+#include "acpigenfuncs.h"
+#include "nvacpitypes.h"
+#include "platform/sli/sli.h"
+
+#include "core/core.h"
+#include "core/system.h"
+#include "core/info_block.h"
+#include "core/hal.h"
+#include "nvoc/utility.h"
+#include "gpu/mem_mgr/mem_desc.h"
+#include "gpu/gpu_resource_desc.h"
+#include "diagnostics/traceable.h"
+#include "gpu/gpu_uuid.h"
+#include "prereq_tracker/prereq_tracker.h"
+#include "gpu/gpu_halspec.h"
+
+#include "rmapi/control.h"
+#include "rmapi/event.h"
+#include "rmapi/rmapi.h"
+
+#include "nv_arch.h"
+
+#include "g_rmconfig_util.h" // prototypes for rmconfig utility functions, e.g. rmcfg_IsGK104()
+
+// TODO - the forward declaration of OS_GPU_INFO should be simplified
+typedef struct nv_state_t OS_GPU_INFO;
+
+struct OBJGMMU;
+
+#ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__
+#define __NVOC_CLASS_OBJGMMU_TYPEDEF__
+typedef struct OBJGMMU OBJGMMU;
+#endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGMMU
+#define __nvoc_class_id_OBJGMMU 0xd7a41d
+#endif /* __nvoc_class_id_OBJGMMU */
+
+
+struct OBJGRIDDISPLAYLESS;
+
+#ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__
+#define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__
+typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS;
+#endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS
+#define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a
+#endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */
+
+
+struct OBJHOSTENG;
+
+#ifndef __NVOC_CLASS_OBJHOSTENG_TYPEDEF__
+#define __NVOC_CLASS_OBJHOSTENG_TYPEDEF__
+typedef struct OBJHOSTENG OBJHOSTENG;
+#endif /* __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJHOSTENG
+#define __nvoc_class_id_OBJHOSTENG 0xb356e7
+#endif /* __nvoc_class_id_OBJHOSTENG */
+
+
+struct OBJPMUCLIENT;
+
+#ifndef __NVOC_CLASS_OBJPMUCLIENT_TYPEDEF__
+#define __NVOC_CLASS_OBJPMUCLIENT_TYPEDEF__
+typedef struct OBJPMUCLIENT OBJPMUCLIENT;
+#endif /* __NVOC_CLASS_OBJPMUCLIENT_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJPMUCLIENT
+#define __nvoc_class_id_OBJPMUCLIENT 0xea631d
+#endif /* __nvoc_class_id_OBJPMUCLIENT */
+
+
+struct OBJINTRABLE;
+
+#ifndef __NVOC_CLASS_OBJINTRABLE_TYPEDEF__
+#define __NVOC_CLASS_OBJINTRABLE_TYPEDEF__
+typedef struct OBJINTRABLE OBJINTRABLE;
+#endif /* __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJINTRABLE
+#define __nvoc_class_id_OBJINTRABLE 0x31ccb7
+#endif /* __nvoc_class_id_OBJINTRABLE */
+
+
+struct OBJVBIOS;
+
+#ifndef __NVOC_CLASS_OBJVBIOS_TYPEDEF__
+#define __NVOC_CLASS_OBJVBIOS_TYPEDEF__
+typedef struct OBJVBIOS OBJVBIOS;
+#endif /* __NVOC_CLASS_OBJVBIOS_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJVBIOS
+#define __nvoc_class_id_OBJVBIOS 0x5dc772
+#endif /* __nvoc_class_id_OBJVBIOS */
+
+
+struct NvDebugDump;
+
+#ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__
+#define __NVOC_CLASS_NvDebugDump_TYPEDEF__
+typedef struct NvDebugDump NvDebugDump;
+#endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_NvDebugDump
+#define __nvoc_class_id_NvDebugDump 0x7e80a2
+#endif /*
__nvoc_class_id_NvDebugDump */ + + +struct GpuMutexMgr; + +#ifndef __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ +#define __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ +typedef struct GpuMutexMgr GpuMutexMgr; +#endif /* __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuMutexMgr +#define __nvoc_class_id_GpuMutexMgr 0x9d93b2 +#endif /* __nvoc_class_id_GpuMutexMgr */ + + +struct KernelFalcon; + +#ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_KernelFalcon_TYPEDEF__ +typedef struct KernelFalcon KernelFalcon; +#endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFalcon +#define __nvoc_class_id_KernelFalcon 0xb6b1af +#endif /* __nvoc_class_id_KernelFalcon */ + + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + +struct GenericKernelFalcon; + +#ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +typedef struct GenericKernelFalcon GenericKernelFalcon; +#endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericKernelFalcon +#define __nvoc_class_id_GenericKernelFalcon 0xabcf08 +#endif /* __nvoc_class_id_GenericKernelFalcon */ + + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +#ifndef PARTITIONID_INVALID +#define PARTITIONID_INVALID 0xFFFFFFFF +#endif +typedef struct MIG_INSTANCE_REF MIG_INSTANCE_REF; +typedef struct NV2080_CTRL_GPU_REG_OP NV2080_CTRL_GPU_REG_OP; + +typedef enum +{ + BRANDING_TYPE_UNCACHED, + BRANDING_TYPE_NONE, + BRANDING_TYPE_QUADRO_GENERIC, + BRANDING_TYPE_QUADRO_AD, + BRANDING_TYPE_NVS_NVIDIA, // "NVIDIA NVS" + BRANDING_TYPE_VGX, +} BRANDING_TYPE; + +typedef enum +{ + COMPUTE_BRANDING_TYPE_NONE, + COMPUTE_BRANDING_TYPE_TESLA, +} COMPUTE_BRANDING_TYPE; + +#define MAX_DSM_SUPPORTED_FUNCS_RTN_LEN 8 // # bytes to store supported functions + +typedef struct { + // supported function status and cache + NvU32 suppFuncStatus; + NvU8 suppFuncs[MAX_DSM_SUPPORTED_FUNCS_RTN_LEN]; + NvU32 suppFuncsLen; + NvBool bArg3isInteger; + // callback status and cache + NvU32 callbackStatus; + NvU32 callback; +} ACPI_DSM_CACHE; + +typedef struct { + + ACPI_DSM_CACHE dsm[ACPI_DSM_FUNCTION_COUNT]; + ACPI_DSM_FUNCTION dispStatusHotplugFunc; + ACPI_DSM_FUNCTION dispStatusConfigFunc; + ACPI_DSM_FUNCTION perfPostPowerStateFunc; + ACPI_DSM_FUNCTION stereo3dStateActiveFunc; + NvU32 dsmPlatCapsCache[ACPI_DSM_FUNCTION_COUNT]; + NvU32 
MDTLFeatureSupport; + +} ACPI_DATA; + + +#define OOR_ARCH_DEF(x) \ + NV_ENUM_ENTRY(x, OOR_ARCH_X86_64, 0x00000000) \ + NV_ENUM_ENTRY(x, OOR_ARCH_PPC64LE, 0x00000001) \ + NV_ENUM_ENTRY(x, OOR_ARCH_ARM, 0x00000002) \ + NV_ENUM_ENTRY(x, OOR_ARCH_AARCH64, 0x00000003) \ + NV_ENUM_ENTRY(x, OOR_ARCH_NONE, 0x00000004) + +NV_ENUM_DEF(OOR_ARCH, OOR_ARCH_DEF) + +typedef struct +{ + NvU32 classId; + NvU32 flags; +} GPUCHILDORDER; + +typedef struct +{ + NvU32 classId; + NvU32 instances; +} GPUCHILDPRESENT; + +// GPU Child Order Flags +#define GCO_LIST_INIT NVBIT(0) // entry is used for init ordering (DO NOT USE) +#define GCO_LIST_LOAD NVBIT(1) // entry is used for load and postload ordering (DO NOT USE) +#define GCO_LIST_UNLOAD NVBIT(2) // entry is used for unload and preunload ordering (DO NOT USE) +#define GCO_LIST_DESTROY NVBIT(3) // entry is used for destroy order (DO NOT USE) +#define GCO_LIST_ALL (GCO_LIST_INIT | GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY) + // ^ entry is used for all list types (RECOMMENDED) +#define GCO_ALL (GCO_LIST_ALL) + + +typedef struct +{ + NvU32 childTypeIdx; + NvU32 childInst; + NvU32 gpuChildPtrOffset; +} GPU_CHILD_ITER; + +typedef GPU_CHILD_ITER ENGSTATE_ITER; +typedef GPU_CHILD_ITER PMUCLIENT_ITER; + +// +// Object 'get' macros for GPU relative object retrievals. +// + +#define ENG_GET_GPU(p) objFindAncestorOfType(OBJGPU, (p)) + +// GPU_GET_FIFO_UC is autogenerated, returns per Gpu pFifo. +#define GPU_GET_FIFO(p) GPU_GET_FIFO_UC(p) + +// GPU_GET_KERNEL_FIFO_UC is autogenerated, returns per Gpu pKernelFifo. +#define GPU_GET_KERNEL_FIFO(p) gpuGetKernelFifoShared(p) + +#define GPU_GET_HEAP(p) (RMCFG_MODULE_HEAP ? MEMORY_MANAGER_GET_HEAP(GPU_GET_MEMORY_MANAGER(p)) : NULL) + +#define GPU_GET_HAL(p) (RMCFG_MODULE_HAL ? (p)->pHal : NULL) + +#define GPU_GET_OS(p) (RMCFG_MODULE_OS ? (p)->pOS : NULL) // TBD: replace with SYS_GET_OS +#define GPU_QUICK_PATH_GET_OS(p) GPU_GET_OS(p) // TBD: remove + +#define GPU_GET_REGISTER_ACCESS(g) (&(g)->registerAccess) + +// Returns the pRmApi that routes to the physical driver, either via RPC or local calls +#define GPU_GET_PHYSICAL_RMAPI(g) (&(g)->physicalRmApi) + +// +// Defines and helpers for encoding and decoding PCI domain, bus and device. +// +// Ideally these would live in objbus.h (or somewhere else more appropriate) and +// not gpu/gpu.h, but keep them here for now while support for 32-bit domains is +// being added as part of bug 1904645. 
+//
+
+// DRF macros for GPUBUSINFO::nvDomainBusDeviceFunc
+#define NVGPU_BUSDEVICE_DOMAIN 63:32
+#define NVGPU_BUSDEVICE_BUS    15:8
+#define NVGPU_BUSDEVICE_DEVICE 7:0
+
+static NV_INLINE NvU32 gpuDecodeDomain(NvU64 gpuDomainBusDevice)
+{
+    return (NvU32)DRF_VAL64(GPU, _BUSDEVICE, _DOMAIN, gpuDomainBusDevice);
+}
+
+static NV_INLINE NvU8 gpuDecodeBus(NvU64 gpuDomainBusDevice)
+{
+    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _BUS, gpuDomainBusDevice);
+}
+
+static NV_INLINE NvU8 gpuDecodeDevice(NvU64 gpuDomainBusDevice)
+{
+    return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _DEVICE, gpuDomainBusDevice);
+}
+
+static NV_INLINE NvU64 gpuEncodeDomainBusDevice(NvU32 domain, NvU8 bus, NvU8 device)
+{
+    return DRF_NUM64(GPU, _BUSDEVICE, _DOMAIN, domain) |
+           DRF_NUM64(GPU, _BUSDEVICE, _BUS, bus) |
+           DRF_NUM64(GPU, _BUSDEVICE, _DEVICE, device);
+}
+
+static NV_INLINE NvU32 gpuEncodeBusDevice(NvU8 bus, NvU8 device)
+{
+    NvU64 busDevice = gpuEncodeDomainBusDevice(0, bus, device);
+
+    // Bus and device are guaranteed to fit in the lower 32 bits
+    return (NvU32)busDevice;
+}
+
+//
+// Generate a 32-bit id from domain, bus and device tuple.
+//
+NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device);
+
+//
+// Helpers for getting domain, bus and device of a GPU
+//
+// Ideally these would be inline functions, but NVOC doesn't support that today,
+// tracked in bug 1905882
+//
+#define gpuGetDBDF(pGpu)   ((pGpu)->busInfo.nvDomainBusDeviceFunc)
+#define gpuGetDomain(pGpu) gpuDecodeDomain((pGpu)->busInfo.nvDomainBusDeviceFunc)
+#define gpuGetBus(pGpu)    gpuDecodeBus((pGpu)->busInfo.nvDomainBusDeviceFunc)
+#define gpuGetDevice(pGpu) gpuDecodeDevice((pGpu)->busInfo.nvDomainBusDeviceFunc)
+
+#undef NVGPU_BUSDEVICE_DOMAIN
+#undef NVGPU_BUSDEVICE_BUS
+#undef NVGPU_BUSDEVICE_DEVICE
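+
+// Editor's sketch (not part of the original header; the helper name is
+// hypothetical): a round-trip check for the encode/decode helpers above. Under
+// the 63:32 / 15:8 / 7:0 layout, domain 0x10000, bus 0x3B, device 0 encodes to
+// 0x0001000000003B00.
+static NV_INLINE NvBool gpuDbdfRoundTripCheck(NvU32 domain, NvU8 bus, NvU8 device)
+{
+    // Pack the tuple into the GPUBUSINFO::nvDomainBusDeviceFunc layout...
+    NvU64 dbdf = gpuEncodeDomainBusDevice(domain, bus, device);
+
+    // ...and verify that each field decodes back to the original value.
+    return (NvBool)((gpuDecodeDomain(dbdf) == domain) &&
+                    (gpuDecodeBus(dbdf) == bus) &&
+                    (gpuDecodeDevice(dbdf) == device));
+}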
+
+//
+// MaskRevision constants.
+//
+#define GPU_NO_MASK_REVISION  0x00
+#define GPU_MASK_REVISION_A1  0xA1
+#define GPU_MASK_REVISION_A2  0xA2
+#define GPU_MASK_REVISION_A3  0xA3
+#define GPU_MASK_REVISION_A4  0xA4
+#define GPU_MASK_REVISION_A5  0xA5
+#define GPU_MASK_REVISION_A6  0xA6
+#define GPU_MASK_REVISION_B1  0xB1
+#define GPU_MASK_REVISION_B2  0xB2
+#define GPU_MASK_REVISION_C1  0xC1
+#define GPU_MASK_REVISION_D1  0xD1
+
+#define GPU_GET_MASKREVISION(pGpu) (((gpuGetChipMajRev(pGpu))<<4)|(gpuGetChipMinRev(pGpu)))
+
+//
+// Revision constants.
+//
+#define GPU_NO_REVISION  0xFF
+#define GPU_REVISION_0   0x00
+#define GPU_REVISION_1   0x01
+#define GPU_REVISION_2   0x02
+#define GPU_REVISION_3   0x03
+#define GPU_REVISION_4   0x04
+#define GPU_REVISION_5   0x05
+#define GPU_REVISION_6   0x06
+#define GPU_REVISION_7   0x07
+#define GPU_REVISION_8   0x08
+#define GPU_REVISION_9   0x09
+#define GPU_REVISION_A   0x0A
+#define GPU_REVISION_B   0x0B
+#define GPU_REVISION_C   0x0C
+#define GPU_REVISION_D   0x0D
+#define GPU_REVISION_E   0x0E
+#define GPU_REVISION_F   0x0F
+
+//
+// One extra nibble should be added to the architecture version read from the
+// PMC boot register to represent the architecture number in RM.
+//
+#define GPU_ARCH_SHIFT 0x4
+
+// Registry key for inst mem modification defines
+#define INSTMEM_TAG_MASK (0xf0000000)
+#define INSTMEM_TAG(a)   ((INSTMEM_TAG_MASK & (a)) >> 28)
+
+
+typedef struct
+{
+
+    NvU32 PCIDeviceID;
+    NvU32 Manufacturer;
+    NvU32 PCISubDeviceID;
+    NvU32 PCIRevisionID;
+    NvU32 Subrevision;
+
+    //
+    // ImplementationExternal and ArchitectureExternal are only valid if they are
+    // not both zero. They are used when we want to report a different
+    // arch/imp to an external client. For example, MCP73 is almost the same
+    // as MCP67, so we report the MCP67 arch/imp to external clients of MCP73.
+    // (If an MCP73 client really needs to know that it is running on MCP73
+    // instead of MCP67, it should check capability bits.)
+    //
+    NvU32 ImplementationExternal;
+    NvU32 ArchitectureExternal;
+} GPUIDINFO;
+
+
+typedef struct
+{
+    NvU32 impl;
+    NvU32 arch;
+    NvU32 majorRev;
+    NvU32 minorRev;
+    NvU32 devIDStrap;
+    NvU32 minorExtRev;
+} PMCBOOT0;
+
+typedef struct
+{
+    NvU32 impl;
+    NvU32 arch;
+    NvU32 majorRev;
+    NvU32 minorRev;
+    NvU32 devIDStrap;
+    NvU32 minorExtRev;
+} PMCBOOT42;
+
+//
+// Random collection of bus-related configuration state.
+//
+typedef struct
+{
+    RmPhysAddr gpuPhysAddr;
+    RmPhysAddr gpuPhysFbAddr;
+    RmPhysAddr gpuPhysInstAddr;
+    RmPhysAddr gpuPhysIoAddr;
+    NvU32 iovaspaceId;
+    NvU32 IntLine;
+    NvU32 IsrHooked;
+    NvU64 nvDomainBusDeviceFunc;
+    OOR_ARCH oorArch;
+} GPUBUSINFO;
+
+typedef struct
+{
+    PCLASSDESCRIPTOR pClasses;
+    NvU32 *pSuppressClasses;
+    NvU32 numClasses;
+    NvBool bSuppressRead;
+} GPUCLASSDB, *PGPUCLASSDB;
+
+typedef struct
+{
+    const CLASSDESCRIPTOR *pClassDescriptors;
+    NvU32 numClassDescriptors;
+
+    PENGDESCRIPTOR pEngineInitDescriptors;
+    PENGDESCRIPTOR pEngineDestroyDescriptors;
+    PENGDESCRIPTOR pEngineLoadDescriptors;
+    PENGDESCRIPTOR pEngineUnloadDescriptors;
+    NvU32 numEngineDescriptors;
+} GPU_ENGINE_ORDER, *PGPU_ENGINE_ORDER;
+
+//
+// PCI Express Support
+//
+typedef struct NBADDR
+{
+    NvU32 domain;
+    NvU8 bus;
+    NvU8 device;
+    NvU8 func;
+    NvU8 valid;
+    void *handle;
+} NBADDR;
+
+typedef struct
+{
+    NBADDR addr;
+    void *vAddr;              // virtual address of the port, if it has been mapped. Not used starting with Win10 BuildXXXXX
+    NvU32 PCIECapPtr;         // offset of the PCIE capptr in the NB
+                              //   Capability register set in enhanced configuration space
+                              //
+    NvU32 PCIEErrorCapPtr;    // offset of the Advanced Error Reporting Capability register set
+    NvU32 PCIEVCCapPtr;       // offset of the Virtual Channel (VC) Capability register set
+    NvU32 PCIEL1SsCapPtr;     // offset of the L1 Substates Capabilities
+    NvU16 DeviceID, VendorID; // device and vendor ID for port
+} PORTDATA;
+
+typedef struct // GPU specific data for core logic object, stored in GPU object
+{
+    PORTDATA upstreamPort;        // the upstream port info for the GPU
+                                  // If there is a switch this is equal to boardDownstreamPort
+                                  // If there is no switch this is equal to rootPort
+    PORTDATA rootPort;            // The root port of the PCI-E root complex
+    PORTDATA boardUpstreamPort;   // If there is no BR03 this is equal to rootPort.
+    PORTDATA boardDownstreamPort; // If there is no BR03 these data are not set.
+} GPUCLDATA;
+
+
+//
+// Flags for gpuStateLoad() and gpuStateUnload() routines. Flags *must* be used
+// symmetrically across an Unload/Load pair.
+//
+#define GPU_STATE_FLAGS_PRESERVING     NVBIT(0) // GPU state is preserved
+#define GPU_STATE_FLAGS_VGA_TRANSITION NVBIT(1) // To be used with GPU_STATE_FLAGS_PRESERVING.
+#define GPU_STATE_FLAGS_PM_TRANSITION  NVBIT(2) // To be used with GPU_STATE_FLAGS_PRESERVING.
+#define GPU_STATE_FLAGS_PM_SUSPEND     NVBIT(3)
+#define GPU_STATE_FLAGS_PM_HIBERNATE   NVBIT(4)
+#define GPU_STATE_FLAGS_GC6_TRANSITION NVBIT(5) // To be used with GPU_STATE_FLAGS_PRESERVING.
+#define GPU_STATE_DEFAULT              0        // Default flags for destructive state loads
+                                                // and unloads
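+
+// Editor's note (illustrative, not part of the original header): "symmetric"
+// means the same flag set must be passed to both halves of an Unload/Load
+// pair, e.g. a suspend/resume sequence would look like
+//     gpuStateUnload(pGpu, GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
+//     ... power down / power up ...
+//     gpuStateLoad(pGpu, GPU_STATE_FLAGS_PRESERVING | GPU_STATE_FLAGS_PM_TRANSITION);
+// whereas GPU_STATE_DEFAULT above selects a destructive, non-preserving
+// unload/load.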
+
+typedef struct engine_event_node
+{
+    PEVENTNOTIFICATION pEventNotify;
+    struct Memory *pMemory;
+    struct engine_event_node *pNext;
+} ENGINE_EVENT_NODE;
+
+// Linked list of per-engine non-stall event nodes
+typedef struct
+{
+    ENGINE_EVENT_NODE *pEventNode;
+    // lock to protect the above list
+    PORT_SPINLOCK *pSpinlock;
+} ENGINE_EVENT_LIST;
+
+struct OBJHWBC;
+typedef struct hwbc_list
+{
+    struct OBJHWBC *pHWBC;
+    struct hwbc_list *pNext;
+} HWBC_LIST;
+
+typedef struct SRIOV_P2P_INFO
+{
+    NvU32 gfid;
+    NvBool bAllowP2pAccess;
+    NvU32 accessRefCount;
+    NvU32 destRefCount;
+} SRIOV_P2P_INFO, *PSRIOV_P2P_INFO;
+
+//
+// typedefs of the private structs used in OBJGPU's data fields
+//
+
+typedef struct
+{
+    NvBool isInitialized;
+    NvU8 uuid[RM_SHA1_GID_SIZE];
+} _GPU_UUID;
+
+typedef struct
+{
+    NvBool bValid;
+    NvU8 id;
+} _GPU_PCIE_PEER_CLIQUE;
+
+typedef struct
+{
+    NvU32 platformId;       // used to identify soc
+    NvU32 implementationId; // soc-specific
+    NvU32 revisionId;       // soc-revision
+    PMCBOOT0 pmcBoot0;
+    PMCBOOT42 pmcBoot42;
+    NvU8 subRevision;       // sub-revision (NV_FUSE_OPT_SUBREVISION on GPU)
+} _GPU_CHIP_INFO;
+
+
+// Engine Database
+typedef struct
+{
+    NvU32 size;
+    NvU32 *pType;
+    NvBool bValid;
+} _GPU_ENGINE_DB;
+
+#define MAX_NUM_BARS (8)
+// SRIOV state
+typedef struct
+{
+    /*!
+     * Total number of VFs available in this GPU
+     */
+    NvU32 totalVFs;
+
+    /*!
+     * First VF Offset
+     */
+    NvU32 firstVFOffset;
+
+    /*!
+     * Max GFID possible
+     */
+    NvU32 maxGfid;
+
+    /*!
+     * Physical offset of the Virtual BAR0 register. Stores the offset if the GPU is
+     * a physical function, else 0
+     */
+    NvU32 virtualRegPhysOffset;
+
+    /*!
+     * Allocated GFIDs. Used to ensure that plugins don't use the same GFID for
+     * multiple VFs.
+     */
+    NvU8 *pAllocatedGfids;
+
+    /*!
+     * The sizes of the BAR regions on the VF
+     */
+    NvU64 vfBarSize[MAX_NUM_BARS];
+
+    /*!
+     * First PF's BAR addresses
+     */
+    NvU64 firstVFBarAddress[MAX_NUM_BARS];
+
+    /*!
+     * If the VF BARs are 64-bit addressable
+     */
+    NvBool b64bitVFBar0;
+    NvBool b64bitVFBar1;
+    NvBool b64bitVFBar2;
+
+    /*!
+     * GFID used for P2P access
+     */
+    PSRIOV_P2P_INFO pP2PInfo;
+    NvBool bP2PAllocated;
+    NvU32 maxP2pGfid;
+} _GPU_SRIOV_STATE;
+
+// Max # of instances for GPU children
+#define GPU_MAX_CES    10
+#define GPU_MAX_GRS    8
+#define GPU_MAX_FIFOS  1
+#define GPU_MAX_MSENCS NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS
+#define GPU_MAX_NVDECS NV2080_CTRL_CMD_INTERNAL_MAX_BSPS
+#define GPU_MAX_NVJPGS 8
+#define GPU_MAX_HSHUBS 5
+
+//
+// Macro defines for OBJGPU fields -- a macro defined inside the NVOC class
+// block is gone after the NVOC preprocessing stage, so macros used outside
+// gpu/gpu.h should not be defined inside the class block.
+//
+
+//
+// Maximum number of Falcon objects that can be allocated on one GPU.
+// This is purely a software limit and can be raised freely as more are added.
+//
+#define GPU_MAX_FALCON_ENGINES \
+    ENG_IOCTRL__SIZE_1 + \
+    ENG_GPCCS__SIZE_1 + \
+    ENG_FECS__SIZE_1 + \
+    ENG_NVJPEG__SIZE_1 + \
+    ENG_NVDEC__SIZE_1 + \
+    ENG_MSENC__SIZE_1 + \
+    32
+
+// for OBJGPU::pRmCtrlDeferredCmd
+#define MAX_DEFERRED_CMDS 2
+
+// for OBJGPU::computeModeRefCount
+#define NV_GPU_MODE_GRAPHICS_MODE                 0x00000001
+#define NV_GPU_MODE_COMPUTE_MODE                  0x00000002
+#define NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT 0x0000000a
+#define NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT 0x0000000b
+
+//
+// Structure to hold information obtained from
+// parsing the DEVICE_INFO2 table during init.
+// + +typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO DEVICE_INFO2_TABLE; + +#define NV_GPU_INTERNAL_DEVICE_HANDLE 0xABCD0080 +#define NV_GPU_INTERNAL_SUBDEVICE_HANDLE 0xABCD2080 + +// +// NV GPU simulation mode defines +// Keep in sync with os.h SIM MODE defines until osGetSimulationMode is deprecated. +// +#ifndef NV_SIM_MODE_DEFS +#define NV_SIM_MODE_DEFS +#define NV_SIM_MODE_HARDWARE 0U +#define NV_SIM_MODE_RTL 1U +#define NV_SIM_MODE_CMODEL 2U +#define NV_SIM_MODE_MODS_AMODEL 3U +#define NV_SIM_MODE_TEGRA_FPGA 4U +#define NV_SIM_MODE_INVALID (~0x0U) +#endif + +// +// The actual GPU object definition +// +#ifdef NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJGPU { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct RmHalspecOwner __nvoc_base_RmHalspecOwner; + struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; + struct Object *__nvoc_pbase_Object; + struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; + struct OBJGPU *__nvoc_pbase_OBJGPU; + NvBool PDB_PROP_GPU_IN_STANDBY; + NvBool PDB_PROP_GPU_IN_HIBERNATE; + NvBool PDB_PROP_GPU_IN_PM_CODEPATH; + NvBool PDB_PROP_GPU_IN_PM_RESUME_CODEPATH; + NvBool PDB_PROP_GPU_STATE_INITIALIZED; + NvBool PDB_PROP_GPU_EMULATION; + NvBool PDB_PROP_GPU_PRIMARY_DEVICE; + NvBool PDB_PROP_GPU_HYBRID_MGPU; + NvBool PDB_PROP_GPU_ALTERNATE_TREE_ENABLED; + NvBool PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS; + NvBool PDB_PROP_GPU_3D_CONTROLLER; + NvBool PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM; + NvBool PDB_PROP_GPU_IS_CONNECTED; + NvBool PDB_PROP_GPU_BROKEN_FB; + NvBool PDB_PROP_GPU_IN_FULLCHIP_RESET; + NvBool PDB_PROP_GPU_IN_SECONDARY_BUS_RESET; + NvBool PDB_PROP_GPU_IN_GC6_RESET; + NvBool PDB_PROP_GPU_IS_GEMINI; + NvBool PDB_PROP_GPU_PERSISTENT_SW_STATE; + NvBool PDB_PROP_GPU_COHERENT_CPU_MAPPING; + NvBool PDB_PROP_GPU_IS_LOST; + NvBool PDB_PROP_GPU_IN_TIMEOUT_RECOVERY; + NvBool PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT; + NvBool PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY; + NvBool PDB_PROP_GPU_TEGRA_SOC_IGPU; + NvBool PDB_PROP_GPU_ATS_SUPPORTED; + NvBool PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING; + NvBool PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE; + NvBool PDB_PROP_GPU_IS_UEFI; + NvBool PDB_PROP_GPU_ZERO_FB; + NvBool PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE; + NvBool PDB_PROP_GPU_MIG_SUPPORTED; + NvBool PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED; + NvBool PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED; + NvBool PDB_PROP_GPU_IS_COT_ENABLED; + NvBool PDB_PROP_GPU_SWRL_GRANULAR_LOCKING; + NvBool PDB_PROP_GPU_IN_SLI_LINK_CODEPATH; + NvBool PDB_PROP_GPU_IS_PLX_PRESENT; + NvBool PDB_PROP_GPU_IS_BR03_PRESENT; + NvBool PDB_PROP_GPU_IS_BR04_PRESENT; + NvBool PDB_PROP_GPU_BEHIND_BRIDGE; + NvBool PDB_PROP_GPU_BEHIND_BR03; + NvBool PDB_PROP_GPU_BEHIND_BR04; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY; + NvBool PDB_PROP_GPU_RM_UNLINKED_SLI; + NvBool PDB_PROP_GPU_SLI_LINK_ACTIVE; + NvBool PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST; + NvBool PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH; + NvBool PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL; + NvBool PDB_PROP_GPU_IS_MOBILE; + NvBool PDB_PROP_GPU_RTD3_GC6_ACTIVE; + NvBool PDB_PROP_GPU_FAST_GC6_ACTIVE; + NvBool PDB_PROP_GPU_ACCOUNTING_ON; + NvBool PDB_PROP_GPU_INACCESSIBLE; + NvBool PDB_PROP_GPU_NVLINK_SYSMEM; + NvBool 
PDB_PROP_GPU_C2C_SYSMEM; + NvBool PDB_PROP_GPU_IN_TCC_MODE; + NvBool PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE; + NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K; + NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT; + NvBool PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT; + NvBool PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS; + NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU; + NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA; + NvBool PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA; + NvBool PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED; + NvBool PDB_PROP_GPU_NV_USERMODE_ENABLED; + NvBool PDB_PROP_GPU_IN_FATAL_ERROR; + OS_GPU_INFO *pOsGpuInfo; + OS_RM_CAPS *pOsRmCaps; + NvU32 halImpl; + void *hPci; + ENGINE_EVENT_LIST engineNonstallIntr[52]; + NvBool bIsSOC; + NvU32 gpuInstance; + NvU32 gpuDisabled; + NvU32 gpuId; + NvU32 boardId; + NvU32 deviceInstance; + NvU32 subdeviceInstance; + NvS32 numaNodeId; + _GPU_UUID gpuUuid; + _GPU_PCIE_PEER_CLIQUE pciePeerClique; + NvU32 i2cPortForExtdev; + GPUIDINFO idInfo; + _GPU_CHIP_INFO chipInfo; + GPUBUSINFO busInfo; + GPU_ENGINE_ORDER engineOrder; + GPUCLASSDB classDB; + NvU32 chipId0; + NvU32 chipId1; + NvU32 pmcEnable; + NvU32 pmcRmOwnsIntrMask; + NvBool testIntr; + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *gspSupportedEngines; + NvU32 numCEs; + NvU32 ceFaultMethodBufferSize; + NvBool isVirtual; + NvBool isGspClient; + NvU64 fbLength; + NvU32 instLength; + NvBool instSetViaAttachArg; + NvU32 activeFBIOs; + NvU64 gpuVbiosPostTime; + NvBool bIsCeMapInitialized; + NvBool bIsKCeMapInitialized; + NvU32 uefiScanoutSurfaceSizeInMB; + RmPhysAddr dmaStartAddress; + NvU32 gpuDeviceMapCount; + DEVICE_MAPPING deviceMappings[60]; + PIO_APERTURE pIOApertures[12]; + DEVICE_MAPPING *pDeviceMappingsByDeviceInstance[12]; + void *gpuCfgAddr; + TIMEOUT_DATA timeoutData; + NvU32 computeModeRules; + NvS32 computeModeRefCount; + NvHandle hComputeModeReservation; + NvBool bIsDebugModeEnabled; + NvU32 masterFromSLIConfig; + NvU32 sliStatus; + PENG_INFO_LINK_NODE infoList; + struct OBJOS *pOS; + struct OBJHAL *pHal; + struct MemoryManager *pMemoryManager; + struct KernelDisplay *pKernelDisplay; + struct OBJTMR *pTmr; + struct OBJDCECLIENTRM *pDceclientrm; + HWBC_LIST *pHWBCList; + GPUCLDATA gpuClData; + _GPU_ENGINE_DB engineDB; + NvU32 engineDBSize; + NvU32 instCacheOverride; + NvS32 numOfMclkLockRequests; + NvU32 netlistNum; + RmCtrlDeferredCmd pRmCtrlDeferredCmd[2]; + ACPI_DATA acpi; + NvU32 activeFifoEventMthdNotifiers; + struct Falcon *constructedFalcons[60]; + NvU32 numConstructedFalcons; + struct GenericKernelFalcon *genericKernelFalcons[60]; + NvU32 numGenericKernelFalcons; + NvU8 *pUserRegisterAccessMap; + NvU8 *pUnrestrictedRegisterAccessMap; + NvU32 userRegisterAccessMapSize; + struct PrereqTracker *pPrereqTracker; + RegisterAccess registerAccess; + NvBool bUseRegisterAccessMap; + NvU32 *pRegopOffsetScratchBuffer; + NvU32 *pRegopOffsetAddrScratchBuffer; + NvU32 regopScratchBufferMaxOffsets; + _GPU_SRIOV_STATE sriovState; + NvU64 vmmuSegmentSize; + NvHandle hDefaultClientShare; + NvHandle hDefaultClientShareDevice; + NvHandle hDefaultClientShareSubDevice; + NvU32 externalKernelClientCount; + DEVICE_INFO2_TABLE *pDeviceInfoTable; + NvU32 numDeviceInfoEntries; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle hInternalSubdevice; + struct Subdevice *pCachedSubdevice; + struct RsClient *pCachedRsClient; + RM_API physicalRmApi; + struct Subdevice **pSubdeviceBackReferences; + NvU32 numSubdeviceBackReferences; + NvU32 maxSubdeviceBackReferences; + NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS 
*pChipInfo; + NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *boardInfo; + NvBool bBar2MovedByVtd; + NvBool bBar1Is64Bit; + NvBool bSurpriseRemovalSupported; + NvBool bTwoStageRcRecoveryEnabled; + NvBool bReplayableTraceEnabled; + NvBool bInD3Cold; + NvBool bIsSimulation; + NvBool bIsModsAmodel; + NvBool bIsFmodel; + NvBool bIsRtlsim; + NvBool bIsPassthru; + NvBool bIsVirtualWithSriov; + NvBool bStateLoading; + NvBool bStateUnloading; + NvBool bStateLoaded; + NvBool bFullyConstructed; + NvBool bUnifiedMemorySpaceEnabled; + NvBool bSriovEnabled; + NvBool bWarBug200577889SriovHeavyEnabled; + NvBool bCacheOnlyMode; + NvBool bNeed4kPageIsolation; + NvBool bSplitVasManagementServerClientRm; + NvU32 instLocOverrides; + NvU32 instLocOverrides2; + NvU32 instLocOverrides3; + NvU32 instLocOverrides4; + NvBool bInstLoc47bitPaWar; + NvU32 instVprOverrides; + NvU32 optimizeUseCaseOverride; + NvS16 fecsCtxswLogConsumerCount; + NvS16 videoCtxswLogConsumerCount; + struct OBJVASPACE *pFabricVAS; + NvBool bPipelinedPteMemEnabled; + NvBool bIsBarPteInSysmemSupported; + NvBool bRegUsesGlobalSurfaceOverrides; + NvBool bClientRmAllocatedCtxBuffer; + NvBool bIterativeMmuWalker; + NvBool bEccPageRetirementWithSliAllowed; + NvBool bVidmemPreservationBrokenBug3172217; + NvBool bInstanceMemoryAlwaysCached; + NvBool bRmProfilingPrivileged; + NvBool bGeforceSmb; + NvBool bIsGeforce; + NvBool bIsQuadro; + NvBool bIsVgx; + NvBool bIsNvidiaNvs; + NvBool bIsTitan; + NvBool bIsTesla; + BRANDING_TYPE brandingCache; + NvBool bComputePolicyTimesliceSupported; + NvBool bGlobalPoisonFuseEnabled; + RmPhysAddr simAccessBufPhysAddr; +}; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; + +#define __staticCast_OBJGPU(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPU) + +#ifdef __nvoc_gpu_h_disabled +#define __dynamicCast_OBJGPU(pThis) ((OBJGPU*)NULL) +#else //__nvoc_gpu_h_disabled +#define __dynamicCast_OBJGPU(pThis) \ + ((OBJGPU*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPU))) +#endif //__nvoc_gpu_h_disabled + +#define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_CAST +#define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_NAME PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_CAST +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU +#define PDB_PROP_GPU_INACCESSIBLE_BASE_CAST +#define PDB_PROP_GPU_INACCESSIBLE_BASE_NAME PDB_PROP_GPU_INACCESSIBLE +#define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH +#define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_CAST +#define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_NAME PDB_PROP_GPU_IN_FATAL_ERROR +#define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_RESUME_CODEPATH +#define PDB_PROP_GPU_IN_STANDBY_BASE_CAST +#define PDB_PROP_GPU_IN_STANDBY_BASE_NAME PDB_PROP_GPU_IN_STANDBY +#define PDB_PROP_GPU_IS_COT_ENABLED_BASE_CAST +#define PDB_PROP_GPU_IS_COT_ENABLED_BASE_NAME PDB_PROP_GPU_IS_COT_ENABLED +#define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_CAST +#define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_NAME 
PDB_PROP_GPU_COHERENT_CPU_MAPPING +#define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED +#define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_CAST +#define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY +#define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_NAME PDB_PROP_GPU_SLI_LINK_ACTIVE +#define PDB_PROP_GPU_IN_TCC_MODE_BASE_CAST +#define PDB_PROP_GPU_IN_TCC_MODE_BASE_NAME PDB_PROP_GPU_IN_TCC_MODE +#define PDB_PROP_GPU_C2C_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_C2C_SYSMEM_BASE_NAME PDB_PROP_GPU_C2C_SYSMEM +#define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_CAST +#define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_NAME PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING +#define PDB_PROP_GPU_IN_GC6_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_GC6_RESET_BASE_NAME PDB_PROP_GPU_IN_GC6_RESET +#define PDB_PROP_GPU_HYBRID_MGPU_BASE_CAST +#define PDB_PROP_GPU_HYBRID_MGPU_BASE_NAME PDB_PROP_GPU_HYBRID_MGPU +#define PDB_PROP_GPU_3D_CONTROLLER_BASE_CAST +#define PDB_PROP_GPU_3D_CONTROLLER_BASE_NAME PDB_PROP_GPU_3D_CONTROLLER +#define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED +#define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE +#define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_NAME PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED +#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_CAST +#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI +#define PDB_PROP_GPU_IS_UEFI_BASE_CAST +#define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI +#define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_NAME PDB_PROP_GPU_IN_SECONDARY_BUS_RESET +#define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_CAST +#define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_NAME PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT +#define PDB_PROP_GPU_IS_CONNECTED_BASE_CAST +#define PDB_PROP_GPU_IS_CONNECTED_BASE_NAME PDB_PROP_GPU_IS_CONNECTED +#define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_NAME PDB_PROP_GPU_IS_PLX_PRESENT +#define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_NAME PDB_PROP_GPU_NVLINK_SYSMEM +#define PDB_PROP_GPU_IS_MOBILE_BASE_CAST +#define PDB_PROP_GPU_IS_MOBILE_BASE_NAME PDB_PROP_GPU_IS_MOBILE +#define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_RTD3_GC6_ACTIVE +#define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_CAST +#define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_ENABLED +#define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_CAST +#define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_NAME PDB_PROP_GPU_PERSISTENT_SW_STATE +#define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH +#define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_CAST +#define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_NAME PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED +#define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST +#define 
PDB_PROP_GPU_BEHIND_BR03_BASE_NAME PDB_PROP_GPU_BEHIND_BR03 +#define PDB_PROP_GPU_BEHIND_BR04_BASE_CAST +#define PDB_PROP_GPU_BEHIND_BR04_BASE_NAME PDB_PROP_GPU_BEHIND_BR04 +#define PDB_PROP_GPU_MIG_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_SUPPORTED +#define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_CAST +#define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_NAME PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE +#define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_CAST +#define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_NAME PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE +#define PDB_PROP_GPU_ACCOUNTING_ON_BASE_CAST +#define PDB_PROP_GPU_ACCOUNTING_ON_BASE_NAME PDB_PROP_GPU_ACCOUNTING_ON +#define PDB_PROP_GPU_IN_HIBERNATE_BASE_CAST +#define PDB_PROP_GPU_IN_HIBERNATE_BASE_NAME PDB_PROP_GPU_IN_HIBERNATE +#define PDB_PROP_GPU_BROKEN_FB_BASE_CAST +#define PDB_PROP_GPU_BROKEN_FB_BASE_NAME PDB_PROP_GPU_BROKEN_FB +#define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_CAST +#define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_NAME PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT +#define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_CAST +#define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_NAME PDB_PROP_GPU_IN_TIMEOUT_RECOVERY +#define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_FAST_GC6_ACTIVE +#define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_NAME PDB_PROP_GPU_IN_FULLCHIP_RESET +#define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_CAST +#define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_NAME PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_CAST +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA +#define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_SLI_LINK_CODEPATH +#define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR03_PRESENT +#define PDB_PROP_GPU_IS_GEMINI_BASE_CAST +#define PDB_PROP_GPU_IS_GEMINI_BASE_NAME PDB_PROP_GPU_IS_GEMINI +#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_CAST +#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_NAME PDB_PROP_GPU_STATE_INITIALIZED +#define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_CAST +#define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_NAME PDB_PROP_GPU_NV_USERMODE_ENABLED +#define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_CAST +#define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS +#define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR04_PRESENT +#define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_NAME PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM +#define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_CAST +#define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_NAME PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED +#define PDB_PROP_GPU_ZERO_FB_BASE_CAST +#define PDB_PROP_GPU_ZERO_FB_BASE_NAME PDB_PROP_GPU_ZERO_FB +#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_CAST +#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_NAME PDB_PROP_GPU_SWRL_GRANULAR_LOCKING +#define PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_CAST +#define 
PDB_PROP_GPU_TEGRA_SOC_IGPU_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_IGPU +#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_NAME PDB_PROP_GPU_ATS_SUPPORTED +#define PDB_PROP_GPU_EMULATION_BASE_CAST +#define PDB_PROP_GPU_EMULATION_BASE_NAME PDB_PROP_GPU_EMULATION +#define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_CAST +#define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_NAME PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS +#define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_CAST +#define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_NAME PDB_PROP_GPU_PRIMARY_DEVICE +#define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_CAST +#define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_NAME PDB_PROP_GPU_BEHIND_BRIDGE +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY +#define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_CAST +#define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_NAME PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST +#define PDB_PROP_GPU_IS_LOST_BASE_CAST +#define PDB_PROP_GPU_IS_LOST_BASE_NAME PDB_PROP_GPU_IS_LOST +#define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_CAST +#define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_NAME PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K + +NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance); +#define __objCreate_OBJGPU(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance) \ + __nvoc_objCreate_OBJGPU((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance) + +static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuConstructPhysical(pGpu) gpuConstructPhysical_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuConstructPhysical_HAL(pGpu) gpuConstructPhysical(pGpu) + +static inline void gpuDestructPhysical_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestructPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestructPhysical(pGpu) gpuDestructPhysical_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestructPhysical_HAL(pGpu) gpuDestructPhysical(pGpu) + +NV_STATUS gpuStatePreInit_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStatePreInit(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStatePreInit(pGpu) gpuStatePreInit_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuStatePreInit_HAL(pGpu) gpuStatePreInit(pGpu) + +NV_STATUS gpuStateLoad_IMPL(struct OBJGPU *pGpu, NvU32 arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateLoad(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateLoad(pGpu, arg0) gpuStateLoad_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateLoad_HAL(pGpu, arg0) gpuStateLoad(pGpu, arg0) + +NV_STATUS gpuStateDestroy_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateDestroy(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateDestroy(pGpu) gpuStateDestroy_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateDestroy_HAL(pGpu) gpuStateDestroy(pGpu) + +static inline NV_STATUS gpuApplyOverrides_46f6a7(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuApplyOverrides(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuApplyOverrides(pGpu, arg0, arg1) gpuApplyOverrides_46f6a7(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuApplyOverrides_HAL(pGpu, arg0, arg1) gpuApplyOverrides(pGpu, arg0, arg1) + +static inline NV_STATUS gpuInitDevinitOverridesFromRegistry_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitDevinitOverridesFromRegistry(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitDevinitOverridesFromRegistry(pGpu) gpuInitDevinitOverridesFromRegistry_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitDevinitOverridesFromRegistry_HAL(pGpu) gpuInitDevinitOverridesFromRegistry(pGpu) + +static inline NV_STATUS gpuApplyDevinitReg032Override_46f6a7(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuApplyDevinitReg032Override(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuApplyDevinitReg032Override(pGpu, arg0, arg1) gpuApplyDevinitReg032Override_46f6a7(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuApplyDevinitReg032Override_HAL(pGpu, arg0, arg1) gpuApplyDevinitReg032Override(pGpu, arg0, arg1) + +static inline NV_STATUS gpuCheckPCIIDMismatch_56cd7a(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuCheckPCIIDMismatch(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckPCIIDMismatch(pGpu, arg0) gpuCheckPCIIDMismatch_56cd7a(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckPCIIDMismatch_HAL(pGpu, arg0) gpuCheckPCIIDMismatch(pGpu, arg0) + +static inline NvBool gpuCheckGpuIDMismatch_491d52(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckGpuIDMismatch(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckGpuIDMismatch(pGpu, arg0, arg1) gpuCheckGpuIDMismatch_491d52(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckGpuIDMismatch_HAL(pGpu, arg0, arg1) 
gpuCheckGpuIDMismatch(pGpu, arg0, arg1) + +NV_STATUS gpuGetNameString_T234D(struct OBJGPU *pGpu, NvU32 arg0, void *arg1); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetNameString(struct OBJGPU *pGpu, NvU32 arg0, void *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetNameString(pGpu, arg0, arg1) gpuGetNameString_T234D(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetNameString_HAL(pGpu, arg0, arg1) gpuGetNameString(pGpu, arg0, arg1) + +NV_STATUS gpuGetShortNameString_T234D(struct OBJGPU *pGpu, NvU8 *arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetShortNameString(struct OBJGPU *pGpu, NvU8 *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetShortNameString(pGpu, arg0) gpuGetShortNameString_T234D(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetShortNameString_HAL(pGpu, arg0) gpuGetShortNameString(pGpu, arg0) + +static inline void gpuDeterminePersistantIllumSettings_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDeterminePersistantIllumSettings(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDeterminePersistantIllumSettings(pGpu) gpuDeterminePersistantIllumSettings_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDeterminePersistantIllumSettings_HAL(pGpu) gpuDeterminePersistantIllumSettings(pGpu) + +static inline NV_STATUS gpuInitSliIllumination_46f6a7(struct OBJGPU *pGpu) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitSliIllumination(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitSliIllumination(pGpu) gpuInitSliIllumination_46f6a7(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitSliIllumination_HAL(pGpu) gpuInitSliIllumination(pGpu) + +NV_STATUS gpuBuildGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBuildGenericKernelFalconList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBuildGenericKernelFalconList(pGpu) gpuBuildGenericKernelFalconList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuBuildGenericKernelFalconList_HAL(pGpu) gpuBuildGenericKernelFalconList(pGpu) + +void gpuDestroyGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyGenericKernelFalconList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyGenericKernelFalconList(pGpu) gpuDestroyGenericKernelFalconList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyGenericKernelFalconList_HAL(pGpu) gpuDestroyGenericKernelFalconList(pGpu) + +struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGenericKernelFalconForEngine(pGpu, arg0) 
gpuGetGenericKernelFalconForEngine_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetGenericKernelFalconForEngine_HAL(pGpu, arg0) gpuGetGenericKernelFalconForEngine(pGpu, arg0) + +void gpuRegisterGenericKernelFalconIntrService_IMPL(struct OBJGPU *pGpu, void *pRecords); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuRegisterGenericKernelFalconIntrService(struct OBJGPU *pGpu, void *pRecords) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService_IMPL(pGpu, pRecords) +#endif //__nvoc_gpu_h_disabled + +#define gpuRegisterGenericKernelFalconIntrService_HAL(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) + +static inline void gpuGetHwDefaults_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuGetHwDefaults(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGetHwDefaults(pGpu) gpuGetHwDefaults_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetHwDefaults_HAL(pGpu) gpuGetHwDefaults(pGpu) + +RmPhysAddr gpuGetDmaEndAddress_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline RmPhysAddr gpuGetDmaEndAddress(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDmaEndAddress(pGpu) gpuGetDmaEndAddress_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetDmaEndAddress_HAL(pGpu) gpuGetDmaEndAddress(pGpu) + +static inline NV_STATUS gpuMarkDeviceForReset_46f6a7(struct OBJGPU *pGpu) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuMarkDeviceForReset(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuMarkDeviceForReset(pGpu) gpuMarkDeviceForReset_46f6a7(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuMarkDeviceForReset_HAL(pGpu) gpuMarkDeviceForReset(pGpu) + +static inline NV_STATUS gpuMarkDeviceForDrainAndReset_46f6a7(struct OBJGPU *pGpu) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuMarkDeviceForDrainAndReset(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuMarkDeviceForDrainAndReset(pGpu) gpuMarkDeviceForDrainAndReset_46f6a7(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuMarkDeviceForDrainAndReset_HAL(pGpu) gpuMarkDeviceForDrainAndReset(pGpu) + +static inline NvU32 gpuGetSliFingerPinsetMask_4a4dee(struct OBJGPU *pGpu) { + return 0; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetSliFingerPinsetMask(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetSliFingerPinsetMask(pGpu) gpuGetSliFingerPinsetMask_4a4dee(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetSliFingerPinsetMask_HAL(pGpu) gpuGetSliFingerPinsetMask(pGpu) + +static inline NV_STATUS gpuPrivSecInitRegistryOverrides_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPrivSecInitRegistryOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} 
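+
+// Editor's note (illustrative, not emitted by NVOC): every method in this
+// header follows the same three-part pattern. The implementation is either an
+// out-of-line _IMPL/_T234D routine or a suffixed static inline whose suffix
+// appears to key the generated body (e.g. the _56cd7a variants above simply
+// return NV_OK, _46f6a7 variants return NV_ERR_NOT_SUPPORTED, and _b3696a
+// variants are empty). When __nvoc_gpu_h_disabled is defined, a stub that
+// fires NV_ASSERT_FAILED_PRECOMP and fails with NV_ERR_NOT_SUPPORTED is
+// substituted instead; and the unsuffixed name plus its _HAL alias are macros
+// that route callers to whichever variant is active.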
+#else //__nvoc_gpu_h_disabled +#define gpuPrivSecInitRegistryOverrides(pGpu) gpuPrivSecInitRegistryOverrides_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuPrivSecInitRegistryOverrides_HAL(pGpu) gpuPrivSecInitRegistryOverrides(pGpu) + +static inline void gpuDestroyOverrides_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyOverrides(pGpu) gpuDestroyOverrides_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyOverrides_HAL(pGpu) gpuDestroyOverrides(pGpu) + +static inline NV_STATUS gpuWriteBusConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 value) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuWriteBusConfigReg(struct OBJGPU *pGpu, NvU32 index, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteBusConfigReg(pGpu, index, value) gpuWriteBusConfigReg_46f6a7(pGpu, index, value) +#endif //__nvoc_gpu_h_disabled + +#define gpuWriteBusConfigReg_HAL(pGpu, index, value) gpuWriteBusConfigReg(pGpu, index, value) + +static inline NV_STATUS gpuReadBusConfigReg_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadBusConfigReg(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadBusConfigReg(pGpu, index, data) gpuReadBusConfigReg_46f6a7(pGpu, index, data) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadBusConfigReg_HAL(pGpu, index, data) gpuReadBusConfigReg(pGpu, index, data) + +static inline NV_STATUS gpuReadBusConfigRegEx_46f6a7(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadBusConfigRegEx(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_46f6a7(pGpu, index, data, pThreadState) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadBusConfigRegEx_HAL(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) + +static inline NV_STATUS gpuReadFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadFunctionConfigReg(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadFunctionConfigReg(pGpu, function, reg, data) gpuReadFunctionConfigReg_5baef9(pGpu, function, reg, data) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadFunctionConfigReg_HAL(pGpu, function, reg, data) gpuReadFunctionConfigReg(pGpu, function, reg, data) + +static inline NV_STATUS gpuWriteFunctionConfigReg_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_gpu_h_disabled +static 
inline NV_STATUS gpuWriteFunctionConfigReg(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteFunctionConfigReg(pGpu, function, reg, data) gpuWriteFunctionConfigReg_5baef9(pGpu, function, reg, data) +#endif //__nvoc_gpu_h_disabled + +#define gpuWriteFunctionConfigReg_HAL(pGpu, function, reg, data) gpuWriteFunctionConfigReg(pGpu, function, reg, data) + +static inline NV_STATUS gpuWriteFunctionConfigRegEx_5baef9(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuWriteFunctionConfigRegEx(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_5baef9(pGpu, function, reg, data, pThreadState) +#endif //__nvoc_gpu_h_disabled + +#define gpuWriteFunctionConfigRegEx_HAL(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) + +static inline NV_STATUS gpuSetPower_56cd7a(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetPower(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetPower(pGpu, arg1, arg2, arg3) gpuSetPower_56cd7a(pGpu, arg1, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetPower_HAL(pGpu, arg1, arg2, arg3) gpuSetPower(pGpu, arg1, arg2, arg3) + +static inline void gpuGetIdInfo_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuGetIdInfo(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGetIdInfo(pGpu) gpuGetIdInfo_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetIdInfo_HAL(pGpu) gpuGetIdInfo(pGpu) + +static inline void gpuUpdateIdInfo_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuUpdateIdInfo(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuUpdateIdInfo(pGpu) gpuUpdateIdInfo_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuUpdateIdInfo_HAL(pGpu) gpuUpdateIdInfo(pGpu) + +static inline NvU32 gpuGetDeviceIDList_4a4dee(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) { + return 0; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetDeviceIDList(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceIDList(pGpu, arg0) gpuGetDeviceIDList_4a4dee(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetDeviceIDList_HAL(pGpu, arg0) gpuGetDeviceIDList(pGpu, arg0) + +static inline NV_STATUS gpuGenGidData_46f6a7(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGenGidData(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags) { + 
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData_46f6a7(pGpu, pGidData, gidSize, gidFlags) +#endif //__nvoc_gpu_h_disabled + +#define gpuGenGidData_HAL(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) + +NvU8 gpuGetChipSubRev_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU8 gpuGetChipSubRev(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetChipSubRev(pGpu) gpuGetChipSubRev_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetChipSubRev_HAL(pGpu) gpuGetChipSubRev(pGpu) + +NvU32 gpuGetEmulationRev1_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetEmulationRev1(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetEmulationRev1(pGpu) gpuGetEmulationRev1_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetEmulationRev1_HAL(pGpu) gpuGetEmulationRev1(pGpu) + +static inline NV_STATUS gpuPerformUniversalValidation_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPerformUniversalValidation(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPerformUniversalValidation(pGpu) gpuPerformUniversalValidation_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuPerformUniversalValidation_HAL(pGpu) gpuPerformUniversalValidation(pGpu) + +static inline NvU32 gpuGetVirtRegPhysOffset_4a4dee(struct OBJGPU *pGpu) { + return 0; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetVirtRegPhysOffset(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetVirtRegPhysOffset(pGpu) gpuGetVirtRegPhysOffset_4a4dee(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetVirtRegPhysOffset_HAL(pGpu) gpuGetVirtRegPhysOffset(pGpu) + +NV_STATUS gpuGetRegBaseOffset_FWCLIENT(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetRegBaseOffset(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetRegBaseOffset(pGpu, arg0, arg1) gpuGetRegBaseOffset_FWCLIENT(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetRegBaseOffset_HAL(pGpu, arg0, arg1) gpuGetRegBaseOffset(pGpu, arg0, arg1) + +static inline void gpuHandleSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuHandleSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuHandleSanityCheckRegReadError(pGpu, addr, value) gpuHandleSanityCheckRegReadError_b3696a(pGpu, addr, value) +#endif //__nvoc_gpu_h_disabled + +#define gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value) gpuHandleSanityCheckRegReadError(pGpu, addr, value) + +static inline void gpuGetSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static 
inline void gpuGetSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError_b3696a(pGpu, value, pErrorString) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetSanityCheckRegReadError_HAL(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) + +static inline NV_STATUS gpuSanityCheckVirtRegAccess_56cd7a(struct OBJGPU *pGpu, NvU32 arg0) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckVirtRegAccess(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckVirtRegAccess(pGpu, arg0) gpuSanityCheckVirtRegAccess_56cd7a(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuSanityCheckVirtRegAccess_HAL(pGpu, arg0) gpuSanityCheckVirtRegAccess(pGpu, arg0) + +NV_STATUS gpuInitRegistryOverrides_KERNEL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitRegistryOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitRegistryOverrides(pGpu) gpuInitRegistryOverrides_KERNEL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitRegistryOverrides_HAL(pGpu) gpuInitRegistryOverrides(pGpu) + +NV_STATUS gpuInitInstLocOverrides_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitInstLocOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitInstLocOverrides(pGpu) gpuInitInstLocOverrides_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitInstLocOverrides_HAL(pGpu) gpuInitInstLocOverrides(pGpu) + +const GPUCHILDORDER *gpuGetChildrenOrder_T234D(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +#ifdef __nvoc_gpu_h_disabled +static inline const GPUCHILDORDER *gpuGetChildrenOrder(struct OBJGPU *pGpu, NvU32 *pNumEntries) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetChildrenOrder(pGpu, pNumEntries) gpuGetChildrenOrder_T234D(pGpu, pNumEntries) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetChildrenOrder_HAL(pGpu, pNumEntries) gpuGetChildrenOrder(pGpu, pNumEntries) + +const GPUCHILDPRESENT *gpuGetChildrenPresent_T234D(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +#ifdef __nvoc_gpu_h_disabled +static inline const GPUCHILDPRESENT *gpuGetChildrenPresent(struct OBJGPU *pGpu, NvU32 *pNumEntries) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetChildrenPresent(pGpu, pNumEntries) gpuGetChildrenPresent_T234D(pGpu, pNumEntries) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetChildrenPresent_HAL(pGpu, pNumEntries) gpuGetChildrenPresent(pGpu, pNumEntries) + +const CLASSDESCRIPTOR *gpuGetClassDescriptorList_T234D(struct OBJGPU *pGpu, NvU32 *arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline const CLASSDESCRIPTOR *gpuGetClassDescriptorList(struct OBJGPU *pGpu, NvU32 *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassDescriptorList(pGpu, arg0) gpuGetClassDescriptorList_T234D(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + 
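+/*
+ * Note on the pattern repeated throughout this header: when
+ * __nvoc_gpu_h_disabled is defined, each gpuXxx() entry point becomes a
+ * stub that fires NV_ASSERT_FAILED_PRECOMP and returns a safe default;
+ * otherwise the name is a macro bound to the active implementation
+ * (_IMPL, _KERNEL, _FWCLIENT, _T234D, or a hash-suffixed static inline),
+ * and the _HAL alias simply forwards to it. A minimal caller sketch,
+ * assuming only the declarations above (the helper name is hypothetical,
+ * for illustration only):
+ *
+ *     static NV_STATUS exampleApplyOverrides(struct OBJGPU *pGpu)
+ *     {
+ *         // Binds to gpuInitRegistryOverrides_KERNEL() here, or to the
+ *         // asserting stub when OBJGPU support is compiled out.
+ *         return gpuInitRegistryOverrides_HAL(pGpu);
+ *     }
+ */
+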
+#define gpuGetClassDescriptorList_HAL(pGpu, arg0) gpuGetClassDescriptorList(pGpu, arg0) + +NvU32 gpuGetPhysAddrWidth_T234D(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetPhysAddrWidth(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetPhysAddrWidth(pGpu, arg0) gpuGetPhysAddrWidth_T234D(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetPhysAddrWidth_HAL(pGpu, arg0) gpuGetPhysAddrWidth(pGpu, arg0) + +static inline NV_STATUS gpuInitSriov_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitSriov(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitSriov(pGpu) gpuInitSriov_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitSriov_HAL(pGpu) gpuInitSriov(pGpu) + +static inline NV_STATUS gpuDeinitSriov_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeinitSriov(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeinitSriov(pGpu) gpuDeinitSriov_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDeinitSriov_HAL(pGpu) gpuDeinitSriov(pGpu) + +static inline NV_STATUS gpuCreateDefaultClientShare_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuCreateDefaultClientShare(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuCreateDefaultClientShare(pGpu) gpuCreateDefaultClientShare_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuCreateDefaultClientShare_HAL(pGpu) gpuCreateDefaultClientShare(pGpu) + +static inline void gpuDestroyDefaultClientShare_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyDefaultClientShare(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyDefaultClientShare(pGpu) gpuDestroyDefaultClientShare_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyDefaultClientShare_HAL(pGpu) gpuDestroyDefaultClientShare(pGpu) + +static inline NV_STATUS gpuSetCacheOnlyModeOverrides_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetCacheOnlyModeOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetCacheOnlyModeOverrides(pGpu) gpuSetCacheOnlyModeOverrides_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetCacheOnlyModeOverrides_HAL(pGpu) gpuSetCacheOnlyModeOverrides(pGpu) + +NV_STATUS gpuGetCeFaultMethodBufferSize_KERNEL(struct OBJGPU *arg0, NvU32 *arg1); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetCeFaultMethodBufferSize(struct OBJGPU *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetCeFaultMethodBufferSize(arg0, arg1) gpuGetCeFaultMethodBufferSize_KERNEL(arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetCeFaultMethodBufferSize_HAL(arg0, arg1) 
gpuGetCeFaultMethodBufferSize(arg0, arg1) + +static inline NV_STATUS gpuSetVFBarSizes_56cd7a(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetVFBarSizes(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetVFBarSizes(pGpu, arg0) gpuSetVFBarSizes_56cd7a(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetVFBarSizes_HAL(pGpu, arg0) gpuSetVFBarSizes(pGpu, arg0) + +static inline void gpuServiceInterruptsAllGpus(struct OBJGPU *pGpu) { + return; +} + +static inline PENGDESCRIPTOR gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineInitDescriptors; +} + +static inline PENGDESCRIPTOR gpuGetLoadEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineLoadDescriptors; +} + +static inline PENGDESCRIPTOR gpuGetUnloadEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineUnloadDescriptors; +} + +static inline PENGDESCRIPTOR gpuGetDestroyEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineDestroyDescriptors; +} + +static inline NvU32 gpuGetNumEngDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.numEngineDescriptors; +} + +static inline NvU32 gpuGetMode(struct OBJGPU *pGpu) { + return pGpu->computeModeRefCount > 0 ? 2 : 1; +} + +static inline ACPI_DSM_FUNCTION gpuGetDispStatusHotplugFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.dispStatusHotplugFunc; +} + +static inline ACPI_DSM_FUNCTION gpuGetDispStatusConfigFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.dispStatusConfigFunc; +} + +static inline ACPI_DSM_FUNCTION gpuGetPerfPostPowerStateFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.perfPostPowerStateFunc; +} + +static inline ACPI_DSM_FUNCTION gpuGetStereo3dStateActiveFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.stereo3dStateActiveFunc; +} + +static inline NvU32 gpuGetPmcBoot0(struct OBJGPU *pGpu) { + return pGpu->chipId0; +} + +static inline NV_STATUS gpuGetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS gpuSetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline struct OBJFIFO *gpuGetFifoShared(struct OBJGPU *pGpu) { + return ((void *)0); +} + +static inline struct KernelFifo *gpuGetKernelFifoShared(struct OBJGPU *pGpu) { + return ((void *)0); +} + +static inline ENGSTATE_ITER gpuGetEngstateIter(struct OBJGPU *pGpu) { + GPU_CHILD_ITER it = { 0 }; + return it; +} + +static inline struct OBJHOSTENG *gpuGetHosteng(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + return ((void *)0); +} + +static inline NV_STATUS gpuConstructUserRegisterAccessMap(struct OBJGPU *pGpu) { + return NV_OK; +} + +static inline NV_STATUS gpuInitRegisterAccessMap(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS gpuSetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NvBool gpuGetUserRegisterAccessPermissions(struct OBJGPU 
*pGpu, NvU32 offset) { + return ((NvBool)(0 != 0)); +} + +static inline void gpuDumpCallbackRegister(struct OBJGPU *pGpu) { + return; +} + +static inline RmPhysAddr gpuGetDmaStartAddress(struct OBJGPU *pGpu) { + return pGpu->dmaStartAddress; +} + +static inline NV_STATUS gpuFreeEventHandle(struct OBJGPU *pGpu) { + return NV_OK; +} + +static inline NvU32 gpuGetChipMajRev(struct OBJGPU *pGpu) { + return pGpu->chipInfo.pmcBoot42.majorRev; +} + +static inline NvU32 gpuGetChipMinRev(struct OBJGPU *pGpu) { + return pGpu->chipInfo.pmcBoot42.minorRev; +} + +static inline NvU32 gpuGetChipImpl(struct OBJGPU *pGpu) { + return pGpu->chipInfo.implementationId; +} + +static inline NvU32 gpuGetChipArch(struct OBJGPU *pGpu) { + return pGpu->chipInfo.platformId; +} + +static inline NvU32 gpuGetChipMinExtRev(struct OBJGPU *pGpu) { + return pGpu->chipInfo.pmcBoot42.minorExtRev; +} + +static inline NvU64 gpuGetVmmuSegmentSize(struct OBJGPU *pGpu) { + return pGpu->vmmuSegmentSize; +} + +static inline const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *gpuGetChipInfo(struct OBJGPU *pGpu) { + return pGpu->pChipInfo; +} + +static inline NvBool gpuIsBar2MovedByVtd(struct OBJGPU *pGpu) { + return pGpu->bBar2MovedByVtd; +} + +static inline NvBool gpuIsBar1Size64Bit(struct OBJGPU *pGpu) { + return pGpu->bBar1Is64Bit; +} + +static inline NvBool gpuIsSurpriseRemovalSupported(struct OBJGPU *pGpu) { + return pGpu->bSurpriseRemovalSupported; +} + +static inline NvBool gpuIsReplayableTraceEnabled(struct OBJGPU *pGpu) { + return pGpu->bReplayableTraceEnabled; +} + +static inline NvBool gpuIsStateLoading(struct OBJGPU *pGpu) { + return pGpu->bStateLoading; +} + +static inline NvBool gpuIsStateUnloading(struct OBJGPU *pGpu) { + return pGpu->bStateUnloading; +} + +static inline NvBool gpuIsStateLoaded(struct OBJGPU *pGpu) { + return pGpu->bStateLoaded; +} + +static inline NvBool gpuIsFullyConstructed(struct OBJGPU *pGpu) { + return pGpu->bFullyConstructed; +} + +static inline NvBool gpuIsUnifiedMemorySpaceEnabled(struct OBJGPU *pGpu) { + return pGpu->bUnifiedMemorySpaceEnabled; +} + +static inline NvBool gpuIsSriovEnabled(struct OBJGPU *pGpu) { + return pGpu->bSriovEnabled; +} + +static inline NvBool gpuIsCacheOnlyModeEnabled(struct OBJGPU *pGpu) { + return pGpu->bCacheOnlyMode; +} + +static inline NvBool gpuIsSplitVasManagementServerClientRmEnabled(struct OBJGPU *pGpu) { + return pGpu->bSplitVasManagementServerClientRm; +} + +static inline NvBool gpuIsWarBug200577889SriovHeavyEnabled(struct OBJGPU *pGpu) { + return pGpu->bWarBug200577889SriovHeavyEnabled; +} + +static inline NvBool gpuIsPipelinedPteMemEnabled(struct OBJGPU *pGpu) { + return pGpu->bPipelinedPteMemEnabled; +} + +static inline NvBool gpuIsBarPteInSysmemSupported(struct OBJGPU *pGpu) { + return pGpu->bIsBarPteInSysmemSupported; +} + +static inline NvBool gpuIsRegUsesGlobalSurfaceOverridesEnabled(struct OBJGPU *pGpu) { + return pGpu->bRegUsesGlobalSurfaceOverrides; +} + +static inline NvBool gpuIsTwoStageRcRecoveryEnabled(struct OBJGPU *pGpu) { + return pGpu->bTwoStageRcRecoveryEnabled; +} + +static inline NvBool gpuIsInD3Cold(struct OBJGPU *pGpu) { + return pGpu->bInD3Cold; +} + +static inline NvBool gpuIsClientRmAllocatedCtxBufferEnabled(struct OBJGPU *pGpu) { + return pGpu->bClientRmAllocatedCtxBuffer; +} + +static inline NvBool gpuIsIterativeMmuWalkerEnabled(struct OBJGPU *pGpu) { + return pGpu->bIterativeMmuWalker; +} + +static inline NvBool gpuIsEccPageRetirementWithSliAllowed(struct OBJGPU *pGpu) { + return pGpu->bEccPageRetirementWithSliAllowed; +} + 
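+/*
+ * The NvBool accessors above are trivial readers over OBJGPU state and
+ * are safe wherever a valid pGpu is already held. A minimal sketch of a
+ * guard composed from them, assuming only the accessors defined above
+ * (the helper name is hypothetical, for illustration only):
+ *
+ *     static NvBool exampleCanUseSriovPath(struct OBJGPU *pGpu)
+ *     {
+ *         // Require SR-IOV, and skip it when the heavy-mode workaround
+ *         // for bug 200577889 is active.
+ *         return gpuIsSriovEnabled(pGpu) &&
+ *                !gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
+ *     }
+ */
+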
+static inline NvBool gpuIsVidmemPreservationBrokenBug3172217(struct OBJGPU *pGpu) { + return pGpu->bVidmemPreservationBrokenBug3172217; +} + +static inline NvBool gpuIsInstanceMemoryAlwaysCached(struct OBJGPU *pGpu) { + return pGpu->bInstanceMemoryAlwaysCached; +} + +static inline NvBool gpuIsRmProfilingPrivileged(struct OBJGPU *pGpu) { + return pGpu->bRmProfilingPrivileged; +} + +static inline NvBool gpuIsGeforceSmb(struct OBJGPU *pGpu) { + return pGpu->bGeforceSmb; +} + +static inline NvBool gpuIsGeforceBranded(struct OBJGPU *pGpu) { + return pGpu->bIsGeforce; +} + +static inline NvBool gpuIsQuadroBranded(struct OBJGPU *pGpu) { + return pGpu->bIsQuadro; +} + +static inline NvBool gpuIsVgxBranded(struct OBJGPU *pGpu) { + return pGpu->bIsVgx; +} + +static inline NvBool gpuIsNvidiaNvsBranded(struct OBJGPU *pGpu) { + return pGpu->bIsNvidiaNvs; +} + +static inline NvBool gpuIsTitanBranded(struct OBJGPU *pGpu) { + return pGpu->bIsTitan; +} + +static inline NvBool gpuIsTeslaBranded(struct OBJGPU *pGpu) { + return pGpu->bIsTesla; +} + +static inline NvBool gpuIsComputePolicyTimesliceSupported(struct OBJGPU *pGpu) { + return pGpu->bComputePolicyTimesliceSupported; +} + +NV_STATUS gpuConstruct_IMPL(struct OBJGPU *arg_pGpu, NvU32 arg_gpuInstance); +#define __nvoc_gpuConstruct(arg_pGpu, arg_gpuInstance) gpuConstruct_IMPL(arg_pGpu, arg_gpuInstance) +NV_STATUS gpuBindHalLegacy_IMPL(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBindHalLegacy(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBindHalLegacy(pGpu, chipId0, chipId1) gpuBindHalLegacy_IMPL(pGpu, chipId0, chipId1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuPostConstruct_IMPL(struct OBJGPU *pGpu, GPUATTACHARG *arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPostConstruct(struct OBJGPU *pGpu, GPUATTACHARG *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPostConstruct(pGpu, arg0) gpuPostConstruct_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuCreateObject_IMPL(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuCreateObject(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuCreateObject(pGpu, arg0, arg1) gpuCreateObject_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +void gpuDestruct_IMPL(struct OBJGPU *pGpu); +#define __nvoc_gpuDestruct(pGpu) gpuDestruct_IMPL(pGpu) +NV_STATUS gpuStateInit_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateInit(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateInit(pGpu) gpuStateInit_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuStateUnload_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateUnload(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateUnload(pGpu, arg0) gpuStateUnload_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuInitDispIpHal_IMPL(struct 
OBJGPU *pGpu, NvU32 ipver); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitDispIpHal(struct OBJGPU *pGpu, NvU32 ipver) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitDispIpHal(pGpu, ipver) gpuInitDispIpHal_IMPL(pGpu, ipver) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsImplementation_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsImplementation(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsImplementation(pGpu, arg0, arg1, arg2) gpuIsImplementation_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsImplementationOrBetter_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsImplementationOrBetter(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsImplementationOrBetter(pGpu, arg0, arg1, arg2) gpuIsImplementationOrBetter_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsGpuFullPower_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsGpuFullPower(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsGpuFullPower(pGpu) gpuIsGpuFullPower_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsGpuFullPowerForPmResume_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsGpuFullPowerForPmResume(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsGpuFullPowerForPmResume(pGpu) gpuIsGpuFullPowerForPmResume_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuBuildClassDB_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBuildClassDB(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBuildClassDB(pGpu) gpuBuildClassDB_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDestroyClassDB_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDestroyClassDB(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyClassDB(pGpu) gpuDestroyClassDB_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteEngineFromClassDB_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteEngineFromClassDB(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteEngineFromClassDB(pGpu, arg0) gpuDeleteEngineFromClassDB_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteEngineOnPreInit_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteEngineOnPreInit(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_gpu_h_disabled +#define gpuDeleteEngineOnPreInit(pGpu, arg0) gpuDeleteEngineOnPreInit_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddClassToClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddClassToClassDBByEngTag(pGpu, arg0) gpuAddClassToClassDBByEngTag_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddClassToClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddClassToClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddClassToClassDBByClassId(pGpu, arg0) gpuAddClassToClassDBByClassId_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddClassToClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddClassToClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddClassToClassDBByEngTagClassId(pGpu, arg0, arg1) gpuAddClassToClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteClassFromClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteClassFromClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteClassFromClassDBByClassId(pGpu, arg0) gpuDeleteClassFromClassDBByClassId_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteClassFromClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteClassFromClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteClassFromClassDBByEngTag(pGpu, arg0) gpuDeleteClassFromClassDBByEngTag_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteClassFromClassDBByEngTagClassId(pGpu, arg0, arg1) gpuDeleteClassFromClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsClassSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsClassSupported(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsClassSupported(pGpu, arg0) gpuIsClassSupported_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetClassByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS 
gpuGetClassByClassId(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassByClassId(pGpu, arg0, arg1) gpuGetClassByClassId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetClassByEngineAndClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetClassByEngineAndClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassByEngineAndClassId(pGpu, arg0, arg1, arg2) gpuGetClassByEngineAndClassId_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetClassList_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetClassList(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassList(pGpu, arg0, arg1, arg2) gpuGetClassList_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuConstructEngineTable_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuConstructEngineTable(pGpu) gpuConstructEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuDestroyEngineTable_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyEngineTable(pGpu) gpuDestroyEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuUpdateEngineTable_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuUpdateEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuUpdateEngineTable(pGpu) gpuUpdateEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuCheckEngineTable_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckEngineTable(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckEngineTable(pGpu, arg0) gpuCheckEngineTable_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuXlateEngDescToClientEngineId_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, NvU32 *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuXlateEngDescToClientEngineId(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuXlateEngDescToClientEngineId(pGpu, arg0, arg1) gpuXlateEngDescToClientEngineId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuXlateClientEngineIdToEngDesc_IMPL(struct OBJGPU *pGpu, NvU32 arg0, ENGDESCRIPTOR *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuXlateClientEngineIdToEngDesc(struct OBJGPU 
*pGpu, NvU32 arg0, ENGDESCRIPTOR *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuXlateClientEngineIdToEngDesc(pGpu, arg0, arg1) gpuXlateClientEngineIdToEngDesc_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetFlcnFromClientEngineId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetFlcnFromClientEngineId(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetFlcnFromClientEngineId(pGpu, arg0, arg1) gpuGetFlcnFromClientEngineId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsEngDescSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsEngDescSupported(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsEngDescSupported(pGpu, arg0) gpuIsEngDescSupported_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuReadBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadBusConfigCycle(pGpu, index, pData) gpuReadBusConfigCycle_IMPL(pGpu, index, pData) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuWriteBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 value); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuWriteBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteBusConfigCycle(pGpu, index, value) gpuWriteBusConfigCycle_IMPL(pGpu, index, value) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetGpuMask_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetGpuMask(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGpuMask(pGpu) gpuGetGpuMask_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuChangeComputeModeRefCount_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuChangeComputeModeRefCount(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuChangeComputeModeRefCount(pGpu, arg0) gpuChangeComputeModeRefCount_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuEnterShutdown_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuEnterShutdown(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuEnterShutdown(pGpu) gpuEnterShutdown_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheck_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheck(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define 
gpuSanityCheck(pGpu, arg0, arg1) gpuSanityCheck_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +DEVICE_MAPPING *gpuGetDeviceMapping_IMPL(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline DEVICE_MAPPING *gpuGetDeviceMapping(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceMapping(pGpu, arg0, arg1) gpuGetDeviceMapping_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceMappingFromDeviceID(pGpu, arg0, arg1) gpuGetDeviceMappingFromDeviceID_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetGidInfo_IMPL(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetGidInfo(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGidInfo(pGpu, ppGidString, pGidStrlen, gidFlags) gpuGetGidInfo_IMPL(pGpu, ppGidString, pGidStrlen, gidFlags) +#endif //__nvoc_gpu_h_disabled + +void gpuSetThreadBcState_IMPL(struct OBJGPU *pGpu, NvBool arg0); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetThreadBcState(struct OBJGPU *pGpu, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetThreadBcState(pGpu, arg0) gpuSetThreadBcState_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +void gpuSetDisconnectedProperties_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetDisconnectedProperties(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetDisconnectedProperties(pGpu) gpuSetDisconnectedProperties_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddConstructedFalcon(pGpu, arg0) gpuAddConstructedFalcon_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuRemoveConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuRemoveConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuRemoveConstructedFalcon(pGpu, arg0) gpuRemoveConstructedFalcon_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetConstructedFalcon_IMPL(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetConstructedFalcon(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_gpu_h_disabled +#define gpuGetConstructedFalcon(pGpu, arg0, arg1) gpuGetConstructedFalcon_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +struct OBJENGSTATE *gpuGetEngstate_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); +#ifdef __nvoc_gpu_h_disabled +static inline struct OBJENGSTATE *gpuGetEngstate(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetEngstate(pGpu, arg0) gpuGetEngstate_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +struct OBJENGSTATE *gpuGetEngstateNoShare_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); +#ifdef __nvoc_gpu_h_disabled +static inline struct OBJENGSTATE *gpuGetEngstateNoShare(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetEngstateNoShare(pGpu, arg0) gpuGetEngstateNoShare_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuGetNextEngstate_IMPL(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuGetNextEngstate(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetNextEngstate(pGpu, pIt, ppEngState) gpuGetNextEngstate_IMPL(pGpu, pIt, ppEngState) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckGfid_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckGfid(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckGfid(pGpu, gfid, bInUse) gpuSanityCheckGfid_IMPL(pGpu, gfid, bInUse) +#endif //__nvoc_gpu_h_disabled + +void gpuSetGfidUsage_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetGfidUsage(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetGfidUsage(pGpu, gfid, bInUse) gpuSetGfidUsage_IMPL(pGpu, gfid, bInUse) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSetExternalKernelClientCount_IMPL(struct OBJGPU *pGpu, NvBool bIncr); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetExternalKernelClientCount(struct OBJGPU *pGpu, NvBool bIncr) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetExternalKernelClientCount(pGpu, bIncr) gpuSetExternalKernelClientCount_IMPL(pGpu, bIncr) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsInUse_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsInUse(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsInUse(pGpu) gpuIsInUse_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetUserClientCount_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetUserClientCount(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetUserClientCount(pGpu) gpuGetUserClientCount_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetExternalClientCount_IMPL(struct OBJGPU *pGpu); +#ifdef 
__nvoc_gpu_h_disabled +static inline NvU32 gpuGetExternalClientCount(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetExternalClientCount(pGpu) gpuGetExternalClientCount_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuNotifySubDeviceEvent_IMPL(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuNotifySubDeviceEvent(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuNotifySubDeviceEvent(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) gpuNotifySubDeviceEvent_IMPL(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuRegisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuRegisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuRegisterSubdevice(pGpu, pSubdevice) gpuRegisterSubdevice_IMPL(pGpu, pSubdevice) +#endif //__nvoc_gpu_h_disabled + +void gpuUnregisterSubdevice_IMPL(struct OBJGPU *pGpu, struct Subdevice *pSubdevice); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuUnregisterSubdevice(struct OBJGPU *pGpu, struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuUnregisterSubdevice(pGpu, pSubdevice) gpuUnregisterSubdevice_IMPL(pGpu, pSubdevice) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetProcWithObject_IMPL(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetProcWithObject(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetProcWithObject(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) gpuGetProcWithObject_IMPL(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuFindClientInfoWithPidIterator_IMPL(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuFindClientInfoWithPidIterator(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuFindClientInfoWithPidIterator(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) gpuFindClientInfoWithPidIterator_IMPL(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuCheckSysmemAccess_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled 
+static inline NvBool gpuCheckSysmemAccess(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckSysmemAccess(pGpu) gpuCheckSysmemAccess_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuInitChipInfo_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuInitChipInfo(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuInitChipInfo(pGpu) gpuInitChipInfo_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckRegRead_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckRegRead(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckRegRead(pGpu, addr, size, pValue) gpuSanityCheckRegRead_IMPL(pGpu, addr, size, pValue) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckRegisterAccess_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckRegisterAccess(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckRegisterAccess(pGpu, addr, pRetVal) gpuSanityCheckRegisterAccess_IMPL(pGpu, addr, pRetVal) +#endif //__nvoc_gpu_h_disabled + +#undef PRIVATE_FIELD + + +// Look up pGpu associated with a pResourceRef +NV_STATUS gpuGetByRef (RsResourceRef *pContextRef, NvBool *pbBroadcast, struct OBJGPU **ppGpu); + +// Look up pGpu associated with a hResource +NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *pbBroadcast, struct OBJGPU **ppGpu); + +// Checks if an SR-IOV GFID is in use +#define GPU_IS_SRIOV_GFID_IN_USE(gfid) ((gpuSanityCheckGfid(pGpu, gfid, NV_TRUE) == NV_ERR_IN_USE) ? NV_TRUE : NV_FALSE) + +#define GPU_GFID_PF (0) +#define IS_GFID_PF(gfid) ((gfid) == GPU_GFID_PF) +#define IS_GFID_VF(gfid) ((gfid) != GPU_GFID_PF) +// Invalid P2P GFID +#define INVALID_P2P_GFID (0xFFFFFFFF) + +// +// Generates GPU child accessor macros (i.e.: GPU_GET_{ENG}) +// +#define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return pGpu->gpuField; } \ + ct_assert(numInstances == 1); + +#define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return index < numInstances ? 
pGpu->gpuField[index] : NULL; }
+
+#include "gpu/gpu_child_list.h"
+
+static NV_FORCEINLINE struct Graphics *GPU_GET_GR(struct OBJGPU *pGpu) { return NULL; }
+
+// Temporary stubs
+#if RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS
+#define GPU_CHILD_LIST_DISABLED_ONLY
+#define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \
+    static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return NULL; }
+
+#define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \
+    static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return NULL; }
+
+#include "gpu/gpu_child_list.h"
+#endif // RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS
+
+
+//
+// Inline functions
+//
+
+//
+// This function returns the subdevice mask for a GPU
+// (1 << subdeviceInstance). For non-SLI, subdeviceInstance is 0, so this
+// function always returns 1.
+//
+
+static NV_INLINE NvU32
+gpuGetSubdeviceMask
+(
+    struct OBJGPU *pGpu
+)
+{
+    return 1 << pGpu->subdeviceInstance;
+}
+
+static NV_INLINE NvU32
+gpuGetInstance
+(
+    struct OBJGPU *pGpu
+)
+{
+    return pGpu->gpuInstance;
+}
+
+static NV_INLINE NvU32
+gpuGetDeviceInstance
+(
+    struct OBJGPU *pGpu
+)
+{
+    return pGpu->deviceInstance;
+}
+
+NV_INLINE
+static NvU32 gpuGetNumCEs(struct OBJGPU *pGpu)
+{
+    return pGpu->numCEs;
+}
+
+//
+// Per-GPU mode flag macros. In general these macros should not be
+// used, and all code paths should be the same in all environments.
+// However, occasionally a tweak is needed to work around a limitation
+// or improve speed on non-hardware platforms. IS_RTLSIM is normally
+// covered by the IS_SIMULATION case and should almost never be used
+// directly.
+//
+// IS_EMULATION     actual emulation hardware
+// IS_SIMULATION    fmodel or RTL simulation
+// IS_MODS_AMODEL   amodel under MODS for trace player
+// IS_LIVE_AMODEL   amodel under Windows for 3D drivers (removed)
+// IS_RTLSIM        RTL simulation
+// IS_SILICON       real hardware
+// IS_VIRTUAL       RM is running within a guest VM
+// IS_GSP_CLIENT    RM is a GSP/DCE client with GPU support offloaded to GSP/DCE
+// IS_FW_CLIENT     RM is a firmware client with GPU support offloaded to a microprocessor
+//
+
+#define IS_EMULATION(pGpu)                 ((pGpu)->getProperty((pGpu), PDB_PROP_GPU_EMULATION))
+#define IS_SIMULATION(pGpu)                (pGpu->bIsSimulation)
+#define IS_MODS_AMODEL(pGpu)               (pGpu->bIsModsAmodel)
+#define IS_FMODEL(pGpu)                    (pGpu->bIsFmodel)
+#define IS_RTLSIM(pGpu)                    (pGpu->bIsRtlsim)
+#define IS_SILICON(pGpu)                   (!(IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)))
+#define IS_PASSTHRU(pGpu)                  ((pGpu)->bIsPassthru)
+#define IS_GSP_CLIENT(pGpu)                ((RMCFG_FEATURE_GSP_CLIENT_RM || RMCFG_FEATURE_DCE_CLIENT_RM) && (pGpu)->isGspClient)
+#define IS_FW_CLIENT(pGpu)                 IS_GSP_CLIENT(pGpu) // TODO: to be removed
+#define IS_VIRTUAL(pGpu)                   NV_FALSE
+#define IS_VIRTUAL_WITH_SRIOV(pGpu)        NV_FALSE
+#define IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu)  NV_FALSE
+#define IS_VIRTUAL_WITH_FULL_SRIOV(pGpu)   NV_FALSE
+#define IS_VIRTUAL_WITHOUT_SRIOV(pGpu)     NV_FALSE
+#define IS_SRIOV_HEAVY(pGpu)               NV_FALSE
+#define IS_SRIOV_HEAVY_GUEST(pGpu)         NV_FALSE
+#define IS_SRIOV_FULL_GUEST(pGpu)          NV_FALSE
+#define IS_SRIOV_HEAVY_HOST(pGpu)          NV_FALSE
+#define IS_SRIOV_FULL_HOST(pGpu)           NV_FALSE
+#define IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu)   NV_FALSE
+
+extern GPU_CHILD_ITER gpuGetPossibleEngDescriptorIter(void);
+extern NvBool gpuGetNextPossibleEngDescriptor(GPU_CHILD_ITER *pIt, ENGDESCRIPTOR *pEngDesc);
+
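+/*
+ * Illustrative expansion of the generated child accessors above; the
+ * real class names, accessor names, and OBJGPU fields come from
+ * "gpu/gpu_child_list.h", so the KernelBus instance below is
+ * hypothetical:
+ *
+ *     GPU_CHILD_SINGLE_INST(KernelBus, GPU_GET_KERNEL_BUS, 1, ...)
+ *     // emits:
+ *     static NV_FORCEINLINE KernelBus *GPU_GET_KERNEL_BUS(OBJGPU *pGpu)
+ *     { return pGpu->pKernelBus; }
+ *
+ * and a multi-instance engine gains an index-checked variant that
+ * returns NULL for out-of-range indices. The IS_* mode flags are the
+ * sanctioned way to special-case pre-silicon platforms, e.g.:
+ *
+ *     if (!IS_SILICON(pGpu))
+ *         ; // e.g., relax a timeout for emulation/simulation runs
+ */
+
+NV_STATUS gpuCtrlExecRegOps(struct OBJGPU *, struct Graphics *, NvHandle, NvHandle,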
NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool);
+NV_STATUS gpuValidateRegOps(struct OBJGPU *, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool, NvBool);
+
+// GPU Sanity Check Flags
+#define GPU_SANITY_CHECK_FLAGS_BOOT_0 NVBIT(0)
+#define GPU_SANITY_CHECK_FLAGS_OFF_BY_N NVBIT(1)
+#define GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH NVBIT(2)
+#define GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED NVBIT(3)
+#define GPU_SANITY_CHECK_FLAGS_FB NVBIT(4)
+
+#define GPU_SANITY_CHECK_FLAGS_NONE 0x0
+#define GPU_SANITY_CHECK_FLAGS_ALL 0xffffffff
+
+//
+// Macro for checking if GPU is in reset.
+//
+#define API_GPU_IN_RESET_SANITY_CHECK(pGpu) \
+    ((NULL == pGpu) || \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) || \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) || \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING))
+
+//
+// Macro for checking if GPU is still connected.
+//
+#define API_GPU_ATTACHED_SANITY_CHECK(pGpu) \
+    ((NULL != pGpu) && \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET))
+
+//
+// Macro for checking if GPU has Full Sanity
+//
+#define FULL_GPU_SANITY_CHECK(pGpu) \
+    ((NULL != pGpu) && \
+    gpuIsGpuFullPower(pGpu) && \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST) && \
+    gpuCheckSysmemAccess(pGpu))
+
+//
+// Macro for checking if GPU has Full Sanity for PM resume
+//
+#define FULL_GPU_SANITY_FOR_PM_RESUME(pGpu) \
+    ((NULL != pGpu) && \
+    gpuIsGpuFullPowerForPmResume(pGpu) && \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \
+    !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST))
+
+//
+// Macro for checking if GPU is in the recovery path
+//
+#define API_GPU_IN_RECOVERY_SANITY_CHECK(pGpu) \
+    ((NULL == pGpu) || \
+    pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY))
+
+//
+// Identifiers for gpuGetRegBaseOffset HAL interface.
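+// As an illustrative, hypothetical sketch (not part of this change), a caller
+// would translate one of these identifiers into a chip-specific register base
+// through the HAL, assuming its usual (pGpu, regBase, pOffset) shape:
+//
+//     NvU32 timerBase = 0;
+//     if (gpuGetRegBaseOffset_HAL(pGpu, NV_REG_BASE_TIMER, &timerBase) == NV_OK)
+//     {
+//         // timerBase now holds the chip-specific base of the timer registers
+//     }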
+// +#define NV_REG_BASE_GR (0x00000001) +#define NV_REG_BASE_PM (0x00000002) +#define NV_REG_BASE_TIMER (0x00000003) +#define NV_REG_BASE_DFD (0x00000004) +#define NV_REG_BASE_FLUSH (0x00000005) +#define NV_REG_BASE_LTCG (0x00000006) +#define NV_REG_BASE_TOP (0x00000007) +#define NV_REG_BASE_MASTER (0x0000000A) +#define NV_REG_BASE_USERMODE (0x0000000B) +#define NV_REG_BASE_LAST NV_REG_BASE_USERMODE +ct_assert(NV_REG_BASE_LAST < NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX); + +// Macros for CPU family information +#define NV_CPU_FAMILY 3:0 +#define NV_CPU_EXTENDED_FAMILY 11:4 + +// Macros for CPU model information +#define NV_CPU_MODEL 3:0 +#define NV_CPU_EXTENDED_MODEL 7:4 + +// Macros for AMD CPU information +#define NV_CPU_ID_AMD_FAMILY 0xF +#define NV_CPU_ID_AMD_EXTENDED_FAMILY 0xA +#define NV_CPU_ID_AMD_MODEL 0x0 +#define NV_CPU_ID_AMD_EXTENDED_MODEL 0x4 + +// Macros for Intel CPU information +#define NV_CPU_ID_INTEL_FAMILY 0x6 +#define NV_CPU_ID_INTEL_EXTENDED_FAMILY 0x0 +#define NV_CPU_ID_INTEL_CORE_S_MODEL 0x7 +#define NV_CPU_ID_INTEL_CORE_P_MODEL 0xA +#define NV_CPU_ID_INTEL_EXTENDED_MODEL 0x9 + +#define GPU_READ_PRI_ERROR_MASK 0xFFF00000 +#define GPU_READ_PRI_ERROR_CODE 0xBAD00000 + +// +// Define for invalid register value. GPU could have fallen off the bus or +// the GPU could be in reset. +// +#define GPU_REG_VALUE_INVALID 0xFFFFFFFF + +// +// Hal InfoBlock access interface +// +#define gpuGetInfoBlock(pGpu, pListHead, dataId) getInfoPtr(pListHead, dataId) +#define gpuAddInfoBlock(pGpu, ppListHead, dataId, size) addInfoPtr(ppListHead, dataId, size) +#define gpuDeleteInfoBlock(pGpu, ppListHead, dataId) deleteInfoPtr(ppListHead, dataId); +#define gpuTestInfoBlock(pGpu, pListHead, dataId) testInfoPtr(pListHead, dataId); + +// Static info getters +void *gpuGetStaticInfo(struct OBJGPU *pGpu); +#define GPU_GET_STATIC_INFO(pGpu) gpuGetStaticInfo(pGpu) +void *gpuGetGspStaticInfo(struct OBJGPU *pGpu); +#define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu) + + +#define IS_GPU_GC6_STATE_POWERED_ON(obj) NV_TRUE +#define IS_GPU_GC6_STATE_EXITED(obj) NV_FALSE +#define IS_GPU_GC6_STATE_ENTERING(obj) NV_FALSE +#define IS_GPU_GC6_STATE_ENTERED(obj) NV_FALSE +#define IS_GPU_GC6_STATE_EXITING(obj) NV_FALSE + +#endif // _OBJGPU_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c new file mode 100644 index 0000000..00b40f0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c @@ -0,0 +1,309 @@ +#define NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x5d5d9f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_funcTable_GpuResource(GpuResource*); +NV_STATUS __nvoc_ctor_GpuResource(GpuResource*, struct CALL_CONTEXT * 
arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_GpuResource(GpuResource*); +void __nvoc_dtor_GpuResource(GpuResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GpuResource = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_GpuResource_GpuResource, + &__nvoc_rtti_GpuResource_RmResource, + &__nvoc_rtti_GpuResource_RmResourceCommon, + &__nvoc_rtti_GpuResource_RsResource, + &__nvoc_rtti_GpuResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuResource), + /*classId=*/ classId(GpuResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuResource, + /*pCastInfo=*/ &__nvoc_castinfo_GpuResource, + /*pExportInfo=*/ &__nvoc_export_info_GpuResource +}; + +static NV_STATUS __nvoc_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NvBool __nvoc_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pParentRef, 
pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresControlLookup(struct GpuResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + 
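    // Like the other thunks above, this adjusts the GpuResource pointer by the
+    // RsResource base-class offset recorded in the RTTI table before forwarding
+    // the lookup to the base implementation.
+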
return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams, ppEntry); +} + +static NvBool __nvoc_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_GpuResource(GpuResource *pThis) { + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuResource(GpuResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GpuResource(GpuResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail_RmResource; + __nvoc_init_dataField_GpuResource(pThis); + + status = __nvoc_gpuresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail__init; + goto __nvoc_ctor_GpuResource_exit; // Success + +__nvoc_ctor_GpuResource_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_GpuResource_fail_RmResource: +__nvoc_ctor_GpuResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_GpuResource_1(GpuResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__gpuresControl__ = &gpuresControl_IMPL; + + pThis->__gpuresMap__ = &gpuresMap_IMPL; + + pThis->__gpuresUnmap__ = &gpuresUnmap_IMPL; + + pThis->__gpuresShareCallback__ = &gpuresShareCallback_IMPL; + + pThis->__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL; + + pThis->__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL; + + pThis->__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL; + + pThis->__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_GpuResource_resControl; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMap__ = &__nvoc_thunk_GpuResource_resMap; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmap__ = &__nvoc_thunk_GpuResource_resUnmap; + + pThis->__nvoc_base_RmResource.__rmresShareCallback__ = &__nvoc_thunk_GpuResource_rmresShareCallback; + + pThis->__gpuresCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gpuresCheckMemInterUnmap; + + pThis->__gpuresGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpuresGetMemInterMapParams; + + pThis->__gpuresGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor; + + pThis->__gpuresGetRefCount__ = &__nvoc_thunk_RsResource_gpuresGetRefCount; + + pThis->__gpuresControlFilter__ = &__nvoc_thunk_RsResource_gpuresControlFilter; + + pThis->__gpuresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpuresAddAdditionalDependants; + + pThis->__gpuresControl_Prologue__ = &__nvoc_thunk_RmResource_gpuresControl_Prologue; + + pThis->__gpuresCanCopy__ = 
&__nvoc_thunk_RsResource_gpuresCanCopy; + + pThis->__gpuresMapTo__ = &__nvoc_thunk_RsResource_gpuresMapTo; + + pThis->__gpuresPreDestruct__ = &__nvoc_thunk_RsResource_gpuresPreDestruct; + + pThis->__gpuresUnmapFrom__ = &__nvoc_thunk_RsResource_gpuresUnmapFrom; + + pThis->__gpuresControl_Epilogue__ = &__nvoc_thunk_RmResource_gpuresControl_Epilogue; + + pThis->__gpuresControlLookup__ = &__nvoc_thunk_RsResource_gpuresControlLookup; + + pThis->__gpuresAccessCallback__ = &__nvoc_thunk_RmResource_gpuresAccessCallback; +} + +void __nvoc_init_funcTable_GpuResource(GpuResource *pThis) { + __nvoc_init_funcTable_GpuResource_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_GpuResource(GpuResource *pThis) { + pThis->__nvoc_pbase_GpuResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_GpuResource(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + GpuResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(GpuResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GpuResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GpuResource(pThis); + status = __nvoc_ctor_GpuResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GpuResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GpuResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GpuResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h new file mode 100644 index 0000000..8013615 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h @@ -0,0 +1,329 @@ +#ifndef _G_GPU_RESOURCE_NVOC_H_ +#define _G_GPU_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_resource_nvoc.h" + +#ifndef _GPURESOURCE_H_ +#define _GPURESOURCE_H_ + +#include "core/core.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "rmapi/resource.h" + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + + +#define GPU_RES_GET_GPU(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpu +#define GPU_RES_GET_GPUGRP(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpuGrp +#define GPU_RES_GET_DEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pDevice +#define GPU_RES_GET_SUBDEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pSubdevice + +#define GPU_RES_SET_THREAD_BC_STATE(pRes) do { \ + gpuSetThreadBcState(staticCastNoPtrCheck((pRes), GpuResource)->pGpu, \ + staticCastNoPtrCheck((pRes), GpuResource)->bBcResource); \ + } while(0) + +/*! 
+ * Abstract base class for common CPU mapping operations + */ +#ifdef NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GpuResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + NV_STATUS (*__gpuresControl__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpuresMap__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__gpuresUnmap__)(struct GpuResource *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NvBool (*__gpuresShareCallback__)(struct GpuResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__gpuresGetRegBaseOffsetAndSize__)(struct GpuResource *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__gpuresGetMapAddrSpace__)(struct GpuResource *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__gpuresInternalControlForward__)(struct GpuResource *, NvU32, void *, NvU32); + NvHandle (*__gpuresGetInternalObjectHandle__)(struct GpuResource *); + NV_STATUS (*__gpuresCheckMemInterUnmap__)(struct GpuResource *, NvBool); + NV_STATUS (*__gpuresGetMemInterMapParams__)(struct GpuResource *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__gpuresGetMemoryMappingDescriptor__)(struct GpuResource *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__gpuresGetRefCount__)(struct GpuResource *); + NV_STATUS (*__gpuresControlFilter__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__gpuresAddAdditionalDependants__)(struct RsClient *, struct GpuResource *, RsResourceRef *); + NV_STATUS (*__gpuresControl_Prologue__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__gpuresCanCopy__)(struct GpuResource *); + NV_STATUS (*__gpuresMapTo__)(struct GpuResource *, RS_RES_MAP_TO_PARAMS *); + void (*__gpuresPreDestruct__)(struct GpuResource *); + NV_STATUS (*__gpuresUnmapFrom__)(struct GpuResource *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__gpuresControl_Epilogue__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpuresControlLookup__)(struct GpuResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvBool (*__gpuresAccessCallback__)(struct GpuResource *, struct RsClient *, void *, RsAccessRight); + struct OBJGPUGRP *pGpuGrp; + struct OBJGPU *pGpu; + struct Device *pDevice; + struct Subdevice *pSubdevice; + NvBool bBcResource; +}; + +#ifndef __NVOC_CLASS_GpuResource_TYPEDEF__ +#define __NVOC_CLASS_GpuResource_TYPEDEF__ +typedef struct GpuResource GpuResource; +#endif /* __NVOC_CLASS_GpuResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuResource +#define __nvoc_class_id_GpuResource 0x5d5d9f +#endif /* __nvoc_class_id_GpuResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +#define __staticCast_GpuResource(pThis) \ + ((pThis)->__nvoc_pbase_GpuResource) + +#ifdef __nvoc_gpu_resource_h_disabled +#define __dynamicCast_GpuResource(pThis) ((GpuResource*)NULL) +#else //__nvoc_gpu_resource_h_disabled +#define __dynamicCast_GpuResource(pThis) \ + 
((GpuResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuResource))) +#endif //__nvoc_gpu_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuResource(GpuResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_GpuResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GpuResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define gpuresControl(pGpuResource, pCallContext, pParams) gpuresControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define gpuresMap(pGpuResource, pCallContext, pParams, pCpuMapping) gpuresMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define gpuresUnmap(pGpuResource, pCallContext, pCpuMapping) gpuresUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define gpuresShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) gpuresShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) gpuresGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define gpuresGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) gpuresGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define gpuresInternalControlForward(pGpuResource, command, pParams, size) gpuresInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define gpuresGetInternalObjectHandle(pGpuResource) gpuresGetInternalObjectHandle_DISPATCH(pGpuResource) +#define gpuresCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpuresCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define gpuresGetMemInterMapParams(pRmResource, pParams) gpuresGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define gpuresGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpuresGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define gpuresGetRefCount(pResource) gpuresGetRefCount_DISPATCH(pResource) +#define gpuresControlFilter(pResource, pCallContext, pParams) gpuresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define gpuresAddAdditionalDependants(pClient, pResource, pReference) gpuresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define gpuresControl_Prologue(pResource, pCallContext, pParams) gpuresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpuresCanCopy(pResource) gpuresCanCopy_DISPATCH(pResource) +#define gpuresMapTo(pResource, pParams) gpuresMapTo_DISPATCH(pResource, pParams) +#define gpuresPreDestruct(pResource) gpuresPreDestruct_DISPATCH(pResource) +#define gpuresUnmapFrom(pResource, pParams) gpuresUnmapFrom_DISPATCH(pResource, pParams) +#define gpuresControl_Epilogue(pResource, pCallContext, pParams) gpuresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpuresControlLookup(pResource, pParams, ppEntry) gpuresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define gpuresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpuresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS gpuresControl_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS gpuresControl_DISPATCH(struct GpuResource 
*pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__gpuresControl__(pGpuResource, pCallContext, pParams); +} + +NV_STATUS gpuresMap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS gpuresMap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__gpuresMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS gpuresUnmap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS gpuresUnmap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__gpuresUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +NvBool gpuresShareCallback_IMPL(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool gpuresShareCallback_DISPATCH(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__gpuresShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS gpuresGetRegBaseOffsetAndSize_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS gpuresGetRegBaseOffsetAndSize_DISPATCH(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__gpuresGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +NV_STATUS gpuresGetMapAddrSpace_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS gpuresGetMapAddrSpace_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__gpuresGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS gpuresInternalControlForward_IMPL(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size); + +static inline NV_STATUS gpuresInternalControlForward_DISPATCH(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__gpuresInternalControlForward__(pGpuResource, command, pParams, size); +} + +NvHandle gpuresGetInternalObjectHandle_IMPL(struct GpuResource *pGpuResource); + +static inline NvHandle gpuresGetInternalObjectHandle_DISPATCH(struct GpuResource *pGpuResource) { + return pGpuResource->__gpuresGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS gpuresCheckMemInterUnmap_DISPATCH(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__gpuresCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS gpuresGetMemInterMapParams_DISPATCH(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__gpuresGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS gpuresGetMemoryMappingDescriptor_DISPATCH(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__gpuresGetMemoryMappingDescriptor__(pRmResource, 
ppMemDesc); +} + +static inline NvU32 gpuresGetRefCount_DISPATCH(struct GpuResource *pResource) { + return pResource->__gpuresGetRefCount__(pResource); +} + +static inline NV_STATUS gpuresControlFilter_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpuresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void gpuresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) { + pResource->__gpuresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS gpuresControl_Prologue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpuresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool gpuresCanCopy_DISPATCH(struct GpuResource *pResource) { + return pResource->__gpuresCanCopy__(pResource); +} + +static inline NV_STATUS gpuresMapTo_DISPATCH(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__gpuresMapTo__(pResource, pParams); +} + +static inline void gpuresPreDestruct_DISPATCH(struct GpuResource *pResource) { + pResource->__gpuresPreDestruct__(pResource); +} + +static inline NV_STATUS gpuresUnmapFrom_DISPATCH(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__gpuresUnmapFrom__(pResource, pParams); +} + +static inline void gpuresControl_Epilogue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__gpuresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpuresControlLookup_DISPATCH(struct GpuResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__gpuresControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvBool gpuresAccessCallback_DISPATCH(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__gpuresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS gpuresConstruct_IMPL(struct GpuResource *arg_pGpuResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_gpuresConstruct(arg_pGpuResource, arg_pCallContext, arg_pParams) gpuresConstruct_IMPL(arg_pGpuResource, arg_pCallContext, arg_pParams) +NV_STATUS gpuresCopyConstruct_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_gpu_resource_h_disabled +static inline NV_STATUS gpuresCopyConstruct(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_resource_h_disabled +#define gpuresCopyConstruct(pGpuResource, pCallContext, pParams) gpuresCopyConstruct_IMPL(pGpuResource, pCallContext, pParams) +#endif //__nvoc_gpu_resource_h_disabled + +void gpuresSetGpu_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource); +#ifdef __nvoc_gpu_resource_h_disabled +static inline void gpuresSetGpu(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource) { + 
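    // In builds where GpuResource support is compiled out
+    // (__nvoc_gpu_resource_h_disabled), these inline stubs assert at runtime
+    // instead of forwarding to the _IMPL functions.
+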
NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!"); +} +#else //__nvoc_gpu_resource_h_disabled +#define gpuresSetGpu(pGpuResource, pGpu, bBcResource) gpuresSetGpu_IMPL(pGpuResource, pGpu, bBcResource) +#endif //__nvoc_gpu_resource_h_disabled + +void gpuresControlSetup_IMPL(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource); +#ifdef __nvoc_gpu_resource_h_disabled +static inline void gpuresControlSetup(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource) { + NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!"); +} +#else //__nvoc_gpu_resource_h_disabled +#define gpuresControlSetup(pParams, pGpuResource) gpuresControlSetup_IMPL(pParams, pGpuResource) +#endif //__nvoc_gpu_resource_h_disabled + +NV_STATUS gpuresGetByHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource); +#define gpuresGetByHandle(pClient, hResource, ppGpuResource) gpuresGetByHandle_IMPL(pClient, hResource, ppGpuResource) +NV_STATUS gpuresGetByDeviceOrSubdeviceHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource); +#define gpuresGetByDeviceOrSubdeviceHandle(pClient, hResource, ppGpuResource) gpuresGetByDeviceOrSubdeviceHandle_IMPL(pClient, hResource, ppGpuResource) +#undef PRIVATE_FIELD + + +#endif // _GPURESOURCE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_RESOURCE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h new file mode 100644 index 0000000..2ac7ad4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h @@ -0,0 +1,142 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! 
+// +// HAL support for use in HAL setup +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_hal.h +// + +#ifndef _G_RMCFG_HAL_H_ +#define _G_RMCFG_HAL_H_ + + +typedef struct DISP_HAL_IFACES *PDISP_HAL_IFACES; +typedef struct DPU_HAL_IFACES *PDPU_HAL_IFACES; +typedef struct GPIO_HAL_IFACES *PGPIO_HAL_IFACES; +typedef struct RPC_HAL_IFACES *PRPC_HAL_IFACES; +typedef struct RPCSTRUCTURECOPY_HAL_IFACES *PRPCSTRUCTURECOPY_HAL_IFACES; + + + +// +// per-GPU list of function ptrs to setup iface for each engine +// + +typedef struct { + + +} HAL_IFACE_SETUP, *PHAL_IFACE_SETUP; + + + +// +// IP_VERSIONS support +// + +typedef struct IGRP_IP_VERSIONS_TABLE_INFO IGRP_IP_VERSIONS_TABLE_INFO; + +// generic form of Head_iGrp_ipVersions_getInfo typedef + +typedef NV_STATUS IGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *); +typedef void IGrp_ipVersions_install(IGRP_IP_VERSIONS_TABLE_INFO *); +typedef NV_STATUS IGrp_ipVersions_wrapup(IGRP_IP_VERSIONS_TABLE_INFO *); + +// a single inclusive version range +typedef struct { + NvU32 v0; + NvU32 v1; +} IGRP_IP_VERSION_RANGE; + + +typedef struct { + const IGRP_IP_VERSION_RANGE *pRanges; + NvU32 numRanges; + IGrp_ipVersions_install *ifacesInstallFn; +} IGRP_IP_VERSIONS_ENTRY; + + +struct IGRP_IP_VERSIONS_TABLE_INFO { + POBJGPU pGpu; + Dynamic *pDynamic; // eg: pBiff + + const IGRP_IP_VERSIONS_ENTRY *pTable; + NvU32 numEntries; + IGrp_ipVersions_wrapup *ifacesWrapupFn; // overrides and asserts +}; + +// HAL_IMPLEMENTATION enum +typedef enum +{ + HAL_IMPL_GF100, + HAL_IMPL_GF100B, + HAL_IMPL_GF104, + HAL_IMPL_GF104B, + HAL_IMPL_GF106, + HAL_IMPL_GF106B, + HAL_IMPL_GF108, + HAL_IMPL_GF110D, + HAL_IMPL_GF110, + HAL_IMPL_GF117, + HAL_IMPL_GF118, + HAL_IMPL_GF119, + HAL_IMPL_GF110F, + HAL_IMPL_GF110F2, + HAL_IMPL_GF110F3, + HAL_IMPL_GK104, + HAL_IMPL_GK106, + HAL_IMPL_GK107, + HAL_IMPL_GK20A, + HAL_IMPL_GK110, + HAL_IMPL_GK110B, + HAL_IMPL_GK110C, + HAL_IMPL_GK208, + HAL_IMPL_GK208S, + HAL_IMPL_GM107, + HAL_IMPL_GM108, + HAL_IMPL_GM200, + HAL_IMPL_GM204, + HAL_IMPL_GM206, + HAL_IMPL_GP100, + HAL_IMPL_GP102, + HAL_IMPL_GP104, + HAL_IMPL_GP106, + HAL_IMPL_GP107, + HAL_IMPL_GP108, + HAL_IMPL_GV100, + HAL_IMPL_GV11B, + HAL_IMPL_TU102, + HAL_IMPL_TU104, + HAL_IMPL_TU106, + HAL_IMPL_TU116, + HAL_IMPL_TU117, + HAL_IMPL_GA100, + HAL_IMPL_GA102, + HAL_IMPL_GA103, + HAL_IMPL_GA104, + HAL_IMPL_GA106, + HAL_IMPL_GA107, + HAL_IMPL_GA10B, + HAL_IMPL_GA102F, + HAL_IMPL_T001_FERMI_NOT_EXIST, + HAL_IMPL_T124, + HAL_IMPL_T132, + HAL_IMPL_T210, + HAL_IMPL_T186, + HAL_IMPL_T194, + HAL_IMPL_T002_TURING_NOT_EXIST, + HAL_IMPL_T234, + HAL_IMPL_T234D, + HAL_IMPL_AMODEL, + + HAL_IMPL_MAXIMUM, // NOTE: this symbol must be at the end of the enum list. + // It is used to allocate arrays and control loop iterations. +} HAL_IMPLEMENTATION; + +// +// HAL implementation names for debug & logging use +// +#define HAL_IMPL_NAME_LIST \ + { HAL_IMPL_T234D, "T234D" } + + +#endif // _G_RMCFG_HAL_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h new file mode 100644 index 0000000..6add705 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h @@ -0,0 +1,94 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Hal registration entry points. 
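+// (Note: the chipID[] table below is indexed by the HAL_IMPLEMENTATION enum
+// from g_hal.h; on this profile only T234D carries a live entry, hidrev
+// 0x235, and every other implementation is disabled and zero-filled.)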
+// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_hal_archimpl.h +// +// Chips: T234D +// + +#ifndef _G_RMCFG_HAL_ARCHIMPL_H_ +#define _G_RMCFG_HAL_ARCHIMPL_H_ + +#include "g_hal.h" + +// OpenRM for Tegra build uses different include path +// The following lines refer to the same file. +// TODO: merge them +#include "nv_ref.h" + +// +// CHIPID array Implementation +// +const struct ChipID +{ + NvU32 arch; + NvU32 impl; + NvU32 hidrev; +} chipID[] = { + { 0x0, 0x0, 0x0 } , // GF100 (disabled) + { 0x0, 0x0, 0x0 } , // GF100B (disabled) + { 0x0, 0x0, 0x0 } , // GF104 (disabled) + { 0x0, 0x0, 0x0 } , // GF104B (disabled) + { 0x0, 0x0, 0x0 } , // GF106 (disabled) + { 0x0, 0x0, 0x0 } , // GF106B (disabled) + { 0x0, 0x0, 0x0 } , // GF108 (disabled) + { 0x0, 0x0, 0x0 } , // GF110D (disabled) + { 0x0, 0x0, 0x0 } , // GF110 (disabled) + { 0x0, 0x0, 0x0 } , // GF117 (disabled) + { 0x0, 0x0, 0x0 } , // GF118 (disabled) + { 0x0, 0x0, 0x0 } , // GF119 (disabled) + { 0x0, 0x0, 0x0 } , // GF110F (disabled) + { 0x0, 0x0, 0x0 } , // GF110F2 (disabled) + { 0x0, 0x0, 0x0 } , // GF110F3 (disabled) + { 0x0, 0x0, 0x0 } , // GK104 (disabled) + { 0x0, 0x0, 0x0 } , // GK106 (disabled) + { 0x0, 0x0, 0x0 } , // GK107 (disabled) + { 0x0, 0x0, 0x0 } , // GK20A (disabled) + { 0x0, 0x0, 0x0 } , // GK110 (disabled) + { 0x0, 0x0, 0x0 } , // GK110B (disabled) + { 0x0, 0x0, 0x0 } , // GK110C (disabled) + { 0x0, 0x0, 0x0 } , // GK208 (disabled) + { 0x0, 0x0, 0x0 } , // GK208S (disabled) + { 0x0, 0x0, 0x0 } , // GM107 (disabled) + { 0x0, 0x0, 0x0 } , // GM108 (disabled) + { 0x0, 0x0, 0x0 } , // GM200 (disabled) + { 0x0, 0x0, 0x0 } , // GM204 (disabled) + { 0x0, 0x0, 0x0 } , // GM206 (disabled) + { 0x0, 0x0, 0x0 } , // GP100 (disabled) + { 0x0, 0x0, 0x0 } , // GP102 (disabled) + { 0x0, 0x0, 0x0 } , // GP104 (disabled) + { 0x0, 0x0, 0x0 } , // GP106 (disabled) + { 0x0, 0x0, 0x0 } , // GP107 (disabled) + { 0x0, 0x0, 0x0 } , // GP108 (disabled) + { 0x0, 0x0, 0x0 } , // GV100 (disabled) + { 0x0, 0x0, 0x0 } , // GV11B (disabled) + { 0x0, 0x0, 0x0 } , // TU102 (disabled) + { 0x0, 0x0, 0x0 } , // TU104 (disabled) + { 0x0, 0x0, 0x0 } , // TU106 (disabled) + { 0x0, 0x0, 0x0 } , // TU116 (disabled) + { 0x0, 0x0, 0x0 } , // TU117 (disabled) + { 0x0, 0x0, 0x0 } , // GA100 (disabled) + { 0x0, 0x0, 0x0 } , // GA102 (disabled) + { 0x0, 0x0, 0x0 } , // GA103 (disabled) + { 0x0, 0x0, 0x0 } , // GA104 (disabled) + { 0x0, 0x0, 0x0 } , // GA106 (disabled) + { 0x0, 0x0, 0x0 } , // GA107 (disabled) + { 0x0, 0x0, 0x0 } , // GA10B (disabled) + { 0x0, 0x0, 0x0 } , // GA102F (disabled) + { 0x0, 0x0, 0x0 } , // T001_FERMI_NOT_EXIST (disabled) + { 0x0, 0x0, 0x0 } , // T124 (disabled) + { 0x0, 0x0, 0x0 } , // T132 (disabled) + { 0x0, 0x0, 0x0 } , // T210 (disabled) + { 0x0, 0x0, 0x0 } , // T186 (disabled) + { 0x0, 0x0, 0x0 } , // T194 (disabled) + { 0x0, 0x0, 0x0 } , // T002_TURING_NOT_EXIST (disabled) + { 0x0, 0x0, 0x0 } , // T234 (disabled) + { 0x0, 0x0, 0x235 } , // T234D + { 0x0, 0x0, 0x0 } , // AMODEL (disabled) + +}; + +#endif // _G_RMCFG_HAL_ARCHIMPL_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c new file mode 100644 index 0000000..b2e4490 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" 
+#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hal_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xbf26de = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJHALMGR(OBJHALMGR*); +void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR*); +NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR*); +void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR*); +void __nvoc_dtor_OBJHALMGR(OBJHALMGR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_OBJHALMGR = { + /*pClassDef=*/ &__nvoc_class_def_OBJHALMGR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHALMGR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJHALMGR, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHALMGR = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJHALMGR_OBJHALMGR, + &__nvoc_rtti_OBJHALMGR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJHALMGR), + /*classId=*/ classId(OBJHALMGR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJHALMGR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHALMGR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJHALMGR, + /*pExportInfo=*/ &__nvoc_export_info_OBJHALMGR +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJHALMGR(OBJHALMGR *pThis) { + __nvoc_halmgrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail_Object; + __nvoc_init_dataField_OBJHALMGR(pThis); + + status = __nvoc_halmgrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail__init; + goto __nvoc_ctor_OBJHALMGR_exit; // Success + +__nvoc_ctor_OBJHALMGR_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJHALMGR_fail_Object: +__nvoc_ctor_OBJHALMGR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJHALMGR_1(OBJHALMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR *pThis) { + __nvoc_init_funcTable_OBJHALMGR_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJHALMGR(OBJHALMGR *pThis) { + pThis->__nvoc_pbase_OBJHALMGR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJHALMGR(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJHALMGR *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJHALMGR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJHALMGR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHALMGR); + + if 
(pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJHALMGR(pThis); + status = __nvoc_ctor_OBJHALMGR(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJHALMGR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJHALMGR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJHALMGR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h new file mode 100644 index 0000000..30f2cf4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h @@ -0,0 +1,139 @@ +#ifndef _G_HAL_MGR_NVOC_H_ +#define _G_HAL_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_hal_mgr_nvoc.h" + +#ifndef _HAL_MGR_H_ +#define _HAL_MGR_H_ + +#include "core/core.h" +#include "core/info_block.h" +#include "core/hal.h" + +#define HALMGR_GET_HAL(p, halid) halmgrGetHal((p), halid) + +typedef struct OBJHALMGR *POBJHALMGR; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + + + +#ifdef NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJHALMGR { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJHALMGR *__nvoc_pbase_OBJHALMGR; + struct OBJHAL *pHalList[60]; +}; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR; + +#define __staticCast_OBJHALMGR(pThis) \ + ((pThis)->__nvoc_pbase_OBJHALMGR) + +#ifdef __nvoc_hal_mgr_h_disabled +#define __dynamicCast_OBJHALMGR(pThis) ((OBJHALMGR*)NULL) +#else //__nvoc_hal_mgr_h_disabled +#define __dynamicCast_OBJHALMGR(pThis) \ + ((OBJHALMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHALMGR))) +#endif //__nvoc_hal_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32); +#define __objCreate_OBJHALMGR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHALMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS halmgrConstruct_IMPL(struct OBJHALMGR *arg_); +#define __nvoc_halmgrConstruct(arg_) halmgrConstruct_IMPL(arg_) +void halmgrDestruct_IMPL(struct OBJHALMGR *arg0); +#define __nvoc_halmgrDestruct(arg0) halmgrDestruct_IMPL(arg0) +NV_STATUS halmgrCreateHal_IMPL(struct OBJHALMGR *arg0, NvU32 arg1); +#ifdef __nvoc_hal_mgr_h_disabled +static inline NV_STATUS halmgrCreateHal(struct OBJHALMGR *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrCreateHal(arg0, arg1) halmgrCreateHal_IMPL(arg0, arg1) +#endif //__nvoc_hal_mgr_h_disabled + +NV_STATUS halmgrGetHalForGpu_IMPL(struct OBJHALMGR *arg0, NvU32 arg1, NvU32 arg2, NvU32 *arg3); +#ifdef __nvoc_hal_mgr_h_disabled +static inline NV_STATUS halmgrGetHalForGpu(struct OBJHALMGR *arg0, NvU32 arg1, NvU32 arg2, NvU32 *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrGetHalForGpu(arg0, arg1, arg2, arg3) halmgrGetHalForGpu_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_hal_mgr_h_disabled + +struct OBJHAL *halmgrGetHal_IMPL(struct OBJHALMGR *arg0, NvU32 arg1); +#ifdef __nvoc_hal_mgr_h_disabled +static inline struct OBJHAL *halmgrGetHal(struct OBJHALMGR *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NULL; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrGetHal(arg0, arg1) halmgrGetHal_IMPL(arg0, arg1) +#endif //__nvoc_hal_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus 
+} // extern "C" +#endif +#endif // _G_HAL_MGR_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c new file mode 100644 index 0000000..2cdd8ef --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c @@ -0,0 +1,148 @@ +#define NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hal_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe803b6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJHAL(OBJHAL*); +void __nvoc_init_funcTable_OBJHAL(OBJHAL*); +NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL*); +void __nvoc_init_dataField_OBJHAL(OBJHAL*); +void __nvoc_dtor_OBJHAL(OBJHAL*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_OBJHAL = { + /*pClassDef=*/ &__nvoc_class_def_OBJHAL, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHAL, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJHAL, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHAL = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJHAL_OBJHAL, + &__nvoc_rtti_OBJHAL_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJHAL), + /*classId=*/ classId(OBJHAL), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJHAL", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHAL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJHAL, + /*pExportInfo=*/ &__nvoc_export_info_OBJHAL +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJHAL(OBJHAL *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJHAL(OBJHAL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJHAL_fail_Object; + __nvoc_init_dataField_OBJHAL(pThis); + goto __nvoc_ctor_OBJHAL_exit; // Success + +__nvoc_ctor_OBJHAL_fail_Object: +__nvoc_ctor_OBJHAL_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJHAL_1(OBJHAL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJHAL(OBJHAL *pThis) { + __nvoc_init_funcTable_OBJHAL_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJHAL(OBJHAL *pThis) { + pThis->__nvoc_pbase_OBJHAL = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJHAL(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJHAL *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJHAL)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + 
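    // Zero the allocation up front so every field starts out NULL before
+    // __nvoc_initRtti() and __nvoc_init_OBJHAL() fill in the RTTI and
+    // base-class pointers below.
+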
portMemSet(pThis, 0, sizeof(OBJHAL)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHAL); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJHAL(pThis); + status = __nvoc_ctor_OBJHAL(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJHAL_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJHAL_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJHAL(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h new file mode 100644 index 0000000..4fa2daa --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h @@ -0,0 +1,146 @@ +#ifndef _G_HAL_NVOC_H_ +#define _G_HAL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_hal_nvoc.h" + +#ifndef _OBJHAL_H_ +#define _OBJHAL_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: hal.h * +* Defines and structures used for the HAL Object. 
* +* * +\***************************************************************************/ + +#include "core/core.h" +#include "core/info_block.h" + +// +// HAL Info Block Id: +// +// 31 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | 24 bits | 8 bits | +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// Info ID # Impl +// +// Impl: The hal implementation +// Info ID number: unique id for a particular info type +// +#define MKHALINFOID(impl,infoId) (((infoId & 0xffffff) << 8) | (impl & 0xff)) + +typedef struct MODULEDESCRIPTOR MODULEDESCRIPTOR, *PMODULEDESCRIPTOR; + +struct MODULEDESCRIPTOR { + + // (rmconfig) per-obj function ptr to init hal interfaces + const HAL_IFACE_SETUP *pHalSetIfaces; +}; + +typedef struct OBJHAL *POBJHAL; + +#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__ +#define __NVOC_CLASS_OBJHAL_TYPEDEF__ +typedef struct OBJHAL OBJHAL; +#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHAL +#define __nvoc_class_id_OBJHAL 0xe803b6 +#endif /* __nvoc_class_id_OBJHAL */ + + +#ifdef NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJHAL { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJHAL *__nvoc_pbase_OBJHAL; + struct MODULEDESCRIPTOR moduleDescriptor; +}; + +#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__ +#define __NVOC_CLASS_OBJHAL_TYPEDEF__ +typedef struct OBJHAL OBJHAL; +#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHAL +#define __nvoc_class_id_OBJHAL 0xe803b6 +#endif /* __nvoc_class_id_OBJHAL */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL; + +#define __staticCast_OBJHAL(pThis) \ + ((pThis)->__nvoc_pbase_OBJHAL) + +#ifdef __nvoc_hal_h_disabled +#define __dynamicCast_OBJHAL(pThis) ((OBJHAL*)NULL) +#else //__nvoc_hal_h_disabled +#define __dynamicCast_OBJHAL(pThis) \ + ((OBJHAL*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHAL))) +#endif //__nvoc_hal_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL**, Dynamic*, NvU32); +#define __objCreate_OBJHAL(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHAL((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +PMODULEDESCRIPTOR objhalGetModuleDescriptor_IMPL(struct OBJHAL *pHal); +#ifdef __nvoc_hal_h_disabled +static inline PMODULEDESCRIPTOR objhalGetModuleDescriptor(struct OBJHAL *pHal) { + NV_ASSERT_FAILED_PRECOMP("OBJHAL was disabled!"); + return NULL; +} +#else //__nvoc_hal_h_disabled +#define objhalGetModuleDescriptor(pHal) objhalGetModuleDescriptor_IMPL(pHal) +#endif //__nvoc_hal_h_disabled + +#undef PRIVATE_FIELD + + +//-------------------------------------------------------------------- +// RM routines. +//-------------------------------------------------------------------- + +NV_STATUS ipVersionsSetupHal(struct OBJGPU *, void *pDynamic, IGrp_ipVersions_getInfo getInfoFn); + +#endif // _OBJHAL_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HAL_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h new file mode 100644 index 0000000..c7801ef --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h @@ -0,0 +1,66 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! 
+// +// Private HAL support for halgen. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_hal_private.h +// +// Chips: T234D +// + +// +// This file is included in several .c files for chips hal register and engines +// hal function assignment. The macros RMCFG_ENGINE_SETUP and RMCFG_HAL_SETUP_xxx +// are used to provide different content for those .c files. +// + +#ifndef _G_RMCFG_HAL_PRIVATE_H_ +#define _G_RMCFG_HAL_PRIVATE_H_ + +#include "g_hal.h" + +// establish the per-chip RMCFG_HAL_SETUP_chip #defines as needed. +#if defined(RMCFG_ENGINE_SETUP) + +// setup all enabled chip families +#if defined(RMCFG_HAL_SETUP_ALL) +# define RMCFG_HAL_SETUP_T23XD 1 +#endif // RMCFG_HAL_SETUP_ALL + +// +// setup all enabled chips in each enabled family +// + +#if defined(RMCFG_HAL_SETUP_T23XD) +# define RMCFG_HAL_SETUP_T234D 1 +#endif // T23XD + +#endif // RMCFG_ENGINE_SETUP + +// pull in private headers for each engine + + +// +// per-GPU structure with an interface init function for each engine +// + +// registerHalModule function declaration +NV_STATUS registerHalModule(NvU32, const HAL_IFACE_SETUP *); + +#if defined(RMCFG_HAL_SETUP_T234D) + +static const HAL_IFACE_SETUP halIface_T234D = { + + +}; + +NV_STATUS registerHalModule_T234D(void) +{ + return registerHalModule(HAL_IMPL_T234D, &halIface_T234D); +} + +#endif // T23XD or T234D + + + +#endif // _G_RMCFG_HAL_PRIVATE_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h new file mode 100644 index 0000000..d1a0e53 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h @@ -0,0 +1,51 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Hal registration entry points. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_hal_register.h +// +// Chips: T234D +// + +#ifndef _G_RMCFG_HAL_REGISTER_H_ +#define _G_RMCFG_HAL_REGISTER_H_ + +// +// per-family HAL registration entry points +// + + +NV_STATUS registerHalModule_T234D(void); + +static NV_STATUS NV_INLINE REGISTER_T23XD_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = registerHalModule_T234D(); + if (rmStatus != NV_OK) + return rmStatus; + + return NV_OK; +} + +// +// This routine can be used by platform dependent code to +// enable all HAL modules. 
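+//
+// For example (hypothetical caller, not part of this generated header):
+//
+//     NV_STATUS rmStatus = REGISTER_ALL_HALS();
+//     if (rmStatus != NV_OK)
+//         return rmStatus;   // abort RM initialization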
+// +static NV_STATUS NV_INLINE REGISTER_ALL_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = REGISTER_T23XD_HALS(); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + return NV_OK; +} + + + +#endif // _G_RMCFG_HAL_REGISTER_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c new file mode 100644 index 0000000..4d72d26 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c @@ -0,0 +1,327 @@ +#define NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hda_codec_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xf59a20 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_Hdacodec(Hdacodec*); +void __nvoc_init_funcTable_Hdacodec(Hdacodec*); +NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Hdacodec(Hdacodec*); +void __nvoc_dtor_Hdacodec(Hdacodec*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Hdacodec = { + /*pClassDef=*/ &__nvoc_class_def_Hdacodec, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Hdacodec, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Hdacodec = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_Hdacodec_Hdacodec, + &__nvoc_rtti_Hdacodec_GpuResource, + &__nvoc_rtti_Hdacodec_RmResource, + &__nvoc_rtti_Hdacodec_RmResourceCommon, + &__nvoc_rtti_Hdacodec_RsResource, + &__nvoc_rtti_Hdacodec_Object, + }, +}; + +const struct NVOC_CLASS_DEF 
__nvoc_class_def_Hdacodec = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Hdacodec), + /*classId=*/ classId(Hdacodec), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Hdacodec", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Hdacodec, + /*pCastInfo=*/ &__nvoc_castinfo_Hdacodec, + /*pExportInfo=*/ &__nvoc_export_info_Hdacodec +}; + +static NvBool __nvoc_thunk_GpuResource_hdacodecShareCallback(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecControl(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecUnmap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemInterMapParams(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle(struct Hdacodec *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlFilter(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_hdacodecAddAdditionalDependants(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_hdacodecGetRefCount(struct Hdacodec *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap(struct Hdacodec *pRmResource, NvBool 
bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecMapTo(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecControl_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_hdacodecCanCopy(struct Hdacodec *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecInternalControlForward(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_hdacodecPreDestruct(struct Hdacodec *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecUnmapFrom(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_hdacodecControl_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlLookup(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecMap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_hdacodecAccessCallback(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void 
__nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Hdacodec(Hdacodec *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Hdacodec(Hdacodec *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail_GpuResource; + __nvoc_init_dataField_Hdacodec(pThis); + + status = __nvoc_hdacodecConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail__init; + goto __nvoc_ctor_Hdacodec_exit; // Success + +__nvoc_ctor_Hdacodec_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_Hdacodec_fail_GpuResource: +__nvoc_ctor_Hdacodec_exit: + + return status; +} + +static void __nvoc_init_funcTable_Hdacodec_1(Hdacodec *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__hdacodecShareCallback__ = &__nvoc_thunk_GpuResource_hdacodecShareCallback; + + pThis->__hdacodecControl__ = &__nvoc_thunk_GpuResource_hdacodecControl; + + pThis->__hdacodecUnmap__ = &__nvoc_thunk_GpuResource_hdacodecUnmap; + + pThis->__hdacodecGetMemInterMapParams__ = &__nvoc_thunk_RmResource_hdacodecGetMemInterMapParams; + + pThis->__hdacodecGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor; + + pThis->__hdacodecGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace; + + pThis->__hdacodecGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle; + + pThis->__hdacodecControlFilter__ = &__nvoc_thunk_RsResource_hdacodecControlFilter; + + pThis->__hdacodecAddAdditionalDependants__ = &__nvoc_thunk_RsResource_hdacodecAddAdditionalDependants; + + pThis->__hdacodecGetRefCount__ = &__nvoc_thunk_RsResource_hdacodecGetRefCount; + + pThis->__hdacodecCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap; + + pThis->__hdacodecMapTo__ = &__nvoc_thunk_RsResource_hdacodecMapTo; + + pThis->__hdacodecControl_Prologue__ = &__nvoc_thunk_RmResource_hdacodecControl_Prologue; + + pThis->__hdacodecGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize; + + pThis->__hdacodecCanCopy__ = &__nvoc_thunk_RsResource_hdacodecCanCopy; + + pThis->__hdacodecInternalControlForward__ = &__nvoc_thunk_GpuResource_hdacodecInternalControlForward; + + pThis->__hdacodecPreDestruct__ = &__nvoc_thunk_RsResource_hdacodecPreDestruct; + + pThis->__hdacodecUnmapFrom__ = &__nvoc_thunk_RsResource_hdacodecUnmapFrom; + + pThis->__hdacodecControl_Epilogue__ = &__nvoc_thunk_RmResource_hdacodecControl_Epilogue; + + pThis->__hdacodecControlLookup__ = &__nvoc_thunk_RsResource_hdacodecControlLookup; + + pThis->__hdacodecMap__ = &__nvoc_thunk_GpuResource_hdacodecMap; + + pThis->__hdacodecAccessCallback__ = &__nvoc_thunk_RmResource_hdacodecAccessCallback; +} + +void __nvoc_init_funcTable_Hdacodec(Hdacodec *pThis) { + __nvoc_init_funcTable_Hdacodec_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Hdacodec(Hdacodec *pThis) { + pThis->__nvoc_pbase_Hdacodec = pThis; + pThis->__nvoc_pbase_Object = 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_Hdacodec(pThis); +} + +NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Hdacodec *pThis; + + pThis = portMemAllocNonPaged(sizeof(Hdacodec)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Hdacodec)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Hdacodec); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Hdacodec(pThis); + status = __nvoc_ctor_Hdacodec(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Hdacodec_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Hdacodec_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Hdacodec(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h new file mode 100644 index 0000000..39aebb1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h @@ -0,0 +1,229 @@ +#ifndef _G_HDA_CODEC_API_NVOC_H_ +#define _G_HDA_CODEC_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_hda_codec_api_nvoc.h" + +#ifndef HDA_CODEC_API_H +#define HDA_CODEC_API_H + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_resource.h" +#include "ctrl/ctrl90ec.h" +#include "gpu/gpu_resource.h" + +#ifdef NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Hdacodec { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct Hdacodec *__nvoc_pbase_Hdacodec; + NvBool (*__hdacodecShareCallback__)(struct Hdacodec *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__hdacodecControl__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hdacodecUnmap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__hdacodecGetMemInterMapParams__)(struct Hdacodec *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__hdacodecGetMemoryMappingDescriptor__)(struct Hdacodec *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__hdacodecGetMapAddrSpace__)(struct Hdacodec *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__hdacodecGetInternalObjectHandle__)(struct Hdacodec *); + NV_STATUS (*__hdacodecControlFilter__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__hdacodecAddAdditionalDependants__)(struct RsClient *, struct Hdacodec *, RsResourceRef *); + NvU32 (*__hdacodecGetRefCount__)(struct Hdacodec *); + NV_STATUS (*__hdacodecCheckMemInterUnmap__)(struct Hdacodec *, NvBool); + NV_STATUS (*__hdacodecMapTo__)(struct Hdacodec *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__hdacodecControl_Prologue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hdacodecGetRegBaseOffsetAndSize__)(struct Hdacodec *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__hdacodecCanCopy__)(struct Hdacodec *); + NV_STATUS (*__hdacodecInternalControlForward__)(struct Hdacodec *, NvU32, void *, NvU32); + void (*__hdacodecPreDestruct__)(struct Hdacodec *); + NV_STATUS (*__hdacodecUnmapFrom__)(struct Hdacodec *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__hdacodecControl_Epilogue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hdacodecControlLookup__)(struct Hdacodec *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__hdacodecMap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__hdacodecAccessCallback__)(struct Hdacodec *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__ +#define __NVOC_CLASS_Hdacodec_TYPEDEF__ +typedef struct Hdacodec Hdacodec; 
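+// Editorial note, not part of the generated header: the hdacodecXxx()
+// macros below expand to hdacodecXxx_DISPATCH() inlines that call through
+// the per-instance __hdacodecXxx__ pointers populated in
+// __nvoc_init_funcTable_Hdacodec_1(); for Hdacodec every slot is a thunk
+// that forwards to a GpuResource, RmResource, or RsResource implementation.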
+#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hdacodec +#define __nvoc_class_id_Hdacodec 0xf59a20 +#endif /* __nvoc_class_id_Hdacodec */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec; + +#define __staticCast_Hdacodec(pThis) \ + ((pThis)->__nvoc_pbase_Hdacodec) + +#ifdef __nvoc_hda_codec_api_h_disabled +#define __dynamicCast_Hdacodec(pThis) ((Hdacodec*)NULL) +#else //__nvoc_hda_codec_api_h_disabled +#define __dynamicCast_Hdacodec(pThis) \ + ((Hdacodec*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Hdacodec))) +#endif //__nvoc_hda_codec_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Hdacodec(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Hdacodec((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define hdacodecShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) hdacodecShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define hdacodecControl(pGpuResource, pCallContext, pParams) hdacodecControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define hdacodecUnmap(pGpuResource, pCallContext, pCpuMapping) hdacodecUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define hdacodecGetMemInterMapParams(pRmResource, pParams) hdacodecGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define hdacodecGetMemoryMappingDescriptor(pRmResource, ppMemDesc) hdacodecGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define hdacodecGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) hdacodecGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define hdacodecGetInternalObjectHandle(pGpuResource) hdacodecGetInternalObjectHandle_DISPATCH(pGpuResource) +#define hdacodecControlFilter(pResource, pCallContext, pParams) hdacodecControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecAddAdditionalDependants(pClient, pResource, pReference) hdacodecAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define hdacodecGetRefCount(pResource) hdacodecGetRefCount_DISPATCH(pResource) +#define hdacodecCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) hdacodecCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define hdacodecMapTo(pResource, pParams) hdacodecMapTo_DISPATCH(pResource, pParams) +#define hdacodecControl_Prologue(pResource, pCallContext, pParams) hdacodecControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) hdacodecGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define hdacodecCanCopy(pResource) hdacodecCanCopy_DISPATCH(pResource) +#define hdacodecInternalControlForward(pGpuResource, command, pParams, size) hdacodecInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define hdacodecPreDestruct(pResource) hdacodecPreDestruct_DISPATCH(pResource) +#define hdacodecUnmapFrom(pResource, pParams) hdacodecUnmapFrom_DISPATCH(pResource, pParams) +#define hdacodecControl_Epilogue(pResource, pCallContext, pParams) hdacodecControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecControlLookup(pResource, pParams, ppEntry) 
hdacodecControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define hdacodecMap(pGpuResource, pCallContext, pParams, pCpuMapping) hdacodecMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define hdacodecAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) hdacodecAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool hdacodecShareCallback_DISPATCH(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__hdacodecShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS hdacodecControl_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__hdacodecControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecUnmap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__hdacodecUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS hdacodecGetMemInterMapParams_DISPATCH(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__hdacodecGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS hdacodecGetMemoryMappingDescriptor_DISPATCH(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__hdacodecGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS hdacodecGetMapAddrSpace_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__hdacodecGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle hdacodecGetInternalObjectHandle_DISPATCH(struct Hdacodec *pGpuResource) { + return pGpuResource->__hdacodecGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS hdacodecControlFilter_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__hdacodecControlFilter__(pResource, pCallContext, pParams); +} + +static inline void hdacodecAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) { + pResource->__hdacodecAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 hdacodecGetRefCount_DISPATCH(struct Hdacodec *pResource) { + return pResource->__hdacodecGetRefCount__(pResource); +} + +static inline NV_STATUS hdacodecCheckMemInterUnmap_DISPATCH(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__hdacodecCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS hdacodecMapTo_DISPATCH(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__hdacodecMapTo__(pResource, pParams); +} + +static inline NV_STATUS hdacodecControl_Prologue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__hdacodecControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecGetRegBaseOffsetAndSize_DISPATCH(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return 
pGpuResource->__hdacodecGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool hdacodecCanCopy_DISPATCH(struct Hdacodec *pResource) { + return pResource->__hdacodecCanCopy__(pResource); +} + +static inline NV_STATUS hdacodecInternalControlForward_DISPATCH(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__hdacodecInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void hdacodecPreDestruct_DISPATCH(struct Hdacodec *pResource) { + pResource->__hdacodecPreDestruct__(pResource); +} + +static inline NV_STATUS hdacodecUnmapFrom_DISPATCH(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__hdacodecUnmapFrom__(pResource, pParams); +} + +static inline void hdacodecControl_Epilogue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__hdacodecControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecControlLookup_DISPATCH(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__hdacodecControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS hdacodecMap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__hdacodecMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool hdacodecAccessCallback_DISPATCH(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__hdacodecAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS hdacodecConstruct_IMPL(struct Hdacodec *arg_pHdacodecApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_hdacodecConstruct(arg_pHdacodecApi, arg_pCallContext, arg_pParams) hdacodecConstruct_IMPL(arg_pHdacodecApi, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HDA_CODEC_API_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h new file mode 100644 index 0000000..d92619f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h @@ -0,0 +1,151 @@ +#ifndef _G_HYPERVISOR_NVOC_H_ +#define _G_HYPERVISOR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_hypervisor_nvoc.h" + +#ifndef HYPERVISOR_H +#define HYPERVISOR_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: hypervisor.h * +* Defines and structures used for the hypervisor object. * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/utility.h" +#include "nv-hypervisor.h" +#include "mem_mgr/mem.h" + +/* ------------------------ Forward Declarations ---------------------------- */ +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + + +typedef struct OBJHYPERVISOR *POBJHYPERVISOR; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + + +typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE; + +#ifdef NVOC_HYPERVISOR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJHYPERVISOR { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJHYPERVISOR *__nvoc_pbase_OBJHYPERVISOR; + NvBool bDetected; + NvBool bIsHVMGuest; + HYPERVISOR_TYPE type; + NvBool bIsHypervHost; + NvBool bIsHypervVgpuSupported; +}; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHYPERVISOR; + +#define __staticCast_OBJHYPERVISOR(pThis) \ + ((pThis)->__nvoc_pbase_OBJHYPERVISOR) + +#ifdef __nvoc_hypervisor_h_disabled +#define __dynamicCast_OBJHYPERVISOR(pThis) ((OBJHYPERVISOR*)NULL) +#else //__nvoc_hypervisor_h_disabled +#define __dynamicCast_OBJHYPERVISOR(pThis) \ + ((OBJHYPERVISOR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHYPERVISOR))) +#endif //__nvoc_hypervisor_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32); +#define __objCreate_OBJHYPERVISOR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHYPERVISOR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +static inline NvBool hypervisorIsVgxHyper_491d52(void) { + return ((NvBool)(0 != 0)); +} + +#define hypervisorIsVgxHyper() hypervisorIsVgxHyper_491d52() +#define hypervisorIsVgxHyper_HAL() hypervisorIsVgxHyper() + +static inline 
NvBool hypervisorCheckForAdminAccess(NvHandle hClient, NvU32 rmCtrlId) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool hypervisorCheckForObjectAccess(NvHandle hClient) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool hypervisorCheckForGspOffloadAccess(NvU32 rmCtrlId) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool hypervisorIsType(HYPERVISOR_TYPE hyperType) { + return ((NvBool)(0 != 0)); +} + +#undef PRIVATE_FIELD + + +#endif // HYPERVISOR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HYPERVISOR_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c new file mode 100644 index 0000000..67f0241 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c @@ -0,0 +1,235 @@ +#define NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_io_vaspace_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x28ed9c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE*); +NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJIOVASPACE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJIOVASPACE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJVASPACE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJIOVASPACE = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE, + &__nvoc_rtti_OBJIOVASPACE_OBJVASPACE, + &__nvoc_rtti_OBJIOVASPACE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJIOVASPACE), + /*classId=*/ classId(OBJIOVASPACE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJIOVASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJIOVASPACE, + /*pCastInfo=*/ &__nvoc_castinfo_OBJIOVASPACE, + /*pExportInfo=*/ &__nvoc_export_info_OBJIOVASPACE +}; + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return iovaspaceConstruct_((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static 
NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceAlloc(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return iovaspaceAlloc((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceFree(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return iovaspaceFree((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return iovaspaceApplyDefaultAlignment((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return iovaspaceIncAllocRefCnt((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr); +} + +static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart(struct OBJVASPACE *pVAS) { + return iovaspaceGetVaStart((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit(struct OBJVASPACE *pVAS) { + return iovaspaceGetVaLimit((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return iovaspaceGetVasInfo((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pParams); +} + +static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted(struct OBJIOVASPACE *pVAS) { + return vaspaceIsInternalVaRestricted((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvU32 __nvoc_thunk_OBJVASPACE_iovaspaceGetFlags(struct OBJIOVASPACE *pVAS) { + return vaspaceGetFlags((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE *pThis) { + __nvoc_iovaspaceDestruct(pThis); + __nvoc_dtor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE* ); +NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + if (status != NV_OK) goto __nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE; + __nvoc_init_dataField_OBJIOVASPACE(pThis); + goto __nvoc_ctor_OBJIOVASPACE_exit; // Success + +__nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE: +__nvoc_ctor_OBJIOVASPACE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJIOVASPACE_1(OBJIOVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__iovaspaceConstruct___ = &iovaspaceConstruct__IMPL; + + pThis->__iovaspaceAlloc__ = &iovaspaceAlloc_IMPL; + + 
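+    // Editorial note, not part of the generated source: this table wires
+    // dispatch both ways -- OBJIOVASPACE's own slots point at the
+    // iovaspaceXxx_IMPL functions, the inherited OBJVASPACE slots point at
+    // thunks that subtract the base-class offset to recover the
+    // OBJIOVASPACE pointer, and the two methods OBJIOVASPACE does not
+    // override (IsInternalVaRestricted, GetFlags) thunk up into the
+    // OBJVASPACE implementation.
+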
pThis->__iovaspaceFree__ = &iovaspaceFree_IMPL; + + pThis->__iovaspaceApplyDefaultAlignment__ = &iovaspaceApplyDefaultAlignment_IMPL; + + pThis->__iovaspaceIncAllocRefCnt__ = &iovaspaceIncAllocRefCnt_IMPL; + + pThis->__iovaspaceGetVaStart__ = &iovaspaceGetVaStart_IMPL; + + pThis->__iovaspaceGetVaLimit__ = &iovaspaceGetVaLimit_IMPL; + + pThis->__iovaspaceGetVasInfo__ = &iovaspaceGetVasInfo_IMPL; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceConstruct___ = &__nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceAlloc__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceAlloc; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceFree__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceFree; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceApplyDefaultAlignment__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIncAllocRefCnt__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaStart__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaLimit__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVasInfo__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo; + + pThis->__iovaspaceIsInternalVaRestricted__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted; + + pThis->__iovaspaceGetFlags__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetFlags; +} + +void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE *pThis) { + __nvoc_init_funcTable_OBJIOVASPACE_1(pThis); +} + +void __nvoc_init_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE *pThis) { + pThis->__nvoc_pbase_OBJIOVASPACE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJVASPACE = &pThis->__nvoc_base_OBJVASPACE; + __nvoc_init_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + __nvoc_init_funcTable_OBJIOVASPACE(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJIOVASPACE *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJIOVASPACE)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJIOVASPACE)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJIOVASPACE); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJIOVASPACE(pThis); + status = __nvoc_ctor_OBJIOVASPACE(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJIOVASPACE_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJIOVASPACE_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJIOVASPACE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h new file mode 100644 index 0000000..16b86e6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h 
@@ -0,0 +1,303 @@ +#ifndef _G_IO_VASPACE_NVOC_H_ +#define _G_IO_VASPACE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_io_vaspace_nvoc.h" + +#ifndef _IOVASPACE_H_ +#define _IOVASPACE_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: IOVASPACE.H * +* Defines and structures used for IOMMU Virtual Address Space Object. * +\***************************************************************************/ + +#include "mem_mgr/vaspace.h" // base class object header + +#define NV_IOVA_DOMAIN_NONE (~(NvU32)0) + +typedef struct OBJIOVASPACE *POBJIOVASPACE; + +#ifndef __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +typedef struct OBJIOVASPACE OBJIOVASPACE; +#endif /* __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJIOVASPACE +#define __nvoc_class_id_OBJIOVASPACE 0x28ed9c +#endif /* __nvoc_class_id_OBJIOVASPACE */ + + + +typedef struct IOVAMAPPING IOVAMAPPING; +typedef struct IOVAMAPPING *PIOVAMAPPING; + +// Opaque pointer for the OS layer to use +typedef struct OS_IOVA_MAPPING_DATA *POS_IOVA_MAPPING_DATA; + +struct IOVAMAPPING +{ + NvU32 iovaspaceId; + + // + // Refcount of the mapping. + // + // Each iovaspaceAcquireMapping() call increments the refcount, and each + // iovaspaceReleaseMapping() call decrements it. Additionally, submappings + // increment the refcount of their root mapping on creation and only + // decrement it when they are destroyed. + // + // Mappings are destroyed when their refcount reaches 0. + // + // Notably a mapping can be destroyed regardless of its refcount with + // iovaspaceDestroyMapping(). Destroying a root mapping destroys all of its + // submappings as well. + // + NvU32 refcount; + + PMEMORY_DESCRIPTOR pPhysMemDesc; + + // + // Maintain a hierarchy of IOVA mappings. The "root" mapping will generally + // be tied to the root memory descriptor. That mapping can have submappings + // within the same IOVA space that correspond to submemory descriptors of + // the root memory descriptor. + // + // Also, the root memory descriptor may have multiple IOVA mappings (up to + // one per IOVA space), so those need to be tracked in association directly + // with the root memory descriptor. 
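+    //
+    // (Editorial illustration, not part of the original comment: a root
+    // memory descriptor mapped into two IOVA spaces gets two root mappings
+    // chained through pNext, while a submemory descriptor of that root gets
+    // a single submapping whose link.pParent refers to the root mapping for
+    // the IOVA space of its associated pGpu.)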
+ // + // The memory descriptor (root or submemory) always points to a single IOVA + // mapping. For root memory descriptors, that mapping is the head of a list + // in which each mapping covers a unique IOVA space. For submemory + // descriptors, there can only be one IOVA mapping, corresponding to the + // IOVA space of the pGpu associated with the submemory descriptor. + // + union + { + struct IOVAMAPPING *pParent; + struct IOVAMAPPING *pChildren; + } link; + + // + // For root mappings, this points to the next root mapping for the same + // parent physical memory descriptor (e.g., a root mapping for a different + // IOVA space). + // + // For submappings, this instead points to the next submapping of the + // parent root mapping, since a submemory descriptor may only have a single + // IOVA mapping (which is a submapping of an IOVA mapping on the root + // memory descriptor). + // + struct IOVAMAPPING *pNext; + + // OS data associated with this mapping. Core RM doesn't touch this. + POS_IOVA_MAPPING_DATA pOsData; + + // + // If the memory is contiguous, this array consists of one element. + // If the memory is discontiguous, this array is actually larger and has + // one entry for each physical page in pPhysMemDesc. As a result, this + // structure must be allocated from the heap. + // + RmPhysAddr iovaArray[1]; + // WARNING: DO NOT place anything behind the IOVA array! +}; + +/*! + * Virtual address space for a system's IOMMU translation. + */ +#ifdef NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJIOVASPACE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJVASPACE __nvoc_base_OBJVASPACE; + struct Object *__nvoc_pbase_Object; + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; + struct OBJIOVASPACE *__nvoc_pbase_OBJIOVASPACE; + NV_STATUS (*__iovaspaceConstruct___)(struct OBJIOVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); + NV_STATUS (*__iovaspaceAlloc__)(struct OBJIOVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); + NV_STATUS (*__iovaspaceFree__)(struct OBJIOVASPACE *, NvU64); + NV_STATUS (*__iovaspaceApplyDefaultAlignment__)(struct OBJIOVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); + NV_STATUS (*__iovaspaceIncAllocRefCnt__)(struct OBJIOVASPACE *, NvU64); + NvU64 (*__iovaspaceGetVaStart__)(struct OBJIOVASPACE *); + NvU64 (*__iovaspaceGetVaLimit__)(struct OBJIOVASPACE *); + NV_STATUS (*__iovaspaceGetVasInfo__)(struct OBJIOVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); + NvBool (*__iovaspaceIsInternalVaRestricted__)(struct OBJIOVASPACE *); + NvU32 (*__iovaspaceGetFlags__)(struct OBJIOVASPACE *); + NvU64 mappingCount; +}; + +#ifndef __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +typedef struct OBJIOVASPACE OBJIOVASPACE; +#endif /* __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJIOVASPACE +#define __nvoc_class_id_OBJIOVASPACE 0x28ed9c +#endif /* __nvoc_class_id_OBJIOVASPACE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE; + +#define __staticCast_OBJIOVASPACE(pThis) \ + ((pThis)->__nvoc_pbase_OBJIOVASPACE) + +#ifdef __nvoc_io_vaspace_h_disabled +#define __dynamicCast_OBJIOVASPACE(pThis) ((OBJIOVASPACE*)NULL) +#else //__nvoc_io_vaspace_h_disabled +#define __dynamicCast_OBJIOVASPACE(pThis) \ + ((OBJIOVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJIOVASPACE))) +#endif //__nvoc_io_vaspace_h_disabled + + +NV_STATUS 
__nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32); +#define __objCreate_OBJIOVASPACE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJIOVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define iovaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) iovaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define iovaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) iovaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) +#define iovaspaceFree(pVAS, vAddr) iovaspaceFree_DISPATCH(pVAS, vAddr) +#define iovaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) iovaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define iovaspaceIncAllocRefCnt(pVAS, vAddr) iovaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define iovaspaceGetVaStart(pVAS) iovaspaceGetVaStart_DISPATCH(pVAS) +#define iovaspaceGetVaLimit(pVAS) iovaspaceGetVaLimit_DISPATCH(pVAS) +#define iovaspaceGetVasInfo(pVAS, pParams) iovaspaceGetVasInfo_DISPATCH(pVAS, pParams) +#define iovaspaceIsInternalVaRestricted(pVAS) iovaspaceIsInternalVaRestricted_DISPATCH(pVAS) +#define iovaspaceGetFlags(pVAS) iovaspaceGetFlags_DISPATCH(pVAS) +NV_STATUS iovaspaceConstruct__IMPL(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags); + +static inline NV_STATUS iovaspaceConstruct__DISPATCH(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pVAS->__iovaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +NV_STATUS iovaspaceAlloc_IMPL(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr); + +static inline NV_STATUS iovaspaceAlloc_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pVAS->__iovaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +NV_STATUS iovaspaceFree_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr); + +static inline NV_STATUS iovaspaceFree_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__iovaspaceFree__(pVAS, vAddr); +} + +NV_STATUS iovaspaceApplyDefaultAlignment_IMPL(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask); + +static inline NV_STATUS iovaspaceApplyDefaultAlignment_DISPATCH(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pVAS->__iovaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +NV_STATUS iovaspaceIncAllocRefCnt_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr); + +static inline NV_STATUS iovaspaceIncAllocRefCnt_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__iovaspaceIncAllocRefCnt__(pVAS, vAddr); +} + +NvU64 iovaspaceGetVaStart_IMPL(struct OBJIOVASPACE *pVAS); + +static inline NvU64 iovaspaceGetVaStart_DISPATCH(struct OBJIOVASPACE *pVAS) { + return 
pVAS->__iovaspaceGetVaStart__(pVAS); +} + +NvU64 iovaspaceGetVaLimit_IMPL(struct OBJIOVASPACE *pVAS); + +static inline NvU64 iovaspaceGetVaLimit_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceGetVaLimit__(pVAS); +} + +NV_STATUS iovaspaceGetVasInfo_IMPL(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams); + +static inline NV_STATUS iovaspaceGetVasInfo_DISPATCH(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pVAS->__iovaspaceGetVasInfo__(pVAS, pParams); +} + +static inline NvBool iovaspaceIsInternalVaRestricted_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceIsInternalVaRestricted__(pVAS); +} + +static inline NvU32 iovaspaceGetFlags_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceGetFlags__(pVAS); +} + +void iovaspaceDestruct_IMPL(struct OBJIOVASPACE *pIOVAS); +#define __nvoc_iovaspaceDestruct(pIOVAS) iovaspaceDestruct_IMPL(pIOVAS) +NV_STATUS iovaspaceAcquireMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping); +#ifdef __nvoc_io_vaspace_h_disabled +static inline NV_STATUS iovaspaceAcquireMapping(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceAcquireMapping(pIOVAS, pIovaMapping) iovaspaceAcquireMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +void iovaspaceReleaseMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping); +#ifdef __nvoc_io_vaspace_h_disabled +static inline void iovaspaceReleaseMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceReleaseMapping(pIOVAS, pIovaMapping) iovaspaceReleaseMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +void iovaspaceDestroyMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping); +#ifdef __nvoc_io_vaspace_h_disabled +static inline void iovaspaceDestroyMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceDestroyMapping(pIOVAS, pIovaMapping) iovaspaceDestroyMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +#undef PRIVATE_FIELD + + +struct OBJIOVASPACE* iovaspaceFromId(NvU32 iovaspaceId); +struct OBJIOVASPACE* iovaspaceFromMapping(PIOVAMAPPING pIovaMapping); + +// +// Helper that looks up the IOVAS from the mapping and then calls +// iovaspaceDestroyMapping(). +// +void iovaMappingDestroy(PIOVAMAPPING pIovaMapping); + +#endif // _IOVASPACE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_IO_VASPACE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h new file mode 100644 index 0000000..af7da93 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h @@ -0,0 +1,47 @@ +#ifndef _G_JOURNAL_NVOC_H_ +#define _G_JOURNAL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_journal_nvoc.h" + +#ifndef _JOURNAL_H_ +#define _JOURNAL_H_ + +// +// Journal object defines and Structures +// + +#include "kernel/core/core.h" + +#endif // _JOURNAL_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_JOURNAL_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c new file mode 100644 index 0000000..8942517 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c @@ -0,0 +1,346 @@ +#define NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_disp_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x55952e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +void __nvoc_dtor_KernelDisplay(KernelDisplay*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay; + +static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_KernelDisplay = { + /*pClassDef=*/ &__nvoc_class_def_KernelDisplay, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelDisplay, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelDisplay = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelDisplay_KernelDisplay, + &__nvoc_rtti_KernelDisplay_OBJENGSTATE, + 
&__nvoc_rtti_KernelDisplay_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelDisplay), + /*classId=*/ classId(KernelDisplay), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelDisplay", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelDisplay, + /*pCastInfo=*/ &__nvoc_castinfo_KernelDisplay, + /*pExportInfo=*/ &__nvoc_export_info_KernelDisplay +}; + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, ENGDESCRIPTOR engDesc) { + return kdispConstructEngine(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), engDesc); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStatePreInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + return kdispStatePreInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + return kdispStateInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_KernelDisplay_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + kdispStateDestroy(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) { + return kdispStateLoad(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) { + return kdispStateUnload(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char 
*)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kdispInitMissing(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispGetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispCompareTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kdispFreeTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispAllocTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispSetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kdispIsPresent(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelDisplay(KernelDisplay *pThis) { + __nvoc_kdispDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + 
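+    // __nvoc_HalVarIdx identifies the active HAL variant; the property
+    // blocks below test variant membership against a packed bitmask by
+    // splitting the index into a 32-bit word ((idx >> 5) == word) and a
+    // bit within that word ((1UL << (idx & 0x1f)) & mask).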
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_KDISP_IS_MISSING + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IS_MISSING, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KDISP_IMP_ENABLE + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 != 0))); + } + + pThis->pStaticInfo = ((void *)0); + + pThis->bWarPurgeSatellitesOnCoreFree = ((NvBool)(0 != 0)); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelDisplay_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelDisplay(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelDisplay_exit; // Success + +__nvoc_ctor_KernelDisplay_fail_OBJENGSTATE: +__nvoc_ctor_KernelDisplay_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelDisplay_1(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + + pThis->__kdispConstructEngine__ = &kdispConstructEngine_IMPL; + + pThis->__kdispStatePreInitLocked__ = &kdispStatePreInitLocked_IMPL; + + pThis->__kdispStateInitLocked__ = &kdispStateInitLocked_IMPL; + + pThis->__kdispStateDestroy__ = &kdispStateDestroy_IMPL; + + pThis->__kdispStateLoad__ = &kdispStateLoad_IMPL; + + pThis->__kdispStateUnload__ = &kdispStateUnload_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelDisplay_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStatePreInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelDisplay_engstateStateDestroy; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelDisplay_engstateStateLoad; + + 
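+    // The remaining slots pair each entry with an offset-adjusting thunk:
+    // KernelDisplay overrides are installed into the OBJENGSTATE vtable,
+    // and inherited OBJENGSTATE methods are surfaced under kdisp* names,
+    // with each thunk rebasing the object pointer across the base offset.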
pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelDisplay_engstateStateUnload; + + pThis->__kdispReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState; + + pThis->__kdispStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreLoad; + + pThis->__kdispStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostUnload; + + pThis->__kdispStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreUnload; + + pThis->__kdispStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked; + + pThis->__kdispInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kdispInitMissing; + + pThis->__kdispStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked; + + pThis->__kdispGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispGetTunableState; + + pThis->__kdispCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispCompareTunableState; + + pThis->__kdispFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispFreeTunableState; + + pThis->__kdispStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostLoad; + + pThis->__kdispAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispAllocTunableState; + + pThis->__kdispSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispSetTunableState; + + pThis->__kdispIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kdispIsPresent; +} + +void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelDisplay_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelDisplay = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelDisplay(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelDisplay *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelDisplay)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelDisplay)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelDisplay); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelDisplay(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelDisplay(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelDisplay_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelDisplay_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelDisplay(ppThis, pParent, createFlags); + + return status; +} + diff --git 
a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h new file mode 100644 index 0000000..d42b685 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h @@ -0,0 +1,642 @@ +#ifndef _G_KERN_DISP_NVOC_H_ +#define _G_KERN_DISP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kern_disp_nvoc.h" + +#ifndef KERN_DISP_H +#define KERN_DISP_H + +/****************************************************************************** +* +* Kernel Display module header +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/disp/kern_disp_type.h" +#include "gpu/disp/kern_disp_max.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/disp/vblank_callback/vblank.h" + +#include "ctrl/ctrl2080/ctrl2080internal.h" + +typedef NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS KernelDisplayStaticInfo; + +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + +struct RgLineCallback; + +#ifndef __NVOC_CLASS_RgLineCallback_TYPEDEF__ +#define __NVOC_CLASS_RgLineCallback_TYPEDEF__ +typedef struct RgLineCallback RgLineCallback; +#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RgLineCallback +#define __nvoc_class_id_RgLineCallback 0xa3ff1c +#endif /* __nvoc_class_id_RgLineCallback */ + + + +#define KDISP_GET_HEAD(pKernelDisplay, headID) (RMCFG_MODULE_KERNEL_HEAD ? kdispGetHead(pKernelDisplay, headID) : NULL) + +/*! + * KernelDisp is a logical abstraction of the GPU Display Engine. The + * Public API of the Display Engine is exposed through this object, and any + * interfaces which do not manage the underlying Display hardware can be + * managed by this object. 
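+ * Hardware programming stays with the physical RM; the kernel-side state
+ * held here covers per-head KernelHead tracking, vblank servicing, and
+ * RG-line callback registration, as declared in the structure below.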
+ */ +#ifdef NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelDisplay { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelDisplay *__nvoc_pbase_KernelDisplay; + NV_STATUS (*__kdispConstructEngine__)(struct OBJGPU *, struct KernelDisplay *, ENGDESCRIPTOR); + NV_STATUS (*__kdispStatePreInitLocked__)(struct OBJGPU *, struct KernelDisplay *); + NV_STATUS (*__kdispStateInitLocked__)(struct OBJGPU *, struct KernelDisplay *); + void (*__kdispStateDestroy__)(struct OBJGPU *, struct KernelDisplay *); + NV_STATUS (*__kdispStateLoad__)(struct OBJGPU *, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStateUnload__)(struct OBJGPU *, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispReconcileTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NV_STATUS (*__kdispStatePreLoad__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStatePostUnload__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStatePreUnload__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStateInitUnlocked__)(POBJGPU, struct KernelDisplay *); + void (*__kdispInitMissing__)(POBJGPU, struct KernelDisplay *); + NV_STATUS (*__kdispStatePreInitUnlocked__)(POBJGPU, struct KernelDisplay *); + NV_STATUS (*__kdispGetTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NV_STATUS (*__kdispCompareTunableState__)(POBJGPU, struct KernelDisplay *, void *, void *); + void (*__kdispFreeTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NV_STATUS (*__kdispStatePostLoad__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispAllocTunableState__)(POBJGPU, struct KernelDisplay *, void **); + NV_STATUS (*__kdispSetTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NvBool (*__kdispIsPresent__)(POBJGPU, struct KernelDisplay *); + NvBool PDB_PROP_KDISP_IMP_ENABLE; + struct DisplayInstanceMemory *pInst; + struct KernelHead *pKernelHead[4]; + const KernelDisplayStaticInfo *pStaticInfo; + NvBool bWarPurgeSatellitesOnCoreFree; + struct RgLineCallback *rgLineCallbackPerHead[4][2]; + NvU32 isrVblankHeads; +}; + +#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__ +#define __NVOC_CLASS_KernelDisplay_TYPEDEF__ +typedef struct KernelDisplay KernelDisplay; +#endif /* __NVOC_CLASS_KernelDisplay_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelDisplay +#define __nvoc_class_id_KernelDisplay 0x55952e +#endif /* __nvoc_class_id_KernelDisplay */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay; + +#define __staticCast_KernelDisplay(pThis) \ + ((pThis)->__nvoc_pbase_KernelDisplay) + +#ifdef __nvoc_kern_disp_h_disabled +#define __dynamicCast_KernelDisplay(pThis) ((KernelDisplay*)NULL) +#else //__nvoc_kern_disp_h_disabled +#define __dynamicCast_KernelDisplay(pThis) \ + ((KernelDisplay*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelDisplay))) +#endif //__nvoc_kern_disp_h_disabled + +#define PDB_PROP_KDISP_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KDISP_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_KDISP_IMP_ENABLE_BASE_CAST +#define PDB_PROP_KDISP_IMP_ENABLE_BASE_NAME PDB_PROP_KDISP_IMP_ENABLE + +NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay**, Dynamic*, NvU32); +#define __objCreate_KernelDisplay(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelDisplay((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kdispConstructEngine(pGpu, pKernelDisplay, engDesc) kdispConstructEngine_DISPATCH(pGpu, pKernelDisplay, engDesc) +#define kdispStatePreInitLocked(pGpu, pKernelDisplay) kdispStatePreInitLocked_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateInitLocked(pGpu, pKernelDisplay) kdispStateInitLocked_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateDestroy(pGpu, pKernelDisplay) kdispStateDestroy_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateLoad(pGpu, pKernelDisplay, flags) kdispStateLoad_DISPATCH(pGpu, pKernelDisplay, flags) +#define kdispStateUnload(pGpu, pKernelDisplay, flags) kdispStateUnload_DISPATCH(pGpu, pKernelDisplay, flags) +#define kdispReconcileTunableState(pGpu, pEngstate, pTunableState) kdispReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispStatePreLoad(pGpu, pEngstate, arg0) kdispStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kdispStatePostUnload(pGpu, pEngstate, arg0) kdispStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kdispStatePreUnload(pGpu, pEngstate, arg0) kdispStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kdispStateInitUnlocked(pGpu, pEngstate) kdispStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kdispInitMissing(pGpu, pEngstate) kdispInitMissing_DISPATCH(pGpu, pEngstate) +#define kdispStatePreInitUnlocked(pGpu, pEngstate) kdispStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kdispGetTunableState(pGpu, pEngstate, pTunableState) kdispGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kdispCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kdispFreeTunableState(pGpu, pEngstate, pTunableState) kdispFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispStatePostLoad(pGpu, pEngstate, arg0) kdispStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kdispAllocTunableState(pGpu, pEngstate, ppTunableState) kdispAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kdispSetTunableState(pGpu, pEngstate, pTunableState) kdispSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispIsPresent(pGpu, pEngstate) kdispIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kdispConstructInstMem_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispConstructInstMem(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispConstructInstMem(pKernelDisplay) kdispConstructInstMem_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispConstructInstMem_HAL(pKernelDisplay) kdispConstructInstMem(pKernelDisplay) + +void kdispDestructInstMem_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispDestructInstMem(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} 
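+// Pattern used throughout this header: when __nvoc_kern_disp_h_disabled is
+// defined, each accessor collapses to a stub that asserts
+// ("KernelDisplay was disabled!") and reports NV_ERR_NOT_SUPPORTED, so
+// callers still compile without the real _IMPL symbols being linked.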
+#else //__nvoc_kern_disp_h_disabled +#define kdispDestructInstMem(pKernelDisplay) kdispDestructInstMem_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispDestructInstMem_HAL(pKernelDisplay) kdispDestructInstMem(pKernelDisplay) + +NV_STATUS kdispSelectClass_v03_00_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispSelectClass(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSelectClass(pGpu, pKernelDisplay, swClass) kdispSelectClass_v03_00_KERNEL(pGpu, pKernelDisplay, swClass) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispSelectClass_HAL(pGpu, pKernelDisplay, swClass) kdispSelectClass(pGpu, pKernelDisplay, swClass) + +NvS32 kdispGetBaseOffset_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvS32 kdispGetBaseOffset(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetBaseOffset(pGpu, pKernelDisplay) kdispGetBaseOffset_v04_02(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetBaseOffset_HAL(pGpu, pKernelDisplay) kdispGetBaseOffset(pGpu, pKernelDisplay) + +NV_STATUS kdispGetChannelNum_v03_00(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetChannelNum(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum_v03_00(pKernelDisplay, channelClass, channelInstance, pChannelNum) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum) + +void kdispGetDisplayCapsBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispGetDisplayCapsBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) + +void kdispGetDisplaySfUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispGetDisplaySfUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define 
kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) + +NV_STATUS kdispGetDisplayChannelUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetDisplayChannelUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize_v03_00(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) + +NV_STATUS kdispImportImpData_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispImportImpData(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispImportImpData(pKernelDisplay) kdispImportImpData_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispImportImpData_HAL(pKernelDisplay) kdispImportImpData(pKernelDisplay) + +NV_STATUS kdispArbAndAllocDisplayBandwidth_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispArbAndAllocDisplayBandwidth(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth_v04_02(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispArbAndAllocDisplayBandwidth_HAL(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) + +NV_STATUS kdispSetPushBufferParamsToPhysical_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispSetPushBufferParamsToPhysical(struct OBJGPU *pGpu, 
struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical_IMPL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispSetPushBufferParamsToPhysical_HAL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) + +static inline NV_STATUS kdispAcquireDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) { + return NV_OK; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispAcquireDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw_56cd7a(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispAcquireDispChannelHw_HAL(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) + +static inline NV_STATUS kdispReleaseDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + return NV_OK; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispReleaseDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw_56cd7a(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) + +NV_STATUS kdispMapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispMapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define 
kdispMapDispChannel(pKernelDisplay, pDispChannel) kdispMapDispChannel_IMPL(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispMapDispChannel(pKernelDisplay, pDispChannel) + +void kdispUnbindUnmapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispUnbindUnmapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel_IMPL(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) + +NV_STATUS kdispRegisterRgLineCallback_IMPL(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispRegisterRgLineCallback(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback_IMPL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispRegisterRgLineCallback_HAL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) + +void kdispInvokeRgLineCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispInvokeRgLineCallback(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback_KERNEL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispInvokeRgLineCallback_HAL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) + +void kdispServiceVblank_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispServiceVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank_KERNEL(pGpu, pKernelDisplay, arg0, arg1, arg2) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispServiceVblank_HAL(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2) + +NvU32 kdispReadPendingVblank_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvU32 
kdispReadPendingVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReadPendingVblank(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank_KERNEL(pGpu, pKernelDisplay, arg0) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank(pGpu, pKernelDisplay, arg0) + +static inline NvBool kdispGetVgaWorkspaceBase_ceaee8(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) { + NV_ASSERT_PRECOMP(0); + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvBool kdispGetVgaWorkspaceBase(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase_ceaee8(pGpu, pKernelDisplay, pOffset) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetVgaWorkspaceBase_HAL(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset) + +void kdispInvokeDisplayModesetCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispInvokeDisplayModesetCallback(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback_KERNEL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispInvokeDisplayModesetCallback_HAL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) + +NV_STATUS kdispConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc); + +static inline NV_STATUS kdispConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc) { + return pKernelDisplay->__kdispConstructEngine__(pGpu, pKernelDisplay, engDesc); +} + +NV_STATUS kdispStatePreInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +static inline NV_STATUS kdispStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return pKernelDisplay->__kdispStatePreInitLocked__(pGpu, pKernelDisplay); +} + +NV_STATUS kdispStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +static inline NV_STATUS kdispStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return pKernelDisplay->__kdispStateInitLocked__(pGpu, pKernelDisplay); +} + +void kdispStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +static inline void kdispStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + pKernelDisplay->__kdispStateDestroy__(pGpu, pKernelDisplay); +} + +NV_STATUS 
kdispStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags); + +static inline NV_STATUS kdispStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) { + return pKernelDisplay->__kdispStateLoad__(pGpu, pKernelDisplay, flags); +} + +NV_STATUS kdispStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags); + +static inline NV_STATUS kdispStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) { + return pKernelDisplay->__kdispStateUnload__(pGpu, pKernelDisplay, flags); +} + +static inline NV_STATUS kdispReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return pEngstate->__kdispReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kdispStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__kdispStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kdispInitMissing_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + pEngstate->__kdispInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kdispStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__kdispStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kdispGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return pEngstate->__kdispGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kdispCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kdispCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kdispFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + pEngstate->__kdispFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kdispStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) { + return pEngstate->__kdispAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kdispSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return pEngstate->__kdispSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kdispIsPresent_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__kdispIsPresent__(pGpu, pEngstate); +} + +void kdispDestruct_IMPL(struct KernelDisplay *pKernelDisplay); +#define __nvoc_kdispDestruct(pKernelDisplay) kdispDestruct_IMPL(pKernelDisplay) +NV_STATUS kdispConstructKhead_IMPL(struct KernelDisplay *pKernelDisplay); +#ifdef 
__nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispConstructKhead(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispConstructKhead(pKernelDisplay) kdispConstructKhead_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +void kdispDestructKhead_IMPL(struct KernelDisplay *pKernelDisplay); +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispDestructKhead(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDestructKhead(pKernelDisplay) kdispDestructKhead_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +NV_STATUS kdispGetIntChnClsForHwCls_IMPL(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass); +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetIntChnClsForHwCls(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetIntChnClsForHwCls(pKernelDisplay, hwClass, pDispChnClass) kdispGetIntChnClsForHwCls_IMPL(pKernelDisplay, hwClass, pDispChnClass) +#endif //__nvoc_kern_disp_h_disabled + +void kdispNotifyEvent_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispNotifyEvent(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispNotifyEvent(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) kdispNotifyEvent_IMPL(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) +#endif //__nvoc_kern_disp_h_disabled + +void kdispSetWarPurgeSatellitesOnCoreFree_IMPL(struct KernelDisplay *pKernelDisplay, NvBool value); +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispSetWarPurgeSatellitesOnCoreFree(struct KernelDisplay *pKernelDisplay, NvBool value) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, value) kdispSetWarPurgeSatellitesOnCoreFree_IMPL(pKernelDisplay, value) +#endif //__nvoc_kern_disp_h_disabled + +#undef PRIVATE_FIELD + + +void +dispdeviceFillVgaSavedDisplayState( struct OBJGPU *pGpu, + NvU64 vgaAddr, + NvU8 vgaMemType, + NvBool vgaValid, + NvU64 workspaceAddr, + NvU8 workspaceMemType, + NvBool workspaceValid, + NvBool baseValid, + NvBool workspaceBaseValid +); + +static NV_INLINE struct KernelHead* +kdispGetHead +( + struct KernelDisplay *pKernelDisplay, + NvU32 head +) +{ + if (head >= OBJ_MAX_HEADS) + { + return NULL; + } + + return pKernelDisplay->pKernelHead[head]; +} + +static NV_INLINE NvU32 +kdispGetNumHeads(struct KernelDisplay *pKernelDisplay) +{ + NV_ASSERT(pKernelDisplay->pStaticInfo != NULL); + return pKernelDisplay->pStaticInfo->numHeads; +} + +static NV_INLINE NvU32 +kdispGetIsPrimaryVga(struct KernelDisplay *pKernelDisplay) +{ + NV_ASSERT(pKernelDisplay->pStaticInfo != NULL); + return pKernelDisplay->pStaticInfo->bPrimaryVga; +} +#endif // 
KERN_DISP_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_DISP_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c new file mode 100644 index 0000000..b23fe50 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c @@ -0,0 +1,176 @@ +#define NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_head_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x0145e6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_KernelHead(KernelHead*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelHead(KernelHead*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelHead(KernelHead*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelHead(KernelHead*, RmHalspecOwner* ); +void __nvoc_dtor_KernelHead(KernelHead*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead; + +static const struct NVOC_RTTI __nvoc_rtti_KernelHead_KernelHead = { + /*pClassDef=*/ &__nvoc_class_def_KernelHead, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelHead, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelHead_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelHead, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelHead = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_KernelHead_KernelHead, + &__nvoc_rtti_KernelHead_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelHead), + /*classId=*/ classId(KernelHead), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelHead", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelHead, + /*pCastInfo=*/ &__nvoc_castinfo_KernelHead, + /*pExportInfo=*/ &__nvoc_export_info_KernelHead +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_KernelHead(KernelHead *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail_Object; + 
__nvoc_init_dataField_KernelHead(pThis, pRmhalspecowner); + + status = __nvoc_kheadConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail__init; + goto __nvoc_ctor_KernelHead_exit; // Success + +__nvoc_ctor_KernelHead_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_KernelHead_fail_Object: +__nvoc_ctor_KernelHead_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelHead_1(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +void __nvoc_init_funcTable_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelHead_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelHead = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_KernelHead(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelHead *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelHead)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelHead)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelHead); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelHead(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelHead(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelHead_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelHead_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelHead(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h new file mode 100644 index 0000000..1ee76b6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h @@ -0,0 +1,354 @@ +#ifndef _G_KERNEL_HEAD_NVOC_H_ +#define _G_KERNEL_HEAD_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * 
SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/**************************** Kernelhead Routines **************************\ +* * +* Kernel head object function Definitions. * +* * +\***************************************************************************/ + +#include "g_kernel_head_nvoc.h" + +#ifndef KERNEL_HEAD_H +#define KERNEL_HEAD_H + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/disp/vblank_callback/vblank.h" +#include "gpu/gpu_halspec.h" +/* ------------------------ Types definitions ------------------------------ */ +enum +{ + headIntr_none = 0, + headIntr_vblank = NVBIT(0), +}; + +/* ------------------------ Macros & Defines ------------------------------- */ + +#ifdef NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct __nvoc_inner_struc_KernelHead_1__ { + struct { + NvU32 Total; + NvU32 LowLatency; + NvU32 NormLatency; + } Counters; + struct { + VBLANKCALLBACK *pListLL; + VBLANKCALLBACK *pListNL; + } Callback; + NvU32 IntrState; +}; + + +struct KernelHead { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct KernelHead *__nvoc_pbase_KernelHead; + struct __nvoc_inner_struc_KernelHead_1__ Vblank; + NvU32 PublicId; +}; + +#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__ +#define __NVOC_CLASS_KernelHead_TYPEDEF__ +typedef struct KernelHead KernelHead; +#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelHead +#define __nvoc_class_id_KernelHead 0x0145e6 +#endif /* __nvoc_class_id_KernelHead */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead; + +#define __staticCast_KernelHead(pThis) \ + ((pThis)->__nvoc_pbase_KernelHead) + +#ifdef __nvoc_kernel_head_h_disabled +#define __dynamicCast_KernelHead(pThis) ((KernelHead*)NULL) +#else //__nvoc_kernel_head_h_disabled +#define __dynamicCast_KernelHead(pThis) \ + ((KernelHead*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelHead))) +#endif //__nvoc_kernel_head_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelHead(KernelHead**, Dynamic*, NvU32); +#define __objCreate_KernelHead(ppNewObj, pParent, createFlags) \ + 
__nvoc_objCreate_KernelHead((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NvU32 kheadGetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead); + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankTotalCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankTotalCounter(pKernelHead) kheadGetVblankTotalCounter_IMPL(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankTotalCounter_HAL(pKernelHead) kheadGetVblankTotalCounter(pKernelHead) + +void kheadSetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankTotalCounter(struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankTotalCounter(pKernelHead, arg0) kheadSetVblankTotalCounter_IMPL(pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadSetVblankTotalCounter_HAL(pKernelHead, arg0) kheadSetVblankTotalCounter(pKernelHead, arg0) + +NvU32 kheadGetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead); + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankLowLatencyCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankLowLatencyCounter(pKernelHead) kheadGetVblankLowLatencyCounter_IMPL(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankLowLatencyCounter_HAL(pKernelHead) kheadGetVblankLowLatencyCounter(pKernelHead) + +void kheadSetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankLowLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankLowLatencyCounter(pKernelHead, arg0) kheadSetVblankLowLatencyCounter_IMPL(pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadSetVblankLowLatencyCounter_HAL(pKernelHead, arg0) kheadSetVblankLowLatencyCounter(pKernelHead, arg0) + +static inline NvU32 kheadGetVblankNormLatencyCounter_46f6a7(struct KernelHead *pKernelHead) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankNormLatencyCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankNormLatencyCounter(pKernelHead) kheadGetVblankNormLatencyCounter_46f6a7(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankNormLatencyCounter_HAL(pKernelHead) kheadGetVblankNormLatencyCounter(pKernelHead) + +static inline void kheadSetVblankNormLatencyCounter_b3696a(struct KernelHead *pKernelHead, NvU32 arg0) { + return; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankNormLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankNormLatencyCounter(pKernelHead, arg0) kheadSetVblankNormLatencyCounter_b3696a(pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define 
kheadSetVblankNormLatencyCounter_HAL(pKernelHead, arg0) kheadSetVblankNormLatencyCounter(pKernelHead, arg0) + +static inline NvBool kheadReadVblankIntrEnable_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvBool kheadReadVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadVblankIntrEnable(pGpu, pKernelHead) kheadReadVblankIntrEnable_491d52(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead) kheadReadVblankIntrEnable(pGpu, pKernelHead) + +static inline NvBool kheadGetDisplayInitialized_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvBool kheadGetDisplayInitialized(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetDisplayInitialized(pGpu, pKernelHead) kheadGetDisplayInitialized_491d52(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetDisplayInitialized_HAL(pGpu, pKernelHead) kheadGetDisplayInitialized(pGpu, pKernelHead) + +static inline void kheadWriteVblankIntrEnable_b3696a(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) { + return; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadWriteVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable_b3696a(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0) + +static inline void kheadProcessVblankCallbacks_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_PRECOMP(0); + return; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadProcessVblankCallbacks(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks_e426af(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadProcessVblankCallbacks_HAL(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0) + +static inline void kheadResetPendingVblank_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + NV_ASSERT_PRECOMP(0); + return; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadResetPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadResetPendingVblank(pGpu, pKhead, arg0) kheadResetPendingVblank_e426af(pGpu, pKhead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadResetPendingVblank_HAL(pGpu, pKhead, arg0) kheadResetPendingVblank(pGpu, pKhead, arg0) + +static inline void kheadResetPendingVblankForKernel_e426af(struct OBJGPU *pGpu, struct 
KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + NV_ASSERT_PRECOMP(0); + return; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadResetPendingVblankForKernel(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadResetPendingVblankForKernel(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel_e426af(pGpu, pKhead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadResetPendingVblankForKernel_HAL(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel(pGpu, pKhead, arg0) + +static inline NvU32 kheadReadPendingVblank_92bfc3(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadReadPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadPendingVblank(pGpu, pKernelHead, intr) kheadReadPendingVblank_92bfc3(pGpu, pKernelHead, intr) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadReadPendingVblank_HAL(pGpu, pKernelHead, intr) kheadReadPendingVblank(pGpu, pKernelHead, intr) + +NV_STATUS kheadConstruct_IMPL(struct KernelHead *arg_pKernelHead); +#define __nvoc_kheadConstruct(arg_pKernelHead) kheadConstruct_IMPL(arg_pKernelHead) +void kheadAddVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0); +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadAddVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadAddVblankCallback(pGpu, pKernelHead, arg0) kheadAddVblankCallback_IMPL(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +void kheadDeleteVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0); +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadDeleteVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadDeleteVblankCallback(pGpu, pKernelHead, arg0) kheadDeleteVblankCallback_IMPL(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +NvU32 kheadCheckVblankCallbacksQueued_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1); +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadCheckVblankCallbacksQueued(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, arg0, arg1) kheadCheckVblankCallbacksQueued_IMPL(pGpu, pKernelHead, arg0, arg1) +#endif //__nvoc_kernel_head_h_disabled + +NvU32 kheadReadVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadReadVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadVblankIntrState(pGpu, pKernelHead) kheadReadVblankIntrState_IMPL(pGpu, 
pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +void kheadWriteVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0); +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadWriteVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadWriteVblankIntrState(pGpu, pKernelHead, arg0) kheadWriteVblankIntrState_IMPL(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#undef PRIVATE_FIELD + + +void kheadProcessVblankCallbacks_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 state); + +#endif // KERNEL_HEAD_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_HEAD_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h new file mode 100644 index 0000000..c5ad909 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h @@ -0,0 +1,1075 @@ +#ifndef _G_MEM_DESC_NVOC_H_ +#define _G_MEM_DESC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_mem_desc_nvoc.h" + +#ifndef _MEMDESC_H_ +#define _MEMDESC_H_ + +#include "core/prelude.h" +#include "poolalloc.h" + + +struct OBJVASPACE; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct Heap; + +#ifndef __NVOC_CLASS_Heap_TYPEDEF__ +#define __NVOC_CLASS_Heap_TYPEDEF__ +typedef struct Heap Heap; +#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Heap +#define __nvoc_class_id_Heap 0x556e9a +#endif /* __nvoc_class_id_Heap */ + + +struct MEMORY_DESCRIPTOR; + +typedef struct CTX_BUF_POOL_INFO CTX_BUF_POOL_INFO; +typedef struct COMPR_INFO COMPR_INFO; + +// +// Address space identifiers. +// +typedef NvU32 NV_ADDRESS_SPACE; +#define ADDR_UNKNOWN 0 // Address space is unknown +#define ADDR_SYSMEM 1 // System memory (PCI) +#define ADDR_FBMEM 2 // Frame buffer memory space +#define ADDR_REGMEM 3 // NV register memory space +#define ADDR_VIRTUAL 4 // Virtual address space only +#define ADDR_FABRIC_V2 6 // Fabric address space for the FLA based addressing. Will replace ADDR_FABRIC. +#define ADDR_FABRIC_MC 8 // Multicast fabric address space (MCFLA) + +// +// Address translation identifiers: +// +// Memory descriptors are used to describe physical block(s) of memory. +// That memory can be described at various levels of address translation +// using the address translation (AT) enumerates. The levels of translation +// supported is illustrated below. +// +// The diagram is drawn for system memory with SR-IOV but the translations +// are similar for video memory (replace IOMMU with VMMU). VGPU pre-SR-IOV +// is also different. +// +// +-------------------+ +-------------------+ +// | CPU | | GPU Engine | +// +-------------------+ +-------------------+ +// | | +// | | GPU VA +// | V +// | +-------------------+ +// | CPU VA | GMMU | +// | +-------------------+ +// | | +// | | GPU GPA (AT_GPU) +// v v +// +-------------------+ +-------------------+ +// | MMU (1st level)| | | IOMMU (1st level) | +// +-------------------+ +-------------------+ +// | | +// | CPU GPA (AT_CPU) | <---- AT_PA for VGPU guest +// v v +// +-------------------+ +-------------------+ +// | MMU (2nd level) | | IOMMU (2nd level) | +// +-------------------+ +-------------------+ +// | | +// | SPA | SPA <---- AT_PA for bare metal +// v v or VGPU host +// +---------------------------------------------------+ +// | System Memory | +// +---------------------------------------------------+ +// +// +// Descriptions for *physical* address translation levels: +// +// AT_CPU - CPU physical address or guest physical address (GPA) +// AT_GPU - GPU physical address or guest physical address (GPA) +// AT_PA - When running in host RM or bare metal this is the system physical address. When +// running inside a VGPU guest environment, this is the last level of translation +// visible to the OS context that RM is running in. 
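+//
+// Illustrative sketch (editor's addition, not part of the original header):
+// the same offset in one memory descriptor can be resolved at different
+// translation levels through memdescGetPhysAddr(), declared later in this
+// file:
+//
+//     RmPhysAddr gpa = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); // GPU-visible GPA
+//     RmPhysAddr spa = memdescGetPhysAddr(pMemDesc, AT_PA,  0); // SPA (bare metal / host)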
+//
+// AT_CPU should typically == AT_PA, but there might be cases such as IBM P9 where vidmem
+// might be 0-based on GPU but exposed elsewhere in the CPU address space.
+//
+// Descriptions for *virtual* address translation levels:
+//
+// AT_GPU_VA - Memory descriptors can also describe virtual memory allocations. AT_GPU_VA
+//             represents a GMMU virtual address.
+//
+#define AT_CPU       AT_VARIANT(0)
+#define AT_GPU       AT_VARIANT(1)
+#define AT_PA        AT_VARIANT(2)
+
+#define AT_GPU_VA    AT_VARIANT(3)
+
+//
+// TODO - switch to using numeric values for AT_XYZ. Using pointers for
+// type safety after initial split from using class IDs/mmuContext
+//
+typedef struct ADDRESS_TRANSLATION_ *ADDRESS_TRANSLATION;
+#define AT_VARIANT(x) ((struct ADDRESS_TRANSLATION_ *)x)
+#define AT_VALUE(x)   ((NvU64)(NvUPtr)(x))
+
+//
+// Overrides address translation in SR-IOV enabled use cases
+//
+// In SRIOV systems, an access from guest has to go through the following
+// translations:
+//
+//     GVA -> GPA -> SPA
+//
+// Given HOST manages channel/memory management for guest, there are certain
+// code paths that expect VA -> GPA translations and some may need GPA -> SPA
+// translations. We use address translation to differentiate between these
+// cases.
+//
+// We use AT_PA to force GPA -> SPA translation for vidmem. In case of non-SRIOV systems,
+// using IO_VASPACE_A will fall back to FERMI_VASPACE_A or default context.
+//
+#define FORCE_VMMU_TRANSLATION(pMemDesc, curAddressTranslation) \
+    ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ? AT_PA : curAddressTranslation)
+
+typedef struct _memdescDestroyCallback MEM_DESC_DESTROY_CALLBACK;
+
+typedef void (MEM_DATA_RELEASE_CALL_BACK)(struct MEMORY_DESCRIPTOR *);
+
+//
+// A memory descriptor is an object that describes and can be used to manipulate
+// a block of memory. The memory can be video or system memory; it can be
+// contiguous or noncontiguous; it can be tiled, block linear, etc. However,
+// regardless of what type of memory it is, clients can use a standard set of
+// APIs to manipulate it.
+//
+DECLARE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST);
+
+typedef struct MEMORY_DESCRIPTOR
+{
+    // The GPU that this memory belongs to
+    OBJGPU *pGpu;
+
+    // Flags field for optional behavior
+    NvU64 _flags;
+
+    // Size of mapping used for this allocation. Multiple mappings on Fermi must always use the same page size.
+    NvU32 _pageSize;
+
+    // Size of the memory allocation in pages
+    NvU64 PageCount;
+
+    // Alignment of the memory allocation as size in bytes
+    // XXX: would 32b work here?
+    NvU64 Alignment;
+
+    // Size of the memory allocation requested in bytes
+    NvU64 Size;
+
+    // Actual size of memory allocated to satisfy alignment.
+    // We report the requested size, not the actual size. A number of callers
+    // depend on this.
+    NvU64 ActualSize;
+
+    // The information returned from osAllocPages
+    NvP64 _address;
+    void *_pMemData;
+    MEM_DATA_RELEASE_CALL_BACK *_pMemDataReleaseCallback;
+
+    // When memory is allocated by a guest Virtual Machine (VM)
+    // it is aliased by the host RM. We store a unique guest ID
+    // for each piece of aliased memory to facilitate host RM mappings
+    // to these pages (only in case of system memory).
+    // XXX: would 32b work here?
+    NvU64 _guestId;
+
+    // To keep track of the offset from parent memdesc
+    NvU64 subMemOffset;
+
+    //
+    // The byte offset at which the memory allocation begins within the first
+    // PTE. To locate the physical address of the byte at offset i in the memory
+    // allocation, use the following logic:
+    //   i += PteAdjust;
+    //   if (PhysicallyContiguous)
+    //       PhysAddr = PteArray[0] + i;
+    //   else
+    //       PhysAddr = PteArray[i >> RM_PAGE_SHIFT] + (i & RM_PAGE_MASK);
+    //
+    NvU32 PteAdjust;
+
+    // Has the memory been allocated yet?
+    NvBool Allocated;
+
+    //
+    // Marks that a request to deallocate memory has been called on this memdesc while it had multiple references
+    // NV_TRUE denotes that memFree will be called when refcount reaches 0.
+    //
+    NvBool bDeferredFree;
+
+    // Does this use SUBALLOCATOR?
+    NvBool bUsingSuballocator;
+
+    // Where does the memory live? Video, system, other
+    NV_ADDRESS_SPACE _addressSpace;
+
+    // Attributes reflecting GPU caching of this memory.
+    NvU32 _gpuCacheAttrib;
+
+    // Peer vid mem cacheability
+    NvU32 _gpuP2PCacheAttrib;
+
+    // One of NV_MEMORY_CACHED, NV_MEMORY_UNCACHED, NV_MEMORY_WRITECOMBINED
+    NvU32 _cpuCacheAttrib;
+
+    // The page kind of this memory
+    NvU32 _pteKind;
+    NvU32 _pteKindCompressed;
+
+    //
+    // Scale memory allocation by this value
+    //
+    NvU32 _subDeviceAllocCount;
+
+    //
+    // Reference count for the object.
+    //
+    NvU32 RefCount;
+
+    // Reference count for duplication of memory object via RmDupObject.
+    NvU32 DupCount;
+
+    //
+    // The HwResId is used by the device dependent HAL to keep track of
+    // resources attached to the memory (e.g.: compression tags, zcull).
+    //
+    NvU32 _hwResId;
+
+    //
+    // Keep track of which heap is actually used for this allocation
+    //
+    struct Heap *pHeap;
+
+    //
+    // GFID that this memory allocation belongs to
+    //
+    NvU32 gfid;
+
+    //
+    // Keep track of the PMA_ALLOC_INFO data.
+    //
+    struct PMA_ALLOC_INFO *pPmaAllocInfo;
+
+    // Serve as head node in a list of page handles
+    PoolPageHandleList *pPageHandleList;
+
+    //
+    // List of callbacks to call when destroying memory descriptor
+    //
+    MEM_DESC_DESTROY_CALLBACK *_pMemDestroyCallbackList;
+
+    // pointer to descriptor which was used to subset current descriptor
+    struct MEMORY_DESCRIPTOR *_pParentDescriptor;
+
+    // Count used for sanity check
+    NvU32 childDescriptorCnt;
+
+    // Next memory descriptor in subdevice list
+    struct MEMORY_DESCRIPTOR *_pNext;
+
+    // Pointer to system memory descriptor which is used to back some FB content across S3/S4.
+    struct MEMORY_DESCRIPTOR *_pStandbyBuffer;
+
+    // Serve as a head node in a list of submemdescs
+    MEMORY_DESCRIPTOR_LIST *pSubMemDescList;
+
+    // If strung in an intrusive linked list
+    ListNode node;
+
+    //
+    // Pointer to IOVA mappings used to back the IOMMU VAs for different IOVA spaces
+    // Submemory descriptors only have one mapping, but the root descriptor will have
+    // one per IOVA space that the memory is mapped into.
+    //
+    struct IOVAMAPPING *_pIommuMappings;
+
+    // Kernel mapping of the memory
+    NvP64 _kernelMapping;
+    NvP64 _kernelMappingPriv;
+
+    // Internal mapping
+    void *_pInternalMapping;
+    void *_pInternalMappingPriv;
+    NvU32 _internalMappingRefCount;
+
+    // Array to hold SPA addresses when memdesc is allocated from GPA. Valid only for SRIOV cases
+    RmPhysAddr *pPteSpaMappings;
+
+    //
+    // context buffer pool from which this memdesc is to be allocated.
+    // This is controlled by PDB_PROP_GPU_MOVE_RM_BUFFERS_TO_PMA which is
+    // enabled only for SMC today
+    //
+    CTX_BUF_POOL_INFO *pCtxBufPool;
+
+    // Max physical address width to be overridden
+    NvU32 _overridenAddressWidth;
+
+    // We verified that memdesc is safe to be mapped as large pages
+    NvBool bForceHugePages;
+
+    //
+    // If PhysicallyContiguous is NV_TRUE, this array consists of one element.
+    // If PhysicallyContiguous is NV_FALSE, this array is actually larger and has
+    // one entry for each physical page in the memory allocation. As a result,
+    // this structure must be allocated from the heap.
+    // If the AddressSpace is ADDR_FBMEM, each entry is an FB offset.
+    // Otherwise, each entry is a physical address on the system bus.
+    // TBD: for now, the array will be sized at one entry for every 4KB, but
+    // we probably want to optimize this later to support 64KB pages.
+    //
+    RmPhysAddr _pteArray[1];
+    //!!! Place nothing behind PteArray!!!
+} MEMORY_DESCRIPTOR, *PMEMORY_DESCRIPTOR;
+
+MAKE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST, MEMORY_DESCRIPTOR, node);
+
+//
+// Common address space lists
+//
+extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_PREFERRED[];
+extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_PREFERRED[];
+extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_ONLY[];
+extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_ONLY[];
+
+NvU32 memdescAddrSpaceListToU32(const NV_ADDRESS_SPACE *addrlist);
+const NV_ADDRESS_SPACE *memdescU32ToAddrSpaceList(NvU32 index);
+
+NV_STATUS _memdescUpdateSpaArray(PMEMORY_DESCRIPTOR pMemDesc);
+// Create a memory descriptor data structure (without allocating any physical
+// storage).
+NV_STATUS memdescCreate(MEMORY_DESCRIPTOR **ppMemDesc, OBJGPU *pGpu, NvU64 Size,
+                        NvU64 alignment, NvBool PhysicallyContiguous,
+                        NV_ADDRESS_SPACE AddressSpace, NvU32 CpuCacheAttrib, NvU64 Flags);
+
+#define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addressSpace) \
+    ((gpumgrGetBcEnabledStatus(pGpu) && (pGpu != NULL) && (addressSpace == ADDR_FBMEM)) ? MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE : MEMDESC_FLAGS_NONE)
+
+// Initialize a caller supplied memory descriptor for use with memdescDescribe()
+void memdescCreateExisting(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU64 Size,
+                           NV_ADDRESS_SPACE AddressSpace,
+                           NvU32 CpuCacheAttrib, NvU64 Flags);
+
+// Increment reference count
+void memdescAddRef(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Decrement reference count
+void memdescRemoveRef(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Decrement reference count and reclaim any resources when possible
+void memdescDestroy(MEMORY_DESCRIPTOR *pMemDesc);
+
+//
+// The destroy callback is called when the memory descriptor is
+// destroyed with memdescDestroy().
+//
+// The caller is responsible for managing the memory
+// containing the callback.
+//
+typedef void (MemDescDestroyCallBack)(OBJGPU *, void *pObject, MEMORY_DESCRIPTOR *);
+struct _memdescDestroyCallback
+{
+    MemDescDestroyCallBack *destroyCallback;
+    void *pObject;
+    MEM_DESC_DESTROY_CALLBACK *pNext;
+};
+void memdescAddDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *);
+void memdescRemoveDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *);
+
+// Allocate physical storage for a memory descriptor and fill in its PteArray
+NV_STATUS memdescAlloc(MEMORY_DESCRIPTOR *pMemDesc);
+
+// Allocate memory from one of the possible locations specified in pList.
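+//
+// Editor's sketch (illustrative only, not part of the original header): a
+// typical create/alloc/free sequence built from the declarations above;
+// pGpu and size are assumed to be in scope, and error handling is minimal.
+//
+//     MEMORY_DESCRIPTOR *pMemDesc = NULL;
+//     NV_STATUS status = memdescCreate(&pMemDesc, pGpu, size, 0, NV_TRUE,
+//                                      ADDR_SYSMEM, NV_MEMORY_CACHED,
+//                                      MEMDESC_FLAGS_NONE);
+//     if (status == NV_OK)
+//     {
+//         status = memdescAlloc(pMemDesc);
+//         if (status == NV_OK)
+//         {
+//             // ... use the memory, e.g. via memdescMap() declared below ...
+//             memdescFree(pMemDesc);
+//         }
+//         memdescDestroy(pMemDesc);
+//     }
+//
+// (The declaration below, memdescAllocList(), is the list-based variant.)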
+NV_STATUS memdescAllocList(MEMORY_DESCRIPTOR *pMemDesc, const NV_ADDRESS_SPACE *pList); + +// Free physical storage for a memory descriptor +void memdescFree(MEMORY_DESCRIPTOR *pMemDesc); + +// Lock the paged virtual memory +NV_STATUS memdescLock(MEMORY_DESCRIPTOR *pMemDesc); + +// Unlock the paged virtual memory +NV_STATUS memdescUnlock(MEMORY_DESCRIPTOR *pMemDesc); + +// Allocate a CPU mapping of an arbitrary subrange of the memory. +// 64-bit clean (mac can have a 32-bit kernel pointer and 64-bit client pointers) +NV_STATUS memdescMap(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size, + NvBool Kernel, NvU32 Protect, NvP64 *pAddress, NvP64 *pPriv); + +// Free a CPU mapping of an arbitrary subrange of the memory. +void memdescUnmap(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel, NvU32 ProcessId, + NvP64 Address, NvP64 Priv); + +// Allocate a CPU mapping of an arbitrary subrange of the memory. +// fails unless Kernel == NV_TRUE +NV_STATUS memdescMapOld(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size, + NvBool Kernel, NvU32 Protect, void **pAddress, void **pPriv); + +// Free a CPU mapping of an arbitrary subrange of the memory. +void memdescUnmapOld(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel, NvU32 ProcessId, + void *Address, void *Priv); + +// Fill in a MEMORY_DESCRIPTOR with a description of a preexisting contiguous +// memory allocation. It should already be initialized with +// memdescCreate*(). +void memdescDescribe(MEMORY_DESCRIPTOR *pMemDesc, + NV_ADDRESS_SPACE AddressSpace, + RmPhysAddr Base, NvU64 Size); + +// Fill in a MEMORY_DESCRIPTOR with the physical page addresses returned by PMA. +// It should already be initialized with memdescCreate*(). +void memdescFillPages(MEMORY_DESCRIPTOR *pMemDesc, NvU32 offset, + NvU64 *pPages, NvU32 pageCount, NvU32 pageSize); + +// Create a MEMORY_DESCRIPTOR for a subset of an existing memory allocation. +// The new MEMORY_DESCRIPTOR must be freed with memdescDestroy. +NV_STATUS memdescCreateSubMem(MEMORY_DESCRIPTOR **ppMemDescNew, + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, NvU64 Offset, NvU64 Size); + +// Compute the physical address of a byte within a MEMORY_DESCRIPTOR +RmPhysAddr memdescGetPhysAddr(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU64 offset); + +// Compute count physical addresses within a MEMORY_DESCRIPTOR. Starting at the +// given offset and advancing it by stride for each consecutive address. +void memdescGetPhysAddrs(MEMORY_DESCRIPTOR *pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses); + +// Compute count physical addresses within a MEMORY_DESCRIPTOR for a specific +// GPU. Starting at the given offset and advancing it by stride for each +// consecutive address. +void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses); + +// Obtains one of the PTEs from the MEMORY_DESCRIPTOR. Assumes 4KB pages, +// and works for either contiguous or noncontiguous descriptors. +RmPhysAddr memdescGetPte(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex); + +void memdescSetPte(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex, RmPhysAddr PhysAddr); + +// Obtains the PteArray from the MEMORY_DESCRIPTOR for the specified GPU. 
+RmPhysAddr * memdescGetPteArrayForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, ADDRESS_TRANSLATION addressTranslation); + +/*! + * @brief Obtains the PteArray from the MEMORY_DESCRIPTOR. + * + * @param[in] pMemDesc Memory descriptor to use + * @param[in] addressTranslation Address translation identifier + * + * @returns PageArray + */ +static inline RmPhysAddr * +memdescGetPteArray(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + return memdescGetPteArrayForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation); +} + +// Obtains the PteArray size from the MEMORY_DESCRIPTOR based on the mmuContext. +NvU32 memdescGetPteArraySize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation); + +// Return the aperture of the NV_ADDRESS_SPACE as a null terminated string. +// Useful for print statements. +const char* memdescGetApertureString(NV_ADDRESS_SPACE addrSpace); + +// Return true if two MEMORY_DESCRIPTOR are equal +NvBool memdescDescIsEqual(MEMORY_DESCRIPTOR *pMemDescOne, MEMORY_DESCRIPTOR *pMemDescTwo); + +// Retrieve the per-GPU memory descriptor for a subdevice +MEMORY_DESCRIPTOR *memdescGetMemDescFromSubDeviceInst(MEMORY_DESCRIPTOR *pMemDesc, NvU32 subDeviceInst); + +// Retrieve the per-GPU memory descriptor for a GPU +MEMORY_DESCRIPTOR *memdescGetMemDescFromGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu); + +// Retrieve the per-GPU memory descriptor at an index +MEMORY_DESCRIPTOR *memdescGetMemDescFromIndex(MEMORY_DESCRIPTOR *pMemDesc, NvU32 index); + +// Print information on memory descriptor +void memdescPrintMemdesc(MEMORY_DESCRIPTOR *pMemDesc, NvBool bPrintIndividualPages, const char *pPrefixMessage); + +// Get the page offset for an arbitrary power of two page size +NvU64 memdescGetPageOffset(MEMORY_DESCRIPTOR *pMemDesc, NvU32 pageSize); + +// +// Internal APIs for the IOVASPACE to manage IOMMU mappings in a memdesc. +// +// Note that the external APIs are memdescMapIommu(), +// memdescUnmapIommu() and memdescGetIommuMap(). +// +NV_STATUS memdescAddIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap); +void memdescRemoveIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap); + +// +// Map and unmap IOMMU for the specified VA space +// +// Each memdescUnmapIommu() call has to be paired with a previous successful +// memdescMapIommu() call for the same VA space. The calls are refcounted for +// each VA space and only the last Unmap will remove the mappings. +// +// The caller has to guarantee that before the VA space is destroyed, either the +// mapping is explicitly unmapped with memdescUnmapIommu() or the memdesc is +// freed (or destroyed for memdescs that are not memdescFree()d). +// +NV_STATUS memdescMapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); +void memdescUnmapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); + +// Returns the IOVA mapping created by memdescMapIommu(). 
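+//
+// Editor's illustration (not from the original header) of the pairing rule
+// described above; vaspaceId is assumed to identify a valid IOVA space:
+//
+//     if (memdescMapIommu(pMemDesc, vaspaceId) == NV_OK)
+//     {
+//         struct IOVAMAPPING *pMap = memdescGetIommuMap(pMemDesc, vaspaceId);
+//         // ... use pMap; the map calls are refcounted per VA space ...
+//         memdescUnmapIommu(pMemDesc, vaspaceId);
+//     }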
+struct IOVAMAPPING *memdescGetIommuMap(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId);
+
+//
+// Check subdevice consistency functions
+//
+void memdescCheckSubDevicePageSizeConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS,
+                                              NvU64 pageSize, NvU64 pageOffset);
+void memdescCheckSubDeviceMemContiguityConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS,
+                                                   NvBool bIsMemContiguous);
+NV_STATUS memdescCheckSubDeviceKindComprConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS,
+                                                    NvU32 kind, COMPR_INFO *pComprInfo);
+
+//
+// Accessor functions
+//
+void memdescSetHeapOffset(MEMORY_DESCRIPTOR *pMemDesc, RmPhysAddr fbOffset);
+void memdescSetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 cpuCacheAttrib);
+void memdescSetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib);
+NvU32 memdescGetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib);
+NvU32 memdescGetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu);
+void memdescSetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU32 pteKind);
+NvU32 memdescGetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc, NvU32 pteKindCmpr);
+NvP64 memdescGetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMapping);
+NvP64 memdescGetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMappingPriv);
+MEMORY_DESCRIPTOR *memdescGetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc, MEMORY_DESCRIPTOR *pStandbyBuffer);
+void memdescSetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *pCb);
+NvU64 memdescGetGuestId(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetGuestId(MEMORY_DESCRIPTOR *pMemDesc, NvU64 guestId);
+NvBool memdescGetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag);
+void memdescSetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag, NvBool bValue);
+NvP64 memdescGetAddress(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetAddress(MEMORY_DESCRIPTOR *pMemDesc, NvP64 pAddress);
+void *memdescGetMemData(MEMORY_DESCRIPTOR *pMemDesc);
+void memdescSetMemData(MEMORY_DESCRIPTOR *pMemDesc, void *pMemData, MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback);
+NvBool memdescGetVolatility(MEMORY_DESCRIPTOR *pMemDesc);
+NvBool memdescGetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation);
+void memdescSetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvBool isContiguous);
+NvBool memdescCheckContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation);
+NV_ADDRESS_SPACE memdescGetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc);
+NvU32 memdescGetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation);
+void memdescSetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 pageSize);
+PMEMORY_DESCRIPTOR memdescGetRootMemDesc(PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pRootOffset);
+void memdescSetCustomHeap(PMEMORY_DESCRIPTOR);
+NvBool memdescGetCustomHeap(PMEMORY_DESCRIPTOR);
+
+/*!
+ * @brief Get PTE kind
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current PTE kind value.
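+ *
+ * Usage sketch (editor's illustration, not part of the original header;
+ * assumes a valid pMemDesc):
+ *
+ *     NvU32 kind = memdescGetPteKind(pMemDesc);   // current kind
+ *     memdescSetPteKind(pMemDesc, kind);          // setter declared below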
+ */
+static inline NvU32
+memdescGetPteKind(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return memdescGetPteKindForGpu(pMemDesc, pMemDesc->pGpu);
+}
+
+/*!
+ * @brief Set PTE kind.
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] pteKind New PTE kind
+ *
+ * @returns nothing
+ */
+static inline void
+memdescSetPteKind(PMEMORY_DESCRIPTOR pMemDesc, NvU32 pteKind)
+{
+    memdescSetPteKindForGpu(pMemDesc, pMemDesc->pGpu, pteKind);
+}
+
+/*!
+ * @brief Get HW resource identifier (HwResId)
+ *
+ * TODO: Need to ensure this is checked per subdevice only.
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current HW resource identifier
+ */
+static inline NvU32
+memdescGetHwResId(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return pMemDesc->_hwResId;
+}
+
+/*!
+ * @brief Set HW resource identifier (HwResId)
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] hwResId New HW resource identifier
+ *
+ * @returns nothing
+ */
+static inline void
+memdescSetHwResId(PMEMORY_DESCRIPTOR pMemDesc, NvU32 hwResId)
+{
+    pMemDesc->_hwResId = hwResId;
+}
+
+/*!
+ * @brief Get mem destroy callback list pointer
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Pointer to mem destroy callback list
+ */
+static inline MEM_DESC_DESTROY_CALLBACK *
+memdescGetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_pMemDestroyCallbackList;
+}
+
+/*!
+ * @brief Get the byte offset relative to the root memory descriptor.
+ *
+ * Root memory descriptor is the top level memory descriptor with no parent,
+ * from which this memory descriptor was derived.
+ *
+ * @param[in] pMemDesc Pointer to the memory descriptor.
+ *
+ * @returns the byte offset relative to the root memory descriptor.
+ */
+static inline NvU64
+memdescGetRootOffset(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    NvU64 rootOffset = 0;
+    (void)memdescGetRootMemDesc(pMemDesc, &rootOffset);
+    return rootOffset;
+}
+
+/*!
+ * @brief Get CPU cache attributes
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current CPU cache attributes
+ */
+static inline NvU32
+memdescGetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_cpuCacheAttrib;
+}
+
+/*!
+ * @brief Get GPU cache attributes
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current GPU cache attributes
+ */
+static inline NvU32
+memdescGetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_gpuCacheAttrib;
+}
+
+/*!
+ * @brief Return pte adjust
+ *
+ * PteAdjust is zero whenever the memory is allocated, as allocations are always
+ * going to be page-size aligned. However, we can have memory descriptors
+ * created on pre-allocated addresses + offset that aren't page aligned.
+ * PteAdjust is non-zero in such cases. We do not allow the memdescDescribe operation
+ * (i.e. memory descriptors created on a pre-allocated address) for subdevice
+ * memdescs, and hence the top level memdesc is always used to access pte adjust.
+ *
+ * @param[in] pMemDesc Memory descriptor to use
+ *
+ * @returns PteAdjust
+ */
+static inline NvU32
+memdescGetPteAdjust(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return pMemDesc->PteAdjust;
+}
+
+/*!
+ * @brief Get subdevice allocation count.
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Current subdevice allocation count value.
+ */
+static inline NvU32
+memdescGetSubDeviceAllocCount(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_subDeviceAllocCount;
+}
+
+/*!
+ * @brief Get memory descriptor of parent
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Memory descriptor of parent
+ */
+static inline MEMORY_DESCRIPTOR *
+memdescGetParentDescriptor(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_pParentDescriptor;
+}
+
+/*!
+ * @brief Set the address space of the memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor used
+ * @param[in] addressSpace Address Space
+ *
+ * @returns nothing
+ */
+static inline void
+memdescSetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc, NV_ADDRESS_SPACE addressSpace)
+{
+    pMemDesc->_addressSpace = addressSpace;
+}
+
+/*!
+ * @brief Return size
+ *
+ * @param[in] pMemDesc Memory descriptor to use
+ *
+ * @returns Size
+ */
+static inline NvU64
+memdescGetSize(PMEMORY_DESCRIPTOR pMemDesc)
+{
+    return pMemDesc->Size;
+}
+
+/*!
+ * @brief Checks if subdevice memory descriptors are present
+ *
+ * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice memory
+ * descriptors
+ *
+ * @param[in] pMemDesc Memory descriptor to query
+ *
+ * @returns NV_TRUE if subdevice memory descriptors exist
+ */
+static NV_INLINE NvBool
+memdescHasSubDeviceMemDescs(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return (pMemDesc->_subDeviceAllocCount > 1);
+}
+
+/*!
+ * @brief Checks if memory descriptor describes memory that is submemory
+ *
+ * @param[in] pMemDesc Memory descriptor to query
+ *
+ * @returns NV_TRUE if it is a submemory desc, NV_FALSE otherwise.
+ */
+static NV_INLINE NvBool
+memdescIsSubMemoryMemDesc(MEMORY_DESCRIPTOR *pMemDesc)
+{
+    return pMemDesc->_pParentDescriptor != NULL ? NV_TRUE : NV_FALSE;
+}
+
+/*!
+ * @brief Override the registry INST_LOC two-bit enum to an aperture (list) + cpu attr.
+ *
+ * The loc parameter uses the NV_REG_STR_RM_INST_LOC defines.
+ * Caller must set initial default values.
+ */
+void memdescOverrideInstLoc(NvU32 loc, const char *name, NV_ADDRESS_SPACE *pAddrSpace, NvU32 *pCpuMappingAttr);
+void memdescOverrideInstLocList(NvU32 loc, const char *name, const NV_ADDRESS_SPACE **ppAllocList, NvU32 *pCpuMappingAttr);
+
+/*!
+* @brief Override the physical system address limit.
+*
+*/
+void memdescOverridePhysicalAddressWidthWindowsWAR(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 addressWidth);
+
+/*!
+* @brief Register memory descriptor referenced by hMemory in CPU-RM to GSP
+*
+* @param[in] pGpu OBJGPU pointer
+* @param[in] hClient client handle
+* @param[in] hParent parent (subdevice) handle
+* @param[in] hMemory memory handle
+*
+* @returns NV_STATUS
+*/
+NV_STATUS memdescRegisterToGSP(OBJGPU *pGpu, NvHandle hClient, NvHandle hParent, NvHandle hMemory);
+
+/*!
+* @brief Deregister memory descriptor referenced by hMemory in CPU-RM from GSP
+*
+* @param[in] pGpu OBJGPU pointer
+* @param[in] hClient client handle
+* @param[in] hParent parent (subdevice) handle
+* @param[in] hMemory memory handle
+*
+* @returns NV_STATUS
+*/
+
+NV_STATUS memdescDeregisterFromGSP(OBJGPU *pGpu, NvHandle hClient, NvHandle hParent, NvHandle hMemory);
+
+// cache maintenance functions
+void memdescFlushCpuCaches(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc);
+
+// Map memory descriptor for RM internal access
+void* memdescMapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
+void memdescUnmapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags);
+
+//
+// External flags:
+//  ALLOC_PER_SUBDEVICE     Allocate independent system memory for each GPU
+//  LOST_ON_SUSPEND         PM code will skip this allocation during S/R
+//  LOCKLESS_SYSMEM_ALLOC   System memory should be allocated unprotected by
+//                          the RM lock
+//  GPU_PRIVILEGED          This memory will be marked as privileged in the GPU
+//                          page tables. When set only GPU requestors who are
+//                          "privileged" are allowed to access this memory.
+//                          This can be used for mapping sensitive memory into
+//                          a user's GPU address space (like context buffers).
+//                          Note support for this in our GPUs is limited, so
+//                          only use it if you know the HW accessing the memory
+//                          makes privileged requests.
+//
+// Internal flags:
+//  SET_KIND                Whether or not the kind was set to a different value
+//                          than default.
+//  PRE_ALLOCATED           Caller provided memory descriptor memory
+//  FIXED_ADDRESS_ALLOCATE  Allocate from the heap with a fixed address
+//  ALLOCATED               Has the memory been allocated yet?
+//  GUEST_ALLOCATED         Is the memory allocated by a guest VM?
+//                          We make aliased memory descriptors to guest
+//                          allocated memory and mark it so, so that we know
+//                          how to deal with it in memdescMap() etc.
+//  KERNEL_MODE             Is the memory for a user or kernel context?
+//                          XXX This is lame, and it would be best if we could
+//                          get rid of it. Memory *storage* isn't either user
+//                          or kernel -- only mappings are user or kernel.
+//                          Unfortunately, osAllocPages requires that we
+//                          provide this information.
+//  PHYSICALLY_CONTIGUOUS   Are the underlying physical pages of this memory
+//                          allocation contiguous?
+//  ENCRYPTED               TurboCipher allocations need a bit in the PTE to
+//                          indicate encrypted
+//  UNICAST                 Memory descriptor was created via UC path
+//  PAGED_SYSMEM            Allocate the memory from paged system memory. When
+//                          this flag is used, memdescLock() should be called
+//                          to lock the memory in physical pages before we
+//                          access this memory descriptor.
+//  CPU_ONLY                Allocate memory only accessed by CPU.
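+//
+// For illustration (editor's addition, not part of the original header): the
+// MEMDESC_FLAGS_* values below are NvU64 bit masks that are OR'd together
+// into the Flags argument of memdescCreate(), e.g.
+//
+//     memdescCreate(&pMemDesc, pGpu, size, 0, NV_TRUE, ADDR_SYSMEM,
+//                   NV_MEMORY_CACHED,
+//                   MEMDESC_FLAGS_KERNEL_MODE | MEMDESC_FLAGS_PAGED_SYSMEM);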
+#define MEMDESC_FLAGS_NONE                          ((NvU64)0x0)
+#define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE           NVBIT64(0)
+#define MEMDESC_FLAGS_SET_KIND                      NVBIT64(1)
+#define MEMDESC_FLAGS_LOST_ON_SUSPEND               NVBIT64(2)
+#define MEMDESC_FLAGS_PRE_ALLOCATED                 NVBIT64(3)
+#define MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE        NVBIT64(4)
+#define MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC         NVBIT64(5)
+#define MEMDESC_FLAGS_GPU_IN_RESET                  NVBIT64(6)
+#define MEMDESC_ALLOC_FLAGS_PROTECTED               NVBIT64(7)
+#define MEMDESC_FLAGS_GUEST_ALLOCATED               NVBIT64(8)
+#define MEMDESC_FLAGS_KERNEL_MODE                   NVBIT64(9)
+#define MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS         NVBIT64(10)
+#define MEMDESC_FLAGS_ENCRYPTED                     NVBIT64(11)
+#define MEMDESC_FLAGS_PAGED_SYSMEM                  NVBIT64(12)
+#define MEMDESC_FLAGS_GPU_PRIVILEGED                NVBIT64(13)
+#define MEMDESC_FLAGS_PRESERVE_CONTENT_ON_SUSPEND   NVBIT64(14)
+#define MEMDESC_FLAGS_DUMMY_TOPLEVEL                NVBIT64(15)
+
+// Don't use the two flags below; they are for memdesc internal use only.
+// These flags will be removed when memory allocation is refactored in RM.
+#define MEMDESC_FLAGS_PROVIDE_IOMMU_MAP             NVBIT64(16)
+#define MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE         NVBIT64(17)
+
+#define MEMDESC_FLAGS_CUSTOM_HEAP_ACR               NVBIT64(18)
+
+// Allocate in "fast" or "slow" memory, if there are multiple grades of memory (like mixed density)
+#define MEMDESC_FLAGS_HIGH_PRIORITY                 NVBIT64(19)
+#define MEMDESC_FLAGS_LOW_PRIORITY                  NVBIT64(20)
+
+// Flag to specify whether the requested size should be rounded to the page size
+#define MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE        NVBIT64(21)
+
+#define MEMDESC_FLAGS_CPU_ONLY                      NVBIT64(22)
+
+// This flag is used for a special SYSMEM descriptor that points to a memory
+// region allocated externally (e.g. malloc, kmalloc etc.)
+#define MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM            NVBIT64(23)
+
+// Owned by the Physical Memory Allocator (PMA).
+#define MEMDESC_FLAGS_ALLOC_PMA_OWNED               NVBIT64(24)
+
+// This flag is added as part of the Sub-Allocator feature meant to be used by VGPU clients.
+// Once VGPU clients allocate a large block of memory for their use, they carve out a small
+// portion of it to be used for RM internal allocations originating from a given client. Each
+// allocation can choose to use this carved-out memory owned by the client or be part of the global heap.
+// This flag has to be used in RM internal allocations only when a particular allocation is tied to
+// the lifetime of this client and will be freed before the client gets destroyed.
+#define MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE       NVBIT64(25)
+
+// This flag is used to specify that the pages are pinned by another kernel module or API.
+// Currently, this flag is used for vGPU on KVM, where RM calls vfio APIs to pin and unpin pages
+// instead of using os_lock_user_pages() and os_unlock_user_pages().
+#define MEMDESC_FLAGS_FOREIGN_PAGE                  NVBIT64(26)
+
+// These flags are used for SYSMEM descriptors that point to a physical BAR
+// range and do not take the usual memory mapping paths. Currently, these are used for vGPU.
+#define MEMDESC_FLAGS_BAR0_REFLECT                  NVBIT64(27)
+#define MEMDESC_FLAGS_BAR1_REFLECT                  NVBIT64(28)
+
+// This flag is used to create the shared memory required for vGPU operation.
+// During RPC and all other shared memory allocations, VF RM will set this flag to instruct the MODS
+// layer to create shared memory between the VF process and the PF process.
+#define MEMDESC_FLAGS_MODS_SHARED_MEM               NVBIT64(29)
+
+// This flag is set in memdescs that describe client (currently MODS) managed VPR allocations.
+#define MEMDESC_FLAGS_VPR_REGION_CLIENT_MANAGED     NVBIT64(30)
+
+// This flag is used for a special SYSMEM descriptor that points to the physical BAR
+// range of a third-party device.
+#define MEMDESC_FLAGS_PEER_IO_MEM                   NVBIT64(31)
+
+// If the flag is set, the RM will only allow read-only CPU user-mappings
+// to the descriptor.
+#define MEMDESC_FLAGS_USER_READ_ONLY                NVBIT64(32)
+
+// If the flag is set, the RM will only allow read-only DMA mappings
+// to the descriptor.
+#define MEMDESC_FLAGS_DEVICE_READ_ONLY              NVBIT64(33)
+
+// This flag is used to denote a memory descriptor that is part of a larger memory descriptor,
+// created using NV01_MEMORY_LIST_SYSTEM, NV01_MEMORY_LIST_FBMEM or NV01_MEMORY_LIST_OBJECT.
+#define MEMDESC_FLAGS_LIST_MEMORY                   NVBIT64(34)
+
+// This flag is used to denote that this memdesc is allocated from
+// a context buffer pool. When this flag is set, we expect a pointer
+// to this context buffer pool to be cached in the memdesc.
+#define MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL         NVBIT64(36)
+
+//
+// This flag is used to skip privilege checks for the ADDR_REGMEM mapping type.
+// This is useful for cases like UserModeApi, where we want to use this memory type
+// in a non-privileged user context.
+#define MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK        NVBIT64(37)
+
+// This flag denotes a memory descriptor of type display non-ISO
+#define MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO      NVBIT64(38)
+
+// This flag is used to force mapping of coherent sysmem through
+// the GMMU over BAR1. This is useful when we need some form
+// of special translation of the SYSMEM_COH aperture by the GMMU.
+#define MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1          NVBIT64(39)
+
+// This flag is used to override the system memory limit so that the allocation
+// is placed within the override address width.
+#define MEMDESC_FLAGS_OVERRIDE_SYSTEM_ADDRESS_LIMIT NVBIT64(40)
+
+//
+// If this flag is set, Linux RM will ensure that the allocated memory is
+// 32-bit addressable.
+#define MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE       NVBIT64(41)
+
+//
+// If this flag is set, the memory is registered in GSP
+//
+#define MEMDESC_FLAGS_REGISTERED_TO_GSP             NVBIT64(42)
+
+//
+// Indicates that this memdesc is tracking a client sysmem allocation, as
+// opposed to an RM internal sysmem allocation
+//
+#define MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT        NVBIT64(44)
+
+//
+// The following is a special use case for sharing memory between
+// the GPU and a WSL client. There is no IOMMU-compliant support
+// currently for this, so a WAR is required for r515. The intent
+// is to remove this by r525.
+// +#define MEMDESC_FLAGS_WSL_SHARED_MEMORY NVBIT64(46) + +#endif // _MEMDESC_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MEM_DESC_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c new file mode 100644 index 0000000..089c0e1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c @@ -0,0 +1,428 @@ +#define NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mem_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x22ad47 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_MemoryManager(MemoryManager*, RmHalspecOwner* ); +void __nvoc_init_funcTable_MemoryManager(MemoryManager*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager*, RmHalspecOwner* ); +void __nvoc_init_dataField_MemoryManager(MemoryManager*, RmHalspecOwner* ); +void __nvoc_dtor_MemoryManager(MemoryManager*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_MemoryManager = { + /*pClassDef=*/ &__nvoc_class_def_MemoryManager, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MemoryManager, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MemoryManager = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_MemoryManager_MemoryManager, + &__nvoc_rtti_MemoryManager_OBJENGSTATE, + &__nvoc_rtti_MemoryManager_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MemoryManager), + /*classId=*/ classId(MemoryManager), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MemoryManager", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MemoryManager, + /*pCastInfo=*/ &__nvoc_castinfo_MemoryManager, + /*pExportInfo=*/ &__nvoc_export_info_MemoryManager +}; + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, 
(struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateInitLocked(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_memmgrStateDestroy(POBJGPU pGpu, struct MemoryManager *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_memmgrInitMissing(POBJGPU pGpu, struct MemoryManager *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreInitLocked(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrGetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, 
(struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrSetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrConstructEngine(POBJGPU pGpu, struct MemoryManager *pEngstate, ENGDESCRIPTOR arg0) { + return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_memmgrIsPresent(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_MemoryManager(MemoryManager *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal field -- bFbRegionsSupported + if (0) + { + } + // default + else + { + pThis->bFbRegionsSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bPmaEnabled + if (0) + { + } + // default + else + { + pThis->bPmaEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bClientPageTablesPmaManaged + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bClientPageTablesPmaManaged = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bClientPageTablesPmaManaged = ((NvBool)(0 != 0)); + } + + // Hal field -- bScanoutSysmem + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bScanoutSysmem = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bScanoutSysmem = ((NvBool)(0 != 0)); + } + + // Hal field -- bDisallowSplitLowerMemory + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bDisallowSplitLowerMemory = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bDisallowSplitLowerMemory = ((NvBool)(0 != 0)); + } + + // Hal field -- bSmallPageCompression + if (0) + { + } + // default + else + { + pThis->bSmallPageCompression = ((NvBool)(0 != 0)); + } + + // 
Hal field -- bSysmemCompressionSupportDef + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bSysmemCompressionSupportDef = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bSysmemCompressionSupportDef = ((NvBool)(0 != 0)); + } + + // Hal field -- bBug2301372IncreaseRmReserveMemoryWar + if (0) + { + } + // default + else + { + pThis->bBug2301372IncreaseRmReserveMemoryWar = ((NvBool)(0 != 0)); + } + + pThis->bEnableDynamicPageOfflining = ((NvBool)(0 != 0)); + + // Hal field -- bVgpuPmaSupport + if (0) + { + } + // default + else + { + pThis->bVgpuPmaSupport = ((NvBool)(0 != 0)); + } + + // Hal field -- bAllowNoncontiguousAllocation + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->bAllowNoncontiguousAllocation = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bAllowNoncontiguousAllocation = ((NvBool)(0 != 0)); + } + + // Hal field -- bScrubOnFreeEnabled + if (0) + { + } + // default + else + { + pThis->bScrubOnFreeEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bFastScrubberEnabled + if (0) + { + } + // default + else + { + pThis->bFastScrubberEnabled = ((NvBool)(0 != 0)); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_MemoryManager_fail_OBJENGSTATE; + __nvoc_init_dataField_MemoryManager(pThis, pRmhalspecowner); + goto __nvoc_ctor_MemoryManager_exit; // Success + +__nvoc_ctor_MemoryManager_fail_OBJENGSTATE: +__nvoc_ctor_MemoryManager_exit: + + return status; +} + +static void __nvoc_init_funcTable_MemoryManager_1(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__memmgrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState; + + pThis->__memmgrStateLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateLoad; + + pThis->__memmgrStateUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateUnload; + + pThis->__memmgrStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateInitLocked; + + pThis->__memmgrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad; + + pThis->__memmgrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload; + + pThis->__memmgrStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateDestroy; + + pThis->__memmgrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreUnload; + + pThis->__memmgrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked; + + pThis->__memmgrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_memmgrInitMissing; + + pThis->__memmgrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreInitLocked; + + pThis->__memmgrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked; + + 
pThis->__memmgrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrGetTunableState; + + pThis->__memmgrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState; + + pThis->__memmgrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState; + + pThis->__memmgrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad; + + pThis->__memmgrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState; + + pThis->__memmgrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrSetTunableState; + + pThis->__memmgrConstructEngine__ = &__nvoc_thunk_OBJENGSTATE_memmgrConstructEngine; + + pThis->__memmgrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_memmgrIsPresent; +} + +void __nvoc_init_funcTable_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_MemoryManager_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_MemoryManager = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_MemoryManager(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + MemoryManager *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(MemoryManager)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MemoryManager)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MemoryManager); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_MemoryManager(pThis, pRmhalspecowner); + status = __nvoc_ctor_MemoryManager(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_MemoryManager_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MemoryManager_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_MemoryManager(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h new file mode 100644 index 0000000..d97440e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h @@ -0,0 +1,2241 @@ +#ifndef _G_MEM_MGR_NVOC_H_ +#define _G_MEM_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_mem_mgr_nvoc.h"
+
+#ifndef MEM_MGR_H
+#define MEM_MGR_H
+
+#include "core/core.h"
+#include "core/info_block.h"
+#include "gpu/eng_state.h"
+
+#include "gpu/gpu.h"
+
+#include "mem_mgr/mem.h"
+
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+#include "containers/map.h"
+#include "gpu/mem_mgr/heap_base.h"
+#include "mem_mgr/vaspace.h"
+
+typedef volatile struct _cl906f_tag1 Nv906fControl;
+typedef struct KERNEL_MIG_GPU_INSTANCE KERNEL_MIG_GPU_INSTANCE;
+
+typedef struct
+{
+    MEMORY_DESCRIPTOR *pMemDesc;
+    NvU64 offset;
+} TRANSFER_SURFACE;
+
+// Memory transfer engine types.
+typedef enum
+{
+    TRANSFER_TYPE_PROCESSOR = 0,  // CPU/GSP/DPU depending on execution context
+    TRANSFER_TYPE_GSP_DMA,        // DMA engine internal to GSP
+    TRANSFER_TYPE_CE,             // Copy Engine HW
+} TRANSFER_TYPE;
+
+#define TRANSFER_FLAGS_NONE                   0
+#define TRANSFER_FLAGS_DEFER_FLUSH            NVBIT32(0) // Applicable only for write operations
+#define TRANSFER_FLAGS_SHADOW_ALLOC           NVBIT32(1) // Applicable only for non-PROCESSOR transfers
+#define TRANSFER_FLAGS_SHADOW_INIT_MEM        NVBIT32(2) // Applicable only for non-PROCESSOR transfers
+#define TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING NVBIT32(3) // Require long-lived PROCESSOR mapping
+#define TRANSFER_FLAGS_DESTROY_MAPPING        NVBIT32(4) // Destroy any cached mappings when complete
+
+typedef struct
+{
+    NvU32 bar1Size;
+    NvU32 bar1AvailSize;
+    NvU32 bankSwizzleAlignment;
+    NvU32 bar1MaxContigAvailSize;
+} GETBAR1INFO, *PGETBAR1INFO;
+
+//
+// RM default PTE kind
+// Bug #2242255: the RM default kind was introduced to allow sharing memory
+// between different architectures, especially between Turing+ and pre-Turing chips.
+//
+#define RM_DEFAULT_PTE_KIND 0x100
+
+typedef enum
+{
+    FB_IS_KIND_Z,              // Kind is a Z buffer
+    FB_IS_KIND_ZBC,            // Zero bandwidth clears
+    FB_IS_KIND_ZBC_ALLOWS_1,   // ZBC with 1 bit of tag
+    FB_IS_KIND_ZBC_ALLOWS_2,   // ZBC with 2 bits of tag
+    FB_IS_KIND_ZBC_ALLOWS_4,   // ZBC with 4 bits of tag
+    FB_IS_KIND_COMPRESSIBLE,   // Any compressible kind
+    FB_IS_KIND_COMPRESSIBLE_1, // Compressible with 1 comp tag bit
+    FB_IS_KIND_COMPRESSIBLE_2, // Compressible with 2 comp tag bits
+    FB_IS_KIND_COMPRESSIBLE_4, // Compressible with 4 comp tag bits
+    FB_IS_KIND_SUPPORTED,      // Kind is supported
+    FB_IS_KIND_DISALLOW_PLC,   // Kind disallows PLC
+} FB_IS_KIND_OP;
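A TRANSFER_SURFACE, declared above, is just a (descriptor, byte offset) pair; the TRANSFER_TYPE and TRANSFER_FLAGS_* values tell the transfer layer how to move the bytes. A hedged sketch, assuming a memmgrMemWrite(pMemoryManager, pDst, pBuf, size, flags)-shaped entry point as used elsewhere in this tree; pMemDesc and pMemoryManager are assumed valid:

// Destination: 4 KiB into an existing allocation.
TRANSFER_SURFACE dst = { 0 };
dst.pMemDesc = pMemDesc;
dst.offset   = 0x1000;

NvU32 payload = 0xcafe;

// Defer the flush; a caller batching several writes can flush once at the end.
NV_STATUS status = memmgrMemWrite(pMemoryManager, &dst, &payload,
                                  sizeof(payload), TRANSFER_FLAGS_DEFER_FLUSH);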
+// Surface compression parameters
+typedef struct COMPR_INFO
+{
+    // Surface kind; if not compressed, the following parameters are ignored
+    NvU32  kind;
+
+    // Compression page shift; 0 if kind is uncompressed
+    NvU32  compPageShift;
+
+    //
+    // Are comptags determined per page by PA?
+    // If set, the following parameters are ignored
+    //
+    NvBool bPhysBasedComptags;
+
+    // see GMMU_COMPR_INFO
+    NvU32  compPageIndexLo;
+    NvU32  compPageIndexHi;
+    NvU32  compTagLineMin;
+    NvU32  compTagLineMultiplier;
+} COMPR_INFO;
+
+//
+// Fixed Channel Properties for Memutils Object
+//
+
+typedef NV_STATUS FbScrubCallback(OBJGPU *);
+
+#define BLOCK_INDEX_FROM_ADDR(addr,size)      ((NvU32)((addr) >> size))
+#define BLOCK_ADDR_FROM_INDEX(idx,size)       (((NvU64)(idx)) << size)
+
+#define MEMUTILS_SIZE_PER_BLOCK_INBYTES       (0x68)
+#define MEMUTILS_TOTAL_SIZE_PER_BLOCK_INBYTES (0x60) // (COPY + PB SEMA)
+#define MEMUTILS_TD_BLOCKS_PER_CHUNK          0x40
+
+#define MEMUTILS_NUM_PAYLOAD_SEMAPHORES       (2)
+#define MEMUTILS_NUM_GPFIFIO_ENTRIES          (32)
+// PB size should be a multiple of the chunk size
+#define MEMUTILS_CHANNEL_PB_SIZE              (0x10 * MEMUTILS_SIZE_PER_BLOCK_INBYTES * \
+                                               MEMUTILS_TD_BLOCKS_PER_CHUNK)
+#define MEMUTILS_CHANNEL_SEMAPHORE_SIZE       (4 * MEMUTILS_NUM_PAYLOAD_SEMAPHORES)
+#define MEMUTILS_CHANNEL_NOTIFIER_SIZE        (sizeof(NvNotification) * 1)
+
+// Offset and line length should be a multiple of 4KB
+#define MEMUTIL_SCRUB_OFFSET_ALIGNMENT        (4 * 1024)
+#define MEMUTIL_SCRUB_LINE_LENGTH_ALIGNMENT   (4 * 1024)
+
+typedef enum {
+    SCRUBBER_CHANNEL,
+    FAST_SCRUBBER_CHANNEL,
+    COPY_CHANNEL,
+    MAX_CHANNEL_TYPE
+} CHANNEL_KIND;
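Note that the size argument of the BLOCK_*_FROM_* macros above is a shift count, not a byte size. A quick worked example (editorial, not part of the patch) with 4 KiB blocks, i.e. a shift of 12, matching MEMUTIL_SCRUB_OFFSET_ALIGNMENT:

NvU64 addr  = 0x12345000ULL;
NvU32 shift = 12;                                 // 4 KiB blocks

NvU32 idx  = BLOCK_INDEX_FROM_ADDR(addr, shift);  // yields 0x12345
NvU64 base = BLOCK_ADDR_FROM_INDEX(idx, shift);   // yields 0x12345000 again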
+// This will be moved to a channel object next
+typedef struct OBJCHANNEL
+{
+    NvHandle deviceId;            // Device Handle
+    NvHandle physMemId;           // Memory Handle
+    NvHandle channelId;           // Channel Handle
+    NvHandle subdeviceId;         // Subdevice Handle
+    NvHandle errNotifierIdVirt;
+    NvHandle errNotifierIdPhys;
+    NvHandle copyObjectId;
+    NvHandle eventId;
+    NvHandle pushBufferId;
+    NvHandle bitMapSemPhysId;
+    NvHandle bitMapSemVirtId;
+    NvHandle hVASpaceId;          // VASpace handle, when the scrubber is in virtual mode
+    NvHandle hFbAlias;            // Used only for virtual channels
+    NvHandle hFbAliasVA;
+    // to be moved later
+
+    NvU32 channelSize;
+    NvU32 channelNumGpFifioEntries;
+    NvU32 channelPbSize;
+    NvU32 channelNotifierSize;
+    NvU32 methodSizePerBlock;
+    NvU32 semaOffset;
+    NvU32 finishPayloadOffset;
+    NvU32 finishPayload;
+    NvBool isChannelSynchronized;
+    NvBool isProgressChecked;
+
+    //
+    // RM internal channels are created as privileged channels (physical address access) by default.
+    // For MMU Bug 2739505, we need to switch to using channels in non-privileged mode.
+    //
+    NvBool bUseVasForCeCopy;      // set to NV_TRUE when the scrubber operates on virtual addresses
+    struct RsClient *pRsClient;
+    struct OBJVASPACE *pVAS;
+    NvU32 engineType;
+    NvU64 startFbOffset;
+    NvU64 fbSize;
+    NvU64 fbAliasVA;
+    NvU64 vaStartOffset;
+    // to be moved to a separate object later
+
+    NvU32 *pBlockPendingState;
+    NvU32 *pBlockDoneState;
+    NvU32 blockCount;
+    NvHandle hClient;
+    NvHandle hLiteClient;         // Used only for fifo lite channels
+    NvBool bClientAllocated;
+    NvU64 pbGpuVA;
+    NvU64 pbGpuBitMapVA;
+    NvU64 pbGpuNotifierVA;
+    NvU8 *pbCpuVA;
+    NvU8 *pbBitMapVA;
+    Nv906fControl *pControlGPFifo;
+    NvU32 classEngineID;
+    NVOS10_EVENT_KERNEL_CALLBACK_EX callback;
+    NvU32 state;
+    NvU32 hTdCopyClass;
+    NvU32 minBlockSize;
+    NvU32 maxBlockSize;
+    NvU32 channelPutOffset;
+    NvU8 blockShift;
+    NvU32 lastPayloadPushed;
+    NvBool isChannelActive;
+    NvU32 workSubmitToken;
+    //
+    // Work submit token read from notifier memory.
+    //
+    NvNotification *pTokenFromNotifier;
+    NvU32 lastSubmittedEntry;
+    NvHandle lastAllocatedHandle;
+    CHANNEL_KIND type;
+
+    // Used for Volta+
+    NvHandle doorbellRegionHandle;
+    NvU8 *pDoorbellRegion;
+    NvU32 *pDoorbellRegisterOffset;
+    NvBool bUseDoorbellRegister;
+    NvHandle hUserD;
+    NvBool bClientUserd;
+
+    //
+    // Used only by the suspend/resume channel.
+    // This denotes whether the channel manages the BAR2 VASpace.
+    // Suspend/resume happens well before the regular BAR2 init.
+    // Channel instmem has to be stored in vidmem due to a 40-bit restriction in host on Pascal+ chips,
+    // so the suspend/resume channel has to set up BAR2 for accessing vidmem.
+    //
+    NvBool bManageBAR2;
+    OBJGPU *pGpu;
+    struct KernelCE *pKCe;
+
+    // Used by the partition scrubber
+    KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
+    NvHandle hPartitionRef;
+} OBJCHANNEL, *POBJCHANNEL;
+
+#define NV_METHOD(SubCh, Method, Num)                 \
+    (DRF_DEF(906F, _DMA_INCR, _OPCODE, _VALUE) |      \
+     DRF_NUM(906F, _DMA_INCR, _COUNT, Num) |          \
+     DRF_NUM(906F, _DMA_INCR, _SUBCHANNEL, SubCh) |   \
+     DRF_NUM(906F, _DMA_INCR, _ADDRESS, (Method) >> 2))
+
+#define PUSH_DATA(Data) MEM_WR32(ptr++, (Data))
+
+#define PUSH_PAIR(SubCh, Method, Data)                \
+    do                                                \
+    {                                                 \
+        PUSH_DATA(NV_METHOD(SubCh, (Method), 1));     \
+        PUSH_DATA((Data));                            \
+    } while (0)
+
+//-----------------------------------------------------------------------------
+
+typedef struct
+{
+    NvU32 lastSubmittedBlock;
+    NvBool isTopDownScrubber;
+    NvBool isActive;
+    NvU32 scrubberState;
+    NvU32 currentFbRegion;
+    NvU32 startBlock;
+    NvU32 endBlock;
+    NvU32 *pPendingBitMap;
+    NvU32 *pDoneBitMap;
+    NvU32 blockCount;
+    struct OBJCE *pCe;
+    NvBool bCeInUse;
+    OBJCHANNEL tdHeapState;
+    OBJCHANNEL allocationScrubberState;
+} OBJSCRUB, *POBJSCRUB;
+
+typedef struct
+{
+    NvU64 base;                // Base/start address of the region
+    NvU64 limit;               // Last/end address of the region
+    NvU64 rsvdSize;            // Memory RM may be required to allocate in this region
+    NvBool bRsvdRegion;        // Reserved region -- not publicly usable
+    NvU32 performance;         // Relative performance; higher is faster
+    NvBool bSupportCompressed; // Supports compressed kinds
+    NvBool bSupportISO;        // Supports ISO (display, cursor, video) surfaces
+    NvBool bProtected;         // Represents a protected region of memory
+    NvBool bInternalHeap;      // PMA: used for internal RM allocations
+    NvBool bLostOnSuspend;     // Not required to be saved during S/R
+} FB_REGION_DESCRIPTOR, *PFB_REGION_DESCRIPTOR;
+
+#define MAX_FB_REGIONS 16
+
+// Maximum number of contexts created for the WHQL test WDDM Max Contexts
+#define WHQL_TEST_MAX_CONTEXTS 100
+
+// Object 'get' macros for FB-relative object retrievals.
+#define MEMORY_MANAGER_GET_HEAP(p) ((p)->pHeap)
+
+typedef struct _def_fb_mem_node
+{
+    struct _def_fb_mem_node *pNext;
+
+    NvBool bFreeDescriptor;
+    PMEMORY_DESCRIPTOR pMemDesc;
+
+} FB_MEM_NODE, *PFB_MEM_NODE;
+
+// defines for MemoryManager::fbsrReservedRanges
+#define MAX_FBSR_RESERVED_REGIONS 2  // Max. memory descriptors for RM instance memory
+#define FBSR_RESERVED_INST_MEMORY_BEFORE_BAR2PTE 0
+#define FBSR_RESERVED_INST_MEMORY_AFTER_BAR2PTE  1
+
+/*!
+ * MemoryManager provides the root memory management of GPU video memory.
+ * External entities might provide suballocators on top of MemoryManager.
+ *
+ * MemoryManager can have static information on the memory system (e.g., the
+ * list of kinds), but MemoryManager does not have direct access to the GPU
+ * memory system (e.g., BAR0 registers). It relies on KernelMemorySystem for
+ * operations on the memory system.
+ *
+ * MemoryManager is instantiated in the VGPU guest/GSP client as well as the
+ * VGPU host/GSP-RM.
+ */
+
+#define MEM_MGR_STUB_ORIN(...) { return __VA_ARGS__; }
+
+#ifdef NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct RM_POOL_ALLOC_MEM_RESERVE_INFO;
+
+struct MIG_MEMORY_PARTITIONING_INFO {
+    struct NV_RANGE partitionableMemoryRange;
+    struct NV_RANGE partitionableBar1Range;
+    NvHandle hClient;
+    NvHandle hDevice;
+    NvHandle hSubdevice;
+    NvBool bNonMIGTopLevelScrubber;
+};
+
+
+struct MemoryManager {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
+    struct Object *__nvoc_pbase_Object;
+    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
+    struct MemoryManager *__nvoc_pbase_MemoryManager;
+    NV_STATUS (*__memmgrReconcileTunableState__)(POBJGPU, struct MemoryManager *, void *);
+    NV_STATUS (*__memmgrStateLoad__)(POBJGPU, struct MemoryManager *, NvU32);
+    NV_STATUS (*__memmgrStateUnload__)(POBJGPU, struct MemoryManager *, NvU32);
+    NV_STATUS (*__memmgrStateInitLocked__)(POBJGPU, struct MemoryManager *);
+    NV_STATUS (*__memmgrStatePreLoad__)(POBJGPU, struct MemoryManager *, NvU32);
+    NV_STATUS (*__memmgrStatePostUnload__)(POBJGPU, struct MemoryManager *, NvU32);
+    void (*__memmgrStateDestroy__)(POBJGPU, struct MemoryManager *);
+    NV_STATUS (*__memmgrStatePreUnload__)(POBJGPU, struct MemoryManager *, NvU32);
+    NV_STATUS (*__memmgrStateInitUnlocked__)(POBJGPU, struct MemoryManager *);
+    void (*__memmgrInitMissing__)(POBJGPU, struct MemoryManager *);
+    NV_STATUS (*__memmgrStatePreInitLocked__)(POBJGPU, struct MemoryManager *);
+    NV_STATUS (*__memmgrStatePreInitUnlocked__)(POBJGPU, struct MemoryManager *);
+    NV_STATUS (*__memmgrGetTunableState__)(POBJGPU, struct MemoryManager *, void *);
+    NV_STATUS (*__memmgrCompareTunableState__)(POBJGPU, struct MemoryManager *, void *, void *);
+    void (*__memmgrFreeTunableState__)(POBJGPU, struct MemoryManager *, void *);
+    NV_STATUS (*__memmgrStatePostLoad__)(POBJGPU, struct MemoryManager *, NvU32);
+    NV_STATUS (*__memmgrAllocTunableState__)(POBJGPU, struct MemoryManager *, void **);
+    NV_STATUS (*__memmgrSetTunableState__)(POBJGPU, struct MemoryManager *, void *);
+    NV_STATUS (*__memmgrConstructEngine__)(POBJGPU, struct MemoryManager *, ENGDESCRIPTOR);
+    NvBool (*__memmgrIsPresent__)(POBJGPU, struct MemoryManager *);
NvBool bFbsrWddmModeEnabled; + NvBool bFbRegionsSupported; + NvBool bPmaSupportedOnPlatform; + NvBool bPmaEnabled; + NvBool bPmaInitialized; + NvBool bPmaForcePersistence; + NvBool bPmaAddrTree; + NvBool bClientPageTablesPmaManaged; + NvBool bScanoutSysmem; + NvBool bMixedDensityFbp; + NvBool bPreferSlowRegion; + NvBool bPersistentStandbyBuffer; + NvBool bEnableFbsrPagedDma; + NvBool bDisallowSplitLowerMemory; + NvBool bIgnoreUpperMemory; + NvBool bLddmReservedMemoryCalculated; + NvBool bSmallPageCompression; + NvBool bSysmemCompressionSupportDef; + NvBool bBug1698088IncreaseRmReserveMemoryWar; + NvBool bBug2301372IncreaseRmReserveMemoryWar; + NvBool bEnableFbsrFileMode; + NvBool bEnableDynamicPageOfflining; + NvBool bVgpuPmaSupport; + NvBool bAllowNoncontiguousAllocation; + NvBool bEccInterleavedVidmemScrub; + NvBool bScrubberInitialized; + NvBool bAllowSysmemHugePages; + NvBool bEccScrubOverride; + NvU32 sysmemPageSize; + struct Heap *pHeap; + NvBool bScrubOnFreeEnabled; + NvBool bFastScrubberEnabled; + NvBool bDisableAsyncScrubforMods; + NvBool bUseVasForCeMemoryOps; + NvBool bRmExecutingEccScrub; + NvBool bBug1441072EccScrubWar; + NvU64 heapStartOffset; + NvU64 rsvdMemoryBase; + NvU32 rsvdMemorySize; + struct RM_POOL_ALLOC_MEM_RESERVE_INFO *pPageLevelReserve; + struct MIG_MEMORY_PARTITIONING_INFO MIGMemoryPartitioningInfo; + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubdevice; +}; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager; + +#define __staticCast_MemoryManager(pThis) \ + ((pThis)->__nvoc_pbase_MemoryManager) + +#ifdef __nvoc_mem_mgr_h_disabled +#define __dynamicCast_MemoryManager(pThis) ((MemoryManager*)NULL) +#else //__nvoc_mem_mgr_h_disabled +#define __dynamicCast_MemoryManager(pThis) \ + ((MemoryManager*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MemoryManager))) +#endif //__nvoc_mem_mgr_h_disabled + +#define PDB_PROP_MEMMGR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_MEMMGR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
+
+NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager**, Dynamic*, NvU32);
+#define __objCreate_MemoryManager(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_MemoryManager((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+#define memmgrReconcileTunableState(pGpu, pEngstate, pTunableState) memmgrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
+#define memmgrStateLoad(pGpu, pEngstate, arg0) memmgrStateLoad_DISPATCH(pGpu, pEngstate, arg0)
+#define memmgrStateUnload(pGpu, pEngstate, arg0) memmgrStateUnload_DISPATCH(pGpu, pEngstate, arg0)
+#define memmgrStateInitLocked(pGpu, pEngstate) memmgrStateInitLocked_DISPATCH(pGpu, pEngstate)
+#define memmgrStatePreLoad(pGpu, pEngstate, arg0) memmgrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
+#define memmgrStatePostUnload(pGpu, pEngstate, arg0) memmgrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
+#define memmgrStateDestroy(pGpu, pEngstate) memmgrStateDestroy_DISPATCH(pGpu, pEngstate)
+#define memmgrStatePreUnload(pGpu, pEngstate, arg0) memmgrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
+#define memmgrStateInitUnlocked(pGpu, pEngstate) memmgrStateInitUnlocked_DISPATCH(pGpu, pEngstate)
+#define memmgrInitMissing(pGpu, pEngstate) memmgrInitMissing_DISPATCH(pGpu, pEngstate)
+#define memmgrStatePreInitLocked(pGpu, pEngstate) memmgrStatePreInitLocked_DISPATCH(pGpu, pEngstate)
+#define memmgrStatePreInitUnlocked(pGpu, pEngstate) memmgrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
+#define memmgrGetTunableState(pGpu, pEngstate, pTunableState) memmgrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
+#define memmgrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) memmgrCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2)
+#define memmgrFreeTunableState(pGpu, pEngstate, pTunableState) memmgrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
+#define memmgrStatePostLoad(pGpu, pEngstate, arg0) memmgrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
+#define memmgrAllocTunableState(pGpu, pEngstate, ppTunableState) memmgrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState)
+#define memmgrSetTunableState(pGpu, pEngstate, pTunableState) memmgrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
+#define memmgrConstructEngine(pGpu, pEngstate, arg0) memmgrConstructEngine_DISPATCH(pGpu, pEngstate, arg0)
+#define memmgrIsPresent(pGpu, pEngstate) memmgrIsPresent_DISPATCH(pGpu, pEngstate)
+static inline NvU32 memmgrDeterminePageSize_4a4dee(struct MemoryManager *pMemoryManager, NvHandle hClient, NvU64 memSize, NvU32 memFormat, NvU32 pageFormatFlags, NvU32 *pRetAttr, NvU32 *pRetAttr2) {
+    return 0;
+}
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvU32 memmgrDeterminePageSize(struct MemoryManager *pMemoryManager, NvHandle hClient, NvU64 memSize, NvU32 memFormat, NvU32 pageFormatFlags, NvU32 *pRetAttr, NvU32 *pRetAttr2) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return 0;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrDeterminePageSize(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) memmgrDeterminePageSize_4a4dee(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrDeterminePageSize_HAL(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) memmgrDeterminePageSize(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2)
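A note on the generated stubs from here on (editorial, not part of the patch): each memmgr entry point expands to the same four-part pattern. First comes a HAL-default stub whose hashed suffix encodes its canned behavior (judging from the bodies in this header: _4a4dee returns 0, _56cd7a returns NV_OK, _46f6a7 returns NV_ERR_NOT_SUPPORTED, _491d52 returns NV_FALSE, _b3696a is an empty void stub, and _f2d351, _92bfc3, and _5baef9 assert via NV_ASSERT_PRECOMP before failing). Then comes a variant guarded by __nvoc_mem_mgr_h_disabled that asserts "MemoryManager was disabled!", then a plain dispatch macro, and finally a _HAL alias macro that forwards to the dispatch name.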
+
+static inline NV_STATUS memmgrScrubInit_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_OK;
+}
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrScrubInit(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrScrubInit(pGpu, pMemoryManager) memmgrScrubInit_56cd7a(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrScrubInit_HAL(pGpu, pMemoryManager) memmgrScrubInit(pGpu, pMemoryManager)
+
+static inline NV_STATUS memmgrScrubHandlePostSchedulingEnable_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NV_STATUS memmgrScrubHandlePostSchedulingEnable(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrScrubHandlePostSchedulingEnable(pGpu, pMemoryManager) memmgrScrubHandlePostSchedulingEnable_46f6a7(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrScrubHandlePostSchedulingEnable_HAL(pGpu, pMemoryManager) memmgrScrubHandlePostSchedulingEnable(pGpu, pMemoryManager)
+
+static inline void memmgrGetScrubState_f2d351(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvBool *arg2) {
+    NV_ASSERT_PRECOMP(0);
+}
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrGetScrubState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvBool *arg2) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrGetScrubState(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrGetScrubState_f2d351(pGpu, pMemoryManager, arg0, arg1, arg2)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrGetScrubState_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrGetScrubState(pGpu, pMemoryManager, arg0, arg1, arg2)
+
+static inline void memmgrScrubInternalRegions_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return;
+}
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline void memmgrScrubInternalRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrScrubInternalRegions(pGpu, pMemoryManager) memmgrScrubInternalRegions_b3696a(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrScrubInternalRegions_HAL(pGpu, pMemoryManager) memmgrScrubInternalRegions(pGpu, pMemoryManager)
+
+static inline NvBool memmgrEccScrubInProgress_491d52(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    return ((NvBool)(0 != 0));
+}
+
+#ifdef __nvoc_mem_mgr_h_disabled
+static inline NvBool memmgrEccScrubInProgress(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) {
+    NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_mem_mgr_h_disabled
+#define memmgrEccScrubInProgress(pGpu, pMemoryManager) memmgrEccScrubInProgress_491d52(pGpu, pMemoryManager)
+#endif //__nvoc_mem_mgr_h_disabled
+
+#define memmgrEccScrubInProgress_HAL(pGpu, pMemoryManager) memmgrEccScrubInProgress(pGpu, pMemoryManager)
+
+static inline void memmgrAsyncScrubRegion_f2d351(OBJGPU *pGpu, struct
MemoryManager *pMemoryManager, NvU64 arg0, NvU64 arg1) { + NV_ASSERT_PRECOMP(0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrAsyncScrubRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg0, NvU64 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAsyncScrubRegion(pGpu, pMemoryManager, arg0, arg1) memmgrAsyncScrubRegion_f2d351(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAsyncScrubRegion_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrAsyncScrubRegion(pGpu, pMemoryManager, arg0, arg1) + +static inline NV_STATUS memmgrScrubHandlePreSchedulingDisable_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubHandlePreSchedulingDisable(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubHandlePreSchedulingDisable(pGpu, pMemoryManager) memmgrScrubHandlePreSchedulingDisable_46f6a7(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubHandlePreSchedulingDisable_HAL(pGpu, pMemoryManager) memmgrScrubHandlePreSchedulingDisable(pGpu, pMemoryManager) + +static inline void memmgrScrubDestroy_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubDestroy(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubDestroy(pGpu, pMemoryManager) memmgrScrubDestroy_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubDestroy_HAL(pGpu, pMemoryManager) memmgrScrubDestroy(pGpu, pMemoryManager) + +static inline void memmgrScrubMemory_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0, NvU64 arg1) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0, NvU64 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubMemory(pGpu, pMemoryManager, arg0, arg1) memmgrScrubMemory_b3696a(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubMemory_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrScrubMemory(pGpu, pMemoryManager, arg0, arg1) + +static inline NV_STATUS memmgrMemUtilsMemSetBlocking_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSetBlocking(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSetBlocking(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrMemUtilsMemSetBlocking_56cd7a(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSetBlocking_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrMemUtilsMemSetBlocking(pGpu, pMemoryManager, arg0, arg1, arg2) + +static inline NV_STATUS memmgrMemUtilsMemSet_56cd7a(OBJGPU *pGpu, struct 
MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2, NvU32 arg3, NvU32 *arg4) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSet(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2, NvU32 arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSet(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) memmgrMemUtilsMemSet_56cd7a(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSet_HAL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) memmgrMemUtilsMemSet(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) + +static inline NV_STATUS memmgrMemUtilsMemSetBatched_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSetBatched(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSetBatched(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrMemUtilsMemSetBatched_56cd7a(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSetBatched_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrMemUtilsMemSetBatched(pGpu, pMemoryManager, arg0, arg1, arg2) + +static inline NV_STATUS memmgrMemUtilsMemCopyBatched_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NV_ADDRESS_SPACE arg2, NvU32 arg3, RmPhysAddr arg4, NV_ADDRESS_SPACE arg5, NvU32 arg6, NvU64 arg7) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemCopyBatched(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NV_ADDRESS_SPACE arg2, NvU32 arg3, RmPhysAddr arg4, NV_ADDRESS_SPACE arg5, NvU32 arg6, NvU64 arg7) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemCopyBatched(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsMemCopyBatched_56cd7a(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemCopyBatched_HAL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsMemCopyBatched(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + +static inline NV_STATUS memmgrMemUtilsAllocateEccScrubber_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsAllocateEccScrubber(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsAllocateEccScrubber(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccScrubber_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsAllocateEccScrubber_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccScrubber(pGpu, 
pMemoryManager, arg0) + +static inline NV_STATUS memmgrMemUtilsAllocateEccAllocScrubber_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsAllocateEccAllocScrubber(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsAllocateEccAllocScrubber(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccAllocScrubber_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsAllocateEccAllocScrubber_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccAllocScrubber(pGpu, pMemoryManager, arg0) + +static inline NV_STATUS memmgrMemUtilsChannelInitialize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsChannelInitialize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsChannelInitialize(pGpu, pMemoryManager, arg0) memmgrMemUtilsChannelInitialize_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsChannelInitialize(pGpu, pMemoryManager, arg0) + +static inline NV_STATUS memmgrMemUtilsCopyEngineInitialize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsCopyEngineInitialize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsCopyEngineInitialize(pGpu, pMemoryManager, arg0) memmgrMemUtilsCopyEngineInitialize_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsCopyEngineInitialize(pGpu, pMemoryManager, arg0) + +static inline NV_STATUS memmgrMemUtilsGetCopyEngineClass_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pClass) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsGetCopyEngineClass(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pClass) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsGetCopyEngineClass(pGpu, pMemoryManager, pClass) memmgrMemUtilsGetCopyEngineClass_56cd7a(pGpu, pMemoryManager, pClass) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsGetCopyEngineClass_HAL(pGpu, pMemoryManager, pClass) memmgrMemUtilsGetCopyEngineClass(pGpu, pMemoryManager, pClass) + +static inline NV_STATUS memmgrMemUtilsCreateMemoryAlias_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsCreateMemoryAlias(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsCreateMemoryAlias(pGpu, pMemoryManager, arg0) memmgrMemUtilsCreateMemoryAlias_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsCreateMemoryAlias_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsCreateMemoryAlias(pGpu, pMemoryManager, arg0) + +static inline NV_STATUS memmgrAllocHal_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocHal(pGpu, pMemoryManager, pFbAllocInfo) memmgrAllocHal_92bfc3(pGpu, pMemoryManager, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocHal_HAL(pGpu, pMemoryManager, pFbAllocInfo) memmgrAllocHal(pGpu, pMemoryManager, pFbAllocInfo) + +static inline NV_STATUS memmgrFreeHal_92bfc3(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo, PRMTIMEOUT pTimeout) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFreeHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo, PRMTIMEOUT pTimeout) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeHal(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) memmgrFreeHal_92bfc3(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrFreeHal_HAL(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) memmgrFreeHal(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) + +static inline NV_STATUS memmgrUpdateSurfaceCompression_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *arg0, NvBool arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrUpdateSurfaceCompression(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrUpdateSurfaceCompression(pGpu, pMemoryManager, arg0, arg1) memmgrUpdateSurfaceCompression_5baef9(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrUpdateSurfaceCompression_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrUpdateSurfaceCompression(pGpu, pMemoryManager, arg0, arg1) + +static inline NV_STATUS memmgrGetBankPlacementData_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pBankPlacementLowData) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBankPlacementData(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pBankPlacementLowData) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBankPlacementData(pGpu, pMemoryManager, pBankPlacementLowData) memmgrGetBankPlacementData_46f6a7(pGpu, pMemoryManager, pBankPlacementLowData) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBankPlacementData_HAL(pGpu, pMemoryManager, pBankPlacementLowData) memmgrGetBankPlacementData(pGpu, pMemoryManager, 
pBankPlacementLowData) + +static inline void memmgrDirtyForPmTest_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool partialDirty) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrDirtyForPmTest(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool partialDirty) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDirtyForPmTest(pGpu, pMemoryManager, partialDirty) memmgrDirtyForPmTest_b3696a(pGpu, pMemoryManager, partialDirty) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDirtyForPmTest_HAL(pGpu, pMemoryManager, partialDirty) memmgrDirtyForPmTest(pGpu, pMemoryManager, partialDirty) + +static inline NvU32 memmgrGetReservedHeapSizeMb_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetReservedHeapSizeMb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetReservedHeapSizeMb(pGpu, pMemoryManager) memmgrGetReservedHeapSizeMb_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetReservedHeapSizeMb_HAL(pGpu, pMemoryManager) memmgrGetReservedHeapSizeMb(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrAllocDetermineAlignment_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocDetermineAlignment(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocDetermineAlignment(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) memmgrAllocDetermineAlignment_5baef9(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) memmgrAllocDetermineAlignment(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) + +static inline NV_STATUS memmgrInitFbRegionsHal_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitFbRegionsHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitFbRegionsHal(pGpu, pMemoryManager) memmgrInitFbRegionsHal_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInitFbRegionsHal_HAL(pGpu, pMemoryManager) memmgrInitFbRegionsHal(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetMaxContextSize_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetMaxContextSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + 
return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMaxContextSize(pGpu, pMemoryManager) memmgrGetMaxContextSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetMaxContextSize_HAL(pGpu, pMemoryManager) memmgrGetMaxContextSize(pGpu, pMemoryManager) + +static inline void memmgrHandleSizeOverrides_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrHandleSizeOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrHandleSizeOverrides(pGpu, pMemoryManager) memmgrHandleSizeOverrides_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrHandleSizeOverrides_HAL(pGpu, pMemoryManager) memmgrHandleSizeOverrides(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrFinishHandleSizeOverrides_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFinishHandleSizeOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFinishHandleSizeOverrides(pGpu, pMemoryManager) memmgrFinishHandleSizeOverrides_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrFinishHandleSizeOverrides_HAL(pGpu, pMemoryManager) memmgrFinishHandleSizeOverrides(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrGetBAR1InfoForClient_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, PGETBAR1INFO bar1Info) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBAR1InfoForClient(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, PGETBAR1INFO bar1Info) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBAR1InfoForClient(pGpu, pMemoryManager, arg0, bar1Info) memmgrGetBAR1InfoForClient_46f6a7(pGpu, pMemoryManager, arg0, bar1Info) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBAR1InfoForClient_HAL(pGpu, pMemoryManager, arg0, bar1Info) memmgrGetBAR1InfoForClient(pGpu, pMemoryManager, arg0, bar1Info) + +static inline NvU64 memmgrGetFbTaxSize_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetFbTaxSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetFbTaxSize(pGpu, pMemoryManager) memmgrGetFbTaxSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetFbTaxSize_HAL(pGpu, pMemoryManager) memmgrGetFbTaxSize(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetVgpuHostRmReservedFb_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 vgpuTypeId) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetVgpuHostRmReservedFb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 vgpuTypeId) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetVgpuHostRmReservedFb(pGpu, pMemoryManager, vgpuTypeId) memmgrGetVgpuHostRmReservedFb_4a4dee(pGpu, 
pMemoryManager, vgpuTypeId) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetVgpuHostRmReservedFb_HAL(pGpu, pMemoryManager, vgpuTypeId) memmgrGetVgpuHostRmReservedFb(pGpu, pMemoryManager, vgpuTypeId) + +static inline void memmgrScrubRegistryOverrides_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubRegistryOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubRegistryOverrides(pGpu, pMemoryManager) memmgrScrubRegistryOverrides_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubRegistryOverrides_HAL(pGpu, pMemoryManager) memmgrScrubRegistryOverrides(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetRsvdSizeForSr_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetRsvdSizeForSr(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetRsvdSizeForSr(pGpu, pMemoryManager) memmgrGetRsvdSizeForSr_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetRsvdSizeForSr_HAL(pGpu, pMemoryManager) memmgrGetRsvdSizeForSr(pGpu, pMemoryManager) + +static inline NvBool memmgrVerifyDepthSurfaceAttrs_cbe027(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1) { + return ((NvBool)(0 == 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrVerifyDepthSurfaceAttrs(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrVerifyDepthSurfaceAttrs(pMemoryManager, arg0, arg1) memmgrVerifyDepthSurfaceAttrs_cbe027(pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrVerifyDepthSurfaceAttrs_HAL(pMemoryManager, arg0, arg1) memmgrVerifyDepthSurfaceAttrs(pMemoryManager, arg0, arg1) + +static inline NV_STATUS memmgrAllocMemToSaveVgaWorkspace_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **arg0, MEMORY_DESCRIPTOR **arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMemToSaveVgaWorkspace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **arg0, MEMORY_DESCRIPTOR **arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMemToSaveVgaWorkspace(pGpu, pMemoryManager, arg0, arg1) memmgrAllocMemToSaveVgaWorkspace_5baef9(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocMemToSaveVgaWorkspace_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrAllocMemToSaveVgaWorkspace(pGpu, pMemoryManager, arg0, arg1) + +static inline NvBool memmgrComparePhysicalAddresses_108313(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU64 arg3) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0))); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrComparePhysicalAddresses(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU64 arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 
NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrComparePhysicalAddresses(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) memmgrComparePhysicalAddresses_108313(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrComparePhysicalAddresses_HAL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) memmgrComparePhysicalAddresses(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) + +static inline RmPhysAddr memmgrGetInvalidOffset_c732fb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 4294967295U; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline RmPhysAddr memmgrGetInvalidOffset(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetInvalidOffset(pGpu, pMemoryManager) memmgrGetInvalidOffset_c732fb(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetInvalidOffset_HAL(pGpu, pMemoryManager) memmgrGetInvalidOffset(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetAddrSpaceSizeMB_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetAddrSpaceSizeMB(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetAddrSpaceSizeMB(pGpu, pMemoryManager) memmgrGetAddrSpaceSizeMB_474d46(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetAddrSpaceSizeMB_HAL(pGpu, pMemoryManager) memmgrGetAddrSpaceSizeMB(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetUsableMemSizeMB_13cd8d(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_PRECOMP(0); + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUsableMemSizeMB(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUsableMemSizeMB(pGpu, pMemoryManager) memmgrGetUsableMemSizeMB_13cd8d(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUsableMemSizeMB_HAL(pGpu, pMemoryManager) memmgrGetUsableMemSizeMB(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrGetSurfacePhysAttr_dffb6f(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *pMemory, NvU64 *pOffset, NvU32 *pMemAperture, NvU32 *pMemKind, NvU32 *pComprOffset, NvU32 *pComprKind, NvU32 *pLineMin, NvU32 *pLineMax, NvU32 *pZCullId, NvU32 *pGpuCacheAttr, NvU32 *pGpuP2PCacheAttr, NvU64 *contigSegmentSize) { + NV_ASSERT_PRECOMP(0); + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetSurfacePhysAttr(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *pMemory, NvU64 *pOffset, NvU32 *pMemAperture, NvU32 *pMemKind, NvU32 *pComprOffset, NvU32 *pComprKind, NvU32 *pLineMin, NvU32 *pLineMax, NvU32 *pZCullId, NvU32 *pGpuCacheAttr, NvU32 *pGpuP2PCacheAttr, NvU64 *contigSegmentSize) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetSurfacePhysAttr(pGpu, pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) memmgrGetSurfacePhysAttr_dffb6f(pGpu, 
pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetSurfacePhysAttr_HAL(pGpu, pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) memmgrGetSurfacePhysAttr(pGpu, pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) + +static inline NvBool memmgrVerifyComprAttrs_cbe027(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1, NvU32 arg2) { + return ((NvBool)(0 == 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrVerifyComprAttrs(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrVerifyComprAttrs(pMemoryManager, arg0, arg1, arg2) memmgrVerifyComprAttrs_cbe027(pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrVerifyComprAttrs_HAL(pMemoryManager, arg0, arg1, arg2) memmgrVerifyComprAttrs(pMemoryManager, arg0, arg1, arg2) + +static inline NvBool memmgrIsKindCompressible_491d52(struct MemoryManager *pMemoryManager, NvU32 arg0) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKindCompressible(struct MemoryManager *pMemoryManager, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKindCompressible(pMemoryManager, arg0) memmgrIsKindCompressible_491d52(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKindCompressible_HAL(pMemoryManager, arg0) memmgrIsKindCompressible(pMemoryManager, arg0) + +static inline NvBool memmgrIsKindBlocklinear_491d52(struct MemoryManager *pMemoryManager, NvU32 arg0) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKindBlocklinear(struct MemoryManager *pMemoryManager, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKindBlocklinear(pMemoryManager, arg0) memmgrIsKindBlocklinear_491d52(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKindBlocklinear_HAL(pMemoryManager, arg0) memmgrIsKindBlocklinear(pMemoryManager, arg0) + +static inline NvU32 memmgrGetPteKindBl_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetPteKindBl(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindBl(pGpu, pMemoryManager) memmgrGetPteKindBl_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindBl_HAL(pGpu, pMemoryManager) memmgrGetPteKindBl(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetPteKindPitch_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetPteKindPitch(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else 
//__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindPitch(pGpu, pMemoryManager) memmgrGetPteKindPitch_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindPitch_HAL(pGpu, pMemoryManager) memmgrGetPteKindPitch(pGpu, pMemoryManager) + +static inline NvU32 memmgrChooseKindZ_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindZ(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindZ(pGpu, pMemoryManager, arg0) memmgrChooseKindZ_474d46(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindZ_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindZ(pGpu, pMemoryManager, arg0) + +static inline NvU32 memmgrChooseKindCompressZ_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressZ(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressZ(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressZ_474d46(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressZ_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressZ(pGpu, pMemoryManager, arg0) + +static inline NvU32 memmgrChooseKindCompressC_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressC(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressC(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressC_474d46(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressC_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressC(pGpu, pMemoryManager, arg0) + +static inline NvU32 memmgrChooseKindCompressCForMS2_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressCForMS2(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressCForMS2(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressCForMS2_4a4dee(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressCForMS2_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressCForMS2(pGpu, pMemoryManager, arg0) + +static inline NvU32 memmgrGetUncompressedKind_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 kind, NvBool releaseReacquire) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUncompressedKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 kind, NvBool releaseReacquire) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was 
disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUncompressedKind(pGpu, pMemoryManager, kind, releaseReacquire) memmgrGetUncompressedKind_474d46(pGpu, pMemoryManager, kind, releaseReacquire) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUncompressedKind_HAL(pGpu, pMemoryManager, kind, releaseReacquire) memmgrGetUncompressedKind(pGpu, pMemoryManager, kind, releaseReacquire) + +static inline NV_STATUS memmgrGetUncompressedKindForMS2_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetUncompressedKindForMS2(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUncompressedKindForMS2(pGpu, pMemoryManager, arg0, arg1) memmgrGetUncompressedKindForMS2_5baef9(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUncompressedKindForMS2_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrGetUncompressedKindForMS2(pGpu, pMemoryManager, arg0, arg1) + +static inline NV_STATUS memmgrChooseKind_474d46(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0, NvU32 arg1, NvU32 *arg2) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrChooseKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0, NvU32 arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKind(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrChooseKind_474d46(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKind_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrChooseKind(pGpu, pMemoryManager, arg0, arg1, arg2) + +NvBool memmgrIsKind_TU102(struct MemoryManager *pMemoryManager, FB_IS_KIND_OP arg0, NvU32 arg1); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKind(struct MemoryManager *pMemoryManager, FB_IS_KIND_OP arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKind(pMemoryManager, arg0, arg1) memmgrIsKind_TU102(pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKind_HAL(pMemoryManager, arg0, arg1) memmgrIsKind(pMemoryManager, arg0, arg1) + +static inline NvU32 memmgrGetMessageKind_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetMessageKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMessageKind(pGpu, pMemoryManager) memmgrGetMessageKind_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetMessageKind_HAL(pGpu, pMemoryManager) memmgrGetMessageKind(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetDefaultPteKindForNoHandle_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetDefaultPteKindForNoHandle(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + 
NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetDefaultPteKindForNoHandle(pGpu, pMemoryManager) memmgrGetDefaultPteKindForNoHandle_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetDefaultPteKindForNoHandle_HAL(pGpu, pMemoryManager) memmgrGetDefaultPteKindForNoHandle(pGpu, pMemoryManager) + +NvBool memmgrIsSurfaceBlockLinear_TU102(struct MemoryManager *pMemoryManager, Memory *arg0, NvU32 arg1, NvU32 arg2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsSurfaceBlockLinear(struct MemoryManager *pMemoryManager, Memory *arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsSurfaceBlockLinear(pMemoryManager, arg0, arg1, arg2) memmgrIsSurfaceBlockLinear_TU102(pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsSurfaceBlockLinear_HAL(pMemoryManager, arg0, arg1, arg2) memmgrIsSurfaceBlockLinear(pMemoryManager, arg0, arg1, arg2) + +static inline NV_STATUS memmgrGetFlaKind_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetFlaKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetFlaKind(pGpu, pMemoryManager, arg0) memmgrGetFlaKind_46f6a7(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetFlaKind_HAL(pGpu, pMemoryManager, arg0) memmgrGetFlaKind(pGpu, pMemoryManager, arg0) + +static inline NvU32 memmgrGetHwPteKindFromSwPteKind_6a0a80(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) { + return pteKind; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetHwPteKindFromSwPteKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetHwPteKindFromSwPteKind(pGpu, pMemoryManager, pteKind) memmgrGetHwPteKindFromSwPteKind_6a0a80(pGpu, pMemoryManager, pteKind) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, pteKind) memmgrGetHwPteKindFromSwPteKind(pGpu, pMemoryManager, pteKind) + +static inline NvU32 memmgrGetSwPteKindFromHwPteKind_6a0a80(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) { + return pteKind; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetSwPteKindFromHwPteKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetSwPteKindFromHwPteKind(pGpu, pMemoryManager, pteKind) memmgrGetSwPteKindFromHwPteKind_6a0a80(pGpu, pMemoryManager, pteKind) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetSwPteKindFromHwPteKind_HAL(pGpu, pMemoryManager, pteKind) memmgrGetSwPteKindFromHwPteKind(pGpu, pMemoryManager, pteKind) + +static inline void memmgrGetPteKindForScrubber_f2d351(struct MemoryManager *pMemoryManager, NvU32 *arg0) { + NV_ASSERT_PRECOMP(0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetPteKindForScrubber(struct MemoryManager *pMemoryManager, NvU32 *arg0) { + 
NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindForScrubber(pMemoryManager, arg0) memmgrGetPteKindForScrubber_f2d351(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindForScrubber_HAL(pMemoryManager, arg0) memmgrGetPteKindForScrubber(pMemoryManager, arg0) + +static inline NvU32 memmgrGetCtagOffsetFromParams_1a0c2b(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0) { + return -1; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetCtagOffsetFromParams(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetCtagOffsetFromParams(pGpu, pMemoryManager, arg0) memmgrGetCtagOffsetFromParams_1a0c2b(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetCtagOffsetFromParams_HAL(pGpu, pMemoryManager, arg0) memmgrGetCtagOffsetFromParams(pGpu, pMemoryManager, arg0) + +static inline void memmgrSetCtagOffsetInParams_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0, NvU32 arg1) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrSetCtagOffsetInParams(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetCtagOffsetInParams(pGpu, pMemoryManager, arg0, arg1) memmgrSetCtagOffsetInParams_b3696a(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetCtagOffsetInParams_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrSetCtagOffsetInParams(pGpu, pMemoryManager, arg0, arg1) + +static inline NvU32 memmgrDetermineComptag_13cd8d(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0) { + NV_ASSERT_PRECOMP(0); + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrDetermineComptag(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDetermineComptag(pGpu, pMemoryManager, arg0) memmgrDetermineComptag_13cd8d(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDetermineComptag_HAL(pGpu, pMemoryManager, arg0) memmgrDetermineComptag(pGpu, pMemoryManager, arg0) + +static inline void memmgrChannelPushSemaphoreMethodsBlock_b3696a(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU32 **arg3) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrChannelPushSemaphoreMethodsBlock(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU32 **arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChannelPushSemaphoreMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushSemaphoreMethodsBlock_b3696a(pMemoryManager, arg0, arg1, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChannelPushSemaphoreMethodsBlock_HAL(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushSemaphoreMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) + +static inline void memmgrChannelPushAddressMethodsBlock_b3696a(struct MemoryManager *pMemoryManager, NvBool arg0, NvU32 arg1, RmPhysAddr arg2, NvU32 **arg3) { + return; +} + +#ifdef 
__nvoc_mem_mgr_h_disabled +static inline void memmgrChannelPushAddressMethodsBlock(struct MemoryManager *pMemoryManager, NvBool arg0, NvU32 arg1, RmPhysAddr arg2, NvU32 **arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChannelPushAddressMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushAddressMethodsBlock_b3696a(pMemoryManager, arg0, arg1, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChannelPushAddressMethodsBlock_HAL(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushAddressMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) + +static inline NV_STATUS memmgrScrubMapDoorbellRegion_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubMapDoorbellRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubMapDoorbellRegion(pGpu, pMemoryManager, arg0) memmgrScrubMapDoorbellRegion_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubMapDoorbellRegion_HAL(pGpu, pMemoryManager, arg0) memmgrScrubMapDoorbellRegion(pGpu, pMemoryManager, arg0) + +static inline NV_STATUS memmgrSetAllocParameters_dffb6f(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_PRECOMP(0); + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetAllocParameters(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetAllocParameters(pGpu, pMemoryManager, pFbAllocInfo) memmgrSetAllocParameters_dffb6f(pGpu, pMemoryManager, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetAllocParameters_HAL(pGpu, pMemoryManager, pFbAllocInfo) memmgrSetAllocParameters(pGpu, pMemoryManager, pFbAllocInfo) + +static inline void memmgrCalcReservedFbSpaceForUVM_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrCalcReservedFbSpaceForUVM(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCalcReservedFbSpaceForUVM(pGpu, pMemoryManager, arg0) memmgrCalcReservedFbSpaceForUVM_b3696a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrCalcReservedFbSpaceForUVM_HAL(pGpu, pMemoryManager, arg0) memmgrCalcReservedFbSpaceForUVM(pGpu, pMemoryManager, arg0) + +static inline void memmgrCalcReservedFbSpaceHal_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvU64 *arg2) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrCalcReservedFbSpaceHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvU64 *arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCalcReservedFbSpaceHal(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrCalcReservedFbSpaceHal_b3696a(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define 
memmgrCalcReservedFbSpaceHal_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrCalcReservedFbSpaceHal(pGpu, pMemoryManager, arg0, arg1, arg2) + +static inline NvU32 memmgrGetGrHeapReservationSize_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetGrHeapReservationSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetGrHeapReservationSize(pGpu, pMemoryManager) memmgrGetGrHeapReservationSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetGrHeapReservationSize_HAL(pGpu, pMemoryManager) memmgrGetGrHeapReservationSize(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetRunlistEntriesReservedFbSpace_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetRunlistEntriesReservedFbSpace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetRunlistEntriesReservedFbSpace(pGpu, pMemoryManager) memmgrGetRunlistEntriesReservedFbSpace_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetRunlistEntriesReservedFbSpace_HAL(pGpu, pMemoryManager) memmgrGetRunlistEntriesReservedFbSpace(pGpu, pMemoryManager) + +static inline NvU32 memmgrGetUserdReservedFbSpace_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUserdReservedFbSpace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUserdReservedFbSpace(pGpu, pMemoryManager) memmgrGetUserdReservedFbSpace_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUserdReservedFbSpace_HAL(pGpu, pMemoryManager) memmgrGetUserdReservedFbSpace(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrCheckReservedMemorySize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrCheckReservedMemorySize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCheckReservedMemorySize(pGpu, pMemoryManager) memmgrCheckReservedMemorySize_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrCheckReservedMemorySize_HAL(pGpu, pMemoryManager) memmgrCheckReservedMemorySize(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrInitReservedMemory_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitReservedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitReservedMemory(pGpu, pMemoryManager, arg0) memmgrInitReservedMemory_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInitReservedMemory_HAL(pGpu, pMemoryManager, arg0) memmgrInitReservedMemory(pGpu, pMemoryManager, arg0) + 
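+/* + * Illustrative usage sketch (editorial addition, not NVOC output; the + * function name and the rsvdBase parameter are hypothetical). It shows how + * the wrappers above resolve: with the engine compiled in they expand to + * the hash-suffixed defaults (_56cd7a returns NV_OK), and with + * __nvoc_mem_mgr_h_disabled defined they fall back to the assert-and-fail + * inlines instead. Guarded by #if 0 so it is never compiled. + */ +#if 0 +static NV_STATUS exampleInitRsvdFb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 rsvdBase) +{ + NV_STATUS status = memmgrCheckReservedMemorySize_HAL(pGpu, pMemoryManager); + if (status != NV_OK) + return status; + return memmgrInitReservedMemory_HAL(pGpu, pMemoryManager, rsvdBase); +} +#endif + 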
+static inline NV_STATUS memmgrPreInitReservedMemory_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrPreInitReservedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPreInitReservedMemory(pGpu, pMemoryManager) memmgrPreInitReservedMemory_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrPreInitReservedMemory_HAL(pGpu, pMemoryManager) memmgrPreInitReservedMemory(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrReadMmuLock_e133c0(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool *pbIsValid, NvU64 *pMmuLockLo, NvU64 *pMmuLockHi) { + *pbIsValid = ((NvBool)(0 != 0)); + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrReadMmuLock(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool *pbIsValid, NvU64 *pMmuLockLo, NvU64 *pMmuLockHi) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrReadMmuLock(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) memmgrReadMmuLock_e133c0(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrReadMmuLock_HAL(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) memmgrReadMmuLock(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) + +static inline NV_STATUS memmgrBlockMemLockedMemory_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrBlockMemLockedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrBlockMemLockedMemory(pGpu, pMemoryManager) memmgrBlockMemLockedMemory_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrBlockMemLockedMemory_HAL(pGpu, pMemoryManager) memmgrBlockMemLockedMemory(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrInsertUnprotectedRegionAtBottomOfFb_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pSize) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInsertUnprotectedRegionAtBottomOfFb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pSize) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInsertUnprotectedRegionAtBottomOfFb(pGpu, pMemoryManager, pSize) memmgrInsertUnprotectedRegionAtBottomOfFb_56cd7a(pGpu, pMemoryManager, pSize) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInsertUnprotectedRegionAtBottomOfFb_HAL(pGpu, pMemoryManager, pSize) memmgrInsertUnprotectedRegionAtBottomOfFb(pGpu, pMemoryManager, pSize) + +NV_STATUS memmgrInitBaseFbRegions_FWCLIENT(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitBaseFbRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitBaseFbRegions(pGpu, pMemoryManager) memmgrInitBaseFbRegions_FWCLIENT(pGpu, pMemoryManager) +#endif 
//__nvoc_mem_mgr_h_disabled + +#define memmgrInitBaseFbRegions_HAL(pGpu, pMemoryManager) memmgrInitBaseFbRegions(pGpu, pMemoryManager) + +static inline void memmgrGetDisablePlcKind_b3696a(struct MemoryManager *pMemoryManager, NvU32 *pteKind) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetDisablePlcKind(struct MemoryManager *pMemoryManager, NvU32 *pteKind) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetDisablePlcKind(pMemoryManager, pteKind) memmgrGetDisablePlcKind_b3696a(pMemoryManager, pteKind) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetDisablePlcKind_HAL(pMemoryManager, pteKind) memmgrGetDisablePlcKind(pMemoryManager, pteKind) + +static inline void memmgrEnableDynamicPageOfflining_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrEnableDynamicPageOfflining(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrEnableDynamicPageOfflining(pGpu, pMemoryManager) memmgrEnableDynamicPageOfflining_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrEnableDynamicPageOfflining_HAL(pGpu, pMemoryManager) memmgrEnableDynamicPageOfflining(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrSetMemDescPageSize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg0, ADDRESS_TRANSLATION arg1, RM_ATTR_PAGE_SIZE arg2) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetMemDescPageSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg0, ADDRESS_TRANSLATION arg1, RM_ATTR_PAGE_SIZE arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetMemDescPageSize(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrSetMemDescPageSize_56cd7a(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrSetMemDescPageSize(pGpu, pMemoryManager, arg0, arg1, arg2) + +NV_STATUS memmgrSetPartitionableMem_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetPartitionableMem(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetPartitionableMem(pGpu, pMemoryManager) memmgrSetPartitionableMem_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetPartitionableMem_HAL(pGpu, pMemoryManager) memmgrSetPartitionableMem(pGpu, pMemoryManager) + +NV_STATUS memmgrAllocMIGGPUInstanceMemory_PF(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMIGGPUInstanceMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMIGGPUInstanceMemory(pGpu, pMemoryManager, 
swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) memmgrAllocMIGGPUInstanceMemory_PF(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocMIGGPUInstanceMemory_HAL(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) memmgrAllocMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) + +static inline NV_STATUS memmgrGetBlackListPagesForHeap_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBlackListPagesForHeap(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBlackListPagesForHeap(pGpu, pMemoryManager, pHeap) memmgrGetBlackListPagesForHeap_46f6a7(pGpu, pMemoryManager, pHeap) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBlackListPagesForHeap_HAL(pGpu, pMemoryManager, pHeap) memmgrGetBlackListPagesForHeap(pGpu, pMemoryManager, pHeap) + +static inline NV_STATUS memmgrGetBlackListPages_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBlackListPages(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBlackListPages(pGpu, pMemoryManager, pBlAddrs, pCount) memmgrGetBlackListPages_46f6a7(pGpu, pMemoryManager, pBlAddrs, pCount) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBlackListPages_HAL(pGpu, pMemoryManager, pBlAddrs, pCount) memmgrGetBlackListPages(pGpu, pMemoryManager, pBlAddrs, pCount) + +static inline NV_STATUS memmgrDiscoverMIGPartitionableMemoryRange_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct NV_RANGE *pMemoryRange) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrDiscoverMIGPartitionableMemoryRange(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct NV_RANGE *pMemoryRange) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDiscoverMIGPartitionableMemoryRange(pGpu, pMemoryManager, pMemoryRange) memmgrDiscoverMIGPartitionableMemoryRange_46f6a7(pGpu, pMemoryManager, pMemoryRange) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDiscoverMIGPartitionableMemoryRange_HAL(pGpu, pMemoryManager, pMemoryRange) memmgrDiscoverMIGPartitionableMemoryRange(pGpu, pMemoryManager, pMemoryRange) + +static inline NV_STATUS memmgrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return pEngstate->__memmgrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS memmgrStateLoad_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS memmgrStateUnload_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS 
memmgrStateInitLocked_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__memmgrStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreLoad_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS memmgrStatePostUnload_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void memmgrStateDestroy_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + pEngstate->__memmgrStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreUnload_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS memmgrStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__memmgrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void memmgrInitMissing_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + pEngstate->__memmgrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__memmgrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__memmgrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrGetTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return pEngstate->__memmgrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS memmgrCompareTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__memmgrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void memmgrFreeTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + pEngstate->__memmgrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS memmgrStatePostLoad_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS memmgrAllocTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void **ppTunableState) { + return pEngstate->__memmgrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS memmgrSetTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return pEngstate->__memmgrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS memmgrConstructEngine_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, ENGDESCRIPTOR arg0) { + return pEngstate->__memmgrConstructEngine__(pGpu, pEngstate, arg0); +} + +static inline NvBool memmgrIsPresent_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__memmgrIsPresent__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrSavePowerMgmtState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +static inline NV_STATUS memmgrRestorePowerMgmtState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +static inline NV_STATUS memmgrFree(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *arg0, 
NvHandle arg1, NvHandle arg2, NvHandle arg3, NvU32 arg4, MEMORY_DESCRIPTOR *arg5) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline struct Heap *memmgrGetDeviceSuballocator(struct MemoryManager *pMemoryManager, NvBool bForceSubheap) { + return ((void *)0); +} + +static inline NV_ADDRESS_SPACE memmgrAllocGetAddrSpace(struct MemoryManager *pMemoryManager, NvU32 flags, NvU32 attr) { + return 2; +} + +static inline NvBool memmgrIsScrubOnFreeEnabled(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bScrubOnFreeEnabled; +} + +static inline NvBool memmgrIsFastScrubberEnabled(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bFastScrubberEnabled; +} + +static inline NvBool memmgrUseVasForCeMemoryOps(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bUseVasForCeMemoryOps; +} + +static inline NvBool memmgrRmExecutingEccScrub(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bRmExecutingEccScrub; +} + +static inline NvBool memmgrBug1441072EccScrubWar(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bBug1441072EccScrubWar; +} + +static inline NvBool memmgrIsPmaInitialized(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaInitialized; +} + +static inline void memmgrSetPmaInitialized(struct MemoryManager *pMemoryManager, NvBool val) { + pMemoryManager->bPmaInitialized = val; +} + +static inline NvBool memmgrAreFbRegionsSupported(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bFbRegionsSupported; +} + +static inline NvBool memmgrIsPmaSupportedOnPlatform(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaSupportedOnPlatform; +} + +static inline NvBool memmgrIsPmaEnabled(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaEnabled; +} + +static inline NvBool memmgrIsPmaForcePersistence(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaForcePersistence; +} + +static inline void memmgrSetPmaForcePersistence(struct MemoryManager *pMemoryManager, NvBool val) { + pMemoryManager->bPmaForcePersistence = val; +} + +static inline NvBool memmgrAreClientPageTablesPmaManaged(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bClientPageTablesPmaManaged; +} + +static inline void memmgrSetClientPageTablesPmaManaged(struct MemoryManager *pMemoryManager, NvBool val) { + pMemoryManager->bClientPageTablesPmaManaged = val; +} + +static inline NvBool memmgrIsPmaAddrTree(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaAddrTree; +} + +static inline NvU64 memmgrGetRsvdMemoryBase(struct MemoryManager *pMemoryManager) { + return pMemoryManager->rsvdMemoryBase; +} + +static inline NvU32 memmgrGetRsvdMemorySize(struct MemoryManager *pMemoryManager) { + return pMemoryManager->rsvdMemorySize; +} + +NV_STATUS memmgrAllocResources_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo) memmgrAllocResources_IMPL(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemCopy_IMPL(struct MemoryManager 
*pMemoryManager, TRANSFER_SURFACE *pDst, TRANSFER_SURFACE *pSrc, NvU32 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemCopy(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, TRANSFER_SURFACE *pSrc, NvU32 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemCopy(pMemoryManager, pDst, pSrc, size, flags) memmgrMemCopy_IMPL(pMemoryManager, pDst, pSrc, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemSet_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, NvU32 value, NvU32 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemSet(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, NvU32 value, NvU32 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemSet(pMemoryManager, pDst, value, size, flags) memmgrMemSet_IMPL(pMemoryManager, pDst, value, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemWrite_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, void *pBuf, NvU64 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemWrite(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, void *pBuf, NvU64 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemWrite(pMemoryManager, pDst, pBuf, size, flags) memmgrMemWrite_IMPL(pMemoryManager, pDst, pBuf, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemRead_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pSrc, void *pBuf, NvU64 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemRead(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pSrc, void *pBuf, NvU64 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemRead(pMemoryManager, pSrc, pBuf, size, flags) memmgrMemRead_IMPL(pMemoryManager, pSrc, pBuf, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NvU8 *memmgrMemBeginTransfer_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU8 *memmgrMemBeginTransfer(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NULL; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemBeginTransfer(pMemoryManager, pTransferInfo, shadowBufSize, flags) memmgrMemBeginTransfer_IMPL(pMemoryManager, pTransferInfo, shadowBufSize, flags) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrMemEndTransfer_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrMemEndTransfer(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemEndTransfer(pMemoryManager, pTransferInfo, shadowBufSize, flags) memmgrMemEndTransfer_IMPL(pMemoryManager, pTransferInfo, 
shadowBufSize, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NvU8 *memmgrMemDescBeginTransfer_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU8 *memmgrMemDescBeginTransfer(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NULL; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemDescBeginTransfer(pMemoryManager, pMemDesc, flags) memmgrMemDescBeginTransfer_IMPL(pMemoryManager, pMemDesc, flags) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrMemDescEndTransfer_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrMemDescEndTransfer(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemDescEndTransfer(pMemoryManager, pMemDesc, flags) memmgrMemDescEndTransfer_IMPL(pMemoryManager, pMemDesc, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemDescMemSet_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemDescMemSet(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemDescMemSet(pMemoryManager, pMemDesc, value, flags) memmgrMemDescMemSet_IMPL(pMemoryManager, pMemDesc, value, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrSetMIGPartitionableBAR1Range_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetMIGPartitionableBAR1Range(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetMIGPartitionableBAR1Range(arg0, arg1) memmgrSetMIGPartitionableBAR1Range_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +struct NV_RANGE memmgrGetMIGPartitionableBAR1Range_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline struct NV_RANGE memmgrGetMIGPartitionableBAR1Range(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + struct NV_RANGE ret; + portMemSet(&ret, 0, sizeof(struct NV_RANGE)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMIGPartitionableBAR1Range(arg0, arg1) memmgrGetMIGPartitionableBAR1Range_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrSetMIGPartitionableMemoryRange_IMPL(OBJGPU *arg0, struct MemoryManager *arg1, struct NV_RANGE arg2); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrSetMIGPartitionableMemoryRange(OBJGPU *arg0, struct MemoryManager *arg1, struct NV_RANGE arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetMIGPartitionableMemoryRange(arg0, arg1, arg2) memmgrSetMIGPartitionableMemoryRange_IMPL(arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +struct NV_RANGE memmgrGetMIGPartitionableMemoryRange_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline struct NV_RANGE 
memmgrGetMIGPartitionableMemoryRange(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + struct NV_RANGE ret; + portMemSet(&ret, 0, sizeof(struct NV_RANGE)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMIGPartitionableMemoryRange(arg0, arg1) memmgrGetMIGPartitionableMemoryRange_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrFreeMIGGPUInstanceMemory_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle hMemory, struct Heap **ppMemoryPartitionHeap); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFreeMIGGPUInstanceMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle hMemory, struct Heap **ppMemoryPartitionHeap) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, hMemory, ppMemoryPartitionHeap) memmgrFreeMIGGPUInstanceMemory_IMPL(pGpu, pMemoryManager, swizzId, hMemory, ppMemoryPartitionHeap) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrPageLevelPoolsCreate_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrPageLevelPoolsCreate(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPageLevelPoolsCreate(pGpu, pMemoryManager) memmgrPageLevelPoolsCreate_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrPageLevelPoolsDestroy_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrPageLevelPoolsDestroy(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPageLevelPoolsDestroy(pGpu, pMemoryManager) memmgrPageLevelPoolsDestroy_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrPageLevelPoolsGetInfo_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, struct RM_POOL_ALLOC_MEM_RESERVE_INFO **arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrPageLevelPoolsGetInfo(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, struct RM_POOL_ALLOC_MEM_RESERVE_INFO **arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPageLevelPoolsGetInfo(pGpu, pMemoryManager, arg0, arg1) memmgrPageLevelPoolsGetInfo_IMPL(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrAllocMIGMemoryAllocationInternalHandles_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMIGMemoryAllocationInternalHandles(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager) memmgrAllocMIGMemoryAllocationInternalHandles_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrFreeMIGMemoryAllocationInternalHandles_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled 
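/*
 * Illustrative sketch, not generated code: every public memmgr entry point
 * in this header follows the same guard pattern. When the module is compiled
 * out (__nvoc_mem_mgr_h_disabled), the name binds to an inline stub that
 * asserts and returns NV_ERR_NOT_SUPPORTED; otherwise it is a macro alias
 * for the corresponding _IMPL function. A hypothetical caller is therefore
 * agnostic to which configuration it was built in:
 */
#if 0 /* example only; exampleCreatePools() is hypothetical */
static NV_STATUS exampleCreatePools(OBJGPU *pGpu, struct MemoryManager *pMemoryManager)
{
    /* Expands to memmgrPageLevelPoolsCreate_IMPL(pGpu, pMemoryManager) when
     * the module is enabled, or to the asserting stub returning
     * NV_ERR_NOT_SUPPORTED when it is disabled. */
    return memmgrPageLevelPoolsCreate(pGpu, pMemoryManager);
}
#endif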
+static inline void memmgrFreeMIGMemoryAllocationInternalHandles(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager) memmgrFreeMIGMemoryAllocationInternalHandles_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrGetFreeMemoryForAllMIGGPUInstances_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetFreeMemoryForAllMIGGPUInstances(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetFreeMemoryForAllMIGGPUInstances(pGpu, pMemoryManager, pBytes) memmgrGetFreeMemoryForAllMIGGPUInstances_IMPL(pGpu, pMemoryManager, pBytes) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrGetTopLevelScrubberStatus_IMPL(OBJGPU *arg0, struct MemoryManager *arg1, NvBool *pbTopLevelScrubberEnabled, NvBool *pbTopLevelScrubberConstructed); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetTopLevelScrubberStatus(OBJGPU *arg0, struct MemoryManager *arg1, NvBool *pbTopLevelScrubberEnabled, NvBool *pbTopLevelScrubberConstructed) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetTopLevelScrubberStatus(arg0, arg1, pbTopLevelScrubberEnabled, pbTopLevelScrubberConstructed) memmgrGetTopLevelScrubberStatus_IMPL(arg0, arg1, pbTopLevelScrubberEnabled, pbTopLevelScrubberConstructed) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrSaveAndDestroyTopLevelScrubber_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSaveAndDestroyTopLevelScrubber(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSaveAndDestroyTopLevelScrubber(arg0, arg1) memmgrSaveAndDestroyTopLevelScrubber_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrInitSavedTopLevelScrubber_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitSavedTopLevelScrubber(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitSavedTopLevelScrubber(arg0, arg1) memmgrInitSavedTopLevelScrubber_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrReserveMemoryForFsp_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrReserveMemoryForFsp(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrReserveMemoryForFsp(pGpu, pMemoryManager) memmgrReserveMemoryForFsp_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif // MEM_MGR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MEM_MGR_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c new file mode 100644 index 0000000..864eb9b --- 
/dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c @@ -0,0 +1,312 @@ +#define NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4789f2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_funcTable_Memory(Memory*); +NV_STATUS __nvoc_ctor_Memory(Memory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Memory(Memory*); +void __nvoc_dtor_Memory(Memory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Memory; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Memory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Memory = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_Memory_Memory, + &__nvoc_rtti_Memory_RmResource, + &__nvoc_rtti_Memory_RmResourceCommon, + &__nvoc_rtti_Memory_RsResource, + &__nvoc_rtti_Memory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Memory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Memory), + /*classId=*/ classId(Memory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Memory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Memory, + /*pCastInfo=*/ &__nvoc_castinfo_Memory, + /*pExportInfo=*/ &__nvoc_export_info_Memory +}; + +static NV_STATUS __nvoc_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static 
NV_STATUS __nvoc_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_RmResource_memShareCallback(struct Memory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvU32 __nvoc_thunk_RsResource_memGetRefCount(struct Memory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_memCanCopy(struct Memory *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_memPreDestruct(struct Memory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_memControlLookup(struct Memory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams, ppEntry); +} + +static NvBool __nvoc_thunk_RmResource_memAccessCallback(struct Memory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Memory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Memory(Memory *pThis) { + __nvoc_memDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Memory(Memory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Memory(Memory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Memory_fail_RmResource; + __nvoc_init_dataField_Memory(pThis); + + status = __nvoc_memConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Memory_fail__init; + goto __nvoc_ctor_Memory_exit; // Success + +__nvoc_ctor_Memory_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_Memory_fail_RmResource: +__nvoc_ctor_Memory_exit: + + return status; +} + +static void __nvoc_init_funcTable_Memory_1(Memory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL; + + pThis->__memControl__ = &memControl_IMPL; + + pThis->__memMap__ = &memMap_IMPL; + + pThis->__memUnmap__ = &memUnmap_IMPL; + + pThis->__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL; + + pThis->__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694; + + pThis->__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL; + + pThis->__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694; + + pThis->__memIsReady__ = &memIsReady_IMPL; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_Memory_resControl; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMap__ = &__nvoc_thunk_Memory_resMap; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmap__ = &__nvoc_thunk_Memory_resUnmap; + + pThis->__nvoc_base_RmResource.__rmresGetMemInterMapParams__ = &__nvoc_thunk_Memory_rmresGetMemInterMapParams; + + pThis->__nvoc_base_RmResource.__rmresCheckMemInterUnmap__ = &__nvoc_thunk_Memory_rmresCheckMemInterUnmap; + + pThis->__nvoc_base_RmResource.__rmresGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_rmresGetMemoryMappingDescriptor; + + pThis->__memShareCallback__ = &__nvoc_thunk_RmResource_memShareCallback; + + pThis->__memGetRefCount__ = &__nvoc_thunk_RsResource_memGetRefCount; + + 
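/*
 * Illustrative aside, not generated code: the __nvoc_thunk_* functions wired
 * into this table convert between base-class and derived pointers with plain
 * byte arithmetic, using the offsets recorded in the __nvoc_rtti_* tables
 * above. A minimal sketch of the down-cast the thunks perform:
 */
#if 0 /* exampleDownCast() is hypothetical */
static struct Memory *exampleDownCast(struct RsResource *pRes)
{
    /* Mirrors __nvoc_thunk_Memory_resControl: subtracting the recorded
     * RsResource offset recovers the enclosing Memory object. */
    return (struct Memory *)(((unsigned char *)pRes) - __nvoc_rtti_Memory_RsResource.offset);
}
#endif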
pThis->__memControlFilter__ = &__nvoc_thunk_RsResource_memControlFilter; + + pThis->__memAddAdditionalDependants__ = &__nvoc_thunk_RsResource_memAddAdditionalDependants; + + pThis->__memControl_Prologue__ = &__nvoc_thunk_RmResource_memControl_Prologue; + + pThis->__memCanCopy__ = &__nvoc_thunk_RsResource_memCanCopy; + + pThis->__memMapTo__ = &__nvoc_thunk_RsResource_memMapTo; + + pThis->__memPreDestruct__ = &__nvoc_thunk_RsResource_memPreDestruct; + + pThis->__memUnmapFrom__ = &__nvoc_thunk_RsResource_memUnmapFrom; + + pThis->__memControl_Epilogue__ = &__nvoc_thunk_RmResource_memControl_Epilogue; + + pThis->__memControlLookup__ = &__nvoc_thunk_RsResource_memControlLookup; + + pThis->__memAccessCallback__ = &__nvoc_thunk_RmResource_memAccessCallback; +} + +void __nvoc_init_funcTable_Memory(Memory *pThis) { + __nvoc_init_funcTable_Memory_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_Memory(Memory *pThis) { + pThis->__nvoc_pbase_Memory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_Memory(pThis); +} + +NV_STATUS __nvoc_objCreate_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Memory *pThis; + + pThis = portMemAllocNonPaged(sizeof(Memory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Memory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Memory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Memory(pThis); + status = __nvoc_ctor_Memory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Memory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Memory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Memory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h new file mode 100644 index 0000000..53912e5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h @@ -0,0 +1,417 @@ +#ifndef _G_MEM_NVOC_H_ +#define _G_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_mem_nvoc.h" + +#ifndef _MEMORY_API_H_ +#define _MEMORY_API_H_ + +#include "core/core.h" +#include "resserv/rs_resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/resource.h" + +#include "containers/btree.h" + +#include "ctrl/ctrl0041.h" + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct Heap; + +#ifndef __NVOC_CLASS_Heap_TYPEDEF__ +#define __NVOC_CLASS_Heap_TYPEDEF__ +typedef struct Heap Heap; +#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Heap +#define __nvoc_class_id_Heap 0x556e9a +#endif /* __nvoc_class_id_Heap */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR; +typedef struct PmuMapping PmuMapping; +typedef struct HWRESOURCE_INFO HWRESOURCE_INFO; + +// +// vGPU non-stall interrupt info +// +typedef struct _def_client_vgpu_ns_intr +{ + NvU32 nsSemValue; // Non stall interrupt semaphore value + NvU32 nsSemOffset; // Non stall interrupt semaphore offset. Currently it is always 0. 
+ NvBool isSemaMemValidationEnabled; // Enable checking for a change in the Non stall interrupt sema value
+ // while generating the event
+ NvU64 guestDomainId; // guest ID that we need to use to inject interrupt
+ NvU64 guestMSIAddr; // MSI address allocated by guest OS
+ NvU32 guestMSIData; // MSI data value set by guest OS
+ void *pVgpuVfioRef; // Reference to vgpu device in nvidia-vgpu-vfio module
+ void *pVmBusHostChannel; // VmBus Host channel used to communicate the event to the Guest
+ void *pEventDpc; // DPC event to pass the interrupt
+} VGPU_NS_INTR;
+
+typedef struct
+{
+ struct Memory *pNext;
+ struct Memory *pPrev;
+} memCircularListItem;
+
+/*!
+ * RM internal class representing NV01_MEMORY_XXX
+ *
+ * @note Memory cannot be a GpuResource because the NoDeviceMemory
+ * subclass is not allocated under a device.
+ */
+#ifdef NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct Memory {
+ const struct NVOC_RTTI *__nvoc_rtti;
+ struct RmResource __nvoc_base_RmResource;
+ struct Object *__nvoc_pbase_Object;
+ struct RsResource *__nvoc_pbase_RsResource;
+ struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
+ struct RmResource *__nvoc_pbase_RmResource;
+ struct Memory *__nvoc_pbase_Memory;
+ NV_STATUS (*__memGetMapAddrSpace__)(struct Memory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
+ NV_STATUS (*__memControl__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ NV_STATUS (*__memMap__)(struct Memory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *);
+ NV_STATUS (*__memUnmap__)(struct Memory *, CALL_CONTEXT *, RsCpuMapping *);
+ NV_STATUS (*__memGetMemInterMapParams__)(struct Memory *, RMRES_MEM_INTER_MAP_PARAMS *);
+ NV_STATUS (*__memCheckMemInterUnmap__)(struct Memory *, NvBool);
+ NV_STATUS (*__memGetMemoryMappingDescriptor__)(struct Memory *, MEMORY_DESCRIPTOR **);
+ NV_STATUS (*__memCheckCopyPermissions__)(struct Memory *, struct OBJGPU *, NvHandle);
+ NV_STATUS (*__memIsReady__)(struct Memory *);
+ NvBool (*__memShareCallback__)(struct Memory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
+ NvU32 (*__memGetRefCount__)(struct Memory *);
+ NV_STATUS (*__memControlFilter__)(struct Memory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ void (*__memAddAdditionalDependants__)(struct RsClient *, struct Memory *, RsResourceRef *);
+ NV_STATUS (*__memControl_Prologue__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ NvBool (*__memCanCopy__)(struct Memory *);
+ NV_STATUS (*__memMapTo__)(struct Memory *, RS_RES_MAP_TO_PARAMS *);
+ void (*__memPreDestruct__)(struct Memory *);
+ NV_STATUS (*__memUnmapFrom__)(struct Memory *, RS_RES_UNMAP_FROM_PARAMS *);
+ void (*__memControl_Epilogue__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ NV_STATUS (*__memControlLookup__)(struct Memory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
+ NvBool (*__memAccessCallback__)(struct Memory *, struct RsClient *, void *, RsAccessRight);
+ NvBool bConstructed;
+ struct Device *pDevice;
+ struct Subdevice *pSubDevice;
+ struct OBJGPU *pGpu;
+ NvBool bBcResource;
+ NvU32 categoryClassId;
+ NvU64 Length;
+ NvU32 HeapOwner;
+ NvU32 RefCount;
+ struct Heap *pHeap;
+ MEMORY_DESCRIPTOR *pMemDesc;
+ NvBool isMemDescOwner;
+ memCircularListItem dupListItem;
+ NvP64 KernelVAddr;
+ NvP64 KernelMapPriv;
+ PmuMapping *pPmuMappingList;
+ NODE Node;
+ NvU32 Attr;
+ NvU32 Attr2; +
NvU32 Pitch; + NvU32 Type; + NvU32 Flags; + NvU32 tag; + NvU64 osDeviceHandle; + HWRESOURCE_INFO *pHwResource; + NvBool bRpcAlloc; + VGPU_NS_INTR vgpuNsIntr; +}; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +#define __staticCast_Memory(pThis) \ + ((pThis)->__nvoc_pbase_Memory) + +#ifdef __nvoc_mem_h_disabled +#define __dynamicCast_Memory(pThis) ((Memory*)NULL) +#else //__nvoc_mem_h_disabled +#define __dynamicCast_Memory(pThis) \ + ((Memory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Memory))) +#endif //__nvoc_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Memory(Memory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Memory(Memory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Memory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Memory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define memGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) memGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define memControl(pMemory, pCallContext, pParams) memControl_DISPATCH(pMemory, pCallContext, pParams) +#define memMap(pMemory, pCallContext, pParams, pCpuMapping) memMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define memUnmap(pMemory, pCallContext, pCpuMapping) memUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define memGetMemInterMapParams(pMemory, pParams) memGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define memCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) memCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define memGetMemoryMappingDescriptor(pMemory, ppMemDesc) memGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define memCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) memCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define memIsReady(pMemory) memIsReady_DISPATCH(pMemory) +#define memShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) memShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define memGetRefCount(pResource) memGetRefCount_DISPATCH(pResource) +#define memControlFilter(pResource, pCallContext, pParams) memControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define memAddAdditionalDependants(pClient, pResource, pReference) memAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define memControl_Prologue(pResource, pCallContext, pParams) memControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define memCanCopy(pResource) memCanCopy_DISPATCH(pResource) +#define memMapTo(pResource, pParams) memMapTo_DISPATCH(pResource, pParams) +#define memPreDestruct(pResource) memPreDestruct_DISPATCH(pResource) +#define memUnmapFrom(pResource, pParams) memUnmapFrom_DISPATCH(pResource, pParams) +#define memControl_Epilogue(pResource, pCallContext, pParams) memControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define memControlLookup(pResource, pParams, ppEntry) memControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define memAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) 
memAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS memGetMapAddrSpace_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS memGetMapAddrSpace_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__memGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS memControl_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS memControl_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__memControl__(pMemory, pCallContext, pParams); +} + +NV_STATUS memMap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS memMap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__memMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS memUnmap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS memUnmap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__memUnmap__(pMemory, pCallContext, pCpuMapping); +} + +NV_STATUS memGetMemInterMapParams_IMPL(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS memGetMemInterMapParams_DISPATCH(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__memGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS memCheckMemInterUnmap_ac1694(struct Memory *pMemory, NvBool bSubdeviceHandleProvided) { + return NV_OK; +} + +static inline NV_STATUS memCheckMemInterUnmap_DISPATCH(struct Memory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__memCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +NV_STATUS memGetMemoryMappingDescriptor_IMPL(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); + +static inline NV_STATUS memGetMemoryMappingDescriptor_DISPATCH(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__memGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS memCheckCopyPermissions_ac1694(struct Memory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return NV_OK; +} + +static inline NV_STATUS memCheckCopyPermissions_DISPATCH(struct Memory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__memCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +NV_STATUS memIsReady_IMPL(struct Memory *pMemory); + +static inline NV_STATUS memIsReady_DISPATCH(struct Memory *pMemory) { + return pMemory->__memIsReady__(pMemory); +} + +static inline NvBool memShareCallback_DISPATCH(struct Memory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__memShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvU32 memGetRefCount_DISPATCH(struct Memory *pResource) { + return pResource->__memGetRefCount__(pResource); +} + +static inline NV_STATUS memControlFilter_DISPATCH(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
pResource->__memControlFilter__(pResource, pCallContext, pParams); +} + +static inline void memAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) { + pResource->__memAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS memControl_Prologue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__memControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool memCanCopy_DISPATCH(struct Memory *pResource) { + return pResource->__memCanCopy__(pResource); +} + +static inline NV_STATUS memMapTo_DISPATCH(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__memMapTo__(pResource, pParams); +} + +static inline void memPreDestruct_DISPATCH(struct Memory *pResource) { + pResource->__memPreDestruct__(pResource); +} + +static inline NV_STATUS memUnmapFrom_DISPATCH(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__memUnmapFrom__(pResource, pParams); +} + +static inline void memControl_Epilogue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__memControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS memControlLookup_DISPATCH(struct Memory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__memControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvBool memAccessCallback_DISPATCH(struct Memory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__memAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS memConstruct_IMPL(struct Memory *arg_pMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_memConstruct(arg_pMemory, arg_pCallContext, arg_pParams) memConstruct_IMPL(arg_pMemory, arg_pCallContext, arg_pParams) +NV_STATUS memCopyConstruct_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memCopyConstruct(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memCopyConstruct(pMemory, pCallContext, pParams) memCopyConstruct_IMPL(pMemory, pCallContext, pParams) +#endif //__nvoc_mem_h_disabled + +void memDestruct_IMPL(struct Memory *pMemory); +#define __nvoc_memDestruct(pMemory) memDestruct_IMPL(pMemory) +NV_STATUS memConstructCommon_IMPL(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource); +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memConstructCommon(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memConstructCommon(pMemory, categoryClassId, flags, pMemDesc, heapOwner, 
pHeap, attr, attr2, Pitch, type, tag, pHwResource) memConstructCommon_IMPL(pMemory, categoryClassId, flags, pMemDesc, heapOwner, pHeap, attr, attr2, Pitch, type, tag, pHwResource) +#endif //__nvoc_mem_h_disabled + +void memDestructCommon_IMPL(struct Memory *pMemory); +#ifdef __nvoc_mem_h_disabled +static inline void memDestructCommon(struct Memory *pMemory) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); +} +#else //__nvoc_mem_h_disabled +#define memDestructCommon(pMemory) memDestructCommon_IMPL(pMemory) +#endif //__nvoc_mem_h_disabled + +NV_STATUS memCreateMemDesc_IMPL(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NV_ADDRESS_SPACE addrSpace, NvU64 FBOffset, NvU64 length, NvU32 attr, NvU32 attr2); +#define memCreateMemDesc(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2) memCreateMemDesc_IMPL(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2) +NV_STATUS memCreateKernelMapping_IMPL(struct Memory *pMemory, NvU32 Protect, NvBool bClear); +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memCreateKernelMapping(struct Memory *pMemory, NvU32 Protect, NvBool bClear) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memCreateKernelMapping(pMemory, Protect, bClear) memCreateKernelMapping_IMPL(pMemory, Protect, bClear) +#endif //__nvoc_mem_h_disabled + +NV_STATUS memGetByHandle_IMPL(struct RsClient *pClient, NvHandle hMemory, struct Memory **ppMemory); +#define memGetByHandle(pClient, hMemory, ppMemory) memGetByHandle_IMPL(pClient, hMemory, ppMemory) +NV_STATUS memGetByHandleAndDevice_IMPL(struct RsClient *pClient, NvHandle hMemory, NvHandle hDevice, struct Memory **ppMemory); +#define memGetByHandleAndDevice(pClient, hMemory, hDevice, ppMemory) memGetByHandleAndDevice_IMPL(pClient, hMemory, hDevice, ppMemory) +NV_STATUS memGetByHandleAndGroupedGpu_IMPL(struct RsClient *pClient, NvHandle hMemory, struct OBJGPU *pGpu, struct Memory **ppMemory); +#define memGetByHandleAndGroupedGpu(pClient, hMemory, pGpu, ppMemory) memGetByHandleAndGroupedGpu_IMPL(pClient, hMemory, pGpu, ppMemory) +#undef PRIVATE_FIELD + + +#endif + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MEM_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h new file mode 100644 index 0000000..16f49e2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h @@ -0,0 +1,402 @@ +#ifndef _G_NV_DEBUG_DUMP_NVOC_H_ +#define _G_NV_DEBUG_DUMP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_nv_debug_dump_nvoc.h" + +#ifndef _NV_DEBUG_DUMP_H_ +#define _NV_DEBUG_DUMP_H_ + +#include "gpu/eng_state.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "core/info_block.h" + +#include "lib/protobuf/prb.h" +#include "rmapi/control.h" +#include "gpu/gpu.h" + +// Os Independent Error Types +typedef enum +{ + NVD_SKIP_ZERO, + NVD_GPU_HUNG, + NVD_FAILURE_TO_RECOVER, + NVD_MACHINE_CHECK, + NVD_POWERUP_FAILURE, + NVD_CPU_EXCEPTION, + NVD_EXTERNALLY_GENERATED, + NVD_GPU_GENERATED, +} NVD_ERROR_TYPE; + +#define NV_NVD_ERROR_CODE_MAJOR 31:16 +#define NV_NVD_ERROR_CODE_MINOR 15:0 + +#define NVD_ERROR_CODE(Major, Minor) \ + (DRF_NUM(_NVD, _ERROR_CODE, _MAJOR, Major) | \ + DRF_NUM(_NVD, _ERROR_CODE, _MINOR, Minor)) + + +#define NVD_ENGINE_FLAGS_PRIORITY 1:0 +#define NVD_ENGINE_FLAGS_PRIORITY_LOW 0x00000000 +#define NVD_ENGINE_FLAGS_PRIORITY_MED 0x00000001 +#define NVD_ENGINE_FLAGS_PRIORITY_HIGH 0x00000002 +#define NVD_ENGINE_FLAGS_PRIORITY_CRITICAL 0x00000003 + +/* + * NVD_ENGINE_FLAGS_SOURCE + * + * CPU - Always run on CPU, even if running as GSP-RM client. + * GSP - Run on GSP for GSP-RM client, otherwise run on CPU. + * BOTH - Engine dump is split between GSP-RM and CPU. Run both. + */ +#define NVD_ENGINE_FLAGS_SOURCE 3:2 +#define NVD_ENGINE_FLAGS_SOURCE_CPU 0x00000001 +#define NVD_ENGINE_FLAGS_SOURCE_GSP 0x00000002 +#define NVD_ENGINE_FLAGS_SOURCE_BOTH 0x00000003 + + +#define NV_NVD_ENGINE_STEP_MAJOR 31:16 +#define NV_NVD_ENGINE_STEP_MINOR 15:0 + +#define NVD_ENGINE_STEP(Major, Minor) \ + (DRF_NUM(_NVD, _ENGINE_STEP, _MAJOR, Major) | \ + DRF_NUM(_NVD, _ENGINE_STEP, _MINOR, Minor)) + +typedef enum +{ + NVD_FIRST_ENGINE = 0, + NVD_LAST_ENGINE = 0xFF, +} NVD_WHICH_ENGINE; + +typedef struct _def_nvd_debug_buffer { + NvU32 tag; + MEMORY_DESCRIPTOR *pMemDesc; + struct _def_nvd_debug_buffer *pNext; +} NVD_DEBUG_BUFFER; + +// Enumeration of Dump Types (Journal Entry, OCA dump, or API requested dump) +typedef enum +{ + NVD_DUMP_TYPE_JOURNAL, // Very small records only. Total for + // whole Journal is 4K (including overhead), + // actual amount of raw data stored is less. + NVD_DUMP_TYPE_OCA, // Assume 8K - 512 K total + NVD_DUMP_TYPE_API, // Mini Dump >512K +} NVD_DUMP_TYPE; + +// Enumeration of Sizes returned by nvDumpGetDumpBufferSizeEnum +typedef enum +{ + NVD_DUMP_SIZE_JOURNAL_WRITE, // Very small records only. + NVD_DUMP_SIZE_SMALL, // Assume 8K - 512 K total + NVD_DUMP_SIZE_MEDIUM, // Mini Dump >512K + NVD_DUMP_SIZE_LARGE // Megs of space +} NVD_DUMP_SIZE; + +// +// NV Dump State +// +// State passed into all dump routines. +// +typedef struct _def_nvd_state NVD_STATE; + +struct _def_nvd_state +{ + NvBool bDumpInProcess; // Currently creating dump. + NvBool bRMLock; // Acquired the RM lock. + NvBool bGpuAccessible; // OK to read priv registers on GPU. + NvU32 bugCheckCode; // Raw OS bugcheck code. + NvU32 internalCode; // OS Independent error code. + NvU32 initialbufferSize; // Size of buffer passed in. + NVD_DUMP_TYPE nvDumpType; // Type of DUMP. 
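/*
 * Illustrative aside, not part of the header: internalCode above is built
 * with the NVD_ERROR_CODE(Major, Minor) helper defined earlier, which packs
 * the major code into bits 31:16 and the minor code into bits 15:0
 * (NV_NVD_ERROR_CODE_MAJOR / NV_NVD_ERROR_CODE_MINOR). Assuming the
 * NVD_ERROR_TYPE values serve as the major code, a hypothetical assignment
 * would look like:
 */
#if 0 /* example only; the minor code value is hypothetical */
    pNvDumpState->internalCode = NVD_ERROR_CODE(NVD_GPU_HUNG, 3);
#endif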
+}; + + +NVD_DUMP_SIZE nvDumpGetDumpBufferSizeEnum( NVD_STATE *pNvDumpState ); + +typedef NV_STATUS NvdDumpEngineFunc(struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, void *pvData); + +typedef struct _def_nvd_engine_callback { + NvdDumpEngineFunc *pDumpEngineFunc; // Callback function. + NvU32 engDesc; // Indicates which engine this is. + NvU32 flags; // See NVD_ENGINE_FLAGS above. + void *pvData; // Opaque pointer to data passed to callback function. + struct _def_nvd_engine_callback *pNext; // Next Engine +} NVD_ENGINE_CALLBACK; + +#ifdef NVOC_NV_DEBUG_DUMP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NvDebugDump { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct NvDebugDump *__nvoc_pbase_NvDebugDump; + NV_STATUS (*__nvdConstructEngine__)(struct OBJGPU *, struct NvDebugDump *, ENGDESCRIPTOR); + NV_STATUS (*__nvdStateInitLocked__)(struct OBJGPU *, struct NvDebugDump *); + NV_STATUS (*__nvdReconcileTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NV_STATUS (*__nvdStateLoad__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStateUnload__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStatePreLoad__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStatePostUnload__)(POBJGPU, struct NvDebugDump *, NvU32); + void (*__nvdStateDestroy__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdStatePreUnload__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStateInitUnlocked__)(POBJGPU, struct NvDebugDump *); + void (*__nvdInitMissing__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdStatePreInitLocked__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdStatePreInitUnlocked__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdGetTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NV_STATUS (*__nvdCompareTunableState__)(POBJGPU, struct NvDebugDump *, void *, void *); + void (*__nvdFreeTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NV_STATUS (*__nvdStatePostLoad__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdAllocTunableState__)(POBJGPU, struct NvDebugDump *, void **); + NV_STATUS (*__nvdSetTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NvBool (*__nvdIsPresent__)(POBJGPU, struct NvDebugDump *); + NVD_DEBUG_BUFFER *pHeadDebugBuffer; + NVD_ENGINE_CALLBACK *pCallbacks; +}; + +#ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__ +#define __NVOC_CLASS_NvDebugDump_TYPEDEF__ +typedef struct NvDebugDump NvDebugDump; +#endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDebugDump +#define __nvoc_class_id_NvDebugDump 0x7e80a2 +#endif /* __nvoc_class_id_NvDebugDump */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDebugDump; + +#define __staticCast_NvDebugDump(pThis) \ + ((pThis)->__nvoc_pbase_NvDebugDump) + +#ifdef __nvoc_nv_debug_dump_h_disabled +#define __dynamicCast_NvDebugDump(pThis) ((NvDebugDump*)NULL) +#else //__nvoc_nv_debug_dump_h_disabled +#define __dynamicCast_NvDebugDump(pThis) \ + ((NvDebugDump*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvDebugDump))) +#endif //__nvoc_nv_debug_dump_h_disabled + +#define PDB_PROP_NVD_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
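/*
 * Illustrative sketch, not generated code: an engine participates in debug
 * dumps by implementing the NvdDumpEngineFunc shape typedef'd above and
 * registering it through nvdEngineSignUp() (declared further below). All
 * names and argument values here are hypothetical:
 */
#if 0
static NV_STATUS exampleDumpFunc(struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc,
                                 NVD_STATE *pNvDumpState, void *pvData)
{
    /* Encode engine-specific records into the protobuf encoder; check
     * pNvDumpState->bGpuAccessible before reading priv registers. */
    return NV_OK;
}

static NV_STATUS exampleSignUp(struct OBJGPU *pGpu, struct NvDebugDump *pNvd)
{
    /* NVD_ENGINE_FLAGS_PRIORITY occupies bits 1:0, so the _MED value is
     * already in position; engDesc 0 is a placeholder. */
    return nvdEngineSignUp(pGpu, pNvd, exampleDumpFunc, 0,
                           NVD_ENGINE_FLAGS_PRIORITY_MED, NULL);
}
#endif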
+#define PDB_PROP_NVD_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_NvDebugDump(NvDebugDump**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NvDebugDump(NvDebugDump**, Dynamic*, NvU32); +#define __objCreate_NvDebugDump(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_NvDebugDump((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define nvdConstructEngine(pGpu, pNvd, arg0) nvdConstructEngine_DISPATCH(pGpu, pNvd, arg0) +#define nvdStateInitLocked(pGpu, pNvd) nvdStateInitLocked_DISPATCH(pGpu, pNvd) +#define nvdReconcileTunableState(pGpu, pEngstate, pTunableState) nvdReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdStateLoad(pGpu, pEngstate, arg0) nvdStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStateUnload(pGpu, pEngstate, arg0) nvdStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStatePreLoad(pGpu, pEngstate, arg0) nvdStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStatePostUnload(pGpu, pEngstate, arg0) nvdStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStateDestroy(pGpu, pEngstate) nvdStateDestroy_DISPATCH(pGpu, pEngstate) +#define nvdStatePreUnload(pGpu, pEngstate, arg0) nvdStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStateInitUnlocked(pGpu, pEngstate) nvdStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define nvdInitMissing(pGpu, pEngstate) nvdInitMissing_DISPATCH(pGpu, pEngstate) +#define nvdStatePreInitLocked(pGpu, pEngstate) nvdStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define nvdStatePreInitUnlocked(pGpu, pEngstate) nvdStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define nvdGetTunableState(pGpu, pEngstate, pTunableState) nvdGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) nvdCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define nvdFreeTunableState(pGpu, pEngstate, pTunableState) nvdFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdStatePostLoad(pGpu, pEngstate, arg0) nvdStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define nvdAllocTunableState(pGpu, pEngstate, ppTunableState) nvdAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define nvdSetTunableState(pGpu, pEngstate, pTunableState) nvdSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdIsPresent(pGpu, pEngstate) nvdIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS nvdConstructEngine_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, ENGDESCRIPTOR arg0); + +static inline NV_STATUS nvdConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, ENGDESCRIPTOR arg0) { + return pNvd->__nvdConstructEngine__(pGpu, pNvd, arg0); +} + +NV_STATUS nvdStateInitLocked_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd); + +static inline NV_STATUS nvdStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pNvd) { + return pNvd->__nvdStateInitLocked__(pGpu, pNvd); +} + +static inline NV_STATUS nvdReconcileTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return pEngstate->__nvdReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS nvdStateLoad_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdStateUnload_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStateUnload__(pGpu, pEngstate, arg0); +} + +static 
inline NV_STATUS nvdStatePreLoad_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdStatePostUnload_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void nvdStateDestroy_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + pEngstate->__nvdStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdStatePreUnload_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void nvdInitMissing_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + pEngstate->__nvdInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdGetTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return pEngstate->__nvdGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS nvdCompareTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__nvdCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void nvdFreeTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + pEngstate->__nvdFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS nvdStatePostLoad_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdAllocTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void **ppTunableState) { + return pEngstate->__nvdAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS nvdSetTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return pEngstate->__nvdSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool nvdIsPresent_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdIsPresent__(pGpu, pEngstate); +} + +void nvdDestruct_IMPL(struct NvDebugDump *pNvd); +#define __nvoc_nvdDestruct(pNvd) nvdDestruct_IMPL(pNvd) +NV_STATUS nvdAllocDebugBuffer_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 arg0, NvU32 *arg1, MEMORY_DESCRIPTOR **arg2); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdAllocDebugBuffer(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 arg0, NvU32 *arg1, MEMORY_DESCRIPTOR **arg2) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdAllocDebugBuffer(pGpu, pNvd, arg0, arg1, arg2) nvdAllocDebugBuffer_IMPL(pGpu, pNvd, arg0, arg1, arg2) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdFreeDebugBuffer_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, MEMORY_DESCRIPTOR *arg0); +#ifdef 
__nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdFreeDebugBuffer(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, MEMORY_DESCRIPTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdFreeDebugBuffer(pGpu, pNvd, arg0) nvdFreeDebugBuffer_IMPL(pGpu, pNvd, arg0) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdEngineSignUp_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvdDumpEngineFunc *arg0, NvU32 engDesc, NvU32 flags, void *arg1); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdEngineSignUp(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvdDumpEngineFunc *arg0, NvU32 engDesc, NvU32 flags, void *arg1) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdEngineSignUp(pGpu, pNvd, arg0, engDesc, flags, arg1) nvdEngineSignUp_IMPL(pGpu, pNvd, arg0, engDesc, flags, arg1) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdEngineRelease_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdEngineRelease(struct OBJGPU *pGpu, struct NvDebugDump *pNvd) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdEngineRelease(pGpu, pNvd) nvdEngineRelease_IMPL(pGpu, pNvd) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdDoEngineDump_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, NvU32 arg0); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdDoEngineDump(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdDoEngineDump(pGpu, pNvd, pPrbEnc, pNvDumpState, arg0) nvdDoEngineDump_IMPL(pGpu, pNvd, pPrbEnc, pNvDumpState, arg0) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdDumpAllEngines_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdDumpAllEngines(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdDumpAllEngines(pGpu, pNvd, pPrbEnc, pNvDumpState) nvdDumpAllEngines_IMPL(pGpu, pNvd, pPrbEnc, pNvDumpState) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdFindEngine_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 engDesc, NVD_ENGINE_CALLBACK **ppEngineCallback); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdFindEngine(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 engDesc, NVD_ENGINE_CALLBACK **ppEngineCallback) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdFindEngine(pGpu, pNvd, engDesc, ppEngineCallback) nvdFindEngine_IMPL(pGpu, pNvd, engDesc, ppEngineCallback) +#endif //__nvoc_nv_debug_dump_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _NV_DEBUG_DUMP_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_NV_DEBUG_DUMP_NVOC_H_ diff --git 
a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h new file mode 100644 index 0000000..8d716ed --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h @@ -0,0 +1,1505 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef G_NV_NAME_RELEASED_H +#define G_NV_NAME_RELEASED_H + +typedef struct _CHIPS_RELEASED { + unsigned short devID; + unsigned short subSystemID; + unsigned short subSystemVendorID; + const char *name; +} CHIPS_RELEASED; + +static const CHIPS_RELEASED sChipsReleased[] = { + { 0x1340, 0x0000, 0x0000, "NVIDIA GeForce 830M" }, + { 0x1340, 0x2b2b, 0x103c, "NVIDIA GeForce 830A" }, + { 0x1341, 0x0000, 0x0000, "NVIDIA GeForce 840M" }, + { 0x1341, 0x3697, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1341, 0x3699, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1341, 0x369c, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1341, 0x36af, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1344, 0x0000, 0x0000, "NVIDIA GeForce 845M" }, + { 0x1346, 0x0000, 0x0000, "NVIDIA GeForce 930M" }, + { 0x1346, 0x30ba, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1346, 0x362c, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1346, 0x362f, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1346, 0x3636, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1347, 0x0000, 0x0000, "NVIDIA GeForce 940M" }, + { 0x1347, 0x36b9, 0x17aa, "NVIDIA GeForce 940A" }, + { 0x1347, 0x36ba, 0x17aa, "NVIDIA GeForce 940A" }, + { 0x1348, 0x0000, 0x0000, "NVIDIA GeForce 945M" }, + { 0x1348, 0x2b5c, 0x103c, "NVIDIA GeForce 945A" }, + { 0x1349, 0x0000, 0x0000, "NVIDIA GeForce 930M" }, + { 0x1349, 0x3124, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x364b, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x36c3, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x36d1, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x36d8, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x134B, 0x0000, 0x0000, "NVIDIA GeForce 940MX" }, + { 0x134B, 0x0008, 0x1414, "NVIDIA GeForce GPU" }, + { 0x134D, 0x0000, 0x0000, "NVIDIA GeForce 940MX" }, + { 0x134E, 0x0000, 0x0000, "NVIDIA GeForce 930MX" }, + { 0x134F, 0x0000, 0x0000, "NVIDIA GeForce 920MX" }, + { 0x137A, 0x0000, 0x0000, "NVIDIA N15M-Q3" }, + { 0x137A, 0x2225, 0x17aa, "Quadro K620M" }, + { 0x137A, 0x2232, 0x17aa, "Quadro M500M" }, + { 0x137A, 0x505a, 0x17aa, 
"Quadro M500M" }, + { 0x137B, 0x0000, 0x0000, "Quadro M520" }, + { 0x1380, 0x0000, 0x0000, "NVIDIA GeForce GTX 750 Ti" }, + { 0x1381, 0x0000, 0x0000, "NVIDIA GeForce GTX 750" }, + { 0x1382, 0x0000, 0x0000, "NVIDIA GeForce GTX 745" }, + { 0x1390, 0x0000, 0x0000, "NVIDIA GeForce 845M" }, + { 0x1391, 0x0000, 0x0000, "NVIDIA GeForce GTX 850M" }, + { 0x1391, 0x3697, 0x17aa, "NVIDIA GeForce GTX 850A" }, + { 0x1392, 0x0000, 0x0000, "NVIDIA GeForce GTX 860M" }, + { 0x1392, 0x066a, 0x1028, "NVIDIA GeForce GPU" }, + { 0x1392, 0x861e, 0x1043, "NVIDIA GeForce GTX 750 Ti" }, + { 0x1392, 0x86d9, 0x1043, "NVIDIA GeForce GTX 750 Ti" }, + { 0x1393, 0x0000, 0x0000, "NVIDIA GeForce 840M" }, + { 0x1398, 0x0000, 0x0000, "NVIDIA GeForce 845M" }, + { 0x1399, 0x0000, 0x0000, "NVIDIA GeForce 945M" }, + { 0x139A, 0x0000, 0x0000, "NVIDIA GeForce GTX 950M" }, + { 0x139A, 0x362c, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x362f, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x363f, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x3640, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x3647, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x36b9, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139B, 0x0000, 0x0000, "NVIDIA GeForce GTX 960M" }, + { 0x139B, 0x107a, 0x1025, "NVIDIA GeForce GTX 750 Ti" }, + { 0x139B, 0x06a3, 0x1028, "NVIDIA GeForce GTX 860M" }, + { 0x139B, 0x2b4c, 0x103c, "NVIDIA GeForce GTX 960A" }, + { 0x139B, 0x3649, 0x17aa, "NVIDIA GeForce GTX 750Ti" }, + { 0x139B, 0x36bf, 0x17aa, "NVIDIA GeForce GTX 960A" }, + { 0x139B, 0xc248, 0x19da, "NVIDIA GeForce GTX 750 Ti" }, + { 0x139B, 0x8a75, 0x1afa, "NVIDIA GeForce GTX 750Ti" }, + { 0x139C, 0x0000, 0x0000, "NVIDIA GeForce 940M" }, + { 0x139D, 0x0000, 0x0000, "NVIDIA GeForce GTX 750 Ti" }, + { 0x13B0, 0x0000, 0x0000, "Quadro M2000M" }, + { 0x13B1, 0x0000, 0x0000, "Quadro M1000M" }, + { 0x13B2, 0x0000, 0x0000, "Quadro M600M" }, + { 0x13B3, 0x0000, 0x0000, "Quadro K2200M" }, + { 0x13B4, 0x0000, 0x0000, "Quadro M620" }, + { 0x13B6, 0x0000, 0x0000, "Quadro M1200" }, + { 0x13B9, 0x0000, 0x0000, "NVS 810" }, + { 0x13BA, 0x0000, 0x0000, "Quadro K2200" }, + { 0x13BB, 0x0000, 0x0000, "Quadro K620" }, + { 0x13BC, 0x0000, 0x0000, "Quadro K1200" }, + { 0x13BC, 0x1140, 0x15c3, "EIZO Quadro MED-XN50LP" }, + { 0x13C0, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" }, + { 0x13C2, 0x0000, 0x0000, "NVIDIA GeForce GTX 970" }, + { 0x13D7, 0x0000, 0x0000, "NVIDIA GeForce GTX 980M" }, + { 0x13D8, 0x0000, 0x0000, "NVIDIA GeForce GTX 970M" }, + { 0x13D8, 0x1198, 0x1462, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0x1199, 0x1462, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0xb282, 0x19da, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0xb284, 0x19da, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0xb286, 0x19da, "NVIDIA GeForce GTX 960" }, + { 0x13D9, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x13DA, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" }, + { 0x13F0, 0x0000, 0x0000, "Quadro M5000" }, + { 0x13F1, 0x0000, 0x0000, "Quadro M4000" }, + { 0x13F1, 0x1153, 0x15c3, "EIZO Quadro MED-XN90" }, + { 0x13F2, 0x0000, 0x0000, "Tesla M60" }, + { 0x13F3, 0x0000, 0x0000, "Tesla M6" }, + { 0x13F8, 0x0000, 0x0000, "Quadro M5000M" }, + { 0x13F8, 0x11dd, 0x10de, "Quadro M5000 SE" }, + { 0x13F9, 0x0000, 0x0000, "Quadro M4000M" }, + { 0x13FA, 0x0000, 0x0000, "Quadro M3000M" }, + { 0x13FA, 0x11c9, 0x10de, "Quadro M3000 SE" }, + { 0x13FB, 0x0000, 0x0000, "Quadro M5500" }, + { 0x1401, 0x0000, 0x0000, "NVIDIA GeForce GTX 960" }, + { 0x1402, 0x0000, 0x0000, "NVIDIA GeForce GTX 950" }, + { 0x1406, 0x0000, 0x0000, "NVIDIA 
GeForce GTX 960" }, + { 0x1407, 0x0000, 0x0000, "NVIDIA GeForce GTX 750" }, + { 0x1427, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x1427, 0xd003, 0x1458, "NVIDIA GeForce GTX 950" }, + { 0x1430, 0x0000, 0x0000, "Quadro M2000" }, + { 0x1430, 0x1190, 0x15c3, "EIZO Quadro MED-XN70" }, + { 0x1431, 0x0000, 0x0000, "Tesla M4" }, + { 0x1436, 0x0000, 0x0000, "Quadro M2200" }, + { 0x15F0, 0x0000, 0x0000, "Quadro GP100" }, + { 0x15F7, 0x0000, 0x0000, "Tesla P100-PCIE-12GB" }, + { 0x15F8, 0x0000, 0x0000, "Tesla P100-PCIE-16GB" }, + { 0x15F9, 0x0000, 0x0000, "Tesla P100-SXM2-16GB" }, + { 0x1617, 0x0000, 0x0000, "NVIDIA GeForce GTX 980M" }, + { 0x1618, 0x0000, 0x0000, "NVIDIA GeForce GTX 970M" }, + { 0x1619, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x161A, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" }, + { 0x1667, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x174D, 0x0000, 0x0000, "NVIDIA GeForce MX130" }, + { 0x174E, 0x0000, 0x0000, "NVIDIA GeForce MX110" }, + { 0x179C, 0x0000, 0x0000, "NVIDIA GeForce 940MX" }, + { 0x17C2, 0x0000, 0x0000, "NVIDIA GeForce GTX TITAN X" }, + { 0x17C8, 0x0000, 0x0000, "NVIDIA GeForce GTX 980 Ti" }, + { 0x17F0, 0x0000, 0x0000, "Quadro M6000" }, + { 0x17F1, 0x0000, 0x0000, "Quadro M6000 24GB" }, + { 0x17FD, 0x0000, 0x0000, "Tesla M40" }, + { 0x17FD, 0x1173, 0x10de, "Tesla M40 24GB" }, + { 0x1B00, 0x0000, 0x0000, "NVIDIA TITAN X (Pascal)" }, + { 0x1B02, 0x0000, 0x0000, "NVIDIA TITAN Xp" }, + { 0x1B02, 0x123e, 0x10de, "NVIDIA TITAN Xp COLLECTORS EDITION" }, + { 0x1B02, 0x123f, 0x10de, "NVIDIA TITAN Xp COLLECTORS EDITION" }, + { 0x1B06, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080 Ti" }, + { 0x1B30, 0x0000, 0x0000, "Quadro P6000" }, + { 0x1B38, 0x0000, 0x0000, "Tesla P40" }, + { 0x1B80, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" }, + { 0x1B81, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1B82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070 Ti" }, + { 0x1B83, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" }, + { 0x1B84, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 3GB" }, + { 0x1B87, 0x0000, 0x0000, "NVIDIA P104-100" }, + { 0x1BA0, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" }, + { 0x1BA0, 0x0887, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BA1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1BA1, 0x08a1, 0x1028, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x08a2, 0x1028, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1cce, 0x1043, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1651, 0x1458, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1653, 0x1458, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x11e8, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x11e9, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1225, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1226, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1227, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x9501, 0x1558, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x95e1, 0x1558, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x2000, 0x1a58, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1032, 0x1d05, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA2, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1BB0, 0x0000, 0x0000, "Quadro P5000" }, + { 0x1BB1, 0x0000, 0x0000, "Quadro P4000" }, + { 0x1BB1, 0x11a3, 0x15c3, "EIZO Quadro MED-XN91" }, + { 0x1BB4, 0x0000, 0x0000, "Tesla P6" 
}, + { 0x1BB5, 0x0000, 0x0000, "Quadro P5200" }, + { 0x1BB5, 0x2268, 0x17aa, "Quadro P5200 with Max-Q Design" }, + { 0x1BB5, 0x2269, 0x17aa, "Quadro P5200 with Max-Q Design" }, + { 0x1BB6, 0x0000, 0x0000, "Quadro P5000" }, + { 0x1BB7, 0x0000, 0x0000, "Quadro P4000" }, + { 0x1BB7, 0x11e9, 0x1462, "Quadro P4000 with Max-Q Design" }, + { 0x1BB7, 0x9501, 0x1558, "Quadro P4000 with Max-Q Design" }, + { 0x1BB8, 0x0000, 0x0000, "Quadro P3000" }, + { 0x1BB9, 0x0000, 0x0000, "Quadro P4200" }, + { 0x1BB9, 0x95e1, 0x1558, "Quadro P4200 with Max-Q Design" }, + { 0x1BB9, 0x2268, 0x17aa, "Quadro P4200 with Max-Q Design" }, + { 0x1BB9, 0x2269, 0x17aa, "Quadro P4200 with Max-Q Design" }, + { 0x1BBB, 0x0000, 0x0000, "Quadro P3200" }, + { 0x1BBB, 0x225f, 0x17aa, "Quadro P3200 with Max-Q Design" }, + { 0x1BBB, 0x2262, 0x17aa, "Quadro P3200 with Max-Q Design" }, + { 0x1BC7, 0x0000, 0x0000, "NVIDIA P104-101" }, + { 0x1BE0, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" }, + { 0x1BE0, 0x1221, 0x1025, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x123e, 0x1025, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x07c0, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x0876, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x088b, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x1031, 0x1043, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x1bf0, 0x1043, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x355b, 0x1458, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1BE1, 0x84db, 0x103c, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BE1, 0x16f0, 0x1043, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BE1, 0x2009, 0x3842, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1C02, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 3GB" }, + { 0x1C03, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" }, + { 0x1C04, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 5GB" }, + { 0x1C06, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" }, + { 0x1C07, 0x0000, 0x0000, "NVIDIA P106-100" }, + { 0x1C09, 0x0000, 0x0000, "NVIDIA P106-090" }, + { 0x1C20, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" }, + { 0x1C20, 0x0802, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0803, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0825, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0827, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0885, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0886, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x8467, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x8478, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x8581, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x1244, 0x1462, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x95e5, 0x1558, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x39b9, 0x17aa, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x2000, 0x1a58, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x2001, 0x1a58, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x1059, 0x1d05, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C21, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C22, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C23, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" }, + { 0x1C30, 
0x0000, 0x0000, "Quadro P2000" }, + { 0x1C30, 0x11b3, 0x15c3, "EIZO Quadro MED-XN71" }, + { 0x1C31, 0x0000, 0x0000, "Quadro P2200" }, + { 0x1C31, 0x131b, 0x15c3, "EIZO Quadro MED-XN72" }, + { 0x1C60, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" }, + { 0x1C60, 0x8390, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C60, 0x8467, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C61, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C62, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C81, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C83, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C8C, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C8C, 0x087c, 0x1028, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x8519, 0x103c, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x123c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x2266, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x2267, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x39ff, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8D, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C8D, 0x84e9, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x84eb, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x114f, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1341, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1351, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1481, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x14a1, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x18c1, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1b5e, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1217, 0x152d, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1707, 0x1d72, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8F, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C8F, 0x123c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x126d, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x1284, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x1297, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C90, 0x0000, 0x0000, "NVIDIA GeForce MX150" }, + { 0x1C90, 0x09c1, 0x1028, "NVIDIA GeForce MX250" }, + { 0x1C91, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C91, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C91, 0x86e3, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C91, 0x1232, 0x152d, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C92, 0x149f, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x1b31, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x1245, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x126c, 0x1462, 
"NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C94, 0x0000, 0x0000, "NVIDIA GeForce MX350" }, + { 0x1C96, 0x0000, 0x0000, "NVIDIA GeForce MX350" }, + { 0x1CB1, 0x0000, 0x0000, "Quadro P1000" }, + { 0x1CB1, 0x11bc, 0x15c3, "EIZO Quadro MED-XN51LP" }, + { 0x1CB2, 0x0000, 0x0000, "Quadro P600" }, + { 0x1CB3, 0x0000, 0x0000, "Quadro P400" }, + { 0x1CB3, 0x11be, 0x15c3, "EIZO Quadro MED-XN31LP" }, + { 0x1CB6, 0x0000, 0x0000, "Quadro P620" }, + { 0x1CBA, 0x0000, 0x0000, "Quadro P2000" }, + { 0x1CBA, 0x2266, 0x17aa, "Quadro P2000 with Max-Q Design" }, + { 0x1CBA, 0x2267, 0x17aa, "Quadro P2000 with Max-Q Design" }, + { 0x1CBB, 0x0000, 0x0000, "Quadro P1000" }, + { 0x1CBC, 0x0000, 0x0000, "Quadro P600" }, + { 0x1CBD, 0x0000, 0x0000, "Quadro P620" }, + { 0x1CFA, 0x0000, 0x0000, "Quadro P2000" }, + { 0x1CFB, 0x0000, 0x0000, "Quadro P1000" }, + { 0x1CFB, 0x2600, 0x102b, "Matrox D-Series D1480" }, + { 0x1CFB, 0x2700, 0x102b, "Matrox D-Series D1450" }, + { 0x1D01, 0x0000, 0x0000, "NVIDIA GeForce GT 1030" }, + { 0x1D02, 0x0000, 0x0000, "NVIDIA GeForce GT 1010" }, + { 0x1D10, 0x0000, 0x0000, "NVIDIA GeForce MX150" }, + { 0x1D11, 0x0000, 0x0000, "NVIDIA GeForce MX230" }, + { 0x1D12, 0x0000, 0x0000, "NVIDIA GeForce MX150" }, + { 0x1D13, 0x0000, 0x0000, "NVIDIA GeForce MX250" }, + { 0x1D16, 0x0000, 0x0000, "NVIDIA GeForce MX330" }, + { 0x1D33, 0x0000, 0x0000, "Quadro P500" }, + { 0x1D34, 0x0000, 0x0000, "Quadro P520" }, + { 0x1D52, 0x0000, 0x0000, "NVIDIA GeForce MX250" }, + { 0x1D81, 0x0000, 0x0000, "NVIDIA TITAN V" }, + { 0x1DB1, 0x0000, 0x0000, "Tesla V100-SXM2-16GB" }, + { 0x1DB1, 0x1307, 0x10de, "Tesla V100-SXM2-16GB-LS" }, + { 0x1DB3, 0x0000, 0x0000, "Tesla V100-FHHL-16GB" }, + { 0x1DB4, 0x0000, 0x0000, "Tesla V100-PCIE-16GB" }, + { 0x1DB4, 0x1306, 0x10de, "Tesla V100-PCIE-16GB-LS" }, + { 0x1DB5, 0x0000, 0x0000, "Tesla V100-SXM2-32GB" }, + { 0x1DB5, 0x1308, 0x10de, "Tesla V100-SXM2-32GB-LS" }, + { 0x1DB6, 0x0000, 0x0000, "Tesla V100-PCIE-32GB" }, + { 0x1DB7, 0x0000, 0x0000, "Tesla V100-DGXS-32GB" }, + { 0x1DB8, 0x0000, 0x0000, "Tesla V100-SXM3-32GB" }, + { 0x1DB8, 0x131d, 0x10de, "Tesla V100-SXM3-32GB-H" }, + { 0x1DBA, 0x0000, 0x0000, "Quadro GV100" }, + { 0x1DBA, 0x12eb, 0x10de, "NVIDIA TITAN V JHH Special Edition" }, + { 0x1DF0, 0x0000, 0x0000, "Tesla PG500-216" }, + { 0x1DF2, 0x0000, 0x0000, "Tesla PG503-216" }, + { 0x1DF6, 0x0000, 0x0000, "Tesla V100S-PCIE-32GB" }, + { 0x1E02, 0x0000, 0x0000, "NVIDIA TITAN RTX" }, + { 0x1E04, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Ti" }, + { 0x1E07, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Ti" }, + { 0x1E09, 0x0000, 0x0000, "NVIDIA CMP 50HX" }, + { 0x1E30, 0x0000, 0x0000, "Quadro RTX 6000" }, + { 0x1E30, 0x129e, 0x1028, "Quadro RTX 8000" }, + { 0x1E30, 0x129e, 0x103c, "Quadro RTX 8000" }, + { 0x1E30, 0x129e, 0x10de, "Quadro RTX 8000" }, + { 0x1E36, 0x0000, 0x0000, "Quadro RTX 6000" }, + { 0x1E78, 0x13d8, 0x10de, "Quadro RTX 8000" }, + { 0x1E78, 0x13d9, 0x10de, "Quadro RTX 6000" }, + { 0x1E81, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 SUPER" }, + { 0x1E82, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1E84, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" }, + { 0x1E87, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1E89, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1E90, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1E90, 0x1375, 0x1025, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08a1, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08a2, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 
0x1E90, 0x08ea, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08eb, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ec, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x093b, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x093c, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8572, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8573, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8602, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8606, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86c6, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x131f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x137f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x141f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1751, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1660, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1661, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1662, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x75a6, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x75a7, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86a6, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86a7, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1274, 0x1462, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1277, 0x1462, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1220, 0x152d, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x95e1, 0x1558, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x97e1, 0x1558, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x2002, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x2005, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x2007, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x3000, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x3001, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1069, 0x1d05, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E91, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 Super" }, + { 0x1E91, 0x8607, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x8736, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x8738, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x8772, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x878b, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x1e61, 0x1043, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x1511, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" 
}, + { 0x1E91, 0x75b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x75b4, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x76b2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x76b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x78a2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x78a3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x86b2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x86b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x22c3, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x22c5, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x2009, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x200a, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x3012, 0x8086, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E93, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Super" }, + { 0x1E93, 0x1401, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x149c, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x09d2, 0x1028, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8607, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8736, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8738, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8772, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x75b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x75b2, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x76b0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x76b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x78a0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x78a1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x86b0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x86b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12b4, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x50d3, 0x1558, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x70d1, 0x1558, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x22c3, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x22c5, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 
0x1E93, 0x2009, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x200a, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x1089, 0x1d05, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1EB0, 0x0000, 0x0000, "Quadro RTX 5000" }, + { 0x1EB1, 0x0000, 0x0000, "Quadro RTX 4000" }, + { 0x1EB1, 0x12a0, 0x15c3, "EIZO Quadro MED-XN92" }, + { 0x1EB5, 0x0000, 0x0000, "Quadro RTX 5000" }, + { 0x1EB5, 0x1375, 0x1025, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x1401, 0x1025, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x149c, 0x1025, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x09c3, 0x1028, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8736, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8738, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8772, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8780, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8782, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8783, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8785, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x1dd1, 0x1043, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x1274, 0x1462, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x12b0, 0x1462, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x12c6, 0x1462, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x22b8, 0x17aa, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x22ba, 0x17aa, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x2005, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x2007, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x2008, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x200a, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB6, 0x0000, 0x0000, "Quadro RTX 4000" }, + { 0x1EB6, 0x09c3, 0x1028, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8736, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8738, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8772, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8780, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8782, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8783, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8785, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x1274, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x1277, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x12b0, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x12c6, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x22b8, 0x17aa, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x22ba, 0x17aa, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EC2, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" }, + { 0x1EC7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" }, + { 0x1ED0, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1ED0, 0x132d, 0x1025, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x8572, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x8573, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 
0x1ED0, 0x8600, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x8605, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x138f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x15c1, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x3fee, 0x17aa, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x3ffe, 0x17aa, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED1, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 Super" }, + { 0x1ED1, 0x1432, 0x1025, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x8746, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x165f, 0x1043, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0xc192, 0x144d, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED3, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Super" }, + { 0x1ED3, 0x1432, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x09d1, 0x1028, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x8746, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x878a, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x1d61, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x1e51, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x1f01, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1EF5, 0x0000, 0x0000, "Quadro RTX 5000" }, + { 0x1F02, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F03, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F06, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" }, + { 0x1F07, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F08, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F0A, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F0B, 0x0000, 0x0000, "NVIDIA CMP 40HX" }, + { 0x1F10, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F10, 0x132d, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1342, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08a1, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08a2, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ea, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08eb, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ec, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x093b, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x093c, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8572, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8573, 0x103c, "NVIDIA GeForce 
RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8602, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8606, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x132f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x136f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1881, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1e6e, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1658, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1663, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1664, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x75a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x75a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x86a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x86a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1274, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1277, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x95e1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x97e1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2002, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2005, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2007, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x3000, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x3001, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x105e, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1070, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2087, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2087, 0x8086, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F11, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F12, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F12, 0x098f, 0x1028, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x8741, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x8744, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x878e, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x880e, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x1e11, 0x1043, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x1f11, 0x1043, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x12d9, 0x1462, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x3801, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x3802, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x3803, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F14, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F14, 0x1401, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1432, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1442, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1446, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x147d, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x09e2, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x09f3, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8607, 0x103c, "NVIDIA GeForce RTX 2070 
with Max-Q Design" }, + { 0x1F14, 0x86c6, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8736, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8738, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8746, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8772, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x878b, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x174f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1512, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x75b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x75b6, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x76b4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x76b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x78a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x78a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x86b4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x86b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x50d3, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x70d1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x200c, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x2011, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F15, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F36, 0x0000, 0x0000, "Quadro RTX 3000" }, + { 0x1F36, 0x0990, 0x1028, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x8736, 0x103c, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x8738, 0x103c, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x8772, 0x103c, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x13cf, 0x1043, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x0032, 0x1414, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F42, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" }, + { 0x1F47, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" }, + { 0x1F50, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F50, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8572, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8573, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8574, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8600, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8605, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x3fee, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x3ffe, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q 
Design" }, + { 0x1F51, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F54, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F54, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F54, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F54, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F54, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F55, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F76, 0x0000, 0x0000, "Quadro RTX 3000" }, + { 0x1F76, 0x2800, 0x102b, "Matrox D-Series D2450" }, + { 0x1F76, 0x2900, 0x102b, "Matrox D-Series D2480" }, + { 0x1F82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F91, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F91, 0x863e, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x86e7, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x86e8, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x12cf, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x156f, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x0032, 0x1414, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0xc822, 0x144d, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x127e, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1281, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1284, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1285, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x129c, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x229f, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x3802, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x3806, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x3f1a, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1001, 0x1a58, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F95, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 Ti" }, + { 0x1F95, 0x1479, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x147a, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x147b, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x147c, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x86e7, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x86e8, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x8815, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1dff, 0x1043, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1e1f, 0x1043, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0xc838, 0x144d, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x12bd, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x12c5, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x12d2, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x22c0, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x22c1, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x3837, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x3f95, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1003, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1006, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with 
Max-Q Design" }, + { 0x1F95, 0x1007, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x3e30, 0x1e83, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F96, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F96, 0x1297, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F97, 0x0000, 0x0000, "NVIDIA GeForce MX450" }, + { 0x1F98, 0x0000, 0x0000, "NVIDIA GeForce MX450" }, + { 0x1F99, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F99, 0x1479, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x147a, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x147b, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x147c, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x8815, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x13b2, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x1402, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x1902, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x12bd, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x12c5, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x12d2, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x22da, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x3f93, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x3e30, 0x1e83, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9C, 0x0000, 0x0000, "NVIDIA GeForce MX450" }, + { 0x1F9D, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F9D, 0x128d, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x130d, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x149c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x185c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x189c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x12f4, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x1302, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x131b, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x1326, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x132a, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x132e, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9F, 0x0000, 0x0000, "NVIDIA GeForce MX550" }, + { 0x1FA0, 0x0000, 0x0000, "NVIDIA GeForce MX550" }, + { 0x1FB0, 0x12db, 0x1028, "NVIDIA T1000" }, + { 0x1FB0, 0x12db, 0x103c, "NVIDIA T1000" }, + { 0x1FB0, 0x8a80, 0x103c, "NVIDIA T1000" }, + { 0x1FB0, 0x12db, 0x10de, "NVIDIA T1000" }, + { 0x1FB0, 0x1485, 0x10de, "NVIDIA DGX Display" }, + { 0x1FB0, 0x12db, 0x17aa, "NVIDIA T1000" }, + { 0x1FB1, 0x1488, 0x1028, "NVIDIA T600" }, + { 0x1FB1, 0x1488, 0x103c, "NVIDIA T600" }, + { 0x1FB1, 0x8a80, 0x103c, "NVIDIA T600" }, + { 0x1FB1, 0x1488, 0x10de, "NVIDIA T600" }, + { 0x1FB1, 0x1488, 0x17aa, "NVIDIA T600" }, + { 0x1FB2, 0x1489, 0x1028, "NVIDIA T400" }, + { 0x1FB2, 0x1489, 0x103c, "NVIDIA T400" }, + { 0x1FB2, 0x8a80, 0x103c, "NVIDIA T400" }, + { 0x1FB2, 0x1489, 0x10de, "NVIDIA T400" }, + { 0x1FB2, 0x1489, 0x17aa, "NVIDIA T400" }, + { 0x1FB6, 0x0000, 0x0000, "NVIDIA T600 Laptop GPU" }, + { 0x1FB7, 0x0000, 0x0000, "NVIDIA T550 Laptop GPU" }, + { 0x1FB8, 0x0000, 0x0000, "Quadro T2000" }, + { 0x1FB8, 0x097e, 0x1028, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8736, 0x103c, "Quadro 
T2000 with Max-Q Design" }, + { 0x1FB8, 0x8738, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8772, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8780, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8782, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8783, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8785, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x87f0, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x1281, 0x1462, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x12bd, 0x1462, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x22c0, 0x17aa, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x22c1, 0x17aa, "Quadro T2000 with Max-Q Design" }, + { 0x1FB9, 0x0000, 0x0000, "Quadro T1000" }, + { 0x1FB9, 0x1479, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x147a, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x147b, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x147c, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8736, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8738, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8772, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8780, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8782, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8783, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8785, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x87f0, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x12bd, 0x1462, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x22c0, 0x17aa, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x22c1, 0x17aa, "Quadro T1000 with Max-Q Design" }, + { 0x1FBA, 0x0000, 0x0000, "NVIDIA T600 Laptop GPU" }, + { 0x1FBB, 0x0000, 0x0000, "NVIDIA T500" }, + { 0x1FBC, 0x0000, 0x0000, "NVIDIA T1200 Laptop GPU" }, + { 0x1FDD, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1FF0, 0x1612, 0x1028, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x1612, 0x103c, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x8a80, 0x103c, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x1612, 0x10de, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x1612, 0x17aa, "NVIDIA T1000 8GB" }, + { 0x1FF2, 0x1613, 0x1028, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x1613, 0x103c, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x8a80, 0x103c, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x1613, 0x10de, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x1613, 0x17aa, "NVIDIA T400 4GB" }, + { 0x1FF9, 0x0000, 0x0000, "Quadro T1000" }, + { 0x20B0, 0x0000, 0x0000, "NVIDIA A100-SXM4-40GB" }, + { 0x20B0, 0x1450, 0x10de, "NVIDIA A100-PG509-200" }, + { 0x20B2, 0x1463, 0x10de, "NVIDIA A100-SXM4-80GB" }, + { 0x20B2, 0x147f, 0x10de, "NVIDIA A100-SXM4-80GB" }, + { 0x20B3, 0x14a7, 0x10de, "NVIDIA PG506-242" }, + { 0x20B3, 0x14a8, 0x10de, "NVIDIA PG506-243" }, + { 0x20B5, 0x1533, 0x10de, "NVIDIA A100 80GB PCIe" }, + { 0x20B6, 0x1492, 0x10de, "NVIDIA PG506-232" }, + { 0x20B7, 0x1532, 0x10de, "NVIDIA A30" }, + { 0x20F1, 0x145f, 0x10de, "NVIDIA A100-PCIE-40GB" }, + { 0x2182, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" }, + { 0x2184, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660" }, + { 0x2187, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 SUPER" }, + { 0x2188, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x2189, 0x0000, 0x0000, "NVIDIA CMP 30HX" }, + { 0x2191, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" }, + { 0x2191, 0x0949, 0x1028, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x85fb, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x85fe, 0x103c, 
"NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x86d6, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x8741, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x8744, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x878d, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x87af, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x87b3, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x171f, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x17ef, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x18d1, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x0032, 0x1414, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x128a, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x128b, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12c6, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12cb, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12cc, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12d9, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x380c, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x381d, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x381e, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2192, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 Ti" }, + { 0x21C4, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 SUPER" }, + { 0x21D1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" }, + { 0x2203, 0x0000, 0x0000, "NVIDIA GeForce RTX 3090 Ti" }, + { 0x2204, 0x0000, 0x0000, "NVIDIA GeForce RTX 3090" }, + { 0x2206, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" }, + { 0x2208, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti" }, + { 0x220A, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" }, + { 0x220D, 0x0000, 0x0000, "NVIDIA CMP 90HX" }, + { 0x2216, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" }, + { 0x2230, 0x1459, 0x1028, "NVIDIA RTX A6000" }, + { 0x2230, 0x1459, 0x103c, "NVIDIA RTX A6000" }, + { 0x2230, 0x1459, 0x10de, "NVIDIA RTX A6000" }, + { 0x2230, 0x1459, 0x17aa, "NVIDIA RTX A6000" }, + { 0x2231, 0x147e, 0x1028, "NVIDIA RTX A5000" }, + { 0x2231, 0x147e, 0x103c, "NVIDIA RTX A5000" }, + { 0x2231, 0x147e, 0x10de, "NVIDIA RTX A5000" }, + { 0x2231, 0x147e, 0x17aa, "NVIDIA RTX A5000" }, + { 0x2232, 0x163c, 0x1028, "NVIDIA RTX A4500" }, + { 0x2232, 0x163c, 0x103c, "NVIDIA RTX A4500" }, + { 0x2232, 0x163c, 0x10de, "NVIDIA RTX A4500" }, + { 0x2232, 0x163c, 0x17aa, "NVIDIA RTX A4500" }, + { 0x2233, 0x165a, 0x1028, "NVIDIA RTX A5500" }, + { 0x2233, 0x165a, 0x103c, "NVIDIA RTX A5500" }, + { 0x2233, 0x165a, 0x10de, "NVIDIA RTX A5500" }, + { 0x2233, 0x165a, 0x17aa, "NVIDIA RTX A5500" }, + { 0x2235, 0x145a, 0x10de, "NVIDIA A40" }, + { 0x2236, 0x1482, 0x10de, "NVIDIA A10" }, + { 0x2237, 0x152f, 0x10de, "NVIDIA A10G" }, + { 0x2238, 0x1677, 0x10de, "NVIDIA A10M" }, + { 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" }, + { 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" }, + { 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" }, + { 0x2460, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" }, + { 0x2482, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti" }, + { 0x2484, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070" }, + { 0x2486, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" }, + { 0x2487, 0x0000, 
0x0000, "NVIDIA GeForce RTX 3060" }, + { 0x2488, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070" }, + { 0x2489, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" }, + { 0x248A, 0x0000, 0x0000, "NVIDIA CMP 70HX" }, + { 0x249C, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Laptop GPU" }, + { 0x249C, 0x1194, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x249D, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Laptop GPU" }, + { 0x24A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti Laptop GPU" }, + { 0x24A0, 0x1192, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x24B0, 0x14ad, 0x1028, "NVIDIA RTX A4000" }, + { 0x24B0, 0x14ad, 0x103c, "NVIDIA RTX A4000" }, + { 0x24B0, 0x14ad, 0x10de, "NVIDIA RTX A4000" }, + { 0x24B0, 0x14ad, 0x17aa, "NVIDIA RTX A4000" }, + { 0x24B1, 0x1658, 0x10de, "NVIDIA RTX A4000H" }, + { 0x24B6, 0x0000, 0x0000, "NVIDIA RTX A5000 Laptop GPU" }, + { 0x24B7, 0x0000, 0x0000, "NVIDIA RTX A4000 Laptop GPU" }, + { 0x24B8, 0x0000, 0x0000, "NVIDIA RTX A3000 Laptop GPU" }, + { 0x24B9, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" }, + { 0x24BA, 0x0000, 0x0000, "NVIDIA RTX A4500 Laptop GPU" }, + { 0x24BB, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" }, + { 0x24DC, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Laptop GPU" }, + { 0x24DD, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Laptop GPU" }, + { 0x24E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti Laptop GPU" }, + { 0x24FA, 0x0000, 0x0000, "NVIDIA RTX A4500 Embedded GPU" }, + { 0x2503, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" }, + { 0x2504, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" }, + { 0x2507, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" }, + { 0x2508, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 OEM" }, + { 0x2520, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x2523, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x2531, 0x151d, 0x1028, "NVIDIA RTX A2000" }, + { 0x2531, 0x151d, 0x103c, "NVIDIA RTX A2000" }, + { 0x2531, 0x151d, 0x10de, "NVIDIA RTX A2000" }, + { 0x2531, 0x151d, 0x17aa, "NVIDIA RTX A2000" }, + { 0x2560, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x2563, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x2571, 0x1611, 0x1028, "NVIDIA RTX A2000 12GB" }, + { 0x2571, 0x1611, 0x103c, "NVIDIA RTX A2000 12GB" }, + { 0x2571, 0x1611, 0x10de, "NVIDIA RTX A2000 12GB" }, + { 0x2571, 0x1611, 0x17aa, "NVIDIA RTX A2000 12GB" }, + { 0x25A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x25A0, 0x8928, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" }, + { 0x25A0, 0x89f9, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" }, + { 0x25A0, 0x1196, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x25A2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25A2, 0x0baf, 0x1028, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x25A2, 0x1195, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x25A5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25A6, 0x0000, 0x0000, "NVIDIA GeForce MX570" }, + { 0x25A7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" }, + { 0x25A9, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" }, + { 0x25AA, 0x0000, 0x0000, "NVIDIA GeForce MX570 A" }, + { 0x25B6, 0x14a9, 0x10de, "NVIDIA A16" }, + { 0x25B6, 0x157e, 0x10de, "NVIDIA A2" }, + { 0x25B8, 0x0000, 0x0000, "NVIDIA RTX A2000 Laptop GPU" }, + { 0x25B9, 0x0000, 0x0000, "NVIDIA RTX A1000 Laptop GPU" }, + { 0x25BA, 0x0000, 0x0000, "NVIDIA RTX A2000 8GB Laptop GPU" }, + { 0x25BB, 0x0000, 0x0000, "NVIDIA RTX A500 Laptop GPU" }, + { 0x25E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" 
}, + { 0x25E2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25E5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25F9, 0x0000, 0x0000, "NVIDIA RTX A1000 Embedded GPU" }, + { 0x25FA, 0x0000, 0x0000, "NVIDIA RTX A2000 Embedded GPU" }, + { 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" }, + { 0x13BD, 0x11cd, 0x10DE, "GRID M10-1B" }, + { 0x13BD, 0x11ce, 0x10DE, "GRID M10-0Q" }, + { 0x13BD, 0x11cf, 0x10DE, "GRID M10-1Q" }, + { 0x13BD, 0x11d0, 0x10DE, "GRID M10-2Q" }, + { 0x13BD, 0x11d1, 0x10DE, "GRID M10-4Q" }, + { 0x13BD, 0x11d2, 0x10DE, "GRID M10-8Q" }, + { 0x13BD, 0x1286, 0x10DE, "GRID M10-2B" }, + { 0x13BD, 0x12ee, 0x10DE, "GRID M10-2B4" }, + { 0x13BD, 0x1339, 0x10DE, "GRID M10-1B4" }, + { 0x13F2, 0x114c, 0x10DE, "GRID M60-0Q" }, + { 0x13F2, 0x114d, 0x10DE, "GRID M60-1Q" }, + { 0x13F2, 0x114e, 0x10DE, "GRID M60-2Q" }, + { 0x13F2, 0x114f, 0x10DE, "GRID M60-4Q" }, + { 0x13F2, 0x1150, 0x10DE, "GRID M60-8Q" }, + { 0x13F2, 0x1176, 0x10DE, "GRID M60-0B" }, + { 0x13F2, 0x1177, 0x10DE, "GRID M60-1B" }, + { 0x13F2, 0x117d, 0x10DE, "GRID M60-2B" }, + { 0x13F2, 0x12ec, 0x10DE, "GRID M60-2B4" }, + { 0x13F2, 0x1337, 0x10DE, "GRID M60-1B4" }, + { 0x13F3, 0x117c, 0x10DE, "GRID M6-2B" }, + { 0x13F3, 0x117e, 0x10DE, "GRID M6-0B" }, + { 0x13F3, 0x117f, 0x10DE, "GRID M6-1B" }, + { 0x13F3, 0x1180, 0x10DE, "GRID M6-0Q" }, + { 0x13F3, 0x1181, 0x10DE, "GRID M6-1Q" }, + { 0x13F3, 0x1182, 0x10DE, "GRID M6-2Q" }, + { 0x13F3, 0x1183, 0x10DE, "GRID M6-4Q" }, + { 0x13F3, 0x1184, 0x10DE, "GRID M6-8Q" }, + { 0x13F3, 0x12ed, 0x10DE, "GRID M6-2B4" }, + { 0x13F3, 0x1338, 0x10DE, "GRID M6-1B4" }, + { 0x15F7, 0x1265, 0x10DE, "GRID P100C-1B" }, + { 0x15F7, 0x1266, 0x10DE, "GRID P100C-1Q" }, + { 0x15F7, 0x1267, 0x10DE, "GRID P100C-2Q" }, + { 0x15F7, 0x1268, 0x10DE, "GRID P100C-4Q" }, + { 0x15F7, 0x1269, 0x10DE, "GRID P100C-6Q" }, + { 0x15F7, 0x126a, 0x10DE, "GRID P100C-12Q" }, + { 0x15F7, 0x128d, 0x10DE, "GRID P100C-2B" }, + { 0x15F7, 0x12f4, 0x10DE, "GRID P100C-2B4" }, + { 0x15F7, 0x133f, 0x10DE, "GRID P100C-1B4" }, + { 0x15F7, 0x137d, 0x10DE, "GRID P100C-12C" }, + { 0x15F7, 0x138c, 0x10DE, "GRID P100C-4C" }, + { 0x15F7, 0x138d, 0x10DE, "GRID P100C-6C" }, + { 0x15F8, 0x1221, 0x10DE, "GRID P100-1B" }, + { 0x15F8, 0x1222, 0x10DE, "GRID P100-1Q" }, + { 0x15F8, 0x1223, 0x10DE, "GRID P100-2Q" }, + { 0x15F8, 0x1224, 0x10DE, "GRID P100-4Q" }, + { 0x15F8, 0x1225, 0x10DE, "GRID P100-8Q" }, + { 0x15F8, 0x1226, 0x10DE, "GRID P100-16Q" }, + { 0x15F8, 0x128c, 0x10DE, "GRID P100-2B" }, + { 0x15F8, 0x12f2, 0x10DE, "GRID P100-2B4" }, + { 0x15F8, 0x133d, 0x10DE, "GRID P100-1B4" }, + { 0x15F8, 0x137c, 0x10DE, "GRID P100-16C" }, + { 0x15F8, 0x138a, 0x10DE, "GRID P100-4C" }, + { 0x15F8, 0x138b, 0x10DE, "GRID P100-8C" }, + { 0x15F9, 0x122c, 0x10DE, "GRID P100X-1B" }, + { 0x15F9, 0x122d, 0x10DE, "GRID P100X-1Q" }, + { 0x15F9, 0x122e, 0x10DE, "GRID P100X-2Q" }, + { 0x15F9, 0x122f, 0x10DE, "GRID P100X-4Q" }, + { 0x15F9, 0x1230, 0x10DE, "GRID P100X-8Q" }, + { 0x15F9, 0x1231, 0x10DE, "GRID P100X-16Q" }, + { 0x15F9, 0x128b, 0x10DE, "GRID P100X-2B" }, + { 0x15F9, 0x12f3, 0x10DE, "GRID P100X-2B4" }, + { 0x15F9, 0x133e, 0x10DE, "GRID P100X-1B4" }, + { 0x15F9, 0x137b, 0x10DE, "GRID P100X-16C" }, + { 0x15F9, 0x1388, 0x10DE, "GRID P100X-4C" }, + { 0x15F9, 0x1389, 0x10DE, "GRID P100X-8C" }, + { 0x1B38, 0x11e7, 0x10DE, "GRID P40-1B" }, + { 0x1B38, 0x11e8, 0x10DE, "GRID P40-1Q" }, + { 0x1B38, 0x11e9, 0x10DE, "GRID P40-2Q" }, + { 0x1B38, 0x11ea, 0x10DE, "GRID P40-3Q" }, + { 0x1B38, 0x11eb, 0x10DE, "GRID P40-4Q" }, + { 0x1B38, 0x11ec, 
0x10DE, "GRID P40-6Q" }, + { 0x1B38, 0x11ed, 0x10DE, "GRID P40-8Q" }, + { 0x1B38, 0x11ee, 0x10DE, "GRID P40-12Q" }, + { 0x1B38, 0x11ef, 0x10DE, "GRID P40-24Q" }, + { 0x1B38, 0x1287, 0x10DE, "GRID P40-2B" }, + { 0x1B38, 0x12b1, 0x10DE, "GeForce GTX P40-24" }, + { 0x1B38, 0x12b2, 0x10DE, "GeForce GTX P40-12" }, + { 0x1B38, 0x12b3, 0x10DE, "GeForce GTX P40-6" }, + { 0x1B38, 0x12ef, 0x10DE, "GRID P40-2B4" }, + { 0x1B38, 0x133a, 0x10DE, "GRID P40-1B4" }, + { 0x1B38, 0x137e, 0x10DE, "GRID P40-24C" }, + { 0x1B38, 0x1381, 0x10DE, "GRID P40-4C" }, + { 0x1B38, 0x1382, 0x10DE, "GRID P40-6C" }, + { 0x1B38, 0x1383, 0x10DE, "GRID P40-8C" }, + { 0x1B38, 0x1384, 0x10DE, "GRID P40-12C" }, + { 0x1B38, 0x13b0, 0x10DE, "GRID GTX P40-6" }, + { 0x1B38, 0x13b1, 0x10DE, "GRID GTX P40-12" }, + { 0x1B38, 0x13b2, 0x10DE, "GRID GTX P40-24" }, + { 0x1B38, 0x13d0, 0x10DE, "GRID GTX P40-8" }, + { 0x1BB3, 0x1203, 0x10DE, "GRID P4-1B" }, + { 0x1BB3, 0x1204, 0x10DE, "GRID P4-1Q" }, + { 0x1BB3, 0x1205, 0x10DE, "GRID P4-2Q" }, + { 0x1BB3, 0x1206, 0x10DE, "GRID P4-4Q" }, + { 0x1BB3, 0x1207, 0x10DE, "GRID P4-8Q" }, + { 0x1BB3, 0x1288, 0x10DE, "GRID P4-2B" }, + { 0x1BB3, 0x12f1, 0x10DE, "GRID P4-2B4" }, + { 0x1BB3, 0x133c, 0x10DE, "GRID P4-1B4" }, + { 0x1BB3, 0x136d, 0x10DE, "GRID GTX P4-2" }, + { 0x1BB3, 0x136e, 0x10DE, "GRID GTX P4-4" }, + { 0x1BB3, 0x136f, 0x10DE, "GRID GTX P4-8" }, + { 0x1BB3, 0x1380, 0x10DE, "GRID P4-8C" }, + { 0x1BB3, 0x1385, 0x10DE, "GRID P4-4C" }, + { 0x1BB4, 0x11f8, 0x10DE, "GRID P6-1B" }, + { 0x1BB4, 0x11f9, 0x10DE, "GRID P6-1Q" }, + { 0x1BB4, 0x11fa, 0x10DE, "GRID P6-2Q" }, + { 0x1BB4, 0x11fb, 0x10DE, "GRID P6-4Q" }, + { 0x1BB4, 0x11fc, 0x10DE, "GRID P6-8Q" }, + { 0x1BB4, 0x11fd, 0x10DE, "GRID P6-16Q" }, + { 0x1BB4, 0x1289, 0x10DE, "GRID P6-2B" }, + { 0x1BB4, 0x12f0, 0x10DE, "GRID P6-2B4" }, + { 0x1BB4, 0x133b, 0x10DE, "GRID P6-1B4" }, + { 0x1BB4, 0x137f, 0x10DE, "GRID P6-16C" }, + { 0x1BB4, 0x1386, 0x10DE, "GRID P6-4C" }, + { 0x1BB4, 0x1387, 0x10DE, "GRID P6-8C" }, + { 0x1DB1, 0x1259, 0x10DE, "GRID V100X-1B" }, + { 0x1DB1, 0x125a, 0x10DE, "GRID V100X-1Q" }, + { 0x1DB1, 0x125b, 0x10DE, "GRID V100X-2Q" }, + { 0x1DB1, 0x125c, 0x10DE, "GRID V100X-4Q" }, + { 0x1DB1, 0x125d, 0x10DE, "GRID V100X-8Q" }, + { 0x1DB1, 0x125e, 0x10DE, "GRID V100X-16Q" }, + { 0x1DB1, 0x128e, 0x10DE, "GRID V100X-2B" }, + { 0x1DB1, 0x12f6, 0x10DE, "GRID V100X-2B4" }, + { 0x1DB1, 0x1341, 0x10DE, "GRID V100X-1B4" }, + { 0x1DB1, 0x1378, 0x10DE, "GRID V100X-16C" }, + { 0x1DB1, 0x138e, 0x10DE, "GRID V100X-4C" }, + { 0x1DB1, 0x138f, 0x10DE, "GRID V100X-8C" }, + { 0x1DB3, 0x1290, 0x10DE, "GRID V100L-1B" }, + { 0x1DB3, 0x1291, 0x10DE, "GRID V100L-2B" }, + { 0x1DB3, 0x1292, 0x10DE, "GRID V100L-1Q" }, + { 0x1DB3, 0x1293, 0x10DE, "GRID V100L-2Q" }, + { 0x1DB3, 0x1294, 0x10DE, "GRID V100L-4Q" }, + { 0x1DB3, 0x1295, 0x10DE, "GRID V100L-8Q" }, + { 0x1DB3, 0x1296, 0x10DE, "GRID V100L-16Q" }, + { 0x1DB3, 0x12f9, 0x10DE, "GRID V100L-2B4" }, + { 0x1DB3, 0x1344, 0x10DE, "GRID V100L-1B4" }, + { 0x1DB3, 0x137a, 0x10DE, "GRID V100L-16C" }, + { 0x1DB3, 0x1398, 0x10DE, "GRID V100L-4C" }, + { 0x1DB3, 0x1399, 0x10DE, "GRID V100L-8C" }, + { 0x1DB4, 0x124e, 0x10DE, "GRID V100-1B" }, + { 0x1DB4, 0x124f, 0x10DE, "GRID V100-1Q" }, + { 0x1DB4, 0x1250, 0x10DE, "GRID V100-2Q" }, + { 0x1DB4, 0x1251, 0x10DE, "GRID V100-4Q" }, + { 0x1DB4, 0x1252, 0x10DE, "GRID V100-8Q" }, + { 0x1DB4, 0x1253, 0x10DE, "GRID V100-16Q" }, + { 0x1DB4, 0x128f, 0x10DE, "GRID V100-2B" }, + { 0x1DB4, 0x12f5, 0x10DE, "GRID V100-2B4" }, + { 0x1DB4, 0x1340, 0x10DE, "GRID V100-1B4" }, + { 0x1DB4, 
0x1379, 0x10DE, "GRID V100-16C" }, + { 0x1DB4, 0x1393, 0x10DE, "GRID V100-4C" }, + { 0x1DB4, 0x1394, 0x10DE, "GRID V100-8C" }, + { 0x1DB5, 0x12cb, 0x10DE, "GRID V100DX-1B" }, + { 0x1DB5, 0x12cc, 0x10DE, "GRID V100DX-2B" }, + { 0x1DB5, 0x12cd, 0x10DE, "GRID V100DX-1Q" }, + { 0x1DB5, 0x12ce, 0x10DE, "GRID V100DX-2Q" }, + { 0x1DB5, 0x12cf, 0x10DE, "GRID V100DX-4Q" }, + { 0x1DB5, 0x12d0, 0x10DE, "GRID V100DX-8Q" }, + { 0x1DB5, 0x12d1, 0x10DE, "GRID V100DX-16Q" }, + { 0x1DB5, 0x12d2, 0x10DE, "GRID V100DX-32Q" }, + { 0x1DB5, 0x12f8, 0x10DE, "GRID V100DX-2B4" }, + { 0x1DB5, 0x1343, 0x10DE, "GRID V100DX-1B4" }, + { 0x1DB5, 0x1376, 0x10DE, "GRID V100DX-32C" }, + { 0x1DB5, 0x1390, 0x10DE, "GRID V100DX-4C" }, + { 0x1DB5, 0x1391, 0x10DE, "GRID V100DX-8C" }, + { 0x1DB5, 0x1392, 0x10DE, "GRID V100DX-16C" }, + { 0x1DB6, 0x12bd, 0x10DE, "GRID V100D-1B" }, + { 0x1DB6, 0x12be, 0x10DE, "GRID V100D-2B" }, + { 0x1DB6, 0x12bf, 0x10DE, "GRID V100D-1Q" }, + { 0x1DB6, 0x12c0, 0x10DE, "GRID V100D-2Q" }, + { 0x1DB6, 0x12c1, 0x10DE, "GRID V100D-4Q" }, + { 0x1DB6, 0x12c2, 0x10DE, "GRID V100D-8Q" }, + { 0x1DB6, 0x12c3, 0x10DE, "GRID V100D-16Q" }, + { 0x1DB6, 0x12c4, 0x10DE, "GRID V100D-32Q" }, + { 0x1DB6, 0x12f7, 0x10DE, "GRID V100D-2B4" }, + { 0x1DB6, 0x1342, 0x10DE, "GRID V100D-1B4" }, + { 0x1DB6, 0x1377, 0x10DE, "GRID V100D-32C" }, + { 0x1DB6, 0x1395, 0x10DE, "GRID V100D-4C" }, + { 0x1DB6, 0x1396, 0x10DE, "GRID V100D-8C" }, + { 0x1DB6, 0x1397, 0x10DE, "GRID V100D-16C" }, + { 0x1DB6, 0x13cd, 0x10DE, "GRID GTX V100D-8" }, + { 0x1DB6, 0x13ce, 0x10DE, "GRID GTX V100D-16" }, + { 0x1DB6, 0x13cf, 0x10DE, "GRID GTX V100D-32" }, + { 0x1DF6, 0x13e1, 0x10DE, "GRID V100S-1B" }, + { 0x1DF6, 0x13e3, 0x10DE, "GRID V100S-2B" }, + { 0x1DF6, 0x13e5, 0x10DE, "GRID V100S-1Q" }, + { 0x1DF6, 0x13e6, 0x10DE, "GRID V100S-2Q" }, + { 0x1DF6, 0x13e7, 0x10DE, "GRID V100S-4Q" }, + { 0x1DF6, 0x13e8, 0x10DE, "GRID V100S-8Q" }, + { 0x1DF6, 0x13e9, 0x10DE, "GRID V100S-16Q" }, + { 0x1DF6, 0x13ea, 0x10DE, "GRID V100S-32Q" }, + { 0x1DF6, 0x13f1, 0x10DE, "GRID V100S-4C" }, + { 0x1DF6, 0x13f2, 0x10DE, "GRID V100S-8C" }, + { 0x1DF6, 0x13f3, 0x10DE, "GRID V100S-16C" }, + { 0x1DF6, 0x13f4, 0x10DE, "GRID V100S-32C" }, + { 0x1E30, 0x1325, 0x10DE, "GRID RTX6000-1Q" }, + { 0x1E30, 0x1326, 0x10DE, "GRID RTX6000-2Q" }, + { 0x1E30, 0x1327, 0x10DE, "GRID RTX6000-3Q" }, + { 0x1E30, 0x1328, 0x10DE, "GRID RTX6000-4Q" }, + { 0x1E30, 0x1329, 0x10DE, "GRID RTX6000-6Q" }, + { 0x1E30, 0x132a, 0x10DE, "GRID RTX6000-8Q" }, + { 0x1E30, 0x132b, 0x10DE, "GRID RTX6000-12Q" }, + { 0x1E30, 0x132c, 0x10DE, "GRID RTX6000-24Q" }, + { 0x1E30, 0x132d, 0x10DE, "GRID RTX8000-1Q" }, + { 0x1E30, 0x132e, 0x10DE, "GRID RTX8000-2Q" }, + { 0x1E30, 0x132f, 0x10DE, "GRID RTX8000-3Q" }, + { 0x1E30, 0x1330, 0x10DE, "GRID RTX8000-4Q" }, + { 0x1E30, 0x1331, 0x10DE, "GRID RTX8000-6Q" }, + { 0x1E30, 0x1332, 0x10DE, "GRID RTX8000-8Q" }, + { 0x1E30, 0x1333, 0x10DE, "GRID RTX8000-12Q" }, + { 0x1E30, 0x1334, 0x10DE, "GRID RTX8000-16Q" }, + { 0x1E30, 0x1335, 0x10DE, "GRID RTX8000-24Q" }, + { 0x1E30, 0x1336, 0x10DE, "GRID RTX8000-48Q" }, + { 0x1E30, 0x13b9, 0x10DE, "GRID RTX6000-6" }, + { 0x1E30, 0x13ba, 0x10DE, "GRID RTX6000-12" }, + { 0x1E30, 0x13bb, 0x10DE, "GRID RTX6000-24" }, + { 0x1E30, 0x13bc, 0x10DE, "GRID RTX8000-12" }, + { 0x1E30, 0x13bd, 0x10DE, "GRID RTX8000-24" }, + { 0x1E30, 0x13be, 0x10DE, "GRID RTX8000-48" }, + { 0x1E30, 0x13bf, 0x10DE, "GRID RTX6000-4C" }, + { 0x1E30, 0x13c0, 0x10DE, "GRID RTX6000-6C" }, + { 0x1E30, 0x13c1, 0x10DE, "GRID RTX6000-8C" }, + { 0x1E30, 0x13c2, 0x10DE, "GRID 
RTX6000-12C" }, + { 0x1E30, 0x13c3, 0x10DE, "GRID RTX6000-24C" }, + { 0x1E30, 0x13c4, 0x10DE, "GRID RTX8000-4C" }, + { 0x1E30, 0x13c5, 0x10DE, "GRID RTX8000-6C" }, + { 0x1E30, 0x13c6, 0x10DE, "GRID RTX8000-8C" }, + { 0x1E30, 0x13c7, 0x10DE, "GRID RTX8000-12C" }, + { 0x1E30, 0x13c8, 0x10DE, "GRID RTX8000-16C" }, + { 0x1E30, 0x13c9, 0x10DE, "GRID RTX8000-24C" }, + { 0x1E30, 0x13ca, 0x10DE, "GRID RTX8000-48C" }, + { 0x1E30, 0x13cb, 0x10DE, "GRID RTX6000-8" }, + { 0x1E30, 0x13cc, 0x10DE, "GRID RTX8000-16" }, + { 0x1E30, 0x1437, 0x10DE, "GRID RTX6000-1B" }, + { 0x1E30, 0x1438, 0x10DE, "GRID RTX6000-2B" }, + { 0x1E30, 0x1441, 0x10DE, "GRID RTX8000-1B" }, + { 0x1E30, 0x1442, 0x10DE, "GRID RTX8000-2B" }, + { 0x1E37, 0x1347, 0x10DE, "GeForce RTX T10x-8" }, + { 0x1E37, 0x1348, 0x10DE, "GeForce RTX T10x-4" }, + { 0x1E37, 0x1349, 0x10DE, "GeForce RTX T10x-2" }, + { 0x1E37, 0x136a, 0x10DE, "GRID RTX T10-4" }, + { 0x1E37, 0x136b, 0x10DE, "GRID RTX T10-8" }, + { 0x1E37, 0x136c, 0x10DE, "GRID RTX T10-16" }, + { 0x1E37, 0x13a4, 0x10DE, "GeForce RTX T10-4" }, + { 0x1E37, 0x13a5, 0x10DE, "GeForce RTX T10-8" }, + { 0x1E37, 0x13a6, 0x10DE, "GeForce RTX T10-16" }, + { 0x1E37, 0x13a7, 0x10DE, "GRID RTX T10x-2" }, + { 0x1E37, 0x13a8, 0x10DE, "GRID RTX T10x-4" }, + { 0x1E37, 0x13a9, 0x10DE, "GRID RTX T10x-8" }, + { 0x1E37, 0x148a, 0x10DE, "GRID RTX T10-2" }, + { 0x1E37, 0x148b, 0x10DE, "GRID RTX T10-1" }, + { 0x1E37, 0x148c, 0x10DE, "GRID RTX T10-0" }, + { 0x1E78, 0x13f7, 0x10DE, "GRID RTX6000P-1B" }, + { 0x1E78, 0x13f8, 0x10DE, "GRID RTX6000P-2B" }, + { 0x1E78, 0x13f9, 0x10DE, "GRID RTX6000P-1Q" }, + { 0x1E78, 0x13fa, 0x10DE, "GRID RTX6000P-2Q" }, + { 0x1E78, 0x13fb, 0x10DE, "GRID RTX6000P-3Q" }, + { 0x1E78, 0x13fc, 0x10DE, "GRID RTX6000P-4Q" }, + { 0x1E78, 0x13fd, 0x10DE, "GRID RTX6000P-6Q" }, + { 0x1E78, 0x13fe, 0x10DE, "GRID RTX6000P-8Q" }, + { 0x1E78, 0x13ff, 0x10DE, "GRID RTX6000P-12Q" }, + { 0x1E78, 0x1400, 0x10DE, "GRID RTX6000P-24Q" }, + { 0x1E78, 0x1409, 0x10DE, "GRID RTX6000P-6" }, + { 0x1E78, 0x140a, 0x10DE, "GRID RTX6000P-8" }, + { 0x1E78, 0x140b, 0x10DE, "GRID RTX6000P-12" }, + { 0x1E78, 0x140c, 0x10DE, "GRID RTX6000P-24" }, + { 0x1E78, 0x140d, 0x10DE, "GRID RTX6000P-4C" }, + { 0x1E78, 0x140e, 0x10DE, "GRID RTX6000P-6C" }, + { 0x1E78, 0x140f, 0x10DE, "GRID RTX6000P-8C" }, + { 0x1E78, 0x1410, 0x10DE, "GRID RTX6000P-12C" }, + { 0x1E78, 0x1411, 0x10DE, "GRID RTX6000P-24C" }, + { 0x1E78, 0x1412, 0x10DE, "GRID RTX8000P-1B" }, + { 0x1E78, 0x1413, 0x10DE, "GRID RTX8000P-2B" }, + { 0x1E78, 0x1414, 0x10DE, "GRID RTX8000P-1Q" }, + { 0x1E78, 0x1415, 0x10DE, "GRID RTX8000P-2Q" }, + { 0x1E78, 0x1416, 0x10DE, "GRID RTX8000P-3Q" }, + { 0x1E78, 0x1417, 0x10DE, "GRID RTX8000P-4Q" }, + { 0x1E78, 0x1418, 0x10DE, "GRID RTX8000P-6Q" }, + { 0x1E78, 0x1419, 0x10DE, "GRID RTX8000P-8Q" }, + { 0x1E78, 0x141a, 0x10DE, "GRID RTX8000P-12Q" }, + { 0x1E78, 0x141b, 0x10DE, "GRID RTX8000P-16Q" }, + { 0x1E78, 0x141c, 0x10DE, "GRID RTX8000P-24Q" }, + { 0x1E78, 0x141d, 0x10DE, "GRID RTX8000P-48Q" }, + { 0x1E78, 0x1427, 0x10DE, "GRID RTX8000P-12" }, + { 0x1E78, 0x1428, 0x10DE, "GRID RTX8000P-16" }, + { 0x1E78, 0x1429, 0x10DE, "GRID RTX8000P-24" }, + { 0x1E78, 0x142a, 0x10DE, "GRID RTX8000P-48" }, + { 0x1E78, 0x142b, 0x10DE, "GRID RTX8000P-4C" }, + { 0x1E78, 0x142c, 0x10DE, "GRID RTX8000P-6C" }, + { 0x1E78, 0x142d, 0x10DE, "GRID RTX8000P-8C" }, + { 0x1E78, 0x142e, 0x10DE, "GRID RTX8000P-12C" }, + { 0x1E78, 0x142f, 0x10DE, "GRID RTX8000P-16C" }, + { 0x1E78, 0x1430, 0x10DE, "GRID RTX8000P-24C" }, + { 0x1E78, 0x1431, 0x10DE, "GRID 
RTX8000P-48C" }, + { 0x1EB8, 0x1309, 0x10DE, "GRID T4-1B" }, + { 0x1EB8, 0x130a, 0x10DE, "GRID T4-2B" }, + { 0x1EB8, 0x130b, 0x10DE, "GRID T4-2B4" }, + { 0x1EB8, 0x130c, 0x10DE, "GRID T4-1Q" }, + { 0x1EB8, 0x130d, 0x10DE, "GRID T4-2Q" }, + { 0x1EB8, 0x130e, 0x10DE, "GRID T4-4Q" }, + { 0x1EB8, 0x130f, 0x10DE, "GRID T4-8Q" }, + { 0x1EB8, 0x1310, 0x10DE, "GRID T4-16Q" }, + { 0x1EB8, 0x1345, 0x10DE, "GRID T4-1B4" }, + { 0x1EB8, 0x1367, 0x10DE, "GRID RTX T4-4" }, + { 0x1EB8, 0x1368, 0x10DE, "GRID RTX T4-8" }, + { 0x1EB8, 0x1369, 0x10DE, "GRID RTX T4-16" }, + { 0x1EB8, 0x1375, 0x10DE, "GRID T4-16C" }, + { 0x1EB8, 0x139a, 0x10DE, "GRID T4-4C" }, + { 0x1EB8, 0x139b, 0x10DE, "GRID T4-8C" }, + { 0x1EB8, 0x148d, 0x10DE, "GRID RTX T4-2" }, + { 0x1EB8, 0x148e, 0x10DE, "GRID RTX T4-1" }, + { 0x1EB8, 0x148f, 0x10DE, "GRID RTX T4-0" }, + { 0x20B0, 0x146f, 0x10DE, "GRID A100X-1-5C" }, + { 0x20B0, 0x1470, 0x10DE, "GRID A100X-2-10C" }, + { 0x20B0, 0x1471, 0x10DE, "GRID A100X-3-20C" }, + { 0x20B0, 0x1472, 0x10DE, "GRID A100X-4-20C" }, + { 0x20B0, 0x1473, 0x10DE, "GRID A100X-7-40C" }, + { 0x20B0, 0x1474, 0x10DE, "GRID A100X-4C" }, + { 0x20B0, 0x1475, 0x10DE, "GRID A100X-5C" }, + { 0x20B0, 0x1476, 0x10DE, "GRID A100X-8C" }, + { 0x20B0, 0x1477, 0x10DE, "GRID A100X-10C" }, + { 0x20B0, 0x1478, 0x10DE, "GRID A100X-20C" }, + { 0x20B0, 0x1479, 0x10DE, "GRID A100X-40C" }, + { 0x20B0, 0x160c, 0x10DE, "GRID A100X-1-5CME" }, + { 0x20B2, 0x1523, 0x10DE, "GRID A100DX-1-10C" }, + { 0x20B2, 0x1524, 0x10DE, "GRID A100DX-2-20C" }, + { 0x20B2, 0x1525, 0x10DE, "GRID A100DX-3-40C" }, + { 0x20B2, 0x1526, 0x10DE, "GRID A100DX-4-40C" }, + { 0x20B2, 0x1527, 0x10DE, "GRID A100DX-7-80C" }, + { 0x20B2, 0x1528, 0x10DE, "GRID A100DX-4C" }, + { 0x20B2, 0x1529, 0x10DE, "GRID A100DX-8C" }, + { 0x20B2, 0x152a, 0x10DE, "GRID A100DX-10C" }, + { 0x20B2, 0x152b, 0x10DE, "GRID A100DX-16C" }, + { 0x20B2, 0x152c, 0x10DE, "GRID A100DX-20C" }, + { 0x20B2, 0x152d, 0x10DE, "GRID A100DX-40C" }, + { 0x20B2, 0x152e, 0x10DE, "GRID A100DX-80C" }, + { 0x20B2, 0x160d, 0x10DE, "GRID A100DX-1-10CME" }, + { 0x20B5, 0x1591, 0x10DE, "GRID A100D-1-10C" }, + { 0x20B5, 0x1592, 0x10DE, "GRID A100D-2-20C" }, + { 0x20B5, 0x1593, 0x10DE, "GRID A100D-3-40C" }, + { 0x20B5, 0x1594, 0x10DE, "GRID A100D-4-40C" }, + { 0x20B5, 0x1595, 0x10DE, "GRID A100D-7-80C" }, + { 0x20B5, 0x1596, 0x10DE, "GRID A100D-4C" }, + { 0x20B5, 0x1597, 0x10DE, "GRID A100D-8C" }, + { 0x20B5, 0x1598, 0x10DE, "GRID A100D-10C" }, + { 0x20B5, 0x1599, 0x10DE, "GRID A100D-16C" }, + { 0x20B5, 0x159a, 0x10DE, "GRID A100D-20C" }, + { 0x20B5, 0x159b, 0x10DE, "GRID A100D-40C" }, + { 0x20B5, 0x159c, 0x10DE, "GRID A100D-80C" }, + { 0x20B5, 0x160f, 0x10DE, "GRID A100D-1-10CME" }, + { 0x20B7, 0x1589, 0x10DE, "NVIDIA A30-1-6C" }, + { 0x20B7, 0x158a, 0x10DE, "NVIDIA A30-2-12C" }, + { 0x20B7, 0x158b, 0x10DE, "NVIDIA A30-4-24C" }, + { 0x20B7, 0x158c, 0x10DE, "NVIDIA A30-4C" }, + { 0x20B7, 0x158d, 0x10DE, "NVIDIA A30-6C" }, + { 0x20B7, 0x158e, 0x10DE, "NVIDIA A30-8C" }, + { 0x20B7, 0x158f, 0x10DE, "NVIDIA A30-12C" }, + { 0x20B7, 0x1590, 0x10DE, "NVIDIA A30-24C" }, + { 0x20B7, 0x1610, 0x10DE, "NVIDIA A30-1-6CME" }, + { 0x20BF, 0x4450, 0x10DE, "GRID A100B-4C" }, + { 0x20BF, 0x4451, 0x10DE, "GRID A100B-5C" }, + { 0x20BF, 0x4452, 0x10DE, "GRID A100B-8C" }, + { 0x20BF, 0x4453, 0x10DE, "GRID A100B-10C" }, + { 0x20BF, 0x4454, 0x10DE, "GRID A100B-20C" }, + { 0x20BF, 0x4455, 0x10DE, "GRID A100B-40C" }, + { 0x20BF, 0x5560, 0x10DE, "GRID A100B-1-5C" }, + { 0x20BF, 0x5561, 0x10DE, "GRID A100B-2-10C" }, + { 0x20BF, 0x5562, 0x10DE, 
"GRID A100B-3-20C" }, + { 0x20BF, 0x5563, 0x10DE, "GRID A100B-4-20C" }, + { 0x20BF, 0x5564, 0x10DE, "GRID A100B-7-40C" }, + { 0x20F1, 0x1493, 0x10DE, "GRID A100-1-5C" }, + { 0x20F1, 0x1494, 0x10DE, "GRID A100-2-10C" }, + { 0x20F1, 0x1495, 0x10DE, "GRID A100-3-20C" }, + { 0x20F1, 0x1496, 0x10DE, "GRID A100-4-20C" }, + { 0x20F1, 0x1497, 0x10DE, "GRID A100-7-40C" }, + { 0x20F1, 0x1498, 0x10DE, "GRID A100-4C" }, + { 0x20F1, 0x1499, 0x10DE, "GRID A100-5C" }, + { 0x20F1, 0x149a, 0x10DE, "GRID A100-8C" }, + { 0x20F1, 0x149b, 0x10DE, "GRID A100-10C" }, + { 0x20F1, 0x149c, 0x10DE, "GRID A100-20C" }, + { 0x20F1, 0x149d, 0x10DE, "GRID A100-40C" }, + { 0x20F1, 0x160e, 0x10DE, "GRID A100-1-5CME" }, + { 0x2230, 0x14fa, 0x10DE, "NVIDIA RTXA6000-1B" }, + { 0x2230, 0x14fb, 0x10DE, "NVIDIA RTXA6000-2B" }, + { 0x2230, 0x14fc, 0x10DE, "NVIDIA RTXA6000-1Q" }, + { 0x2230, 0x14fd, 0x10DE, "NVIDIA RTXA6000-2Q" }, + { 0x2230, 0x14fe, 0x10DE, "NVIDIA RTXA6000-3Q" }, + { 0x2230, 0x14ff, 0x10DE, "NVIDIA RTXA6000-4Q" }, + { 0x2230, 0x1500, 0x10DE, "NVIDIA RTXA6000-6Q" }, + { 0x2230, 0x1501, 0x10DE, "NVIDIA RTXA6000-8Q" }, + { 0x2230, 0x1502, 0x10DE, "NVIDIA RTXA6000-12Q" }, + { 0x2230, 0x1503, 0x10DE, "NVIDIA RTXA6000-16Q" }, + { 0x2230, 0x1504, 0x10DE, "NVIDIA RTXA6000-24Q" }, + { 0x2230, 0x1505, 0x10DE, "NVIDIA RTXA6000-48Q" }, + { 0x2230, 0x1510, 0x10DE, "NVIDIA RTXA6000-12" }, + { 0x2230, 0x1511, 0x10DE, "NVIDIA RTXA6000-16" }, + { 0x2230, 0x1512, 0x10DE, "NVIDIA RTXA6000-24" }, + { 0x2230, 0x1513, 0x10DE, "NVIDIA RTXA6000-48" }, + { 0x2230, 0x1514, 0x10DE, "NVIDIA RTXA6000-4C" }, + { 0x2230, 0x1515, 0x10DE, "NVIDIA RTXA6000-6C" }, + { 0x2230, 0x1516, 0x10DE, "NVIDIA RTXA6000-8C" }, + { 0x2230, 0x1517, 0x10DE, "NVIDIA RTXA6000-12C" }, + { 0x2230, 0x1518, 0x10DE, "NVIDIA RTXA6000-16C" }, + { 0x2230, 0x1519, 0x10DE, "NVIDIA RTXA6000-24C" }, + { 0x2230, 0x151a, 0x10DE, "NVIDIA RTXA6000-48C" }, + { 0x2231, 0x1562, 0x10DE, "NVIDIA RTXA5000-1B" }, + { 0x2231, 0x1563, 0x10DE, "NVIDIA RTXA5000-2B" }, + { 0x2231, 0x1564, 0x10DE, "NVIDIA RTXA5000-1Q" }, + { 0x2231, 0x1565, 0x10DE, "NVIDIA RTXA5000-2Q" }, + { 0x2231, 0x1566, 0x10DE, "NVIDIA RTXA5000-3Q" }, + { 0x2231, 0x1567, 0x10DE, "NVIDIA RTXA5000-4Q" }, + { 0x2231, 0x1568, 0x10DE, "NVIDIA RTXA5000-6Q" }, + { 0x2231, 0x1569, 0x10DE, "NVIDIA RTXA5000-8Q" }, + { 0x2231, 0x156a, 0x10DE, "NVIDIA RTXA5000-12Q" }, + { 0x2231, 0x156b, 0x10DE, "NVIDIA RTXA5000-24Q" }, + { 0x2231, 0x1574, 0x10DE, "NVIDIA RTXA5000-6" }, + { 0x2231, 0x1575, 0x10DE, "NVIDIA RTXA5000-8" }, + { 0x2231, 0x1576, 0x10DE, "NVIDIA RTXA5000-12" }, + { 0x2231, 0x1577, 0x10DE, "NVIDIA RTXA5000-24" }, + { 0x2231, 0x1578, 0x10DE, "NVIDIA RTXA5000-4C" }, + { 0x2231, 0x1579, 0x10DE, "NVIDIA RTXA5000-6C" }, + { 0x2231, 0x157a, 0x10DE, "NVIDIA RTXA5000-8C" }, + { 0x2231, 0x157b, 0x10DE, "NVIDIA RTXA5000-12C" }, + { 0x2231, 0x157c, 0x10DE, "NVIDIA RTXA5000-24C" }, + { 0x2233, 0x165c, 0x10DE, "NVIDIA RTXA5500-1B" }, + { 0x2233, 0x165d, 0x10DE, "NVIDIA RTXA5500-2B" }, + { 0x2233, 0x165e, 0x10DE, "NVIDIA RTXA5500-1Q" }, + { 0x2233, 0x165f, 0x10DE, "NVIDIA RTXA5500-2Q" }, + { 0x2233, 0x1660, 0x10DE, "NVIDIA RTXA5500-3Q" }, + { 0x2233, 0x1661, 0x10DE, "NVIDIA RTXA5500-4Q" }, + { 0x2233, 0x1662, 0x10DE, "NVIDIA RTXA5500-6Q" }, + { 0x2233, 0x1663, 0x10DE, "NVIDIA RTXA5500-8Q" }, + { 0x2233, 0x1664, 0x10DE, "NVIDIA RTXA5500-12Q" }, + { 0x2233, 0x1665, 0x10DE, "NVIDIA RTXA5500-24Q" }, + { 0x2233, 0x166e, 0x10DE, "NVIDIA RTXA5500-6" }, + { 0x2233, 0x166f, 0x10DE, "NVIDIA RTXA5500-8" }, + { 0x2233, 0x1670, 0x10DE, "NVIDIA 
RTXA5500-12" }, + { 0x2233, 0x1671, 0x10DE, "NVIDIA RTXA5500-24" }, + { 0x2233, 0x1672, 0x10DE, "NVIDIA RTXA5500-4C" }, + { 0x2233, 0x1673, 0x10DE, "NVIDIA RTXA5500-6C" }, + { 0x2233, 0x1674, 0x10DE, "NVIDIA RTXA5500-8C" }, + { 0x2233, 0x1675, 0x10DE, "NVIDIA RTXA5500-12C" }, + { 0x2233, 0x1676, 0x10DE, "NVIDIA RTXA5500-24C" }, + { 0x2235, 0x14d5, 0x10DE, "NVIDIA A40-1B" }, + { 0x2235, 0x14d6, 0x10DE, "NVIDIA A40-2B" }, + { 0x2235, 0x14d7, 0x10DE, "NVIDIA A40-1Q" }, + { 0x2235, 0x14d8, 0x10DE, "NVIDIA A40-2Q" }, + { 0x2235, 0x14d9, 0x10DE, "NVIDIA A40-3Q" }, + { 0x2235, 0x14da, 0x10DE, "NVIDIA A40-4Q" }, + { 0x2235, 0x14db, 0x10DE, "NVIDIA A40-6Q" }, + { 0x2235, 0x14dc, 0x10DE, "NVIDIA A40-8Q" }, + { 0x2235, 0x14dd, 0x10DE, "NVIDIA A40-12Q" }, + { 0x2235, 0x14de, 0x10DE, "NVIDIA A40-16Q" }, + { 0x2235, 0x14df, 0x10DE, "NVIDIA A40-24Q" }, + { 0x2235, 0x14e0, 0x10DE, "NVIDIA A40-48Q" }, + { 0x2235, 0x14eb, 0x10DE, "NVIDIA A40-12" }, + { 0x2235, 0x14ec, 0x10DE, "NVIDIA A40-16" }, + { 0x2235, 0x14ed, 0x10DE, "NVIDIA A40-24" }, + { 0x2235, 0x14ee, 0x10DE, "NVIDIA A40-48" }, + { 0x2235, 0x14f3, 0x10DE, "NVIDIA A40-4C" }, + { 0x2235, 0x14f4, 0x10DE, "NVIDIA A40-6C" }, + { 0x2235, 0x14f5, 0x10DE, "NVIDIA A40-8C" }, + { 0x2235, 0x14f6, 0x10DE, "NVIDIA A40-12C" }, + { 0x2235, 0x14f7, 0x10DE, "NVIDIA A40-16C" }, + { 0x2235, 0x14f8, 0x10DE, "NVIDIA A40-24C" }, + { 0x2235, 0x14f9, 0x10DE, "NVIDIA A40-48C" }, + { 0x2235, 0x1684, 0x10DE, "NVIDIA A40-2" }, + { 0x2235, 0x1685, 0x10DE, "NVIDIA A40-3" }, + { 0x2235, 0x1686, 0x10DE, "NVIDIA A40-4" }, + { 0x2235, 0x1687, 0x10DE, "NVIDIA A40-6" }, + { 0x2235, 0x1688, 0x10DE, "NVIDIA A40-8" }, + { 0x2236, 0x14b6, 0x10DE, "NVIDIA A10-1B" }, + { 0x2236, 0x14b7, 0x10DE, "NVIDIA A10-2B" }, + { 0x2236, 0x14b8, 0x10DE, "NVIDIA A10-1Q" }, + { 0x2236, 0x14b9, 0x10DE, "NVIDIA A10-2Q" }, + { 0x2236, 0x14ba, 0x10DE, "NVIDIA A10-3Q" }, + { 0x2236, 0x14bb, 0x10DE, "NVIDIA A10-4Q" }, + { 0x2236, 0x14bc, 0x10DE, "NVIDIA A10-6Q" }, + { 0x2236, 0x14bd, 0x10DE, "NVIDIA A10-8Q" }, + { 0x2236, 0x14be, 0x10DE, "NVIDIA A10-12Q" }, + { 0x2236, 0x14bf, 0x10DE, "NVIDIA A10-24Q" }, + { 0x2236, 0x14c8, 0x10DE, "NVIDIA A10-6" }, + { 0x2236, 0x14c9, 0x10DE, "NVIDIA A10-8" }, + { 0x2236, 0x14ca, 0x10DE, "NVIDIA A10-12" }, + { 0x2236, 0x14cb, 0x10DE, "NVIDIA A10-24" }, + { 0x2236, 0x14d0, 0x10DE, "NVIDIA A10-4C" }, + { 0x2236, 0x14d1, 0x10DE, "NVIDIA A10-6C" }, + { 0x2236, 0x14d2, 0x10DE, "NVIDIA A10-8C" }, + { 0x2236, 0x14d3, 0x10DE, "NVIDIA A10-12C" }, + { 0x2236, 0x14d4, 0x10DE, "NVIDIA A10-24C" }, + { 0x2236, 0x167e, 0x10DE, "NVIDIA A10-2" }, + { 0x2236, 0x167f, 0x10DE, "NVIDIA A10-3" }, + { 0x2236, 0x1680, 0x10DE, "NVIDIA A10-4" }, + { 0x2237, 0x155b, 0x10DE, "NVIDIA A10G-2" }, + { 0x2237, 0x155c, 0x10DE, "NVIDIA A10G-3" }, + { 0x2237, 0x155d, 0x10DE, "NVIDIA A10G-4" }, + { 0x2237, 0x155e, 0x10DE, "NVIDIA A10G-6" }, + { 0x2237, 0x155f, 0x10DE, "NVIDIA A10G-8" }, + { 0x2237, 0x1560, 0x10DE, "NVIDIA A10G-12" }, + { 0x2237, 0x1561, 0x10DE, "NVIDIA A10G-24" }, + { 0x2237, 0x162a, 0x10DE, "NVIDIA A10G-1B" }, + { 0x2237, 0x162b, 0x10DE, "NVIDIA A10G-2B" }, + { 0x2237, 0x162c, 0x10DE, "NVIDIA A10G-1Q" }, + { 0x2237, 0x162d, 0x10DE, "NVIDIA A10G-2Q" }, + { 0x2237, 0x162e, 0x10DE, "NVIDIA A10G-3Q" }, + { 0x2237, 0x162f, 0x10DE, "NVIDIA A10G-4Q" }, + { 0x2237, 0x1630, 0x10DE, "NVIDIA A10G-6Q" }, + { 0x2237, 0x1631, 0x10DE, "NVIDIA A10G-8Q" }, + { 0x2237, 0x1632, 0x10DE, "NVIDIA A10G-12Q" }, + { 0x2237, 0x1633, 0x10DE, "NVIDIA A10G-24Q" }, + { 0x2238, 0x16a3, 0x10DE, "NVIDIA A10M-1B" }, + { 0x2238, 
0x16a4, 0x10DE, "NVIDIA A10M-2B" }, + { 0x2238, 0x16a5, 0x10DE, "NVIDIA A10M-1Q" }, + { 0x2238, 0x16a6, 0x10DE, "NVIDIA A10M-2Q" }, + { 0x2238, 0x16a7, 0x10DE, "NVIDIA A10M-4Q" }, + { 0x2238, 0x16a8, 0x10DE, "NVIDIA A10M-5Q" }, + { 0x2238, 0x16a9, 0x10DE, "NVIDIA A10M-10Q" }, + { 0x2238, 0x16aa, 0x10DE, "NVIDIA A10M-20Q" }, + { 0x2238, 0x16b1, 0x10DE, "NVIDIA A10M-2" }, + { 0x2238, 0x16b2, 0x10DE, "NVIDIA A10M-4" }, + { 0x2238, 0x16b3, 0x10DE, "NVIDIA A10M-5" }, + { 0x2238, 0x16b4, 0x10DE, "NVIDIA A10M-10" }, + { 0x2238, 0x16b5, 0x10DE, "NVIDIA A10M-20" }, + { 0x2238, 0x16b6, 0x10DE, "NVIDIA A10M-4C" }, + { 0x2238, 0x16b7, 0x10DE, "NVIDIA A10M-5C" }, + { 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" }, + { 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" }, + { 0x2331, 0x16d3, 0x10DE, "GRID H100-1-10C" }, + { 0x2331, 0x16d4, 0x10DE, "GRID H100-2-20C" }, + { 0x2331, 0x16d5, 0x10DE, "GRID H100-3-40C" }, + { 0x2331, 0x16d6, 0x10DE, "GRID H100-4-40C" }, + { 0x2331, 0x16d7, 0x10DE, "GRID H100-7-80C" }, + { 0x2331, 0x16d8, 0x10DE, "GRID H100-4C" }, + { 0x2331, 0x16d9, 0x10DE, "GRID H100-8C" }, + { 0x2331, 0x16da, 0x10DE, "GRID H100-10C" }, + { 0x2331, 0x16db, 0x10DE, "GRID H100-16C" }, + { 0x2331, 0x16dc, 0x10DE, "GRID H100-20C" }, + { 0x2331, 0x16dd, 0x10DE, "GRID H100-40C" }, + { 0x2331, 0x16de, 0x10DE, "GRID H100-80C" }, + { 0x25B6, 0x159d, 0x10DE, "NVIDIA A16-1B" }, + { 0x25B6, 0x159e, 0x10DE, "NVIDIA A16-2B" }, + { 0x25B6, 0x159f, 0x10DE, "NVIDIA A16-1Q" }, + { 0x25B6, 0x1600, 0x10DE, "NVIDIA A16-2Q" }, + { 0x25B6, 0x1601, 0x10DE, "NVIDIA A16-4Q" }, + { 0x25B6, 0x1602, 0x10DE, "NVIDIA A16-8Q" }, + { 0x25B6, 0x1603, 0x10DE, "NVIDIA A16-16Q" }, + { 0x25B6, 0x1609, 0x10DE, "NVIDIA A16-4C" }, + { 0x25B6, 0x160a, 0x10DE, "NVIDIA A16-8C" }, + { 0x25B6, 0x160b, 0x10DE, "NVIDIA A16-16C" }, + { 0x25B6, 0x1646, 0x10DE, "NVIDIA A2-1B" }, + { 0x25B6, 0x1647, 0x10DE, "NVIDIA A2-2B" }, + { 0x25B6, 0x1648, 0x10DE, "NVIDIA A2-1Q" }, + { 0x25B6, 0x1649, 0x10DE, "NVIDIA A2-2Q" }, + { 0x25B6, 0x164a, 0x10DE, "NVIDIA A2-4Q" }, + { 0x25B6, 0x164b, 0x10DE, "NVIDIA A2-8Q" }, + { 0x25B6, 0x164c, 0x10DE, "NVIDIA A2-16Q" }, + { 0x25B6, 0x1652, 0x10DE, "NVIDIA A2-4" }, + { 0x25B6, 0x1653, 0x10DE, "NVIDIA A2-8" }, + { 0x25B6, 0x1654, 0x10DE, "NVIDIA A2-16" }, + { 0x25B6, 0x1655, 0x10DE, "NVIDIA A2-4C" }, + { 0x25B6, 0x1656, 0x10DE, "NVIDIA A2-8C" }, + { 0x25B6, 0x1657, 0x10DE, "NVIDIA A2-16C" }, +}; + +#endif // G_NV_NAME_RELEASED_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h new file mode 100644 index 0000000..219042a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h @@ -0,0 +1,28 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// NVOC Header State : This file is used for different code path for disabled NVH +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_nvh_state.h +// + +#ifndef _G_NVH_STATE_H_ +#define _G_NVH_STATE_H_ + +// +// __nvoc_nvh_state_guard +// This macro define is used to check whether this header is included before +// NVOC headers. 
The usage: +// #ifndef __nvoc_nvh_state_guard +// #error "NVH state guard header is not included prior to this NVOC header" +// #endif +// +#define __nvoc_nvh_state_guard + +// +// List of disabled NVOC headers +// + + + +#endif // _G_NVH_STATE_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c new file mode 100644 index 0000000..7e87023 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c @@ -0,0 +1,130 @@ +#define NVOC_OBJECT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_object_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x497031 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_Object(Object*); +void __nvoc_init_funcTable_Object(Object*); +NV_STATUS __nvoc_ctor_Object(Object*); +void __nvoc_init_dataField_Object(Object*); +void __nvoc_dtor_Object(Object*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Object; + +static const struct NVOC_RTTI __nvoc_rtti_Object_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Object, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Object = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_Object_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Object = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Object), + /*classId=*/ classId(Object), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Object", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Object, + /*pCastInfo=*/ &__nvoc_castinfo_Object, + /*pExportInfo=*/ &__nvoc_export_info_Object +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Object = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Object(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_Object(pThis); + goto __nvoc_ctor_Object_exit; // Success + +__nvoc_ctor_Object_exit: + + return status; +} + +static void __nvoc_init_funcTable_Object_1(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_Object(Object *pThis) { + __nvoc_init_funcTable_Object_1(pThis); +} + +void __nvoc_init_Object(Object *pThis) { + pThis->__nvoc_pbase_Object = pThis; + __nvoc_init_funcTable_Object(pThis); +} + +NV_STATUS __nvoc_objCreate_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + Object *pThis; + + pThis = portMemAllocNonPaged(sizeof(Object)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Object)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Object); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, pThis); + } + else + { + pThis->pParent = NULL; + } + + __nvoc_init_Object(pThis); + status = __nvoc_ctor_Object(pThis); + if (status != NV_OK) goto __nvoc_objCreate_Object_cleanup; + + *ppThis = pThis; + return NV_OK; + 
+__nvoc_objCreate_Object_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_Object(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h new file mode 100644 index 0000000..d9df8bb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h @@ -0,0 +1,187 @@ +#ifndef _G_OBJECT_NVOC_H_ +#define _G_OBJECT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#include "g_object_nvoc.h" + +#ifndef _NVOC_OBJECT_H_ +#define _NVOC_OBJECT_H_ + +#include "nvtypes.h" +#include "nvstatus.h" + +#include "nvoc/prelude.h" + +struct Object; + +#ifndef __NVOC_CLASS_Object_TYPEDEF__ +#define __NVOC_CLASS_Object_TYPEDEF__ +typedef struct Object Object; +#endif /* __NVOC_CLASS_Object_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Object +#define __nvoc_class_id_Object 0x497031 +#endif /* __nvoc_class_id_Object */ + + +struct NVOC_CLASS_INFO; + +/*! + * Tracks the head of an object's child list, and the next object in its + * parent's child list. + */ +struct NVOC_CHILD_TREE +{ + struct Object *pChild; + struct Object *pSibling; +}; + +//! The base class of all instantiable NVOC objects. 
+#ifdef NVOC_OBJECT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Object { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object *__nvoc_pbase_Object; + struct Object *pParent; + struct NVOC_CHILD_TREE childTree; + NvU32 ipVersion; +}; + +#ifndef __NVOC_CLASS_Object_TYPEDEF__ +#define __NVOC_CLASS_Object_TYPEDEF__ +typedef struct Object Object; +#endif /* __NVOC_CLASS_Object_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Object +#define __nvoc_class_id_Object 0x497031 +#endif /* __nvoc_class_id_Object */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +#define __staticCast_Object(pThis) \ + ((pThis)->__nvoc_pbase_Object) + +#ifdef __nvoc_object_h_disabled +#define __dynamicCast_Object(pThis) ((Object*)NULL) +#else //__nvoc_object_h_disabled +#define __dynamicCast_Object(pThis) \ + ((Object*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Object))) +#endif //__nvoc_object_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Object(Object**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Object(Object**, Dynamic*, NvU32); +#define __objCreate_Object(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_Object((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +void objAddChild_IMPL(struct Object *pObj, struct Object *pChild); +#ifdef __nvoc_object_h_disabled +static inline void objAddChild(struct Object *pObj, struct Object *pChild) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); +} +#else //__nvoc_object_h_disabled +#define objAddChild(pObj, pChild) objAddChild_IMPL(pObj, pChild) +#endif //__nvoc_object_h_disabled + +void objRemoveChild_IMPL(struct Object *pObj, struct Object *pChild); +#ifdef __nvoc_object_h_disabled +static inline void objRemoveChild(struct Object *pObj, struct Object *pChild) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); +} +#else //__nvoc_object_h_disabled +#define objRemoveChild(pObj, pChild) objRemoveChild_IMPL(pObj, pChild) +#endif //__nvoc_object_h_disabled + +struct Object *objGetChild_IMPL(struct Object *pObj); +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetChild(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetChild(pObj) objGetChild_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +struct Object *objGetSibling_IMPL(struct Object *pObj); +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetSibling(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetSibling(pObj) objGetSibling_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +struct Object *objGetDirectParent_IMPL(struct Object *pObj); +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetDirectParent(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetDirectParent(pObj) objGetDirectParent_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +#undef PRIVATE_FIELD + + +// +// IP versioning definitions are temporary until NVOC halspec support is +// finished. +// +// IP_VERSION format as defined by the hardware engines. +// A _MAJOR value of 0 means the object has no version number. 
+// + +#define NV_ODB_IP_VER_DEV 7:0 /* R-IVF */ +#define NV_ODB_IP_VER_ECO 15:8 /* R-IVF */ +#define NV_ODB_IP_VER_MINOR 23:16 /* R-IVF */ +#define NV_ODB_IP_VER_MAJOR 31:24 /* R-IVF */ + +#define IPVersion(pObj) staticCast((pObj), Object)->ipVersion +#define IsIPVersionValid(pObj) (DRF_VAL(_ODB, _IP_VER, _MAJOR, IPVersion(pObj)) != 0) +#define IsIPVersionOrLater(pObj, v0) (IPVersion(pObj) >= (v0)) +// v0 .. v1 inclusive +#define IsIPVersionInRange(pObj, v0, v1) ((IPVersion(pObj) >= (v0)) && (IPVersion(pObj) <= (v1))) + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OBJECT_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c new file mode 100644 index 0000000..f3cecb9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c @@ -0,0 +1,357 @@ +#define NVOC_OBJTMR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_objtmr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x9ddede = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_OBJTMR(OBJTMR*, RmHalspecOwner* ); +void __nvoc_init_funcTable_OBJTMR(OBJTMR*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR*, RmHalspecOwner* ); +void __nvoc_init_dataField_OBJTMR(OBJTMR*, RmHalspecOwner* ); +void __nvoc_dtor_OBJTMR(OBJTMR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTMR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_OBJTMR = { + /*pClassDef=*/ &__nvoc_class_def_OBJTMR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJTMR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJTMR = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJTMR_OBJTMR, + &__nvoc_rtti_OBJTMR_OBJENGSTATE, + &__nvoc_rtti_OBJTMR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJTMR), + /*classId=*/ classId(OBJTMR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJTMR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJTMR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJTMR, + /*pExportInfo=*/ &__nvoc_export_info_OBJTMR +}; + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, ENGDESCRIPTOR arg0) { + return tmrConstructEngine(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + return tmrStateInitLocked(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static 
NV_STATUS __nvoc_thunk_OBJTMR_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg0) { + return tmrStateLoad(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg0) { + return tmrStateUnload(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJTMR_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + tmrStateDestroy(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrReconcileTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreLoad(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePostUnload(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreUnload(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStateInitUnlocked(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_tmrInitMissing(POBJGPU pGpu, struct OBJTMR *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreInitLocked(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreInitUnlocked(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrGetTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrCompareTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_tmrFreeTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + 
__nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePostLoad(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrAllocTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrSetTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_tmrIsPresent(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTMR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJTMR(OBJTMR *pThis) { + __nvoc_tmrDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->setProperty(pThis, PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS + if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */ + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS, ((NvBool)(0 != 0))); + } + pThis->setProperty(pThis, PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS, (0)); + pThis->setProperty(pThis, PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS, (0)); + + // NVOC Property Hal 
field -- PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL, ((NvBool)(0 != 0))); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_OBJTMR_fail_OBJENGSTATE; + __nvoc_init_dataField_OBJTMR(pThis, pRmhalspecowner); + goto __nvoc_ctor_OBJTMR_exit; // Success + +__nvoc_ctor_OBJTMR_fail_OBJENGSTATE: +__nvoc_ctor_OBJTMR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJTMR_1(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__tmrConstructEngine__ = &tmrConstructEngine_IMPL; + + pThis->__tmrStateInitLocked__ = &tmrStateInitLocked_IMPL; + + pThis->__tmrStateLoad__ = &tmrStateLoad_IMPL; + + pThis->__tmrStateUnload__ = &tmrStateUnload_IMPL; + + pThis->__tmrStateDestroy__ = &tmrStateDestroy_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_OBJTMR_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_OBJTMR_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_OBJTMR_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_OBJTMR_engstateStateUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_OBJTMR_engstateStateDestroy; + + pThis->__tmrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrReconcileTunableState; + + pThis->__tmrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreLoad; + + pThis->__tmrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePostUnload; + + pThis->__tmrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreUnload; + + pThis->__tmrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStateInitUnlocked; + + pThis->__tmrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_tmrInitMissing; + + pThis->__tmrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreInitLocked; + + pThis->__tmrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreInitUnlocked; + + pThis->__tmrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrGetTunableState; + + pThis->__tmrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrCompareTunableState; + + pThis->__tmrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrFreeTunableState; + + pThis->__tmrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePostLoad; + + pThis->__tmrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrAllocTunableState; + + pThis->__tmrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrSetTunableState; + + pThis->__tmrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_tmrIsPresent; +} + +void __nvoc_init_funcTable_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OBJTMR_1(pThis, pRmhalspecowner); +} + 
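+
+//
+// Illustrative sketch (not part of the generated file) of the two pointer
+// conventions the thunks above rely on. Downward thunks recover the containing
+// OBJTMR from a base OBJENGSTATE pointer by subtracting the byte offset that
+// the NVOC_RTTI entry records for that base; upward thunks add the same offset
+// to reach the embedded base when an OBJENGSTATE default is invoked on an
+// OBJTMR. The function name below is hypothetical; the types and the RTTI
+// symbol are the ones defined above.
+//
+//     static NV_STATUS exampleDownThunk(struct OBJGPU *pGpu, struct OBJENGSTATE *pEngstate)
+//     {
+//         // OBJENGSTATE is embedded inside OBJTMR at a fixed offset, so
+//         // stepping back by that offset yields the containing OBJTMR.
+//         struct OBJTMR *pTmr = (struct OBJTMR *)
+//             ((unsigned char *)pEngstate - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset);
+//         return tmrStateInitLocked(pGpu, pTmr);
+//     }
+//
+// The ChipHal predicates in __nvoc_init_dataField_OBJTMR use a word/bit
+// scheme: (idx >> 5) selects a 32-bit word of the variant bitmask and
+// (1 << (idx & 0x1f)) selects a bit within it, so the check annotated
+// "ChipHal: T234D" matches variant index 2*32 + 16 = 80.
+//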
+void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_OBJTMR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_OBJTMR(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJTMR *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(OBJTMR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJTMR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJTMR); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_OBJTMR(pThis, pRmhalspecowner); + status = __nvoc_ctor_OBJTMR(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_OBJTMR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJTMR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJTMR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h new file mode 100644 index 0000000..634ddb6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h @@ -0,0 +1,1088 @@ +#ifndef _G_OBJTMR_NVOC_H_ +#define _G_OBJTMR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_objtmr_nvoc.h" + +#ifndef _OBJTMR_H_ +#define _OBJTMR_H_ + +/*! + * @file + * @brief Defines and structures used for the Tmr Engine Object. + */ + +/* ------------------------ Includes --------------------------------------- */ +#include "core/core.h" +#include "core/info_block.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "tmr.h" +#include "lib/ref_count.h" +#include "os/os.h" +#include "nvoc/utility.h" + +/* ------------------------ Macros ----------------------------------------- */ +// +// Extent of the timer callback array +// +#define TMR_NUM_CALLBACKS_RM 96 +#define TMR_NUM_CALLBACKS_OS 36 + +// Callback scheduled without any explicit flags set. +#define TMR_FLAGS_NONE 0x00000000 +// Automatically reschedule the callback, so that it repeats. +// Otherwise, the callback is scheduled for one-shot execution. +#define TMR_FLAG_RECUR NVBIT(0) +// Indicate that the implementation of the callback function will/can release +// a GPU semaphore. This allows fifoIdleChannels to query this information, +// and hence not bail out early if channels are blocked on semaphores that +// will in fact be released. + // !!NOTE: This is OBSOLETE; it should be moved directly to FIFO, where it is needed +#define TMR_FLAG_RELEASE_SEMAPHORE NVBIT(1) +#define TMR_FLAG_OS_TIMER_QUEUED NVBIT(2) + +#define TMR_GET_GPU(p) ENG_GET_GPU(p) + +/* ------------------------ Function Redefinitions ------------------------- */ +#define tmrEventScheduleRelSec(pTmr, pEvent, RelTimeSec) tmrEventScheduleRel(pTmr, pEvent, (NvU64)(RelTimeSec) * 1000000000 ) + +#define tmrGetInfoBlock(pTmr, pListHead, dataId) getInfoPtr(pListHead, dataId) +#define tmrAddInfoBlock(pTmr, ppListHead, dataId, size) addInfoPtr(ppListHead, dataId, size) +#define tmrDeleteInfoBlock(pTmr, ppListHead, dataId) deleteInfoPtr(ppListHead, dataId) +#define tmrTestInfoBlock(pTmr, pListHead, dataId) testInfoPtr(pListHead, dataId) + +/* ------------------------ Datatypes -------------------------------------- */ +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); + +// +// Forward references for timer-related structures +// +typedef struct DAYMSECTIME *PDAYMSECTIME; +typedef struct DAYMSECTIME DAYMSECTIME; + +// +// System time structure +// +struct DAYMSECTIME +{ + NvU32 days; + NvU32 msecs; + NvU32 valid; +}; + +/*! + * Callback wrapper memory type, used when interfacing with all scheduling functions. + * Reveals only a partial representation of the event information. + * User use only; internal code will not change them. 
+ */ +struct TMR_EVENT +{ + TIMEPROC pTimeProc; //__nvoc_pbase_OBJTMR) + +#ifdef __nvoc_objtmr_h_disabled +#define __dynamicCast_OBJTMR(pThis) ((OBJTMR*)NULL) +#else //__nvoc_objtmr_h_disabled +#define __dynamicCast_OBJTMR(pThis) \ + ((OBJTMR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJTMR))) +#endif //__nvoc_objtmr_h_disabled + +#define PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS +#define PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS +#define PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS +#define PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL_BASE_CAST +#define PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL_BASE_NAME PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL +#define PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS +#define PDB_PROP_TMR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_TMR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE_BASE_CAST +#define PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE_BASE_NAME PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE + +NV_STATUS __nvoc_objCreateDynamic_OBJTMR(OBJTMR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR**, Dynamic*, NvU32); +#define __objCreate_OBJTMR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJTMR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define tmrConstructEngine(pGpu, pTmr, arg0) tmrConstructEngine_DISPATCH(pGpu, pTmr, arg0) +#define tmrStateInitLocked(pGpu, pTmr) tmrStateInitLocked_DISPATCH(pGpu, pTmr) +#define tmrStateLoad(pGpu, pTmr, arg0) tmrStateLoad_DISPATCH(pGpu, pTmr, arg0) +#define tmrStateUnload(pGpu, pTmr, arg0) tmrStateUnload_DISPATCH(pGpu, pTmr, arg0) +#define tmrStateDestroy(pGpu, pTmr) tmrStateDestroy_DISPATCH(pGpu, pTmr) +#define tmrReconcileTunableState(pGpu, pEngstate, pTunableState) tmrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define tmrStatePreLoad(pGpu, pEngstate, arg0) tmrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define tmrStatePostUnload(pGpu, pEngstate, arg0) tmrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define tmrStatePreUnload(pGpu, pEngstate, arg0) tmrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define tmrStateInitUnlocked(pGpu, pEngstate) tmrStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define tmrInitMissing(pGpu, pEngstate) tmrInitMissing_DISPATCH(pGpu, pEngstate) +#define tmrStatePreInitLocked(pGpu, pEngstate) tmrStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define tmrStatePreInitUnlocked(pGpu, pEngstate) tmrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define tmrGetTunableState(pGpu, pEngstate, pTunableState) tmrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define tmrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) tmrCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define tmrFreeTunableState(pGpu, pEngstate, pTunableState) tmrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define tmrStatePostLoad(pGpu, pEngstate, arg0) tmrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define 
tmrAllocTunableState(pGpu, pEngstate, ppTunableState) tmrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define tmrSetTunableState(pGpu, pEngstate, pTunableState) tmrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define tmrIsPresent(pGpu, pEngstate) tmrIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS tmrGetCurrentTime_IMPL(struct OBJTMR *pTmr, NvU64 *pTime); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentTime(struct OBJTMR *pTmr, NvU64 *pTime) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentTime(pTmr, pTime) tmrGetCurrentTime_IMPL(pTmr, pTime) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCurrentTime_HAL(pTmr, pTime) tmrGetCurrentTime(pTmr, pTime) + +NV_STATUS tmrGetCurrentTimeEx_IMPL(struct OBJTMR *pTmr, NvU64 *pTime, THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentTimeEx(struct OBJTMR *pTmr, NvU64 *pTime, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentTimeEx(pTmr, pTime, arg0) tmrGetCurrentTimeEx_IMPL(pTmr, pTime, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCurrentTimeEx_HAL(pTmr, pTime, arg0) tmrGetCurrentTimeEx(pTmr, pTime, arg0) + +NV_STATUS tmrDelay_OSTIMER(struct OBJTMR *pTmr, NvU32 arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrDelay(struct OBJTMR *pTmr, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrDelay(pTmr, arg0) tmrDelay_OSTIMER(pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrDelay_HAL(pTmr, arg0) tmrDelay(pTmr, arg0) + +static inline NV_STATUS tmrSetCurrentTime_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCurrentTime(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCurrentTime(pGpu, pTmr) tmrSetCurrentTime_46f6a7(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCurrentTime_HAL(pGpu, pTmr) tmrSetCurrentTime(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrDisable_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrDisable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrDisable(pGpu, pTmr) tmrSetAlarmIntrDisable_56cd7a(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrDisable_HAL(pGpu, pTmr) tmrSetAlarmIntrDisable(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrEnable_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrEnable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrEnable(pGpu, pTmr) tmrSetAlarmIntrEnable_56cd7a(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrEnable_HAL(pGpu, pTmr) tmrSetAlarmIntrEnable(pGpu, pTmr) + +static inline NV_STATUS 
tmrSetAlarmIntrReset_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrReset(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrReset(pGpu, pTmr, arg0) tmrSetAlarmIntrReset_56cd7a(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrReset_HAL(pGpu, pTmr, arg0) tmrSetAlarmIntrReset(pGpu, pTmr, arg0) + +static inline NV_STATUS tmrGetIntrStatus_cb5ce8(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *pStatus, THREAD_STATE_NODE *arg0) { + *pStatus = 0; + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetIntrStatus(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *pStatus, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetIntrStatus(pGpu, pTmr, pStatus, arg0) tmrGetIntrStatus_cb5ce8(pGpu, pTmr, pStatus, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetIntrStatus_HAL(pGpu, pTmr, pStatus, arg0) tmrGetIntrStatus(pGpu, pTmr, pStatus, arg0) + +static inline NvU32 tmrGetTimeLo_cf0499(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return ((NvU32)(((NvU64)(osGetTimestamp())) & 4294967295U)); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrGetTimeLo(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimeLo(pGpu, pTmr) tmrGetTimeLo_cf0499(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimeLo_HAL(pGpu, pTmr) tmrGetTimeLo(pGpu, pTmr) + +static inline NvU64 tmrGetTime_fa6bbe(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return osGetTimestamp(); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU64 tmrGetTime(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTime(pGpu, pTmr) tmrGetTime_fa6bbe(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTime_HAL(pGpu, pTmr) tmrGetTime(pGpu, pTmr) + +NvU64 tmrGetTimeEx_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU64 tmrGetTimeEx(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimeEx(pGpu, pTmr, arg0) tmrGetTimeEx_OSTIMER(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimeEx_HAL(pGpu, pTmr, arg0) tmrGetTimeEx(pGpu, pTmr, arg0) + +NvU32 tmrReadTimeLoReg_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrReadTimeLoReg(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrReadTimeLoReg(pGpu, pTmr, arg0) tmrReadTimeLoReg_OSTIMER(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrReadTimeLoReg_HAL(pGpu, pTmr, arg0) tmrReadTimeLoReg(pGpu, pTmr, arg0) + +NvU32 tmrReadTimeHiReg_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline 
NvU32 tmrReadTimeHiReg(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrReadTimeHiReg(pGpu, pTmr, arg0) tmrReadTimeHiReg_OSTIMER(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrReadTimeHiReg_HAL(pGpu, pTmr, arg0) tmrReadTimeHiReg(pGpu, pTmr, arg0) + +static inline NV_STATUS tmrSetAlarm_56cd7a(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 alarm, THREAD_STATE_NODE *pThreadState) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarm(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 alarm, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarm(pGpu, pTmr, alarm, pThreadState) tmrSetAlarm_56cd7a(pGpu, pTmr, alarm, pThreadState) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarm_HAL(pGpu, pTmr, alarm, pThreadState) tmrSetAlarm(pGpu, pTmr, alarm, pThreadState) + +static inline NvBool tmrGetAlarmPending_491d52(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetAlarmPending(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetAlarmPending(pGpu, pTmr, arg0) tmrGetAlarmPending_491d52(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetAlarmPending_HAL(pGpu, pTmr, arg0) tmrGetAlarmPending(pGpu, pTmr, arg0) + +static inline NV_STATUS tmrSetCountdownIntrDisable_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrDisable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrDisable(pGpu, pTmr) tmrSetCountdownIntrDisable_46f6a7(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrDisable_HAL(pGpu, pTmr) tmrSetCountdownIntrDisable(pGpu, pTmr) + +static inline NV_STATUS tmrSetCountdownIntrEnable_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrEnable(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrEnable(pGpu, pTmr) tmrSetCountdownIntrEnable_46f6a7(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrEnable_HAL(pGpu, pTmr) tmrSetCountdownIntrEnable(pGpu, pTmr) + +static inline NV_STATUS tmrSetCountdownIntrReset_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrReset(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrReset(pGpu, pTmr, arg0) tmrSetCountdownIntrReset_46f6a7(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrReset_HAL(pGpu, pTmr, arg0) 
tmrSetCountdownIntrReset(pGpu, pTmr, arg0) + +static inline NvBool tmrGetCountdownPending_491d52(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetCountdownPending(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCountdownPending(pGpu, pTmr, arg0) tmrGetCountdownPending_491d52(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCountdownPending_HAL(pGpu, pTmr, arg0) tmrGetCountdownPending(pGpu, pTmr, arg0) + +static inline NV_STATUS tmrSetCountdown_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdown(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdown(pGpu, pTmr, arg0, arg1, arg2) tmrSetCountdown_46f6a7(pGpu, pTmr, arg0, arg1, arg2) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdown_HAL(pGpu, pTmr, arg0, arg1, arg2) tmrSetCountdown(pGpu, pTmr, arg0, arg1, arg2) + +static inline NV_STATUS tmrGetTimerBar0MapInfo_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU32 *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetTimerBar0MapInfo(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimerBar0MapInfo(pGpu, pTmr, arg0, arg1) tmrGetTimerBar0MapInfo_46f6a7(pGpu, pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, arg0, arg1) tmrGetTimerBar0MapInfo(pGpu, pTmr, arg0, arg1) + +static inline NV_STATUS tmrGrTickFreqChange_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvBool arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGrTickFreqChange(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGrTickFreqChange(pGpu, pTmr, arg0) tmrGrTickFreqChange_46f6a7(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGrTickFreqChange_HAL(pGpu, pTmr, arg0) tmrGrTickFreqChange(pGpu, pTmr, arg0) + +static inline NvU32 tmrGetUtilsClkScaleFactor_4a4dee(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return 0; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrGetUtilsClkScaleFactor(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetUtilsClkScaleFactor(pGpu, pTmr) tmrGetUtilsClkScaleFactor_4a4dee(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetUtilsClkScaleFactor_HAL(pGpu, pTmr) tmrGetUtilsClkScaleFactor(pGpu, pTmr) + +NV_STATUS tmrGetGpuAndCpuTimestampPair_OSTIMER(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU64 *arg1); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetGpuAndCpuTimestampPair(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU64 *arg1) { + 
NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetGpuAndCpuTimestampPair(pGpu, pTmr, arg0, arg1) tmrGetGpuAndCpuTimestampPair_OSTIMER(pGpu, pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetGpuAndCpuTimestampPair_HAL(pGpu, pTmr, arg0, arg1) tmrGetGpuAndCpuTimestampPair(pGpu, pTmr, arg0, arg1) + +static inline NV_STATUS tmrGetGpuPtimerOffset_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg0, NvU32 *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetGpuPtimerOffset(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetGpuPtimerOffset(pGpu, pTmr, arg0, arg1) tmrGetGpuPtimerOffset_46f6a7(pGpu, pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetGpuPtimerOffset_HAL(pGpu, pTmr, arg0, arg1) tmrGetGpuPtimerOffset(pGpu, pTmr, arg0, arg1) + +static inline void tmrResetTimerRegistersForVF_b3696a(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 gfid) { + return; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrResetTimerRegistersForVF(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrResetTimerRegistersForVF(pGpu, pTmr, gfid) tmrResetTimerRegistersForVF_b3696a(pGpu, pTmr, gfid) +#endif //__nvoc_objtmr_h_disabled + +#define tmrResetTimerRegistersForVF_HAL(pGpu, pTmr, gfid) tmrResetTimerRegistersForVF(pGpu, pTmr, gfid) + +static inline NV_STATUS tmrEventCreateOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCreateOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCreateOSTimer(pTmr, pEvent) tmrEventCreateOSTimer_46f6a7(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventCreateOSTimer_HAL(pTmr, pEvent) tmrEventCreateOSTimer(pTmr, pEvent) + +static inline NV_STATUS tmrEventScheduleAbsOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleAbsOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleAbsOSTimer(pTmr, pEvent, timeAbs) tmrEventScheduleAbsOSTimer_46f6a7(pTmr, pEvent, timeAbs) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventScheduleAbsOSTimer_HAL(pTmr, pEvent, timeAbs) tmrEventScheduleAbsOSTimer(pTmr, pEvent, timeAbs) + +static inline NV_STATUS tmrEventServiceOSTimerCallback_46f6a7(struct OBJGPU *pGpu, struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventServiceOSTimerCallback(struct OBJGPU *pGpu, struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventServiceOSTimerCallback(pGpu, pTmr, pEvent) tmrEventServiceOSTimerCallback_46f6a7(pGpu, pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define 
tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, pEvent) tmrEventServiceOSTimerCallback(pGpu, pTmr, pEvent) + +static inline NV_STATUS tmrEventCancelOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCancelOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCancelOSTimer(pTmr, pEvent) tmrEventCancelOSTimer_46f6a7(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventCancelOSTimer_HAL(pTmr, pEvent) tmrEventCancelOSTimer(pTmr, pEvent) + +static inline NV_STATUS tmrEventDestroyOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventDestroyOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventDestroyOSTimer(pTmr, pEvent) tmrEventDestroyOSTimer_46f6a7(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventDestroyOSTimer_HAL(pTmr, pEvent) tmrEventDestroyOSTimer(pTmr, pEvent) + +NV_STATUS tmrConstructEngine_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, ENGDESCRIPTOR arg0); + +static inline NV_STATUS tmrConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr, ENGDESCRIPTOR arg0) { + return pTmr->__tmrConstructEngine__(pGpu, pTmr, arg0); +} + +NV_STATUS tmrStateInitLocked_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + +static inline NV_STATUS tmrStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + return pTmr->__tmrStateInitLocked__(pGpu, pTmr); +} + +NV_STATUS tmrStateLoad_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0); + +static inline NV_STATUS tmrStateLoad_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0) { + return pTmr->__tmrStateLoad__(pGpu, pTmr, arg0); +} + +NV_STATUS tmrStateUnload_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0); + +static inline NV_STATUS tmrStateUnload_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0) { + return pTmr->__tmrStateUnload__(pGpu, pTmr, arg0); +} + +void tmrStateDestroy_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); + +static inline void tmrStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + pTmr->__tmrStateDestroy__(pGpu, pTmr); +} + +static inline NV_STATUS tmrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return pEngstate->__tmrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS tmrStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return pEngstate->__tmrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return pEngstate->__tmrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return pEngstate->__tmrStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__tmrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void tmrInitMissing_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + pEngstate->__tmrInitMissing__(pGpu, pEngstate); +} + +static 
inline NV_STATUS tmrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__tmrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__tmrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return pEngstate->__tmrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS tmrCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__tmrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void tmrFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + pEngstate->__tmrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS tmrStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return pEngstate->__tmrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void **ppTunableState) { + return pEngstate->__tmrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS tmrSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return pEngstate->__tmrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool tmrIsPresent_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__tmrIsPresent__(pGpu, pEngstate); +} + +static inline NvBool tmrServiceSwrlCallbacksPmcTree(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool tmrClearSwrlCallbacksSemaphore(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +static inline void tmrServiceSwrlCallbacks(struct OBJGPU *pGpu, struct OBJTMR *pTmr, THREAD_STATE_NODE *arg0) { + return; +} + +static inline NvBool tmrServiceSwrlWrapper(struct OBJGPU *pGpu, struct OBJTMR *pTmr, MC_ENGINE_BITVECTOR *arg0, THREAD_STATE_NODE *arg1) { + return ((NvBool)(0 != 0)); +} + +void tmrDestruct_IMPL(struct OBJTMR *pTmr); +#define __nvoc_tmrDestruct(pTmr) tmrDestruct_IMPL(pTmr) +NV_STATUS tmrEventCreate_IMPL(struct OBJTMR *pTmr, PTMR_EVENT *ppEvent, TIMEPROC callbackFn, void *pUserData, NvU32 flags); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCreate(struct OBJTMR *pTmr, PTMR_EVENT *ppEvent, TIMEPROC callbackFn, void *pUserData, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCreate(pTmr, ppEvent, callbackFn, pUserData, flags) tmrEventCreate_IMPL(pTmr, ppEvent, callbackFn, pUserData, flags) +#endif //__nvoc_objtmr_h_disabled + +void tmrEventCancel_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrEventCancel(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCancel(pTmr, pEvent) tmrEventCancel_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +void tmrEventDestroy_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrEventDestroy(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR 
was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventDestroy(pTmr, pEvent) tmrEventDestroy_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +void tmrInitCallbacks_IMPL(struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrInitCallbacks(struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrInitCallbacks(pTmr) tmrInitCallbacks_IMPL(pTmr) +#endif //__nvoc_objtmr_h_disabled + +void tmrSetCountdownCallback_IMPL(struct OBJTMR *pTmr, TIMEPROC_COUNTDOWN arg0); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrSetCountdownCallback(struct OBJTMR *pTmr, TIMEPROC_COUNTDOWN arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownCallback(pTmr, arg0) tmrSetCountdownCallback_IMPL(pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrCancelCallback_IMPL(struct OBJTMR *pTmr, void *pObject); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrCancelCallback(struct OBJTMR *pTmr, void *pObject) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCancelCallback(pTmr, pObject) tmrCancelCallback_IMPL(pTmr, pObject) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrGetCurrentDiffTime_IMPL(struct OBJTMR *pTmr, NvU64 arg0, NvU64 *arg1); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentDiffTime(struct OBJTMR *pTmr, NvU64 arg0, NvU64 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentDiffTime(pTmr, arg0, arg1) tmrGetCurrentDiffTime_IMPL(pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +void tmrGetSystemTime_IMPL(struct OBJTMR *pTmr, PDAYMSECTIME pTime); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrGetSystemTime(struct OBJTMR *pTmr, PDAYMSECTIME pTime) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetSystemTime(pTmr, pTime) tmrGetSystemTime_IMPL(pTmr, pTime) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCheckCallbacksReleaseSem_IMPL(struct OBJTMR *pTmr, NvU32 chId); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCheckCallbacksReleaseSem(struct OBJTMR *pTmr, NvU32 chId) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCheckCallbacksReleaseSem(pTmr, chId) tmrCheckCallbacksReleaseSem_IMPL(pTmr, chId) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrDiffExceedsTime_IMPL(struct OBJTMR *pTmr, PDAYMSECTIME pFutureTime, PDAYMSECTIME pPastTime, NvU32 time); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrDiffExceedsTime(struct OBJTMR *pTmr, PDAYMSECTIME pFutureTime, PDAYMSECTIME pPastTime, NvU32 time) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrDiffExceedsTime(pTmr, pFutureTime, pPastTime, time) tmrDiffExceedsTime_IMPL(pTmr, pFutureTime, pPastTime, time) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventScheduleAbs_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleAbs(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleAbs(pTmr, pEvent, 
timeAbs) tmrEventScheduleAbs_IMPL(pTmr, pEvent, timeAbs) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackAbs_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackAbs(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackAbs(pTmr, arg0, arg1, arg2, arg3, arg4) tmrScheduleCallbackAbs_IMPL(pTmr, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventScheduleRel_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeRel); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleRel(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeRel) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleRel(pTmr, pEvent, timeRel) tmrEventScheduleRel_IMPL(pTmr, pEvent, timeRel) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackRel_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackRel(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackRel(pTmr, arg0, arg1, arg2, arg3, arg4) tmrScheduleCallbackRel_IMPL(pTmr, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackRelSec_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackRelSec(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackRelSec(pTmr, arg0, arg1, arg2, arg3, arg4) tmrScheduleCallbackRelSec_IMPL(pTmr, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrEventOnList_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrEventOnList(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventOnList(pTmr, pEvent) tmrEventOnList_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCallbackOnList_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCallbackOnList(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCallbackOnList(pTmr, arg0, arg1) tmrCallbackOnList_IMPL(pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +void tmrRmCallbackIntrEnable_IMPL(struct OBJTMR *pTmr, struct OBJGPU *pGpu); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrRmCallbackIntrEnable(struct OBJTMR *pTmr, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define 
tmrRmCallbackIntrEnable(pTmr, pGpu) tmrRmCallbackIntrEnable_IMPL(pTmr, pGpu) +#endif //__nvoc_objtmr_h_disabled + +void tmrRmCallbackIntrDisable_IMPL(struct OBJTMR *pTmr, struct OBJGPU *pGpu); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrRmCallbackIntrDisable(struct OBJTMR *pTmr, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrRmCallbackIntrDisable(pTmr, pGpu) tmrRmCallbackIntrDisable_IMPL(pTmr, pGpu) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrTimeUntilNextCallback_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *pTimeUntilCallbackNs); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrTimeUntilNextCallback(struct OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *pTimeUntilCallbackNs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrTimeUntilNextCallback(pGpu, pTmr, pTimeUntilCallbackNs) tmrTimeUntilNextCallback_IMPL(pGpu, pTmr, pTimeUntilCallbackNs) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCallExpiredCallbacks_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCallExpiredCallbacks(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCallExpiredCallbacks(pGpu, pTmr) tmrCallExpiredCallbacks_IMPL(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +void tmrResetCallbackInterrupt_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrResetCallbackInterrupt(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrResetCallbackInterrupt(pGpu, pTmr) tmrResetCallbackInterrupt_IMPL(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrGetCallbackInterruptPending_IMPL(struct OBJGPU *pGpu, struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetCallbackInterruptPending(struct OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCallbackInterruptPending(pGpu, pTmr) tmrGetCallbackInterruptPending_IMPL(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#undef PRIVATE_FIELD + + +NV_STATUS tmrCtrlCmdEventCreate(struct OBJGPU *pGpu, TMR_EVENT_SET_PARAMS *pParams); +NV_STATUS tmrCtrlCmdEventSchedule(struct OBJGPU *pGpu, TMR_EVENT_SCHEDULE_PARAMS *pParams); +NV_STATUS tmrCtrlCmdEventCancel(struct OBJGPU *pGpu, TMR_EVENT_GENERAL_PARAMS *pParams); +NV_STATUS tmrCtrlCmdEventDestroy(struct OBJGPU *pGpu, TMR_EVENT_GENERAL_PARAMS *pParams); + +NV_STATUS tmrDelay_OSTIMER(struct OBJTMR *pTmr, NvU32 nsec); + +#endif // _OBJTMR_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OBJTMR_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h new file mode 100644 index 0000000..9e2be6a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h @@ -0,0 +1,86 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! 
+// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_odb.h +// + +#ifndef _G_ODB_H_ +#define _G_ODB_H_ + +#define OBJECT_BASE_DEFINITION(ENG) __##ENG##_OBJECT_BASE_DEFINITION + +#ifndef __NVOC_CLASS_OBJGPIO_TYPEDEF__ +#define __NVOC_CLASS_OBJGPIO_TYPEDEF__ +typedef struct OBJGPIO OBJGPIO; +#endif /* __NVOC_CLASS_OBJGPIO_TYPEDEF__ */ +typedef struct OBJGPIO *POBJGPIO; + +#ifndef __NVOC_CLASS_OBJRPC_TYPEDEF__ +#define __NVOC_CLASS_OBJRPC_TYPEDEF__ +typedef struct OBJRPC OBJRPC; +#endif /* __NVOC_CLASS_OBJRPC_TYPEDEF__ */ +typedef struct OBJRPC *POBJRPC; + +#ifndef __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__ +#define __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__ +typedef struct OBJRPCSTRUCTURECOPY OBJRPCSTRUCTURECOPY; +#endif /* __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__ */ +typedef struct OBJRPCSTRUCTURECOPY *POBJRPCSTRUCTURECOPY; + + + +#if NV_PRINTF_STRINGS_ALLOWED +#define odbGetClassName(p) (objGetClassInfo((p))->name) +#endif + +// TODO : temporary hack, to delete +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ +typedef struct OBJGPU *POBJGPU; + +#ifndef __NVOC_CLASS_OBJDISP_TYPEDEF__ +#define __NVOC_CLASS_OBJDISP_TYPEDEF__ +typedef struct OBJDISP OBJDISP; +#endif /* __NVOC_CLASS_OBJDISP_TYPEDEF__ */ +typedef struct OBJDISP *POBJDISP; + +// +// #define staticCast(pObj, TYPE) ((pObj)? __staticCast_##TYPE((pObj)) : NULL) +// +#define __staticCast_OBJGPIO(pObj) ((pObj)->__iom_pbase_OBJGPIO) +#define __staticCast_OBJRPC(pObj) ((pObj)->__iom_pbase_OBJRPC) +#define __staticCast_OBJRPCSTRUCTURECOPY(pObj) ((pObj)->__iom_pbase_OBJRPCSTRUCTURECOPY) + + +// +// #define dynamicCast(pObj, TYPE) (__dynamicCast_##TYPE((pObj))) +// +#define __dynamicCast_OBJGPIO(pObj) NULL +#define __dynamicCast_OBJRPC(pObj) NULL +#define __dynamicCast_OBJRPCSTRUCTURECOPY(pObj) NULL + + + +#define PDB_PROP_GPIO_DCB_ENTRIES_PARSED_BASE_CAST +#define PDB_PROP_GPIO_DCB_ENTRIES_PARSED_BASE_NAME pdb.PDB_PROP_GPIO_DCB_ENTRIES_PARSED + +#define PDB_PROP_GPIO_ENTRY_ORIGIN_DCB_GAT_BASE_CAST +#define PDB_PROP_GPIO_ENTRY_ORIGIN_DCB_GAT_BASE_NAME pdb.PDB_PROP_GPIO_ENTRY_ORIGIN_DCB_GAT + +#define PDB_PROP_GPIO_EPC_HWSLOW_FC7E081B_BASE_CAST +#define PDB_PROP_GPIO_EPC_HWSLOW_FC7E081B_BASE_NAME pdb.PDB_PROP_GPIO_EPC_HWSLOW_FC7E081B + +#define PDB_PROP_GPIO_FORCE_FAST_LVDS_MUX_SWITCH_BASE_CAST +#define PDB_PROP_GPIO_FORCE_FAST_LVDS_MUX_SWITCH_BASE_NAME pdb.PDB_PROP_GPIO_FORCE_FAST_LVDS_MUX_SWITCH + +#define PDB_PROP_GPIO_IS_MISSING_BASE_CAST __nvoc_pbase_OBJENGSTATE-> +#define PDB_PROP_GPIO_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +#define PDB_PROP_GPIO_RM_PMU_GPIO_SYNC_ENABLED_DEF_BASE_CAST +#define PDB_PROP_GPIO_RM_PMU_GPIO_SYNC_ENABLED_DEF_BASE_NAME pdb.PDB_PROP_GPIO_RM_PMU_GPIO_SYNC_ENABLED_DEF + + + +#endif // _G_ODB_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c new file mode 100644 index 0000000..c7e9047 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c @@ -0,0 +1,323 @@ +#define NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_os_desc_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb3dacd = 1; +#endif + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_OsDescMemory(OsDescMemory*); +void __nvoc_init_funcTable_OsDescMemory(OsDescMemory*); +NV_STATUS __nvoc_ctor_OsDescMemory(OsDescMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_OsDescMemory(OsDescMemory*); +void __nvoc_dtor_OsDescMemory(OsDescMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OsDescMemory; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_OsDescMemory = { + /*pClassDef=*/ &__nvoc_class_def_OsDescMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OsDescMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OsDescMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_OsDescMemory_OsDescMemory, + &__nvoc_rtti_OsDescMemory_Memory, + &__nvoc_rtti_OsDescMemory_RmResource, + &__nvoc_rtti_OsDescMemory_RmResourceCommon, + &__nvoc_rtti_OsDescMemory_RsResource, + &__nvoc_rtti_OsDescMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OsDescMemory), + /*classId=*/ classId(OsDescMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OsDescMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OsDescMemory, + /*pCastInfo=*/ &__nvoc_castinfo_OsDescMemory, + /*pExportInfo=*/ &__nvoc_export_info_OsDescMemory +}; + +static NvBool __nvoc_thunk_OsDescMemory_resCanCopy(struct RsResource *pOsDescMemory) { + return osdescCanCopy((struct OsDescMemory *)(((unsigned char *)pOsDescMemory) - __nvoc_rtti_OsDescMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescCheckMemInterUnmap(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + 
__nvoc_rtti_OsDescMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescControl(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescUnmap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescGetMemInterMapParams(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescGetMemoryMappingDescriptor(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescGetMapAddrSpace(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_osdescShareCallback(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescControlFilter(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_osdescAddAdditionalDependants(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_osdescGetRefCount(struct OsDescMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescMapTo(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_osdescControl_Prologue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescIsReady(struct OsDescMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset)); +} + +static NV_STATUS 
__nvoc_thunk_Memory_osdescCheckCopyPermissions(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_osdescPreDestruct(struct OsDescMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescUnmapFrom(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_osdescControl_Epilogue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescControlLookup(struct OsDescMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescMap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_osdescAccessCallback(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OsDescMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_OsDescMemory(OsDescMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OsDescMemory(OsDescMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_OsDescMemory(OsDescMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail_Memory; + __nvoc_init_dataField_OsDescMemory(pThis); + + status = __nvoc_osdescConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail__init; + goto __nvoc_ctor_OsDescMemory_exit; // Success + +__nvoc_ctor_OsDescMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_OsDescMemory_fail_Memory: +__nvoc_ctor_OsDescMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_OsDescMemory_1(OsDescMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__osdescCanCopy__ = &osdescCanCopy_IMPL; + + 
pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_OsDescMemory_resCanCopy; + + pThis->__osdescCheckMemInterUnmap__ = &__nvoc_thunk_Memory_osdescCheckMemInterUnmap; + + pThis->__osdescControl__ = &__nvoc_thunk_Memory_osdescControl; + + pThis->__osdescUnmap__ = &__nvoc_thunk_Memory_osdescUnmap; + + pThis->__osdescGetMemInterMapParams__ = &__nvoc_thunk_Memory_osdescGetMemInterMapParams; + + pThis->__osdescGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_osdescGetMemoryMappingDescriptor; + + pThis->__osdescGetMapAddrSpace__ = &__nvoc_thunk_Memory_osdescGetMapAddrSpace; + + pThis->__osdescShareCallback__ = &__nvoc_thunk_RmResource_osdescShareCallback; + + pThis->__osdescControlFilter__ = &__nvoc_thunk_RsResource_osdescControlFilter; + + pThis->__osdescAddAdditionalDependants__ = &__nvoc_thunk_RsResource_osdescAddAdditionalDependants; + + pThis->__osdescGetRefCount__ = &__nvoc_thunk_RsResource_osdescGetRefCount; + + pThis->__osdescMapTo__ = &__nvoc_thunk_RsResource_osdescMapTo; + + pThis->__osdescControl_Prologue__ = &__nvoc_thunk_RmResource_osdescControl_Prologue; + + pThis->__osdescIsReady__ = &__nvoc_thunk_Memory_osdescIsReady; + + pThis->__osdescCheckCopyPermissions__ = &__nvoc_thunk_Memory_osdescCheckCopyPermissions; + + pThis->__osdescPreDestruct__ = &__nvoc_thunk_RsResource_osdescPreDestruct; + + pThis->__osdescUnmapFrom__ = &__nvoc_thunk_RsResource_osdescUnmapFrom; + + pThis->__osdescControl_Epilogue__ = &__nvoc_thunk_RmResource_osdescControl_Epilogue; + + pThis->__osdescControlLookup__ = &__nvoc_thunk_RsResource_osdescControlLookup; + + pThis->__osdescMap__ = &__nvoc_thunk_Memory_osdescMap; + + pThis->__osdescAccessCallback__ = &__nvoc_thunk_RmResource_osdescAccessCallback; +} + +void __nvoc_init_funcTable_OsDescMemory(OsDescMemory *pThis) { + __nvoc_init_funcTable_OsDescMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_OsDescMemory(OsDescMemory *pThis) { + pThis->__nvoc_pbase_OsDescMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_OsDescMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + OsDescMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(OsDescMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OsDescMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OsDescMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OsDescMemory(pThis); + status = 
__nvoc_ctor_OsDescMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_OsDescMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OsDescMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_OsDescMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h new file mode 100644 index 0000000..863bdba --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h @@ -0,0 +1,224 @@ +#ifndef _G_OS_DESC_MEM_NVOC_H_ +#define _G_OS_DESC_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_os_desc_mem_nvoc.h" + +#ifndef _OS_DESC_MEMORY_H_ +#define _OS_DESC_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! 
+ * Bind memory allocated through os descriptor + */ +#ifdef NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OsDescMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct OsDescMemory *__nvoc_pbase_OsDescMemory; + NvBool (*__osdescCanCopy__)(struct OsDescMemory *); + NV_STATUS (*__osdescCheckMemInterUnmap__)(struct OsDescMemory *, NvBool); + NV_STATUS (*__osdescControl__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__osdescUnmap__)(struct OsDescMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__osdescGetMemInterMapParams__)(struct OsDescMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__osdescGetMemoryMappingDescriptor__)(struct OsDescMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__osdescGetMapAddrSpace__)(struct OsDescMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__osdescShareCallback__)(struct OsDescMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__osdescControlFilter__)(struct OsDescMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__osdescAddAdditionalDependants__)(struct RsClient *, struct OsDescMemory *, RsResourceRef *); + NvU32 (*__osdescGetRefCount__)(struct OsDescMemory *); + NV_STATUS (*__osdescMapTo__)(struct OsDescMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__osdescControl_Prologue__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__osdescIsReady__)(struct OsDescMemory *); + NV_STATUS (*__osdescCheckCopyPermissions__)(struct OsDescMemory *, struct OBJGPU *, NvHandle); + void (*__osdescPreDestruct__)(struct OsDescMemory *); + NV_STATUS (*__osdescUnmapFrom__)(struct OsDescMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__osdescControl_Epilogue__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__osdescControlLookup__)(struct OsDescMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__osdescMap__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__osdescAccessCallback__)(struct OsDescMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_OsDescMemory_TYPEDEF__ +#define __NVOC_CLASS_OsDescMemory_TYPEDEF__ +typedef struct OsDescMemory OsDescMemory; +#endif /* __NVOC_CLASS_OsDescMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OsDescMemory +#define __nvoc_class_id_OsDescMemory 0xb3dacd +#endif /* __nvoc_class_id_OsDescMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory; + +#define __staticCast_OsDescMemory(pThis) \ + ((pThis)->__nvoc_pbase_OsDescMemory) + +#ifdef __nvoc_os_desc_mem_h_disabled +#define __dynamicCast_OsDescMemory(pThis) ((OsDescMemory*)NULL) +#else //__nvoc_os_desc_mem_h_disabled +#define __dynamicCast_OsDescMemory(pThis) \ + ((OsDescMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OsDescMemory))) +#endif //__nvoc_os_desc_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory**, Dynamic*, 
NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_OsDescMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_OsDescMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define osdescCanCopy(pOsDescMemory) osdescCanCopy_DISPATCH(pOsDescMemory) +#define osdescCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) osdescCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define osdescControl(pMemory, pCallContext, pParams) osdescControl_DISPATCH(pMemory, pCallContext, pParams) +#define osdescUnmap(pMemory, pCallContext, pCpuMapping) osdescUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define osdescGetMemInterMapParams(pMemory, pParams) osdescGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define osdescGetMemoryMappingDescriptor(pMemory, ppMemDesc) osdescGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define osdescGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) osdescGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define osdescShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) osdescShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define osdescControlFilter(pResource, pCallContext, pParams) osdescControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define osdescAddAdditionalDependants(pClient, pResource, pReference) osdescAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define osdescGetRefCount(pResource) osdescGetRefCount_DISPATCH(pResource) +#define osdescMapTo(pResource, pParams) osdescMapTo_DISPATCH(pResource, pParams) +#define osdescControl_Prologue(pResource, pCallContext, pParams) osdescControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define osdescIsReady(pMemory) osdescIsReady_DISPATCH(pMemory) +#define osdescCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) osdescCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define osdescPreDestruct(pResource) osdescPreDestruct_DISPATCH(pResource) +#define osdescUnmapFrom(pResource, pParams) osdescUnmapFrom_DISPATCH(pResource, pParams) +#define osdescControl_Epilogue(pResource, pCallContext, pParams) osdescControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define osdescControlLookup(pResource, pParams, ppEntry) osdescControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define osdescMap(pMemory, pCallContext, pParams, pCpuMapping) osdescMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define osdescAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) osdescAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool osdescCanCopy_IMPL(struct OsDescMemory *pOsDescMemory); + +static inline NvBool osdescCanCopy_DISPATCH(struct OsDescMemory *pOsDescMemory) { + return pOsDescMemory->__osdescCanCopy__(pOsDescMemory); +} + +static inline NV_STATUS osdescCheckMemInterUnmap_DISPATCH(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__osdescCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS osdescControl_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__osdescControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS osdescUnmap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping 
*pCpuMapping) { + return pMemory->__osdescUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS osdescGetMemInterMapParams_DISPATCH(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__osdescGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS osdescGetMemoryMappingDescriptor_DISPATCH(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__osdescGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS osdescGetMapAddrSpace_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__osdescGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool osdescShareCallback_DISPATCH(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__osdescShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS osdescControlFilter_DISPATCH(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__osdescControlFilter__(pResource, pCallContext, pParams); +} + +static inline void osdescAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) { + pResource->__osdescAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 osdescGetRefCount_DISPATCH(struct OsDescMemory *pResource) { + return pResource->__osdescGetRefCount__(pResource); +} + +static inline NV_STATUS osdescMapTo_DISPATCH(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__osdescMapTo__(pResource, pParams); +} + +static inline NV_STATUS osdescControl_Prologue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__osdescControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS osdescIsReady_DISPATCH(struct OsDescMemory *pMemory) { + return pMemory->__osdescIsReady__(pMemory); +} + +static inline NV_STATUS osdescCheckCopyPermissions_DISPATCH(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__osdescCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void osdescPreDestruct_DISPATCH(struct OsDescMemory *pResource) { + pResource->__osdescPreDestruct__(pResource); +} + +static inline NV_STATUS osdescUnmapFrom_DISPATCH(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__osdescUnmapFrom__(pResource, pParams); +} + +static inline void osdescControl_Epilogue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__osdescControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS osdescControlLookup_DISPATCH(struct OsDescMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__osdescControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS osdescMap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__osdescMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static 
inline NvBool osdescAccessCallback_DISPATCH(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
+    return pResource->__osdescAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
+}
+
+NV_STATUS osdescConstruct_IMPL(struct OsDescMemory *arg_pOsDescMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
+#define __nvoc_osdescConstruct(arg_pOsDescMemory, arg_pCallContext, arg_pParams) osdescConstruct_IMPL(arg_pOsDescMemory, arg_pCallContext, arg_pParams)
+#undef PRIVATE_FIELD
+
+
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_OS_DESC_MEM_NVOC_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h
new file mode 100644
index 0000000..6ec7761
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h
@@ -0,0 +1,10 @@
+// This file is automatically generated by rmconfig - DO NOT EDIT!
+//
+// Profile: devel-soc-disp-dce-client
+// Template: templates/gt_eng_empty.h
+//
+// This file is added to smooth the NVOC migration. After converting a module
+// to an NVOC class, stale generated headers in the output directory cause
+// incremental builds to fail. This file ensures the content of the old header
+// is removed.
+//
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c
new file mode 100644
index 0000000..5d7c8eb
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c
@@ -0,0 +1,149 @@
+#define NVOC_OS_H_PRIVATE_ACCESS_ALLOWED
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_os_nvoc.h"
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0xaa1d70 = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+void __nvoc_init_OBJOS(OBJOS*);
+void __nvoc_init_funcTable_OBJOS(OBJOS*);
+NV_STATUS __nvoc_ctor_OBJOS(OBJOS*);
+void __nvoc_init_dataField_OBJOS(OBJOS*);
+void __nvoc_dtor_OBJOS(OBJOS*);
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJOS;
+
+static const struct NVOC_RTTI __nvoc_rtti_OBJOS_OBJOS = {
+    /*pClassDef=*/ &__nvoc_class_def_OBJOS,
+    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJOS,
+    /*offset=*/ 0,
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_OBJOS_Object = {
+    /*pClassDef=*/ &__nvoc_class_def_Object,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(OBJOS, __nvoc_base_Object),
+};
+
+static const struct NVOC_CASTINFO __nvoc_castinfo_OBJOS = {
+    /*numRelatives=*/ 2,
+    /*relatives=*/ {
+        &__nvoc_rtti_OBJOS_OBJOS,
+        &__nvoc_rtti_OBJOS_Object,
+    },
+};
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(OBJOS),
+        /*classId=*/ classId(OBJOS),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "OBJOS",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJOS,
+    /*pCastInfo=*/ &__nvoc_castinfo_OBJOS,
+    /*pExportInfo=*/ &__nvoc_export_info_OBJOS
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJOS =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_Object(Object*);
+void __nvoc_dtor_OBJOS(OBJOS
*pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJOS(OBJOS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER, !(1)); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJOS(OBJOS *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJOS_fail_Object; + __nvoc_init_dataField_OBJOS(pThis); + goto __nvoc_ctor_OBJOS_exit; // Success + +__nvoc_ctor_OBJOS_fail_Object: +__nvoc_ctor_OBJOS_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJOS_1(OBJOS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJOS(OBJOS *pThis) { + __nvoc_init_funcTable_OBJOS_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJOS(OBJOS *pThis) { + pThis->__nvoc_pbase_OBJOS = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJOS(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJOS(OBJOS **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJOS *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJOS)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJOS)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJOS); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJOS(pThis); + status = __nvoc_ctor_OBJOS(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJOS_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJOS_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJOS(OBJOS **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJOS(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h new file mode 100644 index 0000000..c29e7d7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h @@ -0,0 +1,1472 @@ +#ifndef _G_OS_NVOC_H_ +#define _G_OS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_os_nvoc.h"
+
+
+#ifndef _OS_H_
+#define _OS_H_
+
+/*!
+ * @file  os.h
+ * @brief Interface for Operating System module
+ */
+
+/* ------------------------ Core & Library Includes ------------------------- */
+#include "core/core.h"
+#include "containers/btree.h"
+
+/* ------------------------ SDK & Interface Includes ------------------------ */
+#include "nvsecurityinfo.h"
+#include "nvacpitypes.h"
+#include "nvimpshared.h" // TODO - should move from sdk to resman/interface
+#include "nvi2c.h"       // TODO - should move from sdk to resman/interface
+
+/* ------------------------ OS Includes ------------------------------------- */
+#include "os/nv_memory_type.h"
+#include "os/capability.h"
+#include "os/os_fixed_mode_timings_props.h"
+
+/* ------------------------ Forward Declarations ---------------------------- */
+struct OBJOS;
+
+#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__
+#define __NVOC_CLASS_OBJOS_TYPEDEF__
+typedef struct OBJOS OBJOS;
+#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJOS
+#define __nvoc_class_id_OBJOS 0xaa1d70
+#endif /* __nvoc_class_id_OBJOS */
+
+
+
+//
+// The OS module should NOT depend on RM modules. The only exception is
+// core/core.h.
+//
+// DO NOT ADD INCLUDES TO RM MODULE HEADERS FROM THIS FILE. The OS module
+// should be a leaf module. Dependencies on RM headers in this file result in
+// circular dependencies, as most modules depend on the OS module.
+//
+// Ideally, all types used by the OS module's interface are from the SDK,
+// resman/interface, or self-contained within the OS module header. For now,
+// since the OS module depends on a few RM internal types, we forward declare
+// them to avoid the need to pull in headers from across RM.
+//
+typedef struct SYS_STATIC_CONFIG SYS_STATIC_CONFIG;
+typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR;
+typedef struct IOVAMAPPING *PIOVAMAPPING;
+typedef struct OBJGPUMGR OBJGPUMGR;
+typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION;
+typedef struct DEVICE_MAPPING DEVICE_MAPPING;
+typedef void *PUID_TOKEN;
+typedef struct OBJTMR OBJTMR;
+typedef struct OBJCL OBJCL;
+typedef struct _GUID *LPGUID;
+
+//
+// Forward declare OS_GPU_INFO type
+//
+// TODO - We shouldn't need a special definition per-OS. OS implementations
+// should use a consistent type
+//
+typedef struct nv_state_t OS_GPU_INFO;
+
+/* ------------------------ OS Interface ------------------------------------ */
+
+typedef struct os_wait_queue OS_WAIT_QUEUE;
+
+//
+// Defines and Typedefs used by the OS
+//
+typedef NvU64 OS_THREAD_HANDLE;
+
+//
+// Forward references for OS1HZTIMERENTRY symbols
+//
+typedef struct OS1HZTIMERENTRY *POS1HZTIMERENTRY;
+typedef struct OS1HZTIMERENTRY OS1HZTIMERENTRY;
+
+//
+// Simple 1 second callback facility. Schedules the given routine to be called with the supplied data
+// in approximately 1 second. Might be called from an elevated IRQL.
+// Unlike the tmr facilities (tmrScheduleCallbackXXX), this does not rely on the hardware.
+//
+typedef void (*OS1HZPROC)(OBJGPU *, void *);
+
+#define NV_OS_1HZ_ONESHOT 0x00000000
+#define NV_OS_1HZ_REPEAT  0x00000001
+
+struct OS1HZTIMERENTRY
+{
+    OS1HZPROC callback;
+    void* data;
+    NvU32 flags;
+    POS1HZTIMERENTRY next;
+};
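+//
+// [Editor's note: the following usage sketch is illustrative and is not part
+// of the original source.] A typical client pairs osSchedule1SecondCallback()
+// with osRemove1SecondRepeatingCallback(), both declared later in this header.
+// MY_POLL_STATE and myPollCallback are hypothetical names:
+//
+//     static void myPollCallback(OBJGPU *pGpu, void *pData)
+//     {
+//         // May run at elevated IRQL, so it must not block.
+//         MY_POLL_STATE *pState = (MY_POLL_STATE *)pData;
+//         pState->ticks++;
+//     }
+//
+//     // NV_OS_1HZ_REPEAT re-arms the callback every ~1 second until it is
+//     // removed; NV_OS_1HZ_ONESHOT fires it once.
+//     NV_STATUS status = osSchedule1SecondCallback(pGpu, myPollCallback,
+//                                                  pState, NV_OS_1HZ_REPEAT);
+//     ...
+//     // Removal passes the same (callback, pData) pair used to schedule.
+//     osRemove1SecondRepeatingCallback(pGpu, myPollCallback, pState);
+//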
+
+typedef struct RM_PAGEABLE_SECTION {
+    void   *osHandle;       // handle returned from OS API
+    void   *pDataSection;   // pointer to data inside the target data/bss/const segment
+} RM_PAGEABLE_SECTION;
+
+
+// OSSetVideoSource defines
+#define NV_OS_VIDEO_SOURCE_MCE    0x0
+#define NV_OS_VIDEO_SOURCE_WINDVR 0x1
+#define NV_OS_VIDEO_SOURCE_WMP9   0x2
+#define NV_OS_VIDEO_SOURCE_VMR9   0x3
+#define NV_OS_VIDEO_SOURCE_WINDVD 0x4
+
+// OSPollHotkeyState return values
+#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE           0:0
+#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE_NOT_FOUND 0x00000000
+#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE_FOUND     0x00000001
+#define NV_OS_HOTKEY_STATE_SCALE_EVENT              1:1
+#define NV_OS_HOTKEY_STATE_SCALE_EVENT_NOT_FOUND    0x00000000
+#define NV_OS_HOTKEY_STATE_SCALE_EVENT_FOUND        0x00000001
+#define NV_OS_HOTKEY_STATE_LID_EVENT                2:2
+#define NV_OS_HOTKEY_STATE_LID_EVENT_NOT_FOUND      0x00000000
+#define NV_OS_HOTKEY_STATE_LID_EVENT_FOUND          0x00000001
+#define NV_OS_HOTKEY_STATE_POWER_EVENT              3:3
+#define NV_OS_HOTKEY_STATE_POWER_EVENT_NOT_FOUND    0x00000000
+#define NV_OS_HOTKEY_STATE_POWER_EVENT_FOUND        0x00000001
+#define NV_OS_HOTKEY_STATE_DOCK_EVENT               4:4
+#define NV_OS_HOTKEY_STATE_DOCK_EVENT_NOT_FOUND     0x00000000
+#define NV_OS_HOTKEY_STATE_DOCK_EVENT_FOUND         0x00000001
+
+#define MAX_BRIGHTNESS_BCL_ELEMENTS 103
+
+// ACPI _DOD Bit defines
+// These bits are defined in the Hybrid SAS
+#define NV_ACPI_DOD_DISPLAY_OWNER       20:18
+#define NV_ACPI_DOD_DISPLAY_OWNER_ALL   0x00000000
+#define NV_ACPI_DOD_DISPLAY_OWNER_MGPU  0x00000001
+#define NV_ACPI_DOD_DISPLAY_OWNER_DGPU1 0x00000002
+
+#define NV_OS_ALLOCFLAGS_LOCKPAGES    NVBIT(0)
+#define NV_OS_ALLOCFLAGS_PAGEDPOOL    NVBIT(1)
+#define NV_OS_ALLOCFLAGS_NONPAGEDPOOL 0
+
+// ACPI 3.0a definitions for requested data length
+#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_128B    0x00000001
+#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_256B    0x00000002
+#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_384B    0x00000003
+#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_512B    0x00000004
+#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_DEFAULT 0x00000001
+
+typedef enum _OS_PEX_RECOVERY_STATUS
+{
+    OS_PEX_RECOVERY_GPU_RESET_PENDING = 0,
+    OS_PEX_RECOVERY_GPU_RESTORED,
+    OS_PEX_RECOVERY_GPU_REMOVED
+} OS_PEX_RECOVERY_STATUS;
+
+// osBugCheck bugcode defines
+#define OS_BUG_CHECK_BUGCODE_UNKNOWN           (0)
+#define OS_BUG_CHECK_BUGCODE_INTERNAL_TEST     (1)
+#define OS_BUG_CHECK_BUGCODE_BUS               (2)
+#define OS_BUG_CHECK_BUGCODE_ECC_DBE           (3)
+#define OS_BUG_CHECK_BUGCODE_NVLINK_TL_ERR     (4)
+#define OS_BUG_CHECK_BUGCODE_PAGED_SEGMENT     (5)
+#define OS_BUG_CHECK_BUGCODE_BDOD_ON_ASSERT    (6)
+#define OS_BUG_CHECK_BUGCODE_DISPLAY_UNDERFLOW (7)
+#define OS_BUG_CHECK_BUGCODE_LAST              OS_BUG_CHECK_BUGCODE_DISPLAY_UNDERFLOW
+
+#define OS_BUG_CHECK_BUGCODE_STR        \
+    {                                   \
+        "Unknown Error",                \
+        "Nv Internal Testing",          \
+        "Bus Error",                    \
+        "Double Bit Error",             \
+        "NVLink TL Error",              \
+        "Invalid Bindata Access",       \
+        "BSOD on Assert or Breakpoint", \
+        "Display Underflow"             \
+    }
+
+// Flags needed by OSAllocPagesNode
+#define OS_ALLOC_PAGES_NODE_NONE         0x0
+#define OS_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1
+
+//
+// Structures for osPackageRegistry and osUnpackageRegistry
+//
+typedef struct PACKED_REGISTRY_ENTRY
+{
+    NvU32 nameOffset;
+    NvU8  type;
+    NvU32 data;
+    NvU32 length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct
PACKED_REGISTRY_TABLE +{ + NvU32 size; + NvU32 numEntries; + PACKED_REGISTRY_ENTRY entries[0]; +} PACKED_REGISTRY_TABLE; + +// +// Values for PACKED_REGISTRY_ENTRY::type +// +#define REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN 0 +#define REGISTRY_TABLE_ENTRY_TYPE_DWORD 1 +#define REGISTRY_TABLE_ENTRY_TYPE_BINARY 2 +#define REGISTRY_TABLE_ENTRY_TYPE_STRING 3 + +/* + * OS_DRIVER_BLOCK + * + * driverStart + * CPU VA of where the driver is loaded + * unique_id + * Debug GUID of the Driver. Used to match with Pdb + * age + * Additional GUID information + * offset + * Offset from VA to start of text + */ +typedef struct { + NvP64 driverStart NV_ALIGN_BYTES(8); + NvU8 unique_id[16]; + NvU32 age; + NvU32 offset; +} OS_DRIVER_BLOCK; + +// Basic OS interface functions +typedef NvU32 OSSetEvent(OBJGPU *, NvP64); +typedef NV_STATUS OSEventNotification(OBJGPU *, PEVENTNOTIFICATION, NvU32, void *, NvU32); +typedef NV_STATUS OSEventNotificationWithInfo(OBJGPU *, PEVENTNOTIFICATION, NvU32, NvU32, NvU16, void *, NvU32); +typedef NV_STATUS OSObjectEventNotification(NvHandle, NvHandle, NvU32, PEVENTNOTIFICATION, NvU32, void *, NvU32); +typedef NV_STATUS NV_FORCERESULTCHECK OSAllocPages(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSAllocPagesInternal(MEMORY_DESCRIPTOR *); +typedef void OSFreePages(MEMORY_DESCRIPTOR *); +typedef void OSFreePagesInternal(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSLockMem(MEMORY_DESCRIPTOR *); +typedef NV_STATUS OSUnlockMem(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapGPU(OBJGPU *, RS_PRIV_LEVEL, NvU64, NvU64, NvU32, NvP64 *, NvP64 *); +typedef void OSUnmapGPU(OS_GPU_INFO *, RS_PRIV_LEVEL, NvP64, NvU64, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSDeviceClassToDeviceName(NvU32, NvU8 *); +typedef NV_STATUS NV_FORCERESULTCHECK OSNotifyEvent(OBJGPU *, PEVENTNOTIFICATION, NvU32, NvU32, NV_STATUS); +typedef NV_STATUS OSReadRegistryString(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSWriteRegistryBinary(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSWriteRegistryVolatile(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSReadRegistryVolatile(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSReadRegistryVolatileSize(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryBinary(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSWriteRegistryDword(OBJGPU *, const char *, NvU32); +typedef NV_STATUS OSReadRegistryDword(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryDwordBase(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryStringBase(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSPackageRegistry(OBJGPU *, PACKED_REGISTRY_TABLE *, NvU32 *); +typedef NV_STATUS OSUnpackageRegistry(PACKED_REGISTRY_TABLE *); +typedef NvBool OSQueueDpc(OBJGPU *); +typedef void OSFlushCpuWriteCombineBuffer(void); +typedef NV_STATUS OSNumaMemblockSize(NvU64 *); +typedef NvBool OSNumaOnliningEnabled(OS_GPU_INFO *); +typedef NV_STATUS OSAllocPagesNode(NvS32, NvLength, NvU32, NvU64 *); +typedef NV_STATUS OSAllocAcquirePage(NvU64); +typedef NV_STATUS OSAllocReleasePage(NvU64); +typedef NvU32 OSGetPageRefcount(NvU64); +typedef NvU32 OSCountTailPages(NvU64); +typedef NvU32 OSGetPageSize(void); + + +// We use osAcquireRmSema to catch "unported" sema code to new lock model +typedef NV_STATUS NV_FORCERESULTCHECK OSAcquireRmSema(void *); +typedef NvBool NV_FORCERESULTCHECK OSIsRmSemaOwner(void *); + +#define DPC_RELEASE_ALL_GPU_LOCKS (1) 
+#define DPC_RELEASE_SINGLE_GPU_LOCK (2) + +typedef NV_STATUS OSGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLockRelease); +typedef NvU32 OSApiLockAcquireConfigureFlags(NvU32 flags); +typedef NV_STATUS NV_FORCERESULTCHECK OSCondAcquireRmSema(void *); +typedef NvU32 OSReleaseRmSema(void *, OBJGPU *); + +typedef NvU32 OSGetCpuCount(void); +typedef NvU32 OSGetMaximumCoreCount(void); +typedef NvU32 OSGetCurrentProcessorNumber(void); +typedef NV_STATUS OSDelay(NvU32); +typedef NV_STATUS OSDelayUs(NvU32); +typedef NV_STATUS OSDelayNs(NvU32); +typedef void OSSpinLoop(void); +typedef NvU32 OSGetCurrentProcess(void); +typedef void OSGetCurrentProcessName(char *, NvU32); +typedef NvU32 OSGetCurrentPasid(void); +typedef NV_STATUS OSGetCurrentThread(OS_THREAD_HANDLE *); +typedef NV_STATUS OSAttachToProcess(void **, NvU32); +typedef void OSDetachFromProcess(void*); +typedef NV_STATUS OSVirtualToPhysicalAddr(MEMORY_DESCRIPTOR *, NvP64, RmPhysAddr *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryUser(OS_GPU_INFO *, RmPhysAddr, NvU64, NvU32, NvP64 *, NvP64 *, NvU32); +typedef void OSUnmapPciMemoryUser(OS_GPU_INFO *, NvP64, NvU64, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryKernelOld(OBJGPU *, RmPhysAddr, NvU64, NvU32, void **, NvU32); +typedef void OSUnmapPciMemoryKernelOld(OBJGPU *, void *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryKernel64(OBJGPU *, RmPhysAddr, NvU64, NvU32, NvP64 *, NvU32); +typedef void OSUnmapPciMemoryKernel64(OBJGPU *, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapSystemMemory(MEMORY_DESCRIPTOR *, NvU64, NvU64, NvBool, NvU32, NvP64*, NvP64*); +typedef void OSUnmapSystemMemory(MEMORY_DESCRIPTOR *, NvBool, NvU32, NvP64, NvP64); +typedef NvBool OSLockShouldToggleInterrupts(OBJGPU *); +typedef NV_STATUS OSGetPerformanceCounter(NvU64 *); +typedef NvBool OSDbgBreakpointEnabled(void); +typedef NV_STATUS OSAttachGpu(OBJGPU *, void *); +typedef NV_STATUS OSDpcAttachGpu(OBJGPU *, void *); +typedef void OSDpcDetachGpu(OBJGPU *); +typedef NV_STATUS OSHandleGpuLost(OBJGPU *); +typedef void OSHandleGpuSurpriseRemoval(OBJGPU *); +typedef void OSInitScalabilityOptions(OBJGPU *, void *); +typedef void OSHandleDeferredRecovery(OBJGPU *); +typedef NvBool OSIsSwPreInitOnly(OS_GPU_INFO *); + +#define NVRM_MAX_FILE_NAME_LENGTH (128) +#define NVRM_FILE_ACCESS_READ NVBIT(0) +#define NVRM_FILE_ACCESS_WRITE NVBIT(1) + +typedef void OSGetTimeoutParams(OBJGPU *, NvU32 *, NvU32 *, NvU32 *); +typedef NvBool OSIsRaisedIRQL(void); +typedef NvBool OSIsISR(void); +typedef NV_STATUS OSGetDriverBlock(OS_GPU_INFO *, OS_DRIVER_BLOCK *); +typedef NvBool OSIsEqualGUID(void *, void *); + +#define OS_QUEUE_WORKITEM_FLAGS_NONE 0x00000000 +#define OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS NVBIT(0) +#define OS_QUEUE_WORKITEM_FLAGS_FALLBACK_TO_DPC NVBIT(1) +// +// Lock flags: +// Only one of the LOCK_GPU flags should be provided. 
If multiple are, +// the priority ordering should be GPUS > GROUP_DEVICE > GROUP_SUBDEVICE +// +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA NVBIT(8) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW NVBIT(9) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO NVBIT(10) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW NVBIT(11) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO NVBIT(12) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW NVBIT(13) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RO NVBIT(14) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW NVBIT(15) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RO NVBIT(16) +// +// Perform a GPU full power sanity after getting GPU locks. +// One of the above LOCK_GPU flags must be provided when using this flag. +// +#define OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY NVBIT(17) +#define OS_QUEUE_WORKITEM_FLAGS_FOR_PM_RESUME NVBIT(18) +typedef void OSWorkItemFunction(NvU32 gpuInstance, void *); +typedef void OSSystemWorkItemFunction(void *); +typedef NV_STATUS OSQueueWorkItem(OBJGPU *, OSWorkItemFunction, void *); +typedef NV_STATUS OSQueueWorkItemWithFlags(OBJGPU *, OSWorkItemFunction, void *, NvU32); +typedef NV_STATUS OSQueueSystemWorkItem(OSSystemWorkItemFunction, void *); + +// MXM ACPI calls +typedef NV_STATUS OSCallACPI_MXMX(OBJGPU *, NvU32, NvU8 *); +typedef NV_STATUS OSCallACPI_DDC(OBJGPU *, NvU32, NvU8*,NvU32*, NvBool); +typedef NV_STATUS OSCallACPI_BCL(OBJGPU *, NvU32, NvU32 *, NvU16 *); + +// Display MUX ACPI calls +typedef NV_STATUS OSCallACPI_MXDS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_MXDM(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_MXID(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_LRST(OBJGPU *, NvU32, NvU32 *); + +// Hybrid GPU ACPI calls +typedef NV_STATUS OSCallACPI_NVHG_GPUON(OBJGPU *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_GPUOFF(OBJGPU *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_GPUSTA(OBJGPU *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_MXDS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_MXMX(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_DOS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_ROM(OBJGPU *, NvU32 *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_DCS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_DOD(OBJGPU *, NvU32 *, NvU32 *); + +// Tegra ACPI calls +typedef NV_STATUS OSCallACPI_SUB(OBJGPU *, NvU8 *, NvU32 *); +typedef NV_STATUS OSCallACPI_ON(OBJGPU *, NvU32); +typedef NV_STATUS OSCallACPI_OFF(OBJGPU *, NvU32); + +// Notebook Power Balancing ACPI calls +typedef NV_STATUS OSCallACPI_NBPS(OBJGPU *, NvU8 *, NvU32 *); +typedef NV_STATUS OSCallACPI_NBSL(OBJGPU *, NvU32); + +// Optimus WMI ACPI calls +typedef NV_STATUS OSCallACPI_OPTM_GPUON(OBJGPU *); + +// Generic ACPI _DSM call +typedef NV_STATUS OSCallACPI_DSM(OBJGPU *pGpu, ACPI_DSM_FUNCTION acpiDSMFunction, + NvU32 NVHGDSMSubfunction, NvU32 *pInOut, NvU16 *size); + +// UEFI variable calls +typedef NV_STATUS OSGetUefiVariable(OBJGPU *, char *, LPGUID, NvU8 *, NvU32 *, NvU32 *); + +// The following functions are also implemented in WinNT +typedef void OSQADbgRegistryInit(struct OBJOS *); +typedef NV_STATUS OSGetVersionDump(void *); +// End of WinNT + +// OS functions typically only implemented for MacOS core +// These next functions also appear on UNIX +typedef NvU32 OSnv_rdcr4(struct OBJOS *); +typedef NvU64 OSnv_rdxcr0(struct OBJOS *); +typedef int OSnv_cpuid(struct OBJOS *, int, int, NvU32 *, NvU32 *, NvU32 *, NvU32 *); +// end of functions 
shared between MacOSX and UNIX + +// These next functions also appear on UNIX +typedef NvU32 OSnv_rdmsr(struct OBJOS *, NvU32, NvU32 *, NvU32 *); +typedef NvU32 OSnv_wrmsr(struct OBJOS *, NvU32, NvU32, NvU32); +// end functions shared by MacOS and UNIX + +typedef NvU32 OSRobustChannelsDefaultState(struct OBJOS *); + +// NOTE: The following functions are also implemented in MODS +typedef NV_STATUS OSSimEscapeWrite(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 Value); +typedef NV_STATUS OSSimEscapeWriteBuffer(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer); +typedef NV_STATUS OSSimEscapeRead(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value); +typedef NV_STATUS OSSimEscapeReadBuffer(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer); +typedef NvU32 OSGetSimulationMode(void); +typedef void OSLogString(const char*, ...); +typedef void OSFlushLog(void); + +// End of MODS functions + +//Vista Specific Functions + +typedef NV_STATUS OSSetupVBlank(OBJGPU *pGpu, void * pProc, + void * pParm1, void * pParm2, NvU32 Head, void * pParm3); + +// Heap reserve tracking functions +typedef void OSInternalReserveAllocCallback(NvU64 offset, NvU64 size, NvU32 gpuId); +typedef void OSInternalReserveFreeCallback(NvU64 offset, NvU32 gpuId); + + +// +// SPB_GPS (Vista) specific defines +// +typedef struct +{ + NvU64 cpuFPCounter1; // CPU Fixed Performance Counter 1 + NvU64 cpuFPCounter2; // CPU Fixed Performance Counter 2 + NvU64 cpuC0Counter; // C0 Counter + NvU64 cpuCoreTSC; // per core Time Stamp Counter value + NvU8 cpuCoreC0Value; // average C0 residency per core + NvU8 cpuCoreAperf; // CPU Aperf value per core + +}OS_CPU_CORE_PERF_COUNTERS, *POS_CPU_CORE_PERF_COUNTERS; + +typedef NV_STATUS OsGetSystemCpuLogicalCoreCounts(NvU32 *pCpuCoreCount); +typedef NV_STATUS OsGetSystemCpuC0AndAPerfCounters(NvU32 coreIndex, POS_CPU_CORE_PERF_COUNTERS pCpuPerfData); +typedef void OsEnableCpuPerformanceCounters(struct OBJOS *pOS); +typedef NV_STATUS OsCpuDpcObjInit(void **ppCpuDpcObj, OBJGPU *pGpu, NvU32 coreCount); +typedef void OsCpuDpcObjQueue(void **ppCpuDpcObj, NvU32 coreCount, POS_CPU_CORE_PERF_COUNTERS pCpuPerfData); +typedef void OsCpuDpcObjFree(void **ppCpuDpcObj); +typedef NV_STATUS OsSystemGetBatteryDrain(NvS32 *pChargeRate); + +// OSDRIVERERROR structure +typedef struct +{ + enum { + OS_DRIVER_ERROR_CODE_NONE = 0, + OS_DRIVER_ERROR_CODE_HP_GT216_VBIOS_BUG_587560, + OS_DRIVER_ERROR_CODE_COUNT, // Must always be last + } code; + + union + { + void *osDriverErrorContextNone; + + } context; + +} OSDRIVERERROR, * POSDRIVERERROR; + +typedef NV_STATUS OSPexRecoveryCallback(OS_GPU_INFO *, OS_PEX_RECOVERY_STATUS); + +// +// Function pointer typedef for use as callback prototype when filtering +// address ranges in os memory access routines +// +typedef NV_STATUS (OSMemFilterCb)(void *pPriv, NvU64 addr, void *pData, NvU64 size, NvBool bRead); + +// Structure typedef for storing the callback pointer and priv data +typedef struct +{ + NODE node; + OSMemFilterCb *pFilterCb; + void *pPriv; +} OSMEMFILTERDATA, *POSMEMFILTERDATA; + +// +// OS Functions typically only implemented for MODS +// Note: See comments above for other functions that +// are also implemented on MODS as well as other +// OS's. 
+// + +typedef NvBool OSRmInitRm(struct OBJOS *); +typedef NV_STATUS OSGetPanelStrapAndIndex(struct OBJOS *, OBJGPU *, NvU32 *, NvU32 *); +typedef NV_STATUS OSNotifySbiosDisplayChangeEnd(OBJGPU *, NvU32); +typedef NvU32 OSGetDfpScalerFromSbios(OBJGPU *); +typedef NvU32 OSPollHotkeyState(OBJGPU *); + +typedef NV_STATUS OSInitGpuMgr(OBJGPUMGR *); +typedef void OSSyncWithRmDestroy(void); +typedef void OSSyncWithGpuDestroy(NvBool); + +typedef void OSModifyGpuSwStatePersistence(OS_GPU_INFO *, NvBool); + +typedef NV_STATUS OSMemAddFilter(NvU64, NvU64, OSMemFilterCb*, void *); +typedef NV_STATUS OSMemRemoveFilter(NvU64); +typedef POSMEMFILTERDATA OSMemGetFilter(NvUPtr); + +typedef NV_STATUS OSGetCarveoutInfo(NvU64*, NvU64*); +typedef NV_STATUS OSGetVPRInfo(NvU64*, NvU64*); +typedef NV_STATUS OSAllocInVPR(MEMORY_DESCRIPTOR*); +typedef NV_STATUS OSGetGenCarveout(NvU64*, NvU64 *, NvU32, NvU64); + +typedef NvU32 OSPepReadReg(OBJGPU *, NvU32); +typedef void OSPepWriteReg(OBJGPU *, NvU32, NvU32); + +typedef NV_STATUS OSI2CClosePorts(OS_GPU_INFO *, NvU32); +typedef NV_STATUS OSWriteI2CBufferDirect(OBJGPU *, NvU32, NvU8, void *, NvU32, void *, NvU32); +typedef NV_STATUS OSReadI2CBufferDirect(OBJGPU *, NvU32, NvU8, void *, NvU32, void *, NvU32); +typedef NV_STATUS OSI2CTransfer(OBJGPU *, NvU32, NvU8, nv_i2c_msg_t *, NvU32); +typedef NV_STATUS OSSetGpuRailVoltage(OBJGPU *, NvU32, NvU32*); +typedef NV_STATUS OSGetGpuRailVoltage(OBJGPU *, NvU32*); +typedef NV_STATUS OSGetGpuRailVoltageInfo(OBJGPU *, NvU32 *, NvU32 *, NvU32 *); +typedef NV_STATUS OSTegraSocGetImpImportData(TEGRA_IMP_IMPORT_DATA *); +typedef NV_STATUS OSTegraSocEnableDisableRfl(OS_GPU_INFO *, NvBool); +typedef NV_STATUS OSTegraAllocateDisplayBandwidth(OS_GPU_INFO *, NvU32, NvU32); + +typedef NV_STATUS OSMemdrvQueryInterface(OS_GPU_INFO *); +typedef void OSMemdrvReleaseInterface(void); +typedef NV_STATUS OSMemdrvGetAsid(NvU32, NvU32 *); +typedef NV_STATUS OSMemdrvGetStreamId(NvU32, NvU32 *); + +typedef NV_STATUS OSGC6PowerControl(OBJGPU *, NvU32, NvU32 *); + +typedef RmPhysAddr OSPageArrayGetPhysAddr(OS_GPU_INFO *pOsGpuInfo, void* pPageData, NvU32 pageIndex); +typedef NV_STATUS OSGetChipInfo(OBJGPU *, NvU32*, NvU32*, NvU32*, NvU32*); +typedef NV_STATUS OSGetCurrentIrqPrivData(OS_GPU_INFO *, NvU32*); + +typedef enum +{ + RC_CALLBACK_IGNORE, + RC_CALLBACK_ISOLATE, + RC_CALLBACK_ISOLATE_NO_RESET, +} RC_CALLBACK_STATUS; +typedef RC_CALLBACK_STATUS OSRCCallback(OBJGPU *, NvHandle, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvU32 *, void *); +typedef NvBool OSCheckCallback(OBJGPU *); +typedef NV_STATUS OSReadPFPciConfigInVF(NvU32, NvU32*); + +// Actual definition of the OBJOS structure +#ifdef NVOC_OS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJOS { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJOS *__nvoc_pbase_OBJOS; + NvBool PDB_PROP_OS_PAT_UNSUPPORTED; + NvBool PDB_PROP_OS_SLI_ALLOWED; + NvBool PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED; + NvBool PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT; + NvBool PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM; + NvBool PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED; + NvBool PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE; + NvBool PDB_PROP_OS_LIMIT_GPU_RESET; + NvBool PDB_PROP_OS_SUPPORTS_TDR; + NvBool PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI; + NvBool PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER; + NvBool PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS; + NvBool 
PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS; + OSDbgBreakpointEnabled *osDbgBreakpointEnabled; + OSQADbgRegistryInit *osQADbgRegistryInit; + OSQueueWorkItem *osQueueWorkItem; + OSQueueWorkItemWithFlags *osQueueWorkItemWithFlags; + OSQueueSystemWorkItem *osQueueSystemWorkItem; + void *(*osGetStereoDongleInterface)(void); + OSnv_rdcr4 *osNv_rdcr4; + OSnv_rdxcr0 *osNv_rdxcr0; + OSnv_cpuid *osNv_cpuid; + OSnv_rdmsr *osNv_rdmsr; + OSnv_wrmsr *osNv_wrmsr; + OSRobustChannelsDefaultState *osRobustChannelsDefaultState; + OSSimEscapeWrite *osSimEscapeWrite; + OSSimEscapeWriteBuffer *osSimEscapeWriteBuffer; + OSSimEscapeRead *osSimEscapeRead; + OSSimEscapeReadBuffer *osSimEscapeReadBuffer; + OSRmInitRm *osRmInitRm; + OSGetSimulationMode *osGetSimulationMode; + OSCallACPI_MXMX *osCallACPI_MXMX; + OSCallACPI_DDC *osCallACPI_DDC; + OSCallACPI_BCL *osCallACPI_BCL; + OSCallACPI_MXDS *osCallACPI_MXDS; + OSCallACPI_MXDM *osCallACPI_MXDM; + OSCallACPI_MXID *osCallACPI_MXID; + OSCallACPI_LRST *osCallACPI_LRST; + OSCallACPI_NVHG_GPUON *osCallACPI_NVHG_GPUON; + OSCallACPI_NVHG_GPUOFF *osCallACPI_NVHG_GPUOFF; + OSCallACPI_NVHG_GPUSTA *osCallACPI_NVHG_GPUSTA; + OSCallACPI_NVHG_MXDS *osCallACPI_NVHG_MXDS; + OSCallACPI_NVHG_MXMX *osCallACPI_NVHG_MXMX; + OSCallACPI_NVHG_DOS *osCallACPI_NVHG_DOS; + OSCallACPI_NVHG_ROM *osCallACPI_NVHG_ROM; + OSCallACPI_NVHG_DCS *osCallACPI_NVHG_DCS; + OSCallACPI_DOD *osCallACPI_DOD; + OSCallACPI_SUB *osCallACPI_SUB; + OSCallACPI_ON *osCallACPI_ON; + OSCallACPI_OFF *osCallACPI_OFF; + OSCallACPI_DSM *osCallACPI_DSM; + OSGetUefiVariable *osGetUefiVariable; + OSCheckCallback *osCheckCallback; + OSRCCallback *osRCCallback; + OSCallACPI_NBPS *osCallACPI_NBPS; + OSCallACPI_NBSL *osCallACPI_NBSL; + OSCallACPI_OPTM_GPUON *osCallACPI_OPTM_GPUON; + OSSetupVBlank *osSetupVBlank; + OSPexRecoveryCallback *osPexRecoveryCallback; + OSInternalReserveAllocCallback *osInternalReserveAllocCallback; + OSInternalReserveFreeCallback *osInternalReserveFreeCallback; + NvU32 SystemMemorySize; + OSPageArrayGetPhysAddr *osPageArrayGetPhysAddr; + NvU32 dynamicPowerSupportGpuMask; + NvBool bIsSimMods; +}; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS; + +#define __staticCast_OBJOS(pThis) \ + ((pThis)->__nvoc_pbase_OBJOS) + +#ifdef __nvoc_os_h_disabled +#define __dynamicCast_OBJOS(pThis) ((OBJOS*)NULL) +#else //__nvoc_os_h_disabled +#define __dynamicCast_OBJOS(pThis) \ + ((OBJOS*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJOS))) +#endif //__nvoc_os_h_disabled + +#define PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER_BASE_CAST +#define PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER_BASE_NAME PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER +#define PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS_BASE_CAST +#define PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS_BASE_NAME PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS +#define PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM_BASE_CAST +#define PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM_BASE_NAME PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM +#define PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED_BASE_CAST +#define PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED_BASE_NAME PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED +#define PDB_PROP_OS_LIMIT_GPU_RESET_BASE_CAST +#define PDB_PROP_OS_LIMIT_GPU_RESET_BASE_NAME PDB_PROP_OS_LIMIT_GPU_RESET +#define 
PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT_BASE_CAST
+#define PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT_BASE_NAME PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT
+#define PDB_PROP_OS_PAT_UNSUPPORTED_BASE_CAST
+#define PDB_PROP_OS_PAT_UNSUPPORTED_BASE_NAME PDB_PROP_OS_PAT_UNSUPPORTED
+#define PDB_PROP_OS_SLI_ALLOWED_BASE_CAST
+#define PDB_PROP_OS_SLI_ALLOWED_BASE_NAME PDB_PROP_OS_SLI_ALLOWED
+#define PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS_BASE_CAST
+#define PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS_BASE_NAME PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS
+#define PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE_BASE_CAST
+#define PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE_BASE_NAME PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE
+#define PDB_PROP_OS_SUPPORTS_TDR_BASE_CAST
+#define PDB_PROP_OS_SUPPORTS_TDR_BASE_NAME PDB_PROP_OS_SUPPORTS_TDR
+#define PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI_BASE_CAST
+#define PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI_BASE_NAME PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI
+#define PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED_BASE_CAST
+#define PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED_BASE_NAME PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED
+
+NV_STATUS __nvoc_objCreateDynamic_OBJOS(OBJOS**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_OBJOS(OBJOS**, Dynamic*, NvU32);
+#define __objCreate_OBJOS(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_OBJOS((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+#undef PRIVATE_FIELD
+
+
+NV_STATUS addProbe(OBJGPU *, NvU32);
+
+
+typedef NV_STATUS OSFlushCpuCache(void);
+typedef void      OSAddRecordForCrashLog(void *, NvU32);
+typedef void      OSDeleteRecordForCrashLog(void *);
+
+OSFlushCpuCache           osFlushCpuCache;
+OSAddRecordForCrashLog    osAddRecordForCrashLog;
+OSDeleteRecordForCrashLog osDeleteRecordForCrashLog;
+
+
+//
+// This file should only contain the most common OS functions that are called
+// directly, e.g. osDelay and osIsAdministrator.
+//
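+//
+// [Editor's note: the following usage sketch is illustrative and is not part
+// of the original source.] The PCI config-space helpers declared below follow
+// an init-handle-then-access pattern: osPciInitHandle() resolves a
+// (domain, bus, slot, function) tuple to an opaque handle and returns the
+// vendor and device IDs through its out parameters, and the osPciRead* /
+// osPciWrite* routines take that handle plus a byte offset into config space.
+// The domain/bus/slot/function values here are placeholders:
+//
+//     NvU16 vendor, device;
+//     void *pHandle = osPciInitHandle(0, 1, 0, 0, &vendor, &device);
+//     if (pHandle != NULL)
+//     {
+//         // Config offset 0x0 packs the device ID (bits 31:16) and the
+//         // vendor ID (bits 15:0).
+//         NvU32 id = osPciReadDword(pHandle, 0x0);
+//     }
+//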
+NV_STATUS osTegraSocPmPowergate(OS_GPU_INFO *pOsGpuInfo);
+NV_STATUS osTegraSocPmUnpowergate(OS_GPU_INFO *pOsGpuInfo);
+NV_STATUS osTegraSocDeviceReset(OS_GPU_INFO *pOsGpuInfo);
+NV_STATUS osTegraSocGetImpImportData(TEGRA_IMP_IMPORT_DATA *pTegraImpImportData);
+NV_STATUS osTegraSocEnableDisableRfl(OS_GPU_INFO *pOsGpuInfo, NvBool bEnable);
+NV_STATUS osTegraAllocateDisplayBandwidth(OS_GPU_INFO *pOsGpuInfo,
+                                          NvU32 averageBandwidthKBPS,
+                                          NvU32 floorBandwidthKBPS);
+
+NvBool osIsAdministrator(void);
+NvBool osAllowPriorityOverride(void);
+NV_STATUS osGetCurrentTime(NvU32 *pSec, NvU32 *puSec);
+NV_STATUS osGetCurrentTick(NvU64 *pTimeInNs);
+NvU64 osGetTickResolution(void);
+NvU64 osGetTimestamp(void);
+NvU64 osGetTimestampFreq(void);
+
+NV_STATUS osDeferredIsr(OBJGPU *pGpu);
+
+void osEnableInterrupts(OBJGPU *pGpu);
+
+void osDisableInterrupts(OBJGPU *pGpu,
+                         NvBool bIsr);
+
+void osBugCheck(NvU32 bugCode);
+void osAssertFailed(void);
+
+// OS PCI R/W functions
+void *osPciInitHandle(NvU32 domain, NvU8 bus, NvU8 slot, NvU8 function,
+                      NvU16 *pVendor, NvU16 *pDevice);
+NvU32 osPciReadDword(void *pHandle, NvU32 offset);
+NvU16 osPciReadWord(void *pHandle, NvU32 offset);
+NvU8 osPciReadByte(void *pHandle, NvU32 offset);
+void osPciWriteDword(void *pHandle, NvU32 offset, NvU32 value);
+void osPciWriteWord(void *pHandle, NvU32 offset, NvU16 value);
+void osPciWriteByte(void *pHandle, NvU32 offset, NvU8 value);
+
+// OS RM capabilities calls
+
+void osRmCapInitDescriptor(NvU64 *pCapDescriptor);
+NV_STATUS osRmCapAcquire(OS_RM_CAPS *pOsRmCaps, NvU32 rmCap,
+                         NvU64 capDescriptor,
+                         NvU64 *dupedCapDescriptor);
+void osRmCapRelease(NvU64 dupedCapDescriptor);
+NV_STATUS osRmCapRegisterGpu(OS_GPU_INFO *pOsGpuInfo, OS_RM_CAPS **ppOsRmCaps);
+void osRmCapUnregister(OS_RM_CAPS **ppOsRmCaps);
+NV_STATUS osRmCapRegisterSmcPartition(OS_RM_CAPS *pGpuOsRmCaps,
+                                      OS_RM_CAPS **ppPartitionOsRmCaps,
+                                      NvU32 partitionId);
+NV_STATUS osRmCapRegisterSmcExecutionPartition(
+    OS_RM_CAPS *pPartitionOsRmCaps,
+    OS_RM_CAPS **ppExecPartitionOsRmCaps,
+    NvU32 execPartitionId);
+NV_STATUS osRmCapRegisterSys(OS_RM_CAPS **ppOsRmCaps);
+
+NV_STATUS osGetRandomBytes(NvU8 *pBytes, NvU16 numBytes);
+
+NV_STATUS osAllocWaitQueue(OS_WAIT_QUEUE **ppWq);
+void osFreeWaitQueue(OS_WAIT_QUEUE *pWq);
+void osWaitUninterruptible(OS_WAIT_QUEUE *pWq);
+void osWaitInterruptible(OS_WAIT_QUEUE *pWq);
+void osWakeUp(OS_WAIT_QUEUE *pWq);
+
+NvU32 osGetDynamicPowerSupportMask(void);
+
+void osUnrefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo);
+NV_STATUS osRefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo);
+
+NV_STATUS osIovaMap(PIOVAMAPPING pIovaMapping);
+void osIovaUnmap(PIOVAMAPPING pIovaMapping);
+NV_STATUS osGetAtsTargetAddressRange(OBJGPU *pGpu,
+                                     NvU64 *pAddr,
+                                     NvU32 *pAddrWidth,
+                                     NvU32 *pMask,
+                                     NvU32 *pMaskWidth,
+                                     NvBool bIsPeer,
+                                     NvU32 peerIndex);
+NV_STATUS osGetFbNumaInfo(OBJGPU *pGpu,
+                          NvU64 *pAddrPhys,
+                          NvS32 *pNodeId);
+NV_STATUS osGetForcedNVLinkConnection(OBJGPU *pGpu,
+                                      NvU32 maxLinks,
+                                      NvU32 *pLinkConnection);
+NV_STATUS osGetForcedC2CConnection(OBJGPU *pGpu,
+                                   NvU32 maxLinks,
+                                   NvU32 *pLinkConnection);
+void osSetNVLinkSysmemLinkState(OBJGPU *pGpu, NvBool enabled);
+NV_STATUS osGetPlatformNvlinkLinerate(OBJGPU *pGpu, NvU32 *lineRate);
+const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void);
+
+void osRemoveGpu(NvU32 domain, NvU8 bus, NvU8 device);
+NvBool osRemoveGpuSupported(void);
+
+void initVGXSpecificRegistry(OBJGPU *);
+
+NV_STATUS osVgpuVfioWake(void *waitQueue);
+NV_STATUS
osVgpuInjectInterrupt(void *pArg1); +NV_STATUS osVgpuRegisterMdev(OS_GPU_INFO *pArg1); +NV_STATUS osIsVgpuVfioPresent(void); +NV_STATUS osVgpuAllocVmbusEventDpc(void **ppArg1); +void osVgpuScheduleVmbusEventDpc(void *pArg1, void *pArg2); +NV_STATUS osLockPageableDataSection(RM_PAGEABLE_SECTION *pSection); +NV_STATUS osUnlockPageableDataSection(RM_PAGEABLE_SECTION *pSection); + +void osFlushGpuCoherentCpuCacheRange(OS_GPU_INFO *pOsGpuInfo, + NvU64 cpuVirtual, + NvU64 size); +NvBool osUidTokensEqual(PUID_TOKEN arg1, PUID_TOKEN arg2); + +NV_STATUS osValidateClientTokens(PSECURITY_TOKEN arg1, + PSECURITY_TOKEN arg2); +PUID_TOKEN osGetCurrentUidToken(void); +PSECURITY_TOKEN osGetSecurityToken(void); + +NV_STATUS osIsKernelBuffer(void *pArg1, NvU32 arg2); + +NV_STATUS osMapViewToSection(OS_GPU_INFO *pArg1, + void *pSectionHandle, + void **ppAddress, + NvU64 actualSize, + NvU64 sectionOffset, + NvBool bIommuEnabled); +NV_STATUS osUnmapViewFromSection(OS_GPU_INFO *pArg1, + void *pAddress, + NvBool bIommuEnabled); + +NV_STATUS osOpenTemporaryFile(void **ppFile); +void osCloseFile(void *pFile); +NV_STATUS osWriteToFile(void *pFile, NvU8 *buffer, + NvU64 size, NvU64 offset); +NV_STATUS osReadFromFile(void *pFile, NvU8 *buffer, + NvU64 size, NvU64 offset); + +NV_STATUS osSrPinSysmem(OS_GPU_INFO *pArg1, + NvU64 commitSize, + void *pMdl); +NV_STATUS osSrUnpinSysmem(OS_GPU_INFO *pArg1); + +void osPagedSegmentAccessCheck(void); + +NV_STATUS osCreateMemFromOsDescriptorInternal(OBJGPU *pGpu, void *pAddress, + NvU32 flags, NvU64 size, + MEMORY_DESCRIPTOR **ppMemDesc, + NvBool bCachedKernel, + RS_PRIV_LEVEL privilegeLevel); + +NV_STATUS osReserveCpuAddressSpaceUpperBound(void **ppSectionHandle, + NvU64 maxSectionSize); +void osReleaseCpuAddressSpaceUpperBound(void *pSectionHandle); + +// OS Tegra IPC functions +NV_STATUS osTegraDceRegisterIpcClient(NvU32 interfaceType, void *usrCtx, + NvU32 *clientId); +NV_STATUS osTegraDceClientIpcSendRecv(NvU32 clientId, void *msg, + NvU32 msgLength); +NV_STATUS osTegraDceUnregisterIpcClient(NvU32 clientId); + +// +// Define OS-layer specific type instead of #include "clk_domains.h" for +// CLKWHICH, avoids upwards dependency from OS interface on higher level +// RM modules +// +typedef NvU32 OS_CLKWHICH; + +NV_STATUS osTegraSocEnableClk(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM); +NV_STATUS osTegraSocDisableClk(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM); +NV_STATUS osTegraSocGetCurrFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pCurrFreqKHz); +NV_STATUS osTegraSocGetMaxFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pMaxFreqKHz); +NV_STATUS osTegraSocGetMinFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pMinFreqKHz); +NV_STATUS osTegraSocSetFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 reqFreqKHz); +NV_STATUS osTegraSocSetParent(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRMsource, OS_CLKWHICH whichClkRMparent); +NV_STATUS osTegraSocGetParent(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRMsource, OS_CLKWHICH *pWhichClkRMparent); + +NV_STATUS osTegraSocDeviceReset(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocPmPowergate(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocPmUnpowergate(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osGetSyncpointAperture(OS_GPU_INFO *pOsGpuInfo, + NvU32 syncpointId, + NvU64 *physAddr, + NvU64 *limit, + NvU32 *offset); +NV_STATUS osTegraI2CGetBusState(OS_GPU_INFO *pOsGpuInfo, NvU32 port, NvS32 *scl, NvS32 *sda); +NV_STATUS osTegraSocParseFixedModeTimings(OS_GPU_INFO 
*pOsGpuInfo, + NvU32 dcbIndex, + OS_FIXED_MODE_TIMINGS *pFixedModeTimings); + +NV_STATUS osGetVersion(NvU32 *pMajorVer, + NvU32 *pMinorVer, + NvU32 *pBuildNum, + NvU16 *pServicePackMaj, + NvU16 *pProductType); + +NvBool osGrService(OS_GPU_INFO *pOsGpuInfo, NvU32 grIdx, NvU32 intr, NvU32 nstatus, NvU32 addr, NvU32 dataLo); + +NvBool osDispService(NvU32 Intr0, NvU32 Intr1); + +NV_STATUS osReferenceObjectCount(void *pEvent); + +NV_STATUS osDereferenceObjectCount(void *pEvent); + +// +// Perform OS-specific error logging. +// Like libc's vsnprintf(), osErrorLogV() invalidates its va_list argument. The va_list argument +// may not be reused after osErrorLogV() returns. If the va_list is needed after the +// osErrorLogV() call, create a copy of the va_list using va_copy(). +// The caller controls the lifetime of the va_list argument, and should free it using va_end. +// +void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist); +void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...); + +NV_STATUS osNvifInitialize(OBJGPU *pGpu); + +NV_STATUS osNvifMethod(OBJGPU *pGpu, NvU32 func, + NvU32 subFunc, void *pInParam, + NvU16 inParamSize, NvU32 *pOutStatus, + void *pOutData, NvU16 *pOutDataSize); + +NV_STATUS osCreateMemFromOsDescriptor(OBJGPU *pGpu, NvP64 pDescriptor, + NvHandle hClient, NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + NvU32 descriptorType, + RS_PRIV_LEVEL privilegeLevel); + +void* osMapKernelSpace(RmPhysAddr Start, + NvU64 Size, + NvU32 Mode, + NvU32 Protect); + +void osUnmapKernelSpace(void *addr, NvU64 size); + + +void *osMapIOSpace(RmPhysAddr start, + NvU64 size_bytes, + void ** priv, + NvU32 user, + NvU32 mode, + NvU32 Protect); + +void osUnmapIOSpace(void *pAddress, + NvU64 Size, + void *pData, + NvU32 User); + +NvBool osTestPcieExtendedConfigAccess(void *handle, NvU32 offset); + +NvU32 osGetCpuFrequency(void); + +void osIoWriteByte(NvU32 Address, NvU8 Value); + +NvU8 osIoReadByte(NvU32 Address); + +void osIoWriteWord(NvU32 Address, NvU16 Value); + +NvU16 osIoReadWord(NvU32 Address); + +void osIoWriteDword(NvU32 port, NvU32 data); + +NvU32 osIoReadDword(NvU32 port); + +// OS functions to get memory pages + +NV_STATUS osGetNumMemoryPages (MEMORY_DESCRIPTOR *pMemDesc, NvU32 *pNumPages); +NV_STATUS osGetMemoryPages (MEMORY_DESCRIPTOR *pMemDesc, void *pPages, NvU32 *pNumPages); + +NV_STATUS osGetAcpiTable(NvU32 tableSignature, + void **ppTable, + NvU32 tableSize, + NvU32 *retSize); + +NV_STATUS osInitGetAcpiTable(void); + +NV_STATUS osGetIbmnpuGenregInfo(OS_GPU_INFO *pArg1, + NvU64 *pArg2, + NvU64 *pArg3); + +NV_STATUS osGetIbmnpuRelaxedOrderingMode(OS_GPU_INFO *pArg1, + NvBool *pArg2); + +void osWaitForIbmnpuRsync(OS_GPU_INFO *pArg1); + +NV_STATUS osGetAcpiRsdpFromUefi(NvU32 *pRsdpAddr); + +NV_STATUS osCreateNanoTimer(OS_GPU_INFO *pArg1, + void *tmrEvent, + void **tmrUserData); + +NV_STATUS osStartNanoTimer(OS_GPU_INFO *pArg1, + void *pTimer, + NvU64 timeNs); + +NV_STATUS osCancelNanoTimer(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osDestroyNanoTimer(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osSchedule(void); + +NV_STATUS osDmaMapPages(OS_GPU_INFO *pArg1, + MEMORY_DESCRIPTOR *pMemDesc); + +NV_STATUS osDmaUnmapPages(OS_GPU_INFO *pArg1, + MEMORY_DESCRIPTOR *pMemDesc); + +void osDmaSetAddressSize(OS_GPU_INFO *pArg1, + NvU32 bits); + +void osClientGcoffDisallowRefcount(OS_GPU_INFO *pArg1, + NvBool arg2); + +NV_STATUS osTegraSocGpioGetPinState(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3); + +void 
osTegraSocGpioSetPinState(OS_GPU_INFO *pArg1,
+                               NvU32 arg2,
+                               NvU32 arg3);
+
+NV_STATUS osTegraSocGpioSetPinDirection(OS_GPU_INFO *pArg1,
+                                        NvU32 arg2,
+                                        NvU32 arg3);
+
+NV_STATUS osTegraSocGpioGetPinDirection(OS_GPU_INFO *pArg1,
+                                        NvU32 arg2,
+                                        NvU32 *pArg3);
+
+NV_STATUS osTegraSocGpioGetPinNumber(OS_GPU_INFO *pArg1,
+                                     NvU32 arg2,
+                                     NvU32 *pArg3);
+
+NV_STATUS osTegraSocGpioGetPinInterruptStatus(OS_GPU_INFO *pArg1,
+                                              NvU32 arg2,
+                                              NvU32 arg3,
+                                              NvBool *pArg4);
+
+NV_STATUS osTegraSocGpioSetPinInterrupt(OS_GPU_INFO *pArg1,
+                                        NvU32 arg2,
+                                        NvU32 arg3);
+
+NV_STATUS osTegraSocDsiParsePanelProps(OS_GPU_INFO *pArg1,
+                                       void *pArg2);
+
+NvBool osTegraSocIsDsiPanelConnected(OS_GPU_INFO *pArg1);
+
+NV_STATUS osTegraSocDsiPanelEnable(OS_GPU_INFO *pArg1,
+                                   void *pArg2);
+
+NV_STATUS osTegraSocDsiPanelReset(OS_GPU_INFO *pArg1,
+                                  void *pArg2);
+
+void osTegraSocDsiPanelDisable(OS_GPU_INFO *pArg1,
+                               void *pArg2);
+
+void osTegraSocDsiPanelCleanup(OS_GPU_INFO *pArg1,
+                               void *pArg2);
+
+NV_STATUS osTegraSocResetMipiCal(OS_GPU_INFO *pArg1);
+
+NV_STATUS osGetTegraNumDpAuxInstances(OS_GPU_INFO *pArg1,
+                                      NvU32 *pArg2);
+
+NvU32 osTegraSocFuseRegRead(NvU32 addr);
+
+NV_STATUS osGetCurrentIrqPrivData(OS_GPU_INFO *pArg1,
+                                  NvU32 *pArg2);
+
+NV_STATUS osGetTegraBrightnessLevel(OS_GPU_INFO *pArg1,
+                                    NvU32 *pArg2);
+
+NV_STATUS osSetTegraBrightnessLevel(OS_GPU_INFO *pArg1,
+                                    NvU32 arg2);
+
+NvBool osIsVga(OS_GPU_INFO *pArg1,
+               NvBool bIsGpuPrimaryDevice);
+
+void osInitOSHwInfo(OBJGPU *pGpu);
+
+void osDestroyOSHwInfo(OBJGPU *pGpu);
+
+NV_STATUS osUserHandleToKernelPtr(NvU32 hClient,
+                                  NvP64 Handle,
+                                  NvP64 *pHandle);
+
+NV_STATUS osGetSmbiosTable(void **pBaseVAddr, NvU64 *pLength,
+                           NvU64 *pNumSubTypes, NvU32 *pVersion);
+
+void osPutSmbiosTable(void *pBaseVAddr, NvU64 length);
+
+NvBool osIsNvswitchPresent(void);
+
+void osQueueMMUFaultHandler(OBJGPU *);
+
+NvBool osIsGpuAccessible(OBJGPU *pGpu);
+
+NvBool osMatchGpuOsInfo(OBJGPU *pGpu, void *pOsInfo);
+
+void osReleaseGpuOsInfo(void *pOsInfo);
+
+void osGpuWriteReg008(OBJGPU *pGpu,
+                      NvU32 thisAddress,
+                      NvV8 thisValue);
+
+void osDevWriteReg008(OBJGPU *pGpu,
+                      DEVICE_MAPPING *pMapping,
+                      NvU32 thisAddress,
+                      NvV8 thisValue);
+
+NvU8 osGpuReadReg008(OBJGPU *pGpu,
+                     NvU32 thisAddress);
+
+NvU8 osDevReadReg008(OBJGPU *pGpu,
+                     DEVICE_MAPPING *pMapping,
+                     NvU32 thisAddress);
+
+void osGpuWriteReg016(OBJGPU *pGpu,
+                      NvU32 thisAddress,
+                      NvV16 thisValue);
+
+void osDevWriteReg016(OBJGPU *pGpu,
+                      DEVICE_MAPPING *pMapping,
+                      NvU32 thisAddress,
+                      NvV16 thisValue);
+
+NvU16 osGpuReadReg016(OBJGPU *pGpu,
+                      NvU32 thisAddress);
+
+NvU16 osDevReadReg016(OBJGPU *pGpu,
+                      DEVICE_MAPPING *pMapping,
+                      NvU32 thisAddress);
+
+void osGpuWriteReg032(OBJGPU *pGpu,
+                      NvU32 thisAddress,
+                      NvV32 thisValue);
+
+void osDevWriteReg032(OBJGPU *pGpu,
+                      DEVICE_MAPPING *pMapping,
+                      NvU32 thisAddress,
+                      NvV32 thisValue);
+
+NvU32 osGpuReadReg032(OBJGPU *pGpu,
+                      NvU32 thisAddress);
+
+NvU32 osDevReadReg032(OBJGPU *pGpu,
+                      DEVICE_MAPPING *pMapping,
+                      NvU32 thisAddress);
+
+NV_STATUS osIsr(OBJGPU *pGpu);
+
+NV_STATUS osSanityTestIsr(OBJGPU *pGpu);
+
+NV_STATUS osInitMapping(OBJGPU *pGpu);
+
+NV_STATUS osVerifySystemEnvironment(OBJGPU *pGpu);
+
+NvBool osDmabufIsSupported(void);
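A short sketch of the va_list contract documented for osErrorLogV() above: because the callee consumes the va_list, a caller that needs the arguments twice must duplicate them with va_copy() before the first call. Everything here other than osErrorLogV() itself is illustrative, assuming standard <stdarg.h> semantics:

    // Hypothetical variadic wrapper that emits the same error record twice.
    static void logTwice(OBJGPU *pGpu, NvU32 num, const char *pFormat, ...)
    {
        va_list args;
        va_list argsCopy;

        va_start(args, pFormat);
        va_copy(argsCopy, args);                   // duplicate before the first consumer

        osErrorLogV(pGpu, num, pFormat, args);     // 'args' is invalidated here
        osErrorLogV(pGpu, num, pFormat, argsCopy); // safe: operates on the fresh copy

        va_end(argsCopy);                          // caller owns both lifetimes
        va_end(args);
    }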
+
+static NV_INLINE NV_STATUS isrWrapper(NvBool testIntr, OBJGPU *pGpu)
+{
+    //
+    // If pGpu->testIntr is not true then use the original osIsr function.
+    // On VMware ESXi 6.0, both the RM ISR and DPC handlers are called from the
+    // ESXi 6.0 DPC handler. Because of this, when multiple GPUs are present in
+    // the system, we may get a call to the rm_isr routine for a HW interrupt
+    // corresponding to a previously initialized GPU. In that case we need to
+    // call the original osIsr function.
+    //
+
+    NV_STATUS status = NV_OK;
+
+    if (testIntr)
+    {
+        status = osSanityTestIsr(pGpu);
+    }
+    else
+    {
+        status = osIsr(pGpu);
+    }
+
+    return status;
+}
+
+#define OS_PCIE_CAP_MASK_REQ_ATOMICS_32 NVBIT(0)
+#define OS_PCIE_CAP_MASK_REQ_ATOMICS_64 NVBIT(1)
+#define OS_PCIE_CAP_MASK_REQ_ATOMICS_128 NVBIT(2)
+
+// OS 1Hz timer callback functions
+NV_STATUS osInit1HzCallbacks(OBJTMR *pTmr);
+NV_STATUS osDestroy1HzCallbacks(OBJTMR *pTmr);
+NV_STATUS osSchedule1SecondCallback(OBJGPU *pGpu, OS1HZPROC callback, void *pData, NvU32 flags);
+void osRemove1SecondRepeatingCallback(OBJGPU *pGpu, OS1HZPROC callback, void *pData);
+NvBool osRun1HzCallbacksNow(OBJGPU *pGpu);
+NV_STATUS osDoFunctionLevelReset(OBJGPU *pGpu);
+
+void vgpuDevWriteReg032(
+    OBJGPU *pGpu,
+    NvU32 thisAddress,
+    NvV32 thisValue,
+    NvBool *vgpuHandled
+);
+
+NvU32 vgpuDevReadReg032(
+    OBJGPU *pGpu,
+    NvU32 thisAddress,
+    NvBool *vgpuHandled
+);
+
+void osInitSystemStaticConfig(SYS_STATIC_CONFIG *);
+
+void osDbgBugCheckOnAssert(void);
+
+NvBool osBugCheckOnTimeoutEnabled(void);
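As a sketch of how the 1 Hz callback hooks above are typically consumed: a client registers a repeating callback and later removes it with the same (callback, pData) pair. The OS1HZPROC body shape and the NV_OS_1HZ_REPEAT flag are assumptions for illustration; they are not defined in this hunk:

    // Hypothetical 1 Hz heartbeat, assuming OS1HZPROC takes (OBJGPU *, void *).
    static void heartbeat1Hz(OBJGPU *pGpu, void *pData)
    {
        NvU32 *pTicks = (NvU32 *)pData;
        (*pTicks)++;                     // runs roughly once per second
    }

    static NvU32 ticks = 0;

    // Registration; the flags value (NV_OS_1HZ_REPEAT) is assumed here.
    NV_STATUS status = osSchedule1SecondCallback(pGpu, heartbeat1Hz, &ticks,
                                                 NV_OS_1HZ_REPEAT);

    // Teardown must pass the same callback/pData pair used at registration.
    if (status == NV_OK)
        osRemove1SecondRepeatingCallback(pGpu, heartbeat1Hz, &ticks);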
+
+//
+// TODO: clean up the rest of the list
+//
+OSAttachGpu osAttachGpu;
+OSDpcAttachGpu osDpcAttachGpu;
+OSDpcDetachGpu osDpcDetachGpu;
+OSHandleGpuLost osHandleGpuLost;
+OSHandleGpuSurpriseRemoval osHandleGpuSurpriseRemoval;
+OSInitScalabilityOptions osInitScalabilityOptions;
+OSQueueDpc osQueueDpc;
+OSRmInitRm osRmInitRm;
+OSSetEvent osSetEvent;
+OSEventNotification osEventNotification;
+OSEventNotificationWithInfo osEventNotificationWithInfo;
+OSObjectEventNotification osObjectEventNotification;
+OSNotifyEvent osNotifyEvent;
+OSFlushCpuWriteCombineBuffer osFlushCpuWriteCombineBuffer;
+OSDeviceClassToDeviceName osDeviceClassToDeviceName;
+OSDelay osDelay;
+OSSpinLoop osSpinLoop;
+OSDelayUs osDelayUs;
+OSDelayNs osDelayNs;
+OSGetCpuCount osGetCpuCount;
+OSGetMaximumCoreCount osGetMaximumCoreCount;
+OSGetCurrentProcessorNumber osGetCurrentProcessorNumber;
+OSGetVersionDump osGetVersionDump;
+
+OSMemAddFilter osMemAddFilter;
+OSMemRemoveFilter osMemRemoveFilter;
+OSMemGetFilter osMemGetFilter;
+
+OSAllocPagesInternal osAllocPagesInternal;
+OSFreePagesInternal osFreePagesInternal;
+
+OSGetPageSize osGetPageSize;
+OSNumaMemblockSize osNumaMemblockSize;
+OSNumaOnliningEnabled osNumaOnliningEnabled;
+OSAllocPagesNode osAllocPagesNode;
+OSAllocAcquirePage osAllocAcquirePage;
+OSAllocReleasePage osAllocReleasePage;
+OSGetPageRefcount osGetPageRefcount;
+OSCountTailPages osCountTailPages;
+OSVirtualToPhysicalAddr osKernVirtualToPhysicalAddr;
+OSLockMem osLockMem;
+OSUnlockMem osUnlockMem;
+OSMapSystemMemory osMapSystemMemory;
+OSUnmapSystemMemory osUnmapSystemMemory;
+OSWriteRegistryDword osWriteRegistryDword;
+OSReadRegistryDword osReadRegistryDword;
+OSReadRegistryString osReadRegistryString;
+OSWriteRegistryBinary osWriteRegistryBinary;
+OSWriteRegistryVolatile osWriteRegistryVolatile;
+OSReadRegistryVolatile osReadRegistryVolatile;
+OSReadRegistryVolatileSize osReadRegistryVolatileSize;
+OSReadRegistryBinary osReadRegistryBinary;
+OSReadRegistryDwordBase osReadRegistryDwordBase;
+OSReadRegistryStringBase osReadRegistryStringBase;
+OSPackageRegistry osPackageRegistry;
+OSUnpackageRegistry osUnpackageRegistry;
+NV_STATUS osDestroyRegistry(void);
+OSMapPciMemoryUser osMapPciMemoryUser;
+OSUnmapPciMemoryUser osUnmapPciMemoryUser;
+OSMapPciMemoryKernelOld osMapPciMemoryKernelOld;
+OSMapPciMemoryKernel64 osMapPciMemoryKernel64;
+OSUnmapPciMemoryKernelOld osUnmapPciMemoryKernelOld;
+OSUnmapPciMemoryKernel64 osUnmapPciMemoryKernel64;
+OSMapGPU osMapGPU;
+OSUnmapGPU osUnmapGPU;
+OSLockShouldToggleInterrupts osLockShouldToggleInterrupts;
+
+OSGetPerformanceCounter osGetPerformanceCounter;
+
+OSI2CClosePorts osI2CClosePorts;
+OSWriteI2CBufferDirect osWriteI2CBufferDirect;
+OSReadI2CBufferDirect osReadI2CBufferDirect;
+OSI2CTransfer osI2CTransfer;
+OSSetGpuRailVoltage osSetGpuRailVoltage;
+OSGetGpuRailVoltage osGetGpuRailVoltage;
+OSGetChipInfo osGetChipInfo;
+OSGetGpuRailVoltageInfo osGetGpuRailVoltageInfo;
+
+OSGetCurrentProcess osGetCurrentProcess;
+OSGetCurrentProcessName osGetCurrentProcessName;
+OSGetCurrentThread osGetCurrentThread;
+OSAttachToProcess osAttachToProcess;
+OSDetachFromProcess osDetachFromProcess;
+OSPollHotkeyState osPollHotkeyState;
+
+OSIsRaisedIRQL osIsRaisedIRQL;
+OSIsISR osIsISR;
+OSGetDriverBlock osGetDriverBlock;
+
+OSInitGpuMgr osInitGpuMgr;
+
+OSSyncWithRmDestroy osSyncWithRmDestroy;
+OSSyncWithGpuDestroy osSyncWithGpuDestroy;
+
+OSModifyGpuSwStatePersistence osModifyGpuSwStatePersistence;
+
+OSPexRecoveryCallback osPexRecoveryCallback;
+OSHandleDeferredRecovery osHandleDeferredRecovery;
+OSIsSwPreInitOnly osIsSwPreInitOnly;
+OSGetCarveoutInfo osGetCarveoutInfo;
+OSGetVPRInfo osGetVPRInfo;
+OSAllocInVPR osAllocInVPR;
+OSGetGenCarveout osGetGenCarveout;
+OsGetSystemCpuLogicalCoreCounts osGetSystemCpuLogicalCoreCounts;
+OsGetSystemCpuC0AndAPerfCounters osGetSystemCpuC0AndAPerfCounters;
+OsEnableCpuPerformanceCounters osEnableCpuPerformanceCounters;
+OsCpuDpcObjInit osCpuDpcObjInit;
+OsCpuDpcObjQueue osCpuDpcObjQueue;
+OsCpuDpcObjFree osCpuDpcObjFree;
+OsSystemGetBatteryDrain osSystemGetBatteryDrain;
+OSGC6PowerControl osGC6PowerControl;
+OSReadPFPciConfigInVF osReadPFPciConfigInVF;
+
+//
+// When the new basic lock model is enabled then the following legacy RM
+// system semaphore routines are stubbed.
+//
+#define osAllocRmSema(s) (NV_OK)
+#define osFreeRmSema(s)
+#define osIsAcquiredRmSema(s) (NV_TRUE)
+#define osIsRmSemaOwner(s) (NV_TRUE)
+#define osCondReleaseRmSema(s) (NV_TRUE)
+#define osAcquireRmSemaForced(s) osAcquireRmSema(s)
+#define osGpuLockSetOwner(s,t) (NV_OK)
+
+//
+// This version of osAcquireRmSema asserts that the GPUs lock is held when the
+// basic lock model is enabled. This should help catch newly introduced
+// dependencies on the legacy RM system semaphore that do not have
+// corresponding basic lock model support.
+//
+OSAcquireRmSema osAcquireRmSema;
+OSAcquireRmSema osAcquireRmSemaForced;
+
+OSApiLockAcquireConfigureFlags osApiLockAcquireConfigureFlags;
+OSGpuLocksQueueRelease osGpuLocksQueueRelease;
+OSCondAcquireRmSema osCondAcquireRmSema;
+OSReleaseRmSema osReleaseRmSema;
+
+OSFlushLog osFlushLog;
+
+#define MODS_ARCH_ERROR_PRINTF(format, ...)
+#define MODS_ARCH_INFO_PRINTF(format, ...)
+#define MODS_ARCH_REPORT(event, format, ...)
+
+
+#define osAllocPages(a) osAllocPagesInternal(a)
+#define osFreePages(a) osFreePagesInternal(a)
+
+extern NV_STATUS constructObjOS(struct OBJOS *);
+extern void osInitObjOS(struct OBJOS *);
+
+extern OSGetTimeoutParams osGetTimeoutParams;
+extern OSGetSimulationMode osGetSimulationMode;
+
+//
+// NV OS simulation mode defines
+// Keep in sync with gpu.h SIM MODE defines until osGetSimulationMode is deprecated.
+//
+#ifndef NV_SIM_MODE_DEFS
+#define NV_SIM_MODE_DEFS
+#define NV_SIM_MODE_HARDWARE 0U
+#define NV_SIM_MODE_RTL 1U
+#define NV_SIM_MODE_CMODEL 2U
+#define NV_SIM_MODE_MODS_AMODEL 3U
+#define NV_SIM_MODE_TEGRA_FPGA 4U
+#define NV_SIM_MODE_INVALID (~0x0U)
+#endif
+
+//
+// NV Heap control defines
+//
+#define NV_HEAP_CONTROL_INTERNAL 0
+#define NV_HEAP_CONTROL_EXTERNAL 1
+
+// osDelayUs flags
+#define OSDELAYUS_FLAGS_USE_TMR_DELAY NVBIT(0)
+
+// osEventNotification notifyIndex all value
+#define OS_EVENT_NOTIFICATION_INDEX_ALL (0xffffffff)
+
+// tells osEventNotification to only issue notifies/events on this subdev
+#define OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV (0x10000000)
+
+// Notify callback action
+#define NV_OS_WRITE_THEN_AWAKEN 0x00000001
+
+//
+// Include per-OS definitions
+//
+// #ifdef out for nvoctrans; this hides the include of system headers, which
+// breaks the tool.
+//
+// TODO - we should delete the per-OS os_custom.h files exposed to
+// OS-agnostic code. Cross-OS code shouldn't pull in per-OS headers or
+// per-OS definitions.
+//
+#include "os_custom.h"
+
+#define NV_SEMA_RELEASE_SUCCEED 0 // lock released, no waiting thread to notify
+#define NV_SEMA_RELEASE_FAILED 1 // failed to release lock
+#define NV_SEMA_RELEASE_NOTIFIED 2 // lock released, notify waiting thread
+#define NV_SEMA_RELEASE_DPC_QUEUED 3 // lock released, queue DPC to notify waiting thread
+#define NV_SEMA_RELEASE_DPC_FAILED 4 // lock released, but failed to queue a DPC to notify waiting thread
+
+ #define ADD_PROBE(pGpu, probeId)
+
+#define IS_SIM_MODS(pOS) (pOS->bIsSimMods)
+
+#endif // _OS_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_OS_NVOC_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h
new file mode 100644
index 0000000..6ec7761
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h
@@ -0,0 +1,10 @@
+// This file is automatically generated by rmconfig - DO NOT EDIT!
+//
+// Profile: devel-soc-disp-dce-client
+// Template: templates/gt_eng_empty.h
+//
+// The file is added to smooth the NVOC migration. After converting a module to
+// an NVOC class, the stale generated headers in the output directory cause
+// failures of incremental builds. This file ensures the content of the old
+// header is removed.
+// diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c new file mode 100644 index 0000000..ee126ac --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c @@ -0,0 +1,155 @@ +#define NVOC_PREREQ_TRACKER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_prereq_tracker_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x0e171b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_PrereqTracker(PrereqTracker*); +void __nvoc_init_funcTable_PrereqTracker(PrereqTracker*); +NV_STATUS __nvoc_ctor_PrereqTracker(PrereqTracker*, struct OBJGPU * arg_pParent); +void __nvoc_init_dataField_PrereqTracker(PrereqTracker*); +void __nvoc_dtor_PrereqTracker(PrereqTracker*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_PrereqTracker; + +static const struct NVOC_RTTI __nvoc_rtti_PrereqTracker_PrereqTracker = { + /*pClassDef=*/ &__nvoc_class_def_PrereqTracker, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_PrereqTracker, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_PrereqTracker_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PrereqTracker, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_PrereqTracker = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_PrereqTracker_PrereqTracker, + &__nvoc_rtti_PrereqTracker_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker = +{ + /*classInfo=*/ { + /*size=*/ sizeof(PrereqTracker), + /*classId=*/ classId(PrereqTracker), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "PrereqTracker", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_PrereqTracker, + /*pCastInfo=*/ &__nvoc_castinfo_PrereqTracker, + /*pExportInfo=*/ &__nvoc_export_info_PrereqTracker +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_PrereqTracker = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_PrereqTracker(PrereqTracker *pThis) { + __nvoc_prereqDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_PrereqTracker(PrereqTracker *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_PrereqTracker(PrereqTracker *pThis, struct OBJGPU * arg_pParent) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_PrereqTracker_fail_Object; + __nvoc_init_dataField_PrereqTracker(pThis); + + status = __nvoc_prereqConstruct(pThis, arg_pParent); + if (status != NV_OK) goto __nvoc_ctor_PrereqTracker_fail__init; + goto __nvoc_ctor_PrereqTracker_exit; // Success + +__nvoc_ctor_PrereqTracker_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_PrereqTracker_fail_Object: +__nvoc_ctor_PrereqTracker_exit: + + return status; +} + +static void __nvoc_init_funcTable_PrereqTracker_1(PrereqTracker *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void 
__nvoc_init_funcTable_PrereqTracker(PrereqTracker *pThis) { + __nvoc_init_funcTable_PrereqTracker_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_PrereqTracker(PrereqTracker *pThis) { + pThis->__nvoc_pbase_PrereqTracker = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_PrereqTracker(pThis); +} + +NV_STATUS __nvoc_objCreate_PrereqTracker(PrereqTracker **ppThis, Dynamic *pParent, NvU32 createFlags, struct OBJGPU * arg_pParent) { + NV_STATUS status; + Object *pParentObj; + PrereqTracker *pThis; + + pThis = portMemAllocNonPaged(sizeof(PrereqTracker)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(PrereqTracker)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_PrereqTracker); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_PrereqTracker(pThis); + status = __nvoc_ctor_PrereqTracker(pThis, arg_pParent); + if (status != NV_OK) goto __nvoc_objCreate_PrereqTracker_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_PrereqTracker_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_PrereqTracker(PrereqTracker **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct OBJGPU * arg_pParent = va_arg(args, struct OBJGPU *); + + status = __nvoc_objCreate_PrereqTracker(ppThis, pParent, createFlags, arg_pParent); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h new file mode 100644 index 0000000..b29327e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h @@ -0,0 +1,254 @@ +#ifndef _G_PREREQ_TRACKER_NVOC_H_ +#define _G_PREREQ_TRACKER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file prereq_tracker.h
+ * @brief Holds interfaces and data structures required by the prerequisite
+ *        tracking feature/code.
+ *
+ * Code depending on multiple other features should use prereqComposeEntry() to
+ * create a prerequisite tracking structure with a provided bitVector of all
+ * necessary dependencies; this arms the prereq to start watching those
+ * dependencies. As each dependency is fulfilled, it should issue
+ * prereqSatisfy() (one by one). This common code broadcasts those
+ * notifications to all prerequisite tracking structures and, once all
+ * respective dependencies are satisfied, issues the registered callback.
+ * Similarly, dependencies should issue prereqRetract() before they change
+ * their state; common code broadcasts that to all tracking structures and
+ * issues the callbacks again with bSatisfied=false if all dependencies for
+ * that prereq were previously satisfied.
+ *
+ * @note The feature is designed to prevent creating new prerequisites once
+ *       dependencies start issuing Satisfy()/Retract() notifications.
+ *       Therefore, compose all prerequisite entries during stateInit() and
+ *       allow code to issue Satisfy()/Retract() only in stateLoad() or later.
+ */
+
+#include "g_prereq_tracker_nvoc.h"
+
+#ifndef __PREREQUISITE_TRACKER_H__
+#define __PREREQUISITE_TRACKER_H__
+
+/* ------------------------ Includes ---------------------------------------- */
+#include "containers/list.h"
+#include "utils/nvbitvector.h"
+
+#include "nvoc/object.h"
+
+/* ------------------------ Macros ------------------------------------------ */
+
+#define PREREQ_ID_VECTOR_SIZE 64
+
+/*!
+ * Checks whether all dependencies of the given prerequisite tracking
+ * structure have been satisfied.
+ *
+ * @param[in] _pPrereq PREREQ_ENTRY pointer
+ *
+ * @return NvBool indicating whether the prerequisite has been satisfied.
+ */
+#define PREREQ_IS_SATISFIED(_pPrereq) \
+    ((_pPrereq)->countRequested == (_pPrereq)->countSatisfied)
+
+/* ------------------------ Datatypes --------------------------------------- */
+
+struct OBJGPU;
+
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGPU
+#define __nvoc_class_id_OBJGPU 0x7ef3cb
+#endif /* __nvoc_class_id_OBJGPU */
+
+
+
+/*!
+ * @brief Callback prototype.
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] bSatisfied
+ *     Indicates if dependencies were just satisfied or are about to be retracted.
+ *
+ * @return NV_OK if the callback executed successfully
+ * @return a failure-specific error code otherwise
+ */
+typedef NV_STATUS GpuPrereqCallback(struct OBJGPU *pGpu, NvBool bSatisfied);
+
+typedef NvU16 PREREQ_ID;
+
+/*!
+ * Bitvector for storing prereq IDs required for another prereq struct.
+ * Limited to the size defined above; set to the largest required by users.
+ */
+MAKE_BITVECTOR(PREREQ_ID_BIT_VECTOR, PREREQ_ID_VECTOR_SIZE);
+
+/*!
+ * An individual prerequisite tracking entry structure.
+ */
+typedef struct
+{
+    /*!
+     * Mask of the dependencies (prerequisites that have to be satisfied before
+     * the callback can be issued).
+     */
+    PREREQ_ID_BIT_VECTOR requested;
+
+    /*!
+     * Counter of all dependencies (prerequisites) tracked by this structure.
+     */
+    NvS32 countRequested;
+    /*!
+     * Counter of currently satisfied dependencies (prerequisites) tracked by
+     * this structure. Once equal to @ref countRequested, the callback can be
+     * issued.
+     */
+    NvS32 countSatisfied;
+
+    /*!
+     * Boolean indicating that the given PREREQ_ENTRY is armed and ready to fire
+     * the @ref callback whenever all PREREQ_IDs specified in @ref requested are
+     * satisfied.
+     *
+     * This bit is set during @ref prereqComposeEntry_IMPL(), which will also do
+     * an initial satisfaction check of all @ref requested PREREQ_IDs and fire
+     * the @ref callback if necessary.
+     */
+    NvBool bArmed;
+
+    /*!
+     * @copydoc GpuPrereqCallback
+     */
+    GpuPrereqCallback *callback;
+} PREREQ_ENTRY;
+MAKE_LIST(PrereqList, PREREQ_ENTRY);
+
+/*!
+ * Holds common prerequisite tracking information.
+ */
+#ifdef NVOC_PREREQ_TRACKER_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct PrereqTracker {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct Object __nvoc_base_Object;
+    struct Object *__nvoc_pbase_Object;
+    struct PrereqTracker *__nvoc_pbase_PrereqTracker;
+    union PREREQ_ID_BIT_VECTOR satisfied;
+    NvBool bInitialized;
+    PrereqList prereqList;
+    struct OBJGPU *pParent;
+};
+
+#ifndef __NVOC_CLASS_PrereqTracker_TYPEDEF__
+#define __NVOC_CLASS_PrereqTracker_TYPEDEF__
+typedef struct PrereqTracker PrereqTracker;
+#endif /* __NVOC_CLASS_PrereqTracker_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_PrereqTracker
+#define __nvoc_class_id_PrereqTracker 0x0e171b
+#endif /* __nvoc_class_id_PrereqTracker */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker;
+
+#define __staticCast_PrereqTracker(pThis) \
+    ((pThis)->__nvoc_pbase_PrereqTracker)
+
+#ifdef __nvoc_prereq_tracker_h_disabled
+#define __dynamicCast_PrereqTracker(pThis) ((PrereqTracker*)NULL)
+#else //__nvoc_prereq_tracker_h_disabled
+#define __dynamicCast_PrereqTracker(pThis) \
+    ((PrereqTracker*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(PrereqTracker)))
+#endif //__nvoc_prereq_tracker_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_PrereqTracker(PrereqTracker**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_PrereqTracker(PrereqTracker**, Dynamic*, NvU32, struct OBJGPU * arg_pParent);
+#define __objCreate_PrereqTracker(ppNewObj, pParent, createFlags, arg_pParent) \
+    __nvoc_objCreate_PrereqTracker((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParent)
+
+NV_STATUS prereqConstruct_IMPL(struct PrereqTracker *arg_pTracker, struct OBJGPU *arg_pParent);
+#define __nvoc_prereqConstruct(arg_pTracker, arg_pParent) prereqConstruct_IMPL(arg_pTracker, arg_pParent)
+void prereqDestruct_IMPL(struct PrereqTracker *pTracker);
+#define __nvoc_prereqDestruct(pTracker) prereqDestruct_IMPL(pTracker)
+NV_STATUS prereqSatisfy_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId);
+#ifdef __nvoc_prereq_tracker_h_disabled
+static inline NV_STATUS prereqSatisfy(struct PrereqTracker *pTracker, PREREQ_ID prereqId) {
+    NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_prereq_tracker_h_disabled
+#define prereqSatisfy(pTracker, prereqId) prereqSatisfy_IMPL(pTracker, prereqId)
+#endif //__nvoc_prereq_tracker_h_disabled
+
+NV_STATUS prereqRetract_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId);
+#ifdef __nvoc_prereq_tracker_h_disabled
+static inline NV_STATUS prereqRetract(struct PrereqTracker *pTracker, PREREQ_ID prereqId) {
+    NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_prereq_tracker_h_disabled
+#define prereqRetract(pTracker, prereqId) prereqRetract_IMPL(pTracker, prereqId)
+#endif //__nvoc_prereq_tracker_h_disabled
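To make the compose/satisfy/retract flow from the file comment concrete, here is a minimal sketch. PREREQ_ID_MY_DEPENDENCY is a hypothetical ID, and the bitVectorClrAll()/bitVectorSet() helpers are assumed from nvbitvector.h; only the prereq* calls themselves come from this header:

    // Hypothetical callback: fired with bSatisfied=NV_TRUE once every requested
    // dependency has issued prereqSatisfy(), and with bSatisfied=NV_FALSE when
    // one of them retracts.
    static NV_STATUS myFeatureCallback(struct OBJGPU *pGpu, NvBool bSatisfied)
    {
        // Enable the dependent feature when satisfied, quiesce it otherwise.
        return NV_OK;
    }

    NV_STATUS status;
    PREREQ_ID_BIT_VECTOR depends;
    PREREQ_ENTRY *pEntry = NULL;

    bitVectorClrAll(&depends);                        // assumed nvbitvector helper
    bitVectorSet(&depends, PREREQ_ID_MY_DEPENDENCY);  // hypothetical PREREQ_ID

    // stateInit(): compose (and arm) the entry before Satisfy()/Retract() traffic starts.
    status = prereqComposeEntry(pTracker, myFeatureCallback, &depends, &pEntry);

    // stateLoad() or later, from the dependency's side:
    if (status == NV_OK)
    {
        status = prereqSatisfy(pTracker, PREREQ_ID_MY_DEPENDENCY); // may fire the callback
        // ...and before the dependency changes state again:
        status = prereqRetract(pTracker, PREREQ_ID_MY_DEPENDENCY); // callback fires with NV_FALSE
    }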
+
+NvBool prereqIdIsSatisfied_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId);
+#ifdef __nvoc_prereq_tracker_h_disabled
+static inline NvBool prereqIdIsSatisfied(struct PrereqTracker *pTracker, PREREQ_ID prereqId) {
+    NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!");
+    return NV_FALSE;
+}
+#else //__nvoc_prereq_tracker_h_disabled
+#define prereqIdIsSatisfied(pTracker, prereqId) prereqIdIsSatisfied_IMPL(pTracker, prereqId)
+#endif //__nvoc_prereq_tracker_h_disabled
+
+NV_STATUS prereqComposeEntry_IMPL(struct PrereqTracker *pTracker, GpuPrereqCallback *callback, union PREREQ_ID_BIT_VECTOR *pDepends, PREREQ_ENTRY **ppPrereq);
+#ifdef __nvoc_prereq_tracker_h_disabled
+static inline NV_STATUS prereqComposeEntry(struct PrereqTracker *pTracker, GpuPrereqCallback *callback, union PREREQ_ID_BIT_VECTOR *pDepends, PREREQ_ENTRY **ppPrereq) {
+    NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_prereq_tracker_h_disabled
+#define prereqComposeEntry(pTracker, callback, pDepends, ppPrereq) prereqComposeEntry_IMPL(pTracker, callback, pDepends, ppPrereq)
+#endif //__nvoc_prereq_tracker_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+#endif // __PREREQUISITE_TRACKER_H__
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_PREREQ_TRACKER_NVOC_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h
new file mode 100644
index 0000000..4d39c80
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h
@@ -0,0 +1,183 @@
+#ifndef _G_REF_COUNT_NVOC_H_
+#define _G_REF_COUNT_NVOC_H_
+#include "nvoc/runtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_ref_count_nvoc.h"
+
+#ifndef REF_COUNT_H
+#define REF_COUNT_H
+
+/****************** Resource Manager Defines and Structures *****************\
+*                                                                            *
+* Defines and structures used for the Reference-Counting Object.
* +* * +\****************************************************************************/ + +#include "containers/map.h" +#include "nvoc/object.h" + +#define NV_REQUESTER_INIT NV_U64_MIN +#define NV_REQUESTER_RM NV_U64_MAX +#define NV_REQUESTER_CLIENT_OBJECT(c,o) (((NvU64)(c) << 32) | o) + +typedef enum +{ + REFCNT_STATE_DEFAULT = 0, + REFCNT_STATE_ENABLED, + REFCNT_STATE_DISABLED, + REFCNT_STATE_ERROR, +} REFCNT_STATE; + +typedef struct +{ + NvU32 numReferences; +} REFCNT_REQUESTER_ENTRY, *PREFCNT_REQUESTER_ENTRY; + +MAKE_MAP(REFCNT_REQUESTER_ENTRY_MAP, REFCNT_REQUESTER_ENTRY); + +typedef struct OBJREFCNT *POBJREFCNT; + +#ifndef __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +#define __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +typedef struct OBJREFCNT OBJREFCNT; +#endif /* __NVOC_CLASS_OBJREFCNT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJREFCNT +#define __nvoc_class_id_OBJREFCNT 0xf89281 +#endif /* __nvoc_class_id_OBJREFCNT */ + + + +// +// XXX-IOM: +// These callback types are good candidates to be replaced with IOM +// functionality, where small derived classes can be created on a 'callback' +// base interface, should that become more practical (currently, adding any +// kind of class still requires a non-trivial amount of boilerplate to wire +// up). +// +typedef NV_STATUS RefcntStateChangeCallback(POBJREFCNT, Dynamic *, + REFCNT_STATE, REFCNT_STATE); + +typedef void RefcntResetCallback(POBJREFCNT, Dynamic *, NvU64); + +#ifdef NVOC_REF_COUNT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJREFCNT { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJREFCNT *__nvoc_pbase_OBJREFCNT; + NvBool PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS; + Dynamic *pParent; + NvU32 tag; + REFCNT_REQUESTER_ENTRY_MAP requesterTree; + REFCNT_STATE state; + NvU32 count; + RefcntStateChangeCallback *refcntStateChangeCallback; + RefcntResetCallback *refcntResetCallback; +}; + +#ifndef __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +#define __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +typedef struct OBJREFCNT OBJREFCNT; +#endif /* __NVOC_CLASS_OBJREFCNT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJREFCNT +#define __nvoc_class_id_OBJREFCNT 0xf89281 +#endif /* __nvoc_class_id_OBJREFCNT */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJREFCNT; + +#define __staticCast_OBJREFCNT(pThis) \ + ((pThis)->__nvoc_pbase_OBJREFCNT) + +#ifdef __nvoc_ref_count_h_disabled +#define __dynamicCast_OBJREFCNT(pThis) ((OBJREFCNT*)NULL) +#else //__nvoc_ref_count_h_disabled +#define __dynamicCast_OBJREFCNT(pThis) \ + ((OBJREFCNT*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJREFCNT))) +#endif //__nvoc_ref_count_h_disabled + +#define PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS_BASE_CAST +#define PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS_BASE_NAME PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS + +NV_STATUS __nvoc_objCreateDynamic_OBJREFCNT(OBJREFCNT**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJREFCNT(OBJREFCNT**, Dynamic*, NvU32, Dynamic * arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback * arg_pStateChangeCallback, RefcntResetCallback * arg_pResetCallback); +#define __objCreate_OBJREFCNT(ppNewObj, pParent, createFlags, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) \ + __nvoc_objCreate_OBJREFCNT((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) + +NV_STATUS refcntConstruct_IMPL(POBJREFCNT arg_pRefcnt, Dynamic 
*arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback *arg_pStateChangeCallback, RefcntResetCallback *arg_pResetCallback);
+#define __nvoc_refcntConstruct(arg_pRefcnt, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) refcntConstruct_IMPL(arg_pRefcnt, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback)
+void refcntDestruct_IMPL(POBJREFCNT pRefcnt);
+#define __nvoc_refcntDestruct(pRefcnt) refcntDestruct_IMPL(pRefcnt)
+NV_STATUS refcntRequestReference_IMPL(POBJREFCNT pRefcnt, NvU64 arg0, NvU32 arg1, NvBool arg2);
+#ifdef __nvoc_ref_count_h_disabled
+static inline NV_STATUS refcntRequestReference(POBJREFCNT pRefcnt, NvU64 arg0, NvU32 arg1, NvBool arg2) {
+    NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_ref_count_h_disabled
+#define refcntRequestReference(pRefcnt, arg0, arg1, arg2) refcntRequestReference_IMPL(pRefcnt, arg0, arg1, arg2)
+#endif //__nvoc_ref_count_h_disabled
+
+NV_STATUS refcntReleaseReferences_IMPL(POBJREFCNT pRefcnt, NvU64 arg0, NvBool arg1);
+#ifdef __nvoc_ref_count_h_disabled
+static inline NV_STATUS refcntReleaseReferences(POBJREFCNT pRefcnt, NvU64 arg0, NvBool arg1) {
+    NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_ref_count_h_disabled
+#define refcntReleaseReferences(pRefcnt, arg0, arg1) refcntReleaseReferences_IMPL(pRefcnt, arg0, arg1)
+#endif //__nvoc_ref_count_h_disabled
+
+NV_STATUS refcntReset_IMPL(POBJREFCNT pRefcnt, NvBool arg0);
+#ifdef __nvoc_ref_count_h_disabled
+static inline NV_STATUS refcntReset(POBJREFCNT pRefcnt, NvBool arg0) {
+    NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!");
+    return NV_ERR_NOT_SUPPORTED;
+}
+#else //__nvoc_ref_count_h_disabled
+#define refcntReset(pRefcnt, arg0) refcntReset_IMPL(pRefcnt, arg0)
+#endif //__nvoc_ref_count_h_disabled
+
+#undef PRIVATE_FIELD
+
+
+#endif // REF_COUNT_H
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_REF_COUNT_NVOC_H_
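The NV_REQUESTER_* macros in this header pack a client/object pair into the NvU64 requester handle that refcntRequestReference()/refcntReleaseReferences() key on. A minimal sketch follows; the generated header leaves the remaining parameters as arg0..arg2, so their meanings (requested state, recursion/propagation flags) are assumptions here, as are the handle values:

    // Hypothetical caller handles; the packing macro is from the header above.
    NvHandle hClient = 0xc1d00001;   // illustrative values only
    NvHandle hObject = 0x00000042;
    NvU64 requesterId = NV_REQUESTER_CLIENT_OBJECT(hClient, hObject);

    // Take a reference on behalf of (hClient, hObject); the NvU32/NvBool
    // arguments are assumed to select the requested state and a recursion flag.
    NV_STATUS status = refcntRequestReference(pRefcnt, requesterId,
                                              REFCNT_STATE_ENABLED, NV_FALSE);
    if (status == NV_OK)
    {
        // ... use the reference-counted resource ...
        status = refcntReleaseReferences(pRefcnt, requesterId, NV_TRUE);
    }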
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h
new file mode 100644
index 0000000..1c6bf0a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h
@@ -0,0 +1,1252 @@
+#ifndef _G_RESOURCE_FWD_DECLS_NVOC_H_
+#define _G_RESOURCE_FWD_DECLS_NVOC_H_
+#include "nvoc/runtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_resource_fwd_decls_nvoc.h"
+
+//
+// This header is a temporary WAR for CORERM-3115.
+// When that RFE is implemented, we'll be able to generate these forward decls
+// from resource_list.h directly.
+//
+#ifndef RESOURCE_FWD_DECLS_H
+#define RESOURCE_FWD_DECLS_H
+
+// Base classes
+struct ChannelDescendant;
+
+#ifndef __NVOC_CLASS_ChannelDescendant_TYPEDEF__
+#define __NVOC_CLASS_ChannelDescendant_TYPEDEF__
+typedef struct ChannelDescendant ChannelDescendant;
+#endif /* __NVOC_CLASS_ChannelDescendant_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_ChannelDescendant
+#define __nvoc_class_id_ChannelDescendant 0x43d7c4
+#endif /* __nvoc_class_id_ChannelDescendant */
+
+
+struct DispChannel;
+
+#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__
+#define __NVOC_CLASS_DispChannel_TYPEDEF__
+typedef struct DispChannel DispChannel;
+#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_DispChannel
+#define __nvoc_class_id_DispChannel 0xbd2ff3
+#endif /* __nvoc_class_id_DispChannel */
+
+
+struct GpuResource;
+
+#ifndef __NVOC_CLASS_GpuResource_TYPEDEF__
+#define __NVOC_CLASS_GpuResource_TYPEDEF__
+typedef struct GpuResource GpuResource;
+#endif /* __NVOC_CLASS_GpuResource_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_GpuResource
+#define __nvoc_class_id_GpuResource 0x5d5d9f
+#endif /* __nvoc_class_id_GpuResource */
+
+
+struct INotifier;
+
+#ifndef __NVOC_CLASS_INotifier_TYPEDEF__
+#define __NVOC_CLASS_INotifier_TYPEDEF__
+typedef struct INotifier INotifier;
+#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_INotifier
+#define __nvoc_class_id_INotifier 0xf8f965
+#endif /* __nvoc_class_id_INotifier */
+
+
+struct Memory;
+
+#ifndef __NVOC_CLASS_Memory_TYPEDEF__
+#define __NVOC_CLASS_Memory_TYPEDEF__
+typedef struct Memory Memory;
+#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Memory
+#define __nvoc_class_id_Memory 0x4789f2
+#endif /* __nvoc_class_id_Memory */
+
+
+struct Notifier;
+
+#ifndef __NVOC_CLASS_Notifier_TYPEDEF__
+#define __NVOC_CLASS_Notifier_TYPEDEF__
+typedef struct Notifier Notifier;
+#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Notifier
+#define __nvoc_class_id_Notifier 0xa8683b
+#endif /* __nvoc_class_id_Notifier */
+
+
+struct NotifShare;
+
+#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__
+#define __NVOC_CLASS_NotifShare_TYPEDEF__
+typedef struct NotifShare NotifShare;
+#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_NotifShare
+#define __nvoc_class_id_NotifShare 0xd5f150
+#endif /* __nvoc_class_id_NotifShare */
+
+
+struct Resource;
+
+#ifndef __NVOC_CLASS_Resource_TYPEDEF__
+#define __NVOC_CLASS_Resource_TYPEDEF__
+typedef struct Resource Resource;
+#endif /* __NVOC_CLASS_Resource_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Resource
+#define __nvoc_class_id_Resource 0xbe8545
+#endif /* __nvoc_class_id_Resource */
+
+
+struct RmResource;
+
+#ifndef __NVOC_CLASS_RmResource_TYPEDEF__
+#define __NVOC_CLASS_RmResource_TYPEDEF__
+typedef struct RmResource RmResource;
+#endif /* __NVOC_CLASS_RmResource_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RmResource
+#define __nvoc_class_id_RmResource 0x03610d
+#endif /* __nvoc_class_id_RmResource */
+
+
+struct RmResourceCommon;
+
+#ifndef 
__NVOC_CLASS_RmResourceCommon_TYPEDEF__ +#define __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +typedef struct RmResourceCommon RmResourceCommon; +#endif /* __NVOC_CLASS_RmResourceCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResourceCommon +#define __nvoc_class_id_RmResourceCommon 0x8ef259 +#endif /* __nvoc_class_id_RmResourceCommon */ + + +struct RsResource; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + + +struct RsShared; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + + + +// Allocatable resources +struct AccessCounterBuffer; + +#ifndef __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ +#define __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ +typedef struct AccessCounterBuffer AccessCounterBuffer; +#endif /* __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_AccessCounterBuffer +#define __nvoc_class_id_AccessCounterBuffer 0x1f0074 +#endif /* __nvoc_class_id_AccessCounterBuffer */ + + +struct KernelCeContext; + +#ifndef __NVOC_CLASS_KernelCeContext_TYPEDEF__ +#define __NVOC_CLASS_KernelCeContext_TYPEDEF__ +typedef struct KernelCeContext KernelCeContext; +#endif /* __NVOC_CLASS_KernelCeContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCeContext +#define __nvoc_class_id_KernelCeContext 0x2d0ee9 +#endif /* __nvoc_class_id_KernelCeContext */ + + +struct Channel; + +#ifndef __NVOC_CLASS_Channel_TYPEDEF__ +#define __NVOC_CLASS_Channel_TYPEDEF__ +typedef struct Channel Channel; +#endif /* __NVOC_CLASS_Channel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Channel +#define __nvoc_class_id_Channel 0x781dc9 +#endif /* __nvoc_class_id_Channel */ + + +struct ConsoleMemory; + +#ifndef __NVOC_CLASS_ConsoleMemory_TYPEDEF__ +#define __NVOC_CLASS_ConsoleMemory_TYPEDEF__ +typedef struct ConsoleMemory ConsoleMemory; +#endif /* __NVOC_CLASS_ConsoleMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ConsoleMemory +#define __nvoc_class_id_ConsoleMemory 0xaac69e +#endif /* __nvoc_class_id_ConsoleMemory */ + + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + +struct DebugBufferApi; + +#ifndef __NVOC_CLASS_DebugBufferApi_TYPEDEF__ +#define __NVOC_CLASS_DebugBufferApi_TYPEDEF__ +typedef struct DebugBufferApi DebugBufferApi; +#endif /* __NVOC_CLASS_DebugBufferApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DebugBufferApi +#define __nvoc_class_id_DebugBufferApi 0x5e7a1b +#endif /* __nvoc_class_id_DebugBufferApi */ + + +struct DeferredApiObject; + +#ifndef __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +#define __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +typedef struct DeferredApiObject DeferredApiObject; +#endif /* __NVOC_CLASS_DeferredApiObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DeferredApiObject +#define __nvoc_class_id_DeferredApiObject 0x8ea933 +#endif /* __nvoc_class_id_DeferredApiObject */ + + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define 
__NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct DiagApi; + +#ifndef __NVOC_CLASS_DiagApi_TYPEDEF__ +#define __NVOC_CLASS_DiagApi_TYPEDEF__ +typedef struct DiagApi DiagApi; +#endif /* __NVOC_CLASS_DiagApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DiagApi +#define __nvoc_class_id_DiagApi 0xaa3066 +#endif /* __nvoc_class_id_DiagApi */ + + +struct DispCapabilities; + +#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__ +#define __NVOC_CLASS_DispCapabilities_TYPEDEF__ +typedef struct DispCapabilities DispCapabilities; +#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCapabilities +#define __nvoc_class_id_DispCapabilities 0x99db3e +#endif /* __nvoc_class_id_DispCapabilities */ + + +struct DispChannelDma; + +#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__ +#define __NVOC_CLASS_DispChannelDma_TYPEDEF__ +typedef struct DispChannelDma DispChannelDma; +#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelDma +#define __nvoc_class_id_DispChannelDma 0xfe3d2e +#endif /* __nvoc_class_id_DispChannelDma */ + + +struct DispChannelPio; + +#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__ +#define __NVOC_CLASS_DispChannelPio_TYPEDEF__ +typedef struct DispChannelPio DispChannelPio; +#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelPio +#define __nvoc_class_id_DispChannelPio 0x10dec3 +#endif /* __nvoc_class_id_DispChannelPio */ + + +struct DispCommon; + +#ifndef __NVOC_CLASS_DispCommon_TYPEDEF__ +#define __NVOC_CLASS_DispCommon_TYPEDEF__ +typedef struct DispCommon DispCommon; +#endif /* __NVOC_CLASS_DispCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCommon +#define __nvoc_class_id_DispCommon 0x41f4f2 +#endif /* __nvoc_class_id_DispCommon */ + + +struct DispSfUser; + +#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__ +#define __NVOC_CLASS_DispSfUser_TYPEDEF__ +typedef struct DispSfUser DispSfUser; +#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSfUser +#define __nvoc_class_id_DispSfUser 0xba7439 +#endif /* __nvoc_class_id_DispSfUser */ + + +struct DispSwObj; + +#ifndef __NVOC_CLASS_DispSwObj_TYPEDEF__ +#define __NVOC_CLASS_DispSwObj_TYPEDEF__ +typedef struct DispSwObj DispSwObj; +#endif /* __NVOC_CLASS_DispSwObj_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObj +#define __nvoc_class_id_DispSwObj 0x6aa5e2 +#endif /* __nvoc_class_id_DispSwObj */ + + +struct DispSwObject; + +#ifndef __NVOC_CLASS_DispSwObject_TYPEDEF__ +#define __NVOC_CLASS_DispSwObject_TYPEDEF__ +typedef struct DispSwObject DispSwObject; +#endif /* __NVOC_CLASS_DispSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObject +#define __nvoc_class_id_DispSwObject 0x99ad6d +#endif /* __nvoc_class_id_DispSwObject */ + + +struct Event; + +#ifndef __NVOC_CLASS_Event_TYPEDEF__ +#define __NVOC_CLASS_Event_TYPEDEF__ +typedef struct Event Event; +#endif /* __NVOC_CLASS_Event_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Event +#define __nvoc_class_id_Event 0xa4ecfc +#endif /* __nvoc_class_id_Event */ + + +struct EventBuffer; + +#ifndef __NVOC_CLASS_EventBuffer_TYPEDEF__ +#define __NVOC_CLASS_EventBuffer_TYPEDEF__ +typedef struct EventBuffer EventBuffer; +#endif /* __NVOC_CLASS_EventBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_EventBuffer +#define __nvoc_class_id_EventBuffer 0x63502b +#endif /* __nvoc_class_id_EventBuffer */ + + +struct FbSegment; + +#ifndef 
__NVOC_CLASS_FbSegment_TYPEDEF__ +#define __NVOC_CLASS_FbSegment_TYPEDEF__ +typedef struct FbSegment FbSegment; +#endif /* __NVOC_CLASS_FbSegment_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FbSegment +#define __nvoc_class_id_FbSegment 0x2d55be +#endif /* __nvoc_class_id_FbSegment */ + + +struct FlaMemory; + +#ifndef __NVOC_CLASS_FlaMemory_TYPEDEF__ +#define __NVOC_CLASS_FlaMemory_TYPEDEF__ +typedef struct FlaMemory FlaMemory; +#endif /* __NVOC_CLASS_FlaMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FlaMemory +#define __nvoc_class_id_FlaMemory 0xe61ee1 +#endif /* __nvoc_class_id_FlaMemory */ + + +struct FmSessionApi; + +#ifndef __NVOC_CLASS_FmSessionApi_TYPEDEF__ +#define __NVOC_CLASS_FmSessionApi_TYPEDEF__ +typedef struct FmSessionApi FmSessionApi; +#endif /* __NVOC_CLASS_FmSessionApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FmSessionApi +#define __nvoc_class_id_FmSessionApi 0xdfbd08 +#endif /* __nvoc_class_id_FmSessionApi */ + + +struct GenericEngineApi; + +#ifndef __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +#define __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +typedef struct GenericEngineApi GenericEngineApi; +#endif /* __NVOC_CLASS_GenericEngineApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericEngineApi +#define __nvoc_class_id_GenericEngineApi 0x4bc329 +#endif /* __nvoc_class_id_GenericEngineApi */ + + +struct GpuManagementApi; + +#ifndef __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +#define __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +typedef struct GpuManagementApi GpuManagementApi; +#endif /* __NVOC_CLASS_GpuManagementApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuManagementApi +#define __nvoc_class_id_GpuManagementApi 0x376305 +#endif /* __nvoc_class_id_GpuManagementApi */ + + +struct GraphicsContext; + +#ifndef __NVOC_CLASS_GraphicsContext_TYPEDEF__ +#define __NVOC_CLASS_GraphicsContext_TYPEDEF__ +typedef struct GraphicsContext GraphicsContext; +#endif /* __NVOC_CLASS_GraphicsContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GraphicsContext +#define __nvoc_class_id_GraphicsContext 0x954c97 +#endif /* __nvoc_class_id_GraphicsContext */ + + +struct GraphicsObject; + +#ifndef __NVOC_CLASS_GraphicsObject_TYPEDEF__ +#define __NVOC_CLASS_GraphicsObject_TYPEDEF__ +typedef struct GraphicsObject GraphicsObject; +#endif /* __NVOC_CLASS_GraphicsObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GraphicsObject +#define __nvoc_class_id_GraphicsObject 0x8cddfd +#endif /* __nvoc_class_id_GraphicsObject */ + + +struct Griddisplayless; + +#ifndef __NVOC_CLASS_Griddisplayless_TYPEDEF__ +#define __NVOC_CLASS_Griddisplayless_TYPEDEF__ +typedef struct Griddisplayless Griddisplayless; +#endif /* __NVOC_CLASS_Griddisplayless_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Griddisplayless +#define __nvoc_class_id_Griddisplayless 0x3d03b2 +#endif /* __nvoc_class_id_Griddisplayless */ + + +struct Hdacodec; + +#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__ +#define __NVOC_CLASS_Hdacodec_TYPEDEF__ +typedef struct Hdacodec Hdacodec; +#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hdacodec +#define __nvoc_class_id_Hdacodec 0xf59a20 +#endif /* __nvoc_class_id_Hdacodec */ + + +struct HostVgpuDeviceApi; + +#ifndef __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__ +#define __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__ +typedef struct HostVgpuDeviceApi HostVgpuDeviceApi; +#endif /* __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_HostVgpuDeviceApi +#define __nvoc_class_id_HostVgpuDeviceApi 0x4c4173 +#endif /* __nvoc_class_id_HostVgpuDeviceApi */ + + +struct HostVgpuDeviceApi_KERNEL; + +#ifndef 
__NVOC_CLASS_HostVgpuDeviceApi_KERNEL_TYPEDEF__ +#define __NVOC_CLASS_HostVgpuDeviceApi_KERNEL_TYPEDEF__ +typedef struct HostVgpuDeviceApi_KERNEL HostVgpuDeviceApi_KERNEL; +#endif /* __NVOC_CLASS_HostVgpuDeviceApi_KERNEL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_HostVgpuDeviceApi_KERNEL +#define __nvoc_class_id_HostVgpuDeviceApi_KERNEL 0xeb7e48 +#endif /* __nvoc_class_id_HostVgpuDeviceApi_KERNEL */ + + +struct I2cApi; + +#ifndef __NVOC_CLASS_I2cApi_TYPEDEF__ +#define __NVOC_CLASS_I2cApi_TYPEDEF__ +typedef struct I2cApi I2cApi; +#endif /* __NVOC_CLASS_I2cApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_I2cApi +#define __nvoc_class_id_I2cApi 0xceb8f6 +#endif /* __nvoc_class_id_I2cApi */ + + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + +struct KernelChannelGroupApi; + +#ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +typedef struct KernelChannelGroupApi KernelChannelGroupApi; +#endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroupApi +#define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80 +#endif /* __nvoc_class_id_KernelChannelGroupApi */ + + +struct KernelCtxShareApi; + +#ifndef __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +#define __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +typedef struct KernelCtxShareApi KernelCtxShareApi; +#endif /* __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCtxShareApi +#define __nvoc_class_id_KernelCtxShareApi 0x1f9af1 +#endif /* __nvoc_class_id_KernelCtxShareApi */ + + +struct KernelGraphicsContext; + +#ifndef __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ +typedef struct KernelGraphicsContext KernelGraphicsContext; +#endif /* __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsContext +#define __nvoc_class_id_KernelGraphicsContext 0x7ead09 +#endif /* __nvoc_class_id_KernelGraphicsContext */ + + +struct KernelGraphicsObject; + +#ifndef __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +typedef struct KernelGraphicsObject KernelGraphicsObject; +#endif /* __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsObject +#define __nvoc_class_id_KernelGraphicsObject 0x097648 +#endif /* __nvoc_class_id_KernelGraphicsObject */ + + +struct KernelSMDebuggerSession; + +#ifndef __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +#define __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +typedef struct KernelSMDebuggerSession KernelSMDebuggerSession; +#endif /* __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSMDebuggerSession +#define __nvoc_class_id_KernelSMDebuggerSession 0x4adc81 +#endif /* __nvoc_class_id_KernelSMDebuggerSession */ + + +struct MemoryFabric; + +#ifndef __NVOC_CLASS_MemoryFabric_TYPEDEF__ +#define __NVOC_CLASS_MemoryFabric_TYPEDEF__ +typedef struct MemoryFabric MemoryFabric; +#endif /* __NVOC_CLASS_MemoryFabric_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryFabric +#define __nvoc_class_id_MemoryFabric 0x127499 +#endif /* __nvoc_class_id_MemoryFabric */ + + +struct MemoryHwResources; + +#ifndef __NVOC_CLASS_MemoryHwResources_TYPEDEF__ +#define 
__NVOC_CLASS_MemoryHwResources_TYPEDEF__ +typedef struct MemoryHwResources MemoryHwResources; +#endif /* __NVOC_CLASS_MemoryHwResources_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryHwResources +#define __nvoc_class_id_MemoryHwResources 0x9a2a71 +#endif /* __nvoc_class_id_MemoryHwResources */ + + +struct MemoryList; + +#ifndef __NVOC_CLASS_MemoryList_TYPEDEF__ +#define __NVOC_CLASS_MemoryList_TYPEDEF__ +typedef struct MemoryList MemoryList; +#endif /* __NVOC_CLASS_MemoryList_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryList +#define __nvoc_class_id_MemoryList 0x298f78 +#endif /* __nvoc_class_id_MemoryList */ + + +struct MmuFaultBuffer; + +#ifndef __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ +#define __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ +typedef struct MmuFaultBuffer MmuFaultBuffer; +#endif /* __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MmuFaultBuffer +#define __nvoc_class_id_MmuFaultBuffer 0x7e1829 +#endif /* __nvoc_class_id_MmuFaultBuffer */ + + +struct MpsApi; + +#ifndef __NVOC_CLASS_MpsApi_TYPEDEF__ +#define __NVOC_CLASS_MpsApi_TYPEDEF__ +typedef struct MpsApi MpsApi; +#endif /* __NVOC_CLASS_MpsApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MpsApi +#define __nvoc_class_id_MpsApi 0x22ce42 +#endif /* __nvoc_class_id_MpsApi */ + + +struct MsencContext; + +#ifndef __NVOC_CLASS_MsencContext_TYPEDEF__ +#define __NVOC_CLASS_MsencContext_TYPEDEF__ +typedef struct MsencContext MsencContext; +#endif /* __NVOC_CLASS_MsencContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MsencContext +#define __nvoc_class_id_MsencContext 0x88c92a +#endif /* __nvoc_class_id_MsencContext */ + + +struct NoDeviceMemory; + +#ifndef __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ +#define __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ +typedef struct NoDeviceMemory NoDeviceMemory; +#endif /* __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NoDeviceMemory +#define __nvoc_class_id_NoDeviceMemory 0x6c0832 +#endif /* __nvoc_class_id_NoDeviceMemory */ + + +struct NpuResource; + +#ifndef __NVOC_CLASS_NpuResource_TYPEDEF__ +#define __NVOC_CLASS_NpuResource_TYPEDEF__ +typedef struct NpuResource NpuResource; +#endif /* __NVOC_CLASS_NpuResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NpuResource +#define __nvoc_class_id_NpuResource 0x4d1af2 +#endif /* __nvoc_class_id_NpuResource */ + + +struct NvdecContext; + +#ifndef __NVOC_CLASS_NvdecContext_TYPEDEF__ +#define __NVOC_CLASS_NvdecContext_TYPEDEF__ +typedef struct NvdecContext NvdecContext; +#endif /* __NVOC_CLASS_NvdecContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvdecContext +#define __nvoc_class_id_NvdecContext 0x70d2be +#endif /* __nvoc_class_id_NvdecContext */ + + +struct NvDispApi; + +#ifndef __NVOC_CLASS_NvDispApi_TYPEDEF__ +#define __NVOC_CLASS_NvDispApi_TYPEDEF__ +typedef struct NvDispApi NvDispApi; +#endif /* __NVOC_CLASS_NvDispApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDispApi +#define __nvoc_class_id_NvDispApi 0x36aa0b +#endif /* __nvoc_class_id_NvDispApi */ + + +struct NvjpgContext; + +#ifndef __NVOC_CLASS_NvjpgContext_TYPEDEF__ +#define __NVOC_CLASS_NvjpgContext_TYPEDEF__ +typedef struct NvjpgContext NvjpgContext; +#endif /* __NVOC_CLASS_NvjpgContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvjpgContext +#define __nvoc_class_id_NvjpgContext 0x08c1ce +#endif /* __nvoc_class_id_NvjpgContext */ + + +struct OfaContext; + +#ifndef __NVOC_CLASS_OfaContext_TYPEDEF__ +#define __NVOC_CLASS_OfaContext_TYPEDEF__ +typedef struct OfaContext OfaContext; +#endif /* __NVOC_CLASS_OfaContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OfaContext +#define 
__nvoc_class_id_OfaContext 0xf63d99 +#endif /* __nvoc_class_id_OfaContext */ + + +struct OsDescMemory; + +#ifndef __NVOC_CLASS_OsDescMemory_TYPEDEF__ +#define __NVOC_CLASS_OsDescMemory_TYPEDEF__ +typedef struct OsDescMemory OsDescMemory; +#endif /* __NVOC_CLASS_OsDescMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OsDescMemory +#define __nvoc_class_id_OsDescMemory 0xb3dacd +#endif /* __nvoc_class_id_OsDescMemory */ + + +struct UserLocalDescMemory; + +#ifndef __NVOC_CLASS_UserLocalDescMemory_TYPEDEF__ +#define __NVOC_CLASS_UserLocalDescMemory_TYPEDEF__ +typedef struct UserLocalDescMemory UserLocalDescMemory; +#endif /* __NVOC_CLASS_UserLocalDescMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserLocalDescMemory +#define __nvoc_class_id_UserLocalDescMemory 0x799456 +#endif /* __nvoc_class_id_UserLocalDescMemory */ + + +struct P2PApi; + +#ifndef __NVOC_CLASS_P2PApi_TYPEDEF__ +#define __NVOC_CLASS_P2PApi_TYPEDEF__ +typedef struct P2PApi P2PApi; +#endif /* __NVOC_CLASS_P2PApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_P2PApi +#define __nvoc_class_id_P2PApi 0x3982b7 +#endif /* __nvoc_class_id_P2PApi */ + + +struct PerfBuffer; + +#ifndef __NVOC_CLASS_PerfBuffer_TYPEDEF__ +#define __NVOC_CLASS_PerfBuffer_TYPEDEF__ +typedef struct PerfBuffer PerfBuffer; +#endif /* __NVOC_CLASS_PerfBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PerfBuffer +#define __nvoc_class_id_PerfBuffer 0x4bc43b +#endif /* __nvoc_class_id_PerfBuffer */ + + +struct PhysicalMemory; + +#ifndef __NVOC_CLASS_PhysicalMemory_TYPEDEF__ +#define __NVOC_CLASS_PhysicalMemory_TYPEDEF__ +typedef struct PhysicalMemory PhysicalMemory; +#endif /* __NVOC_CLASS_PhysicalMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PhysicalMemory +#define __nvoc_class_id_PhysicalMemory 0x5fccf2 +#endif /* __nvoc_class_id_PhysicalMemory */ + + +struct PhysMemSubAlloc; + +#ifndef __NVOC_CLASS_PhysMemSubAlloc_TYPEDEF__ +#define __NVOC_CLASS_PhysMemSubAlloc_TYPEDEF__ +typedef struct PhysMemSubAlloc PhysMemSubAlloc; +#endif /* __NVOC_CLASS_PhysMemSubAlloc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PhysMemSubAlloc +#define __nvoc_class_id_PhysMemSubAlloc 0x2351fc +#endif /* __nvoc_class_id_PhysMemSubAlloc */ + + +struct Profiler; + +#ifndef __NVOC_CLASS_Profiler_TYPEDEF__ +#define __NVOC_CLASS_Profiler_TYPEDEF__ +typedef struct Profiler Profiler; +#endif /* __NVOC_CLASS_Profiler_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Profiler +#define __nvoc_class_id_Profiler 0x65b4c7 +#endif /* __nvoc_class_id_Profiler */ + + +struct ProfilerCtx; + +#ifndef __NVOC_CLASS_ProfilerCtx_TYPEDEF__ +#define __NVOC_CLASS_ProfilerCtx_TYPEDEF__ +typedef struct ProfilerCtx ProfilerCtx; +#endif /* __NVOC_CLASS_ProfilerCtx_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ProfilerCtx +#define __nvoc_class_id_ProfilerCtx 0xe99229 +#endif /* __nvoc_class_id_ProfilerCtx */ + + +struct ProfilerDev; + +#ifndef __NVOC_CLASS_ProfilerDev_TYPEDEF__ +#define __NVOC_CLASS_ProfilerDev_TYPEDEF__ +typedef struct ProfilerDev ProfilerDev; +#endif /* __NVOC_CLASS_ProfilerDev_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ProfilerDev +#define __nvoc_class_id_ProfilerDev 0x54d077 +#endif /* __nvoc_class_id_ProfilerDev */ + + +struct RegisterMemory; + +#ifndef __NVOC_CLASS_RegisterMemory_TYPEDEF__ +#define __NVOC_CLASS_RegisterMemory_TYPEDEF__ +typedef struct RegisterMemory RegisterMemory; +#endif /* __NVOC_CLASS_RegisterMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RegisterMemory +#define __nvoc_class_id_RegisterMemory 0x40d457 +#endif /* __nvoc_class_id_RegisterMemory */ + + +struct RemapperObject; + +#ifndef 
__NVOC_CLASS_RemapperObject_TYPEDEF__ +#define __NVOC_CLASS_RemapperObject_TYPEDEF__ +typedef struct RemapperObject RemapperObject; +#endif /* __NVOC_CLASS_RemapperObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RemapperObject +#define __nvoc_class_id_RemapperObject 0xfc96cb +#endif /* __nvoc_class_id_RemapperObject */ + + +struct RgLineCallback; + +#ifndef __NVOC_CLASS_RgLineCallback_TYPEDEF__ +#define __NVOC_CLASS_RgLineCallback_TYPEDEF__ +typedef struct RgLineCallback RgLineCallback; +#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RgLineCallback +#define __nvoc_class_id_RgLineCallback 0xa3ff1c +#endif /* __nvoc_class_id_RgLineCallback */ + + +struct RmClientResource; + +#ifndef __NVOC_CLASS_RmClientResource_TYPEDEF__ +#define __NVOC_CLASS_RmClientResource_TYPEDEF__ +typedef struct RmClientResource RmClientResource; +#endif /* __NVOC_CLASS_RmClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClientResource +#define __nvoc_class_id_RmClientResource 0x37a701 +#endif /* __nvoc_class_id_RmClientResource */ + + +struct MIGConfigSession; + +#ifndef __NVOC_CLASS_MIGConfigSession_TYPEDEF__ +#define __NVOC_CLASS_MIGConfigSession_TYPEDEF__ +typedef struct MIGConfigSession MIGConfigSession; +#endif /* __NVOC_CLASS_MIGConfigSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGConfigSession +#define __nvoc_class_id_MIGConfigSession 0x36a941 +#endif /* __nvoc_class_id_MIGConfigSession */ + + +struct ComputeInstanceSubscription; + +#ifndef __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ +#define __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ +typedef struct ComputeInstanceSubscription ComputeInstanceSubscription; +#endif /* __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ComputeInstanceSubscription +#define __nvoc_class_id_ComputeInstanceSubscription 0xd1f238 +#endif /* __nvoc_class_id_ComputeInstanceSubscription */ + + +struct MIGMonitorSession; + +#ifndef __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ +#define __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ +typedef struct MIGMonitorSession MIGMonitorSession; +#endif /* __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGMonitorSession +#define __nvoc_class_id_MIGMonitorSession 0x29e15c +#endif /* __nvoc_class_id_MIGMonitorSession */ + + +struct GPUInstanceSubscription; + +#ifndef __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ +#define __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ +typedef struct GPUInstanceSubscription GPUInstanceSubscription; +#endif /* __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GPUInstanceSubscription +#define __nvoc_class_id_GPUInstanceSubscription 0x91fde7 +#endif /* __nvoc_class_id_GPUInstanceSubscription */ + + +struct SMDebuggerSession; + +#ifndef __NVOC_CLASS_SMDebuggerSession_TYPEDEF__ +#define __NVOC_CLASS_SMDebuggerSession_TYPEDEF__ +typedef struct SMDebuggerSession SMDebuggerSession; +#endif /* __NVOC_CLASS_SMDebuggerSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SMDebuggerSession +#define __nvoc_class_id_SMDebuggerSession 0x9afab7 +#endif /* __nvoc_class_id_SMDebuggerSession */ + + +struct SoftwareMethodTest; + +#ifndef __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ +#define __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ +typedef struct SoftwareMethodTest SoftwareMethodTest; +#endif /* __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SoftwareMethodTest +#define __nvoc_class_id_SoftwareMethodTest 0xdea092 +#endif /* __nvoc_class_id_SoftwareMethodTest */ + + +struct Subdevice; + +#ifndef 
__NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + +struct BinaryApi; + +#ifndef __NVOC_CLASS_BinaryApi_TYPEDEF__ +#define __NVOC_CLASS_BinaryApi_TYPEDEF__ +typedef struct BinaryApi BinaryApi; +#endif /* __NVOC_CLASS_BinaryApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApi +#define __nvoc_class_id_BinaryApi 0xb7a47c +#endif /* __nvoc_class_id_BinaryApi */ + + +struct BinaryApiPrivileged; + +#ifndef __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +#define __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +typedef struct BinaryApiPrivileged BinaryApiPrivileged; +#endif /* __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApiPrivileged +#define __nvoc_class_id_BinaryApiPrivileged 0x1c0579 +#endif /* __nvoc_class_id_BinaryApiPrivileged */ + + +struct SyncGpuBoost; + +#ifndef __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ +#define __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ +typedef struct SyncGpuBoost SyncGpuBoost; +#endif /* __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncGpuBoost +#define __nvoc_class_id_SyncGpuBoost 0xc7e30b +#endif /* __nvoc_class_id_SyncGpuBoost */ + + +struct SyncpointMemory; + +#ifndef __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +#define __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +typedef struct SyncpointMemory SyncpointMemory; +#endif /* __NVOC_CLASS_SyncpointMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncpointMemory +#define __nvoc_class_id_SyncpointMemory 0x529def +#endif /* __nvoc_class_id_SyncpointMemory */ + + +struct SystemMemory; + +#ifndef __NVOC_CLASS_SystemMemory_TYPEDEF__ +#define __NVOC_CLASS_SystemMemory_TYPEDEF__ +typedef struct SystemMemory SystemMemory; +#endif /* __NVOC_CLASS_SystemMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SystemMemory +#define __nvoc_class_id_SystemMemory 0x007a98 +#endif /* __nvoc_class_id_SystemMemory */ + + +struct ThirdPartyP2P; + +#ifndef __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ +#define __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ +typedef struct ThirdPartyP2P ThirdPartyP2P; +#endif /* __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ThirdPartyP2P +#define __nvoc_class_id_ThirdPartyP2P 0x34d08b +#endif /* __nvoc_class_id_ThirdPartyP2P */ + + +struct TimedSemaSwObject; + +#ifndef __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ +#define __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ +typedef struct TimedSemaSwObject TimedSemaSwObject; +#endif /* __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimedSemaSwObject +#define __nvoc_class_id_TimedSemaSwObject 0x335775 +#endif /* __nvoc_class_id_TimedSemaSwObject */ + + +struct TimerApi; + +#ifndef __NVOC_CLASS_TimerApi_TYPEDEF__ +#define __NVOC_CLASS_TimerApi_TYPEDEF__ +typedef struct TimerApi TimerApi; +#endif /* __NVOC_CLASS_TimerApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimerApi +#define __nvoc_class_id_TimerApi 0xb13ac4 +#endif /* __nvoc_class_id_TimerApi */ + + +struct UserModeApi; + +#ifndef __NVOC_CLASS_UserModeApi_TYPEDEF__ +#define __NVOC_CLASS_UserModeApi_TYPEDEF__ +typedef struct UserModeApi UserModeApi; +#endif /* __NVOC_CLASS_UserModeApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserModeApi +#define __nvoc_class_id_UserModeApi 0x6f57ec +#endif /* __nvoc_class_id_UserModeApi */ + + +struct UvmChannelRetainer; + +#ifndef __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ +#define 
__NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ +typedef struct UvmChannelRetainer UvmChannelRetainer; +#endif /* __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UvmChannelRetainer +#define __nvoc_class_id_UvmChannelRetainer 0xa3f03a +#endif /* __nvoc_class_id_UvmChannelRetainer */ + + +struct UvmSwObject; + +#ifndef __NVOC_CLASS_UvmSwObject_TYPEDEF__ +#define __NVOC_CLASS_UvmSwObject_TYPEDEF__ +typedef struct UvmSwObject UvmSwObject; +#endif /* __NVOC_CLASS_UvmSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UvmSwObject +#define __nvoc_class_id_UvmSwObject 0xc35503 +#endif /* __nvoc_class_id_UvmSwObject */ + + +struct VaSpaceApi; + +#ifndef __NVOC_CLASS_VaSpaceApi_TYPEDEF__ +#define __NVOC_CLASS_VaSpaceApi_TYPEDEF__ +typedef struct VaSpaceApi VaSpaceApi; +#endif /* __NVOC_CLASS_VaSpaceApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VaSpaceApi +#define __nvoc_class_id_VaSpaceApi 0xcd048b +#endif /* __nvoc_class_id_VaSpaceApi */ + + +struct VblankCallback; + +#ifndef __NVOC_CLASS_VblankCallback_TYPEDEF__ +#define __NVOC_CLASS_VblankCallback_TYPEDEF__ +typedef struct VblankCallback VblankCallback; +#endif /* __NVOC_CLASS_VblankCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VblankCallback +#define __nvoc_class_id_VblankCallback 0x4c1997 +#endif /* __nvoc_class_id_VblankCallback */ + + +struct VgpuApi; + +#ifndef __NVOC_CLASS_VgpuApi_TYPEDEF__ +#define __NVOC_CLASS_VgpuApi_TYPEDEF__ +typedef struct VgpuApi VgpuApi; +#endif /* __NVOC_CLASS_VgpuApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VgpuApi +#define __nvoc_class_id_VgpuApi 0x7774f5 +#endif /* __nvoc_class_id_VgpuApi */ + + +struct VgpuConfigApi; + +#ifndef __NVOC_CLASS_VgpuConfigApi_TYPEDEF__ +#define __NVOC_CLASS_VgpuConfigApi_TYPEDEF__ +typedef struct VgpuConfigApi VgpuConfigApi; +#endif /* __NVOC_CLASS_VgpuConfigApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VgpuConfigApi +#define __nvoc_class_id_VgpuConfigApi 0x4d560a +#endif /* __nvoc_class_id_VgpuConfigApi */ + + +struct VideoMemory; + +#ifndef __NVOC_CLASS_VideoMemory_TYPEDEF__ +#define __NVOC_CLASS_VideoMemory_TYPEDEF__ +typedef struct VideoMemory VideoMemory; +#endif /* __NVOC_CLASS_VideoMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VideoMemory +#define __nvoc_class_id_VideoMemory 0xed948f +#endif /* __nvoc_class_id_VideoMemory */ + + +struct VirtualMemory; + +#ifndef __NVOC_CLASS_VirtualMemory_TYPEDEF__ +#define __NVOC_CLASS_VirtualMemory_TYPEDEF__ +typedef struct VirtualMemory VirtualMemory; +#endif /* __NVOC_CLASS_VirtualMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtualMemory +#define __nvoc_class_id_VirtualMemory 0x2aea5c +#endif /* __nvoc_class_id_VirtualMemory */ + + +struct VirtualMemoryRange; + +#ifndef __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__ +#define __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__ +typedef struct VirtualMemoryRange VirtualMemoryRange; +#endif /* __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtualMemoryRange +#define __nvoc_class_id_VirtualMemoryRange 0x7032c6 +#endif /* __nvoc_class_id_VirtualMemoryRange */ + + +struct VmmuApi; + +#ifndef __NVOC_CLASS_VmmuApi_TYPEDEF__ +#define __NVOC_CLASS_VmmuApi_TYPEDEF__ +typedef struct VmmuApi VmmuApi; +#endif /* __NVOC_CLASS_VmmuApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VmmuApi +#define __nvoc_class_id_VmmuApi 0x40d73a +#endif /* __nvoc_class_id_VmmuApi */ + + +struct ZbcApi; + +#ifndef __NVOC_CLASS_ZbcApi_TYPEDEF__ +#define __NVOC_CLASS_ZbcApi_TYPEDEF__ +typedef struct ZbcApi ZbcApi; +#endif /* __NVOC_CLASS_ZbcApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ZbcApi +#define 
__nvoc_class_id_ZbcApi 0x397ee3 +#endif /* __nvoc_class_id_ZbcApi */ + + + +#endif // RESOURCE_FWD_DECLS_H + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RESOURCE_FWD_DECLS_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c new file mode 100644 index 0000000..9547f8d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c @@ -0,0 +1,371 @@ +#define NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8ef259 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +void __nvoc_init_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_funcTable_RmResourceCommon(RmResourceCommon*); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_dataField_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResourceCommon; + +static const struct NVOC_RTTI __nvoc_rtti_RmResourceCommon_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmResourceCommon, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmResourceCommon = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_RmResourceCommon_RmResourceCommon, + }, +}; + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmResourceCommon), + /*classId=*/ classId(RmResourceCommon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmResourceCommon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_RmResourceCommon, + /*pExportInfo=*/ &__nvoc_export_info_RmResourceCommon +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResourceCommon = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResourceCommon(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmResourceCommon(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_RmResourceCommon(pThis); + + status = __nvoc_rmrescmnConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RmResourceCommon_fail__init; + goto __nvoc_ctor_RmResourceCommon_exit; // Success + +__nvoc_ctor_RmResourceCommon_fail__init: +__nvoc_ctor_RmResourceCommon_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmResourceCommon_1(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_RmResourceCommon(RmResourceCommon *pThis) { + __nvoc_init_funcTable_RmResourceCommon_1(pThis); +} + +void __nvoc_init_RmResourceCommon(RmResourceCommon *pThis) { + pThis->__nvoc_pbase_RmResourceCommon = pThis; + __nvoc_init_funcTable_RmResourceCommon(pThis); +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x03610d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_funcTable_RmResource(RmResource*); +NV_STATUS __nvoc_ctor_RmResource(RmResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RmResource(RmResource*); +void __nvoc_dtor_RmResource(RmResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResource; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmResource, __nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmResource, __nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmResource, __nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmResource = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_RmResource_RmResource, + &__nvoc_rtti_RmResource_RmResourceCommon, + &__nvoc_rtti_RmResource_RsResource, + &__nvoc_rtti_RmResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmResource), + /*classId=*/ classId(RmResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmResource, + /*pCastInfo=*/ &__nvoc_castinfo_RmResource, + /*pExportInfo=*/ &__nvoc_export_info_RmResource +}; + +static NvBool __nvoc_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NvBool __nvoc_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + 
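The __nvoc_thunk_* functions above and below are the core of NVOC's C object model: each derived class physically embeds its base-class structs, the NVOC_RTTI entries record the byte offset of every embedded base, and each thunk converts between base and derived pointers before forwarding the call through the function-pointer table. The RmResource thunks above subtract the recorded offset to recover the derived RmResource pointer from an embedded RsResource pointer; the RsResource thunks that follow add it to go the other way. As a minimal sketch of that idiom, the standalone C program below uses hypothetical names (Base, Derived, thunk_getRefCount) and a fixed single-base layout, so offsetof stands in for the RTTI offset; it is not NVOC code.

#include <stdio.h>
#include <stddef.h>

/* Base "class": carries a function-pointer slot, like RsResource's vtable entries. */
typedef struct Base {
    int (*getRefCount)(struct Base *pBase);
} Base;

/* Derived "class": embeds Base at a known offset, like RmResource embeds RsResource. */
typedef struct Derived {
    int refCount;
    Base base;   /* offsetof(Derived, base) plays the role of __nvoc_rtti_...offset */
} Derived;

/* Implementation written against the derived type. */
static int derivedGetRefCount(Derived *pDerived)
{
    return pDerived->refCount;
}

/* Thunk: recover the Derived pointer by subtracting the base's offset, mirroring
 * ((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset above. */
static int thunk_getRefCount(Base *pBase)
{
    Derived *pDerived = (Derived *)((unsigned char *)pBase - offsetof(Derived, base));
    return derivedGetRefCount(pDerived);
}

int main(void)
{
    Derived d = { .refCount = 42, .base = { .getRefCount = thunk_getRefCount } };
    Base *pBase = &d.base;                      /* upcast: pointer to the embedded base */
    printf("%d\n", pBase->getRefCount(pBase));  /* dispatches into Derived: prints 42 */
    return 0;
}

NVOC stores these offsets in runtime RTTI tables rather than hard-coding offsetof so that __nvoc_dynamicCast can walk an object's full list of relatives at runtime; the sketch only needs the compile-time offset because its inheritance chain is fixed.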
+static NV_STATUS __nvoc_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pParams); +} + +static NvU32 __nvoc_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pReference); +} + +static NvBool __nvoc_thunk_RsResource_rmresCanCopy(struct RmResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresControlLookup(struct RmResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsResource(RsResource*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_RmResource(RmResource *pThis) { + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmResource(RmResource *pThis) { + 
PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsResource(RsResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon* ); +NV_STATUS __nvoc_ctor_RmResource(RmResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsResource(&pThis->__nvoc_base_RsResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail_RsResource; + status = __nvoc_ctor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail_RmResourceCommon; + __nvoc_init_dataField_RmResource(pThis); + + status = __nvoc_rmresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail__init; + goto __nvoc_ctor_RmResource_exit; // Success + +__nvoc_ctor_RmResource_fail__init: + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); +__nvoc_ctor_RmResource_fail_RmResourceCommon: + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); +__nvoc_ctor_RmResource_fail_RsResource: +__nvoc_ctor_RmResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmResource_1(RmResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__rmresAccessCallback__ = &rmresAccessCallback_IMPL; + + pThis->__rmresShareCallback__ = &rmresShareCallback_IMPL; + + pThis->__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL; + + pThis->__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL; + + pThis->__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL; + + pThis->__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL; + + pThis->__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL; + + pThis->__nvoc_base_RsResource.__resAccessCallback__ = &__nvoc_thunk_RmResource_resAccessCallback; + + pThis->__nvoc_base_RsResource.__resShareCallback__ = &__nvoc_thunk_RmResource_resShareCallback; + + pThis->__nvoc_base_RsResource.__resControl_Prologue__ = &__nvoc_thunk_RmResource_resControl_Prologue; + + pThis->__nvoc_base_RsResource.__resControl_Epilogue__ = &__nvoc_thunk_RmResource_resControl_Epilogue; + + pThis->__rmresControl__ = &__nvoc_thunk_RsResource_rmresControl; + + pThis->__rmresUnmap__ = &__nvoc_thunk_RsResource_rmresUnmap; + + pThis->__rmresMapTo__ = &__nvoc_thunk_RsResource_rmresMapTo; + + pThis->__rmresGetRefCount__ = &__nvoc_thunk_RsResource_rmresGetRefCount; + + pThis->__rmresControlFilter__ = &__nvoc_thunk_RsResource_rmresControlFilter; + + pThis->__rmresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_rmresAddAdditionalDependants; + + pThis->__rmresCanCopy__ = &__nvoc_thunk_RsResource_rmresCanCopy; + + pThis->__rmresPreDestruct__ = &__nvoc_thunk_RsResource_rmresPreDestruct; + + pThis->__rmresUnmapFrom__ = &__nvoc_thunk_RsResource_rmresUnmapFrom; + + pThis->__rmresControlLookup__ = &__nvoc_thunk_RsResource_rmresControlLookup; + + pThis->__rmresMap__ = &__nvoc_thunk_RsResource_rmresMap; +} + +void __nvoc_init_funcTable_RmResource(RmResource *pThis) { + __nvoc_init_funcTable_RmResource_1(pThis); +} + +void __nvoc_init_RsResource(RsResource*); +void __nvoc_init_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_RmResource(RmResource *pThis) { + pThis->__nvoc_pbase_RmResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsResource; + 
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResourceCommon; + __nvoc_init_RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_init_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + __nvoc_init_funcTable_RmResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RmResource(RmResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RmResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(RmResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RmResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RmResource(pThis); + status = __nvoc_ctor_RmResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RmResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RmResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmResource(RmResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h new file mode 100644 index 0000000..4760fc5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h @@ -0,0 +1,355 @@ +#ifndef _G_RESOURCE_NVOC_H_ +#define _G_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_resource_nvoc.h" + +#ifndef _RESOURCE_H_ +#define _RESOURCE_H_ + +#include "core/core.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" + +/* Forward declarations */ +struct MEMORY_DESCRIPTOR; +struct OBJVASPACE; + +struct RMRES_MEM_INTER_MAP_PARAMS +{ + /// [in] + OBJGPU *pGpu; + RsResourceRef *pMemoryRef; + NvBool bSubdeviceHandleProvided; + + /// [out] + OBJGPU *pSrcGpu; + struct MEMORY_DESCRIPTOR *pSrcMemDesc; + NvHandle hMemoryDevice; + NvBool bDmaMapNeeded; + // This flag will be set when this is FLA mapping + NvBool bFlaMapping; +}; + +struct RS_RES_MAP_TO_PARAMS +{ + OBJGPU *pGpu; ///< [in] + OBJGPU *pSrcGpu; ///< [in] + struct MEMORY_DESCRIPTOR *pSrcMemDesc; ///< [in] + struct MEMORY_DESCRIPTOR **ppMemDesc; ///< [out] + RsResourceRef *pMemoryRef; ///< [in] + NvHandle hBroadcastDevice; ///< [in] + NvHandle hMemoryDevice; ///< [in] + NvU32 gpuMask; ///< [in] + NvU64 offset; ///< [in] + NvU64 length; ///< [in] + NvU32 flags; ///< [in] + NvU64 *pDmaOffset; ///< [inout] + NvBool bSubdeviceHandleProvided; ///< [in] + NvBool bDmaMapNeeded; ///< [in] + NvBool bFlaMapping; ///< [in] +}; + +struct RS_RES_UNMAP_FROM_PARAMS +{ + OBJGPU *pGpu; ///< [in] + NvHandle hMemory; ///< [in] + NvHandle hBroadcastDevice; ///< [in] + NvU32 gpuMask; ///< [in] + NvU32 flags; ///< [in] + NvU64 dmaOffset; ///< [in] + struct MEMORY_DESCRIPTOR *pMemDesc; ///< [in] + NvBool bSubdeviceHandleProvided; ///< [in] +}; + +struct RS_INTER_MAP_PRIVATE +{ + OBJGPU *pGpu; + OBJGPU *pSrcGpu; + struct MEMORY_DESCRIPTOR *pSrcMemDesc; + NvHandle hBroadcastDevice; + NvHandle hMemoryDevice; + NvU32 gpuMask; + NvBool bSubdeviceHandleProvided; + NvBool bDmaMapNeeded; + NvBool bFlaMapping; +}; + +struct RS_INTER_UNMAP_PRIVATE +{ + OBJGPU *pGpu; + NvHandle hBroadcastDevice; + NvU32 gpuMask; + NvBool bSubdeviceHandleProvided; + NvBool bcState; + NvBool bAllocated; ///< This struct has been allocated and must be freed +}; + +struct RS_CPU_MAPPING_PRIVATE +{ + NvU64 gpuAddress; + NvU64 gpuMapLength; + OBJGPU *pGpu; + NvP64 pPriv; + NvU32 protect; + NvBool bKernel; +}; + +typedef struct RMRES_MEM_INTER_MAP_PARAMS RMRES_MEM_INTER_MAP_PARAMS; + +/*! 
+ * All RsResource subclasses in RM must inherit from this class + */ +#ifdef NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmResourceCommon { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; +}; + +#ifndef __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +#define __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +typedef struct RmResourceCommon RmResourceCommon; +#endif /* __NVOC_CLASS_RmResourceCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResourceCommon +#define __nvoc_class_id_RmResourceCommon 0x8ef259 +#endif /* __nvoc_class_id_RmResourceCommon */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +#define __staticCast_RmResourceCommon(pThis) \ + ((pThis)->__nvoc_pbase_RmResourceCommon) + +#ifdef __nvoc_resource_h_disabled +#define __dynamicCast_RmResourceCommon(pThis) ((RmResourceCommon*)NULL) +#else //__nvoc_resource_h_disabled +#define __dynamicCast_RmResourceCommon(pThis) \ + ((RmResourceCommon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmResourceCommon))) +#endif //__nvoc_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmResourceCommon(RmResourceCommon**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmResourceCommon(RmResourceCommon**, Dynamic*, NvU32); +#define __objCreate_RmResourceCommon(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RmResourceCommon((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS rmrescmnConstruct_IMPL(struct RmResourceCommon *arg_pResourceCommmon); +#define __nvoc_rmrescmnConstruct(arg_pResourceCommmon) rmrescmnConstruct_IMPL(arg_pResourceCommmon) +#undef PRIVATE_FIELD + + +/*! + * Utility base class for all RsResource subclasses in RM. 
Doesn't have to be + * used but if it isn't used RmResourceCommon must be inherited manually + */ +#ifdef NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsResource __nvoc_base_RsResource; + struct RmResourceCommon __nvoc_base_RmResourceCommon; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + NvBool (*__rmresAccessCallback__)(struct RmResource *, struct RsClient *, void *, RsAccessRight); + NvBool (*__rmresShareCallback__)(struct RmResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__rmresGetMemInterMapParams__)(struct RmResource *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__rmresCheckMemInterUnmap__)(struct RmResource *, NvBool); + NV_STATUS (*__rmresGetMemoryMappingDescriptor__)(struct RmResource *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__rmresControl_Prologue__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__rmresControl_Epilogue__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__rmresControl__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__rmresUnmap__)(struct RmResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__rmresMapTo__)(struct RmResource *, RS_RES_MAP_TO_PARAMS *); + NvU32 (*__rmresGetRefCount__)(struct RmResource *); + NV_STATUS (*__rmresControlFilter__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__rmresAddAdditionalDependants__)(struct RsClient *, struct RmResource *, RsResourceRef *); + NvBool (*__rmresCanCopy__)(struct RmResource *); + void (*__rmresPreDestruct__)(struct RmResource *); + NV_STATUS (*__rmresUnmapFrom__)(struct RmResource *, RS_RES_UNMAP_FROM_PARAMS *); + NV_STATUS (*__rmresControlLookup__)(struct RmResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__rmresMap__)(struct RmResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvU32 rpcGpuInstance; + NvBool bRpcFree; +}; + +#ifndef __NVOC_CLASS_RmResource_TYPEDEF__ +#define __NVOC_CLASS_RmResource_TYPEDEF__ +typedef struct RmResource RmResource; +#endif /* __NVOC_CLASS_RmResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResource +#define __nvoc_class_id_RmResource 0x03610d +#endif /* __nvoc_class_id_RmResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +#define __staticCast_RmResource(pThis) \ + ((pThis)->__nvoc_pbase_RmResource) + +#ifdef __nvoc_resource_h_disabled +#define __dynamicCast_RmResource(pThis) ((RmResource*)NULL) +#else //__nvoc_resource_h_disabled +#define __dynamicCast_RmResource(pThis) \ + ((RmResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmResource))) +#endif //__nvoc_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmResource(RmResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmResource(RmResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RmResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RmResource((ppNewObj), staticCast((pParent), Dynamic), 
(createFlags), arg_pCallContext, arg_pParams) + +#define rmresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) rmresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define rmresShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) rmresShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define rmresGetMemInterMapParams(pRmResource, pParams) rmresGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define rmresCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) rmresCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define rmresGetMemoryMappingDescriptor(pRmResource, ppMemDesc) rmresGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define rmresControl_Prologue(pResource, pCallContext, pParams) rmresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define rmresControl_Epilogue(pResource, pCallContext, pParams) rmresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define rmresControl(pResource, pCallContext, pParams) rmresControl_DISPATCH(pResource, pCallContext, pParams) +#define rmresUnmap(pResource, pCallContext, pCpuMapping) rmresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define rmresMapTo(pResource, pParams) rmresMapTo_DISPATCH(pResource, pParams) +#define rmresGetRefCount(pResource) rmresGetRefCount_DISPATCH(pResource) +#define rmresControlFilter(pResource, pCallContext, pParams) rmresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define rmresAddAdditionalDependants(pClient, pResource, pReference) rmresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define rmresCanCopy(pResource) rmresCanCopy_DISPATCH(pResource) +#define rmresPreDestruct(pResource) rmresPreDestruct_DISPATCH(pResource) +#define rmresUnmapFrom(pResource, pParams) rmresUnmapFrom_DISPATCH(pResource, pParams) +#define rmresControlLookup(pResource, pParams, ppEntry) rmresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define rmresMap(pResource, pCallContext, pParams, pCpuMapping) rmresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +NvBool rmresAccessCallback_IMPL(struct RmResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +static inline NvBool rmresAccessCallback_DISPATCH(struct RmResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__rmresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NvBool rmresShareCallback_IMPL(struct RmResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool rmresShareCallback_DISPATCH(struct RmResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__rmresShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS rmresGetMemInterMapParams_IMPL(struct RmResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS rmresGetMemInterMapParams_DISPATCH(struct RmResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__rmresGetMemInterMapParams__(pRmResource, pParams); +} + +NV_STATUS rmresCheckMemInterUnmap_IMPL(struct RmResource *pRmResource, NvBool bSubdeviceHandleProvided); + +static inline NV_STATUS rmresCheckMemInterUnmap_DISPATCH(struct RmResource *pRmResource, NvBool 
bSubdeviceHandleProvided) { + return pRmResource->__rmresCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +NV_STATUS rmresGetMemoryMappingDescriptor_IMPL(struct RmResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); + +static inline NV_STATUS rmresGetMemoryMappingDescriptor_DISPATCH(struct RmResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__rmresGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +NV_STATUS rmresControl_Prologue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS rmresControl_Prologue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rmresControl_Prologue__(pResource, pCallContext, pParams); +} + +void rmresControl_Epilogue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline void rmresControl_Epilogue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__rmresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rmresControl_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rmresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rmresUnmap_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__rmresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS rmresMapTo_DISPATCH(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__rmresMapTo__(pResource, pParams); +} + +static inline NvU32 rmresGetRefCount_DISPATCH(struct RmResource *pResource) { + return pResource->__rmresGetRefCount__(pResource); +} + +static inline NV_STATUS rmresControlFilter_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rmresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void rmresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference) { + pResource->__rmresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvBool rmresCanCopy_DISPATCH(struct RmResource *pResource) { + return pResource->__rmresCanCopy__(pResource); +} + +static inline void rmresPreDestruct_DISPATCH(struct RmResource *pResource) { + pResource->__rmresPreDestruct__(pResource); +} + +static inline NV_STATUS rmresUnmapFrom_DISPATCH(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__rmresUnmapFrom__(pResource, pParams); +} + +static inline NV_STATUS rmresControlLookup_DISPATCH(struct RmResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__rmresControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS rmresMap_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__rmresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS rmresConstruct_IMPL(struct RmResource *arg_pResource, struct CALL_CONTEXT 
*arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_rmresConstruct(arg_pResource, arg_pCallContext, arg_pParams) rmresConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // _RESOURCE_H_ + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RESOURCE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h new file mode 100644 index 0000000..426eff0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h @@ -0,0 +1,418 @@ +#ifndef _G_RESSERV_NVOC_H_ +#define _G_RESSERV_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_resserv_nvoc.h" + +#ifndef _RESSERV_H_ +#define _RESSERV_H_ + +#include "nvoc/object.h" + +#include "containers/list.h" +#include "containers/map.h" +#include "containers/multimap.h" + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvos.h" +#include "nvsecurityinfo.h" +#include "rs_access.h" + +#if LOCK_VAL_ENABLED +#include "lockval/lockval.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if (RS_STANDALONE) +#include <stdio.h> + +#ifndef NV_PRINTF +extern int g_debugLevel; +#define NV_PRINTF(level, format, ...) 
if (g_debugLevel) { printf(format, ##__VA_ARGS__); } +#endif +#include "utils/nvprintf.h" +#endif + +// +// Forward declarations +// +typedef struct RsServer RsServer; +typedef struct RsDomain RsDomain; +typedef struct CLIENT_ENTRY CLIENT_ENTRY; +typedef struct RsResourceDep RsResourceDep; +typedef struct RsResourceRef RsResourceRef; +typedef struct RsInterMapping RsInterMapping; +typedef struct RsCpuMapping RsCpuMapping; + +// RS-TODO INTERNAL and EXTERNAL params should be different structures +typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS_INTERNAL; +typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS_INTERNAL; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS; +typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS_INTERNAL; +typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS; +typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS_INTERNAL; +typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS_INTERNAL; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS_INTERNAL; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_LEGACY_CONTROL_PARAMS; +typedef struct RS_LEGACY_ALLOC_PARAMS RS_LEGACY_ALLOC_PARAMS; +typedef struct RS_LEGACY_FREE_PARAMS RS_LEGACY_FREE_PARAMS; + +typedef struct RS_CPU_MAP_PARAMS RS_CPU_MAP_PARAMS; +typedef struct RS_CPU_UNMAP_PARAMS RS_CPU_UNMAP_PARAMS; +typedef struct RS_INTER_MAP_PARAMS RS_INTER_MAP_PARAMS; +typedef struct RS_INTER_UNMAP_PARAMS RS_INTER_UNMAP_PARAMS; + +// Forward declarations for structs defined by user +typedef struct RS_RES_MAP_TO_PARAMS RS_RES_MAP_TO_PARAMS; +typedef struct RS_RES_UNMAP_FROM_PARAMS RS_RES_UNMAP_FROM_PARAMS; +typedef struct RS_INTER_MAP_PRIVATE RS_INTER_MAP_PRIVATE; +typedef struct RS_INTER_UNMAP_PRIVATE RS_INTER_UNMAP_PRIVATE; +typedef struct RS_CPU_MAPPING_PRIVATE RS_CPU_MAPPING_PRIVATE; + +typedef struct RS_CPU_MAPPING_BACK_REF RS_CPU_MAPPING_BACK_REF; +typedef struct RS_INTER_MAPPING_BACK_REF RS_INTER_MAPPING_BACK_REF; +typedef struct RS_FREE_STACK RS_FREE_STACK; +typedef struct CALL_CONTEXT CALL_CONTEXT; +typedef struct ACCESS_CONTROL ACCESS_CONTROL; +typedef struct RS_ITERATOR RS_ITERATOR; +typedef struct RS_ORDERED_ITERATOR RS_ORDERED_ITERATOR; +typedef struct RS_SHARE_ITERATOR RS_SHARE_ITERATOR; +typedef struct API_STATE API_STATE; +typedef struct RS_LOCK_INFO RS_LOCK_INFO; +typedef struct RS_CONTROL_COOKIE RS_CONTROL_COOKIE; +typedef NV_STATUS RsCtrlFunc(struct RS_RES_CONTROL_PARAMS_INTERNAL*); + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct RsResource; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + + +struct 
RsShared; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + + + +MAKE_LIST(RsResourceRefList, RsResourceRef*); +MAKE_LIST(RsResourceList, RsResource*); +MAKE_LIST(RsHandleList, NvHandle); +MAKE_LIST(RsClientList, CLIENT_ENTRY*); +MAKE_LIST(RsShareList, RS_SHARE_POLICY); +MAKE_MULTIMAP(RsIndex, RsResourceRef*); + +typedef NV_STATUS (*CtrlImpl_t)(struct RsClient*, struct RsResource*, void*); + +typedef void *PUID_TOKEN; + +// +// Defines +// + +/// Domain handles must start at this base value +#define RS_DOMAIN_HANDLE_BASE 0xD0D00000 + +/// Client handles must start at this base value +#define RS_CLIENT_HANDLE_BASE 0xC1D00000 + +/// +/// Internal Client handles must start at this base value +/// at either of these two bases +/// +#define RS_CLIENT_INTERNAL_HANDLE_BASE 0xC1E00000 + +#define RS_CLIENT_INTERNAL_HANDLE_BASE_EX 0xC1F00000 + +// +// Print a warning if any client's resource count exceeds this +// threshold. Unless this was intentional, this is likely a client bug. +// +#define RS_CLIENT_RESOURCE_WARNING_THRESHOLD 100000 + + +/// 0xFFFF max client handles. +#define RS_CLIENT_HANDLE_BUCKET_COUNT 0x400 // 1024 +#define RS_CLIENT_HANDLE_BUCKET_MASK 0x3FF + + +/// The default maximum number of domains a resource server can allocate +#define RS_MAX_DOMAINS_DEFAULT 4096 + +/// The maximum length of a line of ancestry for resource references +#define RS_MAX_RESOURCE_DEPTH 6 + +/// RS_LOCK_FLAGS +#define RS_LOCK_FLAGS_NO_TOP_LOCK NVBIT(0) +#define RS_LOCK_FLAGS_NO_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK NVBIT(5) +#define RS_LOCK_FLAGS_FREE_SESSION_LOCK NVBIT(6) + +/// RS_LOCK_STATE +#define RS_LOCK_STATE_TOP_LOCK_ACQUIRED NVBIT(0) +#define RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED NVBIT(1) +#define RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED NVBIT(2) +#define RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED NVBIT(3) +#define RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK NVBIT(6) +#define RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED NVBIT(7) +#define RS_LOCK_STATE_SESSION_LOCK_ACQUIRED NVBIT(8) + +/// RS_LOCK_RELEASE +#define RS_LOCK_RELEASE_TOP_LOCK NVBIT(0) +#define RS_LOCK_RELEASE_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_RELEASE_SESSION_LOCK NVBIT(5) + +/// API enumerations used for locking knobs +typedef enum +{ + RS_LOCK_CLIENT =0, + RS_LOCK_TOP =1, + RS_LOCK_RESOURCE =2, + RS_LOCK_CUSTOM_3 =3, +} RS_LOCK_ENUM; + +typedef enum +{ + RS_API_ALLOC_CLIENT = 0, + RS_API_ALLOC_RESOURCE = 1, + RS_API_FREE_RESOURCE = 2, + RS_API_MAP = 3, + RS_API_UNMAP = 4, + RS_API_INTER_MAP = 5, + RS_API_INTER_UNMAP = 6, + RS_API_COPY = 7, + RS_API_SHARE = 8, + RS_API_CTRL = 9, + RS_API_MAX, +} RS_API_ENUM; + +NV_STATUS indexAdd(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); +NV_STATUS indexRemove(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); + +// +// Externs +// +/** + * NVOC wrapper for constructing resources of a given type + * + * @param[in] pAllocator Allocator for the resource object + * @param[in] pCallContext Caller context passed to resource constructor + * @param[inout] 
pParams Resource allocation parameters + * @param[out] ppResource New resource object + */ +extern NV_STATUS resservResourceFactory(PORT_MEM_ALLOCATOR *pAllocator, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, struct RsResource **ppResource); + +/** + * NVOC wrapper for constructing an application-specific client. + */ +extern NV_STATUS resservClientFactory(PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams, struct RsClient **ppRsClient); + +/** + * Validate the UID/PID security token of the current user against a client's security token. + * + * This will be obsolete after phase 1. + * + * @param[in] pClientToken + * @param[in] pCurrentToken + * + * @returns NV_OK if the current user's security token matches the client's security token + */ +extern NV_STATUS osValidateClientTokens(PSECURITY_TOKEN pClientToken, PSECURITY_TOKEN pCurrentToken); + +/** + * Get the security token of the current user for the UID/PID security model. + * + * This will be obsolete after phase 1. + */ +extern PSECURITY_TOKEN osGetSecurityToken(void); + +/** + * TLS entry id for call contexts. All servers will use the same id. + */ +#define TLS_ENTRY_ID_RESSERV_CALL_CONTEXT TLS_ENTRY_ID_RESSERV_1 + +// +// Structs +// +struct RS_FREE_STACK +{ + RS_FREE_STACK *pPrev; + RsResourceRef *pResourceRef; +}; + +struct CALL_CONTEXT +{ + RsServer *pServer; ///< The resource server instance that owns the client + struct RsClient *pClient; ///< Client that was the target of the call + RsResourceRef *pResourceRef; ///< Reference that was the target of the call + RsResourceRef *pContextRef; ///< Reference that may be used to provide more context [optional] + RS_LOCK_INFO *pLockInfo; ///< Saved locking context information for the call + API_SECURITY_INFO secInfo; + RS_RES_CONTROL_PARAMS_INTERNAL *pControlParams; ///< parameters of the call [optional] +}; + +typedef enum { + RS_ITERATE_CHILDREN, ///< Iterate over a RsResourceRef's children + RS_ITERATE_DESCENDANTS, ///< Iterate over a RsResourceRef's children, grandchildren, etc. (unspecified order) + RS_ITERATE_CACHED, ///< Iterate over a RsResourceRef's cache + RS_ITERATE_DEPENDANTS, ///< Iterate over a RsResourceRef's dependants +} RS_ITER_TYPE; + +typedef enum +{ + LOCK_ACCESS_READ, + LOCK_ACCESS_WRITE, +} LOCK_ACCESS_TYPE; + + + +/** + * Access control information. This information will be filled out by the user + * of the Resource Server when allocating a client or resource. 
+ */ +struct ACCESS_CONTROL +{ + /** + * The privilege level of this access control + */ + RS_PRIV_LEVEL privilegeLevel; + + /** + * Opaque pointer for storing a security token + */ + PSECURITY_TOKEN pSecurityToken; +}; + +// +// Utility wrappers for locking validator +// +#if LOCK_VAL_ENABLED +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) \ + do { NV_ASSERT_OK(lockvalLockInit((lock), (lockClass), (inst))); } while(0) + +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireRead((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_RLOCK); \ +} while(0) + +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireWrite((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_WLOCK); \ +} while(0) + +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseRead((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseWrite((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#else +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do { portSyncRwLockAcquireRead((lock)); } while(0) +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do { portSyncRwLockAcquireWrite((lock)); } while(0) +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseRead((lock)); } while(0) +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseWrite((lock)); } while(0) +#endif + +#define RS_RWLOCK_RELEASE_READ(lock, validator) RS_RWLOCK_RELEASE_READ_EXT(lock, validator, NV_FALSE) +#define RS_RWLOCK_RELEASE_WRITE(lock, validator) RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, NV_FALSE) + + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RESSERV_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h new file mode 100644 index 0000000..cae6576 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h @@ -0,0 +1,695 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// private rmconfig generated #defines such as IsG84(), +// RMCFG_FEATURE_ENABLED_STATUS(), etc. +// +// Only for use within resman. 
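+//
+// On this profile every Is<CHIP>() macro below expands to a compile-time
+// constant (the "&& (pGpu)" / "|| (pGpu)" merely keeps the argument
+// referenced), so the compiler folds chip checks and drops dead branches.
+// Illustrative call site (dispProgramSocDisplay is a hypothetical helper):
+//
+//     if (IsT234D(pGpu))
+//         return dispProgramSocDisplay(pGpu);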
+// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig_private.h +// +// Chips: T234D +// + +#ifndef _G_RMCFG_PRIVATE_H_ +#define _G_RMCFG_PRIVATE_H_ + +// +// CHIP identity macros such as IsGK104() +// + +// GF10X +#define IsGF100(pGpu) ((0) && (pGpu)) +#define IsGF100orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF100B(pGpu) ((0) && (pGpu)) +#define IsGF100BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF104(pGpu) ((0) && (pGpu)) +#define IsGF104orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF104B(pGpu) ((0) && (pGpu)) +#define IsGF104BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF106(pGpu) ((0) && (pGpu)) +#define IsGF106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF106B(pGpu) ((0) && (pGpu)) +#define IsGF106BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF108(pGpu) ((0) && (pGpu)) +#define IsGF108orBetter(pGpu) ((0) && (pGpu)) + +// Any GF10X chip? +#define IsGF10X(pGpu) (0 && (pGpu)) +#define IsGF10XorBetter(pGpu) (0 && (pGpu)) + + +// GF11X +#define IsGF110D(pGpu) ((0) && (pGpu)) +#define IsGF110DorBetter(pGpu) ((0) && (pGpu)) + +#define IsGF110(pGpu) ((0) && (pGpu)) +#define IsGF110orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF117(pGpu) ((0) && (pGpu)) +#define IsGF117orBetter(pGpu) ((0) && (pGpu)) +#define IsGF117MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGF118(pGpu) ((0) && (pGpu)) +#define IsGF118orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF119(pGpu) ((0) && (pGpu)) +#define IsGF119orBetter(pGpu) ((0) && (pGpu)) +#define IsGF119MaskRevA01(pGpu) ((0) && (pGpu)) + +// Any GF11X chip? +#define IsGF11X(pGpu) (0 && (pGpu)) +#define IsGF11XorBetter(pGpu) (0 && (pGpu)) + + +// GF10XF +#define IsGF110F(pGpu) ((0) && (pGpu)) +#define IsGF110ForBetter(pGpu) ((0) && (pGpu)) + +#define IsGF110F2(pGpu) ((0) && (pGpu)) +#define IsGF110F2orBetter(pGpu) ((0) && (pGpu)) + +#define IsGF110F3(pGpu) ((0) && (pGpu)) +#define IsGF110F3orBetter(pGpu) ((0) && (pGpu)) + +// Any GF10XF chip? +#define IsGF10XF(pGpu) (0 && (pGpu)) +#define IsGF10XForBetter(pGpu) (0 && (pGpu)) + + +// GK10X +#define IsGK104(pGpu) ((0) && (pGpu)) +#define IsGK104orBetter(pGpu) ((0) && (pGpu)) +#define IsGK104MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGK106(pGpu) ((0) && (pGpu)) +#define IsGK106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK107(pGpu) ((0) && (pGpu)) +#define IsGK107orBetter(pGpu) ((0) && (pGpu)) +#define IsGK107MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGK20A(pGpu) ((0) && (pGpu)) +#define IsGK20AorBetter(pGpu) ((0) && (pGpu)) + +// Any GK10X chip? +#define IsGK10X(pGpu) (0 && (pGpu)) +#define IsGK10XorBetter(pGpu) (0 && (pGpu)) + + +// GK11X +#define IsGK110(pGpu) ((0) && (pGpu)) +#define IsGK110orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK110B(pGpu) ((0) && (pGpu)) +#define IsGK110BorBetter(pGpu) ((0) && (pGpu)) + +#define IsGK110C(pGpu) ((0) && (pGpu)) +#define IsGK110CorBetter(pGpu) ((0) && (pGpu)) + +// Any GK11X chip? +#define IsGK11X(pGpu) (0 && (pGpu)) +#define IsGK11XorBetter(pGpu) (0 && (pGpu)) + + +// GK20X +#define IsGK208(pGpu) ((0) && (pGpu)) +#define IsGK208orBetter(pGpu) ((0) && (pGpu)) + +#define IsGK208S(pGpu) ((0) && (pGpu)) +#define IsGK208SorBetter(pGpu) ((0) && (pGpu)) + +// Any GK20X chip? 
+#define IsGK20X(pGpu) (0 && (pGpu)) +#define IsGK20XorBetter(pGpu) (0 && (pGpu)) + + +// GM10X +#define IsGM107(pGpu) ((0) && (pGpu)) +#define IsGM107orBetter(pGpu) ((0) && (pGpu)) +#define IsGM107MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGM108(pGpu) ((0) && (pGpu)) +#define IsGM108orBetter(pGpu) ((0) && (pGpu)) +#define IsGM108MaskRevA01(pGpu) ((0) && (pGpu)) + +// Any GM10X chip? +#define IsGM10X(pGpu) (0 && (pGpu)) +#define IsGM10XorBetter(pGpu) (0 && (pGpu)) + + +// GM20X +#define IsGM200(pGpu) ((0) && (pGpu)) +#define IsGM200orBetter(pGpu) ((0) && (pGpu)) + +#define IsGM204(pGpu) ((0) && (pGpu)) +#define IsGM204orBetter(pGpu) ((0) && (pGpu)) + +#define IsGM206(pGpu) ((0) && (pGpu)) +#define IsGM206orBetter(pGpu) ((0) && (pGpu)) + +// Any GM20X chip? +#define IsGM20X(pGpu) (0 && (pGpu)) +#define IsGM20XorBetter(pGpu) (0 && (pGpu)) + + +// GP10X +#define IsGP100(pGpu) ((0) && (pGpu)) +#define IsGP100orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP102(pGpu) ((0) && (pGpu)) +#define IsGP102orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP104(pGpu) ((0) && (pGpu)) +#define IsGP104orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP106(pGpu) ((0) && (pGpu)) +#define IsGP106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP107(pGpu) ((0) && (pGpu)) +#define IsGP107orBetter(pGpu) ((0) && (pGpu)) + +#define IsGP108(pGpu) ((0) && (pGpu)) +#define IsGP108orBetter(pGpu) ((0) && (pGpu)) + +// Any GP10X chip? +#define IsGP10X(pGpu) (0 && (pGpu)) +#define IsGP10XorBetter(pGpu) (0 && (pGpu)) + + +// GV10X +#define IsGV100(pGpu) ((0) && (pGpu)) +#define IsGV100orBetter(pGpu) ((0) && (pGpu)) + +// Any GV10X chip? +#define IsGV10X(pGpu) (0 && (pGpu)) +#define IsGV10XorBetter(pGpu) (0 && (pGpu)) + + +// GV11X +#define IsGV11B(pGpu) ((0) && (pGpu)) +#define IsGV11BorBetter(pGpu) ((0) && (pGpu)) + +// Any GV11X chip? +#define IsGV11X(pGpu) (0 && (pGpu)) +#define IsGV11XorBetter(pGpu) (0 && (pGpu)) + + +// TU10X +#define IsTU102(pGpu) ((0) && (pGpu)) +#define IsTU102orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU104(pGpu) ((0) && (pGpu)) +#define IsTU104orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU106(pGpu) ((0) && (pGpu)) +#define IsTU106orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU116(pGpu) ((0) && (pGpu)) +#define IsTU116orBetter(pGpu) ((0) && (pGpu)) + +#define IsTU117(pGpu) ((0) && (pGpu)) +#define IsTU117orBetter(pGpu) ((0) && (pGpu)) + +// Any TU10X chip? +#define IsTU10X(pGpu) (0 && (pGpu)) +#define IsTU10XorBetter(pGpu) (0 && (pGpu)) + + +// GA10X +#define IsGA100(pGpu) ((0) && (pGpu)) +#define IsGA100orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA102(pGpu) ((0) && (pGpu)) +#define IsGA102orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA103(pGpu) ((0) && (pGpu)) +#define IsGA103orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA104(pGpu) ((0) && (pGpu)) +#define IsGA104orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA106(pGpu) ((0) && (pGpu)) +#define IsGA106orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA107(pGpu) ((0) && (pGpu)) +#define IsGA107orBetter(pGpu) ((0) && (pGpu)) + +#define IsGA10B(pGpu) ((0) && (pGpu)) +#define IsGA10BorBetter(pGpu) ((0) && (pGpu)) + +// Any GA10X chip? +#define IsGA10X(pGpu) (0 && (pGpu)) +#define IsGA10XorBetter(pGpu) (0 && (pGpu)) + + +// GA10XF +#define IsGA102F(pGpu) ((0) && (pGpu)) +#define IsGA102ForBetter(pGpu) ((0) && (pGpu)) + +// Any GA10XF chip? 
+#define IsGA10XF(pGpu) (0 && (pGpu)) +#define IsGA10XForBetter(pGpu) (0 && (pGpu)) + + +// T12X +#define IsT001_FERMI_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT001_FERMI_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +#define IsT124(pGpu) ((0) && (pGpu)) +#define IsT124orBetter(pGpu) ((0) && (pGpu)) + +// Any T12X chip? +#define IsT12X(pGpu) (0 && (pGpu)) +#define IsT12XorBetter(pGpu) (0 && (pGpu)) + + +// T13X +#define IsT132(pGpu) ((0) && (pGpu)) +#define IsT132orBetter(pGpu) ((0) && (pGpu)) + +// Any T13X chip? +#define IsT13X(pGpu) (0 && (pGpu)) +#define IsT13XorBetter(pGpu) (0 && (pGpu)) + + +// T21X +#define IsT210(pGpu) ((0) && (pGpu)) +#define IsT210orBetter(pGpu) ((0) && (pGpu)) + +// Any T21X chip? +#define IsT21X(pGpu) (0 && (pGpu)) +#define IsT21XorBetter(pGpu) (0 && (pGpu)) + + +// T18X +#define IsT186(pGpu) ((0) && (pGpu)) +#define IsT186orBetter(pGpu) ((0) && (pGpu)) + +// Any T18X chip? +#define IsT18X(pGpu) (0 && (pGpu)) +#define IsT18XorBetter(pGpu) (0 && (pGpu)) + + +// T19X +#define IsT194(pGpu) ((0) && (pGpu)) +#define IsT194orBetter(pGpu) ((0) && (pGpu)) + +#define IsT002_TURING_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT002_TURING_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +// Any T19X chip? +#define IsT19X(pGpu) (0 && (pGpu)) +#define IsT19XorBetter(pGpu) (0 && (pGpu)) + + +// T23XG +#define IsT234(pGpu) ((0) && (pGpu)) +#define IsT234orBetter(pGpu) ((0) && (pGpu)) + +// Any T23XG chip? +#define IsT23XG(pGpu) (0 && (pGpu)) +#define IsT23XGorBetter(pGpu) (0 && (pGpu)) + + +// T23XD +#define IsT234D(pGpu) ((1) && (pGpu)) +#define IsT234DorBetter(pGpu) ((1) && (pGpu)) + +// Any T23XD chip? +#define IsT23XD(pGpu) (1 || (pGpu)) +#define IsT23XDorBetter(pGpu) (1 || (pGpu)) + + +// SIMS +#define IsAMODEL(pGpu) ((0) && (pGpu)) +#define IsAMODELorBetter(pGpu) ((0) && (pGpu)) + +// Any SIMS chip? +#define IsSIMS(pGpu) (0 && (pGpu)) +#define IsSIMSorBetter(pGpu) (0 && (pGpu)) + + +// Any CLASSIC_GPUS chip? +#define IsCLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsCLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dFERMI chip? +#define IsdFERMI(pGpu) (0 && (pGpu)) +#define IsdFERMIorBetter(pGpu) (0 && (pGpu)) + + +// Any FERMI chip? +#define IsFERMI(pGpu) (IsFERMI_CLASSIC_GPUS(pGpu) || IsFERMI_TEGRA_BIG_GPUS(pGpu)) +#define IsFERMIorBetter(pGpu) (IsFERMI_CLASSIC_GPUSorBetter(pGpu) || IsFERMI_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any FERMI_CLASSIC_GPUS chip? +#define IsFERMI_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsFERMI_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any DISPLAYLESS chip? +#define IsDISPLAYLESS(pGpu) (0 && (pGpu)) + + +// Any dKEPLER chip? +#define IsdKEPLER(pGpu) (0 && (pGpu)) +#define IsdKEPLERorBetter(pGpu) (0 && (pGpu)) + + +// Any KEPLER chip? +#define IsKEPLER(pGpu) (IsKEPLER_CLASSIC_GPUS(pGpu) || IsKEPLER_TEGRA_BIG_GPUS(pGpu)) +#define IsKEPLERorBetter(pGpu) (IsKEPLER_CLASSIC_GPUSorBetter(pGpu) || IsKEPLER_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any KEPLER_CLASSIC_GPUS chip? +#define IsKEPLER_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsKEPLER_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dMAXWELL chip? +#define IsdMAXWELL(pGpu) (0 && (pGpu)) +#define IsdMAXWELLorBetter(pGpu) (0 && (pGpu)) + + +// Any MAXWELL chip? +#define IsMAXWELL(pGpu) (IsMAXWELL_CLASSIC_GPUS(pGpu) || IsMAXWELL_TEGRA_BIG_GPUS(pGpu)) +#define IsMAXWELLorBetter(pGpu) (IsMAXWELL_CLASSIC_GPUSorBetter(pGpu) || IsMAXWELL_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any MAXWELL_CLASSIC_GPUS chip? 
+#define IsMAXWELL_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsMAXWELL_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dPASCAL chip? +#define IsdPASCAL(pGpu) (0 && (pGpu)) +#define IsdPASCALorBetter(pGpu) (0 && (pGpu)) + + +// Any PASCAL chip? +#define IsPASCAL(pGpu) (IsPASCAL_CLASSIC_GPUS(pGpu) || IsPASCAL_TEGRA_BIG_GPUS(pGpu)) +#define IsPASCALorBetter(pGpu) (IsPASCAL_CLASSIC_GPUSorBetter(pGpu) || IsPASCAL_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any PASCAL_CLASSIC_GPUS chip? +#define IsPASCAL_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsPASCAL_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dVOLTA chip? +#define IsdVOLTA(pGpu) (0 && (pGpu)) +#define IsdVOLTAorBetter(pGpu) (0 && (pGpu)) + + +// Any VOLTA chip? +#define IsVOLTA(pGpu) (IsVOLTA_CLASSIC_GPUS(pGpu) || IsVOLTA_TEGRA_BIG_GPUS(pGpu)) +#define IsVOLTAorBetter(pGpu) (IsVOLTA_CLASSIC_GPUSorBetter(pGpu) || IsVOLTA_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any VOLTA_CLASSIC_GPUS chip? +#define IsVOLTA_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsVOLTA_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dTURING chip? +#define IsdTURING(pGpu) (0 && (pGpu)) +#define IsdTURINGorBetter(pGpu) (0 && (pGpu)) + + +// Any TURING chip? +#define IsTURING(pGpu) (IsTURING_CLASSIC_GPUS(pGpu) || IsTURING_TEGRA_BIG_GPUS(pGpu)) +#define IsTURINGorBetter(pGpu) (IsTURING_CLASSIC_GPUSorBetter(pGpu) || IsTURING_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any TURING_CLASSIC_GPUS chip? +#define IsTURING_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsTURING_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any dAMPERE chip? +#define IsdAMPERE(pGpu) (0 && (pGpu)) +#define IsdAMPEREorBetter(pGpu) (0 && (pGpu)) + + +// Any AMPERE chip? +#define IsAMPERE(pGpu) (IsAMPERE_CLASSIC_GPUS(pGpu) || IsAMPERE_TEGRA_BIG_GPUS(pGpu)) +#define IsAMPEREorBetter(pGpu) (IsAMPERE_CLASSIC_GPUSorBetter(pGpu) || IsAMPERE_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any AMPERE_CLASSIC_GPUS chip? +#define IsAMPERE_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsAMPERE_CLASSIC_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA_DGPU_AMPERE chip? +#define IsTEGRA_DGPU_AMPERE(pGpu) (0 && (pGpu)) + + +// Any TEGRA_DGPU chip? +#define IsTEGRA_DGPU(pGpu) (0 && (pGpu)) + + +// Any DFPGA chip? +#define IsDFPGA(pGpu) (0 && (pGpu)) + + +// Any TEGRA_BIG_GPUS chip? +#define IsTEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any FERMI_TEGRA_BIG_GPUS chip? +#define IsFERMI_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsFERMI_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA chip? +#define IsTEGRA(pGpu) (1 || (pGpu)) +#define IsTEGRAorBetter(pGpu) (1 || (pGpu)) + + +// Any TEGRA_TEGRA_BIG_GPUS chip? +#define IsTEGRA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tKEPLER chip? +#define IstKEPLER(pGpu) (0 && (pGpu)) +#define IstKEPLERorBetter(pGpu) (0 && (pGpu)) + + +// Any KEPLER_TEGRA_BIG_GPUS chip? +#define IsKEPLER_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsKEPLER_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tMAXWELL chip? +#define IstMAXWELL(pGpu) (0 && (pGpu)) +#define IstMAXWELLorBetter(pGpu) (0 && (pGpu)) + + +// Any MAXWELL_TEGRA_BIG_GPUS chip? +#define IsMAXWELL_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsMAXWELL_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tPASCAL chip? +#define IstPASCAL(pGpu) (0 && (pGpu)) +#define IstPASCALorBetter(pGpu) (0 && (pGpu)) + + +// Any PASCAL_TEGRA_BIG_GPUS chip? 
+#define IsPASCAL_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsPASCAL_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tVOLTA chip? +#define IstVOLTA(pGpu) (0 && (pGpu)) +#define IstVOLTAorBetter(pGpu) (0 && (pGpu)) + + +// Any VOLTA_TEGRA_BIG_GPUS chip? +#define IsVOLTA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsVOLTA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TURING_TEGRA_BIG_GPUS chip? +#define IsTURING_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTURING_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any T23X chip? +#define IsT23X(pGpu) (1 || (pGpu)) +#define IsT23XorBetter(pGpu) (1 || (pGpu)) + + +// Any T23X_TEGRA_BIG_GPUS chip? +#define IsT23X_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsT23X_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tAMPERE chip? +#define IstAMPERE(pGpu) (0 && (pGpu)) +#define IstAMPEREorBetter(pGpu) (0 && (pGpu)) + + +// Any AMPERE_TEGRA_BIG_GPUS chip? +#define IsAMPERE_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsAMPERE_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA_NVDISP_GPUS chip? +#define IsTEGRA_NVDISP_GPUS(pGpu) (1 || (pGpu)) +#define IsTEGRA_NVDISP_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any T23X_TEGRA_NVDISP_GPUS chip? +#define IsT23X_TEGRA_NVDISP_GPUS(pGpu) (1 || (pGpu)) +#define IsT23X_TEGRA_NVDISP_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any TEGRA_TEGRA_NVDISP_GPUS chip? +#define IsTEGRA_TEGRA_NVDISP_GPUS(pGpu) (1 || (pGpu)) +#define IsTEGRA_TEGRA_NVDISP_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any SIMULATION_GPUS chip? +#define IsSIMULATION_GPUS(pGpu) (0 && (pGpu)) +#define IsSIMULATION_GPUSorBetter(pGpu) (0 && (pGpu)) + + + + + +// +// Enable/disable printing of entity names (class, engine, etc.) +// +#define RMCFG_ENTITY_NAME(entity) "" + +// +// Macros to help with enabling or disabling code based on whether +// a feature (or chip or engine or ...) is enabled or not. +// Also have RMCFG_CHIP_), RMCFG_FEATURE_ENABLED(, etc +// from rmconfig.h. +// +// NOTE: these definitions are "flat" (ie they don't use some more general +// RMCFG_ENABLED(CHIP,X) form because the pre-processor would re-evaluate +// the expansion of the item (chip, feature, class, api). For classes, +// at least, this is a problem since we would end up with class number +// instead of its name... + +// hack: MSVC is not C99 compliant + +// CHIP's +#define RMCFG_CHIP_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_CHIP_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CHIP" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + #define RMCFG_CHIP_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_CHIP_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CHIP" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_CHIP_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +// FEATURE's +#define RMCFG_FEATURE_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_FEATURE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "FEATURE" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_FEATURE_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_FEATURE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "FEATURE" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_FEATURE_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +#define RMCFG_FEATURE_PLATFORM_P (RMCFG_FEATURE_PLATFORM_##P) + +// MODULE's +#define RMCFG_MODULE_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! 
RMCFG_MODULE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "MODULE" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_MODULE_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_MODULE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "MODULE" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_MODULE_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + + +// CLASS's +#define RMCFG_CLASS_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_CLASS_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CLASS" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_CLASS_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_CLASS_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CLASS" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_CLASS_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +// API's +#define RMCFG_API_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_API_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "API" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_API_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_API_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "API" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_API_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + + + +// ARCH test +#define RMCFG_IS_ARCH(arch) RMCFG_FEATURE_ARCH_##arch + +#endif // _G_RMCFG_PRIVATE_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c new file mode 100644 index 0000000..58cdc9d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c @@ -0,0 +1,32 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// rmconfig runtime support that will be part of "core" resman. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig_util.c +// +// Chips: T234D +// + +#include "gpu/gpu.h" + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +// NVOC RTTI provider for IOM objects +const NVOC_RTTI_PROVIDER __iom_rtti_provider = { 0 }; + +// +// helper functions for IsCHIP() et.al. +// These help to reduce code size for runtime IsCHIP() and IsCHIPALIAS() invocations +// + + + +// NVOC class ID uniqueness checks +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x0x05c7b5 = 1; /* OBJGPIO */ +char __nvoc_class_id_uniqueness_check_0x0x1ab16a = 1; /* OBJRPC */ +char __nvoc_class_id_uniqueness_check_0x0xd4dff8 = 1; /* OBJRPCSTRUCTURECOPY */ + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h new file mode 100644 index 0000000..e9b17e0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h @@ -0,0 +1,23 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Prototypes for rmconfig utility functions such as _IsGK104(), etc. +// +// Only for use within resman. 
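+//
+// For a single-chip profile such as this one, all IsCHIP() checks resolve at
+// compile time, so no runtime helper prototypes are required and the body of
+// this header is intentionally empty.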
+// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig_util.h +// +// Chips: T234D +// + +#ifndef _G_RMCFG_UTIL_H_ +#define _G_RMCFG_UTIL_H_ + +// +// Any needed prototypes for helper functions for IsCHIP(), eg rmcfg_IsGK104() +// These cannot be put in rmconfig_private.h as they need the OBJ typedefs. +// + + + +#endif // _G_RMCFG_UTIL_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h new file mode 100644 index 0000000..4117e65 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. + * This file is generated using below files: + * template file: kernel/inc/vgpu/gt_rpc-message.h + * definition file: kernel/inc/vgpu/rpc-message-header.def + */ + + +#ifdef RPC_MESSAGE_STRUCTURES +typedef union rpc_message_rpc_union_field_v03_00 +{ + NvU32 spare; + NvU32 cpuRmGfid; +} rpc_message_rpc_union_field_v03_00; + +typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v; + +typedef struct rpc_message_header_v03_00 +{ + NvU32 header_version; + NvU32 signature; + NvU32 length; + NvU32 function; + NvU32 rpc_result; + NvU32 rpc_result_private; + NvU32 sequence; + rpc_message_rpc_union_field_v u; + rpc_generic_union rpc_message_data[]; +} rpc_message_header_v03_00; + +typedef rpc_message_header_v03_00 rpc_message_header_v; + + +#endif + +#ifdef RPC_MESSAGE_GENERIC_UNION +// This is a generic union, that will be used for the communication between the vmioplugin & guest RM. 
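+// All members alias the same storage; a receiver is expected to validate
+// header_version and signature before interpreting the message through the
+// matching versioned view.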
+typedef union rpc_message_generic_union { + rpc_message_rpc_union_field_v03_00 rpc_union_field_v03_00; + rpc_message_rpc_union_field_v rpc_union_field_v; + rpc_message_header_v03_00 header_v03_00; + rpc_message_header_v header_v; +} rpc_message_generic_union; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h new file mode 100644 index 0000000..1c8cceb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h @@ -0,0 +1,216 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. + * This file is generated using below files: + * template file: kernel/inc/vgpu/gt_rpc-structures.h + * definition file: kernel/inc/vgpu/rpc-structures.def + */ + + +#ifdef RPC_STRUCTURES +// These structures will be used for the communication between the vmioplugin & guest RM. 
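+// Each message body is declared twice: as an explicitly versioned struct
+// (e.g. rpc_free_v03_00) plus a *_v typedef aliasing the current version,
+// so call sites can use *_v while the wire layout stays pinned to a version.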
+#define SDK_STRUCTURES +#include "g_sdk-structures.h" +#undef SDK_STRUCTURES +typedef struct rpc_free_v03_00 +{ + NVOS00_PARAMETERS_v03_00 params; +} rpc_free_v03_00; + +typedef rpc_free_v03_00 rpc_free_v; + +typedef struct rpc_dup_object_v03_00 +{ + NVOS55_PARAMETERS_v03_00 params; +} rpc_dup_object_v03_00; + +typedef rpc_dup_object_v03_00 rpc_dup_object_v; + +typedef struct rpc_gsp_rm_alloc_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvU32 hClass; + NvU32 status; + NvU32 paramsSize; + NvU8 params[]; +} rpc_gsp_rm_alloc_v03_00; + +typedef rpc_gsp_rm_alloc_v03_00 rpc_gsp_rm_alloc_v; + +typedef struct rpc_gsp_rm_control_v03_00 +{ + NvHandle hClient; + NvHandle hObject; + NvU32 cmd; + NvU32 status; + NvU32 paramsSize; + NvBool serialized; + NvU8 reserved[3]; + NvU8 params[]; +} rpc_gsp_rm_control_v03_00; + +typedef rpc_gsp_rm_control_v03_00 rpc_gsp_rm_control_v; + +typedef struct rpc_post_event_v17_00 +{ + NvHandle hClient; + NvHandle hEvent; + NvU32 notifyIndex; + NvU32 data; + NvU32 status; + NvU32 eventDataSize; + NvBool bNotifyList; + NvU8 eventData[]; +} rpc_post_event_v17_00; + +typedef rpc_post_event_v17_00 rpc_post_event_v; + +typedef struct rpc_rg_line_intr_v17_00 +{ + NvU32 head; + NvU32 rgIntr; +} rpc_rg_line_intr_v17_00; + +typedef rpc_rg_line_intr_v17_00 rpc_rg_line_intr_v; + +typedef struct rpc_display_modeset_v01_00 +{ + NvBool bModesetStart; + NvU32 minRequiredIsoBandwidthKBPS; + NvU32 minRequiredFloorBandwidthKBPS; +} rpc_display_modeset_v01_00; + +typedef rpc_display_modeset_v01_00 rpc_display_modeset_v; + +typedef struct rpc_dce_rm_init_v01_00 +{ + NvBool bInit; +} rpc_dce_rm_init_v01_00; + +typedef rpc_dce_rm_init_v01_00 rpc_dce_rm_init_v; + + +#endif + +#ifdef RPC_DEBUG_PRINT_FUNCTIONS +// These are definitions for versioned functions. These will be used for RPC logging in the vmioplugin. +#define SDK_DEBUG_PRINT_FUNCTIONS +#include "g_sdk-structures.h" +#undef SDK_DEBUG_PRINT_FUNCTIONS +#ifndef SKIP_PRINT_rpc_free_v03_00 +vmiopd_mdesc_t *rpcdebugFree_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_free_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_dup_object_v03_00 +vmiopd_mdesc_t *rpcdebugDupObject_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_dup_object_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_alloc_v03_00 +vmiopd_mdesc_t *rpcdebugGspRmAlloc_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_gsp_rm_alloc_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_control_v03_00 +vmiopd_mdesc_t *rpcdebugGspRmControl_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_gsp_rm_control_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_post_event_v17_00 +vmiopd_mdesc_t *rpcdebugPostEvent_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_post_event_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rg_line_intr_v17_00 +vmiopd_mdesc_t *rpcdebugRgLineIntr_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_rg_line_intr_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_display_modeset_v01_00 +vmiopd_mdesc_t *rpcdebugDisplayModeset_v01_00(void) +{ + return &vmiopd_mdesc_t_rpc_display_modeset_v01_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_dce_rm_init_v01_00 +vmiopd_mdesc_t *rpcdebugDceRmInit_v01_00(void) +{ + return &vmiopd_mdesc_t_rpc_dce_rm_init_v01_00; +} +#endif + + +#endif + +#ifdef RPC_GENERIC_UNION +// This is a generic union, that will be used for the communication between the vmioplugin & guest RM. 
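+// Illustrative (hypothetical) sender code -- fill the versioned view that
+// matches the negotiated RPC version:
+//
+//     rpc_generic_union *pRpc = ...;              /* points into the buffer */
+//     pRpc->dce_rm_init_v01_00.bInit = NV_TRUE;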
+typedef union rpc_generic_union { + rpc_free_v03_00 free_v03_00; + rpc_free_v free_v; + rpc_dup_object_v03_00 dup_object_v03_00; + rpc_dup_object_v dup_object_v; + rpc_gsp_rm_alloc_v03_00 gsp_rm_alloc_v03_00; + rpc_gsp_rm_alloc_v gsp_rm_alloc_v; + rpc_gsp_rm_control_v03_00 gsp_rm_control_v03_00; + rpc_gsp_rm_control_v gsp_rm_control_v; + rpc_post_event_v17_00 post_event_v17_00; + rpc_post_event_v post_event_v; + rpc_rg_line_intr_v17_00 rg_line_intr_v17_00; + rpc_rg_line_intr_v rg_line_intr_v; + rpc_display_modeset_v01_00 display_modeset_v01_00; + rpc_display_modeset_v display_modeset_v; + rpc_dce_rm_init_v01_00 dce_rm_init_v01_00; + rpc_dce_rm_init_v dce_rm_init_v; +} rpc_generic_union; + +#endif + + +#ifdef RPC_ARRAY_LENGTH_FUNCTIONS +#define SDK_ARRAY_LENGTH_FUNCTIONS +#include "g_sdk-structures.h" +#undef SDK_ARRAY_LENGTH_FUNCTIONS + +#endif + +#ifdef AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION +#define NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MAJOR 0x18 +#define NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MINOR 0x00 +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c new file mode 100644 index 0000000..7dffc93 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c @@ -0,0 +1,421 @@ +#define NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_client_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8f87e5 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_RsClient(RsClient*); +void __nvoc_init_funcTable_RsClient(RsClient*); +NV_STATUS __nvoc_ctor_RsClient(RsClient*, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RsClient(RsClient*); +void __nvoc_dtor_RsClient(RsClient*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClient; + +static const struct NVOC_RTTI __nvoc_rtti_RsClient_RsClient = { + /*pClassDef=*/ &__nvoc_class_def_RsClient, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsClient, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsClient_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsClient, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsClient = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_RsClient_RsClient, + &__nvoc_rtti_RsClient_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsClient), + /*classId=*/ classId(RsClient), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsClient", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsClient, + /*pCastInfo=*/ &__nvoc_castinfo_RsClient, + /*pExportInfo=*/ &__nvoc_export_info_RsClient +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClient = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsClient(RsClient *pThis) { + __nvoc_clientDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + 
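// NVOC destruction mirrors construction in reverse: the derived clientDestruct
+ // above ran first, then the Object base destructor.
+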
PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsClient(RsClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsClient(RsClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsClient_fail_Object; + __nvoc_init_dataField_RsClient(pThis); + + status = __nvoc_clientConstruct(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClient_fail__init; + goto __nvoc_ctor_RsClient_exit; // Success + +__nvoc_ctor_RsClient_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsClient_fail_Object: +__nvoc_ctor_RsClient_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsClient_1(RsClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__clientValidate__ = &clientValidate_IMPL; + + pThis->__clientFreeResource__ = &clientFreeResource_IMPL; + + pThis->__clientDestructResourceRef__ = &clientDestructResourceRef_IMPL; + + pThis->__clientUnmapMemory__ = &clientUnmapMemory_IMPL; + + pThis->__clientInterMap__ = &clientInterMap_IMPL; + + pThis->__clientInterUnmap__ = &clientInterUnmap_IMPL; + + pThis->__clientValidateNewResourceHandle__ = &clientValidateNewResourceHandle_IMPL; + + pThis->__clientPostProcessPendingFreeList__ = &clientPostProcessPendingFreeList_IMPL; + + pThis->__clientShareResource__ = &clientShareResource_IMPL; +} + +void __nvoc_init_funcTable_RsClient(RsClient *pThis) { + __nvoc_init_funcTable_RsClient_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RsClient(RsClient *pThis) { + pThis->__nvoc_pbase_RsClient = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_RsClient(pThis); +} + +NV_STATUS __nvoc_objCreate_RsClient(RsClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RsClient *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsClient)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsClient)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsClient); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsClient(pThis); + status = __nvoc_ctor_RsClient(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsClient_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsClient_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsClient(RsClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct PORT_MEM_ALLOCATOR * arg_pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsClient(ppThis, pParent, createFlags, arg_pAllocator, arg_pParams); + + return status; +} + +#ifdef DEBUG +char 
__nvoc_class_id_uniqueness_check_0x083442 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +void __nvoc_init_RsClientResource(RsClientResource*); +void __nvoc_init_funcTable_RsClientResource(RsClientResource*); +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RsClientResource(RsClientResource*); +void __nvoc_dtor_RsClientResource(RsClientResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClientResource; + +static const struct NVOC_RTTI __nvoc_rtti_RsClientResource_RsClientResource = { + /*pClassDef=*/ &__nvoc_class_def_RsClientResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsClientResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsClientResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsClientResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsClientResource = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_RsClientResource_RsClientResource, + &__nvoc_rtti_RsClientResource_RsResource, + &__nvoc_rtti_RsClientResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsClientResource), + /*classId=*/ classId(RsClientResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsClientResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsClientResource, + /*pCastInfo=*/ &__nvoc_castinfo_RsClientResource, + /*pExportInfo=*/ &__nvoc_export_info_RsClientResource +}; + +static NvBool __nvoc_thunk_RsResource_clientresShareCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return resShareCallback((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControl(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresUnmap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresMapTo(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pParams); +} + +static NvU32 __nvoc_thunk_RsResource_clientresGetRefCount(struct RsClientResource *pResource) { + return 
resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControlFilter(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_clientresAddAdditionalDependants(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pReference); +} + +static NvBool __nvoc_thunk_RsResource_clientresCanCopy(struct RsClientResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControl_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl_Prologue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_clientresPreDestruct(struct RsClientResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresUnmapFrom(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_clientresControl_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + resControl_Epilogue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControlLookup(struct RsClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresMap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RsResource_clientresAccessCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return resAccessCallback((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClientResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsResource(RsResource*); +void __nvoc_dtor_RsClientResource(RsClientResource *pThis) { + __nvoc_clientresDestruct(pThis); + 
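// Same reverse-order teardown as RsClient: derived destruct first, then base.
+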
__nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsClientResource(RsClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsResource(RsResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsResource(&pThis->__nvoc_base_RsResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClientResource_fail_RsResource; + __nvoc_init_dataField_RsClientResource(pThis); + + status = __nvoc_clientresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClientResource_fail__init; + goto __nvoc_ctor_RsClientResource_exit; // Success + +__nvoc_ctor_RsClientResource_fail__init: + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); +__nvoc_ctor_RsClientResource_fail_RsResource: +__nvoc_ctor_RsClientResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsClientResource_1(RsClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__clientresShareCallback__ = &__nvoc_thunk_RsResource_clientresShareCallback; + + pThis->__clientresControl__ = &__nvoc_thunk_RsResource_clientresControl; + + pThis->__clientresUnmap__ = &__nvoc_thunk_RsResource_clientresUnmap; + + pThis->__clientresMapTo__ = &__nvoc_thunk_RsResource_clientresMapTo; + + pThis->__clientresGetRefCount__ = &__nvoc_thunk_RsResource_clientresGetRefCount; + + pThis->__clientresControlFilter__ = &__nvoc_thunk_RsResource_clientresControlFilter; + + pThis->__clientresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_clientresAddAdditionalDependants; + + pThis->__clientresCanCopy__ = &__nvoc_thunk_RsResource_clientresCanCopy; + + pThis->__clientresControl_Prologue__ = &__nvoc_thunk_RsResource_clientresControl_Prologue; + + pThis->__clientresPreDestruct__ = &__nvoc_thunk_RsResource_clientresPreDestruct; + + pThis->__clientresUnmapFrom__ = &__nvoc_thunk_RsResource_clientresUnmapFrom; + + pThis->__clientresControl_Epilogue__ = &__nvoc_thunk_RsResource_clientresControl_Epilogue; + + pThis->__clientresControlLookup__ = &__nvoc_thunk_RsResource_clientresControlLookup; + + pThis->__clientresMap__ = &__nvoc_thunk_RsResource_clientresMap; + + pThis->__clientresAccessCallback__ = &__nvoc_thunk_RsResource_clientresAccessCallback; +} + +void __nvoc_init_funcTable_RsClientResource(RsClientResource *pThis) { + __nvoc_init_funcTable_RsClientResource_1(pThis); +} + +void __nvoc_init_RsResource(RsResource*); +void __nvoc_init_RsClientResource(RsClientResource *pThis) { + pThis->__nvoc_pbase_RsClientResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsResource; + __nvoc_init_RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_init_funcTable_RsClientResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RsClientResource(RsClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RsClientResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsClientResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsClientResource)); + + __nvoc_initRtti(staticCast(pThis, 
Dynamic), &__nvoc_class_def_RsClientResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsClientResource(pThis); + status = __nvoc_ctor_RsClientResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsClientResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsClientResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsClientResource(RsClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsClientResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h new file mode 100644 index 0000000..c5cd092 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h @@ -0,0 +1,601 @@ +#ifndef _G_RS_CLIENT_NVOC_H_ +#define _G_RS_CLIENT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_rs_client_nvoc.h" + +#ifndef _RS_CLIENT_H_ +#define _RS_CLIENT_H_ + + +#include "resserv/resserv.h" +#include "nvport/nvport.h" +#include "resserv/rs_resource.h" +#include "containers/list.h" +#include "utils/nvrange.h" + +#define RS_UNIQUE_HANDLE_BASE (0xcaf00000) +#define RS_UNIQUE_HANDLE_RANGE (0x00080000) + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsClient + * @addtogroup RsClient + * @{*/ + +typedef enum { + CLIENT_TYPE_USER, + CLIENT_TYPE_KERNEL +} CLIENT_TYPE; + +typedef struct AccessBackRef +{ + NvHandle hClient; + NvHandle hResource; +} AccessBackRef; + +MAKE_LIST(AccessBackRefList, AccessBackRef); + +/** + * Information about a client + */ +#ifdef NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsClient { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct RsClient *__nvoc_pbase_RsClient; + NV_STATUS (*__clientValidate__)(struct RsClient *, const API_SECURITY_INFO *); + NV_STATUS (*__clientFreeResource__)(struct RsClient *, RsServer *, struct RS_RES_FREE_PARAMS_INTERNAL *); + NV_STATUS (*__clientDestructResourceRef__)(struct RsClient *, RsServer *, struct RsResourceRef *); + NV_STATUS (*__clientUnmapMemory__)(struct RsClient *, struct RsResourceRef *, struct RS_LOCK_INFO *, struct RsCpuMapping **, API_SECURITY_INFO *); + NV_STATUS (*__clientInterMap__)(struct RsClient *, struct RsResourceRef *, struct RsResourceRef *, struct RS_INTER_MAP_PARAMS *); + void (*__clientInterUnmap__)(struct RsClient *, struct RsResourceRef *, struct RS_INTER_UNMAP_PARAMS *); + NV_STATUS (*__clientValidateNewResourceHandle__)(struct RsClient *, NvHandle, NvBool); + NV_STATUS (*__clientPostProcessPendingFreeList__)(struct RsClient *, struct RsResourceRef **); + NV_STATUS (*__clientShareResource__)(struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *, struct CALL_CONTEXT *); + NvHandle hClient; + CLIENT_TYPE type; + NvBool bActive; + NvBool bResourceWarning; + RsRefMap resourceMap; + AccessBackRefList accessBackRefList; + NvHandle handleRangeStart; + NvHandle handleRangeSize; + struct NV_RANGE handleRestrictRange; + NvHandle handleGenIdx; + RsRefFreeList pendingFreeList; + struct RS_FREE_STACK *pFreeStack; +}; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +#define __staticCast_RsClient(pThis) \ + ((pThis)->__nvoc_pbase_RsClient) + +#ifdef __nvoc_rs_client_h_disabled +#define __dynamicCast_RsClient(pThis) ((RsClient*)NULL) +#else //__nvoc_rs_client_h_disabled +#define __dynamicCast_RsClient(pThis) \ + ((RsClient*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsClient))) +#endif //__nvoc_rs_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsClient(RsClient**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsClient(RsClient**, Dynamic*, NvU32, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RsClient(ppNewObj, pParent, createFlags, arg_pAllocator, arg_pParams) \ + __nvoc_objCreate_RsClient((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pAllocator, arg_pParams) + +#define 
clientValidate(pClient, pSecInfo) clientValidate_DISPATCH(pClient, pSecInfo) +#define clientFreeResource(pClient, pServer, pParams) clientFreeResource_DISPATCH(pClient, pServer, pParams) +#define clientDestructResourceRef(pClient, pServer, pResourceRef) clientDestructResourceRef_DISPATCH(pClient, pServer, pResourceRef) +#define clientUnmapMemory(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) clientUnmapMemory_DISPATCH(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) +#define clientInterMap(pClient, pMapperRef, pMappableRef, pParams) clientInterMap_DISPATCH(pClient, pMapperRef, pMappableRef, pParams) +#define clientInterUnmap(pClient, pMapperRef, pParams) clientInterUnmap_DISPATCH(pClient, pMapperRef, pParams) +#define clientValidateNewResourceHandle(pClient, hResource, bRestrict) clientValidateNewResourceHandle_DISPATCH(pClient, hResource, bRestrict) +#define clientPostProcessPendingFreeList(pClient, ppFirstLowPriRef) clientPostProcessPendingFreeList_DISPATCH(pClient, ppFirstLowPriRef) +#define clientShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) clientShareResource_DISPATCH(pClient, pResourceRef, pSharePolicy, pCallContext) +NV_STATUS clientValidate_IMPL(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo); + +static inline NV_STATUS clientValidate_DISPATCH(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return pClient->__clientValidate__(pClient, pSecInfo); +} + +NV_STATUS clientFreeResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS clientFreeResource_DISPATCH(struct RsClient *pClient, RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return pClient->__clientFreeResource__(pClient, pServer, pParams); +} + +NV_STATUS clientDestructResourceRef_IMPL(struct RsClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef); + +static inline NV_STATUS clientDestructResourceRef_DISPATCH(struct RsClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) { + return pClient->__clientDestructResourceRef__(pClient, pServer, pResourceRef); +} + +NV_STATUS clientUnmapMemory_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo); + +static inline NV_STATUS clientUnmapMemory_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return pClient->__clientUnmapMemory__(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +NV_STATUS clientInterMap_IMPL(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS clientInterMap_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return pClient->__clientInterMap__(pClient, pMapperRef, pMappableRef, pParams); +} + +void clientInterUnmap_IMPL(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams); + +static inline void clientInterUnmap_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + pClient->__clientInterUnmap__(pClient, pMapperRef, pParams); +} + +NV_STATUS clientValidateNewResourceHandle_IMPL(struct RsClient *pClient, NvHandle hResource, NvBool 
bRestrict); + +static inline NV_STATUS clientValidateNewResourceHandle_DISPATCH(struct RsClient *pClient, NvHandle hResource, NvBool bRestrict) { + return pClient->__clientValidateNewResourceHandle__(pClient, hResource, bRestrict); +} + +NV_STATUS clientPostProcessPendingFreeList_IMPL(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef); + +static inline NV_STATUS clientPostProcessPendingFreeList_DISPATCH(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return pClient->__clientPostProcessPendingFreeList__(pClient, ppFirstLowPriRef); +} + +NV_STATUS clientShareResource_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); + +static inline NV_STATUS clientShareResource_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return pClient->__clientShareResource__(pClient, pResourceRef, pSharePolicy, pCallContext); +} + +NV_STATUS clientConstruct_IMPL(struct RsClient *arg_pClient, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_clientConstruct(arg_pClient, arg_pAllocator, arg_pParams) clientConstruct_IMPL(arg_pClient, arg_pAllocator, arg_pParams) +void clientDestruct_IMPL(struct RsClient *pClient); +#define __nvoc_clientDestruct(pClient) clientDestruct_IMPL(pClient) +NV_STATUS clientGetResourceByRef_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RsResource **ppResource); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceByRef(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RsResource **ppResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceByRef(pClient, pResourceRef, ppResource) clientGetResourceByRef_IMPL(pClient, pResourceRef, ppResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResource_IMPL(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResource **ppResource); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResource(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResource **ppResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResource(pClient, hResource, internalClassId, ppResource) clientGetResource_IMPL(pClient, hResource, internalClassId, ppResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRef_IMPL(struct RsClient *pClient, NvHandle hResource, struct RsResourceRef **ppResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRef(struct RsClient *pClient, NvHandle hResource, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRef(pClient, hResource, ppResourceRef) clientGetResourceRef_IMPL(pClient, hResource, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRefWithAccess_IMPL(struct RsClient *pClient, NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, struct RsResourceRef **ppResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRefWithAccess(struct RsClient *pClient, 
NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRefWithAccess(pClient, hResource, pRightsRequired, ppResourceRef) clientGetResourceRefWithAccess_IMPL(pClient, hResource, pRightsRequired, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRefByType_IMPL(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResourceRef **ppResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRefByType(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRefByType(pClient, hResource, internalClassId, ppResourceRef) clientGetResourceRefByType_IMPL(pClient, hResource, internalClassId, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAllocResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAllocResource(struct RsClient *pClient, RsServer *pServer, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAllocResource(pClient, pServer, pParams) clientAllocResource_IMPL(pClient, pServer, pParams) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientCopyResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_DUP_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientCopyResource(struct RsClient *pClient, RsServer *pServer, struct RS_RES_DUP_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientCopyResource(pClient, pServer, pParams) clientCopyResource_IMPL(pClient, pServer, pParams) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGenResourceHandle_IMPL(struct RsClient *pClient, NvHandle *pHandle); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGenResourceHandle(struct RsClient *pClient, NvHandle *pHandle) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGenResourceHandle(pClient, pHandle) clientGenResourceHandle_IMPL(pClient, pHandle) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAssignResourceHandle_IMPL(struct RsClient *pClient, NvHandle *phResource); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAssignResourceHandle(struct RsClient *pClient, NvHandle *phResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAssignResourceHandle(pClient, phResource) clientAssignResourceHandle_IMPL(pClient, phResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientUpdatePendingFreeList_IMPL(struct RsClient *pClient, struct RsResourceRef *pTarget, struct RsResourceRef *pReference, NvBool bMove); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientUpdatePendingFreeList(struct RsClient *pClient, struct RsResourceRef *pTarget, 
struct RsResourceRef *pReference, NvBool bMove) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientUpdatePendingFreeList(pClient, pTarget, pReference, bMove) clientUpdatePendingFreeList_IMPL(pClient, pTarget, pReference, bMove) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAddAccessBackRef_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAddAccessBackRef(struct RsClient *pClient, struct RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAddAccessBackRef(pClient, pResourceRef) clientAddAccessBackRef_IMPL(pClient, pResourceRef) +#endif //__nvoc_rs_client_h_disabled + +void clientFreeAccessBackRefs_IMPL(struct RsClient *pClient, RsServer *pServer); +#ifdef __nvoc_rs_client_h_disabled +static inline void clientFreeAccessBackRefs(struct RsClient *pClient, RsServer *pServer) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); +} +#else //__nvoc_rs_client_h_disabled +#define clientFreeAccessBackRefs(pClient, pServer) clientFreeAccessBackRefs_IMPL(pClient, pServer) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientSetHandleGenerator_IMPL(struct RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientSetHandleGenerator(struct RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientSetHandleGenerator(pClient, handleRangeStart, handleRangeSize) clientSetHandleGenerator_IMPL(pClient, handleRangeStart, handleRangeSize) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientCanShareResource_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientCanShareResource(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) clientCanShareResource_IMPL(pClient, pResourceRef, pSharePolicy, pCallContext) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientShareResourceTargetClient_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientShareResourceTargetClient(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientShareResourceTargetClient(pClient, pResourceRef, pSharePolicy, pCallContext) clientShareResourceTargetClient_IMPL(pClient, pResourceRef, pSharePolicy, pCallContext) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientSetRestrictedRange_IMPL(struct RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize); +#ifdef __nvoc_rs_client_h_disabled +static inline 
NV_STATUS clientSetRestrictedRange(struct RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientSetRestrictedRange(pClient, handleRangeStart, handleRangeSize) clientSetRestrictedRange_IMPL(pClient, handleRangeStart, handleRangeSize) +#endif //__nvoc_rs_client_h_disabled + +#undef PRIVATE_FIELD + + +/** + * Get an iterator to the elements in the client's resource map + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] type RS_ITERATE_CHILDREN, RS_ITERATE_DESCENDANTS, RS_ITERATE_CACHED, RS_ITERATE_DEPENDANTS + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * + * @note If type=RS_ITERATE_CHILDREN, pScopeRef will restrict iteration to children of the scope ref + * @note If type=RS_ITERATE_DESCENDANTS, pScopeRef will restrict iteration to descendants of the scope ref + * @note If type=RS_ITERATE_CACHED, pScopeRef will restrict iteration to references cached by the scope ref + */ +RS_ITERATOR clientRefIter(struct RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, RS_ITER_TYPE type, NvBool bExactMatch); + +/** + * Get the next iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefIterNext(struct RsClient *pClient, RS_ITERATOR *pIt); + +/** + * Get an iterator to the elements in the client's resource map. + * + * This iterator will visit all descendants in pre-order according to the parent-child + * resource hierarchy. + * + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + */ +RS_ORDERED_ITERATOR clientRefOrderedIter(struct RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, NvBool bExactMatch); + +/** + * Get the next ordered iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefOrderedIterNext(struct RsClient *pClient, RS_ORDERED_ITERATOR *pIt); + + +/** + * RsResource interface to a RsClient + * + * This allows clients to be interfaced with as if they were resources (e.g., + * to perform a control call on a client). + * + * An RsClientResource is automatically allocated under a client as a top-level + * object when that client is allocated and cannot be explicitly freed. Only + * one RsClientResource is permitted per client. + * + * Any resource allocated under a client will be a descendant of the client + * proxy resource.
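+ *
+ * As an illustrative sketch only (assumptions for clarity, not from the
+ * original header: the proxy is registered under the client's own handle
+ * and error handling is omitted), the proxy reference can be looked up
+ * with the helpers declared above:
+ *
+ *   RsResourceRef *pProxyRef = NULL;
+ *   if (clientGetResourceRefByType(pClient, pClient->hClient,
+ *                                  classId(RsClientResource),
+ *                                  &pProxyRef) == NV_OK)
+ *   {
+ *       // pProxyRef->pResource is this client's RsClientResource
+ *   }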
+ * + */ +#ifdef NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsClientResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsResource __nvoc_base_RsResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RsClientResource *__nvoc_pbase_RsClientResource; + NvBool (*__clientresShareCallback__)(struct RsClientResource *, struct RsClient *, RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__clientresControl__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__clientresUnmap__)(struct RsClientResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__clientresMapTo__)(struct RsClientResource *, RS_RES_MAP_TO_PARAMS *); + NvU32 (*__clientresGetRefCount__)(struct RsClientResource *); + NV_STATUS (*__clientresControlFilter__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__clientresAddAdditionalDependants__)(struct RsClient *, struct RsClientResource *, RsResourceRef *); + NvBool (*__clientresCanCopy__)(struct RsClientResource *); + NV_STATUS (*__clientresControl_Prologue__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__clientresPreDestruct__)(struct RsClientResource *); + NV_STATUS (*__clientresUnmapFrom__)(struct RsClientResource *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__clientresControl_Epilogue__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__clientresControlLookup__)(struct RsClientResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__clientresMap__)(struct RsClientResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__clientresAccessCallback__)(struct RsClientResource *, struct RsClient *, void *, RsAccessRight); + struct RsClient *pClient; +}; + +#ifndef __NVOC_CLASS_RsClientResource_TYPEDEF__ +#define __NVOC_CLASS_RsClientResource_TYPEDEF__ +typedef struct RsClientResource RsClientResource; +#endif /* __NVOC_CLASS_RsClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClientResource +#define __nvoc_class_id_RsClientResource 0x083442 +#endif /* __nvoc_class_id_RsClientResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; + +#define __staticCast_RsClientResource(pThis) \ + ((pThis)->__nvoc_pbase_RsClientResource) + +#ifdef __nvoc_rs_client_h_disabled +#define __dynamicCast_RsClientResource(pThis) ((RsClientResource*)NULL) +#else //__nvoc_rs_client_h_disabled +#define __dynamicCast_RsClientResource(pThis) \ + ((RsClientResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsClientResource))) +#endif //__nvoc_rs_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsClientResource(RsClientResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsClientResource(RsClientResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RsClientResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RsClientResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define clientresShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) clientresShareCallback_DISPATCH(pResource, pInvokingClient, 
pParentRef, pSharePolicy) +#define clientresControl(pResource, pCallContext, pParams) clientresControl_DISPATCH(pResource, pCallContext, pParams) +#define clientresUnmap(pResource, pCallContext, pCpuMapping) clientresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define clientresMapTo(pResource, pParams) clientresMapTo_DISPATCH(pResource, pParams) +#define clientresGetRefCount(pResource) clientresGetRefCount_DISPATCH(pResource) +#define clientresControlFilter(pResource, pCallContext, pParams) clientresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define clientresAddAdditionalDependants(pClient, pResource, pReference) clientresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define clientresCanCopy(pResource) clientresCanCopy_DISPATCH(pResource) +#define clientresControl_Prologue(pResource, pCallContext, pParams) clientresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define clientresPreDestruct(pResource) clientresPreDestruct_DISPATCH(pResource) +#define clientresUnmapFrom(pResource, pParams) clientresUnmapFrom_DISPATCH(pResource, pParams) +#define clientresControl_Epilogue(pResource, pCallContext, pParams) clientresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define clientresControlLookup(pResource, pParams, ppEntry) clientresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define clientresMap(pResource, pCallContext, pParams, pCpuMapping) clientresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define clientresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) clientresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool clientresShareCallback_DISPATCH(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__clientresShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS clientresControl_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__clientresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresUnmap_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__clientresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS clientresMapTo_DISPATCH(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__clientresMapTo__(pResource, pParams); +} + +static inline NvU32 clientresGetRefCount_DISPATCH(struct RsClientResource *pResource) { + return pResource->__clientresGetRefCount__(pResource); +} + +static inline NV_STATUS clientresControlFilter_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__clientresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void clientresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference) { + pResource->__clientresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvBool clientresCanCopy_DISPATCH(struct RsClientResource *pResource) { + return pResource->__clientresCanCopy__(pResource); +} + +static inline NV_STATUS clientresControl_Prologue_DISPATCH(struct RsClientResource *pResource, struct 
CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__clientresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void clientresPreDestruct_DISPATCH(struct RsClientResource *pResource) { + pResource->__clientresPreDestruct__(pResource); +} + +static inline NV_STATUS clientresUnmapFrom_DISPATCH(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__clientresUnmapFrom__(pResource, pParams); +} + +static inline void clientresControl_Epilogue_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__clientresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresControlLookup_DISPATCH(struct RsClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__clientresControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS clientresMap_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__clientresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool clientresAccessCallback_DISPATCH(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__clientresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS clientresConstruct_IMPL(struct RsClientResource *arg_pClientRes, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_clientresConstruct(arg_pClientRes, arg_pCallContext, arg_pParams) clientresConstruct_IMPL(arg_pClientRes, arg_pCallContext, arg_pParams) +void clientresDestruct_IMPL(struct RsClientResource *pClientRes); +#define __nvoc_clientresDestruct(pClientRes) clientresDestruct_IMPL(pClientRes) +#undef PRIVATE_FIELD + + +/** + * Client destruction parameters + */ +struct RS_CLIENT_FREE_PARAMS_INTERNAL +{ + NvHandle hDomain; ///< [in] The parent domain + NvHandle hClient; ///< [in] The client handle + NvBool bHiPriOnly; ///< [in] Only free high priority resources + NvU32 state; ///< [in] User-defined state + + RS_RES_FREE_PARAMS_INTERNAL *pResFreeParams; ///< [in] Necessary for locking state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * Return an iterator to a resource reference multi-map + * @param[in] pIndex The multi-map to iterate + * @param[in] index Return only the references belonging to this index + */ +RsIndexIter indexRefIter(RsIndex *pIndex, NvU32 index); + +/** + * Return an iterator to all resource references in a multi-map + * @param[in] pIndex The multi-map to iterate + */ +RsIndexIter indexRefIterAll(RsIndex *pIndex); + +/** + * Get the next iterator in a resource reference multi-map + * @param[in] pIt Iterator + */ +NvBool indexRefIterNext(RsIndexIter *pIt); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RS_CLIENT_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c new file mode 100644 index 0000000..f57685e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c @@ -0,0 +1,186 @@ 
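+/*
+ * Illustrative sketch, not generated code: instances of RsResource are
+ * normally created through the __objCreate_RsResource wrapper macro from
+ * the corresponding header, which forwards to __nvoc_objCreate_RsResource
+ * below. The names pParentObj, pCallContext and pAllocParams are assumed
+ * to be provided by the caller; error handling is omitted:
+ *
+ *   RsResource *pRes = NULL;
+ *   NV_STATUS status = __objCreate_RsResource(&pRes, pParentObj, 0,
+ *                                             pCallContext, pAllocParams);
+ */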
+#define NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xd551cb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_RsResource(RsResource*); +void __nvoc_init_funcTable_RsResource(RsResource*); +NV_STATUS __nvoc_ctor_RsResource(RsResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RsResource(RsResource*); +void __nvoc_dtor_RsResource(RsResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsResource; + +static const struct NVOC_RTTI __nvoc_rtti_RsResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsResource, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsResource = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_RsResource_RsResource, + &__nvoc_rtti_RsResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsResource), + /*classId=*/ classId(RsResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsResource, + /*pCastInfo=*/ &__nvoc_castinfo_RsResource, + /*pExportInfo=*/ &__nvoc_export_info_RsResource +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsResource(RsResource *pThis) { + __nvoc_resDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsResource(RsResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsResource(RsResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsResource_fail_Object; + __nvoc_init_dataField_RsResource(pThis); + + status = __nvoc_resConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsResource_fail__init; + goto __nvoc_ctor_RsResource_exit; // Success + +__nvoc_ctor_RsResource_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsResource_fail_Object: +__nvoc_ctor_RsResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsResource_1(RsResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__resCanCopy__ = &resCanCopy_IMPL; + + pThis->__resPreDestruct__ = &resPreDestruct_IMPL; + + pThis->__resControlLookup__ = &resControlLookup_IMPL; + + pThis->__resControl__ = &resControl_IMPL; + + pThis->__resControlFilter__ = &resControlFilter_IMPL; + + pThis->__resControl_Prologue__ = &resControl_Prologue_IMPL; + + pThis->__resControl_Epilogue__ = 
&resControl_Epilogue_IMPL; + + pThis->__resMap__ = &resMap_IMPL; + + pThis->__resUnmap__ = &resUnmap_IMPL; + + pThis->__resMapTo__ = &resMapTo_IMPL; + + pThis->__resUnmapFrom__ = &resUnmapFrom_IMPL; + + pThis->__resGetRefCount__ = &resGetRefCount_IMPL; + + pThis->__resAccessCallback__ = &resAccessCallback_IMPL; + + pThis->__resShareCallback__ = &resShareCallback_IMPL; + + pThis->__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL; +} + +void __nvoc_init_funcTable_RsResource(RsResource *pThis) { + __nvoc_init_funcTable_RsResource_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RsResource(RsResource *pThis) { + pThis->__nvoc_pbase_RsResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_RsResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RsResource(RsResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RsResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsResource(pThis); + status = __nvoc_ctor_RsResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsResource(RsResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h new file mode 100644 index 0000000..be788fa --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h @@ -0,0 +1,860 @@ +#ifndef _G_RS_RESOURCE_NVOC_H_ +#define _G_RS_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_rs_resource_nvoc.h" + +#ifndef _RS_RESOURCE_H_ +#define _RS_RESOURCE_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/object.h" +#include "resserv/rs_access_map.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct RsSession; + +#ifndef __NVOC_CLASS_RsSession_TYPEDEF__ +#define __NVOC_CLASS_RsSession_TYPEDEF__ +typedef struct RsSession RsSession; +#endif /* __NVOC_CLASS_RsSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsSession +#define __nvoc_class_id_RsSession 0x830d90 +#endif /* __nvoc_class_id_RsSession */ + + + +/** + * @defgroup RsResource + * @addtogroup RsResource + * @{*/ + +#define ALLOC_STATE_INTERNAL_CLIENT_HANDLE NVBIT(5) + +/* + * Locking operations for lock-metering + */ +#define RS_LOCK_TRACE_INVALID 1 +#define RS_LOCK_TRACE_ACQUIRE 1 +#define RS_LOCK_TRACE_RELEASE 2 +#define RS_LOCK_TRACE_ALLOC 3 +#define RS_LOCK_TRACE_FREE 4 +#define RS_LOCK_TRACE_CTRL 5 +#define RS_LOCK_TRACE_MAP 6 +#define RS_LOCK_TRACE_UNMAP 7 + +/** + * Context information for top-level, resource-level, and client-level locking + * operations + */ +struct RS_LOCK_INFO +{ + struct RsClient *pClient; ///< Pointer to client that was locked (if any) + struct RsClient *pSecondClient; ///< Pointer to second client, for dual-client locking + RsResourceRef *pContextRef; ///< User-defined reference + struct RsSession *pSession; ///< Session object to be locked, if any + NvU32 flags; ///< RS_LOCK_FLAGS_* + NvU32 state; ///< RS_LOCK_STATE_* + NvU32 gpuMask; + NvU8 traceOp; ///< RS_LOCK_TRACE_* operation for lock-metering + NvU32 traceClassId; ///< Class of initial resource that was locked for lock metering +}; + +struct RS_RES_ALLOC_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hParent; ///< [in] The handle of the resource's parent. This may be a client or another resource. 
+ NvHandle hResource; ///< [inout] Server will assign a handle if this is 0, or else try the value provided + NvU32 externalClassId; ///< [in] External class ID of resource + NvHandle hDomain; ///< UNUSED + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + struct RsClient *pClient; ///< [out] Cached client + RsResourceRef *pResourceRef; ///< [out] Cached resource reference + NvU32 allocFlags; ///< [in] Allocation flags + NvU32 allocState; ///< [inout] Allocation state + API_SECURITY_INFO *pSecInfo; + + void *pAllocParams; ///< [in] Copied-in allocation parameters + + // ... Dupe alloc + struct RsClient *pSrcClient; ///< The client that is sharing the resource + RsResourceRef *pSrcRef; ///< Reference to the resource that will be shared + + RS_ACCESS_MASK *pRightsRequested; ///< [in] Access rights requested on the new resource + // Buffer for storing contents of user mask. Do not use directly, use pRightsRequested instead. + RS_ACCESS_MASK rightsRequestedCopy; + + RS_ACCESS_MASK *pRightsRequired; ///< [in] Access rights required to alloc this object type +}; + +struct RS_RES_DUP_PARAMS_INTERNAL +{ + NvHandle hClientSrc; ///< [in] The handle of the source resource's client + NvHandle hResourceSrc; ///< [in] The handle of the source resource. + NvHandle hClientDst; ///< [in] The handle of the destination resource's client (may be different from source client) + NvHandle hParentDst; ///< [in] The handle of the destination resource's parent. + NvHandle hResourceDst; ///< [inout] The handle of the destination resource. Generated if 0. + void *pShareParams; ///< [in] Copied-in sharing parameters + NvU32 flags; ///< [in] Flags to denote special cases ( Bug: 2859347 to track removal) + // Internal use only + struct RsClient *pSrcClient; + RsResourceRef *pSrcRef; + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +struct RS_RES_SHARE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the owner's client + NvHandle hResource; ///< [in] The handle of the resource. 
+ RS_SHARE_POLICY *pSharePolicy; ///< [in] The policy to share with + + // Internal use only + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +#define RS_IS_COPY_CTOR(pParams) ((pParams)->pSrcRef != NULL) + +struct RS_RES_FREE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hResource; ///< [in] The handle of the resource + NvBool bInvalidateOnly; ///< [in] Free the resource, but don't release its handle + NvHandle hDomain; ///< UNUSED + + // Internal use only + NvBool bHiPriOnly; ///< [in] Only free if this is a high priority resource + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + NvU32 freeFlags; ///< [in] Flags for the free operation + NvU32 freeState; ///< [inout] Free state + RsResourceRef *pResourceRef; ///< [inout] Cached RsResourceRef + NV_STATUS status; ///< [out] Status of free operation + API_SECURITY_INFO *pSecInfo; ///< [in] Security info +}; + +struct NVOC_EXPORTED_METHOD_DEF; +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct OBJGPUGRP; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + + + +// +// RS_RES_CONTROL_PARAMS +// +// This structure encapsulates data sent to the cmd-specific rmctrl +// handlers, along with the arguments supplied by the requesting +// client (hClient, hObject, cmd, pParams, paramsSize). +// +struct RS_RES_CONTROL_PARAMS_INTERNAL +{ + NvHandle hClient; // client-specified NV01_ROOT object handle + NvHandle hObject; // client-specified object handle + NvU32 cmd; // client-specified command # + NvU32 flags; // flags related to control call execution + void *pParams; // client-specified params (in kernel space) + NvU32 paramsSize; // client-specified size of pParams in bytes + + NvHandle hParent; // handle of hObject parent + struct OBJGPU *pGpu; // ptr to OBJGPU struct if applicable + struct OBJGPUGRP *pGpuGrp; // ptr to OBJGPUGRP struct if applicable + RsResourceRef *pResourceRef; // ptr to RsResourceRef if object is managed by + // Resource Server + API_SECURITY_INFO secInfo; // information on privilege level and pointer location (user/kernel) + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + RS_CONTROL_COOKIE *pCookie; + NvBool bInternal; // True if control call was not issued from an external client + NvBool bDeferredApi; // Indicates ctrl is being dispatched via deferred API + + struct RS_RES_CONTROL_PARAMS_INTERNAL *pLegacyParams; // RS-TODO removeme +}; + +struct RS_RES_DTOR_PARAMS +{ + CALL_CONTEXT *pFreeContext; + RS_RES_FREE_PARAMS_INTERNAL *pFreeParams; +}; + +/** + * Base class for all resources. Mostly a pure virtual interface which + * should be overridden to implement resource-specific behavior.
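+ *
+ * A minimal dispatch sketch (illustrative; assumes a constructed object of
+ * this class or a subclass): virtual methods are reached through the
+ * per-object function pointers installed by __nvoc_init_funcTable_RsResource
+ * above, normally via the dispatch macros defined below:
+ *
+ *   NvU32 refCount = resGetRefCount(pResource);
+ *   // ...which expands to resGetRefCount_DISPATCH(pResource), i.e.
+ *   // pResource->__resGetRefCount__(pResource)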
+ */ +#ifdef NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + NvBool (*__resCanCopy__)(struct RsResource *); + void (*__resPreDestruct__)(struct RsResource *); + NV_STATUS (*__resControlLookup__)(struct RsResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__resControl__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__resControlFilter__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__resControl_Prologue__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__resControl_Epilogue__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__resMap__)(struct RsResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__resUnmap__)(struct RsResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__resMapTo__)(struct RsResource *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__resUnmapFrom__)(struct RsResource *, RS_RES_UNMAP_FROM_PARAMS *); + NvU32 (*__resGetRefCount__)(struct RsResource *); + NvBool (*__resAccessCallback__)(struct RsResource *, struct RsClient *, void *, RsAccessRight); + NvBool (*__resShareCallback__)(struct RsResource *, struct RsClient *, RsResourceRef *, RS_SHARE_POLICY *); + void (*__resAddAdditionalDependants__)(struct RsClient *, struct RsResource *, RsResourceRef *); + RsResourceRef *pResourceRef; + struct RS_RES_DTOR_PARAMS dtorParams; + NvBool bConstructed; +}; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +#define __staticCast_RsResource(pThis) \ + ((pThis)->__nvoc_pbase_RsResource) + +#ifdef __nvoc_rs_resource_h_disabled +#define __dynamicCast_RsResource(pThis) ((RsResource*)NULL) +#else //__nvoc_rs_resource_h_disabled +#define __dynamicCast_RsResource(pThis) \ + ((RsResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsResource))) +#endif //__nvoc_rs_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsResource(RsResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsResource(RsResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RsResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RsResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define resCanCopy(pResource) resCanCopy_DISPATCH(pResource) +#define resPreDestruct(pResource) resPreDestruct_DISPATCH(pResource) +#define resControlLookup(pResource, pParams, ppEntry) resControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define resControl(pResource, pCallContext, pParams) resControl_DISPATCH(pResource, pCallContext, pParams) +#define resControlFilter(pResource, pCallContext, pParams) resControlFilter_DISPATCH(pResource, pCallContext, 
pParams) +#define resControl_Prologue(pResource, pCallContext, pParams) resControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define resControl_Epilogue(pResource, pCallContext, pParams) resControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define resMap(pResource, pCallContext, pParams, pCpuMapping) resMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define resUnmap(pResource, pCallContext, pCpuMapping) resUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define resMapTo(pResource, pParams) resMapTo_DISPATCH(pResource, pParams) +#define resUnmapFrom(pResource, pParams) resUnmapFrom_DISPATCH(pResource, pParams) +#define resGetRefCount(pResource) resGetRefCount_DISPATCH(pResource) +#define resAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) resAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define resShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) resShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define resAddAdditionalDependants(pClient, pResource, pReference) resAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +NvBool resCanCopy_IMPL(struct RsResource *pResource); + +static inline NvBool resCanCopy_DISPATCH(struct RsResource *pResource) { + return pResource->__resCanCopy__(pResource); +} + +void resPreDestruct_IMPL(struct RsResource *pResource); + +static inline void resPreDestruct_DISPATCH(struct RsResource *pResource) { + pResource->__resPreDestruct__(pResource); +} + +NV_STATUS resControlLookup_IMPL(struct RsResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry); + +static inline NV_STATUS resControlLookup_DISPATCH(struct RsResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__resControlLookup__(pResource, pParams, ppEntry); +} + +NV_STATUS resControl_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS resControl_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__resControl__(pResource, pCallContext, pParams); +} + +NV_STATUS resControlFilter_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS resControlFilter_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__resControlFilter__(pResource, pCallContext, pParams); +} + +NV_STATUS resControl_Prologue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS resControl_Prologue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__resControl_Prologue__(pResource, pCallContext, pParams); +} + +void resControl_Epilogue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline void resControl_Epilogue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__resControl_Epilogue__(pResource, pCallContext, pParams); +} + +NV_STATUS 
resMap_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS resMap_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__resMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS resUnmap_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS resUnmap_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__resUnmap__(pResource, pCallContext, pCpuMapping); +} + +NV_STATUS resMapTo_IMPL(struct RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); + +static inline NV_STATUS resMapTo_DISPATCH(struct RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__resMapTo__(pResource, pParams); +} + +NV_STATUS resUnmapFrom_IMPL(struct RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); + +static inline NV_STATUS resUnmapFrom_DISPATCH(struct RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__resUnmapFrom__(pResource, pParams); +} + +NvU32 resGetRefCount_IMPL(struct RsResource *pResource); + +static inline NvU32 resGetRefCount_DISPATCH(struct RsResource *pResource) { + return pResource->__resGetRefCount__(pResource); +} + +NvBool resAccessCallback_IMPL(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +static inline NvBool resAccessCallback_DISPATCH(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__resAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NvBool resShareCallback_IMPL(struct RsResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool resShareCallback_DISPATCH(struct RsResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__resShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +void resAddAdditionalDependants_IMPL(struct RsClient *pClient, struct RsResource *pResource, RsResourceRef *pReference); + +static inline void resAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RsResource *pResource, RsResourceRef *pReference) { + pResource->__resAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS resConstruct_IMPL(struct RsResource *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_resConstruct(arg_pResource, arg_pCallContext, arg_pParams) resConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void resDestruct_IMPL(struct RsResource *pResource); +#define __nvoc_resDestruct(pResource) resDestruct_IMPL(pResource) +NV_STATUS resSetFreeParams_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_rs_resource_h_disabled +static inline NV_STATUS resSetFreeParams(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_resource_h_disabled +#define resSetFreeParams(pResource, pCallContext, 
pParams) resSetFreeParams_IMPL(pResource, pCallContext, pParams) +#endif //__nvoc_rs_resource_h_disabled + +NV_STATUS resGetFreeParams_IMPL(struct RsResource *pResource, struct CALL_CONTEXT **ppCallContext, struct RS_RES_FREE_PARAMS_INTERNAL **ppParams); +#ifdef __nvoc_rs_resource_h_disabled +static inline NV_STATUS resGetFreeParams(struct RsResource *pResource, struct CALL_CONTEXT **ppCallContext, struct RS_RES_FREE_PARAMS_INTERNAL **ppParams) { + NV_ASSERT_FAILED_PRECOMP("RsResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_resource_h_disabled +#define resGetFreeParams(pResource, ppCallContext, ppParams) resGetFreeParams_IMPL(pResource, ppCallContext, ppParams) +#endif //__nvoc_rs_resource_h_disabled + +#undef PRIVATE_FIELD + + +/* @} */ + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + + +/** + * @defgroup RsCpuMapping + * @addtogroup RsCpuMapping + * @{*/ +struct RsCpuMapping +{ + NvU64 offset; + NvU64 length; + NvU32 flags; + NvP64 pLinearAddress; + RsResourceRef *pContextRef; ///< Context resource that may be needed for the mapping + void *pContext; ///< Additional context data for the mapping + NvU32 processId; + + RS_CPU_MAPPING_PRIVATE *pPrivate; ///< Opaque struct allocated and freed by resserv on behalf of the user +}; +MAKE_LIST(RsCpuMappingList, RsCpuMapping); + +/** + * CPU mapping parameters + */ +struct RS_CPU_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; ///< [in] Offset into the resource + NvU64 length; ///< [in] Size of the region to map + NvP64 *ppCpuVirtAddr; + NvU32 flags; ///< [in] Resource-specific flags + + // Passed from RM into CpuMapping + NvU32 protect; ///< [in] Protection flags + NvBool bKernel; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU unmapping params for resource server tests + */ +struct RS_CPU_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; ///< [in] Address of mapped memory + NvU32 flags; ///< [in] Resource-specific flags + NvU32 processId; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + // RM-only + void *pProcessHandle; + + NvBool (*fnFilter)(RsCpuMapping*); ///< [in] Mapping-filter function + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU mapping back-reference + */ +struct RS_CPU_MAPPING_BACK_REF +{ + RsCpuMapping *pCpuMapping; ///< Mapping linked to this backref + RsResourceRef *pBackRef; ///< Resource reference with mapping +}; +MAKE_LIST(RsCpuMappingBackRefList, RS_CPU_MAPPING_BACK_REF); +/* @} */ + +/** + * @defgroup RsInterMapping + * @addtogroup RsInterMapping + * @{*/ +struct RS_INTER_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU64 offset; + NvU64 length; + NvU32 flags; + NvU64 dmaOffset; ///< [inout] RS-TODO rename this + void *pMemDesc; ///< [out] + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking 
flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_MAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +struct RS_INTER_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU32 flags; + NvU64 dmaOffset; ///< [in] RS-TODO rename this + void *pMemDesc; ///< MEMORY_DESCRIPTOR * + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_UNMAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +/** + * Inter-mapping information + * Used to keep track of inter-mappings and unmap them on free + */ +struct RsInterMapping +{ + // RsResourceRef *pMapperRef ///< (Implied) the resource that created and owns this mapping (this resource) + RsResourceRef *pMappableRef; ///< The resource being mapped by the mapper (e.g. hMemory) + RsResourceRef *pContextRef; ///< A resource used to provide additional context for the mapping (e.g. hDevice) + NvU32 flags; ///< Flags passed when mapping, same flags also passed when unmapping + NvU64 dmaOffset; + void *pMemDesc; +}; +MAKE_LIST(RsInterMappingList, RsInterMapping); + +/** + * Inter-mapping back-reference + */ +struct RS_INTER_MAPPING_BACK_REF +{ + RsResourceRef *pMapperRef; ///< Resource reference with mapping + RsInterMapping *pMapping; ///< Pointer to the inter-mapping linked to this backref +}; +MAKE_LIST(RsInterMappingBackRefList, RS_INTER_MAPPING_BACK_REF); +/* @} */ + +typedef struct RS_RESOURCE_DESC RS_RESOURCE_DESC; +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** + * A reference to a resource that has been allocated in RM. + */ +struct RsResourceRef +{ + struct RsClient *pClient; ///< Pointer to the client that owns the ref + struct RsResource *pResource; ///< Pointer to the actual resource + NvHandle hResource; ///< Resource handle + struct RsResourceRef *pParentRef; ///< Parent resource reference + RsIndex childRefMap; ///< Child reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + + /** + * Cached reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * The resource reference cache is a one-way association between this resource reference and + * any other resource reference. Resource server does not populate the cache so it is up to the + * resource implementation to manage it. clientRefIter can be used to iterate this cache. + */ + RsIndex cachedRefMap; + + /** + * Dependants reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * A map of all resources that strongly depend on this resource. + */ + RsIndex depRefMap; + + /** + * Dependants back-reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * AKA dependencies map + * + * A map of all resources that this resource strongly depends on. + */ + RsIndex depBackRefMap; + + /** + * Policy under which this resource can be shared with other clients + */ + RsShareList sharePolicyList; + NvBool bSharePolicyListModified; + + /** + * A mask of the access rights that the owner client has on this object. 
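+ *
+ * (Illustrative note rather than original documentation: rights in this
+ * mask are granted when the object is allocated and are meant to be
+ * queried through the helpers in resserv/rs_access.h; the helper and
+ * right names here are assumptions, e.g.
+ *   RS_ACCESS_MASK_TEST(&pResourceRef->accessMask, RS_ACCESS_DUP_OBJECT)
+ * to test for a single right.)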
+ */ + RS_ACCESS_MASK accessMask; + + const RS_RESOURCE_DESC *pResourceDesc; ///< Cached pointer to the resource descriptor + NvU32 internalClassId; ///< Internal resource class id + NvU32 externalClassId; ///< External resource class id + NvU32 depth; ///< The depth of this reference in the resource graph + NvBool bInvalidated; ///< Reference has been freed but not removed yet + + RsCpuMappingList cpuMappings; ///< List of CPU mappings to the resource from this resource reference + RsCpuMappingBackRefList backRefs; ///< List of references that have this reference as a mapping context + + RsInterMappingList interMappings; ///< List of inter-resource mappings created by this resource + RsInterMappingBackRefList interBackRefs; ///< List of inter-resource mappings this resource has been mapped into + + struct RsSession *pSession; ///< If set, this ref depends on a shared session + struct RsSession *pDependantSession; ///< If set, this ref is depended on by a shared session + + ListNode freeNode; ///< Links to the client's pendingFreeList +}; +MAKE_MAP(RsRefMap, RsResourceRef); +MAKE_INTRUSIVE_LIST(RsRefFreeList, RsResourceRef, freeNode); + + +// Iterator data structure to save state while walking through a list +struct RS_ITERATOR +{ + union + { + RsRefMapIter mapIt; ///< Map iterator for all resource references under a client + RsIndexIter idxIt; ///< Index iterator for child references of a resource reference + }; + + struct RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over + NvU8 type; ///< RS_ITERATE_* + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId +}; + +// Iterator data structure to save state while walking through a resource tree in pre-order +struct RS_ORDERED_ITERATOR +{ + NvS8 depth; ///< Depth of index stack; special value of -1 implies that the scope reference should be iterated over as well + RsIndexIter idxIt[RS_MAX_RESOURCE_DEPTH+1]; ///< Stack of index iterators for child references of a resource reference + + struct RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over +}; + +/** + * Macro for looking up a reference from a resource + */ +#define RES_GET_REF(pResource) (staticCast((pResource), RsResource)->pResourceRef) + +/** + * Macro for looking up a resource handle from a resource + */ +#define RES_GET_HANDLE(pResource) (RES_GET_REF(pResource)->hResource) + +/** + * Macro for looking up a resource's external class from a resource + */ +#define RES_GET_EXT_CLASS_ID(pResource) (RES_GET_REF(pResource)->externalClassId) + +/** + * Macro for looking up a resource's parent handle from a resource + */ +#define RES_GET_PARENT_HANDLE(pResource) (RES_GET_REF(pResource)->pParentRef->hResource) + +/** + * Macro for looking up a client from a resource + */ +#define RES_GET_CLIENT(pResource) (RES_GET_REF(pResource)->pClient) + +/** + * Macro for looking up a client handle from a resource + */ +#define RES_GET_CLIENT_HANDLE(pResource) (RES_GET_REF(pResource)->pClient->hClient) + +/** + * Find a CPU mapping owned by a resource reference + * + * 
+ * @param[in]  pResourceRef
+ * @param[in]  pAddress The CPU virtual address of the mapping to search for
+ * @param[out] ppMapping The returned mapping
+ */
+NV_STATUS refFindCpuMapping(RsResourceRef *pResourceRef, NvP64 pAddress, RsCpuMapping **ppMapping);
+
+/**
+ * Find a CPU mapping owned by a resource reference
+ *
+ * @param[in]  pResourceRef
+ * @param[in]  pAddress The CPU virtual address of the mapping to search for
+ * @param[in]  fnFilter A user-provided filtering function that determines which mappings to ignore.
+ *                      If fnFilter is provided, only mappings for which fnFilter(mapping) returns
+ *                      NV_TRUE will be returned. All mappings will be searched if fnFilter is NULL.
+ * @param[out] ppMapping The returned mapping
+ */
+NV_STATUS refFindCpuMappingWithFilter(RsResourceRef *pResourceRef, NvP64 pAddress, NvBool (*fnFilter)(RsCpuMapping*), RsCpuMapping **ppMapping);
+
+/**
+ * Find the first child object of given type
+ *
+ * @param[in]  pParentRef
+ * @param[in]  internalClassId
+ * @param[in]  bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId
+ * @param[out] ppResourceRef The returned RsResourceRef (Optional)
+ */
+NV_STATUS refFindChildOfType(RsResourceRef *pParentRef, NvU32 internalClassId, NvBool bExactMatch, RsResourceRef **ppResourceRef);
+
+/**
+ * Traverse up the reference parent-child hierarchy to find an ancestor reference of a given type
+ *
+ * @param[in]  pDescendantRef
+ * @param[in]  internalClassId
+ * @param[out] ppAncestorRef The returned RsResourceRef (Optional)
+ */
+NV_STATUS refFindAncestorOfType(RsResourceRef *pDescendantRef, NvU32 internalClassId, RsResourceRef **ppAncestorRef);
+
+/**
+ * Traverse up the reference parent-child hierarchy to determine whether a ref is a descendant of a given ancestor ref
+ *
+ * @param[in] pDescendantRef The node to start searching from (not included in the search)
+ * @param[in] pAncestorRef The node to search for in the parent-child hierarchy
+ */
+NvBool refHasAncestor(RsResourceRef *pDescendantRef, RsResourceRef *pAncestorRef);
+
+/**
+ * Add a new mapping to a reference's mapping list
+ * @param[in]  pResourceRef The reference to add a mapping to
+ * @param[in]  pMapParams The parameters used to initialize the mapping
+ * @param[in]  pContextRef A reference to a resource that provides a context for the mapping
+ * @param[out] ppMapping Pointer to the allocated mapping [optional]
+ */
+NV_STATUS refAddMapping(RsResourceRef *pResourceRef, RS_CPU_MAP_PARAMS *pMapParams,
+                        RsResourceRef *pContextRef, RsCpuMapping **ppMapping);
+
+/**
+ * Remove an existing mapping from a reference's mapping list and remove back-references to the mapping.
+ * @param[in] pResourceRef The reference to remove the mapping from
+ * @param[in] pMapping Pointer to the allocated mapping
+ */
+void refRemoveMapping(RsResourceRef *pResourceRef, RsCpuMapping *pMapping);
+
+/**
+ * Allocate the user-controlled private pointer within the RsCpuMapping struct.
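+ *
+ * For example, an implementation might allocate a driver-defined bookkeeping
+ * struct here (illustrative sketch; MY_MAPPING_PRIVATE and the pPrivate field
+ * name are hypothetical):
+ * @code
+ *     typedef struct { NvU64 userCookie; } MY_MAPPING_PRIVATE;
+ *
+ *     MY_MAPPING_PRIVATE *pPriv = portMemAllocNonPaged(sizeof(*pPriv));
+ *     if (pPriv == NULL)
+ *         return NV_ERR_NO_MEMORY;
+ *     portMemSet(pPriv, 0, sizeof(*pPriv));
+ *     pMapping->pPrivate = pPriv;
+ *     return NV_OK;
+ * @endcode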
+ * Resserv will call this function to alloc the private struct when the mapping is created + * @param[in] pMapParams The parameters which were used to create the mapping + * @param[inout] pMapping Pointer to the mapping whose private struct should be allocated + */ +NV_STATUS refAllocCpuMappingPrivate(RS_CPU_MAP_PARAMS *pMapParams, RsCpuMapping *pMapping); + +/** + * Free the user-controlled private pointer within the RsCpuMapping struct. + * Resserv will call this function to free the private struct when the mapping is removed + * @param[inout] pMapping Pointer to the mapping whose private struct should be freed + */ +void refFreeCpuMappingPrivate(RsCpuMapping *pMapping); + +/** + * Add a dependency between this resource reference and a dependent reference. + * If this reference is freed, the dependent will be invalidated and torn down. + * + * @note Dependencies are implicit between a parent resource reference and child resource reference + * @note No circular dependency checking is performed + */ +NV_STATUS refAddDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Remove the dependency between this resource reference and a dependent resource reference. + */ +NV_STATUS refRemoveDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Find, Add, or Remove an inter-mapping between two resources to the Mapper's list of inter-mappings + * Inter-mappings are stored in the Mapper, and are matched by both the MappableRef and offset. + * + * @param[in] pMapperRef The reference which owns the inter-mapping + * @param[in] pMappableRef The reference which was mapped from to create the inter-mapping + * If NULL, will be ignored while matching inter-mappings + * @param[in] dmaOffset The offset value assigned while mapping, used to identify mappings + * @param[in] pContextRef A reference used during mapping and locking for additional context, used to identify mappings + * @param[inout] ppMapping Writes the resulting inter-mapping, if successfully created (Add) or found (Find) + * @param[in] pMapping The inter-mapping to remove (Remove) + */ +NV_STATUS refFindInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, NvU64 dmaOffset, RsInterMapping **ppMapping); +NV_STATUS refAddInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, RsInterMapping **ppMapping); +void refRemoveInterMapping(RsResourceRef *pMapperRef, RsInterMapping *pMapping); + +/** + * Store a resource reference in another reference's cache. 
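+ *
+ * A minimal usage sketch (assumes both references were already looked up by
+ * the caller):
+ * @code
+ *     NV_STATUS status = refCacheRef(pParentRef, pResourceRef);
+ *     // ...
+ *     // later, before the cached reference is torn down:
+ *     refUncacheRef(pParentRef, pResourceRef);
+ * @endcode
+ *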
+ * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to store in the cache + */ +NV_STATUS refCacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Remove a resource reference from another reference's cache + * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to de-index + */ +NV_STATUS refUncacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Determine whether a reference is queued for removal + * @param[in] pResourceRef + * @param[in] pClient + */ +NvBool refPendingFree(RsResourceRef *pResourceRef, struct RsClient *pClient); + + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RS_RESOURCE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c new file mode 100644 index 0000000..2af2871 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c @@ -0,0 +1,313 @@ +#define NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_server_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x830542 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_funcTable_RsShared(RsShared*); +NV_STATUS __nvoc_ctor_RsShared(RsShared*); +void __nvoc_init_dataField_RsShared(RsShared*); +void __nvoc_dtor_RsShared(RsShared*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsShared; + +static const struct NVOC_RTTI __nvoc_rtti_RsShared_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsShared, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsShared_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsShared, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsShared = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_RsShared_RsShared, + &__nvoc_rtti_RsShared_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsShared), + /*classId=*/ classId(RsShared), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsShared", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsShared, + /*pCastInfo=*/ &__nvoc_castinfo_RsShared, + /*pExportInfo=*/ &__nvoc_export_info_RsShared +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsShared = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsShared(RsShared *pThis) { + __nvoc_shrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsShared(RsShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsShared(RsShared *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto 
__nvoc_ctor_RsShared_fail_Object; + __nvoc_init_dataField_RsShared(pThis); + + status = __nvoc_shrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RsShared_fail__init; + goto __nvoc_ctor_RsShared_exit; // Success + +__nvoc_ctor_RsShared_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsShared_fail_Object: +__nvoc_ctor_RsShared_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsShared_1(RsShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_RsShared(RsShared *pThis) { + __nvoc_init_funcTable_RsShared_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RsShared(RsShared *pThis) { + pThis->__nvoc_pbase_RsShared = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_RsShared(pThis); +} + +NV_STATUS __nvoc_objCreate_RsShared(RsShared **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + RsShared *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsShared)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsShared)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsShared); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsShared(pThis); + status = __nvoc_ctor_RsShared(pThis); + if (status != NV_OK) goto __nvoc_objCreate_RsShared_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsShared_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsShared(RsShared **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_RsShared(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x830d90 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_RsSession(RsSession*); +void __nvoc_init_funcTable_RsSession(RsSession*); +NV_STATUS __nvoc_ctor_RsSession(RsSession*); +void __nvoc_init_dataField_RsSession(RsSession*); +void __nvoc_dtor_RsSession(RsSession*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsSession; + +static const struct NVOC_RTTI __nvoc_rtti_RsSession_RsSession = { + /*pClassDef=*/ &__nvoc_class_def_RsSession, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsSession, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsSession_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsSession, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsSession_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsSession, __nvoc_base_RsShared), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsSession = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_RsSession_RsSession, + &__nvoc_rtti_RsSession_RsShared, + &__nvoc_rtti_RsSession_Object, + }, +}; + +const struct NVOC_CLASS_DEF 
__nvoc_class_def_RsSession = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsSession), + /*classId=*/ classId(RsSession), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsSession", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsSession, + /*pCastInfo=*/ &__nvoc_castinfo_RsSession, + /*pExportInfo=*/ &__nvoc_export_info_RsSession +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsSession = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_RsSession(RsSession *pThis) { + __nvoc_sessionDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsSession(RsSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_RsSession(RsSession *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_RsSession_fail_RsShared; + __nvoc_init_dataField_RsSession(pThis); + + status = __nvoc_sessionConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RsSession_fail__init; + goto __nvoc_ctor_RsSession_exit; // Success + +__nvoc_ctor_RsSession_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_RsSession_fail_RsShared: +__nvoc_ctor_RsSession_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsSession_1(RsSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__sessionRemoveDependant__ = &sessionRemoveDependant_IMPL; + + pThis->__sessionRemoveDependency__ = &sessionRemoveDependency_IMPL; +} + +void __nvoc_init_funcTable_RsSession(RsSession *pThis) { + __nvoc_init_funcTable_RsSession_1(pThis); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_RsSession(RsSession *pThis) { + pThis->__nvoc_pbase_RsSession = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_RsSession(pThis); +} + +NV_STATUS __nvoc_objCreate_RsSession(RsSession **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + RsSession *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsSession)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsSession)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsSession); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsSession(pThis); + status = __nvoc_ctor_RsSession(pThis); + if (status != NV_OK) goto __nvoc_objCreate_RsSession_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsSession_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsSession(RsSession **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_RsSession(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h new file mode 100644 index 0000000..47e49cd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h @@ -0,0 +1,1062 @@ +#ifndef _G_RS_SERVER_NVOC_H_ +#define _G_RS_SERVER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_rs_server_nvoc.h" + +#ifndef _RS_SERVER_H_ +#define _RS_SERVER_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsServer + * @addtogroup RsServer + * @{*/ + +/** + * Book-keeping for individual client locks + */ +struct CLIENT_ENTRY +{ + PORT_RWLOCK *pLock; + struct RsClient *pClient; + NvHandle hClient; + NvU64 lockOwnerTid; ///< Thread id of the lock owner + +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK lockVal; +#endif +}; + +/** + * Base-class for objects that are shared among multiple + * RsResources (including RsResources from other clients) + */ +#ifdef NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsShared { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + NvS32 refCount; + struct MapNode node; +}; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +#define __staticCast_RsShared(pThis) \ + ((pThis)->__nvoc_pbase_RsShared) + +#ifdef __nvoc_rs_server_h_disabled +#define __dynamicCast_RsShared(pThis) ((RsShared*)NULL) +#else //__nvoc_rs_server_h_disabled +#define __dynamicCast_RsShared(pThis) \ + ((RsShared*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsShared))) +#endif //__nvoc_rs_server_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsShared(RsShared**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsShared(RsShared**, Dynamic*, NvU32); +#define 
__objCreate_RsShared(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_RsShared((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+NV_STATUS shrConstruct_IMPL(struct RsShared *arg_pShared);
+#define __nvoc_shrConstruct(arg_pShared) shrConstruct_IMPL(arg_pShared)
+void shrDestruct_IMPL(struct RsShared *pShared);
+#define __nvoc_shrDestruct(pShared) shrDestruct_IMPL(pShared)
+#undef PRIVATE_FIELD
+
+MAKE_INTRUSIVE_MAP(RsSharedMap, RsShared, node);
+
+/**
+ * Utility class for objects that can reference
+ * multiple client handle spaces. Frees and control calls
+ * that occur on objects which reference an RsSession will
+ * need to acquire pLock first.
+ */
+#ifdef NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct RsSession {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct RsShared __nvoc_base_RsShared;
+    struct Object *__nvoc_pbase_Object;
+    struct RsShared *__nvoc_pbase_RsShared;
+    struct RsSession *__nvoc_pbase_RsSession;
+    void (*__sessionRemoveDependant__)(struct RsSession *, RsResourceRef *);
+    void (*__sessionRemoveDependency__)(struct RsSession *, RsResourceRef *);
+    PORT_RWLOCK *pLock;
+    NvBool bValid;
+    RsResourceRefList dependencies;
+    RsResourceRefList dependants;
+};
+
+#ifndef __NVOC_CLASS_RsSession_TYPEDEF__
+#define __NVOC_CLASS_RsSession_TYPEDEF__
+typedef struct RsSession RsSession;
+#endif /* __NVOC_CLASS_RsSession_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_RsSession
+#define __nvoc_class_id_RsSession 0x830d90
+#endif /* __nvoc_class_id_RsSession */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession;
+
+#define __staticCast_RsSession(pThis) \
+    ((pThis)->__nvoc_pbase_RsSession)
+
+#ifdef __nvoc_rs_server_h_disabled
+#define __dynamicCast_RsSession(pThis) ((RsSession*)NULL)
+#else //__nvoc_rs_server_h_disabled
+#define __dynamicCast_RsSession(pThis) \
+    ((RsSession*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsSession)))
+#endif //__nvoc_rs_server_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_RsSession(RsSession**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_RsSession(RsSession**, Dynamic*, NvU32);
+#define __objCreate_RsSession(ppNewObj, pParent, createFlags) \
+    __nvoc_objCreate_RsSession((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
+
+#define sessionRemoveDependant(pSession, pResourceRef) sessionRemoveDependant_DISPATCH(pSession, pResourceRef)
+#define sessionRemoveDependency(pSession, pResourceRef) sessionRemoveDependency_DISPATCH(pSession, pResourceRef)
+void sessionRemoveDependant_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef);
+
+static inline void sessionRemoveDependant_DISPATCH(struct RsSession *pSession, RsResourceRef *pResourceRef) {
+    pSession->__sessionRemoveDependant__(pSession, pResourceRef);
+}
+
+void sessionRemoveDependency_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef);
+
+static inline void sessionRemoveDependency_DISPATCH(struct RsSession *pSession, RsResourceRef *pResourceRef) {
+    pSession->__sessionRemoveDependency__(pSession, pResourceRef);
+}
+
+NV_STATUS sessionConstruct_IMPL(struct RsSession *arg_pSession);
+#define __nvoc_sessionConstruct(arg_pSession) sessionConstruct_IMPL(arg_pSession)
+void sessionDestruct_IMPL(struct RsSession *pSession);
+#define __nvoc_sessionDestruct(pSession) sessionDestruct_IMPL(pSession)
+NV_STATUS sessionAddDependant_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef);
+#ifdef __nvoc_rs_server_h_disabled
+static inline
NV_STATUS sessionAddDependant(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionAddDependant(pSession, pResourceRef) sessionAddDependant_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS sessionAddDependency_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_server_h_disabled +static inline NV_STATUS sessionAddDependency(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionAddDependency(pSession, pResourceRef) sessionAddDependency_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS sessionCheckLocksForAdd_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_server_h_disabled +static inline NV_STATUS sessionCheckLocksForAdd(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionCheckLocksForAdd(pSession, pResourceRef) sessionCheckLocksForAdd_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +void sessionCheckLocksForRemove_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_server_h_disabled +static inline void sessionCheckLocksForRemove(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); +} +#else //__nvoc_rs_server_h_disabled +#define sessionCheckLocksForRemove(pSession, pResourceRef) sessionCheckLocksForRemove_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +#undef PRIVATE_FIELD + + +// Iterator data structure to save state while walking through a map +struct RS_SHARE_ITERATOR +{ + RsSharedMapIter mapIt; + NvU32 internalClassId; + struct RsShared *pShared; ///< Share that is being iterated over +}; + +/** + * Top-level structure that RMAPI and RM interface with + * + * This class is all that needs to be allocated to use the resource server + * library. + * + * The RsServer interface should be kept as narrow as possible. Map and + * MapTo are added because <1> the unmap variants operate in addresses and not + * handles and <2> having explicit knowledge of map operations in the server is + * helpful when dealing with multiple levels of address spaces (e.g., guest + * user-mode, guest kernel-mode, host kernel-mode). + */ +struct RsServer +{ + /** + * Privilege level determines what objects a server is allowed to allocate, and + * also determines whether additional handle validation needs to be performed. 
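+ *
+ * For example, an embedder picks the privilege level once, at construction
+ * time (illustrative sketch; RS_PRIV_LEVEL_KERNEL is assumed to be one of
+ * the RS_PRIV_LEVEL values defined by resserv):
+ * @code
+ *     RsServer server;
+ *     NV_STATUS status = serverConstruct(&server, RS_PRIV_LEVEL_KERNEL, 0);
+ * @endcode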
+ */
+    RS_PRIV_LEVEL privilegeLevel;
+
+    RsClientList *pClientSortedList; ///< Buckets of linked lists of clients (and their locks) owned by this server
+    NvU32 clientCurrentHandleIndex;
+
+    NvBool bConstructed; ///< Determines whether the server is ready to be used
+    PORT_MEM_ALLOCATOR *pAllocator; ///< Allocator to use for all objects allocated by the server
+
+    PORT_RWLOCK *pClientListLock; ///< Lock that needs to be taken when accessing the client list
+
+    PORT_SPINLOCK *pShareMapLock; ///< Lock that needs to be taken when accessing the shared resource map
+    RsSharedMap shareMap; ///< Map of shared resources
+
+#if (RS_STANDALONE)
+    NvU64 topLockOwnerTid; ///< Thread id of top-lock owner
+    PORT_RWLOCK *pTopLock; ///< Top-level resource server lock
+    PORT_RWLOCK *pResLock; ///< Resource-level resource server lock
+#if LOCK_VAL_ENABLED
+    LOCK_VAL_LOCK topLockVal;
+    LOCK_VAL_LOCK resLockVal;
+#endif
+#endif
+
+    /// Print out a list of all resources that will be freed when a free request is made
+    NvBool bDebugFreeList;
+
+    /// If true, control call param copies will be performed outside the top/api lock
+    NvBool bUnlockedParamCopy;
+
+    /**
+     * Setting this flag to false disables any attempts to
+     * automatically acquire access rights or to control access to resources by
+     * checking for access rights.
+     */
+    NvBool bRsAccessEnabled;
+
+    /**
+     * Mask of interfaces (RS_API_*) that will use a read-only top lock by default
+     */
+    NvU32 roTopLockApiMask;
+
+    /// Share policies which clients default to when no other policies are used
+    RsShareList defaultInheritedSharePolicyList;
+    /// Share policies to apply to all shares, regardless of other policies
+    RsShareList globalInternalSharePolicyList;
+
+    NvU32 internalHandleBase;
+
+    NvU32 activeClientCount;
+    NvU64 activeResourceCount;
+};
+
+/**
+ * Construct a server instance. This must be performed before any other server
+ * operation.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] privilegeLevel Privilege level for this resource server instance
+ * @param[in] maxDomains Maximum number of domains to support, or 0 for the default
+ */
+NV_STATUS serverConstruct(RsServer *pServer, RS_PRIV_LEVEL privilegeLevel, NvU32 maxDomains);
+
+/**
+ * Destroy a server instance. Destructing a server does not guarantee that child domains
+ * and clients will be appropriately freed. serverFreeDomain should be explicitly called
+ * on all allocated domains to ensure all clients and resources get cleaned up.
+ *
+ * @param[in] pServer This server instance
+ */
+NV_STATUS serverDestruct(RsServer *pServer);
+
+/**
+ * Allocate a domain handle. Domain handles are used to track clients created by a domain.
+ *
+ * @param[in]  pServer This server instance
+ * @param[in]  hParentDomain
+ * @param[in]  pAccessControl
+ * @param[out] phDomain
+ *
+ */
+NV_STATUS serverAllocDomain(RsServer *pServer, NvU32 hParentDomain, ACCESS_CONTROL *pAccessControl, NvHandle *phDomain);
+
+/**
+ * Verify that the calling user is allowed to perform the access. This check only
+ * applies to calls from RING_USER or RING_KERNEL. No check is performed in
+ * RING_HOST.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hDomain
+ * @param[in] hClient
+ *
+ */
+NV_STATUS serverValidate(RsServer *pServer, NvU32 hDomain, NvHandle hClient);
+
+/**
+ * Verify that the domain has sufficient permission to allocate the given class.
+ * @param[in] pServer
+ * @param[in] hDomain
+ * @param[in] externalClassId External resource class id
+ */
+NV_STATUS serverValidateAlloc(RsServer *pServer, NvU32 hDomain, NvU32 externalClassId);
+
+/**
+ * Free a domain handle. All clients of this domain will be freed.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hDomain The handle of the domain to free
+ */
+NV_STATUS serverFreeDomain(RsServer *pServer, NvHandle hDomain);
+
+/**
+ * Allocate a client handle. A client handle is required to allocate resources.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams Client allocation parameters
+ */
+NV_STATUS serverAllocClient(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+/**
+ * Free a client handle. All resource references owned by the client will be
+ * freed.
+ *
+ * It is invalid to attempt to free a client from a user other than the one
+ * that allocated it.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] pParams Client free params
+ */
+NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams);
+
+/**
+ * Free a list of client handles. All resource references owned by the clients will be
+ * freed. All priority resources will be freed first across all listed clients.
+ *
+ * It is invalid to attempt to free a client from a user other than the one
+ * that allocated it.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] phClientList The list of client handles to free
+ * @param[in] numClients The number of clients in the list
+ * @param[in] freeState User-defined free state
+ * @param[in] pSecInfo Security Info
+ *
+ */
+NV_STATUS serverFreeClientList(RsServer *pServer, NvHandle *phClientList, NvU32 numClients, NvU32 freeState, API_SECURITY_INFO *pSecInfo);
+
+/**
+ * Allocate a resource.
+ *
+ * It is invalid to attempt to allocate a resource under a client owned by a
+ * user other than the caller.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams The allocation parameters
+ */
+NV_STATUS serverAllocResource(RsServer *pServer, RS_RES_ALLOC_PARAMS *params);
+
+/**
+ * Allocate a ref-counted resource share.
+ *
+ * @param[in]  pServer
+ * @param[in]  pClassInfo NVOC class info for the shared class (must derive from RsShared)
+ * @param[out] ppShare Allocated share
+ */
+NV_STATUS serverAllocShare(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, struct RsShared **ppShare);
+
+/**
+ * Allocate a ref-counted resource share with Halspec parent.
+ *
+ * @param[in]  pServer
+ * @param[in]  pClassInfo NVOC class info for the shared class (must derive from RsShared)
+ * @param[out] ppShare Allocated share
+ * @param[in]  pHalspecParent Parent object whose Halspec can be used for the shared class object
+ */
+NV_STATUS serverAllocShareWithHalspecParent(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, struct RsShared **ppShare, struct Object *pHalspecParent);
+
+/**
+ * Get the ref-count of a resource share.
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NvS32 serverGetShareRefCount(RsServer *pServer, struct RsShared *pShare);
+
+/**
+ * Increment the ref-count of a resource share.
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NV_STATUS serverRefShare(RsServer *pServer, struct RsShared *pShare);
+
+/**
+ * Decrement the ref-count of a resource share. If the ref-count
+ * has reached zero, the resource share will be freed.
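+ *
+ * Typical share lifecycle (illustrative sketch; classInfo(RsShared) stands in
+ * for any NVOC class derived from RsShared, and the share is assumed to be
+ * created holding a single initial reference):
+ * @code
+ *     struct RsShared *pShare = NULL;
+ *     NV_STATUS status = serverAllocShare(pServer, classInfo(RsShared), &pShare);
+ *     if (status == NV_OK)
+ *     {
+ *         serverRefShare(pServer, pShare);  // refcount 1 -> 2
+ *         serverFreeShare(pServer, pShare); // refcount 2 -> 1
+ *         serverFreeShare(pServer, pShare); // refcount 1 -> 0: share is freed
+ *     }
+ * @endcode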
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NV_STATUS serverFreeShare(RsServer *pServer, struct RsShared *pShare);
+
+/**
+ * Get an iterator to the elements in the server's shared object map
+ * @param[in] pServer
+ * @param[in] internalClassId If non-zero, only RsShared that are (or can be
+ *            derived from) the specified class will be returned
+ */
+RS_SHARE_ITERATOR serverShareIter(RsServer *pServer, NvU32 internalClassId);
+
+/**
+ * Advance an iterator over the elements in the server's shared object map
+ */
+NvBool serverShareIterNext(RS_SHARE_ITERATOR*);
+
+
+/**
+ * Allocate a resource. Assumes top-level lock has been taken.
+ *
+ * It is invalid to attempt to allocate a resource under a client owned by a
+ * user other than the caller. User-implemented.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams The allocation parameters
+ */
+extern NV_STATUS serverAllocResourceUnderLock(RsServer *pServer, RS_RES_ALLOC_PARAMS *pAllocParams);
+
+/**
+ * Call the Free RPC for a given resource. Assumes top-level lock has been taken.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pFreeParams The Free parameters
+ */
+extern NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams);
+
+/**
+ * Copy-in parameters supplied by caller, and initialize API state. User-implemented.
+ * @param[in]  pServer
+ * @param[in]  pAllocParams Resource allocation parameters
+ * @param[out] ppApiState User-defined API_STATE; should be allocated by this function
+ */
+extern NV_STATUS serverAllocApiCopyIn(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, API_STATE **ppApiState);
+
+/**
+ * Copy-out parameters supplied by caller, and release API state. User-implemented.
+ * @param[in] pServer
+ * @param[in] status Status of allocation request
+ * @param[in] pApiState API_STATE for the allocation
+ */
+extern NV_STATUS serverAllocApiCopyOut(RsServer *pServer, NV_STATUS status, API_STATE *pApiState);
+
+/**
+ * Obtain a second client handle to lock if required for the allocation.
+ * @param[in] pParams Resource allocation parameters
+ * @param[in] phClient Client to lock, if any
+ */
+extern NV_STATUS serverLookupSecondClient(RS_RES_ALLOC_PARAMS_INTERNAL *pParams, NvHandle *phClient);
+
+/**
+ * Acquires a top-level lock. User-implemented.
+ * @param[in]    pServer
+ * @param[in]    access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released
+ */
+extern NV_STATUS serverTopLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Releases a top-level lock. User-implemented.
+ * @param[in]    pServer
+ * @param[in]    access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Flags indicating the locks that need to be released
+ */
+extern void serverTopLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Acquires a session lock.
+ * @param[in]    access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[in]    pResourceRef Resource reference to take session locks on
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released
+ */
+extern NV_STATUS serverSessionLock_Prologue(LOCK_ACCESS_TYPE access, RsResourceRef *pResourceRef, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Releases a session lock.
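+ *
+ * The prologue/epilogue pair brackets the locked operation (illustrative
+ * sketch; error handling elided):
+ * @code
+ *     NvU32 releaseFlags = 0;
+ *     status = serverSessionLock_Prologue(LOCK_ACCESS_READ, pResourceRef, pLockInfo, &releaseFlags);
+ *     if (status == NV_OK)
+ *     {
+ *         // ... operate on the resource ...
+ *         serverSessionLock_Epilogue(pServer, LOCK_ACCESS_READ, pLockInfo, &releaseFlags);
+ *     }
+ * @endcode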
+ * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverSessionLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + */ +extern NV_STATUS serverResLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverResLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * WAR for additional tasks that must be performed after resource-level locks are released. User-implemented. + * @param[inout] status Allocation status + * @param[in] bClientAlloc Caller is attempting to allocate a client + * @param[inout] pParams Allocation parameters + */ +extern NV_STATUS serverAllocEpilogue_WAR(RsServer *pServer, NV_STATUS status, NvBool bClientAlloc, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams); + +/** + * Free a resource reference and all of its descendants. This will decrease the + * resource's reference count. The resource itself will only be freed if there + * are no more references to it. + * + * It is invalid to attempt to free a resource from a user other than the one that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Same as serverFreeResourceTree except the top-level lock is assumed to have been taken. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags in the dup parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Dup parameters + */ +extern NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Updates the lock flags in the free parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +extern NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags for automatic inter-unmap during free + * + * @param[in] pServer This server instance + * @param[inout] pParams Unmap params, contained pLockInfo will be modified + */ +extern NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Initialize parameters for a recursive call to serverFreeResourceTree. User-implemented. 
+ * @param[in]    hClient
+ * @param[in]    hResource
+ * @param[inout] pParams
+ */
+extern NV_STATUS serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams);
+
+/**
+ * Common operations performed after top locks and client locks are taken, but before
+ * the control call is executed. This includes validating the control call cookie,
+ * looking up locking flags, parameter copy-in, and taking resource locks.
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[in]    pAccess Lock access type
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ */
+NV_STATUS serverControl_Prologue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE *pAccess, NvU32 *pReleaseFlags);
+
+/**
+ * Common operations performed after the control call is executed. This
+ * includes releasing locks and parameter copy-out.
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[in]    access Lock access type
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ * @param[in]    status Control call status
+ */
+NV_STATUS serverControl_Epilogue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE access, NvU32 *pReleaseFlags, NV_STATUS status);
+
+/**
+ * Initialize an NVOC export control call cookie
+ *
+ * @param[in]    pExportedEntry
+ * @param[inout] pCookie
+ */
+extern void serverControl_InitCookie(const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, RS_CONTROL_COOKIE *pCookie);
+
+/**
+ * Validate an NVOC export control call cookie
+ *
+ * @param[in]    pParams
+ * @param[inout] pCookie
+ */
+extern NV_STATUS serverControl_ValidateCookie(RS_RES_CONTROL_PARAMS_INTERNAL *pParams, RS_CONTROL_COOKIE *pCookie);
+
+/**
+ * Copy-in control call parameters
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[inout] pCookie Control call cookie
+ */
+extern NV_STATUS serverControlApiCopyIn(RsServer *pServer,
+                                        RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+                                        RS_CONTROL_COOKIE *pCookie);
+
+/**
+ * Copy-out control call parameters
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[inout] pCookie Control call cookie
+ */
+extern NV_STATUS serverControlApiCopyOut(RsServer *pServer,
+                                         RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+                                         RS_CONTROL_COOKIE *pCookie,
+                                         NV_STATUS rmStatus);
+
+/**
+ * Determine whether an API supports a read-only lock for a given lock
+ * @param[in] pServer ResServ instance
+ * @param[in] lock RS_LOCK_*
+ * @param[in] api RS_API*
+ */
+NvBool serverSupportsReadOnlyLock(RsServer *pServer, RS_LOCK_ENUM lock, RS_API_ENUM api);
+
+/**
+ * Determine whether the current thread has taken the RW API lock
+ * @param[in] pServer ResServ instance
+ */
+extern NvBool serverRwApiLockIsOwner(RsServer *pServer);
+
+/**
+ * Lookup locking flags for a resource alloc
+ *
+ * @param[in]  pServer ResServ instance
+ * @param[in]  lock RS_LOCK_*
+ * @param[in]  pParams Allocation parameters
+ * @param[out] pAccess Computed lock access
+ */
+extern NV_STATUS serverAllocResourceLookupLockFlags(RsServer *pServer,
+                                                    RS_LOCK_ENUM lock,
+                                                    RS_RES_ALLOC_PARAMS_INTERNAL *pParams,
+                                                    LOCK_ACCESS_TYPE *pAccess);
+/**
+ * Lookup locking flags for a resource free
+ *
+ * @param[in]  pServer ResServ instance
+ * @param[in]  lock RS_LOCK_*
+ * @param[in]  pParams Free parameters
+ * @param[out] pAccess
Computed lock access + */ +extern NV_STATUS serverFreeResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a resource copy + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverCopyResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a resource access share + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Share parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverShareResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a control call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Control call parameters + * @param[in] pCookie Control call cookie + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverControlLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for a map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for an unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for an inter-resource map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for an inter-resource unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Fill the server's share policy lists with any default or global policies needed + */ +extern NV_STATUS serverInitGlobalSharePolicies(RsServer *pServer); + +/** + * Issue a control command to a resource + * + * @param[in] pServer This server instance + * @param[in] pParams Control parameters + */ +NV_STATUS serverControl(RsServer *pServer, RS_RES_CONTROL_PARAMS *pParams); + +/** + * Copy a resource owned by one client into another client. + * + * The clients must be in the same client handle space. 
The underlying
+ * resource is not duplicated, but it is refcounted so the resource will
+ * not be freed until the reference count hits zero.
+ *
+ * Copying a resource will fail if the user making the call does not own
+ * the source client.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams Resource sharing parameters
+ */
+NV_STATUS serverCopyResource(RsServer *pServer, RS_RES_DUP_PARAMS *pParams);
+
+/**
+ * Share certain access rights to a resource with other clients using the provided share policy
+ *
+ * The policy entry passed in will be added to the object's share policy list.
+ * If bRevoke is true, the policy will be removed instead.
+ *
+ * Sharing will fail if the user making the call does not own the source client.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] pParams Resource sharing parameters
+ */
+NV_STATUS serverShareResourceAccess(RsServer *pServer, RS_RES_SHARE_PARAMS *pParams);
+
+/**
+ * Creates a CPU mapping of the resource in the virtual address space of the process.
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in]    pServer This server instance
+ * @param[in]    hClient Client handle of the resource to map
+ * @param[in]    hResource Handle of the resource to map
+ * @param[inout] pParams CPU mapping parameters
+ */
+NV_STATUS serverMap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_MAP_PARAMS *pParams);
+
+/**
+ * Release a CPU virtual address mapping
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hClient Client handle of the resource to unmap
+ * @param[in] hResource Handle of the resource to unmap
+ * @param[in] pParams CPU unmapping parameters
+ */
+NV_STATUS serverUnmap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_UNMAP_PARAMS *pParams);
+
+/**
+ * Pre-map operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pMapParams CPU mapping parameters
+ */
+NV_STATUS serverMap_Prologue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams);
+
+/**
+ * Post-map operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pMapParams CPU mapping parameters
+ */
+void serverMap_Epilogue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams);
+
+/**
+ * Pre-unmap operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pUnmapParams CPU unmapping parameters
+ */
+NV_STATUS serverUnmap_Prologue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Post-unmap operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pUnmapParams CPU unmapping parameters
+ */
+void serverUnmap_Epilogue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Creates an inter-mapping between two resources
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams mapping parameters
+ */
+NV_STATUS serverInterMap(RsServer *pServer, RS_INTER_MAP_PARAMS *pParams);
+
+/**
+ * Release an inter-mapping between two resources
+ *
+ * @param[in] pServer This server instance
+ * @param[in] pParams unmapping parameters
+ */
+NV_STATUS serverInterUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams);
+
+/**
+ * Pre-inter-map operations. Called with top/client locks acquired.
+ * This function acquires resource locks.
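+ *
+ * Together with serverInterMap_Epilogue this brackets the actual mapping step
+ * (illustrative sketch of the call ordering; error handling elided):
+ * @code
+ *     NvU32 releaseFlags = 0;
+ *     status = serverInterMap_Prologue(pServer, pMapperRef, pMappableRef, pMapParams, &releaseFlags);
+ *     if (status == NV_OK)
+ *     {
+ *         // ... create the mapping, e.g. fill pMapParams->dmaOffset ...
+ *         serverInterMap_Epilogue(pServer, pMapParams, &releaseFlags);
+ *     }
+ * @endcode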
+ * + * @param[in] pServer + * @param[in] pMapperRef The resource that can be used to create the mapping + * @param[in] pMappableRef The resource that can be mapped + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverInterMap_Prologue(RsServer *pServer, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags); + +/** + * Post-inter-map operations. Called with top, client, and resource locks acquired. + * This function releases resource locks. + * + * @param[in] pServer + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +void serverInterMap_Epilogue(RsServer *pServer, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags); + +/** + * Pre-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pParams mapping parameters + */ +NV_STATUS serverInterUnmap_Prologue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pParams mapping parameters + */ +void serverInterUnmap_Epilogue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Acquire a client pointer from a client handle. The caller is responsible for + * ensuring that lock ordering is not violated (otherwise there can be + * deadlock): clients must be locked in increasing order of client index (not + * handle). + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverAcquireClient(RsServer *pServer, NvHandle hClient, LOCK_ACCESS_TYPE lockAccess, struct RsClient **ppClient); + +/** + * Release a client pointer + * + * @param[in] pServer This server instance + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pClient Pointer to the RsClient + */ +NV_STATUS serverReleaseClient(RsServer *pServer, LOCK_ACCESS_TYPE lockAccess, struct RsClient *pClient); + +/** + * Get a client pointer from a client handle without taking any locks. + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverGetClientUnderLock(RsServer *pServer, NvHandle hClient, struct RsClient **ppClient); + +/** + * Get the count of clients allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU32 serverGetClientCount(RsServer *pServer); + +/** + * Get the count of resources allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU64 serverGetResourceCount(RsServer *pServer); + +/** + * Swap a TLS call context entry and increment the TLS entry refcount. + * A new TLS entry for call context will be allocated if necessary. + * + * @note This should be paired with a corresponding resservRestoreTlsCallContext call + */ +NV_STATUS resservSwapTlsCallContext(CALL_CONTEXT **ppOldCallContext, CALL_CONTEXT *pNewCallContext); + +/** + * Get the current TLS call context. This will not increment a refcount on the TLS entry. + */ +CALL_CONTEXT *resservGetTlsCallContext(void); + +/** + * Set a TLS call context entry and decrement the TLS entry refcount. 
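+ *
+ * Swap/restore usage sketch (newContext is a caller-owned CALL_CONTEXT):
+ * @code
+ *     CALL_CONTEXT *pOldContext = NULL;
+ *     NV_STATUS status = resservSwapTlsCallContext(&pOldContext, &newContext);
+ *     if (status == NV_OK)
+ *     {
+ *         // ... code that runs with the new TLS call context ...
+ *         resservRestoreTlsCallContext(pOldContext);
+ *     }
+ * @endcode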
+ * @note This should be paired with a corresponding resservSwapTlsCallContext call
+ */
+NV_STATUS resservRestoreTlsCallContext(CALL_CONTEXT *pOldCallContext);
+
+/**
+ * Find a resource reference of a given type from the TLS call context
+ * @param[in] internalClassId Only return a reference if it matches this type
+ * @param[in] bSearchAncestors Search parents of the call context resource ref
+ */
+RsResourceRef *resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_RS_SERVER_NVOC_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h
new file mode 100644
index 0000000..37cfbb7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h
@@ -0,0 +1,62 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * WARNING: This is an autogenerated file. DO NOT EDIT.
+ * This file is generated using below files:
+ * template file: kernel/inc/vgpu/gt_sdk-structures.h
+ * definition file: kernel/inc/vgpu/sdk-structures.def
+ */
+
+
+#ifdef SDK_STRUCTURES
+// These are copies of SDK structures that will be used for the communication between the vmioplugin & guest RM.
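+//
+// Illustrative sketch (not generated code): the unversioned *_v alias below
+// tracks the current version of each structure, so plugin code can be written
+// against the unversioned name. For example, with hypothetical handles:
+//
+//     NVOS00_PARAMETERS_v params;           // currently NVOS00_PARAMETERS_v03_00
+//     params.hRoot         = hClient;
+//     params.hObjectParent = hParent;
+//     params.hObjectOld    = hObject;
+//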
+typedef struct NVOS00_PARAMETERS_v03_00 +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +} NVOS00_PARAMETERS_v03_00; + +typedef NVOS00_PARAMETERS_v03_00 NVOS00_PARAMETERS_v; + +typedef struct NVOS55_PARAMETERS_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvHandle hClientSrc; + NvHandle hObjectSrc; + NvU32 flags; + NvU32 status; +} NVOS55_PARAMETERS_v03_00; + +typedef NVOS55_PARAMETERS_v03_00 NVOS55_PARAMETERS_v; + + +#endif + +#ifdef SDK_ARRAY_LENGTH_FUNCTIONS + +#endif + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c new file mode 100644 index 0000000..9a64a73 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c @@ -0,0 +1,323 @@ +#define NVOC_STANDARD_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_standard_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x897bf7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_StandardMemory(StandardMemory*); +void __nvoc_init_funcTable_StandardMemory(StandardMemory*); +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_StandardMemory(StandardMemory*); +void __nvoc_dtor_StandardMemory(StandardMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_StandardMemory; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_StandardMemory = { + /*pClassDef=*/ &__nvoc_class_def_StandardMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_StandardMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ 
NV_OFFSETOF(StandardMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_StandardMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_StandardMemory_StandardMemory, + &__nvoc_rtti_StandardMemory_Memory, + &__nvoc_rtti_StandardMemory_RmResource, + &__nvoc_rtti_StandardMemory_RmResourceCommon, + &__nvoc_rtti_StandardMemory_RsResource, + &__nvoc_rtti_StandardMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(StandardMemory), + /*classId=*/ classId(StandardMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "StandardMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_StandardMemory, + /*pCastInfo=*/ &__nvoc_castinfo_StandardMemory, + /*pExportInfo=*/ &__nvoc_export_info_StandardMemory +}; + +static NvBool __nvoc_thunk_StandardMemory_resCanCopy(struct RsResource *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char *)pStandardMemory) - __nvoc_rtti_StandardMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemCheckMemInterUnmap(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemControl(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemUnmap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemGetMemInterMapParams(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemGetMemoryMappingDescriptor(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemGetMapAddrSpace(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_stdmemShareCallback(struct StandardMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemControlFilter(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_StandardMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_stdmemAddAdditionalDependants(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_stdmemGetRefCount(struct StandardMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemMapTo(struct StandardMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_stdmemControl_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemIsReady(struct StandardMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemCheckCopyPermissions(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_stdmemPreDestruct(struct StandardMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemUnmapFrom(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_stdmemControl_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemControlLookup(struct StandardMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemMap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_stdmemAccessCallback(struct StandardMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct 
NVOC_EXPORT_INFO __nvoc_export_info_StandardMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_StandardMemory(StandardMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_StandardMemory(StandardMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_StandardMemory_fail_Memory; + __nvoc_init_dataField_StandardMemory(pThis); + + status = __nvoc_stdmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_StandardMemory_fail__init; + goto __nvoc_ctor_StandardMemory_exit; // Success + +__nvoc_ctor_StandardMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_StandardMemory_fail_Memory: +__nvoc_ctor_StandardMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_StandardMemory_1(StandardMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__stdmemCanCopy__ = &stdmemCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_StandardMemory_resCanCopy; + + pThis->__stdmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_stdmemCheckMemInterUnmap; + + pThis->__stdmemControl__ = &__nvoc_thunk_Memory_stdmemControl; + + pThis->__stdmemUnmap__ = &__nvoc_thunk_Memory_stdmemUnmap; + + pThis->__stdmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_stdmemGetMemInterMapParams; + + pThis->__stdmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_stdmemGetMemoryMappingDescriptor; + + pThis->__stdmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_stdmemGetMapAddrSpace; + + pThis->__stdmemShareCallback__ = &__nvoc_thunk_RmResource_stdmemShareCallback; + + pThis->__stdmemControlFilter__ = &__nvoc_thunk_RsResource_stdmemControlFilter; + + pThis->__stdmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_stdmemAddAdditionalDependants; + + pThis->__stdmemGetRefCount__ = &__nvoc_thunk_RsResource_stdmemGetRefCount; + + pThis->__stdmemMapTo__ = &__nvoc_thunk_RsResource_stdmemMapTo; + + pThis->__stdmemControl_Prologue__ = &__nvoc_thunk_RmResource_stdmemControl_Prologue; + + pThis->__stdmemIsReady__ = &__nvoc_thunk_Memory_stdmemIsReady; + + pThis->__stdmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_stdmemCheckCopyPermissions; + + pThis->__stdmemPreDestruct__ = &__nvoc_thunk_RsResource_stdmemPreDestruct; + + pThis->__stdmemUnmapFrom__ = &__nvoc_thunk_RsResource_stdmemUnmapFrom; + + pThis->__stdmemControl_Epilogue__ = &__nvoc_thunk_RmResource_stdmemControl_Epilogue; + + pThis->__stdmemControlLookup__ = &__nvoc_thunk_RsResource_stdmemControlLookup; + + pThis->__stdmemMap__ = &__nvoc_thunk_Memory_stdmemMap; + + pThis->__stdmemAccessCallback__ = &__nvoc_thunk_RmResource_stdmemAccessCallback; +} + +void __nvoc_init_funcTable_StandardMemory(StandardMemory *pThis) { + __nvoc_init_funcTable_StandardMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_StandardMemory(StandardMemory *pThis) { + pThis->__nvoc_pbase_StandardMemory = pThis; + pThis->__nvoc_pbase_Object = 
&pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_StandardMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_StandardMemory(StandardMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + StandardMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(StandardMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(StandardMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_StandardMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_StandardMemory(pThis); + status = __nvoc_ctor_StandardMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_StandardMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_StandardMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_StandardMemory(StandardMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_StandardMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h new file mode 100644 index 0000000..bb1573e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h @@ -0,0 +1,261 @@ +#ifndef _G_STANDARD_MEM_NVOC_H_ +#define _G_STANDARD_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_standard_mem_nvoc.h" + +#ifndef _STANDARD_MEMORY_H_ +#define _STANDARD_MEMORY_H_ + +#include "mem_mgr/mem.h" + +#include "ctrl/ctrl003e.h" + +typedef struct MEMORY_ALLOCATION_REQUEST MEMORY_ALLOCATION_REQUEST; + +struct MemoryManager; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + + +/*! + * Allocator for normal virtual, video and system memory + */ +#ifdef NVOC_STANDARD_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct StandardMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct StandardMemory *__nvoc_pbase_StandardMemory; + NvBool (*__stdmemCanCopy__)(struct StandardMemory *); + NV_STATUS (*__stdmemCheckMemInterUnmap__)(struct StandardMemory *, NvBool); + NV_STATUS (*__stdmemControl__)(struct StandardMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__stdmemUnmap__)(struct StandardMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__stdmemGetMemInterMapParams__)(struct StandardMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__stdmemGetMemoryMappingDescriptor__)(struct StandardMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__stdmemGetMapAddrSpace__)(struct StandardMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__stdmemShareCallback__)(struct StandardMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__stdmemControlFilter__)(struct StandardMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__stdmemAddAdditionalDependants__)(struct RsClient *, struct StandardMemory *, RsResourceRef *); + NvU32 (*__stdmemGetRefCount__)(struct StandardMemory *); + NV_STATUS (*__stdmemMapTo__)(struct StandardMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__stdmemControl_Prologue__)(struct StandardMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__stdmemIsReady__)(struct StandardMemory *); + NV_STATUS (*__stdmemCheckCopyPermissions__)(struct StandardMemory *, struct OBJGPU *, NvHandle); + void (*__stdmemPreDestruct__)(struct StandardMemory *); + NV_STATUS (*__stdmemUnmapFrom__)(struct StandardMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__stdmemControl_Epilogue__)(struct StandardMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__stdmemControlLookup__)(struct StandardMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__stdmemMap__)(struct 
StandardMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__stdmemAccessCallback__)(struct StandardMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_StandardMemory_TYPEDEF__ +#define __NVOC_CLASS_StandardMemory_TYPEDEF__ +typedef struct StandardMemory StandardMemory; +#endif /* __NVOC_CLASS_StandardMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_StandardMemory +#define __nvoc_class_id_StandardMemory 0x897bf7 +#endif /* __nvoc_class_id_StandardMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +#define __staticCast_StandardMemory(pThis) \ + ((pThis)->__nvoc_pbase_StandardMemory) + +#ifdef __nvoc_standard_mem_h_disabled +#define __dynamicCast_StandardMemory(pThis) ((StandardMemory*)NULL) +#else //__nvoc_standard_mem_h_disabled +#define __dynamicCast_StandardMemory(pThis) \ + ((StandardMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(StandardMemory))) +#endif //__nvoc_standard_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_StandardMemory(StandardMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_StandardMemory(StandardMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_StandardMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_StandardMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define stdmemCanCopy(pStandardMemory) stdmemCanCopy_DISPATCH(pStandardMemory) +#define stdmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) stdmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define stdmemControl(pMemory, pCallContext, pParams) stdmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define stdmemUnmap(pMemory, pCallContext, pCpuMapping) stdmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define stdmemGetMemInterMapParams(pMemory, pParams) stdmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define stdmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) stdmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define stdmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) stdmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define stdmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) stdmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define stdmemControlFilter(pResource, pCallContext, pParams) stdmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define stdmemAddAdditionalDependants(pClient, pResource, pReference) stdmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define stdmemGetRefCount(pResource) stdmemGetRefCount_DISPATCH(pResource) +#define stdmemMapTo(pResource, pParams) stdmemMapTo_DISPATCH(pResource, pParams) +#define stdmemControl_Prologue(pResource, pCallContext, pParams) stdmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define stdmemIsReady(pMemory) stdmemIsReady_DISPATCH(pMemory) +#define stdmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) stdmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define stdmemPreDestruct(pResource) stdmemPreDestruct_DISPATCH(pResource) +#define stdmemUnmapFrom(pResource, pParams) stdmemUnmapFrom_DISPATCH(pResource, pParams) +#define stdmemControl_Epilogue(pResource, pCallContext, pParams) stdmemControl_Epilogue_DISPATCH(pResource, pCallContext, 
pParams) +#define stdmemControlLookup(pResource, pParams, ppEntry) stdmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define stdmemMap(pMemory, pCallContext, pParams, pCpuMapping) stdmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define stdmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) stdmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvU32 stdmemGetSysmemPageSize_IMPL(struct OBJGPU *pGpu, struct StandardMemory *pMemory); + +#ifdef __nvoc_standard_mem_h_disabled +static inline NvU32 stdmemGetSysmemPageSize(struct OBJGPU *pGpu, struct StandardMemory *pMemory) { + NV_ASSERT_FAILED_PRECOMP("StandardMemory was disabled!"); + return 0; +} +#else //__nvoc_standard_mem_h_disabled +#define stdmemGetSysmemPageSize(pGpu, pMemory) stdmemGetSysmemPageSize_IMPL(pGpu, pMemory) +#endif //__nvoc_standard_mem_h_disabled + +#define stdmemGetSysmemPageSize_HAL(pGpu, pMemory) stdmemGetSysmemPageSize(pGpu, pMemory) + +NvBool stdmemCanCopy_IMPL(struct StandardMemory *pStandardMemory); + +static inline NvBool stdmemCanCopy_DISPATCH(struct StandardMemory *pStandardMemory) { + return pStandardMemory->__stdmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS stdmemCheckMemInterUnmap_DISPATCH(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__stdmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS stdmemControl_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__stdmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS stdmemUnmap_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__stdmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS stdmemGetMemInterMapParams_DISPATCH(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__stdmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS stdmemGetMemoryMappingDescriptor_DISPATCH(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__stdmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS stdmemGetMapAddrSpace_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__stdmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool stdmemShareCallback_DISPATCH(struct StandardMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__stdmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS stdmemControlFilter_DISPATCH(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__stdmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void stdmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference) { + pResource->__stdmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 stdmemGetRefCount_DISPATCH(struct StandardMemory *pResource) { + return pResource->__stdmemGetRefCount__(pResource); +} + +static inline NV_STATUS stdmemMapTo_DISPATCH(struct StandardMemory 
*pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__stdmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS stdmemControl_Prologue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__stdmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS stdmemIsReady_DISPATCH(struct StandardMemory *pMemory) { + return pMemory->__stdmemIsReady__(pMemory); +} + +static inline NV_STATUS stdmemCheckCopyPermissions_DISPATCH(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__stdmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void stdmemPreDestruct_DISPATCH(struct StandardMemory *pResource) { + pResource->__stdmemPreDestruct__(pResource); +} + +static inline NV_STATUS stdmemUnmapFrom_DISPATCH(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__stdmemUnmapFrom__(pResource, pParams); +} + +static inline void stdmemControl_Epilogue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__stdmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS stdmemControlLookup_DISPATCH(struct StandardMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__stdmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS stdmemMap_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__stdmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool stdmemAccessCallback_DISPATCH(struct StandardMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__stdmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS stdmemConstruct_IMPL(struct StandardMemory *arg_pStandardMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_stdmemConstruct(arg_pStandardMemory, arg_pCallContext, arg_pParams) stdmemConstruct_IMPL(arg_pStandardMemory, arg_pCallContext, arg_pParams) +NV_STATUS stdmemValidateParams_IMPL(struct OBJGPU *pGpu, NvHandle hClient, NV_MEMORY_ALLOCATION_PARAMS *pAllocData); +#define stdmemValidateParams(pGpu, hClient, pAllocData) stdmemValidateParams_IMPL(pGpu, hClient, pAllocData) +void stdmemDumpInputAllocParams_IMPL(NV_MEMORY_ALLOCATION_PARAMS *pAllocData, CALL_CONTEXT *pCallContext); +#define stdmemDumpInputAllocParams(pAllocData, pCallContext) stdmemDumpInputAllocParams_IMPL(pAllocData, pCallContext) +void stdmemDumpOutputAllocParams_IMPL(NV_MEMORY_ALLOCATION_PARAMS *pAllocData); +#define stdmemDumpOutputAllocParams(pAllocData) stdmemDumpOutputAllocParams_IMPL(pAllocData) +NvU32 stdmemQueryPageSize_IMPL(struct MemoryManager *pMemoryManager, NvHandle hClient, NV_MEMORY_ALLOCATION_PARAMS *pAllocData); +#define stdmemQueryPageSize(pMemoryManager, hClient, pAllocData) stdmemQueryPageSize_IMPL(pMemoryManager, hClient, pAllocData) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_STANDARD_MEM_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c new file mode 100644 index 0000000..1bef8fd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c @@ -0,0 +1,1504 @@ +#define NVOC_SUBDEVICE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_subdevice_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4b01b3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_Subdevice(Subdevice*); +void __nvoc_init_funcTable_Subdevice(Subdevice*); +NV_STATUS __nvoc_ctor_Subdevice(Subdevice*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Subdevice(Subdevice*); +void __nvoc_dtor_Subdevice(Subdevice*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_Subdevice = { + /*pClassDef=*/ &__nvoc_class_def_Subdevice, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Subdevice, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Subdevice = { + /*numRelatives=*/ 8, + /*relatives=*/ { 
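+        // Illustrative note: this list enumerates an RTTI entry for every
+        // class in Subdevice's ancestry. Each entry pairs a class definition
+        // with the byte offset of that base within Subdevice; the
+        // __nvoc_thunk_* wrappers in this file add or subtract the same
+        // offsets to convert between base and derived pointers, and
+        // dynamicCast uses them to locate a requested base at run time.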
+ &__nvoc_rtti_Subdevice_Subdevice, + &__nvoc_rtti_Subdevice_Notifier, + &__nvoc_rtti_Subdevice_INotifier, + &__nvoc_rtti_Subdevice_GpuResource, + &__nvoc_rtti_Subdevice_RmResource, + &__nvoc_rtti_Subdevice_RmResourceCommon, + &__nvoc_rtti_Subdevice_RsResource, + &__nvoc_rtti_Subdevice_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Subdevice), + /*classId=*/ classId(Subdevice), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Subdevice", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Subdevice, + /*pCastInfo=*/ &__nvoc_castinfo_Subdevice, + /*pExportInfo=*/ &__nvoc_export_info_Subdevice +}; + +static void __nvoc_thunk_Subdevice_resPreDestruct(struct RsResource *pResource) { + subdevicePreDestruct((struct Subdevice *)(((unsigned char *)pResource) - __nvoc_rtti_Subdevice_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Subdevice_gpuresInternalControlForward(struct GpuResource *pSubdevice, NvU32 command, void *pParams, NvU32 size) { + return subdeviceInternalControlForward((struct Subdevice *)(((unsigned char *)pSubdevice) - __nvoc_rtti_Subdevice_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_Subdevice_resControlFilter(struct RsResource *pSubdevice, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return subdeviceControlFilter((struct Subdevice *)(((unsigned char *)pSubdevice) - __nvoc_rtti_Subdevice_RsResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_subdeviceShareCallback(struct Subdevice *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_subdeviceMapTo(struct Subdevice *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_subdeviceGetOrAllocNotifShare(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_subdeviceCheckMemInterUnmap(struct Subdevice *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Subdevice_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceGetMapAddrSpace(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_subdeviceSetNotificationShare(struct Subdevice *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset), pNotifShare); +} + +static NvU32 
__nvoc_thunk_RsResource_subdeviceGetRefCount(struct Subdevice *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_subdeviceAddAdditionalDependants(struct RsClient *pClient, struct Subdevice *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_subdeviceControl_Prologue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceGetRegBaseOffsetAndSize(struct Subdevice *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_RsResource_subdeviceUnmapFrom(struct Subdevice *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_subdeviceControl_Epilogue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_subdeviceControlLookup(struct Subdevice *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_subdeviceGetInternalObjectHandle(struct Subdevice *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceControl(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceUnmap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_subdeviceGetMemInterMapParams(struct Subdevice *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Subdevice_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_subdeviceGetMemoryMappingDescriptor(struct Subdevice *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + 
__nvoc_rtti_Subdevice_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Notifier_subdeviceUnregisterEvent(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_subdeviceCanCopy(struct Subdevice *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_subdeviceGetNotificationListPtr(struct Subdevice *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_subdeviceGetNotificationShare(struct Subdevice *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceMap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_subdeviceAccessCallback(struct Subdevice *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevice[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800102u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetInfoV2" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNameString_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa10u) + /*flags=*/ 0xa10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800110u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNameString" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4a10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetShortNameString_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4a10u) + /*flags=*/ 0x4a10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800111u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetShortNameString" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetSdm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800118u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SDM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetSdm" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetSimulationInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800119u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetSimulationInfo" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetSdm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*flags=*/ 0x5u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800120u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_SDM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetSdm" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngines_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800123u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngines" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineClasslist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800124u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineClasslist" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800128u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_QUERY_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryMode" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuPromoteCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS), + 
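+        // Illustrative note: methodId follows the NV2080 control-command
+        // encoding, with the class id (0x2080, NV20_SUBDEVICE_0) in the high
+        // 16 bits and the command index in the low 16 bits; 0x2080012bu here
+        // corresponds to NV2080_CTRL_CMD_GPU_PROMOTE_CTX.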
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuPromoteCtx" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuEvictCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_EVICT_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuEvictCtx" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuInitializeCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuInitializeCtx" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*flags=*/ 0x4210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080013fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetOEMBoardInfo" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*flags=*/ 0x812u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800142u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetId" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800147u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEnginePartnerList" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetGidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*flags=*/ 0xa50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014au, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_GID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetGidInfo" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetOptimusInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + 
/*methodId=*/ 0x2080014cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetOptimusInfo" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetIpVersion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetIpVersion" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuHandleGpuSR_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800167u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuHandleGpuSR" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetOEMInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*flags=*/ 0x4210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800169u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetOEMInfo" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEnginesV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800170u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEnginesV2" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800173u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryFunctionStatus" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetCachedInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800182u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetCachedInfo" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800188u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetMaxSupportedPageSize" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPids_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080018du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PIDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPids" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080018eu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPidInfo" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdValidateMemMapRequest_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800198u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdValidateMemMapRequest" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x12u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x12u) + /*flags=*/ 0x12u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080019bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineLoadTimes" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800301u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetNotification" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetTrigger_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800302u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetTrigger" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, 
+#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetMemoryNotifies_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800303u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetMemoryNotifies" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800304u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetSemaphoreMemory" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetSemaMemValidation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800306u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetSemaMemValidation" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetTriggerFifo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800308u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetTriggerFifo" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerSchedule_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800401u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerSchedule" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerCancel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800402u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerCancel" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetTime_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800403u, + /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_TIME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerGetTime" +#endif + }, + { /* 
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetRegisterOffset_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
+ /*flags=*/ 0x11u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800404u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdTimerGetRegisterOffset"
+#endif
+ },
+ { /* [38] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ /*flags=*/ 0x10u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800406u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo"
+#endif
+ },
+ { /* [39] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetStaticInfo_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a01u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdDisplayGetStaticInfo"
+#endif
+ },
+ { /* [40] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetChipInfo_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ /*flags=*/ 0x4600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a36u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdInternalGetChipInfo"
+#endif
+ },
+ { /* [41] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ /*flags=*/ 0x4600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a40u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdInternalGetDeviceInfoTable"
+#endif
+ },
+ { /* [42] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ /*flags=*/ 0x4600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a41u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdInternalGetUserRegisterAccessMap"
+#endif
+ },
+ { /* [43] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetConstructedFalconInfo_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a42u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdInternalGetConstructedFalconInfo"
+#endif
+ },
+ { /* [44] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayWriteInstMem_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a49u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdDisplayWriteInstMem"
+#endif
+ },
+ { /* [45] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a4au,
+ /*paramSize=*/ 0,
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdInternalRecoverAllComputeContexts"
+#endif
+ },
+ { /* [46] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetIpVersion_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a4bu,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdDisplayGetIpVersion"
+#endif
+ },
+ { /* [47] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetSmcMode_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a4cu,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdInternalGetSmcMode"
+#endif
+ },
+ { /* [48] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a4du,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdDisplaySetupRgLineIntr"
+#endif
+ },
+ { /* [49] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetImportedImpData_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ /*flags=*/ 0x600u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20800a54u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdDisplaySetImportedImpData"
"subdeviceCtrlCmdDisplaySetImportedImpData" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a58u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetChannelPushbuffer" +#endif + }, + { /* [51] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetDisplayMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a5du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayGetDisplayMask" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ab8u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetPcieP2pCaps" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccGetClientExposedCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803400u, + /*paramSize=*/ sizeof(NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGspGetFeatures_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*flags=*/ 0x211u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803601u, + /*paramSize=*/ sizeof(NV2080_CTRL_GSP_GET_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGspGetFeatures" +#endif + }, + { /* [55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*flags=*/ 0x3u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d01u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt" +#endif + }, + { /* [56] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL, +#endif // 
+ /*flags=*/ 0x11u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20803d02u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff"
+#endif
+ },
+ { /* [57] */
+#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ /*pFunc=*/ (void (*)(void)) NULL,
+#else
+ /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL,
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ /*flags=*/ 0x1u,
+ /*accessRight=*/0x0u,
+ /*methodId=*/ 0x20803d03u,
+ /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS),
+ /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
+#if NV_PRINTF_STRINGS_ALLOWED
+ /*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower"
+#endif
+ },
+
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice =
+{
+ /*numEntries=*/ 58,
+ /*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
+};
+
+void __nvoc_dtor_GpuResource(GpuResource*);
+void __nvoc_dtor_Notifier(Notifier*);
+void __nvoc_dtor_Subdevice(Subdevice *pThis) {
+ __nvoc_subdeviceDestruct(pThis);
+ __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+ __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_Subdevice(Subdevice *pThis) {
+ PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
+NV_STATUS __nvoc_ctor_Subdevice(Subdevice *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+ NV_STATUS status = NV_OK;
+ status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail_GpuResource;
+ status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
+ if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail_Notifier;
+ __nvoc_init_dataField_Subdevice(pThis);
+
+ status = __nvoc_subdeviceConstruct(pThis, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail__init;
+ goto __nvoc_ctor_Subdevice_exit; // Success
+
+__nvoc_ctor_Subdevice_fail__init:
+ __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
+__nvoc_ctor_Subdevice_fail_Notifier:
+ __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
+__nvoc_ctor_Subdevice_fail_GpuResource:
+__nvoc_ctor_Subdevice_exit:
+
+ return status;
+}
+
+static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis) {
+ PORT_UNREFERENCED_VARIABLE(pThis);
+
+ pThis->__subdevicePreDestruct__ = &subdevicePreDestruct_IMPL;
+
+ pThis->__subdeviceInternalControlForward__ = &subdeviceInternalControlForward_IMPL;
+
+ pThis->__subdeviceControlFilter__ = &subdeviceControlFilter_IMPL;
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
+ pThis->__subdeviceCtrlCmdGpuGetCachedInfo__ = &subdeviceCtrlCmdGpuGetCachedInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
+ pThis->__subdeviceCtrlCmdGpuGetInfoV2__ = &subdeviceCtrlCmdGpuGetInfoV2_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
+ pThis->__subdeviceCtrlCmdGpuGetIpVersion__ = &subdeviceCtrlCmdGpuGetIpVersion_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdGpuSetOptimusInfo__ = &subdeviceCtrlCmdGpuSetOptimusInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa10u)
+ pThis->__subdeviceCtrlCmdGpuGetNameString__ = &subdeviceCtrlCmdGpuGetNameString_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4a10u)
+ pThis->__subdeviceCtrlCmdGpuGetShortNameString__ = &subdeviceCtrlCmdGpuGetShortNameString_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
+ pThis->__subdeviceCtrlCmdGpuGetSdm__ = &subdeviceCtrlCmdGpuGetSdm_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
+ pThis->__subdeviceCtrlCmdGpuSetSdm__ = &subdeviceCtrlCmdGpuSetSdm_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
+ pThis->__subdeviceCtrlCmdGpuGetSimulationInfo__ = &subdeviceCtrlCmdGpuGetSimulationInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
+ pThis->__subdeviceCtrlCmdGpuGetEngines__ = &subdeviceCtrlCmdGpuGetEngines_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
+ pThis->__subdeviceCtrlCmdGpuGetEnginesV2__ = &subdeviceCtrlCmdGpuGetEnginesV2_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
+ pThis->__subdeviceCtrlCmdGpuGetEngineClasslist__ = &subdeviceCtrlCmdGpuGetEngineClasslist_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
+ pThis->__subdeviceCtrlCmdGpuGetEnginePartnerList__ = &subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdGpuQueryMode__ = &subdeviceCtrlCmdGpuQueryMode_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u)
+ pThis->__subdeviceCtrlCmdGpuGetOEMBoardInfo__ = &subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u)
+ pThis->__subdeviceCtrlCmdGpuGetOEMInfo__ = &subdeviceCtrlCmdGpuGetOEMInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
+ pThis->__subdeviceCtrlCmdGpuHandleGpuSR__ = &subdeviceCtrlCmdGpuHandleGpuSR_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
+ pThis->__subdeviceCtrlCmdGpuInitializeCtx__ = &subdeviceCtrlCmdGpuInitializeCtx_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
+ pThis->__subdeviceCtrlCmdGpuPromoteCtx__ = &subdeviceCtrlCmdGpuPromoteCtx_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u)
+ pThis->__subdeviceCtrlCmdGpuEvictCtx__ = &subdeviceCtrlCmdGpuEvictCtx_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u)
+ pThis->__subdeviceCtrlCmdGpuGetId__ = &subdeviceCtrlCmdGpuGetId_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
+ pThis->__subdeviceCtrlCmdGpuGetGidInfo__ = &subdeviceCtrlCmdGpuGetGidInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdGpuGetPids__ = &subdeviceCtrlCmdGpuGetPids_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdGpuGetPidInfo__ = &subdeviceCtrlCmdGpuGetPidInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
+ pThis->__subdeviceCtrlCmdGpuQueryFunctionStatus__ = &subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u)
+ pThis->__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__ = &subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
+ pThis->__subdeviceCtrlCmdValidateMemMapRequest__ = &subdeviceCtrlCmdValidateMemMapRequest_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x12u)
+ pThis->__subdeviceCtrlCmdGpuGetEngineLoadTimes__ = &subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdEventSetTrigger__ = &subdeviceCtrlCmdEventSetTrigger_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
+ pThis->__subdeviceCtrlCmdEventSetTriggerFifo__ = &subdeviceCtrlCmdEventSetTriggerFifo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdEventSetNotification__ = &subdeviceCtrlCmdEventSetNotification_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdEventSetMemoryNotifies__ = &subdeviceCtrlCmdEventSetMemoryNotifies_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdEventSetSemaphoreMemory__ = &subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdEventSetSemaMemValidation__ = &subdeviceCtrlCmdEventSetSemaMemValidation_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdTimerCancel__ = &subdeviceCtrlCmdTimerCancel_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdTimerSchedule__ = &subdeviceCtrlCmdTimerSchedule_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdTimerGetTime__ = &subdeviceCtrlCmdTimerGetTime_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
+ pThis->__subdeviceCtrlCmdTimerGetRegisterOffset__ = &subdeviceCtrlCmdTimerGetRegisterOffset_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
+ pThis->__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__ = &subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
+ pThis->__subdeviceCtrlCmdEccGetClientExposedCounters__ = &subdeviceCtrlCmdEccGetClientExposedCounters_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
+ pThis->__subdeviceCtrlCmdGspGetFeatures__ = &subdeviceCtrlCmdGspGetFeatures_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
+ pThis->__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__ = &subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
+ pThis->__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__ = &subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
+ pThis->__subdeviceCtrlCmdOsUnixAudioDynamicPower__ = &subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdDisplayGetIpVersion__ = &subdeviceCtrlCmdDisplayGetIpVersion_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdDisplayGetStaticInfo__ = &subdeviceCtrlCmdDisplayGetStaticInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdDisplaySetChannelPushbuffer__ = &subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdDisplayWriteInstMem__ = &subdeviceCtrlCmdDisplayWriteInstMem_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdDisplaySetupRgLineIntr__ = &subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdDisplaySetImportedImpData__ = &subdeviceCtrlCmdDisplaySetImportedImpData_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdDisplayGetDisplayMask__ = &subdeviceCtrlCmdDisplayGetDisplayMask_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ pThis->__subdeviceCtrlCmdInternalGetChipInfo__ = &subdeviceCtrlCmdInternalGetChipInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ pThis->__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__ = &subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u)
+ pThis->__subdeviceCtrlCmdInternalGetDeviceInfoTable__ = &subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdInternalGetConstructedFalconInfo__ = &subdeviceCtrlCmdInternalGetConstructedFalconInfo_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdInternalRecoverAllComputeContexts__ = &subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdInternalGetSmcMode__ = &subdeviceCtrlCmdInternalGetSmcMode_IMPL;
+#endif
+
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
+ pThis->__subdeviceCtrlCmdInternalGetPcieP2pCaps__ = &subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL;
+#endif
+
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resPreDestruct__ = &__nvoc_thunk_Subdevice_resPreDestruct;
+
+ pThis->__nvoc_base_GpuResource.__gpuresInternalControlForward__ = &__nvoc_thunk_Subdevice_gpuresInternalControlForward;
+
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resControlFilter__ = &__nvoc_thunk_Subdevice_resControlFilter;
+
+ pThis->__subdeviceShareCallback__ = &__nvoc_thunk_GpuResource_subdeviceShareCallback;
+
+ pThis->__subdeviceMapTo__ = &__nvoc_thunk_RsResource_subdeviceMapTo;
+
+ pThis->__subdeviceGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_subdeviceGetOrAllocNotifShare;
+
+ pThis->__subdeviceCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_subdeviceCheckMemInterUnmap;
+
+ pThis->__subdeviceGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_subdeviceGetMapAddrSpace;
+
+ pThis->__subdeviceSetNotificationShare__ = &__nvoc_thunk_Notifier_subdeviceSetNotificationShare;
+
+ pThis->__subdeviceGetRefCount__ = &__nvoc_thunk_RsResource_subdeviceGetRefCount;
+
+ pThis->__subdeviceAddAdditionalDependants__ = &__nvoc_thunk_RsResource_subdeviceAddAdditionalDependants;
+
+ pThis->__subdeviceControl_Prologue__ = &__nvoc_thunk_RmResource_subdeviceControl_Prologue;
+
+ pThis->__subdeviceGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_subdeviceGetRegBaseOffsetAndSize;
+
+ pThis->__subdeviceUnmapFrom__ = &__nvoc_thunk_RsResource_subdeviceUnmapFrom;
+
+ pThis->__subdeviceControl_Epilogue__ = &__nvoc_thunk_RmResource_subdeviceControl_Epilogue;
+
+ pThis->__subdeviceControlLookup__ = &__nvoc_thunk_RsResource_subdeviceControlLookup;
+
+ pThis->__subdeviceGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_subdeviceGetInternalObjectHandle;
+
+ pThis->__subdeviceControl__ = &__nvoc_thunk_GpuResource_subdeviceControl;
+
+ pThis->__subdeviceUnmap__ = &__nvoc_thunk_GpuResource_subdeviceUnmap;
+
+ pThis->__subdeviceGetMemInterMapParams__ = &__nvoc_thunk_RmResource_subdeviceGetMemInterMapParams;
+
+ pThis->__subdeviceGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_subdeviceGetMemoryMappingDescriptor;
+
+ pThis->__subdeviceUnregisterEvent__ = &__nvoc_thunk_Notifier_subdeviceUnregisterEvent;
+
+ pThis->__subdeviceCanCopy__ = &__nvoc_thunk_RsResource_subdeviceCanCopy;
+
+ pThis->__subdeviceGetNotificationListPtr__ = &__nvoc_thunk_Notifier_subdeviceGetNotificationListPtr;
+
+ pThis->__subdeviceGetNotificationShare__ = &__nvoc_thunk_Notifier_subdeviceGetNotificationShare;
+
+ pThis->__subdeviceMap__ = &__nvoc_thunk_GpuResource_subdeviceMap;
+
+ pThis->__subdeviceAccessCallback__ = &__nvoc_thunk_RmResource_subdeviceAccessCallback;
+}
+
+void __nvoc_init_funcTable_Subdevice(Subdevice *pThis) {
+ __nvoc_init_funcTable_Subdevice_1(pThis);
+}
+
+void __nvoc_init_GpuResource(GpuResource*);
+void __nvoc_init_Notifier(Notifier*);
+void __nvoc_init_Subdevice(Subdevice *pThis) {
+ pThis->__nvoc_pbase_Subdevice = pThis;
+ pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
+ pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
+ pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
+ pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
+ pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
+ pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier;
+ pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier;
+ __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
+ __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier);
+ __nvoc_init_funcTable_Subdevice(pThis);
+}
+
+NV_STATUS __nvoc_objCreate_Subdevice(Subdevice **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
+ NV_STATUS status;
+ Object *pParentObj;
+ Subdevice *pThis;
+
+ pThis = portMemAllocNonPaged(sizeof(Subdevice));
+ if (pThis == NULL) return NV_ERR_NO_MEMORY;
+
+ portMemSet(pThis, 0, sizeof(Subdevice));
+
+ __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Subdevice);
+
+ if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
+ {
+ pParentObj = dynamicCast(pParent, Object);
+ objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
+ }
+ else
+ {
+ pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
+ }
+
+ __nvoc_init_Subdevice(pThis);
+ status = __nvoc_ctor_Subdevice(pThis, arg_pCallContext, arg_pParams);
+ if (status != NV_OK) goto __nvoc_objCreate_Subdevice_cleanup;
+
+ *ppThis = pThis;
+ return NV_OK;
+
+__nvoc_objCreate_Subdevice_cleanup:
+ // do not call destructors here since the constructor already called them
+ portMemFree(pThis);
+ return status;
+}
+
+NV_STATUS __nvoc_objCreateDynamic_Subdevice(Subdevice **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
+ NV_STATUS status;
+ struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
+ struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
+
+ status = __nvoc_objCreate_Subdevice(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
+
+ return status;
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h
new file mode 100644
index 0000000..86eb767
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h
@@ -0,0 +1,929 @@
+#ifndef _G_SUBDEVICE_NVOC_H_
+#define _G_SUBDEVICE_NVOC_H_
+#include "nvoc/runtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "g_subdevice_nvoc.h"
+
+#ifndef _SUBDEVICE_H_
+#define _SUBDEVICE_H_
+
+#include "resserv/resserv.h"
+#include "nvoc/prelude.h"
+#include "resserv/rs_resource.h"
+#include "gpu/gpu_resource.h"
+#include "rmapi/event.h"
+#include "containers/btree.h"
+#include "nvoc/utility.h"
+#include "gpu/gpu_halspec.h"
+
+#include "class/cl2080.h"
+#include "ctrl/ctrl0000/ctrl0000system.h"
+#include "ctrl/ctrl2080.h" // rmcontrol parameters
+
+#ifndef NV2080_NOTIFIERS_CE_IDX
+// TODO these need to be moved to cl2080.h
+#define NV2080_NOTIFIERS_CE_IDX(i) ((i) - NV2080_NOTIFIERS_CE0)
+#define NV2080_NOTIFIERS_NVENC_IDX(i) ((i) - NV2080_NOTIFIERS_NVENC0)
+#define NV2080_NOTIFIERS_NVDEC_IDX(i) ((i) - NV2080_NOTIFIERS_NVDEC0)
+#define NV2080_NOTIFIERS_GR_IDX(i) ((i) - NV2080_NOTIFIERS_GR0)
+#endif
+
+#define NV2080_ENGINE_RANGE_GR() rangeMake(NV2080_ENGINE_TYPE_GR(0), NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE - 1))
+#define NV2080_ENGINE_RANGE_COPY() rangeMake(NV2080_ENGINE_TYPE_COPY(0), NV2080_ENGINE_TYPE_COPY(NV2080_ENGINE_TYPE_COPY_SIZE - 1))
+#define NV2080_ENGINE_RANGE_NVDEC() rangeMake(NV2080_ENGINE_TYPE_NVDEC(0), NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE - 1))
+#define NV2080_ENGINE_RANGE_NVENC() rangeMake(NV2080_ENGINE_TYPE_NVENC(0), NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE - 1))
+#define NV2080_ENGINE_RANGE_NVJPEG() rangeMake(NV2080_ENGINE_TYPE_NVJPEG(0), NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE - 1))
+
+struct Device;
+
+#ifndef __NVOC_CLASS_Device_TYPEDEF__
+#define __NVOC_CLASS_Device_TYPEDEF__
+typedef struct Device Device;
+#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Device
+#define __nvoc_class_id_Device 0xe0ac20
+#endif /* __nvoc_class_id_Device */
+
+
+struct OBJGPU;
+
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGPU
+#define __nvoc_class_id_OBJGPU 0x7ef3cb
+#endif /* __nvoc_class_id_OBJGPU */
+
+
+struct Memory;
+
+#ifndef __NVOC_CLASS_Memory_TYPEDEF__
+#define __NVOC_CLASS_Memory_TYPEDEF__
+typedef struct Memory Memory;
+#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Memory
+#define __nvoc_class_id_Memory 0x4789f2
+#endif /* __nvoc_class_id_Memory */
+
+
+struct P2PApi;
+
+#ifndef __NVOC_CLASS_P2PApi_TYPEDEF__
+#define __NVOC_CLASS_P2PApi_TYPEDEF__
+typedef struct P2PApi P2PApi;
+#endif /* __NVOC_CLASS_P2PApi_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_P2PApi
+#define __nvoc_class_id_P2PApi 0x3982b7
+#endif /* __nvoc_class_id_P2PApi */
+
+
+
+/**
+ * A subdevice represents a single GPU within a device. Subdevices provide
+ * unicast semantics; that is, operations involving a subdevice are applied to
+ * the associated GPU only.
+ */
+#ifdef NVOC_SUBDEVICE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct Subdevice {
+ const struct NVOC_RTTI *__nvoc_rtti;
+ struct GpuResource __nvoc_base_GpuResource;
+ struct Notifier __nvoc_base_Notifier;
+ struct Object *__nvoc_pbase_Object;
+ struct RsResource *__nvoc_pbase_RsResource;
+ struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
+ struct RmResource *__nvoc_pbase_RmResource;
+ struct GpuResource *__nvoc_pbase_GpuResource;
+ struct INotifier *__nvoc_pbase_INotifier;
+ struct Notifier *__nvoc_pbase_Notifier;
+ struct Subdevice *__nvoc_pbase_Subdevice;
+ void (*__subdevicePreDestruct__)(struct Subdevice *);
+ NV_STATUS (*__subdeviceInternalControlForward__)(struct Subdevice *, NvU32, void *, NvU32);
+ NV_STATUS (*__subdeviceControlFilter__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetCachedInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetInfoV2__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetIpVersion__)(struct Subdevice *, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuSetOptimusInfo__)(struct Subdevice *, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetNameString__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetShortNameString__)(struct Subdevice *, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetSdm__)(struct Subdevice *, NV2080_CTRL_GPU_GET_SDM_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuSetSdm__)(struct Subdevice *, NV2080_CTRL_GPU_SET_SDM_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetSimulationInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetEngines__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetEnginesV2__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineClasslist__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetEnginePartnerList__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuQueryMode__)(struct Subdevice *, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetOEMBoardInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetOEMInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuHandleGpuSR__)(struct Subdevice *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuInitializeCtx__)(struct Subdevice *, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuPromoteCtx__)(struct Subdevice *, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuEvictCtx__)(struct Subdevice *, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetId__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ID_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetGidInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetPids__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PIDS_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetPidInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuQueryFunctionStatus__)(struct Subdevice *, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__)(struct Subdevice *, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdValidateMemMapRequest__)(struct Subdevice *, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineLoadTimes__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdEventSetTrigger__)(struct Subdevice *);
+ NV_STATUS (*__subdeviceCtrlCmdEventSetTriggerFifo__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdEventSetNotification__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdEventSetMemoryNotifies__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdEventSetSemaphoreMemory__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdEventSetSemaMemValidation__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdTimerCancel__)(struct Subdevice *);
+ NV_STATUS (*__subdeviceCtrlCmdTimerSchedule__)(struct Subdevice *, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdTimerGetTime__)(struct Subdevice *, NV2080_CTRL_TIMER_GET_TIME_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdTimerGetRegisterOffset__)(struct Subdevice *, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__)(struct Subdevice *, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdEccGetClientExposedCounters__)(struct Subdevice *, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdGspGetFeatures__)(struct Subdevice *, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdOsUnixAudioDynamicPower__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdDisplayGetIpVersion__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdDisplayGetStaticInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdDisplaySetChannelPushbuffer__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdDisplayWriteInstMem__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdDisplaySetupRgLineIntr__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdDisplaySetImportedImpData__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdDisplayGetDisplayMask__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdInternalGetChipInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdInternalGetDeviceInfoTable__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdInternalGetConstructedFalconInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdInternalRecoverAllComputeContexts__)(struct Subdevice *);
+ NV_STATUS (*__subdeviceCtrlCmdInternalGetSmcMode__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *);
+ NV_STATUS (*__subdeviceCtrlCmdInternalGetPcieP2pCaps__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *);
+ NvBool (*__subdeviceShareCallback__)(struct Subdevice *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
+ NV_STATUS (*__subdeviceMapTo__)(struct Subdevice *, RS_RES_MAP_TO_PARAMS *);
+ NV_STATUS (*__subdeviceGetOrAllocNotifShare__)(struct Subdevice *, NvHandle, NvHandle, struct NotifShare **);
+ NV_STATUS (*__subdeviceCheckMemInterUnmap__)(struct Subdevice *, NvBool);
+ NV_STATUS (*__subdeviceGetMapAddrSpace__)(struct Subdevice *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
+ void (*__subdeviceSetNotificationShare__)(struct Subdevice *, struct NotifShare *);
+ NvU32 (*__subdeviceGetRefCount__)(struct Subdevice *);
+ void (*__subdeviceAddAdditionalDependants__)(struct RsClient *, struct Subdevice *, RsResourceRef *);
+ NV_STATUS (*__subdeviceControl_Prologue__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ NV_STATUS (*__subdeviceGetRegBaseOffsetAndSize__)(struct Subdevice *, struct OBJGPU *, NvU32 *, NvU32 *);
+ NV_STATUS (*__subdeviceUnmapFrom__)(struct Subdevice *, RS_RES_UNMAP_FROM_PARAMS *);
+ void (*__subdeviceControl_Epilogue__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ NV_STATUS (*__subdeviceControlLookup__)(struct Subdevice *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
+ NvHandle (*__subdeviceGetInternalObjectHandle__)(struct Subdevice *);
+ NV_STATUS (*__subdeviceControl__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+ NV_STATUS (*__subdeviceUnmap__)(struct Subdevice *, struct CALL_CONTEXT *, struct RsCpuMapping *);
+ NV_STATUS (*__subdeviceGetMemInterMapParams__)(struct Subdevice *, RMRES_MEM_INTER_MAP_PARAMS *);
+ NV_STATUS (*__subdeviceGetMemoryMappingDescriptor__)(struct Subdevice *, struct MEMORY_DESCRIPTOR **);
+ NV_STATUS (*__subdeviceUnregisterEvent__)(struct Subdevice *, NvHandle, NvHandle, NvHandle, NvHandle);
+ NvBool (*__subdeviceCanCopy__)(struct Subdevice *);
+ PEVENTNOTIFICATION *(*__subdeviceGetNotificationListPtr__)(struct Subdevice *);
+ struct NotifShare *(*__subdeviceGetNotificationShare__)(struct Subdevice *);
+ NV_STATUS (*__subdeviceMap__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
+ NvBool (*__subdeviceAccessCallback__)(struct Subdevice *, struct RsClient *, void *, RsAccessRight);
+ NvU32 deviceInst;
+ NvU32 subDeviceInst;
+ struct Device *pDevice;
+ NvBool bMaxGrTickFreqRequested;
+ NvU64 P2PfbMappedBytes;
+ NvU32 notifyActions[165];
+ NvHandle hNotifierMemory;
+ struct Memory *pNotifierMemory;
+ NvHandle hSemMemory;
+ NvU32 videoStream4KCount;
+ NvU32 videoStreamHDCount;
+ NvU32 videoStreamSDCount;
+ NvU32 videoStreamLinearCount;
+ NvU32 ofaCount;
+ NvBool bGpuDebugModeEnabled;
+ NvBool bRcWatchdogEnableRequested;
+ NvBool bRcWatchdogDisableRequested;
+ NvBool bRcWatchdogSoftDisableRequested;
+ NvBool bReservePerfMon;
+ NvU32 perfBoostIndex;
+ NvU32 perfBoostRefCount;
+ NvBool perfBoostEntryExists;
+ NvBool bLockedClockModeRequested;
+ NvU32 bNvlinkErrorInjectionModeRequested;
+ NvBool bSchedPolicySet;
+ NvBool bGcoffDisallowed;
+ NvBool bUpdateTGP;
+};
+
+#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
+#define __NVOC_CLASS_Subdevice_TYPEDEF__
+typedef struct Subdevice Subdevice;
+#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_Subdevice
+#define __nvoc_class_id_Subdevice 0x4b01b3
+#endif /* __nvoc_class_id_Subdevice */
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice;
+
+#define __staticCast_Subdevice(pThis) \
+ ((pThis)->__nvoc_pbase_Subdevice)
+
+#ifdef __nvoc_subdevice_h_disabled
+#define __dynamicCast_Subdevice(pThis) ((Subdevice*)NULL)
+#else //__nvoc_subdevice_h_disabled
+#define __dynamicCast_Subdevice(pThis) \
+ ((Subdevice*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Subdevice)))
+#endif //__nvoc_subdevice_h_disabled
+
+
+NV_STATUS __nvoc_objCreateDynamic_Subdevice(Subdevice**, Dynamic*, NvU32, va_list);
+
+NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
+#define __objCreate_Subdevice(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
+ __nvoc_objCreate_Subdevice((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
+
+#define subdevicePreDestruct(pResource) subdevicePreDestruct_DISPATCH(pResource)
+#define subdeviceInternalControlForward(pSubdevice, command, pParams, size) subdeviceInternalControlForward_DISPATCH(pSubdevice, command, pParams, size)
+#define subdeviceControlFilter(pSubdevice, pCallContext, pParams) subdeviceControlFilter_DISPATCH(pSubdevice, pCallContext, pParams)
+#define subdeviceCtrlCmdGpuGetCachedInfo(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(pSubdevice, pGpuInfoParams)
+#define subdeviceCtrlCmdGpuGetInfoV2(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetInfoV2_DISPATCH(pSubdevice, pGpuInfoParams)
+#define subdeviceCtrlCmdGpuGetIpVersion(pSubdevice, pGpuIpVersionParams) subdeviceCtrlCmdGpuGetIpVersion_DISPATCH(pSubdevice, pGpuIpVersionParams)
+#define subdeviceCtrlCmdGpuSetOptimusInfo(pSubdevice, pGpuOptimusInfoParams) subdeviceCtrlCmdGpuSetOptimusInfo_DISPATCH(pSubdevice, pGpuOptimusInfoParams)
+#define subdeviceCtrlCmdGpuGetNameString(pSubdevice, pNameStringParams) subdeviceCtrlCmdGpuGetNameString_DISPATCH(pSubdevice, pNameStringParams)
+#define subdeviceCtrlCmdGpuGetShortNameString(pSubdevice, pShortNameStringParams) subdeviceCtrlCmdGpuGetShortNameString_DISPATCH(pSubdevice, pShortNameStringParams)
+#define subdeviceCtrlCmdGpuGetSdm(pSubdevice, pSdmParams) subdeviceCtrlCmdGpuGetSdm_DISPATCH(pSubdevice, pSdmParams)
+#define subdeviceCtrlCmdGpuSetSdm(pSubdevice, pSdmParams) subdeviceCtrlCmdGpuSetSdm_DISPATCH(pSubdevice, pSdmParams)
+#define subdeviceCtrlCmdGpuGetSimulationInfo(pSubdevice, pGpuSimulationInfoParams) subdeviceCtrlCmdGpuGetSimulationInfo_DISPATCH(pSubdevice, pGpuSimulationInfoParams)
+#define subdeviceCtrlCmdGpuGetEngines(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngines_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdGpuGetEnginesV2(pSubdevice, pEngineParams) subdeviceCtrlCmdGpuGetEnginesV2_DISPATCH(pSubdevice, pEngineParams)
+#define subdeviceCtrlCmdGpuGetEngineClasslist(pSubdevice, pClassParams) subdeviceCtrlCmdGpuGetEngineClasslist_DISPATCH(pSubdevice, pClassParams)
+#define subdeviceCtrlCmdGpuGetEnginePartnerList(pSubdevice, pPartnerListParams) subdeviceCtrlCmdGpuGetEnginePartnerList_DISPATCH(pSubdevice, pPartnerListParams)
+#define subdeviceCtrlCmdGpuQueryMode(pSubdevice, pQueryMode) subdeviceCtrlCmdGpuQueryMode_DISPATCH(pSubdevice, pQueryMode)
+#define subdeviceCtrlCmdGpuGetOEMBoardInfo(pSubdevice, pBoardInfo) subdeviceCtrlCmdGpuGetOEMBoardInfo_DISPATCH(pSubdevice, pBoardInfo)
+#define subdeviceCtrlCmdGpuGetOEMInfo(pSubdevice, pOemInfo) subdeviceCtrlCmdGpuGetOEMInfo_DISPATCH(pSubdevice, pOemInfo)
+#define subdeviceCtrlCmdGpuHandleGpuSR(pSubdevice) subdeviceCtrlCmdGpuHandleGpuSR_DISPATCH(pSubdevice)
+#define subdeviceCtrlCmdGpuInitializeCtx(pSubdevice, pInitializeCtxParams) subdeviceCtrlCmdGpuInitializeCtx_DISPATCH(pSubdevice, pInitializeCtxParams)
+#define subdeviceCtrlCmdGpuPromoteCtx(pSubdevice, pPromoteCtxParams) subdeviceCtrlCmdGpuPromoteCtx_DISPATCH(pSubdevice, pPromoteCtxParams)
+#define subdeviceCtrlCmdGpuEvictCtx(pSubdevice, pEvictCtxParams) subdeviceCtrlCmdGpuEvictCtx_DISPATCH(pSubdevice, pEvictCtxParams)
+#define subdeviceCtrlCmdGpuGetId(pSubdevice, pIdParams) subdeviceCtrlCmdGpuGetId_DISPATCH(pSubdevice, pIdParams)
+#define subdeviceCtrlCmdGpuGetGidInfo(pSubdevice, pGidInfoParams) subdeviceCtrlCmdGpuGetGidInfo_DISPATCH(pSubdevice, pGidInfoParams)
+#define subdeviceCtrlCmdGpuGetPids(pSubdevice, pGetPidsParams) subdeviceCtrlCmdGpuGetPids_DISPATCH(pSubdevice, pGetPidsParams)
+#define subdeviceCtrlCmdGpuGetPidInfo(pSubdevice, pGetPidInfoParams) subdeviceCtrlCmdGpuGetPidInfo_DISPATCH(pSubdevice, pGetPidInfoParams)
+#define subdeviceCtrlCmdGpuQueryFunctionStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuQueryFunctionStatus_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdGpuGetMaxSupportedPageSize(pSubdevice, pParams) subdeviceCtrlCmdGpuGetMaxSupportedPageSize_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdValidateMemMapRequest(pSubdevice, pParams) subdeviceCtrlCmdValidateMemMapRequest_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdGpuGetEngineLoadTimes(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngineLoadTimes_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdEventSetTrigger(pSubdevice) subdeviceCtrlCmdEventSetTrigger_DISPATCH(pSubdevice)
+#define subdeviceCtrlCmdEventSetTriggerFifo(pSubdevice, pTriggerFifoParams) subdeviceCtrlCmdEventSetTriggerFifo_DISPATCH(pSubdevice, pTriggerFifoParams)
+#define subdeviceCtrlCmdEventSetNotification(pSubdevice, pSetEventParams) subdeviceCtrlCmdEventSetNotification_DISPATCH(pSubdevice, pSetEventParams)
+#define subdeviceCtrlCmdEventSetMemoryNotifies(pSubdevice, pSetMemoryNotifiesParams) subdeviceCtrlCmdEventSetMemoryNotifies_DISPATCH(pSubdevice, pSetMemoryNotifiesParams)
+#define subdeviceCtrlCmdEventSetSemaphoreMemory(pSubdevice, pSetSemMemoryParams) subdeviceCtrlCmdEventSetSemaphoreMemory_DISPATCH(pSubdevice, pSetSemMemoryParams)
+#define subdeviceCtrlCmdEventSetSemaMemValidation(pSubdevice, pSetSemaMemValidationParams) subdeviceCtrlCmdEventSetSemaMemValidation_DISPATCH(pSubdevice, pSetSemaMemValidationParams)
+#define subdeviceCtrlCmdTimerCancel(pSubdevice) subdeviceCtrlCmdTimerCancel_DISPATCH(pSubdevice)
+#define subdeviceCtrlCmdTimerSchedule(pSubdevice, pParams) subdeviceCtrlCmdTimerSchedule_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdTimerGetTime(pSubdevice, pParams) subdeviceCtrlCmdTimerGetTime_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdTimerGetRegisterOffset(pSubdevice, pTimerRegOffsetParams) subdeviceCtrlCmdTimerGetRegisterOffset_DISPATCH(pSubdevice, pTimerRegOffsetParams)
+#define subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo(pSubdevice, pParams) subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdEccGetClientExposedCounters(pSubdevice, pParams) subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdGspGetFeatures(pSubdevice, pGspFeaturesParams) subdeviceCtrlCmdGspGetFeatures_DISPATCH(pSubdevice, pGspFeaturesParams)
+#define subdeviceCtrlCmdOsUnixGc6BlockerRefCnt(pSubdevice, pParams) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdOsUnixAllowDisallowGcoff(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdOsUnixAudioDynamicPower(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdDisplayGetIpVersion(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetIpVersion_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdDisplayGetStaticInfo(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetStaticInfo_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdDisplaySetChannelPushbuffer(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetChannelPushbuffer_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdDisplayWriteInstMem(pSubdevice, pParams) subdeviceCtrlCmdDisplayWriteInstMem_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdDisplaySetupRgLineIntr(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetupRgLineIntr_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdDisplaySetImportedImpData(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetImportedImpData_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdDisplayGetDisplayMask(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetDisplayMask_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalGetChipInfo(pSubdevice, pParams) subdeviceCtrlCmdInternalGetChipInfo_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalGetUserRegisterAccessMap(pSubdevice, pParams) subdeviceCtrlCmdInternalGetUserRegisterAccessMap_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalGetDeviceInfoTable(pSubdevice, pParams) subdeviceCtrlCmdInternalGetDeviceInfoTable_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalGetConstructedFalconInfo(pSubdevice, pParams) subdeviceCtrlCmdInternalGetConstructedFalconInfo_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalRecoverAllComputeContexts(pSubdevice) subdeviceCtrlCmdInternalRecoverAllComputeContexts_DISPATCH(pSubdevice)
+#define subdeviceCtrlCmdInternalGetSmcMode(pSubdevice, pParams) subdeviceCtrlCmdInternalGetSmcMode_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalGetPcieP2pCaps(pSubdevice, pParams) subdeviceCtrlCmdInternalGetPcieP2pCaps_DISPATCH(pSubdevice, pParams)
+#define subdeviceShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) subdeviceShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
+#define subdeviceMapTo(pResource, pParams) subdeviceMapTo_DISPATCH(pResource, pParams)
+#define subdeviceGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) subdeviceGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
+#define subdeviceCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) subdeviceCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
+#define subdeviceGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) subdeviceGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
+#define subdeviceSetNotificationShare(pNotifier, pNotifShare) subdeviceSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
+#define subdeviceGetRefCount(pResource) subdeviceGetRefCount_DISPATCH(pResource)
+#define subdeviceAddAdditionalDependants(pClient, pResource, pReference) subdeviceAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
+#define subdeviceControl_Prologue(pResource, pCallContext, pParams) subdeviceControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
+#define subdeviceGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) subdeviceGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
+#define subdeviceUnmapFrom(pResource, pParams) subdeviceUnmapFrom_DISPATCH(pResource, pParams)
+#define subdeviceControl_Epilogue(pResource, pCallContext, pParams) subdeviceControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
+#define subdeviceControlLookup(pResource, pParams, ppEntry) subdeviceControlLookup_DISPATCH(pResource, pParams, ppEntry)
+#define subdeviceGetInternalObjectHandle(pGpuResource) subdeviceGetInternalObjectHandle_DISPATCH(pGpuResource)
+#define subdeviceControl(pGpuResource, pCallContext, pParams) subdeviceControl_DISPATCH(pGpuResource, pCallContext, pParams)
+#define subdeviceUnmap(pGpuResource, pCallContext, pCpuMapping) subdeviceUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
+#define subdeviceGetMemInterMapParams(pRmResource, pParams) subdeviceGetMemInterMapParams_DISPATCH(pRmResource, pParams)
+#define subdeviceGetMemoryMappingDescriptor(pRmResource, ppMemDesc) subdeviceGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
+#define subdeviceUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) subdeviceUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
+#define subdeviceCanCopy(pResource) subdeviceCanCopy_DISPATCH(pResource)
+#define subdeviceGetNotificationListPtr(pNotifier) subdeviceGetNotificationListPtr_DISPATCH(pNotifier)
+#define subdeviceGetNotificationShare(pNotifier) subdeviceGetNotificationShare_DISPATCH(pNotifier)
+#define subdeviceMap(pGpuResource, pCallContext, pParams, pCpuMapping) subdeviceMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
+#define subdeviceAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) subdeviceAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
+void subdevicePreDestruct_IMPL(struct Subdevice *pResource);
+
+static inline void subdevicePreDestruct_DISPATCH(struct Subdevice *pResource) {
+ pResource->__subdevicePreDestruct__(pResource);
+}
+
+NV_STATUS subdeviceInternalControlForward_IMPL(struct Subdevice *pSubdevice, NvU32 command, void *pParams, NvU32 size);
+
+static inline NV_STATUS subdeviceInternalControlForward_DISPATCH(struct Subdevice *pSubdevice, NvU32 command, void *pParams, NvU32 size) {
+ return pSubdevice->__subdeviceInternalControlForward__(pSubdevice, command, pParams, size);
+}
+
+NV_STATUS subdeviceControlFilter_IMPL(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+static inline NV_STATUS subdeviceControlFilter_DISPATCH(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
+ return pSubdevice->__subdeviceControlFilter__(pSubdevice, pCallContext, pParams);
+}
+
+NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams);
+
+static inline NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams) {
+ return pSubdevice->__subdeviceCtrlCmdGpuGetCachedInfo__(pSubdevice, pGpuInfoParams);
+}
+
+NV_STATUS subdeviceCtrlCmdGpuGetInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams);
+
+static inline NV_STATUS subdeviceCtrlCmdGpuGetInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams) {
+ return pSubdevice->__subdeviceCtrlCmdGpuGetInfoV2__(pSubdevice, pGpuInfoParams);
+}
+
+NV_STATUS subdeviceCtrlCmdGpuGetIpVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *pGpuIpVersionParams);
+
+static inline NV_STATUS subdeviceCtrlCmdGpuGetIpVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *pGpuIpVersionParams) {
+ return pSubdevice->__subdeviceCtrlCmdGpuGetIpVersion__(pSubdevice, pGpuIpVersionParams);
+}
+
+NV_STATUS subdeviceCtrlCmdGpuSetOptimusInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams);
+
+static inline NV_STATUS subdeviceCtrlCmdGpuSetOptimusInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams) {
+ return pSubdevice->__subdeviceCtrlCmdGpuSetOptimusInfo__(pSubdevice, pGpuOptimusInfoParams);
+}
+
+NV_STATUS subdeviceCtrlCmdGpuGetNameString_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pNameStringParams);
+
+static inline NV_STATUS subdeviceCtrlCmdGpuGetNameString_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pNameStringParams) {
+ return pSubdevice->__subdeviceCtrlCmdGpuGetNameString__(pSubdevice, pNameStringParams);
+}
+
+NV_STATUS subdeviceCtrlCmdGpuGetShortNameString_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *pShortNameStringParams);
+
+static inline NV_STATUS subdeviceCtrlCmdGpuGetShortNameString_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *pShortNameStringParams) {
+ return pSubdevice->__subdeviceCtrlCmdGpuGetShortNameString__(pSubdevice, pShortNameStringParams);
+}
+
+NV_STATUS subdeviceCtrlCmdGpuGetSdm_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams);
+
+static inline NV_STATUS subdeviceCtrlCmdGpuGetSdm_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams) {
Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetSdm__(pSubdevice, pSdmParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetSdm_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_SDM_PARAMS *pSdmParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetSdm_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_SDM_PARAMS *pSdmParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetSdm__(pSubdevice, pSdmParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetSimulationInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetSimulationInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetSimulationInfo__(pSubdevice, pGpuSimulationInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngines_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngines_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngines__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEnginesV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEnginesV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEnginesV2__(pSubdevice, pEngineParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngineClasslist_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineClasslist_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineClasslist__(pSubdevice, pClassParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEnginePartnerList_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEnginePartnerList__(pSubdevice, pPartnerListParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryMode__(pSubdevice, pQueryMode); +} + +NV_STATUS subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfo); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetOEMBoardInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetOEMBoardInfo__(pSubdevice, pBoardInfo); +} + +NV_STATUS subdeviceCtrlCmdGpuGetOEMInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *pOemInfo); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetOEMInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *pOemInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetOEMInfo__(pSubdevice, 
pOemInfo); +} + +NV_STATUS subdeviceCtrlCmdGpuHandleGpuSR_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdGpuHandleGpuSR_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuHandleGpuSR__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdGpuInitializeCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pInitializeCtxParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuInitializeCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pInitializeCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuInitializeCtx__(pSubdevice, pInitializeCtxParams); +} + +NV_STATUS subdeviceCtrlCmdGpuPromoteCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pPromoteCtxParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuPromoteCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pPromoteCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuPromoteCtx__(pSubdevice, pPromoteCtxParams); +} + +NV_STATUS subdeviceCtrlCmdGpuEvictCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pEvictCtxParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuEvictCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pEvictCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuEvictCtx__(pSubdevice, pEvictCtxParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetId_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetId_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetId__(pSubdevice, pIdParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetGidInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetGidInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetGidInfo__(pSubdevice, pGidInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPids_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPids_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPids__(pSubdevice, pGetPidsParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPidInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPidInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPidInfo__(pSubdevice, pGetPidInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryFunctionStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryFunctionStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetMaxSupportedPageSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS 
*pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdValidateMemMapRequest__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineLoadTimes_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineLoadTimes__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetTrigger_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdEventSetTrigger_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdEventSetTrigger__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdEventSetTriggerFifo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetTriggerFifo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetTriggerFifo__(pSubdevice, pTriggerFifoParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetNotification_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetNotification_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetNotification__(pSubdevice, pSetEventParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetMemoryNotifies_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetMemoryNotifies_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetMemoryNotifies__(pSubdevice, pSetMemoryNotifiesParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetSemaphoreMemory_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetSemaphoreMemory__(pSubdevice, pSetSemMemoryParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetSemaMemValidation_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetSemaMemValidation_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetSemaMemValidation__(pSubdevice, pSetSemaMemValidationParams); +} + +NV_STATUS subdeviceCtrlCmdTimerCancel_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdTimerCancel_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdTimerCancel__(pSubdevice); 
+} + +NV_STATUS subdeviceCtrlCmdTimerSchedule_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerSchedule_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerSchedule__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdTimerGetTime_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerGetTime_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetTime__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdTimerGetRegisterOffset_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerGetRegisterOffset_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetRegisterOffset__(pSubdevice, pTimerRegOffsetParams); +} + +NV_STATUS subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdEccGetClientExposedCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdEccGetClientExposedCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGspGetFeatures_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *pGspFeaturesParams); + +static inline NV_STATUS subdeviceCtrlCmdGspGetFeatures_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *pGspFeaturesParams) { + return pSubdevice->__subdeviceCtrlCmdGspGetFeatures__(pSubdevice, pGspFeaturesParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams) { + return 
pSubdevice->__subdeviceCtrlCmdOsUnixAudioDynamicPower__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayGetIpVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetIpVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetIpVersion__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayGetStaticInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetStaticInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetStaticInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetChannelPushbuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetChannelPushbuffer__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayWriteInstMem_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplayWriteInstMem_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayWriteInstMem__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetupRgLineIntr_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetupRgLineIntr__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplaySetImportedImpData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetImportedImpData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetImportedImpData__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayGetDisplayMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetDisplayMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetDisplayMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetChipInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetChipInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetChipInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *pParams); + +static inline NV_STATUS 
subdeviceCtrlCmdInternalGetUserRegisterAccessMap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetDeviceInfoTable_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetDeviceInfoTable__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetConstructedFalconInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetConstructedFalconInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetConstructedFalconInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalRecoverAllComputeContexts_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalRecoverAllComputeContexts__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalGetSmcMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetSmcMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetSmcMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetPcieP2pCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetPcieP2pCaps__(pSubdevice, pParams); +} + +static inline NvBool subdeviceShareCallback_DISPATCH(struct Subdevice *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__subdeviceShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS subdeviceMapTo_DISPATCH(struct Subdevice *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__subdeviceMapTo__(pResource, pParams); +} + +static inline NV_STATUS subdeviceGetOrAllocNotifShare_DISPATCH(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__subdeviceGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS subdeviceCheckMemInterUnmap_DISPATCH(struct Subdevice *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__subdeviceCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS subdeviceGetMapAddrSpace_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__subdeviceGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void 
subdeviceSetNotificationShare_DISPATCH(struct Subdevice *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__subdeviceSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 subdeviceGetRefCount_DISPATCH(struct Subdevice *pResource) { + return pResource->__subdeviceGetRefCount__(pResource); +} + +static inline void subdeviceAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Subdevice *pResource, RsResourceRef *pReference) { + pResource->__subdeviceAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS subdeviceControl_Prologue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__subdeviceControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS subdeviceGetRegBaseOffsetAndSize_DISPATCH(struct Subdevice *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__subdeviceGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS subdeviceUnmapFrom_DISPATCH(struct Subdevice *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__subdeviceUnmapFrom__(pResource, pParams); +} + +static inline void subdeviceControl_Epilogue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__subdeviceControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS subdeviceControlLookup_DISPATCH(struct Subdevice *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__subdeviceControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle subdeviceGetInternalObjectHandle_DISPATCH(struct Subdevice *pGpuResource) { + return pGpuResource->__subdeviceGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS subdeviceControl_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__subdeviceControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS subdeviceUnmap_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__subdeviceUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS subdeviceGetMemInterMapParams_DISPATCH(struct Subdevice *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__subdeviceGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS subdeviceGetMemoryMappingDescriptor_DISPATCH(struct Subdevice *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__subdeviceGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS subdeviceUnregisterEvent_DISPATCH(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__subdeviceUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool subdeviceCanCopy_DISPATCH(struct Subdevice *pResource) { + return pResource->__subdeviceCanCopy__(pResource); +} + +static inline PEVENTNOTIFICATION *subdeviceGetNotificationListPtr_DISPATCH(struct Subdevice *pNotifier) { + return pNotifier->__subdeviceGetNotificationListPtr__(pNotifier); +} + +static inline 
struct NotifShare *subdeviceGetNotificationShare_DISPATCH(struct Subdevice *pNotifier) { + return pNotifier->__subdeviceGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS subdeviceMap_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__subdeviceMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool subdeviceAccessCallback_DISPATCH(struct Subdevice *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__subdeviceAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS subdeviceSetPerfmonReservation(struct Subdevice *pSubdevice, NvBool bReservation, NvBool bClientHandlesGrGating, NvBool bRmHandlesIdleSlow) { + return NV_OK; +} + +static inline NV_STATUS subdeviceResetTGP(struct Subdevice *pSubdevice) { + return NV_OK; +} + +static inline NV_STATUS subdeviceReleaseVideoStreams(struct Subdevice *pSubdevice) { + return NV_OK; +} + +static inline void subdeviceRestoreLockedClock(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceReleaseNvlinkErrorInjectionMode(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceRestoreGrTickFreq(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceRestoreWatchdog(struct Subdevice *pSubdevice) { + return; +} + +NV_STATUS subdeviceConstruct_IMPL(struct Subdevice *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_subdeviceConstruct(arg_pResource, arg_pCallContext, arg_pParams) subdeviceConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void subdeviceDestruct_IMPL(struct Subdevice *pResource); +#define __nvoc_subdeviceDestruct(pResource) subdeviceDestruct_IMPL(pResource) +void subdeviceUnsetGpuDebugMode_IMPL(struct Subdevice *pSubdevice); +#ifdef __nvoc_subdevice_h_disabled +static inline void subdeviceUnsetGpuDebugMode(struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceUnsetGpuDebugMode(pSubdevice) subdeviceUnsetGpuDebugMode_IMPL(pSubdevice) +#endif //__nvoc_subdevice_h_disabled + +void subdeviceReleaseComputeModeReservation_IMPL(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext); +#ifdef __nvoc_subdevice_h_disabled +static inline void subdeviceReleaseComputeModeReservation(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceReleaseComputeModeReservation(pSubdevice, pCallContext) subdeviceReleaseComputeModeReservation_IMPL(pSubdevice, pCallContext) +#endif //__nvoc_subdevice_h_disabled + +NV_STATUS subdeviceGetByHandle_IMPL(struct RsClient *pClient, NvHandle hSubdevice, struct Subdevice **ppSubdevice); +#define subdeviceGetByHandle(pClient, hSubdevice, ppSubdevice) subdeviceGetByHandle_IMPL(pClient, hSubdevice, ppSubdevice) +NV_STATUS subdeviceGetByGpu_IMPL(struct RsClient *pClient, struct OBJGPU *pGpu, struct Subdevice **ppSubdevice); +#define subdeviceGetByGpu(pClient, pGpu, ppSubdevice) subdeviceGetByGpu_IMPL(pClient, pGpu, ppSubdevice) +NV_STATUS subdeviceGetByInstance_IMPL(struct RsClient *pClient, NvHandle 
hDevice, NvU32 subDeviceInst, struct Subdevice **ppSubdevice);
+#define subdeviceGetByInstance(pClient, hDevice, subDeviceInst, ppSubdevice) subdeviceGetByInstance_IMPL(pClient, hDevice, subDeviceInst, ppSubdevice)
+#undef PRIVATE_FIELD
+
+
+// ****************************************************************************
+// Deprecated Definitions
+// ****************************************************************************
+
+/**
+ * WARNING: This function is deprecated! Please use subdeviceGetByGpu and
+ * GPU_RES_SET_THREAD_BC_STATE (if needed to set thread UC state for SLI)
+ */
+struct Subdevice *CliGetSubDeviceInfoFromGpu(NvHandle, struct OBJGPU*);
+
+/**
+ * WARNING: This function is deprecated! Please use subdeviceGetByGpu and
+ * RES_GET_HANDLE
+ */
+NV_STATUS CliGetSubDeviceHandleFromGpu(NvHandle, struct OBJGPU*, NvHandle *);
+
+/**
+ * WARNING: This function is deprecated and its use is *strongly* discouraged
+ * (especially for new code!)
+ *
+ * From the function name (CliSetSubDeviceContext) it appears to be a simple
+ * accessor, but it violates expectations by modifying the SLI BC threadstate
+ * (calls to GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not
+ * carefully managed by the caller.
+ *
+ * Instead of using this routine, please use subdeviceGetByHandle, then call
+ * GPU_RES_GET_GPU, RES_GET_HANDLE, and GPU_RES_SET_THREAD_BC_STATE as needed.
+ *
+ * Note that GPU_RES_GET_GPU supports returning a pGpu for pDevice,
+ * pSubdevice, the base pResource type, and any resource that inherits from
+ * GpuResource. That is, instead of using CliSetGpuContext or
+ * CliSetSubDeviceContext, please use the following pattern to look up the pGpu:
+ *
+ * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource)
+ *
+ * To set the threadstate, please use:
+ *
+ * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource);
+ */
+NV_STATUS CliSetSubDeviceContext(NvHandle hClient, NvHandle hSubdevice, NvHandle *phDevice,
+ struct OBJGPU **ppGpu);
+
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_SUBDEVICE_NVOC_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c
new file mode 100644
index 0000000..e087c20
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c
@@ -0,0 +1,323 @@
+#define NVOC_SYNCPOINT_MEM_H_PRIVATE_ACCESS_ALLOWED
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_syncpoint_mem_nvoc.h"
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0x529def = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_SyncpointMemory;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory;
+
+void __nvoc_init_SyncpointMemory(SyncpointMemory*);
+void __nvoc_init_funcTable_SyncpointMemory(SyncpointMemory*);
+NV_STATUS __nvoc_ctor_SyncpointMemory(SyncpointMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
+void __nvoc_init_dataField_SyncpointMemory(SyncpointMemory*);
+void __nvoc_dtor_SyncpointMemory(SyncpointMemory*);
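+
+/*
+ * A reading aid for the generated tables below (not emitted by the NVOC
+ * generator itself): each NVOC_RTTI entry records the byte offset of one
+ * base class inside SyncpointMemory, and dynamicCast() walks the
+ * NVOC_CASTINFO relatives list to rebase a pointer between the derived
+ * object and its bases. Informally, converting a SyncpointMemory pointer to
+ * its Memory base reduces to pointer arithmetic along these lines (an
+ * illustrative sketch; pSyncpoint is a hypothetical variable, not
+ * generated code):
+ *
+ *     struct Memory *pMem = (struct Memory *)((unsigned char *)pSyncpoint +
+ *         NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory));
+ *
+ * The __nvoc_thunk_* functions below apply the same offsets in both
+ * directions: adding an offset reaches a base-class implementation from the
+ * derived object, and subtracting it recovers the derived object from a
+ * base-class pointer, which is how inherited virtuals are dispatched.
+ */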
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_SyncpointMemory; + +static const struct NVOC_RTTI __nvoc_rtti_SyncpointMemory_SyncpointMemory = { + /*pClassDef=*/ &__nvoc_class_def_SyncpointMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SyncpointMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncpointMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncpointMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncpointMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncpointMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncpointMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncpointMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_SyncpointMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_SyncpointMemory_SyncpointMemory, + &__nvoc_rtti_SyncpointMemory_Memory, + &__nvoc_rtti_SyncpointMemory_RmResource, + &__nvoc_rtti_SyncpointMemory_RmResourceCommon, + &__nvoc_rtti_SyncpointMemory_RsResource, + &__nvoc_rtti_SyncpointMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_SyncpointMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SyncpointMemory), + /*classId=*/ classId(SyncpointMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SyncpointMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SyncpointMemory, + /*pCastInfo=*/ &__nvoc_castinfo_SyncpointMemory, + /*pExportInfo=*/ &__nvoc_export_info_SyncpointMemory +}; + +static NvBool __nvoc_thunk_SyncpointMemory_resCanCopy(struct RsResource *pSyncpointMemory) { + return syncpointCanCopy((struct SyncpointMemory *)(((unsigned char *)pSyncpointMemory) - __nvoc_rtti_SyncpointMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointCheckMemInterUnmap(struct SyncpointMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointControl(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointUnmap(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + 
__nvoc_rtti_SyncpointMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointGetMemInterMapParams(struct SyncpointMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointGetMemoryMappingDescriptor(struct SyncpointMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointGetMapAddrSpace(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_syncpointShareCallback(struct SyncpointMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncpointControlFilter(struct SyncpointMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_syncpointAddAdditionalDependants(struct RsClient *pClient, struct SyncpointMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_syncpointGetRefCount(struct SyncpointMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncpointMapTo(struct SyncpointMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_syncpointControl_Prologue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointIsReady(struct SyncpointMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointCheckCopyPermissions(struct SyncpointMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_syncpointPreDestruct(struct SyncpointMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_SyncpointMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncpointUnmapFrom(struct SyncpointMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_syncpointControl_Epilogue(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncpointControlLookup(struct SyncpointMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_syncpointMap(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SyncpointMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_syncpointAccessCallback(struct SyncpointMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncpointMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_SyncpointMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_SyncpointMemory(SyncpointMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SyncpointMemory(SyncpointMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_SyncpointMemory(SyncpointMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SyncpointMemory_fail_Memory; + __nvoc_init_dataField_SyncpointMemory(pThis); + + status = __nvoc_syncpointConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SyncpointMemory_fail__init; + goto __nvoc_ctor_SyncpointMemory_exit; // Success + +__nvoc_ctor_SyncpointMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_SyncpointMemory_fail_Memory: +__nvoc_ctor_SyncpointMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_SyncpointMemory_1(SyncpointMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__syncpointCanCopy__ = &syncpointCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_SyncpointMemory_resCanCopy; + + pThis->__syncpointCheckMemInterUnmap__ = &__nvoc_thunk_Memory_syncpointCheckMemInterUnmap; + + pThis->__syncpointControl__ = &__nvoc_thunk_Memory_syncpointControl; + + pThis->__syncpointUnmap__ = &__nvoc_thunk_Memory_syncpointUnmap; + + 
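// All remaining entries forward to implementations inherited from Memory,
+    // RmResource, or RsResource via the offset-adjusting thunks defined above.
+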
pThis->__syncpointGetMemInterMapParams__ = &__nvoc_thunk_Memory_syncpointGetMemInterMapParams; + + pThis->__syncpointGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_syncpointGetMemoryMappingDescriptor; + + pThis->__syncpointGetMapAddrSpace__ = &__nvoc_thunk_Memory_syncpointGetMapAddrSpace; + + pThis->__syncpointShareCallback__ = &__nvoc_thunk_RmResource_syncpointShareCallback; + + pThis->__syncpointControlFilter__ = &__nvoc_thunk_RsResource_syncpointControlFilter; + + pThis->__syncpointAddAdditionalDependants__ = &__nvoc_thunk_RsResource_syncpointAddAdditionalDependants; + + pThis->__syncpointGetRefCount__ = &__nvoc_thunk_RsResource_syncpointGetRefCount; + + pThis->__syncpointMapTo__ = &__nvoc_thunk_RsResource_syncpointMapTo; + + pThis->__syncpointControl_Prologue__ = &__nvoc_thunk_RmResource_syncpointControl_Prologue; + + pThis->__syncpointIsReady__ = &__nvoc_thunk_Memory_syncpointIsReady; + + pThis->__syncpointCheckCopyPermissions__ = &__nvoc_thunk_Memory_syncpointCheckCopyPermissions; + + pThis->__syncpointPreDestruct__ = &__nvoc_thunk_RsResource_syncpointPreDestruct; + + pThis->__syncpointUnmapFrom__ = &__nvoc_thunk_RsResource_syncpointUnmapFrom; + + pThis->__syncpointControl_Epilogue__ = &__nvoc_thunk_RmResource_syncpointControl_Epilogue; + + pThis->__syncpointControlLookup__ = &__nvoc_thunk_RsResource_syncpointControlLookup; + + pThis->__syncpointMap__ = &__nvoc_thunk_Memory_syncpointMap; + + pThis->__syncpointAccessCallback__ = &__nvoc_thunk_RmResource_syncpointAccessCallback; +} + +void __nvoc_init_funcTable_SyncpointMemory(SyncpointMemory *pThis) { + __nvoc_init_funcTable_SyncpointMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_SyncpointMemory(SyncpointMemory *pThis) { + pThis->__nvoc_pbase_SyncpointMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_SyncpointMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_SyncpointMemory(SyncpointMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + SyncpointMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(SyncpointMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(SyncpointMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_SyncpointMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_SyncpointMemory(pThis); + status = __nvoc_ctor_SyncpointMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_SyncpointMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_SyncpointMemory_cleanup: + // do not 
call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SyncpointMemory(SyncpointMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_SyncpointMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h new file mode 100644 index 0000000..b2f6cb5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h @@ -0,0 +1,224 @@ +#ifndef _G_SYNCPOINT_MEM_NVOC_H_ +#define _G_SYNCPOINT_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_syncpoint_mem_nvoc.h" + +#ifndef _SYNCPOINT_MEMORY_H_ +#define _SYNCPOINT_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! 
+ * Bind memory allocated through os descriptor + */ +#ifdef NVOC_SYNCPOINT_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct SyncpointMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct SyncpointMemory *__nvoc_pbase_SyncpointMemory; + NvBool (*__syncpointCanCopy__)(struct SyncpointMemory *); + NV_STATUS (*__syncpointCheckMemInterUnmap__)(struct SyncpointMemory *, NvBool); + NV_STATUS (*__syncpointControl__)(struct SyncpointMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__syncpointUnmap__)(struct SyncpointMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__syncpointGetMemInterMapParams__)(struct SyncpointMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__syncpointGetMemoryMappingDescriptor__)(struct SyncpointMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__syncpointGetMapAddrSpace__)(struct SyncpointMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__syncpointShareCallback__)(struct SyncpointMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__syncpointControlFilter__)(struct SyncpointMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__syncpointAddAdditionalDependants__)(struct RsClient *, struct SyncpointMemory *, RsResourceRef *); + NvU32 (*__syncpointGetRefCount__)(struct SyncpointMemory *); + NV_STATUS (*__syncpointMapTo__)(struct SyncpointMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__syncpointControl_Prologue__)(struct SyncpointMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__syncpointIsReady__)(struct SyncpointMemory *); + NV_STATUS (*__syncpointCheckCopyPermissions__)(struct SyncpointMemory *, struct OBJGPU *, NvHandle); + void (*__syncpointPreDestruct__)(struct SyncpointMemory *); + NV_STATUS (*__syncpointUnmapFrom__)(struct SyncpointMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__syncpointControl_Epilogue__)(struct SyncpointMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__syncpointControlLookup__)(struct SyncpointMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__syncpointMap__)(struct SyncpointMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__syncpointAccessCallback__)(struct SyncpointMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +#define __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +typedef struct SyncpointMemory SyncpointMemory; +#endif /* __NVOC_CLASS_SyncpointMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncpointMemory +#define __nvoc_class_id_SyncpointMemory 0x529def +#endif /* __nvoc_class_id_SyncpointMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SyncpointMemory; + +#define __staticCast_SyncpointMemory(pThis) \ + ((pThis)->__nvoc_pbase_SyncpointMemory) + +#ifdef __nvoc_syncpoint_mem_h_disabled +#define __dynamicCast_SyncpointMemory(pThis) ((SyncpointMemory*)NULL) +#else //__nvoc_syncpoint_mem_h_disabled +#define __dynamicCast_SyncpointMemory(pThis) \ + ((SyncpointMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SyncpointMemory))) +#endif 
//__nvoc_syncpoint_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_SyncpointMemory(SyncpointMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SyncpointMemory(SyncpointMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_SyncpointMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_SyncpointMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define syncpointCanCopy(pSyncpointMemory) syncpointCanCopy_DISPATCH(pSyncpointMemory) +#define syncpointCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) syncpointCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define syncpointControl(pMemory, pCallContext, pParams) syncpointControl_DISPATCH(pMemory, pCallContext, pParams) +#define syncpointUnmap(pMemory, pCallContext, pCpuMapping) syncpointUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define syncpointGetMemInterMapParams(pMemory, pParams) syncpointGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define syncpointGetMemoryMappingDescriptor(pMemory, ppMemDesc) syncpointGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define syncpointGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) syncpointGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define syncpointShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) syncpointShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define syncpointControlFilter(pResource, pCallContext, pParams) syncpointControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define syncpointAddAdditionalDependants(pClient, pResource, pReference) syncpointAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define syncpointGetRefCount(pResource) syncpointGetRefCount_DISPATCH(pResource) +#define syncpointMapTo(pResource, pParams) syncpointMapTo_DISPATCH(pResource, pParams) +#define syncpointControl_Prologue(pResource, pCallContext, pParams) syncpointControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define syncpointIsReady(pMemory) syncpointIsReady_DISPATCH(pMemory) +#define syncpointCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) syncpointCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define syncpointPreDestruct(pResource) syncpointPreDestruct_DISPATCH(pResource) +#define syncpointUnmapFrom(pResource, pParams) syncpointUnmapFrom_DISPATCH(pResource, pParams) +#define syncpointControl_Epilogue(pResource, pCallContext, pParams) syncpointControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define syncpointControlLookup(pResource, pParams, ppEntry) syncpointControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define syncpointMap(pMemory, pCallContext, pParams, pCpuMapping) syncpointMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define syncpointAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) syncpointAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool syncpointCanCopy_IMPL(struct SyncpointMemory *pSyncpointMemory); + +static inline NvBool syncpointCanCopy_DISPATCH(struct SyncpointMemory *pSyncpointMemory) { + return pSyncpointMemory->__syncpointCanCopy__(pSyncpointMemory); +} + +static inline NV_STATUS syncpointCheckMemInterUnmap_DISPATCH(struct SyncpointMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return 
pMemory->__syncpointCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS syncpointControl_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__syncpointControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS syncpointUnmap_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__syncpointUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS syncpointGetMemInterMapParams_DISPATCH(struct SyncpointMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__syncpointGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS syncpointGetMemoryMappingDescriptor_DISPATCH(struct SyncpointMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__syncpointGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS syncpointGetMapAddrSpace_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__syncpointGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool syncpointShareCallback_DISPATCH(struct SyncpointMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__syncpointShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS syncpointControlFilter_DISPATCH(struct SyncpointMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__syncpointControlFilter__(pResource, pCallContext, pParams); +} + +static inline void syncpointAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct SyncpointMemory *pResource, RsResourceRef *pReference) { + pResource->__syncpointAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 syncpointGetRefCount_DISPATCH(struct SyncpointMemory *pResource) { + return pResource->__syncpointGetRefCount__(pResource); +} + +static inline NV_STATUS syncpointMapTo_DISPATCH(struct SyncpointMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__syncpointMapTo__(pResource, pParams); +} + +static inline NV_STATUS syncpointControl_Prologue_DISPATCH(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__syncpointControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS syncpointIsReady_DISPATCH(struct SyncpointMemory *pMemory) { + return pMemory->__syncpointIsReady__(pMemory); +} + +static inline NV_STATUS syncpointCheckCopyPermissions_DISPATCH(struct SyncpointMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__syncpointCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void syncpointPreDestruct_DISPATCH(struct SyncpointMemory *pResource) { + pResource->__syncpointPreDestruct__(pResource); +} + +static inline NV_STATUS syncpointUnmapFrom_DISPATCH(struct SyncpointMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__syncpointUnmapFrom__(pResource, pParams); +} + +static inline void syncpointControl_Epilogue_DISPATCH(struct SyncpointMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + 
pResource->__syncpointControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS syncpointControlLookup_DISPATCH(struct SyncpointMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__syncpointControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS syncpointMap_DISPATCH(struct SyncpointMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__syncpointMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool syncpointAccessCallback_DISPATCH(struct SyncpointMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__syncpointAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS syncpointConstruct_IMPL(struct SyncpointMemory *arg_pSyncpointMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_syncpointConstruct(arg_pSyncpointMemory, arg_pCallContext, arg_pParams) syncpointConstruct_IMPL(arg_pSyncpointMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SYNCPOINT_MEM_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c new file mode 100644 index 0000000..ddfa4c0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c @@ -0,0 +1,378 @@ +#define NVOC_SYSTEM_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_system_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x007a98 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +void __nvoc_init_SystemMemory(SystemMemory*); +void __nvoc_init_funcTable_SystemMemory(SystemMemory*); +NV_STATUS __nvoc_ctor_SystemMemory(SystemMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_SystemMemory(SystemMemory*); +void __nvoc_dtor_SystemMemory(SystemMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_SystemMemory; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_SystemMemory = { + /*pClassDef=*/ &__nvoc_class_def_SystemMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SystemMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_StandardMemory = { + /*pClassDef=*/ &__nvoc_class_def_StandardMemory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_SystemMemory = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_SystemMemory_SystemMemory, + &__nvoc_rtti_SystemMemory_StandardMemory, + &__nvoc_rtti_SystemMemory_Memory, + &__nvoc_rtti_SystemMemory_RmResource, + &__nvoc_rtti_SystemMemory_RmResourceCommon, + &__nvoc_rtti_SystemMemory_RsResource, + &__nvoc_rtti_SystemMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SystemMemory), + /*classId=*/ classId(SystemMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SystemMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SystemMemory, + /*pCastInfo=*/ &__nvoc_castinfo_SystemMemory, + /*pExportInfo=*/ &__nvoc_export_info_SystemMemory +}; + +static NV_STATUS __nvoc_thunk_Memory_sysmemCheckMemInterUnmap(struct SystemMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemControl(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemUnmap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemGetMemInterMapParams(struct SystemMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemGetMemoryMappingDescriptor(struct SystemMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS 
__nvoc_thunk_Memory_sysmemGetMapAddrSpace(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_sysmemShareCallback(struct SystemMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemControlFilter(struct SystemMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_sysmemAddAdditionalDependants(struct RsClient *pClient, struct SystemMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_sysmemGetRefCount(struct SystemMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemMapTo(struct SystemMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_sysmemControl_Prologue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_StandardMemory_sysmemCanCopy(struct SystemMemory *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char *)pStandardMemory) + __nvoc_rtti_SystemMemory_StandardMemory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemIsReady(struct SystemMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemCheckCopyPermissions(struct SystemMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_sysmemPreDestruct(struct SystemMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemUnmapFrom(struct SystemMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_sysmemControl_Epilogue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned 
char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemControlLookup(struct SystemMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemMap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_sysmemAccessCallback(struct SystemMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_SystemMemory[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3e0102u, + /*paramSize=*/ sizeof(NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_SystemMemory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "sysmemCtrlCmdGetSurfaceNumPhysPages" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) sysmemCtrlCmdGetSurfacePhysPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3e0103u, + /*paramSize=*/ sizeof(NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_SystemMemory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "sysmemCtrlCmdGetSurfacePhysPages" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_SystemMemory = +{ + /*numEntries=*/ 2, + /*pExportEntries=*/ __nvoc_exported_method_def_SystemMemory +}; + +void __nvoc_dtor_StandardMemory(StandardMemory*); +void __nvoc_dtor_SystemMemory(SystemMemory *pThis) { + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SystemMemory(SystemMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_SystemMemory(SystemMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_StandardMemory(&pThis->__nvoc_base_StandardMemory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SystemMemory_fail_StandardMemory; + __nvoc_init_dataField_SystemMemory(pThis); + + status = __nvoc_sysmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SystemMemory_fail__init; + goto __nvoc_ctor_SystemMemory_exit; // Success + 
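/*
 * [Annotation, not part of the generated patch] The labels below are NVOC's
 * goto-based constructor unwinding: bases that constructed successfully
 * before a later failure are destroyed in reverse order, so a failing
 * __nvoc_sysmemConstruct() still tears down the StandardMemory base. The
 * idiom, sketched with hypothetical names:
 *
 *     status = ctorBase(pThis);
 *     if (status != NV_OK) goto fail_base;     // nothing to unwind yet
 *     status = ctorSelf(pThis);
 *     if (status != NV_OK) goto fail_self;     // must undo the base ctor
 *     return NV_OK;
 * fail_self:
 *     dtorBase(pThis);                         // reverse-order teardown
 * fail_base:
 *     return status;
 */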
+__nvoc_ctor_SystemMemory_fail__init: + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); +__nvoc_ctor_SystemMemory_fail_StandardMemory: +__nvoc_ctor_SystemMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_SystemMemory_1(SystemMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__sysmemCtrlCmdGetSurfaceNumPhysPages__ = &sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__sysmemCtrlCmdGetSurfacePhysPages__ = &sysmemCtrlCmdGetSurfacePhysPages_IMPL; +#endif + + pThis->__sysmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_sysmemCheckMemInterUnmap; + + pThis->__sysmemControl__ = &__nvoc_thunk_Memory_sysmemControl; + + pThis->__sysmemUnmap__ = &__nvoc_thunk_Memory_sysmemUnmap; + + pThis->__sysmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_sysmemGetMemInterMapParams; + + pThis->__sysmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_sysmemGetMemoryMappingDescriptor; + + pThis->__sysmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_sysmemGetMapAddrSpace; + + pThis->__sysmemShareCallback__ = &__nvoc_thunk_RmResource_sysmemShareCallback; + + pThis->__sysmemControlFilter__ = &__nvoc_thunk_RsResource_sysmemControlFilter; + + pThis->__sysmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_sysmemAddAdditionalDependants; + + pThis->__sysmemGetRefCount__ = &__nvoc_thunk_RsResource_sysmemGetRefCount; + + pThis->__sysmemMapTo__ = &__nvoc_thunk_RsResource_sysmemMapTo; + + pThis->__sysmemControl_Prologue__ = &__nvoc_thunk_RmResource_sysmemControl_Prologue; + + pThis->__sysmemCanCopy__ = &__nvoc_thunk_StandardMemory_sysmemCanCopy; + + pThis->__sysmemIsReady__ = &__nvoc_thunk_Memory_sysmemIsReady; + + pThis->__sysmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_sysmemCheckCopyPermissions; + + pThis->__sysmemPreDestruct__ = &__nvoc_thunk_RsResource_sysmemPreDestruct; + + pThis->__sysmemUnmapFrom__ = &__nvoc_thunk_RsResource_sysmemUnmapFrom; + + pThis->__sysmemControl_Epilogue__ = &__nvoc_thunk_RmResource_sysmemControl_Epilogue; + + pThis->__sysmemControlLookup__ = &__nvoc_thunk_RsResource_sysmemControlLookup; + + pThis->__sysmemMap__ = &__nvoc_thunk_Memory_sysmemMap; + + pThis->__sysmemAccessCallback__ = &__nvoc_thunk_RmResource_sysmemAccessCallback; +} + +void __nvoc_init_funcTable_SystemMemory(SystemMemory *pThis) { + __nvoc_init_funcTable_SystemMemory_1(pThis); +} + +void __nvoc_init_StandardMemory(StandardMemory*); +void __nvoc_init_SystemMemory(SystemMemory *pThis) { + pThis->__nvoc_pbase_SystemMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory; + pThis->__nvoc_pbase_StandardMemory = &pThis->__nvoc_base_StandardMemory; + __nvoc_init_StandardMemory(&pThis->__nvoc_base_StandardMemory); + __nvoc_init_funcTable_SystemMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_SystemMemory(SystemMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * 
arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + SystemMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(SystemMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(SystemMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_SystemMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_SystemMemory(pThis); + status = __nvoc_ctor_SystemMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_SystemMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_SystemMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SystemMemory(SystemMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_SystemMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h new file mode 100644 index 0000000..6ba1b8e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h @@ -0,0 +1,254 @@ +#ifndef _G_SYSTEM_MEM_NVOC_H_ +#define _G_SYSTEM_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_system_mem_nvoc.h" + +#ifndef _SYSTEM_MEMORY_H_ +#define _SYSTEM_MEMORY_H_ + +#include "mem_mgr/standard_mem.h" +#include "gpu/mem_mgr/heap_base.h" + +#ifdef NVOC_SYSTEM_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct SystemMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct StandardMemory __nvoc_base_StandardMemory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct StandardMemory *__nvoc_pbase_StandardMemory; + struct SystemMemory *__nvoc_pbase_SystemMemory; + NV_STATUS (*__sysmemCtrlCmdGetSurfaceNumPhysPages__)(struct SystemMemory *, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *); + NV_STATUS (*__sysmemCtrlCmdGetSurfacePhysPages__)(struct SystemMemory *, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *); + NV_STATUS (*__sysmemCheckMemInterUnmap__)(struct SystemMemory *, NvBool); + NV_STATUS (*__sysmemControl__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__sysmemUnmap__)(struct SystemMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__sysmemGetMemInterMapParams__)(struct SystemMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__sysmemGetMemoryMappingDescriptor__)(struct SystemMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__sysmemGetMapAddrSpace__)(struct SystemMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__sysmemShareCallback__)(struct SystemMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__sysmemControlFilter__)(struct SystemMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__sysmemAddAdditionalDependants__)(struct RsClient *, struct SystemMemory *, RsResourceRef *); + NvU32 (*__sysmemGetRefCount__)(struct SystemMemory *); + NV_STATUS (*__sysmemMapTo__)(struct SystemMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__sysmemControl_Prologue__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__sysmemCanCopy__)(struct SystemMemory *); + NV_STATUS (*__sysmemIsReady__)(struct SystemMemory *); + NV_STATUS (*__sysmemCheckCopyPermissions__)(struct SystemMemory *, struct OBJGPU *, NvHandle); + void (*__sysmemPreDestruct__)(struct SystemMemory *); + NV_STATUS (*__sysmemUnmapFrom__)(struct SystemMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__sysmemControl_Epilogue__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__sysmemControlLookup__)(struct SystemMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__sysmemMap__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__sysmemAccessCallback__)(struct SystemMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_SystemMemory_TYPEDEF__ +#define __NVOC_CLASS_SystemMemory_TYPEDEF__ +typedef struct SystemMemory SystemMemory; +#endif /* __NVOC_CLASS_SystemMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SystemMemory +#define __nvoc_class_id_SystemMemory 0x007a98 +#endif /* __nvoc_class_id_SystemMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory; + +#define __staticCast_SystemMemory(pThis) \ + ((pThis)->__nvoc_pbase_SystemMemory) + +#ifdef __nvoc_system_mem_h_disabled +#define 
__dynamicCast_SystemMemory(pThis) ((SystemMemory*)NULL) +#else //__nvoc_system_mem_h_disabled +#define __dynamicCast_SystemMemory(pThis) \ + ((SystemMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SystemMemory))) +#endif //__nvoc_system_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_SystemMemory(SystemMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SystemMemory(SystemMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_SystemMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_SystemMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define sysmemCtrlCmdGetSurfaceNumPhysPages(pStandardMemory, pParams) sysmemCtrlCmdGetSurfaceNumPhysPages_DISPATCH(pStandardMemory, pParams) +#define sysmemCtrlCmdGetSurfacePhysPages(pStandardMemory, pParams) sysmemCtrlCmdGetSurfacePhysPages_DISPATCH(pStandardMemory, pParams) +#define sysmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) sysmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define sysmemControl(pMemory, pCallContext, pParams) sysmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define sysmemUnmap(pMemory, pCallContext, pCpuMapping) sysmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define sysmemGetMemInterMapParams(pMemory, pParams) sysmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define sysmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) sysmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define sysmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) sysmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define sysmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) sysmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define sysmemControlFilter(pResource, pCallContext, pParams) sysmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define sysmemAddAdditionalDependants(pClient, pResource, pReference) sysmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define sysmemGetRefCount(pResource) sysmemGetRefCount_DISPATCH(pResource) +#define sysmemMapTo(pResource, pParams) sysmemMapTo_DISPATCH(pResource, pParams) +#define sysmemControl_Prologue(pResource, pCallContext, pParams) sysmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemCanCopy(pStandardMemory) sysmemCanCopy_DISPATCH(pStandardMemory) +#define sysmemIsReady(pMemory) sysmemIsReady_DISPATCH(pMemory) +#define sysmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) sysmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define sysmemPreDestruct(pResource) sysmemPreDestruct_DISPATCH(pResource) +#define sysmemUnmapFrom(pResource, pParams) sysmemUnmapFrom_DISPATCH(pResource, pParams) +#define sysmemControl_Epilogue(pResource, pCallContext, pParams) sysmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemControlLookup(pResource, pParams, ppEntry) sysmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define sysmemMap(pMemory, pCallContext, pParams, pCpuMapping) sysmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define sysmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) sysmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS sysmemInitAllocRequest_SOC(struct OBJGPU 
*pGpu, struct SystemMemory *pSystemMemory, MEMORY_ALLOCATION_REQUEST *pAllocRequest); + +#ifdef __nvoc_system_mem_h_disabled +static inline NV_STATUS sysmemInitAllocRequest(struct OBJGPU *pGpu, struct SystemMemory *pSystemMemory, MEMORY_ALLOCATION_REQUEST *pAllocRequest) { + NV_ASSERT_FAILED_PRECOMP("SystemMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_system_mem_h_disabled +#define sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest) sysmemInitAllocRequest_SOC(pGpu, pSystemMemory, pAllocRequest) +#endif //__nvoc_system_mem_h_disabled + +#define sysmemInitAllocRequest_HAL(pGpu, pSystemMemory, pAllocRequest) sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest) + +NV_STATUS sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams); + +static inline NV_STATUS sysmemCtrlCmdGetSurfaceNumPhysPages_DISPATCH(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams) { + return pStandardMemory->__sysmemCtrlCmdGetSurfaceNumPhysPages__(pStandardMemory, pParams); +} + +NV_STATUS sysmemCtrlCmdGetSurfacePhysPages_IMPL(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams); + +static inline NV_STATUS sysmemCtrlCmdGetSurfacePhysPages_DISPATCH(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams) { + return pStandardMemory->__sysmemCtrlCmdGetSurfacePhysPages__(pStandardMemory, pParams); +} + +static inline NV_STATUS sysmemCheckMemInterUnmap_DISPATCH(struct SystemMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__sysmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS sysmemControl_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__sysmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS sysmemUnmap_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__sysmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS sysmemGetMemInterMapParams_DISPATCH(struct SystemMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__sysmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS sysmemGetMemoryMappingDescriptor_DISPATCH(struct SystemMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__sysmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS sysmemGetMapAddrSpace_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__sysmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool sysmemShareCallback_DISPATCH(struct SystemMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__sysmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS sysmemControlFilter_DISPATCH(struct SystemMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__sysmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void sysmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct SystemMemory *pResource, RsResourceRef *pReference) { + 
pResource->__sysmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 sysmemGetRefCount_DISPATCH(struct SystemMemory *pResource) { + return pResource->__sysmemGetRefCount__(pResource); +} + +static inline NV_STATUS sysmemMapTo_DISPATCH(struct SystemMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__sysmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS sysmemControl_Prologue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__sysmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool sysmemCanCopy_DISPATCH(struct SystemMemory *pStandardMemory) { + return pStandardMemory->__sysmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS sysmemIsReady_DISPATCH(struct SystemMemory *pMemory) { + return pMemory->__sysmemIsReady__(pMemory); +} + +static inline NV_STATUS sysmemCheckCopyPermissions_DISPATCH(struct SystemMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__sysmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void sysmemPreDestruct_DISPATCH(struct SystemMemory *pResource) { + pResource->__sysmemPreDestruct__(pResource); +} + +static inline NV_STATUS sysmemUnmapFrom_DISPATCH(struct SystemMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__sysmemUnmapFrom__(pResource, pParams); +} + +static inline void sysmemControl_Epilogue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__sysmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS sysmemControlLookup_DISPATCH(struct SystemMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__sysmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS sysmemMap_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__sysmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool sysmemAccessCallback_DISPATCH(struct SystemMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__sysmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS sysmemConstruct_IMPL(struct SystemMemory *arg_pStandardMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_sysmemConstruct(arg_pStandardMemory, arg_pCallContext, arg_pParams) sysmemConstruct_IMPL(arg_pStandardMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +NV_STATUS sysmemAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo, + struct SystemMemory *pSystemMemory); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SYSTEM_MEM_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c new file mode 100644 index 0000000..de53aaf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c @@ -0,0 +1,182 @@ +#define NVOC_SYSTEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include 
"nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_system_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x40e2c8 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +void __nvoc_init_OBJSYS(OBJSYS*); +void __nvoc_init_funcTable_OBJSYS(OBJSYS*); +NV_STATUS __nvoc_ctor_OBJSYS(OBJSYS*); +void __nvoc_init_dataField_OBJSYS(OBJSYS*); +void __nvoc_dtor_OBJSYS(OBJSYS*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJSYS; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSYS_OBJSYS = { + /*pClassDef=*/ &__nvoc_class_def_OBJSYS, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJSYS, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSYS_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJSYS, __nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSYS_OBJTRACEABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJSYS, __nvoc_base_OBJTRACEABLE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJSYS = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJSYS_OBJSYS, + &__nvoc_rtti_OBJSYS_OBJTRACEABLE, + &__nvoc_rtti_OBJSYS_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJSYS), + /*classId=*/ classId(OBJSYS), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJSYS", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJSYS, + /*pCastInfo=*/ &__nvoc_castinfo_OBJSYS, + /*pExportInfo=*/ &__nvoc_export_info_OBJSYS +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJSYS = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJSYS(OBJSYS *pThis) { + __nvoc_sysDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJSYS(OBJSYS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, ((0) || (1) || (0))); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, ((1) && !0)); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED, ((0) || (0))); + pThis->setProperty(pThis, PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_PRIORITY_BOOST, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US, 16 * 1000); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* ); +NV_STATUS __nvoc_ctor_OBJSYS(OBJSYS *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJSYS_fail_Object; + status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + if (status != NV_OK) goto __nvoc_ctor_OBJSYS_fail_OBJTRACEABLE; + __nvoc_init_dataField_OBJSYS(pThis); + + status = __nvoc_sysConstruct(pThis); + if (status != NV_OK) goto 
__nvoc_ctor_OBJSYS_fail__init; + goto __nvoc_ctor_OBJSYS_exit; // Success + +__nvoc_ctor_OBJSYS_fail__init: + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); +__nvoc_ctor_OBJSYS_fail_OBJTRACEABLE: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJSYS_fail_Object: +__nvoc_ctor_OBJSYS_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJSYS_1(OBJSYS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__sysCaptureState__ = &sysCaptureState_IMPL; +} + +void __nvoc_init_funcTable_OBJSYS(OBJSYS *pThis) { + __nvoc_init_funcTable_OBJSYS_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_OBJSYS(OBJSYS *pThis) { + pThis->__nvoc_pbase_OBJSYS = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + __nvoc_init_funcTable_OBJSYS(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJSYS(OBJSYS **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJSYS *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJSYS)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJSYS)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJSYS); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJSYS(pThis); + status = __nvoc_ctor_OBJSYS(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJSYS_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJSYS_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJSYS(OBJSYS **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJSYS(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h new file mode 100644 index 0000000..63d51de --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h @@ -0,0 +1,603 @@ +#ifndef _G_SYSTEM_NVOC_H_ +#define _G_SYSTEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_system_nvoc.h" + +#ifndef SYSTEM_H +#define SYSTEM_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for the System Object. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvlimits.h" // NV_MAX_DEVICES +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "diagnostics/traceable.h" +#include "nvCpuUuid.h" +#include "os/capability.h" +#include "containers/btree.h" + +#define SYS_GET_INSTANCE() (g_pSys) +#define SYS_GET_GPUMGR(p) ((p)->pGpuMgr) +#define SYS_GET_GSYNCMGR(p) ((p)->pGsyncMgr) +#define SYS_GET_VGPUMGR(p) ((p)->pVgpuMgr) +#define SYS_GET_KERNEL_VGPUMGR(p) (RMCFG_FEATURE_KERNEL_RM ? (p)->pKernelVgpuMgr : NULL) +#define SYS_GET_OS(p) sysGetOs((p)) +#define SYS_GET_PFM(p) ((p)->pPfm) +#define SYS_GET_CL(p) ((p)->pCl) +#define SYS_GET_SWINSTR(p) ((p)->pSwInstr) +#define SYS_GET_GPUACCT(p) ((p)->pGpuAcct) +#define SYS_GET_GPS(p) ((p)->pGps) +#define SYS_GET_RCDB(p) ((p)->pRcDB) +#define SYS_GET_VMM(p) (RMCFG_MODULE_VMM ? (p)->pVmm : NULL) +#define SYS_GET_HYPERVISOR(p) ((p)->pHypervisor) +#define SYS_GET_VRRMGR(p) ((p)->pVrrMgr) +#define SYS_GET_GPUBOOSTMGR(p) ((p)->pGpuBoostMgr) +#define SYS_GET_DISPMGR(p) ((p)->pDispMgr) +#define SYS_GET_FABRIC(p) ((p)->pFabric) +#define SYS_GET_GPUDB(p) ((p)->pGpuDb) +#define SYS_GET_HALMGR(p) ((p)->pHalMgr) + +#define IsMobile(p) 0 + +// +// OS defines (Windows flavor can be added later on) +// Unix flavors need to be kept in sync with the defines in file "nv.h" +// +#define OS_TYPE_LINUX 0x1 +#define OS_TYPE_FREEBSD 0x2 +#define OS_TYPE_SUNOS 0x3 +#define OS_TYPE_VMWARE 0x4 + +// Child class forward declarations. 
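/*
 * [Annotation, not part of the generated patch] Each forward declaration
 * below is emitted as the same three-part NVOC unit: an opaque struct
 * declaration, a guard-protected typedef, and a fixed class id consumed by
 * the RTTI/castinfo tables behind dynamicCast(). The repeated shape, shown
 * for OBJPFM (the first entry that follows):
 *
 *     struct OBJPFM;                             // opaque forward decl
 *     #ifndef __NVOC_CLASS_OBJPFM_TYPEDEF__      // typedef emitted once
 *     #define __NVOC_CLASS_OBJPFM_TYPEDEF__
 *     typedef struct OBJPFM OBJPFM;
 *     #endif
 *     #ifndef __nvoc_class_id_OBJPFM
 *     #define __nvoc_class_id_OBJPFM 0xb543ae    // stable id used by RTTI
 *     #endif
 */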
+struct OBJPFM; + +#ifndef __NVOC_CLASS_OBJPFM_TYPEDEF__ +#define __NVOC_CLASS_OBJPFM_TYPEDEF__ +typedef struct OBJPFM OBJPFM; +#endif /* __NVOC_CLASS_OBJPFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPFM +#define __nvoc_class_id_OBJPFM 0xb543ae +#endif /* __nvoc_class_id_OBJPFM */ + + +struct OBJVMM; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + + +struct OBJHYPERVISOR; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + + +struct OBJGPUMGR; + +#ifndef __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ +typedef struct OBJGPUMGR OBJGPUMGR; +#endif /* __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUMGR +#define __nvoc_class_id_OBJGPUMGR 0xcf1b25 +#endif /* __nvoc_class_id_OBJGPUMGR */ + + +struct OBJDISPMGR; + +#ifndef __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ +typedef struct OBJDISPMGR OBJDISPMGR; +#endif /* __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISPMGR +#define __nvoc_class_id_OBJDISPMGR 0x69ad03 +#endif /* __nvoc_class_id_OBJDISPMGR */ + + +struct OBJGPS; + +#ifndef __NVOC_CLASS_OBJGPS_TYPEDEF__ +#define __NVOC_CLASS_OBJGPS_TYPEDEF__ +typedef struct OBJGPS OBJGPS; +#endif /* __NVOC_CLASS_OBJGPS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPS +#define __nvoc_class_id_OBJGPS 0x7ee07d +#endif /* __nvoc_class_id_OBJGPS */ + + +struct GpuAccounting; + +#ifndef __NVOC_CLASS_GpuAccounting_TYPEDEF__ +#define __NVOC_CLASS_GpuAccounting_TYPEDEF__ +typedef struct GpuAccounting GpuAccounting; +#endif /* __NVOC_CLASS_GpuAccounting_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuAccounting +#define __nvoc_class_id_GpuAccounting 0x0f1350 +#endif /* __nvoc_class_id_GpuAccounting */ + + +struct OBJHALMGR; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + + +struct Fabric; + +#ifndef __NVOC_CLASS_Fabric_TYPEDEF__ +#define __NVOC_CLASS_Fabric_TYPEDEF__ +typedef struct Fabric Fabric; +#endif /* __NVOC_CLASS_Fabric_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Fabric +#define __nvoc_class_id_Fabric 0x0ac791 +#endif /* __nvoc_class_id_Fabric */ + + +struct GpuDb; + +#ifndef __NVOC_CLASS_GpuDb_TYPEDEF__ +#define __NVOC_CLASS_GpuDb_TYPEDEF__ +typedef struct GpuDb GpuDb; +#endif /* __NVOC_CLASS_GpuDb_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuDb +#define __nvoc_class_id_GpuDb 0xcdd250 +#endif /* __nvoc_class_id_GpuDb */ + + +struct OBJSWINSTR; + +#ifndef __NVOC_CLASS_OBJSWINSTR_TYPEDEF__ +#define __NVOC_CLASS_OBJSWINSTR_TYPEDEF__ +typedef struct OBJSWINSTR OBJSWINSTR; +#endif /* __NVOC_CLASS_OBJSWINSTR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSWINSTR +#define __nvoc_class_id_OBJSWINSTR 0xd586f3 +#endif /* __nvoc_class_id_OBJSWINSTR */ + + +struct OBJCL; + +#ifndef __NVOC_CLASS_OBJCL_TYPEDEF__ +#define __NVOC_CLASS_OBJCL_TYPEDEF__ +typedef struct OBJCL OBJCL; +#endif /* 
__NVOC_CLASS_OBJCL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCL +#define __nvoc_class_id_OBJCL 0x547dbb +#endif /* __nvoc_class_id_OBJCL */ + + +struct KernelVgpuMgr; + +#ifndef __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ +#define __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ +typedef struct KernelVgpuMgr KernelVgpuMgr; +#endif /* __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelVgpuMgr +#define __nvoc_class_id_KernelVgpuMgr 0xa793dd +#endif /* __nvoc_class_id_KernelVgpuMgr */ + + +struct OBJVRRMGR; + +#ifndef __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ +typedef struct OBJVRRMGR OBJVRRMGR; +#endif /* __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVRRMGR +#define __nvoc_class_id_OBJVRRMGR 0x442804 +#endif /* __nvoc_class_id_OBJVRRMGR */ + + +struct OBJGPUBOOSTMGR; + +#ifndef __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +typedef struct OBJGPUBOOSTMGR OBJGPUBOOSTMGR; +#endif /* __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUBOOSTMGR +#define __nvoc_class_id_OBJGPUBOOSTMGR 0x9f6bbf +#endif /* __nvoc_class_id_OBJGPUBOOSTMGR */ + + +struct OBJGSYNCMGR; + +#ifndef __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ +typedef struct OBJGSYNCMGR OBJGSYNCMGR; +#endif /* __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGSYNCMGR +#define __nvoc_class_id_OBJGSYNCMGR 0xd07fd0 +#endif /* __nvoc_class_id_OBJGSYNCMGR */ + + +struct OBJVGPUMGR; + +#ifndef __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ +typedef struct OBJVGPUMGR OBJVGPUMGR; +#endif /* __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVGPUMGR +#define __nvoc_class_id_OBJVGPUMGR 0x0e9beb +#endif /* __nvoc_class_id_OBJVGPUMGR */ + + +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + + +typedef struct OBJRCDB Journal; + +/*! + * This structure contains static system configuration data. This structure + * will become a typesafe structure that can be exchanged with code + * running on GSP. + */ +typedef struct SYS_STATIC_CONFIG +{ + /*! Indicates if the GPU is in a notebook or not. */ + NvBool bIsNotebook; + + /*! Initial SLI configuration flags */ + NvU32 initialSliFlags; + + /*! Indicates the type of OS flavor */ + NvU32 osType; + + /*! AMD SEV (AMD's Secure Encrypted Virtualization) Status */ + NvU32 osSevStatus; + + /*! 
Indicates whether AMD SEV is enabled */ + NvBool bOsSevEnabled; +} SYS_STATIC_CONFIG; + +typedef struct +{ + NvBool bInitialized; // Set to true once we identify the CPU + NvU32 type; // NV0000_CTRL_SYSTEM_CPU_TYPE value + NvU32 caps; // NV0000_CTRL_SYSTEM_CPU_CAP value + NvU32 brandId; // CPU Brand ID + NvU32 clock; + NvU32 l1DataCacheSize; // L1 data (or unified) cache size (KB) + NvU32 l2DataCacheSize; // L2 data (or unified) cache size (KB) + NvU32 dataCacheLineSize; // Bytes per line in the L1 data cache + NvU32 hostPageSize; // Native host OS page size (4k/64k/etc) + NvU32 numPhysicalCpus; // Number of physical CPUs + NvU32 numLogicalCpus; // Total number of logical CPUs + NvU32 maxLogicalCpus; // Max number of cores on the system + char name[52]; // Embedded processor name; only filled + // in if CPU has embedded name + NvU32 family; // Vendor defined Family/extended Family + NvU32 model; // Vendor defined Model/extended Model + NvU32 coresOnDie; // # of cores on the die (0 if unknown) + NvU32 platformID; // Chip package type + NvU8 stepping; // Silicon stepping + NvBool bSEVCapable; // Is capable of SEV (Secure Encrypted Virtualization) + NvU32 maxEncryptedGuests; // Max # of encrypted guests supported +} SYS_CPU_INFO; + +typedef struct +{ + NvU32 strapUser; + NvU32 genRegsVse2VidsysEn; + NvU32 genRegsMiscIoAdr; +} SYS_VGA_POST_STATE; + + +#ifdef NVOC_SYSTEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJSYS { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; + struct Object *__nvoc_pbase_Object; + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; + struct OBJSYS *__nvoc_pbase_OBJSYS; + NV_STATUS (*__sysCaptureState__)(struct OBJSYS *); + NvBool PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT; + NvBool PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT; + NvBool PDB_PROP_SYS_POWER_BATTERY; + NvBool PDB_PROP_SYS_NVIF_INIT_DONE; + NvBool PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED; + NvBool PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED; + NvBool PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS; + NvBool PDB_PROP_SYS_ENABLE_STREAM_MEMOPS; + NvBool PDB_PROP_SYS_IS_UEFI; + NvBool PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED; + NvBool PDB_PROP_SYS_IS_GSYNC_ENABLED; + NvBool PDB_PROP_SYS_NVSWITCH_IS_PRESENT; + NvBool PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED; + NvBool PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED; + NvBool PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED; + NvBool PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING; + NvBool PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE; + NvBool PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT; + NvBool PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS; + NvBool PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED; + NvBool PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED; + NvBool PDB_PROP_SYS_IS_EFI_INIT; + NvBool PDB_PROP_SYS_IN_OCA_DATA_COLLECTION; + NvBool PDB_PROP_SYS_DEBUGGER_DISABLED; + NvBool PDB_PROP_SYS_PRIORITY_BOOST; + NvU32 PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US; + NvBool PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT; + NvU32 apiLockMask; + NvU32 apiLockModuleMask; + NvU32 gpuLockModuleMask; + NvU32 pwrTransitionTimeoutOverride; + SYS_STATIC_CONFIG staticConfig; + NvU32 debugFlags; + NvU32 backtraceStackDepth; + SYS_CPU_INFO cpuInfo; + SYS_VGA_POST_STATE vgaPostState; + NvBool gpuHotPlugPollingActive[32]; + NvU32 gridSwPkg; + void *pSema; + NvU32 binMask; + PNODE pMemFilterList; + NvU64 rmInstanceId; + NvU32 currentCid; + OS_RM_CAPS *pOsRmCaps; + struct OBJGPUMGR *pGpuMgr; + struct 
OBJGSYNCMGR *pGsyncMgr; + struct OBJVGPUMGR *pVgpuMgr; + struct KernelVgpuMgr *pKernelVgpuMgr; + struct OBJOS *pOS; + struct OBJCL *pCl; + struct OBJPFM *pPfm; + struct OBJSWINSTR *pSwInstr; + struct GpuAccounting *pGpuAcct; + struct OBJGPS *pGps; + Journal *pRcDB; + struct OBJVMM *pVmm; + struct OBJHYPERVISOR *pHypervisor; + struct OBJVRRMGR *pVrrMgr; + struct OBJGPUBOOSTMGR *pGpuBoostMgr; + struct OBJDISPMGR *pDispMgr; + struct OBJHALMGR *pHalMgr; + struct Fabric *pFabric; + struct GpuDb *pGpuDb; +}; + +#ifndef __NVOC_CLASS_OBJSYS_TYPEDEF__ +#define __NVOC_CLASS_OBJSYS_TYPEDEF__ +typedef struct OBJSYS OBJSYS; +#endif /* __NVOC_CLASS_OBJSYS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSYS +#define __nvoc_class_id_OBJSYS 0x40e2c8 +#endif /* __nvoc_class_id_OBJSYS */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS; + +#define __staticCast_OBJSYS(pThis) \ + ((pThis)->__nvoc_pbase_OBJSYS) + +#ifdef __nvoc_system_h_disabled +#define __dynamicCast_OBJSYS(pThis) ((OBJSYS*)NULL) +#else //__nvoc_system_h_disabled +#define __dynamicCast_OBJSYS(pThis) \ + ((OBJSYS*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJSYS))) +#endif //__nvoc_system_h_disabled + +#define PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED_BASE_CAST +#define PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED_BASE_NAME PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED +#define PDB_PROP_SYS_IS_EFI_INIT_BASE_CAST +#define PDB_PROP_SYS_IS_EFI_INIT_BASE_NAME PDB_PROP_SYS_IS_EFI_INIT +#define PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS_BASE_CAST +#define PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS_BASE_NAME PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS +#define PDB_PROP_SYS_POWER_BATTERY_BASE_CAST +#define PDB_PROP_SYS_POWER_BATTERY_BASE_NAME PDB_PROP_SYS_POWER_BATTERY +#define PDB_PROP_SYS_NVIF_INIT_DONE_BASE_CAST +#define PDB_PROP_SYS_NVIF_INIT_DONE_BASE_NAME PDB_PROP_SYS_NVIF_INIT_DONE +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_NAME PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT +#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_NAME PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS +#define PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED_BASE_CAST +#define PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED_BASE_NAME PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED +#define PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT_BASE_CAST +#define PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT_BASE_NAME PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT +#define PDB_PROP_SYS_ENABLE_STREAM_MEMOPS_BASE_CAST +#define PDB_PROP_SYS_ENABLE_STREAM_MEMOPS_BASE_NAME PDB_PROP_SYS_ENABLE_STREAM_MEMOPS +#define PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT_BASE_CAST +#define PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT_BASE_NAME PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT +#define PDB_PROP_SYS_IS_UEFI_BASE_CAST +#define PDB_PROP_SYS_IS_UEFI_BASE_NAME PDB_PROP_SYS_IS_UEFI +#define PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED_BASE_CAST +#define PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED_BASE_NAME PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED +#define PDB_PROP_SYS_IS_GSYNC_ENABLED_BASE_CAST +#define PDB_PROP_SYS_IS_GSYNC_ENABLED_BASE_NAME PDB_PROP_SYS_IS_GSYNC_ENABLED +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED_BASE_NAME PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED +#define PDB_PROP_SYS_PRIORITY_BOOST_BASE_CAST +#define PDB_PROP_SYS_PRIORITY_BOOST_BASE_NAME PDB_PROP_SYS_PRIORITY_BOOST +#define 
PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US_BASE_CAST +#define PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US_BASE_NAME PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US +#define PDB_PROP_SYS_IN_OCA_DATA_COLLECTION_BASE_CAST +#define PDB_PROP_SYS_IN_OCA_DATA_COLLECTION_BASE_NAME PDB_PROP_SYS_IN_OCA_DATA_COLLECTION +#define PDB_PROP_SYS_NVSWITCH_IS_PRESENT_BASE_CAST +#define PDB_PROP_SYS_NVSWITCH_IS_PRESENT_BASE_NAME PDB_PROP_SYS_NVSWITCH_IS_PRESENT +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED_BASE_NAME PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED +#define PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED_BASE_CAST +#define PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED_BASE_NAME PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_BASE_NAME PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE +#define PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED_BASE_NAME PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED +#define PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED_BASE_CAST +#define PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED_BASE_NAME PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED +#define PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING_BASE_CAST +#define PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING_BASE_NAME PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING +#define PDB_PROP_SYS_DEBUGGER_DISABLED_BASE_CAST +#define PDB_PROP_SYS_DEBUGGER_DISABLED_BASE_NAME PDB_PROP_SYS_DEBUGGER_DISABLED +#define PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT_BASE_CAST +#define PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT_BASE_NAME PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT + +NV_STATUS __nvoc_objCreateDynamic_OBJSYS(OBJSYS**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJSYS(OBJSYS**, Dynamic*, NvU32); +#define __objCreate_OBJSYS(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJSYS((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define sysCaptureState(arg0) sysCaptureState_DISPATCH(arg0) +NV_STATUS sysCaptureState_IMPL(struct OBJSYS *arg0); + +static inline NV_STATUS sysCaptureState_DISPATCH(struct OBJSYS *arg0) { + return arg0->__sysCaptureState__(arg0); +} + +static inline NvU32 sysGetPwrTransitionTimeout(struct OBJSYS *pSys) { + return pSys->pwrTransitionTimeoutOverride; +} + +static inline const SYS_STATIC_CONFIG *sysGetStaticConfig(struct OBJSYS *pSys) { + return &pSys->staticConfig; +} + +NV_STATUS sysConstruct_IMPL(struct OBJSYS *arg_); +#define __nvoc_sysConstruct(arg_) sysConstruct_IMPL(arg_) +void sysDestruct_IMPL(struct OBJSYS *arg0); +#define __nvoc_sysDestruct(arg0) sysDestruct_IMPL(arg0) +void sysInitRegistryOverrides_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysInitRegistryOverrides(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysInitRegistryOverrides(arg0) sysInitRegistryOverrides_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +void sysApplyLockingPolicy_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysApplyLockingPolicy(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysApplyLockingPolicy(arg0) sysApplyLockingPolicy_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +struct OBJOS *sysGetOs_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline struct OBJOS *sysGetOs(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); + 
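// With system.h compiled out (__nvoc_system_h_disabled), every sysXxx() + // entry point collapses to a stub like this one: raise a precomp assert, + // then return a benign default -- NULL here, or NV_ERR_NOT_SUPPORTED from + // the NV_STATUS-returning stubs further below. + 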
return NULL; +} +#else //__nvoc_system_h_disabled +#define sysGetOs(arg0) sysGetOs_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +void sysEnableExternalFabricMgmt_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysEnableExternalFabricMgmt(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysEnableExternalFabricMgmt(arg0) sysEnableExternalFabricMgmt_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +void sysForceInitFabricManagerState_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysForceInitFabricManagerState(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysForceInitFabricManagerState(arg0) sysForceInitFabricManagerState_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +NV_STATUS sysSyncExternalFabricMgmtWAR_IMPL(struct OBJSYS *arg0, OBJGPU *arg1); +#ifdef __nvoc_system_h_disabled +static inline NV_STATUS sysSyncExternalFabricMgmtWAR(struct OBJSYS *arg0, OBJGPU *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_system_h_disabled +#define sysSyncExternalFabricMgmtWAR(arg0, arg1) sysSyncExternalFabricMgmtWAR_IMPL(arg0, arg1) +#endif //__nvoc_system_h_disabled + +#undef PRIVATE_FIELD + + +extern struct OBJSYS *g_pSys; + +#endif // SYSTEM_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SYSTEM_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c new file mode 100644 index 0000000..e6f048f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c @@ -0,0 +1,417 @@ +#define NVOC_TMR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_tmr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb13ac4 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_TimerApi(TimerApi*); +void __nvoc_init_funcTable_TimerApi(TimerApi*); +NV_STATUS __nvoc_ctor_TimerApi(TimerApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_TimerApi(TimerApi*); +void __nvoc_dtor_TimerApi(TimerApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_TimerApi; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_TimerApi = { + /*pClassDef=*/ &__nvoc_class_def_TimerApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_TimerApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_TimerApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_TimerApi = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_TimerApi_TimerApi, + &__nvoc_rtti_TimerApi_Notifier, + &__nvoc_rtti_TimerApi_INotifier, + &__nvoc_rtti_TimerApi_GpuResource, + &__nvoc_rtti_TimerApi_RmResource, + &__nvoc_rtti_TimerApi_RmResourceCommon, + &__nvoc_rtti_TimerApi_RsResource, + &__nvoc_rtti_TimerApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(TimerApi), + /*classId=*/ classId(TimerApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "TimerApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_TimerApi, + /*pCastInfo=*/ &__nvoc_castinfo_TimerApi, + /*pExportInfo=*/ &__nvoc_export_info_TimerApi +}; + +static NV_STATUS __nvoc_thunk_TimerApi_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return tmrapiGetRegBaseOffsetAndSize((struct TimerApi *)(((unsigned char *)pTimerApi) - __nvoc_rtti_TimerApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_tmrapiShareCallback(struct TimerApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiMapTo(struct TimerApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_tmrapiGetOrAllocNotifShare(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); 
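+ // Every thunk in this table converts between base- and derived-class + // pointers using the compile-time NV_OFFSETOF() values recorded in the + // NVOC_RTTI entries above: adding the offset (e.g. (unsigned char *)pNotifier + // + __nvoc_rtti_TimerApi_Notifier.offset) walks from the TimerApi object to + // the embedded base whose method is being reused, while subtracting it walks + // from a base pointer back to the enclosing TimerApi.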
+} + +static NV_STATUS __nvoc_thunk_RmResource_tmrapiCheckMemInterUnmap(struct TimerApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimerApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiGetMapAddrSpace(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_tmrapiSetNotificationShare(struct TimerApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_tmrapiGetRefCount(struct TimerApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_tmrapiAddAdditionalDependants(struct RsClient *pClient, struct TimerApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_tmrapiControl_Prologue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiInternalControlForward(struct TimerApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiUnmapFrom(struct TimerApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_tmrapiControl_Epilogue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiControlLookup(struct TimerApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_tmrapiGetInternalObjectHandle(struct TimerApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiControl(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, pParams); +} + +static 
NV_STATUS __nvoc_thunk_GpuResource_tmrapiUnmap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_tmrapiGetMemInterMapParams(struct TimerApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimerApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_tmrapiGetMemoryMappingDescriptor(struct TimerApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimerApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiControlFilter(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_tmrapiUnregisterEvent(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_tmrapiCanCopy(struct TimerApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_tmrapiPreDestruct(struct TimerApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_tmrapiGetNotificationListPtr(struct TimerApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_tmrapiGetNotificationShare(struct TimerApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiMap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_tmrapiAccessCallback(struct TimerApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_TimerApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) tmrapiCtrlCmdTmrSetAlarmNotify_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x40110u, + /*paramSize=*/ sizeof(NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_TimerApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "tmrapiCtrlCmdTmrSetAlarmNotify" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_TimerApi = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_TimerApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_TimerApi(TimerApi *pThis) { + __nvoc_tmrapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_TimerApi(TimerApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_TimerApi(TimerApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail_Notifier; + __nvoc_init_dataField_TimerApi(pThis); + + status = __nvoc_tmrapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail__init; + goto __nvoc_ctor_TimerApi_exit; // Success + +__nvoc_ctor_TimerApi_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_TimerApi_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_TimerApi_fail_GpuResource: +__nvoc_ctor_TimerApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_TimerApi_1(TimerApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__tmrapiGetRegBaseOffsetAndSize__ = &tmrapiGetRegBaseOffsetAndSize_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__tmrapiCtrlCmdTmrSetAlarmNotify__ = &tmrapiCtrlCmdTmrSetAlarmNotify_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_TimerApi_gpuresGetRegBaseOffsetAndSize; + + pThis->__tmrapiShareCallback__ = &__nvoc_thunk_GpuResource_tmrapiShareCallback; + + pThis->__tmrapiMapTo__ = &__nvoc_thunk_RsResource_tmrapiMapTo; + + pThis->__tmrapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_tmrapiGetOrAllocNotifShare; + + pThis->__tmrapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_tmrapiCheckMemInterUnmap; + + pThis->__tmrapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_tmrapiGetMapAddrSpace; + + pThis->__tmrapiSetNotificationShare__ = &__nvoc_thunk_Notifier_tmrapiSetNotificationShare; + + pThis->__tmrapiGetRefCount__ = &__nvoc_thunk_RsResource_tmrapiGetRefCount; + + pThis->__tmrapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_tmrapiAddAdditionalDependants; + + pThis->__tmrapiControl_Prologue__ = &__nvoc_thunk_RmResource_tmrapiControl_Prologue; + + pThis->__tmrapiInternalControlForward__ = &__nvoc_thunk_GpuResource_tmrapiInternalControlForward; + + pThis->__tmrapiUnmapFrom__ = &__nvoc_thunk_RsResource_tmrapiUnmapFrom; + + pThis->__tmrapiControl_Epilogue__ = 
&__nvoc_thunk_RmResource_tmrapiControl_Epilogue; + + pThis->__tmrapiControlLookup__ = &__nvoc_thunk_RsResource_tmrapiControlLookup; + + pThis->__tmrapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_tmrapiGetInternalObjectHandle; + + pThis->__tmrapiControl__ = &__nvoc_thunk_GpuResource_tmrapiControl; + + pThis->__tmrapiUnmap__ = &__nvoc_thunk_GpuResource_tmrapiUnmap; + + pThis->__tmrapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_tmrapiGetMemInterMapParams; + + pThis->__tmrapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_tmrapiGetMemoryMappingDescriptor; + + pThis->__tmrapiControlFilter__ = &__nvoc_thunk_RsResource_tmrapiControlFilter; + + pThis->__tmrapiUnregisterEvent__ = &__nvoc_thunk_Notifier_tmrapiUnregisterEvent; + + pThis->__tmrapiCanCopy__ = &__nvoc_thunk_RsResource_tmrapiCanCopy; + + pThis->__tmrapiPreDestruct__ = &__nvoc_thunk_RsResource_tmrapiPreDestruct; + + pThis->__tmrapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_tmrapiGetNotificationListPtr; + + pThis->__tmrapiGetNotificationShare__ = &__nvoc_thunk_Notifier_tmrapiGetNotificationShare; + + pThis->__tmrapiMap__ = &__nvoc_thunk_GpuResource_tmrapiMap; + + pThis->__tmrapiAccessCallback__ = &__nvoc_thunk_RmResource_tmrapiAccessCallback; +} + +void __nvoc_init_funcTable_TimerApi(TimerApi *pThis) { + __nvoc_init_funcTable_TimerApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_TimerApi(TimerApi *pThis) { + pThis->__nvoc_pbase_TimerApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_TimerApi(pThis); +} + +NV_STATUS __nvoc_objCreate_TimerApi(TimerApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + TimerApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(TimerApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(TimerApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_TimerApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_TimerApi(pThis); + status = __nvoc_ctor_TimerApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_TimerApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_TimerApi_cleanup: + // do not call destructors here since the constructor already called them + 
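// (the goto chain in __nvoc_ctor_TimerApi above already unwound any + // base-class constructors that had run, so only the portMemAllocNonPaged() + // allocation remains to be released) + 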
portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_TimerApi(TimerApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_TimerApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h new file mode 100644 index 0000000..21be57f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h @@ -0,0 +1,332 @@ +#ifndef _G_TMR_NVOC_H_ +#define _G_TMR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_tmr_nvoc.h" + +#ifndef _TMR_H_ +#define _TMR_H_ + +/****************************** Timer Module *******************************\ +* * +* Module: TMR.H * +* Timer functions. * +* * +****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" + +#include "ctrl/ctrl0004.h" + +typedef struct OBJTMR *POBJTMR; + +#ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__ +#define __NVOC_CLASS_OBJTMR_TYPEDEF__ +typedef struct OBJTMR OBJTMR; +#endif /* __NVOC_CLASS_OBJTMR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTMR +#define __nvoc_class_id_OBJTMR 0x9ddede +#endif /* __nvoc_class_id_OBJTMR */ + + + +//--------------------------------------------------------------------------- +// +// Time objects. +// +//--------------------------------------------------------------------------- + +#define TIMER_STATE_IDLE 0 +#define TIMER_STATE_BUSY 1 + +// Opaque callback memory type for interfacing the scheduling API +typedef struct TMR_EVENT *PTMR_EVENT; +typedef struct TMR_EVENT TMR_EVENT; + +typedef NV_STATUS (*TIMEPROC)(OBJGPU *, struct OBJTMR *, PTMR_EVENT); +typedef NV_STATUS (*TIMEPROC_OBSOLETE)(OBJGPU *, struct OBJTMR *, void *); +typedef NV_STATUS (*TIMEPROC_COUNTDOWN)(OBJGPU *, THREAD_STATE_NODE *); + +/*! 
+ * RM internal class representing NV01_TIMER (child of SubDevice) + */ +#ifdef NVOC_TMR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct TimerApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct TimerApi *__nvoc_pbase_TimerApi; + NV_STATUS (*__tmrapiGetRegBaseOffsetAndSize__)(struct TimerApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__tmrapiCtrlCmdTmrSetAlarmNotify__)(struct TimerApi *, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *); + NvBool (*__tmrapiShareCallback__)(struct TimerApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__tmrapiMapTo__)(struct TimerApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__tmrapiGetOrAllocNotifShare__)(struct TimerApi *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__tmrapiCheckMemInterUnmap__)(struct TimerApi *, NvBool); + NV_STATUS (*__tmrapiGetMapAddrSpace__)(struct TimerApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__tmrapiSetNotificationShare__)(struct TimerApi *, struct NotifShare *); + NvU32 (*__tmrapiGetRefCount__)(struct TimerApi *); + void (*__tmrapiAddAdditionalDependants__)(struct RsClient *, struct TimerApi *, RsResourceRef *); + NV_STATUS (*__tmrapiControl_Prologue__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiInternalControlForward__)(struct TimerApi *, NvU32, void *, NvU32); + NV_STATUS (*__tmrapiUnmapFrom__)(struct TimerApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__tmrapiControl_Epilogue__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiControlLookup__)(struct TimerApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__tmrapiGetInternalObjectHandle__)(struct TimerApi *); + NV_STATUS (*__tmrapiControl__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiUnmap__)(struct TimerApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__tmrapiGetMemInterMapParams__)(struct TimerApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__tmrapiGetMemoryMappingDescriptor__)(struct TimerApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__tmrapiControlFilter__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiUnregisterEvent__)(struct TimerApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__tmrapiCanCopy__)(struct TimerApi *); + void (*__tmrapiPreDestruct__)(struct TimerApi *); + PEVENTNOTIFICATION *(*__tmrapiGetNotificationListPtr__)(struct TimerApi *); + struct NotifShare *(*__tmrapiGetNotificationShare__)(struct TimerApi *); + NV_STATUS (*__tmrapiMap__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__tmrapiAccessCallback__)(struct TimerApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_TimerApi_TYPEDEF__ +#define __NVOC_CLASS_TimerApi_TYPEDEF__ +typedef struct TimerApi TimerApi; +#endif /* 
__NVOC_CLASS_TimerApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimerApi +#define __nvoc_class_id_TimerApi 0xb13ac4 +#endif /* __nvoc_class_id_TimerApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi; + +#define __staticCast_TimerApi(pThis) \ + ((pThis)->__nvoc_pbase_TimerApi) + +#ifdef __nvoc_tmr_h_disabled +#define __dynamicCast_TimerApi(pThis) ((TimerApi*)NULL) +#else //__nvoc_tmr_h_disabled +#define __dynamicCast_TimerApi(pThis) \ + ((TimerApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(TimerApi))) +#endif //__nvoc_tmr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_TimerApi(TimerApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_TimerApi(TimerApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_TimerApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_TimerApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define tmrapiGetRegBaseOffsetAndSize(pTimerApi, pGpu, pOffset, pSize) tmrapiGetRegBaseOffsetAndSize_DISPATCH(pTimerApi, pGpu, pOffset, pSize) +#define tmrapiCtrlCmdTmrSetAlarmNotify(pTimerApi, pParams) tmrapiCtrlCmdTmrSetAlarmNotify_DISPATCH(pTimerApi, pParams) +#define tmrapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) tmrapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define tmrapiMapTo(pResource, pParams) tmrapiMapTo_DISPATCH(pResource, pParams) +#define tmrapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) tmrapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define tmrapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) tmrapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define tmrapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) tmrapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define tmrapiSetNotificationShare(pNotifier, pNotifShare) tmrapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define tmrapiGetRefCount(pResource) tmrapiGetRefCount_DISPATCH(pResource) +#define tmrapiAddAdditionalDependants(pClient, pResource, pReference) tmrapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define tmrapiControl_Prologue(pResource, pCallContext, pParams) tmrapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiInternalControlForward(pGpuResource, command, pParams, size) tmrapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define tmrapiUnmapFrom(pResource, pParams) tmrapiUnmapFrom_DISPATCH(pResource, pParams) +#define tmrapiControl_Epilogue(pResource, pCallContext, pParams) tmrapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiControlLookup(pResource, pParams, ppEntry) tmrapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define tmrapiGetInternalObjectHandle(pGpuResource) tmrapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define tmrapiControl(pGpuResource, pCallContext, pParams) tmrapiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define tmrapiUnmap(pGpuResource, pCallContext, pCpuMapping) tmrapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define tmrapiGetMemInterMapParams(pRmResource, pParams) tmrapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define tmrapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) 
tmrapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define tmrapiControlFilter(pResource, pCallContext, pParams) tmrapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) tmrapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define tmrapiCanCopy(pResource) tmrapiCanCopy_DISPATCH(pResource) +#define tmrapiPreDestruct(pResource) tmrapiPreDestruct_DISPATCH(pResource) +#define tmrapiGetNotificationListPtr(pNotifier) tmrapiGetNotificationListPtr_DISPATCH(pNotifier) +#define tmrapiGetNotificationShare(pNotifier) tmrapiGetNotificationShare_DISPATCH(pNotifier) +#define tmrapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) tmrapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define tmrapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) tmrapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS tmrapiGetRegBaseOffsetAndSize_IMPL(struct TimerApi *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS tmrapiGetRegBaseOffsetAndSize_DISPATCH(struct TimerApi *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pTimerApi->__tmrapiGetRegBaseOffsetAndSize__(pTimerApi, pGpu, pOffset, pSize); +} + +NV_STATUS tmrapiCtrlCmdTmrSetAlarmNotify_IMPL(struct TimerApi *pTimerApi, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams); + +static inline NV_STATUS tmrapiCtrlCmdTmrSetAlarmNotify_DISPATCH(struct TimerApi *pTimerApi, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams) { + return pTimerApi->__tmrapiCtrlCmdTmrSetAlarmNotify__(pTimerApi, pParams); +} + +static inline NvBool tmrapiShareCallback_DISPATCH(struct TimerApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__tmrapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS tmrapiMapTo_DISPATCH(struct TimerApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__tmrapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS tmrapiGetOrAllocNotifShare_DISPATCH(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__tmrapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS tmrapiCheckMemInterUnmap_DISPATCH(struct TimerApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__tmrapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS tmrapiGetMapAddrSpace_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__tmrapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void tmrapiSetNotificationShare_DISPATCH(struct TimerApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__tmrapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 tmrapiGetRefCount_DISPATCH(struct TimerApi *pResource) { + return pResource->__tmrapiGetRefCount__(pResource); +} + +static inline void tmrapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct TimerApi *pResource, RsResourceRef *pReference) { + pResource->__tmrapiAddAdditionalDependants__(pClient, pResource, 
pReference); +} + +static inline NV_STATUS tmrapiControl_Prologue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__tmrapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiInternalControlForward_DISPATCH(struct TimerApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__tmrapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS tmrapiUnmapFrom_DISPATCH(struct TimerApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__tmrapiUnmapFrom__(pResource, pParams); +} + +static inline void tmrapiControl_Epilogue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__tmrapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiControlLookup_DISPATCH(struct TimerApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__tmrapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle tmrapiGetInternalObjectHandle_DISPATCH(struct TimerApi *pGpuResource) { + return pGpuResource->__tmrapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS tmrapiControl_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__tmrapiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiUnmap_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__tmrapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS tmrapiGetMemInterMapParams_DISPATCH(struct TimerApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__tmrapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS tmrapiGetMemoryMappingDescriptor_DISPATCH(struct TimerApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__tmrapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS tmrapiControlFilter_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__tmrapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiUnregisterEvent_DISPATCH(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__tmrapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool tmrapiCanCopy_DISPATCH(struct TimerApi *pResource) { + return pResource->__tmrapiCanCopy__(pResource); +} + +static inline void tmrapiPreDestruct_DISPATCH(struct TimerApi *pResource) { + pResource->__tmrapiPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *tmrapiGetNotificationListPtr_DISPATCH(struct TimerApi *pNotifier) { + return pNotifier->__tmrapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *tmrapiGetNotificationShare_DISPATCH(struct TimerApi *pNotifier) { + return pNotifier->__tmrapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS tmrapiMap_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, 
struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__tmrapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool tmrapiAccessCallback_DISPATCH(struct TimerApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__tmrapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS tmrapiConstruct_IMPL(struct TimerApi *arg_pTimerApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_tmrapiConstruct(arg_pTimerApi, arg_pCallContext, arg_pParams) tmrapiConstruct_IMPL(arg_pTimerApi, arg_pCallContext, arg_pParams) +void tmrapiDestruct_IMPL(struct TimerApi *pTimerApi); +#define __nvoc_tmrapiDestruct(pTimerApi) tmrapiDestruct_IMPL(pTimerApi) +void tmrapiDeregisterEvents_IMPL(struct TimerApi *pTimerApi); +#ifdef __nvoc_tmr_h_disabled +static inline void tmrapiDeregisterEvents(struct TimerApi *pTimerApi) { + NV_ASSERT_FAILED_PRECOMP("TimerApi was disabled!"); +} +#else //__nvoc_tmr_h_disabled +#define tmrapiDeregisterEvents(pTimerApi) tmrapiDeregisterEvents_IMPL(pTimerApi) +#endif //__nvoc_tmr_h_disabled + +#undef PRIVATE_FIELD + + + +//--------------------------------------------------------------------------- +// +// Function prototypes. +// +//--------------------------------------------------------------------------- + + +#endif // _TMR_H_ + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_TMR_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c new file mode 100644 index 0000000..5d4d94a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c @@ -0,0 +1,88 @@ +#define NVOC_TRACEABLE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_traceable_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6305d2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_funcTable_OBJTRACEABLE(OBJTRACEABLE*); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_dataField_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTRACEABLE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTRACEABLE_OBJTRACEABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJTRACEABLE, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJTRACEABLE = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_OBJTRACEABLE_OBJTRACEABLE, + }, +}; + +// Not instantiable because it's not derived from class "Object" +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJTRACEABLE), + /*classId=*/ classId(OBJTRACEABLE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJTRACEABLE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJTRACEABLE, + /*pExportInfo=*/ &__nvoc_export_info_OBJTRACEABLE +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTRACEABLE = +{ + 
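// OBJTRACEABLE is a pure mixin and exports no RM control methods; + // unlike TimerApi above, its export table is empty: + 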
/*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJTRACEABLE(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_OBJTRACEABLE(pThis); + goto __nvoc_ctor_OBJTRACEABLE_exit; // Success + +__nvoc_ctor_OBJTRACEABLE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJTRACEABLE_1(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJTRACEABLE(OBJTRACEABLE *pThis) { + __nvoc_init_funcTable_OBJTRACEABLE_1(pThis); +} + +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE *pThis) { + pThis->__nvoc_pbase_OBJTRACEABLE = pThis; + __nvoc_init_funcTable_OBJTRACEABLE(pThis); +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h new file mode 100644 index 0000000..0e59e39 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h @@ -0,0 +1,87 @@ +#ifndef _G_TRACEABLE_NVOC_H_ +#define _G_TRACEABLE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_traceable_nvoc.h" + +#ifndef __ANCI_TRACEABLE_H__ +#define __ANCI_TRACEABLE_H__ + +#include "core/core.h" + +#ifdef NVOC_TRACEABLE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJTRACEABLE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; +}; + +#ifndef __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ +#define __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ +typedef struct OBJTRACEABLE OBJTRACEABLE; +#endif /* __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTRACEABLE +#define __nvoc_class_id_OBJTRACEABLE 0x6305d2 +#endif /* __nvoc_class_id_OBJTRACEABLE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +#define __staticCast_OBJTRACEABLE(pThis) \ + ((pThis)->__nvoc_pbase_OBJTRACEABLE) + +#ifdef __nvoc_traceable_h_disabled +#define __dynamicCast_OBJTRACEABLE(pThis) ((OBJTRACEABLE*)NULL) +#else //__nvoc_traceable_h_disabled +#define __dynamicCast_OBJTRACEABLE(pThis) \ + ((OBJTRACEABLE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJTRACEABLE))) +#endif //__nvoc_traceable_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJTRACEABLE(OBJTRACEABLE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJTRACEABLE(OBJTRACEABLE**, Dynamic*, NvU32); +#define __objCreate_OBJTRACEABLE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJTRACEABLE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#undef PRIVATE_FIELD + + +void objTraverseCaptureState_IMPL(struct Object *pObj); +#define objTraverseCaptureState(p) objTraverseCaptureState_IMPL(staticCast((p), Object)) + +#endif // __ANCI_TRACEABLE_H__ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_TRACEABLE_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c new file mode 100644 index 0000000..f6a3038 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c @@ -0,0 +1,131 @@ +#define NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_vaspace_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6c347f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_funcTable_OBJVASPACE(OBJVASPACE*); +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_dataField_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVASPACE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJVASPACE_OBJVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJVASPACE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJVASPACE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJVASPACE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJVASPACE, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJVASPACE = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJVASPACE_OBJVASPACE, + &__nvoc_rtti_OBJVASPACE_Object, + }, +}; + +// Not instantiable because it's an abstract class with the following pure 
virtual functions: +// vaspaceConstruct_ +// vaspaceAlloc +// vaspaceFree +// vaspaceApplyDefaultAlignment +// vaspaceGetVasInfo +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJVASPACE), + /*classId=*/ classId(OBJVASPACE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJVASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJVASPACE, + /*pExportInfo=*/ &__nvoc_export_info_OBJVASPACE +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJVASPACE(OBJVASPACE *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJVASPACE(OBJVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJVASPACE_fail_Object; + __nvoc_init_dataField_OBJVASPACE(pThis); + goto __nvoc_ctor_OBJVASPACE_exit; // Success + +__nvoc_ctor_OBJVASPACE_fail_Object: +__nvoc_ctor_OBJVASPACE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJVASPACE_1(OBJVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__vaspaceConstruct___ = NULL; + + pThis->__vaspaceAlloc__ = NULL; + + pThis->__vaspaceFree__ = NULL; + + pThis->__vaspaceApplyDefaultAlignment__ = NULL; + + pThis->__vaspaceIncAllocRefCnt__ = &vaspaceIncAllocRefCnt_b7902c; + + pThis->__vaspaceGetVaStart__ = &vaspaceGetVaStart_IMPL; + + pThis->__vaspaceGetVaLimit__ = &vaspaceGetVaLimit_IMPL; + + pThis->__vaspaceGetVasInfo__ = NULL; + + pThis->__vaspaceGetFlags__ = &vaspaceGetFlags_edd98b; + + pThis->__vaspaceIsInternalVaRestricted__ = &vaspaceIsInternalVaRestricted_IMPL; +} + +void __nvoc_init_funcTable_OBJVASPACE(OBJVASPACE *pThis) { + __nvoc_init_funcTable_OBJVASPACE_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJVASPACE(OBJVASPACE *pThis) { + pThis->__nvoc_pbase_OBJVASPACE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJVASPACE(pThis); +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h new file mode 100644 index 0000000..e54e246 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h @@ -0,0 +1,389 @@ +#ifndef _G_VASPACE_NVOC_H_ +#define _G_VASPACE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_vaspace_nvoc.h" + +#ifndef _VASPACE_H_ +#define _VASPACE_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: VASPACE.H * +* Defines and structures used for Virtual Address Space Object. * +\***************************************************************************/ + +#include "ctrl/ctrl0080/ctrl0080dma.h" + +#include "core/core.h" +#include "resserv/rs_client.h" +#include "containers/eheap_old.h" +#include "gpu/mem_mgr/heap_base.h" +#include "gpu/mem_mgr/mem_desc.h" + + +typedef struct OBJVASPACE *POBJVASPACE; +typedef struct VASPACE VASPACE, *PVASPACE; +struct VirtMemAllocator; + +#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +typedef struct VirtMemAllocator VirtMemAllocator; +#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtMemAllocator +#define __nvoc_class_id_VirtMemAllocator 0x899e48 +#endif /* __nvoc_class_id_VirtMemAllocator */ + + +typedef struct MMU_MAP_TARGET MMU_MAP_TARGET; +typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS; + +typedef struct +{ + NvBool bReverse : 1; + NvBool bPreferSysmemPageTables : 1; + NvBool bExternallyManaged : 1; + NvBool bLazy : 1; + NvBool bSparse : 1; + NvBool bPrivileged : 1; + NvBool bClientAllocation : 1; + NvBool bFixedAddressRange : 1; + NvBool bFixedAddressAllocate : 1; + NvBool bForceContig : 1; + NvBool bForceNonContig : 1; + + // + // Using this flag may have security implications. So, use it only when + // you are sure about its usage. + // + NvBool bSkipTlbInvalidateOnFree : 1; +} VAS_ALLOC_FLAGS; + +#define VAS_EHEAP_OWNER_NVRM NvU32_BUILD('n','v','r','m') +#define VAS_EHEAP_OWNER_RSVD NvU32_BUILD('r','s','v','d') + +typedef struct +{ + NvBool bRemap : 1; // +// This flag will create a privileged PDB as part of this vaspace +// This new PDB will mirror all of the allocations made in the +// original PDB. The first PDE is considered privileged for this +// address space. +// SHARED_MANAGEMENT Enables mode where only a portion of the VAS is managed +// and the page directory may be allocated/set externally. +// ALLOW_ZERO_ADDRESS Explicitly allows the base VAS address to start at 0. +// Normally 0 is reserved to distinguish NULL pointers. +// +// BIG_PAGE_SIZE Field that specifies the big page size to be used. 
+// DEFAULT is used up to GM10X; GM20X and later use a
+// custom value for the big page size.
+// SIZE_DEFAULT Lets RM pick the default value
+// SIZE_64K Uses 64K as big page size for this VA space
+// SIZE_128K Uses 128K as big page size for this VA space
+//
+// MMU_FMT_VA_BITS Selects the MMU format of the VA space by the number
+// of VA bits supported.
+// DEFAULT RM picks the default for the underlying MMU HW.
+// 40 Fermi+ 40-bit (2-level) format.
+// 49 Pascal+ 49-bit (5-level) format.
+//
+// ENABLE_VMM
+// Temp flag to enable new VMM code path on select
+// VA spaces (e.g. client but not BAR1/PMU VAS).
+//
+// ZERO_OLD_STRUCT Deprecated.
+//
+// ENABLE_FAULTING This address space is participating in UVM.
+// RM will enable page faulting for all channels that will be
+// associated with this address space.
+//
+// IS_UVM_MANAGED This flag will replace the SET_MIRRORED flag. It is used to
+// denote that this VASpace is participating in UVM.
+//
+// ENABLE_ATS This address space has ATS enabled.
+//
+//
+// ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR This flag, when set, allows page table allocations
+// to be routed to the suballocator of the current process
+// requesting the mapping. If there is no suballocator, allocations
+// fall back to the global heap.
+//
+// VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB This flag must be used by the VASs which use
+// the NVLink MMU.
+//
+#define VASPACE_FLAGS_NONE 0
+#define VASPACE_FLAGS_BAR NVBIT(0)
+#define VASPACE_FLAGS_SCRATCH_INVAL NVBIT(1)
+#define VASPACE_FLAGS_ENABLE_ATS NVBIT(2)
+#define VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS NVBIT(3)
+#define VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE NVBIT(4)
+#define VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS NVBIT(5)
+// unused NVBIT(6)
+#define VASPACE_FLAGS_BAR_BAR1 NVBIT(7)
+#define VASPACE_FLAGS_BAR_BAR2 NVBIT(8)
+#define VASPACE_FLAGS_BAR_IFB NVBIT(9)
+#define VASPACE_FLAGS_PERFMON NVBIT(10)
+#define VASPACE_FLAGS_PMU NVBIT(11)
+#define VASPACE_FLAGS_DEFAULT_SIZE NVBIT(12)
+#define VASPACE_FLAGS_DEFAULT_PARAMS NVBIT(13)
+#define VASPACE_FLAGS_PTETABLE_PMA_MANAGED NVBIT(14)
+#define VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB NVBIT(15)
+#define VASPACE_FLAGS_DISABLE_SPLIT_VAS NVBIT(16)
+#define VASPACE_FLAGS_SET_MIRRORED NVBIT(17)
+#define VASPACE_FLAGS_SHARED_MANAGEMENT NVBIT(18)
+#define VASPACE_FLAGS_ALLOW_ZERO_ADDRESS NVBIT(19)
+#define VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL NVBIT(20)
+#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE 22:21
+#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_DEFAULT 0x00000000
+#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_64K 0x00000001
+#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_128K 0x00000002
+#define VASPACE_FLAGS_HDA NVBIT(23)
+#define VASPACE_FLAGS_FLA NVBIT(24) // Soon to be deprecated and removed.
+                                    // Used by legacy FLA implementation.
+#define VASPACE_FLAGS_HWPM NVBIT(25)
+#define VASPACE_FLAGS_ENABLE_VMM NVBIT(26)
+#define VASPACE_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE NVBIT(27)
+#define VASPACE_FLAGS_REVERSE NVBIT(28)
+#define VASPACE_FLAGS_ENABLE_FAULTING NVBIT(29)
+#define VASPACE_FLAGS_IS_EXTERNALLY_OWNED NVBIT(30)
+#define VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR NVBIT(31)
+
+/*!
+ * Flags for page table memory pools.
+ *
+ * VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY
+ *     Only allocate levels from the top down to the specified level.
+ *     Anything below the specified level is not allocated.
+ */
+#define VASPACE_RESERVE_FLAGS_NONE (0)
+#define VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY NVBIT32(0)
+
+/*!
+ * Level of RM-management for a given VA range.
+ * + * FULL + * RM manages everything (e.g. PDEs, PTEs). + * PDES_ONLY + * RM only manages PDEs (through non-buffer version of UpdatePde2). + * Buffer versions of FillPteMem and UpdatePde2 are still allowed. + * NONE + * RM does not manage anything. + * Buffer versions of FillPteMem and UpdatePde2 are still allowed. + */ +typedef enum +{ + VA_MANAGEMENT_FULL = 0, + VA_MANAGEMENT_PDES_ONLY, + VA_MANAGEMENT_NONE, +} VA_MANAGEMENT; + +/*! + * Abstract base class of an RM-managed virtual address space. + */ +#ifdef NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJVASPACE { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; + NV_STATUS (*__vaspaceConstruct___)(struct OBJVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); + NV_STATUS (*__vaspaceAlloc__)(struct OBJVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); + NV_STATUS (*__vaspaceFree__)(struct OBJVASPACE *, NvU64); + NV_STATUS (*__vaspaceApplyDefaultAlignment__)(struct OBJVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); + NV_STATUS (*__vaspaceIncAllocRefCnt__)(struct OBJVASPACE *, NvU64); + NvU64 (*__vaspaceGetVaStart__)(struct OBJVASPACE *); + NvU64 (*__vaspaceGetVaLimit__)(struct OBJVASPACE *); + NV_STATUS (*__vaspaceGetVasInfo__)(struct OBJVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); + NvU32 (*__vaspaceGetFlags__)(struct OBJVASPACE *); + NvBool (*__vaspaceIsInternalVaRestricted__)(struct OBJVASPACE *); + NvU32 gpuMask; + ADDRESS_TRANSLATION addressTranslation; + NvU32 refCnt; + NvU32 vaspaceId; + NvU64 vasStart; + NvU64 vasLimit; +}; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +#define __staticCast_OBJVASPACE(pThis) \ + ((pThis)->__nvoc_pbase_OBJVASPACE) + +#ifdef __nvoc_vaspace_h_disabled +#define __dynamicCast_OBJVASPACE(pThis) ((OBJVASPACE*)NULL) +#else //__nvoc_vaspace_h_disabled +#define __dynamicCast_OBJVASPACE(pThis) \ + ((OBJVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVASPACE))) +#endif //__nvoc_vaspace_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32); +#define __objCreate_OBJVASPACE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define vaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) vaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define vaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) vaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) +#define vaspaceFree(pVAS, vAddr) vaspaceFree_DISPATCH(pVAS, vAddr) +#define vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) vaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define vaspaceIncAllocRefCnt(pVAS, vAddr) 
vaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define vaspaceGetVaStart(pVAS) vaspaceGetVaStart_DISPATCH(pVAS) +#define vaspaceGetVaLimit(pVAS) vaspaceGetVaLimit_DISPATCH(pVAS) +#define vaspaceGetVasInfo(pVAS, pParams) vaspaceGetVasInfo_DISPATCH(pVAS, pParams) +#define vaspaceGetFlags(pVAS) vaspaceGetFlags_DISPATCH(pVAS) +#define vaspaceIsInternalVaRestricted(pVAS) vaspaceIsInternalVaRestricted_DISPATCH(pVAS) +static inline NV_STATUS vaspaceConstruct__DISPATCH(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pVAS->__vaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static inline NV_STATUS vaspaceAlloc_DISPATCH(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pVAS->__vaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +static inline NV_STATUS vaspaceFree_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__vaspaceFree__(pVAS, vAddr); +} + +static inline NV_STATUS vaspaceApplyDefaultAlignment_DISPATCH(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pVAS->__vaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static inline NV_STATUS vaspaceIncAllocRefCnt_b7902c(struct OBJVASPACE *pVAS, NvU64 vAddr) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspaceIncAllocRefCnt_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__vaspaceIncAllocRefCnt__(pVAS, vAddr); +} + +NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS); + +static inline NvU64 vaspaceGetVaStart_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetVaStart__(pVAS); +} + +NvU64 vaspaceGetVaLimit_IMPL(struct OBJVASPACE *pVAS); + +static inline NvU64 vaspaceGetVaLimit_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetVaLimit__(pVAS); +} + +static inline NV_STATUS vaspaceGetVasInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pVAS->__vaspaceGetVasInfo__(pVAS, pParams); +} + +static inline NvU32 vaspaceGetFlags_edd98b(struct OBJVASPACE *pVAS) { + return 0U; +} + +static inline NvU32 vaspaceGetFlags_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetFlags__(pVAS); +} + +NvBool vaspaceIsInternalVaRestricted_IMPL(struct OBJVASPACE *pVAS); + +static inline NvBool vaspaceIsInternalVaRestricted_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceIsInternalVaRestricted__(pVAS); +} + +void vaspaceIncRefCnt_IMPL(struct OBJVASPACE *pVAS); +#ifdef __nvoc_vaspace_h_disabled +static inline void vaspaceIncRefCnt(struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!"); +} +#else //__nvoc_vaspace_h_disabled +#define vaspaceIncRefCnt(pVAS) vaspaceIncRefCnt_IMPL(pVAS) +#endif //__nvoc_vaspace_h_disabled + +void vaspaceDecRefCnt_IMPL(struct OBJVASPACE *pVAS); +#ifdef __nvoc_vaspace_h_disabled +static inline void vaspaceDecRefCnt(struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!"); +} +#else //__nvoc_vaspace_h_disabled +#define vaspaceDecRefCnt(pVAS) vaspaceDecRefCnt_IMPL(pVAS) +#endif //__nvoc_vaspace_h_disabled + +NV_STATUS vaspaceGetByHandleOrDeviceDefault_IMPL(struct RsClient *pClient, NvHandle 
hDeviceOrSubDevice, NvHandle hVASpace, struct OBJVASPACE **ppVAS);
+#define vaspaceGetByHandleOrDeviceDefault(pClient, hDeviceOrSubDevice, hVASpace, ppVAS) vaspaceGetByHandleOrDeviceDefault_IMPL(pClient, hDeviceOrSubDevice, hVASpace, ppVAS)
+#undef PRIVATE_FIELD
+
+
+// Ideally all non-static base class method declarations should be in the _private.h file
+NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS);
+
+// For getting the address translation after the MMU (i.e., after VA->PA translation)
+#define VAS_ADDRESS_TRANSLATION(pVASpace) ((pVASpace)->addressTranslation)
+
+#endif // _VASPACE_H_
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+#endif // _G_VASPACE_NVOC_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c
new file mode 100644
index 0000000..4cb2434
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c
@@ -0,0 +1,148 @@
+#define NVOC_VIRT_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED
+#include "nvoc/runtime.h"
+#include "nvoc/rtti.h"
+#include "nvtypes.h"
+#include "nvport/nvport.h"
+#include "nvport/inline/util_valist.h"
+#include "utils/nvassert.h"
+#include "g_virt_mem_mgr_nvoc.h"
+
+#ifdef DEBUG
+char __nvoc_class_id_uniqueness_check_0xa030ab = 1;
+#endif
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM;
+
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
+
+void __nvoc_init_OBJVMM(OBJVMM*);
+void __nvoc_init_funcTable_OBJVMM(OBJVMM*);
+NV_STATUS __nvoc_ctor_OBJVMM(OBJVMM*);
+void __nvoc_init_dataField_OBJVMM(OBJVMM*);
+void __nvoc_dtor_OBJVMM(OBJVMM*);
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVMM;
+
+static const struct NVOC_RTTI __nvoc_rtti_OBJVMM_OBJVMM = {
+    /*pClassDef=*/ &__nvoc_class_def_OBJVMM,
+    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJVMM,
+    /*offset=*/ 0,
+};
+
+static const struct NVOC_RTTI __nvoc_rtti_OBJVMM_Object = {
+    /*pClassDef=*/ &__nvoc_class_def_Object,
+    /*dtor=*/ &__nvoc_destructFromBase,
+    /*offset=*/ NV_OFFSETOF(OBJVMM, __nvoc_base_Object),
+};
+
+static const struct NVOC_CASTINFO __nvoc_castinfo_OBJVMM = {
+    /*numRelatives=*/ 2,
+    /*relatives=*/ {
+        &__nvoc_rtti_OBJVMM_OBJVMM,
+        &__nvoc_rtti_OBJVMM_Object,
+    },
+};
+
+const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM =
+{
+    /*classInfo=*/ {
+        /*size=*/ sizeof(OBJVMM),
+        /*classId=*/ classId(OBJVMM),
+        /*providerId=*/ &__nvoc_rtti_provider,
+#if NV_PRINTF_STRINGS_ALLOWED
+        /*name=*/ "OBJVMM",
+#endif
+    },
+    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJVMM,
+    /*pCastInfo=*/ &__nvoc_castinfo_OBJVMM,
+    /*pExportInfo=*/ &__nvoc_export_info_OBJVMM
+};
+
+const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVMM =
+{
+    /*numEntries=*/ 0,
+    /*pExportEntries=*/ 0
+};
+
+void __nvoc_dtor_Object(Object*);
+void __nvoc_dtor_OBJVMM(OBJVMM *pThis) {
+    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+void __nvoc_init_dataField_OBJVMM(OBJVMM *pThis) {
+    PORT_UNREFERENCED_VARIABLE(pThis);
+}
+
+NV_STATUS __nvoc_ctor_Object(Object* );
+NV_STATUS __nvoc_ctor_OBJVMM(OBJVMM *pThis) {
+    NV_STATUS status = NV_OK;
+    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
+    if (status != NV_OK) goto __nvoc_ctor_OBJVMM_fail_Object;
+    __nvoc_init_dataField_OBJVMM(pThis);
+    goto __nvoc_ctor_OBJVMM_exit; // Success
+
+__nvoc_ctor_OBJVMM_fail_Object:
+__nvoc_ctor_OBJVMM_exit:
+
+    return status;
+}
+
+static void 
__nvoc_init_funcTable_OBJVMM_1(OBJVMM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJVMM(OBJVMM *pThis) { + __nvoc_init_funcTable_OBJVMM_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJVMM(OBJVMM *pThis) { + pThis->__nvoc_pbase_OBJVMM = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJVMM(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJVMM(OBJVMM **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJVMM *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJVMM)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJVMM)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJVMM); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJVMM(pThis); + status = __nvoc_ctor_OBJVMM(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJVMM_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJVMM_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJVMM(OBJVMM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJVMM(ppThis, pParent, createFlags); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h new file mode 100644 index 0000000..2913032 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h @@ -0,0 +1,133 @@ +#ifndef _G_VIRT_MEM_MGR_NVOC_H_ +#define _G_VIRT_MEM_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_virt_mem_mgr_nvoc.h" + +#ifndef VIRT_MEM_MGR_H +#define VIRT_MEM_MGR_H + +/**************** Resource Manager Defines and Structures ******************\ +* Defines and structures used for Virtual Memory Management Object. * +\***************************************************************************/ + +#include "mem_mgr/vaspace.h" + +typedef struct OBJVMM *POBJVMM; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + + + +#ifdef NVOC_VIRT_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJVMM { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJVMM *__nvoc_pbase_OBJVMM; +}; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM; + +#define __staticCast_OBJVMM(pThis) \ + ((pThis)->__nvoc_pbase_OBJVMM) + +#ifdef __nvoc_virt_mem_mgr_h_disabled +#define __dynamicCast_OBJVMM(pThis) ((OBJVMM*)NULL) +#else //__nvoc_virt_mem_mgr_h_disabled +#define __dynamicCast_OBJVMM(pThis) \ + ((OBJVMM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVMM))) +#endif //__nvoc_virt_mem_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJVMM(OBJVMM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJVMM(OBJVMM**, Dynamic*, NvU32); +#define __objCreate_OBJVMM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJVMM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS vmmCreateVaspace_IMPL(struct OBJVMM *pVmm, NvU32 _class, NvU32 vaspaceId, NvU32 gpuMask, NvU64 vaStart, NvU64 vaLimit, NvU64 vaInternalStart, NvU64 vaInternalEnd, struct OBJVASPACE *pPteSpaceMap, NvU32 flags, struct OBJVASPACE **ppVAS); +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline NV_STATUS vmmCreateVaspace(struct OBJVMM *pVmm, NvU32 _class, NvU32 vaspaceId, NvU32 gpuMask, NvU64 vaStart, NvU64 vaLimit, NvU64 vaInternalStart, NvU64 vaInternalEnd, struct OBJVASPACE *pPteSpaceMap, NvU32 flags, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmCreateVaspace(pVmm, _class, vaspaceId, gpuMask, vaStart, vaLimit, vaInternalStart, vaInternalEnd, pPteSpaceMap, flags, ppVAS) vmmCreateVaspace_IMPL(pVmm, _class, vaspaceId, gpuMask, vaStart, vaLimit, vaInternalStart, vaInternalEnd, pPteSpaceMap, flags, ppVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +void vmmDestroyVaspace_IMPL(struct OBJVMM *pVmm, struct OBJVASPACE *pVAS); +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline void vmmDestroyVaspace(struct OBJVMM *pVmm, struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmDestroyVaspace(pVmm, pVAS) vmmDestroyVaspace_IMPL(pVmm, pVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +NV_STATUS vmmGetVaspaceFromId_IMPL(struct OBJVMM *pVmm, NvU32 vaspaceId, NvU32 classId, struct OBJVASPACE **ppVAS); +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline NV_STATUS 
vmmGetVaspaceFromId(struct OBJVMM *pVmm, NvU32 vaspaceId, NvU32 classId, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmGetVaspaceFromId(pVmm, vaspaceId, classId, ppVAS) vmmGetVaspaceFromId_IMPL(pVmm, vaspaceId, classId, ppVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif // VIRT_MEM_MGR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VIRT_MEM_MGR_NVOC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h new file mode 100644 index 0000000..c38d2eb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h @@ -0,0 +1,709 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// defines to indicate enabled/disabled for all chips, features, classes, engines, and apis. +// +// Profile: devel-soc-disp-dce-client +// Template: templates/gt_rmconfig.h +// +// Chips: T234D +// + +#ifndef _RMCFG_H_ +#define _RMCFG_H_ + + +// +// CHIP families - enabled or disabled +// +#define RMCFG_CHIP_GF10X 0 +#define RMCFG_CHIP_GF11X 0 +#define RMCFG_CHIP_GF10XF 0 +#define RMCFG_CHIP_GK10X 0 +#define RMCFG_CHIP_GK11X 0 +#define RMCFG_CHIP_GK20X 0 +#define RMCFG_CHIP_GM10X 0 +#define RMCFG_CHIP_GM20X 0 +#define RMCFG_CHIP_GP10X 0 +#define RMCFG_CHIP_GV10X 0 +#define RMCFG_CHIP_GV11X 0 +#define RMCFG_CHIP_TU10X 0 +#define RMCFG_CHIP_GA10X 0 +#define RMCFG_CHIP_GA10XF 0 +#define RMCFG_CHIP_T12X 0 +#define RMCFG_CHIP_T13X 0 +#define RMCFG_CHIP_T21X 0 +#define RMCFG_CHIP_T18X 0 +#define RMCFG_CHIP_T19X 0 +#define RMCFG_CHIP_T23XG 0 +#define RMCFG_CHIP_T23XD 1 +#define RMCFG_CHIP_SIMS 0 + + +// +// CHIPS - enabled or disabled +// +#define RMCFG_CHIP_GM107 0 +#define RMCFG_CHIP_GM108 0 + +#define RMCFG_CHIP_GM200 0 +#define RMCFG_CHIP_GM204 0 +#define RMCFG_CHIP_GM206 0 + +#define RMCFG_CHIP_GP100 0 +#define RMCFG_CHIP_GP102 0 +#define RMCFG_CHIP_GP104 0 +#define RMCFG_CHIP_GP106 0 +#define RMCFG_CHIP_GP107 0 +#define RMCFG_CHIP_GP108 0 + +#define RMCFG_CHIP_GV100 0 + +#define RMCFG_CHIP_GV11B 0 + +#define RMCFG_CHIP_TU102 0 +#define RMCFG_CHIP_TU104 0 +#define RMCFG_CHIP_TU106 0 +#define RMCFG_CHIP_TU116 0 +#define RMCFG_CHIP_TU117 0 + +#define RMCFG_CHIP_GA100 0 +#define RMCFG_CHIP_GA102 0 +#define RMCFG_CHIP_GA103 0 +#define RMCFG_CHIP_GA104 0 +#define RMCFG_CHIP_GA106 0 +#define RMCFG_CHIP_GA107 0 +#define RMCFG_CHIP_GA10B 0 + +#define RMCFG_CHIP_GA102F 0 + +#define RMCFG_CHIP_T194 0 + +#define RMCFG_CHIP_T234 0 + +#define RMCFG_CHIP_T234D 1 + +#define RMCFG_CHIP_AMODEL 0 + +// +// Obsolete CHIPS +// +#define RMCFG_CHIP_GF100 0 +#define RMCFG_CHIP_GF100B 0 +#define RMCFG_CHIP_GF104 0 +#define RMCFG_CHIP_GF104B 0 +#define RMCFG_CHIP_GF106 0 +#define RMCFG_CHIP_GF106B 0 +#define RMCFG_CHIP_GF108 0 +#define RMCFG_CHIP_GF110D 0 +#define RMCFG_CHIP_GF110 0 +#define RMCFG_CHIP_GF117 0 +#define RMCFG_CHIP_GF118 0 +#define RMCFG_CHIP_GF119 0 +#define RMCFG_CHIP_GF110F 0 +#define RMCFG_CHIP_GF110F2 0 +#define RMCFG_CHIP_GF110F3 0 +#define RMCFG_CHIP_GK104 0 +#define RMCFG_CHIP_GK106 0 +#define RMCFG_CHIP_GK107 0 +#define RMCFG_CHIP_GK20A 0 +#define RMCFG_CHIP_GK110 0 +#define RMCFG_CHIP_GK110B 0 +#define RMCFG_CHIP_GK110C 0 +#define RMCFG_CHIP_GK208 0 +#define RMCFG_CHIP_GK208S 0 +#define RMCFG_CHIP_T001_FERMI_NOT_EXIST 0 +#define RMCFG_CHIP_T124 0 +#define RMCFG_CHIP_T132 0 +#define RMCFG_CHIP_T210 
0 +#define RMCFG_CHIP_T186 0 +#define RMCFG_CHIP_T002_TURING_NOT_EXIST 0 + + +// +// CHIP aliases +// +#define RMCFG_CHIP_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dFERMI 0 +#define RMCFG_CHIP_DFERMI 0 +#define RMCFG_CHIP_FERMI 0 +#define RMCFG_CHIP_FERMI_CLASSIC_GPUS 0 +#define RMCFG_CHIP_ALL 1 +#define RMCFG_CHIP_ALL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS 1 +#define RMCFG_CHIP_ALL_CHIPS_CLASSIC_GPUS 0 +#define RMCFG_CHIP_DISPLAYLESS 0 +#define RMCFG_CHIP_dKEPLER 0 +#define RMCFG_CHIP_DKEPLER 0 +#define RMCFG_CHIP_KEPLER 0 +#define RMCFG_CHIP_KEPLER_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dMAXWELL 0 +#define RMCFG_CHIP_DMAXWELL 0 +#define RMCFG_CHIP_MAXWELL 0 +#define RMCFG_CHIP_MAXWELL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dPASCAL 0 +#define RMCFG_CHIP_DPASCAL 0 +#define RMCFG_CHIP_PASCAL 0 +#define RMCFG_CHIP_PASCAL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dVOLTA 0 +#define RMCFG_CHIP_DVOLTA 0 +#define RMCFG_CHIP_VOLTA 0 +#define RMCFG_CHIP_VOLTA_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dTURING 0 +#define RMCFG_CHIP_DTURING 0 +#define RMCFG_CHIP_TURING 0 +#define RMCFG_CHIP_TURING_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dAMPERE 0 +#define RMCFG_CHIP_DAMPERE 0 +#define RMCFG_CHIP_AMPERE 0 +#define RMCFG_CHIP_AMPERE_CLASSIC_GPUS 0 +#define RMCFG_CHIP_TEGRA_DGPU_AMPERE 0 +#define RMCFG_CHIP_TEGRA_DGPU 0 +#define RMCFG_CHIP_DFPGA 0 +#define RMCFG_CHIP_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_FERMI_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TEGRA 1 +#define RMCFG_CHIP_TEGRA_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_ALL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tKEPLER 0 +#define RMCFG_CHIP_TKEPLER 0 +#define RMCFG_CHIP_KEPLER_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tMAXWELL 0 +#define RMCFG_CHIP_TMAXWELL 0 +#define RMCFG_CHIP_MAXWELL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tPASCAL 0 +#define RMCFG_CHIP_TPASCAL 0 +#define RMCFG_CHIP_PASCAL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tVOLTA 0 +#define RMCFG_CHIP_TVOLTA 0 +#define RMCFG_CHIP_VOLTA_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TURING_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_T23X 1 +#define RMCFG_CHIP_T23X_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tAMPERE 0 +#define RMCFG_CHIP_TAMPERE 0 +#define RMCFG_CHIP_AMPERE_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_T23X_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_TEGRA_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_ALL_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_ALL_CHIPS_TEGRA_NVDISP_GPUS 1 +#define RMCFG_CHIP_SIMULATION_GPUS 0 +#define RMCFG_CHIP_ALL_SIMULATION_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS_SIMULATION_GPUS 0 + + +// +// Features - enabled or disabled +// +#define RMCFG_FEATURE_PLATFORM_UNKNOWN 0 // Running on an unknown platform +#define RMCFG_FEATURE_PLATFORM_WINDOWS 0 // Running on Windows +#define RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM 0 // Running on Windows LDDM +#define RMCFG_FEATURE_PLATFORM_WINDOWS_VISTA 0 // aka PLATFORM_WINDOWS_LDDM +#define RMCFG_FEATURE_PLATFORM_UNIX 1 // Running on Unix +#define RMCFG_FEATURE_PLATFORM_DCE 0 // Running on Display Control Engine (DCE, an ARM Cortex R5 on Tegra) +#define RMCFG_FEATURE_PLATFORM_SIM 0 // Running on Simulator +#define RMCFG_FEATURE_PLATFORM_MODS 0 // Running as part of MODS +#define RMCFG_FEATURE_PLATFORM_GSP 0 // Running as part of GSP Firmware +#define RMCFG_FEATURE_PLATFORM_MODS_WINDOWS 0 // Running as part of MODS on Windows +#define RMCFG_FEATURE_PLATFORM_MODS_UNIX 0 // Running as part of MODS on UNIX +#define RMCFG_FEATURE_ARCH_UNKNOWN 0 // unknown arch +#define RMCFG_FEATURE_ARCH_X86 0 // Intel x86, 32bit +#define 
RMCFG_FEATURE_ARCH_X64 0 // Intel 64bit
+#define RMCFG_FEATURE_ARCH_RISCV64 0 // RISCV, 64bit
+#define RMCFG_FEATURE_ARCH_AMD64 0 // AMD, 64bit
+#define RMCFG_FEATURE_ARCH_PPC 0 // Power PC
+#define RMCFG_FEATURE_ARCH_PPC64LE 0 // 64-bit PPC little-endian
+#define RMCFG_FEATURE_ARCH_ARM 0 // ARM
+#define RMCFG_FEATURE_ARCH_ARM_V7 0 // ARM v7
+#define RMCFG_FEATURE_ARCH_AARCH64 1 // AArch64
+#define RMCFG_FEATURE_RMCORE_BASE 1 // RMCORE Base
+#define RMCFG_FEATURE_ORIN_PHYSICAL_RM 0 // Physical layer of RM, disabled only on Orin
+#define RMCFG_FEATURE_KERNEL_RM 1 // Kernel layer of RM
+#define RMCFG_FEATURE_NOTEBOOK 0 // Notebook support
+#define RMCFG_FEATURE_MXM 0 // MXM Module Support (all versions)
+#define RMCFG_FEATURE_DCB_0X 1 // Fallback DCB routines
+#define RMCFG_FEATURE_DCB_4X 1 // DCB4x (used on G8x and later)
+#define RMCFG_FEATURE_XAPI 0 // Use XAPI for resman api calls
+#define RMCFG_FEATURE_HOTPLUG_POLLING 0 // HotPlug polling
+#define RMCFG_FEATURE_RM_BASIC_LOCK_MODEL 1 // Support for Basic Lock Model in RM
+#define RMCFG_FEATURE_VIRTUALIZATION 0 // Detection and Guest RM Implementation within a Virtualization environment
+#define RMCFG_FEATURE_PRESILICON 0 // For builds that can run on simulated or emulated GPU
+#define RMCFG_FEATURE_GSP_CLIENT_RM 0 // GSP client RM
+#define RMCFG_FEATURE_DCE_CLIENT_RM 1 // DCE client RM
+#define RMCFG_FEATURE_PROTOBUF 0 // Protobuf data encoding for OCA data dumps
+#define RMCFG_FEATURE_RELEASE_BUILD 1 // Release Build
+#define RMCFG_FEATURE_MODULE_BRANCH 1 // Used to differentiate Release/Module (develop) branches
+#define RMCFG_FEATURE_VERIF_ONLY_CONTROLS 0 // Allow verify-only control cmds to be used on verif builds (determined by this feature)
+#define RMCFG_FEATURE_DEVINIT_SCRIPT 0 // VBIOS scripting engine for sharing register sequences
+#define RMCFG_FEATURE_VBIOS_IMAGE 1 // GPU uses a VBIOS image for data
+#define RMCFG_FEATURE_DSI_INFO 0 // DSI information structures support
+#define RMCFG_FEATURE_CAMERA 1 // platform and chip specific feature
+#define RMCFG_FEATURE_SPARSE_TEXTURE 0 // Enables optimization and defaults for sparse texture
+#define RMCFG_FEATURE_NVSR_ON_NVDISPLAY 1 // NVSR on Nvdisplay
+#define RMCFG_FEATURE_MANUAL_TRIGGER_BA_DMA_MODE 0 // Support for manually actuated BA DMA mode data collection.
+#define RMCFG_FEATURE_RM_DRIVEN_BA_DMA_MODE 0 // Support for RM-driven BA DMA mode data collection.
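+//
+// Illustrative usage sketch (not part of the generated file): these
+// per-feature constants are consumed through RMCFG_FEATURE_ENABLED(),
+// defined at the end of this header, which works from both the
+// preprocessor and plain C code. The helper names below are hypothetical:
+//
+//     #if RMCFG_FEATURE_ENABLED(DCE_CLIENT_RM)
+//     status = dceclientInitialize(pGpu);        // hypothetical init call
+//     #endif
+//
+//     if (RMCFG_FEATURE_ENABLED(PLATFORM_UNIX))  // compile-time constant;
+//         osApplyUnixQuirks();                   // dead branch is elided
+//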
+#define RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY 1 // Tegra SOC NvDisplay Driver +#define RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY_MINIMAL 1 // Enable only those parts of display code which are needed for Tegra SOC NvDisplay Driver +#define RMCFG_FEATURE_HEAD_REGIONAL_CRC 1 // Display Head Regional CRC support +#define RMCFG_FEATURE_MULTICAST_FABRIC 1 // Support for MULTICAST_FABRIC + + + +// +// Classes - enabled or disabled +// +#define RMCFG_CLASS_NV01_ROOT 1 +#define RMCFG_CLASS_NV1_ROOT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV01_NULL_OBJECT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV1_NULL_OBJECT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV01_ROOT_NON_PRIV 1 +#define RMCFG_CLASS_NV1_ROOT_NON_PRIV 1 // aka NV01_ROOT_NON_PRIV +#define RMCFG_CLASS_NV01_ROOT_CLIENT 1 +#define RMCFG_CLASS_NV0020_GPU_MANAGEMENT 1 +#define RMCFG_CLASS_NV01_DEVICE_0 1 +#define RMCFG_CLASS_NV20_SUBDEVICE_0 1 +#define RMCFG_CLASS_NV2081_BINAPI 1 +#define RMCFG_CLASS_NV2082_BINAPI_PRIVILEGED 1 +#define RMCFG_CLASS_NV20_SUBDEVICE_DIAG 0 +#define RMCFG_CLASS_NV01_CONTEXT_DMA 1 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM 1 +#define RMCFG_CLASS_NV1_MEMORY_SYSTEM 1 // aka NV01_MEMORY_SYSTEM +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_PRIVILEGED 0 +#define RMCFG_CLASS_NV1_MEMORY_LOCAL_PRIVILEGED 0 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV01_MEMORY_PRIVILEGED 0 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV1_MEMORY_PRIVILEGED 0 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_USER 0 +#define RMCFG_CLASS_NV1_MEMORY_LOCAL_USER 0 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV01_MEMORY_USER 0 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV1_MEMORY_USER 0 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV01_MEMORY_VIRTUAL 0 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM_DYNAMIC 0 // aka NV01_MEMORY_VIRTUAL +#define RMCFG_CLASS_NV1_MEMORY_SYSTEM_DYNAMIC 0 // aka NV01_MEMORY_VIRTUAL +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_PHYSICAL 0 +#define RMCFG_CLASS_NV01_MEMORY_SYNCPOINT 1 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM_OS_DESCRIPTOR 1 +#define RMCFG_CLASS_NV01_MEMORY_LIST_SYSTEM 0 +#define RMCFG_CLASS_NV_IMEX_SESSION 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_EXPORT_V2 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_IMPORT_V2 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_EXPORTED_REF 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_IMPORTED_REF 0 +#define RMCFG_CLASS_IO_VASPACE_A 1 +#define RMCFG_CLASS_NV01_NULL 0 +#define RMCFG_CLASS_NV1_NULL 0 // aka NV01_NULL +#define RMCFG_CLASS_NV01_EVENT 1 +#define RMCFG_CLASS_NV1_EVENT 1 // aka NV01_EVENT +#define RMCFG_CLASS_NV01_EVENT_KERNEL_CALLBACK 1 +#define RMCFG_CLASS_NV1_EVENT_KERNEL_CALLBACK 1 // aka NV01_EVENT_KERNEL_CALLBACK +#define RMCFG_CLASS_NV01_EVENT_OS_EVENT 1 +#define RMCFG_CLASS_NV1_EVENT_OS_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV01_EVENT_WIN32_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV1_EVENT_WIN32_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV01_EVENT_KERNEL_CALLBACK_EX 1 +#define RMCFG_CLASS_NV1_EVENT_KERNEL_CALLBACK_EX 1 // aka NV01_EVENT_KERNEL_CALLBACK_EX +#define RMCFG_CLASS_NV01_TIMER 0 +#define RMCFG_CLASS_NV1_TIMER 0 // aka NV01_TIMER +#define RMCFG_CLASS_NVC372_DISPLAY_SW 1 +#define RMCFG_CLASS_NVC673_DISP_CAPABILITIES 1 +#define RMCFG_CLASS_NV04_DISPLAY_COMMON 1 +#define RMCFG_CLASS_NVC670_DISPLAY 1 +#define RMCFG_CLASS_NVC671_DISP_SF_USER 1 +#define RMCFG_CLASS_NVC67A_CURSOR_IMM_CHANNEL_PIO 1 +#define RMCFG_CLASS_NVC67B_WINDOW_IMM_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC67D_CORE_CHANNEL_DMA 1 
+#define RMCFG_CLASS_NVC67E_WINDOW_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC77F_ANY_CHANNEL_DMA 1 +#define RMCFG_CLASS_NV50_P2P 0 +#define RMCFG_CLASS_GF100_HDACODEC 1 +#define RMCFG_CLASS_NV_E3_THREED 0 // Tegra 3D class +#define RMCFG_CLASS_NV_EVENT_BUFFER 0 // Event buffer class used to share event data with UMD + + + +// +// MODULES - enabled or disabled +// +#define RMCFG_MODULE_Object 1 // Base class for NVOC objects +#define RMCFG_MODULE_OBJECT 1 // aka Object +#define RMCFG_MODULE_TRACEABLE 0 // Interface for CaptureState +#define RMCFG_MODULE_ENGSTATE 1 // Base class for engines with generic constructors, StateLoad, etc. +#define RMCFG_MODULE_HOSTENG 0 // Base class for host engines +#define RMCFG_MODULE_FLCNABLE 0 // Base class for engines requiring falcon +#define RMCFG_MODULE_PMUCLIENT 0 // Base class for engines that use PMU engine +#define RMCFG_MODULE_INTRABLE 0 // Base class to generate and service top-level interrupts +#define RMCFG_MODULE_MUTEXABLE 0 // Base class for engines that implements mutex +#define RMCFG_MODULE_GpuMutexMgr 0 // GPU Mutex Manager +#define RMCFG_MODULE_GPUMUTEXMGR 0 // aka GpuMutexMgr +#define RMCFG_MODULE_BIF 0 // Bus Interface +#define RMCFG_MODULE_KERNEL_BIF 0 // Bus Interface on Kernel(CPU) RM +#define RMCFG_MODULE_BUS 0 // Bus +#define RMCFG_MODULE_KERNEL_BUS 0 // Bus on Kernel(CPU) RM +#define RMCFG_MODULE_ClockManager 0 // Clock Manager +#define RMCFG_MODULE_CLOCKMANAGER 0 // aka ClockManager +#define RMCFG_MODULE_KERNEL_ClockManager 0 // Kernel controls for Clock Manager +#define RMCFG_MODULE_KERNEL_CLOCKMANAGER 0 // aka KERNEL_ClockManager +#define RMCFG_MODULE_DAC 0 // DAC Resource +#define RMCFG_MODULE_KERNEL_DISPLAY 1 // Display module on Kernel(CPU) RM +#define RMCFG_MODULE_DISP 0 // Display +#define RMCFG_MODULE_VIRT_MEM_ALLOCATOR 0 +#define RMCFG_MODULE_DPAUX 0 +#define RMCFG_MODULE_MEMORY_SYSTEM 0 // Memory System +#define RMCFG_MODULE_KERNEL_MEMORY_SYSTEM 0 // Kernel Memory System +#define RMCFG_MODULE_MEMORY_MANAGER 1 // Memory Manager +#define RMCFG_MODULE_FBFLCN 0 // FB falcon +#define RMCFG_MODULE_FBSR 0 // Frame Buffer Save/Restore +#define RMCFG_MODULE_KERNEL_FIFO 0 // Fifo Module on Kernel(CPU) RM +#define RMCFG_MODULE_FIFO 0 // aka. HOST +#define RMCFG_MODULE_SCHED 0 // Scheduler for runlist +#define RMCFG_MODULE_FLCN 0 // Falcon-derived engines +#define RMCFG_MODULE_KERNEL_FALCON 0 // Falcon on Kernel(CPU) RM. Used for booting Falcon cores. +#define RMCFG_MODULE_GR 0 // Graphic +#define RMCFG_MODULE_GR0 0 // aka GR +#define RMCFG_MODULE_KERNEL_GRAPHICS 0 // Graphic on Kernel(CPU) RM +#define RMCFG_MODULE_GRMGR 0 // Graphics manager. Used for maintaining Gr partitioning policies +#define RMCFG_MODULE_MIG_MANAGER 0 // MIG manager on Physical (GSP) RM. Used for maintaining device partitioning policies +#define RMCFG_MODULE_KERNEL_MIG_MANAGER 0 // MIG manager on Kernel (CPU) RM. Used for maintaining device partitioning policies +#define RMCFG_MODULE_KERNEL_GRAPHICS_MANAGER 0 // Graphics manager on Kernel (CPU) RM. 
Used for maintaining Gr partitioning policies
+#define RMCFG_MODULE_HAL 1 // Hardware Abstraction Layer
+#define RMCFG_MODULE_HEAD 1 // Display component: Head
+#define RMCFG_MODULE_SF 1 // Display component: Serial Formatter, output protocol formatting
+#define RMCFG_MODULE_DISPLAY_INSTANCE_MEMORY 1
+#define RMCFG_MODULE_KERNEL_HEAD 1
+#define RMCFG_MODULE_INTR 0
+#define RMCFG_MODULE_MC 0
+#define RMCFG_MODULE_KERNEL_MC 0 // Master Control-related code needed in Kernel RM
+#define RMCFG_MODULE_PRIV_RING 0
+#define RMCFG_MODULE_KERNEL_PERF 0 // Performance module on Kernel(CPU) RM
+#define RMCFG_MODULE_PERF 0 // Performance Monitor
+#define RMCFG_MODULE_STEREO 0 // Stereo Viewing
+#define RMCFG_MODULE_TMR 1
+#define RMCFG_MODULE_SEQ 0 // Sequencer for backlight and LVDS control
+#define RMCFG_MODULE_VGA 0 // Video Graphics Array
+#define RMCFG_MODULE_VBIOS 0
+#define RMCFG_MODULE_KERNEL_RC 0 // Robust Channels and Watchdog Kernel API
+#define RMCFG_MODULE_RC 0 // Robust Channels
+#define RMCFG_MODULE_NV_DEBUG_DUMP 0 // NV Debug
+#define RMCFG_MODULE_SWENG 0 // Software Engine for all SW classes
+#define RMCFG_MODULE_GPU 1 // GPU Control Object
+#define RMCFG_MODULE_I2C 0 // i2c Serial Interface
+#define RMCFG_MODULE_KERNEL_I2C 0 // Kernel controls for I2C
+#define RMCFG_MODULE_SPI 0 // SPI Interface
+#define RMCFG_MODULE_SMBPBI 0 // SMBus Post-Box Interface
+#define RMCFG_MODULE_GPIO 0 // General Purpose I/O Pins
+#define RMCFG_MODULE_KERNEL_GPIO 0 // Kernel controls for GPIO
+#define RMCFG_MODULE_FAN 0 // Fan control
+#define RMCFG_MODULE_KERNEL_FAN 0 // Kernel controls for FAN
+#define RMCFG_MODULE_FUSE 0
+#define RMCFG_MODULE_VOLT 0
+#define RMCFG_MODULE_KERNEL_VOLT 0 // Kernel controls for VOLT
+#define RMCFG_MODULE_THERM 0 // Thermal Monitoring
+#define RMCFG_MODULE_KERNEL_THERM 0 // Kernel controls Thermal Monitoring
+#define RMCFG_MODULE_OR 1 // Display component: Output Resource
+#define RMCFG_MODULE_PIOR 0 // Display component: Parallel Input Output Resource
+#define RMCFG_MODULE_SOR 1 // Display component: Serial Output Resource
+#define RMCFG_MODULE_DSI 1 // Display Serial Interface
+#define RMCFG_MODULE_HDCP 1 // High-bandwidth Digital Content Protection
+#define RMCFG_MODULE_HDMI 1 // High-Definition Multimedia Interface
+#define RMCFG_MODULE_ISOHUB 1 // Display's memory read interface
+#define RMCFG_MODULE_BSP 0 // Bit Stream Processor/NVDEC
+#define RMCFG_MODULE_NVDEC 0 // aka BSP
+#define RMCFG_MODULE_KERNEL_NVDEC 0 // NVDEC on Kernel(CPU) RM. Used for booting Falcon cores.
+#define RMCFG_MODULE_CIPHER 0
+#define RMCFG_MODULE_CE 0 // Copy Engine
+#define RMCFG_MODULE_KERNEL_CE 0 // Kernel Copy Engine
+#define RMCFG_MODULE_PMU 0 // PMU peregrine core
+#define RMCFG_MODULE_KERNEL_PMU 0 // PMU peregrine core on Kernel(CPU) RM
+#define RMCFG_MODULE_GPS 0 // GPU Performance Scaling
+#define RMCFG_MODULE_MSENC 0 // Video Encoder (MSENC) Engine
+#define RMCFG_MODULE_KERNEL_NVENC 0
+#define RMCFG_MODULE_HDA 0 // High Definition Audio (HDA) Engine
+#define RMCFG_MODULE_HDACODEC 0 // High Definition Audio (HDA) Codec Engine
+#define RMCFG_MODULE_INFOROM 0 // InfoROM Engine
+#define RMCFG_MODULE_KERNEL_INFOROM 0 // Kernel controls for InfoROM Engine
+#define RMCFG_MODULE_LPWR 0 // Low Power Object. This object manages all power saving features.
+#define RMCFG_MODULE_KERNEL_LPWR 0 // Low Power Object. This object manages all power saving features.
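+//
+// Note (sketch, based on the generated headers earlier in this patch): a
+// module disabled here is not simply compiled away; its public entry points
+// collapse to asserting stubs so that callers still compile and link. For
+// example, g_virt_mem_mgr_nvoc.h guards OBJVMM behind
+// __nvoc_virt_mem_mgr_h_disabled:
+//
+//     #ifdef __nvoc_virt_mem_mgr_h_disabled
+//     static inline NV_STATUS vmmCreateVaspace(...)
+//     {
+//         NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!");
+//         return NV_ERR_NOT_SUPPORTED;
+//     }
+//     #endif
+//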
+#define RMCFG_MODULE_PGCTRL 0 // Power Gating Controller (PGCTRL) Engine
+#define RMCFG_MODULE_LPWRFSM 1 // LPWR FSM Object Engine
+
+#define RMCFG_MODULE_PGISLAND 0 // Power Gating Island (PGISLAND)
+#define RMCFG_MODULE_AP 0 // Adaptive Power Object (AP) Engine
+#define RMCFG_MODULE_PSI 0 // Phase State Indicator Engine. HW folks call it the Power Saving Interface.
+#define RMCFG_MODULE_CG 0 // Clock Gating Object Engine.
+#define RMCFG_MODULE_RPPG 0 // RAM Periphery Power Gating Object Engine.
+#define RMCFG_MODULE_EI 0 // Engine Idle Framework Object Engine.
+#define RMCFG_MODULE_DPU 0 // Display Falcon
+#define RMCFG_MODULE_PMGR 0 // PCB Manager engine
+#define RMCFG_MODULE_KERNEL_PMGR 0 // Kernel controls for Pmgr
+#define RMCFG_MODULE_SYS 1 // System
+#define RMCFG_MODULE_OS 1 // OS Layer
+#define RMCFG_MODULE_GPUMGR 1 // GPU Manager object
+#define RMCFG_MODULE_HEAP 0 // Heap Engine Object
+#define RMCFG_MODULE_BRIGHTC 1 // Backlight brightness control module
+#define RMCFG_MODULE_GSYNCMGR 0 // GSYNC Manager
+#define RMCFG_MODULE_OD 1 // Display component: Output Device
+#define RMCFG_MODULE_DFP 1 // Display component: Display Flat Panel
+#define RMCFG_MODULE_CRT 0 // Display component: Cathode ray tube
+#define RMCFG_MODULE_DisplayPort 1 // Display component: DisplayPort
+#define RMCFG_MODULE_DISPLAYPORT 1 // aka DisplayPort
+#define RMCFG_MODULE_TMDS 1 // Display component: Transition Minimized Differential Signaling
+#define RMCFG_MODULE_CL 0 // Core Logic
+#define RMCFG_MODULE_RCDB 0 // RC Journal log DB
+#define RMCFG_MODULE_SWINSTR 0 // Software Instrumentation
+#define RMCFG_MODULE_GPUACCT 0 // GPU Accounting
+#define RMCFG_MODULE_GRDBG 0 // Debugger Engine Object
+#define RMCFG_MODULE_PSR 1 // Panel Self Refresh
+#define RMCFG_MODULE_UVM 0 // Unified Virtual Memory - provides interface to separate UVM and verification support
+#define RMCFG_MODULE_VGPUMGR 0 // Virtual GPU management
+#define RMCFG_MODULE_SEC2 0 // New secure falcon
+#define RMCFG_MODULE_KERNEL_SEC2 0 // SEC2 on Kernel(CPU) RM. Used for booting Falcon cores.
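+//
+// NVOC creation sketch (illustrative, mirroring __nvoc_objCreate_OBJVMM from
+// g_virt_mem_mgr_nvoc.c in this patch): enabled NVOC classes are instantiated
+// through the generated __objCreate_<Class> wrapper, which allocates zeroed
+// storage, wires up RTTI, parents the object, and runs the constructor chain.
+// pParent below stands for any Dynamic-derived owner and is hypothetical:
+//
+//     OBJVMM *pVmm = NULL;
+//     NV_STATUS status = __objCreate_OBJVMM(&pVmm, pParent, 0 /*createFlags*/);
+//     if (status != NV_OK)
+//         return status;   // allocation already freed by the cleanup path
+//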
+#define RMCFG_MODULE_PMS 0 // PMU ModeSet object +#define RMCFG_MODULE_GCX 0 // Idle power states of GPU +#define RMCFG_MODULE_LSFM 0 // Light Secure Falcon Manager object +#define RMCFG_MODULE_ACR 0 // Programs MMU to protect the region +#define RMCFG_MODULE_REFCNT 0 // Reference Counting +#define RMCFG_MODULE_GPULOG 0 // Logger for logging GPU related data +#define RMCFG_MODULE_FECS 0 // Front-end context switch +#define RMCFG_MODULE_HYPERVISOR 0 // Hypervisor object to support its native API +#define RMCFG_MODULE_VRRMGR 0 // VRR Management object +#define RMCFG_MODULE_GPCCS 0 // GPC context switch +#define RMCFG_MODULE_MISSING 0 // MISSING (placeholder) Engine +#define RMCFG_MODULE_VMM 1 // virtual memory manager +#define RMCFG_MODULE_VASPACE 1 // virtual address space +#define RMCFG_MODULE_GVASPACE 0 // GPU virtual address space +#define RMCFG_MODULE_AVASPACE 0 // AMODEL virtual address space +#define RMCFG_MODULE_IOVASPACE 1 // IOMMU virtual address space +#define RMCFG_MODULE_FABRICVASPACE 0 // FABRIC virtual address space +#define RMCFG_MODULE_MMU 0 // Memory Management Unit- HW interface +#define RMCFG_MODULE_GMMU 0 // GPU Memory Management Unit +#define RMCFG_MODULE_KERNEL_GMMU 0 // GPU Memory Management Unit on Kernel(CPU) RM +#define RMCFG_MODULE_VMMU 0 // Virtual Memory Management Unit (for vGPU) +#define RMCFG_MODULE_GPUGRP 1 // Group of GPU(s) that may or may not be in SLI +#define RMCFG_MODULE_KERNEL_HWPM 0 // Hardware Performance Monitor on Kernel(CPU) RM +#define RMCFG_MODULE_HWPM 0 // Hardware Performance Monitor +#define RMCFG_MODULE_NVLINK 0 // NVLINK High-speed GPU interconnect +#define RMCFG_MODULE_KERNEL_NVLINK 0 // Nvlink on Kernel(CPU) RM +#define RMCFG_MODULE_KERNEL_NVLINK 0 // Nvlink on Kernel(CPU) RM +#define RMCFG_MODULE_IOCTRL 0 // NVLINK Ioctrl +#define RMCFG_MODULE_HSHUB 0 // High Speed Hub +#define RMCFG_MODULE_HSHUBMANAGER 0 // High Speed Hub Manager +#define RMCFG_MODULE_KERNEL_HSHUB 0 // High Speed Hub on Kernel(CPU) RM +#define RMCFG_MODULE_KERNEL_HSHUB 0 // High Speed Hub on Kernel(CPU) RM +#define RMCFG_MODULE_GPUMON 0 // GPU Monitoring +#define RMCFG_MODULE_GPUBOOSTMGR 0 // Sync Gpu Boost Manager +#define RMCFG_MODULE_GRIDDISPLAYLESS 0 // GRID Displayless +#define RMCFG_MODULE_WINDOW 1 // NvDisplay WINDOW channel +#define RMCFG_MODULE_RPC 0 // RPC Engine for VGPU +#define RMCFG_MODULE_RPCSTRUCTURECOPY 0 // RPC structure copying for VGPU +#define RMCFG_MODULE_NVJPG 0 // Video JPEG (NVJPG) Engine +#define RMCFG_MODULE_KERNEL_NVJPG 0 +#define RMCFG_MODULE_GSP 0 // GPU system processor +#define RMCFG_MODULE_KERNEL_GSP 0 // GSP on Kernel(CPU) RM. Used for booting RM on GSP. 
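+//
+// With RMCFG_MODULE_VMM and RMCFG_MODULE_VASPACE enabled above, an
+// OBJVASPACE is obtained through the OBJVMM interface declared in
+// g_virt_mem_mgr_nvoc.h. A minimal sketch; the class handle, range and
+// mask values here are placeholders, not taken from the source:
+//
+//     struct OBJVASPACE *pVAS = NULL;
+//     status = vmmCreateVaspace(pVmm, classId /*e.g. an IO VA space class*/,
+//                               0 /*vaspaceId*/, gpuMask,
+//                               vaStart, vaLimit,
+//                               0, 0 /*internal range: unused here*/,
+//                               NULL /*pPteSpaceMap*/,
+//                               VASPACE_FLAGS_NONE, &pVAS);
+//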
+#define RMCFG_MODULE_OFA 0 // Optical Flow Accelerator
+#define RMCFG_MODULE_KERNEL_OFA 0
+#define RMCFG_MODULE_HOT_PLUG 0 // Display component: hot plug
+#define RMCFG_MODULE_FABRIC 0 // NVLink Fabric
+#define RMCFG_MODULE_GPUDB 1 // GPU DATABASE
+#define RMCFG_MODULE_NNE 0 // Neural Net Engine (NNE)
+#define RMCFG_MODULE_DCECLIENTRM 1 // DCE Client RM
+#define RMCFG_MODULE_DCB 0 // Display Control Block for all display related data in VBIOS/DCB Image
+#define RMCFG_MODULE_DISPMACRO 0 // DispMacro RM infrastructure for IED removal from VBIOS
+#define RMCFG_MODULE_DISP_MGR 0 // Lid- and dock-related disp code for NOTEBOOK
+#define RMCFG_MODULE_PLATFORM 0 // Object for platform related features
+
+
+
+//
+// APIs - enabled or disabled
+//
+#define RMCFG_API_NV04_ALLOC 1
+#define RMCFG_API_NVOS21_PARAMETERS 1 // aka NV04_ALLOC
+#define RMCFG_API_NV_ESC_RM_ALLOC 1 // aka NV04_ALLOC
+#define RMCFG_API_Nv04Alloc 1 // aka NV04_ALLOC
+#define RMCFG_API_NvRmAlloc 1 // aka NV04_ALLOC
+#define RMCFG_API_NV04_ALLOC_WITH_ACCESS 1
+#define RMCFG_API_NVOS64_PARAMETERS 1 // aka NV04_ALLOC_WITH_ACCESS
+#define RMCFG_API_NV_ESC_RM_ALLOC 1 // aka NV04_ALLOC_WITH_ACCESS
+#define RMCFG_API_Nv04AllocWithAccess 1 // aka NV04_ALLOC_WITH_ACCESS
+#define RMCFG_API_NvRmAllocWithAccess 1 // aka NV04_ALLOC_WITH_ACCESS
+#define RMCFG_API_NV01_ALLOC_MEMORY 1
+#define RMCFG_API_NVOS02_PARAMETERS 1 // aka NV01_ALLOC_MEMORY
+#define RMCFG_API_NV_ESC_RM_ALLOC_MEMORY 1 // aka NV01_ALLOC_MEMORY
+#define RMCFG_API_Nv01AllocMemory 1 // aka NV01_ALLOC_MEMORY
+#define RMCFG_API_NvRmAllocMemory64 1 // aka NV01_ALLOC_MEMORY
+#define RMCFG_API_NV01_ALLOC_OBJECT 1
+#define RMCFG_API_NVOS05_PARAMETERS 1 // aka NV01_ALLOC_OBJECT
+#define RMCFG_API_NV_ESC_RM_ALLOC_OBJECT 1 // aka NV01_ALLOC_OBJECT
+#define RMCFG_API_Nv01AllocObject 1 // aka NV01_ALLOC_OBJECT
+#define RMCFG_API_NvRmAllocObject 1 // aka NV01_ALLOC_OBJECT
+#define RMCFG_API_NV01_FREE 1
+#define RMCFG_API_NVOS00_PARAMETERS 1 // aka NV01_FREE
+#define RMCFG_API_NV_ESC_RM_FREE 1 // aka NV01_FREE
+#define RMCFG_API_Nv01Free 1 // aka NV01_FREE
+#define RMCFG_API_NvRmFree 1 // aka NV01_FREE
+#define RMCFG_API_NV04_VID_HEAP_CONTROL 1
+#define RMCFG_API_NVOS32_PARAMETERS 1 // aka NV04_VID_HEAP_CONTROL
+#define RMCFG_API_NV_ESC_RM_VID_HEAP_CONTROL 1 // aka NV04_VID_HEAP_CONTROL
+#define RMCFG_API_Nv04VidHeapControl 1 // aka NV04_VID_HEAP_CONTROL
+#define RMCFG_API_NvRmVidHeapControl 1 // aka NV04_VID_HEAP_CONTROL
+#define RMCFG_API_NV01_CONFIG_GET 0
+#define RMCFG_API_NVOS13_PARAMETERS 0 // aka NV01_CONFIG_GET
+#define RMCFG_API_NV_ESC_RM_CONFIG_GET 0 // aka NV01_CONFIG_GET
+#define RMCFG_API_Nv01ConfigGet 0 // aka NV01_CONFIG_GET
+#define RMCFG_API_NvRmConfigGet 0 // aka NV01_CONFIG_GET
+#define RMCFG_API_NV01_CONFIG_SET 0
+#define RMCFG_API_NVOS14_PARAMETERS 0 // aka NV01_CONFIG_SET
+#define RMCFG_API_NV_ESC_RM_CONFIG_SET 0 // aka NV01_CONFIG_SET
+#define RMCFG_API_Nv01ConfigSet 0 // aka NV01_CONFIG_SET
+#define RMCFG_API_NvRmConfigSet 0 // aka NV01_CONFIG_SET
+#define RMCFG_API_NV04_CONFIG_GET_EX 0
+#define RMCFG_API_NVOS_CONFIG_GET_EX_PARAMS 0 // aka NV04_CONFIG_GET_EX
+#define RMCFG_API_NV_ESC_RM_CONFIG_GET_EX 0 // aka NV04_CONFIG_GET_EX
+#define RMCFG_API_Nv04ConfigGetEx 0 // aka NV04_CONFIG_GET_EX
+#define RMCFG_API_NvRmConfigGetEx 0 // aka NV04_CONFIG_GET_EX
+#define RMCFG_API_NV04_CONFIG_SET_EX 0
+#define RMCFG_API_NVOS_CONFIG_SET_EX_PARAMS 0 // aka NV04_CONFIG_SET_EX
+#define RMCFG_API_NV_ESC_RM_CONFIG_SET_EX 0 // aka NV04_CONFIG_SET_EX
+#define 
RMCFG_API_Nv04ConfigSetEx 0 // aka NV04_CONFIG_SET_EX +#define RMCFG_API_NvRmConfigSetEx 0 // aka NV04_CONFIG_SET_EX +#define RMCFG_API_NV04_I2C_ACCESS 1 +#define RMCFG_API_NVOS_I2C_ACCESS_PARAMS 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_NV_ESC_RM_I2C_ACCESS 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_Nv04I2CAccess 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_NvRmI2CAccess 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_NV04_IDLE_CHANNELS 1 +#define RMCFG_API_NVOS30_PARAMETERS 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_NV_ESC_RM_IDLE_CHANNELS 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_Nv04IdleChannels 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_NvRmIdleChannels 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_NV04_MAP_MEMORY 1 +#define RMCFG_API_NVOS33_PARAMETERS 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_NV_ESC_RM_MAP_MEMORY 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_Nv04MapMemory 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_NvRmMapMemory 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_NV04_UNMAP_MEMORY 1 +#define RMCFG_API_NVOS34_PARAMETERS 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_NV_ESC_RM_UNMAP_MEMORY 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_Nv04UnmapMemory 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_NvRmUnmapMemory 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_NV04_MAP_MEMORY_DMA 1 +#define RMCFG_API_NVOS46_PARAMETERS 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_NV_ESC_RM_MAP_MEMORY_DMA 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_Nv04MapMemoryDma 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_NvRmMapMemoryDma 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_NV04_UNMAP_MEMORY_DMA 1 +#define RMCFG_API_NVOS47_PARAMETERS 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_NV_ESC_RM_UNMAP_MEMORY_DMA 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_Nv04UnmapMemoryDma 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_NvRmUnmapMemoryDma 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_NV04_ALLOC_CONTEXT_DMA 1 +#define RMCFG_API_NVOS39_PARAMETERS 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_NV_ESC_RM_ALLOC_CONTEXT_DMA2 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_Nv04AllocContextDma 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_NvRmAllocContextDma2 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_NV04_BIND_CONTEXT_DMA 1 +#define RMCFG_API_NVOS49_PARAMETERS 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_NV_ESC_RM_BIND_CONTEXT_DMA 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_Nv04BindContextDma 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_NvRmBindContextDma 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_NV04_CONTROL 1 +#define RMCFG_API_NVOS54_PARAMETERS 1 // aka NV04_CONTROL +#define RMCFG_API_NV_ESC_RM_CONTROL 1 // aka NV04_CONTROL +#define RMCFG_API_Nv04Control 1 // aka NV04_CONTROL +#define RMCFG_API_NvRmControl 1 // aka NV04_CONTROL +#define RMCFG_API_NV04_DUP_OBJECT 1 +#define RMCFG_API_NVOS55_PARAMETERS 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_NV_ESC_RM_DUP_OBJECT 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_Nv04DupObject 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_NvRmDupObject 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_NV04_DUP_OBJECT2 1 +#define RMCFG_API_NVOS55_PARAMETERS 1 // aka NV04_DUP_OBJECT2 +#define RMCFG_API_NV_ESC_RM_DUP_OBJECT 1 // aka NV04_DUP_OBJECT2 +#define RMCFG_API_Nv04DupObject 1 // aka NV04_DUP_OBJECT2 +#define RMCFG_API_NvRmDupObject2 1 // aka NV04_DUP_OBJECT2 +#define RMCFG_API_NV04_SHARE_OBJECT 1 +#define RMCFG_API_NVOS57_PARAMETERS 1 // aka NV04_SHARE_OBJECT +#define RMCFG_API_NV_ESC_RM_SHARE 1 // aka 
NV04_SHARE_OBJECT
+#define RMCFG_API_Nv04Share 1 // aka NV04_SHARE_OBJECT
+#define RMCFG_API_NvRmShare 1 // aka NV04_SHARE_OBJECT
+#define RMCFG_API_NV04_ADD_VBLANK_CALLBACK 1
+#define RMCFG_API_NVOS61_PARAMETERS 1 // aka NV04_ADD_VBLANK_CALLBACK
+#define RMCFG_API_NV_ESC_RM_ADD_VBLANK_CALLBACK 1 // aka NV04_ADD_VBLANK_CALLBACK
+#define RMCFG_API_Nv04AddVblankCallback 1 // aka NV04_ADD_VBLANK_CALLBACK
+#define RMCFG_API_NvRmAddVblankCallback 1 // aka NV04_ADD_VBLANK_CALLBACK
+#define RMCFG_API_NV04_ACCESS_REGISTRY 1
+#define RMCFG_API_NvRmReadRegistryDword 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NvRmWriteRegistryDword 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NvRmGetNumRegistryEntries 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NvRmDeleteRegistryEntry 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NvRmReadRegistryEntry 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NvRmReadRegistryBinary 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NvRmWriteRegistryBinary 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NVOS38_PARAMETERS 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NV_ESC_RM_ACCESS_REGISTRY 1 // aka NV04_ACCESS_REGISTRY
+#define RMCFG_API_NV04_GET_EVENT_DATA 1
+#define RMCFG_API_NVOS41_PARAMETERS 1 // aka NV04_GET_EVENT_DATA
+#define RMCFG_API_NV_ESC_RM_GET_EVENT_DATA 1 // aka NV04_GET_EVENT_DATA
+#define RMCFG_API_NvRmGetEventData 1 // aka NV04_GET_EVENT_DATA
+#define RMCFG_API_NV04_UPDATE_DEVICE_MAPPING_INFO 1 // Update Mapping Parameters (unix-only)
+#define RMCFG_API_NVOS56_PARAMETERS 1 // aka NV04_UPDATE_DEVICE_MAPPING_INFO
+#define RMCFG_API_NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 1 // aka NV04_UPDATE_DEVICE_MAPPING_INFO
+#define RMCFG_API_NVXX_CARD_INFO 1
+#define RMCFG_API_nv_ioctl_card_info_t 1 // aka NVXX_CARD_INFO
+#define RMCFG_API_NV_ESC_CARD_INFO 1 // aka NVXX_CARD_INFO
+#define RMCFG_API_NVXX_ENV_INFO 1
+#define RMCFG_API_nv_ioctl_env_info_t 1 // aka NVXX_ENV_INFO
+#define RMCFG_API_NV_ESC_ENV_INFO 1 // aka NVXX_ENV_INFO
+#define RMCFG_API_NVXX_ALLOC_OS_EVENT 1
+#define RMCFG_API_nv_ioctl_alloc_os_event_t 1 // aka NVXX_ALLOC_OS_EVENT
+#define RMCFG_API_NV_ESC_ALLOC_OS_EVENT 1 // aka NVXX_ALLOC_OS_EVENT
+#define RMCFG_API_NvRmAllocOsEvent 1 // aka NVXX_ALLOC_OS_EVENT
+#define RMCFG_API_NVXX_FREE_OS_EVENT 1
+#define RMCFG_API_nv_ioctl_free_os_event_t 1 // aka NVXX_FREE_OS_EVENT
+#define RMCFG_API_NV_ESC_FREE_OS_EVENT 1 // aka NVXX_FREE_OS_EVENT
+#define RMCFG_API_NvRmFreeOsEvent 1 // aka NVXX_FREE_OS_EVENT
+#define RMCFG_API_NVXX_STATUS_CODE 1
+#define RMCFG_API_nv_ioctl_status_code_t 1 // aka NVXX_STATUS_CODE
+#define RMCFG_API_NV_ESC_STATUS_CODE 1 // aka NVXX_STATUS_CODE
+#define RMCFG_API_NVXX_CHECK_VERSION_STR 1
+#define RMCFG_API_nv_ioctl_rm_api_version_t 1 // aka NVXX_CHECK_VERSION_STR
+#define RMCFG_API_NV_ESC_CHECK_VERSION_STR 1 // aka NVXX_CHECK_VERSION_STR
+#define RMCFG_API_NVXX_ATTACH_GPUS_TO_FD 1
+#define RMCFG_API_NvU32 1 // aka NVXX_ATTACH_GPUS_TO_FD
+#define RMCFG_API_NV_ESC_ATTACH_GPUS_TO_FD 1 // aka NVXX_ATTACH_GPUS_TO_FD
+
+
+
+// Disable misspelling detection
+#define __RMCFG_vet_enabled 0
+
+
+
+
+
+
+
+
+// Make sure the specified feature is defined and not a misspelling
+// by checking the "_def" forms above which are all set to '1' for
+// each defined chip, feature, etc., irrespective of its enable/disable
+// state.
+#define _RMCFG_vet(x) 0
+#if __RMCFG_vet_enabled && defined(__GNUC__) // broken on MSVC
+# undef _RMCFG_vet
+# define _RMCFG_vet(x) ((__def_RMCFG ## x) ? 
0 : (0 * (1/0)))
+#endif
+
+//
+// Compile-time constant macros to help with enabling or disabling code based
+// on whether a feature (or chip or class or engine or ...) is enabled.
+// May be used by both C code ('if') and C-preprocessor directives ('#if')
+//
+
+#define RMCFG_CHIP_ENABLED(_chip) (RMCFG_CHIP_##_chip + _RMCFG_vet(_CHIP_ ## _chip))
+#define RMCFG_FEATURE_ENABLED(_feature) (RMCFG_FEATURE_##_feature + _RMCFG_vet(_FEATURE_ ## _feature))
+#define RMCFG_MODULE_ENABLED(_module) (RMCFG_MODULE_##_module + _RMCFG_vet(_MODULE_ ## _module))
+#define RMCFG_CLASS_ENABLED(_clss) (RMCFG_CLASS_##_clss + _RMCFG_vet(_CLASS_ ## _clss))
+#define RMCFG_API_ENABLED(_api) (RMCFG_API_##_api + _RMCFG_vet(_API_ ## _api))
+
+#endif // _RMCFG_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h
new file mode 100644
index 0000000..edf41ba
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __CORE_H__
+#define __CORE_H__
+
+#include "core/prelude.h"
+
+/**
+ * @brief Global RM initialization
+ *
+ * The single entrypoint into the RM for all platforms. This will initialize
+ * cross-platform RM subsystems and call into OS-specific init as needed.
+ *
+ * Must be called once and only once before any RM internal functions can be
+ * called.
+ *
+ * @return NV_OK if successful, error otherwise
+ */
+NV_STATUS coreInitializeRm(void);
+
+/**
+ * @brief Global RM shutdown
+ *
+ * Must be called once and only once when a driver is shutting down and no more
+ * RM internal functions will be called.
+ * + */ +void coreShutdownRm(void); + +#endif /* __CORE_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h new file mode 100644 index 0000000..eaa931b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h @@ -0,0 +1,3 @@ + +#include "g_hal_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h new file mode 100644 index 0000000..b15d9cd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h @@ -0,0 +1,3 @@ + +#include "g_hal_mgr_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h new file mode 100644 index 0000000..f77da15 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _INFO_BLOCK_H_ +#define _INFO_BLOCK_H_ + +// +// HAL private data management. +// +typedef struct ENG_INFO_LINK_NODE *PENG_INFO_LINK_NODE; +typedef struct ENG_INFO_LINK_NODE ENG_INFO_LINK_NODE; + +// new style typedef for info block functions, simple typedef.
+// Used by hal .def files via INFO_BLOCK_GROUP template in Gpuhal.def +typedef void *EngGetInfoBlockFn(PENG_INFO_LINK_NODE pHead, NvU32 dataId); +typedef void *EngAddInfoBlockFn(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId, NvU32 size); +typedef void EngDeleteInfoBlockFn(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId); +typedef NvBool EngTestInfoBlockFn(PENG_INFO_LINK_NODE pHead, NvU32 dataId); + +// old style typedef for info block functions (ptr to fn) +// delete these 4 typedefs once all .def files converted to use OBJECT_INTERFACES +typedef EngGetInfoBlockFn *EngGetInfoBlock; +typedef EngAddInfoBlockFn *EngAddInfoBlock; +typedef EngDeleteInfoBlockFn *EngDeleteInfoBlock; +typedef EngTestInfoBlockFn *EngTestInfoBlock; + + +struct ENG_INFO_LINK_NODE +{ + NvU32 dataId; + void *infoBlock; + PENG_INFO_LINK_NODE next; +}; + +void* getInfoPtr(PENG_INFO_LINK_NODE pHead, NvU32 dataId); +void* addInfoPtr(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId, NvU32 size); +void deleteInfoPtr(PENG_INFO_LINK_NODE * ppHead, NvU32 dataId); +NvBool testInfoPtr(PENG_INFO_LINK_NODE, NvU32 dataId); + +#endif // _INFO_BLOCK_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h new file mode 100644 index 0000000..de6ab64 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h @@ -0,0 +1,205 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef LOCKS_H +#define LOCKS_H + +#include "core/core.h" +#include "os/os.h" + +// Forward declarations +typedef struct OBJSYS OBJSYS; + +typedef enum +{ + GPU_LOCK_GRP_SUBDEVICE, // locks will be taken for subdevice only + GPU_LOCK_GRP_DEVICE, // locks will be taken for device only + GPU_LOCK_GRP_MASK, // locks will be taken for devices specified by the mask + GPU_LOCK_GRP_ALL // locks will be taken for all devices +} GPU_LOCK_GRP_ID; +typedef NvU32 GPU_MASK; + +// +// This structure is used to trace lock acquire/release activity. +// The calling IP is stored in a circular array. 
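
Since the list layout is fully specified by ENG_INFO_LINK_NODE above (a singly linked list keyed by dataId), the lookup half of this API is straightforward; a minimal sketch consistent with the declared signatures, for illustration only (the real implementations live in a .c file outside this patch hunk):

    // Walk the per-engine list and return the block registered under dataId,
    // or NULL if no block with that id has been added.
    void *getInfoPtr(PENG_INFO_LINK_NODE pHead, NvU32 dataId)
    {
        PENG_INFO_LINK_NODE pNode;

        for (pNode = pHead; pNode != NULL; pNode = pNode->next)
        {
            if (pNode->dataId == dataId)
                return pNode->infoBlock;
        }
        return NULL;
    }

    // A block "exists" exactly when the lookup finds it.
    NvBool testInfoPtr(PENG_INFO_LINK_NODE pHead, NvU32 dataId)
    {
        return (getInfoPtr(pHead, dataId) != NULL) ? NV_TRUE : NV_FALSE;
    }
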
+// +#define MAX_TRACE_LOCK_CALLS 32 + +typedef enum +{ + lockTraceEmpty, + lockTraceAcquire, + lockTraceRelease, + lockTraceAlloc, + lockTraceFree +} LOCK_TRACE_TYPE; + +typedef struct +{ + LOCK_TRACE_TYPE type; + union { + GPU_MASK gpuMask; // For GPU locks + NvU32 lockModule; // For API lock + NvU32 value; + } data32; + union { + NvU16 gpuInst; // For GPU locks + NvU16 lockFlags; // For API lock + NvU16 value; + } data16; + NvBool bHighIrql; + NvU8 priority; + NvU64 callerRA; + NvU64 threadId; + NvU64 timestamp; +} LOCK_TRACE_ENTRY; + +typedef struct +{ + LOCK_TRACE_ENTRY entries[MAX_TRACE_LOCK_CALLS]; + NvU32 index; +} LOCK_TRACE_INFO; + +#define INSERT_LOCK_TRACE(plti, ra, t, d16, d32, ti, irql, pr, ts) \ +{ \ + (plti)->entries[(plti)->index].callerRA = (NvUPtr)ra; \ + (plti)->entries[(plti)->index].type = t; \ + (plti)->entries[(plti)->index].data16.value = d16; \ + (plti)->entries[(plti)->index].data32.value = d32; \ + (plti)->entries[(plti)->index].threadId = ti; \ + (plti)->entries[(plti)->index].timestamp = ts; \ + (plti)->entries[(plti)->index].bHighIrql = irql; \ + (plti)->entries[(plti)->index].priority = pr; \ + (plti)->index = ((plti)->index + 1) % MAX_TRACE_LOCK_CALLS; \ +} + +// +// Callers specify this value when they want to lock all possible GPUs. +// +#define GPUS_LOCK_ALL (0xFFFFFFFF) + +// +// Flags for rmGpusLock[Acquire,Release] operations. +// + +// default no flags +#define GPUS_LOCK_FLAGS_NONE (0x00000000) +// conditional acquire; if lock is already held then return error +#define GPU_LOCK_FLAGS_COND_ACQUIRE NVBIT(0) +// acquire the lock in read (shared) mode, if applicable +#define GPU_LOCK_FLAGS_READ NVBIT(1) +// Attempt acquire even if it potentially violates the locking order +// But do not block in a way that could cause a deadlock +#define GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE NVBIT(2) +// Old name alias +#define GPUS_LOCK_FLAGS_COND_ACQUIRE GPU_LOCK_FLAGS_COND_ACQUIRE + +// +// RM Lock Related Functions +// +NV_STATUS rmLocksAlloc(OBJSYS *); +void rmLocksFree(OBJSYS *); + +NV_STATUS rmLocksAcquireAll(NvU32 module); +void rmLocksReleaseAll(void); + +NV_STATUS workItemLocksAcquire(NvU32 gpuInstance, NvU32 flags, NvU32 *pReleaseLocks, NvU32 *pGpuMask); +void workItemLocksRelease(NvU32 releaseLocks, NvU32 gpuMask); + +// +// Thread priority boosting and throttling: +// Used to temporarily increase the priority of a thread on Windows platforms +// in order to prevent starvation from the scheduler. +// +void threadPriorityStateAlloc(void); +void threadPriorityStateFree(void); + +//! Temporarily boost the priority of the current thread +void threadPriorityBoost(NvU64* pBoostPriority, NvU64 *pOriginalPriority); + +//! Gradually lower the priority of the current thread if it is boosted and sufficient time has elapsed +void threadPriorityThrottle(void); + +//! Restore the original priority of the current thread if it is boosted +void threadPriorityRestore(void); + +NV_STATUS rmGpuGroupLockGetMask(NvU32 gpuInst, GPU_LOCK_GRP_ID gpuGrpId, GPU_MASK* pGpuMask); + +// +// Defines for rmGpuLockSetOwner operation.
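
GPU_LOCK_FLAGS_COND_ACQUIRE above turns an acquire into a try-lock: rather than blocking, the call fails immediately if the lock is already held. A hedged usage sketch against the rmGpuLocksAcquire()/rmGpuLocksRelease() prototypes declared below; the (flags, module) parameter order and the module id of 0 are assumptions, and deferWork() is hypothetical:

    // Try the fast path without blocking; if another thread holds the GPU
    // locks, queue the work for later instead of risking a deadlock.
    NV_STATUS tryFastPath(void)
    {
        NV_STATUS status = rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE,
                                             0 /* assumed module id */);
        if (status != NV_OK)
            return deferWork();            // hypothetical: contended, do it later

        /* ... critical section touching GPU state ... */

        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
        return NV_OK;
    }
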
+// +#define GPUS_LOCK_OWNER_PENDING_DPC_REFRESH (OS_THREAD_HANDLE)(-1) + +NV_STATUS rmGpuLockInfoInit(void); +void rmGpuLockInfoDestroy(void); +NV_STATUS rmGpuLockAlloc(NvU32); +void rmGpuLockFree(NvU32); +NV_STATUS rmGpuLocksAcquire(NvU32, NvU32); +NvU32 rmGpuLocksRelease(NvU32, OBJGPU *); +void rmGpuLocksFreeze(GPU_MASK); +void rmGpuLocksUnfreeze(GPU_MASK); +NV_STATUS rmGpuLockHide(NvU32); +void rmGpuLockShow(NvU32); +NvBool rmGpuLockIsOwner(void); +NvU32 rmGpuLocksGetOwnedMask(void); +NvBool rmGpuLockIsHidden(OBJGPU *); +NV_STATUS rmGpuLockSetOwner(OS_THREAD_HANDLE); +NV_STATUS rmGpuGroupLockAcquire(NvU32, GPU_LOCK_GRP_ID, NvU32, NvU32, GPU_MASK *); +NV_STATUS rmGpuGroupLockRelease(GPU_MASK, NvU32); +NvBool rmGpuGroupLockIsOwner(NvU32, GPU_LOCK_GRP_ID, GPU_MASK*); + +NvBool rmDeviceGpuLockIsOwner(NvU32); +NV_STATUS rmDeviceGpuLockSetOwner(OBJGPU *, OS_THREAD_HANDLE); +NV_STATUS rmDeviceGpuLocksAcquire(OBJGPU *, NvU32, NvU32); +NvU32 rmDeviceGpuLocksRelease(OBJGPU *, NvU32, OBJGPU *); + +NV_STATUS rmIntrMaskLockAlloc(NvU32 gpuInst); +void rmIntrMaskLockFree(NvU32 gpuInst); +/// @note The return value is always zero, not the actual IRQL +NvU64 rmIntrMaskLockAcquire(OBJGPU *pGpu); +void rmIntrMaskLockRelease(OBJGPU *pGpu, NvU64 oldIrql); + +// wrappers for handling lock-related NV_ASSERT_OR_RETURNs +#define LOCK_ASSERT_AND_RETURN(cond) NV_ASSERT_OR_ELSE_STR((cond), #cond, return NV_ERR_INVALID_LOCK_STATE) +#define IRQL_ASSERT_AND_RETURN(cond) NV_ASSERT_OR_ELSE_STR((cond), #cond, return NV_ERR_INVALID_IRQ_LEVEL) +#define LOCK_ASSERT_AND_RETURN_BOOL(cond, bRet) NV_ASSERT_OR_ELSE_STR((cond), #cond, return (bRet)) + +#define LOCK_METER_OP(f,l,t,d0,d1,d2) +#define LOCK_METER_DATA(t,d0,d1,d2) + +#define rmInitLockMetering() +#define rmDestroyLockMetering() + +// +// RM API lock definitions are handled by the rmapi module. Providing legacy +// rmApiLockXxx interface for temporary compatibility. CORERM-1370 +// +#include "rmapi/rmapi.h" + +#define API_LOCK_FLAGS_NONE RMAPI_LOCK_FLAGS_NONE +#define API_LOCK_FLAGS_COND_ACQUIRE RMAPI_LOCK_FLAGS_COND_ACQUIRE + +#define rmApiLockAcquire(flags, module) (rmapiLockAcquire(flags, module)) +static NV_INLINE NV_STATUS rmApiLockRelease(void) {rmapiLockRelease(); return NV_OK;} +#define rmApiLockIsOwner() (rmapiLockIsOwner()) + +#endif // LOCKS_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h new file mode 100644 index 0000000..fc7c788 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __PRELUDE_H__ +#define __PRELUDE_H__ + +/* ------------------------ C library --------------------------------------- */ +#include <stddef.h> // NULL + +/* ------------------------ SDK includes ------------------------------------ */ + +#include "nvtypes.h" +#include "nvrangetypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "nvlimits.h" +#include "nvos.h" + +#include "nvctassert.h" + +/* ------------------------ RM library and utils ---------------------------- */ +#include "nvport/nvport.h" +#include "nvoc/runtime.h" +#include "core/printf.h" +#include "core/strict.h" +#include "utils/nvassert.h" + +/* ------------------------ Code-generation --------------------------------- */ +#include "rmconfig.h" // RMCONFIG header generated by config/rmconfig.pl +#include "g_rmconfig_private.h" // resman-private hal setup such as: IsGK104(), etc. +#include "g_nvh_state.h" // pass enable/disable state to NVOC headers +#include "g_odb.h" +#include "g_hal.h" + +/* ------------------------ Common types ------------------------------------ */ +typedef NvU64 RmPhysAddr; // A physical address should be 64 bits + +typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; // Forward-declare thread state + +/* ------------------------ Utility Macros ---------------------------------- */ + +// +// Power of 2 alignment. +// (Will give unexpected results if 'gran' is not a power of 2.) +// (v - v + gran) ensures that gran is upcasted to match v before +// the ~ operation, without explicitly having to typecast it. +// +#define RM_ALIGN_DOWN(v, gran) ((v) & ~(((v) - (v) + (gran)) - 1)) +#define RM_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~(((v) - (v) + (gran))-1)) +#define RM_IS_ALIGNED(v, gran) ((((gran) - 1) & (v)) == 0) + +#define RM_ALIGN_PTR_DOWN(p, gran) ((void *) RM_ALIGN_DOWN(((NvUPtr)p), (gran))) +#define RM_ALIGN_PTR_UP(p, gran) ((void *) RM_ALIGN_UP(((NvUPtr)p), (gran))) + +#define RM_PAGE_ALIGN_DOWN(value) RM_ALIGN_DOWN((value), RM_PAGE_SIZE) +#define RM_PAGE_ALIGN_UP(value) RM_ALIGN_UP((value), RM_PAGE_SIZE) + +#define NV_DELTA(a, b) (NV_MAX((a), (b)) - NV_MIN((a), (b))) // Okay for unsigned or signed + +#define NV_ROUNDUP(a,b) ((NV_CEIL(a,b))*(b)) +#define NV_ROUND_TO_QUANTA(a, quanta) (((quanta) == 0) ? (a): ((((a) + ((quanta) >> 1)) / (quanta)) * (quanta))) +#define NV_FLOOR_TO_QUANTA(a, quanta) (((a) / (quanta)) * (quanta)) +#define NV_SIZEOF32(x) (sizeof(x)) +#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0]))) +#define NV_ARRAY_ELEMENTS32(x) ((NV_SIZEOF32(x)/NV_SIZEOF32((x)[0]))) +#define NV_BYTESWAP16(a) ((((a) & 0xff00)>>8) | \ + (((a) & 0x00ff)<<8)) +#define NV_BYTESWAP32(a) ((((a) & 0xff000000)>>24) | \ + (((a) & 0x00ff0000)>>8) | \ + (((a) & 0x0000ff00)<<8) | \ + (((a) & 0x000000ff)<<24)) +#define NV_TO_LOWER(c) (((c)>='A'&&(c)<='Z')?(c)+('a'-'A'):(c)) +#define NV_TO_UPPER(c) (((c)>='a'&&(c)<='z')?((c)-'a'+'A'):(c)) + +/*! + * Creates a byte mask for a word at a given offset.
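
The power-of-two alignment helpers above are easiest to sanity-check with concrete values; a standalone sketch with the macros inlined so it compiles outside the RM tree:

    #include <assert.h>
    #include <stdint.h>

    #define RM_ALIGN_DOWN(v, gran) ((v) & ~(((v) - (v) + (gran)) - 1))
    #define RM_ALIGN_UP(v, gran)   (((v) + ((gran) - 1)) & ~(((v) - (v) + (gran)) - 1))
    #define RM_IS_ALIGNED(v, gran) ((((gran) - 1) & (v)) == 0)

    int main(void)
    {
        // 0x1234 at a 4 KiB (0x1000) granularity:
        assert(RM_ALIGN_DOWN(0x1234u, 0x1000u) == 0x1000u);
        assert(RM_ALIGN_UP(0x1234u, 0x1000u) == 0x2000u);
        assert(RM_IS_ALIGNED(0x2000u, 0x1000u));

        // The (v - v + gran) trick promotes gran to v's wider type before
        // the ~, so the mask is not truncated for 64-bit values:
        uint64_t v = 0x100000001ull;
        assert(RM_ALIGN_DOWN(v, 0x1000u) == 0x100000000ull);
        return 0;
    }
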
+ * offset = 0 0xffffff00 + * offset = 1 0xffff00ff + * offset = 2 0xff00ffff + * offset = 3 0x00ffffff + * + * @param[in] offset Offset for the mask. + */ +#define NV_BYTE_MASK(offset) (~(0xff << ((offset)<<3))) + +// +// note: the following trick fails if (z-1) * y > max_int +// +// since the calculation contains (x % z) * y, +// and the maximum value of (x % z) is (z-1). +// +// selecting the smaller of x and y to be y reduces the chances +// of problems, but for big enough z, the problem will return... +// +#define OVERFLOW_CAREFUL_MUL_DIV(x, y, z) \ + ((x) > (y)) ? (((x) / (z)) * (y) + (((x) % (z)) * (y)) / (z)) : (((y) / (z)) * (x) + (((y) % (z)) * (x)) / (z)) + +#define MASK_BITS(n) (~(0xFFFFFFFF << (n))) + +#endif /* __PRELUDE_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h new file mode 100644 index 0000000..d4db226 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h @@ -0,0 +1,315 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PRINTF_H_ +#define _PRINTF_H_ + +/* + * RM PRINTF definitions. 
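
The OVERFLOW_CAREFUL_MUL_DIV helper at the end of prelude.h above relies on the identity x*y/z == (x/z)*y + ((x%z)*y)/z for non-negative integers (write x = q*z + r; then floor(x*y/z) = q*y + floor(r*y/z)), so the rewrite loses no precision while keeping intermediates small. A standalone check with the macro inlined:

    #include <assert.h>
    #include <stdint.h>

    #define OVERFLOW_CAREFUL_MUL_DIV(x, y, z) \
        ((x) > (y)) ? (((x) / (z)) * (y) + (((x) % (z)) * (y)) / (z)) \
                    : (((y) / (z)) * (x) + (((y) % (z)) * (x)) / (z))

    int main(void)
    {
        // 268435456 * 100 (~2.7e10) would overflow 32-bit arithmetic, but
        // splitting x into quotient and remainder keeps every step small.
        uint32_t x = 268435456u, y = 100u, z = 1000u;
        uint32_t q = OVERFLOW_CAREFUL_MUL_DIV(x, y, z);
        assert(q == 26843545u);   // == floor(x * y / z), no 64-bit math needed

        // The residual hazard named in the comment above: (x % z) * y can
        // still overflow once (z - 1) * y exceeds the type's maximum.
        return 0;
    }
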
+ * + * Provides RM internal definitions built on the generic nvprintf utilities + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvport/nvport.h" +#include "utils/nvprintf.h" +#include "nvlog/nvlog.h" + +#define DBG_FILE_LINE_FUNCTION NV_FILE_STR, __LINE__, NV_FUNCTION_STR + +/** + * @todo bug 1583359 - Move to NvPort compiler specifics + */ +#if defined(__GNUC__) || defined(__clang__) +#define NV_RETURN_ADDRESS() __builtin_return_address(0) +#else +#define NV_RETURN_ADDRESS() _ReturnAddress() +#endif + + +//****************************************************************************** +// BREAKPOINTS +//****************************************************************************** + +// NV_DBG_BREAKPOINT_ALLOWED can be overridden through CFLAGS +#if !defined(NV_DBG_BREAKPOINT_ALLOWED) +#if defined(DEBUG) || defined(ASSERT_BUILD) || defined(NV_MODS) || defined(QA_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) +#define NV_DBG_BREAKPOINT_ALLOWED 1 +#else +#define NV_DBG_BREAKPOINT_ALLOWED 0 +#endif +#endif // !defined(NV_DBG_BREAKPOINT_ALLOWED) + +#define NV_DEBUG_BREAK_FLAGS_RC 0:0 +#define NV_DEBUG_BREAK_FLAGS_RC_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_RC_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_ASSERT 1:1 +#define NV_DEBUG_BREAK_FLAGS_ASSERT_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_ASSERT_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK 2:2 +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT 3:3 +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS 4:4 +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_POWER_ON 5:5 +#define NV_DEBUG_BREAK_FLAGS_POWER_ON_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_POWER_ON_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR 6:6 +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR_DISABLE (0x0) +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR_ENABLE (0x1) +#define NV_DEBUG_BREAK_FLAGS_CRASH 7:7 +#define NV_DEBUG_BREAK_FLAGS_CRASH_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_CRASH_ENABLE (0x00000001) + +#define NV_DEBUG_BREAK_ATTRIBUTES 7:0 +#define NV_DEBUG_BREAK_ATTRIBUTES_NONE (0x00000000) +#define NV_DEBUG_BREAK_ATTRIBUTES_RC (0x00000001) +#define NV_DEBUG_BREAK_ATTRIBUTES_ASSERT (0x00000002) +#define NV_DEBUG_BREAK_ATTRIBUTES_DBG_BREAK (0x00000004) +#define NV_DEBUG_BREAK_ATTRIBUTES_GPU_TIMEOUT (0x00000008) +#define NV_DEBUG_BREAK_ATTRIBUTES_POOL_TAGS (0x00000010) +#define NV_DEBUG_BREAK_ATTRIBUTES_POWER_ON (0x00000020) +#define NV_DEBUG_BREAK_ATTRIBUTES_SMU_ERROR (0x00000040) +#define NV_DEBUG_BREAK_ATTRIBUTES_CRASH (0x00000080) + +// Checks RMINFO and OS config to see if triggering a breakpoint is ever allowed +NvBool nvDbgBreakpointEnabled(void); +// Flushes the logs before a breakpoint, so we can see all the prints. 
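
The colon-separated pairs above (e.g. NV_DEBUG_BREAK_FLAGS_ASSERT 1:1, NV_DEBUG_BREAK_ATTRIBUTES 7:0) use the register-manual high:low bit-range convention; driver code normally manipulates them through NVIDIA's field macros, which are outside this excerpt. Purely to illustrate the numbering, a generic long-hand sketch (FIELD_MASK/FIELD_GET are not driver APIs):

    // Mask and extract for an inclusive h:l bit range within a 32-bit word.
    #define FIELD_MASK(h, l)   ((0xFFFFFFFFu >> (31 - (h) + (l))) << (l))
    #define FIELD_GET(v, h, l) (((v) & FIELD_MASK(h, l)) >> (l))

    // FIELD_MASK(1, 1) == 0x2, matching NV_DEBUG_BREAK_ATTRIBUTES_ASSERT;
    // FIELD_MASK(7, 0) == 0xFF, covering the whole NV_DEBUG_BREAK_ATTRIBUTES
    // byte. FIELD_GET(flags, 1, 1) == 1 therefore means ASSERT is enabled.
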
+void osFlushLog(void); + +#define DBG_ROUTINE() \ + do \ + { \ + if (nvDbgBreakpointEnabled()) \ + PORT_BREAKPOINT_ALWAYS(); \ + } while (0) + +#define REL_DBG_BREAKPOINT() \ + REL_DBG_BREAKPOINT_MSG("NVRM-RC: Nvidia GPU Error Detected\n") + +#if NV_DBG_BREAKPOINT_ALLOWED + +#if !NVCPU_IS_RISCV64 + +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \ + do \ + { \ + NV_PRINTF(LEVEL_ERROR, "bp @ " NV_FILE_FMT ":%d\n", NV_FILE, __LINE__);\ + osFlushLog(); \ + DBG_ROUTINE(); \ + } while (0) + +#else // !NVCPU_IS_RISCV64 + +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \ + do \ + { \ + NV_ASSERT_FAILED("DBG_BREAKPOINT"); \ + } while (0) + +#endif // !NVCPU_IS_RISCV64 + +#define DBG_BREAKPOINT() DBG_BREAKPOINT_EX(NULL, 0) + +#define DBG_BREAKPOINT_EX_ARGS_IGNORED 1 +#define REL_DBG_BREAKPOINT_MSG(msg) \ + do \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(msg); \ + DBG_BREAKPOINT(); \ + } while (0) + +#else // !NV_DBG_BREAKPOINT_ALLOWED + +#define DBG_BREAKPOINT() +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) +#define DBG_BREAKPOINT_EX_ARGS_IGNORED 1 + +#define REL_DBG_BREAKPOINT_MSG(msg) \ + do \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(msg); \ + DBG_ROUTINE(); \ + } while (0) + + +#endif // NV_DBG_BREAKPOINT_ALLOWED + +#define DBG_BREAKPOINT_REASON(reason) DBG_BREAKPOINT() + +#define DBG_BREAKPOINT_ERROR_INFO(errorCategory, errorInfo) DBG_BREAKPOINT() + +//****************************************************************************** +// PRINTS +//****************************************************************************** + +#include "utils/nvprintf.h" + +#define MAX_ERROR_STRING 256 +#ifndef NVPORT_CHECK_PRINTF_ARGUMENTS +#define NVPORT_CHECK_PRINTF_ARGUMENTS(x,c) +#endif +// +// Prototypes +// +NvBool nvDbgInit(void); +void nvDbgDestroy(void); +void nvDbg_Printf (const char *file, int line, const char *function, int debuglevel, const char *s, ...) NVPORT_CHECK_PRINTF_ARGUMENTS(5, 6); + +// +// Like libc's vsnprintf(), nvDbg_vPrintf() invalidates its va_list argument. The va_list argument +// may not be reused after nvDbg_vPrintf() returns. If the va_list is needed after the +// nvDbg_vPrintf() call, create a copy of the va_list using va_copy(). +// The caller controls the lifetime of the va_list argument, and should free it using va_end. 
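
The va_list caveat just above is easy to trip over when a format pass runs twice (once to size, once to fill). The standard-C pattern, shown with libc's vsnprintf() since it has the same invalidation behavior as nvDbg_vPrintf():

    #include <stdarg.h>
    #include <stdio.h>

    // Each consumer gets its own va_list: copy BEFORE the first pass runs,
    // and va_end() both copies when done (the caller owns their lifetimes).
    int formatSized(char *buf, size_t bufSize, const char *fmt, ...)
    {
        va_list args, argsCopy;
        int needed;

        va_start(args, fmt);
        va_copy(argsCopy, args);

        needed = vsnprintf(NULL, 0, fmt, args);      // pass 1: length only
        if (needed >= 0 && (size_t)needed < bufSize)
            vsnprintf(buf, bufSize, fmt, argsCopy);  // pass 2: fresh copy

        va_end(argsCopy);
        va_end(args);
        return needed;
    }
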
+// +void nvDbg_vPrintf (const char *file, int line, const char *function, int debuglevel, const char *s, va_list args); +void nvDbg_PrintBuf(const char *file, int line, const char *function, int debuglevel, NvU8 buffer[], NvU32 bufsize); + +int nvDbgVsnprintf(char *dest, NvU32 destSize, const char *fmt, va_list args); +int nvDbgSnprintf (char *dest, NvU32 destSize, const char *fmt, ...); + +struct OBJGPU; +void nvDbgInitRmMsg(struct OBJGPU *); +// RmMsgPrefix return value +#define NVRM_MSG_PREFIX_NVRM NVBIT(0) +#define NVRM_MSG_PREFIX_FILE NVBIT(1) +#define NVRM_MSG_PREFIX_FUNCTION NVBIT(2) +#define NVRM_MSG_PREFIX_LINE NVBIT(3) +#define NVRM_MSG_PREFIX_OSTIMESTAMP NVBIT(4) +NvU32 RmMsgPrefix(NvU32 prefix, const char *filename, NvU32 linenumber, const char *function, char *str, NvU32 len); +// nvDbgRmMsgCheck return code +#define NVRM_MSG_NORMAL 0 // Use normal message handling (warnings/errors) +#define NVRM_MSG_HIDE 1 // Skip this message +#define NVRM_MSG_PRINT 2 // Force printing of this message +NvU32 nvDbgRmMsgCheck(const char *filename, NvU32 linenumber, const char *function, NvU32 level, const char *format, NvU32 *pPrefix); +void nvDbgDumpBufferBytes(void *pBuffer, NvU32 length); + + +#if NV_PRINTF_STRINGS_ALLOWED +#define DBG_STRING(str) str +#define DBG_INIT() nvDbgInit() +#define DBG_DESTROY() nvDbgDestroy() +#define DBG_VSNPRINTF(ptr_size_format_and_stuff) nvDbgVsnprintf ptr_size_format_and_stuff +#define DBG_PRINTBUF(dbglevel, buffer, bufsize) nvDbg_PrintBuf(DBG_FILE_LINE_FUNCTION, dbglevel, buffer, bufsize) +#define DBG_RMMSG_CHECK(level) (nvDbgRmMsgCheck(DBG_FILE_LINE_FUNCTION, level, NULL, NULL) == NVRM_MSG_PRINT) +#else // ! NV_PRINTF_STRINGS_ALLOWED -- debug printf strings not enabled +#define DBG_STRING(str) "" +#define DBG_INIT() (NV_TRUE) +#define DBG_DESTROY() +#define DBG_VSNPRINTF(ptr_size_format_and_stuff) +#define DBG_PRINTBUF(dbglevel, buffer, bufsize) +#define DBG_RMMSG_CHECK(level) (0) +#endif // NV_PRINTF_STRINGS_ALLOWED + + + +//****************************************************************************** +// POWER SANITY CHECKS +//****************************************************************************** +// +// Make sure the GPU is in full power or resuming from D3 state. Otherwise, +// bail out of the calling function. An exception is made for systems that +// support the surprise-removal feature. See Bugs 440565, 479003, and 499228. DO NOT IGNORE +// OR REMOVE THIS ASSERT. If you have problems with it, please talk to cplummer. +// +// bAllowWithoutSysmemAccess: Allow this RM Control when sysmem access is not available +// from the GPU. Should be NV_TRUE only for NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS +// +// On systems supporting surprise removal, if the GPU is in D3 cold +// and still attached we would consider it a true D3 cold state +// and return NOT_FULL_POWER. See bug 1679965.
+// +#define API_GPU_FULL_POWER_SANITY_CHECK(pGpu, bGpuAccess, bAllowWithoutSysmemAccess) \ + if ((!gpuIsGpuFullPower(pGpu)) && \ + (!(pGpu)->getProperty((pGpu), \ + PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \ + { \ + DBG_BREAKPOINT(); \ + if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \ + { \ + return NV_ERR_GPU_NOT_FULL_POWER; \ + } \ + else if (gpuIsSurpriseRemovalSupported(pGpu) && \ + (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \ + { \ + return NV_ERR_GPU_NOT_FULL_POWER; \ + } \ + } \ + if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \ + { \ + return NV_ERR_GPU_NOT_FULL_POWER; \ + } + +#define API_GPU_FULL_POWER_SANITY_CHECK_OR_GOTO(pGpu, bGpuAccess, bAllowWithoutSysmemAccess, status, tag) \ + if ((!gpuIsGpuFullPower(pGpu)) && \ + (!(pGpu)->getProperty((pGpu), \ + PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \ + { \ + DBG_BREAKPOINT(); \ + if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \ + { \ + status = NV_ERR_GPU_NOT_FULL_POWER; \ + goto tag; \ + } \ + else if (gpuIsSurpriseRemovalSupported(pGpu) && \ + (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \ + { \ + status = NV_ERR_GPU_NOT_FULL_POWER; \ + goto tag; \ + } \ + } \ + if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \ + { \ + status = NV_ERR_GPU_NOT_FULL_POWER; \ + goto tag; \ + } + + +#if defined(PORT_IS_FUNC_SUPPORTED) +#if PORT_IS_FUNC_SUPPORTED(portMemExValidate) +#define DBG_VAL_PTR(p) portMemExValidate(p, NV_TRUE) +#endif +#endif +#ifndef DBG_VAL_PTR +#define DBG_VAL_PTR(p) +#endif + + +void nvErrorLog(void *pVoid, NvU32 num, const char *pFormat, va_list arglist); +void nvErrorLog_va(void * pGpu, NvU32 num, const char * pFormat, ...); + +// memory allocation tracking data structs and globals +#define MAX_STACK_LEVEL 6 + +#ifdef __cplusplus +} +#endif + +#endif // _PRINTF_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h new file mode 100644 index 0000000..d102e45 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ +#ifndef __STRICT_H__ +#define __STRICT_H__ + +// +// RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_XYZ should be set +// before including any RM internal headers when disabling deprecated +// definitions is desired. +// +// For transition during refactoring, we might introduce new types and +// interfaces and use macros/wrappers to forward the old interface to +// the new one. +// +// Once a callsite is migrated to use the new interface it can use RM +// strict to disable the deprecated definitions to prevent changes from +// reintroducing calls to a deprecated interface within a cleansed +// module. +// +// Controlling disablement of deprecated definitions is versioned. This +// enables us to introduce new deprecated interfaces incrementally. +// For example, ModuleA might scrub to versionX (removal of OBJFB defns) but +// not versionY (removal of legacy CLI types). +// +// Flags to turn off deprecated definitions are intended to be +// temporary; once all modules remove references, the deprecated +// definitions and knobs in this header should be deleted. +// +#ifdef RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020 +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS 0 +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS 0 +#endif + +// +// RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_XYZ should be set before +// including any RM internal headers when disabling "physical" definitions is +// desired. +// +// Physical definitions refer to interfaces/types that are only used by GSP-RM +// and VGPU-host, i.e., not to be used by VGPU Client or GSP Client +// +#ifdef RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_JAN_21_2020 +#define RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS 0 +#endif + +// +// Default deprecated and "physical engine" definitions on unless specified +// +#ifndef RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS 1 +#endif + +#ifndef RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS 1 +#endif + +#ifndef RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS 1 +#endif + +// +// "Physical engine" definitions not yet included in any version, but available +// for T234X. Should be defined to 0 before including any RM internal headers +// when disabling OBJDISP (and related) definitions is desired. +// +#ifndef RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 1 +#endif + +// +// Generate OBJGPU engine accessors (GPU_GET_FOO(pGpu)) for disabled engines. +// These will always return NULL, but will allow the code that references them +// to compile.
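
Concretely, a scrubbed source file opts in by defining the version knob before any RM header is pulled in; a sketch of what such a file's prologue might look like (the file and its claim of being scrubbed are hypothetical):

    // cleansed_module.c (hypothetical)
    //
    // This module no longer touches OBJFB or legacy context DMAs, so it pins
    // itself to the Jan 21 2020 strict version. Reintroducing a deprecated
    // call now fails at compile time instead of slipping in silently.
    #define RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020 1

    #include "core/core.h"  // reaches core/strict.h via core/prelude.h
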
+// +#ifndef RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS +#define RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS 1 +#endif + +#endif /* __STRICT_H__ */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h new file mode 100644 index 0000000..4e95886 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h @@ -0,0 +1,3 @@ + +#include "g_system_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h new file mode 100644 index 0000000..6567989 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h @@ -0,0 +1,217 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef THREAD_STATE_H +#define THREAD_STATE_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for Thread State management * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "os/os.h" +#include "tls/tls.h" +#include "containers/map.h" +#include "containers/list.h" + +typedef struct OBJGPU OBJGPU; + +// +// Thread State Tracking structures and defines +// +typedef struct THREAD_TIMEOUT_STATE +{ + NvU64 enterTime; + NvU64 nonComputeTime; + NvU64 computeTime; + NvU64 nextCpuYieldTime; + NvU64 overrideTimeoutMsecs; + +} THREAD_TIMEOUT_STATE, *PTHREAD_TIMEOUT_STATE; + +typedef struct THREAD_STATE_FREE_CALLBACK +{ + void *pCbData; + void (*pCb)(void *pCbData); +} THREAD_STATE_FREE_CALLBACK; + +MAKE_LIST(THREAD_STATE_FREE_CB_LIST, THREAD_STATE_FREE_CALLBACK); + +typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; + +struct THREAD_STATE_NODE +{ + OS_THREAD_HANDLE threadId; + /*! + * Thread sequencer id. This is a unique identifier for a given thread + * entry into the RM. This is separate from @ref threadId, as the threadId + * is really the OS's thread handle/pointer. In cases where the same + * physical thread is re-used (e.g. WORK_ITEMs are scheduled from a + * pre-allocated pool of worker threads), different RM threads will have the + * same threadId. 
+ * + * This value is set by @ref threadStateInitXYZ() based on the global @ref + * THREAD_STATE_DB::threadSeqCntr. + */ + NvU32 threadSeqId; + NvBool bValid; + THREAD_TIMEOUT_STATE timeout; + NvU32 cpuNum; + NvU32 flags; + MapNode node; + + /*! + * If a callback is installed, threadStateFree() may block on it. + * + * The installed callbacks will be processed in FIFO order only. + * + * Only supported on non-ISR CPU RM paths. + */ + THREAD_STATE_FREE_CB_LIST cbList; +}; + +MAKE_INTRUSIVE_MAP(ThreadStateNodeMap, THREAD_STATE_NODE, node); + +typedef struct THREAD_STATE_DB_TIMEOUT +{ + NvU64 nonComputeTimeoutMsecs; + NvU64 computeTimeoutMsecs; + NvU32 computeGpuMask; + NvU32 flags; + +} THREAD_STATE_DB_TIMEOUT, *PTHREAD_STATE_DB_TIMEOUT; + +#define THREAD_STATE_TRACE_MAX_ENTRIES 8 + +typedef struct THREAD_STATE_TRACE_ENTRY +{ + NvU64 callerRA; + NvU32 flags; + +} THREAD_STATE_TRACE_ENTRY; + +typedef struct THREAD_STATE_TRACE_INFO +{ + NvU32 index; + THREAD_STATE_TRACE_ENTRY entries[THREAD_STATE_TRACE_MAX_ENTRIES]; + +} THREAD_STATE_TRACE_INFO; + +typedef struct THREAD_STATE_ISR_LOCKLESS +{ + THREAD_STATE_NODE **ppIsrThreadStateGpu; +} THREAD_STATE_ISR_LOCKLESS, *PTHREAD_STATE_ISR_LOCKLESS, **PPTHREAD_STATE_ISR_LOCKLESS; + +typedef struct THREAD_STATE_DB +{ + NvU32 setupFlags; + NvU32 maxCPUs; + /*! + * Thread state sequencer id counter. The last allocated thread state + * sequencer id via @ref threadStateInitXYZ(). + */ + NvU32 threadSeqCntr; + PORT_SPINLOCK *spinlock; + ThreadStateNodeMap dbRoot; + ThreadStateNodeMap dbRootPreempted; + THREAD_STATE_NODE **ppISRDeferredIntHandlerThreadNode; + PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode; + THREAD_STATE_DB_TIMEOUT timeout; + THREAD_STATE_TRACE_INFO traceInfo; +} THREAD_STATE_DB, *PTHREAD_STATE_DB; + +// +// This is the same for all OSes. This value was chosen because it is +// the minimum found on any OS at the time of this writing (May, 2008). +// +#define TIMEOUT_DEFAULT_OS_RESCHEDULE_INTERVAL_SECS 2 + +// +// The normal power transition requirement for Windows is 4 seconds. +// Use a longer time to let the OS fire its timeout and request recovery.
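
The timeout fields above pair with the threadStateInitTimeout()/threadStateCheckTimeout() prototypes declared below to bound hardware poll loops. A hedged sketch; the 1-second budget and the engineIsIdle() query are hypothetical, and the flags argument is left at 0:

    // Poll until a (hypothetical) engine reports idle, or the per-thread
    // timeout configured for this GPU elapses.
    static NV_STATUS waitForEngineIdle(OBJGPU *pGpu)
    {
        NvU64 elapsedUs = 0;
        NV_STATUS status = threadStateInitTimeout(pGpu, 1000000 /* 1s, assumed budget */, 0);

        if (status != NV_OK)
            return status;

        while (!engineIsIdle(pGpu))                      // hypothetical hardware query
        {
            if (threadStateCheckTimeout(pGpu, &elapsedUs) != NV_OK)
                return NV_ERR_TIMEOUT;                   // budget exhausted
            threadStateYieldCpuIfNecessary(pGpu);        // avoid starving other threads
        }
        return NV_OK;
    }
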
+// +#define TIMEOUT_WDDM_POWER_TRANSITION_INTERVAL_MS 9800 + +// +// Thread State flags used for threadStateInitSetupFlags +// +#define THREAD_STATE_SETUP_FLAGS_NONE 0 +#define THREAD_STATE_SETUP_FLAGS_ENABLED NVBIT(0) +#define THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED NVBIT(1) +#define THREAD_STATE_SETUP_FLAGS_SLI_LOGIC_ENABLED NVBIT(2) +#define THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED NVBIT(3) +#define THREAD_STATE_SETUP_FLAGS_ASSERT_ON_TIMEOUT_ENABLED NVBIT(4) +#define THREAD_STATE_SETUP_FLAGS_ASSERT_ON_FAILED_LOOKUP_ENABLED NVBIT(5) +#define THREAD_STATE_SETUP_FLAGS_RESET_ON_TIMEOUT_ENABLED NVBIT(6) +#define THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED NVBIT(7) +#define THREAD_STATE_SETUP_FLAGS_PRINT_INFO_ENABLED NVBIT(31) + +// +// Thread State flags used for threadState[Init,Free] +// +#define THREAD_STATE_FLAGS_NONE 0 +#define THREAD_STATE_FLAGS_IS_ISR NVBIT(0) +#define THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER NVBIT(1) +#define THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER NVBIT(2) +#define THREAD_STATE_FLAGS_IS_ISR_LOCKLESS NVBIT(3) +#define THREAD_STATE_FLAGS_TIMEOUT_INITED NVBIT(5) +#define THREAD_STATE_FLAGS_PLACED_ON_PREEMPT_LIST NVBIT(6) +#define THREAD_STATE_FLAGS_DEVICE_INIT NVBIT(7) +#define THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED NVBIT(8) + +// These Threads run exclusively between a conditional acquire +#define THREAD_STATE_FLAGS_EXCLUSIVE_RUNNING (THREAD_STATE_FLAGS_IS_ISR | \ + THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER | \ + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER) + +#define THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING (THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER | \ + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER) + +NV_STATUS threadStateGlobalAlloc(void); +void threadStateGlobalFree(void); +void threadStateInitRegistryOverrides(OBJGPU *pGpu); +void threadStateInitSetupFlags(NvU32 flags); +NvU32 threadStateGetSetupFlags(void); + +void threadStateInitISRLockless(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateFreeISRLockless(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateInitISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateFreeISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags); +void threadStateFree(THREAD_STATE_NODE *pThreadNode, NvU32 flags); + +NV_STATUS threadStateGetCurrent(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu); +NV_STATUS threadStateGetCurrentUnchecked(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu); +NV_STATUS threadStateInitTimeout(OBJGPU *pGpu, NvU32 timeoutUs, NvU32 flags); +NV_STATUS threadStateCheckTimeout(OBJGPU *pGpu, NvU64 *pElapsedTimeUs); +NV_STATUS threadStateResetTimeout(OBJGPU *pGpu); +void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum); +void threadStateYieldCpuIfNecessary(OBJGPU *pGpu); +void threadStateSetTimeoutOverride(THREAD_STATE_NODE *, NvU64); + +NV_STATUS threadStateEnqueueCallbackOnFree(THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback); +void threadStateRemoveCallbackOnFree(THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback); +#endif // THREAD_STATE_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h new file mode 100644 index 0000000..d19db84 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h @@ -0,0 +1,3 @@ + 
+#include "g_journal_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h new file mode 100644 index 0000000..c435c8c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef JOURNAL_STRUCTS_H +#define JOURNAL_STRUCTS_H 1 + +#include "nvcd.h" +#include "rmcd.h" + +// Meta Data to Describe an error block +typedef struct RMCD_ERROR_BLOCK { + NvU8 * pBlock; + NvU32 blockSize; + struct RMCD_ERROR_BLOCK * pNext; +} RMCD_ERROR_BLOCK; + +typedef struct RMERRORHEADER { + struct RMFIFOERRORELEMENT_V3 *pNextError; + RMCD_ERROR_BLOCK *pErrorBlock; + NvU32 GPUTag; + NvU32 ErrorNumber; +} RMERRORHEADER; + +typedef struct { + RMERRORHEADER ErrorHeader; + RmPrbInfo_RECORD_V2 RmPrbErrorData; +} RMPRBERRORELEMENT_V2; + +typedef struct RMFIFOERRORELEMENT_V3 { + RMERRORHEADER ErrorHeader; +} RMFIFOERRORELEMENT_V3; + +#endif /* ifndef JOURNAL_STRUCTS_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h new file mode 100644 index 0000000..1a09575 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h @@ -0,0 +1,3 @@ + +#include "g_nv_debug_dump_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h new file mode 100644 index 0000000..26def56 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _PROFILER_H_ +#define _PROFILER_H_ + +/*! + * @file profiler.h + * @brief Simple API to measure elapsed times in RM for profiling and statistics + * + * The primary goals of this API are to: + * 1. Be lightweight and have little-to-no setup required (built into release drivers) + * 2. Defer as much analysis as possible to the user of the data (keep it simple) + * 3. Provide sub-millisecond resolution if possible (medium-high granularity) + * + * This is intended mainly for coarse measurements of time-critical software + * sequences, such as GC6. For example, the measurements could be used to catch + * major latency regressions in a particular timing module. + * + * For more sophisticated profiling (e.g. for prospective analysis), use of an + * external profiling tool (e.g. xperf with ETW) is recommended instead. + */ + +#include "core/core.h" + +/*! + * Record containing the statistics of a single time module to be profiled + * periodically. + * + * This tracks the min/max elapsed time over all the measurement + * cycles, as well as the total elapsed time and number of cycles. + * To calculate the average elapsed time per cycle, divide total_ns by count. + * + * 64-bit precision integers are used to hold nanosecond resolution + * over long periods of time (e.g. greater than 4 seconds). + */ +typedef struct +{ + NvU64 count; //gpuId, data, RMTRACE_FUNC_PROG_ENTER); +// ... +// RMTRACE_MARKER_PROBE("About To Enter XXX", pGpu->gpuId, data, RMTRACE_FUNC_PROG_STEP); +// XXX(); +// ... +// BBB(); +// ... +// RMTRACE_MARKER_PROBE("AAA Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_EXIT); +// } +// +// +// void BBB() +// { +// RMTRACE_MARKER_PROBE("BBB Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_ENTER); +// ... +// CCC(); +// ... 
+// RMTRACE_MARKER_PROBE("BBB Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_EXIT); +// } +// +// With a tool (like EtwTool), we can generate below message automatically +// +// AAA Function (Enter) +// (0.1234ms) +// About to Enter XXX +// (0.0012ms) +// BBB Function (Enter) +// BBB Function (Leave) - 0.23ms +// AAA Function (Leave) -- 0.4111ms +// + +#define RMTRACE_FUNC_PROG_ENTER 0x0000 +#define RMTRACE_FUNC_PROG_EXIT 0x00FF +#define RMTRACE_FUNC_PROG_STEP 0x007F +#define RMTRACE_UNKNOWN_GPUID 0xFFFFFFFF +#define RMTRACE_UNUSED_PARAM 0 +#define RMTRACE_MAX_PRINT_BUFFER_SIZE 128 + +// +// Empty macros +// + +#define RMTRACE_INIT_NEW() +#define RMTRACE_DESTROY_NEW() +#define RMTRACE_SET_PTIMER_LOG(enable) +#define RMTRACE_IS_PTIMER_LOG_ENABLED() \ + NV_FALSE +#define RMTRACE_RMAPI(id, cmd) +#define RMTRACE_RMLOCK(id) +#define RMTRACE_DISP1(id, gpuId, param1) +#define RMTRACE_DISP2(id, gpuId, param1, param2) +#define RMTRACE_DISP3(id, gpuId, param1, param2, param3) +#define RMTRACE_DISP4(id, gpuId, param1, param2, param3, param4) +#define RMTRACE_DISP5(id, gpuId, param1, param2, param3, param4, param5) +#define RMTRACE_DISP6(id, gpuId, param1, param2, param3, param4, param5, param6) +#define RMTRACE_DISP_EDID(gpuId, publicId, connectedId, data, size) +#define RMTRACE_DISP_BRIGHTNESS_ENTRY(dispId, flags, blType, pwmInfoProvider, pwmInfoEntries, SBEnable, lmnProvider, lmnEntryCount, blPwmInfoSize, blPwmInfo) +#define RMTRACE_DISP_ERROR(id, gpuId, param1, param2, status) +#define RMTRACE_DISP_EXCEPTION(gpuId, param1, param2, param3, param4, param5) +#define RMTRACE_GPIO(id, _function, _state, _gpioPin, param) +#define RMTRACE_GPIO_LIST(id, count, list) +#define RMTRACE_I2C(id, gpuId, portId, address, indexSize, pIndex, dataSize, pData, status) +#define RMTRACE_I2C_SET_ACQUIRED(gpuId, portId, acquirer, status, curTime) +#define RMTRACE_I2C_ENUM_PORTS(gpuId, count, ports) +#define RMTRACE_GPU(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_RMJOURNAL(id, gpuId, type, group, key, count, firstTime, lastTime) +#define RMTRACE_POWER(id, gpuId, state, head, forcePerf, fastBootPowerState) +#define RMTRACE_PERF(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_THERM2(id, gpuId, param1, param2) +#define RMTRACE_THERM3(id, gpuId, param1, param2, param3) +#define RMTRACE_THERM6(id, gpuId, param1, param2, param3, param4, param5, param6) +#define RMTRACE_TIMEOUT(id, gpuId) +#define RMTRACE_VBIOS(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_VBIOS_ERROR(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_NVLOG(id, pData, dataSize) +#define RMTRACE_SBIOS(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_USBC0(id, gpuId) +#define RMTRACE_USBC1(id, gpuId, param1) +#define RMTRACE_USBC2(id, gpuId, param1, param2) +#define RMTRACE_USBC7(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_RMGENERAL(id, param1, param2, param3) +#define RMTRACE_NVTELEMETRY(id, gpuId, param1, param2, param3) +#define RMTRACE_NOCAT(id, gpuId, type, group, key, count, timeStamp) +#define RMTRACE_PRINT + + +#ifndef RMTRACE_FLAG_ENABLED +#define RMTRACE_FLAG_ENABLED (0) +#endif + +// +// Empty macros +// +#define RMTRACE_INIT() +#define RMTRACE_DESTROY() +#define RMTRACE_ENABLE(eventEventMask) +#define RMTRACE_PROBE(module, event) + +#define RMTRACE_PROBE1(module, event, dataType, data, dataSize) + +#define RMTRACE_PROBE2(module, 
event, dataType1, data1, dataSize1, dataType2, data2, dataSize2) + +#define RMTRACE_PROBE3(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3) + +#define RMTRACE_PROBE4(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4) + +#define RMTRACE_PROBE5(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5) + +#define RMTRACE_PROBE6(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6) + +#define RMTRACE_PROBE7(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6, \ + dataType7, data7, dataSize7) +#define RMTRACE_PROBE10(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6, \ + dataType7, data7, dataSize7, dataType8, data8, dataSize8, \ + dataType9, data9, dataSize9, dataType10, data10, dataSize10) +#define RMTRACE_PROBE2_PRIMTYPE(module, event, type0, val0, type1, val1) +#define RMTRACE_PROBE3_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2) +#define RMTRACE_PROBE4_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3) +#define RMTRACE_PROBE5_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4) +#define RMTRACE_PROBE7_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4, type5, val5, type6, val6) +#define RMTRACE_PROBE10_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4, type5, val5, type6, val6, type7, val7, type8, val8, \ + type9, val9) +#define RMTRACE_MARKER_PROBE(name, gpuId, payload, id) + + +#endif /* TRACER_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h new file mode 100644 index 0000000..30d9aaf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h @@ -0,0 +1,3 @@ + +#include "g_hda_codec_api_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h new file mode 100644 index 0000000..ad600f3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h @@ -0,0 +1,3 @@ + +#include "g_dce_client_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h new file mode 100644 index 0000000..76faf3c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h @@ -0,0 +1,3 @@ + +#include "g_device_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h new file mode 
100644 index 0000000..eee436c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h @@ -0,0 +1,3 @@ + +#include "g_disp_capabilities_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h new file mode 100644 index 0000000..146dfbb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h @@ -0,0 +1,3 @@ + +#include "g_disp_channel_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h new file mode 100644 index 0000000..6dde057 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h @@ -0,0 +1,3 @@ + +#include "g_disp_objs_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h new file mode 100644 index 0000000..5addedc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h @@ -0,0 +1,3 @@ + +#include "g_disp_sf_user_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h new file mode 100644 index 0000000..60fbcb0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h @@ -0,0 +1,3 @@ + +#include "g_kernel_head_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h new file mode 100644 index 0000000..94c9909 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h @@ -0,0 +1,3 @@ + +#include "g_disp_inst_mem_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h new file mode 100644 index 0000000..25be380 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h @@ -0,0 +1,3 @@ + +#include "g_kern_disp_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h new file mode 100644 index 0000000..649c979 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERN_DISP_MAX_H +#define KERN_DISP_MAX_H + +/*! + * @brief Defines max values used for the KernelDisplay Engine Object, + * including values shared by OBJDISP code. + */ + +#define OBJ_MAX_HEADS 4 +#define MAX_RG_LINE_CALLBACKS_PER_HEAD 2 +#define OBJ_MAX_DFPS 31 + +#endif // KERN_DISP_MAX_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h new file mode 100644 index 0000000..929ada5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERN_DISP_TYPE_H +#define KERN_DISP_TYPE_H + +/****************************************************************************** +* +* Defines display type enums that can be used in the KernelDisplay object. 
+* +******************************************************************************/ +#define NV_PDISP_CHN_NUM_ANY 0x7F + +typedef enum +{ + dispChnClass_Curs, + dispChnClass_Ovim, + dispChnClass_Base, + dispChnClass_Core, + dispChnClass_Ovly, + dispChnClass_Winim, + dispChnClass_Win, + dispChnClass_Any, + dispChnClass_Supported +} DISPCHNCLASS; + +enum DISPLAY_ICC_BW_CLIENT +{ + DISPLAY_ICC_BW_CLIENT_RM, + DISPLAY_ICC_BW_CLIENT_EXT, // DD or MODS + NUM_DISPLAY_ICC_BW_CLIENTS +}; + +typedef enum +{ + dispMemoryTarget_physNVM, + dispMemoryTarget_physPCI, + dispMemoryTarget_physPCICoherent +} DISPMEMORYTARGET; + +typedef struct +{ + NvU64 addr; + DISPMEMORYTARGET memTarget; + NvBool valid; +} VGAADDRDESC; + +#endif // #ifndef KERN_DISP_TYPE_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h new file mode 100644 index 0000000..0634ce8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef VBLANK_H +#define VBLANK_H + +#include "gpu/gpu.h" +/* ------------------------ Types definitions ------------------------------ */ +/*! + * Callback function prototype + */ +typedef NV_STATUS (*VBLANKCALLBACKPROC)(OBJGPU*, void *, NvU32, NvU32, NV_STATUS); + +typedef struct VBLANKCALLBACK +{ + VBLANKCALLBACKPROC Proc; + void *pObject; + NvBool bObjectIsChannelDescendant; + NvU32 Param1; + NvU32 Param2; + NvU32 VBlankCount; + NvU32 VBlankOffset; + NvU64 TimeStamp; + NvU32 MC_CallbackFlag; + NvU32 Flags; + NV_STATUS Status; + struct VBLANKCALLBACK *Next; + NvBool bImmediateCallback; + NvBool bIsVblankNotifyEnable; +}VBLANKCALLBACK; + +/* ------------------------ Macros & Defines ------------------------------- */ + +/*! + * Callback function registration flags + */ +#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT 0x00000001 +#define VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP 0x00000002 +#define VBLANK_CALLBACK_FLAG_PERSISTENT 0x00000004 +#define VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP 0x00000010 +#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT 0x00000020 // Explicit request for the next vblank. 
+#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET 0x00000040 // Explicit request for the vblank offset from the current one
+#define VBLANK_CALLBACK_FLAG_PROMOTE_TO_FRONT 0x00000080 // Promotes to being 'first', while still honoring VBlankCount
+#define VBLANK_CALLBACK_FLAG_RELEASES_SEMAPHORE 0x00000100 // A flag for deadlock detection to check if this callback could release a semaphore
+#define VBLANK_CALLBACK_FLAG_GUARANTEE_SAFETY 0x00000200 // This callback absolutely needs to run during vertical blank, even if it runs late as a consequence.
+#define VBLANK_CALLBACK_FLAG_LOW_LATENCY__ISR_ONLY 0x08000000 // This means always process during ISR (never DPC.) Be careful!
+#define VBLANK_CALLBACK_FLAG_LOW_LATENCY 0x10000000 // This now means ASAP, which could be ISR or DPC, depending on which happens first
+#define VBLANK_CALLBACK_FLAG_MC_EXECUTE_ONCE 0x40000000 // A special flag for MultiChip configurations to have the callback execute only once
+#define VBLANK_CALLBACK_FLAG_USER 0x80000000
+
+/*!
+ * A little macro help for the CALLBACK_FLAG_MC_EXECUTE_ONCE flag above
+ */
+#define VBLANK_CALLBACK_EXECUTE_ONCE(x) (x & VBLANK_CALLBACK_FLAG_MC_EXECUTE_ONCE)
+
+/*!
+ * VBlank Service info gathering keep-alive in seconds. This value is the number of
+ * seconds the vblank service will run after a client requests vblank info.
+ */
+#define VBLANK_INFO_GATHER_KEEPALIVE_SECONDS (5)
+
+/*!
+ * VBLANK SERVICE RELATED
+ * VBlank Service callback processing flags
+ * These two flags describe when to process the queues
+ */
+
+#define VBLANK_STATE_PROCESS_NORMAL (0x00000000) // Process the requested queues if associated vblank interrupt is pending
+#define VBLANK_STATE_PROCESS_IMMEDIATE (0x00000001) // Process the requested queues now, regardless of any vblank interrupt pending state
+
+/*!
+ * These three flags describe which queues to process
+ */
+#define VBLANK_STATE_PROCESS_LOW_LATENCY (0x00000002) // Process the low-latency vblank callback queue
+#define VBLANK_STATE_PROCESS_NORMAL_LATENCY (0x00000004) // Process the normal-latency vblank callback queue
+
+#define VBLANK_STATE_PROCESS_ALL_CALLBACKS (VBLANK_STATE_PROCESS_LOW_LATENCY|VBLANK_STATE_PROCESS_NORMAL_LATENCY) // Process all callback (high and low latency) queues
+
+#define VBLANK_STATE_PROCESS_CALLBACKS_ONLY (0x00000008) // Process only the callback queue(s) and nothing else
+
+/*!
+ * set when called from an ISR; if VBlank() is in an ISR and there is
+ * more work to do, then VBlank() will not clear the pending bit
+ */
+#define VBLANK_STATE_PROCESS_CALLED_FROM_ISR (0x00000010)
+#define VBLANK_STATE_PROCESS_CALLED_FROM_DPC (0x00000020)
+
+/*!
Vblank Interrupt state */ +#define NV_HEAD_VBLANK_INTR_UNAVAILABLE (0x00000000) +#define NV_HEAD_VBLANK_INTR_AVAILABLE (0x00000001) +#define NV_HEAD_VBLANK_INTR_ENABLED (0x00000002) + +#endif // VBLANK_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h new file mode 100644 index 0000000..bce623d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h @@ -0,0 +1,3 @@ + +#include "g_eng_desc_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h new file mode 100644 index 0000000..9f732f6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h @@ -0,0 +1,3 @@ + +#include "g_eng_state_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h new file mode 100644 index 0000000..29fdb18 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h @@ -0,0 +1,3 @@ + +#include "g_gpu_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h new file mode 100644 index 0000000..127c38a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h @@ -0,0 +1,381 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_ACCESS_H_ +#define _GPU_ACCESS_H_ + +#include "ioaccess/ioaccess.h" +#include "gpu/gpu_device_mapping.h" + +// Go straight at the memory or hardware. 
+#define MEM_RD08(a) (*(const volatile NvU8 *)(a)) +#define MEM_RD16(a) (*(const volatile NvU16 *)(a)) +#define MEM_RD32(a) (*(const volatile NvU32 *)(a)) +#define MEM_WR08(a, d) do { *(volatile NvU8 *)(a) = (d); } while (0) +#define MEM_WR16(a, d) do { *(volatile NvU16 *)(a) = (d); } while (0) +#define MEM_WR32(a, d) do { *(volatile NvU32 *)(a) = (d); } while (0) +#define MEM_WR64(a, d) do { *(volatile NvU64 *)(a) = (d); } while (0) + +// +// Define the signature of the register filter callback function +// +// flags can be optionally used for filters to decide whether to actually +// touch HW or not. flags should be OR'ed every time a new filter is found. (see objgpu.c) +// +typedef void (*GpuWriteRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 flags); +typedef NvU32 (*GpuReadRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 accessSize, NvU32 flags); + +union GPUHWREG +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +}; + +typedef union GPUHWREG GPUHWREG; + +// +// Register filter record +// +// If REGISTER_FILTER_FLAGS_READ is set, then that means that the base RegRead +// function will not read the register, so the provided read callback function +// is expected to read the register and return the value. +// +// If REGISTER_FILTER_FLAGS_WRITE is set, then that means that the base RegWrite +// function will not write the register, so the provided callback write function +// is expected to write the given value to the register. +// +// It is an error to specify REGISTER_FILTER_FLAGS_READ and not provide a +// read callback function. +// +// It is an error to specify REGISTER_FILTER_FLAGS_WRITE and not provide a +// write callback function. +// +#define REGISTER_FILTER_FLAGS_READ (NVBIT(0)) +#define REGISTER_FILTER_FLAGS_WRITE (NVBIT(1)) +// filter is in the list but it is invalid and should be removed +#define REGISTER_FILTER_FLAGS_INVALID (NVBIT(2)) + +#define REGISTER_FILTER_FLAGS_VIRTUAL (0) +#define REGISTER_FILTER_FLAGS_READ_WRITE (REGISTER_FILTER_FLAGS_READ | REGISTER_FILTER_FLAGS_WRITE) + +typedef struct REGISTER_FILTER REGISTER_FILTER; + +struct REGISTER_FILTER +{ + REGISTER_FILTER *pNext; //!< pointer to next filter + NvU32 flags; //!< attributes of this filter + DEVICE_INDEX devIndex; //!< filter device + NvU32 devInstance; //!< filter device instance + NvU32 rangeStart; //!< filter range start (can overlap) + NvU32 rangeEnd; //!< filter range end (can overlap) + GpuWriteRegCallback pWriteCallback; //!< callback for write + GpuReadRegCallback pReadCallback; //!< callback for read + void *pParam; //!< pointer to param which gets passed to callbacks +}; + +typedef struct { + REGISTER_FILTER *pRegFilterList; // Active filters + REGISTER_FILTER *pRegFilterRecycleList; // Inactive filters + PORT_SPINLOCK * pRegFilterLock; // Thread-safe list management + NvU32 regFilterRefCnt; // Thread-safe list management + NvBool bRegFilterNeedRemove; // Thread-safe list garbage collection +} DEVICE_REGFILTER_INFO; + +typedef struct DEVICE_MAPPING +{ + GPUHWREG *gpuNvAddr; // CPU Virtual Address + RmPhysAddr gpuNvPAddr; // Physical Base Address + NvU32 gpuNvLength; // Length of the Aperture + NvU32 gpuNvSaveLength; + NvU32 gpuDeviceEnum; // Device ID NV_DEVID_* + NvU32 refCount; // refCount for the device map. 
+ DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list +} DEVICE_MAPPING; + +typedef struct +{ + IO_DEVICE parent; + OBJGPU *pGpu; + DEVICE_INDEX deviceIndex; + NvU32 instance; + // The following members are initialized in objgpu.c, + // but are not used anywhere. gpuApertureReg* functions + // fall back to DEVICE_MAPPING instead + GPUHWREG *gpuNvAddr; // CPU Virtual Address + RmPhysAddr gpuNvPAddr; // Physical Base Address + NvU32 gpuNvLength; // Length of Aperture + NvU32 gpuDeviceEnum; // Device ID NV_DEVID_* + NvU32 refCount; // refCount for the device map. + DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list +} GPU_IO_DEVICE; + +typedef struct +{ + // Pointer to GPU linked to this RegisterAccess object + OBJGPU *pGpu; + + // HW register access tools + GPUHWREG *gpuFbAddr; + GPUHWREG *gpuInstAddr; + + // Register access profiling + NvU32 regReadCount; + NvU32 regWriteCount; +} RegisterAccess; + +/*! Init register IO access path */ +NV_STATUS regAccessConstruct(RegisterAccess *, OBJGPU *pGpu); + +/*! Shutdown register IO access path */ +void regAccessDestruct(RegisterAccess *); + +/*! Writes to 8 bit register */ +void regWrite008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV8); + +/*! Writes to 16 bit register */ +void regWrite016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV16); + +/*! Writes to 32 bit register, with thread state on the stack */ +void regWrite032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *); + +/*! Unicast register access, with thread state on the stack */ +void regWrite032Unicast(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *); + +/*! Reads from 8 bit register */ +NvU8 regRead008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32); + +/*! Reads from 16 bit register */ +NvU16 regRead016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32); + +/*! Reads from 32 bit register, with thread state on the stack */ +NvU32 regRead032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, THREAD_STATE_NODE *); + +/*! Reads from 32 bit register and checks bit mask, with thread state on the stack */ +NvU32 regCheckRead032(RegisterAccess *, NvU32, NvU32, THREAD_STATE_NODE *); + +/*! Reads 32 bit register and polls bit field for specific value */ +NV_STATUS regRead032_AndPoll(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvU32); + +/*! Adds a register filter */ +NV_STATUS regAddRegisterFilter(RegisterAccess *, NvU32, DEVICE_INDEX, NvU32, NvU32, NvU32, GpuWriteRegCallback, GpuReadRegCallback, void *, REGISTER_FILTER **); + +/*! Removes register filter */ +void regRemoveRegisterFilter(RegisterAccess *, REGISTER_FILTER *); + +/*! Check status of read return value for GPU/bus errors */ +void regCheckAndLogReadFailure(RegisterAccess *, NvU32 addr, NvU32 mask, NvU32 value); + +// +// GPU register I/O macros. +// + +// +// GPU neutral macros typically used for register I/O. 
+//
+#define GPU_DRF_SHIFT(drf) ((0?drf) % 32)
+#define GPU_DRF_MASK(drf) (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
+#define GPU_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<GPU_DRF_SHIFT(NV ## d ## r ## f))
+#define GPU_DRF_NUM(d,r,f,n) (((n)&GPU_DRF_MASK(NV ## d ## r ## f))<<GPU_DRF_SHIFT(NV ## d ## r ## f))
+#define GPU_DRF_VAL(d,r,f,v) (((v)>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_DRF_SHIFTMASK(drf) (GPU_DRF_MASK(drf)<<(GPU_DRF_SHIFT(drf)))
+#define GPU_DRF_WIDTH(drf) ((1?drf) - (0?drf) + 1)
+
+
+// Device independent macros
+// Multiple device instance macros
+
+#define REG_INST_RD08(g,dev,inst,a) regRead008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
+#define REG_INST_RD16(g,dev,inst,a) regRead016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
+#define REG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, NULL)
+
+#define REG_INST_WR08(g,dev,inst,a,v) regWrite008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
+#define REG_INST_WR16(g,dev,inst,a,v) regWrite016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
+#define REG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)
+#define REG_INST_WR32_UC(g,dev,inst,a,v) regWrite032Unicast(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)
+
+#define REG_INST_RD32_EX(g,dev,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, t)
+#define REG_INST_WR32_EX(g,dev,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, t)
+
+#define REG_INST_DEVIDX_RD32_EX(g,devidx,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, t)
+#define REG_INST_DEVIDX_WR32_EX(g,devidx,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, v, t)
+
+// GPU macros defined in terms of DEV_ macros
+#define GPU_REG_RD08(g,a) REG_INST_RD08(g,GPU,0,a)
+#define GPU_REG_RD16(g,a) REG_INST_RD16(g,GPU,0,a)
+#define GPU_REG_RD32(g,a) REG_INST_RD32(g,GPU,0,a)
+#define GPU_CHECK_REG_RD32(g,a,m) regCheckRead032(GPU_GET_REGISTER_ACCESS(g),a,m,NULL)
+#define GPU_REG_RD32_AND_POLL(g,r,m,v) regRead032_AndPoll(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_GPU, r, m, v)
+
+#define GPU_REG_WR08(g,a,v) REG_INST_WR08(g,GPU,0,a,v)
+#define GPU_REG_WR16(g,a,v) REG_INST_WR16(g,GPU,0,a,v)
+#define GPU_REG_WR32(g,a,v) REG_INST_WR32(g,GPU,0,a,v)
+#define GPU_REG_WR32_UC(g,a,v) REG_INST_WR32_UC(g,GPU,0,a,v)
+
+// GPU macros for SR-IOV
+#define GPU_VREG_RD32(g, a) GPU_REG_RD32(g, g->sriovState.virtualRegPhysOffset + a)
+#define GPU_VREG_WR32(g, a, v) GPU_REG_WR32(g, g->sriovState.virtualRegPhysOffset + a, v)
+#define GPU_VREG_RD32_EX(g,a,t) REG_INST_RD32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, t)
+#define GPU_VREG_WR32_EX(g,a,v,t) REG_INST_WR32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, v, t)
+#define GPU_VREG_FLD_WR_DRF_DEF(g,d,r,f,c) GPU_VREG_WR32(g, NV##d##r,(GPU_VREG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+#define GPU_VREG_RD_DRF(g,d,r,f) (((GPU_VREG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+
+#define VREG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, NULL)
+#define VREG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, v, NULL)
+#define GPU_VREG_FLD_WR_DRF_NUM(g,d,r,f,n) VREG_INST_WR32(g,GPU,0,NV##d##r,(VREG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
+#define GPU_GET_VREG_OFFSET(g, a) (g->sriovState.virtualRegPhysOffset + a)
+
+#define GPU_VREG_IDX_RD_DRF(g,d,r,i,f) (((GPU_VREG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_VREG_FLD_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_VREG_WR32(g, NV##d##r(i),(GPU_VREG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+
+#if GPU_REGISTER_ACCESS_DUMP
+#define GPU_VREG_RD32(g, a) gpuRegRd32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a)
+#define GPU_VREG_WR32(g, a, v) gpuRegWr32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a, v)
+
+#endif // GPU_REGISTER_ACCESS_DUMP
+
+//
+// Macros for register I/O
+//
+#define GPU_FLD_WR_DRF_NUM(g,d,r,f,n) REG_INST_WR32(g,GPU,0,NV##d##r,(REG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
+#define GPU_FLD_WR_DRF_DEF(g,d,r,f,c) REG_INST_WR32(g,GPU,0,NV##d##r,(REG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+#define GPU_REG_RD_DRF(g,d,r,f) (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_FLD_TEST_DRF_DEF(g,d,r,f,c) (GPU_REG_RD_DRF(g, d, r, f) == NV##d##r##f##c)
+#define GPU_FLD_TEST_DRF_NUM(g,d,r,f,n) (GPU_REG_RD_DRF(g, d, r, f) == n)
+#define GPU_FLD_IDX_TEST_DRF_DEF(g,d,r,f,c,i) (GPU_REG_IDX_RD_DRF(g, d, r, i, f) == NV##d##r##f##c)
+#define GPU_FLD_2IDX_TEST_DRF_DEF(g,d,r,f,c,i,j) (GPU_REG_2IDX_RD_DRF(g, d, r, i, j, f) == NV##d##r##f##c)
+
+#define GPU_REG_RD_DRF_EX(g,d,r,f,t) (((GPU_REG_RD32_EX(g, NV ## d ## r, t))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+
+#define GPU_FLD_WR_DRF_NUM_EX(g,d,r,f,n,t) REG_INST_WR32_EX(g,GPU,0,NV##d##r,(REG_INST_RD32_EX(g,GPU,0,NV##d##r,t)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n),t)
+#define GPU_FLD_WR_DRF_DEF_EX(g,d,r,f,c,t) REG_INST_WR32_EX(g,GPU,0,NV##d##r,(REG_INST_RD32_EX(g,GPU,0,NV##d##r,t)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c),t)
+#define GPU_REG_IDX_RD_DRF(g,d,r,i,f) (((GPU_REG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_REG_2IDX_RD_DRF(g,d,r,i,j,f) (((GPU_REG_RD32(g, NV ## d ## r(i, j)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
+#define GPU_REG_RD_DRF_IDX(g,d,r,f,i) (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f(i)))&GPU_DRF_MASK(NV ## d ## r ## f(i)))
+#define GPU_REG_IDX_OFFSET_RD_DRF(g,d,r,i,o,f) (((GPU_REG_RD32(g, NV ## d ## r(i,o)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
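+
+/*
+ * Usage sketch (illustrative only; the NV_PFOO register and field names are
+ * hypothetical): the DRF macros above encode a field read and a
+ * read-modify-write of a single field.
+ *
+ *     // Read a field, shifted down to bit 0 and masked:
+ *     NvU32 state = GPU_REG_RD_DRF(pGpu, _PFOO, _CTRL, _STATE);
+ *
+ *     // Read-modify-write: clear the field, OR in the named value, write:
+ *     GPU_FLD_WR_DRF_DEF(pGpu, _PFOO, _CTRL, _STATE, _ENABLED);
+ *
+ * The expansion relies on NV_PFOO_CTRL_STATE being defined as a high:low
+ * bit-range pair, which GPU_DRF_SHIFT/GPU_DRF_MASK decompose via the
+ * (0?drf)/(1?drf) ternary trick.
+ */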
+
+//
+// Macros that abstract the use of bif object to access GPU bus config registers
+// This is the preferred set >= NV50
+//
+#define GPU_BUS_CFG_RD32(g,r,d) gpuReadBusConfigReg_HAL(g, r, d)
+#define GPU_BUS_CFG_WR32(g,r,d) gpuWriteBusConfigReg_HAL(g, r, d)
+#define GPU_BUS_CFG_FLD_WR_DRF_DEF(g,x,d,r,f,c) GPU_BUS_CFG_WR32(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
+
+// SF macros
+#define SF_INDEX(sf) ((0?sf)/32)
+#define SF_SHIFT(sf) ((0?sf)&31)
+#define SF_MASK(sf) (0xFFFFFFFF>>(31-(1?sf)+(0?sf)))
+#define SF_SHIFTMASK(sf) (SF_MASK(sf) << SF_SHIFT(sf))
+#define SF_DEF(s,f,c) ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f))
+#define SF_IDX_DEF(s,f,c,i) ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f(i)))
+#define SF_NUM(s,f,n) (((n)&SF_MASK(NV ## s ## f))<<SF_SHIFT(NV ## s ## f))
+#define SF_IDX_NUM(s,f,n,i) (((n)&SF_MASK(NV ## s ## f(i)))<<SF_SHIFT(NV ## s ## f(i)))
+#define SF_VAL(s,f,v) (((v)>>SF_SHIFT(NV ## s ## f))&SF_MASK(NV ## s ## f))
+#define SF_WIDTH(sf) ((1?sf) - (0?sf) + 1)
+// This macro parses multi-word/array defines
+#define SF_ARR32_VAL(s,f,arr) \
+    (((arr)[SF_INDEX(NV ## s ## f)] >> SF_SHIFT(NV ## s ## f)) & SF_MASK(NV ## s ## f))
+#define FLD_SF_DEF(s,f,d,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_DEF(s,f,d)
+#define FLD_SF_NUM(s,f,n,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_NUM(s,f,n)
+#define FLD_SF_IDX_DEF(s,f,c,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_DEF(s,f,c,i))
+#define FLD_SF_IDX_NUM(s,f,n,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_NUM(s,f,n,i))
+
+#endif // _GPU_ACCESS_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h
new file mode 100644
index 0000000..8728953
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h
@@ -0,0 +1,306 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// No include guards - this file is included multiple times, each time with a
+// different definition for GPU_CHILD_SINGLE_INST and GPU_CHILD_MULTI_INST
+//
+// Callers that will use the same definition for single- and multi- instance
+// can define GPU_CHILD that will be used for both
+//
+#if defined(GPU_CHILD)
+#if !defined(GPU_CHILD_SINGLE_INST) && !defined(GPU_CHILD_MULTI_INST)
+#define GPU_CHILD_SINGLE_INST GPU_CHILD
+#define GPU_CHILD_MULTI_INST GPU_CHILD
+#else
+#error "Must not define GPU_CHILD_{SINGLE,MULTI}_INST and GPU_CHILD at the same time"
+#endif
+#endif
+
+//
+// GPU child list. All objects must inherit from OBJENGSTATE. Objects are
+// constructed in the listed order and destructed in reverse order. Storage in
+// OBJGPU and accessor macros (i.e.: GPU_GET_XXX) are generated from this list.
+// + +// +// Temporarily needed to generate stubs for disabled modules +// To be removed when the references to these modules are gone +// +#if defined(GPU_CHILD_LIST_DISABLED_ONLY) +#define GPU_CHILD_MODULE(_rmcfgModule) !RMCFG_MODULE_ENABLED(_rmcfgModule) +#else +#define GPU_CHILD_MODULE(_rmcfgModule) RMCFG_MODULE_ENABLED(_rmcfgModule) +#endif + + /* Class Name Accessor Name Max Instances bConstructEarly bAlwaysCreate OBJGPU Field */ +#if GPU_CHILD_MODULE(FUSE) + GPU_CHILD_SINGLE_INST( OBJFUSE, GPU_GET_FUSE, 1, NV_TRUE, NV_TRUE, pFuse ) +#endif +#if GPU_CHILD_MODULE(BIF) + GPU_CHILD_SINGLE_INST( OBJBIF, GPU_GET_BIF, 1, NV_TRUE, NV_FALSE, pBif ) +#endif +#if GPU_CHILD_MODULE(KERNEL_BIF) + GPU_CHILD_SINGLE_INST( KernelBif, GPU_GET_KERNEL_BIF, 1, NV_TRUE, NV_FALSE, pKernelBif ) +#endif +#if GPU_CHILD_MODULE(NNE) + GPU_CHILD_SINGLE_INST( OBJNNE, GPU_GET_NNE, 1, NV_TRUE, NV_FALSE, pNne ) +#endif +#if GPU_CHILD_MODULE(MC) + GPU_CHILD_SINGLE_INST( OBJMC, GPU_GET_MC, 1, NV_FALSE, NV_FALSE, pMc ) +#endif +#if GPU_CHILD_MODULE(KERNEL_MC) + GPU_CHILD_SINGLE_INST( KernelMc, GPU_GET_KERNEL_MC, 1, NV_FALSE, NV_FALSE, pKernelMc ) +#endif +#if GPU_CHILD_MODULE(PRIV_RING) + GPU_CHILD_SINGLE_INST( PrivRing, GPU_GET_PRIV_RING, 1, NV_FALSE, NV_FALSE, pPrivRing ) +#endif +#if GPU_CHILD_MODULE(INTR) + GPU_CHILD_SINGLE_INST( SwIntr, GPU_GET_SW_INTR, 1, NV_FALSE, NV_FALSE, pSwIntr ) +#endif +#if GPU_CHILD_MODULE(MEMORY_SYSTEM) + GPU_CHILD_SINGLE_INST( MemorySystem, GPU_GET_MEMORY_SYSTEM, 1, NV_FALSE, NV_FALSE, pMemorySystem ) +#endif +#if GPU_CHILD_MODULE(KERNEL_MEMORY_SYSTEM) + GPU_CHILD_SINGLE_INST( KernelMemorySystem, GPU_GET_KERNEL_MEMORY_SYSTEM, 1, NV_FALSE, NV_FALSE, pKernelMemorySystem ) +#endif +#if GPU_CHILD_MODULE(MEMORY_MANAGER) + GPU_CHILD_SINGLE_INST( MemoryManager, GPU_GET_MEMORY_MANAGER, 1, NV_FALSE, NV_FALSE, pMemoryManager ) +#endif +#if GPU_CHILD_MODULE(FBFLCN) + GPU_CHILD_SINGLE_INST( OBJFBFLCN, GPU_GET_FBFLCN, 1, NV_FALSE, NV_FALSE, pFbflcn ) +#endif +#if GPU_CHILD_MODULE(HSHUBMANAGER) + GPU_CHILD_SINGLE_INST( OBJHSHUBMANAGER, GPU_GET_HSHUBMANAGER, 1, NV_FALSE, NV_FALSE, pHshMgr ) +#endif +#if GPU_CHILD_MODULE(HSHUB) + GPU_CHILD_MULTI_INST ( Hshub, GPU_GET_HSHUB, GPU_MAX_HSHUBS, NV_FALSE, NV_FALSE, pHshub ) +#endif +#if GPU_CHILD_MODULE(SEQ) + GPU_CHILD_SINGLE_INST( OBJSEQ, GPU_GET_SEQ, 1, NV_FALSE, NV_TRUE, pSeq ) +#endif +#if GPU_CHILD_MODULE(GpuMutexMgr) + GPU_CHILD_SINGLE_INST( GpuMutexMgr, GPU_GET_MUTEX_MGR, 1, NV_FALSE, NV_TRUE, pMutexMgr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_DISPLAY) + GPU_CHILD_SINGLE_INST( KernelDisplay, GPU_GET_KERNEL_DISPLAY, 1, NV_FALSE, NV_FALSE, pKernelDisplay ) +#endif +#if GPU_CHILD_MODULE(DISP) + GPU_CHILD_SINGLE_INST( OBJDISP, GPU_GET_DISP, 1, NV_FALSE, NV_FALSE, pDisp ) +#endif +#if GPU_CHILD_MODULE(TMR) + GPU_CHILD_SINGLE_INST( OBJTMR, GPU_GET_TIMER, 1, NV_TRUE, NV_TRUE, pTmr ) +#endif +#if GPU_CHILD_MODULE(BUS) + GPU_CHILD_SINGLE_INST( OBJBUS, GPU_GET_BUS, 1, NV_FALSE, NV_FALSE, pBus ) +#endif +#if GPU_CHILD_MODULE(KERNEL_BUS) + GPU_CHILD_SINGLE_INST( KernelBus, GPU_GET_KERNEL_BUS, 1, NV_FALSE, NV_FALSE, pKernelBus ) +#endif +#if GPU_CHILD_MODULE(GMMU) + GPU_CHILD_SINGLE_INST( OBJGMMU, GPU_GET_GMMU, 1, NV_FALSE, NV_FALSE, pGmmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GMMU) + GPU_CHILD_SINGLE_INST( KernelGmmu, GPU_GET_KERNEL_GMMU, 1, NV_FALSE, NV_FALSE, pKernelGmmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_NVDEC) + GPU_CHILD_SINGLE_INST( KernelNvdec, GPU_GET_KERNEL_NVDEC, 1, NV_FALSE, NV_FALSE, pKernelNvdec ) +#endif +#if GPU_CHILD_MODULE(KERNEL_SEC2) + 
GPU_CHILD_SINGLE_INST( KernelSec2, GPU_GET_KERNEL_SEC2, 1, NV_FALSE, NV_FALSE, pKernelSec2 ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GSP) + GPU_CHILD_SINGLE_INST( KernelGsp, GPU_GET_KERNEL_GSP, 1, NV_FALSE, NV_FALSE, pKernelGsp ) +#endif +#if GPU_CHILD_MODULE(DCECLIENTRM) + GPU_CHILD_SINGLE_INST( OBJDCECLIENTRM, GPU_GET_DCECLIENTRM, 1, NV_FALSE, NV_FALSE, pDceclientrm ) +#endif +#if GPU_CHILD_MODULE(VIRT_MEM_ALLOCATOR) + GPU_CHILD_SINGLE_INST( VirtMemAllocator, GPU_GET_DMA, 1, NV_FALSE, NV_FALSE, pDma ) +#endif +#if GPU_CHILD_MODULE(GRMGR) + GPU_CHILD_SINGLE_INST( GraphicsManager, GPU_GET_GRMGR, 1, NV_FALSE, NV_TRUE, pGrMgr ) +#endif +#if GPU_CHILD_MODULE(MIG_MANAGER) + GPU_CHILD_SINGLE_INST( MIGManager, GPU_GET_MIG_MANAGER, 1, NV_FALSE, NV_TRUE, pMIGManager ) +#endif +#if GPU_CHILD_MODULE(KERNEL_MIG_MANAGER) + GPU_CHILD_SINGLE_INST( KernelMIGManager, GPU_GET_KERNEL_MIG_MANAGER, 1, NV_FALSE, NV_TRUE, pKernelMIGManager ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GRAPHICS_MANAGER) + GPU_CHILD_SINGLE_INST( KernelGraphicsManager, GPU_GET_KERNEL_GRAPHICS_MANAGER, 1, NV_FALSE, NV_TRUE, pKernelGraphicsManager ) +#endif +#if GPU_CHILD_MODULE(GR) + GPU_CHILD_MULTI_INST ( Graphics, GPU_GET_GR_UNSAFE, GPU_MAX_GRS, NV_FALSE, NV_FALSE, pGr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GRAPHICS) + GPU_CHILD_MULTI_INST ( KernelGraphics, GPU_GET_KERNEL_GRAPHICS, GPU_MAX_GRS, NV_FALSE, NV_FALSE, pKernelGraphics ) +#endif +#if GPU_CHILD_MODULE(ClockManager) + GPU_CHILD_SINGLE_INST( ClockManager, GPU_GET_CLK_MGR, 1, NV_FALSE, NV_FALSE, pClk ) +#endif +#if GPU_CHILD_MODULE(FAN) + GPU_CHILD_SINGLE_INST( OBJFAN, GPU_GET_FAN, 1, NV_FALSE, NV_FALSE, pFan ) +#endif +#if GPU_CHILD_MODULE(PERF) + GPU_CHILD_SINGLE_INST( Perf, GPU_GET_PERF, 1, NV_FALSE, NV_FALSE, pPerf ) +#endif +#if GPU_CHILD_MODULE(KERNEL_PERF) + GPU_CHILD_SINGLE_INST( KernelPerf, GPU_GET_KERNEL_PERF, 1, NV_FALSE, NV_FALSE, pKernelPerf ) +#endif +#if GPU_CHILD_MODULE(THERM) + GPU_CHILD_SINGLE_INST( Therm, GPU_GET_THERM, 1, NV_FALSE, NV_FALSE, pTherm ) +#endif +#if GPU_CHILD_MODULE(BSP) + GPU_CHILD_MULTI_INST ( OBJBSP, GPU_GET_BSP, GPU_MAX_NVDECS, NV_FALSE, NV_FALSE, pBsp ) +#endif +#if GPU_CHILD_MODULE(CIPHER) + GPU_CHILD_SINGLE_INST( OBJCIPHER, GPU_GET_CIPHER, 1, NV_FALSE, NV_FALSE, pCipher ) +#endif +#if GPU_CHILD_MODULE(VBIOS) + GPU_CHILD_SINGLE_INST( OBJVBIOS, GPU_GET_VBIOS, 1, NV_FALSE, NV_TRUE, pVbios ) +#endif +#if GPU_CHILD_MODULE(DCB) + GPU_CHILD_SINGLE_INST( OBJDCB, GPU_GET_DCB, 1, NV_FALSE, NV_TRUE, pDcb ) +#endif +#if GPU_CHILD_MODULE(GPIO) + GPU_CHILD_SINGLE_INST( OBJGPIO, GPU_GET_GPIO, 1, NV_FALSE, NV_TRUE, pGpio ) +#endif +#if GPU_CHILD_MODULE(VOLT) + GPU_CHILD_SINGLE_INST( OBJVOLT, GPU_GET_VOLT, 1, NV_FALSE, NV_FALSE, pVolt ) +#endif +#if GPU_CHILD_MODULE(I2C) + GPU_CHILD_SINGLE_INST( OBJI2C, GPU_GET_I2C, 1, NV_FALSE, NV_TRUE, pI2c ) +#endif +#if GPU_CHILD_MODULE(SPI) + GPU_CHILD_SINGLE_INST( Spi, GPU_GET_SPI, 1, NV_FALSE, NV_TRUE, pSpi ) +#endif +#if GPU_CHILD_MODULE(KERNEL_RC) + GPU_CHILD_SINGLE_INST( KernelRc, GPU_GET_KERNEL_RC, 1, NV_FALSE, NV_TRUE, pKernelRc ) +#endif +#if GPU_CHILD_MODULE(RC) + GPU_CHILD_SINGLE_INST( OBJRC, GPU_GET_RC, 1, NV_FALSE, NV_TRUE, pRC ) +#endif +#if GPU_CHILD_MODULE(STEREO) + GPU_CHILD_SINGLE_INST( OBJSTEREO, GPU_GET_STEREO, 1, NV_FALSE, NV_TRUE, pStereo ) +#endif +#if GPU_CHILD_MODULE(INTR) + GPU_CHILD_SINGLE_INST( Intr, GPU_GET_INTR, 1, NV_FALSE, NV_TRUE, pIntr ) +#endif +#if GPU_CHILD_MODULE(DPAUX) + GPU_CHILD_SINGLE_INST( OBJDPAUX, GPU_GET_DPAUX, 1, NV_FALSE, NV_FALSE, pDpAux ) +#endif +#if GPU_CHILD_MODULE(PMU) 
+ GPU_CHILD_SINGLE_INST( Pmu, GPU_GET_PMU, 1, NV_FALSE, NV_FALSE, pPmu )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_PMU)
+ GPU_CHILD_SINGLE_INST( KernelPmu, GPU_GET_KERNEL_PMU, 1, NV_FALSE, NV_FALSE, pKernelPmu )
+#endif
+#if GPU_CHILD_MODULE(CE)
+ GPU_CHILD_MULTI_INST ( OBJCE, GPU_GET_CE, GPU_MAX_CES, NV_FALSE, NV_FALSE, pCe )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_CE)
+ GPU_CHILD_MULTI_INST ( KernelCE, GPU_GET_KCE, GPU_MAX_CES, NV_FALSE, NV_FALSE, pKCe )
+#endif
+#if GPU_CHILD_MODULE(MSENC)
+ GPU_CHILD_MULTI_INST ( OBJMSENC, GPU_GET_MSENC, GPU_MAX_MSENCS, NV_FALSE, NV_FALSE, pMsenc )
+#endif
+#if GPU_CHILD_MODULE(HDA)
+ GPU_CHILD_SINGLE_INST( OBJHDA, GPU_GET_HDA, 1, NV_FALSE, NV_FALSE, pHda )
+#endif
+#if GPU_CHILD_MODULE(HDACODEC)
+ GPU_CHILD_SINGLE_INST( OBJHDACODEC, GPU_GET_HDACODEC, 1, NV_FALSE, NV_FALSE, pHdacodec )
+#endif
+#if GPU_CHILD_MODULE(LPWR)
+ GPU_CHILD_SINGLE_INST( Lpwr, GPU_GET_LPWR, 1, NV_FALSE, NV_FALSE, pLpwr )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_FIFO)
+ GPU_CHILD_SINGLE_INST( KernelFifo, GPU_GET_KERNEL_FIFO_UC, 1, NV_FALSE, NV_FALSE, pKernelFifo )
+#endif
+#if GPU_CHILD_MODULE(FIFO)
+ GPU_CHILD_SINGLE_INST( OBJFIFO, GPU_GET_FIFO_UC, 1, NV_FALSE, NV_FALSE, pFifo )
+#endif
+#if GPU_CHILD_MODULE(INFOROM)
+ GPU_CHILD_SINGLE_INST( OBJINFOROM, GPU_GET_INFOROM, 1, NV_FALSE, NV_TRUE, pInforom )
+#endif
+#if GPU_CHILD_MODULE(PMGR)
+ GPU_CHILD_SINGLE_INST( Pmgr, GPU_GET_PMGR, 1, NV_FALSE, NV_FALSE, pPmgr )
+#endif
+#if GPU_CHILD_MODULE(UVM)
+ GPU_CHILD_SINGLE_INST( OBJUVM, GPU_GET_UVM, 1, NV_FALSE, NV_FALSE, pUvm )
+#endif
+#if GPU_CHILD_MODULE(NV_DEBUG_DUMP)
+ GPU_CHILD_SINGLE_INST( NvDebugDump, GPU_GET_NVD, 1, NV_FALSE, NV_TRUE, pNvd )
+#endif
+#if GPU_CHILD_MODULE(GRDBG)
+ GPU_CHILD_SINGLE_INST( SMDebugger, GPU_GET_GRDBG, 1, NV_FALSE, NV_TRUE, pGrdbg )
+#endif
+#if GPU_CHILD_MODULE(SEC2)
+ GPU_CHILD_SINGLE_INST( OBJSEC2, GPU_GET_SEC2, 1, NV_FALSE, NV_FALSE, pSec2 )
+#endif
+#if GPU_CHILD_MODULE(LSFM)
+ GPU_CHILD_SINGLE_INST( OBJLSFM, GPU_GET_LSFM, 1, NV_FALSE, NV_FALSE, pLsfm )
+#endif
+#if GPU_CHILD_MODULE(ACR)
+ GPU_CHILD_SINGLE_INST( OBJACR, GPU_GET_ACR, 1, NV_FALSE, NV_FALSE, pAcr )
+#endif
+#if GPU_CHILD_MODULE(KERNEL_NVLINK)
+ GPU_CHILD_SINGLE_INST( KernelNvlink, GPU_GET_KERNEL_NVLINK, 1, NV_FALSE, NV_FALSE, pKernelNvlink )
+#endif
+#if GPU_CHILD_MODULE(NVLINK)
+ GPU_CHILD_SINGLE_INST( Nvlink, GPU_GET_NVLINK, 1, NV_FALSE, NV_FALSE, pNvLink )
+#endif
+#if GPU_CHILD_MODULE(GPULOG)
+ GPU_CHILD_SINGLE_INST( OBJGPULOG, GPU_GET_GPULOG, 1, NV_FALSE, NV_TRUE, pGpuLog )
+#endif
+#if GPU_CHILD_MODULE(GPUMON)
+ GPU_CHILD_SINGLE_INST( OBJGPUMON, GPU_GET_GPUMON, 1, NV_FALSE, NV_TRUE, pGpuMon )
+#endif
+#if GPU_CHILD_MODULE(HWPM)
+ GPU_CHILD_SINGLE_INST( OBJHWPM, GPU_GET_HWPM, 1, NV_FALSE, NV_FALSE, pHwpm )
+#endif
+#if GPU_CHILD_MODULE(GRIDDISPLAYLESS)
+ GPU_CHILD_SINGLE_INST( OBJGRIDDISPLAYLESS, GPU_GET_GRIDDISPLAYLESS, 1, NV_FALSE, NV_FALSE, pGridDisplayless )
+#endif
+#if GPU_CHILD_MODULE(SWENG)
+ GPU_CHILD_SINGLE_INST( OBJSWENG, GPU_GET_SWENG, 1, NV_FALSE, NV_FALSE, pSwEng )
+#endif
+#if GPU_CHILD_MODULE(VMMU)
+ GPU_CHILD_SINGLE_INST( OBJVMMU, GPU_GET_VMMU, 1, NV_FALSE, NV_FALSE, pVmmu )
+#endif
+#if GPU_CHILD_MODULE(NVJPG)
+ GPU_CHILD_MULTI_INST( OBJNVJPG, GPU_GET_NVJPG, GPU_MAX_NVJPGS, NV_FALSE, NV_FALSE, pNvjpg )
+#endif
+#if GPU_CHILD_MODULE(GSP)
+ GPU_CHILD_SINGLE_INST( Gsp, GPU_GET_GSP, 1, NV_FALSE, NV_FALSE, pGsp )
+#endif
+#if GPU_CHILD_MODULE(OFA)
+ GPU_CHILD_SINGLE_INST( OBJOFA, GPU_GET_OFA, 1, NV_FALSE, NV_FALSE, pOfa )
+#endif
+
+// Undefine the entry macros to simplify call sites
+#undef GPU_CHILD
+#undef GPU_CHILD_SINGLE_INST
+#undef GPU_CHILD_MULTI_INST
+#undef GPU_CHILD_MODULE
+#undef GPU_CHILD_LIST_DISABLED_ONLY
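+
+/*
+ * Inclusion sketch (illustrative): a consumer defines the entry macro(s),
+ * includes this list, and gets one expansion per enabled engine. Declaring
+ * the OBJGPU child pointer fields could look like:
+ *
+ *     #define GPU_CHILD(className, accessor, maxInstances, bEarly, bAlways, field) \
+ *         className *field[maxInstances];
+ *     #include "gpu/gpu_child_list.h"
+ *
+ * The #undef block above runs at the end of every inclusion, so each include
+ * site starts from a clean slate.
+ */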
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
new file mode 100644
index 0000000..53bdc1c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
@@ -0,0 +1,62 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _GPU_DEVICE_MAPPING_H_
+#define _GPU_DEVICE_MAPPING_H_
+
+// Defines the enum type DEVICE_INDEX used for identifying the device type being accessed
+typedef enum
+{
+    DEVICE_INDEX_GPU = 0,
+    DEVICE_INDEX_HOST1X,
+    DEVICE_INDEX_DISPLAY,
+    DEVICE_INDEX_DPAUX,
+    DEVICE_INDEX_MC,
+    DEVICE_INDEX_CLKRST,
+    DEVICE_INDEX_MSS_NVLINK,
+    DEVICE_INDEX_HDACODEC,
+    DEVICE_INDEX_EMC,
+    DEVICE_INDEX_FUSE,
+    DEVICE_INDEX_KFUSE,
+    DEVICE_INDEX_MIPICAL,
+    DEVICE_INDEX_MAX // Should always be the last entry
+} DEVICE_INDEX;
+
+typedef enum
+{
+    SOC_DEV_MAPPING_DISP = 0,
+    SOC_DEV_MAPPING_DPAUX0,
+    SOC_DEV_MAPPING_DPAUX1, // Update NV_MAX_SOC_DPAUX_NUM_DEVICES if adding new DPAUX mappings
+    SOC_DEV_MAPPING_HDACODEC,
+    SOC_DEV_MAPPING_MIPICAL,
+    SOC_DEV_MAPPING_MAX
+} SOC_DEV_MAPPING;
+
+#define GPU_MAX_DEVICE_MAPPINGS (60)
+
+typedef struct
+{
+    DEVICE_INDEX deviceIndex; // DEVICE_INDEX_*
+    NvU32 devId; // NV_DEVID_*
+} DEVICE_ID_MAPPING;
+
+#endif // _GPU_DEVICE_MAPPING_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h
new file mode 100644
index 0000000..3287743
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h
@@ -0,0 +1,3 @@
+
+#include "g_gpu_halspec_nvoc.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h
new file mode 100644
index 0000000..4f25bcb
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h
@@ -0,0 +1,3 @@
+
+#include "g_gpu_resource_nvoc.h"
+
diff --git 
a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h new file mode 100644 index 0000000..a9bca9d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_RESOURCE_DESC_H_ +#define _GPU_RESOURCE_DESC_H_ + +#include "gpu/eng_desc.h" + +typedef struct GPU_RESOURCE_DESC +{ + NvU32 externalClassId; + ENGDESCRIPTOR engDesc; +} GPU_RESOURCE_DESC; + +// CLASSDESCRIPTOR is deprecated, please use GPU_RESOURCE_DESC +typedef struct GPU_RESOURCE_DESC CLASSDESCRIPTOR, *PCLASSDESCRIPTOR; + +#endif // _GPU_RESOURCE_DESC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h new file mode 100644 index 0000000..037602b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h @@ -0,0 +1,144 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#ifndef _GPU_TIMEOUT_H_
+#define _GPU_TIMEOUT_H_
+
+/* ------------------------ Includes ---------------------------------------- */
+#include "core/core.h"
+
+
+/* ------------------------ Forward Definitions ----------------------------- */
+struct OBJGPU;
+
+/* ------------------------ Macros ------------------------------------------ */
+/*!
+ * @note GPU_TIMEOUT_DEFAULT is different per platform and can range anywhere
+ *       from 2 to 30 secs depending on the GPU Mode and Platform.
+ *       By default if GPU_TIMEOUT_DEFAULT is specified, we use the ThreadState
+ *       unless explicitly told not to via GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE.
+ */
+#define GPU_TIMEOUT_DEFAULT 0
+
+/*!
+ * gpuSetTimeout Flags - saved in pTimeout->flags
+ */
+#define GPU_TIMEOUT_FLAGS_DEFAULT NVBIT(0) //!< default timeout mechanism as set by platform
+#define GPU_TIMEOUT_FLAGS_USE_THREAD_STATE NVBIT(1) //!< default timeout time used - use the ThreadState
+#define GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE NVBIT(2) //!< even if default time was used - skip the ThreadState
+#define GPU_TIMEOUT_FLAGS_OSTIMER NVBIT(3) //!< osGetCurrentTime()
+#define GPU_TIMEOUT_FLAGS_OSDELAY NVBIT(4) //!< osDelay()
+#define GPU_TIMEOUT_FLAGS_TMR NVBIT(5) //!< tmrGetCurrentTime()
+#define GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG NVBIT(6) //!< bypass timeout logging in the RM journal
+#define GPU_TIMEOUT_FLAGS_TMRDELAY NVBIT(7) //!< tmrDelay()
+#define GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD NVBIT(8) //!< don't explicitly let other threads run first
+/*!
+ * gpuCheckTimeout Flags set in pTimeout->flags upon NV_ERR_TIMEOUT
+ */
+#define GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT NVBIT(30)
+#define GPU_TIMEOUT_FLAGS_STATUS_THREAD_STATE_TIMEOUT NVBIT(31)
+
+/* ------------------------ Datatypes --------------------------------------- */
+/*!
+ * Timeout support.
+ */
+typedef struct
+{
+    NvU64 timeout;
+    NvU32 flags;
+    OBJGPU *pTmrGpu; //!< The GPU whose timer is used in SLI mode
+                     // Defined only if flags is set to _TMR or _TMRDELAY
+} RMTIMEOUT,
+*PRMTIMEOUT;
+
+/*!
+ * @brief GPU timeout related data.
+ */
+typedef struct
+{
+    volatile NvBool bDefaultOverridden;
+    volatile NvBool bScaled;
+    volatile NvU32 defaultus; //!< Default timeout in us
+    volatile NvU32 defaultResetus; //!< Default timeout reset value in us
+    NvU32 defaultFlags; //!< Default timeout mode
+    NvU32 scale; //!< Emulation/Simulation multiplier
+    OBJGPU *pGpu;
+} TIMEOUT_DATA;
+
+/*!
+ * @brief A prototype of the condition evaluation function required by the
+ *        @ref gpuTimeoutCondWait_IMPL interface.
+ *
+ * @note The function is responsible for evaluating the encapsulated condition
+ *       as well as for triggering any required prerequisites.
+ *       For example, if the condition depends on a PMU-issued message, the
+ *       function should assure proper servicing of the PMU interrupts.
+ *
+ * @param[in] pGpu OBJGPU pointer for this conditional function
+ * @param[in] pVoid
+ *        Void parameter pointer which can be used to pass in the
+ *        pCondData from @ref gpuTimeoutCondWait_IMPL().
+ *
+ * @return NV_TRUE
+ *        Waited condition has happened and @ref
+ *        gpuTimeoutCondWait_IMPL() may return to caller.
+ * @return NV_FALSE
+ *        Waited condition has not happened and @ref
+ *        gpuTimeoutCondWait_IMPL() should continue to wait until this
+ *        interface returns NV_TRUE or timeout occurs (whichever occurs
+ *        first).
+ */
+typedef NvBool GpuWaitConditionFunc(OBJGPU *pGpu, void *pVoid);
+
+/* ------------------------ Function Prototypes ----------------------------- */
+
+void timeoutInitializeGpuDefault(TIMEOUT_DATA *pTD, OBJGPU *pGpu);
+
+void timeoutRegistryOverride(TIMEOUT_DATA *pTD, OBJGPU *pGpu);
+
+void timeoutOverride(TIMEOUT_DATA *pTD, NvBool bOverride, NvU32 timeoutMs);
+
+/*! Initialize the RMTIMEOUT structure with the selected timeout scheme. */
+void timeoutSet(TIMEOUT_DATA *, RMTIMEOUT *, NvU32 timeoutUs, NvU32 flags);
+
+/*! Check if the passed in RMTIMEOUT struct has expired. */
+NV_STATUS timeoutCheck(TIMEOUT_DATA *, RMTIMEOUT *, NvU32);
+
+/*! Wait for the condition to become satisfied while checking for the timeout */
+NV_STATUS timeoutCondWait(TIMEOUT_DATA *, RMTIMEOUT *, GpuWaitConditionFunc *, void *pCondData, NvU32);
+
+/*! Scales timeout values depending on the environment we are running in. */
+static NV_INLINE NvU32 timeoutApplyScale(TIMEOUT_DATA *pTD, NvU32 timeout)
+{
+    return timeout * pTD->scale;
+}
+
+
+// Deprecated macros
+#define gpuSetTimeout(g,a,t,c) timeoutSet(&(g)->timeoutData, t, a, c)
+#define gpuCheckTimeout(g,t) timeoutCheck(&(g)->timeoutData, t, __LINE__)
+#define gpuScaleTimeout(g,a) timeoutApplyScale(&(g)->timeoutData, a)
+#define gpuTimeoutCondWait(g,a,b,t) timeoutCondWait(&(g)->timeoutData, t, a, b, __LINE__)
+
+#define GPU_ENG_RESET_TIMEOUT_VALUE(g, t) (t)
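+
+/*
+ * Typical polling pattern (an illustrative sketch; conditionIsMet() stands in
+ * for whatever HW/SW state the caller polls):
+ *
+ *     RMTIMEOUT timeout;
+ *     NV_STATUS status = NV_OK;
+ *
+ *     gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
+ *     while (!conditionIsMet(pGpu))
+ *     {
+ *         status = gpuCheckTimeout(pGpu, &timeout);
+ *         if (status == NV_ERR_TIMEOUT)
+ *             break;
+ *     }
+ *
+ * GPU_TIMEOUT_DEFAULT selects the per-platform default interval; once it
+ * elapses, gpuCheckTimeout() returns NV_ERR_TIMEOUT and records the cause in
+ * pTimeout->flags. timeoutCondWait() wraps this loop for callers that can
+ * express the condition as a GpuWaitConditionFunc.
+ */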
+
+#endif // _GPU_TIMEOUT_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h
new file mode 100644
index 0000000..8840094
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h
@@ -0,0 +1,52 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _GPUUUID_H_
+#define _GPUUUID_H_
+
+#include "core/core.h"
+#include "nvCpuUuid.h"
+
+//
+// GPU unique ID sizes. RM_SHA1_GID_SIZE uses the first 16 bytes of
+// the SHA-1 digest (this is consistent with the way canonical UUIDs are
+// constructed)
+//
+#define RM_SHA1_GID_SIZE 16
+
+// UUID conversion routine:
+NV_STATUS transformGidToUserFriendlyString(const NvU8 *pGidData, NvU32 gidSize, NvU8 **ppGidString,
+                                           NvU32 *pGidStrlen, NvU32 gidFlags);
+
+NV_STATUS nvGenerateGpuUuid(NvU16 chipId, NvU64 pdi, NvUuid *pUuid);
+
+NV_STATUS nvGenerateSmcUuid(NvU16 chipId, NvU64 pdi,
+                            NvU32 swizzId, NvU32 syspipeId, NvUuid *pUuid);
+
+// 'G' 'P' 'U' '-'(x5), '\0', extra = 9
+#define NV_UUID_STR_LEN ((NV_UUID_LEN << 1) + 9)
+
+void nvGetSmcUuidString(const NvUuid *pUuid, char *pUuidStr);
+
+void nvGetGpuUuidString(const NvUuid *pUuid, char *pUuidStr);
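+
+/*
+ * Usage sketch (illustrative):
+ *
+ *     char uuidStr[NV_UUID_STR_LEN];
+ *     nvGetGpuUuidString(pUuid, uuidStr); // e.g. "GPU-xxxxxxxx-xxxx-..."
+ *
+ * NV_UUID_STR_LEN budgets two hex characters per UUID byte plus the 9 extra
+ * characters noted above: 'G', 'P', 'U', five dashes, and the terminating
+ * '\0'.
+ */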
+
+#endif // _GPUUUID_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h
new file mode 100644
index 0000000..1e02029
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h
@@ -0,0 +1,30 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * GSP MESSAGE QUEUE
+ */
+
+#ifndef _MESSAGE_QUEUE_H_
+#define _MESSAGE_QUEUE_H_
+
+#endif // _MESSAGE_QUEUE_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
new file mode 100644
index 0000000..4225666
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
@@ -0,0 +1,3 @@
+
+#include "g_context_dma_nvoc.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
new file mode 100644
index 0000000..c0f3eb1
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
@@ -0,0 +1,162 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HEAP_BASE_H_
+#define _HEAP_BASE_H_
+
+#include "nvtypes.h"
+#include "core/prelude.h"
+#include "gpu/mem_mgr/mem_desc.h"
+
+// Contains the minimal set of resources used to compute a PTE kind
+typedef struct _def_fb_alloc_page_format
+{
+    NvU32 attr;
+    NvU32 attr2;
+    NvU32 flags;
+    NvU32 kind;
+    NvU32 type;
+} FB_ALLOC_PAGE_FORMAT;
+
+//
+// FB allocation resources structure
+// Need to be allocated from heap
+//
+typedef struct _def_fb_alloc_info
+{
+    NvU32 owner;
+    NvU32 hwResId;
+    NvU32 height;
+    NvU32 width;
+    NvU32 pitch;
+    NvU64 size;
+    NvU64 align;
+    NvU64 alignPad;
+    NvU64 pad;
+    NvU64 offset;
+    NvU32 internalflags;
+    NvU32 retAttr;
+    NvU32 retAttr2;
+    NvU32 format;
+    NvU32 comprCovg;
+    NvU32 zcullCovg;
+    NvU32 uncompressedKind;
+    NvU32 compPageShift;
+    NvU32 compressedKind;
+    NvU32 compTagLineMin;
+    NvU32 compPageIndexLo;
+    NvU32 compPageIndexHi;
+    NvU32 compTagLineMultiplier;
+    NvU32 startCovg;
+    NvU64 origSize;
+    NvU64 adjustedSize;
+    NvU64 desiredOffset;
+
+    FB_ALLOC_PAGE_FORMAT * pageFormat;
+
+    // Tracking client for VGPU
+    NvHandle hClient;
+    NvHandle hDevice;
+
+    // These are only used in Vista
+    // no need yet for possAttr2
+    NvU32 possAttr; // AllocHint, BindCompr
+    NvU32 ctagOffset;
+
+    // Special flag for kernel allocations
+    NvBool bIsKernelAlloc;
+
+    //
+    // Number of 4KB pages in the PTE array
+    // For contiguous allocation, this will be set to '1'
+    //
+    NvU64 pageCount4k;
+
+    // denote that underlying physical allocation is contiguous or not
+    NvBool bContig;
+
+    //
+    // Store the PTE Array to be used for allocating comptaglines
+    // If the NVOS32_ATTR_PHYSICALITY_CONTIGUOUS is set, it will only have
+    // one entry, otherwise it will have dynamically allocated memory
+    // This will track the pages in 4KB granularity
+    //
+    RmPhysAddr pteArray[1];
+} FB_ALLOC_INFO;
+
+//
+// Contains information on the various hw resources (compr, etc...) that
+// can be associated with a memory allocation.
+//
+typedef struct HWRESOURCE_INFO
+{
+    NvU32 attr; // NVOS32_ATTR_*
+    NvU32 attr2; // NVOS32_ATTR2_*
+    NvU32 comprCovg; // compression coverage
+    NvU32 ctagOffset; // comptag offset
+    NvU32 hwResId;
+    NvU32 refCount;
+    NvBool isVgpuHostAllocated; // used in vGPU guest RM to indicate if this HW resource is allocated by host RM or not. Used in Windows guest.
+    NvBool isGuestAllocated; // used in vGPU host RM to indicate if this HW resource is allocated from LIST_OBJECT path on behalf of Linux guest.
+} HWRESOURCE_INFO;
+
+
+typedef struct PMA_ALLOC_INFO
+{
+    NvBool bContig;
+    NvU32 pageCount;
+    NvU32 pageSize;
+    NvU32 refCount;
+    NvU64 allocSize;
+    NvU32 flags;
+    //
+    // If bContig is TRUE, this array consists of one element.
+    // If bContig is FALSE, this array is actually larger and
+    // has one entry for each physical page in the allocation.
+    // As a result, this structure must be allocated from heap.
+    //
+    NvU64 pageArray[1];
+    //!!! Place nothing behind pageArray!!!
+} PMA_ALLOC_INFO;
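+
+/*
+ * Sizing sketch (illustrative): pageArray[] is declared with one element, so
+ * a non-contiguous allocation must over-allocate the structure from heap with
+ * room for one entry per physical page, e.g.
+ *
+ *     NvU32 pageCount = ...; // number of physical pages
+ *     PMA_ALLOC_INFO *pInfo = portMemAllocNonPaged(
+ *         sizeof(*pInfo) + (pageCount - 1) * sizeof(pInfo->pageArray[0]));
+ *
+ * (portMemAllocNonPaged is used here only as a stand-in for whatever heap
+ * allocator the caller owns.) This is also why nothing may be declared after
+ * pageArray; the same layout rule applies to FB_ALLOC_INFO::pteArray above.
+ */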
+ NvBool isGuestAllocated; // Used in vGPU host RM to indicate whether this HW resource was allocated via the LIST_OBJECT path on behalf of a Linux guest. +} HWRESOURCE_INFO; + + +typedef struct PMA_ALLOC_INFO +{ + NvBool bContig; + NvU32 pageCount; + NvU32 pageSize; + NvU32 refCount; + NvU64 allocSize; + NvU32 flags; + // + // If bContig is TRUE, this array consists of one element. + // If bContig is FALSE, this array is actually larger and + // has one entry for each physical page in the allocation. + // As a result, this structure must be allocated from heap. + // + NvU64 pageArray[1]; + // !!! Place nothing after pageArray !!! +} PMA_ALLOC_INFO; + +typedef struct MEMORY_ALLOCATION_REQUEST +{ + NV_MEMORY_ALLOCATION_PARAMS *pUserParams; + OBJGPU *pGpu; + NvHandle hMemory; // in: can be NULL (translates to 0) + NvU32 internalflags; // Extended flags; the regular allocation flags are exhausted. + HWRESOURCE_INFO *pHwResource; // out: data copied in if non-NULL + MEMORY_DESCRIPTOR *pMemDesc; // in/out: allocate memdesc if NULL + PMA_ALLOC_INFO *pPmaAllocInfo[NV_MAX_SUBDEVICES]; // out: tracks the pre-allocated memory per GPU. + NvU32 classNum; + NvHandle hClient; + NvHandle hParent; +} MEMORY_ALLOCATION_REQUEST; + +typedef struct +{ + NvU64 address; + NvU32 type; +} BLACKLIST_ADDRESS; + +#endif //_HEAP_BASE_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h new file mode 100644 index 0000000..6050d43 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h @@ -0,0 +1,3 @@ + +#include "g_mem_desc_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h new file mode 100644 index 0000000..811d902 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h @@ -0,0 +1,3 @@ + +#include "g_mem_mgr_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h new file mode 100644 index 0000000..78ad160 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
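An aside on heap_base.h above: both FB_ALLOC_INFO's pteArray[1] and PMA_ALLOC_INFO's pageArray[1] use the classic pre-C99 trailing-array idiom, which is why the comments insist the structures come from the heap and that nothing be placed after the array. A minimal stand-alone sketch of the sizing math (the real structs need RM types, so a stand-in is used here):

    #include <stdint.h>
    #include <stdlib.h>

    /* Stand-in for PMA_ALLOC_INFO; the real struct carries more fields. */
    typedef struct
    {
        int      bContig;
        uint32_t pageCount;
        uint64_t pageArray[1];  /* must stay last; it grows past the struct */
    } ALLOC_INFO;

    /* pageCount must be >= 1: one slot is already inside the struct. */
    static ALLOC_INFO *allocInfoCreate(uint32_t pageCount)
    {
        size_t bytes = sizeof(ALLOC_INFO) + (pageCount - 1) * sizeof(uint64_t);
        ALLOC_INFO *p = calloc(1, bytes);
        if (p != NULL)
        {
            p->bContig   = (pageCount == 1);
            p->pageCount = pageCount;
        }
        return p;
    }

    int main(void)
    {
        ALLOC_INFO *p = allocInfoCreate(4);
        if (p) { p->pageArray[3] = 0x1000; free(p); }
        return 0;
    }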
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _MEM_UTILS_H_ +#define _MEM_UTILS_H_ + +#include "core/prelude.h" + +#define CLEAR_HAL_ATTR(a) \ + a = (a &~(DRF_NUM(OS32, _ATTR, _COMPR, 0x3) | \ + DRF_NUM(OS32, _ATTR, _TILED, 0x3) | \ + DRF_NUM(OS32, _ATTR, _ZCULL, 0x3))); + +#define CLEAR_HAL_ATTR2(a) \ + a = (a & ~(DRF_SHIFTMASK(NVOS32_ATTR2_ZBC) | \ + DRF_SHIFTMASK(NVOS32_ATTR2_GPU_CACHEABLE))); + +NvU64 memUtilsLeastCommonAlignment(NvU64 align1, NvU64 align2); + +void memUtilsInitFBAllocInfo(NV_MEMORY_ALLOCATION_PARAMS *pAllocParams, FB_ALLOC_INFO *pFbAllocInfo, + NvHandle hClient, NvHandle hDevice); + +NV_STATUS memUtilsAllocMemDesc(OBJGPU *pGpu, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo, + MEMORY_DESCRIPTOR **ppMemDesc, Heap *pHeap, NV_ADDRESS_SPACE addrSpace, + NvBool bContig, NvBool *bAllocedMemDesc); + +#endif //_MEM_UTILS_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h new file mode 100644 index 0000000..779b608 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h @@ -0,0 +1,151 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef VIRT_MEM_ALLOCATOR_COMMON_H +#define VIRT_MEM_ALLOCATOR_COMMON_H + +/********************************* DMA Manager *****************************\ +* * +* DMA object/engine management. * +* * +****************************************************************************/ + +#include "nvtypes.h" +#include "nvgputypes.h" +#include "nvstatus.h" + +typedef struct OBJGPU OBJGPU; +typedef struct ChannelDescendant ChannelDescendant; +typedef struct ContextDma ContextDma; +typedef struct Memory Memory; +typedef struct EVENTNOTIFICATION EVENTNOTIFICATION; +//--------------------------------------------------------------------------- +// +// Memory page defines. 
+// +// These correspond to the granularity understood by the hardware +// for address mapping; the system page size can be larger. +// +//--------------------------------------------------------------------------- +#define RM_PAGE_SIZE 4096 +#define RM_PAGE_SIZE_64K (64 * 1024) +#define RM_PAGE_SIZE_128K (128 * 1024) +#define RM_PAGE_MASK 0x0FFF +#define RM_PAGE_SHIFT 12 +#define RM_PAGE_SHIFT_64K 16 + +// Huge page size is 2 MB +#define RM_PAGE_SHIFT_HUGE 21 +#define RM_PAGE_SIZE_HUGE (1 << RM_PAGE_SHIFT_HUGE) +#define RM_PAGE_MASK_HUGE ((1 << RM_PAGE_SHIFT_HUGE) - 1) + +// 512MB page size +#define RM_PAGE_SHIFT_512M 29 +#define RM_PAGE_SIZE_512M (1 << RM_PAGE_SHIFT_512M) +#define RM_PAGE_MASK_512M (RM_PAGE_SIZE_512M - 1) + +//--------------------------------------------------------------------------- +// +// Memory page attributes. +// +// These attributes are used by software for page size mapping; +// Big pages can be of 64/128KB[Fermi/Kepler/Pascal] +// Huge page is 2 MB[Pascal+] +// 512MB page is Ampere+ +// Default page attribute lets driver decide the optimal page size +// +//--------------------------------------------------------------------------- +typedef enum +{ + RM_ATTR_PAGE_SIZE_DEFAULT = 0x0, + RM_ATTR_PAGE_SIZE_4KB = 0x1, + RM_ATTR_PAGE_SIZE_BIG = 0x2, + RM_ATTR_PAGE_SIZE_HUGE = 0x3, + RM_ATTR_PAGE_SIZE_512MB = 0x4, + RM_ATTR_PAGE_SIZE_INVALID = 0x5 +} +RM_ATTR_PAGE_SIZE; + +//--------------------------------------------------------------------------- +// +// Notification buffer structure. +// +//--------------------------------------------------------------------------- +typedef union _def_info_status_buffer +{ + struct + { + NvV16 OtherInfo16; + NvV16 Status; + } Info16Status_16; + + NvU32 Info16Status_32; + +} INFO16_STATUS; + +typedef struct _def_notification_buffer +{ + NvU32 TimeLo; + NvU32 TimeHi; + NvV32 OtherInfo32; + INFO16_STATUS Info16Status; +} NOTIFICATION, *PNOTIFICATION; + + +//--------------------------------------------------------------------------- +// +// Function prototypes. 
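Before the prototypes, a note on the page-size constants above: RM_PAGE_MASK is RM_PAGE_SIZE - 1 and RM_PAGE_SHIFT is log2(RM_PAGE_SIZE), so rounding and page-counting reduce to mask-and-shift. A small self-contained sketch (helper names are illustrative, not RM's):

    #include <stdint.h>
    #include <stdio.h>

    #define RM_PAGE_SIZE  4096
    #define RM_PAGE_MASK  0x0FFF
    #define RM_PAGE_SHIFT 12

    /* Round a byte count up to the 4KB RM page granularity. */
    static uint64_t rmPageAlignUp(uint64_t bytes)
    {
        return (bytes + RM_PAGE_MASK) & ~(uint64_t)RM_PAGE_MASK;
    }

    int main(void)
    {
        uint64_t size    = 5000;
        uint64_t aligned = rmPageAlignUp(size);      /* 8192 */
        uint64_t pages   = aligned >> RM_PAGE_SHIFT; /* 2    */
        printf("%llu -> %llu bytes, %llu pages\n",
               (unsigned long long)size, (unsigned long long)aligned,
               (unsigned long long)pages);
        return 0;
    }

The same pattern applies at the other granularities (RM_PAGE_MASK_HUGE, RM_PAGE_MASK_512M), each mask being its page size minus one.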
+// +//--------------------------------------------------------------------------- +void notifyMethodComplete(OBJGPU*, ChannelDescendant *, NvU32, NvV32, NvU32, NvU16, NV_STATUS); + +NV_STATUS notifyFillNotifier (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS); +NV_STATUS notifyFillNotifierOffset (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU64); +NV_STATUS notifyFillNotifierOffsetTimestamp(OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU64, NvU64); +NV_STATUS notifyFillNotifierArray (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierArrayTimestamp (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +void notifyFillNOTIFICATION(OBJGPU *pGpu, + NOTIFICATION *pNotifyBuffer, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time); +NV_STATUS notifyFillNotifierGPUVA (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierGPUVATimestamp (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +NV_STATUS notifyFillNotifierMemory (OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierMemoryTimestamp(OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +void notifyFillNvNotification(OBJGPU *pGpu, + NvNotification *pNotification, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time); + +NV_STATUS semaphoreFillGPUVA (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV32, NvBool); +NV_STATUS semaphoreFillGPUVATimestamp(OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV32, NvBool, NvU64); + +RM_ATTR_PAGE_SIZE dmaNvos32ToPageSizeAttr(NvU32 attr, NvU32 attr2); + +#endif // VIRT_MEM_ALLOCATOR_COMMON_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h new file mode 100644 index 0000000..c1a8628 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h @@ -0,0 +1,3 @@ + +#include "g_generic_engine_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h new file mode 100644 index 0000000..a9b688e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h @@ -0,0 +1,3 @@ + +#include "g_subdevice_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h new file mode 100644 index 0000000..0a4dc41 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h @@ -0,0 +1,3 @@ + +#include "g_gpu_db_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h new file mode 100644 index 0000000..425a303 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h @@ -0,0 +1,3 @@ + +#include "g_gpu_group_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h new file mode 100644 index 0000000..f3acd0c --- /dev/null +++ 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h @@ -0,0 +1,3 @@ + +#include "g_gpu_mgmt_api_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h new file mode 100644 index 0000000..068c748 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h @@ -0,0 +1,3 @@ + +#include "g_gpu_mgr_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h new file mode 100644 index 0000000..2f07507 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h @@ -0,0 +1,3 @@ + +#include "g_io_vaspace_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h new file mode 100644 index 0000000..f2f8ce0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h @@ -0,0 +1,3 @@ + +#include "g_mem_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h new file mode 100644 index 0000000..8b9685b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h @@ -0,0 +1,3 @@ + +#include "g_os_desc_mem_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h new file mode 100644 index 0000000..f4b5ecb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h @@ -0,0 +1,3 @@ + +#include "g_standard_mem_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h new file mode 100644 index 0000000..4894657 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h @@ -0,0 +1,3 @@ + +#include "g_syncpoint_mem_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h new file mode 100644 index 0000000..9ff0b13 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h @@ -0,0 +1,3 @@ + +#include "g_system_mem_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h new file mode 100644 index 0000000..6910058 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h @@ -0,0 +1,3 @@ + +#include "g_vaspace_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h new file mode 100644 index 0000000..ab20731 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h @@ -0,0 +1,3 @@ + +#include "g_virt_mem_mgr_nvoc.h" + diff --git 
a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h new file mode 100644 index 0000000..e2fa861 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _OS_CAPABILITY_H_ +#define _OS_CAPABILITY_H_ + +// OS specific RM capabilities structure +typedef struct OS_RM_CAPS OS_RM_CAPS; + +// RM capabilities +#define NV_RM_CAP_SYS_BASE 0x0 +#define NV_RM_CAP_SYS_PROFILER_CONTEXT (NV_RM_CAP_SYS_BASE + 0) +#define NV_RM_CAP_SYS_PROFILER_DEVICE (NV_RM_CAP_SYS_BASE + 1) +#define NV_RM_CAP_SYS_SMC_CONFIG (NV_RM_CAP_SYS_BASE + 2) +#define NV_RM_CAP_SYS_SMC_MONITOR (NV_RM_CAP_SYS_BASE + 3) + +#define NV_RM_CAP_SMC_PARTITION_BASE 0x100 +#define NV_RM_CAP_SMC_PARTITION_ACCESS (NV_RM_CAP_SMC_PARTITION_BASE + 0) + +#define NV_RM_CAP_EXT_BASE 0x200 +#define NV_RM_CAP_EXT_FABRIC_MGMT (NV_RM_CAP_EXT_BASE + 0) + +#define NV_RM_CAP_SMC_EXEC_PARTITION_BASE 0x300 +#define NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS (NV_RM_CAP_SMC_EXEC_PARTITION_BASE + 0) + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h new file mode 100644 index 0000000..34255c7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
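On the NV_RM_CAP_* values in capability.h above: they are plain integers grouped by base offset (0x0 system, 0x100 SMC partition, 0x200 extension, 0x300 SMC exec partition). A hypothetical classifier, purely to make the numbering scheme concrete (RM ships no such helper):

    #include <stdio.h>

    static const char *capGroup(unsigned cap)
    {
        if (cap >= 0x300) return "SMC exec partition";
        if (cap >= 0x200) return "extension (e.g. fabric mgmt)";
        if (cap >= 0x100) return "SMC partition";
        return "system";
    }

    int main(void)
    {
        printf("%s\n", capGroup(0x200)); /* NV_RM_CAP_EXT_FABRIC_MGMT -> extension */
        return 0;
    }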
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_MEMORY_TYPE_H +#define NV_MEMORY_TYPE_H + +#define NV_MEMORY_NONCONTIGUOUS 0 +#define NV_MEMORY_CONTIGUOUS 1 + +#define NV_MEMORY_CACHED 0 +#define NV_MEMORY_UNCACHED 1 +#define NV_MEMORY_WRITECOMBINED 2 +#define NV_MEMORY_WRITEBACK 5 +#define NV_MEMORY_DEFAULT 6 +#define NV_MEMORY_UNCACHED_WEAK 7 + +#define NV_PROTECT_READABLE 1 +#define NV_PROTECT_WRITEABLE 2 +#define NV_PROTECT_READ_WRITE (NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE) + +#endif /* NV_MEMORY_TYPE_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h new file mode 100644 index 0000000..c58aa0c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h @@ -0,0 +1,3 @@ + +#include "g_os_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h new file mode 100644 index 0000000..5b0e989 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _OS_FIXED_MODE_TIMINGS_PROPS_H_ +#define _OS_FIXED_MODE_TIMINGS_PROPS_H_ + +#include "gpu/disp/kern_disp_max.h" +#include "nvtypes.h" + +typedef struct +{ + NvU16 hActive; + NvU16 hFrontPorch; + NvU16 hSyncWidth; + NvU16 hBackPorch; + + NvU16 vActive; + NvU16 vFrontPorch; + NvU16 vSyncWidth; + NvU16 vBackPorch; + + NvU32 pclkKHz; + NvU32 rrx1k; +} OS_MODE_TIMING; + +typedef struct +{ + OS_MODE_TIMING timingsPerStream[OBJ_MAX_HEADS]; + NvU8 numTimings; +} OS_FIXED_MODE_TIMINGS; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h new file mode 100644 index 0000000..11132d4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h @@ -0,0 +1,87 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef OS_STUB_H +#define OS_STUB_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Extern definitions of all public stub function interfaces * +* * +\***************************************************************************/ + +#include "os/os.h" + +// +// Each of these stub functions returns a different type. Used to +// stub out function pointers in OBJOS. 
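Returning briefly to OS_MODE_TIMING above: the four horizontal and four vertical fields sum to the total raster, and rrx1k reads like the refresh rate scaled by 1000 (an assumption; the header does not say). Under that reading, the fields relate as in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 1920x1080@60 CEA-861 timing, pixel clock 148.5 MHz */
        uint32_t hActive = 1920, hFrontPorch = 88, hSyncWidth = 44, hBackPorch = 148;
        uint32_t vActive = 1080, vFrontPorch = 4,  vSyncWidth = 5,  vBackPorch = 36;
        uint32_t pclkKHz = 148500;

        uint32_t hTotal = hActive + hFrontPorch + hSyncWidth + hBackPorch; /* 2200 */
        uint32_t vTotal = vActive + vFrontPorch + vSyncWidth + vBackPorch; /* 1125 */

        /* Refresh in 0.001 Hz units, assuming rrx1k == rate * 1000. */
        uint32_t rrx1k = (uint32_t)(((uint64_t)pclkKHz * 1000u * 1000u) /
                                    ((uint64_t)hTotal * vTotal));
        printf("rrx1k = %u (%u.%03u Hz)\n", rrx1k, rrx1k / 1000, rrx1k % 1000);
        return 0;
    }

With these numbers the sketch prints rrx1k = 60000, i.e. 60.000 Hz.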
+// +OSQADbgRegistryInit stubOsQADbgRegistryInit; +OSnv_rdcr4 stubOsnv_rdcr4; +OSnv_rdxcr0 stubOsnv_rdxcr0; +OSnv_cpuid stubOsnv_cpuid; +OSnv_rdmsr stubOsnv_rdmsr; +OSnv_wrmsr stubOsnv_wrmsr; +OSRobustChannelsDefaultState stubOsRobustChannelsDefaultState; +OSSpinLoop stubOsSpinLoop; +OSDbgBreakpointEnabled stubOsDbgBreakpointEnabled; +OSQueueWorkItem stubOsQueueWorkItem; +OSQueueWorkItemWithFlags stubOsQueueWorkItemWithFlags; +OSQueueSystemWorkItem stubOsQueueSystemWorkItem; +OSSimEscapeWrite stubOsSimEscapeWrite; +OSSimEscapeWriteBuffer stubOsSimEscapeWriteBuffer; +OSSimEscapeRead stubOsSimEscapeRead; +OSSimEscapeReadBuffer stubOsSimEscapeReadBuffer; +OSCallACPI_MXMX stubOsCallACPI_MXMX; +OSCallACPI_DSM stubOsCallACPI_DSM; +OSCallACPI_DDC stubOsCallACPI_DDC; +OSCallACPI_BCL stubOsCallACPI_BCL; +OSCallACPI_ON stubOsCallACPI_ON; +OSCallACPI_OFF stubOsCallACPI_OFF; +OSCallACPI_NVHG_GPUON stubOsCallWMI_NVHG_GPUON; +OSCallACPI_NVHG_GPUOFF stubOsCallWMI_NVHG_GPUOFF; +OSCallACPI_NVHG_GPUSTA stubOsCallWMI_NVHG_GPUSTA; +OSCallACPI_NVHG_MXDS stubOsCallWMI_NVHG_MXDS; +OSCallACPI_NVHG_MXMX stubOsCallWMI_NVHG_MXMX; +OSCallACPI_NVHG_DOS stubOsCallWMI_NVHG_DOS; +OSCallACPI_NVHG_ROM stubOsCallWMI_NVHG_ROM; +OSCallACPI_NVHG_DCS stubOsCallWMI_NVHG_DCS; +OSCallACPI_DOD stubOsCallWMI_DOD; +OSCheckCallback stubOsCheckCallback; +OSRCCallback stubOsRCCallback; + +OSCallACPI_NBPS stubOsCallACPI_NBPS; +OSCallACPI_NBSL stubOsCallACPI_NBSL; +OSCallACPI_OPTM_GPUON stubOsCallWMI_OPTM_GPUON; +OSSetupVBlank stubOsSetupVBlank; +OSObjectEventNotification stubOsObjectEventNotification; +OSPageArrayGetPhysAddr stubOsPageArrayGetPhysAddr; +OSInternalReserveFreeCallback stubOsInternalReserveFreeCallback; +OSInternalReserveAllocCallback stubOsInternalReserveAllocCallback; +OSGetUefiVariable stubOsGetUefiVariable; +OSCallACPI_MXDS stubOsCallACPI_MXDS; +OSCallACPI_MXDM stubOsCallACPI_MXDM; +OSCallACPI_MXID stubOsCallACPI_MXID; +OSCallACPI_LRST stubOsCallACPI_LRST; + +#endif // OS_STUB_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h new file mode 100644 index 0000000..2b46703 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h @@ -0,0 +1,113 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
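A remark on the os_stub.h declarations above: forms like "OSSpinLoop stubOsSpinLoop;" work because each OSxxx name is presumably a function typedef from the generated os.h; C allows a function typedef to declare (though not define) a function. A self-contained sketch of the idiom, with a hypothetical signature:

    #include <stdio.h>

    /* Hypothetical typedef; the real one lives in the generated os.h. */
    typedef int OSSpinLoopFn(void);

    OSSpinLoopFn stubOsSpinLoop;  /* declares: int stubOsSpinLoop(void); */

    int stubOsSpinLoop(void)      /* the definition must be written out  */
    {
        return 0;                 /* stubs just return a default value   */
    }

    int main(void)
    {
        return stubOsSpinLoop();
    }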
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _ACPICOMMON_H_ +#define _ACPICOMMON_H_ + +#include "acpigenfuncs.h" +#include "core/core.h" +#include "rmconfig.h" + +NV_STATUS testIfDsmFuncSupported(OBJGPU *, ACPI_DSM_FUNCTION); +NV_STATUS testIfDsmSubFunctionEnabled(OBJGPU *, ACPI_DSM_FUNCTION, NvU32); +NV_STATUS remapDsmFunctionAndSubFunction(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *); +NV_STATUS getDsmGetObjectSubfunction(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *, NvU32*); +void cacheDsmSupportedFunction(OBJGPU *, ACPI_DSM_FUNCTION, NvU32, NvU32 *, NvU32); +NV_STATUS checkDsmCall(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *, NvU32 *, NvU16 *); +void acpiDsmInit(OBJGPU *); +NV_STATUS getLicenseKey(OBJGPU *, NvU32, NvU32 *, NvU16 *); +void uncacheDsmFuncStatus(OBJGPU *, ACPI_DSM_FUNCTION, NvU32); + +// useful macros +#if NV_PRINTF_ENABLED +#define DSMFuncStr(func) (func <= ACPI_DSM_FUNCTION_CURRENT ? DSMCalls[func] : DSMCalls[ACPI_DSM_FUNCTION_COUNT]) +#endif + +#define isDsmGetSuppFuncListCached(pGpu, acpiDsmFunction) (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus != DSM_FUNC_STATUS_UNKNOWN) +#define isDsmGetSuppFuncListFailed(pGpu, acpiDsmFunction) (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus == DSM_FUNC_STATUS_FAILED) +#define isGenericDsmFunction(acpiDsmFunction) (acpiDsmFunction >= ACPI_DSM_FUNCTION_COUNT) +#define isGenericDsmSubFunction(acpiDsmSubFunction) (acpiDsmSubFunction >= NV_ACPI_GENERIC_FUNC_START) + + +#define NV_ACPI_ALL_FUNC_SUPPORT 0x00000000 // Index of the common 'get supported subfunctions' query. +#define NV_ACPI_ALL_FUNC_SUPPORTED NVBIT(NV_ACPI_ALL_FUNC_SUPPORT) // Mask bit: is the common subfunction supported? +#define NV_ACPI_ALL_SUBFUNC_UNKNOWN 0xFFFFFFFF // Common define for unknown ACPI sub-function + +// All the callbacks (MXM, NBCI, NVHG) use the same bits. These are common.
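The defines that follow use the resman high:low bit-range notation (e.g. 7:4 selects four bits); the DRF macro family turns such a range into shifts and masks. Extracting the current power state field by hand, as a sketch (field layout taken from the defines below):

    #include <stdio.h>

    /* Hand expansion of NV_ACPI_CALLBACKS_ARG_CURRENTPOWERSTATE 7:4 */
    static unsigned currentPowerState(unsigned arg)
    {
        return (arg >> 4) & 0xF;  /* low bit 4, width 7 - 4 + 1 = 4 */
    }

    int main(void)
    {
        printf("%u\n", currentPowerState(0x35)); /* bits 7:4 of 0x35 -> 3 */
        return 0;
    }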
+#define NV_ACPI_CALLBACKS_ARG_POSTPOWERSTATE 2:2 +#define NV_ACPI_CALLBACKS_ARG_POSTPOWERSTATE_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_ARG_CURRENTPOWERSTATE 7:4 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE 8:8 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE_NO 0x00000000 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE_YES 0x00000001 + + +#define NV_ACPI_CALLBACKS_RET_POSTPOWERSTATE 2:2 +#define NV_ACPI_CALLBACKS_RET_POSTPOWERSTATE_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_HOTPLUG 9:9 +#define NV_ACPI_CALLBACKS_RET_HOTPLUG_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_CONFIG 10:10 +#define NV_ACPI_CALLBACKS_RET_CONFIG_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_3DSTEREOSTATE_ACTIVE 12:12 +#define NV_ACPI_CALLBACKS_RET_3DSTEREOSTATE_ACTIVE_NOTIFY 0x00000001 + +#define ACPI_NOTIFY_DOCK_EVENT 0x77 +#define ACPI_NOTIFY_PANEL_SWITCH 0x80 +#define ACPI_NOTIFY_DEVICE_HOTPLUG 0x81 +#define ACPI_NOTIFY_CYCLE_DISPLAY_HOTKEY 0x82 +#define ACPI_NOTIFY_NEXT_DISPLAY_HOTKEY 0x83 +#define ACPI_NOTIFY_PREV_DISPLAY_HOTKEY 0x84 +#define ACPI_NOTIFY_CYCLE_BRIGHTNESS_HOTKEY 0x85 +#define ACPI_NOTIFY_INC_BRIGHTNESS_HOTKEY 0x86 +#define ACPI_NOTIFY_DEC_BRIGHTNESS_HOTKEY 0x87 +#define ACPI_NOTIFY_ZERO_BRIGHTNESS_HOTKEY 0x88 +#define ACPI_NOTIFY_VIDEO_WAKEUP 0x90 + +#define ACPI_NOTIFY_GPS_STATUS_CHANGE 0xC0 + +#define ACPI_NOTIFY_BACKLIGHT_OFF 0xC1 +#define ACPI_NOTIFY_BACKLIGHT_ON 0xC2 + +#define ACPI_NOTIFY_POWER_LEVEL_D1 0xD1 +#define ACPI_NOTIFY_POWER_LEVEL_D2 0xD2 +#define ACPI_NOTIFY_POWER_LEVEL_D3 0xD3 +#define ACPI_NOTIFY_POWER_LEVEL_D4 0xD4 +#define ACPI_NOTIFY_POWER_LEVEL_D5 0xD5 + +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 + +#define NV_ACPI_DSM_READ_SIZE (4*1024) // 4K as per spec + +// **************************************************** +// For _ROM Get ROM Data Method +// **************************************************** +#define ROM_METHOD_MAX_RETURN_BUFFER_SIZE 0x1000 + +// these are debug strings for printing which DSM subfunction didn't work. +// These map directly to the ACPI_DSM_FUNCTION enum in gpu/gpu.h. +#if NV_PRINTF_ENABLED +extern const char * const DSMCalls[]; +#endif + +#endif // _ACPICOMMON_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h new file mode 100644 index 0000000..969069b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef RMSLI_H +#define RMSLI_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Private SLI related defines and structures. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvlimits.h" +#include "gpu_mgr/gpu_mgr.h" + +#define IsDeviceDestroyed(p) (gpuGetDeviceInstance(p) == NV_MAX_DEVICES) + +// Unlinked SLI is implemented in RM clients +#define IsUnlinkedSLIEnabled(p) ((p)->getProperty((p), PDB_PROP_GPU_RM_UNLINKED_SLI)) + +#define IsSLIEnabled(p) 0 +#define NumSubDevices(p) 0 + +#define SLI_LOOP_START(sliLoopFlags) { NvU32 loopIndex = 0; do { +#define SLI_LOOP_END } while (loopIndex); } +#define SLI_LOOP_BREAK break +#define SLI_LOOP_CONTINUE continue +#define SLI_LOOP_GOTO(loc) { goto loc; } +#define SLI_LOOP_RETURN(SLi_ret) { return(SLi_ret); } +#define SLI_LOOP_RETURN_VOID { return; } + +// macro to use when declaring array variables that will be used within SLI_LOOPs +#define SLI_LOOP_ARRAY_SIZE (NV_MAX_SUBDEVICES+1) + +// macro to verify that arrays are properly sized +#define VERIFY_SLI_LOOP_ARRAY_SIZE(arr) \ +do { \ + if (sizeof(arr) > sizeof(void *)) \ + { \ + NV_ASSERT(SLI_LOOP_ARRAY_SIZE == (sizeof(arr) / sizeof(arr[0]))); \ + } \ +} while (0) + +#endif // RMSLI_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h new file mode 100644 index 0000000..55734f2 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _ALLOC_SIZE_H_ +#define _ALLOC_SIZE_H_ + +#include "nvstatus.h" + +/* + * rmapiGetClassAllocParamSize() + * + * Returns class size in number of bytes.
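Back in sli.h above: with SLI compiled out (IsSLIEnabled hardcoded to 0), the SLI_LOOP macros still expand to a valid do/while whose body runs exactly once. A compilable sketch of the expansion, with the stub macros copied verbatim:

    #include <stdio.h>

    typedef unsigned int NvU32;

    /* Copies of the stub macros from sli.h above. */
    #define SLI_LOOP_START(sliLoopFlags) { NvU32 loopIndex = 0; do {
    #define SLI_LOOP_END                 } while (loopIndex); }

    int main(void)
    {
        SLI_LOOP_START(0)
            printf("subdevice %u\n", loopIndex); /* runs once: loopIndex == 0 */
        SLI_LOOP_END
        return 0;
    }

Because loopIndex stays 0, the while test fails after the first pass, which is exactly the single-GPU behavior the stubs encode.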
Returns zero + * if the specified class has no optional allocation parameters + * + */ +NV_STATUS rmapiGetClassAllocParamSize(NvU32 *pAllocParamSizeBytes, NvP64 pUserParams, NvBool *pBAllowNull, NvU32 hClass); + +#endif // _ALLOC_SIZE_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h new file mode 100644 index 0000000..9b461d5 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_binary_api_nvoc.h" + +#ifndef BINARY_API_H +#define BINARY_API_H + +#include "core/core.h" +#include "rmapi/resource.h" +#include "gpu/gpu_resource.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" + +NVOC_PREFIX(binapi) class BinaryApi : GpuResource +{ +public: + NV_STATUS binapiConstruct(BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams) : + GpuResource(pCallContext, pParams); + + virtual NV_STATUS binapiControl(BinaryApi *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +}; + +NVOC_PREFIX(binapipriv) class BinaryApiPrivileged : BinaryApi +{ +public: + NV_STATUS binapiprivConstruct(BinaryApiPrivileged *pResource, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams) : + BinaryApi(pCallContext, pParams); + + virtual NV_STATUS binapiprivControl(BinaryApiPrivileged *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +}; + +#endif + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h new file mode 100644 index 0000000..bf1a434 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h @@ -0,0 +1,3 @@ + +#include "g_client_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h new file mode 100644 index 0000000..405ee3a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h @@ -0,0 +1,3 @@ + +#include "g_client_resource_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h new file mode 100644 index 0000000..e95f576 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h @@ -0,0 +1,272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _CONTROL_H_ +#define _CONTROL_H_ + +#include "core/core.h" + +#include "resserv/rs_resource.h" +#include "resserv/resserv.h" + +#include "utils/nvmacro.h" +#include "rmapi/param_copy.h" + +struct NVOC_EXPORTED_METHOD_DEF; +typedef RS_RES_CONTROL_PARAMS_INTERNAL RmCtrlParams; + +// +// RmCtrlExecuteCookie +// +// This typedef describes the data used by the rmctrl cmd execution +// path. The data is filled at the beginning of rmControlCmdExecute() +// and used as necessary in the other stages. +// +struct RS_CONTROL_COOKIE +{ + // Rmctrl Command ID + NvU32 cmd; + + // Rmctrl Flags + NvU32 ctrlFlags; + + // Required Access Rights for this command + const RS_ACCESS_MASK rightsRequired; + + NvBool bFreeParamCopy; ///< Indicates that param copies should be cleaned up + NvBool bFreeEmbeddedCopy; ///< Indicates embedded param copies should be cleaned up + + RMAPI_PARAM_COPY paramCopy; + RMAPI_PARAM_COPY embeddedParamCopies[4]; // Up to 4 embedded pointers are recognized per RmControl +}; +typedef RS_CONTROL_COOKIE RmCtrlExecuteCookie; + +// values for RmCtrlDeferredCmd.pending +#define RMCTRL_DEFERRED_FREE 0 // buffer is free +#define RMCTRL_DEFERRED_ACQUIRED 1 // buffer is acquired to fill in data +#define RMCTRL_DEFERRED_READY 2 // buffer is acquired and data has been copied. + +#define RMCTRL_DEFERRED_MAX_PARAM_SIZE 128 // 128 bytes internal buffer for rmctrl param + +typedef struct +{ + NvS32 volatile pending; + NvU32 cpuInst; + RmCtrlParams rmCtrlDeferredParams; + NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE]; // buffer to hold rmCtrlDeferredParams.pParams +} RmCtrlDeferredCmd; + +// catch commands misdirected to non-existent engines +#define VERIFY_OBJ_PTR(p) if (p == NULL) return NV_ERR_INVALID_ARGUMENT + +// macros to get/set/clear cap bits +#define RMCTRL_GET_CAP(tbl,cap,field) (((NvU8)tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)) +#define RMCTRL_SET_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field)) +#define RMCTRL_CLEAR_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) &= ~(0?cap##field)) + +// macros to AND/OR caps between two tables +#define RMCTRL_AND_CAP(finaltbl,tmptbl,tmp,cap,field) \ + tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] & tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp; + +#define RMCTRL_OR_CAP(finaltbl,tmptbl,tmp,cap,field) \ + tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] | tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp; +
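The RMCTRL_DEFERRED_* values earlier in this header form a small ownership protocol for RmCtrlDeferredCmd.pending: a producer moves the buffer FREE -> ACQUIRED, fills the 128-byte paramBuffer, publishes it as READY, and the consumer eventually returns it to FREE. A sketch of the intended transitions (the real code presumably synchronizes the volatile field with atomics; plain assignments here):

    #include <stdio.h>

    #define RMCTRL_DEFERRED_FREE     0
    #define RMCTRL_DEFERRED_ACQUIRED 1
    #define RMCTRL_DEFERRED_READY    2

    typedef struct { volatile int pending; } DeferredCmd;

    static int producerPublish(DeferredCmd *p)
    {
        if (p->pending != RMCTRL_DEFERRED_FREE)
            return -1;                          /* busy: someone else owns it */
        p->pending = RMCTRL_DEFERRED_ACQUIRED;  /* claim the buffer           */
        /* ... copy params into the internal buffer here ...                 */
        p->pending = RMCTRL_DEFERRED_READY;     /* hand off to the consumer   */
        return 0;
    }

    int main(void)
    {
        DeferredCmd cmd = { RMCTRL_DEFERRED_FREE };
        printf("publish: %d, state: %d\n", producerPublish(&cmd), cmd.pending);
        return 0;
    }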
+// Is the command ID a NULL command? +// We allow NVXXXX_CTRL_CMD_NULL (0x00000000) as well as the +// per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 ) +#define RMCTRL_IS_NULL_CMD(cmd) ((cmd == NVXXXX_CTRL_CMD_NULL) || \ + (FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && \ + FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd))) + +// top-level internal RM Control interface +NV_STATUS rmControl_Deferred(RmCtrlDeferredCmd *pRmCtrlDeferredCmd); + +// Helper functions for handling embedded parameter copies +NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams); +NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams); + +#define RM_CLIENT_PTR_ACCESS_CHECK_READ NVBIT(0) +#define RM_CLIENT_PTR_ACCESS_CHECK_WRITE NVBIT(1) + +// +// For NVOC Exported functions +// +// RMCTRL_FLAGS(A, B, C) is expanded to +// 0 | RMCTRL_FLAGS_A | RMCTRL_FLAGS_B | RMCTRL_FLAGS_C +// +// ACCESS_RIGHTS(A, B, C) is expanded to +// 0 | NVBIT(RS_ACCESS_A) | NVBIT(RS_ACCESS_B) | NVBIT(RS_ACCESS_C) +// +#define RMCTRL_EXPORT(cmdId, ...) [[nvoc::export(cmdId, __VA_ARGS__)]] +#define _RMCTRL_PREP_FLAG_ARG(x) | NV_CONCATENATE(RMCTRL_FLAGS_, x) +#define RMCTRL_FLAGS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_FLAG_ARG, __VA_ARGS__)) +#define _RMCTRL_PREP_ACCESS_ARG(x) | NVBIT(NV_CONCATENATE(RS_ACCESS_, x)) +#define ACCESS_RIGHTS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_ACCESS_ARG, __VA_ARGS__)) + +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \ + ( \ + (ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && \ + !RMCFG_FEATURE_PHYSICAL_RM \ + ) + +// +// 'FLAGS' Attribute +// ----------------- +// +// RMCTRL_FLAGS is used to specify per-command state. +// + +#define RMCTRL_FLAGS_NONE 0x000000000 + +// +// If the KERNEL_PRIVILEGED flag is specified, the call will only be allowed +// for kernel mode callers (such as other kernel drivers) using a privileged +// kernel RM client (CliCheckIsKernelClient() returning true). Otherwise, +// NV_ERR_INSUFFICIENT_PERMISSIONS is returned. +// +#define RMCTRL_FLAGS_KERNEL_PRIVILEGED 0x000000000 + +// +// The resman rmcontrol handler will not grab the "gpus lock" +// before executing the implementing function. +// +// Please be sure you know what you're doing before using this! +// +#define RMCTRL_FLAGS_NO_GPUS_LOCK 0x000000001 + +// +// Indicate to resman that this rmcontrol does not access any gpu +// resources and can therefore run even when the gpu is powered down. +// +// Please be sure you know what you're doing before using this! +// +#define RMCTRL_FLAGS_NO_GPUS_ACCESS 0x000000002 + +// +// If the PRIVILEGED flag is specified, the call will only be allowed for +// a) user contexts with admin privileges (osIsAdministrator() returning true), or +// b) kernel mode callers, such as other kernel drivers. +// Otherwise, NV_ERR_INSUFFICIENT_PERMISSIONS is returned. +// +#define RMCTRL_FLAGS_PRIVILEGED 0x000000004 + +// +// If the NON_PRIVILEGED flag is specified, the call will be allowed from any +// client. +// +#define RMCTRL_FLAGS_NON_PRIVILEGED 0x000000010 + +// +// The resman rmcontrol handler will grab the per-device lock instead +// of the "gpus lock" before executing the implementing function. +// +// Please be sure you know what you're doing before using this! +// +#define RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY 0x000000040 + +// +// This flag is equivalent to PRIVILEGED when the RM access rights +// implementation is disabled. Otherwise, it has no effect.
+// +// The purpose of this flag is to aid in the transition to the access rights +// system, so that access rights can be used for control calls that were +// previously PRIVILEGED. Once access rights are enabled, this flag will no +// longer be necessary. +// +#define RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED 0x000000100 // for Resserv Access Rights migration + +// +// This flag specifies that the control shall be directly forwarded to the +// physical object if called on the CPU-RM kernel. +// +#define RMCTRL_FLAGS_ROUTE_TO_PHYSICAL 0x000000200 + +// +// If the INTERNAL flag is specified, the call will only be allowed +// to be issued from RM itself. Otherwise, NV_ERR_NOT_SUPPORTED is returned. +// +#define RMCTRL_FLAGS_INTERNAL 0x000000400 + +// +// If the API_LOCK_READONLY flag is specified, the call will acquire the +// read-only API lock and may run concurrently with other operations that have +// also taken the read-only API lock. This flag is ignored if read-only API +// locking is disabled in RM. +// +#define RMCTRL_FLAGS_API_LOCK_READONLY 0x000000800 + +// +// If the :GPU_LOCK_READONLY flag is specified, the call will acquire a +// read-only GPU lock and may run concurrently with other operations that have +// also taken a read-only GPU lock. This flag is ignored if read-only GPU +// locking is disabled in RM. +// +#define RMCTRL_FLAGS_GPU_LOCK_READONLY 0x000001000 + +// +// This flag specifies that the control shall be directly forwarded to the +// the VGPU host if called from a guest (where IS_VIRTUAL() is true) +// +#define RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST 0x000002000 + +// +// This flag specifies that the control output does not depend on the input +// parameters and can be cached on the receiving end. +// The cache is transparent and may not exist on all platforms. +// +#define RMCTRL_FLAGS_CACHEABLE 0x000004000 + +// +// This flag specifies that the control parameters will be +// copied out back to the caller even if the control call fails. +// +#define RMCTRL_FLAGS_COPYOUT_ON_ERROR 0x000008000 + +// ?? +#define RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS 0x000010000 + + +// +// 'ACCESS_RIGHTS' Attribute +// ------------------------ +// +// Used to specify a set of access rights that the client must hold on the +// target resource to execute this control call. Note that this can only check +// access rights on the target object; for other objects, such as those +// specified by handles in parameter structs, checks must be done manually. +// +// The definition of each access right and its meaning is provided in the +// README located at drivers/common/shared/accessrights/README. The prefix +// "RS_ACCESS" is appended to each entry in the control call definition; +// for example, :NICE -> RS_ACCESS_NICE. +// +// This attribute only has an effect when the RM access rights implementation +// is enabled; see g_bRsAccessEnabled. 
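Since every RMCTRL_FLAGS_* value above is a distinct bit (or zero for the defaults), dispatch code can combine and test them with plain masks; the RMCTRL_FLAGS(...) macro simply ORs the named bits together. A sketch of what an expansion and its checks look like:

    #include <stdio.h>

    #define RMCTRL_FLAGS_NONE              0x000000000
    #define RMCTRL_FLAGS_NO_GPUS_LOCK      0x000000001
    #define RMCTRL_FLAGS_PRIVILEGED        0x000000004
    #define RMCTRL_FLAGS_API_LOCK_READONLY 0x000000800

    int main(void)
    {
        /* What RMCTRL_FLAGS(PRIVILEGED, API_LOCK_READONLY) expands to: */
        unsigned ctrlFlags = 0 | RMCTRL_FLAGS_PRIVILEGED
                               | RMCTRL_FLAGS_API_LOCK_READONLY;

        if (!(ctrlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK))
            printf("dispatcher would take the GPUs lock\n");
        if (ctrlFlags & RMCTRL_FLAGS_PRIVILEGED)
            printf("caller must be admin or kernel\n");
        return 0;
    }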
+// + +#endif // _CONTROL_H_ + + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h new file mode 100644 index 0000000..3282443 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h @@ -0,0 +1,3 @@ + +#include "g_event_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h new file mode 100644 index 0000000..2867ac3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h @@ -0,0 +1,3 @@ + +#include "g_event_buffer_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h new file mode 100644 index 0000000..2c7f066 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h @@ -0,0 +1,127 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _EXPORTS_H +#define _EXPORTS_H + +#include "core/core.h" + +// +// !! Deprecated. Do not use these exported API functions. Instead use the +// User or Kernel ones below, depending on whether they are called from Kernel +// or User space. +// +// A User export is to be used for code paths originating from user space and +// MUST pass only user client handles and user-mode pointers. On most OSes, RM +// will sanity check the use of handles and pointers against incorrect or +// malicious use. +// +// A Kernel export is to be used for code paths originating from kernel space +// and MUST pass only kernel client handles and kernel-mode pointers. By default +// RM will skip any validation checks when a Kernel export is called. The onus +// is on the caller to ensure that only valid handles and pointers are passed. +// TBD. RM may enable the checks on debug builds or when a regkey is set. +// +// For more information refer to the Kernel_Client_Data_Validation wiki page +// +// WARNING!! RM has validation checks for handles and pointers. Incorrect use +// of an export can cause RM to fail the API call.
+// +void Nv01AllocMemory (NVOS02_PARAMETERS*); +void Nv01AllocObject (NVOS05_PARAMETERS*); +void Nv04Alloc (NVOS21_PARAMETERS*); +void Nv04AllocWithAccess (NVOS64_PARAMETERS*); +void Nv01Free (NVOS00_PARAMETERS*); +void Nv04Control (NVOS54_PARAMETERS*); +void Nv04VidHeapControl (NVOS32_PARAMETERS*); +void Nv04IdleChannels (NVOS30_PARAMETERS*); +void Nv04MapMemory (NVOS33_PARAMETERS*); +void Nv04UnmapMemory (NVOS34_PARAMETERS*); +void Nv04I2CAccess (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDma (NVOS39_PARAMETERS*); +void Nv04BindContextDma (NVOS49_PARAMETERS*); +void Nv04MapMemoryDma (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDma (NVOS47_PARAMETERS*); +void Nv04DupObject (NVOS55_PARAMETERS*); +void Nv04Share (NVOS57_PARAMETERS*); +void Nv04AddVblankCallback (NVOS61_PARAMETERS*); + +// exported "User" API functions +void Nv01AllocMemoryUser (NVOS02_PARAMETERS*); +void Nv01AllocObjectUser (NVOS05_PARAMETERS*); +void Nv04AllocUser (NVOS21_PARAMETERS*); +void Nv04AllocWithAccessUser (NVOS64_PARAMETERS*); +void Nv01FreeUser (NVOS00_PARAMETERS*); +void Nv04ControlUser (NVOS54_PARAMETERS*); +void Nv04VidHeapControlUser (NVOS32_PARAMETERS*); +void Nv04IdleChannelsUser (NVOS30_PARAMETERS*); +void Nv04MapMemoryUser (NVOS33_PARAMETERS*); +void Nv04UnmapMemoryUser (NVOS34_PARAMETERS*); +void Nv04I2CAccessUser (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDmaUser (NVOS39_PARAMETERS*); +void Nv04BindContextDmaUser (NVOS49_PARAMETERS*); +void Nv04MapMemoryDmaUser (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDmaUser (NVOS47_PARAMETERS*); +void Nv04DupObjectUser (NVOS55_PARAMETERS*); +void Nv04ShareUser (NVOS57_PARAMETERS*); +void Nv04AddVblankCallbackUser (NVOS61_PARAMETERS*); + +// exported "Kernel" API functions +void Nv01AllocMemoryKernel (NVOS02_PARAMETERS*); +void Nv01AllocObjectKernel (NVOS05_PARAMETERS*); +void Nv04AllocKernel (NVOS21_PARAMETERS*); +void Nv04AllocWithAccessKernel (NVOS64_PARAMETERS*); +void Nv01FreeKernel (NVOS00_PARAMETERS*); +void Nv04ControlKernel (NVOS54_PARAMETERS*); +void Nv04VidHeapControlKernel (NVOS32_PARAMETERS*); +void Nv04IdleChannelsKernel (NVOS30_PARAMETERS*); +void Nv04MapMemoryKernel (NVOS33_PARAMETERS*); +void Nv04UnmapMemoryKernel (NVOS34_PARAMETERS*); +void Nv04I2CAccessKernel (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDmaKernel (NVOS39_PARAMETERS*); +void Nv04BindContextDmaKernel (NVOS49_PARAMETERS*); +void Nv04MapMemoryDmaKernel (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDmaKernel (NVOS47_PARAMETERS*); +void Nv04DupObjectKernel (NVOS55_PARAMETERS*); +void Nv04ShareKernel (NVOS57_PARAMETERS*); +void Nv04AddVblankCallbackKernel (NVOS61_PARAMETERS*); + +// exported "WithSecInfo" API functions +void Nv01AllocMemoryWithSecInfo (NVOS02_PARAMETERS*, API_SECURITY_INFO); +void Nv01AllocObjectWithSecInfo (NVOS05_PARAMETERS*, API_SECURITY_INFO); +void Nv04AllocWithSecInfo (NVOS21_PARAMETERS*, API_SECURITY_INFO); +void Nv04AllocWithAccessSecInfo (NVOS64_PARAMETERS*, API_SECURITY_INFO); +void Nv01FreeWithSecInfo (NVOS00_PARAMETERS*, API_SECURITY_INFO); +void Nv04ControlWithSecInfo (NVOS54_PARAMETERS*, API_SECURITY_INFO); +void Nv04VidHeapControlWithSecInfo (NVOS32_PARAMETERS*, API_SECURITY_INFO); +void Nv04IdleChannelsWithSecInfo (NVOS30_PARAMETERS*, API_SECURITY_INFO); +void Nv04MapMemoryWithSecInfo (NVOS33_PARAMETERS*, API_SECURITY_INFO); +void Nv04UnmapMemoryWithSecInfo (NVOS34_PARAMETERS*, API_SECURITY_INFO); +void Nv04I2CAccessWithSecInfo (NVOS_I2C_ACCESS_PARAMS*, API_SECURITY_INFO); +void Nv04AllocContextDmaWithSecInfo (NVOS39_PARAMETERS*, 
API_SECURITY_INFO); +void Nv04BindContextDmaWithSecInfo (NVOS49_PARAMETERS*, API_SECURITY_INFO); +void Nv04MapMemoryDmaWithSecInfo (NVOS46_PARAMETERS*, API_SECURITY_INFO); +void Nv04UnmapMemoryDmaWithSecInfo (NVOS47_PARAMETERS*, API_SECURITY_INFO); +void Nv04DupObjectWithSecInfo (NVOS55_PARAMETERS*, API_SECURITY_INFO); +void Nv04ShareWithSecInfo (NVOS57_PARAMETERS*, API_SECURITY_INFO); + +#endif // _EXPORTS_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h new file mode 100644 index 0000000..fd5d558 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _MAPPING_LIST_H_ +#define _MAPPING_LIST_H_ + +#include +#include "containers/btree.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "rmapi/resource.h" + +typedef struct VirtualMemory VirtualMemory; +typedef struct Memory Memory; + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +// dma information definitions +typedef struct _def_client_dma_mapping_info CLI_DMA_MAPPING_INFO, *PCLI_DMA_MAPPING_INFO; +typedef struct _def_client_dma_mapping_info_iterator CLI_DMA_MAPPING_INFO_ITERATOR, *PCLI_DMA_MAPPING_INFO_ITERATOR; + +// mapping information definitions +typedef struct _def_client_dma_alloc_map_info CLI_DMA_ALLOC_MAP_INFO; + +// +// DMA memory mapping XXX keep around since needed by mapping.c +// We need to figure out what to do with this +// RS-TODO gradually remove this with inter-mapping cleanup +// +struct _def_client_dma_mapping_info +{ + NvHandle hDevice; + NvU64 DmaOffset; + void* KernelVAddr[NV_MAX_SUBDEVICES]; // Kernel's virtual address, if required + void* KernelPriv; // Token required to unmap the kernel mapping + NvU64 FbAperture[NV_MAX_SUBDEVICES]; // GPU aperture addresses, if required + NvU64 FbApertureLen[NV_MAX_SUBDEVICES]; // GPU aperture mapped lengths + MEMORY_DESCRIPTOR *pMemDesc; // Subregion to be mapped + NvU32 Flags; + NvBool bP2P; + NvU32 gpuMask; + ADDRESS_TRANSLATION addressTranslation; + MEMORY_DESCRIPTOR *pBar1P2PVirtMemDesc; // The peer GPU mapped BAR1 region + MEMORY_DESCRIPTOR *pBar1P2PPhysMemDesc; // The peer GPU vidmem sub region + PCLI_DMA_MAPPING_INFO Next; + PCLI_DMA_MAPPING_INFO Prev; +}; + +// +// iterator object to enum CLI_DMA_MAPPING_INFO from 'pDmaMappingList' +// +struct _def_client_dma_mapping_info_iterator +{ + PNODE pDmaMappingList; // list of hDevices + PNODE pCurrentList; // current hDevice list entry, is list of pDmaMappings + PNODE pNextDmaMapping; // next pDmaMapping while iterating over the DmaOffsets +}; + +// +// DMA allocMapping +// +struct _def_client_dma_alloc_map_info +{ + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; + struct VirtualMemory *pVirtualMemory; + struct Memory *pMemory; +}; + +// **************************************************************************** +// Function definitions +// **************************************************************************** + +// Client Memory Mappings +// +// CliUpdateMemoryMappingInfo - Fill in RsCpuMapping fields for system memory mappings +// +static inline NV_STATUS +CliUpdateMemoryMappingInfo +( + RsCpuMapping *pCpuMapping, + NvBool bKernel, + NvP64 cpuAddress, + NvP64 priv, + NvU64 cpuMapLength, + NvU32 flags +) +{ + if (pCpuMapping == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pCpuMapping->pPrivate->bKernel = bKernel; + pCpuMapping->length = cpuMapLength; + pCpuMapping->flags = flags; + pCpuMapping->processId = osGetCurrentProcess(); + pCpuMapping->pLinearAddress = cpuAddress; + pCpuMapping->pPrivate->pPriv = priv; + pCpuMapping->pPrivate->gpuAddress = -1; + pCpuMapping->pPrivate->gpuMapLength = -1; + + return NV_OK; +} + +// **************************************************************************** +// Device Memory Mappings +// **************************************************************************** + +// +// CliUpdateDeviceMemoryMapping - Fill in RsCpuMapping fields for device memory mappings +// +static inline NV_STATUS +CliUpdateDeviceMemoryMapping +( + RsCpuMapping *pCpuMapping, + NvBool bKernel, + 
+    NvP64 priv,
+    NvP64 cpuAddress,
+    NvU64 cpuMapLength,
+    NvU64 gpuAddress,
+    NvU64 gpuMapLength,
+    NvU32 flags
+)
+{
+    if (pCpuMapping == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    pCpuMapping->pPrivate->bKernel = bKernel;
+    pCpuMapping->length = cpuMapLength;
+    pCpuMapping->flags = flags;
+    pCpuMapping->processId = osGetCurrentProcess();
+    pCpuMapping->pLinearAddress = cpuAddress;
+    pCpuMapping->pPrivate->pPriv = priv;
+    pCpuMapping->pPrivate->gpuAddress = gpuAddress;
+    pCpuMapping->pPrivate->gpuMapLength = gpuMapLength;
+
+    return NV_OK;
+}
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h
new file mode 100644
index 0000000..06c9aaa
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h
@@ -0,0 +1,99 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _PARAM_COPY_H_
+#define _PARAM_COPY_H_
+
+//
+// RMAPI_PARAM_COPY - a mechanism for getting user params in and out of resman.
+//
+// The struct RMAPI_PARAM_COPY keeps track of current API params for eventual
+// copyout and free as needed.
+//
+
+#include
+
+struct API_STATE
+{
+    NvP64       pUserParams;     // ptr to params in client's addr space
+    void      **ppKernelParams;  // ptr to current 'pKernelParams'
+    NvU32       paramsSize;      // # bytes
+    NvU32       flags;
+    NvBool      bSizeValid;
+    const char *msgTag;
+};
+typedef struct API_STATE RMAPI_PARAM_COPY;
+
+#define RMAPI_PARAM_COPY_FLAGS_NONE              0x00000000
+#define RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE   NVBIT(0)
+#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN       NVBIT(1)
+#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT      NVBIT(2)
+#define RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER       NVBIT(3)
+//
+// Only set this if the paramsSize member of RMAPI_PARAM_COPY has been validated
+// for correctness before calling rmapiParamsAcquire(). There is a default cap
+// on the largest size allowed in order to avoid huge memory allocations
+// triggering out-of-memory scenarios if the user passes in a bogus size.
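+//
+// Illustrative acquire/release sequence (a sketch, not part of the original
+// header; the bUserModeArgs flag and variable names are assumed). A caller
+// that has validated paramsSize itself might do:
+//
+//     RMAPI_PARAM_COPY paramCopy;
+//     RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, pUserParams, numElems, sizeOfElem);
+//     paramCopy.flags |= RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK;
+//     status = rmapiParamsAcquire(&paramCopy, bUserModeArgs);
+//     // ... use pKernelParams ...
+//     status = rmapiParamsRelease(&paramCopy);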
+//
+#define RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK   NVBIT(4)
+//
+// 1MB is the largest size allowed for an embedded pointer acquired through
+// rmapiParamsAcquire() unless RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK is
+// specified and the size is validated before calling rmapiParamsAcquire().
+//
+#define RMAPI_PARAM_COPY_MAX_PARAMS_SIZE   (1*1024*1024)
+
+#if NV_PRINTF_STRINGS_ALLOWED
+#define RMAPI_PARAM_COPY_MSG_TAG(x)                        x
+#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = theMsgTag
+#else
+#define RMAPI_PARAM_COPY_MSG_TAG(x)                        ((const char *) 0)
+#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = ((const char *) 0)
+#endif
+
+//
+// Initializes the RMAPI_PARAM_COPY structure. Sets bSizeValid to false if
+// calculating the size caused an overflow. This makes the rmapiParamsAcquire()
+// call fail with NV_ERR_INVALID_ARGUMENT. Since rmapiParamsAcquire() always
+// directly follows this initialization, there is no need to make it return a
+// status and duplicate error checking.
+//
+#define RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, theUserParams, numElems, sizeOfElem)        \
+    do {                                                                                            \
+        RMAPI_PARAM_COPY_SET_MSG_TAG((paramCopy), __FUNCTION__);                                    \
+        (paramCopy).ppKernelParams = (void **) &(pKernelParams);                                    \
+        (paramCopy).pUserParams = (theUserParams);                                                  \
+        (paramCopy).flags = RMAPI_PARAM_COPY_FLAGS_NONE;                                            \
+        (paramCopy).bSizeValid = portSafeMulU32((numElems), (sizeOfElem), &(paramCopy).paramsSize); \
+    } while(0)
+
+// Routines for alloc/copyin/copyout/free sequences
+NV_STATUS rmapiParamsAcquire(RMAPI_PARAM_COPY *, NvBool);
+NV_STATUS rmapiParamsRelease(RMAPI_PARAM_COPY *);
+
+NV_STATUS rmapiParamsCopyOut(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool);
+NV_STATUS rmapiParamsCopyIn(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool);
+
+// Initialize the param-copy structure for a given class
+NV_STATUS rmapiParamsCopyInit(RMAPI_PARAM_COPY *, NvU32 hClass);
+
+#endif // _PARAM_COPY_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h
new file mode 100644
index 0000000..054e13a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h
@@ -0,0 +1,3 @@
+
+#include "g_resource_nvoc.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
new file mode 100644
index 0000000..cdbabe7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
@@ -0,0 +1,3 @@
+
+#include "g_resource_fwd_decls_nvoc.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h
new file mode 100644
index 0000000..43145b8
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h
@@ -0,0 +1,410 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _RMAPI_H_
+#define _RMAPI_H_
+
+#include "core/core.h"
+#include "nvsecurityinfo.h"
+
+//
+// Forward declarations
+//
+typedef struct _RM_API RM_API;
+typedef struct RsServer RsServer;
+typedef struct OBJGPU OBJGPU;
+typedef struct RsResource RsResource;
+typedef struct RsCpuMapping RsCpuMapping;
+typedef struct CALL_CONTEXT CALL_CONTEXT;
+typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR;
+typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL;
+typedef struct RS_LOCK_INFO RS_LOCK_INFO;
+typedef NvU32 NV_ADDRESS_SPACE;
+
+extern RsServer g_resServ;
+
+/**
+ * Initialize RMAPI module.
+ *
+ * Must be called once and only once before any RMAPI functions can be called.
+ */
+NV_STATUS rmapiInitialize(void);
+
+/**
+ * Shutdown RMAPI module
+ *
+ * Must be called once and only once when a driver is shutting down and no more
+ * RMAPI functions will be called.
+ */
+void rmapiShutdown(void);
+
+// Flags for rmapiLockAcquire
+#define RMAPI_LOCK_FLAGS_NONE         (0x00000000) // default no flags
+#define RMAPI_LOCK_FLAGS_COND_ACQUIRE NVBIT(0)     // conditional acquire; if lock is
+                                                   // already held then return error
+#define RMAPI_LOCK_FLAGS_READ         NVBIT(1)     // Acquire API lock for READ
+#define RMAPI_LOCK_FLAGS_WRITE        (0x00000000) // Acquire API lock for WRITE - Default
+
+/**
+ * Acquire the RM API Lock
+ *
+ * The API lock is a sleeping mutex that is used to serialize access to RM APIs
+ * by (passive-level) RM clients.
+ *
+ * The API lock is not used to protect state accessed by DPC and ISRs. For DPC
+ * and ISRs the GPU lock is used instead.
+ * For state controlled by clients, this often requires taking both API and
+ * GPU locks in API paths.
+ *
+ * @param[in] flags   RM_LOCK_FLAGS_*
+ * @param[in] module  RM_LOCK_MODULES_*
+ */
+NV_STATUS rmapiLockAcquire(NvU32 flags, NvU32 module);
+
+/**
+ * Release RM API Lock
+ */
+void rmapiLockRelease(void);
+
+/**
+ * Check if current thread owns the API lock
+ */
+NvBool rmapiLockIsOwner(void);
+
+
+/**
+ * Type of RM API client interface
+ */
+typedef enum
+{
+    RMAPI_EXTERNAL,           // For clients external to RM TLS, locks, etc -- no default security attributes
+    RMAPI_EXTERNAL_KERNEL,    // For clients external to TLS and locks but which still need default security attributes
+    RMAPI_MODS_LOCK_BYPASS,   // Hack for MODS - skip RM locks but initialize TLS (bug 1808386)
+    RMAPI_API_LOCK_INTERNAL,  // For clients that already have the TLS & API lock held -- security is RM internal
+    RMAPI_GPU_LOCK_INTERNAL,  // For clients that have TLS, API lock, and GPU lock -- security is RM internal
+    RMAPI_STUBS,              // All functions just return NV_ERR_NOT_SUPPORTED
+    RMAPI_TYPE_MAX
+} RMAPI_TYPE;
+
+/**
+ * Query interface that can be used to perform operations through the
+ * client-level RM API
+ */
+RM_API *rmapiGetInterface(RMAPI_TYPE rmapiType);
+
+// Flags for RM_API::Alloc
+#define RMAPI_ALLOC_FLAGS_NONE     0
+#define RMAPI_ALLOC_FLAGS_SKIP_RPC NVBIT(0)
+
+// Flags for RM_API::Free
+#define RMAPI_FREE_FLAGS_NONE      0
+
+/**
+ * Interface for performing operations through the RM API exposed to client
+ * drivers. The interface provides a consistent view of the RM API while
+ * abstracting the individual callers from specifying security attributes
+ * and/or locking needs. For example, this interface can be used either before
+ * or after the API or GPU locks are acquired.
+ */
+struct _RM_API
+{
+    // Allocate a resource with default security attributes and local pointers (no NvP64)
+    NV_STATUS (*Alloc)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
+                       NvHandle *phObject, NvU32 hClass, void *pAllocParams);
+
+    // Allocate a resource with default security attributes and local pointers (no NvP64)
+    // and client assigned handle
+    NV_STATUS (*AllocWithHandle)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
+                                 NvHandle hObject, NvU32 hClass, void *pAllocParams);
+
+    // Allocate a resource
+    NV_STATUS (*AllocWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
+                                  NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams,
+                                  NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo);
+
+    // Free a resource with default security attributes
+    NV_STATUS (*Free)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject);
+
+    // Free a resource
+    NV_STATUS (*FreeWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject,
+                                 NvU32 flags, API_SECURITY_INFO *pSecInfo);
+
+    // Free a list of clients with default security attributes
+    NV_STATUS (*FreeClientList)(struct _RM_API *pRmApi, NvHandle *phClientList, NvU32 numClients);
+
+    // Free a list of clients
+    NV_STATUS (*FreeClientListWithSecInfo)(struct _RM_API *pRmApi, NvHandle *phClientList,
+                                           NvU32 numClients, API_SECURITY_INFO *pSecInfo);
+
+    // Invoke a control with default security attributes and local pointers (no NvP64)
+    NV_STATUS (*Control)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd,
+                         void *pParams, NvU32 paramsSize);
+
+    // Invoke a control
+    NV_STATUS (*ControlWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd,
+                                    NvP64 pParams, NvU32 paramsSize, NvU32 flags,
+                                    API_SECURITY_INFO *pSecInfo);
+
+    // Prefetch control call parameters into the control call cache (0000, 0080 and 2080 classes only)
+    NV_STATUS (*ControlPrefetch)(struct _RM_API *pRmApi, NvU32 cmd);
+
+    // Dup an object with default security attributes
+    NV_STATUS (*DupObject)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject,
+                           NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags);
+
+    // Dup an object
+    NV_STATUS (*DupObjectWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
+                                      NvHandle *phObject, NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags,
+                                      API_SECURITY_INFO *pSecInfo);
+
+    // Share an object with default security attributes
+    NV_STATUS (*Share)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject,
+                       RS_SHARE_POLICY *pSharePolicy);
+
+    // Share an object
+    NV_STATUS (*ShareWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject,
+                                  RS_SHARE_POLICY *pSharePolicy, API_SECURITY_INFO *pSecInfo);
+
+    // Map memory with default security attributes and local pointers (no NvP64). Provides
+    // RM internal implementation for NvRmMapMemory().
+    NV_STATUS (*MapToCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory,
+                          NvU64 offset, NvU64 length, void **ppCpuVirtAddr, NvU32 flags);
+
+    // Map memory. Provides RM internal implementation for NvRmMapMemory().
+    NV_STATUS (*MapToCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory,
+                                     NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags, API_SECURITY_INFO *pSecInfo);
+
+    // Map memory v2. Pass in flags as a pointer for in/out access
+    NV_STATUS (*MapToCpuWithSecInfoV2)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory,
+                                       NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 *flags, API_SECURITY_INFO *pSecInfo);
+
+    // Unmap memory with default security attributes and local pointers (no NvP64)
+    NV_STATUS (*UnmapFromCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, void *pLinearAddress,
+                              NvU32 flags, NvU32 ProcessId);
+
+    // Unmap memory
+    NV_STATUS (*UnmapFromCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory,
+                                         NvP64 pLinearAddress, NvU32 flags, NvU32 ProcessId, API_SECURITY_INFO *pSecInfo);
+
+    // Map dma memory with default security attributes. Provides RM internal implementation for NvRmMapMemoryDma().
+    NV_STATUS (*Map)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
+                     NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset);
+
+    // Map dma memory. Provides RM internal implementation for NvRmMapMemoryDma().
+    NV_STATUS (*MapWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
+                                NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset, API_SECURITY_INFO *pSecInfo);
+
+    // Unmap dma memory with default security attributes
+    NV_STATUS (*Unmap)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
+                       NvU32 flags, NvU64 dmaOffset);
+
+    // Unmap dma memory
+    NV_STATUS (*UnmapWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory,
+                                  NvU32 flags, NvU64 dmaOffset, API_SECURITY_INFO *pSecInfo);
+
+    API_SECURITY_INFO defaultSecInfo;
+    NvBool            bHasDefaultSecInfo;
+    NvBool            bTlsInternal;
+    NvBool            bApiLockInternal;
+    NvBool            bRmSemaInternal;
+    NvBool            bGpuLockInternal;
+    void             *pPrivateContext;
+};
+
+// Called before any RM resource is freed
+NV_STATUS rmapiFreeResourcePrologue(RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams);
+
+// Mark the client resources for deletion, given a GPU mask
+void rmapiSetDelPendingClientResourcesFromGpuMask(NvU32 gpuMask);
+
+// Delete the marked client resources
+void rmapiDelPendingClients(void);
+void rmapiDelPendingDevices(NvU32 gpuMask);
+void rmapiReportLeakedDevices(NvU32 gpuMask);
+
+//
+// Given an OS info pointer, retrieves an array of client handles corresponding
+// to clients with matching pOSInfo fields. The array is allocated dynamically,
+// and is expected to be freed by the caller.
+//
+NV_STATUS rmapiGetClientHandlesFromOSInfo(void*, NvHandle**, NvU32*);
+
+//
+// Base mapping routines for use by RsResource subclasses
+//
+NV_STATUS rmapiMapGpuCommon(RsResource *, CALL_CONTEXT *, RsCpuMapping *, OBJGPU *, NvU32, NvU32);
+NV_STATUS rmapiValidateKernelMapping(RS_PRIV_LEVEL privLevel, NvU32 flags, NvBool *pbKernel);
+NV_STATUS rmapiGetEffectiveAddrSpace(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags, NV_ADDRESS_SPACE *pAddrSpace);
+
+/**
+ * Deprecated RM API interfaces. Use RM_API instead.
+ */
+NV_STATUS RmUnmapMemoryDma(NvHandle, NvHandle, NvHandle, NvHandle, MEMORY_DESCRIPTOR*, NvU32, NvU64);
+NV_STATUS RmConfigGetEx  (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool);
+NV_STATUS RmConfigSetEx  (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool);
+
+/**
+ * Control cache API.
+ * Every function except rmapiControlCacheInit and rmapiControlCacheFree is
+ * thread safe.
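+ *
+ * Illustrative lookup-then-populate pattern (a sketch, not part of the
+ * original header; the handle, flag, and buffer names are assumed):
+ *
+ *   const void *pCached = rmapiControlCacheGet(hClient, hObject, cmd);
+ *   if (pCached != NULL)
+ *   {
+ *       // Serve the control call from the cache
+ *       portMemCopy(pParams, paramsSize, pCached, paramsSize);
+ *   }
+ *   else if (rmapiControlIsCacheable(flags, bGspClient))
+ *   {
+ *       // ...issue the control call, then populate the cache...
+ *       (void)rmapiControlCacheSet(hClient, hObject, cmd, pParams, paramsSize);
+ *   }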
+ */
+void rmapiControlCacheInit(void);
+NvBool rmapiControlIsCacheable(NvU32 flags, NvBool isGSPClient);
+NvBool rmapiCmdIsCacheable(NvU32 cmd, NvBool isGSPClient);
+const void* rmapiControlCacheGet(NvHandle hClient, NvHandle hObject, NvU32 cmd);
+NV_STATUS rmapiControlCacheSet(NvHandle hClient, NvHandle hObject, NvU32 cmd,
+                               const void* params, NvU32 paramsSize);
+void rmapiControlCacheFree(void);
+void rmapiControlCacheFreeClient(NvHandle hClient);
+void rmapiControlCacheFreeObject(NvHandle hClient, NvHandle hObject);
+
+typedef struct _RM_API_CONTEXT {
+    NvU32 gpuMask;
+} RM_API_CONTEXT;
+
+//
+// Handler for work that must be done before invoking an RM API
+//
+NV_STATUS
+rmapiPrologue
+(
+    RM_API         *pRmApi,
+    RM_API_CONTEXT *pContext
+);
+
+//
+// Handler for work that must be done after invoking an RM API
+//
+void
+rmapiEpilogue
+(
+    RM_API         *pRmApi,
+    RM_API_CONTEXT *pContext
+);
+
+void
+rmapiInitLockInfo
+(
+    RM_API       *pRmApi,
+    NvHandle      hClient,
+    RS_LOCK_INFO *pLockInfo
+);
+
+//
+// RM locking modules: 24-bit group bitmask, 8-bit subgroup id
+//
+// Lock acquires are tagged with an RM_LOCK_MODULE_* in order to partition
+// the acquires into groups, which allows read-only locks to be
+// enabled / disabled on a per-group basis (via apiLockMask and gpuLockMask
+// in OBJSYS.)
+//
+// The groups are further partitioned into subgroups, which
+// are used for lock profiling data collection.
+//
+#define RM_LOCK_MODULE_VAL(grp, subgrp)   ((((grp) & 0xffffff) << 8) | ((subgrp) & 0xff))
+#define RM_LOCK_MODULE_GRP(val)           (((val) >> 8) & 0xffffff)
+//                                                            Grp       SubGrp
+#define RM_LOCK_MODULES_NONE              RM_LOCK_MODULE_VAL(0x000000,  0x00)
+
+#define RM_LOCK_MODULES_WORKITEM          RM_LOCK_MODULE_VAL(0x000001,  0x00)
+
+#define RM_LOCK_MODULES_CLIENT            RM_LOCK_MODULE_VAL(0x000002,  0x00)
+
+#define RM_LOCK_MODULES_GPU_OPS           RM_LOCK_MODULE_VAL(0x000004,  0x00)
+
+#define RM_LOCK_MODULES_OSAPI             RM_LOCK_MODULE_VAL(0x000010,  0x00)
+#define RM_LOCK_MODULES_STATE_CONFIG      RM_LOCK_MODULE_VAL(0x000010,  0x01)
+#define RM_LOCK_MODULES_EVENT             RM_LOCK_MODULE_VAL(0x000010,  0x02)
+#define RM_LOCK_MODULES_VBIOS             RM_LOCK_MODULE_VAL(0x000010,  0x03)
+
+#define RM_LOCK_MODULES_MEM               RM_LOCK_MODULE_VAL(0x000020,  0x00)
+#define RM_LOCK_MODULES_MEM_FLA           RM_LOCK_MODULE_VAL(0x000020,  0x01)
+#define RM_LOCK_MODULES_MEM_PMA           RM_LOCK_MODULE_VAL(0x000020,  0x02)
+
+#define RM_LOCK_MODULES_POWER             RM_LOCK_MODULE_VAL(0x000040,  0x00)
+#define RM_LOCK_MODULES_ACPI              RM_LOCK_MODULE_VAL(0x000040,  0x01)
+#define RM_LOCK_MODULES_DYN_POWER         RM_LOCK_MODULE_VAL(0x000040,  0x02)
+
+#define RM_LOCK_MODULES_HYPERVISOR        RM_LOCK_MODULE_VAL(0x000080,  0x00)
+#define RM_LOCK_MODULES_VGPU              RM_LOCK_MODULE_VAL(0x000080,  0x01)
+#define RM_LOCK_MODULES_RPC               RM_LOCK_MODULE_VAL(0x000080,  0x02)
+
+#define RM_LOCK_MODULES_DIAG              RM_LOCK_MODULE_VAL(0x000100,  0x00)
+#define RM_LOCK_MODULES_RC                RM_LOCK_MODULE_VAL(0x000100,  0x01)
+
+#define RM_LOCK_MODULES_SLI               RM_LOCK_MODULE_VAL(0x000200,  0x00)
+#define RM_LOCK_MODULES_P2P               RM_LOCK_MODULE_VAL(0x000200,  0x01)
+#define RM_LOCK_MODULES_NVLINK            RM_LOCK_MODULE_VAL(0x000200,  0x02)
+
+#define RM_LOCK_MODULES_HOTPLUG           RM_LOCK_MODULE_VAL(0x000400,  0x00)
+#define RM_LOCK_MODULES_DISP              RM_LOCK_MODULE_VAL(0x000400,  0x01)
+#define RM_LOCK_MODULES_KERNEL_RM_EVENTS  RM_LOCK_MODULE_VAL(0x000400,  0x02)
+
+#define RM_LOCK_MODULES_GPU               RM_LOCK_MODULE_VAL(0x000800,  0x00)
+#define RM_LOCK_MODULES_GR                RM_LOCK_MODULE_VAL(0x000800,  0x01)
+#define RM_LOCK_MODULES_FB                RM_LOCK_MODULE_VAL(0x000800,  0x02)
+#define RM_LOCK_MODULES_FIFO              RM_LOCK_MODULE_VAL(0x000800,  0x03)
+#define RM_LOCK_MODULES_TMR               RM_LOCK_MODULE_VAL(0x000800,  0x04)
+
+#define RM_LOCK_MODULES_I2C               RM_LOCK_MODULE_VAL(0x001000,  0x00)
+#define RM_LOCK_MODULES_GPS               RM_LOCK_MODULE_VAL(0x001000,  0x01)
+#define RM_LOCK_MODULES_SEC2              RM_LOCK_MODULE_VAL(0x001000,  0x02)
+#define RM_LOCK_MODULES_THERM             RM_LOCK_MODULE_VAL(0x001000,  0x03)
+#define RM_LOCK_MODULES_INFOROM           RM_LOCK_MODULE_VAL(0x001000,  0x04)
+
+#define RM_LOCK_MODULES_ISR               RM_LOCK_MODULE_VAL(0x002000,  0x00)
+#define RM_LOCK_MODULES_DPC               RM_LOCK_MODULE_VAL(0x002000,  0x01)
+
+#define RM_LOCK_MODULES_INIT              RM_LOCK_MODULE_VAL(0x004000,  0x00)
+#define RM_LOCK_MODULES_STATE_LOAD        RM_LOCK_MODULE_VAL(0x004000,  0x01)
+
+#define RM_LOCK_MODULES_STATE_UNLOAD      RM_LOCK_MODULE_VAL(0x008000,  0x00)
+#define RM_LOCK_MODULES_DESTROY           RM_LOCK_MODULE_VAL(0x008000,  0x01)
+
+//
+// ResServ lock flag translation
+//
+#define RM_LOCK_FLAGS_NONE             0
+#define RM_LOCK_FLAGS_NO_API_LOCK      RS_LOCK_FLAGS_NO_TOP_LOCK
+#define RM_LOCK_FLAGS_NO_CLIENT_LOCK   RS_LOCK_FLAGS_NO_CLIENT_LOCK
+#define RM_LOCK_FLAGS_NO_GPUS_LOCK     RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1
+#define RM_LOCK_FLAGS_GPU_GROUP_LOCK   RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2
+#define RM_LOCK_FLAGS_RM_SEMA          RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3
+
+//
+// ResServ lock state translation
+//
+#define RM_LOCK_STATES_NONE                     0
+#define RM_LOCK_STATES_API_LOCK_ACQUIRED        RS_LOCK_STATE_TOP_LOCK_ACQUIRED
+#define RM_LOCK_STATES_GPUS_LOCK_ACQUIRED       RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED
+#define RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED  RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED
+#define RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS    RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK
+#define RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED     RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED
+#define RM_LOCK_STATES_RM_SEMA_ACQUIRED         RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED
+
+//
+// ResServ lock release translation
+//
+#define RM_LOCK_RELEASE_API_LOCK        RS_LOCK_RELEASE_TOP_LOCK
+#define RM_LOCK_RELEASE_CLIENT_LOCK     RS_LOCK_RELEASE_CLIENT_LOCK
+#define RM_LOCK_RELEASE_GPUS_LOCK       RS_LOCK_RELEASE_CUSTOM_LOCK_1
+#define RM_LOCK_RELEASE_GPU_GROUP_LOCK  RS_LOCK_RELEASE_CUSTOM_LOCK_2
+#define RM_LOCK_RELEASE_RM_SEMA         RS_LOCK_RELEASE_CUSTOM_LOCK_3
+
+#endif // _RMAPI_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h
new file mode 100644
index 0000000..2bb6df4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef RMAPI_UTILS_H
+#define RMAPI_UTILS_H
+
+#include "rmapi/rmapi.h"
+
+//
+// Alloc a client, device and subdevice handle for a GPU
+//
+NV_STATUS
+rmapiutilAllocClientAndDeviceHandles
+(
+    RM_API   *pRmApi,
+    OBJGPU   *pGpu,
+    NvHandle *phClient,
+    NvHandle *phDevice,
+    NvHandle *phSubDevice
+);
+
+//
+// Free client, device and subdevice handles
+//
+void
+rmapiutilFreeClientAndDeviceHandles
+(
+    RM_API   *pRmApi,
+    NvHandle *phClient,
+    NvHandle *phDevice,
+    NvHandle *phSubDevice
+);
+
+//
+// Return NV_TRUE if the given external class ID is an INTERNAL_ONLY class
+//
+NvBool rmapiutilIsExternalClassIdInternalOnly(NvU32 externalClassId);
+
+#endif /* RMAPI_UTILS_H */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h
new file mode 100644
index 0000000..203d11b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h
@@ -0,0 +1,188 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _RS_UTILS_H_
+#define _RS_UTILS_H_
+
+/**
+ * @defgroup RsUtilities
+ *
+ * Provides convenience utilities for resserv. Utility functions provide
+ * abstractions that take handles as inputs -- helpful for legacy code that
+ * passes hClient or hResource handles and not underlying objects. The desire
+ * is for new code to use pClient and RsResourceRef types instead of passing
+ * handles around, so that this utility module can be phased out.
+ *
+ * @{
+ */
+
+#include "resserv/rs_server.h"
+#include "resserv/rs_client.h"
+#include "resserv/rs_resource.h"
+
+#include "rmapi/client.h"
+
+#include "containers/list.h"
+
+MAKE_LIST(ClientHandlesList, NvHandle);
+
+#define serverutilGetDerived(pRmClient, hResource, ppBaseRes, type)   \
+    (clientGetResource(staticCast((pRmClient), RsClient),             \
+                       (hResource),                                   \
+                       classId(type),                                 \
+                       (ppBaseRes)) != NV_OK)                         \
+        ? NULL                                                        \
+        : dynamicCast(*(ppBaseRes), type)
+
+/**
+ * Get the reference to a resource
+ * @param[in]  hClient        Client handle
+ * @param[in]  hObject        The object handle to look up
+ * @param[out] ppResourceRef  The reference to the resource
+ */
+NV_STATUS serverutilGetResourceRef(NvHandle hClient, NvHandle hObject,
+                                   RsResourceRef **ppResourceRef);
+
+/**
+ * Get the reference to a resource (with a type check)
+ * @param[in]  hClient          Client handle
+ * @param[in]  hObject          The object handle to look up
+ * @param[in]  internalClassId  Expected internal class of the resource
+ * @param[out] ppResourceRef    The reference to the resource
+ */
+NV_STATUS serverutilGetResourceRefWithType(NvHandle hClient, NvHandle hObject,
+                                           NvU32 internalClassId, RsResourceRef **ppResourceRef);
+
+/**
+ * Get the reference to a resource (with a type and parent check)
+ * @param[in]  hClient          Client handle
+ * @param[in]  hParent          Expected parent handle of the resource
+ * @param[in]  hObject          The object handle to look up
+ * @param[in]  internalClassId  Expected internal class of the resource
+ * @param[out] ppResourceRef    The reference to the resource
+ */
+NV_STATUS serverutilGetResourceRefWithParent(NvHandle hClient, NvHandle hParent, NvHandle hObject,
+                                             NvU32 internalClassId, RsResourceRef **ppResourceRef);
+
+/**
+ * Find the first child object of given type
+ */
+RsResourceRef *serverutilFindChildRefByType(NvHandle hClient, NvHandle hParent, NvU32 internalClassId, NvBool bExactMatch);
+
+
+/**
+ * Get an iterator to the elements in the client's resource map
+ *
+ * See clientRefIter for documentation on hScopedObject and iterType
+ */
+RS_ITERATOR serverutilRefIter(NvHandle hClient, NvHandle hScopedObject, NvU32 internalClassId, RS_ITER_TYPE iterType, NvBool bExactMatch);
+
+/**
+ * Get an iterator to the elements in the server's shared object map
+ */
+RS_SHARE_ITERATOR serverutilShareIter(NvU32 internalClassId);
+
+/**
+ * Advance an iterator over the elements in the server's shared object map
+ */
+NvBool serverutilShareIterNext(RS_SHARE_ITERATOR* pIt);
+
+/**
+ * Validate that a given resource handle is well-formed and does not already
+ * exist under a given client.
+ */
+NvBool serverutilValidateNewResourceHandle(NvHandle, NvHandle);
+
+/**
+ * Generate an unused handle for a resource. The handle will be generated in the
+ * white-listed range that was specified when the client was allocated.
+ */
+NV_STATUS serverutilGenResourceHandle(NvHandle, NvHandle*);
+
+/**
+ * Get a client pointer from a client handle without taking any locks.
+ *
+ * @param[in]  hClient   The client to acquire
+ * @param[out] ppClient  Pointer to the RmClient
+ */
+NV_STATUS serverutilGetClientUnderLock(NvHandle hClient, RmClient **ppClient);
+
+/**
+ * Get a client pointer from a client handle and lock it.
+ *
+ * @param[in]  hClient   The client to acquire
+ * @param[in]  access    LOCK_ACCESS_*
+ * @param[out] ppClient  Pointer to the RmClient
+ */
+NV_STATUS serverutilAcquireClient(NvHandle hClient, LOCK_ACCESS_TYPE access, RmClient **ppClient);
+
+/**
+ * Unlock a client
+ *
+ * @param[in] access   LOCK_ACCESS_*
+ * @param[in] pClient  Pointer to the RmClient
+ */
+void serverutilReleaseClient(LOCK_ACCESS_TYPE access, RmClient *pClient);
+
+/**
+ * Get the first valid client pointer in resource server without taking any locks.
+ */
+RmClient **serverutilGetFirstClientUnderLock(void);
+
+/**
+ * Get the next valid client pointer in resource server without taking any locks.
+ *
+ * @param[in] pClient  Pointer returned by a previous call to
+ *                     serverutilGetFirstClientUnderLock or
+ *                     serverutilGetNextClientUnderLock
+ */
+RmClient **serverutilGetNextClientUnderLock(RmClient **pClient);
+
+/*!
+ * @brief Retrieve all hClients allocated for the given (ProcID, SubProcessID) + * + * This function iterates through all the clients in the resource server and finds + * hClients allocated for the given (ProcID, SubProcessID) and returns them to + * the caller. + * + * @param[in] procID Process ID + * @param[in] subProcessID SubProcess ID + * @param[out] pClientList List in which the client handles are returned + * + * @return NV_STATUS + */ +NV_STATUS serverutilGetClientHandlesFromPid(NvU32 procID, NvU32 subProcessID, ClientHandlesList *pClientList); + +/** + * This is a filtering function intended to be used with refFindCpuMappingWithFilter. + * This filter will only match user mappings belonging to the current process. + * + * @param[in] ppMapping The mapping that is being filtered + */ +NvBool serverutilMappingFilterCurrentUserProc(RsCpuMapping *ppMapping); + +/** + * This is a filtering function intended to be used with refFindCpuMappingWithFilter. + * This filter will only match kernel mappings. + * + * @param[in] ppMapping The mapping that is being filtered + */ +NvBool serverutilMappingFilterKernel(RsCpuMapping *ppMapping); + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h new file mode 100644 index 0000000..2a07abc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h @@ -0,0 +1,3 @@ + +#include "g_hypervisor_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h new file mode 100644 index 0000000..27ab7b1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef BASE_UTILS_H +#define BASE_UTILS_H + +#include "nvtypes.h" + +/*! + * @file + * @brief Various helper utility functions that have no other home. 
+ */
+
+NvU32 nvLogBase2(NvU64);
+
+// bit field helper functions
+NvU32 nvBitFieldLSZero(NvU32 *, NvU32);
+NvU32 nvBitFieldMSZero(NvU32 *, NvU32);
+NvBool nvBitFieldTest(NvU32 *, NvU32, NvU32);
+void nvBitFieldSet(NvU32 *, NvU32, NvU32, NvBool);
+
+//
+// Sort an array of n elements/structures.
+// Example:
+//      NvBool integerLess(void * a, void * b)
+//      {
+//          return *(int *)a < *(int *)b;
+//      }
+//      int array[1000];
+//      int tempBuffer[1000];   // scratch space, at least as large as array
+//      ...
+//      nvMergeSort(array, sizeof(array)/sizeof(*array), tempBuffer, sizeof(*array), integerLess);
+//
+void nvMergeSort(void * array, NvU32 n, void * tempBuffer, NvU32 elementSize, NvBool (*less)(void *, void *));
+
+//
+#define BASE10 (10)
+#define BASE16 (16)
+
+// Do not conflict with libc naming
+NvS32 nvStrToL(NvU8* pStr, NvU8** pEndStr, NvS32 base, NvU8 stopChar, NvU32 *numFound);
+
+//
+// Returns bit mask of most significant bit of input
+//
+NvU64 nvMsb64(NvU64);
+
+//
+// Converts an unsigned 32-bit integer to a string
+//
+char * nvU32ToStr(NvU32 value, char *string, NvU32 radix);
+
+//
+// Return the string length
+//
+NvU32 nvStringLen(const char * str);
+
+#endif // BASE_UTILS_H
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h
new file mode 100644
index 0000000..1cdba45
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h
@@ -0,0 +1,299 @@
+/*
+ * Lightweight protocol buffers.
+ *
+ * Based on code taken from
+ * https://code.google.com/archive/p/lwpb/source/default/source
+ *
+ * The code there is licensed as Apache 2.0. However, NVIDIA has received the
+ * code from the original author under MIT license terms.
+ *
+ *
+ * Copyright 2009 Simon Kallweit
+ * Copyright 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __PRB_H__
+#define __PRB_H__
+
+// Make sure the generated files can see rmconfig.h
+
+#ifndef _RMCFG_H
+#include "rmconfig.h"
+#endif
+
+// Maximum depth of message embedding
+#ifndef PRB_MAX_DEPTH
+#define PRB_MAX_DEPTH 8
+#endif
+
+// Maximum number of required fields in a message
+#ifndef PRB_MAX_REQUIRED_FIELDS
+#define PRB_MAX_REQUIRED_FIELDS 16
+#endif
+
+// Provide enum names as strings
+#ifndef PRB_ENUM_NAMES
+#define PRB_ENUM_NAMES 0
+#endif
+
+#if PRB_ENUM_NAMES
+#define PRB_MAYBE_ENUM_NAME(n) n,
+#else
+#define PRB_MAYBE_ENUM_NAME(n)
+#endif
+
+// Provide field names as strings
+#ifndef PRB_FIELD_NAMES
+#define PRB_FIELD_NAMES 0
+#endif
+
+#if PRB_FIELD_NAMES
+#define PRB_MAYBE_FIELD_NAME(n) n,
+#else
+#define PRB_MAYBE_FIELD_NAME(n)
+#endif
+
+// Provide field default values
+#ifndef PRB_FIELD_DEFAULTS
+#define PRB_FIELD_DEFAULTS 0
+#endif
+
+#if PRB_FIELD_DEFAULTS
+#define PRB_MAYBE_FIELD_DEFAULT_DEF(n) n
+#define PRB_MAYBE_FIELD_DEFAULT(n) n,
+#else
+#define PRB_MAYBE_FIELD_DEFAULT_DEF(n)
+#define PRB_MAYBE_FIELD_DEFAULT(n)
+#endif
+
+// Provide message names as strings
+#ifndef PRB_MESSAGE_NAMES
+#define PRB_MESSAGE_NAMES 0
+#endif
+
+#if PRB_MESSAGE_NAMES
+#define PRB_MAYBE_MESSAGE_NAME(n) n,
+#else
+#define PRB_MAYBE_MESSAGE_NAME(n)
+#endif
+
+// Provide method names as strings
+#ifndef PRB_METHOD_NAMES
+#define PRB_METHOD_NAMES 0
+#endif
+
+#if PRB_METHOD_NAMES
+#define PRB_MAYBE_METHOD_NAME(n) n,
+#else
+#define PRB_MAYBE_METHOD_NAME(n)
+#endif
+
+// Provide service names as strings
+#ifndef PRB_SERVICE_NAMES
+#define PRB_SERVICE_NAMES 0
+#endif
+
+#if PRB_SERVICE_NAMES
+#define PRB_MAYBE_SERVICE_NAME(n) n,
+#else
+#define PRB_MAYBE_SERVICE_NAME(n)
+#endif
+
+// Field labels
+#define PRB_REQUIRED 0
+#define PRB_OPTIONAL 1
+#define PRB_REPEATED 2
+
+// Field value types
+#define PRB_DOUBLE   0
+#define PRB_FLOAT    1
+#define PRB_INT32    2
+#define PRB_INT64    3
+#define PRB_UINT32   4
+#define PRB_UINT64   5
+#define PRB_SINT32   6
+#define PRB_SINT64   7
+#define PRB_FIXED32  8
+#define PRB_FIXED64  9
+#define PRB_SFIXED32 10
+#define PRB_SFIXED64 11
+#define PRB_BOOL     12
+#define PRB_ENUM     13
+#define PRB_STRING   14
+#define PRB_BYTES    15
+#define PRB_MESSAGE  16
+
+// Field flags
+#define PRB_HAS_DEFAULT   (1 << 0)
+#define PRB_IS_PACKED     (1 << 1)
+#define PRB_IS_DEPRECATED (1 << 2)
+
+typedef struct
+{
+    unsigned int label : 2;
+    unsigned int typ   : 6;
+    unsigned int flags : 8;
+} PRB_FIELD_OPTS;
+
+// Protocol buffer wire types
+typedef enum
+{
+    WT_VARINT = 0,
+    WT_64BIT  = 1,
+    WT_STRING = 2,
+    WT_32BIT  = 5
+} WIRE_TYPE;
+
+// Protocol buffer wire values
+typedef union
+{
+    NvU64 varint;
+    NvU64 int64;
+    struct {
+        NvU64 len;
+        const void *data;
+    } string;
+    NvU32 int32;
+} WIRE_VALUE;
+
+typedef struct
+{
+    const char *str;
+    NvU32 len;
+} PRB_VALUE_STRING;
+
+typedef struct
+{
+    NvU8 *data;
+    NvU32 len;
+} PRB_VALUE_BYTES;
+
+typedef struct
+{
+    void *data;
+    NvU32 len;
+} PRB_VALUE_MESSAGE;
+
+typedef union
+{
+    NvF64 double_;
+    NvF32 float_;
+    NvS32 int32;
+    NvS64 int64;
+    NvU32 uint32;
+    NvU64 uint64;
+    NvBool bool_;
+    PRB_VALUE_STRING string;
+    PRB_VALUE_BYTES bytes;
+    PRB_VALUE_MESSAGE message;
+    int enum_;
+    int null;
+} PRB_VALUE;
+
+typedef struct
+{
+    int value;
+#if PRB_ENUM_NAMES
+    const char *name;
+#endif
+} PRB_ENUM_MAPPING;
+
+typedef struct
+{
+    const PRB_ENUM_MAPPING *mappings;
+    NvU32 count;
+#if PRB_ENUM_NAMES
+    const char *name;
+#endif
+} PRB_ENUM_DESC;
+
+struct PRB_MSG_DESC;
+
+// Protocol buffer field descriptor
+typedef struct PRB_FIELD_DESC
+{
+    NvU32 number;
+    PRB_FIELD_OPTS opts;
+    const struct PRB_MSG_DESC *msg_desc;
+    const PRB_ENUM_DESC *enum_desc;
+#if PRB_FIELD_NAMES
+    const char *name;
+#endif
+#if PRB_FIELD_DEFAULTS
+    const PRB_VALUE *def;
+#endif
+} PRB_FIELD_DESC;
+
+// Protocol buffer message descriptor
+typedef struct PRB_MSG_DESC
+{
+    NvU32 num_fields;
+    const PRB_FIELD_DESC *fields;
+#if PRB_MESSAGE_NAMES
+    const char *name;
+#endif
+} PRB_MSG_DESC;
+
+// Forward declaration
+struct PRB_SERVICE_DESC;
+
+// Protocol buffer method descriptor
+struct PRB_METHOD_DESC
+{
+    const struct PRB_SERVICE_DESC *service;
+    const PRB_MSG_DESC *req_desc;
+    const PRB_MSG_DESC *res_desc;
+#if PRB_METHOD_NAMES
+    const char *name;
+#endif
+};
+
+// Protocol buffer service descriptor
+typedef struct PRB_SERVICE_DESC
+{
+    const NvU32 num_methods;
+    const struct PRB_METHOD_DESC *methods;
+#if PRB_SERVICE_NAMES
+    const char *name;
+#endif
+} PRB_SERVICE_DESC;
+
+// Simple memory buffer
+typedef struct
+{
+    NvU8 *base;
+    NvU8 *pos;
+    NvU8 *end;
+} PRB_BUF;
+
+// Encoder interface
+typedef struct
+{
+    PRB_BUF buf;
+    const PRB_FIELD_DESC *field_desc;
+    const PRB_MSG_DESC *msg_desc;
+} PRB_ENCODER_STACK_FRAME;
+
+typedef NV_STATUS PrbBufferCallback(void *pEncoder, NvBool bBufferFull);
+
+typedef NvU32 PRB_ENCODER;
+
+#endif // __PRB_H__
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h
new file mode 100644
index 0000000..c530d0e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h
@@ -0,0 +1,3 @@
+
+#include "g_ref_count_nvoc.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h
new file mode 100644
index 0000000..51f8b64
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h
@@ -0,0 +1,134 @@
+/*
+  Portions of this file are based on zlib. Subsequent additions by NVIDIA.
+
+  Copyright (c) 2001-2021, NVIDIA CORPORATION. All rights reserved.
+
+  zlib.h -- interface of the 'zlib' general purpose compression library
+  version 1.1.3, July 9th, 1998
+
+  Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler
+
+  This software is provided 'as-is', without any express or implied
+  warranty. In no event will the authors be held liable for any damages
+  arising from the use of this software.
+
+  Permission is granted to anyone to use this software for any purpose,
+  including commercial applications, and to alter it and redistribute it
+  freely, subject to the following restrictions:
+
+  1. The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
+
+  Jean-loup Gailly        Mark Adler
+  jloup@gzip.org          madler@alumni.caltech.edu
+
+  The data format used by the zlib library is described by RFCs (Request for
+  Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
+  (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+#ifndef _INFLATE_H_
+#define _INFLATE_H_
+
+#include "nvos.h"
+
+#define NOMEMCPY 1
+
+typedef NvU8  uch;
+typedef NvU16 ush;
+typedef NvU32 ulg;
+
+#define GZ_SLIDE_WINDOW_SIZE 32768
+
+#define NEXTBYTE()  pGzState->inbuf[pGzState->inptr++]
+#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
+#define DUMPBITS(n) {b>>=(n);k-=(n);}
+
+/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
+#define BMAX 16         /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288       /* maximum number of codes in any set */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+   that have 16-bit pointers (e.g. PC's in the small or medium model).
+   Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16
+   means that v is a literal, 16 < e < 32 means that v is a pointer to
+   the next table, which codes e - 16 bits, and lastly e == 99 indicates
+   an unused code. If a code with e == 99 is looked up, this implies an
+   error in the data. */
+struct huft {
+    uch e;              /* number of extra bits or operation */
+    uch b;              /* number of bits in this code or subcode */
+    union {
+        ush n;          /* literal, length base, or distance base */
+        struct huft *t; /* pointer to next level of table */
+    } v;
+};
+
+/* The inflate algorithm uses a sliding 32K byte window on the uncompressed
+   stream to find repeated byte strings. This is implemented here as a
+   circular buffer. The index is updated simply by incrementing and then
+   and'ing with 0x7fff (32K-1). */
+/* It is left to other modules to supply the 32K area. It is assumed
+   to be usable as if it were declared "uch slide[32768];" or as just
+   "uch *slide;" and then malloc'ed in the latter case. The definition
+   must be in unzip.h, included above. */
+/* unsigned pGzState->wp;             current position in slide */
+#define WSIZE GZ_SLIDE_WINDOW_SIZE
+#define flush_output(w) (pGzState->wp=(w),flush_window(pGzState))
+#define Tracecv(A,B)
+#define Tracevv(X)
+
+#define GZ_STATE_ITERATOR_OK    0
+#define GZ_STATE_ITERATOR_ERROR 1
+#define GZ_STATE_ITERATOR_END   2
+
+#define GZ_STATE_HUFT_OK     0
+#define GZ_STATE_HUFT_INCOMP 1
+#define GZ_STATE_HUFT_ERROR  2
+
+typedef struct {
+    unsigned int e;         /* table entry flag/number of extra bits */
+    unsigned int n, d;      /* length and index for copy */
+    unsigned int w;         /* current window position */
+    struct huft *t;         /* pointer to table entry */
+    ulg b;                  /* bit buffer */
+    unsigned int k;         /* number of bits in bit buffer */
+    int continue_copy;      /* last flush not finished */
+    unsigned int sn;        /* used by inflated type 0 (stored) block */
+} GZ_INFLATE_CODES_STATE, *PGZ_INFLATE_CODES_STATE;
+
+typedef struct {
+    struct huft *tl;        /* literal/length code table */
+    struct huft *td;        /* distance code table */
+    NvS32 bl;               /* lookup bits for tl */
+    NvS32 bd;               /* lookup bits for td */
+
+    NvU8 *inbuf,*outbuf;
+    NvU32 outBufSize;
+    NvU32 inptr,outptr;
+    NvU32 outLower,outUpper;
+    unsigned int wp;
+    unsigned int wp1;       /* wp1 is index of first unflushed byte in slide window */
+    unsigned int wp2;       /* wp2 is index of last unflushed byte in slide window */
+    uch *window;
+
+    ulg bb;                 /* bit buffer */
+    unsigned int bk;        /* bits in bit buffer */
+    int e;                  /* last block flag */
+
+    int newblock;           /* start a new decompression block */
+    NvU32 optSize;
+    GZ_INFLATE_CODES_STATE codesState;
+
+} GZ_INFLATE_STATE, *PGZ_INFLATE_STATE;
+
+NV_STATUS utilGzIterator(PGZ_INFLATE_STATE pGzState);
+NV_STATUS utilGzAllocate(const NvU8 *zArray, NvU32 numTotalBytes, PGZ_INFLATE_STATE *ppGzState);
+NvU32     utilGzGetData(PGZ_INFLATE_STATE pGzState, NvU32 offset,
NvU32 size, NvU8 * outBuffer); +NV_STATUS utilGzDestroy(PGZ_INFLATE_STATE pGzState); + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h new file mode 100644 index 0000000..a463e06 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _BTREE_H_ +#define _BTREE_H_ + +/*********************** Balanced Tree data structure **********************\ +* * +* Module: BTREE.H * +* API to BTREE routines. * +* * +\***************************************************************************/ + +// +// RED BLACK TREE structure. +// +#include "nvtypes.h" +#include "nvstatus.h" + +typedef struct NODE +{ + // public: + void *Data; + NvU64 keyStart; + NvU64 keyEnd; + + // private: + NvBool isRed; // !IsRed == IsBlack + struct NODE *parent; // tree links + struct NODE *left; + struct NODE *right; + +} NODE, *PNODE; + +//--------------------------------------------------------------------------- +// +// Function prototypes. +// +//--------------------------------------------------------------------------- + +NV_STATUS btreeInsert(PNODE, PNODE *); +NV_STATUS btreeUnlink(PNODE, PNODE *); +NV_STATUS btreeSearch(NvU64, PNODE *, PNODE); +NV_STATUS btreeEnumStart(NvU64, PNODE *, PNODE); +NV_STATUS btreeEnumNext(PNODE *, PNODE); +NV_STATUS btreeDestroyData(PNODE); +NV_STATUS btreeDestroyNodes(PNODE); + +#endif // _BTREE_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h new file mode 100644 index 0000000..6c19e8b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h @@ -0,0 +1,116 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _EHEAP_H_ +#define _EHEAP_H_ + +/*! + * @brief + * EHEAP is an extent allocator. It is just an abstract E(xtent)Heap. + */ + +#include "nvtypes.h" +#include "nvos.h" +#include "containers/btree.h" +#include "utils/nvrange.h" + +typedef struct OBJEHEAP *POBJEHEAP; +typedef struct OBJEHEAP OBJEHEAP; + +typedef struct EMEMBLOCK *PEMEMBLOCK; +typedef struct EMEMBLOCK +{ + NvU64 begin; + NvU64 end; + NvU64 align; + NvU32 growFlags; + NvU32 refCount; + NvU32 owner; + NODE node; + PEMEMBLOCK prevFree; + PEMEMBLOCK nextFree; + PEMEMBLOCK prev; + PEMEMBLOCK next; + void *pData; +} EMEMBLOCK; + +typedef NvBool EHeapOwnershipComparator(void*, void*); + +typedef NV_STATUS (*EHeapDestruct)(POBJEHEAP); +typedef NV_STATUS (*EHeapAlloc)(POBJEHEAP, NvU32, NvU32 *, NvU64 *, NvU64 *, NvU64 , NvU64, PEMEMBLOCK*, void*, EHeapOwnershipComparator*); +typedef NV_STATUS (*EHeapFree)(POBJEHEAP, NvU64); +typedef void (*EHeapInfo)(POBJEHEAP, NvU64 *, NvU64 *,NvU64 *, NvU64 *, NvU32 *, NvU64 *); +typedef void (*EHeapInfoForRange)(POBJEHEAP, NV_RANGE, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +typedef NV_STATUS (*EHeapGetSize)(POBJEHEAP, NvU64 *); +typedef NV_STATUS (*EHeapGetFree)(POBJEHEAP, NvU64 *); +typedef NV_STATUS (*EHeapGetBase)(POBJEHEAP, NvU64 *); +typedef PEMEMBLOCK (*EHeapGetBlock)(POBJEHEAP, NvU64, NvBool bReturnFreeBlock); +typedef NV_STATUS (*EHeapSetAllocRange)(POBJEHEAP, NvU64 rangeLo, NvU64 rangeHi); +typedef NV_STATUS (*EHeapTraversalFn)(POBJEHEAP, void *pEnv, PEMEMBLOCK, NvU32 *pContinue, NvU32 *pInvalCursor); +typedef NV_STATUS (*EHeapTraverse)(POBJEHEAP, void *pEnv, EHeapTraversalFn, NvS32 direction); +typedef NvU32 (*EHeapGetNumBlocks)(POBJEHEAP); +typedef NV_STATUS (*EHeapGetBlockInfo)(POBJEHEAP, NvU32, NVOS32_HEAP_DUMP_BLOCK *); +typedef NV_STATUS (*EHeapSetOwnerIsolation)(POBJEHEAP, NvBool bEnable, NvU32 granularity); + +struct OBJEHEAP +{ + // Public heap interface methods + EHeapDestruct eheapDestruct; + EHeapAlloc eheapAlloc; + EHeapFree eheapFree; + EHeapInfo eheapInfo; + EHeapInfoForRange eheapInfoForRange; + EHeapGetSize eheapGetSize; + EHeapGetFree eheapGetFree; + EHeapGetBase eheapGetBase; + EHeapGetBlock eheapGetBlock; + EHeapSetAllocRange eheapSetAllocRange; + EHeapTraverse eheapTraverse; + EHeapGetNumBlocks eheapGetNumBlocks; + EHeapGetBlockInfo eheapGetBlockInfo; + EHeapSetOwnerIsolation eheapSetOwnerIsolation; + + // private data + NvU64 base; + NvU64 total; + NvU64 
free; + NvU64 rangeLo; + NvU64 rangeHi; + NvBool bOwnerIsolation; + NvU32 ownerGranularity; + PEMEMBLOCK pBlockList; + PEMEMBLOCK pFreeBlockList; + NvU32 memHandle; + NvU32 numBlocks; + NvU32 sizeofMemBlock; + PNODE pBlockTree; + // user can specify num of EMEMBLOCK structs to + // be allocated at heap construction time so that + // we will not call portMemAllocNonPaged during eheapAlloc. + NvU32 numPreAllocMemStruct; + PEMEMBLOCK pFreeMemStructList; + PEMEMBLOCK pPreAllocAddr; +}; + +extern void constructObjEHeap(POBJEHEAP, NvU64, NvU64, NvU32, NvU32); + +#endif // _EHEAP_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h new file mode 100644 index 0000000..1dc0ab7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h @@ -0,0 +1,331 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_LIST_H_ +#define _NV_CONTAINERS_LIST_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" + +/** + * @defgroup NV_CONTAINERS_LIST List + * + * @brief List (sequence) of user-defined values. + * + * @details Order of values is not necessarily increasing or sorted, but order is + * preserved across mutation. Please see + * http://en.wikipedia.org/wiki/Sequence for a formal definition. + * + * The provided interface is abstract, decoupling the user from the underlying + * list implementation. Two options are available with regard to memory + * management, intrusive and non-intrusive. Users can select either one based + * on different situations. Despite the two versions of the list, the following + * implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(1), + * * Unless stated otherwise. + * + * - Memory Usage: + * * \b O(N) memory is required for N values. + * * Intrusive and non-intrusive variants are provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. 
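+ *
+ * A minimal usage sketch of the non-intrusive variant (the element type
+ * LIST_ELEM, the list type ElemList and example() are illustrative names,
+ * not part of this header):
+ *
+ * @code
+ * typedef struct { NvU32 id; } LIST_ELEM;
+ * MAKE_LIST(ElemList, LIST_ELEM);
+ *
+ * void example(PORT_MEM_ALLOCATOR *pAllocator)
+ * {
+ *     ElemList list;
+ *     LIST_ELEM value = { 7 };
+ *     LIST_ELEM *p;
+ *
+ *     listInit(&list, pAllocator);        // non-intrusive: values are copied
+ *     p = listAppendValue(&list, &value); // p points at the list's own copy
+ *     for (p = listHead(&list); p != NULL; p = listNext(&list, p))
+ *     {
+ *         // visit each value in order
+ *     }
+ *     listDestroy(&list);                 // frees all copied values
+ * }
+ * @endcode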
+ */ + +#define MAKE_LIST(listTypeName, dataType) \ + typedef union listTypeName##Iter \ + { \ + dataType *pValue; \ + ListIterBase iter; \ + } listTypeName##Iter; \ + typedef union listTypeName \ + { \ + NonIntrusiveList real; \ + CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } listTypeName + +#define DECLARE_LIST(listTypeName) \ + typedef union listTypeName##Iter listTypeName##Iter; \ + typedef union listTypeName listTypeName + +#define MAKE_INTRUSIVE_LIST(listTypeName, dataType, node) \ + typedef union listTypeName##Iter \ + { \ + dataType *pValue; \ + ListIterBase iter; \ + } listTypeName##Iter; \ + typedef union listTypeName \ + { \ + IntrusiveList real; \ + CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter); \ + CONT_TAG_INTRUSIVE(dataType, node); \ + } listTypeName \ + +#define DECLARE_INTRUSIVE_LIST(listTypeName) \ + typedef union listTypeName##Iter listTypeName##Iter; \ + typedef union listTypeName listTypeName + +/** +* @brief Internal node structure to embed within intrusive list values. +*/ +typedef struct ListNode ListNode; + +/** + * @brief Base type common to both intrusive and non-intrusive variants. + */ +typedef struct ListBase ListBase; + +/** + * @brief Non-intrusive list (container-managed memory). + */ +typedef struct NonIntrusiveList NonIntrusiveList; + +/** + * @brief Intrusive list (user-managed memory). + */ +typedef struct IntrusiveList IntrusiveList; + +/** + * @brief Iterator over a range of list values. + * + * See @ref iterators for usage details. + */ +typedef struct ListIterBase ListIterBase; + +struct ListNode +{ + /// @privatesection + ListNode *pPrev; + ListNode *pNext; +#if PORT_IS_CHECKED_BUILD + ListBase *pList; +#endif +}; + +struct ListIterBase +{ + void *pValue; + ListBase *pList; + ListNode *pNode; + ListNode *pLast; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast); +CONT_VTABLE_DECL(ListBase, ListIterBase); + +struct ListBase +{ + CONT_VTABLE_FIELD(ListBase); + ListNode *pHead; + ListNode *pTail; + NvU32 count; + NvS32 nodeOffset; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +struct NonIntrusiveList +{ + ListBase base; + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; +}; + +struct IntrusiveList +{ + ListBase base; +}; + +#define listInit(pList, pAllocator) \ + listInit_IMPL(&((pList)->real), pAllocator, sizeof(*(pList)->valueSize)) + +#define listInitIntrusive(pList) \ + listInitIntrusive_IMPL(&((pList)->real), sizeof(*(pList)->nodeOffset)) + +#define listDestroy(pList) \ + CONT_DISPATCH_ON_KIND(pList, \ + listDestroy_IMPL((NonIntrusiveList*)&((pList)->real)), \ + listDestroyIntrusive_IMPL(&((pList)->real.base)), \ + contDispatchVoid_STUB()) + +#define listCount(pList) \ + listCount_IMPL(&((pList)->real).base) + +#define listInsertNew(pList, pNext) \ + CONT_CAST_ELEM(pList, \ + listInsertNew_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext))) + +#define listAppendNew(pList) \ + CONT_CAST_ELEM(pList, listAppendNew_IMPL(&(pList)->real)) + +#define listPrependNew(pList) \ + CONT_CAST_ELEM(pList, listPrependNew_IMPL(&(pList)->real)) + +#define listInsertValue(pList, pNext, pValue) \ + CONT_CAST_ELEM(pList, \ + listInsertValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext), \ + CONT_CHECK_ARG(pList, pValue))) + +#define listAppendValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listAppendValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue))) + +#define 
listPrependValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listPrependValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listInsertExisting(pList, pNext, pValue) \ + listInsertExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext), \ + CONT_CHECK_ARG(pList, pValue)) + +#define listAppendExisting(pList, pValue) \ + listAppendExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listPrependExisting(pList, pValue) \ + listPrependExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listRemove(pList, pValue) \ + CONT_DISPATCH_ON_KIND(pList, \ + listRemove_IMPL((NonIntrusiveList*)&((pList)->real), \ + CONT_CHECK_ARG(pList, pValue)), \ + listRemoveIntrusive_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue)), \ + contDispatchVoid_STUB()) + +#define listRemoveFirstByValue(pList, pValue) \ + listRemoveFirstByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listRemoveAllByValue(pList, pValue) \ + listRemoveAllByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listClear(pList) \ + listDestroy(pList) + +#define listFindByValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listFindByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listHead(pList) \ + CONT_CAST_ELEM(pList, listHead_IMPL(&((pList)->real).base)) + +#define listTail(pList) \ + CONT_CAST_ELEM(pList, listTail_IMPL(&((pList)->real).base)) + +#define listNext(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listNext_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listPrev(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listPrev_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listIterAll(pList) \ + listIterRange(pList, listHead(pList), listTail(pList)) + +#define listIterRange(pList, pFirst, pLast) \ + CONT_ITER_RANGE(pList, &listIterRange_IMPL, \ + CONT_CHECK_ARG(pList, pFirst), CONT_CHECK_ARG(pList, pLast)) + +#define listIterNext(pIt) \ + listIterNext_IMPL(&((pIt)->iter)) + +void listInit_IMPL(NonIntrusiveList *pList, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize); +void listInitIntrusive_IMPL(IntrusiveList *pList, NvS32 nodeOffset); +void listDestroy_IMPL(NonIntrusiveList *pList); +void listDestroyIntrusive_IMPL(ListBase *pList); + +NvU32 listCount_IMPL(ListBase *pList); +void *listInsertNew_IMPL(NonIntrusiveList *pList, void *pNext); +void *listAppendNew_IMPL(NonIntrusiveList *pList); +void *listPrependNew_IMPL(NonIntrusiveList *pList); +void *listInsertValue_IMPL(NonIntrusiveList *pList, void *pNext, void *pValue); +void *listAppendValue_IMPL(NonIntrusiveList *pList, void *pValue); +void *listPrependValue_IMPL(NonIntrusiveList *pList, void *pValue); +void listInsertExisting_IMPL(IntrusiveList *pList, void *pNext, void *pValue); +void listAppendExisting_IMPL(IntrusiveList *pList, void *pValue); +void listPrependExisting_IMPL(IntrusiveList *pList, void *pValue); +void listRemove_IMPL(NonIntrusiveList *pList, void *pValue); +void listRemoveIntrusive_IMPL(ListBase *pList, void *pValue); +void listRemoveFirstByValue_IMPL(NonIntrusiveList *pList, void *pValue); +void listRemoveAllByValue_IMPL(NonIntrusiveList *pList, void *pValue); + +void *listFindByValue_IMPL(NonIntrusiveList *pList, void *pValue); +void *listHead_IMPL(ListBase *pList); +void *listTail_IMPL(ListBase *pList); +void *listNext_IMPL(ListBase *pList, void *pValue); +void *listPrev_IMPL(ListBase *pList, void *pValue); + +ListIterBase listIterAll_IMPL(ListBase *pList); 
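+// A typical iterator loop over the macros above, as a sketch (ElemList as
+// declared with MAKE_LIST; process() is an illustrative callback, not part
+// of this header):
+//
+//     ElemListIter it = listIterAll(&list);
+//     while (listIterNext(&it))
+//         process(it.pValue);
+//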
+ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast); +NvBool listIterNext_IMPL(ListIterBase *pIt); + +static NV_FORCEINLINE ListNode * +listValueToNode(ListBase *pList, void *pValue) +{ + if (NULL == pList) return NULL; + if (NULL == pValue) return NULL; + return (ListNode*)((NvU8*)pValue + pList->nodeOffset); +} + +static NV_FORCEINLINE void * +listNodeToValue(ListBase *pList, ListNode *pNode) +{ + if (NULL == pList) return NULL; + if (NULL == pNode) return NULL; + return (NvU8*)pNode - pList->nodeOffset; +} + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_LIST_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h new file mode 100644 index 0000000..b5f20a4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h @@ -0,0 +1,300 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_MAP_H_ +#define _NV_CONTAINERS_MAP_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +/** + * @defgroup NV_CONTAINERS_MAP Map + * + * @brief Map (ordered) from 64-bit integer keys to user-defined values. + * + * @details The provided interface is abstract, decoupling the user from the + * underlying ordered map implementation. Two options are available with regard + * to memory management, intrusive and non-intrusive. Users can select either + * one based on different situations. Despite the two versions of the map, + * the following implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(log N), + * * Unless stated otherwise, + * * Where N is the number of values in the map. + * + * - Memory Usage: + * * \b O(N) memory is required for N values. + * * Intrusive and non-intrusive variants are provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. 
+ * + */ + +#define MAKE_MAP(mapTypeName, dataType) \ + typedef union mapTypeName##Iter \ + { \ + dataType *pValue; \ + MapIterBase iter; \ + } mapTypeName##Iter; \ + typedef union mapTypeName \ + { \ + NonIntrusiveMap real; \ + CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } mapTypeName + +#define DECLARE_MAP(mapTypeName) \ + typedef union mapTypeName##Iter mapTypeName##Iter; \ + typedef union mapTypeName mapTypeName + +#define MAKE_INTRUSIVE_MAP(mapTypeName, dataType, node) \ + typedef union mapTypeName##Iter \ + { \ + dataType *pValue; \ + MapIterBase iter; \ + } mapTypeName##Iter; \ + typedef union mapTypeName \ + { \ + IntrusiveMap real; \ + CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \ + CONT_TAG_INTRUSIVE(dataType, node); \ + } mapTypeName + +#define DECLARE_INTRUSIVE_MAP(mapTypeName) \ + typedef union mapTypeName##Iter mapTypeName##Iter; \ + typedef union mapTypeName mapTypeName + +/** + * @brief Internal node structure to embed within intrusive map values. + */ +typedef struct MapNode MapNode; + +/** + * @brief Base type common to both intrusive and non-intrusive variants. + */ +typedef struct MapBase MapBase; + +/** + * @brief Non-intrusive map (container-managed memory). + */ +typedef struct NonIntrusiveMap NonIntrusiveMap; + +/** + * @brief Intrusive map (user-managed memory). + */ +typedef struct IntrusiveMap IntrusiveMap; + +/** + * @brief Iterator over a range of map values. + * + * See @ref iterators for usage details. + */ +typedef struct MapIterBase MapIterBase; + +struct MapNode +{ + /// @privatesection + NvU64 key; + MapNode *pParent; + MapNode *pLeft; + MapNode *pRight; + NvBool bIsRed; +#if PORT_IS_CHECKED_BUILD + MapBase *pMap; +#endif +}; + +struct MapIterBase +{ + void *pValue; + MapBase *pMap; + MapNode *pNode; + MapNode *pLast; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +MapIterBase mapIterRange_IMPL(MapBase *pMap, void *pFirst, void *pLast); +CONT_VTABLE_DECL(MapBase, MapIterBase); + +struct MapBase +{ + CONT_VTABLE_FIELD(MapBase); + MapNode *pRoot; + NvS32 nodeOffset; + NvU32 count; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +struct NonIntrusiveMap +{ + MapBase base; + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; +}; + +struct IntrusiveMap +{ + MapBase base; +}; + +#define mapInit(pMap, pAllocator) \ + mapInit_IMPL(&((pMap)->real), pAllocator, sizeof(*(pMap)->valueSize)) + +#define mapInitIntrusive(pMap) \ + mapInitIntrusive_IMPL(&((pMap)->real), sizeof(*(pMap)->nodeOffset)) + +#define mapDestroy(pMap) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapDestroy_IMPL((NonIntrusiveMap*)&((pMap)->real)), \ + mapDestroyIntrusive_IMPL(&((pMap)->real.base)), \ + contDispatchVoid_STUB()) + +#define mapCount(pMap) \ + mapCount_IMPL(&((pMap)->real).base) + +#define mapKey(pMap, pValue) \ + mapKey_IMPL(&((pMap)->real).base, pValue) + +#define mapInsertNew(pMap, key) \ + CONT_CAST_ELEM(pMap, mapInsertNew_IMPL(&(pMap)->real, key)) + +#define mapInsertValue(pMap, key, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapInsertValue_IMPL(&(pMap)->real, key, \ + CONT_CHECK_ARG(pMap, pValue))) + +#define mapInsertExisting(pMap, key, pValue) \ + mapInsertExisting_IMPL(&(pMap)->real, key, \ + CONT_CHECK_ARG(pMap, pValue)) + +#define mapRemove(pMap, pValue) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapRemove_IMPL((NonIntrusiveMap*)&((pMap)->real), \ + CONT_CHECK_ARG(pMap, pValue)), \ + mapRemoveIntrusive_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue)), \ + contDispatchVoid_STUB()) + +#define 
mapClear(pMap) \ + mapDestroy(pMap) + +#define mapRemoveByKey(pMap, key) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapRemoveByKey_IMPL((NonIntrusiveMap*)&((pMap)->real), key), \ + mapRemoveByKeyIntrusive_IMPL(&((pMap)->real).base, key), \ + contDispatchVoid_STUB()) + +#define mapFind(pMap, key) \ + CONT_CAST_ELEM(pMap, mapFind_IMPL(&((pMap)->real).base, key)) + +#define mapFindGEQ(pMap, keyMin) \ + CONT_CAST_ELEM(pMap, \ + mapFindGEQ_IMPL(&((pMap)->real).base, keyMin)) + +#define mapFindLEQ(pMap, keyMax) \ + CONT_CAST_ELEM(pMap, \ + mapFindLEQ_IMPL(&((pMap)->real).base, keyMax)) + +#define mapNext(pMap, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapNext_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue))) + +#define mapPrev(pMap, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapPrev_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue))) + +#define mapIterAll(pMap) \ + mapIterRange(pMap, mapFindGEQ(pMap, 0), mapFindLEQ(pMap, NV_U64_MAX)) + +#define mapIterRange(pMap, pFirst, pLast) \ + CONT_ITER_RANGE(pMap, &mapIterRange_IMPL, \ + CONT_CHECK_ARG(pMap, pFirst), CONT_CHECK_ARG(pMap, pLast)) + +#define mapIterNext(pIt) \ + mapIterNext_IMPL(&((pIt)->iter)) + +void mapInit_IMPL(NonIntrusiveMap *pMap, + PORT_MEM_ALLOCATOR *pAllocator, NvU32 valueSize); +void mapInitIntrusive_IMPL(IntrusiveMap *pMap, NvS32 nodeOffset); +void mapDestroy_IMPL(NonIntrusiveMap *pMap); +void mapDestroyIntrusive_IMPL(MapBase *pMap); + +NvU32 mapCount_IMPL(MapBase *pMap); +NvU64 mapKey_IMPL(MapBase *pMap, void *pValue); + +void *mapInsertNew_IMPL(NonIntrusiveMap *pMap, NvU64 key); +void *mapInsertValue_IMPL(NonIntrusiveMap *pMap, NvU64 key, void *pValue); +NvBool mapInsertExisting_IMPL(IntrusiveMap *pMap, NvU64 key, void *pValue); +void mapRemove_IMPL(NonIntrusiveMap *pMap, void *pValue); +void mapRemoveIntrusive_IMPL(MapBase *pMap, void *pValue); +void mapRemoveByKey_IMPL(NonIntrusiveMap *pMap, NvU64 key); +void mapRemoveByKeyIntrusive_IMPL(MapBase *pMap, NvU64 key); + +void *mapFind_IMPL(MapBase *pMap, NvU64 key); +void *mapFindGEQ_IMPL(MapBase *pMap, NvU64 keyMin); +void *mapFindLEQ_IMPL(MapBase *pMap, NvU64 keyMax); +void *mapNext_IMPL(MapBase *pMap, void *pValue); +void *mapPrev_IMPL(MapBase *pMap, void *pValue); + +MapIterBase mapIterAll_IMPL(MapBase *pMap); +NvBool mapIterNext_IMPL(MapIterBase *pIt); + +static NV_FORCEINLINE MapNode * +mapValueToNode(MapBase *pMap, void *pValue) +{ + if (NULL == pMap) return NULL; + if (NULL == pValue) return NULL; + return (MapNode*)((NvU8*)pValue + pMap->nodeOffset); +} + +static NV_FORCEINLINE void * +mapNodeToValue(MapBase *pMap, MapNode *pNode) +{ + if (NULL == pMap) return NULL; + if (NULL == pNode) return NULL; + return (NvU8*)pNode - pMap->nodeOffset; +} + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_MAP_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h new file mode 100644 index 0000000..7231c8b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h @@ -0,0 +1,296 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_MULTIMAP_H_ +#define _NV_CONTAINERS_MULTIMAP_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "containers/map.h" + +/** + * @defgroup NV_CONTAINERS_MULTIMAP Multimap + * + * @brief Two-layer multimap (ordered) from pairs of 64-bit unsigned integer + * keys to user-defined values. + * + * @details The provided interface is abstract, decoupling the user from the + * underlying ordered multimap implementation. Currently, memory management is + * limited to non-intrusive container-managed memory. The following + * implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(log M + log N), + * * Unless stated otherwise, + * * Where M is the number of submaps and N is the total number of values in + * the map. + * + * - Memory Usage: + * * \b O(M + N) memory is required for M submaps and N values. + * * Only a non-intrusive variant is provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. 
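+ *
+ * A brief usage sketch (MM_ELEM, ElemMultimap and example() are
+ * illustrative names, not part of this header):
+ *
+ * @code
+ * typedef struct { NvU32 data; } MM_ELEM;
+ * MAKE_MULTIMAP(ElemMultimap, MM_ELEM);
+ *
+ * void example(PORT_MEM_ALLOCATOR *pAllocator)
+ * {
+ *     ElemMultimap mm;
+ *     MM_ELEM *p;
+ *
+ *     multimapInit(&mm, pAllocator);
+ *     multimapInsertSubmap(&mm, 1);         // create submap with key 1
+ *     p = multimapInsertItemNew(&mm, 1, 2); // item key 2 within submap 1
+ *     p = multimapFindItem(&mm, 1, 2);      // finds the same element
+ *     multimapRemoveItem(&mm, p);
+ *     multimapDestroy(&mm);
+ * }
+ * @endcode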
+ * + */ + +#define MAKE_MULTIMAP(multimapTypeName, dataType) \ + typedef struct multimapTypeName##Leaf \ + { \ + dataType data; \ + MultimapNode node; \ + } multimapTypeName##Leaf; \ + MAKE_INTRUSIVE_MAP(multimapTypeName##Submap, multimapTypeName##Leaf, \ + node.submapNode); \ + MAKE_MAP(multimapTypeName##Supermap, multimapTypeName##Submap); \ + typedef union multimapTypeName##Iter \ + { \ + dataType *pValue; \ + MultimapIterBase iter; \ + } multimapTypeName##Iter; \ + typedef union multimapTypeName \ + { \ + CONT_TAG_TYPE(MultimapBase, dataType, multimapTypeName##Iter); \ + struct { MultimapBase base; } real; \ + struct \ + { \ + /* This field simply aligns map with the one in MultimapBase */ \ + CONT_VTABLE_FIELD(MultimapBase); \ + multimapTypeName##Supermap map; \ + } type; \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + struct {char _[NV_OFFSETOF(multimapTypeName##Leaf, node)];} *nodeOffset; \ + struct {char _[sizeof(multimapTypeName##Submap)];} *submapSize; \ + } multimapTypeName; + +#define DECLARE_MULTIMAP(multimapTypeName) \ + typedef struct multimapTypeName##Leaf multimapTypeName##Leaf; \ + DECLARE_INTRUSIVE_MAP(multimapTypeName##Submap); \ + DECLARE_MAP(multimapTypeName##Supermap); \ + typedef union multimapTypeName##Iter multimapTypeName##Iter; \ + typedef union multimapTypeName multimapTypeName + +/** + * @brief Internal node structure associated with multimap values. + */ +typedef struct MultimapNode MultimapNode; + +/** + * @brief Base type common to all multimap iterator types. + */ +typedef struct MultimapIterBase MultimapIterBase; + +/** + * @brief Base type common to all multimap types. + */ +typedef struct MultimapBase MultimapBase; + +struct MultimapNode +{ + void *pSubmap; + MapNode submapNode; +}; + +struct MultimapIterBase +{ + void *pValue; + MultimapBase *pMultimap; + void *pNext; + void *pLast; +}; + +CONT_VTABLE_DECL(MultimapBase, MultimapIterBase); + +struct MultimapBase +{ + CONT_VTABLE_FIELD(MultimapBase); + NonIntrusiveMap map; + NvS32 multimapNodeOffset; + NvU32 itemCount; + NvU32 itemSize; +}; + + +#define multimapInit(pMultimap, pAllocator) \ + multimapInit_IMPL(&(pMultimap)->real.base, pAllocator, \ + sizeof(*(pMultimap)->valueSize), \ + sizeof(*(pMultimap)->nodeOffset), \ + sizeof(*(pMultimap)->submapSize)) + +#define multimapDestroy(pMultimap) \ + multimapDestroy_IMPL(&(pMultimap)->real.base) + +#define multimapClear(pMultimap) \ + multimapClear_IMPL(&(pMultimap)->real.base) + +#define multimapCountSubmaps(pMultimap) \ + mapCount(&(pMultimap)->type.map) + +#define multimapCountItems(pMultimap) \ + (pMultimap)->real.base.itemCount + +#define multimapFindSubmap(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmap_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapFindSubmapLEQ(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmapLEQ_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapFindSubmapGEQ(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmapGEQ_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapCountSubmapItems(pMultimap, pSubmap) \ + mapCount(pSubmap) + +#define multimapInsertItemNew(pMultimap, submapKey, itemKey) \ + CONT_CAST_ELEM(pMultimap, \ + multimapInsertItemNew_IMPL(&(pMultimap)->real.base, submapKey, itemKey)) + +#define multimapInsertItemValue(pMultimap, submapKey, itemKey, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapInsertItemValue_IMPL(&(pMultimap)->real.base, \ + submapKey, itemKey, pValue)) + +#define 
multimapInsertSubmap(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapInsertSubmap_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapFindItem(pMultimap, submapKey, itemKey) \ + CONT_CAST_ELEM(pMultimap, \ + multimapFindItem_IMPL(&(pMultimap)->real.base, submapKey, itemKey)) + +#define multimapRemoveItem(pMultimap, pValue) \ + multimapRemoveItem_IMPL(&(pMultimap)->real.base, pValue) + +#define multimapRemoveSubmap(pMultimap, pSubmap) \ + multimapRemoveSubmap_IMPL(&(pMultimap)->real.base, &(pSubmap)->real.base) + +#define multimapRemoveItemByKey(pMultimap, submapKey, itemKey) \ + multimapRemoveItemByKey_IMPL(&(pMultimap)->real.base, submapKey, itemKey) + +#define multimapNextItem(pMultimap, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapNextItem_IMPL(&(pMultimap)->real.base, pValue)) + +#define multimapPrevItem(pMultimap, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapPrevItem_IMPL(&(pMultimap)->real.base, pValue)) + +#define multimapFirstItem(pMultimap) \ + CONT_CAST_ELEM(pMultimap, multimapFirstItem_IMPL(&(pMultimap)->real.base)) + +#define multimapLastItem(pMultimap) \ + CONT_CAST_ELEM(pMultimap, multimapLastItem_IMPL(&(pMultimap)->real.base)) + +#define multimapItemIterAll(pMultimap) \ + multimapItemIterRange(pMultimap, \ + multimapFirstItem(pMultimap), multimapLastItem(pMultimap)) + +#define multimapItemIterRange(pMultimap, pFirst, pLast) \ + CONT_ITER_RANGE(pMultimap, multimapItemIterRange_IMPL, \ + CONT_CHECK_ARG(pMultimap, pFirst), CONT_CHECK_ARG(pMultimap, pLast)) + +#define multimapSubmapIterItems(pMultimap, pSubmap) \ + multimapItemIterRange(pMultimap, \ + &mapFindGEQ(pSubmap, 0)->data, &mapFindLEQ(pSubmap, NV_U64_MAX)->data) + +#define multimapItemIterNext(pIt) \ + multimapItemIterNext_IMPL(&(pIt)->iter) + +#define multimapSubmapIterAll(pMultimap) \ + mapIterAll(&(pMultimap)->type.map) + +#define multimapSubmapIterRange(pMultimap, pFirst, pLast) \ + mapIterRange(&(pMultimap)->type.map, pFirst, pLast) + +#define multimapSubmapIterNext(pIt) \ + mapIterNext(pIt) + +#define multimapItemKey(pMultimap, pValue) \ + multimapValueToNode(&(pMultimap)->real.base, pValue)->submapNode.key + +#define multimapSubmapKey(pMultimap, pSubmap) \ + mapKey(&(pMultimap)->type.map, pSubmap) + +void multimapInit_IMPL(MultimapBase *pBase, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize, NvS32 nodeOffset, NvU32 submapSize); +void multimapRemoveSubmap_IMPL(MultimapBase *pMultimap, MapBase *submap); +void multimapDestroy_IMPL(MultimapBase *pBase); +void multimapClear_IMPL(MultimapBase *pBase); + +void *multimapInsertSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey); + +void *multimapFindSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey); +void *multimapFindSubmapLEQ_IMPL(MultimapBase *pBase, NvU64 submapKey); +void *multimapFindSubmapGEQ_IMPL(MultimapBase *pBase, NvU64 submapKey); + +void *multimapInsertItemNew_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); +void *multimapInsertItemValue_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey, void *pValue); + +void *multimapFindItem_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); + +void multimapRemoveItem_IMPL(MultimapBase *pBase, void *pLeaf); +void multimapRemoveItemByKey_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); + +void *multimapNextItem_IMPL(MultimapBase *pBase, void *pValue); +void *multimapPrevItem_IMPL(MultimapBase *pBase, void *pValue); + +void *multimapFirstItem_IMPL(MultimapBase *pBase); +void *multimapLastItem_IMPL(MultimapBase *pBase); + 
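+// Nested traversal sketch using the iterator macros above (ElemMultimap as
+// declared with MAKE_MULTIMAP; visit() is illustrative; assumes every
+// submap is non-empty):
+//
+//     ElemMultimapSupermapIter subIt = multimapSubmapIterAll(&mm);
+//     while (multimapSubmapIterNext(&subIt))
+//     {
+//         ElemMultimapIter it = multimapSubmapIterItems(&mm, subIt.pValue);
+//         while (multimapItemIterNext(&it))
+//             visit(it.pValue);
+//     }
+//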
+MultimapIterBase multimapItemIterRange_IMPL(MultimapBase *pBase, + void *pFirst, void *pLast); +NvBool multimapItemIterNext_IMPL(MultimapIterBase *pIt); + +static NV_FORCEINLINE MultimapNode * +multimapValueToNode(MultimapBase *pBase, void *pValue) +{ + if (NULL == pBase || NULL == pValue) return NULL; + + return (MultimapNode *)((NvU8*)pValue + pBase->multimapNodeOffset); +} +static NV_FORCEINLINE void * +multimapNodeToValue(MultimapBase *pBase, MultimapNode *pNode) +{ + if (NULL == pBase || NULL == pNode) return NULL; + + return (NvU8*)pNode - pBase->multimapNodeOffset; +} + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_MULTIMAP_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h new file mode 100644 index 0000000..e23a303 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h @@ -0,0 +1,143 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_CONTAINERS_QUEUE_H +#define NV_CONTAINERS_QUEUE_H + +#include "containers/type_safety.h" +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAKE_QUEUE_CIRCULAR(queueTypeName, dataType) \ + typedef struct queueTypeName##Iter_UNUSED \ + { \ + NvLength dummyElem; \ + } queueTypeName##Iter_UNUSED; \ + typedef union queueTypeName \ + { \ + Queue real; \ + CONT_TAG_TYPE(Queue, dataType, queueTypeName##Iter_UNUSED); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } queueTypeName + +#define DECLARE_QUEUE_CIRCULAR(queueTypeName) \ + typedef struct queueTypeName##Iter_UNUSED queueTypeName##Iter_UNUSED; \ + typedef union queueTypeName queueTypeName + +struct Queue; +struct QueueContext; + +typedef void QueueCopyData(NvLength msgSize, NvLength opIdx, + struct QueueContext *pCtx, void *pClientData, + NvLength count, NvBool bCopyIn); + +typedef struct QueueContext { + QueueCopyData *pCopyData; // Function performing accesses to queue memory. + void *pData; // Private data. 
+} QueueContext; + +typedef struct Queue { + NvLength capacity; // Queue Capacity + PORT_MEM_ALLOCATOR *pAllocator; // Set of functions used for managing queue memory + void *pData; // Queue memory, if managed by pAllocator + NvLength msgSize; // Message size produced by Producer + NvLength getIdx NV_ALIGN_BYTES(64);// GET index modified by Consumer + NvLength putIdx NV_ALIGN_BYTES(64);// PUT index modified by Producer +} Queue; + +//for future use (more possible queues - just an example, currently only CIRCULAR will get implemented) +typedef enum +{ + QUEUE_TYPE_CIRCULAR = 1, + //QUEUE_TYPE_LINEAR = 2, + //QUEUE_TYPE_PRIORITY = 3, +}QUEUE_TYPE; + +#define queueInit(pQueue, pAllocator, capacity) \ + circularQueueInit_IMPL(&((pQueue)->real), pAllocator, \ + capacity, sizeof(*(pQueue)->valueSize)) + +#define queueInitNonManaged(pQueue, capacity) \ + circularQueueInitNonManaged_IMPL(&((pQueue)->real), \ + capacity, sizeof(*(pQueue)->valueSize)) + +#define queueDestroy(pQueue) \ + circularQueueDestroy_IMPL(&((pQueue)->real)) + +#define queueCount(pQueue) \ + circularQueueCount_IMPL(&((pQueue)->real)) + +#define queueCapacity(pQueue) \ + circularQueueCapacity_IMPL(&((pQueue)->real)) + +#define queueIsEmpty(pQueue) \ + circularQueueIsEmpty_IMPL(&((pQueue)->real)) + +#define queuePush(pQueue, pElements, numElements) \ + circularQueuePush_IMPL(&(pQueue)->real, \ + CONT_CHECK_ARG(pQueue, pElements), numElements) + +#define queuePushNonManaged(pQueue, pCtx, pElements, numElements) \ + circularQueuePushNonManaged_IMPL(&(pQueue)->real, pCtx, \ + CONT_CHECK_ARG(pQueue, pElements), numElements) + +#define queuePeek(pQueue) \ + CONT_CAST_ELEM(pQueue, circularQueuePeek_IMPL(&((pQueue)->real))) + +#define queuePop(pQueue) \ + circularQueuePop_IMPL(&((pQueue)->real)) + +#define queuePopAndCopy(pQueue, pCopyTo) \ + circularQueuePopAndCopy_IMPL(&((pQueue)->real), \ + CONT_CHECK_ARG(pQueue, pCopyTo)) + +#define queuePopAndCopyNonManaged(pQueue, pCtx, pCopyTo) \ + circularQueuePopAndCopyNonManaged_IMPL(&((pQueue)->real), pCtx, \ + CONT_CHECK_ARG(pQueue, pCopyTo)) + +NV_STATUS circularQueueInit_IMPL(Queue *pQueue, PORT_MEM_ALLOCATOR *pAllocator, + NvLength capacity, NvLength msgSize); +NV_STATUS circularQueueInitNonManaged_IMPL(Queue *pQueue, NvLength capacity, + NvLength msgSize); +void circularQueueDestroy_IMPL(Queue *pQueue); +NvLength circularQueueCapacity_IMPL(Queue *pQueue); +NvLength circularQueueCount_IMPL(Queue *pQueue); +NvBool circularQueueIsEmpty_IMPL(Queue *pQueue); +NvLength circularQueuePush_IMPL(Queue *pQueue, void* pElements, NvLength numElements); +NvLength circularQueuePushNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, + void* pElements, NvLength numElements); +void* circularQueuePeek_IMPL(Queue *pQueue); +void circularQueuePop_IMPL(Queue *pQueue); +NvBool circularQueuePopAndCopy_IMPL(Queue *pQueue, void *pCopyTo); +NvBool circularQueuePopAndCopyNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, + void *pCopyTo); +#ifdef __cplusplus +} +#endif + +#endif // NV_CONTAINERS_QUEUE_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h new file mode 100644 index 0000000..ad8b9a3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h @@ -0,0 +1,254 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_TYPE_SAFETY_H_ +#define _NV_CONTAINERS_TYPE_SAFETY_H_ + +#include "nvtypes.h" +#include "nvport/nvport.h" + +// Check for typeof support. For now restricting to GNUC compilers. +#if defined(__GNUC__) +#define NV_TYPEOF_SUPPORTED 1 +#else +#define NV_TYPEOF_SUPPORTED 0 +#endif + +/** + * Tag a non-intrusive container union with the following info: + * valueSize : size of its element type for non-intrusive malloc + * kind : non-intrusive kind ID for static dispatch + */ +#define CONT_TAG_NON_INTRUSIVE(elemType) \ + struct {char _[sizeof(elemType)];} *valueSize; \ + struct {char _[CONT_KIND_NON_INTRUSIVE];} *kind + +/** + * Tag an intrusive container union with the following info: + * nodeOffset : offset of the data structure node within element type + * kind : intrusive kind ID for static dispatch + */ +// FIXME: Do not use this for any structure members with offset 0! +// The size of a 0 length array is undefined according to the C99 standard +// and we've seen non-zero values of sizeof(*nodeOffset) appear at runtime +// leading to corruption. Filed Bug 2858103 to track work against this. +#define CONT_TAG_INTRUSIVE(elemType, node) \ + struct {char _[NV_OFFSETOF(elemType, node)];} *nodeOffset; \ + struct {char _[CONT_KIND_INTRUSIVE];} *kind + + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Utility identity function for several type-safety mechanisms. + */ +static NV_FORCEINLINE void *contId(void *pValue) +{ + return pValue; +} + +#ifdef __cplusplus +} +#endif + +/** + * @def CONT_TAG_ELEM_TYPE + * Tag a container union with element type info. + */ + +/** + * @def CONT_CHECK_ARG + * Check that a value has a container's element type. + */ + +/** + * @def CONT_CAST_ELEM + * Cast a void pointer to a container's element type. + */ + +// With C++ we can use typedef and templates for 100% type safety. 
+#if defined(__cplusplus) && !defined(NV_CONTAINERS_NO_TEMPLATES)
+
+#define CONT_TAG_TYPE(contType, elemType, iterType) \
+ CONT_VTABLE_TAG(contType, elemType, iterType); \
+ typedef contType ContType; \
+ typedef elemType ElemType; \
+ typedef iterType IterType
+
+template <typename T>
+typename T::ElemType *CONT_CHECK_ARG(T *pCont, typename T::ElemType *pValue)
+{
+ return pValue;
+}
+
+template <typename T>
+typename T::ElemType *CONT_CAST_ELEM(T *pCont, void *pValue)
+{
+ return (typename T::ElemType *)pValue;
+}
+
+template <typename T, typename It>
+typename T::IterType CONT_ITER_RANGE
+(
+ T *pCont,
+ It (*pFunc)(typename T::ContType *, void *, void *),
+ void *pFirst,
+ void *pLast
+)
+{
+ typename T::IterType temp;
+ temp.iter = pFunc(&pCont->real.base, pFirst, pLast);
+ return temp;
+}
+
+template <typename T, typename It>
+typename T::IterType CONT_ITER_RANGE_INDEX
+(
+ T *pCont,
+ It (*pFunc)(typename T::ContType *, NvU64, NvU64),
+ NvU64 first,
+ NvU64 last
+)
+{
+ typename T::IterType temp;
+ temp.iter = pFunc(&pCont->real.base, first, last);
+ return temp;
+}
+
+// Without C++ we need more creativity. :)
+#else
+
+// Element tag is a pointer to the element type (no mem overhead in union).
+#define CONT_TAG_TYPE(contType, elemType, iterType) \
+ CONT_VTABLE_TAG(contType, elemType, iterType); \
+ elemType *elem; \
+ iterType *iter
+
+// Argument check uses sizeof to get error message without runtime overhead.
+#define CONT_CHECK_ARG(pCont, pValue) \
+ (sizeof((pCont)->elem = (pValue)) ? (pValue) : NULL)
+
+//
+// Return checks are more problematic, but typeof is perfect when available.
+// Without typeof we resort to a runtime vtable.
+//
+#if NV_TYPEOF_SUPPORTED
+
+#define CONT_CAST_ELEM(pCont, ret) ((typeof((pCont)->elem))(ret))
+
+//
+// The dummy contId prevents compilers from warning about incompatible
+// function casts. This is safe since we know the two return structures
+// are identical (modulo alpha-conversion).
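+//
+// For illustration, with typeof available a call such as
+//     listIterRange(pList, pFirst, pLast)
+// expands (roughly) to
+//     ((typeof(*(pList)->iter)(*)(void *, void *, void *))
+//         contId(&listIterRange_IMPL))(pList, pFirst, pLast)
+// so the generic IMPL function is re-typed to return the per-container
+// iterator union instead of the base iterator type.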
+//
+#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast) \
+ (((typeof(*(pCont)->iter)(*)(void *, void *, void *))contId(pFunc))( \
+ pCont, pFirst, pLast))
+
+#define CONT_ITER_RANGE_INDEX(pCont, pFunc, first, last) \
+ (((typeof(*(pCont)->iter)(*)(void *, NvU64, NvU64))contId(pFunc))( \
+ pCont, first, last))
+
+#else
+
+#define CONT_CAST_ELEM(pCont, ret) ((pCont)->vtable->checkRet(ret))
+
+#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast) \
+ ((pCont)->vtable->iterRange(&(pCont)->real.base, pFirst, pLast))
+
+#define CONT_ITER_RANGE_INDEX(pCont, pFunc, first, last) \
+ ((pCont)->vtable->iterRangeIndex(&(pCont)->real.base, first, last))
+
+#endif
+
+#endif
+
+#if NV_TYPEOF_SUPPORTED
+
+#define CONT_VTABLE_DECL(contType, iterType)
+#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex)
+#define CONT_VTABLE_TAG(contType, elemType, iterType)
+#define CONT_VTABLE_FIELD(contType)
+#define CONT_VTABLE_INIT(contType, pCont)
+
+#else
+
+#define CONT_VTABLE_DECL(contType, iterType) \
+ typedef struct \
+ { \
+ void *(*checkRet)(void *pValue); \
+ iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \
+ iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last);\
+ } contType##_VTABLE; \
+
+#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex) \
+ static const contType##_VTABLE g_##contType##_VTABLE = \
+ { \
+ contId, \
+ contIterRange, \
+ contIterRangeIndex, \
+ }
+
+#define CONT_VTABLE_TAG(contType, elemType, iterType) \
+ const struct \
+ { \
+ elemType *(*checkRet)(void *pValue); \
+ iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \
+ iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last);\
+ } *vtable
+
+#define CONT_VTABLE_FIELD(contType) const contType##_VTABLE *vtable
+
+#define CONT_VTABLE_INIT(contType, pCont) \
+ ((pCont)->vtable = &g_##contType##_VTABLE)
+
+#endif
+
+enum CONT_KIND
+{
+ CONT_KIND_NON_INTRUSIVE = 1,
+ CONT_KIND_INTRUSIVE = 2,
+};
+
+/**
+ * Static dispatch uses sizeof with dummy arrays to select a path.
+ *
+ * With optimizations enabled the unused paths should be trimmed, so this
+ * should have zero overhead in release builds.
+*/
+#define CONT_DISPATCH_ON_KIND(pCont, ret1, ret2, ret3) \
+ ((sizeof(*(pCont)->kind) == CONT_KIND_NON_INTRUSIVE) ? (ret1) : \
+ (sizeof(*(pCont)->kind) == CONT_KIND_INTRUSIVE) ? (ret2) : \
+ (ret3))
+
+/**
+ * Utility stub useful for the above ret3 argument (unreachable path).
+ * Add stubs for different return types as needed.
+ */
+static NV_FORCEINLINE void contDispatchVoid_STUB(void)
+{
+ PORT_BREAKPOINT();
+}
+
+#endif // _NV_CONTAINERS_TYPE_SAFETY_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h
new file mode 100644
index 0000000..3a7412e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h
@@ -0,0 +1,177 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_EVENT_BUFFER_PRODUCER_H_
+#define _NV_EVENT_BUFFER_PRODUCER_H_
+#include "nvtypes.h"
+#include "class/cl90cd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+* RECORD_BUFFER_INFO
+* This structure holds information for the record buffer, which is a circular
+* buffer with fixed-size records
+*
+* pHeader
+* This is a shared header between the producer and consumer.
+* It holds the get/put pointers and overflow counts.
+*
+* recordBuffAddr
+* This is the producer mapping to the record buffer.
+*
+* recordSize
+* Size of each record in bytes.
+*
+* totalRecordCount
+* Total number of records that this buffer can hold.
+*
+* bufferSize
+* Total size of record buffer in bytes.
+*
+* notificationThreshold
+* This field specifies the number of records that the buffer can
+* still hold before it gets full.
+* Consumer is notified when this threshold is met.
+*
+*/
+typedef struct
+{
+ NV_EVENT_BUFFER_HEADER* pHeader;
+ NvP64 recordBuffAddr;
+ NvU32 recordSize;
+ NvU32 totalRecordCount;
+ NvU32 bufferSize;
+ NvU32 notificationThreshold;
+} RECORD_BUFFER_INFO;
+
+/*
+* VARDATA_BUFFER_INFO:
+* This structure holds information for the variable length data buffer,
+* which is a circular buffer with variable-size data records
+*
+* vardataBuffAddr
+* This is the producer mapping to the vardata buffer.
+*
+* bufferSize
+* Total size of vardata buffer in bytes.
+*
+* notificationThreshold
+* This field specifies the number of records that the buffer can
+* still hold before it gets full.
+* Consumer is notified when this threshold is met.
+*
+* get/put
+* These are the get and put offsets for vardata buffer.
+* These are not shared with the consumer.
+*
+* remainingSize
+* Size in bytes remaining in the vardata buffer.
+*/
+typedef struct
+{
+ NvP64 vardataBuffAddr;
+ NvU32 bufferSize;
+ NvU32 notificationThreshold;
+ NvU32 get;
+ NvU32 put;
+ NvU32 remainingSize;
+} VARDATA_BUFFER_INFO;
+
+/*
+* EVENT_BUFFER_PRODUCER_INFO:
+*
+* recordBuffer
+* Record buffer information
+*
+* vardataBuffer
+* Vardata buffer information
+*
+* notificationHandle
+* Notification handle used to notify the consumer.
+*
+* isEnabled
+* Data is added to the event buffer only if this flag is set.
+* Controlled by Consumer.
+*
+* isKeepNewest
+* This flag is set if keepNewest mode is selected by the consumer.
+*/ +typedef struct +{ + RECORD_BUFFER_INFO recordBuffer; + VARDATA_BUFFER_INFO vardataBuffer; + NvP64 notificationHandle; + NvBool isEnabled; + NvBool isKeepNewest; +} EVENT_BUFFER_PRODUCER_INFO; + +/* +* EVENT_BUFFER_PRODUCER_DATA: +* This structure holds data info to add a record in a buffer +* +* pPayload +* Pointer to the payload that needs to be added in the record buffer +* +* payloadSize +* Size of payload in bytes. +* +* pVardata +* Pointer to data that needs to be added in the vardata buffer +* +* vardataSize +* Size of vardata in bytes. +*/ +typedef struct +{ + NvP64 pPayload; + NvU32 payloadSize; + NvP64 pVardata; + NvU32 vardataSize; +} EVENT_BUFFER_PRODUCER_DATA; + +void eventBufferInitRecordBuffer(EVENT_BUFFER_PRODUCER_INFO *info, NV_EVENT_BUFFER_HEADER* pHeader, + NvP64 recordBuffAddr, NvU32 recordSize, NvU32 recordCount, NvU32 bufferSize, NvU32 notificationThreshold); + +void eventBufferInitVardataBuffer(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 vardataBuffAddr, + NvU32 bufferSize, NvU32 notificationThreshold); + +void eventBufferInitNotificationHandle(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 notificationHandle); +void eventBufferSetEnable(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isEnabled); +void eventBufferSetKeepNewest(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isKeepNewest); +void eventBufferUpdateRecordBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get); +void eventBufferUpdateVardataBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get); +NvU32 eventBufferGetRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info); +NvU32 eventBufferGetVardataBufferCount(EVENT_BUFFER_PRODUCER_INFO *info); + +void eventBufferProducerAddEvent(EVENT_BUFFER_PRODUCER_INFO* info, NvU16 eventType, NvU16 eventSubtype, + EVENT_BUFFER_PRODUCER_DATA *pData); + +NvBool eventBufferIsNotifyThresholdMet(EVENT_BUFFER_PRODUCER_INFO* info); + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif //_NV_EVENT_BUFFER_PRODUCER_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h new file mode 100644 index 0000000..42a3629 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#ifndef _IO_ACCESS_H_
+#define _IO_ACCESS_H_
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+
+typedef struct IO_DEVICE IO_DEVICE, *PIO_DEVICE;
+typedef struct IO_APERTURE IO_APERTURE, *PIO_APERTURE;
+
+typedef NvU8 ReadReg008Fn(PIO_APERTURE a, NvU32 addr);
+typedef NvU16 ReadReg016Fn(PIO_APERTURE a, NvU32 addr);
+typedef NvU32 ReadReg032Fn(PIO_APERTURE a, NvU32 addr);
+typedef void WriteReg008Fn(PIO_APERTURE a, NvU32 addr, NvV8 value);
+typedef void WriteReg016Fn(PIO_APERTURE a, NvU32 addr, NvV16 value);
+typedef void WriteReg032Fn(PIO_APERTURE a, NvU32 addr, NvV32 value);
+typedef NvBool ValidRegFn(PIO_APERTURE a, NvU32 addr);
+
+#define REG_DRF_SHIFT(drf) ((0?drf) % 32)
+#define REG_DRF_MASK(drf) (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
+#define REG_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_NUM(d,r,f,n) (((n)&REG_DRF_MASK(NV ## d ## r ## f))<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_VAL(d,r,f,v) (((v)>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_DRF_SHIFTMASK(drf) (REG_DRF_MASK(drf)<<(REG_DRF_SHIFT(drf)))
+#define REG_DRF_WIDTH(drf) ((1?drf) - (0?drf) + 1)
+
+#define REG_RD08(ap, addr) (ap)->pDevice->pReadReg008Fn((ap), (addr))
+#define REG_RD16(ap, addr) (ap)->pDevice->pReadReg016Fn((ap), (addr))
+#define REG_RD32(ap, addr) (ap)->pDevice->pReadReg032Fn((ap), (addr))
+#define REG_WR08(ap, addr, val) (ap)->pDevice->pWriteReg008Fn((ap), (addr), (val))
+#define REG_WR16(ap, addr, val) (ap)->pDevice->pWriteReg016Fn((ap), (addr), (val))
+#define REG_WR32(ap, addr, val) (ap)->pDevice->pWriteReg032Fn((ap), (addr), (val))
+#define REG_WR32_UC(ap, addr, val) (ap)->pDevice->pWriteReg032UcFn((ap), (addr), (val))
+#define REG_VALID(ap, addr) (ap)->pDevice->pValidRegFn((ap), (addr))
+
+// Get the address of a register given the Aperture and offset.
+#define REG_GET_ADDR(ap, offset) ((ap)->baseAddress + (offset))
+
+//
+// Macros for register I/O
+//
+
+#define REG_FLD_WR_DRF_NUM(ap,d,r,f,n) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_WR_DRF_DEF(ap,d,r,f,c) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_RD_DRF(ap,d,r,f) (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_FLD_TEST_DRF_DEF(ap,d,r,f,c) (REG_RD_DRF(ap,d, r, f) == NV##d##r##f##c)
+#define REG_FLD_TEST_DRF_NUM(ap,d,r,f,n) (REG_RD_DRF(ap,d, r, f) == n)
+#define REG_FLD_IDX_TEST_DRF_DEF(ap,d,r,f,c,i) (REG_IDX_RD_DRF(ap, d, r, i, f) == NV##d##r##f##c)
+
+// Read/write a field or entire register of which there are several copies each accessed via an index
+#define REG_IDX_WR_DRF_NUM(ap,d,r,i,f,n) REG_WR32(ap,NV ## d ## r(i), REG_DRF_NUM(d,r,f,n))
+#define REG_IDX_WR_DRF_DEF(ap,d,r,i,f,c) REG_WR32(ap,NV ## d ## r(i), REG_DRF_DEF(d,r,f,c))
+#define REG_FLD_IDX_WR_DRF_NUM(ap,d,r,i,f,n) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_IDX_WR_DRF_DEF(ap,d,r,i,f,c) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_IDX_RD_DRF(ap,d,r,i,f) (((REG_RD32(ap,NV ## d ## r(i)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_RD_DRF_IDX(ap,d,r,f,i) (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f(i)))&REG_DRF_MASK(NV ## d ## r ## f(i)))
+#define REG_IDX_OFFSET_RD_DRF(ap,d,r,i,o,f) (((REG_RD32(ap,NV ## d ## r(i,o)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+
+struct IO_DEVICE
+{
+ ReadReg008Fn *pReadReg008Fn;
+ ReadReg016Fn *pReadReg016Fn;
+ ReadReg032Fn *pReadReg032Fn;
+ WriteReg008Fn *pWriteReg008Fn;
+ WriteReg016Fn *pWriteReg016Fn;
+ WriteReg032Fn *pWriteReg032Fn;
+ WriteReg032Fn *pWriteReg032UcFn;
+ ValidRegFn *pValidRegFn;
+};
+
+struct IO_APERTURE
+{
+ PIO_DEVICE pDevice; // Pointer to module specific IO_DEVICE
+ NvU32 baseAddress; // register base address
+ NvU32 length; // length of aperture
+};
+
+//---------------------------------------------------------------------------
+//
+//
+
+struct IO_DEVICE
+{
+    ReadReg008Fn  *pReadReg008Fn;
+    ReadReg016Fn  *pReadReg016Fn;
+    ReadReg032Fn  *pReadReg032Fn;
+    WriteReg008Fn *pWriteReg008Fn;
+    WriteReg016Fn *pWriteReg016Fn;
+    WriteReg032Fn *pWriteReg032Fn;
+    WriteReg032Fn *pWriteReg032UcFn;
+    ValidRegFn    *pValidRegFn;
+};
+
+struct IO_APERTURE
+{
+    PIO_DEVICE pDevice;     // Pointer to module specific IO_DEVICE
+    NvU32 baseAddress;      // register base address
+    NvU32 length;           // length of aperture
+};
+
+//---------------------------------------------------------------------------
+//
+// Function prototypes.
+//
+//---------------------------------------------------------------------------
+
+NV_STATUS ioaccessCreateIOAperture
+(
+    IO_APERTURE **ppAperture,
+    IO_APERTURE *pParentAperture,
+    IO_DEVICE *pDevice,
+    NvU32 offset,
+    NvU32 length
+);
+
+NV_STATUS ioaccessInitIOAperture
+(
+    IO_APERTURE *pAperture,
+    IO_APERTURE *pParentAperture,
+    IO_DEVICE *pDevice,
+    NvU32 offset,
+    NvU32 length
+);
+
+void ioaccessDestroyIOAperture(IO_APERTURE *pAperture);
+
+#endif // _IO_ACCESS_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
new file mode 100644
index 0000000..a236b6d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
@@ -0,0 +1,149 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2017,2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Internal macro definitions for NVLOG_PRINTF
+ *
+ * Macro magic example: (Assuming nothing gets compiled out)
+ * 0) NV_PRINTF(LEVEL_ERROR, "Bla %d %d", arg0, arg1)
+ * 1) NVLOG_PRINTF(GLOBAL, LEVEL_ERROR, "Bla %d %d", arg0, arg1)
+ *    - This gets picked up by the parser
+ * 2) _NVLOG_GET_PRINT
+ * 3) _NVLOG_GET_PRINT1(NVLOG_, NVLOG_FILEID, __LINE__, PRINT_REL, ___please_include_noprecomp_h___)
+ * 4) _NVLOG_GET_PRINT2(NVLOG_, 0xaaaaaa, 1024, PRINT_REL, ___please_include_noprecomp_h___)
+ * 5) NVLOG_0xaaaaaa_1024_PRINT_REL
+ * 6) NVLOG_PRINT(LEVEL_ERROR, 0xaaaaaa, 0x04001100, arg0, arg1)
+ * 7) NVLOG_PRINT2(LEVEL_ERROR) (0xaaaaaa, 0x04001100, arg0, arg1)
+ * 8) NVLOG_PRINT_LEVEL_0x4 (0xaaaaaa, 0x04001100, arg0, arg1)
+ * 9) nvLog_Printf4 (0xaaaaaa, 0x04001100, arg0, arg1)
+ *
+ */
+
+// Compile-time stub for output below the NVLOG_LEVEL threshold
+#define _NVLOG_NOTHING(...) ((void)0)
+
+//
+// Use __COUNTER__ if available. If not, we can use __LINE__ since it is also
+// monotonically rising. If __COUNTER__ is unavailable, we can't have inline
+// functions using NvLog.
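+// (Illustrative note, not from the original header: the print ID is derived
+// from this counter, so with the __LINE__ fallback two call sites that happen
+// to share a line number, e.g. one in an inlined header function and one in
+// the including source file, would collide:
+//
+//   header.h:100:  NVLOG_PRINTF(...)   // ID derived from line 100
+//   source.c:100:  NVLOG_PRINTF(...)   // same ID, collision in the database
+//
+// whereas __COUNTER__ keeps every expansion in a translation unit unique.)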
+// +#if PORT_COMPILER_HAS_COUNTER +#define _NVLOG_COUNTER __COUNTER__ +#else +#define _NVLOG_COUNTER __LINE__ +#endif + +// +// NVLOG_PARSING is defined if the file is being compiled for the parser run +// +#if defined(NVLOG_PARSING) +// +// Since the '@' symbol is not found in C code, using it here makes it trivial +// for the parser code to extract the needed info from preprocessed source. +// +#define _NVLOG_PRINTF2(count, file, line, tag, route, level, format, ...) \ + NVLOG@@@count@@@file@@@line@@@level@@@tag@@@route@@@format@@@__VA_ARGS__@@@ + +#define _NVLOG_PRINTF(tag, route, level, format, ...) \ + _NVLOG_PRINTF2(_NVLOG_COUNTER, __FILE__, __LINE__, tag, route, level, format, __VA_ARGS__) + +#elif !NVLOG_ENABLED +#define _NVLOG_PRINTF _NVLOG_NOTHING + +#else // NVLOG_ENABLED && !defined(NVLOG_PARSING) + +#include "nvlog_inc.h" + +#ifdef NVLOG_STRINGS_ALLOWED +#define NVLOG_STRING(...) __VA_ARGS__ +#else +#define NVLOG_STRING(...) +#endif + +// +// One for every debug level, needed for compile time filtering. +// +typedef NV_STATUS NVLOG_PRINTF_PROTO(NvU32, NvU32, ...); +NVLOG_PRINTF_PROTO nvlogPrint_printf0; +NVLOG_PRINTF_PROTO nvlogPrint_printf1; +NVLOG_PRINTF_PROTO nvlogPrint_printf2; +NVLOG_PRINTF_PROTO nvlogPrint_printf3; +NVLOG_PRINTF_PROTO nvlogPrint_printf4; +NVLOG_PRINTF_PROTO nvlogPrint_printf5; +NVLOG_PRINTF_PROTO nvlogPrint_printf6; + +// This one is used for unknown debug level - It has an extra argument +NV_STATUS nvlogPrint_printf(NvU32 dbgLevel, NvU32 file, NvU32 line, ...); + + +#if NVLOG_LEVEL <= 0x0 +#define NVLOG_PRINT_LEVEL_0x0 nvlogPrint_printf0 +#else +#define NVLOG_PRINT_LEVEL_0x0 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x1 +#define NVLOG_PRINT_LEVEL_0x1 nvlogPrint_printf1 +#else +#define NVLOG_PRINT_LEVEL_0x1 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x2 +#define NVLOG_PRINT_LEVEL_0x2 nvlogPrint_printf2 +#else +#define NVLOG_PRINT_LEVEL_0x2 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x3 +#define NVLOG_PRINT_LEVEL_0x3 nvlogPrint_printf3 +#else +#define NVLOG_PRINT_LEVEL_0x3 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x4 +#define NVLOG_PRINT_LEVEL_0x4 nvlogPrint_printf4 +#else +#define NVLOG_PRINT_LEVEL_0x4 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x5 +#define NVLOG_PRINT_LEVEL_0x5 nvlogPrint_printf5 +#else +#define NVLOG_PRINT_LEVEL_0x5 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x6 +#define NVLOG_PRINT_LEVEL_0x6 nvlogPrint_printf6 +#else +#define NVLOG_PRINT_LEVEL_0x6 _NVLOG_NOTHING +#endif +// For when the level isn't known at compile time +#define NVLOG_PRINT_LEVEL_ NVLOG_PRINT_LEVEL_UNKNOWN +#define NVLOG_PRINT_LEVEL_UNKNOWN nvlogPrint_printf + + +#define NVLOG_PRINT2(dbglvl) NVLOG_PRINT_LEVEL_ ## dbglvl +#define NVLOG_PRINT(level, ...) NVLOG_PRINT2(level)(__VA_ARGS__) + +#define _NVLOG_GET_PRINT2(prefix, x) prefix ##x +#define _NVLOG_GET_PRINT1(prefix, id) _NVLOG_GET_PRINT2(prefix, id) +#define _NVLOG_GET_PRINT _NVLOG_GET_PRINT1(NVLOG_PRINT_ID_, _NVLOG_COUNTER) + +#define _NVLOG_PRINTF(tag, route, level, format, ...) _NVLOG_GET_PRINT + +#endif // NVLOG_ENABLED && !defined(NVLOG_PARSING) diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h new file mode 100644 index 0000000..00debf7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h @@ -0,0 +1,334 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NVLOG_H_
+#define _NVLOG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+
+/******************* Common Debug & Trace Defines **************************\
+*                                                                           *
+* Module: NVLOG.H                                                           *
+*                                                                           *
+\***************************************************************************/
+
+// Include common NvLog definitions
+#include "nvlog_defs.h"
+
+// Include printf definitions
+#include "nvlog/nvlog_printf.h"
+
+extern NVLOG_LOGGER NvLogLogger;
+extern NVLOG_PRINT_LOGGER NvLogPrintLogger;
+
+/********************************/
+/*****  Exported functions  *****/
+/********************************/
+
+
+/**
+ * @brief Global NvLog initialization function
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogInit(void *pData);
+
+/**
+ * @brief Update the NvLog configuration from the registry
+ *
+ */
+void nvlogUpdate(void);
+
+/**
+ * @brief Global NvLog deinitialization function
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogDestroy(void);
+
+/**
+ * @brief Allocate a new NvLog buffer
+ *
+ * @param[in]  size           Size of the buffer to allocate
+ * @param[in]  flags          Buffer flags, uses NVLOG_BUFFER_FLAGS_* DRF's
+ * @param[in]  tag            Tag for the new buffer, to identify it in a dump
+ * @param[out] pBufferHandle  Handle of the newly created buffer
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogAllocBuffer(NvU32 size, NvU32 flags, NvU32 tag, NVLOG_BUFFER_HANDLE *pBufferHandle, ...);
+
+/**
+ * @brief Deallocate a buffer with the given handle
+ *
+ * @param[in] hBuffer  Handle of the buffer to deallocate
+ */
+void nvlogDeallocBuffer(NVLOG_BUFFER_HANDLE hBuffer);
+
+/**
+ * @brief Write to a buffer with the given handle
+ *
+ * @param[in] hBuffer   Handle of the buffer to write to
+ * @param[in] pData     Pointer to the data to be written
+ * @param[in] dataSize  Size of the data to be written
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogWriteToBuffer(NVLOG_BUFFER_HANDLE hBuffer, NvU8 *pData, NvU32 dataSize);
+
+/**
+ * @brief Extract a chunk of a buffer
+ *
+ * @param[in]     hBuffer     Handle of the buffer to extract
+ * @param[in]     chunkNum    Index (0-based) of the chunk to extract
+ * @param[in,out] pChunkSize  In  - Size of the chunk to extract
+ *                            Out - Size that was actually extracted, can be less
+ * @param[out]    pDest       Pointer to the memory the chunk will be copied to
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS
nvlogExtractBufferChunk(NVLOG_BUFFER_HANDLE hBuffer, NvU32 chunkNum, NvU32 *pChunkSize, NvU8 *pDest);
+
+/**
+ * @brief Get the size of a specified buffer
+ *
+ * @param[in]  hBuffer  Handle of the buffer
+ * @param[out] pSize    Buffer size.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferSize(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pSize);
+
+/**
+ * @brief Get the tag of a specified buffer.
+ *
+ * @param[in]  hBuffer  Handle of the buffer
+ * @param[out] pTag     Buffer tag.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferTag(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pTag);
+
+/**
+ * @brief Get flags for a specified buffer.
+ *        Flag fields are defined as NVLOG_BUFFER_FLAGS_* in nvlog_defs.h
+ *
+ * @param[in]  hBuffer  Handle of the buffer
+ * @param[out] pFlags   Buffer flags.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferFlags(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pFlags);
+
+/**
+ * @brief Pause/resume logging to a specified buffer
+ *
+ * @param[in] hBuffer  Handle of the buffer
+ * @param[in] bPause   NV_TRUE - pause, NV_FALSE - resume
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogPauseLoggingToBuffer(NVLOG_BUFFER_HANDLE hBuffer, NvBool bPause);
+
+/**
+ * @brief Pause/resume logging to all buffers
+ *
+ * @param[in] bPause  NV_TRUE - pause, NV_FALSE - resume
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogPauseAllLogging(NvBool bPause);
+
+/**
+ * @brief Get the handle of a buffer with the given tag
+ *
+ * @param[in]  tag            Tag of the buffer requested
+ * @param[out] pBufferHandle  Handle of the buffer
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferHandleFromTag(NvU32 tag, NVLOG_BUFFER_HANDLE *pBufferHandle);
+
+/**
+ * @brief Copy a snapshot of a buffer's contents into caller-provided memory
+ *
+ * @param[in]  hBuffer   Handle of the buffer to snapshot
+ * @param[out] pDest     Destination memory for the snapshot
+ * @param[in]  destSize  Size of the destination, in bytes
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferSnapshot(NVLOG_BUFFER_HANDLE hBuffer, NvU8 *pDest, NvU32 destSize);
+
+
+/**
+ * @brief Dumps all logs into the kernel print log
+ *
+ * @note This will write to the log even if all other prints are disabled,
+ * including external release builds. The output is base64 encoded, is not
+ * decodable without the database, and will pollute the logs. Use with caution.
+ *
+ * The format of the dump will be the same as the OS Crash Log dumps.
+ */
+void nvlogDumpToKernelLog(NvBool bDumpUnchangedBuffersOnlyOnce);
+
+//
+// The values returned by CheckFilter functions contain up to four buffers.
+// These indexes are in the local buffer array (i.e. in NVLOG_PRINT_LOGGER).
+// There can be more than 256 total NvLog buffers, but only 256 per subsystem.
+//
+#define NVLOG_FILTER_BUFFER_NONE 0xFF
+
+//
+// NvLog Print functions
+//
+
+/**
+ * @brief Check the filtering rules for a given DBG_PRINTF
+ *
+ * @param[in] fileId  ID (name hash) of the file
+ * @param[in] line    Line number of the print
+ * @param[in] level   Debug level (DBG_LEVEL_*) of the print
+ * @param[in] module  Debug module (DBG_MODULE_*) of the print
+ *
+ * @return 32 bits to indicate which of the print buffers to log to.
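+ *
+ * For illustration, decoding the returned mask (a sketch; the 8-bit
+ * little-endian packing of the four indexes is an assumption of this
+ * example, not a guarantee of the interface):
+ *
+ * ~~~{.c}
+ * NvU32 mask = nvlogPrintCheckFilter(fileId, line, level, module);
+ * NvU32 i;
+ * for (i = 0; i < 4; i++)
+ * {
+ *     NvU8 bufferIdx = (NvU8)((mask >> (8 * i)) & 0xFF);
+ *     if (bufferIdx != NVLOG_FILTER_BUFFER_NONE)
+ *     {
+ *         // Log the print to local buffer 'bufferIdx'.
+ *     }
+ * }
+ * ~~~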
+ */ +NvU32 nvlogPrintCheckFilter(NvU32 fileId, NvU16 line, NvU32 level, NvU32 module); + +/** + * @brief Global NvLog Print initialization function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintInit(void); + +/** + * @brief NvLog Print update function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintUpdate(void); + +/** + * @brief Global NvLog Print deinitialization function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintDestroy(void); + +/** + * @brief Global NvLog ETW capture state function + * + * @return NV_OK on success + */ +NV_STATUS nvlogETWCaptureState(void); + +// +// Global initialization macros +// +extern volatile NvU32 nvlogInitCount; +#define NVLOG_INIT(pData) \ + do \ + { \ + if (portAtomicIncrementU32(&nvlogInitCount) == 1) \ + { \ + nvlogInit(pData); \ + } \ + } while (0) + +#define NVLOG_UPDATE() \ + do \ + { \ + if (nvlogInitCount == 1) \ + { \ + nvlogUpdate(); \ + } \ + } while (0) + +#define NVLOG_DESTROY() \ + do \ + { \ + if (portAtomicDecrementU32(&nvlogInitCount) == 0) \ + { \ + nvlogDestroy(); \ + } \ + } while (0) + +/********************************/ +/****** NvLog Filtering *******/ +/********************************/ + +// +// Used both by print and regtrace functions. +// + +/** + * @brief Binary search the range array for a given number + * + * @param[in] ranges Range array to search + * @param[in] numRanges Size of the given array + * @param[in] num Number to search for. + * + * @return Number that is found in the given range. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogFindInRange16Array(NVLOG_RANGE_16 *ranges, NvU32 numRanges, NvU16 num); +/** + * @brief Binary search the range array for a given number + * + * @param[in] ranges Range array to search + * @param[in] numRanges Size of the given array + * @param[in] num Number to search for. + * + * @return Number that is found in the given range. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogFindInRange32Array(NVLOG_RANGE_32 *ranges, NvU32 numRanges, NvU32 num); + +// Returns the rules for the given fileId-lineNum pair +/** + * @brief Binary search the range array for a given number + * + * @param[in] pFileLineFilter File:line filter to check + * @param[in] fileId ID of the file to search + * @param[in] lineNum Line number to search in the file entry + * + * @return Number that is found for the given file:line. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogGetFileLineFilterRules(NVLOG_FILELINE_FILTER *pFileLineFilter, NvU32 fileId, NvU16 lineNum); + + +/** + * @brief Dump nvlog to kernel log only if enabled (performs regkey and platform checks) + */ +void nvlogDumpToKernelLogIfEnabled(void); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _NVLOG_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h new file mode 100644 index 0000000..aa5fb9b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h @@ -0,0 +1,91 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief NvLog call that logs prints.
+ *
+ * This is the traditional NvLog component. When enabled, it will also activate
+ * preprocessing of all source files to detect calls to NVLOG_PRINTF, and
+ * generate a database to be used for decoding.
+ *
+ * This file just defines the macros used by NV_PRINTF and other clients.
+ */
+
+#ifndef _NVLOG_PRINTF_H_
+#define _NVLOG_PRINTF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef NVLOG_ENABLED
+/// @brief If zero, most of NvLog will be compiled out
+#define NVLOG_ENABLED 0
+#endif
+
+#ifndef NVLOG_LEVEL
+/// @brief Level below which all prints will be compiled out.
+#define NVLOG_LEVEL 2
+#endif
+
+/// @brief Maximum number of arguments to NVLOG_PRINTF
+#define NVLOG_MAX_ARGS 20
+
+/**
+ * @brief Log this printf in NvLog internal binary buffers
+ *
+ * These calls are picked up by the NvLog parser, and are replaced with custom
+ * calls from the generated header. See @page nvlog-parser for details.
+ *
+ * @param tag    - An identifier to help with offline filtering. Doesn't need
+ *                 to be defined anywhere.
+ * @param route  - 8bit mask of buffers the print will be routed to.
+ *                 Use NVLOG_BUFFER_XXX constants
+ * @param level  - Level at which to print. Calls with level < NVLOG_LEVEL will
+ *                 be compiled out.
+ * @param format - printf-like format string
+ * @param ...    - printf arguments
+ */
+#define NVLOG_PRINTF(tag, route, level, format, ...) \
_NVLOG_PRINTF(tag, route, level, format, __VA_ARGS__) + +#define NVLOG_BUFFER_NULL 0x01 +#define NVLOG_BUFFER_RM 0x02 +#define NVLOG_BUFFER_RM_BOOT 0x04 +#define NVLOG_BUFFER_ETW 0x08 +#define NVLOG_BUFFER_KMD_BOOT 0x10 +#define NVLOG_BUFFER_KMD 0x20 +#define NVLOG_BUFFER_ERROR 0x40 +#define NVLOG_BUFFER_DD 0x80 + +#define NVLOG_ROUTE_RM (NVLOG_BUFFER_RM | NVLOG_BUFFER_RM_BOOT | NVLOG_BUFFER_ETW) +#define NVLOG_ROUTE_KMD (NVLOG_BUFFER_KMD | NVLOG_BUFFER_KMD_BOOT | NVLOG_BUFFER_ETW) +#define NVLOG_ROUTE_DD (NVLOG_BUFFER_DD | NVLOG_BUFFER_KMD_BOOT | NVLOG_BUFFER_ETW) + +#include "nvlog/internal/nvlog_printf_internal.h" + +#ifdef __cplusplus +} //extern "C" +#endif + +#endif // _NVLOG_PRINTF_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h new file mode 100644 index 0000000..285c009 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h @@ -0,0 +1,126 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#include "g_object_nvoc.h" + +#ifndef _NVOC_OBJECT_H_ +#define _NVOC_OBJECT_H_ + +#include "nvtypes.h" +#include "nvstatus.h" + +#include "nvoc/prelude.h" + +class Object; +struct NVOC_CLASS_INFO; + +/*! + * Tracks the head of an object's child list, and the next object in its + * parent's child list. + */ +struct NVOC_CHILD_TREE +{ + Object *pChild; + Object *pSibling; +}; + +//! The base class of all instantiable NVOC objects. +NVOC_PREFIX(obj) class Object +{ +public: + + //! Runtime ownership tree parent node. + Object *pParent; + + //! Runtime ownership tree child and sibling links. + struct NVOC_CHILD_TREE childTree; + + //! IP Version value. Temporary until NVOC-style HALs are rolled out. + NvU32 ipVersion; + + /*! + * @brief Add pChild as a child of this object. + * + * This method is wrapped by objCreate and typically doesn't need to be + * called directly. + * + * Asserts if pChild is already a child of any object. + */ + void objAddChild(Object *pObj, Object *pChild); + + /*! + * @brief Remove pChild as a child of this object. + * + * This method is wrapped by objDelete and typically doesn't need to be + * called directly. + * + * Asserts if pChild is not a child of this object. 
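+ *
+ * For context, a minimal sketch (an illustrative addition, not from the
+ * original header) of walking the child list these helpers maintain, using
+ * objGetChild() and objGetSibling():
+ *
+ * ~~~{.c}
+ * Object *pChild;
+ * for (pChild = objGetChild(pParent);
+ *      pChild != NULL;
+ *      pChild = objGetSibling(pChild))
+ * {
+ *     // Visit pChild here.
+ * }
+ * ~~~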
+ */ + void objRemoveChild(Object *pObj, Object *pChild); + + /*! + * @brief Gets the head of this object's child list from the child tree. + * + * This is a constant-time operation. + */ + Object *objGetChild(Object *pObj); + + /*! + * @brief Gets the next child of this object's parent from the child tree. + * + * This is a constant-time operation. + */ + Object *objGetSibling(Object *pObj); + + /*! + * @brief Gets the direct parent of this object. + * + * This is a constant-time operation. + */ + Object *objGetDirectParent(Object *pObj); +}; + +// +// IP versioning definitions are temporary until NVOC halspec support is +// finished. +// +// IP_VERSION format as defined by the hardware engines. +// A _MAJOR value of 0 means the object has no version number. +// + +#define NV_ODB_IP_VER_DEV 7:0 /* R-IVF */ +#define NV_ODB_IP_VER_ECO 15:8 /* R-IVF */ +#define NV_ODB_IP_VER_MINOR 23:16 /* R-IVF */ +#define NV_ODB_IP_VER_MAJOR 31:24 /* R-IVF */ + +#define IPVersion(pObj) staticCast((pObj), Object)->ipVersion +#define IsIPVersionValid(pObj) (DRF_VAL(_ODB, _IP_VER, _MAJOR, IPVersion(pObj)) != 0) +#define IsIPVersionOrLater(pObj, v0) (IPVersion(pObj) >= (v0)) +// v0 .. v1 inclusive +#define IsIPVersionInRange(pObj, v0, v1) ((IPVersion(pObj) >= (v0)) && (IPVersion(pObj) <= (v1))) + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h new file mode 100644 index 0000000..1e1b14b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h @@ -0,0 +1,255 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#ifndef _NVOC_PRELUDE_H_ +#define _NVOC_PRELUDE_H_ + +#include "utils/nvmacro.h" + +/* Calls the macro named in the first parameter with the rest of the given arguments. Written + * like this instead of just func(__VA_ARGS__) because some preprocessors treat __VA_ARGS__ + * as a single argument even when it contains commas. */ +#define NVOC_PP_CALL(func, ...) NV_EXPAND(func NV_EXPAND() (__VA_ARGS__)) + +/*! Macro to help specify prefixes on NVOC classes */ +#define NVOC_PREFIX(x) [[nvoc::prefix(x)]] + +/*! 
Macro to help specify NVOC class attributes */
+#define NVOC_ATTRIBUTE(str) [[nvoc::classAttributes("\""#str"\"")]]
+
+/*! Macro to help specify properties on NVOC classes */
+#define NVOC_PROPERTY [[nvoc::property]]
+
+#ifndef NV_PRINTF_STRINGS_ALLOWED
+#if defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD)
+#define NV_PRINTF_STRINGS_ALLOWED 1
+#else
+#define NV_PRINTF_STRINGS_ALLOWED 0
+#endif
+#endif
+
+/*!
+ * @brief Gets a pointer to the NVOC_CLASS_INFO for the named NVOC class.
+ *
+ * This is similar to C++'s typeid macro.
+ */
+#define classInfo(x) reinterpretCast((&__nvoc_class_def_##x), const NVOC_CLASS_INFO *)
+
+/*!
+ * @brief Gets a unique integer identifier for the named NVOC class.
+ *
+ * This is similar to the hash_code of C++'s std::type_info.
+ */
+#define classId(x) __nvoc_class_id_##x
+
+
+/*! NVOC class IDs will be no wider than NVOC_CLASS_ID_MAX_WIDTH bits. */
+#define NVOC_CLASS_ID_MAX_WIDTH 24
+
+/*!
+ * @brief Statically casts pObj to a TYPE*. Fails to compile if the cast is invalid.
+ *
+ * This is similar to C++'s static_cast<TYPE*>(pObj).
+ */
+#define staticCast(pObj, TYPE) ((pObj)? __staticCast_##TYPE((pObj)) : NULL)
+
+/*!
+ * @brief Statically casts pObj to a TYPE*. Fails to compile if the cast is invalid.
+ *
+ * This version of staticCast() skips the pointer check as a trade-off for
+ * better binary size and runtime efficiency. The caller is responsible for
+ * ensuring pObj can never be NULL.
+ */
+#define staticCastNoPtrCheck(pObj, TYPE) __staticCast_##TYPE((pObj))
+
+/*!
+ * @brief Dynamically casts pObj to a TYPE*. Returns NULL if the cast is invalid.
+ *
+ * This is similar to C++'s dynamic_cast<TYPE*>(pObj).
+ */
+#define dynamicCast(pObj, TYPE) (__dynamicCast_##TYPE((pObj)))
+
+/*!
+ * @brief Reinterpret e as if it had type T.
+ *
+ * This is similar to C++'s reinterpret_cast<T>(e).
+ */
+#define reinterpretCast(e, T) ((T)(e))
+
+/*!
+ * NVOC_OBJ_CREATE_FLAGS* are used with objCreateWithFlags()/objCreateDynamicWithFlags().
+ *
+ * NVOC_OBJ_CREATE_FLAGS_NONE
+ *   Default behavior
+ * NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY
+ *   Use the halspec from the parent without adding the newly created object
+ *   to the child tree
+ */
+#define NVOC_OBJ_CREATE_FLAGS_NONE 0x0000u
+#define NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY 0x0001u
+
+/*!
+ * @brief Create and construct a new object by class name.
+ *
+ * @param[out] ppNewObj  A pointer to the new object
+ * @param[in]  pParent   A pointer to the object that should be the new object's parent,
+ *                       or NULL if the new object has no parent.
+ * @param[in]  NAME      The name of the class of the new object.
+ */
+/* MSVC suppresses trailing commas at the final expansion but not at intermediate expansions, so we
+ * need to put our trailing comma inside another macro so it will be eaten. Normally, one would
+ * just wrap the trailing comma and __VA_ARGS__ in NV_EXPAND, but Bullseye's preprocessor has
+ * trouble dealing with that properly, so instead we use an indirect macro caller that Bullseye
+ * seems to handle properly. This avoids producing a "too many arguments for macro" warning (C4002). */
+#define objCreate(ppNewObj, pParent, NAME, ...) \
+    NVOC_PP_CALL(__objCreate_##NAME, (ppNewObj), (pParent), (NVOC_OBJ_CREATE_FLAGS_NONE), ##__VA_ARGS__)
+#define objCreateWithFlags(ppNewObj, pParent, NAME, flags, ...) \
+    NVOC_PP_CALL(__objCreate_##NAME, (ppNewObj), (pParent), (flags), ##__VA_ARGS__)
+
+/*!
+ * @brief Destruct and free an object and all of its children recursively.
+ *
+ * In C++, fields are destructed in reverse syntactic order.
Similarly, in + * NVOC, runtime children are deleted in the reverse of the order they were + * added (usually reverse creation order). + */ +#define objDelete(pObj) __nvoc_objDelete(staticCast((pObj), Dynamic)) + +/*! + * @brief Get the given object's class ID + */ +#define objGetClassId(pObj) __nvoc_objGetClassId(staticCast((pObj), Dynamic)) + +/*! + * @brief Get the given object's class info + */ +#define objGetClassInfo(pObj) __nvoc_objGetClassInfo(staticCast((pObj), Dynamic)) + +#if NV_PRINTF_STRINGS_ALLOWED +/*! + * Get the given class's name from its class info. + */ +#define objGetClassName(pObj) (objGetClassInfo((pObj))->name) +#endif + +/*! + * @brief Create and construct a new object by class ID. + * + * @param[out] ppNewObj A pointer to the new object + * @param[in] pParent A pointer to the object that should be the new object's parent, + * or NULL if the new object has no parent. + * @param[in] pClassInfo A pointer to the NVOC_CLASS_INFO for the desired class. + */ +#define objCreateDynamic(ppNewObj, pParent, pClassInfo, ...) \ + __nvoc_objCreateDynamic((ppNewObj), staticCast((pParent), Dynamic), \ + (pClassInfo), (NVOC_OBJ_CREATE_FLAGS_NONE), ##__VA_ARGS__) +#define objCreateDynamicWithFlags(ppNewObj, pParent, pClassInfo, flags, ...) \ + __nvoc_objCreateDynamic((ppNewObj), staticCast((pParent), Dynamic), \ + (pClassInfo), (flags), ##__VA_ARGS__) + +/*! + * @brief Cast any object supporting Run-Time Type Information (RTTI) to 'Dynamic'. + * + * Since '__nvoc_rtti' is always first, pObj == &(pObj)->__nvoc_rtti + * The purpose of this expression is to force a compile-time error if + * pObj does not contain RTTI information + */ +#define __staticCast_Dynamic(pObj) ((Dynamic*) &(pObj)->__nvoc_rtti) + + +/* + * Helper macros for "pObject->getProperty(pObject, prop)" + * + * The NVOC property macros are currently based on IOM's property macros. + * + * Property inheritance for IOM (Improved Object Model) is done by introducing + * 'prop##_BASE_CAST' and 'prop##_BASE_NAME'. For IOM, those are defined in + * generated file g_odb.h. For NVOC, they are defined in each class's generated + * header. + * + * In non-inheritance cases, getProperty/setProperty functions are equal to: + * #define getProperty(pObj, prop) prop // or pdb.prop for IOM + * #define setProperty(pObj, prop, val) prop = val // or pdb.prop = val for IOM + * + * Once the IOM model is phased out, these will become: + * #define getProperty(pObj, prop) pObj->prop + * #define setProperty(pObj, prop, val) pObj->prop = val + */ +#define getProperty(pObj, prop) prop##_BASE_CAST prop##_BASE_NAME +#define setProperty(pObj, prop, val) prop##_BASE_CAST prop##_BASE_NAME = val + +/*! Special NULL pointer for macros that expect to staticCast their parameter */ +#define NVOC_NULL_OBJECT ((Object*) NULL) + + +/*! + * @brief Wrapper of the Run-Time Type Information (RTTI) pointer. + * + * @details In effect, this is the base class (not Object) for all classes + * that support RTTI because the RTTI pointer is always first in memory, + */ +typedef struct { + const struct NVOC_RTTI *__nvoc_rtti; +} Dynamic; + +typedef NvU32 NVOC_CLASS_ID; + +typedef struct NVOC_RTTI_PROVIDER { + NvU32 dummy; +} NVOC_RTTI_PROVIDER; + +typedef const NVOC_RTTI_PROVIDER *NVOC_RTTI_PROVIDER_ID; + +//! Public metadata about an NVOC class definition. +typedef struct NVOC_CLASS_INFO +{ + const NvU32 size; + const NVOC_CLASS_ID classId; + const NVOC_RTTI_PROVIDER_ID providerId; +#if NV_PRINTF_STRINGS_ALLOWED + const char *name; +#endif +} NVOC_CLASS_INFO; + +/*! 
+ * @brief Wrapper of private field and private function + */ +#if defined(__clang__) // clang +#define NVOC_PRIVATE_FIELD(x) __attribute__((unavailable(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((unavailable(#x " is a private function"))) x +#elif defined(__INTEL_COMPILER) // icc +#pragma warning(error: 1786) // treat deprecated as error (globally affected) +#define NVOC_PRIVATE_FIELD(x) __attribute__((deprecated(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((deprecated(#x " is a private function"))) x +#elif defined(__GNUC__) || defined(__GNUG__) // gcc +#pragma GCC diagnostic error "-Wdeprecated-declarations" // treat deprecated as error (globally affected) +#define NVOC_PRIVATE_FIELD(x) __attribute__((deprecated(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((error(#x " is a private function"))) x +#else // other +#define NVOC_PRIVATE_FIELD(x) x##_PRIVATE +#define NVOC_PRIVATE_FUNCTION(x) x##_PRIVATE +#endif + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h new file mode 100644 index 0000000..37cca27 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#ifndef _NVOC_RTTI_H_ +#define _NVOC_RTTI_H_ + +#include "nvtypes.h" +#include "nvoc/runtime.h" +#include "nvport/inline/util_valist.h" + +typedef NV_STATUS (*NVOC_DYNAMIC_OBJ_CREATE)(Dynamic**, Dynamic *pParent, NvU32 createFlags, va_list); +typedef void (*NVOC_DYNAMIC_DTOR)(Dynamic*); + +// struct NVOC_CLASS_METADATA +// { +// // NvBool isMixedMode; +// // NvS32 ring; +// // const struct NVOC_EXPORTS *const pExportedClasses; +// }; + +// MSVC warning C4200 on "NVOC_CASTINFO::relatives": zero-sized array in struct/union +// Ignore the warning on VS2013+ +//! List of valid casts, needed for dynamicCast. +struct NVOC_CASTINFO +{ + const NvU32 numRelatives; + const struct NVOC_RTTI *const relatives[]; +}; + + + +//! Items unique to each NVOC class definition. Used to identify NVOC classes. 
+struct NVOC_CLASS_DEF { + const NVOC_CLASS_INFO classInfo; // public, defined in runtime.h; contains classId, size, and name + const NVOC_DYNAMIC_OBJ_CREATE objCreatefn; + const struct NVOC_CASTINFO *const pCastInfo; + const struct NVOC_EXPORT_INFO* const pExportInfo; +}; + +//! Items used to identify base class subobjects. +struct NVOC_RTTI // one per derived-ancestor relationship pair (and every derived class is also its own ancestor) +{ + const struct NVOC_CLASS_DEF *const pClassDef; // drives dynamicCast and objCreateDynamic, one per class + const NVOC_DYNAMIC_DTOR dtor; // __nvoc_destructFromBase for base substructures, real destructor for derived + const NvU32 offset; // 0 for derived +}; + + +void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef); + + + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h new file mode 100644 index 0000000..bf59317 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h @@ -0,0 +1,116 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. 
+ */ + +#ifndef _NVOC_RUNTIME_H_ +#define _NVOC_RUNTIME_H_ + +#include "nvport/nvport.h" +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" + +#include "nvoc/object.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NVOC_CLASS_ID __nvoc_objGetClassId(Dynamic *pObj); +const NVOC_CLASS_INFO *__nvoc_objGetClassInfo(Dynamic *pObj); + +void __nvoc_objDelete(Dynamic *pDynamic); + +NV_STATUS __nvoc_objCreateDynamic( + Dynamic **pNewObject, + Dynamic *pParent, + const NVOC_CLASS_INFO *pClassInfo, + NvU32 createFlags, + ...); + +Dynamic *__nvoc_dynamicCast(Dynamic *pFromObj, const NVOC_CLASS_INFO *pClassInfo); +Dynamic *__nvoc_dynamicCastById(Dynamic *pFromObj, NVOC_CLASS_ID classId); + +void __nvoc_destructFromBase(Dynamic *pDynamic); + +Dynamic *fullyDeriveWrapper(Dynamic *pDynamic); + +extern const NVOC_RTTI_PROVIDER __nvoc_rtti_provider; + +#define objFindAncestor(pObj, classId) objFindAncestor_IMPL(staticCast((pObj), Dynamic), classId) +#define objDynamicCastById(pObj, classId) objDynamicCastById_IMPL(staticCast((pObj), Dynamic), classId) +#define objFindAncestorOfType(TYPE, pObj) dynamicCast(objFindAncestor((pObj), classId(TYPE)), TYPE) +#define __nvoc_fullyDerive(pObj) __nvoc_fullyDerive_IMPL(staticCast((pObj), Dynamic)) +#define objFullyDerive(pObj) fullyDeriveWrapper(staticCast((pObj), Dynamic)) +#define objGetExportedMethodDef(pObj, methodId) objGetExportedMethodDef_IMPL(pObj, methodId) + +//! Contains data needed to call the exported method from kernel +struct NVOC_EXPORTED_METHOD_DEF +{ + void (*pFunc) (void); // Pointer to the method itself + NvU32 flags; // Export flags used for permission, method attribute verification (eg. NO_LOCK, PRIVILEGED...) + NvU32 accessRight; // Access rights required for this method + NvU32 methodId; // Id of the method in the class. Used for method identification. + NvU32 paramSize; // Size of the parameter structure that the method takes as the argument (0 if it takes no arguments) + const NVOC_CLASS_INFO* pClassInfo; // Class info for the parent class of the method + +#if NV_PRINTF_STRINGS_ALLOWED + const char *func; // Debug info +#endif +}; + +struct NVOC_EXPORT_INFO { + NvU32 numEntries; // Number of entries + const struct NVOC_EXPORTED_METHOD_DEF *pExportEntries; //An array of exported methods +}; + +/*! + * @brief Finds the closest ancestor of this object with the given class ID. + * + * This is a linear-time operation. + */ +Dynamic *objFindAncestor_IMPL(Dynamic *pDynamic, NVOC_CLASS_ID classId); + +/*! + * @brief Finds the exported method with the given method ID. + * + * If the method isn't found in the derived class, we search the ancestors. + * Returns NULL if the search is unsuccessful. + * This is a linear-time operation. + */ +const struct NVOC_EXPORTED_METHOD_DEF* objGetExportedMethodDef_IMPL(Dynamic* pObj, NvU32 methodId); +const struct NVOC_EXPORTED_METHOD_DEF* nvocGetExportedMethodDefFromMethodInfo_IMPL(const struct NVOC_EXPORT_INFO *pExportInfo, NvU32 methodId); + +/*! 
+ * @brief Dynamic cast by class id + */ +Dynamic *objDynamicCastById_IMPL(Dynamic *pFromObj, NVOC_CLASS_ID classId); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h new file mode 100644 index 0000000..9adb2ac --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVOC_UTILITY_H_ +#define _NVOC_UTILITY_H_ + + +#endif // _NVOC_UTILITY_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h new file mode 100644 index 0000000..8922b73 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h @@ -0,0 +1,418 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Atomic module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. 
Include nvport.h instead." +#endif + +#ifndef _NVPORT_ATOMIC_H_ +#define _NVPORT_ATOMIC_H_ +/** + * @defgroup NVPORT_ATOMIC Atomic operations + * + * @brief This module contains atomic operations + * + * @note that mixing atomic and non-atomic modifications to the same memory + * location can have undefined behavior that varies from platform to platform. + * You are better off not trying it. + * + * @note All atomic operations actually impose at least a compiler memory + * barrier - either just on the variable manipulated, or on all globally + * accessible variables. This is just a consequence of the current + * implementations, and should not be relied on. If you need a memory barrier, + * use @ref portAtomicMemFenceFull. + * + * @{ + */ + +/** See @ref PORT_UTIL_INLINE */ +#ifndef PORT_ATOMIC_INLINE +#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) +#define PORT_ATOMIC_INLINE PORT_INLINE +#if NVCPU_IS_64_BITS +#define PORT_ATOMIC64_INLINE PORT_INLINE +#else +#define PORT_ATOMIC64_INLINE +#endif +#else +#define PORT_ATOMIC_INLINE +#define PORT_ATOMIC64_INLINE +#endif +#endif + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Atomic addition on a signed 32b integer + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal += val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicAddS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicAddS32 +PORT_ATOMIC_INLINE NvU32 portAtomicAddU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic subtraction on a signed 32b integer + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal -= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicSubS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicSubS32 +PORT_ATOMIC_INLINE NvU32 portAtomicSubU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic set a signed 32b integer to the specified value + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal = val; + * ~~~ + * + * Once complete `val` will be visible in the location pointed to by `pVal` by all + * threads on all processors. + * + * @note On some platforms this operation is different from other atomic + * operations with respect to memory ordering. The best that can be guaranteed + * for this operation that it will behave as an acquire barrier. This + * means that operations occurring after this one in program order are + * guaranteed to not occur until the atomic operation is complete. It also + * means that it does not guarantee that previous stores are visible, or that + * previous loads have been satisfied. + * + */ +PORT_ATOMIC_INLINE void portAtomicSetS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicSetS32 +PORT_ATOMIC_INLINE void portAtomicSetU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic compare-and-swap on a signed 32b integer. + * + * A compare and swap is an atomic operation that reads a memory location, + * compares it to `oldVal` and if they are equal sets the memory location to + * `newVal`. 
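+ *
+ * A typical use is a read-modify-write retry loop; as an illustrative sketch
+ * (not part of the original header):
+ *
+ * ~~~{.c}
+ * NvS32 oldVal, newVal;
+ * do
+ * {
+ *     oldVal = *pCounter;              // plain read; the CAS validates it
+ *     newVal = oldVal + 1;
+ * } while (!portAtomicCompareAndSwapS32(pCounter, newVal, oldVal));
+ * ~~~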
+ *
+ * This function is the atomic equivalent to:
+ *
+ * ~~~{.c}
+ * if (*pVal == oldVal)
+ * {
+ *     *pVal = newVal;
+ *     return NV_TRUE;
+ * }
+ * return NV_FALSE;
+ * ~~~
+ *
+ * @return NV_TRUE if the operation modified the target of `pVal`, NV_FALSE otherwise
+ *
+ */
+PORT_ATOMIC_INLINE NvBool portAtomicCompareAndSwapS32(volatile NvS32 *pVal, NvS32 newVal, NvS32 oldVal);
+/// @brief Unsigned version of @ref portAtomicCompareAndSwapS32
+PORT_ATOMIC_INLINE NvBool portAtomicCompareAndSwapU32(volatile NvU32 *pVal, NvU32 newVal, NvU32 oldVal);
+
+/**
+ * @brief Atomic increment of a signed 32b integer.
+ *
+ * Adds one to the memory location pointed to by the parameter and returns the
+ * resulting value.
+ *
+ * This function is the atomic equivalent to:
+ *
+ * ~~~{.c}
+ * ++(*pVal);
+ * return *pVal;
+ * ~~~
+ *
+ * @return the new value of `*pVal`
+ *
+ */
+PORT_ATOMIC_INLINE NvS32 portAtomicIncrementS32(volatile NvS32 *pVal);
+/// @brief Unsigned version of @ref portAtomicIncrementS32
+PORT_ATOMIC_INLINE NvU32 portAtomicIncrementU32(volatile NvU32 *pVal);
+
+/**
+ * @brief Atomic decrement of a signed 32b integer.
+ *
+ * Subtracts one from the memory location pointed to by the parameter and
+ * returns the resulting value.
+ *
+ * This function is the atomic equivalent to:
+ *
+ * ~~~{.c}
+ * --(*pVal);
+ * return *pVal;
+ * ~~~
+ *
+ * @return the new value of `*pVal`
+ */
+PORT_ATOMIC_INLINE NvS32 portAtomicDecrementS32(volatile NvS32 *pVal);
+/// @brief Unsigned version of @ref portAtomicDecrementS32
+PORT_ATOMIC_INLINE NvU32 portAtomicDecrementU32(volatile NvU32 *pVal);
+
+
+/**
+ * @brief Atomic bitwise XOR on a signed 32b integer.
+ *
+ * This function is the atomic equivalent to:
+ *
+ * ~~~{.c}
+ * *pVal ^= val;
+ * return *pVal;
+ * ~~~
+ *
+ * @return the new value of `*pVal`
+ */
+PORT_ATOMIC_INLINE NvS32 portAtomicXorS32(volatile NvS32 *pVal, NvS32 val);
+/// @brief Unsigned version of @ref portAtomicXorS32
+PORT_ATOMIC_INLINE NvU32 portAtomicXorU32(volatile NvU32 *pVal, NvU32 val);
+
+/**
+ * @brief Atomic bitwise OR on a signed 32b integer.
+ *
+ * This function is the atomic equivalent to:
+ *
+ * ~~~{.c}
+ * *pVal |= val;
+ * return *pVal;
+ * ~~~
+ *
+ * @return the new value of `*pVal`
+ */
+PORT_ATOMIC_INLINE NvS32 portAtomicOrS32(volatile NvS32 *pVal, NvS32 val);
+/// @brief Unsigned version of @ref portAtomicOrS32
+PORT_ATOMIC_INLINE NvU32 portAtomicOrU32(volatile NvU32 *pVal, NvU32 val);
+
+/**
+ * @brief Atomic bitwise AND on a signed 32b integer.
+ *
+ * This function is the atomic equivalent to:
+ *
+ * ~~~{.c}
+ * *pVal &= val;
+ * return *pVal;
+ * ~~~
+ *
+ * @return the new value of `*pVal`
+ */
+PORT_ATOMIC_INLINE NvS32 portAtomicAndS32(volatile NvS32 *pVal, NvS32 val);
+/// @brief Unsigned version of @ref portAtomicAndS32
+PORT_ATOMIC_INLINE NvU32 portAtomicAndU32(volatile NvU32 *pVal, NvU32 val);
+
+
+
+/**
+ * @name Memory Barrier functions
+ * @note Memory fence functions must be marked inline, so the compiler knows
+ * about the barrier and doesn't reorder instructions around the call. Thus,
+ * this is PORT_INLINE and not PORT_ATOMIC_INLINE. (Force inline not necessary)
+ *
+ * @note A given platform is allowed to implement the load/store barriers as
+ * full barriers instead, if the finer-grained versions aren't supported. Thus,
+ * you should only use @ref portAtomicMemoryFenceLoad and @ref
+ * portAtomicMemoryFenceStore for a possible performance bonus over @ref
+ * portAtomicMemoryFenceFull. Don't write code that relies on those being
+ * load/store only barriers.
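+ *
+ * As an illustrative sketch (not from the original header; pShared, ready,
+ * data, and consume() are hypothetical names), the classic producer/consumer
+ * pairing of the two one-sided fences:
+ *
+ * ~~~{.c}
+ * // Producer:
+ * pShared->data = 42;
+ * portAtomicMemoryFenceStore();   // publish the data before the flag
+ * pShared->ready = 1;
+ *
+ * // Consumer:
+ * while (!pShared->ready) { }     // spin until the flag is set
+ * portAtomicMemoryFenceLoad();    // order the flag read before the data read
+ * consume(pShared->data);
+ * ~~~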
+ * + * @{ + */ + +/** + * @brief Creates a full HW and compiler memory barrier. + * + * A memory fence (memory barrier) imposes a sequential ordering on access to + * all globally accessible variables. That means that all accesses found before + * the fence will finish before any of those after it. + */ +PORT_INLINE void portAtomicMemoryFenceFull(void); +/** + * @brief Creates a HW and compiler load memory barrier. + * + * A load memory fence (memory barrier) imposes a sequential ordering on all + * loads to globally accessible variables. All loads found before the barrier + * will happen before any loads found after it. A load barrier has no effect on + * store operations. + */ +PORT_INLINE void portAtomicMemoryFenceLoad(void); +/** + * @brief Creates a HW and compiler store memory barrier. + * + * A store memory fence (memory barrier) imposes a sequential ordering on all + * stores to globally accessible variables. All stores found before the barrier + * will happen before any stores found after it. A store barrier has no effect + * on load operations. + */ +PORT_INLINE void portAtomicMemoryFenceStore(void); +/// @} End memory barrier functions + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +#ifndef PORT_ATOMIC_64_BIT_SUPPORTED +/// @note We support 64bit atomics on all 64bit systems (and some 32bit) +#define PORT_ATOMIC_64_BIT_SUPPORTED NVCPU_IS_64_BITS +#endif + +#define portAtomicExAddS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExSubS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExSetS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExCompareAndSwapS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExIncrementS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExDecrementS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExXorS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExOrS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExAndS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED + +#if PORT_ATOMIC_64_BIT_SUPPORTED +/** + * @brief Like @ref portAtomicAddS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExAddS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExAddS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExAddU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicSubS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExSubS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExSubS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExSubU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicSetS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE void portAtomicExSetS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExSetS64 +PORT_ATOMIC64_INLINE void portAtomicExSetU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicCompareAndSwapS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvBool portAtomicExCompareAndSwapS64(volatile NvS64 *pVal, NvS64 newVal, NvS64 oldVal); +/// @brief Unsigned version of @ref portAtomicExCompareAndSwapS64 +PORT_ATOMIC64_INLINE NvBool portAtomicExCompareAndSwapU64(volatile NvU64 *pVal, NvU64 newVal, NvU64 oldVal); +/** + * @brief Like @ref portAtomicIncrementS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExIncrementS64(volatile NvS64 *pVal); +/// @brief 
Unsigned version of @ref portAtomicExIncrementS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExIncrementU64(volatile NvU64 *pVal); +/** + * @brief Like @ref portAtomicDecrementS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExDecrementS64(volatile NvS64 *pVal); +/// @brief Unsigned version of @ref portAtomicExDecrementS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExDecrementU64(volatile NvU64 *pVal); +/** + * @brief Like @ref portAtomicXorS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExXorS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExXorS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExXorU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicOrS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExOrS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExOrS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExOrU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicAndS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExAndS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExAndS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExAndU64(volatile NvU64 *pVal, NvU64 val); + +#endif // PORT_ATOMIC_64_BIT_SUPPORTED + +/// @} End extended functions + +/** + * Platform-specific inline implementations + */ +#if NVOS_IS_LIBOS +#include "nvport/inline/atomic_libos.h" +#endif + +#if PORT_COMPILER_IS_GCC +#include "nvport/inline/atomic_gcc.h" +#elif PORT_COMPILER_IS_CLANG +#include "nvport/inline/atomic_clang.h" +#elif PORT_COMPILER_IS_MSVC +#include "nvport/inline/atomic_msvc.h" +#endif + + +/** + * @name Utility Functions + * + * These are utility functions for performing operations on pointer sized + * operands. While the 64bit functions are "extended", they should always be + * present on systems where pointers and NvLength are 64 bits. 
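+ *
+ * For example (an illustrative sketch, not from the original header), a
+ * pointer-sized counter can be manipulated with one spelling on both 32-bit
+ * and 64-bit builds:
+ *
+ * ~~~{.c}
+ * static volatile NvSPtr refCount = 0;
+ * portAtomicIncrementSize(&refCount);   // expands to the S32 or S64 variant
+ * portAtomicDecrementSize(&refCount);
+ * ~~~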
+ * @{ + */ +#if !NVCPU_IS_64_BITS +#define portAtomicAddSize(a,b) (NvSPtr)portAtomicAddS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSubSize(a,b) (NvSPtr)portAtomicSubS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSetSize(a,b) portAtomicSetS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicCompareAndSwapSize(a,b,c) portAtomicCompareAndSwapS32((volatile NvSPtr *)a, (NvSPtr)b, (NvSPtr)c) +#define portAtomicIncrementSize(a) (NvSPtr)portAtomicIncrementS32((volatile NvSPtr *)a) +#define portAtomicDecrementSize(a) (NvSPtr)portAtomicDecrementS32((volatile NvSPtr *)a) +#define portAtomicXorSize(a,b) (NvSPtr)portAtomicXorS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicOrSize(a,b) (NvSPtr)portAtomicOrS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicAndSize(a,b) (NvSPtr)portAtomicAndS32((volatile NvSPtr *)a, (NvSPtr)b) +#else +#define portAtomicAddSize(a,b) (NvSPtr)portAtomicExAddS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSubSize(a,b) (NvSPtr)portAtomicExSubS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSetSize(a,b) portAtomicExSetS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicCompareAndSwapSize(a,b,c) portAtomicExCompareAndSwapS64((volatile NvSPtr *)a, (NvSPtr)b, (NvSPtr)c) +#define portAtomicIncrementSize(a) (NvSPtr)portAtomicExIncrementS64((volatile NvSPtr *)a) +#define portAtomicDecrementSize(a) (NvSPtr)portAtomicExDecrementS64((volatile NvSPtr *)a) +#define portAtomicXorSize(a,b) (NvSPtr)portAtomicExXorS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicOrSize(a,b) (NvSPtr)portAtomicExOrS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicAndSize(a,b) (NvSPtr)portAtomicExAndS64((volatile NvSPtr *)a, (NvSPtr)b) +#endif +/// @} + +#endif // _NVPORT_ATOMIC_H_ +/// @} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h new file mode 100644 index 0000000..26f8ec1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVPORT_CORE_H_ +#define _NVPORT_CORE_H_ + +/** + * @defgroup NVPORT_CORE Core Functions + * + * @brief These are core NvPort functions present in all configurations. 
+ * @{ + */ +/** + * @brief Global initialization + * + * Must be called once and only once before any NvPort functions can be called. + * + * If this function returns an error then calling any NvPort function will result + * in undefined behavior. + * + * + * @return NV_OK if successful, error otherwise + */ +NV_STATUS portInitialize(void); + +/** + * @brief Global shutdown + * + * Must be called once and only once when a driver is shutting down and no more + * NvPort functions will be called. + * + */ +void portShutdown(void); + +/** + * @brief Returns whether NvPort is initialized + * + * This function can be called at any time. It returns whether @ref portInitialize + * has been called (and @ref portShutdown has not). + */ +NvBool portIsInitialized(void); + +/// @} + +#endif // _NVPORT_CORE_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h new file mode 100644 index 0000000..ac95ec0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h @@ -0,0 +1,637 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CPU module public interface. + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_CPU_H_ +#define _NVPORT_CPU_H_ +/** + * @defgroup NVPORT_CPU CPU specific operations. + * + * @brief This module contains CPU-specific operations. + * + * @{ + */ +/** + * @brief Initialize global CPU module state. This function is called by + * @ref portInitialize. + */ +void portCpuInitialize(void); + +/** + * @brief Clean up global CPU module state. This function is called by + * @ref portShutdown. + */ +void portCpuShutdown(void); + +/** + * @name Core Functions + * @{ + */ +/** + * @brief Read requested MSR + * + * @param [in] address Address of the MSR + * @param [out] *pValue Value of MSR + * + * @return NV_OK If successful. + */ +NV_STATUS portReadMsr(NvU32 address, NvU64 *pValue); + +/** + * @brief Write value to requested MSR + * + * @param [in] address Address of the MSR + * @param [in] value Value to be written + * + * @return NV_OK If successful. 
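+ *
+ * A minimal read-modify-write sketch (illustrative only; the MSR address
+ * 0x1A0, IA32_MISC_ENABLE, and the bit position are assumptions made for
+ * this example):
+ * ~~~{.c}
+ * NvU64 value;
+ * if (portReadMsr(0x1A0, &value) == NV_OK)
+ * {
+ *     value |= (1ULL << 23);              // set one bit (illustrative)
+ *     if (portWriteMsr(0x1A0, value) != NV_OK)
+ *     {
+ *         // handle the failed write
+ *     }
+ * }
+ * ~~~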
+ */ +NV_STATUS portWriteMsr(NvU32 address, NvU64 value); + + /// @} End Core functions + + /** + * @name Extended Functions + * @{ + */ + /** + * @name Intel X86 Structures, unions and enums. + * @{ + */ + +/** + * @brief Structure representing Intel Processor's general + * features broken down into individual bit fields. + */ +typedef struct PORT_CPU_INTEL_X86_FEATURES +{ + /// @{ + NvU32 SSE3 : 1; /**< Streaming SIMD Extensions 3.*/ + NvU32 PCLMULQDQ : 1; /**< PCLMULQDQ instruction.*/ + NvU32 DTES64 : 1; /**< 64-bit DS Area.*/ + NvU32 MONITOR : 1; /**< MONITOR/MWAIT.*/ + NvU32 DSCPL : 1; /**< CPL Qualified Debug Store.*/ + NvU32 VMX : 1; /**< Virtual Machine Extensions.*/ + NvU32 SMX : 1; /**< Safer Mode Extensions.*/ + NvU32 EIST : 1; /**< Enhanced Intel SpeedStep(R) technology.*/ + NvU32 TM2 : 1; /**< Thermal Monitor 2.*/ + NvU32 SSSE3 : 1; /**< Supplemental Streaming SIMD Extensions 3.*/ + NvU32 CNXTID : 1; /**< L1 Context ID.*/ + NvU32 SDBG : 1; /**< IA32_DEBUG_INTERFACE MSR for silicon debug.*/ + NvU32 FMA : 1; /**< FMA extensions using YMM state.*/ + NvU32 CMPXCHG16B : 1; /**< CMPXCHG8B/CMPXCHG16B Compare and Exchange Bytes.*/ + NvU32 xTPRUpdateControl : 1; /**< Supports changing + IA32_MISC_ENABLE[bit 23].*/ + NvU32 PDCM : 1; /**< Perfmon and Debug Capability: supports the performance + and debug feature indication MSR IA32_PERF_CAPABILITIES.*/ + NvU32 PCID : 1; /**< Process-context identifiers: Supports PCIDs and that + software may set CR4.PCIDE to 1.*/ + NvU32 DCA : 1; /**< Supports the ability to prefetch data from a memory-mapped + device.*/ + NvU32 SSE41 : 1; /**< Supports SSE4.1.*/ + NvU32 SSE42 : 1; /**< Supports SSE4.2.*/ + NvU32 x2APIC : 1; /**< Supports x2APIC.*/ + NvU32 MOVBE : 1; /**< Supports MOVBE instruction.*/ + NvU32 POPCNT : 1; /**< Supports the POPCNT instruction.*/ + NvU32 TSCDeadline : 1; /**< The processor's local APIC timer supports + one-shot operation using a TSC deadline value.*/ + NvU32 AES : 1; /**< Supports the AESNI instruction extensions.*/ + NvU32 XSAVE : 1; /**< Supports the XSAVE/XRSTOR processor extended states + feature, the XSETBV/XGETBV instructions, and XCR0.*/ + NvU32 OSXSAVE : 1; /**< The OS has set CR4.OSXSAVE[bit 18] to enable + XSETBV/XGETBV instructions to access XCR0 and to support + processor extended state management using + XSAVE/XRSTOR.*/ + NvU32 AVX : 1; /**< The processor supports the AVX instruction + extensions.*/ + NvU32 F16C : 1; /**< Processor supports 16-bit floating-point conversion + instructions.*/ + NvU32 RDEND : 1; /**< Processor supports RDRAND instruction.*/ + NvU32 FPU : 1; /**< Floating Point Unit On-Chip.*/ + NvU32 VME : 1; /**< Virtual 8086 Mode Enhancements.*/ + NvU32 DE : 1; /**< Debugging Extensions.*/ + NvU32 PSE : 1; /**< Page Size Extension.*/ + NvU32 TSC : 1; /**< Time Stamp Counter.*/ + NvU32 MSR : 1; /**< Model Specific Registers RDMSR and WRMSR + Instructions.*/ + NvU32 PAE : 1; /**< Physical Address Extension.*/ + NvU32 MCE : 1; /**< Machine Check Exception.*/ + NvU32 CX8 : 1; /**< CMPXCHG8B Instruction.*/ + NvU32 APIC : 1; /**< APIC On-Chip.*/ + NvU32 SEP : 1; /**< SYSENTER and SYSEXIT Instructions.*/ + NvU32 MTRR : 1; /**< Memory Type Range Registers.*/ + NvU32 PGE : 1; /**< Page Global Bit.*/ + NvU32 MCA : 1; /**< Machine Check Architecture.*/ + NvU32 CMOV : 1; /**< Conditional Move Instructions.*/ + NvU32 PAT : 1; /**< Page Attribute Table.*/ + NvU32 PSE36 : 1; /**< 36-Bit Page Size Extension.*/ + NvU32 PSN : 1; /**< 96-Bit Processor Serial Number.*/ + NvU32 CLFSH : 1; /**< CLFLUSH Instruction.*/ + NvU32 
DEBUGS : 1; /**< Debug Store.*/ + NvU32 ACPI : 1; /**< Thermal Monitor and Software Controlled Clock + Facilities.*/ + NvU32 MMX : 1; /**< Intel MMX Technology.*/ + NvU32 FXSR : 1; /**< FXSAVE and FXRSTOR Instructions.*/ + NvU32 SSE : 1; /**< SSE Extensions.*/ + NvU32 SSE2 : 1; /**< SSE2 extensions.*/ + NvU32 SELFS : 1; /**< Self Snoop.*/ + NvU32 HTT : 1; /**< Max APIC IDs reserved field is valid.*/ + NvU32 TM : 1; /**< Thermal Monitor.*/ + NvU32 PBE : 1; /**< Pending Break Enable.*/ + /// @} +} PORT_CPU_INTEL_X86_FEATURES; + +/** + * @brief Enum representing Intel processor family information. + * + */ +typedef enum PORT_CPU_INTEL_FAMILY +{ + PORT_CPU_INTEL_FAMILY_6 = 6, + PORT_CPU_INTEL_FAMILY_7 = 7 +} PORT_CPU_INTEL_FAMILY; + +/** + * @brief Enum representing Intel family 6 processor model information. + * + */ +typedef enum PORT_CPU_INTEL_FAMILY_6_MODEL +{ + PORT_CPU_INTEL_FAMLLY_6_MODEL_SANDYBRIDGE = 42, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SANDYBRIDGE_X = 45, + PORT_CPU_INTEL_FAMLLY_6_MODEL_IVYBRIDGE = 58, + PORT_CPU_INTEL_FAMLLY_6_MODEL_IVYBRIDGE_X = 62, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL = 60, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_X = 63, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_ULT = 69, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_GT3 = 70, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL = 61, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL_GT3 = 71, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL_X = 79, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE = 94, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE_MOBILE = 78, + PORT_CPU_INTEL_FAMLLY_6_MODEL_KABYLAKE = 158, + PORT_CPU_INTEL_FAMLLY_6_MODEL_KABYLAKE_MOBILE = 142, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE_X = 85, + PORT_CPU_INTEL_FAMLLY_6_MODEL_CANNONLAKE_MOBILE = 102, + PORT_CPU_INTEL_FAMILY_6_MODEL_COMETLAKE_MOBILE = 166, + PORT_CPU_INTEL_FAMILY_6_MODEL_COMETLAKE = 165, + PORT_CPU_INTEL_FAMILY_6_MODEL_TIGERLAKE_MOBILE = 140, + PORT_CPU_INTEL_FAMILY_6_MODEL_TIGERLAKE = 141, +} PORT_CPU_INTEL_FAMILY_6_MODEL; + +/** + * @brief Union representing Intel processor family information. + * + */ +typedef union PORT_CPU_INTEL_MODEL +{ + PORT_CPU_INTEL_FAMILY_6_MODEL family6; +} PORT_CPU_INTEL_MODEL; + +/** + * @brief Enum representing Intel processor type information. + * + */ +typedef enum PORT_CPU_INTEL_PROCESSOR_TYPE +{ + PORT_CPU_INTEL_PROCESSOR_TYPE_ORIGINAL_OEM = 0, + PORT_CPU_INTEL_PROCESSOR_TYPE_OVERDRIVE = 1, + PORT_CPU_INTEL_PROCESSOR_TYPE_DUAL_PROCESSOR = 2, + PORT_CPU_INTEL_PROCESSOR_TYPE_RESERVED = 3 +} PORT_CPU_INTEL_PROCESSOR_TYPE; + +/** + * @brief Structure representing Intel Processor's Thermal & Power Management + * features broken down into individual bit fields. 
+ */ +typedef struct PORT_CPU_INTEL_TPM_FEATURES +{ + /// @{ + NvU32 DTS : 1; /**< Digital Temperature Sensor is supported if set.*/ + NvU32 IntelTurboBoost : 1; /**< Intel Turbo Boost Technology available.*/ + NvU32 ARAT : 1; /**< APIC-Timer-always-running feature is supported + if set.*/ + NvU32 PLN : 1; /**< Power limit notification controls are supported + if set.*/ + NvU32 ECMD : 1; /**< Clock modulation duty cycle extension is supported + if set.*/ + NvU32 PTM : 1; /**< Package thermal management is supported if set.*/ + NvU32 HWP : 1; /**< HWP base registers (IA32_PM_ENABLE[bit 0], + IA32_HWP_CAPABILITIES, IA32_HWP_REQUEST, IA32_HWP_STATUS) + are supported if set.*/ + NvU32 HWPNotification : 1; /**< IA32_HWP_INTERRUPT MSR is supported + if set.*/ + NvU32 HWPActivityWindow : 1; /**< IA32_HWP_REQUEST[bits 41:32] is + supported if set.*/ + NvU32 HWPEPP : 1; /**< HWP_Energy_Performance_Preference. + IA32_HWP_REQUEST[bits 31:24] is supported if set.*/ + NvU32 HWPPLR : 1; /**< HWP_Package_Level_Request. IA32_HWP_REQUEST_PKG MSR + is supported if set.*/ + NvU32 HDC : 1; /**< HDC base registers IA32_PKG_HDC_CTL, IA32_PM_CTL1, + IA32_THREAD_STALL MSRs are supported if set.*/ + NvU32 IntelTurboBoostMaxTech30 : 1; /**< Intel(R) Turbo Boost Max Technology + 3.0 available.*/ + NvU32 HWPCapabilities : 1; /**< Highest Performance change is supported + if set.*/ + NvU32 HWPPECI : 1; /**< HWP PECI override is supported if set.*/ + NvU32 FLEXHWP : 1; /**< Flexible HWP is supported if set.*/ + NvU32 FAM : 1; /**< Fast access mode for the IA32_HWP_REQUEST MSR is + supported if set.*/ + NvU32 ILPHWPRequest : 1; /**< Ignoring Idle Logical Processor HWP request + is supported if set.*/ + NvU32 NoOfInterruptThresholdsInDTS : 4; /**< Number of Interrupt Thresholds + in Digital Thermal Sensor.*/ + NvU32 HCFC : 1; /**< Hardware Coordination Feedback Capability + (Presence of IA32_MPERF and IA32_APERF). The capability to + provide a measure of delivered processor performance + (since last reset of the counters), as a percentage of the + expected processor performance when running at the TSC + frequency.*/ + NvU32 PEBP : 1; /**< The processor supports performance-energy bias + preference if CPUID.06H:ECX.SETBH[bit 3] is set and it also + implies the presence of a new architectural MSR called + IA32_ENERGY_PERF_BIAS (1B0H).*/ + /// @} +} PORT_CPU_INTEL_TPM_FEATURES; + +/** + * @brief Structure representing Intel Processor's Architecture Performance + * monitoring features broken down into individual bit fields. 
+ */ +typedef struct PORT_CPU_INTEL_ARCH_PERF_MONITOR +{ + /// @{ + NvU32 versionId; /**< Version ID of architectural performance monitoring.*/ + NvU32 noOfGPPerfMonitoringCounters; /**< Number of general-purpose + performance monitoring counters per + logical processor.*/ + NvU32 bitWidthOfGPCounters; /**< Bit width of general-purpose performance + monitoring counters.*/ + NvU32 coreCycleEvent : 1; /**< Core cycle event available if 1.*/ + NvU32 instructionRetiredEvent : 1; /**< Instruction retired event + available if 1.*/ + NvU32 referenceCycelEvent : 1; /**< Reference cycles event available if 1.*/ + NvU32 lastLevelCacheRefEvent : 1; /**< Last-level cache reference event + available if 1.*/ + NvU32 lastLevelCacheMissEvent : 1; /**< Last-level cache misses event not + available if 1.*/ + NvU32 branchInstructionRetiredEvent : 1; /**< Branch instruction retired + event not available if 1.*/ + NvU32 branchMispredictRetiredEvent : 1; /**< Branch mispredict retired event + not available if 1.*/ + NvU32 noOfFixedFuncPerfCounters; /**< Number of fixed-function performance + counters (if Version ID > 1).*/ + NvU32 bitWidthOfFixedFuncPerfCounters; /**< Bit width of fixed-function + performance counters + (if Version ID > 1).*/ + /// @} +} PORT_CPU_INTEL_ARCH_PERF_MONITOR; + +/** + * @brief Structure representing Intel Processor version and features + * broken down into individual fields. + */ +typedef struct PORT_CPU_INTEL +{ + /// @{ + PORT_CPU_INTEL_FAMILY family; /**< Family of the Processor.*/ + PORT_CPU_INTEL_MODEL model; /**< Model of the Processor.*/ + PORT_CPU_INTEL_PROCESSOR_TYPE processorType; /**< Processor Type.*/ + NvU8 steppingId; /**< Stepping ID of the Processor.*/ + NvU8 brandIndex; /**< Numerical Index of Brand String Index Table + entry.*/ + NvU8 localApicId; /**< Local APIC ID of the Processor.*/ + NvU32 threadCountPerCore; /**< Threads Per Core.*/ + NvU32 physicalCoreCount; /**< Physical Cores Per Package.*/ + NvU32 logicalCoreCount; /**< Logical Cores Per Package.*/ + PORT_CPU_INTEL_X86_FEATURES features; /**< General Features.*/ + PORT_CPU_INTEL_TPM_FEATURES tpmFeatures; /**< Thermal and Power Management + Features.*/ + PORT_CPU_INTEL_ARCH_PERF_MONITOR archPerfMonitor; /**< Architecture + Performance Monitoring + Features.*/ + /// @} +} PORT_CPU_INTEL; + +/// @} + +/** + * @name AMD X86 Structures, unions and enums. + * @{ + */ + +/** + * @brief Enum representing AMD processor family information. + * + */ +typedef enum PORT_CPU_AMD_FAMILY +{ + PORT_CPU_AMD_FAMILY_0 = 0, + PORT_CPU_AMD_FAMILY_1 = 1, + PORT_CPU_AMD_FAMILY_ZEN3 = 25 +} PORT_CPU_AMD_FAMILY; + +/** + * @brief Enum representing AMD processor family 0 model information. + * + */ +typedef enum PORT_CPU_AMD_FAMILY_0_MODEL +{ + PORT_CPU_AMD_FAMLLY_0_MODEL_X = 0, +} PORT_CPU_AMD_FAMILY_0_MODEL; + +/** + * @brief Union representing AMD processor family-wise model information. + * + */ +typedef union PORT_CPU_AMD_MODEL +{ + PORT_CPU_AMD_FAMILY_0_MODEL family0; +} PORT_CPU_AMD_MODEL; + +/** + * @brief Structure representing AMD Processor's Thermal & Power Management + * features broken down into individual bit fields. + */ +typedef struct PORT_CPU_AMD_TPM_FEATURES +{ + /// @{ + NvU32 EffFreq : 1; /**< Effective frequency interface is available if set.*/ + /// @} +} PORT_CPU_AMD_TPM_FEATURES; + +/** + * @brief Structure representing AMD Processor version and features + * broken down into individual fields. 
+ */ +typedef struct PORT_CPU_AMD +{ + /// @{ + PORT_CPU_AMD_FAMILY family; /**< Family of the Processor.*/ + PORT_CPU_AMD_MODEL model; /**< Model of the Processor.*/ + NvU8 steppingId; /**< Stepping ID of the Processor.*/ + NvU8 brandIndex; /**< Numerical Index of Brand String Index Table + entry.*/ + NvU8 localApicId; /**< Local APIC ID of the Processor.*/ + NvU32 threadCountPerCore; /**< Threads Per Core.*/ + NvU32 physicalCoreCount; /**< Physical Cores Per Package.*/ + NvU32 logicalCoreCount; /**< Logical Cores Per Package.*/ + PORT_CPU_AMD_TPM_FEATURES tpmFeatures; /**< Thermal and Power Management + Features.*/ + /// @} +} PORT_CPU_AMD; + +/// @} + +/** + * @name Generic CPU Information Structures, unions and enums. + * @{ + */ + +/** + * @brief Maximum length of the null-terminated Vendor ID string. + */ +#define PORT_CPU_VENDOR_ID_LENGTH 20 + +/** + * @brief Enum representing the Processor Architecture Type. + */ +typedef enum PORT_CPU_TYPE +{ + /// @{ + PORT_CPU_TYPE_INTEL_X86 = 0, /**< Intel X86/X86-64 Architecture.*/ + PORT_CPU_TYPE_AMD_X86 = 1, /**< AMD X86/AMD64 Architecture.*/ + PORT_CPU_TYPE_ARM = 2 /**< ARM Architecture.*/ + /// @} +} PORT_CPU_TYPE; + +/** + * @brief Union representing the Abstract Processor data structure. + */ +typedef union PORT_CPU +{ + PORT_CPU_AMD amd; + PORT_CPU_INTEL intel; +} PORT_CPU; + +/** + * @brief Structure representing processor information broken down into + * individual fields. + */ +typedef struct PORT_CPU_INFORMATION +{ + /// @{ + PORT_CPU_TYPE type; /**< Type of Architecture/CPU.*/ + char vendorId[PORT_CPU_VENDOR_ID_LENGTH]; /**< Null terminated Vendor Id + String.*/ + NvLength vendorIdLength; /**< Actual length of the null terminated Vendor + Id string.*/ + PORT_CPU cpu; /**< CPU-specific information.*/ + /// @} +} PORT_CPU_INFORMATION; + +/** + * @brief Structure representing processor logical topology information broken + * down into individual fields. + */ +typedef struct PORT_CPU_LOGICAL_TOPOLOGY +{ + /// @{ + NvU64 activeCpuCount; /**< Active Logical CPUs.*/ + NvU64 activeGroupCount; /**< Active Logical CPU Group count.*/ + NvU64 maxCpuCount; /**< Maximum Logical CPUs system can support.*/ + NvU64 maxGroupCount; /**< Maximum Logical CPU Groups system can support.*/ + NvU64 maxCpuPerGroup; /**< Maximum Logical CPUs system can support per group.*/ + /// @} +} PORT_CPU_LOGICAL_TOPOLOGY; + +/** + * @brief Structure representing a BAR descriptor for a PCIe device + */ +typedef struct PORT_CPU_BAR_DESC +{ + /// @{ + void *pBarAddr; /**< Starting virtual address of the BAR space */ + NvU64 physAddr; /**< Starting physical address of the BAR space */ + NvU32 barSize; /**< Size of BAR space */ + /// @} +} PORT_CPU_BAR_DESC; + +/// @} End Generic CPU Information Structures, unions and enums. + +/** + * @brief Get Logical Topology of CPU. + * @param[out] pCpuTopology PORT_CPU_LOGICAL_TOPOLOGY pointer. + * @return NV_OK If successful and CPU logical topology information + * in the pCpuTopology structure. + */ +NV_STATUS portCpuGetLogicalTopology(PORT_CPU_LOGICAL_TOPOLOGY *pCpuTopology); +#define portCpuGetLogicalTopology_SUPPORTED (NVOS_IS_WINDOWS) + +/** + * @brief Get CPU Information. + * @param[out] pCpuInfo PORT_CPU_INFORMATION pointer. + * @return NV_OK If successful and CPU Information in pCpuInfo structure. 
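+ *
+ * A minimal usage sketch (illustrative, not part of this header):
+ * ~~~{.c}
+ * PORT_CPU_INFORMATION cpuInfo;
+ * if ((portCpuGetInfo(&cpuInfo) == NV_OK) &&
+ *     (cpuInfo.type == PORT_CPU_TYPE_INTEL_X86))
+ * {
+ *     // e.g. gate an AVX code path on the reported feature bit
+ *     NvBool bHasAvx = cpuInfo.cpu.intel.features.AVX;
+ * }
+ * ~~~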
+ */ +NV_STATUS portCpuGetInfo(PORT_CPU_INFORMATION* pCpuInfo); +#define portCpuGetInfo_SUPPORTED (_X86_ || _AMD64_) + +/** + * @brief Get CPU information using CPUID Instruction (X86-64 specific) + * @param[out] pCpuInfo Pointer to an array in which the register values + * are returned: + * cpuInfo[0] = EAX, + * cpuInfo[1] = EBX, + * cpuInfo[2] = ECX, + * cpuInfo[3] = EDX. + * @param[in] functionId Function Id of CPUID instruction to execute. + * @param[in] subfunctionId Sub-Function Id of CPUID instruction to execute. + * subfunctionId enables you to gather additional information about + * the processor. + * + * @return NV_OK if successful, otherwise an error. + */ +NV_STATUS portCpuExCpuId(NvU32* pCpuInfo, NvU32 functionId, + NvU32 subfunctionId); +#define portCpuExCpuId_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) +/// @} End extended functions + +/** + * @brief Retrieve the current value and frequency of the performance counter + * + * @param[out] pFreq A pointer to a variable to which this routine writes the + * performance counter frequency, in ticks per second. + * This parameter is optional and can be NULL if the caller + * does not need the counter frequency value. + * + * @return The performance counter value in units of ticks + */ +NvS64 portCpuExQueryPerformanceCounter(NvS64 *pFreq); +#define portCpuExQueryPerformanceCounter_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Enable PMC read feature + */ +void portCpuExEnablePmc(void); +#define portCpuExEnablePmc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Read requested PMC register + * + * @param [in] address Address of the PMC register + * @param [out] *pValue Value of PMC register + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExReadPmc(NvU32 address, NvU64 *pValue); +#define portCpuExReadPmc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Fill in the BAR descriptor of the integrated memory controller + * + * @param [in] pImcBarDesc Pointer to BAR descriptor structure + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExAllocImcBarDesc(PORT_CPU_BAR_DESC *pImcBarDesc); +#define portCpuExAllocImcBarDesc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Free the BAR descriptor of the integrated memory controller + * + * @param [in] pImcBarDesc Pointer to BAR descriptor structure + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExFreeImcBarDesc(PORT_CPU_BAR_DESC *pImcBarDesc); +#define portCpuExFreeImcBarDesc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Reset Performance monitoring counters + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExResetPmu(void); +#define portCpuExResetPmu_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Program Performance monitoring counters + * + * @param [in] numValidEvents Number of valid events in array pPerfEvents + * @param [in] pPerfEvents Array of events to be configured into general + * purpose performance monitoring counters (PMCs) + * + * @return NV_OK If successful. 
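+ *
+ * An illustrative calling sequence (the event encodings and the counter
+ * index below are made-up values for this example, not real encodings):
+ * ~~~{.c}
+ * NvU32 perfEvents[2] = { 0x003C, 0x00C0 };  // hypothetical event encodings
+ * if ((portCpuExResetPmu() == NV_OK) &&
+ *     (portCpuExProgramPmu(2, perfEvents) == NV_OK))
+ * {
+ *     NvU64 count;
+ *     portCpuExReadPmc(0, &count);           // read back the first counter
+ * }
+ * ~~~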
+ */ +NV_STATUS portCpuExProgramPmu(NvU32 numValidEvents, NvU32 *pPerfEvents); +#define portCpuExProgramPmu_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Get the number of DRAM reads, in bytes + * + * @param [out] pNumReads Number of bytes read from DRAM + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExGetDRamReads(NvU64 *pNumReads); +#define portCpuExGetDRamReads_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Get the number of DRAM writes, in bytes + * + * @param [out] pNumWrites Number of bytes written to DRAM + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExGetDRamWrites(NvU64 *pNumWrites); +#define portCpuExGetDRamWrites_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Check if the given MSR is supported on the current processor + * + * @param [in] address Address of the MSR that needs to be checked + * + * @return NV_TRUE If MSR is supported + * NV_FALSE If MSR is not supported + */ +NvBool portCpuExIsMsrSupported(NvU32 address); +#define portCpuExIsMsrSupported_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Check if the current processor supports DRAM read/write request counting + * + * @return NV_TRUE If supported + * NV_FALSE If not supported + */ +NvBool portCpuExIsDramRwCountingSupported(void); +#define portCpuExIsDramRwCountingSupported_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +#endif // _NVPORT_CPU_H_ +/// @} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h new file mode 100644 index 0000000..ab320ce --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h @@ -0,0 +1,346 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Crypto module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_CRYPTO_H_ +#define _NVPORT_CRYPTO_H_ +/** + * @defgroup NVPORT_CRYPTO Cryptography operations + * + * @brief This module contains cryptographic and PRNG functions + * + * + * A note on terminology: + * + * Pseudorandom numbers are deterministic and reproducible. 
When given the same + * seed, they will always give the same sequence, across all platforms. They are + * not suitable for cryptography or any security sensitive operations. + * + * True random numbers are generated from hardware, and as such are completely + * nondeterministic. There is no support for setting a seed, and you can expect + * the output to always be different. Unlike pseudorandom numbers, true random + * output will always vary across different platforms. + * + * These numbers are suitable for security sensitive and cryptography operations. + * + * In case of kernelmode code, the entropy pool will contain bits that are not + * available to usermode clients. As a consequence, a usermode client cannot + * deplete the entropy pool to lower the security. + * + * @note Unless ending with the "-Blocking" suffix, all functions are + * non-blocking. With regard to True Random numbers, this has the consequence + * that if there are insufficient bits in the entropy pool, the available bits + * will be used to seed a custom PRNG which will provide the final output. A + * blocking version of some functions may be available as an extended function. + * + * @note As a general rule, you should always use the non-blocking version of a + * function, unless ALL the following conditions are satisfied: + * - First time booting a clean OS + * - No connection to the network + * - The GPU is not booted yet + * - Dealing with a remote machine (i.e. no direct mouse/keyboard input) + * - No HW random support (older CPUs) + * + * For additional information, see these links: + * - http://www.2uo.de/myths-about-urandom/ + * - https://bugs.ruby-lang.org/issues/9569 + * - http://security.stackexchange.com/questions/3936/is-a-rand-from-dev-urandom-secure-for-a-login-key + * + * @{ + */ + + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Initializes global CRYPTO module state + * + * This function is called by @ref portInitialize. It is available here in case + * it is needed to initialize the CRYPTO module without initializing all the + * others, e.g. for unit tests. + * + */ +void portCryptoInitialize(void); +/** + * @brief Destroys global CRYPTO module state + * + * This function is called by @ref portShutdown. It is available here in case + * it is needed to shut down the CRYPTO module without shutting down all the + * others, e.g. for unit tests. + * + */ +void portCryptoShutdown(void); + +/** + * @brief A pseudorandom number generator object + */ +typedef struct PORT_CRYPTO_PRNG PORT_CRYPTO_PRNG; + + +/** + * @brief Construct a PRNG with the given seed. + * + * @warning These objects are not cryptographically secure, and thus not + * appropriate for any security sensitive operations. Use "True" random instead. + * + * The same seed will always result in the same sequence returned by + * @ref portCryptoPseudoRandomGeneratorGetU32, + * @ref portCryptoPseudoRandomGeneratorGetU64 and + * @ref portCryptoPseudoRandomGeneratorFillBuffer. This behavior is consistent + * across all platforms. The following code will always print the same thing: + * ~~~{.c} + * PORT_CRYPTO_PRNG *pPrng = portCryptoPseudoRandomGeneratorCreate(0xdeadbeef); + * if (pPrng) + * { + * NvU32 n = portCryptoPseudoRandomGeneratorGetU32(pPrng); + * portDbgPrintf("%u", n); + * portCryptoPseudoRandomGeneratorDestroy(pPrng); + * } + * ~~~ + * + * @return NULL if the construction failed, a PRNG object otherwise. 
+ * + */ +PORT_CRYPTO_PRNG *portCryptoPseudoRandomGeneratorCreate(NvU64 seed); +/** + * @brief Destroys an object created with + * @ref portCryptoPseudoRandomGeneratorCreate + * + */ +void portCryptoPseudoRandomGeneratorDestroy(PORT_CRYPTO_PRNG *pPrng); +/** + * @brief Returns a 32bit pseudorandom number from a given PRNG. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU32 instead. + * + * @param [in] pPrng - Generator object. If NULL, the default one will be used. + * + */ +NvU32 portCryptoPseudoRandomGeneratorGetU32(PORT_CRYPTO_PRNG *pPrng); +/** + * @brief Returns a 64bit pseudorandom number from a given PRNG. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU64 instead. + * + * @param [in] pPrng - Generator object. If NULL, the default one will be used + * + */ +NvU64 portCryptoPseudoRandomGeneratorGetU64(PORT_CRYPTO_PRNG *pPrng); +/** + * @brief Fills a user provided buffer with a pseudorandom sequence from a given + * PRNG + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomFillBuffer instead. + * + * @param [in] pPrng - Generator object. If NULL, the default one will be used + * + * @return NV_OK if successful; + * NV_ERR_INVALID_POINTER if pBuffer is NULL; + * + */ +NV_STATUS portCryptoPseudoRandomGeneratorFillBuffer(PORT_CRYPTO_PRNG *pPrng, NvU8 *pBuffer, NvLength bufSize); + +/** + * @brief Sets the PRNG seed of the global generator + * + * The same seed will always result in the same sequence returned by + * @ref portCryptoPseudoRandomGetU32, @ref portCryptoPseudoRandomGetU64 and + * @ref portCryptoPseudoRandomFillBuffer. This behavior is consistent across + * all platforms. The following code will print the same thing on all platforms: + * ~~~{.c} + * portCryptoPseudoRandomSetSeed(0xdeadbeef); + * NvU32 n = portCryptoPseudoRandomGetU32(); + * portDbgPrintf("%u", n); + * ~~~ + * + */ +void portCryptoPseudoRandomSetSeed(NvU64 seed); + +/** + * @brief Returns a 32bit pseudorandom number from global generator + * + * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorGetU32 with + * a NULL generator object. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU32 instead. + * + */ +NvU32 portCryptoPseudoRandomGetU32(void); +/** + * @brief Returns a 64bit pseudorandom number + * + * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorGetU64 with + * a NULL generator object. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU64 instead. + * + */ +NvU64 portCryptoPseudoRandomGetU64(void); +/** + * @brief Fills a user provided buffer with a pseudorandom sequence. + * + * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorFillBuffer + * with a NULL generator object. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomFillBuffer instead. 
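+ *
+ * A minimal sketch (illustrative, not part of this header) of filling a
+ * buffer with a reproducible pseudorandom sequence:
+ * ~~~{.c}
+ * NvU8 buffer[64];
+ * portCryptoPseudoRandomSetSeed(0x1234);
+ * if (portCryptoPseudoRandomFillBuffer(buffer, sizeof(buffer)) == NV_OK)
+ * {
+ *     // buffer holds the same 64 bytes on every platform for this seed
+ * }
+ * ~~~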
+ * + * @return NV_OK if successful; + * NV_ERR_INVALID_POINTER if pBuffer is NULL; + * + */ +NV_STATUS portCryptoPseudoRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize); + +/** + * @brief Calculate the MD5 hash of a given buffer + * + * @param [in] pInBuffer - Input data. Must not be NULL. + * @param [in] bufSize - Size of input buffer, in bytes. + * @param [out] pOutBuffer - Output buffer. Must be at least 16 bytes in length + * + * @return NV_OK if successful. + */ +NV_STATUS portCryptoHashMD5(const NvU8 *pInBuffer, NvLength bufSize, NvU8 pOutBuffer[16]); +/** + * @brief Calculate the first 24 bits of the MD5 hash of a given buffer + * + * The 24 bits are interpreted as a full hash, and are stored as big endian. So, + * if the full hash was d41d8cd98f00b204e9800998ecf8427e, the short 24bit hash + * would be 0x00d41d8c. + * + * @param [in] pInBuffer - Input data. Must not be NULL. + * @param [in] bufSize - Size of input buffer, in bytes. + * @param [out] pOut - Output location. Only the lowest 24 bits are set. + * + * @return NV_OK if successful. + */ +NV_STATUS portCryptoHashMD5Short(const NvU8 *pInBuffer, NvLength bufSize, NvU32 *pOut); +/** + * @brief Convert a binary representation of the MD5 hash to a 32-digit hex string + */ +NV_STATUS portCryptoHashMD5BinaryToHexString(const NvU8 pBinary[16], char pHexStr[33]); +/** + * @brief Convert a 32-digit hex string representation of the MD5 hash to binary + */ +NV_STATUS portCryptoHashMD5HexStringToBinary(const char *pHexStr, NvU8 pBinary[16]); + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ +#if defined(NV_MODS) || PORT_IS_KERNEL_BUILD +#define PORT_CRYPTO_TRUE_RANDOM_SUPPORTED 0 +#else +#define PORT_CRYPTO_TRUE_RANDOM_SUPPORTED 1 +#endif +/** + * @brief Returns a 32bit random number + * + * @note This function does not block, but rather combines the bits from the + * entropy pool with a PRNG to produce a random output of desired width. + * This is considered safe for most cryptographic applications. You can use + * @ref portCryptoExTrueRandomGetU32Blocking for a guaranteed high entropy output. + */ +NvU32 portCryptoExTrueRandomGetU32(void); +#define portCryptoExTrueRandomGetU32_SUPPORTED 0 +/** + * @brief Returns a 64bit random number + * + * @note This function does not block, but rather combines the bits from the + * entropy pool with a PRNG to produce a random output of desired width. + * This is considered safe for most cryptographic applications. You can use + * @ref portCryptoExTrueRandomGetU64Blocking for a guaranteed high entropy output. + */ +NvU64 portCryptoExTrueRandomGetU64(void); +#define portCryptoExTrueRandomGetU64_SUPPORTED 0 +/** + * @brief Fills a user provided buffer with a random sequence. + * + * @note This function does not block, but rather combines the bits from the + * entropy pool with a PRNG to produce a random output of desired width. This is + * considered safe for most cryptographic applications. You can use + * @ref portCryptoExTrueRandomFillBufferBlocking for a guaranteed high entropy + * output. + * + * @return NV_OK if successful; + * NV_ERR_INVALID_POINTER if pBuffer is NULL; + */ +NV_STATUS portCryptoExTrueRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize); +#define portCryptoExTrueRandomFillBuffer_SUPPORTED 0 + +#define PORT_CRYPTO_TRUE_RANDOM_BLOCKING_SUPPORTED (!PORT_IS_KERNEL_BUILD && !NVOS_IS_WINDOWS) + +/** + * @brief Returns a 32bit random number, possibly blocking the thread. 
+ * + * If there are not enough entropy bits available, the function will block until + * they are. Use @ref portCryptoExTrueRandomGetU32 unless you really need the + * entire result to be exclusively made of true random bits. + */ +NvU32 portCryptoExTrueRandomGetU32Blocking(void); +#define portCryptoExTrueRandomGetU32Blocking_SUPPORTED 0 +/** + * @brief Returns a 64bit random number, possibly blocking the thread. + * + * If there are not enough entropy bits available, the function will block until + * they are. Use @ref portCryptoExTrueRandomGetU64 unless you really need the + * entire result to be exclusively made of true random bits. + */ +NvU64 portCryptoExTrueRandomGetU64Blocking(void); +#define portCryptoExTrueRandomGetU64Blocking_SUPPORTED 0 + +/** + * @brief Fills a user provided buffer with a random sequence, + * possibly blocking the thread. + * + * If there are not enough entropy bits available, the function will block until + * they are. Use @ref portCryptoExTrueRandomFillBuffer unless you really need the + * entire result to be exclusively made of true random bits. + */ +NV_STATUS portCryptoExTrueRandomFillBufferBlocking(NvU8 *pBuffer, NvLength bufSize); +#define portCryptoExTrueRandomFillBufferBlocking_SUPPORTED 0 + +/// @} End extended functions + +/// @} + +#endif // _NVPORT_CRYPTO_H_ +/// @} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h new file mode 100644 index 0000000..2240a26 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h @@ -0,0 +1,314 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/** + * @file + * @brief Debug module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_DEBUG_H_ +#define _NVPORT_DEBUG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup NVPORT_DEBUG Debug Support Routines + * @brief This module provides debug support routines like breakpoints and prints. 
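+ *
+ * A typical usage sketch (illustrative, not part of this header; pBuffer is
+ * a hypothetical pointer being validated):
+ * ~~~{.c}
+ * PORT_ASSERT(pBuffer != NULL);                     // breaks if the check fails
+ * PORT_DBG_PRINT_STRING_LITERAL("entering foo\n");  // emits a fixed string
+ * ~~~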
+ * @{ + */ + +/** @brief See @ref PORT_UTIL_INLINE */ +#ifndef PORT_DEBUG_INLINE +#define PORT_DEBUG_INLINE PORT_INLINE +#endif + +/** + * @name Core Functions + * @{ + * + * @note The breakpoint macro comes in several flavors: + * - @ref PORT_BREAKPOINT - + * Shouldn't be used directly + * - @ref PORT_BREAKPOINT_DEBUG - + * Causes a breakpoint in debug builds only, use for all debug purposes. + * - @ref PORT_BREAKPOINT_CHECKED - + * Causes a breakpoint in checked builds only, use when you want the + * @c int3 present in develop and release builds, such as QA builds. + * - @ref PORT_BREAKPOINT_ALWAYS - + * Always breaks, use only if you want to trigger @c int3 even on + * public release builds. + */ + + +/** + * @brief Prints a string to a platform dependent output stream + * + * This function will print the string where you would expect it for a given + * platform. In user space it will be standard output. In kernel space it will + * be the kernel debug log. + * + * Note NvPort does not provide advanced logging capabilities, only the ability + * to emit a string. For a more robust logging solution see the NvLog project. + * + */ +PORT_DEBUG_INLINE void portDbgPrintString(const char *str, NvLength length); + +/** + * @brief Convenience macro when printing a string literal. + */ +#define PORT_DBG_PRINT_STRING_LITERAL(s) portDbgPrintString(s, sizeof(s)-1) + +/** + * @def PORT_BREAKPOINT_DEBUG_ENABLED + * @brief Controls whether @ref PORT_BREAKPOINT_DEBUG is enabled or not + */ +#if !defined(PORT_BREAKPOINT_DEBUG_ENABLED) +#if defined(DEBUG) +#define PORT_BREAKPOINT_DEBUG_ENABLED 1 +#else +#define PORT_BREAKPOINT_DEBUG_ENABLED 0 +#endif +#endif + + +/** + * @def PORT_BREAKPOINT_DEBUG + * @brief Causes a breakpoint into the debugger only when + * @ref PORT_BREAKPOINT_DEBUG_ENABLED is non-zero. + * + * By default PORT_BREAKPOINT_DEBUG_ENABLED is set based on the value of DEBUG. + * However, it is kept as a separate define so you can override it separately + * if so desired. + */ +#if PORT_BREAKPOINT_DEBUG_ENABLED +#define PORT_BREAKPOINT_DEBUG PORT_BREAKPOINT +#else +#define PORT_BREAKPOINT_DEBUG() +#endif + +#define PORT_FILE_STR __FILE__ + +/// @cond NVPORT_INTERNAL +#if !defined(PORT_ASSERT_FAILED_USES_STRINGS) +#define PORT_ASSERT_FAILED_USES_STRINGS PORT_IS_CHECKED_BUILD +#endif + +#if PORT_ASSERT_FAILED_USES_STRINGS +#define _PORT_STRINGIFY2(x) #x +#define _PORT_STRINGIFY(x) _PORT_STRINGIFY2(x) +#define _PORT_ASSERT_MESSAGE(cond) "Assertion failed: \"" #cond "\" at " \ + PORT_FILE_STR ":" _PORT_STRINGIFY(__LINE__) "\n" +#else +#define _PORT_ASSERT_MESSAGE(cond) "Assertion failed" +#endif +/// @endcond + +/** + * @brief Causes a breakpoint if the condition evaluates to false. + */ +#define PORT_ASSERT(cond) \ + do \ + { \ + PORT_COVERAGE_PUSH_OFF(); \ + if (!(cond)) \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(_PORT_ASSERT_MESSAGE(cond)); \ + PORT_BREAKPOINT(); \ + } \ + PORT_COVERAGE_POP(); \ + } while (0) + +/* + * Checks osDbgBreakpointEnabled and PDB_PROP_SYS_DEBUGGER_DISABLED + * to see if breakpoints are allowed + */ +NvBool nvDbgBreakpointEnabled(void); + +/** + * @def PORT_BREAKPOINT_CHECKED() + * @brief Causes a breakpoint in checked builds only + */ +/** + * @def PORT_ASSERT_CHECKED(x) + * @brief Causes an assert in checked builds only + */ +#if PORT_IS_CHECKED_BUILD + +/* + * TODO: defined(NVRM) && PORT_IS_KERNEL_BUILD && defined(NVWATCH) are all true + * when NvWatch is included in the Debug Linux AMD64 Mfg Mods build. + * This seems wrong... 
+ */ +#if defined(NVRM) && PORT_IS_KERNEL_BUILD == 1 && !defined(NVWATCH) +#define PORT_BREAKPOINT_CHECKED() \ + do \ + { \ + if (nvDbgBreakpointEnabled()) \ + PORT_BREAKPOINT(); \ + } while (0) +#else +#define PORT_BREAKPOINT_CHECKED() PORT_BREAKPOINT() +#endif +#define PORT_ASSERT_CHECKED(x) PORT_ASSERT(x) +#else // PORT_IS_CHECKED_BUILD +#define PORT_BREAKPOINT_CHECKED() +#define PORT_ASSERT_CHECKED(x) +#endif // PORT_IS_CHECKED_BUILD + +/** + * @brief Causes a breakpoint into the debugger regardless of build configuration. + * + * Note this is equivalent to just calling @ref PORT_BREAKPOINT. It is only + * included to provide an alternative to @ref PORT_BREAKPOINT_DEBUG that is + * consistent in look and usage. + */ +#define PORT_BREAKPOINT_ALWAYS PORT_BREAKPOINT + +/** + * @def PORT_COVERAGE_PUSH_OFF() + * @brief Saves the current coverage tracking state to a stack and disables it + * + * This is useful to do around some error checking code (e.g. "default:") so the + * bullseye tool doesn't take those branches into account when checking code + * coverage. + * + * - See @ref PORT_ASSERT for usage example. + * - See http://www.bullseye.com/help/build-exclude.html for more details. + */ +/** + * @def PORT_COVERAGE_PUSH_ON() + * @brief Saves the current coverage tracking state to a stack and enables it + */ +/** + * @def PORT_COVERAGE_POP() + * @brief Restores the last saved coverage tracking state + * + * See @ref PORT_ASSERT for usage example. + */ +#if defined(NV_BULLSEYE) +#define PORT_COVERAGE_PUSH_OFF() "BullseyeCoverage save off" +#define PORT_COVERAGE_PUSH_ON() "BullseyeCoverage save on" +#define PORT_COVERAGE_POP() "BullseyeCoverage restore" +#else +#define PORT_COVERAGE_PUSH_OFF() +#define PORT_COVERAGE_PUSH_ON() +#define PORT_COVERAGE_POP() +#endif + + + +/// @} End core functions + +/** + * @def NVPORT_CHECK_PRINTF_ARGUMENTS(a,b) + * @brief Compile time check that arguments conform to printf rules + */ +#if PORT_COMPILER_HAS_ATTRIBUTE_FORMAT +#define NVPORT_CHECK_PRINTF_ARGUMENTS(a,b) __attribute__((format(printf, a, b))) +#else +#define NVPORT_CHECK_PRINTF_ARGUMENTS(a,b) +#endif + +/** + * @name Extended Functions + * @{ + */ + +#if !defined(portDbgPrintf_SUPPORTED) +#define portDbgPrintf_SUPPORTED 0 +#endif +#if !defined(portDbgExPrintfLevel_SUPPORTED) +#define portDbgExPrintfLevel_SUPPORTED 0 +#endif + +#if PORT_IS_FUNC_SUPPORTED(portDbgPrintf) +/** + * @brief Prints a formatted string using @ref portDbgPrintString + * + * The parameters are like those of printf(). + */ +PORT_DEBUG_INLINE void portDbgPrintf(const char *format, ...) NVPORT_CHECK_PRINTF_ARGUMENTS(1, 2); +#endif + +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) +/** + * @brief Similar to @ref portDbgPrintf, except that it passes the level to the + * underlying implementation. + * + * Some platforms (e.g. MODS) have an API where prints are given a level, and + * some tools may depend on certain prints being at a certain level. This + * function simply passes the level to that API; NvPort does not understand + * or filter these levels. + * + * @param level - An int representing the level at which to print. + */ +PORT_DEBUG_INLINE void portDbgExPrintfLevel(NvU32 level, const char *format, ...) 
NVPORT_CHECK_PRINTF_ARGUMENTS(2, 3); +#endif + +/// @} End extended functions + +// Include platform specific inline definitions + +#if NVOS_IS_QNX +#include "nvport/inline/debug_qnx.h" +#elif NVOS_IS_DCECORE +#include "nvport/inline/debug_dcecore.h" +#else + +#if PORT_IS_KERNEL_BUILD + +#if NVOS_IS_WINDOWS +#include "nvport/inline/debug_win_kernel.h" +#elif NVOS_IS_UNIX +#include "nvport/inline/debug_unix_kernel_os.h" +#elif NVOS_IS_LIBOS +#include "nvport/inline/debug_libos.h" +#else +#error "Unsupported target OS" +#endif + +#else // Usermode build + +#if NVOS_IS_WINDOWS +#include "nvport/inline/debug_win_user.h" +#elif NVOS_IS_UNIX +#include "nvport/inline/debug_unix_user.h" +#elif NVOS_IS_LIBOS +#include "nvport/inline/debug_libos.h" +#else +#error "Unsupported target OS" +#endif + +#endif // PORT_IS_KERNEL_BUILD +#endif // NV_MODS + +#ifdef __cplusplus +} +#endif //__cplusplus +#endif // _NVPORT_DEBUG_H_ +/// @} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h new file mode 100644 index 0000000..8d73e2f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h @@ -0,0 +1,472 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief Atomic functions implementations using clang compiler intrinsics + */ + +#ifndef _NVPORT_ATOMIC_CLANG_H_ +#define _NVPORT_ATOMIC_CLANG_H_ + + +#if !(defined(__clang__)) +#error "Unsupported compiler: This file can only be compiled by clang" +#endif + + +PORT_INLINE void +portAtomicMemoryFenceLoad(void) +{ + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +} +PORT_INLINE void +portAtomicMemoryFenceStore(void) +{ + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +} +PORT_INLINE void +portAtomicMemoryFenceFull(void) +{ + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +} +PORT_INLINE void +portAtomicTimerBarrier(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("ISB" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("isync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("lfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence.i" : : : "memory"); +#else +#error "portAtomicTimerBarrier implementation not found" +#endif +} + +#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS + +PORT_ATOMIC_INLINE NvS32 +portAtomicAddS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvS32 *)pVal, val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicSubS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC_INLINE void +portAtomicSetS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + __c11_atomic_store((_Atomic NvS32 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapS32 +( + volatile NvS32 *pVal, + NvS32 newVal, + NvS32 oldVal +) +{ + NvS32 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvS32 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicIncrementS32 +( + volatile NvS32 *pVal +) +{ + return portAtomicAddS32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicDecrementS32 +( + volatile NvS32 *pVal +) +{ + return portAtomicSubS32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicXorS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicOrS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicAndS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +PORT_ATOMIC_INLINE NvU32 +portAtomicAddU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvU32 *)pVal, val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicSubU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC_INLINE void +portAtomicSetU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + __c11_atomic_store((_Atomic NvU32 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapU32 +( + volatile NvU32 *pVal, + NvU32 newVal, + NvU32 oldVal +) +{ + NvU32 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic 
NvU32 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicIncrementU32 +( + volatile NvU32 *pVal +) +{ + return portAtomicAddU32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicDecrementU32 +( + volatile NvU32 *pVal +) +{ + return portAtomicSubU32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicXorU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicOrU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicAndU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +#if NVCPU_IS_64_BITS + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAddS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExSubS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + __c11_atomic_store((_Atomic NvS64 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapS64 +( + volatile NvS64 *pVal, NvS64 newVal, NvS64 oldVal +) +{ + NvS64 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvS64 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExIncrementS64 +( + volatile NvS64 *pVal +) +{ + return portAtomicExAddS64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExDecrementS64 +( + volatile NvS64 *pVal +) +{ + return portAtomicExSubS64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExXorS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExOrS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAndS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAddU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExSubU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + __c11_atomic_store((_Atomic NvU64 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapU64 +( + volatile NvU64 *pVal, NvU64 newVal, NvU64 oldVal +) +{ + NvU64 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvU64 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExIncrementU64 +( + volatile NvU64 *pVal +) +{ + return 
portAtomicExAddU64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExDecrementU64 +( + volatile NvU64 *pVal +) +{ + return portAtomicExSubU64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExXorU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExOrU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAndU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +#endif // NVCPU_IS_64_BITS + +#endif // PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS + +#endif // _NVPORT_ATOMIC_CLANG_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h new file mode 100644 index 0000000..0b00799 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h @@ -0,0 +1,460 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief Atomic functions implementations using gcc compiler intrinsics + */ + +#ifndef _NVPORT_ATOMIC_GCC_H_ +#define _NVPORT_ATOMIC_GCC_H_ + + +PORT_INLINE void +portAtomicMemoryFenceStore(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("DMB ST" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("sync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("sfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence" : : : "memory"); +#else +#error "portAtomicMemoryFenceStore implementation not found" +#endif +} +PORT_INLINE void +portAtomicMemoryFenceLoad(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("DMB SY" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("sync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("lfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence" : : : "memory"); +#else +#error "portAtomicMemoryFenceLoad implementation not found" +#endif +} +PORT_INLINE void +portAtomicMemoryFenceFull(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("DMB SY" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("sync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("mfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence" : : : "memory"); +#else +#error "portAtomicMemoryFenceFull implementation not found" +#endif +} +PORT_INLINE void +portAtomicTimerBarrier(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("ISB" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("isync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("lfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence.i" : : : "memory"); +#else +#error "portAtomicTimerBarrier implementation not found" +#endif +} + +#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS + +PORT_ATOMIC_INLINE NvS32 +portAtomicAddS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicSubS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE void +portAtomicSetS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapS32 +( + volatile NvS32 *pVal, + NvS32 newVal, + NvS32 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicIncrementS32 +( + volatile NvS32 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicDecrementS32 +( + volatile NvS32 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicXorS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicOrS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicAndS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + + +PORT_ATOMIC_INLINE NvU32 +portAtomicAddU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicSubU32 +( + volatile NvU32 
*pVal, + NvU32 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE void +portAtomicSetU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapU32 +( + volatile NvU32 *pVal, + NvU32 newVal, + NvU32 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicIncrementU32 +( + volatile NvU32 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicDecrementU32 +( + volatile NvU32 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicXorU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicOrU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicAndU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + + + +#if defined(NV_64_BITS) + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAddS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExSubS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapS64 +( + volatile NvS64 *pVal, + NvS64 newVal, + NvS64 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExIncrementS64 +( + volatile NvS64 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExDecrementS64 +( + volatile NvS64 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExXorS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExOrS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAndS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAddU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExSubU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapU64 +( + volatile NvU64 *pVal, + NvU64 newVal, + NvU64 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExIncrementU64 +( + volatile NvU64 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExDecrementU64 +( + volatile NvU64 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExXorU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExOrU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return 
__sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAndU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + +#endif // NV_64_BITS + +#endif // PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS +#endif // _NVPORT_ATOMIC_GCC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h new file mode 100644 index 0000000..094f544 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief DEBUG module implementation for Unix kernelspace. + */ +#ifndef _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ +#define _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nv-kernel-interface-api.h" +void NV_API_CALL os_dbg_breakpoint(void); +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *format, ...); + +// No init/shutdown needed +#define portDbgInitialize() +#define portDbgShutdown() + + +PORT_DEBUG_INLINE void +portDbgPrintString +( + const char *str, + NvLength length +) +{ + out_string(str); +} + +#define portDbgPrintf(fmt, ...) nv_printf(0xFFFFFFFF, fmt, ##__VA_ARGS__) +#undef portDbgPrintf_SUPPORTED +#define portDbgPrintf_SUPPORTED 1 + +#define portDbgExPrintfLevel(level, fmt, ...) 
nv_printf(level, fmt, ##__VA_ARGS__) +#undef portDbgExPrintfLevel_SUPPORTED +#define portDbgExPrintfLevel_SUPPORTED 1 + +#define PORT_BREAKPOINT() os_dbg_breakpoint() + +#ifdef __cplusplus +} +#endif //__cplusplus +#endif // _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h new file mode 100644 index 0000000..5ebdae1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h @@ -0,0 +1,323 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief NvPort MEMORY module extension to track memory allocations + * + * This file is internal to NvPort MEMORY module. 
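+ *
+ * As an illustrative sketch (not normative), a tracked allocation stages
+ * bookkeeping data around the pointer that is handed back to the caller:
+ *
+ * @code
+ *   [PORT_MEM_HEADER][ user data (lengthBytes) ][PORT_MEM_FOOTER]
+ *                    ^-- pointer returned to the caller
+ * @endcode
+ *
+ * See PORT_MEM_ADD_HEADER_PTR / PORT_MEM_SUB_HEADER_PTR and
+ * PORT_MEM_STAGING_SIZE below; when no tracking feature is enabled, the
+ * staging size collapses to zero.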
+ * @cond NVPORT_INTERNAL + */ + +#ifndef _NVPORT_MEMORY_INTERNAL_H_ +#define _NVPORT_MEMORY_INTERNAL_H_ + +/** @brief Untracked paged memory allocation, platform specific */ +void *_portMemAllocPagedUntracked(NvLength lengthBytes); +/** @brief Untracked nonpaged memory allocation, platform specific */ +void *_portMemAllocNonPagedUntracked(NvLength lengthBytes); +/** @brief Untracked memory free, platform specific */ +void _portMemFreeUntracked(void *pMemory); +/** @brief Wrapper around pAlloc->_portAlloc() that tracks the allocation */ +void *_portMemAllocatorAlloc(PORT_MEM_ALLOCATOR *pAlloc, NvLength length); +/** @brief Wrapper around pAlloc->_portFree() that tracks the allocation */ +void _portMemAllocatorFree(PORT_MEM_ALLOCATOR *pAlloc, void *pMem); + + +typedef struct PORT_MEM_COUNTER +{ + volatile NvU32 activeAllocs; + volatile NvU32 totalAllocs; + volatile NvU32 peakAllocs; + volatile NvLength activeSize; + volatile NvLength totalSize; + volatile NvLength peakSize; +} PORT_MEM_COUNTER; + +typedef struct PORT_MEM_FENCE_HEAD +{ + PORT_MEM_ALLOCATOR *pAllocator; + NvLength blockSize; + NvU32 magic; +} PORT_MEM_FENCE_HEAD; + +typedef struct PORT_MEM_FENCE_TAIL +{ + NvU32 magic; +} PORT_MEM_FENCE_TAIL; + +typedef struct PORT_MEM_LIST +{ + struct PORT_MEM_LIST *pPrev; + struct PORT_MEM_LIST *pNext; +} PORT_MEM_LIST; + +#if PORT_MEM_TRACK_USE_CALLERINFO + +#if PORT_MEM_TRACK_USE_CALLERINFO_IP + +typedef NvU64 PORT_MEM_CALLERINFO; +#define PORT_MEM_CALLERINFO_MAKE ((NvU64)portUtilGetIPAddress()) + +#else // PORT_MEM_TRACK_USE_CALLERINFO_IP + +typedef struct PORT_MEM_CALLERINFO +{ + const char *file; + const char *func; + NvU32 line; +} PORT_MEM_CALLERINFO; + +/** @note Needed since not all compilers support automatic struct creation */ +static NV_INLINE PORT_MEM_CALLERINFO +_portMemCallerInfoMake +( + const char *file, + const char *func, + NvU32 line +) +{ + PORT_MEM_CALLERINFO callerInfo; + callerInfo.file = file; + callerInfo.func = func; + callerInfo.line = line; + return callerInfo; +} + +#define PORT_MEM_CALLERINFO_MAKE \ + _portMemCallerInfoMake(__FILE__, __FUNCTION__, __LINE__) +#endif // PORT_MEM_TRACK_USE_CALLERINFO_IP + +void *portMemAllocPaged_CallerInfo(NvLength, PORT_MEM_CALLERINFO); +void *portMemAllocNonPaged_CallerInfo(NvLength, PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreatePaged_CallerInfo(PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreateNonPaged_CallerInfo(PORT_MEM_CALLERINFO); +void portMemInitializeAllocatorTracking_CallerInfo(PORT_MEM_ALLOCATOR *, PORT_MEM_ALLOCATOR_TRACKING *, PORT_MEM_CALLERINFO); +void *_portMemAllocatorAlloc_CallerInfo(PORT_MEM_ALLOCATOR*, NvLength, PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreateOnExistingBlock_CallerInfo(void *, NvLength, PORT_MEM_CALLERINFO); +#if portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +PORT_MEM_ALLOCATOR *portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo(void *, NvLength, void *, PORT_MEM_CALLERINFO); +#endif //portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#undef PORT_ALLOC +#define PORT_ALLOC(pAlloc, length) \ + _portMemAllocatorAlloc_CallerInfo(pAlloc, length, PORT_MEM_CALLERINFO_MAKE) + +#define portMemAllocPaged(size) \ + portMemAllocPaged_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocNonPaged(size) \ + portMemAllocNonPaged_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocatorCreatePaged() \ + portMemAllocatorCreatePaged_CallerInfo(PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocatorCreateNonPaged() \ 
+ portMemAllocatorCreateNonPaged_CallerInfo(PORT_MEM_CALLERINFO_MAKE) + +#define portMemInitializeAllocatorTracking(pAlloc, pTrack) \ + portMemInitializeAllocatorTracking_CallerInfo(pAlloc, pTrack, PORT_MEM_CALLERINFO_MAKE) + +#define portMemAllocatorCreateOnExistingBlock(pMem, size) \ + portMemAllocatorCreateOnExistingBlock_CallerInfo(pMem, size, PORT_MEM_CALLERINFO_MAKE) +#if portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#define portMemExAllocatorCreateLockedOnExistingBlock(pMem, size, pLock) \ + portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo(pMem, size, pLock,\ + PORT_MEM_CALLERINFO_MAKE) +#endif //portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#else +#define PORT_MEM_CALLERINFO_MAKE +#endif // CALLERINFO + + +#if PORT_MEM_TRACK_USE_FENCEPOSTS || PORT_MEM_TRACK_USE_ALLOCLIST || PORT_MEM_TRACK_USE_CALLERINFO +typedef struct PORT_MEM_HEADER +{ +#if PORT_MEM_TRACK_USE_CALLERINFO + PORT_MEM_CALLERINFO callerInfo; +#endif +#if PORT_MEM_TRACK_USE_ALLOCLIST + PORT_MEM_LIST list; +#endif +#if PORT_MEM_TRACK_USE_FENCEPOSTS + PORT_MEM_FENCE_HEAD fence; +#endif +} PORT_MEM_HEADER; + +typedef struct PORT_MEM_FOOTER +{ +#if PORT_MEM_TRACK_USE_FENCEPOSTS + PORT_MEM_FENCE_TAIL fence; +#endif +} PORT_MEM_FOOTER; + +#define PORT_MEM_ADD_HEADER_PTR(p) ((PORT_MEM_HEADER*)p + 1) +#define PORT_MEM_SUB_HEADER_PTR(p) ((PORT_MEM_HEADER*)p - 1) +#define PORT_MEM_STAGING_SIZE (sizeof(PORT_MEM_HEADER)+sizeof(PORT_MEM_FOOTER)) + +#else +#define PORT_MEM_ADD_HEADER_PTR(p) p +#define PORT_MEM_SUB_HEADER_PTR(p) p +#define PORT_MEM_STAGING_SIZE 0 +#endif + +struct PORT_MEM_ALLOCATOR_TRACKING +{ + PORT_MEM_ALLOCATOR *pAllocator; + struct PORT_MEM_ALLOCATOR_TRACKING *pPrev; + struct PORT_MEM_ALLOCATOR_TRACKING *pNext; + +#if PORT_MEM_TRACK_USE_COUNTER + PORT_MEM_COUNTER counter; +#endif +#if PORT_MEM_TRACK_USE_ALLOCLIST + PORT_MEM_LIST *pFirstAlloc; + void *listLock; +#endif +#if PORT_MEM_TRACK_USE_CALLERINFO + PORT_MEM_CALLERINFO callerInfo; +#endif +}; + + +#define portMemExTrackingGetActiveStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetTotalStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetPeakStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetNext_SUPPORTED \ + (PORT_MEM_TRACK_USE_FENCEPOSTS & PORT_MEM_TRACK_USE_ALLOCLIST) + +#define portMemExValidate_SUPPORTED 0 +#define portMemExValidateAllocations_SUPPORTED 0 +#define portMemExFreeAll_SUPPORTED 0 + +/// @brief Actual size of an allocator structure, including internals +#define PORT_MEM_ALLOCATOR_SIZE \ + (sizeof(PORT_MEM_ALLOCATOR) + sizeof(PORT_MEM_ALLOCATOR_TRACKING)) + +#if defined(BIT) +#define NVIDIA_UNDEF_LEGACY_BIT_MACROS +#endif +#include "nvmisc.h" + +// +// Internal bitvector structures for allocators over existing blocks +// +#define PORT_MEM_BITVECTOR_CHUNK_SIZE 16U +typedef NvU8 PORT_MEM_BITVECTOR_CHUNK[PORT_MEM_BITVECTOR_CHUNK_SIZE]; +typedef struct +{ + // + // Points to a PORT_SPINLOCK that makes accesses to this allocator thread safe. + // If this is not the thread-safe variant, it is NULL. + // + void *pSpinlock; + // Points to just after the bitvector, aligned to the first chunk.
+ PORT_MEM_BITVECTOR_CHUNK *pChunks; + NvU32 numChunks; + // + // What follows are two bitvectors one next to another: + // - The first represents availability of chunks: 0=free, 1=allocated + // - The second represents allocation sizes: 1=last chunk of an allocation + // So the total size of this array is 2*numChunks bits + // The second vector continues immediately after the first, no alignment + // + // Example: numChunks = 8, 2 allocations of 3 chunks each: + // bits == |11111100| <- 2*3 chunks allocated, 2 free + // |00100100| <- Chunks 2 and 5 are last in allocation + // + NvU32 bits[NV_ANYSIZE_ARRAY]; +} PORT_MEM_BITVECTOR; + +/// @note the following can be used as arguments for static array size, so +/// they must be fully known at compile time - macros, not inline functions + +/// @brief Total number of chunks in a preallocated block of given size +#define PORT_MEM_PREALLOCATED_BLOCK_NUM_CHUNKS(size) \ + NV_DIV_AND_CEIL(size, PORT_MEM_BITVECTOR_CHUNK_SIZE) + +/// @brief Minimal nonaligned bookkeeping size required for a preallocated block +#define PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_NONALIGNED_EXTRA_SIZE \ + sizeof(PORT_MEM_ALLOCATOR) + sizeof(PORT_MEM_BITVECTOR) + +/// @brief Minimal bookkeeping size required for a preallocated block +#define PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE \ + NV_ALIGN_UP(PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_NONALIGNED_EXTRA_SIZE, \ + PORT_MEM_BITVECTOR_CHUNK_SIZE) + +/// @brief Number of chunks that can be tracked in the minimal bookkeeping size +#define PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS \ + (( \ + PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE - \ + sizeof(PORT_MEM_ALLOCATOR) - \ + NV_OFFSETOF(PORT_MEM_BITVECTOR, bits) \ + )*4U) + +// Although we can never execute the underflow branch, the compiler will complain +// if any constant expression results in underflow, even in dead code. +// Note: Skipping (parens) around a and b on purpose here. +#define _PORT_CEIL_NO_UNDERFLOW(a, b) (NV_DIV_AND_CEIL(b + a, b) - 1) + +/// @brief Required additional size for a given number of chunks +#define PORT_MEM_PREALLOCATED_BLOCK_SIZE_FOR_NONGRATIS_CHUNKS(num_chunks) \ + ((num_chunks > PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS) \ + ? _PORT_CEIL_NO_UNDERFLOW(num_chunks - PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS,\ + 4*PORT_MEM_BITVECTOR_CHUNK_SIZE) \ + * PORT_MEM_BITVECTOR_CHUNK_SIZE \ + : 0) + +/// @brief Total required bookkeeping size for a block of given useful size +#define PORT_MEM_PREALLOCATED_BLOCK_EXTRA_SIZE(size) \ + PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE + \ + PORT_MEM_PREALLOCATED_BLOCK_SIZE_FOR_NONGRATIS_CHUNKS( \ + PORT_MEM_PREALLOCATED_BLOCK_NUM_CHUNKS(size)) + +/** + * Macros for defining memory allocation wrappers. + * + * The function / file / line reference is not useful when portMemAlloc + * is called from a generic memory allocator function, such as the memCreate + * function in resman. + * + * These macros can be used to push the function /file / line reference up one + * level when defining a memory allocator function. In other words, log who + * calls memCreate instead of logging memCreate. 
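+ *
+ * For illustration only (myAlloc is a made-up name, and this shape assumes
+ * PORT_MEM_TRACK_USE_CALLERINFO is enabled), such a wrapper could look like:
+ *
+ * @code
+ *   void *myAlloc_CallerInfo(NvLength size PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM)
+ *   {
+ *       // Forward the original call site so the log names myAlloc's caller
+ *       return portMemAllocPaged_CallerInfo(size PORT_MEM_CALLERINFO_COMMA_PARAM);
+ *   }
+ *   #define myAlloc(size) myAlloc_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE)
+ * @endcode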
+ * + * These macros are also used throughout memory-tracking.c + */ +#if PORT_MEM_TRACK_USE_CALLERINFO + +#define PORT_MEM_CALLERINFO_PARAM _portMemCallerInfo +#define PORT_MEM_CALLERINFO_TYPE_PARAM \ + PORT_MEM_CALLERINFO PORT_MEM_CALLERINFO_PARAM +#define PORT_MEM_CALLERINFO_COMMA_PARAM ,PORT_MEM_CALLERINFO_PARAM +#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM ,PORT_MEM_CALLERINFO_TYPE_PARAM +#define PORT_MEM_CALLINFO_FUNC(f) f##_CallerInfo + +#else // PORT_MEM_TRACK_USE_CALLERINFO + +#define PORT_MEM_CALLERINFO_PARAM +#define PORT_MEM_CALLERINFO_TYPE_PARAM +#define PORT_MEM_CALLERINFO_COMMA_PARAM +#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +#define PORT_MEM_CALLINFO_FUNC(f) f + +#endif // PORT_MEM_TRACK_USE_CALLERINFO + +#endif // _NVPORT_MEMORY_INTERNAL_H_ +/// @endcond diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h new file mode 100644 index 0000000..e851b77 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h @@ -0,0 +1,311 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// Disable warnings when constant expressions are always true/false, and +// some signed/unsigned mismatch. To get a common implementation for all safe +// functions, we need to rely on these. There is no undefined behavior here. +// +#if PORT_COMPILER_IS_MSVC +#pragma warning( disable : 4296) +#elif PORT_COMPILER_IS_GCC +// GCC 4.6+ needed for GCC diagnostic +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +// Allow unknown pragmas to ignore unrecognized -W flags. 
+#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wtype-limits" +#else +// +// On older GCCs we declare this as a system header, which tells the compiler +// to ignore all warnings in it (this has no effect on the primary source file) +// +#pragma GCC system_header +#endif +#elif PORT_COMPILER_IS_CLANG +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wtautological-constant-out-of-range-compare" +#pragma clang diagnostic ignored "-Wsign-compare" +#pragma clang diagnostic ignored "-Wtype-limits" +#endif + +#define PORT_SAFE_OP(a, b, pRes, _op_, _US_) \ + ((sizeof(a) == 1) ? portSafe##_op_##_US_##8 (a, b, pRes) : \ + (sizeof(a) == 2) ? portSafe##_op_##_US_##16(a, b, pRes) : \ + (sizeof(a) == 4) ? portSafe##_op_##_US_##32(a, b, pRes) : \ + (sizeof(a) == 8) ? portSafe##_op_##_US_##64(a, b, pRes) : \ + NV_FALSE) + +#define PORT_SAFE_ADD_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Add, U) +#define PORT_SAFE_SUB_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Sub, U) +#define PORT_SAFE_MUL_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Mul, U) +#define PORT_SAFE_DIV_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Div, U) + +#define PORT_SAFE_ADD_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Add, S) +#define PORT_SAFE_SUB_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Sub, S) +#define PORT_SAFE_MUL_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Mul, S) +#define PORT_SAFE_DIV_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Div, S) + +#define PORT_SAFE_ADD(a, b, pRes) PORT_SAFE_ADD_U(a, b, pRes) +#define PORT_SAFE_SUB(a, b, pRes) PORT_SAFE_SUB_U(a, b, pRes) +#define PORT_SAFE_MUL(a, b, pRes) PORT_SAFE_MUL_U(a, b, pRes) +#define PORT_SAFE_DIV(a, b, pRes) PORT_SAFE_DIV_U(a, b, pRes) + +//////////////////////////////////////////////////////////////////////////////// + +#define PORT_EXPAND(X) X +#define PORT_SAFE_MAX(t) PORT_EXPAND(NV_##t##_MAX) +#define PORT_SAFE_MIN(t) PORT_EXPAND(NV_##t##_MIN) + +// These constants should really be in nvtypes.h +#if !defined (NV_UPtr_MAX) +#if defined(NV_64_BITS) +#define NV_UPtr_MAX NV_U64_MAX +#define NV_Length_MAX NV_U64_MAX +#else +#define NV_UPtr_MAX NV_U32_MAX +#define NV_Length_MAX NV_U32_MAX +#endif +#define NV_UPtr_MIN 0 +#define NV_Length_MIN 0 +#endif + +#define PORT_WILL_OVERFLOW_UADD(a, b) ((a + b) < a) +#define PORT_WILL_OVERFLOW_USUB(a, b) (b > a) +#define PORT_WILL_OVERFLOW_UMUL(a, b, r) (a != 0 && b != (r/a)) + +/** @note Signed overflow is Undefined Behavior, which means we have to detect + * it before it actually happens. We can't do (a+b) unless we are sure it won't + * overflow. + */ +#define PORT_WILL_OVERFLOW_SADD(a, b, size) \ + ((b < 0) ? (a < (NV_S##size##_MIN - b)) : (a > (NV_S##size##_MAX - b))) + +#define PORT_WILL_OVERFLOW_SSUB(a, b, size) \ + ((b < 0) ? (a > (NV_S##size##_MAX + b)) : (a < (NV_S##size##_MIN + b))) + +#define PORT_MIN_MUL(x, s) ((x < 0) ? (NV_S##s##_MAX / x) : (NV_S##s##_MIN / x)) +#define PORT_MAX_MUL(x, s) ((x < 0) ? (NV_S##s##_MIN / x) : (NV_S##s##_MAX / x)) +#define PORT_WILL_OVERFLOW_SMUL(a, b, size) \ + (a != 0 && b != 0 && (a > PORT_MAX_MUL(b, size) || a < PORT_MIN_MUL(b, size))) + +#define PORT_SAFE_DIV_IMPL(a, b, pRes) \ + ((b == 0) ? NV_FALSE : ((*pRes = a / b), NV_TRUE)) + +#define PORT_SAFE_Add_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SADD(a, b, n) ? NV_FALSE : ((*pRes = a + b), NV_TRUE)) +#define PORT_SAFE_Sub_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SSUB(a, b, n) ? 
NV_FALSE : ((*pRes = a - b), NV_TRUE)) +#define PORT_SAFE_Mul_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SMUL(a, b, n) ? NV_FALSE : ((*pRes = a * b), NV_TRUE)) +#define PORT_SAFE_Div_IMPL_S(a, b, pRes, n) PORT_SAFE_DIV_IMPL(a, b, pRes) + +#define PORT_SAFE_Add_IMPL_U(a, b, pRes, n) \ + ((*pRes = a + b), ((*pRes < a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Sub_IMPL_U(a, b, pRes, n) \ + ((*pRes = a - b), ((b > a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Mul_IMPL_U(a, b, pRes, n) \ + ((*pRes = a * b), ((a != 0 && b != *pRes/a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Div_IMPL_U(a, b, pRes, n) PORT_SAFE_DIV_IMPL(a, b, pRes) + + +#define PORT_SAFE_Add_IMPL_ PORT_SAFE_Add_IMPL_U +#define PORT_SAFE_Sub_IMPL_ PORT_SAFE_Sub_IMPL_U +#define PORT_SAFE_Mul_IMPL_ PORT_SAFE_Mul_IMPL_U +#define PORT_SAFE_Div_IMPL_ PORT_SAFE_Div_IMPL_U + +#define PORT_SAFE_CAST(a, b, t) \ + ((a < PORT_SAFE_MIN(t) || a > PORT_SAFE_MAX(t)) ? \ + NV_FALSE : \ + ((b = (Nv##t) a), NV_TRUE)) + + +#define PORT_SAFE_DEFINE_MATH_FUNC(_op_, _US_, _size_) \ + PORT_SAFE_INLINE NvBool \ + portSafe##_op_##_US_##_size_ \ + ( \ + Nv##_US_##_size_ x, \ + Nv##_US_##_size_ y, \ + Nv##_US_##_size_ *pRes \ + ) \ + { \ + return PORT_EXPAND(PORT_SAFE_##_op_##_IMPL_##_US_)(x, y, pRes, _size_);\ + } + + +#define PORT_SAFE_DEFINE_CAST_FUNC(_type_from_, _type_to_) \ + PORT_SAFE_INLINE NvBool \ + portSafe##_type_from_##To##_type_to_ \ + ( \ + Nv##_type_from_ data, \ + Nv##_type_to_ *pResult \ + ) \ + { \ + if (((data<0) && (PORT_SAFE_MIN(_type_to_) == 0 || \ + PORT_SAFE_MIN(_type_to_) > data)) \ + || data > PORT_SAFE_MAX(_type_to_)) \ + return NV_FALSE; \ + *pResult = (Nv##_type_to_) data; \ + return NV_TRUE; \ + } + + + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 8) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 16) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 32) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 64) + + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 8) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 16) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 32) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 64) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, Ptr) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Div, , Length) + + +PORT_SAFE_DEFINE_CAST_FUNC(S8, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S8, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S8, Length) 
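+
+/*
+ * Illustrative usage of the generated helpers (not part of the interface):
+ * each function returns NV_FALSE on overflow, in which case the value written
+ * through the out-pointer must not be relied upon, e.g.
+ *
+ *   NvU32 total;
+ *   if (!portSafeAddU32(count, extra, &total))
+ *       return NV_FALSE; // hypothetical error handling at the call site
+ */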
+ +PORT_SAFE_DEFINE_CAST_FUNC(S16, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S16, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S16, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(S32, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S32, S16) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S32, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S32, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(S64, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S64, S16) +PORT_SAFE_DEFINE_CAST_FUNC(S64, S32) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S64, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S64, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(U8, S8) + +PORT_SAFE_DEFINE_CAST_FUNC(U16, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U16, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U16, U8) + +PORT_SAFE_DEFINE_CAST_FUNC(U32, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U32, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U32, S32) +PORT_SAFE_DEFINE_CAST_FUNC(U32, U8) +PORT_SAFE_DEFINE_CAST_FUNC(U32, U16) + +PORT_SAFE_DEFINE_CAST_FUNC(U64, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S32) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S64) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U8) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U16) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U32) +PORT_SAFE_DEFINE_CAST_FUNC(U64, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(U64, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S8) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S16) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S32) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S64) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U8) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U16) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U32) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U64) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(Length, S8) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S16) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S32) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S64) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U8) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U16) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U32) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U64) +PORT_SAFE_DEFINE_CAST_FUNC(Length, UPtr) + + +#if PORT_COMPILER_IS_MSVC +#pragma warning( default : 4296) +#elif PORT_COMPILER_IS_GCC && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#pragma GCC diagnostic pop +#elif PORT_COMPILER_IS_CLANG +#pragma clang diagnostic pop +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h new file mode 100644 index 0000000..be23a99 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h @@ -0,0 +1,211 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/** + * @file + * @brief SYNC debugging utilities + * + * If PORT_SYNC_PRINT_DEBUG_INFO is defined, the definitions in this file will + * cause all Sync operations to verbosely print out the actions performed. + */ + +#if defined(PORT_SYNC_PRINT_DEBUG_INFO) + +#if defined(PORT_SYNC_IMPL) + +#undef portSyncInitialize +#undef portSyncShutdown +#undef portSyncSpinlockInitialize +#undef portSyncSpinlockCreate +#undef portSyncSpinlockDestroy +#undef portSyncSpinlockAcquire +#undef portSyncSpinlockRelease +#undef portSyncMutexInitialize +#undef portSyncMutexCreate +#undef portSyncMutexDestroy +#undef portSyncMutexAcquire +#undef portSyncMutexRelease +#undef portSyncMutexAcquireConditional +#undef portSyncSemaphoreInitialize +#undef portSyncSemaphoreCreate +#undef portSyncSemaphoreDestroy +#undef portSyncSemaphoreAcquire +#undef portSyncSemaphoreRelease +#undef portSyncSemaphoreAcquireConditional + +#else + +#define portSyncInitialize() \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncInitialize() ", __FILE__, __LINE__); \ + portSyncInitialize(); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncShutdown() \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncShutdown() ", __FILE__, __LINE__); \ + portSyncShutdown(); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + + + +static NV_INLINE NV_STATUS _syncPrintReturnStatus(NV_STATUS status) +{ + portDbgPrintf("%s\n", nvstatusToString(status)); + return status; +} + +static NV_INLINE void *_syncPrintReturnPtr(void *ptr) +{ + portDbgPrintf("%p\n", ptr); + return ptr; +} + + +#define portSyncSpinlockInitialize(pSpinlock) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockInitialize(%p) - ", \ + __FILE__, __LINE__, pSpinlock),\ + _syncPrintReturnStatus(portSyncSpinlockInitialize(pSpinlock))) + +#define portSyncSpinlockCreate(pAllocator) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockCreate(%p) - ", \ + __FILE__, __LINE__, pAllocator),\ + _syncPrintReturnPtr(portSyncSpinlockCreate(pAllocator))) + +#define portSyncSpinlockDestroy(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockDestroy(%p) ",\ + __FILE__, __LINE__, pSpinlock); \ + portSyncSpinlockDestroy(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSpinlockAcquire(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockAcquire(%p) ",\ + __FILE__, __LINE__, pSpinlock); \ + 
portSyncSpinlockAcquire(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSpinlockRelease(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockRelease(%p) ",\ + __FILE__, __LINE__, pSpinlock); \ + portSyncSpinlockRelease(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + + + + +#define portSyncMutexInitialize(pMutex) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexInitialize(%p) - ", \ + __FILE__, __LINE__, pMutex),\ + _syncPrintReturnStatus(portSyncMutexInitialize(pMutex))) + +#define portSyncMutexCreate(pAllocator) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexCreate(%p) - ", \ + __FILE__, __LINE__, pAllocator),\ + _syncPrintReturnPtr(portSyncMutexCreate(pAllocator))) + +#define portSyncMutexDestroy(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexDestroy(%p) ",\ + __FILE__, __LINE__, pMutex); \ + portSyncMutexDestroy(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexAcquire(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexAcquire(%p) ",\ + __FILE__, __LINE__, pMutex); \ + portSyncMutexAcquire(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexRelease(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexRelease(%p) ",\ + __FILE__, __LINE__, pMutex); \ + portSyncMutexRelease(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexAcquireConditional(pMutex) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexAcquireConditional(%p) - ", \ + __FILE__, __LINE__, pMutex),\ + (portSyncMutexAcquireConditional(pMutex) ? \ + (portDbgPrintf("TRUE\n"),NV_TRUE) : (portDbgPrintf("FALSE\n"),NV_FALSE))) + + + + + +#define portSyncSemaphoreInitialize(pSemaphore, s, l) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreInitialize(%p, %u, %u) - ", \ + __FILE__, __LINE__, pSemaphore, s, l),\ + _syncPrintReturnStatus(portSyncSemaphoreInitialize(pSemaphore, s, l))) + +#define portSyncSemaphoreCreate(pAllocator, s, l) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreCreate(%p, %u, %u) - ", \ + __FILE__, __LINE__, pAllocator, s, l),\ + _syncPrintReturnPtr(portSyncSemaphoreCreate(pAllocator, s, l))) + +#define portSyncSemaphoreDestroy(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreDestroy(%p) ",\ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreDestroy(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreAcquire(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreAcquire(%p) ",\ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreAcquire(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreRelease(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreRelease(%p) ",\ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreRelease(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreAcquireConditional(pSemaphore) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreAcquireConditional(%p) - ", \ + __FILE__, __LINE__, pSemaphore),\ + (portSyncSemaphoreAcquireConditional(pSemaphore) ? 
\ + (portDbgPrintf("TRUE\n"),NV_TRUE) : (portDbgPrintf("FALSE\n"),NV_FALSE))) + + +#endif // PORT_SYNC_IMPL +#endif // PORT_SYNC_PRINT_DEBUG_INFO diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h new file mode 100644 index 0000000..e9c089c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations using gcc and clang compiler intrinsics + */ + +#ifndef _NVPORT_UTIL_GCC_CLANG_H_ +#define _NVPORT_UTIL_GCC_CLANG_H_ + +// +// Disabling portUtilExGetStackTrace_SUPPORTED on all clients because the +// implementation is unsafe and generates warnings on new build compilers. +// +// From https://gcc.gnu.org/onlinedocs/gcc/Return-Address.html : +// Calling this function with a nonzero argument can have unpredictable effects, +// including crashing the calling program. As a result, calls that are considered +// unsafe are diagnosed when the -Wframe-address option is in effect. Such calls +// should only be made in debugging situations. +// +// If this feature is desirable, please replace the body of portUtilExGetStackTrace() +// with implementations that tie into native stacktrace reporting infrastructure +// of the platforms nvport runs on. 
+// +#define portUtilExGetStackTrace_SUPPORTED 0 +#define portUtilExGetStackTrace(_level) ((NvUPtr)0) + +#define portUtilGetReturnAddress() (NvUPtr)__builtin_return_address(0) + +#if NVCPU_IS_X86 || NVCPU_IS_X86_64 +#define NVPORT_DUMMY_LOOP() \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause"); +#else +#define NVPORT_DUMMY_LOOP() \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop"); +#endif + +#if (__GNUC__ < 4) || (NVCPU_IS_ARM) || (NVCPU_IS_X86 && PORT_IS_KERNEL_BUILD) || (NVCPU_IS_RISCV64) +#define PORT_UTIL_CLZ_CTX_NOT_DEFINED 1 +#else +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n) +{ + if (n == 0) + return 64; + + return __builtin_clzll(n); +} +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n) +{ + if (n == 0) + return 32; + + return __builtin_clz(n); +} + + +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n) +{ + if (n == 0) + return 64; + + return __builtin_ctzll(n); +} +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n) +{ + if (n == 0) + return 32; + + return __builtin_ctz(n); +} + +#endif + + +#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void) +{ + NvU32 lo; + NvU32 hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (lo | ((NvU64)hi << 32)); +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#elif NVCPU_IS_AARCH64 && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void) +{ + NvU64 ts = 0; + __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (ts)); + return ts; +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#elif NVCPU_IS_PPC64LE && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void) +{ + NvU64 ts; + __asm__ __volatile__ ("mfspr %0,268" : "=r"(ts)); + return ts; +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#elif NVCPU_IS_PPC && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void) +{ + NvU32 lo, hi, tmp; + // Read the 64-bit timebase as upper/lower halves, retrying if the upper + // half changed between the two reads. + __asm__ __volatile__ ( + "0:\n" + "mftbu %0\n" + "mftbl %1\n" + "mftbu %2\n" + "cmpw %0, %2\n" + "bne- 0b" + : "=r" (hi), "=r" (lo), "=r" (tmp) ); + return (((NvU64)hi << 32) | lo); +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#else +#define portUtilExReadTimestampCounter_SUPPORTED 0 +#endif + +#endif //
_NVPORT_UTIL_GCC_CLANG_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h new file mode 100644 index 0000000..d9fe83a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h @@ -0,0 +1,267 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief UTIL module generic cross-platform implementation + */ + +#ifndef _NVPORT_UTIL_GENERIC_H_ +#define _NVPORT_UTIL_GENERIC_H_ + +PORT_UTIL_INLINE NvBool +portUtilCheckOverlap +( + const NvU8 *pData0, + NvLength len0, + const NvU8 *pData1, + NvLength len1 +) +{ + return (pData0 >= pData1 && pData0 < (pData1 + len1)) || + (pData1 >= pData0 && pData1 < (pData0 + len0)); +} + +PORT_UTIL_INLINE NvBool +portUtilCheckAlignment +( + const void *address, + NvU32 align +) +{ + if (!portUtilIsPowerOfTwo(align)) + return NV_FALSE; + + return ((NvUPtr)address & (align-1)) == 0; +} + +PORT_UTIL_INLINE NvBool +portUtilIsPowerOfTwo +( + NvU64 num +) +{ + return (num & (num-1)) == 0; +} + +/* + * This function is designed to support unaligned accesses + * (but it might be slower because of the byte-by-byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian16 +( + void *pBuf, + NvU16 value +) +{ + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed to support unaligned accesses + * (but it might be slower because of the byte-by-byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian32 +( + void *pBuf, + NvU32 value +) +{ + *((NvU8*)pBuf + 3) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed to support unaligned accesses + * (but it might be slower because of the byte-by-byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian64 +( + void *pBuf, + NvU64 value +) +{ + *((NvU8*)pBuf + 7) = (NvU8)(value >> 56); + *((NvU8*)pBuf + 6) = (NvU8)(value >> 48); + *((NvU8*)pBuf + 5) = (NvU8)(value >> 40); + *((NvU8*)pBuf + 4) = (NvU8)(value >> 32); + *((NvU8*)pBuf + 3) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed to support unaligned accesses + * (but it might be slower because of the byte-by-byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian16 +( + void *pBuf, + NvU16 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 1) = (NvU8)(value); +} + +/* + * This function is designed to support unaligned accesses + * (but it might be slower because of the byte-by-byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian32 +( + void *pBuf, + NvU32 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 3) = (NvU8)(value); +} + +/* + * This function is designed to support unaligned accesses + * (but it might be slower because of the byte-by-byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian64 +( + void *pBuf, + NvU64 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 56); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 48); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 40); + *((NvU8*)pBuf + 3) = (NvU8)(value >> 32); + *((NvU8*)pBuf + 4) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 5) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 6) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 7) = (NvU8)(value); +} + +#if PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG +#include "nvport/inline/util_gcc_clang.h" +#elif PORT_COMPILER_IS_MSVC +#include "nvport/inline/util_msvc.h" +#else +#error "Unsupported compiler" +#endif // switch + +#ifdef PORT_UTIL_CLZ_CTX_NOT_DEFINED +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n) +{ + NvU32 y; + + if (n == 0) + return 64; + + // Shift left until the top bit is set, counting the steps + for (y = 0; !(n & 0x8000000000000000LL); y++) + n <<= 1; + + return y; +} +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n) +{ + NvU32 y; + + if (n == 0) + return 32; + + for (y = 0; !(n & 0x80000000); y++) + n <<= 1; + + return y; +} + +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n) +{ + NvU32 bz, b5, b4, b3, b2, b1, b0; + NvU64 y; + + // Isolate the lowest set bit, then binary-search its position + y = n & (~n + 1); + bz = y ? 0 : 1; + b5 = (y & 0x00000000FFFFFFFFLL) ? 0 : 32; + b4 = (y & 0x0000FFFF0000FFFFLL) ? 0 : 16; + b3 = (y & 0x00FF00FF00FF00FFLL) ? 0 : 8; + b2 = (y & 0x0F0F0F0F0F0F0F0FLL) ? 0 : 4; + b1 = (y & 0x3333333333333333LL) ? 0 : 2; + b0 = (y & 0x5555555555555555LL) ? 0 : 1; + + return (bz + b5 + b4 + b3 + b2 + b1 + b0); +} +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n) +{ + NvU32 bz, b4, b3, b2, b1, b0; + NvU32 y; + + y = n & (~n + 1); + bz = y ? 0 : 1; + b4 = (y & 0x0000FFFF) ? 0 : 16; + b3 = (y & 0x00FF00FF) ? 0 : 8; + b2 = (y & 0x0F0F0F0F) ? 0 : 4; + b1 = (y & 0x33333333) ? 0 : 2; + b0 = (y & 0x55555555) ?
0 : 1; + + return (bz + b4 + b3 + b2 + b1 + b0); +} +#endif + +static NV_FORCEINLINE void +portUtilSpin(void) +{ + NvU32 idx; + for (idx = 0; idx < 100; idx++) + { + NVPORT_DUMMY_LOOP(); + } +} + +#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) && PORT_IS_MODULE_SUPPORTED(atomic) +static NV_FORCEINLINE NvU64 +portUtilExReadTimestampCounterSerialized(void) +{ + NvU64 val; + + portAtomicMemoryFenceLoad(); + val = portUtilExReadTimestampCounter(); + portAtomicMemoryFenceLoad(); + + return val; +} +#define portUtilExReadTimestampCounterSerialized_SUPPORTED 1 +#else +#define portUtilExReadTimestampCounterSerialized_SUPPORTED 0 +#endif + +#endif // _NVPORT_UTIL_GENERIC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h new file mode 100644 index 0000000..4d293c3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief va_list declarations for all platforms + */ + +// We used to have custom implementations in here, but now we just take the standard ones. +#include <stdarg.h> // define va_* diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h new file mode 100644 index 0000000..dd5646e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h @@ -0,0 +1,962 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Memory module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_MEMORY_H_
+#define _NVPORT_MEMORY_H_
+
+/**
+ * Platform-specific inline implementations
+ */
+#if NVOS_IS_LIBOS
+#include "nvport/inline/memory_libos.h"
+#endif
+
+/**
+ * @defgroup NVPORT_MEMORY Memory
+ * @brief This module contains memory management related functionality.
+ *
+ * @{
+ */
+
+/**
+ * @brief Single allocation description - forward reference.
+ */
+struct PORT_MEM_TRACK_ALLOC_INFO;
+typedef struct PORT_MEM_TRACK_ALLOC_INFO PORT_MEM_TRACK_ALLOC_INFO;
+
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+
+/**
+ * @brief Initializes global Memory tracking structures.
+ *
+ * This function is called by @ref portInitialize. It is available here in case
+ * it is needed to initialize the MEMORY module without initializing all the
+ * others, e.g. for unit tests.
+ */
+void portMemInitialize(void);
+/**
+ * @brief Destroys global Memory tracking structures, and checks for leaks.
+ *
+ * This function is called by @ref portShutdown. It is available here in case
+ * it is needed to shut down the MEMORY module without shutting down all the
+ * others, e.g. for unit tests.
+ *
+ * @param bForceSilent - Will not print the report, even if
+ *        @ref PORT_MEM_TRACK_PRINT_LEVEL isn't PORT_MEM_TRACK_PRINT_LEVEL_SILENT
+ */
+void portMemShutdown(NvBool bForceSilent);
+
+
+/**
+ * @brief Allocates pageable virtual memory of given size.
+ *
+ * Will allocate at least lengthBytes bytes and return a pointer to the
+ * allocated virtual memory. The caller will be able to both read and write
+ * the returned memory via standard pointer accesses.
+ *
+ * The memory is not guaranteed to be initialized before being returned to the
+ * caller.
+ *
+ * An allocation request of size 0 will result in a return value of NULL.
+ *
+ * @par Checked builds only:
+ * Requests of size 0 will breakpoint/assert.
+ *
+ * @par Undefined:
+ * It is possible this function will consume more than lengthBytes of virtual
+ * address space. However, behavior is undefined if the caller attempts to read
+ * or write addresses beyond lengthBytes.
+ *
+ * @return Pointer to requested memory, NULL if allocation fails.
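+ *
+ * A minimal usage sketch (illustrative only; the NvU32 payload is a made-up
+ * example, not part of this interface):
+ * ~~~{.c}
+ * NvU32 *pCounter = portMemAllocPaged(sizeof(*pCounter));
+ * if (pCounter != NULL)
+ * {
+ *     *pCounter = 0;   // contents are not zero-initialized by the allocator
+ *     portMemFree(pCounter);
+ * }
+ * ~~~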
+ *
+ * @note Calling this function is identical to calling
+ * @ref PORT_ALLOC ( @ref portMemAllocatorGetGlobalPaged() , lengthBytes)
+ *
+ * @pre Windows: IRQL <= APC_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void *portMemAllocPaged(NvLength lengthBytes);
+
+/**
+ * @brief Allocates non-paged (i.e. pinned) memory.
+ *
+ * This function is essentially the same as @ref portMemAllocPaged except that
+ * the virtual memory, once returned, will always be resident in CPU memory.
+ *
+ * @return Pointer to requested memory, NULL if allocation fails.
+ *
+ * @note Calling this function is identical to calling
+ * @ref PORT_ALLOC ( @ref portMemAllocatorGetGlobalNonPaged() , lengthBytes)
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void *portMemAllocNonPaged(NvLength lengthBytes);
+
+/**
+ * @brief Allocates non-paged (i.e. pinned) memory on the stack or the heap.
+ *
+ * USE ONLY FOR MEMORY THAT IS ALLOCATED AND FREED IN THE SAME FUNCTION!
+ *
+ * This function allocates memory on the stack for platforms with a large stack.
+ * Otherwise it falls back to @ref portMemAllocNonPaged and @ref portMemFree.
+ */
+#define portMemExAllocStack(lengthBytes) __builtin_alloca(lengthBytes)
+#define portMemExAllocStack_SUPPORTED PORT_COMPILER_IS_GCC
+
+#if portMemExAllocStack_SUPPORTED && NVOS_IS_LIBOS
+#define portMemAllocStackOrHeap(lengthBytes) portMemExAllocStack(lengthBytes)
+#define portMemFreeStackOrHeap(pData)
+#else
+#define portMemAllocStackOrHeap(size) portMemAllocNonPaged(size)
+#define portMemFreeStackOrHeap(pData) portMemFree(pData)
+#endif
+
+/**
+ * @brief Frees memory allocated by @ref portMemAllocPaged or @ref portMemAllocNonPaged.
+ *
+ * Frees either paged or non-paged virtual memory. The pointer passed in must
+ * have been the exact value returned by the allocation routine.
+ *
+ * Calling with NULL has no effect.
+ *
+ * @par Checked builds only:
+ * Will fill the memory with a pattern to help detect use after free.
+ * Will assert/breakpoint if the memory fenceposts have been corrupted.
+ *
+ * @par Undefined:
+ * Freeing the same address multiple times results in undefined behavior.
+ * Accessing memory in the region freed by this function results in undefined
+ * behavior. It may generate a page fault, or if the memory has been
+ * reallocated (or kept around to optimize subsequent allocation requests) then
+ * the access may unexpectedly work.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if freeing NonPaged memory)
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void portMemFree(void *pData);
+
+/**
+ * @brief Copies data from one address to another.
+ *
+ * Copies srcSize bytes from pSource to pDestination, returning pDestination.
+ * pDestination should be at least destSize bytes, pSource at least srcSize.
+ * destSize should be equal to or greater than srcSize.
+ *
+ * If destSize is 0, it is guaranteed to not access either buffer.
+ *
+ * @par Undefined:
+ * Behavior is undefined if memory regions referred to by pSource and
+ * pDestination overlap.
+ *
+ * @par Checked builds only:
+ * Will assert/breakpoint if the regions overlap.
+ * Will assert/breakpoint if destSize < srcSize.
+ * Will assert/breakpoint if either pointer is NULL.
+ *
+ * @return pDestination on success, NULL if the operation failed.
+ */
+void *portMemCopy(void *pDestination, NvLength destSize, const void *pSource, NvLength srcSize);
+
+/**
+ * @brief Moves data from one address to another.
+ *
+ * Copies memory from pSource to pDestination, returning pDestination.
+ * pDestination should be at least destSize bytes, pSource at least srcSize.
+ * destSize should be equal to or greater than srcSize.
+ *
+ * If destSize is 0, it is guaranteed to not access either buffer.
+ *
+ * Unlike @ref portMemCopy this function allows the regions to overlap.
+ *
+ * @par Checked builds only:
+ * Will assert/breakpoint if destSize < srcSize.
+ * Will assert/breakpoint if either pointer is NULL.
+ *
+ * @return pDestination on success, NULL if the operation failed.
+ */
+void *portMemMove(void *pDestination, NvLength destSize, const void *pSource, NvLength srcSize);
+
+/**
+ * @brief Sets given memory to specified value.
+ *
+ * Writes lengthBytes bytes of data starting at pData with value.
+ * The buffer is assumed to have the size of at least lengthBytes.
+ *
+ * If lengthBytes is 0, it is guaranteed to not access pData.
+ *
+ * @return pData
+ */
+void *portMemSet(void *pData, NvU8 value, NvLength lengthBytes);
+
+/**
+ * @brief Sets given memory to specified pattern.
+ *
+ * Fills lengthBytes of pData repeating the pPattern pattern.
+ * The pData buffer is assumed to have the size of at least lengthBytes.
+ * The pPattern buffer is assumed to have the size of at least patternBytes.
+ *
+ * If lengthBytes is 0, it is guaranteed to not access pData.
+ * @par Undefined:
+ * Behavior is undefined if patternBytes is zero.
+ * Behavior is undefined if pPattern and pData overlap. + * + * @return pData + */ +void *portMemSetPattern(void *pData, NvLength lengthBytes, const NvU8 *pPattern, NvLength patternBytes); + +/** + * @brief Compares two memory regions. + * + * This function does a byte by byte comparison of the 2 memory regions provided. + * + * It simultaneously scans pData0 and pData1 starting from byte 0 and going + * until lengthBytes bytes have been scanned or the bytes in pData0 and pData1 + * are not equal. + * + * The return value will be + * - 0 if all lengthBytes bytes are equal. + * - <0 if pData0 is less than pData1 for the first unequal byte. + * - >0 if pData0 is greater than pData1 for the first unequal byte. + * + * Both buffers are assumed to have the size of at least lengthBytes. + * + * @par Undefined: + * Behavior is undefined if memory regions referred to by pData0 and pData1 + * overlap.
+ * Behavior is undefined if lengthBytes is 0. + * + * @par Checked builds only: + * The function will return 0 and breakpoint/assert if there is overlap.
+ * The function will return 0 and breakpoint/assert if the length is 0.
+ */
+NvS32 portMemCmp(const void *pData0, const void *pData1, NvLength lengthBytes);
+
+
+typedef struct PORT_MEM_ALLOCATOR PORT_MEM_ALLOCATOR;
+
+/**
+ * @brief Function signature for PORT_MEM_ALLOCATOR::alloc.
+ *
+ * Basic behavior is similar to @ref portMemAllocPaged. What type of memory
+ * is returned depends on the type of allocator that was created.
+ *
+ * Must be given the same instance of @ref PORT_MEM_ALLOCATOR as that which
+ * contains the calling function pointer. A different copy returned by the
+ * same function is not sufficient. Behavior is undefined if this is not done.
+ */
+typedef void *PortMemAllocatorAlloc(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+
+/**
+ * @brief Function signature for PORT_MEM_ALLOCATOR::free.
+ *
+ * See @ref portMemFree for details.
+ *
+ * Must be given the same instance of @ref PORT_MEM_ALLOCATOR as that which
+ * contains the calling function pointer. A different copy returned by the
+ * same function is not sufficient. Behavior is undefined if this is not done.
+ *
+ * @par Checked builds only:
+ * Will assert if given a different pointer than the one the memory
+ * was allocated with.
+ */
+typedef void PortMemAllocatorFree(PORT_MEM_ALLOCATOR *pAlloc, void *pMemory);
+
+/**
+ * @brief Function signature for PORT_MEM_ALLOCATOR::release.
+ *
+ * This function is called by @ref portMemAllocatorRelease when the allocator is
+ * released. This is only needed when implementing custom allocators, to be able
+ * to clean up as necessary.
+ */
+typedef void PortMemAllocatorRelease(PORT_MEM_ALLOCATOR *pAlloc);
+
+
+/**
+ * @brief Platform specific allocator implementation.
+ */
+typedef struct PORT_MEM_ALLOCATOR_IMPL PORT_MEM_ALLOCATOR_IMPL;
+
+/**
+ * @brief Opaque structure to hold all memory tracking information.
+ */
+typedef struct PORT_MEM_ALLOCATOR_TRACKING PORT_MEM_ALLOCATOR_TRACKING;
+
+/**
+ * @brief Initializes an allocator's tracking structures.
+ *
+ * You only need to call this when creating a custom allocator. The functions
+ * declared in this file call this internally.
+ *
+ * @param pTracking - Pointer to an already allocated tracking structure.
+ */
+void portMemInitializeAllocatorTracking(PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_ALLOCATOR_TRACKING *pTracking);
+
+/**
+ * @brief A set of functions that can be used to manage a specific type of memory.
+ *
+ * The intent of the allocator paradigm is to allow generic code to be handed
+ * an instance of PORT_MEM_ALLOCATOR to allocate memory with, so it does not
+ * have to embed a policy decision in its implementation. It also allows for
+ * the implementation of specialized allocators that can be leveraged through
+ * a generic interface.
+ *
+ * Don't call these functions directly; use @ref PORT_ALLOC and @ref PORT_FREE.
+ * This is done to provide full tracking support for these calls.
+ */
+struct PORT_MEM_ALLOCATOR {
+    /**
+     * @brief see @ref PortMemAllocatorAlloc for documentation
+     */
+    PortMemAllocatorAlloc *_portAlloc;
+    /**
+     * @brief see @ref PortMemAllocatorFree for documentation
+     */
+    PortMemAllocatorFree *_portFree;
+    /**
+     * @brief see @ref PortMemAllocatorRelease for documentation
+     */
+    PortMemAllocatorRelease *_portRelease;
+    /**
+     * @brief Pointer to tracking structure.
+     */
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking;
+    /**
+     * @brief Pointer to the platform specific implementation.
+     */
+    PORT_MEM_ALLOCATOR_IMPL *pImpl;
+};
+
+/**
+ * @brief Macro for calling the alloc method of an allocator object.
+ *
+ * Please use this instead of calling the methods directly, to ensure proper
+ * memory tracking in all cases.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if allocating NonPaged memory)
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+#define PORT_ALLOC(pAlloc, length) _portMemAllocatorAlloc(pAlloc, length)
+/**
+ * @brief Macro for calling the free method of an allocator object.
+ *
+ * Please use this instead of calling the methods directly, to ensure proper
+ * memory tracking in all cases.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if freeing NonPaged memory)
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+#define PORT_FREE(pAlloc, pMem) _portMemAllocatorFree(pAlloc, pMem)
+
+/**
+ * @brief Creates an allocator for paged memory.
+ *
+ * Returns an allocator instance where @ref PORT_ALLOC will behave
+ * like @ref portMemAllocPaged. Note the memory holding the PORT_MEM_ALLOCATOR
+ * instance may also be paged.
+ *
+ * @return NULL if creation failed.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+PORT_MEM_ALLOCATOR *portMemAllocatorCreatePaged(void);
+
+/**
+ * @brief Creates an allocator for non-paged memory.
+ *
+ * Returns an allocator instance where @ref PORT_ALLOC will
+ * behave like @ref portMemAllocNonPaged. Note the memory holding the
+ * PORT_MEM_ALLOCATOR instance will also be non-paged.
+ *
+ * @return NULL if creation failed.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+PORT_MEM_ALLOCATOR *portMemAllocatorCreateNonPaged(void);
+
+/**
+ * @brief Creates an allocator over an existing block of memory.
+ *
+ * Adds allocator bookkeeping information to an existing memory block, so that
+ * it can be used with the standard allocator interface. Some of the space of
+ * the preallocated block will be consumed for bookkeeping, so not all of the
+ * memory will be allocatable.
+ *
+ * Use this to create an allocator object on an ISR stack, so memory allocations
+ * can be done at DIRQL.
+ *
+ * @par Implementation details:
+ * The allocator allocates in chunks of 16 bytes, and uses two bits per chunk
+ * to keep track of free chunks. Thus, the bookkeeping structures for a block
+ * of size N will take about N/64 + sizeof(PORT_MEM_ALLOCATOR) bytes.
+ * Use @ref PORT_MEM_PREALLOCATED_BLOCK if you want to specify the useful
+ * (allocatable) size instead of the total size.
+ *
+ * The allocator is only valid while the memory it was created on is valid.
+ * @ref portMemAllocatorRelease must be called on the allocator before the
+ * memory lifecycle ends.
+ *
+ * @return NULL if creation failed.
+ *
+ * @pre Usable at any IRQL/interrupt context
+ * @note Will not put the thread to sleep.
+ * @note This allocator is not thread safe.
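+ *
+ * As a sketch of the intended pattern (buffer name and sizes are made up for
+ * illustration):
+ * ~~~{.c}
+ * NvU8 isrBuffer[PORT_MEM_PREALLOCATED_BLOCK(256)];
+ * PORT_MEM_ALLOCATOR *pIsrAlloc =
+ *     portMemAllocatorCreateOnExistingBlock(isrBuffer, sizeof(isrBuffer));
+ * if (pIsrAlloc != NULL)
+ * {
+ *     void *pScratch = PORT_ALLOC(pIsrAlloc, 64);
+ *     if (pScratch != NULL)
+ *         PORT_FREE(pIsrAlloc, pScratch);
+ *     portMemAllocatorRelease(pIsrAlloc);
+ * }
+ * ~~~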
+ */
+PORT_MEM_ALLOCATOR *portMemAllocatorCreateOnExistingBlock(void *pPreallocatedBlock, NvLength blockSizeBytes);
+
+/**
+ * @brief Extends the given size to fit the required bookkeeping information.
+ *
+ * To be used when preallocating blocks that will be used to create an allocator.
+ * Consider these two preallocated memory blocks:
+ * ~~~{.c}
+ * NvU8 xxx[1024];
+ * NvU8 yyy[PORT_MEM_PREALLOCATED_BLOCK(1024)];
+ * ~~~
+ * Block @c xxx has a size of 1024, but only ~950 of that can be allocated.
+ * Block @c yyy has a size of ~1100, and exactly 1024 bytes can be allocated.
+ */
+#define PORT_MEM_PREALLOCATED_BLOCK(size) \
+    (size + PORT_MEM_PREALLOCATED_BLOCK_EXTRA_SIZE(size))
+
+/**
+ * @brief Releases an allocator instance.
+ *
+ * This must be called to release any resources associated with the allocator.
+ *
+ * @par Checked builds only:
+ * Will assert if pAllocator has unfreed allocations.
+ *
+ * @par Undefined:
+ * pAllocator must be an instance of PORT_MEM_ALLOCATOR that was provided by one
+ * of the portMemAllocatorCreate* functions.
+ *
+ * These limitations don't apply to allocators created using @ref portMemAllocatorCreateOnExistingBlock and
+ * @ref portMemExAllocatorCreateLockedOnExistingBlock.
+ *
+ * @pre Windows: IRQL <= APC_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void portMemAllocatorRelease(PORT_MEM_ALLOCATOR *pAllocator);
+
+/**
+ * @brief Returns the pointer to the global nonpaged allocator.
+ *
+ * This allocator is always initialized and does not need to be released.
+ *
+ * Allocations performed using this allocator are identical to the ones done
+ * by @ref portMemAllocNonPaged.
+ */
+PORT_MEM_ALLOCATOR *portMemAllocatorGetGlobalNonPaged(void);
+/**
+ * @brief Returns the pointer to the global paged allocator.
+ *
+ * This allocator is always initialized and does not need to be released.
+ *
+ * Allocations performed using this allocator are identical to the ones done
+ * by @ref portMemAllocPaged.
+ */
+PORT_MEM_ALLOCATOR *portMemAllocatorGetGlobalPaged(void);
+/**
+ * @brief Prints the memory details gathered by whatever tracking mechanism is
+ * enabled. If pAllocator is NULL, it will print data for all allocators.
+ *
+ * @note Printing is done using portDbgPrintString, which prints regardless of
+ * build type and debug levels.
+ */
+void portMemPrintTrackingInfo(const PORT_MEM_ALLOCATOR *pAllocator);
+
+// @} End core functions
+
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+/**
+ * @brief Returns true if it is safe to allocate paged memory.
+ */
+NvBool portMemExSafeForPagedAlloc(void);
+#define portMemExSafeForPagedAlloc_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Returns true if it is safe to allocate non-paged memory.
+ */
+NvBool portMemExSafeForNonPagedAlloc(void);
+#define portMemExSafeForNonPagedAlloc_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Public allocator tracking information
+ */
+typedef struct PORT_MEM_TRACK_ALLOCATOR_STATS
+{
+    /** @brief Total number of allocations */
+    NvU32 numAllocations;
+    /** @brief Total allocated bytes, including all staging */
+    NvLength allocatedSize;
+    /** @brief Useful size of allocations - What was actually requested */
+    NvLength usefulSize;
+    /** @brief Extra size allocated for tracking/debugging purposes */
+    NvLength metaSize;
+} PORT_MEM_TRACK_ALLOCATOR_STATS;
+
+/**
+ * @brief Returns the statistics of currently active allocations for the given
+ * allocator.
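+ *
+ * For instance, a leak check sketch around some operation might look like
+ * this (illustrative only):
+ * ~~~{.c}
+ * PORT_MEM_TRACK_ALLOCATOR_STATS stats;
+ * if (portMemExTrackingGetActiveStats(pAllocator, &stats) == NV_OK &&
+ *     stats.numAllocations != 0)
+ * {
+ *     // allocations made through pAllocator are still outstanding
+ * }
+ * ~~~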
+ *
+ * If pAllocator is NULL, it returns stats for all allocators, as well as the
+ * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged.
+ */
+NV_STATUS portMemExTrackingGetActiveStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats);
+
+/**
+ * @brief Returns the statistics of all allocations made with the given
+ * allocator since it was created.
+ *
+ * If pAllocator is NULL, it returns stats for all allocators, as well as the
+ * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged.
+ */
+NV_STATUS portMemExTrackingGetTotalStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats);
+
+/**
+ * @brief Returns the statistics of peak allocations made with the given
+ * allocator since it was created.
+ *
+ * Peak data reports each field independently. For example, if the peak data
+ * reports 100 allocations and 100000 bytes allocated, those two did not
+ * necessarily happen *at the same time*. It could also be that the allocator
+ * created 100 allocations of 1 byte each, then freed them and allocated a
+ * single 100000 bytes block.
+ *
+ * If pAllocator is NULL, it returns stats for all allocators, as well as the
+ * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged.
+ */
+NV_STATUS portMemExTrackingGetPeakStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats);
+
+/**
+ * @brief Cycles through the tracking info for allocations made by pAllocator.
+ * If pAllocator is NULL, it will cycle through all allocations.
+ *
+ * @param [out] pInfo The info will be written to this buffer.
+ * @param [in, out] pIterator
+ *   Should point to NULL the first time it is called.
+ *   Every next call should pass the value returned by previous.
+ *   To reset the loop, set the iterator to NULL.
+ *   Upon writing the last range, the iterator will be set to NULL.
+ *   The iterator is only valid until the next alloc/free from this allocator.
+ *   There is no need to release the iterator in any way.
+ *
+ * @return NV_ERR_OBJECT_NOT_FOUND if no allocations exist.
+ */
+NV_STATUS portMemExTrackingGetNext(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOC_INFO *pInfo, void **pIterator);
+
+/**
+ * @brief Copies from user memory to kernel memory.
+ *
+ * When accepting data as input from user space it is necessary to take
+ * additional precautions to access it safely and securely. This means copying
+ * the user data into a kernel buffer and then using that kernel buffer for all
+ * needed accesses.
+ *
+ * The function will fail if pUser is an invalid user space pointer or if the
+ * memory it refers to is less than lengthBytes bytes long. A valid kernel
+ * pointer is interpreted as an invalid user pointer.
+ * @par Checked builds only:
+ * Will trigger a breakpoint if pUser is an invalid userspace pointer.
+ *
+ * The function will fail if pKernel is NULL.
+ *
+ * The function will fail if lengthBytes is 0.
+ *
+ * @return
+ *   - NV_OK if successful
+ *   - NV_ERR_INVALID_POINTER if pUser is invalid or pKernel is NULL
+ *   - NV_ERR_INVALID_ARGUMENT if lengthBytes is 0
+ */
+NV_STATUS portMemExCopyFromUser(const NvP64 pUser, void *pKernel, NvLength lengthBytes);
+#define portMemExCopyFromUser_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Copies from kernel memory to user memory.
+ *
+ * This is the reverse of @ref portMemExCopyFromUser. The copy in this case is
+ * from pKernel to pUser.
+ *
+ * See @ref portMemExCopyFromUser for more details.
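+ *
+ * A typical round-trip sketch (the MY_PARAMS struct, pUserAddr, and status
+ * variable are hypothetical, for illustration only):
+ * ~~~{.c}
+ * MY_PARAMS params;   // hypothetical ioctl-style parameter block
+ * if (portMemExCopyFromUser(pUserAddr, &params, sizeof(params)) == NV_OK)
+ * {
+ *     // ... validate and process the kernel-side copy ...
+ *     status = portMemExCopyToUser(&params, pUserAddr, sizeof(params));
+ * }
+ * ~~~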
+ *
+ */
+NV_STATUS portMemExCopyToUser(const void *pKernel, NvP64 pUser, NvLength lengthBytes);
+#define portMemExCopyToUser_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Returns the size (in bytes) of a single memory page.
+ */
+NvLength portMemExGetPageSize(void);
+#define portMemExGetPageSize_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Opaque container holding an allocation of physical system memory.
+ */
+typedef struct PORT_PHYSICAL_MEMDESC PORT_PHYSICAL_MEMDESC;
+
+/**
+ * @brief Creates a handle used to manage and manipulate a physical memory
+ * allocation.
+ *
+ * @param pAllocator the allocator to use to create the allocation's tracking
+ * structures. This allocator is *not* used to allocate physical memory.
+ *
+ * @return NULL if the allocation failed.
+ */
+PORT_PHYSICAL_MEMDESC *portMemExPhysicalDescCreate(PORT_MEM_ALLOCATOR *pAllocator);
+#define portMemExPhysicalDescCreate_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Types of caching for physical memory mappings.
+ *
+ * In case a target architecture does not support a specific caching mode,
+ * the mapping call will fail.
+ * Specifying PORT_MEM_ANYCACHE lets the implementation pick a caching mode that
+ * is present on the target architecture. This way the mapping will not fail.
+ */
+typedef enum
+{
+    PORT_MEM_UNCACHED,
+    PORT_MEM_CACHED,
+    PORT_MEM_WRITECOMBINED,
+    PORT_MEM_ANYCACHE
+} PortMemCacheMode;
+
+/**
+ * @brief Types of access protections for physical memory mappings.
+ */
+typedef enum
+{
+    PORT_MEM_PROT_NO_ACCESS       = 0,
+    PORT_MEM_PROT_READ            = 1,
+    PORT_MEM_PROT_WRITE           = 2,
+    PORT_MEM_PROT_READ_WRITE      = 3,
+    PORT_MEM_PROT_EXEC            = 4,
+    PORT_MEM_PROT_READ_EXEC       = 5,
+    PORT_MEM_PROT_WRITE_EXEC      = 6,
+    PORT_MEM_PROT_READ_WRITE_EXEC = 7
+} PortMemProtectMode;
+
+/**
+ * @brief Populates a physical memory descriptor with backing pages.
+ *
+ * Populates a descriptor with physical pages. Pages will be zeroed.
+ */
+NV_STATUS portMemExPhysicalDescPopulate(PORT_PHYSICAL_MEMDESC *pPmd, NvLength sizeBytes, NvBool bContiguous);
+#define portMemExPhysicalDescPopulate_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Allocates a PMD and populates it with memory.
+ *
+ * This is a combination of @ref portMemExPhysicalDescCreate and @ref
+ * portMemExPhysicalDescPopulate. It should be the preferred method to allocate
+ * physical memory when it is possible to do it as a single step. Not only
+ * does the caller require less code and error handling, but it also allows the
+ * implementation the option to combine the tracking data into fewer
+ * allocations since it knows the size up front.
+ *
+ * @param [out] ppPmd - Pointer to the allocated PMD.
+ * @param pAllocator - Allocator to use when allocating the PMD.
+ */
+NV_STATUS portMemExPhysicalDescCreateAndPopulate(PORT_MEM_ALLOCATOR *pAllocator,
+    PORT_PHYSICAL_MEMDESC **ppPmd, NvLength sizeBytes, NvBool bContiguous);
+#define portMemExPhysicalDescCreateAndPopulate_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Adds a contiguous memory range to the physical memory descriptor.
+ *
+ * To describe a non-contiguous memory range, call this function once for every
+ * contiguous range. Range order will be determined by function call order,
+ * not the range addresses.
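+ *
+ * For example, describing two discontiguous ranges (addresses and sizes are
+ * made up for illustration; return values should be checked in real code):
+ * ~~~{.c}
+ * // Describes a PMD covering [0x1000, 0x2000) followed by [0x8000, 0x9000)
+ * portMemExPhysicalDescribeRange(pPmd, 0x1000, 0x1000);
+ * portMemExPhysicalDescribeRange(pPmd, 0x8000, 0x1000);
+ * ~~~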
+ */
+NV_STATUS portMemExPhysicalDescribeRange(PORT_PHYSICAL_MEMDESC *pPmd, NvU64 start, NvLength length);
+#define portMemExPhysicalDescribeRange_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Hands back the next contiguous memory range in the memory descriptor.
+ *
+ * @param [out] pStart - Physical address of the range
+ * @param [out] pLength - Length of the range
+ * @param [in, out] pIterator
+ *   Should point to NULL the first time it is called.
+ *   Every next call should pass the value returned by previous.
+ *   To reset the loop, set the iterator to NULL.
+ *   Upon writing the last range, the iterator will be set to NULL.
+ *   The iterator is valid until pPmd is destroyed.
+ *   There is no need to release the iterator in any way.
+ *
+ * @return NV_ERR_OBJECT_NOT_FOUND if no ranges exist.
+ */
+NV_STATUS portMemExPhysicalGetNextRange(PORT_PHYSICAL_MEMDESC *pPmd,
+    NvU64 *pStart, NvLength *pLength, void **pIterator);
+#define portMemExPhysicalGetNextRange_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Frees the memory descriptor and all tracking data. The descriptor must
+ * have been allocated with @ref portMemExPhysicalDescCreate or
+ * @ref portMemExPhysicalDescCreateAndPopulate.
+ *
+ * Freed memory is not automatically unmapped.
+ *
+ * It is guaranteed that after memory has been freed, the original data can no
+ * longer be read in any way.
+ * @par Undefined:
+ * Accessing a mapping that has been freed results in undefined behavior.
+ */
+void portMemExPhysicalDescFree(PORT_PHYSICAL_MEMDESC *pPmd);
+#define portMemExPhysicalDescFree_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Frees physical memory allocated with @ref portMemExPhysicalDescPopulate.
+ */
+void portMemExPhysicalFree(PORT_PHYSICAL_MEMDESC *pPmd);
+#define portMemExPhysicalFree_SUPPORTED PORT_IS_KERNEL_BUILD
+
+
+/**
+ * @brief Maps a region of a @ref PORT_PHYSICAL_MEMDESC.
+ *
+ * @param [out] ppMapping - Virtual address where the physical memory is mapped
+ * @param offset - Offset of the physical memory where the region starts.
+ *   The region must start on a page boundary.
+ * @param length - Length of the physical memory region.
+ *   Needs to be a multiple of page size.
+ * @param protect - Mapping protections
+ * @param cacheMode - Mapping cache mode.
+ *   Only PORT_MEM_ANYCACHE is guaranteed to be supported.
+ *
+ * @return NV_ERR_NOT_SUPPORTED if the specified cache mode is not supported by
+ * the current architecture.
+ */
+NV_STATUS portMemExPhysicalMap(PORT_PHYSICAL_MEMDESC *pPmd,
+    void **ppMapping, NvU64 offset, NvU64 length,
+    PortMemProtectMode protect, PortMemCacheMode cacheMode);
+#define portMemExPhysicalMap_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Unmaps a region created with @ref portMemExPhysicalMap.
+ *
+ * @par Undefined:
+ * Accessing unmapped memory is undefined, but it is guaranteed that the
+ * actual data can't be read/overwritten.
+ */
+NV_STATUS portMemExPhysicalUnmap(PORT_PHYSICAL_MEMDESC *pPmd, void *pMapping);
+#define portMemExPhysicalUnmap_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Creates a thread-safe allocator over an existing block of memory.
+ *
+ * @note See @ref portMemAllocatorCreateOnExistingBlock for other limitations.
+ * @note The caller should initialize @p pSpinlock and destroy it after it
+ * has finished using this allocator.
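+ *
+ * A sketch, assuming the sync module's PORT_SPINLOCK with its
+ * portSyncSpinlockCreate()/portSyncSpinlockDestroy() helpers (see
+ * nvport/sync.h; this is an assumption about that module's interface):
+ * ~~~{.c}
+ * NvU8 block[PORT_MEM_PREALLOCATED_BLOCK(256)];
+ * PORT_SPINLOCK *pLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged());
+ * PORT_MEM_ALLOCATOR *pAlloc =
+ *     portMemExAllocatorCreateLockedOnExistingBlock(block, sizeof(block), pLock);
+ * // ... PORT_ALLOC / PORT_FREE may now race from multiple contexts ...
+ * portMemAllocatorRelease(pAlloc);
+ * portSyncSpinlockDestroy(pLock);
+ * ~~~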
+ */
+PORT_MEM_ALLOCATOR *portMemExAllocatorCreateLockedOnExistingBlock(void *pPreallocatedBlock, NvLength blockSizeBytes, void *pSpinlock);
+#define portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED \
+    (PORT_IS_MODULE_SUPPORTED(sync))
+
+
+/**
+ * @brief Maps the given physical address range to nonpaged system space.
+ *
+ * @param[in] start    Specifies the starting physical address of the I/O
+ *                     range to be mapped.
+ * @param[in] byteSize Specifies the number of bytes to be mapped.
+ *
+ * @return The base virtual address that maps the base physical address for
+ *         the range.
+ */
+void *portMemExMapIOSpace(NvU64 start, NvU64 byteSize);
+#define portMemExMapIOSpace_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+/**
+ * @brief Unmaps a specified range of physical addresses previously mapped by
+ * @ref portMemExMapIOSpace.
+ *
+ * @param[in] addr     Pointer to the base virtual address to which the
+ *                     physical pages were mapped.
+ * @param[in] byteSize Specifies the number of bytes that were mapped.
+ */
+void portMemExUnmapIOSpace(void *addr, NvU64 byteSize);
+#define portMemExUnmapIOSpace_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+// @} End extended functions
+
+
+/**
+ * @note Memory tracking is controlled through the following compile-time flags.
+ * The PORT_MEM_TRACK_USE_* constants should be defined to 0 or 1.
+ * If nothing is defined, the default values are assigned here.
+ */
+#if !defined(PORT_MEM_TRACK_USE_COUNTER)
+/**
+ * @brief Use allocations counter for all allocators
+ *
+ * Allocation counter is lightweight and can detect if a leak is present.
+ * Default is always on.
+ */
+#define PORT_MEM_TRACK_USE_COUNTER 1
+#endif
+#if !defined(PORT_MEM_TRACK_USE_FENCEPOSTS)
+/**
+ * @brief Use fenceposts around all allocated blocks
+ *
+ * Fenceposts can detect out of bounds writes and improper free calls.
+ * Default is on for checked builds (where it will assert if an error occurs).
+ */
+#define PORT_MEM_TRACK_USE_FENCEPOSTS PORT_IS_CHECKED_BUILD
+#endif
+#if !defined(PORT_MEM_TRACK_USE_ALLOCLIST)
+/**
+ * @brief Keep a list of all allocations.
+ *
+ * Allocation lists can give more details about detected leaks, and allow
+ * cycling through all allocations.
+ * Default is off.
+ * @todo Perhaps enable for checked builds?
+ */
+#define PORT_MEM_TRACK_USE_ALLOCLIST 0
+#endif
+#if !defined(PORT_MEM_TRACK_USE_CALLERINFO)
+/**
+ * @brief Track file:line information for all allocations
+ *
+ * On release builds the filename hash is passed instead of the string. This
+ * requires NvLog to be enabled.
+ * Default is off.
+ */
+#define PORT_MEM_TRACK_USE_CALLERINFO 0
+#endif
+/**
+ * @brief Track instruction pointer instead of function/file/line information
+ * for all allocations
+ *
+ * Has no effect unless PORT_MEM_TRACK_USE_CALLERINFO is also set.
+ */
+#if !defined(PORT_MEM_TRACK_USE_CALLERINFO_IP)
+#if NVCPU_IS_RISCV64
+#define PORT_MEM_TRACK_USE_CALLERINFO_IP 1
+#else
+#define PORT_MEM_TRACK_USE_CALLERINFO_IP 0
+#endif
+#endif
+#if !defined(PORT_MEM_TRACK_USE_LOGGING)
+/**
+ * @brief Log all alloc and free calls to a binary NvLog buffer.
+ * Requires NvLog to be enabled.
+ *
+ * Default is off.
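+ *
+ * As with the other PORT_MEM_TRACK_USE_* switches above, a build can override
+ * the default from its makefile instead of editing this header; code can then
+ * test the resulting value, e.g. (sketch):
+ * ~~~{.c}
+ * #if PORT_MEM_TRACK_USE_LOGGING
+ * // NvLog-based alloc/free records are collected in this build
+ * #endif
+ * ~~~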
+ */ +#define PORT_MEM_TRACK_USE_LOGGING 0 +#endif + +/** @brief Nothing is printed unless @ref portMemPrintTrackingInfo is called */ +#define PORT_MEM_TRACK_PRINT_LEVEL_SILENT 0 +/** @brief Print when an error occurs and at shutdown */ +#define PORT_MEM_TRACK_PRINT_LEVEL_BASIC 1 +/** @brief Print at every alloc and free, and at any abnormal situation */ +#define PORT_MEM_TRACK_PRINT_LEVEL_VERBOSE 2 + +#if !defined(PORT_MEM_TRACK_PRINT_LEVEL) +#if PORT_IS_CHECKED_BUILD +#define PORT_MEM_TRACK_PRINT_LEVEL PORT_MEM_TRACK_PRINT_LEVEL_BASIC +#else +#define PORT_MEM_TRACK_PRINT_LEVEL PORT_MEM_TRACK_PRINT_LEVEL_SILENT +#endif // PORT_IS_CHECKED_BUILD +#endif // !defined(PORT_MEM_TRACK_PRINT_LEVEL) + +// Memory tracking header can redefine some functions declared here. +#include "nvport/inline/memory_tracking.h" + +/** + * @brief Single allocation description. + * + * Must be defined after memory_tracking.h is included for PORT_MEM_CALLERINFO. + */ +struct PORT_MEM_TRACK_ALLOC_INFO +{ +#if PORT_MEM_TRACK_USE_CALLERINFO + /** + * @brief Function / file / line or instruction pointer. + */ + PORT_MEM_CALLERINFO callerInfo; +#endif + /** + * @brief pointer to the allocated memory block. + */ + void *pMemory; + /** + * @brief Size of the allocated memory block + */ + NvLength size; + /** + * @brief Pointer to the allocator that allocated the memory. + * If the memory was allocated globally, this will be NULL + */ + PORT_MEM_ALLOCATOR *pAllocator; + /** + * @brief Timestamp of the allocation. Will be 0 if it wasn't logged. + */ + NvU64 timestamp; +}; + +/** + * @} + */ + +#endif // _NVPORT_MEMORY_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h new file mode 100644 index 0000000..3049e4b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h @@ -0,0 +1,262 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief NvPort global definitions + */ + +#ifndef _NVPORT_H_ +#define _NVPORT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @note nvport.h may be included through nvrm.h by projects which haven't yet + * configured their makefiles appropriately. 
These files don't use any NvPort
+ * features, so it's safe to define away this entire file instead of failing
+ * the build. This will be removed once NvPort becomes ubiquitous enough.
+ */
+#if defined(PORT_IS_KERNEL_BUILD)
+
+#include <nvtypes.h>
+#include <nvstatus.h>
+
+#if !defined(PORT_IS_KERNEL_BUILD)
+#error "PORT_IS_KERNEL_BUILD must be defined to 0 or 1 by makefile"
+#endif
+
+#if !defined(PORT_IS_CHECKED_BUILD)
+#error "PORT_IS_CHECKED_BUILD must be defined to 0 or 1 by makefile"
+#endif
+
+/**
+ * @defgroup NVPORT_CORE Core Functions
+ * @{
+ */
+
+/**
+ * @brief Helper macro to test if an extended function is supported
+ *
+ * Whether an extended function is supported or not is a compile time decision.
+ * Every function has an associated define that will look like this:
+ *
+ * ~~~{.c}
+ * #define portSomeFunction_SUPPORTED SOME_EXPRESSION
+ * ~~~
+ *
+ * That will be evaluated by the preprocessor to either 0 or 1 (not supported
+ * or supported). If it evaluates to 0 then the symbol will not exist and the
+ * function cannot be referenced.
+ */
+#define PORT_IS_FUNC_SUPPORTED(function) function ## _SUPPORTED
+
+/**
+ * @brief Helper macro to test if a module is supported. The argument should be
+ * a lowercase module name, e.g. @c PORT_IS_MODULE_SUPPORTED(memory)
+ *
+ * Whether a module is included in the build is decided at compile time.
+ * Modules can either not support a given platform or be explicitly disabled
+ * through the Makefile.
+ *
+ * This define will be equal to 1 if the module is supported.
+ * If it evaluates to 0 or is not defined, then none of the module's symbols or
+ * defines will exist in the build.
+ */
+#define PORT_IS_MODULE_SUPPORTED(module) PORT_MODULE_ ## module
+
+
+#if defined(__clang__)
+#define PORT_COMPILER_IS_CLANG 1
+#define PORT_COMPILER_HAS_INTRINSIC_ATOMICS __has_builtin(__c11_atomic_fetch_add)
+#define PORT_COMPILER_HAS_ATTRIBUTE_FORMAT __has_attribute(__format__)
+#define PORT_COMPILER_HAS_COUNTER 1
+#else
+#define PORT_COMPILER_IS_CLANG 0
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#define PORT_COMPILER_IS_GCC 1
+#define PORT_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#define PORT_COMPILER_HAS_INTRINSIC_ATOMICS (PORT_GCC_VERSION >= 40100)
+#define PORT_COMPILER_HAS_ATTRIBUTE_FORMAT (PORT_GCC_VERSION >= 20300)
+#define PORT_COMPILER_HAS_COUNTER (PORT_GCC_VERSION >= 40300)
+#define PORT_COMPILER_HAS_INTRINSIC_CPUID 1
+#else
+#define PORT_COMPILER_IS_GCC 0
+#endif
+
+#define PORT_COMPILER_IS_MSVC 0
+
+#if !(PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG || PORT_COMPILER_IS_MSVC)
+#error "Unsupported compiler"
+#endif
+
+//
+// Need to define an IS_MODS macro that expands to 1 or 0 as defined(NV_MODS)
+// is not entirely portable when used within a macro expansion.
+// e.g. this would not always work: #define PORT_IS_MODS defined(NV_MODS)
+//
+#define PORT_IS_MODS 0
+
+#ifndef PORT_INLINE
+/**
+ * @brief Qualifier for all inline functions declared by NvPort.
+ * Modules will usually define PORT_<module>_INLINE which is either PORT_INLINE
+ * or nothing, depending on whether the functions are being inlined in that module.
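+ *
+ * For instance (a sketch of the per-module pattern), a build that wants the
+ * safe module's functions emitted out of line could pre-define that module's
+ * qualifier to nothing before including NvPort (include path assumed to match
+ * this tree):
+ * ~~~{.c}
+ * #define PORT_SAFE_INLINE
+ * #include "nvport/nvport.h"
+ * ~~~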
+ */ +#define PORT_INLINE static NV_INLINE +#endif + + +/** + * @def PORT_CHECKED_ONLY(x) + * @brief Evaluates the argument only if it is a checked build + */ +#if PORT_IS_CHECKED_BUILD +#define PORT_CHECKED_ONLY(x) x +#else +#define PORT_CHECKED_ONLY(x) +#endif + +/** + * @def PORT_KERNEL_ONLY(x) + * @brief Evaluates the argument only if it is a kernel build + */ +#if PORT_IS_KERNEL_BUILD +#define PORT_KERNEL_ONLY(x) x +#else +#define PORT_KERNEL_ONLY(x) +#endif + +#ifndef PORT_INCLUDE_NEW_STYLE_ALIASES +/** + * @brief Switch to include aliases for objects and methods that conform to the + * new RM style. + * + * This switch will define type and method aliases for object types in NvPort. + * The current NvPort style object names are PORT_MODULE_OBJECT, while the + * methods are portModuleObjectMethod(). + * The update proposal dictates these to be PortModuleObject and objectMethod. + * + * @todo Currently we just alias the new names to the old ones. Once the coding + * style has been finalized, we should add a deprecation note to the old names, + * and do a mass search and replace. + */ +#define PORT_INCLUDE_NEW_STYLE_ALIASES 1 +#endif // PORT_INCLUDE_NEW_STYLE_ALIASES + +/** + * @brief Suppresses unused variable warnings + * @param x - Variable or argument name + * + * No compilation errors are reported by any compiler when we use + * the following definition. + * + * #define PORT_UNREFERENCED_VARIABLE(x) ((void)sizeof(&(x))) + * + * But Coverity reports BAD_SIZEOF error with this definition. + * Adding a Coverity annotation "coverity[bad_sizeof]" near + * the definition does not work. The preprocessor ignores all + * the comments and the Coverity annotation is also ignored + * as a legal comment. As a result, this annotation never ends + * up in the source code where this macro is used. Hence, we use + * two definitions of this macro - one for Coverity and the other + * for the rest of the targets. + * + * Coverity does not report any warnings for unused variables. + * Hence, we do nothing while building for Coverity. 
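+ *
+ * Typical use is silencing a deliberately unused parameter (sketch; the
+ * callback and its arguments are hypothetical):
+ * ~~~{.c}
+ * void myCallback(void *pData, NvU32 flags)   // hypothetical callback
+ * {
+ *     PORT_UNREFERENCED_VARIABLE(flags);      // flags intentionally unused
+ *     // ... use pData ...
+ * }
+ * ~~~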
+ */ +#if !defined(__COVERITY__) +#define PORT_UNREFERENCED_VARIABLE(x) ((void)sizeof(&(x))) +#else +#define PORT_UNREFERENCED_VARIABLE(x) +#endif + +/// @} + +#if PORT_IS_MODULE_SUPPORTED(core) +#include "nvport/core.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(example) +#include "nvport/example.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(atomic) +#include "nvport/atomic.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(debug) +#include "nvport/debug.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(util) +#include "nvport/util.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(memory) +#include "nvport/memory.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(sync) +#include "nvport/sync.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(safe) +#include "nvport/safe.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(mmio) +#include "nvport/mmio.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(thread) +#include "nvport/thread.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(time) +#include "nvport/time.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(crypto) +#include "nvport/crypto.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(string) +#include "nvport/string.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(cpu) +#include "nvport/cpu.h" +#endif + +#endif // defined(PORT_IS_KERNEL_BUILD) + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _NVPORT_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h new file mode 100644 index 0000000..2847c71 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h @@ -0,0 +1,621 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Safe module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_SAFE_H_ +#define _NVPORT_SAFE_H_ + +#ifndef PORT_SAFE_INLINE +#define PORT_SAFE_INLINE PORT_INLINE +#endif + +/** + * @defgroup NVPORT_SAFE Safe integer operations module + * + * @brief This module contains functions for safe use of integer types, without + * concern for overflow errors + * + * All functions return true if the operation was performed successfully, + * and false if there was an overflow (or division by zero). 
+ * The final value is handed back in both cases, but if the function returned + * false, the value handed back is undefined. + * + * @note These functions should be used: + * - When operating on data passing through the trust boundary (e.g. RM API) + * - When operating on signed data types (where overflows are undefined!) + * - Instead of checking overflows manually + * For internal functions, it is recommended not to clutter the code with this. + * Usually an internal overflow is a bug, and it should be fixed up the stack. + * + * + * @note A couple of artificial examples: + * - GOOD - Data received from user, not to be trusted. + * ~~~{.c} + * NvU32 NV_APIENTRY NvRmBzero(NvU8 *mem, NvLength len) + * { + * NvUPtr uptr = (NvUPtr) mem; + * if (mem == NULL || !portSafeAddUPtr(uptr, len, &uptr)) + * return NV_ERR_INVALID_PARAMETER; + * while ((NvUPtr) mem != uptr) + * *mem++ = 0; + * return NV_OK; + * } + * ~~~ + * - GOOD - Internal RM function, allowed to crash if given invalid params + * ~~~{.c} + * void bzero(NvU8 *mem, NvLength len) + * { + * while (len > 0) + * mem[--len] = 0; + * } + * ~~~ + * - BAD - If you are already checking for overflows manually, use these functions + * ~~~{.c} + * NV_STATUS osAllocMemTracked(void **ppAddress, NvU32 size) + * { + * NvU32 paddedSize = size; + * // allocate three extra dwords to hold the size and some debug tags + * paddedSize += 3 * sizeof(NvU32); + * // check for the overflow after increasing the size + * if (paddedSize < size) + * return NV_ERR_INSUFFICIENT_RESOURCES; + * size = paddedSize; + * ... + * } + * ~~~ + * - GOOD - Use provided functions + * ~~~{.c} + * NV_STATUS osAllocMemTracked(void **ppAddress, NvU32 size) + * { + * if (!portSafeAddU32(size, 3*sizeof(NvU32), &size)) + * return NV_ERR_INSUFFICIENT_RESOURCES; + * ... + * } + * ~~~ + * + * @{ + */ + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Add two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS8(NvS8 augend, NvS8 addend, NvS8 *pResult); +/** + * @brief Subtract two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS8(NvS8 minuend, NvS8 subtrahend, NvS8 *pResult); +/** + * @brief Multiply two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS8(NvS8 multiplicand, NvS8 multiplier, NvS8 *pResult); +/** + * @brief Divide two signed 8bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS8(NvS8 dividend, NvS8 divisor, NvS8 *pResult); + + +/** + * @brief Add two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS16(NvS16 augend, NvS16 addend, NvS16 *pResult); +/** + * @brief Subtract two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS16(NvS16 minuend, NvS16 subtrahend, NvS16 *pResult); +/** + * @brief Multiply two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS16(NvS16 multiplicand, NvS16 multiplier, NvS16 *pResult); +/** + * @brief Divide two signed 16bit integers, writing the result to *pResult. 
+ * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS16(NvS16 dividend, NvS16 divisor, NvS16 *pResult); + + +/** + * @brief Add two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS32(NvS32 augend, NvS32 addend, NvS32 *pResult); +/** + * @brief Subtract two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS32(NvS32 minuend, NvS32 subtrahend, NvS32 *pResult); +/** + * @brief Multiply two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS32(NvS32 multiplicand, NvS32 multiplier, NvS32 *pResult); +/** + * @brief Divide two signed 32bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS32(NvS32 dividend, NvS32 divisor, NvS32 *pResult); + + +/** + * @brief Add two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS64(NvS64 augend, NvS64 addend, NvS64 *pResult); +/** + * @brief Subtract two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS64(NvS64 minuend, NvS64 subtrahend, NvS64 *pResult); +/** + * @brief Multiply two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS64(NvS64 multiplicand, NvS64 multiplier, NvS64 *pResult); +/** + * @brief Divide two signed 64bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS64(NvS64 dividend, NvS64 divisor, NvS64 *pResult); + + + + +/** + * @brief Add two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU8(NvU8 augend, NvU8 addend, NvU8 *pResult); +/** + * @brief Subtract two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU8(NvU8 minuend, NvU8 subtrahend, NvU8 *pResult); +/** + * @brief Multiply two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU8(NvU8 multiplicand, NvU8 multiplier, NvU8 *pResult); +/** + * @brief Divide two unsigned 8bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU8(NvU8 dividend, NvU8 divisor, NvU8 *pResult); + + +/** + * @brief Add two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU16(NvU16 augend, NvU16 addend, NvU16 *pResult); +/** + * @brief Subtract two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU16(NvU16 minuend, NvU16 subtrahend, NvU16 *pResult); +/** + * @brief Multiply two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU16(NvU16 multiplicand, NvU16 multiplier, NvU16 *pResult); +/** + * @brief Divide two unsigned 16bit integers, writing the result to *pResult. 
+ * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU16(NvU16 dividend, NvU16 divisor, NvU16 *pResult); + + +/** + * @brief Add two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU32(NvU32 augend, NvU32 addend, NvU32 *pResult); +/** + * @brief Subtract two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU32(NvU32 minuend, NvU32 subtrahend, NvU32 *pResult); +/** + * @brief Multiply two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU32(NvU32 multiplicand, NvU32 multiplier, NvU32 *pResult); +/** + * @brief Divide two unsigned 32bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU32(NvU32 dividend, NvU32 divisor, NvU32 *pResult); + + +/** + * @brief Add two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU64(NvU64 augend, NvU64 addend, NvU64 *pResult); +/** + * @brief Subtract two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU64(NvU64 minuend, NvU64 subtrahend, NvU64 *pResult); +/** + * @brief Multiply two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU64(NvU64 multiplicand, NvU64 multiplier, NvU64 *pResult); +/** + * @brief Divide two unsigned 64bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU64(NvU64 dividend, NvU64 divisor, NvU64 *pResult); + + +/** + * @brief Add two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddUPtr(NvUPtr augend, NvUPtr addend, NvUPtr *pResult); +/** + * @brief Subtract two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubUPtr(NvUPtr minuend, NvUPtr subtrahend, NvUPtr *pResult); +/** + * @brief Multiply two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulUPtr(NvUPtr multiplicand, NvUPtr multiplier, NvUPtr *pResult); +/** + * @brief Divide two pointer-sized integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivUPtr(NvUPtr dividend, NvUPtr divisor, NvUPtr *pResult); + + +/** + * @brief Add two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddLength(NvLength augend, NvLength addend, NvLength *pResult); +/** + * @brief Subtract two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubLength(NvLength minuend, NvLength subtrahend, NvLength *pResult); +/** + * @brief Multiply two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulLength(NvLength multiplicand, NvLength multiplier, NvLength *pResult); +/** + * @brief Divide two length integers, writing the result to *pResult. 
+ * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivLength(NvLength dividend, NvLength divisor, NvLength *pResult); + + + + + + +/** + * @brief Convert a 8bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU8(NvS8 data, NvU8 *pResult); +/** + * @brief Convert a 8bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU16(NvS8 data, NvU16 *pResult); +/** + * @brief Convert a 8bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU32(NvS8 data, NvU32 *pResult); +/** + * @brief Convert a 8bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU64(NvS8 data, NvU64 *pResult); +/** + * @brief Convert a 8bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToUPtr(NvS8 data, NvUPtr *pResult); +/** + * @brief Convert a 8bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToLength(NvS8 data, NvLength *pResult); + + +/** + * @brief Convert a 16bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToS8(NvS16 data, NvS8 *pResult); +/** + * @brief Convert a 16bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU8(NvS16 data, NvU8 *pResult); +/** + * @brief Convert a 16bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU16(NvS16 data, NvU16 *pResult); +/** + * @brief Convert a 16bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU32(NvS16 data, NvU32 *pResult); +/** + * @brief Convert a 16bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU64(NvS16 data, NvU64 *pResult); +/** + * @brief Convert a 16bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToUPtr(NvS16 data, NvUPtr *pResult); +/** + * @brief Convert a 16bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToLength(NvS16 data, NvLength *pResult); + +/** + * @brief Convert a 32bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToS8(NvS32 data, NvS8 *pResult); +/** + * @brief Convert a 32bit signed integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToS16(NvS32 data, NvS16 *pResult); +/** + * @brief Convert a 32bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU8(NvS32 data, NvU8 *pResult); +/** + * @brief Convert a 32bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU16(NvS32 data, NvU16 *pResult); +/** + * @brief Convert a 32bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU32(NvS32 data, NvU32 *pResult); +/** + * @brief Convert a 32bit signed integer to a 64bit unsigned integer *pResult. 
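+ *
+ * For example (sketch), a negative input cannot be represented, so the
+ * conversion reports failure and the output must not be used:
+ * ~~~{.c}
+ * NvU64 u;
+ * if (!portSafeS32ToU64(-1, &u))
+ * {
+ *     // handle the conversion failure; u is undefined here
+ * }
+ * ~~~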
+/**
+ * @brief Convert a 32-bit signed integer to a pointer-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToUPtr(NvS32 data, NvUPtr *pResult);
+/**
+ * @brief Convert a 32-bit signed integer to a length-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToLength(NvS32 data, NvLength *pResult);
+
+
+/**
+ * @brief Convert a 64-bit signed integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS8(NvS64 data, NvS8 *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to a 16-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS16(NvS64 data, NvS16 *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to a 32-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS32(NvS64 data, NvS32 *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to an 8-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU8(NvS64 data, NvU8 *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to a 16-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU16(NvS64 data, NvU16 *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to a 32-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU32(NvS64 data, NvU32 *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to a 64-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU64(NvS64 data, NvU64 *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to a pointer-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToUPtr(NvS64 data, NvUPtr *pResult);
+/**
+ * @brief Convert a 64-bit signed integer to a length-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToLength(NvS64 data, NvLength *pResult);
+
+
+/**
+ * @brief Convert an 8-bit unsigned integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU8ToS8(NvU8 data, NvS8 *pResult);
+
+/**
+ * @brief Convert a 16-bit unsigned integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToS8(NvU16 data, NvS8 *pResult);
+/**
+ * @brief Convert a 16-bit unsigned integer to a 16-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToS16(NvU16 data, NvS16 *pResult);
+/**
+ * @brief Convert a 16-bit unsigned integer to an 8-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToU8(NvU16 data, NvU8 *pResult);
+
+
+/**
+ * @brief Convert a 32-bit unsigned integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS8(NvU32 data, NvS8 *pResult);
+/**
+ * @brief Convert a 32-bit unsigned integer to a 16-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS16(NvU32 data, NvS16 *pResult);
+/**
+ * @brief Convert a 32-bit unsigned integer to a 32-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS32(NvU32 data, NvS32 *pResult);
+/**
+ * @brief Convert a 32-bit unsigned integer to an 8-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToU8(NvU32 data, NvU8 *pResult);
+/**
+ * @brief Convert a 32-bit unsigned integer to a 16-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToU16(NvU32 data, NvU16 *pResult);
+
+
+/**
+ * @brief Convert a 64-bit unsigned integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS8(NvU64 data, NvS8 *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to a 16-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS16(NvU64 data, NvS16 *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to a 32-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS32(NvU64 data, NvS32 *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to a 64-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS64(NvU64 data, NvS64 *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to an 8-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU8(NvU64 data, NvU8 *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to a 16-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU16(NvU64 data, NvU16 *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to a 32-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU32(NvU64 data, NvU32 *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to a pointer-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToUPtr(NvU64 data, NvUPtr *pResult);
+/**
+ * @brief Convert a 64-bit unsigned integer to a length-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToLength(NvU64 data, NvLength *pResult);
+
+
+/**
+ * @brief Convert a pointer-sized unsigned integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS8(NvUPtr data, NvS8 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 16-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS16(NvUPtr data, NvS16 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 32-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS32(NvUPtr data, NvS32 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 64-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS64(NvUPtr data, NvS64 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to an 8-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU8(NvUPtr data, NvU8 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 16-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU16(NvUPtr data, NvU16 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 32-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU32(NvUPtr data, NvU32 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a 64-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU64(NvUPtr data, NvU64 *pResult);
+/**
+ * @brief Convert a pointer-sized unsigned integer to a length-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToLength(NvUPtr data, NvLength *pResult);
+
+
+/**
+ * @brief Convert a length-sized unsigned integer to an 8-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS8(NvLength data, NvS8 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 16-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS16(NvLength data, NvS16 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 32-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS32(NvLength data, NvS32 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 64-bit signed integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS64(NvLength data, NvS64 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to an 8-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU8(NvLength data, NvU8 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 16-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU16(NvLength data, NvU16 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 32-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU32(NvLength data, NvU32 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a 64-bit unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU64(NvLength data, NvU64 *pResult);
+/**
+ * @brief Convert a length-sized unsigned integer to a pointer-sized unsigned integer *pResult.
+ */
+PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToUPtr(NvLength data, NvUPtr *pResult);
+
+
+/// @} End core functions
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+// Place extended functions here
+
+/// @} End extended functions
+
+
+#if NVOS_IS_WINDOWS
+#include "nvport/inline/safe_win.h"
+#else
+#include "nvport/inline/safe_generic.h"
+#endif
+
+#endif // _NVPORT_SAFE_H_
+/// @}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h
new file mode 100644
index 0000000..cdc70df
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h
@@ -0,0 +1,162 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief String module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_STRING_H_
+#define _NVPORT_STRING_H_
+
+/**
+ * @defgroup NVPORT_STRING String module
+ *
+ * @brief This module contains string functionality used by other modules.
+ *
+ * @{
+ */
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Compare two strings, character by character.
+ *
+ * Will compare at most length bytes. Strings are assumed to be at least that
+ * long.
+ *
+ * Strings are allowed to overlap.
+ *
+ * @returns:
+ *  - 0 if all bytes are equal
+ *  - <0 if str1 is less than str2 at the first unequal byte.
+ *  - >0 if str1 is greater than str2 at the first unequal byte.
+ * @par Undefined:
+ * Behavior is undefined if str1 or str2 is NULL.
+ */
+NvS32 portStringCompare(const char *str1, const char *str2, NvLength length);
+/**
+ * @brief Copy a string.
+ *
+ * Will copy at most destSize bytes, stopping early if a null terminator is found
+ * or if srcSize bytes are read from the source.
+ *
+ * A null character is always written at the end of the destination string.
+ *
+ * @param dest destination buffer, of at least destSize bytes (including null terminator).
+ * @param src  source buffer, of at least srcSize bytes (including null terminator).
+ *
+ * @return The number of bytes copied, including the null terminator; at most
+ *         min(destSize, srcSize).
+ *
+ * @par Undefined:
+ * Behavior is undefined if the destination buffer is smaller than destSize.
+ * Behavior is undefined if destination and source overlap.
+ */
+NvLength portStringCopy(char *dest, NvLength destSize, const char *src, NvLength srcSize);
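Since the destination is always null-terminated and the return value is bounded by min(destSize, srcSize), a caller can detect truncation by comparing the return value against the source size. A short usage sketch; the buffer name, pSrcName, and srcNameSize are illustrative, not from the header:

~~~{.c}
char name[16];

// Copies at most sizeof(name) bytes and always null-terminates.
NvLength copied = portStringCopy(name, sizeof(name), pSrcName, srcNameSize);
if (copied < srcNameSize)
{
    // Destination was too small; the copy was truncated to fit.
}
~~~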
+/**
+ * @brief Concatenate two strings
+ *
+ * Will copy the cat string after the end of str, copying only until the str
+ * buffer is filled. str is assumed to point to a buffer of at least strSize bytes.
+ *
+ * A null character is always written at the end of the string.
+ *
+ * @return str if the concatenation succeeded.
+ *
+ * @par Undefined:
+ * Behavior is undefined if the destination buffer is smaller than strSize.
+ * Behavior is undefined if destination and source overlap.
+ */
+char *portStringCat(char *str, NvLength strSize, const char *cat, NvLength catSize);
+
+
+/**
+ * @brief Returns the index of the first NULL byte in the given string
+ */
+NvLength portStringLength(const char *str);
+
+/**
+ * @brief Returns the index of the first NULL byte in the given string, searching
+ * at most maxLength characters. If no NULL byte is found, it returns maxLength.
+ */
+NvLength portStringLengthSafe(const char *str, NvLength maxLength);
+
+
+/**
+ * @brief Converts a string from ASCII (8-bit) to UTF-16 (16-bit)
+ *
+ * Can perform the conversion in place if dest == src.
+ *
+ * @returns The number of characters in destination buffer, without the null
+ *          terminator (i.e. strlen(dest))
+ */
+NvLength portStringConvertAsciiToUtf16(NvU16 *dest, NvLength destSize, const char *src, NvLength srcSize);
+
+/**
+ * @brief Writes the hexadecimal string representation of the buffer
+ *
+ * @returns The number of characters in destination buffer, without the null
+ *          terminator (i.e. strlen(str))
+ */
+NvLength portStringBufferToHex(char *str, NvLength strSize, const NvU8 *buf, NvLength bufSize);
+
+/**
+ * @brief Convert a binary buffer into readable groups of hex digits
+ *
+ * @param groupCount - Number of groups
+ * @param groups     - How to structure the groups (in number of hex chars)
+ * @param separator  - Character to separate the groups
+ *
+ * For the traditional display of UUIDs, there would be five groups, {8,4,4,4,12},
+ * with the separator being '-'.
+ *
+ * @note Odd numbers for group sizes are not supported; they will be rounded down.
+ *
+ * @returns The number of characters in destination buffer, without the null
+ *          terminator (i.e. strlen(str))
+ */
+NvLength portStringBufferToHexGroups(char *str, NvLength strSize, const NvU8 *buf, NvLength bufSize, NvLength groupCount, const NvU32 *groups, const char *separator);
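The UUID layout called out in the comment maps directly onto the parameters. A sketch of that call, assuming a 16-byte `uuid` buffer and an output buffer sized for 32 hex digits, 4 separators, and the terminator:

~~~{.c}
static const NvU32 uuidGroups[5] = { 8, 4, 4, 4, 12 };  // hex chars per group
char uuidStr[37];                                       // 32 digits + 4 '-' + NUL

portStringBufferToHexGroups(uuidStr, sizeof(uuidStr), uuid, 16, 5, uuidGroups, "-");
// uuidStr now holds something like "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
~~~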
+
+/// @} End core functions
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+// Place extended functions here
+
+/// @} End extended functions
+
+#endif // _NVPORT_STRING_H_
+/// @}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h
new file mode 100644
index 0000000..2db3b7e
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h
@@ -0,0 +1,829 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Sync module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_SYNC_H_
+#define _NVPORT_SYNC_H_
+/**
+ * @defgroup NVPORT_SYNC Synchronization
+ * @brief This module includes synchronization primitives.
+ *
+ * @note The module provides two types of constructors:
+ *  - portSyncXXXInitialize initializes the structure in the caller-provided
+ *    memory.
+ *  - portSyncXXXCreate takes a @ref PORT_MEM_ALLOCATOR object that is used to
+ *    allocate the memory. This memory is freed when the object is destroyed.
+ *  If running in kernel mode, the provided memory (or allocator) must be
+ *  non-paged. The functions do not check this, and behavior is undefined if
+ *  the object is allocated in paged memory.
+ *
+ * Typical usage of synchronization objects is:
+ * ~~~{.c}
+ * PORT_XXX *pXxx = portSyncXxxCreate(pAllocator);
+ * if (!pXxx)
+ *     return NV_ERR_INSUFFICIENT_RESOURCES;
+ *
+ * portSyncXxxAcquire(pXxx);
+ * doCriticalSection();
+ * portSyncXxxRelease(pXxx);
+ * portSyncXxxDestroy(pXxx);
+ * ~~~
+ *
+ * @par Checked builds only:
+ * The functions will assert the needed IRQL/interrupt requirements. These are
+ * specified for every function in a "Precondition" block.
+ *
+ * @note The IRQL/interrupt context requirements listed in "Precondition" blocks
+ * are only valid for Kernel Mode builds of NvPort. Usermode builds have no such
+ * restrictions.
+ * @{
+ */
+
+#if !PORT_IS_MODULE_SUPPORTED(memory)
+#error "NvPort SYNC module requires MEMORY module to be present."
+#endif
+
+#if LOCK_VAL_ENABLED
+#define PORT_SYNC_RENAME_SUFFIX _REAL
+#include "inline/sync_rename.h"
+#endif
+
+/**
+ * Platform-specific inline implementations
+ */
+#if NVOS_IS_LIBOS
+#include "nvport/inline/sync_libos.h"
+#endif
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Initializes global SYNC tracking structures
+ *
+ * This function is called by @ref portInitialize. It is available here in case
+ * it is needed to initialize the SYNC module without initializing all the
+ * others, e.g. for unit tests.
+ */
+void portSyncInitialize(void);
+
+/**
+ * @brief Destroys global SYNC tracking structures
+ *
+ * This function is called by @ref portShutdown. It is available here in case
+ * it is needed to shut down the SYNC module without shutting down all the
+ * others, e.g. for unit tests.
+ */
+void portSyncShutdown(void);
+
+/**
+ * @brief A spinlock data type.
+ *
+ * For documentation on what a spinlock is and how it behaves see
+ * http://en.wikipedia.org/wiki/Spinlock
+ *
+ * - A valid spinlock is any which is non-NULL
+ * - Spinlocks are not recursive.
+ * - Spinlocks will not put the thread to sleep.
+ * - No pageable data or code can be accessed while holding a spinlock (@ref
+ *   portMemAllocPaged).
+ * - Spinlocks can be used in ISRs.
+ *
+ * @par Undefined:
+ * The behavior is undefined if the spinlock is acquired by one thread and
+ * released by another.
+ */
+typedef struct PORT_SPINLOCK PORT_SPINLOCK;
+/**
+ * @brief Size (in bytes) of the @ref PORT_SPINLOCK structure
+ */
+extern NvLength portSyncSpinlockSize;
+
+/**
+ * @brief Initializes a spinlock using caller-provided memory.
+ * + * Spinlocks are initialized in the released state. After a spinlock is + * initialized it can only be freed or acquired. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncSpinlockDestroy. + * + * @par Undefined: + * Initializing a spinlock multiple times is undefined.
+ * Using a spinlock before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pSpinlock is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Creates a new spinlock using the provided allocator. The newly created + * spinlock is initialized, as if @ref portSyncSpinlockInitialize was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->alloc, which may have additional restrictions. + */ +PORT_SPINLOCK *portSyncSpinlockCreate(PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Destroys a spinlock created with @ref portSyncSpinlockInitialize or + * @ref portSyncSpinlockCreate + * + * This frees any internally allocated resources that may be associated with + * the spinlock. If the spinlock was created using @ref portSyncSpinlockCreate, + * the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized spinlock.
+ * Behavior is undefined if called on a currently acquired spinlock.
+ * Behavior is undefined if any operation is performed on a spinlock that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Acquires a spinlock + * + * Blocks until the spinlock is acquired. + * + * Recursive acquires are not allowed and will result in a deadlock. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is held by the current thread + * + * @pre Windows: Any IRQL + * @pre Unix: Interrupt context is OK. + * @note Will not put the thread to sleep. + * @post Will raise the IRQL / mask interrupts + */ +void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Releases a spinlock acquired with @ref portSyncSpinlockAcquire. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is not held by the current thread
+ *
+ * @par Undefined:
+ * Behavior is undefined if the spinlock has not previously been acquired.
+ *
+ * @pre Windows: Any IRQL
+ * @pre Unix: Interrupt context is OK.
+ * @post Will restore the IRQL / interrupts
+ */
+void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock);
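The four spinlock calls above compose into a short, interrupt-safe critical section. A minimal sketch, assuming a pAllocator obtained from the MEMORY module and a hypothetical shared counter:

~~~{.c}
PORT_SPINLOCK *pLock = portSyncSpinlockCreate(pAllocator);
if (pLock == NULL)
    return NV_ERR_INSUFFICIENT_RESOURCES;

portSyncSpinlockAcquire(pLock);   // raises IRQL / masks interrupts
sharedCounter++;                  // short, non-paged, non-blocking work only
portSyncSpinlockRelease(pLock);   // restores IRQL / interrupts

portSyncSpinlockDestroy(pLock);
~~~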
+
+/**
+ * @brief A mutex data type.
+ *
+ * A PORT_MUTEX is a classic mutex that follows these rules:
+ * - Only a single thread can hold the mutex.
+ * - The thread that acquires the mutex must be the one to release it.
+ * - Failure to acquire the mutex may result in the thread blocking and not
+ *   resuming until the mutex is available.
+ * - Failure of a thread to release a mutex before it exits can result in a
+ *   deadlock if any other thread attempts to acquire it.
+ * - Mutexes are not recursive.
+ * - Mutexes may put the thread to sleep.
+ *
+ * Mutexes can be used on IRQL <= DISPATCH_LEVEL on Windows, and in
+ * non-interrupt context on Unix.
+ */
+typedef struct PORT_MUTEX PORT_MUTEX;
+
+/**
+ * @brief Size (in bytes) of the @ref PORT_MUTEX structure
+ */
+extern NvLength portSyncMutexSize;
+
+/**
+ * @brief Creates a new mutex using the provided allocator. The newly created
+ * mutex is initialized, as if @ref portSyncMutexInitialize was called.
+ *
+ * @par Checked builds only:
+ * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @return NULL on failed allocation / initialization.
+ *
+ * @pre Windows: Any IRQL
+ * @pre Unix: Non-interrupt context
+ * @pre Calls pAllocator->alloc, which may have additional restrictions.
+ */
+PORT_MUTEX *portSyncMutexCreate(PORT_MEM_ALLOCATOR *pAllocator);
+/**
+ * @brief Initializes a mutex using caller-provided memory.
+ *
+ * Mutexes are initialized in the released state. After a mutex is
+ * initialized it can only be freed or acquired.
+ *
+ * On some platforms the underlying platform code may allocate memory.
+ * This memory will be freed upon calling @ref portSyncMutexDestroy.
+ *
+ * @par Checked builds only:
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @par Undefined:
+ * Initializing a mutex multiple times is undefined.
+ * Using a mutex before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pMutex is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncMutexInitialize(PORT_MUTEX *pMutex); +/** + * @brief Destroys a mutex created with @ref portSyncMutexInitialize or + * @ref portSyncMutexCreate + * + * This frees any internally allocated resources that may be associated with + * the mutex. If the mutex was created using @ref portSyncMutexCreate, + * the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized mutex.
+ * Behavior is undefined if the mutex is currently acquired and it is + * destroyed.
+ * Behavior is undefined if any operation is performed on a mutex that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncMutexDestroy(PORT_MUTEX *pMutex); + +/** + * @brief Acquires a mutex. + * + * If the mutex is already held a call will block and the thread may be put to + * sleep until it is released. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + */ +void portSyncMutexAcquire(PORT_MUTEX *pMutex); + +/** + * @brief Attempts to acquire a mutex without blocking. + * + * A call to this function will immediately return NV_TRUE with the mutex + * acquired by the calling thread if the mutex is not held by another thread. + * It will immediately return NV_FALSE if the mutex is held by another thread. + * + * If the mutex is held by the calling thread then this call will always fail. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+NvBool NV_FORCERESULTCHECK portSyncMutexAcquireConditional(PORT_MUTEX *pMutex);
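portSyncMutexAcquireConditional is the try-lock form, and NV_FORCERESULTCHECK forces the caller to consume its result. A sketch of the usual fallback pattern; doCriticalSection comes from the module example above, while doDeferredWork is a hypothetical alternative:

~~~{.c}
if (portSyncMutexAcquireConditional(pMutex))
{
    doCriticalSection();
    portSyncMutexRelease(pMutex);
}
else
{
    // Lock is contended; do something else instead of blocking.
    doDeferredWork();
}
~~~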
+
+/**
+ * @brief Releases a mutex held by the current thread.
+ *
+ * A call to this function releases control of the mutex. Immediately on
+ * return of this function another thread will be allowed to acquire the mutex.
+ *
+ * @par Checked builds only:
+ * Will assert if pMutex == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @par Undefined:
+ * Attempting to release a mutex not held by the current thread will result in
+ * undefined behavior.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void portSyncMutexRelease(PORT_MUTEX *pMutex);
+
+PORT_INLINE void portSyncMutexReleaseOutOfOrder(PORT_MUTEX *pMutex)
+{
+    portSyncMutexRelease(pMutex);
+}
+
+/**
+ * @brief Represents a semaphore data type.
+ *
+ * This behaves as you would expect a classic semaphore to. It follows these
+ * rules:
+ * - A semaphore is initialized with a starting value
+ * - Acquiring the semaphore decrements the count. If the count is 0 it will
+ *   block until the count is non-zero.
+ * - Releasing the semaphore increments the count.
+ * - A semaphore can be acquired or released by any thread, and an
+ *   acquire/release pair is not required to be from the same thread.
+ * - PORT_SEMAPHORE is a 32-bit semaphore.
+ * - Semaphores may put the thread to sleep.
+ *
+ * Semaphores have varying IRQL restrictions on Windows, which are documented for
+ * every function separately.
+ * They can only be used in non-interrupt context on Unix.
+ */
+typedef struct PORT_SEMAPHORE PORT_SEMAPHORE;
+/**
+ * @brief Size (in bytes) of the @ref PORT_SEMAPHORE structure
+ */
+extern NvLength portSyncSemaphoreSize;
+
+/**
+ * @brief Initializes a semaphore using caller-provided memory.
+ *
+ * Semaphores are initialized with startValue.
+ *
+ * On some platforms the underlying platform code may allocate memory.
+ * This memory will be freed upon calling @ref portSyncSemaphoreDestroy.
+ *
+ * @par Checked builds only:
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @par Undefined:
+ * Initializing a semaphore multiple times is undefined.
+ * Using a semaphore before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pSemaphore is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: IRQL == PASSIVE_LEVEL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncSemaphoreInitialize(PORT_SEMAPHORE *pSemaphore, NvU32 startValue); +/** + * @brief Creates a new semaphore using the provided allocator. The newly + * created semaphore is initialized, as if @ref portSyncSemaphoreInitialize + * was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL == PASSIVE_LEVEL + * @pre Unix: Non-interrupt context + */ +PORT_SEMAPHORE *portSyncSemaphoreCreate(PORT_MEM_ALLOCATOR *pAllocator, NvU32 startValue); +/** + * @brief Destroys a semaphore created with @ref portSyncSemaphoreInitialize or + * @ref portSyncSemaphoreCreate + * + * This frees any internally allocated resources that may be associated with + * the semaphore. If the semaphore was created using + * @ref portSyncSemaphoreCreate, the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized semaphore.
+ * Behavior is undefined if the semaphore is currently acquired and it is + * destroyed.
+ * Behavior is undefined if any operation is performed on a semaphore that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncSemaphoreDestroy(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Acquires (decrements) a semaphore. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + */ +void portSyncSemaphoreAcquire(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Conditionally acquires a semaphore. + * + * A call to this function will immediately return NV_TRUE and acquire the + * semaphore if it can be done without blocking. + * + * It will immediately return NV_FALSE if acquiring the semaphore would require + * blocking. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +NvBool NV_FORCERESULTCHECK portSyncSemaphoreAcquireConditional(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Releases (increments) a semaphore. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ */
+void portSyncSemaphoreRelease(PORT_SEMAPHORE *pSemaphore);
+
+PORT_INLINE void portSyncSemaphoreReleaseOutOfOrder(PORT_SEMAPHORE *pSemaphore)
+{
+    portSyncSemaphoreRelease(pSemaphore);
+}
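Because any thread may acquire or release, Acquire/Release pair naturally into a counted producer/consumer handshake. A minimal sketch, assuming a semaphore that counts available items; queuePush and queuePop are hypothetical helpers:

~~~{.c}
// Producer:
queuePush(pQueue, pItem);
portSyncSemaphoreRelease(pItemsAvailable);   // increment the count

// Consumer:
portSyncSemaphoreAcquire(pItemsAvailable);   // blocks while the count is 0
pItem = queuePop(pQueue);
~~~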
+
+/**
+ * @brief Represents a readers-writer lock data type.
+ *
+ * AcquireRead and AcquireWrite will do a sleeping wait if the lock isn't
+ * immediately available.
+ *
+ * PORT_RWLOCK prevents starvation of both readers and writers.
+ *
+ * @par Undefined:
+ * Any irregular use will result in undefined behavior. This includes:
+ * - One thread acquiring both read and write locks
+ * - Performing operations on an uninitialized/destroyed lock
+ * - Releasing locks which weren't acquired by the releasing thread
+ * - Acquiring the same lock twice without releasing (it is not recursive)
+ *
+ * @note If you desire to upgrade the lock (shared->exclusive), you must first
+ * release the shared lock, then acquire the exclusive.
+ */
+typedef struct PORT_RWLOCK PORT_RWLOCK;
+/**
+ * @brief Size (in bytes) of the @ref PORT_RWLOCK structure
+ */
+extern NvLength portSyncRwLockSize;
+
+/**
+ * @brief Initializes a RWLock using caller-provided memory.
+ *
+ * On some platforms the underlying platform code may allocate memory.
+ * This memory will be freed upon calling @ref portSyncRwLockDestroy.
+ *
+ * @par Checked builds only:
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @par Undefined:
+ * Initializing a lock multiple times is undefined.
+ * Using a lock before it is initialized results in undefined behavior.
+ *
+ * @return
+ *  - NV_OK if successful
+ *  - NV_ERR_INVALID_POINTER if pLock is NULL
+ *  - Can return other NV_STATUS values from the OS interface layer.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ */
+NV_STATUS portSyncRwLockInitialize(PORT_RWLOCK *pLock);
+/**
+ * @brief Creates and initializes a new RWLock using the provided allocator.
+ *
+ * @par Checked builds only:
+ * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + */ +PORT_RWLOCK *portSyncRwLockCreate(PORT_MEM_ALLOCATOR *pAllocator); +/** + * @brief Acquires the read (shared) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, the thread will sleep. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + * @post Windows: Normal APCs will be disabled. + */ +void portSyncRwLockAcquireRead(PORT_RWLOCK *pLock); +/** + * @brief Conditionally acquires the read (shared) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, it will return NV_FALSE without + * blocking. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NV_TRUE if the lock was acquired. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be disabled if the lock was acquired. + */ +NvBool NV_FORCERESULTCHECK portSyncRwLockAcquireReadConditional(PORT_RWLOCK *pLock); +/** + * @brief Acquires the write (exclusive) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, the thread will sleep. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + * @post Windows: Normal APCs will be disabled. + */ +void portSyncRwLockAcquireWrite(PORT_RWLOCK *pLock); +/** + * @brief Conditionally acquires the write (exclusive) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, it will return NV_FALSE without + * blocking. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NV_TRUE if the lock was acquired. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be disabled if the lock was acquired. + */ +NvBool NV_FORCERESULTCHECK portSyncRwLockAcquireWriteConditional(PORT_RWLOCK *pLock); +/** + * @brief Releases the read (shared) lock on the given RW_LOCK + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be re-enabled. + */ +void portSyncRwLockReleaseRead(PORT_RWLOCK *pLock); + +PORT_INLINE void portSyncRwLockReleaseReadOutOfOrder(PORT_RWLOCK *pLock) +{ + portSyncRwLockReleaseRead(pLock); +} + +/** + * @brief Releases the write (exclusive) lock on the given RW_LOCK + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ * @post Windows: Normal APCs will be re-enabled.
+ */
+void portSyncRwLockReleaseWrite(PORT_RWLOCK *pLock);
+
+PORT_INLINE void portSyncRwLockReleaseWriteOutOfOrder(PORT_RWLOCK *pLock)
+{
+    portSyncRwLockReleaseWrite(pLock);
+}
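A reader/writer pairing over the same PORT_RWLOCK looks like the sketch below; note that, per the upgrade rule in the type documentation above, a reader that decides to write must drop the shared lock first. The cache names are hypothetical:

~~~{.c}
// Many concurrent readers:
portSyncRwLockAcquireRead(pLock);
value = cacheLookup(pCache, key);
portSyncRwLockReleaseRead(pLock);

// Single writer:
portSyncRwLockAcquireWrite(pLock);
cacheInsert(pCache, key, value);
portSyncRwLockReleaseWrite(pLock);
~~~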
+
+/**
+ * @brief Destroys a RWLock created with @ref portSyncRwLockCreate or
+ * @ref portSyncRwLockInitialize
+ *
+ * This frees any internally allocated resources that may be associated with
+ * the lock.
+ *
+ * @par Checked builds only:
+ * Will assert if pLock == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized lock.
+ * Behavior is undefined if the lock is currently acquired and it is destroyed.
+ * Behavior is undefined if any operation is performed on a lock that has + * been destroyed. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncRwLockDestroy(PORT_RWLOCK *pLock); + + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +/** + * @brief Creates a new fast mutex using the provided allocator. The newly + * created mutex is initialized, as if @ref portSyncExFastMutexInitialize was + * called. + * + * See @ref portSyncExFastMutexInitialize for details on fast mutex objects. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @return NULL on failed allocation / initialization.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ */
+PORT_MUTEX *portSyncExFastMutexCreate(PORT_MEM_ALLOCATOR *pAllocator);
+/**
+ * @brief Initializes a fast mutex using caller-provided memory.
+ *
+ * A fast mutex is a subtype of the @ref PORT_MUTEX object that is generally
+ * more performant, but cannot be acquired from DPCs (IRQL == DISPATCH_LEVEL),
+ * even when using @ref portSyncMutexAcquireConditional.
+ *
+ * Code allocating fast mutex objects must ensure that conditional acquire is
+ * never attempted at DISPATCH_LEVEL. In checked builds, an assert will be
+ * triggered if this is not satisfied.
+ *
+ * Other than the limitation above, fast mutex objects have the same interface
+ * as regular @ref PORT_MUTEX objects.
+ *
+ * @par Checked builds only:
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @par Undefined:
+ * Initializing a mutex multiple times is undefined.
+ * Using a mutex before it is initialized results in undefined behavior.
+ *
+ * @return
+ *  - NV_OK if successful
+ *  - NV_ERR_INVALID_POINTER if pMutex is NULL
+ *  - Can return other NV_STATUS values from the OS interface layer.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ */
+NV_STATUS portSyncExFastMutexInitialize(PORT_MUTEX *pMutex);
+
+// Fast mutexes only make sense on Windows kernel mode
+#define portSyncExFastMutexCreate_SUPPORTED     (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
+#define portSyncExFastMutexInitialize_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
+
+/**
+ * @brief Returns true if it is safe to put the current thread to sleep.
+ *
+ * Safety in this case relates only to the current interrupt level, and does not
+ * take into account any locks held by the thread that may result in deadlocks.
+ */
+NvBool portSyncExSafeToSleep(void);
+#define portSyncExSafeToSleep_SUPPORTED PORT_IS_KERNEL_BUILD
+/**
+ * @brief Returns true if it is safe to wake other threads.
+ *
+ * Safety in this case relates only to the current interrupt level.
+ */
+NvBool portSyncExSafeToWake(void);
+#define portSyncExSafeToWake_SUPPORTED PORT_IS_KERNEL_BUILD
+/**
+ * @brief Returns the platform specific implementation of the interrupt level.
+ *
+ * On platforms that have multiple interrupt levels (e.g. Windows), this will
+ * return the numeric representation that the underlying platform uses.
+ *
+ * If a platform only has a binary distinction, this will return 0 or 1.
+ *
+ * On platforms where the concept of interrupt levels does not exist, it will
+ * return 0.
+ */
+NvU64 portSyncExGetInterruptLevel(void);
+#define portSyncExGetInterruptLevel_SUPPORTED PORT_IS_KERNEL_BUILD
+
+/**
+ * @brief Disable preemption on a given CPU
+ *
+ * After calling this function, the thread will not be scheduled out of the
+ * current CPU until a call to @ref portSyncExRestorePreemption is made.
+ *
+ * The thread may still be paused to service an IRQ on the same CPU, but upon
+ * completion, execution will resume on the same CPU.
+ *
+ * @pre  Can be called at any IRQL/interrupt context
+ * @post Blocking calls are prohibited while preemption is disabled.
+ *
+ * @return Returns the previous preemption state, that should be passed onto
+ *         @ref portSyncExRestorePreemption
+ */
+NvU64 portSyncExDisablePreemption(void);
+/**
+ * @todo bug 1583359 - Implement for other platforms
+ * Only on Windows for now, needed for bug 1995797
+ */
+#define portSyncExDisablePreemption_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
+
+/**
+ * @brief Restores the previous preemption state
+ *
+ * See @ref portSyncExDisablePreemption for details
+ */
+void portSyncExRestorePreemption(NvU64 preemptionState);
+#define portSyncExRestorePreemption_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS)
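The disable/restore pair is designed to bracket short, non-blocking regions, threading the returned state through to the restore call. A minimal sketch of that shape:

~~~{.c}
NvU64 state = portSyncExDisablePreemption();
// Short, non-blocking work that must stay pinned to the current CPU.
portSyncExRestorePreemption(state);
~~~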
+
+/// @} End extended functions
+
+#include "nvport/inline/sync_tracking.h"
+
+#endif // _NVPORT_SYNC_H_
+/// @}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h
new file mode 100644
index 0000000..5b9234f
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h
@@ -0,0 +1,318 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Thread module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_THREAD_H_
+#define _NVPORT_THREAD_H_
+
+/**
+ * Platform-specific inline implementations
+ */
+#if NVOS_IS_LIBOS
+#include "nvport/inline/thread_libos.h"
+#endif
+
+/**
+ * @defgroup NVPORT_THREAD Threading module
+ *
+ * @brief This module contains basic threading functionality.
+ *
+ * @{
+ */
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Opaque structure representing a thread.
+ *
+ * The structure can be allocated on the stack.
+ */
+struct PORT_THREAD
+{
+    NvU64 threadId;
+};
+
+typedef struct PORT_THREAD PORT_THREAD;
+
+/**
+ * @brief Opaque structure representing a process.
+ *
+ * While this structure is opaque, you can still allocate it on the stack.
+ */
+struct PORT_PROCESS
+{
+    NvU64 pid;
+};
+
+typedef struct PORT_PROCESS PORT_PROCESS;
+
+/// @brief An invalid thread handle. Dependent on the OS.
+extern const PORT_THREAD PORT_THREAD_INVALID;
+/// @brief An invalid process handle. Dependent on the OS.
+extern const PORT_PROCESS PORT_PROCESS_INVALID;
+
+/**
+ * @brief Returns true if the given thread handle is valid.
+ */
+NvBool portThreadIsValid(PORT_THREAD thread);
+
+/**
+ * @brief Get the handle of the currently executing thread.
+ *
+ * @note On Windows user mode, the returned thread must be destroyed.
+ */
+PORT_THREAD portThreadGetCurrentThread(void);
+
+/**
+ * @brief Get the thread handle by the thread ID.
+ *
+ * This ID translates directly into the underlying platform's thread ID.
+ *
+ * @returns PORT_THREAD_INVALID if the ID is not valid, the thread handle if it is.
+ *
+ * @note On Windows user mode, the returned thread must be destroyed.
+ */
+PORT_THREAD portThreadGetThreadById(NvU64 threadId);
+
+/**
+ * @brief Get the id of the currently executing thread.
+ */
+NvU64 portThreadGetCurrentThreadId(void);
+
+/**
+ * @brief Get the process id of the currently executing thread.
+ */
+NvU64 portThreadGetCurrentProcessId(void);
+
+/**
+ * @brief Compare two thread handles
+ *
+ * @returns NV_TRUE if the handles are equal.
+ */
+NvBool portThreadEqual(PORT_THREAD thread1, PORT_THREAD thread2);
+
+/**
+ * @brief A thread's "main" function. The arguments are passed through a single
+ * void*, which the thread can then cast accordingly.
+ */
+typedef NvS32 (*PORT_THREAD_MAIN)(void *);
+
+/**
+ * @brief A thread constructor
+ *
+ * Creates a thread with the given main function and its argument. The created
+ * thread will immediately start executing. Any synchronization should be done
+ * in the thread body.
+ *
+ * @param [out] pThread    - The new thread's handle
+ * @param [in]  threadMain - The new thread's main() function
+ * @param [in]  argument   - The void* pointer to be passed into thread's main()
+ *
+ * @return NV_OK on success
+ *
+ * @todo Should we provide a flag to automatically destroy the thread when finished?
+ */
+NV_STATUS portThreadCreate(PORT_THREAD *pThread, PORT_THREAD_MAIN threadMain, void *argument);
+
+/**
+ * @brief A thread destructor
+ *
+ * Destroys the given thread, freeing any resources associated with it. If the
+ * specified thread has not finished its execution, this will block until it finishes.
+ *
+ * Will assert if called on a thread that hasn't been created using
+ * @ref portThreadCreate
+ */
+void portThreadDestroy(PORT_THREAD thread);
+
+/**
+ * @brief End execution of the current thread, returning the status.
+ *
+ * This behaves like the C standard exit(int) function - Execution is
+ * immediately stopped, without any stack unwinding. No resources allocated in
+ * the thread are freed. The status is returned to the parent thread.
+ *
+ * @par Kernel mode only:
+ * Will assert if called on a thread not created by @ref portThreadCreate.
+ * In usermode, this is acceptable (equivalent of calling exit() from main())
+ */
+void portThreadExit(NvS32 status);
+
+/**
+ * @brief Block the current thread until the given thread has finished.
+ *
+ * Sometimes called a thread join operation. The current thread is suspended
+ * until threadToWaitOn has completed execution, either by returning from the
+ * main function, by calling @ref portThreadExit, or by being killed by @ref
+ * portThreadKill.
+ *
+ * The current thread can also be awoken by @ref portThreadWake.
+ *
+ * @param [out, optional] pReturnStatus - The finished thread's return status.
+ */
+NV_STATUS portThreadWaitToComplete(PORT_THREAD threadToWaitOn, NvS32 *pReturnStatus);
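Create and WaitToComplete form the usual spawn/join pair around a PORT_THREAD_MAIN body. A sketch, in which workerMain, WorkItem, processWork, and pWork are hypothetical names introduced for illustration:

~~~{.c}
static NvS32 workerMain(void *pArg)
{
    WorkItem *pWork = (WorkItem *)pArg;  // hypothetical payload type
    return processWork(pWork);           // NvS32 status returned to the joiner
}

PORT_THREAD thread;
NvS32 workerStatus;
NV_STATUS status = portThreadCreate(&thread, workerMain, pWork);
if (status == NV_OK)
{
    status = portThreadWaitToComplete(thread, &workerStatus);
    portThreadDestroy(thread);
}
~~~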
+
+/**
+ * @brief Move the current thread to the end of the run queue
+ *
+ * The OS schedules other waiting threads to run before returning to the
+ * current thread.
+ *
+ * This function must not be called in interrupt context or at raised IRQL.
+ * It may not be advisable to call this function while holding various RM locks.
+ */
+void portThreadYield(void);
+
+/// @} End core functions
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+/**
+ * @brief Get the priority of a given thread as an integer
+ *
+ * The priority values are defined by the OS, but they can be compared with < and >
+ */
+NvU64 portThreadExGetPriority(NvU64 threadId);
+#define portThreadExGetPriority_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+/**
+ * @brief Set the priority of a given thread
+ *
+ * Only valid values are those returned by a previous call to @ref portThreadExGetPriority,
+ * though not necessarily on the same thread object
+ */
+void portThreadExSetPriority(NvU64 threadId, NvU64 priority);
+#define portThreadExSetPriority_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+#if PORT_IS_FUNC_SUPPORTED(portThreadExGetPriority)
+extern const NvU64 portThreadPriorityMin;
+extern const NvU64 portThreadPriorityDefault;
+extern const NvU64 portThreadPriorityLowRealtime;
+extern const NvU64 portThreadPriorityMax;
+#endif
+
+/**
+ * @brief Structure representing the processor affinity of a thread.
+ *
+ * This structure describes a thread affinity, which is a set of processors on
+ * which a thread is allowed to run. All of the processors in this set belong
+ * to the group that is identified by the cpuGroup member of the structure.
+ * The mask member contains an affinity mask that identifies the processors in
+ * the set of 64 processors.
+ */
+typedef struct PORT_THREAD_AFFINITY
+{
+    NvU64 cpuGroup;
+    NvU64 mask;
+} PORT_THREAD_AFFINITY;
+
+/**
+ * @brief Set the affinity of the current thread.
+ * @param [in]  pAffinity    - Pointer to the affinity structure.
+ * @param [out] pPreAffinity - Pointer to the previous affinity structure.
+ * @return NV_OK if successful; otherwise one of the following errors:
+ *         NV_ERR_INVALID_IRQ_LEVEL: IRQL is >= DISPATCH_LEVEL in Windows drivers.
+ *         NV_ERR_INVALID_ARGUMENT:  Either of the passed arguments is NULL.
+ */
+NV_STATUS portThreadExSetAffinity(const PORT_THREAD_AFFINITY *pAffinity,
+                                  PORT_THREAD_AFFINITY *pPreAffinity);
+#define portThreadExSetAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+/**
+ * @brief Set the affinity of the current thread with input as logical core index
+ *
+ * @param [in]  coreIndex     Logical core to which affinity needs to be set. For
+ *                            systems with more than one group, clients need to
+ *                            compute the required core index.
+ *
+ * @param [out] pPrevAffinity Pointer to previous affinity
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExSetSystemGroupAffinity(NvU32 coreIndex, PORT_THREAD_AFFINITY* pPrevAffinity);
+#define portThreadExSetSystemGroupAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
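SetSystemGroupAffinity and the revert call documented next are a save/restore pair around CPU-pinned work. A sketch, with core index 0 chosen arbitrarily:

~~~{.c}
PORT_THREAD_AFFINITY prevAffinity;
if (portThreadExSetSystemGroupAffinity(0, &prevAffinity) == NV_OK)
{
    // Work that must run on logical core 0.
    portThreadExRevertToUserGroupAffinity(&prevAffinity);
}
~~~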
+/**
+ * @brief Restores the previous affinity of the current thread
+ *
+ * @param [in] pPrevAffinity Specifies the new system affinity of the current thread.
+ *                           Set this parameter to the value that was returned by a
+ *                           previous call to portThreadExSetSystemGroupAffinity.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExRevertToUserGroupAffinity(PORT_THREAD_AFFINITY* pPrevAffinity);
+#define portThreadExRevertToUserGroupAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS)
+
+typedef enum PORT_THREAD_PROCESS_NOTIFY_EVENT
+{
+    PORT_THREAD_PROCESS_NOTIFY_EVENT_CREATE,
+    PORT_THREAD_PROCESS_NOTIFY_EVENT_EXIT
+} PORT_THREAD_PROCESS_NOTIFY_EVENT;
+
+typedef void (*PORT_THREAD_PROCESS_NOTIFY_ROUTINE)(NvU64 processId,
+    PORT_THREAD_PROCESS_NOTIFY_EVENT notifyEvent, void *pPvtData);
+/**
+ * @brief Register a callback function with the nvport thread module to get process
+ * create/exit event notifications.
+ * @param [in]  pFunc     Pointer to event callback function.
+ * @param [in]  pPvtData  Pointer to event callback function private data.
+ * @param [out] ppOldFunc Pointer to old event callback function.
+ * @param [out] ppPvtData Pointer to old event callback function private data.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExRegisterProcessNotifyRoutine(PORT_THREAD_PROCESS_NOTIFY_ROUTINE pFunc, void *pPvtData,
+    PORT_THREAD_PROCESS_NOTIFY_ROUTINE *ppOldFunc, void **ppPvtData);
+#define portThreadExRegisterProcessNotifyRoutine_SUPPORTED (NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !PORT_IS_MODS)
+
+/**
+ * @brief Unregister a callback function with the nvport thread module that was
+ * receiving process create/exit event notifications.
+ * @param [in] pOldFunc    Pointer to old event callback function which was returned
+ *                         by portThreadExRegisterProcessNotifyRoutine.
+ * @param [in] pOldPvtData Pointer to old event callback function private data which
+ *                         was returned by portThreadExRegisterProcessNotifyRoutine.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS portThreadExUnregisterProcessNotifyRoutine(PORT_THREAD_PROCESS_NOTIFY_ROUTINE pOldFunc, void* pOldPvtData);
+#define portThreadExUnregisterProcessNotifyRoutine_SUPPORTED (NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !PORT_IS_MODS)
+/// @} End extended functions
+
+#endif // _NVPORT_THREAD_H_
+/// @}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h
new file mode 100644
index 0000000..6234d61
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h
@@ -0,0 +1,254 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Util module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_UTIL_H_
+#define _NVPORT_UTIL_H_
+
+/**
+ * @defgroup NVPORT_UTIL Utilities module
+ *
+ * @brief This module contains utility functions used by other modules.
+ *
+ * Generic implementation for all functions is in util-generic.h
+ *
+ * @{
+ */
+
+/**
+ * @def PORT_UTIL_INLINE
+ *
+ * @note There are ways to declare a function without qualifiers, and then
+ * redefine it as static/extern inline, but none work across all compilers that
+ * we use. The easiest solution is to just specify the qualifiers upon function
+ * declaration. We assume all these will be inline, but that can be changed
+ * through the makefile when adding non-inline implementations:
+ *     MODULE_DEFINES += PORT_UTIL_INLINE
+ *     MODULE_SOURCES += util-impl.c
+ */
+#ifndef PORT_UTIL_INLINE
+#define PORT_UTIL_INLINE PORT_INLINE
+#endif
+
+#if NVOS_IS_LIBOS
+#include "nvport/inline/util_libos.h"
+#endif
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Returns true if the two buffers overlap.
+ *
+ * Buffer lengths are specified in the len0 and len1 params.
+ */
+PORT_UTIL_INLINE NvBool portUtilCheckOverlap(const NvU8 *pData0, NvLength len0, const NvU8 *pData1, NvLength len1);
+
+/**
+ * @brief Returns true if address is aligned to align bytes
+ *
+ * If align is not a power of two, it will return false.
+ */
+PORT_UTIL_INLINE NvBool portUtilCheckAlignment(const void *address, NvU32 align);
+
+/**
+ * @brief Returns true if num is a power of two.
+ */
+PORT_UTIL_INLINE NvBool portUtilIsPowerOfTwo(NvU64 num);
+
+/**
+ * @brief Write the 16-bit number to pBuf in Little Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteLittleEndian16(void *pBuf, NvU16 value);
+
+/**
+ * @brief Write the 32-bit number to pBuf in Little Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteLittleEndian32(void *pBuf, NvU32 value);
+
+/**
+ * @brief Write the 64-bit number to pBuf in Little Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteLittleEndian64(void *pBuf, NvU64 value);
+
+/**
+ * @brief Write the 16-bit number to pBuf in Big Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteBigEndian16(void *pBuf, NvU16 value);
+
+/**
+ * @brief Write the 32-bit number to pBuf in Big Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteBigEndian32(void *pBuf, NvU32 value);
+
+/**
+ * @brief Write the 64-bit number to pBuf in Big Endian
+ */
+PORT_UTIL_INLINE void portUtilWriteBigEndian64(void *pBuf, NvU64 value);
+
+/**
+ * @brief Efficient spinloop body that doesn't waste power.
+ *
+ * This function will spin for a very short time, then return, so it should be
+ * called as:
+ *
+ * ~~~{.c}
+ * while (bShouldSpin)
+ *     portUtilSpin();
+ * ~~~
+ */
+static NV_FORCEINLINE void portUtilSpin(void);
+
+/**
+ * @brief Returns true if the caller is currently in interrupt context.
+ *
+ * Interrupt context here means:
+ *  - Unix: Interrupts are masked
+ *  - Windows: IRQL > DISPATCH_LEVEL
+ */
+NvBool portUtilIsInterruptContext(void);
+
+/**
+ * @def portUtilGetReturnAddress()
+ * Returns the current function's return address.
+ */
+
+/**
+ * @def portUtilGetIPAddress()
+ * Returns the current instruction pointer (IP).
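+ *
+ * A hypothetical diagnostic sketch (the print macro, level, and format are
+ * illustrative assumptions, not part of this interface):
+ *
+ * ~~~{.c}
+ * // Record roughly where we are executing, e.g. for a trace log.
+ * NvUPtr ip = portUtilGetIPAddress();
+ * NV_PRINTF(LEVEL_INFO, "executing near 0x%llx\n", (NvU64)ip);
+ * ~~~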
+ */
+NV_NOINLINE NvUPtr portUtilGetIPAddress(void);
+
+/**
+ * @brief Returns the number of leading zeros, starting from the MSB.
+ *
+ * Examples:
+ *   portUtilCountLeadingZeros64(0) == 64
+ *   portUtilCountLeadingZeros64(1) == 63
+ *   portUtilCountLeadingZeros64(2) == 62
+ *   portUtilCountLeadingZeros64(0xFFFFFFFFFFFFFF00) == 0
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n);
+
+/**
+ * @brief Like @ref portUtilCountLeadingZeros64 but operating on 32-bit integers
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n);
+
+/**
+ * @brief Returns the number of trailing zeros, starting from the LSB.
+ *
+ * Examples:
+ *   portUtilCountTrailingZeros64(0) == 64
+ *   portUtilCountTrailingZeros64(1) == 0
+ *   portUtilCountTrailingZeros64(2) == 1
+ *   portUtilCountTrailingZeros64(0xFFFFFFFFFFFFFF00) == 8
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n);
+
+/**
+ * @brief Like @ref portUtilCountTrailingZeros64 but operating on 32-bit integers
+ */
+PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n);
+
+/// @} End core functions
+
+#include <stddef.h> /* NULL */
+
+/**
+ * @name Extended Functions
+ * @{
+ */
+
+/**
+ * @brief Returns a return address up the stack of the current function.
+ *
+ * @param level The number of levels up the stack to go.
+ *          level == 0 - Gives the current IP.
+ *          level == 1 - The current function's return address, same as
+ *                       @ref portUtilGetReturnAddress
+ */
+NV_NOINLINE NvUPtr portUtilExGetStackTrace(NvU32 level);
+
+#define portUtilExSpinNs_SUPPORTED  PORT_IS_MODULE_SUPPORTED(time)
+#define portUtilExDelayMs_SUPPORTED PORT_IS_MODULE_SUPPORTED(time)
+
+/**
+ * @brief Spin for the given number of nanoseconds.
+ *
+ * Utilizes @ref portUtilSpin to spin for the given duration, without putting
+ * the thread to sleep.
+ */
+void portUtilExSpinNs(NvU32 nanoseconds);
+
+/**
+ * @brief Delay the thread execution for the given duration in milliseconds.
+ *
+ * Unlike @ref portUtilExSpinNs, this function may put the thread to sleep.
+ */
+void portUtilExDelayMs(NvU32 milliseconds);
+
+#if (NVCPU_IS_FAMILY_X86 || NVCPU_IS_PPC64LE || NVCPU_IS_PPC || NVCPU_IS_AARCH64) && !defined(NV_MODS)
+/**
+ * @brief Gets the timestamp counter.
+ *
+ * @note This function is not serialized, and can be reordered by the CPU or compiler.
+ * @note On Intel "pre-Nehalem multi-core" CPUs and all multi-socket systems, time
+ * may not be synced across all the cores, and this function may return timestamps
+ * that are not monotonically increasing.
+ * @note On some old Intel CPUs (P3/P4), the timestamp counter was not incremented
+ * at a fixed clock rate, but Intel fixed this with the "invariant TSC" in late P4+
+ * chips.
+ */
+PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void);
+#endif
+
+#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) && PORT_IS_MODULE_SUPPORTED(atomic)
+/**
+ * @brief Gets the timestamp counter.
+ *
+ * Unlike @ref portUtilExReadTimestampCounter, this function serializes
+ * the reading of the timestamp counter to prevent both compiler and CPU
+ * reordering.
+ * @note Other than the serialization behavior, this function has the same issues as
+ * @ref portUtilExReadTimestampCounter.
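+ *
+ * A minimal measurement sketch (the workload function is a hypothetical
+ * placeholder, and converting cycles to wall-clock time is platform-specific):
+ *
+ * ~~~{.c}
+ * NvU64 begin, cycles;
+ * begin = portUtilExReadTimestampCounterSerialized();
+ * doMeasuredWork();  // hypothetical workload under measurement
+ * cycles = portUtilExReadTimestampCounterSerialized() - begin;
+ * ~~~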
+ */
+static NV_FORCEINLINE NvU64 portUtilExReadTimestampCounterSerialized(void);
+#endif
+/// @} End extended functions
+
+#include "nvport/inline/util_generic.h"
+#include "nvport/inline/util_valist.h"
+
+#endif // _NVPORT_UTIL_H_
+/// @}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h
new file mode 100644
index 0000000..c1ef418
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h
@@ -0,0 +1,289 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file poolalloc.h
+ * @brief This file contains the interfaces for the pool allocator:
+ *        a chained sub-allocator originally designed to sub-allocate GPU
+ *        frame buffer memory given out by PMA (physical memory allocator).
+ *
+ * The only requirement on a node in the chained allocator is that the ratio
+ * between upstreamPageSize and allocPageSize is less than or equal to 64.
+ *
+ * @bug Make more abstract -- fix up the variable names
+ */
+
+
+#ifndef _NV_POOLALLOC_H_
+#define _NV_POOLALLOC_H_
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#include "nvport/nvport.h"
+#include "containers/list.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct poolnode POOLNODE;
+
+/*!
+ * Each node corresponds to one page of upstreamPageSize.
+ * The pool allocator sub-allocates from each of these pages.
+ */
+struct poolnode
+{
+    NvU64 pageAddr;     // Address of the page to sub-allocate
+    NvU64 bitmap;       // The bitmap for this page. Only used if the
+                        // node represents a partially allocated page
+    POOLNODE *pParent;  // The upstream pool node in case this node is
+                        // allocated from the upper pool.
+    ListNode node;      // For intrusive lists.
+};
+
+MAKE_INTRUSIVE_LIST(PoolNodeList, POOLNODE, node);
+
+/*!
+ * The handle contains a generic metadata field that is needed for fast
+ * access.
In the case of a linked list implementation of the pool allocator,
+ * the metadata is the pointer to the node that contains the page it was
+ * sub-allocated from.
+ */
+typedef struct poolallocHandle
+{
+    NvU64 address;    // The base address for this chunk
+    void *pMetadata;  // The opaque metadata for storing necessary info
+} POOLALLOC_HANDLE;
+
+
+// non-intrusive list of page handles
+MAKE_LIST(PoolPageHandleList, POOLALLOC_HANDLE);
+
+/*!
+ * @brief Callback function to upstream allocators for allocating new pages
+ *
+ * This callback only allocates one page at a time right now
+ *
+ * @param[in]  ctxPtr    Provides context to upstream allocator
+ * @param[in]  pageSize  Not really needed. For debugging only
+ * @param[out] pPage     The output page handle from upstream
+ *
+ * @return NV_OK if the page was successfully allocated
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid or size info is not a
+ *         multiple of SMALLEST_PAGE_SIZE
+ *
+ */
+typedef NV_STATUS (*allocCallback_t)(void *ctxPtr, NvU64 pageSize,
+                                     POOLALLOC_HANDLE *pPage);
+
+/*!
+ * @brief Callback function to upstream allocators for freeing unused pages
+ *
+ * This callback only frees one page at a time right now
+ *
+ * @param[in] ctxPtr    Provides context to upstream allocator
+ * @param[in] pageSize  Not really needed. For debugging only
+ * @param[in] pPage     The input page handle to be freed
+ *
+ */
+typedef void (*freeCallback_t)(void *ctxPtr, NvU64 pageSize, POOLALLOC_HANDLE *pPage);
+
+/*!
+ * Structure representing a pool.
+ */
+typedef struct poolalloc
+{
+    PoolNodeList freeList;     // List of nodes representing free pages
+    PoolNodeList fullList;     // List of nodes representing fully allocated pages
+    PoolNodeList partialList;  // List of nodes representing partially allocated pages
+
+    PORT_MEM_ALLOCATOR *pAllocator;
+
+    struct
+    {
+        allocCallback_t allocCb;  // Callback to upstream allocator
+        freeCallback_t freeCb;    // Callback to free pages
+        void *pUpstreamCtx;       // The context to pass to upstream allocator
+    } callBackInfo;
+
+    NvU32 upstreamPageSize;  // Page size for upstream allocations
+    NvU32 allocPageSize;     // Page size to give out
+    NvU32 ratio;             // Ratio == upstreamPageSize / allocPageSize
+    NvU32 flags;             // POOLALLOC_FLAGS_*
+} POOLALLOC;
+
+
+/*!
+ * Dump the lists maintained by the pools.
+ */
+void poolAllocPrint(POOLALLOC *pPool);
+
+/*!
+ * If _AUTO_POPULATE is set to ENABLE, poolAllocate will call the upstream function
+ * to repopulate the pool when it runs out of memory. If set to DISABLE, poolAllocate
+ * will fail when it runs out of memory. By default this is disabled: for use cases
+ * like page tables or context buffers, the upstream function can call into PMA with
+ * the GPU lock held, which has a possibility of deadlocking.
+ */
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE          1:0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_DEFAULT  0x0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_DISABLE  0x0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_ENABLE   0x1
+
+/*!
+ * @brief This function initializes a pool allocator object
+ *
+ * This function establishes a link from this allocator to its upstream
+ * allocator by registering a callback function that lazily allocates memory
+ * if needed.
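+ *
+ * A minimal usage sketch; the upstream callbacks (pmaAllocCb/pmaFreeCb), their
+ * context pointer, and the PORT_MEM_ALLOCATOR instance are assumptions made
+ * for illustration, not part of this header:
+ *
+ * ~~~{.c}
+ * // 64K upstream pages sub-allocated as 4K pages (ratio 16 <= 64).
+ * POOLALLOC *pPool = poolInitialize(0x10000, 0x1000,
+ *                                   pmaAllocCb, pmaFreeCb, pPmaCtx,
+ *                                   pPortAllocator, 0);
+ * if (pPool != NULL)
+ * {
+ *     POOLALLOC_HANDLE page;
+ *     if ((poolReserve(pPool, 4) == NV_OK) &&
+ *         (poolAllocate(pPool, &page) == NV_OK))
+ *     {
+ *         poolFree(pPool, &page);
+ *     }
+ *     poolDestroy(pPool);
+ * }
+ * ~~~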
+ *
+ * @param[in] upstreamPageSize  The page size granularity managed by
+ *                              the allocator
+ * @param[in] allocPageSize     The page size to hand out
+ * @param[in] allocCb           The allocation callback function
+ * @param[in] freeCb            The free callback function
+ * @param[in] pUpstreamCtxPtr   The context pointer for the upstream
+ *                              allocator, passed back on callback
+ * @param[in] pAllocator        The PORT_MEM_ALLOCATOR used for internal
+ *                              structures
+ * @param[in] flags             POOLALLOC_FLAGS_*
+ *
+ * @return A pointer to a POOLALLOC structure if the initialization
+ *         succeeded; NULL otherwise
+ *
+ */
+
+POOLALLOC *poolInitialize(NvU32 upstreamPageSize, NvU32 allocPageSize,
+    allocCallback_t allocCb, freeCallback_t freeCb, void *pUpstreamCtxPtr,
+    PORT_MEM_ALLOCATOR *pAllocator, NvU32 flags);
+
+
+/*!
+ * @brief Reserves numPages from the upstream allocator. After the call,
+ * freeListSize will be greater than or equal to numPages.
+ *
+ * Since it will call into the upstream allocator, the page size of those
+ * pages will be the upstream page size.
+ *
+ * @param[in] pPool     The pool allocator
+ * @param[in] numPages  Number of pages to reserve
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ *
+ */
+NV_STATUS poolReserve(POOLALLOC *pPool, NvU64 numPages);
+
+
+/*!
+ * @brief This call will give back any free pages. After the call,
+ * freeListSize will be less than or equal to preserveNum.
+ *
+ * If the allocator already has preserveNum or fewer free pages before
+ * the call, this function will simply return.
+ *
+ * @param[in] pPool        The pool allocator to trim from
+ * @param[in] preserveNum  The number of pages that we try to preserve
+ */
+void poolTrim(POOLALLOC *pPool, NvU64 preserveNum);
+
+
+/*!
+ * @brief This function allocates memory from the allocator and returns one
+ * page of the fixed allocPageSize as specified in the initialization function
+ *
+ * The implementation does not guarantee the allocated pages are contiguous.
+ * Although there are no potential synchronization issues, if two allocations
+ * happen to lie across upstream page boundaries, the allocation will most
+ * likely be discontiguous.
+ *
+ * This function will also call back to the upstream allocator to get more
+ * pages if it does not have enough pages already reserved.
+ *
+ * @param[in]  pPool        The pool allocator
+ * @param[out] pPageHandle  The allocation handle that contains address and
+ *                          metadata for optimization
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ */
+NV_STATUS poolAllocate(POOLALLOC *pPool, POOLALLOC_HANDLE *pPageHandle);
+
+
+/*!
+ * @brief This function allocates memory from the allocator and returns numPages
+ * of the fixed allocPageSize as specified in the initialization function
+ *
+ * These pages are allocated contiguously and the single start address is returned.
+ * Although there are no potential synchronization issues, if two allocations
+ * happen to lie across upstream page boundaries, the allocation will most
+ * likely be discontiguous.
+ *
+ * This function will not call back to the upstream allocator to get more pages,
+ * as it relies on a single chunk of free pages to make contiguous allocations.
+ * So the maximum number of pages that can be allocated contiguously is the
+ * number of pages that fit in the upstream page size, i.e., the "ratio" of
+ * this pool.
+ *
+ * @param[in]  pPool            The pool allocator
+ * @param[in]  numPages         The number of pages requested to be allocated
+ * @param[out] pPageHandleList  The allocation handles that contain addresses and
+ *                              metadata for optimization
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ */
+NV_STATUS poolAllocateContig(POOLALLOC *pPool, NvU32 numPages, PoolPageHandleList *pPageHandleList);
+
+/*!
+ * @brief This function frees the page based on the allocPageSize
+ *
+ * @param[in] pPool        The pool allocator
+ * @param[in] pPageHandle  The allocation handle that contains address and
+ *                         metadata for optimization
+ *
+ */
+void poolFree(POOLALLOC *pPool, POOLALLOC_HANDLE *pPageHandle);
+
+
+/*!
+ * @brief Destroys the pool allocator and frees memory
+ */
+void poolDestroy(POOLALLOC *pPool);
+
+/*!
+ * @brief Returns the lengths of a pool's lists
+ */
+void poolGetListLength(POOLALLOC *pPool, NvU32 *pFreeListLength,
+                       NvU32 *pPartialListLength, NvU32 *pFullListLength);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _NV_POOLALLOC_H_ */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
new file mode 100644
index 0000000..e574ca3
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
@@ -0,0 +1,3 @@
+
+#include "g_prereq_tracker_nvoc.h"
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h
new file mode 100644
index 0000000..7407e1c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h
@@ -0,0 +1,372 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_resserv_nvoc.h"
+
+#ifndef _RESSERV_H_
+#define _RESSERV_H_
+
+#include "nvoc/object.h"
+
+#include "containers/list.h"
+#include "containers/map.h"
+#include "containers/multimap.h"
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+#include "nvos.h"
+#include "nvsecurityinfo.h"
+#include "rs_access.h"
+
+#if LOCK_VAL_ENABLED
+#include "lockval/lockval.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (RS_STANDALONE)
+#include <stdio.h>
+
+#ifndef NV_PRINTF
+extern int g_debugLevel;
+#define NV_PRINTF(level, format, ...) if (g_debugLevel) { printf(format, ##__VA_ARGS__); }
+#endif
+#include "utils/nvprintf.h"
+#endif
+
+//
+// Forward declarations
+//
+typedef struct RsServer RsServer;
+typedef struct RsDomain RsDomain;
+typedef struct CLIENT_ENTRY CLIENT_ENTRY;
+typedef struct RsResourceDep RsResourceDep;
+typedef struct RsResourceRef RsResourceRef;
+typedef struct RsInterMapping RsInterMapping;
+typedef struct RsCpuMapping RsCpuMapping;
+
+// RS-TODO INTERNAL and EXTERNAL params should be different structures
+typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS_INTERNAL;
+typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS_INTERNAL;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS;
+typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS_INTERNAL;
+typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS;
+typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS_INTERNAL;
+typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS_INTERNAL;
+typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS;
+typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL;
+typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS_INTERNAL;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS;
+typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_LEGACY_CONTROL_PARAMS;
+typedef struct RS_LEGACY_ALLOC_PARAMS RS_LEGACY_ALLOC_PARAMS;
+typedef struct RS_LEGACY_FREE_PARAMS RS_LEGACY_FREE_PARAMS;
+
+typedef struct RS_CPU_MAP_PARAMS RS_CPU_MAP_PARAMS;
+typedef struct RS_CPU_UNMAP_PARAMS RS_CPU_UNMAP_PARAMS;
+typedef struct RS_INTER_MAP_PARAMS RS_INTER_MAP_PARAMS;
+typedef struct RS_INTER_UNMAP_PARAMS RS_INTER_UNMAP_PARAMS;
+
+// Forward declarations for structs defined by user
+typedef struct RS_RES_MAP_TO_PARAMS RS_RES_MAP_TO_PARAMS;
+typedef struct RS_RES_UNMAP_FROM_PARAMS RS_RES_UNMAP_FROM_PARAMS;
+typedef struct RS_INTER_MAP_PRIVATE RS_INTER_MAP_PRIVATE;
+typedef struct RS_INTER_UNMAP_PRIVATE RS_INTER_UNMAP_PRIVATE;
+typedef struct RS_CPU_MAPPING_PRIVATE RS_CPU_MAPPING_PRIVATE;
+
+typedef struct RS_CPU_MAPPING_BACK_REF RS_CPU_MAPPING_BACK_REF;
+typedef struct RS_INTER_MAPPING_BACK_REF RS_INTER_MAPPING_BACK_REF;
+typedef struct RS_FREE_STACK RS_FREE_STACK;
+typedef struct CALL_CONTEXT CALL_CONTEXT;
+typedef struct ACCESS_CONTROL ACCESS_CONTROL;
+typedef struct RS_ITERATOR RS_ITERATOR;
+typedef struct RS_ORDERED_ITERATOR RS_ORDERED_ITERATOR;
+typedef struct RS_SHARE_ITERATOR RS_SHARE_ITERATOR;
+typedef struct API_STATE API_STATE;
+typedef struct RS_LOCK_INFO RS_LOCK_INFO;
+typedef struct RS_CONTROL_COOKIE RS_CONTROL_COOKIE;
+typedef NV_STATUS RsCtrlFunc(struct RS_RES_CONTROL_PARAMS_INTERNAL*);
+
+class RsClient;
+class RsResource;
+class RsShared;
+
+MAKE_LIST(RsResourceRefList,
RsResourceRef*);
+MAKE_LIST(RsResourceList, RsResource*);
+MAKE_LIST(RsHandleList, NvHandle);
+MAKE_LIST(RsClientList, CLIENT_ENTRY*);
+MAKE_LIST(RsShareList, RS_SHARE_POLICY);
+MAKE_MULTIMAP(RsIndex, RsResourceRef*);
+
+typedef NV_STATUS (*CtrlImpl_t)(RsClient*, RsResource*, void*);
+
+typedef void *PUID_TOKEN;
+
+//
+// Defines
+//
+
+/// Domain handles must start at this base value
+#define RS_DOMAIN_HANDLE_BASE 0xD0D00000
+
+/// Client handles must start at this base value
+#define RS_CLIENT_HANDLE_BASE 0xC1D00000
+
+///
+/// Internal client handles must start at either of these
+/// two base values
+///
+#define RS_CLIENT_INTERNAL_HANDLE_BASE 0xC1E00000
+
+#define RS_CLIENT_INTERNAL_HANDLE_BASE_EX 0xC1F00000
+
+//
+// Print a warning if any client's resource count exceeds this
+// threshold. Unless this was intentional, this is likely a client bug.
+//
+#define RS_CLIENT_RESOURCE_WARNING_THRESHOLD 100000
+
+
+/// 0xFFFF max client handles.
+#define RS_CLIENT_HANDLE_BUCKET_COUNT 0x400 // 1024
+#define RS_CLIENT_HANDLE_BUCKET_MASK  0x3FF
+
+
+/// The default maximum number of domains a resource server can allocate
+#define RS_MAX_DOMAINS_DEFAULT 4096
+
+/// The maximum length of a line of ancestry for resource references
+#define RS_MAX_RESOURCE_DEPTH 6
+
+/// RS_LOCK_FLAGS
+#define RS_LOCK_FLAGS_NO_TOP_LOCK               NVBIT(0)
+#define RS_LOCK_FLAGS_NO_CLIENT_LOCK            NVBIT(1)
+#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1          NVBIT(2)
+#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2          NVBIT(3)
+#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3          NVBIT(4)
+#define RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK NVBIT(5)
+#define RS_LOCK_FLAGS_FREE_SESSION_LOCK         NVBIT(6)
+
+/// RS_LOCK_STATE
+#define RS_LOCK_STATE_TOP_LOCK_ACQUIRED        NVBIT(0)
+#define RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED   NVBIT(1)
+#define RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED   NVBIT(2)
+#define RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED   NVBIT(3)
+#define RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK NVBIT(6)
+#define RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED     NVBIT(7)
+#define RS_LOCK_STATE_SESSION_LOCK_ACQUIRED    NVBIT(8)
+
+/// RS_LOCK_RELEASE
+#define RS_LOCK_RELEASE_TOP_LOCK      NVBIT(0)
+#define RS_LOCK_RELEASE_CLIENT_LOCK   NVBIT(1)
+#define RS_LOCK_RELEASE_CUSTOM_LOCK_1 NVBIT(2)
+#define RS_LOCK_RELEASE_CUSTOM_LOCK_2 NVBIT(3)
+#define RS_LOCK_RELEASE_CUSTOM_LOCK_3 NVBIT(4)
+#define RS_LOCK_RELEASE_SESSION_LOCK  NVBIT(5)
+
+/// API enumerations used for locking knobs
+typedef enum
+{
+    RS_LOCK_CLIENT   = 0,
+    RS_LOCK_TOP      = 1,
+    RS_LOCK_RESOURCE = 2,
+    RS_LOCK_CUSTOM_3 = 3,
+} RS_LOCK_ENUM;
+
+typedef enum
+{
+    RS_API_ALLOC_CLIENT   = 0,
+    RS_API_ALLOC_RESOURCE = 1,
+    RS_API_FREE_RESOURCE  = 2,
+    RS_API_MAP            = 3,
+    RS_API_UNMAP          = 4,
+    RS_API_INTER_MAP      = 5,
+    RS_API_INTER_UNMAP    = 6,
+    RS_API_COPY           = 7,
+    RS_API_SHARE          = 8,
+    RS_API_CTRL           = 9,
+    RS_API_MAX,
+} RS_API_ENUM;
+
+NV_STATUS indexAdd(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef);
+NV_STATUS indexRemove(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef);
+
+//
+// Externs
+//
+/**
+ * NVOC wrapper for constructing resources of a given type
+ *
+ * @param[in]    pAllocator   Allocator for the resource object
+ * @param[in]    pCallContext Caller context passed to resource constructor
+ * @param[inout] pParams      Resource allocation parameters
+ * @param[out]   ppResource   New resource object
+ */
+extern NV_STATUS resservResourceFactory(PORT_MEM_ALLOCATOR *pAllocator, CALL_CONTEXT *pCallContext,
+                                        RS_RES_ALLOC_PARAMS_INTERNAL *pParams, RsResource **ppResource);
+
+/**
+ * NVOC wrapper for constructing an application-specific client.
+ */ +extern NV_STATUS resservClientFactory(PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams, RsClient **ppRsClient); + +/** + * Validate the UID/PID security token of the current user against a client's security token. + * + * This will be obsolete after phase 1. + * + * @param[in] pClientToken + * @param[in] pCurrentToken + * + * @returns NV_OK if the current user's security token matches the client's security token + */ +extern NV_STATUS osValidateClientTokens(PSECURITY_TOKEN pClientToken, PSECURITY_TOKEN pCurrentToken); + +/** + * Get the security token of the current user for the UID/PID security model. + * + * This will be obsolete after phase 1. + */ +extern PSECURITY_TOKEN osGetSecurityToken(void); + +/** + * TLS entry id for call contexts. All servers will use the same id. + */ +#define TLS_ENTRY_ID_RESSERV_CALL_CONTEXT TLS_ENTRY_ID_RESSERV_1 + +// +// Structs +// +struct RS_FREE_STACK +{ + RS_FREE_STACK *pPrev; + RsResourceRef *pResourceRef; +}; + +struct CALL_CONTEXT +{ + RsServer *pServer; ///< The resource server instance that owns the client + RsClient *pClient; ///< Client that was the target of the call + RsResourceRef *pResourceRef; ///< Reference that was the target of the call + RsResourceRef *pContextRef; ///< Reference that may be used to provide more context [optional] + RS_LOCK_INFO *pLockInfo; ///< Saved locking context information for the call + API_SECURITY_INFO secInfo; + RS_RES_CONTROL_PARAMS_INTERNAL *pControlParams; ///< parameters of the call [optional] +}; + +typedef enum { + RS_ITERATE_CHILDREN, ///< Iterate over a RsResourceRef's children + RS_ITERATE_DESCENDANTS, ///< Iterate over a RsResourceRef's children, grandchildren, etc. (unspecified order) + RS_ITERATE_CACHED, ///< Iterate over a RsResourceRef's cache + RS_ITERATE_DEPENDANTS, ///< Iterate over a RsResourceRef's dependants +} RS_ITER_TYPE; + +typedef enum +{ + LOCK_ACCESS_READ, + LOCK_ACCESS_WRITE, +} LOCK_ACCESS_TYPE; + + + +/** + * Access control information. This information will be filled out by the user + * of the Resource Server when allocating a client or resource. 
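+ *
+ * A hypothetical initialization sketch (the privilege level shown is an
+ * assumed RS_PRIV_LEVEL value, used purely for illustration):
+ *
+ * ~~~{.c}
+ * ACCESS_CONTROL accessControl;
+ * accessControl.privilegeLevel = RS_PRIV_LEVEL_USER;  // assumed enum value
+ * accessControl.pSecurityToken = osGetSecurityToken();
+ * ~~~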
+ */ +struct ACCESS_CONTROL +{ + /** + * The privilege level of this access control + */ + RS_PRIV_LEVEL privilegeLevel; + + /** + * Opaque pointer for storing a security token + */ + PSECURITY_TOKEN pSecurityToken; +}; + +// +// Utility wrappers for locking validator +// +#if LOCK_VAL_ENABLED +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) \ + do { NV_ASSERT_OK(lockvalLockInit((lock), (lockClass), (inst))); } while(0) + +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireRead((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_RLOCK); \ +} while(0) + +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireWrite((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_WLOCK); \ +} while(0) + +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseRead((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseWrite((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#else +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do { portSyncRwLockAcquireRead((lock)); } while(0) +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do { portSyncRwLockAcquireWrite((lock)); } while(0) +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseRead((lock)); } while(0) +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseWrite((lock)); } while(0) +#endif + +#define RS_RWLOCK_RELEASE_READ(lock, validator) RS_RWLOCK_RELEASE_READ_EXT(lock, validator, NV_FALSE) +#define RS_RWLOCK_RELEASE_WRITE(lock, validator) RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, NV_FALSE) + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h new file mode 100644 index 0000000..e75da74 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h @@ -0,0 +1,234 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef RS_ACCESS_MAP_H
+#define RS_ACCESS_MAP_H
+
+#include "nvstatus.h"
+#include "nvtypes.h"
+
+#include "containers/map.h"
+#include "resserv/resserv.h"
+#include "resserv/rs_access_rights.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * @brief Returns the client's access mask for the resource, returning NULL if
+ *        the resource is not owned by the client
+ *
+ * @param[in] pResourceRef The resource whose access mask is being checked
+ * @param[in] pClient The client accessing the resource
+ *
+ * @return The resource's access rights mask, or
+ * @return NULL if pClient does not own pResourceRef
+ */
+RS_ACCESS_MASK *rsAccessLookup(RsResourceRef *pResourceRef, RsClient *pClient);
+
+/*!
+ * @brief Fills a mask with all rights available to a client on a resource.
+ *        Gets both rights directly available through the access mask,
+ *        as well as rights shared by the resource.
+ *
+ * @param[in] pResourceRef
+ * @param[in] pClient
+ * @param[out] pAvailableRights The set of access rights available for this client on this resource
+ *
+ * @return none
+ */
+void rsAccessGetAvailableRights(RsResourceRef *pResourceRef, RsClient *pClient,
+                                RS_ACCESS_MASK *pAvailableRights);
+
+/*!
+ * @brief Perform an access rights check on a target resource
+ *
+ * This function should be used to determine whether sufficient access
+ * rights are already present, NOT whether access rights should be granted.
+ * It will not update any state on its own.
+ *
+ * For each of the required access rights, the invoking client must hold
+ * that access right on the target resource.
+ *
+ * @param[in] pResourceRef A reference to the target resource for which we are
+ *                         checking access rights
+ * @param[in] pInvokingClient The client that is requesting access rights
+ * @param[in] pRightsRequired The set of access rights that the invoking client
+ *                            should have on the target resource
+ *
+ * @return NV_OK if the invoking client has the required access rights on the
+ *         target resource
+ * @return NV_ERR_INSUFFICIENT_PERMISSIONS if the invoking client does not have
+ *         the required access rights on the target resource
+ */
+NV_STATUS rsAccessCheckRights(RsResourceRef *pResourceRef, RsClient *pInvokingClient,
+                              const RS_ACCESS_MASK *pRightsRequired);
+
+/*!
+ * @brief Update what access rights are currently owned on a target resource
+ *        based on the target resource's current privilege.
+ *
+ * This function should be used to update the access rights currently owned
+ * by the target resource. Most access rights are only obtained once and don't
+ * disappear/reappear. However, the RS_ACCESS_FLAG_UNCACHED_CHECK flag can be
+ * used to indicate access rights that are present/not present based on the target
+ * resource's current level of privilege, NOT what the level of privilege was when
+ * the access right was initially requested. This function is useful for updating
+ * which access rights are owned when accounting for uncached access rights.
+ *
+ * @param[in] pResourceRef A reference to the target resource for which we are
+ *                         checking access rights
+ * @param[in] pInvokingClient The client to check level of access with
+ * @param[in] pRightsToUpdate If non-NULL, only access rights set in this mask
+ *                            will be updated
+ *
+ * @return none
+ */
+void rsAccessUpdateRights(RsResourceRef *pResourceRef, RsClient *pInvokingClient,
+                          const RS_ACCESS_MASK *pRightsToUpdate);
+
+/*!
+ * @brief Searches a resource's share list for an entry equal to the
+ *        passed in share policy, as defined by rsSharePolicyEquals
+ *
+ * @param[in] pShareList The RsShareList to be searched
+ * @param[in] pSharePolicy The policy to be found
+ *
+ * @return A pointer to the corresponding policy, or
+ * @return NULL if no matching entry is found
+ */
+RS_SHARE_POLICY *rsShareListLookup(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy);
+
+/*!
+ * @brief Adds a new share policy to a resource's share list, or merges into
+ *        an existing policy, if possible
+ *
+ * @param[in] pShareList The RsShareList to be searched
+ * @param[in] pSharePolicy The policy to be added to the list, may be merged with
+ *            another policy with a matching pSharePolicy->type and ->target.
+ *            In this case, ->accessMask for the existing entry and the
+ *            new pSharePolicy will be merged with a union operation.
+ * @param[out] pAccessMask The rights now shared for this share policy, may or
+ *             may not match pSharePolicy->accessMask if merged with an existing policy.
+ *             User may pass NULL, in which case nothing is written into this.
+ *
+ * @return NV_OK if the operation succeeded,
+ * @return NV_ERR_NO_MEMORY if a new element needed to be added to the list, but
+ *         insufficient memory was present to allocate one
+ */
+NV_STATUS rsShareListInsert(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy,
+                            RS_ACCESS_MASK *pAccessMask);
+
+/*!
+ * @brief Removes certain rights from being shared in a share policy entry
+ *        from a resource's RsShareList.
+ *
+ * @param[in] pShareList The RsShareList to be searched
+ * @param[in] pSharePolicy The policy to be removed from the list, matched using
+ *            pSharePolicy->type and ->target. Only rights specified in
+ *            pSharePolicy->accessMask are revoked, others will remain.
+ *            Use RS_ACCESS_MASK_FILL for a full mask to revoke all rights.
+ * @param[out] pAccessMask The rights still shared for this share policy, may or
+ *             may not be empty.
+ *             User may pass NULL, in which case nothing is written into this.
+ *
+ * @return none
+ */
+void rsShareListRemove(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy,
+                       RS_ACCESS_MASK *pAccessMask);
+
+/*!
+ * @brief Copy one share list into another.
+ *        Note that this does not replace the Dst list if it is not empty;
+ *        elements will be appended onto any existing list.
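+ *
+ * A minimal call sketch (the listInit initializer and the allocator
+ * variable are assumptions based on the containers library):
+ *
+ * ~~~{.c}
+ * RsShareList dstList;
+ * listInit(&dstList, pAllocator);  // assumed containers/list.h initializer
+ * NV_STATUS status = rsShareListCopy(&dstList, pSrcList);
+ * ~~~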
+ * + * @param[in] pShareListDst The list to copy into + * @param[in] pShareListSrc The list to copy from + * + * @return NV_ERR_NO_MEMORY, NV_OK + */ +NV_STATUS rsShareListCopy(RsShareList *pShareListDst, RsShareList *pShareListSrc); + +/*! + * @brief Returns the list which should be used for a resource's sharing + * In order, selects either the resource's own list, the client's inherited + * list, or the server's global default list. + * + * @param[in] pResourceRef + * @param[in] pServer + * + * @return A pointer to the relevant share list + * @return NULL if no list is available, and no pServer was provided. + */ +RsShareList * rsAccessGetActiveShareList(RsResourceRef *pResourceRef, RsServer *pServer); + +/*! + * @brief Attempts to grant a set of requested access rights on this resource. + * + * This function will attempt to grant the rights specified in pRightsRequested + * to the client referred to by pClient. If successful, it will update the + * access rights of the target resource referred to by pResourceRef. + * + * The resAccessCallback method on the target resource will be invoked to + * perform checks. This requires that the target resource + * pResourceRef->pResource already be initialized. + * + * If pRightsRequested is non-NULL, then the call will return an error code if + * it is unable to grant any of the requested rights. + * + * If pRightsRequested is NULL, then the call will ignore any failure to + * grant, taking a "best-effort" approach to granting access rights. The + * rights requested will be determined as follows: + * + * - If pResourceRef is a client resource, the function will attempt to + * request all possible access rights + * - For any other resource, the function will attempt to request the + * same set of access rights held by the invoking client on the parent + * resource + * + * @param[in] pResourceRef The target resource reference on which access + * rights will be granted + * @param[in] pCallContext Information about the call context + * @param[in] pInvokingClient The client requesting the access right + * @param[in] pRightsRequested The set of access rights to attempt to grant, + * or NULL if no access rights were explicitly requested + * @param[in] pRightsRequired Any rights additionally required for the operation, + * will be requested if pRightsRequested is not specified. + * If specified, all rights in this mask must be granted for the call to succeed. + * @param[in] pAllocParams per-class allocation parameters passed into Alloc, + * NULL if this is not being called from the Alloc path. + * + * @return NV_OK if the access right should be granted + * @return NV_ERR_INSUFFICIENT_PERMISSIONS if access rights were + * explicitly requested, and the function failed to grant all of the + * requested access rights + */ +NV_STATUS rsAccessGrantRights(RsResourceRef *pResourceRef, CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, void *pAllocParams); + +#ifdef __cplusplus +} +#endif + +#endif /* RS_ACCESS_MAP_H */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h new file mode 100644 index 0000000..9ff6397 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h @@ -0,0 +1,167 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef RS_ACCESS_RIGHTS_H
+#define RS_ACCESS_RIGHTS_H
+
+#include "nvstatus.h"
+#include "nvtypes.h"
+#include "nvmisc.h"
+
+// Part of this header is in userspace, at sdk/nvidia/inc/rs_access.h
+#include "rs_access.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/****************************************************************************/
+/*                           Access right flags                             */
+/****************************************************************************/
+
+//
+// The meaning of each access right flag is documented in
+// drivers/resman/docs/rmapi/resource_server/rm_capabilities.adoc
+//
+
+#define RS_ACCESS_FLAG_NONE                     0U
+#define RS_ACCESS_FLAG_ALLOW_KERNEL_PRIVILEGED  NVBIT(1)
+#define RS_ACCESS_FLAG_ALLOW_PRIVILEGED         NVBIT(2)
+#define RS_ACCESS_FLAG_UNCACHED_CHECK           NVBIT(3)
+#define RS_ACCESS_FLAG_ALLOW_OWNER              NVBIT(4)
+
+
+/****************************************************************************/
+/*                         Access right metadata                            */
+/****************************************************************************/
+
+/*!
+ * @brief Metadata about each access right
+ *
+ * The ith entry in this array represents access right i.
+ */
+extern const RS_ACCESS_INFO g_rsAccessMetadata[RS_ACCESS_COUNT];
+
+
+/****************************************************************************/
+/*                          Access right macros                             */
+/****************************************************************************/
+
+/*!
+ * @brief Initializer for an access mask. Avoid use if possible.
+ *
+ * To initialize an access mask, if possible, first zero-initialize it then
+ * add specific access rights at runtime. Zero-initialization can be performed
+ * with the RS_ACCESS_MASK_EMPTY static initializer, the RS_ACCESS_MASK_CLEAR() macro,
+ * or a memset.
+ *
+ * Only use this when a static initializer is TRULY needed, and when the code is
+ * generated by a script, not hardcoded. For instance, this is useful when
+ * statically initializing control call table entries.
+ *
+ * The ith argument will directly initialize the ith limb. An access right A
+ * should be placed in the limb SDK_RS_ACCESS_LIMB_INDEX(A). Each limb should be a
+ * mask of flags, where each flag is in the form SDK_RS_ACCESS_OFFSET_MASK(A), or 0
+ * to indicate no flags.
+ *
+ * For example, suppose we have access rights A, B, and C, where
+ *
+ *   SDK_RS_ACCESS_LIMB_INDEX(A) == 0
+ *   SDK_RS_ACCESS_LIMB_INDEX(B) == 2
+ *   SDK_RS_ACCESS_LIMB_INDEX(C) == 2
+ *
+ * In this case, the appropriate way to initialize a mask containing all
+ * three access rights is:
+ *
+ *   RS_ACCESS_MASK mask = RS_ACCESS_MASK_INITIALIZER
+ *   (
+ *       SDK_RS_ACCESS_OFFSET_MASK(A),
+ *       0,
+ *       SDK_RS_ACCESS_OFFSET_MASK(B) | SDK_RS_ACCESS_OFFSET_MASK(C)
+ *   );
+ */
+#define RS_ACCESS_MASK_INITIALIZER(...) { { __VA_ARGS__ } }
+
+/*!
+ * @brief Empty initializer for an access mask.
+ *
+ * An example of usage is as follows:
+ *
+ *   RS_ACCESS_MASK mask = RS_ACCESS_MASK_EMPTY;
+ */
+#define RS_ACCESS_MASK_EMPTY RS_ACCESS_MASK_INITIALIZER(0)
+
+
+/****************************************************************************/
+/*                         Access right functions                           */
+/****************************************************************************/
+
+/*!
+ * @brief Checks if one access rights mask is a subset of another
+ *
+ * @param[in] pRightsPresent The access rights that are held by some actor
+ * @param[in] pRightsRequired The access rights that must be a subset of
+ *                            the rights in pRightsPresent
+ *
+ * @return NV_TRUE if each of the access rights in pRightsRequired is also
+ *         present in pRightsPresent
+ * @return NV_FALSE otherwise
+ */
+NvBool rsAccessMaskIsSubset(const RS_ACCESS_MASK *pRightsPresent,
+                            const RS_ACCESS_MASK *pRightsRequired);
+
+/*!
+ * @brief Checks if an access right mask is empty
+ *
+ * @param[in] pAccessMask The mask to check for emptiness
+ *
+ * @return NV_TRUE if the mask contains no access rights
+ * @return NV_FALSE otherwise
+ */
+NvBool rsAccessMaskIsEmpty(const RS_ACCESS_MASK *pAccessMask);
+
+
+/*!
+ * @brief Converts an array of access rights into a mask
+ *
+ * This function is useful for processing a statically-initialized array of
+ * access rights, since it is not always desirable to directly statically
+ * initialize an access mask. One example of this use is the definitions used
+ * in resource_list.h.
+ *
+ * @param[out] pAccessMask The newly initialized access mask
+ * @param[in] pRightsArray An array of access right values
+ * @param[in] length The number of elements in pRightsArray
+ *
+ * @return none
+ */
+void rsAccessMaskFromArray(RS_ACCESS_MASK *pAccessMask,
+                           const RsAccessRight *pRightsArray,
+                           NvLength length);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RS_ACCESS_RIGHTS_H */
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h
new file mode 100644
index 0000000..f2a18d8
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h
@@ -0,0 +1,509 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_rs_client_nvoc.h"
+
+#ifndef _RS_CLIENT_H_
+#define _RS_CLIENT_H_
+
+
+#include "resserv/resserv.h"
+#include "nvport/nvport.h"
+#include "resserv/rs_resource.h"
+#include "containers/list.h"
+#include "utils/nvrange.h"
+
+#define RS_UNIQUE_HANDLE_BASE  (0xcaf00000)
+#define RS_UNIQUE_HANDLE_RANGE (0x00080000)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup RsClient
+ * @addtogroup RsClient
+ * @{*/
+
+typedef enum {
+    CLIENT_TYPE_USER,
+    CLIENT_TYPE_KERNEL
+} CLIENT_TYPE;
+
+typedef struct AccessBackRef
+{
+    NvHandle hClient;
+    NvHandle hResource;
+} AccessBackRef;
+
+MAKE_LIST(AccessBackRefList, AccessBackRef);
+
+/**
+ * Information about a client
+ */
+NVOC_PREFIX(client) class RsClient : Object
+{
+public:
+    /**
+     * The handle of this client
+     */
+    NvHandle hClient;
+
+    /**
+     * Kernel or user client
+     */
+    CLIENT_TYPE type;
+
+    /**
+     * Client is in a state where it can allocate new objects
+     */
+    NvBool bActive;
+
+    /**
+     * True if client tripped the resource count warning threshold
+     */
+    NvBool bResourceWarning;
+
+    /**
+     * Maps resource handle -> RsResourceRef
+     */
+    RsRefMap resourceMap;
+
+    /**
+     * Access right back reference list of (hClient, hResource) pairs
+     *
+     * A list of all hResources (with hClient to scope the handle) that have
+     * shared access rights with us.
+     */
+    AccessBackRefList accessBackRefList;
+
+    /**
+     * The first generated handle in the generated resource handle space
+     *
+     * It is an error for the handleRangeStart to be 0 because that is a
+     * reserved handle.
+     *
+     * The first generated handle is not necessarily the lowest possible handle
+     * because the handle generator may overflow. The lowest possible resource
+     * handle is 0x1.
+     *
+     * Generated handles will be of the form: handleRangeStart + [0, handleRangeSize)
+     */
+    NvHandle handleRangeStart;
+
+    /**
+     * The size of the generated resource handle space.
+     *
+     * It is an error for the handleRangeSize to be 0.
+     *
+     * Generated handles will be of the form: handleRangeStart + [0, handleRangeSize)
+     */
+    NvHandle handleRangeSize;
+
+    /**
+     * The handles in the restricted resource handle space.
+ */ + NV_RANGE handleRestrictRange; + + /** + * Index used to generate the next handle in the resource handle space + */ + NvHandle handleGenIdx; + + /** + * Ordered list of resources that are to be freed + */ + RsRefFreeList pendingFreeList; + + /** + * Information about recursive resource free calls is stored here + */ + RS_FREE_STACK *pFreeStack; + + /** + * Construct a client instance + * @param[in] pClient This client + * @param[in] pAllocator NvPort memory allocation interface for client memory allocations + * @param[in] pParams The allocation params + */ + NV_STATUS clientConstruct(RsClient *pClient, PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + + /** + * Destruct a client instance and free all allocated resources + * @param[in] pClient This client + */ + void clientDestruct(RsClient *pClient); + + /** + * Get a resource pointer from a resource reference. No resource locks are taken. + * @param[in] pClient This client + * @param[in] pResourceRef The reference to the resource + * @param[out] ppResource Pointer to the resource + */ + NV_STATUS clientGetResourceByRef(RsClient *pClient, RsResourceRef *pResourceRef, RsResource **ppResource); + + /** + * Get a resource pointer from a resource handle. No resource locks are taken. + * @param[in] pClient This client + * @param[in] hResource Resource handle + * @param[in] internalClassId Expected internal class ID of object. Must match. + * @param[out] ppResource Pointer to the resource + */ + NV_STATUS clientGetResource(RsClient *pClient, NvHandle hResource, NvU32 internalClassId, RsResource **ppResource); + + /** + * Get the reference to a resource + * @param[in] pClient This client + * @param[in] hResource The resource to lookup + * @param[out] ppResourceRef The reference to the resource + */ + NV_STATUS clientGetResourceRef(RsClient *pClient, NvHandle hResource, RsResourceRef **ppResourceRef); + + /** + * Get the reference to a resource, but only if the passed in access rights are + * possessed by the invoking client. 
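+     *
+     * A hypothetical lookup sketch (the specific access right added to the
+     * mask is an illustrative assumption):
+     *
+     * ~~~{.c}
+     * RS_ACCESS_MASK rightsRequired = RS_ACCESS_MASK_EMPTY;
+     * RsResourceRef *pResourceRef;
+     * RS_ACCESS_MASK_ADD(&rightsRequired, RS_ACCESS_DUP_OBJECT);  // assumed right
+     * NV_STATUS status = clientGetResourceRefWithAccess(pClient, hResource,
+     *                                                   &rightsRequired,
+     *                                                   &pResourceRef);
+     * ~~~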
+     *
+     * @param[in] pClient This client
+     * @param[in] hResource The resource to lookup
+     * @param[in] pRightsRequired The rights required for success
+     * @param[out] ppResourceRef The reference to the resource
+     */
+    NV_STATUS clientGetResourceRefWithAccess(RsClient *pClient, NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, RsResourceRef **ppResourceRef);
+
+    /**
+     * Get the reference to a resource (with a type check)
+     * @param[in] pClient This client
+     * @param[in] hResource The resource to lookup
+     * @param[in] internalClassId The internal resource class id
+     * @param[out] ppResourceRef The reference to the resource
+     */
+    NV_STATUS clientGetResourceRefByType(RsClient *pClient, NvHandle hResource, NvU32 internalClassId, RsResourceRef **ppResourceRef);
+
+    /**
+     * Validate that the current process is allowed to use this client
+     * @param[in] pClient This client
+     * @param[in] pSecInfo Security info of the current API call
+     */
+    virtual NV_STATUS clientValidate(RsClient *pClient, const API_SECURITY_INFO *pSecInfo);
+
+    /**
+     * Allocate a resource in RM for this client
+     * @param[in] pClient This client
+     * @param[in] pServer
+     * @param[inout] pParams Resource allocation parameters
+     */
+    NV_STATUS clientAllocResource(RsClient *pClient, RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+    /**
+     * Duplicate a resource reference into this client
+     * @param[in] pClient This client
+     * @param[in] pServer The resource server instance
+     * @param[inout] pParams Resource duplication parameters
+     */
+    NV_STATUS clientCopyResource(RsClient *pClient, RsServer *pServer, RS_RES_DUP_PARAMS_INTERNAL *pParams);
+
+    /**
+     * Free a resource for this client and update resource reference book-keeping.
+     * If the resource has a non-zero reference count, only book-keeping will be updated.
+     * Resources should never be freed in control calls.
+     *
+     * @param[in] pClient This client
+     * @param[in] pServer
+     * @param[in] pParams Resource destruction parameters
+     */
+    virtual NV_STATUS clientFreeResource(RsClient *pClient, RsServer *pServer, RS_RES_FREE_PARAMS_INTERNAL *pParams);
+
+    /**
+     * Remove a resource reference from the client's resource hashmap
+     * @param[in] pClient This client
+     * @param[in] pResourceRef The reference to free
+     */
+    virtual NV_STATUS clientDestructResourceRef(RsClient *pClient, RsServer *pServer, RsResourceRef *pResourceRef);
+
+    /**
+     * Unmap a mapping that belongs to a resource reference in this client.
+     * @param[in] pClient This client
+     * @param[in] pResourceRef The reference that owns the mapping
+     * @param[inout] ppCpuMapping The mapping to unmap
+     */
+    virtual NV_STATUS clientUnmapMemory(RsClient *pClient, RsResourceRef *pResourceRef,
+                                        RS_LOCK_INFO *pLockInfo, RsCpuMapping **ppCpuMapping,
+                                        API_SECURITY_INFO *pSecInfo);
+    /**
+     * Create an inter-mapping between two resources owned by this client.
+     * Resserv only implements a stub; users should override this to fill their own MapTo params struct
+     *
+     * @param[in] pClient This client
+     * @param[in] pMapperRef The resource that can be used to create the mapping
+     * @param[in] pMappableRef The resource that can be mapped
+     * @param[in] pParams parameters describing the mapping
+     */
+    virtual NV_STATUS clientInterMap(RsClient *pClient, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pParams);
+
+    /**
+     * Unmap an inter-mapping between two resources owned by this client.
+     * Resserv only implements a stub; users should override this to fill their own UnmapFrom params struct
+     *
+     * @param[in] pClient This client
+     * @param[in] pMapperRef The reference that was used to create the mapping
+     * @param[in] pParams parameters describing the unmapping
+     */
+    virtual void clientInterUnmap(RsClient *pClient, RsResourceRef *pMapperRef, RS_INTER_UNMAP_PARAMS *pParams);
+
+    /**
+     * Generate an unused handle for a resource. The handle will be generated in the white-listed range that was
+     * specified when the client was allocated.
+     *
+     * The handle generator will wrap around when the number of handles generated is greater than handleRangeSize, and
+     * the generator will start at handle 0x1 if it overflows (0x0 is a reserved handle).
+     *
+     * The handle generator can generate up to 2^32-2 unique handles if handleRangeStart + handleRangeSize overflows
+     * (because 0x0 is a reserved handle). Otherwise, the handle generator can generate up to 2^32-1 unique handles.
+     *
+     * @param[in] pClient This client
+     * @param[out] pHandle The generated handle
+     *
+     */
+    NV_STATUS clientGenResourceHandle(RsClient *pClient, NvHandle *pHandle);
+
+    /**
+     * Validate that a given resource handle is well-formed and does not already
+     * exist under a given client.
+     *
+     * @param[in] pClient
+     * @param[in] hResource
+     * @param[in] bRestrict If true, fail validation for handles in the client's restricted range
+     */
+    virtual NV_STATUS clientValidateNewResourceHandle(RsClient *pClient, NvHandle hResource, NvBool bRestrict);
+
+    /**
+     * Wrapper that generates a resource handle if a handle of 0 is provided, or otherwise
+     * validates a handle that was provided.
+     *
+     * @param[in] pClient
+     * @param[inout] phResource
+     */
+    NV_STATUS clientAssignResourceHandle(RsClient *pClient, NvHandle *phResource);
+
+    /**
+     * Recursively generate a client's list of resources to free
+     * @param[in] pClient
+     * @param[in] pTarget The resource ref currently being processed
+     * @param[in] pReference The resource ref that this function was initially called on
+     * @param[in] bMove If NV_TRUE: Add/move the target to the front of the list
+     *                  If NV_FALSE: Add the target to the front of the list if it isn't already in the list
+     */
+    NV_STATUS clientUpdatePendingFreeList(RsClient *pClient, RsResourceRef *pTarget,
+                                          RsResourceRef *pReference, NvBool bMove);
+
+    /**
+     * Allow derived client classes to modify the generated list of resources to free
+     * before they are freed.
+ *
+ * @param[in] pClient
+ * @param[out] ppFirstLowPriRef A pointer to the first reference that is low priority
+ */
+ virtual NV_STATUS clientPostProcessPendingFreeList(RsClient *pClient, RsResourceRef **ppFirstLowPriRef);
+
+ /**
+ * Add a back reference to a client/resource pair that shared access with our client
+ * so we can remove that access entry on client destruction.
+ * @param[in] pClient This client
+ * @param[in] pResourceRef Resource reference that decided to share access with us
+ */
+ NV_STATUS clientAddAccessBackRef(RsClient* pClient, RsResourceRef* pResourceRef);
+
+ /**
+ * Remove all access map entries for all back references we stored so other clients
+ * reusing the same client handle won't get unauthorized access. Intended to be called
+ * during client destruction.
+ * @param[in] pClient This client
+ * @param[in] pServer Resource Server instance
+ */
+ void clientFreeAccessBackRefs(RsClient *pClient, RsServer *pServer);
+
+ /**
+ * Set the start handle and range for this client's handle generator.
+ *
+ * @note Supplying a range and size of 0 will set the generator to the default start handle and range
+ * @note The handle generator can only be set before any handle has been generated
+ *
+ * @param[in] pClient
+ * @param[in] handleRangeStart
+ * @param[in] handleRangeSize
+ */
+ NV_STATUS clientSetHandleGenerator(RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize);
+
+ /**
+ * Verify whether a client is able to share a resource under a certain share policy
+ *
+ * @param[in] pClient Client attempting to share the resource
+ * @param[in] pResourceRef The resource being shared
+ * @param[in] pSharePolicy The policy under which the resource is to be shared
+ * @param[in] pCallContext The context of the call intending to perform the share
+ */
+ NV_STATUS clientCanShareResource(RsClient *pClient, RsResourceRef *pResourceRef,
+ RS_SHARE_POLICY *pSharePolicy, CALL_CONTEXT *pCallContext);
+
+ /**
+ * Share access to a resource with other clients under the specified share policy.
+ *
+ * @param[in] pClient This client
+ * @param[in] pResourceRef Resource reference which is sharing access
+ * @param[in] pSharePolicy The policy under which the resource is sharing access
+ * @param[in] pCallContext The context of the call performing the share
+ */
+ virtual NV_STATUS clientShareResource(RsClient *pClient, RsResourceRef *pResourceRef,
+ RS_SHARE_POLICY *pSharePolicy,
+ CALL_CONTEXT *pCallContext);
+
+ /**
+ * Share access to a resource with a specific target client under the specified share policy.
+ *
+ * @param[in] pClient This client
+ * @param[in] pResourceRef Resource reference which is sharing access
+ * @param[in] pSharePolicy The policy under which the resource is sharing access
+ * @param[in] pCallContext The context of the call performing the share
+ */
+ NV_STATUS clientShareResourceTargetClient(RsClient *pClient, RsResourceRef *pResourceRef,
+ RS_SHARE_POLICY *pSharePolicy, CALL_CONTEXT *pCallContext);
+ /**
+ * Set the start handle and range for this client's restricted handle
+ * range. This range of handles cannot be explicitly requested. Any
+ * restricted handles that are in the client's resource handle generator
+ * range can still be generated.
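+ *
+ * A hypothetical sketch (handle values are illustrative only):
+ * @code
+ * // Reserve 0x01000000..0x01FFFFFF; clients cannot explicitly request these
+ * // handles, but the handle generator may still produce them
+ * status = clientSetRestrictedRange(pClient, 0x01000000, 0x01000000);
+ * @endcode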
+ *
+ * @note Supplying a range and size of 0 will disable the restricted range
+ * @note The restricted range can only be set before any handle has been generated
+ *
+ * @param[in] pClient
+ * @param[in] handleRangeStart
+ * @param[in] handleRangeSize
+ */
+ NV_STATUS clientSetRestrictedRange(RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize);
+};
+
+/**
+ * Get an iterator to the elements in the client's resource map
+ * @param[in] pClient
+ * @param[in] pScopeRef Restrict the iteration based on this reference [optional]
+ * @param[in] internalClassId Only iterate over resources with this class id [optional]
+ * @param[in] type RS_ITERATE_CHILDREN, RS_ITERATE_DESCENDANTS, RS_ITERATE_CACHED, RS_ITERATE_DEPENDANTS
+ * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId
+ *
+ * @note If type=RS_ITERATE_CHILDREN, pScopeRef will restrict iteration to children of the scope ref
+ * @note If type=RS_ITERATE_DESCENDANTS, pScopeRef will restrict iteration to descendants of the scope ref
+ * @note If type=RS_ITERATE_CACHED, pScopeRef will restrict iteration to references cached by the scope ref
+ */
+RS_ITERATOR clientRefIter(RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, RS_ITER_TYPE type, NvBool bExactMatch);
+
+/**
+ * Get the next iterator to the elements in the client's resource map
+ * @param[in] pClient
+ * @param[inout] pIt The iterator
+ */
+NvBool clientRefIterNext(RsClient *pClient, RS_ITERATOR *pIt);
+
+/**
+ * Get an iterator to the elements in the client's resource map.
+ *
+ * This iterator will visit all descendants in pre-order according to the parent-child
+ * resource hierarchy.
+ *
+ * @param[in] pClient
+ * @param[in] pScopeRef Restrict the iteration based on this reference [optional]
+ * @param[in] internalClassId Only iterate over resources with this class id [optional]
+ * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId
+ */
+RS_ORDERED_ITERATOR clientRefOrderedIter(RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, NvBool bExactMatch);
+
+/**
+ * Get the next ordered iterator to the elements in the client's resource map
+ * @param[in] pClient
+ * @param[inout] pIt The iterator
+ */
+NvBool clientRefOrderedIterNext(RsClient *pClient, RS_ORDERED_ITERATOR *pIt);
+
+
+/**
+ * RsResource interface to a RsClient
+ *
+ * This allows clients to be interfaced with as if they were resources (e.g.,
+ * to perform a control call on a client).
+ *
+ * An RsClientResource is automatically allocated under a client as a top-level
+ * object when that client is allocated and cannot be explicitly freed. Only
+ * one RsClientResource is permitted per client.
+ *
+ * Any resource allocated under a client will be a descendant of the client
+ * proxy resource.
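+ *
+ * A hypothetical sketch of walking a client's resource tree with the iterator
+ * API declared above (processRef is an illustrative helper, not part of resserv):
+ * @code
+ * RS_ITERATOR it = clientRefIter(pClient, NULL, 0, RS_ITERATE_DESCENDANTS, NV_FALSE);
+ * while (clientRefIterNext(pClient, &it))
+ *     processRef(it.pResourceRef);
+ * @endcode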
+ * + */ +NVOC_PREFIX(clientres) class RsClientResource : RsResource +{ +public: + NV_STATUS clientresConstruct(RsClientResource* pClientRes, CALL_CONTEXT *pCallContext, RS_RES_ALLOC_PARAMS_INTERNAL *pParams) + : RsResource(pCallContext, pParams); + void clientresDestruct(RsClientResource* pClientRes); + +// private: + RsClient* pClient; +}; + +/** + * Client destruction parameters + */ +struct RS_CLIENT_FREE_PARAMS_INTERNAL +{ + NvHandle hDomain; ///< [in] The parent domain + NvHandle hClient; ///< [in] The client handle + NvBool bHiPriOnly; ///< [in] Only free high priority resources + NvU32 state; ///< [in] User-defined state + + RS_RES_FREE_PARAMS_INTERNAL *pResFreeParams; ///< [in] Necessary for locking state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * Return an iterator to a resource reference multi-map + * @param[in] pIndex The multi-map to iterate + * @param[in] index Return only the references belonging to this index + */ +RsIndexIter indexRefIter(RsIndex *pIndex, NvU32 index); + +/** + * Return an iterator to all resource references in a multi-map + * @param[in] pIndex The multi-map to iterate + */ +RsIndexIter indexRefIterAll(RsIndex *pIndex); + +/** + * Get the next iterator in a resource reference multi-map + * @param[in] pIt Iterator + */ +NvBool indexRefIterNext(RsIndexIter *pIt); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h new file mode 100644 index 0000000..856568c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _RS_DOMAIN_H_ +#define _RS_DOMAIN_H_ + +#include "resserv/resserv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsDomain + * @addtogroup RsDomain + * @{*/ + +/** + * @brief Domains are being re-worked + */ +struct RsDomain +{ + NvU32 dummy; +}; + +/** + * Construct a domain instance + * @param[in] pDomain This domain + * @param[in] pAllocator + * @param[in] hDomain The handle for this domain + * @param[in] hParentDomain The handle for the parent domain + * @param[in] pAccessControl The privileges of the domain + */ +NV_STATUS +domainConstruct +( + RsDomain *pDomain, + PORT_MEM_ALLOCATOR *pAllocator, + NvHandle hDomain, + NvHandle hParentDomain, + ACCESS_CONTROL *pAccessControl +); + +/** + * Destruct a domain instance + * @param[in] pDomain The domain to destruct + */ +NV_STATUS +domainDestruct +( + RsDomain *pDomain +); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h new file mode 100644 index 0000000..8df3313 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h @@ -0,0 +1,829 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_rs_resource_nvoc.h" + +#ifndef _RS_RESOURCE_H_ +#define _RS_RESOURCE_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/object.h" +#include "resserv/rs_access_map.h" + +#ifdef __cplusplus +extern "C" { +#endif + +class RsSession; + +/** + * @defgroup RsResource + * @addtogroup RsResource + * @{*/ + +#define ALLOC_STATE_INTERNAL_CLIENT_HANDLE NVBIT(5) + +/* + * Locking operations for lock-metering + */ +#define RS_LOCK_TRACE_INVALID 1 +#define RS_LOCK_TRACE_ACQUIRE 1 +#define RS_LOCK_TRACE_RELEASE 2 +#define RS_LOCK_TRACE_ALLOC 3 +#define RS_LOCK_TRACE_FREE 4 +#define RS_LOCK_TRACE_CTRL 5 +#define RS_LOCK_TRACE_MAP 6 +#define RS_LOCK_TRACE_UNMAP 7 + +/** + * Context information for top-level, resource-level, and client-level locking + * operations + */ +struct RS_LOCK_INFO +{ + RsClient *pClient; ///< Pointer to client that was locked (if any) + RsClient *pSecondClient; ///< Pointer to second client, for dual-client locking + RsResourceRef *pContextRef; ///< User-defined reference + RsSession *pSession; ///< Session object to be locked, if any + NvU32 flags; ///< RS_LOCK_FLAGS_* + NvU32 state; ///< RS_LOCK_STATE_* + NvU32 gpuMask; + NvU8 traceOp; ///< RS_LOCK_TRACE_* operation for lock-metering + NvU32 traceClassId; ///< Class of initial resource that was locked for lock metering +}; + +struct RS_RES_ALLOC_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hParent; ///< [in] The handle of the resource's parent. This may be a client or another resource. + NvHandle hResource; ///< [inout] Server will assign a handle if this is 0, or else try the value provided + NvU32 externalClassId; ///< [in] External class ID of resource + NvHandle hDomain; ///< UNUSED + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + RsClient *pClient; ///< [out] Cached client + RsResourceRef *pResourceRef; ///< [out] Cached resource reference + NvU32 allocFlags; ///< [in] Allocation flags + NvU32 allocState; ///< [inout] Allocation state + API_SECURITY_INFO *pSecInfo; + + void *pAllocParams; ///< [in] Copied-in allocation parameters + + // ... Dupe alloc + RsClient *pSrcClient; ///< The client that is sharing the resource + RsResourceRef *pSrcRef; ///< Reference to the resource that will be shared + + RS_ACCESS_MASK *pRightsRequested; ///< [in] Access rights requested on the new resource + // Buffer for storing contents of user mask. Do not use directly, use pRightsRequested instead. + RS_ACCESS_MASK rightsRequestedCopy; + + RS_ACCESS_MASK *pRightsRequired; ///< [in] Access rights required to alloc this object type +}; + +struct RS_RES_DUP_PARAMS_INTERNAL +{ + NvHandle hClientSrc; ///< [in] The handle of the source resource's client + NvHandle hResourceSrc; ///< [in] The handle of the source resource. + NvHandle hClientDst; ///< [in] The handle of the destination resource's client (may be different from source client) + NvHandle hParentDst; ///< [in] The handle of the destination resource's parent. + NvHandle hResourceDst; ///< [inout] The handle of the destination resource. Generated if 0. 
+ void *pShareParams; ///< [in] Copied-in sharing parameters
+ NvU32 flags; ///< [in] Flags to denote special cases (Bug 2859347 tracks removal)
+ // Internal use only
+ RsClient *pSrcClient;
+ RsResourceRef *pSrcRef;
+ API_SECURITY_INFO *pSecInfo; ///< [in] Security info
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+};
+
+struct RS_RES_SHARE_PARAMS_INTERNAL
+{
+ NvHandle hClient; ///< [in] The handle of the owner's client
+ NvHandle hResource; ///< [in] The handle of the resource.
+ RS_SHARE_POLICY *pSharePolicy; ///< [in] The policy to share with
+
+ // Internal use only
+ API_SECURITY_INFO *pSecInfo; ///< [in] Security info
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+};
+
+#define RS_IS_COPY_CTOR(pParams) ((pParams)->pSrcRef != NULL)
+
+struct RS_RES_FREE_PARAMS_INTERNAL
+{
+ NvHandle hClient; ///< [in] The handle of the resource's client
+ NvHandle hResource; ///< [in] The handle of the resource
+ NvBool bInvalidateOnly; ///< [in] Free the resource, but don't release its handle
+ NvHandle hDomain; ///< UNUSED
+
+ // Internal use only
+ NvBool bHiPriOnly; ///< [in] Only free if this is a high priority resource
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+ NvU32 freeFlags; ///< [in] Flags for the free operation
+ NvU32 freeState; ///< [inout] Free state
+ RsResourceRef *pResourceRef; ///< [inout] Cached RsResourceRef
+ NV_STATUS status; ///< [out] Status of free operation
+ API_SECURITY_INFO *pSecInfo; ///< [in] Security info
+};
+
+struct NVOC_EXPORTED_METHOD_DEF;
+class OBJGPU;
+class OBJGPUGRP;
+
+//
+// RS_RES_CONTROL_PARAMS
+//
+// This structure encapsulates the data sent to the cmd-specific rmctrl
+// handlers, along with the arguments supplied by the requesting
+// client (hClient, hObject, cmd, pParams, paramsSize).
+//
+struct RS_RES_CONTROL_PARAMS_INTERNAL
+{
+ NvHandle hClient; // client-specified NV01_ROOT object handle
+ NvHandle hObject; // client-specified object handle
+ NvU32 cmd; // client-specified command #
+ NvU32 flags; // flags related to control call execution
+ void *pParams; // client-specified params (in kernel space)
+ NvU32 paramsSize; // client-specified size of pParams in bytes
+
+ NvHandle hParent; // handle of hObject parent
+ OBJGPU *pGpu; // ptr to OBJGPU struct if applicable
+ OBJGPUGRP *pGpuGrp; // ptr to OBJGPUGRP struct if applicable
+ RsResourceRef *pResourceRef; // ptr to RsResourceRef if object is managed by
+ // Resource Server
+ API_SECURITY_INFO secInfo; // information on privilege level and pointer location (user/kernel)
+ RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state
+ RS_CONTROL_COOKIE *pCookie;
+ NvBool bInternal; // True if control call was not issued from an external client
+ NvBool bDeferredApi; // Indicates ctrl is being dispatched via deferred API
+
+ struct RS_RES_CONTROL_PARAMS_INTERNAL *pLegacyParams; // RS-TODO removeme
+};
+
+struct RS_RES_DTOR_PARAMS
+{
+ CALL_CONTEXT *pFreeContext;
+ RS_RES_FREE_PARAMS_INTERNAL *pFreeParams;
+};
+
+/**
+ * Base class for all resources. Mostly a pure virtual interface which
+ * should be overridden to implement resource specific behavior.
+ */
+NVOC_PREFIX(res) class RsResource : Object
+{
+public:
+// private:
+
+ /**
+ * Back-reference to the RsResourceRef that owns this object
+ */
+ RsResourceRef *pResourceRef;
+
+ /**
+ * Params for dtor
+ */
+ RS_RES_DTOR_PARAMS dtorParams;
+
+ /**
+ * Flag that indicates whether the RsResource was constructed.
+ * If the params passed to resConstruct are NULL, the Resource ctor and dtor
+ * will be skipped. This is only added for migration where the entire class
+ * hierarchy can't be converted at once.
+ *
+ * RS-TODO: Remove once migrations are finished (added initially for
+ * DmaObject)
+ */
+ NvBool bConstructed;
+
+public:
+
+ /**
+ * Resource initializer
+ * @param[in] pResource Resource object to init
+ * @param[in] pCallContext
+ * @param[in] pParams Resource create parameters
+ */
+ NV_STATUS resConstruct(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Returns TRUE if the resource can be copied
+ */
+ virtual NvBool resCanCopy(RsResource *pResource);
+
+ /**
+ * Resource destructor
+ * @param[in] pResource Resource object to destruct
+ */
+ void resDestruct(RsResource *pResource);
+
+ /**
+ * Resource destructor prologue (occurs before mappings are torn down)
+ * @param[in] pResource Resource object to destruct
+ */
+ virtual void resPreDestruct(RsResource *pResource);
+
+ /**
+ * Resource dtors take no parameters, so set them here.
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams Resource destroy parameters
+ */
+ NV_STATUS resSetFreeParams(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_FREE_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Resource dtors take no parameters, so get them here.
+ * @param[in] pResource
+ * @param[out] ppCallContext
+ * @param[out] ppParams Resource destroy parameters
+ */
+ NV_STATUS resGetFreeParams(RsResource *pResource, CALL_CONTEXT **ppCallContext, RS_RES_FREE_PARAMS_INTERNAL **ppParams);
+
+ /**
+ * Look up a control call entry from an NVOC export table
+ *
+ * @param[in] pResource
+ * @param[in] pParams
+ * @param[out] ppEntry
+ */
+ virtual NV_STATUS resControlLookup(RsResource *pResource,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+ const struct NVOC_EXPORTED_METHOD_DEF **ppEntry);
+
+ /**
+ * Dispatch resource control call
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resControl(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Early filter for control calls we don't want to service on a particular platform
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resControlFilter(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Operations performed right before the control call is executed. Default stubbed.
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resControl_Prologue(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Operations performed right after the control call is executed. No return value.
+ *
+ * @param[in] pResource
+ * @param[in] pCallContext
+ * @param[in] pParams
+ */
+ virtual void resControl_Epilogue(RsResource *pResource, CALL_CONTEXT *pCallContext,
+ RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
+
+ /**
+ * Creates a mapping of the underlying resource in the physical address space of the requested process.
+ *
+ * The difference between serverResMap and resMap is that resMap provides a locked physical address
+ * and serverResMap creates a virtual mapping to the physical address.
+ * For virtualization, the tandem resource servers should be able to map a
+ * host physical address in a guest user space VA without any
+ * resource-specific VA mapping code.
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in] pResource Resource to map
+ * @param[in] pCallContext
+ * @param[in] pParams CPU mapping parameters
+ * @param[inout] pCpuMapping
+ */
+ virtual NV_STATUS resMap(RsResource *pResource,
+ CALL_CONTEXT *pCallContext,
+ RS_CPU_MAP_PARAMS *pParams,
+ RsCpuMapping *pCpuMapping);
+
+ /**
+ * Release a virtual address mapping
+ * @param[in] pResource Resource to unmap
+ * @param[in] pCallContext
+ * @param[in] pCpuMapping
+ */
+ virtual NV_STATUS resUnmap(RsResource *pResource, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping);
+
+ /**
+ * Maps to this resource from another resource.
+ * Not all resources can be mapped to; in such a case this returns NV_ERR_INVALID_OBJECT_HANDLE
+ *
+ * @param[in] pResource
+ * @param[inout] pParams
+ */
+ virtual NV_STATUS resMapTo(RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams);
+
+ /**
+ * Unmaps a resource mapped to this resource.
+ * Not all resources can be unmapped; in such a case this returns NV_ERR_INVALID_OBJECT_HANDLE
+ *
+ * @param[in] pResource
+ * @param[in] pParams
+ */
+ virtual NV_STATUS resUnmapFrom(RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams);
+
+ /**
+ * Gets the refcount for any underlying shared resource
+ * @returns refcount
+ */
+ virtual NvU32 resGetRefCount(RsResource *pResource);
+
+ /**
+ * Decides whether the invoking client should be granted an access right on this resource.
+ *
+ * This function exists to give subclassed resources the ability to set custom
+ * policies for granting access rights. These policies can be implemented
+ * based on the ambient privilege of the caller, such as the PID.
+ *
+ * @param[in] pResource The resource for which the access right will be granted
+ * @param[in] pInvokingClient The client requesting the access right
+ * @param[in] pAllocParams The alloc params struct passed into the alloc call,
+ * NULL if called from outside the Alloc path
+ * @param[in] accessRight The access right to be granted
+ * @returns NV_TRUE if the access right should be granted, and NV_FALSE otherwise
+ */
+ virtual NvBool resAccessCallback(RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight);
+
+ /**
+ * Decides whether rights can be shared with a client under a certain policy.
+ *
+ * This function exists to give subclasses the ability to set custom definitions
+ * for how certain policies will share. Certain share types can then be created to work based
+ * on components not stored directly in resserv, such as PID.
+ *
+ * @param[in] pResource The resource being shared
+ * @param[in] pInvokingClient The client being shared with
+ * @param[in] pParentRef dstParent if calling from DupObject, NULL otherwise
+ * @param[in] pSharePolicy The policy under which to share
+ * @returns NV_TRUE if the share policy applies and rights should be shared, NV_FALSE otherwise
+ */
+ virtual NvBool resShareCallback(RsResource *pResource, RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy);
+
+ /**
+ * Adds dependants that aren't in childRefMap or depRefMap to the pending free list.
+ *
+ * Due to RAM constraints, some classes can add more dependants that aren't
+ * represented in childRefMap or depRefMap. They can override this function
+ * to put them in the pending free list while we are updating it.
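+ *
+ * A hypothetical override sketch (the myres prefix and pMyExtraDep are
+ * illustrative, not part of resserv):
+ * @code
+ * void myresAddAdditionalDependants(RsClient *pClient, RsResource *pResource, RsResourceRef *pReference)
+ * {
+ *     // pMyExtraDep: a dependant tracked outside childRefMap/depRefMap
+ *     clientUpdatePendingFreeList(pClient, pMyExtraDep, pReference, NV_FALSE);
+ * }
+ * @endcode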
+ * + * @param[in] pClient + * @param[in] pResource The RsResource with potential additional dependants + * @param[in] pReference The pReference to pass in to + * clientUpdatePendingFreeList() + */ + virtual void resAddAdditionalDependants(RsClient *pClient, RsResource *pResource, RsResourceRef *pReference); +}; + +/* @} */ + +class OBJGPU; + +/** + * @defgroup RsCpuMapping + * @addtogroup RsCpuMapping + * @{*/ +struct RsCpuMapping +{ + NvU64 offset; + NvU64 length; + NvU32 flags; + NvP64 pLinearAddress; + RsResourceRef *pContextRef; ///< Context resource that may be needed for the mapping + void *pContext; ///< Additional context data for the mapping + NvU32 processId; + + RS_CPU_MAPPING_PRIVATE *pPrivate; ///< Opaque struct allocated and freed by resserv on behalf of the user +}; +MAKE_LIST(RsCpuMappingList, RsCpuMapping); + +/** + * CPU mapping parameters + */ +struct RS_CPU_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; ///< [in] Offset into the resource + NvU64 length; ///< [in] Size of the region to map + NvP64 *ppCpuVirtAddr; + NvU32 flags; ///< [in] Resource-specific flags + + // Passed from RM into CpuMapping + NvU32 protect; ///< [in] Protection flags + NvBool bKernel; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU unmapping params for resource server tests + */ +struct RS_CPU_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; ///< [in] Address of mapped memory + NvU32 flags; ///< [in] Resource-specific flags + NvU32 processId; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + // RM-only + void *pProcessHandle; + + NvBool (*fnFilter)(RsCpuMapping*); ///< [in] Mapping-filter function + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU mapping back-reference + */ +struct RS_CPU_MAPPING_BACK_REF +{ + RsCpuMapping *pCpuMapping; ///< Mapping linked to this backref + RsResourceRef *pBackRef; ///< Resource reference with mapping +}; +MAKE_LIST(RsCpuMappingBackRefList, RS_CPU_MAPPING_BACK_REF); +/* @} */ + +/** + * @defgroup RsInterMapping + * @addtogroup RsInterMapping + * @{*/ +struct RS_INTER_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU64 offset; + NvU64 length; + NvU32 flags; + NvU64 dmaOffset; ///< [inout] RS-TODO rename this + void *pMemDesc; ///< [out] + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_MAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +struct RS_INTER_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU32 flags; + NvU64 dmaOffset; ///< [in] RS-TODO rename this + void *pMemDesc; ///< MEMORY_DESCRIPTOR * + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_UNMAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +/** + * Inter-mapping information + * Used to keep track of inter-mappings and unmap them on free + */ +struct 
RsInterMapping +{ + // RsResourceRef *pMapperRef ///< (Implied) the resource that created and owns this mapping (this resource) + RsResourceRef *pMappableRef; ///< The resource being mapped by the mapper (e.g. hMemory) + RsResourceRef *pContextRef; ///< A resource used to provide additional context for the mapping (e.g. hDevice) + NvU32 flags; ///< Flags passed when mapping, same flags also passed when unmapping + NvU64 dmaOffset; + void *pMemDesc; +}; +MAKE_LIST(RsInterMappingList, RsInterMapping); + +/** + * Inter-mapping back-reference + */ +struct RS_INTER_MAPPING_BACK_REF +{ + RsResourceRef *pMapperRef; ///< Resource reference with mapping + RsInterMapping *pMapping; ///< Pointer to the inter-mapping linked to this backref +}; +MAKE_LIST(RsInterMappingBackRefList, RS_INTER_MAPPING_BACK_REF); +/* @} */ + +typedef struct RS_RESOURCE_DESC RS_RESOURCE_DESC; +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** + * A reference to a resource that has been allocated in RM. + */ +struct RsResourceRef +{ + RsClient *pClient; ///< Pointer to the client that owns the ref + RsResource *pResource; ///< Pointer to the actual resource + NvHandle hResource; ///< Resource handle + struct RsResourceRef *pParentRef; ///< Parent resource reference + RsIndex childRefMap; ///< Child reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + + /** + * Cached reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * The resource reference cache is a one-way association between this resource reference and + * any other resource reference. Resource server does not populate the cache so it is up to the + * resource implementation to manage it. clientRefIter can be used to iterate this cache. + */ + RsIndex cachedRefMap; + + /** + * Dependants reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * A map of all resources that strongly depend on this resource. + */ + RsIndex depRefMap; + + /** + * Dependants back-reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * AKA dependencies map + * + * A map of all resources that this resource strongly depends on. + */ + RsIndex depBackRefMap; + + /** + * Policy under which this resource can be shared with other clients + */ + RsShareList sharePolicyList; + NvBool bSharePolicyListModified; + + /** + * A mask of the access rights that the owner client has on this object. 
+ */ + RS_ACCESS_MASK accessMask; + + const RS_RESOURCE_DESC *pResourceDesc; ///< Cached pointer to the resource descriptor + NvU32 internalClassId; ///< Internal resource class id + NvU32 externalClassId; ///< External resource class id + NvU32 depth; ///< The depth of this reference in the resource graph + NvBool bInvalidated; ///< Reference has been freed but not removed yet + + RsCpuMappingList cpuMappings; ///< List of CPU mappings to the resource from this resource reference + RsCpuMappingBackRefList backRefs; ///< List of references that have this reference as a mapping context + + RsInterMappingList interMappings; ///< List of inter-resource mappings created by this resource + RsInterMappingBackRefList interBackRefs; ///< List of inter-resource mappings this resource has been mapped into + + RsSession *pSession; ///< If set, this ref depends on a shared session + RsSession *pDependantSession; ///< If set, this ref is depended on by a shared session + + ListNode freeNode; ///< Links to the client's pendingFreeList +}; +MAKE_MAP(RsRefMap, RsResourceRef); +MAKE_INTRUSIVE_LIST(RsRefFreeList, RsResourceRef, freeNode); + + +// Iterator data structure to save state while walking through a list +struct RS_ITERATOR +{ + union + { + RsRefMapIter mapIt; ///< Map iterator for all resource references under a client + RsIndexIter idxIt; ///< Index iterator for child references of a resource reference + }; + + RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over + NvU8 type; ///< RS_ITERATE_* + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId +}; + +// Iterator data structure to save state while walking through a resource tree in pre-order +struct RS_ORDERED_ITERATOR +{ + NvS8 depth; ///< Depth of index stack; special value of -1 implies that the scope reference should be iterated over as well + RsIndexIter idxIt[RS_MAX_RESOURCE_DEPTH+1]; ///< Stack of index iterators for child references of a resource reference + + RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over +}; + +/** + * Macro for looking up a reference from a resource + */ +#define RES_GET_REF(pResource) (staticCast((pResource), RsResource)->pResourceRef) + +/** + * Macro for looking up a resource handle from a resource + */ +#define RES_GET_HANDLE(pResource) (RES_GET_REF(pResource)->hResource) + +/** + * Macro for looking up a resource's external class from a resource + */ +#define RES_GET_EXT_CLASS_ID(pResource) (RES_GET_REF(pResource)->externalClassId) + +/** + * Macro for looking up a resource's parent handle from a resource + */ +#define RES_GET_PARENT_HANDLE(pResource) (RES_GET_REF(pResource)->pParentRef->hResource) + +/** + * Macro for looking up a client from a resource + */ +#define RES_GET_CLIENT(pResource) (RES_GET_REF(pResource)->pClient) + +/** + * Macro for looking up a client handle from a resource + */ +#define RES_GET_CLIENT_HANDLE(pResource) (RES_GET_REF(pResource)->pClient->hClient) + +/** + * Find a CPU mapping owned by a resource reference + * + * @param[in] pResourceRef + * 
@param[in] pAddress The CPU virtual address of the mapping to search for
+ * @param[out] ppMapping The returned mapping
+ */
+NV_STATUS refFindCpuMapping(RsResourceRef *pResourceRef, NvP64 pAddress, RsCpuMapping **ppMapping);
+
+/**
+ * Find a CPU mapping owned by a resource reference
+ *
+ * @param[in] pResourceRef
+ * @param[in] pAddress The CPU virtual address of the mapping to search for
+ * @param[in] fnFilter A user-provided filtering function that determines which mappings to ignore.
+ * If fnFilter is provided, then we will only return mappings for which fnFilter(mapping) returns NV_TRUE.
+ * All mappings will be searched over if fnFilter is NULL.
+ * @param[out] ppMapping The returned mapping
+ */
+NV_STATUS refFindCpuMappingWithFilter(RsResourceRef *pResourceRef, NvP64 pAddress, NvBool (*fnFilter)(RsCpuMapping*), RsCpuMapping **ppMapping);
+
+/**
+ * Find the first child object of a given type
+ *
+ * @param[in] pParentRef
+ * @param[in] internalClassId
+ * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId
+ * @param[out] ppResourceRef The returned RsResourceRef (Optional)
+ */
+NV_STATUS refFindChildOfType(RsResourceRef *pParentRef, NvU32 internalClassId, NvBool bExactMatch, RsResourceRef **ppResourceRef);
+
+/**
+ * Traverse up the reference parent-child hierarchy to find an ancestor reference of a given type
+ *
+ * @param[in] pDescendantRef
+ * @param[in] internalClassId
+ * @param[out] ppAncestorRef The returned RsResourceRef (Optional)
+ */
+NV_STATUS refFindAncestorOfType(RsResourceRef *pDescendantRef, NvU32 internalClassId, RsResourceRef **ppAncestorRef);
+
+/**
+ * Traverse up the reference parent-child hierarchy to determine whether a ref is a descendant of a given ancestor ref
+ *
+ * @param[in] pDescendantRef The node to start searching from (not included in the search)
+ * @param[in] pAncestorRef The node to search for in the parent-child hierarchy
+ */
+NvBool refHasAncestor(RsResourceRef *pDescendantRef, RsResourceRef *pAncestorRef);
+
+/**
+ * Add a new mapping to a reference's mapping list
+ * @param[in] pResourceRef The reference to add a mapping to
+ * @param[in] pMapParams The parameters used to initialize the mapping
+ * @param[in] pContextRef A reference to a resource that provides a context for the mapping
+ * @param[out] ppMapping Pointer to the allocated mapping [optional]
+ */
+NV_STATUS refAddMapping(RsResourceRef *pResourceRef, RS_CPU_MAP_PARAMS *pMapParams,
+ RsResourceRef *pContextRef, RsCpuMapping **ppMapping);
+
+/**
+ * Remove an existing mapping from a reference's mapping list and remove back-references to the mapping.
+ * @param[in] pResourceRef The reference to remove the mapping from
+ * @param[in] pMapping Pointer to the allocated mapping
+ */
+void refRemoveMapping(RsResourceRef *pResourceRef, RsCpuMapping *pMapping);
+
+/**
+ * Allocate the user-controlled private pointer within the RsCpuMapping struct.
+ * Resserv will call this function to allocate the private struct when the mapping is created
+ * @param[in] pMapParams The parameters which were used to create the mapping
+ * @param[inout] pMapping Pointer to the mapping whose private struct should be allocated
+ */
+NV_STATUS refAllocCpuMappingPrivate(RS_CPU_MAP_PARAMS *pMapParams, RsCpuMapping *pMapping);
+
+/**
+ * Free the user-controlled private pointer within the RsCpuMapping struct.
+ * Resserv will call this function to free the private struct when the mapping is removed
+ * @param[inout] pMapping Pointer to the mapping whose private struct should be freed
+ */
+void refFreeCpuMappingPrivate(RsCpuMapping *pMapping);
+
+/**
+ * Add a dependency between this resource reference and a dependent reference.
+ * If this reference is freed, the dependent will be invalidated and torn down.
+ *
+ * @note Dependencies are implicit between a parent resource reference and child resource reference
+ * @note No circular dependency checking is performed
+ */
+NV_STATUS refAddDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef);
+
+/**
+ * Remove the dependency between this resource reference and a dependent resource reference.
+ */
+NV_STATUS refRemoveDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef);
+
+/**
+ * Find, Add, or Remove an inter-mapping between two resources in the Mapper's list of inter-mappings
+ * Inter-mappings are stored in the Mapper, and are matched by both the MappableRef and offset.
+ *
+ * @param[in] pMapperRef The reference which owns the inter-mapping
+ * @param[in] pMappableRef The reference which was mapped from to create the inter-mapping
+ * If NULL, will be ignored while matching inter-mappings
+ * @param[in] pContextRef A reference used during mapping and locking for additional context, used to identify mappings
+ * @param[in] dmaOffset The offset value assigned while mapping, used to identify mappings
+ * @param[inout] ppMapping Writes the resulting inter-mapping, if successfully created (Add) or found (Find)
+ * @param[in] pMapping The inter-mapping to remove (Remove)
+ */
+NV_STATUS refFindInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, NvU64 dmaOffset, RsInterMapping **ppMapping);
+NV_STATUS refAddInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, RsInterMapping **ppMapping);
+void refRemoveInterMapping(RsResourceRef *pMapperRef, RsInterMapping *pMapping);
+
+/**
+ * Store a resource reference in another reference's cache.
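+ *
+ * A hypothetical sketch (variables are illustrative):
+ * @code
+ * // Cache a ref so it can later be found via clientRefIter(..., RS_ITERATE_CACHED, ...)
+ * status = refCacheRef(pParentRef, pResourceRef);
+ * @endcode
+ *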
+ * @param[in] pParentRef The resource reference that owns the cache
+ * @param[in] pResourceRef The resource reference to store in the cache
+ */
+NV_STATUS refCacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef);
+
+/**
+ * Remove a resource reference from another reference's cache
+ * @param[in] pParentRef The resource reference that owns the cache
+ * @param[in] pResourceRef The resource reference to remove from the cache
+ */
+NV_STATUS refUncacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef);
+
+/**
+ * Determine whether a reference is queued for removal
+ * @param[in] pResourceRef
+ * @param[in] pClient
+ */
+NvBool refPendingFree(RsResourceRef *pResourceRef, RsClient *pClient);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h
new file mode 100644
index 0000000..17c561c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h
@@ -0,0 +1,928 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "g_rs_server_nvoc.h"
+
+#ifndef _RS_SERVER_H_
+#define _RS_SERVER_H_
+
+#include "nvport/nvport.h"
+#include "resserv/resserv.h"
+#include "nvoc/runtime.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup RsServer
+ * @addtogroup RsServer
+ * @{*/
+
+/**
+ * Book-keeping for individual client locks
+ */
+struct CLIENT_ENTRY
+{
+ PORT_RWLOCK *pLock;
+ RsClient *pClient;
+ NvHandle hClient;
+ NvU64 lockOwnerTid; ///< Thread id of the lock owner
+
+#if LOCK_VAL_ENABLED
+ LOCK_VAL_LOCK lockVal;
+#endif
+};
+
+/**
+ * Base-class for objects that are shared among multiple
+ * RsResources (including RsResources from other clients)
+ */
+NVOC_PREFIX(shr) class RsShared : Object
+{
+public:
+ NV_STATUS shrConstruct(RsShared *pShared);
+ void shrDestruct(RsShared *pShared);
+
+ NvS32 refCount;
+ MapNode node;
+};
+MAKE_INTRUSIVE_MAP(RsSharedMap, RsShared, node);
+
+/**
+ * Utility class for objects that can reference
+ * multiple client handle spaces. Frees and control calls
+ * that occur on objects which reference an RsSession will
+ * need to acquire pLock first.
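+ *
+ * A hypothetical sketch (assuming the nvport read/write lock API; the work in
+ * the middle is illustrative):
+ * @code
+ * portSyncRwLockAcquireWrite(pSession->pLock);
+ * // ... free or control-call work on an object that references pSession ...
+ * portSyncRwLockReleaseWrite(pSession->pLock);
+ * @endcode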
+ */
+NVOC_PREFIX(session) class RsSession : RsShared
+{
+public:
+ NV_STATUS sessionConstruct(RsSession *pSession);
+ void sessionDestruct(RsSession *pSession);
+
+ NV_STATUS sessionAddDependant(RsSession *pSession, RsResourceRef *pResourceRef);
+ NV_STATUS sessionAddDependency(RsSession *pSession, RsResourceRef *pResourceRef);
+ virtual void sessionRemoveDependant(RsSession *pSession, RsResourceRef *pResourceRef);
+ virtual void sessionRemoveDependency(RsSession *pSession, RsResourceRef *pResourceRef);
+
+ PORT_RWLOCK *pLock;
+#if LOCK_VAL_ENABLED
+ LOCK_VAL_LOCK lockVal;
+#endif
+
+ NvBool bValid;
+
+ RsResourceRefList dependencies;
+ RsResourceRefList dependants;
+// private:
+ NV_STATUS sessionCheckLocksForAdd(RsSession *pSession, RsResourceRef *pResourceRef);
+ void sessionCheckLocksForRemove(RsSession *pSession, RsResourceRef *pResourceRef);
+};
+
+// Iterator data structure to save state while walking through a map
+struct RS_SHARE_ITERATOR
+{
+ RsSharedMapIter mapIt;
+ NvU32 internalClassId;
+ RsShared *pShared; ///< Share that is being iterated over
+};
+
+/**
+ * Top-level structure that RMAPI and RM interface with
+ *
+ * This class is all that needs to be allocated to use the resource server
+ * library.
+ *
+ * The RsServer interface should be kept as narrow as possible. Map and
+ * MapTo are added because <1> the unmap variants operate on addresses and not
+ * handles and <2> having explicit knowledge of map operations in the server is
+ * helpful when dealing with multiple levels of address spaces (e.g., guest
+ * user-mode, guest kernel-mode, host kernel-mode).
+ */
+struct RsServer
+{
+ /**
+ * Privilege level determines what objects a server is allowed to allocate, and
+ * also determines whether additional handle validation needs to be performed.
+ */
+ RS_PRIV_LEVEL privilegeLevel;
+
+ RsClientList *pClientSortedList; ///< Bucket of linked lists of clients (and their locks) owned by this server
+ NvU32 clientCurrentHandleIndex;
+
+ NvBool bConstructed; ///< Determines whether the server is ready to be used
+ PORT_MEM_ALLOCATOR *pAllocator; ///< Allocator to use for all objects allocated by the server
+
+ PORT_RWLOCK *pClientListLock; ///< Lock that needs to be taken when accessing the client list
+
+ PORT_SPINLOCK *pShareMapLock; ///< Lock that needs to be taken when accessing the shared resource map
+ RsSharedMap shareMap; ///< Map of shared resources
+
+#if (RS_STANDALONE)
+ NvU64 topLockOwnerTid; ///< Thread id of top-lock owner
+ PORT_RWLOCK *pTopLock; ///< Top-level resource server lock
+ PORT_RWLOCK *pResLock; ///< Resource-level resource server lock
+#if LOCK_VAL_ENABLED
+ LOCK_VAL_LOCK topLockVal;
+ LOCK_VAL_LOCK resLockVal;
+#endif
+#endif
+
+ /// Print out a list of all resources that will be freed when a free request is made
+ NvBool bDebugFreeList;
+
+ /// If true, control call param copies will be performed outside the top/api lock
+ NvBool bUnlockedParamCopy;
+
+ /**
+ * Setting this flag to false disables any attempts to
+ * automatically acquire access rights or to control access to resources by
+ * checking for access rights.
+ */
+ NvBool bRsAccessEnabled;
+
+ /**
+ * Mask of interfaces (RS_API_*) that will use a read-only top lock by default
+ */
+ NvU32 roTopLockApiMask;
+
+ /// Share policies which clients default to when no other policies are used
+ RsShareList defaultInheritedSharePolicyList;
+ /// Share policies to apply to all shares, regardless of other policies
+ RsShareList globalInternalSharePolicyList;
+
+ NvU32 internalHandleBase;
+
+ NvU32 activeClientCount;
+ NvU64 activeResourceCount;
+};
+
+/**
+ * Construct a server instance. This must be performed before any other server
+ * operation.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] privilegeLevel Privilege level for this resource server instance
+ * @param[in] maxDomains Maximum number of domains to support, or 0 for the default
+ */
+NV_STATUS serverConstruct(RsServer *pServer, RS_PRIV_LEVEL privilegeLevel, NvU32 maxDomains);
+
+/**
+ * Destroy a server instance. Destructing a server does not guarantee that child domains
+ * and clients will be appropriately freed. serverFreeDomain should be explicitly called
+ * on all allocated domains to ensure all clients and resources get cleaned up.
+ *
+ * @param[in] pServer This server instance
+ */
+NV_STATUS serverDestruct(RsServer *pServer);
+
+/**
+ * Allocate a domain handle. Domain handles are used to track clients created by a domain.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hParentDomain
+ * @param[in] pAccessControl
+ * @param[out] phDomain
+ *
+ */
+NV_STATUS serverAllocDomain(RsServer *pServer, NvU32 hParentDomain, ACCESS_CONTROL *pAccessControl, NvHandle *phDomain);
+
+/**
+ * Verify that the calling user is allowed to perform the access. This check only
+ * applies to calls from RING_USER or RING_KERNEL. No check is performed in
+ * RING_HOST.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hDomain
+ * @param[in] hClient
+ *
+ */
+NV_STATUS serverValidate(RsServer *pServer, NvU32 hDomain, NvHandle hClient);
+
+/**
+ * Verify that the domain has sufficient permission to allocate the given class.
+ * @param[in] pServer
+ * @param[in] hDomain
+ * @param[in] externalClassId External resource class id
+ */
+NV_STATUS serverValidateAlloc(RsServer *pServer, NvU32 hDomain, NvU32 externalClassId);
+
+/**
+ * Free a domain handle. All clients of this domain will be freed.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hDomain The handle of the domain to free
+ */
+NV_STATUS serverFreeDomain(RsServer *pServer, NvHandle hDomain);
+
+/**
+ * Allocate a client handle. A client handle is required to allocate resources.
+ *
+ * @param[in] pServer This server instance
+ * @param[inout] pParams Client allocation parameters
+ */
+NV_STATUS serverAllocClient(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
+
+/**
+ * Free a client handle. All resource references owned by the client will be
+ * freed.
+ *
+ * It is invalid to attempt to free a client from a user other than the one
+ * that allocated it.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] pParams Client free params
+ */
+NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams);
+
+/**
+ * Free a list of client handles. All resource references owned by the clients will be
+ * freed. All priority resources will be freed first across all listed clients.
+ *
+ * It is invalid to attempt to free a client from a user other than the one
+ * that allocated it.
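+ *
+ * A hypothetical sketch (handles and values are illustrative):
+ * @code
+ * NvHandle hClients[2] = { hClientA, hClientB };
+ * status = serverFreeClientList(pServer, hClients, 2, 0, &secInfo);
+ * @endcode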
+ *
+ * @param[in] pServer This server instance
+ * @param[in] phClientList The list of client handles to free
+ * @param[in] numClients The number of clients in the list
+ * @param[in] freeState User-defined free state
+ * @param[in] pSecInfo Security Info
+ *
+ */
+NV_STATUS serverFreeClientList(RsServer *pServer, NvHandle *phClientList, NvU32 numClients, NvU32 freeState, API_SECURITY_INFO *pSecInfo);
+
+/**
+ * Allocate a resource.
+ *
+ * It is invalid to attempt to allocate a resource under a client owned by a
+ * user other than the one that allocated the client.
+ *
+ * @param[in] pServer This server instance
+ * @param[inout] pParams The allocation parameters
+ */
+NV_STATUS serverAllocResource(RsServer *pServer, RS_RES_ALLOC_PARAMS *params);
+
+/**
+ * Allocate a ref-counted resource share.
+ *
+ * @param[in] pServer
+ * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared)
+ * @param[out] ppShare Allocated share
+ */
+NV_STATUS serverAllocShare(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, RsShared **ppShare);
+
+/**
+ * Allocate a ref-counted resource share with Halspec parent.
+ *
+ * @param[in] pServer
+ * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared)
+ * @param[out] ppShare Allocated share
+ * @param[in] pHalspecParent Parent object whose Halspec can be used for the shared class object
+ */
+NV_STATUS serverAllocShareWithHalspecParent(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, RsShared **ppShare, Object *pHalspecParent);
+
+/**
+ * Get the ref-count of a resource share.
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NvS32 serverGetShareRefCount(RsServer *pServer, RsShared *pShare);
+
+/**
+ * Increment the ref-count of a resource share.
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NV_STATUS serverRefShare(RsServer *pServer, RsShared *pShare);
+
+/**
+ * Decrement the ref-count of a resource share. If the ref-count
+ * has reached zero, the resource share will be freed.
+ *
+ * @param[in] pServer
+ * @param[in] pShare Resource share
+ */
+NV_STATUS serverFreeShare(RsServer *pServer, RsShared *pShare);
+
+/**
+ * Get an iterator to the elements in the server's shared object map
+ * @param[in] pServer
+ * @param[in] internalClassId If non-zero, only RsShared that are (or can be
+ * derived from) the specified class will be returned
+ */
+RS_SHARE_ITERATOR serverShareIter(RsServer *pServer, NvU32 internalClassId);
+
+/**
+ * Get the next iterator to the elements in the server's shared object map
+ */
+NvBool serverShareIterNext(RS_SHARE_ITERATOR*);
+
+
+/**
+ * Allocate a resource. Assumes top-level lock has been taken.
+ *
+ * It is invalid to attempt to allocate a resource under a client owned by a
+ * user other than the one that allocated the client. User-implemented.
+ *
+ * @param[in] pServer This server instance
+ * @param[inout] pParams The allocation parameters
+ */
+extern NV_STATUS serverAllocResourceUnderLock(RsServer *pServer, RS_RES_ALLOC_PARAMS *pAllocParams);
+
+/**
+ * Call Free RPC for given resource. Assumes top-level lock has been taken.
+ *
+ * @param[in] pServer This server instance
+ * @param[inout] pFreeParams The Free parameters
+ */
+extern NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams);
+
+/**
+ * Copy-in parameters supplied by caller, and initialize API state. User-implemented.
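+ *
+ * A hypothetical pairing of copy-in and copy-out around an allocation (the
+ * call in the middle is illustrative):
+ * @code
+ * API_STATE *pApiState = NULL;
+ * status = serverAllocApiCopyIn(pServer, pAllocParams, &pApiState);
+ * if (status == NV_OK)
+ *     status = serverAllocResourceUnderLock(pServer, pAllocParams);
+ * status = serverAllocApiCopyOut(pServer, status, pApiState);
+ * @endcode
+ *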
+ * @param[in] pServer
+ * @param[in] pAllocParams Resource allocation parameters
+ * @param[out] ppApiState User-defined API_STATE; should be allocated by this function
+ */
+extern NV_STATUS serverAllocApiCopyIn(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, API_STATE **ppApiState);
+
+/**
+ * Copy-out parameters supplied by caller, and release API state. User-implemented.
+ * @param[in] pServer
+ * @param[in] status Status of allocation request
+ * @param[in] pApiState API_STATE for the allocation
+ */
+extern NV_STATUS serverAllocApiCopyOut(RsServer *pServer, NV_STATUS status, API_STATE *pApiState);
+
+/**
+ * Obtain a second client handle to lock if required for the allocation.
+ * @param[in] pParams Resource allocation parameters
+ * @param[out] phClient The handle of the client to lock, if any
+ */
+extern NV_STATUS serverLookupSecondClient(RS_RES_ALLOC_PARAMS_INTERNAL *pParams, NvHandle *phClient);
+
+/**
+ * Acquires a top-level lock. User-implemented.
+ * @param[in] pServer
+ * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released
+ */
+extern NV_STATUS serverTopLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Releases a top-level lock. User-implemented.
+ * @param[in] pServer
+ * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Flags indicating the locks that need to be released
+ */
+extern void serverTopLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Acquires a session lock.
+ * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[in] pResourceRef Resource reference to take session locks on
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released
+ */
+extern NV_STATUS serverSessionLock_Prologue(LOCK_ACCESS_TYPE access, RsResourceRef *pResourceRef, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Releases a session lock.
+ * @param[in] pServer
+ * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Flags indicating the locks that need to be released
+ */
+extern void serverSessionLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Acquires a resource-level lock. User-implemented.
+ * @param[in] pServer
+ * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released
+ */
+extern NV_STATUS serverResLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * Releases a resource-level lock. User-implemented.
+ * @param[in] pServer
+ * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pReleaseFlags Flags indicating the locks that need to be released
+ */
+extern void serverResLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags);
+
+/**
+ * WAR for additional tasks that must be performed after resource-level locks are released. User-implemented.
+ * @param[in]    pServer This server instance
+ * @param[in]    status Allocation status
+ * @param[in]    bClientAlloc Caller is attempting to allocate a client
+ * @param[inout] pAllocParams Allocation parameters
+ */
+extern NV_STATUS serverAllocEpilogue_WAR(RsServer *pServer, NV_STATUS status, NvBool bClientAlloc, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams);
+
+/**
+ * Free a resource reference and all of its descendants. This will decrease the
+ * resource's reference count. The resource itself will only be freed if there
+ * are no more references to it.
+ *
+ * It is invalid to attempt to free a resource from a user other than the one that allocated it.
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   pParams Free parameters
+ */
+NV_STATUS serverFreeResourceTree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams);
+
+/**
+ * Same as serverFreeResourceTree except the top-level lock is assumed to have been taken.
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   pParams Free parameters
+ */
+NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pParams);
+
+/**
+ * Updates the lock flags in the dup parameters
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   pParams Dup parameters
+ */
+extern NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams);
+
+/**
+ * Updates the lock flags in the free parameters
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   pParams Free parameters
+ */
+extern NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams);
+
+/**
+ * Updates the lock flags for automatic inter-unmap during free
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams Unmap params, contained pLockInfo will be modified
+ */
+extern NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams);
+
+/**
+ * Initialize parameters for a recursive call to serverFreeResourceTree. User-implemented.
+ * @param[in]    hClient
+ * @param[in]    hResource
+ * @param[inout] pLockInfo Lock state
+ * @param[inout] pParams
+ */
+extern NV_STATUS serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams);
+
+/**
+ * Common operations performed after top locks and client locks are taken, but before
+ * the control call is executed. This includes validating the control call cookie,
+ * looking up locking flags, parameter copy-in, and taking resource locks.
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[out]   pAccess Lock access type
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ */
+NV_STATUS serverControl_Prologue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE *pAccess, NvU32 *pReleaseFlags);
+
+/**
+ * Common operations performed after the control call is executed. This
+ * includes releasing locks and parameter copy-out.
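+ *
+ * An added, illustrative pairing sketch (not from the original header;
+ * executeControlCall is a hypothetical dispatch step):
+ * ~~~{.c}
+ * LOCK_ACCESS_TYPE access;
+ * NvU32 releaseFlags = 0;
+ * status = serverControl_Prologue(pServer, pParams, &access, &releaseFlags);
+ * if (status == NV_OK)
+ *     status = executeControlCall(pParams); // hypothetical
+ * // Run the epilogue on success and failure alike so locks are released.
+ * status = serverControl_Epilogue(pServer, pParams, access, &releaseFlags, status);
+ * ~~~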
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[in]    access Lock access type
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ * @param[in]    status Control call status
+ */
+NV_STATUS serverControl_Epilogue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE access, NvU32 *pReleaseFlags, NV_STATUS status);
+
+/**
+ * Initialize an NVOC export control call cookie
+ *
+ * @param[in]    pExportedEntry
+ * @param[inout] pCookie
+ */
+extern void serverControl_InitCookie(const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, RS_CONTROL_COOKIE *pCookie);
+
+/**
+ * Validate an NVOC export control call cookie
+ *
+ * @param[in]    pParams
+ * @param[inout] pCookie
+ */
+extern NV_STATUS serverControl_ValidateCookie(RS_RES_CONTROL_PARAMS_INTERNAL *pParams, RS_CONTROL_COOKIE *pCookie);
+
+/**
+ * Copy-in control call parameters
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[inout] pCookie Control call cookie
+ */
+extern NV_STATUS serverControlApiCopyIn(RsServer *pServer,
+                                        RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+                                        RS_CONTROL_COOKIE *pCookie);
+
+/**
+ * Copy-out control call parameters
+ *
+ * @param[in]    pServer ResServ instance
+ * @param[in]    pParams Control call parameters
+ * @param[inout] pCookie Control call cookie
+ * @param[in]    rmStatus Control call status
+ */
+extern NV_STATUS serverControlApiCopyOut(RsServer *pServer,
+                                         RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+                                         RS_CONTROL_COOKIE *pCookie,
+                                         NV_STATUS rmStatus);
+
+/**
+ * Determine whether an API supports a read-only lock for a given lock
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   api RS_API*
+ */
+NvBool serverSupportsReadOnlyLock(RsServer *pServer, RS_LOCK_ENUM lock, RS_API_ENUM api);
+
+/**
+ * Determine whether the current thread has taken the RW API lock
+ * @param[in]   pServer ResServ instance
+ */
+extern NvBool serverRwApiLockIsOwner(RsServer *pServer);
+
+/**
+ * Lookup locking flags for a resource alloc
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams Allocation parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverAllocResourceLookupLockFlags(RsServer *pServer,
+                                                    RS_LOCK_ENUM lock,
+                                                    RS_RES_ALLOC_PARAMS_INTERNAL *pParams,
+                                                    LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for a resource free
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams Free parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverFreeResourceLookupLockFlags(RsServer *pServer,
+                                                   RS_LOCK_ENUM lock,
+                                                   RS_RES_FREE_PARAMS_INTERNAL *pParams,
+                                                   LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for a resource copy
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams Dup parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverCopyResourceLookupLockFlags(RsServer *pServer,
+                                                   RS_LOCK_ENUM lock,
+                                                   RS_RES_DUP_PARAMS *pParams,
+                                                   LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for a resource access share
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams Share parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverShareResourceLookupLockFlags(RsServer *pServer,
+                                                    RS_LOCK_ENUM lock,
+                                                    RS_RES_SHARE_PARAMS *pParams,
+                                                    LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for a control call
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams Control call parameters
+ * @param[in]   pCookie Control call cookie
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverControlLookupLockFlags(RsServer *pServer,
+                                              RS_LOCK_ENUM lock,
+                                              RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+                                              RS_CONTROL_COOKIE *pCookie,
+                                              LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for a map call
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams CPU map parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverMapLookupLockFlags(RsServer *pServer,
+                                          RS_LOCK_ENUM lock,
+                                          RS_CPU_MAP_PARAMS *pParams,
+                                          LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for an unmap call
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams CPU unmap parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverUnmapLookupLockFlags(RsServer *pServer,
+                                            RS_LOCK_ENUM lock,
+                                            RS_CPU_UNMAP_PARAMS *pParams,
+                                            LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for an inter-resource map call
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams Inter-resource map parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverInterMapLookupLockFlags(RsServer *pServer,
+                                               RS_LOCK_ENUM lock,
+                                               RS_INTER_MAP_PARAMS *pParams,
+                                               LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Lookup locking flags for an inter-resource unmap call
+ *
+ * @param[in]   pServer ResServ instance
+ * @param[in]   lock RS_LOCK_*
+ * @param[in]   pParams Inter-resource unmap parameters
+ * @param[out]  pAccess Computed lock access
+ */
+extern NV_STATUS serverInterUnmapLookupLockFlags(RsServer *pServer,
+                                                 RS_LOCK_ENUM lock,
+                                                 RS_INTER_UNMAP_PARAMS *pParams,
+                                                 LOCK_ACCESS_TYPE *pAccess);
+
+/**
+ * Fill the server's share policy lists with any default or global policies needed
+ */
+extern NV_STATUS serverInitGlobalSharePolicies(RsServer *pServer);
+
+/**
+ * Issue a control command to a resource
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   pParams Control parameters
+ */
+NV_STATUS serverControl(RsServer *pServer, RS_RES_CONTROL_PARAMS *pParams);
+
+/**
+ * Copy a resource owned by one client into another client.
+ *
+ * The clients must be in the same client handle space. The underlying
+ * resource is not duplicated, but it is refcounted so the resource will
+ * not be freed until the reference count hits zero.
+ *
+ * Copying a resource will fail if the user making the call does not own
+ * the source client.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams Resource duplication parameters
+ */
+NV_STATUS serverCopyResource(RsServer *pServer, RS_RES_DUP_PARAMS *pParams);
+
+/**
+ * Share certain access rights to a resource with other clients using the provided share policy
+ *
+ * The policy entry passed in will be added to the object's share policy list.
+ * If bRevoke is true, the policy will be removed instead.
+ *
+ * Sharing will fail if the user making the call does not own the source client.
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   pParams Resource sharing parameters
+ */
+NV_STATUS serverShareResourceAccess(RsServer *pServer, RS_RES_SHARE_PARAMS *pParams);
+
+/**
+ * Creates a CPU mapping of the resource in the virtual address space of the process.
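+ *
+ * An added call-shape sketch, heavily hedged: populating RS_CPU_MAP_PARAMS is
+ * environment-specific and its fields are intentionally not shown here.
+ * ~~~{.c}
+ * RS_CPU_MAP_PARAMS mapParams;
+ * // ... fill in mapParams for the target resource ...
+ * status = serverMap(pServer, hClient, hResource, &mapParams);
+ * // ... on success, the mapping is later released with serverUnmap ...
+ * ~~~
+ *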
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in]    pServer This server instance
+ * @param[in]    hClient Client handle of the resource to map
+ * @param[in]    hResource Handle of the resource to map
+ * @param[inout] pParams CPU mapping parameters
+ */
+NV_STATUS serverMap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_MAP_PARAMS *pParams);
+
+/**
+ * Release a CPU virtual address mapping
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   hClient Client handle of the resource to unmap
+ * @param[in]   hResource Handle of the resource to unmap
+ * @param[in]   pParams CPU unmapping parameters
+ */
+NV_STATUS serverUnmap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_UNMAP_PARAMS *pParams);
+
+/**
+ * Pre-map operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pMapParams CPU mapping parameters
+ */
+NV_STATUS serverMap_Prologue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams);
+
+/**
+ * Post-map operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pMapParams CPU mapping parameters
+ */
+void serverMap_Epilogue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams);
+
+/**
+ * Pre-unmap operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pUnmapParams CPU unmapping parameters
+ */
+NV_STATUS serverUnmap_Prologue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Post-unmap operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pUnmapParams CPU unmapping parameters
+ */
+void serverUnmap_Epilogue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Creates an inter-mapping between two resources
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in]    pServer This server instance
+ * @param[inout] pParams Inter-mapping parameters
+ */
+NV_STATUS serverInterMap(RsServer *pServer, RS_INTER_MAP_PARAMS *pParams);
+
+/**
+ * Release an inter-mapping between two resources
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   pParams Inter-unmapping parameters
+ */
+NV_STATUS serverInterUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams);
+
+/**
+ * Pre-inter-map operations. Called with top/client locks acquired.
+ * This function acquires resource locks.
+ *
+ * @param[in]    pServer
+ * @param[in]    pMapperRef The resource that can be used to create the mapping
+ * @param[in]    pMappableRef The resource that can be mapped
+ * @param[inout] pMapParams Mapping parameters
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ */
+NV_STATUS serverInterMap_Prologue(RsServer *pServer, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags);
+
+/**
+ * Post-inter-map operations. Called with top, client, and resource locks acquired.
+ * This function releases resource locks.
+ *
+ * @param[in]    pServer
+ * @param[inout] pMapParams Mapping parameters
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ */
+void serverInterMap_Epilogue(RsServer *pServer, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags);
+
+/**
+ * Pre-inter-unmap operations. Called with top, client, and resource locks acquired.
+ *
+ * @param[in]    pServer
+ * @param[inout] pUnmapParams Inter-unmapping parameters
+ */
+NV_STATUS serverInterUnmap_Prologue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Post-inter-unmap operations. Called with top, client, and resource locks acquired.
+ *
+ * @param[in]    pServer
+ * @param[inout] pUnmapParams Inter-unmapping parameters
+ */
+void serverInterUnmap_Epilogue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Acquire a client pointer from a client handle. The caller is responsible for
+ * ensuring that lock ordering is not violated (otherwise there can be
+ * deadlock): clients must be locked in increasing order of client index (not
+ * handle).
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   hClient The client to acquire
+ * @param[in]   lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[out]  ppClient Pointer to the RsClient
+ */
+NV_STATUS serverAcquireClient(RsServer *pServer, NvHandle hClient, LOCK_ACCESS_TYPE lockAccess, RsClient **ppClient);
+
+/**
+ * Release a client pointer
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE
+ * @param[in]   pClient Pointer to the RsClient
+ */
+NV_STATUS serverReleaseClient(RsServer *pServer, LOCK_ACCESS_TYPE lockAccess, RsClient *pClient);
+
+/**
+ * Get a client pointer from a client handle without taking any locks; the
+ * caller is expected to already hold the necessary locks.
+ *
+ * @param[in]   pServer This server instance
+ * @param[in]   hClient The client to look up
+ * @param[out]  ppClient Pointer to the RsClient
+ */
+NV_STATUS serverGetClientUnderLock(RsServer *pServer, NvHandle hClient, RsClient **ppClient);
+
+/**
+ * Get the count of clients allocated under this resource server
+ *
+ * @param[in]   pServer This server instance
+ */
+NvU32 serverGetClientCount(RsServer *pServer);
+
+/**
+ * Get the count of resources allocated under this resource server
+ *
+ * @param[in]   pServer This server instance
+ */
+NvU64 serverGetResourceCount(RsServer *pServer);
+
+/**
+ * Swap a TLS call context entry and increment the TLS entry refcount.
+ * A new TLS entry for call context will be allocated if necessary.
+ *
+ * @note This should be paired with a corresponding resservRestoreTlsCallContext call
+ */
+NV_STATUS resservSwapTlsCallContext(CALL_CONTEXT **ppOldCallContext, CALL_CONTEXT *pNewCallContext);
+
+/**
+ * Get the current TLS call context. This will not increment a refcount on the TLS entry.
+ */
+CALL_CONTEXT *resservGetTlsCallContext(void);
+
+/**
+ * Restore a TLS call context entry and decrement the TLS entry refcount.
+ * @note This should be paired with a corresponding resservSwapTlsCallContext call
+ */
+NV_STATUS resservRestoreTlsCallContext(CALL_CONTEXT *pOldCallContext);
+
+/**
+ * Find a resource reference of a given type from the TLS call context
+ * @param[in]  internalClassId Only return a reference if it matches this type
+ * @param[in]  bSearchAncestors Search parents of the call context resource ref
+ */
+RsResourceRef *resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h
new file mode 100644
index 0000000..eb45c2d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h
@@ -0,0 +1,345 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief Thread local storage public interface
+ */
+
+#include "nvport/nvport.h"
+
+#ifndef _NV_TLS_H_
+#define _NV_TLS_H_
+
+/**
+ * @defgroup Thread local storage operations
+ *
+ * @brief This module contains thread local storage functionality used by other
+ * modules.
+ *
+ * @par Module dependencies:
+ *  - NvPort (UTIL, ATOMIC, MEMORY, SYNC and THREAD modules)
+ *  - NvContainers (Map)
+ *  - NvUtils (NV_PRINTF and NV_ASSERT)
+ *
+ * @par TLS architecture:
+ *  A base TLS allocation unit is an Entry (@ref TLS_ENTRY). Entries are local
+ *  to a thread and are identified by a 64-bit ID. Entries are lazy-allocated
+ *  and refcounted. All entries for a given thread are organized in one Map -
+ *  i.e. TLS has as many Maps active as there are threads; each map is
+ *  inherently single-threaded. The Map for a given thread ID is obtained by
+ *  searching a map of all threads with thread ID as key.
+ *  The whole TLS system can be thought of as:
+ *  map<threadId, map<entryId, TLS_ENTRY>>
+ *
+ * @par Complexity:
+ *  All operations are O(log(numActiveThreads) + log(numEntriesForGivenThread))
+ *
+ * @par A note on ISRs and DPCs:
+ *  Interrupt Service Routines (and in some cases Deferred Procedure Calls) do
+ *  not have their own thread IDs - they can have the same ID as a regular
+ *  thread. Because of this, they are kept in a separate map indexed by their
+ *  stack pointer instead of thread ID. Because getting the exact base of the
+ *  ISR stack can be difficult, when searching we use the closest one, in the
+ *  direction of stack growth. This assumes that the given entry always exists,
+ *  so ISR thread entries are preallocated with @ref tlsIsrInit.
+ *
+ *  An example of how this works:
+ *  ~~~{.c}
+ *  if (is_isr())
+ *      return isr_map.find(get_approx_sp());
+ *  else
+ *      return thread_map.find(get_thread_id());
+ *  ~~~
+ *  The exact definition of is_isr() varies by platform, but generally means
+ *  "if it does not have a unique thread ID". Threaded IRQs are not ISRs.
+ *
+ * @par Locking:
+ *  Currently, TLS has two spinlocks - separate locks for ISR and passive
+ *  thread maps. This will be changed to RW-spinlocks in the future.
+ *  We cannot use RW sleeper locks in passive threads, since they may modify
+ *  their IRQL and thus be unable to acquire them, even conditionally.
+ *
+ *  In cases where ISRs are not allowed to acquire a spinlock at all, the ISR
+ *  map is implemented in a lockless fashion.
+ *  This is slower than the locked
+ *  implementation (O(maxIsrs)), but works in cases where all locks are banned.
+ *
+ *
+ * @{
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @note Only returned in cases of irregular order of public API calls.
+ */
+#define TLS_ERROR_VAL ~0
+
+/**
+ * @brief Global TLS structure initialization.
+ *
+ * Must be called before any TLS functions can be called.
+ *
+ * If this function returns an error then calling any TLS function will result
+ * in undefined behavior.
+ *
+ * Called on RmInitRm().
+ * @return NV_OK if successful;
+ * @return Error code otherwise.
+ *
+ */
+NV_STATUS tlsInitialize(void);
+
+/**
+ * @brief Global TLS structure termination.
+ *
+ * It frees resources allocated by tlsInitialize.
+ * Called on RmDestroyRm().
+ *
+ */
+void tlsShutdown(void);
+
+enum {
+    TLS_ENTRY_ID_THREADSTATE,
+    TLS_ENTRY_ID_RESSERV_1,
+    TLS_ENTRY_ID_CURRENT_GPU_INSTANCE,
+    TLS_ENTRY_ID_PRIORITY,
+    TLS_ENTRY_ID_DYNAMIC,             // dynamic allocations start here
+    TLS_ENTRY_ID_TAG_START = 0x100000 // Custom tags start here
+};
+/**
+ * @brief Allocates a new entry spot and returns a unique entry ID.
+ *
+ * IDs are unique across all threads.
+ *
+ * @return 0 if all IDs are used;
+ * @return unique ID otherwise.
+ *
+ */
+NvU64 tlsEntryAlloc(void);
+
+/**
+ * @brief Get pointer to TLS entry for given @p entryId.
+ *
+ * This function increments the refCount of the given entry.
+ *
+ * @return NULL if @p entryId is invalid (not returned by @ref tlsEntryAlloc)
+ *         or if there is not enough memory.
+ * @return Pointer to a void* that users can use to point to a custom structure.
+ *
+ * Example usage:
+ * ~~~{.c}
+ * NvU64 id = tlsEntryAlloc();
+ * MY_THREAD_DATA **ppData = tlsEntryAcquire(id);
+ * if (*ppData == NULL)
+ *     *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA));
+ * ~~~
+ *
+ * @note On first call for given @p entryId, the dereferenced (user) pointer
+ * will be set to NULL - (*tlsEntryAcquire(x) == NULL)
+ *
+ */
+NvP64 *tlsEntryAcquire(NvU64 entryId);
+
+/**
+ * @brief Like @ref tlsEntryAcquire, but memory is allocated using @p pAllocator.
+ *
+ * @note Should be used only when performance is important in allocation or
+ * when a spinlock is acquired in a non-ISR thread and there is a need for the TLS.
+ *
+ * @note pAllocator should be thread safe.
+ */
+NvP64 *tlsEntryAcquireWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pAllocator);
+
+/**
+ * @brief Release the TLS entry for given @p entryId.
+ *
+ * This function decrements the refCount of the given entry.
+ *
+ * @return refCount after releasing the structure if @p entryId is valid,
+ * @return TLS_ERROR_VAL if TLS entry for given @p entryId doesn't exist.
+ *
+ * ~~~{.c}
+ * if (tlsEntryRelease(id) == 0)
+ *     portMemFree(*ppData);
+ * ~~~
+ */
+NvU32 tlsEntryRelease(NvU64 entryId);
+
+/**
+ * @brief Like @ref tlsEntryRelease, but memory is allocated using @p pAllocator.
+ *
+ * @note Should be used only when performance is important in allocation or
+ * when a spinlock is acquired in a non-ISR thread and there is a need for the TLS.
+ *
+ * @note @p pAllocator should be thread safe.
+ */
+NvU32 tlsEntryReleaseWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pAllocator);
+
+/**
+ * @brief Get pointer to TLS data for given entryId.
+ *
+ * This function will not modify the refCount, and does not return a double
+ * pointer required to set the entry value.
+ *
+ * @return NULL if the entry doesn't exist.
+ * @return Otherwise, pointer to the user's custom structure.
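+ *
+ * An added lifecycle sketch combining the acquire/release calls documented
+ * above (illustrative; MY_THREAD_DATA is a hypothetical user structure):
+ * ~~~{.c}
+ * MY_THREAD_DATA **ppData = tlsEntryAcquire(id);
+ * if (*ppData == NULL)
+ *     *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA));
+ * MY_THREAD_DATA *pData = *ppData;
+ * // ... use pData ...
+ * if (tlsEntryRelease(id) == 0)
+ *     portMemFree(pData); // last reference released; free the user data
+ * ~~~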
+ *
+ * Example usage:
+ * ~~~{.c}
+ * NvU64 id = tlsEntryAlloc();
+ * MY_THREAD_DATA **ppData = tlsEntryAcquire(id);
+ * if (*ppData == NULL)
+ * {
+ *     *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA));
+ *     (*ppData)->myData = 1;
+ * }
+ * MY_THREAD_DATA *pData = tlsEntryGet(id);
+ * if (pData->myData == 1)
+ * {
+ *     ...
+ * }
+ * ~~~
+ *
+ */
+NvP64 tlsEntryGet(NvU64 entryId);
+
+/**
+ * @brief Increment the refCount of given TLS entry.
+ *
+ * If an entry with given entryId doesn't exist, this function does nothing.
+ *
+ * This is useful when the code requires a call to a function that might call
+ * @ref tlsEntryRelease, but TLS should not be freed. An example might be when
+ * calling a function that acquires the GPU lock while already holding the lock.
+ * Currently, the code will temporarily release the lock, so the nested function
+ * acquires it again. Since rmGpuLock{Acquire,Release} acquires/releases TLS,
+ * this release could cause the data to be freed.
+ *
+ * @return TLS_ERROR_VAL if the entry doesn't exist.
+ * @return New TLS entry refCount, after increment.
+ *
+ */
+NvU32 tlsEntryReference(NvU64 entryId);
+
+/**
+ * @brief Decrement the refCount of given TLS entry.
+ *
+ * If an entry with given entryId doesn't exist, this function does nothing.
+ * See @ref tlsEntryReference for details.
+ *
+ * @return TLS_ERROR_VAL if the entry doesn't exist.
+ * @return New TLS entry refCount, after decrement.
+ *
+ */
+NvU32 tlsEntryUnreference(NvU64 entryId);
+
+/// @brief Size of memory to preallocate on ISR stack for TLS
+#if PORT_IS_CHECKED_BUILD
+// Checked builds have per-allocation overhead for tracking
+#define TLS_ISR_ALLOCATOR_SIZE 512
+#else
+#if defined(LOCK_VAL_ENABLED)
+    #define TLS_ISR_ALLOCATOR_SIZE 400
+#else
+    #define TLS_ISR_ALLOCATOR_SIZE 256
+#endif
+#endif
+
+/**
+ * @brief Allocates thread id for current ISR thread.
+ *
+ * @note This function should be called at the beginning of the ISR, as early as possible.
+ *
+ */
+void tlsIsrInit(PORT_MEM_ALLOCATOR *pIsrAllocator);
+
+/**
+ * @brief Destroys thread id for current ISR thread.
+ *
+ * @note Should be called at the end of the ISR. Must be NOINLINE because if it gets
+ * inlined and tlsIsrInit doesn't, SP order can be wrong.
+ */
+NV_NOINLINE void tlsIsrDestroy(PORT_MEM_ALLOCATOR *pIsrAllocator);
+
+/**
+ * @brief Returns an allocator that can be used for memory allocations in ISR
+ * threads. If called outside of an ISR, NULL is returned.
+ * @note Should be called between tlsIsrInit and tlsIsrDestroy if you are in an ISR;
+ * otherwise it will ASSERT and return NULL.
+ */
+PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void);
+
+/**
+ * @brief Set if DPCs have a unique thread ID that can be acquired by
+ * @ref portThreadGetCurrentThreadId. Windows DPCs have the same thread ID
+ * as the thread they preempted, so they are treated like ISRs.
+ *
+ * This isn't used by the TLS implementation, but is needed to decide whether
+ * the DPCs should call @ref tlsIsrInit
+ */
+#if PORT_IS_KERNEL_BUILD && !defined(NV_MODS) && NVOS_IS_WINDOWS
+#define TLS_DPC_HAVE_UNIQUE_ID 0
+#else
+#define TLS_DPC_HAVE_UNIQUE_ID 1
+#endif
+
+/**
+ * @brief Set if threads can modify their own IRQL/interrupt context.
+ * On such builds, we cannot use @ref portUtilIsInterruptContext to decide
+ * whether a given thread is an ISR or a passive thread, and instead use a
+ * per-CPU ISR counter.
+ */ +#if PORT_IS_KERNEL_BUILD && (defined(NV_MODS) || NVOS_IS_WINDOWS) +#define TLS_THREADS_CAN_RAISE_IRQL 1 +#else +#define TLS_THREADS_CAN_RAISE_IRQL 0 +#endif + +/** + * @brief Set if ISRs are allowed to acquire a spinlock. On VMWare, the top + * level interrupt handler (ACK function) is not allowed to hold the spinlock + * for any amount of time (enforced by validation suite), so it uses a slower + * lockless implementation. + */ +#if PORT_IS_KERNEL_BUILD && NVOS_IS_VMWARE +#define TLS_ISR_CAN_USE_LOCK 0 +#else +#define TLS_ISR_CAN_USE_LOCK 1 +#endif + +/// @brief If set, a copy of THREAD_STATE_NODE pointer will be kept in TLS. +#ifndef TLS_MIRROR_THREADSTATE +#define TLS_MIRROR_THREADSTATE 0 +#endif + +#ifdef __cplusplus +} +#endif + +///@} + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h new file mode 100644 index 0000000..873200a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h @@ -0,0 +1,684 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file nv_enum.h + * @brief A header providing code-generation tools to define an enumerated type + * from a specification of a potentially-nested enum of limited depth. + * @see confluence page "Proposal for Better Enum Generation (NV_ENUM) Version 1.0" + */ + +/** + * @defgroup NV_UTILS_ENUM Infrastructure for generating better enumerated values. + * + * @brief Generates symbols comprising an enumerated type given a list of + * entries provided via macro argument. + * + * This file exposes macro functions which generate enum types and associated + * metadata from an enum specification consisting of entry names and values, + * with support for nesting enums up to a maximum depth of 2. The associated + * metadata generated from the enum specification allows for conversion of + * contiguous enums (those without holes within their valid value range) to and + * from indices, iteration over enum values (for each loop) and runtime + * determination of whether a given integer is a valid enum value. Additionally, + * macros are provided to "export" an enum such that only the entry names and + * values are defined, and no name is given to the enum. 
This is useful in
+ * situations where two different APIs utilize the same enum definition, such as
+ * in the RM SDK where enums are disallowed in control call parameters, but are
+ * very much desired inside of the driver.
+ * @{
+ */
+
+#ifndef NV_ENUM_H_
+#define NV_ENUM_H_
+
+#define NV_ENUM_MIN ((NvS64) 0)
+#define NV_ENUM_MAX ((NvS64)0xFFFFFFFF)
+
+/** @brief Fully expands both arguments, then concatenates them. */
+#define NV_ENUM_CONCATENATE(a, b)  NV_ENUM_CONCATENATE2(a, b)
+#define NV_ENUM_CONCATENATE2(a, b) _NV_ENUM_CONCATENATE(a, b)
+#define _NV_ENUM_CONCATENATE(a, b) a##b
+
+/** @brief Fully expands the given argument, then stringifies it. */
+#define NV_ENUM_STRINGIFY(s)  _NV_ENUM_STRINGIFY(s)
+#define _NV_ENUM_STRINGIFY(s) #s
+
+/** @brief Expands the given argument. */
+#define NV_EXPAND_ONE(x) x
+
+/** @brief Discards the given argument, expands to nothing. */
+#define NV_DISCARD_ONE(x)
+
+
+/**
+ * @brief Fully expands both arguments, then attempts to invoke parameter `a` as
+ *        a macro with parameter `b` as its argument.
+ *
+ * @param a : Macro          Macro to apply
+ * @param b : Argument List  Arguments to pass to macro `a`, surrounded by parentheses
+ */
+#define NV_ENUM_APPLY(a, b) _NV_ENUM_APPLY(a, b)
+#define _NV_ENUM_APPLY(a, b) a b
+
+/** @brief expands to the Nth argument */
+#define NV_ENUM_A1(a, b, c, d, e, f) a
+#define NV_ENUM_A2(a, b, c, d, e, f) b
+#define NV_ENUM_A3(a, b, c, d, e, f) c
+#define NV_ENUM_A4(a, b, c, d, e, f) d
+#define NV_ENUM_A5(a, b, c, d, e, f) e
+#define NV_ENUM_A6(a, b, c, d, e, f) f
+
+/**
+ * @brief Expands to an argument list containing 6 elements with argument `b`
+ *        moved to the last place.
+ */
+#define NV_ENUM_DL_POP(a, b, c, d, e, f) (a, c, d, e, f, b)
+
+/**
+ * @brief Expands to argument list `l` with its first element replaced by
+ *        parameter `r`
+ */
+#define NV_ENUM_NV_ENUM_REPLACE_1(r, l) (r, NV_ENUM_APPLY(NV_ENUM_A2, l), NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), NV_ENUM_APPLY(NV_ENUM_A5, l), NV_ENUM_APPLY(NV_ENUM_A6, l))
+
+/**
+ * @brief Expands to argument list `l` with its first element replaced by
+ *        parameter `r1`, its fifth argument replaced by parameter `r5`, and its
+ *        sixth argument replaced by parameter `r6`
+ */
+#define NV_ENUM_REPLACE_3(r1, r5, r6, l) (r1, NV_ENUM_APPLY(NV_ENUM_A2, l), NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), r5, r6)
+
+/**
+ * @brief Expands to argument list `l` with its first element replaced by
+ *        parameter `r1`, its second argument replaced by parameter `r2`, its
+ *        fifth argument replaced by parameter `r5`, and its sixth argument
+ *        replaced by parameter `r6`
+ */
+#define NV_ENUM_REPLACE_4(r1, r2, r5, r6, l) (r1, r2, NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), r5, r6)
+
+
+/*!
+ * @brief Convenience LISP-like wrappers for CAR and CDR
+ *
+ * @note For those unfamiliar with LISP, most LISP interpreters allow for
+ *       convenient macros which expand to nested invocations of CAR and CDR,
+ *       formed by specifying 'A' and 'D' in any order between 'C' and 'R'. A
+ *       regular expression which identifies this pattern is: 'C(A|D)+R'. The
+ *       order of operations is performed from right to left, e.g. CAAADR
+ *       applies CDR, then CAR, then CAR, then CAR. These are used to unpack
+ *       data at specific locations within nested lists, which this tool uses
+ *       often. There is no such thing as a meta-macro in the C preprocessor, so
+ *       we have defined the operations which we use frequently here.
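+ *
+ * @note An added expansion illustration, for clarity (not in the original
+ *       header):
+ *       ~~~{.c}
+ *       // given:  l = ((a, b, c, d, e, f), (g, h, i, j, k, m), ...)
+ *       // NV_CAAR(l)   expands to a  (1st element of the 1st list)
+ *       // NV_CADADR(l) expands to h  (2nd element of the 2nd list)
+ *       ~~~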
+ * + * @note instead of LISP-style structured lists which are formatted as + * records containing two elements each (e.g. (car, (cdr, ()))), this tool + * uses preprocessor argument lists (e.g. (car, cdr, etc)) because the + * former require proper recursion to deal with, which this tool does not + * have available to it. + * + * @note Because some compilers do not support variadic macros, we cannot use + * the generic versions of CAR and CDR here, so we have replaced them + * with very specific size-restricted versions. + */ +#define NV_CAAR(l) NV_ENUM_APPLY(NV_ENUM_A1, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADAR(l) NV_ENUM_APPLY(NV_ENUM_A2, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDAR(l) NV_ENUM_APPLY(NV_ENUM_A3, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDAR(l) NV_ENUM_APPLY(NV_ENUM_A4, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDDAR(l) NV_ENUM_APPLY(NV_ENUM_A5, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDDDAR(l) NV_ENUM_APPLY(NV_ENUM_A6, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CAADR(l) NV_ENUM_APPLY(NV_ENUM_A1, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADADR(l) NV_ENUM_APPLY(NV_ENUM_A2, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDADR(l) NV_ENUM_APPLY(NV_ENUM_A3, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDDADR(l) NV_ENUM_APPLY(NV_ENUM_A4, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDDDADR(l) NV_ENUM_APPLY(NV_ENUM_A5, NV_ENUM_APPLY(NV_ENUM_A2, l)) + +/*! + * @brief Performs per-entry generation function, and either expands or extinguishes it + * + * @param dat__ Data List - Generation data table + * @param name Identifier - Name of enum entry + * @param value Integer Literal - Value for this entry + * + * @return the result of the generation function for this table, + * or nothing if this layer is being filtered (i.e. nested enum) + */ +#define NV_ENUM_ENTRY(dat__, name, value) \ + NV_ENUM_DAT_ENTRY(dat__) (NV_ENUM_DAT_GEN2(dat__) (dat__, name, value)) + +/*! + * @brief Expands enum entries within nested enum specification using an updated + * data list specification + * + * @note the Current Enum Name is concatenated with parameter `name` + * the function table has its first entry popped + * other variables are unchanged. 
+ *
+ * @param dat__   Data List       - Generation data table
+ * @param name    Token           - String to append to previous enum name
+ * @param res_lo  Integer Literal - Min value of this nested enum
+ * @param res_hi  Integer Literal - Max value of this nested enum
+ * @param entries Macro           - Nested Enum Specification
+ */
+#define NV_ENUM_NEST_EXPAND0(dat__, name, res_lo, res_hi, entries) \
+    entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__)))
+#define NV_ENUM_NEST_EXPAND1(dat__, name, res_lo, res_hi, entries) \
+    entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__)))
+#define NV_ENUM_NEST_EXPAND2(dat__, name, res_lo, res_hi, entries) \
+    entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__)))
+#define NV_ENUM_NEST_EXPAND3(dat__, name, res_lo, res_hi, entries) \
+    entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__)))
+
+/*!
+ * @brief Performs all enum generation for the given nested enum specification
+ *
+ * @note the Current Enum Name is concatenated with parameter `name`
+ *       the Nested Enum Name is updated to match the Current Enum Name
+ *       Res. Min is updated with parameter `res_lo`
+ *       Res. Max is updated with parameter `res_hi`
+ *       the function table has its first entry popped
+ *       other variables are unchanged
+ *
+ * @param dat__   Data List       - Generation data table
+ * @param name    Token           - String to append to previous enum name
+ * @param res_lo  Integer Literal - Min value of this nested enum
+ * @param res_hi  Integer Literal - Max value of this nested enum
+ * @param entries Macro           - Nested Enum Specification
+ */
+#define NV_ENUM_NEST_GEN(dat__, name, res_lo, res_hi, entries) \
+    NV_ENUM_DAT_GEN1(dat__)( \
+        NV_ENUM_APPLY( \
+            NV_ENUM_DL_POP, \
+            NV_ENUM_NV_ENUM_REPLACE_1( \
+                NV_ENUM_REPLACE_4( \
+                    NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), \
+                    NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), \
+                    res_lo, \
+                    res_hi, \
+                    NV_ENUM_APPLY( \
+                        NV_ENUM_A1, \
+                        dat__ \
+                    ) \
+                ), \
+                dat__ \
+            ) \
+        ), \
+        entries \
+    )
+
+/*!
+ * @note Definition: Data List
+ *       This tool packs information used in each depth of processing within a
+ *       nested list, which is passed to each invocation of NV_ENUM_ENTRY and
+ *       NV_ENUM_NEST. The format of this object is as follows:
+ *       (
+ *           (<Root Enum Name>, <0-Depth Nested Enum Name>, <Prefix>, <Current Enum Name>, <Reserved Min>, <Reserved Max>)
+ *           (<Entry Fn>, <Nest Fn>, <Per-Enum Gen Fn>, <Per-Entry Gen Fn>)
+ *           (<Entry Fn>, <Nest Fn>, <Per-Enum Gen Fn>, <Per-Entry Gen Fn>)
+ *           (<Entry Fn>, <Nest Fn>, <Per-Enum Gen Fn>, <Per-Entry Gen Fn>)
+ *           ...
+ *       )
+ *
+ *       Root Enum Name: Name of enum passed to NV_ENUM_DEF (unaffected by NV_ENUM_NEST)
+ *       0-Depth Nested Enum Name: Name of the enum at depth 0 (affected by NV_ENUM_NEST)
+ *       Prefix: Text prepended to each entry name (no spaces)
+ *       Current Enum Name: Name of the enum at current depth
+ *       Reserved Min: The minimum allowable enum value at this depth
+ *       Reserved Max: The maximum allowable enum value at this depth
+ *       Entry Fn: macro called once per entry with the entry as an argument
+ *       Nest Fn: Duplicate definition of NV_ENUM_NEST_EXPAND to WAR recursion limits
+ *       Per-Enum Gen Fn: Function to call once per NV_ENUM_DEF or NV_ENUM_NEST
+ *       Per-Entry Gen Fn: Function to call once per NV_ENUM_ENTRY
+ *
+ */
+
+//
+// Data list accessor macros
+//
+
+/*! @brief Given data list, returns Current Enum Name */
+#define NV_ENUM_DAT_CURR_NAME(dat__) NV_CAAR(dat__)
+/*! @brief Given data list, returns 0-depth nested enum name */
+#define NV_ENUM_DAT_NEST_NAME(dat__) NV_CADAR(dat__)
+/*! @brief Given data list, returns Prefix */
+#define NV_ENUM_DAT_PREFIX(dat__) NV_CADDAR(dat__)
+/*! @brief Given data List, returns Root Enum Name */
+#define NV_ENUM_DAT_ROOT_NAME(dat__) NV_CADDDAR(dat__)
+/*! @brief Given data list, returns Res. Min at current depth */
+#define NV_ENUM_DAT_MIN(dat__) NV_CADDDDAR(dat__)
+/*! @brief Given data list, returns Res. Max at current depth */
+#define NV_ENUM_DAT_MAX(dat__) NV_CADDDDDAR(dat__)
+/*! @brief Given data list, returns Entry Fn at current depth */
+#define NV_ENUM_DAT_ENTRY(dat__) NV_CAADR(dat__)
+/*! @brief Given data list, returns Nest Fn at current depth */
+#define NV_ENUM_NEST(dat__) NV_CADADR(dat__)
+/*! @brief Given data list, returns Per-Enum Gen Fn at current depth */
+#define NV_ENUM_DAT_GEN1(dat__) NV_CADDADR(dat__)
+/*! @brief Given data list, returns Per-Entry Gen Fn at current depth */
+#define NV_ENUM_DAT_GEN2(dat__) NV_CADDDADR(dat__)
+
+/*!
+ * @brief constructs a data list to be used for generation of the root enum
+ */
+#define NV_ENUM_DEPTH_0(name, prefix, gen1_fn, gen2_fn) \
+    (  (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND3, gen1_fn, gen2_fn, unused, unused) \
+    ,  (unused, unused, unused, unused, unused, unused) \
+    )
+
+/*!
+ * @brief constructs a data list to be used for generation of enums at depth 1
+ */
+#define NV_ENUM_DEPTH_1(name, prefix, gen1_fn, gen2_fn) \
+    (  (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \
+    ,  (NV_DISCARD_ONE, NV_ENUM_NEST_GEN,     gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \
+    ,  (unused, unused, unused, unused, unused, unused) \
+    )
+
+/*!
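+ * An added orientation example (hedged, not in the original header) showing
+ * how the accessors above unpack a data list:
+ * ~~~{.c}
+ * // Given dat__ = ( (ROOT, ROOT, PFX_, ROOT, NV_ENUM_MIN, NV_ENUM_MAX)
+ * //               , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND0, gen1, gen2, unused, unused)
+ * //               , ... )
+ * // NV_ENUM_DAT_PREFIX(dat__) expands to PFX_
+ * // NV_ENUM_DAT_GEN1(dat__)   expands to gen1
+ * ~~~
+ */
+
+/*!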
+ * @brief constructs a data list to be used for generation of enums at depth 2
+ */
+#define NV_ENUM_DEPTH_2(name, prefix, gen1_fn, gen2_fn) \
+    (  (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \
+    ,  (NV_DISCARD_ONE, NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_DISCARD_ONE, NV_ENUM_NEST_GEN,     gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \
+    ,  (NV_EXPAND_ONE,  NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \
+    ,  (unused, unused, unused, unused, unused, unused) \
+    )
+
+///
+/// Generator Functions
+///
+
+
+/*! @brief Generates an enum type given the enum specification in entries */
+#define NV_ENUM_GEN_MAIN(dat__, entries) \
+    enum NV_ENUM_DAT_CURR_NAME(dat__) { entries(dat__) };
+
+/*! @brief Generates a single enum entry with the given name and value */
+#define NV_ENUM_GEN_MAIN_FN(dat__, entry_name, value) \
+    NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name) = value,
+
+
+/*! @brief Generates an enum typedef for the given enum. All nested types receive the same typedef (i.e. the root enum). */
+#define NV_ENUM_GEN_TYPEDEF(dat__, entries) \
+    typedef enum NV_ENUM_DAT_ROOT_NAME(dat__) NV_ENUM_DAT_CURR_NAME(dat__);
+
+/*! @brief Does nothing. There is no per-entry generation for typedefs. */
+#define NV_ENUM_GEN_TYPEDEF_FN(dat__, entry_name, value)
+
+
+/*! @brief Generates an enum with an added entry at the end to provide the enum size */
+#define NV_ENUM_GEN_SIZE(dat__, entries) \
+    enum { entries(dat__) NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE) };
+
+/*! @brief Generates a single enum entry with __UNUSED appended. These values are not meant to be used. */
+#define NV_ENUM_GEN_SIZE_FN(dat__, entry_name, value) \
+    NV_ENUM_CONCATENATE(NV_ENUM_CONCATENATE(NV_ENUM_DAT_NEST_NAME(dat__), entry_name), __UNUSED) = value,
+
+
+/*! @brief Generates a conversion function from an enum value to string representation. */
+#define NV_ENUM_GEN_STRING(dat__, entries) \
+    static inline const char * \
+    NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _TO_STRING) \
+    ( \
+        enum NV_ENUM_DAT_ROOT_NAME(dat__) in \
+    ) \
+    { \
+        switch (in) \
+        { \
+            entries(dat__) \
+            default: \
+                break; \
+        } \
+        return NV_ENUM_STRINGIFY(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __UNKNOWN)); \
+    }
+
+/*! @brief Generates a case for the given enum entry, and its string representation. */
+#define NV_ENUM_GEN_STRING_FN(dat__, entry_name, value) \
+    case NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name): \
+        return NV_ENUM_STRINGIFY(NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name));
+
+
+/*! @brief Generates a conversion function from NvU32 to enum value. */
+#define NV_ENUM_GEN_FROM(dat__, entries) \
+    static inline NV_STATUS \
+    NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _FROM32) \
+    ( \
+        NvU32 in, \
+        enum NV_ENUM_DAT_ROOT_NAME(dat__) *out \
+    ) \
+    { \
+        switch (in) \
+        { \
+            entries(dat__) \
+                if (out != NULL) \
+                    *out = ((enum NV_ENUM_DAT_ROOT_NAME(dat__))in); \
+                return NV_OK; \
+            default: \
+                break; \
+        } \
+        return NV_ERR_OUT_OF_RANGE; \
+    }
+
+/*! @brief Generates a case for the given enum entry. */
+#define NV_ENUM_GEN_FROM_FN(dat__, entry_name, value) \
+    case NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name):
+
+
+/*! @brief Generates a struct constant containing the smallest value contained within the enum (plus one).
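+ *
+ *  An added worked expansion (illustrative, not in the original header) for an
+ *  enum with the values 3, 4, 5:
+ *  ~~~{.c}
+ *  char lo[(1 * (3)) + (0 * (4)) + (0 * (5)) + (0 * 0 + 1)]; // char lo[4]
+ *  ~~~
+ *  NV_ENUM_LO() later subtracts one from sizeof(lo) to recover the value 3.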
*/ +#define NV_ENUM_GEN_LO(dat__, entries) \ + typedef struct { char lo[(1 * entries(dat__) 0 + 1)]; } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) ; + +/*! @brief Builds a portion of the expression calculating the smallest enum value. */ +#define NV_ENUM_GEN_LO_FN(dat__, entry_name, value) \ + (value)) + (0 * + + +/*! @brief Generates a struct constant containing the number of values contained within the enum. */ +#define NV_ENUM_GEN_COUNT(dat__, entries) \ + typedef struct { char count[(0 + entries(dat__) 0)]; } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) ; + +/*! @brief Builds a portion of the expression calculating the number of enum values. */ +#define NV_ENUM_GEN_COUNT_FN(dat__, entry_name, value) \ + 1 + + + +/*! @brief Generates a group of struct constants containing the above generated values. */ +#define NV_ENUM_GEN_META(dat__, entries) \ + typedef struct { char lo[sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) *) NULL)->lo)]; \ + char hi[NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE)];\ + char count[sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) *) NULL)->count)]; \ + char size[NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE)]; \ + char bContiguous[(sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) *) NULL)->count) == (NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE) - sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) *) NULL)->lo) + 1)) + 1]; \ + } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __META) ; + +/*! @brief unused. Nothing needs to be generated per-entry for this generator. */ +#define NV_ENUM_GEN_META_FN(dat__, entry_name, value) + +/*! @brief Generates a compile-time assertion. */ +#define NV_ENUM_GEN_ASSERT_MONOTONIC(dat__, entries) \ + typedef char NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _assert_monotonic)[ (2 * ((0 > entries(dat__) ((NvU32)-1)) == 0)) - 1 ]; + +/*! + * @brief Builds a portion of the expression asserting that all enum values + * must be declared in strictly monotonically increasing order. + */ +#define NV_ENUM_GEN_ASSERT_MONOTONIC_FN(dat__, entry_name, value) \ + value) + (value > + + +/*! @brief Generates a compile-time assertion. */ +#define NV_ENUM_GEN_ASSERT_IN_RANGE(dat__, entries) \ + typedef char NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _assert_in_range)[ (2 * ((1 * entries(dat__) 1) == 1)) - 1 ]; + +/*! + * @brief Builds a portion of the expression asserting that all nested enum + * values must be within the reserved range of their parent enum. + */ +#define NV_ENUM_GEN_ASSERT_IN_RANGE_FN(dat__, entry_name, value) \ + (((NvS64)value) >= NV_ENUM_DAT_MIN(dat__)) * (((NvS64)value) <= NV_ENUM_DAT_MAX(dat__)) * + + +/// +/// End of Generator Functions +/// + + +/*! 
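+ * An added note for clarity (not in the original header): the per-entry
+ * generator functions above fold into constant expressions. For example, with
+ * three entries, NV_ENUM_GEN_COUNT's array size expands as:
+ * ~~~{.c}
+ * char count[(0 + 1 + 1 + 1 + 0)]; // NV_ENUM_COUNT() == 3
+ * ~~~
+ */
+
+/*!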
+ * @brief Performs code generation for the given generator function pair
+ *
+ * @note This function must be updated if supporting deeper nesting in the future
+ *
+ * @param fn1       Macro - Per-Enum Gen Fn
+ * @param fn2       Macro - Per-Entry Gen Fn
+ * @param enum_name Token - Root Enum Name
+ * @param prefix    Token - Prefix
+ * @param entries   Macro - Enum Specification
+ */
+#define NV_ENUM_GENERATOR(fn1, fn2, enum_name, prefix, entries) \
+    fn1(NV_ENUM_DEPTH_0(enum_name, prefix, fn1, fn2), entries) \
+    entries(NV_ENUM_DEPTH_1(enum_name, prefix, fn1, fn2)) \
+    entries(NV_ENUM_DEPTH_2(enum_name, prefix, fn1, fn2)) \
+
+//
+// Windows preprocessor crashes with "ran out of heap space" errors if the
+// preprocessed output from a single macro gets too large, so skip the
+// verification sanity asserts when running on Windows to increase the size of
+// representable enums
+//
+#if NVOS_IS_WINDOWS
+
+/*!
+ * @brief Generates an enum and associated metadata with the given enum name and prefix
+ *
+ * @param prefix    Token - Prefix
+ * @param enum_name Token - Root Enum Name
+ * @param entries   Macro - Enum Specification
+ */
+#define NV_ENUM_DEF_PREFIX(prefix, enum_name, entries) \
+    NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(enum_name, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_TYPEDEF, NV_ENUM_GEN_TYPEDEF_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_SIZE,    NV_ENUM_GEN_SIZE_FN,    enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_STRING,  NV_ENUM_GEN_STRING_FN,  enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_FROM,    NV_ENUM_GEN_FROM_FN,    enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_LO,      NV_ENUM_GEN_LO_FN,      enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_COUNT,   NV_ENUM_GEN_COUNT_FN,   enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_META,    NV_ENUM_GEN_META_FN,    enum_name, prefix, entries) \
+
+#else
+
+/*!
+ * @brief Generates an enum and associated metadata with the given enum name and prefix
+ *
+ * @param prefix    Token - Prefix
+ * @param enum_name Token - Root Enum Name
+ * @param entries   Macro - Enum Specification
+ */
+#define NV_ENUM_DEF_PREFIX(prefix, enum_name, entries) \
+    NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(enum_name, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_TYPEDEF, NV_ENUM_GEN_TYPEDEF_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_SIZE,    NV_ENUM_GEN_SIZE_FN,    enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_STRING,  NV_ENUM_GEN_STRING_FN,  enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_FROM,    NV_ENUM_GEN_FROM_FN,    enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_LO,      NV_ENUM_GEN_LO_FN,      enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_COUNT,   NV_ENUM_GEN_COUNT_FN,   enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_META,    NV_ENUM_GEN_META_FN,    enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_ASSERT_MONOTONIC, NV_ENUM_GEN_ASSERT_MONOTONIC_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_ASSERT_IN_RANGE,  NV_ENUM_GEN_ASSERT_IN_RANGE_FN,  enum_name, prefix, entries)
+
+
+
+#endif // NVOS_IS_WINDOWS
+
+#define NV_ENUM_NOTHING
+
+/*!
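+ * An added end-to-end usage sketch (hedged; the names are illustrative and
+ * this example is not part of the original header). An enum specification is
+ * a macro that lists entries, which NV_ENUM_DEF_PREFIX (or NV_ENUM_DEF below)
+ * then expands:
+ * ~~~{.c}
+ * #define COLOR_DEF(x)                 \
+ *     NV_ENUM_ENTRY(x, COLOR_RED,   0) \
+ *     NV_ENUM_ENTRY(x, COLOR_GREEN, 1) \
+ *     NV_ENUM_ENTRY(x, COLOR_BLUE,  2)
+ *
+ * NV_ENUM_DEF(COLOR, COLOR_DEF)
+ * // Generates enum COLOR { COLOR_RED = 0, ... } plus COLOR_TO_STRING(),
+ * // COLOR_FROM32(), and the metadata behind NV_ENUM_COUNT(COLOR) etc.
+ * ~~~
+ */
+
+/*!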
+ * @brief Generates an enum and associated metadata with the given enum name
+ *
+ * @param enum_name Token - Root Enum Name
+ * @param entries   Macro - Enum Specification
+ */
+#define NV_ENUM_DEF(enum_name, entries) \
+    NV_ENUM_DEF_PREFIX(NV_ENUM_NOTHING, enum_name, entries)
+
+/*!
+ * @brief Generates an exported enum with the given prefix
+ *
+ * @param prefix  Token - Prefix
+ * @param entries Macro - Enum Specification
+ */
+#define NV_ENUM_EXPORT_PREFIX(prefix, entries) \
+    NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(NV_ENUM_NOTHING, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries)
+
+/*!
+ * @brief Generates an exported enum
+ *
+ * @param entries Macro - Enum Specification
+ */
+#define NV_ENUM_EXPORT(entries) \
+    NV_ENUM_EXPORT_PREFIX( , entries)
+
+
+///
+/// Runtime Utility Functions
+///
+
+/*!
+ * @brief Converts an unsigned integer into an enum value, or returns an error.
+ *
+ * @param[in]  type    identifier - Enum type name
+ * @param[in]  value   rvalue     - integer value belonging to given enum
+ * @param[out] pResult pointer    - Optional pointer to enum, updated with value on success
+ *
+ * @return NV_OK if the value belongs to the enum
+ *         NV_ERR_OUT_OF_RANGE otherwise
+ */
+#define NV_ENUM_FROM32(type, value, pResult) \
+    (NV_ENUM_CONCATENATE(type, _FROM32)((value), (pResult)))
+
+/*!
+ * @brief Returns a string representation of the name of the given enum value
+ *
+ * @param[in] type  identifier - Enum type name
+ * @param[in] value rvalue     - integer value belonging to given enum
+ *
+ * @return a string representing the given value
+ */
+#define NV_ENUM_TO_STRING(type, value) \
+    (NV_ENUM_CONCATENATE(type, _TO_STRING)(value))
+
+/*!
+ * @brief Queries whether the given value belongs to the given enum
+ *
+ * @param[in] type  identifier - Enum type name
+ * @param[in] value rvalue     - integer to check
+ *
+ * @return NV_TRUE if the given value is valid
+ *         NV_FALSE otherwise
+ */
+#define NV_ENUM_IS(type, value) \
+    (NV_OK == NV_ENUM_FROM32(type, (value), NULL))
+
+/*!
+ * @brief Queries the value of the smallest enum entry
+ *
+ * @param[in] type identifier - Enum type name
+ */
+#define NV_ENUM_LO(type) \
+    ((type)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->lo) - 1))
+
+/*!
+ * @brief Queries the value of the largest enum entry
+ *
+ * @param[in] type identifier - Enum type name
+ */
+#define NV_ENUM_HI(type) \
+    ((type)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->hi) - 1))
+
+/*!
+ * @brief Queries the number of values between the first and last enum entries
+ * @note This value is large enough to use in an array declaration with enum
+ *       entries used as indices into the array.
+ *
+ * @param[in] type identifier - Enum type name
+ */
+#define NV_ENUM_SIZE(type) \
+    (sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->size))
+
+/*!
+ * @brief Queries the number of values defined by the enum
+ *
+ * @param[in] type identifier - Enum type name
+ */
+#define NV_ENUM_COUNT(type) \
+    (sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->count))
+
+/*!
+ * @brief Queries whether or not the enum is defined contiguously (i.e. no holes)
+ *
+ * @param[in] type identifier - Enum type name
+ *
+ * @return NV_TRUE if each value between the lo and hi enum values is a valid enum value
+ */
+#define NV_ENUM_IS_CONTIGUOUS(type) \
+    ((NvBool)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->bContiguous) - 1))
+
+/*!
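+ * An added usage sketch (hedged; COLOR is the hypothetical enum from the
+ * example above, and userValue is an arbitrary NvU32):
+ * ~~~{.c}
+ * COLOR color;
+ * if (NV_ENUM_FROM32(COLOR, userValue, &color) == NV_OK)
+ * {
+ *     const char *name = NV_ENUM_TO_STRING(COLOR, color);
+ *     // NV_ENUM_IS(COLOR, 2) is NV_TRUE here; NV_ENUM_IS(COLOR, 7) is NV_FALSE.
+ * }
+ * ~~~
+ */
+
+/*!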
+ *
+ * @brief Macros providing iteration over each value defined by the enum type
+ * @note Iteration is faster over contiguous enums
+ *
+ * @param[in] type  identifier - Enum type name
+ * @param[in] value lvalue     - iterator holding current enum value
+ */
+#define FOR_EACH_IN_ENUM(type, value) \
+{ \
+    NvU32 localValue; \
+    for (localValue = value = NV_ENUM_LO(type); localValue <= NV_ENUM_HI(type); (value) = (type) (++localValue)) \
+    { \
+        if (!NV_ENUM_IS_CONTIGUOUS(type) && !NV_ENUM_IS(type, localValue)) \
+            continue;
+
+#define FOR_EACH_IN_ENUM_END \
+    } \
+}
+
+/*!
+ * @brief Given the Nth enum value defined by the enum type, returns N
+ * @note Only supports contiguous enums
+ *
+ * @param[in] type  identifier - Enum type name
+ * @param[in] value rvalue     - integer value belonging to enum type
+ *
+ * @return the index at which the enum value was defined within the enum, or -1
+ */
+#define NV_ENUM_TO_IDX(type, value) \
+    ((NV_ENUM_IS_CONTIGUOUS(type) && NV_ENUM_IS(type, value)) ? ((value) - NV_ENUM_LO(type)) : ((NvU32)-1))
+
+/*!
+ * @brief Returns the Nth enum value defined by the given type
+ * @note Only supports contiguous enums
+ *
+ * @param[in] type identifier - Enum type name
+ * @param[in] idx  rvalue     - integer specifying entry index
+ *
+ * @return The Nth enum value defined within the enum, or NV_ENUM_SIZE(type) if non-existent
+ */
+#define NV_ENUM_FROM_IDX(type, idx) \
+    ((type)((NV_ENUM_IS_CONTIGUOUS(type) && idx < NV_ENUM_COUNT(type)) ? (NV_ENUM_LO(type) + (idx)) : NV_ENUM_SIZE(type)))
+
+///
+/// End of Runtime Utility Functions
+///
+
+///@}
+/// NV_UTILS_ENUM
+
+#endif // NV_ENUM_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h
new file mode 100644
index 0000000..b575373
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h
@@ -0,0 +1,970 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + + /** + * @file + * @brief Utility assertion macros + * + * @see "NV_ASSERT" confluence page for additional documentation + */ + +#ifndef _NV_UTILS_ASSERT_H_ +#define _NV_UTILS_ASSERT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup NV_UTILS_ASSERT Utility Assertion Macros + * + * @brief Provides a light abstraction layer for common assert macro patterns. + * + * NvPort and NvPrintf are used for debug and logging primitives. + * If an environment cannot use these directly then it can override + * the NV_PORT_HEADER and NV_PRINTF_HEADER defines in its makefile + * to point to appropriate replacements. + * @{ + */ +#include "nvstatus.h" +#include "nvmacro.h" + +// Include portability header, falling back to NvPort if not provided. +#ifndef NV_PORT_HEADER +#define NV_PORT_HEADER "nvport/nvport.h" +#endif +#include NV_PORT_HEADER + +// Include printf header, falling back to NvPrintf if not provided. +#ifndef NV_PRINTF_HEADER +#define NV_PRINTF_HEADER "utils/nvprintf.h" +#endif +#include NV_PRINTF_HEADER + +/* + * Use __builtin_expect to improve branch predictions on the GNU compiler. + * + * Note that these macros convert the parameter to bool. They should + * only be used in 'if' statements. + * + * '!= 0' is used (instead of a cast to NvBool or !!) to avoid 'will always + * evaluate as 'true'' warnings in some gcc versions. + */ +#if defined(__GNUC__) && __GNUC__ >= 3 +#define NV_LIKELY(expr) __builtin_expect(((expr) != 0), 1) +#define NV_UNLIKELY(expr) __builtin_expect(((expr) != 0), 0) +#else +#define NV_LIKELY(expr) ((expr) != 0) +#define NV_UNLIKELY(expr) ((expr) != 0) +#endif + +/* + * Set this to pass expression, function name, file name, and line number + * to the nvAssertFailed functions. + * + * NOTE: NV_PRINTF_STRINGS_ALLOWED defaults to: + * defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD) + * + * RM_ASSERT used this condition to decide whether to print assert strings: + * defined(DEBUG) || defined(ASSERT_BUILD) || defined(QA_BUILD) + */ +#if !defined(NV_ASSERT_FAILED_USES_STRINGS) +#if (NV_PRINTF_STRINGS_ALLOWED && (defined(DEBUG) || defined(ASSERT_BUILD) || defined(QA_BUILD))) +#define NV_ASSERT_FAILED_USES_STRINGS 1 +#else +#define NV_ASSERT_FAILED_USES_STRINGS 0 +#endif +#endif + +// Hook NV_ASSERT into RCDB. +#if !defined(NV_JOURNAL_ASSERT_ENABLE) +#if defined(NVRM) && (NVOS_IS_WINDOWS || NVOS_IS_UNIX || NVCPU_IS_RISCV64) && !defined(NVWATCH) && !defined(NV_MODS) +#define NV_JOURNAL_ASSERT_ENABLE 1 +#else +#define NV_JOURNAL_ASSERT_ENABLE 0 +#endif +#endif + +#if !defined(COVERITY_ASSERT_FAIL) +#if defined(__COVERITY__) +void __coverity_panic__(void); +#define COVERITY_ASSERT_FAIL() __coverity_panic__() +#else // defined(__COVERITY__) +#define COVERITY_ASSERT_FAIL() ((void) 0) +#endif // defined(__COVERITY__) +#endif // !defined(COVERITY_ASSERT_FAIL) + +const char *nvAssertStatusToString(NV_STATUS nvStatusIn); + +/* + * NV_ASSERT_FAILED, NV_ASSERT_OK_FAILED, NV_CHECK_FAILED, and NV_CHECK_OK_FAILED + * These macros are defined in three flavors: + * + * normal - expr/file/line are concatenated with format string for NVLOG. + * expr/file/line are passed in as parameters to a helper function + * for NV_PRINTF. + * + * normal for GSP-RM - expr/file/line are omitted, since each NV_PRINTF line + * already has them. NVLOG is not used. + * + * _FUNC - expr/file/line are passed in as parameters to a helper function + * for both NVLOG and NV_PRINTF. + * The _FUNC macros are used for pre-compiled headers on most platforms. 
+ */ +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +void nvAssertInit(void); +void nvAssertDestroy(void); + +#if NV_JOURNAL_ASSERT_ENABLE +void nvAssertFailed(void); +void nvAssertOkFailed(NvU32 status); +#else +#define nvAssertFailed(...) +#define nvAssertOkFailed(...) +#endif + +#define NV_ASSERT_FAILED(exprStr) \ + do { \ + NV_PRINTF(LEVEL_ERROR, "Assertion failed: " exprStr "\n"); \ + nvAssertFailed(); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT(); \ + } while(0) + +#define NV_ASSERT_OK_FAILED(exprStr, status) \ + do { \ + NV_PRINTF(LEVEL_ERROR, "Assertion failed: %s (0x%08X) returned from " \ + exprStr "\n", nvAssertStatusToString(status), status); \ + nvAssertOkFailed(status); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT(); \ + } while(0) + +#define NV_CHECK_FAILED(level, exprStr) \ + NV_PRINTF(level, "Check failed: " exprStr "\n") + +#define NV_CHECK_OK_FAILED(level, exprStr, status) \ + NV_PRINTF(level, "Check failed: %s (0x%08X) returned from " exprStr "\n", \ + nvAssertStatusToString(status), status) + +#else // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +#if NV_ASSERT_FAILED_USES_STRINGS +#define NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr) , exprStr, __FILE__, __LINE__ +#define NV_ASSERT_FAILED_FUNC_PARAM(exprStr) exprStr, __FILE__, __LINE__ +#define NV_ASSERT_FAILED_FUNC_COMMA_TYPE ,const char *pszExpr, const char *pszFileName, NvU32 lineNum +#define NV_ASSERT_FAILED_FUNC_TYPE const char *pszExpr, const char *pszFileName, NvU32 lineNum +#else +#define NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr) , __LINE__ +#define NV_ASSERT_FAILED_FUNC_PARAM(exprStr) __LINE__ +#define NV_ASSERT_FAILED_FUNC_COMMA_TYPE , NvU32 lineNum +#define NV_ASSERT_FAILED_FUNC_TYPE NvU32 lineNum +#endif + +void nvAssertInit(void); +void nvAssertDestroy(void); + +// Helper function prototypes for _FAILED macros below. +#if NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE +void nvAssertFailed(NV_ASSERT_FAILED_FUNC_TYPE); +void nvAssertOkFailed(NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckFailed(NvU32 level NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckOkFailed(NvU32 level, NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvAssertFailedNoLog(NV_ASSERT_FAILED_FUNC_TYPE); +void nvAssertOkFailedNoLog(NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckFailedNoLog(NvU32 level NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckOkFailedNoLog(NvU32 level, NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +#else // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE +#define nvAssertFailed(...) +#define nvAssertOkFailed(...) +#define nvCheckFailed(...) +#define nvCheckOkFailed(...) +#define nvAssertFailedNoLog(...) +#define nvAssertOkFailedNoLog(...) +#define nvCheckFailedNoLog(...) +#define nvCheckOkFailedNoLog(...) +#endif // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE + +#define NV_ASSERT_LOG(level, fmt, ...) 
\
+    NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, level,                    \
+        NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__)
+
+#define NV_ASSERT_FAILED(exprStr)                                            \
+    do {                                                                     \
+        NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: " exprStr);            \
+        nvAssertFailedNoLog(NV_ASSERT_FAILED_FUNC_PARAM(exprStr));           \
+        COVERITY_ASSERT_FAIL();                                              \
+        PORT_BREAKPOINT_CHECKED();                                           \
+    } while(0)
+
+#define NV_ASSERT_OK_FAILED(exprStr, status)                                 \
+    do {                                                                     \
+        NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: 0x%08X returned from " \
+            exprStr, status);                                                \
+        nvAssertOkFailedNoLog(status                                         \
+            NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr));                     \
+        COVERITY_ASSERT_FAIL();                                              \
+        PORT_BREAKPOINT_CHECKED();                                           \
+    } while(0)
+
+#define NV_CHECK_FAILED(level, exprStr)                                      \
+    do {                                                                     \
+        NV_ASSERT_LOG(level, "Check failed: " exprStr);                      \
+        if (NV_PRINTF_LEVEL_ENABLED(level))                                  \
+        {                                                                    \
+            nvCheckFailedNoLog(level                                         \
+                NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr));                 \
+        }                                                                    \
+    } while(0)
+
+#define NV_CHECK_OK_FAILED(level, exprStr, status)                           \
+    do {                                                                     \
+        NV_ASSERT_LOG(level, "Check failed: 0x%08X returned from "           \
+            exprStr, status);                                                \
+        if (NV_PRINTF_LEVEL_ENABLED(level))                                  \
+        {                                                                    \
+            nvCheckOkFailedNoLog(level, status                               \
+                NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr));                 \
+        }                                                                    \
+    } while(0)
+
+#define NV_ASSERT_FAILED_FUNC(exprStr)                                       \
+    do {                                                                     \
+        nvAssertFailed(NV_ASSERT_FAILED_FUNC_PARAM(exprStr));                \
+        COVERITY_ASSERT_FAIL();                                              \
+        PORT_BREAKPOINT_CHECKED();                                           \
+    } while(0)
+
+#define NV_ASSERT_OK_FAILED_FUNC(exprStr, status)                            \
+    do {                                                                     \
+        nvAssertOkFailed(status NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \
+        COVERITY_ASSERT_FAIL();                                              \
+        PORT_BREAKPOINT_CHECKED();                                           \
+    } while(0)
+
+#define NV_CHECK_FAILED_FUNC(level, exprStr)                                 \
+    if (NV_PRINTF_LEVEL_ENABLED(level))                                      \
+    {                                                                        \
+        nvCheckFailed(level NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr));     \
+    }
+
+#define NV_CHECK_OK_FAILED_FUNC(level, exprStr, status)                      \
+    if (NV_PRINTF_LEVEL_ENABLED(level))                                      \
+    {                                                                        \
+        nvCheckOkFailed(level, status                                        \
+            NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr));                     \
+    }
+
+#endif // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64)
+
+/*
+ * Defines for precompiled headers.
+ *
+ * On platforms other than GSP-RM, the _INLINE macros cannot be used inside
+ * precompiled headers due to conflicting NVLOG_PRINT_IDs.
+ */
+#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64)
+#define NV_ASSERT_FAILED_PRECOMP NV_ASSERT_FAILED
+#else
+#define NV_ASSERT_FAILED_PRECOMP NV_ASSERT_FAILED_FUNC
+#endif
+
+// *****************************************************************************
+// *                        NV_ASSERT family of macros                         *
+// *****************************************************************************
+/**
+ * General notes:
+ *
+ * Assert that an expression is true. If not, do the actions defined
+ * in NV_ASSERT_FAILED as well as an "other action":
+ *     Print an error message in the debug output
+ *     Log an error message in NvLog
+ *     Mark as an error condition for coverity
+ *     Breakpoint
+ *     Log an assert record to the journal
+ *     "Other action" as defined by each macro below.
+ *
+ * The actions are enabled or omitted based on platform and build, and the
+ * implementations are platform dependent.
+ *
+ * The expression is always evaluated even if assertion failures are not logged
+ * in the environment. Use @ref NV_ASSERT_CHECKED if the expression should only
+ * be evaluated in checked builds.
+ *
+ * USE GENEROUSLY FOR any condition that requires immediate action from NVIDIA.
+ * Expect to be ARBed on bugs when an assert you added shows up internally
+ * or in the field.
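+ *
+ * A minimal usage sketch (the variable names here are illustrative only,
+ * not part of this header):
+ * ~~~{.c}
+ * NV_ASSERT(pContext != NULL);
+ * NV_ASSERT_OR_RETURN(index < tableSize, NV_ERR_INVALID_ARGUMENT);
+ * NV_ASSERT_OR_GOTO(pBuffer != NULL, cleanup);
+ * ~~~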
+ *
+ * DO NOT USE for normal run-time conditions, such as a user application
+ * passing a bad parameter.
+ */
+
+/**
+ * Assert that an expression is true.
+ *
+ * @param[in] expr Expression that evaluates to a truth value.
+ */
+#define NV_ASSERT(expr)                                                       \
+    NV_ASSERT_OR_ELSE_STR(expr, #expr, /* no other action */)
+
+/**
+ * Assert that an expression is true only in checked builds.
+ *
+ * @note The expression is only evaluated in checked builds so should
+ *       not contain required side-effects.
+ *       Also to prevent side effects, no "other action" is permitted.
+ *
+ * @param[in] expr Expression that evaluates to a truth value.
+ */
+#if PORT_IS_CHECKED_BUILD
+#define NV_ASSERT_CHECKED(expr)                                               \
+    NV_ASSERT_OR_ELSE_STR(expr, #expr, /* no other action */)
+#define NV_ASSERT_CHECKED_PRECOMP(expr)                                       \
+    NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, /* no other action */)
+#else
+#define NV_ASSERT_CHECKED(expr)         ((void)0)
+#define NV_ASSERT_CHECKED_PRECOMP(expr) ((void)0)
+#endif
+
+/**
+ * Assert that an expression is true or else do something.
+ *
+ * This macro can't use NV_ASSERT_OR_ELSE_STR when NV_PRINTF is passed in as
+ * the elseStmt parameter.
+ *
+ * @param[in] expr     Expression that evaluates to a truth value.
+ * @param[in] elseStmt Statement to evaluate if the expression is false.
+ */
+#define NV_ASSERT_OR_ELSE(expr, elseStmt)                                     \
+    if (1)                                                                    \
+    {                                                                         \
+        PORT_COVERAGE_PUSH_OFF();                                             \
+        if (NV_UNLIKELY(!(expr)))                                             \
+        {                                                                     \
+            NV_ASSERT_FAILED(#expr);                                          \
+            elseStmt;                                                         \
+        }                                                                     \
+        PORT_COVERAGE_POP();                                                  \
+    } else ((void) 0)
+
+/**
+ * Assert that an expression is true or else goto a label.
+ *
+ * @param[in] expr  Expression that evaluates to a truth value.
+ * @param[in] label Label to jump to when the expression is false.
+ */
+#define NV_ASSERT_OR_GOTO(expr, label)                                        \
+    NV_ASSERT_OR_ELSE_STR(expr, #expr, goto label)
+
+/**
+ * Assert that an expression is true or else return a value.
+ *
+ * @param[in] expr   Expression that evaluates to a truth value.
+ * @param[in] retval Value to return if the expression is false.
+ */
+#define NV_ASSERT_OR_RETURN(expr, retval)                                     \
+    NV_ASSERT_OR_ELSE_STR(expr, #expr, return (retval))
+
+/**
+ * Assert that an expression is true or else return void.
+ *
+ * @param[in] expr Expression that evaluates to a truth value.
+ */
+#define NV_ASSERT_OR_RETURN_VOID(expr)                                        \
+    NV_ASSERT_OR_ELSE_STR(expr, #expr, return)
+
+/**
+ * Assert that an expression is true or else do something.
+ *
+ * Although it can be used directly, this second level macro is designed to be
+ * called from other macros. Passing expr through multiple levels of macros
+ * before it is stringified expands it. This is especially bad for DRF macros,
+ * which result in an embedded %, breaking the format string in the
+ * NV_ASSERT_FAILED_INLINE macro defined above. The macros in this header
+ * therefore pass the already-stringified expr into the second level macros
+ * as a separate parameter.
+ *
+ * @param[in] expr     Expression that evaluates to a truth value.
+ * @param[in] exprStr  Expression as a string.
+ * @param[in] elseStmt Statement to evaluate if the expression is false.
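+ *
+ * For illustration (a sketch, not an additional macro): a call such as
+ * NV_ASSERT_OR_RETURN(a == b, rv) reaches this macro as
+ * NV_ASSERT_OR_ELSE_STR(a == b, "a == b", return (rv)), so the string seen
+ * by NV_ASSERT_FAILED is the unexpanded expression text.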
+ */
+#define NV_ASSERT_OR_ELSE_STR(expr, exprStr, elseStmt)                        \
+    do                                                                        \
+    {                                                                         \
+        PORT_COVERAGE_PUSH_OFF();                                             \
+        if (NV_UNLIKELY(!(expr)))                                             \
+        {                                                                     \
+            NV_ASSERT_FAILED(exprStr);                                        \
+            elseStmt;                                                         \
+        }                                                                     \
+        PORT_COVERAGE_POP();                                                  \
+    } while (0)
+
+// *****************************************************************************
+// *                      NV_ASSERT_OK family of macros                        *
+// *****************************************************************************
+/**
+ * General notes:
+ *
+ * Assert that an expression evaluates to NV_OK. If not, do the actions defined
+ * in NV_ASSERT_OK_FAILED as well as an "other action":
+ *     Print an error message in the debug output, including decoded NV_STATUS.
+ *     Log an error message in NvLog.
+ *     Mark as an error condition for coverity.
+ *     Breakpoint.
+ *     Log an assert record to the journal.
+ *     "Other action" as defined by each macro below.
+ *
+ * The actions are enabled or omitted based on platform and build, and the
+ * implementations are platform dependent.
+ *
+ * The expression is always evaluated even if assertion failures are not logged
+ * in the environment. Use @ref NV_ASSERT_OK_CHECKED if the expression should
+ * only be evaluated in checked builds.
+ *
+ * USE GENEROUSLY FOR any condition that requires immediate action from NVIDIA.
+ * Expect to be ARBed on bugs when an assert you added shows up internally
+ * or in the field.
+ *
+ * DO NOT USE for normal run-time conditions, such as a user application
+ * passing a bad parameter.
+ */
+
+/**
+ * Assert that an expression evaluates to NV_OK.
+ *
+ * @param[in] expr Expression that evaluates to an NV_STATUS.
+ */
+#define NV_ASSERT_OK(expr)                                                    \
+    do                                                                        \
+    {                                                                         \
+        NV_STATUS rm_pvt_status;                                              \
+        NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr,                  \
+            /* no other action */);                                           \
+    } while(0)
+
+/**
+ * Assert that an expression evaluates to NV_OK only in checked builds.
+ *
+ * @note The expression is only evaluated in checked builds so should
+ *       not contain required side-effects.
+ *       Also to prevent side effects, no "other action" is permitted,
+ *       and the status parameter is omitted.
+ *
+ * @param[in] expr Expression that evaluates to an NV_STATUS.
+ */
+#if PORT_IS_CHECKED_BUILD
+#define NV_ASSERT_OK_CHECKED(expr)                                            \
+    do                                                                        \
+    {                                                                         \
+        NV_STATUS rm_pvt_status;                                              \
+        NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr,                  \
+            /* no other action */);                                           \
+    } while(0)
+#else
+#define NV_ASSERT_OK_CHECKED(expr) ((void)0)
+#endif
+
+/*!
+ * Call a function that returns NV_STATUS and assert that the
+ * return value is NV_OK. If this is the first failure, record it
+ * in @ref status.
+ *
+ * @param[in] status The NV_STATUS variable to capture the status
+ * @param[in] expr   Expression that evaluates to an NV_STATUS.
+ */
+#define NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, expr)                     \
+    do                                                                        \
+    {                                                                         \
+        NV_STATUS rm_pvt_status;                                              \
+        NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr,                  \
+            if (status == NV_OK) status = rm_pvt_status);                     \
+    } while (0)
+
+/**
+ * Assert that an expression evaluates to NV_OK or else do something.
+ *
+ * This macro can't use NV_ASSERT_OK_OR_ELSE_STR when NV_PRINTF is passed in as
+ * the elseStmt parameter.
+ *
+ * @param[in] status   The NV_STATUS variable to capture the status
+ * @param[in] expr     Expression that evaluates to an NV_STATUS.
+ * @param[in] elseStmt Statement to evaluate if the expression returns an error.
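+ *
+ * Illustrative sketch (the callee and cleanup function are hypothetical):
+ * ~~~{.c}
+ * NV_STATUS status;
+ * NV_ASSERT_OK_OR_ELSE(status, initSubdevice(pSubdev),
+ *                      teardownPartialInit(pSubdev));
+ * ~~~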
+ */ +#define NV_ASSERT_OK_OR_ELSE(status, expr, elseStmt) \ + do \ + { \ + status = (expr); \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_ASSERT_OK_FAILED(#expr, status); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } while(0) + +/** + * Assert that an expression evaluates to NV_OK or else goto a label. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] label Label to jump to when the expression is false. +*/ +#define NV_ASSERT_OK_OR_GOTO(status, expr, label) \ + NV_ASSERT_OK_OR_ELSE_STR(status, expr, #expr, goto label); + +/** + * Assert that an expression evaluates to NV_TRUE or else goto a label. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] cond Condition that evaluates to either NV_TRUE or NV_FALSE. + * @param[in] error Error to be reflected in @ref status when @cond evaluates + to NV_FALSE. + * @param[in] label Label to jump to when @ref cond evaluates to NV_FALSE. +*/ +#define NV_ASSERT_TRUE_OR_GOTO(status, cond, error, label) \ + NV_ASSERT_OK_OR_ELSE_STR(status, ((cond) ? NV_OK : (error)), \ + #cond, goto label); + +/** + * Assert that an expression evaluates to NV_OK or else return the status. + * + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_ASSERT_OK_OR_RETURN(expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \ + return rm_pvt_status); \ + } while(0) + +/** + * Assert that an expression evaluates to NV_OK or else do something. + * + * Although it can be used directly, this second level macro is designed to be + * called from other macros. Passing expr through multiple levels of macros + * before it is stringified expands it. This is especially bad for DRF macros, + * which result in an embedded %, breaking the format string in the + * NV_ASSERT_OK_FAILED_INLINE macro defined above. The macros in this header + * always pass the stringified expr as a into the second level macros as + * a separate parameter. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] exprStr Expression as a string. + * @param[in] elseStmt Statement to evaluate if the expression is false. + */ +#define NV_ASSERT_OK_OR_ELSE_STR(status, expr, exprStr, elseStmt) \ + do \ + { \ + status = (expr); \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_ASSERT_OK_FAILED(exprStr, status); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } while(0) + +// ***************************************************************************** +// * NV_CHECK family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Check that an expression is true. If not, do the following actions: + * Print a message in the debug output at user specified level. + * Log a message in NvLog at user specified level. + * "Other action" as defined by each macro below. + * + * The actions are enabled or omitted based on platform and build, and the + * implementations are platform dependent. + * + * The expression is always evaluated even if check failures are not logged + * in the environment. Use @ref NV_CHECK_CHECKED if the expression should only + * be evaluated in checked builds. 
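+ *
+ * A minimal sketch (the parameter check shown is illustrative):
+ * ~~~{.c}
+ * NV_CHECK_OR_RETURN(LEVEL_INFO, pParams->count != 0, NV_ERR_INVALID_ARGUMENT);
+ * ~~~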
+ * + * USE FOR error conditions that DO NOT require immediate action from NVIDIA, + * but may be useful in diagnosing other issues. + */ + +/** + * Check that an expression is true. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_CHECK(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, /* no other action */) + +/** + * Check that an expression is true only in checked builds. + * + * @note The expression is only evaluated in checked builds so should + * not contain required side-effects. + * Also to prevent side effects, no "other action" is permitted. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#if PORT_IS_CHECKED_BUILD +#define NV_CHECK_CHECKED(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, /* no other action */) +#else +#define NV_CHECK_CHECKED(level, expr) ((void)0) +#endif + +/** + * Check that an expression is true or else do something. + * + * This macro can't use NV_CHECK_OR_ELSE_STR when NV_PRINTF is passed in as + * the elseStmt parameter. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] elseStmt Statement to evaluate if the expression is false. + */ +#define NV_CHECK_OR_ELSE(level, expr, elseStmt) \ + do \ + { \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_CHECK_FAILED(level, #expr); \ + elseStmt; \ + } \ + } while (0) + +/** + * Check that an expression is true or else goto a label. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] label Label to jump to when the expression is false. + */ +#define NV_CHECK_OR_GOTO(level, expr, label) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, goto label) + +/** + * Check that an expression is true or else return a value. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] retval Value to return if the expression is false. + */ +#define NV_CHECK_OR_RETURN(level, expr, retval) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, return (retval)) + +/** + * Check that an expression is true or else return void. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_CHECK_OR_RETURN_VOID(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, return) + +/** + * Check that an expression is true or else do something. + * + * Although it can be used directly, this second level macro is designed to be + * called from other macros. Passing expr through multiple levels of macros + * before it is stringified expands it. This is especially bad for DRF macros, + * which result in an embedded %, breaking the format string in the + * NV_CHECK_FAILED_INLINE macro defined above. The macros in this header + * always pass the stringified expr as a into the second level macros as + * a separate parameter. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] exprStr Expression as a string. + * @param[in] elseStmt Statement to evaluate if the expression is false. 
+ */
+#define NV_CHECK_OR_ELSE_STR(level, expr, exprStr, elseStmt)                  \
+    do                                                                        \
+    {                                                                         \
+        if (NV_UNLIKELY(!(expr)))                                             \
+        {                                                                     \
+            NV_CHECK_FAILED(level, exprStr);                                  \
+            elseStmt;                                                         \
+        }                                                                     \
+    } while (0)
+
+
+// *****************************************************************************
+// *                       NV_CHECK_OK family of macros                        *
+// *****************************************************************************
/**
+ * General notes:
+ *
+ * Check that an expression evaluates to NV_OK. If not, do the following actions:
+ *     Print a message in the debug output at the user specified level.
+ *     Log a message in NvLog at the user specified level.
+ *     "Other action" as defined by each macro below.
+ *
+ * The actions are enabled or omitted based on platform and build, and the
+ * implementations are platform dependent.
+ *
+ * The expression is always evaluated even if assertion failures are not logged
+ * in the environment. Use @ref NV_CHECK_OK_CHECKED if the expression should
+ * only be evaluated in checked builds.
+ *
+ * USE FOR error conditions that DO NOT require immediate action from NVIDIA,
+ * but may be useful in diagnosing other issues.
+ */
+
+/**
+ * Check that an expression evaluates to NV_OK.
+ *
+ * @param[in] status The NV_STATUS variable to capture the status
+ * @param[in] level  NV_PRINTF LEVEL to print at
+ * @param[in] expr   Expression that evaluates to an NV_STATUS.
+ */
+#define NV_CHECK_OK(status, level, expr)                                      \
+    do                                                                        \
+    {                                                                         \
+        NV_CHECK_OK_OR_ELSE_STR(status, level, expr, #expr,                   \
+            /* no other action */);                                           \
+    } while(0)
+
+/**
+ * Check that an expression evaluates to NV_OK only in checked builds.
+ *
+ * @note The expression is only evaluated in checked builds so should
+ *       not contain required side-effects.
+ *       Also to prevent side effects, no "other action" is permitted,
+ *       and the status parameter is omitted.
+ *
+ * @param[in] level NV_PRINTF LEVEL to print at
+ * @param[in] expr  Expression that evaluates to an NV_STATUS.
+ */
+#if PORT_IS_CHECKED_BUILD
+#define NV_CHECK_OK_CHECKED(level, expr)                                      \
+    do                                                                        \
+    {                                                                         \
+        NV_STATUS rm_pvt_status;                                              \
+        NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr,            \
+            /* no other action */);                                           \
+    } while(0)
+#else
+#define NV_CHECK_OK_CHECKED(level, expr) ((void)0)
+#endif
+
+/*!
+ * Call a function that returns NV_STATUS and check that the return value is
+ * NV_OK. If an error is returned, record the error code; if this is the
+ * first failure, record it in @ref status.
+ *
+ * @param[in] status The NV_STATUS variable to capture the status
+ * @param[in] level  NV_PRINTF LEVEL to print at
+ * @param[in] expr   Expression that evaluates to an NV_STATUS.
+ */
+#define NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, level, expr)               \
+    do                                                                        \
+    {                                                                         \
+        NV_STATUS rm_pvt_status;                                              \
+        NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr,            \
+            if (status == NV_OK) status = rm_pvt_status);                     \
+    } while (0)
+
+/**
+ * Check that an expression evaluates to NV_OK or else do something.
+ *
+ * This macro can't use NV_CHECK_OK_OR_ELSE_STR when NV_PRINTF is passed in as
+ * the elseStmt parameter.
+ *
+ * @param[in] status   The NV_STATUS variable to capture the status
+ * @param[in] level    NV_PRINTF LEVEL to print at
+ * @param[in] expr     Expression that evaluates to an NV_STATUS.
+ * @param[in] elseStmt Statement to evaluate if the expression returns an error.
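+ *
+ * Illustrative sketch (the callee and fallback flag are hypothetical):
+ * ~~~{.c}
+ * NV_STATUS status;
+ * NV_CHECK_OK_OR_ELSE(status, LEVEL_WARNING, probeOptionalFeature(pDev),
+ *                     bFeaturePresent = NV_FALSE);
+ * ~~~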
+ */ +#define NV_CHECK_OK_OR_ELSE(status, level, expr, elseStmt) \ + do \ + { \ + status = (expr); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_CHECK_OK_FAILED(level, #expr, status); \ + elseStmt; \ + } \ + } while (0) + +/** + * Check that an expression evaluates to NV_OK or else goto a label. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] label Label to jump to when the expression returns error. + */ +#define NV_CHECK_OK_OR_GOTO(status, level, expr, label) \ + NV_CHECK_OK_OR_ELSE_STR(status, level, expr, #expr, goto label) + +/** + * Check that an expression evaluates to NV_OK or return the status. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_CHECK_OK_OR_RETURN(level, expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr, \ + return rm_pvt_status); \ + } while(0) + + +/** + * Check that an expression evaluates to NV_OK or else record the error code and + * do something. + * + * Although it can be used directly, this second level macro is designed to be + * called from other macros. Passing expr through multiple levels of macros + * before it is stringified expands it. This is especially bad for DRF macros, + * which result in an embedded %, breaking the format string in the + * NV_CHECK_OK_FAILED_INLINE macro defined above. The macros in this header + * always pass the stringified expr as a into the second level macros as + * a separate parameter. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] exprStr Expression as a string. + * @param[in] elseStmt Statement to evaluate if the expression returns error. + */ +#define NV_CHECK_OK_OR_ELSE_STR(status, level, expr, exprStr, elseStmt) \ + do \ + { \ + status = (expr); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_CHECK_OK_FAILED(level, exprStr, status); \ + elseStmt; \ + } \ + } while (0) + + +// ***************************************************************************** +// * NV_ASSERT_PRECOMP family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Exactly the same as the NV_ASSERT macros, but always safe to use in + * precompiled headers. + * + * On platforms other than GSP-RM, the _INLINE macros cannot be used inside + * precompiled headers due to conflicting NVLOG_PRINT_IDs. The PRECOMP macros + * work around this issue by calling helper functions for NvLog. + * + * Hoping for a better solution, only the macro variants that are currently + * used in precompiled headers are defined. + * + * See the NV_ASSERT documentation above for parameters and use cases. 
+ */
+#define NV_ASSERT_PRECOMP(expr)                                               \
+    NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, /* no other action */)
+
+#define NV_ASSERT_OR_RETURN_PRECOMP(expr, retval)                             \
+    NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, return (retval))
+
+#define NV_ASSERT_OR_RETURN_VOID_PRECOMP(expr)                                \
+    NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, return)
+
+#define NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, exprStr, elseStmt)                \
+    do                                                                        \
+    {                                                                         \
+        PORT_COVERAGE_PUSH_OFF();                                             \
+        if (NV_UNLIKELY(!(expr)))                                             \
+        {                                                                     \
+            NV_ASSERT_FAILED_PRECOMP(exprStr);                                \
+            elseStmt;                                                         \
+        }                                                                     \
+        PORT_COVERAGE_POP();                                                  \
+    } while (0)
+
+/**
+ * @def NV_CHECKED_ONLY
+ * @brief Compile a piece of code only in checked builds.
+ *
+ * This macro helps avoid #ifdefs to improve readability but should be
+ * used sparingly.
+ *
+ * Cases that make heavy use of this should likely define a wrapper
+ * macro or other abstraction for the build variation.
+ * For example NV_CHECKED_ONLY(NV_PRINTF(...)) is not a good use case.
+ *
+ * A typical use case is declaring and setting a canary value:
+ * ~~~{.c}
+ * typedef struct
+ * {
+ *     NV_CHECKED_ONLY(NvU32 canary);
+ *     ...
+ * } MY_STRUCT;
+ *
+ * void initMyStruct(MY_STRUCT *pStruct)
+ * {
+ *     NV_CHECKED_ONLY(pStruct->canary = 0xDEADBEEF);
+ *     ...
+ * }
+ *
+ * void destroyMyStruct(MY_STRUCT *pStruct)
+ * {
+ *     NV_ASSERT_CHECKED(pStruct->canary == 0xDEADBEEF);
+ *     ...
+ * }
+ * ~~~
+ */
+#if PORT_IS_CHECKED_BUILD
+#define NV_CHECKED_ONLY NV_EXPAND
+#else
+#define NV_CHECKED_ONLY NV_DISCARD
+#endif
+
+// Verify prerequisites are defined.
+#ifndef PORT_IS_CHECKED_BUILD
+#error "NV_PORT_HEADER must define PORT_IS_CHECKED_BUILD"
+#endif
+#ifndef PORT_BREAKPOINT_CHECKED
+#error "NV_PORT_HEADER must define PORT_BREAKPOINT_CHECKED"
+#endif
+#ifndef PORT_COVERAGE_PUSH_OFF
+#error "NV_PORT_HEADER must define PORT_COVERAGE_PUSH_OFF"
+#endif
+#ifndef PORT_COVERAGE_POP
+#error "NV_PORT_HEADER must define PORT_COVERAGE_POP"
+#endif
+#ifndef NV_PRINTF
+#error "NV_PRINTF_HEADER must define NV_PRINTF"
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif //__cplusplus
+/// @}
+#endif // _NV_UTILS_ASSERT_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h
new file mode 100644
index 0000000..fccd288
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h
@@ -0,0 +1,476 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_UTILS_NV_BITVECTOR_H_ +#define _NV_UTILS_NV_BITVECTOR_H_ + +#include "nvport/nvport.h" +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "utils/nvassert.h" +#include "utils/nvrange.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// +// Note: This will need to be recalculated if the data size changes +// IDX(i) = (index & ~(MASK(num bits)) >> log2(num bits) +// +#define NV_BITVECTOR_IDX(index) (((index) & ~(0x3F)) >> 6) +#define NV_BITVECTOR_ARRAY_SIZE(last) (NV_BITVECTOR_IDX((last) - 1) + 1) +#define NV_BITVECTOR_BYTE_SIZE(last) (NV_BITVECTOR_ARRAY_SIZE((last)) * sizeof(NvU64)) +#define NV_BITVECTOR_OFFSET(index) ((index) & ((sizeof(NvU64) * 8) - 1)) + +/** + * \anchor NV_BITVECTOR_1 + * @defgroup NV_BITVECTOR NV_BITVECTOR + * + * @brief NV_BITVECTOR is a collection of individual consecutive bit flags + * packed within an array of 64-bit integers. Each derivative of the + * NV_BITVECTOR type may specify the number of queryable flags, and the + * array will be sized according to the minimum number of 64-bit integers + * required to hold the flags. + * + * @details NV_BITVECTOR is a general purpose data structure utility. + * It consists of a single (real) field, named \b qword. + * Flags within a NV_BITVECTOR are represented beginning with the LSB of + * index 0 of \b qword, and are packed fully within a single qword + * before expanding into a new qword. Derivatives of NV_BITVECTOR must + * provide a type name for the new type, and the first index outside of + * the range of the new type (this value must be greater than 0.) A + * bitvector with bits 63 and 64 raised is represented in memory in a + * little-endian system as follows: + * + * 63 NV_BITVECTOR_OFFSET(i) 0 + * .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. + * 0 |1 | + * 1 | 1| + * `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' + * + * Thus, in order to conceptually model an NV_BITVECTOR horizontally as + * a continual ordered list of bits, one would have to write the + * bitvector from highest index to lowest, and read from right to left. + * + * @note The unused bits within a derivative type of NV_BITVECTOR are reserved, + * and must not be depended upon to contain any consistent value. 
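+ *
+ * A minimal usage sketch (the type name and bound are illustrative):
+ * ~~~{.c}
+ * MAKE_BITVECTOR(TEST_BITVECTOR, 128);   // flags 0..127 are addressable
+ *
+ * TEST_BITVECTOR flags;
+ * bitVectorClrAll(&flags);
+ * bitVectorSet(&flags, 63);
+ * bitVectorSet(&flags, 64);              // lands in the second qword
+ * NV_ASSERT(bitVectorTest(&flags, 64));
+ * ~~~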
+ * + * @{ + */ +typedef struct NV_BITVECTOR NV_BITVECTOR; +struct NV_BITVECTOR +{ + NvU64 qword; +}; + +#define TYPEDEF_BITVECTOR(bitvectTypeName) \ + union bitvectTypeName; \ + typedef union bitvectTypeName bitvectTypeName; \ + +#define IMPL_BITVECTOR(bitvectTypeName, last_val) \ + union bitvectTypeName \ + { \ + NV_BITVECTOR real; \ + NvU64 qword[NV_BITVECTOR_ARRAY_SIZE(last_val)]; \ + struct \ + { \ + char _[last_val]; \ + char asrt[1 - 2 * !(last_val > 0)]; \ + } *last; \ + } + +#define MAKE_BITVECTOR(bitvectTypeName, last_val) \ + TYPEDEF_BITVECTOR(bitvectTypeName) \ + IMPL_BITVECTOR(bitvectTypeName, last_val) + +#define MAKE_ANON_BITVECTOR(last_val) \ + IMPL_BITVECTOR( , last_val) + +#define bitVectorSizeOf(pBitVector) \ + bitVectorSizeOf_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorClrAll(pBitVector) \ + bitVectorClrAll_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorClr(pBitVector, idx) \ + bitVectorClr_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (idx)) + +#define bitVectorClrRange(pBitVector, range) \ + bitVectorClrRange_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (range)) + +#define bitVectorSetAll(pBitVector) \ + bitVectorSetAll_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorSet(pBitVector, idx) \ + bitVectorSet_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (idx)) + +#define bitVectorSetRange(pBitVector, range) \ + bitVectorSetRange_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (range)) + +#define bitVectorFromArrayU16(pBitVector, pArr, sz) \ + bitVectorFromArrayU16_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + (pArr), \ + (sz)) + +#define bitVectorTestAllSet(pBitVector) \ + bitVectorTestAllSet_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorTestAllCleared(pBitVector) \ + bitVectorTestAllCleared_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorTestEqual(pBitVectorA, pBitVectorB) \ + bitVectorTestEqual_IMPL(&((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorTestIsSubset(pBitVectorA, pBitVectorB) \ + bitVectorTestIsSubset_IMPL(&((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorTest(pBitVector, idx) \ + bitVectorTest_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + (idx)) + +#define bitVectorAnd(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorAnd_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorOr(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorOr_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorXor(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorXor_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorComplement(pBitVectorDst, pBitVectorSrc) \ + 
bitVectorComplement_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorSrc)->real), \ + sizeof(((pBitVectorSrc)->last->_))) + +#define bitVectorCopy(pBitVectorDst, pBitVectorSrc) \ + bitVectorCopy_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorSrc)->real), \ + sizeof(((pBitVectorSrc)->last->_))) + +#define bitVectorCountTrailingZeros(pBitVector) \ + bitVectorCountTrailingZeros_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorCountLeadingZeros(pBitVector) \ + bitVectorCountLeadingZeros_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorCountSetBits(pBitVector) \ + bitVectorCountSetBits_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorToRaw(pBitVector, pRawMask, rawMaskSize) \ + bitVectorToRaw_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + pRawMask, \ + rawMaskSize) + +#define bitVectorFromRaw(pBitVector, pRawMask, rawMaskSize) \ + bitVectorFromRaw_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + pRawMask, \ + rawMaskSize) + +#define FOR_EACH_IN_BITVECTOR(pBitVector, index) \ + { \ + MAKE_ANON_BITVECTOR(sizeof(((pBitVector)->last->_))) localMask; \ + bitVectorCopy(&localMask, (pBitVector)); \ + for ((index) = bitVectorCountTrailingZeros(&localMask); \ + !bitVectorTestAllCleared(&localMask); \ + bitVectorClr(&localMask, (index)), \ + (index) = bitVectorCountTrailingZeros(&localMask)) \ + { + +#define FOR_EACH_IN_BITVECTOR_END() \ + } \ + } + +#define FOR_EACH_IN_BITVECTOR_PAIR(pBitVectorA, indexA, pBitVectorB, indexB)\ + { \ + MAKE_ANON_BITVECTOR(sizeof(((pBitVectorA)->last->_))) localMaskA; \ + bitVectorCopy(&localMaskA, (pBitVectorA)); \ + MAKE_ANON_BITVECTOR(sizeof(((pBitVectorB)->last->_))) localMaskB; \ + bitVectorCopy(&localMaskB, (pBitVectorB)); \ + for ((indexA) = bitVectorCountTrailingZeros(&localMaskA), \ + (indexB) = bitVectorCountTrailingZeros(&localMaskB); \ + !bitVectorTestAllCleared(&localMaskA) && \ + !bitVectorTestAllCleared(&localMaskB); \ + bitVectorClr(&localMaskA, (indexA)), \ + bitVectorClr(&localMaskB, (indexB)), \ + (indexA) = bitVectorCountTrailingZeros(&localMaskA), \ + (indexB) = bitVectorCountTrailingZeros(&localMaskB)) \ + { + +#define FOR_EACH_IN_BITVECTOR_PAIR_END() \ + } \ + } + +NvU32 +bitVectorSizeOf_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorClrAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorClr_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorClrRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorSetAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorSet_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorSetRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorInv_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorInvRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorFromArrayU16_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 *pIndices, + NvU32 indicesSize +); + +NvBool +bitVectorTestAllSet_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvBool 
+bitVectorTestAllCleared_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvBool +bitVectorTestEqual_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NvBool +bitVectorTestIsSubset_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NvBool +bitVectorTest_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorAnd_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NV_STATUS +bitVectorOr_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NV_STATUS +bitVectorXor_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NV_STATUS +bitVectorComplement_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +); + +NV_STATUS +bitVectorCopy_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +); + +NvU32 +bitVectorCountTrailingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvU32 +bitVectorCountLeadingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvU32 +bitVectorCountSetBits_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorToRaw_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + void *pRawMask, + NvU32 rawMaskize +); + +NV_STATUS +bitVectorFromRaw_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + const void *pRawMask, + NvU32 rawMaskSize +); + +#ifdef __cplusplus +} +#endif +///@} +/// NV_UTILS_BITVECTOR +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h new file mode 100644 index 0000000..b6407ee --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h @@ -0,0 +1,251 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief Standard utility macros for some more advanced CPP operations + */ + +#ifndef _NV_UTILS_MACRO_H_ +#define _NV_UTILS_MACRO_H_ + +/** + * @defgroup NV_UTILS_MACRO Standard utility Macros + * + * @brief Implements commonly used macros for advanced CPP operations + * + * @{ + */ + +/** + * @brief Expands all arguments + */ +#define NV_EXPAND(...) __VA_ARGS__ +/** + * @brief Discards all arguments + */ +#define NV_DISCARD(...) + +/** + * @brief Fully expands the given argument, then stringifies it. + */ +#define NV_STRINGIFY(s) _NV_STRINGIFY(s) +/** + * @brief Fully expands both arguments, then concatenates them. + */ +#define NV_CONCATENATE(a, b) _NV_CONCATENATE(a, b) + +/** + * @brief Returns a number literal corresponding to the number of arguments. + * + * NV_NUM_ARGS(x) expands to 1 + * NV_NUM_ARGS(x,y,z) expands to 3 + * + * @warning Due to differences in standards, it is impossible to make this + * consistently return 0 when called without arguments. Thus, the behavior of + * NV_NUM_ARGS() is undefined, and shouldn't be counted on. + * If you do decide to use it: It usually returns 0, except when -std=c++11. + * + * @note Works for a maximum of 16 arguments + */ +#define NV_NUM_ARGS(...) _NV_NUM_ARGS(unused, ##__VA_ARGS__, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define _NV_NUM_ARGS(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, N, ...) N + +/** + * @brief Performs an operation on each of its arguments, except first + * + * @param what - Function or function-like macro that takes exactly one param. + * This will be called for other args: what(arg1), what(arg2), ... + * + * NV_FOREACH_ARG(foo, a, b, c) expands to: foo(a), foo(b), foo(c) + * #define INC(x) (x+1) + * NV_FOREACH_ARG(INC,0,1,2,3,4) expands to: (0+1), (1+1), (2+1), (3+1), (4+1) + * + * @note Works for a maximum of 16 arguments, not counting 'what' param + */ +#define NV_FOREACH_ARG(what, ...) \ + NV_CONCATENATE(_NV_FOREACH_ARG_, NV_NUM_ARGS(__VA_ARGS__)) (what, __VA_ARGS__) + +/** + * @brief Similar NV_FOREACH_ARG, but without comma in the expanded result + * + * @param what - Function or function-like macro that takes exactly one param. + * This will be called for other args: what(arg1) what(arg2) ... + * + * NV_FOREACH_ARG(foo, a, b, c) expands to: foo(a) foo(b) foo(c) + * #define OR(x) | (x) + * #define FLAGS(...) (0 NV_FOREACH_ARG_NOCOMMA(OR, __VA_ARGS__)) + * FLAGS(flag1, flag2, flag3) expands to: 0 | (flag1) | (flag2) | (flag3) + * + * @note Works for a maximum of 16 arguments, not counting 'what' param + */ +#define NV_FOREACH_ARG_NOCOMMA(what, ...) 
\
+    NV_CONCATENATE(_NV_FOREACH_ARG_NOCOMMA_, NV_NUM_ARGS(__VA_ARGS__)) (what, __VA_ARGS__)
+
+
+/**
+ * @brief Compile time evaluate a condition
+ *
+ * - If 'cond' evaluates to 1 at compile time, macro expands to 'then'
+ * - If 'cond' evaluates to 0 at compile time, macro expands to nothing
+ * - If 'cond' is undefined or evaluates to anything else, report a build error
+ */
+#define NV_STATIC_IF(cond, then)                                              \
+    NV_EXPAND(NV_CONCATENATE(NV_STATIC_IF_, NV_EXPAND(cond))) (then)
+
+
+/**
+ * @brief Similar to @ref NV_STATIC_IF except condition is reversed
+ *
+ * - If 'cond' evaluates to 0 at compile time, macro expands to 'then'
+ * - If 'cond' evaluates to 1 at compile time, macro expands to nothing
+ * - If 'cond' is undefined or evaluates to anything else, report a build error
+ */
+#define NV_STATIC_IFNOT(cond, then)                                           \
+    NV_EXPAND(NV_CONCATENATE(NV_STATIC_IFNOT_, NV_EXPAND(cond))) (then)
+
+
+/**
+ * @brief Similar to @ref NV_STATIC_IF except with both THEN and ELSE branches
+ *
+ * - If 'cond' evaluates to 1 at compile time, macro expands to 'then'
+ * - If 'cond' evaluates to 0 at compile time, macro expands to 'els'
+ * - If 'cond' is undefined or evaluates to anything else, report a build error
+ */
+#define NV_STATIC_IFELSE(cond, then, els)                                     \
+    NV_STATIC_IF(NV_EXPAND(cond), then) NV_STATIC_IFNOT(NV_EXPAND(cond), els)
+
+/// @}
+
+/// @cond NV_MACROS_IMPLEMENTATION
+
+#define _NV_STRINGIFY(s)     #s
+#define _NV_CONCATENATE(a, b) a##b
+
+#define NV_STATIC_IF_0(then) NV_DISCARD(then)
+#define NV_STATIC_IF_1(then) NV_EXPAND(then)
+
+#define NV_STATIC_IFNOT_0(then) NV_EXPAND(then)
+#define NV_STATIC_IFNOT_1(then) NV_DISCARD(then)
+
+// Iterating over empty list is unsupported. Give a semi-readable error.
+#define _NV_FOREACH_ARG_0(X) NV_FOREACH_ERROR_argument_list_empty
+
+#define _NV_FOREACH_ARG_1(X, _1) \
+    X(_1)
+#define _NV_FOREACH_ARG_2(X, _1, _2) \
+    X(_1), X(_2)
+#define _NV_FOREACH_ARG_3(X, _1, _2, _3) \
+    X(_1), X(_2), X(_3)
+#define _NV_FOREACH_ARG_4(X, _1, _2, _3, _4) \
+    X(_1), X(_2), X(_3), X(_4)
+#define _NV_FOREACH_ARG_5(X, _1, _2, _3, _4, _5) \
+    X(_1), X(_2), X(_3), X(_4), X(_5)
+#define _NV_FOREACH_ARG_6(X, _1, _2, _3, _4, _5, _6) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6)
+#define _NV_FOREACH_ARG_7(X, _1, _2, _3, _4, _5, _6, _7) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7)
+#define _NV_FOREACH_ARG_8(X, _1, _2, _3, _4, _5, _6, _7, _8) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8)
+#define _NV_FOREACH_ARG_9(X, _1, _2, _3, _4, _5, _6, _7, _8, _9) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9)
+#define _NV_FOREACH_ARG_10(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10)
+#define _NV_FOREACH_ARG_11(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11)
+#define _NV_FOREACH_ARG_12(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12)
+#define _NV_FOREACH_ARG_13(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13)
+#define _NV_FOREACH_ARG_14(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14)
+#define _NV_FOREACH_ARG_15(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15)
\
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14), X(_15)
+#define _NV_FOREACH_ARG_16(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+    X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14), X(_15), X(_16)
+
+// Iterating over empty list is unsupported. Give a semi-readable error.
+#define _NV_FOREACH_ARG_NOCOMMA_0(X) NV_FOREACH_NOCOMMA_ERROR_argument_list_empty
+
+#define _NV_FOREACH_ARG_NOCOMMA_1(X, _1) \
+    X(_1)
+#define _NV_FOREACH_ARG_NOCOMMA_2(X, _1, _2) \
+    X(_1) X(_2)
+#define _NV_FOREACH_ARG_NOCOMMA_3(X, _1, _2, _3) \
+    X(_1) X(_2) X(_3)
+#define _NV_FOREACH_ARG_NOCOMMA_4(X, _1, _2, _3, _4) \
+    X(_1) X(_2) X(_3) X(_4)
+#define _NV_FOREACH_ARG_NOCOMMA_5(X, _1, _2, _3, _4, _5) \
+    X(_1) X(_2) X(_3) X(_4) X(_5)
+#define _NV_FOREACH_ARG_NOCOMMA_6(X, _1, _2, _3, _4, _5, _6) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6)
+#define _NV_FOREACH_ARG_NOCOMMA_7(X, _1, _2, _3, _4, _5, _6, _7) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7)
+#define _NV_FOREACH_ARG_NOCOMMA_8(X, _1, _2, _3, _4, _5, _6, _7, _8) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8)
+#define _NV_FOREACH_ARG_NOCOMMA_9(X, _1, _2, _3, _4, _5, _6, _7, _8, _9) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9)
+#define _NV_FOREACH_ARG_NOCOMMA_10(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10)
+#define _NV_FOREACH_ARG_NOCOMMA_11(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11)
+#define _NV_FOREACH_ARG_NOCOMMA_12(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12)
+#define _NV_FOREACH_ARG_NOCOMMA_13(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13)
+#define _NV_FOREACH_ARG_NOCOMMA_14(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14)
+#define _NV_FOREACH_ARG_NOCOMMA_15(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14) X(_15)
+#define _NV_FOREACH_ARG_NOCOMMA_16(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+    X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14) X(_15) X(_16)
+
+/// @endcond
+
+/// @cond NV_MACROS_COMPILE_TIME_TESTS
+//
+// What follows are a couple of compile time smoke tests that will let us know
+// if the given compiler does not properly implement these macros.
+// These are disabled by default in the interest of compile time.
+//
+#if defined(NVMACRO_DO_COMPILETIME_TESTS)
+#if NV_NUM_ARGS(a) != 1
+#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 1 args"
+#endif
+#if NV_NUM_ARGS(a,b,c,d) != 4
+#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 4 args"
+#endif
+#if NV_NUM_ARGS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) != 16
+#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 16 args"
+#endif
+
+#define _NVMACRO_ADD_TYPE(x) int x
+extern void _nvmacro_compiletime_test_func_proto1(NV_FOREACH_ARG(_NVMACRO_ADD_TYPE, aa, bb, cc));
+
+#define _NVMACRO_ADD_TYPES(...)
NV_FOREACH_ARG(_NVMACRO_ADD_TYPE, __VA_ARGS__) +extern void _nvmacro_compiletime_test_func_proto2(_NVMACRO_ADD_TYPES(a, b, c)); + +#endif // NVMACRO_DO_COMPILETIME_TESTS +/// @endcond + +#endif // _NV_UTILS_MACRO_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h new file mode 100644 index 0000000..2588e0c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h @@ -0,0 +1,453 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief Standard printf logging interface + */ + +#ifndef _NV_UTILS_PRINTF_H_ +#define _NV_UTILS_PRINTF_H_ + +#ifdef __cplusplus +extern "C" { +#endif + + +/// @defgroup NV_PRINTF_LEVELS Printf verbosity levels +/// @{ +/// @brief Prints at this level are discarded +#define LEVEL_SILENT 0x0 +/// @brief Verbose debug logging level (e.g. signaling function entry) +#define LEVEL_INFO 0x1 +/// @brief Standard debug logging level (e.g. Illegal ctrcall call) +#define LEVEL_NOTICE 0x2 +/// @brief Warning logging level (e.g. feature not supported) +#define LEVEL_WARNING 0x3 +/// @brief Error logging level (e.g. resource allocation failed) +#define LEVEL_ERROR 0x4 +/// @brief Recoverable HW error (e.g. RC events) +#define LEVEL_HW_ERROR 0x5 +/// @brief Unrecoverable error (e.g. Bus timeout) +#define LEVEL_FATAL 0x6 +/// @} + +// Used only in nvlogFilterApplyRule() +#define NV_LEVEL_MAX LEVEL_FATAL + +/** + * @def NV_PRINTF_LEVEL_ENABLED(level) + * @brief This macro evaluates to 1 if prints of a given level will be compiled. + * + * By default, it is available on all builds that allow strings + */ +#ifndef NV_PRINTF_LEVEL_ENABLED +#define NV_PRINTF_LEVEL_ENABLED(level) ((level) >= NV_PRINTF_LEVEL) +#endif + +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +/** + * GSPRM uses a different system for logging. + * The format strings, filename, line number, etc. are stored in a separate + * data segment that is not loaded on the GSP, but is available to the decoder. + */ + +#include "libos_log.h" + +/** + * Define NV_PRINTF_LEVEL to the minimum level for debug output. This is compared + * to the level for each NV_PRINT to cull them at compile time. 
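+ *
+ * For example (illustrative): with NV_PRINTF_LEVEL set to LEVEL_NOTICE as it
+ * is below, an NV_PRINTF(LEVEL_INFO, ...) call compiles away entirely, while
+ * NV_PRINTF(LEVEL_ERROR, ...) is retained.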
+ */
+#define NV_PRINTF_LEVEL LEVEL_NOTICE
+
+#undef NV_PRINTF_ENABLED
+#define NV_PRINTF_ENABLED 1
+
+#undef NV_PRINTF_STRINGS_ALLOWED
+#define NV_PRINTF_STRINGS_ALLOWED 0
+
+#define NV_PRINTF_STRING_SECTION LIBOS_SECTION_LOGGING
+
+#define MAKE_NV_PRINTF_STR(str)                                      \
+({                                                                   \
+    static NV_PRINTF_STRING_SECTION const char rm_pvt_str[] = str;   \
+    rm_pvt_str;                                                      \
+})
+
+// NVLOG is not used on GSP-RM.
+#undef NVLOG_LEVEL
+#define NVLOG_LEVEL LEVEL_FATAL
+
+// Direct dmesg printing through NV_PRINTF_STRING is a no-op on GSP-RM
+#define NV_PRINTF_STRING(module, level, format, ...)
+
+#if defined(GSP_PLUGIN_BUILD)
+
+void log_vgpu_log_entry(const NvU64 n_args, const NvU64 * args);
+
+#define NV_PRINTF(level, format, ...) do {                         \
+    if (NV_PRINTF_LEVEL_ENABLED(level))                            \
+    {                                                              \
+        LIBOS_LOG_INTERNAL(log_vgpu_log_entry, LOG_LEVEL_ERROR,    \
+                           format, ##__VA_ARGS__);                 \
+    }                                                              \
+} while (0)
+
+#define NV_PRINTF_EX(module, level, format, ...) do {              \
+    if (NV_PRINTF_LEVEL_ENABLED(level))                            \
+    {                                                              \
+        LIBOS_LOG_INTERNAL(log_vgpu_log_entry, LOG_LEVEL_ERROR,    \
+                           format, ##__VA_ARGS__);                 \
+    }                                                              \
+} while (0)
+
+#define NVLOG_PRINTF(...)
+
+#else
+
+void log_rm_log_entry(const NvU64 n_args, const NvU64 * args);
+
+#define NV_PRINTF(level, format, ...) do {                         \
+    if (NV_PRINTF_LEVEL_ENABLED(level))                            \
+    {                                                              \
+        LIBOS_LOG_INTERNAL(log_rm_log_entry, LOG_LEVEL_ERROR,      \
+                           format, ##__VA_ARGS__);                 \
+    }                                                              \
+} while (0)
+
+#define NV_PRINTF_EX(module, level, format, ...) do {              \
+    if (NV_PRINTF_LEVEL_ENABLED(level))                            \
+    {                                                              \
+        LIBOS_LOG_INTERNAL(log_rm_log_entry, LOG_LEVEL_ERROR,      \
+                           format, ##__VA_ARGS__);                 \
+    }                                                              \
+} while (0)
+
+#endif // GSP_PLUGIN_BUILD
+
+#else // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64)
+
+/**
+ * @defgroup NV_UTILS_PRINTF Utility Printing Macros
+ *
+ * @brief Provides a light abstraction layer for printf logging.
+ *
+ * NvPort and NvLog are used for portability and logging primitives.
+ * If an environment cannot use these directly then it can override
+ * the NV_PORT_HEADER and NV_LOG_HEADER defines in its makefile
+ * to point to appropriate replacements.
+ * @{
+ */
+
+#ifndef NV_PRINTF_PREFIX
+/**
+ * @brief Prefix to prepend to all messages printed by @ref NV_PRINTF.
+ */
+#define NV_PRINTF_PREFIX ""
+#endif
+
+#ifndef NV_PRINTF_PREFIX_SEPARATOR
+/**
+ * @brief Separator between prefix messages printed by @ref NV_PRINTF.
+ *
+ * If defined, it must be a single character followed by an optional space.
+ */
+#define NV_PRINTF_PREFIX_SEPARATOR ""
+#endif
+
+#ifndef NV_PRINTF_ADD_PREFIX
+/**
+ * @brief Apply the full prefix string to a format string.
+ *
+ * This is a function-like macro so it can support inserting arguments after the
+ * format string. Example:
+ *   #define NV_PRINTF_ADD_PREFIX(fmt) "%s():"fmt, __FUNCTION__
+ */
+#define NV_PRINTF_ADD_PREFIX(fmt) NV_PRINTF_PREFIX NV_PRINTF_PREFIX_SEPARATOR fmt
+#endif
+
+// Include portability header, falling back to NvPort if not provided.
+#ifndef NV_PORT_HEADER
+#define NV_PORT_HEADER "nvport/nvport.h"
+#endif
+#include NV_PORT_HEADER
+
+
+// Include logging header, falling back to NvLog if not provided.
+#ifndef NV_LOG_HEADER
+#define NV_LOG_HEADER "nvlog/nvlog_printf.h"
+#endif
+#include NV_LOG_HEADER
+
+#define NV_PRINTF_STRING_SECTION
+
+#define MAKE_NV_PRINTF_STR(str) str
+
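+//
+// Editor's note (illustrative, not part of the original header):
+// MAKE_NV_PRINTF_STR lets shared code hold a string literal in the
+// environment-appropriate section, e.g.
+//
+//     const char *msg = MAKE_NV_PRINTF_STR("engine init");
+//
+// On GSP-RM builds above, the literal lands in LIBOS_SECTION_LOGGING and
+// stays out of the loaded image; here it is simply the literal itself.
+//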
+/**
+ * @def NV_PRINTF(level, format, args...)
+ * @brief Standard formatted printing/logging interface.
+ *
+ * @param level   - Debug level to print at. One of @ref NV_PRINTF_LEVELS
+ * @param format  - A standard printf format string. Must be a string literal.
+ * @param args... - Arguments for the format string literal, like regular printf
+ *
+ * The logging header can redefine the behavior, but the basic implementation
+ * will just print to standard output, like the printf function.
+ *
+ * This will print to the @ref NV_PRINTF_MODULE module. If the module is not
+ * defined, it will default to GLOBAL. Use @ref NV_PRINTF_EX to specify another
+ * module.
+ *
+ * This will prefix the prints with @ref NV_PRINTF_PREFIX string and function
+ * name. To specify a different (or no) prefix, use @ref NV_PRINTF_EX.
+ *
+ * @note The format string must be a string literal. The level can be a variable,
+ *       but using the above levels directly may improve speed and code size.
+ */
+#ifndef NV_PRINTF
+#define NV_PRINTF(level, format, ...) \
+    NV_PRINTF_EX(NV_PRINTF_MODULE, level, NV_PRINTF_ADD_PREFIX(format), ##__VA_ARGS__)
+#endif
+
+
+
+/**
+ * @def NV_PRINTF_EX(module, level, format, args...)
+ * @brief Extended version of the standard @ref NV_PRINTF
+ *
+ * This interface allows you to explicitly specify the module to print to and
+ * doesn't perform any automatic prefixing.
+ *
+ * The logging header can redefine the behavior, but the basic implementation
+ * will just print to standard output, like the printf function.
+ *
+ * @note The format string must be a string literal. The level can be a variable,
+ *       but using the above levels directly may improve speed and code size.
+ */
+#ifndef NV_PRINTF_EX
+#define NV_PRINTF_EX(module, level, format, ...)                              \
+    do                                                                        \
+    {                                                                         \
+        NVLOG_PRINTF(module, NVLOG_ROUTE_RM, level, format, ##__VA_ARGS__);   \
+        NV_PRINTF_STRING(module, level, format, ##__VA_ARGS__);               \
+    } while (0)
+#endif
+
+
+/**
+ * @def NV_PRINTF_STRINGS_ALLOWED
+ * @brief This switch controls whether strings are allowed to appear in the
+ *        final binary.
+ *
+ * By default, strings are allowed on DEBUG and QA builds, and all MODS builds
+ */
+#ifndef NV_PRINTF_STRINGS_ALLOWED
+#if defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD)
+#define NV_PRINTF_STRINGS_ALLOWED 1
+#else
+#define NV_PRINTF_STRINGS_ALLOWED 0
+#endif
+#endif // NV_PRINTF_STRINGS_ALLOWED
+
+
+
+//
+// Default values for the compile time switches:
+//  - Strings are allowed on DEBUG and QA builds, and all MODS builds
+//  - NV_PRINTF is only available if strings are allowed
+//  - All levels are available if NV_PRINTF is available.
+
+
+
+//
+// Special handling for RM internal prints so we have equivalent functionality
+// between NV_PRINTF and DBG_PRINTF. This is needed to seamlessly migrate RM to
+// the new interface. The implementations will eventually be fully extracted and
+// only depend on other common code, such as NvPort.
+//
+#if defined(NVRM) && !defined(NVWATCH)
+
+#undef NV_PRINTF_PREFIX
+#define NV_PRINTF_PREFIX "NVRM"
+#undef NV_PRINTF_PREFIX_SEPARATOR
+#define NV_PRINTF_PREFIX_SEPARATOR ": "
+
+#if NV_PRINTF_STRINGS_ALLOWED
+
+// Declare internal RM print function:
+// This is utDbg_Printf in unit tests and nvDbg_Printf in regular RM builds
+#if defined(RM_UNITTEST)
+#define NVRM_PRINTF_FUNCTION utDbg_Printf
+#else
+#define NVRM_PRINTF_FUNCTION nvDbg_Printf
+#endif // defined(RM_UNITTEST)
+
+void NVRM_PRINTF_FUNCTION(const char *file,
+                          int         line,
+                          const char *function,
+                          int         debuglevel,
+                          const char *s,
+                          ...) NVPORT_CHECK_PRINTF_ARGUMENTS(5, 6);
+
+#define NV_PRINTF_STRING(module, level, format, ...)
\
+    NVRM_PRINTF_FUNCTION(NV_FILE_STR, __LINE__, NV_FUNCTION_STR, level, format, ##__VA_ARGS__)
+
+#endif // NV_PRINTF_STRINGS_ALLOWED
+
+// RM always has printf enabled
+#define NV_PRINTF_ENABLED 1
+#endif // defined(NVRM) && !defined(NVWATCH)
+
+
+//
+// Default definitions if none are specified
+//
+
+/**
+ * @def NV_PRINTF_ENABLED
+ * @brief This macro evaluates to 1 if NV_PRINTF is available (either as regular
+ *        debug prints or binary logging)
+ *
+ * By default, it is available on all builds that allow strings
+ */
+#ifndef NV_PRINTF_ENABLED
+#define NV_PRINTF_ENABLED NV_PRINTF_STRINGS_ALLOWED
+#endif
+
+#if NV_PRINTF_STRINGS_ALLOWED
+#define NV_PRINTF_LEVEL LEVEL_INFO
+#else
+#define NV_PRINTF_LEVEL LEVEL_NOTICE
+#endif
+
+/**
+ * @def NV_PRINTF_STRING(module, level, format, ...)
+ * @brief Prints the string to the given output, if strings are allowed.
+ */
+#ifndef NV_PRINTF_STRING
+#if NV_PRINTF_STRINGS_ALLOWED
+#define NV_PRINTF_STRING(module, level, format, ...) \
+    portDbgPrintf(format, ##__VA_ARGS__)
+
+#if !defined(portDbgPrintf) && !PORT_IS_FUNC_SUPPORTED(portDbgPrintf)
+#error "NV_PORT_HEADER must implement portDbgPrintf()"
+#endif
+
+#else
+#define NV_PRINTF_STRING(module, level, format, ...)
+#endif
+#endif // NV_PRINTF_STRING
+
+#ifndef NVLOG_PRINTF
+#define NVLOG_PRINTF(...)
+#endif
+
+#endif // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64)
+
+/**
+ * @def NV_PRINTF_COND(condition, leveltrue, levelfalse, format, args...)
+ * @brief NV_PRINTF with conditional print level
+ *
+ * Splits NV_PRINTF calls with a print level based on a variable or ternary
+ * operation, to be handled by preprocessors to remove INFO-level prints
+ *
+ * If condition is true, uses leveltrue, else uses levelfalse
+ */
+#ifndef NV_PRINTF_COND
+#define NV_PRINTF_COND(condition, leveltrue, levelfalse, format, ...)  \
+    do {                                                               \
+        if (condition)                                                 \
+        {                                                              \
+            NV_PRINTF(leveltrue, format, ##__VA_ARGS__);               \
+        }                                                              \
+        else                                                           \
+        {                                                              \
+            NV_PRINTF(levelfalse, format, ##__VA_ARGS__);              \
+        }                                                              \
+    } while (0)
+#endif
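+
+//
+// Editor's sketch (illustrative only): typical call sites look like
+//
+//     NV_PRINTF(LEVEL_ERROR, "alloc failed: 0x%08x\n", status);
+//     NV_PRINTF_COND(bVerbose, LEVEL_NOTICE, LEVEL_INFO, "probed %u units\n", n);
+//
+// On builds where strings are allowed, the first call expands (by default) to
+// a portDbgPrintf() of "NVRM: alloc failed: ..." in RM builds, plus an
+// NVLOG_PRINTF binary record; on GSP-RM only the binary log entry is emitted.
+//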
+
+
+//
+// NV_FILE and NV_FUNCTION macros are used to wrap the __FILE__ and __FUNCTION__
+// macros, respectively, to enable passing them as parameters on release builds
+// without linking the strings into the object files. Instead, this will use
+// NV_LOG and other utilities to pass values in a way that the same information
+// can be decoded on retail builds.
+//
+// On non-release builds, the strings are directly referenced and included in
+// the builds (just like their normal references in DBG_PRINTF() and
+// DBG_BREAKPOINT()).
+//
+// In MODS builds, we allow all printfs, but don't automatically include the
+// __FILE__ or __FUNCTION__ references.
+//
+#if NV_PRINTF_STRINGS_ALLOWED && (!defined(NV_MODS) || defined(SIM_BUILD) || defined(DEBUG) || defined(NV_MODS_INTERNAL))
+#define NV_FILE_STR      __FILE__
+#define NV_FILE          __FILE__
+#define NV_FILE_FMT      "%s"
+#define NV_FILE_TYPE     const char *
+#define NV_FUNCTION_STR  __FUNCTION__
+#define NV_FUNCTION      __FUNCTION__
+#define NV_FUNCTION_FMT  "%s"
+#define NV_FUNCTION_TYPE const char *
+#else
+#ifndef NV_FILE_STR
+#define NV_FILE_STR ""
+#endif
+#ifdef NVLOG_FILEID
+#  define NV_FILE NVLOG_FILEID
+#else
+#  define NV_FILE 0
+#endif
+#define NV_FILE_FMT  ""
+#define NV_FILE_TYPE NvU32
+//
+// A couple caveats on portUtilExGetStackTrace():
+//
+// 1. portUtilExGetStackTrace is not supported on all builds. For example, see
+//    GCC support in util-gcc-clang.h.
+//
+// 2. portUtilExGetStackTrace(0) will give us the current IP, which is
+//    current_function()+offset. Commands such as `ln` in windbg can translate the
+//    IP into func+offset. But sometimes, due to inlining/optimizations, the
+//    current function at runtime is not the same as at compile time. In the
+//    inlining example, if a function using NV_FUNCTION is inlined, the pointer
+//    printed will be calling_function()+offset.
+//
+//#define NV_FUNCTION portUtilExGetStackTrace(0)
+#define NV_FUNCTION_STR  ""
+#define NV_FUNCTION      0
+#define NV_FUNCTION_FMT  ""
+#define NV_FUNCTION_TYPE NvUPtr
+#endif
+
+#ifdef __cplusplus
+}
+#endif //__cplusplus
+
+/// @}
+#endif // _NV_UTILS_PRINTF_H_
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h
new file mode 100644
index 0000000..f558e11
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h
@@ -0,0 +1,282 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NV_UTILS_NV_RANGE_H_
+#define _NV_UTILS_NV_RANGE_H_
+
+#include "nvtypes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \anchor NV_RANGE_1
+ * @defgroup NV_RANGE NV_RANGE
+ *
+ * @brief Range is a sequence of unsigned 64 bit integers, represented by its
+ * lower and upper bounds, inclusive.
+ *
+ * @details Range is a general purpose data structure utility.
+ *          It consists of two fields: the lower and the upper bound.
+ *          It is assumed that both lower and upper bounds are \b inclusive.
+ *          A range whose lower bound is greater than its upper bound is
+ *          considered to be an empty range.
+ *
+ * @note If a range is declared like Range r = {0x0, 0x2}, it consists
+ *       of the elements 0x0, 0x1 and 0x2, i.e. Range = [lo, hi]!
+ *
+ * > There are 4 possible options
+ * > -# (lo, hi)  lo+1 .. hi-1
+ * > -# [lo, hi)  lo   .. hi-1
+ * > -# (lo, hi]  lo+1 .. hi
+ * > -# [lo, hi]  lo   .. hi
+ *
+ * Notice that only option 4 is capable of describing a full range.
+ * Full range would be 0x0..NvU64_MAX, where
+ * NvU64_MAX = 0xFFFFFFFFFFFFFFFF.
+ *
+ * @{
+ */
+typedef struct NV_RANGE NV_RANGE;
+struct NV_RANGE
+{
+    /** Lower bound of the range, where range includes the lower bound.*/
+    NvU64 lo;
+    /** Upper bound of the range, where range includes the upper bound.*/
+    NvU64 hi;
+};
+
+static const NV_RANGE NV_RANGE_EMPTY = {1, 0};
+
+/**
+ * @brief Checks if range is empty, i.e.
range.lo > range.hi
+ *
+ * @returns NV_TRUE if range is empty, NV_FALSE otherwise.
+ */
+static NV_INLINE NvBool rangeIsEmpty(NV_RANGE range)
+{
+    return range.lo > range.hi;
+}
+
+/**
+ * @brief Calculate range length in bytes.
+ *
+ * @warning If the range is max, i.e. from 0 to NvU64_MAX, calling this
+ *          function would result in overflow since range length is calculated
+ *          as hi-lo+1.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeLengthExample
+ */
+static NV_INLINE NvU64 rangeLength(NV_RANGE range)
+{
+    return rangeIsEmpty(range) ? 0 : range.hi - range.lo + 1;
+}
+
+/**
+ * @brief Creates a range
+ *
+ * @details This is useful since on some compilers the following code won't
+ *          work: `rangeLength({0, 100})`.
+ *          However, `rangeLength(rangeMake(0, 100))` will always work.
+ *
+ * @returns Range of elements from and including \a lo to and
+ *          including \a hi, i.e. [lo, hi]
+ */
+static NV_INLINE NV_RANGE rangeMake(NvU64 lo, NvU64 hi)
+{
+    NV_RANGE rng = {lo, hi};
+    return rng;
+}
+
+/**
+ * @brief Check if the two given ranges are equal.
+ */
+static NV_INLINE NvBool rangeEquals(NV_RANGE range1, NV_RANGE range2)
+{
+    if (rangeIsEmpty(range1) && rangeIsEmpty(range2))
+    {
+        return NV_TRUE;
+    }
+
+    return (range1.lo == range2.lo) && (range1.hi == range2.hi);
+}
+
+/**
+ * @brief Check if \a range1 contains \a range2.
+ *
+ * @param[in] range1 Container.
+ * @param[in] range2 Containee.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeContainsExample
+ */
+static NV_INLINE NvBool rangeContains(NV_RANGE range1, NV_RANGE range2)
+{
+    return (range1.lo <= range2.lo) && (range1.hi >= range2.hi);
+}
+
+/**
+ * @brief Checks if intersection of two ranges is not an empty range.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeOverlapExample
+ */
+static NV_INLINE NvBool rangeOverlaps(NV_RANGE range1, NV_RANGE range2)
+{
+    return (range1.lo <= range2.lo && range2.lo <= range1.hi)
+           ||
+           (range1.lo <= range2.hi && range2.hi <= range1.hi)
+           ||
+           (range2.lo <= range1.lo && range1.lo <= range2.hi)
+           ||
+           (range2.lo <= range1.hi && range1.hi <= range2.hi);
+}
+
+/**
+ * @brief Returns a range representing an intersection between two given ranges.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeOverlapExample
+ */
+static NV_INLINE NV_RANGE rangeIntersection(NV_RANGE range1, NV_RANGE range2)
+{
+    NV_RANGE intersect;
+
+    if (rangeIsEmpty(range1) || rangeIsEmpty(range2))
+    {
+        return NV_RANGE_EMPTY;
+    }
+
+    intersect.lo = range1.lo < range2.lo ? range2.lo : range1.lo;
+    intersect.hi = range1.hi > range2.hi ? range2.hi : range1.hi;
+
+    return intersect;
+}
+
+/**
+ * @brief Compares two ranges.
+ * @returns  0 - \a range1's lower bound is equal to \a range2's lower bound,
+ *          <0 - \a range1's lower bound is less than \a range2's lower bound,
+ *          >0 - \a range1's lower bound is greater than \a range2's lower bound.
+ *
+ * @warning If function returns 0 that does not mean that ranges are equal,
+ *          just that their lower bounds are equal!
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeCompareExample
+ */
+static NV_INLINE NvS32 rangeCompare(NV_RANGE range1, NV_RANGE range2)
+{
+    if (rangeIsEmpty(range1) && rangeIsEmpty(range2))
+    {
+        return 0;
+    }
+
+    return range1.lo >= range2.lo ? (range1.lo == range2.lo ? 0 : 1) : -1;
+}
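+
+//
+// Editor's sketch (illustrative, not part of the original header):
+//
+//     NV_RANGE a = rangeMake(0x00, 0x2F);
+//     NV_RANGE b = rangeMake(0x20, 0xFF);
+//     rangeOverlaps(a, b);                    // NV_TRUE
+//     rangeIntersection(a, b);                // [0x20, 0x2F]
+//     rangeLength(rangeIntersection(a, b));   // 0x10
+//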
+
+/**
+ * @brief Merge two ranges into one.
+ *
+ * @returns Merged range. If two ranges have no intersection
+ *          the returned range will be empty.
+ *
+ * @note Empty range is range with lo > hi.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeMergeExample
+ */
+static NV_INLINE NV_RANGE rangeMerge(NV_RANGE range1, NV_RANGE range2)
+{
+    NV_RANGE merged = NV_RANGE_EMPTY;
+
+    if (rangeIsEmpty(range1) || rangeIsEmpty(range2) || !rangeOverlaps(range1, range2))
+    {
+        return merged;
+    }
+
+    merged.lo = range1.lo;
+    merged.hi = range1.hi;
+
+    if (range2.lo < merged.lo)
+    {
+        merged.lo = range2.lo;
+    }
+    if (range2.hi > merged.hi)
+    {
+        merged.hi = range2.hi;
+    }
+
+    return merged;
+}
+
+/**
+ * @brief Checks if \a range1 borders with \a range2, i.e. \a range1.lo ==
+ *        \a range2.hi+1 or \a range2.lo == \a range1.hi+1
+ *
+ * @note [a,b] borders with [b+1,c] where a < b < c
+ *
+ */
+static NV_INLINE NvBool rangeBorders(NV_RANGE range1, NV_RANGE range2)
+{
+    if (rangeIsEmpty(range1) || rangeIsEmpty(range2))
+    {
+        return NV_FALSE;
+    }
+
+    return (range1.hi + 1 == range2.lo) || (range2.hi + 1 == range1.lo);
+}
+
+/**
+ * @brief Splits \a pBigRange around \a rangeToSplit.
+ *
+ * @param[in,out] pBigRange             Pointer to the starting range; on
+ *                                      success it holds the first part.
+ * @param[in]     rangeToSplit          Range to split the first range over.
+ * @param[out]    pSecondPartAfterSplit Second range after the split.
+ *
+ * @par Example:
+ * @snippet nv_range-test.cpp rangeSplitExample
+ */
+static NV_INLINE NV_STATUS rangeSplit(NV_RANGE *pBigRange,
+                                      NV_RANGE rangeToSplit, NV_RANGE *pSecondPartAfterSplit)
+{
+    if (rangeIsEmpty(*pBigRange) || rangeIsEmpty(rangeToSplit) ||
+        !rangeContains(*pBigRange, rangeToSplit))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pSecondPartAfterSplit->hi = pBigRange->hi;
+    pBigRange->hi = rangeToSplit.lo;
+    pSecondPartAfterSplit->lo = rangeToSplit.hi + 1;
+
+    return NV_OK;
+}
+
+#ifdef __cplusplus
+}
+#endif
+///@}
+/// NV_UTILS_RANGE
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h
new file mode 100644
index 0000000..9b1b5d0
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _OS_DCE_CLIENT_IPC_H_ +#define _OS_DCE_CLIENT_IPC_H_ + +// RM IPC Client Types + +#define DCE_CLIENT_RM_IPC_TYPE_SYNC 0x0 +#define DCE_CLIENT_RM_IPC_TYPE_EVENT 0x1 +#define DCE_CLIENT_RM_IPC_TYPE_MAX 0x2 + +void dceclientHandleAsyncRpcCallback(NvU32 handle, NvU32 interfaceType, + NvU32 msgLength, void *data, + void *usrCtx); +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h new file mode 100644 index 0000000..54992f0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvacpitypes.h" + +#ifndef _ACPIGENFUNCS_H_ +#define _ACPIGENFUNCS_H_ + +#define NV_ACPI_DSM_READ_SIZE (4*1024) + +#define NV_ACPI_GENERIC_FUNC_START 0x0200 +#define NV_ACPI_GENERIC_FUNC_COUNT 9 + +#endif // _ACPIGENFUNCS_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h new file mode 100644 index 0000000..78281d3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMAPI_DEPRECATED_H_ +#define _RMAPI_DEPRECATED_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "nvos.h" +#include "nvsecurityinfo.h" +// +// This file provides implementations for deprecated RM API by building on the +// modern APIs. The implementations support running in either +// user-mode or kernel-mode context and should have no dependencies on RM +// internals. +// + +/*! + * GSS legacy command masks + */ +#define RM_GSS_LEGACY_MASK 0x00008000 +#define RM_GSS_LEGACY_MASK_NON_PRIVILEGED 0x00008000 +#define RM_GSS_LEGACY_MASK_PRIVILEGED 0x0000C000 + + +typedef enum +{ + RMAPI_DEPRECATED_COPYIN, + RMAPI_DEPRECATED_COPYOUT, + RMAPI_DEPRECATED_COPYRELEASE, +} RMAPI_DEPRECATED_COPY_OP; + +typedef enum +{ + RMAPI_DEPRECATED_BUFFER_EMPLACE, // Use buffer passed into CopyUser + RMAPI_DEPRECATED_BUFFER_ALLOCATE // Allocate a new buffer in CopyUser +} RMAPI_DEPRECATED_BUFFER_POLICY; + +/** + * Fields are populated by the deprecated RM API caller. RmAlloc, RmControl, and + * RmFree should be routed to RM. pExtendedContext can hold any domain specific + * state needed by the RmAlloc/etc implementations. AllocMem/FreeMem are routed + * to OS layers for allocation/free-up of system memory. + */ +typedef struct _DEPRECATED_CONTEXT +{ + NV_STATUS (*RmAlloc)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams); + + NV_STATUS (*RmControl)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParams, NvU32 paramsSize); + + NV_STATUS (*RmFree)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hObject); + + NV_STATUS (*RmMapMemory)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hDevice, + NvHandle hMemory, NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags); + + // Copies data in/out of user-mode address space. 
+    NV_STATUS (*CopyUser)(struct _DEPRECATED_CONTEXT *pContext, RMAPI_DEPRECATED_COPY_OP op,
+                          RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, NvP64 dataPtr,
+                          NvU32 dataSize, void **ppKernelPtr);
+    void * (*AllocMem)(NvU32 length);
+    void   (*FreeMem)(void *pAddress);
+    void   *pExtendedContext;
+} DEPRECATED_CONTEXT;
+
+/**
+ * List of deprecated APIs supported by this library
+ */
+void RmDeprecatedAllocObject(DEPRECATED_CONTEXT *pContext, NVOS05_PARAMETERS *pArgs);
+void RmDeprecatedAddVblankCallback(DEPRECATED_CONTEXT *pContext, NVOS61_PARAMETERS *pArgs);
+void RmDeprecatedAllocContextDma(DEPRECATED_CONTEXT *pContext, NVOS39_PARAMETERS *pArgs);
+void RmDeprecatedBindContextDma(DEPRECATED_CONTEXT *pContext, NVOS49_PARAMETERS *pArgs);
+void RmDeprecatedI2CAccess(DEPRECATED_CONTEXT *pContext, NVOS_I2C_ACCESS_PARAMS *pArgs);
+void RmDeprecatedIdleChannels(DEPRECATED_CONTEXT *pContext, NVOS30_PARAMETERS *pArgs);
+void RmDeprecatedVidHeapControl(DEPRECATED_CONTEXT *pContext, NVOS32_PARAMETERS *pArgs);
+void RmDeprecatedAllocMemory(DEPRECATED_CONTEXT *pContext, NVOS02_PARAMETERS *pArgs);
+
+
+/**
+ * List of utility functions (used within shims)
+ */
+typedef NV_STATUS (*RmDeprecatedControlHandler)(API_SECURITY_INFO*,DEPRECATED_CONTEXT*,NVOS54_PARAMETERS*);
+RmDeprecatedControlHandler RmDeprecatedGetControlHandler(NVOS54_PARAMETERS *pArgs);
+
+NV_STATUS RmDeprecatedGetHandleParent(DEPRECATED_CONTEXT *pContext, NvHandle hClient,
+                                      NvHandle hObject, NvHandle *phParent);
+NV_STATUS RmDeprecatedGetClassID(DEPRECATED_CONTEXT *pContext, NvHandle hClient,
+                                 NvHandle hObject, NvU32 *pClassId);
+NV_STATUS RmDeprecatedFindOrCreateSubDeviceHandle(DEPRECATED_CONTEXT *pContext, NvHandle hClient,
+                                                  NvHandle hDeviceOrSubDevice, NvHandle *pHSubDevice,
+                                                  NvBool *pBMustFree);
+NV_STATUS RmDeprecatedConvertOs32ToOs02Flags(NvU32 attr, NvU32 attr2, NvU32 os32Flags, NvU32 *pOs02Flags);
+NV_STATUS RmDeprecatedConvertOs02ToOs32Flags(NvU32 os02Flags, NvU32 *pAttr, NvU32 *pAttr2, NvU32 *pOs32Flags);
+
+NV_STATUS RmDeprecatedGetOrAllocObject(DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvU32 classId, NvHandle *pHObject);
+
+NV_STATUS RmCopyUserForDeprecatedApi(RMAPI_DEPRECATED_COPY_OP op,RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy,
+                                     NvP64 dataPtr, NvU32 dataSize, void **ppKernelPtr, NvBool bUserModeArgs);
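+
+//
+// Editor's sketch (illustrative only; the my* callbacks, secInfo, and status
+// variables are hypothetical): an embedding environment wires up a context
+// and dispatches a legacy control through it, e.g.
+//
+//     DEPRECATED_CONTEXT ctx = { myRmAlloc, myRmControl, myRmFree,
+//                                myRmMapMemory, myCopyUser,
+//                                myAllocMem, myFreeMem, NULL };
+//     RmDeprecatedControlHandler pHandler = RmDeprecatedGetControlHandler(pArgs);
+//     if (pHandler != NULL)
+//         status = pHandler(&secInfo, &ctx, pArgs);
+//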
+#endif
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
new file mode 100644
index 0000000..024a274
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
@@ -0,0 +1,421 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "deprecated/rmapi_deprecated.h"
+
+#include "class/cl0080.h" // NV01_DEVICE_0
+#include "class/cl2080.h" // NV20_SUBDEVICE_0
+#include "ctrl/ctrl0000/ctrl0000client.h" // NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE
+#include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE
+#include "nvos.h"
+
+NV_STATUS
+RmDeprecatedGetHandleParent
+(
+    DEPRECATED_CONTEXT *pContext,
+    NvHandle            hClient,
+    NvHandle            hObject,
+    NvHandle           *phParent
+)
+{
+    NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS parentParams = {0};
+    NV_STATUS status;
+
+    parentParams.hObject = hObject;
+    parentParams.index = NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT;
+
+    status = pContext->RmControl(pContext, hClient, hClient, NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO,
+                                 &parentParams, sizeof(parentParams));
+
+    *phParent = parentParams.data.hResult;
+
+    return status;
+}
+
+NV_STATUS
+RmDeprecatedGetClassID
+(
+    DEPRECATED_CONTEXT *pContext,
+    NvHandle            hClient,
+    NvHandle            hObject,
+    NvU32              *pClassId
+)
+{
+    NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS classIdParams = {0};
+    NV_STATUS status;
+
+    classIdParams.hObject = hObject;
+    classIdParams.index = NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID;
+
+    status = pContext->RmControl(pContext, hClient, hClient,
+                                 NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO,
+                                 &classIdParams,
+                                 sizeof(classIdParams));
+
+    *pClassId = NvU64_LO32(classIdParams.data.iResult);
+
+    return status;
+}
+
+NV_STATUS
+RmDeprecatedFindOrCreateSubDeviceHandle
+(
+    DEPRECATED_CONTEXT *pContext,
+    NvHandle            hClient,
+    NvHandle            hDeviceOrSubDevice,
+    NvHandle           *pHSubDevice,
+    NvBool             *pBMustFree
+)
+{
+    NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM findParams = {0};
+    NV_STATUS status;
+    NvU32 classId;
+
+    //
+    // Step 1.) check if we already have a subdevice
+    //
+    status = RmDeprecatedGetClassID(pContext, hClient, hDeviceOrSubDevice, &classId);
+
+    if (status != NV_OK)
+        return status;
+
+    if (classId == NV20_SUBDEVICE_0)
+    {
+        *pBMustFree = NV_FALSE;
+        *pHSubDevice = hDeviceOrSubDevice;
+        return NV_OK;
+    }
+    else if (classId != NV01_DEVICE_0)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // Step 2.) check if there is a subdevice allocated under this device
+    //
+    findParams.subDeviceInst = 0;
+
+    status = pContext->RmControl(pContext, hClient, hDeviceOrSubDevice,
+                                 NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE,
+                                 &findParams,
+                                 sizeof(findParams));
+
+    if (status == NV_OK && findParams.hSubDevice)
+    {
+        *pBMustFree = NV_FALSE;
+        *pHSubDevice = findParams.hSubDevice;
+        return status;
+    }
+
+    //
+    // Step 3.) if there is no subdevice, we temporarily allocate one.
+    // The subdevice must be freed before we exit out to allow the client to
+    // reserve it if it chooses to do so later on.
+    //
+    *pBMustFree = NV_TRUE;
+
+    *pHSubDevice = 0;
+
+    status = pContext->RmAlloc(pContext, hClient, hDeviceOrSubDevice, pHSubDevice, NV20_SUBDEVICE_0, 0);
+
+    return status;
+}
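+
+//
+// Editor's note on the caller pattern (illustrative only): a caller that
+// receives *pBMustFree == NV_TRUE owns the temporary subdevice handle, e.g.
+//
+//     NvHandle hSubDevice;
+//     NvBool   bMustFree;
+//     status = RmDeprecatedFindOrCreateSubDeviceHandle(pContext, hClient,
+//                  hDeviceOrSubDevice, &hSubDevice, &bMustFree);
+//     if (status == NV_OK)
+//     {
+//         // ... issue subdevice controls through pContext->RmControl() ...
+//         if (bMustFree)
+//             pContext->RmFree(pContext, hClient, hSubDevice);
+//     }
+//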
+
+NV_STATUS RmDeprecatedGetOrAllocObject
+(
+    DEPRECATED_CONTEXT *pContext,
+    NvHandle            hClient,
+    NvU32               classId,
+    NvHandle           *pHObject
+)
+{
+    NV_STATUS status;
+
+    NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS params = {0};
+    params.hParent = *pHObject;
+    params.classId = classId;
+    status = pContext->RmControl(pContext, hClient, hClient,
+                                 NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE,
+                                 &params, sizeof(params));
+    // Object already exists, just return it
+    if (status == NV_OK && params.hObject != 0)
+    {
+        *pHObject = params.hObject;
+    }
+    else
+    {
+        // Object does not exist yet, allocate.
+        void *pAllocParams = (void*)0; // TODO: Fill for classes that need them
+        status = pContext->RmAlloc(pContext, hClient, *pHObject,
+                                   pHObject, classId, pAllocParams);
+    }
+    return status;
+}
+
+NV_STATUS
+RmDeprecatedConvertOs32ToOs02Flags
+(
+    NvU32   attr,
+    NvU32   attr2,
+    NvU32   os32Flags,
+    NvU32  *pOs02Flags
+)
+{
+    NvU32 os02Flags = 0;
+    NV_STATUS rmStatus = NV_OK;
+
+    switch (DRF_VAL(OS32, _ATTR, _PHYSICALITY, attr))
+    {
+        case NVOS32_ATTR_PHYSICALITY_DEFAULT: // NVOS02 defaults to contiguous.
+        case NVOS32_ATTR_PHYSICALITY_CONTIGUOUS:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS, os02Flags);
+            break;
+        }
+        default:
+        {
+            rmStatus = NV_ERR_INVALID_FLAGS;
+            break;
+        }
+    }
+
+    switch (DRF_VAL(OS32, _ATTR, _LOCATION, attr))
+    {
+        case NVOS32_ATTR_LOCATION_PCI:
+        case NVOS32_ATTR_LOCATION_ANY: // NVOS02 defaults to PCI
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _PCI, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_LOCATION_AGP:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _AGP, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_LOCATION_VIDMEM:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _VIDMEM, os02Flags);
+            break;
+        }
+        default:
+        {
+            rmStatus = NV_ERR_INVALID_FLAGS;
+            break;
+        }
+    }
+
+    switch (DRF_VAL(OS32, _ATTR, _COHERENCY, attr))
+    {
+        case NVOS32_ATTR_COHERENCY_UNCACHED:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_COHERENCY_CACHED:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_COHERENCY_WRITE_COMBINE:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_COHERENCY_WRITE_THROUGH:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_THROUGH, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_COHERENCY_WRITE_PROTECT:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_PROTECT, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR_COHERENCY_WRITE_BACK:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, os02Flags);
+            break;
+        }
+        default:
+        {
+            rmStatus = NV_ERR_INVALID_FLAGS;
+            break;
+        }
+    }
+
+    switch (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, attr2))
+    {
+        case NVOS32_ATTR2_GPU_CACHEABLE_YES:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, os02Flags);
+            break;
+        }
+        case NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT: // NVOS02 defaults to non-cacheable
+        case NVOS32_ATTR2_GPU_CACHEABLE_NO:
+        {
+            os02Flags = FLD_SET_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _NO, os02Flags);
+            break;
+        }
+        default:
+        {
+            rmStatus =
NV_ERR_INVALID_FLAGS; + break; + } + } + + // VidHeapControl never creates a mapping + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, os02Flags); + if (os32Flags & NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _MAP, os02Flags); + else + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _NO_MAP, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, os02Flags); + else + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _NO, os02Flags); + + if (rmStatus == NV_OK) + { + *pOs02Flags = os02Flags; + } + + return rmStatus; +} + +NV_STATUS +RmDeprecatedConvertOs02ToOs32Flags +( + NvU32 os02Flags, + NvU32 *pAttr, + NvU32 *pAttr2, + NvU32 *pOs32Flags +) +{ + NvU32 os32Flags = 0; + NvU32 attr = 0, attr2 = 0; + NV_STATUS rmStatus = NV_OK; + + attr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, attr); + + if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, os02Flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); + else + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr); + + switch (DRF_VAL(OS02, _FLAGS, _LOCATION, os02Flags)) + { + case NVOS02_FLAGS_LOCATION_PCI: + { + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, attr); + break; + } + case NVOS02_FLAGS_LOCATION_VIDMEM: + { + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS02, _FLAGS, _COHERENCY, os02Flags)) + { + case NVOS02_FLAGS_COHERENCY_UNCACHED: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_CACHED: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _CACHED, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_COMBINE: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_THROUGH: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_THROUGH, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_PROTECT: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_PROTECT, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_BACK: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); + else + attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + if (FLD_TEST_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _MAP, os02Flags)) + os32Flags |= NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP; + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES); + else + attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _NO); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY); + + if (rmStatus == NV_OK) + { + 
*pOs32Flags = os32Flags; + *pAttr = attr; + *pAttr2 = attr2; + } + + return rmStatus; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h new file mode 100644 index 0000000..90dd93f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_FIRMWARE_TYPES_H +#define NV_FIRMWARE_TYPES_H + +typedef enum { + NV_FIRMWARE_MODE_DISABLED = 0, + NV_FIRMWARE_MODE_ENABLED = 1, + NV_FIRMWARE_MODE_DEFAULT = 2, + NV_FIRMWARE_MODE_INVALID = 0xFF +} NvFirmwareMode; + +#endif // NV_FIRMWARE_TYPES_H diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h new file mode 100644 index 0000000..4cb6115 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _NVACPITYPES_H_ +#define _NVACPITYPES_H_ + +typedef enum _ACPI_DSM_FUNCTION +{ + ACPI_DSM_FUNCTION_NBSI = 0, + ACPI_DSM_FUNCTION_NVHG, + ACPI_DSM_FUNCTION_MXM, + ACPI_DSM_FUNCTION_NBCI, + ACPI_DSM_FUNCTION_NVOP, + ACPI_DSM_FUNCTION_PCFG, + ACPI_DSM_FUNCTION_GPS_2X, + ACPI_DSM_FUNCTION_JT, + ACPI_DSM_FUNCTION_PEX, + ACPI_DSM_FUNCTION_NVPCF_2X, + ACPI_DSM_FUNCTION_GPS, + ACPI_DSM_FUNCTION_NVPCF, + // insert new DSM Functions here + ACPI_DSM_FUNCTION_COUNT, + ACPI_DSM_FUNCTION_CURRENT, // pseudo option to select currently available GUID which supports the subfunction. + ACPI_DSM_FUNCTION_INVALID = 0xFF +} ACPI_DSM_FUNCTION; + +#endif // _NVACPITYPES_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h new file mode 100644 index 0000000..bba7339 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h @@ -0,0 +1,1605 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1997-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVRM_REGISTRY_H +#define NVRM_REGISTRY_H + +#include "nvtypes.h" + +// +// Some shared defines with nvReg.h +// +#if defined(NV_UNIX) +#define NV4_REG_GLOBAL_BASE_KEY "" +#define NV4_REG_GLOBAL_BASE_PATH "_NV_" +#else +#define NV4_REG_GLOBAL_BASE_KEY HKEY_LOCAL_MACHINE +#define NV4_REG_GLOBAL_BASE_PATH "SOFTWARE\\NVIDIA Corporation\\Global" +#endif +#define NV4_REG_SUBKEY "NVidia" +#define NV4_REG_DISPLAY_DRIVER_SUBKEY "Display" +#define NV4_REG_RESOURCE_MANAGER_SUBKEY "System" + +// +// Globally overrides the memory type used to store surfaces. +// Used by all parts of the driver and stored in the hardware-specific key. +// Mirrored from nvReg.h +// +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE "GlobalSurfaceOverrides" +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_DISABLE (0x00000000) // Do not use global surface overrides +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_ENABLE (0x00000001) +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_RM_VALUE 1:0 +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_RM_ENABLE 3:3 + + +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT "RmDefaultTimeout" +// Type Dword +// Override default RM timeout. Measured in milliseconds. 
+// Not scaled for emulation
+
+#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS "RmDefaultTimeoutFlags"
+#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSTIMER    4
+#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSDELAY    8
+// Type Dword
+// Override default RM timeout flags to either OSDELAY or OSTIMER.
+
+
+#define NV_REG_STR_SUPPRESS_CLASS_LIST "SuppressClassList"
+// Type String
+// A list of comma separated classes to suppress
+// examples:
+// 5097
+// 4097, 5097
+// etc
+
+
+//
+// Allow instance memory overrides.  Some fields are chip specific
+// and may not apply to all chips.  Since there are many fields,
+// this is spread across several DWORD registry keys.
+//
+// Type DWORD
+// Encoding:
+//    DEFAULT   RM determines
+//    COH       Coherent system memory
+//    NCOH      Non-coherent system memory
+//    VID       Local video memory
+//
+#define NV_REG_STR_RM_INST_LOC   "RMInstLoc"
+#define NV_REG_STR_RM_INST_LOC_2 "RMInstLoc2"
+#define NV_REG_STR_RM_INST_LOC_3 "RMInstLoc3"
+#define NV_REG_STR_RM_INST_LOC_4 "RMInstLoc4"
+
+#define NV_REG_STR_RM_INST_LOC_DEFAULT      (0x00000000)
+#define NV_REG_STR_RM_INST_LOC_COH          (0x00000001)
+#define NV_REG_STR_RM_INST_LOC_NCOH         (0x00000002)
+#define NV_REG_STR_RM_INST_LOC_VID          (0x00000003)
+
+#define NV_REG_STR_RM_INST_LOC_ALL_DEFAULT  (0x00000000)
+#define NV_REG_STR_RM_INST_LOC_ALL_COH      (0x55555555)
+#define NV_REG_STR_RM_INST_LOC_ALL_NCOH     (0xAAAAAAAA)
+#define NV_REG_STR_RM_INST_LOC_ALL_VID      (0xFFFFFFFF)
+
+//
+// Allow instance memory overrides.  Some fields are chip specific
+// and may not apply to all chips.  Since there are many fields,
+// this is spread across several DWORD registry keys.
+//
+// The registry keys are defined in nvrm_registry.
+// Specific overrides are defined here.
+//
+// Type DWORD
+// Encoding:
+//    DEFAULT   RM determines
+//    COH       Coherent system memory
+//    NCOH      Non-coherent system memory
+//    VID       Local video memory
+//
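+// Editor's note (illustrative example, not in the original header): each
+// 2-bit field below selects the aperture for one allocation type. For
+// instance, to force the instance block (bits 5:4) and the RAMFC save area
+// (bits 7:6) into vidmem while leaving everything else at the RM default,
+// set RMInstLoc = (0x3 << 4) | (0x3 << 6) = 0x000000F0.
+//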
+#define NV_REG_STR_RM_INST_LOC_PTE                          1:0             // Context PTE
+#define NV_REG_STR_RM_INST_LOC_PTE_DEFAULT                  NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_PTE_COH                      NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_PTE_NCOH                     NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_PTE_VID                      NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_BAR_PTE                      3:2             // BAR PTE
+#define NV_REG_STR_RM_INST_LOC_BAR_PTE_DEFAULT              NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_BAR_PTE_COH                  NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_BAR_PTE_NCOH                 NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_BAR_PTE_VID                  NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_INSTBLK                      5:4             // Instance block
+#define NV_REG_STR_RM_INST_LOC_INSTBLK_DEFAULT              NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_INSTBLK_COH                  NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_INSTBLK_NCOH                 NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_INSTBLK_VID                  NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_RAMFC                        7:6             // RAMFC save area
+#define NV_REG_STR_RM_INST_LOC_RAMFC_DEFAULT                NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_RAMFC_COH                    NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_RAMFC_NCOH                   NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_RAMFC_VID                    NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_CACHE1                       9:8             // CACHE1 save area
+#define NV_REG_STR_RM_INST_LOC_CACHE1_DEFAULT               NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_CACHE1_COH                   NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_CACHE1_NCOH                  NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_CACHE1_VID                   NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_GRCTX                        11:10           // Graphics context
+#define NV_REG_STR_RM_INST_LOC_GRCTX_DEFAULT                NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_GRCTX_COH                    NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_GRCTX_NCOH                   NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_GRCTX_VID                    NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_RUNLIST                      13:12           // Runlist
+#define NV_REG_STR_RM_INST_LOC_RUNLIST_DEFAULT              NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_RUNLIST_COH                  NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_RUNLIST_NCOH                 NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_RUNLIST_VID                  NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_DISPLAY                      15:14           // Display
+#define NV_REG_STR_RM_INST_LOC_DISPLAY_DEFAULT              NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_DISPLAY_COH                  NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_DISPLAY_NCOH                 NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_DISPLAY_VID                  NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_USERD                        17:16           // USERD
+#define NV_REG_STR_RM_INST_LOC_USERD_DEFAULT                NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_USERD_COH                    NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_USERD_NCOH                   NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_USERD_VID                    NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER                  19:18           // EVENTBUFFER
+#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_DEFAULT          NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_COH              NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_NCOH             NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_VID              NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_UNUSED                       21:20           // UNUSED
+#define NV_REG_STR_RM_INST_LOC_UNUSED_DEFAULT               NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG                  23:22           // Cipher exchange memory resources
+#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_DEFAULT          NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_COH              NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_NCOH             NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_VID              NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_PDE                          25:24           // Context PDE
+#define NV_REG_STR_RM_INST_LOC_PDE_DEFAULT                  NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_PDE_COH                      NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_PDE_NCOH                     NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_PDE_VID                      NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_BAR_PDE                      27:26           // BAR PDE
+#define NV_REG_STR_RM_INST_LOC_BAR_PDE_DEFAULT              NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_BAR_PDE_COH                  NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_BAR_PDE_NCOH                 NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_BAR_PDE_VID                  NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_PMUINST                      29:28           // PMUINST
+#define NV_REG_STR_RM_INST_LOC_PMUINST_DEFAULT              NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_PMUINST_COH                  NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_PMUINST_NCOH                 NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_PMUINST_VID                  NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_PMUUCODE                     31:30           // PMU UCODE
+#define NV_REG_STR_RM_INST_LOC_PMUUCODE_DEFAULT
NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE 1:0 // Compbit backing store +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB 3:2 // Attribute Circular Buffer +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB 5:4 // Bundle Circular Buffer +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL 7:6 // Pagepool Buffer +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX 9:8 // Golden Context Image +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX 11:10 // Bar context aperture +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE 13:12 // Power Rail Video Prediction +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH 15:14 // context patch +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ 17:16 // MMU Read +#define 
NV_REG_STR_RM_INST_LOC_2_MMU_READ_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE 19:18 // MMU Write +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_UNUSED 21:20 // Unused +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX 23:22 // zcull context buffer +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMCTX 25:24 // PM context buffer +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG 27:26 // DPU Debug/Falctrace Buffer +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMUPG 29:28 // PMU PG buffer +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER 31:30 +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE 1:0 // PG log surface +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER 3:2 // Preemption buffer +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER 5:4 // GFXP BetaCB buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define 
NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER 7:6 // GFXP Pagepool buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE 9:8 // BSI RAM image +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP 11:10 // Priv whitelist buffer +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG 13:12 // SEC2 Debug/Falctrace Buffer +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE 15:14 // FECS UCODE +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER 17:16 // GFXP Spill buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE 19:18 // UVM Non-Replayable fault buffer +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE 21:20 // BAR scratch pages +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_VID
NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST 23:22 // FLCNINST +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER 25:24 // RTVCB buffer +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER 27:26 // GFXP RTVCB buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER 29:28 // Fault method buffer +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA 31:30 // PMU/DPU DMA transfers +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC 1:0 // Display state cache buffer +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER 3:2 // FIFO channel push buffer +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND 5:4 // Firmware security license command +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_VRDS 7:6 // VBIOS runtime data security +#define NV_REG_STR_RM_INST_LOC_4_VRDS_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_VRDS_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_VRDS_NCOH 
NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_VRDS_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS 9:8 // Falcon uCode buffers +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE 11:10 // UVM Replayable fault buffer +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of BARs. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_BAR 13:12 // BAR Bind location +#define NV_REG_STR_RM_INST_LOC_4_BAR_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_BAR_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_BAR_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_BAR_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of async CEs. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_CE 15:14 // Async CE Bind location +#define NV_REG_STR_RM_INST_LOC_4_CE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_CE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_CE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_CE_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of GR/GRCE. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_GR 17:16 // GR/GRCE Bind location +#define NV_REG_STR_RM_INST_LOC_4_GR_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_GR_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_GR_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_GR_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of FALCONs. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_FALCON 19:18 // FALCON Bind location +#define NV_REG_STR_RM_INST_LOC_4_FALCON_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FALCON_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FALCON_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FALCON_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of HWPM PMA. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA 21:20 // HWPM PMA Bind location +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define location of the FECS event buffer.
Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF 23:22 // FECS EVENT buffer location +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_DISABLE_GSP_OFFLOAD "RmDisableGspOffload" +#define NV_REG_STR_RM_DISABLE_GSP_OFFLOAD_FALSE (0x00000000) +#define NV_REG_STR_RM_DISABLE_GSP_OFFLOAD_TRUE (0x00000001) +// Type DWORD (Boolean) +// Override any other settings and disable GSP-RM offload. + + +#define NV_REG_STR_RM_MSG "RmMsg" +// Type String: Set parameters for RM DBG_PRINTF. Only for builds with printfs enabled. +// Encoding: +// rule = [!][filename|function][:startline][-endline] +// Format = rule[,rule] + + +#define NV_REG_STR_RM_THREAD_STATE_SETUP_FLAGS "RmThreadStateSetupFlags" +// Type DWORD +// Enables or disables various ThreadState features +// See resman/inc/kernel/core/thread_state.h for +// THREAD_STATE_SETUP_FLAGS values. + + +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER "RMEnableEventTracer" +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_DISABLE 0 +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_ENABLE 1 +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_DEFAULT NV_REG_STR_RM_ENABLE_EVENT_TRACER_DISABLE +// Type DWORD +// Encoding boolean +// Enable/Disable RM event tracing +// 0 - Disable RM event tracing +// 1 - Enable RM event tracing + + +#define NV_REG_STR_RM_COMPUTE_MODE_RULES "RmComputeModeRules" +// Type DWORD +// Saves the last compute mode rule set by the client. +// Encoding: +// Bits 31:0 : Last compute mode rule set by the client + + +#define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_1 "RMNvLogExtraBuffer1" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_2 "RMNvLogExtraBuffer2" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_3 "RMNvLogExtraBuffer3" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_4 "RMNvLogExtraBuffer4" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_5 "RMNvLogExtraBuffer5" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_6 "RMNvLogExtraBuffer6" +// Type DWORD +// Used to specify up to 6 additional logging buffers +// Encoding: +// _BUFFER_FLAGS +// x: uses NVLOG_BUFFER_FLAGS fields, for main nvlog buffer +// _BUFFER_SIZE +// n: Size of main buffer, in kilobytes + + +// Type DWORD +// This can be used for dumping NvLog buffers (in /var/log/vmkernel.log), when +// we hit critical XIDs, e.g. 31/79. +#define NV_REG_STR_RM_DUMP_NVLOG "RMDumpNvLog" +#define NV_REG_STR_RM_DUMP_NVLOG_DEFAULT (0x00000000) +#define NV_REG_STR_RM_DUMP_NVLOG_DISABLE (0x00000000) +#define NV_REG_STR_RM_DUMP_NVLOG_ENABLE (0x00000001) + + +// +// Type DWORD +// RM external fabric management. +// +// RM currently uses nvlink core driver APIs which internally trigger +// link initialization and training. However, nvlink core driver now exposes a +// set of APIs for managing nvlink fabric externally (from user mode). +// +// When the regkey is enabled, RM will skip use of APIs which trigger +// link initialization and training. In that case, link training needs to be +// triggered externally.
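+//
+// Illustrative sketch (not compiled, and not part of the original header): the
+// INST_LOC fields above use RM's "high:low" bit-range notation, so each
+// per-resource aperture is a small field of one regkey DWORD. RM reads such
+// fields with its DRF-style helper macros; the open-coded form below only shows
+// the arithmetic the notation implies. NvU32 is assumed from nvgputypes.h.
+#if 0
+static NvU32 regkeyField(NvU32 regVal, NvU32 hi, NvU32 lo)
+{
+    return (regVal >> lo) & ((1u << (hi - lo + 1u)) - 1u);  // extract bits hi:lo
+}
+// Example: NV_REG_STR_RM_INST_LOC_GRCTX is bits 11:10, so
+// regkeyField(val, 11, 10) == NV_REG_STR_RM_INST_LOC_VID means the graphics
+// context is placed in video memory.
+#endif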
+// +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT "RMExternalFabricMgmt" +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE 0:0 +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE_DISABLE (0x00000000) + + +// +// Type DWORD +// BIT 1:0: All Data validation +// 0 - Default +// 1 - Validate the kernel data - enable all below +// 2 - Do not validate the kernel data - disable all below +// BIT 3:2: Buffer validation +// 0 - Default +// 1 - Validate the kernel buffers +// 2 - Do not validate the kernel buffers +// BIT 5:4: Handle validation +// 0 - Default +// 1 - Validate the handles +// 2 - Do not validate the handles +// BIT 7:6: Strict client validation +// 0 - Default +// 1 - Enable strict client validation +// 2 - Do not enable strict client validation +// +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION "RmValidateClientData" +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL 1:0 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS 3:2 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE 5:4 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT 7:6 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_DISABLED 0x00000002 + + +// +// Type: Dword +// Encoding: +// 1 - Enable remote GPU +// 0 - Disable remote GPU +// +#define NV_REG_STR_RM_REMOTE_GPU "RMRemoteGpu" +#define NV_REG_STR_RM_REMOTE_GPU_ENABLE 0x00000001 +#define NV_REG_STR_RM_REMOTE_GPU_DISABLE 0x00000000 +#define NV_REG_STR_RM_REMOTE_GPU_DEFAULT NV_REG_STR_RM_REMOTE_GPU_DISABLE + + +// +// Type: DWORD +// +// This regkey configures thread priority boosting whenever +// the thread is holding a GPU lock. +// +#define NV_REG_STR_RM_PRIORITY_BOOST "RMPriorityBoost" +#define NV_REG_STR_RM_PRIORITY_BOOST_DISABLE 0x00000000 +#define NV_REG_STR_RM_PRIORITY_BOOST_ENABLE 0x00000001 +#define NV_REG_STR_RM_PRIORITY_BOOST_DEFAULT NV_REG_STR_RM_PRIORITY_BOOST_DISABLE + + +// +// Type: DWORD +// +// This regkey configures the delay (us) before a boosted thread is throttled +// down. +// +// Default value: 0 (Disable) +// +#define NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY "RMPriorityThrottleDelay" +#define NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY_DISABLE 0x00000000 + + +// +// Type DWORD +// Enable support for CUDA Stream Memory Operations in user-mode applications. 
+// +// BIT 0:0 - Feature enablement +// 0 - disable feature (default) +// 1 - enable feature +// +#define NV_REG_STR_RM_STREAM_MEMOPS "RmStreamMemOps" +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE 0:0 +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE_YES 1 +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE_NO 0 + + +// +// Type DWORD: Enable read-only RMAPI locks for select interfaces +// +// Setting an interface to 0 will disable read-only API locks for that interface +// Setting an interface to 1 will enable read-only API locks for that interface, +// however, RM may still choose to take a read-write lock if it needs to. +// +#define NV_REG_STR_RM_READONLY_API_LOCK "RmRoApiLock" +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE 1:1 +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE 2:2 +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP 3:3 +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP 4:4 +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP 5:5 +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP 6:6 +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY 7:7 +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE 8:8 +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL 9:9 +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_ENABLE (0x00000001) + + +// +// Type DWORD: Enable read-only RMAPI locks for select modules +// +// Setting a module to 0 will disable read-only API locks for that module +// Setting a module to 1 will enable read-only API locks for that module, +// however, RM may still choose to take a read-write lock if it needs to.
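+//
+// Illustrative sketch (not compiled, not part of the original header): each
+// interface above occupies a single bit of the RmRoApiLock DWORD, so a value
+// that enables read-only locking for just the map and control paths could be
+// composed as below. The shift counts mirror the "3:3" and "9:9" ranges above;
+// NvU32 is assumed from nvgputypes.h.
+#if 0
+NvU32 roApiLock = (NV_REG_STR_RM_READONLY_API_LOCK_MAP_ENABLE  << 3) |  // _MAP is bit 3:3
+                  (NV_REG_STR_RM_READONLY_API_LOCK_CTRL_ENABLE << 9);   // _CTRL is bit 9:9
+#endif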
+// +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE "RmRoApiLockModule" +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS 0:0 +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM 1:1 +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_ENABLE (0x00000001) + + +// +// Type DWORD: Enable read-only GPU locks for select modules +// +// Setting a module to 0 will disable read-only GPU locks for that module +// Setting a module to 1 will enable read-only GPU locks for that module, +// however, RM may still choose to take a read-write lock if it needs to. +// +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE "RmRoGpuLockModule" +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS 0:0 +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM 1:1 +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_ENABLE (0x00000001) + + +// Enable support for CACHEABLE rmapi control flag +// 0: never cache any controls +// 1 (default): cache only ROUTE_TO_PHYSICAL controls, and only if GSP-RM is running +// 2: cache all controls +#define NV_REG_STR_RM_CACHEABLE_CONTROLS "RmEnableCacheableControls" +#define NV_REG_STR_RM_CACHEABLE_CONTROLS_DISABLE 0 +#define NV_REG_STR_RM_CACHEABLE_CONTROLS_GSP_ONLY 1 +#define NV_REG_STR_RM_CACHEABLE_CONTROLS_ENABLE 2 + +// Type DWORD +// For Maxwell+, this regkey forces that on FB Unload we wait for the FB pull before issuing the +// L2 clean. WAR for bug 1032432 +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL "RmL2CleanFbPull" +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_ENABLED (0x00000000) +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_DISABLED (0x00000001) +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_DEFAULT (0x00000000) + +// Enable backtrace dumping on assertion failure. +// If physical RM or RCDB is unavailable, then this regkey controls the behavior of backtrace +// printing. +// 0: disable +// 1 (default): only print unique backtraces, identified by instruction pointer of the failed assert +// 2: print all +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE "RmPrintAssertBacktrace" +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_DISABLE 0 +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE 1 +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_ENABLE 2 + + +// +// Type DWORD +// Used to enable lockless parameter copy +// +#define NV_REG_STR_RM_PARAM_COPY_NO_LOCK "RMParamCopyNoLock" + +// +// Type DWORD +// This regkey restricts profiling capabilities (creation of profiling objects +// and access to profiling-related registers) to admin only.
+// 0 - (default - disabled) +// 1 - Enables admin check +// +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY "RmProfilingAdminOnly" +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY_FALSE 0x00000000 +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY_TRUE 0x00000001 + + +#define NV_REG_STR_GPU_BROKEN_FB "nvBrokenFb" +#define NV_REG_STR_GPU_BROKEN_FB_ALL_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_ALL_BROKEN 0xffffffff +#define NV_REG_STR_GPU_BROKEN_FB_DEFAULT NV_REG_STR_GPU_BROKEN_FB_ALL_OKAY +#define NV_REG_STR_GPU_BROKEN_FB_DEFAULT_GF100_A01 NV_REG_STR_GPU_BROKEN_FB_MEMORY_BROKEN +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY 0:0 +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY_BROKEN 0x00000001 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU 1:1 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU_BROKEN 0x00000001 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU 2:2 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU_BROKEN 0x00000001 +// Type DWORD +// _ALL_OKAY: FB is not broken. All is okay. +// _ALL_BROKEN: FB is broken and no software will try to use it. +// _MEMORY: Memory itself can/cannot be accessed. (PDB_PROP_GPU_BROKEN_FB property) +// _REG_VIA_CPU: CPU can/cannot access FBPA/FBIO registers. (PDB_PROP_GPU_BROKEN_FB_REG_VIA_CPU property) +// _REG_VIA_PMU: PMU can/cannot access FBPA/FBIO registers. (PDB_PROP_GPU_BROKEN_FB_REG_VIA_PMU property) +// FBPA/FBIO register addresses are defined by gpuIsBrokenFbReg(). +// Note that if the CPU and the PMU can't access registers, then memory isn't going to work either. +// In other words, the only even number that makes sense for this regkey is zero. +// Default depends on the chip and mask revision. + +#define NV_REG_STR_OVERRIDE_FB_SIZE "OverrideFbSize" +// Type Dword +// Encoding Numeric Value +// Size in MB +// Used to reduce FB for testing memory management +// +#define NV_REG_STR_OVERRIDE_FB_SIZE_DEFAULT 0 + +// +// TYPE DWORD +// This regkey helps increase the size of RM reserved region. +// Exposed to clients for bug 2404337. +// Note: In GSP builds this key applies to the kernel (CPU) RM only. +// +#define NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB "RMIncreaseRsvdMemorySizeMB" +#define NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB_DEFAULT 0x0 + +// TYPE Dword +// Determines whether or not RM reserved space should be increased. +// 1 - Increases RM reserved space +// 0 - (default) Keeps RM reserved space as it is. + +#define NV_REG_STR_RM_DISABLE_SCRUB_ON_FREE "RMDisableScrubOnFree" +// Type DWORD +// Encoding 0 (default) - Scrub on free +// 1 - Disable Scrub on Free + +#define NV_REG_STR_RM_DISABLE_FAST_SCRUBBER "RMDisableFastScrubber" +// Type DWORD +// Encoding 0 (default) - Enable Fast Scrubber +// 1 - Disable Fast Scrubber + +// +// Type DWORD +// Controls enable of PMA memory management instead of existing legacy +// RM FB heap manager. +// +#define NV_REG_STR_RM_ENABLE_PMA "RMEnablePMA" +#define NV_REG_STR_RM_ENABLE_PMA_YES (0x00000001) +#define NV_REG_STR_RM_ENABLE_PMA_NO (0x00000000) + +// +// Type DWORD +// Controls management of client page tables by PMA on MODS. +// Default enable. MODS will use regkey to override to disable feature. 
+// +#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES "RMEnablePmaManagedPtables" +#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_YES (0x00000001) +#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_NO (0x00000000) +#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_DEFAULT (0x00000001) + +// +// Type DWORD +// Controls enable of Address Tree memory tracking instead of regmap +// for the PMA memory manager. +// +#define NV_REG_STR_RM_ENABLE_ADDRTREE "RMEnableAddrtree" +#define NV_REG_STR_RM_ENABLE_ADDRTREE_YES (0x00000001) +#define NV_REG_STR_RM_ENABLE_ADDRTREE_NO (0x00000000) + +#define NV_REG_STR_RM_SCRUB_BLOCK_SHIFT "RMScrubBlockShift" +// Type DWORD +// Encoding Numeric Value +// A value in the range 12 to 20 represents logbase2 of maxBlockSize for heap +// scrubber. Any other value will be defaulted to 16 i.e. maxBlockSize = 64KB. + +#define NV_REG_STR_RM_INST_VPR "RMInstVPR" +// Type DWORD +// Encoding: takes effect for allocations in VIDEO memory +// TRUE Make allocation in protected region +// FALSE Make allocation in non-protected region (default) +// +#define NV_REG_STR_RM_INST_VPR_INSTBLK 0:0 // Instance block +#define NV_REG_STR_RM_INST_VPR_INSTBLK_FALSE (0x00000000) +#define NV_REG_STR_RM_INST_VPR_INSTBLK_TRUE (0x00000001) +#define NV_REG_STR_RM_INST_VPR_RAMFC 1:1 // RAMFC save area +#define NV_REG_STR_RM_INST_VPR_RAMFC_FALSE (0x00000000) +#define NV_REG_STR_RM_INST_VPR_RAMFC_TRUE (0x00000001) +#define NV_REG_STR_RM_INST_VPR_RUNLIST 2:2 // Runlist +#define NV_REG_STR_RM_INST_VPR_RUNLIST_FALSE (0x00000000) +#define NV_REG_STR_RM_INST_VPR_RUNLIST_TRUE (0x00000001) +#define NV_REG_STR_RM_INST_VPR_MMU_READ 3:3 // MMU Debug Read +#define NV_REG_STR_RM_INST_VPR_MMU_READ_FALSE (0x00000000) +#define NV_REG_STR_RM_INST_VPR_MMU_READ_TRUE (0x00000001) +#define NV_REG_STR_RM_INST_VPR_MMU_WRITE 4:4 // MMU Debug Write +#define NV_REG_STR_RM_INST_VPR_MMU_WRITE_FALSE (0x00000000) +#define NV_REG_STR_RM_INST_VPR_MMU_WRITE_TRUE (0x00000001) + +#define NV_REG_STR_RM_GPU_SURPRISE_REMOVAL "RMGpuSurpriseRemoval" +// Type DWORD +// Encoding boolean +// If set, this will cause RM to mark the GPU as lost when it detects 0xFF from a register +// access. + +#define NV_REG_STR_RM_BLACKLIST_ADDRESSES "RmBlackListAddresses" +// Type BINARY: +// struct +// { +// NvU64 addresses[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES]; +// }; + +#define NV_REG_STR_RM_NUM_FIFOS "RmNumFifos" +// Type Dword +// Override number of fifos (channels) on NV4X +#define NV_REG_STR_RM_NUM_FIFOS_COMPAT 0x00000020 +#define NV_REG_STR_RM_NUM_FIFOS_EXTENDED 0x00000200 + +#define NV_REG_STR_RM_SUPPORT_USERD_MAP_DMA "RMSupportUserdMapDma" +// Type DWORD +// Encoding: Boolean +// If set, allow MapMemoryDma calls to be made on channel objects + +// +// Type DWORD +// Encoding Numeric Value +// Overrides chipset-based P2P configurations.
+// Only to be used to test internal issues +// +// P2P reads: +// 0 - Do not allow P2P reads +// 1 - Allow P2P reads +// 2 - Do not override chipset-selected config (default) +// P2P writes: +// 0 - Do not allow P2P writes +// 1 - Allow P2P writes +// 2 - Do not override chipset-selected config (default) +// +#define NV_REG_STR_CL_FORCE_P2P "ForceP2P" +#define NV_REG_STR_CL_FORCE_P2P_READ 1:0 +#define NV_REG_STR_CL_FORCE_P2P_READ_DISABLE 0x00000000 +#define NV_REG_STR_CL_FORCE_P2P_READ_ENABLE 0x00000001 +#define NV_REG_STR_CL_FORCE_P2P_READ_DEFAULT 0x00000002 +#define NV_REG_STR_CL_FORCE_P2P_WRITE 5:4 +#define NV_REG_STR_CL_FORCE_P2P_WRITE_DISABLE 0x00000000 +#define NV_REG_STR_CL_FORCE_P2P_WRITE_ENABLE 0x00000001 +#define NV_REG_STR_CL_FORCE_P2P_WRITE_DEFAULT 0x00000002 + +// +// Type DWORD +// Use this regkey to force RM to pick a P2P type. HW has to support the picked TYPE for it to take effect. +// e.g., TYPE_BAR1P2P will not work if HW does not support it. A call to create an NV50_P2P object +// will fail in such a case. +// +// TYPE_DEFAULT lets RM choose a P2P type. The priority is: +// C2C > NVLINK > BAR1P2P > mailbox P2P +// +// TYPE_C2C to use C2C P2P if supported +// TYPE_NVLINK to use NVLINK P2P, including INDIRECT_NVLINK_P2P if supported +// TYPE_BAR1P2P to use BAR1 P2P if supported +// TYPE_MAILBOXP2P to use mailbox p2p if supported +// +#define NV_REG_STR_RM_FORCE_P2P_TYPE "RMForceP2PType" +#define NV_REG_STR_RM_FORCE_P2P_TYPE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_MAILBOXP2P (0x00000001) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_BAR1P2P (0x00000002) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_NVLINK (0x00000003) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_C2C (0x00000004) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_MAX NV_REG_STR_RM_FORCE_P2P_TYPE_C2C + +// +// Type: DWORD +// Enables/Disables the WAR for bug 1630288 where we disable 3rd-party peer mappings +// Disabled by default +// +#define NV_REG_STR_PEERMAPPING_OVERRIDE "PeerMappingOverride" +#define NV_REG_STR_PEERMAPPING_OVERRIDE_DEFAULT 0 + +#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED "P2PMailboxClientAllocated" +#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED_FALSE 0 +#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED_TRUE 1 +// Type Dword +// Overrides the P2P Mailbox allocation policy +// For testing only +// 0 - P2P Mailbox area is allocated by RM +// 1 - P2P Mailbox area is not allocated by RM, but by the client. + +#define NV_REG_STR_RM_MAP_P2P_PEER_ID "RMP2PPeerId" +// Type DWORD +// Encoding: +// Peer ID to use when mapping p2p to peer subdevice in p2p loopback mode +// Default: RM takes care of assigning peer ID. + +#define NV_REG_STR_OVERRIDE_GPU_NUMA_NODE_ID "RMOverrideGpuNumaNodeId" +// Type DWORD: +// Encoding -- NvS32 +// Override GPU NUMA Node ID assigned by OS + +// +// Type DWORD +// NUMA allocations allow reclaim to be skipped when free memory is below a specified occupancy +// threshold. This override allows that threshold to be tuned: the value supplied here indicates +// the percentage of free memory below which the GFP_RECLAIM flag will be dropped.
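+//
+// Illustrative sketch (not compiled, not part of the original header): a
+// consumer of the reclaim-percentage override below might clamp the supplied
+// value to the documented range before use. This is an assumed usage pattern,
+// not RM's actual implementation; NvU32 is assumed from nvgputypes.h.
+#if 0
+static NvU32 numaSkipReclaimPercent(NvU32 regVal)
+{
+    if (regVal > NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MAX)
+        return NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MAX;  // cap at 100
+    return regVal;  // _MIN is 0, so no lower clamp is needed
+}
+#endif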
+// +#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE "RmNumaAllocSkipReclaimPercent" +#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_DEFAULT 4 +#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_DISABLED 0 +#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MIN 0 +#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MAX 100 + +// +// Enable/Disable 64KB BAR1 mappings +// 0 - Disable 64KB BAR1 mappings +// 1 - Force/Enable 64KB BAR1 mappings +// +#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS "RM64KBBAR1Mappings" +#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS_ENABLED 0x00000001 +#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS_DISABLED 0x00000000 + +#define NV_REG_STR_RM_BAR1_APERTURE_SIZE_MB "RMBar1ApertureSizeMB" +// Type DWORD +// Encoding Numeric Value +// Overrides the size of the BAR1 aperture. Used to shrink BAR1. It cannot be +// greater than the physical size of BAR1. + +#define NV_REG_STR_RM_BAR2_APERTURE_SIZE_MB "RMBar2ApertureSizeMB" +// Type DWORD +// Encoding Numeric Value +// Overrides the size of the BAR2 aperture. Cannot be greater than the +// physical size of BAR2 available to RM (which may be less than the total size +// of BAR2). When this regkey is present we cap the total aperture size to the +// RM aperture size. This can result in undefined behavior in environments that +// rely on a virtual bar2 aperture shared between RM and VBIOS for VESA support. + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) +// +// TYPE DWORD +// This setting will override the BAR1 Big page size +// This is used for interop testing for MODS +// +#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE "RMSetBAR1AddressSpaceBigPageSize" +#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE_64k (64 * 1024) +#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE_128k (128 * 1024) +#endif //DEVELOP || DEBUG || NV_MODS + +// This regkey disables the coherent path CPU->Nvlink/C2C->FB and forces the BAR path. +#define NV_REG_STR_RM_FORCE_BAR_PATH "RMForceBarPath" +// Type DWORD +// Encoding 0 (default) - Enable Coherent C2C/NvLink Path +// 1 - Force BAR Path + +// +// Type: Dword +// Encoding: +// 0 - client RM allocated context buffer feature is disabled +// 1 - client RM allocated context buffer feature is enabled +// +#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER "RMSetClientRMAllocatedCtxBuffer" +#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER_DISABLED 0x00000000 +#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER_ENABLED 0x00000001 + +// +// Type: Dword +// Encoding: +// 0 - Split VA space management between server/client RM is disabled +// 1 - Split VA space management between server/client RM is enabled +// +#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM "RMSplitVasMgmtServerClientRm" +#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_DISABLED 0x00000000 +#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_ENABLED 0x00000001 + +// +// Restrict the VA range to be <= @ref VASPACE_SIZE_FERMI. +// Used in cases where some engines support 49 bit VA and some don't. +// Ignored if NVOS32_ALLOC_FLAGS_USE_BEGIN_END (DDMA_ALLOC_VASPACE_USE_RANGE) or +// NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE (DMA_ALLOC_VASPACE_VA_FIXED) is set.
+// Default: OFF (0x0) +// Type: DWORD +// +#define NV_REG_STR_RM_RESTRICT_VA_RANGE "RMRestrictVARange" +#define NV_REG_STR_RM_RESTRICT_VA_RANGE_DEFAULT (0x0) +#define NV_REG_STR_RM_RESTRICT_VA_RANGE_ON (0x1) + +#define NV_REG_STR_RESERVE_PTE_SYSMEM_MB "RmReservePteSysmemMB" +// Type DWORD: +// Encoding -- Value = 0 -> Do not reserve sysmem for PTEs (default) +// Value > 0 -> Reserve ValueMB for PTEs when we run out of video and system memory +// + +// Type DWORD +// Contains the sysmem page size. +#define NV_REG_STR_RM_SYSMEM_PAGE_SIZE "RMSysmemPageSize" + +// +// Allows pages that are aligned to large page boundaries to be mapped as large +// pages. +// +#define NV_REG_STR_RM_ALLOW_SYSMEM_LARGE_PAGES "RMAllowSysmemLargePages" + +#define NV_REG_STR_FERMI_BIG_PAGE_SIZE "RMFermiBigPageSize" +#define NV_REG_STR_FERMI_BIG_PAGE_SIZE_64KB (64 * 1024) +#define NV_REG_STR_FERMI_BIG_PAGE_SIZE_128KB (128 * 1024) + +// +// TYPE DWORD +// This setting will disable big page size per address space +// +#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE "RmDisableBigPagePerAddressSpace" +#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE_FALSE (0x00000000) +#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE_TRUE (0x00000001) + +#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION "RMDisableNoncontigAlloc" +#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION_FALSE (0x00000000) +#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION_TRUE (0x00000001) +// Type DWORD: +// Encoding -- Boolean +// Disable noncontig vidmem allocation +// + +#define NV_REG_STR_RM_FBSR_PAGED_DMA "RmFbsrPagedDMA" +#define NV_REG_STR_RM_FBSR_PAGED_DMA_ENABLE 1 +#define NV_REG_STR_RM_FBSR_PAGED_DMA_DISABLE 0 +#define NV_REG_STR_RM_FBSR_PAGED_DMA_DEFAULT NV_REG_STR_RM_FBSR_PAGED_DMA_DISABLE +// Type Dword +// Encoding Numeric Value +// Enable the Paged DMA mode for FBSR +// 0 - Disable (default) +// 1 - Enable + +#define NV_REG_STR_RM_FBSR_FILE_MODE "RmFbsrFileMode" +#define NV_REG_STR_RM_FBSR_FILE_MODE_ENABLE 1 +#define NV_REG_STR_RM_FBSR_FILE_MODE_DISABLE 0 +#define NV_REG_STR_RM_FBSR_FILE_MODE_DEFAULT NV_REG_STR_RM_FBSR_FILE_MODE_DISABLE +// Type Dword +// Encoding Numeric Value +// Enable the File based power saving mode for Linux +// 0 - Disable (default) +// 1 - Enable + +#define NV_REG_STR_RM_FBSR_WDDM_MODE "RmFbsrWDDMMode" +#define NV_REG_STR_RM_FBSR_WDDM_MODE_ENABLE 1 +#define NV_REG_STR_RM_FBSR_WDDM_MODE_DISABLE 0 +#define NV_REG_STR_RM_FBSR_WDDM_MODE_DEFAULT NV_REG_STR_RM_FBSR_WDDM_MODE_DISABLE +// Type Dword +// Encoding Numeric Value +// Enable the WDDM power saving mode for FBSR +// 0 - Disable (default) +// 1 - Enable + +// Type DWORD: Disables HW fault buffers on Pascal+ chips +// Encoding : 1 -- TRUE +// : 0 -- False +// : Default -- False +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER "RmDisableHwFaultBuffer" +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_TRUE 0x00000001 +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_FALSE 0x00000000 +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_DEFAULT 0x00000000 + +// +// Type: DWORD +// Encoding: +// 3 - Enable interrupt-based FECS context switch logging with bottom-half/APC fall-back +// 2 - Enable interrupt-based FECS context switch logging without bottom-half/APC fall-back +// 1 - Enable periodic FECS context switch logging +// 0 - Disable FECS context switch logging +// +// Note: Interrupt-based logging and periodic logging are mutually exclusive +// +#define NV_REG_STR_RM_CTXSW_LOG "RMCtxswLog" +#define NV_REG_STR_RM_CTXSW_LOG_DISABLE 0x00000000 
+#define NV_REG_STR_RM_CTXSW_LOG_ENABLE 0x00000001 +#define NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR 0x00000002 +#define NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR_APC 0x00000003 +#define NV_REG_STR_RM_CTXSW_LOG_DEFAULT NV_REG_STR_RM_CTXSW_LOG_DISABLE + +// +// Type: DWORD +// +// This regkey configures the maximum number of records that can be +// processed per DPC when using interrupt-based ctxsw logging +#define NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR "RMCtxswLogMaxRecordsPerIntr" +#define NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR_DEFAULT 0x30 + +// +// Type: DWORD +// Encoding: +// 0 - Disable more detailed debug INTR logs +// 1 - Enable more detailed debug INTR logs +// +#define NV_REG_STR_RM_INTR_DETAILED_LOGS "RMIntrDetailedLogs" +#define NV_REG_STR_RM_INTR_DETAILED_LOGS_DISABLE 0x00000000 +#define NV_REG_STR_RM_INTR_DETAILED_LOGS_ENABLE 0x00000001 + +#define NV_REG_STR_RM_LOCKING_MODE "RMLockingMode" +// Type DWORD +// Encoding enum +// Overrides what Locking Mode is in use. +// Default 0 +#define NV_REG_STR_RM_LOCKING_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_LOCKING_MODE_INTR_MASK (0x00000001) +#define NV_REG_STR_RM_LOCKING_MODE_LAZY_INTR_DISABLE (0x00000002) + +#define NV_REG_STR_RM_PER_INTR_DPC_QUEUING "RMDisablePerIntrDPCQueueing" +// Type DWORD +// This regkey is used to disable per interrupt DPC queuing. +// 0: Enable Per interrupt DPC Queuing +// 1: Disable Per interrupt DPC Queuing + +#define NV_REG_STR_INTR_STUCK_THRESHOLD "RM654663" +// Type DWORD +// Encoding NvU32 +// Number of iterations to see an interrupt in succession before considering it +// "stuck." +// Default - See INTR_STUCK_THRESHOLD + + +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR "RMProcessNonStallIntrInLocklessIsr" + +// Type: DWORD +// Enables/Disables processing of non-stall interrupts in lockless ISR for +// Linux only. +// Non-stall interrupts are processed by the function +// intrServiceNonStall_HAL(pIntr,pGpu, TRUE /* bProcess*/); where bProcess is TRUE which +// means that event list will be traversed to notify clients registered for it. 
+// Disabled by default +// + +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_DISABLE 0x00000000 +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE 0x00000001 + +#define NV_REG_STR_RM_ROBUST_CHANNELS "RmRobustChannels" +#define NV_REG_STR_RM_ROBUST_CHANNELS_ENABLE 0x00000001 +#define NV_REG_STR_RM_ROBUST_CHANNELS_DISABLE 0x00000000 +#define NV_REG_STR_RM_ROBUST_CHANNELS_DEFAULT NV_REG_STR_RM_ROBUST_CHANNELS_DISABLE + +#define NV_REG_STR_RM_RC_WATCHDOG "RmRcWatchdog" +#define NV_REG_STR_RM_RC_WATCHDOG_ENABLE 0x00000001 +#define NV_REG_STR_RM_RC_WATCHDOG_DISABLE 0x00000000 +#define NV_REG_STR_RM_RC_WATCHDOG_DEFAULT NV_REG_STR_RM_RC_WATCHDOG_ENABLE + +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT "RmWatchDogTimeOut" +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_LOW 0x00000007 +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_HI 0x0000000C +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_DEFAULT NV_REG_STR_RM_WATCHDOG_TIMEOUT_LOW + +#define NV_REG_STR_RM_WATCHDOG_INTERVAL "RmWatchDogInterval" +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_LOW 0x00000007 +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_HI 0x0000000C +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_DEFAULT NV_REG_STR_RM_WATCHDOG_INTERVAL_LOW + +#define NV_REG_STR_RM_DO_LOG_RC_EVENTS "RmLogonRC" +// Type Dword +// Encoding : 0 --> Skip Logging +// 1 --> Do log +// Enable/Disable Event Logging on RC errors +// Default is Disabled +#define NV_REG_STR_RM_DO_LOG_RC_ENABLE 0x00000001 +#define NV_REG_STR_RM_DO_LOG_RC_DISABLE 0x00000000 +#define NV_REG_STR_RM_DO_LOG_RC_DEFAULT NV_REG_STR_RM_DO_LOG_RC_DISABLE + +// Type Dword +// Encoding : 0 --> Skip Breakpoint +// nonzero --> Do Breakpoint +// Enable/Disable breakpoint on DEBUG resource manager on RC errors + +#define NV_REG_STR_RM_BREAK_ON_RC "RmBreakonRC" +#define NV_REG_STR_RM_BREAK_ON_RC_DISABLE 0x00000000 +#define NV_REG_STR_RM_BREAK_ON_RC_ENABLE 0x00000001 + +// Explicitly disable RmBreakOnRC for Retail and +// RMCFG_FEATURE_PLATFORM_GSP builds +#if ((defined(DEBUG) || defined(QA_BUILD)) && \ + (!defined(RMCFG_FEATURE_PLATFORM_GSP) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && !RMCFG_FEATURE_PLATFORM_GSP))) +#define NV_REG_STR_RM_BREAK_ON_RC_DEFAULT NV_REG_STR_RM_BREAK_ON_RC_ENABLE +#else +#define NV_REG_STR_RM_BREAK_ON_RC_DEFAULT NV_REG_STR_RM_BREAK_ON_RC_DISABLE +#endif + +// Volatile registry entries for previous driver version. +// Used to record driver unload/reload for debugging purposes. +#define NV_REG_STR_RM_RC_PREV_DRIVER_VERSION "RmRCPrevDriverVersion" +#define NV_REG_STR_RM_RC_PREV_DRIVER_BRANCH "RmRCPrevDriverBranch" +#define NV_REG_STR_RM_RC_PREV_DRIVER_CHANGELIST "RmRCPrevDriverChangelist" +#define NV_REG_STR_RM_RC_PREV_DRIVER_LOAD_COUNT "RmRCPrevDriverLoadCount" + +#define NV_REG_STR_USE_UNCACHED_PCI_MAPPINGS "UseUncachedPCIMappings" +// Type DWORD +// Encode -- Numeric Value +// Check to see if we are converting PCI mappings + +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING "RmCeUseGen4Mapping" +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING_TRUE 0x1 +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING_FALSE 0x0 +// Type Dword (Boolean) +// Encoding Numeric Value +// Use gen4 mapping that uses a HSHUB CE, if available +// Else, continue using FBHUB PCEs + +// Type Dword +// Enable PCE LCE auto config +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG "RmCeEnableAutoConfig" +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG_TRUE 0x1 +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG_FALSE 0x0 + +// +// Type DWORD +// NVLINK control overrides. 
+// +// FORCE_DISABLE: Force disable NVLINK when the current default is ON (POR) +// +// TRAIN_AT_LOAD : Force train links during driver load +// +// FORCE_AUTOCONFIG : Force autoconfig training regardless of chiplib forced config links +// +// FORCE_ENABLE: Force enable NVLINK when the current default is OFF (bringup etc.) +// +// PARALLEL_TRAINING: Have the GPU endpoint parallelize link training +#define NV_REG_STR_RM_NVLINK_CONTROL "RMNvLinkControl" +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE 0:0 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD 1:1 +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN 2:2 +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_RESERVED_0 6:3 +#define NV_REG_STR_RM_NVLINK_CONTROL_LINK_TRAINING_DEBUG_SPEW 7:7 +#define NV_REG_STR_RM_NVLINK_CONTROL_LINK_TRAINING_DEBUG_SPEW_OFF (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_LINK_TRAINING_DEBUG_SPEW_ON (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG 8:8 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE 31:31 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_NO) + +// +// Type DWORD +// Knob to control NVLink MINION +// +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL "RMNvLinkMinionControl" +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE 3:0 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_ON (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_OFF (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG 7:4 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS 11:8 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE 15:12 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_DISABLE (0x00000002) +#define 
NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE 19:16 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS 23:20 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_CACHE_SEEDS_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE 27:24 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_RISCV (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_FALCON (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_RISCV_MANIFEST (0x00000003) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_BOOT_CORE_NO_MANIFEST (0x00000004) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING 30:28 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ALI_TRAINING_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_GFW_BOOT_DISABLE 31:31 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_GFW_BOOT_DISABLE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_GFW_BOOT_DISABLE_DISABLE (0x00000001) + +// +// Type DWORD +// Knob to change NVLink link speed +// __LAST is same as highest supported speed +// NOTE: +// NVLINK_SPEED_CONTROL_SPEED_25G is exactly 25.00000Gbps on Pascal +// NVLINK_SPEED_CONTROL_SPEED_25G is exactly 25.78125Gbps on Volta and later +// NVLINK_SPEED_CONTROL_SPEED_25_00000G is exactly 25.00000Gbps on all chips +// +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL "RMNvLinkSpeedControl" +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED 4:0 +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_16G (0x00000001) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_19_2G (0x00000002) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_20G (0x00000003) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_21G (0x00000004) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_22G (0x00000005) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_23G (0x00000006) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_24G (0x00000007) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25G (0x00000008) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25_78125G (0x00000008) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25_00000G (0x00000009) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_16_14583G (0x0000000A) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_26_56250G (0x0000000B) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_27_34375G (0x0000000C) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_28_12500G (0x0000000D) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_32G (0x0000000E) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_40G (0x0000000F) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_50_00000G (0x00000010) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_53_12500G (0x00000011) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_100_00000G (0x00000012) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_106_25000G (0x00000013) +#define
NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_FAULT (0x00000014) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED__LAST (0x00000014) + +// +// Type DWORD +// P2P Loopback over NVLINK will be enabled by default if RM +// detects loopback links. For P2P over PCIE, force disable +// P2P loopback over NVLINK using the following regkey +// +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK "RMNvLinkDisableP2PLoopback" +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_FALSE (0x00000000) + +// +// Type DWORD +// Knob to control NVLink Link Power States +// +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL "RMNvLinkControlLinkPM" +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE 1:0 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES 3:2 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE 5:4 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L1_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE 7:6 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_RESERVED 31:8 + +// +// Type DWORD +// Knob to force lane disable and shutdown during driver unload +// The regkey will also cause a toggle of link reset on driver load +// The regkey should not be used in S/R paths +// +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN "RMNvLinkForceLaneshutdown" +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_FALSE (0x00000000) +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_DEFAULT (NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_FALSE) + +// +// Type DWORD +// For links that are SYSMEM, use this device type for force configs +// Choose the value from NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_* +// +#define NV_REG_STR_RM_NVLINK_SYSMEM_DEVICE_TYPE "RMNvLinkForcedSysmemDeviceType" + +// +// Type DWORD +// NVLink Disable Link Overrides +// The supplied value is ANDed with the set of discovered +// (not necessarily connected) links to remove unwanted links. +// A value of DISABLE_ALL removes/disables all links on this device. +// A value of DISABLE_NONE removes no links. +// If not present, this regkey has no effect. +// +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS "RMNvLinkDisableLinks" +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS_DISABLE_ALL (0xFFFFFFFF) +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS_DISABLE_NONE (0x00000000) + +// +// Type DWORD +// NVLINK Enable Links Overrides +// Note that this control does not force enable links, rather, it should be +// used to disable or mask off SW discovered links supported by the HW. 
+//
+// Type DWORD
+// NVLINK Enable Links Overrides
+// Note that this control does not force-enable links; rather, it should be
+// used to disable or mask off SW-discovered links supported by the HW.
+//
+// NOTE: THIS REGKEY HAS BEEN DEPRECATED IN RM, since it does NOT work
+// with NVLink auto-configuration. Instead, please move to using
+// the new regkey NV_REG_STR_RM_NVLINK_DISABLE_LINKS.
+//
+#define NV_REG_STR_RM_NVLINK_ENABLE "RMNvLinkEnable"
+#define NV_REG_STR_RM_NVLINK_ENABLE_IDX(i) (i):(i)
+#define NV_REG_STR_RM_NVLINK_ENABLE_IDX__SIZE 32
+#define NV_REG_STR_RM_NVLINK_ENABLE_IDX_TRUE (0x00000001)
+#define NV_REG_STR_RM_NVLINK_ENABLE_IDX_FALSE (0x00000000)
+
+//
+// Type DWORD
+// Knob to control NVLink Verbose Prints
+//
+#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL "RMNvLinkverboseControlMask"
+#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT 0:0
+#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_ON (0x00000001)
+#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_OFF (0x00000000)
+
+// Type DWORD:
+#define NV_REG_STR_RM_PCIE_LINK_SPEED "RMPcieLinkSpeed"
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2 1:0
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_ENABLE (0x00000001)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_DISABLE (0x00000002)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3 3:2
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_ENABLE (0x00000001)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_DISABLE (0x00000002)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4 5:4
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_ENABLE (0x00000001)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_DISABLE (0x00000002)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5 7:6
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_ENABLE (0x00000001)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_DISABLE (0x00000002)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD 31:31
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD_DISABLE (0x00000000)
+#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD_ENABLE (0x00000001)
+
+//
+// Type DWORD
+//
+//
+// This can be used as a per-device regkey or as a global regkey; in the
+// latter case, the setting applies to all devices. If this key is supplied
+// as both a per-device and non-per-device regkey, the non-per-device option
+// will apply first to all devices, and then the per-device key settings
+// will apply, overriding the settings for the relevant devices.
+//
+// Encoding : 0 - Disable PCIe Relaxed Ordering TLP header bit setting. This is
+// the default option.
+// 1 - Try to enable PCIe Relaxed Ordering TLP header bit setting.
+// Traverses the PCIe topology and only enables the header bit if
+// it is safe to do so, with regard to all devices that could be
+// affected.
+// 2 - Forcibly enable PCIe Relaxed Ordering TLP header bit setting.
+// Explicitly ignores the compatibility of the PCIe topology
+// around the device or devices in question.
+//
+#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING "RmSetPCIERelaxedOrdering"
+#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_DEFAULT 0x00000000
+#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_DISABLE 0x00000000
+#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_ENABLE 0x00000001
+#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE 0x00000002
+
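A sketch of how the three-way encoding might be dispatched, using the constant names defined above; `osReadRegistryDword()` is assumed to be the accessor, and the actual topology-probing and bit-setting logic is elided:

    static void exampleApplyRelaxedOrdering(OBJGPU *pGpu)
    {
        NvU32 ro = NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_DEFAULT;

        (void)osReadRegistryDword(pGpu,
                                  NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING,
                                  &ro);
        switch (ro)
        {
            case NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_ENABLE:
                // Walk the PCIe topology; set the TLP bit only if every
                // affected device is known to tolerate relaxed ordering.
                break;
            case NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE:
                // Set the TLP header bit unconditionally.
                break;
            default:
                // _DEFAULT == _DISABLE: leave relaxed ordering off.
                break;
        }
    }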
+// Type DWORD
+// This regkey overrides the default use case to optimize the GPU for.
+// This regkey should not be used with the RMFermiBigPageSize regkey.
+// This regkey should only be set by the RM.
+#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX "RMOptimizeComputeOrSparseTex"
+#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_COMPUTE (0x00000001)
+#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_SPARSE_TEX (0x00000002)
+
+#define NV_REG_STR_CL_ASLM_CFG "AslmCfg"
+#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE 1:0
+#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_NO 0x00000000
+#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_YES 0x00000001
+#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_DEFAULT 0x00000002
+#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET 5:4
+#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_NO 0x00000000
+#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_YES 0x00000001
+#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_DEFAULT 0x00000002
+#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE 9:8
+#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_NO 0x00000000
+#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_YES 0x00000001
+#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_DEFAULT 0x00000002
+#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE 11:10
+#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_NO 0x00000000
+#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_YES 0x00000001
+#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_DEFAULT 0x00000002
+// Type DWORD
+// Encoding Numeric Value
+// Overrides chipset-based ASLM configurations.
+//
+// NV link upgrade:
+// 0 - Do not use NV link upgrade for ASLM
+// 1 - Use NV link upgrade for ASLM
+// 2 - Do not override chipset-selected config (default)
+// Hot reset:
+// 0 - Do not use hot reset for ASLM
+// 1 - Use hot reset for ASLM
+// 2 - Do not override chipset-selected config (default)
+// Fast link upgrade:
+// 0 - Do not use fast link upgrade for ASLM
+// 1 - Use fast link upgrade for ASLM
+// 2 - Do not override chipset-selected config (default)
+// Gen2 link width upgrade:
+// 0 - Do not use Gen2 link upgrade for ASLM
+// 1 - Use Gen2 link upgrade for ASLM
+// 2 - Do not override chipset-selected config (default)
+
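Because each field's "do not override" value (`_DEFAULT`) is 2 rather than 0, a composed override should populate every field explicitly. A minimal sketch using the `DRF_DEF()` helper from nvmisc.h, requesting hot reset while leaving the other three policies at the chipset default:

    static NvU32 exampleComposeAslmCfg(void)
    {
        // Fields 1:0, 9:8, 11:10 hold 2 (default); field 5:4 holds 1
        // (use hot reset), i.e. the composed value is 0x00000A12.
        return DRF_DEF(_REG_STR_CL, _ASLM_CFG, _NV_LINK_UPGRADE,   _DEFAULT) |
               DRF_DEF(_REG_STR_CL, _ASLM_CFG, _HOT_RESET,         _YES)     |
               DRF_DEF(_REG_STR_CL, _ASLM_CFG, _FAST_UPGRADE,      _DEFAULT) |
               DRF_DEF(_REG_STR_CL, _ASLM_CFG, _GEN2_LINK_UPGRADE, _DEFAULT);
    }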
+#define NV_REG_STR_RM_DISABLE_BR03_FLOW_CONTROL "MB_DisableBr03FlowControl"
+// Type DWORD
+// Encoding 1 -> Do not program BR03 flow control registers
+// 0 -> Setup BR03 flow control registers
+// Determines whether we need to program the BR03 flow control registers, in objcl.c.
+
+#define NV_REG_STR_RM_FORCE_ENABLE_GEN2 "RmForceEnableGen2"
+#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_NO 0
+#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_YES 1
+#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_DEFAULT NV_REG_STR_RM_FORCE_ENABLE_GEN2_NO
+// Type DWORD: On some platforms, Gen2 is disabled to work around system problems.
+// This key forces Gen2 on for testing or other purposes. It is
+// ineffective on platforms that are not Gen2-capable.
+// Encoding boolean:
+// 0 - Do Nothing
+// 1 - Force Enable Gen2 (to invalidate PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED)
+//
+
+#define NV_REG_STR_RM_DISABLE_FSP "RmDisableFsp"
+#define NV_REG_STR_RM_DISABLE_FSP_NO (0x00000000)
+#define NV_REG_STR_RM_DISABLE_FSP_YES (0x00000001)
+// Type DWORD (Boolean)
+// Override any other settings and disable FSP.
+
+#define NV_REG_STR_RM_DISABLE_COT_CMD "RmDisableCotCmd"
+#define NV_REG_STR_RM_DISABLE_COT_CMD_FRTS_SYSMEM 1:0
+#define NV_REG_STR_RM_DISABLE_COT_CMD_FRTS_VIDMEM 3:2
+#define NV_REG_STR_RM_DISABLE_COT_CMD_GSPFMC 5:4
+#define NV_REG_STR_RM_DISABLE_COT_CMD_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_DISABLE_COT_CMD_YES (0x00000001)
+// Type DWORD (Boolean)
+// Disable the specified commands as part of the Chain-Of-Trust feature.
+
+#define NV_REG_STR_PCI_LATENCY_TIMER_CONTROL "PciLatencyTimerControl"
+// Type DWORD
+// Encoding Numeric Value
+// Override to control whether the PCI latency timer value is set.
+// If not present, the default value is used. A value of 0xFFFFFFFF
+// leaves the value unmodified (i.e., the BIOS value).
+// All other values must be multiples of 8.
+
+#endif // NVRM_REGISTRY_H
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h
new file mode 100644
index 0000000..331922a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h
@@ -0,0 +1,114 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +// #ifndef NVOC +// #include "g_objrpc_nvoc.h" +// #endif + +#ifndef _OBJRPC_H_ +#define _OBJRPC_H_ + +#include "vgpu/rpc_headers.h" +#include "diagnostics/nv_debug_dump.h" +#include "ctrl/ctrl2080/ctrl2080event.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080gpu.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080rc.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080perf.h" // rmcontrol params (from hal) +#include "ctrl/ctrl0080/ctrl0080fb.h" // rmcontrol params (from hal) +#include "ctrl/ctrl0080/ctrl0080dma.h" // rmcontrol params (from hal) +#include "gpu/gsp/message_queue.h" + + +#include "vgpu/rpc_hal_stubs.h" + +struct OBJRPC{ + OBJECT_BASE_DEFINITION(RPC); + + struct { + NvU32 ipVersion; + }__nvoc_pbase_Object[1]; // This nested structure mechanism is to bypass NVOC + + // Message buffer fields + NvU32 *message_buffer; + NvU32 *message_buffer_priv; + MEMORY_DESCRIPTOR *pMemDesc_mesg; + NvU32 maxRpcSize; + + // UVM Message buffer fields + NvU32 *message_buffer_uvm; + NvU32 *message_buffer_priv_uvm; + MEMORY_DESCRIPTOR *pMemDesc_mesg_uvm; + + // Buffer for initial GSP message. + void *init_msg_buf; + RmPhysAddr init_msg_buf_pa; + + /* Message Queue */ + +}; + +// +// Utility macros for composing RPC messages. +// See for message formats. +// A message has a fixed-format header and optionally a variable length +// parameter after the header. +// + +#define vgpu_rpc_message_header_v ((rpc_message_header_v*)(pRpc->message_buffer)) +#define rpc_message (vgpu_rpc_message_header_v->rpc_message_data) + +static inline void _objrpcAssignIpVersion(struct OBJRPC* pRpc, NvU32 ipVersion) +{ + pRpc->__nvoc_pbase_Object->ipVersion = ipVersion; +} + +// Initialize and free RPC infrastructure +NV_STATUS initRpcInfrastructure_VGPU(OBJGPU *pGpu); +NV_STATUS freeRpcInfrastructure_VGPU(OBJGPU *pGpu); +OBJRPC *initRpcObject(OBJGPU *pGpu); +void rpcSetIpVersion(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 ipVersion); +void rpcObjIfacesSetup(OBJRPC *pRpc); +void rpcRmApiSetup(OBJGPU *pGpu); +NV_STATUS rpcWriteCommonHeader(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 func, NvU32 paramLength); +NV_STATUS rpcWriteCommonHeaderSim(OBJGPU *pGpu); +NV_STATUS _allocRpcMemDesc(OBJGPU *pGpu, NvU64 size, NvBool bContig, NV_ADDRESS_SPACE addrSpace, MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv); +void _freeRpcMemDesc(OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv); +NV_STATUS vgpuGspSetupBuffers(OBJGPU *pGpu); +void vgpuGspTeardownBuffers(OBJGPU *pGpu); + +// +// OBJGPU RPC member accessors. +// Historically, they have been defined inline by the following macros. +// These definitions were migrated to gpu.c in order to avoid having to include object headers in +// this file. 
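As a rough sketch of how these pieces compose a message (the send/poll step is transport-specific and omitted; `NV_VGPU_MSG_FUNCTION_NOP` comes from rpc_global_enums.h, and the payload layout here is purely illustrative):

    static NV_STATUS exampleComposeNopRpc(OBJGPU *pGpu)
    {
        OBJRPC *pRpc = GPU_GET_RPC(pGpu);   // accessor declared just below
        NV_STATUS status = rpcWriteCommonHeader(pGpu, pRpc,
                                                NV_VGPU_MSG_FUNCTION_NOP,
                                                sizeof(NvU32));
        if (status == NV_OK)
        {
            // rpc_message aliases the payload area just past the fixed header.
            NvU32 *pPayload = (NvU32 *)&rpc_message;
            *pPayload = 0;
            // ... hand the buffer to the transport (GSP message queue, etc.) ...
        }
        return status;
    }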
+// + +OBJRPC *gpuGetGspClientRpc(OBJGPU*); +OBJRPC *gpuGetVgpuRpc(OBJGPU*); +OBJRPC *gpuGetRpc(OBJGPU*); + +#define GPU_GET_GSPCLIENT_RPC(u) gpuGetGspClientRpc(u) +#define GPU_GET_VGPU_RPC(u) gpuGetVgpuRpc(u) +#define GPU_GET_RPC(u) gpuGetRpc(u) + +#endif // _OBJRPC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h new file mode 100644 index 0000000..4b47b50 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h @@ -0,0 +1,3 @@ + +#include "g_objtmr_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h new file mode 100644 index 0000000..fd73d0e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h @@ -0,0 +1,3 @@ + +#include "g_tmr_nvoc.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h new file mode 100644 index 0000000..60db46c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h @@ -0,0 +1,707 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Declarations for the RPC module. +// +// Description: +// This module declares the RPC interface functions/macros. +// +//****************************************************************************** + +#ifndef __vgpu_dev_nv_rpc_h__ +#define __vgpu_dev_nv_rpc_h__ + +#include "class/cl84a0.h" +#include "rpc_headers.h" +#include "gpu/dce_client/dce_client.h" +#include "objrpc.h" +#include "rpc_vgpu.h" + +#define KERNEL_PID (0xFFFFFFFFULL) + +typedef struct ContextDma ContextDma; + +#define NV_RM_STUB_RPC 0 + +#if NV_RM_STUB_RPC + +static inline void NV_RM_RPC_ALLOC_SHARE_DEVICE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_MEMORY(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_CHANNEL(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_OBJECT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MAP_MEMORY_DMA(OBJGPU *pGpu, ...) 
{ return; } +static inline void NV_RM_RPC_UNMAP_MEMORY_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_SUBDEVICE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_DUP_OBJECT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_IDLE_CHANNELS(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_EVENT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_CONTROL(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_BIND(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SET_GUEST_SYSTEM_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PERF_GET_PSTATE_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PERF_GET_VIRTUAL_PSTATE_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PERF_GET_LEVEL_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_UNLOADING_GUEST_DRIVER(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GPU_EXEC_REG_OPS(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GET_STATIC_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_UPDATE_BAR_PDE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SET_PAGE_DIRECTORY(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_UNSET_PAGE_DIRECTORY(OBJGPU *pGpu, ...) { return; } + +static inline void NV_RM_RPC_GET_GSP_STATIC_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GSP_SET_SYSTEM_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SET_REGISTRY(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SUBDEV_EVENT_SET_NOTIFICATION(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GSP_MSG_TIMING(OBJGPU *pGpu, ...) { return; } + +static inline void NV_RM_RPC_VGPU_PF_REG_READ32(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(OBJGPU *pGpu, ...) { return; } + +// RPC free stubs +static inline void NV_RM_RPC_SIM_FREE_INFRA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_FREE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_FREE_ON_ERROR(OBJGPU *pGpu, ...) { return; } + +// Simulation stubs +static inline void NV_RM_RPC_SIM_LOAD_ESCAPE_FUNCTIONS(OBJOS *pOS, ...) { return; } +static inline void NV_RM_RPC_SIM_ADD_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(OBJGPU *pGpu, ...) 
{ return; } + +#else // NV_RM_STUB_RPC + +#define NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, status) \ + do \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV0000_ALLOC_PARAMETERS root_alloc_params = {0}; \ + \ + root_alloc_params.hClient = hclient; \ + \ + if (!IsT234D(pGpu)) \ + { \ + RmClient *pClient = NULL; \ + \ + /* Get process ID from the client database */ \ + if (NV_OK == serverutilGetClientUnderLock(hclient, &pClient)) \ + { \ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); \ + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); \ + \ + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL) \ + { \ + root_alloc_params.processID = KERNEL_PID; \ + } \ + else \ + { \ + root_alloc_params.processID = pClient->ProcID; \ + NV_ASSERT(root_alloc_params.processID == osGetCurrentProcess()); \ + } \ + } \ + else \ + NV_ASSERT(0); \ + } \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, NV01_NULL_OBJECT, \ + NV01_NULL_OBJECT, NV01_ROOT, \ + &root_alloc_params); \ + \ + if (status == NV_OK) \ + { \ + NV0080_ALLOC_PARAMETERS device_alloc_params = {0}; \ + \ + device_alloc_params.hClientShare = hclientshare; \ + device_alloc_params.hTargetClient = htargetclient; \ + device_alloc_params.hTargetDevice = htargetdevice; \ + device_alloc_params.flags = allocflags; \ + device_alloc_params.vaSpaceSize = vasize; \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hclient, hdevice, \ + hclass, &device_alloc_params); \ + } \ + else \ + NV_ASSERT(0); \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_MEMORY(pGpu, hclient, hdevice, hmemory, hclass, \ + flags, pmemdesc, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) \ + && (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && \ + !gpuIsWarBug200577889SriovHeavyEnabled(pGpu) && \ + !NV_IS_MODS))) { \ + if (IS_GSP_CLIENT(pGpu) && IsT234D(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV_MEMORY_LIST_ALLOCATION_PARAMS listAllocParams = {0}; \ + listAllocParams.pteAdjust = pmemdesc->PteAdjust; \ + listAllocParams.format = memdescGetPteKind(pmemdesc); \ + listAllocParams.size = pmemdesc->Size; \ + listAllocParams.pageCount = pmemdesc->PageCount; \ + listAllocParams.pageNumberList = memdescGetPteArray(pmemdesc, AT_GPU); \ + listAllocParams.hClient = NV01_NULL_OBJECT; \ + listAllocParams.hParent = NV01_NULL_OBJECT; \ + listAllocParams.hObject = NV01_NULL_OBJECT; \ + listAllocParams.limit = pmemdesc->Size - 1; \ + listAllocParams.flagsOs02 = (DRF_DEF(OS02,_FLAGS,_MAPPING,_NO_MAP) | \ + DRF_DEF(OS02,_FLAGS,_PHYSICALITY,_NONCONTIGUOUS) | \ + (flags & DRF_SHIFTMASK(NVOS02_FLAGS_COHERENCY))); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, \ + hmemory, NV01_MEMORY_LIST_SYSTEM, &listAllocParams); \ + } \ + else \ + { \ + status = rpcAllocMemory_HAL(pGpu, pRpc, hclient, hdevice, hmemory, \ + hclass, flags, pmemdesc); \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_MAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, offset, length, flags, \ + dmaoffset, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) && \ + !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \ + status = rpcMapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, offset, \ + 
length, flags, dmaoffset); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + + +#define NV_RM_RPC_UNMAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, flags, dmaoffset, \ + status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) && \ + !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \ + status = rpcUnmapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, \ + flags, dmaoffset); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_IDLE_CHANNELS(pGpu, phclients, phdevices, phchannels, \ + nentries, flags, timeout, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcIdleChannels_HAL(pGpu, pRpc, phclients, phdevices, \ + phchannels, nentries, flags, timeout); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, status); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_CONTROL(pGpu, hClient, hObject, cmd, pParams, paramSize, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->Control(pRmApi, hClient, hObject, cmd, \ + pParams, paramSize); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_ALLOC_CHANNEL(pGpu, hclient, hparent, hchannel, hclass, \ + pGpfifoAllocParams, pchid, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hparent, hchannel, \ + hclass, pGpfifoAllocParams); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_OBJECT(pGpu, hclient, hchannel, hobject, hclass, params, status)\ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hchannel, hobject, \ + hclass, params); \ + } \ + } while (0) + +#define NV_RM_RPC_FREE(pGpu, hclient, hparent, hobject, status) \ + do \ + { \ + (void) hparent; \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->Free(pRmApi, hclient, hobject); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_FREE_ON_ERROR(pGpu, 
hclient, hparent, hobject) \ + do \ + { \ + (void) hparent; \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + /* used in failure cases, macro doesn't overwrite rmStatus */ \ + if (pRpc != NULL) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + pRmApi->Free(pRmApi, hclient, hobject); \ + } \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_EVENT(pGpu, hclient, hparentclient, hchannel, hobject, \ + hevent, hclass, idx, status) \ + do \ + { \ + (void) hchannel; \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV0005_ALLOC_PARAMETERS allocParams = {0}; \ + allocParams.hParentClient = hparentclient; \ + allocParams.hClass = hclass; \ + allocParams.notifyIndex = idx | NV01_EVENT_CLIENT_RM; \ + allocParams.data = 0; \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, \ + hobject, hevent, \ + hclass, &allocParams); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_ALLOC_SUBDEVICE(pGpu, hclient, hdevice, hsubdevice, \ + hclass, subDeviceInst, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV2080_ALLOC_PARAMETERS alloc_params = {0}; \ + \ + alloc_params.subDeviceId = subDeviceInst; \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, hsubdevice, \ + hclass, &alloc_params); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_DUP_OBJECT(pGpu, hclient, hparent, hobject, hclient_src, \ + hobject_src, flags, bAutoFreeRpc, pDstRef, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->DupObject(pRmApi, hclient, hparent, \ + &hobject, hclient_src, \ + hobject_src, flags); \ + if ((bAutoFreeRpc) && (pDstRef != NULL) && (status == NV_OK)) \ + { \ + RmResource *pRmResource; \ + pRmResource = dynamicCast(((RsResourceRef*)pDstRef)->pResource, RmResource); \ + pRmResource->bRpcFree = NV_TRUE; \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_VGPU_PF_REG_READ32(pGpu, address, value, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + status = rpcVgpuPfRegRead32_HAL(pGpu, pRpc, address, value, 0); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +/* + * manage HW resources RPC macro + */ +#define NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(pGpu, hclient, hdevice, hresource, \ + pfballocinfo, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcHwResourceAlloc(pGpu, pRpc, hclient, hdevice, \ + hresource, pfballocinfo); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(pGpu, hclient, hdevice, hresource, \ + flags, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); 
\ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcHwResourceFree(pGpu, pRpc, hclient, hdevice, \ + hresource, flags); \ + if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_MANAGE_HW_RESOURCE_BIND(pGpu, hclient, hdevice, hresource, \ + virtaddr, physaddr, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcHwResourceBind(pGpu, pRpc, hclient, hdevice, \ + hresource, virtaddr, physaddr); \ + if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_SIM_LOAD_ESCAPE_FUNCTIONS(pos) \ + do \ + { \ + NV_ASSERT(pos); \ + /* load simulation escape read/write routines */ \ + pos->osSimEscapeRead = RmRpcSimEscapeRead; \ + pos->osSimEscapeWrite = RmRpcSimEscapeWrite; \ + } \ + while(0) + +/* outgoing updates to the plugin */ +#define NV_RM_RPC_SIM_ADD_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma, channelnum) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimAddDisplayContextDma(pGpu, hclient, pcontextdma, channelnum); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +#define NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma, physaddrnew, \ + physlimitnew, pagesize, ptekind) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimUpdateDisplayContextDma(pGpu, hclient, pcontextdma, physaddrnew,\ + physlimitnew, pagesize, ptekind); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +#define NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimDeleteDisplayContextDma(pGpu, hclient, pcontextdma); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +#define NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(pGpu, hclient, pcontextdma, channelnum) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimUpdateDispChannelInfo(pGpu, hclient, pcontextdma, channelnum); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +/* + * free RPC infrastructure for simulation (not VGPU object) + */ +#define NV_RM_RPC_SIM_FREE_INFRA(pGpu, status) \ + do \ + { \ + NV_ASSERT(status == NV_OK); \ + status = RmRpcSimFreeInfra(pGpu); \ + } \ + while (0) + +#define NV_RM_RPC_SET_GUEST_SYSTEM_INFO(pGpu, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcSetGuestSystemInfo(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_PERF_GET_VIRTUAL_PSTATE_INFO(pGpu, hClient, hObject, pParams, \ + pClkInfos, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcPerfGetVirtualPstateInfo(pGpu, pRpc, hClient, hObject,\ + pParams, pClkInfos); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_UNLOADING_GUEST_DRIVER(pGpu, status, bSuspend, bGc6Entering, newPMLevel) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUnloadingGuestDriver_HAL(pGpu, pRpc, bSuspend, bGc6Entering, newPMLevel); \ + else if (pRpc == NULL) \ + status = 
NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_GPU_EXEC_REG_OPS(pGpu, hClient, hObject, pParams, pRegOps, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcGpuExecRegOps_HAL(pGpu, pRpc, hClient, hObject, pParams, pRegOps); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_GET_STATIC_INFO(pGpu, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcGetStaticInfo_HAL(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(pGpu, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcRegisterVirtualEventBuffer_HAL(pGpu, pRpc, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_UPDATE_BAR_PDE(pGpu, barType, entryValue, entryLevelShift, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUpdateBarPde_HAL(pGpu, pRpc, barType, entryValue, entryLevelShift); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_SET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcSetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_UNSET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUnsetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(pGpu, operation, status) \ + do \ + { \ + /* Call into RPC layer */ \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + status = rpcPmaScrubberSharedBufferGuestPagesOperation_HAL(pGpu, pRpc, operation); \ + } \ + } \ + while (0) + +// +// DCE_CLIENT_RM specific RPCs +// + +#define NV_RM_RPC_DCE_RM_INIT(pGpu, bInit, status) \ + do \ + { \ + OBJRPC* pRpc = GPU_GET_RPC(pGpu); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = rpcDceRmInit_dce(pRmApi, bInit); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +// +// GSP_CLIENT_RM specific RPCs +// + +#define NV_RM_RPC_GET_GSP_STATIC_INFO(pGpu, status) do {} while (0) +#define NV_RM_RPC_GSP_SET_SYSTEM_INFO(pGpu, status) do {} while (0) +#define NV_RM_RPC_SET_REGISTRY(pGpu, status) do {} while (0) +#define NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(pGpu, status, pPrbEnc, pNvDumpState, component) do {} while (0) + +#define NV_RM_RPC_RMFS_INIT(pGpu, statusQueueMemDesc, status) do {} 
while(0) + +#define NV_RM_RPC_RMFS_CLOSE_QUEUE(pGpu, status) do {} while(0) + +#define NV_RM_RPC_RMFS_CLEANUP(pGpu, status) do {} while(0) + +#define NV_RM_RPC_RMFS_TEST(pGpu, numReps, testData1, testData2, \ + testData3, status) do {} while(0) + +static inline NV_STATUS RmRpcSimFreeInfra(OBJGPU *pGpu, ...) { return NV_OK; } +static inline NV_STATUS RmRpcSimAddDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimUpdateDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimDeleteDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimUpdateDispChannelInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceAlloc(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceFree(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceBind(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetCurrentPstate(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetVirtualPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } + +static inline NV_STATUS RmRpcSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 index, + NvU32 count, NvU32 *data) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 index, + NvU32 count, NvU32 data) { return NV_ERR_NOT_SUPPORTED; } + +static NV_INLINE NV_STATUS RmRpcSetGuestSystemInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } + +/*! + * Defines the size of the GSP sim access buffer. + */ +#define GSP_SIM_ACCESS_BUFFER_SIZE 0x4000 + +/*! + * Defines the structure used to pass SimRead data from Kernel to Physical RM. 
+ */ +typedef struct SimAccessBuffer +{ + volatile NvU32 data[GSP_SIM_ACCESS_BUFFER_SIZE]; + volatile NvU32 seq; +} SimAccessBuffer; + +#endif // NV_RM_STUB_RPC + +#endif // __vgpu_dev_nv_rpc_h__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h new file mode 100644 index 0000000..b5e0ec8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h @@ -0,0 +1,238 @@ +#ifndef _RPC_GLOBAL_ENUMS_H_ +#define _RPC_GLOBAL_ENUMS_H_ + +#ifndef X +# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC, +# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + X(RM, NOP) // 0 + X(RM, SET_GUEST_SYSTEM_INFO) // 1 + X(RM, ALLOC_ROOT) // 2 + X(RM, ALLOC_DEVICE) // 3 deprecated + X(RM, ALLOC_MEMORY) // 4 + X(RM, ALLOC_CTX_DMA) // 5 + X(RM, ALLOC_CHANNEL_DMA) // 6 + X(RM, MAP_MEMORY) // 7 + X(RM, BIND_CTX_DMA) // 8 deprecated + X(RM, ALLOC_OBJECT) // 9 + X(RM, FREE) //10 + X(RM, LOG) //11 + X(RM, ALLOC_VIDMEM) //12 + X(RM, UNMAP_MEMORY) //13 + X(RM, MAP_MEMORY_DMA) //14 + X(RM, UNMAP_MEMORY_DMA) //15 + X(RM, GET_EDID) //16 + X(RM, ALLOC_DISP_CHANNEL) //17 + X(RM, ALLOC_DISP_OBJECT) //18 + X(RM, ALLOC_SUBDEVICE) //19 + X(RM, ALLOC_DYNAMIC_MEMORY) //20 + X(RM, DUP_OBJECT) //21 + X(RM, IDLE_CHANNELS) //22 + X(RM, ALLOC_EVENT) //23 + X(RM, SEND_EVENT) //24 + X(RM, REMAPPER_CONTROL) //25 deprecated + X(RM, DMA_CONTROL) //26 + X(RM, DMA_FILL_PTE_MEM) //27 + X(RM, MANAGE_HW_RESOURCE) //28 + X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated + X(RM, CREATE_FB_SEGMENT) //30 + X(RM, DESTROY_FB_SEGMENT) //31 + X(RM, ALLOC_SHARE_DEVICE) //32 + X(RM, DEFERRED_API_CONTROL) //33 + X(RM, REMOVE_DEFERRED_API) //34 + X(RM, SIM_ESCAPE_READ) //35 + X(RM, SIM_ESCAPE_WRITE) //36 + X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37 + X(RM, FREE_VIDMEM_VIRT) //38 + X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP + X(RM, PERF_GET_PERFMON_SAMPLE) //40 + X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated + X(RM, PERF_GET_LEVEL_INFO) //42 + X(RM, MAP_SEMA_MEMORY) //43 + X(RM, UNMAP_SEMA_MEMORY) //44 + X(RM, SET_SURFACE_PROPERTIES) //45 + X(RM, CLEANUP_SURFACE) //46 + X(RM, UNLOADING_GUEST_DRIVER) //47 + X(RM, TDR_SET_TIMEOUT_STATE) //48 + X(RM, SWITCH_TO_VGA) //49 + X(RM, GPU_EXEC_REG_OPS) //50 + X(RM, GET_STATIC_INFO) //51 + X(RM, ALLOC_VIRTMEM) //52 + X(RM, UPDATE_PDE_2) //53 + X(RM, SET_PAGE_DIRECTORY) //54 + X(RM, GET_STATIC_PSTATE_INFO) //55 + X(RM, TRANSLATE_GUEST_GPU_PTES) //56 + X(RM, RESERVED_57) //57 + X(RM, RESET_CURRENT_GR_CONTEXT) //58 + X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59 + X(RM, GET_ENGINE_UTILIZATION) //60 + X(RM, UPDATE_GPU_PDES) //61 + X(RM, GET_ENCODER_CAPACITY) //62 + X(RM, VGPU_PF_REG_READ32) //63 + X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64 + X(GSP, GET_GSP_STATIC_INFO) //65 + X(RM, RMFS_INIT) //66 + X(RM, RMFS_CLOSE_QUEUE) //67 + X(RM, RMFS_CLEANUP) //68 + X(RM, RMFS_TEST) //69 + X(RM, UPDATE_BAR_PDE) //70 + X(RM, CONTINUATION_RECORD) //71 + X(RM, GSP_SET_SYSTEM_INFO) //72 + X(RM, SET_REGISTRY) //73 + X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated + X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated + X(GSP, GSP_RM_CONTROL) //76 + X(RM, GET_STATIC_INFO2) //77 + X(RM, DUMP_PROTOBUF_COMPONENT) //78 + X(RM, UNSET_PAGE_DIRECTORY) //79 + X(RM, GET_CONSOLIDATED_STATIC_INFO) //80 + X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated + X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated + X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) 
//83 deprecated + X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated + X(RM, CTRL_SET_VGPU_FB_USAGE) //85 + X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86 + X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87 + X(RM, CTRL_RESET_CHANNEL) //88 + X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89 + X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90 + X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91 + X(RM, CTRL_PERF_BOOST) //92 + X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94 + X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95 + X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96 + X(RM, CTRL_GPFIFO_SCHEDULE) //97 + X(RM, CTRL_SET_TIMESLICE) //98 + X(RM, CTRL_PREEMPT) //99 + X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100 + X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101 + X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102 + X(GSP, GSP_RM_ALLOC) //103 + X(RM, CTRL_GET_P2P_CAPS_V2) //104 + X(RM, CTRL_CIPHER_AES_ENCRYPT) //105 + X(RM, CTRL_CIPHER_SESSION_KEY) //106 + X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107 + X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108 + X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109 + X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110 + X(RM, CTRL_GPU_PROMOTE_CTX) //111 + X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112 + X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113 + X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114 + X(RM, CTRL_GPU_INITIALIZE_CTX) //115 + X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116 + X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117 + X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118 + X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119 + X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120 + X(RM, CTRL_GET_CE_PCE_MASK) //121 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122 + X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123 + X(RM, CTRL_GET_NVLINK_STATUS) //124 + X(RM, CTRL_GET_P2P_CAPS) //125 + X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126 + X(RM, RESERVED_0) //127 + X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128 + X(RM, CTRL_RESERVE_HWPM_LEGACY) //129 + X(RM, CTRL_B0CC_EXEC_REG_OPS) //130 + X(RM, CTRL_BIND_PM_RESOURCES) //131 + X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132 + X(RM, CTRL_DBG_RESUME_CONTEXT) //133 + X(RM, CTRL_DBG_EXEC_REG_OPS) //134 + X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135 + X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136 + X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137 + X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138 + X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139 + X(RM, CTRL_ALLOC_PMA_STREAM) //140 + X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141 + X(RM, CTRL_FB_GET_INFO_V2) //142 + X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143 + X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144 + X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145 + X(RM, CTRL_GPU_EVICT_CTX) //146 + X(RM, CTRL_FB_GET_FS_INFO) //147 + X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148 + X(RM, CTRL_STOP_CHANNEL) //149 + X(RM, CTRL_GR_PC_SAMPLING_MODE) //150 + X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151 + X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152 + X(RM, CTRL_FREE_PMA_STREAM) //153 + X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154 + X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155 + X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156 + X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157 + X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158 + X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159 + X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160 + X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161 + X(UVM, UVM_PAGING_CHANNEL_MAP) //162 + X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163 + X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164 + X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165 + X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166 + X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) 
//167 + X(RM, DCE_RM_INIT) //168 + X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169 + X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170 + X(RM, GET_PLCABLE_ADDRESS_KIND) //171 + X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172 + X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173 + X(RM, CTRL_GET_MMU_DEBUG_MODE) //174 + X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177 + X(RM, DISABLE_CHANNELS) //178 + X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179 + X(RM, CTRL_FABRIC_MEM_STATS) //180 + X(RM, SAVE_HIBERNATION_DATA) //181 + X(RM, RESTORE_HIBERNATION_DATA) //182 + X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183 + X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184 + X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185 + X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186 + X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187 + X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188 + X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK) //189 + X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER) //190 + X(RM, NUM_FUNCTIONS) //END +#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +}; +# undef X +# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +#endif + +// RPC Events. Used by GSP-RM. +#ifndef E +# define E(RPC) NV_VGPU_MSG_EVENT_##RPC, +# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + E(FIRST_EVENT = 0x1000) // 0x1000 + E(GSP_INIT_DONE) // 0x1001 + E(GSP_RUN_CPU_SEQUENCER) // 0x1002 + E(POST_EVENT) // 0x1003 + E(RC_TRIGGERED) // 0x1004 + E(MMU_FAULT_QUEUED) // 0x1005 + E(OS_ERROR_LOG) // 0x1006 + E(RG_LINE_INTR) // 0x1007 + E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008 + E(SIM_READ) // 0x1009 + E(SIM_WRITE) // 0x100a + E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b + E(UCODE_LIBOS_PRINT) // 0x100c + E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d + E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e + E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f + E(VGPU_CONFIG) // 0x1010 + E(DISPLAY_MODESET) // 0x1011 + E(NUM_EVENTS) // END +#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +}; +# undef E +# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +#endif + +#endif /*_RPC_GLOBAL_ENUMS_H_*/ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h new file mode 100644 index 0000000..60667b3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _RPC_HAL_STUBS_H_
+#define _RPC_HAL_STUBS_H_
+
+// This file replaces g_rpc_hal.h to provide stubs for the RPC HAL functions
+// when the RMConfig RPC module is disabled. In that case the BASE_DEFINITION
+// for the RPC object is not needed, so it is defined as a no-op.
+#define __RPC_OBJECT_BASE_DEFINITION
+
+static inline NV_STATUS rpcAllocShareDevice_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcAllocMemory_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcAllocCtxDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcAllocChannelDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcAllocObject_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcMapMemoryDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcUnmapMemoryDma_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcAllocSubdevice_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcDupObject_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcIdleChannels_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcAllocEvent_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcDmaControl_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcFree_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcPerfGetLevelInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcUnloadingGuestDriver_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcGpuExecRegOps_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcGetStaticInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcGetStaticInfo2_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcUpdateBarPde_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcSetPageDirectory_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcUnsetPageDirectory_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcUpdateGpuPdes_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcVgpuPfRegRead32_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcGetGspStaticInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcSetMemoryInfo_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcSetRegistry_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcGspInitPostObjgpu_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcDumpProtobufComponent_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcRmfsInit_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
+static inline NV_STATUS rpcRmfsCloseQueue_HAL(OBJGPU *pGpu, ...)
{ return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcRmfsCleanup_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS rpcRmfsTest_HAL(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } + +#endif // _RPC_HAL_STUBS_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h new file mode 100644 index 0000000..edc37a7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h @@ -0,0 +1,230 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __vgpu_rpc_nv_headers_h__ +#define __vgpu_rpc_nv_headers_h__ + +#include "ctrl/ctrl0080/ctrl0080perf.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "nvstatus.h" + +#define MAX_GPC_COUNT 32 + +/* + * Maximum number of RegOps that can be accommodated within one RPC call + * due to RPC message buffer size being limited to 4k + */ +#define VGPU_MAX_REGOPS_PER_RPC 100 + +#define VGPU_RESERVED_HANDLE_BASE 0xCAF3F000 +#define VGPU_RESERVED_HANDLE_RANGE 0x1000 + +#define VGPU_CALC_PARAM_OFFSET(prev_offset, prev_params) (prev_offset + NV_ALIGN_UP(sizeof(prev_params), sizeof(NvU32))) + +/* + * Message header (in buffer addressed by ring entry) + * + * If message is invalid (bad length or signature), signature and length + * are forced to be valid (if in range of descriptor) and result is set to + * NV_VGPU_RESULT_INVALID_MESSAGE_FORMAT. Otherwise, signature, length, and + * function are always unchanged and result is always set. + * + * The function message header, if defined, immediately follows the main message + * header. 
+ */ +#define NV_VGPU_MSG_HEADER_VERSION_MAJOR 31:24 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MINOR 23:16 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MAJOR_TOT 0x00000003 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MINOR_TOT 0x00000000 /* R---D */ +/* signature must equal valid value */ +#define NV_VGPU_MSG_SIGNATURE_VALID 0x43505256 /* RW--V */ + +#include "rpc_global_enums.h" + +/* result code */ +/* codes below 0xFF000000 must match exactly the NV_STATUS codes in nvos.h */ +#define NV_VGPU_MSG_RESULT__RM NV_ERR_GENERIC:0x00000000 /* RW--D */ +#define NV_VGPU_MSG_RESULT_SUCCESS NV_OK +#define NV_VGPU_MSG_RESULT_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT +#define NV_VGPU_MSG_RESULT_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE +#define NV_VGPU_MSG_RESULT_GENERIC NV_ERR_GENERIC +#define NV_VGPU_MSG_RESULT_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER +#define NV_VGPU_MSG_RESULT_IN_USE NV_ERR_IN_USE +#define NV_VGPU_MSG_RESULT_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES +#define NV_VGPU_MSG_RESULT_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE +#define NV_VGPU_MSG_RESULT_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT +#define NV_VGPU_MSG_RESULT_INVALID_BASE NV_ERR_INVALID_BASE +#define NV_VGPU_MSG_RESULT_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL +#define NV_VGPU_MSG_RESULT_INVALID_CLASS NV_ERR_INVALID_CLASS +#define NV_VGPU_MSG_RESULT_INVALID_CLIENT NV_ERR_INVALID_CLIENT +#define NV_VGPU_MSG_RESULT_INVALID_COMMAND NV_ERR_INVALID_COMMAND +#define NV_VGPU_MSG_RESULT_INVALID_DATA NV_ERR_INVALID_DATA +#define NV_VGPU_MSG_RESULT_INVALID_DEVICE NV_ERR_INVALID_DEVICE +#define NV_VGPU_MSG_RESULT_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER +#define NV_VGPU_MSG_RESULT_INVALID_EVENT NV_ERR_INVALID_EVENT +#define NV_VGPU_MSG_RESULT_INVALID_FLAGS NV_ERR_INVALID_FLAGS +#define NV_VGPU_MSG_RESULT_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION +#define NV_VGPU_MSG_RESULT_INVALID_HEAP NV_ERR_INVALID_HEAP +#define NV_VGPU_MSG_RESULT_INVALID_INDEX NV_ERR_INVALID_INDEX +#define NV_VGPU_MSG_RESULT_INVALID_LIMIT NV_ERR_INVALID_LIMIT +#define NV_VGPU_MSG_RESULT_INVALID_METHOD NV_ERR_INVALID_METHOD +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_BUFFER NV_ERR_INVALID_OBJECT_BUFFER +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_PARENT NV_ERR_INVALID_OBJECT_PARENT +#define NV_VGPU_MSG_RESULT_INVALID_OFFSET NV_ERR_INVALID_OFFSET +#define NV_VGPU_MSG_RESULT_INVALID_OWNER NV_ERR_INVALID_OWNER +#define NV_VGPU_MSG_RESULT_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT +#define NV_VGPU_MSG_RESULT_INVALID_PARAMETER NV_ERR_INVALID_PARAMETER +#define NV_VGPU_MSG_RESULT_INVALID_POINTER NV_ERR_INVALID_POINTER +#define NV_VGPU_MSG_RESULT_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY +#define NV_VGPU_MSG_RESULT_INVALID_STATE NV_ERR_INVALID_STATE +#define NV_VGPU_MSG_RESULT_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH +#define NV_VGPU_MSG_RESULT_INVALID_XLATE NV_ERR_INVALID_XLATE +#define NV_VGPU_MSG_RESULT_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING +#define NV_VGPU_MSG_RESULT_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES +#define NV_VGPU_MSG_RESULT_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED +#define NV_VGPU_MSG_RESULT_OPERATING_SYSTEM NV_ERR_OPERATING_SYSTEM +#define NV_VGPU_MSG_RESULT_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT +#define 
NV_VGPU_MSG_RESULT_TIMEOUT NV_ERR_TIMEOUT +#define NV_VGPU_MSG_RESULT_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES +#define NV_VGPU_MSG_RESULT_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED +#define NV_VGPU_MSG_RESULT_GUEST_HOST_DRIVER_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH + +/* + * codes above 0xFF000000 and below 0xFF100000 must match one-for-one + * the vmiop_error_t codes in vmioplugin.h, with 0xFF000000 added. + */ +#define NV_VGPU_MSG_RESULT__VMIOP 0xFF000007:0xFF000000 /* RW--D */ +#define NV_VGPU_MSG_RESULT_VMIOP_INVAL 0xFF000001 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_RESOURCE 0xFF000002 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_RANGE 0xFF000003 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_READ_ONLY 0xFF000004 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_NOT_FOUND 0xFF000005 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_NO_ADDRESS_SPACE 0xFF000006 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_TIMEOUT 0xFF000007 /* RW--V */ +/* RPC-specific error codes */ +#define NV_VGPU_MSG_RESULT__RPC 0xFF100007:0xFF100000 /* RW--D */ +#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION 0xFF100001 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_INVALID_MESSAGE_FORMAT 0xFF100002 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_HANDLE_NOT_FOUND 0xFF100003 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_HANDLE_EXISTS 0xFF100004 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR 0xFF100005 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR 0xFF100006 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE 0xFF100007 /* RW--V */ +/* RPC-specific code in result for incomplete request */ +#define NV_VGPU_MSG_RESULT_RPC_PENDING 0xFFFFFFFF /* RW--V */ +/* shared union field */ +#define NV_VGPU_MSG_UNION_INIT 0x00000000 /* RW--V */ + +/* + * common PTEDESC message defines (used w/ ALLOC_MEMORY, ALLOC_VIDMEM, FILL_PTE_MEM) + */ +#define NV_VGPU_PTEDESC_INIT 0x00000000 /* RWI-V */ +#define NV_VGPU_PTEDESC__PROD 0x00000000 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_NONE 0x00000000 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_SINGLE 0x00000001 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_DOUBLE 0x00000002 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_TRIPLE 0x00000003 /* RW--V */ + +#define NV_VGPU_PTE_PAGE_SIZE 0x1000 /* R---V */ +#define NV_VGPU_PTE_SIZE 4 /* R---V */ +#define NV_VGPU_PTE_INDEX_SHIFT 10 /* R---V */ +#define NV_VGPU_PTE_INDEX_MASK 0x3FF /* R---V */ + +#define NV_VGPU_PTE_64_PAGE_SIZE 0x1000 /* R---V */ +#define NV_VGPU_PTE_64_SIZE 8 /* R---V */ +#define NV_VGPU_PTE_64_INDEX_SHIFT 9 /* R---V */ +#define NV_VGPU_PTE_64_INDEX_MASK 0x1FF /* R---V */ + +/* + * LOG message + */ +#define NV_VGPU_LOG_LEVEL_FATAL 0x00000000 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_ERROR 0x00000001 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_NOTICE 0x00000002 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_STATUS 0x00000003 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_DEBUG 0x00000004 /* RW--V */ + +/* + * Enums specifying the BAR number that we are going to update its PDE + */ +typedef enum +{ + NV_RPC_UPDATE_PDE_BAR_1, + NV_RPC_UPDATE_PDE_BAR_2, + NV_RPC_UPDATE_PDE_BAR_INVALID, +} NV_RPC_UPDATE_PDE_BAR_TYPE; + +/* + * UVM method stream guest pages operation + */ +typedef enum +{ + NV_RPC_GUEST_PAGE_MAP, + NV_RPC_GUEST_PAGE_UNMAP, +} NV_RPC_GUEST_PAGE_OPERATION; + +/* + * UVM method stream guest page size + */ +typedef enum +{ + NV_RPC_GUEST_PAGE_SIZE_4K, + NV_RPC_GUEST_PAGE_SIZE_UNSUPPORTED, +} NV_RPC_GUEST_PAGE_SIZE; + +/* + * UVM paging channel VASPACE operation + */ +typedef enum +{ + UVM_PAGING_CHANNEL_VASPACE_ALLOC, + 
UVM_PAGING_CHANNEL_VASPACE_FREE, +} UVM_PAGING_CHANNEL_VASPACE_OPERATION; + +typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS +{ + NvU32 headIndex; + NvU32 maxHResolution; + NvU32 maxVResolution; +} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS; + +typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS +{ + NvU32 numHeads; + NvU32 maxNumHeads; +} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS; + + +/* + * Maximum guest pages that can be mapped for UVM method stream + */ +#define UVM_METHOD_STREAM_MAX_GUEST_PAGES_v1C_05 500 + +#define PMA_SCRUBBER_SHARED_BUFFER_MAX_GUEST_PAGES_v1F_0C 500 + +#endif // __vgpu_rpc_nv_headers_h__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h new file mode 100644 index 0000000..f9789da --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __vgpu_dev_nv_rpc_vgpu_h__ +#define __vgpu_dev_nv_rpc_vgpu_h__ + +static NV_INLINE void NV_RM_RPC_ALLOC_LOCAL_USER(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_ALLOC_VIDMEM(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_ALLOC_VIRTMEM(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_MAP_MEMORY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_UNMAP_MEMORY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_READ_EDID(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DMA_FILL_PTE_MEM(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_CREATE_FB_SEGMENT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DESTROY_FB_SEGMENT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DEFERRED_API_CONTROL(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_REMOVE_DEFERRED_API(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_FREE_VIDMEM_VIRT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_LOG(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SET_GUEST_SYSTEM_INFO_EXT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_ENGINE_UTILIZATION(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_MAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_UNMAP_SEMA_MEMORY(OBJGPU *pGpu, ...) 
{ } +static NV_INLINE void NV_RM_RPC_SET_SURFACE_PROPERTIES(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_CLEANUP_SURFACE(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SWITCH_TO_VGA(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_TDR_SET_TIMEOUT_STATE(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_STATIC_INFO(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_GR_STATIC_INFO(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_STATIC_PSTATE_INFO(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_UPDATE_PDE_2(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_TRANSLATE_GUEST_GPU_PTES(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SET_SEMA_MEM_VALIDATION_STATE(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_RESET_CURRENT_GR_CONTEXT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_ENCODER_CAPACITY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_STATIC_INFO2(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_ALLOC_CONTEXT_DMA(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_PLCABLE_ADDRESS_KIND(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_UPDATE_GPU_PDES(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DISABLE_CHANNELS(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SAVE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_RESTORE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { } + +#endif // __vgpu_dev_nv_rpc_vgpu_h__ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld new file mode 100644 index 0000000..89ce366 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld @@ -0,0 +1,35 @@ +/* + * resman linker script + * + * Linking nv-kernel.o has several problems: + * + * (1) We build with '-ffunction-sections -fdata-sections' to put each + * function and data into separate ELF sections, so that the linker + * can distinguish separate functions and garbage collect dead code + * ('--gc-sections'). The linker is supposed to then merge sections + * together (e.g., all the ".text.*" into ".text", all the ".data.*" + * sections into ".data"). The linker doesn't seem to do this when + * linking a relocatable object file. + * + * (2) g++ puts inline functions, vtables, template functions, etc, in + * separate ".gnu.linkonce.*" sections. Duplicates are supposed to get + * collapsed at link time. The linker doesn't seem to do this when + * linking a relocatable object file. + * + * Resolve both of these problems by defining our own naive linker + * script to do the merging described above. + */ + +SECTIONS { + + .text : { *(.text) *(.text.*) *(.gnu.linkonce.t.*) } + + .data : { *(.data) *(.data.*) } + + .rodata : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r.*) } + + .bss : { *(.bss) *(.bss.*) } + + /* The rest of the sections ("orphaned sections") will just be copied from + the input to the output */ +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c new file mode 100644 index 0000000..5dccdb4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/system.h" +#include "core/hal_mgr.h" +#include "core/hal.h" + +#include "g_hal_private.h" + +PMODULEDESCRIPTOR +objhalGetModuleDescriptor_IMPL(OBJHAL *thisHal) +{ + return &thisHal->moduleDescriptor; +} + +// +// registerHalModule() is referred to by functions in the generated file g_hal_private.h, +// so it is placed here instead of gt_hal_register.h to avoid duplication of this +// function, as g_hal_private.h is included by several files. +// +NV_STATUS +registerHalModule(NvU32 halImpl, const HAL_IFACE_SETUP *pHalSetIfaces) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHALMGR *pHalMgr = SYS_GET_HALMGR(pSys); + OBJHAL *pHal; + PMODULEDESCRIPTOR pMod; + NV_STATUS rmStatus; + + // create a HAL object + rmStatus = halmgrCreateHal(pHalMgr, halImpl); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // retrieve the HAL object + pHal = HALMGR_GET_HAL(pHalMgr, halImpl); + NV_ASSERT(pHal); + + // init the iface descriptor lists + pMod = objhalGetModuleDescriptor(pHal); + + // point to rmconfig structure that can init our engines' interfaces + pMod->pHalSetIfaces = pHalSetIfaces; + + return NV_OK; +} + +// Helper to install IP_VERSIONS function pointers into pObj->hal fn ptr table +// based on IP_VER register value. +// Uses tables and code in g_FOO_private.h (generated by rmconfig) + +NV_STATUS ipVersionsSetupHal +( + OBJGPU *pGpu, + void * pDynamic_v, // eg: pDisp + IGrp_ipVersions_getInfo getInfoFn // eg: disp_iGrp_ipVersions_getInfo() +) +{ + IGRP_IP_VERSIONS_TABLE_INFO info; + const IGRP_IP_VERSIONS_ENTRY *pVer; + const IGRP_IP_VERSION_RANGE *pRange; + NV_STATUS rmStatus; + Dynamic *pDynamic = (Dynamic*)pDynamic_v; + Object *pObj = dynamicCast(pDynamic, Object); + + // nothing to do if IP_VERSION is invalid + if ( ! IsIPVersionValid(pObj)) + return NV_OK; + + info.pGpu = pGpu; + info.pDynamic = pDynamic; + + // call into the hal to finish filling in the table + rmStatus = getInfoFn(&info); + if (rmStatus != NV_OK) + return rmStatus; + + // perform setup for *all* matching variants + for (pVer = info.pTable; pVer < info.pTable + info.numEntries; pVer++) + { + // Each version has 1 or more "version ranges". + // Invoke this version's setup fn if any of its ranges match.
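        /*
         * Illustrative sketch with hypothetical values (not taken from the
         * generated tables): a range entry { v0 = 0x0400, v1 = 0x04FF }
         * matches an object whose IPVersion(pObj) reads 0x0427, because the
         * test below is an inclusive [v0, v1] bounds check:
         *
         *     match = (0x0427 >= 0x0400) && (0x0427 <= 0x04FF);  // NV_TRUE
         */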
+ for (pRange = pVer->pRanges; pRange < (pVer->pRanges + pVer->numRanges); pRange++) + { + if ((IPVersion(pObj) >= pRange->v0) && (IPVersion(pObj) <= pRange->v1)) + { + pVer->ifacesInstallFn(&info); + break; + } + } + } + + // invoke rmconfig-generated wrapup function to handle any overrides & verification + rmStatus = info.ifacesWrapupFn(&info); + + return rmStatus; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c new file mode 100644 index 0000000..fb28b82 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Routines ***************************\ +* * +* Module: hals_all.c * +* Hal interface init routines for files generated by rmconfig * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/eng_desc.h" + +// +// These defines affect what we see in halgen generated headers. +// +// If RMCFG_ENGINE_SETUP is not already defined, then setup for +// a monolithic hal. +// +// The per-gpu-family hal setups #include this file with the RMCFG +// setup defines already defined to pull in just the interfaces +// needed for that gpu family. +// + +#if ! defined(RMCFG_ENGINE_SETUP) + +# define RMCFG_ENGINE_SETUP 1 // pull in per-gpu engine interfaces + +# define RMCFG_HAL_SETUP_ALL 1 // monolithic - ALL configured gpus' support in this file +# define RMCFG_HAL_SUPPORT_ALL 1 // not required, but keeps us honest + +#endif // ! defined RMCFG_ENGINE_SETUP + +// Pull in generated code to setup each engine's hal interfaces for each gpu +#include "g_hal_register.h" +#include "g_hal_private.h" + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c new file mode 100644 index 0000000..25ccd95 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c @@ -0,0 +1,171 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * @file + * @brief Implementation for info block utility + */ + +#include "os/os.h" +#include "core/info_block.h" + +// +// getInfoPtr +// +// Return pointer to HAL implementation-specific private data info block. +// +void * +getInfoPtr(PENG_INFO_LINK_NODE head, NvU32 dataId) +{ + PENG_INFO_LINK_NODE curNode = head; + + while (curNode && (curNode->dataId != dataId)) + curNode = curNode->next; + + if (curNode == NULL) + return NULL; + + return curNode->infoBlock; +} + +// +// testInfoPtr +// +// Returns whether the HAL implementation-specific private data info block is allocated. +// +NvBool +testInfoPtr(PENG_INFO_LINK_NODE head, NvU32 dataId) +{ + PENG_INFO_LINK_NODE curNode = head; + + while (curNode && (curNode->dataId != dataId)) + curNode = curNode->next; + + if (curNode == NULL) + return NV_FALSE; + + return NV_TRUE; +} + +// +// createLinkNode +// +// Allocate and initialize new info block. +// +static PENG_INFO_LINK_NODE +createLinkNode(NvU32 dataId, NvU32 size) +{ + PENG_INFO_LINK_NODE newNode; + NV_STATUS rmStatus; + + newNode = portMemAllocNonPaged(sizeof(ENG_INFO_LINK_NODE)); + if (newNode == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + NV_ASSERT(rmStatus == NV_OK); + return NULL; + } + + portMemSet(newNode, 0, sizeof(ENG_INFO_LINK_NODE)); + + newNode->infoBlock = portMemAllocNonPaged(size); + if (newNode->infoBlock == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + portMemFree(newNode); + NV_ASSERT(rmStatus == NV_OK); + return NULL; + } + + portMemSet(newNode->infoBlock, 0, size); + + newNode->dataId = dataId; + + return newNode; +} + +// +// addInfoPtr +// +// Create a new HAL private data block and add it to the specified list. +// +void * +addInfoPtr(PENG_INFO_LINK_NODE *head, NvU32 dataId, NvU32 size) +{ + PENG_INFO_LINK_NODE curNode = *head; + PENG_INFO_LINK_NODE newNode = createLinkNode(dataId, size); + + if (newNode == NULL) + return NULL; + + while (curNode && curNode->next) + curNode = curNode->next; + + if (!curNode) + *head = newNode; + else + curNode->next = newNode; + + return newNode->infoBlock; +} + +// +// deleteInfoPtr +// +// Destroy a HAL private data block and remove it from the specified list.
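//
// A hedged usage sketch of this file's list API (FOO_INFO, FOO_DATA_ID and
// the pFooInfoList field are hypothetical names, not from this patch):
//
//     FOO_INFO *pInfo = addInfoPtr(&pEng->pFooInfoList, FOO_DATA_ID, sizeof(FOO_INFO));
//     ...
//     pInfo = getInfoPtr(pEng->pFooInfoList, FOO_DATA_ID);
//     ...
//     deleteInfoPtr(&pEng->pFooInfoList, FOO_DATA_ID);
//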
+// +void +deleteInfoPtr(PENG_INFO_LINK_NODE *head, NvU32 dataId) +{ + PENG_INFO_LINK_NODE curNode = *head; + + if (!curNode) + return ; + + // check list head + if (curNode->dataId == dataId) + { + *head = curNode->next; + NV_ASSERT(curNode->infoBlock); + portMemFree(curNode->infoBlock); + portMemFree(curNode); + return ; + } + + // search for it + while (curNode->next && (curNode->next->dataId != dataId)) + curNode = curNode->next; + + if (curNode->next) + { + PENG_INFO_LINK_NODE delNode; + + delNode = curNode->next; + curNode->next = curNode->next->next; + NV_ASSERT(delNode->infoBlock); + portMemFree(delNode->infoBlock); + portMemFree(delNode); + } + + return ; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c new file mode 100644 index 0000000..65b7272 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c @@ -0,0 +1,229 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/hal_mgr.h" +#include "core/hal.h" + +#include "g_hal_archimpl.h" + +// NOTE: string arguments only get used when NV_PRINTF_STRINGS_ALLOWED is true. +#if NV_PRINTF_STRINGS_ALLOWED +static const char *_halmgrGetStringRepForHalImpl(OBJHALMGR *pHalMgr, HAL_IMPLEMENTATION); +#endif + +NV_STATUS +halmgrConstruct_IMPL +( + OBJHALMGR *pHalMgr +) +{ + HAL_IMPLEMENTATION i; + + // + // Make sure all the possible handles to the Hal Objects + // have been zeroed out. Also initialize the implementation + // and public id's in the hal descriptor list. 
+ // + for (i = 0; i < HAL_IMPL_MAXIMUM; i++) + pHalMgr->pHalList[i] = NULL; + + return NV_OK; +} + +void +halmgrDestruct_IMPL +( + OBJHALMGR *pHalMgr +) +{ + NvU32 i; + + for (i = 0; i < HAL_IMPL_MAXIMUM; i++) + { + objDelete(pHalMgr->pHalList[i]); + pHalMgr->pHalList[i] = NULL; + } +} + +NV_STATUS +halmgrCreateHal_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 halImpl +) +{ + OBJHAL *pHal; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(halImpl < HAL_IMPL_MAXIMUM, NV_ERR_INVALID_ARGUMENT); + + status = objCreate(&pHal, pHalMgr, OBJHAL); + if (status != NV_OK) + return status; + + // Store away the object pointer for this particular HAL object + pHalMgr->pHalList[halImpl] = pHal; + + return NV_OK; +} + +POBJHAL +halmgrGetHal_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 halImpl +) +{ + if (halImpl < HAL_IMPL_MAXIMUM) + return pHalMgr->pHalList[halImpl]; + else + return NULL; +} + +static NvBool +_halmgrIsTegraSupported +( + NvU32 publicHalID, + NvU32 socChipID +) +{ + NvU32 chipid, majorRev; + + chipid = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _CHIPID, socChipID); + majorRev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _MAJORREV, socChipID); + + // WAR: The majorrev of t234 shows 0xa on fmodel instead of 0x4 + if ((chipid == 0x23) && (majorRev == 0xa)) + { + majorRev = 0x4; + } + + // Convert to the HIDREV field format of chip-config + return ((chipid << 4) | majorRev) == chipID[publicHalID].hidrev; +} + +static NvBool +_halmgrIsChipSupported +( + OBJHALMGR *pHalMgr, + NvU32 publicHalID, + NvU32 pPmcBoot0, + NvU32 pPmcBoot42 +) +{ + NvBool retVal = NV_FALSE; + + if (chipID[publicHalID].hidrev) + return _halmgrIsTegraSupported(publicHalID, pPmcBoot0); + + if (pPmcBoot42) + { + if ((DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pPmcBoot42) == chipID[publicHalID].arch) && + (DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, pPmcBoot42) == chipID[publicHalID].impl)) + { + retVal = NV_TRUE; + } + } + else + { + // Fail safely on older GPUs where pPmcBoot42 is not supported + retVal = NV_FALSE; + } + + return retVal; +} + +NV_STATUS +halmgrGetHalForGpu_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 pPmcBoot0, + NvU32 pPmcBoot42, + NvU32 *pHalImpl +) +{ + HAL_IMPLEMENTATION halImpl; + OBJHAL *pHal; + + for (halImpl = 0; halImpl < HAL_IMPL_MAXIMUM; halImpl++) + { + pHal = pHalMgr->pHalList[halImpl]; + + // skip impls that have no hal object + if (pHal == NULL) + continue; + + if (_halmgrIsChipSupported(pHalMgr, halImpl, pPmcBoot0, pPmcBoot42)) + { + *pHalImpl = halImpl; + +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "Matching %s = 0x%x to HAL_IMPL_%s\n", + pPmcBoot42 ? "PMC_BOOT_42" : "PMC_BOOT_0", + pPmcBoot42 ? pPmcBoot42 : pPmcBoot0, + _halmgrGetStringRepForHalImpl(pHalMgr, halImpl)); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "Matching 0x%x to %u\n", + pPmcBoot42 ? pPmcBoot42 : pPmcBoot0, + halImpl); +#endif // NV_PRINTF_STRINGS_ALLOWED + + return NV_OK; + } + } + + return NV_ERR_NOT_SUPPORTED; +} + +// NOTE: string arguments only get used when NV_PRINTF_STRINGS_ALLOWED is true.
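/*
 * A hedged caller sketch for the probe above (variable names hypothetical):
 * resolve the HAL implementation from the boot registers, then fetch the
 * corresponding OBJHAL.
 *
 *     NvU32 halImpl;
 *     if (halmgrGetHalForGpu(pHalMgr, pmcBoot0, pmcBoot42, &halImpl) == NV_OK)
 *         pHal = HALMGR_GET_HAL(pHalMgr, halImpl);
 */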
+#if NV_PRINTF_STRINGS_ALLOWED +static const char * +_halmgrGetStringRepForHalImpl +( + OBJHALMGR *pHalMgr, + HAL_IMPLEMENTATION halImpl +) +{ + const char *chipName = "UNKNOWN"; + static const struct + { + HAL_IMPLEMENTATION halImpl; + const char *name; + } halImplNames[] = { HAL_IMPL_NAME_LIST }; // generated by rmconfig into g_hal.h + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(halImplNames); i++) + { + if (halImplNames[i].halImpl == halImpl) + { + chipName = halImplNames[i].name; + break; + } + } + + return chipName; +} +#endif + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c new file mode 100644 index 0000000..0f84bfe --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c @@ -0,0 +1,307 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/system.h" +#include "os/os.h" +#include "tls/tls.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" + +static NvBool s_bRmLocksAllocated = NV_FALSE; + +NV_STATUS +rmLocksAlloc(OBJSYS *pSys) +{ + NV_STATUS status; + + s_bRmLocksAllocated = NV_FALSE; + + // legacy lock model : RM system semaphore + status = osAllocRmSema(&pSys->pSema); + if (status != NV_OK) + return status; + + // RM_BASIC_LOCK_MODEL : GPU lock info (ISR/DPC synchronization) + status = rmGpuLockInfoInit(); + if (status != NV_OK) + { + osFreeRmSema(&pSys->pSema); + return status; + } + rmInitLockMetering(); + + s_bRmLocksAllocated = NV_TRUE; + + return status; +} + +void +rmLocksFree(OBJSYS *pSys) +{ + if (s_bRmLocksAllocated) + { + rmDestroyLockMetering(); + rmGpuLockInfoDestroy(); + osFreeRmSema(pSys->pSema); + + s_bRmLocksAllocated = NV_FALSE; + } +} + +/*! + * @brief Acquires all of the locks necessary to execute RM code safely + * + * Other threads and client APIs will be blocked from executing while the locks + * are held, so the locks should not be held longer than necessary. The locks + * should not be held across long HW delays. 
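 *
 * A minimal usage sketch (module id chosen for illustration; any
 * RM_LOCK_MODULES_* value used elsewhere in this file would do):
 *
 *     if (rmLocksAcquireAll(RM_LOCK_MODULES_WORKITEM) == NV_OK)
 *     {
 *         // short critical section, no long HW waits
 *         rmLocksReleaseAll();
 *     }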
+ * + * @returns NV_OK if locks are acquired successfully + * NV_ERR_INVALID_LOCK_STATE if locks cannot be acquired + */ +NV_STATUS +rmLocksAcquireAll(NvU32 module) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (osAcquireRmSemaForced(pSys->pSema) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the RM lock!\n"); + return NV_ERR_INVALID_LOCK_STATE; + } + + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, module) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the API lock!\n"); + osReleaseRmSema(pSys->pSema, NULL); + return NV_ERR_INVALID_LOCK_STATE; + } + + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, module) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the GPU lock!\n"); + rmApiLockRelease(); + osReleaseRmSema(pSys->pSema, NULL); + return NV_ERR_INVALID_LOCK_STATE; + } + + return NV_OK; +} + +/*! + * @brief Releases the locks acquired by rmLocksAcquireAll + */ +void +rmLocksReleaseAll(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + rmApiLockRelease(); + osReleaseRmSema(pSys->pSema, NULL); +} + + +NV_STATUS +workItemLocksAcquire(NvU32 gpuInstance, NvU32 flags, NvU32 *pReleaseLocks, NvU32 *pGpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu; + NvU32 grp; + NV_STATUS status = NV_OK; + + *pReleaseLocks = 0; + *pGpuMask = 0; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + { + status = osAcquireRmSema(pSys->pSema); + if (status != NV_OK) + goto done; + + *pReleaseLocks |= OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA; + } + + if ((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO)) + { + NvU32 apiLockFlags = RMAPI_LOCK_FLAGS_NONE; + NvU32 releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO) + { + apiLockFlags = RMAPI_LOCK_FLAGS_READ; + releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO; + } + + status = rmApiLockAcquire(apiLockFlags, RM_LOCK_MODULES_WORKITEM); + if (status != NV_OK) + goto done; + + *pReleaseLocks |= releaseFlags; + } + + if ((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RO)) + { + NvU32 gpuLockFlags = GPUS_LOCK_FLAGS_NONE; + NvU32 releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW; + + if (((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RO)) && + (pSys->gpuLockModuleMask & RM_LOCK_MODULE_GRP(RM_LOCK_MODULES_WORKITEM))) + { + gpuLockFlags = GPU_LOCK_FLAGS_READ; + releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO; + } + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) + grp = GPU_LOCK_GRP_ALL; + else if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) + grp = GPU_LOCK_GRP_DEVICE; + else // (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) + grp = GPU_LOCK_GRP_SUBDEVICE; + + status = rmGpuGroupLockAcquire(gpuInstance, grp, gpuLockFlags, + RM_LOCK_MODULES_WORKITEM, pGpuMask); + if (status != NV_OK) + goto done; + + // All of these call into the same function, just share the flag + *pReleaseLocks |= releaseFlags; + + pGpu = gpumgrGetGpu(gpuInstance); + if (pGpu == NULL) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (flags & 
OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY) + { + if (!FULL_GPU_SANITY_CHECK(pGpu) || + !pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, + "GPU isn't full power! gpuInstance = 0x%x.\n", + gpuInstance); + goto done; + } + } + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FOR_PM_RESUME) + { + if (!FULL_GPU_SANITY_FOR_PM_RESUME(pGpu)) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, + "GPU isn't full power and isn't in resume codepath! gpuInstance = 0x%x.\n", + gpuInstance); + goto done; + } + } + } + +done: + if (status != NV_OK) + { + workItemLocksRelease(*pReleaseLocks, *pGpuMask); + *pReleaseLocks = 0; + } + return status; +} + +void +workItemLocksRelease(NvU32 releaseLocks, NvU32 gpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) + { + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO) + { + rmGpuGroupLockRelease(gpuMask, GPU_LOCK_FLAGS_READ); + } + + if ((releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) || + (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO)) + { + rmApiLockRelease(); + } + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + { + osReleaseRmSema(pSys->pSema, NULL); + } +} + +// +// rmGpuGroupLockGetMask +// +// Given a GPU group ID this function returns the MASK for all GPUS in that group +// We skip the lookup for GPU_LOCK_GRP_MASK as that implies that the caller is aware of the mask +// +NV_STATUS +rmGpuGroupLockGetMask(NvU32 gpuInst, GPU_LOCK_GRP_ID gpuGrpId, GPU_MASK *pGpuMask) +{ + switch (gpuGrpId) + { + case GPU_LOCK_GRP_SUBDEVICE: + *pGpuMask = NVBIT(gpuInst); + break; + + case GPU_LOCK_GRP_DEVICE: + *pGpuMask = gpumgrGetGrpMaskFromGpuInst(gpuInst); + break; + + case GPU_LOCK_GRP_MASK: + break; + + case GPU_LOCK_GRP_ALL: + *pGpuMask = GPUS_LOCK_ALL; + break; + + default: + NV_ASSERT_FAILED("Unexpected gpuGrpId in gpu lock get mask"); + return NV_ERR_INVALID_ARGUMENT; + } + return NV_OK; +} + + +void threadPriorityStateAlloc(void) {} +void threadPriorityStateFree(void) {} +void threadPriorityThrottle(void) {} +void threadPriorityBoost(NvU64 *p, NvU64 *o) {} +void threadPriorityRestore(void) {} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c new file mode 100644 index 0000000..28a5f82 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c @@ -0,0 +1,270 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "core/locks.h" +#include "os/os.h" +#include "gpu_mgr/gpu_mgr.h" + +typedef struct +{ + PORT_SEMAPHORE *pLock; //rmInstanceId = (NvU64)sec * 1000000 + (NvU64)uSec; + + if (!pOS->osRmInitRm(pOS)) + { + status = NV_ERR_GENERIC; + goto failed; + } + + _sysNvSwitchDetection(pSys); + + // allocate locks, semaphores, whatever + status = rmLocksAlloc(pSys); + if (status != NV_OK) + goto failed; + + status = threadStateGlobalAlloc(); + if (status != NV_OK) + goto failed; + + status = rmapiInitialize(); + if (status != NV_OK) + goto failed; + + return NV_OK; + +failed: + _sysDeleteChildObjects(pSys); + + g_pSys = NULL; + + threadStateGlobalFree(); + + rmapiShutdown(); + rmLocksFree(pSys); + + return status; +} + +void +sysDestruct_IMPL(OBJSYS *pSys) +{ + // + // Any of these operations might fail but go ahead and + // attempt to free remaining resources before complaining. + // + listDestroy(&g_clientListBehindGpusLock); + listDestroy(&g_userInfoList); + + rmapiShutdown(); + osSyncWithRmDestroy(); + threadStateGlobalFree(); + rmLocksFree(pSys); + + // + // Free child objects + // + _sysDeleteChildObjects(pSys); + + g_pSys = NULL; + + RMTRACE_DESTROY(); + RMTRACE_DESTROY_NEW(); + +} + +// +// Create static system object offspring. +// +static NV_STATUS +_sysCreateChildObjects(OBJSYS *pSys) +{ + NV_STATUS status = NV_OK; + NvU32 i, n; + + n = NV_ARRAY_ELEMENTS32(sysChildObjects); + + for (i = 0; i < n; i++) + { + if (sysChildObjects[i].bDynamicConstruct) + { + NvLength offset = sysChildObjects[i].childOffset; + Dynamic **ppChild = reinterpretCast(reinterpretCast(pSys, NvU8*) + offset, Dynamic**); + Dynamic *pNewObj; + status = objCreateDynamic(&pNewObj, pSys, sysChildObjects[i].pClassInfo); + + if (status == NV_OK) + { + *ppChild = pNewObj; + } + } + else + { + // + // More cases should NOT be added to this list. OBJOS needs to be + // cleaned up to use the bDynamicConstruct path then this hack can + // be removed. 
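// For illustration only (entry layout assumed from the field accesses in
// this function, not confirmed by this patch): a new child class would
// instead get a sysChildObjects[] entry along the lines of
//
//     { .pClassInfo = classInfo(OBJFOO), .childOffset = NV_OFFSETOF(OBJSYS, pFoo), .bDynamicConstruct = NV_TRUE },
//
// and be built through the objCreateDynamic() path above.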
+ // + switch (sysChildObjects[i].pClassInfo->classId) + { + case classId(OBJOS): + status = _sysCreateOs(pSys); + break; + default: + NV_ASSERT(0); + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (status == NV_ERR_NOT_SUPPORTED) + status = NV_OK; + if (status != NV_OK) break; + } + + return status; +} + +static void +_sysDeleteChildObjects(OBJSYS *pSys) +{ + int i; + + osRmCapUnregister(&pSys->pOsRmCaps); + + for (i = NV_ARRAY_ELEMENTS32(sysChildObjects) - 1; i >= 0; i--) + { + NvLength offset = sysChildObjects[i].childOffset; + Dynamic **ppChild = reinterpretCast(reinterpretCast(pSys, NvU8*) + offset, Dynamic**); + objDelete(*ppChild); + *ppChild = NULL; + } +} + +static void +_sysRegistryOverrideResourceServer +( + OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NvU32 data32; + + // Set read-only API lock override + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_READONLY_API_LOCK, + &data32) == NV_OK) + { + NvU32 apiMask = 0; + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _ALLOC_RESOURCE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_ALLOC_RESOURCE); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _FREE_RESOURCE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_FREE_RESOURCE); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _MAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_MAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _UNMAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_UNMAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _INTER_MAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_INTER_MAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _INTER_UNMAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_INTER_UNMAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _CTRL, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_CTRL); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _COPY, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_COPY); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _SHARE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_SHARE); + + pSys->apiLockMask = apiMask; + } + else + { + pSys->apiLockMask = NVBIT(RS_API_CTRL); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_READONLY_API_LOCK_MODULE, + &data32) == NV_OK) + { + pSys->apiLockModuleMask = data32; + } + else + { + pSys->apiLockModuleMask = RM_LOCK_MODULE_GRP(RM_LOCK_MODULES_CLIENT); + } +} + +static void +_sysRegistryOverrideExternalFabricMgmt +( + OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NvU32 data32; + + // Set external fabric management property + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT, + &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _EXTERNAL_FABRIC_MGMT, _MODE, _ENABLE, data32)) + { + NV_PRINTF(LEVEL_INFO, + "Enabling external fabric management.\n"); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_TRUE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _EXTERNAL_FABRIC_MGMT, _MODE, _DISABLE, data32)) + { + NV_PRINTF(LEVEL_INFO, + "Disabling external fabric management.\n"); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_FALSE); + } + } +} + +void +sysEnableExternalFabricMgmt_IMPL +( + OBJSYS *pSys +) +{ + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_TRUE); + + NV_PRINTF(LEVEL_INFO, + "Enabling external fabric management for Proxy NvSwitch systems.\n"); +} + +void +sysForceInitFabricManagerState_IMPL +( + OBJSYS *pSys +) +{ + // + // We should only allow force init if there is no way to run the
fabric + // manager. For example, HGX-2 virtualization use-case. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) || + pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED)) + { + NV_ASSERT(0); + return; + } + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED, NV_TRUE); + + NV_PRINTF(LEVEL_INFO, + "Forcing fabric manager's state as initialized to unblock clients.\n"); +} + +static void +_sysNvSwitchDetection +( + OBJSYS *pSys +) +{ + + if (osIsNvswitchPresent()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT, NV_TRUE); + + NV_PRINTF(LEVEL_INFO, "NvSwitch is found in the system\n"); + + sysEnableExternalFabricMgmt(pSys); + } +} + +/*! + * @brief Initialize static system configuration data. + * + * @param[in] pSys SYSTEM object pointer + */ +static void +_sysInitStaticConfig(OBJSYS *pSys) +{ + portMemSet(&pSys->staticConfig, 0, sizeof(pSys->staticConfig)); + osInitSystemStaticConfig(&pSys->staticConfig); +} + +NV_STATUS +coreInitializeRm(void) +{ + NV_STATUS status; + OBJSYS *pSys = NULL; + + // + // Initialize libraries used by RM + // + + // Portable runtime init + status = portInitialize(); + if (status != NV_OK) + return status; + + // Required before any NvLog (NV_PRINTF) calls + NVLOG_INIT(NULL); + + // Required before any NV_PRINTF() calls + if (!DBG_INIT()) + { + status = NV_ERR_GENERIC; + return status; + } + + // + // Initialize OBJSYS which spawns all the RM internal modules + // + status = objCreate(&pSys, NVOC_NULL_OBJECT, OBJSYS); + + nvAssertInit(); + + return status; + } + +void +coreShutdownRm(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // + // Destruct OBJSYS which frees all the RM internal modules + // + objDelete(pSys); + + // + // Deinitalize libraries used by RM + // + nvAssertDestroy(); + + DBG_DESTROY(); + + NVLOG_DESTROY(); + + portShutdown(); +} + +// Obsolete RM init function -- code should migrate to new interfaces +NvS32 +RmInitRm(void) +{ + return (coreInitializeRm() == NV_OK); +} + +// Obsolete RM destroy function -- code should migrate to new interfaces +NvS32 +RmDestroyRm(void) +{ + coreShutdownRm(); + return NV_TRUE; +} + +static NV_STATUS +_sysCreateOs(OBJSYS *pSys) +{ + OBJOS *pOS; + NV_STATUS status; + + // RMCONFIG: only if OS is enabled :-) + RMCFG_MODULE_ENABLED_OR_BAIL(OS); + + status = objCreate(&pOS, pSys, OBJOS); + if (status != NV_OK) + { + return status; + } + + status = constructObjOS(pOS); + if (status != NV_OK) + { + objDelete(pOS); + return status; + } + + status = osRmCapRegisterSys(&pSys->pOsRmCaps); + if (status != NV_OK) + { + // + // Device objects needed for some access rights failed + // This is not system-critical since access rights are currently disabled, + // so continue booting, just log error. + // + // RS-TODO make this fail once RM Capabilities are enabled (Bug 2549938) + // + NV_PRINTF(LEVEL_ERROR, "RM Access Sys Cap creation failed: 0x%x\n", status); + } + + pSys->pOS = pOS; + + return NV_OK; +} + +NV_STATUS +sysCaptureState_IMPL(OBJSYS *pSys) +{ + return NV_OK; +} + +OBJOS* +sysGetOs_IMPL(OBJSYS *pSys) +{ + if (pSys->pOS) + return pSys->pOS; + + // + // A special case for any early 'get-object' calls for the OS + // object before there is an OS object. Some RC code called on + // DBG_BREAKPOINT assumes an OS object exists, and can cause a crash. 
+ // + PORT_BREAKPOINT_ALWAYS(); + + return NULL; +} + +void +sysInitRegistryOverrides_IMPL +( + OBJSYS *pSys +) +{ + OBJGPU *pGpu = NULL; + NvU32 data32 = 0; + + if (pSys->getProperty(pSys, + PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED)) + { + // The registry overrides, if any, have already been applied. + return; + } + + // Get some GPU - as of now we need some gpu to read registry. + pGpu = gpumgrGetSomeGpu(); + if (pGpu == NULL) + { + // Too early call ! we can not read the registry. + return; + } + + if ((osReadRegistryDword(pGpu, + NV_REG_STR_RM_ENABLE_EVENT_TRACER, &data32) == NV_OK) && data32 ) + { + RMTRACE_ENABLE(data32); + } + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_CLIENT_DATA_VALIDATION, &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _KERNEL_BUFFERS, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _KERNEL_BUFFERS, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _HANDLE, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _HANDLE, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _STRICT_CLIENT, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _STRICT_CLIENT, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _ALL, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_TRUE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_TRUE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _ALL, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_FALSE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_FALSE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_FALSE); + } + } + + pSys->setProperty(pSys, PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED, NV_TRUE); + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_STREAM_MEMOPS, + &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _STREAM_MEMOPS, _ENABLE, _YES, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_ENABLE_STREAM_MEMOPS, NV_TRUE); + } + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_PRIORITY_BOOST, + &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_PRIORITY_BOOST_DISABLE) + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_BOOST, NV_FALSE); + else + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_BOOST, NV_TRUE); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY, + &data32) == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US, data32); + } + + _sysRegistryOverrideExternalFabricMgmt(pSys, pGpu); + _sysRegistryOverrideResourceServer(pSys, pGpu); + + if (osBugCheckOnTimeoutEnabled()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT, NV_TRUE); + } +} + +void +sysApplyLockingPolicy_IMPL(OBJSYS *pSys) +{ + g_resServ.roTopLockApiMask = 
pSys->apiLockMask; +} + +NV_STATUS +sysSyncExternalFabricMgmtWAR_IMPL +( + OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS params; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + + params.bExternalFabricMgmt = pSys->getProperty(pSys, + PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED); + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalClient, + NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT, + ¶ms, sizeof(params)); + + return status; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c new file mode 100644 index 0000000..7138cbc --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c @@ -0,0 +1,1247 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +//***************************************************************************** +// +// This file contains code used for Thread State management +// +// Terminology: +// +// ISR: First level interrupt handler, acknowledge function (VMK) +// +// Deferred INT handler: DPC (Windows), Bottom-half (*nux), Interrupt handler (VMK) +// +//***************************************************************************** + +#include "core/core.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "os/os.h" +#include "containers/map.h" +#include "nvrm_registry.h" +#include "gpu/gpu.h" +#include "gpu/gpu_timeout.h" + +THREAD_STATE_DB threadStateDatabase; + +static void _threadStatePrintInfo(THREAD_STATE_NODE *pThreadNode) +{ + if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_PRINT_INFO_ENABLED) == 0) + return; + + if (pThreadNode != NULL) + { + NV_PRINTF(LEVEL_NOTICE, "Thread state:\n"); + NV_PRINTF(LEVEL_NOTICE, + "threadId: 0x%llx flags: 0x0%x\n", + pThreadNode->threadId, + pThreadNode->flags); + + NV_PRINTF(LEVEL_NOTICE, + "enterTime: 0x%llx Limits: nonComputeTime: 0x%llx computeTime: 0x%llx\n", + pThreadNode->timeout.enterTime, + pThreadNode->timeout.nonComputeTime, + pThreadNode->timeout.computeTime); + } +} + +static void _threadStateFreeProcessWork(THREAD_STATE_NODE *pThreadNode) +{ + PORT_UNREFERENCED_VARIABLE(pThreadNode); +} + +/** + * @brief allocate threadState which is per-cpu and per-GPU, only supporting lockless ISR + * + * @param[in/out] ppIsrlocklessThreadNode + * + * @return NV_OK if success, error otherwise + * + */ +static NV_STATUS _threadStateAllocPerCpuPerGpu(PPTHREAD_STATE_ISR_LOCKLESS ppIsrlocklessThreadNode) +{ + NvU32 allocSize; + PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode; + NvS32 i; + NvU32 coreCount = osGetMaximumCoreCount(); + + // Bug 789767 + threadStateDatabase.maxCPUs = 32; + if (coreCount > threadStateDatabase.maxCPUs) + threadStateDatabase.maxCPUs = coreCount; + + allocSize = threadStateDatabase.maxCPUs * sizeof(PTHREAD_STATE_ISR_LOCKLESS); + + pIsrlocklessThreadNode = portMemAllocNonPaged(allocSize); + if (pIsrlocklessThreadNode == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pIsrlocklessThreadNode, 0, allocSize); + allocSize = NV_MAX_DEVICES * sizeof(THREAD_STATE_NODE *); + + // Allocate thread node for each gpu per cpu. + for (i = 0; i < (NvS32)threadStateDatabase.maxCPUs; i++) + { + pIsrlocklessThreadNode[i].ppIsrThreadStateGpu = portMemAllocNonPaged(allocSize); + if (pIsrlocklessThreadNode[i].ppIsrThreadStateGpu == NULL) + { + for (--i; i >= 0; --i) + portMemFree(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu); + + portMemFree(pIsrlocklessThreadNode); + return NV_ERR_NO_MEMORY; + } + else + { + portMemSet(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu, 0, allocSize); + } + } + *ppIsrlocklessThreadNode = pIsrlocklessThreadNode; + return NV_OK; +} + +/** + * @brief free threadState which is per-cpu and per-GPU, only working for lockless ISR + * + * @param[in/out] pIsrlocklessThreadNode + * + */ +static void _threadStateFreePerCpuPerGpu(PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode) +{ + NvU32 i; + // Free any memory we allocated + if (pIsrlocklessThreadNode) + { + for (i = 0; i < threadStateDatabase.maxCPUs; i++) + portMemFree(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu); + portMemFree(pIsrlocklessThreadNode); + } +} + +/** + * @brief the main function to allocate the threadState + * + * @return NV_OK if the entire global threadState is created successfully, + * and an appropriate ERROR otherwise. 
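 *
 * Lifetime sketch (hypothetical caller): a successful call here must
 * eventually be balanced by threadStateGlobalFree(), which tears the state
 * down in reverse order (per-CPU tables, ISR/DPC node array, spinlock, TLS):
 *
 *     NV_ASSERT_OK_OR_RETURN(threadStateGlobalAlloc());
 *     ...
 *     threadStateGlobalFree();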
+ * + */ +NV_STATUS threadStateGlobalAlloc(void) +{ + NV_STATUS rmStatus; + NvU32 allocSize; + + NV_ASSERT(tlsInitialize() == NV_OK); + + // Init the thread sequencer id counter to 0. + threadStateDatabase.threadSeqCntr = 0; + + threadStateDatabase.spinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (threadStateDatabase.spinlock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + allocSize = NV_MAX_DEVICES * sizeof(THREAD_STATE_NODE *); + threadStateDatabase.ppISRDeferredIntHandlerThreadNode = portMemAllocNonPaged(allocSize); + if (threadStateDatabase.ppISRDeferredIntHandlerThreadNode == NULL) + { + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + return NV_ERR_NO_MEMORY; + } + portMemSet(threadStateDatabase.ppISRDeferredIntHandlerThreadNode, 0, allocSize); + + rmStatus = _threadStateAllocPerCpuPerGpu(&threadStateDatabase.pIsrlocklessThreadNode); + if (rmStatus != NV_OK) + { + portMemFree(threadStateDatabase.ppISRDeferredIntHandlerThreadNode); + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + return rmStatus; + } + + mapInitIntrusive(&threadStateDatabase.dbRoot); + mapInitIntrusive(&threadStateDatabase.dbRootPreempted); + + return rmStatus; +} + +void threadStateGlobalFree(void) +{ + // Disable all threadState usage once the spinlock is freed + threadStateDatabase.setupFlags = THREAD_STATE_SETUP_FLAGS_NONE; + + // Free any memory we allocated + _threadStateFreePerCpuPerGpu(threadStateDatabase.pIsrlocklessThreadNode); + threadStateDatabase.pIsrlocklessThreadNode = NULL; + + portMemFree(threadStateDatabase.ppISRDeferredIntHandlerThreadNode); + threadStateDatabase.ppISRDeferredIntHandlerThreadNode = NULL; + + if (threadStateDatabase.spinlock != NULL) + { + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + threadStateDatabase.spinlock = NULL; + } + + mapDestroy(&threadStateDatabase.dbRoot); + mapDestroy(&threadStateDatabase.dbRootPreempted); + + tlsShutdown(); +} + +void threadStateInitRegistryOverrides(OBJGPU *pGpu) +{ + NvU32 flags; + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_THREAD_STATE_SETUP_FLAGS, &flags) == NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Overriding threadStateDatabase.setupFlags from 0x%x to 0x%x\n", + threadStateDatabase.setupFlags, flags); + threadStateDatabase.setupFlags = flags; + } +} + +void threadStateInitSetupFlags(NvU32 flags) +{ + threadStateDatabase.timeout.nonComputeTimeoutMsecs = 0; + threadStateDatabase.timeout.computeTimeoutMsecs = 0; + threadStateDatabase.timeout.computeGpuMask = 0; + threadStateDatabase.setupFlags = flags; +} + +NvU32 threadStateGetSetupFlags(void) +{ + return threadStateDatabase.setupFlags; +} + +// +// Sets the nextCpuYieldTime field to a value that corresponds to a +// short time in the future. This value represents the next time that +// the osScheduler may be invoked, during long waits. 
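//
// Worked example (interval value hypothetical): osGetCurrentTick() reports
// nanoseconds, so with TIMEOUT_DEFAULT_OS_RESCHEDULE_INTERVAL_SECS == 2 the
// computation below yields
//
//     nextCpuYieldTime = timeInNs + 2 * 1000000 * 1000   // now + 2e9 ns = now + 2 s
//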
+//
+static void _threadStateSetNextCpuYieldTime(THREAD_STATE_NODE *pThreadNode)
+{
+    NvU64 timeInNs;
+    osGetCurrentTick(&timeInNs);
+
+    pThreadNode->timeout.nextCpuYieldTime = timeInNs +
+        (TIMEOUT_DEFAULT_OS_RESCHEDULE_INTERVAL_SECS) * 1000000 * 1000;
+}
+
+void threadStateYieldCpuIfNecessary(OBJGPU *pGpu)
+{
+    NV_STATUS rmStatus;
+    THREAD_STATE_NODE *pThreadNode = NULL;
+    NvU64 timeInNs;
+
+    rmStatus = threadStateGetCurrent(&pThreadNode, pGpu);
+    if ((rmStatus == NV_OK) && pThreadNode)
+    {
+        osGetCurrentTick(&timeInNs);
+        if (timeInNs >= pThreadNode->timeout.nextCpuYieldTime)
+        {
+            if (NV_OK == osSchedule())
+            {
+                NV_PRINTF(LEVEL_WARNING, "Yielding\n");
+            }
+
+            _threadStateSetNextCpuYieldTime(pThreadNode);
+        }
+    }
+}
+
+static NV_STATUS _threadNodeInitTime(THREAD_STATE_NODE *pThreadNode)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU64 timeInNs;
+    NvBool firstInit;
+    NvU64 computeTimeoutMsecs;
+    NvU64 nonComputeTimeoutMsecs;
+    NvBool bIsDpcOrIsr = !!(pThreadNode->flags &
+                            (THREAD_STATE_FLAGS_IS_ISR |
+                             THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING |
+                             THREAD_STATE_FLAGS_IS_ISR_LOCKLESS));
+
+    //
+    // _threadNodeInitTime() is used both for the first init and
+    // threadStateResetTimeout(). We can tell the two apart by checking whether
+    // enterTime has been initialized already.
+    //
+    firstInit = (pThreadNode->timeout.enterTime == 0);
+
+    computeTimeoutMsecs = threadStateDatabase.timeout.computeTimeoutMsecs;
+    nonComputeTimeoutMsecs = threadStateDatabase.timeout.nonComputeTimeoutMsecs;
+
+    //
+    // If we are in DPC or ISR contexts, the driver needs to time out before
+    // OS watchdog mechanisms kick in and panic the kernel
+    //
+    if (bIsDpcOrIsr)
+    {
+        //
+        // Note that MODS does not have interrupt timeout requirements and there are
+        // existing code paths that violate the timeout
+        //
+        computeTimeoutMsecs = 500;
+        nonComputeTimeoutMsecs = 500;
+    }
+
+    osGetCurrentTick(&timeInNs);
+
+    if (firstInit)
+    {
+        //
+        // Save off the time we first entered the RM. We do not
+        // want to reset this if we call threadStateResetTimeout()
+        //
+        pThreadNode->timeout.enterTime = timeInNs;
+    }
+
+    if (pThreadNode->timeout.overrideTimeoutMsecs)
+    {
+        nonComputeTimeoutMsecs = pThreadNode->timeout.overrideTimeoutMsecs;
+        computeTimeoutMsecs = pThreadNode->timeout.overrideTimeoutMsecs;
+    }
+
+    _threadStateSetNextCpuYieldTime(pThreadNode);
+
+    if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER)
+    {
+        pThreadNode->timeout.nonComputeTime = timeInNs + (nonComputeTimeoutMsecs * 1000 * 1000);
+        pThreadNode->timeout.computeTime = timeInNs + (computeTimeoutMsecs * 1000 * 1000);
+    }
+    else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY)
+    {
+        // Convert from msecs (1,000) to usecs (1,000,000)
+        pThreadNode->timeout.nonComputeTime = nonComputeTimeoutMsecs * 1000;
+        pThreadNode->timeout.computeTime = computeTimeoutMsecs * 1000;
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Bad threadStateDatabase.timeout.flags: 0x%x!\n",
+                  threadStateDatabase.timeout.flags);
+
+        rmStatus = NV_ERR_INVALID_STATE;
+    }
+
+    return rmStatus;
+}
+
+static void _getTimeoutDataFromGpuMode(
+    OBJGPU *pGpu,
+    THREAD_STATE_NODE *pThreadNode,
+    NvU64 **ppThreadNodeTime,
+    NvU64 *pThreadStateDatabaseTimeoutMsecs)
+{
+    if (pGpu)
+    {
+        if (threadStateDatabase.timeout.computeGpuMask & NVBIT(pGpu->gpuInstance))
+        {
+            *ppThreadNodeTime = &pThreadNode->timeout.computeTime;
+        }
+        else
+        {
+            *ppThreadNodeTime = &pThreadNode->timeout.nonComputeTime;
+        }
+
+        *pThreadStateDatabaseTimeoutMsecs =
+            NV_MAX(threadStateDatabase.timeout.computeTimeoutMsecs, threadStateDatabase.timeout.nonComputeTimeoutMsecs);
+    }
+}
+
+//
+// The logic in _threadNodeCheckTimeout() should closely resemble
+// that of _gpuCheckTimeout().
+//
+static NV_STATUS _threadNodeCheckTimeout(OBJGPU *pGpu, THREAD_STATE_NODE *pThreadNode, NvU64 *pElapsedTimeUs)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU64 threadStateDatabaseTimeoutMsecs = 0;
+    NvU64 *pThreadNodeTime = NULL;
+    NvU64 timeInNs;
+
+    if (pGpu)
+    {
+        if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+        {
+            NV_PRINTF(LEVEL_ERROR, "API_GPU_ATTACHED_SANITY_CHECK failed!\n");
+            return NV_ERR_TIMEOUT;
+        }
+    }
+
+    _getTimeoutDataFromGpuMode(pGpu, pThreadNode, &pThreadNodeTime,
+                               &threadStateDatabaseTimeoutMsecs);
+    if ((threadStateDatabaseTimeoutMsecs == 0) ||
+        (pThreadNodeTime == NULL))
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "threadStateDatabaseTimeoutMsecs was 0 or pThreadNodeTime was NULL!\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    osGetCurrentTick(&timeInNs);
+    if (pElapsedTimeUs)
+    {
+        *pElapsedTimeUs = (timeInNs - pThreadNode->timeout.enterTime) / 1000;
+    }
+
+    if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER)
+    {
+        if (timeInNs >= *pThreadNodeTime)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "_threadNodeCheckTimeout: currentTime: %llx >= %llx\n",
+                      timeInNs, *pThreadNodeTime);
+
+            rmStatus = NV_ERR_TIMEOUT;
+        }
+    }
+    else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY)
+    {
+        osDelayUs(100);
+        *pThreadNodeTime -= NV_MIN(100, *pThreadNodeTime);
+        if (*pThreadNodeTime == 0)
+        {
+            rmStatus = NV_ERR_TIMEOUT;
+        }
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "_threadNodeCheckTimeout: Unsupported timeout.flags: 0x%x!\n",
+                  threadStateDatabase.timeout.flags);
+
+        rmStatus = NV_ERR_INVALID_STATE;
+    }
+
+    if (rmStatus == NV_ERR_TIMEOUT)
+    {
+        // Report the time this thread entered the RM
+        _threadStatePrintInfo(pThreadNode);
+
+        // This is set via osGetTimeoutParams per platform
+        NV_PRINTF(LEVEL_ERROR,
+                  "_threadNodeCheckTimeout: Timeout was set to: %lld msecs!\n",
+                  threadStateDatabaseTimeoutMsecs);
+
+        if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ASSERT_ON_TIMEOUT_ENABLED)
+        {
+            NV_ASSERT(0);
+        }
+
+        if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_RESET_ON_TIMEOUT_ENABLED)
+        {
+            threadStateResetTimeout(pGpu);
+        }
+    }
+
+    return rmStatus;
+}
+
+static void _threadStateFreeInvokeCallbacks
+(
+    THREAD_STATE_NODE *pThreadNode
+)
+{
+    THREAD_STATE_FREE_CALLBACK *pCbListNode;
+
+    NV_ASSERT_OR_RETURN_VOID(pThreadNode->flags &
+                             THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED);
+
+    // Start from head to maintain FIFO semantics.
+    while ((pCbListNode = listHead(&pThreadNode->cbList)) != NULL)
+    {
+        (*pCbListNode->pCb)(pCbListNode->pCbData);
+        listRemove(&pThreadNode->cbList, pCbListNode);
+    }
+}
+
+static void _threadStateLogInitCaller(THREAD_STATE_NODE *pThreadNode, NvU64 funcAddr)
+{
+    threadStateDatabase.traceInfo.entries[threadStateDatabase.traceInfo.index].callerRA = funcAddr;
+    threadStateDatabase.traceInfo.entries[threadStateDatabase.traceInfo.index].flags = pThreadNode->flags;
+    threadStateDatabase.traceInfo.index =
+        (threadStateDatabase.traceInfo.index + 1) % THREAD_STATE_TRACE_MAX_ENTRIES;
+}
+
+/**
+ * @brief Initialize a threadState for regular threads (non-interrupt context)
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] flags
+ *
+ */
+void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags)
+{
+    NV_STATUS rmStatus;
+    NvU64 funcAddr;
+
+    // ISRs should be using the threadStateInitISR* initializers instead.
+    NV_ASSERT((flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS |
+                        THREAD_STATE_FLAGS_IS_ISR |
+                        THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)) == 0);
+
+    // Check to see if ThreadState is enabled
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    portMemSet(pThreadNode, 0, sizeof(*pThreadNode));
+    pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr);
+    pThreadNode->cpuNum = osGetCurrentProcessorNumber();
+    pThreadNode->flags = flags;
+
+    //
+    // The thread state free callbacks are only supported in the non-ISR paths
+    // as they invoke memory allocation routines.
+    //
+    listInit(&pThreadNode->cbList, portMemAllocatorGetGlobalNonPaged());
+    pThreadNode->flags |= THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED;
+
+    rmStatus = _threadNodeInitTime(pThreadNode);
+    if (rmStatus == NV_OK)
+        pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED;
+
+    rmStatus = osGetCurrentThread(&pThreadNode->threadId);
+    if (rmStatus != NV_OK)
+        return;
+
+    NV_ASSERT_OR_RETURN_VOID(pThreadNode->cpuNum < threadStateDatabase.maxCPUs);
+
+    funcAddr = (NvU64) (NV_RETURN_ADDRESS());
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    if (!mapInsertExisting(&threadStateDatabase.dbRoot, (NvU64)pThreadNode->threadId, pThreadNode))
+    {
+        rmStatus = NV_ERR_OBJECT_NOT_FOUND;
+        // Place in the Preempted List if threadId is already present in the API list
+        if (mapInsertExisting(&threadStateDatabase.dbRootPreempted, (NvU64)pThreadNode->threadId, pThreadNode))
+        {
+            pThreadNode->flags |= THREAD_STATE_FLAGS_PLACED_ON_PREEMPT_LIST;
+            pThreadNode->bValid = NV_TRUE;
+            rmStatus = NV_OK;
+        }
+        else
+        {
+            // Reset the threadId as insertion failed on both maps; bValid is already NV_FALSE.
+            pThreadNode->threadId = 0;
+            portSyncSpinlockRelease(threadStateDatabase.spinlock);
+            return;
+        }
+    }
+    else
+    {
+        pThreadNode->bValid = NV_TRUE;
+        rmStatus = NV_OK;
+    }
+
+    _threadStateLogInitCaller(pThreadNode, funcAddr);
+
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+
+    _threadStatePrintInfo(pThreadNode);
+
+    NV_ASSERT(rmStatus == NV_OK);
+    threadPriorityStateAlloc();
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE);
+        NV_ASSERT_OR_RETURN_VOID(pTls != NULL);
+        if (*pTls != NULL)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n",
+                      *pTls, pThreadNode);
+        }
+        *pTls = pThreadNode;
+    }
+}
+
+/**
+ * @brief Initialize a threadState for locked ISR and Bottom-half
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] pGpu
+ * @param[in] flags THREAD_STATE_FLAGS_IS_ISR or THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING
+ *
+ */
+void threadStateInitISRAndDeferredIntHandler
+(
+    THREAD_STATE_NODE *pThreadNode,
+    OBJGPU *pGpu,
+    NvU32 flags
+)
+{
+    NV_STATUS rmStatus;
+
+    NV_ASSERT(pGpu);
+
+    // Non-interrupt paths should be using threadStateInit() instead.
+    NV_ASSERT(flags & (THREAD_STATE_FLAGS_IS_ISR | THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING));
+
+    portMemSet(pThreadNode, 0, sizeof(*pThreadNode));
+    pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr);
+    pThreadNode->cpuNum = osGetCurrentProcessorNumber();
+    pThreadNode->flags = flags;
+
+    rmStatus = _threadNodeInitTime(pThreadNode);
+
+    if (rmStatus == NV_OK)
+        pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED;
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE);
+        NV_ASSERT_OR_GOTO(pTls != NULL, TlsMirror_Exit);
+        if (*pTls != NULL)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n",
+                      *pTls, pThreadNode);
+        }
+        *pTls = pThreadNode;
+    }
+TlsMirror_Exit:
+
+    rmStatus = osGetCurrentThread(&pThreadNode->threadId);
+    if (rmStatus != NV_OK)
+        return;
+
+    threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance] = pThreadNode;
+}
+
+/**
+ * @brief Initialize a threadState for lockless ISR
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] pGpu
+ * @param[in] flags THREAD_STATE_FLAGS_IS_ISR_LOCKLESS
+ *
+ */
+void threadStateInitISRLockless(THREAD_STATE_NODE *pThreadNode, OBJGPU *pGpu, NvU32 flags)
+{
+    NV_STATUS rmStatus;
+    PTHREAD_STATE_ISR_LOCKLESS pThreadStateIsrLockless;
+
+    NV_ASSERT(flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
+
+    // Check to see if ThreadState is enabled
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    portMemSet(pThreadNode, 0, sizeof(*pThreadNode));
+    pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr);
+    pThreadNode->cpuNum = osGetCurrentProcessorNumber();
+    pThreadNode->flags = flags;
+
+    rmStatus = _threadNodeInitTime(pThreadNode);
+    if (rmStatus == NV_OK)
+        pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED;
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE);
+        NV_ASSERT_OR_GOTO(pTls != NULL, TlsMirror_Exit);
+        if (*pTls != NULL)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n",
+                      *pTls, pThreadNode);
+        }
+        *pTls = pThreadNode;
+    }
+TlsMirror_Exit:
+
+    rmStatus = osGetCurrentThread(&pThreadNode->threadId);
+    if (rmStatus != NV_OK)
+        return;
+
+    NV_ASSERT_OR_RETURN_VOID(pThreadNode->cpuNum < threadStateDatabase.maxCPUs);
+
+    //
+    // We use a cpu/gpu indexed structure to store the threadNode pointer
+    // instead of a tree indexed by threadId because threadId is no longer
+    // unique in an ISR. We also need to index by both cpu num and gpu instance
+    // because ISRs can preempt one another, and run on the same processor
+    // at the same time.
+    //
+    pThreadStateIsrLockless = &threadStateDatabase.pIsrlocklessThreadNode[pThreadNode->cpuNum];
+    NV_ASSERT(pThreadStateIsrLockless->ppIsrThreadStateGpu[pGpu->gpuInstance] == NULL);
+    pThreadStateIsrLockless->ppIsrThreadStateGpu[pGpu->gpuInstance] = pThreadNode;
+}
+
+/**
+ * @brief Free the thread state for locked ISR and bottom-half
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] pGpu
+ * @param[in] flags THREAD_STATE_FLAGS_IS_ISR or THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING
+ *
+ */
+void threadStateFreeISRAndDeferredIntHandler
+(
+    THREAD_STATE_NODE *pThreadNode,
+    OBJGPU *pGpu,
+    NvU32 flags
+)
+{
+    NV_STATUS rmStatus;
+
+    NV_ASSERT_OR_RETURN_VOID(pGpu &&
+        (flags & (THREAD_STATE_FLAGS_IS_ISR | THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)));
+
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    // Process any work needed before exiting.
+    _threadStateFreeProcessWork(pThreadNode);
+
+    if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED)
+    {
+        rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/);
+        NV_ASSERT(rmStatus == NV_OK);
+    }
+
+    threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance] = NULL;
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        NvU32 r;
+        THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE));
+        NV_ASSERT(pTlsNode);
+        if (pTlsNode != pThreadNode)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n",
+                      pTlsNode, pThreadNode);
+        }
+        r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE);
+        if (r != 0)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n",
+                      r);
+        }
+    }
+}
+
+/**
+ * @brief Free the thread state for a regular thread
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] flags
+ *
+ */
+void threadStateFree(THREAD_STATE_NODE *pThreadNode, NvU32 flags)
+{
+    NV_STATUS rmStatus;
+    THREAD_STATE_NODE *pNode;
+    ThreadStateNodeMap *pMap;
+
+    NV_ASSERT((flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS |
+                        THREAD_STATE_FLAGS_IS_ISR |
+                        THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)) == 0);
+
+    // Check to see if ThreadState is enabled
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    if (!(flags & THREAD_STATE_FLAGS_EXCLUSIVE_RUNNING))
+    {
+        //
+        // Do not do this for exclusive running threads as all the info
+        // is not filled in.
+        //
+        if (!pThreadNode->bValid && pThreadNode->threadId == 0)
+            return;
+    }
+
+    _threadStateFreeInvokeCallbacks(pThreadNode);
+
+    listDestroy(&pThreadNode->cbList);
+
+    // Process any work needed before exiting.
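+    // (_threadStateFreeProcessWork() is currently an empty stub; see its
+    // definition near the top of this file.)
+    //
+    // For reference, a typical RM entry point brackets its work with an
+    // init/free pair (sketch only; the flag value is illustrative):
+    //
+    //     THREAD_STATE_NODE node;
+    //     threadStateInit(&node, THREAD_STATE_FLAGS_NONE);
+    //     // ... RM entry point body ...
+    //     threadStateFree(&node, THREAD_STATE_FLAGS_NONE);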
+    _threadStateFreeProcessWork(pThreadNode);
+
+    if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED)
+    {
+        rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/);
+        NV_ASSERT(rmStatus == NV_OK);
+    }
+
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    if (pThreadNode->flags & THREAD_STATE_FLAGS_PLACED_ON_PREEMPT_LIST)
+    {
+        pMap = &threadStateDatabase.dbRootPreempted;
+    }
+    else
+    {
+        pMap = &threadStateDatabase.dbRoot;
+    }
+
+    pNode = mapFind(pMap, (NvU64)pThreadNode->threadId);
+
+    if (pNode != NULL)
+    {
+        mapRemove(pMap, pThreadNode);
+        pThreadNode->bValid = NV_FALSE;
+        rmStatus = NV_OK;
+    }
+    else
+    {
+        rmStatus = NV_ERR_OBJECT_NOT_FOUND;
+    }
+
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+
+    _threadStatePrintInfo(pThreadNode);
+
+    NV_ASSERT(rmStatus == NV_OK);
+
+    threadPriorityStateFree();
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        NvU32 r;
+        THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE));
+        NV_ASSERT(pTlsNode);
+        if (pTlsNode != pThreadNode)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n",
+                      pTlsNode, pThreadNode);
+        }
+        r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE);
+        if (r != 0)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n",
+                      r);
+        }
+    }
+}
+
+/**
+ * @brief Free thread state for lockless ISR
+ *
+ * @param[in/out] pThreadNode
+ * @param[in] pGpu
+ * @param[in] flags
+ *
+ */
+void threadStateFreeISRLockless(THREAD_STATE_NODE *pThreadNode, OBJGPU *pGpu, NvU32 flags)
+{
+    NV_STATUS rmStatus = NV_OK;
+    PTHREAD_STATE_ISR_LOCKLESS pThreadStateIsrlockless;
+
+    NV_ASSERT(flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS | THREAD_STATE_FLAGS_IS_ISR));
+    NV_ASSERT(pThreadNode->cpuNum == osGetCurrentProcessorNumber());
+
+    // Check to see if ThreadState is enabled
+    if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED))
+        return;
+
+    // Process any work needed before exiting.
+    _threadStateFreeProcessWork(pThreadNode);
+
+    if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED)
+    {
+        rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/);
+        NV_ASSERT(rmStatus == NV_OK);
+    }
+
+    pThreadStateIsrlockless = &threadStateDatabase.pIsrlocklessThreadNode[pThreadNode->cpuNum];
+    NV_ASSERT(pThreadStateIsrlockless->ppIsrThreadStateGpu[pGpu->gpuInstance] != NULL);
+    pThreadStateIsrlockless->ppIsrThreadStateGpu[pGpu->gpuInstance] = NULL;
+
+    if (TLS_MIRROR_THREADSTATE)
+    {
+        NvU32 r;
+        THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE));
+        NV_ASSERT(pTlsNode);
+        if (pTlsNode != pThreadNode)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n",
+                      pTlsNode, pThreadNode);
+        }
+        r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE);
+        if (r != 0)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n",
+                      r);
+        }
+    }
+}
+
+/**
+ * @brief Get the thread state for a given threadId
+ *
+ * @param[in] threadId
+ * @param[in] pGpu
+ * @param[out] ppThreadNode
+ *
+ * @return NV_OK if we are able to locate the thread state for threadId,
+ *         NV_ERR_OBJECT_NOT_FOUND if we can't find threadId inside the map,
+ *         NV_ERR_INVALID_STATE if the thread state is not enabled or the CPU
+ *         has been hotplugged.
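+ *
+ * Lookup order, mirroring the body below: the per-CPU lockless-ISR slot for
+ * this GPU, then the per-GPU ISR/DPC slot, then the preempted map, and
+ * finally the API map.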
+ */
+static NV_STATUS _threadStateGet
+(
+    OS_THREAD_HANDLE threadId,
+    OBJGPU *pGpu,
+    THREAD_STATE_NODE **ppThreadNode
+)
+{
+    THREAD_STATE_NODE *pNode;
+
+    // Check to see if ThreadState is enabled
+    if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED) == NV_FALSE)
+    {
+        *ppThreadNode = NULL;
+        return NV_ERR_INVALID_STATE;
+    }
+    else
+    {
+        NvU32 cpuNum = osGetCurrentProcessorNumber();
+        THREAD_STATE_NODE *pIsrlocklessThreadNode;
+        THREAD_STATE_NODE *pISRDeferredIntHandlerNode;
+
+        if (cpuNum >= threadStateDatabase.maxCPUs)
+        {
+            NV_ASSERT(0);
+            *ppThreadNode = NULL;
+            return NV_ERR_INVALID_STATE;
+        }
+
+        //
+        // Several threadState call sites will not pass a pGpu because it is
+        // not easily available, and they are not running in interrupt context.
+        // _threadStateGet() only needs pGpu to find the thread node when
+        // called from an ISR, so such call sites assume they will never be
+        // in interrupt context.
+        //
+        if (pGpu)
+        {
+            // Check to see if this is a lockless ISR thread.
+            pIsrlocklessThreadNode = threadStateDatabase.pIsrlocklessThreadNode[cpuNum].ppIsrThreadStateGpu[pGpu->gpuInstance];
+            if (pIsrlocklessThreadNode && (pIsrlocklessThreadNode->threadId == threadId))
+            {
+                *ppThreadNode = pIsrlocklessThreadNode;
+                return NV_OK;
+            }
+
+            // Check to see if this is an ISR or bottom-half thread
+            pISRDeferredIntHandlerNode = threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance];
+            if (pISRDeferredIntHandlerNode && (pISRDeferredIntHandlerNode->threadId == threadId))
+            {
+                *ppThreadNode = pISRDeferredIntHandlerNode;
+                return NV_OK;
+            }
+        }
+    }
+
+    // Try the Preempted list first before trying the API list
+    portSyncSpinlockAcquire(threadStateDatabase.spinlock);
+    pNode = mapFind(&threadStateDatabase.dbRootPreempted, (NvU64) threadId);
+    if (pNode == NULL)
+    {
+        // Not found on the Preempted list, try the API list
+        pNode = mapFind(&threadStateDatabase.dbRoot, (NvU64) threadId);
+    }
+    portSyncSpinlockRelease(threadStateDatabase.spinlock);
+
+    *ppThreadNode = pNode;
+    if (pNode != NULL)
+    {
+        NV_ASSERT((*ppThreadNode)->threadId == threadId);
+        return NV_OK;
+    }
+    else
+    {
+        return NV_ERR_OBJECT_NOT_FOUND;
+    }
+}
+
+NV_STATUS threadStateGetCurrentUnchecked(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu)
+{
+    NV_STATUS rmStatus;
+    OS_THREAD_HANDLE threadId;
+
+    // Check to see if ThreadState is enabled
+    if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED) == NV_FALSE)
+    {
+        *ppThreadNode = NULL;
+        return NV_ERR_INVALID_STATE;
+    }
+
+    rmStatus = osGetCurrentThread(&threadId);
+    if (rmStatus == NV_OK)
+    {
+        rmStatus = _threadStateGet(threadId, pGpu, ppThreadNode);
+    }
+
+    // Assert if the current lookup failed - Please add the stack from this assert to bug 690089.
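+    //
+    // (The assert below is opt-in: it fires only when
+    // THREAD_STATE_SETUP_FLAGS_ASSERT_ON_FAILED_LOOKUP_ENABLED is set in the
+    // setup flags, e.g. via the NV_REG_STR_RM_THREAD_STATE_SETUP_FLAGS
+    // registry override handled in threadStateInitRegistryOverrides().)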
+ if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ASSERT_ON_FAILED_LOOKUP_ENABLED) + { + NV_PRINTF(LEVEL_ERROR, + "threadState[Init,Free] call may be missing from this RM entry point!\n"); + NV_ASSERT(rmStatus == NV_OK); + } + + return rmStatus; +} + +NV_STATUS threadStateGetCurrent(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu) +{ + NV_STATUS status = threadStateGetCurrentUnchecked(ppThreadNode, pGpu); + + if (TLS_MIRROR_THREADSTATE) + { + THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE)); + + if ((status == NV_OK) && (pTlsNode != *ppThreadNode)) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: pTlsNode=%p, *ppThreadNode=%p; ThreadID = %llx (NvPort:%llx), sp=%p\n", + pTlsNode, *ppThreadNode, + (NvU64)(*ppThreadNode)->threadId, + portThreadGetCurrentThreadId(), &status); + + } + else if ((status != NV_OK) && (pTlsNode != NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: ThreadNode not found (status=0x%08x), but found in TLS:%p (tid=%llx;sp=%p)\n", + status, pTlsNode, + portThreadGetCurrentThreadId(), &status); + } + } + + return status; +} + +// +// Sets the timeout value and method of timeout +// +NV_STATUS threadStateInitTimeout(OBJGPU *pGpu, NvU32 timeoutUs, NvU32 flags) +{ + NvU32 timeoutMsecs = (timeoutUs / 1000); + NvU32 gpuMode = gpuGetMode(pGpu); + NvU32 scaleIgnored = 0; + NvU32 flagsIgnored = 0; + NvU32 perOSTimeoutUs = 999; // What we'll see if osGetTimeoutParams ever fails + + if (gpuMode == NV_GPU_MODE_GRAPHICS_MODE) + { + threadStateDatabase.timeout.nonComputeTimeoutMsecs = timeoutMsecs; + threadStateDatabase.timeout.computeGpuMask &= ~NVBIT(pGpu->gpuInstance); + } + else + { + threadStateDatabase.timeout.computeGpuMask |= NVBIT(pGpu->gpuInstance); + } + // + // Initializing the compute timeout limits in all cases, but use + // per-OS values: + // + osGetTimeoutParams(pGpu, &perOSTimeoutUs, &scaleIgnored, &flagsIgnored); + timeoutMsecs = (perOSTimeoutUs / 1000); + timeoutMsecs = gpuScaleTimeout(pGpu, timeoutMsecs); + + threadStateDatabase.timeout.computeTimeoutMsecs = timeoutMsecs; + threadStateDatabase.timeout.flags = flags; + + return NV_OK; +} + +// +// Resets the current threadId time +// +NV_STATUS threadStateResetTimeout(OBJGPU *pGpu) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pThreadNode = NULL; + + // Check to see if ThreadState Timeout is enabled + if ((threadStateDatabase.setupFlags & + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED) == NV_FALSE) + { + return NV_ERR_INVALID_STATE; + } + + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if ((rmStatus == NV_OK) && pThreadNode ) + { + // Reset the timeout + rmStatus = _threadNodeInitTime(pThreadNode); + if (rmStatus == NV_OK) + { + pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED; + _threadStatePrintInfo(pThreadNode); + } + } + + return rmStatus; +} + +void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum) +{ + + // If this is release and we have RmBreakOnRC on -- Stop +#ifndef DEBUG + OBJSYS *pSys = SYS_GET_INSTANCE(); + if (DRF_VAL(_DEBUG, _BREAK_FLAGS, _GPU_TIMEOUT, pSys->debugFlags) == + NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_ENABLE) + { + DBG_BREAKPOINT(); + } +#endif +} + +// +// Checks the current threadId time against a set timeout period +// +NV_STATUS threadStateCheckTimeout(OBJGPU *pGpu, NvU64 *pElapsedTimeUs) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pThreadNode = NULL; + + if (pElapsedTimeUs) + *pElapsedTimeUs = 0; + + // + // Make sure the DB has been initialized, we have a valid threadId, + 
// and that the Timeout logic is enabled
+    //
+    if ((threadStateDatabase.setupFlags &
+         THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED) == NV_FALSE)
+    {
+        return NV_ERR_INVALID_STATE;
+    }
+    if (threadStateDatabase.timeout.flags == 0)
+    {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    rmStatus = threadStateGetCurrent(&pThreadNode, pGpu);
+    if ((rmStatus == NV_OK) && pThreadNode)
+    {
+        if (pThreadNode->flags & THREAD_STATE_FLAGS_TIMEOUT_INITED)
+        {
+            rmStatus = _threadNodeCheckTimeout(pGpu, pThreadNode, pElapsedTimeUs);
+        }
+        else
+        {
+            rmStatus = NV_ERR_INVALID_STATE;
+        }
+    }
+
+    return rmStatus;
+}
+
+//
+// Set override timeout value for the specified thread
+//
+void threadStateSetTimeoutOverride(THREAD_STATE_NODE *pThreadNode, NvU64 newTimeoutMs)
+{
+    NvU64 timeInNs;
+
+    pThreadNode->timeout.overrideTimeoutMsecs = newTimeoutMs;
+
+    osGetCurrentTick(&timeInNs);
+
+    _threadStateSetNextCpuYieldTime(pThreadNode);
+
+    if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER)
+    {
+        pThreadNode->timeout.nonComputeTime = timeInNs + (newTimeoutMs * 1000 * 1000);
+        pThreadNode->timeout.computeTime = timeInNs + (newTimeoutMs * 1000 * 1000);
+    }
+    else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY)
+    {
+        // Convert from msecs (1,000) to usecs (1,000,000)
+        pThreadNode->timeout.nonComputeTime = newTimeoutMs * 1000;
+        pThreadNode->timeout.computeTime = newTimeoutMs * 1000;
+    }
+}
+
+NV_STATUS threadStateEnqueueCallbackOnFree
+(
+    THREAD_STATE_NODE *pThreadNode,
+    THREAD_STATE_FREE_CALLBACK *pCallback
+)
+{
+    THREAD_STATE_FREE_CALLBACK *pCbListNode;
+
+    // Validate the arguments before dereferencing pThreadNode.
+    if ((pThreadNode == NULL) || (pCallback == NULL) ||
+        (pCallback->pCb == NULL))
+        return NV_ERR_INVALID_ARGUMENT;
+
+    if (!(pThreadNode->flags & THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED))
+        return NV_ERR_INVALID_OPERATION;
+
+    // Add from tail to maintain FIFO semantics.
+    pCbListNode = listAppendNew(&pThreadNode->cbList);
+    if (pCbListNode == NULL)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    pCbListNode->pCb = pCallback->pCb;
+    pCbListNode->pCbData = pCallback->pCbData;
+
+    return NV_OK;
+}
+
+void threadStateRemoveCallbackOnFree
+(
+    THREAD_STATE_NODE *pThreadNode,
+    THREAD_STATE_FREE_CALLBACK *pCallback
+)
+{
+    THREAD_STATE_FREE_CALLBACK *pCbListNode;
+
+    NV_ASSERT_OR_RETURN_VOID(pThreadNode->flags &
+                             THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED);
+
+    //
+    // Remove doesn't need to obey FIFO semantics.
+    //
+    // Must remove only one entry per call to be symmetric with
+    // threadStateEnqueueCallbackOnFree(). It is the caller's responsibility to
+    // invoke this API repeatedly as needed.
+    //
+    for (pCbListNode = listHead(&pThreadNode->cbList);
+         pCbListNode != NULL;
+         pCbListNode = listNext(&pThreadNode->cbList, pCbListNode))
+    {
+        if ((pCbListNode->pCb == pCallback->pCb) &&
+            (pCbListNode->pCbData == pCallback->pCbData))
+        {
+            listRemove(&pThreadNode->cbList, pCbListNode);
+            return;
+        }
+    }
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c
new file mode 100644
index 0000000..677d372
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c
@@ -0,0 +1,727 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlog/nvlog.h" +#include "nvrm_registry.h" +#include "os/os.h" +#include "diagnostics/tracer.h" +#include "tls/tls.h" +#include "core/locks.h" + +// +// Buffer push method declarations +// +NvBool nvlogRingBufferPush (NVLOG_BUFFER *pBuffer, NvU8 *pData, NvU32 dataSize); +NvBool nvlogNowrapBufferPush(NVLOG_BUFFER *pBuffer, NvU8 *pData, NvU32 dataSize); +NvBool nvlogStringBufferPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize); +NvBool nvlogKernelLogPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize); + +static void _printBase64(NvU8 *pData, NvU32 dataSize); +static NV_STATUS _allocateNvlogBuffer(NvU32 size, NvU32 flags, NvU32 tag, + NVLOG_BUFFER **ppBuffer); +static void _deallocateNvlogBuffer(NVLOG_BUFFER *pBuffer); + +volatile NvU32 nvlogInitCount; +static void *nvlogRegRoot; + +// Zero (null) buffer definition. +static NVLOG_BUFFER _nvlogZeroBuffer = +{ + {nvlogStringBufferPush}, + 0, + NvU32_BUILD('l','l','u','n'), + 0, + 0, + 0 +}; + +NVLOG_LOGGER NvLogLogger = +{ + NVLOG_LOGGER_VERSION, + + // Default buffers + { + // The 0th buffer just prints to the screen in debug builds. + &_nvlogZeroBuffer + }, + + // Next available slot + 1, + + // Free slots + NVLOG_MAX_BUFFERS-1, + + // Main lock, must be allocated at runtime. + NULL +}; + +#define NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer) \ + ((hBuffer < NVLOG_MAX_BUFFERS) && (NvLogLogger.pBuffers[hBuffer] != NULL)) + +NV_STATUS +nvlogInit(void *pData) +{ + nvlogRegRoot = pData; + portInitialize(); + NvLogLogger.mainLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (NvLogLogger.mainLock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + tlsInitialize(); + return NV_OK; +} + +void nvlogUpdate() { +} + +NV_STATUS +nvlogDestroy() +{ + NvU32 i; + + tlsShutdown(); + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + nvlogDeallocBuffer(i); + } + if (NvLogLogger.mainLock != NULL) + { + portSyncSpinlockDestroy(NvLogLogger.mainLock); + NvLogLogger.mainLock = NULL; + } + + /// @todo Destructor should return void. 
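+    // portShutdown() below must remain the last teardown step: the buffer
+    // frees and spinlock destruction above still go through the PORT layer
+    // that nvlogInit() brought up with portInitialize().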
+ portShutdown(); + return NV_OK; +} + +static NV_STATUS +_allocateNvlogBuffer +( + NvU32 size, + NvU32 flags, + NvU32 tag, + NVLOG_BUFFER **ppBuffer +) +{ + NVLOG_BUFFER *pBuffer; + NVLOG_BUFFER_PUSHFUNC pushfunc; + + // Sanity check on some invalid combos: + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _EXPANDABLE, _YES, flags)) + { + // Only nonwrapping buffers can be expanded + if (!FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, flags)) + return NV_ERR_INVALID_ARGUMENT; + // Full locking required to expand the buffer. + if (!FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _LOCKING, _FULL, flags)) + return NV_ERR_INVALID_ARGUMENT; + } + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _SYSTEMLOG, flags)) + { + // System log does not need to allocate memory for buffer. + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogKernelLogPush; + size = 0; + } + else + { + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _RING, flags)) + { + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogRingBufferPush; + } + else if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, flags)) + { + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogNowrapBufferPush; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _NONPAGED, _YES, flags)) + pBuffer = portMemAllocNonPaged(sizeof(*pBuffer) + size); + else + pBuffer = portMemAllocPaged(sizeof(*pBuffer) + size); + + if (!pBuffer) + return NV_ERR_NO_MEMORY; + + portMemSet(pBuffer, 0, sizeof(*pBuffer) + size); + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _OCA, _YES, flags)) + { + osAddRecordForCrashLog(pBuffer, NV_OFFSETOF(NVLOG_BUFFER, data) + size); + } + + pBuffer->push.fn = pushfunc; + pBuffer->size = size; + pBuffer->flags = flags; + pBuffer->tag = tag; + + *ppBuffer = pBuffer; + + return NV_OK; +} + +static void +_deallocateNvlogBuffer +( + NVLOG_BUFFER *pBuffer +) +{ + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _OCA, _YES, pBuffer->flags)) + osDeleteRecordForCrashLog(pBuffer); + + portMemFree(pBuffer); +} + +NV_STATUS +nvlogAllocBuffer +( + NvU32 size, + NvU32 flags, + NvU32 tag, + NVLOG_BUFFER_HANDLE *pBufferHandle, + ... 
+) +{ + NVLOG_BUFFER *pBuffer; + NV_STATUS status; + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _SYSTEMLOG, flags)) + { + } + else + { + NV_ASSERT_OR_RETURN(NvLogLogger.totalFree > 0, + NV_ERR_INSUFFICIENT_RESOURCES); + } + + status = _allocateNvlogBuffer(size, flags, tag, &pBuffer); + + if (status != NV_OK) + { + return status; + } + + portSyncSpinlockAcquire(NvLogLogger.mainLock); + + if (NvLogLogger.nextFree < NVLOG_MAX_BUFFERS) + { + NvLogLogger.pBuffers[NvLogLogger.nextFree] = pBuffer; + *pBufferHandle = NvLogLogger.nextFree++; + NvLogLogger.totalFree--; + } + else + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Find the next slot in the buffers array + while (NvLogLogger.nextFree < NVLOG_MAX_BUFFERS) + { + if (NvLogLogger.pBuffers[NvLogLogger.nextFree] != NULL) + NvLogLogger.nextFree++; + else break; + } + portSyncSpinlockRelease(NvLogLogger.mainLock); + + if (status != NV_OK) + { + portMemFree(pBuffer); + } + + return status; +} + +void +nvlogDeallocBuffer +( + NVLOG_BUFFER_HANDLE hBuffer +) +{ + NVLOG_BUFFER *pBuffer; + + if ((hBuffer == 0) || !NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer)) + return; + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + pBuffer->flags = FLD_SET_DRF(LOG_BUFFER, _FLAGS, _DISABLED, + _YES, pBuffer->flags); + + while (pBuffer->threadCount > 0) { /*spin*/ } + portSyncSpinlockAcquire(NvLogLogger.mainLock); + NvLogLogger.pBuffers[hBuffer] = NULL; + NvLogLogger.nextFree = NV_MIN(hBuffer, NvLogLogger.nextFree); + NvLogLogger.totalFree++; + portSyncSpinlockRelease(NvLogLogger.mainLock); + + _deallocateNvlogBuffer(pBuffer); +} + +NV_STATUS +nvlogWriteToBuffer +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU8 *pData, + NvU32 size +) +{ + NvBool status; + NVLOG_BUFFER *pBuffer; + + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + // Normal condition when fetching nvLog from NV0000_CTRL_CMD_NVD_GET_NVLOG. + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _DISABLED, _YES, pBuffer->flags)) + return NV_ERR_NOT_READY; + + portAtomicIncrementS32(&pBuffer->threadCount); + status = pBuffer->push.fn(pBuffer, pData, size); + // Get pBuffer from the handle again, as it might have realloc'd + portAtomicDecrementS32(&NvLogLogger.pBuffers[hBuffer]->threadCount); + + return (status == NV_TRUE) ? 
NV_OK : NV_ERR_BUFFER_TOO_SMALL; +} + + + +NV_STATUS +nvlogExtractBufferChunk +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 chunkNum, + NvU32 *pChunkSize, + NvU8 *pDest +) +{ + NVLOG_BUFFER *pBuffer; + NvU32 index; + + NV_ASSERT_OR_RETURN(*pChunkSize > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDest != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + index = chunkNum * (*pChunkSize); + NV_ASSERT_OR_RETURN(index <= pBuffer->size, NV_ERR_OUT_OF_RANGE); + *pChunkSize = NV_MIN(*pChunkSize, (pBuffer->size - index)); + + portSyncSpinlockAcquire(NvLogLogger.mainLock); + portMemCopy(pDest, *pChunkSize, &pBuffer->data[index], *pChunkSize); + portSyncSpinlockRelease(NvLogLogger.mainLock); + + return NV_OK; +} + + +NV_STATUS +nvlogGetBufferSize +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pSize +) +{ + NV_ASSERT_OR_RETURN(pSize != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pSize = NvLogLogger.pBuffers[hBuffer]->size; + return NV_OK; +} + +NV_STATUS +nvlogGetBufferTag +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pTag +) +{ + NV_ASSERT_OR_RETURN(pTag != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pTag = NvLogLogger.pBuffers[hBuffer]->tag; + return NV_OK; +} + +NV_STATUS +nvlogGetBufferFlags +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pFlags +) +{ + NV_ASSERT_OR_RETURN(pFlags != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pFlags = NvLogLogger.pBuffers[hBuffer]->flags; + return NV_OK; +} + + +NV_STATUS +nvlogPauseLoggingToBuffer +( + NVLOG_BUFFER_HANDLE hBuffer, + NvBool bPause +) +{ + NVLOG_BUFFER *pBuffer; + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + pBuffer->flags = (bPause) + ? 
FLD_SET_DRF(LOG, _BUFFER_FLAGS, _DISABLED, _YES, pBuffer->flags)
+        : FLD_SET_DRF(LOG, _BUFFER_FLAGS, _DISABLED, _NO, pBuffer->flags);
+
+    return NV_OK;
+}
+
+
+NV_STATUS
+nvlogPauseAllLogging
+(
+    NvBool bPause
+)
+{
+    // Currently a no-op; pausing is only implemented per buffer via
+    // nvlogPauseLoggingToBuffer().
+    return NV_OK;
+}
+
+NV_STATUS
+nvlogGetBufferHandleFromTag
+(
+    NvU32 tag,
+    NVLOG_BUFFER_HANDLE *pBufferHandle
+)
+{
+    NvU32 i;
+
+    NV_ASSERT_OR_RETURN(pBufferHandle != NULL, NV_ERR_INVALID_POINTER);
+
+    for (i = 0; i < NVLOG_MAX_BUFFERS; i++)
+    {
+        if (NvLogLogger.pBuffers[i] != NULL)
+        {
+            if (NvLogLogger.pBuffers[i]->tag == tag)
+            {
+                *pBufferHandle = i;
+                return NV_OK;
+            }
+        }
+    }
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
+
+NV_STATUS
+nvlogGetBufferSnapshot
+(
+    NVLOG_BUFFER_HANDLE hBuffer,
+    NvU8 *pDest,
+    NvU32 destSize
+)
+{
+    NVLOG_BUFFER *pBuffer;
+
+    NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer),
+                        NV_ERR_INVALID_OBJECT_HANDLE);
+
+    NV_ASSERT_OR_RETURN(pDest != NULL, NV_ERR_INVALID_POINTER);
+
+    pBuffer = NvLogLogger.pBuffers[hBuffer];
+
+    NV_ASSERT_OR_RETURN(destSize >= NVLOG_BUFFER_SIZE(pBuffer),
+                        NV_ERR_BUFFER_TOO_SMALL);
+
+    portSyncSpinlockAcquire(NvLogLogger.mainLock);
+    portMemCopy(pDest, NVLOG_BUFFER_SIZE(pBuffer), pBuffer, NVLOG_BUFFER_SIZE(pBuffer));
+    portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    return NV_OK;
+}
+
+
+
+NvBool
+nvlogRingBufferPush
+(
+    NVLOG_BUFFER *pBuffer,
+    NvU8 *pData,
+    NvU32 dataSize
+)
+{
+    NvU32 writeSize;
+    NvU32 oldPos;
+    NvU32 lock = DRF_VAL(LOG, _BUFFER_FLAGS, _LOCKING, pBuffer->flags);
+
+    if (lock != NVLOG_BUFFER_FLAGS_LOCKING_NONE)
+        portSyncSpinlockAcquire(NvLogLogger.mainLock);
+
+    oldPos = pBuffer->pos;
+    pBuffer->extra.ring.overflow += (pBuffer->pos + dataSize) / pBuffer->size;
+    pBuffer->pos = (pBuffer->pos + dataSize) % pBuffer->size;
+
+    // State locking does portMemCopy unlocked.
+    if (lock == NVLOG_BUFFER_FLAGS_LOCKING_STATE)
+        portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    while (dataSize > 0)
+    {
+        writeSize = NV_MIN(pBuffer->size - oldPos, dataSize);
+        portMemCopy(&pBuffer->data[oldPos], writeSize, pData, writeSize);
+        oldPos = 0;
+        dataSize -= writeSize;
+        pData += writeSize;
+    }
+
+    if (lock == NVLOG_BUFFER_FLAGS_LOCKING_FULL)
+        portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    return NV_TRUE;
+}
+
+NvBool
+nvlogNowrapBufferPush
+(
+    NVLOG_BUFFER *pBuffer,
+    NvU8 *pData,
+    NvU32 dataSize
+)
+{
+    NvU32 oldPos;
+    NvU32 lock = DRF_VAL(LOG, _BUFFER_FLAGS, _LOCKING, pBuffer->flags);
+
+    if (pBuffer->pos + dataSize >= pBuffer->size)
+    {
+        NvBool bExpandable = FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _EXPANDABLE, _YES, pBuffer->flags);
+        NvBool bNonPaged = FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _NONPAGED, _YES, pBuffer->flags);
+
+        // Expandable buffer, and we are at IRQL where we can do realloc
+        if (bExpandable &&
+            ((bNonPaged && portMemExSafeForNonPagedAlloc()) || (!bNonPaged && portMemExSafeForPagedAlloc())))
+        {
+            NVLOG_BUFFER *pNewBuffer;
+            NvU32 i;
+            NvU32 newSize = pBuffer->size * 2;
+            NvU32 allocSize = sizeof(*pBuffer) + newSize;
+
+            pNewBuffer = bNonPaged ? portMemAllocNonPaged(allocSize) : portMemAllocPaged(allocSize);
+            if (pNewBuffer == NULL)
+                return NV_FALSE;
+
+            //
+            // Two threads could have entered this block at the same time, and
+            // both will have allocated their own bigger buffer. Only the one
+            // that takes the spinlock first should do the copy and the swap.
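+            // The losing thread detects the swap below (its pBuffer is no
+            // longer present in NvLogLogger.pBuffers[]) and must then discard
+            // the buffer it allocated.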
+            //
+            portSyncSpinlockAcquire(NvLogLogger.mainLock);
+            // Check if this buffer is still there and was not swapped for a bigger one
+            for (i = 0; i < NVLOG_MAX_BUFFERS; i++)
+            {
+                if (NvLogLogger.pBuffers[i] == pBuffer)
+                    break;
+            }
+            if (i == NVLOG_MAX_BUFFERS)
+            {
+                // Another thread has already expanded the buffer; free our
+                // unused copy and bail out.
+                // TODO: Maybe we could store the handle and then try again?
+                portSyncSpinlockRelease(NvLogLogger.mainLock);
+                portMemFree(pNewBuffer);
+                return NV_FALSE;
+            }
+
+            portMemCopy(pNewBuffer, allocSize, pBuffer, sizeof(*pBuffer)+pBuffer->size);
+            pNewBuffer->size = newSize;
+            for (i = 0; i < NVLOG_MAX_BUFFERS; i++)
+            {
+                if (NvLogLogger.pBuffers[i] == pBuffer)
+                    NvLogLogger.pBuffers[i] = pNewBuffer;
+            }
+            portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+            //
+            // Before we can free this buffer, we need to make sure any threads
+            // that were still accessing it are done. Spin on volatile threadCount
+            // NOTE: threadCount includes the current thread too.
+            //
+            while (pBuffer->threadCount > 1) { /*spin*/ }
+            portMemFree(pBuffer);
+            pBuffer = pNewBuffer;
+        }
+        else
+        {
+            return NV_FALSE;
+        }
+    }
+
+    if (lock != NVLOG_BUFFER_FLAGS_LOCKING_NONE)
+        portSyncSpinlockAcquire(NvLogLogger.mainLock);
+
+    oldPos = pBuffer->pos;
+    pBuffer->pos = oldPos + dataSize;
+
+    // State locking does portMemCopy unlocked.
+    if (lock == NVLOG_BUFFER_FLAGS_LOCKING_STATE)
+        portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    portMemCopy(&pBuffer->data[oldPos], dataSize, pData, dataSize);
+
+    if (lock == NVLOG_BUFFER_FLAGS_LOCKING_FULL)
+        portSyncSpinlockRelease(NvLogLogger.mainLock);
+
+    return NV_TRUE;
+}
+
+NvBool
+nvlogStringBufferPush
+(
+    NVLOG_BUFFER *unused,
+    NvU8 *pData,
+    NvU32 dataSize
+)
+{
+    return NV_TRUE;
+}
+
+//
+// Prints the buffer encoded as base64, with a prefix for easy grepping.
+// Base64 allows the padding characters ('=') to appear anywhere, not just at
+// the end, so it is fine to print buffers one at a time without merging.
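+// One plausible way to recover the raw bytes from a kernel log on Linux
+// (illustrative command only; it assumes a decoder tolerant of mid-stream
+// '=' padding):
+//     grep -o 'nvrm-nvlog: .*' kern.log | sed 's/^nvrm-nvlog: //' | base64 -d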
+//
+static void _printBase64(NvU8 *pData, NvU32 dataSize)
+{
+    const NvU8 base64_key[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+    NvU8 output[64+1]; // 64 base64 characters per line of output
+    NvU32 i;
+
+    do
+    {
+        i = 0;
+        while (i < (sizeof(output)-1) && (dataSize > 0))
+        {
+            output[i++] = base64_key[pData[0] >> 2];
+            if (dataSize == 1)
+            {
+                output[i++] = base64_key[(pData[0] << 4) & 0x3F];
+                output[i++] = '=';
+                output[i++] = '=';
+                dataSize = 0;
+                break;
+            }
+
+            output[i++] = base64_key[((pData[0] << 4) & 0x3F) | (pData[1] >> 4)];
+            if (dataSize == 2)
+            {
+                output[i++] = base64_key[(pData[1] << 2) & 0x3F];
+                output[i++] = '=';
+                dataSize = 0;
+                break;
+            }
+
+            output[i++] = base64_key[((pData[1] << 2) & 0x3F) | (pData[2] >> 6)];
+            output[i++] = base64_key[pData[2] & 0x3F];
+
+            pData += 3;
+            dataSize -= 3;
+        }
+        output[i] = 0;
+        portDbgPrintf("nvrm-nvlog: %s\n", output);
+    } while (dataSize > 0);
+}
+
+NvBool nvlogKernelLogPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize)
+{
+    PORT_UNREFERENCED_VARIABLE(unused);
+    _printBase64(pData, dataSize);
+    return NV_TRUE;
+}
+
+void nvlogDumpToKernelLog(NvBool bDumpUnchangedBuffersOnlyOnce)
+{
+    NvU32 i;
+    static NvU32 lastDumpPos[NVLOG_MAX_BUFFERS];
+
+    for (i = 0; i < NVLOG_MAX_BUFFERS; i++)
+    {
+        NVLOG_BUFFER *pBuf = NvLogLogger.pBuffers[i];
+
+        if (pBuf && pBuf->size)
+        {
+            if (bDumpUnchangedBuffersOnlyOnce)
+            {
+                NvU32 pos = pBuf->pos + (pBuf->size * pBuf->extra.ring.overflow);
+
+                // Dump the buffer only if its contents have changed
+                if (lastDumpPos[i] != pos)
+                {
+                    lastDumpPos[i] = pos;
+                    _printBase64((NvU8*)pBuf, NVLOG_BUFFER_SIZE(pBuf));
+                }
+            }
+            else
+            {
+                _printBase64((NvU8*)pBuf, NVLOG_BUFFER_SIZE(pBuf));
+            }
+        }
+    }
+}
+
+void nvlogDumpToKernelLogIfEnabled(void)
+{
+    NvU32 dumpNvlogValue;
+
+    // Debug and develop builds already dump everything as it happens.
+#if defined(DEBUG) || defined(DEVELOP)
+    return;
+#endif
+
+    // Enable only if the regkey has been set
+    if (osReadRegistryDword(NULL, NV_REG_STR_RM_DUMP_NVLOG, &dumpNvlogValue) != NV_OK)
+        return;
+
+    if (dumpNvlogValue != NV_REG_STR_RM_DUMP_NVLOG_ENABLE)
+        return;
+
+    nvlogDumpToKernelLog(NV_FALSE);
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c
new file mode 100644
index 0000000..c7cd7a7
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c
@@ -0,0 +1,1503 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2002-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************************************************************\
+*                                                                           *
+*   Description: Common debug print defines and functions                   *
+*                                                                           *
+\***************************************************************************/
+
+#include "core/core.h"
+#include "core/system.h"
+#include "os/os.h" // to pick up declarations for osDelay() and osDelayUs()
+#include "nvrm_registry.h"
+
+#include <ctrl/ctrl0000/ctrl0000system.h> // NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE
+
+static int inttodecfmtstr(NvS64 sval, char *dest, int fieldwidth, int flags);
+static int uinttohexfmtstr(NvU64 uval, char *dest, int fieldwidth, int flags);
+static int strtofmtstr(const char *src, char *dest, char *destLimit, int fieldwidth, int precision, int flags);
+
+#if 0
+static int float64todecfmtstr(F064 f64val, NvU8 *dest, int fieldwidth, int precision, int flags);
+#endif
+
+//
+// Numeric & string conversion flags (used if you call the 'XtoYfmtstr' routines directly)
+//
+enum {
+    DONTTERMINATE = 1,  // Don't null-terminate the string if this flag is set
+    UNSIGNED_F    = 2,  // Force an unsigned number conversion (other sign options are ignored)
+    PLUSSIGN_F    = 4,  // For signed numbers >= 0, force a '+' in the sign position
+    SPACESIGN_F   = 8,  // For signed numbers >= 0, force a space in the sign position
+    LEFTALIGN_F   = 16, // Left-justify the result in the destination field (overrides zero fill)
+    ZEROFILL_F    = 32, // Use leading zeros for padding to a field width
+    LOWERCASE_F   = 64  // Use lower case hex digits: a-f instead of A-F
+};
+
+//
+// nvDbgBreakpointEnabled - Returns true if triggering a breakpoint is allowed
+//
+NvBool osDbgBreakpointEnabled(void);
+NvBool nvDbgBreakpointEnabled()
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    if (pSys != NULL)
+    {
+        if (pSys->getProperty(pSys, PDB_PROP_SYS_DEBUGGER_DISABLED))
+            return NV_FALSE;
+    }
+    return osDbgBreakpointEnabled();
+}
+
+#if NV_PRINTF_STRINGS_ALLOWED
+static PORT_SPINLOCK *_nv_dbg_lock = NULL;
+static char _nv_dbg_string[MAX_ERROR_STRING];
+
+//
+// nvDbgInit - Allocate the printf spinlock
+//
+NvBool
+nvDbgInit(void)
+{
+    if (NULL != _nv_dbg_lock)
+    {
+        // already initialized
+        return NV_TRUE;
+    }
+    if (portInitialize() != NV_OK)
+        return NV_FALSE;
+
+    _nv_dbg_lock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged());
+    if (_nv_dbg_lock == NULL)
+        return NV_FALSE;
+    else
+        return NV_TRUE;
+}
+
+//
+// nvDbgDestroy - Free the printf spinlock
+//
+void
+nvDbgDestroy(void)
+{
+    if (NULL != _nv_dbg_lock)
+    {
+        portSyncSpinlockDestroy(_nv_dbg_lock);
+        _nv_dbg_lock = NULL;
+        portShutdown();
+    }
+}
+
+//
+// nvDbg_PrintMsg - Common message control for two flavors of printf
+//
+// Differences for MODS builds:
+//   * MODS has its own messaging system, and we always pass messages
+//     to MODS unless RmMsg explicitly wants to hide a message.
+//   * MODS requires messages even when the debugger is not enabled.
+//   * Sorry for the #ifdefs, but RmMsg complicates the code enough
+//     that it is nice to have one implementation.
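+//
+// Decision order in the body below: bail out if the system debugger is
+// disabled; then an explicit RmMsg rule may hide or force the message;
+// otherwise the compile-time minimum level applies (LEVEL_NOTICE on
+// develop/debug/QA builds, LEVEL_ERROR elsewhere).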
+// +static NvBool +nvDbg_PrintMsg +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + NvBool *pForce, + NvU32 *pPrefix +) +{ + NvU32 rc; + int debuglevel_min; + +#if defined(DEVELOP) || defined(DEBUG) || defined(QA_BUILD) + debuglevel_min = LEVEL_NOTICE; +#else + debuglevel_min = LEVEL_ERROR; +#endif + + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if ((NULL == pSys) || (pSys->getProperty(pSys, PDB_PROP_SYS_DEBUGGER_DISABLED))) + { + return NV_FALSE; + } + + // + // Message is filtered by an explicit RmMsg rule + // + rc = nvDbgRmMsgCheck(filename, linenumber, (char *)function, debuglevel, printf_format, pPrefix); + switch (rc) + { + case NVRM_MSG_HIDE: + // Hide this error message + return NV_FALSE; + + case NVRM_MSG_PRINT: + // Force this error message + *pForce = NV_TRUE; + return NV_TRUE; + + case NVRM_MSG_NORMAL: + default: + if (debuglevel >= debuglevel_min) + { + return NV_TRUE; + } + break; + } + return NV_FALSE; +} + +void nvDbg_Printf +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + ... +) +{ + va_list arglist; + va_start(arglist, printf_format); + nvDbg_vPrintf(filename, linenumber, function, debuglevel, printf_format, arglist); + va_end(arglist); +} + +// +// Internal function to prepare _nv_dbg_string for printing. +// Should only be called while _nv_dbg_lock is held. +// +static void +_nvDbgPrepareString +( + const char *file, + int line, + const char *func, + const char *fmt, + NvU32 prefix, + va_list arglist +) +{ + NvU32 len = 0; + + // + // If RmMsg has added a prefix, skip the standard NV_PRINTF_PREFIX. + // If there is no prefix, don't include the RmMsg prefix. + // + if (portStringCompare(fmt, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX) - 1) == 0) + { + len = RmMsgPrefix(prefix, file, line, func, _nv_dbg_string, MAX_ERROR_STRING); + fmt += sizeof(NV_PRINTF_PREFIX) - 1; + } + + nvDbgVsnprintf(_nv_dbg_string + len, MAX_ERROR_STRING - len, fmt, arglist); +} + +// +// Temporary helper to map LEVEL_xxx constants to a platform specific level. +// +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) +static NvU32 _nvDbgLevelToPlatformLevel(NvBool bForce, NvU32 level) +{ + return bForce ? LEVEL_FATAL : level; +} +#endif + +// +// Some varargs interfaces need a va_list interface, but still +// want the common output buffer and the RmMsg handling. 
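+//
+// nvDbg_Printf() above is one such client: it wraps its varargs with
+// va_start()/va_end() and forwards the va_list here.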
+// +void nvDbg_vPrintf +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + va_list arglist +) +{ + NvBool force = NV_FALSE; + NvU32 prefix = 0; + + if (nvDbg_PrintMsg(filename, linenumber, function, debuglevel, printf_format, &force, &prefix)) + { + portSyncSpinlockAcquire(_nv_dbg_lock); + _nvDbgPrepareString(filename, linenumber, function, printf_format, prefix, arglist); +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) + portDbgExPrintfLevel(_nvDbgLevelToPlatformLevel(force, debuglevel), + "%.*s", MAX_ERROR_STRING, _nv_dbg_string); +#else + portDbgPrintString(_nv_dbg_string, MAX_ERROR_STRING); +#endif + portSyncSpinlockRelease(_nv_dbg_lock); + } +} + + +#define IS_PRINT(c) (((c) >= 0x20) && ((c) <= 0x7E)) + +void nvDbg_PrintBuf +( + const char *file, + int line, + const char *function, + int dbglevel, + NvU8 buffer[], + NvU32 bufsize +) +{ + NvU32 i, j; + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf [BEGIN]")); + for (i = 0; i < bufsize; i += 16) + { + nvDbg_Printf(file, line, function, dbglevel, "\n"); + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf 0x%p "), buffer + i); + for (j = 0; j < 16; j++) + { + if ((i + j) < bufsize) + { + nvDbg_Printf(file, line, function, dbglevel, "%02x", *(buffer + i + j)); + } + else + { + nvDbg_Printf(file, line, function, dbglevel, " "); + } + } + nvDbg_Printf(file, line, function, dbglevel, " "); + for (j = 0; j < 16; j++) + { + if ((i + j) < bufsize) + { + nvDbg_Printf(file, line, function, dbglevel, "%c", IS_PRINT(*(buffer + i + j))? *(buffer + i + j) : '.'); + } + else + { + nvDbg_Printf(file, line, function, dbglevel, " "); + } + } + } + nvDbg_Printf(file, line, function, dbglevel, "\n"); + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf [END]\n")); +} + +#endif + +#define TMPBUF_SIZE 63 +//====================================================================================== +// +// nvDbgVsnprintf() +// +//====================================================================================== +int nvDbgVsnprintf(char *dest, NvU32 destSize, const char *fmt, va_list args) +{ + int ch, precision, flags; + NvU32 fieldwidth; + int longlong; + NvS32 s32val; + NvU32 u32val; + NvS64 s64val; + NvU64 u64val; + + const char *f; + const char *specptr; + char *d; + char *strpval; + char *destLimit; + void *pval; + char tmpBuf[TMPBUF_SIZE + 1]; + NvU32 tmpSize; + + if (dest == NULL || destSize == 0) + { + return(0); // If we don't have a destination, we didn't do any characters + } + + f = fmt; + d = dest; + destLimit = dest + destSize - 1; + dest[destSize - 1] = 0; + + while ((ch = *f++) != '\0') + { + if (ch != '%') + { + if (d < destLimit) + { + *d++ = (NvU8)ch; + } + continue; + } + longlong = NV_FALSE; + specptr = f - 1; // Save a pointer to the '%' specifier, in case of syntax errors + ch = *f++; + + // revert to correct printf behavior for % + // from printf.3 regarding '%' format character: + // % A `%' is written. No argument is converted. The complete conversion specification is `%%'. 
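+        // e.g. nvDbgSnprintf(buf, sizeof(buf), "done: 100%%") stores
+        // "done: 100%" into buf.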
+ if (ch == '%') { + if (d < destLimit) + { + *d++ = (NvU8)ch; + } + continue; + } + + flags = DONTTERMINATE; // Don't terminate substrings -- we'll null-terminate when we're all done + // Check for left-alignment + if (ch == '-') { + flags |= LEFTALIGN_F; + ch = *f++; + } + // Check for using a plus sign for non-negative numbers + if (ch == '+') { + flags |= PLUSSIGN_F; + ch = *f++; + } + // Check for using a space character (sign place-holder) for non-negative numbers + if (ch == ' ') { + flags |= SPACESIGN_F; + ch = *f++; + } + // Check for leading zero fill + if (ch == '0') { + flags |= ZEROFILL_F; + // Don't bump the character pointer in case '0' was the only digit + } + // Collect the field width specifier + if (ch == '*') { + // Field width specified by separate argument + fieldwidth = va_arg(args, int); + ch = *f++; + } + else { + fieldwidth = 0; // Default field width + while (ch >= '0' && ch <= '9') { + fieldwidth = fieldwidth * 10 + ch - '0'; + ch = *f++; + } + } + + // Check for a precision specifier + precision = -1; // Default unspecified precision + if (ch == '.') { // We have a precision specifier, skip the '.' + ch = *f++; + if (ch == '*') { + // precision specified by separate argument + precision = va_arg(args, int); + ch = *f++; + } + else { + while (ch >= '0' && ch <= '9') { + precision = precision * 10 + ch - '0'; + ch = *f++; + } + } + } + + if (ch == 'l') { + ch = *f++; + if (ch == 'l') { + longlong = NV_TRUE; + ch = *f++; + } + } + + // Perform the conversion operation + switch (ch) { + case 'c': // Copy an ASCII character + u32val = va_arg(args, int); + if (d < destLimit) + { + *d++ = (NvU8) u32val; + } + break; + case 'u': // Copy a formatted, unsigned decimal number + flags |= UNSIGNED_F; + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + if ( longlong ) // long long specifier "llu" or "lld" + { + u64val = va_arg(args, unsigned long long); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr(u64val, tmpBuf, fieldwidth, flags); + } + else + { + u32val = va_arg(args, unsigned int); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr((NvU64)u32val, tmpBuf, fieldwidth, flags); + } + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d); + portMemCopy(d, tmpSize, tmpBuf, tmpSize); + d += tmpSize; + } + break; + case 'd': // Copy a formatted, signed decimal number + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + if ( longlong ) // long long specifier "llu" or "lld" + { + s64val = va_arg(args, long long); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr(s64val, tmpBuf, fieldwidth, flags); + } + else + { + s32val = va_arg(args, int); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr((NvS64)s32val, tmpBuf, fieldwidth, flags); + } + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? 
tmpSize : (NvU32)(destLimit - d);
+                portMemCopy(d, tmpSize, tmpBuf, tmpSize);
+                d += tmpSize;
+            }
+            break;
+        case 'x': // Copy a formatted, lower-case hexadecimal number
+            flags |= LOWERCASE_F;
+            // (fall through)
+        case 'X': // Copy a formatted, upper-case hexadecimal number
+            if (fieldwidth > TMPBUF_SIZE)
+            {
+                fieldwidth = TMPBUF_SIZE;
+            }
+            if ( longlong ) // long long specifier "llx" or "llX"
+            {
+                u64val = va_arg(args, unsigned long long);
+                // Format the number, increment the dest pointer by the characters copied
+                tmpSize = uinttohexfmtstr(u64val, tmpBuf, fieldwidth, flags);
+            }
+            else
+            {
+                u32val = va_arg(args, unsigned int);
+                // Format the number, increment the dest pointer by the characters copied
+                tmpSize = uinttohexfmtstr((NvU64)u32val, tmpBuf, fieldwidth, flags);
+            }
+            if (d < destLimit)
+            {
+                tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d);
+                portMemCopy(d, tmpSize, tmpBuf, tmpSize);
+                d += tmpSize;
+            }
+            break;
+        case 'p': // Copy a formatted pointer value
+            if (fieldwidth > TMPBUF_SIZE)
+            {
+                fieldwidth = TMPBUF_SIZE;
+            }
+            pval = va_arg(args, void *);
+            tmpSize = uinttohexfmtstr((NvU64)((NvUPtr)pval), tmpBuf, fieldwidth, flags);
+            if (d < destLimit)
+            {
+                tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d);
+                portMemCopy(d, tmpSize, tmpBuf, tmpSize);
+                d += tmpSize;
+            }
+            break;
+        case 's': // Copy a formatted string
+            strpval = va_arg(args, char *);
+            d += strtofmtstr(strpval, d, destLimit, fieldwidth, precision, flags);
+            break;
+        case 0: // Gracefully handle premature end-of-string
+            f--; // Back up, now f points to the null character again
+            // (fall through)
+        default: // Unexpected conversion operator, so just echo to the destination
+            while (specptr < f)
+            {
+                if (d < destLimit)
+                {
+                    *d++ = *specptr;
+                }
+                specptr++;
+            }
+            if (ch == 0)
+            {
+                goto stringdone;
+            }
+            break;
+        }
+    }
+
+stringdone:
+    if (d <= destLimit)
+    {
+        *d = '\0'; // Null-terminate the string
+    }
+    return((int)(d - dest)); // Return the number of characters we may have transferred
+}
+
+int nvDbgSnprintf(char *dest, NvU32 destSize, const char *fmt, ...)
+{
+    va_list arglist;
+    int len;
+
+    va_start(arglist, fmt);
+    len = nvDbgVsnprintf(dest, destSize, fmt, arglist);
+    va_end(arglist);
+
+    return len;
+}
+
+enum { // Padding option definitions
+    PRESPACE_O  = 1,
+    PREZERO_O   = 2,
+    POSTSPACE_O = 4
+};
+
+#define NUMBUFSIZE 20 // Should be enough for 64-bit integers in decimal or hex
+
+//======================================================================================
+//
+// inttodecfmtstr()
+//
+// This takes a signed integer value and converts it to a formatted decimal string,
+// using options (field width and flags) like those provided by sprintf(). The
+// number is assumed to be signed unless the UNSIGNED_F flag is set. Look at the code
+// for nvDbgVsnprintf() above to see which formatting options are implemented.
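+//
+// For example (illustrative values, not captured output): with the flags and
+// widths parsed by nvDbgVsnprintf(), a "%+6d" conversion of -42 corresponds to
+// inttodecfmtstr(-42, buf, 6, PLUSSIGN_F) and yields "   -42" -- two digits,
+// a '-' sign, and three leading pad spaces.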
+//
+//======================================================================================
+static int inttodecfmtstr(NvS64 sval, char *dest, int fieldwidth, int flags)
+{
+    int i, digitcount, destcount;
+    int sign, signchar;
+    int fillcount;
+    int pad_options;
+    NvU64 uval, quotient, remainder;
+    char *intdigp;
+    char nbuf[NUMBUFSIZE];
+
+    signchar = ' '; // avoid compiler init warning
+    // Process the sign-related options
+    if (flags & UNSIGNED_F) { // Unsigned conversion
+        sign = 0; // No sign character
+    } else { // We're doing a signed conversion
+        sign = 1; // Assume we'll have a sign character
+        if (sval < 0) {
+            signchar = '-';
+            sval = -sval; // Make the number positive now so we can 'digitize' it
+        } else { // sval >= 0
+            if (flags & PLUSSIGN_F)
+                signchar = '+';
+            else if (flags & SPACESIGN_F)
+                signchar = ' ';
+            else
+                sign = 0; // No sign character
+        }
+    }
+    uval = sval; // Do unsigned math from here on out
+
+    // Convert the number into ASCII decimal digits in our local buffer, counting them
+    intdigp = &nbuf[NUMBUFSIZE]; // Point past the last character in the buffer
+    digitcount = 0; // Nothing written to our local buffer yet
+    do {
+        quotient = uval / 10;
+        remainder = uval - quotient * 10;
+        *--intdigp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot
+        digitcount++;
+        uval = quotient;
+    } while (uval > 0);
+
+    // Process the field-padding options
+    pad_options = 0; // Assume we won't be doing any padding
+    fillcount = fieldwidth - (sign + digitcount); // Account for the sign, if used
+    if (fillcount > 0) { // We need to do left or right padding
+        if (flags & LEFTALIGN_F) {
+            pad_options = POSTSPACE_O;
+        } else { // Right-aligned, fill with zeros or spaces
+            if (flags & ZEROFILL_F)
+                pad_options = PREZERO_O;
+            else
+                pad_options = PRESPACE_O;
+        }
+    }
+
+    destcount = 0; // Nothing written out to the destination yet
+
+    // Copy any leading spaces
+    if (pad_options & PRESPACE_O) {
+        for (i = 0; i < fillcount; i++) // Copy the pad character(s)
+            *dest++ = ' ';
+        destcount += fillcount;
+    }
+    // Copy the sign character, if any
+    if (sign) {
+        *dest++ = (char)signchar;
+        destcount++;
+    }
+    // Copy any leading zeros
+    if (pad_options & PREZERO_O) {
+        for (i = 0; i < fillcount; i++) // Copy the pad character(s)
+            *dest++ = '0';
+        destcount += fillcount;
+    }
+    // Copy the decimal digits from our local buffer
+    for (i = 0; i < digitcount; i++)
+        *dest++ = *intdigp++;
+    destcount += digitcount;
+
+    // Copy any trailing spaces
+    if (pad_options & POSTSPACE_O) {
+        for (i = 0; i < fillcount; i++) // Copy the pad character(s)
+            *dest++ = ' ';
+        destcount += fillcount;
+    }
+    if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to
+        *dest = 0;
+    return(destcount); // Return the character count, not including the null
+}
+
+//======================================================================================
+//
+// uinttohexfmtstr()
+//
+// This takes an unsigned integer value and converts it to a formatted hexadecimal
+// string, using options (field width and flags) like those provided by sprintf(). Look
+// at the code for nvDbgVsnprintf() above to see which formatting options are implemented.
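+//
+// For example (illustrative values): uinttohexfmtstr(0xBEEF, buf, 8,
+// ZEROFILL_F) yields "0000BEEF", and adding LOWERCASE_F yields "0000beef" --
+// the same conversions a "%08X" or "%08x" specifier would request.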
+// +//====================================================================================== +static int uinttohexfmtstr(NvU64 uval, char *dest, int fieldwidth, int flags) +{ + int i, digitcount, destcount; + int c, hexadjust; + int fillcount; + char fillchar = ' '; + int pad_options; + char *intdigp; + char nbuf[NUMBUFSIZE]; + + hexadjust = 'A' - '9' - 1; + if (flags & LOWERCASE_F) + hexadjust += 'a' - 'A'; + + // Convert the number into ASCII hex digits in our local buffer, counting them + intdigp = &nbuf[NUMBUFSIZE]; // Point past the last character in the buffer + digitcount = 0; // Nothing written to our local buffer yet + do { + c = (int)(uval % 16) + '0'; + if (c > '9') /* A-F */ + c += hexadjust; + *--intdigp = (NvU8)c; // Put the digit into the next lower buffer slot + digitcount++; + uval /= 16; + } while (uval > 0); + + // Process the field-padding options + pad_options = 0; // Assume we won't be doing any padding + fillcount = fieldwidth - digitcount; // No sign to worry about + if (fillcount > 0) { // We need to do left or right padding + fillchar = ' '; // Most common fill character is the space + if (flags & LEFTALIGN_F) { + pad_options = POSTSPACE_O; + } else { // Right-aligned, fill with zeros or spaces + if (flags & ZEROFILL_F) { + pad_options = PREZERO_O; + fillchar = '0'; + } else { + pad_options = PRESPACE_O; + } + } + } + + destcount = 0; // Nothing written out to the destination yet + + // Copy any leading zeros or spaces + if (pad_options & (PREZERO_O | PRESPACE_O)) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = fillchar; + destcount += fillcount; + } + // Copy the hex digits from our local buffer + for (i = 0; i < digitcount; i++) + *dest++ = *intdigp++; + destcount += digitcount; + + // Copy any trailing spaces + if (pad_options & POSTSPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = fillchar; + destcount += fillcount; + } + if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to + *dest = 0; + return(destcount); // Return the character count, not including the null +} + + +#if 0 + +//====================================================================================== +// +// float64todecfmtstr() +// +// This takes a 64-bit floating-point value and converts it to a formatted decimal +// string, using options (field width, precision, and flags) like those provided by +// sprintf(). Look at the code for dbugsprintf() above to see which formatting options +// are implemented. +// +//====================================================================================== +static int float64todecfmtstr(F064 f64val, NvU8 *dest, int fieldwidth, int precision, int flags) +{ + int i, firstcount, destcount; + int sign, signchar, decpt; + int fillcount; + int pad_options; + int reducecount, loopdigits, digitsleft; + NvU32 u32val, quotient, remainder; + F064 f64mant9 = 0.0, f64mant9factor = 0.0, fone = 0.0, ften = 0.0, fbillion = 0.0, powerof10 = 0.0; + NvU8 *digp; + NvU8 nbuf[NUMBUFSIZE]; // This only needs to hold the first 9 digits of the integer part + + // Process the sign-related options + sign = 1; // Assume at first we'll have a sign character + if (f64val < 0.0) { + signchar = '-'; + f64val = -f64val; // Make the number positive now so we can 'digitize' it + } else { // f64val >= 0.0 + if (flags & PLUSSIGN_F) + signchar = '+'; + else if (flags & SPACESIGN_F) + signchar = ' '; + else + sign = 0; // No sign character + } + + // Round the number to N decimal places. 
We add 0.5 x 10^(-N), which is + // equivalent to adding 1 / (2*10^N). We'll use this latter formula. + fone = 1.0; // Keep the compiler from always loading these constants from memory + ften = 10.0; + powerof10 = fone; // 10 ^ 0 + for (i = 0; i < precision; i++) + powerof10 *= ften; // Build 10 ^ N + f64val += fone / (2.0 * powerof10); + // f64val now contains the properly rounded number + + f64mant9 = f64val; // Start hunting for the mantissa's 9 uppermost decimal digits + fbillion = 1e9; // Keep it in a register + f64mant9factor = fone; + // Reduce the mantissa to less than 1 billion, so it will fit in a 32-bit integer + for (reducecount = 0; f64mant9 >= fbillion; reducecount++) { + f64mant9 /= fbillion; + f64mant9factor *= fbillion; + } + + // Process the highest 32-bits of the mantissa so we can count those digits first + + f64mant9 = f64val / f64mant9factor; // Grab highest 9 integer decimal digits + u32val = (NvU32) f64mant9; // Drop any fraction + f64mant9 = u32val; // Now we have a float with only an integer part + f64val -= f64mant9 * f64mant9factor; // Subtract out the previous high digits + f64mant9factor /= fbillion; // Adjust our division factor + + // Convert the binary into ASCII decimal digits in our local buffer, counting them + digp = &nbuf[NUMBUFSIZE]; // Point past the last char. of these 9 digits + firstcount = 0; // No digits of the first 32-bit integer part yet + do { + quotient = u32val / 10; + remainder = u32val - quotient * 10; + *--digp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot + firstcount++; + u32val = quotient; + } while (u32val > 0); + + // Figure out whether we'll have a decimal point + decpt = (precision > 0); // Don't use a decimal point if no fractional part + + // Process the field-padding options + pad_options = 0; // Assume we won't be doing any padding + // We have the information we need to calculate how many output characters we'll have + fillcount = fieldwidth - (sign + firstcount + (reducecount * 9) + decpt + precision); + if (fillcount > 0) { // We need to do left or right padding + if (flags & LEFTALIGN_F) { + pad_options = POSTSPACE_O; + } else { // Right-aligned, fill with zeros or spaces + if (flags & ZEROFILL_F) + pad_options = PREZERO_O; + else + pad_options = PRESPACE_O; + } + } + + destcount = 0; // Nothing written out to the destination yet + + // Copy any leading spaces + if (pad_options & PRESPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = ' '; + destcount += fillcount; + } + // Copy the sign character, if any + if (sign) { + *dest++ = signchar; + destcount++; + } + // Copy any leading zeros + if (pad_options & PREZERO_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = '0'; + destcount += fillcount; + } + // Copy the highest chunk of integer digits from the local buffer + for (i = 0; i < firstcount; i++) + *dest++ = *digp++; + destcount += firstcount; + + // Now we need to convert the remaining integer digits, if any + for (i = 0; i < reducecount; i++) { + f64mant9 = f64val / f64mant9factor; // Grab 9 more decimal digits + u32val = (NvU32) f64mant9; // Drop any fraction + f64mant9 = u32val; // Now we have a float with only an integer part + f64val -= f64mant9 * f64mant9factor; // Subtract out the previous high digits + f64mant9factor /= fbillion; // Adjust our division factor + // Convert the integer part into ASCII decimal digits, directly to the destination + dest += 9; // Point past the last char. 
of this 9-digit chunk + digp = dest; + for (loopdigits = 0; loopdigits < 9; loopdigits++) { + quotient = u32val / 10; + remainder = u32val - quotient * 10; + *--digp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot + u32val = quotient; + } + destcount += 9; + } + // f64val has only the fractional part now + + if (!decpt) + goto checktrailing; // Skip the laborious fraction-processing part + + // Copy the decimal point + *dest++ = '.'; + destcount++; + + // Similar to how we handled the integer part processing, we'll process up to + // 9 digits at a time, by multiplying the fraction by a power of 10, + // converting to an integer, and converting digits to the destination. + + digitsleft = precision; + do { + loopdigits = digitsleft; + if (loopdigits > 9) + loopdigits = 9; + powerof10 = fone; // 10 ^ 0 + for (i = 0; i < loopdigits; i++) + powerof10 *= ften; // Build 10 ^ N + f64val *= powerof10; // Push some fractional digits into the integer part + u32val = (NvU32) f64val; // Conversion truncates any remaining fraction + f64val -= u32val; // Remove the integer part, leave remaining fraction digits + digp = dest + loopdigits; // Point past the last char. of this chunk + for (i = 0; i < loopdigits; i++) { + quotient = u32val / 10; + remainder = u32val - quotient * 10; + *--digp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot + u32val = quotient; + } + dest += loopdigits; + destcount += loopdigits; + digitsleft -= loopdigits; + } while (digitsleft > 0); + +checktrailing: + // Copy any trailing spaces + if (pad_options & POSTSPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = ' '; + destcount += fillcount; + } + if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to + *dest = 0; + return(destcount); // Return the character count, not including the null +} + +#endif // 0 + +//====================================================================================== +// +// strtofmtstr() +// +// This takes a source C string and converts it to a formatted output C string, +// using options (field width, precision, and flags) like those provided by sprintf(). Look at +// the code for nvDbgVsnprintf() above to see which formatting options are implemented. +// +// fieldwidth - minimum total characters to output (including pad) +// precision - maximum characters from src to output; or entire string if negative +//====================================================================================== +static int strtofmtstr(const char *src, char *dest, char *destLimit, int fieldwidth, int precision, int flags) +{ + int i, srclen; + int fillcount; + char fillchar = ' '; + int pad_options; + const char *s; + char *d; + + // Make sure we have a source string to work with + if (src == NULL) + { + src = ""; + } + + // For padding calculations, we need to know the source string length + for (s = src, srclen = 0; *s != 0; s++) + srclen++; + + // But truncated to precision, if specified. 
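+    // For example (illustrative): a "%-10.4s" conversion of "hello" truncates
+    // to "hell" (precision 4) and then left-aligns it in a field of 10,
+    // producing "hell      ".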
+    if (precision >= 0 && srclen > precision)
+        srclen = precision;
+
+    // Process the field-padding options
+    pad_options = 0; // Assume we won't be doing any padding
+    fillcount = fieldwidth - srclen;
+
+    if (fillcount > 0) { // We need to do left or right padding
+        fillchar = ' '; // Most common fill character is the space
+        if (flags & LEFTALIGN_F) {
+            pad_options = POSTSPACE_O;
+        } else { // Right-aligned, fill with zeros or spaces
+            if (flags & ZEROFILL_F) {
+                pad_options = PREZERO_O;
+                fillchar = '0';
+            } else {
+                pad_options = PRESPACE_O;
+            }
+        }
+    }
+
+    s = src;
+    d = dest;
+
+    // Copy any leading zeros or spaces
+    if (pad_options & (PREZERO_O | PRESPACE_O))
+    {
+        for (i = 0; i < fillcount; i++) // Copy the pad character(s)
+        {
+            if (d < destLimit)
+            {
+                *d++ = fillchar;
+            }
+        }
+    }
+    // Copy the characters from the source string
+    for (i = 0; i < srclen; i++)
+    {
+        if (d < destLimit)
+        {
+            *d++ = *s++;
+        }
+    }
+
+    // Copy any trailing spaces
+    if (pad_options & POSTSPACE_O)
+    {
+        for (i = 0; i < fillcount; i++) // Copy the pad character(s)
+        {
+            if (d < destLimit)
+            {
+                *d++ = fillchar;
+            }
+        }
+    }
+
+    if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to
+        *d = 0;
+    return((int)(d - dest)); // Return the character count, not including the null
+}
+
+#if NV_PRINTF_STRINGS_ALLOWED
+//
+// String matching helper for nvDbgRmMsgCheck.
+// strstr with the length of the pattern string
+// passed in.
+//
+
+static const char *nv_strnstr
+(
+    const char *str,
+    const char *pat,
+    int patlen
+)
+{
+    int len;
+
+    // pat should never be NULL here, but tolerating it makes the noun optional
+    if (pat == NULL)
+    {
+        return str;
+    }
+
+    while (*str)
+    {
+        len = 0;
+        while (len < patlen)
+        {
+            if (str[len] != pat[len])
+                break;
+            len++;
+        }
+        if (len == patlen)
+        {
+            return str;
+        }
+        str++;
+    }
+    return NULL;
+}
+
+//
+// Buffer to store RmMsg string. This is stored in bss
+// so it can be updated in the debugger dynamically.
+//
+char RmMsg[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE];
+
+//
+// nvDbgRmMsgCheck
+// Override the priority of a debug printf based on its file or function, with
+// optional line ranges. Rules are matched against each printf. Rules are
+// applied left to right, and the final result is the cumulative result of all
+// rules.
+//
+// Format
+//   rule = [!][filename|function][:startline][-endline][@level][^prefix]
+//   Format = rule[,rule]
+//
+// See RmMsg wiki for detailed documentation
+
+// Examples:
+//   "dmanv50.c" - enable all printfs in dmanv50.c
+//   "fifoAlloc_NV50" - enable all printfs in function fifoAlloc_NV50
+//   "!fifoAlloc_NV50" - disable all printfs in function fifoAlloc_NV50
+//   "dmanv50.c:150" - enable printf on line 150 of dmanv50.c
+//   "dmanv50.c:100-200" - enable printfs on lines 100-200 of dmanv50.c
+//   "dmanv50.c:100-200,!dmanv50:125" - same, but disable the printf on line 125
+//   "fifo^*" - enable the verbose prefix for fifo
+//   ":" - enable all printfs
+//   "!" - disable all printfs (dangerous!)
+//
+NvU32
+nvDbgRmMsgCheck
+(
+    const char * filename,
+    NvU32 linenumber,
+    const char * function,
+    NvU32 debuglevel,
+    const char * printf_format,
+    NvU32 * pPrefix
+)
+{
+    enum { NOUN, STARTLINE, ENDLINE, LEVEL, PREFIX } state;
+    int status = NVRM_MSG_NORMAL;
+    int inc;
+    char *noun;
+    NvU32 nounlen;
+    NvU32 startline;
+    NvU32 endline;
+    NvU32 level;
+    NvU32 prefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION;
+    NvU32 tempPrefix;
+    char *p;
+
+    // Handle the normal case quickly.
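+    // (An empty RmMsg means no rules were supplied, so the printf keeps its
+    // compile-time level and the default NVRM+function prefix; this is the
+    // common case and is handled without any parsing.)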
+    if (RmMsg[0] == '\0')
+    {
+        goto done;
+    }
+
+    p = RmMsg;
+
+    while (*p != '\0')
+    {
+        // Initial default state for this rule
+        inc = 1;
+        noun = NULL;
+        nounlen = 0;
+        startline = 0;
+        endline = 0x7fffffff;
+        tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION;
+        level = LEVEL_INFO; // default to everything
+        state = NOUN;
+
+        for (; *p != '\0' && *p != ','; p++)
+        {
+            if (*p == ':')
+            {
+                state = STARTLINE;
+                continue;
+            }
+            else if (*p == '-')
+            {
+                state = ENDLINE;
+                endline = 0;
+                continue;
+            }
+            else if (*p == '!' && !noun)
+            {
+                state = NOUN;
+                inc = 0;
+                continue;
+            }
+            else if (*p == '@')
+            {
+                state = LEVEL;
+                level = 0;
+                continue;
+            }
+            else if (*p == '^')
+            {
+                state = PREFIX;
+                tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION;
+                continue;
+            }
+            switch (state)
+            {
+                case NOUN:
+                    if (noun == NULL)
+                    {
+                        noun = p;
+                    }
+                    nounlen++;
+                    break;
+                case STARTLINE:
+                    if ((*p >= '0') && (*p <= '9'))
+                    {
+                        startline *= 10;
+                        startline += *p - '0';
+                        endline = startline; // only one line
+                    }
+                    break;
+                case ENDLINE:
+                    if ((*p >= '0') && (*p <= '9'))
+                    {
+                        endline *= 10;
+                        endline += *p - '0';
+                    }
+                    break;
+                case LEVEL:
+                    if ((*p >= '0') && (*p <= '9'))
+                    {
+                        level *= 10;
+                        level += *p - '0';
+                    }
+                    break;
+                case PREFIX:
+                    switch (*p)
+                    {
+                        case '*':
+                            tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FILE |
+                                         NVRM_MSG_PREFIX_LINE | NVRM_MSG_PREFIX_FUNCTION |
+                                         NVRM_MSG_PREFIX_OSTIMESTAMP;
+                            break;
+                        case 'n':
+                            tempPrefix |= NVRM_MSG_PREFIX_NVRM;
+                            break;
+                        case 'N':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_NVRM;
+                            break;
+                        case 'c':
+                            tempPrefix |= NVRM_MSG_PREFIX_FILE;
+                            break;
+                        case 'C':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_FILE;
+                            break;
+                        case 'l':
+                            tempPrefix |= NVRM_MSG_PREFIX_LINE;
+                            break;
+                        case 'L':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_LINE;
+                            break;
+                        case 'f':
+                            tempPrefix |= NVRM_MSG_PREFIX_FUNCTION;
+                            break;
+                        case 'F':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_FUNCTION;
+                            break;
+                        case 't':
+                            tempPrefix |= NVRM_MSG_PREFIX_OSTIMESTAMP;
+                            break;
+                        case 'T':
+                            tempPrefix &= ~NVRM_MSG_PREFIX_OSTIMESTAMP;
+                            break;
+                    }
+                    break;
+                default: // ignore any trailing words
+                    break;
+            }
+        }
+
+        // Does this rule hit?
+        if (((nv_strnstr(filename, noun, nounlen) != NULL) ||
+             (nv_strnstr(function, noun, nounlen) != NULL)) &&
+            (linenumber >= startline) &&
+            (linenumber <= endline) &&
+            (debuglevel >= level))
+        {
+            status = inc ? NVRM_MSG_PRINT : NVRM_MSG_HIDE;
+            prefix = tempPrefix;
+        }
+
+        if (*p == '\0')
+        {
+            break;
+        }
+        p++;
+    }
+
+done:
+    if (pPrefix != NULL)
+    {
+        *pPrefix = prefix;
+    }
+
+    return status;
+}
+
+//
+// RmMsgPrefix - Add the RmMsg prefix to the passed-in string, returning
+// the length of the formatted string.
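+// For example (illustrative; the fields emitted depend on the prefix mask),
+// a full mask produces something like "NVRM nvdbg.c 123 nvDbgPrintf 12.345678: "
+// ahead of the caller's message.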
+//
+// Format: "NVRM file linenum function timestamp: "
+//
+NvU32
+RmMsgPrefix
+(
+    NvU32 prefix,
+    const char *filename,
+    NvU32 linenumber,
+    const char *function,
+    char *str,
+    NvU32 totalLen
+)
+{
+    const char *space = "";
+    NvU32 len = 0;
+    NvU32 sec, usec;
+
+    *str = '\0';
+
+    if (prefix & NVRM_MSG_PREFIX_NVRM)
+    {
+        portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX));
+        len += sizeof(NV_PRINTF_PREFIX) - 1;
+        space = " ";
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_FILE)
+    {
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%s", space, filename);
+        space = " ";
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_LINE)
+    {
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%d", space, linenumber);
+        space = " ";
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_FUNCTION)
+    {
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%s", space, function);
+        space = " ";
+    }
+
+    if (prefix & NVRM_MSG_PREFIX_OSTIMESTAMP)
+    {
+        osGetCurrentTime(&sec, &usec);
+
+        len += nvDbgSnprintf(str + len, totalLen - len, "%s%d.%06d", space, sec, usec);
+    }
+
+    return len;
+}
+
+//
+// Initialize RmMsg from the registry. Skip if the string was already
+// initialized (from the debugger).
+// Called from the platform-specific code.
+//
+void nvDbgInitRmMsg(OBJGPU *pGpu)
+{
+    NvU32 len = NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE;
+
+    if (RmMsg[0] == '\0')
+    {
+        if (osReadRegistryString(pGpu, NV_REG_STR_RM_MSG,
+                                 (NvU8*)RmMsg, &len) != NV_OK)
+        {
+            len = NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE;
+        }
+    }
+}
+
+#else // #else ! NV_PRINTF_STRINGS_ALLOWED
+
+void nvDbgInitRmMsg(OBJGPU *pGpu)
+{
+}
+
+NvU32
+nvDbgRmMsgCheck
+(
+    const char * filename,
+    NvU32 linenumber,
+    const char * function,
+    NvU32 debuglevel,
+    const char * printf_format,
+    NvU32 * pPrefix
+)
+{
+    return NVRM_MSG_HIDE;
+}
+
+#endif // #if NV_PRINTF_STRINGS_ALLOWED
+
+/*!
+ * @brief Does a byte-by-byte dump of the buffer passed in.
+ *
+ * @param[in] pBuffer Pointer to the buffer to dump.
+ * @param[in] length  Length of the buffer to dump (in # of bytes).
+ */
+void
+nvDbgDumpBufferBytes
+(
+    void *pBuffer,
+    NvU32 length
+)
+{
+    NvU8 *s = (NvU8 *)pBuffer;
+    NvU32 remainingBytes = length % 16;
+    NvU32 i;
+
+    NV_PRINTF(LEVEL_ERROR,
+              " x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf\n");
+
+    for (i = 0; i < (length / 16); i++)
+    {
+
+        NV_PRINTF(LEVEL_ERROR,
+                  "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+                  s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8],
+                  s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
+
+        s += 16;
+    }
+
+    /*
+     * 16-statement switch, so that these are added to nvlog correctly.
+     */
+    switch (remainingBytes)
+    {
+        case 0:
+        default:
+            break;
+        case 1:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x .. .. .. .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0]);
+            break;
+        case 2:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x .. .. .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0], s[1]);
+            break;
+        case 3:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x %02x .. .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0], s[1], s[2]);
+            break;
+        case 4:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0], s[1], s[2], s[3]);
+            break;
+        case 5:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. .. ..\n",
+                      s, s[0], s[1], s[2], s[3], s[4]);
+            break;
+        case 6:
+            NV_PRINTF(LEVEL_ERROR,
+                      "%p %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. 
..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5]); + break; + case 7: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6]); + break; + case 8: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7]); + break; + case 9: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8]); + break; + case 10: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9]); + break; + case 11: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10]); + break; + case 12: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11]); + break; + case 13: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12]); + break; + case 14: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12], s[13]); + break; + case 15: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12], s[13], s[14]); + break; + } +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c new file mode 100644 index 0000000..a21e490 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/****************************** Profiler Routines **************************\
+*                                                                           *
+*       Simple API to measure elapsed times in RM for profiling             *
+*                                                                           *
\***************************************************************************/
+
+#include "diagnostics/profiler.h"
+#include "os/os.h"
+
+static void _rmProfStopTime(RM_PROF_STATS *pStats, NvU64 stop_ns);
+
+/*!
+ * @brief Start measuring elapsed time for a specific profiling module.
+ *
+ * @param[in,out] pStats Profiling stats for the module
+ */
+void
+rmProfStart
+(
+    RM_PROF_STATS *pStats
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    if (pStats->start_ns != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Starting time measurement that is already started\n");
+        //
+        // No breakpoint since this isn't fatal by itself.
+        // Most likely there was an error that propagated up the stack before
+        // the measurement was stopped on the last cycle.
+        //
+        // In that case, restarting the measurement is probably the right thing
+        // to do anyway.
+        //
+    }
+    osGetPerformanceCounter(&pStats->start_ns);
+}
+
+/*!
+ * @brief Stop measuring elapsed time for a specific profiling module and
+ *        update the module's statistics.
+ *
+ * @param[in,out] pStats Profiling stats for the module
+ */
+void
+rmProfStop
+(
+    RM_PROF_STATS *pStats
+)
+{
+    NvU64 stop_ns;
+
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    osGetPerformanceCounter(&stop_ns);
+    _rmProfStopTime(pStats, stop_ns);
+}
+
+/*!
+ * Same as #rmProfStop but parameterized by the stop time.
+ */
+static void
+_rmProfStopTime
+(
+    RM_PROF_STATS *pStats,
+    NvU64 stop_ns
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    if (pStats->start_ns == 0)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Stopping time measurement that is already stopped\n");
+        DBG_BREAKPOINT();
+        //
+        // Breakpoint since this case is more serious - something is likely
+        // wrong with the profiling code. Also return early so the bogus
+        // measurement is not recorded.
+        //
+        return;
+    }
+    RM_PROF_RECORD(pStats, stop_ns - pStats->start_ns);
+    pStats->start_ns = 0;
+}
+
+/*!
+ * @brief Manually update the statistics for one cycle of a specific profiling
+ *        module.
+ *
+ * @param[in,out] pStats  Profiling stats for the module
+ * @param[in]     time_ns Elapsed time in nanoseconds for this cycle.
+ */
+void
+rmProfRecord
+(
+    RM_PROF_STATS *pStats,
+    NvU64 time_ns
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pStats != NULL);
+
+    if (pStats->count == 0 || time_ns < pStats->min_ns)
+    {
+        pStats->min_ns = time_ns;
+    }
+    if (pStats->count == 0 || time_ns > pStats->max_ns)
+    {
+        pStats->max_ns = time_ns;
+    }
+    pStats->total_ns += time_ns;
+    pStats->count += 1;
+}
+
+/*!
+ * @brief Start measuring time for the specified profiling group (begin a new cycle).
+ *
+ * @param[out]    pGroup Profiling group structure to be used.
+ * @param[in,out] pTotal Optional stats for the whole group duration (may be NULL).
+ * @param[in,out] pFirst First module of the group.
+ */
+void
+rmProfGroupStart
+(
+    RM_PROF_GROUP *pGroup,
+    RM_PROF_STATS *pTotal,
+    RM_PROF_STATS *pFirst
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pGroup != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pFirst != NULL);
+
+    // Start profiling the first module.
+    RM_PROF_START(pFirst);
+
+    // Reuse the first module's start time for the total module.
+    if (pTotal != NULL)
+    {
+        pTotal->start_ns = pFirst->start_ns;
+    }
+
+    // Initialize the group structure.
+    pGroup->pTotal = pTotal;
+    pGroup->pLast = pFirst;
+}
+
+/*!
+ * @brief Continue profiling the next module of a profiling group.
+ *
+ * @param[in,out] pGroup Profiling group.
+ * @param[in,out] pNext  Next module of the group.
+ */
+void
+rmProfGroupNext
+(
+    RM_PROF_GROUP *pGroup,
+    RM_PROF_STATS *pNext
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pGroup != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pGroup->pLast != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pNext != NULL);
+
+    // Start profiling the next module.
+    RM_PROF_START(pNext);
+
+    // Reuse the next module's start time as the last module's stop time.
+    _rmProfStopTime(pGroup->pLast, pNext->start_ns);
+
+    // Update the group structure.
+    pGroup->pLast = pNext;
+}
+
+/*!
+ * @brief Stop profiling a cycle of a profiling group (ends both the last and total modules).
+ *
+ * @param[in,out] pGroup Profiling group.
+ */
+void
+rmProfGroupStop
+(
+    RM_PROF_GROUP *pGroup
+)
+{
+    NvU64 stop_ns;
+
+    NV_ASSERT_OR_RETURN_VOID(pGroup != NULL);
+    NV_ASSERT_OR_RETURN_VOID(pGroup->pLast != NULL);
+
+    osGetPerformanceCounter(&stop_ns);
+
+    // Reuse the same stop time for both the last and the total module.
+    _rmProfStopTime(pGroup->pLast, stop_ns);
+    if (pGroup->pTotal != NULL)
+    {
+        _rmProfStopTime(pGroup->pTotal, stop_ns);
+    }
+
+    // Clear the group structure.
+    pGroup->pTotal = NULL;
+    pGroup->pLast = NULL;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
new file mode 100644
index 0000000..71e77d8
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
@@ -0,0 +1,83 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +/***************************** HW State Routines ***************************\ +* * +* Implementation specific Descriptor List management functions * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/eng_desc.h" +#include "g_allclasses.h" + +// See gpuChildOrderList_GM200 for documentation +static const GPUCHILDORDER +gpuChildOrderList_T234D[] = +{ + {classId(OBJDCECLIENTRM), GCO_ALL}, + {classId(MemorySystem), GCO_ALL}, + {classId(KernelMemorySystem), GCO_ALL}, + {classId(MemoryManager), GCO_ALL}, + {classId(OBJDCB), GCO_ALL}, + {classId(OBJDISP), GCO_ALL}, + {classId(KernelDisplay), GCO_ALL}, + {classId(OBJDPAUX), GCO_ALL}, + {classId(OBJI2C), GCO_ALL}, + {classId(OBJGPIO), GCO_ALL}, + {classId(OBJHDACODEC), GCO_ALL}, +}; + +// See gpuChildrenPresent_GM200 for documentation on GPUCHILDPRESENT +static const GPUCHILDPRESENT gpuChildrenPresent_T234D[] = +{ + {classId(OBJDCECLIENTRM), 1}, + {classId(OBJDISP), 1}, + {classId(KernelDisplay), 1}, + {classId(MemorySystem), 1}, + {classId(KernelMemorySystem), 1}, + {classId(MemoryManager), 1}, + {classId(OBJDPAUX), 1}, + {classId(OBJI2C), 1}, + {classId(OBJGPIO), 1}, + {classId(OBJTMR), 1}, + {classId(OBJHDACODEC), 1}, + {classId(OBJDCB), 1}, +}; + + +const GPUCHILDORDER * +gpuGetChildrenOrder_T234D(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildOrderList_T234D); + return gpuChildOrderList_T234D; +} + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_T234D(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildrenPresent_T234D); + return gpuChildrenPresent_T234D; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c new file mode 100644 index 0000000..b3af417 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/audio/hda_codec_api.h" + +NV_STATUS hdacodecConstruct_IMPL +( + Hdacodec *pHdacodecApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c new file mode 100644 index 0000000..a74ba13 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c @@ -0,0 +1,259 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/dce_client/dce_client.h" + +ROOT roots[MAX_RM_CLIENTS]; +DEVICE devices[MAX_RM_CLIENTS]; +SUBDEVICE subdevices[MAX_RM_CLIENTS]; +DISPLAY_COMMON display; +DISPLAY_SW displaySW; +DISPLAY_SW_EVENT displaySWEventHotplug; +DISPLAY_SW_EVENT displaySWEventDPIRQ; +DISPLAY_HPD_CTRL displayCtrlHotplug; +DISPLAY_HPD_CTRL displayCtrlDPIRQ; +DISPLAY_DP_SET_MANUAL displayCtrlDPSetManual; + +NV_STATUS +dceclientConstructEngine_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient, + ENGDESCRIPTOR engDesc +) +{ + NV_PRINTF(LEVEL_INFO, "dceclientConstructEngine_IMPL Called\n"); + + return dceclientInitRpcInfra(pGpu, pDceClient); +} + +NV_STATUS +dceclientStateLoad_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient, + NvU32 flags +) +{ + NV_STATUS nvStatus = NV_OK; + NV_PRINTF(LEVEL_INFO, "dceclientStateLoad_IMPL Called\n"); + + if (!(flags & GPU_STATE_FLAGS_PM_TRANSITION)) + return NV_OK; + + nvStatus = dceclientInitRpcInfra(pGpu, pDceClient); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "dceclientInitRpcInfra failed\n"); + goto out; + } + + nvStatus = dceclientDceRmInit(pGpu, GPU_GET_DCECLIENTRM(pGpu), NV_TRUE); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot load DCE firmware RM\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 i = 0; + + for (i = 0; i < MAX_RM_CLIENTS; i++) + { + if (roots[i].valid) + { + nvStatus = rpcRmApiAlloc_dce(pRmApi, roots[i].hClient, roots[i].hParent, + roots[i].hObject, roots[i].hClass, &roots[i].rootAllocParams); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot alloc roots[%u] object during resume\n",i); + nvStatus = NV_ERR_GENERIC; + goto out; + } + } + + if (devices[i].valid) + { + nvStatus = rpcRmApiAlloc_dce(pRmApi, devices[i].hClient, devices[i].hParent, + devices[i].hObject, devices[i].hClass, &devices[i].deviceAllocParams); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot alloc devices[%u] object during resume\n",i); + nvStatus = NV_ERR_GENERIC; + goto out; + } + } + + if (subdevices[i].valid) + { + nvStatus = rpcRmApiAlloc_dce(pRmApi, subdevices[i].hClient, subdevices[i].hParent, + subdevices[i].hObject, subdevices[i].hClass, &subdevices[i].subdeviceAllocParams); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot alloc subdevices[%u] object during resume\n",i); + nvStatus = NV_ERR_GENERIC; + goto out; + } + } + } + + if (display.valid) + { + nvStatus = rpcRmApiAlloc_dce(pRmApi, display.hClient, display.hParent, + display.hObject, display.hClass, &display.displayCommonAllocParams); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot alloc display_common object during resume\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + } + + } + + if (displaySW.valid) + { + nvStatus = rpcRmApiAlloc_dce(pRmApi, displaySW.hClient, displaySW.hParent, + displaySW.hObject, displaySW.hClass, &displaySW.displaySWAllocParams); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot alloc displaySW object during resume\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + } + } + if (displaySWEventHotplug.valid) + { + nvStatus = rpcRmApiAlloc_dce(pRmApi, displaySWEventHotplug.hClient, displaySWEventHotplug.hParent, + displaySWEventHotplug.hObject, displaySWEventHotplug.hClass, &displaySWEventHotplug.displaySWEventAllocParams); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot alloc displaySWEventHotplug object during resume\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + 
} + else if(displayCtrlHotplug.valid) + { + nvStatus = rpcRmApiControl_dce(pRmApi, displayCtrlHotplug.hClient, displayCtrlHotplug.hObject, NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &displayCtrlHotplug.setEventParams,sizeof(displayCtrlHotplug.setEventParams)); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "rpcRmApiControl_dce for displayCtrlHotplug failed during resume\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + } + } + } + if (displaySWEventDPIRQ.valid) + { + nvStatus = rpcRmApiAlloc_dce(pRmApi, displaySWEventDPIRQ.hClient, displaySWEventDPIRQ.hParent, + displaySWEventDPIRQ.hObject, displaySWEventDPIRQ.hClass, &displaySWEventDPIRQ.displaySWEventAllocParams); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot alloc displaySWEventDPIRQ object during resume\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + } + else if(displayCtrlDPIRQ.valid) + { + nvStatus = rpcRmApiControl_dce(pRmApi, displayCtrlDPIRQ.hClient, displayCtrlDPIRQ.hObject, NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &displayCtrlDPIRQ.setEventParams,sizeof(displayCtrlDPIRQ.setEventParams)); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "rpcRmApiControl_dce displaySWEventDPIRQ object failed\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + } + } + } + if (displayCtrlDPSetManual.valid) + { + nvStatus = rpcRmApiControl_dce(pRmApi, displayCtrlDPSetManual.hClient, displayCtrlDPSetManual.hObject, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT, + &displayCtrlDPSetManual.setManualParams, sizeof(displayCtrlDPSetManual.setManualParams)); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "rpcRmApiControl_dce displayCtrlDPSetManual object failed\n"); + nvStatus = NV_ERR_GENERIC; + goto out; + } + } + } + +out: + return nvStatus; +} + +NV_STATUS +dceclientStateUnload_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient, + NvU32 flags +) +{ + NV_STATUS nvStatus = NV_OK; + NV_PRINTF(LEVEL_INFO, "dceclientStateUnload_IMPL Called\n"); + + if (!(flags & GPU_STATE_FLAGS_PM_TRANSITION)) + return NV_OK; + + nvStatus = dceclientDceRmInit(pGpu, GPU_GET_DCECLIENTRM(pGpu), NV_FALSE); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot unload DCE firmware RM\n"); + } + + dceclientDeinitRpcInfra(pDceClient); + + return nvStatus; +} + +void +dceclientStateDestroy_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient +) +{ + NvU32 i = 0; + + NV_PRINTF(LEVEL_INFO, "Destroy DCE Client Object Called\n"); + + dceclientDeinitRpcInfra(pDceClient); + + for (i = 0; i < MAX_RM_CLIENTS; i++) + { + roots[i].valid = NV_FALSE; + devices[i].valid = NV_FALSE; + subdevices[i].valid = NV_FALSE; + } + display.valid = NV_FALSE; + displaySW.valid = NV_FALSE; + displaySWEventHotplug.valid = NV_FALSE; + displaySWEventDPIRQ.valid = NV_FALSE; + displayCtrlHotplug.valid = NV_FALSE; + displayCtrlDPIRQ.valid = NV_FALSE; + displayCtrlDPSetManual.valid = NV_FALSE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c new file mode 100644 index 0000000..2359a19 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c @@ -0,0 +1,859 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/dce_client/dce_client.h" +#include "os/dce_rm_client_ipc.h" + +#include "os/os.h" + +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "class/cl0073.h" +#include "class/clc670.h" +#include "class/clc67b.h" +#include "class/clc67d.h" +#include "class/clc67e.h" +#include "class/cl84a0.h" +#include "class/clc372sw.h" + +#include + +#include "gpu/disp/kern_disp.h" + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + +#define RPC_MESSAGE_STRUCTURES +#define RPC_MESSAGE_GENERIC_UNION +#include "g_rpc-message-header.h" +#undef RPC_MESSAGE_STRUCTURES +#undef RPC_MESSAGE_GENERIC_UNION + +#define DCE_MAX_RPC_MSG_SIZE 4096 + +extern ROOT roots[MAX_RM_CLIENTS]; +extern DEVICE devices[MAX_RM_CLIENTS]; +extern SUBDEVICE subdevices[MAX_RM_CLIENTS]; +extern DISPLAY_COMMON display; +extern DISPLAY_SW displaySW; +extern DISPLAY_SW_EVENT displaySWEventHotplug; +extern DISPLAY_SW_EVENT displaySWEventDPIRQ; +extern DISPLAY_HPD_CTRL displayCtrlHotplug; +extern DISPLAY_HPD_CTRL displayCtrlDPIRQ; +extern DISPLAY_DP_SET_MANUAL displayCtrlDPSetManual; + +NV_STATUS +dceclientInitRpcInfra_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient +) +{ + NV_STATUS nvStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "Init RPC Infra Called\n"); + + pDceClient->pRpc = initRpcObject(pGpu); + if (pDceClient->pRpc == NULL) + { + NV_PRINTF(LEVEL_ERROR, "initRpcObject failed\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pDceClient->pRpc->maxRpcSize = DCE_MAX_RPC_MSG_SIZE; + + // Register Synchronous IPC client for RPC to DCE RM + pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC] = 0; + nvStatus = osTegraDceRegisterIpcClient(DCE_CLIENT_RM_IPC_TYPE_SYNC, + NULL, + &pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC]); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Register dce ipc client failed for DCE_CLIENT_RM_IPC_TYPE_SYNC error 0x%x\n", + nvStatus); + goto ipc_register_fail; + } + + NV_PRINTF(LEVEL_INFO, "Registered dce ipc client DCE_CLIENT_RM_IPC_TYPE_SYNC handle: 0x%x\n", + pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC]); + + // Register Asynchronous IPC client for event notification from DCE RM + pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_EVENT] = 0; + nvStatus = osTegraDceRegisterIpcClient(DCE_CLIENT_RM_IPC_TYPE_EVENT, + pGpu, + 
&pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_EVENT]); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Register dce ipc client failed for DCE_CLIENT_RM_IPC_TYPE_EVENT error 0x%x\n", + nvStatus); + goto ipc_register_fail; + } + NV_PRINTF(LEVEL_INFO, "Register dce ipc client DCE_CLIENT_RM_IPC_TYPE_EVENT: 0x%x\n", + pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_EVENT]); + +ipc_register_fail: + if (nvStatus != NV_OK) + { + dceclientDeinitRpcInfra(pDceClient); + } + + return nvStatus; +} + +NV_STATUS +dceclientDceRmInit_IMPL +( + OBJGPU *pGpu, + DceClient *pDceClient, + NvBool bInit +) +{ + NV_STATUS nvStatus = NV_OK; + + NV_RM_RPC_DCE_RM_INIT(pGpu, bInit, nvStatus); + + return nvStatus; +} + +void +dceclientDeinitRpcInfra_IMPL +( + DceClient *pDceClient +) +{ + NvU32 i = 0; + + NV_PRINTF(LEVEL_INFO, "Free RPC Infra Called\n"); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pDceClient->clientId); i++) + { + osTegraDceUnregisterIpcClient(pDceClient->clientId[i]); + } + + portMemFree(pDceClient->pRpc); + pDceClient->pRpc = NULL; +} + +NV_STATUS +dceclientSendRpc_IMPL +( + DceClient *pDceClient, + void *msgData, + NvU32 msgLength +) +{ + NV_STATUS nvStatus = NV_OK; + NvU32 clientId = pDceClient->clientId[DCE_CLIENT_RM_IPC_TYPE_SYNC]; + + NV_PRINTF(LEVEL_INFO, "Send RPC Called, clientid used 0x%x\n", clientId); + + if (msgData) + { + nvStatus = osTegraDceClientIpcSendRecv(clientId, msgData, msgLength); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Send RPC failed for clientId %u error %u\n", clientId, nvStatus); + return nvStatus; + } + } + + return nvStatus; +} + +NV_STATUS +dceclientReceiveMsg_IMPL +( + DceClient *pDceClient +) +{ + NV_STATUS nvStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "Receive Message Called\n"); + + return nvStatus; +} + +NV_STATUS +dceclientSendMsg_IMPL +( + DceClient *pDceClient +) +{ + NV_STATUS nvStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "Send Message Called\n"); + + return nvStatus; +} + +static inline rpc_message_header_v *_dceRpcGetMessageHeader(OBJRPC *pRpc) +{ + return ((rpc_message_header_v*)(pRpc->message_buffer)); +} + +static inline rpc_generic_union *_dceRpcGetMessageData(OBJRPC *pRpc) +{ + return _dceRpcGetMessageHeader(pRpc)->rpc_message_data; +} + +static inline NV_STATUS _dceRpcGetRpcResult(OBJRPC *pRpc) +{ + return _dceRpcGetMessageHeader(pRpc)->rpc_result; +} + +/** + * Prints the header info when _INFO level is enabled. + */ +static void _dceclientrmPrintHdr +( + OBJRPC *pRpc +) +{ + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE : [msg-buf:0x%p] header_version = 0x%x signature = 0x%x " + "length = 0x%x function = 0x%x rpc_result = 0x%x\n", pRpc->message_buffer, + _dceRpcGetMessageHeader(pRpc)->header_version, _dceRpcGetMessageHeader(pRpc)->signature, + _dceRpcGetMessageHeader(pRpc)->length, _dceRpcGetMessageHeader(pRpc)->function, + _dceRpcGetMessageHeader(pRpc)->rpc_result); +} + +/** + * Allocate memory for rpc message + * TODO : Change static allocation of 4K to + * a better dynamic allocation + */ +static NV_STATUS _dceRpcAllocateMemory +( + OBJRPC *pRpc +) +{ + NvU32 *message_buffer; + + message_buffer = portMemAllocNonPaged(DCE_MAX_RPC_MSG_SIZE); + if (message_buffer == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Cannot allocate memory for message_buffer\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pRpc->message_buffer = message_buffer; + + return NV_OK; +} + +static void _dceRpcFreeMemory +( + OBJRPC *pRpc +) +{ + portMemFree(pRpc->message_buffer); + pRpc->message_buffer = NULL; +} + +/** + * Send RPC msg and check the result. 
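+ *
+ * dceclientSendRpc() hands the message buffer to the synchronous DCE IPC
+ * client (osTegraDceClientIpcSendRecv), so by the time it returns the reply
+ * has been written back into the same message buffer and the caller can read
+ * rpc_result from the header.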
+ */ +static NV_STATUS +_dceRpcIssueAndWait +( + RM_API *pRmApi +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + rpc_message_header_v* message_header = NULL; + DceClient *pDceclientrm = GPU_GET_DCECLIENTRM(pGpu); + + message_header = _dceRpcGetMessageHeader(pRpc); + if (message_header) + { + _dceclientrmPrintHdr(pRpc); + + status = dceclientSendRpc(pDceclientrm, message_header, message_header->length); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Error while issuing RPC [0x%x]\n", status); + goto done; + } + } + +done: + return status; +} + +void dceclientHandleAsyncRpcCallback +( + NvU32 handle, + NvU32 interfaceType, + NvU32 msgLength, + void *data, + void *usrCtx +) +{ + NV_PRINTF(LEVEL_INFO, "dceclientHandleAsyncRpcCallback called\n"); + + rpc_message_header_v *msg_hdr = NULL; + rpc_generic_union *rpc_msg_data = NULL; + OBJGPU *pGpu = (OBJGPU *)usrCtx; + + NV_ASSERT_OR_RETURN_VOID(interfaceType == DCE_CLIENT_RM_IPC_TYPE_EVENT); + NV_ASSERT_OR_RETURN_VOID(pGpu != NULL && data != NULL); + + msg_hdr = (rpc_message_header_v *)data; + rpc_msg_data = msg_hdr->rpc_message_data; + + switch (msg_hdr->function) + { + case NV_VGPU_MSG_EVENT_POST_EVENT: + { + rpc_post_event_v *rpc_params = &rpc_msg_data->post_event_v; + + if (rpc_params->bNotifyList) + { + gpuNotifySubDeviceEvent(pGpu, rpc_params->notifyIndex, + rpc_params->eventData, + rpc_params->eventDataSize, 0, 0); + } + else + { + PEVENTNOTIFICATION pNotifyList = NULL; + PEVENTNOTIFICATION pNotifyEvent = NULL; + Event *pEvent = NULL; + NV_STATUS nvStatus = NV_OK; + + // Get the notification list that contains this event. + NV_ASSERT(CliGetEventInfo(rpc_params->hClient, + rpc_params->hEvent, &pEvent)); + + if (pEvent->pNotifierShare != NULL) + pNotifyList = pEvent->pNotifierShare->pEventList; + + NV_ASSERT(pNotifyList != NULL); + + // Send event to a specific hEvent. Find hEvent in the notification list. 
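+                // (The list is singly linked through pNotifyEvent->Next; if no
+                // entry matches rpc_params->hEvent, the loop exits with
+                // pNotifyEvent == NULL and the NV_ASSERT below fires.)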
+ for (pNotifyEvent = pNotifyList; pNotifyEvent != NULL; pNotifyEvent = pNotifyEvent->Next) + { + if (pNotifyEvent->hEvent == rpc_params->hEvent) + { + nvStatus = osNotifyEvent(pGpu, pNotifyEvent, 0, + rpc_params->data, rpc_params->status); + if (nvStatus != NV_OK) + NV_PRINTF(LEVEL_ERROR, "osNotifyEvent failed with status: %x\n",nvStatus); + break; + } + } + NV_ASSERT(pNotifyEvent != NULL); + } + return; + } + case NV_VGPU_MSG_EVENT_RG_LINE_INTR: + { + rpc_rg_line_intr_v *rpc_params = &rpc_msg_data->rg_line_intr_v; + + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pKernelDisplay != NULL); + + kdispInvokeRgLineCallback(pKernelDisplay, rpc_params->head, rpc_params->rgIntr, NV_FALSE); + return; + } + case NV_VGPU_MSG_EVENT_DISPLAY_MODESET: + { + rpc_display_modeset_v *rpc_params = &rpc_msg_data->display_modeset_v; + + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pKernelDisplay != NULL); + + kdispInvokeDisplayModesetCallback(pKernelDisplay, + rpc_params->bModesetStart, + rpc_params->minRequiredIsoBandwidthKBPS, + rpc_params->minRequiredFloorBandwidthKBPS); + return; + } + default: + { + NV_PRINTF(LEVEL_ERROR, "Unexpected RPC function 0x%x\n", msg_hdr->function); + NV_ASSERT_FAILED("Unexpected RPC function"); + return; + } + } +} + +NV_STATUS rpcRmApiControl_dce +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParamStructPtr, + NvU32 paramsSize +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + rpc_gsp_rm_control_v *rpc_params = NULL; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { }; + NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS setManualParams = {0}; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE : Prepare and send RmApiControl RPC\n"); + + status = _dceRpcAllocateMemory(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Memory Allocation Failed\n"); + goto done; + } + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->gsp_rm_control_v; + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, + (sizeof(rpc_gsp_rm_control_v) + + paramsSize)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->hClient = hClient; + rpc_params->hObject = hObject; + rpc_params->cmd = cmd; + rpc_params->paramsSize = paramsSize; + portMemCopy(rpc_params->params, paramsSize,pParamStructPtr, paramsSize); + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + if (cmd == NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION) + { + portMemCopy(&setEventParams, paramsSize,pParamStructPtr, paramsSize); + if (setEventParams.event == NV2080_NOTIFIERS_HOTPLUG) + { + displayCtrlHotplug.hClient = rpc_params->hClient; + displayCtrlHotplug.hObject = rpc_params->hObject; + portMemCopy(&displayCtrlHotplug.setEventParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + displayCtrlHotplug.valid = NV_TRUE; + } + if (setEventParams.event == NV2080_NOTIFIERS_DP_IRQ) + { + displayCtrlDPIRQ.hClient = rpc_params->hClient; + displayCtrlDPIRQ.hObject = rpc_params->hObject; + portMemCopy(&displayCtrlDPIRQ.setEventParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + displayCtrlDPIRQ.valid = NV_TRUE; + } + } + if (cmd == 
NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT) + { + portMemCopy(&setManualParams, paramsSize,pParamStructPtr, paramsSize); + displayCtrlDPSetManual.hClient = rpc_params->hClient; + displayCtrlDPSetManual.hObject = rpc_params->hObject; + portMemCopy(&displayCtrlDPSetManual.setManualParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + displayCtrlDPSetManual.valid = NV_TRUE; + } + } + + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status == NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RM ctrl call cmd:0x%x not supported\n", cmd); + } + else if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM ctrl call cmd:0x%x result 0x%x:\n", cmd, status); + } + + portMemCopy(pParamStructPtr, paramsSize, rpc_params->params, paramsSize); + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for GSP RM Control Successful\n"); + +done: + if (pRpc->message_buffer) + _dceRpcFreeMemory(pRpc); + return status; +} + +NV_STATUS rpcRmApiAlloc_dce +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + rpc_gsp_rm_alloc_v *rpc_params; + NV_STATUS status; + NvU32 paramsSize; + NvBool bNullAllowed; + NV0005_ALLOC_PARAMETERS displaySWEventAllocParams; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: Prepare and send RmApiAlloc RPC\n"); + + NV_ASSERT_OK_OR_GOTO(status, + _dceRpcAllocateMemory(pRpc), + done); + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->gsp_rm_alloc_v; + + NV_ASSERT_OK_OR_GOTO(status, + rmapiGetClassAllocParamSize(¶msSize, pAllocParams, &bNullAllowed, hClass), + done); + + if (pAllocParams == NULL && !bNullAllowed) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: NULL allocation params not allowed for class 0x%x\n", hClass); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, + (sizeof(rpc_gsp_rm_alloc_v) + paramsSize)); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->hClient = hClient; + rpc_params->hParent = hParent; + rpc_params->hObject = hObject; + rpc_params->hClass = hClass; + rpc_params->paramsSize = pAllocParams ? 
paramsSize : 0; + portMemCopy(rpc_params->params, rpc_params->paramsSize, pAllocParams, paramsSize); + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + if (hClass == NV01_ROOT) + { + NvU32 i = 0; + + for (i = 0; i < MAX_RM_CLIENTS; i++) + { + if (!roots[i].valid) + { + roots[i].hClient = rpc_params->hClient; + roots[i].hParent = rpc_params->hParent; + roots[i].hObject = rpc_params->hObject; + roots[i].hClass = rpc_params->hClass; + portMemCopy(&roots[i].rootAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + roots[i].valid = NV_TRUE; + break; + } + } + } + + if (hClass == NV01_DEVICE_0) + { + NvU32 i=0; + + for (i = 0; i < MAX_RM_CLIENTS; i++) + { + if (!devices[i].valid) + { + devices[i].hClient = rpc_params->hClient; + devices[i].hParent = rpc_params->hParent; + devices[i].hObject = rpc_params->hObject; + devices[i].hClass = rpc_params->hClass; + portMemCopy(&devices[i].deviceAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + devices[i].valid = NV_TRUE; + break; + } + } + } + + if (hClass == NV20_SUBDEVICE_0) + { + NvU32 i = 0; + + for (i = 0; i < MAX_RM_CLIENTS; i++) + { + if (!subdevices[i].valid) + { + subdevices[i].hClient = rpc_params->hClient; + subdevices[i].hParent = rpc_params->hParent; + subdevices[i].hObject = rpc_params->hObject; + subdevices[i].hClass = rpc_params->hClass; + portMemCopy(&subdevices[i].subdeviceAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + subdevices[i].valid = NV_TRUE; + break; + } + } + } + + if (hClass == NV04_DISPLAY_COMMON) + { + display.hClient = rpc_params->hClient; + display.hParent = rpc_params->hParent; + display.hObject = rpc_params->hObject; + display.hClass = rpc_params->hClass; + portMemCopy(&display.displayCommonAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + display.valid = NV_TRUE; + } + + if (hClass == NVC372_DISPLAY_SW) + { + displaySW.hClient = rpc_params->hClient; + displaySW.hParent = rpc_params->hParent; + displaySW.hObject = rpc_params->hObject; + displaySW.hClass = rpc_params->hClass; + portMemCopy(&displaySW.displaySWAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + displaySW.valid = NV_TRUE; + } + + if (hClass == NV01_EVENT_KERNEL_CALLBACK_EX) + { + portMemCopy(&displaySWEventAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + if (0x4000001 == displaySWEventAllocParams.notifyIndex) + { + displaySWEventHotplug.hClient = rpc_params->hClient; + displaySWEventHotplug.hParent = rpc_params->hParent; + displaySWEventHotplug.hObject = rpc_params->hObject; + displaySWEventHotplug.hClass = rpc_params->hClass; + portMemCopy(&displaySWEventHotplug.displaySWEventAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + displaySWEventHotplug.valid = NV_TRUE; + } + + if (0x4000007 == displaySWEventAllocParams.notifyIndex) + { + displaySWEventDPIRQ.hClient = rpc_params->hClient; + displaySWEventDPIRQ.hParent = rpc_params->hParent; + displaySWEventDPIRQ.hObject = rpc_params->hObject; + displaySWEventDPIRQ.hClass = rpc_params->hClass; + portMemCopy(&displaySWEventDPIRQ.displaySWEventAllocParams, rpc_params->paramsSize, rpc_params->params, rpc_params->paramsSize); + displaySWEventDPIRQ.valid = NV_TRUE; + } + } + } + + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, 
"NVRM_RPC_DCE: Failed RM Alloc Object result 0x%x:\n", status); + } + + // Deserialize the response + // We do not deserialize the variable length data as we do not expect it to be modified + if (paramsSize > 0) + { + portMemCopy(pAllocParams, paramsSize, rpc_params->params, rpc_params->paramsSize); + } + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for GSP RM Alloc Successful\n"); + +done: + if (pRpc->message_buffer) + _dceRpcFreeMemory(pRpc); + return status; +} + +NV_STATUS rpcRmApiFree_dce(RM_API *pRmApi, NvHandle hClient, NvHandle hObject) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + NVOS00_PARAMETERS_v *rpc_params; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE Free " + "RPC Called for hClient: 0x%x\n", hClient); + + status = _dceRpcAllocateMemory(pRpc); + NV_ASSERT_OK_OR_RETURN(status); + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->free_v.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_FREE, + sizeof(rpc_free_v)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->hRoot = hClient; + rpc_params->hObjectParent = NV01_NULL_OBJECT; + rpc_params->hObjectOld = hObject; + + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM Free Object result 0x%x:\n", status); + } + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for Free Successful\n"); + +done: + if (pRpc->message_buffer) + _dceRpcFreeMemory(pRpc); + + return status; +} + +NV_STATUS rpcRmApiDupObject_dce +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + NVOS55_PARAMETERS_v03_00 *rpc_params = NULL; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE Dup Object " + "RPC Called for hClient: 0x%x\n", hClient); + + + status = _dceRpcAllocateMemory(pRpc); + NV_ASSERT_OK_OR_RETURN(status); + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->dup_object_v.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_DUP_OBJECT, + sizeof(rpc_dup_object_v)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->hClient = hClient; + rpc_params->hParent = hParent; + rpc_params->hObject = *phObject; + rpc_params->hClientSrc = hClientSrc; + rpc_params->hObjectSrc = hObjectSrc; + rpc_params->flags = flags; + + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM Dup Object result 0x%x:\n", status); + } + + NV_PRINTF(LEVEL_INFO, "NVRM_RPC_DCE: RPC for DUP OBJECT Successful\n"); + +done: + portMemFree(pRpc->message_buffer); + return status; +} + +NV_STATUS +rpcDceRmInit_dce +( + RM_API *pRmApi, + NvBool bInit +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_generic_union *msg_data; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + rpc_dce_rm_init_v *rpc_params = NULL; + + NV_PRINTF(LEVEL_INFO, 
"NVRM_RPC_DCE RPC to trigger %s called\n", + bInit ? "RmInitAdapter" : "RmShutdownAdapter"); + + status = _dceRpcAllocateMemory(pRpc); + NV_ASSERT_OK_OR_RETURN(status); + + msg_data = _dceRpcGetMessageData(pRpc); + rpc_params = &msg_data->dce_rm_init_v; + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_DCE_RM_INIT, + sizeof(rpc_dce_rm_init_v)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Writing RPC Header Failed [0x%x]\n", status); + goto done; + } + + rpc_params->bInit = bInit; + status = _dceRpcIssueAndWait(pRmApi); + if (status != NV_OK) + { + goto done; + } + + status = _dceRpcGetRpcResult(pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_RPC_DCE: Failed RM init/deinit result 0x%x:\n", status); + } + +done: + portMemFree(pRpc->message_buffer); + return status; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c new file mode 100644 index 0000000..db700c3 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c @@ -0,0 +1,582 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This is a device resource implementation. 
+* +******************************************************************************/ + + + +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/device/device.h" + +#include "class/cl0080.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" + +#include "rmapi/rs_utils.h" +#include "nvsecurityinfo.h" + +static NV_STATUS _deviceTeardown(Device *pDevice, CALL_CONTEXT *pCallContext); +static NV_STATUS _deviceTeardownRef(Device *pDevice, CALL_CONTEXT *pCallContext); + +NV_STATUS +deviceConstruct_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV0080_ALLOC_PARAMETERS *pNv0080AllocParams = pParams->pAllocParams; + NvU32 deviceInst, flags, vaMode; + NvU32 deviceClass = pParams->externalClassId; + NvHandle hClientShare; + NvHandle hTargetClient = NV01_NULL_OBJECT; + NvHandle hTargetDevice = NV01_NULL_OBJECT; + NvU64 vaSize = 0; + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJGPU *pGpu; + NvU64 vaStartInternal = 0; + NvU64 vaLimitInternal = 0; + NvU32 physicalAllocFlags; + + if (pNv0080AllocParams == NULL) + { + deviceInst = pParams->externalClassId - NV01_DEVICE_0; + hClientShare = NV01_NULL_OBJECT; + flags = 0; + vaSize = 0; + vaMode = 0; + } + else + { + deviceInst = pNv0080AllocParams->deviceId; + hClientShare = pNv0080AllocParams->hClientShare; + hTargetClient = pNv0080AllocParams->hTargetClient; + hTargetDevice = pNv0080AllocParams->hTargetDevice; + flags = pNv0080AllocParams->flags; + vaSize = pNv0080AllocParams->vaSpaceSize; + vaMode = pNv0080AllocParams->vaMode; + + // valid only if NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS is flagged. + if (flags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + vaStartInternal = pNv0080AllocParams->vaStartInternal; + vaLimitInternal = pNv0080AllocParams->vaLimitInternal; + + if ((vaLimitInternal < vaStartInternal) || (vaLimitInternal == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + } + + // validate device instance + if (gpumgrIsDeviceInstanceValid(deviceInst) != NV_OK) + { + return NV_ERR_INVALID_CLASS; + } + + // Make sure this device has not been disabled + if (gpumgrIsDeviceEnabled(deviceInst) == NV_FALSE) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // add new device to client and set the device context + rmStatus = deviceInit(pDevice, pCallContext, pParams->hClient, pParams->hResource, deviceInst, + hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, + flags, vaMode); + if (rmStatus != NV_OK) + return rmStatus; + + pGpu = GPU_RES_GET_GPU(pDevice); + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + if (!osIsGpuAccessible(pGpu)) + { + // Delete the device from the client since we should not be allocating it + _deviceTeardownRef(pDevice, pCallContext); + _deviceTeardown(pDevice, pCallContext); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + // + // Make sure this device is not in fullchip reset on OSes where it is + // restricted. 
+ // + if (pOS->getProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + // Delete the device from the client since we should not be allocating it + _deviceTeardownRef(pDevice, pCallContext); + _deviceTeardown(pDevice, pCallContext); + return NV_ERR_GPU_IN_FULLCHIP_RESET; + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + physicalAllocFlags = flags & ~(NV_DEVICE_ALLOCATION_FLAGS_PLUGIN_CONTEXT + | NV_DEVICE_ALLOCATION_FLAGS_HOST_VGPU_DEVICE); + + NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, pParams->hParent, pParams->hResource, pDevice->hClientShare, + hTargetClient, hTargetDevice, deviceClass, physicalAllocFlags, vaSize, vaMode, rmStatus); + if (rmStatus != NV_OK) + { + return rmStatus; + } + } + + return rmStatus; +} // end of deviceConstruct_IMPL + +void +deviceDestruct_IMPL +( + Device *pDevice +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + NV_STATUS rmStatus = NV_OK; + NV_STATUS tmpStatus; + NvHandle hClient; + NODE *pNode; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + resGetFreeParams(staticCast(pDevice, RsResource), &pCallContext, &pParams); + + hClient = pCallContext->pClient->hClient; + + NV_PRINTF(LEVEL_INFO, " type: device\n"); + + LOCK_METER_DATA(FREE_DEVICE, 0, 0, 0); + + // Free all device memory + btreeEnumStart(0, &pNode, pDevice->DevMemoryTable); + while (pNode != NULL) + { + Memory *pMemory = pNode->Data; + btreeEnumNext(&pNode, pDevice->DevMemoryTable); + + tmpStatus = pRmApi->Free(pRmApi, hClient, RES_GET_HANDLE(pMemory)); + if ((tmpStatus != NV_OK) && (rmStatus == NV_OK)) + rmStatus = tmpStatus; + } + + // free the device + if (_deviceTeardownRef(pDevice, pCallContext) != NV_OK || + _deviceTeardown(pDevice, pCallContext) != NV_OK) + { + tmpStatus = NV_ERR_INVALID_OBJECT_HANDLE; + if (tmpStatus != NV_OK && rmStatus == NV_OK) + rmStatus = tmpStatus; + } + + // + // If the client was created, but never had any devices successfully + // attached, we'll get here. 
The client's device structure will have + // been created, but pGpu will be NULL if the device was later found + // to be non-existent + // + if (GPU_RES_GET_GPU(pDevice)) + { + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + // vGpu support + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvHandle hDevice = pResourceRef->hResource; + + if (rmStatus == NV_OK) + { + NV_RM_RPC_FREE(pGpu, hClient, hClient, hDevice, rmStatus); + } + + if (rmStatus != NV_OK) + { + pParams->status = rmStatus; + return; + } + + NV_RM_RPC_FREE(pGpu, hClient, NV01_NULL_OBJECT, hClient, rmStatus); + } + } +} // end of deviceDestruct_IMPL + +NV_STATUS +deviceControl_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + + // + // Some assertions to make RMCTRL to NVOC migration smooth + // Those will be removed at the end of ctrl0080.def migration + // + NV_ASSERT_OR_RETURN(pParams->hClient == RES_GET_CLIENT_HANDLE(pDevice), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParams->hObject == RES_GET_HANDLE(pDevice), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParams->hParent == RES_GET_PARENT_HANDLE(pDevice), NV_ERR_INVALID_STATE); + + pParams->pGpuGrp = GPU_RES_GET_GPUGRP(pDevice); + return gpuresControl_IMPL(staticCast(pDevice, GpuResource), + pCallContext, pParams); +} + +NV_STATUS +deviceInternalControlForward_IMPL +( + Device *pDevice, + NvU32 command, + void *pParams, + NvU32 size +) +{ + return gpuresInternalControlForward_IMPL(staticCast(pDevice, GpuResource), command, pParams, size); +} + +// +// add a device with specified handle, instance num, within a specified client +// (hClientShare also specified) +// +NV_STATUS +deviceInit_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + NvHandle hClient, + NvHandle hDevice, + NvU32 deviceInst, + NvHandle hClientShare, + NvHandle hTargetClient, + NvHandle hTargetDevice, + NvU64 vaSize, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 allocFlags, + NvU32 vaMode +) +{ + OBJGPU *pGpu; + NV_STATUS status; + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + Device *pExistingDevice; + NvU32 gpuInst; + + if (deviceInst >= NV_MAX_DEVICES) + return NV_ERR_INVALID_ARGUMENT; + + // Check if device inst already allocated, fail if this call succeeds + status = deviceGetByInstance(pCallContext->pClient, deviceInst, &pExistingDevice); + if (status == NV_OK) + { + // + // RS-TODO: Status code should be NV_ERR_STATE_IN_USE, however keeping + // existing code from CliAllocElement (for now) + // + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Look up GPU and GPU Group + gpuInst = gpumgrGetPrimaryForDevice(deviceInst); + + if ((pGpu = gpumgrGetGpu(gpuInst)) == NULL) + { + return NV_ERR_INVALID_STATE; + } + + pDevice->hTargetClient = hTargetClient; + pDevice->hTargetDevice = hTargetDevice; + pDevice->pHostVgpuDevice = NULL; + pDevice->pKernelHostVgpuDevice = NULL; + + pDevice->deviceInst = deviceInst; + + // Update VA Mode + pDevice->vaMode = vaMode; + + gpuresSetGpu(pGpuResource, pGpu, NV_TRUE); + + status = deviceSetClientShare(pDevice, hClientShare, vaSize, + vaStartInternal, vaLimitInternal, allocFlags); + if (NV_OK != status) + goto done; + +done: + if (status != NV_OK) + { + deviceRemoveFromClientShare(pDevice); + } + + return status; +} // end of deviceInit_IMPL() + +// +// delete a device with a specified handle within a client +// +static NV_STATUS +_deviceTeardown +( + Device *pDevice, + CALL_CONTEXT *pCallContext +) +{ + OBJGPU *pGpu 
= GPU_RES_GET_GPU(pDevice); + PORT_UNREFERENCED_VARIABLE(pGpu); + + deviceRemoveFromClientShare(pDevice); + + return NV_OK; +} + +static NV_STATUS _deviceTeardownRef +( + Device *pDevice, + CALL_CONTEXT *pCallContext +) +{ + + return NV_OK; +} + +NV_STATUS +deviceGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hDevice, + Device **ppDevice +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppDevice = NULL; + + status = clientGetResourceRef(pClient, hDevice, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDevice = dynamicCast(pResourceRef->pResource, Device); + + return (*ppDevice) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +deviceGetByInstance_IMPL +( + RsClient *pClient, + NvU32 deviceInstance, + Device **ppDevice +) +{ + RS_ITERATOR it; + Device *pDevice; + + *ppDevice = NULL; + + it = clientRefIter(pClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(it.pClient, &it)) + { + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + if ((pDevice != NULL) && (deviceInstance == pDevice->deviceInst)) + { + *ppDevice = pDevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +deviceGetByGpu_IMPL +( + RsClient *pClient, + OBJGPU *pGpu, + NvBool bAnyInGroup, + Device **ppDevice +) +{ + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + NV_STATUS status; + + status = deviceGetByInstance(pClient, deviceInstance, ppDevice); + if (status != NV_OK) + return status; + + // If pGpu is not the primary GPU return failure + if (!bAnyInGroup && pGpu != GPU_RES_GET_GPU(*ppDevice)) + { + *ppDevice = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * WARNING: This function is deprecated! Please use deviceGetByHandle. + */ +Device * +CliGetDeviceInfo +( + NvHandle hClient, + NvHandle hDevice +) +{ + RsClient *pClient; + NV_STATUS status; + Device *pDevice; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + + return (status == NV_OK) ? pDevice : NULL; +} + +/** + * WARNING: This function is deprecated and use is *strongly* discouraged + * (especially for new code!) + * + * From the function name (CliSetGpuContext) it appears as a simple accessor but + * violates expectations by modifying the SLI BC threadstate (calls to + * GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully managed + * by the caller. + * + * Instead of using this routine, please use deviceGetByHandle then call + * GPU_RES_GET_GPU, GPU_RES_GET_GPUGRP, GPU_RES_SET_THREAD_BC_STATE as needed. + * + * Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. 
That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS +CliSetGpuContext +( + NvHandle hClient, + NvHandle hDevice, + OBJGPU **ppGpu, + OBJGPUGRP **ppGpuGrp +) +{ + Device *pDevice; + RsClient *pClient; + NV_STATUS status; + + if (ppGpuGrp != NULL) + *ppGpuGrp = NULL; + + if (ppGpu != NULL) + *ppGpu = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + if (status != NV_OK) + return status; + + if (ppGpu != NULL) + *ppGpu = GPU_RES_GET_GPU(pDevice); + + if (ppGpuGrp != NULL) + *ppGpuGrp = GPU_RES_GET_GPUGRP(pDevice); + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + return NV_OK; +} + +/** + * WARNING: This function is deprecated! Please use gpuGetByRef() + */ +POBJGPU +CliGetGpuFromContext +( + RsResourceRef *pContextRef, + NvBool *pbBroadcast +) +{ + NV_STATUS status; + OBJGPU *pGpu; + + status = gpuGetByRef(pContextRef, pbBroadcast, &pGpu); + + return (status == NV_OK) ? pGpu : NULL; +} + +/** + * WARNING: This function is deprecated! Please use gpuGetByHandle() + */ +POBJGPU +CliGetGpuFromHandle +( + NvHandle hClient, + NvHandle hResource, + NvBool *pbBroadcast +) +{ + RsClient *pClient; + NV_STATUS status; + OBJGPU *pGpu; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = gpuGetByHandle(pClient, hResource, pbBroadcast, &pGpu); + + return (status == NV_OK) ? pGpu : NULL; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c new file mode 100644 index 0000000..f9fc8f8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c @@ -0,0 +1,271 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * device (NV01_DEVICE_0) class. Device-level control calls + * are broadcasted to all GPUs within the device. 
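+ * + * Controls tagged NO_GPUS_ACCESS (see the comments below) must not touch + * hardware, so RM can service them even while the GPU is unavailable; the + * handlers here only assert that the API lock is held on entry.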
+ */ + +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "core/system.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" + + + +// +// This rmctrl MUST NOT touch hw since it's tagged as NO_GPUS_ACCESS in ctrl0080.def +// RM allow this type of rmctrl to go through when GPU is not available. +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetClasslist_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpuGetClassList(pGpu, &pClassListParams->numClasses, + NvP64_VALUE(pClassListParams->classList), ENG_INVALID); +} + +// +// This rmctrl MUST NOT touch hw since it's tagged with flag NO_GPUS_ACCESS in device.h +// RM allow this type of rmctrl to go through when GPU is not available. +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetClasslistV2_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pClassListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pClassListParams->numClasses = NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE; + + return gpuGetClassList(pGpu, &pClassListParams->numClasses, + pClassListParams->classList, ENG_INVALID); +} + +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetNumSubdevices_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams +) +{ + pSubDeviceCountParams->numSubDevices = 1; + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 gpuMask, index; + NvBool bEnable; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJGPU *pTmpGpu; + + if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED == + pParams->newState) + { + bEnable = NV_TRUE; + } + else if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED == + pParams->newState) + { + bEnable = NV_FALSE; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the gpuMask for the device pGpu belongs to + gpuMask = gpumgrGetGpuMask(pGpu); + + index = 0; + while ((pTmpGpu = gpumgrGetNextGpu(gpuMask, &index)) != NULL) + { + if (bEnable) + { + pGpuMgr->persistentSwStateGpuMask |= NVBIT(pTmpGpu->gpuInstance); + pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, + NV_TRUE); + } + else + { + pGpuMgr->persistentSwStateGpuMask &= ~NVBIT(pTmpGpu->gpuInstance); + pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, + NV_FALSE); + } + + // Set/Clear OS-specific persistence flags + osModifyGpuSwStatePersistence(pTmpGpu->pOsGpuInfo, bEnable); + } + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE)) + { + pParams->swStatePersistence = + NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED; + } + else + { + pParams->swStatePersistence = + NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED; + } + + return NV_OK; +} + +/*! + * @brief This Command is used to get the virtualization mode of GPU. GPU + * can be in NMOS/VGX/host-vGPU/host-vSGA mode. 
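+ * + * The mode is derived solely from the IS_VIRTUAL()/IS_PASSTHRU() checks in + * the handler below; no hardware access is performed.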
+ * + * @return Returns NV_STATUS + * NV_OK If GPU is present. + * NV_ERR_INVALID_ARGUMENT If GPU is not present. + */ +NV_STATUS +deviceCtrlCmdGpuGetVirtualizationMode_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (IS_VIRTUAL(pGpu)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX; + } + else if (IS_PASSTHRU(pGpu)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS; + } + else + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE; + } + + NV_PRINTF(LEVEL_INFO, "Virtualization Mode: %x\n", + pParams->virtualizationMode); + + return NV_OK; +} + +/*! + * @brief This command is used to find a subdevice handle by subdeviceinst + */ +NV_STATUS +deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams +) +{ + NV_STATUS status; + Subdevice *pSubdevice; + + status = subdeviceGetByInstance(RES_GET_CLIENT(pDevice), + RES_GET_HANDLE(pDevice), + pParams->subDeviceInst, + &pSubdevice); + + if (status == NV_OK) + { + pParams->hSubDevice = RES_GET_HANDLE(pSubdevice); + } + + return status; +} + +/* + * @brief Request per-VF BAR1 resizing and, subsequently, the number + * of VFs that can be created. The request will take a per-VF + * BAR1 size in MB and calculate the number of possible VFs + * + * @param[in] pParams NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS + * pointer detailing the per-VF BAR1 size and + * number of VFs + */ + +NV_STATUS +deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + return gpuSetVFBarSizes_HAL(pGpu, pParams); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c new file mode 100644 index 0000000..4271934 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c @@ -0,0 +1,310 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "os/os.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "core/system.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu_mgr/gpu_group.h" +#include "class/cl00f2.h" // IO_VASPACE_A +#include "rmapi/rs_utils.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "gpu_mgr/gpu_mgr.h" + +/*! + * @brief Save client share allocation information for this device + * + * Save client share allocation information for this device. The + * client share is actually allocated as a result of CliGetVASpace() + * before the VAShare is actually used. + * + * @param[in] pDevice + * @param[in] hClientShare RM client specified share handle + * @param[in] deviceAllocFlags Allocation flags from RM client + * + * @returns NV_STATUS + */ +NV_STATUS +deviceSetClientShare_IMPL +( + Device *pDevice, + NvHandle hClientShare, + NvU64 vaSize, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 deviceAllocFlags +) +{ + pDevice->pVASpace = NULL; + pDevice->hClientShare = hClientShare; + pDevice->deviceAllocFlags = deviceAllocFlags; + pDevice->deviceInternalAllocFlags = 0; + pDevice->vaSize = vaSize; + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + pDevice->vaStartInternal = vaStartInternal; + pDevice->vaLimitInternal = vaLimitInternal; + } + + if ((deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE) && (vaSize == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +/*! + * @brief Initialize the device VASPACE + */ +static NV_STATUS +deviceInitClientShare +( + Device *pDevice, + NvHandle hClientShare, + NvU64 vaSize, + NvU32 deviceAllocFlags, + NvU32 deviceAllocInternalFlags +) +{ + Device *pShareDevice; + RsClient *pClientShare; + OBJVASPACE *pVAS = NULL; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU32 gpuMask = gpumgrGetGpuMask(pGpu); + NvU32 vaspaceClass = 0; + + pDevice->pVASpace = NULL; + + // Set broadcast state for thread + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + // + // Share "default" behavior is defined by "share w/null", which + // attaches to the global address space. + // + if (hClientShare == NV01_NULL_OBJECT) + { + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + status = gpugrpGetGlobalVASpace(pGpuGrp, &pVAS); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + vaspaceIncRefCnt(pVAS); + status = NV_OK; + } + + // + // "Force a new share" behavior is defined by "share w/myself" + // + else if (hClientShare == RES_GET_CLIENT_HANDLE(pDevice)) + { + NvU32 flags = VASPACE_FLAGS_DEFAULT_PARAMS; + NvU64 vaLimit; + + flags |= (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SHARED_MANAGEMENT) ? 
+ VASPACE_FLAGS_SHARED_MANAGEMENT : 0; + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE) + { + flags |= VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE; + } + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS) + { + flags |= VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS; + } + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE) + { + vaLimit = pDevice->vaSize - 1; + } + else + { + flags |= VASPACE_FLAGS_DEFAULT_SIZE; // only needed for Tesla + vaLimit = 0; + } + + if ( (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k) && + (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k) ) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k) + { + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + else if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k) + { + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + else + { + // will cause it to use the default size + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + flags |= VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS; + NV_ASSERT(pDevice->vaStartInternal); + NV_ASSERT(pDevice->vaLimitInternal); + } + else + { + NV_ASSERT(!pDevice->vaStartInternal); + NV_ASSERT(!pDevice->vaLimitInternal); + } + + // + // NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED will be removed once CUDA phases out + // and uses the ctrl call NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE + // to set privileged address space + // + if ((deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED) + ) + { + flags |= VASPACE_FLAGS_SET_MIRRORED; + } + if (NULL == GPU_GET_KERNEL_GMMU(pGpu) && (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY) || IsDFPGA(pGpu))) + vaspaceClass = IO_VASPACE_A; + else if (vaspaceClass == 0) + { + NV_ASSERT(0); + return NV_ERR_OBJECT_NOT_FOUND; + } + + flags |= VASPACE_FLAGS_ENABLE_VMM; + + // + // Page tables are allocated in guest subheap only inside non SRIOV guests + // and on host RM. + // + if (!gpuIsSplitVasManagementServerClientRmEnabled(pGpu) || + !IS_VIRTUAL(pGpu)) + { + flags |= VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR; + } + + status = vmmCreateVaspace(pVmm, vaspaceClass, 0, gpuMask, 0, + vaLimit, pDevice->vaStartInternal, + pDevice->vaLimitInternal, NULL, flags, &pVAS); + if (NV_OK != status) + { + NV_ASSERT(0); + return status; + } + } + + // + // Try to attach to another clients VA Share. Validate client and pull the + // share information off the first device. + // + else + { + status = serverGetClientUnderLock(&g_resServ, hClientShare, &pClientShare); + if (status != NV_OK) + return status; + + // + // If the share client doesn't have a device allocated for this GPU, + // there's no address space to share. + // + status = deviceGetByInstance(pClientShare, pDevice->deviceInst, &pShareDevice); + if (status != NV_OK) + return status; + + // Init target share if needed + if (pShareDevice->pVASpace == NULL) + { + status = deviceInitClientShare(pShareDevice, + pShareDevice->hClientShare, + pShareDevice->vaSize, + pShareDevice->deviceAllocFlags, + pShareDevice->deviceInternalAllocFlags); + if (status != NV_OK) + return status; + } + + pVAS = pShareDevice->pVASpace; + vaspaceIncRefCnt(pVAS); + } + + pDevice->pVASpace = pVAS; + return status; +} + + +/*! 
+ * @brief Detach this pDevice from the share group + */ +void +deviceRemoveFromClientShare_IMPL +( + Device *pDevice +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + + if (pDevice->pVASpace != NULL) + { + vmmDestroyVaspace(pVmm, pDevice->pVASpace); + pDevice->pVASpace = NULL; + } +} + +NV_STATUS +deviceGetDefaultVASpace_IMPL +( + Device *pDevice, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + + // + // There are some cases in SLI transitions where we allocate + // a device before the hal is initialized. + // + if (pDevice->pVASpace == NULL) + { + status = deviceInitClientShare(pDevice, + pDevice->hClientShare, + pDevice->vaSize, + pDevice->deviceAllocFlags, + pDevice->deviceInternalAllocFlags); + } + + *ppVAS = pDevice->pVASpace; + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c new file mode 100644 index 0000000..c265625 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c @@ -0,0 +1,279 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" + +#include "disp/v03_00/dev_disp.h" + +#include "class/clc371.h" +#include "class/clc373.h" + +NV_STATUS +kdispGetChannelNum_v03_00 +( + KernelDisplay *pKernelDisplay, + DISPCHNCLASS channelClass, + NvU32 channelInstance, + NvU32 *pChannelNum +) +{ + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + + if (pChannelNum == NULL) + return NV_ERR_INVALID_ARGUMENT; + + const KernelDisplayStaticInfo *pStaticInfo = pKernelDisplay->pStaticInfo; + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + + switch (channelClass) + { + case dispChnClass_Curs: + if ((channelInstance < NV_PDISP_CHN_NUM_CURS__SIZE_1) && + (channelInstance < NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS__SIZE_1)) + { + if (FLD_IDX_TEST_DRF(_PDISP, _FE_HW_SYS_CAP, _HEAD_EXISTS, channelInstance, _YES, pStaticInfo->feHwSysCap)) + { + *pChannelNum = NV_PDISP_CHN_NUM_CURS(channelInstance); + status = NV_OK; + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + } + break; + + case dispChnClass_Winim: + if (channelInstance < NV_PDISP_CHN_NUM_WINIM__SIZE_1) + { + if (pStaticInfo->windowPresentMask & NVBIT32(channelInstance)) + { + *pChannelNum = NV_PDISP_CHN_NUM_WINIM(channelInstance); + status = NV_OK; + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + } + break; + + case dispChnClass_Core: + *pChannelNum = NV_PDISP_CHN_NUM_CORE; + status = NV_OK; + break; + + case dispChnClass_Win: + if (channelInstance < NV_PDISP_CHN_NUM_WIN__SIZE_1) + { + if (pStaticInfo->windowPresentMask & NVBIT32(channelInstance)) + { + *pChannelNum = NV_PDISP_CHN_NUM_WIN(channelInstance); + status = NV_OK; + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + } + break; + + case dispChnClass_Any: + // Assert in case of physical RM; the Any channel is a kernel-only channel. + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_INVALID_CHANNEL); + *pChannelNum = NV_PDISP_CHN_NUM_ANY; + status = NV_OK; + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", channelClass); + status = NV_ERR_INVALID_CHANNEL; + DBG_BREAKPOINT(); + break; + } + + return status; +} + +/*! + * @brief Get the register base address for display capabilities registers + * + * @param pGpu + * @param pKernelDisplay + * @param[out] pOffset NvU32 pointer to return base offset + * @param[out] pSize NvU32 pointer to return size + */ +void +kdispGetDisplayCapsBaseAndSize_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + { + // Tegra offsets need to be rebased by -0x610000. + *pOffset = DRF_BASE(NV_PDISP_FE_SW) + + kdispGetBaseOffset_HAL(pGpu, pKernelDisplay); + } + + if (pSize) + { + *pSize = sizeof(NvC373DispCapabilities_Map); + } +} + +/*! + * @brief Get the register base address for SF user space. + * + * @param pGpu + * @param pKernelDisplay + * @param[out] pOffset NvU32 pointer to return base offset + * @param[out] pSize NvU32 pointer to return size + */ +void +kdispGetDisplaySfUserBaseAndSize_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + { + // Tegra offsets need to be rebased by -0x610000.
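+ // kdispGetBaseOffset_HAL() supplies that bias as a signed value (for + // example, the v04_02 HAL later in this patch returns 0x0 - DRF_BASE(NV_PDISP)), + // so the addition below produces an offset relative to the Tegra display + // register base.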
+ *pOffset = DRF_BASE(NV_PDISP_SF_USER_0) + + kdispGetBaseOffset_HAL(pGpu, pKernelDisplay); + } + + if (pSize) + { + *pSize = sizeof(NvC371DispSfUserMap); + } +} + +/*! + * @brief Get the register base address and size of channel user area + * + * @param pGpu + * @param pKernelDisplay + * @param[in] channelClass Class of the channel + * @param[in] channelInstance Channel instance # + * @param[out] pOffset User space base address + * @param[out] pSize User space length (optional) + * + * @return NV_STATUS + */ +NV_STATUS +kdispGetDisplayChannelUserBaseAndSize_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + DISPCHNCLASS channelClass, + NvU32 channelInstance, + NvU32 *pOffset, + NvU32 *pSize +) +{ + NvU32 dispChannelNum; + NV_STATUS status; + + if (pOffset == NULL) + return NV_ERR_INVALID_ARGUMENT; + + status = kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, &dispChannelNum); + if (status != NV_OK) + return status; + + NV_ASSERT(dispChannelNum < NV_UDISP_FE_CHN_ASSY_BASEADR__SIZE_1); + + *pOffset = NV_UDISP_FE_CHN_ASSY_BASEADR(dispChannelNum); + + // + // The user area size for the Core channel is 64KB (32K for Armed and 32K for Assembly), + // and all other channels are 4KB (2K for Armed and 2K for Assembly). + // + if (pSize != NULL) + { + switch (channelClass) + { + case dispChnClass_Curs: + *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(dispChannelNum); + break; + + case dispChnClass_Winim: + *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(dispChannelNum); + break; + + case dispChnClass_Core: + *pSize = (NV_UDISP_FE_CHN_ARMED_BASEADR_CORE - NV_UDISP_FE_CHN_ASSY_BASEADR_CORE) * 2; + break; + + case dispChnClass_Win: + *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(dispChannelNum); + break; + + default: + break; + } + } + + return NV_OK; +} + +/*! + * @brief Validate selected sw class. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelDisplay KernelDisplay object pointer + * @param[in] swClass Selected class name + */ +NV_STATUS +kdispSelectClass_v03_00_KERNEL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 swClass +) +{ + if (!gpuIsClassSupported(pGpu, swClass)) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", swClass); + return NV_ERR_INVALID_CLASS; + } + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c new file mode 100644 index 0000000..7ff235e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c @@ -0,0 +1,148 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "disp/v04_02/dev_disp.h" + +/*! + * @brief Return base offset for NV_PDISP that needs to be adjusted + * for register accesses. + */ +NvS32 kdispGetBaseOffset_v04_02 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay +) +{ + return (0x0 - DRF_BASE(NV_PDISP)); +} + +/*! + * @brief Tracks display bandwidth requests and forwards highest request to ICC + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelDisplay KernelDisplay pointer + * @param[in] iccBwClient Identifies requester + * (DISPLAY_ICC_BW_CLIENT_xxx value) + * @param[in] minRequiredIsoBandwidthKBPS ISO BW requested (KB/sec) + * @param[in] minRequiredFloorBandwidthKBPS dramclk freq * pipe width (KB/sec) + * + * @returns NV_OK if successful, + * NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too + * high, and bandwidth cannot be allocated, + * NV_ERR_INVALID_PARAMETER if iccBwClient is not a valid value, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * NV_ERR_GENERIC if some other kind of error occurred. + */ +NV_STATUS +kdispArbAndAllocDisplayBandwidth_v04_02 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + enum DISPLAY_ICC_BW_CLIENT iccBwClient, + NvU32 minRequiredIsoBandwidthKBPS, + NvU32 minRequiredFloorBandwidthKBPS +) +{ +typedef struct +{ + NvU32 minRequiredIsoBandwidthKBPS; + NvU32 minRequiredFloorBandwidthKBPS; +} ICC_BW_VALUES; + + static ICC_BW_VALUES clientBwValues[NUM_DISPLAY_ICC_BW_CLIENTS] = {0}; + static ICC_BW_VALUES oldArbBwValues = {0}; + ICC_BW_VALUES newArbBwValues; + NV_STATUS status = NV_OK; + + NV_PRINTF(LEVEL_INFO, "%s requests ISO BW = %u KBPS, floor BW = %u KBPS\n", + (iccBwClient == DISPLAY_ICC_BW_CLIENT_RM) ? "RM" : + (iccBwClient == DISPLAY_ICC_BW_CLIENT_EXT) ? 
"Ext client" : + "Unknown client", + minRequiredIsoBandwidthKBPS, + minRequiredFloorBandwidthKBPS); + if (iccBwClient >= NUM_DISPLAY_ICC_BW_CLIENTS) + { + NV_PRINTF(LEVEL_ERROR, "Bad iccBwClient value (%u)\n", iccBwClient); + NV_ASSERT(NV_FALSE); + return NV_ERR_INVALID_PARAMETER; + } + if (iccBwClient == DISPLAY_ICC_BW_CLIENT_RM) + { + // + // DD should have allocated the required ISO BW prior to the modeset. + // It is not safe for RM to do the allocation because the allocation + // may fail, but the modeset has already started and cannot be aborted. + // (The only reason RM needs to put its ISO BW request in at all is to + // make sure the required BW is maintained until all of the RM work at + // the end of the modeset is done.) + // + NV_ASSERT(minRequiredIsoBandwidthKBPS <= + clientBwValues[DISPLAY_ICC_BW_CLIENT_EXT].minRequiredIsoBandwidthKBPS); + } + clientBwValues[iccBwClient].minRequiredIsoBandwidthKBPS = minRequiredIsoBandwidthKBPS; + clientBwValues[iccBwClient].minRequiredFloorBandwidthKBPS = minRequiredFloorBandwidthKBPS; + // + // Make sure there are only two BW clients; otherwise, we would need a loop + // to process the array elements. + // + ct_assert(NUM_DISPLAY_ICC_BW_CLIENTS <= 2); + newArbBwValues.minRequiredIsoBandwidthKBPS = + NV_MAX(clientBwValues[DISPLAY_ICC_BW_CLIENT_RM].minRequiredIsoBandwidthKBPS, + clientBwValues[DISPLAY_ICC_BW_CLIENT_EXT].minRequiredIsoBandwidthKBPS); + newArbBwValues.minRequiredFloorBandwidthKBPS = + NV_MAX(clientBwValues[DISPLAY_ICC_BW_CLIENT_RM].minRequiredFloorBandwidthKBPS, + clientBwValues[DISPLAY_ICC_BW_CLIENT_EXT].minRequiredFloorBandwidthKBPS); + if ((oldArbBwValues.minRequiredIsoBandwidthKBPS != + newArbBwValues.minRequiredIsoBandwidthKBPS) || + (oldArbBwValues.minRequiredFloorBandwidthKBPS != + newArbBwValues.minRequiredFloorBandwidthKBPS)) + { + NV_PRINTF(LEVEL_INFO, "Sending request to icc_set_bw: ISO BW = %u KBPS, floor BW = %u KBPS\n", + newArbBwValues.minRequiredIsoBandwidthKBPS, + newArbBwValues.minRequiredFloorBandwidthKBPS); + status = + osTegraAllocateDisplayBandwidth(pGpu->pOsGpuInfo, + newArbBwValues.minRequiredIsoBandwidthKBPS, + newArbBwValues.minRequiredFloorBandwidthKBPS); + NV_PRINTF(LEVEL_INFO, "Allocation request returns: %s (0x%08X)\n", + nvAssertStatusToString(status), status); + if (status == NV_OK) + { + oldArbBwValues = newArbBwValues; + } + } + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c new file mode 100644 index 0000000..41316cf --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispCapabilities class. +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" + +#include "gpu/gpu.h" +#include "gpu/disp/disp_capabilities.h" +#include "gpu/disp/kern_disp.h" + +NV_STATUS +dispcapConstruct_IMPL +( + DispCapabilities *pDispCapabilities, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispCapabilities); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + // Set display caps RegBase offsets + kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, + &pDispCapabilities->ControlOffset, + &pDispCapabilities->ControlLength); + + return NV_OK; +} + +NV_STATUS +dispcapGetRegBaseOffsetAndSize_IMPL +( + DispCapabilities *pDispCapabilities, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + { + *pOffset = pDispCapabilities->ControlOffset; + } + if (pSize) + { + *pSize = pDispCapabilities->ControlLength; + } + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c new file mode 100644 index 0000000..e24f074 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c @@ -0,0 +1,781 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispChannel and its derived classes. +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" + +#include "gpu/device/device.h" +#include "gpu/gpu_resource.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "vgpu/rpc.h" + +static void +dispchnParseAllocParams +( + DispChannel *pDispChannel, + void *pAllocParams, + NvU32 *pChannelInstance, + NvHandle *pHObjectBuffer, + NvU32 *pInitialGetPutOffset, + NvBool *pAllowGrabWithinSameClient, + NvBool *pConnectPbAtGrab +) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = NULL; + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = NULL; + + *pAllowGrabWithinSameClient = NV_FALSE; + *pConnectPbAtGrab = NV_FALSE; + + if (pDispChannel->bIsDma) + { + pDmaChannelAllocParams = pAllocParams; + *pChannelInstance = pDmaChannelAllocParams->channelInstance; + *pHObjectBuffer = pDmaChannelAllocParams->hObjectBuffer; + *pInitialGetPutOffset = pDmaChannelAllocParams->offset; + + if (FLD_TEST_DRF(50VAIO_CHANNELDMA_ALLOCATION, _FLAGS, + _CONNECT_PB_AT_GRAB, _YES, + pDmaChannelAllocParams->flags)) + { + *pConnectPbAtGrab = NV_TRUE; + } + + if (pDmaChannelAllocParams->hObjectNotify != 0) + { + NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n"); + } + } + else + { + pPioChannelAllocParams = pAllocParams; + *pChannelInstance = pPioChannelAllocParams->channelInstance; + *pHObjectBuffer = 0; // No one should look at this. So, 0 should be fine. + *pInitialGetPutOffset = 0; // No one should look at this. So, 0 should be fine. 
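+
+        //
+        // For reference, the caller-side PIO request is minimal; a sketch,
+        // with the instance value purely illustrative:
+        //
+        //     NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS pioParams = {0};
+        //     pioParams.channelInstance = 0; // which HW channel instance
+        //     pioParams.hObjectNotify   = 0; // unused; warned about below
+        //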
+ + if (pPioChannelAllocParams->hObjectNotify != 0) + { + NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n"); + } + } +} + +NV_STATUS +dispchnConstruct_IMPL +( + DispChannel *pDispChannel, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvU32 isDma +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NV_STATUS rmStatus = NV_OK; + NvU32 channelInstance; + NvHandle hObjectBuffer; + NvBool bIsDma = !!isDma; + NvU32 initialGetPutOffset; + NvBool allowGrabWithinSameClient; + NvBool connectPbAtGrab; + DISPCHNCLASS internalDispChnClass; + void *pAllocParams = pParams->pAllocParams; + RsResourceRef *pParentRef = RES_GET_REF(pDispChannel)->pParentRef; + DispObject *pDispObject = dynamicCast(pParentRef->pResource, DispObject); + ContextDma *pBufferContextDma = NULL; + NvU32 hClass = RES_GET_EXT_CLASS_ID(pDispChannel); + + NV_ASSERT_OR_RETURN(pDispObject, NV_ERR_INVALID_OBJECT_HANDLE); + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // + // Make sure this channel class is supported on this chip. + // Need to have the check below since, the switch in RmAlloc + // doesn't tell if the current chip supports the class + // + if (!gpuIsClassSupported(pGpu, RES_GET_EXT_CLASS_ID(pDispChannel))) + { + NV_PRINTF(LEVEL_ERROR, "Unsupported class in\n"); + return NV_ERR_INVALID_CLASS; + } + + // Move params into RM's address space + pDispChannel->pDispObject = pDispObject; + pDispChannel->bIsDma = bIsDma; + dispchnParseAllocParams(pDispChannel, pAllocParams, + &channelInstance, + &hObjectBuffer, + &initialGetPutOffset, + &allowGrabWithinSameClient, + &connectPbAtGrab); + + rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay, + RES_GET_EXT_CLASS_ID(pDispChannel), + &internalDispChnClass); + if (rmStatus != NV_OK) + return rmStatus; + + if (internalDispChnClass == dispChnClass_Any) + { + // + // Any channel is kernel only channel, Physical RM doesn't need ANY channel information. + // return from here as ANY channel is constructed. + // + pDispChannel->DispClass = internalDispChnClass; + pDispChannel->InstanceNumber = channelInstance; + return NV_OK; + } + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + { + rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu, + pKernelDisplay, + pDispChannel, + hObjectBuffer, + pBufferContextDma, + hClass, + channelInstance, + internalDispChnClass); + if (rmStatus != NV_OK) + return rmStatus; + } + SLI_LOOP_END + + // Acquire the underlying HW resources + rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay, + pDispChannel, + channelInstance, + hObjectBuffer, + initialGetPutOffset, + allowGrabWithinSameClient, + connectPbAtGrab); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel[0x%x] alloc failed. 
Return status = 0x%x\n", + channelInstance, rmStatus); + + return rmStatus; + } + + // Channel allocation is successful, initialize new channel's data structures + pDispChannel->DispClass = internalDispChnClass; + pDispChannel->InstanceNumber = channelInstance; + dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu); + + // Map memory for parent GPU + rmStatus = kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel); + + // setup to return pControl to client + if (pDispChannel->bIsDma) + { + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams; + pDmaChannelAllocParams->pControl = pDispChannel->pControl; + } + else + { + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams; + pPioChannelAllocParams->pControl = pDispChannel->pControl; + } + + return rmStatus; +} + +// +// Performs grab operation for a channel. +// +// Pre-Volta Linux swapgroups is the only remaining use of channel grabbing. +// Bug 2869820 is tracking the transition of swapgroups from requiring this +// RM feature. +// +NV_STATUS +dispchnGrabChannel_IMPL +( + DispChannel *pDispChannel, + NvHandle hClient, + NvHandle hParent, + NvHandle hChannel, + NvU32 hClass, + void *pAllocParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NvU32 channelInstance; + NvHandle hObjectBuffer; + NvU32 initialGetPutOffset; + NvBool allowGrabWithinSameClient; + NvBool connectPbAtGrab; + ContextDma *pBufferContextDma = NULL; + DISPCHNCLASS internalDispChnClass; + + if (RES_GET_PARENT_HANDLE(pDispChannel) != hParent) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel grab failed because of bad display parent 0x%x\n", + hParent); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // Move params into RM's address space + dispchnParseAllocParams(pDispChannel, pAllocParams, + &channelInstance, + &hObjectBuffer, + &initialGetPutOffset, + &allowGrabWithinSameClient, + &connectPbAtGrab); + + // + // The handle already exists in our DB. 
+ // The supplied params must be same as what we already have with us + // + if (RES_GET_EXT_CLASS_ID(pDispChannel) != hClass || + pDispChannel->InstanceNumber != channelInstance) + { + NV_PRINTF(LEVEL_ERROR, + "Information supplied for handle 0x%x doesn't match that in RM's client DB\n", + hChannel); + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay, + hClass, + &internalDispChnClass); + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + { + rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu, + pKernelDisplay, + pDispChannel, + hObjectBuffer, + pBufferContextDma, + hClass, + channelInstance, + internalDispChnClass); + if (rmStatus != NV_OK) + return rmStatus; + } + SLI_LOOP_END + + // Acquire the underlying HW resources + rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay, + pDispChannel, + channelInstance, + hObjectBuffer, + initialGetPutOffset, + allowGrabWithinSameClient, + connectPbAtGrab); + + // setup to return pControl to client + if (pDispChannel->bIsDma) + { + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams; + pDmaChannelAllocParams->pControl = pDispChannel->pControl; + } + else + { + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams; + pPioChannelAllocParams->pControl = pDispChannel->pControl; + } + + return rmStatus; +} + +NV_STATUS +dispchnGetRegBaseOffsetAndSize_IMPL +( + DispChannel *pDispChannel, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + *pOffset = pDispChannel->ControlOffset; + + if (pSize) + *pSize = pDispChannel->ControlLength; + + return NV_OK; +} + +void +dispchnSetRegBaseOffsetAndSize_IMPL +( + DispChannel *pDispChannel, + OBJGPU *pGpu +) +{ + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + (void)kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay, + pDispChannel->DispClass, + pDispChannel->InstanceNumber, + &pDispChannel->ControlOffset, + &pDispChannel->ControlLength); + + // Tegra offsets needs to be subtracted with -0x610000. + pDispChannel->ControlOffset += kdispGetBaseOffset_HAL(pGpu, pKernelDisplay); +} + +/*! + * @brief Maps channel user area for parent GPU. + */ +NV_STATUS +kdispMapDispChannel_IMPL +( + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pRmClient); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + // + // Only need the map for the parent GPU since we require the client to + // use RmMapMemory for subdevice channel mapping. + // + rmStatus = osMapGPU(pGpu, privLevel, + pDispChannel->ControlOffset, + pDispChannel->ControlLength, + NV_PROTECT_READ_WRITE, + &pDispChannel->pControl, + &pDispChannel->pPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel[0x%x] mapping failed. Return status = 0x%x\n", + pDispChannel->InstanceNumber, rmStatus); + + (void) pRmApi->Free(pRmApi, + RES_GET_CLIENT_HANDLE(pDispChannel), + RES_GET_HANDLE(pDispChannel)); + + return rmStatus; + } + + return NV_OK; +} + +/*! + * @brief Unbinds Context DMAs and unmaps channel user area for the given channel. 
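+ *
+ * The unbind is done before the unmap, and both happen before the caller
+ * (see dispchnDestruct_IMPL) releases the underlying HW channel.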
+ */ +void kdispUnbindUnmapDispChannel_IMPL +( + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pRmClient); + + // Unbind all ContextDmas from this channel + dispchnUnbindAllCtx(pGpu, pDispChannel); + + // Unmap the channel + osUnmapGPU(pGpu->pOsGpuInfo, privLevel, pDispChannel->pControl, + pDispChannel->ControlLength, pDispChannel->pPriv); +} + +void +dispchnDestruct_IMPL +( + DispChannel *pDispChannel +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + LOCK_METER_DATA(FREE_CHANNEL_DISP, pDispChannel->DispClass, 0, 0); + + // + // Before freeing the CORE channel, make sure all satellite channels are + // torn down. This is currently necessary on UNIX to deal with cases + // where X (i.e. the userspace display driver) terminates before other + // RM clients with satellite channel allocations, e.g. OpenGL clients with + // BASE channel allocations. + // + if ((pDispChannel->DispClass == dispChnClass_Core) && + pKernelDisplay->bWarPurgeSatellitesOnCoreFree) + { + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + Device *pDevice; + OBJGPU *pTmpGpu; + DispChannel *pTmpDispChannel; + + NV_ASSERT(gpuIsGpuFullPower(pGpu)); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(it.pClient, &it)) + { + RS_ITERATOR dispIt; + RsResourceRef *pResourceRef; + DispObject *pDispObject; + + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + pTmpGpu = GPU_RES_GET_GPU(pDevice); + if (pTmpGpu != pGpu) + continue; + + rmStatus = dispobjGetByDevice(pRsClient, pDevice, &pDispObject); + if (rmStatus != NV_OK) + continue; + + pResourceRef = RES_GET_REF(pDispObject); + + dispIt = clientRefIter(pRsClient, pResourceRef, classId(DispChannel), RS_ITERATE_CHILDREN, NV_FALSE); + + while (clientRefIterNext(dispIt.pClient, &dispIt)) + { + pTmpDispChannel = dynamicCast(dispIt.pResourceRef->pResource, DispChannel); + + if (pTmpDispChannel->DispClass != dispChnClass_Core) + { + rmStatus = pRmApi->Free(pRmApi, + RES_GET_CLIENT_HANDLE(pTmpDispChannel), + RES_GET_HANDLE(pTmpDispChannel)); + + if (rmStatus == NV_OK) + { + // Client's resource map has been modified, re-snap iterators + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + dispIt = clientRefIter(pRsClient, it.pResourceRef, classId(DispChannel), RS_ITERATE_DESCENDANTS, NV_FALSE); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Failed to free satellite DispChannel 0x%x!\n", + RES_GET_HANDLE(pTmpDispChannel)); + } + } + } + } + } + } + + // + // Unbind all context dmas bound to this channel, unmap the channel and + // finally release HW resources. 
+    //
+    kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel);
+    rmStatus = kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel);
+
+    if (rmStatus != NV_OK)
+    {
+        // Try to avoid returning error codes on free under new resource server design
+        NV_ASSERT(0);
+    }
+}
+
+NV_STATUS
+dispchnpioConstruct_IMPL
+(
+    DispChannelPio               *pDispChannelPio,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+dispchndmaConstruct_IMPL
+(
+    DispChannelDma               *pDispChannelDma,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+dispchnGetByHandle_IMPL
+(
+    RsClient     *pClient,
+    NvHandle      hDisplayChannel,
+    DispChannel **ppDispChannel
+)
+{
+    RsResourceRef *pResourceRef;
+    NV_STATUS      status;
+
+    *ppDispChannel = NULL;
+
+    status = clientGetResourceRef(pClient, hDisplayChannel, &pResourceRef);
+    if (status != NV_OK)
+        return status;
+
+    *ppDispChannel = dynamicCast(pResourceRef->pResource, DispChannel);
+
+    return (*ppDispChannel) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+//
+// Bind the DMA context to a display channel
+//
+NV_STATUS
+dispchnBindCtx_IMPL
+(
+    OBJGPU     *pGpu,
+    ContextDma *pContextDma,
+    NvHandle    hChannel
+)
+{
+    RsClient              *pClient = RES_GET_CLIENT(pContextDma);
+    DispChannel           *pDispChannel = NULL;
+    NV_STATUS              rmStatus = NV_OK;
+    KernelDisplay         *pKernelDisplay;
+    DisplayInstanceMemory *pInstMem;
+
+    // Look-up channel
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+        dispchnGetByHandle(pClient, hChannel, &pDispChannel));
+
+    // Ensure ContextDma and DisplayChannel are on the same device
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel),
+                       NV_ERR_INVALID_DEVICE);
+
+    //
+    // Enforce alignment requirements
+    // ISO ctx dmas need to be a multiple of 256B and 256B aligned
+    // NISO ctx dmas need to be a multiple of 4K and 4K aligned
+    // We can only ensure common minimum -- 4K alignment and 4K size
+    // Limit alignment is handled by rounding up in lower-level code.
+    // This will be in hw in future.
+    //
+    if (pContextDma->pMemDesc->PteAdjust != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "ISO ctx dmas must be 4K aligned. PteAdjust = 0x%x\n",
+                  pContextDma->pMemDesc->PteAdjust);
+        return NV_ERR_INVALID_OFFSET;
+    }
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+    rmStatus = instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel);
+    if (rmStatus != NV_OK)
+    {
+        SLI_LOOP_RETURN(rmStatus);
+    }
+
+    SLI_LOOP_END
+
+    return NV_OK;
+}
+
+NV_STATUS
+dispchnUnbindCtx_IMPL
+(
+    OBJGPU     *pGpu,
+    ContextDma *pContextDma,
+    NvHandle    hChannel
+)
+{
+    RsClient              *pClient = RES_GET_CLIENT(pContextDma);
+    DispChannel           *pDispChannel = NULL;
+    NV_STATUS              rmStatus = NV_OK;
+    KernelDisplay         *pKernelDisplay;
+    DisplayInstanceMemory *pInstMem;
+    NvBool                 bFound = NV_FALSE;
+
+    // Look-up channel given by client
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+        dispchnGetByHandle(pClient, hChannel, &pDispChannel));
+
+    // Ensure ContextDma and DisplayChannel are on the same device
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel),
+                       NV_ERR_INVALID_DEVICE);
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+    rmStatus = instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel);
+    if (rmStatus == NV_OK)
+    {
+        bFound = NV_TRUE;
+    }
+
+    SLI_LOOP_END
+
+    return bFound ? NV_OK : NV_ERR_INVALID_STATE;
+}
+
+/*!
+ * @brief Unbind all ContextDmas from the given channel
+ */
+void
+dispchnUnbindAllCtx_IMPL
+(
+    OBJGPU      *pGpu,
+    DispChannel *pDispChannel
+)
+{
+    KernelDisplay         *pKernelDisplay;
+    DisplayInstanceMemory *pInstMem;
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+    instmemUnbindDispChannelContextDmas(pGpu, pInstMem, pDispChannel);
+
+    SLI_LOOP_END
+}
+
+/*!
+ * @brief Unbind ContextDma from all display channels + */ +void +dispchnUnbindCtxFromAllChannels_IMPL +( + OBJGPU *pGpu, + ContextDma *pContextDma +) +{ + KernelDisplay *pKernelDisplay; + DisplayInstanceMemory *pInstMem; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay); + + instmemUnbindContextDmaFromAllChannels(pGpu, pInstMem, pContextDma); + + SLI_LOOP_END +} + +NV_STATUS +kdispSetPushBufferParamsToPhysical_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel, + NvHandle hObjectBuffer, + ContextDma *pBufferContextDma, + NvU32 hClass, + NvU32 channelInstance, + DISPCHNCLASS internalDispChnClass +) +{ + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS rmStatus = NV_OK; + NvU32 dispChannelNum; + NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS pushBufferParams = {0}; + + rmStatus = kdispGetChannelNum_HAL(pKernelDisplay, internalDispChnClass, channelInstance, &dispChannelNum); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + pushBufferParams.hclass = hClass; + pushBufferParams.channelInstance = channelInstance; + + if (pDispChannel->bIsDma) + { + rmStatus = ctxdmaGetByHandle(pClient, hObjectBuffer, &pBufferContextDma); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel[0x%x] didn't have valid ctxdma 0x%x\n", + channelInstance, hObjectBuffer); + return rmStatus; + } + + pushBufferParams.limit = pBufferContextDma->Limit; + pushBufferParams.addressSpace = memdescGetAddressSpace(pBufferContextDma->pMemDesc); + if ((pushBufferParams.addressSpace != ADDR_SYSMEM) && (pushBufferParams.addressSpace != ADDR_FBMEM)) + { + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + // Generate PUSHBUFFER_ADDR. Shift the addr to get the size in 4KB + pushBufferParams.physicalAddr = memdescGetPhysAddr(memdescGetMemDescFromGpu(pBufferContextDma->pMemDesc, pGpu), AT_GPU, 0); + pushBufferParams.cacheSnoop= pBufferContextDma->CacheSnoop; + pushBufferParams.valid = NV_TRUE; + } + else + { + pushBufferParams.valid = NV_FALSE; + } + + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, + &pushBufferParams, sizeof(pushBufferParams)); + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c new file mode 100644 index 0000000..92a8bd6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c @@ -0,0 +1,210 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file disp_common_kern_ctrl_minimal.c implements rmctrls which + * (a) are declared in disp_common_ctrl_minimal.h; i.e. + * (i) are dispcmnCtrlCmd* functions + * (ii) which are used by Tegra SOC NVDisplay and/or OS layer; and + * (b) are implemented in Kernel RM. + */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/disp_objs.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" + +NV_STATUS +dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL +( + DispCommon *pDispCommon, + NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams +) +{ + NvHandle hDevice = RES_GET_PARENT_HANDLE(pDispCommon); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(DISPAPI_GET_GPU(pDispCommon)); + NvU32 hotPlugMask = 0; + NvU32 hotUnplugMask = 0; + NV_STATUS status; + + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pDispCommon), + RES_GET_HANDLE(pDispCommon), + NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE, + pHotplugParams, + sizeof(*pHotplugParams)); + + hotPlugMask = pHotplugParams->hotPlugMask; + hotUnplugMask = pHotplugParams->hotUnplugMask; + pHotplugParams->hotPlugMask = 0; + pHotplugParams->hotUnplugMask = 0; + + if (status != NV_OK) + { + return status; + } + + if ((hotPlugMask != 0) || (hotUnplugMask != 0)) + { + RmClient **ppClient; + RsClient *pRsClient; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pRsClient = staticCast(*ppClient, RsClient); + DispCommon *pDispCommonLoop; + + dispcmnGetByDevice(pRsClient, hDevice, &pDispCommonLoop); + if (pDispCommonLoop == NULL) + continue; + + pDispCommonLoop->hotPlugMaskToBeReported |= hotPlugMask & (~(pDispCommonLoop->hotPlugMaskToBeReported & hotUnplugMask)); + pDispCommonLoop->hotUnplugMaskToBeReported |= hotUnplugMask & (~(pDispCommonLoop->hotUnplugMaskToBeReported & hotPlugMask)); + } + } + + pHotplugParams->hotPlugMask = pDispCommon->hotPlugMaskToBeReported; + pHotplugParams->hotUnplugMask = pDispCommon->hotUnplugMaskToBeReported; + pDispCommon->hotPlugMaskToBeReported = 0; + pDispCommon->hotUnplugMaskToBeReported = 0; + + return status; +} + +/*! + * @brief Allocate display bandwidth. 
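+ *
+ * This forwards the external client's ISO/floor request into the common
+ * arbitration path as DISPLAY_ICC_BW_CLIENT_EXT. A minimal caller-side
+ * sketch (handles, control entry point, and values are illustrative only):
+ *
+ *     NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS bw = {0};
+ *     bw.subDeviceInstance    = 0;
+ *     bw.averageBandwidthKBPS = 1500000; // required ISO bandwidth
+ *     bw.floorBandwidthKBPS   = 800000;  // required floor bandwidth
+ *     NvRmControl(hClient, hDispCommon,
+ *                 NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH,
+ *                 &bw, sizeof(bw));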
+ */
+NV_STATUS
+dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams
+)
+{
+    OBJGPU        *pGpu;
+    KernelDisplay *pKernelDisplay;
+    NV_STATUS      status;
+
+    // client gave us a subdevice #: get right pGpu for it
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                               staticCast(pDispCommon, DisplayApi),
+                               DISPAPI_GET_GPUGRP(pDispCommon),
+                               &pGpu,
+                               pParams->subDeviceInstance);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+    return kdispArbAndAllocDisplayBandwidth_HAL(pGpu,
+                                                pKernelDisplay,
+                                                DISPLAY_ICC_BW_CLIENT_EXT,
+                                                pParams->averageBandwidthKBPS,
+                                                pParams->floorBandwidthKBPS);
+}
+
+NV_STATUS
+dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL
+(
+    DispCommon *pDispCommon,
+    NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams
+)
+{
+    OBJGPU   *pGpu = DISPAPI_GET_GPU(pDispCommon);
+    NvU32     displayId = pParams->displayId;
+    NvU32     interruptType = pParams->interruptType;
+    NV_STATUS status = NV_OK;
+
+    // get target pGpu
+    status = dispapiSetUnicastAndSynchronize_HAL(
+                               staticCast(pDispCommon, DisplayApi),
+                               DISPAPI_GET_GPUGRP(pDispCommon),
+                               &pGpu,
+                               pParams->subDeviceInstance);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    NV_ASSERT_OR_RETURN(pParams->displayId, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pGpu, NV_ERR_INVALID_ARGUMENT);
+
+    // Send a DP IRQ (short pulse) to a registered client.
+    if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_IRQ)
+    {
+        Nv2080DpIrqNotification params = {0};
+        params.displayId = displayId;
+
+        // Check eDP power state; if off, return an error.
+        RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+        NV0073_CTRL_DP_GET_EDP_DATA_PARAMS edpData;
+
+        portMemSet(&edpData, 0, sizeof(edpData));
+
+        status = pRmApi->Control(pRmApi,
+                                 RES_GET_CLIENT_HANDLE(pDispCommon),
+                                 RES_GET_HANDLE(pDispCommon),
+                                 NV0073_CTRL_CMD_DP_GET_EDP_DATA,
+                                 &edpData,
+                                 sizeof(edpData));
+
+        if (status == NV_OK && FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _PANEL_POWER, _OFF, edpData.data))
+        {
+            return NV_ERR_GENERIC;
+        }
+
+        gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_DP_IRQ, &params, sizeof(params), 0, 0);
+    }
+    else if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG ||
+             interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG)
+    {
+        Nv2080HotplugNotification hotplugNotificationParams;
+        portMemSet(&hotplugNotificationParams, 0, sizeof(hotplugNotificationParams));
+
+        if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG)
+        {
+            hotplugNotificationParams.plugDisplayMask = displayId;
+            hotplugNotificationParams.unplugDisplayMask = 0;
+        }
+        else if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG)
+        {
+            hotplugNotificationParams.plugDisplayMask = 0;
+            hotplugNotificationParams.unplugDisplayMask = displayId;
+        }
+        gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_HOTPLUG,
+            &hotplugNotificationParams, sizeof(hotplugNotificationParams), 0, 0);
+    }
+    else
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    return NV_OK;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
new file mode 100644
index 0000000..393f9eb
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
@@ -0,0 +1,98 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION &
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/disp_objs.h" +#include "class/cl5070.h" +#include "mem_mgr/mem.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" + +NV_STATUS +dispobjCtrlCmdEventSetTrigger_IMPL +( + DispObject *pDispObject +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDispObject); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + kdispNotifyEvent(pGpu, pKernelDisplay, NV5070_NOTIFIERS_SW, NULL, 0, 0, 0); + + return NV_OK; +} + +NV_STATUS +dispobjCtrlCmdEventSetMemoryNotifies_IMPL +( + DispObject *pDispObject, + NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDispObject); + DisplayApi *pDisplayApi = staticCast(pDispObject, DisplayApi); + RsClient *pClient = RES_GET_CLIENT(pDispObject); + Memory *pMemory; + NvU32 *pNotifyActions, i; + + // error check subDeviceInstance + if (pSetMemoryNotifiesParams->subDeviceInstance >= gpumgrGetSubDeviceCountFromGpu(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "bad subDeviceInstance 0x%x\n", + pSetMemoryNotifiesParams->subDeviceInstance); + return NV_ERR_INVALID_ARGUMENT; + } + + pNotifyActions = pDisplayApi->pNotifyActions[pSetMemoryNotifiesParams->subDeviceInstance]; + + // ensure there's no pending notifications + for (i = 0; i < pDisplayApi->numNotifiers; i++) + { + if (pNotifyActions[i] != NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + + if (pSetMemoryNotifiesParams->hMemory == NV01_NULL_OBJECT) + { + pDisplayApi->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pDisplayApi->pNotifierMemory = NULL; + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetMemoryNotifiesParams->hMemory, &pMemory)); + + if (pMemory->pMemDesc->Size < sizeof(NvNotification) * pDisplayApi->numNotifiers) + { + return NV_ERR_INVALID_LIMIT; + } + + pDisplayApi->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pDisplayApi->pNotifierMemory = pMemory; + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c new file mode 100644 index 0000000..1be4f5f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c @@ 
-0,0 +1,742 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing the display - both Disp and DispCommon +* entries with their insides (DispChannelList and DispDmaControlList) +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "core/locks.h" +#include "resserv/rs_client.h" + +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/kern_disp.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "class/cl0073.h" // NV04_DISPLAY_COMMON +#include "class/cl5070.h" // NV50_DISPLAY +#include "class/clc370.h" // NVC370_DISPLAY + +NV_STATUS +dispapiConstruct_IMPL +( + DisplayApi *pDisplayApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + CLASSDESCRIPTOR *pClassDescriptor; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + OBJGPU *pGpu; + KernelDisplay *pKernelDisplay; + NvBool bBcResource; + NvU32 i; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Use gpuGetByRef instead of GpuResource because it will work even if resource + // isn't a GpuResource. + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (status != NV_OK) + return status; + + // Find class in class db (verifies class is valid for this GPU) + status = gpuGetClassByClassId(pGpu, pParams->externalClassId, &pClassDescriptor); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "bad class 0x%x\n", pParams->externalClassId); + return NV_ERR_INVALID_CLASS; + } + + // Check display is enabled (i.e. 
not displayless) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + pDisplayApi->pNotifyActions[i] = NULL; + + pDisplayApi->pGpuInRmctrl = NULL; + pDisplayApi->pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + pDisplayApi->bBcResource = bBcResource; + pDisplayApi->hNotifierMemory = NV01_NULL_OBJECT; + pDisplayApi->pNotifierMemory = NULL; + + gpuSetThreadBcState(pGpu, bBcResource); + + return status; +} + +void +dispapiDestruct_IMPL +( + DisplayApi *pDisplayApi +) +{ + NvU32 i; + + // Free notify actions memory if it's been allocated + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + { + portMemFree(pDisplayApi->pNotifyActions[i]); + pDisplayApi->pNotifyActions[i] = NULL; + } +} + +static NV_STATUS +_dispapiNotifierInit +( + DisplayApi *pDisplayApi, + NvU32 numNotifiers, + NvU32 disableCmd +) +{ + NvU32 i, j; + NV_STATUS status = NV_OK; + + pDisplayApi->numNotifiers = numNotifiers; + + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + { + // get memory for pNotifyActions table + pDisplayApi->pNotifyActions[i] = portMemAllocNonPaged( + pDisplayApi->numNotifiers * sizeof(NvU32)); + if (pDisplayApi->pNotifyActions[i] != NULL) + { + // default actions for each notifier type is disabled + for (j = 0; j < pDisplayApi->numNotifiers; j++) + { + pDisplayApi->pNotifyActions[i][j] = disableCmd; + } + } + else + { + goto fail; + } + } + + return status; + +fail: + // first release any notifyActions memory + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + { + portMemFree(pDisplayApi->pNotifyActions[i]); + pDisplayApi->pNotifyActions[i] = NULL; + } + + return NV_ERR_INSUFFICIENT_RESOURCES; +} + +NV_STATUS +dispobjConstructHal_IMPL +( + DispObject *pDispObject, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + DisplayApi *pDisplayApi = staticCast(pDispObject, DisplayApi); + Device *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device); + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + OBJGPU *pGpu = pGpuResource->pGpu; + NV_STATUS rmStatus = NV_ERR_INVALID_STATE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + { + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + rmStatus = kdispSelectClass_HAL(pGpu, pKernelDisplay, pCallContext->pResourceRef->externalClassId); + + if (rmStatus != NV_OK) + { + // If the operation fails, it should fail on the first try + NV_ASSERT(gpumgrIsParentGPU(pGpu)); + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END; + + if (rmStatus != NV_OK) + return rmStatus; + + if(dynamicCast(pDisplayApi, NvDispApi)) + { + rmStatus = _dispapiNotifierInit(pDisplayApi, + NVC370_NOTIFIERS_MAXCOUNT, + NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); + } + else + { + rmStatus = _dispapiNotifierInit(pDisplayApi, + NV5070_NOTIFIERS_MAXCOUNT, + NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); + } + + return rmStatus; +} + +NV_STATUS +dispobjConstruct_IMPL +( + DispObject *pDispObject, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + pDispObject->rmFreeFlags = NV5070_CTRL_SET_RMFREE_FLAGS_NONE; + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return dispobjConstructHal_HAL(pDispObject, pCallContext, pParams); +} + +NV_STATUS +dispobjGetByHandle_IMPL +( + RsClient *pClient, + NvHandle 
hDispObject, + DispObject **ppDispObject +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hDispObject, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispObject = dynamicCast(pResourceRef->pResource, DispObject); + + return (*ppDispObject) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +dispobjGetByDevice_IMPL +( + RsClient *pClient, + Device *pDevice, + DispObject **ppDispObject +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = refFindChildOfType(RES_GET_REF(pDevice), classId(DispObject), NV_FALSE /*bExactMatch*/, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispObject = dynamicCast(pResourceRef->pResource, DispObject); + + return (*ppDispObject) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +// +// Most display control calls take a subDeviceInstance argument. +// We need to verify that this argument is valid and then use it to +// locate the correct OBJGPU for the particular subdevice. +// +NV_STATUS +dispapiSetUnicastAndSynchronize_KERNEL +( + DisplayApi *pDisplayApi, + OBJGPUGRP *pGpuGroup, + OBJGPU **ppGpu, + NvU32 subDeviceInstance +) +{ + NV_STATUS nvStatus = NV_OK; + + nvStatus = gpugrpGetGpuFromSubDeviceInstance(pGpuGroup, subDeviceInstance, ppGpu); + if (nvStatus != NV_OK) + return nvStatus; + + gpumgrSetBcEnabledStatus(*ppGpu, NV_FALSE); + + return nvStatus; +} + +NV_STATUS +dispapiControl_Prologue_IMPL +( + DisplayApi *pDisplayApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams +) +{ + NvU32 subdeviceIndex; + NV_STATUS status; + RmResource *pResource = staticCast(pDisplayApi, RmResource); + + if (dynamicCast(pDisplayApi, DispCommon)) + { + Device *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device); + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + + pResource->rpcGpuInstance = gpuGetInstance(pGpuResource->pGpu); + pDisplayApi->pGpuInRmctrl = pGpuResource->pGpu; + return rmresControl_Prologue_IMPL(pResource, pCallContext, pRmCtrlParams); + } + + // Read the subdevice ID out and swap GPU pointer + if (dynamicCast(pDisplayApi, NvDispApi)) + { + NVC370_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams; + + // + // All non-NULL disp control 5070 methods have + // NVC370_CTRL_CMD_BASE_PARAMS as their first member. + // + if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NVC370_CTRL_CMD_BASE_PARAMS))) + { + status = NV_ERR_INVALID_PARAM_STRUCT; + goto done; + } + subdeviceIndex = pBaseParameters->subdeviceIndex; + } + else if (dynamicCast(pDisplayApi, DispSwObj)) + { + NVC372_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams; + + // + // All non-NULL disp control C372 methods have + // NVC372_CTRL_CMD_BASE_PARAMS as their first member. + // + if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NVC372_CTRL_CMD_BASE_PARAMS))) + { + status = NV_ERR_INVALID_PARAM_STRUCT; + goto done; + } + subdeviceIndex = pBaseParameters->subdeviceIndex; + } + else + { + NV5070_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams; + + // + // All non-NULL disp control 5070 methods have + // NV5070_CTRL_CMD_BASE_PARAMS as their first member. 
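+        // (Each such control param struct is expected to embed its base
+        // params as the first member, e.g. a sketch with an illustrative
+        // struct name:
+        //
+        //     typedef struct {
+        //         NV5070_CTRL_CMD_BASE_PARAMS base; // subdeviceIndex here
+        //         ...                               // command-specific data
+        //     } NV5070_CTRL_FOO_PARAMS;
+        //
+        // which is what makes the polymorphic read of subdeviceIndex safe.)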
+        //
+        if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NV5070_CTRL_CMD_BASE_PARAMS)))
+        {
+            status = NV_ERR_INVALID_PARAM_STRUCT;
+            goto done;
+        }
+        subdeviceIndex = pBaseParameters->subdeviceIndex;
+    }
+
+    status = dispapiSetUnicastAndSynchronize_HAL(pDisplayApi,
+                                                 pRmCtrlParams->pGpuGrp,
+                                                 &pRmCtrlParams->pGpu,
+                                                 subdeviceIndex);
+
+    if (status == NV_OK)
+    {
+        pResource->rpcGpuInstance = gpuGetInstance(pRmCtrlParams->pGpu);
+        pDisplayApi->pGpuInRmctrl = pRmCtrlParams->pGpu;
+        return rmresControl_Prologue_IMPL(pResource, pCallContext, pRmCtrlParams);
+    }
+
+done:
+    return status;
+}
+
+void
+dispapiControl_Epilogue_IMPL
+(
+    DisplayApi                     *pDisplayApi,
+    CALL_CONTEXT                   *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams
+)
+{
+    if (dynamicCast(pDisplayApi, DispCommon) == NULL)
+    {
+        RmResource *pResource = staticCast(pDisplayApi, RmResource);
+        pResource->rpcGpuInstance = ~0;
+    }
+
+    pDisplayApi->pGpuInRmctrl = NULL;
+}
+
+NV_STATUS
+dispapiControl_IMPL
+(
+    DisplayApi                     *pDisplayApi,
+    CALL_CONTEXT                   *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS     status = NV_OK;
+    Device       *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device);
+    GpuResource  *pGpuResource = staticCast(pDevice, GpuResource);
+    RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams;
+    OBJGPU       *pGpu = pGpuResource->pGpu;
+
+    NV_PRINTF(LEVEL_INFO, "class: 0x%x cmd 0x%x\n",
+              RES_GET_EXT_CLASS_ID(pDisplayApi),
+              pRmCtrlParams->cmd);
+
+    pRmCtrlParams->pGpu = pGpu;
+    pRmCtrlParams->pGpuGrp = pGpuResource->pGpuGrp;
+
+    gpuSetThreadBcState(pGpu, NV_TRUE);
+
+    status = resControl_IMPL(staticCast(pDisplayApi, RsResource),
+                             pCallContext, pParams);
+
+    return status;
+}
+
+NV_STATUS
+dispswobjConstruct_IMPL
+(
+    DispSwObj                    *pDispSwObj,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n",
+                  pParams->externalClassId);
+
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+    return NV_OK;
+}
+
+NV_STATUS
+dispcmnConstruct_IMPL
+(
+    DispCommon                   *pDispCommon,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    DisplayApi *pDisplayApi = staticCast(pDispCommon, DisplayApi);
+
+    //
+    // Not adding the priv-level check for this class, as it is used by
+    // OpenGL from userspace. Once the cleanup is done on the OpenGL side,
+    // the priv-level check can be added here.
+    //
+
+    pDispCommon->hotPlugMaskToBeReported = 0;
+    pDispCommon->hotUnplugMaskToBeReported = 0;
+
+    return _dispapiNotifierInit(pDisplayApi,
+                                NV0073_NOTIFIERS_MAXCOUNT,
+                                NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE);
+}
+
+NV_STATUS
+dispcmnGetByHandle_IMPL
+(
+    RsClient    *pClient,
+    NvHandle     hDispCommon,
+    DispCommon **ppDispCommon
+)
+{
+    NV_STATUS      status;
+    RsResourceRef *pResourceRef;
+
+    status = clientGetResourceRef(pClient, hDispCommon, &pResourceRef);
+    if (status != NV_OK)
+        return status;
+
+    *ppDispCommon = dynamicCast(pResourceRef->pResource, DispCommon);
+
+    return (*ppDispCommon) ?
NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +void +dispcmnGetByDevice_IMPL +( + RsClient *pClient, + NvHandle hDevice, + DispCommon **ppDispCommon +) +{ + Device *pDevice; + RsResourceRef *pResourceRef; + + *ppDispCommon = NULL; /* return failure by default */ + + if (deviceGetByHandle(pClient, hDevice, &pDevice) != NV_OK) + return; + + if (refFindChildOfType(RES_GET_REF(pDevice), + classId(DispCommon), + NV_FALSE, + &pResourceRef) != NV_OK) + return; + + *ppDispCommon = dynamicCast(pResourceRef->pResource, DispCommon); +} + +/** + * @brief Return NV_TRUE if RmFree() needs to preserve the HW, otherwise NV_FALSE + * + * @param[in] DispObject Pointer + */ +NvBool dispobjGetRmFreeFlags_IMPL(DispObject *pDispObject) +{ + return !!(pDispObject->rmFreeFlags & NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW); +} + +/** + * @brief Clears the RmFree() temporary flags + * + * @param[in] DispObject Pointer + * + * @return void + */ +void dispobjClearRmFreeFlags_IMPL(DispObject *pDispObject) +{ + pDispObject->rmFreeFlags = NV5070_CTRL_SET_RMFREE_FLAGS_NONE; +} + +NV_STATUS +nvdispapiConstruct_IMPL +( + NvDispApi *pNvdispApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use dispchnGetByHandle. + */ +NV_STATUS +CliFindDispChannelInfo +( + NvHandle hClient, + NvHandle hDispChannel, + DispChannel **ppDispChannel, + NvHandle *phParent +) +{ + RsClient *pClient; + NV_STATUS status; + + *ppDispChannel = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NV_ERR_INVALID_CLIENT; + + status = dispchnGetByHandle(pClient, hDispChannel, ppDispChannel); + if (status != NV_OK) + return status; + + if (phParent) + *phParent = RES_GET_PARENT_HANDLE(*ppDispChannel); + + return NV_OK; +} + +/** + * @warning This function is deprecated! Please use dispcmnGetByHandle. + */ +NvBool +CliGetDispCommonInfo +( + NvHandle hClient, + NvHandle hDispCommon, + DisplayApi **ppDisplayApi +) +{ + RsClient *pClient; + NV_STATUS status; + DispCommon *pDispCommon; + + *ppDisplayApi = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NV_FALSE; + + status = dispcmnGetByHandle(pClient, hDispCommon, &pDispCommon); + if (status != NV_OK) + return NV_FALSE; + + *ppDisplayApi = staticCast(pDispCommon, DisplayApi); + + return NV_TRUE; +} + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. + */ +NvBool +CliGetDispInfo +( + NvHandle hClient, + NvHandle hObject, + DisplayApi **pDisplayApi +) +{ + if (!pDisplayApi) + return NV_FALSE; + + *pDisplayApi = CliGetDispFromDispHandle(hClient, hObject); + + return *pDisplayApi ? NV_TRUE : NV_FALSE; +} + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. 
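+ *
+ * The replacement pattern resolves the client first, then the object; a
+ * sketch mirroring the body below:
+ *
+ *     RsClient   *pClient;
+ *     DispObject *pDispObject;
+ *     if ((serverGetClientUnderLock(&g_resServ, hClient, &pClient) == NV_OK) &&
+ *         (dispobjGetByHandle(pClient, hDisp, &pDispObject) == NV_OK))
+ *     {
+ *         DisplayApi *pDisplayApi = staticCast(pDispObject, DisplayApi);
+ *         // ... use pDisplayApi ...
+ *     }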
+ */ +DisplayApi * +CliGetDispFromDispHandle +( + NvHandle hClient, + NvHandle hDisp +) +{ + RsClient *pClient; + NV_STATUS status; + DispObject *pDispObject; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = dispobjGetByHandle(pClient, hDisp, &pDispObject); + if (status != NV_OK) + return NULL; + + return staticCast(pDispObject, DisplayApi); +} + +// +// DISP Event RM Controls +// +NV_STATUS +dispapiCtrlCmdEventSetNotification_IMPL +( + DisplayApi *pDisplayApi, + NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDisplayApi); + NvU32 *pNotifyActions; + NV_STATUS status = NV_OK; + PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList(staticCast(pDisplayApi, INotifier)); + + // NV01_EVENT must have been plugged into this subdevice + if (pEventNotifications == NULL) + { + NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION); + return NV_ERR_INVALID_STATE; + } + + // error check event index + if (pSetEventParams->event >= pDisplayApi->numNotifiers) + { + NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event); + return NV_ERR_INVALID_ARGUMENT; + } + + // error check subDeviceInstance + if (pSetEventParams->subDeviceInstance >= gpumgrGetSubDeviceMaxValuePlus1(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "bad subDeviceInstance 0x%x\n", + pSetEventParams->subDeviceInstance); + return NV_ERR_INVALID_ARGUMENT; + } + + pNotifyActions = pDisplayApi->pNotifyActions[pSetEventParams->subDeviceInstance]; + + switch (pSetEventParams->action) + { + case NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE: + case NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT: + { + // must be in disabled state to transition to an active state + if (pNotifyActions[pSetEventParams->event] != NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + status = NV_ERR_INVALID_STATE; + break; + } + + // bind hEvent to particular subdeviceInst + status = bindEventNotificationToSubdevice(pEventNotifications, + pSetEventParams->hEvent, + pSetEventParams->subDeviceInstance); + if (status != NV_OK) + return status; + + pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + + case NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE: + { + pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c new file mode 100644 index 0000000..32e6cbe --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispSfUser class. +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" + +#include "gpu/gpu.h" +#include "gpu/disp/disp_sf_user.h" +#include "gpu/disp/kern_disp.h" + +NV_STATUS +dispsfConstruct_IMPL +( + DispSfUser *pDispSfUser, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispSfUser); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelDisplay != NULL, NV_ERR_NOT_SUPPORTED); + + // Set sf user RegBase offset + kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, + &pDispSfUser->ControlOffset, + &pDispSfUser->ControlLength); + + return NV_OK; +} + +NV_STATUS +dispsfGetRegBaseOffsetAndSize_IMPL +( + DispSfUser *pDispSfUser, + OBJGPU *pGpu, + NvU32* pOffset, + NvU32* pSize +) +{ + if (pOffset) + { + *pOffset = pDispSfUser->ControlOffset; + } + + if (pSize) + { + *pSize = pDispSfUser->ControlLength; + } + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c new file mode 100644 index 0000000..6b8bfc1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c @@ -0,0 +1,419 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 +#include "gpu/disp/head/kernel_head.h" +#include "objtmr.h" + +NV_STATUS +kheadConstruct_IMPL(KernelHead *pKernelHead) +{ + return NV_OK; +} + +NvU32 +kheadGetVblankTotalCounter_IMPL +( + KernelHead *pKernelHead +) +{ + return pKernelHead->Vblank.Counters.Total; +} + +void +kheadSetVblankTotalCounter_IMPL +( + KernelHead *pKernelHead, + NvU32 counter +) +{ + pKernelHead->Vblank.Counters.Total = counter; +} + +NvU32 +kheadGetVblankLowLatencyCounter_IMPL +( + KernelHead *pKernelHead +) +{ + return pKernelHead->Vblank.Counters.LowLatency; +} + +void +kheadSetVblankLowLatencyCounter_IMPL +( + KernelHead *pKernelHead, + NvU32 counter +) +{ + pKernelHead->Vblank.Counters.LowLatency = counter; +} + +NvU32 +kheadGetVblankNormLatencyCounter_IMPL +( + KernelHead *pKernelHead +) +{ + return pKernelHead->Vblank.Counters.NormLatency; +} + +void +kheadSetVblankNormLatencyCounter_IMPL +( + KernelHead *pKernelHead, + NvU32 counter +) +{ + pKernelHead->Vblank.Counters.NormLatency = counter; +} + +static NvBool +kheadIsVblankCallbackDue +( + VBLANKCALLBACK *pCallback, + NvU32 state, + NvU64 time, + NvU32 vblankCount +) +{ + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP) + { + // + // Time stamp based call backs don't have a valid vblank count + // vblank might be delayed and we might see only one vblank instead of two. + // so, count doesn't make sense in case of TS. + // and since the semantics is flip on vblank at TS >= TS specified, we can't + // use tmrCallbacks (they might flip outside vblank) + // + return (time >= pCallback->TimeStamp); + } + else + { + // + // These are now guaranteed to be sorted by VBlank + // and, now all have a VBlankCount to make processing simpler + // in this function, 'due' means "the next time the queue's counter is incremented, + // will it be time to process this callback?" This definition requires us to add 1 to + // the current vblankCount during the comparison. + // + if (VBLANK_STATE_PROCESS_IMMEDIATE & state) + { + return NV_TRUE; + } + + // Persistent callbacks that want to run every vblank + if ((pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) && (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT)) + { + return NV_TRUE; + } + + // Every other callback whose time has come. 
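+        //
+        // Worked example: with the queue's counter currently at 41, a
+        // callback armed with VBlankCount == 42 satisfies the
+        // (VBlankCount == 1 + vblankCount) test below and is therefore due
+        // on the very next vblank.
+        //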
+ if (pCallback->VBlankCount == 1+vblankCount)
+ {
+ // Some callbacks might have become due, but only want ISR time exclusively (no DPC)
+ if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY__ISR_ONLY)
+ {
+ if (!(state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR))
+ {
+ // Callback explicitly wants ISR time for its processing.
+ return NV_FALSE;
+ }
+ }
+
+ return NV_TRUE;
+ }
+ }
+ return NV_FALSE;
+}
+
+NvU32
+kheadCheckVblankCallbacksQueued_IMPL
+(
+ OBJGPU *thisGpu,
+ KernelHead *pKernelHead,
+ NvU32 state,
+ NvU32 *expiring
+)
+{
+ OBJTMR *pTmr;
+ NvU64 time;
+ NvU32 queues = 0;
+
+ pTmr = GPU_GET_TIMER(thisGpu);
+ tmrGetCurrentTime(pTmr, &time);
+
+ if (expiring)
+ {
+ *expiring = 0;
+ }
+ //
+ // Return a union of queues (represented by VBLANK_STATE_PROCESS_XXX_LATENCY flags)
+ // that are nonempty, i.e. have at least one callback.
+ // Optionally, also return (via 'expiring', when non-NULL) which of those non-empty queues contain
+ // callbacks that are due to be processed, the next time that queue's counter gets incremented.
+ //
+ if ( (pKernelHead->Vblank.Callback.pListLL) &&
+ (state & VBLANK_STATE_PROCESS_LOW_LATENCY) )
+ {
+ queues |= VBLANK_STATE_PROCESS_LOW_LATENCY;
+
+ if (expiring)
+ {
+ NvU32 vblankCount;
+ VBLANKCALLBACK *pCallback;
+
+ vblankCount = pKernelHead->Vblank.Counters.LowLatency;
+ pCallback = pKernelHead->Vblank.Callback.pListLL;
+
+ do
+ {
+ if (kheadIsVblankCallbackDue(pCallback, state, time, vblankCount))
+ {
+ *expiring |= VBLANK_STATE_PROCESS_LOW_LATENCY;
+ }
+ pCallback = pCallback->Next;
+ }
+ while (pCallback && !(*expiring & VBLANK_STATE_PROCESS_LOW_LATENCY));
+ }
+ }
+
+ if ( (pKernelHead->Vblank.Callback.pListNL) &&
+ (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY) )
+ {
+ queues |= VBLANK_STATE_PROCESS_NORMAL_LATENCY;
+
+ if (expiring)
+ {
+ NvU32 vblankCount;
+ VBLANKCALLBACK *pCallback;
+
+ vblankCount = pKernelHead->Vblank.Counters.NormLatency;
+ pCallback = pKernelHead->Vblank.Callback.pListNL;
+
+ do
+ {
+ if (kheadIsVblankCallbackDue(pCallback, state, time, vblankCount))
+ {
+ *expiring |= VBLANK_STATE_PROCESS_NORMAL_LATENCY;
+ }
+
+ pCallback = pCallback->Next;
+ }
+ while (pCallback && !(*expiring & VBLANK_STATE_PROCESS_NORMAL_LATENCY));
+ }
+ }
+
+ return queues & state;
+}
+
+NvU32
+kheadReadVblankIntrState_IMPL
+(
+ OBJGPU *pGpu,
+ KernelHead *pKernelHead
+)
+{
+ // Check to make sure that our SW state grooves with the HW state
+ if (kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead))
+ {
+ // HW is enabled, check if SW state is not enabled
+ if (pKernelHead->Vblank.IntrState != NV_HEAD_VBLANK_INTR_ENABLED)
+ {
+ NV_PRINTF(LEVEL_ERROR,
+ "Head %d: HW: %d != SW: %d! Fixing SW State...\n",
+ pKernelHead->PublicId, NV_HEAD_VBLANK_INTR_ENABLED,
+ pKernelHead->Vblank.IntrState);
+ pKernelHead->Vblank.IntrState = NV_HEAD_VBLANK_INTR_ENABLED;
+ }
+ }
+ else
+ {
+ //
+ // If HW is not enabled, SW state would depend on whether head is
+ // driving display. Check for both the SW states and base the
+ // SW state decision on head initialized state.
+ // If head is initialized SW state should be AVAILABLE else
+ // UNAVAILABLE.
+ //
+ if ((pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_ENABLED) ||
+ (pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_UNAVAILABLE))
+ {
+ NvU32 state = NV_HEAD_VBLANK_INTR_UNAVAILABLE;
+
+ //
+ // We should say HW not enabled is AVAILABLE or UNAVAILABLE
+ // So, we'll base the correct decision on whether or not
+ // this head is driving any display.
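+ // (kheadGetDisplayInitialized_HAL() below decides: an initialized head is
+ // AVAILABLE, an uninitialized one is UNAVAILABLE.)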
+ //
+ if (kheadGetDisplayInitialized_HAL(pGpu, pKernelHead))
+ {
+ state = NV_HEAD_VBLANK_INTR_AVAILABLE;
+ }
+
+ if (state != pKernelHead->Vblank.IntrState)
+ {
+ NV_PRINTF(LEVEL_ERROR,
+ "Head %d: HW: %d != SW: %d! Fixing SW State...\n",
+ pKernelHead->PublicId, state, pKernelHead->Vblank.IntrState);
+ pKernelHead->Vblank.IntrState = state;
+ }
+ }
+ else if (pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_AVAILABLE)
+ {
+ //
+ // If HW is not enabled and head is not driving any display then
+ // the SW state should be UNAVAILABLE
+ //
+ if (!kheadGetDisplayInitialized_HAL(pGpu, pKernelHead))
+ {
+ NV_PRINTF(LEVEL_ERROR,
+ "Head %d: HW: %d != SW: %d! Fixing SW State...\n",
+ pKernelHead->PublicId, NV_HEAD_VBLANK_INTR_UNAVAILABLE, pKernelHead->Vblank.IntrState);
+ pKernelHead->Vblank.IntrState = NV_HEAD_VBLANK_INTR_UNAVAILABLE;
+ }
+ }
+ }
+
+ return pKernelHead->Vblank.IntrState;
+}
+
+void
+kheadWriteVblankIntrState_IMPL
+(
+ OBJGPU *pGpu,
+ KernelHead *pKernelHead,
+ NvU32 newstate
+)
+{
+ NvU32 previous;
+ NvBool enablehw = NV_FALSE; // Don't enable the hw by default
+ NvBool updatehw = NV_FALSE; // Don't update the hw by default
+
+ // Get the previous state for various other stuff
+ previous = pKernelHead->Vblank.IntrState;
+
+ // Make sure we really support the requested next state
+ if ( (newstate != NV_HEAD_VBLANK_INTR_UNAVAILABLE) &&
+ (newstate != NV_HEAD_VBLANK_INTR_AVAILABLE) &&
+ (newstate != NV_HEAD_VBLANK_INTR_ENABLED) )
+ {
+ NV_PRINTF(LEVEL_ERROR, "Unknown state %x requested on head %d.\n",
+ newstate, pKernelHead->PublicId);
+ return;
+ }
+
+ // Spew where we were and where we are going for tracking...
+#if defined(DEBUG)
+
+ NV_PRINTF(LEVEL_INFO, "Changing vblank state on pGpu=%p head %d: ", pGpu,
+ pKernelHead->PublicId);
+
+ switch(previous)
+ {
+ case NV_HEAD_VBLANK_INTR_UNAVAILABLE:
+ NV_PRINTF(LEVEL_INFO, "UNAVAILABLE -> ");
+ break;
+ case NV_HEAD_VBLANK_INTR_AVAILABLE:
+ NV_PRINTF(LEVEL_INFO, "AVAILABLE -> ");
+ break;
+ case NV_HEAD_VBLANK_INTR_ENABLED:
+ NV_PRINTF(LEVEL_INFO, "ENABLED -> ");
+ break;
+ default:
+ NV_PRINTF(LEVEL_INFO, "UNKNOWN -> ");
+ break;
+ }
+
+ switch(newstate)
+ {
+ case NV_HEAD_VBLANK_INTR_UNAVAILABLE:
+ NV_PRINTF(LEVEL_INFO, "UNAVAILABLE\n");
+ break;
+ case NV_HEAD_VBLANK_INTR_AVAILABLE:
+ NV_PRINTF(LEVEL_INFO, "AVAILABLE\n");
+ break;
+ case NV_HEAD_VBLANK_INTR_ENABLED:
+ NV_PRINTF(LEVEL_INFO, "ENABLED\n");
+ break;
+ default:
+ NV_PRINTF(LEVEL_INFO, "UNKNOWN\n");
+ break;
+ }
+
+#endif
+
+ // Move to the new state
+ switch(newstate)
+ {
+ // Move to the unavailable state. This has an implied disabled state.
+ case NV_HEAD_VBLANK_INTR_UNAVAILABLE:
+
+ // If the hw is on, turn it off
+ if (previous == NV_HEAD_VBLANK_INTR_ENABLED)
+ {
+ enablehw = NV_FALSE;
+ updatehw = NV_TRUE;
+ }
+ break;
+
+ // Move to the available state. This has an implied disabled state.
+ case NV_HEAD_VBLANK_INTR_AVAILABLE:
+
+ // If the hw is on, turn it off
+ if (previous == NV_HEAD_VBLANK_INTR_ENABLED)
+ {
+ enablehw = NV_FALSE;
+ updatehw = NV_TRUE;
+ }
+ break;
+
+ // Move to the enabled state. This has an implied available state.
+ case NV_HEAD_VBLANK_INTR_ENABLED:
+
+ // If the hw was off, turn it on
+ if (previous != NV_HEAD_VBLANK_INTR_ENABLED)
+ {
+ enablehw = NV_TRUE;
+ updatehw = NV_TRUE;
+ }
+ break;
+
+ default:
+ // We REALLY should never get here with the correct filtering above.
+ NV_PRINTF(LEVEL_ERROR, "Unknown state %x requested on head %d.\n",
+ newstate, pKernelHead->PublicId);
+ DBG_BREAKPOINT();
+ return;
+ }
+
+ // Update the sw state
+ pKernelHead->Vblank.IntrState = newstate;
+
+ // Update the hw
+ if (updatehw)
+ {
+ kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, enablehw);
+ }
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
new file mode 100644
index 0000000..0bc361d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
@@ -0,0 +1,344 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+* Display Instance Memory Module
+* This file contains functions managing display instance memory on CPU RM
+*
+******************************************************************************/
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+
+#include "gpu/disp/inst_mem/disp_inst_mem.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "gpu/mem_mgr/context_dma.h"
+#include "disp/v03_00/dev_disp.h"
+
+/*!
+ * @brief Get display instance memory and hash table size
+ *
+ * @param[in] pGpu
+ * @param[in] pInstMem
+ * @param[out] pTotalInstMemSize pointer to instance memory size
+ * @param[out] pHashTableSize pointer to hash table size
+ *
+ * @return void
+ */
+void
+instmemGetSize_v03_00
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NvU32 *pTotalInstMemSize,
+ NvU32 *pHashTableSize
+)
+{
+ if (pTotalInstMemSize != NULL)
+ {
+ *pTotalInstMemSize = (NV_UDISP_HASH_LIMIT - NV_UDISP_HASH_BASE + 1) +
+ (NV_UDISP_OBJ_MEM_LIMIT - NV_UDISP_OBJ_MEM_BASE + 1);
+ }
+
+ if (pHashTableSize != NULL)
+ {
+ *pHashTableSize = (NV_UDISP_HASH_LIMIT - NV_UDISP_HASH_BASE + 1);
+ }
+}
+
+NvU32
+instmemGetHashTableBaseAddr_v03_00
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem
+)
+{
+ return NV_UDISP_HASH_BASE;
+}
+
+/*!
Check if the instance memory pointer is valid */ +NvBool +instmemIsValid_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 offset +) +{ + return (((offset << 5) < NV_UDISP_OBJ_MEM_LIMIT) && + ((offset << 5) > NV_UDISP_HASH_LIMIT)); +} + +NV_STATUS +instmemHashFunc_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvHandle hClient, + NvHandle hContextDma, + NvU32 dispChannelNum, + NvU32 *pResult +) +{ + NV_ASSERT_OR_RETURN(pResult, NV_ERR_INVALID_ARGUMENT); + + + // + // The hash function for display will be: + // hContextDma[9:0] + // ^ hContextDma[19:10] + // ^ hContextDma[29:20] + // ^ {hClient[7:0], hContextDma[31:30]} + // ^ {dispChannelNum[3:0], hClient[13:8]} + // ^ {7'h00, dispChannelNum[6:4]} + // + *pResult = ((hContextDma >> 0) & 0x3FF) ^ + ((hContextDma >> 10) & 0x3FF) ^ + ((hContextDma >> 20) & 0x3FF) ^ + (((hClient & 0xFF) << 2) | (hContextDma >> 30)) ^ + (((dispChannelNum & 0xF) << 6) | ((hClient >> 8) & 0x3F))^ + ((dispChannelNum >> 4) & 0x7); + + return NV_OK; +} + +/*! Generate hash table data */ +NvU32 +instmemGenerateHashTableData_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 hClient, + NvU32 offset, + NvU32 dispChannelNum +) +{ + return (SF_NUM(_UDISP, _HASH_TBL_CLIENT_ID, hClient) | + SF_NUM(_UDISP, _HASH_TBL_INSTANCE, offset) | + SF_NUM(_UDISP, _HASH_TBL_CHN, dispChannelNum)); +} + +/*! Write the Context DMA to display instance memory */ +NV_STATUS +instmemCommitContextDma_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma +) +{ + MEMORY_DESCRIPTOR *pMemDesc = memdescGetMemDescFromGpu(pContextDma->pMemDesc, pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + RmPhysAddr FrameAddr, Limit; + RmPhysAddr FrameAddr256Align; + RmPhysAddr Limit256Align; + NvU32 ctxDMAFlag; + NvU32 instoffset; + NvU8 *pInstMemCpuVA; + NvU32 kind; + NvBool bIsSurfaceBl = NV_FALSE; + TRANSFER_SURFACE dest = {0}; + + // This function must be called in unicast. + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + instoffset = pContextDma->Instance[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] << 4; + NV_ASSERT_OR_RETURN(instoffset, NV_ERR_INVALID_OBJECT); + + FrameAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + Limit = FrameAddr + pContextDma->Limit; + + kind = memdescGetPteKindForGpu(pMemDesc, pGpu); + + // Cannot bind a Z surface to display. Bug 439965. 
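+ // (FB_IS_KIND_Z identifies depth-buffer PTE kinds, which display cannot scan out.)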
+ if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_Z, kind)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Set surface format + // + ctxDMAFlag = 0; + + bIsSurfaceBl = memmgrIsSurfaceBlockLinear_HAL(pMemoryManager, pContextDma->pMemory, + kind, pContextDma->Flags); + + if (bIsSurfaceBl) + { + ctxDMAFlag |= SF_DEF(_DMA, _KIND, _BLOCKLINEAR); + } + else + { + ctxDMAFlag |= SF_DEF(_DMA, _KIND, _PITCH); + } + + if (pContextDma->bReadOnly) + { + ctxDMAFlag |= SF_DEF(_DMA, _ACCESS, _READ_ONLY); + } + else + { + ctxDMAFlag |= SF_DEF(_DMA, _ACCESS, _READ_AND_WRITE); + } + + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_SYSMEM: + case ADDR_REGMEM: + // SOC Display always need _PHYSICAL_NVM flag to be set as display is not over PCI + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_NVM); + } + else + { + if (pContextDma->CacheSnoop) + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_PCI_COHERENT); + else + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_PCI); + } + break; + case ADDR_FBMEM: + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_NVM); + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid address space: %d\n", + memdescGetAddressSpace(pMemDesc)); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = instoffset; + + pInstMemCpuVA = memmgrMemBeginTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC); + if (pInstMemCpuVA == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_TARGET_NODE), ctxDMAFlag); // word 0 + + // Address in disp ctxdma is 256B aligned + FrameAddr256Align = FrameAddr >> 8; + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_BASE_LO), // word 1 + NvU64_LO32(FrameAddr256Align)); + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_BASE_HI), // word 2 + NvU64_HI32(FrameAddr256Align)); + + Limit256Align = Limit >> 8; + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_LO), // word 3 + NvU64_LO32(Limit256Align)); + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_HI), // word 4 + NvU64_HI32(Limit256Align)); + + memmgrMemEndTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC); + + return NV_OK; +} + +/*! + * @brief Update the Context DMA already in display instance memory + * + * NOTE: this control call may be called at high IRQL on WDDM. 
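+ * (See instmemStateLoad: a persistent CPU mapping is kept for this path because
+ * these calls can arrive with BYPASS_LOCK, so no new mapping may be created here.)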
+ */ +NV_STATUS +instmemUpdateContextDma_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + NvU64 *pNewAddress, + NvU64 *pNewLimit, + NvHandle hMemory, + NvU32 comprInfo +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU8 *pInst; + NvU32 instoffset; + TRANSFER_SURFACE dest = {0}; + + // Must use comprInfo to specify kind + NV_CHECK_OR_RETURN(LEVEL_SILENT, hMemory == NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + + instoffset = pContextDma->Instance[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] << 4; + NV_ASSERT(instoffset); + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = instoffset; + + pInst = memmgrMemBeginTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC | TRANSFER_FLAGS_SHADOW_INIT_MEM); + if (pInst == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto exit; + } + + if (pNewAddress != NULL) + { + // Address in disp ctxdma is 256B aligned + NvU64 newAddress256Align = (*pNewAddress) >> 8; + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_BASE_LO), + NvU64_LO32(newAddress256Align)); + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_BASE_HI), + NvU64_HI32(newAddress256Align)); + } + + if (pNewLimit != NULL) + { + NvU64 newLimit256Align = (*pNewLimit) >> 8; + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_LO), + NvU64_LO32(newLimit256Align)); + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_HI), + NvU64_HI32(newLimit256Align)); + } + + if (comprInfo != NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_NONE) + { + NvU32 word = MEM_RD32(pInst + SF_OFFSET(NV_DMA_KIND)); + + if (comprInfo == NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_BLOCK_LINEAR) + { + word = FLD_SF_DEF(_DMA, _KIND, _BLOCKLINEAR, word); + } + else + { + word = FLD_SF_DEF(_DMA, _KIND, _PITCH, word); + } + + MEM_WR32(pInst + SF_OFFSET(NV_DMA_KIND), word); + } + + memmgrMemEndTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC | TRANSFER_FLAGS_SHADOW_INIT_MEM); + +exit: + + return status; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c new file mode 100644 index 0000000..68d9171 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c @@ -0,0 +1,1003 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
 IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**************************** Instmem Routines *****************************\
+* *
+* Display instance memory object function Definitions. *
+* *
+\***************************************************************************/
+
+#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0
+
+#include "gpu/disp/kern_disp.h"
+#include "gpu/disp/disp_channel.h"
+#include "gpu/disp/inst_mem/disp_inst_mem.h"
+#include "gpu/mem_mgr/context_dma.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "os/nv_memory_type.h"
+#include "os/os.h"
+
+/*!
+ * Display Context DMA instance memory is always 2 16B blocks in size on all chips. There
+ * is no HW support for scatter lists. Instance memory should be naturally aligned.
+ */
+#define DISPLAY_CONTEXT_DMA_INST_SIZE 2
+#define DISPLAY_CONTEXT_DMA_INST_ALIGN 2
+
+#define DISP_INST_MEM_EHEAP_OWNER NvU32_BUILD('i','n','s','t')
+
+/*!
+ * A hardware display hash table entry.
+ */
+typedef struct
+{
+ NvHandle ht_ObjectHandle;
+ NvV32 ht_Context;
+} DISP_HW_HASH_TABLE_ENTRY;
+
+
+/*! @brief Constructor */
+NV_STATUS
+instmemConstruct_IMPL
+(
+ DisplayInstanceMemory *pInstMem
+)
+{
+ pInstMem->pInstMem = NULL;
+ pInstMem->pAllocedInstMemDesc = NULL;
+ pInstMem->pInstMemDesc = NULL;
+ pInstMem->pHashTable = NULL;
+ pInstMem->pInstHeap = NULL;
+
+ return NV_OK;
+}
+
+
+/*!
+ * @brief Instmem destructor
+ */
+void
+instmemDestruct_IMPL
+(
+ DisplayInstanceMemory *pInstMem
+)
+{
+}
+
+/*! @brief Initialize heap-related fields in display instance memory */
+static NV_STATUS
+instmemInitBitmap
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NvU32 instMemSize,
+ NvU32 hashTableSize
+)
+{
+ NV_STATUS status = NV_OK;
+ NvU64 base, limit;
+ NvU64 allocSize, allocOffset;
+ NvU32 allocFlags;
+ NvU32 freeInstMemBase;
+ NvU32 freeInstMemSize;
+ NvU32 freeInstMemMax;
+
+ //
+ // Locate and size the free instance area. This is the base where
+ // allocations should start and size of the allocatable inst mem.
+ // Initially hash table is the only entity that's allocated.
+ //
+ freeInstMemBase = hashTableSize >> 4;
+ freeInstMemSize = instMemSize - hashTableSize;
+ freeInstMemMax = (freeInstMemSize / 16) & ~0x07;
+
+ // Allocate the Instmem heap manager
+ pInstMem->pInstHeap = portMemAllocNonPaged(sizeof(OBJEHEAP));
+ if (pInstMem->pInstHeap == NULL)
+ {
+ NV_PRINTF(LEVEL_ERROR, "Unable to allocate instance memory heap manager.\n");
+ status = NV_ERR_NO_MEMORY;
+ goto exit;
+ }
+ portMemSet(pInstMem->pInstHeap, 0x00, sizeof(OBJEHEAP));
+
+ NV_PRINTF(LEVEL_INFO, "FB Free Size = 0x%x\n", freeInstMemSize);
+ NV_PRINTF(LEVEL_INFO, "FB Free Inst Base = 0x%x\n", freeInstMemBase);
+ NV_PRINTF(LEVEL_INFO, "FB Free Inst Max = 0x%x\n",
+ freeInstMemMax + freeInstMemBase);
+
+ //
+ // Construct the Instmem heap manager - Pre-allocate mgmt structures
+ // to avoid dynamic allocation and allow bind/unbind at high IRQL
+ // on Windows. Size to fill hash table + NULL instance.
+ //
+ base = freeInstMemBase;
+ limit = freeInstMemBase + freeInstMemMax + 1;
+ constructObjEHeap(
+ pInstMem->pInstHeap,
+ base,
+ limit,
+ 0, // sizeofMemBlock
+ pInstMem->nHashTableEntries + 1); // numPreAllocMemStruct
+
+ // Reserve instance 0 as the NULL instance.
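+ // (An Instance value of 0 is treated as invalid/unbound by the bind and free
+ // paths later in this file, so offset 0 must never be handed out.)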
+ allocSize = 1;
+ allocOffset = base;
+ allocFlags = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE;
+ status = pInstMem->pInstHeap->eheapAlloc(
+ pInstMem->pInstHeap, // thisHeap
+ DISP_INST_MEM_EHEAP_OWNER, // owner
+ &allocFlags, // flags
+ &allocOffset, // offset
+ &allocSize, // size
+ 1, // offsetAlign
+ 1, // sizeAlign
+ NULL, // ppMemBlock
+ NULL, // isolation id
+ NULL); // callback ownership checker
+ if (status != NV_OK)
+ {
+ NV_PRINTF(LEVEL_ERROR, "eheapAlloc failed for instance memory heap manager.\n");
+ status = NV_ERR_NO_MEMORY;
+ }
+
+exit:
+ return status;
+}
+
+/*! @brief Initialize hash-table-related fields in display instance memory */
+static NV_STATUS
+instmemInitHashTable
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NvU32 hashTableSize
+)
+{
+ NV_STATUS status = NV_OK;
+ NvU32 i;
+
+ pInstMem->nHashTableEntries = hashTableSize / sizeof(DISP_HW_HASH_TABLE_ENTRY);
+ pInstMem->hashTableBaseAddr = instmemGetHashTableBaseAddr_HAL(pGpu, pInstMem);
+
+ // Allocate Hash Table structure.
+ pInstMem->pHashTable = portMemAllocNonPaged(pInstMem->nHashTableEntries *
+ sizeof(SW_HASH_TABLE_ENTRY));
+ if (pInstMem->pHashTable == NULL)
+ {
+ NV_PRINTF(LEVEL_ERROR, "Unable to allocate hash table.\n");
+ status = NV_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ // Initialize Hash Table.
+ for (i = 0; i < pInstMem->nHashTableEntries; i++)
+ {
+ pInstMem->pHashTable[i].pContextDma = NULL;
+ }
+
+exit:
+ return status;
+}
+
+/*!
+ * @brief Save instance memory parameters
+ *
+ * For dGPU, this is called from mem_mgr initialization with reserved frame buffer
+ * memory. For SOC, we dynamically allocate system memory later.
+ */
+void
+instmemSetMemory_IMPL
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NV_ADDRESS_SPACE dispInstMemAddrSpace,
+ NvU32 dispInstMemAttr,
+ NvU64 dispInstMemBase,
+ NvU32 dispInstMemSize
+)
+{
+ pInstMem->instMemAddrSpace = dispInstMemAddrSpace;
+ pInstMem->instMemAttr = dispInstMemAttr;
+ pInstMem->instMemBase = dispInstMemBase;
+ pInstMem->instMemSize = dispInstMemSize;
+}
+
+/*! @brief Initialize instance memory descriptor */
+static NV_STATUS
+instmemInitMemDesc
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NvU32 instMemSize
+)
+{
+ MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+ NV_STATUS status = NV_OK;
+
+ if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
+ {
+ //
+ // The FB reserved memory path does not get called for Tegra system memory
+ // scanout, so the inst mem descriptor is not initialized there. For now,
+ // hardcode dispInstMemAttr to NV_MEMORY_CACHED; this should eventually be
+ // set based on system configuration or a registry parameter.
+ //
+ instmemSetMemory(pGpu, pInstMem,
+ ADDR_SYSMEM, NV_MEMORY_CACHED,
+ 0 /* base */, instMemSize);
+ }
+ else if (IS_GSP_CLIENT(pGpu))
+ {
+ // TODO: Need to respect RM overrides and keep monolithic design same as offload.
+ instmemSetMemory(pGpu, pInstMem, + ADDR_FBMEM, NV_MEMORY_WRITECOMBINED, + 0 , instMemSize); + } + + switch (pInstMem->instMemAddrSpace) + { + default: + case ADDR_FBMEM: + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreate(&pInstMem->pInstMemDesc, pGpu, + pInstMem->instMemSize, + DISP_INST_MEM_ALIGN, + NV_TRUE, pInstMem->instMemAddrSpace, + pInstMem->instMemAttr, + MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO), + exit); + + memdescDescribe(pInstMem->pInstMemDesc, + ADDR_FBMEM, + memmgrGetRsvdMemoryBase(pMemoryManager) + pInstMem->instMemBase, + pInstMem->instMemSize); + } + break; + + case ADDR_SYSMEM: + { + // + // memdescAlloc won't (currently) honor a request for sysmem alloc alignment! Overallocate + // and round up the address to work around this. + // + // Create a sub-memdesc to the aligned block. This keeps the alignment calculation local + // to this function. + // + NvU64 base; + NvU64 offset; + NvBool bContig = NV_TRUE; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // On Orin, display FE goes through the NISO SMMU to read + // from Instance Memory. As such, there's absolutely no + // reason why we need a contiguous allocation for Instance + // Memory. + // + bContig = NV_FALSE; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreate(&pInstMem->pAllocedInstMemDesc, pGpu, + instMemSize + (DISP_INST_MEM_ALIGN - RM_PAGE_SIZE), + DISP_INST_MEM_ALIGN, + bContig, pInstMem->instMemAddrSpace, + pInstMem->instMemAttr, + MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO), + exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescAlloc(pInstMem->pAllocedInstMemDesc), + exit); + + base = memdescGetPhysAddr(pInstMem->pAllocedInstMemDesc, AT_GPU, 0); + offset = RM_ALIGN_UP(base, DISP_INST_MEM_ALIGN) - base; + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreateSubMem(&pInstMem->pInstMemDesc, pInstMem->pAllocedInstMemDesc, + pGpu, + offset, + instMemSize), + exit); + } + break; + } + +exit: + // Clean-up is handled by the caller + return status; +} + +/*! 
@brief Free all memory allocations done for display instance memory */ +static void +instmemDestroy +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + // Free up the inst mem descriptors + if (pInstMem->pInstMemDesc != NULL) + { + memdescDestroy(pInstMem->pInstMemDesc); + pInstMem->pInstMemDesc = NULL; + } + if (pInstMem->pAllocedInstMemDesc != NULL) + { + memdescFree(pInstMem->pAllocedInstMemDesc); + memdescDestroy(pInstMem->pAllocedInstMemDesc); + pInstMem->pAllocedInstMemDesc = NULL; + } + + if (pInstMem->pInstHeap != NULL) + { + pInstMem->pInstHeap->eheapDestruct(pInstMem->pInstHeap); + portMemFree(pInstMem->pInstHeap); + pInstMem->pInstHeap = NULL; + } + + portMemFree(pInstMem->pHashTable); + pInstMem->pHashTable = NULL; +} + +NV_STATUS +instmemStateInitLocked_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 hClient = pGpu->hInternalClient; + NvU32 hSubdevice = pGpu->hInternalSubdevice; + NvU32 instMemSize, hashTableSize; + NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS ctrlParams; + + instmemGetSize_HAL(pGpu, pInstMem, &instMemSize, &hashTableSize); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitHashTable(pGpu, pInstMem, hashTableSize), exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitBitmap(pGpu, pInstMem, instMemSize, hashTableSize), exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitMemDesc(pGpu, pInstMem, instMemSize), exit); + + // Make internal RPC to write the instance memory register + ctrlParams.instMemAddrSpace = memdescGetAddressSpace(pInstMem->pInstMemDesc); + ctrlParams.instMemCpuCacheAttr = memdescGetCpuCacheAttrib(pInstMem->pInstMemDesc); + ctrlParams.instMemPhysAddr = memdescGetPhysAddr(pInstMem->pInstMemDesc, AT_GPU, 0); + ctrlParams.instMemSize = memdescGetSize(pInstMem->pInstMemDesc); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, hClient, hSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM, + &ctrlParams, sizeof(ctrlParams)), exit); + +exit: + if (status != NV_OK) + { + instmemDestroy(pGpu, pInstMem); + } + + return status; +} + +void +instmemStateDestroy_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + instmemDestroy(pGpu, pInstMem); +} + +NV_STATUS +instmemStateLoad_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 flags +) +{ + NvBool bPersistent; + NV_STATUS status = NV_OK; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 hClient = pGpu->hInternalClient; + NvU32 hSubdevice = pGpu->hInternalSubdevice; + NvU32 instMemSize, hashTableSize; + NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS ctrlParams; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + instmemGetSize_HAL(pGpu, pInstMem, &instMemSize, &hashTableSize); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitHashTable(pGpu, pInstMem, hashTableSize), exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitBitmap(pGpu, pInstMem, instMemSize, hashTableSize), exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitMemDesc(pGpu, pInstMem, instMemSize), exit); + + // Make internal RPC to write the instance memory register + ctrlParams.instMemAddrSpace = memdescGetAddressSpace(pInstMem->pInstMemDesc); + ctrlParams.instMemCpuCacheAttr = memdescGetCpuCacheAttrib(pInstMem->pInstMemDesc); + ctrlParams.instMemPhysAddr = memdescGetPhysAddr(pInstMem->pInstMemDesc, AT_GPU, 0); + ctrlParams.instMemSize = memdescGetSize(pInstMem->pInstMemDesc); + 
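+ // PM resume: replay the same WRITE_INST_MEM setup RPC that StateInit issues,
+ // since the instance memory descriptor was just re-created above.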
+ NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+ pRmApi->Control(pRmApi, hClient, hSubdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
+ &ctrlParams, sizeof(ctrlParams)), exit);
+ }
+
+ //
+ // We keep a persistent mapping to instance memory in two cases:
+ // * Windows issues bind/update/unbind control calls with BYPASS_LOCK,
+ // so we cannot generate a new BAR2 mapping at control call time.
+ // * System memory backing.
+ //
+ bPersistent = (pInstMem->instMemAddrSpace == ADDR_SYSMEM);
+ if (bPersistent)
+ {
+ //
+ // Windows issues bind/update/unbind control calls with BYPASS_LOCK, so we
+ // cannot generate a new BAR2 mapping at control call time; create the
+ // mapping once here and keep it.
+ //
+ pInstMem->pInstMem = memdescMapInternal(pGpu, pInstMem->pInstMemDesc,
+ TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING);
+ if (pInstMem->pInstMem == NULL)
+ return NV_ERR_NO_MEMORY;
+ }
+
+exit:
+ if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))
+ {
+ if (status != NV_OK)
+ {
+ instmemDestroy(pGpu, pInstMem);
+ }
+ }
+
+ return status;
+}
+
+NV_STATUS
+instmemStateUnload_IMPL
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NvU32 flags
+)
+{
+ if (pInstMem->pInstMem != NULL)
+ {
+ memdescUnmapInternal(pGpu, pInstMem->pInstMemDesc, TRANSFER_FLAGS_NONE);
+ pInstMem->pInstMem = NULL;
+ }
+
+ return NV_OK;
+}
+
+/*!
+ * @brief Reserve a chunk of display instance memory (will always be for Context DMAs).
+ * @return offset from the base of display instance memory (not base of FB).
+ */
+static NV_STATUS
+_instmemReserveContextDma
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NvU32 *offset
+)
+{
+ NV_STATUS rmStatus;
+ NvU64 allocSize = DISPLAY_CONTEXT_DMA_INST_SIZE; // 2 x 16B blocks; heap units are 16 bytes
+ NvU64 allocOffset;
+ NvU32 allocFlags = 0;
+
+ *offset = 0;
+
+ rmStatus = pInstMem->pInstHeap->eheapAlloc(
+ pInstMem->pInstHeap, // thisHeap
+ DISP_INST_MEM_EHEAP_OWNER, // owner
+ &allocFlags, // flags
+ &allocOffset, // offset
+ &allocSize, // size
+ DISPLAY_CONTEXT_DMA_INST_ALIGN, // offsetAlign
+ DISPLAY_CONTEXT_DMA_INST_ALIGN, // sizeAlign
+ NULL, // ppMemBlock
+ NULL, // isolation id
+ NULL); // callback ownership checker
+
+ // return the allocation offset if successful
+ if (rmStatus == NV_OK)
+ {
+ *offset = (NvU32)allocOffset;
+ }
+ else
+ {
+ rmStatus = NV_ERR_NO_MEMORY;
+ }
+
+ return rmStatus;
+}
+
+/*!
+ * @brief Free display instance memory reserved for Context DMA.
+ */
+static NV_STATUS
+_instmemFreeContextDma
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ NvU32 offset
+)
+{
+ //
+ // If instance is already set to 0, then it has already been freed. This can
+ // happen in some cases when a mode switch is happening while MIDI is playing
+ // using the timer alarm notifies. Failing to check for this case can
+ // potentially cause a protection fault, so be careful.
+ // + if (offset == 0) + return NV_OK; + + if (pInstMem->pInstHeap == NULL) + return NV_OK; + + pInstMem->pInstHeap->eheapFree( + pInstMem->pInstHeap, // thisHeap + offset); // offset + + return NV_OK; +} + +static void +_instmemClearHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 htEntry +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + TRANSFER_SURFACE dest = {0}; + NvU32 entryOffset; + DISP_HW_HASH_TABLE_ENTRY entry; + + pInstMem->pHashTable[htEntry].pContextDma = NULL; + pInstMem->pHashTable[htEntry].pDispChannel = NULL; + + // + // If we found the entry, clear the inst mem copy of the entry + // Start with offset of base of inst mem + // Add offset of base of hash table from base of inst mem + // Add the offset of entry from base of hash table + // + entryOffset = pInstMem->hashTableBaseAddr + + (sizeof(DISP_HW_HASH_TABLE_ENTRY) * htEntry); + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = entryOffset; + + entry.ht_ObjectHandle = 0; + entry.ht_Context = instmemGenerateHashTableData_HAL(pGpu, pInstMem, + 0 /* client id */, + 0 /* NV_UDISP_HASH_TBL_INSTANCE_INVALID */, + 0 /* dispChannelNum */); + + NV_ASSERT_OK(memmgrMemWrite(pMemoryManager, &dest, &entry, sizeof(entry), + TRANSFER_FLAGS_NONE)); +} + +static NV_STATUS +_instmemRemoveHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + NvU32 htEntry; + + for (htEntry = 0; htEntry < pInstMem->nHashTableEntries; htEntry++) + { + if ( (pInstMem->pHashTable[htEntry].pContextDma == pContextDma) && + (pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel)) + { + _instmemClearHashEntry(pGpu, pInstMem, htEntry); + return NV_OK; + } + } + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS +_instmemAddHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel, + NvU32 offset +) +{ + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pContextDma); + NvU32 entryOffset, dispChannelNum; + NvU32 Limit, i, Entry; + NvHandle handle = RES_GET_HANDLE(pContextDma); + NvU32 hash; + NV_STATUS status; + TRANSFER_SURFACE dest = {0}; + DISP_HW_HASH_TABLE_ENTRY entry; + + status = kdispGetChannelNum_HAL(pKernelDisplay, pDispChannel->DispClass, pDispChannel->InstanceNumber, &dispChannelNum); + if (status != NV_OK) + return status; + + // Query HAL for starting entry for this pair. + instmemHashFunc_HAL(pGpu, pInstMem, hClient, RES_GET_HANDLE(pContextDma), dispChannelNum, &hash); + + // + // Since all the ctx dmas are 32 byte aligned, we don't need to + // store offsets in bytes. We store "which 32 byte chunk" does the + // ctx dma reside in. So, right shift the whole thing by 5 after + // left shifting by 4 (need to left shift by 4 since internally we + // track offsets in 16 byte chunks + // + offset >>= (5 - 4); // offset <<= 4; followed by offset >>= 5 + + if (offset == 0) //NV_UDISP_HASH_TBL_INSTANCE_INVALID + { + NV_PRINTF(LEVEL_ERROR, "Instance pointer is invalid!!\n"); + return (NV_ERR_GENERIC); + } + + // + // Make sure instance memory pointer is valid as well. That is, + // it's within the mask range of possible instance values + // + NV_ASSERT(instmemIsValid_HAL(pGpu, pInstMem, offset)); + + // Make sure that hash is valid as well. + NV_ASSERT(hash < pInstMem->nHashTableEntries); + + // + // Search table for free slot. 
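+ // (Open addressing with linear probing: start at the hashed slot and walk
+ // forward, wrapping via the power-of-two mask described below.)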
+ //
+ // Here's the old way that we did this - it allows for arbitrary sized hash tables
+ //
+ // Limit = hash + pDispHalPvtInfo->pPram[ChID].nHashTableEntries; // loop over whole table
+ // Entry = hash;
+ // while(Entry < Limit)
+ // {
+ // if (pDispHalPvtInfo->pPram[ChID].pHashTable[Entry].Object == NULL)
+ // break;
+ //
+ // //
+ // // if we just checked the last entry and have more entries
+ // // to check for empty, wrap search back to beginning of table
+ // //
+ // if (Entry == (pDispHalPvtInfo->pPram[ChID].nHashTableEntries-1) &&
+ // ((Entry + 1) < Limit))
+ // {
+ // Limit = Limit - Entry - 1; // -1 since we count the one we just checked
+ // Entry = 0;
+ // continue;
+ // }
+ //
+ // Entry++;
+ // }
+ //
+ // But since we know that this hash table is always 512 in size, let's go ahead
+ // and make this assumption to make the loops faster. Or even better, let's just
+ // make sure that the Hash Depth is a power of 2. That way, we can use
+ // nHashTableEntries - 1 as the mask of what entries are valid - and this allows for any
+ // nHashTableEntries that is a power of 2.
+ //
+ NV_ASSERT(!(pInstMem->nHashTableEntries & (pInstMem->nHashTableEntries - 1)));
+
+ Limit = hash + pInstMem->nHashTableEntries; // loop over whole table
+ Entry = hash;
+ for (i = hash; i < Limit; i++)
+ {
+ // Mask off high bits of i since we loop the hash table.
+ Entry = i & (pInstMem->nHashTableEntries - 1);
+ if (pInstMem->pHashTable[Entry].pContextDma == NULL)
+ break;
+ }
+
+ if (pInstMem->pHashTable[Entry].pContextDma != NULL)
+ {
+ NV_PRINTF(LEVEL_ERROR, "Display Hash table is FULL!!\n");
+ return NV_ERR_TOO_MANY_PRIMARIES;
+ }
+
+ entryOffset = pInstMem->hashTableBaseAddr +
+ (Entry * sizeof(DISP_HW_HASH_TABLE_ENTRY));
+
+ // Add object to the Hash Table.
+ pInstMem->pHashTable[Entry].pContextDma = pContextDma;
+ pInstMem->pHashTable[Entry].pDispChannel = pDispChannel;
+
+ dest.pMemDesc = pInstMem->pInstMemDesc;
+ dest.offset = entryOffset;
+
+ entry.ht_ObjectHandle = handle;
+
+ // Note that we have full 32 bit client id at this point and we only need to tell hw the lower 14 bits
+ entry.ht_Context = instmemGenerateHashTableData_HAL(
+ pGpu,
+ pInstMem,
+ hClient,
+ offset,
+ dispChannelNum);
+
+ NV_ASSERT_OK_OR_RETURN(memmgrMemWrite(pMemoryManager, &dest, &entry, sizeof(entry),
+ TRANSFER_FLAGS_NONE));
+
+ return NV_OK;
+}
+
+/*!
+ * @brief Is this ContextDma bound to this DispChannel?
+ */
+static NV_STATUS
+_instmemProbeHashEntry
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ ContextDma *pContextDma,
+ DispChannel *pDispChannel
+)
+{
+ KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+ NvU32 dispChannelNum;
+ NV_STATUS status;
+ NvU32 hash = 0;
+ NvU32 limit;
+ NvU32 i;
+
+ status = kdispGetChannelNum_HAL(pKernelDisplay, pDispChannel->DispClass, pDispChannel->InstanceNumber, &dispChannelNum);
+ if (status == NV_OK)
+ {
+ instmemHashFunc_HAL(pGpu, pInstMem,
+ RES_GET_CLIENT_HANDLE(pContextDma),
+ RES_GET_HANDLE(pContextDma),
+ dispChannelNum, &hash);
+ }
+
+ // Hash table size must currently be a power of 2
+ NV_ASSERT(!(pInstMem->nHashTableEntries & (pInstMem->nHashTableEntries - 1)));
+
+ limit = hash + pInstMem->nHashTableEntries; // loop over whole table
+
+ for (i = hash; i < limit; i++)
+ {
+ NvU32 htEntry = i & (pInstMem->nHashTableEntries - 1);
+
+ if ((pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel) &&
+ (pInstMem->pHashTable[htEntry].pContextDma == pContextDma))
+ {
+ return NV_OK;
+ }
+ }
+
+ return NV_ERR_OBJECT_NOT_FOUND;
+}
+
+/*!
+ * @brief Bind the ContextDma to the given Display Channel + */ +NV_STATUS +instmemBindContextDma_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + NvU32 gpuSubDevInst; + NV_STATUS status; + + gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // Production SW requires each context is bound only once + status = _instmemProbeHashEntry(pGpu, pInstMem, pContextDma, pDispChannel); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "The ctx dma (0x%x) has already been bound\n", + RES_GET_HANDLE(pContextDma)); + status = NV_ERR_STATE_IN_USE; + goto exit; + } + + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + // Reserve inst mem space for this ctx dma + status = _instmemReserveContextDma(pGpu, pInstMem, &(pContextDma->Instance[gpuSubDevInst])); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to alloc space in disp inst mem for ctx dma 0x%x\n", + RES_GET_HANDLE(pContextDma)); + goto exit; + } + + // Call into HAL to write inst mem with the ctx dma info + status = instmemCommitContextDma_HAL(pGpu, pInstMem, pContextDma); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to commit ctx dma (0x%x) to inst mem\n", + RES_GET_HANDLE(pContextDma)); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + goto exit; + } + } + + // + // Now add the hash table entry for this ctx dma + // We loop around this call instead of looping at MEM_WR level because we + // also want to propagate the SW hash table. + // + status = _instmemAddHashEntry(pGpu, pInstMem, + pContextDma, + pDispChannel, + pContextDma->Instance[gpuSubDevInst]); + if (status != NV_OK) + { + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + } + goto exit; + } + + // We have one more reference to the context DMA in instance memory now. + pContextDma->InstRefCount[gpuSubDevInst]++; + +exit: + + return status; +} + +/*! + * @brief Remove reference to an instance allocation. Free after last reference. + */ +void +_instmemRemoveReference +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma +) +{ + NvU32 gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + NV_ASSERT(pContextDma->InstRefCount[gpuSubDevInst]); + if (pContextDma->InstRefCount[gpuSubDevInst]) + { + pContextDma->InstRefCount[gpuSubDevInst]--; + + // Remove DMA object if this is the last binding + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + } + } +} + +/*! + * @brief Unbind the ContextDma from the given Display Channel + */ +NV_STATUS +instmemUnbindContextDma_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + NV_STATUS status; + + // If ContextDma is not bound to this subdevice, there is no bookkeeping to do + status = _instmemRemoveHashEntry(pGpu, pInstMem, pContextDma, pDispChannel); + if (status == NV_OK) + { + _instmemRemoveReference(pGpu, pInstMem, pContextDma); + } + + return status; +} + +/*! 
+ * @brief Unbind the ContextDma from all Display channels
+ */
+void
+instmemUnbindContextDmaFromAllChannels_IMPL
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ ContextDma *pContextDma
+)
+{
+ NvU32 htEntry;
+
+ // Check all entries in the hash table
+ for (htEntry = 0; htEntry < pInstMem->nHashTableEntries; htEntry++)
+ {
+ if (pInstMem->pHashTable[htEntry].pContextDma == pContextDma)
+ {
+ _instmemClearHashEntry(pGpu, pInstMem, htEntry);
+ _instmemRemoveReference(pGpu, pInstMem, pContextDma);
+ }
+ }
+
+}
+
+/*!
+ * @brief Unbind all ContextDmas from the given Display channel
+ */
+void
+instmemUnbindDispChannelContextDmas_IMPL
+(
+ OBJGPU *pGpu,
+ DisplayInstanceMemory *pInstMem,
+ DispChannel *pDispChannel
+)
+{
+ NvU32 htEntry;
+
+ // Check all entries in the hash table
+ for (htEntry = 0; htEntry < pInstMem->nHashTableEntries; htEntry++)
+ {
+ if (pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel)
+ {
+ ContextDma *pContextDma = pInstMem->pHashTable[htEntry].pContextDma;
+
+ _instmemClearHashEntry(pGpu, pInstMem, htEntry);
+ _instmemRemoveReference(pGpu, pInstMem, pContextDma);
+ }
+ }
+
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c
new file mode 100644
index 0000000..84bcf0d
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c
@@ -0,0 +1,933 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "os/os.h" + +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/disp/head/kernel_head.h" +#include "gpu/disp/disp_objs.h" +#include "gpu_mgr/gpu_mgr.h" +#include "objtmr.h" +#include "core/locks.h" + +#include "ctrl/ctrl2080.h" + +#include "class/cl5070.h" +#include "class/cl917a.h" +#include "class/cl917b.h" +#include "class/cl917c.h" +#include "class/cl917d.h" +#include "class/cl917e.h" +#include "class/cl927c.h" +#include "class/cl927d.h" +#include "class/cl947d.h" +#include "class/cl957d.h" +#include "class/cl977d.h" +#include "class/cl987d.h" +#include "class/clc37a.h" +#include "class/clc37b.h" +#include "class/clc37d.h" +#include "class/clc37e.h" +#include "class/clc57a.h" +#include "class/clc57b.h" +#include "class/clc57d.h" +#include "class/clc57e.h" +#include "class/clc67a.h" +#include "class/clc67b.h" +#include "class/clc67d.h" +#include "class/clc67e.h" +#include "class/clc77f.h" //NVC77F_ANY_CHANNEL_DMA + +NV_STATUS +kdispConstructEngine_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + ENGDESCRIPTOR engDesc) +{ + NV_STATUS status; + + // + // NOTE: DO NOT call IpVersion _HAL functions in ConstructEngine. + // IP version based _HAL functions can only be used starting StatePreInit. + // Long-term: RM offload initialization will be moved earlier so KernelDisplay + // has the ability to use IP version HAL functions even in construct phase. + // + + // + // Sanity check: the only time KERNEL_DISPLAY module should be enabled + // while DISP is disabled is on KERNEL_ONLY build. + // + NV_ASSERT(IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || RMCFG_MODULE_DISP); + + // + // We also need to check if we are in certain configurations which can't + // even attempt a control call to DISP. + // + if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IS_MISSING)) + return NV_ERR_NOT_SUPPORTED; + + // Create children + pKernelDisplay->pInst = NULL; + status = kdispConstructInstMem_HAL(pKernelDisplay); + if (status != NV_OK) + { + return status; + } + + status = kdispConstructKhead(pKernelDisplay); + + // We defer checking whether DISP has been disabled some other way until + // StateInit, when we can do a control call. + + return status; +} + +void +kdispDestruct_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + // Destroy children + kdispDestructInstMem_HAL(pKernelDisplay); + kdispDestructKhead(pKernelDisplay); +} + +/*! Constructor for DisplayInstanceMemory */ +NV_STATUS +kdispConstructInstMem_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + NV_STATUS status; + DisplayInstanceMemory *pInst; + + status = objCreate(&pInst, pKernelDisplay, DisplayInstanceMemory); + if (status != NV_OK) + { + return status; + } + + pKernelDisplay->pInst = pInst; + return NV_OK; +} + +/*! Destructor for DisplayInstanceMemory */ +void +kdispDestructInstMem_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + if (pKernelDisplay->pInst != NULL) + { + objDelete(pKernelDisplay->pInst); + pKernelDisplay->pInst = NULL; + } +} + +/*! 
 Constructor for Kernel head */
+NV_STATUS
+kdispConstructKhead_IMPL
+(
+ KernelDisplay *pKernelDisplay
+)
+{
+ NV_STATUS status;
+ KernelHead *pKernelHead;
+ NvU8 headIdx;
+
+ for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
+ {
+ status = objCreate(&pKernelHead, pKernelDisplay, KernelHead);
+ if (status != NV_OK)
+ {
+ return status;
+ }
+
+ pKernelDisplay->pKernelHead[headIdx] = pKernelHead;
+ pKernelDisplay->pKernelHead[headIdx]->PublicId = headIdx;
+ }
+ return NV_OK;
+}
+
+/*! Destructor for Kernel head */
+void
+kdispDestructKhead_IMPL
+(
+ KernelDisplay *pKernelDisplay
+)
+{
+ NvU8 headIdx;
+
+ for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
+ {
+ if (pKernelDisplay->pKernelHead[headIdx] != NULL)
+ {
+ objDelete(pKernelDisplay->pKernelHead[headIdx]);
+ pKernelDisplay->pKernelHead[headIdx] = NULL;
+ }
+ }
+}
+
+NV_STATUS
+kdispStatePreInitLocked_IMPL(OBJGPU *pGpu,
+ KernelDisplay *pKernelDisplay)
+{
+ NV_STATUS status;
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+ NvU32 hClient = pGpu->hInternalClient;
+ NvU32 hSubdevice = pGpu->hInternalSubdevice;
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS ctrlParams;
+
+ status = pRmApi->Control(pRmApi, hClient, hSubdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION,
+ &ctrlParams, sizeof(ctrlParams));
+ if (status != NV_OK)
+ {
+ NV_PRINTF(LEVEL_WARNING,
+ "Failed to read display IP version (FUSE disabled), status=0x%x\n",
+ status);
+ return status;
+ }
+
+ // NOTE: KernelDisplay IpVersion _HAL functions can only be called after this point.
+ status = gpuInitDispIpHal(pGpu, ctrlParams.ipVersion);
+
+ return status;
+}
+
+NV_STATUS
+kdispStateInitLocked_IMPL(OBJGPU *pGpu,
+ KernelDisplay *pKernelDisplay)
+{
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+ NV_STATUS status = NV_OK;
+ KernelDisplayStaticInfo *pStaticInfo;
+
+ pStaticInfo = portMemAllocNonPaged(sizeof(KernelDisplayStaticInfo));
+ if (pStaticInfo == NULL)
+ {
+ NV_PRINTF(LEVEL_ERROR, "Could not allocate KernelDisplayStaticInfo\n");
+ status = NV_ERR_NO_MEMORY;
+ goto exit;
+ }
+ portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo));
+
+ NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+ pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ pStaticInfo, sizeof(*pStaticInfo)),
+ exit);
+
+ pKernelDisplay->pStaticInfo = pStaticInfo;
+ pStaticInfo = NULL;
+
+ if (pKernelDisplay->pInst != NULL)
+ {
+ NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+ instmemStateInitLocked(pGpu, pKernelDisplay->pInst), exit);
+ }
+
+ if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE))
+ {
+ // NOTE: Fills IMP parameters and populates them into the disp object on Tegra
+ NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+ kdispImportImpData_HAL(pKernelDisplay), exit);
+ }
+
+exit:
+ portMemFree(pStaticInfo);
+
+ return status;
+}
+
+void
+kdispStateDestroy_IMPL(OBJGPU *pGpu,
+ KernelDisplay *pKernelDisplay)
+{
+ if (pKernelDisplay->pInst != NULL)
+ {
+ instmemStateDestroy(pGpu, pKernelDisplay->pInst);
+ }
+
+ portMemFree((void*) pKernelDisplay->pStaticInfo);
+ pKernelDisplay->pStaticInfo = NULL;
+}
+
+NV_STATUS
+kdispStateLoad_IMPL
+(
+ OBJGPU *pGpu,
+ KernelDisplay *pKernelDisplay,
+ NvU32 flags
+)
+{
+ NV_STATUS status = NV_OK;
+
+ if (pKernelDisplay->pInst != NULL)
+ status = instmemStateLoad(pGpu, pKernelDisplay->pInst, flags);
+
+ if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))
+ {
+ if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE))
+ {
+ // NOTE: Fills IMP parameters
 and populates them into the disp object on Tegra
+ NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+ kdispImportImpData_HAL(pKernelDisplay), exit);
+ }
+ }
+
+exit:
+ return status;
+}
+
+NV_STATUS
+kdispStateUnload_IMPL
+(
+ OBJGPU *pGpu,
+ KernelDisplay *pKernelDisplay,
+ NvU32 flags
+)
+{
+ NV_STATUS status = NV_OK;
+
+ if (pKernelDisplay->pInst != NULL)
+ status = instmemStateUnload(pGpu, pKernelDisplay->pInst, flags);
+
+ return status;
+}
+
+/*! Get and populate IMP init data for Tegra */
+NV_STATUS
+kdispImportImpData_IMPL(KernelDisplay *pKernelDisplay)
+{
+ OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay);
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+ NvU32 hClient = pGpu->hInternalClient;
+ NvU32 hSubdevice = pGpu->hInternalSubdevice;
+ NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS params;
+ NvU32 simulationMode;
+
+ //
+ // FPGA has different latency characteristics, and the current code latency
+ // models that IMP uses for silicon will not work for FPGA, so keep IMP
+ // disabled by default on Tegra FPGA.
+ //
+ simulationMode = osGetSimulationMode();
+ if (simulationMode == NV_SIM_MODE_TEGRA_FPGA)
+ {
+ pKernelDisplay->setProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE, NV_FALSE);
+ return NV_OK;
+ }
+
+ NV_ASSERT_OK_OR_RETURN(osTegraSocGetImpImportData(&params.tegraImpImportData));
+
+ NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, hClient, hSubdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO,
+ &params, sizeof(params)));
+
+ return NV_OK;
+}
+
+/*! Get internal enum equivalent of the HW class number */
+NV_STATUS
+kdispGetIntChnClsForHwCls_IMPL
+(
+ KernelDisplay *pKernelDisplay,
+ NvU32 hwClass,
+ DISPCHNCLASS *pDispChnClass
+)
+{
+ // sanity check
+ if (pDispChnClass == NULL)
+ return NV_ERR_INVALID_ARGUMENT;
+
+ switch (hwClass)
+ {
+ case NV917A_CURSOR_CHANNEL_PIO:
+ case NVC37A_CURSOR_IMM_CHANNEL_PIO:
+ case NVC57A_CURSOR_IMM_CHANNEL_PIO:
+ case NVC67A_CURSOR_IMM_CHANNEL_PIO:
+ *pDispChnClass = dispChnClass_Curs;
+ break;
+
+ case NV917B_OVERLAY_IMM_CHANNEL_PIO:
+ *pDispChnClass = dispChnClass_Ovim;
+ break;
+
+ case NV917C_BASE_CHANNEL_DMA:
+ case NV927C_BASE_CHANNEL_DMA:
+ *pDispChnClass = dispChnClass_Base;
+ break;
+
+ case NV917D_CORE_CHANNEL_DMA:
+ case NV927D_CORE_CHANNEL_DMA:
+ case NV947D_CORE_CHANNEL_DMA:
+ case NV957D_CORE_CHANNEL_DMA:
+ case NV977D_CORE_CHANNEL_DMA:
+ case NV987D_CORE_CHANNEL_DMA:
+ case NVC37D_CORE_CHANNEL_DMA:
+ case NVC57D_CORE_CHANNEL_DMA:
+ case NVC67D_CORE_CHANNEL_DMA:
+ *pDispChnClass = dispChnClass_Core;
+ break;
+
+ case NV917E_OVERLAY_CHANNEL_DMA:
+ *pDispChnClass = dispChnClass_Ovly;
+ break;
+
+ case NVC37B_WINDOW_IMM_CHANNEL_DMA:
+ case NVC57B_WINDOW_IMM_CHANNEL_DMA:
+ case NVC67B_WINDOW_IMM_CHANNEL_DMA:
+ *pDispChnClass = dispChnClass_Winim;
+ break;
+
+ case NVC37E_WINDOW_CHANNEL_DMA:
+ case NVC57E_WINDOW_CHANNEL_DMA:
+ case NVC67E_WINDOW_CHANNEL_DMA:
+ *pDispChnClass = dispChnClass_Win;
+ break;
+
+ case NVC77F_ANY_CHANNEL_DMA:
+ // Assert in case of physical RM; the Any channel is a kernel-only channel.
+ NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_INVALID_CHANNEL); + *pDispChnClass = dispChnClass_Any; + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", hwClass); + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +void +kdispNotifyEvent_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 notifyIndex, + void *pNotifyParams, + NvU32 notifyParamsSize, + NvV32 info32, + NvV16 info16 +) +{ + PEVENTNOTIFICATION pEventNotifications; + NvU32 *pNotifyActions; + NvU32 disableCmd, singleCmd; + NvU32 subDeviceInst; + RS_SHARE_ITERATOR it = serverutilShareIter(classId(NotifShare)); + + // search notifiers with events hooked up for this gpu + while (serverutilShareIterNext(&it)) + { + RsShared *pShared = it.pShared; + DisplayApi *pDisplayApi; + INotifier *pNotifier; + Device *pDevice; + NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare); + + if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL)) + continue; + + pNotifier = pNotifierShare->pNotifier; + pDisplayApi = dynamicCast(pNotifier, DisplayApi); + + // Only notify matching GPUs + if (pDisplayApi == NULL) + continue; + + pDevice = dynamicCast(RES_GET_REF(pDisplayApi)->pParentRef->pResource, Device); + + if (GPU_RES_GET_GPU(pDevice) != pGpu) + continue; + + gpuSetThreadBcState(GPU_RES_GET_GPU(pDevice), pDisplayApi->bBcResource); + + disableCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + singleCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + + // get notify actions list + subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pNotifyActions = pDisplayApi->pNotifyActions[subDeviceInst]; + if (pNotifyActions == NULL) + { + continue; + } + + // get event list + pEventNotifications = inotifyGetNotificationList(pNotifier); + if (pEventNotifications == NULL) + { + continue; + } + + // skip if client not "listening" to events of this type + if (pNotifyActions[notifyIndex] == disableCmd) + { + continue; + } + + // ping events bound to subdevice associated with pGpu + osEventNotification(pGpu, pEventNotifications, + (notifyIndex | OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV), + pNotifyParams, notifyParamsSize); + + // reset if single shot notify action + if (pNotifyActions[notifyIndex] == singleCmd) + { + pNotifyActions[notifyIndex] = disableCmd; + } + } +} + +void +kdispSetWarPurgeSatellitesOnCoreFree_IMPL +( + KernelDisplay *pKernelDisplay, + NvBool value +) +{ + pKernelDisplay->bWarPurgeSatellitesOnCoreFree = value; +} + +NV_STATUS +kdispRegisterRgLineCallback_IMPL +( + KernelDisplay *pKernelDisplay, + RgLineCallback *pRgLineCallback, + NvU32 head, + NvU32 rgIntrLine, + NvBool bEnable +) +{ + NV_ASSERT_OR_RETURN(head < OBJ_MAX_HEADS, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD, NV_ERR_INVALID_ARGUMENT); + + RgLineCallback **slot = &pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine]; + + if (bEnable && *slot == NULL) + { + *slot = pRgLineCallback; + } + else if (!bEnable && *slot == pRgLineCallback) + { + *slot = NULL; + } + else + { + // + // OBJDISP is the authority for *allocating* these "slots"; + // KernelDisplay trusts it as an allocator. + // If we try to register a callback in an existing slot, or free an + // empty slot, it means OBJDISP has created conflicting allocations or + // has allowed a double-free. (Or RgLineCallback has provided invalid + // parameters.) 
+        //
+        NV_ASSERT_FAILED("Invalid KernelDisplay state for RgLineCallback");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    return NV_OK;
+}
+
+void
+kdispInvokeRgLineCallback_KERNEL
+(
+    KernelDisplay *pKernelDisplay,
+    NvU32          head,
+    NvU32          rgIntrLine,
+    NvBool         bIsIrqlIsr
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(head < OBJ_MAX_HEADS);
+    NV_ASSERT_OR_RETURN_VOID(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD);
+
+}
+
+#define HOTPLUG_PROFILE 0
+
+#if HOTPLUG_PROFILE
+
+    #define ISR_TSTAMP_SIZE 18000 /* 5 minutes (5*60Hz*60) */
+
+    NvU32 timeStampIndexISR = ISR_TSTAMP_SIZE-1;
+
+    tmr_tstamp_u timeStampStartISR[ISR_TSTAMP_SIZE];
+    tmr_tstamp_u timeStampDeltaISR[ISR_TSTAMP_SIZE];
+
+#endif
+
+void
+kdispServiceVblank_KERNEL
+(
+    OBJGPU            *pGpu,
+    KernelDisplay     *pKernelDisplay,
+    NvU32              headmask,
+    NvU32              state,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    NvU32       pending, check_pending, pending_checked;
+    NvU32       Head;
+    NvU32       maskNonEmptyQueues[OBJ_MAX_HEADS];  // array of masks of VBLANK_STATE_PROCESS_XXX_LATENCY bits, indicating which queues are non-empty
+    NvU32       unionNonEmptyQueues = 0;            // mask of VBLANK_STATE_PROCESS_XXX_LATENCY bits, union of queue states of all heads w/ pending vblank ints
+    NvU32       Count = 0;
+    NvU32       i, skippedcallbacks;
+    NvU32       maskCallbacksStillPending = 0;
+    KernelHead *pKernelHead = NULL;
+
+#if HOTPLUG_PROFILE
+    OBJTMR *pTmr;
+    pTmr = GPU_GET_TIMER(pGpu);
+    if (++timeStampIndexISR >= ISR_TSTAMP_SIZE)
+        timeStampIndexISR = 0;
+
+    tmrGetCurrentTime(pTmr, &timeStampStartISR[timeStampIndexISR].time32.hi, &timeStampStartISR[timeStampIndexISR].time32.lo);
+
+    // For the ISR we want to know how much time has passed since the last ISR.
+    if (timeStampIndexISR)
+    {
+        NvU64 temp64;
+
+        temp64 = timeStampStartISR[timeStampIndexISR].time64;
+        temp64 -= timeStampStartISR[timeStampIndexISR-1].time64;
+
+        timeStampDeltaISR[timeStampIndexISR].time64 = temp64;
+    }
+#endif
+
+
+    // If the caller failed to specify which queues, assume they wanted all of them.
+    if (!(state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) )
+    {
+        state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS;
+    }
+
+    // If the headmask is 0, we should process all heads.
+    if (headmask == 0)
+    {
+        headmask = 0xFFFFFFFF;
+    }
+
+    //
+    // If we are being asked to process the callbacks now, regardless of which
+    // interrupts are actually pending, we force the pending mask to the head
+    // mask passed in.
+    //
+    if (state & VBLANK_STATE_PROCESS_IMMEDIATE)
+    {
+        pending = headmask;
+    }
+    else
+    {
+        // We're here because at least one of the PCRTC bits MAY be pending.
+        pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
+    }
+
+    // No sense in doing anything if there is nothing pending.
+    if (pending == 0)
+    {
+        return;
+    }
+
+    //
+    // We want to check for pending service now, and then we check again each
+    // time through the loop. Keep these separate.
+    //
+    check_pending = pending;
+
+    // We have not checked anything yet.
+    pending_checked = 0;
+
+    // Start with head 0.
+    Head = 0;
+
+    //
+    // We keep scanning all supported heads, and if we have something pending,
+    // check the associated queues.
+    //
+    while(pending_checked != pending)
+    {
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+
+        // Move on if this crtc's interrupt isn't pending...
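+        //
+        // Illustrative reading of the mask test below: a head is examined only
+        // if the caller asked for it (headmask), it is pending in this scan
+        // (check_pending), and it has not been checked already
+        // (~pending_checked).
+        //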
+        if ( (headmask & check_pending & ~pending_checked) & NVBIT(Head))
+        {
+            // Track that we have now checked this head.
+            pending_checked |= NVBIT(Head);
+
+            // If our queues are empty, we can bail early.
+            maskNonEmptyQueues[Head] = kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, state, NULL);
+            unionNonEmptyQueues |= maskNonEmptyQueues[Head];
+
+            // Compute the set of callback states whose execution the caller
+            // has skipped in this invocation.
+            skippedcallbacks = ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) ^ VBLANK_STATE_PROCESS_ALL_CALLBACKS);
+            skippedcallbacks |= (state & (VBLANK_STATE_PROCESS_CALLED_FROM_ISR | VBLANK_STATE_PROCESS_IMMEDIATE));
+
+            // Now let's see if there are callbacks pending among the skipped types.
+            maskCallbacksStillPending |= NVBIT(Head) * !!kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, skippedcallbacks, NULL);
+        }
+
+        // Don't check for new interrupts if we are in immediate mode.
+        if (!(state & VBLANK_STATE_PROCESS_IMMEDIATE) )
+        {
+            pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
+        }
+
+        // If there was a change in the pending state, we should recheck everything.
+        if (check_pending != pending)
+        {
+            // We need to recheck heads that were not pending before.
+            check_pending = pending;
+            Head = 0;
+        }
+        else
+        {
+            // Nothing changed, so move on to the next head.
+            Head++;
+        }
+
+        // Make sure we don't waste time on heads that don't exist.
+        if (Head >= OBJ_MAX_HEADS)
+        {
+            break;
+        }
+    }
+
+    if (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR)
+    {
+        // Store off which heads have pending vblank interrupts, for comparison at the next DPC time.
+        pKernelDisplay->isrVblankHeads = pending;
+
+    }
+
+    // Increment the per-head vblank total counter for any head with a pending vblank interrupt.
+    for (Head=0; Head < OBJ_MAX_HEADS; Head++)
+    {
+        // Move on if this crtc's interrupt isn't pending...
+        if ((pending & NVBIT(Head)) == 0)
+        {
+            continue;
+        }
+
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+        //
+        // increment vblank counters, as appropriate.
+        //
+
+        // Track the fact that we passed through here. This keeps the RC manager happy.
+        Count = kheadGetVblankTotalCounter_HAL(pKernelHead) + 1;
+        kheadSetVblankTotalCounter_HAL(pKernelHead, Count);
+
+        //
+        // Update the vblank counter if we are single chip or multichip master.
+        // We now have two queues, so we need to have two vblank counters.
+        //
+
+        // Did they ask for processing of low-latency work?
+        if (state & VBLANK_STATE_PROCESS_LOW_LATENCY /* & maskNonEmptyQueues[Head]*/)
+        {
+            //
+            // Don't let the DPC thread increment the low-latency counter.
+            // Otherwise, the counter will frequently increment at double the
+            // expected rate, breaking things like swapInterval.
+            //
+            // XXX actually, there is one case where it would be OK for the DPC
+            // thread to increment this counter: if the DPC thread could ascertain
+            // that 'pending & NVBIT(Head)' represented a new interrupt event, and
+            // not simply the one that the ISR left uncleared in PCRTC_INTR_0, for
+            // the purpose of causing this DPC thread to get queued.
+            // Not sure how to do that.
+            //
+            if ( !(state & VBLANK_STATE_PROCESS_CALLED_FROM_DPC) || (pending & NVBIT(Head) & ~pKernelDisplay->isrVblankHeads) )
+            {
+                // Either we were called from the ISR, or vblank is asserted in the DPC when it wasn't in the ISR.
+
+                // low latency queue requested, and this isn't a DPC thread.
+                Count = kheadGetVblankLowLatencyCounter_HAL(pKernelHead) + 1;
+                kheadSetVblankLowLatencyCounter_HAL(pKernelHead, Count);
+            }
+        }
+
+        // Did they ask for processing of normal-latency work?
+        if (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY /* & maskNonEmptyQueues[Head]*/)
+        {
+            // processing of the normal latency queue requested
+            Count = kheadGetVblankNormLatencyCounter_HAL(pKernelHead) + 1;
+            kheadSetVblankNormLatencyCounter_HAL(pKernelHead, Count);
+        }
+    }
+
+    //
+    // If we have nothing to process (no work to do in any queue),
+    // we can bail early. We got here for some reason, so make
+    // sure we clear the interrupts.
+    //
+
+    if (!unionNonEmptyQueues)
+    {
+        // All queues (belonging to heads with pending vblank interrupts) are empty.
+        kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState);
+        return;
+    }
+
+    //
+    // Although we have separate handlers for each head, attempt to process all
+    // interrupting heads now. What about DPCs scheduled already?
+    //
+    for (Head = 0; Head < OBJ_MAX_HEADS; Head++)
+    {
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+        // Move on if this crtc's interrupt isn't pending...
+        if ((pending & NVBIT(Head)) == 0)
+        {
+            continue;
+        }
+
+        // Process the callback list for this head...
+        kheadProcessVblankCallbacks(pGpu, pKernelHead, state);
+    }
+
+    //
+    // If there are still callbacks pending, and we are in an ISR,
+    // then don't clear PCRTC_INTR; XXXar why would we *ever* want
+    // to clear PCRTC_INTR if there are still things pending?
+    //
+    if ( (maskCallbacksStillPending) &&
+         (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR) )
+    {
+        //
+        // There are still callbacks pending; don't clear
+        // PCRTC_INTR yet. The expectation is that the OS layer
+        // will see that interrupts are still pending and queue a
+        // DPC/BottomHalf/whatever to service the rest of the
+        // vblank callback queues.
+        //
+        for(i=0; i< OBJ_MAX_HEADS; i++)
+        {
+            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
+            kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState);
+        }
+    }
+    else
+    {
+        // Reset the vblank interrupts we've handled; don't reset the ones we haven't.
+        for(i=0; i< OBJ_MAX_HEADS; i++)
+        {
+            pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
+            if (pending & NVBIT(i) & ~maskCallbacksStillPending)
+            {
+                kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState);
+            }
+        }
+    }
+
+    return;
+}
+
+NvU32 kdispReadPendingVblank_KERNEL(OBJGPU *pGpu, KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *pThreadState)
+{
+    KernelHead *pKernelHead;
+    NvU32       headIntrMask;
+    NvU32       pending = 0;
+    NvU8        headIdx;
+
+    for(headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++)
+    {
+        pKernelHead  = KDISP_GET_HEAD(pKernelDisplay, headIdx);
+        headIntrMask = headIntr_none;
+        pending     |= kheadReadPendingVblank_HAL(pGpu, pKernelHead, headIntrMask);
+    }
+    return pending;
+}
+
+/*!
+ * @brief Route modeset start/end notification to kernel RM
+ *
+ * Physical RM is expected to send a "start" notification at the beginning of
+ * every display modeset (supervisor interrupt sequence), and an "end"
+ * notification at the end. However, if physical RM detects back-to-back
+ * modesets, the intervening "end" notification MAY be skipped; in this case,
+ * the "start" notification for the next modeset serves as the "end"
+ * notification for the previous modeset.
+ *
+ * Kernel RM will use the notification to update the BW allocation for display.
+ * The ICC call that is required to update the BW allocation cannot be made
+ * from physical RM.
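+ *
+ * Illustrative sequence for back-to-back modesets M1 and M2: physical RM may
+ * send start(M1), start(M2), end(M2), where start(M2) implicitly serves as
+ * end(M1).
+ *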
+ * + * @param[in] pKernelDisplay KernelDisplay pointer + * @param[in] bModesetStart NV_TRUE -> start of modeset; + * NV_FALSE -> end of modeset + * @param[in] minRequiredIsoBandwidthKBPS Min ISO BW required by IMP (KB/sec) + * @param[in] minRequiredFloorBandwidthKBPS Min dramclk freq * pipe width (KB/sec) + */ +void +kdispInvokeDisplayModesetCallback_KERNEL +( + KernelDisplay *pKernelDisplay, + NvBool bModesetStart, + NvU32 minRequiredIsoBandwidthKBPS, + NvU32 minRequiredFloorBandwidthKBPS +) +{ + NV_STATUS status; + + NV_PRINTF(LEVEL_INFO, + "Kernel RM received \"%s of modeset\" notification " + "(minRequiredIsoBandwidthKBPS = %u, minRequiredFloorBandwidthKBPS = %u)\n", + bModesetStart ? "start" : "end", + minRequiredIsoBandwidthKBPS, + minRequiredFloorBandwidthKBPS); + + OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay); + status = + kdispArbAndAllocDisplayBandwidth_HAL(pGpu, + pKernelDisplay, + DISPLAY_ICC_BW_CLIENT_RM, + minRequiredIsoBandwidthKBPS, + minRequiredFloorBandwidthKBPS); + // + // The modeset cannot be aborted, so, if there is an error, no recovery + // is possible. + // + NV_ASSERT_OK(status); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c new file mode 100644 index 0000000..407634c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c @@ -0,0 +1,543 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/gpu.h" +#include "gpu/eng_state.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "core/locks.h" + +// Function pointer wrapper +#define engstateStatePreInitUnlocked_Fnptr(pEngstate) pEngstate->__engstateStatePreInitUnlocked__ +#define engstateStateInitUnlocked_Fnptr(pEngstate) pEngstate->__engstateStateInitUnlocked__ + +NV_STATUS +engstateConstructBase_IMPL +( + OBJENGSTATE *pEngstate, + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc +) +{ + pEngstate->pGpu = pGpu; + pEngstate->engDesc = engDesc; + pEngstate->currentState = ENGSTATE_STATE_UNDEFINED; + +#if NV_PRINTF_STRINGS_ALLOWED + nvDbgSnprintf(pEngstate->name, sizeof(pEngstate->name), "%s:%d", + objGetClassName(pEngstate), ENGDESC_FIELD(pEngstate->engDesc, _INST)); +#endif + return NV_OK; +} + +void +engstateLogStateTransitionPre_IMPL +( + OBJENGSTATE *pEngstate, + ENGSTATE_STATE targetState, + ENGSTATE_TRANSITION_DATA *pData +) +{ + ENGSTATE_STATS *stats = &pEngstate->stats[targetState]; + NV_ASSERT_OR_RETURN_VOID(targetState < ENGSTATE_STATE_COUNT); + + // First call, init + portMemSet(stats, 0, sizeof(ENGSTATE_STATS)); + portMemSet(pData, 0, sizeof(ENGSTATE_TRANSITION_DATA)); + osGetPerformanceCounter(&pData->transitionStartTimeNs); + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS memstats = {0}; + portMemExTrackingGetActiveStats(NULL, &memstats); + + pData->memoryAllocCount = (NvS64) memstats.numAllocations; + pData->memoryAllocSize = (NvS64) memstats.usefulSize; + } +#endif +} + +void +engstateLogStateTransitionPost_IMPL +( + OBJENGSTATE *pEngstate, + ENGSTATE_STATE targetState, + ENGSTATE_TRANSITION_DATA *pData +) +{ + ENGSTATE_STATS *stats = &pEngstate->stats[targetState]; + NvU64 endTimeNs; + + NV_ASSERT_OR_RETURN_VOID(targetState < ENGSTATE_STATE_COUNT); + + osGetPerformanceCounter(&endTimeNs); + stats->transitionTimeUs = (endTimeNs - pData->transitionStartTimeNs) / 1000; + +#if NV_PRINTF_STRINGS_ALLOWED + const char *stateStrings[ENGSTATE_STATE_COUNT] = + { + "Undefined", + "Construct", + "Pre-Init", + "Init", + "Pre-Load", + "Load", + "Post-Load", + "Pre-Unload", + "Unload", + "Post-Unload", + "Destroy" + }; + ct_assert(ENGSTATE_STATE_COUNT == 11); + + NV_PRINTF(LEVEL_INFO, + "Engine %s state change: %s -> %s, took %uus\n", + engstateGetName(pEngstate), + stateStrings[pEngstate->currentState], stateStrings[targetState], + stats->transitionTimeUs); +#else + NV_PRINTF(LEVEL_INFO, + "Engine 0x%06x:%d state change: %d -> %d, took %uus\n", + objGetClassId(pEngstate), ENGDESC_FIELD(pEngstate->engDesc, _INST), + pEngstate->currentState, targetState, + stats->transitionTimeUs); +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS memstats = {0}; + portMemExTrackingGetActiveStats(NULL, &memstats); + + stats->memoryAllocCount = (NvS32)((NvS64)memstats.numAllocations - pData->memoryAllocCount); + stats->memoryAllocSize = (NvS32)((NvS64)memstats.usefulSize - pData->memoryAllocSize); + + NV_PRINTF(LEVEL_INFO, " Memory usage change: %d allocations, %d bytes\n", + stats->memoryAllocCount, stats->memoryAllocSize); + } +#endif + + pEngstate->currentState = targetState; +} + +const char * +engstateGetName_IMPL +( + OBJENGSTATE *pEngstate +) +{ +#if NV_PRINTF_STRINGS_ALLOWED + return pEngstate->name; +#else + return ""; +#endif +} + +/*! 
+ * @brief generic constructor + */ +NV_STATUS +engstateConstructEngine_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + ENGDESCRIPTOR engDesc +) +{ + return NV_OK; +} + +/*! + * @brief destructor + */ +void +engstateDestruct_IMPL +( + OBJENGSTATE *pEngstate +) +{ + portMemFree(pEngstate->pOriginalTunableState); + pEngstate->pOriginalTunableState = NULL; +} + +/*! + * @brief init missing engine + */ +void +engstateInitMissing_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + return; +} + +/*! + * @brief Wrapper around StatePreInitUnlocked and StatePreInitLocked + */ +NV_STATUS +engstateStatePreInit_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + /* Check if we overrode the unlocked variant */ + if ((engstateStatePreInitUnlocked_Fnptr(pEngstate) != + engstateStatePreInitUnlocked_IMPL)) + { + NV_STATUS status, lockStatus; + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + status = engstateStatePreInitUnlocked(pGpu, pEngstate); + + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + + if (status == NV_OK) + status = lockStatus; + if (status != NV_OK) + return status; + } + + return engstateStatePreInitLocked(pGpu, pEngstate); +} + +/*! + * @brief state pre-init locked + */ +NV_STATUS +engstateStatePreInitLocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state pre-init unlocked + */ +NV_STATUS +engstateStatePreInitUnlocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief Wrapper around StateInitUnlocked and StateInitLocked + */ +NV_STATUS +engstateStateInit_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + /* Check if we overrode the unlocked variant */ + if (engstateStateInitUnlocked_Fnptr(pEngstate) != engstateStateInitUnlocked_IMPL) + { + NV_STATUS status, lockStatus; + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + status = engstateStateInitUnlocked(pGpu, pEngstate); + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + + if (status == NV_OK) + status = lockStatus; + if (status != NV_OK) + return status; + } + + return engstateStateInitLocked(pGpu, pEngstate); +} + +/*! + * @brief state init locked + */ +NV_STATUS +engstateStateInitLocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state init unlocked + */ +NV_STATUS +engstateStateInitUnlocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state pre-load + */ +NV_STATUS +engstateStatePreLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state load + */ +NV_STATUS +engstateStateLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state post-load + */ +NV_STATUS +engstateStatePostLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state unload + */ +NV_STATUS +engstateStateUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state pre-unload + */ +NV_STATUS +engstateStatePreUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state post-unload + */ +NV_STATUS +engstateStatePostUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! 
+ * @brief state destroy + */ +void +engstateStateDestroy_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ +} + +/*! + * @brief allocates a tunable state structure + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[out] ppTunableState + */ +NV_STATUS +engstateAllocTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void **ppTunableState +) +{ + *ppTunableState = NULL; + return NV_OK; +} + +/*! + * @brief frees a tunable state structure + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[in] pTunableState + */ +void +engstateFreeTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + portMemFree(pTunableState); +} + +/*! + * @brief fills pTunableState with the current state + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[out] pTunableState + */ +NV_STATUS +engstateGetTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + return NV_OK; +} + +/*! + * @brief sets the current state to values in pTunableState + * + * @param[in] pGpu + * @param[in,out] pEngstate + * @param[in] pTunableState + */ +NV_STATUS +engstateSetTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + return NV_OK; +} + +/*! + * @brief modifies pTunableState to be compatible with pEngstate->pOriginalTunableState + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[in,out] pTunableState + */ +NV_STATUS +engstateReconcileTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + return NV_OK; +} + +/*! + * @brief returns NV_ERR_GENERIC if two tunable states are incompatible + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[in] pTunables1 + * @param[in] pTunables2 + */ +NV_STATUS +engstateCompareTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunables1, + void *pTunables2 +) +{ + return NV_OK; +} + +/*! + * @brief returns the ENGDESCRIPTOR associated with this ENGSTATE + * + * @param[in] pEngstate + */ +ENGDESCRIPTOR +engstateGetDescriptor_IMPL +( + OBJENGSTATE *pEngstate +) +{ + return pEngstate->engDesc; +} + +/*! + * @brief checks for presence of the hardware associated with this ENGSTATE + * + * @param[in] pGpu + * @param[in] pEngstate + */ +NvBool +engstateIsPresent_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + NV_ASSERT(pEngstate); + return NV_TRUE; +} + + +/*! + * @brief returns the FIFO associated with this ENGSTATE + * + * @param[in] pEngstate + */ +OBJFIFO * +engstateGetFifo_IMPL +( + OBJENGSTATE *pEngstate +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pEngstate); + + return GPU_GET_FIFO(pGpu); +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c new file mode 100644 index 0000000..55fa991 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c @@ -0,0 +1,3725 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief HW State Routines: System Object Function Definitions. + */ + + +#include "lib/base_utils.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/eng_desc.h" +#include "nv_ref.h" +#include "os/os.h" +#include "nvrm_registry.h" +#include "gpu_mgr/gpu_mgr.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "diagnostics/tracer.h" +#include "rmapi/client_resource.h" +#include "diagnostics/journal.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "core/hal_mgr.h" +#include "vgpu/rpc.h" + +#include + +#include "g_odb.h" + +typedef struct GPUCHILDINFO *PGPUCHILDINFO; +typedef struct GPUCHILDINFO GPUCHILDINFO; + +typedef struct GPUCHILDTYPE *PGPUCHILDTYPE; +typedef struct GPUCHILDTYPE GPUCHILDTYPE; + +#define RMTRACE_ENGINE_PROFILE_EVENT(EventName, EngineId, ReadCount, WriteCount) \ +{ \ + RMTRACE_PROBE4(generic, marker, \ + NvU32, EngineId, sizeof(NvU32), \ + char*, EventName, sizeof(EventName), \ + NvU32, ReadCount, sizeof(NvU32), \ + NvU32, WriteCount, sizeof(NvU32)); \ + } + +// Public interface functions + +static NV_STATUS gpuRemoveMissingEngines(OBJGPU *); + +// local static function +static NV_STATUS gpuCreateChildObjects(OBJGPU *, NvBool); +static NV_STATUS gpuStatePreLoad(OBJGPU *, NvU32); +static NV_STATUS gpuStatePostLoad(OBJGPU *, NvU32); +static NV_STATUS gpuStatePreUnload(OBJGPU *, NvU32); +static NV_STATUS gpuStatePostUnload(OBJGPU *, NvU32); +static void gpuXlateHalImplToArchImpl(OBJGPU *, HAL_IMPLEMENTATION, NvU32 *, NvU32 *); +static NvBool gpuSatisfiesTemporalOrder(OBJGPU *, HAL_IMPLEMENTATION, NvU32, NvU32); +static NvBool gpuSatisfiesTemporalOrderMaskRev(OBJGPU *, HAL_IMPLEMENTATION, NvU32, NvU32, NvU32); +static NvBool gpuIsT124ImplementationOrBetter(OBJGPU *); +static NvBool gpuShouldCreateObject(PGPUCHILDINFO, PENGDESCRIPTOR, NvU32); + +static void gpuDestroyMissingEngine(OBJGPU *, OBJENGSTATE *); +static void gpuRemoveMissingEngineClasses(OBJGPU *, NvU32); + +static NV_STATUS _gpuCreateEngineOrderList(OBJGPU *pGpu); +static void _gpuFreeEngineOrderList(OBJGPU *pGpu); + + +static void _gpuInitPciHandle(OBJGPU *pGpu); +static void _gpuInitPhysicalRmApi(OBJGPU *pGpu); +static NV_STATUS _gpuAllocateInternalObjects(OBJGPU *pGpu); +static void _gpuFreeInternalObjects(OBJGPU *pGpu); + +typedef struct 
+{ + NvS32 childOrderIndex; + NvS32 instanceID; + NvU32 flags; + NvBool bStarted; +} ENGLIST_ITER, *PENGLIST_ITER; + +static ENGLIST_ITER gpuGetEngineOrderListIter(OBJGPU *pGpu, NvU32 flags); +static NvBool gpuGetNextInEngineOrderList(OBJGPU *pGpu, ENGLIST_ITER *pIt, PENGDESCRIPTOR pEngDesc); + +static inline void _setPlatformNoHostbridgeDetect(NvBool bValue) +{ +} + +// Forward declare all the class definitions so that we don't need to pull in all the headers +#define GPU_CHILD(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + extern const struct NVOC_CLASS_DEF NV_CONCATENATE(__nvoc_class_def_, className); + +#include "gpu/gpu_child_list.h" + + +// Describes a child type (e.g.: classId(OBJCE)) +struct GPUCHILDTYPE +{ + NvBool bConstructEarly; // bConstructEarly objects are created in a separate step. FUSE must be created + // before BIF since we need to know the OPSB fuse value for enabling/disabling + // certain features in bifInitRegistryOverrides + NvBool bAlwaysCreate; + NvU32 instances; + NvU32 gpuChildPtrOffset; + const NVOC_CLASS_INFO *pClassInfo; // NULL if engine is disabled by chip-config +}; + +// List of all possible GPU offspring +static GPUCHILDTYPE gpuChildTypeList[] = +{ + #define GPU_CHILD(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + { bConstructEarly, bAlwaysCreate, numInstances, NV_OFFSETOF(OBJGPU, gpuField), classInfo(className) }, + + #include "gpu/gpu_child_list.h" +}; + +// Describes a child instance (e.g.: classId(OBJCE) instanceID #1) +struct GPUCHILDINFO +{ + NvBool bAlwaysCreate; + NvBool bConstructEarly; + ENGDESCRIPTOR engDesc; + NvU32 gpuChildPtrOffset; + const NVOC_CLASS_INFO *pClassInfo; + GPUCHILDTYPE *pChildType; +}; + +static PGPUCHILDTYPE gpuGetChildType(NVOC_CLASS_ID classId); +static NV_STATUS gpuGetChildInfo(NVOC_CLASS_ID classId, NvU32 instanceID, PGPUCHILDINFO pChildInfoOut); +static Dynamic **gpuGetChildPtr(OBJGPU *pGpu, NvU32 gpuChildPtrOffset); + +#define GPU_NUM_CHILD_TYPES \ + ((sizeof(gpuChildTypeList) / sizeof(GPUCHILDTYPE))) + +/*! + * GFID allocation state + */ +typedef enum _gfid_alloc_state +{ + GFID_FREE = 0, + GFID_ALLOCATED = 1 +} GFID_ALLOC_STATUS; + +// +// Generate a 32-bit id from domain, bus and device tuple. +// +// This is a one way function that is not guaranteed to generate a unique id for +// each domain, bus, device tuple as domain alone can be 32-bit. Historically, +// we have been assuming that the domain can only be 16-bit, but that has never +// been true on Linux and Hyper-V virtualization has exposed that by using +// arbitrary 32-bit domains for passthrough GPUs. This is the only known case +// today that requires immediate support. The domains on Hyper-V come from +// hashing some system and GPU information and are claimed to be unique even if +// we consider the lower 16-bits only. Hence, as a temporary solution, only the +// lower 16-bits are used and it's asserted that top 16-bits are only non-0 on +// Hyper-V. +// +// Long term the 32-bit ids should be changed to 64-bit or the generation scheme +// should be changed to guarantee uniqueness. Both of these are impactful as the +// biggest user of this is the commonly used 32-bit OBJGPU::gpuId. 
+// +NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device) +{ + NvU32 id = gpuEncodeBusDevice(bus, device); + + // Include only the lower 16-bits to match the old gpuId scheme + id |= (domain & 0xffff) << 16; + + return id; +} + +void gpuChangeComputeModeRefCount_IMPL(OBJGPU *pGpu, NvU32 command) +{ + switch(command) + { + case NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT: + NV_ASSERT(pGpu->computeModeRefCount >= 0); + ++pGpu->computeModeRefCount; + + if (1 == pGpu->computeModeRefCount) + { + NV_PRINTF(LEVEL_INFO, "GPU (ID: 0x%x): new mode: COMPUTE\n", + pGpu->gpuId); + + timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu); + } + break; + + case NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT: + --pGpu->computeModeRefCount; + NV_ASSERT(pGpu->computeModeRefCount >= 0); + + if (pGpu->computeModeRefCount < 0) + { + pGpu->computeModeRefCount = 0; + } + + if (0 == pGpu->computeModeRefCount) + { + NV_PRINTF(LEVEL_INFO, "GPU (ID: 0x%x): new mode: GRAPHICS\n", + pGpu->gpuId); + + timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu); + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Bad command: 0x%x\n", command); + NV_ASSERT(0); + break; + } +} + +// +// gpuPostConstruct +// +// Called by the gpu manager to finish OBJGPU construction phase. +// Tasks handled here include binding a HAL module to the gpu +// and the construction of engine object offspring. +// +NV_STATUS +gpuPostConstruct_IMPL +( + OBJGPU *pGpu, + GPUATTACHARG *pAttachArg +) +{ + NV_STATUS rmStatus; + + gpumgrAddDeviceInstanceToGpus(NVBIT(pGpu->gpuInstance)); + + rmStatus = regAccessConstruct(&pGpu->registerAccess, pGpu); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to construct IO Apertures for attached devices \n"); + return rmStatus; + } + + gpuInitChipInfo(pGpu); + + // + // gpuDetermineVirtualMode inits hPci but only for virtualization case. So if + // it does not init it, do here for using it for non-virtualization as well + // + if (pGpu->hPci == NULL) + { + // + // We don't check the return status. Even if PCI handle is not obtained + // it should not block rest of the gpu init sequence. + // + _gpuInitPciHandle(pGpu); + } + + // + // Initialize the base offset for the virtual registers for physical function + // or baremetal + // + pGpu->sriovState.virtualRegPhysOffset = gpuGetVirtRegPhysOffset_HAL(pGpu); + + // + // Initialize engine order before engine init/load/etc + // + rmStatus = _gpuCreateEngineOrderList(pGpu); + if ( rmStatus != NV_OK ) + return rmStatus; + + gpuBuildClassDB(pGpu); + + // The first time the emulation setting is checked is in timeoutInitializeGpuDefault. + pGpu->computeModeRefCount = 0; + pGpu->hComputeModeReservation = NV01_NULL_OBJECT; + + // Setting default timeout values + timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu); + + // Set 2 stage error recovery if Vista or Unix or GSP-RM. + if (!IsAMODEL(pGpu)) + { + pGpu->bTwoStageRcRecoveryEnabled = NV_TRUE; + } + + // create core objects (i.e. 
bif) + rmStatus = gpuCreateChildObjects(pGpu, /* bConstructEarly */ NV_TRUE); + if (rmStatus != NV_OK) + return rmStatus; + + gpuGetIdInfo_HAL(pGpu); + gpuUpdateIdInfo_HAL(pGpu); + + _gpuInitPhysicalRmApi(pGpu); + + // need to get illumination values after the GPU Id + // has been setup to allow for GPU specific settings + gpuDeterminePersistantIllumSettings(pGpu); + + // Construct and update the engine database + rmStatus = gpuConstructEngineTable(pGpu); + if (rmStatus != NV_OK) + return rmStatus; + rmStatus = gpuUpdateEngineTable(pGpu); + if (rmStatus != NV_OK) + return rmStatus; + + // create remaining gpu offspring + rmStatus = gpuCreateChildObjects(pGpu, /* bConstructEarly */ NV_FALSE); + if (rmStatus != NV_OK) + return rmStatus; + + gpuGetHwDefaults(pGpu); + + // Set any state overrides required for L2 cache only mode + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + gpuSetCacheOnlyModeOverrides_HAL(pGpu); + } + + // Register the OCA dump callback function. + gpuDumpCallbackRegister(pGpu); + + // Initialize reference count for external kernel clients + pGpu->externalKernelClientCount = 0; + + return NV_OK; +} + +NV_STATUS gpuConstruct_IMPL +( + OBJGPU *pGpu, + NvU32 gpuInstance +) +{ + + pGpu->gpuInstance = gpuInstance; + + // allocate OS-specific GPU extension area + osInitOSHwInfo(pGpu); + + return gpuConstructPhysical(pGpu); +} + +// NVOC-TODO : delete this after all Rmconfig modules migrated to NVOC +NV_STATUS +gpuBindHalLegacy_IMPL +( + OBJGPU *pGpu, + NvU32 chipId0, + NvU32 chipId1 +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHALMGR *pHalMgr = SYS_GET_HALMGR(pSys); + NV_STATUS status; + + // chipId0 and chipId1 needs to be function parameter since GPU Reg read + // is not ready at this point. + pGpu->chipId0 = chipId0; + pGpu->chipId1 = chipId1; + + // + // The system object will pass PMC_BOOT_0 and PMC_BOOT_42 to all the HAL's and return the + // one that claims it supports this chip arch/implementation + // + status = halmgrGetHalForGpu(pHalMgr, pGpu->chipId0, pGpu->chipId1, &pGpu->halImpl); + if (status != NV_OK) + return status; + + pGpu->pHal = halmgrGetHal(pHalMgr, pGpu->halImpl); + + return status; +} + +static void +_gpuInitPciHandle +( + OBJGPU *pGpu +) +{ + NvU32 domain = gpuGetDomain(pGpu); + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU8 function = 0; + + pGpu->hPci = osPciInitHandle(domain, bus, device, function, NULL, NULL); +} + +static NV_STATUS _gpuRmApiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +) +{ + RmCtrlParams rmCtrlParams; + CALL_CONTEXT callCtx, *oldCtx = NULL; + RS_LOCK_INFO lockInfo = {0}; + NV_STATUS status = NV_OK; + + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + + // This API is only used to route locally on monolithic or UCODE + NV_ASSERT_OR_RETURN(!IS_GSP_CLIENT(pGpu), NV_ERR_INVALID_STATE); + + // + // The physical API can be used on any controls and any handles and it is + // expected to be routed correctly. However, if the caller is using the GPU + // internal handles, we can skip the resource server overhead and make a + // direct function call instead. 
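+    //
+    // In short, the two paths below are:
+    //   - GPU-internal client + subdevice handles: look up the NVOC exported
+    //     method for 'cmd' and call it directly, bypassing resource server
+    //     dispatch and locking;
+    //   - any other handles: forward to the RMAPI_GPU_LOCK_INTERNAL
+    //     interface's Control, taking the normal dispatch path.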
+ // + if (hClient == pGpu->hInternalClient && hObject == pGpu->hInternalSubdevice) + { + NV_ASSERT_OR_RETURN(pGpu->pCachedSubdevice && pGpu->pCachedRsClient, NV_ERR_INVALID_STATE); + + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + pEntry = objGetExportedMethodDef((void*)pGpu->pCachedSubdevice, cmd); + + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pEntry->paramSize == paramsSize, NV_ERR_INVALID_PARAM_STRUCT); + NV_PRINTF(LEVEL_INFO, "GPU Internal RM control 0x%08x on gpuInst:%x hClient:0x%08x hSubdevice:0x%08x\n", + cmd, pGpu->gpuInstance, hClient, hObject); + + portMemSet(&rmCtrlParams, 0, sizeof(rmCtrlParams)); + rmCtrlParams.hClient = hClient; + rmCtrlParams.hObject = hObject; + rmCtrlParams.pGpu = pGpu; + rmCtrlParams.cmd = cmd; + rmCtrlParams.flags = NVOS54_FLAGS_LOCK_BYPASS; + rmCtrlParams.pParams = pParams; + rmCtrlParams.paramsSize = paramsSize; + rmCtrlParams.secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + rmCtrlParams.secInfo.paramLocation = PARAM_LOCATION_KERNEL; + rmCtrlParams.bInternal = NV_TRUE; + + lockInfo.flags = RM_LOCK_FLAGS_NO_GPUS_LOCK | RM_LOCK_FLAGS_NO_CLIENT_LOCK; + rmCtrlParams.pLockInfo = &lockInfo; + + portMemSet(&callCtx, 0, sizeof(callCtx)); + callCtx.pResourceRef = RES_GET_REF(pGpu->pCachedSubdevice); + callCtx.pClient = pGpu->pCachedRsClient; + callCtx.secInfo = rmCtrlParams.secInfo; + callCtx.pServer = &g_resServ; + callCtx.pControlParams = &rmCtrlParams; + callCtx.pLockInfo = rmCtrlParams.pLockInfo; + + resservSwapTlsCallContext(&oldCtx, &callCtx); + + if (pEntry->paramSize == 0) + { + status = ((NV_STATUS(*)(void*))pEntry->pFunc)(pGpu->pCachedSubdevice); + } + else + { + status = ((NV_STATUS(*)(void*,void*))pEntry->pFunc)(pGpu->pCachedSubdevice, pParams); + } + + resservRestoreTlsCallContext(oldCtx); + } + else + { + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_ASSERT_OR_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance), NV_ERR_INVALID_LOCK_STATE); + + status = pInternalRmApi->Control(pInternalRmApi, hClient, hObject, cmd, pParams, paramsSize); + } + + return status; +} + +static NV_STATUS _gpuRmApiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +) +{ + // Simple forwarder for now + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + return pInternalRmApi->AllocWithHandle(pInternalRmApi, hClient, hParent, hObject, hClass, pAllocParams); +} +static NV_STATUS _gpuRmApiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +) +{ + // Simple forwarder for now + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + return pInternalRmApi->Free(pInternalRmApi, hClient, hObject); +} + +static void +_gpuInitPhysicalRmApi +( + OBJGPU *pGpu +) +{ + // Populate all unused APIs with stubs + pGpu->physicalRmApi = *rmapiGetInterface(RMAPI_STUBS); + pGpu->physicalRmApi.pPrivateContext = pGpu; + + portMemSet(&pGpu->physicalRmApi.defaultSecInfo, 0, + sizeof(pGpu->physicalRmApi.defaultSecInfo)); + pGpu->physicalRmApi.defaultSecInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + pGpu->physicalRmApi.defaultSecInfo.paramLocation = PARAM_LOCATION_KERNEL; + pGpu->physicalRmApi.bHasDefaultSecInfo = NV_TRUE; + pGpu->physicalRmApi.bTlsInternal = NV_TRUE; + pGpu->physicalRmApi.bApiLockInternal = NV_TRUE; + pGpu->physicalRmApi.bRmSemaInternal = NV_TRUE; + pGpu->physicalRmApi.bGpuLockInternal = NV_TRUE; + + // Only initialize the methods that exist on GSP/DCE as well + pGpu->physicalRmApi.Control = 
_gpuRmApiControl;
+    pGpu->physicalRmApi.AllocWithHandle = _gpuRmApiAllocWithHandle;
+    pGpu->physicalRmApi.Free            = _gpuRmApiFree;
+}
+
+static NV_STATUS
+_gpuInitChipInfo
+(
+    OBJGPU *pGpu
+)
+{
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    const NvU32 paramSize = sizeof(NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS);
+    NV_STATUS status;
+
+    pGpu->pChipInfo = portMemAllocNonPaged(paramSize);
+    NV_ASSERT_OR_RETURN(pGpu->pChipInfo != NULL, NV_ERR_NO_MEMORY);
+
+    portMemSet(pGpu->pChipInfo, 0, paramSize);
+
+    NV_ASSERT_OK_OR_GOTO(status, pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                                                 NV2080_CTRL_CMD_INTERNAL_GPU_GET_CHIP_INFO,
+                                                 pGpu->pChipInfo, paramSize), done);
+
+    pGpu->chipInfo.subRevision  = pGpu->pChipInfo->chipSubRev;
+    pGpu->idInfo.PCIDeviceID    = pGpu->pChipInfo->pciDeviceId;
+    pGpu->idInfo.PCISubDeviceID = pGpu->pChipInfo->pciSubDeviceId;
+    pGpu->idInfo.PCIRevisionID  = pGpu->pChipInfo->pciRevisionId;
+
+done:
+    if (status != NV_OK)
+    {
+        portMemFree(pGpu->pChipInfo);
+        pGpu->pChipInfo = NULL;
+    }
+
+    return status;
+}
+
+static NV_STATUS
+gpuInitVmmuInfo
+(
+    OBJGPU *pGpu
+)
+{
+    NV_STATUS  status;
+    RM_API    *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+    NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS params;
+
+    pGpu->vmmuSegmentSize = 0;
+
+    status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+                             NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE,
+                             &params, sizeof(params));
+
+    if (status == NV_ERR_NOT_SUPPORTED)
+    {
+        // Leave segment size initialized to zero to signal no VMMU present on physical
+        return NV_OK;
+    }
+    else if (status != NV_OK)
+    {
+        return status;
+    }
+
+    pGpu->vmmuSegmentSize = params.vmmuSegmentSize;
+
+    return status;
+}
+
+static NV_STATUS _gpuAllocateInternalObjects
+(
+    OBJGPU *pGpu
+)
+{
+    NV_STATUS status = NV_OK;
+
+    if (IS_GSP_CLIENT(pGpu))
+    {
+        if (IsT234D(pGpu))
+        {
+            //
+            // NOTE: We add +1 to the client base because DCE-RM will also
+            // allocate internal objects, taking the !IS_GSP_CLIENT path below.
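+            // (The fixed handles avoid an allocation pass here; the non-GSP
+            // path below instead allocates the client, device, and subdevice
+            // handles through rmapiutil.)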
+ // + pGpu->hInternalClient = RS_CLIENT_INTERNAL_HANDLE_BASE + 1; + pGpu->hInternalDevice = NV_GPU_INTERNAL_DEVICE_HANDLE; + pGpu->hInternalSubdevice = NV_GPU_INTERNAL_SUBDEVICE_HANDLE; + } + } + else + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_ASSERT_OK_OR_RETURN(rmapiutilAllocClientAndDeviceHandles( + pRmApi, pGpu, &pGpu->hInternalClient, &pGpu->hInternalDevice, &pGpu->hInternalSubdevice)); + + NV_ASSERT_OK_OR_GOTO(status, serverGetClientUnderLock(&g_resServ, pGpu->hInternalClient, + &pGpu->pCachedRsClient), done); + NV_ASSERT_OK_OR_GOTO(status, subdeviceGetByGpu(pGpu->pCachedRsClient, pGpu, + &pGpu->pCachedSubdevice), done); + } + +done: + if (status != NV_OK) + { + _gpuFreeInternalObjects(pGpu); + } + + return status; +} + +static void _gpuFreeInternalObjects +( + OBJGPU *pGpu +) +{ + if (!IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + rmapiutilFreeClientAndDeviceHandles(pRmApi, + &pGpu->hInternalClient, &pGpu->hInternalDevice, &pGpu->hInternalSubdevice); + } +} + +static NV_STATUS +_gpuCreateEngineOrderList +( + OBJGPU *pGpu +) +{ + NvU32 i; + NvU32 numClassDesc; + NvU32 numLists; + NV_STATUS status = NV_OK; + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + NvU32 numEngineDesc, curEngineDesc; + NvU32 listTypes[] = {GCO_LIST_INIT, GCO_LIST_LOAD, GCO_LIST_UNLOAD, GCO_LIST_DESTROY}; + PENGDESCRIPTOR *ppEngDescriptors[4]; + ENGLIST_ITER it; + ENGDESCRIPTOR engDesc; + + ct_assert(NV_ARRAY_ELEMENTS32(ppEngDescriptors) == NV_ARRAY_ELEMENTS32(listTypes)); + +#define GPU_CHILD(a, b, numInstances, c, d, e) +numInstances + + struct ChildList { + char children[ 0 + + #include "gpu/gpu_child_list.h" + ]; + }; + + // + // The maximum number of engines known to RM controls + // must be at least the number of actual OBJGPU children. + // + ct_assert(NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS >= + sizeof(((struct ChildList*)(NULL))->children) /* sizeof(ChildList::children) */); + + numLists = NV_ARRAY_ELEMENTS32(listTypes); + + ppEngDescriptors[0] = &pEngineOrder->pEngineInitDescriptors; + ppEngDescriptors[1] = &pEngineOrder->pEngineLoadDescriptors; + ppEngDescriptors[2] = &pEngineOrder->pEngineUnloadDescriptors; + ppEngDescriptors[3] = &pEngineOrder->pEngineDestroyDescriptors; + + // + // Find the size of the engine descriptor list. The sizes of all lists + // are checked for consistency to catch mistakes. + // + // The list is copied into OBJGPU storage as it's modified during + // dynamic engine removal (e.g.: gpuMissingEngDescriptor). 
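+    //
+    // In short, sizing is done in two passes per list: the first pass only
+    // counts entries, then storage is allocated and a second pass copies the
+    // ENGDESCRIPTORs into OBJGPU.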
+    //
+    numEngineDesc = 0;
+
+    for (i = 0; i < numLists; i++)
+    {
+        curEngineDesc = 0;
+
+        it = gpuGetEngineOrderListIter(pGpu, listTypes[i]);
+
+        while (gpuGetNextInEngineOrderList(pGpu, &it, &engDesc))
+        {
+            curEngineDesc++;
+        }
+
+        if ((numEngineDesc != 0) && (numEngineDesc != curEngineDesc))
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "Sizes of all engine order lists do not match!\n");
+            NV_ASSERT(0);
+            status = NV_ERR_INVALID_STATE;
+            goto done;
+        }
+
+        numEngineDesc = curEngineDesc;
+    }
+
+    pEngineOrder->numEngineDescriptors = numEngineDesc;
+
+
+    for (i = 0; i < numLists; i++)
+    {
+        curEngineDesc = 0;
+
+        *ppEngDescriptors[i] = portMemAllocNonPaged(sizeof(ENGDESCRIPTOR) * numEngineDesc);
+        if ( NULL == *ppEngDescriptors[i])
+        {
+            NV_ASSERT(0);
+            status = NV_ERR_NO_MEMORY;
+            goto done;
+        }
+
+        it = gpuGetEngineOrderListIter(pGpu, listTypes[i]);
+
+        while (gpuGetNextInEngineOrderList(pGpu, &it, &engDesc))
+        {
+            (*ppEngDescriptors[i])[curEngineDesc] = engDesc;
+            curEngineDesc++;
+        }
+    }
+
+    pEngineOrder->pClassDescriptors = gpuGetClassDescriptorList_HAL(pGpu, &numClassDesc);
+    pEngineOrder->numClassDescriptors = numClassDesc;
+
+    return NV_OK;
+
+done:
+    portMemFree(pEngineOrder->pEngineInitDescriptors);
+    pEngineOrder->pEngineInitDescriptors = NULL;
+
+    portMemFree(pEngineOrder->pEngineDestroyDescriptors);
+    pEngineOrder->pEngineDestroyDescriptors = NULL;
+
+    portMemFree(pEngineOrder->pEngineLoadDescriptors);
+    pEngineOrder->pEngineLoadDescriptors = NULL;
+
+    portMemFree(pEngineOrder->pEngineUnloadDescriptors);
+    pEngineOrder->pEngineUnloadDescriptors = NULL;
+
+    return status;
+}
+
+static void
+_gpuFreeEngineOrderList
+(
+    OBJGPU *pGpu
+)
+{
+    PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder;
+
+    if (!pEngineOrder->pEngineInitDescriptors)
+        return;
+
+    portMemFree(pEngineOrder->pEngineInitDescriptors);
+    portMemFree(pEngineOrder->pEngineDestroyDescriptors);
+    portMemFree(pEngineOrder->pEngineLoadDescriptors);
+    portMemFree(pEngineOrder->pEngineUnloadDescriptors);
+
+    pEngineOrder->pEngineInitDescriptors    = NULL;
+    pEngineOrder->pEngineDestroyDescriptors = NULL;
+    pEngineOrder->pEngineLoadDescriptors    = NULL;
+    pEngineOrder->pEngineUnloadDescriptors  = NULL;
+    pEngineOrder->pClassDescriptors         = NULL;
+}
+
+/*!
+ * @brief Returns a pointer to the GPU's pointer to a child, given the child's
+ *        pointer offset within OBJGPU
+ *
+ * @param[in] pGpu               OBJGPU pointer
+ * @param[in] gpuChildPtrOffset  Byte offset of the child pointer within OBJGPU
+ */
+static Dynamic**
+gpuGetChildPtr(OBJGPU *pGpu, NvU32 gpuChildPtrOffset)
+{
+    return (Dynamic**)((NvU8*)pGpu + gpuChildPtrOffset);
+}
+
+/*!
+ * @brief Looks up an instance of an engine
+ *
+ * @param[in]  classId
+ * @param[in]  instanceID
+ * @param[out] pChildInfoOut
+ */
+static NV_STATUS
+gpuGetChildInfo(NVOC_CLASS_ID classId, NvU32 instanceID, PGPUCHILDINFO pChildInfoOut)
+{
+    PGPUCHILDTYPE pChildType;
+
+    NV_ASSERT_OR_RETURN(pChildInfoOut, NV_ERR_INVALID_STATE);
+
+    pChildType = gpuGetChildType(classId);
+
+    NV_ASSERT_OR_RETURN(pChildType && (instanceID < pChildType->instances), NV_ERR_INVALID_OBJECT);
+
+    pChildInfoOut->engDesc         = MKENGDESC(classId, instanceID);
+    pChildInfoOut->bAlwaysCreate   = pChildType->bAlwaysCreate;
+    pChildInfoOut->bConstructEarly = pChildType->bConstructEarly;
+    pChildInfoOut->pClassInfo      = pChildType->pClassInfo;
+    pChildInfoOut->pChildType      = pChildType;
+
+    pChildInfoOut->gpuChildPtrOffset = pChildType->gpuChildPtrOffset + sizeof(void *) * instanceID;
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Looks up a class in the table by class id
+ *
+ * All classes are uniquely identified by their classId.
+ *
+ * @param[in] classId         NVOC_CLASS_ID
+ */
+static PGPUCHILDTYPE
+gpuGetChildType(NVOC_CLASS_ID classId)
+{
+    NvU32 i;
+
+    for (i = 0; i < GPU_NUM_CHILD_TYPES; i++)
+    {
+        if (gpuChildTypeList[i].pClassInfo && gpuChildTypeList[i].pClassInfo->classId == classId)
+        {
+            return &gpuChildTypeList[i];
+        }
+    }
+
+    return NULL;
+}
+
+/**
+ * @brief Initializes an iterator for all possible ENGDESCRIPTORs that could be
+ *        GPU children.
+ *
+ * @return GPU_CHILD_ITER
+ */
+GPU_CHILD_ITER
+gpuGetPossibleEngDescriptorIter(void)
+{
+    GPU_CHILD_ITER it = {0};
+    return it;
+}
+
+/**
+ * @brief Iterator over all possible ENGDESCRIPTORs that could be GPU children.
+ *
+ * @param[in,out] pIt       Iterator
+ * @param[out]    pEngDesc  The next engine descriptor
+ *
+ * @return NV_TRUE if *pEngDesc is valid, NV_FALSE if there are no more engines
+ */
+NvBool
+gpuGetNextPossibleEngDescriptor(GPU_CHILD_ITER *pIt, ENGDESCRIPTOR *pEngDesc)
+{
+    PGPUCHILDTYPE pChildType;
+    GPUCHILDINFO childInfo;
+
+    if (pIt->childTypeIdx >= GPU_NUM_CHILD_TYPES)
+        return NV_FALSE;
+
+    pChildType = &gpuChildTypeList[pIt->childTypeIdx];
+
+    // Advance instance #
+    if (pIt->childInst < pChildType->instances && pChildType->pClassInfo)
+    {
+        NV_STATUS status = gpuGetChildInfo(pChildType->pClassInfo->classId, pIt->childInst, &childInfo);
+
+        NV_ASSERT(status == NV_OK);
+
+        pIt->gpuChildPtrOffset = childInfo.gpuChildPtrOffset;
+        pIt->childInst++;
+
+        *pEngDesc = childInfo.engDesc;
+        return NV_TRUE;
+    }
+
+    pIt->childTypeIdx++;
+    pIt->childInst = 0;
+
+    // Move on to the next child type and retry (tail recursion)
+    return gpuGetNextPossibleEngDescriptor(pIt, pEngDesc);
+}
+
+/*!
+ * @brief Returns the unshared engstate for the child object with the given engine
+ *        descriptor (i.e.: the ENGSTATE without any of the SLI sharing hacks).
+ *
+ * All engines are uniquely identified by their engine descriptor.
+ *
+ * @param[in] pGpu     OBJGPU pointer
+ * @param[in] engDesc  ENGDESCRIPTOR
+ */
+POBJENGSTATE
+gpuGetEngstateNoShare_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc)
+{
+    ENGSTATE_ITER it = gpuGetEngstateIter(pGpu);
+    OBJENGSTATE *pEngstate;
+
+    while (gpuGetNextEngstate(pGpu, &it, &pEngstate))
+    {
+        if (engstateGetDescriptor(pEngstate) == engDesc)
+        {
+            return pEngstate;
+        }
+    }
+
+    return NULL;
+}
+
+/*!
+ * @brief Returns the engstate for the child object with the given engine descriptor
+ *
+ * All engines are uniquely identified by their engine descriptor.
+ *
+ * @param[in] pGpu     OBJGPU pointer
+ * @param[in] engDesc  ENGDESCRIPTOR
+ */
+POBJENGSTATE
+gpuGetEngstate_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc)
+{
+
+    // Everything else is unshared
+    return gpuGetEngstateNoShare(pGpu, engDesc);
+}
+
+
+/*!
+ * @brief Iterates over pGpu's child engstates. Returns NV_FALSE when there are
+ *        no more.
+ *
+ * @param[in]     pGpu        OBJGPU pointer
+ * @param[in,out] pIt         Iterator
+ * @param[out]    ppEngstate  The next engstate
+ *
+ * @return NV_TRUE if *ppEngstate is valid, NV_FALSE if no more found
+ */
+NvBool
+gpuGetNextEngstate_IMPL(OBJGPU *pGpu, ENGSTATE_ITER *pIt, OBJENGSTATE **ppEngstate)
+{
+    ENGDESCRIPTOR  engDesc;
+    OBJENGSTATE   *pEngstate;
+    Dynamic      **ppChild;
+
+    while (gpuGetNextPossibleEngDescriptor(pIt, &engDesc))
+    {
+        ppChild = gpuGetChildPtr(pGpu, pIt->gpuChildPtrOffset);
+        if (*ppChild != NULL)
+        {
+            pEngstate = dynamicCast(*ppChild, OBJENGSTATE);
+            if (pEngstate != NULL)
+            {
+                *ppEngstate = pEngstate;
+                return NV_TRUE;
+            }
+        }
+    }
+
+    return NV_FALSE;
+}
+
+/*!
+ * @brief The generic object constructor + * + * @param[in] pGpu POBJGPU + * @param[in] classId NVOC_CLASS_ID + * @param[in] instanceID NvU32 + * + */ +NV_STATUS +gpuCreateObject_IMPL +( + OBJGPU *pGpu, + NVOC_CLASS_ID classId, + NvU32 instanceID +) +{ + NV_STATUS status; + OBJENGSTATE *pEngstate; + GPUCHILDINFO childInfo; + Dynamic **ppChildPtr; + ENGSTATE_TRANSITION_DATA engTransitionData; + + status = gpuGetChildInfo(classId, instanceID, &childInfo); + + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + ppChildPtr = gpuGetChildPtr(pGpu, childInfo.gpuChildPtrOffset); + + // Ask the object database utility to create a child object. + status = objCreateDynamic(ppChildPtr, pGpu, childInfo.pClassInfo); + + if (status != NV_OK) + { + return status; + } + NV_ASSERT_OR_RETURN(*ppChildPtr, NV_ERR_INVALID_STATE); + + pEngstate = dynamicCast(*ppChildPtr, OBJENGSTATE); + + if (pEngstate == NULL) + { + status = NV_ERR_INVALID_STATE; + goto gpuCreateObject_exit; + } + + status = engstateConstructBase(pEngstate, pGpu, childInfo.engDesc); + NV_ASSERT_OR_GOTO(status == NV_OK, gpuCreateObject_exit); + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_CONSTRUCT, &engTransitionData); + status = engstateConstructEngine(pGpu, pEngstate, childInfo.engDesc); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_CONSTRUCT, &engTransitionData); + + // If engine is missing, free it immediately + if (pEngstate->getProperty(pEngstate, PDB_PROP_ENGSTATE_IS_MISSING)) + { + status = NV_ERR_NOT_SUPPORTED; + } + +gpuCreateObject_exit: + if (status != NV_OK) + { + objDelete(*ppChildPtr); + *ppChildPtr = NULL; + } + + return status; +} + + +void +gpuDestruct_IMPL +( + OBJGPU *pGpu +) +{ + HWBC_LIST *pGpuHWBCList = NULL; + int typeNum; + int instNum; + GPUCHILDTYPE *pChildTypeCur; + GPUCHILDINFO childInfoCur; + Dynamic **pChildPtr; + + // Free children in reverse order from construction + for (typeNum = GPU_NUM_CHILD_TYPES - 1; typeNum >= 0; typeNum--) + { + pChildTypeCur = &gpuChildTypeList[typeNum]; + + if (!pChildTypeCur->pClassInfo) + { + continue; + } + + for (instNum = pChildTypeCur->instances - 1; instNum >= 0; instNum--) + { + NV_STATUS status; + + status = gpuGetChildInfo(pChildTypeCur->pClassInfo->classId, instNum, &childInfoCur); + + NV_ASSERT(status == NV_OK); + + pChildPtr = gpuGetChildPtr(pGpu, childInfoCur.gpuChildPtrOffset); + + if (*pChildPtr) + { + objDelete(*pChildPtr); + *pChildPtr = NULL; + } + } + } + + // + // If device instance is unassigned, we haven't initialized far enough to + // do any accounting with it + // + if (gpuGetDeviceInstance(pGpu) != NV_MAX_DEVICES) + { + rmapiReportLeakedDevices(gpuGetGpuMask(pGpu)); + } + + _gpuFreeEngineOrderList(pGpu); + + portMemFree(pGpu->pUserRegisterAccessMap); + pGpu->pUserRegisterAccessMap = NULL; + + portMemFree(pGpu->pUnrestrictedRegisterAccessMap); + pGpu->pUnrestrictedRegisterAccessMap = NULL; + + portMemFree(pGpu->pDeviceInfoTable); + pGpu->pDeviceInfoTable = NULL; + pGpu->numDeviceInfoEntries = 0; + + pGpu->userRegisterAccessMapSize = 0; + + gpuDestroyEngineTable(pGpu); + gpuDestroyClassDB(pGpu); + osDestroyOSHwInfo(pGpu); + + while(pGpu->pHWBCList) + { + pGpuHWBCList = pGpu->pHWBCList; + pGpu->pHWBCList = pGpuHWBCList->pNext; + portMemFree(pGpuHWBCList); + } + + // + // Destroy and free the RegisterAccess object linked to this GPU + // This should be moved out to gpu_mgr in the future to line up with + // the construction, but currently depends on pGpu still existing + // + regAccessDestruct(&pGpu->registerAccess); + + 
NV_ASSERT(pGpu->numConstructedFalcons == 0); + + portMemFree(pGpu->pRegopOffsetScratchBuffer); + pGpu->pRegopOffsetScratchBuffer = NULL; + + portMemFree(pGpu->pRegopOffsetAddrScratchBuffer); + pGpu->pRegopOffsetAddrScratchBuffer = NULL; + + pGpu->regopScratchBufferMaxOffsets = 0; + + NV_ASSERT(pGpu->numSubdeviceBackReferences == 0); + portMemFree(pGpu->pSubdeviceBackReferences); + pGpu->pSubdeviceBackReferences = NULL; + pGpu->numSubdeviceBackReferences = 0; + pGpu->maxSubdeviceBackReferences = 0; + + gpuDestructPhysical(pGpu); +} + +static NV_STATUS +gpuCreateChildObjects +( + OBJGPU *pGpu, + NvBool bConstructEarly +) +{ + PENGDESCRIPTOR pEngDescriptors; + NvU32 numEngDescriptors; + PGPUCHILDTYPE pChildTypeCur; + GPUCHILDINFO childInfoCur; + NvU32 t, i; + NV_STATUS rmStatus = NV_OK; + + pEngDescriptors = gpuGetInitEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + for (t = 0; t < GPU_NUM_CHILD_TYPES; t++) + { + pChildTypeCur = &gpuChildTypeList[t]; + + if (!pChildTypeCur->pClassInfo) + { + continue; + } + + for (i = 0; i < pChildTypeCur->instances; i++) + { + NVOC_CLASS_ID classId = pChildTypeCur->pClassInfo->classId; + + rmStatus = gpuGetChildInfo(classId, i, &childInfoCur); + + NV_ASSERT(rmStatus == NV_OK); + + if ((bConstructEarly == childInfoCur.bConstructEarly) && + gpuShouldCreateObject(&childInfoCur, + pEngDescriptors, + numEngDescriptors)) + { + rmStatus = gpuCreateObject(pGpu, classId, i); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + rmStatus = NV_OK; + } + else if (rmStatus != NV_OK) + { + return rmStatus; + } + } + } + + // Bail out of both loops. + if (rmStatus != NV_OK) + { + break; + } + } + + return rmStatus; +} + +static NvBool +gpuShouldCreateObject +( + PGPUCHILDINFO pChildInfo, + PENGDESCRIPTOR pEngDescriptors, + NvU32 numEngDescriptors +) +{ + NvBool retVal = NV_FALSE; + NvU32 curEngDescIdx; + + if (pChildInfo->bAlwaysCreate) + { + // For now all SW engines get created + retVal = NV_TRUE; + } + else + { + // Let the HAL confirm that we should create an object for this engine. 
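+        // ("Confirmation" here is membership in the HAL-built init descriptor
+        // list, which enumerates every engine this chip exposes.)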
+ for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + if (pChildInfo->engDesc == pEngDescriptors[curEngDescIdx]) + { + retVal = NV_TRUE; + break; + } + } + } + + return retVal; +} + +NvU32 +gpuGetGpuMask_IMPL +( + OBJGPU *pGpu +) +{ + if (IsSLIEnabled(pGpu)) + { + return 1 << (gpumgrGetSubDeviceInstanceFromGpu(pGpu)); + } + else + { + return 1 << (pGpu->gpuInstance); + } +} + +static NV_STATUS gspSupportsEngine(OBJGPU *pGpu, ENGDESCRIPTOR engdesc, NvBool *supports) +{ + if (!IS_GSP_CLIENT(pGpu)) + return NV_WARN_NOTHING_TO_DO; + + NvU32 clientEngineId = 0; + + if (gpuXlateEngDescToClientEngineId(pGpu, engdesc, &clientEngineId) != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Failed to xlate engdesc 0x%x\n", engdesc); + return NV_WARN_NOTHING_TO_DO; + } + + if (pGpu->gspSupportedEngines == NULL) + { + pGpu->gspSupportedEngines = portMemAllocNonPaged(sizeof(*pGpu->gspSupportedEngines)); + NV_ASSERT_OR_RETURN(pGpu->gspSupportedEngines != NULL, NV_ERR_NO_MEMORY); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV_STATUS status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_ENGINES_V2, + pGpu->gspSupportedEngines, + sizeof(*pGpu->gspSupportedEngines)); + + if (status != NV_OK) + { + portMemFree(pGpu->gspSupportedEngines); + return status; + } + } + + NvU32 i; + for (i = 0; i < pGpu->gspSupportedEngines->engineCount; i++) + { + if (pGpu->gspSupportedEngines->engineList[i] == clientEngineId) + { + *supports = NV_TRUE; + return NV_OK; + } + } + + *supports = NV_FALSE; + return NV_OK; +} + +/* + * The engine removal protocol is as follows: + * - engines returning an error code from ConstructEngine will be immediately + * removed (this happens in gpuCreateObject) + * - engines may set ENGSTATE_IS_MISSING at any time before gpuStatePreInit + * - engines with ENGSTATE_IS_MISSING set at gpuStatePreInit will be removed + * - engines that return NV_FALSE from engstateIsPresent at gpuStatePreInit + * will be removed + * + * gpuRemoveMissingEngines takes place before the main loop in gpuStatePreInit + * and is responsible for removing engines satisfying the last two bullets + * above. + */ +static NV_STATUS +gpuRemoveMissingEngines +( + OBJGPU *pGpu +) +{ + NvU32 curEngDescIdx; + PENGDESCRIPTOR engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NV_STATUS rmStatus = NV_OK; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + OBJENGSTATE *pEngstate; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + NVOC_CLASS_ID curClassId = ENGDESC_FIELD(curEngDescriptor, _CLASS); + + if (curClassId == classId(OBJINVALID)) + { + continue; + } + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate != NULL) + { + if (!pEngstate->getProperty(pEngstate, PDB_PROP_ENGSTATE_IS_MISSING) && + engstateIsPresent(pGpu, pEngstate)) + { + continue; + } + + gpuDestroyMissingEngine(pGpu, pEngstate); + pEngstate = NULL; + } + + // + // pEngstate is NULL or missing, so we must be sure to unregister + // all associated API classes and remove the stale engine descriptors + // from the GPU HAL engine lists. 
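+        //
+        // (gpuDeleteEngineOnPreInit below removes the engine's classes from
+        // the class DB and invalidates its INIT/LOAD/UNLOAD/DESTROY
+        // descriptors, so no further state callbacks run for it.)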
+ // + NV_PRINTF(LEVEL_INFO, "engine %d:%d is missing, removing\n", + ENGDESC_FIELD(curEngDescriptor, _CLASS), + ENGDESC_FIELD(curEngDescriptor, _INST)); + + rmStatus = gpuDeleteEngineOnPreInit(pGpu, curEngDescriptor); + NV_ASSERT(rmStatus == NV_OK || !"Error while trying to remove missing engine"); + } + + return rmStatus; +} + +/* + * Removing classes from classDB of a missing engine + */ +static void +gpuRemoveMissingEngineClasses +( + OBJGPU *pGpu, + NvU32 missingEngDescriptor +) +{ + NvU32 numClasses, i; + NvU32 *pClassList = NULL; + if (gpuGetClassList(pGpu, &numClasses, NULL, missingEngDescriptor) == NV_OK) + { + pClassList = portMemAllocNonPaged(sizeof(NvU32) * numClasses); + if (NV_OK == gpuGetClassList(pGpu, &numClasses, pClassList, missingEngDescriptor)) + { + for (i = 0; i < numClasses; i++) + { + gpuDeleteClassFromClassDBByClassId(pGpu, pClassList[i]); + } + } + portMemFree(pClassList); + pClassList = NULL; + } +} + +/* + * Destroy and unregister engine object of a missing engine + */ +static void +gpuDestroyMissingEngine +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + GPUCHILDINFO childInfo; + Dynamic **pChildPtr; + NV_STATUS status; + ENGDESCRIPTOR engDesc; + + engstateInitMissing(pGpu, pEngstate); + + engDesc = engstateGetDescriptor(pEngstate); + + status = gpuGetChildInfo(ENGDESC_FIELD(engDesc, _CLASS), ENGDESC_FIELD(engDesc, _INST), &childInfo); + + NV_ASSERT_OR_RETURN_VOID(status == NV_OK); + + pChildPtr = gpuGetChildPtr(pGpu, childInfo.gpuChildPtrOffset); + + objDelete(*pChildPtr); + *pChildPtr = NULL; +} + +/* + * @brief Find if given engine descriptor is supported by GPU + * + * @param[in] pGpu OBJGPU pointer + * @param[in] descriptor engine descriptor to search for + * + * @returns NV_TRUE if given engine descriptor was found in a + * given engine descriptor list, NV_FALSE otherwise. + * + */ +NvBool +gpuIsEngDescSupported_IMPL +( + OBJGPU *pGpu, + NvU32 descriptor +) +{ + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + PENGDESCRIPTOR pEngDescriptor = gpuGetInitEngineDescriptors(pGpu); + NvU32 counter = 0; + NvBool engDescriptorFound = NV_FALSE; + + for (counter = 0; counter < numEngDescriptors; counter++) + { + if (pEngDescriptor[counter] == descriptor) + { + engDescriptorFound = NV_TRUE; + break; + } + } + + return engDescriptorFound; +} +/*! + * @brief Mark given Engine Descriptor with ENG_INVALID engine descriptor. + * + * Note: It is legal to have more than one entry with equal Descriptor + * in the Engine Descriptor list. + * + * @param[in] pEngDescriptor Pointer to array of engine descriptors + * @param[in] maxDescriptors Size of engine descriptor array + * @param[in] descriptor Engine descriptor to be changed to ENG_INVALID engine descriptor + * + * @returns void + */ +static void +gpuMissingEngDescriptor(PENGDESCRIPTOR pEngDescriptor, NvU32 maxDescriptors, + ENGDESCRIPTOR descriptor) +{ + NvU32 counter; + + for (counter = 0; counter < maxDescriptors; counter++) + { + if (pEngDescriptor[counter] == descriptor) + { + pEngDescriptor[counter] = ENG_INVALID; + } + } +} + + +/*! + * @brief Delete an engine from class DB. + * + * WARNING! Function doesn't remove INIT/DESTROY engines from HAL lists. + * gpuInitEng and gpuDestroyEng won't be no-ops for relevant engine. + * + * Use case: + * If an engine needs to be removed, but StateInit/Destroy are required. + * It's better to use gpuDeleteEngineOnPreInit instead. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID to search and remove + * + * @returns NV_STATUS - NV_OK always. 
+ */ +NV_STATUS +gpuDeleteEngineFromClassDB_IMPL(OBJGPU *pGpu, NvU32 engDesc) +{ + PENGDESCRIPTOR pEngDesc = NULL; + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 engDescriptor = engDesc; + + // remove Class tagged with engDesc from Class Database + gpuDeleteClassFromClassDBByEngTag(pGpu, engDesc); + + // + // Bug 370327 + // Q: Why remove load/unload? + // A: Since this engine does not exist, we should prevent hw accesses to it + // which should ideally only take place in load/unload ( not init/destroy ) + // + // Q: Why not remove init/destroy, the engines gone right? + // A: If init does some alloc and loadhw does the probe then removing destroy + // will leak. + // + + // Remove load + pEngDesc = gpuGetLoadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove unload + pEngDesc = gpuGetUnloadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Delete an engine from class DB only prior or on gpuPreInit stage. + * + * WARNING! Function must be used only before INIT stage, to avoid leaks. + * See gpuDeleteEngineFromClassDB for more information. + * + * Function removes Classes with given Engine Tag from class DB + * and removes Engines from HAL lists with equal Engine Tags. + * Function doesn't remove Engines from HAL Sync list, + * see gpuDeleteEngineFromClassDB for more information. + * + * Use case: + * Any platform where an engine is absent and it is required to + * prevent engine's load/unload and init/destroy calls from getting executed. + * In other words, this function is used when it is OK to remove/STUB all + * of the HALs of an engine without jeopardizing the initialization and + * operation of other engines. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID to search and remove + * + * @returns NV_STATUS - NV_OK on success, error otherwise. + * + */ +NV_STATUS +gpuDeleteEngineOnPreInit_IMPL(OBJGPU *pGpu, NvU32 engDesc) +{ + PENGDESCRIPTOR pEngDesc = NULL; + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + ENGDESCRIPTOR engDescriptor = engDesc; + NV_STATUS rmStatus = NV_OK; + NvBool bGspSupported = NV_FALSE; + + rmStatus = gspSupportsEngine(pGpu, engDesc, &bGspSupported); + if (rmStatus == NV_WARN_NOTHING_TO_DO) + rmStatus = NV_OK; + + NV_ASSERT_OK_OR_RETURN(rmStatus); + + // remove Class tagged with engDesc from Class Database. + if (!bGspSupported) + gpuDeleteClassFromClassDBByEngTag(pGpu, engDesc); + + // Remove Load Engine Descriptors + pEngDesc = gpuGetLoadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove Unload Engine Descriptors + pEngDesc = gpuGetUnloadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove Init Engine Descriptors + pEngDesc = gpuGetInitEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove Destroy Engine Descriptors + pEngDesc = gpuGetDestroyEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + if (!bGspSupported) + { + rmStatus = gpuUpdateEngineTable(pGpu); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Update engine table operation failed!\n"); + DBG_BREAKPOINT(); + } + } + + return rmStatus; +} + +/*! 
+ * @brief Perform GPU pre init tasks + * + * Function tries to pre-init all engines from HAL Init Engine Descriptor list. + * If engine is not present, or its engine pre-init function reports it is unsupported + * then engine will be deleted from Class DB and HAL lists. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. + * TODO: Fix "init missing" concept to not create unsupported objects at all. + * + * @param[in] pGpu OBJGPU pointer + * + * @returns NV_OK upon successful pre-initialization + */ +NV_STATUS +gpuStatePreInit_IMPL +( + OBJGPU *pGpu +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + NV_ASSERT_OK_OR_RETURN(_gpuAllocateInternalObjects(pGpu)); + NV_ASSERT_OK_OR_RETURN(_gpuInitChipInfo(pGpu)); + NV_ASSERT_OK_OR_RETURN(gpuConstructUserRegisterAccessMap(pGpu)); + NV_ASSERT_OK_OR_RETURN(gpuBuildGenericKernelFalconList(pGpu)); + + rmStatus = gpuRemoveMissingEngines(pGpu); + NV_ASSERT(rmStatus == NV_OK); + + pGpu->bFullyConstructed = NV_TRUE; + + engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_INIT, &engTransitionData); + rmStatus = engstateStatePreInit(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_INIT, &engTransitionData); + + if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + switch (curEngDescriptor) + { + // + // Allow removing kernel engines in StatePreInit if their + // physical counterpart is absent. + // + case ENG_KERNEL_DISPLAY: + // On Displayless GPU's, Display Engine is not present. So, RM should not keep the display + // classes in GET_CLASSLIST. Hence removing the Display classes from the ClassDB + gpuRemoveMissingEngineClasses(pGpu, ENG_KERNEL_DISPLAY); + break; + // + // Explicitly track engines that trigger this block + // so that we can verify they function properly + // after they are no longer removed here. 
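+                //
+                // Illustrative sketch (compiled out): tracking one more engine
+                // here just means adding a case ahead of 'default'. ENG_FOO is
+                // a hypothetical descriptor used only for illustration.
+                //
+#if 0
+                case ENG_FOO:
+                    NV_PRINTF(LEVEL_WARNING,
+                              "ENG_FOO reported NV_ERR_NOT_SUPPORTED in PreInit, removing\n");
+                    break;
+#endif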
+ // + case ENG_INFOROM: + // TODO: try to remove this special case + NV_PRINTF(LEVEL_WARNING, + "engine removal in PreInit with NV_ERR_NOT_SUPPORTED is deprecated (%s)\n", + engstateGetName(pEngstate)); + break; + default: + NV_PRINTF(LEVEL_ERROR, + "disallowing NV_ERR_NOT_SUPPORTED PreInit removal of untracked engine (%s)\n", + engstateGetName(pEngstate)); + DBG_BREAKPOINT(); + NV_ASSERT(0); + break; + } + + gpuDestroyMissingEngine(pGpu, pEngstate); + pEngstate = NULL; + + rmStatus = gpuDeleteEngineOnPreInit(pGpu, curEngDescriptor); + // TODO: destruct engine here after MISSING support is removed + NV_ASSERT(rmStatus == NV_OK || !"Error while trying to remove missing engine"); + } + else if (rmStatus != NV_OK) + { + break; + } + } + + pGpu->boardInfo = portMemAllocNonPaged(sizeof(*pGpu->boardInfo)); + if (pGpu->boardInfo) + { + // To avoid potential race of xid reporting with the control, zero it out + portMemSet(pGpu->boardInfo, '\0', sizeof(*pGpu->boardInfo)); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO, + pGpu->boardInfo, + sizeof(*pGpu->boardInfo)) != NV_OK) + { + portMemFree(pGpu->boardInfo); + pGpu->boardInfo = NULL; + } + } + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. +NV_STATUS +gpuStateInit_IMPL +( + OBJGPU *pGpu +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + // Initialize numaNodeId to invalid node ID as "0" can be considered valid node + pGpu->numaNodeId = NV0000_CTRL_NO_NUMA_NODE; + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the below code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + // Do this before calling stateInit() of child engines. + objCreate(&pGpu->pPrereqTracker, pGpu, PrereqTracker, pGpu); + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the above code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_INIT, &engTransitionData); + rmStatus = engstateStateInit(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_INIT, &engTransitionData); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + goto gpuStateInit_exit; + } + + // Set a property indicating that the state initialization has been done + pGpu->setProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED, NV_TRUE); + +gpuStateInit_exit: + return rmStatus; +} + +/*! 
+ * @brief Top level pre-load routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. + * + * StatePreLoad() is called before StateLoad() likewise StatePostUnload() is + * called after StateUnload(). + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. + * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePreLoad +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetLoadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePreLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_LOAD, &engTransitionData); + rmStatus = engstateStatePreLoad(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_LOAD, &engTransitionData); + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePreLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + // + // An engine load leaving the broadcast status to NV_TRUE + // will most likely mess up the pre-load of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + break; + + // + // Release and re-acquire the lock to allow interrupts + // + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_STATE_LOAD); + if (rmStatus != NV_OK) + break; + } + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. +NV_STATUS +gpuStateLoad_IMPL +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + NvU32 status = NV_OK; + + pGpu->registerAccess.regReadCount = pGpu->registerAccess.regWriteCount = 0; + RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadStart", pGpu->gpuId, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + // Initialize SRIOV specific members of OBJGPU + status = gpuInitSriov_HAL(pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error initializing SRIOV: 0x%0x\n", status); + return status; + } + + // It is a no-op on baremetal and inside non SRIOV guest. 
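+    //
+    // Illustrative sketch (compiled out) of the lock-cycling idiom used by the
+    // state transition loops in this file: dropping and re-taking the GPU lock
+    // between engines bounds how long interrupts stay blocked during long
+    // load sequences.
+    //
+#if 0
+    rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+    // Pending interrupts can be serviced while the lock is dropped.
+    rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_STATE_LOAD);
+#endif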
+    rmStatus = gpuCreateDefaultClientShare_HAL(pGpu);
+    if (rmStatus != NV_OK)
+    {
+        return rmStatus;
+    }
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    rmStatus = gpuStatePreLoad(pGpu, flags);
+    if (rmStatus != NV_OK)
+    {
+        //
+        // return early if we broke out of the preLoad sequence with
+        // rmStatus != NV_OK
+        //
+        return rmStatus;
+    }
+
+    engDescriptorList = gpuGetLoadEngineDescriptors(pGpu);
+    numEngDescriptors = gpuGetNumEngDescriptors(pGpu);
+
+    // Set indicator that we are running state load
+    pGpu->bStateLoading = NV_TRUE;
+
+    // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array
+    for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++)
+    {
+        ENGSTATE_TRANSITION_DATA engTransitionData;
+        ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx];
+        OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor);
+
+        if (pEngstate == NULL)
+        {
+            continue;
+        }
+
+        RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+
+        engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_LOAD, &engTransitionData);
+        rmStatus = engstateStateLoad(pGpu, pEngstate, flags);
+        engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_LOAD, &engTransitionData);
+
+        // TODO: This is temporary and may be dead with TESLA
+        if (rmStatus == NV_ERR_INVALID_ADDRESS)
+        {
+            NV_PRINTF(LEVEL_ERROR, "NV_ERR_INVALID_ADDRESS is no longer supported in StateLoad (%s)\n",
+                      engstateGetName(pEngstate));
+            DBG_BREAKPOINT();
+        }
+
+        //
+        // An engine load leaving the broadcast status to NV_TRUE
+        // will most likely mess up the load of the next engines
+        //
+        NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+        // RMCONFIG: Bail on errors unless the feature/object/engine/class
+        //           is simply unsupported
+        if (rmStatus == NV_ERR_NOT_SUPPORTED)
+            rmStatus = NV_OK;
+        if (rmStatus != NV_OK)
+            goto gpuStateLoad_exit;
+
+        //
+        // Release and re-acquire the lock to allow interrupts
+        //
+        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+
+        rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE,
+                                     RM_LOCK_MODULES_STATE_LOAD);
+        if (rmStatus != NV_OK)
+            goto gpuStateLoad_exit;
+
+        RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+    }
+
+    rmStatus = gpuInitVmmuInfo(pGpu);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Error initializing VMMU info: 0x%x\n", rmStatus);
+        goto gpuStateLoad_exit;
+    }
+
+    {
+        // Perform post load operations
+        rmStatus = gpuStatePostLoad(pGpu, flags);
+        if (rmStatus != NV_OK)
+            goto gpuStateLoad_exit;
+
+    }
+
+    // Clear indicator that we are running state load
+    pGpu->bStateLoading = NV_FALSE;
+
+    // Set a property indicating that the state load has been done
+    pGpu->bStateLoaded = NV_TRUE;
+
+    RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEnd", pGpu->gpuId, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount);
+
+gpuStateLoad_exit:
+    return rmStatus;
+}
+
+/*!
+ * @brief Top level post-load routine
+ *
+ * Provides a mechanism to resolve cyclic dependencies between engines. For
+ * example, OBJFB depends on OBJCE on Fermi (for memory scrubbing), likewise
+ * OBJCE also depends on OBJFB (for instance memory).
+ *
+ * StatePostLoad() is called after StateLoad() likewise StatePreUnload() is
+ * called prior to StateUnload().
+ *
+ * Dependencies which are DAGs should continue to be resolved by reordering the
+ * engine descriptor lists.
Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. + * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePostLoad +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetLoadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePostLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_POST_LOAD, &engTransitionData); + rmStatus = engstateStatePostLoad(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_POST_LOAD, &engTransitionData); + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePostLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + goto gpuStatePostLoad_exit; + + // + // Release and re-acquire the lock to allow interrupts + // + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_STATE_LOAD); + + if (rmStatus != NV_OK) + goto gpuStatePostLoad_exit; + } + +gpuStatePostLoad_exit: + return rmStatus; +} + +/*! + * @brief Top level pre-unload routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. For + * example, OBJFB depends on OBJCE on Fermi (for memory scrubbing), likewise + * OBJCE also depends on OBJFB (for instance memory). + * + * StatePostLoad() is called after StateLoad() likewise StatePreUnload() is + * called prior to StateUnload(). + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. 
+ * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePreUnload +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetUnloadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_UNLOAD, &engTransitionData); + rmStatus = engstateStatePreUnload(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_UNLOAD, &engTransitionData); + + // + // During unload, failure of a single engine may not be fatal. + // ASSERT if there is a failure, but ignore the status and continue + // unloading other engines to prevent (worse) memory leaks. + // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to pre unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + } + } + rmStatus = NV_OK; + } + + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + return rmStatus; +} + +NV_STATUS +gpuEnterShutdown_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to unload the device with error 0x%x\n", rmStatus); + } + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. +NV_STATUS +gpuStateUnload_IMPL +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + OBJENGSTATE *pEngstate; + NV_STATUS rmStatus = NV_OK; + NV_STATUS fatalErrorStatus = NV_OK; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // Set indicator that state is currently unloading. + pGpu->bStateUnloading = NV_TRUE; + + { + rmStatus = gpuStatePreUnload(pGpu, flags); + } + + if (rmStatus != NV_OK) + return rmStatus; + + engDescriptorList = gpuGetUnloadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_UNLOAD, &engTransitionData); + rmStatus = engstateStateUnload(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_UNLOAD, &engTransitionData); + + // + // An engine unload leaving the broadcast status to NV_TRUE + // will most likely mess up the unload of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // + // During unload, failure of a single engine may not be fatal. 
+ // ASSERT if there is a failure, but ignore the status and continue + // unloading other engines to prevent (worse) memory leaks. + // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + + if (flags & GPU_STATE_FLAGS_PRESERVING) + { + // + // FBSR can fail due to low sysmem. + // So return error. + // See bugs 2051056, 2049141 + // + if (objDynamicCastById(pEngstate, classId(MemorySystem))) + { + fatalErrorStatus = rmStatus; + } + } + } + } + rmStatus = NV_OK; + } + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + // Call the gpuStatePostUnload routine + rmStatus = gpuStatePostUnload(pGpu, flags); + NV_ASSERT_OK(rmStatus); + + gpuDestroyDefaultClientShare_HAL(pGpu); + + // De-init SRIOV + gpuDeinitSriov_HAL(pGpu); + + // Set indicator that state unload finished. + pGpu->bStateUnloading = NV_FALSE; + + // Set a property indicating that the state unload has been done + if (rmStatus == NV_OK) + { + pGpu->bStateLoaded = NV_FALSE; + } + + if (fatalErrorStatus != NV_OK) + { + rmStatus = fatalErrorStatus; + } + + return rmStatus; +} + +/*! + * @brief Top level post-unload routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. + * + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. + * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePostUnload +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + OBJENGSTATE *pEngstate; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetUnloadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_POST_UNLOAD, &engTransitionData); + rmStatus = engstateStatePostUnload(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_POST_UNLOAD, &engTransitionData); + + // + // An engine post-unload leaving the broadcast status to NV_TRUE + // will most likely mess up the post-unload of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // + // During unload, failure of a single engine may not be fatal. + // ASSERT if there is a failure, but ignore the status and continue + // unloading other engines to prevent (worse) memory leaks. 
+ // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to post unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + } + } + rmStatus = NV_OK; + } + + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + return rmStatus; +} + +NV_STATUS +gpuStateDestroy_IMPL +( + OBJGPU *pGpu +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + OBJENGSTATE *pEngstate; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetDestroyEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + NV_RM_RPC_SIM_FREE_INFRA(pGpu, rmStatus); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_DESTROY, &engTransitionData); + engstateStateDestroy(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_DESTROY, &engTransitionData); + } + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the below code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + // Do this after calling stateDestroy() of child engines. + objDelete(pGpu->pPrereqTracker); + pGpu->pPrereqTracker = NULL; + + // Clear the property indicating that the state initialization has been done + if (rmStatus == NV_OK) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED, NV_FALSE); + } + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the above code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + _gpuFreeInternalObjects(pGpu); + gpuDestroyGenericKernelFalconList(pGpu); + + portMemFree(pGpu->boardInfo); + pGpu->boardInfo = NULL; + + portMemFree(pGpu->gspSupportedEngines); + pGpu->gspSupportedEngines = NULL; + + portMemFree(pGpu->pChipInfo); + pGpu->pChipInfo = NULL; + + pGpu->bFullyConstructed = NV_FALSE; + + return rmStatus; +} + +// +// Logic: If arch = requested AND impl = requested --> NV_TRUE +// OR If arch = requested AND impl = requested AND maskRev = requested --> NV_TRUE +// OR If arch = requested AND impl = requested AND rev = requested --> NV_TRUE +// +NvBool +gpuIsImplementation_IMPL +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl, + NvU32 maskRevision, + NvU32 revision +) +{ + NvU32 gpuArch, gpuImpl; + NvBool result = NV_FALSE; + + NV_ASSERT(revision == GPU_NO_REVISION); + + gpuXlateHalImplToArchImpl(pGpu, halImpl, &gpuArch, &gpuImpl); + + result = ((gpuGetChipArch(pGpu) == gpuArch) && + (gpuGetChipImpl(pGpu) == gpuImpl)); + + if (maskRevision != GPU_NO_MASK_REVISION) + { + result = result && (GPU_GET_MASKREVISION(pGpu) == maskRevision); + } + + return result; +} + +// Check the software state to decide if we are in full power mode or not. 
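+//
+// Illustrative caller sketch (compiled out) for the check below: paths that
+// touch GPU registers typically bail out early when the GPU is not at full
+// power. The gpuIsGpuFullPower() wrapper name and the specific error code
+// are assumptions made for illustration.
+//
+#if 0
+    if (!gpuIsGpuFullPower(pGpu))
+        return NV_ERR_GPU_NOT_FULL_POWER;
+#endif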
+NvBool
+gpuIsGpuFullPower_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NvBool retVal = NV_TRUE;
+
+    //
+    // SW may have indicated that the GPU is in standby, hibernate, or powered off,
+    // indicating a logical power state.
+    //
+    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_STANDBY) ||
+        pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE))
+    {
+        retVal = NV_FALSE;
+    }
+
+    return retVal;
+}
+
+// Check the software state to decide if we are in full power mode or not.
+NvBool
+gpuIsGpuFullPowerForPmResume_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    NvBool retVal = NV_TRUE;
+    //
+    // SW may have indicated that the GPU is in standby, resume, hibernate, or powered off,
+    // indicating a logical power state.
+    //
+    if ((!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) &&
+        (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_STANDBY) ||
+         pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE)))
+    {
+        retVal = NV_FALSE;
+    }
+    return retVal;
+}
+
+NvBool
+gpuIsImplementationOrBetter_IMPL
+(
+    OBJGPU *pGpu,
+    HAL_IMPLEMENTATION halImpl,
+    NvU32 maskRevision,
+    NvU32 revision
+)
+{
+    NvU32 gpuArch, gpuImpl;
+    NvU32 chipArch;
+    NvBool result = NV_FALSE;
+
+    NV_ASSERT(revision == GPU_NO_REVISION);
+
+    gpuXlateHalImplToArchImpl(pGpu, halImpl, &gpuArch, &gpuImpl);
+
+    // "is implementation or better" is only defined between 2 gpus within
+    // the same "gpu series" as defined in config/Gpus.pm and gpuarch.h
+    chipArch = gpuGetChipArch(pGpu);
+
+    if (DRF_VAL(GPU, _ARCHITECTURE, _SERIES, chipArch) == DRF_VAL(GPU, _ARCHITECTURE, _SERIES, gpuArch))
+    {
+        if (maskRevision != GPU_NO_MASK_REVISION)
+        {
+            result = gpuSatisfiesTemporalOrderMaskRev(pGpu, halImpl, gpuArch,
+                                                      gpuImpl, maskRevision);
+        }
+        else
+        {
+            // In case there is a temporal ordering we need to account for
+            result = gpuSatisfiesTemporalOrder(pGpu, halImpl, gpuArch, gpuImpl);
+        }
+    }
+
+    return result;
+}
+
+static void
+gpuXlateHalImplToArchImpl
+(
+    OBJGPU *pGpu,
+    HAL_IMPLEMENTATION halImpl,
+    NvU32 *gpuArch,
+    NvU32 *gpuImpl
+)
+{
+    switch (halImpl)
+    {
+        case HAL_IMPL_GM107:
+        {
+            *gpuArch = GPU_ARCHITECTURE_MAXWELL;
+            *gpuImpl = GPU_IMPLEMENTATION_GM107;
+            break;
+        }
+
+        case HAL_IMPL_GM108:
+        {
+            *gpuArch = GPU_ARCHITECTURE_MAXWELL;
+            *gpuImpl = GPU_IMPLEMENTATION_GM108;
+            break;
+        }
+
+        case HAL_IMPL_GM200:
+        {
+            *gpuArch = GPU_ARCHITECTURE_MAXWELL2;
+            *gpuImpl = GPU_IMPLEMENTATION_GM200;
+            break;
+        }
+
+        case HAL_IMPL_GM204:
+        {
+            *gpuArch = GPU_ARCHITECTURE_MAXWELL2;
+            *gpuImpl = GPU_IMPLEMENTATION_GM204;
+            break;
+        }
+
+        case HAL_IMPL_GM206:
+        {
+            *gpuArch = GPU_ARCHITECTURE_MAXWELL2;
+            *gpuImpl = GPU_IMPLEMENTATION_GM206;
+            break;
+        }
+
+        case HAL_IMPL_GP100:
+        {
+            *gpuArch = GPU_ARCHITECTURE_PASCAL;
+            *gpuImpl = GPU_IMPLEMENTATION_GP100;
+            break;
+        }
+
+        case HAL_IMPL_GP102:
+        {
+            *gpuArch = GPU_ARCHITECTURE_PASCAL;
+            *gpuImpl = GPU_IMPLEMENTATION_GP102;
+            break;
+        }
+
+        case HAL_IMPL_GP104:
+        {
+            *gpuArch = GPU_ARCHITECTURE_PASCAL;
+            *gpuImpl = GPU_IMPLEMENTATION_GP104;
+            break;
+        }
+
+        case HAL_IMPL_GP106:
+        {
+            *gpuArch = GPU_ARCHITECTURE_PASCAL;
+            *gpuImpl = GPU_IMPLEMENTATION_GP106;
+            break;
+        }
+
+        case HAL_IMPL_GP107:
+        {
+            *gpuArch = GPU_ARCHITECTURE_PASCAL;
+            *gpuImpl = GPU_IMPLEMENTATION_GP107;
+            break;
+        }
+
+        case HAL_IMPL_GP108:
+        {
+            *gpuArch = GPU_ARCHITECTURE_PASCAL;
+            *gpuImpl = GPU_IMPLEMENTATION_GP108;
+            break;
+        }
+
+        case HAL_IMPL_GV100:
+        {
+            *gpuArch = GPU_ARCHITECTURE_VOLTA;
+            *gpuImpl = GPU_IMPLEMENTATION_GV100;
+            break;
+        }
+
+        case HAL_IMPL_GV11B:
+        {
+            *gpuArch = GPU_ARCHITECTURE_VOLTA2;
+            *gpuImpl = GPU_IMPLEMENTATION_GV11B;
+
break; + } + + case HAL_IMPL_TU102: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU102; + break; + } + + case HAL_IMPL_TU104: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU104; + break; + } + + case HAL_IMPL_TU106: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU106; + break; + } + + case HAL_IMPL_TU116: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU116; + break; + } + + case HAL_IMPL_TU117: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU117; + break; + } + + case HAL_IMPL_AMODEL: + { + *gpuArch = GPU_ARCHITECTURE_SIMS; + *gpuImpl = GPU_IMPLEMENTATION_AMODEL; + break; + } + + case HAL_IMPL_T124: + { + *gpuArch = GPU_ARCHITECTURE_T12X; + *gpuImpl = GPU_IMPLEMENTATION_T124; + break; + } + + case HAL_IMPL_T132: + { + *gpuArch = GPU_ARCHITECTURE_T13X; + *gpuImpl = GPU_IMPLEMENTATION_T132; + break; + } + + case HAL_IMPL_T210: + { + *gpuArch = GPU_ARCHITECTURE_T21X; + *gpuImpl = GPU_IMPLEMENTATION_T210; + break; + } + + case HAL_IMPL_T186: + { + *gpuArch = GPU_ARCHITECTURE_T18X; + *gpuImpl = GPU_IMPLEMENTATION_T186; + break; + } + + case HAL_IMPL_T194: + { + *gpuArch = GPU_ARCHITECTURE_T19X; + *gpuImpl = GPU_IMPLEMENTATION_T194; + break; + } + + case HAL_IMPL_T234D: + { + *gpuArch = GPU_ARCHITECTURE_T23X; + *gpuImpl = GPU_IMPLEMENTATION_T234D; + break; + } + + case HAL_IMPL_GA100: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA100; + break; + } + + case HAL_IMPL_GA102: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA102; + break; + } + + case HAL_IMPL_GA102F: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA102F; + break; + } + + case HAL_IMPL_GA104: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA104; + break; + } + + + default: + { + *gpuArch = 0; + *gpuImpl = 0; + NV_PRINTF(LEVEL_ERROR, "Invalid halimpl\n"); + DBG_BREAKPOINT(); + break; + } + } +} + +// +// default Logic: If arch is greater than requested --> NV_TRUE +// OR If arch is = requested AND impl is >= requested --> NV_TRUE +// +// NOTE: only defined for gpus within same gpu series +// +static NvBool +gpuSatisfiesTemporalOrder +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl, + NvU32 gpuArch, + NvU32 gpuImpl +) +{ + NvBool result = NV_FALSE; + + switch (halImpl) + { + // + // Comparison of Tegra series isn't straightforward with the chip ids + // following different formats and so we can't use them + // to figure out the relative ordering of chips. + // T12X, T13X use 0x40, 0x13. + // + case HAL_IMPL_T124: + { + result = gpuIsT124ImplementationOrBetter(pGpu); + break; + } + default: + { + NvU32 chipArch = gpuGetChipArch(pGpu); + NvU32 chipImpl = gpuGetChipImpl(pGpu); + + result = ((chipArch > gpuArch) || + ((chipArch == gpuArch) && + (chipImpl >= gpuImpl))); + break; + } + } + + return result; +} + +/*! + * @brief Checks if current GPU is T124OrBetter + * + * T124+ corresponds to BIG-GPU tegra chips that + * are either T124 or beyond. + * ChipArch which the generic implementation relies + * on doesn't give the hierarchy of chips + * accurately. Hence the explicit check for chips + * below. 
+ *
+ * @param[in]  pGpu   GPU object pointer
+ *
+ * @returns NV_TRUE if T124 or any later big-gpu tegra chip,
+ *          NV_FALSE otherwise
+ */
+static NvBool
+gpuIsT124ImplementationOrBetter
+(
+    OBJGPU *pGpu
+)
+{
+    NvU32 chipArch = gpuGetChipArch(pGpu);
+    NvU32 chipImpl = gpuGetChipImpl(pGpu);
+
+    //
+    // All Big-gpu chips like T124, T132 or later satisfy the condition.
+    // This makes the assumption that starting from T186, there are no
+    // AURORA chips.
+    //
+    return (((chipArch == GPU_ARCHITECTURE_T12X) && (chipImpl == GPU_IMPLEMENTATION_T124)) ||
+            ((chipArch == GPU_ARCHITECTURE_T13X) && (chipImpl == GPU_IMPLEMENTATION_T132)) ||
+            ((chipArch == GPU_ARCHITECTURE_T21X) && (chipImpl == GPU_IMPLEMENTATION_T210)) ||
+            ((chipArch == GPU_ARCHITECTURE_T19X) && (chipImpl == GPU_IMPLEMENTATION_T194)) ||
+            ((chipArch == GPU_ARCHITECTURE_T23X) && (chipImpl == GPU_IMPLEMENTATION_T234D)) ||
+            ((chipArch == GPU_ARCHITECTURE_T23X) && (chipImpl == GPU_IMPLEMENTATION_T232)) ||
+            ((chipArch == GPU_ARCHITECTURE_T23X) && (chipImpl == GPU_IMPLEMENTATION_T234)) ||
+            ((chipArch >= GPU_ARCHITECTURE_T18X) && (chipImpl == GPU_IMPLEMENTATION_T186)));
+}
+
+
+//
+// default Logic: If arch = requested AND impl = requested AND
+//                maskRev is >= requested --> NV_TRUE
+//
+static NvBool
+gpuSatisfiesTemporalOrderMaskRev
+(
+    OBJGPU *pGpu,
+    HAL_IMPLEMENTATION halImpl,
+    NvU32 gpuArch,
+    NvU32 gpuImpl,
+    NvU32 maskRevision
+)
+{
+    NvBool result = NV_FALSE;
+
+    result = ((gpuGetChipArch(pGpu) == gpuArch) &&
+              (gpuGetChipImpl(pGpu) == gpuImpl) &&
+              (GPU_GET_MASKREVISION(pGpu) >= maskRevision));
+
+    return result;
+}
+
+// =============== Engine Database ==============================
+
+typedef struct {
+    NvU32 clientEngineId;
+    NVOC_CLASS_ID class;
+    NvU32 instance;
+    NvBool bHostEngine;
+} EXTERN_TO_INTERNAL_ENGINE_ID;
+
+static const EXTERN_TO_INTERNAL_ENGINE_ID rmClientEngineTable[] =
+{
+    { NV2080_ENGINE_TYPE_GR0,      classId(Graphics)  , 0, NV_TRUE },
+    { NV2080_ENGINE_TYPE_GR1,      classId(Graphics)  , 1, NV_TRUE },
+    { NV2080_ENGINE_TYPE_GR2,      classId(Graphics)  , 2, NV_TRUE },
+    { NV2080_ENGINE_TYPE_GR3,      classId(Graphics)  , 3, NV_TRUE },
+    { NV2080_ENGINE_TYPE_GR4,      classId(Graphics)  , 4, NV_TRUE },
+    { NV2080_ENGINE_TYPE_GR5,      classId(Graphics)  , 5, NV_TRUE },
+    { NV2080_ENGINE_TYPE_GR6,      classId(Graphics)  , 6, NV_TRUE },
+    { NV2080_ENGINE_TYPE_GR7,      classId(Graphics)  , 7, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY0,    classId(OBJCE)     , 0, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY1,    classId(OBJCE)     , 1, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY2,    classId(OBJCE)     , 2, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY3,    classId(OBJCE)     , 3, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY4,    classId(OBJCE)     , 4, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY5,    classId(OBJCE)     , 5, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY6,    classId(OBJCE)     , 6, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY7,    classId(OBJCE)     , 7, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY8,    classId(OBJCE)     , 8, NV_TRUE },
+    { NV2080_ENGINE_TYPE_COPY9,    classId(OBJCE)     , 9, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVDEC0,   classId(OBJBSP)    , 0, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVDEC1,   classId(OBJBSP)    , 1, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVDEC2,   classId(OBJBSP)    , 2, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVDEC3,   classId(OBJBSP)    , 3, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVDEC4,   classId(OBJBSP)    , 4, NV_TRUE },
+    { NV2080_ENGINE_TYPE_CIPHER,   classId(OBJCIPHER) , 0, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVENC0,   classId(OBJMSENC)  , 0, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVENC1,   classId(OBJMSENC)  , 1, NV_TRUE },
+    { NV2080_ENGINE_TYPE_NVENC2,   classId(OBJMSENC)
, 2, NV_TRUE }, + { NV2080_ENGINE_TYPE_SW, classId(OBJSWENG) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_SEC2, classId(OBJSEC2) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVJPEG0, classId(OBJNVJPG) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_OFA, classId(OBJOFA) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_DPU, classId(OBJDPU) , 0, NV_FALSE }, + { NV2080_ENGINE_TYPE_PMU, classId(Pmu) , 0, NV_FALSE }, + { NV2080_ENGINE_TYPE_FBFLCN, classId(OBJFBFLCN) , 0, NV_FALSE }, + { NV2080_ENGINE_TYPE_HOST, classId(KernelFifo) , 0, NV_FALSE }, +}; + +NV_STATUS gpuConstructEngineTable_IMPL +( + OBJGPU *pGpu +) +{ + NvU32 engineId = 0; + + // Alloc engine DB + pGpu->engineDB.bValid = NV_FALSE; + pGpu->engineDB.pType = portMemAllocNonPaged( + NV_ARRAY_ELEMENTS(rmClientEngineTable) * sizeof(*pGpu->engineDB.pType)); + if (pGpu->engineDB.pType == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "gpuConstructEngineTable: Could not allocate engine DB\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + pGpu->engineDB.size = 0; // That's right, its the size not the capacity + // of the engineDB + + // Initialize per-GPU per-engine list of non-stall interrupt event nodes. + for (engineId = 0; engineId < NV2080_ENGINE_TYPE_LAST; engineId++) + { + pGpu->engineNonstallIntr[engineId].pEventNode = NULL; + pGpu->engineNonstallIntr[engineId].pSpinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (pGpu->engineNonstallIntr[engineId].pSpinlock == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +NV_STATUS gpuUpdateEngineTable_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + NvU32 counter = 0; + NvU32 numClasses = 0; + + if (pGpu->engineDB.pType == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "gpuUpdateEngineTable: EngineDB has not been created yet\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + if (pGpu->engineDB.bValid) + { + return NV_OK; + } + + // Read through the classDB and populate engineDB + pGpu->engineDB.size = 0; + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + // There are tests such as ClassA06fTest that attempt to bind all engines reported + if (!rmClientEngineTable[counter].bHostEngine) + { + continue; + } + + status = gpuGetClassList(pGpu, &numClasses, NULL, + MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance)); + if ((status != NV_OK) || ( numClasses == 0)) + { + continue; + } + pGpu->engineDB.pType[pGpu->engineDB.size++] = + rmClientEngineTable[counter].clientEngineId; + } + + pGpu->engineDB.bValid = NV_TRUE; + + return NV_OK; +} +void gpuDestroyEngineTable_IMPL(OBJGPU *pGpu) +{ + NvU32 engineId = 0; + + if (pGpu->engineDB.pType) + { + pGpu->engineDB.size = 0; + portMemFree(pGpu->engineDB.pType); + pGpu->engineDB.pType = NULL; + pGpu->engineDB.bValid = NV_FALSE; + } + + for (engineId = 0; engineId < NV2080_ENGINE_TYPE_LAST; engineId++) + { + NV_ASSERT(pGpu->engineNonstallIntr[engineId].pEventNode == NULL); + + if (pGpu->engineNonstallIntr[engineId].pSpinlock != NULL) + { + portSyncSpinlockDestroy(pGpu->engineNonstallIntr[engineId].pSpinlock); + } + } +} + +NvBool gpuCheckEngineTable_IMPL +( + OBJGPU *pGpu, + NvU32 engType +) +{ + NvU32 engineId; + + if (!IS_MODS_AMODEL(pGpu)) + { + NV_ASSERT_OR_RETURN(pGpu->engineDB.bValid, NV_FALSE); + } + + NV_ASSERT_OR_RETURN(engType < NV2080_ENGINE_TYPE_LAST, NV_FALSE); + + for (engineId = 0; engineId < pGpu->engineDB.size; engineId++) + { + if (engType == pGpu->engineDB.pType[engineId]) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +NV_STATUS 
+gpuXlateClientEngineIdToEngDesc_IMPL +( + OBJGPU *pGpu, + NvU32 clientEngineID, + ENGDESCRIPTOR *pEngDesc + +) +{ + NvU32 counter; + + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + if (rmClientEngineTable[counter].clientEngineId == clientEngineID) + { + *pEngDesc = MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance); + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuXlateEngDescToClientEngineId_IMPL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc, + NvU32 *pClientEngineID +) +{ + NvU32 counter; + + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + if (MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance) == engDesc) + { + *pClientEngineID = rmClientEngineTable[counter].clientEngineId; + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuGetFlcnFromClientEngineId_IMPL +( + OBJGPU *pGpu, + NvU32 clientEngineId, + Falcon **ppFlcn +) +{ + *ppFlcn = NULL; + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +gpuGetGidInfo_IMPL +( + OBJGPU *pGpu, + NvU8 **ppGidString, + NvU32 *pGidStrlen, + NvU32 gidFlags +) +{ + NV_STATUS rmStatus = NV_OK; + NvU8 gidData[RM_SHA1_GID_SIZE]; + NvU32 gidSize = RM_SHA1_GID_SIZE; + + if (!FLD_TEST_DRF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1,gidFlags)) + { + return NV_ERR_INVALID_FLAGS; + } + + if (pGpu->gpuUuid.isInitialized) + { + portMemCopy(gidData, gidSize, &pGpu->gpuUuid.uuid[0], gidSize); + goto fillGidData; + } + + rmStatus = gpuGenGidData_HAL(pGpu, gidData, gidSize, gidFlags); + + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // if not cached, cache it here + portMemCopy(&pGpu->gpuUuid.uuid[0], gidSize, gidData, gidSize); + pGpu->gpuUuid.isInitialized = NV_TRUE; + +fillGidData: + if (ppGidString != NULL) + { + if (FLD_TEST_DRF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY, + gidFlags)) + { + // + // Instead of transforming the Gid into a string, just use it in its + // original binary form. The allocation rules are the same as those + // followed by the transformGidToUserFriendlyString routine: we + // allocate ppGidString here, and the caller frees ppGidString. 
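+        //
+        // Illustrative caller sketch (compiled out) of that contract: the
+        // callee allocates, the caller frees. The gpuGetGidInfo() wrapper name
+        // and the DRF_DEF flag composition are assumptions made for
+        // illustration.
+        //
+#if 0
+        NvU8  *pGid   = NULL;
+        NvU32  gidLen = 0;
+        if (gpuGetGidInfo(pGpu, &pGid, &gidLen,
+                DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) |
+                DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY)) == NV_OK)
+        {
+            // ... consume the gidLen-byte binary UUID in pGid ...
+            portMemFree(pGid);
+        }
+#endif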
+ // + *ppGidString = portMemAllocNonPaged(gidSize); + if (*ppGidString == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemCopy(*ppGidString, gidSize, gidData, gidSize); + *pGidStrlen = gidSize; + } + else + { + NV_ASSERT_OR_RETURN(pGidStrlen != NULL, NV_ERR_INVALID_ARGUMENT); + rmStatus = transformGidToUserFriendlyString(gidData, gidSize, + ppGidString, pGidStrlen, gidFlags); + } + } + + return rmStatus; +} + +void +gpuSetDisconnectedProperties_IMPL +( + OBJGPU *pGpu +) +{ + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, NV_FALSE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_FALSE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_FALSE); + pGpu->bInD3Cold = NV_FALSE; + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE, NV_FALSE); + +} + +NV_STATUS +gpuAddConstructedFalcon_IMPL +( + OBJGPU *pGpu, + Falcon *pFlcn +) +{ + NV_ASSERT_OR_RETURN(pFlcn, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN( + pGpu->numConstructedFalcons < NV_ARRAY_ELEMENTS(pGpu->constructedFalcons), + NV_ERR_BUFFER_TOO_SMALL); + + pGpu->constructedFalcons[pGpu->numConstructedFalcons++] = pFlcn; + return NV_OK; +} + +NV_STATUS +gpuRemoveConstructedFalcon_IMPL +( + OBJGPU *pGpu, + Falcon *pFlcn +) +{ + NvU32 i, j; + for (i = 0; i < pGpu->numConstructedFalcons; i++) + { + if (pGpu->constructedFalcons[i] == pFlcn) + { + for (j = i+1; j < pGpu->numConstructedFalcons; j++) + { + pGpu->constructedFalcons[j-1] = pGpu->constructedFalcons[j]; + } + pGpu->numConstructedFalcons--; + pGpu->constructedFalcons[pGpu->numConstructedFalcons] = NULL; + return NV_OK; + } + } + NV_ASSERT_FAILED("Attempted to remove a non-existent initialized Falcon!"); + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +gpuGetConstructedFalcon_IMPL +( + OBJGPU *pGpu, + NvU32 index, + Falcon **ppFlcn +) +{ + if (index >= pGpu->numConstructedFalcons) + return NV_ERR_OUT_OF_RANGE; + + *ppFlcn = pGpu->constructedFalcons[index]; + NV_ASSERT(*ppFlcn != NULL); + return NV_OK; +} + +NV_STATUS gpuBuildGenericKernelFalconList_IMPL(OBJGPU *pGpu) +{ + return NV_OK; +} + +void gpuDestroyGenericKernelFalconList_IMPL(OBJGPU *pGpu) +{ +} + + +GenericKernelFalcon * +gpuGetGenericKernelFalconForEngine_IMPL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc +) +{ + return NULL; +} + +void gpuRegisterGenericKernelFalconIntrService_IMPL(OBJGPU *pGpu, void *pRecords) +{ +} + +/** + * @brief Initializes iterator for ENGDESCRIPTOR load order + * + * @return GPU_CHILD_ITER + */ +static ENGLIST_ITER +gpuGetEngineOrderListIter(OBJGPU *pGpu, NvU32 flags) +{ + ENGLIST_ITER it = { 0 }; + it.flags = flags; + return it; +} + + +static const GPUCHILDPRESENT * +gpuFindChildPresent(const GPUCHILDPRESENT *pChildPresentList, NvU32 numChildPresent, NvU32 classId) +{ + NvU32 i; + + for (i = 0; i < numChildPresent; i++) + { + if (pChildPresentList[i].classId == classId) + return &pChildPresentList[i]; + } + + return NULL; +} + +/*! 
+ * @brief Sanity checks on given gfid + * + * @param[in] pGpu OBJGPU pointer + * @param[in] gfid GFID to be validated + * @param[in] bInUse NV_TRUE if GFID is being set for use + */ +NV_STATUS +gpuSanityCheckGfid_IMPL(OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) +{ + // Error if pAllocatedGfids + if (pGpu->sriovState.pAllocatedGfids == NULL) + { + return NV_ERR_INVALID_ADDRESS; + } + + // Sanity check on GFID + if (gfid > pGpu->sriovState.maxGfid) + { + return NV_ERR_OUT_OF_RANGE; + } + else if((bInUse == NV_TRUE) && (pGpu->sriovState.pAllocatedGfids[gfid] == GFID_ALLOCATED)) + { + return NV_ERR_IN_USE; + } + else if((bInUse == NV_FALSE) && (pGpu->sriovState.pAllocatedGfids[gfid] == GFID_FREE)) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +/*! + * @brief Set/Unset bit in pAllocatedGfids + * + * @param[in] pGpu OBJGPU pointer + * @param[in] gfid GFID to be set/unset (Assumes GFID is sanity checked before calling this function) + * @param[in] bInUse NV_TRUE if GFID in use + */ +void +gpuSetGfidUsage_IMPL(OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) +{ + NV_ASSERT_OR_RETURN_VOID(pGpu->sriovState.pAllocatedGfids != NULL); + + if (bInUse == NV_TRUE) + pGpu->sriovState.pAllocatedGfids[gfid] = GFID_ALLOCATED; + else + pGpu->sriovState.pAllocatedGfids[gfid] = GFID_FREE; +} + +/** + * @brief Iterates over the engine ordering list + * + * @param[in,out] pIt Iterator + * @param[out] pEngDesc The next engine descriptor + * + * @return NV_TRUE if *pEngDesc is valid, NV_FALSE if there are no more engines + */ +NvBool +gpuGetNextInEngineOrderList(OBJGPU *pGpu, ENGLIST_ITER *pIt, PENGDESCRIPTOR pEngDesc) +{ + NvBool bReverse = !!(pIt->flags & (GCO_LIST_UNLOAD | GCO_LIST_DESTROY)); + const GPUCHILDORDER *pChildOrderList; + NvU32 numChildOrder; + const GPUCHILDPRESENT *pChildPresentList; + NvU32 numChildPresent; + const GPUCHILDPRESENT *pCurChildPresent; + const GPUCHILDORDER *pCurChildOrder; + NvBool bAdvance = NV_FALSE; + + pChildOrderList = gpuGetChildrenOrder_HAL(pGpu, &numChildOrder); + pChildPresentList = gpuGetChildrenPresent_HAL(pGpu, &numChildPresent); + + if (!pIt->bStarted) + { + pIt->bStarted = NV_TRUE; + pIt->childOrderIndex = bReverse ? (NvS32)numChildOrder - 1 : 0; + } + + while (1) + { + if (bAdvance) + pIt->childOrderIndex += bReverse ? -1 : 1; + + if ((pIt->childOrderIndex >= (NvS32)numChildOrder) || (pIt->childOrderIndex < 0)) + return NV_FALSE; + + pCurChildOrder = &pChildOrderList[pIt->childOrderIndex]; + + if ((pCurChildOrder->flags & pIt->flags) != pIt->flags) + { + bAdvance = NV_TRUE; + continue; + } + + pCurChildPresent = gpuFindChildPresent(pChildPresentList, numChildPresent, pCurChildOrder->classId); + + if (!pCurChildPresent) + { + bAdvance = NV_TRUE; + continue; + } + + if (bAdvance) + { + pIt->instanceID = bReverse ? pCurChildPresent->instances - 1 : 0; + } + + if ((pIt->instanceID < (NvS32)pCurChildPresent->instances) && (pIt->instanceID >= 0)) + { + *pEngDesc = MKENGDESC(pCurChildOrder->classId, pIt->instanceID); + + pIt->instanceID += bReverse ? 
-1 : 1;
+
+            return NV_TRUE;
+        }
+
+        bAdvance = NV_TRUE;
+    }
+
+    return NV_FALSE;
+}
+
+/**
+ * Set SLI broadcast state in threadstate if SLI is enabled for the GPU
+ */
+void
+gpuSetThreadBcState_IMPL(OBJGPU *pGpu, NvBool bcState)
+{
+    {
+        gpumgrSetBcEnabledStatus(pGpu, bcState);
+    }
+}
+
+
+NV_STATUS
+gpuInitDispIpHal_IMPL
+(
+    OBJGPU *pGpu,
+    NvU32   ipver
+)
+{
+    RmHalspecOwner *pRmHalspecOwner = staticCast(pGpu, RmHalspecOwner);
+    DispIpHal *pDispIpHal = &pRmHalspecOwner->dispIpHal;
+    KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
+
+    //
+    // A 0xFFFFFFFF ipver value happens when the Display engine is disabled. The NVOC
+    // Disp IP halspec doesn't have a hal variant that maps to this value, so convert
+    // it to DISPv0000.
+    //
+    if (ipver == 0xFFFFFFFF)
+    {
+        ipver = 0;
+    }
+    else if (ipver == 0x03010000)
+    {
+        //
+        // Display on GV100 has 0x0301 IP ver while it uses v0300 manuals. It is listed
+        // in disp.def IP_VERSIONS table as v03_00 since we added the chip. This wasn't a
+        // problem in chip-config as there it maps a range of IP ver to an implementation.
+        // Versions in "v0300 <= ipver < 0400" map to _v03_00 or lower IP version function.
+        // NVOC maps the exact number but not a range, thus we need to override the value
+        // when initializing the halspec.
+        //
+        ipver = 0x03000000;
+    }
+
+    __nvoc_init_halspec_DispIpHal(pDispIpHal, ipver & 0xFFFF0000);
+
+    if ((ipver & 0xFFFF0000) != 0)
+    {
+        DispIpHal dispIpHalv00;
+        __nvoc_init_halspec_DispIpHal(&dispIpHalv00, 0);
+
+        //
+        // At GPU creation time, dispIpHal.__nvoc_HalVarIdx is initialized with DISPv0000.
+        // Any valid non-zero IP version listed in halspec DispIpHal assigns __nvoc_HalVarIdx
+        // to a different value.
+        //
+        // If dispIpHal.__nvoc_HalVarIdx keeps the same idx as DISPv0000 for a non-zero ipver,
+        // this means the IP ver is not listed in halspec DispIpHal and should be fixed.
+        //
+        // NVOC-TODO : make __nvoc_init_halspec_DispIpHal return error code and remove the check
+        if (pDispIpHal->__nvoc_HalVarIdx == dispIpHalv00.__nvoc_HalVarIdx)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Invalid dispIpHal.__nvoc_HalVarIdx %d for Disp IP Version 0x%08x\n",
+                pDispIpHal->__nvoc_HalVarIdx, ipver);
+
+            NV_ASSERT(0);
+            return NV_ERR_INVALID_STATE;
+        }
+    }
+
+    void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *, RmHalspecOwner *);
+    __nvoc_init_funcTable_KernelDisplay(pKernelDisplay, pRmHalspecOwner);
+
+    void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory *, RmHalspecOwner *);
+    __nvoc_init_funcTable_DisplayInstanceMemory(KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay),
+                                                pRmHalspecOwner);
+
+    return NV_OK;
+}
+
+/*!
+ * @brief: Initialize chip related info
+ *   This function fills up the chip info structure of OBJGPU.
+ *
+ * @param[in]  pGpu   OBJGPU pointer
+ *
+ * @returns void
+ */
+
+void
+gpuInitChipInfo_IMPL
+(
+    OBJGPU *pGpu
+)
+{
+    //
+    // NOTE: Register access and DRF field splitting should generally always
+    // go in HAL functions, but PMC_BOOT_0 and PMC_BOOT_42 are an exception
+    // as these are guaranteed to remain the same across chips, since we use
+    // them to figure out which chip it is and how to wire up the HALs.
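+    //
+    // Illustrative sketch (compiled out): DRF_VAL() shifts and masks a named
+    // field out of a raw register value, as the assignments below do with
+    // PMC_BOOT_0/PMC_BOOT_42. The constant used here is hypothetical.
+    //
+#if 0
+    NvU32 boot0 = 0x12345678;   // hypothetical PMC_BOOT_0 readback
+    NvU32 impl  = DRF_VAL(_PMC, _BOOT_0, _IMPLEMENTATION, boot0);
+#endif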
+    //
+    pGpu->chipInfo.pmcBoot0.impl = DRF_VAL(_PMC, _BOOT_0, _IMPLEMENTATION, pGpu->chipId0);
+    pGpu->chipInfo.pmcBoot0.arch = DRF_VAL(_PMC, _BOOT_0, _ARCHITECTURE, pGpu->chipId0) << GPU_ARCH_SHIFT;
+    pGpu->chipInfo.pmcBoot0.majorRev = DRF_VAL(_PMC, _BOOT_0, _MAJOR_REVISION, pGpu->chipId0);
+    pGpu->chipInfo.pmcBoot0.minorRev = DRF_VAL(_PMC, _BOOT_0, _MINOR_REVISION, pGpu->chipId0);
+    pGpu->chipInfo.pmcBoot0.minorExtRev = NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_NONE;
+    pGpu->chipInfo.pmcBoot42.impl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, pGpu->chipId1);
+    pGpu->chipInfo.pmcBoot42.arch = DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pGpu->chipId1) << GPU_ARCH_SHIFT;
+    pGpu->chipInfo.pmcBoot42.majorRev = DRF_VAL(_PMC, _BOOT_42, _MAJOR_REVISION, pGpu->chipId1);
+    pGpu->chipInfo.pmcBoot42.minorRev = DRF_VAL(_PMC, _BOOT_42, _MINOR_REVISION, pGpu->chipId1);
+    pGpu->chipInfo.pmcBoot42.minorExtRev = DRF_VAL(_PMC, _BOOT_42, _MINOR_EXTENDED_REVISION, pGpu->chipId1);
+
+    //
+    // SOCs do not use pmcBoot0/pmcBoot42; they instead write the implementation
+    // details to these top-level chipInfo fields, which is what the getters return.
+    //
+    pGpu->chipInfo.implementationId = pGpu->chipInfo.pmcBoot42.impl;
+    pGpu->chipInfo.platformId = pGpu->chipInfo.pmcBoot42.arch;
+}
+
+/*!
+ * @brief: Returns physical address of end of DMA accessible range.
+ *
+ * @param[in] pGpu  GPU object pointer
+ *
+ * @returns physical address of end of DMA accessible range
+ */
+RmPhysAddr
+gpuGetDmaEndAddress_IMPL(OBJGPU *pGpu)
+{
+    NvU32 numPhysAddrBits = gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM);
+    RmPhysAddr dmaWindowStartAddr = gpuGetDmaStartAddress(pGpu);
+
+    return dmaWindowStartAddr + (1ULL << numPhysAddrBits) - 1;
+}
+
+void *gpuGetStaticInfo(OBJGPU *pGpu)
+{
+
+    return NULL;
+}
+
+void *gpuGetGspStaticInfo(OBJGPU *pGpu)
+{
+    return NULL;
+}
+
+OBJRPC *gpuGetGspClientRpc(OBJGPU *pGpu)
+{
+    if (IS_GSP_CLIENT(pGpu))
+    {
+        if (IsT234D(pGpu))
+            return GPU_GET_DCECLIENTRM(pGpu)->pRpc;
+    }
+    return NULL;
+}
+
+OBJRPC *gpuGetVgpuRpc(OBJGPU *pGpu)
+{
+    return NULL;
+}
+
+OBJRPC *gpuGetRpc(OBJGPU *pGpu)
+{
+    if (IS_VIRTUAL(pGpu))
+        return gpuGetVgpuRpc(pGpu);
+
+    if (IS_GSP_CLIENT(pGpu))
+        return gpuGetGspClientRpc(pGpu);
+
+    return NULL;
+}
+
+/*!
+ * @brief: Check if system memory is accessible by GPU
+ *         Depends on NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS, which is only exercised on Windows.
+ *
+ * @param[in] pGpu  OBJGPU pointer
+ *
+ * @returns NvBool  NV_TRUE if system memory is accessible,
+ *                  NV_FALSE otherwise
+ */
+NvBool
+gpuCheckSysmemAccess_IMPL(OBJGPU* pGpu)
+{
+    return NV_TRUE;
+}
+
+/*!
+ * @brief Read the pcie spec registers using config cycles
+ *
+ * @param[in]  pGpu   GPU object pointer
+ * @param[in]  index  Register offset in PCIe config space
+ * @param[out] pData  Value of the register
+ *
+ * @returns NV_OK on success
+ */
+NV_STATUS
+gpuReadBusConfigCycle_IMPL
+(
+    OBJGPU *pGpu,
+    NvU32   index,
+    NvU32  *pData
+)
+{
+    NvU32 domain = gpuGetDomain(pGpu);
+    NvU8 bus = gpuGetBus(pGpu);
+    NvU8 device = gpuGetDevice(pGpu);
+    NvU8 function = 0;
+
+    if (pGpu->hPci == NULL)
+    {
+        pGpu->hPci = osPciInitHandle(domain, bus, device, function, NULL, NULL);
+    }
+
+    *pData = osPciReadDword(pGpu->hPci, index);
+
+    return NV_OK;
+}
+
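+/*
+ * Editor's sketch (illustrative, not part of the original change): the
+ * config-cycle reader above can be composed into small helpers. Offset 0x0 of
+ * standard PCI config space holds the vendor ID in the low 16 bits, so a
+ * hypothetical identity check could look like this:
+ */
+static NvBool
+_gpuExampleIsVendorNvidia(OBJGPU *pGpu)
+{
+    NvU32 idDword = 0;
+
+    // Read the vendor/device ID dword via config cycles.
+    if (gpuReadBusConfigCycle_IMPL(pGpu, 0x0, &idDword) != NV_OK)
+    {
+        return NV_FALSE;
+    }
+
+    // 0x10DE is the NVIDIA PCI vendor ID.
+    return (idDword & 0xFFFF) == 0x10DE;
+}
+
+/*!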
+ * @brief Write to pcie spec registers using config cycles + * + * @param[in] pGpu GPU object pointer + * @param[in] index Register offset in PCIe config space + * @param[in] value Write this value to the register + * + * @returns NV_OK on success + */ +NV_STATUS +gpuWriteBusConfigCycle_IMPL +( + OBJGPU *pGpu, + NvU32 index, + NvU32 value +) +{ + NvU32 domain = gpuGetDomain(pGpu); + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU8 function = 0; + + if (pGpu->hPci == NULL) + { + pGpu->hPci = osPciInitHandle(domain, bus, device, function, NULL, NULL); + } + + osPciWriteDword(pGpu->hPci, index, value); + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c new file mode 100644 index 0000000..130b281 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c @@ -0,0 +1,1748 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/gpu.h" + +#include "core/thread_state.h" +#include "nv_ref.h" + +// Following enums are duplicated in 'apps/nvbucket/oca/ocarm.h'. 
+typedef enum {
+    BAD_READ_GPU_OFF_BUS = 1,
+    BAD_READ_LOW_POWER,
+    BAD_READ_PCI_DEVICE_DISABLED,
+    BAD_READ_GPU_RESET,
+    BAD_READ_DWORD_SHIFT,
+    BAD_READ_UNKNOWN,
+} RMCD_BAD_READ_REASON;
+
+static NV_STATUS _allocGpuIODevice(GPU_IO_DEVICE **ppIODevice);
+static void _gpuCleanRegisterFilterList(DEVICE_REGFILTER_INFO *);
+static NvU32 _gpuHandleReadRegisterFilter(OBJGPU *, DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, NvU32 accessSize, NvU32 *pFlags, THREAD_STATE_NODE *pThreadState);
+static void _gpuHandleWriteRegisterFilter(OBJGPU *, DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 *pFlags, THREAD_STATE_NODE *pThreadState);
+
+static void _gpuApertureWriteRegUnicast(OBJGPU *, IO_APERTURE *pAperture, NvU32 addr, NvV32 val, NvU32 size);
+static NvU32 _gpuApertureReadReg(IO_APERTURE *pAperture, NvU32 addr, NvU32 size);
+
+static NvU8 _gpuApertureReadReg008(IO_APERTURE *a, NvU32 addr);
+static NvU16 _gpuApertureReadReg016(IO_APERTURE *a, NvU32 addr);
+static NvU32 _gpuApertureReadReg032(IO_APERTURE *a, NvU32 addr);
+static void _gpuApertureWriteReg008(IO_APERTURE *a, NvU32 addr, NvV8 value);
+static void _gpuApertureWriteReg016(IO_APERTURE *a, NvU32 addr, NvV16 value);
+static void _gpuApertureWriteReg032(IO_APERTURE *a, NvU32 addr, NvV32 value);
+static void _gpuApertureWriteReg032Unicast(IO_APERTURE *a, NvU32 addr, NvV32 value);
+static NvBool _gpuApertureValidReg(IO_APERTURE *a, NvU32 addr);
+
+static REGISTER_FILTER * _findGpuRegisterFilter(DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, REGISTER_FILTER *);
+static NV_STATUS _gpuInitIODeviceAndAperture(OBJGPU *, NvU32, NvU32, RmPhysAddr, NvU32);
+
+NV_STATUS
+regAccessConstruct
+(
+    RegisterAccess *pRegisterAccess,
+    OBJGPU *pGpu
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    DEVICE_INDEX deviceIndex, minDeviceIndex, maxDeviceIndex;
+
+    pRegisterAccess->pGpu = pGpu;
+
+    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
+    {
+        // The DEVICE_INDEX_GPU aperture belongs to the GPU; Tegra SOC NvDisplay
+        // constructs the display device IO aperture as part of OBJDISP
+        // construction, so it's safe to skip this function.
+        return NV_OK;
+    }
+
+    // Check that GPU is the first device
+    ct_assert(DEVICE_INDEX_GPU == 0);
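+    //
+    // Editor's note: on SOC every device aperture gets its own IO device, so
+    // all DEVICE_INDEX_* entries are walked; a discrete GPU exposes a single
+    // unified BAR0 aperture, so only DEVICE_INDEX_GPU is initialized.
+    //
+    minDeviceIndex = DEVICE_INDEX_GPU;
+    maxDeviceIndex = pGpu->bIsSOC ?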
(DEVICE_INDEX_MAX - 1) : (DEVICE_INDEX_GPU); + + for (deviceIndex = minDeviceIndex; deviceIndex <= maxDeviceIndex; deviceIndex++) + { + // Initialize IO Device and Aperture + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, 0); + if (pMapping != NULL) + { + rmStatus = _gpuInitIODeviceAndAperture(pGpu, deviceIndex, + pMapping->gpuDeviceEnum, + pMapping->gpuNvPAddr, + pMapping->gpuNvLength); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO device/aperture for deviceIndex=%d.\n", deviceIndex); + return rmStatus; + } + } + } + + return rmStatus; +} + +void +regAccessDestruct +( + RegisterAccess *pRegisterAccess +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + DEVICE_INDEX deviceIndex; + NvU32 mappingNum; + IO_APERTURE *pIOAperture; + REGISTER_FILTER *pNode; + + // Ignore attempt to destruct a not-fully-constructed RegisterAccess + if (pGpu == NULL) + { + return; + } + + for (deviceIndex = 0; deviceIndex < DEVICE_INDEX_MAX; deviceIndex++) + { + pIOAperture = pGpu->pIOApertures[deviceIndex]; + if (pIOAperture != NULL) + { + portMemFree(pIOAperture->pDevice); + ioaccessDestroyIOAperture(pIOAperture); + } + } + + for (mappingNum = 0; mappingNum < pGpu->gpuDeviceMapCount; mappingNum++) + { + // Device-specific register filter list + NV_ASSERT(!pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterList); + if (NULL != pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock) + { + portSyncSpinlockDestroy(pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock); + pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock = NULL; + } + + while (pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList) + { + pNode = pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList; + + pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList = pNode->pNext; + portMemFree(pNode); + } + } +} + +/*! + * @brief Allocates GPU_IO_DEVICE object + * + * @param[in] ppIODevice Pointer to uninitialized GPU_IO_DEVICE + */ +static NV_STATUS +_allocGpuIODevice +( + GPU_IO_DEVICE **ppIODevice +) +{ + GPU_IO_DEVICE *pDevice; + + pDevice = portMemAllocNonPaged(sizeof(GPU_IO_DEVICE)); + if (pDevice == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "memory allocation failed for GPU IO Device\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + + portMemSet(pDevice, 0, sizeof(GPU_IO_DEVICE)); + + *ppIODevice = pDevice; + + return NV_OK; +} + + +// +// The following register I/O functions are organized into two groups; +// a multi-chip unaware group and a multi-chip aware group. +// The multi-chip aware group of register I/O functions is also split +// into two groups; one that really does multi-chip logic and another +// that has the same interface but doesn't do any of the multi-chip +// logic. +// +// In the interests of performance, the determination as to whether +// multi-chip logic is necessary is done at two levels; the upper-level +// functions use 'MC' register I/O macros where multi-chip considerations +// are required, and when the 'MC' register I/O macros are used they +// call through GPU object pointers that are polymorphic - they contain +// pointers to one of the two groups of multi-chip aware functions +// depending on whether the multi-chip condition actually exists. +// This avoids a run-time SLI LOOP call. 
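+// For example, regWrite032() below broadcasts by wrapping the unicast helper
+// in SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)/SLI_LOOP_END, while
+// regWrite032Unicast() calls _regWriteUnicast() directly with no loop.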
+// +static void +_regWriteUnicast +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvU32 val, + NvU32 size, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + NvU32 flags = 0; + NV_STATUS status; + DEVICE_MAPPING *pMapping; + + pRegisterAccess->regWriteCount++; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + addr, deviceIndex, instance); + NV_ASSERT(0); + return; + } + + status = gpuSanityCheckRegisterAccess(pGpu, addr, NULL); + if (status != NV_OK) + { + return; + } + + _gpuHandleWriteRegisterFilter(pGpu, deviceIndex, instance, addr, val, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_WRITE)) + { + switch (size) + { + case 8: + osDevWriteReg008(pGpu, pMapping, addr, 0xFFU & (val)); + break; + case 16: + osDevWriteReg016(pGpu, pMapping, addr, 0xFFFFU & (val)); + break; + case 32: + osDevWriteReg032(pGpu, pMapping, addr, val); + break; + } + } +} + +static void +_gpuApertureWriteRegUnicast +( + OBJGPU *pGpu, + IO_APERTURE *pAperture, + NvU32 addr, + NvV32 val, + NvU32 size +) +{ + NV_ASSERT_OR_RETURN_VOID(pAperture); + NV_ASSERT_OR_RETURN_VOID(pAperture->pDevice); + + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*) pAperture->pDevice; + NvU32 deviceIndex = pDevice->deviceIndex; + NvU32 instance = pDevice->instance; + NvU32 regAddr = pAperture->baseAddress + addr; + NvU32 flags = 0; + NV_STATUS status; + THREAD_STATE_NODE *pThreadState; + DEVICE_MAPPING *pMapping; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + regAddr, deviceIndex, instance); + NV_ASSERT(0); + return; + } + + status = gpuSanityCheckRegisterAccess(pGpu, regAddr, NULL); + if (status != NV_OK) + { + return; + } + + threadStateGetCurrentUnchecked(&pThreadState, pGpu); + + _gpuHandleWriteRegisterFilter(pGpu, deviceIndex, instance, regAddr, + val, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_WRITE)) + { + switch (size) + { + case 8: + osDevWriteReg008(pGpu, pMapping, regAddr, 0xFFU & (val)); + break; + case 16: + osDevWriteReg016(pGpu, pMapping, regAddr, 0xFFFFU & (val)); + break; + case 32: + osDevWriteReg032(pGpu, pMapping, regAddr, val); + break; + } + } +} + +void +_gpuApertureWriteReg008 +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV8 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + // + // NOTE: The SLI loop below reuses pAperture's values across all iterations + // OBJGPU's apertures are initialized to have the same baseAddress and length + // on all GPU device instances, so reusing the aperture here is fine. + // Device-specific instances are obtained via gpuGetDeviceMapping in the SLI loop. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 8 /* size */); + SLI_LOOP_END; +} + +void +_gpuApertureWriteReg016 +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV16 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + // + // NOTE: The SLI loop below reuses pAperture's values across all iterations + // OBJGPU's apertures are initialized to have the same baseAddress and length + // on all GPU device instances, so reusing the aperture here is fine. 
+ // Device-specific instances are obtained via gpuGetDeviceMapping in the SLI loop. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 16 /* size */); + SLI_LOOP_END; +} + +void +_gpuApertureWriteReg032 +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV32 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + // + // NOTE: The SLI loop below reuses pAperture's values across all iterations + // OBJGPU's apertures are initialized to have the same baseAddress and length + // on all GPU device instances, so reusing the aperture here is fine. + // Device-specific instances are obtained via gpuGetDeviceMapping in the SLI loop. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 32 /* size */); + SLI_LOOP_END; +} + +void +_gpuApertureWriteReg032Unicast +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV32 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 32 /* size */); +} + +void +regWrite008 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV8 val +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _regWriteUnicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, 8, NULL); + SLI_LOOP_END; +} +void +regWrite016 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV16 val +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _regWriteUnicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, 16, NULL); + SLI_LOOP_END; +} + +void +regWrite032 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV32 val, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + regWrite032Unicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, pThreadState); + SLI_LOOP_END +} + +void +regWrite032Unicast +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV32 val, + THREAD_STATE_NODE *pThreadState +) +{ + + _regWriteUnicast(pRegisterAccess, deviceIndex, instance, addr, val, 32, pThreadState); +} + +static NvU32 +_gpuApertureReadReg +( + IO_APERTURE *pAperture, + NvU32 addr, + NvU32 size +) +{ + NV_ASSERT_OR_RETURN(pAperture, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pAperture->pDevice, NV_ERR_INVALID_ARGUMENT); + + NvU32 flags = 0; + NvU32 returnValue = 0; + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*) pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + NV_STATUS status = NV_OK; + NvU32 regAddr = pAperture->baseAddress + addr; + NvU32 deviceIndex = pDevice->deviceIndex; + NvU32 instance = pDevice->instance; + THREAD_STATE_NODE *pThreadState; + + pGpu->registerAccess.regReadCount++; + + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (!pMapping) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + regAddr, deviceIndex, instance); + NV_ASSERT(0); + return 0xd0d0d0d0U; + } + + status = gpuSanityCheckRegisterAccess(pGpu, regAddr, NULL); + if (status != NV_OK) + { + return (~0); + } + + threadStateGetCurrentUnchecked(&pThreadState, pGpu); + + returnValue = 
_gpuHandleReadRegisterFilter(pGpu, deviceIndex, instance, + regAddr, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_READ)) + { + switch (size) + { + case 8: + returnValue = osDevReadReg008(pGpu, pMapping, regAddr); + break; + case 16: + returnValue = osDevReadReg016(pGpu, pMapping, regAddr); + break; + case 32: + returnValue = osDevReadReg032(pGpu, pMapping, regAddr); + break; + } + } + + // Make sure the value read is sane before we party on it. + gpuSanityCheckRegRead(pGpu, regAddr, size, &returnValue); + + return returnValue; +} + +NvU8 +_gpuApertureReadReg008 +( + IO_APERTURE *pAperture, + NvU32 addr +) +{ + return (NvU8) _gpuApertureReadReg(pAperture, addr, 8 /* size */); +} + +static NvU16 +_gpuApertureReadReg016 +( + IO_APERTURE *pAperture, + NvU32 addr +) +{ + return (NvU16) _gpuApertureReadReg(pAperture, addr, 16 /* size */); +} + +static NvU32 +_gpuApertureReadReg032 +( + IO_APERTURE *pAperture, + NvU32 addr + +) +{ + return _gpuApertureReadReg(pAperture, addr, 32 /* size */); +} + +/*! + * Checks if the register address is valid for a particular aperture + * + * @param[in] pAperture IO_APERTURE pointer + * @param[in] addr register address + * + * @returns NV_TRUE Register offset is valid + */ +static NvBool +_gpuApertureValidReg +( + IO_APERTURE *pAperture, + NvU32 addr +) +{ + NV_ASSERT_OR_RETURN(pAperture != NULL, NV_FALSE); + + return addr < pAperture->length; +} + +static NvU32 +_regRead +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvU32 size, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 flags = 0; + NvU32 returnValue = 0; + OBJGPU *pGpu = pRegisterAccess->pGpu; + DEVICE_MAPPING *pMapping; + NV_STATUS status = NV_OK; + + pRegisterAccess->regReadCount++; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + addr, deviceIndex, instance); + NV_ASSERT(0); + return 0xd0d0d0d0; + } + + if ((size == 32) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE)) + { + return osDevReadReg032(pGpu, pMapping, addr); + } + + status = gpuSanityCheckRegisterAccess(pGpu, addr, &returnValue); + if (status != NV_OK) + return returnValue; + + returnValue = _gpuHandleReadRegisterFilter(pGpu, deviceIndex, instance, + addr, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_READ)) + { + switch (size) + { + case 8: + returnValue = osDevReadReg008(pGpu, pMapping, addr); + break; + case 16: + returnValue = osDevReadReg016(pGpu, pMapping, addr); + break; + case 32: + returnValue = osDevReadReg032(pGpu, pMapping, addr); + break; + } + } + + // Make sure the value read is sane before we party on it. + gpuSanityCheckRegRead(pGpu, addr, size, &returnValue); + + return returnValue; +} + +NvU8 +regRead008 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr +) +{ + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 8, NULL); +} + +NvU16 +regRead016 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr +) +{ + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 16, NULL); +} + +/*! + * This function is used for converting do-while read register constructs in RM to + * equivalent PMU sequencer handling. The idea is to construct seq instruction + * which polls on a field in the given register. 
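+ * For example (editor's illustration, hypothetical names), a caller that
+ * previously spun in a do-while loop on GPU_REG_RD32() waiting for a status
+ * field to go idle can instead write:
+ *
+ *     regRead032_AndPoll(pRegisterAccess, DEVICE_INDEX_GPU, statusReg,
+ *                        statusFieldMask, statusIdleValue);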
+ * + * @param[in] pRegisterAccess RegisterAccess object pointer + * @param[in] deviceIndex deviceIndex + * @param[in] addr register address + * @param[in] mask required mask for the field + * @param[in] val value to poll for + * + * @returns NV_OK if val is found + * NV_ERR_TIMEOUT if val is not found within timeout limit + */ +NV_STATUS +regRead032_AndPoll +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 addr, + NvU32 mask, + NvU32 val +) +{ + RMTIMEOUT timeout; + OBJGPU *pGpu = pRegisterAccess->pGpu; + NvU32 data = 0; + NV_STATUS status = NV_OK; + + { + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + + do + { + data = GPU_REG_RD32(pGpu, addr); + + if ((data & mask) == val) + { + status = NV_OK; + break; + } + + // Loosen this loop + osSpinLoop(); + + status = gpuCheckTimeout(pGpu, &timeout); + } while (status != NV_ERR_TIMEOUT); + } + + return status; +} + +NvU32 +regRead032 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + THREAD_STATE_NODE *pThreadState +) +{ + if (pRegisterAccess == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 32, pThreadState); +} + +/*! + * @brief Allocates and initializes GPU_IO_DEVICE and IO Aperture. + * + * @param pGpu + * @param[in] deviceIndex DEVICE_INDEX enum value for identifying device type + * @param[in] gpuDeviceEnum Device ID NV_DEVID_* + * @param[in] gpuNvPAddr Physical Base Address + * @param[in] gpuNvLength Length of Aperture + * + * @return NV_OK if IO Aperture is successfully initialized, error otherwise. + */ +static NV_STATUS +_gpuInitIODeviceAndAperture +( + OBJGPU *pGpu, + NvU32 deviceIndex, + NvU32 gpuDeviceEnum, + RmPhysAddr gpuNvPAddr, + NvU32 gpuNvLength +) +{ + NV_STATUS rmStatus; + GPU_IO_DEVICE *pIODevice = NULL; + + // Initialize GPU IO Device + rmStatus = _allocGpuIODevice(&pIODevice); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO device for devIdx %d.\n", + deviceIndex); + + return rmStatus; + } + + pIODevice->pGpu = pGpu; + pIODevice->deviceIndex = deviceIndex; + pIODevice->gpuDeviceEnum = gpuDeviceEnum; + pIODevice->gpuNvPAddr = gpuNvPAddr; + pIODevice->gpuNvLength = gpuNvLength; + pIODevice->refCount = 0; + + // GPU register operations are always on instance 0 + pIODevice->instance = 0; + + // Initialize register functions in IO_DEVICE + pIODevice->parent.pReadReg008Fn = (ReadReg008Fn*) &_gpuApertureReadReg008; + pIODevice->parent.pReadReg016Fn = (ReadReg016Fn*) &_gpuApertureReadReg016; + pIODevice->parent.pReadReg032Fn = (ReadReg032Fn*) &_gpuApertureReadReg032; + pIODevice->parent.pWriteReg008Fn = (WriteReg008Fn*) &_gpuApertureWriteReg008; + pIODevice->parent.pWriteReg016Fn = (WriteReg016Fn*) &_gpuApertureWriteReg016; + pIODevice->parent.pWriteReg032Fn = (WriteReg032Fn*) &_gpuApertureWriteReg032; + pIODevice->parent.pWriteReg032UcFn = (WriteReg032Fn*) &_gpuApertureWriteReg032Unicast; + pIODevice->parent.pValidRegFn = (ValidRegFn*) &_gpuApertureValidReg; + + rmStatus = ioaccessCreateIOAperture(&pGpu->pIOApertures[deviceIndex], + NULL, // no parent aperture + (IO_DEVICE*) pIODevice, + 0, gpuNvLength); // offset, length + if (rmStatus != NV_OK) + { + portMemFree(pIODevice); + + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO aperture for devIdx %d.\n", + deviceIndex); + + return rmStatus; + } + + return NV_OK; +} + + +NV_STATUS +regAddRegisterFilter +( + RegisterAccess *pRegisterAccess, + NvU32 flags, + DEVICE_INDEX devIndex, NvU32 
devInstance, + NvU32 rangeStart, NvU32 rangeEnd, + GpuWriteRegCallback pWriteCallback, + GpuReadRegCallback pReadCallback, + void *pParam, + REGISTER_FILTER **ppFilter +) +{ + DEVICE_REGFILTER_INFO *pRegFilter; + REGISTER_FILTER *pNode; + REGISTER_FILTER *pTmpNode; + DEVICE_MAPPING *pMapping; + + NV_ASSERT_OR_RETURN(devIndex < DEVICE_INDEX_MAX, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pRegisterAccess != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(ppFilter != NULL, NV_ERR_INVALID_ARGUMENT); + + // Get the device filter + pMapping = gpuGetDeviceMapping(pRegisterAccess->pGpu, devIndex, devInstance); + NV_ASSERT_OR_RETURN(pMapping != NULL, NV_ERR_INVALID_ARGUMENT); + + pRegFilter = &pMapping->devRegFilterInfo; + + if (!pWriteCallback && !pReadCallback) + { + // At least one register callback needs to be passed. + NV_PRINTF(LEVEL_ERROR, + "Need to specify at least one callback function.\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT(!(flags & REGISTER_FILTER_FLAGS_INVALID)); + + if ((flags & REGISTER_FILTER_FLAGS_READ) && !pReadCallback) + { + // If REGISTER_FILTER_FLAGS_READ is specified, then a read + // callback must also be specified. + NV_PRINTF(LEVEL_ERROR, + "REGISTER_FILTER_FLAGS_READ requires a read callback function.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + if ((flags & REGISTER_FILTER_FLAGS_WRITE) && !pWriteCallback) + { + // If REGISTER_FILTER_FLAGS_WRITE is specified, then a write + // callback must also be specified. + NV_PRINTF(LEVEL_ERROR, + "REGISTER_FILTER_FLAGS_WRITE requires a write callback function.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + // If the regfilter hasn't been used yet, then allocate a lock + if (NULL == pRegFilter->pRegFilterLock) + { + // Allocate spinlock for reg filter access + pRegFilter->pRegFilterLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + NV_ASSERT_OR_RETURN(pRegFilter->pRegFilterLock != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + } + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + + if (NULL != pRegFilter->pRegFilterRecycleList) + { + pNode = pRegFilter->pRegFilterRecycleList; + pRegFilter->pRegFilterRecycleList = pNode->pNext; + } + else + { + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + pNode = portMemAllocNonPaged(sizeof(REGISTER_FILTER)); + if (NULL == pNode) + { + return NV_ERR_NO_MEMORY; + } + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + } + + // Print a warning if there's another register filter already registered. + if (((pTmpNode = _findGpuRegisterFilter(devIndex, devInstance, rangeStart, pRegFilter->pRegFilterList)) != NULL) || + ((pTmpNode = _findGpuRegisterFilter(devIndex, devInstance, rangeEnd, pRegFilter->pRegFilterList)) != NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "WARNING!! Previously registered reg filter found. 
Handle: %p, dev: " + "%d(%d) Range : 0x%x - 0x%x, WR/RD Callback: %p/%p, flags : %x\n", + pTmpNode, pTmpNode->devIndex, pTmpNode->devInstance, + pTmpNode->rangeStart, pTmpNode->rangeEnd, + pTmpNode->pWriteCallback, pTmpNode->pReadCallback, + pTmpNode->flags); + } + + // Populate structure + pNode->flags = flags; + pNode->devIndex = devIndex; + pNode->devInstance = devInstance; + pNode->rangeStart = rangeStart; + pNode->rangeEnd = rangeEnd; + pNode->pWriteCallback = pWriteCallback; + pNode->pReadCallback = pReadCallback; + pNode->pParam = pParam; + + // Link in + pNode->pNext = pRegFilter->pRegFilterList; + pRegFilter->pRegFilterList = pNode; + + // return pNode + *ppFilter = pNode; + + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return NV_OK; +} + +void +regRemoveRegisterFilter +( + RegisterAccess *pRegisterAccess, + REGISTER_FILTER *pFilter +) +{ + REGISTER_FILTER *pNode; + REGISTER_FILTER *pPrev = NULL; + REGISTER_FILTER *pNext = NULL; + DEVICE_REGFILTER_INFO *pRegFilter; + DEVICE_MAPPING *pMapping; + + // Get the device filter + pMapping = gpuGetDeviceMapping(pRegisterAccess->pGpu, pFilter->devIndex, pFilter->devInstance); + NV_ASSERT_OR_RETURN_VOID(pMapping != NULL); + + pRegFilter = &pMapping->devRegFilterInfo; + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + pNode = pRegFilter->pRegFilterList; + while (pNode) + { + // + // we could have used a doubly linked list to do a quick removal, but + // iterating the list to find the match serves as sanity test, so let's + // stick with a singly linked list. + // + if (pNode == pFilter) + { + if (pRegFilter->regFilterRefCnt > 0) + { + // defer removal if another thread is working on the list + pNode->flags |= REGISTER_FILTER_FLAGS_INVALID; + pRegFilter->bRegFilterNeedRemove = NV_TRUE; + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return; + } + + // Unlink + pNext = pNode->pNext; + + // place on recycle list + pNode->pNext = pRegFilter->pRegFilterRecycleList; + pRegFilter->pRegFilterRecycleList = pNode; + + if (pPrev) + { + pPrev->pNext = pNext; + } + else + { + pRegFilter->pRegFilterList = pNext; + } + + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return; + } + + pPrev = pNode; + pNode = pNode->pNext; + } + NV_ASSERT_FAILED("Attempted to remove a nonexistent filter"); + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); +} + +// called with lock held +static void +_gpuCleanRegisterFilterList +( + DEVICE_REGFILTER_INFO *pRegFilter +) +{ + REGISTER_FILTER *pNode = pRegFilter->pRegFilterList; + REGISTER_FILTER *pPrev = NULL; + REGISTER_FILTER *pNext = NULL; + + while (pNode) + { + if (pNode->flags & REGISTER_FILTER_FLAGS_INVALID) + { + // Unlink + pNext = pNode->pNext; + + // place on recycle list + pNode->pNext = pRegFilter->pRegFilterRecycleList; + pRegFilter->pRegFilterRecycleList = pNode; + + if (pPrev) + { + pPrev->pNext = pNext; + } + else + { + pRegFilter->pRegFilterList = pNext; + } + + pNode = pNext; + continue; + } + + pPrev = pNode; + pNode = pNode->pNext; + } +} + +static NvU32 +_gpuHandleReadRegisterFilter +( + OBJGPU *pGpu, + DEVICE_INDEX devIndex, + NvU32 devInstance, + NvU32 addr, + NvU32 accessSize, + NvU32 *pFlags, + THREAD_STATE_NODE *pThreadState +) +{ + REGISTER_FILTER *pFilter; + NvU32 returnValue = 0; + NvU32 tempVal = 0; + DEVICE_REGFILTER_INFO *pRegFilter; + DEVICE_MAPPING *pMapping; + + // Get the device filter + pMapping = gpuGetDeviceMapping(pGpu, devIndex, devInstance); + NV_ASSERT_OR_RETURN(pMapping != NULL, returnValue); + + pRegFilter = &pMapping->devRegFilterInfo; + + 
// if there is no filter, do nothing. just bail out.
+    if (pRegFilter->pRegFilterList == NULL)
+    {
+        return returnValue;
+    }
+
+    if (pThreadState != NULL)
+    {
+        // Filters should only be used while the GPU lock is held.
+        if (pThreadState->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+        {
+            return returnValue;
+        }
+    }
+#ifdef DEBUG
+    else
+    {
+        THREAD_STATE_NODE *pCurThread;
+
+        if (NV_OK == threadStateGetCurrentUnchecked(&pCurThread, pGpu))
+        {
+            // Filters should only be used while the GPU lock is held.
+            // Assert because ISRs are expected to pass threadstate down the stack.
+            // Don't bail out, to keep release and debug path behavior identical.
+            if (pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+            {
+                NV_ASSERT(0);
+            }
+        }
+    }
+#endif
+
+    //
+    // NOTE: we can't simply grab the lock and release it after the search,
+    // since it is not safe to assume that callbacks can be called with the
+    // spinlock held
+    //
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt++;
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+
+    //
+    // Note there is a potential race condition where a filter may be added or
+    // removed in one thread (dispatch) while another thread is searching the
+    // list. This search should have a lock in place.
+    //
+    pFilter = pRegFilter->pRegFilterList;
+    while ((pFilter) && (pFilter = _findGpuRegisterFilter(devIndex, devInstance, addr, pFilter)))
+    {
+        if (pFilter->pReadCallback)
+        {
+            tempVal = pFilter->pReadCallback(pGpu, pFilter->pParam, addr,
+                                             accessSize, *pFlags);
+            //
+            // if there are multiple filters, we use the last filter found to
+            // save returnValue
+            //
+            if (pFilter->flags & REGISTER_FILTER_FLAGS_READ)
+            {
+                returnValue = tempVal;
+            }
+        }
+        *pFlags |= pFilter->flags;
+        pFilter = pFilter->pNext;
+    }
+
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt--;
+    if (pRegFilter->regFilterRefCnt == 0 && pRegFilter->bRegFilterNeedRemove)
+    {
+        // no other thread can be touching the list. remove invalid entries
+        _gpuCleanRegisterFilterList(pRegFilter);
+        pRegFilter->bRegFilterNeedRemove = NV_FALSE;
+    }
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+    return returnValue;
+}
+
+static void
+_gpuHandleWriteRegisterFilter
+(
+    OBJGPU            *pGpu,
+    DEVICE_INDEX       devIndex,
+    NvU32              devInstance,
+    NvU32              addr,
+    NvU32              val,
+    NvU32              accessSize,
+    NvU32             *pFlags,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    REGISTER_FILTER *pFilter;
+    DEVICE_REGFILTER_INFO *pRegFilter;
+    DEVICE_MAPPING *pMapping;
+
+    // Get the device filter
+    pMapping = gpuGetDeviceMapping(pGpu, devIndex, devInstance);
+    NV_ASSERT_OR_RETURN_VOID(pMapping != NULL);
+
+    pRegFilter = &pMapping->devRegFilterInfo;
+
+    // if there is no filter, do nothing. just bail out.
+    if (pRegFilter->pRegFilterList == NULL)
+    {
+        return;
+    }
+
+    if (pThreadState != NULL)
+    {
+        // Filters should only be used while the GPU lock is held.
+        if (pThreadState->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+        {
+            return;
+        }
+    }
+#ifdef DEBUG
+    else
+    {
+        THREAD_STATE_NODE *pCurThread;
+
+        if (NV_OK == threadStateGetCurrentUnchecked(&pCurThread, pGpu))
+        {
+            // Filters should only be used while the GPU lock is held.
+            // Assert because ISRs are expected to pass threadstate down the stack.
+            // Don't bail out, to keep release and debug path behavior identical.
+            if (pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)
+            {
+                NV_ASSERT(0);
+            }
+        }
+    }
+#endif
+
+    //
+    // NOTE: we can't simply grab the lock and release it after the search,
+    // since it is not safe to assume that callbacks can be called with the
+    // spinlock held
+    //
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt++;
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+
+    //
+    // Note there is a potential race condition where a filter may be added or
+    // removed in one thread (dispatch) while another thread is searching the
+    // list. This search should have a lock in place.
+    //
+    pFilter = pRegFilter->pRegFilterList;
+    while ((pFilter) && (pFilter = _findGpuRegisterFilter(devIndex, devInstance, addr, pFilter)))
+    {
+        if (pFilter->pWriteCallback)
+        {
+            pFilter->pWriteCallback(pGpu, pFilter->pParam, addr, val,
+                                    accessSize, *pFlags);
+        }
+        *pFlags |= pFilter->flags;
+        pFilter = pFilter->pNext;
+    }
+
+    portSyncSpinlockAcquire(pRegFilter->pRegFilterLock);
+    pRegFilter->regFilterRefCnt--;
+    if (pRegFilter->regFilterRefCnt == 0 && pRegFilter->bRegFilterNeedRemove)
+    {
+        // no other thread can be touching the list. remove invalid entries
+        _gpuCleanRegisterFilterList(pRegFilter);
+        pRegFilter->bRegFilterNeedRemove = NV_FALSE;
+    }
+    portSyncSpinlockRelease(pRegFilter->pRegFilterLock);
+}
+
+static REGISTER_FILTER *
+_findGpuRegisterFilter
+(
+    DEVICE_INDEX devIndex,
+    NvU32 devInstance,
+    NvU32 addr,
+    REGISTER_FILTER *pFilter
+)
+{
+    while (pFilter != NULL)
+    {
+        if (!(pFilter->flags & REGISTER_FILTER_FLAGS_INVALID) &&
+            (devIndex == pFilter->devIndex) &&
+            (devInstance == pFilter->devInstance) &&
+            (addr >= pFilter->rangeStart) && (addr <= pFilter->rangeEnd))
+        {
+            break;
+        }
+
+        pFilter = pFilter->pNext;
+    }
+
+    return pFilter;
+}
+
+static NvBool
+_gpuEnablePciMemSpaceAndCheckPmcBoot0Match
+(
+    OBJGPU *pGpu
+)
+{
+    NvU16 VendorId;
+    NvU16 DeviceId;
+    NvU8 bus = gpuGetBus(pGpu);
+    NvU8 device = gpuGetDevice(pGpu);
+    NvU32 domain = gpuGetDomain(pGpu);
+    void *Handle = osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId);
+    NvU32 Enabled = osPciReadDword(Handle, NV_CONFIG_PCI_NV_1);
+    NvU32 pmcBoot0;
+
+    // If Memory Spaced is not enabled, enable it
+    if (DRF_VAL(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, Enabled) != NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED)
+    {
+        osPciWriteDword(Handle, NV_CONFIG_PCI_NV_1,
+                        Enabled |
+                        (DRF_DEF(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, _ENABLED) |
+                         DRF_DEF(_CONFIG, _PCI_NV_1, _BUS_MASTER, _ENABLED)));
+    }
+
+    // Check PMC_BOOT_0 to make sure that it matches
+    pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);
+    if (pmcBoot0 == pGpu->chipId0)
+    {
+        return NV_TRUE;
+    }
+
+    return NV_FALSE;
+}
+
+static NvU32
+_regCheckReadFailure
+(
+    OBJGPU *pGpu,
+    NvU32 value
+)
+{
+    NvU32 flagsFailed;
+    NvU32 reason = BAD_READ_UNKNOWN;
+
+    if ((!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH)) &&
+        (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST)))
+    {
+        gpuSanityCheck(pGpu, GPU_SANITY_CHECK_FLAGS_ALL, &flagsFailed);
+
+        // This is where we need to determine why we might be seeing this failure
+        if (value == GPU_REG_VALUE_INVALID)
+        {
+            // Does PCI Space Match
+            if (flagsFailed & GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH)
+            {
+                reason = BAD_READ_GPU_OFF_BUS;
+                goto exit;
+            }
+
+            // Is Memory Spaced Enabled
+            if (flagsFailed & GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED)
+            {
+                reason = BAD_READ_PCI_DEVICE_DISABLED;
+
+                if (!_gpuEnablePciMemSpaceAndCheckPmcBoot0Match(pGpu))
+                {
+                    // We have been reset!
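+                    // (Memory space was just re-enabled above, yet PMC_BOOT_0
+                    // no longer matches the chip ID cached in pGpu->chipId0,
+                    // so the device has lost its configuration.)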
+ reason = BAD_READ_GPU_RESET; + goto exit; + } + } + } + + // Are we off by N + if (flagsFailed & GPU_SANITY_CHECK_FLAGS_OFF_BY_N) + { + reason = BAD_READ_DWORD_SHIFT; + } + } + else + { + reason = BAD_READ_LOW_POWER; + } + +exit: + return reason; +} + +void +regCheckAndLogReadFailure +( + RegisterAccess *pRegisterAccess, + NvU32 addr, + NvU32 mask, + NvU32 value +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + const NvU32 failureReason = _regCheckReadFailure(pGpu, value); + + PORT_UNREFERENCED_VARIABLE(failureReason); +} + +NvU32 +regCheckRead032 +( + RegisterAccess *pRegisterAccess, + NvU32 addr, + NvU32 mask, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 returnValue; + OBJGPU *pGpu = pRegisterAccess->pGpu; + + returnValue = GPU_REG_RD32_EX(pGpu, addr, pThreadState); + if (returnValue & mask) + { + if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + regCheckAndLogReadFailure(pRegisterAccess, addr, mask, returnValue); + returnValue = 0; + } + + return returnValue; +} + +#if GPU_REGISTER_ACCESS_DUMP + +NvU8 +gpuRegRd08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU8 val = REG_INST_RD08(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU8 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ func: %s, reg name: %s, addr: %08x, val: %02x\n", + func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +NvU16 +gpuRegRd16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU16 val = REG_INST_RD16(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU16 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ func: %s, reg name: %s, addr: %08x, val: %04x\n", + func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +NvU32 +gpuRegRd32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU32 val = REG_INST_RD32(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU32 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ %s func: %s, reg name: %s, addr: %08x, val: %08x\n", + vreg, func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +void +gpuRegWr08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV8 val) +{ + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE func: %s, reg name: %s, addr: %08x, val: %02x\n", + func, addrStr, addr, val); + } + REG_INST_WR08(pGpu, GPU, 0, addr, val); +} + +void +gpuRegWr16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV16 val) +{ + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE func: %s, reg 
name: %s, addr: %08x, val: %04x\n",
+                  func, addrStr, addr, val);
+    }
+    REG_INST_WR16(pGpu, GPU, 0, addr, val);
+}
+
+void
+gpuRegWr32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val)
+{
+    // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 )
+    if ((addr & 0xFFF00000) != 0x00700000)
+    {
+        NV_PRINTF(LEVEL_NOTICE,
+                  "WRITE %s func: %s, reg name: %s, addr: %08x, val: %08x\n",
+                  vreg, func, addrStr, addr, val);
+    }
+    REG_INST_WR32(pGpu, GPU, 0, addr, val);
+}
+
+void
+gpuRegWr32Uc_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val)
+{
+    // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 )
+    if ((addr & 0xFFF00000) != 0x00700000)
+    {
+        NV_PRINTF(LEVEL_NOTICE,
+                  "WRITE func: %s, reg name: %s, addr: %08x, val: %08x\n",
+                  func, addrStr, addr, val);
+    }
+    REG_INST_WR32_UC(pGpu, GPU, 0, addr, val);
+}
+
+#endif // GPU_REGISTER_ACCESS_DUMP
+
+/*!
+ * @brief Do any sanity checks for the GPU's state before actually reading/writing to the chip.
+ *
+ * @param[in]  pGpu     OBJGPU pointer
+ * @param[in]  addr     Address of the register to be sanity checked
+ * @param[out] pRetVal  Default return value for read accesses in case of sanity check failure. Only for U032 HALs.
+ *
+ * @returns NV_ERR_GPU_IN_FULLCHIP_RESET if GPU is in reset
+ *          NV_ERR_GPU_IS_LOST if GPU is inaccessible
+ *          NV_ERR_GPU_NOT_FULL_POWER if GPU is not at full power AND
+ *              GPU is not in the resume codepath AND
+ *              sim low-power reg access is disabled
+ *          NV_OK otherwise
+ */
+NV_STATUS
+gpuSanityCheckRegisterAccess_IMPL
+(
+    OBJGPU *pGpu,
+    NvU32   addr,
+    NvU32  *pRetVal
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU32 retVal = ~0;
+
+    if (API_GPU_IN_RESET_SANITY_CHECK(pGpu))
+    {
+        status = NV_ERR_GPU_IN_FULLCHIP_RESET;
+        goto done;
+    }
+
+    if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+    {
+        status = NV_ERR_GPU_IS_LOST;
+        goto done;
+    }
+
+    if ((status = gpuSanityCheckVirtRegAccess_HAL(pGpu, addr)) != NV_OK)
+    {
+        // Return 0 to match with HW behavior
+        retVal = 0;
+        goto done;
+    }
+
+    //
+    // Make sure the GPU is in full power or resuming. When the OS has put the
+    // GPU in suspend (i.e. any of the D3 variants) there's no guarantee the GPU
+    // is accessible over PCI-E: the GPU may be completely powered off, the
+    // upstream bridges may not be properly configured, etc. Attempts to access
+    // the GPU may then result in PCI-E errors and/or bugchecks. For examples,
+    // see Bugs 440565 and 479003.
+    // On MsHybrid, the OS will make sure we are up and alive before calling
+    // into the driver, so we can skip this check on MsHybrid.
+    //
+    // DO NOT IGNORE OR REMOVE THIS ASSERT. It is a warning that improperly
+    // written RM code further up the stack is trying to access a GPU which is
+    // in suspend (i.e. low power). Any entry points into the RM (especially
+    // those between GPUs or for asynchronous callbacks) should always check
+    // that the GPU is in full power via gpuIsGpuFullPower(), bailing out in the
+    // appropriate manner when it returns NV_FALSE.
+    //
+    // If you are not an RM engineer and are encountering this assert, please
+    // file a bug against the RM.
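+    //
+    // A well-behaved entry point checks power state before touching registers,
+    // e.g. (editor's illustration):
+    //
+    //     if (!gpuIsGpuFullPower(pGpu))
+    //         return NV_ERR_GPU_NOT_FULL_POWER;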
+ // + if ((gpuIsGpuFullPower(pGpu) == NV_FALSE) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + DBG_BREAKPOINT(); + status = NV_ERR_GPU_NOT_FULL_POWER; + goto done; + } + + // TODO: More complete sanity checking + +done: + // Assign the return value + if ((status != NV_OK) && (pRetVal != NULL)) + { + *pRetVal = retVal; + } + return status; +} + +/** + * @brief checks if the register offset is valid + * + * @param[in] pGpu + * @param[in] offset + * + * @returns NV_OK if valid + * @returns NV_ERR_INVALID_ARGUMENT if offset is too large for bar + * @returns NV_ERR_INSUFFICIENT_PERMISSIONS if user is not authorized to access register + */ +NV_STATUS +gpuValidateRegOffset_IMPL +( + OBJGPU *pGpu, + NvU32 offset +) +{ + NvU64 maxBar0Size = pGpu->deviceMappings[0].gpuNvLength; + + // The register offset should be 4 bytes smaller than the max bar size + if (offset > (maxBar0Size - 4)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (!osIsAdministrator() && + !gpuGetUserRegisterAccessPermissions(pGpu, offset)) + { + NV_PRINTF(LEVEL_ERROR, + "User does not have permission to access register offset 0x%x\n", + offset); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} + +/*! + * @brief Verify existence function. + * + * @param[in] pGpu + * + * @returns NV_OK if GPU is still accessible + * NV_ERR_INVALID_STATE if GPU is inaccessible + */ +NV_STATUS +gpuVerifyExistence_IMPL +( + OBJGPU *pGpu +) +{ + NvU32 regVal = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + + if (regVal != pGpu->chipId0) + { + osHandleGpuLost(pGpu); + regVal = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + if (regVal != pGpu->chipId0) + { + return NV_ERR_GPU_IS_LOST; + } + } + + return NV_OK; +} + +/*! + * @brief Perform a sanity check on a register read value + * Starts with gpu-independent check, then calls into HAL for specific cases + * + * @param[in] pGpu GPU object pointer + * @param[in] addr Value address + * @param[in] size Access size + * @param[in/out] pValue Value to sanity check + */ +NV_STATUS +gpuSanityCheckRegRead_IMPL +( + OBJGPU *pGpu, + NvU32 addr, + NvU32 size, + void *pValue +) +{ + NvU8 *pValue8; + NvU16 *pValue16; + NvU32 *pValue32; + NvU32 value; + + switch (size) + { + case 8: + { + pValue8 = ((NvU8 *) pValue); + if (*pValue8 == (NvU8) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. + // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue8 = osGpuReadReg008(pGpu, addr); + } + } + break; + } + case 16: + { + pValue16 = ((NvU16 *) pValue); + if (*pValue16 == (NvU16) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. + // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue16 = osGpuReadReg016(pGpu, addr); + } + } + break; + } + case 32: + { + pValue32 = ((NvU32 *) pValue); + if (*pValue32 == (NvU32) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. + // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue32 = osGpuReadReg032(pGpu, addr); + } + } + + value = *((NvU32 *)pValue); + + // + // HW will return 0xbad in the upper 3 nibbles + // when there is a possible issue. 
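+            // For example, assuming the mask covers the top three nibbles, a
+            // raw read of 0xBAD00200 (low bits illustrative) would match and
+            // be routed to the per-chip handler below.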
+            //
+            if ((value & GPU_READ_PRI_ERROR_MASK) == GPU_READ_PRI_ERROR_CODE)
+            {
+                gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value);
+            }
+            break;
+        }
+        default:
+        {
+            NV_ASSERT_FAILED("Invalid access size");
+            break;
+        }
+    }
+
+    return NV_OK;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c
new file mode 100644
index 0000000..7adaa44
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c
@@ -0,0 +1,329 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gpu/gpu.h"
+#include "gpu/gpu_device_mapping.h"
+#include "core/thread_state.h"
+#include "nv_ref.h"
+
+/**
+ * @brief Finds the device mapping matching the specified device index and instance
+ *
+ * @param[in] pGpu
+ * @param[in] deviceIndex  device specific device enum (DEVICE_INDEX_*)
+ * @param[in] instance     instance of the particular device index
+ *
+ * @returns matching mapping, or NULL if not found.
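+ *
+ * (Editor's note: gpuGetDeviceMapping_IMPL() below caches the instance-0
+ * result per device index, so this walk typically runs once per device.)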
+ */ +static DEVICE_MAPPING * +_gpuFindDeviceMapping +( + OBJGPU *pGpu, + DEVICE_INDEX deviceIndex, + NvU32 instance +) +{ + NvU32 i; + NvU32 devId = 0; + DEVICE_ID_MAPPING *deviceIdMapping; + NvU32 numDeviceIDs; + + numDeviceIDs = gpuGetDeviceIDList_HAL(pGpu, &deviceIdMapping); + + // Find the devID that matches the requested device index + for (i = 0; i < numDeviceIDs; i++) + { + if (deviceIdMapping[i].deviceIndex == deviceIndex) + { + devId = deviceIdMapping[i].devId; + break; + } + } + + if (devId == 0) + { + // For discrete GPU, just return BAR0 mapping + if (deviceIndex == DEVICE_INDEX_GPU) + { + return &pGpu->deviceMappings[0]; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for deviceIndex=%d\n", + deviceIndex); + return NULL; + } + } + return gpuGetDeviceMappingFromDeviceID(pGpu, devId, instance); +} + +DEVICE_MAPPING * +gpuGetDeviceMapping_IMPL +( + OBJGPU *pGpu, + DEVICE_INDEX deviceIndex, + NvU32 instance +) +{ + // Fast lookup path for first instance of a device + if ((deviceIndex < DEVICE_INDEX_MAX) && (instance == 0)) + { + if (!pGpu->pDeviceMappingsByDeviceInstance[deviceIndex]) + { + pGpu->pDeviceMappingsByDeviceInstance[deviceIndex] = _gpuFindDeviceMapping(pGpu, deviceIndex, instance); + } + return pGpu->pDeviceMappingsByDeviceInstance[deviceIndex]; + } + + return _gpuFindDeviceMapping(pGpu, deviceIndex, instance); +} + +/** + * @brief Returns the device mapping matching the specified device ID from + * project relocation table + * + * @param[in] pGpu OBJGPU pointer + * @param[in] deviceId device ID from project relocation table + * @param[in] instance instance of the particular device ID + * + * @returns matching mapping, or NULL if not found. + */ + +DEVICE_MAPPING * +gpuGetDeviceMappingFromDeviceID_IMPL +( + OBJGPU *pGpu, + NvU32 deviceId, + NvU32 instance +) +{ + NvU32 i; + + // + // For SOC, walk the list of devices to find the device/instance requested. + // For GPU (legacy), only NV_DEVID_GPU(0) is expected & allowed + // + if (pGpu->bIsSOC) + { + for (i = 0; i < pGpu->gpuDeviceMapCount; i++) + { + if (pGpu->deviceMappings[i].gpuDeviceEnum == deviceId) + { + // Find the Nth instance of the requested device + if (instance) + instance--; + else + return &pGpu->deviceMappings[i]; + } + } + + NV_PRINTF(LEVEL_ERROR, "Could not find mapping for deviceId=%d\n", + deviceId); + } + else + { + // For GPU, always assume NV_DEVID_GPU instance 0. 
+ NV_ASSERT(instance == 0); + NV_ASSERT(pGpu->gpuDeviceMapCount == 1); + + return &pGpu->deviceMappings[0]; + } + + return NULL; +} + +static NvBool _gpuCheckIsBar0OffByN(OBJGPU *pGpu) +{ + NvU32 i, pmcBoot0; + + // Check to see if we can find PMC_BOOT_0 + for (i = 0; i < 20; i++) + { + pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0 + (i * 4)); + if (pmcBoot0 == pGpu->chipId0) + { + break; + } + } + + if ((i != 0) && (i != 20)) + { + // We are off by N + return NV_TRUE; + } + + // Everything looks ok + return NV_FALSE; +} + +static NvBool _gpuCheckDoesPciSpaceMatch(OBJGPU *pGpu) +{ + NvU16 VendorId; + NvU16 DeviceId; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + + osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId); + if ((DeviceId == 0xFFFF) || + (VendorId != 0x10DE)) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvBool _gpuCheckIsPciMemSpaceEnabled(OBJGPU *pGpu) +{ + NvU16 VendorId; + NvU16 DeviceId; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + void *Handle = osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId); + NvU32 Enabled = osPciReadDword(Handle, NV_CONFIG_PCI_NV_1); + + // Is Memory Spaced Enabled + if (DRF_VAL(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, Enabled) != NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NV_STATUS gpuSanityCheck_IMPL +( + OBJGPU *pGpu, + NvU32 flags, + NvU32 *pFlagsFailed +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 flagsFailed = GPU_SANITY_CHECK_FLAGS_NONE; + THREAD_STATE_NODE *pThreadNode = NULL; + + if (pFlagsFailed != NULL) + { + *pFlagsFailed = GPU_SANITY_CHECK_FLAGS_NONE; + } + + if (pGpu->bIsSOC) + { + flags &= ~( + GPU_SANITY_CHECK_FLAGS_BOOT_0 | + GPU_SANITY_CHECK_FLAGS_OFF_BY_N | + GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH | + GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED | + GPU_SANITY_CHECK_FLAGS_FB); + + } + + // + // Check to make sure the lock is held for this thread as the underlying + // functions can touch state and lists that expect exclusive access. + // + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if (rmStatus != NV_OK) + { + return rmStatus; + } + if (pThreadNode->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS) + { + return NV_ERR_NOT_SUPPORTED; + } + + // Check to make sure we are powered on first + if (gpuIsGpuFullPower(pGpu) == NV_FALSE) + { + NV_ASSERT(0); + return NV_ERR_GPU_NOT_FULL_POWER; + } + + if (flags & GPU_SANITY_CHECK_FLAGS_BOOT_0) + { + // + // When GPU is in reset reg reads will return 0xFFFFFFFF. + // Without this check RM would keep hitting assert during TDR recovery. 
+ // + if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + NvU32 pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + if (pmcBoot0 != pGpu->chipId0) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_BOOT_0; + NV_ASSERT(0); + } + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_OFF_BY_N) + { + if (_gpuCheckIsBar0OffByN(pGpu)) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_OFF_BY_N; + NV_ASSERT(0); + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH) + { + if (!_gpuCheckDoesPciSpaceMatch(pGpu)) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH; + NV_ASSERT(0); + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED) + { + if (!_gpuCheckIsPciMemSpaceEnabled(pGpu)) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED; + NV_ASSERT(0); + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_FB) + { + if (!gpuIsGpuFullPower(pGpu)) + { + NV_ASSERT(0); + } + } + + if (flagsFailed != GPU_SANITY_CHECK_FLAGS_NONE) + { + rmStatus = NV_ERR_GENERIC; + NV_PRINTF(LEVEL_ERROR, "Failed test flags: 0x%x\n", flagsFailed); + } + + if (pFlagsFailed != NULL) + { + *pFlagsFailed = flagsFailed; + } + + return rmStatus; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c new file mode 100644 index 0000000..8333b0c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief GSP Client (CPU RM) specific GPU routines reside in this file. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "ctrl/ctrl2080.h" + +/*! + * @brief These functions are used on CPU RM when pGpu is a GSP client. + * Data is fetched from GSP using subdeviceCtrlCmdInternalGetChipInfo and cached, + * then retrieved through the internal gpuGetChipInfo. + * + * Functions either return value directly, or through a second [out] param, depending + * on the underlying function. 
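+ * For example, gpuGetChipSubRev_FWCLIENT() below returns the cached
+ * chipSubRev directly, while gpuGetRegBaseOffset_FWCLIENT() reports through
+ * its *pOffset out-parameter and returns an NV_STATUS.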
+ * + * @param[in] pGpu + */ +NvU8 +gpuGetChipSubRev_FWCLIENT +( + OBJGPU *pGpu +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 0); + + return pChipInfo->chipSubRev; +} + +NvU32 +gpuGetEmulationRev1_FWCLIENT +( + OBJGPU *pGpu +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 0); + + return pChipInfo->emulationRev1; +} + +NV_STATUS +gpuConstructDeviceInfoTable_FWCLIENT +( + OBJGPU *pGpu +) +{ + NV_STATUS status; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams; + const NvU32 cmd = NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE; + + if (pGpu->pDeviceInfoTable) // already initialized + return NV_OK; + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_NO_MEMORY); + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + cmd, pParams, sizeof(*pParams)); + if (status != NV_OK) + goto done; + + if (pParams->numEntries == 0) + goto done; + + pGpu->pDeviceInfoTable = portMemAllocNonPaged(pParams->numEntries * sizeof(DEVICE_INFO2_TABLE)); + NV_ASSERT_TRUE_OR_GOTO(status, pGpu->pDeviceInfoTable != NULL, NV_ERR_NO_MEMORY, done); + + pGpu->numDeviceInfoEntries = pParams->numEntries; + portMemCopy(pGpu->pDeviceInfoTable, pGpu->numDeviceInfoEntries * sizeof(DEVICE_INFO2_TABLE), + pParams->deviceInfoTable, pParams->numEntries * sizeof(DEVICE_INFO2_TABLE)); + +done: + portMemFree(pParams); + return status; +} + +NvU32 +gpuGetLitterValues_FWCLIENT +( + OBJGPU *pGpu, + NvU32 index +) +{ + return 0; +} + +NV_STATUS +gpuGetRegBaseOffset_FWCLIENT +( + OBJGPU *pGpu, + NvU32 regBase, + NvU32 *pOffset +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(regBase < NV_ARRAY_ELEMENTS(pChipInfo->regBases), NV_ERR_NOT_SUPPORTED); + + if (pChipInfo->regBases[regBase] != 0xFFFFFFFF) + { + *pOffset = pChipInfo->regBases[regBase]; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NvU32 +gpuReadBAR1Size_FWCLIENT +( + OBJGPU *pGpu +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 0); + + return pChipInfo->bar1Size; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c new file mode 100644 index 0000000..ee56964 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c @@ -0,0 +1,413 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This implements functions of the base class for gpu resources. +* +******************************************************************************/ + +#include "core/core.h" +#include "os/os.h" +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "gpu/gpu.h" +#include "gpu/gpu_resource.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "g_allclasses.h" + +NV_STATUS +gpuresConstruct_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pDeviceRef; + RsResourceRef *pSubdeviceRef; + OBJGPU *pGpu = NULL; + NvBool bBcResource = NV_TRUE; + NV_STATUS status; + + // Check if instance is a subdevice + pGpuResource->pSubdevice = dynamicCast(pGpuResource, Subdevice); + + // Else check for ancestor + if (!pGpuResource->pSubdevice) + { + status = refFindAncestorOfType(pResourceRef, classId(Subdevice), &pSubdeviceRef); + if (status == NV_OK) + pGpuResource->pSubdevice = dynamicCast(pSubdeviceRef->pResource, Subdevice); + } + + // Check if instance is a device + pGpuResource->pDevice = dynamicCast(pGpuResource, Device); + + // Else check for ancestor + if (!pGpuResource->pDevice) + { + status = refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef); + if (status == NV_OK) + pGpuResource->pDevice = dynamicCast(pDeviceRef->pResource, Device); + } + + if (RS_IS_COPY_CTOR(pParams)) + return gpuresCopyConstruct(pGpuResource, pCallContext, pParams); + + // Fails during device/subdevice ctor. 
Subclass ctor calls gpuresSetGpu + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (status == NV_OK) + gpuresSetGpu(pGpuResource, pGpu, bBcResource); + + return NV_OK; +} + +NV_STATUS +gpuresCopyConstruct_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + GpuResource *pGpuResourceSrc = dynamicCast(pParams->pSrcRef->pResource, GpuResource); + + if (pGpuResourceSrc == NULL) + return NV_ERR_INVALID_OBJECT; + + gpuresSetGpu(pGpuResource, pGpuResourceSrc->pGpu, pGpuResourceSrc->bBcResource); + + return NV_OK; +} + +NV_STATUS +gpuresMap_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu; + NvU32 offset, size; + NV_STATUS rmStatus; + NvBool bBroadcast; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + rmStatus = gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, &offset, &size); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = rmapiMapGpuCommon(staticCast(pGpuResource, RsResource), + pCallContext, + pCpuMapping, + pGpu, + offset, + size); + pCpuMapping->processId = osGetCurrentProcess(); + + if (pParams->ppCpuVirtAddr) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + + return rmStatus; +} + +NV_STATUS +gpuresUnmap_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu; + NvBool bBroadcast; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + osUnmapGPU(pGpu->pOsGpuInfo, + rmclientGetCachedPrivilege(pClient), + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + + return NV_OK; +} + +NvBool +gpuresShareCallback_IMPL +( + GpuResource *pGpuResource, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + NvBool bMIGInUse = NV_FALSE; + NvU16 shareType = pSharePolicy->type; + + if ((shareType == RS_SHARE_TYPE_SMC_PARTITION) && !bMIGInUse) + { + // When MIG is not enabled, ignore Require restrictions + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + return NV_TRUE; + + // Fallback if feature is not available + shareType = RS_SHARE_TYPE_GPU; + } + + switch (shareType) + { + case RS_SHARE_TYPE_SMC_PARTITION: + { + if (RS_ACCESS_MASK_TEST(&pSharePolicy->accessMask, RS_ACCESS_DUP_OBJECT)) + { + // Special exceptions only for Dup + RsResourceRef *pSrcRef = RES_GET_REF(pGpuResource); + + switch (pSrcRef->externalClassId) + { + case NV01_MEMORY_SYSTEM: + return NV_TRUE; + } + + } + + break; + } + case RS_SHARE_TYPE_GPU: + { + RsResourceRef *pDeviceAncestorRef; + RsResourceRef *pParentDeviceAncestorRef; + + // This share type only works when called from dup + if (pParentRef == NULL) + break; + + if (pParentRef->internalClassId == classId(Device)) + { + // pParentRef is allowed to itself be the Device ancestor + pParentDeviceAncestorRef = pParentRef; + } + else + { + // If pParentRef is not itself the device, try to find a Device ancestor. If none exist, fail. 
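+                // (e.g. for a dup whose destination parent is a Subdevice,
+                // that Subdevice's Device ancestor is used for the match
+                // below.)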
+ if (refFindAncestorOfType(pParentRef, classId(Device), &pParentDeviceAncestorRef) != NV_OK) + break; + } + // Check that the source resource's ancestor device instance matches the destination parent's device instance + if (refFindAncestorOfType(RES_GET_REF(pGpuResource), classId(Device), &pDeviceAncestorRef) == NV_OK) + { + Device *pDevice = dynamicCast(pDeviceAncestorRef->pResource, Device); + Device *pParentDevice = dynamicCast(pParentDeviceAncestorRef->pResource, Device); + + if ((pDevice != NULL) && (pParentDevice != NULL) && + (pDevice->deviceInst == pParentDevice->deviceInst)) + { + return NV_TRUE; + } + } + } + } + + // Delegate to superclass + return rmresShareCallback_IMPL(staticCast(pGpuResource, RmResource), pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS +gpuresGetRegBaseOffsetAndSize_IMPL +( + GpuResource *pGpuResource, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +gpuresGetMapAddrSpace_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pGpuResource); + NV_STATUS status; + NvU32 offset; + NvU32 size; + + // Default to REGMEM if the GPU resource has a register base and offset defined + status = gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, &offset, &size); + if (status != NV_OK) + return status; + + if (pAddrSpace) + *pAddrSpace = ADDR_REGMEM; + + return NV_OK; +} + +/*! + * @brief Forward a control call to the Physical RM portion of this API. + */ +NV_STATUS +gpuresInternalControlForward_IMPL +( + GpuResource *pGpuResource, + NvU32 command, + void *pParams, + NvU32 size +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(GPU_RES_GET_GPU(pGpuResource)); + return pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pGpuResource), + gpuresGetInternalObjectHandle(pGpuResource), + command, + pParams, + size); +} + +/*! + * @brief Retrieve the handle associated with the Physical RM portion of the API. + * For non-split object, this is the same as the handle of the object. 
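+ *
+ * Illustrative: gpuresInternalControlForward() above forwards control calls
+ * to this handle on the Physical RM side.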
+ */ +NvHandle +gpuresGetInternalObjectHandle_IMPL(GpuResource *pGpuResource) +{ + return RES_GET_HANDLE(pGpuResource); +} + +NV_STATUS +gpuresControl_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + gpuresControlSetup(pParams, pGpuResource); + + return resControl_IMPL(staticCast(pGpuResource, RsResource), + pCallContext, pParams); +} + +void +gpuresControlSetup_IMPL +( + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + GpuResource *pGpuResource +) +{ + RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams; + pRmCtrlParams->pGpu = pGpuResource->pGpu; + + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); +} + +void +gpuresSetGpu_IMPL +( + GpuResource *pGpuResource, + OBJGPU *pGpu, + NvBool bBcResource +) +{ + if (pGpu != NULL) + { + RmResource *pResource = staticCast(pGpuResource, RmResource); + pResource->rpcGpuInstance = gpuGetInstance(pGpu); + pGpuResource->pGpu = pGpu; + pGpuResource->pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpuResource->pGpu); + pGpuResource->bBcResource = bBcResource; + gpuSetThreadBcState(pGpu, bBcResource); + } +} + +NV_STATUS +gpuresGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hResource, + GpuResource **ppGpuResource +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppGpuResource = NULL; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + *ppGpuResource = dynamicCast(pResourceRef->pResource, GpuResource); + + return (*ppGpuResource) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +gpuresGetByDeviceOrSubdeviceHandle +( + RsClient *pClient, + NvHandle hResource, + GpuResource **ppGpuResource +) +{ + NV_STATUS status; + + status = gpuresGetByHandle(pClient, hResource, ppGpuResource); + + if (status != NV_OK) + return status; + + // Must be device or subdevice + if (!dynamicCast(*ppGpuResource, Device) && + !dynamicCast(*ppGpuResource, Subdevice)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c new file mode 100644 index 0000000..959bd89 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c @@ -0,0 +1,512 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief Object Manager: Object Classes are defined in this module. + */ + +#include "gpu/gpu.h" +#include "os/os.h" +#include "core/locks.h" +#include "nvrm_registry.h" +#include "lib/base_utils.h" + +ct_assert(NVOC_CLASS_ID_MAX_WIDTH <= SF_WIDTH(ENGDESC_CLASS)); + +NV_STATUS +gpuBuildClassDB_IMPL(OBJGPU *pGpu) +{ + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + PCLASSDESCRIPTOR pClassDynamic; + const CLASSDESCRIPTOR *pClassStatic; + NvU32 numClasses; + NvU32 i, j; + NV_STATUS status; + PGPUCLASSDB pClassDB = &pGpu->classDB; + + // + // Calculate number of classes supported by this device. + // + // Loop through the list of GPU-specific classes throwing out any the + // rmconfig has marked not supported. + // + numClasses = 0; + + pClassStatic = &pEngineOrder->pClassDescriptors[0]; + for (i = 0; i < pEngineOrder->numClassDescriptors; i++) + { + // RMCONFIG: throw out any that are not supported + if (pClassStatic[i].externalClassId == (NvU32)~0) + continue; + + numClasses++; + } + + NV_PRINTF(LEVEL_INFO, "num class descriptors: 0x%x\n", numClasses); + + // + // Allocate space for correct number of entries. + // + pClassDynamic = portMemAllocNonPaged(sizeof(CLASSDESCRIPTOR) * numClasses); + if (pClassDynamic == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, "alloc failed: 0x%x\n", status); + DBG_BREAKPOINT(); + return status; + } + portMemSet((void *)pClassDynamic, 0, sizeof(CLASSDESCRIPTOR) * numClasses); + + // + // Now load up chip-dependent classes into pClass table. 
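+    // This mirrors the filter used in the counting pass above, so exactly
+    // numClasses entries are written.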
+ // + pClassStatic = &pEngineOrder->pClassDescriptors[0]; + i = 0; + for (j = 0; j < pEngineOrder->numClassDescriptors; j++) + { + // RMCONFIG: skip over any that are not supported + if (pClassStatic[j].externalClassId == (NvU32)~0) + continue; + + // store info for class in class DB entry + pClassDynamic[i] = pClassStatic[j]; + + // move to next slot in class DB + i++; + } + + pClassDB->pClasses = pClassDynamic; + pClassDB->numClasses = numClasses; + pClassDB->pSuppressClasses = NULL; + pClassDB->bSuppressRead = NV_FALSE; + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +NV_STATUS +gpuDestroyClassDB_IMPL(OBJGPU *pGpu) +{ + portMemFree(pGpu->classDB.pClasses); + portMemFree(pGpu->classDB.pSuppressClasses); + + pGpu->engineDB.bValid = NV_FALSE; + return NV_OK; +} + +NvBool +gpuIsClassSupported_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + PCLASSDESCRIPTOR pClassDesc; + NV_STATUS status; + + status = gpuGetClassByClassId(pGpu, externalClassId, &pClassDesc); + + return (status == NV_OK) && (pClassDesc); +} + +NV_STATUS +gpuGetClassByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId, PCLASSDESCRIPTOR *ppClassDesc) +{ + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 i; + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (pClassDB->pClasses[i].externalClassId == externalClassId) + { + if (ppClassDesc != NULL) + { + *ppClassDesc = &pClassDB->pClasses[i]; + } + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuGetClassByEngineAndClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId, NvU32 engDesc, PCLASSDESCRIPTOR *ppClassDesc) +{ + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 i; + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (pClassDB->pClasses[i].externalClassId == externalClassId && pClassDB->pClasses[i].engDesc == engDesc) + { + *ppClassDesc = &pClassDB->pClasses[i]; + return NV_OK; + } + } + + return NV_ERR_GENERIC; +} + +static NvU32 * +gpuGetSuppressedClassList +( + OBJGPU *pGpu +) +{ + NvU8 *pStr; + NvU8 *pEndStr; + NvU8 *pSaveStr; + NvU32 strLength; + NvU32 nIndex; + NvU32 nCount = 0; + NvU32 *pData = NULL; + NvU32 numAModelClassesInChip = 0; + NvBool bSuppressClassList = NV_FALSE; + NvU32 numFound; + + // alloc regkey buffer + strLength = 256; + pStr = portMemAllocNonPaged(strLength); + if (pStr == NULL) + { + NV_PRINTF(LEVEL_ERROR, "portMemAllocNonPaged failed\n"); + return NULL; + } + + pSaveStr = pStr; + + if (osReadRegistryString(pGpu, NV_REG_STR_SUPPRESS_CLASS_LIST, pStr, &strLength) == NV_OK) + { + bSuppressClassList = NV_TRUE; + } + + if (bSuppressClassList) + { + // count number of classes + for (; *pStr; pStr = pEndStr, nCount++) + { + nvStrToL(pStr, &pEndStr, BASE16, 0, &numFound); + } + } + + // allocate memory only if there is something to suppress. + if ( ! ( nCount + numAModelClassesInChip ) ) + { + portMemFree(pSaveStr); + return NULL; + } + + // + // add one dword to store the count of classes here. 
+ // This fixes a memory leak caused by changelist 1620538 + // + nCount++; + + pData = portMemAllocNonPaged(sizeof(NvU32)*(nCount + numAModelClassesInChip)); + if (pData == NULL) + { + NV_PRINTF(LEVEL_ERROR, "portMemAllocNonPaged failed\n"); + portMemFree(pSaveStr); + return NULL; + } + + // fill array -- first is number of classes + pData[0]=nCount; + + if (bSuppressClassList) + { + pStr = pSaveStr; + for (nIndex = 1; *pStr; pStr = pEndStr, nIndex++) + { + pData[nIndex] = nvStrToL(pStr, &pEndStr, BASE16, 0, &numFound); + } + } + + portMemFree(pSaveStr); + + return pData; +} + +/** + * @brief Returns list of classes supported by engDesc. + * If ( engDesc == ENG_INVALID ) returns classes + * supported by all engines. + * @param[in] pGpu OBJGPU pointer + * @param[in/out] pNumClasses in - denotes the size of pClassList when pClassList != NULL + out - when pClassList is NULL, denotes the number of matching + classes found + * @param[out] pClassList Returns matching class(s) when pNumClasses in not 0 + * @param[out] engDesc Engine ID + * + * @return NV_OK if class match found + */ +NV_STATUS +gpuGetClassList_IMPL(OBJGPU *pGpu, NvU32 *pNumClasses, NvU32 *pClassList, NvU32 engDesc) +{ + NvU32 *pSuppressClasses = NULL; + NvU32 numClasses; + NV_STATUS status = NV_OK; + NvU32 i, k; + NvBool bCount; + PCLASSDESCRIPTOR classDB = pGpu->classDB.pClasses; + + // Read the registry one time to get the list + if (NV_FALSE == pGpu->classDB.bSuppressRead) + { + pGpu->classDB.pSuppressClasses = gpuGetSuppressedClassList(pGpu); + pGpu->classDB.bSuppressRead = NV_TRUE; + } + + pSuppressClasses = pGpu->classDB.pSuppressClasses; + + numClasses = 0; + + for (i = 0; i < pGpu->classDB.numClasses; i++) + { + if ((engDesc != ENG_INVALID) && (classDB[i].engDesc != engDesc)) + continue; + + bCount = NV_TRUE; + + if (pSuppressClasses != NULL) + { + for (k=1; k < pSuppressClasses[0]; k++) + { + if (pSuppressClasses[k] == classDB[i].externalClassId) + { + bCount = NV_FALSE; + break; + } + } + } + + if (bCount) + { + // save the class in caller's buffer, if provided + if (pClassList) + { + if (numClasses < *pNumClasses) + pClassList[numClasses] = classDB[i].externalClassId; + else + status = NV_ERR_INVALID_PARAM_STRUCT; + } + numClasses++; + } + } + + // and return number of classes + if (status == NV_OK) + *pNumClasses = numClasses; + + return status; +} + +/*! + * @brief Add a class to class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pEngDesc EngDesc of Classes to be added to Class DB + * (NULL = don't care) + * @param[in] pExternalClassId Class to add to DB (NULL = don't care) + * + * @returns NV_STATUS - + * NV_ERR_INVALID_ARGUMENT if both pEngineTag and pClass are NULL. 
+ * NV_OK otherwise + */ +static NV_STATUS +_gpuAddClassToClassDBByEngTagClassId(OBJGPU *pGpu, ENGDESCRIPTOR *pEngDesc, NvU32 *pExternalClassId) +{ + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + const CLASSDESCRIPTOR *pClassDesc = &pEngineOrder->pClassDescriptors[0]; + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 numClasses = pClassDB->numClasses; + NvU32 i; + + NV_CHECK_OR_RETURN(LEVEL_INFO, (NULL != pEngDesc) || (NULL != pExternalClassId), NV_ERR_INVALID_ARGUMENT); + + // Return early if requested class/engine is already in classdb + for (i = 0; i < pClassDB->numClasses; i++) + { + if (((NULL == pEngDesc) || (pClassDB->pClasses[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDB->pClasses[i].externalClassId == *pExternalClassId))) + { + return NV_OK; + } + } + + // Populate the ClassDB with information from PMODULEDESCRIPTOR (R/O classhal.h data) + for (i = 0; i < pEngineOrder->numClassDescriptors; i++) + { + // RMCONFIG: skip over any that are not supported + if (pClassDesc[i].externalClassId == (NvU32)~0) + continue; + + if (((NULL == pEngDesc) || (pClassDesc[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDesc[i].externalClassId == *pExternalClassId))) + { + // store info for class in class DB entry + pClassDB->pClasses[numClasses] = pClassDesc[i]; + pClassDB->numClasses++; + break; + } + } + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Add a class to class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID of Classes to be added to Class DB + * @param[in] class Class to add to DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuAddClassToClassDBByEngTagClassId_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvU32 externalClassId) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, &engDesc, &externalClassId); +} + +/*! + * @brief Add a class to class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID of Class to be added to Class DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, &engDesc, NULL); +} + +/*! + * @brief Add a class to class DB with given Class ID. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] class Class ID + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS gpuAddClassToClassDBByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, NULL, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pEngDesc Engine Tag of Classes to be removed from Class DB + * (NULL = don't care) + * @param[in] pExternalClassId Class to remove from DB (NULL = don't care) + * + * @returns NV_STATUS - NV_OK always. 
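+ *
+ * Matching entries are removed by compacting the array in place; the public
+ * wrappers below select by engine tag, by class id, or by both.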
+ */ +static NV_STATUS +_gpuDeleteClassFromClassDBByEngTagClassId(OBJGPU *pGpu, ENGDESCRIPTOR *pEngDesc, NvU32 *pExternalClassId) +{ + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 i, j; + + NV_CHECK_OR_RETURN(LEVEL_INFO, (NULL != pEngDesc) || (NULL != pExternalClassId), NV_ERR_INVALID_ARGUMENT); + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (((NULL == pEngDesc) || (pClassDB->pClasses[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDB->pClasses[i].externalClassId == *pExternalClassId))) + { + for (j = i; j < pClassDB->numClasses - 1; j++) + { + pClassDB->pClasses[j] = pClassDB->pClasses[j + 1]; + } + pClassDB->numClasses--; + i--; // Be sure to check the new entry at index i on the next loop. + } + } + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Delete a class from class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc EngDesc of Classes to be removed from Class DB + * @param[in] externalClassId Class to remove from DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByEngTagClassId_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvU32 externalClassId) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, &engDesc, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] externalClassId Class to remove from DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, NULL, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine Descriptor of Classes to be removed from Class DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByEngTag_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, &engDesc, NULL); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c new file mode 100644 index 0000000..fb32100 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c @@ -0,0 +1,635 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "class/cl0040.h" /* NV01_MEMORY_LOCAL_USER */ +#include "class/cl84a0.h" /* NV01_MEMORY_LIST_XXX */ +#include "class/cl00b1.h" /* NV01_MEMORY_HW_RESOURCES */ + +#include "nverror.h" + +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" +#include "rmapi/client.h" +#include "rmapi/resource_fwd_decls.h" +#include "core/thread_state.h" + +NV_STATUS +gpuSetExternalKernelClientCount_IMPL(OBJGPU *pGpu, NvBool bIncr) +{ + if (bIncr) + { + pGpu->externalKernelClientCount++; + } + else + { + NV_ASSERT_OR_RETURN(pGpu->externalKernelClientCount > 0, NV_ERR_INVALID_OPERATION); + pGpu->externalKernelClientCount--; + } + + return NV_OK; +} + +// Get the count of user clients that are using given gpu +static NvU32 +_gpuGetUserClientCount +( + OBJGPU *pGpu, + NvBool bCount +) +{ + NvU32 count = 0; + Device *pDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + NV_STATUS status; + + // Search list of clients for any that have an InUse ref to the gpu + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + // Skip internal client + if (pRsClient->type == CLIENT_TYPE_KERNEL) + continue; + + status = deviceGetByGpu(pRsClient, pGpu, NV_TRUE /* bAnyInGroup */, &pDevice); + + if (status != NV_OK) + continue; + + count++; + + if (!bCount) + break; + } + + return count; +} + +NvBool +gpuIsInUse_IMPL +( + OBJGPU *pGpu +) +{ + return !!_gpuGetUserClientCount(pGpu, NV_FALSE) || + (pGpu->externalKernelClientCount > 0); +} + +// Get the count of user clients that are using given gpu +NvU32 +gpuGetUserClientCount_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuGetUserClientCount(pGpu, NV_TRUE); +} + +// Get the count of external clients (User+External modules) that are using given gpu +NvU32 +gpuGetExternalClientCount_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuGetUserClientCount(pGpu, NV_TRUE) + pGpu->externalKernelClientCount; +} + +/** + * Find the GPU associated with a resource reference in this order: + * + * 1. Directly from the RsResource if the resource is a Device or Subdevice + * 2. From an ancestor subdevice (if any) + * 3. 
From an ancestor device (if any)
+ *
+ * If the resource you're querying is guaranteed to be a GpuResource, you
+ * should directly call GPU_RES_GET_GPU().
+ *
+ * @param[out] pbBroadcast True if the found GPU corresponds to a device
+ *                         [optional]
+ */
+NV_STATUS
+gpuGetByRef
+(
+    RsResourceRef *pContextRef,
+    NvBool *pbBroadcast,
+    OBJGPU **ppGpu
+)
+{
+    NV_STATUS status = NV_OK;
+    RsResourceRef *pDeviceRef;
+    RsResourceRef *pSubdeviceRef;
+    GpuResource *pGpuResource;
+
+    if (ppGpu != NULL)
+        *ppGpu = NULL;
+
+    if (pContextRef == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    pGpuResource = dynamicCast(pContextRef->pResource, GpuResource);
+
+    //
+    // NULL check on GpuResource::pGpu as this routine is used from within
+    // GpuResource::Construct to initialize GpuResource::pGpu
+    //
+    if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL))
+    {
+        status = refFindAncestorOfType(pContextRef, classId(Subdevice), &pSubdeviceRef);
+        if (status == NV_OK)
+        {
+            pGpuResource = dynamicCast(pSubdeviceRef->pResource, GpuResource);
+            if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL))
+                status = NV_ERR_OBJECT_NOT_FOUND;
+        }
+
+        if (status != NV_OK)
+        {
+            status = refFindAncestorOfType(pContextRef, classId(Device), &pDeviceRef);
+            if (status == NV_OK)
+            {
+                pGpuResource = dynamicCast(pDeviceRef->pResource, GpuResource);
+                if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL))
+                    status = NV_ERR_OBJECT_NOT_FOUND;
+            }
+        }
+    }
+
+    if (status == NV_OK)
+    {
+        if (pbBroadcast != NULL)
+            *pbBroadcast = pGpuResource->bBcResource;
+
+        if (ppGpu != NULL)
+            *ppGpu = pGpuResource->pGpu;
+    }
+
+    return status;
+}
+
+/**
+ * Wrapper for gpuGetByRef that takes a pClient + hResource instead of a
+ * pResourceRef.
+ *
+ * Find the GPU associated with a resource.
+ */
+NV_STATUS
+gpuGetByHandle
+(
+    RsClient *pClient,
+    NvHandle hResource,
+    NvBool *pbBroadcast,
+    OBJGPU **ppGpu
+)
+{
+    RsResourceRef *pResourceRef;
+    NV_STATUS status;
+
+    if (ppGpu != NULL)
+        *ppGpu = NULL;
+
+    status = clientGetResourceRef(pClient, hResource, &pResourceRef);
+    if (status != NV_OK)
+        return status;
+
+    return gpuGetByRef(pResourceRef, pbBroadcast, ppGpu);
+}
+
+NV_STATUS gpuRegisterSubdevice_IMPL(OBJGPU *pGpu, Subdevice *pSubdevice)
+{
+    const NvU32 initialSize = 32;
+    const NvU32 expansionFactor = 2;
+
+    if (pGpu->numSubdeviceBackReferences == pGpu->maxSubdeviceBackReferences)
+    {
+        if (pGpu->pSubdeviceBackReferences == NULL)
+        {
+            pGpu->pSubdeviceBackReferences = portMemAllocNonPaged(initialSize * sizeof(Subdevice*));
+            if (pGpu->pSubdeviceBackReferences == NULL)
+                return NV_ERR_NO_MEMORY;
+            pGpu->maxSubdeviceBackReferences = initialSize;
+        }
+        else
+        {
+            const NvU32 newSize = expansionFactor * pGpu->maxSubdeviceBackReferences * sizeof(Subdevice*);
+            Subdevice **newArray = portMemAllocNonPaged(newSize);
+            if (newArray == NULL)
+                return NV_ERR_NO_MEMORY;
+
+            portMemCopy(newArray, newSize, pGpu->pSubdeviceBackReferences, pGpu->maxSubdeviceBackReferences * sizeof(Subdevice*));
+            portMemFree(pGpu->pSubdeviceBackReferences);
+            pGpu->pSubdeviceBackReferences = newArray;
+            pGpu->maxSubdeviceBackReferences *= expansionFactor;
+        }
+    }
+    pGpu->pSubdeviceBackReferences[pGpu->numSubdeviceBackReferences++] = pSubdevice;
+    return NV_OK;
+}
+
+void gpuUnregisterSubdevice_IMPL(OBJGPU *pGpu, Subdevice *pSubdevice)
+{
+    NvU32 i;
+    for (i = 0; i < pGpu->numSubdeviceBackReferences; i++)
+    {
+        if (pGpu->pSubdeviceBackReferences[i] == pSubdevice)
+        {
+            pGpu->numSubdeviceBackReferences--;
+            pGpu->pSubdeviceBackReferences[i] =
pGpu->pSubdeviceBackReferences[pGpu->numSubdeviceBackReferences]; + pGpu->pSubdeviceBackReferences[pGpu->numSubdeviceBackReferences] = NULL; + return; + } + } + NV_ASSERT_FAILED("Subdevice not found!"); +} + +// +// For a particular gpu, find all the clients waiting for a particular event, +// fill in the notifier if allocated, and raise an event to the client if registered. +// +void +gpuNotifySubDeviceEvent_IMPL +( + OBJGPU *pGpu, + NvU32 notifyIndex, + void *pNotifyParams, + NvU32 notifyParamsSize, + NvV32 info32, + NvV16 info16 +) +{ + PEVENTNOTIFICATION pEventNotification; + THREAD_STATE_NODE *pCurThread; + NvU32 localNotifyType; + NvU32 localInfo32; + NvU32 i; + + if (NV_OK == threadStateGetCurrent(&pCurThread, pGpu)) + { + // This function shouldn't be used from lockless ISR. + // Use engineNonStallIntrNotify() to notify event from lockless ISR. + NV_ASSERT_OR_RETURN_VOID(!(pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)); + } + + NV_ASSERT(notifyIndex < NV2080_NOTIFIERS_MAXCOUNT); + + // search notifiers with events hooked up for this gpu + for (i = 0; i < pGpu->numSubdeviceBackReferences; i++) + { + Subdevice *pSubdevice = pGpu->pSubdeviceBackReferences[i]; + INotifier *pNotifier = staticCast(pSubdevice, INotifier); + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + // + // For SMC, partitioned engines have partition local IDs and events are + // registered using partition localId while RM deals with global Ids. + // Convert global to partition local if necessary + // + localNotifyType = notifyIndex; + localInfo32 = info32; + + if (pSubdevice->notifyActions[localNotifyType] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + continue; + } + + pEventNotification = inotifyGetNotificationList(pNotifier); + if (pEventNotification != NULL) + { + // ping any events on the list of type notifyIndex + osEventNotificationWithInfo(pGpu, pEventNotification, localNotifyType, localInfo32, info16, + pNotifyParams, notifyParamsSize); + } + + // reset if single shot notify action + if (pSubdevice->notifyActions[localNotifyType] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE) + { + if (notifyIndex == NV2080_NOTIFIERS_FIFO_EVENT_MTHD) + { + NV_ASSERT(pGpu->activeFifoEventMthdNotifiers); + pGpu->activeFifoEventMthdNotifiers--; + } + + pSubdevice->notifyActions[localNotifyType] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + } +} + + +// +// Searches the Pid Array to see if the process this client belongs to is already +// in the list. +// +static NvBool +_gpuiIsPidSavedAlready +( + NvU32 pid, + NvU32 *pPidArray, + NvU32 pidCount +) +{ + NvU32 j; + + for (j = 0; j < pidCount; j++) + { + if (pid == pPidArray[j]) + return NV_TRUE; + } + return NV_FALSE; +} + +// +// Searches through clients to find processes with clients that have +// allocated an ElementType of class, defined by elementID. The return values +// are the array containing the PIDs for the processes and the count for the +// array. 
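+// At most NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT PIDs are reported; the scan
+// stops once the caller's array is full.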
+// If a valid partitionRef is provided, the scope of search gets limited to a +// partition +// +NV_STATUS +gpuGetProcWithObject_IMPL +( + OBJGPU *pGpu, + NvU32 elementID, + NvU32 internalClassId, + NvU32 *pPidArray, + NvU32 *pPidArrayCount, + MIG_INSTANCE_REF *pRef +) +{ + NvU32 pidcount = 0; + NvHandle hClient; + Device *pDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN((pPidArray != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pPidArrayCount != NULL), NV_ERR_INVALID_ARGUMENT); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + NvBool elementInClient = NV_FALSE; + RS_ITERATOR iter; + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(*ppClient); + + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + hClient = pRsClient->hClient; + + // Skip reporting of kernel mode and internal RM clients + if ((privLevel >= RS_PRIV_LEVEL_KERNEL) && rmclientIsAdmin(pClient, privLevel)) + continue; + + if (_gpuiIsPidSavedAlready(pClient->ProcID, pPidArray, pidcount)) + continue; + + if (deviceGetByGpu(pRsClient, pGpu, NV_TRUE /* bAnyInGroup */, &pDevice) != NV_OK) + continue; + + iter = serverutilRefIter(hClient, NV01_NULL_OBJECT, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + + // + // At this point it has been determined that the client's subdevice + // is associated with the Gpu of interest, and it is not already + // included in the pidArray. In the call, objects belonging to the + // client are returned. If any object in the client belongs to + // the class being queried, then that process is added to the array. + // + while (clientRefIterNext(iter.pClient, &iter)) + { + pResourceRef = iter.pResourceRef; + + if (!objDynamicCastById(pResourceRef->pResource, internalClassId)) + continue; + + switch (internalClassId) + { + + case (classId(Device)): + case (classId(Subdevice)): + { + // + // It has been already verified that the client's subdevice + // or device is associated with the GPU of interest. + // Hence, Just add the client->pid into the list. + // + elementInClient = NV_TRUE; + break; + } + case (classId(MpsApi)): + { + elementInClient = NV_TRUE; + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + if (elementInClient) + { + pPidArray[pidcount] = pClient->ProcID; + pidcount++; + + if (pidcount == NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT) + { + NV_PRINTF(LEVEL_ERROR, + "Maximum PIDs reached. Returning.\n"); + + goto done; + } + + break; + } + } + } +done: + *pPidArrayCount = pidcount; + + return NV_OK; +} + +// +// _gpuCollectMemInfo +// +// Retrieves all the FB memory allocated for that client and returned as *pData. +// If the input parameter bIsGuestProcess is true, that means we are on VGX host +// and the caller is trying to find FB memory usage of a process which is +// running inside a VM. 
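+// Accounting is split by sharing state: a memdesc with DupCount == 1 counts
+// as memPrivate, the owner of a shared memdesc counts as memSharedOwned, and
+// any other dup counts as memSharedDuped.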
+// +static void +_gpuCollectMemInfo +( + NvHandle hClient, + NvHandle hDevice, + Heap *pTargetedHeap, + NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA *pData, + NvBool bIsGuestProcess, + NvBool bGlobalInfo +) +{ + RS_ITERATOR iter; + Memory *pMemory = NULL; + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN_VOID(pData != NULL); + + iter = serverutilRefIter(hClient, NV01_NULL_OBJECT, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + + while (clientRefIterNext(iter.pClient, &iter)) + { + pResourceRef = iter.pResourceRef; + pMemory = dynamicCast(pResourceRef->pResource, Memory); + + if (!pMemory) + continue; + + // In case we are trying to find memory allocated by a process running + // on a VM - the case where isGuestProcess is true, only consider the + // memory : + // 1. which is allocated by the guest VM or by a process running in it. + // 2. if the memory is not tagged with NVOS32_TYPE_UNUSED type. + // Windows KMD and Linux X driver makes dummy allocations which is + // done using NV01_MEMORY_LOCAL_USER class with rmAllocMemory() + // function. + // On VGX, while passing this allocation in RPC, we use the memory + // type NVOS32_TYPE_UNUSED. So while calculating the per process FB + // usage, only consider the allocation if memory type is not + // NVOS32_TYPE_UNUSED. + if ((pResourceRef->externalClassId == NV01_MEMORY_LOCAL_USER || + pResourceRef->externalClassId == NV01_MEMORY_LIST_FBMEM || + pResourceRef->externalClassId == NV01_MEMORY_LIST_OBJECT || + pResourceRef->externalClassId == NV01_MEMORY_HW_RESOURCES) && + (pMemory->categoryClassId == NV01_MEMORY_LOCAL_USER) && + (bGlobalInfo || (pMemory->pHeap == pTargetedHeap)) && + (RES_GET_HANDLE(pMemory->pDevice) == hDevice) && + (pMemory->pMemDesc != NULL) && + ((!bIsGuestProcess && (!memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_LIST_MEMORY))) || + (bIsGuestProcess && (memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) && (pMemory->Type != NVOS32_TYPE_UNUSED)))) + { + + if (pMemory->pMemDesc->DupCount == 1) + { + pData->memPrivate += pMemory->Length; + } + else if (pMemory->isMemDescOwner) + { + pData->memSharedOwned += pMemory->Length; + } + else + { + pData->memSharedDuped += pMemory->Length; + } + } + } +} + +// +// This function takes in the PID for the process of interest, and queries all +// clients for elementType. The 64-bit Data is updated by specific functions +// which handle queries for different elementTypes. 
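+//
+// Illustrative call (hypothetical PID and arguments): gather GPU-wide video
+// memory usage for process 1234:
+//
+//     NV2080_CTRL_GPU_PID_INFO_DATA data;
+//     NV2080_CTRL_SMC_SUBSCRIPTION_INFO smcInfo;
+//     portMemSet(&data, 0, sizeof(data));
+//     portMemSet(&smcInfo, 0, sizeof(smcInfo));
+//     gpuFindClientInfoWithPidIterator(pGpu, 1234, 0, classId(Memory),
+//                                      &data, &smcInfo, NULL, NV_TRUE);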
+// +NV_STATUS +gpuFindClientInfoWithPidIterator_IMPL +( + OBJGPU *pGpu, + NvU32 pid, + NvU32 subPid, + NvU32 internalClassId, + NV2080_CTRL_GPU_PID_INFO_DATA *pData, + NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, + MIG_INSTANCE_REF *pRef, + NvBool bGlobalInfo +) +{ + NvHandle hClient; + Device *pDevice; + NvHandle hDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU32 computeInstanceId = PARTITIONID_INVALID; + NvU32 gpuInstanceId = PARTITIONID_INVALID; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN((pid != 0), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pData != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pSmcInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (((subPid == 0) && (pClient->ProcID == pid)) || + ((subPid != 0) && (pClient->ProcID == pid) && (pClient->SubProcessID == subPid))) + { + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pClient); + hClient = pRsClient->hClient; + + // Skip reporting of kernel mode and internal RM clients + if ((privLevel >= RS_PRIV_LEVEL_KERNEL) && rmclientIsAdmin(pClient, privLevel)) + continue; + + if (deviceGetByGpu(pRsClient, pGpu, NV_TRUE, &pDevice) != NV_OK) + continue; + + hDevice = RES_GET_HANDLE(pDevice); + + switch (internalClassId) + { + case (classId(Memory)): + { + // TODO - + // When single process spanning across multiple GI or CI by creating multiple + // clients, RM needs to provide the unique list being used by the client + _gpuCollectMemInfo(hClient, hDevice, pHeap, + &pData->vidMemUsage, ((subPid != 0) ? NV_TRUE : NV_FALSE), + bGlobalInfo); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + } + + pSmcInfo->computeInstanceId = computeInstanceId; + pSmcInfo->gpuInstanceId = gpuInstanceId; + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c new file mode 100644 index 0000000..013f49e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief T234D / DCE client specific kernel stubs + */ + +#include "core/core.h" +#include "gpu/gpu.h" + +NvU32 +gpuGetPhysAddrWidth_T234D(OBJGPU *pGpu, NV_ADDRESS_SPACE addrSp) +{ + // Currently we support retrieving sys mem addr width only. + NV_ASSERT_OR_RETURN(ADDR_SYSMEM == addrSp, 0); + return 39; +} + +NV_STATUS +gpuGetNameString_T234D +( + OBJGPU *pGpu, + NvU32 type, + void *nameStringBuffer +) +{ + const char name[] = "T234D"; + const NvU32 inputLength = NV2080_GPU_MAX_NAME_STRING_LENGTH; + + if (type == NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII) + { + portStringCopy(nameStringBuffer, inputLength, name, sizeof(name)); + } + else + { + portStringConvertAsciiToUtf16(nameStringBuffer, inputLength, name, sizeof(name)); + } + + return NV_OK; +} + +NV_STATUS +gpuGetShortNameString_T234D +( + OBJGPU *pGpu, + NvU8 *nameStringBuffer +) +{ + return gpuGetNameString_T234D(pGpu, NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII, nameStringBuffer); +} + +NvU32 gpuGetSimulationModeHal_T234D(OBJGPU *pGpu) +{ + return NV_SIM_MODE_INVALID; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c new file mode 100644 index 0000000..4a0d26e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c @@ -0,0 +1,541 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief GPU Timeout related routines. + */ + +/* ------------------------ Includes ---------------------------------------- */ + +#include "lib/base_utils.h" +#include "gpu/gpu.h" +#include "objtmr.h" +#include "nvrm_registry.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "gpu_mgr/gpu_mgr.h" + +/* ------------------------ Public Functions ------------------------------- */ + +/*! + * @brief Initializes default timeout values from a provided GPU. 
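+ *
+ * Illustrative use (hypothetical caller):
+ *
+ *     TIMEOUT_DATA td;
+ *     portMemSet(&td, 0, sizeof(td));
+ *     timeoutInitializeGpuDefault(&td, pGpu); // td.defaultus now holds the
+ *                                             // scaled default, in usec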
+ */ +void +timeoutInitializeGpuDefault +( + TIMEOUT_DATA *pTD, + OBJGPU *pGpu +) +{ + NvU32 timeoutDefault; + + pTD->pGpu = pGpu; + + // Set default timeout mode before loading HAL state + osGetTimeoutParams(pGpu, &timeoutDefault, &(pTD->scale), &(pTD->defaultFlags)); + if (!pTD->bDefaultOverridden) + { + pTD->defaultResetus = timeoutDefault; + pTD->defaultus = timeoutDefault; + pTD->bScaled = NV_FALSE; + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the host, use the OS + // timer by default in the guest OS (where IS_VIRTUAL(pGpu) is true), + // as it (hopefully) tracks a VM's actual time executing + // (vs. reading the HW PTIMER which'll be too fast). + // SOC NvDisplay: + // SOC NvDisplay doesn't have HW timer so use OSTIMER as default + // + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + } + + // Using this boolean to ensure defaultus isn't scaled more than once. + if (!pTD->bScaled) + { + pTD->defaultus = gpuScaleTimeout(pGpu, pTD->defaultus); + pTD->bScaled = NV_TRUE; + } + + // + // Note we need to call threadStateResetTimeout() now that the timeout + // mechanism and values are known to allow threadStateCheckTimeout() + // to work after this point during init. + // + threadStateInitTimeout(pGpu, pTD->defaultus, pTD->defaultFlags); + threadStateResetTimeout(pGpu); +} + +/*! + * @brief Applies external timeout override based on registry values. + */ +void +timeoutRegistryOverride +( + TIMEOUT_DATA *pTD, + OBJGPU *pGpu +) +{ + NvU32 data32 = 0; + + // Override timeout value + if ((osReadRegistryDword(pGpu, + NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT, + &data32) == NV_OK) && (data32 != 0)) + { + // Handle 32-bit overflow. + if (data32 > (NV_U32_MAX / 1000)) + { + pTD->defaultus = NV_U32_MAX; + pTD->defaultResetus = NV_U32_MAX; + } + else + { + // Convert to [us] + pTD->defaultus = data32 * 1000; + pTD->defaultResetus = data32 * 1000; + } + pTD->bDefaultOverridden = NV_TRUE; + NV_PRINTF(LEVEL_ERROR, "Overriding default timeout to 0x%08x\n", + pTD->defaultus); + } + + // Override timeout flag values + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS, + &data32) == NV_OK) + { + switch (data32) + { + case NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSDELAY: + { + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSDELAY; + break; + } + + case NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSTIMER: + { + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "Unknown TIMEOUT_FLAGS value: 0x%08x\n", + data32); + NV_ASSERT(0); + } + } + + NV_PRINTF(LEVEL_ERROR, "Overriding default flags to 0x%08x\n", + pTD->defaultFlags); + } +} + +/*! + * @brief Applies external timeout override. + */ +void +timeoutOverride +( + TIMEOUT_DATA *pTD, + NvBool bOverride, + NvU32 timeoutMs +) +{ + pTD->bDefaultOverridden = bOverride; + + pTD->defaultus = bOverride ? (timeoutMs * 1000) : pTD->defaultResetus; +} + +/*! + * @brief Initialize the RMTIMEOUT structure with the selected timeout scheme. + */ +void +timeoutSet +( + TIMEOUT_DATA *pTD, + RMTIMEOUT *pTimeout, + NvU32 timeoutUs, + NvU32 flags +) +{ + OBJTMR *pTmr; + NvU64 timeInNs; + NvU64 timeoutNs; + + portMemSet(pTimeout, 0, sizeof(*pTimeout)); + + // + // Note that if GPU_TIMEOUT_DEFAULT is used we will go through + // threadStateCheckTimeout rather than timeoutCheck as we do + // not want to have "stacked" gpuSetTimeouts. 
The intent of + // GPU_TIMEOUT_DEFAULT was to cover the *entire* RM API stack. + // If GPU_TIMEOUT_DEFAULT was specified, this is essentially a + // NULL operation other than setting the flags to route us to + // threadStateCheckTimeout. This can be overridden by + // setting GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE. + // + pTimeout->flags = flags; + if ((flags == 0) || (flags & GPU_TIMEOUT_FLAGS_DEFAULT) || + !(flags & (GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_OSDELAY | + GPU_TIMEOUT_FLAGS_TMR | GPU_TIMEOUT_FLAGS_TMRDELAY))) + { + pTimeout->flags |= pTD->defaultFlags; + } + + if (timeoutUs == GPU_TIMEOUT_DEFAULT) + { + timeoutUs = pTD->defaultus; + + // + // Use the ThreadState by default if GPU_TIMEOUT_DEFAULT was specified + // unless we were told explicitly not to. + // ThreadState only supports OSTIMER and OSDELAY + // + if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE) && + (pTimeout->flags & (GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_OSDELAY))) + { + pTimeout->flags |= GPU_TIMEOUT_FLAGS_USE_THREAD_STATE; + } + } + + // Set end time for elapsed time methods + timeoutNs = (NvU64)timeoutUs * 1000; + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + // + // For small timeouts (timeout durations on the order of magnitude of + // the OS tick resolution), starting the timeout near the end of a tick + // could cause a premature timeout since the start time is determined + // by the start of the tick. Mitigate this by always padding the + // timeout using the OS tick resolution, to bump us to the next tick. + // + timeoutNs += osGetTickResolution(); + + osGetCurrentTick(&timeInNs); + + pTimeout->pTmrGpu = NULL; + pTimeout->timeout = timeInNs + timeoutNs; + } + else if ((pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR) || + (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMRDELAY)) + { + OBJGPU *pGpu = pTD->pGpu; + NV_ASSERT_OR_RETURN_VOID(pGpu != NULL); + + OBJGPU *pParentGpu = gpumgrGetParentGPU(pGpu); + + // + // Set timer GPU to primary GPU for accurate timeout with SLI loop. But only + // use the primary GPU if it is in full power mode or in the process of resuming. + // Also don't use the primary if it is in full chip reset. + // + if (gpumgrIsParentGPU(pGpu) || + ((gpuIsGpuFullPower(pParentGpu) == NV_FALSE) && + !pParentGpu->getProperty(pParentGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) || + pParentGpu->getProperty(pParentGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + pTimeout->pTmrGpu = pGpu; + } + else + { + pTimeout->pTmrGpu = pParentGpu; + } + + pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu); + NV_ASSERT_OR_RETURN_VOID(pTmr != NULL); + + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR) + { + + // nanoseconds + tmrGetCurrentTime(pTmr, &pTimeout->timeout); + pTimeout->timeout += timeoutNs; + } + else // GPU_TIMEOUT_FLAGS_TMRDELAY + { + pTimeout->timeout = timeoutUs; + } + } + else + { + pTimeout->pTmrGpu = NULL; + pTimeout->timeout = timeoutUs; + } +} + +/*! + * We typically only use this code if a time other than GPU_TIMEOUT_DEFAULT + * was specified. For GPU_TIMEOUT_DEFAULT we use threadStateCheckTimeout. + * The logic in the _threadNodeCheckTimeout() should closely resemble that + * of the _checkTimeout(). 
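+ *
+ * Summary of the flag dispatch below:
+ *   OSTIMER  - compares osGetCurrentTick() against an absolute deadline
+ *   OSDELAY  - delays ~100us per call and decrements a remaining budget
+ *   TMR      - delays ~5us on PTIMER, then compares PTIMER time against an
+ *              absolute deadline
+ *   TMRDELAY - delays ~5us on PTIMER and decrements a remaining budget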
+ */
+static NV_STATUS
+_checkTimeout
+(
+    RMTIMEOUT *pTimeout
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJTMR *pTmr;
+    NvU64 current;
+    NvU64 timeInNs;
+
+    if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSTIMER)
+    {
+        osGetCurrentTick(&timeInNs);
+        if (timeInNs >= pTimeout->timeout)
+        {
+            NV_PRINTF(LEVEL_INFO, "OS elapsed %llx >= %llx\n",
+                      timeInNs, pTimeout->timeout);
+            status = NV_ERR_TIMEOUT;
+        }
+    }
+    else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSDELAY)
+    {
+        osDelayUs(100);
+
+        //
+        // TODO: Bug: 3312158 - Isolate the fix timeout logic to emulation.
+        // This is because of the numerous timeout issues exposed in DVS.
+        // Emulation requires this to make sure we are not wasting emulation
+        // resources by waiting for timeouts too long.
+        // Once DVS issues are fixed, this fix will be enabled for all
+        // platforms.
+        //
+        if ((pTimeout->pTmrGpu != NULL) && (IS_EMULATION(pTimeout->pTmrGpu)))
+        {
+            //
+            // Adjust the remaining time. Note that the remaining time is in
+            // nanosecond units for GPU_TIMEOUT_FLAGS_OSDELAY.
+            //
+            pTimeout->timeout -= NV_MIN(100ULL * 1000ULL, pTimeout->timeout);
+        }
+        else
+        {
+            pTimeout->timeout -= NV_MIN(100ULL, pTimeout->timeout);
+        }
+
+        if (pTimeout->timeout == 0)
+        {
+            NV_PRINTF(LEVEL_INFO, "OS timeout == 0\n");
+            status = NV_ERR_TIMEOUT;
+        }
+    }
+    else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR)
+    {
+        NV_ASSERT_OR_RETURN(pTimeout->pTmrGpu != NULL, NV_ERR_INVALID_STATE);
+        if (!API_GPU_ATTACHED_SANITY_CHECK(pTimeout->pTmrGpu))
+            return NV_ERR_TIMEOUT;
+
+        pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu);
+        NV_ASSERT_OR_RETURN(pTmr != NULL, NV_ERR_INVALID_STATE);
+
+        tmrDelay(pTmr, 5ULL * 1000ULL);
+        tmrGetCurrentTime(pTmr, &current);
+
+        if (current >= pTimeout->timeout)
+        {
+            NV_PRINTF(LEVEL_ERROR, "ptmr elapsed %llx >= %llx\n",
+                      current, pTimeout->timeout);
+            status = NV_ERR_TIMEOUT;
+        }
+    }
+    else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMRDELAY)
+    {
+        NV_ASSERT_OR_RETURN(pTimeout->pTmrGpu != NULL, NV_ERR_INVALID_STATE);
+        if (!API_GPU_ATTACHED_SANITY_CHECK(pTimeout->pTmrGpu))
+            return NV_ERR_TIMEOUT;
+
+        pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu);
+        NV_ASSERT_OR_RETURN(pTmr != NULL, NV_ERR_INVALID_STATE);
+
+        tmrDelay(pTmr, 5ULL * 1000ULL);
+        pTimeout->timeout -= NV_MIN(5, pTimeout->timeout);
+
+        if (pTimeout->timeout == 0)
+        {
+            NV_PRINTF(LEVEL_INFO, "ptmr timeout == 0\n");
+            status = NV_ERR_TIMEOUT;
+        }
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_ERROR, "Invalid timeout flags 0x%08x\n",
+                  pTimeout->flags);
+        DBG_BREAKPOINT();
+        status = NV_ERR_INVALID_STATE;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Check if the passed-in RMTIMEOUT struct has expired.
+ */
+NV_STATUS
+timeoutCheck
+(
+    TIMEOUT_DATA *pTD,
+    RMTIMEOUT *pTimeout,
+    NvU32 lineNum
+)
+{
+    OBJGPU *pGpu = pTD->pGpu;
+    NV_STATUS status = NV_OK;
+
+    NV_ASSERT(pTimeout != NULL);
+
+    if ((pGpu != NULL) && API_GPU_IN_RESET_SANITY_CHECK(pGpu))
+        return NV_ERR_TIMEOUT;
+
+    if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD))
+    {
+        threadStateYieldCpuIfNecessary(pGpu);
+    }
+
+    //
+    // Note that if GPU_TIMEOUT_DEFAULT is used we will go through
+    // threadStateCheckTimeout rather than timeoutCheck as we do
+    // not want to have "stacked" gpuSetTimeouts. The intent of
+    // GPU_TIMEOUT_DEFAULT is to cover the *entire* RM API stack.
+    // If we are going through the case below, we should have just
+    // called threadStateCheckTimeout directly rather than
+    // timeoutCheck.
+    //
+
+    // If local timeout check was intended, check that first.
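+    // The thread-state timeout is still consulted afterwards unless
+    // GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE is set.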
+    if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_USE_THREAD_STATE))
+    {
+        status = _checkTimeout(pTimeout);
+        if (status == NV_ERR_TIMEOUT)
+        {
+            // Mark that this Timeout is the result of a local timeout
+            pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT;
+        }
+    }
+
+    //
+    // Always check for the thread timeout in addition to any local timeout
+    // unless we have EXPLICITLY been instructed not to by a timeout flag.
+    //
+    if ((status != NV_ERR_TIMEOUT) && !(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE))
+    {
+        status = threadStateCheckTimeout(pGpu, NULL /*pElapsedTime*/);
+
+        if (status == NV_ERR_TIMEOUT)
+        {
+            // Mark that this Timeout is the result of ThreadState
+            pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_THREAD_STATE_TIMEOUT;
+        }
+        else if (status != NV_OK)
+        {
+            // Try the local timeout as a fallback, unless it was already checked.
+            if (pTimeout->flags & GPU_TIMEOUT_FLAGS_USE_THREAD_STATE)
+            {
+                status = _checkTimeout(pTimeout);
+                if (status == NV_ERR_TIMEOUT)
+                {
+                    // Mark that this Timeout is the result of a local timeout
+                    pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT;
+                }
+            }
+        }
+    }
+
+    // Throttle priority of boosted threads if necessary
+    threadPriorityThrottle();
+
+    // Log the Timeout in the RM Journal
+    if ((status == NV_ERR_TIMEOUT) &&
+        !(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG))
+    {
+        NvU64 funcAddr = (NvU64) (NV_RETURN_ADDRESS());
+        threadStateLogTimeout(pGpu, funcAddr, lineNum);
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Wait for a condition function to return NV_TRUE or timeout.
+ *
+ * @param[in] pTD        Timeout data
+ * @param[in] pTimeout   RM timeout structure to be used, or NULL to use the default timeout
+ * @param[in] pCondFunc  Function implementing the condition check to wait for
+ * @param[in] pCondData  An optional param to @ref pCondFunc (NULL if unused)
+ *
+ * @return NV_OK            Condition met within the provided timeout period.
+ * @return NV_ERR_TIMEOUT   Timed out while waiting for the condition.
+ *
+ * @note This interface addresses the recurring problem of reporting a
+ *       timeout when the condition is actually met. That can happen since RM
+ *       can get preempted by the OS at any time during execution. It is
+ *       addressed by one additional condition check before exit when a
+ *       timeout has been detected.
+ */
+NV_STATUS
+timeoutCondWait
+(
+    TIMEOUT_DATA         *pTD,
+    RMTIMEOUT            *pTimeout,
+    GpuWaitConditionFunc *pCondFunc,
+    void                 *pCondData,
+    NvU32                 lineNum
+)
+{
+    OBJGPU *pGpu = pTD->pGpu;
+    NV_STATUS status = NV_OK;
+    RMTIMEOUT timeout;
+
+    if (pTimeout == NULL)
+    {
+        timeoutSet(pTD, &timeout, GPU_TIMEOUT_DEFAULT, 0);
+        pTimeout = &timeout;
+    }
+
+    while (!pCondFunc(pGpu, pCondData))
+    {
+        osSpinLoop();
+
+        status = timeoutCheck(pTD, pTimeout, lineNum);
+        if (status != NV_OK)
+        {
+            if ((status == NV_ERR_TIMEOUT) &&
+                pCondFunc(pGpu, pCondData))
+            {
+                status = NV_OK;
+            }
+            break;
+        }
+    }
+
+    return status;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c
new file mode 100644
index 0000000..c259e13
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c
@@ -0,0 +1,317 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "core/core.h"
+#include "gpu/gpu_uuid.h"
+#include "ctrl/ctrl2080/ctrl2080gpu.h"
+#include "os/os.h"
+#include "nvSha1.h"
+
+/**
+ * @brief Transforms a raw GPU ID into an ASCII string of the form
+ *        "GPU-%08x-%04x-%04x-%04x-%012x" (SHA-1)
+ *
+ * @param[in]  pGidData    Raw GID from OBJPMU/OBJBIF
+ * @param[in]  gidSize     Size of the raw ID
+ * @param[out] ppGidString Return pointer for the GID string
+ * @param[out] pGidStrlen  Return pointer for the GID string length
+ * @param[in]  gidFlags    NV2080_GPU_CMD_GPU_GET_GID_FLAGS values: selects
+ *                         SHA-1 only
+ *
+ * @returns NV_OK on success; an NV_ERR_* code otherwise.
+ */
+NV_STATUS
+transformGidToUserFriendlyString
+(
+    const NvU8 *pGidData,
+    NvU32       gidSize,
+    NvU8      **ppGidString,
+    NvU32      *pGidStrlen,
+    NvU32       gidFlags
+)
+{
+    NvUuid uuid;
+
+    if (!FLD_TEST_DRF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1,gidFlags))
+    {
+        return NV_ERR_INVALID_FLAGS;
+    }
+
+    NV_ASSERT(NV_UUID_LEN == gidSize);
+
+    portMemCopy(uuid.uuid, NV_UUID_LEN, pGidData, gidSize);
+
+    *ppGidString = portMemAllocNonPaged(NV_UUID_STR_LEN);
+    if (*ppGidString == NULL)
+    {
+        return NV_ERR_NO_MEMORY;
+    }
+
+    nvGetGpuUuidString(&uuid, (char*)*ppGidString);
+    *pGidStrlen = NV_UUID_STR_LEN;
+
+    return NV_OK;
+}
+
+static NvU32
+_nvCopyUuid
+(
+    NvU8  *pBuff,
+    NvU32  index,
+    NvU32  size,
+    void  *pInfo
+)
+{
+    NvU8 *pBytes = pInfo;
+    portMemCopy(pBuff, size, pBytes + index, size);
+    return size;
+}
+
+/**
+ * @brief Generates SHA1 UUID for a GPU or a MIG instance.
+ *
+ * The UUID will be computed as SHA1(message) where the message is as follows:
+ *
+ * offset  00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
+ * value   c8 16 c9 a3 52 24 56 bf 9d 9a ac 7e a7 03 fb 5b
+ *
+ * offset  16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * value    N  V  I  D  I  A ''  G  P  U 02  x  x 08  y  y
+ *
+ * offset  32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
+ * value    y  y  y  y  y  y 0b  S  M  C  z  z  z  z  p  p
+ *
+ * offset  48 49
+ * value    p  p
+ */
+/**
+ * where,
+ *   Char is the byte value in ASCII encoding ('' is space = 0x20)
+ *   Number is the numeric byte value in hex (0x02)
+ *   xx is the chip id in little-endian format.
+ *   The chip ID is ARCH+IMPL. For example: 0x017B for GA10B
+ *   yyyyyyyy is the 64-bit PDI in little endian. PDI = (PDI_1 << 32) OR PDI_0.
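+ *
+ *   As a worked example (illustrative values only): chipId 0x017B would be
+ *   stored at offsets 27..28 as the bytes 7B 01, and pdi 0x1122334455667788
+ *   at offsets 30..37 as 88 77 66 55 44 33 22 11 (least significant byte
+ *   first), matching the portUtilWriteLittleEndian* calls below.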
+ * + * Additionally, when fractional GPU with MIG is used, and the MIG + * configurations are exposed as separate logical devices, the following bytes + * are appended in the message: + * + * zzzz is the numeric value of the swizzle id (32-bit little-endian) + * pppp is the numeric value of the graphics engine physical + * sys pipe ID (32-bit little-endian) + * + * See bug 3028068 for more details. + * + * @param[in] bMIG "MIG" or "GPU" UUID prefix + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[in] swizzId MIG GPU instance swizz ID (only needed for MIG) + * @param[in] syspipeId MIG GPU instance syspipe ID (only needed for MIG) + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ + +#define UUID_MESSAGE_SIZE 50 +#define GPU_UUID_MESSAGE_SIZE 38 +#define SMC_UUID_MESSAGE_SIZE UUID_MESSAGE_SIZE + +static const NvU8 uuidMessage[UUID_MESSAGE_SIZE] = +{ + 0xc8, 0x16, 0xc9, 0xa3, 0x52, 0x24, 0x56, 0xbf, 0x9d, 0x9a, 0xac, + 0x7e, 0xa7, 0x03, 0xfb, 0x5b, 'N', 'V', 'I', 'D', 'I', 'A', + ' ', 'G', 'P', 'U', 0x02, 'x', 'x', 0x08, 'y', 'y', 'y', + 'y', 'y', 'y', 'y', 'y', 0x0b, 'S', 'M', 'C', 'z', 'z', + 'z', 'z', 'p', 'p', 'p', 'p' +}; + +static NV_STATUS +_nvGenerateUuid +( + NvBool bMIG, + NvU16 chipId, + NvU64 pdi, + NvU32 swizzId, + NvU32 syspipeId, + NvUuid *pUuid +) +{ + NvU8 *pSha1Digest; + NvU8 *pMessage; + NvU32 messageSize = GPU_UUID_MESSAGE_SIZE; + + pSha1Digest = portMemAllocNonPaged(NV_SHA1_DIGEST_LENGTH + + UUID_MESSAGE_SIZE); + if (pSha1Digest == NULL) + { + return NV_ERR_NO_MEMORY; + } + + pMessage = pSha1Digest + NV_SHA1_DIGEST_LENGTH; + + portMemCopy(pMessage, UUID_MESSAGE_SIZE, uuidMessage, UUID_MESSAGE_SIZE); + + portUtilWriteLittleEndian16(&pMessage[27], chipId); + portUtilWriteLittleEndian64(&pMessage[30], pdi); + + if (bMIG) + { + portUtilWriteLittleEndian32(&pMessage[42], swizzId); + portUtilWriteLittleEndian32(&pMessage[46], syspipeId); + + messageSize = SMC_UUID_MESSAGE_SIZE; + } + + // UUID strings only use the first 16 bytes of the 20-byte SHA-1 digest. + sha1Generate(pSha1Digest, pMessage, messageSize, _nvCopyUuid); + portMemCopy(pUuid->uuid, NV_UUID_LEN, pSha1Digest, NV_UUID_LEN); + + // version 5 - SHA1-based + pUuid->uuid[6] = (pUuid->uuid[6] & 0x0f) | 0x50; + // variant 1 - network byte ordering + pUuid->uuid[8] = (pUuid->uuid[8] & 0x3f) | 0x80; + + portMemFree(pSha1Digest); + + return NV_OK; +} + +/** + * @brief Generates SHA1 UUID for GPU. + * + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ +NV_STATUS +nvGenerateGpuUuid +( + NvU16 chipId, + NvU64 pdi, + NvUuid *pUuid +) +{ + return _nvGenerateUuid(NV_FALSE, chipId, pdi, 0, 0, pUuid); +} + +/** + * @brief Generates SHA1 UUID for MIG instance. 
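+ *        (SMC is the internal name used in this file for the partitioning
+ *        feature exposed externally as MIG, hence the nvGenerateSmcUuid name.)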
+ *
+ * @param[in]  chipId    GPU chip ID
+ * @param[in]  pdi       GPU PDI
+ * @param[in]  swizzId   MIG GPU instance swizz ID (only needed for _TYPE_SMC)
+ * @param[in]  syspipeId MIG GPU instance syspipe ID (only needed for _TYPE_SMC)
+ * @param[out] pUuid     UUID
+ *
+ * @returns NV_OK upon success, otherwise returns NV_ERR_*
+ */
+NV_STATUS
+nvGenerateSmcUuid
+(
+    NvU16   chipId,
+    NvU64   pdi,
+    NvU32   swizzId,
+    NvU32   syspipeId,
+    NvUuid *pUuid
+)
+{
+    return _nvGenerateUuid(NV_TRUE, chipId, pdi, swizzId, syspipeId, pUuid);
+}
+
+/**
+ * @brief Gets UUID ASCII string, "GPU-%08x-%04x-%04x-%04x-%012x"
+ *        (SHA-1) or "MIG-%08x-%04x-%04x-%04x-%012x" (SHA-1)
+ *
+ * @param[in]  bMIG     "MIG" or "GPU" UUID prefix
+ * @param[in]  pUuid    UUID
+ * @param[out] pUuidStr Returns UUID string
+ *
+ * @returns void
+ */
+static void
+_nvGetUuidString
+(
+    NvBool        bMIG,
+    const NvUuid *pUuid,
+    char         *pUuidStr
+)
+{
+    const NvU32 sha1GroupEntryNum[] = { 8, 4, 4, 4, 12 };
+    const NvU32 *pGroupEntryNum;
+    const NvU32 extraSymbolLen = 9;   // 'G' 'P' 'U', '-' (x5), '\0', total = 9
+    const NvU8 prefixLen = 4;
+    const char *pPrefix;
+    NvU32 groupCount;
+    NvU32 expectedStringLength = (NV_UUID_LEN << 1) + extraSymbolLen;
+
+    pGroupEntryNum = sha1GroupEntryNum;
+    groupCount = NV_ARRAY_ELEMENTS(sha1GroupEntryNum);
+
+    pPrefix = bMIG ? "MIG-" : "GPU-";
+    portMemCopy(pUuidStr, prefixLen, pPrefix, prefixLen);
+    pUuidStr += prefixLen;
+
+    portStringBufferToHexGroups(pUuidStr, (expectedStringLength - prefixLen),
+                                pUuid->uuid, NV_UUID_LEN,
+                                groupCount, pGroupEntryNum, "-");
+}
+
+/**
+ * @brief Gets UUID ASCII string, "GPU-%08x-%04x-%04x-%04x-%012x"
+ *        (SHA-1)
+ *
+ * @param[in]  pUuid    UUID
+ * @param[out] pUuidStr Returns UUID string
+ *
+ * @returns void
+ */
+void
+nvGetGpuUuidString
+(
+    const NvUuid *pUuid,
+    char         *pUuidStr
+)
+{
+    _nvGetUuidString(NV_FALSE, pUuid, pUuidStr);
+}
+
+/**
+ * @brief Gets UUID ASCII string, "MIG-%08x-%04x-%04x-%04x-%012x"
+ *        (SHA-1)
+ *
+ * @param[in]  pUuid    UUID
+ * @param[out] pUuidStr Returns UUID string
+ *
+ * @returns void
+ */
+void
+nvGetSmcUuidString
+(
+    const NvUuid *pUuid,
+    char         *pUuidStr
+)
+{
+    _nvGetUuidString(NV_TRUE, pUuid, pUuidStr);
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
new file mode 100644
index 0000000..f6845bc
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
@@ -0,0 +1,97 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "core/core.h"
+#include "gpu/gpu.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+
+#include "turing/tu102/dev_mmu.h"
+#include "turing/tu102/kind_macros.h"
+
+/*!
+ * @brief Returns NV_TRUE if memory kind matches the given op.
+ *
+ * @param[in] op    Kind-type to check for
+ * @param[in] kind  Value to check
+ *
+ * @return NV_TRUE if "kind" matches the kind-type specified by op,
+ *         NV_FALSE otherwise.
+ */
+NvBool
+memmgrIsKind_TU102
+(
+    MemoryManager *pMemoryManager,
+    FB_IS_KIND_OP  op,
+    NvU32          kind
+)
+{
+    switch (op)
+    {
+        case FB_IS_KIND_Z:
+            return KIND_Z(kind);
+        case FB_IS_KIND_ZBC:
+            return PTEKIND_COMPRESSIBLE(kind);
+        case FB_IS_KIND_COMPRESSIBLE:
+            return PTEKIND_COMPRESSIBLE(kind);
+        case FB_IS_KIND_ZBC_ALLOWS_1:
+        case FB_IS_KIND_ZBC_ALLOWS_2:
+        case FB_IS_KIND_COMPRESSIBLE_1:
+        case FB_IS_KIND_COMPRESSIBLE_2:
+        case FB_IS_KIND_COMPRESSIBLE_4:
+            return NV_FALSE;
+        case FB_IS_KIND_SUPPORTED:
+            return (PTEKIND_SUPPORTED(kind) && !(KIND_INVALID(kind)));
+        case FB_IS_KIND_DISALLOW_PLC:
+            return PTEKIND_DISALLOWS_PLC(kind);
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Bad op (%08x) passed in\n", op);
+            DBG_BREAKPOINT();
+            return NV_FALSE;
+    }
+}
+
+/**
+ * From Turing on, there is no pitch kind, so this function determines the
+ * type of surface from the pMemoryInfo of the allocation.
+ * Returns NV_TRUE for BL surfaces and NV_FALSE otherwise.
+ */
+NvBool
+memmgrIsSurfaceBlockLinear_TU102
+(
+    MemoryManager *pMemoryManager,
+    Memory        *pMemory,
+    NvU32          kind,
+    NvU32          dmaFlags
+)
+{
+    if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _BL, dmaFlags))
+    {
+        return NV_TRUE;
+    }
+    else if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _PITCH, dmaFlags))
+    {
+        return NV_FALSE;
+    }
+
+    return FLD_TEST_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, pMemory->Attr);
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
new file mode 100644
index 0000000..0357d28
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
@@ -0,0 +1,683 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/******************************************************************************
+*
+*   Description:
+*       This module contains the contextDma implementation.
+*
+******************************************************************************/
+
+#include "core/core.h"
+#include "gpu/gpu.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+#include "gpu/mem_mgr/context_dma.h"
+#include "gpu/mem_mgr/mem_desc.h"
+#include "gpu/disp/disp_objs.h"
+#include "gpu/disp/disp_channel.h"
+#include "gpu/disp/inst_mem/disp_inst_mem.h"
+#include "os/os.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "vgpu/rpc.h"
+#include "gpu/device/device.h"
+#include "gpu/subdevice/subdevice.h"
+#include "rmapi/rs_utils.h"
+#include "rmapi/mapping_list.h"
+
+#include "ctrl/ctrl0002.h"
+
+static NV_STATUS _ctxdmaConstruct(ContextDma *pContextDma, RsClient *, NvHandle, NvU32, NvU32, RsResourceRef *, NvU64, NvU64);
+static NV_STATUS _ctxdmaDestruct(ContextDma *pContextDma, NvHandle hClient);
+
+NV_STATUS
+ctxdmaConstruct_IMPL
+(
+    ContextDma                   *pContextDma,
+    CALL_CONTEXT                 *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS status;
+    NV_CONTEXT_DMA_ALLOCATION_PARAMS *pAllocParams = pParams->pAllocParams;
+    NvU32 cachesnoop, type, i;
+    NvBool bReadOnly;
+    RsResourceRef *pMemoryRef;
+    NvHandle hParentFromMemory;
+    RsClient *pClient = pCallContext->pClient;
+    NvHandle hSubDevice = pAllocParams->hSubDevice;
+    NvU32 hClass = pParams->externalClassId;
+    NvU32 flags = pAllocParams->flags;
+    NvU64 offset = pAllocParams->offset;
+    NvU64 limit = pAllocParams->limit;
+
+    status = clientGetResourceRef(pClient, pAllocParams->hMemory, &pMemoryRef);
+    if (status != NV_OK)
+        return status;
+
+    hParentFromMemory = pMemoryRef->pParentRef ? pMemoryRef->pParentRef->hResource : 0;
+
+    if (RES_GET_PARENT_HANDLE(pContextDma) != hParentFromMemory)
+        return NV_ERR_INVALID_OBJECT_PARENT;
+
+    // validate the flags
+    switch (flags >> DRF_SHIFT(NVOS03_FLAGS_ACCESS) & DRF_MASK(NVOS03_FLAGS_ACCESS))
+    {
+        case NVOS03_FLAGS_ACCESS_WRITE_ONLY:
+            // we don't currently have a need to distinguish write-only
+            // permissions; fall through to read/write
+
+        case NVOS03_FLAGS_ACCESS_READ_WRITE:
+            bReadOnly = NV_FALSE;
+            break;
+
+        case NVOS03_FLAGS_ACCESS_READ_ONLY:
+            bReadOnly = NV_TRUE;
+            break;
+
+        default:
+            return NV_ERR_INVALID_FLAGS;
+    }
+
+    switch (DRF_VAL(OS03, _FLAGS, _CACHE_SNOOP, flags))
+    {
+        case NVOS03_FLAGS_CACHE_SNOOP_ENABLE:
+            cachesnoop = NV_TRUE;
+            break;
+
+        case NVOS03_FLAGS_CACHE_SNOOP_DISABLE:
+            cachesnoop = NV_FALSE;
+            break;
+
+        default:
+            return NV_ERR_INVALID_FLAGS;
+    }
+
+    /*
+     * Note that the NV_OS03_FLAGS_MAPPING is an alias to
+     * the LSB of the NV_OS03_FLAGS_TYPE. And in fact if
+     * type is NV_OS03_FLAGS_TYPE_NOTIFIER (bit 20 set)
+     * then it implicitly means that NV_OS03_FLAGS_MAPPING
+     * is _MAPPING_KERNEL. If the client wants to have a
+     * Kernel Mapping, it should use the _MAPPING_KERNEL
+     * flag set and the _TYPE_NOTIFIER should be used only
+     * with NOTIFIERS.
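+     *
+     * For example (illustrative flag usage, not from this change): a
+     * notifier surface passes _TYPE_NOTIFIER, which implies a kernel
+     * mapping, while any other surface that needs a kernel CPU mapping
+     * passes _MAPPING_KERNEL explicitly.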
+ */ + type = DRF_VAL(OS03, _FLAGS, _MAPPING, flags); + + // fill in dmaInfo + pContextDma->Flags = flags; + pContextDma->bReadOnly = bReadOnly; + pContextDma->CacheSnoop = cachesnoop; + pContextDma->Type = type; + pContextDma->Limit = limit; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pContextDma->KernelVAddr); i++) + pContextDma->KernelVAddr[i] = NULL; + + pContextDma->KernelPriv = NULL; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pContextDma->FbAperture); i++) + { + pContextDma->FbAperture[i] = (NvU64)-1; + pContextDma->FbApertureLen[i] = 0; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pContextDma->Instance); i++) + { + pContextDma->Instance[i] = 0; + pContextDma->InstRefCount[i] = 0; + } + + pContextDma->pMemDesc = NULL; + pContextDma->AddressSpace = ADDR_UNKNOWN; + + // Display context dmas have always been explicitly bound. + if (DRF_VAL(OS03, _FLAGS, _HASH_TABLE, flags) == NVOS03_FLAGS_HASH_TABLE_ENABLE) + { + NV_PRINTF(LEVEL_ERROR, "HASH_TABLE=ENABLE no longer supported!\n"); + return NV_ERR_INVALID_FLAGS; + } + + status = _ctxdmaConstruct(pContextDma, pClient, hSubDevice, hClass, + flags, pMemoryRef, offset, limit); + + if (status == NV_OK) + refAddDependant(pMemoryRef, RES_GET_REF(pContextDma)); + + return status; +} + +void +ctxdmaDestruct_IMPL +( + ContextDma *pContextDma +) +{ + _ctxdmaDestruct(pContextDma, RES_GET_CLIENT_HANDLE(pContextDma)); +} + +/*! + * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM. + */ +NV_STATUS +ctxdmaCtrlCmdUpdateContextdma_IMPL +( + ContextDma *pContextDma, + NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxDmaParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pContextDma); + OBJGPU *pGpu; + KernelDisplay *pKernelDisplay; + DisplayInstanceMemory *pInstMem; + NvU64 *pNewAddress = NULL; + NvU64 *pNewLimit = NULL; + NvHandle hMemory = NV01_NULL_OBJECT; + NvU32 comprInfo; + NV_STATUS status = NV_OK; + + // + // Validate that if hCtxDma is passed in it is the same as the hCtxDma + // used for the top level RmControl hObject + // + if (pUpdateCtxDmaParams->hCtxDma != NV01_NULL_OBJECT) + NV_ASSERT_OR_RETURN(pUpdateCtxDmaParams->hCtxDma == RES_GET_HANDLE(pContextDma), NV_ERR_INVALID_OBJECT); + + if (pUpdateCtxDmaParams->hSubDevice != NV01_NULL_OBJECT) + { + Subdevice *pSubdevice; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + subdeviceGetByHandle(pClient, pUpdateCtxDmaParams->hSubDevice, &pSubdevice)); + + // Ensure requested hSubDevice is valid for the GPU associated with this contextdma + NV_CHECK_OR_RETURN(LEVEL_ERROR, pSubdevice->pDevice == pContextDma->pDevice, NV_ERR_INVALID_OBJECT_HANDLE); + + pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + else + { + pGpu = pContextDma->pGpu; + gpuSetThreadBcState(pGpu, !pContextDma->bUnicast); + } + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay == NULL) + return NV_ERR_GENERIC; + + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_BASEADDRESS, _VALID, pUpdateCtxDmaParams->flags)) + pNewAddress = &pUpdateCtxDmaParams->baseAddress; + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_LIMIT, _VALID, pUpdateCtxDmaParams->flags)) + pNewLimit = &pUpdateCtxDmaParams->limit; + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_HINT, _VALID, pUpdateCtxDmaParams->flags)) + hMemory = pUpdateCtxDmaParams->hintHandle; + + comprInfo = DRF_VAL(0002_CTRL_CMD, _UPDATE_CONTEXTDMA_FLAGS, _USE_COMPR_INFO, pUpdateCtxDmaParams->flags); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + pInstMem = 
KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay);
+
+        status = instmemUpdateContextDma_HAL(pGpu, pInstMem, pContextDma,
+                                             pNewAddress, pNewLimit, hMemory, comprInfo);
+        NV_ASSERT(status == NV_OK);
+
+    SLI_LOOP_END
+
+    return status;
+}
+
+static NV_STATUS
+_ctxdmaDestruct
+(
+    ContextDma *pContextDma,
+    NvHandle    hClient
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    OBJGPU *pGpu = NULL;
+
+    pGpu = pContextDma->pGpu;
+    NV_ASSERT_OR_RETURN(pGpu != NULL, NV_WARN_NULL_OBJECT);
+    gpuSetThreadBcState(pGpu, !pContextDma->bUnicast);
+
+    if (pContextDma->bUnicast || RES_GET_PARENT_HANDLE(pContextDma) == RES_GET_HANDLE(pContextDma->pDevice))
+    {
+        //
+        // vGPU:
+        //
+        // Since vGPU does all real hardware management in the
+        // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
+        // do an RPC to the host to do the hardware update.
+        //
+        if ((IS_VIRTUAL(pGpu) &&
+            (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && (!gpuIsWarBug200577889SriovHeavyEnabled(pGpu))))))
+        {
+            NV_RM_RPC_FREE(pGpu, hClient, RES_GET_HANDLE(pContextDma->pMemory), RES_GET_HANDLE(pContextDma), rmStatus);
+        }
+    }
+
+    // Clean up the context; first unbind from display
+    if (ctxdmaIsBound(pContextDma))
+        dispchnUnbindCtxFromAllChannels(pGpu, pContextDma);
+
+    // Handle the unicast sysmem mapping before _ctxdmaDestroyFBMappings()
+    if (pContextDma->AddressSpace == ADDR_SYSMEM)
+    {
+        NvU32 gpuDevInst = gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu));
+
+        if (pContextDma->KernelVAddr[gpuDevInst])
+        {
+            memdescUnmapOld(pContextDma->pMemory->pMemDesc, NV_TRUE, 0,
+                            pContextDma->KernelVAddr[gpuDevInst],
+                            pContextDma->KernelPriv);
+            pContextDma->KernelVAddr[gpuDevInst] = NULL;
+            pContextDma->KernelPriv = NULL;
+        }
+    }
+
+    // Ideally we'd do all of the below in RmFreeDeviceContextDma when
+    // DeviceRefCount goes to 0, but it is left here because
+    // RmFreeDeviceContextDma is also called from other places.
+    memdescFree(pContextDma->pMemDesc);
+    memdescDestroy(pContextDma->pMemDesc);
+    pContextDma->pMemDesc = NULL;
+
+    return rmStatus;
+}
+
+/*!
+ * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM.
+ */
+NV_STATUS
+ctxdmaCtrlCmdBindContextdma_IMPL
+(
+    ContextDma *pContextDma,
+    NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxDmaParams
+)
+{
+    NvHandle hChannel = pBindCtxDmaParams->hChannel;
+
+    gpuSetThreadBcState(pContextDma->pGpu, !pContextDma->bUnicast);
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pContextDma->pGpu, NV_TRUE, NV_FALSE);
+
+    //
+    // Call dispchn to alloc inst mem, write the ctxdma data, and write
+    // the hash table entry.
+    //
+    return dispchnBindCtx(pContextDma->pGpu, pContextDma, hChannel);
+}
+
+/*!
+ * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM.
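+ * (Raised IRQL rules out any blocking wait in this path, so nothing here
+ * may sleep or take paged locks.)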
+ */
+NV_STATUS
+ctxdmaCtrlCmdUnbindContextdma_IMPL
+(
+    ContextDma *pContextDma,
+    NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxDmaParams
+)
+{
+    gpuSetThreadBcState(pContextDma->pGpu, !pContextDma->bUnicast);
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pContextDma->pGpu, NV_TRUE, NV_FALSE);
+
+    return dispchnUnbindCtx(pContextDma->pGpu, pContextDma, pUnbindCtxDmaParams->hChannel);
+}
+
+static NV_STATUS
+_ctxdmaConstruct
+(
+    ContextDma    *pContextDma,
+    RsClient      *pClient,
+    NvHandle       hSubDevice,
+    NvU32          hClass,
+    NvU32          flags,
+    RsResourceRef *pMemoryRef,
+    NvU64          offset,
+    NvU64          limit
+)
+{
+    NV_STATUS rmStatus = NV_OK;
+    Memory *pMemory = NULL;
+    OBJGPU *pGpu = NULL;
+    MemoryManager *pMemoryManager = NULL;
+    MEMORY_DESCRIPTOR *pMemDesc = NULL;
+    NvHandle hDevice = 0;
+    NvHandle hClient = pClient->hClient;
+    Device *pDevice = NULL;
+
+    pMemory = dynamicCast(pMemoryRef->pResource, Memory);
+    if (pMemory == NULL)
+        return NV_ERR_INVALID_OBJECT;
+
+    if (hSubDevice != 0)
+    {
+        pContextDma->bUnicast = NV_TRUE;
+        rmStatus = gpuGetByHandle(pClient, hSubDevice, NULL, &pGpu);
+        if (rmStatus != NV_OK)
+            return rmStatus;
+    }
+    else
+    {
+        pContextDma->bUnicast = NV_FALSE;
+        pGpu = pMemory->pGpu;
+        if (pGpu == NULL)
+            return NV_ERR_INVALID_OBJECT_PARENT;
+    }
+
+    gpuSetThreadBcState(pGpu, !pContextDma->bUnicast);
+
+    rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice);
+    if (rmStatus != NV_OK)
+        return NV_ERR_INVALID_OBJECT_PARENT;
+
+    pContextDma->pDevice = pDevice;
+
+    hDevice = RES_GET_HANDLE(pDevice);
+
+    API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE);
+
+    pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+
+    pMemDesc = pMemory->pMemDesc;
+
+    //
+    // Validate the offset and limit passed in.
+    // Check that the end of the contextdma is within the memory object which
+    // was created (RmAllocMemory).
+    // Since "limit" is inclusive, it should be strictly less than the length.
+    //
+    {
+        NvU64 combinedLimit;
+        if (!portSafeAddU64(offset, limit, &combinedLimit) ||
+            (combinedLimit >= pMemory->Length))
+        {
+            return NV_ERR_INVALID_LIMIT;
+        }
+    }
+
+    // The destructor expects the following fields in pContextDma to be set,
+    // so do not invoke the destructor (goto done) before they are assigned.
+    pContextDma->pMemory = pMemory;
+    pContextDma->pGpu = pGpu;
+
+    pContextDma->AddressSpace = memdescGetAddressSpace(memdescGetMemDescFromGpu(pMemDesc, pGpu));
+
+    // Fail allocation of virtual ContextDmas. These have moved to DynamicMemory.
+ if (pContextDma->AddressSpace == ADDR_VIRTUAL) + { + return NV_ERR_OBJECT_TYPE_MISMATCH; + } + + // + // Create a MEMORY_DESCRIPTOR describing this region of the memory alloc + // in question + // + rmStatus = memdescCreateSubMem( + &pContextDma->pMemDesc, pMemDesc, pGpu, offset, limit+1); + if (rmStatus != NV_OK) + goto done; + + if (pContextDma->AddressSpace == ADDR_SYSMEM) + { + if (pContextDma->Type == NVOS03_FLAGS_MAPPING_KERNEL) + { + rmStatus = memdescMapOld( + pMemDesc, + offset, limit+1, NV_TRUE, NV_PROTECT_READ_WRITE, + &pContextDma->KernelVAddr[gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))], + &pContextDma->KernelPriv); + if (rmStatus != NV_OK) + goto done; + } + } + + if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _BL, flags)) + { + memdescSetPteKind(pContextDma->pMemDesc, memmgrGetPteKindBl_HAL(pGpu, pMemoryManager)); + } + else if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _PITCH, flags)) + { + memdescSetPteKind(pContextDma->pMemDesc, memmgrGetPteKindPitch_HAL(pGpu, pMemoryManager)); + } + +done: + + if (rmStatus == NV_OK) + { + if (IS_VIRTUAL(pGpu)) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + NV_RM_RPC_ALLOC_CONTEXT_DMA(pGpu, hClient, hDevice, RES_GET_HANDLE(pContextDma), hClass, + flags, RES_GET_HANDLE(pMemory), offset, limit, rmStatus); + } + } + + if (rmStatus != NV_OK) + { + memdescDestroy(pContextDma->pMemDesc); + pContextDma->pMemDesc = NULL; + + _ctxdmaDestruct(pContextDma, hClient); + } + + return rmStatus; +} + +// +// Fetch ContextDma from resource server +// +NV_STATUS +ctxdmaGetByHandle +( + RsClient *pClient, + NvHandle hContextDma, + ContextDma **ppContextDma +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppContextDma = NULL; + + status = clientGetResourceRef(pClient, hContextDma, &pResourceRef); + if (status != NV_OK) + { + return status; + } + + *ppContextDma = dynamicCast(pResourceRef->pResource, ContextDma); + + return (*ppContextDma) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +// +// Validate that the range described by Start+Length is within ContextDma +// limits. +// +NV_STATUS +ctxdmaValidate_IMPL +( + ContextDma *pContextDma, + NvU64 Start, + NvU64 Length +) +{ + if (pContextDma == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid DMA context in ctxdmaValidate\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_XLATE; + } + DBG_VAL_PTR(pContextDma); + + if ((Start + Length - 1) > pContextDma->Limit) + return NV_ERR_INVALID_OFFSET; + + return NV_OK; +} + +// +// Return the CPU VA of a DMA buffer. +// +NV_STATUS +ctxdmaGetKernelVA_IMPL +( + ContextDma *pContextDma, + NvU64 Start, + NvU64 Length, + void **ppAddress, + NvU32 VA_idx +) +{ + NV_STATUS status; + + if (pContextDma == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid DMA context in ctxdmaGetKernelVA\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_XLATE; + } + DBG_VAL_PTR(pContextDma); + + status = ctxdmaValidate(pContextDma, Start, Length); + if (status != NV_OK) + return status; + + if (pContextDma->KernelVAddr[VA_idx] == NULL) + return NV_ERR_DMA_MEM_NOT_LOCKED; + + *ppAddress = (NvU8*)pContextDma->KernelVAddr[VA_idx] + Start; + + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use ctxdmaGetByHandle. 
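+ *
+ * Illustrative migration (hypothetical handles):
+ *     before: CliGetContextDma(hClient, hCtxDma, &pCtxDma);
+ *     after:  serverGetClientUnderLock(&g_resServ, hClient, &pClient);
+ *             ctxdmaGetByHandle(pClient, hCtxDma, &pCtxDma);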
+ */
+NV_STATUS
+CliGetContextDma
+(
+    NvHandle     hClient,
+    NvHandle     hContextDma,
+    ContextDma **ppContextDma
+)
+{
+    RsClient *pClient;
+    NV_STATUS status;
+
+    *ppContextDma = NULL;
+
+    status = serverGetClientUnderLock(&g_resServ, hClient, &pClient);
+    if (status != NV_OK)
+        return NV_ERR_INVALID_CLIENT;
+
+    return ctxdmaGetByHandle(pClient, hContextDma, ppContextDma);
+}
+
+NV_STATUS
+ctxdmaMapTo_IMPL
+(
+    ContextDma *pContextDma,
+    RS_RES_MAP_TO_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = pParams->pGpu;
+    MEMORY_DESCRIPTOR *pSrcMemDesc = pParams->pSrcMemDesc;
+    NvU64 offset = pParams->offset;
+
+    //
+    // For video memory, provide a way to look up the offset of an FB allocation
+    // within the given target context dma. Still useful for dFPGA; it is used
+    // by MODS.
+    //
+    if ((memdescGetAddressSpace(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu)) == ADDR_FBMEM) &&
+        (memdescGetAddressSpace(memdescGetMemDescFromGpu(pContextDma->pMemDesc, pGpu)) == ADDR_FBMEM))
+    {
+        RmPhysAddr physaddr;
+        if (!memdescGetContiguity(pSrcMemDesc, AT_GPU))
+        {
+            NV_PRINTF(LEVEL_ERROR, "Cannot obtain the video memory offset of a noncontiguous vidmem alloc!\n");
+            return NV_ERR_GENERIC;
+        }
+
+        // Return a GPU device physical address, if available
+        physaddr = memdescGetPhysAddr(pSrcMemDesc, AT_GPU, offset);
+        *pParams->pDmaOffset = physaddr - memdescGetPhysAddr(pContextDma->pMemDesc, AT_GPU, 0);
+        return NV_OK;
+    }
+
+    // We no longer support tracking mappings on ContextDma. This has moved to DynamicMemory.
+    return NV_ERR_INVALID_ARGUMENT;
+}
+
+NV_STATUS
+ctxdmaUnmapFrom_IMPL
+(
+    ContextDma *pContextDma,
+    RS_RES_UNMAP_FROM_PARAMS *pParams
+)
+{
+    //
+    // With ContextDmas only supporting physical (or IOMMU VA) there is
+    // nothing to unmap. We silently allow this call for compatibility.
+    //
+    return NV_OK;
+}
+
+/*!
+ * @brief Is the ContextDma bound to a display channel?
+ *
+ * This is a fast check to see if a ContextDma is bound to a display channel.
+ *
+ * This is called during display channel or ContextDma teardown only,
+ * which DD cannot do while a LOCK_BYPASS bind is active with these
+ * objects. Locking would require per-subdevice lock/unlock.
+ */
+NvBool
+ctxdmaIsBound_IMPL
+(
+    ContextDma *pContextDma
+)
+{
+    NvU32 refs = 0;
+    NvU32 i;
+
+    for (i = 0; i < NV_MAX_SUBDEVICES; i++)
+        refs += pContextDma->InstRefCount[i];
+
+    return refs != 0;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
new file mode 100644
index 0000000..69da505
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
@@ -0,0 +1,3699 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief Memory descriptor handling utility routines. + */ + +#include "gpu/mem_mgr/mem_desc.h" + +#include "os/os.h" + +#include "gpu_mgr/gpu_mgr.h" +#include "core/locks.h" +#include "mem_mgr/io_vaspace.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "core/system.h" + +#include "rmconfig.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/mem_utils.h" + +#include "nvrm_registry.h" // For memdescOverrideInstLoc*() + +#include "deprecated/rmapi_deprecated.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +// Structure for keeping track of BAR1 mappings +typedef struct +{ + NvU64 FbAperture; + NvU64 FbApertureLen; + NvP64 pPriv; +} FB_MAPPING_INFO; + +// +// Common address space lists +// +const NV_ADDRESS_SPACE ADDRLIST_FBMEM_PREFERRED[] = {ADDR_FBMEM, ADDR_SYSMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_PREFERRED[] = {ADDR_SYSMEM, ADDR_FBMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_FBMEM_ONLY[] = {ADDR_FBMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_ONLY[] = {ADDR_SYSMEM, ADDR_UNKNOWN}; + +// XXX These could probably encode the whole list in the u32 bits. +NvU32 memdescAddrSpaceListToU32(const NV_ADDRESS_SPACE *addrlist) +{ + if (addrlist == ADDRLIST_FBMEM_PREFERRED) + return 1; + else if (addrlist == ADDRLIST_SYSMEM_PREFERRED) + return 2; + else if (addrlist == ADDRLIST_FBMEM_ONLY) + return 3; + else if (addrlist == ADDRLIST_SYSMEM_ONLY) + return 4; + else + return 0; +} + +const NV_ADDRESS_SPACE *memdescU32ToAddrSpaceList(NvU32 index) +{ + switch (index) + { + case 1: return ADDRLIST_FBMEM_PREFERRED; + case 2: return ADDRLIST_SYSMEM_PREFERRED; + case 3: return ADDRLIST_FBMEM_ONLY; + case 4: return ADDRLIST_SYSMEM_ONLY; + default: + return NULL; + } +} + +/* + * @brief Setting a MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE has to initialize + * pHeap and bUsingSubAllocator flags + */ +static NV_STATUS _memdescSetSubAllocatorFlag +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + NvBool bSet +) +{ + if (!bSet) + { + NV_PRINTF(LEVEL_ERROR, + "Unsetting MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE not supported\n"); + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ASSERT(!(pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)); + + // Set flag forcing the allocation to fall into suballocator + pMemDesc->_flags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + + return NV_OK; +} + +/*! + * @brief Initializing GFID for guest allocated memdescs + */ +static NV_STATUS _memdescSetGuestAllocatedFlag +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + NvBool bSet +) +{ + + return NV_OK; +} + +/*! + * @brief Allocate and initialize a new empty memory descriptor + * + * Allocate a new memory descriptor. This allocates the memory descriptor + * only. memdescAlloc or memdescDescribe are later used to allocate or associate + * memory to the memory descriptor. 
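+ *
+ * A typical lifecycle, as an illustrative sketch (error checking omitted;
+ * MEMDESC_FLAGS_NONE assumed as the flags value):
+ *
+ *     MEMORY_DESCRIPTOR *pMemDesc = NULL;
+ *     memdescCreate(&pMemDesc, pGpu, size, 0, NV_TRUE, ADDR_SYSMEM,
+ *                   NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE);
+ *     memdescAlloc(pMemDesc);
+ *     // ... use the memory ...
+ *     memdescFree(pMemDesc);
+ *     memdescDestroy(pMemDesc);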
+ *
+ * This routine takes the size and the physical contiguity of the future
+ * allocation in order to size the PTE array for non-contiguous requests.
+ *
+ * memdescDestroy should be called to free a memory descriptor.
+ *
+ * If MEMDESC_FLAGS_PRE_ALLOCATED is specified, use the memory descriptor
+ * supplied by the client instead of allocating a new one.
+ *
+ * @param[out] ppMemDesc            Return pointer to new memory descriptor
+ * @param[in]  pGpu
+ * @param[in]  Size                 Size of memory descriptor in bytes.
+ * @param[in]  Alignment            Alignment of the allocation (from the signature below)
+ * @param[in]  PhysicallyContiguous Need physical contig or can it be scattered?
+ * @param[in]  AddressSpace         NV_ADDRESS_SPACE requested
+ * @param[in]  CpuCacheAttrib       CPU cacheability requested
+ * @param[in]  Flags                MEMDESC_FLAGS_*
+ *
+ * @returns NV_OK on success
+ */
+NV_STATUS
+memdescCreate
+(
+    MEMORY_DESCRIPTOR **ppMemDesc,
+    OBJGPU             *pGpu,
+    NvU64               Size,
+    NvU64               Alignment,
+    NvBool              PhysicallyContiguous,
+    NV_ADDRESS_SPACE    AddressSpace,
+    NvU32               CpuCacheAttrib,
+    NvU64               Flags
+)
+{
+    MEMORY_DESCRIPTOR *pMemDesc;
+    NvU64 allocSize, MdSize, PageCount;
+    NvU32 gpuCacheAttrib = NV_MEMORY_UNCACHED;
+
+    allocSize = Size;
+
+    //
+    // this memdesc may have gotten forced to sysmem if no carveout,
+    // but for VPR it needs to be in vidmem, so check and re-direct here,
+    // unless running with zero-FB
+    //
+    if ((AddressSpace != ADDR_UNKNOWN) &&
+        (Flags & MEMDESC_ALLOC_FLAGS_PROTECTED) &&
+        (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) ||
+         gpuIsCacheOnlyModeEnabled(pGpu)))
+    {
+        AddressSpace = ADDR_FBMEM;
+    }
+
+    if (pGpu != NULL)
+    {
+        MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+
+        if (((AddressSpace == ADDR_SYSMEM) || (AddressSpace == ADDR_UNKNOWN)) &&
+            !(Flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL))
+        {
+            if (pMemoryManager && pMemoryManager->sysmemPageSize)
+            {
+                allocSize = RM_ALIGN_UP(allocSize, pMemoryManager->sysmemPageSize);
+            }
+        }
+
+        if (RMCFG_FEATURE_PLATFORM_MODS || IsT194(pGpu))
+        {
+            if ( (AddressSpace == ADDR_FBMEM) &&
+                !(Flags & MEMDESC_ALLOC_FLAGS_PROTECTED) &&
+                memmgrGetUsableMemSizeMB_HAL(pGpu, pMemoryManager) == 0 &&
+                gpuIsUnifiedMemorySpaceEnabled(pGpu))
+            {
+                // On Tegra, force sysmem if carveout and SMMU are not available
+                AddressSpace = ADDR_SYSMEM;
+                if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM))
+                {
+                    CpuCacheAttrib = pGpu->instCacheOverride;
+                }
+            }
+
+            //
+            // Support for aligned contiguous SYSMEM allocations.
+            //
+            if ((AddressSpace == ADDR_SYSMEM || AddressSpace == ADDR_UNKNOWN) &&
+                PhysicallyContiguous && (Alignment > RM_PAGE_SIZE))
+            {
+                allocSize += (Alignment - RM_PAGE_SIZE);
+            }
+        }
+    }
+
+    //
+    // Must allocate a larger buffer to store the PTEs for noncontiguous memory.
+    // Note that we allocate one extra PTE, since we don't know what the PteAdjust
+    // is yet; if the PteAdjust is zero, we simply won't use it. This is in the
+    // MEMORY_DESCRIPTOR structure definition.
+    //
+    // RM_PAGE_SIZE is 4k and RM_PAGE_SHIFT is 12, so the shift operation can be
+    // modified from ((allocSize + RM_PAGE_SIZE-1) >> RM_PAGE_SHIFT) to below as
+    // (4k >> 12 = 1). This modification helps us to avoid overflow of the variable
+    // allocSize, in case the caller of this function passes the highest value of NvU64.
+    //
+    // If allocSize is passed as 0, PageCount should be returned as 0.
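+    // For example: allocSize = 0x1001 gives
+    // PageCount = ((0x1001 - 1) >> 12) + 1 = 2 pages.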
+    //
+    if (allocSize == 0)
+    {
+        PageCount = 0;
+    }
+    else
+    {
+        PageCount = ((allocSize - 1) >> RM_PAGE_SHIFT) + 1;
+    }
+
+    if (PhysicallyContiguous)
+    {
+        MdSize = sizeof(MEMORY_DESCRIPTOR);
+    }
+    else
+    {
+        MdSize = sizeof(MEMORY_DESCRIPTOR) +
+            (sizeof(RmPhysAddr) * PageCount);
+        NV_ASSERT(MdSize <= 0xffffffffULL);
+        if (MdSize > 0xffffffffULL)
+            return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    if (Flags & MEMDESC_FLAGS_PAGED_SYSMEM)
+    {
+        // The flag MEMDESC_FLAGS_PAGED_SYSMEM is only for Windows
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    if (Flags & MEMDESC_FLAGS_PRE_ALLOCATED)
+    {
+        // Only fixed-size memDescs can be supported
+        if (PhysicallyContiguous == NV_FALSE)
+        {
+            return NV_ERR_BUFFER_TOO_SMALL;
+        }
+
+        NV_ASSERT_OR_RETURN(*ppMemDesc, NV_ERR_NOT_SUPPORTED);
+
+        pMemDesc = *ppMemDesc;
+    }
+    else
+    {
+        pMemDesc = portMemAllocNonPaged((NvU32)MdSize);
+        if (pMemDesc == NULL)
+        {
+            return NV_ERR_NO_MEMORY;
+        }
+    }
+
+    portMemSet(pMemDesc, 0, (NvU32)MdSize);
+
+    // Fill in initial non-zero parameters
+    pMemDesc->pGpu = pGpu;
+    pMemDesc->Size = Size;
+    pMemDesc->PageCount = PageCount;
+    pMemDesc->ActualSize = allocSize;
+    pMemDesc->_addressSpace = AddressSpace;
+    pMemDesc->RefCount = 1;
+    pMemDesc->DupCount = 1;
+    pMemDesc->_subDeviceAllocCount = 1;
+    pMemDesc->_flags = Flags;
+    pMemDesc->_gpuCacheAttrib = gpuCacheAttrib;
+    pMemDesc->_gpuP2PCacheAttrib = NV_MEMORY_UNCACHED;
+    pMemDesc->Alignment = Alignment;
+    pMemDesc->gfid = GPU_GFID_PF;
+    pMemDesc->bUsingSuballocator = NV_FALSE;
+    pMemDesc->bDeferredFree = NV_FALSE;
+
+    memdescSetCpuCacheAttrib(pMemDesc, CpuCacheAttrib);
+
+    // Set any additional flags
+    pMemDesc->_flags |= MEMDESC_FLAGS_KERNEL_MODE;
+    if (PhysicallyContiguous)
+        pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS;
+    else
+        pMemDesc->_flags &= ~MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS;
+
+    // OBJHEAP may not be created at this time and pMemDesc->pHeap may be NULL after this if-else
+    if (Flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)
+    {
+        pMemDesc->_flags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL;
+        pMemDesc->_flags &= ~MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE;
+    }
+    else if (Flags & MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE)
+    {
+        NV_ASSERT_OK_OR_RETURN(_memdescSetSubAllocatorFlag(pGpu, pMemDesc, NV_TRUE));
+    }
+
+    // In case of guest allocated memory, just initialize GFID
+    if (Flags & MEMDESC_FLAGS_GUEST_ALLOCATED)
+    {
+        NV_ASSERT_OK_OR_RETURN(_memdescSetGuestAllocatedFlag(pGpu, pMemDesc, NV_TRUE));
+    }
+
+    *ppMemDesc = pMemDesc;
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Initialize a caller-allocated memory descriptor
+ *
+ * Helper to make it easier to get the memDesc **, and typically used
+ * with memdescDescribe.
+ *
+ * Can only be used for physically contiguous regions with a fixed-size
+ * PTE array.
+ *
+ * memdescDestroy should be called to free a memory descriptor.
+ *
+ * If MEMDESC_FLAGS_PRE_ALLOCATED is specified, use the memory descriptor
+ * supplied by the client instead of allocating a new one.
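+ *
+ * Sketch of use with an embedded (caller-owned) descriptor; illustrative,
+ * with MEMDESC_FLAGS_NONE assumed:
+ *
+ *     MEMORY_DESCRIPTOR memDesc;
+ *     memdescCreateExisting(&memDesc, pGpu, size, ADDR_SYSMEM,
+ *                           NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE);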
+ * @param[out] pMemDesc       Return pointer to new memory descriptor
+ * @param[in]  pGpu
+ * @param[in]  Size           Size of memory descriptor in bytes
+ * @param[in]  AddressSpace   NV_ADDRESS_SPACE requested
+ * @param[in]  CpuCacheAttrib CPU cacheability requested
+ * @param[in]  Flags          MEMDESC_FLAGS_*
+ *
+ * @returns void; with no malloc there should be no failure cases
+ */
+void
+memdescCreateExisting
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    OBJGPU            *pGpu,
+    NvU64              Size,
+    NV_ADDRESS_SPACE   AddressSpace,
+    NvU32              CpuCacheAttrib,
+    NvU64              Flags
+)
+{
+    NV_STATUS status;
+    status = memdescCreate(&pMemDesc, pGpu, Size, 0, NV_TRUE, AddressSpace,
+                           CpuCacheAttrib,
+                           Flags | MEMDESC_FLAGS_PRE_ALLOCATED | MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE);
+    NV_ASSERT(status == NV_OK);
+}
+
+
+/*!
+ * Increment ref count
+ */
+void memdescAddRef
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(pMemDesc != NULL);
+    ++(pMemDesc->RefCount);
+}
+
+/*!
+ * Decrement ref count
+ */
+void memdescRemoveRef
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL);
+    --(pMemDesc->RefCount);
+}
+
+//
+// Destroy all IOMMU mappings under this memdesc, including child
+// mappings for root memdescs.
+//
+// TODO: merge the new IOMMU paths with the SMMU paths (see bug 1625121).
+//
+static void
+_memdescFreeIommuMappings(PMEMORY_DESCRIPTOR pMemDesc)
+{
+#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM
+    PIOVAMAPPING pIovaMapping = pMemDesc->_pIommuMappings;
+
+    if (!pIovaMapping)
+        return;
+
+    if (memdescIsSubMemoryMemDesc(pMemDesc))
+    {
+        iovaMappingDestroy(pIovaMapping);
+        return;
+    }
+
+    while (pIovaMapping)
+    {
+        PIOVAMAPPING pTmpIovaMapping = pIovaMapping->pNext;
+        iovaMappingDestroy(pIovaMapping);
+        pIovaMapping = pTmpIovaMapping;
+    }
+
+    pMemDesc->_pIommuMappings = NULL;
+#endif
+}
+
+/*!
+ * Destroy a memory descriptor if the last reference is released
+ *
+ * If the memory descriptor is down to one reference, we need
+ * to check with the bus code whether that reference needs
+ * to be reclaimed.
+ *
+ * @param[in] pMemDesc Memory descriptor to be destroyed
+ *
+ * @returns None
+ */
+void
+memdescDestroy
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    // Allow null frees
+    if (!pMemDesc)
+    {
+        return;
+    }
+
+    memdescRemoveRef(pMemDesc);
+
+    // if there are still more references to pMemDesc (pMemDesc->RefCount != 0), then bail out.
+
+    if (pMemDesc->RefCount == 0)
+    {
+        MEM_DESC_DESTROY_CALLBACK *pCb = memdescGetDestroyCallbackList(pMemDesc);
+        MEM_DESC_DESTROY_CALLBACK *pNext;
+
+        if (pMemDesc->_flags & MEMDESC_FLAGS_DUMMY_TOPLEVEL)
+        {
+            // When called from RmFreeFrameBuffer() after memdescFree() could not free it because it is unallocated.
+            pMemDesc->_pNext = NULL;
+            pMemDesc->_subDeviceAllocCount = 1;
+        }
+
+        NV_ASSERT(pMemDesc->childDescriptorCnt == 0);
+        NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM || pMemDesc->pHeap == NULL);
+
+        //
+        // If there is private memdata, use the CB to free
+        //
+        if (pMemDesc->_pMemData && pMemDesc->_pMemDataReleaseCallback)
+        {
+            pMemDesc->_pMemDataReleaseCallback(pMemDesc);
+        }
+
+        if (pMemDesc->bDeferredFree)
+        {
+            memdescFree(pMemDesc);
+        }
+        else if (pMemDesc->Allocated != 0)
+        {
+            //
+            // The caller forgot to free the actual memory before destroying the memdesc.
+            // Please fix this by calling memdescFree().
+            // To prevent memory leaks, we explicitly free here until it's fixed elsewhere.
+            //
+            NV_PRINTF(LEVEL_ERROR, "Destroying unfreed memory %p\n", pMemDesc);
+            NV_PRINTF(LEVEL_ERROR, "Please call memdescFree()\n");
+            memdescFree(pMemDesc);
+            NV_ASSERT(!pMemDesc->Allocated);
+        }
+
+        if (memdescGetStandbyBuffer(pMemDesc))
+        {
+            memdescFree(memdescGetStandbyBuffer(pMemDesc));
+            memdescDestroy(memdescGetStandbyBuffer(pMemDesc));
+            memdescSetStandbyBuffer(pMemDesc, NULL);
+        }
+
+        //
+        // Submemory descriptors will be destroyed without going through a free
+        // path, so we need to make sure that we remove the IOMMU submapping
+        // here. For root descriptors, we should already have removed all the
+        // associated IOVA mappings.
+        //
+        // However, for memory descriptors that weren't allocated by the RM,
+        // (e.g., were created from a user allocation), we won't go through a
+        // free path at all. In this case, mappings for other GPUs may still be
+        // attached to this root memory descriptor, so release them now.
+        //
+        _memdescFreeIommuMappings(pMemDesc);
+
+        // Notify all interested parties of destruction
+        while (pCb)
+        {
+            pNext = pCb->pNext;
+            pCb->destroyCallback(pMemDesc->pGpu, pCb->pObject, pMemDesc);
+            // pCb is now invalid
+            pCb = pNext;
+        }
+
+        portMemFree(pMemDesc->pPteSpaMappings);
+        pMemDesc->pPteSpaMappings = NULL;
+        portMemFree(pMemDesc->pSubMemDescList);
+        pMemDesc->pSubMemDescList = NULL;
+
+        if (pMemDesc->_pParentDescriptor)
+        {
+            if ((pMemDesc->_flags & MEMDESC_FLAGS_PRE_ALLOCATED) == 0)
+                pMemDesc->_pParentDescriptor->childDescriptorCnt--;
+            memdescDestroy(pMemDesc->_pParentDescriptor);
+            pMemDesc->_pParentDescriptor = NULL;
+        }
+
+        // Verify the memdesc is not a top-level descriptor with subdevice memdescs
+        NV_ASSERT(memdescHasSubDeviceMemDescs(pMemDesc) == NV_FALSE);
+
+        if ((pMemDesc->_flags & MEMDESC_FLAGS_PRE_ALLOCATED) == 0)
+        {
+            portMemFree(pMemDesc);
+        }
+    }
+}
+
+/*!
+ * @brief Function that frees subdevice memory descriptors. If there are no
+ * subdevice memory descriptors, the function simply resets the memdesc
+ * structure. The top-level memory descriptor is not destroyed.
+ *
+ * @param[in,out] pMemDesc Top level memory descriptor.
+ *
+ * @returns None
+ */
+static void
+_memSubDeviceFreeAndDestroy
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc->_pNext;
+    MEMORY_DESCRIPTOR *pNextMemDesc;
+    OBJGPU *pGpu = pMemDesc->pGpu;
+    NvBool bBcState;
+
+    // No subdevice memdescs
+    if (pSubDevMemDesc == NULL || pGpu == NULL)
+    {
+        return;
+    }
+
+    bBcState = gpumgrGetBcEnabledStatus(pGpu);
+    gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
+
+    do
+    {
+        pNextMemDesc = pSubDevMemDesc->_pNext;
+        pSubDevMemDesc->_pNext = NULL;
+        memdescFree(pSubDevMemDesc);
+        memdescDestroy(pSubDevMemDesc);
+        pSubDevMemDesc = pNextMemDesc;
+    } while (pSubDevMemDesc != NULL);
+
+    gpumgrSetBcEnabledStatus(pGpu, bBcState);
+}
+
+/*!
+ * @brief Lower memdesc allocation layer for the special case of allocation
+          in the VPR region when MODS is managing it.
+ *
+ * @param[in] pMemDesc  Memory descriptor to allocate
+ *
+ * @returns NV_OK on successful allocation.
+ *          NV_ERR_NOT_SUPPORTED if not supported
+ */
+static NV_STATUS
+_memdescAllocVprRegion
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Lower memdesc allocation layer. Provides underlying allocation
+ * functionality.
+ *
+ * @param[in,out] pMemDesc  Memory descriptor to allocate
+ *
+ * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise.
+ */ +static NV_STATUS +_memdescAllocInternal +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS status = NV_OK; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + if (pMemDesc->Allocated) + { + NV_ASSERT(!pMemDesc->Allocated); + return NV_ERR_INVALID_OBJECT_BUFFER; + } + + // Special case of an allocation request in MODS managed VPR region. + status = _memdescAllocVprRegion(pMemDesc); + if (status != NV_ERR_NOT_SUPPORTED) + goto done; + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + // System memory can be obtained from osAllocPages + status = osAllocPages(pMemDesc); + if (status != NV_OK) + { + goto done; + } + + // + // The pages have been allocated, so mark the descriptor as + // allocated. The IOMMU-mapping code needs the memdesc to be + // allocated in order to create the mapping. + // + pMemDesc->Allocated = 1; + + // + // TODO: merge new IOMMU paths with the SMMU paths below (see bug + // 1625121). For now they are parallel, and only one will be + // used. + // + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_CPU_ONLY)) + { + status = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + if (status != NV_OK) + { + pMemDesc->Allocated = 0; + osFreePages(pMemDesc); + goto done; + } + } + + if (pMemDesc->_flags & MEMDESC_FLAGS_PROVIDE_IOMMU_MAP) + { + NV_PRINTF(LEVEL_ERROR, "SMMU mapping allocation is not supported for ARMv7.\n"); + NV_ASSERT(0); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + else if ((pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) && + RMCFG_FEATURE_PLATFORM_MODS) + { + if (pMemDesc->Alignment > RM_PAGE_SIZE) + { + RmPhysAddr addr = memdescGetPhysAddr(pMemDesc, AT_CPU, 0); + NvU64 offset; + + NV_ASSERT((addr & (RM_PAGE_SIZE - 1)) == 0); + + NV_ASSERT((pMemDesc->Alignment & (pMemDesc->Alignment - 1)) == 0); + offset = addr & (pMemDesc->Alignment - 1); + + if (offset) + { + NV_ASSERT((pMemDesc->PageCount * RM_PAGE_SIZE - pMemDesc->Size) >= offset); + NV_ASSERT(pMemDesc->PteAdjust == 0); + pMemDesc->PteAdjust += NvU64_LO32(pMemDesc->Alignment - offset); + } + } + } + + break; + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + status = NV_ERR_GENERIC; + goto done; + } + +done: + if (status == NV_OK) + memdescPrintMemdesc(pMemDesc, NV_TRUE, MAKE_NV_PRINTF_STR("memdesc allocated")); + + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + return status; +} + +/*! + * @brief Upper memdesc allocation layer. Provides support for per-subdevice + * sysmem buffers and lockless sysmem allocation. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescAlloc +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS status = NV_OK; + NvBool bcState = NV_FALSE; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvBool reAcquire; + NvU32 gpuMask = 0; + + NV_ASSERT_OR_RETURN(!pMemDesc->Allocated, NV_ERR_INVALID_OBJECT_BUFFER); + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + // Can't alloc sysmem on GSP firmware. + if (RMCFG_FEATURE_PLATFORM_GSP && !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + // + // TO DO: Make this an error once existing allocations are cleaned up. 
+                // After that the pHeap selection can be moved to memdescAllocInternal()
+                //
+                NV_PRINTF(LEVEL_ERROR,
+                          "WARNING sysmem alloc on GSP firmware\n");
+                pMemDesc->_addressSpace = ADDR_FBMEM;
+                pMemDesc->pHeap = GPU_GET_HEAP(pGpu);
+            }
+
+            break;
+        default:
+            // Don't know how to do any other types of memory yet
+            DBG_BREAKPOINT();
+            return NV_ERR_GENERIC;
+    }
+
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    if (gpumgrGetBcEnabledStatus(pGpu))
+    {
+        // Broadcast memdescAlloc call with flag set to allocate per subdevice.
+        if (pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE)
+        {
+            NvU32 i;
+            MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc;
+            MEMORY_DESCRIPTOR *pPrev = pMemDesc;
+            OBJGPU *pGpuChild;
+
+            pMemDesc->_subDeviceAllocCount = NumSubDevices(pGpu);
+
+            for (i = 0; i < pMemDesc->_subDeviceAllocCount; i++)
+            {
+                // Get pGpu for this subdevice instance
+                pGpuChild = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), i);
+                if (NULL == pGpuChild)
+                {
+                    NV_ASSERT(0);
+                    status = NV_ERR_OBJECT_NOT_FOUND;
+                    goto subdeviceAlloc_failed;
+                }
+
+                //
+                // We are accessing the fields of the top level desc here directly without using the
+                // accessor routines on purpose.
+                //
+                status = memdescCreate(&pSubDevMemDesc, pGpuChild, pMemDesc->Size, pMemDesc->Alignment,
+                                       !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS),
+                                       pMemDesc->_addressSpace,
+                                       pMemDesc->_cpuCacheAttrib,
+                                       pMemDesc->_flags & ~MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE);
+
+                if (status != NV_OK)
+                {
+                    NV_ASSERT(0);
+                    goto subdeviceAlloc_failed;
+                }
+
+                pSubDevMemDesc->_gpuCacheAttrib = pMemDesc->_gpuCacheAttrib;
+                pSubDevMemDesc->_pageSize = pMemDesc->_pageSize;
+
+                // Force broadcast state to false when allocating a subdevice memdesc
+                gpumgrSetBcEnabledStatus(pGpuChild, NV_FALSE);
+
+                status = memdescAlloc(pSubDevMemDesc);
+
+                if (pMemDesc->_addressSpace == ADDR_FBMEM)
+                {
+                    //
+                    // The top level memdesc could have flags that don't reflect contiguity, which
+                    // is set after memdescAlloc.
+                    //
+                    pMemDesc->Alignment = pSubDevMemDesc->Alignment;
+                    pMemDesc->_flags = pSubDevMemDesc->_flags | MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE;
+                    pMemDesc->ActualSize = pSubDevMemDesc->ActualSize;
+                }
+
+                // Restore broadcast state to true after allocating a subdevice memdesc
+                gpumgrSetBcEnabledStatus(pGpuChild, NV_TRUE);
+
+                if (status != NV_OK)
+                {
+                    memdescDestroy(pSubDevMemDesc);
+                    NV_ASSERT(0);
+                    goto subdeviceAlloc_failed;
+                }
+
+                // Check that this subdevice allocation matches the previous one.
+                // If subdev0 ~ subdev1 and subdev1 ~ subdev2, then subdev0 ~ subdev2,
+                // and so on; checking adjacent pairs thus verifies symmetry across
+                // all subdevice allocations.
+                if (i > 0)
+                {
+                    NV_ASSERT(pPrev->Size == pSubDevMemDesc->Size);
+                    NV_ASSERT(pPrev->PteAdjust == pSubDevMemDesc->PteAdjust);
+                    NV_ASSERT(pPrev->_addressSpace == pSubDevMemDesc->_addressSpace);
+                    NV_ASSERT(pPrev->_flags == pSubDevMemDesc->_flags);
+                    NV_ASSERT(pPrev->_pteKind == pSubDevMemDesc->_pteKind);
+                    NV_ASSERT(pPrev->_pteKindCompressed == pSubDevMemDesc->_pteKindCompressed);
+                    NV_ASSERT(pPrev->pHeap != pSubDevMemDesc->pHeap);
+                }
+
+                pPrev->_pNext = pSubDevMemDesc;
+                pPrev = pSubDevMemDesc;
+            }
+            pMemDesc->Allocated = 1;
+            return NV_OK;
+        }
+        else if (pMemDesc->_addressSpace == ADDR_FBMEM)
+        {
+            // Broadcast memdescAlloc call on vidmem *without* flag set to allocate per subdevice
+            NV_ASSERT(0);
+        }
+    }
+
+    // Unicast memdescAlloc call but with flag set to allocate per subdevice.
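+    // (Reaching this point in unicast mode with ALLOC_PER_SUBDEVICE still set
+    // would be a caller error; the assert below flags that combination.)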
+ NV_ASSERT(!((pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE) && !gpumgrGetBcEnabledStatus(pGpu))); + + reAcquire = NV_FALSE; + bcState = NV_FALSE; + + if ((pMemDesc->_flags & MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC) && (pMemDesc->_addressSpace != ADDR_FBMEM)) + { + bcState = gpumgrGetBcEnabledStatus(pGpu); + if (RMCFG_FEATURE_RM_BASIC_LOCK_MODEL) + { + // + // There is no equivalent routine for osCondReleaseRmSema in + // the new basic lock model. + + // + // However, we can't drop the RM system semaphore in this + // path because on non-windows platforms (i.e. MODS) it + // has undesirable consequences. So for now we must + // bracket this section with a reference to the feature + // flag until we can rework this interface. + // + // + // Check to make sure we own the lock and that we are + // not at elevated IRQL; this models the behavior + // of osCondReleaseRmSema. + // + if (!osIsRaisedIRQL() && + (rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask) || + rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, &gpuMask))) + { + // + // Release all owned gpu locks rather than just the + // device-related locks because the caller may be holding more + // than the required device locks. All currently owned + // locks will be re-acquired before returning. + // + // This prevents potential GPU locking violations (e.g., if the + // caller is holding all the gpu locks but only releases the + // first of two device locks, then attempting to re-acquire + // the first device lock will be a locking violation with + // respect to the second device lock.) + // + gpuMask = rmGpuLocksGetOwnedMask(); + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + reAcquire = NV_TRUE; + } + } + else + { + reAcquire = osCondReleaseRmSema(pSys->pSema); + } + } + + // Actually allocate the memory + NV_CHECK_OK(status, LEVEL_ERROR, _memdescAllocInternal(pMemDesc)); + + if (status != NV_OK) + { + pMemDesc->pHeap = NULL; + } + + if (reAcquire) + { + if (osAcquireRmSema(pSys->pSema) != NV_OK) + { + DBG_BREAKPOINT(); + + } + + if (rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_MASK, + GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM, + &gpuMask) != NV_OK) + { + DBG_BREAKPOINT(); + } + // Releasing the semaphore allows another thread to enter RM and + // modify broadcast state. We need to set it back (see bug 368643) + gpumgrSetBcEnabledStatus(pGpu, bcState); + } + + return status; + +subdeviceAlloc_failed: + _memSubDeviceFreeAndDestroy(pMemDesc); + pMemDesc->_subDeviceAllocCount = 1; + pMemDesc->_pNext = NULL; + return status; +} + +/*! + * Allocate memory from one of the possible locations specified in pList. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * @param[in] pList List of NV_ADDRESS_SPACE values. Terminated + * by an ADDR_UNKNOWN entry. + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescAllocList +( + MEMORY_DESCRIPTOR *pMemDesc, + const NV_ADDRESS_SPACE *pList +) +{ + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + NvU32 i = 0; + + if (!pList) + { + return status; + } + + // + // this memdesc may have gotten forced to sysmem if no carveout, + // but for VPR it needs to be in vidmem, so check and re-direct here + // + if (pMemDesc->_flags & MEMDESC_ALLOC_FLAGS_PROTECTED) + { + OBJGPU *pGpu = pMemDesc->pGpu; + + // Only force to vidmem if not running with zero-FB. 
+ if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) ||
+ gpuIsCacheOnlyModeEnabled(pGpu))
+ {
+ pList = ADDRLIST_FBMEM_ONLY;
+ }
+ }
+
+ while (pList[i] != ADDR_UNKNOWN)
+ {
+ pMemDesc->_addressSpace = pList[i];
+ status = memdescAlloc(pMemDesc);
+
+ if (status == NV_OK)
+ {
+ return status;
+ }
+
+ i++;
+ }
+
+ return status;
+}
+
+/*!
+ * @brief Lower memdesc free layer. Provides underlying free
+ * functionality.
+ *
+ * @param[in,out] pMemDesc Memory descriptor to free
+ *
+ * @returns None
+ */
+static void
+_memdescFreeInternal
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ MEM_DESC_DESTROY_CALLBACK *pCb, *pNext;
+ NvU64 oldSize;
+
+ // Allow null frees
+ if (!pMemDesc)
+ {
+ return;
+ }
+
+ pCb = memdescGetDestroyCallbackList(pMemDesc);
+
+ // Notify all interested parties of destruction
+ while (pCb)
+ {
+ pNext = pCb->pNext;
+ pCb->destroyCallback(pMemDesc->pGpu, pCb->pObject, pMemDesc);
+ // pCb is now invalid
+ pCb = pNext;
+ }
+
+ if (memdescHasSubDeviceMemDescs(pMemDesc))
+ return;
+
+ memdescPrintMemdesc(pMemDesc, NV_FALSE, MAKE_NV_PRINTF_STR("memdesc being freed"));
+
+ // Bail out early in case this memdesc describes a MODS managed VPR region.
+ if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_VPR_REGION_CLIENT_MANAGED))
+ return;
+
+ switch (pMemDesc->_addressSpace)
+ {
+ case ADDR_SYSMEM:
+
+ oldSize = pMemDesc->Size;
+ pMemDesc->Size = pMemDesc->ActualSize;
+ pMemDesc->PageCount = ((pMemDesc->ActualSize + RM_PAGE_SIZE-1) >> RM_PAGE_SHIFT);
+
+ osFreePages(pMemDesc);
+
+ pMemDesc->Size = oldSize;
+ pMemDesc->PageCount = ((oldSize + RM_PAGE_SIZE-1) >> RM_PAGE_SHIFT);
+
+ break;
+
+ default:
+ // Don't know how to do any other types of memory yet
+ DBG_BREAKPOINT();
+ }
+}
+
+/*!
+ * @brief Upper memdesc free layer. Provides support for per-subdevice
+ * sysmem buffers and lockless sysmem allocation. Because of SLI and
+ * subdevice submem allocation support (refer to the submem chart), if the
+ * memory has never been allocated this function will just unlink the
+ * subdevice structure and destroy the subdevice descriptors.
+ *
+ * @param[in,out] pMemDesc Memory descriptor to free
+ *
+ * @returns None
+ */
+void
+memdescFree
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ // Allow null frees
+ if (!pMemDesc)
+ {
+ return;
+ }
+
+
+ if (memdescIsSubMemoryMemDesc(pMemDesc))
+ {
+ NV_ASSERT(!pMemDesc->_pInternalMapping);
+
+ if (pMemDesc->_addressSpace == ADDR_SYSMEM)
+ {
+ // The memdesc is being freed so destroy all of its IOMMU mappings.
+ _memdescFreeIommuMappings(pMemDesc);
+ }
+
+ if (pMemDesc->_addressSpace != ADDR_FBMEM &&
+ pMemDesc->_addressSpace != ADDR_SYSMEM)
+ {
+ return;
+ }
+
+ _memSubDeviceFreeAndDestroy(pMemDesc);
+ }
+ else
+ {
+ //
+ // In case RM attempts to free memory that has more than 1 refcount, the free is deferred until refcount reaches 0
+ //
+ // Bug 3307574 RM crashes when clients specify sysmem UserD location.
+ // RM attempts to peek at the client allocated UserD when waiting for a channel to go idle.
+ //
+ if (pMemDesc->RefCount > 1 && pMemDesc->Allocated == 1)
+ {
+ pMemDesc->bDeferredFree = NV_TRUE;
+ return;
+ }
+
+ if (!pMemDesc->Allocated)
+ {
+ /*
+ * For sysmem not allocated by RM but only registered to it, we
+ * would need to update the shared sysmem pfn bitmap here
+ */
+ return;
+ }
+ pMemDesc->Allocated--;
+ if (0 != pMemDesc->Allocated)
+ {
+ return;
+ }
+
+ // If standbyBuffer memory was allocated then free it
+ if (pMemDesc->_pStandbyBuffer)
+ {
+ memdescFree(pMemDesc->_pStandbyBuffer);
+ memdescDestroy(pMemDesc->_pStandbyBuffer);
+ pMemDesc->_pStandbyBuffer = NULL;
+ }
+
+ NV_ASSERT(!pMemDesc->_pInternalMapping);
+
+ if (pMemDesc->_addressSpace == ADDR_SYSMEM)
+ {
+ // The memdesc is being freed so destroy all of its IOMMU mappings.
+ _memdescFreeIommuMappings(pMemDesc);
+ }
+
+ if (pMemDesc->_addressSpace != ADDR_FBMEM &&
+ pMemDesc->_addressSpace != ADDR_SYSMEM)
+ {
+ return;
+ }
+
+ _memSubDeviceFreeAndDestroy(pMemDesc);
+
+ _memdescFreeInternal(pMemDesc);
+ }
+
+ // Reset tracking state
+ pMemDesc->_pNext = NULL;
+ pMemDesc->_subDeviceAllocCount = 1;
+
+ //
+ // Reset tracking state of parent.
+ // Why it is needed:
+ // When a submemory top-level memdesc with subdevices is freed,
+ // the subdevice memdescs and their parent are destroyed or their
+ // refcount is decreased.
+ // When the parent subdevice descriptors are destroyed, their
+ // top level descriptor is left alone and has a dangling
+ // _pNext pointer.
+ //
+ if ((pMemDesc->_pParentDescriptor != NULL) &&
+ (memdescHasSubDeviceMemDescs(pMemDesc->_pParentDescriptor)) &&
+ (pMemDesc->_pParentDescriptor->RefCount == 1))
+ {
+ pMemDesc->_pParentDescriptor->_pNext = NULL;
+ pMemDesc->_pParentDescriptor->_subDeviceAllocCount = 1;
+ }
+}
+
+/*!
+ * @brief Lock the paged virtual memory described by the memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor to lock
+ *
+ * @returns NV_OK on success. Various NV_ERR_GENERIC codes otherwise.
+ */
+NV_STATUS
+memdescLock
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGED_SYSMEM))
+ {
+ return NV_ERR_ILLEGAL_ACTION;
+ }
+
+ return osLockMem(pMemDesc);
+}
+
+/*!
+ * @brief Unlock the paged virtual memory described by the memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor to unlock
+ *
+ * @returns NV_OK on success. Various NV_ERR_GENERIC codes otherwise.
+ */
+NV_STATUS
+memdescUnlock
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGED_SYSMEM))
+ {
+ return NV_ERR_ILLEGAL_ACTION;
+ }
+
+ return osUnlockMem(pMemDesc);
+}
+
+/*!
+ * @brief Get a CPU mapping to the memory described by a memory descriptor
+ *
+ * This is for memory descriptors used by RM clients, not by the RM itself.
+ * For internal mappings the busMapRmAperture() hal routines are used.
+ * + * @param[in] pMemDesc Memory descriptor to map + * @param[in] Offset Offset into memory descriptor to start map + * @param[in] Size Size of mapping + * @param[in] Kernel Kernel or user address space + * @param[in] Protect NV_PROTECT_* + * @param[out] pAddress Return address + * @param[out] pPriv Return cookie to be passed back to memdescUnmap + * + * @returns NV_STATUS + */ + +NV_STATUS +memdescMapOld +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Size, + NvBool Kernel, + NvU32 Protect, + void **pAddress, + void **pPriv +) +{ + NvP64 pAddressP64 = NV_PTR_TO_NvP64(*pAddress); + NvP64 pPrivP64 = NV_PTR_TO_NvP64(*pPriv); + NV_STATUS status; + +#if !defined(NV_64_BITS) + NV_ASSERT(Kernel); +#endif + + status = memdescMap(pMemDesc, + Offset, + Size, + Kernel, + Protect, + &pAddressP64, + &pPrivP64); + + *pAddress = NvP64_VALUE(pAddressP64); + *pPriv = NvP64_VALUE(pPrivP64); + + return status; +} + +NV_STATUS +memdescMap +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Size, + NvBool Kernel, + NvU32 Protect, + NvP64 *pAddress, + NvP64 *pPriv +) +{ + NV_STATUS status = NV_OK; + NvU64 rootOffset = 0; + + NV_ASSERT_OR_RETURN(((Offset + Size) <= memdescGetSize(pMemDesc)), NV_ERR_INVALID_ARGUMENT); + + pMemDesc = memdescGetRootMemDesc(pMemDesc, &rootOffset); + Offset += rootOffset; + + if (pMemDesc->PteAdjust && + (pMemDesc->Alignment > RM_PAGE_SIZE) && + (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) && + RMCFG_FEATURE_PLATFORM_MODS) + { + Offset += pMemDesc->PteAdjust; + } + + // + // Sanity check, the top-level descriptor should be allocated or else + // memDesc must be marked as user allocate memory. This allows mapping of + // memDesc keeping track of PA's for user allocated memory, wherein RM + // marks the corresponding memDesc as not allocated. + // + NV_ASSERT_OR_RETURN(pMemDesc->Allocated || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM), + NV_ERR_INVALID_OBJECT_BUFFER); + + NV_ASSERT_OR_RETURN(!memdescHasSubDeviceMemDescs(pMemDesc), NV_ERR_INVALID_OBJECT_BUFFER); + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + { + status = osMapSystemMemory(pMemDesc, Offset, Size, + Kernel, Protect, pAddress, pPriv); + if (status != NV_OK) + { + return status; + } + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + return NV_OK; +} +void +memdescUnmapOld +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvU32 ProcessId, + void *Address, + void *Priv +) +{ + memdescUnmap(pMemDesc, + Kernel, + ProcessId, + NV_PTR_TO_NvP64(Address), + NV_PTR_TO_NvP64(Priv)); +} + +/*! 
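+ * Example: a minimal sketch of the usual memdescMap()/memdescUnmap()
+ * pairing for a kernel CPU mapping of an allocated sysmem descriptor.
+ * The helper name exampleCpuFill is hypothetical; the calls are the APIs
+ * defined in this file.
+ */
+#if 0 // illustrative sketch only; not compiled
+static NV_STATUS
+exampleCpuFill
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ NvP64 va = NvP64_NULL;
+ NvP64 priv = NvP64_NULL;
+ NV_STATUS status;
+
+ // Map the whole descriptor read/write into the kernel address space.
+ status = memdescMap(pMemDesc, 0, memdescGetSize(pMemDesc), NV_TRUE,
+ NV_PROTECT_READ_WRITE, &va, &priv);
+ if (status != NV_OK)
+ return status;
+
+ portMemSet(NvP64_VALUE(va), 0, (NvLength)memdescGetSize(pMemDesc));
+
+ // Kernel mappings pass 0 for ProcessId.
+ memdescUnmap(pMemDesc, NV_TRUE, 0, va, priv);
+ return NV_OK;
+}
+#endif
+
+/*!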
+ * @brief Remove a mapping for the memory descriptor, reversing memdescMap + * + * @param[in] pMemDesc Memory descriptor to unmap + * @param[in] Kernel Kernel or user address space + * @param[in] ProcessId Process ID if user space + * @param[in] Address Mapped address + * @param[in] Priv Return priv cookie from memdescMap + * + * @returns None + */ +void +memdescUnmap +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvU32 ProcessId, + NvP64 Address, + NvP64 Priv +) +{ + // Allow null unmaps + if (!Address) + return; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + + // find first allocated parent descriptor + while (!pMemDesc->Allocated && pMemDesc->_pParentDescriptor) + { + pMemDesc = pMemDesc->_pParentDescriptor; + } + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + { + osUnmapSystemMemory(pMemDesc, Kernel, ProcessId, Address, Priv); + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + } +} + +typedef enum +{ + MEMDESC_MAP_INTERNAL_TYPE_GSP, // On GSP, use a pre-existing mapping + MEMDESC_MAP_INTERNAL_TYPE_COHERENT_FBMEM, // For NVLINK, use a pre-existing mapping for fbmem + MEMDESC_MAP_INTERNAL_TYPE_BAR2, // Use BAR2 (fbmem or reflected sysmem) + MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT, // Use OS to map sysmem +} MEMDESC_MAP_INTERNAL_TYPE; + +static MEMDESC_MAP_INTERNAL_TYPE +memdescGetMapInternalType +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + if (RMCFG_FEATURE_PLATFORM_GSP) + { + return MEMDESC_MAP_INTERNAL_TYPE_GSP; + } + + return MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT; +} + +void +memdescFlushCpuCaches +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Flush WC to get the data written to this mapping out to memory + osFlushCpuWriteCombineBuffer(); + +} + +/* + * @brief map memory descriptor for internal access + * + * flags - subset of TRANSFER_FLAGS_ + */ +void* +memdescMapInternal +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + MEMDESC_MAP_INTERNAL_TYPE mapType; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pMemDesc != NULL, NULL); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + mapType = memdescGetMapInternalType(pGpu, pMemDesc); + + if (pMemDesc->_pInternalMapping != NULL) + { + NV_ASSERT(pMemDesc->_internalMappingRefCount); + + pMemDesc->_internalMappingRefCount++; + return pMemDesc->_pInternalMapping; + } + + switch (mapType) + { + case MEMDESC_MAP_INTERNAL_TYPE_GSP: + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pInternalMapping != NULL, NULL); + break; + case MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT: + { + status = memdescMapOld(pMemDesc, 0, pMemDesc->Size, NV_TRUE, NV_PROTECT_READ_WRITE, + &pMemDesc->_pInternalMapping, &pMemDesc->_pInternalMappingPriv); + NV_CHECK_OR_RETURN(LEVEL_ERROR, status == NV_OK, NULL); + break; + } + + default: + DBG_BREAKPOINT(); + } + + pMemDesc->_internalMappingRefCount = 1; + return pMemDesc->_pInternalMapping; +} + +void memdescUnmapInternal +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + MEMDESC_MAP_INTERNAL_TYPE mapType; + + NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL); + NV_ASSERT_OR_RETURN_VOID(pMemDesc->_pInternalMapping != NULL && pMemDesc->_internalMappingRefCount != 0); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + mapType = memdescGetMapInternalType(pGpu, pMemDesc); + + if (mapType == MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT || mapType == 
MEMDESC_MAP_INTERNAL_TYPE_BAR2) + { + memdescFlushCpuCaches(pGpu, pMemDesc); + } + + if (--pMemDesc->_internalMappingRefCount == 0) + { + switch (mapType) + { + case MEMDESC_MAP_INTERNAL_TYPE_GSP: + break; + case MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT: + memdescUnmapOld(pMemDesc, NV_TRUE, 0, + pMemDesc->_pInternalMapping, pMemDesc->_pInternalMappingPriv); + break; + + default: + DBG_BREAKPOINT(); + } + + pMemDesc->_pInternalMapping = NULL; + pMemDesc->_pInternalMappingPriv = NULL; + pMemDesc->_internalMappingRefCount = 0; + } + +} + +/*! + * Describe an existing region of memory in a memory descriptor + * + * Memory must be physically contiguous. + * + * The memory descriptor must be initialized with + * memdescCreate*(), typically memdescCreateExisting() + * prior to calling memdescDescribe. + * + * memdescDescribe() now only updates the fields needed in the call. + * + * @param[out] pMemDesc Memory descriptor to fill + * @param[in] AddressSpace Address space of memory + * @param[in] Base Physical address of region + * @param[in] Size Size of region + * + * @returns None + */ +void +memdescDescribe +( + MEMORY_DESCRIPTOR *pMemDesc, + NV_ADDRESS_SPACE AddressSpace, + RmPhysAddr Base, + NvU64 Size +) +{ + // Some sanity checks to see if we went through MemCreate*() first + NV_ASSERT((pMemDesc->RefCount == 1) && + (memdescGetDestroyCallbackList(pMemDesc) == NULL) && + (pMemDesc->PteAdjust == 0)); + + NV_ASSERT(pMemDesc->_pIommuMappings == NULL); + NV_ASSERT(pMemDesc->Allocated == 0); + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // + // Check if the base address accounts for the DMA window start address + // (always in the high, unaddressable bits of the address) and add it + // if necessary. On most platforms, the DMA window start address will + // simply be 0. + // + // This is most likely to happen in cases where the Base address is + // read directly from a register or MMU entry, which does not already + // account for the DMA window. + // + if (pMemDesc->pGpu == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "unable to check Base 0x%016llx for DMA window\n", Base); + } + + if (pMemDesc->Alignment != 0) + { + NV_ASSERT(NV_FLOOR_TO_QUANTA(Base, pMemDesc->Alignment) == Base); + } + + pMemDesc->Size = Size; + pMemDesc->ActualSize = Size; + pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + pMemDesc->_addressSpace = AddressSpace; + pMemDesc->_pteArray[0] = Base & ~RM_PAGE_MASK; + pMemDesc->_subDeviceAllocCount = 1; + pMemDesc->PteAdjust = NvU64_LO32(Base) & RM_PAGE_MASK; + pMemDesc->PageCount = ((Size + pMemDesc->PteAdjust + RM_PAGE_SIZE - 1) >> RM_PAGE_SHIFT); + pMemDesc->_pParentDescriptor = NULL; + pMemDesc->childDescriptorCnt = 0; +} + +/*! + * Fill the PTE array of a memory descriptor with an array of addresses + * returned by pmaAllocatePages(). + * + * Memory must be physically discontiguous. For the contiguous case + * memdescDescribe() is more apt. + * + * The memory descriptor must be initialized with memdescCreate*(), + * typically memdescCreateExisting() prior to calling + * memdescFillPages(). 
+ * + * @param[in] pMemDesc Memory descriptor to fill + * @param[in] pageIndex Index into memory descriptor to fill from + * @param[in] pPages Array of physical addresses + * @param[in] pageCount Number of entries in pPages + * @param[in] pageSize Size of each page in pPages + * + * @returns None + */ +void +memdescFillPages +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 pageIndex, + NvU64 *pPages, + NvU32 pageCount, + NvU32 pageSize +) +{ + NvU32 i, j, k; + NvU32 numChunks4k = pageSize / RM_PAGE_SIZE; + NvU32 offset4k = numChunks4k * pageIndex; + NvU32 pageCount4k = numChunks4k * pageCount; + NvU32 result4k, limit4k; + NvU64 addr; + + NV_ASSERT(pMemDesc != NULL); + + NV_ASSERT(offset4k < pMemDesc->PageCount); + NV_ASSERT(portSafeAddU32(offset4k, pageCount4k, &result4k)); + + // + // There is a possibility that the pMemDesc was created using 4K aligned + // allocSize, but the actual memory allocator could align up the allocation + // size based on its supported pageSize, (e.g. PMA supports 64K pages). In + // that case, pageCount4k would be greater than pMemdesc->pageCount. We + // limit pageCount4k to stay within pMemdesc->pageCount in that case. + // + if (result4k > pMemDesc->PageCount) + pageCount4k = pMemDesc->PageCount - offset4k; + + NV_ASSERT(pageSize > 0); + NV_ASSERT(0 == (pageSize & (RM_PAGE_SIZE - 1))); + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // Fill _pteArray array using numChunks4k as a stride + for (i = 0, j = offset4k; i < pageCount; i++, j += numChunks4k) + { + pMemDesc->_pteArray[j] = addr = pPages[i]; + + // Fill _pteArray at 4K granularity + limit4k = NV_MIN(j + numChunks4k, pageCount4k); + + addr += RM_PAGE_SIZE; + for (k = j + 1; k < limit4k; k++, addr += RM_PAGE_SIZE) + pMemDesc->_pteArray[k] = addr; + } +} + +// +// SubMemory per subdevice chart: (MD - Memory Descriptor, SD - subdevice) +// +// If we try to create submemory of descriptor which has subdevices: +// +// [Top level MD] +// ^ | +// | +--------> [ Subdevice 0 MD ] --------> [Subdevice 1 MD] +// | ^ ^ +// | | | +// [SubMemory top level MD] | | +// | | | +// +--------> [Subdevice 0 SubMemory MD] --------> [Subdevice 1 SubMemory MD] +// +// Top Level MD : parent of SubMemoryTopLevelMD; has subdescriptors +// for two subdevices +// SubMemory top level MD : has pointer to parent memory descriptor; has two +// subdevice MDs +// Subdevice 0 MD : subdevice MD of topLevelMD and parent of SD0 +// submemory descriptor; has pointer to next in the +// list of subdevice MDs +// Subdevice 0 SubMemory MD : submemory of subdevice 0 MD; has pointer to +// parent, subdevice 0 MD and to next in list of +// submemory subdevice memory descriptors +// + + + +/*! + * @brief Create a new memory descriptor that is a subset of pMemDesc. 
If + * pMemDesc has subdevice memory descriptors subMemory will be created for all + * subdevices and new memory descriptor will be top level for them (ASCII art) + * + * @param[out] ppMemDescNew New memory descriptor + * @param[in] pMemDesc Original memory descriptor + * @param[in] pGpu The GPU that this memory will be mapped to + * @param[in] Offset Sub memory descriptor starts at pMemdesc+Offset + * @param[in] Size For Size bytes + * + * @returns None + */ +NV_STATUS +memdescCreateSubMem +( + MEMORY_DESCRIPTOR **ppMemDescNew, + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + NvU64 Offset, + NvU64 Size +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDescNew; + NvU32 subDevInst; + NvU64 tmpSize = Size; + MEMORY_DESCRIPTOR *pLast; + MEMORY_DESCRIPTOR *pNew; + OBJGPU *pGpuChild; + + // Default to the original memdesc's GPU if none is specified + if (pGpu == NULL) + { + pGpu = pMemDesc->pGpu; + } + + // Allocation size should be adjusted for the memory descriptor _pageSize. + // Also note that the first 4k page may not be at _pageSize boundary so at + // the time of the mapping, we maybe overmapping at the beginning or end of + // the descriptor. To fix it in the right way, memory descriptor needs to + // be further cleaned. Do not round to page size if client specifies so. + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE) && + pMemDesc->_pageSize != 0) + { + PMEMORY_DESCRIPTOR pTempMemDesc = pMemDesc; + NvU64 pageOffset; + + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + NV_ASSERT(pGpu); + pTempMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + pageOffset = memdescGetPhysAddr(pTempMemDesc, AT_CPU, Offset) & + (pTempMemDesc->_pageSize - 1); + tmpSize = RM_ALIGN_UP(pageOffset + Size, pTempMemDesc->_pageSize); + } + + // Allocate the new MEMORY_DESCRIPTOR + status = memdescCreate(&pMemDescNew, pGpu, tmpSize, 0, + !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS), + pMemDesc->_addressSpace, + pMemDesc->_cpuCacheAttrib, + ((pMemDesc->_flags & ~MEMDESC_FLAGS_PRE_ALLOCATED) | MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE)); + + if (status != NV_OK) + { + return status; + } + + // Fill in various fields as best we can; XXX this can get sort of sketchy + // in places, which should be all the more motivation to rip some of these + // fields out of the MEMORY_DESCRIPTOR. 
+ if (pMemDesc->_flags & MEMDESC_FLAGS_KERNEL_MODE) + pMemDescNew->_flags |= MEMDESC_FLAGS_KERNEL_MODE; + else + pMemDescNew->_flags &= ~MEMDESC_FLAGS_KERNEL_MODE; + + pMemDescNew->Size = Size; + pMemDescNew->_pteKind = pMemDesc->_pteKind; + pMemDescNew->_hwResId = pMemDesc->_hwResId; + if (pMemDesc->_flags & MEMDESC_FLAGS_ENCRYPTED) + pMemDescNew->_flags |= MEMDESC_FLAGS_ENCRYPTED; + else + pMemDescNew->_flags &= ~MEMDESC_FLAGS_ENCRYPTED; + pMemDescNew->_pageSize = pMemDesc->_pageSize; + pMemDescNew->_gpuCacheAttrib = pMemDesc->_gpuCacheAttrib; + pMemDescNew->_gpuP2PCacheAttrib = pMemDesc->_gpuP2PCacheAttrib; + pMemDescNew->gfid = pMemDesc->gfid; + pMemDescNew->bUsingSuballocator = pMemDesc->bUsingSuballocator; + pMemDescNew->_pParentDescriptor = pMemDesc; + pMemDesc->childDescriptorCnt++; + + pMemDescNew->subMemOffset = Offset; + + // increase refCount of parent descriptor + memdescAddRef(pMemDesc); + + // Fill in the PteArray and PteAdjust + if ((pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) || + (pMemDesc->PageCount == 1)) + { + // Compute the base address, then fill it in + RmPhysAddr Base = pMemDesc->_pteArray[0] + pMemDesc->PteAdjust + Offset; + pMemDescNew->_pteArray[0] = Base & ~RM_PAGE_MASK; + pMemDescNew->PteAdjust = NvU64_LO32(Base) & RM_PAGE_MASK; + } + else + { + // More complicated... + RmPhysAddr Adjust; + NvU32 PageIndex, i; + + // We start this many bytes into the memory alloc + Adjust = pMemDesc->PteAdjust + Offset; + + // Break it down into pages (PageIndex) and bytes (PteAdjust) + PageIndex = (NvU32)(Adjust >> RM_PAGE_SHIFT); + pMemDescNew->PteAdjust = NvU64_LO32(Adjust) & RM_PAGE_MASK; + + // Fill in the PTEs; remember to copy the extra PTE, in case we need it + if (pMemDesc->PageCount) + { + for (i = 0; i < pMemDescNew->PageCount+1; i++) + { + NvU32 j = i + PageIndex; + if (j < pMemDesc->PageCount) + { + pMemDescNew->_pteArray[i] = pMemDesc->_pteArray[j]; + } + else + { + // + // This case can happen with page size greater than 4KB. + // Since pages are always tracked at 4KB granularity the + // subset description may overflow the parent memdesc. + // + // In this case the best we can do is describe the contiguous + // memory after the last 4KB page in the sub-memdesc. + // + // TODO: Tracking memdesc pages at native page size would + // remove the need for several hacks, including this one. + // + NV_ASSERT(i > 0); + pMemDescNew->_pteArray[i] = pMemDescNew->_pteArray[i - 1] + RM_PAGE_SIZE; + } + } + } + } + + if ((pMemDesc->_addressSpace == ADDR_SYSMEM) && + !memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_CPU_ONLY) && + !memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1)) + { + // + // For different IOVA spaces, the IOMMU mapping will often not be a + // subrange of the original mapping. + // + // Request the submapping to be associated with the submemdesc. + // + // TODO: merge the new IOMMU paths with the SMMU path above (see bug + // 1625121). 
+ // + status = memdescMapIommu(pMemDescNew, pGpu->busInfo.iovaspaceId); + if (status != NV_OK) + { + memdescDestroy(pMemDescNew); + return status; + } + } + + // Support for SLI submemory per-subdevice allocations (refer to chart) + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + + if (gpumgrGetBcEnabledStatus(pGpu) && (pMemDesc->_addressSpace == ADDR_FBMEM)) + { + NV_ASSERT(!!(pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE)); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + pLast = pMemDescNew; + + pMemDescNew->_subDeviceAllocCount = pMemDesc->_subDeviceAllocCount; + + for (subDevInst = 0; subDevInst < pMemDesc->_subDeviceAllocCount; subDevInst++) + { + pGpuChild = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), subDevInst); + status = memdescCreateSubMem(&pNew, memdescGetMemDescFromGpu(pMemDesc, pGpuChild), pGpuChild, Offset, Size); + + if (status != NV_OK) + { + while (NULL != pMemDescNew) + { + pNew = pMemDescNew; + pMemDescNew = pMemDescNew->_pNext; + memdescDestroy(pNew); + } + return status; + } + + pLast->_pNext = pNew; + pLast = pNew; + } + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + + *ppMemDescNew = pMemDescNew; + + return NV_OK; +} + +/*! + * Given a memdesc, this checks if the allocated memory falls under subheap or in GPA address space + */ +static NvBool +_memIsSriovMappingsEnabled +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + return gpuIsSriovEnabled(pMemDesc->pGpu) && + (((pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) && pMemDesc->bUsingSuballocator) || + (pMemDesc->_flags & MEMDESC_FLAGS_GUEST_ALLOCATED)); +} + +/*! + * @brief Return the physical addresses of pMemdesc + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] pGpu GPU to return the addresses for + * @param[in] addressTranslation Address translation identifier + * @param[in] offset Offset into memory descriptor + * @param[in] stride How much to advance the offset for each + * consecutive address + * @param[in] count How many addresses to retrieve + * @param[out] pAddresses Returned array of addresses + * + */ +void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses) +{ + // + // Get the PTE array that we should use for phys addr lookups based on the + // MMU context. (see bug 1625121) + // + NvU64 i; + RmPhysAddr *pteArray = memdescGetPteArrayForGpu(pMemDesc, pGpu, addressTranslation); + const NvBool contiguous = (memdescGetPteArraySize(pMemDesc, addressTranslation) == 1); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + offset += pMemDesc->PteAdjust; + + for (i = 0; i < count; ++i) + { + if (contiguous) + { + pAddresses[i] = pteArray[0] + offset; + } + else + { + NvU32 PageIndex = (NvU32)(offset >> RM_PAGE_SHIFT); + pAddresses[i] = pteArray[PageIndex] + (offset & RM_PAGE_MASK); + } + + offset += stride; + } +} + + +/*! 
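+ * Example: a minimal sketch of carving a sub-range view with
+ * memdescCreateSubMem(). Passing NULL for pGpu defaults to the parent's
+ * GPU, and the call takes a reference on the parent that memdescDestroy()
+ * releases. The helper name exampleSubView is hypothetical.
+ */
+#if 0 // illustrative sketch only; not compiled
+static NV_STATUS
+exampleSubView
+(
+ MEMORY_DESCRIPTOR *pParent,
+ NvU64 offset,
+ NvU64 size
+)
+{
+ MEMORY_DESCRIPTOR *pSub = NULL;
+ NV_STATUS status;
+
+ status = memdescCreateSubMem(&pSub, pParent, NULL, offset, size);
+ if (status != NV_OK)
+ return status;
+
+ // The submem is never "Allocated"; its pages resolve through the parent.
+ NV_PRINTF(LEVEL_INFO, "sub view starts @ 0x%llx\n",
+ memdescGetPhysAddr(pSub, AT_CPU, 0));
+
+ memdescDestroy(pSub); // drops the parent reference taken above
+ return NV_OK;
+}
+#endif
+
+/*!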
+ * @brief Return the physical addresses of pMemdesc
+ *
+ * @param[in] pMemDesc Memory descriptor used
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] offset Offset into memory descriptor
+ * @param[in] stride How much to advance the offset for each
+ * consecutive address
+ * @param[in] count How many addresses to retrieve
+ * @param[out] pAddresses Returned array of addresses
+ *
+ */
+void memdescGetPhysAddrs(MEMORY_DESCRIPTOR *pMemDesc,
+ ADDRESS_TRANSLATION addressTranslation,
+ NvU64 offset,
+ NvU64 stride,
+ NvU64 count,
+ RmPhysAddr *pAddresses)
+{
+ memdescGetPhysAddrsForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation, offset, stride, count, pAddresses);
+}
+
+/*!
+ * @brief Return the physical address of pMemdesc+Offset
+ *
+ * @param[in] pMemDesc Memory descriptor used
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] offset Offset into memory descriptor
+ *
+ * @returns A physical address
+ */
+RmPhysAddr
+memdescGetPhysAddr
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ ADDRESS_TRANSLATION addressTranslation,
+ NvU64 offset
+)
+{
+ RmPhysAddr addr;
+ memdescGetPhysAddrs(pMemDesc, addressTranslation, offset, 0, 1, &addr);
+ return addr;
+}
+
+/*!
+ * @brief Return physical address for page specified by PteIndex
+ *
+ * @param[in] pMemDesc Memory descriptor to use
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] PteIndex Look up this PteIndex
+ *
+ * @returns A physical address
+ */
+RmPhysAddr
+memdescGetPte
+(
+ PMEMORY_DESCRIPTOR pMemDesc,
+ ADDRESS_TRANSLATION addressTranslation,
+ NvU32 PteIndex
+)
+{
+ //
+ // Get the PTE array that we should use for phys addr lookups based on the
+ // MMU context. (see bug 1625121)
+ //
+ RmPhysAddr *pteArray = memdescGetPteArray(pMemDesc, addressTranslation);
+ RmPhysAddr PhysAddr;
+
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+ if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
+ {
+ PhysAddr = pteArray[0] + (PteIndex << RM_PAGE_SHIFT);
+ }
+ else
+ {
+ PhysAddr = pteArray[PteIndex];
+ }
+
+ return PhysAddr;
+}
+
+/*!
+ * @brief Set physical address for page specified by PteIndex
+ *
+ * @param[in] pMemDesc Memory descriptor to use
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] PteIndex Update this PteIndex
+ * @param[in] PhysAddr New PTE address
+ *
+ * @returns None
+ */
+void
+memdescSetPte
+(
+ PMEMORY_DESCRIPTOR pMemDesc,
+ ADDRESS_TRANSLATION addressTranslation,
+ NvU32 PteIndex,
+ RmPhysAddr PhysAddr
+)
+{
+ //
+ // Get the PTE array that we should use for phys addr lookups based on the
+ // MMU context. (see bug 1625121)
+ //
+ RmPhysAddr *pteArray = memdescGetPteArray(pMemDesc, addressTranslation);
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+ if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
+ {
+ NV_ASSERT_OR_RETURN_VOID(PteIndex == 0);
+ }
+
+ pteArray[PteIndex] = PhysAddr;
+
+ // Free pteArraySpa
+ portMemFree(pMemDesc->pPteSpaMappings);
+ pMemDesc->pPteSpaMappings = NULL;
+}
+
+/*!
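+ * Example: a minimal sketch of walking a descriptor's 4K pages with
+ * memdescGetPhysAddr() and a page-sized stride, matching how
+ * memdescGetPhysAddrs() advances its offset. The helper name
+ * exampleDumpPages is hypothetical.
+ */
+#if 0 // illustrative sketch only; not compiled
+static void
+exampleDumpPages
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ NvU64 i;
+
+ for (i = 0; i < pMemDesc->PageCount; i++)
+ {
+ RmPhysAddr addr = memdescGetPhysAddr(pMemDesc, AT_CPU, i << RM_PAGE_SHIFT);
+
+ NV_PRINTF(LEVEL_INFO, "page %llu @ 0x%llx\n", i, addr);
+ }
+}
+#endif
+
+/*!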
+ * @brief Return page array size based on the MMU context + * For SRIOV, the host context (AT_PA) will + * have discontiguous view of the GPA in SPA space + * This is treated similar to discontiguous memdescs + * + * @param[in] pMemDesc Memory descriptor to use + * @param[in] addressTranslation Address translation identifier + * + * @returns PageArray + */ +NvU32 memdescGetPteArraySize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + // Contiguous allocations in SPA domain can be non-contiguous at vmmusegment granularity. + // Hence treat SPA domain allocations as non-contiguous by default. + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) || + ((addressTranslation == AT_PA) && (pMemDesc->_addressSpace == ADDR_FBMEM) && _memIsSriovMappingsEnabled(pMemDesc))) + { + return NvU64_LO32(pMemDesc->PageCount); + } + return 1; +} + +/*! + * @brief Return page array + * + * @param[in] pMemDesc Memory descriptor to use + * @param[in] pGpu GPU to get the PTE array for. + * @param[in] addressTranslation Address translation identifier + * + * @returns PageArray + */ +RmPhysAddr * +memdescGetPteArrayForGpu +( + PMEMORY_DESCRIPTOR pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + switch (AT_VALUE(addressTranslation)) + { + // + // In SRIOV systems, an access from guest has to go through the following translations + // GVA -> GPA -> SPA + // + // Given HOST manages channel/memory management for guest, there are certain code paths that + // expects VA -> GPA translations and some may need GPA -> SPA translations. We use addressTranslation + // to differentiate between these cases. + // Since GPA -> SPA is very similar to IOMMU xlation and since existing AT_PA is used only in + // SYSMEM allocations, we decided to reuse AT_PA addressTranslation to fetch GPA -> SPA xlations. + // In case of non-SRIOV systems, using AT_PA will fall back to AT_GPU or default context. + // + // pMemDesc -> _pteArray tracks GVA -> GPA translations + // pMemDesc -> pPteSpaMappings tracks GPA -> SPA translations + // + + case AT_VALUE(AT_PA): + { + } + case AT_VALUE(AT_GPU): + { + // Imported ADDR_FABRIC_V2 memdescs are device-less. + if (pGpu != NULL) + { + PIOVAMAPPING pIovaMap = memdescGetIommuMap(pMemDesc, pGpu->busInfo.iovaspaceId); + if (pIovaMap != NULL) + { + return pIovaMap->iovaArray; + } + } + + // + // If no IOMMU mapping exists in the default IOVASPACE, fall + // through and use the physical memory descriptor instead. + // + } + default: + { + return pMemDesc->_pteArray; + } + } +} + + + +/*! + * @brief Convert aperture into a descriptive string. + * + * @param[in] addressSpace + * + * @returns String + * + * @todo "text" + */ +const char * +memdescGetApertureString +( + NV_ADDRESS_SPACE addressSpace +) +{ + static NV_PRINTF_STRING_SECTION const char ADDR_FBMEM_STR[] = "VIDEO MEMORY"; + static NV_PRINTF_STRING_SECTION const char ADDR_SYSMEM_STR[] = "SYSTEM MEMORY"; + + if (addressSpace == ADDR_FBMEM) + { + return ADDR_FBMEM_STR; + } + + if (addressSpace == ADDR_SYSMEM) + { + return ADDR_SYSMEM_STR; + } + + return NULL; +} + +/*! 
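+ * Example: a minimal sketch combining memdescGetPteArrayForGpu(),
+ * memdescGetPteArraySize() and memdescGetApertureString(); a single-entry
+ * array means the memory is treated as contiguous in this MMU context.
+ * The helper name exampleLogAperture is hypothetical.
+ */
+#if 0 // illustrative sketch only; not compiled
+static void
+exampleLogAperture
+(
+ OBJGPU *pGpu,
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ RmPhysAddr *pteArray = memdescGetPteArrayForGpu(pMemDesc, pGpu, AT_GPU);
+ NvU32 entries = memdescGetPteArraySize(pMemDesc, AT_GPU);
+ const char *pAperture = memdescGetApertureString(pMemDesc->_addressSpace);
+
+ NV_PRINTF(LEVEL_INFO, "%s: %u PTE array entries, first @ 0x%llx\n",
+ (pAperture != NULL) ? pAperture : "OTHER",
+ entries, pteArray[0]);
+}
+#endif
+
+/*!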
+ * @brief Compare two memory descriptors to see if they describe the same memory
+ *
+ * @param[in] pMemDescOne
+ * @param[in] pMemDescTwo
+ *
+ * @returns NV_TRUE if the memory descriptors refer to the same memory
+ */
+NvBool
+memdescDescIsEqual
+(
+ MEMORY_DESCRIPTOR *pMemDescOne,
+ MEMORY_DESCRIPTOR *pMemDescTwo
+)
+{
+ if ((pMemDescOne == NULL) || (pMemDescTwo == NULL))
+ return NV_FALSE;
+
+ if (pMemDescOne->_addressSpace != pMemDescTwo->_addressSpace)
+ return NV_FALSE;
+
+ // All the physical memory views should match.
+ if ((memdescGetPhysAddr(pMemDescOne, AT_CPU, 0) != memdescGetPhysAddr(pMemDescTwo, AT_CPU, 0)) ||
+ (memdescGetPhysAddr(pMemDescOne, AT_GPU, 0) != memdescGetPhysAddr(pMemDescTwo, AT_GPU, 0)))
+ return NV_FALSE;
+
+ if (memdescGetCpuCacheAttrib(pMemDescOne) != memdescGetCpuCacheAttrib(pMemDescTwo))
+ return NV_FALSE;
+
+ if (pMemDescOne->Size != pMemDescTwo->Size)
+ return NV_FALSE;
+
+ if (pMemDescOne->Alignment != pMemDescTwo->Alignment)
+ return NV_FALSE;
+
+ if (pMemDescOne->_pageSize != pMemDescTwo->_pageSize)
+ return NV_FALSE;
+
+ return NV_TRUE;
+}
+
+/*!
+ * @brief Add callback block to the destroy callback queue
+ *
+ * @param[in] pMemDesc Memory descriptor to update
+ * @param[in] pCb Callee allocated block with callback func/arg
+ *
+ * @returns nothing
+ */
+void
+memdescAddDestroyCallback
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ MEM_DESC_DESTROY_CALLBACK *pCb
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ pCb->pNext = memdescGetDestroyCallbackList(pMemDesc);
+ memdescSetDestroyCallbackList(pMemDesc, pCb);
+}
+
+/*!
+ * @brief Remove callback block from the destroy callback queue
+ *
+ * @param[in] pMemDesc Memory descriptor to update
+ * @param[in] pRemoveCb Callee allocated block with callback func/arg
+ *
+ * @returns nothing
+ */
+void
+memdescRemoveDestroyCallback
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ MEM_DESC_DESTROY_CALLBACK *pRemoveCb
+)
+{
+ MEM_DESC_DESTROY_CALLBACK *pCb = memdescGetDestroyCallbackList(pMemDesc);
+ MEM_DESC_DESTROY_CALLBACK *pPrev = NULL;
+
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ while (pCb)
+ {
+ if (pCb == pRemoveCb)
+ {
+ if (pPrev == NULL)
+ {
+ memdescSetDestroyCallbackList(pMemDesc, pCb->pNext);
+ }
+ else
+ {
+ pPrev->pNext = pCb->pNext;
+ }
+ break;
+ }
+ pPrev = pCb;
+ pCb = pCb->pNext;
+ }
+}
+
+/*!
+ * @brief Retrieves a subdevice's memory descriptor by subdevice instance
+ *
+ * Subdevice memory descriptors are memory descriptors that describe
+ * per-subdevice memory buffers. This functionality is required by our current
+ * SLI programming model as our memdescAlloc() calls are primarily broadcast
+ * operations. A singular memdesc works for video memory as the
+ * heaps are symmetric. However, we run into trouble when dealing with system
+ * memory as both GPUs then share the same address space and symmetric
+ * addressing is no longer possible.
+ *
+ * N.B. The rationale for exposing this routine is that it keeps SLI-isms out of
+ * most of the RM -- the alternative approach would've been to pass in the
+ * subdevice or a pGpu for all memdesc methods which would require more code
+ * changes solely for SLI. Long term hopefully we can transition to a unicast
+ * allocation model (SLI loops above memdescAlloc()/memdescCreate()) and the
+ * subdevice support in memdesc can (easily) be deleted.
This approach also + * provides a safety net against misuse, e.g., if we added pGpu to + * memdescGetPhysAddr, current code which utilizes that routine outside an SLI loop + * would execute cleanly even though it's incorrect. + * + * @param[in] pMemDesc Memory descriptor to query + * @param[in] subDeviceInst SLI subdevice instance (subdevice - 1) + * + * @returns Memory descriptor if one exist for the subdevice. + * NULL if none is found. + */ +MEMORY_DESCRIPTOR * +memdescGetMemDescFromSubDeviceInst(MEMORY_DESCRIPTOR *pMemDesc, NvU32 subDeviceInst) +{ + if (!memdescHasSubDeviceMemDescs(pMemDesc)) + { + return pMemDesc; + } + else + { + return memdescGetMemDescFromIndex(pMemDesc, subDeviceInst); + } +} + +/*! + * @brief Retrieves a subdevice's memory descriptor by GPU object + * + * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice memory + * descriptors + * + * @param[in] pMemDesc Memory descriptor to query + * @param[in] pGpu + * + * @returns Memory descriptor if one exist for the GPU. + * NULL if none is found. + */ +MEMORY_DESCRIPTOR * +memdescGetMemDescFromGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu) +{ + NvU32 subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + return memdescGetMemDescFromSubDeviceInst(pMemDesc, subDeviceInst); +} + +/*! + * @brief Retrieves a subdevice's memory descriptor by memdesc index. + * + * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice memory + * descriptors + * + * @param[in] pMemDesc Memory descriptor to query + * @param[in] index Index into array of memdesc + * + * @returns Memory descriptor if one exist for the GPU. + * NULL if none is found. + */ +MEMORY_DESCRIPTOR * +memdescGetMemDescFromIndex(MEMORY_DESCRIPTOR *pMemDesc, NvU32 index) +{ + if (!memdescHasSubDeviceMemDescs(pMemDesc)) + { + return pMemDesc; + } + else + { + MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc->_pNext; + + NV_ASSERT(pSubDevMemDesc); + + while (index--) + { + pSubDevMemDesc = pSubDevMemDesc->_pNext; + + if (!pSubDevMemDesc) + { + NV_ASSERT(0); + return NULL; + } + } + + return pSubDevMemDesc; + } +} + +/*! + * @brief Set address for a fixed heap allocation. + * + * Offset must refer to the heap. A later memdescAlloc() will + * force this offset. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] fbOffset Offset to refer to + * + * @returns nothing + */ +void +memdescSetHeapOffset +( + MEMORY_DESCRIPTOR *pMemDesc, + RmPhysAddr fbOffset +) +{ + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM); + NV_ASSERT(pMemDesc->Allocated == NV_FALSE); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_flags |= MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE; + pMemDesc->_pteArray[0] = fbOffset; +} + +/*! + * @brief Set GPU cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to GPU cacheable + * + * @returns nothing + */ +void memdescSetGpuCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cacheAttrib +) +{ + NV_ASSERT(pMemDesc->Allocated == NV_FALSE); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_gpuCacheAttrib = cacheAttrib; +} + +/*! + * @brief Get GPU P2P cache attributes + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current GPU P2P cache attributes + */ +NvU32 memdescGetGpuP2PCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_gpuP2PCacheAttrib; +} + +/*! 
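+ * Example: a minimal sketch of a unicast lookup. For a
+ * MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE allocation the top-level memdesc owns
+ * no pages itself, so per-GPU code resolves the subdevice view first.
+ * The helper name exampleSubDeviceBase is hypothetical.
+ */
+#if 0 // illustrative sketch only; not compiled
+static RmPhysAddr
+exampleSubDeviceBase
+(
+ OBJGPU *pGpu,
+ MEMORY_DESCRIPTOR *pTopLevel
+)
+{
+ MEMORY_DESCRIPTOR *pLocal = memdescGetMemDescFromGpu(pTopLevel, pGpu);
+
+ return memdescGetPhysAddr(pLocal, AT_GPU, 0);
+}
+#endif
+
+/*!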
+ * @brief Set GPU P2P cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to GPU P2P cacheable + * + * @returns nothing + */ +void memdescSetGpuP2PCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cacheAttrib +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_gpuP2PCacheAttrib = cacheAttrib; +} + +/*! + * @brief Set CPU cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to CPU cacheable + * + * @returns nothing + */ +void memdescSetCpuCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cpuCacheAttrib +) +{ + // + // When running 64-bit MODS on ARM v8, we need to force all CPU mappings as WC. + // This seems to be an issue with glibc. See bug 1556221. + // + // Ideally, this should have been set based on a Core Logic (CL) property. + // But chipset initialization will only happen during bifStateInit(). + // RM can makes sysmem CPU mappings before bifStateInit(). + // + if (RMCFG_FEATURE_PLATFORM_MODS && NVCPU_IS_AARCH64) + { + if (cpuCacheAttrib == NV_MEMORY_UNCACHED) + { + cpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + } + } + + pMemDesc->_cpuCacheAttrib = cpuCacheAttrib; +} + +/*! + * @brief Print contents of a MEMORY_DESCRIPTOR in a human readable format. + * + * @param[in] pMemDesc Memory Descriptor to print + * @param[in] bPrintIndividualPages Individual pages will also be printed + * iff they are discontiguous + * @param[in] pPrefixMessage Message that will be printed before the contents + * of the Memory Descriptor are printed. + * + * @returns nothing + */ +void memdescPrintMemdesc +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool bPrintIndividualPages, + const char *pPrefixMessage +) +{ +#if 0 + NvU32 i; + + if ((DBG_RMMSG_CHECK(DBG_LEVEL_INFO) == 0) || (pPrefixMessage == NULL) || (pMemDesc == NULL)) + { + return; + } + + NV_PRINTF(LEVEL_INFO, + "%s Aperture %s starting at 0x%llx and of size 0x%llx\n", + pPrefixMessage, + memdescGetApertureString(pMemDesc->_addressSpace), + memdescGetPhysAddr(pMemDesc, AT_CPU, 0), + pMemDesc->Size); + + if ((bPrintIndividualPages == NV_TRUE) && + (pMemDesc->PageCount > 1) && + (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS))) + { + for (i = 0; i < pMemDesc->PageCount; i++) + { + NV_PRINTF(LEVEL_INFO, + " contains page starting @0x%llx\n", + pMemDesc->_pteArray[i]); + } + } + + // TODO: merge with SMMU path above (see bug 1625121). + if (pMemDesc->_pIommuMappings != NULL) + { + if (!memdescIsSubMemoryMemDesc(pMemDesc)) + { + PIOVAMAPPING pIovaMap = pMemDesc->_pIommuMappings; + while (pIovaMap != NULL) + { + NV_PRINTF(LEVEL_INFO, + "Has additional IOMMU mapping for IOVA space 0x%x starting @ 0x%llx\n", + pIovaMap->iovaspaceId, + pIovaMap->iovaArray[0]); + pIovaMap = pIovaMap->pNext; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "Has additional IOMMU mapping starting @ 0x%llx\n", + memdescGetPhysAddr(pMemDesc, AT_PA, 0)); + } + } +#endif // NV_PRINTF_ENABLED +} + +/*! + * @brief Return page offset from a MEMORY_DESCRIPTOR for an arbitrary power of two page size + * + * PageAdjust covers the 4KB alignment, but must include bits from the address for big pages. 
+ * + * @param[in] pMemDesc Memory Descriptor to print + * @param[in] pageSize Page size (4096, 64K, 128K, etc) + * + * @returns nothing + */ +NvU64 memdescGetPageOffset +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 pageSize +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return (pMemDesc->PteAdjust + pMemDesc->_pteArray[0]) & (pageSize-1); +} + +/*! + * @brief Get PTE kind using GPU + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pGpu GPU to be used get supported kind + * @param[in] addressTranslation Address translation identifier + * + * @returns Current PTE kind value. + */ +NvU32 memdescGetPteKindForGpu +( + PMEMORY_DESCRIPTOR pMemDesc, + OBJGPU *pGpu +) +{ + return memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pMemDesc->_pteKind); +} + +/*! + * @brief Set PTE kind using GPU. + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pGpu GPU to be used set supported kind + * @param[in] addressTranslation Address translation identifier + * @param[in] pteKind New PTE kind + * + * @returns nothing + */ +void memdescSetPteKindForGpu +( + PMEMORY_DESCRIPTOR pMemDesc, + OBJGPU *pGpu, + NvU32 pteKind +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pteKind = memmgrGetSwPteKindFromHwPteKind_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pteKind); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SET_KIND, NV_TRUE); +} + +/*! + * @brief Set PTE kind compressed value. + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pteKind New PTE kind compressed value + * + * @returns nothing + */ +void memdescSetPteKindCompressed +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 pteKindCmpr +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pteKindCompressed = pteKindCmpr; +} + +/*! + * @brief Get PTE kind compressed value. + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * + * @returns Current PTE kind compressed value. + */ +NvU32 memdescGetPteKindCompressed +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pteKindCompressed; +} + +/*! + * @brief Get kernel mapping + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current kernel mapping + */ +NvP64 memdescGetKernelMapping +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_kernelMapping; +} + +/*! + * @brief Set kernel mapping + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] kernelMapping New kernel mapping + * + * @returns nothing + */ +void memdescSetKernelMapping +( + MEMORY_DESCRIPTOR *pMemDesc, + NvP64 kernelMapping +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_kernelMapping = kernelMapping; +} + +/*! + * @brief Get privileged kernel mapping + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current privileged kernel mapping + */ +NvP64 memdescGetKernelMappingPriv +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_kernelMappingPriv; +} + +/*! 
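+ * Example: a minimal sketch of a round trip through the PTE kind
+ * accessors; memdescSetPteKindForGpu() stores the SW view of the HW kind
+ * and also raises MEMDESC_FLAGS_SET_KIND. The helper name exampleSetKind
+ * is hypothetical, and the assert assumes the HW/SW kind conversions are
+ * inverses of each other.
+ */
+#if 0 // illustrative sketch only; not compiled
+static void
+exampleSetKind
+(
+ OBJGPU *pGpu,
+ MEMORY_DESCRIPTOR *pMemDesc,
+ NvU32 hwKind
+)
+{
+ memdescSetPteKindForGpu(pMemDesc, pGpu, hwKind);
+ NV_ASSERT(memdescGetPteKindForGpu(pMemDesc, pGpu) == hwKind);
+}
+#endif
+
+/*!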
+ * @brief Set privileged kernel mapping
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] kernelMappingPriv New privileged kernel mapping
+ *
+ * @returns nothing
+ */
+void memdescSetKernelMappingPriv
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ NvP64 kernelMappingPriv
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ pMemDesc->_kernelMappingPriv = kernelMappingPriv;
+}
+
+
+/*!
+ * @brief Get standby buffer memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Pointer to standby buffer memory descriptor
+ */
+MEMORY_DESCRIPTOR *memdescGetStandbyBuffer
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ return pMemDesc->_pStandbyBuffer;
+}
+
+/*!
+ * @brief Set standby buffer memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] pStandbyBuffer Standby buffer memory descriptor pointer
+ *
+ * @returns nothing
+ */
+void memdescSetStandbyBuffer
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ MEMORY_DESCRIPTOR *pStandbyBuffer
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ pMemDesc->_pStandbyBuffer = pStandbyBuffer;
+}
+
+/*!
+ * @brief Set mem destroy callback list pointer
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] pCb Pointer to the head of the destroy callback list
+ *
+ * @returns nothing
+ */
+void memdescSetDestroyCallbackList
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ MEM_DESC_DESTROY_CALLBACK *pCb
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ pMemDesc->_pMemDestroyCallbackList = pCb;
+}
+
+/*!
+ * @brief Get guest ID for specified memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Guest ID value
+ */
+NvU64 memdescGetGuestId
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ return pMemDesc->_guestId;
+}
+
+/*!
+ * @brief Set guest ID for memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] guestId New guest ID
+ *
+ * @returns nothing
+ */
+void memdescSetGuestId
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ NvU64 guestId
+)
+{
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ pMemDesc->_guestId = guestId;
+}
+
+/*!
+ * @brief Get value of specified flag
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] flag MEMDESC_FLAGS_* value
+ *
+ * @returns Boolean value of specified flag
+ */
+NvBool memdescGetFlag
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ NvU64 flag
+)
+{
+ // For checking contiguity, use the memdescGetContiguity() api
+ NV_ASSERT(flag != MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS);
+ // GPU_IN_RESET is always set/read on the top level memdesc.
+ if (flag != MEMDESC_FLAGS_GPU_IN_RESET)
+ {
+ NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+ }
+ return !!(pMemDesc->_flags & flag);
+}
+
+/*!
+ * @brief Set value of specified flag
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] flag MEMDESC_FLAGS_* value
+ * @param[in] bValue Boolean value of flag
+ *
+ * @returns nothing
+ */
+void memdescSetFlag
+(
+ MEMORY_DESCRIPTOR *pMemDesc,
+ NvU64 flag,
+ NvBool bValue
+)
+{
+ // For setting contiguity, use the memdescSetContiguity() api
+ NV_ASSERT(flag != MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS);
+
+ // GPU_IN_RESET is always set/read on the top level memdesc.
+ if (flag != MEMDESC_FLAGS_GPU_IN_RESET) + { + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + } + + if (flag == MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) + { + NV_ASSERT_OK(_memdescSetSubAllocatorFlag(pMemDesc->pGpu, pMemDesc, bValue)); + return; + } + else if (flag == MEMDESC_FLAGS_GUEST_ALLOCATED) + { + NV_ASSERT_OK(_memdescSetGuestAllocatedFlag(pMemDesc->pGpu, pMemDesc, bValue)); + return; + } + + if (bValue) + pMemDesc->_flags |= flag; + else + pMemDesc->_flags &= ~flag; +} + +/*! + * @brief Return memory descriptor address pointer + * + * The address value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Memory descriptor address pointer + */ +NvP64 memdescGetAddress +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_address; +} + +/*! + * @brief Set memory descriptor address pointer + * + * The address value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pAddress Pointer to address information + * + * @returns nothing + */ +void memdescSetAddress +( + MEMORY_DESCRIPTOR *pMemDesc, + NvP64 pAddress +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_address = pAddress; +} + +/*! + * @brief Get memory descriptor os-specific memory data pointer + * + * The pMemData value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Memory data pointer + */ +void *memdescGetMemData +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pMemData; +} + +/*! + * @brief Set memory descriptor os-specific memory data pointer + * + * The pMemData value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pMemData Pointer to new os-specific memory data + * @param[in] pMemDataReleaseCallback Pointer to CB to be called when memdesc + * is freed. + * + * @returns nothing + */ +void memdescSetMemData +( + MEMORY_DESCRIPTOR *pMemDesc, + void *pMemData, + MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pMemData = pMemData; + pMemDesc->_pMemDataReleaseCallback = pMemDataReleaseCallback; +} + +/*! + * @brief Return memory descriptor volatile attribute + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Volatile or not + */ +NvBool memdescGetVolatility +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NvBool bVolatile = NV_FALSE; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + if (pMemDesc->_addressSpace == ADDR_SYSMEM) + { + bVolatile = (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED); + } + else + { + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM); + } + + return bVolatile; +} + +/*! + * @brief Quick check whether the memory is contiguous or not + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns NV_TRUE if contiguous + */ +NvBool memdescGetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + return !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); +} + +/*! 
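+ * Example: a minimal sketch of the flag accessors; contiguity deliberately
+ * bypasses memdescSetFlag()/memdescGetFlag() (both assert on it) and uses
+ * its own pair below. The helper name exampleFlagUsage is hypothetical.
+ */
+#if 0 // illustrative sketch only; not compiled
+static void
+exampleFlagUsage
+(
+ MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+ memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_TRUE);
+ NV_ASSERT(memdescGetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE));
+
+ // Contiguity has dedicated accessors.
+ memdescSetContiguity(pMemDesc, AT_GPU, NV_TRUE);
+ NV_ASSERT(memdescGetContiguity(pMemDesc, AT_GPU));
+}
+#endif
+
+/*!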
+ * @brief Detailed Check whether the memory is contiguous or not + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns NV_TRUE if contiguous + */ +NvBool memdescCheckContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + NvU32 i; + + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)) + { + for (i = 0; i < (pMemDesc->PageCount - 1); i++) + { + if ((memdescGetPte(pMemDesc, addressTranslation, i) + RM_PAGE_SIZE) != + memdescGetPte(pMemDesc, addressTranslation, i + 1)) + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/*! + * @brief Set the contiguity of the memory descriptor + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * @param[in] isContiguous Contiguity value + * + * @returns nothing + */ +void memdescSetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvBool isContiguous) +{ + NV_ASSERT_OR_RETURN_VOID(pMemDesc); + + if (isContiguous) + pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + else + pMemDesc->_flags &= ~MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; +} + +/*! + * @brief Get the address space of the memory descriptor + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns addresspace + */ +NV_ADDRESS_SPACE memdescGetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc) +{ + NV_ASSERT_OR_RETURN(pMemDesc != NULL, 0); + return pMemDesc->_addressSpace; +} + +/*! + * @brief Get page size + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * + * @returns Current page size. + */ +NvU32 memdescGetPageSize +( + PMEMORY_DESCRIPTOR pMemDesc, + ADDRESS_TRANSLATION addressTranslation +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pageSize; +} + +/*! + * @brief Set page size + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * @param[in] pteKind New PTE kind + * + * @returns nothing + */ +void memdescSetPageSize +( + PMEMORY_DESCRIPTOR pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + NvU32 pageSize +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pageSize = pageSize; +} + +/*! + * @brief Get the Root memory descriptor. + * + * This can also be used to get the root offset as well. + * + * Root memory descriptor is the top level memory descriptor with no parent, + * from which this memory descriptor was derived + * + * @param[in] pMemDesc Pointer to memory descriptor. + * @param[out] pRootOffset Pointer to the root offset parameter. + * + * @returns the Root memory descriptor. + */ +PMEMORY_DESCRIPTOR memdescGetRootMemDesc +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU64 *pRootOffset +) +{ + NvU64 offset = 0; + + // Find the top-level parent descriptor + while (pMemDesc->_pParentDescriptor) + { + // Sanity check, None of the child descriptors should be allocated + NV_ASSERT(!pMemDesc->Allocated); + offset += pMemDesc->subMemOffset; + pMemDesc = pMemDesc->_pParentDescriptor; + } + + if (pRootOffset) + { + *pRootOffset = offset; + } + + return pMemDesc; +} +/*! + * @brief Sets the CUSTOM_HEAP flag of MEMDESC. 
+ *
+ * Since we have an ACR region, a memory descriptor can be allocated inside it;
+ * in that case this flag must be set because the custom ACR heap is in use.
+ *
+ * @param[in]  pMemDesc Pointer to memory descriptor.
+ *
+ * @returns void.
+ */
+void
+memdescSetCustomHeap
+(
+    PMEMORY_DESCRIPTOR pMemDesc
+)
+{
+    NV_ASSERT(0);
+}
+
+/*!
+ * @brief Returns the ACR CUSTOM_HEAP flag.
+ *
+ * @param[in]  pMemDesc Pointer to memory descriptor.
+ *
+ * @returns NV_TRUE if flag MEMDESC_FLAGS_CUSTOM_HEAP_ACR is SET.
+ */
+NvBool
+memdescGetCustomHeap
+(
+    PMEMORY_DESCRIPTOR pMemDesc
+)
+{
+    return NV_FALSE;
+}
+
+PIOVAMAPPING memdescGetIommuMap
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    NvU32 iovaspaceId
+)
+{
+    PIOVAMAPPING pIommuMap = pMemDesc->_pIommuMappings;
+    while (pIommuMap != NULL)
+    {
+        if (pIommuMap->iovaspaceId == iovaspaceId)
+        {
+            break;
+        }
+
+        pIommuMap = pIommuMap->pNext;
+    }
+
+    return pIommuMap;
+}
+
+NV_STATUS memdescAddIommuMap
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    PIOVAMAPPING pIommuMap
+)
+{
+    NV_ASSERT_OR_RETURN((pMemDesc->_pIommuMappings == NULL) ||
+        (!memdescIsSubMemoryMemDesc(pMemDesc)), NV_ERR_INVALID_ARGUMENT);
+
+    //
+    // Only root physical memdescs can have multiple IOMMU mappings.
+    // Submemdescs can only have one, and the list linkage is used
+    // instead to link it as a child of the root IOMMU mapping, so we
+    // don't want to overwrite that here.
+    //
+    if (!memdescIsSubMemoryMemDesc(pMemDesc))
+    {
+        pIommuMap->pNext = pMemDesc->_pIommuMappings;
+    }
+
+    pMemDesc->_pIommuMappings = pIommuMap;
+
+    return NV_OK;
+}
+
+void memdescRemoveIommuMap
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    PIOVAMAPPING pIommuMap
+)
+{
+    //
+    // Only root physical memdescs can have multiple IOMMU mappings.
+    // Submemdescs can only have one, and the list linkage is used
+    // instead to link it as a child of the root IOMMU mapping, so we
+    // don't want to overwrite that here.
+    //
+    if (!memdescIsSubMemoryMemDesc(pMemDesc))
+    {
+        PIOVAMAPPING *ppTmpIommuMap = &pMemDesc->_pIommuMappings;
+        while ((*ppTmpIommuMap != NULL) && (*ppTmpIommuMap != pIommuMap))
+        {
+            ppTmpIommuMap = &(*ppTmpIommuMap)->pNext;
+        }
+
+        if (*ppTmpIommuMap != NULL)
+        {
+            *ppTmpIommuMap = pIommuMap->pNext;
+        }
+        else
+        {
+            NV_ASSERT(*ppTmpIommuMap != NULL);
+        }
+    }
+    else if (pMemDesc->_pIommuMappings == pIommuMap)
+    {
+        pMemDesc->_pIommuMappings = NULL;
+    }
+    else
+    {
+        //
+        // Trying to remove a submemory mapping that doesn't belong to this
+        // descriptor?
+        //
+        NV_ASSERT(pMemDesc->_pIommuMappings == pIommuMap);
+    }
+}
+
+NV_STATUS memdescMapIommu
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    NvU32 iovaspaceId
+)
+{
+#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM
+    if (iovaspaceId != NV_IOVA_DOMAIN_NONE)
+    {
+        NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc);
+        OBJGPU *pMappingGpu = gpumgrGetGpuFromId(iovaspaceId);
+        PMEMORY_DESCRIPTOR pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL);
+        if ((addrSpace == ADDR_SYSMEM) || gpumgrCheckIndirectPeer(pMappingGpu, pRootMemDesc->pGpu))
+        {
+            NV_STATUS status;
+            OBJIOVASPACE *pIOVAS = iovaspaceFromId(iovaspaceId);
+            NV_ASSERT_OR_RETURN(pIOVAS, NV_ERR_OBJECT_NOT_FOUND);
+
+            status = iovaspaceAcquireMapping(pIOVAS, pMemDesc);
+            NV_ASSERT_OR_RETURN(status == NV_OK, status);
+        }
+    }
+#endif
+
+    //
+    // Verify that the final physical addresses are indeed addressable by the
+    // GPU. We only need to do this for internally allocated sysmem (RM owned)
+    // as well as externally allocated/mapped sysmem.
Note, addresses for peer + // (P2P mailbox registers) BARs are actually not handled by the GMMU and + // support a full 64-bit address width, hence validation is not needed. + // + if ((pMemDesc->Allocated || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) && + memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) + { + // TODO This should look up the GPU corresponding to the IOVAS instead. + OBJGPU *pGpu = pMemDesc->pGpu; + RmPhysAddr dmaWindowStartAddr = gpuGetDmaStartAddress(pGpu); + RmPhysAddr dmaWindowEndAddr = gpuGetDmaEndAddress_HAL(pGpu); + RmPhysAddr physAddr; + + if (memdescGetContiguity(pMemDesc, AT_GPU)) + { + physAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + if ((physAddr < dmaWindowStartAddr) || + (physAddr + pMemDesc->Size - 1 > dmaWindowEndAddr)) + { + NV_PRINTF(LEVEL_ERROR, + "0x%llx-0x%llx is not addressable by GPU 0x%x [0x%llx-0x%llx]\n", + physAddr, physAddr + pMemDesc->Size - 1, + pGpu->gpuId, dmaWindowStartAddr, dmaWindowEndAddr); + memdescUnmapIommu(pMemDesc, iovaspaceId); + return NV_ERR_INVALID_ADDRESS; + } + } + else + { + NvU32 i; + for (i = 0; i < pMemDesc->PageCount; i++) + { + physAddr = memdescGetPte(pMemDesc, AT_GPU, i); + if ((physAddr < dmaWindowStartAddr) || + (physAddr + (RM_PAGE_SIZE - 1) > dmaWindowEndAddr)) + { + NV_PRINTF(LEVEL_ERROR, + "0x%llx is not addressable by GPU 0x%x [0x%llx-0x%llx]\n", + physAddr, pGpu->gpuId, dmaWindowStartAddr, + dmaWindowEndAddr); + memdescUnmapIommu(pMemDesc, iovaspaceId); + return NV_ERR_INVALID_ADDRESS; + } + } + } + } + + return NV_OK; +} + +void memdescUnmapIommu +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 iovaspaceId +) +{ +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM + PIOVAMAPPING pIovaMapping; + OBJIOVASPACE *pIOVAS; + + if (iovaspaceId == NV_IOVA_DOMAIN_NONE) + return; + + pIovaMapping = memdescGetIommuMap(pMemDesc, iovaspaceId); + NV_ASSERT(pIovaMapping); + + pIOVAS = iovaspaceFromMapping(pIovaMapping); + iovaspaceReleaseMapping(pIOVAS, pIovaMapping); +#endif +} + +void memdescCheckSubDevicePageSizeConsistency +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + OBJVASPACE *pVAS, + NvU64 pageSize, + NvU64 pageOffset +) +{ + NvU64 tempPageSize, tempPageOffset; + PMEMORY_DESCRIPTOR pTempMemDesc = NULL; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pTempMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + tempPageSize = memdescGetPageSize(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS)); + tempPageOffset = memdescGetPhysAddr(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), 0) & (tempPageSize - 1); + + // Assert if inconsistent + NV_ASSERT(pageSize == tempPageSize); + NV_ASSERT(pageOffset == tempPageOffset); + SLI_LOOP_END +} + +void memdescCheckSubDeviceMemContiguityConsistency +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + OBJVASPACE *pVAS, + NvBool bIsMemContiguous +) +{ + NvBool bTempIsMemContiguous = NV_FALSE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + bTempIsMemContiguous = memdescGetContiguity(memdescGetMemDescFromGpu(pMemDesc, pGpu), VAS_ADDRESS_TRANSLATION(pVAS)); + // Assert if inconsistent + NV_ASSERT(bIsMemContiguous == bTempIsMemContiguous); + SLI_LOOP_END +} + +/* @brief Get GPA(guest physical addresses) for given GPU physical addresses. + * + * @param[in] pGpu GPU for which GPAs are needed + * @param[in] pageCount Size of array. Should be 1 for contiguous mappings + * @param[in/out] pGpa Array of GPU PAs to be converted to guest PAs + * + * @returns NV_STATUS + */ + +/*! 
+ * @brief Override the registry INST_LOC two-bit enum to an aperture (list) + cpu attr.
+ *
+ * Caller must set initial default values.
+ */
+void
+memdescOverrideInstLocList
+(
+    NvU32                     instLoc,  // NV_REG_STR_RM_INST_LOC
+    const char               *name,
+    const NV_ADDRESS_SPACE  **ppAllocList,
+    NvU32                    *pCpuMappingAttr
+)
+{
+    switch (instLoc)
+    {
+        case NV_REG_STR_RM_INST_LOC_COH:
+            NV_PRINTF(LEVEL_INFO, "using coh system memory for %s\n", name);
+            *ppAllocList = ADDRLIST_SYSMEM_ONLY;
+            *pCpuMappingAttr = NV_MEMORY_CACHED;
+            break;
+        case NV_REG_STR_RM_INST_LOC_NCOH:
+            NV_PRINTF(LEVEL_INFO, "using ncoh system memory for %s\n", name);
+            *ppAllocList = ADDRLIST_SYSMEM_ONLY;
+            *pCpuMappingAttr = NV_MEMORY_UNCACHED;
+            break;
+        case NV_REG_STR_RM_INST_LOC_VID:
+            NV_PRINTF(LEVEL_INFO, "using video memory for %s\n", name);
+            *ppAllocList = ADDRLIST_FBMEM_ONLY;
+            *pCpuMappingAttr = NV_MEMORY_WRITECOMBINED;
+            break;
+        case NV_REG_STR_RM_INST_LOC_DEFAULT:
+        default:
+            // Do not update parameters
+            break;
+    }
+}
+
+/*!
+ * @brief Override wrapper for callers needing a single aperture
+ */
+void
+memdescOverrideInstLoc
+(
+    NvU32             instLoc,
+    const char       *name,
+    NV_ADDRESS_SPACE *pAddrSpace,
+    NvU32            *pCpuMappingAttr
+)
+{
+    const NV_ADDRESS_SPACE *pAllocList = NULL;
+
+    memdescOverrideInstLocList(instLoc, name, &pAllocList, pCpuMappingAttr);
+    if (pAllocList != NULL)
+        *pAddrSpace = pAllocList[0];
+}
+/*!
+* @brief Override physical address width
+*
+* @param[in]   pGpu
+* @param[in]   pMemDesc     Memory descriptor to update
+* @param[in]   addressWidth Address width to be overridden, in bits
+*
+* @returns nothing
+*/
+void
+memdescOverridePhysicalAddressWidthWindowsWAR
+(
+    OBJGPU *pGpu,
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32 addressWidth
+)
+{
+    return;
+}
+
+/*!
+* @brief Register MEMDESC to GSP
+* Life of the registration: until memdescDeregisterFromGSP is called,
+* which always occurs when the memory is freed.
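+* Registration converts the memory's OS32 attributes to OS02 flags and issues
+* an NV01_MEMORY_SYSTEM_OS_DESCRIPTOR alloc RPC to GSP; on success the
+* MEMDESC_FLAGS_REGISTERED_TO_GSP flag is set so the registration is done
+* exactly once and can be torn down later.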
+* TODO: Have argument as pMemory*; move to NVOC
+*
+* @param[in]   pGpu
+* @param[in]   hClient NvHandle
+* @param[in]   hParent NvHandle
+* @param[in]   hMemory NvHandle
+*
+* @returns NV_STATUS
+*/
+NV_STATUS
+memdescRegisterToGSP
+(
+    OBJGPU            *pGpu,
+    NvHandle           hClient,
+    NvHandle           hParent,
+    NvHandle           hMemory
+)
+{
+    NV_STATUS          status     = NV_OK;
+    Memory            *pMemory    = NULL;
+    RsResourceRef     *pMemoryRef = NULL;
+    MEMORY_DESCRIPTOR *pMemDesc   = NULL;
+
+    // Nothing to do without GSP
+    if (!IS_GSP_CLIENT(pGpu))
+    {
+        return NV_OK;
+    }
+
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, serverutilGetResourceRef(hClient, hMemory, &pMemoryRef));
+
+    pMemory = dynamicCast(pMemoryRef->pResource, Memory);
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemory != NULL, NV_ERR_INVALID_OBJECT);
+
+    pMemDesc = pMemory->pMemDesc;
+
+    // Check: memory already registered
+    if ((pMemDesc->_flags & MEMDESC_FLAGS_REGISTERED_TO_GSP) != 0)
+    {
+        return NV_OK;
+    }
+
+    // Check: no subdevice memDescs
+    NV_CHECK_OR_RETURN(LEVEL_ERROR,
+                       !memdescHasSubDeviceMemDescs(pMemDesc),
+                       NV_ERR_INVALID_STATE);
+
+    // Check: SYSMEM only
+    NV_CHECK_OR_RETURN(LEVEL_ERROR,
+                       memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM,
+                       NV_ERR_INVALID_STATE);
+
+    NvU32 os02Flags = 0;
+
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+                          RmDeprecatedConvertOs32ToOs02Flags(pMemory->Attr,
+                                                             pMemory->Attr2,
+                                                             pMemory->Flags,
+                                                             &os02Flags));
+    NV_RM_RPC_ALLOC_MEMORY(pGpu,
+                           hClient,
+                           hParent,
+                           hMemory,
+                           NV01_MEMORY_SYSTEM_OS_DESCRIPTOR,
+                           os02Flags,
+                           pMemDesc,
+                           status);
+
+    if (status == NV_OK)
+    {
+        // Mark memory as registered in GSP
+        pMemDesc->_flags |= MEMDESC_FLAGS_REGISTERED_TO_GSP;
+    }
+
+    return status;
+}
+
+/*!
+* @brief Deregister MEMDESC from GSP
+* Always called when the memory is freed.
+* TODO: Have argument as pMemory*; move to NVOC
+*
+* @param[in]   pGpu
+* @param[in]   hClient NvHandle
+* @param[in]   hParent NvHandle
+* @param[in]   hMemory NvHandle
+*
+* @returns NV_STATUS
+*/
+NV_STATUS
+memdescDeregisterFromGSP
+(
+    OBJGPU     *pGpu,
+    NvHandle    hClient,
+    NvHandle    hParent,
+    NvHandle    hMemory
+)
+{
+    NV_STATUS          status     = NV_OK;
+    Memory            *pMemory    = NULL;
+    RsResourceRef     *pMemoryRef = NULL;
+    MEMORY_DESCRIPTOR *pMemDesc   = NULL;
+
+    // Nothing to do without GSP
+    if ((pGpu == NULL) ||
+        !IS_GSP_CLIENT(pGpu))
+    {
+        return NV_OK;
+    }
+
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, serverutilGetResourceRef(hClient, hMemory, &pMemoryRef));
+
+    pMemory = dynamicCast(pMemoryRef->pResource, Memory);
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemory != NULL, NV_ERR_INVALID_OBJECT);
+
+    pMemDesc = pMemory->pMemDesc;
+
+    // Nothing to do if memory is not registered to GSP
+    if ((pMemDesc == NULL) ||
+        (pMemDesc->_flags & MEMDESC_FLAGS_REGISTERED_TO_GSP) == 0)
+    {
+        return NV_OK;
+    }
+
+    NV_RM_RPC_FREE(pGpu,
+                   hClient,
+                   hParent,
+                   hMemory,
+                   status);
+
+    if (status == NV_OK)
+    {
+        // Mark memory as not registered in GSP
+        pMemDesc->_flags &= ~MEMDESC_FLAGS_REGISTERED_TO_GSP;
+    }
+
+    return status;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
new file mode 100644
index 0000000..1dfbd59
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
@@ -0,0 +1,830 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap_base.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "os/nv_memory_type.h" +#include "core/locks.h" + +/* ------------------------ Private functions --------------------------------------- */ + +/*! + * @brief This utility routine helps in determining the appropriate + * memory transfer technique to be used + */ +static TRANSFER_TYPE +memmgrGetMemTransferType +( + MemoryManager *pMemoryManager +) +{ + return TRANSFER_TYPE_PROCESSOR; +} + +/*! + * @brief This function is used for copying data b/w two memory regions + * using the specified memory transfer technique. Both memory regions + * can be in the same aperture or in different apertures. 
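+ *
+ * Illustrative usage through the public memmgrMemCopy() wrapper (the
+ * descriptors and size here are placeholders; real surfaces come from the
+ * caller):
+ *
+ *   TRANSFER_SURFACE dst = { .pMemDesc = pDstMemDesc, .offset = 0 };
+ *   TRANSFER_SURFACE src = { .pMemDesc = pSrcMemDesc, .offset = 0 };
+ *   NV_ASSERT_OK(memmgrMemCopy(pMemoryManager, &dst, &src, 4096,
+ *                              TRANSFER_FLAGS_NONE));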
+ * + * @param[in] pDstInfo TRANSFER_SURFACE info for destination region + * @param[in] pSrcInfo TRANSFER_SURFACE info for source region + * @param[in] size Size in bytes of the memory transfer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemCopyWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + TRANSFER_SURFACE *pSrcInfo, + NvU32 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pSrc; + NvU8 *pDst; + + // Sanitize the input + NV_ASSERT_OR_RETURN(pDstInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(!memdescDescIsEqual(pDstInfo->pMemDesc, pSrcInfo->pMemDesc), + NV_ERR_INVALID_ARGUMENT); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pDst = memdescMapInternal(pGpu, pDstInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + pSrc = memdescMapInternal(pGpu, pSrcInfo->pMemDesc, TRANSFER_FLAGS_NONE); + if (pSrc == NULL) + { + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, 0); + NV_ASSERT_OR_RETURN(0, NV_ERR_INSUFFICIENT_RESOURCES); + } + + portMemCopy(pDst + pDstInfo->offset, size, pSrc + pSrcInfo->offset, size); + + memdescUnmapInternal(pGpu, pSrcInfo->pMemDesc, TRANSFER_FLAGS_NONE); + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, flags); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/*! + * @brief This function is used for setting a memory region to a constant state + * using a specified memory transfer technique + * + * @param[in] pDstInfo TRANSFER_SURFACE info for destination region + * @param[in] value Value to be written to the region + * @param[in] size Size in bytes of the memory to be initialized + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemSetWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + NvU32 value, + NvU32 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pDst; + + // Sanitize the input + NV_ASSERT_OR_RETURN(pDstInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->offset + size <= pDstInfo->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pDst = memdescMapInternal(pGpu, pDstInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + + portMemSet(pDst + pDstInfo->offset, value, size); + + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, flags); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/*! 
+ * @brief This function is used for writing data placed in a caller passed buffer + * to a given memory region using the specified memory transfer technique + * + * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region + * @param[in] pBuf Buffer allocated by caller + * @param[in] size Size in bytes of the buffer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemWriteWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + void *pBuf, + NvU64 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pDst; + NvU8 *pMapping = memdescGetKernelMapping(pDstInfo->pMemDesc); + + // Sanitize the input + NV_ASSERT_OR_RETURN(pDstInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBuf != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->offset + size <= pDstInfo->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT); + + if (pMapping != NULL) + { + portMemCopy(pMapping + pDstInfo->offset, size, pBuf, size); + return NV_OK; + } + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pDst = memdescMapInternal(pGpu, pDstInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + + portMemCopy(pDst + pDstInfo->offset, size, pBuf, size); + + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, flags); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/*! 
+ * @brief This function is used for reading specified number of bytes from + * a source memory region into a caller passed buffer using a specified + * memory transfer technique + * + * @param[in] pSrcInfo TRANSFER_SURFACE info for the source region + * @param[in] pBuf Caller allocated buffer + * @param[in] size Size in bytes of the buffer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemReadWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pSrcInfo, + void *pBuf, + NvU64 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pSrc; + NvU8 *pMapping = memdescGetKernelMapping(pSrcInfo->pMemDesc); + + + // Sanitize the input + NV_ASSERT_OR_RETURN(pSrcInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBuf != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo->offset + size <= pSrcInfo->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT); + + if (pMapping != NULL) + { + portMemCopy(pBuf, size, pMapping + pSrcInfo->offset, size); + return NV_OK; + } + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pSrc = memdescMapInternal(pGpu, pSrcInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pSrc != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + + portMemCopy(pBuf, size, pSrc + pSrcInfo->offset, size); + + memdescUnmapInternal(pGpu, pSrcInfo->pMemDesc, 0); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/* ------------------------ Public functions --------------------------------------- */ + +NvU64 memUtilsLeastCommonAlignment(NvU64 align1, NvU64 align2) +{ + NvU64 a, b; // For Euclid's algorithm + NvU64 lcm; // Least Common Multiple of align1 and align2 + NvU64 maxAlignment = NV_U64_MAX; + + // WOLOG, make sure align1 >= align2. + // + if (align2 > align1) + { + NvU64 tmp = align1; + align1 = align2; + align2 = tmp; + } + + // If align2 is 0, return min(align1, maxAlignment) + // + if (align2 == 0) + { + return align1 < maxAlignment ? align1 : maxAlignment; + } + + // Use Euclid's algorithm (GCD(a, b) = GCD(b, a % b)) to find the + // GCD of the two alignments, and use the GCD to find the LCM. + // + a = align1; + b = align2; + while (b != 0) + { + NvU64 old_a = a; + a = b; + b = old_a % b; + NV_ASSERT(a > b); // Ensure termination. Should never fail. + } + lcm = align1 * (align2 / a); // May overflow + + // Return min(lcm, maxAlignment). Also return maxAlignment if the + // lcm calculation overflowed, since that means it must have been + // much bigger than maxAlignment. 
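+    // For example (illustrative values): align1 = 0x6000 and align2 = 0x1000
+    // yield GCD a = 0x1000, so lcm = 0x6000 * (0x1000 / 0x1000) = 0x6000,
+    // which both alignments divide evenly.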
+    //
+    if (lcm > maxAlignment || lcm < align1 ||
+        0 != (lcm % align1) || 0 != (lcm % align2))
+    {
+        NV_CHECK_FAILED(LEVEL_ERROR, "Alignment limit exceeded");
+        return maxAlignment;
+    }
+    return lcm;
+}
+
+void memUtilsInitFBAllocInfo
+(
+    NV_MEMORY_ALLOCATION_PARAMS *pAllocParams,
+    FB_ALLOC_INFO *pFbAllocInfo,
+    NvHandle hClient,
+    NvHandle hDevice
+)
+{
+    pFbAllocInfo->pageFormat->type  = pAllocParams->type;
+    pFbAllocInfo->owner             = pAllocParams->owner;
+    pFbAllocInfo->hwResId           = 0;
+    pFbAllocInfo->pad               = 0;
+    pFbAllocInfo->alignPad          = 0;
+    pFbAllocInfo->height            = pAllocParams->height;
+    pFbAllocInfo->width             = pAllocParams->width;
+    pFbAllocInfo->pitch             = pAllocParams->pitch;
+    pFbAllocInfo->size              = pAllocParams->size;
+    pFbAllocInfo->origSize          = pAllocParams->size;
+    pFbAllocInfo->adjustedSize      = pAllocParams->size;
+    pFbAllocInfo->offset            = ~0;
+    pFbAllocInfo->pageFormat->flags = pAllocParams->flags;
+    pFbAllocInfo->pageFormat->attr  = pAllocParams->attr;
+    pFbAllocInfo->retAttr           = pAllocParams->attr;
+    pFbAllocInfo->pageFormat->attr2 = pAllocParams->attr2;
+    pFbAllocInfo->retAttr2          = pAllocParams->attr2;
+    pFbAllocInfo->format            = pAllocParams->format;
+    pFbAllocInfo->comprCovg         = pAllocParams->comprCovg;
+    pFbAllocInfo->zcullCovg         = 0;
+    pFbAllocInfo->ctagOffset        = pAllocParams->ctagOffset;
+    pFbAllocInfo->bIsKernelAlloc    = NV_FALSE;
+    pFbAllocInfo->internalflags     = 0;
+    pFbAllocInfo->hClient           = hClient;
+    pFbAllocInfo->hDevice           = hDevice;
+
+    if ((pAllocParams->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT) ||
+        (pAllocParams->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE))
+        pFbAllocInfo->align = pAllocParams->alignment;
+    else
+        pFbAllocInfo->align = RM_PAGE_SIZE;
+
+    if (pAllocParams->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)
+    {
+        pFbAllocInfo->offset        = pAllocParams->offset;
+        pFbAllocInfo->desiredOffset = pAllocParams->offset;
+    }
+}
+
+/*!
+ * @brief This function is used for copying data b/w two memory regions.
+ *        Both memory regions can be in the same aperture or in different apertures.
+ *
+ * @param[in] pDstInfo  TRANSFER_SURFACE info for destination region
+ * @param[in] pSrcInfo  TRANSFER_SURFACE info for source region
+ * @param[in] size      Size in bytes of the memory transfer
+ * @param[in] flags     Flags
+ */
+NV_STATUS
+memmgrMemCopy_IMPL
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pDstInfo,
+    TRANSFER_SURFACE *pSrcInfo,
+    NvU32             size,
+    NvU32             flags
+)
+{
+    TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager);
+
+    return memmgrMemCopyWithTransferType(pMemoryManager, pDstInfo, pSrcInfo,
+                                         size, transferType, flags);
+}
+
+/*!
+ * @brief This function is used for setting a memory region to a constant state
+ *
+ * @param[in] pDstInfo  TRANSFER_SURFACE info for the destination region
+ * @param[in] value     Value to be written to the region
+ * @param[in] size      Size in bytes of the memory to be initialized
+ * @param[in] flags     Flags
+ */
+NV_STATUS
+memmgrMemSet_IMPL
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pDstInfo,
+    NvU32             value,
+    NvU32             size,
+    NvU32             flags
+)
+{
+    TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager);
+
+    return memmgrMemSetWithTransferType(pMemoryManager, pDstInfo, value,
+                                        size, transferType, flags);
+}
+
+/*!
+ * @brief This function is used for setting a memory region to a constant state
+ *
+ * @param[in] pMemDesc Memory descriptor for the region to set
+ * @param[in] value    Value to be written to the region
+ * @param[in] flags    Flags
+ */
+NV_STATUS
+memmgrMemDescMemSet_IMPL
+(
+    MemoryManager     *pMemoryManager,
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32              value,
+    NvU32              flags
+)
+{
+    TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc};
+    TRANSFER_TYPE    transferType = memmgrGetMemTransferType(pMemoryManager);
+
+    return memmgrMemSetWithTransferType(pMemoryManager, &transferSurface, value,
+                                        (NvU32)memdescGetSize(pMemDesc),
+                                        transferType, flags);
+}
+
+/*!
+ * @brief This function is used for writing data placed in a user buffer
+ *        to a given memory region
+ *
+ * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region
+ * @param[in] pBuf     Buffer allocated by caller
+ * @param[in] size     Size in bytes of the buffer
+ * @param[in] flags    Flags
+ */
+NV_STATUS
+memmgrMemWrite_IMPL
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pDstInfo,
+    void             *pBuf,
+    NvU64             size,
+    NvU32             flags
+)
+{
+    TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager);
+
+    return memmgrMemWriteWithTransferType(pMemoryManager, pDstInfo, pBuf,
+                                          size, transferType, flags);
+}
+
+/*!
+ * @brief This function is used for reading specified number of bytes from
+ *        a source memory region into a caller passed buffer
+ *
+ * @param[in] pSrcInfo TRANSFER_SURFACE info for the source region
+ * @param[in] pBuf     Caller allocated buffer
+ * @param[in] size     Size in bytes of the buffer
+ * @param[in] flags    Flags
+ */
+NV_STATUS
+memmgrMemRead_IMPL
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pSrcInfo,
+    void             *pBuf,
+    NvU64             size,
+    NvU32             flags
+)
+{
+    TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager);
+
+    return memmgrMemReadWithTransferType(pMemoryManager, pSrcInfo, pBuf,
+                                         size, transferType, flags);
+}
+
+/*!
+ * @brief This helper function can be used to begin transfers
+ *
+ * @param[in] pTransferInfo  Transfer information
+ * @param[in] shadowBufSize  Size of allocated shadow buffer in case of shadow mapping
+ * @param[in] flags          Flags
+ */
+NvU8 *
+memmgrMemBeginTransfer_IMPL
+(
+    MemoryManager    *pMemoryManager,
+    TRANSFER_SURFACE *pTransferInfo,
+    NvU64             shadowBufSize,
+    NvU32             flags
+)
+{
+    TRANSFER_TYPE      transferType = memmgrGetMemTransferType(pMemoryManager);
+    MEMORY_DESCRIPTOR *pMemDesc     = pTransferInfo->pMemDesc;
+    NvU64              offset       = pTransferInfo->offset;
+    OBJGPU            *pGpu         = ENG_GET_GPU(pMemoryManager);
+    NvU8              *pPtr         = NULL;
+    NvU64              memSz        = 0;
+
+    NV_ASSERT_OR_RETURN(pMemDesc != NULL, NULL);
+    NV_ASSERT_OR_RETURN((memSz = memdescGetSize(pMemDesc)) >= shadowBufSize, NULL);
+    NV_ASSERT_OR_RETURN(memdescGetKernelMapping(pMemDesc) == NULL, NULL);
+
+    memSz = shadowBufSize == 0 ? memSz : shadowBufSize;
+
+    switch (transferType)
+    {
+        case TRANSFER_TYPE_PROCESSOR:
+            NV_ASSERT_OR_RETURN((pPtr = memdescMapInternal(pGpu, pMemDesc, flags)) != NULL, NULL);
+            pPtr = &pPtr[offset];
+            break;
+        case TRANSFER_TYPE_GSP_DMA:
+        case TRANSFER_TYPE_CE:
+            if (flags & TRANSFER_FLAGS_SHADOW_ALLOC)
+            {
+                NV_ASSERT_OR_RETURN((pPtr = portMemAllocNonPaged(memSz)), NULL);
+                if (flags & TRANSFER_FLAGS_SHADOW_INIT_MEM)
+                {
+                    NV_ASSERT_OK(memmgrMemRead(pMemoryManager, pTransferInfo, pPtr, memSz, flags));
+                }
+            }
+            break;
+        default:
+            NV_ASSERT(0);
+    }
+    memdescSetKernelMapping(pMemDesc, pPtr);
+    return pPtr;
+}
+
+/*!
+ * @brief This helper function can be used to end transfers + * + * @param[in] pTransferInfo Transfer information + * @param[in] shadowBufSize Size of allocated shadow buffer in case of shadow mapping + * @param[in] flags Flags + */ +void +memmgrMemEndTransfer_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pTransferInfo, + NvU64 shadowBufSize, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + MEMORY_DESCRIPTOR *pMemDesc = pTransferInfo->pMemDesc; + NvU64 offset = pTransferInfo->offset; + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU64 memSz = 0; + NvU8 *pMapping = memdescGetKernelMapping(pMemDesc); + + NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL); + NV_ASSERT_OR_RETURN_VOID((memSz = memdescGetSize(pMemDesc)) >= (shadowBufSize + offset) ); + + memSz = shadowBufSize == 0 ? memSz : shadowBufSize; + + memdescSetKernelMapping(pMemDesc, NULL); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + memdescUnmapInternal(pGpu, pMemDesc, flags); + return; + case TRANSFER_TYPE_GSP_DMA: + case TRANSFER_TYPE_CE: + if (pMapping != NULL) + { + NV_ASSERT_OK(memmgrMemWrite(pMemoryManager, pTransferInfo, pMapping, memSz, flags)); + portMemFree(pMapping); + } + return; + default: + NV_ASSERT(0); + } + return; +} + +/*! + * @brief Helper function that ends transfers to a memdesc with default offset/size + * + * @param[in] pMemDesc Memory descriptor to end transfer to + * @param[in] flags Flags + */ +void +memmgrMemDescEndTransfer_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc}; + memmgrMemEndTransfer(pMemoryManager, &transferSurface, memdescGetSize(pMemDesc), flags); +} + +/*! + * @brief Helper function that begins transfers to a memdesc with default offset/size + * + * @param[in] pMemDesc Memory descriptor to begin transfer to + * @param[in] flags Flags + */ +NvU8 * +memmgrMemDescBeginTransfer_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc}; + return memmgrMemBeginTransfer(pMemoryManager, &transferSurface, memdescGetSize(pMemDesc), flags); +} + +/*! + * @brief This function is used to allocate common resources across memory + * classes, and must be used before memory-specific resource alloc. + * + * @param[in/out] pAllocRequest User-provided alloc request struct + * @param[in/out] pFbAllocInfo Initialized FB_ALLOC_INFO struct to alloc + */ +NV_STATUS +memmgrAllocResources_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + NV_STATUS status = NV_OK; + NvU64 alignment = 0; + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + NV_ADDRESS_SPACE addrSpace = memmgrAllocGetAddrSpace(pMemoryManager, pVidHeapAlloc->flags, + pFbAllocInfo->retAttr); + + // IRQL TEST: must be running at equivalent of passive-level + IRQL_ASSERT_AND_RETURN(!osIsRaisedIRQL()); + + // + // Check for valid size. + // + if (pVidHeapAlloc->size == 0) + return NV_ERR_INVALID_ARGUMENT; + + // + // Ensure a valid allocation pVidHeapAlloc->type was passed in + // + if (pVidHeapAlloc->type > NVOS32_NUM_MEM_TYPES - 1) + return NV_ERR_INVALID_ARGUMENT; + + if (ADDR_VIRTUAL != addrSpace) + { + // If vidmem not requested explicitly, decide on the physical location. 
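+        // A _LOCATION of _PCI or _ANY resolves to _VIDMEM when the address
+        // space chosen for the allocation is FBMEM, and to _PCI otherwise.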
+        if (FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _PCI, pFbAllocInfo->retAttr) ||
+            FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _ANY, pFbAllocInfo->retAttr))
+        {
+            if (ADDR_FBMEM == addrSpace)
+            {
+                pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pFbAllocInfo->retAttr);
+            }
+            else
+            {
+                pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, pFbAllocInfo->retAttr);
+            }
+        }
+    }
+    else // Virtual
+    {
+        // Clear location to ANY since virtual does not associate with location.
+        pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _ANY, pFbAllocInfo->retAttr);
+    }
+
+    // call HAL to set resources
+    status = memmgrSetAllocParameters_HAL(pGpu, pMemoryManager, pFbAllocInfo);
+
+    if (status != NV_OK)
+    {
+        //
+        // Two possibilities: either some attribute was set to REQUIRED and we
+        // ran out of resources, or an unaligned address/size was passed down.
+        // Free up memory and fail this call. heapFree will fix up heap pointers.
+        //
+        goto failed;
+    }
+
+    //
+    // For fixed allocations, check if the alignment needs to be adjusted.
+    // Some hardware units request allocations aligned to less than the
+    // page size, which can be handled through alignPad.
+    //
+    if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)
+    {
+        //
+        // Is our desired offset suitably aligned? If not, adjust it using
+        // alignPad (the offset into a page); the allocation stays page-size
+        // aligned as required for swizzling.
+        //
+        if (pFbAllocInfo->desiredOffset % (pFbAllocInfo->align + 1))
+        {
+            pFbAllocInfo->alignPad = pFbAllocInfo->desiredOffset % (pFbAllocInfo->align + 1);
+            pFbAllocInfo->desiredOffset -= pFbAllocInfo->alignPad;
+        }
+    }
+
+    //
+    // Refresh search parameters.
+    //
+    pFbAllocInfo->adjustedSize = pFbAllocInfo->size - pFbAllocInfo->alignPad;
+    pVidHeapAlloc->height = pFbAllocInfo->height;
+    pVidHeapAlloc->pitch  = pFbAllocInfo->pitch;
+
+    //
+    // The api takes alignment-1 (used to be a mask).
+    //
+    alignment = pFbAllocInfo->align + 1;
+    pVidHeapAlloc->alignment = pFbAllocInfo->align + 1; // convert mask to size
+
+    //
+    // Allow caller to request host page alignment to make it easier
+    // to move things around with host os VM subsystem
+    //
+    if ((pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE) &&
+        (addrSpace == ADDR_FBMEM))
+    {
+        OBJSYS *pSys = SYS_GET_INSTANCE();
+        NvU32   hostPageSize = pSys->cpuInfo.hostPageSize;
+
+        // hostPageSize *should* always be set, but....
+        if (hostPageSize == 0)
+            hostPageSize = RM_PAGE_SIZE;
+
+        alignment = memUtilsLeastCommonAlignment(alignment, hostPageSize);
+    }
+
+    pVidHeapAlloc->alignment = alignment;
+    pFbAllocInfo->align      = alignment - 1;
+
+    return status;
+
+failed:
+
+    return status;
+}
+
+/*!
+ * @brief This function is used to create a memory descriptor if needed.
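+ *
+ * The descriptor is created with pFbAllocInfo->adjustedSize, i.e. after any
+ * rounding done by the resource-allocation step, so callers needing the
+ * post-rounding size should consult the descriptor rather than the original
+ * request.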
+ * + * @param[in/out] pAllocRequest User-provided alloc request struct + * @param[in/out] pFbAllocInfo Initialized FB_ALLOC_INFO struct to alloc + * @param[out] ppMemDesc Double pointer to created descriptor + * @param[in] pHeap Heap pointer to store in descriptor + * @param[in] addrSpace Address space identifier + * @param[in] memDescFlags Memory descriptor alloc flags + * @param[out] bAllocedMemDesc NV_TRUE if a descriptor was created + */ +NV_STATUS +memUtilsAllocMemDesc +( + OBJGPU *pGpu, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo, + MEMORY_DESCRIPTOR **ppMemDesc, + Heap *pHeap, + NV_ADDRESS_SPACE addrSpace, + NvBool bContig, + NvBool *bAllocedMemDesc +) +{ + NV_STATUS status = NV_OK; + + // + // Allocate a memory descriptor if needed. We do this after the fbHwAllocResources() call + // so we have the updated size information. Linear callers like memdescAlloc() can live with + // only having access to the requested size in bytes, but block linear callers really do + // need to allocate after fbAlloc() rounding takes place. + // + if (pAllocRequest->pMemDesc == NULL) + { + NvU64 memDescFlags = MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE; + + // + // Allocate a contig vidmem descriptor now; if needed we'll + // allocate a new noncontig memdesc later + // + status = memdescCreate(&pAllocRequest->pMemDesc, pGpu, pFbAllocInfo->adjustedSize, 0, bContig, + addrSpace, NV_MEMORY_UNCACHED, memDescFlags); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "cannot alloc memDesc!\n"); + return status; + } + + *bAllocedMemDesc = NV_TRUE; + } + + *ppMemDesc = pAllocRequest->pMemDesc; + (*ppMemDesc)->pHeap = pHeap; + + // Set attributes tracked by the memdesc + memdescSetPteKind(*ppMemDesc, pFbAllocInfo->format); + memdescSetHwResId(*ppMemDesc, pFbAllocInfo->hwResId); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c new file mode 100644 index 0000000..4820eeb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c @@ -0,0 +1,161 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "core/core.h"
+#include "os/os.h"
+#include "gpu/gpu.h"
+#include "gpu/subdevice/generic_engine.h"
+#include "gpu/subdevice/subdevice.h"
+#include "rmapi/client.h"
+
+
+NV_STATUS
+genapiConstruct_IMPL
+(
+    GenericEngineApi *pGenericEngineApi,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    RS_ITERATOR it;
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pGenericEngineApi);
+
+    if (!gpuIsClassSupported(pGpu, pCallContext->pResourceRef->externalClassId))
+        return NV_ERR_INVALID_CLASS;
+
+    //
+    // We allow multiple instances of GenericEngineApi class, however, only want
+    // to allow a single instance of each external class id type. E.g.:
+    // GF100_SUBDEVICE_GRAPHICS is allowed alongside GF100_SUBDEVICE_FB.
+    //
+    it = clientRefIter(pCallContext->pClient,
+                       pCallContext->pResourceRef->pParentRef,
+                       classId(GenericEngineApi), RS_ITERATE_CHILDREN, NV_TRUE);
+
+    while (clientRefIterNext(pCallContext->pClient, &it))
+    {
+        if (it.pResourceRef->externalClassId == pCallContext->pResourceRef->externalClassId &&
+            it.pResourceRef != pCallContext->pResourceRef)
+        {
+            return NV_ERR_STATE_IN_USE;
+        }
+    }
+
+    return NV_OK;
+}
+
+void
+genapiDestruct_IMPL
+(
+    GenericEngineApi *pGenericEngineApi
+)
+{
+}
+
+NV_STATUS
+genapiControl_IMPL
+(
+    GenericEngineApi *pGenericEngineApi,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams
+)
+{
+    return gpuresControl_IMPL(staticCast(pGenericEngineApi, GpuResource),
+                              pCallContext, pParams);
+}
+
+NV_STATUS
+genapiMap_IMPL
+(
+    GenericEngineApi *pGenericEngineApi,
+    CALL_CONTEXT *pCallContext,
+    RS_CPU_MAP_PARAMS *pParams,
+    RsCpuMapping *pCpuMapping
+)
+{
+    OBJGPU *pGpu;
+    NvU32 engineOffset, regSize, regBase;
+    NvU32 protect;
+    NV_STATUS rmStatus;
+
+    pGpu = GPU_RES_GET_GPU(pGenericEngineApi);
+
+    // XXX The default should really be more restrictive
+    protect = NV_PROTECT_READ_WRITE;
+
+    switch (RES_GET_EXT_CLASS_ID(pGenericEngineApi))
+    {
+        default:
+            return NV_ERR_INVALID_CLASS;
+    }
+
+    // Get the offset to the engine registers
+    rmStatus = gpuGetRegBaseOffset_HAL(pGpu, regBase, &engineOffset);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    // Round down to nearest 4k page
+    engineOffset &= ~(0x1000-1);
+
+    // Check whether the caller is requesting more privileges than we allow
+    if (pCpuMapping->pPrivate->protect & ~protect)
+    {
+        NV_PRINTF(LEVEL_ERROR, "%s%saccess not allowed on class 0x%x\n",
+                  (pCpuMapping->pPrivate->protect & ~protect) & NV_PROTECT_READABLE ? "Read " : "",
+                  (pCpuMapping->pPrivate->protect & ~protect) & NV_PROTECT_WRITEABLE ?
"Write " : "", + RES_GET_EXT_CLASS_ID(pGenericEngineApi)); + + return NV_ERR_PROTECTION_FAULT; + } + + // Create mapping + rmStatus = rmapiMapGpuCommon(staticCast(pGenericEngineApi, RsResource), + pCallContext, + pCpuMapping, + pGpu, + engineOffset, + regSize); + pCpuMapping->processId = osGetCurrentProcess(); + + if (pParams->ppCpuVirtAddr) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + + return rmStatus; +} + +NV_STATUS +genapiGetMapAddrSpace_IMPL +( + GenericEngineApi *pGenericEngineApi, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + if (pAddrSpace) + *pAddrSpace = ADDR_REGMEM; + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c new file mode 100644 index 0000000..0f83aca --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c @@ -0,0 +1,453 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This is a subdevice resource implementation. 
+* +******************************************************************************/ + +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" + +#include "vgpu/rpc.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" +#include "core/thread_state.h" + +#include "objtmr.h" + +NV_STATUS +subdeviceConstruct_IMPL +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV2080_ALLOC_PARAMETERS *pNv2080AllocParams = pParams->pAllocParams; + OBJGPU *pPrimaryGpu; + OBJGPU *pGpu; + NvU32 subDeviceInst; + NV_STATUS status = NV_OK; + RsClient *pRsClient = pCallContext->pClient; + GpuResource *pSubdevGpuRes = staticCast(pSubdevice, GpuResource); + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pParentRef = pResourceRef->pParentRef; + Device *pDevice = GPU_RES_GET_DEVICE(pSubdevice); + NvU32 i; + Subdevice *pSubdeviceTest; + + if (pNv2080AllocParams == NULL) + subDeviceInst = 0; + else + subDeviceInst = pNv2080AllocParams->subDeviceId; + + // validate subdevice instance + if (gpumgrIsSubDeviceInstanceValid(subDeviceInst) == NV_FALSE) + return NV_ERR_INVALID_CLASS; + + status = gpuGetByRef(pResourceRef->pParentRef, NULL, &pPrimaryGpu); + if (status != NV_OK) + return status; + + // Lookup GPU for subdevice instance + status = gpugrpGetGpuFromSubDeviceInstance(GPU_RES_GET_GPUGRP(pDevice), subDeviceInst, &pGpu); + if (status != NV_OK) + return NV_ERR_INVALID_CLASS; + + // Check if subdevice already allocated + if (subdeviceGetByInstance(pRsClient, RES_GET_HANDLE(pDevice), subDeviceInst, &pSubdeviceTest) == NV_OK) + return NV_ERR_INSUFFICIENT_RESOURCES; + + gpuresSetGpu(pSubdevGpuRes, pGpu, NV_FALSE); + + pSubdevice->pDevice = pDevice; + pSubdevice->deviceInst = pDevice->deviceInst; + pSubdevice->subDeviceInst = subDeviceInst; + pSubdevice->bUpdateTGP = NV_FALSE; + + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + pSubdevice->notifyActions[i] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + + pSubdevice->hNotifierMemory = NV01_NULL_OBJECT; + pSubdevice->hSemMemory = NV01_NULL_OBJECT; + + { + } + + NV_ASSERT_OK_OR_RETURN(gpuRegisterSubdevice(pGpu, pSubdevice)); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_ALLOC_SUBDEVICE(pPrimaryGpu, pRsClient->hClient, pParentRef->hResource, + pResourceRef->hResource, NV20_SUBDEVICE_0, + subDeviceInst, status); + NV_ASSERT_OK_OR_RETURN(status); + } + + return status; +} + +void +subdevicePreDestruct_IMPL +( + Subdevice *pSubdevice +) +{ + subdeviceResetTGP(pSubdevice); +} + +void +subdeviceDestruct_IMPL +( + Subdevice* pSubdevice +) +{ + CALL_CONTEXT *pCallContext; + RsClient *pRsClient = RES_GET_CLIENT(pSubdevice); + RsResourceRef *pResourceRef = RES_GET_REF(pSubdevice); + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + if (pSubdevice->bGcoffDisallowed) + { + osClientGcoffDisallowRefcount(pGpu->pOsGpuInfo, NV_FALSE); + } + + LOCK_METER_DATA(FREE_SUBDEVICE, 0, 0, 0); + + // TODO - Call context lookup in dtor can likely be phased out now that we have RES_GET_CLIENT + resGetFreeParams(staticCast(pSubdevice, RsResource), &pCallContext, NULL); + + // check for any pending client's timer notification for this subdevice + if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + tmrCancelCallback(pTmr, 
pSubdevice); + pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + + subdeviceRestoreLockedClock(pSubdevice, pCallContext); + + // Restore GR tick frequency to default. + subdeviceRestoreGrTickFreq(pSubdevice, pCallContext); + + // Remove NVLink error injection mode request + subdeviceReleaseNvlinkErrorInjectionMode(pSubdevice, pCallContext); + + subdeviceReleaseComputeModeReservation(pSubdevice, pCallContext); + +#ifdef DEBUG + NV_ASSERT(pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); +#endif + + subdeviceUnsetGpuDebugMode(pSubdevice); + subdeviceRestoreWatchdog(pSubdevice); + + if (pResourceRef != NULL && (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))) + { + NV_RM_RPC_FREE(pGpu, pRsClient->hClient, + pResourceRef->pParentRef->hResource, + pResourceRef->hResource, status); + } + + gpuUnregisterSubdevice(pGpu, pSubdevice); +} + +NV_STATUS +subdeviceInternalControlForward_IMPL +( + Subdevice *pSubdevice, + NvU32 command, + void *pParams, + NvU32 size +) +{ + return gpuresInternalControlForward_IMPL(staticCast(pSubdevice, GpuResource), command, pParams, size); +} + +NV_STATUS +subdeviceControlFilter_IMPL(Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams) +{ + return NV_OK; +} + +NV_STATUS +subdeviceGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hSubdevice, + Subdevice **ppSubdevice +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppSubdevice = NULL; + + status = clientGetResourceRef(pClient, hSubdevice, &pResourceRef); + if (status != NV_OK) + return status; + + *ppSubdevice = dynamicCast(pResourceRef->pResource, Subdevice); + + return (*ppSubdevice) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +subdeviceGetByGpu_IMPL +( + RsClient *pClient, + OBJGPU *pGpu, + Subdevice **ppSubdevice +) +{ + Subdevice *pSubdevice = NULL; + OBJGPU *pTmpGpu = NULL; + RS_ITERATOR it; + RsResourceRef *pResourceRef; + + *ppSubdevice = NULL; + + it = clientRefIter(pClient, NULL, classId(Subdevice), RS_ITERATE_DESCENDANTS, NV_TRUE); + while (clientRefIterNext(pClient, &it)) + { + pResourceRef = it.pResourceRef; + pSubdevice = dynamicCast(pResourceRef->pResource, Subdevice); + if (pSubdevice == NULL) + continue; + + pTmpGpu = GPU_RES_GET_GPU(pSubdevice); + + if (pTmpGpu == pGpu) + { + *ppSubdevice = pSubdevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +subdeviceGetByInstance_IMPL +( + RsClient *pClient, + NvHandle hDevice, + NvU32 subDeviceInst, + Subdevice **ppSubdevice +) +{ + RsResourceRef *pDeviceRef; + Subdevice *pSubdevice; + RS_ITERATOR it; + + *ppSubdevice = NULL; + + if (clientGetResourceRefByType(pClient, hDevice, classId(Device), &pDeviceRef) != NV_OK) + return NV_ERR_INVALID_ARGUMENT; + + it = clientRefIter(pClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pClient, &it)) + { + pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice); + + if (pSubdevice && pSubdevice->subDeviceInst == subDeviceInst) + { + *ppSubdevice = pSubdevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +// **************************************************************************** +// Helper functions +// **************************************************************************** +void +subdeviceUnsetGpuDebugMode_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (!pSubdevice->bGpuDebugModeEnabled) + { 
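+        // Debug mode was never enabled through this subdevice; nothing to undo.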
+ return; + } + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + pGpu->bIsDebugModeEnabled = NV_FALSE; +} + +void +subdeviceReleaseComputeModeReservation_IMPL +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext +) +{ + RsClient *pRsClient = pCallContext->pClient; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + // Release the reservation ONLY IF we had the reservation to begin with. Otherwise, + // leave it alone, because someone else has acquired it: + if (pGpu->hComputeModeReservation == pRsClient->hClient) + { + pGpu->hComputeModeReservation = NV01_NULL_OBJECT; + } +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * WARNING: This function is deprecated! Please use subdeviceGetByGpu and + * GPU_RES_SET_THREAD_BC_STATE (if needed to set thread UC state for SLI) + */ +Subdevice * +CliGetSubDeviceInfoFromGpu +( + NvHandle hClient, + OBJGPU *pGpu +) +{ + RsClient *pClient; + NV_STATUS status; + Subdevice *pSubdevice; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = subdeviceGetByGpu(pClient, pGpu, &pSubdevice); + if (status != NV_OK) + return NULL; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + return pSubdevice; +} + +/** + * WARNING: This function is deprecated! Please use subdeviceGetByGpu and + * RES_GET_HANDLE + */ +NV_STATUS +CliGetSubDeviceHandleFromGpu +( + NvHandle hClient, + OBJGPU *pGpu, + NvHandle *phSubDevice +) +{ + Subdevice *pSubdevice; + + if (phSubDevice == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((pSubdevice = CliGetSubDeviceInfoFromGpu(hClient, pGpu)) == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *phSubDevice = RES_GET_HANDLE(pSubdevice); + + return NV_OK; +} + +/** + * WARNING: This function is deprecated and use is *strongly* discouraged + * (especially for new code!) + * + * From the function name (CliSetSubDeviceContext) it appears as a simple + * accessor but violates expectations by modifying the SLI BC threadstate (calls + * to GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully + * managed by the caller. + * + * Instead of using this routine, please use subdeviceGetByHandle then call + * GPU_RES_GET_GPU, RES_GET_HANDLE, GPU_RES_SET_THREAD_BC_STATE as needed. + * + * Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. 
That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS +CliSetSubDeviceContext +( + NvHandle hClient, + NvHandle hSubdevice, + NvHandle *phDevice, + OBJGPU **ppGpu +) +{ + Subdevice *pSubdevice; + RsClient *pClient; + NV_STATUS status; + + if (phDevice != NULL) + { + *phDevice = 0; + } + *ppGpu = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = subdeviceGetByHandle(pClient, hSubdevice, &pSubdevice); + if (status != NV_OK) + return status; + + *ppGpu = GPU_RES_GET_GPU(pSubdevice); + if (phDevice != NULL) + { + *phDevice = RES_GET_HANDLE(pSubdevice->pDevice); + } + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c new file mode 100644 index 0000000..7df4194 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c @@ -0,0 +1,279 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "diagnostics/journal.h" +#include "diagnostics/tracer.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/client.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +// +// EVENT RM SubDevice Controls +// +NV_STATUS +subdeviceCtrlCmdEventSetTrigger_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_SW, NULL, 0, 0, 0); + + return NV_OK; +} + +// +// subdeviceCtrlCmdEventSetTriggerFifo +// +// Used to signal Vulkan timeline semaphores from the CPU. 
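+// The control forwards the client's event handle to the HOST engine's
+// non-stall interrupt notification path (engineNonStallIntrNotifyEvent), so
+// waiters on that event are signaled as if a non-stall interrupt had fired.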
+// +NV_STATUS +subdeviceCtrlCmdEventSetTriggerFifo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + engineNonStallIntrNotifyEvent(pGpu, NV2080_ENGINE_TYPE_HOST, + pTriggerFifoParams->hEvent); + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetNotification_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + // NV01_EVENT must have been plugged into this subdevice + if (inotifyGetNotificationList(staticCast(pSubdevice, INotifier)) == NULL) + { + NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION); + return NV_ERR_INVALID_STATE; + } + + if (pSetEventParams->event >= NV2080_NOTIFIERS_MAXCOUNT) + { + NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event); + return NV_ERR_INVALID_ARGUMENT; + } + + if (pSetEventParams->event == NV2080_NOTIFIERS_TIMER) + { + NV_PRINTF(LEVEL_INFO, "wrong control call for timer event\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + pRmApi->Control(pRmApi, RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + pSetEventParams, + sizeof *pSetEventParams)); + } + + switch (pSetEventParams->action) + { + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE: + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT: + { + // must be in disabled state to transition to an active state + if (pSubdevice->notifyActions[pSetEventParams->event] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + status = NV_ERR_INVALID_STATE; + break; + } + + if (pSetEventParams->event == NV2080_NOTIFIERS_FIFO_EVENT_MTHD) + { + pGpu->activeFifoEventMthdNotifiers++; + } + + pSubdevice->notifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE: + { + if ((pSetEventParams->event == NV2080_NOTIFIERS_FIFO_EVENT_MTHD) && + (pSubdevice->notifyActions[pSetEventParams->event] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)) + { + NV_ASSERT(pGpu->activeFifoEventMthdNotifiers); + pGpu->activeFifoEventMthdNotifiers--; + } + + pSubdevice->notifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} + +NV_STATUS +subdeviceCtrlCmdEventSetMemoryNotifies_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams +) +{ + Memory *pMemory; + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + NvU32 i; + + // ensure there's no pending notifications if there is an existing notification buffer + if (pSubdevice->hNotifierMemory != NV01_NULL_OBJECT) + { + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + { + if (pSubdevice->notifyActions[i] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + } + + if (pSetMemoryNotifiesParams->hMemory == NV01_NULL_OBJECT) + { + pSubdevice->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pSubdevice->pNotifierMemory = NULL; + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetMemoryNotifiesParams->hMemory, &pMemory)); + + if (pMemory->pMemDesc->Size < NV_SIZEOF32(NvNotification) * NV2080_NOTIFIERS_MAXCOUNT) 
+ { + return NV_ERR_INVALID_LIMIT; + } + + pSubdevice->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pSubdevice->pNotifierMemory = pMemory; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + Memory *pMemory; + NvU32 i; + + if (pSubdevice->hSemMemory != NV01_NULL_OBJECT) + { + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + { + if (pSubdevice->notifyActions[i] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetSemMemoryParams->hSemMemory, &pMemory)); + + if (pSetSemMemoryParams->semOffset >= pMemory->pMemDesc->Size) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pSubdevice->hSemMemory = pSetSemMemoryParams->hSemMemory; + pMemory->vgpuNsIntr.nsSemOffset = pSetSemMemoryParams->semOffset; + + pMemory->vgpuNsIntr.nsSemValue = 0; + pMemory->vgpuNsIntr.guestMSIAddr = 0; + pMemory->vgpuNsIntr.guestMSIData = 0; + pMemory->vgpuNsIntr.guestDomainId = 0; + pMemory->vgpuNsIntr.pVgpuVfioRef = NULL; + pMemory->vgpuNsIntr.isSemaMemValidationEnabled = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetSemaMemValidation_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams +) +{ + Memory *pMemory; + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + NvU32 *pSemValue; + NV_STATUS rmStatus = NV_ERR_INVALID_OBJECT_HANDLE; + + rmStatus = memGetByHandle(pClient, pSetSemaMemValidationParams->hSemMemory, &pMemory); + + if (rmStatus == NV_OK) + { + pSemValue = (NvU32 *)NvP64_VALUE(memdescGetKernelMapping(pMemory->pMemDesc)); + + if (pSemValue == NULL) + { + return NV_ERR_INVALID_ADDRESS; + } + + portMemSet(pSemValue, 0, RM_PAGE_SIZE); + pMemory->vgpuNsIntr.nsSemValue = 0; + pMemory->vgpuNsIntr.isSemaMemValidationEnabled = pSetSemaMemValidationParams->isSemaMemValidationEnabled; + } + + return rmStatus; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c new file mode 100644 index 0000000..381fc17 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c @@ -0,0 +1,792 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + * File contains ctrls related to general GPU + */ + +#include "core/core.h" +#include "core/locks.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_db.h" +#include "nvrm_registry.h" +#include "nvVer.h" +#include "objtmr.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "rmapi/resource_fwd_decls.h" +#include "rmapi/client.h" + +#include "class/cl900e.h" + + + + +static NV_STATUS +getGpuInfos(Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, NvBool bCanAccessHw) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetInfoV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams +) +{ + return getGpuInfos(pSubdevice, pGpuInfoParams, NV_TRUE); +} + +// +// subdeviceCtrlCmdGpuGetCachedInfo: As subdeviceCtrlCmdGpuGetInfoV2, except +// does not perform any HW access (NO_GPUS_ACCESS and NO_GPUS_LOCK flags) +// +NV_STATUS +subdeviceCtrlCmdGpuGetCachedInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams +) +{ + return getGpuInfos(pSubdevice, pGpuInfoParams, NV_FALSE); +} + +/*! + * @brief This command can be used for Optimus enabled system. + * + * @return : + * NV_OK + */ +NV_STATUS +subdeviceCtrlCmdGpuSetOptimusInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams +) +{ + NvU32 status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (pGpuOptimusInfoParams->isOptimusEnabled) + { + // + // Setting pMemoryManager->bPersistentStandbyBuffer for Optimus system. + // It is used for sys_mem allocation which is pinned across + // S3 transitions.Sys_mem allocations are done at first S3 cycle + // and release during driver unload, which reduces system + // VM fragmentation, which was a problem in optimus system. + // For more details refer bug 754122. 
+ // + GPU_GET_MEMORY_MANAGER(pGpu)->bPersistentStandbyBuffer = NV_TRUE; + } + return status; +} + +// +// subdeviceCtrlCmdGpuGetSdm +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetSdm_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pSdmParams->subdeviceMask = gpuGetSubdeviceMask(pGpu); + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuSetSdm +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuSetSdm_IMPL +( + Subdevice* pSubdevice, + NV2080_CTRL_GPU_SET_SDM_PARAMS* pSdmParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU32 subdeviceInstance; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (!ONEBITSET(pSdmParams->subdeviceMask)) + { + NV_PRINTF(LEVEL_ERROR, "Subdevice mask has none or more than one bit set"); + return NV_ERR_INVALID_DATA; + } + + if (gpuIsStateLoaded(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "NV2080_CTRL_CMD_GPU_SET_SDM cannot be called after the GPU is loaded"); + return NV_ERR_INVALID_STATE; + } + subdeviceInstance = BIT_IDX_32(pSdmParams->subdeviceMask); + + if (subdeviceInstance >= NV_MAX_SUBDEVICES) + { + NV_PRINTF(LEVEL_ERROR, "Subdevice mask exceeds the max count of subdevices"); + return NV_ERR_INVALID_DATA; + } + pGpu->subdeviceInstance = subdeviceInstance; + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetSimulationInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetSimulationInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (IS_SILICON(pGpu)) + { + pGpuSimulationInfoParams->type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; + } + else + { + pGpuSimulationInfoParams->type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_UNKNOWN; + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetEngines +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEngines_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams +) +{ + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS getEngineParamsV2; + NvU32 *pKernelEngineList = NvP64_VALUE(pParams->engineList); + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + portMemSet(&getEngineParamsV2, 0, sizeof(getEngineParamsV2)); + + status = subdeviceCtrlCmdGpuGetEnginesV2(pSubdevice, &getEngineParamsV2); + NV_CHECK_OR_RETURN(LEVEL_INFO, NV_OK == status, status); + + // NULL clients just want an engine count + if (NULL != pKernelEngineList) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, pParams->engineCount >= getEngineParamsV2.engineCount, + NV_ERR_BUFFER_TOO_SMALL); + portMemCopy(pKernelEngineList, + getEngineParamsV2.engineCount * sizeof(*getEngineParamsV2.engineList), getEngineParamsV2.engineList, + getEngineParamsV2.engineCount * sizeof(*getEngineParamsV2.engineList)); + } + + pParams->engineCount = getEngineParamsV2.engineCount; + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEnginesV2 +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEnginesV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + 
LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // Update the engine Database + NV_ASSERT_OK_OR_RETURN(gpuUpdateEngineTable(pGpu)); + + // Validate engine count + if (pGpu->engineDB.size > NV2080_GPU_MAX_ENGINES_LIST_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "The engine database's size (0x%x) exceeds " + "NV2080_GPU_MAX_ENGINES_LIST_SIZE (0x%x)!\n", + pGpu->engineDB.size, NV2080_GPU_MAX_ENGINES_LIST_SIZE); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + { + NvU32 i; + pEngineParams->engineCount = pGpu->engineDB.size; + for (i = 0; i < pEngineParams->engineCount; i++) + { + pEngineParams->engineList[i] = pGpu->engineDB.pType[i]; + } + } + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEngineClasslist +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEngineClasslist_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + ENGDESCRIPTOR engDesc; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + status = gpuXlateClientEngineIdToEngDesc(pGpu, pClassParams->engineType, &engDesc); + NV_ASSERT(status == NV_OK); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST Invalid engine ID 0x%x\n", + pClassParams->engineType); + DBG_BREAKPOINT(); + return status; + } + + status = gpuGetClassList(pGpu, &pClassParams->numClasses, NvP64_VALUE(pClassParams->classList), engDesc); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST Class List query failed\n"); + } + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEnginePartnerList +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + ENGDESCRIPTOR engDesc; + NvU32 localType; + NvU32 i; + PCLASSDESCRIPTOR pClass; + NV_STATUS status = NV_OK; + + pPartnerListParams->numPartners = 0; + + status = gpuXlateClientEngineIdToEngDesc(pGpu, pPartnerListParams->engineType, &engDesc); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid engine ID 0x%x\n", + pPartnerListParams->engineType); + return status; + } + + // find class in class db + status = gpuGetClassByClassId(pGpu, pPartnerListParams->partnershipClassId, &pClass); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid class ID 0x%x\n", + pPartnerListParams->partnershipClassId); + return status; + } + + // Make sure that the engine related to this class is FIFO... + if (pClass->engDesc != ENG_KERNEL_FIFO) + { + NV_PRINTF(LEVEL_ERROR, + "Class 0x%x is not considered a partnership class.\n", + pPartnerListParams->partnershipClassId); + return NV_ERR_NOT_SUPPORTED; + } + + // Translate the instance-local engine type to the global engine type in MIG mode + localType = pPartnerListParams->engineType; + + // Restore the client's passed engineType + pPartnerListParams->engineType = localType; + + // + // For channels that the hal didn't handle, we should just return + // all of the supported engines except for the target engine. + // + + // Update the engine Database + NV_ASSERT_OK_OR_RETURN(gpuUpdateEngineTable(pGpu)); + + // Make sure it all will fit + if (pGpu->engineDB.size > NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS) + { + NV_PRINTF(LEVEL_ERROR, + "partnerList space is too small, time to increase. 
This is fatal\n"); + DBG_BREAKPOINT(); + return status; + } + + // Copy over all of the engines except the target + for (i = 0; i < pGpu->engineDB.size; i++) + { + // Skip the engine handed in + if (pGpu->engineDB.pType[i] != pPartnerListParams->engineType ) + { + pPartnerListParams->partnerList[pPartnerListParams->numPartners++] = pGpu->engineDB.pType[i]; + } + } + + return status; +} + +// +// subdeviceCtrlCmdGpuQueryMode_IMPL +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuQueryMode_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + switch (gpuGetMode(pGpu)) + { + case NV_GPU_MODE_GRAPHICS_MODE: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE; + break; + } + case NV_GPU_MODE_COMPUTE_MODE: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE; + break; + } + default: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE; + break; + } + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuHandleGpuSR +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuHandleGpuSR_IMPL +( + Subdevice *pSubdevice +) +{ + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetId +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetId_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pIdParams->gpuId = pGpu->gpuId; + + return NV_OK; +} + +// +// nv2080CtrlCmdGpuGetPids +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetPids_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU32 internalClassId; + NV_STATUS status; + MIG_INSTANCE_REF *pRef = NULL; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + switch (pGetPidsParams->idType) + { + case (NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS): + { + if (pGetPidsParams->id == NV20_SUBDEVICE_0) + { + internalClassId = classId(Subdevice); + } + else if (pGetPidsParams->id == MPS_COMPUTE) + { + internalClassId = classId(MpsApi); + } + else + { + internalClassId = classId(ChannelDescendant); + } + break; + } + case (NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_VGPU_GUEST): + { + internalClassId = classId(HostVgpuDeviceApi); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Search over all clients to see if any contain objects of type = id. + // If they do, then add their PID to the PIDArray param and also + // return the amount of valid entries in the Array through pidTblCount. 
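+    //
+    // For example (per the switch above): idType CLASS with id NV20_SUBDEVICE_0
+    // reports every process holding a subdevice on this GPU; idType CLASS with
+    // id MPS_COMPUTE reports MPS clients; idType VGPU_GUEST reports vGPU guest
+    // processes; any other CLASS id is matched against channel descendants.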
+ // + status = gpuGetProcWithObject(pGpu, pGetPidsParams->id, internalClassId, + pGetPidsParams->pidTbl, &pGetPidsParams->pidTblCount, + pRef); + return status; +} + +// +// subdeviceCtrlCmdGpuGetPidInfo +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetPidInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams +) +{ + NV2080_CTRL_GPU_PID_INFO_DATA *pPidInfoData; + NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV2080_CTRL_GPU_PID_INFO *pPidInfo; + NvU32 internalClassId; + NvU32 i; + MIG_INSTANCE_REF *pRef = NULL; + NvBool bGlobalInfo = NV_TRUE; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if ((pGetPidInfoParams->pidInfoListCount <= 0) || + (pGetPidInfoParams->pidInfoListCount > + NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + for (i = 0; i < pGetPidInfoParams->pidInfoListCount; ++i) + { + pPidInfo = &pGetPidInfoParams->pidInfoList[i]; + + pSmcInfo = &pPidInfo->smcSubscription; + pSmcInfo->computeInstanceId = PARTITIONID_INVALID; + pSmcInfo->gpuInstanceId = PARTITIONID_INVALID; + + switch (pPidInfo->index) + { + case (NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE): + { + internalClassId = classId(Memory); + + pPidInfoData = &pPidInfo->data; + portMemSet(pPidInfoData, 0, sizeof(NV2080_CTRL_GPU_PID_INFO_DATA)); + pPidInfo->result = gpuFindClientInfoWithPidIterator(pGpu, pPidInfo->pid, 0, + internalClassId, + pPidInfoData, + pSmcInfo, + pRef, + bGlobalInfo); + break; + } + default: + { + pPidInfo->result = NV_ERR_INVALID_ARGUMENT; + break; + } + } + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + // Default to minimal page size (4k) + pParams->maxSupportedPageSize = RM_PAGE_SIZE; + + if (gpuIsSriovEnabled(pGpu)) + { + NvU64 vmmuSegmentSize = gpuGetVmmuSegmentSize(pGpu); + if (vmmuSegmentSize > 0 && + vmmuSegmentSize < NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_512MB) + { + pParams->maxSupportedPageSize = RM_PAGE_SIZE_HUGE; + } + } + + return status; +} + +/*! + * @brief Check if address range is within the provided limits + * + * @param[in] addrStart Staring address of address range + * @param[in] addrLength Size of address range + * @param[in] limitStart Staring address of limit + * @param[in] limitLength Size of limit + * + * @return + * NV_TRUE, if address range is within the provided limits + * NV_FALSE, if address range is outside the provided limits + * + */ +static NvBool isAddressWithinLimits +( + NvU64 addrStart, + NvU64 addrLength, + NvU64 limitStart, + NvU64 limitLength +) +{ + NvU64 addrEnd = 0; + NvU64 limitEnd = 0; + + // + // Calculate End address of address range and limit, + // Return NV_FALSE in case of 64-bit addition overflow + // + if (!portSafeAddU64(addrStart, addrLength - 1, &addrEnd) || + !portSafeAddU64(limitStart, limitLength - 1, &limitEnd)) + { + return NV_FALSE; + } + + return ((addrStart >= limitStart) && (addrEnd <= limitEnd)); +} + +/*! + * @brief Validate the address range for Memory Map request by comparing the + * user supplied address range with GPU BAR0/BAR1 range. 
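+ *
+ * isAddressWithinLimits() above treats both ranges as closed intervals: the
+ * request [addrStart, addrStart + addrLength - 1] passes only if it lies
+ * entirely inside [limitStart, limitStart + limitLength - 1]. Worked example
+ * (illustrative values): start=0x9000, length=0x1000 fits within a limit of
+ * start=0x8000, length=0x4000 because 0x9FFF <= 0xBFFF; any range whose end
+ * computation would wrap past 2^64 - 1 is rejected by the portSafeAddU64
+ * checks.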
+ * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice + * @param[in] pParams pointer to control parameters + * + * Possible status values returned are: + * NV_OK + * NV_ERR_PROTECTION_FAULT + * + */ +NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU64 start = pParams->addressStart; + NvU64 length = pParams->addressLength; + NV_STATUS rmStatus; + NvU32 bar0MapSize; + NvU64 bar0MapOffset; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + pParams->protection = NV_PROTECT_READ_WRITE; + + if (isAddressWithinLimits(start, length, pGpu->busInfo.gpuPhysAddr, + pGpu->deviceMappings[0].gpuNvLength)) + { + start -= pGpu->busInfo.gpuPhysAddr; + + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, + &bar0MapOffset, &bar0MapSize); + if ((rmStatus == NV_OK) && + isAddressWithinLimits(start, length, bar0MapOffset, bar0MapSize)) + { + pParams->protection = NV_PROTECT_READABLE; + return NV_OK; + } + + // + // If the kernel side does not know about the object being mapped, + // fall-through to GSP and see if it knows anything. + // + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + return pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST, + pParams, sizeof(*pParams)); + } + + return NV_ERR_PROTECTION_FAULT; + } + + return NV_ERR_PROTECTION_FAULT; +} + +/*! + * @brief: This command returns the load time (latency) of each engine, + * implementing NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES control call. + * + * @param[in] pSubdevice + * @param[in] pParams + * + * @return + * NV_OK Success + */ +NV_STATUS +subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + PENGDESCRIPTOR engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 curEngDescIdx; + + NV_ASSERT_OR_RETURN(numEngDescriptors < NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS, NV_ERR_BUFFER_TOO_SMALL); + + pParams->engineCount = numEngDescriptors; + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + pParams->engineIsInit[curEngDescIdx] = NV_FALSE; + continue; + } + + pParams->engineList[curEngDescIdx] = pEngstate->engDesc; + pParams->engineStateLoadTime[curEngDescIdx] = pEngstate->stats[ENGSTATE_STATE_LOAD].transitionTimeUs * 1000; + pParams->engineIsInit[curEngDescIdx] = NV_TRUE; + } + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c new file mode 100644 index 0000000..2a569ac --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c @@ -0,0 +1,410 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + * File contains ctrls related to TMR engine object + */ + +#include "core/core.h" + + +#include "core/locks.h" +#include "gpu/subdevice/subdevice.h" +#include "objtmr.h" +#include "rmapi/client.h" + +// +// subdeviceCtrlCmdTimerCancel +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdTimerCancel_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu; + OBJTMR *pTmr; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (pSubdevice == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = GPU_RES_GET_GPU(pSubdevice); + pTmr = GPU_GET_TIMER(pGpu); + + if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + tmrCancelCallback(pTmr, pSubdevice); + pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + return NV_OK; +} + +static NV_STATUS +gpuControlTimerCallback(OBJGPU *pGpu, OBJTMR *pTmr, void * pData) +{ + Subdevice *pSubDevice = (Subdevice *) pData; + PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pSubDevice, INotifier)); + + if (pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + NV_PRINTF(LEVEL_INFO, + "callback is called but the timer is not scheduled\n"); + return NV_ERR_INVALID_STATE; + } + + // Mark the timer as processed (no self-rescheduling for now) + pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + + // Find timer event + while ((pNotifyEvent != NULL) && (pNotifyEvent->NotifyIndex != NV2080_NOTIFIERS_TIMER)) + { + pNotifyEvent = pNotifyEvent->Next; + } + if (pNotifyEvent == NULL) + { + NV_PRINTF(LEVEL_INFO, "timer event is missing\n"); + return NV_ERR_INVALID_STATE; + } + + // perform a direct callback to the client + if (pNotifyEvent->Data != NvP64_NULL) + { + NvU64 currentTime = tmrGetTime_HAL(pGpu, pTmr); + osEventNotification(pGpu, pNotifyEvent, NV2080_NOTIFIERS_TIMER, + ¤tTime, sizeof(currentTime)); + } + else + { + NV_PRINTF(LEVEL_INFO, "timer callback pointer is missing\n"); + return NV_ERR_INVALID_STATE; + } + return NV_OK; +} + 
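+/*
+ * Expected client-side sequence, sketched for illustration only (handle names
+ * are hypothetical; error handling elided). gpuControlTimerCallback above only
+ * fires once an NV01_EVENT of type NV01_EVENT_KERNEL_CALLBACK or
+ * NV01_EVENT_KERNEL_CALLBACK_EX has been bound to NV2080_NOTIFIERS_TIMER and
+ * the alarm has been armed:
+ *
+ *     NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS params = { 0 };
+ *     params.time_nsec = 1000000; // 1 ms
+ *     // Set the _TIME field of params.flags for absolute or relative
+ *     // scheduling; timerSchedule() below dispatches on it via DRF_VAL().
+ *     pRmApi->Control(pRmApi, hClient, hSubdevice,
+ *                     NV2080_CTRL_CMD_TIMER_SCHEDULE, &params, sizeof(params));
+ */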
+static NV_STATUS +timerSchedule +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pTimerScheduleParams +) +{ + OBJGPU *pGpu; + OBJTMR *pTmr; + PEVENTNOTIFICATION pNotifyEvent; + + if (pSubdevice == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = GPU_RES_GET_GPU(pSubdevice); + pTmr = GPU_GET_TIMER(pGpu); + + pNotifyEvent = inotifyGetNotificationList(staticCast(pSubdevice, INotifier)); + + if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + NV_PRINTF(LEVEL_INFO, + "gpuControlTimerCallback: the timer is already scheduled for this subdevice\n"); + return NV_ERR_INVALID_STATE; + } + + // Validate the timer event + while ((pNotifyEvent != NULL) && (pNotifyEvent->NotifyIndex != NV2080_NOTIFIERS_TIMER)) + { + pNotifyEvent = pNotifyEvent->Next; + } + if (pNotifyEvent == NULL) + { + NV_PRINTF(LEVEL_INFO, + "gpuControlTimerCallback: timer event is missing\n"); + return NV_ERR_INVALID_STATE; + } + if (((pNotifyEvent->NotifyType != NV01_EVENT_KERNEL_CALLBACK) && (pNotifyEvent->NotifyType != NV01_EVENT_KERNEL_CALLBACK_EX)) || + (pNotifyEvent->Data == NvP64_NULL)) + { + NV_PRINTF(LEVEL_INFO, + "gpuControlTimer: cmd 0x%x: callback function is missing\n", + NV2080_CTRL_CMD_TIMER_SCHEDULE); + return NV_ERR_INVALID_STATE; + + } + + // Mark the timer as processed (no self-rescheduling for now). Set the flag before calling the timer + // since callback may be called right away. + pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + + // schedule the timer + if (DRF_VAL(2080, _CTRL_TIMER_SCHEDULE_FLAGS, _TIME, pTimerScheduleParams->flags) == NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS) + { + tmrScheduleCallbackAbs(pTmr, gpuControlTimerCallback, pSubdevice, pTimerScheduleParams->time_nsec, 0, 0); + } + else + { + tmrScheduleCallbackRel(pTmr, gpuControlTimerCallback, pSubdevice, pTimerScheduleParams->time_nsec, 0, 0); + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdTimerSchedule +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdTimerSchedule_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + if (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) + { + LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(GPU_RES_GET_GPU(pSubdevice)->gpuInstance)); + } + else + { + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + } + + return timerSchedule(pSubdevice, pParams); +} + +// +// subdeviceCtrlCmdTimerGetTime +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// Timer callback list accessed in tmrService at DPC +// +NV_STATUS +subdeviceCtrlCmdTimerGetTime_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + if ((pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) && + (pRmCtrlParams->flags & NVOS54_FLAGS_LOCK_BYPASS)) + { + if (pTmr->tmrChangePending) + { + return NV_ERR_STATE_IN_USE; + } + } + else + { + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + } + + tmrGetCurrentTime(pTmr, &pParams->time_nsec); + + return NV_OK; +} + +// +// 
subdeviceCtrlCmdTimerGetRegisterOffset +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +subdeviceCtrlCmdTimerGetRegisterOffset_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpuGetRegBaseOffset_HAL(pGpu, NV_REG_BASE_TIMER, &pTimerRegOffsetParams->tmr_offset); +} + +/*! + * @brief Provides correlation information between GPU time and CPU time. + * + * @param[in] pSubDevice + * @param[in] pParams + * + * @return NV_OK Success + * @return NV_ERR_INVALID_ARGUMENT Invalid argument + * @return NV_ERR_NOT_SUPPORTED Unsupported CPU clock id + */ +NV_STATUS +subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NV_STATUS status = NV_OK; + NvU8 i; + NvU32 sec, usec; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OR_RETURN((pParams->sampleCount <= + NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES), + NV_ERR_INVALID_ARGUMENT); + + switch (pParams->cpuClkId) + { + case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME: + { + for (i = 0; i < pParams->sampleCount; i++) + { + osGetCurrentTime(&sec, &usec); + pParams->samples[i].cpuTime = (((NvU64)sec) * 1000000) + usec; + status = tmrGetCurrentTime(pTmr, + &pParams->samples[i].gpuTime); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not get GPU time. status=0x%08x\n", + status); + break; + } + } + break; + } + + case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API: + { + // + // As reading CPU time and GPU time is a serial process we need to + // have a technique to mitigate the effects of preemption so we read + // the timestamps in a zipper pattern like c G c G c G c into an + // array storing all 7 values, find the two c values closest together, + // and report the sync point as the average of those two c values and + // the G between them. One complication is that reading a GPU's PTIMER + // directly from the CPU must be done via two non-adjacent BAR0-mapped + // memory locations for the low 32 bits and high 32 bits, and there's + // no way to atomically get both. One way to fix this is to make the + // read of the GPU time do the high bits, the low bits, and the high + // bits again, and if the two high values differ, we repeat the process + // until Ghi1 and Ghi2 match Once Ghi1 and 2 match, we use that as + // the high bits and the lo bits & CPU time from the zipper. + // + const NvU32 numTimerSamples = 3; // We take (hardcoded) 3 gpu timestamps. + NvU32 gpuTimeLo[3]; // Array to hold num_timer_samples gpu timestamps. + NvU64 cpuTime[4]; // Array to hold num_timer_samples+1 cpu timestamps. + NvU64 min; + NvU32 closestPairBeginIndex; + NvU32 gpuTimeHiOld; + NvU32 gpuTimeHiNew; + NvU32 i; + + gpuTimeHiNew = tmrReadTimeHiReg_HAL(pGpu, pTmr, NULL); + + do + { + gpuTimeHiOld = gpuTimeHiNew; + for (i = 0; i < numTimerSamples; i++) + { + + osGetPerformanceCounter(&cpuTime[i]); + + gpuTimeLo[i] = tmrReadTimeLoReg_HAL(pGpu, pTmr, NULL); + } + + osGetPerformanceCounter(&cpuTime[i]); + + // Read GPU TIME_1(High) again to detect wrap around. + gpuTimeHiNew = tmrReadTimeHiReg_HAL(pGpu, pTmr, NULL); + } while (gpuTimeHiNew != gpuTimeHiOld); + + // find i such that cpuTime[i+1] - cpuTime[i] is minimum + // i.e. 
find closest pair of cpuTime. + min = cpuTime[1] - cpuTime[0]; + closestPairBeginIndex = 0; + for (i = 0; i < numTimerSamples; i++) + { + if ((cpuTime[i+1] - cpuTime[i]) < min) + { + closestPairBeginIndex = i; + min = cpuTime[i+1] - cpuTime[i]; + } + } + + pParams->samples[0].gpuTime = ((((NvU64)gpuTimeHiNew) << 32) | + gpuTimeLo[closestPairBeginIndex]); + pParams->samples[0].cpuTime = (cpuTime[closestPairBeginIndex] + + cpuTime[closestPairBeginIndex + 1])/2; + NV_PRINTF(LEVEL_INFO, + "GPUTime = %llx CPUTime = %llx\n", + pParams->samples[0].gpuTime, pParams->samples[0].cpuTime); + break; + } + + case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC: + { + for (i = 0; i < pParams->sampleCount; i++) + { + status = tmrGetGpuAndCpuTimestampPair_HAL(pGpu, pTmr, &pParams->samples[i].gpuTime, &pParams->samples[i].cpuTime); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not get CPU GPU time. status=0x%08x\n", + status); + break; + } + } + break; + } + default: + { + status = NV_ERR_NOT_SUPPORTED; + break; + } + } + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c new file mode 100644 index 0000000..d4a1339 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c @@ -0,0 +1,1632 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file timer.c + * @brief Timer Object Function Definitions. 
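+ *
+ * Two callback schemes coexist here: the legacy TIMEPROC_OBSOLETE path,
+ * backed by a preallocated free list (pRmCallbackFreeList_OBSOLETE), and the
+ * newer TMR_EVENT path in which the caller owns the event memory and drives
+ * it through tmrEventCreate / tmrEventScheduleAbs / tmrEventScheduleRel /
+ * tmrEventCancel / tmrEventDestroy.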
+ */ + +/* ------------------------ Includes ---------------------------------------- */ +#include "objtmr.h" +#include "class/cl0004.h" // NV004_NOTIFIERS_SET_ALARM_NOTIFY +#include "gpu/gpu_resource.h" + +/* ------------------------ Static Function Prototypes ---------------------- */ +static PTMR_EVENT_PVT _tmrPullCallbackFromHead (OBJTMR *); +static void _tmrScanCallback(OBJTMR *, void *); +static PTMR_EVENT_PVT _tmrGetNextFreeCallback(OBJTMR *); +static NV_STATUS _tmrInsertCallback(OBJTMR *, PTMR_EVENT_PVT, NvU64); +static void _tmrInsertCallbackInList(OBJGPU *pGpu, OBJTMR *pTmr, PTMR_EVENT_PVT pEvent); +static void _tmrStateLoadCallbacks(OBJGPU *, OBJTMR *); +static NV_STATUS _tmrGetNextAlarmTime(OBJTMR *, NvU64 *); +static void _tmrScheduleCallbackInterrupt(OBJGPU *, OBJTMR *, NvU64); + +NV_STATUS +tmrConstructEngine_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + ENGDESCRIPTOR engDesc +) +{ + // Mark that this timer is not yet initialized + pTmr->bInitialized = NV_FALSE; + + // Create the Granular lock for SWRL Timer callback + pTmr->pTmrSwrlLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (pTmr->pTmrSwrlLock == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Alloc spinlock failed\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + tmrInitCallbacks(pTmr); + osInit1HzCallbacks(pTmr); + + pTmr->retryTimes = 3; + + pTmr->errorCount = 0; + + pTmr->pGrTickFreqRefcnt = NULL; + + return NV_OK; +} + +void +tmrDestruct_IMPL(OBJTMR *pTmr) +{ + // Delete the Granular lock for SWRL Timer callback + if (pTmr->pTmrSwrlLock != NULL) + { + portSyncSpinlockDestroy(pTmr->pTmrSwrlLock); + pTmr->pTmrSwrlLock = NULL; + } + + if (pTmr->pGrTickFreqRefcnt != NULL) + { + objDelete(pTmr->pGrTickFreqRefcnt); + pTmr->pGrTickFreqRefcnt = NULL; + } + + osDestroy1HzCallbacks(pTmr); +} + +/*! + * Simple Utility function, checks if there are any queued callbacks + */ +static NV_INLINE NvBool tmrEventsExist(OBJTMR *pTmr) +{ + return pTmr->pRmActiveEventList != NULL; +} + +/*! + * Allocates the necessary memory for storing a callback in the timer. + * + * @param[out] ppEvent A reference to the client's pointer. + */ +NV_STATUS tmrEventCreate_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT *ppEventPublic, + TIMEPROC Proc, + void *pUserData, + NvU32 flags +) +{ + PTMR_EVENT_PVT *ppEvent = (PTMR_EVENT_PVT*)ppEventPublic; + NV_STATUS status = NV_OK; + + *ppEvent = portMemAllocNonPaged(sizeof(TMR_EVENT_PVT)); + if (*ppEvent == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate timer event\n"); + return NV_ERR_NO_MEMORY; + } + (*ppEvent)->bLegacy = NV_FALSE; + (*ppEvent)->bInUse = NV_FALSE; + (*ppEvent)->pNext = NULL; + (*ppEventPublic)->pTimeProc = Proc; + (*ppEventPublic)->pUserData = pUserData; + (*ppEventPublic)->flags = flags; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + status = tmrEventCreateOSTimer_HAL(pTmr, *ppEventPublic); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create OS timer \n"); + } + } + return status; +} + +static void +_tmrScheduleCallbackInterrupt +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU64 alarmTime +) +{ + // + // Don't schedule the interrupt if we are polling. The interrupt can be + // routed to a different device, which could get confused. Also we don't + // want the extra priv writes. 
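+    //
+    // When the countdown timer is in use, the absolute alarm time is turned
+    // into a relative count below: e.g. (illustrative numbers) currentTime =
+    // 1000 ns with alarmTime = 4000 ns programs a 3000 ns countdown, while an
+    // alarmTime already in the past clamps to 0 so the interrupt fires
+    // immediately.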
+ // + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS)) + return; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + NvU64 currentTime; + NvU32 countdownTime; + + tmrGetCurrentTime(pTmr, ¤tTime); + countdownTime = currentTime < alarmTime ? NvU64_LO32(alarmTime - currentTime) : 0; + tmrSetCountdown_HAL(pGpu, pTmr, countdownTime, 0, NULL); + } + else + { + tmrSetAlarm_HAL(pGpu, pTmr, alarmTime, NULL); + } +} + +void +tmrResetCallbackInterrupt_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrReset_HAL(pGpu, pTmr, NULL); + } + else + { + tmrSetAlarmIntrReset_HAL(pGpu, pTmr, NULL); + } +} + +NvBool +tmrGetCallbackInterruptPending_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + return tmrGetCountdownPending_HAL(pGpu, pTmr, NULL); + } + else + { + return tmrGetAlarmPending_HAL(pGpu, pTmr, NULL); + } +} + +/*! + * Cancels a given callback, marking it invalid and preventing it from being executed. + * Updates the next alarm time appropriately + * + * @param[in] pEvent The callback to be cancelled + */ +void tmrEventCancel_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic +) +{ + NvU64 nextAlarmTime; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + PTMR_EVENT_PVT pChaser = pTmr->pRmActiveEventList; + NvBool bRemovedHead = pChaser == pEvent; + + if (pEventPublic == NULL) + { + return; + } + + NV_ASSERT(!pEvent->bLegacy); + + pEvent->bInUse = NV_FALSE; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + NV_STATUS status = NV_OK; + status = tmrEventCancelOSTimer_HAL(pTmr, pEventPublic); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed in cancel of OS timer callback\n"); + } + return; + } + + if (bRemovedHead) + { + pTmr->pRmActiveEventList = pEvent->pNext; + + // Need to update the alarm time + if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + else + { + // List is empty! Disable PTIMER interrupt. + tmrRmCallbackIntrDisable(pTmr, pGpu); + } + } + else + { + while (pChaser != NULL && pChaser->pNext != pEvent) + { + pChaser = pChaser->pNext; + } + if (pChaser == NULL) + { + // The callback wasn't currently scheduled, nothing to change. + return; + } + pChaser->pNext = pEvent->pNext; + } +} + +/*! + * Frees the memory used for maintaining a given callback in the timer. + * Currently automatically calls cancel on the event. + * + * @param[in] pEvent The callback to cancel and free. + */ +void tmrEventDestroy_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic +) +{ + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + + if (pEvent != NULL) + { + NV_ASSERT(!pEvent->bLegacy); + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + // OS timer destroying will cancel the timer + tmrEventDestroyOSTimer_HAL(pTmr, pEventPublic); + } + else + { + tmrEventCancel(pTmr, pEventPublic); + } + portMemFree(pEvent); + } +} + +/*! 
+ * TODO: document + */ +static NV_STATUS +_nv0004CtrlCmdTmrSetAlarmNotifyCallback(OBJGPU *pGpu, OBJTMR *pTmr, void *pData) +{ + PEVENTNOTIFICATION pNotifyEvent = pData; + NV_STATUS status = NV_OK; + + // perform a direct callback to the client + if (NvP64_VALUE(pNotifyEvent->Data) != NULL) + { + //one shot signal + status = osNotifyEvent(pGpu, pNotifyEvent, NV004_NOTIFIERS_SET_ALARM_NOTIFY, 0, NV_OK); + } + + return status; +} + +/*! + * TODO: document + * TODO: Migrate this to match current API (probably) + */ +NV_STATUS +tmrapiCtrlCmdTmrSetAlarmNotify_IMPL +( + TimerApi *pTimerApi, + NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimerApi); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pTimerApi, INotifier)); + + // Validate the timer event + while ((pNotifyEvent != NULL) && (pNotifyEvent->hEvent != pParams->hEvent)) + { + pNotifyEvent = pNotifyEvent->Next; + } + + if (pNotifyEvent == NULL) + { + NV_PRINTF(LEVEL_INFO, "timer event is missing\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // schedule the timer + tmrScheduleCallbackRel(pTmr, + _nv0004CtrlCmdTmrSetAlarmNotifyCallback, + pNotifyEvent, + pParams->alarmTimeNsecs, 0, 0); + + return NV_OK; +} + +NV_STATUS tmrGetCurrentTimeEx_IMPL +( + OBJTMR *pTmr, + NvU64 *pTime, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + *pTime = (NvU64)(~0); + return NV_ERR_GPU_IN_FULLCHIP_RESET; + } + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + *pTime = (NvU64)(~0); + return NV_ERR_GPU_IS_LOST; + } + + *pTime = tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState); + + return NV_OK; +} + +NV_STATUS tmrGetCurrentTime_IMPL +( + OBJTMR *pTmr, + NvU64 *pTime +) +{ + return tmrGetCurrentTimeEx(pTmr, pTime, NULL); +} + +/*! + * TODO: document + */ +NV_STATUS tmrGetCurrentDiffTime_IMPL +( + OBJTMR *pTmr, + NvU64 startTime, + NvU64 *pDiffTime +) +{ + NvU64 currentTime; + NV_STATUS rmStatus; + + rmStatus = tmrGetCurrentTime(pTmr, ¤tTime); + + *pDiffTime = currentTime - startTime; + + return rmStatus; +} + +/*! + * Schedule a callback relative to current time specified in units of nanoseconds. + * Callbacks should be expected to be late however, this is not an RTOS, and a + * scheduling delay has been implemented to fix some race condition bugs. + * User has to provide a structure in memory for the timer to use. + * + * @Note: For statically defined events it is recommended to preallocate them all + * at the appropriate stage in task life-cycle, and deallocated at the + * corresponding end of the life-cycle. For dynamically generated events + * consider the affects on fragmentation and potentially deferring deallocation. + * + * @param[in] pEvent Callback memory structure, provided by user. + * @param[in] RelTime Number of nanoseconds from now to call Proc. + * + * @returns Status + */ +NV_STATUS tmrEventScheduleRel_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEvent, + NvU64 RelTime +) +{ + NvU64 AbsTime, currentTime; + NV_STATUS rmStatus; + + rmStatus = tmrGetCurrentTime(pTmr, ¤tTime); + if (rmStatus != NV_OK) + return rmStatus; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + /*HR timer scheduled in relative mode*/ + /*TBD : This condition needs to be moved to OS timer handling functions */ + AbsTime = RelTime; + } + else + { + AbsTime = currentTime + RelTime; + } + + return tmrEventScheduleAbs(pTmr, pEvent, AbsTime); +} + +/*! + * Warning! 
This code is dangerous, it can cause the whole system to crash. It will be
+ * removed as soon as possible! Use the new API!
+ * It remains for transitional purposes only.
+ */
+NV_STATUS tmrScheduleCallbackRel_IMPL
+(
+    OBJTMR             *pTmr,
+    TIMEPROC_OBSOLETE   Proc,
+    void               *Object,
+    NvU64               RelTime,
+    NvU32               Flags,
+    NvU32               ChId
+)
+{
+    NvU64 AbsTime, currentTime;
+    NV_STATUS rmStatus;
+
+    rmStatus = tmrGetCurrentTime(pTmr, &currentTime);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    AbsTime = currentTime + RelTime;
+
+    return tmrScheduleCallbackAbs(pTmr, Proc, Object, AbsTime, Flags, ChId);
+}
+
+/*!
+ * Warning! This code is dangerous, it can cause the whole system to crash. It will be
+ * removed as soon as possible! Use the new API!
+ * It remains for transitional purposes only.
+ */
+NV_STATUS tmrScheduleCallbackRelSec_IMPL
+(
+    OBJTMR             *pTmr,
+    TIMEPROC_OBSOLETE   Proc,
+    void               *Object,
+    NvU32               RelTimeSec,
+    NvU32               Flags,
+    NvU32               ChId
+)
+{
+    NvU64 RelTimeNs;
+
+    RelTimeNs = (NvU64)RelTimeSec * 1000000000;
+
+    return tmrScheduleCallbackRel(pTmr, Proc, Object, RelTimeNs, Flags, ChId);
+}
+
+/*!
+ * Determines whether the callback is currently scheduled.
+ *
+ * @param[in] pEvent The event in question
+ */
+NvBool tmrEventOnList_IMPL
+(
+    OBJTMR    *pTmr,
+    PTMR_EVENT pEventPublic
+)
+{
+    PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic;
+    PTMR_EVENT_PVT pScan = pTmr->pRmActiveEventList;
+
+    //
+    // OS-timer backed events never appear on the RM active list; their
+    // queued state is tracked via TMR_FLAG_OS_TIMER_QUEUED instead.
+    //
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS))
+    {
+        return (pEvent->super.flags & TMR_FLAG_OS_TIMER_QUEUED) ? NV_TRUE : NV_FALSE;
+    }
+
+    while (pScan != NULL)
+    {
+        if (pScan == pEvent)
+        {
+            NV_ASSERT(pEvent->bInUse);
+            return NV_TRUE;
+        }
+        pScan = pScan->pNext;
+    }
+    return NV_FALSE;
+}
+
+/*!
+ * Warning! This code is dangerous, it can cause the whole system to crash. It will be
+ * removed as soon as possible! Use the new API!
+ * It remains for transitional purposes only.
+ */
+NvBool tmrCallbackOnList_IMPL
+(
+    OBJTMR            *pTmr,
+    TIMEPROC_OBSOLETE  Proc,
+    void              *Object
+)
+{
+    NvBool onList = NV_FALSE;
+    PTMR_EVENT_PVT tmrScan;
+    PTMR_EVENT_PVT tmrList;
+
+    tmrList = pTmr->pRmActiveEventList;
+
+    for (tmrScan = tmrList; tmrScan; tmrScan = tmrScan->pNext)
+    {
+        if ((Proc == tmrScan->pTimeProc_OBSOLETE) &&
+            (Object == tmrScan->super.pUserData))
+        {
+            onList = NV_TRUE;
+            break;
+        }
+    }
+
+    return onList;
+}
+
+/*!
+ * OBSOLETE: This will be removed very soon!
+ */
+static PTMR_EVENT_PVT
+_tmrGetNextFreeCallback
+(
+    OBJTMR *pTmr
+)
+{
+    PTMR_EVENT_PVT pEvent = NULL;
+
+    pEvent = pTmr->pRmCallbackFreeList_OBSOLETE;
+    if (pEvent != NULL)
+    {
+        NV_ASSERT(pEvent->bLegacy); // OBSOLETE, remove later
+        pTmr->pRmCallbackFreeList_OBSOLETE = pEvent->pNext;
+        // just to be sure.
+        pEvent->pNext = NULL;
+    }
+
+    return pEvent;
+}
+
+/*!
+ * Creates and inserts a node into the callback list.
+ *
+ * @param[in] pEvent Callback memory structure, provided by user.
+ * @param[in] Time   Absolute nanoseconds at which to call Proc.
+ *
+ * @returns Status
+ */
+static NV_STATUS
+_tmrInsertCallback
+(
+    OBJTMR         *pTmr,
+    PTMR_EVENT_PVT  pEvent,
+    NvU64           Time
+)
+{
+    NV_STATUS returnStatus = NV_ERR_GENERIC; // Indicate that the timer was NOT inserted in the list
+    OBJGPU *pGpu = ENG_GET_GPU(pTmr);
+
+    // If this is a free callback
+    if (!pEvent->bInUse && !tmrEventOnList(pTmr, (PTMR_EVENT)pEvent))
+    {
+        pEvent->timens = Time;
+
+        _tmrInsertCallbackInList(pGpu, pTmr, pEvent);
+
+        returnStatus = NV_OK;
+    }
+    else
+    {
+        // Shouldn't get here.
Don't call this function unless valid + NV_ASSERT_OR_RETURN(!"Invalid call to insert, already in use", NV_ERR_INVALID_ARGUMENT); + } + + return returnStatus; +} + +/*! + * Insert (time sorted) a specific event into the callback queue. + * + * Handles setting the next alarm time as well as enabling alarm if needed + * + * @param[in] pEvent The event to be inserted, must be initialized + */ +static void +_tmrInsertCallbackInList +( + OBJGPU *pGpu, + OBJTMR *pTmr, + PTMR_EVENT_PVT pEvent +) +{ + PTMR_EVENT_PVT pScan; + NvBool bAddedAsHead = NV_TRUE; + NvU64 nextAlarmTime; + + NV_ASSERT(!pEvent->bInUse); + + pEvent->bInUse = NV_TRUE; + + if (pTmr->pRmActiveEventList == NULL) + { + // Enable PTIMER interrupt. + tmrRmCallbackIntrEnable(pTmr, pGpu); + + // insert pEvent as first and only entry. + pEvent->pNext = NULL; + pTmr->pRmActiveEventList = pEvent; + } + else if (pEvent->timens <= pTmr->pRmActiveEventList->timens) + { + // insert pEvent as head entry of the non-empty callback list. + pEvent->pNext = pTmr->pRmActiveEventList; + pTmr->pRmActiveEventList = pEvent; + } + else + { + bAddedAsHead = NV_FALSE; + + pScan = pTmr->pRmActiveEventList; + + while (pScan->pNext != NULL) + { + if (pEvent->timens <= pScan->pNext->timens) + { + // insert into the middle of the list. + pEvent->pNext = pScan->pNext; + pScan->pNext = pEvent; + + break; + } + pScan = pScan->pNext; + } + + if (pScan->pNext == NULL) + { + // insert it at the end of the list. + pEvent->pNext = NULL; + pScan->pNext = pEvent; + } + } + + if (bAddedAsHead) + { + // Find out when the next alarm should be. + if (NV_OK != _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + // if there is no event list, then just use 0. + nextAlarmTime = 0; + } + + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } +} + +/*! + * Schedule a callback at the absolute time specified in units of nanoseconds. + * + * Account for bad scheduling times, if the time too close in the future push + * it back till a short delay later. This avoids some race conditions. Even though + * callbacks may be delayed. However callbacks will not happen early. + * + * @Note: For statically defined events it is recommended to preallocate them all + * at the appropriate stage in task life-cycle, and deallocated at the + * corresponding end of the life-cycle. For dynamically generated events + * consider the affects on fragmentation and potentially deferring deallocation. + * + * @param[in] pEvent Callback memory structure, provided by user. + * @param[in] Time Absolute nanoseconds at which to call Proc. + * + * @returns Status + */ +NV_STATUS tmrEventScheduleAbs_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic, + NvU64 Time +) +{ + NV_STATUS rmStatus = NV_OK; + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + NV_CHECK_OK(rmStatus, LEVEL_ERROR, + tmrEventScheduleAbsOSTimer_HAL(pTmr, pEventPublic, Time)); + return rmStatus; + } + + if (pEventPublic->pTimeProc == NULL && pEvent->pTimeProc_OBSOLETE == NULL) + { + // + // Bug 372159: Not sure exactly how this is happening, but we are seeing + // it in OCA. If you see this during development/testing, please update + // the bug. + // + NV_ASSERT_FAILED( + "Attempting to schedule callback with NULL procedure. " + "Please update Bug 372159 with appropriate information."); + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + else + { + // + // Insert this proc into the callback list. 
+ // + // if (Time <= CurrentTime + SCHEDULING_DELAY_MIN): + // + // We used to return NV_ERR_CALLBACK_NOT_SCHEDULED here. + // The next fix called the callback immediately in order to simulate + // it being "scheduled", however this introduced nasty stack-overflow + // due self rescheduling tasks. + // + // CL 16512758 fixed the stack-overflow issue, and added a case for + // handling callbacks scheduled to occur within 250 ns. Later we found + // out that a 1 us callback could cause the alarm to be set to the past + // and cause a 4+ second delay due to wrap-around. To fix this, we + // removed the 250 ns threshold, so that we will always re-read the + // current time after setting the alarm to prevent the wrap-around. + // + rmStatus = _tmrInsertCallback(pTmr, pEvent, Time); + } + + return rmStatus; +} + +/*! + * Warning! This code is dangerous, it can cause the whole system to crash. It will be + * removed as soon as possible! Use the new API! + */ +NV_STATUS tmrScheduleCallbackAbs_IMPL +( + OBJTMR *pTmr, + TIMEPROC_OBSOLETE Proc, + void *Object, + NvU64 Time, + NvU32 Flags, + NvU32 ChId +) +{ + PTMR_EVENT_PVT tmrInsert; + // Get a free callback from the free list. + if(pTmr->pRmCallbackFreeList_OBSOLETE == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (!tmrCallbackOnList(pTmr, Proc, Object)) + { + tmrInsert = _tmrGetNextFreeCallback(pTmr); + if (tmrInsert){ + tmrInsert->pTimeProc_OBSOLETE = Proc; + tmrInsert->super.pUserData = Object; + tmrInsert->super.flags = Flags; + tmrInsert->super.chId = ChId; + + return tmrEventScheduleAbs(pTmr, (PTMR_EVENT)tmrInsert, Time); + } + else + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + else + { + NV_PRINTF(LEVEL_ERROR, "Proc %p Object %p already on tmrList\n", Proc, + Object); + } + return NV_OK; +} + +/*! + * Searches specified lists for PTMR_EVENT associated with Object and + * removes it. + * + * @param[in] Object Unique identifier based on TMR_POBJECT_BASE (tmr.h) + * + * @returns None + */ +static void _tmrScanCallback +( + OBJTMR *pTmr, + void *pObject +) +{ + PTMR_EVENT_PVT tmrScan; + PTMR_EVENT_PVT tmrNext; + PTMR_EVENT_PVT tmrCurrent; + + // + // Start at the beginning of the callback list. + // + // 'current' is either the same as 'scan' or + // it's the item immediately before 'scan' in + // the algorithm below. + // + tmrScan = tmrCurrent = pTmr->pRmActiveEventList; + + // + // Loop through the callback list while there are entries. + // + while (tmrScan) + { + // Point to the next callback so that we + // can continue our scan through the list. + tmrNext = tmrScan->pNext; + + // + // Scan list looking for matches to 'Object'. + // + if (tmrScan->super.pUserData == pObject) + { + // + // If the 'current' is not the item to be deleted + // (It must be the previous item) then link it + // to the 'next' item + // + if (tmrCurrent != tmrScan) + { + tmrCurrent->pNext = tmrScan->pNext; + } + else + { + // + // If 'current' is the same as the item to be deleted + // then move it to the next item. + // + tmrCurrent = tmrNext; + + // + // Update the head pointer if removing the head entry. + // This fixes bug 93812. 
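+                //
+                // (Illustration: with an active list A -> B -> C where A is
+                // the entry being removed, the head pointer advances to B.)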
+                //
+                if (pTmr->pRmActiveEventList == tmrScan)
+                {
+                    pTmr->pRmActiveEventList = tmrScan->pNext;
+                }
+            }
+
+            if (tmrScan->bLegacy)
+            {
+                //
+                // Tack the object to be deleted onto the head of the
+                // callback free list (OBSOLETE)
+                //
+                tmrScan->pNext = pTmr->pRmCallbackFreeList_OBSOLETE;
+                pTmr->pRmCallbackFreeList_OBSOLETE = tmrScan;
+            }
+
+            tmrScan->bInUse = NV_FALSE;
+        }
+        else
+        {
+            //
+            // If we haven't deleted this item, then the 'current'
+            // item becomes this item. So 'scan' will advance ONE beyond
+            // the item that was NOT deleted, and 'current' becomes
+            // the item NOT deleted.
+            //
+            tmrCurrent = tmrScan;
+        }
+
+        // Now point to the 'next' object in the callback list.
+        tmrScan = tmrNext;
+    }
+}
+
+// Determine which (if any) callback should supply the next alarm time
+static NV_STATUS
+_tmrGetNextAlarmTime
+(
+    OBJTMR *pTmr,
+    NvU64  *pNextAlarmTime
+)
+{
+    if (pTmr->pRmActiveEventList == NULL)
+    {
+        *pNextAlarmTime = 0;
+        return NV_ERR_CALLBACK_NOT_SCHEDULED;
+    }
+
+    *pNextAlarmTime = pTmr->pRmActiveEventList->timens;
+
+    return NV_OK;
+}
+
+/*!
+ * Return the very next callback to be scheduled, removing it from the list
+ * and marking it as free (only "In Use" when in the list)
+ */
+static PTMR_EVENT_PVT _tmrPullCallbackFromHead
+(
+    OBJTMR *pTmr
+)
+{
+    PTMR_EVENT_PVT tmrDelete = pTmr->pRmActiveEventList;
+    if (tmrDelete)
+    {
+        // remove from callbackList
+        pTmr->pRmActiveEventList = tmrDelete->pNext;
+        tmrDelete->bInUse = NV_FALSE;
+
+        if (tmrDelete->bLegacy)
+        {
+            // Might be a race condition, but will be removed so it's OK
+            tmrDelete->pNext = pTmr->pRmCallbackFreeList_OBSOLETE;
+            pTmr->pRmCallbackFreeList_OBSOLETE = tmrDelete;
+        }
+    }
+
+    return tmrDelete;
+}
+
+/*!
+ * Time until the next callback expires.
+ *
+ * Returns NV_ERR_CALLBACK_NOT_SCHEDULED if no callbacks are scheduled.
+ */
+NV_STATUS
+tmrTimeUntilNextCallback_IMPL
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr,
+    NvU64  *pTimeUntilCallbackNs
+)
+{
+    NvU64 currentTime;
+    NvU64 nextAlarmTime;
+    NV_STATUS status;
+
+    *pTimeUntilCallbackNs = 0;
+
+    // Get the time from the first (earliest) entry.
+    status = _tmrGetNextAlarmTime(pTmr, &nextAlarmTime);
+    if (status != NV_OK)
+        return status;
+
+    status = tmrGetCurrentTime(pTmr, &currentTime);
+    if (status != NV_OK)
+        return status;
+
+    if (currentTime < nextAlarmTime)
+        *pTimeUntilCallbackNs = nextAlarmTime - currentTime;
+
+    return NV_OK;
+}
+
+/*!
+ * Used by tmrService, iteratively checks which callbacks need to be executed.
+ */
+NvBool
+tmrCallExpiredCallbacks_IMPL
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr
+)
+{
+    NvU64 currentTime = 0;
+    NvU64 nextAlarmTime;
+    PTMR_EVENT_PVT pEvent;
+    NV_STATUS rmStatus;
+    NvBool bProcessedCallback = NV_FALSE;
+
+    // Call all callbacks that have expired
+    if (pTmr && (tmrEventsExist(pTmr)))
+    {
+        // Check for expired time.
+        for (;;)
+        {
+            // Get the time from the first (earliest) entry.
+            rmStatus = _tmrGetNextAlarmTime(pTmr, &nextAlarmTime);
+            if (rmStatus != NV_OK)
+                break;
+
+            if (nextAlarmTime > currentTime)
+            {
+                rmStatus = tmrGetCurrentTime(pTmr, &currentTime);
+                if ((rmStatus != NV_OK) || (nextAlarmTime > currentTime))
+                    break;
+            }
+
+            // Pull from head of list.
+            pEvent = _tmrPullCallbackFromHead(pTmr);
+
+            if (pEvent &&
+                ((pEvent->super.pTimeProc != NULL) ||
+                 (pEvent->pTimeProc_OBSOLETE != NULL)))
+            {
+                // Call callback. This could insert a new callback into the list.
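+                //
+                // Note that _tmrPullCallbackFromHead() has already unlinked the
+                // event and cleared bInUse, so a callback is free to reschedule
+                // its own event from inside the callback body. A minimal
+                // self-rescheduling TIMEPROC might look like this (hypothetical
+                // sketch; _myTimerProc is not part of this file):
+                //
+                //     static NV_STATUS _myTimerProc(OBJGPU *pGpu, OBJTMR *pTmr, PTMR_EVENT pEvent)
+                //     {
+                //         // ...do periodic work, then re-arm 1 ms in the future...
+                //         return tmrEventScheduleRel(pTmr, pEvent, 1000000);
+                //     }
+                //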
+                if (pEvent->bLegacy)
+                {
+                    pEvent->pTimeProc_OBSOLETE(pGpu, pTmr, pEvent->super.pUserData);
+                }
+                else
+                {
+                    pEvent->super.pTimeProc(pGpu, pTmr, (PTMR_EVENT)pEvent);
+                }
+
+                bProcessedCallback = NV_TRUE;
+            }
+            else
+            {
+                //
+                // Bug 372159: Hopefully by checking that the callback procedure
+                // is not NULL in tmrEventScheduleAbs() we should never hit
+                // this point, but this is just to be certain. If you hit this
+                // assert please update Bug 372159 with pertinent details
+                // (Swak, !stacks, what you were developing/testing, etc.).
+                //
+                NV_ASSERT_FAILED(
+                    "Attempting to execute callback with NULL procedure. "
+                    "Please update Bug 372159 with appropriate information.");
+            }
+        }
+
+        //
+        // rmStatus is NV_OK only when there are more events in the list AND
+        // the GPU has not fallen off the bus AND the GPU is not in full chip
+        // reset.
+        //
+        // We get to this routine with bInterrupt set to true when we got
+        // (and cleared) the timer interrupt. So, we need to set it again.
+        //
+        if (rmStatus == NV_OK)
+        {
+            _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime);
+        }
+    }
+
+    return bProcessedCallback;
+}
+
+/*!
+ * TODO: document
+ */
+static void
+_tmrStateLoadCallbacks
+(
+    OBJGPU *pGpu,
+    OBJTMR *pTmr
+)
+{
+    NvU64 nextAlarmTime = 0;
+
+    if (tmrEventsExist(pTmr))
+    {
+        if (tmrGetCallbackInterruptPending(pGpu, pTmr))
+        {
+            if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime))
+            {
+                _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime);
+            }
+        }
+
+        //
+        // else - we have an alarm pending - just proceed to enable interrupts
+        // so that it is immediately handled
+        //
+        if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS))
+        {
+            tmrSetCountdownIntrEnable_HAL(pGpu, pTmr);
+        }
+        else
+        {
+            tmrSetAlarmIntrEnable_HAL(pGpu, pTmr);
+        }
+    }
+}
+
+/*!
+ * Wraps HAL functions to enable hardware timer interrupts for the rm callbacks.
+ */
+void
+tmrRmCallbackIntrEnable_IMPL
+(
+    OBJTMR *pTmr,
+    OBJGPU *pGpu
+)
+{
+    tmrResetCallbackInterrupt(pGpu, pTmr);
+
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS))
+    {
+        tmrSetCountdownIntrEnable_HAL(pGpu, pTmr);
+    }
+    else
+    {
+        tmrSetAlarmIntrEnable_HAL(pGpu, pTmr);
+    }
+}
+
+/*!
+ * Wraps HAL functions to disable hardware timer interrupts for the rm callbacks.
+ */
+void
+tmrRmCallbackIntrDisable_IMPL
+(
+    OBJTMR *pTmr,
+    OBJGPU *pGpu
+)
+{
+    if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS))
+    {
+        tmrSetCountdownIntrDisable_HAL(pGpu, pTmr);
+    }
+    else
+    {
+        tmrSetAlarmIntrDisable_HAL(pGpu, pTmr);
+    }
+}
+
+void
+tmrSetCountdownCallback_IMPL
+(
+    OBJTMR             *pTmr,
+    TIMEPROC_COUNTDOWN  pSwrlCallback
+)
+{
+    pTmr->pSwrlCallback = pSwrlCallback;
+}
+
+/*!
+ * TODO: document
+ */
+void
+tmrGetSystemTime_IMPL
+(
+    OBJTMR       *pTmr,
+    PDAYMSECTIME  pTime
+)
+{
+    NvU32 sec;
+    NvU32 usec;
+
+    //
+    // This function finds out the current time in terms of number of days and
+    // milliseconds since 1900. Note that the estimates are really crude since
+    // 1 year is treated as 365 days, 1 month as 30 days and so on. Keep these
+    // points in mind before using the function.
+    //
+    if (pTime != NULL)
+    {
+        // Get the system time and calculate the contents of the returned structure.
+        osGetCurrentTime(&sec, &usec);
+        pTime->days  = sec / (3600 * 24);           // # of days since ref point
+        sec          = sec % (3600 * 24);           // seconds since day began
+        pTime->msecs = sec * 1000 + (usec / 1000);  // milliseconds since day began
+    }
+}
+
+/*!
+ * This has become obsolete, it should be replaced with userData logic + */ +NvBool +tmrCheckCallbacksReleaseSem_IMPL +( + OBJTMR *pTmr, + NvU32 chId +) +{ + PTMR_EVENT_PVT pScan; + + for (pScan = pTmr->pRmActiveEventList; pScan != NULL; pScan = pScan->pNext) + { + if ((pScan->super.flags & TMR_FLAG_RELEASE_SEMAPHORE) && + (pScan->super.chId == chId)) + { + break; + } + } + + return pScan != NULL; +} + +/*! + * TODO: document + */ +void +tmrInitCallbacks_IMPL +( + OBJTMR *pTmr +) +{ + NvU32 i; + + // Initialize the timer callback lists. + pTmr->pRmActiveEventList = NULL; + + // Everything below this comment will be removed with new API + pTmr->pRmCallbackFreeList_OBSOLETE = pTmr->rmCallbackTable_OBSOLETE; + + // Fill in all the forward pointers in the callback table. + for (i = 0; i < (TMR_NUM_CALLBACKS_RM - 1); i++) + { + pTmr->rmCallbackTable_OBSOLETE[i].pNext = &pTmr->rmCallbackTable_OBSOLETE[i+1]; + pTmr->rmCallbackTable_OBSOLETE[i].bInUse = NV_FALSE; + pTmr->rmCallbackTable_OBSOLETE[i].bLegacy = NV_TRUE; + } + pTmr->rmCallbackTable_OBSOLETE[i].pNext = NULL; + pTmr->rmCallbackTable_OBSOLETE[i].bInUse = NV_FALSE; + pTmr->rmCallbackTable_OBSOLETE[i].bLegacy = NV_TRUE; +} + +/*! + * Searches for all events associated with an Object and removes them. + * + * @param[in,out] pTmr TMR object pointer + * @param[in] pObject Unique identifier based on TMR_POBJECT_BASE (tmr.h) + * + * @returns NV_OK always succeeds + */ +NV_STATUS +tmrCancelCallback_IMPL +( + OBJTMR *pTmr, + void *pObject +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + NvU64 nextAlarmTime; + + if (tmrEventsExist(pTmr) && pObject != NULL) + { + // Pull all objects with the same address from the callback list. + _tmrScanCallback(pTmr, pObject); + + // + // If there's anything left then set an alarm for the soonest one. + // Otherwise, disable the PTIMER interrupt altogether. + // + if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + else + { + // List is empty! Disable PTIMER interrupt. + tmrRmCallbackIntrDisable(pTmr, pGpu); + } + } + + return NV_OK; +} + +/*! + * TODO: document + * + * This function finds out if the (futureTime - pastTime) > maxCacheTimeInMSec + */ +NvBool +tmrDiffExceedsTime_IMPL +( + OBJTMR *pTmr, + PDAYMSECTIME pFutureTime, + PDAYMSECTIME pPastTime, + NvU32 time +) +{ + NvU32 msecsInADay = 1000 * 3600 * 24; + NvBool bRetVal = NV_FALSE; + + if ((pFutureTime->days < pPastTime->days) || + (((pFutureTime->days == pPastTime->days) && + (pFutureTime->msecs < pPastTime->msecs)))) + { + bRetVal = NV_TRUE; + } + else + { + // Because of overflow possibility, first check for diff in days + if ((((pFutureTime->days - pPastTime->days) + + (pFutureTime->msecs - pPastTime->msecs)/msecsInADay)) > (time/msecsInADay)) + { + bRetVal = NV_TRUE; + } + else + { + // Now diff in millisecs + if ((((pFutureTime->days - pPastTime->days) * msecsInADay) + + (pFutureTime->msecs - pPastTime->msecs)) > time) + { + bRetVal = NV_TRUE; + } + } + } + + return bRetVal; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateInitLocked_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + + return NV_OK; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateLoad_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 flags +) +{ + // Have to restore any pending callbacks' state + _tmrStateLoadCallbacks(pGpu, pTmr); + + return NV_OK; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateUnload_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 flags +) +{ + // Disable Timer interrupt. 
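+    //
+    // Both flavors are disabled unconditionally: which one is actually in use
+    // depends on PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS (see
+    // tmrRmCallbackIntrDisable above), and disabling the unused one is harmless.
+    //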
+ tmrSetAlarmIntrDisable_HAL(pGpu, pTmr); + tmrSetCountdownIntrDisable_HAL(pGpu, pTmr); + + return NV_OK; +} + +/*! + * TODO: document + */ +void +tmrStateDestroy_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + objDelete(pTmr->pGrTickFreqRefcnt); + pTmr->pGrTickFreqRefcnt = NULL; +} + +NV_STATUS +tmrapiConstruct_IMPL +( + TimerApi *pTimerApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +tmrapiDestruct_IMPL +( + TimerApi *pTimerApi +) +{ +} + +NV_STATUS +tmrapiGetRegBaseOffsetAndSize_IMPL +( + TimerApi *pTimerApi, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + NV_STATUS status; + NvU32 offset; + + status = gpuGetRegBaseOffset_HAL(GPU_RES_GET_GPU(pTimerApi), NV_REG_BASE_TIMER, &offset); + if (status != NV_OK) + return status; + + if (pOffset) + *pOffset = offset; + + if (pSize) + *pSize = sizeof(Nv01TimerMap); + + return NV_OK; +} + +void +tmrapiDeregisterEvents_IMPL(TimerApi *pTimerApi) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimerApi); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pTimerApi, INotifier)); + + // Validate the timer event + while (pNotifyEvent != NULL) + { + tmrCancelCallback(pTmr, pNotifyEvent); + + pNotifyEvent = pNotifyEvent->Next; + } +} + +//--------------------------------------------------------------------------- +// +// NV0004 Control Functions +// +//--------------------------------------------------------------------------- + +// +// There is some type hacking involved here. The inner callback is called correctly here +// though it is cast to the outer callback type for storage. The timer only sees the +// outer callback type directly so it will call it correctly, and this wrapper hides the +// inner callback and calls it correctly from itself. Hacky but it should work around the +// limitations in the SDK (all RM derived types undefined, so TIMEPROC type is impossible). +// +typedef NvU32 (*TMR_CALLBACK_FUNCTION)(void *pCallbackData); + +typedef struct +{ + TMR_CALLBACK_FUNCTION pTimeProc; + void *pCallbackData; +} wrapperStorage_t; + +static NV_STATUS _tmrCallbackWrapperfunction +( + OBJGPU *pGpu, + OBJTMR *pTmr, + PTMR_EVENT pEvent +) +{ + wrapperStorage_t *pObj_Inner = (wrapperStorage_t *)pEvent->pUserData; + + // Backup the wrapper function and data + TIMEPROC pCallback_Outer = pEvent->pTimeProc; + void *pCallbackData_Outer = pEvent->pUserData; + + // Swap in the inner function and data + pEvent->pTimeProc = (TIMEPROC) pObj_Inner->pTimeProc; // Intentionally the wrong type! + pEvent->pUserData = pObj_Inner->pCallbackData; + + // Perform the actual callback the way the user expects it + pObj_Inner->pTimeProc((void *)pEvent->pUserData); + + // Rewrap whatever changes the user may have made + pObj_Inner->pTimeProc = (TMR_CALLBACK_FUNCTION) pEvent->pTimeProc; + pObj_Inner->pCallbackData = pEvent->pUserData; + + // Restore the wrapper function and data + pEvent->pTimeProc = pCallback_Outer; + pEvent->pUserData = pCallbackData_Outer; + + return NV_OK; +} + +/*! + * Creates an event and initializes the wrapper callback data, putting the + * desired callback inside of it's struct to be swapped in later. + * + * @returns NV_STATUS + */ +NV_STATUS +tmrCtrlCmdEventCreate +( + OBJGPU *pGpu, + TMR_EVENT_SET_PARAMS *pParams +) +{ + NV_STATUS rc; + PTMR_EVENT pEvent; + wrapperStorage_t *pWrapper; + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + // ALlocate the wrapper's callerdata to store real caller data! 
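+    //
+    // The resulting call chain at expiry is, roughly (sketch based on
+    // _tmrCallbackWrapperfunction above):
+    //
+    //     timer core
+    //       -> _tmrCallbackWrapperfunction(pGpu, pTmr, pEvent)    // outer TIMEPROC
+    //            -> pWrapper->pTimeProc(pWrapper->pCallbackData)  // client callback
+    //
+    // so the client keeps the plain NvU32 (*)(void *) signature even though the
+    // timer core only ever sees a TIMEPROC.
+    //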
+ pWrapper = portMemAllocNonPaged(sizeof(wrapperStorage_t)); + if (pWrapper == NULL) + { + return NV_ERR_NO_MEMORY; + } + pWrapper->pTimeProc = (TMR_CALLBACK_FUNCTION)NvP64_VALUE(pParams->pTimeProc); + pWrapper->pCallbackData = NvP64_VALUE(pParams->pCallbackData); + + rc = tmrEventCreate(pTmr, + &pEvent, + _tmrCallbackWrapperfunction, + pWrapper, + pParams->flags); + + *(pParams->ppEvent) = NV_PTR_TO_NvP64(pEvent); + + return rc; +} + + +/*! + * Schedules an existing event. Takes in time arguments and a flag to + * determine if it should be interpreted as absolute or relative time. + * + * @returns NV_STATUS + */ +NV_STATUS +tmrCtrlCmdEventSchedule +( + OBJGPU *pGpu, + TMR_EVENT_SCHEDULE_PARAMS *pParams +) +{ + NV_STATUS rc; + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PTMR_EVENT pEvent = (PTMR_EVENT)NvP64_VALUE(pParams->pEvent); + + if(pParams->bUseTimeAbs) + { + rc = tmrEventScheduleAbs(pTmr, pEvent, pParams->timeNs); + } + else + { + rc = tmrEventScheduleRel(pTmr, pEvent, pParams->timeNs); + } + + return rc; +} + +/*! + * Cancels an existing event. NOP on unscheduled event. + * + * @returns NV_OK + */ +NV_STATUS +tmrCtrlCmdEventCancel +( + OBJGPU *pGpu, + TMR_EVENT_GENERAL_PARAMS *pParams +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PTMR_EVENT pEvent = (PTMR_EVENT)NvP64_VALUE(pParams->pEvent); + tmrEventCancel(pTmr, pEvent); + + return NV_OK; +} + +/*! + * Cancel and destroys an existing event. It also cleans up the special + * wrapper memory used by this API framework. + * + * @returns NV_OK + */ +NV_STATUS +tmrCtrlCmdEventDestroy +( + OBJGPU *pGpu, + TMR_EVENT_GENERAL_PARAMS *pParams +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PTMR_EVENT pEvent = (PTMR_EVENT)NvP64_VALUE(pParams->pEvent); + + // Free our temporary wrapper storage + portMemFree(pEvent->pUserData); + + tmrEventDestroy(pTmr, pEvent); + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c new file mode 100644 index 0000000..f0fa0f4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c @@ -0,0 +1,325 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/********************* Non-Chip Specific HAL TMR Routines ******************\ +* * +* This file contains TMR method implementations using OSTIMER * +* * +\***************************************************************************/ + +#include "objtmr.h" + +// +// This function returns current time from OS timer +// +NvU64 +tmrGetTimeEx_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 seconds; // Time since 1970 in seconds + NvU32 useconds; // and uSeconds. + NvU64 timeNs; // Time since 1970 in ns. + + // + // Get current time from operating system. + // + // We get the time in seconds and microseconds since 1970 + // Note that we don't really need the real time of day + // + osGetCurrentTime(&seconds, &useconds); + + // + // Calculate ns since 1970. + // + timeNs = ((NvU64)seconds * 1000000 + useconds) * 1000; + + return timeNs; +} + +/*! + * Creates OS timer event + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * @param[out] NV_STATUS + */ +NV_STATUS tmrEventCreateOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + + status = osCreateNanoTimer(pGpu->pOsGpuInfo, pEvent, &(pEvent->super.pOSTmrCBdata)); + + if (status != NV_OK) + { + pEvent->super.pOSTmrCBdata = NULL; + NV_PRINTF(LEVEL_ERROR, "OS create timer failed\n"); + } + + return status; +} + +/*! + * This function Starts or Schedules OS Timer + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * @param[in] absolute time in nano seconds + * + * @returns NV_ERR_INVALID_REQUEST failed to create timer +*/ +NV_STATUS tmrEventScheduleAbsOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent, + NvU64 timeNs +) +{ + NV_STATUS status= NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT) pPublicEvent; + + if (pEvent->super.pOSTmrCBdata == NULL) + { + NV_PRINTF(LEVEL_ERROR, "OS Timer not created\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->super.pOSTmrCBdata, timeNs); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "OS Start timer FAILED!\n"); + } + + pEvent->super.flags |= TMR_FLAG_OS_TIMER_QUEUED; + return status; +} + +/*! + * This function runs OS timer callback + * +* @param[in] pGpu Pointer to GPU object +* @param[in] pTmr Pointer to Timer Object +* @param[in] pEvent pointer to timer event information +* + * @returns NV_ERR_INVALID_REQUEST if callback not found + */ +NV_STATUS tmrEventServiceOSTimerCallback_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent +) +{ + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pPublicEvent; + NV_STATUS status = NV_OK; + + if (pEvent && (pEvent->super.pTimeProc != NULL)) + { + pEvent->super.pTimeProc(pGpu, pTmr, (PTMR_EVENT)pEvent); + pEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "ERROR No Timer event callback found, invalid timer SW state\n"); + status = NV_ERR_INVALID_REQUEST; + } + + return status; +} + +/*! 
+ * This function cancels OS timer callback + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * @returns NV_ERR_INVALID_REQUEST if callback entry not found + */ +NV_STATUS tmrEventCancelOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent +) +{ + NV_STATUS status= NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pTmrEvent = (PTMR_EVENT_PVT) pPublicEvent; + + if (pTmrEvent != NULL && pTmrEvent->super.pOSTmrCBdata != NULL) + { + // Cancel the callback of OS timer + status = osCancelNanoTimer(pGpu->pOsGpuInfo, pTmrEvent->super.pOSTmrCBdata); + pTmrEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "ERROR No Timer event callback found, invalid timer SW state\n"); + status = NV_ERR_INVALID_REQUEST; + } + + return status; +} + +/*! + * This function cancels OS timer callback + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * + * @returns NV_ERR_INVALID_REQUEST if callback entry not found + */ +NV_STATUS tmrEventDestroyOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent +) +{ + NV_STATUS status= NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pTmrEvent = (PTMR_EVENT_PVT) pPublicEvent; + + if (pTmrEvent != NULL && pTmrEvent->super.pOSTmrCBdata != NULL) + { + // Cancel the callback of OS timer + status = osDestroyNanoTimer(pGpu->pOsGpuInfo, pTmrEvent->super.pOSTmrCBdata); + pTmrEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "No Timer event callback found, invalid timer SW state\n"); + status = NV_ERR_INVALID_REQUEST; + } + + return status; +} + +NV_STATUS +tmrGetIntrStatus_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 *pStatus, + THREAD_STATE_NODE *pThreadState +) +{ + *pStatus = 0; + return NV_OK; +} + +// +// For functions that only need a short delta of time elapsed (~ 4.29 seconds) +// NOTE: Since it wraps around every 4.29 seconds, for general GetTime purposes, +// it's better to use tmrGetTime(). +// +NvU32 +tmrGetTimeLo_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + return NvU64_LO32(tmrGetTimeEx_HAL(pGpu, pTmr, NULL)); +} + +NvU64 +tmrGetTime_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + return tmrGetTimeEx_HAL(pGpu, pTmr, NULL); +} + +NvU32 +tmrReadTimeLoReg_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + return NvU64_LO32(tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState)); +} + +NvU32 +tmrReadTimeHiReg_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + return NvU64_HI32(tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState)); +} + +NV_STATUS +tmrGetGpuAndCpuTimestampPair_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU64 *pGpuTime, + NvU64 *pCpuTime +) +{ +#if PORT_IS_FUNC_SUPPORTED(portUtilExReadTimestampCounter) + *pGpuTime = tmrGetTimeEx_HAL(pGpu, pTmr, NULL); + *pCpuTime = portUtilExReadTimestampCounter(); + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS +tmrDelay_OSTIMER +( + OBJTMR *pTmr, + NvU32 nsec +) +{ + if (nsec > 50000000) // 50 ms. 
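+    //
+    // Long waits take the coarse millisecond osDelay() path below; only short
+    // waits use the high-resolution osDelayNs() path. For example,
+    // tmrDelay(pTmr, 100000000) sleeps ~100 ms via osDelay(100), while
+    // tmrDelay(pTmr, 500) uses osDelayNs(500).
+    //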
+ { + osDelay(nsec / 1000000); + } + else if (nsec > 0) + { + osDelayNs(nsec); + } + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c new file mode 100644 index 0000000..238b9c7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c @@ -0,0 +1,370 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/**************************************************************************** + * + * Description: + * This file contains the functions managing the gpu database + * + ***************************************************************************/ + +#include "gpu_mgr/gpu_db.h" +#include "core/system.h" + +#include "gpu/gpu.h" // for NBADDR + +NV_STATUS +gpudbConstruct_IMPL +( + GpuDb *pGpuDb +) +{ + listInit(&pGpuDb->gpuList, portMemAllocatorGetGlobalNonPaged()); + + pGpuDb->pLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (pGpuDb->pLock == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Gpu data base list lock init failed\n"); + listDestroy(&pGpuDb->gpuList); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +void +gpudbDestruct_IMPL +( + GpuDb *pGpuDb +) +{ + if (pGpuDb->pLock != NULL) + { + portSyncMutexDestroy(pGpuDb->pLock); + } + + listDestroy(&pGpuDb->gpuList); +} + +static PGPU_INFO_LIST_NODE +_gpudbFindGpuInfoByUuid +( + const NvU8 *pUuid +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode = NULL; + + for (pNode = listHead(&pGpuDb->gpuList); + pNode != NULL; + pNode = listNext(&pGpuDb->gpuList, pNode)) + { + if (portMemCmp(pNode->uuid, pUuid, RM_SHA1_GID_SIZE) == 0) + { + break; + } + } + + return pNode; +} + +NV_STATUS +gpudbRegisterGpu(const NvU8 *pUuid, const NBADDR *pUpstreamPortPciInfo, NvU64 pciInfo) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_OK; + NvU32 i = 0; + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode != NULL) + { + pNode->bShutdownState = NV_FALSE; + goto done; + } + + pNode = listAppendNew(&pGpuDb->gpuList); + if (pNode == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Append the list failed\n"); + status = 
NV_ERR_INSUFFICIENT_RESOURCES;
+        goto done;
+    }
+
+    portMemCopy(pNode->uuid, RM_SHA1_GID_SIZE, pUuid, RM_SHA1_GID_SIZE);
+
+    pNode->pciPortInfo.domain = gpuDecodeDomain(pciInfo);
+    pNode->pciPortInfo.bus = gpuDecodeBus(pciInfo);
+    pNode->pciPortInfo.device = gpuDecodeDevice(pciInfo);
+    pNode->pciPortInfo.function = 0;
+    pNode->pciPortInfo.bValid = NV_TRUE;
+
+    pNode->upstreamPciPortInfo.domain = pUpstreamPortPciInfo->domain;
+    pNode->upstreamPciPortInfo.bus = pUpstreamPortPciInfo->bus;
+    pNode->upstreamPciPortInfo.device = pUpstreamPortPciInfo->device;
+    pNode->upstreamPciPortInfo.function = pUpstreamPortPciInfo->func;
+    pNode->upstreamPciPortInfo.bValid = pUpstreamPortPciInfo->valid;
+
+    pNode->bShutdownState = NV_FALSE;
+
+    // Initialize all compute policies with default values
+    pNode->policyInfo.timeslice = NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_DEFAULT;
+
+    // Initialize all chosenIdx entries to _INVALID
+    for (i = 0; i < GPUDB_CLK_PROP_TOP_POLS_COUNT; ++i)
+    {
+        ct_assert(sizeof(pNode->clkPropTopPolsControl.chosenIdx[0]) == sizeof(NvU8));
+        pNode->clkPropTopPolsControl.chosenIdx[i] = NV_U8_MAX;
+    }
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Update/Set the compute policy config for a GPU
+*
+* @param[in]  uuid        GPU uuid
+* @param[in]  policyType  Policy for which config has to be set
+* @param[in]  policyInfo  Requested policy config
+*
+* @return NV_OK                    Config updated successfully
+* @return NV_ERR_INVALID_ARGUMENT  Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND  GPU entry in db not found
+*/
+NV_STATUS
+gpudbSetGpuComputePolicyConfig
+(
+    const NvU8              *pUuid,
+    NvU32                    policyType,
+    GPU_COMPUTE_POLICY_INFO *policyInfo
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb  *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+
+    if (pUuid == NULL || policyInfo == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    // Store the policy specific data
+    switch (policyType)
+    {
+        case NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE:
+            pNode->policyInfo.timeslice = policyInfo->timeslice;
+            status = NV_OK;
+            break;
+        default:
+            status = NV_ERR_INVALID_ARGUMENT;
+            break;
+    }
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Get all compute policy configs for a GPU
+*
+* @param[in]  uuid        GPU uuid
+* @param[out] policyInfo  Pointer in which to retrieve all compute policies
+*                         for the requested GPU
+*
+* @return NV_OK                    Configs retrieved successfully
+* @return NV_ERR_INVALID_ARGUMENT  Invalid argument specified
+* @return NV_ERR_OBJECT_NOT_FOUND  GPU entry in db not found
+*/
+NV_STATUS
+gpudbGetGpuComputePolicyConfigs
+(
+    const NvU8              *pUuid,
+    GPU_COMPUTE_POLICY_INFO *policyInfo
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuDb  *pGpuDb = SYS_GET_GPUDB(pSys);
+    GPU_INFO_LIST_NODE *pNode;
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+
+    if (pUuid == NULL || policyInfo == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portSyncMutexAcquire(pGpuDb->pLock);
+
+    pNode = _gpudbFindGpuInfoByUuid(pUuid);
+    if (pNode == NULL)
+    {
+        status = NV_ERR_OBJECT_NOT_FOUND;
+        goto done;
+    }
+
+    // Return the policy specific data
+    portMemCopy(policyInfo, sizeof(GPU_COMPUTE_POLICY_INFO),
+                &pNode->policyInfo, sizeof(GPU_COMPUTE_POLICY_INFO));
+    status = NV_OK;
+
+done:
+    portSyncMutexRelease(pGpuDb->pLock);
+    return status;
+}
+
+/*!
+* @brief Set clock policies control for a GPU +* +* @param[in] pUuid Pointer to GPU uuid +* @param[in] pControl Pointer to the control tuple +* +* @return NV_OK Configs retrieved successfully +* @return NV_ERR_INVALID_ARGUMENT Invalid argument specified +* @return NV_ERR_OBJECT_NOT_FOUND GPU entry in db not found +*/ +NV_STATUS +gpudbSetClockPoliciesControl +( + const NvU8 *pUuid, + GPU_CLK_PROP_TOP_POLS_CONTROL *pControl +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (pUuid == NULL || pControl == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + portMemCopy(&pNode->clkPropTopPolsControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL), + pControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL)); + + status = NV_OK; +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} + +/*! +* @brief Get clock policies control for a GPU +* +* @param[in] pUuid Pointer to GPU uuid +* @param[out] pControl Pointer to the control tuple +* +* @return NV_OK Configs retrieved successfully +* @return NV_ERR_INVALID_ARGUMENT Invalid argument specified +* @return NV_ERR_OBJECT_NOT_FOUND GPU entry in db not found +*/ +NV_STATUS +gpudbGetClockPoliciesControl +( + const NvU8 *pUuid, + GPU_CLK_PROP_TOP_POLS_CONTROL *pControl +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (pUuid == NULL || pControl == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + portMemCopy(pControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL), + &pNode->clkPropTopPolsControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL)); + + status = NV_OK; +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} + +NV_STATUS +gpudbSetShutdownState +( + const NvU8 *pUuid +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_OK; + + portSyncMutexAcquire(pGpuDb->pLock); + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + pNode->bShutdownState = NV_TRUE; + +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c new file mode 100644 index 0000000..7f1ae1e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* * +* GpuGrp Object Function Definitions. * +* * +\***************************************************************************/ + +#include "gpu_mgr/gpu_group.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "core/system.h" +#include "class/cl2080.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A +#include "nvlimits.h" + +/*! + * Creates the gpugrp object. + * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] gpuMask Mask of GPUs corresponding to this gpugrp + * + * @returns NV_OK on success, appropriate error on failure. + */ +NV_STATUS +gpugrpCreate_IMPL +( + OBJGPUGRP *pGpuGrp, + NvU32 gpuMask +) +{ + pGpuGrp->gpuMask = gpuMask; + // + // Add the gpugrp instance to the GPU objects in the mask + // At boot this call fails and is deferred to GPU post construct. + // When coming out of SLI this call is succeeding - and postconstruct + // is not called. + // + gpumgrAddDeviceInstanceToGpus(gpuMask); + return NV_OK; +} + +/*! + * Destroys gpugrp object. + * + * It first iterates over the GPUs that belong to this gpugrp + * object indicated by the gpuMask. + * Following this it destroys the object. + * + * @param[in] pGpuGrp gpugrp object pointer + */ +NV_STATUS +gpugrpDestroy_IMPL +( + OBJGPUGRP *pGpuGrp +) +{ + NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND; + OBJGPU *pGpu = NULL; + NvU32 gpuIndex = 0; + + // Add the gpugrp instance to the GPU objects in the mask + while ((pGpu = gpumgrGetNextGpu(pGpuGrp->gpuMask, &gpuIndex))) + { + rmStatus = NV_OK; + pGpu->deviceInstance = NV_MAX_DEVICES; + } + + // Call the utility routine that does the object deletion. + objDelete(pGpuGrp); + return rmStatus; +} + +/*! + * Gets the gpu mask for the gpugrp. + * + * @param[in] pGpuGrp gpugrp object pointer + * + * @returns NvU32 gpumask + */ +NvU32 +gpugrpGetGpuMask_IMPL(OBJGPUGRP *pGpuGrp) +{ + return pGpuGrp->gpuMask; +} + +/*! + * Sets the gpu mask for the gpugrp. + * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] gpuMask gpumask to set + * + */ +void +gpugrpSetGpuMask_IMPL(OBJGPUGRP *pGpuGrp, NvU32 gpuMask) +{ + pGpuGrp->gpuMask = gpuMask; +} +/*! 
+ * Gets the broadcast enabled state + * + * @param[in] pGpuGrp gpugrp object pointer + * + * @returns NvBool + */ +NvBool +gpugrpGetBcEnabledState_IMPL(OBJGPUGRP *pGpuGrp) +{ + return pGpuGrp->bcEnabled; +} + +/*! + * Sets the broadcast enable state + * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] bcState Broadcast enable state + * + */ +void +gpugrpSetBcEnabledState_IMPL(OBJGPUGRP *pGpuGrp, NvBool bcState) +{ + pGpuGrp->bcEnabled = bcState; +} + +/*! + * Sets the parent GPU for the gpugrp + * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] pGpu Parent GPU object pointer + * + */ +void +gpugrpSetParentGpu_IMPL +( + OBJGPUGRP *pGpuGrp, + OBJGPU *pParentGpu +) +{ + pGpuGrp->parentGpu = pParentGpu; +} + +/*! + * Gets the parent GPU for the gpugrp + * + * @param[in] pGpuGrp gpugrp object pointer + * + * @returns GPU pointer + */ +POBJGPU +gpugrpGetParentGpu_IMPL(OBJGPUGRP *pGpuGrp) +{ + return pGpuGrp->parentGpu; +} + + +/*! + * @brief gpugrpCreateVASpace - creates the GLobal VASpace for this gpugrp. + * + * This is created once per group. So for GPUs in SLI, there is only + * one of this created. + * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[in] pGpu Parent GPU object pointer + * @param[in] vaspaceClass VASPACE class to create + * @param[in] vaStart vaspace start + * @param[in] vaLimit vaspace limit + * @param[in] vaspaceFlags VASPACE flags for creation + * @param[out] ppGlobalVASpace Global vaspace that is created + * + * @return NV_OK on success or appropriate RM_ERR on failure + * + */ +NV_STATUS +gpugrpCreateGlobalVASpace_IMPL +( + OBJGPUGRP *pGpuGrp, + OBJGPU *pGpu, + NvU32 vaspaceClass, + NvU64 vaStart, + NvU64 vaLimit, + NvU32 vaspaceFlags, + OBJVASPACE **ppGlobalVASpace +) +{ + NV_STATUS rmStatus; + NvU32 gpuMask = pGpuGrp->gpuMask; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NvBool bcState = gpumgrGetBcEnabledStatus(pGpu); + + NV_ASSERT_OR_RETURN(ppGlobalVASpace != NULL, NV_ERR_INVALID_ARGUMENT); + *ppGlobalVASpace = NULL; + + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + vaspaceFlags |= VASPACE_FLAGS_ENABLE_VMM; + rmStatus = vmmCreateVaspace(pVmm, vaspaceClass, 0x0, gpuMask, vaStart, + vaLimit, 0, 0, NULL, vaspaceFlags, ppGlobalVASpace); + gpumgrSetBcEnabledStatus(pGpu, bcState); + if (NV_OK != rmStatus) + { + *ppGlobalVASpace = NULL; + return rmStatus; + } + pGpuGrp->pGlobalVASpace = (*ppGlobalVASpace); + + return rmStatus; +} + +/*! + * @brief gpugrpDestroyVASpace - Destroys the gpugrp global vaspace + * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[in] pGpu Parent GPU object pointer + * + * @return NV_OK on success or appropriate RM_ERR on failure + * + */ +NV_STATUS +gpugrpDestroyGlobalVASpace_IMPL(OBJGPUGRP *pGpuGrp, OBJGPU *pGpu) +{ + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NvBool bcState = gpumgrGetBcEnabledStatus(pGpu); + + // Nothing to destroy, bail out early + if (pGpuGrp->pGlobalVASpace == NULL) + return rmStatus; + + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + vmmDestroyVaspace(pVmm, pGpuGrp->pGlobalVASpace); + gpumgrSetBcEnabledStatus(pGpu, bcState); + pGpuGrp->pGlobalVASpace = NULL; + return rmStatus; +} + + +/*! 
+ * @brief gpugrpGetVASpace - retrieves the group global vaspace + * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[out] ppGlobalVASpace Global vaspace for this GPUGRP + * + * @return NV_OK on success + * NV_ERR_INVALID_ARGUMENT on NULL pointer parameter + * NV_ERR_OBJECT_NOT_FOUND if there is no device vaspace + */ +NV_STATUS +gpugrpGetGlobalVASpace_IMPL(OBJGPUGRP *pGpuGrp, OBJVASPACE **ppVASpace) +{ + NV_ASSERT_OR_RETURN(ppVASpace != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pGpuGrp->pGlobalVASpace == NULL) + { + *ppVASpace = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + *ppVASpace = pGpuGrp->pGlobalVASpace; + return NV_OK; +} + + +/*! + * @brief gpugrpGetGpuFromSubDeviceInstance - retrieves the pGpu associated to + * a GPU group and a subdevice instance. + * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[in] subDeviceInst GPU sundevice Instance + * @param[out] ppGpu POBJGPU* pointer + * + * @return NV_OK on success + * NV_ERR_INVALID_ARGUMENT on NULL pointer parameter + * NV_ERR_OBJECT_NOT_FOUND if there is no GPU for the input parameters + */ +NV_STATUS +gpugrpGetGpuFromSubDeviceInstance_IMPL(OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, OBJGPU **ppGpu) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuInst = 0; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_ERR_INVALID_ARGUMENT); + + *ppGpu = NULL; + + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + // check for single GPU case + if (gpumgrGetSubDeviceCount(gpuMask) == 1) + { + *ppGpu = gpumgrGetNextGpu(gpuMask, &gpuInst); + } + else + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInst)) != NULL) + { + if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) == subDeviceInst) + { + *ppGpu = pGpu; + break; + } + } + } + return (*ppGpu == NULL ? NV_ERR_OBJECT_NOT_FOUND : NV_OK); +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c new file mode 100644 index 0000000..354581f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** + * + * File: gpu_mgmt_api.c + * + * Description: + * This file contains the functions managing the GPU information + * encapsulated by GPUDB object or probed state GPU. + * + *****************************************************************************/ + +#include "core/core.h" +#include "gpu_mgr/gpu_mgmt_api.h" +#include "gpu_mgr/gpu_db.h" + +NV_STATUS +gpumgmtapiConstruct_IMPL +( + GpuManagementApi *pGpuMgmt, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +gpumgmtapiDestruct_IMPL +( + GpuManagementApi *pGpuMgmt +) +{ +} + +NV_STATUS +gpumgmtapiCtrlCmdSetShutdownState_IMPL +( + GpuManagementApi *pGpuMgmt, + NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams +) +{ + return gpudbSetShutdownState(pParams->uuid); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c new file mode 100644 index 0000000..ac57fce --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c @@ -0,0 +1,2554 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Routines ***************************\ +* GPU Manager * +\***************************************************************************/ + + + +#include "core/system.h" +#include "core/locks.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "tls/tls.h" +#include "nvrm_registry.h" +#include "nv_ref.h" +#include "nvlimits.h" + +// local static funcs +static void gpumgrSetAttachInfo(OBJGPU *, GPUATTACHARG *); +static void gpumgrGetGpuHalFactor(NvU32 *pChipId0, NvU32 *pChipId1, RM_RUNTIME_VARIANT *pRmVariant, GPUATTACHARG *pAttachArg); + +static void +_gpumgrUnregisterRmCapsForGpuUnderLock(NvU64 gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + // SMC partition caps must be destroyed before GPU caps. 
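+    // (The GPU's own caps entry is unregistered in the loop below.)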
+ gpumgrUnregisterRmCapsForMIGGI(gpuDomainBusDevice); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuDomainBusDevice == gpuDomainBusDevice && + pProbedGpu->gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + osRmCapUnregister(&pProbedGpu->pOsRmCaps); + break; + } + } +} + +static void +_gpumgrUnregisterRmCapsForGpu(NvU64 gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + _gpumgrUnregisterRmCapsForGpuUnderLock(gpuDomainBusDevice); + portSyncMutexRelease(pGpuMgr->probedGpusLock); +} + +static NV_STATUS +_gpumgrRegisterRmCapsForGpu(OBJGPU *pGpu) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (((pProbedGpu->gpuDomainBusDevice == 0) && pGpu->bIsSOC) || + (pProbedGpu->gpuDomainBusDevice == gpuGetDBDF(pGpu) && + pProbedGpu->gpuId != NV0000_CTRL_GPU_INVALID_ID)) + { + if (pProbedGpu->pOsRmCaps == NULL) + { + status = osRmCapRegisterGpu(pGpu->pOsGpuInfo, + &pProbedGpu->pOsRmCaps); + } + else + { + status = NV_OK; + } + + pGpu->pOsRmCaps = pProbedGpu->pOsRmCaps; + break; + } + } + + NV_ASSERT(status == NV_OK); + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + return status; +} + +// +// ODB functions +// +NV_STATUS +gpumgrConstruct_IMPL(OBJGPUMGR *pGpuMgr) +{ + NvU32 i; + + NV_PRINTF(LEVEL_INFO, "gpumgrConstruct\n"); + + pGpuMgr->numGpuHandles = 0; + + for (i = 0; i < NV_MAX_DEVICES; i++) + pGpuMgr->gpuHandleIDList[i].gpuInstance = NV_MAX_DEVICES; + + pGpuMgr->probedGpusLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + + if (pGpuMgr->probedGpusLock == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + portMemSet(&pGpuMgr->probedGpus[i], 0, sizeof(PROBEDGPU)); + pGpuMgr->probedGpus[i].gpuId = NV0000_CTRL_GPU_INVALID_ID; + } + + pGpuMgr->gpuAttachCount = 0; + pGpuMgr->gpuAttachMask = 0; + + pGpuMgr->deviceCount = 0; + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++) + { + pGpuMgr->pGpuGrpTable[i] = NULL; + } + + pGpuMgr->powerDisconnectedGpuCount = 0; + + return NV_OK; +} + + +void +gpumgrDestruct_IMPL(OBJGPUMGR *pGpuMgr) +{ + NV_PRINTF(LEVEL_INFO, "gpumgrDestruct\n"); + + portSyncMutexDestroy(pGpuMgr->probedGpusLock); +} + +// +// gpumgrAllocGpuInstance +// +// This interface returns the next available gpu number. +// +NV_STATUS +gpumgrAllocGpuInstance(NvU32 *pGpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + NvU32 i; + NvU64 availableIDs = ((1ULL << NV_MAX_DEVICES) - 1); + + if (pGpuMgr->numGpuHandles == 0) + { + *pGpuInstance = 0; + return NV_OK; + } + else if (pGpuMgr->numGpuHandles == NV_MAX_DEVICES) + { + *pGpuInstance = NV_MAX_DEVICES; + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + for (i = 0; i < pGpuMgr->numGpuHandles; i++) + availableIDs &= ~NVBIT(pGpuMgr->gpuHandleIDList[i].gpuInstance); + + for (i = 0; ((availableIDs & (1ULL << i)) == 0); i++) + ; + + *pGpuInstance = i; + + return NV_OK; +} + +// +// During destruction of a GPU the handle list needs to be modified. 
+// Since we cannot guarantee the _last_ GPU will always be the one +// destroyed we have to compact the handle list so we have no gaps +// and can simply decrement numGpuHandles. +// +static void +_gpumgrShiftDownGpuHandles(NvU32 startIndex) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i, lastMovedIndex = startIndex; + + for (i = startIndex; i < (NV_MAX_DEVICES - 1); i++) + { + if (pGpuMgr->gpuHandleIDList[i + 1].pGpu != 0) + { + lastMovedIndex = i + 1; + pGpuMgr->gpuHandleIDList[i].gpuInstance = + pGpuMgr->gpuHandleIDList[i + 1].gpuInstance; + + pGpuMgr->gpuHandleIDList[i].pGpu = + pGpuMgr->gpuHandleIDList[i + 1].pGpu; + } + } + + pGpuMgr->gpuHandleIDList[lastMovedIndex].gpuInstance = NV_MAX_DEVICES; + pGpuMgr->gpuHandleIDList[lastMovedIndex].pGpu = reinterpretCast(NULL, OBJGPU *); + pGpuMgr->numGpuHandles--; +} + +static void +_gpumgrDestroyGpu(NvU32 gpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu; + NvU32 i; + + osSyncWithGpuDestroy(NV_TRUE); + + pGpu = gpumgrGetGpu(gpuInstance); + + if (pGpu != NULL) + { + objDelete(pGpu); + } + + for (i = 0; i < pGpuMgr->numGpuHandles; i++) + { + if (pGpuMgr->gpuHandleIDList[i].gpuInstance == gpuInstance) + { + pGpuMgr->gpuHandleIDList[i].gpuInstance = NV_MAX_DEVICES; + pGpuMgr->gpuHandleIDList[i].pGpu = reinterpretCast(NULL, OBJGPU *); + _gpumgrShiftDownGpuHandles(i); + break; + } + } + + osSyncWithGpuDestroy(NV_FALSE); +} + +POBJGPU +gpumgrGetGpu(NvU32 gpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + for (i = 0; i < pGpuMgr->numGpuHandles; i++) + if (pGpuMgr->gpuHandleIDList[i].gpuInstance == gpuInstance) + if (pGpuMgr->gpuHandleIDList[i].pGpu) + return pGpuMgr->gpuHandleIDList[i].pGpu; + + return NULL; +} + +POBJGPU +gpumgrGetSomeGpu(void) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuIndex = 0; + NvU32 gpuCount = 0; + + // Get some gpu to get the SLI Display Parent + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + + if (pGpu == NULL) + { + // None of the GPUs are initialized - Too early + NV_PRINTF(LEVEL_ERROR, + "Failed to retrieve pGpu - Too early call!.\n"); + NV_ASSERT(NV_FALSE); + return pGpu; + } + return pGpu; +} + + +// +// gpumgrAllocDeviceInstance +// +// This interface returns the next available broadcast device number. +// This broadcast device number is used to uniquely identify this set +// of gpu(s) both internally in the RM (e.g. OBJGPUGRP handle) as well +// as via the architecture (e.g., for the 'deviceId' parameter of +// NV0080_ALLOC_PARAMETERS). +// +NV_STATUS +gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++) + if (pGpuMgr->pGpuGrpTable[i] == NULL) + break; + + if (i == NV_MAX_DEVICES) + { + *pDeviceInstance = NV_MAX_DEVICES; + return NV_ERR_GENERIC; + } + + *pDeviceInstance = i; + + return NV_OK; +} + +// +// gpumgrGetGpuAttachInfo +// +// Returns current gpu attach info. +// +NV_STATUS +gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + // caller can pass in NULL for outparams that it doesn't need. 
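+    //
+    // For example, both of these are valid:
+    //     gpumgrGetGpuAttachInfo(&gpuCount, NULL);  // count only
+    //     gpumgrGetGpuAttachInfo(NULL, &gpuMask);   // mask only
+    //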
+ if (pGpuCnt) *pGpuCnt = pGpuMgr->gpuAttachCount; + if (pGpuMask) *pGpuMask = pGpuMgr->gpuAttachMask; + + return NV_OK; +} + +NvU32 +gpumgrGetDeviceGpuMask(NvU32 deviceInstance) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromInstance(deviceInstance); + + if (pGpuGrp == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "Could not find GPU Group for deviceInstance 0x%x!\n", + deviceInstance); + return 0; + } + + return gpugrpGetGpuMask(pGpuGrp); +} + +NV_STATUS +gpumgrIsDeviceInstanceValid(NvU32 deviceInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPUGRP *pGpuGrp = NULL; + + if (deviceInstance >= NV_MAX_DEVICES) + return NV_ERR_INVALID_ARGUMENT; + + pGpuGrp = pGpuMgr->pGpuGrpTable[deviceInstance]; + if (NULL == pGpuGrp) + return NV_ERR_INVALID_DATA; + + if (0 == gpugrpGetGpuMask(pGpuGrp)) + return NV_ERR_INVALID_ARGUMENT; + + return NV_OK; +} + +NvBool +gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance) +{ + if (subDeviceInstance >= NV2080_MAX_SUBDEVICES) + return NV_FALSE; + + return NV_TRUE; +} + +NvU32 gpumgrGetPrimaryForDevice(NvU32 deviceInstance) +{ + NvU32 gpuMask, gpuInstance = 0; + OBJGPU *pGpu = NULL; + + gpuMask = gpumgrGetDeviceGpuMask(deviceInstance); + + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + if (pGpu != NULL) + { + return pGpu->gpuInstance; + } + } + else + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (gpumgrIsParentGPU(pGpu)) + { + return pGpu->gpuInstance; + } + } + } + + NV_PRINTF(LEVEL_ERROR, + "deviceInstance 0x%x does not exist!\n", deviceInstance); + + return 0; // this should not happen, never +} + +NvBool +gpumgrIsDeviceEnabled(NvU32 deviceInstance) +{ + NvU32 gpuMask, gpuInstance = 0; + NvBool bEnabled; + + gpuMask = gpumgrGetDeviceGpuMask(deviceInstance); + + if (gpuMask == 0) + { + return NV_FALSE; + } + /* + * Check if this device + * - has been disabled via Power-SLI + * - is in the "drain" state + */ + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + OBJGPU *pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + + if (pGpu == NULL) + return NV_FALSE; + + if ((gpumgrQueryGpuDrainState(pGpu->gpuId, &bEnabled, NULL) == NV_OK) + && bEnabled) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +// +// gpumgrRegisterGpuId +// +// This interface is used by os-dependent code to insert a probed +// gpu into the table of probed gpus known to the RM. 
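+//
+// A typical sequence from the os-dependent probe/remove path would be
+// (sketch only; gpuId and gpuDomainBusDevice come from the caller):
+//
+//     gpumgrRegisterGpuId(gpuId, gpuDomainBusDevice);  // at device probe
+//     gpumgrSetUuid(gpuId, rawGid);                    // once the GID is read
+//     ...
+//     gpumgrUnregisterGpuId(gpuId);                    // at device removal
+//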
+// +NV_STATUS +gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice) +{ + NV_STATUS status = NV_ERR_INSUFFICIENT_RESOURCES; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + NV_PRINTF(LEVEL_ERROR, + "GPU id 0x%x already registered at index %u\n", + gpuId, i); + + // Duplicate gpu + status = NV_ERR_IN_USE; + goto done; + } + + if (pGpuMgr->probedGpus[i].gpuId == NV0000_CTRL_GPU_INVALID_ID) + { + pGpuMgr->probedGpus[i].gpuId = gpuId; + pGpuMgr->probedGpus[i].gpuDomainBusDevice = gpuDomainBusDevice; + pGpuMgr->probedGpus[i].bInitAttempted = NV_FALSE; + pGpuMgr->probedGpus[i].bExcluded = NV_FALSE; + pGpuMgr->probedGpus[i].bUuidValid = NV_FALSE; + pGpuMgr->probedGpus[i].pOsRmCaps = NULL; + status = NV_OK; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return status; +} + +// +// gpumgrUnregisterGpuId +// +// This interface is used by os-dependent code to remove a gpu +// from the table of probed gpus known to the RM. +// +NV_STATUS +gpumgrUnregisterGpuId(NvU32 gpuId) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + _gpumgrUnregisterRmCapsForGpuUnderLock(pProbedGpu->gpuDomainBusDevice); + pProbedGpu->gpuId = NV0000_CTRL_GPU_INVALID_ID; + pProbedGpu->bDrainState = NV_FALSE; + pProbedGpu->bRemoveIdle = NV_FALSE; + pProbedGpu->bExcluded = NV_FALSE; + pProbedGpu->bUuidValid = NV_FALSE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrExcludeGpuId +// +// This interface is used by os-dependent code to 'exclude' a gpu. +// +// gpuId: the device to exclude +// +NV_STATUS +gpumgrExcludeGpuId(NvU32 gpuId) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + pProbedGpu->bExcluded = NV_TRUE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrSetUuid +// +// This interface is used by os-dependent code to pass the UUID for a gpu. +// The UUID is a 16-byte raw UUID/GID. 
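+// The cached copy is RM_SHA1_GID_SIZE (16) bytes; clients that need a printable
+// form go through gpumgrGetGpuUuidInfo() below, which converts the raw GID via
+// transformGidToUserFriendlyString().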
+//
+NV_STATUS
+gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    if (uuid == NULL)
+        return NV_ERR_INVALID_DATA;
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++)
+    {
+        PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i];
+
+        if (pProbedGpu->gpuId == gpuId)
+        {
+            portMemCopy(pProbedGpu->uuid, RM_SHA1_GID_SIZE, uuid, RM_SHA1_GID_SIZE);
+            pProbedGpu->bUuidValid = NV_TRUE;
+            goto done;
+        }
+    }
+
+done:
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+    return NV_OK;
+}
+
+//
+// gpumgrGetCachedUuid
+//
+// Lookup the cached UUID for a GPU
+//
+static NV_STATUS
+gpumgrGetCachedUuid(NvU32 gpuId, NvU8 *uuid, unsigned int len)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+    NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND;
+
+    if (uuid == NULL || len < RM_SHA1_GID_SIZE)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++)
+    {
+        PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i];
+
+        if (pProbedGpu->gpuId == gpuId)
+        {
+            if (pProbedGpu->bUuidValid)
+            {
+                portMemCopy(uuid, RM_SHA1_GID_SIZE, pProbedGpu->uuid, RM_SHA1_GID_SIZE);
+                status = NV_OK;
+            }
+            else
+                status = NV_ERR_NOT_SUPPORTED;
+            goto done;
+        }
+    }
+
+done:
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+    return status;
+}
+
+NV_STATUS
+gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags)
+{
+    NvU8 *pUuid;
+    NV_STATUS status;
+
+    if (ppUuidStr == NULL || pUuidStrLen == NULL)
+        return NV_ERR_INVALID_DATA;
+
+    // gpumgr only supports SHA1 format; error out if requesting otherwise
+    if (!FLD_TEST_DRF(0000_CTRL_CMD, _GPU_GET_UUID_FROM_GPU_ID_FLAGS, _TYPE, _SHA1, uuidFlags))
+        return NV_ERR_INVALID_ARGUMENT;
+
+    pUuid = portMemAllocNonPaged(RM_SHA1_GID_SIZE);
+    if (pUuid == NULL)
+        return NV_ERR_NO_MEMORY;
+
+    status = gpumgrGetCachedUuid(gpuId, pUuid, RM_SHA1_GID_SIZE);
+    if (status != NV_OK)
+    {
+        portMemFree(pUuid);
+        return status;
+    }
+
+    if (FLD_TEST_DRF(0000_CTRL_CMD, _GPU_GET_UUID_FROM_GPU_ID_FLAGS, _FORMAT, _BINARY, uuidFlags))
+    {
+        // Binary case - pUuid is freed by the caller
+        *ppUuidStr = pUuid;
+        *pUuidStrLen = RM_SHA1_GID_SIZE;
+    }
+    else
+    {
+        // Conversion to ASCII or UNICODE
+        status = transformGidToUserFriendlyString(pUuid, RM_SHA1_GID_SIZE,
+                                                  ppUuidStr, pUuidStrLen, uuidFlags);
+        portMemFree(pUuid);
+    }
+
+    return status;
+}
+
+static void
+gpumgrGetGpuHalFactorOfVirtual(NvBool *pIsVirtual, GPUATTACHARG *pAttachArg)
+{
+    *pIsVirtual = NV_FALSE;
+}
+
+static NvBool gpumgrCheckRmFirmwarePolicy
+(
+    NvU64  nvDomainBusDeviceFunc,
+    NvBool bRequestFwClientRm,
+    NvU32  pmcBoot42
+)
+{
+
+    if (!bRequestFwClientRm)
+        return NV_FALSE;
+
+    return NV_TRUE;
+}
+
+//
+// gpumgrGetGpuHalFactor
+//
+// Get the GPU HAL factors that are used to initialize the HAL binding.
+//
+// TODO: later, this function will be used to read out the NVOC Halspec init value for OBJGPU
+//
+static void
+gpumgrGetGpuHalFactor
+(
+    NvU32   *pChipId0,
+    NvU32   *pChipId1,
+    RM_RUNTIME_VARIANT *pRmVariant,
+    GPUATTACHARG *pAttachArg
+)
+{
+    NvBool isVirtual;
+    NvBool isFwClient;
+
+    DEVICE_MAPPING gpuDevMapping = {0};
+    gpuDevMapping.gpuNvAddr   = pAttachArg->regBaseAddr;
+    gpuDevMapping.gpuNvLength = pAttachArg->regLength;
+
+    // get ChipId0 and ChipId1
+    if (pAttachArg->socDeviceArgs.specified)
+    {
+        // This path is taken for T234D+ devices.
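+        //
+        // (socChipId0 is assumed to carry the SOC HIDREV value here;
+        // _gpumgrCreateGpu() later decodes its CHIPID/MAJORREV fields to
+        // seed the ChipHal.)
+        //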
+
+        *pChipId0 = pAttachArg->socDeviceArgs.socChipId0;
+        *pChipId1 = 0;
+        isVirtual = NV_FALSE;
+    }
+    else if (pAttachArg->bIsSOC)
+    {
+        // This path is only taken for ARCH MODS iGPU verification.
+
+        *pChipId0 = pAttachArg->socChipId0;
+        *pChipId1 = 0;
+        isVirtual = NV_FALSE;
+    }
+    else
+    {
+        //
+        // The PMC_BOOT_42 register was introduced on G94+ chips and remains
+        // internal to NVIDIA. PMC_BOOT_0, which we previously used as the
+        // internal ID, is now exposed to customers.
+        //
+        *pChipId0 = osDevReadReg032(/*pGpu=*/ NULL, &gpuDevMapping, NV_PMC_BOOT_0);
+        *pChipId1 = osDevReadReg032(/*pGpu=*/ NULL, &gpuDevMapping, NV_PMC_BOOT_42);
+
+        gpumgrGetGpuHalFactorOfVirtual(&isVirtual, pAttachArg);
+    }
+
+    isFwClient = gpumgrCheckRmFirmwarePolicy(pAttachArg->nvDomainBusDeviceFunc,
+                                             pAttachArg->bRequestFwClientRm,
+                                             *pChipId1);
+
+    if (RMCFG_FEATURE_PLATFORM_GSP || RMCFG_FEATURE_PLATFORM_DCE)
+        *pRmVariant = RM_RUNTIME_VARIANT_UCODE;
+    else if (isVirtual)
+        *pRmVariant = RM_RUNTIME_VARIANT_VF;
+    else if (isFwClient)
+        *pRmVariant = RM_RUNTIME_VARIANT_PF_KERNEL_ONLY;
+    else
+        *pRmVariant = RM_RUNTIME_VARIANT_PF_MONOLITHIC;   // default, monolithic mode
+}
+
+
+//
+// _gpumgrCreateGpu
+//
+// Former _sysCreateGpu(). The function was moved to Gpumgr to hide struct
+// GPUATTACHARG from SYS. SYS is still the parent object of both GPUMGR and
+// GPU.
+//
+static NV_STATUS
+_gpumgrCreateGpu(NvU32 gpuInstance, GPUATTACHARG *pAttachArg)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU    *pGpu;
+    NV_STATUS  status;
+    RM_RUNTIME_VARIANT rmVariant;
+    NvU32      chipId0;    // 32-bit chipId (pmcBoot0 on GPU)
+    NvU32      chipId1;    // 32-bit chipId (pmcBoot42 on GPU)
+    NvU32      hidrev, majorRev;
+
+    gpumgrGetGpuHalFactor(&chipId0, &chipId1, &rmVariant, pAttachArg);
+
+    hidrev   = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _CHIPID, chipId0);
+    majorRev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _MAJORREV, chipId0);
+
+    // WAR: The majorrev of t234 shows 0xa on fmodel instead of 0x4
+    if ((hidrev == 0x23) && (majorRev == 0xa))
+    {
+        majorRev = 0x4;
+    }
+
+    hidrev = (hidrev << 4) | majorRev;
+
+    // create OBJGPU with halspec factor initialization value
+    status = objCreate(&pGpu, pSys, OBJGPU,
+                /* ChipHal_arch = */ DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, chipId1),
+                /* ChipHal_impl = */ DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, chipId1),
+              /* ChipHal_hidrev = */ hidrev,
+      /* RmVariantHal_rmVariant = */ rmVariant,
+             /* DispIpHal_ipver = */ 0,    // initialized later
+            /* ctor.gpuInstance = */ gpuInstance);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    // legacy chip-config Hal registration path
+    status = gpuBindHalLegacy(pGpu, chipId0, chipId1);
+    if (status != NV_OK)
+    {
+        objDelete(pGpu);
+        return status;
+    }
+
+    //
+    // Save away the public ID associated with the handle just returned
+    // from create object.
+    //
+    pGpuMgr->gpuHandleIDList[pGpuMgr->numGpuHandles].gpuInstance = gpuInstance;
+    pGpuMgr->gpuHandleIDList[pGpuMgr->numGpuHandles].pGpu = pGpu;
+
+    pGpuMgr->numGpuHandles++;
+
+    return status;
+}
+
+
+static void
+_gpumgrGetEncSessionStatsReportingState(OBJGPU *pGpu)
+{
+}
+
+//
+// gpumgrAttachGpu
+//
+// This interface is used by os-dependent code to attach a new gpu
+// to the pool managed by the RM. Construction of OBJGPU and its
+// descendants is handled here, along with any other necessary prep
+// for the subsequent gpu preinit/init stages.
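+//
+// Expected os-layer flow (illustrative sketch; exact sequencing and error
+// handling are os-dependent):
+//
+//     gpumgrRegisterGpuId(gpuId, dbdf);          // at probe time
+//     gpumgrAttachGpu(gpuInstance, &attachArg);  // constructs OBJGPU
+//     gpumgrStatePreInitGpu(pGpu);               // unicast pre-init
+//     gpumgrStateInitGpu(pGpu);                  // unicast init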
+//
+NV_STATUS
+gpumgrAttachGpu(NvU32 gpuInstance, GPUATTACHARG *pAttachArg)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU    *pGpu    = NULL;
+    NV_STATUS  status;
+
+    LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner());
+
+    // create the new OBJGPU
+    if ((status = _gpumgrCreateGpu(gpuInstance, pAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    // get a pointer to the new OBJGPU
+    pGpu = gpumgrGetGpu(gpuInstance);
+
+    // load up attach parameters
+    gpumgrSetAttachInfo(pGpu, pAttachArg);
+
+    // Load OOR check address mode based on arch
+#if defined(NVCPU_X86_64)
+    pGpu->busInfo.oorArch = OOR_ARCH_X86_64;
+#elif defined(NVCPU_PPC64LE)
+    pGpu->busInfo.oorArch = OOR_ARCH_PPC64LE;
+#elif defined(NVCPU_ARM)
+    pGpu->busInfo.oorArch = OOR_ARCH_ARM;
+#elif defined(NVCPU_AARCH64)
+    pGpu->busInfo.oorArch = OOR_ARCH_AARCH64;
+#else
+    pGpu->busInfo.oorArch = OOR_ARCH_NONE;
+#endif
+
+    pGpu->pOS = SYS_GET_OS(pSys);
+
+    // let os fill in dpc details before we get into engine construction
+    if ((status = osDpcAttachGpu(pGpu, pAttachArg->pOsAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    // let os fill in what it needs before we get into engine construction
+    if ((status = osAttachGpu(pGpu, pAttachArg->pOsAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    NV_ASSERT((pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)) == 0);
+    pGpuMgr->gpuAttachMask |= NVBIT(gpuInstance);
+    pGpuMgr->gpuAttachCount++;
+
+    status = _gpumgrRegisterRmCapsForGpu(pGpu);
+    if (status != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    // finish gpu construction
+    if ((status = gpuPostConstruct(pGpu, pAttachArg)) != NV_OK)
+    {
+        goto gpumgrAttach_error_and_exit;
+    }
+
+    _gpumgrGetEncSessionStatsReportingState(pGpu);
+
+    // Add entry into system partition topo array
+    gpumgrAddSystemMIGInstanceTopo(pAttachArg->nvDomainBusDeviceFunc);
+
+    return status;
+
+gpumgrAttach_error_and_exit:
+    if ((pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)) != 0)
+    {
+        pGpuMgr->gpuAttachMask &= ~NVBIT(gpuInstance);
+        pGpuMgr->gpuAttachCount--;
+    }
+
+    if (pGpu != NULL)
+    {
+        _gpumgrUnregisterRmCapsForGpu(gpuGetDBDF(pGpu));
+    }
+
+    osDpcDetachGpu(pGpu);
+    _gpumgrDestroyGpu(gpuInstance);
+    return status;
+}
+
+//
+// gpumgrDetachGpu
+//
+// This entry point detaches a gpu from the RM. The corresponding
+// OBJGPU and any of its offspring are released, etc.
+//
+NV_STATUS
+gpumgrDetachGpu(NvU32 gpuInstance)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU    *pGpu    = gpumgrGetGpu(gpuInstance);
+    NvBool     bDelClientResourcesFromGpuMask = !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY);
+
+    LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner());
+
+    // Mark for deletion the stale clients related to the GPU mask
+    if (bDelClientResourcesFromGpuMask)
+    {
+        rmapiSetDelPendingClientResourcesFromGpuMask(NVBIT(gpuInstance));
+    }
+
+    osDpcDetachGpu(pGpu);
+
+    pGpu->pOsRmCaps = NULL;
+
+    // release pDev
+    _gpumgrDestroyGpu(gpuInstance);
+
+    // Delete the marked clients related to the GPU mask
+    if (bDelClientResourcesFromGpuMask)
+    {
+        rmapiDelPendingDevices(NVBIT(gpuInstance));
+        rmapiDelPendingClients();
+    }
+
+    NV_ASSERT(pGpuMgr->gpuAttachMask & NVBIT(gpuInstance));
+    pGpuMgr->gpuAttachMask &= ~NVBIT(gpuInstance);
+    pGpuMgr->gpuAttachCount--;
+
+    return NV_OK;
+}
+
+//
+// gpumgrCreateDevice
+//
+// Create a broadcast device. The set of one or more gpus
+// comprising the broadcast device is described by gpuMask.
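+//
+// Single-GPU usage sketch (illustrative; this is the only case handled
+// inline below, and pGpuIdsOrdinal is not consumed on that path):
+//
+//     NvU32 deviceInstance, ordinal = 0;
+//     status = gpumgrCreateDevice(&deviceInstance,
+//                                 NVBIT(pGpu->gpuInstance), &ordinal);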
+// +NV_STATUS +gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pParentGpu = NULL; + NvU32 gpuInstance; + NV_STATUS status = NV_ERR_INVALID_REQUEST; + OBJGPUGRP *pGpuGrp = NULL; + + pGpuMgr->deviceCount++; + + NV_ASSERT(gpuMask != 0); + + // if only 1 gpu in the set, we're done + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + // alloc new broadcast device instance + status = gpumgrAllocDeviceInstance(pDeviceInstance); + if (status != NV_OK) + { + goto gpumgrCreateDevice_exit; + } + + gpumgrConstructGpuGrpObject(pGpuMgr, gpuMask, + &pGpuMgr->pGpuGrpTable[*pDeviceInstance]); + // + // Set up parent gpu state. pParentGpu == NULL during boot when + // we're first creating this device because the GPU attach process + // has not yet completed. pParentGpu != NULL when we're coming + // out of SLI (unlinking). + // + gpuInstance = 0; + pParentGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + if (pParentGpu) + { + gpumgrSetParentGPU(pParentGpu, pParentGpu); + } + + gpumgrAddDeviceMaskToGpuInstTable(gpuMask); + status = NV_OK; + goto gpumgrCreateDevice_exit; + } + +gpumgrCreateDevice_exit: + if (status != NV_OK) + { + // Device creation failed + pGpuMgr->deviceCount--; + } + else + { + pGpuGrp = pGpuMgr->pGpuGrpTable[*pDeviceInstance]; + if (gpugrpGetGpuMask(pGpuGrp) != gpuMask) + { + NV_ASSERT(0); + gpumgrDestroyDevice(*pDeviceInstance); + return NV_ERR_INVALID_DATA; + } + NV_PRINTF(LEVEL_INFO, + "gpumgrCreateDevice: deviceInst 0x%x mask 0x%x\n", + *pDeviceInstance, gpuMask); + } + return status; +} + +NV_STATUS +gpumgrDestroyDevice(NvU32 deviceInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS status = NV_OK; + OBJGPUGRP *pGpuGrp = pGpuMgr->pGpuGrpTable[deviceInstance]; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_ERR_OBJECT_NOT_FOUND); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + NV_ASSERT(gpuMask != 0); + + // if we only have one subdevice we're done + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + gpugrpDestroy(pGpuGrp); + pGpuMgr->pGpuGrpTable[deviceInstance] = NULL; + gpumgrClearDeviceMaskFromGpuInstTable(gpuMask); + goto gpumgrDestroyDevice_exit; + } + +gpumgrDestroyDevice_exit: + pGpuMgr->deviceCount--; + + return status; +} + +// +// gpumgrGetDeviceInstanceMask +// +// Returns mask of enabled (or valid) device instances. +// This mask tells clients which NV01_DEVICE class +// instances are valid. +// +NvU32 +gpumgrGetDeviceInstanceMask(void) +{ + NvU32 i, deviceInstanceMask = 0; + + // for every broadcast device... + for (i = 0; i < NV_MAX_DEVICES; i++) + { + // ...add it to our mask if it's enabled + if (NV_OK == gpumgrIsDeviceInstanceValid(i)) + deviceInstanceMask |= NVBIT(i); + } + + return deviceInstanceMask; +} + +NvU32 +gpumgrGetGpuMask(OBJGPU *pGpu) +{ + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + + NV_ASSERT(deviceInstance < NV_MAX_DEVICES); + + return gpumgrGetDeviceGpuMask(deviceInstance); +} + +// +// gpumgrGetSubDeviceCount +// +NvU32 +gpumgrGetSubDeviceCount(NvU32 gpuMask) +{ + NvU32 subDeviceCount = 0; + + // tally # of gpus in the set + while (gpuMask != 0) + { + subDeviceCount ++; + gpuMask &= (gpuMask-1); // remove lowest bit in gpuMask + } + return subDeviceCount; +} + +// +// gpumgrGetSubDeviceCountFromGpu +// ATTENTION: When using with SLI Next / RM Unlinked SLI, the +// subdevice count is always 1 for each GPU. 
This can cause
+// bugs such as buffer overflows in arrays indexed by subdevice instance,
+// because with RM Unlinked SLI:
+// - the subdevice count is always 1 (the GPUs are not linked)
+// - the GPU subdevice instance can be non-zero
+// For subdevice instance arrays, please use
+// gpumgrGetSubDeviceMaxValuePlus1()
+//
+NvU32
+gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu)
+{
+    NvU32 gpuMask = gpumgrGetGpuMask(pGpu);
+    NvU32 subDeviceCount = gpumgrGetSubDeviceCount(gpuMask);
+
+    NV_ASSERT(subDeviceCount > 0);
+    return subDeviceCount;
+}
+
+//
+// gpumgrGetSubDeviceMaxValuePlus1
+// SLI disabled: return 1
+// SLI enabled with RM linked in SLI: returns 2 or more
+// SLI enabled with RM unlinked: return current subdeviceInstance + 1
+// Lifetime of this function: until a full transition to SLI Next / RM Unlinked SLI.
+//
+NvU32
+gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu)
+{
+    if (!IsSLIEnabled(pGpu))
+    {
+        // SLI disabled: return 1 as all GPU subdevice instances are 0
+        // Unlinked SLI: returns the current subdevice instance + 1
+        return gpumgrGetSubDeviceInstanceFromGpu(pGpu) + 1;
+    }
+    else
+    {
+        // SLI enabled in RM: the count of subdevice instances for that GPU/device
+        return gpumgrGetSubDeviceCountFromGpu(pGpu);
+    }
+}
+
+static void
+gpumgrSetAttachInfo(OBJGPU *pGpu, GPUATTACHARG *pAttachArg)
+{
+    NvU32 gpuId = NV0000_CTRL_GPU_INVALID_ID;
+
+    if (pAttachArg->socDeviceArgs.specified)
+    {
+        NvU32 idx;
+        NvU32 maxIdx;
+        // This path is taken for T234D+ SOC devices.
+
+        //
+        // TODO: This existing field is specifically used to safeguard
+        // iGPU-specific code paths within RM, and should actually be NV_FALSE for
+        // T234D+.
+        //
+        // See JIRA TDS-5101 for more details.
+        //
+        pGpu->bIsSOC = NV_TRUE;
+        maxIdx = SOC_DEV_MAPPING_MAX;
+
+        for (idx = 0; idx < maxIdx; idx++)
+        {
+            pGpu->deviceMappings[idx] = pAttachArg->socDeviceArgs.deviceMapping[idx];
+        }
+
+        pGpu->busInfo.iovaspaceId = pAttachArg->socDeviceArgs.iovaspaceId;
+        {
+            pGpu->busInfo.gpuPhysAddr = pGpu->deviceMappings[SOC_DEV_MAPPING_DISP].gpuNvPAddr;
+            pGpu->gpuDeviceMapCount = 1;
+        }
+
+        //
+        // TODO bug 2100708: a fake DBDF is used on SOC to opt out of some
+        // RM paths that cause issues otherwise, see the bug for details.
+        //
+        pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc;
+    }
+    else if (pAttachArg->bIsSOC)
+    {
+        // This path is only taken for ARCH MODS iGPU verification.
+
+        NV_ASSERT(sizeof(pGpu->deviceMappings) == sizeof(pAttachArg->socDeviceMappings));
+        pGpu->bIsSOC = NV_TRUE;
+        pGpu->idInfo.PCIDeviceID    = pAttachArg->socId;
+        pGpu->idInfo.PCISubDeviceID = pAttachArg->socSubId;
+        pGpu->busInfo.iovaspaceId   = pAttachArg->iovaspaceId;
+        if (RMCFG_FEATURE_PLATFORM_MODS || RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM)
+        {
+            NV_ASSERT(sizeof(pGpu->deviceMappings) == sizeof(pAttachArg->socDeviceMappings));
+            portMemCopy(pGpu->deviceMappings, sizeof(pGpu->deviceMappings), pAttachArg->socDeviceMappings, sizeof(pGpu->deviceMappings));
+            pGpu->gpuDeviceMapCount = pAttachArg->socDeviceCount;
+
+            //
+            // TODO bug 2100708: a fake DBDF is used on SOC to opt out of some
+            // RM paths that cause issues otherwise, see the bug for details.
+            //
+            pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc;
+        }
+    }
+    else
+    {
+        //
+        // Set this gpu's hardware register access address pointers
+        // from the contents of mappingInfo.
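+        // ("mappingInfo" refers to the register, framebuffer, and instance
+        // apertures carried in GPUATTACHARG and copied field-by-field
+        // below.)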
+ // + pGpu->bIsSOC = NV_FALSE; + + pGpu->deviceMappings[0].gpuNvAddr = pAttachArg->regBaseAddr; + pGpu->registerAccess.gpuFbAddr = pAttachArg->fbBaseAddr; + pGpu->busInfo.gpuPhysAddr = pAttachArg->devPhysAddr; + pGpu->busInfo.gpuPhysFbAddr = pAttachArg->fbPhysAddr; + pGpu->busInfo.gpuPhysInstAddr = pAttachArg->instPhysAddr; + pGpu->busInfo.gpuPhysIoAddr = pAttachArg->ioPhysAddr; + pGpu->busInfo.iovaspaceId = pAttachArg->iovaspaceId; + pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc; + pGpu->deviceMappings[0].gpuNvLength = pAttachArg->regLength; + pGpu->fbLength = pAttachArg->fbLength; + pGpu->busInfo.IntLine = pAttachArg->intLine; + pGpu->gpuDeviceMapCount = 1; + + if ( ! pAttachArg->instBaseAddr ) + { + // + // The OS init goo didn't map a separate region for instmem. + // So instead use the 1M mapping in bar0. + // + pGpu->instSetViaAttachArg = NV_FALSE; + pGpu->registerAccess.gpuInstAddr = (GPUHWREG*)(((NvU8*)pGpu->deviceMappings[0].gpuNvAddr) + 0x00700000); // aka NV_PRAMIN. + if (!pGpu->busInfo.gpuPhysInstAddr) + { + // + // Only use the bar0 window physical address if the OS didn't + // specify a bar2 physical address. + // + pGpu->busInfo.gpuPhysInstAddr = pGpu->busInfo.gpuPhysAddr + 0x00700000; // aka NV_PRAMIN + } + pGpu->instLength = 0x100000; // 1MB + } + else + { + pGpu->instSetViaAttachArg = NV_TRUE; + pGpu->registerAccess.gpuInstAddr = pAttachArg->instBaseAddr; + pGpu->instLength = pAttachArg->instLength; + } + } + + gpuId = gpuGenerate32BitId(gpuGetDomain(pGpu), gpuGetBus(pGpu), gpuGetDevice(pGpu)); + if (gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + gpumgrSetGpuId(pGpu, gpuId); + } +} + +// +// gpumgrStatePreInitGpu & gpumgrStateInitGpu +// +// These routines handle unicast gpu initialization. +// +NV_STATUS +gpumgrStatePreInitGpu(OBJGPU *pGpu) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + if (FULL_GPU_SANITY_CHECK(pGpu)) + { + // pre-init phase done in UC mode + status = gpuStatePreInit(pGpu); + } + else + { + status = NV_ERR_GPU_IS_LOST; + DBG_BREAKPOINT(); + } + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +NV_STATUS +gpumgrStateInitGpu(OBJGPU *pGpu) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + // init phase + status = gpuStateInit(pGpu); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +NV_STATUS +gpumgrStateLoadGpu(OBJGPU *pGpu, NvU32 flags) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + // Load phase + status = gpuStateLoad(pGpu, flags); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + if (status != NV_OK) + goto gpumgrStateLoadGpu_exit; + +gpumgrStateLoadGpu_exit: + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +// +// gpumgrGetNextGpu +// +// This routine searches subDeviceMask for the next gpu by using +// the start index value as a beginning bit position. 
If a gpu is +// found, the start index value is bumped to the next bit position +// in the mask. +// +POBJGPU +gpumgrGetNextGpu(NvU32 subDeviceMask, NvU32 *pStartIndex) +{ + NvU32 i; + + if (*pStartIndex > NV_MAX_DEVICES) + { + *pStartIndex = NV_MAX_DEVICES; + return NULL; + } + + for (i = *pStartIndex; i < NV_MAX_DEVICES; i++) + { + if (subDeviceMask & NVBIT(i)) + { + *pStartIndex = i+1; + return gpumgrGetGpu(i); + } + } + + *pStartIndex = NV_MAX_DEVICES; + return NULL; +} + + +// +// gpumgrIsGpuPointerValid - Validates pGpu without dereferencing it. +// +NvBool +gpumgrIsGpuPointerValid(OBJGPU *pGpu) +{ + OBJGPU *pTempGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuCount = 0; + NvU32 gpuIndex = 0; + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + + while(pTempGpu) + { + if (pTempGpu->getProperty(pTempGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + { + if (pTempGpu == pGpu) + { + return NV_TRUE; + } + } + + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + } + + return NV_FALSE; +} + +NvBool gpumgrIsGpuDisplayParent(OBJGPU *pGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + NvBool rc = NV_FALSE; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_FALSE); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + // If there's only one GPU in the device, then of course it's the display parent! + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + rc = NV_TRUE; + } + // + // If the gpuInstance argument is the first gpuInstance in the ordering, + // then it's the display parent! + // + else if (pGpu->gpuInstance == pGpuGrp->SliLinkOrder[0].gpuInstance) + { + rc = NV_TRUE; + } + + // Otherwise it isn't. + return rc; +} + +OBJGPU *gpumgrGetDisplayParent(OBJGPU *pGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + NvU32 gpuCount; + NvU32 gpuMask; + NvU32 gpuInstance; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + gpuCount = gpumgrGetSubDeviceCount(gpuMask); + + if (gpuCount > 1) + { + gpuInstance = pGpuGrp->SliLinkOrder[0].gpuInstance; + pGpu = gpumgrGetGpu(gpuInstance); + } + + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + return pGpu; +} + +// +// gpumgrGetProbedGpuIds +// +// This routine services the NV0000_CTRL_GPU_GET_PROBED_IDS command. +// The passed in gpuIds table is filled in with valid gpuId info +// for each probed gpu. Invalid entries in the table are set to the +// invalid id value. +// +NV_STATUS +gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuIdsParams) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i, j, k; + + ct_assert(NV_MAX_DEVICES == NV0000_CTRL_GPU_MAX_PROBED_GPUS); + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0, j = 0, k = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + if (pGpuMgr->probedGpus[i].bExcluded) + pGpuIdsParams->excludedGpuIds[k++] = pGpuMgr->probedGpus[i].gpuId; + else + pGpuIdsParams->gpuIds[j++] = pGpuMgr->probedGpus[i].gpuId; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + for (i = j; i < NV_ARRAY_ELEMENTS(pGpuIdsParams->gpuIds); i++) + pGpuIdsParams->gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + + for (i = k; i < NV_ARRAY_ELEMENTS(pGpuIdsParams->excludedGpuIds); i++) + pGpuIdsParams->excludedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + + return NV_OK; +} + +// +// gpumgrGetAttachedGpuIds +// +// This routine services the NV0000_CTRL_GPU_GET_ATTACHED_IDS command. 
+// The passed-in gpuIds table is filled in with valid gpuId info
+// for each attached gpu. Any remaining entries in the table are set to
+// the invalid id value.
+//
+NV_STATUS
+gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuIdsParams)
+{
+    OBJGPU *pGpu;
+    NvU32 gpuAttachCnt, gpuAttachMask, i, cnt;
+    NvU32 *pGpuIds = &pGpuIdsParams->gpuIds[0];
+
+    // fill the table w/valid entries
+    gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask);
+    for (cnt = 0, i = 0; i < NV_MAX_DEVICES; i++)
+    {
+        if (gpuAttachMask & NVBIT(i))
+        {
+            pGpu = gpumgrGetGpu(i);
+            pGpuIds[cnt++] = pGpu->gpuId;
+        }
+    }
+
+    // invalidate rest of the entries
+    while (cnt < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS)
+        pGpuIds[cnt++] = NV0000_CTRL_GPU_INVALID_ID;
+
+    return NV_OK;
+}
+
+//
+// gpumgrGetSubDeviceInstanceFromGpu
+//
+// Given a pGpu return the corresponding subdevice instance value.
+//
+NvU32
+gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu)
+{
+    return pGpu->subdeviceInstance;
+}
+
+//
+// gpumgrGetParentGPU
+//
+POBJGPU
+gpumgrGetParentGPU(OBJGPU *pGpu)
+{
+    OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu);
+    NvU32 gpuMask;
+
+    NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL);
+    gpuMask = gpugrpGetGpuMask(pGpuGrp);
+
+    if (gpumgrIsSubDeviceCountOne(gpuMask))
+    {
+        return pGpu;
+    }
+    else
+    {
+        return gpugrpGetParentGpu(pGpuGrp);
+    }
+}
+
+//
+// gpumgrSetParentGPU
+//
+void
+gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu)
+{
+    OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu);
+
+    NV_ASSERT_OR_RETURN_VOID(pGpuGrp != NULL);
+    gpugrpSetParentGpu(pGpuGrp, pParentGpu);
+}
+
+//
+// gpumgrGetGpuFromId
+//
+// Find the specified gpu from its gpuId.
+//
+POBJGPU
+gpumgrGetGpuFromId(NvU32 gpuId)
+{
+    OBJGPU *pGpu;
+    NvU32 gpuAttachCnt, gpuAttachMask;
+    NvU32 i;
+
+    gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask);
+    for (i = 0; i < NV_MAX_DEVICES; i++)
+    {
+        if (gpuAttachMask & NVBIT(i))
+        {
+            pGpu = gpumgrGetGpu(i);
+
+            // found it
+            if (pGpu->gpuId == gpuId)
+                return pGpu;
+        }
+    }
+
+    // didn't find it
+    return NULL;
+}
+
+//
+// gpumgrGetGpuFromUuid()
+//
+// Get GPUOBJECT from UUID. Returns NULL if it cannot find a GPU with the
+// requested UUID.
+//
+POBJGPU
+gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags)
+{
+    OBJGPU   *pGpu;
+    NvU32     attachedGpuCount;
+    NvU32     attachedGpuMask;
+    NvU32     gpuIndex;
+    NvU32     gidStrLen;
+    NvU8     *pGidString = NULL;
+    NV_STATUS rmStatus;
+
+    // get all attached GPUs
+    rmStatus = gpumgrGetGpuAttachInfo(&attachedGpuCount, &attachedGpuMask);
+
+    gpuIndex = 0;
+
+    for (pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex);
+         pGpu != NULL;
+         pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex))
+    {
+        //
+        // get the GPU's UUID
+        //
+        // This implementation relies on the fact that gpuGetGidInfo() only
+        // allocates memory if it succeeds.
+        //
+        rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrLen, flags);
+        if (NV_OK != rmStatus)
+            return NULL;
+
+        // check if it matches
+        if (0 == portMemCmp(pGidString, pGpuUuid, gidStrLen))
+        {
+            portMemFree(pGidString);
+            return pGpu;
+        }
+        else
+        {
+            // if it doesn't match, clean up allocated memory for next iteration
+            portMemFree(pGidString);
+        }
+    }
+
+    return NULL;    // Failed to find a GPU with the requested UUID
+}
+
+//
+// gpumgrGetGpuFromBusInfo
+//
+// Find the specified GPU using its PCI bus info.
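+//
+// Usage sketch (illustrative; the domain/bus/device values are
+// placeholders):
+//
+//     OBJGPU *pGpu = gpumgrGetGpuFromBusInfo(0, 0x65, 0);
+//     if (pGpu == NULL)
+//         ; // no attached GPU at that PCI address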
+// +POBJGPU +gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device) +{ + NV_STATUS status; + OBJGPU *pGpu; + NvU32 attachedGpuCount; + NvU32 attachedGpuMask; + NvU32 gpuIndex = 0; + + status = gpumgrGetGpuAttachInfo(&attachedGpuCount, &attachedGpuMask); + NV_ASSERT_OR_RETURN(status == NV_OK, NULL); + + for (pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex); + pGpu != NULL; + pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex)) + { + if ((gpuGetDomain(pGpu) == domain) && + (gpuGetBus(pGpu) == bus) && + (gpuGetDevice(pGpu) == device)) + { + return pGpu; + } + } + + return NULL; +} + +// +// gpumgrSetGpuId +// +// This routine assigns the specified gpuId to the specified gpu. +// +void +gpumgrSetGpuId(OBJGPU *pGpu, NvU32 gpuId) +{ + pGpu->gpuId = gpuId; + + // if boardId is unassigned then give it a default value now + if (pGpu->boardId == 0xffffffff) + { + pGpu->boardId = gpuId; + } +} + +// +// gpumgrGetGpuIdInfo +// +// Special purpose routine that handles NV0000_CTRL_CMD_GPU_GET_ID_INFO +// requests from clients. +// NV0000_CTRL_CMD_GPU_GET_ID_INFO is deprecated in favour of +// NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2, per comments in ctrl0000gpu.h +// +NV_STATUS +gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuInfo) +{ + OBJGPU *pGpu; + NvU32 deviceInstance, subDeviceInstance; + + // start by making sure client request specifies a valid gpu + pGpu = gpumgrGetGpuFromId(pGpuInfo->gpuId); + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_INFO, + "gpumgrGetGpuInfoV2: bad gpuid spec: 0x%x\n", + pGpuInfo->gpuId); + return NV_ERR_INVALID_ARGUMENT; + } + + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + // + // We have a valid gpuInstance, so now let's get the corresponding + // deviceInstance/subDeviceInstance pair. + // + deviceInstance = gpuGetDeviceInstance(pGpu); + if (deviceInstance == NV_MAX_DEVICES) + { + NV_PRINTF(LEVEL_ERROR, + "gpumgrGetGpuInfoV2: deviceInstance not found\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + pGpuInfo->gpuInstance = pGpu->gpuInstance; + pGpuInfo->deviceInstance = deviceInstance; + pGpuInfo->subDeviceInstance = subDeviceInstance; + pGpuInfo->boardId = pGpu->boardId; + + // + // Setup gpu info flags; see ctrl0000gpu.h for list of flags. + // + pGpuInfo->gpuFlags = 0; + pGpuInfo->numaId = NV0000_CTRL_NO_NUMA_NODE; + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)) + { + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _ATS_ENABLED, + NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_TRUE); + pGpuInfo->numaId = pGpu->numaNodeId; + } + + // is this gpu in use? + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _IN_USE, gpuIsInUse(pGpu)); + + // is this gpu part of a sli device? + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _LINKED_INTO_SLI_DEVICE, IsSLIEnabled(pGpu)); + + // is this gpu a mobile gpu? + if (IsMobile(pGpu)) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE); + } + + // is this gpu the boot primary? 
+ if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE)) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _BOOT_MASTER, _TRUE); + } + + // is this GPU part of an SOC + if (pGpu->bIsSOC) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE); + } + + // GPU specific SLI status + pGpuInfo->sliStatus = pGpu->sliStatus; + + NV_PRINTF(LEVEL_INFO, + "gpumgrGetGpuInfoV2: gpu[0x%x]: device 0x%x subdevice 0x%x\n", + pGpuInfo->gpuId, pGpuInfo->deviceInstance, + pGpuInfo->subDeviceInstance); + + return NV_OK; +} +NV_STATUS +gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuInfo) +{ + NV_STATUS status; + NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS pGpuInfoV2 = {0}; + pGpuInfoV2.gpuId = pGpuInfo->gpuId; + + status = gpumgrGetGpuIdInfoV2(&pGpuInfoV2); + + if (status != NV_OK) + { + return status; + } + pGpuInfo->gpuFlags = pGpuInfoV2.gpuFlags; + pGpuInfo->deviceInstance = pGpuInfoV2.deviceInstance; + pGpuInfo->subDeviceInstance = pGpuInfoV2.subDeviceInstance; + pGpuInfo->sliStatus = pGpuInfoV2.sliStatus; + pGpuInfo->boardId = pGpuInfoV2.boardId; + pGpuInfo->gpuInstance = pGpuInfoV2.gpuInstance; + pGpuInfo->numaId = pGpuInfoV2.numaId; + + // If we get a non-NULL szName parameter, let os-dependent code + // fill it in from information we already have. + if (NvP64_VALUE(pGpuInfo->szName) != NULL) + { + status = osDeviceClassToDeviceName(pGpuInfo->deviceInstance, + NvP64_VALUE(pGpuInfo->szName)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "gpumgrGetGpuInfo: deviceInstance to szName translation failed\n"); + return status; + } + } + + return status; +} + +// +// gpumgrGetGpuInitStatus +// +// Special purpose routine that handles NV0000_CTRL_CMD_GET_GPU_INIT_STATUS +// requests from clients. +// +NV_STATUS +gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatus) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS rmStatus; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == pGpuInitStatus->gpuId) + { + if (pGpuMgr->probedGpus[i].bInitAttempted) + { + pGpuInitStatus->status = pGpuMgr->probedGpus[i].initStatus; + rmStatus = NV_OK; + } + else + { + // + // No init has been attempted on this GPU yet, so this request + // doesn't make any sense. + // + rmStatus = NV_ERR_INVALID_STATE; + } + goto done; + } + } + + // We couldn't find a probed gpuId matching the requested one. + rmStatus = NV_ERR_INVALID_ARGUMENT; +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return rmStatus; +} + +NV_STATUS +gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS rmStatus; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + *gpuDomainBusDevice = pGpuMgr->probedGpus[i].gpuDomainBusDevice; + rmStatus = NV_OK; + goto done; + } + } + + // + // We couldn't find a probed gpuId matching the requested one. + // + // This used to return a generic NV_ERR_INVALID_ARGUMENT, but we want to be + // more specific as at least nvml wants to be able to tell this case apart + // from other errors. This case is expected when GPUs are removed from the + // driver (e.g. 
through unbind on Linux) after a client queries for the
+    // probed GPUs, but before getting the PCI info for all of them.
+    //
+    rmStatus = NV_ERR_OBJECT_NOT_FOUND;
+
+done:
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+    return rmStatus;
+}
+
+//
+// gpumgrSetGpuInitStatus
+//
+// Marks initialization of the gpu in question as attempted and stores the
+// status.
+//
+void
+gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++)
+    {
+        if (pGpuMgr->probedGpus[i].gpuId == gpuId)
+        {
+            // Overwrite any previous init status
+            pGpuMgr->probedGpus[i].bInitAttempted = NV_TRUE;
+            pGpuMgr->probedGpus[i].initStatus = status;
+            break;
+        }
+    }
+
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+}
+
+//
+// gpumgrGetDefaultPrimaryGpu
+//
+// This routine looks at the set of GPUs and picks the primary (parent)
+// with the following rules, in this order:
+// 1- If a primary GPU has been passed in an SLI config by a client
+// 2- If there is a boot primary in the GPU mask
+// 3- The first VGA device attached (not 3d controller)
+//
+NvU32
+gpumgrGetDefaultPrimaryGpu
+(
+    NvU32 gpuMask
+)
+{
+    OBJGPU *pGpu = NULL;
+    NvU32   gpuInstance;
+
+    if (gpuMask == 0)
+    {
+        NV_ASSERT(gpuMask);
+        return 0;
+    }
+
+    // Find masterFromSLIConfig, set when an RM client passes a primary GPU
+    // index from an SLI config
+    gpuInstance = 0;
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+    {
+        if (pGpu->masterFromSLIConfig)
+        {
+            break;
+        }
+    }
+
+    // default to boot primary
+    if (pGpu == NULL)
+    {
+        gpuInstance = 0;
+        while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+        {
+            if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE))
+            {
+                break;
+            }
+        }
+    }
+
+    if (pGpu)
+    {
+        return gpuGetInstance(pGpu);
+    }
+
+    // otherwise the primary is the first non-3d controller in the set attached to the RM
+    gpuInstance = 0;
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+    {
+        if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_3D_CONTROLLER))
+        {
+            break;
+        }
+    }
+
+    if (!pGpu)
+    {
+        // The GPU mask contains only 3d Controllers.
+        // Choose first one in the set attached to the RM.
+        gpuInstance = 0;
+        pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance);
+    }
+
+    if (pGpu == NULL)
+    {
+        return 0;   // This should never happen
+    }
+
+    return gpuGetInstance(pGpu);
+}
+
+NV_STATUS
+gpumgrGetGpuLockAndDrPorts
+(
+    OBJGPU *pGpu,
+    OBJGPU *pPeerGpu,
+    NvU32 *pPinsetOut,
+    NvU32 *pPinsetIn
+)
+{
+    *pPinsetOut = 0;
+    *pPinsetIn = 0;
+    return NV_OK;
+}
+
+//
+// Stores the address of the boot primary in *ppGpu.
+// Returns NV_OK on success, NV_ERR_GENERIC otherwise.
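+//
+// Usage sketch (illustrative):
+//
+//     OBJGPU *pBootGpu;
+//     if (gpumgrGetBootPrimary(&pBootGpu) == NV_OK)
+//     {
+//         // pBootGpu is the boot primary GPU
+//     }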
+//
+NV_STATUS
+gpumgrGetBootPrimary(OBJGPU **ppGpu)
+{
+    NvU32   gpuCount, gpuMask, idx1;
+    OBJGPU *pGpu = NULL;
+
+    // Find boot primary
+    idx1 = 0;
+    gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask);
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &idx1)) != NULL)
+    {
+        if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE))
+            break;
+    }
+    *ppGpu = pGpu;
+
+    // No boot primary
+    if (pGpu == NULL)
+    {
+        return NV_ERR_GENERIC;
+    }
+
+    return NV_OK;
+}
+
+//
+// Returns the mGpu
+//
+OBJGPU *gpumgrGetMGpu(void)
+{
+    OBJGPU *pGpu;
+    NvU32 gpuCount, gpuMask, gpuIndex = 0;
+
+    // Parse through all the GPUs
+    gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask);
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)))
+    {
+        if (pGpu->getProperty(pGpu, PDB_PROP_GPU_HYBRID_MGPU))
+        {
+            break;
+        }
+    }
+
+    return pGpu;
+}
+
+//
+// Get the physical FB address for the given GPU. The returned address
+// depends on whether GPU broadcast and chipset (CL) broadcast are enabled:
+// - BC GPU + no CL BC -> returns GPU address
+// - UC GPU            -> returns GPU address
+// - BC GPU + CL BC    -> returns broadcast address
+//
+RmPhysAddr gpumgrGetGpuPhysFbAddr(OBJGPU *pGpu)
+{
+    RmPhysAddr physFbAddr;
+
+    physFbAddr = pGpu->busInfo.gpuPhysFbAddr;
+
+    NV_ASSERT(physFbAddr);
+    return physFbAddr;
+}
+
+
+//
+// Get GPU object from subdevice instance
+//
+POBJGPU
+gpumgrGetGpuFromSubDeviceInst(NvU32 deviceInst, NvU32 subDeviceInst)
+{
+    OBJGPU    *pGpu    = NULL;
+    OBJGPUGRP *pGpuGrp = NULL;
+    NvU32      gpuInst = 0;
+    NvU32      gpuMask;
+
+    pGpuGrp = gpumgrGetGpuGrpFromInstance(deviceInst);
+    NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL);
+
+    gpuMask = gpugrpGetGpuMask(pGpuGrp);
+
+    // check for single GPU case
+    if (gpumgrGetSubDeviceCount(gpuMask) == 1)
+        return gpumgrGetNextGpu(gpuMask, &gpuInst);
+
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInst)) != NULL)
+    {
+        if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) == subDeviceInst)
+        {
+            break;
+        }
+    }
+
+    NV_ASSERT(pGpu);
+
+    return pGpu;
+}
+
+/*!
+ * @brief Sets the device instance pGpu->deviceInstance for the GPUs
+ *        indicated by the gpu mask. The device instance is found by
+ *        searching the gpu group table for the group containing each GPU.
+ *
+ * @param[in] gpuMask NvU32 value
+ *
+ * @return NV_OK, or NV_ERR_OBJECT_NOT_FOUND if no gpu group contains one
+ *         of the GPUs
+ *
+ */
+NV_STATUS
+gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NV_STATUS  rmStatus = NV_ERR_OBJECT_NOT_FOUND;
+    OBJGPU    *pGpu = NULL;
+    NvU32      i, gpuIndex = 0;
+    OBJGPUGRP *pGpuGrp = NULL;
+
+    // Add the device instance to the GPU objects in the mask
+    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)))
+    {
+        rmStatus = NV_ERR_OBJECT_NOT_FOUND;
+        for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++)
+        {
+            pGpuGrp = pGpuMgr->pGpuGrpTable[i];
+            // if it contains the specified gpu...
+            if ((pGpuGrp != NULL) &&
+                (gpugrpGetGpuMask(pGpuGrp) & NVBIT(pGpu->gpuInstance)))
+            {
+                pGpu->deviceInstance = i;
+                rmStatus = NV_OK;
+                break;
+            }
+        }
+        NV_ASSERT_OK_OR_RETURN(rmStatus);
+    }
+
+    return rmStatus;
+}
+
+/*!
* @brief Retrieves the OBJGPUGRP pointer given the instance
+ *
+ * @param[in] gpugrpInstance GPUGRP instance
+ *
+ * @return GPUGRP pointer on success, NULL on error
+ *
+ */
+POBJGPUGRP
+gpumgrGetGpuGrpFromInstance
+(
+    NvU32 gpugrpInstance
+)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NV_ASSERT_OR_RETURN(gpugrpInstance < NV_MAX_DEVICES, NULL);
+    return pGpuMgr->pGpuGrpTable[gpugrpInstance];
+}
+
+/*!
+ * @brief Retrieves the OBJGPUGRP pointer given the GPU pointer.
+ *
+ * @param[in] pGpu GPU object pointer
+ *
+ * @return OBJGPUGRP pointer on success, NULL on error
+ *
+ */
+POBJGPUGRP
+gpumgrGetGpuGrpFromGpu
+(
+    OBJGPU *pGpu
+)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 gpugrpInstance = gpuGetDeviceInstance(pGpu);
+    NV_ASSERT_OR_RETURN(gpugrpInstance < NV_MAX_DEVICES, NULL);
+
+    return pGpuMgr->pGpuGrpTable[gpugrpInstance];
+}
+
+/*!
+ * @brief Constructs the GPUGRP object for the given instance
+ *
+ * @param[in]  pGpuMgr  GPUMGR object pointer
+ * @param[in]  gpuMask  GpuMask corresponding to this GPUGRP
+ * @param[out] ppGpuGrp Newly created gpugrp object pointer
+ *
+ * @return NV_OK on success, appropriate error on failure.
+ *
+ */
+NV_STATUS
+gpumgrConstructGpuGrpObject
+(
+    OBJGPUMGR  *pGpuMgr,
+    NvU32       gpuMask,
+    OBJGPUGRP **ppGpuGrp
+)
+{
+    NV_STATUS status;
+
+    status = objCreate(ppGpuGrp, pGpuMgr, OBJGPUGRP);
+    if (NV_OK != status)
+    {
+        return status;
+    }
+
+    status = gpugrpCreate(*ppGpuGrp, gpuMask);
+    if (NV_OK != status)
+    {
+        return status;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Enter/exit "drain" state on a given GPU
+ *
+ * @param[in] gpuId        Platform specific GPU Id
+ * @param[in] bEnable      NV_TRUE: enter, NV_FALSE: exit
+ * @param[in] bRemove      Ask the OS to forget the GPU, once quiescent
+ * @param[in] bLinkDisable Shut down the upstream PCIe link after the removal.
+ *                         This is done in user-land, we just check that the
+ *                         GPU is in the right state.
+ *
+ * @return NV_OK on success, appropriate error on failure.
+ */
+NV_STATUS
+gpumgrModifyGpuDrainState
+(
+    NvU32  gpuId,
+    NvBool bEnable,
+    NvBool bRemove,
+    NvBool bLinkDisable
+)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    OBJGPU    *pGpu;
+    NvBool     bAttached;
+    NvBool     bStateChange = NV_FALSE;
+    NvU32      i;
+    NvU32      domain = 0;
+    NvU8       bus    = 0;
+    NvU8       device = 0;
+
+    if (bRemove && !osRemoveGpuSupported())
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    bAttached = ((pGpu = gpumgrGetGpuFromId(gpuId)) != NULL);
+
+    if (bEnable && bLinkDisable && bAttached)
+    {
+        return NV_ERR_IN_USE;
+    }
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i)
+    {
+        if (pGpuMgr->probedGpus[i].gpuId == gpuId)
+        {
+            bStateChange = pGpuMgr->probedGpus[i].bDrainState != bEnable;
+            pGpuMgr->probedGpus[i].bDrainState = bEnable;
+            pGpuMgr->probedGpus[i].bRemoveIdle = bEnable && bRemove;
+            domain = gpuDecodeDomain(pGpuMgr->probedGpus[i].gpuDomainBusDevice);
+            bus    = gpuDecodeBus(pGpuMgr->probedGpus[i].gpuDomainBusDevice);
+            device = gpuDecodeDevice(pGpuMgr->probedGpus[i].gpuDomainBusDevice);
+            break;
+        }
+    }
+
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+
+    if (i == NV_MAX_DEVICES)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // If the initial drain state (characterized by enabling draining without
+    // setting the remove flag) is already enabled, multiple clients may be
+    // trying to simultaneously manage drain state.
Only return success for the
+    // first to allow them to filter out the others.
+    //
+    if (bEnable && !bRemove && !bStateChange)
+    {
+        return NV_ERR_IN_USE;
+    }
+
+    if (bEnable && bRemove && !bAttached)
+    {
+        osRemoveGpu(domain, bus, device);
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Query "drain"/remove state on a given GPU
+ *
+ * @param[in]  gpuId    Platform specific GPU Id
+ * @param[out] pBEnable Drain state ptr
+ * @param[out] pBRemove Remove flag ptr
+ *
+ * @return NV_OK on success, appropriate error on failure.
+ */
+NV_STATUS
+gpumgrQueryGpuDrainState
+(
+    NvU32   gpuId,
+    NvBool *pBEnable,
+    NvBool *pBRemove
+)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32      i;
+
+    portSyncMutexAcquire(pGpuMgr->probedGpusLock);
+
+    for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i)
+    {
+        if (pGpuMgr->probedGpus[i].gpuId == gpuId)
+        {
+            if (pBEnable != NULL)
+            {
+                *pBEnable = pGpuMgr->probedGpus[i].bDrainState;
+            }
+
+            if (pBRemove != NULL)
+            {
+                *pBRemove = pGpuMgr->probedGpus[i].bRemoveIdle;
+            }
+
+            break;
+        }
+    }
+
+    portSyncMutexRelease(pGpuMgr->probedGpusLock);
+
+    //
+    // This used to return a generic NV_ERR_INVALID_ARGUMENT on error, but we
+    // want to be more specific as at least nvml wants to be able to tell this
+    // case apart from other errors. This case is expected when GPUs are
+    // removed from the driver (e.g. through unbind on Linux) after a client
+    // queries for the probed GPUs, but before getting the PCI info for all of
+    // them.
+    //
+    return (i == NV_MAX_DEVICES) ? NV_ERR_OBJECT_NOT_FOUND : NV_OK;
+}
+
+/*!
+ * @brief Retrieves the group gpuMask that contains this gpuInstance.
+ *        Used for locking all gpus under the same device together.
+ *
+ * @param[in] gpuInst: unique index per GPU
+ *
+ * @return gpuMask: mask of all GPUs that are in the same group
+ *
+ */
+NvU32
+gpumgrGetGrpMaskFromGpuInst
+(
+    NvU32 gpuInst
+)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    NV_ASSERT_OR_RETURN(gpuInst < NV_MAX_DEVICES, 0);
+
+    return pGpuMgr->gpuInstMaskTable[gpuInst];
+}
+
+/*!
+ * @brief Updates the per-GPU-instance table to contain the correct group mask
+ *
+ * @param[in] gpuMask: mask of all GPUs that are in the same group
+ *
+ */
+void
+gpumgrAddDeviceMaskToGpuInstTable
+(
+    NvU32 gpuMask
+)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    int gpuInst;
+    NvU32 tempGpuMask = gpuMask;
+
+    for (gpuInst = 0; (tempGpuMask != 0) && (gpuInst < NV_MAX_DEVICES); gpuInst++)
+    {
+        if (NVBIT(gpuInst) & gpuMask)
+            pGpuMgr->gpuInstMaskTable[gpuInst] = gpuMask;
+
+        tempGpuMask &= ~NVBIT(gpuInst);
+    }
+}
+
+/*!
+ * @brief Clears the group mask from the per-GPU-instance table (when the
+ *        group is destroyed)
+ *
+ * @param[in] gpuMask: gpu group mask being torn down
+ *
+ */
+void
+gpumgrClearDeviceMaskFromGpuInstTable
+(
+    NvU32 gpuMask
+)
+{
+    OBJSYS    *pSys    = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    int gpuInst;
+    NvU32 tempGpuMask = gpuMask;
+
+    for (gpuInst = 0; (tempGpuMask != 0) && (gpuInst < NV_MAX_DEVICES); gpuInst++)
+    {
+        if (NVBIT(gpuInst) & gpuMask)
+            pGpuMgr->gpuInstMaskTable[gpuInst] = 0;
+
+        tempGpuMask &= ~NVBIT(gpuInst);
+    }
+}
+
+/**
+ * @brief Saves a pointer to the current GPU instance in thread local storage,
+ *        to be logged by NVLOG, until gpumgrSetGpuRelease is called.
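+ *
+ * Typical pairing (illustrative sketch):
+ *
+ *     if (gpumgrSetGpuAcquire(pGpu))
+ *     {
+ *         // ... NVLOG output in this thread is tagged with gpuInstance ...
+ *         gpumgrSetGpuRelease();
+ *     }
+ *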
+ * Returns NV_TRUE if the TLS entry was acquired (the entry is released by a
+ * later gpumgrSetGpuRelease() call), NV_FALSE otherwise.
+ *
+ * @param[in] pGpu
+ */
+NvBool
+gpumgrSetGpuAcquire(OBJGPU *pGpu)
+{
+    NvU32 **ppGpuInstance;
+    ppGpuInstance = (NvU32 **)tlsEntryAcquire
+                    (TLS_ENTRY_ID_CURRENT_GPU_INSTANCE);
+    if (ppGpuInstance)
+    {
+        *ppGpuInstance = &(pGpu->gpuInstance);
+        return NV_TRUE;
+    }
+    return NV_FALSE;
+}
+
+/**
+ * @brief Releases the thread local storage for GPU ID.
+ */
+void
+gpumgrSetGpuRelease(void)
+{
+    tlsEntryRelease(TLS_ENTRY_ID_CURRENT_GPU_INSTANCE);
+}
+
+/**
+ * @brief Returns the type of bridge SLI_BT_*
+ */
+NvU8
+gpumgrGetGpuBridgeType(void)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    return pGpuMgr->gpuBridgeType;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c
new file mode 100644
index 0000000..d0ee95a
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c
@@ -0,0 +1,561 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+/***************************** HW State Routines ***************************\
+*                                                                           *
+* IOMMU Virtual Address Space Function Definitions.
* +* * +\***************************************************************************/ + +#include "mem_mgr/io_vaspace.h" +#include "class/cl00f2.h" // IO_VASPACE_A +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu_mgr/gpu_mgr.h" +#include "os/os.h" +#include "core/system.h" +#include "mem_mgr/virt_mem_mgr.h" + + +NV_STATUS +iovaspaceConstruct__IMPL +( + OBJIOVASPACE *pIOVAS, + NvU32 classId, + NvU32 vaspaceId, + NvU64 vaStart, + NvU64 vaLimit, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 flags +) +{ + NV_ASSERT_OR_RETURN(IO_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT); + pIOVAS->mappingCount = 0; + return NV_OK; +} + +void +iovaspaceDestruct_IMPL(OBJIOVASPACE *pIOVAS) +{ + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + + if (pIOVAS->mappingCount != 0) + { + NV_PRINTF(LEVEL_ERROR, "%lld left-over mappings in IOVAS 0x%x\n", + pIOVAS->mappingCount, pVAS->vaspaceId); + DBG_BREAKPOINT(); + } +} + +NV_STATUS +iovaspaceAlloc_IMPL +( + OBJIOVASPACE *pIOVAS, + NvU64 size, + NvU64 align, + NvU64 rangeLo, + NvU64 rangeHi, + NvU64 pageSizeLockMask, + VAS_ALLOC_FLAGS flags, + NvU64 *pAddr +) +{ + NV_STATUS status = NV_OK; + + // TBD implement iommu specific stuff + return status; +} + +NV_STATUS +iovaspaceFree_IMPL +( + OBJIOVASPACE *pIOVAS, + NvU64 vAddr +) +{ + NV_STATUS status = NV_OK; + + // TBD implement iommu specific stuff + return status; +} + +NV_STATUS +iovaspaceApplyDefaultAlignment_IMPL +( + OBJIOVASPACE *pIOVAS, + const FB_ALLOC_INFO *pAllocInfo, + NvU64 *pAlign, + NvU64 *pSize, + NvU64 *pPageSizeLockMask +) +{ + RM_ATTR_PAGE_SIZE pageSizeAttr; + NvU64 maxPageSize = RM_PAGE_SIZE; + + pageSizeAttr = dmaNvos32ToPageSizeAttr(pAllocInfo->pageFormat->attr, pAllocInfo->pageFormat->attr2); + switch(pageSizeAttr) + { + case RM_ATTR_PAGE_SIZE_DEFAULT: + case RM_ATTR_PAGE_SIZE_4KB: + *pAlign = NV_MAX(*pAlign, maxPageSize); + *pSize = RM_ALIGN_UP(*pSize, maxPageSize); + return NV_OK; + default: + break; + } + + return NV_OK; +} + +NV_STATUS +iovaspaceIncAllocRefCnt_IMPL +( + OBJIOVASPACE *pIOVAS, + NvU64 vAddr +) +{ + NV_STATUS status = NV_OK; + + // TBD: Implement iommu specific stuff + return status; +} + +NV_STATUS +iovaspaceGetVasInfo_IMPL +( + OBJIOVASPACE *pIOVAS, + struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams +) +{ + return NV_OK; +} + +NvU64 +iovaspaceGetVaStart_IMPL(OBJIOVASPACE *pIOVAS) +{ + // TODO: query OS layer, this could also be set in ctor, not virtual? + return 0; +} + +NvU64 +iovaspaceGetVaLimit_IMPL(OBJIOVASPACE *pIOVAS) +{ + // TODO: query OS layer, this could also be set in ctor, not virtual? + return NVBIT64(32) - 1; +} + +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_MODS_FEATURES) && !NVCPU_IS_ARM +static PIOVAMAPPING +_iovaspaceCreateMappingDataFromMemDesc +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + PIOVAMAPPING pIovaMapping = NULL; + NvU64 mappingDataSize = 0; + + mappingDataSize = sizeof(IOVAMAPPING); + if (!memdescGetContiguity(pMemDesc, AT_CPU)) + { + mappingDataSize += sizeof(RmPhysAddr) * + (NvU64_LO32(pMemDesc->PageCount) - 1); + } + + // + // The portMemAllocNonPaged() and portMemSet() interfaces work with 32-bit sizes, + // so make sure we don't exceed that here. + // + if (NvU64_HI32(mappingDataSize) != 0UL) + { + NV_PRINTF(LEVEL_ERROR, "too much memory to map! 
(0x%llx bytes)\n", + mappingDataSize); + DBG_BREAKPOINT(); + return NULL; + } + + pIovaMapping = portMemAllocNonPaged(NvU64_LO32(mappingDataSize)); + if (pIovaMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate 0x%x bytes for IOVA mapping metadata\n", + NvU64_LO32(mappingDataSize)); + return NULL; + } + + portMemSet((void *)pIovaMapping, 0, NvU64_LO32(mappingDataSize)); + + pIovaMapping->pPhysMemDesc = pMemDesc; + + return pIovaMapping; +} + +static NV_STATUS +_iovaspaceCreateSubmapping +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + NvU64 rootOffset; + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + PMEMORY_DESCRIPTOR pRootMemDesc = memdescGetRootMemDesc(pPhysMemDesc, &rootOffset); + PIOVAMAPPING pRootIovaMapping; + PIOVAMAPPING pSubMapping = NULL; + + NV_ASSERT(pRootMemDesc != pPhysMemDesc); + + // + // A submapping requires the root mapping to be there, acquire a reference + // on it so that it sticks around for at least as long as the submapping. + // The reference is released when the submapping is destroyed. + // + status = iovaspaceAcquireMapping(pIOVAS, pRootMemDesc); + if (status != NV_OK) + return status; + + // + // The root mapping has been just successfully acquired so it has to be + // there. + // + pRootIovaMapping = memdescGetIommuMap(pRootMemDesc, pVAS->vaspaceId); + NV_ASSERT(pRootIovaMapping != NULL); + + // + // Since this is a submemory descriptor, we need to account for the + // PteAdjust as well, which is included in rootOffset. We don't want to + // account for it in the iovaArray because it is not accounted for in the + // memdesc's PTE array. This should result in a 4K-aligned root offset. + // + rootOffset -= pPhysMemDesc->PteAdjust; + NV_ASSERT((rootOffset & RM_PAGE_MASK) == 0); + + // + // For submemory descriptors, there are two possibilities: + // (1) The root descriptor already has an IOVA mapping for the entire + // allocation in this IOVA space, in which case we just need a subset + // of that. + // (2) The root descriptor does not have an IOVA mapping for any of the + // allocation in this IOVA space, in which case we need to create one + // first. + // + + pSubMapping = _iovaspaceCreateMappingDataFromMemDesc(pPhysMemDesc); + if (pSubMapping == NULL) + { + iovaspaceReleaseMapping(pIOVAS, pRootIovaMapping); + return NV_ERR_NO_MEMORY; + } + + pSubMapping->refcount = 1; + pSubMapping->iovaspaceId = pRootIovaMapping->iovaspaceId; + pSubMapping->link.pParent = pRootIovaMapping; + + pSubMapping->pNext = pRootIovaMapping->link.pChildren; + pRootIovaMapping->link.pChildren = pSubMapping; + + // + // We need to copy over the corresponding entries from the root IOVA + // mapping before we assign it to the physical memdesc. The root offset + // determines where in the root mapping we need to start. 
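+    // For example (illustrative): with 4K pages, a submemory descriptor at
+    // rootOffset 0x3000 into a discontiguous parent takes its IOVAs from
+    // pRootIovaMapping->iovaArray[3] onward (0x3000 >> RM_PAGE_SHIFT == 3).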
+ // + if (memdescGetContiguity(pPhysMemDesc, AT_CPU)) + { + pSubMapping->iovaArray[0] = pRootIovaMapping->iovaArray[0] + rootOffset; + } + else + { + NvU64 i, j; + NV_ASSERT(((rootOffset >> RM_PAGE_SHIFT) + pPhysMemDesc->PageCount) <= + pRootMemDesc->PageCount); + for (i = (rootOffset >> RM_PAGE_SHIFT), j = 0; + j < pPhysMemDesc->PageCount && i < pRootMemDesc->PageCount; i++, j++) + { + pSubMapping->iovaArray[j] = pRootIovaMapping->iovaArray[i]; + } + } + + memdescAddIommuMap(pPhysMemDesc, pSubMapping); + + ++pIOVAS->mappingCount; + + return NV_OK; +} + +static void +_iovaspaceDestroySubmapping +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + PMEMORY_DESCRIPTOR pPhysMemDesc = pIovaMapping->pPhysMemDesc; + PIOVAMAPPING pRootIovaMapping = pIovaMapping->link.pParent; + PIOVAMAPPING pTmpIovaMapping = pRootIovaMapping->link.pChildren; + + memdescRemoveIommuMap(pPhysMemDesc, pIovaMapping); + + if (pTmpIovaMapping == pIovaMapping) + { + pRootIovaMapping->link.pChildren = pIovaMapping->pNext; + } + else + { + while (pTmpIovaMapping != NULL && pTmpIovaMapping->pNext != pIovaMapping) + { + pTmpIovaMapping = pTmpIovaMapping->pNext; + } + + if (pTmpIovaMapping != NULL) + { + pTmpIovaMapping->pNext = pIovaMapping->pNext; + } + else + { + // Not found in the root submappings list? + NV_ASSERT(pTmpIovaMapping != NULL); + } + } + + portMemFree(pIovaMapping); + --pIOVAS->mappingCount; + + // + // After destroying a submapping, release its reference on the root mapping. + // The reference was acquired in _iovaspaceCreateSubmapping(). + // + iovaspaceReleaseMapping(pIOVAS, pRootIovaMapping); +} + +static NV_STATUS +_iovaspaceCreateMapping +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + NV_STATUS status; + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + NV_ADDRESS_SPACE addressSpace; + PIOVAMAPPING pIovaMapping = NULL; + OBJGPU *pMappingGpu = NULL; + + // + // The source memdesc has to be allocated to acquire an I/O VA space + // mapping, because the OS layer will be setting up a layer of indirection + // that assumes the PTEs in the memdesc are valid. There is no requirement + // that it be mapped to the CPU at this point. 
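+    // (The new mapping below is first seeded as an identity copy of the
+    // memdesc's PTE array; osIovaMap() is then expected to rewrite the
+    // entries wherever the platform IOMMU requires translation.)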
+ // + if (pPhysMemDesc == NULL) + { + NV_ASSERT(pPhysMemDesc != NULL); + return NV_ERR_INVALID_ARGUMENT; + } + + pMappingGpu = gpumgrGetGpuFromId(pVAS->vaspaceId); + addressSpace = memdescGetAddressSpace(pPhysMemDesc); + + // Only support SYSMEM or indirect peer mappings + if ((addressSpace != ADDR_SYSMEM) && + !gpumgrCheckIndirectPeer(pMappingGpu, pPhysMemDesc->pGpu)) + { + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + pIovaMapping = _iovaspaceCreateMappingDataFromMemDesc(pPhysMemDesc); + if (pIovaMapping == NULL) + { + return NV_ERR_NO_MEMORY; + } + + // Initialize the mapping as an identity mapping for the OS layer + if (memdescGetContiguity(pPhysMemDesc, AT_CPU)) + { + pIovaMapping->iovaArray[0] = memdescGetPte(pPhysMemDesc, AT_CPU, 0); + } + else + { + NvU32 i; + for (i = 0; i < pPhysMemDesc->PageCount; i++) + { + pIovaMapping->iovaArray[i] = memdescGetPte(pPhysMemDesc, AT_CPU, i); + } + } + + pIovaMapping->iovaspaceId = pVAS->vaspaceId; + pIovaMapping->refcount = 1; + + status = osIovaMap(pIovaMapping); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to map memdesc into I/O VA space 0x%x (status = 0x%x)\n", + pVAS->vaspaceId, status); + goto error; + } + + memdescAddIommuMap(pPhysMemDesc, pIovaMapping); + ++pIOVAS->mappingCount; + + return NV_OK; + +error: + portMemFree(pIovaMapping); + pIovaMapping = NULL; + + return status; +} + +NV_STATUS +iovaspaceAcquireMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + PIOVAMAPPING pIovaMapping = memdescGetIommuMap(pPhysMemDesc, pVAS->vaspaceId); + + if (pIovaMapping) + { + // If the mapping is already there, just increment its refcount. + NV_ASSERT(pIovaMapping->refcount != 0); + ++pIovaMapping->refcount; + return NV_OK; + } + + if (memdescIsSubMemoryMemDesc(pPhysMemDesc)) + return _iovaspaceCreateSubmapping(pIOVAS, pPhysMemDesc); + else + return _iovaspaceCreateMapping(pIOVAS, pPhysMemDesc); +} + +static void +_iovaspaceDestroyRootMapping +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + PMEMORY_DESCRIPTOR pPhysMemDesc = pIovaMapping->pPhysMemDesc; + PIOVAMAPPING pNextIovaMapping, pTmpIovaMapping; + + // + // Increment the refcount to guarantee that destroying the last submapping + // won't end up trying to destroy the root mapping we are already + // destroying. + // + ++pIovaMapping->refcount; + + // + // Clear out any submappings underneath this mapping, since they will no + // longer be valid. 
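+    //
+    // Each submapping holds a reference on this root mapping, so destroying
+    // one below drops that reference; the refcount bump above keeps the
+    // count from reaching zero mid-teardown and re-entering this destructor.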
+ // + pNextIovaMapping = pIovaMapping->link.pChildren; + while (pNextIovaMapping != NULL) + { + pTmpIovaMapping = pNextIovaMapping->pNext; + _iovaspaceDestroySubmapping(pIOVAS, pNextIovaMapping); + pNextIovaMapping = pTmpIovaMapping; + } + + memdescRemoveIommuMap(pPhysMemDesc, pIovaMapping); + + osIovaUnmap(pIovaMapping); + portMemFree(pIovaMapping); + + --pIOVAS->mappingCount; +} + +void +iovaspaceDestroyMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + if (memdescIsSubMemoryMemDesc(pIovaMapping->pPhysMemDesc)) + _iovaspaceDestroySubmapping(pIOVAS, pIovaMapping); + else + _iovaspaceDestroyRootMapping(pIOVAS, pIovaMapping); +} + +void +iovaspaceReleaseMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + if (pIovaMapping == NULL) + { + NV_ASSERT(0); + return; + } + + if (pIovaMapping->refcount == 0) + NV_ASSERT(pIovaMapping->refcount > 0); + + if (--pIovaMapping->refcount != 0) + return; + + iovaspaceDestroyMapping(pIOVAS, pIovaMapping); +} + +OBJIOVASPACE *iovaspaceFromId(NvU32 iovaspaceId) +{ + OBJVASPACE *pVAS; + OBJVMM *pVmm = SYS_GET_VMM(SYS_GET_INSTANCE()); + NV_STATUS status = vmmGetVaspaceFromId(pVmm, iovaspaceId, IO_VASPACE_A, &pVAS); + + if (status != NV_OK) + return NULL; + + return dynamicCast(pVAS, OBJIOVASPACE); +} + +OBJIOVASPACE *iovaspaceFromMapping(PIOVAMAPPING pIovaMapping) +{ + OBJIOVASPACE *pIOVAS = iovaspaceFromId(pIovaMapping->iovaspaceId); + + // + // The IOVASPACE has to be there as the mapping is referencing it. If it's + // not, the mapping has been left dangling outlasting the IOVAS it was + // under. + // + NV_ASSERT(pIOVAS != NULL); + + return pIOVAS; +} + +void iovaMappingDestroy(PIOVAMAPPING pIovaMapping) +{ + OBJIOVASPACE *pIOVAS = iovaspaceFromMapping(pIovaMapping); + + NV_ASSERT_OR_RETURN_VOID(pIOVAS != NULL); + iovaspaceDestroyMapping(pIOVAS, pIovaMapping); +} + +#endif // (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_MODS_FEATURES) && !NVCPU_IS_ARM diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c new file mode 100644 index 0000000..41ee509 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c @@ -0,0 +1,962 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "mem_mgr/mem.h"
+
+#include "gpu/gpu.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "gpu/disp/disp_objs.h"
+#include "gpu/mem_mgr/mem_desc.h"
+#include "os/os.h"
+#include "core/locks.h"
+#include "gpu/device/device.h"
+#include "gpu/subdevice/subdevice.h"
+#include "vgpu/rpc.h"
+
+#include "class/cl0041.h" // NV04_MEMORY
+#include "class/cl003e.h" // NV01_MEMORY_SYSTEM
+#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
+
+NV_STATUS
+memConstruct_IMPL
+(
+    Memory *pMemory,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+    RsResourceRef *pParentRef = pResourceRef->pParentRef;
+
+    //
+    // Common initialization used for both normal construction & copy
+    // constructor
+    //
+
+    // NULL if parent isn't a device
+    pMemory->pDevice = dynamicCast(pParentRef->pResource, Device);
+
+    // NULL if parent isn't a subdevice
+    pMemory->pSubDevice = dynamicCast(pParentRef->pResource, Subdevice);
+
+    // If parent subdevice, grandparent must be a device
+    if (pMemory->pSubDevice)
+    {
+        RsResourceRef *pGrandParentRef = pParentRef->pParentRef;
+
+        pMemory->pDevice = dynamicCast(pGrandParentRef->pResource, Device);
+
+        if (pMemory->pDevice == NULL)
+            return NV_ERR_INVALID_OBJECT_HANDLE;
+    }
+
+    // If child of device, we have a pGpu
+    if (pMemory->pDevice)
+    {
+        // NOTE: pGpu and pDevice may be NULL for NoDeviceMemory
+        pMemory->pGpu = CliGetGpuFromContext(pResourceRef, &pMemory->bBcResource);
+
+        // Set thread BC state
+        gpuSetThreadBcState(pMemory->pGpu, pMemory->bBcResource);
+    }
+
+    if (RS_IS_COPY_CTOR(pParams))
+    {
+        //
+        // Copy constructor path (NvRmDupObject)
+        //
+        return memCopyConstruct_IMPL(pMemory, pCallContext, pParams);
+    }
+    else
+    {
+        //
+        // Default constructor path (NvRmAlloc)
+        //
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+memGetMapAddrSpace_IMPL
+(
+    Memory *pMemory,
+    CALL_CONTEXT *pCallContext,
+    NvU32 mapFlags,
+    NV_ADDRESS_SPACE *pAddrSpace
+)
+{
+    NV_ADDRESS_SPACE addrSpace;
+    OBJGPU *pGpu = pMemory->pGpu;
+    NvBool bBcResource = pMemory->bBcResource;
+    MEMORY_DESCRIPTOR *pMemDesc = NULL;
+
+    if (pGpu == NULL)
+        return NV_ERR_INVALID_OBJECT;
+
+    gpuSetThreadBcState(pGpu, bBcResource);
+
+    pMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pGpu);
+
+    NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, pMemDesc, mapFlags, &addrSpace));
+
+    if (addrSpace == ADDR_SYSMEM)
+    {
+        if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_BAR0_REFLECT))
+        {
+            addrSpace = ADDR_REGMEM;
+        }
+        else if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_BAR1_REFLECT))
+        {
+            addrSpace = ADDR_FBMEM;
+        }
+    }
+
+    if (pAddrSpace)
+        *pAddrSpace = addrSpace;
+
+    return NV_OK;
+}
+
+void
+memDestruct_IMPL
+(
+    Memory *pMemory
+)
+{
+    OBJGPU *pGpu = pMemory->pGpu;
+    NvHandle hClient = RES_GET_CLIENT_HANDLE(pMemory);
+    NvHandle hParent = RES_GET_PARENT_HANDLE(pMemory);
+    NvHandle hMemory = RES_GET_HANDLE(pMemory);
+    NV_STATUS status = NV_OK;
+
+    //
+    // The default destructor is used when memConstructCommon() is called by
+    // the subclass but not memDestructCommon().
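+    //
+    // Teardown order below, for reference: memDestructCommon() unlinks the
+    // object from client/device tracking, memdescFree() releases the backing
+    // memory, and memdescDestroy() drops the descriptor itself.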
+ // + if (pMemory->bConstructed && pMemory->pMemDesc != NULL) + { + // Remove the system memory reference from the client + memDestructCommon(pMemory); + memdescFree(pMemory->pMemDesc); + memdescDestroy(pMemory->pMemDesc); + } + + // if the allocation is RPC-ed, free using RPC + if (pMemory->bRpcAlloc && (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))) + { + NV_RM_RPC_FREE(pGpu, hClient, hParent, hMemory, status); + NV_ASSERT(status == NV_OK); + } +} + +NV_STATUS +memCreateMemDesc_IMPL +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR **ppMemDesc, + NV_ADDRESS_SPACE addrSpace, + NvU64 FBOffset, + NvU64 length, + NvU32 attr, + NvU32 attr2 +) +{ + NV_STATUS status = NV_OK; + NvU32 CpuCacheAttrib, gpuCacheAttrib; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + *ppMemDesc = NULL; + + if (addrSpace == ADDR_SYSMEM) + NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr), NV_ERR_INVALID_ARGUMENT); + + // setup the CpuCacheAttrib as well.. (if the caller doesn't specify anything it will be 0=UNCACHED) + switch (DRF_VAL(OS32, _ATTR, _COHERENCY, attr)) + { + case NVOS32_ATTR_COHERENCY_UNCACHED: + CpuCacheAttrib = NV_MEMORY_UNCACHED; + break; + case NVOS32_ATTR_COHERENCY_WRITE_COMBINE: + CpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + break; + case NVOS32_ATTR_COHERENCY_CACHED: + case NVOS32_ATTR_COHERENCY_WRITE_THROUGH: + case NVOS32_ATTR_COHERENCY_WRITE_PROTECT: + case NVOS32_ATTR_COHERENCY_WRITE_BACK: + CpuCacheAttrib = NV_MEMORY_CACHED; + break; + default: + NV_ASSERT(0); + CpuCacheAttrib = NV_MEMORY_UNCACHED; + break; + } + + gpuCacheAttrib = FLD_TEST_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _YES, attr2) ? NV_MEMORY_CACHED : NV_MEMORY_UNCACHED; + + // Create and fill in a memory descriptor + status = memdescCreate(&pMemDesc, pGpu, length, 0, NV_TRUE, addrSpace, + CpuCacheAttrib, + MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addrSpace)); + if (status == NV_OK) + { + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + MEMORY_DESCRIPTOR *pMemDescNext = pMemDesc->_pNext; + while (pMemDescNext) + { + memdescDescribe(pMemDescNext, addrSpace, FBOffset, length); + memdescSetGpuCacheAttrib(pMemDescNext, gpuCacheAttrib); + pMemDescNext = pMemDescNext->_pNext; + } + } + else + { + memdescDescribe(pMemDesc, addrSpace, FBOffset, length); + memdescSetGpuCacheAttrib(pMemDesc, gpuCacheAttrib); + } + + *ppMemDesc = pMemDesc; + } + + return status; +} + +NV_STATUS +memCreateKernelMapping_IMPL +( + Memory *pMemory, + NvU32 Protect, + NvBool bClear +) +{ + NV_STATUS status; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + + if (pMemory->KernelVAddr == NvP64_NULL) + { + if (memdescGetAddressSpace(pMemory->pMemDesc) != ADDR_SYSMEM) + { + return NV_ERR_NOT_SUPPORTED; + } + + status = memdescMap(pMemory->pMemDesc, 0, pMemory->Length, NV_TRUE, + Protect, &pMemory->KernelVAddr, &pMemory->KernelMapPriv); + + if (status != NV_OK) + { + pMemory->KernelVAddr = NvP64_NULL; + pMemory->KernelMapPriv = NvP64_NULL; + return status; + } + + memdescSetKernelMapping(pMemory->pMemDesc, pMemory->KernelVAddr); + memdescSetKernelMappingPriv(pMemory->pMemDesc, pMemory->KernelMapPriv); + + if (bClear) + { + portMemSet(NvP64_VALUE(pMemory->KernelVAddr), 0, pMemory->Length); + } + } + + return NV_OK; +} + +RM_ATTR_PAGE_SIZE +dmaNvos32ToPageSizeAttr +( + NvU32 attr, + NvU32 attr2 +) +{ + switch (DRF_VAL(OS32, _ATTR, _PAGE_SIZE, attr)) + { + case NVOS32_ATTR_PAGE_SIZE_DEFAULT: + return RM_ATTR_PAGE_SIZE_DEFAULT; + case NVOS32_ATTR_PAGE_SIZE_4KB: + return RM_ATTR_PAGE_SIZE_4KB; + case NVOS32_ATTR_PAGE_SIZE_BIG: + return 
RM_ATTR_PAGE_SIZE_BIG; + case NVOS32_ATTR_PAGE_SIZE_HUGE: + switch (DRF_VAL(OS32, _ATTR2, _PAGE_SIZE_HUGE, attr2)) + { + case NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT: + case NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB: + return RM_ATTR_PAGE_SIZE_HUGE; + case NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB: + return RM_ATTR_PAGE_SIZE_512MB; + } + break; + } + + NV_ASSERT_FAILED("Invalid attr and attr2 page size arguments"); + return RM_ATTR_PAGE_SIZE_DEFAULT; +} + +NV_STATUS +memConstructCommon_IMPL +( + Memory *pMemory, + NvU32 categoryClassId, + NvU32 flags, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 heapOwner, + Heap *pHeap, + NvU32 attr, + NvU32 attr2, + NvU32 Pitch, + NvU32 type, + NvU32 tag, + HWRESOURCE_INFO *pHwResource +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS status = NV_OK; + + if (pMemDesc == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // initialize the memory description + pMemory->categoryClassId = categoryClassId; + pMemory->pMemDesc = pMemDesc; + pMemory->Length = pMemDesc->Size; + pMemory->RefCount = 1; + pMemory->HeapOwner = heapOwner; + pMemory->pHeap = pHeap; + pMemory->Attr = attr; + pMemory->Attr2 = attr2; + pMemory->Pitch = Pitch; + pMemory->Type = type; + pMemory->Flags = flags; + pMemory->tag = tag; + pMemory->isMemDescOwner = NV_TRUE; + pMemory->bRpcAlloc = NV_FALSE; + + // We are finished if this instance is device-less + if (pMemory->pDevice == NULL) + { + goto done; + } + + if (pMemDesc->pGpu == NULL) + { + return NV_ERR_INVALID_STATE; + } + + // Memory has hw resources associated with it that need to be tracked. + if (pHwResource != NULL) + { + pMemory->pHwResource = portMemAllocNonPaged(sizeof(HWRESOURCE_INFO)); + if (pMemory->pHwResource != NULL) + { + *pMemory->pHwResource = *pHwResource; // struct copy + pMemory->pHwResource->refCount = 1; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate HWRESOURCE_INFO tracking structure\n"); + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + NV_ASSERT(status == NV_OK); + + // + // Apply attr and flags to the memory descriptor. Ideally all should + // be handled before we get here. 
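+    //
+    // For reference, FLD_TEST_DRF() below is a field compare; roughly:
+    //
+    //     FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2)
+    //         == (DRF_VAL(OS32, _ATTR2, _PROTECTION_USER, attr2) ==
+    //             NVOS32_ATTR2_PROTECTION_USER_READ_ONLY)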
+    //
+
+    // Check whether encryption should be enabled
+    if (flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED)
+    {
+        pGpu = pMemDesc->pGpu;
+        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+        memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_ENCRYPTED, NV_TRUE);
+        SLI_LOOP_END
+    }
+
+    if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2))
+    {
+        pGpu = pMemDesc->pGpu;
+        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+        memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_USER_READ_ONLY, NV_TRUE);
+        SLI_LOOP_END
+    }
+
+    if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2))
+    {
+        pGpu = pMemDesc->pGpu;
+        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+        memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_DEVICE_READ_ONLY, NV_TRUE);
+        SLI_LOOP_END
+    }
+
+    // setup GpuP2PCacheAttrib
+    switch (DRF_VAL(OS32, _ATTR2, _P2P_GPU_CACHEABLE, attr2))
+    {
+        case NVOS32_ATTR2_P2P_GPU_CACHEABLE_YES:
+            pGpu = pMemDesc->pGpu;
+            SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+            memdescSetGpuP2PCacheAttrib(memdescGetMemDescFromGpu(pMemDesc, pGpu), NV_MEMORY_CACHED);
+            SLI_LOOP_END
+            break;
+        default:
+            NV_ASSERT(0);
+            /*FALLTHROUGH*/
+        case NVOS32_ATTR2_P2P_GPU_CACHEABLE_NO:
+        case NVOS32_ATTR2_P2P_GPU_CACHEABLE_DEFAULT:
+            pGpu = pMemDesc->pGpu;
+            SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY)
+            memdescSetGpuP2PCacheAttrib(memdescGetMemDescFromGpu(pMemDesc, pGpu), NV_MEMORY_UNCACHED);
+            SLI_LOOP_END
+            break;
+    }
+
+    //
+    // Page size may be specified at allocation. This is for Fermi-family
+    // chips and is a nop for previous generations. At this point the HAL call
+    // to set the page size should never fail as the memory was just allocated.
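+    //
+    // Example (hypothetical attributes): an allocation carrying
+    // _ATTR_PAGE_SIZE_HUGE plus _ATTR2_PAGE_SIZE_HUGE_2MB resolves through
+    // dmaNvos32ToPageSizeAttr() to RM_ATTR_PAGE_SIZE_HUGE before the HAL call.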
+ // + if (pMemDesc->pGpu) + { + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + RM_ATTR_PAGE_SIZE pageSizeAttr = dmaNvos32ToPageSizeAttr(attr, attr2); + status = memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), memdescGetMemDescFromGpu(pMemDesc, pGpu), + AT_GPU, pageSizeAttr); + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + SLI_LOOP_END + + if (status != NV_OK) + { + goto done; + } + } + + pMemory->Node.keyStart = RES_GET_HANDLE(pMemory); + pMemory->Node.keyEnd = RES_GET_HANDLE(pMemory); + pMemory->Node.Data = pMemory; + + status = btreeInsert(&pMemory->Node, &pMemory->pDevice->DevMemoryTable); + if (status != NV_OK) + goto done; + + // Initialize the circular list item for tracking dup/sharing of pMemDesc + pMemory->dupListItem.pNext = pMemory->dupListItem.pPrev = pMemory; + +done: + if (status != NV_OK) + { + if (pMemory != NULL && pMemory->pHwResource != NULL) + { + portMemFree(pMemory->pHwResource); + } + } + else + { + pMemory->bConstructed = NV_TRUE; + } + + return status; +} + +static void +_memDestructCommonWithDevice +( + Memory *pMemory +) +{ + NvHandle hMemory = RES_GET_HANDLE(pMemory); + OBJGPU *pGpu = pMemory->pGpu; + Device *pDevice = pMemory->pDevice; + RsResourceRef *pDeviceRef = RES_GET_REF(pDevice); + NvHandle hDevice = RES_GET_HANDLE(pDevice); + Subdevice *pSubDeviceInfo; + DispCommon *pDispCommon; + RsClient *pRsClient = RES_GET_CLIENT(pMemory); + NV_STATUS status; + RS_ITERATOR subDevIt; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + gpuSetThreadBcState(pGpu, pMemory->bBcResource); + + subDevIt = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pRsClient, &subDevIt)) + { + pSubDeviceInfo = dynamicCast(subDevIt.pResourceRef->pResource, Subdevice); + + if (hMemory == pSubDeviceInfo->hNotifierMemory) + { + pSubDeviceInfo->hNotifierMemory = NV01_NULL_OBJECT; + pSubDeviceInfo->pNotifierMemory = NULL; + } + } + + dispcmnGetByDevice(pRsClient, hDevice, &pDispCommon); + + if (pDispCommon != NULL) + { + DisplayApi *pDisplayApi = staticCast(pDispCommon, DisplayApi); + if (pDisplayApi->hNotifierMemory == hMemory) + { + pDisplayApi->hNotifierMemory = NV01_NULL_OBJECT; + pDisplayApi->pNotifierMemory = NULL; + } + } + + NV_ASSERT_OK_OR_GOTO(status, btreeUnlink(&pMemory->Node, &pDevice->DevMemoryTable), done); + + pMemory->pMemDesc->DupCount--; + + // Choose the new owner + if (pMemory->isMemDescOwner) + { + (pMemory->dupListItem.pNext)->isMemDescOwner = NV_TRUE; + } + // Remove from circular list tracking dup/sharing of pMemDesc + pMemory->dupListItem.pPrev->dupListItem.pNext = pMemory->dupListItem.pNext; + pMemory->dupListItem.pNext->dupListItem.pPrev = pMemory->dupListItem.pPrev; + pMemory->dupListItem.pNext = pMemory->dupListItem.pPrev = NULL; + + pMemory->bConstructed = NV_FALSE; + +done: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + // The unmap call(s) above may have changed the broadcast state so restore it here + gpuSetThreadBcState(pGpu, pMemory->bBcResource); +} + +void +memDestructCommon_IMPL +( + Memory *pMemory +) +{ + OBJGPU *pGpu = pMemory->pGpu; + RsResourceRef *pResourceRef = RES_GET_REF(pMemory); + RsResourceRef *pParentRef = pResourceRef->pParentRef; + RsClient *pClient = RES_GET_CLIENT(pMemory); + NvHandle hClient = pClient->hClient; + NvHandle hParent = pParentRef->hResource; + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!pMemory->bConstructed) + 
return; + + NV_ASSERT_OK(memdescDeregisterFromGSP(pGpu, hClient, hParent, hMemory)); + + // Do device specific teardown if we have a device + if (pMemory->pDevice != NULL) + { + _memDestructCommonWithDevice(pMemory); + } + else + { + pMemory->bConstructed = NV_FALSE; + } + + if (pMemory->KernelVAddr != NvP64_NULL) + { + memdescUnmap(pMemory->pMemDesc, NV_TRUE, osGetCurrentProcess(), + pMemory->KernelVAddr, pMemory->KernelMapPriv); + pMemory->KernelVAddr = NvP64_NULL; + pMemory->KernelMapPriv = NvP64_NULL; + } +} + +NV_STATUS +memGetByHandleAndDevice_IMPL +( + RsClient *pClient, + NvHandle hMemory, + NvHandle hDevice, + Memory **ppMemory +) +{ + NV_STATUS status; + + status = memGetByHandle(pClient, hMemory, ppMemory); + if (status != NV_OK) + return status; + + if (hDevice != RES_GET_HANDLE((*ppMemory)->pDevice)) + { + *ppMemory = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + + return NV_OK; +} + +NV_STATUS +memGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hMemory, + Memory **ppMemory +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppMemory = NULL; + + status = clientGetResourceRef(pClient, hMemory, &pResourceRef); + if (status != NV_OK) + return status; + + *ppMemory = dynamicCast(pResourceRef->pResource, Memory); + + if (*ppMemory == NULL) + return NV_ERR_INVALID_OBJECT_HANDLE; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(*ppMemory)); + + return NV_OK; +} + +NV_STATUS +memGetByHandleAndGroupedGpu_IMPL +( + RsClient *pClient, + NvHandle hMemory, + OBJGPU *pGpu, + Memory **ppMemory +) +{ + Device *pDevice; + NV_STATUS status; + + // Get device handle + status = deviceGetByInstance(pClient, gpuGetDeviceInstance(pGpu), &pDevice); + if (status != NV_OK) + return NV_ERR_INVALID_OBJECT_HANDLE; + + return memGetByHandleAndDevice(pClient, hMemory, RES_GET_HANDLE(pDevice), ppMemory); +} + +NV_STATUS +memIsReady_IMPL +( + Memory *pMemory +) +{ + if (pMemory->pMemDesc == NULL) + return NV_ERR_INVALID_OBJECT; + + return NV_OK; +} + +NV_STATUS +memControl_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + + if (!pMemory->pGpu) + return NV_ERR_INVALID_OBJECT_PARENT; + + if (REF_VAL(NVXXXX_CTRL_CMD_CLASS, pParams->cmd) == NV04_MEMORY) + { + if (pMemory->categoryClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) + return NV_ERR_NOT_SUPPORTED; + } + + pRmCtrlParams->pGpu = pMemory->pGpu; + + gpuSetThreadBcState(pMemory->pGpu, pMemory->bBcResource); + + return resControl_IMPL(staticCast(pMemory, RsResource), pCallContext, pParams); +} + +NV_STATUS +memCopyConstruct_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pDstClient = pCallContext->pClient; + RsClient *pSrcClient = pParams->pSrcClient; + RsResourceRef *pDstRef = pCallContext->pResourceRef; + RsResourceRef *pSrcRef = pParams->pSrcRef; + Memory *pMemorySrc = dynamicCast(pSrcRef->pResource, Memory); + Memory *pMemoryDst = pMemory; + OBJGPU *pSrcGpu = NULL; + OBJGPU *pDstGpu = NULL; + NV_STATUS status = NV_OK; + NvBool bReleaseGpuLock = NV_FALSE; + Device *pSrcDevice = NULL; + Device *pDstDevice = NULL; + Subdevice *pSrcSubDevice = NULL; + Subdevice *pDstSubDevice = NULL; + RsResourceRef *pSrcParentRef = pSrcRef->pParentRef; + RsResourceRef *pDstParentRef = pDstRef->pParentRef; + + NV_ASSERT_OR_RETURN(pSrcParentRef != NULL, NV_ERR_INVALID_OBJECT_PARENT); + NV_ASSERT_OR_RETURN(pDstParentRef != NULL, 
NV_ERR_INVALID_OBJECT_PARENT);
+    NV_ASSERT_OR_RETURN(pMemorySrc != NULL, NV_ERR_INVALID_OBJECT_HANDLE);
+
+    NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemorySrc));
+
+    //
+    // Must return early when parent is Client.
+    // This copy constructor is very device-specific so it is up
+    // to the device-less Memory subclasses to define their own dup behavior.
+    //
+    if (RES_GET_CLIENT_HANDLE(pMemoryDst) == RES_GET_PARENT_HANDLE(pMemoryDst))
+    {
+        NV_ASSERT_OR_RETURN(RES_GET_CLIENT_HANDLE(pMemorySrc) ==
+                            RES_GET_PARENT_HANDLE(pMemorySrc),
+                            NV_ERR_INVALID_OBJECT_PARENT);
+        return NV_OK;
+    }
+
+    pSrcGpu = pMemorySrc->pGpu;
+    pDstGpu = pMemoryDst->pGpu;
+    pSrcDevice = pMemorySrc->pDevice;
+    pDstDevice = pMemoryDst->pDevice;
+    pSrcSubDevice = pMemorySrc->pSubDevice;
+    pDstSubDevice = pMemoryDst->pSubDevice;
+
+    // Only children of device are supported
+    NV_ASSERT_OR_RETURN(pSrcDevice != NULL, NV_ERR_INVALID_OBJECT_PARENT);
+    NV_ASSERT_OR_RETURN(pDstDevice != NULL, NV_ERR_INVALID_OBJECT_PARENT);
+
+    if (!!pSrcSubDevice != !!pDstSubDevice)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Parent type mismatch between Src and Dst objects. "
+                  "Both should be either device or subDevice\n");
+        return NV_ERR_INVALID_OBJECT_PARENT;
+    }
+
+    // RS-TODO: This should use pMemorySrc->bBcResource when adding full support for subdevice duping
+    gpuSetThreadBcState(pSrcGpu, NV_TRUE);
+
+    if (!rmGpuLockIsOwner() &&
+        !(rmDeviceGpuLockIsOwner(pSrcGpu->gpuInstance) &&
+          rmDeviceGpuLockIsOwner(pDstGpu->gpuInstance)))
+    {
+        // LOCK: acquire GPUs lock
+        if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM)) != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "Failed to acquire GPU locks, error 0x%x\n", status);
+            return status;
+        }
+
+        bReleaseGpuLock = NV_TRUE;
+    }
+
+    NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+        memCheckCopyPermissions(pMemorySrc, pDstGpu, pDstClient->hClient), done);
+
+    // Initialize Memory
+    pMemoryDst->categoryClassId = pMemorySrc->categoryClassId;
+    pMemoryDst->Length = pMemorySrc->Length;
+    pMemoryDst->HeapOwner = pMemorySrc->HeapOwner;
+    pMemoryDst->pHeap = pMemorySrc->pHeap;
+    pMemoryDst->pMemDesc = pMemorySrc->pMemDesc;
+    pMemoryDst->KernelVAddr = NvP64_NULL;
+    pMemoryDst->KernelMapPriv = NvP64_NULL;
+    pMemoryDst->Attr = pMemorySrc->Attr;
+    pMemoryDst->Attr2 = pMemorySrc->Attr2;
+    pMemoryDst->Pitch = pMemorySrc->Pitch;
+    pMemoryDst->Type = pMemorySrc->Type;
+    pMemoryDst->Flags = pMemorySrc->Flags;
+    pMemoryDst->tag = pMemorySrc->tag;
+    pMemoryDst->pHwResource = pMemorySrc->pHwResource;
+    pMemoryDst->isMemDescOwner = NV_FALSE;
+    pMemoryDst->bRpcAlloc = pMemorySrc->bRpcAlloc;
+
+    // Link in the new device memory mapping
+    pMemoryDst->Node.keyStart = RES_GET_HANDLE(pMemoryDst);
+    pMemoryDst->Node.keyEnd = RES_GET_HANDLE(pMemoryDst);
+    pMemoryDst->Node.Data = pMemoryDst;
+
+    status = btreeInsert(&pMemoryDst->Node, &pDstDevice->DevMemoryTable);
+    if (status != NV_OK)
+        goto done;
+
+    {
+        OBJGPU *pGpu = pDstGpu; // Need pGpu for SLI loop
+
+        gpuSetThreadBcState(pDstGpu, NV_TRUE);
+        SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+        if (memdescGetPageSize(memdescGetMemDescFromGpu(pMemoryDst->pMemDesc, pGpu), AT_GPU) == 0)
+        {
+            status = memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu),
+                                                  memdescGetMemDescFromGpu(pMemoryDst->pMemDesc, pGpu),
+                                                  AT_GPU, RM_ATTR_PAGE_SIZE_DEFAULT);
+            NV_ASSERT(status == NV_OK);
+        }
+        SLI_LOOP_END
+    }
+
+    //
+    // ref-count increments for shared structs after all places where we
+    // could return early.
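+    //
+    // Dup-list sketch (illustrative): duping B from a single-element ring
+    // {A} below yields A.pNext == B, B.pPrev == A, B.pNext == A and
+    // A.pPrev == B, so memdesc ownership can migrate around the ring when
+    // the current owner is freed.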
+ // + if (pMemoryDst->pHwResource != NULL) + pMemoryDst->pHwResource->refCount++; + + memdescAddRef(pMemoryDst->pMemDesc); + pMemoryDst->pMemDesc->DupCount++; + if (pMemoryDst->pMemDesc->Allocated) + pMemoryDst->pMemDesc->Allocated++; + + // Insert pMemoryDst after pMemorySrc in circular list to track dup/sharing of pMemDesc + pMemoryDst->dupListItem.pNext = pMemorySrc->dupListItem.pNext; + pMemoryDst->dupListItem.pPrev = pMemorySrc; + pMemorySrc->dupListItem.pNext = pMemoryDst; + pMemoryDst->dupListItem.pNext->dupListItem.pPrev = pMemoryDst; + +done: + + // If the original allocation was RPCed, also send the Dup. + if (pMemory->bRpcAlloc && (IS_VIRTUAL(pSrcGpu) || IS_GSP_CLIENT(pSrcGpu))) + { + NV_RM_RPC_DUP_OBJECT(pSrcGpu, pDstClient->hClient, pDstParentRef->hResource, pDstRef->hResource, + pSrcClient->hClient, pSrcRef->hResource, 0, + NV_FALSE, // do not automatically issue RPC_FREE on object free + NULL, + status); + NV_ASSERT(status == NV_OK); + } + + // UNLOCK: release GPUs lock + if (bReleaseGpuLock) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + pMemory->bConstructed = (status == NV_OK); + return status; +} + +NV_STATUS +memGetMemInterMapParams_IMPL +( + Memory *pMemory, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + OBJGPU *pGpu = pParams->pGpu; + RsResourceRef *pMemoryRef = pParams->pMemoryRef; + + MEMORY_DESCRIPTOR *pSrcMemDesc = pMemory->pMemDesc; + Device *pDevice; + Subdevice *pSubdevice; + NvBool bcState = gpumgrGetBcEnabledStatus(pGpu); + + // Don't expect to use default, but safe thing to do is set src=dest + NvHandle hMemoryDevice = 0; + OBJGPU *pSrcGpu = pGpu; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + + if (pMemoryRef->pParentRef != NULL) + { + pDevice = dynamicCast(pMemoryRef->pParentRef->pResource, Device); + if (pDevice != NULL) + { + pSrcGpu = GPU_RES_GET_GPU(pDevice); + hMemoryDevice = RES_GET_HANDLE(pDevice); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + } + else + { + pSubdevice = dynamicCast(pMemoryRef->pParentRef->pResource, Subdevice); + if (pSubdevice != NULL) + { + pSrcGpu = GPU_RES_GET_GPU(pSubdevice); + hMemoryDevice = RES_GET_HANDLE(pSubdevice->pDevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + } + } + + pParams->pSrcGpu = pSrcGpu; + pParams->hMemoryDevice = hMemoryDevice; + + // + // Restore pGpu's bcState in case it was overwritten above (i.e., + // the case that hMemoryDevice and hBroadcastDevice are the same + // device, but a unicast mapping was desired). + // + gpumgrSetBcEnabledStatus(pGpu, bcState); + + // + // Mapping Guest allocated memory in PF is not supported + // + if (pSrcMemDesc->pGpu != pGpu && gpuIsSriovEnabled(pGpu) && + !(memdescGetFlag(pSrcMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED))) + { + // + // Memory allocated by pSrcMemDesc->pGpu needs to be + // remapped for pGpu as requested by client. 
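+    //
+    // Example (illustrative): sysmem allocated under one GPU's device but
+    // mapped on another GPU with SR-IOV enabled sets bDmaMapNeeded, so the
+    // mapping path creates a separate DMA mapping instead of reusing the
+    // original PTEs.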
+ // + pParams->bDmaMapNeeded = NV_TRUE; + } + + pParams->pSrcMemDesc = pSrcMemDesc; + + return NV_OK; +} + +NV_STATUS +memGetMemoryMappingDescriptor_IMPL +( + Memory *pMemory, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + if (pMemory->pGpu != NULL) + { + *ppMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pMemory->pGpu); + } + else + { + *ppMemDesc = pMemory->pMemDesc; + } + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h new file mode 100644 index 0000000..cc0253c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _MM_INTERNAL_H_ +#define _MM_INTERNAL_H_ + +// +// Don't define deprecated definitions for RM MM implementations +// +#define RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020 + +// +// MM API runs within VGPU guest/GSP client. Don't allow direct access to +// physical engine objects/definitions. +// +#define RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_JAN_21_2020 + +#endif // _MM_INTERNAL_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c new file mode 100644 index 0000000..42dd00c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c @@ -0,0 +1,199 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/os_desc_mem.h" +#include "rmapi/client.h" +#include "rmapi/mapping_list.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "gpu/device/device.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "deprecated/rmapi_deprecated.h" + +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +NV_STATUS +osdescConstruct_IMPL +( + OsDescMemory *pOsDescMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + Memory *pMemory = staticCast(pOsDescMemory, Memory); + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS *pUserParams; + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NV_STATUS status; + NvU64 limit; + NvU32 os02Flags; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvHandle hMemory = pCallContext->pResourceRef->hResource; + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pRmAllocParams)) + return NV_OK; + + pUserParams = pRmAllocParams->pAllocParams; + + limit = pUserParams->limit; + + // + // Bug 860684: osCreateMemFromOsDescriptor expects OS02 flags + // from the old NvRmAllocMemory64() interface so we need to + // translate the OS32_ATTR flags to OS02 flags. + // + status = RmDeprecatedConvertOs32ToOs02Flags(pUserParams->attr, + pUserParams->attr2, + pUserParams->flags, + &os02Flags); + + if (status != NV_OK) + { + return status; + } + + // Only kernel user is allowed to register physical address with RM + if (pUserParams->descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR) + { + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + return NV_ERR_NOT_SUPPORTED; + } + } + + if (pUserParams->descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY) + { + // + // We currently allow RmMapMemory on external IO resources which are + // safe to share across processes. For example, NpuResource. + // + // Otherwise we would be affected by the security issues like Bug 1630288. + // + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MAPPING, _NEVER_MAP, os02Flags); + + // + // Force peerMappingOverride check for IO memory registration through + // RmVidHeapCtrl. See Bug 1630288 "[PeerSync] threat related to GPU.." for + // more details. 
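+    //
+    // Net effect of the two overrides, for reference: _MAPPING_NEVER_MAP
+    // rejects later RmMapMemory calls on this object, while
+    // _PEER_MAP_OVERRIDE_REQUIRED makes peer mappings strictly opt-in.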
+ // + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PEER_MAP_OVERRIDE, _REQUIRED, os02Flags); + } + + // + // Create and fill in the memory descriptor based on the current + // state of the OS descriptor. + // + status = osCreateMemFromOsDescriptor(pGpu, + pUserParams->descriptor, + hClient, + os02Flags, + &limit, + &pMemDesc, + pUserParams->descriptorType, + pRmAllocParams->pSecInfo->privLevel); + + if (status != NV_OK) + { + return status; + } + + if (pMemoryManager->bAllowSysmemHugePages && pMemDesc->bForceHugePages) + { + pUserParams->attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + pUserParams->attr2 = DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _DEFAULT); + } + + status = memConstructCommon(pMemory, NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, pUserParams->flags, + pMemDesc, 0, NULL, pUserParams->attr, pUserParams->attr2, 0, 0, + pUserParams->tag, (HWRESOURCE_INFO *)NULL); + + if (status == NV_OK) + { + RsResourceRef *pResourceRef = RES_GET_REF(pMemory); + RsCpuMapping *pCpuMapping = NULL; + NvU32 flags = 0; + flags = FLD_SET_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags); + RS_CPU_MAP_PARAMS dummyParams; + portMemSet(&dummyParams, 0, sizeof(dummyParams)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + refAddMapping(pResourceRef, &dummyParams, pResourceRef->pParentRef, &pCpuMapping)); + + NV_ASSERT_OK_OR_RETURN(CliUpdateMemoryMappingInfo(pCpuMapping, + pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL, + pUserParams->descriptor, NvP64_NULL, + limit+1, flags)); + pCpuMapping->pPrivate->pGpu = pGpu; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + if (IS_VIRTUAL(pGpu)) + { + NV_RM_RPC_ALLOC_MEMORY(pGpu, + hClient, + hParent, + hMemory, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + os02Flags, + pMemDesc, + status); + if (status == NV_OK) + pMemory->bRpcAlloc = NV_TRUE; + + } + } + + // + // RM support for MODS PTE kind in external allocations + // bug 1858656 + // + + // failure case + if (status != NV_OK) + { + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + + return status; +} + +NvBool +osdescCanCopy_IMPL +( + OsDescMemory *pOsDescMemory +) +{ + return RMCFG_FEATURE_PLATFORM_UNIX; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c new file mode 100644 index 0000000..e145d9e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c @@ -0,0 +1,240 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "mem_mgr_internal.h"
+#include "mem_mgr/standard_mem.h"
+#include "vgpu/rpc.h"
+#include "rmapi/client.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "gpu/device/device.h"
+#include "virtualization/hypervisor/hypervisor.h"
+#include "resserv/rs_server.h"
+#include "rmapi/rs_utils.h"
+
+NV_STATUS stdmemValidateParams
+(
+    OBJGPU *pGpu,
+    NvHandle hClient,
+    NV_MEMORY_ALLOCATION_PARAMS *pAllocData
+)
+{
+    NvBool bIso;
+    RS_PRIV_LEVEL privLevel;
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+
+    NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED);
+
+    NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE);
+    privLevel = pCallContext->secInfo.privLevel;
+
+    //
+    // Make sure UMD does not impact the internal allocation flags.
+    // Do this check right after copy in. RM is free to set these flags later
+    //
+    if ((privLevel < RS_PRIV_LEVEL_KERNEL) &&
+        (pAllocData->internalflags != 0))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // These flags don't do anything in this path. No mapping on alloc and
+    // kernel map is controlled by TYPE
+    //
+    pAllocData->flags |= NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED;
+    pAllocData->flags &= ~NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP;
+
+    pAllocData->address = NvP64_NULL;
+
+    bIso = (pAllocData->type == NVOS32_TYPE_PRIMARY) ||
+           (pAllocData->type == NVOS32_TYPE_VIDEO) ||
+           (pAllocData->type == NVOS32_TYPE_CURSOR);
+
+    //
+    // MM-TODO: If surface requires ISO guarantees, ensure it's of the proper
+    // NVOS32_TYPE. Eventually, we should decouple NVOS32_TYPE from conveying
+    // ISO behavior; RM needs to audit NVOS32_TYPE uses wrt ISO determination.
+    //
+    if (!bIso && FLD_TEST_DRF(OS32, _ATTR2, _ISO, _YES, pAllocData->attr2))
+    {
+        NV_PRINTF(LEVEL_INFO, "type is non-ISO but attributes request ISO!\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // Check the PAGE_OFFLINING flag for the client.
+    // If the client is not a ROOT client, then turning PAGE_OFFLINING off is invalid
+    //
+    if (FLD_TEST_DRF(OS32, _ATTR2, _PAGE_OFFLINING, _OFF, pAllocData->attr2))
+    {
+        {
+            // if the client requesting is not kernel mode, return early
+#if defined(DEBUG) || defined(DEVELOP) || defined(NV_VERIF_FEATURES)
+            if (!rmclientIsAdminByHandle(hClient, privLevel))
+#else
+            if (privLevel < RS_PRIV_LEVEL_KERNEL)
+#endif
+            {
+                return NV_ERR_INSUFFICIENT_PERMISSIONS;
+            }
+        }
+    }
+
+    //
+    // If NVOS32_TYPE indicates ISO requirements, set
+    // NVOS32_ATTR2_ISO_YES so it can be used within RM instead of
+    // NVOS32_TYPE for ISO determination.
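+    //
+    // Example (hypothetical): a NVOS32_TYPE_CURSOR request arriving without
+    // _ATTR2_ISO_YES leaves this block with attr2 reading _ATTR2_ISO_YES,
+    // so downstream checks can key off attr2 alone.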
+ // + if (bIso) + { + pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _ISO, _YES, + pAllocData->attr2); + } + + if (!(pAllocData->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END)) + { + NV_ASSERT_OR_RETURN((pAllocData->rangeLo == 0) && + (pAllocData->rangeHi == 0), NV_ERR_INVALID_ARGUMENT); + } + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Attr 0x%x Type 0x%x Attr2 0x%x\n", + pAllocData->attr, pAllocData->type, pAllocData->attr2); + + // Make sure that encryption is supported if it is requested + if ((pAllocData->flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED) && + DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) == NVOS32_ATTR_LOCATION_VIDMEM) + { + NV_PRINTF(LEVEL_ERROR, + "Encryption requested for video memory on a non-0FB chip;\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (FLD_TEST_DRF(OS32, _ATTR2, _ALLOCATE_FROM_SUBHEAP, _YES, pAllocData->attr2)) + { + NV_CHECK_OR_RETURN(LEVEL_ERROR, FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pAllocData->attr), + NV_ERR_INVALID_ARGUMENT); + } + + return NV_OK; +} + +void stdmemDumpInputAllocParams +( + NV_MEMORY_ALLOCATION_PARAMS *pAllocData, + CALL_CONTEXT *pCallContext +) +{ + NV_PRINTF(LEVEL_INFO, "stdmemConstruct input\n"); + NV_PRINTF(LEVEL_INFO, " Owner: 0x%x\n", pAllocData->owner); + NV_PRINTF(LEVEL_INFO, " hMemory: 0x%x\n", pCallContext->pResourceRef->hResource); + NV_PRINTF(LEVEL_INFO, " Type: 0x%x\n", pAllocData->type); + NV_PRINTF(LEVEL_INFO, " Flags: 0x%x\n", pAllocData->flags); + NV_PRINTF(LEVEL_INFO, " Begin: 0x%08llx\n", pAllocData->rangeLo); + NV_PRINTF(LEVEL_INFO, " End: 0x%08llx\n", pAllocData->rangeHi); + NV_PRINTF(LEVEL_INFO, " Height: 0x%x\n", pAllocData->height); + NV_PRINTF(LEVEL_INFO, " Width: 0x%x\n", pAllocData->width); + NV_PRINTF(LEVEL_INFO, " Pitch: 0x%x\n", pAllocData->pitch); + NV_PRINTF(LEVEL_INFO, " Size: 0x%08llx\n", pAllocData->size); + NV_PRINTF(LEVEL_INFO, " Alignment: 0x%08llx\n", pAllocData->alignment); + NV_PRINTF(LEVEL_INFO, " Offset: 0x%08llx\n", pAllocData->offset); + NV_PRINTF(LEVEL_INFO, " Attr: 0x%x\n", pAllocData->attr); + NV_PRINTF(LEVEL_INFO, " Attr2: 0x%x\n", pAllocData->attr2); + NV_PRINTF(LEVEL_INFO, " Format: 0x%x\n", pAllocData->format); + NV_PRINTF(LEVEL_INFO, " ComprCovg: 0x%x\n", pAllocData->comprCovg); + NV_PRINTF(LEVEL_INFO, " ZCullCovg: 0x%x\n", pAllocData->zcullCovg); + NV_PRINTF(LEVEL_INFO, " CtagOffset: 0x%x\n", pAllocData->ctagOffset); + NV_PRINTF(LEVEL_INFO, " hVASpace: 0x%x\n", pAllocData->hVASpace); + NV_PRINTF(LEVEL_INFO, " tag: 0x%x\n", pAllocData->tag); +} + +void stdmemDumpOutputAllocParams +( + NV_MEMORY_ALLOCATION_PARAMS *pAllocData +) +{ + NV_PRINTF(LEVEL_INFO, "stdmemConstruct output\n"); + NV_PRINTF(LEVEL_INFO, " Height: 0x%x\n", pAllocData->height); + NV_PRINTF(LEVEL_INFO, " Width: 0x%x\n", pAllocData->width); + NV_PRINTF(LEVEL_INFO, " Pitch: 0x%x\n", pAllocData->pitch); + NV_PRINTF(LEVEL_INFO, " Size: 0x%08llx\n", pAllocData->size); + NV_PRINTF(LEVEL_INFO, " Alignment: 0x%08llx\n", pAllocData->alignment); + NV_PRINTF(LEVEL_INFO, " Offset: 0x%08llx\n", pAllocData->offset); + NV_PRINTF(LEVEL_INFO, " Attr: 0x%x\n", pAllocData->attr); + NV_PRINTF(LEVEL_INFO, " Attr2: 0x%x\n", pAllocData->attr2); + NV_PRINTF(LEVEL_INFO, " Format: 0x%x\n", pAllocData->format); + NV_PRINTF(LEVEL_INFO, " ComprCovg: 0x%x\n", pAllocData->comprCovg); + NV_PRINTF(LEVEL_INFO, " ZCullCovg: 0x%x\n", pAllocData->zcullCovg); +} + +NV_STATUS +stdmemConstruct_IMPL +( + StandardMemory *pStandardMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + + +NvBool 
stdmemCanCopy_IMPL(StandardMemory *pStandardMemory) +{ + return NV_TRUE; +} + +/*! + * stdmemQueryPageSize + * + * @brief + * Returns page size requested by client. + * + * @param[in] pMemoryManager MemoryManager pointer + * @param[in] hClient Client handle. + * @param[in] pAllocData Pointer to VIDHEAP_ALLOC_DATA + * + * @returns + * The page size in bytes. + */ +NvU32 +stdmemQueryPageSize +( + MemoryManager *pMemoryManager, + NvHandle hClient, + NV_MEMORY_ALLOCATION_PARAMS *pAllocData +) +{ + NvU32 retAttr = pAllocData->attr; + NvU32 retAttr2 = pAllocData->attr2; + + return memmgrDeterminePageSize(pMemoryManager, hClient, pAllocData->size, + pAllocData->format, pAllocData->flags, &retAttr, &retAttr2); +} + +// +// Control calls for system memory objects maintained outside the heap. +// + +NvU32 stdmemGetSysmemPageSize_IMPL(OBJGPU * pGpu, StandardMemory *pStdMemory) +{ + return GPU_GET_MEMORY_MANAGER(pGpu)->sysmemPageSize; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c new file mode 100644 index 0000000..3f2633a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c @@ -0,0 +1,121 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/gpu.h" +#include "mem_mgr_internal.h" +#include "mem_mgr/syncpoint_mem.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "gpu/device/device.h" +#include "rmapi/client.h" + +#include "class/cl00c3.h" // NV01_MEMORY_SYNCPOINT + +NV_STATUS +syncpointConstruct_IMPL +( + SyncpointMemory *pSyncpointMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + Memory *pMemory = staticCast(pSyncpointMemory, Memory); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvP64 physAddr = NvP64_NULL; + NvU64 syncPointBase = 0; + NvU64 limit = 0; + NvU32 offset = 0; + NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS *pAllocParams = pRmAllocParams->pAllocParams; + NV_STATUS status = NV_OK; + OBJGPU *pGpu = pMemory->pGpu; + + if (pRmAllocParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, "%s On Tegra, only root(admin)/kernel clients are allowed\n", __FUNCTION__); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // OS get sync-point aperture address. + status = osGetSyncpointAperture(pGpu->pOsGpuInfo, pAllocParams->syncpointId, &syncPointBase, &limit, &offset); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to get syncpoint aperture %x\n", status); + return status; + } + + physAddr = (NvP64)(syncPointBase + offset); + + NvU32 os02Flags = + DRF_DEF(OS02, _FLAGS, _MAPPING, _NO_MAP) + | DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS) + | DRF_DEF(OS02, _FLAGS, _ALLOC_TYPE_SYNCPOINT, _APERTURE) + | DRF_DEF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES); + + status = osCreateMemFromOsDescriptor(pGpu, + physAddr, + pCallContext->pClient->hClient, + os02Flags, + &limit, + &pMemDesc, + NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR, + RS_PRIV_LEVEL_KERNEL); // Physical address is obtained using osGetSyncpointAperture, Overriding the privlevel here to KERNEL. + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to import syncpoint memory %x\n", status); + return status; + } + + status = memConstructCommon(pMemory, + NV01_MEMORY_SYNCPOINT, + 0, // pUserParams->flags + pMemDesc, + 0, + NULL, + 0, // pUserParams->attr + 0, // pUserParams->attr2 + 0, + 0, + NVOS32_MEM_TAG_NONE, + NULL); + + + // failure case + if (status != NV_OK) + { + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + + return status; +} + +NvBool +syncpointCanCopy_IMPL +( + SyncpointMemory *pSyncpointMemory +) +{ + return NV_TRUE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c new file mode 100644 index 0000000..5faf27b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c @@ -0,0 +1,570 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr/system_mem.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "core/locks.h" +#include "os/os.h" +#include "rmapi/client.h" +#include "deprecated/rmapi_deprecated.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "core/system.h" + +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM + +/*! + * sysmemConstruct + * + * @brief + * This routine provides common allocation services used by the + * following heap allocation functions: + * NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT + * NVOS32_FUNCTION_ALLOC_SIZE + * NVOS32_FUNCTION_ALLOC_SIZE_RANGE + * NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT + * + * @param[in] pSystemMemory Pointer to SystemMemory object + * @param[in] pCallContext Pointer to the current CALL_CONTEXT. + * @param[in] pParams Pointer to the alloc params + * + * @return 'NV_OK' + * Operation completed successfully. + * @return 'NV_ERR_NO_MEMORY' + * There is not enough available memory to satisfy allocation request. + * @return 'NV_ERR_INSUFFICIENT_RESOURCES' + * Not enough available resources to satisfy allocation request. + */ +NV_STATUS +sysmemConstruct_IMPL +( + SystemMemory *pSystemMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_MEMORY_ALLOCATION_PARAMS *pAllocData = pParams->pAllocParams; + MEMORY_ALLOCATION_REQUEST allocRequest = {0}; + MEMORY_ALLOCATION_REQUEST *pAllocRequest = &allocRequest; + OBJGPU *pGpu = pMemory->pGpu; + HWRESOURCE_INFO hwResource; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvU32 gpuCacheAttrib; + NV_STATUS rmStatus = NV_OK; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvU64 sizeOut; + NvU64 offsetOut; + MEMORY_DESCRIPTOR *pMemDesc; + NvU32 Cache; + NvU32 flags; + StandardMemory *pStdMemory = staticCast(pSystemMemory, StandardMemory); + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, stdmemValidateParams(pGpu, hClient, pAllocData)); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) != NVOS32_ATTR_LOCATION_VIDMEM && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_VIRTUAL), + NV_ERR_INVALID_ARGUMENT); + + stdmemDumpInputAllocParams(pAllocData, pCallContext); + + // send it through the regular allocator even though it is for sysmem + pAllocRequest->classNum = NV01_MEMORY_SYSTEM; + pAllocRequest->pUserParams = pAllocData; + pAllocRequest->hMemory = pResourceRef->hResource; + pAllocRequest->hClient = hClient; + pAllocRequest->hParent = hParent; + pAllocRequest->pGpu = pGpu; + pAllocRequest->internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC; + pAllocRequest->pHwResource = &hwResource; + + // Unsure if we need to keep separate copies, but keeping old behavior for now. 
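+    //
+    // Allocation flow from here, for reference: sysmemInitAllocRequest()
+    // builds pAllocRequest->pMemDesc, memdescAlloc() attaches the backing
+    // pages, and memConstructCommon() registers the object with the client
+    // database.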
+    sizeOut = pAllocData->size;
+    offsetOut = pAllocData->offset;
+
+    {
+    }
+
+    rmStatus = sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest);
+
+    if (rmStatus != NV_OK)
+        goto failed;
+
+    NV_ASSERT(pAllocRequest->pMemDesc);
+    pMemDesc = pAllocRequest->pMemDesc;
+
+    // Copy final heap size/offset back to client struct
+    //
+    // What should we return: the system or the device physical address?
+    // Return the device physical address for now.
+    // This may change with the heap refactoring.
+    //
+    // The system and device physical addresses can be obtained using the
+    // nv0041CtrlCmdGetSurfacePhysAttr ctrl call
+    offsetOut = memdescGetPhysAddr(pMemDesc, AT_GPU, 0);
+    sizeOut = pMemDesc->Size;
+    pAllocData->limit = sizeOut - 1;
+
+    //
+    // For system memory default to GPU uncached. GPU caching is different from
+    // the expected default memory model since it is not coherent. Clients must
+    // understand this and handle any coherency requirements explicitly.
+    //
+    if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) ==
+        NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT)
+    {
+        pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO,
+                                        pAllocData->attr2);
+    }
+
+    if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) ==
+        NVOS32_ATTR2_GPU_CACHEABLE_YES)
+    {
+        gpuCacheAttrib = NV_MEMORY_CACHED;
+    }
+    else
+    {
+        gpuCacheAttrib = NV_MEMORY_UNCACHED;
+    }
+
+    if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_UNCACHED)
+        Cache = NV_MEMORY_UNCACHED;
+    else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_CACHED)
+        Cache = NV_MEMORY_CACHED;
+    else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_COMBINE)
+        Cache = NV_MEMORY_WRITECOMBINED;
+    else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_THROUGH)
+        Cache = NV_MEMORY_CACHED;
+    else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_PROTECT)
+        Cache = NV_MEMORY_CACHED;
+    else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_BACK)
+        Cache = NV_MEMORY_CACHED;
+    else
+        Cache = 0;
+
+    ct_assert(NVOS32_ATTR_COHERENCY_UNCACHED == NVOS02_FLAGS_COHERENCY_UNCACHED);
+    ct_assert(NVOS32_ATTR_COHERENCY_CACHED == NVOS02_FLAGS_COHERENCY_CACHED);
+    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_COMBINE == NVOS02_FLAGS_COHERENCY_WRITE_COMBINE);
+    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_THROUGH == NVOS02_FLAGS_COHERENCY_WRITE_THROUGH);
+    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_PROTECT == NVOS02_FLAGS_COHERENCY_WRITE_PROTECT);
+    ct_assert(NVOS32_ATTR_COHERENCY_WRITE_BACK == NVOS02_FLAGS_COHERENCY_WRITE_BACK);
+
+    flags = DRF_DEF(OS02, _FLAGS, _LOCATION, _PCI) |
+            DRF_DEF(OS02, _FLAGS, _MAPPING, _NO_MAP) |
+            DRF_NUM(OS02, _FLAGS, _COHERENCY, DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr));
+
+    NV_ASSERT(memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM);
+    memdescSetCpuCacheAttrib(pMemDesc, Cache);
+
+    if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)
+        memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE);
+
+    if (pAllocData->flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED)
+        memdescSetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED, NV_TRUE);
+
+    if (FLD_TEST_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, pAllocData->attr2))
+        memdescSetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO, NV_TRUE);
+
+    memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT, NV_TRUE);
+
+    memdescSetGpuCacheAttrib(pMemDesc, gpuCacheAttrib);
+
+    rmStatus = memdescAlloc(pMemDesc);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "*** Cannot allocate sysmem through fb heap\n");
+        memdescFree(pMemDesc);
+        memdescDestroy(pMemDesc);
+        goto failed;
+    }
+
+    // ClientDB can set the pagesize for memdesc.
+    // With GPU SMMU mapping, this needs to be set on the SMMU memdesc.
+    // So SMMU allocation should happen before memConstructCommon().
+    // Eventually SMMU allocation will be part of memdescAlloc().
+
+    // An SMMU mapping will be added to SYSMEM allocations in the following cases:
+    // 1. BIG page allocations with non-contiguous SYSMEM in Tegra.
+    // 2. RM clients forcing SMMU mapping via flags.
+    //    GPU Arch verification with VPR is one such usecase.
+    //
+    // fbAlloc_GF100() will set the page size attribute to BIG for these cases.
+
+    if (FLD_TEST_DRF(OS32, _ATTR2, _SMMU_ON_GPU, _ENABLE, pAllocData->attr2))
+    {
+        NV_PRINTF(LEVEL_ERROR, "SMMU mapping allocation is not supported.\n");
+        NV_ASSERT(0);
+        rmStatus = NV_ERR_NOT_SUPPORTED;
+
+        memdescFree(pMemDesc);
+        memdescDestroy(pMemDesc);
+        goto failed;
+    }
+    else if ((FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, pAllocData->attr) ||
+              FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pAllocData->attr)) &&
+             FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, pAllocData->attr) &&
+             (stdmemGetSysmemPageSize_HAL(pGpu, pStdMemory) == RM_PAGE_SIZE))
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Non-contiguous allocation not supported where requested page size is larger than sysmem page size.\n");
+        NV_ASSERT(0);
+        rmStatus = NV_ERR_NOT_SUPPORTED;
+
+        memdescFree(pMemDesc);
+        memdescDestroy(pMemDesc);
+        goto failed;
+    }
+
+    rmStatus = memConstructCommon(pMemory, pAllocRequest->classNum, flags, pMemDesc, 0,
+                                  NULL, pAllocData->attr, pAllocData->attr2, 0, 0,
+                                  pAllocData->tag, &hwResource);
+    if (rmStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "*** Cannot add sysmem through fb heap to client db\n");
+        memdescFree(pMemDesc);
+        memdescDestroy(pMemDesc);
+        goto failed;
+    }
+
+    //
+    // We need to force a kernel mapping of system memory-backed notifiers
+    // allocated in this path.
+    //
+    if (pAllocData->type == NVOS32_TYPE_NOTIFIER)
+    {
+        rmStatus = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_FALSE);
+        if (rmStatus != NV_OK)
+        {
+            memDestructCommon(pMemory);
+            memdescFree(pMemDesc);
+            memdescDestroy(pMemDesc);
+            goto failed;
+        }
+    }
+
+    if (IS_VIRTUAL(pGpu))
+    {
+        NvU32 os02Flags;
+        NvU32 os32Flags = pAllocData->flags;
+
+        // NVOS32_TYPE_NOTIFIER indicates a kernel mapping in this path
+        if (pAllocData->type == NVOS32_TYPE_NOTIFIER)
+            os32Flags |= NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP;
+
+        //
+        // Calculate os02flags as VGPU plugin allocates sysmem with legacy
+        // RmAllocMemory API
+        //
+        rmStatus = RmDeprecatedConvertOs32ToOs02Flags(pAllocData->attr,
+                                                      pAllocData->attr2,
+                                                      os32Flags,
+                                                      &os02Flags);
+
+        if (rmStatus == NV_OK)
+        {
+            //
+            // vGPU:
+            //
+            // Since vGPU does all real hardware management in the
+            // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true),
+            // do an RPC to the host to do the hardware update.
+ // + NV_RM_RPC_ALLOC_MEMORY(pGpu, + hClient, + hParent, + pAllocRequest->hMemory, + pAllocRequest->classNum, + os02Flags, + pMemDesc, + rmStatus); + } + + if (rmStatus != NV_OK) + { + memDestructCommon(pMemory); + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + goto failed; + } + + pMemory->bRpcAlloc = NV_TRUE; + } + + pAllocData->size = sizeOut; + pAllocData->offset = offsetOut; + + stdmemDumpOutputAllocParams(pAllocData); + +failed: + return rmStatus; +} + +NV_STATUS +sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL +( + SystemMemory *pSystemMemory, + NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED); + + status = osGetNumMemoryPages(pMemory->pMemDesc, + &pParams->numPages); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get sysmem pages\n"); + } + + return status; +} + +NV_STATUS +sysmemCtrlCmdGetSurfacePhysPages_IMPL +( + SystemMemory *pSystemMemory, + NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED); + + status = osGetMemoryPages(pMemory->pMemDesc, + NvP64_VALUE(pParams->pPages), + &pParams->numPages); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get sysmem pages\n"); + } + + return status; +} + +NV_STATUS +sysmemInitAllocRequest_SOC +( + OBJGPU *pGpu, + SystemMemory *pSystemMemory, + MEMORY_ALLOCATION_REQUEST *pAllocRequest +) +{ + NV_MEMORY_ALLOCATION_PARAMS *pAllocParams = pAllocRequest->pUserParams; + NV_STATUS status = NV_OK; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvBool bAllocedMemDesc = NV_FALSE; + NvBool bContig = NV_TRUE; + + // Check for valid size. + NV_CHECK_OR_RETURN(LEVEL_ERROR, pAllocParams->size != 0, NV_ERR_INVALID_ARGUMENT); + + // Ensure a valid allocation pAllocParams->type was passed in + NV_CHECK_OR_RETURN(LEVEL_ERROR,(pAllocParams->type < NVOS32_NUM_MEM_TYPES), NV_ERR_INVALID_ARGUMENT); + + // If vidmem not requested explicitly, decide on the physical location. + if (FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _PCI, pAllocParams->attr) || + FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _ANY, pAllocParams->attr)) + { + + pAllocParams->attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, + _PCI, pAllocParams->attr); + } + + if (pAllocParams->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + NV_PRINTF(LEVEL_ERROR, + "Fixed allocation on sysmem not allowed.\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + CLEAR_HAL_ATTR(pAllocParams->attr) + CLEAR_HAL_ATTR2(pAllocParams->attr2) + + bContig = FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, pAllocParams->attr); + + // Allocate a memory descriptor if needed. + if (pAllocRequest->pMemDesc == NULL) + { + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(&pAllocRequest->pMemDesc, pGpu, pAllocParams->size, 0, + bContig, ADDR_SYSMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE), + failed); + bAllocedMemDesc = NV_TRUE; + } + + pMemDesc = pAllocRequest->pMemDesc; + + // Set attributes tracked by the memdesc + memdescSetPteKind(pMemDesc, pAllocParams->format); + memdescSetHwResId(pMemDesc, 0); // hwResId is 0. 
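+    // Note: system memory is not backed by FB heap hw resources in this path,
+    // so the PTE kind comes straight from the request and hwResId stays 0.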
+ + // update contiguity attribute to reflect memdesc + if (memdescGetContiguity(pAllocRequest->pMemDesc, AT_GPU)) + { + pAllocParams->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, pAllocParams->attr); + } + else + { + pAllocParams->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _NONCONTIGUOUS, pAllocParams->attr); + } + + pAllocParams->offset = ~0; + + return NV_OK; + +failed: + if (bAllocedMemDesc) + { + memdescDestroy(pAllocRequest->pMemDesc); + pAllocRequest->pMemDesc = NULL; + } + + return status; +} + +NV_STATUS +sysmemAllocResources +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo, + SystemMemory *pSystemMemory +) +{ + NV_STATUS status = NV_OK; + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvBool bAllocedMemDesc = NV_FALSE; + NvBool bContig = FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, pVidHeapAlloc->attr); + // + // BUG 3506666 + // While replaying a trace, it is possible for the playback OS to have a smaller page size + // than the capture OS so if we're running a replay where the requested page size is larger, + // assume this is a contiguous piece of memory, if contiguity is not specified. + // + if (FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _DEFAULT, pVidHeapAlloc->attr)) + { + if ((FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, pVidHeapAlloc->attr) || + FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pVidHeapAlloc->attr)) && + (stdmemGetSysmemPageSize_HAL(pGpu, staticCast(pSystemMemory, StandardMemory)) == RM_PAGE_SIZE)) + { + bContig = NV_TRUE; + } + } + + // + // Check for virtual-only parameters used on physical allocs. + // + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY) + { + NV_PRINTF(LEVEL_ERROR, + "Virtual-only flag used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pVidHeapAlloc->attr2)) + { + NV_PRINTF(LEVEL_ERROR, + "Virtual-only 32-bit pointer attr used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + if (pVidHeapAlloc->hVASpace != 0) + { + NV_PRINTF(LEVEL_ERROR, + "VA space handle used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + NV_ASSERT(!(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR1) && !(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR2)); + + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + NV_PRINTF(LEVEL_ERROR, + "Expected fixed address allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, memUtilsAllocMemDesc(pGpu, pAllocRequest, pFbAllocInfo, &pMemDesc, NULL, + ADDR_SYSMEM, bContig, &bAllocedMemDesc), failed); + + // get possibly updated surface attributes + pVidHeapAlloc->attr = pFbAllocInfo->retAttr; + pVidHeapAlloc->attr2 = pFbAllocInfo->retAttr2; + + // update contiguity attribute to reflect memdesc + if (memdescGetContiguity(pAllocRequest->pMemDesc, AT_GPU)) + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, + pVidHeapAlloc->attr); + } + else + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _NONCONTIGUOUS, + pVidHeapAlloc->attr); + } + + pVidHeapAlloc->offset = pFbAllocInfo->offset; + + if (pAllocRequest->pHwResource != NULL) + { + pAllocRequest->pHwResource->attr = pFbAllocInfo->retAttr; + pAllocRequest->pHwResource->attr2 = pFbAllocInfo->retAttr2; + 
+        pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId;
+        pAllocRequest->pHwResource->comprCovg = pFbAllocInfo->comprCovg;
+        pAllocRequest->pHwResource->ctagOffset = pFbAllocInfo->ctagOffset;
+    }
+
+    return NV_OK;
+
+failed:
+
+    if (bAllocedMemDesc)
+    {
+        memdescDestroy(pAllocRequest->pMemDesc);
+        pAllocRequest->pMemDesc = NULL;
+    }
+
+    return status;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c
new file mode 100644
index 0000000..a9b851c
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c
@@ -0,0 +1,283 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ***************************\
+* Virtual Address Space Function Definitions.                               *
+\***************************************************************************/
+
+
+#include "mem_mgr/vaspace.h"
+#include "gpu/gpu.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "rmapi/rs_utils.h"
+#include "gpu/device/device.h"
+#include "gpu/subdevice/subdevice.h"
+
+void
+vaspaceIncRefCnt_IMPL(OBJVASPACE *pVAS)
+{
+    pVAS->refCnt++;
+}
+
+void
+vaspaceDecRefCnt_IMPL(OBJVASPACE *pVAS)
+{
+    NV_ASSERT_OR_RETURN_VOID(pVAS->refCnt != 0);
+    pVAS->refCnt--;
+}
+
+NV_STATUS
+vaspaceFillAllocParams_IMPL
+(
+    OBJVASPACE *pVAS,
+    const FB_ALLOC_INFO *pAllocInfo,
+    NvU64 *pSize,
+    NvU64 *pAlign,
+    NvU64 *pRangeLo,
+    NvU64 *pRangeHi,
+    NvU64 *pPageSizeLockMask,
+    VAS_ALLOC_FLAGS *pFlags
+)
+{
+    NvBool bRestrictedVaRange = NV_FALSE;
+    NvBool bEnforce32bitPtr = NV_FALSE;
+    NvU32 vasFlags = vaspaceGetFlags(pVAS);
+
+    // Apply default alignment policies to offset alignment and size.
+    NV_ASSERT_OK_OR_RETURN(
+        vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize,
+                                     pPageSizeLockMask));
+
+    pFlags->bClientAllocation = !!(pAllocInfo->internalflags & NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC);
+
+    if (pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)
+    {
+        // Fixed address allocation implemented by restricting range.
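+        // (restricting the search range to [offset, offset + size - 1] forces
+        // the allocator to place the block exactly at the requested offset)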
+ *pRangeLo = pAllocInfo->offset; + *pRangeHi = pAllocInfo->offset + *pSize - 1; + } + else if (!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END)) + { + // If user didn't specify fixed or restricted range, allow full VAS range. + *pRangeLo = vaspaceGetVaStart(pVAS); + *pRangeHi = vaspaceGetVaLimit(pVAS); + + // + // For MODS we also allow restricting the range to 40 bits by default. + // This is needed for Pascal 49b support where some HW units can only + // access 40b VA. MODS must use range/fixed address allocations to + // get a VA above 40 bits in this mode. + // + if (bRestrictedVaRange && !(vasFlags & VASPACE_FLAGS_FLA)) + { + *pRangeHi = NV_MIN(*pRangeHi, NVBIT64(40) - 1); + } + } + + if (vaspaceIsInternalVaRestricted(pVAS)) // will be true only for MAC's GPUVA. + { + + if (pFlags->bClientAllocation) // client allocations + { + NvU64 partitionRangeLo = 0; + NvU64 partitionRangeHi = 0; + + // If 32 bit enforcement is set, route to the lower va range. + if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pAllocInfo->pageFormat->attr2)) + { + partitionRangeLo = vaspaceGetVaStart(pVAS); + partitionRangeHi = NVBIT64(32) - 1; + } + else + { + // route to >4gig + partitionRangeLo = NVBIT64(32); + partitionRangeHi = vaspaceGetVaLimit(pVAS); + } + + // If fixed address is requested - the range should be entirely contained within the partition. + if (pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE || + pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) // Is this a valid expectation? + { + // Within the 32 bit or 64 bit partition. + if (!(*pRangeLo >= partitionRangeLo && *pRangeHi <=partitionRangeHi)) + { + return NV_ERR_INVALID_PARAMETER; + } + // both use_begin_end and fixed_addr_range will have this flag set + pFlags->bFixedAddressRange = NV_TRUE; + } + else + { + *pRangeLo = partitionRangeLo; + *pRangeHi = partitionRangeHi; + pFlags->bFixedAddressRange = NV_FALSE; + } + } + } + else + { + // + // Handle 32bit pointer requests. 32b pointers are forced below 32b + // on all chips. Non-32b requests are only forced on some chips, + // typically kepler, and only if there are no other address hints. + // + // If requested size cannot be satisfied with range above 4 GB, then relax that + // restriction. + // + if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pAllocInfo->pageFormat->attr2)) + { + *pRangeHi = NV_MIN(*pRangeHi, NVBIT64(32) - 1); + } + + else if (bEnforce32bitPtr && + !(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) && + !(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) && + ((*pRangeHi - *pRangeLo + 1 - *pSize) > NVBIT64(32)) && + !(vasFlags & VASPACE_FLAGS_FLA)) + { + *pRangeLo = NV_MAX(*pRangeLo, NVBIT64(32)); + } + } + + if ((*pRangeHi - *pRangeLo + 1) < *pSize) // Moved the range check here + { + NV_PRINTF(LEVEL_ERROR, + "Requested size 0x%llx more than available range. RangeLo=0x%llx, RangeHi=0x%llx\n", + *pSize, *pRangeLo, *pRangeHi); + NV_ASSERT_OR_RETURN(0, NV_ERR_INSUFFICIENT_RESOURCES); + } + + // Convert flags. 
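+    // (each NVOS32_ALLOC_FLAGS_* bit below is collapsed into an NV_TRUE/NV_FALSE
+    // VAS_ALLOC_FLAGS field via the !! idiom)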
+ pFlags->bReverse = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN); + + pFlags->bPreferSysmemPageTables = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY); + + pFlags->bExternallyManaged = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED); + + pFlags->bLazy = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_LAZY); + + pFlags->bSparse = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_SPARSE); + + // + // The protected flag for kernel allocations is honoured only + // if this is a root client(kernel client). + // + pFlags->bPrivileged = pAllocInfo->bIsKernelAlloc; + + return NV_OK; +} + +NvU64 +vaspaceGetVaStart_IMPL(OBJVASPACE *pVAS) +{ + return pVAS->vasStart; +} + +NvU64 +vaspaceGetVaLimit_IMPL(OBJVASPACE *pVAS) +{ + return pVAS->vasLimit; +} + +void +vaspaceInvalidateTlb_IMPL +( + OBJVASPACE *pVAS, + OBJGPU *pGpu, + VAS_PTE_UPDATE_TYPE type +) +{ + NV_ASSERT(0); +} + +NvBool +vaspaceIsInternalVaRestricted_IMPL(OBJVASPACE *pVAS) +{ + return NV_FALSE; +} + +NV_STATUS +vaspaceGetByHandleOrDeviceDefault_IMPL +( + RsClient *pClient, + NvHandle hDeviceOrSubDevice, + NvHandle hVASpace, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + NvHandle _hDeviceOrSubDevice; + Device *pDevice = NULL; + + if (hVASpace == NV01_NULL_OBJECT) + { + if (hDeviceOrSubDevice == 0) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + _hDeviceOrSubDevice = hDeviceOrSubDevice; + } + else + { + NV_PRINTF(LEVEL_ERROR, "Trying to fetch VASpace with VASPACE handle. Not supported \n"); + return NV_ERR_NOT_SUPPORTED; + } + + status = deviceGetByHandle(pClient, _hDeviceOrSubDevice, &pDevice); + if (status != NV_OK) + { + Subdevice *pSubdevice; + + status = subdeviceGetByHandle(pClient, _hDeviceOrSubDevice, &pSubdevice); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid parent handle!\n"); + return status; + } + + pDevice = pSubdevice->pDevice; + } + + // Allocates/Finds VA Space according to the handle type. + if (hVASpace == NV01_NULL_OBJECT) + { + // Check the vaspace mode + if (pDevice->vaMode == NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES) + { + NV_PRINTF(LEVEL_ERROR, + "VA mode %d (PRIVATE) doesn't support allocating an implicit VA space.\n", + pDevice->vaMode); + return NV_ERR_INVALID_STATE; + } + return deviceGetDefaultVASpace(pDevice, ppVAS); + } + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c new file mode 100644 index 0000000..a727044 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c @@ -0,0 +1,185 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ***************************\
+*                                                                           *
+* Virtual Memory Manager Object Function Definitions.                       *
+*                                                                           *
+\***************************************************************************/
+
+#include "mem_mgr/virt_mem_mgr.h"
+#include "mem_mgr/vaspace.h"
+#include "mem_mgr/io_vaspace.h"
+#include "class/cl00f2.h" // IO_VASPACE_A
+#include "class/cl00fc.h" // FABRIC_VASPACE_A
+
+NV_STATUS
+vmmCreateVaspace_IMPL
+(
+    OBJVMM *pVmm,
+    NvU32 classId,
+    NvU32 vaspaceId,
+    NvU32 gpuMask,
+    NvU64 vaStart,
+    NvU64 vaLimit,
+    NvU64 vaStartInternal,
+    NvU64 vaLimitInternal,
+    OBJVASPACE *pPteSpaceMap,
+    NvU32 flags,
+    OBJVASPACE **ppVAS
+)
+{
+    NV_STATUS status = NV_OK;
+    const NVOC_CLASS_INFO *pClassInfo;
+    Dynamic *pNewObj;
+    ADDRESS_TRANSLATION addressTranslation;
+
+    NV_ASSERT_OR_RETURN(ppVAS != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    //
+    // IOMMU vaspaces may be created for a device before the device itself
+    // has been created, so there isn't an OBJGPU to get here yet. In these
+    // cases, the vaspaceId is used to correlate the vaspace with the GPU (it
+    // is the GPU ID).
+    //
+    if (gpuMask == 0)
+    {
+        NV_ASSERT_OR_RETURN(IO_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT);
+    }
+
+    switch (classId)
+    {
+        case IO_VASPACE_A:
+            addressTranslation = AT_PA;
+            pClassInfo = RMCFG_MODULE_IOVASPACE ? classInfo(OBJIOVASPACE) : NULL;
+            //
+            // For IOMMU vaspaces, there is only one per vaspaceId. See if a
+            // vaspace for this vaspaceId already exists; if it does, just increment
+            // the refcount.
+            //
+            if (vmmGetVaspaceFromId(pVmm, vaspaceId, classId, ppVAS) == NV_OK)
+            {
+                vaspaceIncRefCnt(*ppVAS);
+                return NV_OK;
+            }
+            break;
+        default: // Unsupported class
+            addressTranslation = AT_GPU;
+            pClassInfo = NULL;
+            break;
+    }
+
+    if (pClassInfo == NULL)
+    {
+        *ppVAS = NULL;
+        return NV_ERR_INVALID_CLASS;
+    }
+
+    status = objCreateDynamic(&pNewObj, pVmm, pClassInfo);
+    if (NV_OK != status)
+        return status;
+
+    *ppVAS = dynamicCast(pNewObj, OBJVASPACE);
+
+    (*ppVAS)->addressTranslation = addressTranslation;
+    (*ppVAS)->vaspaceId = vaspaceId;
+    (*ppVAS)->gpuMask = gpuMask;
+
+    vaspaceIncRefCnt(*ppVAS);
+
+    status = vaspaceConstruct_(*ppVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags);
+    if (status != NV_OK)
+    {
+        vmmDestroyVaspace(pVmm, *ppVAS);
+        *ppVAS = NULL;
+        return status;
+    }
+
+    return status;
+}
+
+void
+vmmDestroyVaspace_IMPL
+(
+    OBJVMM *pVmm,
+    OBJVASPACE *pVAS
+)
+{
+    OBJVASPACE *pTargetVAS = pVAS;
+
+    vaspaceDecRefCnt(pTargetVAS);
+
+    //
+    // Call the utility routine that does the object deletion when the last
+    // reference has been destroyed.
+    //
+    if (0 == pTargetVAS->refCnt)
+    {
+        objDelete(pTargetVAS);
+        pTargetVAS = NULL;
+    }
+}
+
+NV_STATUS
+vmmGetVaspaceFromId_IMPL
+(
+    OBJVMM *pVmm,
+    NvU32 vaspaceId,
+    NvU32 classId,
+    OBJVASPACE **ppVAS
+)
+{
+    Object *pIter = NULL;
+    OBJVASPACE *pVAS = NULL;
+    OBJIOVASPACE *pIOVAS = NULL;
+
+    pIter = objGetChild(staticCast(pVmm, Object));
+    while (pIter != NULL)
+    {
+        switch (classId)
+        {
+            case IO_VASPACE_A:
+                pIOVAS = dynamicCast(pIter, OBJIOVASPACE);
+                if (pIOVAS != NULL)
+                {
+                    pVAS = staticCast(pIOVAS, OBJVASPACE);
+                }
+                break;
+            default:
+                NV_ASSERT(0);
+                break;
+        }
+
+        if ((pVAS != NULL) && (pVAS->vaspaceId == vaspaceId))
+        {
+            *ppVAS = pVAS;
+            return NV_OK;
+        }
+
+        pIter = objGetSibling(pIter);
+    }
+
+    *ppVAS = NULL;
+    return NV_ERR_OBJECT_NOT_FOUND;
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c
new file mode 100644
index 0000000..42959b4
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c
@@ -0,0 +1,582 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ***************************\
+*                                                                           *
+* Common Operating System Object Function Pointer Initializations.          *
+* All the function pointers in the OS object are initialized here.          *
+* The initializations are broken into 'bite-sized' sub-functions            *
+* for ease of reading. Any functions that are common among all OS's         *
+* are directly initialized to the common function name. However,            *
+* the actual code for that function may be different from one OS            *
+* to the other; each OS compiles separately. Any function pointers          *
+* that are either not used by some OS's or are initialized to               *
+* different functions by different OS's are 'stubbed' out by                *
+* initializing them to a 'stub' function.
* +\***************************************************************************/ + +#include "os/os.h" +#include "os/os_stub.h" +#include "core/system.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/gpu_access.h" +#include "nv_ref.h" +#include "virtualization/hypervisor/hypervisor.h" + + +#include "g_os_private.h" + +// +// Functions to fill function stubs +// +static void initOSFunctionPointers(OBJOS *); + +// +// Helper functions to assist the above functions +// +static void initMiscOSFunctionPointers(OBJOS *); +static void initCommonMiscOSFunctionPointers(OBJOS *); +static void initStubMiscOSFunctionPointers(OBJOS *); +static void initWinNTStubOSFunctionPointers(OBJOS *); +static void initMacOSCoreOSFunctionPointers(OBJOS *); +static void initAPIOSFunctionPointers(OBJOS *); + +// Bug check code string common to all OS +const char *ppOsBugCheckBugcodeStr[] = OS_BUG_CHECK_BUGCODE_STR; + +NV_STATUS +constructObjOS(OBJOS *pOS) +{ + // Stub out function pointers + initOSFunctionPointers(pOS); + + // Now call the OS specific initialization + osInitObjOS(pOS); + + return NV_OK; +} + +static void +initOSFunctionPointers(OBJOS *pOS) +{ + initMiscOSFunctionPointers(pOS); + initWinNTStubOSFunctionPointers(pOS); + initMacOSCoreOSFunctionPointers(pOS); + initAPIOSFunctionPointers(pOS); +} + +static void +initMiscOSFunctionPointers(OBJOS *pOS) +{ + initCommonMiscOSFunctionPointers(pOS); + initStubMiscOSFunctionPointers(pOS); +} + +static void +initCommonMiscOSFunctionPointers(OBJOS *pOS) +{ + // Common OS function pointers. + pOS->osGetSimulationMode = osGetSimulationMode; +} + +static void +initStubMiscOSFunctionPointers(OBJOS *pOS) +{ + // Stubbed OS function pointers. + pOS->osSimEscapeWrite = stubOsSimEscapeWrite; + pOS->osSimEscapeWriteBuffer = stubOsSimEscapeWriteBuffer; + pOS->osSimEscapeRead = stubOsSimEscapeRead; + pOS->osSimEscapeReadBuffer = stubOsSimEscapeReadBuffer; + + pOS->osCheckCallback = stubOsCheckCallback; + pOS->osRCCallback = stubOsRCCallback; + + pOS->osPageArrayGetPhysAddr = stubOsPageArrayGetPhysAddr; + + pOS->osInternalReserveAllocCallback = stubOsInternalReserveAllocCallback; + pOS->osInternalReserveFreeCallback = stubOsInternalReserveFreeCallback; +} + +static void +initWinNTStubOSFunctionPointers(OBJOS *pOS) +{ + pOS->osQADbgRegistryInit = stubOsQADbgRegistryInit; + pOS->osQueueWorkItem = stubOsQueueWorkItem; + pOS->osQueueWorkItemWithFlags = stubOsQueueWorkItemWithFlags; + pOS->osQueueSystemWorkItem = stubOsQueueSystemWorkItem; + pOS->osCallACPI_MXMX = stubOsCallACPI_MXMX; + pOS->osCallACPI_DDC = stubOsCallACPI_DDC; + pOS->osCallACPI_BCL = stubOsCallACPI_BCL; + pOS->osCallACPI_ON = stubOsCallACPI_ON; + pOS->osCallACPI_OFF = stubOsCallACPI_OFF; + pOS->osCallACPI_NVHG_GPUON = stubOsCallWMI_NVHG_GPUON; + pOS->osCallACPI_NVHG_GPUOFF = stubOsCallWMI_NVHG_GPUOFF; + pOS->osCallACPI_NVHG_GPUSTA = stubOsCallWMI_NVHG_GPUSTA; + pOS->osCallACPI_NVHG_MXDS = stubOsCallWMI_NVHG_MXDS; + pOS->osCallACPI_NVHG_MXMX = stubOsCallWMI_NVHG_MXMX; + pOS->osCallACPI_NVHG_DOS = stubOsCallWMI_NVHG_DOS; + pOS->osCallACPI_NVHG_ROM = stubOsCallWMI_NVHG_ROM; + pOS->osCallACPI_NVHG_DCS = stubOsCallWMI_NVHG_DCS; + pOS->osCallACPI_DOD = stubOsCallWMI_DOD; + pOS->osSetupVBlank = stubOsSetupVBlank; + pOS->osCallACPI_NBPS = stubOsCallACPI_NBPS; + pOS->osCallACPI_NBSL = stubOsCallACPI_NBSL; + pOS->osCallACPI_DSM = stubOsCallACPI_DSM; + pOS->osCallACPI_OPTM_GPUON = stubOsCallWMI_OPTM_GPUON; + pOS->osGetUefiVariable = stubOsGetUefiVariable; + pOS->osCallACPI_MXDS = stubOsCallACPI_MXDS; + 
pOS->osCallACPI_MXDM = stubOsCallACPI_MXDM; + pOS->osCallACPI_MXID = stubOsCallACPI_MXID; + pOS->osCallACPI_LRST = stubOsCallACPI_LRST; +} + +static void +initMacOSCoreOSFunctionPointers(OBJOS *pOS) +{ + pOS->osNv_rdcr4 = stubOsnv_rdcr4; + pOS->osNv_rdxcr0 = stubOsnv_rdxcr0; + pOS->osNv_cpuid = stubOsnv_cpuid; + pOS->osNv_rdmsr = stubOsnv_rdmsr; + pOS->osNv_wrmsr = stubOsnv_wrmsr; + pOS->osRobustChannelsDefaultState = stubOsRobustChannelsDefaultState; + pOS->osCallACPI_MXMX = stubOsCallACPI_MXMX; + pOS->osCallACPI_DDC = stubOsCallACPI_DDC; + pOS->osCallACPI_BCL = stubOsCallACPI_BCL; + pOS->osGetUefiVariable = stubOsGetUefiVariable; +} + +static void +initAPIOSFunctionPointers(OBJOS *pOS) +{ + pOS->osRmInitRm = osRmInitRm; +} + +// +// Function to find the maximum number of cores in the system +// +NvU32 osGetMaximumCoreCount() +{ + // + // Windows provides an API to query this that supports CPU hotadd that our + // cpuid() didn't catch, so favor that. + // +#if NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !defined(NV_MODS) + extern unsigned long KeQueryMaximumProcessorCountEx(unsigned short); + return KeQueryMaximumProcessorCountEx(0xFFFF); // All groups. +#else + OBJSYS *pSys = SYS_GET_INSTANCE(); + return pSys ? pSys->cpuInfo.maxLogicalCpus : 0; +#endif +} + +/*! + * @brief Generic OS 8-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg008 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg008( + OBJGPU *pGpu, + NvU32 thisAddress, + NvU8 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg008(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 16-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg016 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg016( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV16 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg016(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 32-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg032 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg032(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 8-bit GPU register read function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevReadReg008 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be read. 
+ *
+ * @return The value read from the register
+ */
+NvU8 osGpuReadReg008(
+    OBJGPU *pGpu,
+    NvU32 thisAddress
+)
+{
+    DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0);
+    return osDevReadReg008(pGpu, pMapping, thisAddress);
+}
+
+/*!
+ * @brief Generic OS 16-bit GPU register read function.
+ *
+ * This function first obtains the pointer to the mapping for the GPU
+ * registers and then calls the OS specific osDevReadReg016 function.
+ *
+ * @param[in] pGpu        - The GPU context specific to this call.
+ * @param[in] thisAddress - Address of the register to be read.
+ *
+ * @return The value read from the register
+ */
+NvU16 osGpuReadReg016(
+    OBJGPU *pGpu,
+    NvU32 thisAddress
+)
+{
+    DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0);
+    return osDevReadReg016(pGpu, pMapping, thisAddress);
+}
+
+/*!
+ * @brief Generic OS 32-bit GPU register read function.
+ *
+ * This function first obtains the pointer to the mapping for the GPU
+ * registers and then calls the OS specific osDevReadReg032 function.
+ *
+ * @param[in] pGpu        - The GPU context specific to this call.
+ * @param[in] thisAddress - Address of the register to be read.
+ *
+ * @return The value read from the register
+ */
+NvU32 osGpuReadReg032(
+    OBJGPU *pGpu,
+    NvU32 thisAddress
+)
+{
+    DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0);
+    return osDevReadReg032(pGpu, pMapping, thisAddress);
+}
+
+void vgpuDevWriteReg032(
+    OBJGPU *pGpu,
+    NvU32 thisAddress,
+    NvV32 thisValue,
+    NvBool *vgpuHandled
+)
+{
+
+    *vgpuHandled = NV_FALSE;
+}
+
+NvU32 vgpuDevReadReg032(
+    OBJGPU *pGpu,
+    NvU32 thisAddress,
+    NvBool *vgpuHandled
+)
+{
+
+    *vgpuHandled = NV_FALSE;
+    return 0;
+}
+
+
+/**
+ * @brief Adds a filter to trap a certain CPU virtual address range
+ *
+ * Sets up a filter so all accesses to an address range are sent through the
+ * specified callback.
+ *
+ * Only one filter is allowed for any given address.
+ *
+ * @param[in] rangeStart start of CPU address range (inclusive)
+ * @param[in] rangeEnd   end of CPU address range (inclusive)
+ * @param[in] pCb        Callback function
+ * @param[in] pPriv      Opaque pointer to data passed to the callback
+ *
+ * @return NV_OK on success, an appropriate error otherwise.
+ */
+NV_STATUS
+osMemAddFilter
+(
+    NvU64 rangeStart,
+    NvU64 rangeEnd,
+    OSMemFilterCb *pCb,
+    void *pPriv
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    POSMEMFILTERDATA pFilterData = NULL;
+
+    pFilterData = portMemAllocNonPaged(sizeof(OSMEMFILTERDATA));
+    if (pFilterData == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Failed to alloc mem for os mem filter data!\n");
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    pFilterData->node.keyStart = rangeStart;
+    pFilterData->node.keyEnd = rangeEnd;
+    pFilterData->pPriv = pPriv;
+    pFilterData->pFilterCb = pCb;
+    pFilterData->node.Data = (void *)pFilterData;
+
+    return btreeInsert(&pFilterData->node, &pSys->pMemFilterList);
+}
+
+/**
+ * @brief Remove a filter added with @ref osMemAddFilter
+ *
+ * @param[in] rangeStart memory address to remove filter from.
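+ *
+ * @return NV_OK on success, an appropriate error if no filter is registered
+ *         at rangeStart or the filter cannot be unlinked.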
+ */
+NV_STATUS
+osMemRemoveFilter
+(
+    NvU64 rangeStart
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    PNODE pNode = NULL;
+
+    if (btreeSearch(rangeStart, &pNode, pSys->pMemFilterList) != NV_OK)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Failed to find filter data for the given range start address!\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    NV_ASSERT(pNode);
+    NV_ASSERT(pNode->keyStart == rangeStart);
+
+    if (btreeUnlink(pNode, &pSys->pMemFilterList) != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Failed to unlink filter data!\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    portMemFree(pNode->Data);
+    pNode = NULL;
+
+    return NV_OK;
+}
+
+/**
+ * @brief Retrieves a filter added with @ref osMemAddFilter.
+ *
+ * @param[in] address Address to search for
+ *
+ * @return Appropriate filter data if a filter exists, NULL otherwise.
+ */
+POSMEMFILTERDATA
+osMemGetFilter(NvUPtr address)
+{
+    OBJSYS *pSys;
+    PNODE pNode = NULL;
+
+    pSys = SYS_GET_INSTANCE();
+    if (!pSys)
+        return NULL;
+
+    if (btreeSearch(address, &pNode, pSys->pMemFilterList) != NV_OK)
+        return NULL;
+
+    return pNode->Data;
+}
+
+/*!
+ * Some data, such as Bindata arrays, are placed in paged memory. Access to a paged
+ * segment at high IRQL is not allowed on some platforms (e.g. Windows). The issue
+ * can be difficult to debug as the repro rate is random. The failure only happens
+ * when the target segment is paged out.
+ *
+ * This utility function checks whether it is safe to access paged segments. When the
+ * function is called on a high-IRQL path, it raises an assertion with a message. On
+ * developer branches, such as chips_a, it triggers an intended bugcheck.
+ *
+ * @param[in] void No input required
+ *
+ * @return void To avoid random failures, this function does not return an error
+ *              code to check. A BSOD D1 or internal BSOD provides the full call
+ *              stack, which is much more helpful for debugging.
+ */
+
+void osPagedSegmentAccessCheck()
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJOS *pOS = SYS_GET_OS(pSys);
+
+    // check whether it is safe to access/alloc paged memory
+    if (!portMemExSafeForPagedAlloc() || pOS->getProperty(pOS, PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS))
+    {
+        NV_ASSERT_FAILED("Paged memory access is prohibited");
+
+        //
+        // Trigger internal BSOD on developer branches
+        // This code should never be reachable on release branches
+        //
+        osBugCheck(OS_BUG_CHECK_BUGCODE_PAGED_SEGMENT);
+    }
+}
+
+/*!
+ * @brief Retrieves a registry key DWORD value and returns the best result
+ * from both nbsi and os registry tables.
+ *
+ * @param[in]  OBJGPU pointer
+ * @param[in]  pRegParmStr Registry key string
+ * @param[out] pData Registry key DWORD value
+ *
+ * @return NV_OK if key was found and data returned in pData
+ * @return Other unexpected errors
+ */
+NV_STATUS osReadRegistryDword
+(
+    OBJGPU *pGpu,
+    const char *pRegParmStr,
+    NvU32 *pData
+)
+{
+    NV_STATUS status;
+
+    NV_ASSERT_OR_RETURN(pRegParmStr != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    status = osReadRegistryDwordBase(pGpu, pRegParmStr, pData);
+
+    return status;
+}
+
+/*!
+ * @brief Retrieves a registry key STRING value and returns the best result
+ * from both nbsi and os registry tables.
+ *
+ * @param[in]  OBJGPU pointer
+ * @param[in]  pRegParmStr Registry key string
+ * @param[out] pData Registry key STRING value
+ * @param[in]  pCbLen Count of bytes in registry value.
+ * + * @return NV_OK if key was found and data returned in pData + * @return Other unexpected errors + */ +NV_STATUS osReadRegistryString +( + OBJGPU *pGpu, + const char *pRegParmStr, + NvU8 *pData, + NvU32 *pCbLen +) +{ + NV_STATUS status; + NV_ASSERT_OR_RETURN(pRegParmStr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pCbLen != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(!(*pCbLen != 0 && pData == NULL), NV_ERR_INVALID_ARGUMENT); + + status = osReadRegistryStringBase(pGpu, pRegParmStr, pData, pCbLen); + + return status; +} + +void nvErrorLog(void *pVoid, NvU32 num, const char *pFormat, va_list arglist) +{ + if ((pFormat == NULL) || (*pFormat == '\0')) + { + return; + } + + OBJGPU *pGpu = reinterpretCast(pVoid, OBJGPU *); + +#if RMCFG_MODULE_SMBPBI || \ + (RMCFG_MODULE_KERNEL_RC && !RMCFG_FEATURE_PLATFORM_GSP) + char *errorString = portMemAllocNonPaged(MAX_ERROR_STRING); + if (errorString == NULL) + goto done; + + unsigned msglen; + va_list arglistCpy; + + va_copy(arglistCpy, arglist); + msglen = nvDbgVsnprintf(errorString, MAX_ERROR_STRING, pFormat, arglistCpy); + va_end(arglistCpy); + + if (msglen == 0) + goto done; + +done: + portMemFree(errorString); +#endif // RMCFG_MODULE_SMBPBI || (RMCFG_MODULE_KERNEL_RC && + // !RMCFG_FEATURE_PLATFORM_GSP) + + osErrorLogV(pGpu, num, pFormat, arglist); +} + +void +nvErrorLog_va +( + void * pVoid, + NvU32 num, + const char * pFormat, + ... +) +{ + va_list arglist; + + va_start(arglist, pFormat); + nvErrorLog(pVoid, num, pFormat, arglist); + va_end(arglist); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c new file mode 100644 index 0000000..f1bc777 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/************************************************************************************************************** +* +* Description: +* Sanity test the system environment to verify our driver can run properly +* +**************************************************************************************************************/ + +#include +#include +#include +#include +#include + +#include "g_os_private.h" + +NV_STATUS osSanityTestIsr( + OBJGPU *pGpu +) +{ + NvBool serviced = NV_FALSE; + + return (serviced) ? NV_OK : NV_ERR_GENERIC; +} + +// +// add various system environment start-up tests here +// currently, just verify interrupt hookup, but could also verify other details +// +NV_STATUS osVerifySystemEnvironment( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c new file mode 100644 index 0000000..5d80f70 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c @@ -0,0 +1,854 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Routines ***************************\ +* * +* Module: os_stubs.c * +* Stubs for all the public stub routines * +\***************************************************************************/ + +#include "os/os_stub.h" + +// +// Here's a little debugging tool. It is possible that some code is stubbed for +// certain OS's that shouldn't be. In debug mode, the stubs below will dump out +// a stub 'number' to help you identify any stubs that are getting called. You +// can then evaluate whether or not that is correct. +// +// Highest used STUB_CHECK is 237. 
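+// Set enableOsStubCallCheck to a nonzero value in a debug build (e.g. from a
+// debugger) to have each instrumented stub log its number via NV_PRINTF.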
+// +#if defined(DEBUG) +#define STUB_CHECK(n) _stubCallCheck(n) + +int enableOsStubCallCheck = 0; + +static void _stubCallCheck(int funcNumber) +{ + if (enableOsStubCallCheck) { + NV_PRINTF(LEVEL_INFO, "STUB CALL: %d \r\n", funcNumber); + } +} + +#else +#define STUB_CHECK(n) +#endif // DEBUG + +struct OBJCL; + +void stubOsQADbgRegistryInit(OBJOS *pOS) +{ + STUB_CHECK(61); +} + +NvU32 stubOsnv_rdcr4(OBJOS *pOS) +{ + STUB_CHECK(76); + return 0; +} + +NvU64 stubOsnv_rdxcr0(OBJOS *pOs) +{ + STUB_CHECK(237); + return 0; +} + +int stubOsnv_cpuid(OBJOS *pOS, int arg1, int arg2, NvU32 *arg3, + NvU32 *arg4, NvU32 *arg5, NvU32 *arg6) +{ + STUB_CHECK(77); + return 0; +} + +NvU32 stubOsnv_rdmsr(OBJOS *pOS, NvU32 arg1, NvU32 *arg2, NvU32 *arg3) +{ + STUB_CHECK(122); + return 0; +} + +NvU32 stubOsnv_wrmsr(OBJOS *pOS, NvU32 arg1, NvU32 arg2, NvU32 arg3) +{ + STUB_CHECK(123); + return 0; +} + +NvU32 stubOsRobustChannelsDefaultState(OBJOS *pOS) +{ + STUB_CHECK(128); + return 0; +} + +NV_STATUS stubOsQueueWorkItem(OBJGPU *pGpu, OSWorkItemFunction pFunction, void * pParms) +{ + STUB_CHECK(180); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsQueueSystemWorkItem(OSSystemWorkItemFunction pFunction, void *pParms) +{ + STUB_CHECK(181); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsQueueWorkItemWithFlags(OBJGPU *pGpu, OSWorkItemFunction pFunction, void * pParms, NvU32 flags) +{ + STUB_CHECK(182); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 Value) +{ + STUB_CHECK(195); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsSimEscapeWriteBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + STUB_CHECK(197); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value) +{ + STUB_CHECK(196); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsSimEscapeReadBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + STUB_CHECK(198); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsCallACPI_MXMX(OBJGPU *pGpu, NvU32 AcpiId, NvU8 *pInOut) +{ + STUB_CHECK(220); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_DDC(OBJGPU *pGpu, NvU32 ulAcpiId, NvU8 *pOut, NvU32 *size, NvBool bReadMultiBlock) +{ + STUB_CHECK(221); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_BCL(OBJGPU *pGpu, NvU32 acpiId, NvU32 *pOut, NvU16 *size) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_ON(OBJGPU *pGpu, NvU32 uAcpiId) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_OFF(OBJGPU *pGpu, NvU32 uAcpiId) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_NBPS(OBJGPU *pGpu, NvU8 *pOut, NvU32 *pOutSize) +{ + *pOutSize = 0; + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_NBSL(OBJGPU *pGpu, NvU32 val) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_OPTM_GPUON(OBJGPU *pGpu) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_GPUON(OBJGPU *pGpu, NvU32 *pInOut) +{ + //STUB_CHECK(225); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_GPUOFF(OBJGPU *pGpu, NvU32 *pInOut) +{ + //STUB_CHECK(226); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_GPUSTA(OBJGPU *pGpu, NvU32 *pInOut) +{ + //STUB_CHECK(227); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_MXDS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(228); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS 
stubOsCallWMI_NVHG_MXMX(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(229); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_DOS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(230); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_ROM(OBJGPU *pGpu, NvU32 *pIn, NvU32 *pOut) +{ + //STUB_CHECK(231); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_DCS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(232); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_DOD(OBJGPU *pGpu, NvU32 *pInOut, NvU32 *pOutSize) +{ + //STUB_CHECK(233); + return NV_ERR_NOT_SUPPORTED; +} + + +NV_STATUS stubOsCallACPI_DSM(OBJGPU *pGpu, ACPI_DSM_FUNCTION acpiDSMFunction, NvU32 NVHGDSMSubfunction, NvU32 *pInOut, NvU16 *size) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_MXDS(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_MXDM(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_MXID(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_LRST(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool stubOsCheckCallback(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +RC_CALLBACK_STATUS +stubOsRCCallback +( + OBJGPU *pGpu, + NvHandle hClient, // IN The client RC is on + NvHandle hDevice, // IN The device RC is on + NvHandle hFifo, // IN The channel or TSG RC is on + NvHandle hChannel, // IN The channel RC is on + NvU32 errorLevel, // IN Error Level + NvU32 errorType, // IN Error type + NvU32 *data, // IN/OUT context of RC handler + void *pfnRmRCReenablePusher +) +{ + return RC_CALLBACK_IGNORE; +} + +NV_STATUS stubOsSetupVBlank(OBJGPU *pGpu, void * pProc, + void * pParm1, void * pParm2, NvU32 Head, void * pParm3) +{ + return NV_OK; +} + +NV_STATUS stubOsObjectEventNotification(NvHandle hClient, NvHandle hObject, NvU32 hClass, PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, void * pEventData, NvU32 eventDataSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +RmPhysAddr +stubOsPageArrayGetPhysAddr(OS_GPU_INFO *pOsGpuInfo, void* pPageData, NvU32 pageIndex) +{ + NV_ASSERT(0); + return 0; +} + +void stubOsInternalReserveAllocCallback(NvU64 offset, NvU64 size, NvU32 gpuId) +{ + return; +} + +void stubOsInternalReserveFreeCallback(NvU64 offset, NvU32 gpuId) +{ + return; +} + +#if !(RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_DCE) || \ + (RMCFG_FEATURE_PLATFORM_UNIX && !RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY) +NV_STATUS osTegraSocGpioGetPinState( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void osTegraSocGpioSetPinState( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3 +) +{ +} + +NV_STATUS osTegraSocGpioSetPinDirection( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioGetPinDirection( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioGetPinNumber( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioGetPinInterruptStatus( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3, + NvBool *pArg4 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osTegraSocGpioSetPinInterrupt( + OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + 
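+// Like the GPIO stubs above, MIPI calibration is unavailable when the Tegra
+// SoC display stack is not compiled in, so the stub below simply reports
+// NV_ERR_NOT_SUPPORTED.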
+NV_STATUS +osTegraSocResetMipiCal +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} +#endif + +NV_STATUS +osTegraSocParseFixedModeTimings +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 dcbIndex, + OS_FIXED_MODE_TIMINGS *pFixedModeTimings +) +{ + return NV_OK; +} + + +NV_STATUS osVgpuAllocVmbusEventDpc(void **ppArg1) +{ + return NV_OK; +} + +void osVgpuScheduleVmbusEventDpc(void *pArg1, void *pArg2) +{ +} + +NV_STATUS osLockPageableDataSection(RM_PAGEABLE_SECTION *pSection) +{ + return NV_OK; +} + +NV_STATUS osUnlockPageableDataSection(RM_PAGEABLE_SECTION *pSection) +{ + return NV_OK; +} + +NV_STATUS osIsKernelBuffer(void *pArg1, NvU32 arg2) +{ + return NV_OK; +} + +NV_STATUS osMapViewToSection(OS_GPU_INFO *pArg1, + void *pSectionHandle, + void **ppAddress, + NvU64 actualSize, + NvU64 sectionOffset, + NvBool bIommuEnabled) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osUnmapViewFromSection(OS_GPU_INFO *pArg1, + void *pAddress, + NvBool bIommuEnabled) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osSrPinSysmem( + OS_GPU_INFO *pArg1, + NvU64 commitSize, + void *pMdl +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osSrUnpinSysmem(OS_GPU_INFO *pArg1) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCreateMemFromOsDescriptorInternal( + OBJGPU *pGpu, + void *pAddress, + NvU32 flags, + NvU64 size, + MEMORY_DESCRIPTOR **ppMemDesc, + NvBool bCachedKernel, + RS_PRIV_LEVEL privilegeLevel +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReserveCpuAddressSpaceUpperBound(void **ppSectionHandle, + NvU64 maxSectionSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void osReleaseCpuAddressSpaceUpperBound(void *pSectionHandle) +{ +} + +void osIoWriteDword( + NvU32 port, + NvU32 data +) +{ +} + +NvU32 osIoReadDword( + NvU32 port +) +{ + return 0; +} + +NvBool osIsVga( + OS_GPU_INFO *pArg1, + NvBool bIsGpuPrimaryDevice +) +{ + return bIsGpuPrimaryDevice; +} + +void osInitOSHwInfo( + OBJGPU *pGpu +) +{ +} + +void osDestroyOSHwInfo( + OBJGPU *pGpu +) +{ +} + +NV_STATUS osDoFunctionLevelReset( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool osGrService( + OS_GPU_INFO *pOsGpuInfo, + NvU32 grIdx, + NvU32 intr, + NvU32 nstatus, + NvU32 addr, + NvU32 dataLo +) +{ + return NV_FALSE; +} + +NvBool osDispService( + NvU32 Intr0, + NvU32 Intr1 +) +{ + return NV_FALSE; +} + +NV_STATUS osDeferredIsr( + OBJGPU *pGpu +) +{ + return NV_OK; +} + +NV_STATUS osGetAcpiTable( + NvU32 tableSignature, + void **ppTable, + NvU32 tableSize, + NvU32 *retSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osInitGetAcpiTable(void) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void osDbgBugCheckOnAssert(void) +{ + return; +} + +NvBool osQueueDpc(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +NvBool osBugCheckOnTimeoutEnabled(void) +{ + return NV_FALSE; +} + +NV_STATUS osNvifMethod( + OBJGPU *pGpu, + NvU32 func, + NvU32 subFunc, + void *pInParam, + NvU16 inParamSize, + NvU32 *pOutStatus, + void *pOutData, + NvU16 *pOutDataSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osNvifInitialize( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +stubOsGetUefiVariable +( + OBJGPU *pGpu, + char *pName, + LPGUID pGuid, + NvU8 *pBuffer, + NvU32 *pSize, + NvU32 *pAttributes +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +#if !RMCFG_FEATURE_PLATFORM_UNIX || \ + (RMCFG_FEATURE_PLATFORM_UNIX && !RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY) +NV_STATUS +osTegraSocDsiParsePanelProps +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS 
+osTegraSocDsiPanelEnable +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDsiPanelReset +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void +osTegraSocDsiPanelDisable +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return; +} + +void +osTegraSocDsiPanelCleanup +( + OS_GPU_INFO *pOsGpuInfo, + void *dsiPanelInfo +) +{ + return; +} +#endif + +NvU32 osGetDynamicPowerSupportMask(void) +{ + return 0; +} + +void osUnrefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo) +{ + return; +} + +NV_STATUS osRefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo) +{ + return NV_OK; +} + +void osClientGcoffDisallowRefcount( + OS_GPU_INFO *pArg1, + NvBool arg2 +) +{ +} + +#if !RMCFG_FEATURE_PLATFORM_DCE /* dce_core_rm_clk_reset.c */ && \ + (!RMCFG_FEATURE_PLATFORM_UNIX || !RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY || \ + RMCFG_FEATURE_DCE_CLIENT_RM /* osSocNvDisp.c */ ) +NV_STATUS +osTegraSocEnableClk +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDisableClk +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetCurrFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 *pCurrFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetMaxFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 *pMaxFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetMinFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 *pMinFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocSetFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 reqFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocSetParent +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRMsource, + NvU32 whichClkRMparent +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetParent +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRMsource, + NvU32 *pWhichClkRMparent +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDeviceReset +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocPmPowergate +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocPmUnpowergate +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} +#endif + +NV_STATUS osIsr +( + OBJGPU *pGpu +) +{ + return NV_OK; +} + +NvBool osLockShouldToggleInterrupts +( + OBJGPU *pGpu +) +{ + return NV_TRUE; +} + +void osEnableInterrupts +( + OBJGPU *pGpu +) +{ +} + +void osDisableInterrupts +( + OBJGPU *pGpu, + NvBool bIsr +) +{ +} + +NV_STATUS osInitMapping +( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +#if !(RMCFG_FEATURE_PLATFORM_UNIX) || \ + (RMCFG_FEATURE_PLATFORM_UNIX && !RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY) +NvU32 +osTegraSocFuseRegRead(NvU32 addr) +{ + return 0; +} +#endif + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c new file mode 100644 index 0000000..db4dc90 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c @@ -0,0 +1,423 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This file contains platform-independent code for the 1 Hz OS timer. + */ + +#include "objtmr.h" +#include "core/thread_state.h" +#include "core/locks.h" + +static NvBool _os1HzCallbackIsOnList(OBJTMR *pTmr, OS1HZPROC callback, void *pData, NvU32 flags); +static NV_STATUS _os1HzCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent); + +/*! + * @brief Initialize 1Hz callbacks + * + * Initialize the 1Hz callback list and create a timer event + * (if using PTIMER events). + * + * @param[in,out] pTmr TMR object pointer + */ +NV_STATUS +osInit1HzCallbacks +( + OBJTMR *pTmr +) +{ + NvU32 i; + + // Initialize the OS 1 Hz timer callback list. + pTmr->pOs1HzCallbackList = NULL; + pTmr->pOs1HzCallbackFreeList = pTmr->os1HzCallbackTable; + + // Fill in all the forward pointers in the callback table. + for (i = 0; i < (TMR_NUM_CALLBACKS_OS - 1); i++) + { + pTmr->os1HzCallbackTable[i].next = &pTmr->os1HzCallbackTable[i+1]; + } + pTmr->os1HzCallbackTable[i].next = NULL; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS)) + { + NV_ASSERT_OK_OR_RETURN(tmrEventCreate(pTmr, &pTmr->pOs1HzEvent, + _os1HzCallback, NULL, TMR_FLAG_RECUR)); + } + + return NV_OK; +} + +/*! + * @brief Destroy 1Hz callbacks + * + * Destroy the 1Hz callback list and free the timer event + * (if using PTIMER events). + * + * @param[in,out] pTmr TMR object pointer + */ +NV_STATUS +osDestroy1HzCallbacks +( + OBJTMR *pTmr +) +{ + if (pTmr->pOs1HzEvent != NULL) + { + tmrEventCancel(pTmr, pTmr->pOs1HzEvent); + tmrEventDestroy(pTmr, pTmr->pOs1HzEvent); + pTmr->pOs1HzEvent = NULL; + } + + pTmr->pOs1HzCallbackList = NULL; + pTmr->pOs1HzCallbackFreeList = NULL; + return NV_OK; +} + +/*! + * @brief Timer function to insert 1Hz callback to the list. + * + * This function is used to insert/register the 1Hz callback to the callback list. 
+ *
+ * @param[in,out] pGpu       OBJGPU object pointer
+ * @param[in] callback       OS1HZPROC callback function pointer
+ * @param[in] pData          Unique identifier for the callback
+ * @param[in] flags          Callback flags
+ *
+ * @return NV_OK                     The callback has been added
+ * @return NV_ERR_INVALID_REQUEST    The callback has not been added
+ */
+NV_STATUS
+osSchedule1SecondCallback
+(
+    OBJGPU     *pGpu,
+    OS1HZPROC   callback,
+    void       *pData,
+    NvU32       flags
+)
+{
+    OBJTMR          *pTmr = GPU_GET_TIMER(pGpu);
+    OS1HZTIMERENTRY *pEntry;
+    NV_STATUS        nvStatus = NV_OK;
+
+    // Grab the next free timer entry.
+    if ((pTmr->pOs1HzCallbackFreeList != NULL) &&
+        !_os1HzCallbackIsOnList(pTmr, callback, pData, flags))
+    {
+        if ((pTmr->pOs1HzCallbackList == NULL) && (pTmr->pOs1HzEvent != NULL))
+        {
+            // First one. Add 1Hz callback to timer events.
+            NV_ASSERT_OK(tmrEventScheduleRelSec(pTmr, pTmr->pOs1HzEvent, 1));
+        }
+
+        pEntry = pTmr->pOs1HzCallbackFreeList;
+        pTmr->pOs1HzCallbackFreeList = pEntry->next;
+
+        pEntry->callback = callback;
+        pEntry->data     = pData;
+        pEntry->flags    = flags;
+
+        pEntry->next = pTmr->pOs1HzCallbackList;
+        pTmr->pOs1HzCallbackList = pEntry;
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO, "Callback registration FAILED!\n");
+        nvStatus = NV_ERR_INVALID_REQUEST;
+    }
+
+    return nvStatus;
+}
+
+/*!
+ * @brief Timer function to remove a 1Hz callback from the list.
+ *
+ * This function is used to remove/unregister a 1Hz callback from
+ * the callback list.
+ *
+ * @param[in,out] pGpu       OBJGPU object pointer
+ * @param[in] callback       OS1HZPROC callback function pointer
+ * @param[in] pData          Unique identifier for the callback
+ */
+void
+osRemove1SecondRepeatingCallback
+(
+    OBJGPU     *pGpu,
+    OS1HZPROC   callback,
+    void       *pData
+)
+{
+    OBJTMR           *pTmr = GPU_GET_TIMER(pGpu);
+    OS1HZTIMERENTRY  *pEntry;
+    OS1HZTIMERENTRY **ppEntryPtr;
+
+    ppEntryPtr = &pTmr->pOs1HzCallbackList;
+    while ((*ppEntryPtr) != NULL)
+    {
+        pEntry = *ppEntryPtr;
+        if ((pEntry->callback == callback) &&
+            (pEntry->data == pData) &&
+            (pEntry->flags & NV_OS_1HZ_REPEAT))
+        {
+            *ppEntryPtr = pEntry->next;
+            pEntry->next = pTmr->pOs1HzCallbackFreeList;
+            pEntry->data = NULL;
+            pEntry->callback = NULL;
+            pEntry->flags = NV_OS_1HZ_REPEAT;
+            pTmr->pOs1HzCallbackFreeList = pEntry;
+            break;
+        }
+        ppEntryPtr = &pEntry->next;
+    }
+
+    if ((pTmr->pOs1HzCallbackList == NULL) && (pTmr->pOs1HzEvent != NULL))
+    {
+        // Last one. Remove 1Hz callback from timer events.
+        tmrEventCancel(pTmr, pTmr->pOs1HzEvent);
+    }
+}
+
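+/*
+ * Editor's note: a minimal usage sketch of the two entry points above,
+ * illustrative only and deliberately compiled out. The callback name, the
+ * NULL pData cookie, and the assumption that OS1HZPROC has the signature
+ * void (*)(OBJGPU *, void *) are hypothetical, inferred from the call site
+ * in osRun1HzCallbacksNow() below.
+ */
+#if 0
+static void _examplePoll1Hz(OBJGPU *pGpu, void *pData)
+{
+    // Invoked roughly once per second while registered.
+}
+
+static NV_STATUS _exampleStartPolling(OBJGPU *pGpu)
+{
+    // NV_OS_1HZ_REPEAT keeps the entry on the list across invocations;
+    // without it, the entry is recycled after a single callback.
+    return osSchedule1SecondCallback(pGpu, _examplePoll1Hz, NULL, NV_OS_1HZ_REPEAT);
+}
+
+static void _exampleStopPolling(OBJGPU *pGpu)
+{
+    // Removal matches on the (callback, pData) pair.
+    osRemove1SecondRepeatingCallback(pGpu, _examplePoll1Hz, NULL);
+}
+#endif
+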
+//
+// The TRUE return value is used by Vista to determine whether we were able
+// to acquire the lock. If we cannot acquire the lock, the API or ISR/DPC
+// path holds it.
+//
+NvBool
+osRun1HzCallbacksNow
+(
+    OBJGPU *pGpu
+)
+{
+    OBJSYS            *pSys = SYS_GET_INSTANCE();
+    OBJTMR            *pTmr = GPU_GET_TIMER(pGpu);
+    OS1HZTIMERENTRY  **ppEntryPtr;
+    OS1HZPROC          pProc;
+    THREAD_STATE_NODE  threadState;
+    void              *pData;
+    NvBool             bAcquired = NV_TRUE;
+    GPU_MASK           lockedGpus = 0;
+#if !TLS_DPC_HAVE_UNIQUE_ID
+    NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer
+    PORT_MEM_ALLOCATOR *pDpcAllocator;
+    pDpcAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
+    tlsIsrInit(pDpcAllocator);
+#endif
+
+    //
+    // LOCK:
+    //
+    // What IRQL are we at here? Should we acquire the API lock in addition
+    // to, or instead of, the GPUs lock?
+    //
+
+    // LOCK: try to acquire GPU lock
+    if (rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_DEVICE,
+                              GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR,
+                              &lockedGpus) != NV_OK)
+    {
+        // Bail out: a conflicting thread holds the GPU lock.
+        bAcquired = NV_FALSE;
+        goto exit;
+    }
+
+    if (osCondAcquireRmSema(pSys->pSema) != NV_OK)
+    {
+        // UNLOCK: release GPU lock
+        rmGpuGroupLockRelease(lockedGpus, GPUS_LOCK_FLAGS_NONE);
+        // Bail out: a conflicting thread holds the RM semaphore.
+        bAcquired = NV_FALSE;
+        goto exit;
+    }
+
+    threadStateInitISRAndDeferredIntHandler(&threadState, pGpu,
+        THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+
+    if (!gpuIsGpuFullPower(pGpu))
+    {
+        goto exit;
+    }
+
+    ppEntryPtr = &pTmr->pOs1HzCallbackList;
+    for (;;)
+    {
+        POS1HZTIMERENTRY entry;
+
+        // Be paranoid.
+        entry = *ppEntryPtr;
+
+        // End of list?
+        if (entry == NULL)
+            break;
+
+        // Run the callback.
+        if (entry->callback != NULL)
+        {
+            pProc = entry->callback;
+            pData = entry->data;
+            pProc(pGpu, pData);
+        }
+
+        //
+        // The proc call above can add new entries to the list. New entries
+        // are added at the head of the list, so *ppEntryPtr may no longer
+        // point at our current entry. If that is the case, search the list
+        // again to relocate the entry and recompute ppEntryPtr.
+        //
+        if (entry != *ppEntryPtr)
+        {
+            POS1HZTIMERENTRY item;
+
+            ppEntryPtr = &pTmr->pOs1HzCallbackList;
+            for (;;)
+            {
+                // Be paranoid.
+                item = *ppEntryPtr;
+
+                // End of list?
+                if (item == NULL)
+                    break;
+
+                if (item == entry)
+                {
+                    break;
+                }
+
+                ppEntryPtr = &item->next;
+            }
+
+            if (item != entry)
+            {
+                //
+                // The entry was removed from the list inside the proc, so
+                // there is nothing left to do below. Use ppEntryPtr = NULL
+                // to indicate that for now.
+                //
+                ppEntryPtr = NULL;
+            }
+
+        }
+
+        //
+        // If this timer is supposed to repeat, leave it in place.
+        // Otherwise, move it to the free list.
+        //
+        if ( (ppEntryPtr != NULL) &&
+            !(entry->flags & NV_OS_1HZ_REPEAT))
+        {
+            *ppEntryPtr = entry->next;
+            entry->next = pTmr->pOs1HzCallbackFreeList;
+            pTmr->pOs1HzCallbackFreeList = entry;
+        }
+        else
+        {
+            ppEntryPtr = &entry->next;
+        }
+    }
+
+exit:
+    if (bAcquired)
+    {
+        // Tear down in the reverse order of acquisition.
+        threadStateFreeISRAndDeferredIntHandler(&threadState,
+            pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+        osReleaseRmSema(pSys->pSema, NULL);
+        // UNLOCK: release GPU lock
+        rmGpuGroupLockRelease(lockedGpus, GPUS_LOCK_FLAGS_NONE);
+    }
+
+#if !TLS_DPC_HAVE_UNIQUE_ID
+    tlsIsrDestroy(pDpcAllocator);
+    portMemAllocatorRelease(pDpcAllocator);
+#endif
+
+    return bAcquired;
+}
+
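+/*
+ * Editor's note: a distilled, self-contained sketch of the mutation-tolerant
+ * traversal used above, with a hypothetical NODE type; illustrative only and
+ * compiled out. After each callback the current node is re-located, because
+ * the callback may have inserted entries at the head or unlinked entries,
+ * including itself. Unlike the driver loop above, this sketch simply stops
+ * when the current node has disappeared.
+ */
+#if 0
+typedef struct NODE
+{
+    struct NODE *next;
+    void       (*fn)(void *);
+    void        *arg;
+} NODE;
+
+static void runAllCallbacks(NODE **ppHead)
+{
+    NODE **ppSlot = ppHead;
+    NODE  *pCur;
+
+    while ((pCur = *ppSlot) != NULL)
+    {
+        pCur->fn(pCur->arg);        // may insert or unlink list nodes
+
+        if (*ppSlot != pCur)
+        {
+            // Our slot no longer points at pCur: re-scan from the head.
+            for (ppSlot = ppHead; *ppSlot != NULL; ppSlot = &(*ppSlot)->next)
+            {
+                if (*ppSlot == pCur)
+                    break;
+            }
+            if (*ppSlot == NULL)
+                break;              // pCur was unlinked by its own callback
+        }
+        ppSlot = &pCur->next;
+    }
+}
+#endif
+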
+/*!
+ * @brief Check for a duplicate callback on the list.
+ *
+ * Walks the callback list and reports whether a repeating callback with the
+ * same function pointer and data, registered with the NV_OS_1HZ_REPEAT flag,
+ * is already present.
+ *
+ * @param[in,out] pTmr       TMR object pointer
+ * @param[in] callback       OS1HZPROC callback function pointer
+ * @param[in] pData          Unique identifier for the callback
+ * @param[in] flags          Callback flags
+ *
+ * @return NV_TRUE      The callback has been registered
+ * @return NV_FALSE     The callback has not been registered
+ */
+static NvBool
+_os1HzCallbackIsOnList
+(
+    OBJTMR     *pTmr,
+    OS1HZPROC   callback,
+    void       *pData,
+    NvU32       flags
+)
+{
+    POS1HZTIMERENTRY pScan;
+
+    for (pScan = pTmr->pOs1HzCallbackList; pScan != NULL; pScan = pScan->next)
+    {
+        if ((pScan->callback == callback) &&
+            (pScan->data == pData) &&
+            (pScan->flags & NV_OS_1HZ_REPEAT))
+        {
+            break;
+        }
+    }
+
+    return pScan != NULL;
+}
+
+/*!
+ * @brief OS 1Hz callback function
+ *
+ * Calls all callbacks on the 1Hz list and reschedules the callback
+ * (if using PTIMER events).
+ *
+ * @param[in,out] pGpu          GPU object pointer
+ * @param[in,out] pTmr          TMR object pointer
+ * @param[in]     pTmrEvent     pointer to the timer event
+ *
+ * @return NV_OK                     The callback was rescheduled successfully.
+ * @return NV_ERR_INVALID_ARGUMENT   The callback was not rescheduled.
+ */
+static NV_STATUS
+_os1HzCallback
+(
+    OBJGPU     *pGpu,
+    OBJTMR     *pTmr,
+    TMR_EVENT  *pTmrEvent
+)
+{
+    osRun1HzCallbacksNow(pGpu);
+
+    // TMR_FLAG_RECUR does not work, so reschedule it here.
+    return tmrEventScheduleRelSec(pTmr, pTmrEvent, 1);
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c
new file mode 100644
index 0000000..c515807
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c
@@ -0,0 +1,1453 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "rmapi/rmapi.h"
+#include "rmapi/client.h"
+#include "entry_points.h"
+#include "core/locks.h"
+#include "core/thread_state.h"
+#include "vgpu/rpc.h"
+#include "resource_desc.h"
+#include "gpu/disp/disp_objs.h"
+#include "gpu/disp/disp_channel.h"
+#include "nvsecurityinfo.h"
+
+#include "gpu/device/device.h"
+
+#include "class/cl0005.h" // NV01_EVENT
+#include "class/clc574.h" // UVM_CHANNEL_RETAINER
+
+#include "tmr.h"
+
+//
+// RM Alloc & Free internal flags -- code should be migrated to use rsresdesc
+// and rmapi types directly where possible.
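+
+//
+// Editor's note (overview added for readability): an allocation request
+// flows through this file roughly as follows.
+//   1. serverAllocApiCopyIn()   - copy user parameters (and any requested
+//      access-rights mask) into kernel memory;
+//   2. serverTopLock_Prologue() - take the RM semaphore and the API lock;
+//   3. serverResLock_Prologue() - take the GPUs lock, or a per-device GPU
+//      group lock, as the resource descriptor flags dictate;
+//   4. clientAllocResource()    - construct the resource, then RPC to the
+//      vGPU host or GSP where the class flags require it;
+//   5. the matching _Epilogue() and copy-out calls unwind in reverse order.
+//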
+// +#define RM_ALLOC_STATES_NONE 0 +#define RM_ALLOC_STATES_INTERNAL_CLIENT_HANDLE ALLOC_STATE_INTERNAL_CLIENT_HANDLE // NVBIT(5) +#define RM_ALLOC_STATES_SKIP_RPC NVBIT(6) +#define RM_ALLOC_STATES_INTERNAL_ALLOC NVBIT(7) + +#define RM_FREE_STATES_NONE 0 + +static void +rmapiResourceDescToLegacyFlags +( + const RS_RESOURCE_DESC *pResDesc, + NvU32 *pAllocFlags, + NvU32 *pFreeFlags +) +{ + if (pAllocFlags) + { + *pAllocFlags = (pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC) ? RM_LOCK_FLAGS_NONE : RM_LOCK_FLAGS_NO_GPUS_LOCK; + *pAllocFlags |= (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC) ? RM_LOCK_FLAGS_GPU_GROUP_LOCK : 0; + } + + if (pFreeFlags) + { + *pFreeFlags = (pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE) ? RM_LOCK_FLAGS_NONE : RM_LOCK_FLAGS_NO_GPUS_LOCK; + *pFreeFlags |= (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE) ? RM_LOCK_FLAGS_GPU_GROUP_LOCK : 0; + } +} + +NV_STATUS +serverAllocApiCopyIn +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams, + API_STATE **ppParamCopy +) +{ + NV_STATUS status; + API_SECURITY_INFO *pSecInfo = pRmAllocParams->pSecInfo; + NvBool bCopyInParams = pSecInfo->paramLocation == PARAM_LOCATION_USER; + RMAPI_PARAM_COPY *pParamCopy = NULL; + NvU32 allocParamsSize = 0; + void *pUserParams = pRmAllocParams->pAllocParams; + + pParamCopy = (RMAPI_PARAM_COPY*)PORT_ALLOC(g_resServ.pAllocator, sizeof(*pParamCopy)); + if (pParamCopy == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + portMemSet(pParamCopy, 0, sizeof(*pParamCopy)); + pRmAllocParams->pAllocParams = NULL; + + // Setup for access to param + // Param size is initialized to zero, and then set via rmapiParamsCopyInit + RMAPI_PARAM_COPY_INIT(*pParamCopy, pRmAllocParams->pAllocParams, NV_PTR_TO_NvP64(pUserParams), allocParamsSize, 1); + + // Look up param size based on hClass + status = rmapiParamsCopyInit(pParamCopy, pRmAllocParams->externalClassId); + if (NV_OK != status) + goto done; + + // Using the per-class info set above, pull in the parameters for this allocation + if (pParamCopy->paramsSize > 0) + { + // gain access to client's parameters via 'pKernelCtrl' + status = rmapiParamsAcquire(pParamCopy, bCopyInParams); + if (status != NV_OK) + goto done; + } + + // Prevent requesting rights before rights are enabled, just in case old code doesn't zero it properly. 
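+    // (Editor's note: when access rights are disabled, a caller-supplied
+    // rights mask is silently ignored rather than rejected, so older callers
+    // keep working.)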
+ if (!pServer->bRsAccessEnabled) + pRmAllocParams->pRightsRequested = NULL; + + if (pRmAllocParams->pRightsRequested != NULL) + { + // copyFromUser requires a non-stack buffer, allocate one to copy into + RS_ACCESS_MASK *pMaskBuffer = (RS_ACCESS_MASK*)PORT_ALLOC(g_resServ.pAllocator, sizeof(RS_ACCESS_MASK)); + if (pMaskBuffer == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + // Mask is a fixed size, just copy it directly into allocParams + status = rmapiParamsCopyIn("RightsRequested", + pMaskBuffer, + NV_PTR_TO_NvP64(pRmAllocParams->pRightsRequested), + sizeof(RS_ACCESS_MASK), + bCopyInParams); + + portMemCopy(&pRmAllocParams->rightsRequestedCopy, sizeof(RS_ACCESS_MASK), + pMaskBuffer, sizeof(RS_ACCESS_MASK)); + + PORT_FREE(g_resServ.pAllocator, pMaskBuffer); + + if (status != NV_OK) + goto done; + + pRmAllocParams->pRightsRequested = &pRmAllocParams->rightsRequestedCopy; + } +done: + if (status != NV_OK) + { + if (pParamCopy != NULL) + PORT_FREE(g_resServ.pAllocator, pParamCopy); + pParamCopy = NULL; + } + + if (ppParamCopy != NULL) + *ppParamCopy = pParamCopy; + + return status; +} + +NV_STATUS +serverAllocApiCopyOut +( + RsServer *pServer, + NV_STATUS status, + API_STATE *pParamCopy +) +{ + NV_STATUS cpStatus = NV_OK; + if (pParamCopy->paramsSize > 0) + { + // don't copyout if an error + if (status != NV_OK) + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + cpStatus = rmapiParamsRelease(pParamCopy); + if (status == NV_OK) + status = cpStatus; + } + + PORT_FREE(g_resServ.pAllocator, pParamCopy); + + return status; +} + +NV_STATUS +serverLookupSecondClient +( + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phClient +) +{ + *phClient = 0; + + switch (pParams->externalClassId) + { + case UVM_CHANNEL_RETAINER: + { + NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS *pUvmChannelRetainerParams = pParams->pAllocParams; + + if (pUvmChannelRetainerParams->hClient != pParams->hClient) + *phClient = pUvmChannelRetainerParams->hClient; + + break; + } + default: + break; + } + + return NV_OK; +} + +NV_STATUS +serverTopLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status; + if ((pLockInfo->flags & RM_LOCK_FLAGS_RM_SEMA) && + !(pLockInfo->state & RM_LOCK_STATES_RM_SEMA_ACQUIRED)) + { + if ((status = osAcquireRmSema(pSys->pSema)) != NV_OK) + return status; + pLockInfo->state |= RM_LOCK_STATES_RM_SEMA_ACQUIRED; + *pReleaseFlags |= RM_LOCK_RELEASE_RM_SEMA; + } + + if (!(pLockInfo->flags & RM_LOCK_FLAGS_NO_API_LOCK)) + { + if (!(pLockInfo->state & RM_LOCK_STATES_API_LOCK_ACQUIRED)) + { + NvU32 flags = RMAPI_LOCK_FLAGS_NONE; + if (access == LOCK_ACCESS_READ) + flags |= RMAPI_LOCK_FLAGS_READ; + + if ((status = rmApiLockAcquire(flags, RM_LOCK_MODULES_CLIENT)) != NV_OK) + { + return status; + } + pLockInfo->state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + *pReleaseFlags |= RM_LOCK_RELEASE_API_LOCK; + } + else + { + if (!rmApiLockIsOwner()) + { + NV_ASSERT(0); + return NV_ERR_INVALID_LOCK_STATE; + } + } + } + + return NV_OK; +} + +void +serverTopLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (*pReleaseFlags & RM_LOCK_RELEASE_API_LOCK) + { + rmApiLockRelease(); + pLockInfo->state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_API_LOCK; + } + + if (*pReleaseFlags & RM_LOCK_RELEASE_RM_SEMA) + { + 
osReleaseRmSema(pSys->pSema, NULL); + pLockInfo->state &= ~RM_LOCK_STATES_RM_SEMA_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_RM_SEMA; + } +} + +NV_STATUS +serverResLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pParentGpu = NULL; + + if (pLockInfo->state & RM_LOCK_STATES_GPUS_LOCK_ACQUIRED) + { + if (rmGpuLockIsOwner()) + { + return NV_OK; + } + else + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + + if (!(pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK)) + { + if (rmGpuLockIsOwner()) + { + if (!(pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + else + { + if ((status = rmGpuLocksAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK) + goto done; + + *pReleaseFlags |= RM_LOCK_RELEASE_GPUS_LOCK; + pLockInfo->state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + } + } + + if (pLockInfo->flags & RM_LOCK_FLAGS_GPU_GROUP_LOCK) + { + RsResourceRef *pParentRef; + GpuResource *pGpuResource; + NvU32 gpuMask; + (void)gpuMask; + + pParentRef = pLockInfo->pContextRef; + if (pParentRef == NULL) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_OBJECT_PARENT; + goto done; + } + + // + // Use the pGpu from parent resource as it will work on alloc & free. + // Everything below NV0080_DEVICE uses the same pGpu group + // + // GPU teardown paths free client resources before tearing down pGpu so + // pGpu should always be valid at this point. + // + pGpuResource = dynamicCast(pParentRef->pResource, GpuResource); + + if (pGpuResource == NULL) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_OBJECT_PARENT; + goto done; + } + + pParentGpu = GPU_RES_GET_GPU(pGpuResource); + + if (pLockInfo->state & RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED) + { + if (rmGpuGroupLockIsOwner(pParentGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask)) + { + goto done; + } + else + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + + if (rmGpuGroupLockIsOwner(pParentGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask)) + { + if (!(pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + else + { + status = rmGpuGroupLockAcquire(pParentGpu->gpuInstance, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_CLIENT, + &pLockInfo->gpuMask); + if (status != NV_OK) + goto done; + + *pReleaseFlags |= RM_LOCK_RELEASE_GPU_GROUP_LOCK; + pLockInfo->state |= RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + } + } + +done: + switch(pLockInfo->traceOp) + { + case RS_LOCK_TRACE_ALLOC: + LOCK_METER_DATA(ALLOC, pLockInfo->traceClassId, 0, 0); + break; + case RS_LOCK_TRACE_FREE: + LOCK_METER_DATA(FREE_OBJECT, pLockInfo->traceClassId, 0, 0); + break; + case RS_LOCK_TRACE_CTRL: + LOCK_METER_DATA(RMCTRL, pLockInfo->traceClassId, pLockInfo->flags, status); + break; + default: + break; + } + + return status; +} + +NV_STATUS +serverAllocEpilogue_WAR +( + RsServer *pServer, + NV_STATUS status, + NvBool bClientAlloc, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + // + // Pre-Volta Linux swapgroups is the only remaining use of channel grabbing. + // Bug 2869820 is tracking the transition of swapgroups from requiring this + // RM feature. 
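+    // (Editor's note: the NV_ERR_INSERT_DUPLICATE_NAME case below is the
+    // hook: an alloc on an existing display-channel handle is reinterpreted
+    // as a request to grab ownership of that hardware channel.)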
+ // + NV_STATUS tmpStatus; + if (!bClientAlloc && status == NV_ERR_INSERT_DUPLICATE_NAME) + { + NvBool gpulockRelease = NV_FALSE; + RsResourceRef *pResourceRef; + + if (!rmGpuLockIsOwner()) + { + tmpStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_CLIENT); + + if (tmpStatus != NV_OK) + return tmpStatus; + + gpulockRelease = NV_TRUE; + } + + // + // Hack for taking ownership of display channels. Clients call rmAlloc + // on a previously allocated handle to indicate they want to grab + // ownership of the underlying hardware channel. + // + // TODO - this should be moved to an RM control and called directly by + // clients instead of the overloaded allocation call. RmAlloc should + // be for allocating objects only. + // + tmpStatus = clientGetResourceRef(pRmAllocParams->pClient, pRmAllocParams->hResource, &pResourceRef); + if (tmpStatus == NV_OK) + { + DispChannel *pDispChannel = dynamicCast(pResourceRef->pResource, DispChannel); + if (pDispChannel != NULL) + { + status = dispchnGrabChannel(pDispChannel, + pRmAllocParams->hClient, + pRmAllocParams->hParent, + pRmAllocParams->hResource, + pRmAllocParams->externalClassId, + pRmAllocParams->pAllocParams); + } + } + + if (gpulockRelease) + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + return status; +} + +static NV_STATUS +_rmAlloc +( + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pUserAllocParams, + NvU32 allocFlags, + NvU32 allocInitStates, + RS_LOCK_INFO *pLockInfo, + NvP64 pRightsRequested, + API_SECURITY_INFO secInfo +) +{ + NV_STATUS status; + RS_RES_ALLOC_PARAMS_INTERNAL rmAllocParams = {0}; + + NV_ASSERT_OR_RETURN(phObject != NULL, NV_ERR_INVALID_ARGUMENT); + + // init RmAllocParams + rmAllocParams.hClient = hClient; + rmAllocParams.hParent = hParent; + rmAllocParams.hResource = *phObject; + rmAllocParams.externalClassId = hClass; + rmAllocParams.allocFlags = allocFlags; + rmAllocParams.allocState = allocInitStates; + rmAllocParams.pSecInfo = &secInfo; + rmAllocParams.pResourceRef = NULL; + rmAllocParams.pAllocParams = NvP64_VALUE(pUserAllocParams); + rmAllocParams.pLockInfo = pLockInfo; + rmAllocParams.pRightsRequested = NvP64_VALUE(pRightsRequested); + rmAllocParams.pRightsRequired = NULL; + + status = serverAllocResource(&g_resServ, &rmAllocParams); + *phObject = rmAllocParams.hResource; + + return status; + +} + +static +NV_STATUS +_fixupAllocParams +( + RS_RESOURCE_DESC **ppResDesc, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + RS_RESOURCE_DESC *pResDesc = *ppResDesc; + + if ((pResDesc->pClassInfo != NULL) && (pResDesc->pClassInfo->classId == classId(Event))) + { + NV0005_ALLOC_PARAMETERS *pNv0005Params = pRmAllocParams->pAllocParams; + + // + // This field isn't filled out consistently by clients. Some clients specify NV01_EVENT as the class + // and then override it using the subclass in the event parameters, while other clients specify the + // same subclass in both the RmAllocParams and event params. NV01_EVENT isn't a valid class to allocate + // so overwrite it with the subclass from the event params. 
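+        // (Editor's note: for example, a caller may pass hClass = NV01_EVENT
+        // while NV0005_ALLOC_PARAMETERS::hClass names the concrete event
+        // subclass; after this fixup the allocation proceeds as if that
+        // subclass had been requested directly.)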
+ // + if (pRmAllocParams->externalClassId == NV01_EVENT) + pRmAllocParams->externalClassId = pNv0005Params->hClass; + + pNv0005Params->hSrcResource = pRmAllocParams->hParent; + + // No support for event and src resource that reside under different clients + if (pNv0005Params->hParentClient != pRmAllocParams->hClient) + pRmAllocParams->hParent = pRmAllocParams->hClient; + + // class id may have changed so refresh the resource descriptor, but make sure it is still an Event + pResDesc = RsResInfoByExternalClassId(pRmAllocParams->externalClassId); + if (pResDesc == NULL || pResDesc->pClassInfo == NULL || pResDesc->pClassInfo->classId != classId(Event)) + return NV_ERR_INVALID_CLASS; + + *ppResDesc = pResDesc; + } + + return NV_OK; +} + +NV_STATUS +serverAllocResourceUnderLock +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pRmAllocParams +) +{ + NvHandle hClient = pRmAllocParams->hClient; + NvHandle hParent; + RS_RESOURCE_DESC *pResDesc; + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + RsClient *pClient = pRmAllocParams->pClient; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef = NULL; + NvU32 i = 0; + RS_LOCK_INFO *pLockInfo = pRmAllocParams->pLockInfo; + NvU32 releaseFlags = 0; + RS_ACCESS_MASK rightsRequired; + LOCK_ACCESS_TYPE resLockAccess = LOCK_ACCESS_WRITE; + OBJGPU *pGpu = NULL; + NvBool bClearRecursiveStateFlag = NV_FALSE; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + if (pRmAllocParams->pSecInfo == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pResDesc = RsResInfoByExternalClassId(pRmAllocParams->externalClassId); + if (pResDesc == NULL) + { + return NV_ERR_INVALID_CLASS; + } + + NV_ASSERT_OK_OR_RETURN(_fixupAllocParams(&pResDesc, pRmAllocParams)); + rmapiResourceDescToLegacyFlags(pResDesc, &pLockInfo->flags, NULL); + + pLockInfo->traceOp = RS_LOCK_TRACE_ALLOC; + pLockInfo->traceClassId = pRmAllocParams->externalClassId; + hParent = pRmAllocParams->hParent; + if (pRmAllocParams->hResource == hClient) + { + if (pResDesc->pParentList[i] != 0) + status = NV_ERR_INVALID_OBJECT_PARENT; + hParent = 0; + + // Single instance restriction is implied + NV_ASSERT(!pResDesc->bMultiInstance); + } + else + { + // Check if parent is valid + status = clientGetResourceRef(pClient, hParent, &pParentRef); + if (status != NV_OK) + { + goto done; + } + pLockInfo->pContextRef = pParentRef; + } + + if ((pResDesc->flags & RS_FLAGS_INTERNAL_ONLY) && + !(pRmAllocParams->allocState & RM_ALLOC_STATES_INTERNAL_ALLOC)) + { + status = NV_ERR_INVALID_CLASS; + goto done; + } + + status = serverAllocResourceLookupLockFlags(&g_resServ, RS_LOCK_RESOURCE, pRmAllocParams, &resLockAccess); + if (status != NV_OK) + goto done; + + // + // We can get the GPU pointer for alloc of a device child. + // Device allocs need to be handled separately. 
See deviceInit_IMPL() + // + tmpStatus = gpuGetByRef(pParentRef, NULL, &pGpu); + + // Override locking flags if we'll need to RPC to GSP + if (pGpu != NULL && IS_GSP_CLIENT(pGpu) && + (pResDesc->flags & RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) + { + resLockAccess = LOCK_ACCESS_WRITE; // always write as we're RPCing to GSP + + // + // If the resource desc says no need for GPU locks, we still need to lock + // the current pGpu in order to send the RPC + // + if (pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK) + { + NV_PRINTF(LEVEL_INFO, "Overriding flags for alloc of class %04x\n", + pRmAllocParams->externalClassId); + pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + if ((pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS) == 0) + { + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + bClearRecursiveStateFlag = NV_TRUE; + } + } + } + + status = serverResLock_Prologue(&g_resServ, resLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + if (pParentRef != NULL) + { + + // If single instance, ensure parent doesn't yet have a class of this type + if (!pResDesc->bMultiInstance) + { + if (refFindChildOfType(pParentRef, pResDesc->pClassInfo->classId, NV_TRUE, NULL) == NV_OK) + { + status = NV_ERR_STATE_IN_USE; + } + } + + // Check if hParent is an allowed parent for this resource + if (status == NV_OK && !pResDesc->bAnyParent) + { + status = NV_ERR_INVALID_OBJECT_PARENT; + for (i = 0; pResDesc->pParentList[i]; i++) + { + if (pParentRef->internalClassId == pResDesc->pParentList[i]) + { + status = NV_OK; + break; + } + } + } + } + + if (status != NV_OK) + goto done; + + status = clientAssignResourceHandle(pClient, &pRmAllocParams->hResource); + if (status != NV_OK) + goto done; + + pRmAllocParams->hParent = (pRmAllocParams->hParent == 0) ? pRmAllocParams->hClient : pRmAllocParams->hParent; + + if (pServer->bRsAccessEnabled) + { + rsAccessMaskFromArray(&rightsRequired, pResDesc->pRightsRequiredArray, + pResDesc->rightsRequiredLength); + pRmAllocParams->pRightsRequired = &rightsRequired; + } + + status = clientAllocResource(pClient, &g_resServ, pRmAllocParams); + if (status != NV_OK) + goto done; + + pResourceRef = pRmAllocParams->pResourceRef; + + // + // Alloc RPC handling + // + if (!(pRmAllocParams->allocState & RM_ALLOC_STATES_SKIP_RPC)) + { + if (pResDesc->flags & (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) + { + OBJGPU *pGpu = NULL; + RmResource *pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + CALL_CONTEXT callContext = {0}; + CALL_CONTEXT *pOldContext = NULL; + + status = gpuGetByRef(pResourceRef, NULL, &pGpu); + if (status != NV_OK || pRmResource == NULL) + { + status = NV_ERR_INVALID_CLASS; + goto done; + } + + if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + { + status = NV_OK; + goto done; + } + + // if physical RM RPC make sure we're a GSP client otherwise skip + if (((pResDesc->flags & (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) == RS_FLAGS_ALLOC_RPC_TO_PHYS_RM) && + (!IS_GSP_CLIENT(pGpu))) + { + status = NV_OK; + goto done; + } + + // Set the call context to allow vgpuGetCallingContextDevice() + // and other context dependent functions to operate in the RPC code. + // + // The context is assigned in the above clientAllocResource() call, + // but we can't simply extend the context scope to this place + // as pResourceRef is allocated internally in clientAllocResource(). 
+ // + // Instead, we create basically the same context here once again + // and use it for the RPC call. + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pRmAllocParams->pLockInfo; + callContext.secInfo = *pRmAllocParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + NV_RM_RPC_ALLOC_OBJECT(pGpu, + pRmAllocParams->hClient, + pRmAllocParams->hParent, + pRmAllocParams->hResource, + pRmAllocParams->externalClassId, + pRmAllocParams->pAllocParams, + status); + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + goto done; + + pRmResource->bRpcFree = NV_TRUE; + } + } + +done: + if ((status != NV_OK) && (pResourceRef != NULL)) + { + RS_RES_FREE_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hResource = pRmAllocParams->hResource; + params.pResourceRef = pResourceRef; + params.pSecInfo = pRmAllocParams->pSecInfo; + params.pLockInfo = pRmAllocParams->pLockInfo; + tmpStatus = clientFreeResource(pClient, &g_resServ, ¶ms); + NV_ASSERT(tmpStatus == NV_OK); + pRmAllocParams->pResourceRef = NULL; + } + + serverResLock_Epilogue(&g_resServ, resLockAccess, pLockInfo, &releaseFlags); + + if (bClearRecursiveStateFlag) + { + pLockInfo->state &= ~RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + } + return status; +} + +NV_STATUS +serverFreeResourceRpcUnderLock +( + RsServer *pServer, + RS_RES_FREE_PARAMS *pFreeParams +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef = pFreeParams->pResourceRef; + OBJGPU *pGpu = NULL; + NvBool bBcResource; + RmResource *pRmResource = NULL; + + NV_ASSERT_OR_RETURN(pResourceRef != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if ((status != NV_OK) || + (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) || + (pRmResource == NULL) || + (pRmResource->bRpcFree == NV_FALSE)) + { + status = NV_OK; + goto rpc_done; + } + + gpuSetThreadBcState(pGpu, bBcResource); + NV_RM_RPC_FREE(pGpu, pResourceRef->pClient->hClient, + pResourceRef->pParentRef->hResource, + pResourceRef->hResource, status); + +rpc_done: + return status; +} + +void +serverResLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + NvU32 gpuLockFlags = GPUS_LOCK_FLAGS_NONE; + if (access == LOCK_ACCESS_READ) + gpuLockFlags |= GPU_LOCK_FLAGS_READ; + + if (*pReleaseFlags & RM_LOCK_RELEASE_GPU_GROUP_LOCK) + { + // UNLOCK: release GPU group lock + rmGpuGroupLockRelease(pLockInfo->gpuMask, GPUS_LOCK_FLAGS_NONE); + pLockInfo->state &= ~RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_GPU_GROUP_LOCK; + } + + if (*pReleaseFlags & RM_LOCK_RELEASE_GPUS_LOCK) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + pLockInfo->state &= ~RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_GPUS_LOCK; + } +} + +NV_STATUS +serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams) +{ + portMemSet(pParams, 0, sizeof(*pParams)); + pParams->hClient = hClient; + pParams->hResource = hResource; + pParams->pLockInfo = pLockInfo; + return NV_OK; +} + +NV_STATUS +serverUpdateLockFlagsForFree +( + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + RS_LOCK_INFO *pLockInfo = pRmFreeParams->pLockInfo; + OBJGPU *pGpu = NULL; + + 
rmapiResourceDescToLegacyFlags(pRmFreeParams->pResourceRef->pResourceDesc, NULL, &pLockInfo->flags); + + pLockInfo->pContextRef = pRmFreeParams->pResourceRef->pParentRef; + if (gpuGetByRef(pLockInfo->pContextRef, NULL, &pGpu) == NV_OK) + { + RmResource *pRmResource = dynamicCast(pRmFreeParams->pResourceRef->pResource, RmResource); + if (pGpu != NULL && IS_GSP_CLIENT(pGpu) && pRmResource != NULL && pRmResource->bRpcFree) + { + // + // If the resource desc says no need for GPU locks, we still need to lock + // the current pGpu in order to send the RPC + // + if (pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK) + { + NV_PRINTF(LEVEL_INFO, "Overriding flags for free of class %04x\n", + pRmFreeParams->pResourceRef->externalClassId); + pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + } + } + } + + return NV_OK; +} + +NV_STATUS +rmapiFreeResourcePrologue +( + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + RsResourceRef *pResourceRef = pRmFreeParams->pResourceRef; + NV_STATUS tmpStatus; + OBJGPU *pGpu = NULL; + NvBool bBcResource; + + NV_ASSERT_OR_RETURN(pResourceRef, NV_ERR_INVALID_OBJECT_HANDLE); + + rmapiControlCacheFreeObject(pRmFreeParams->hClient, pRmFreeParams->hResource); + + // + // Use gpuGetByRef instead of GpuResource because gpuGetByRef will work even + // if resource isn't a GpuResource (e.g.: Memory which can be allocated + // under a subdevice, device or client root) + // + tmpStatus = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (tmpStatus == NV_OK) + gpuSetThreadBcState(pGpu, bBcResource); + + // + // Need to cancel pending timer callbacks before event structs are freed. + // RS-TODO: provide notifications to objects referencing events or add + // dependency + // + TimerApi *pTimerApi = dynamicCast(pResourceRef->pResource, TimerApi); + if (pTimerApi != NULL) + { + tmrapiDeregisterEvents(pTimerApi); + } + + CliDelObjectEvents(pRmFreeParams->hClient, pRmFreeParams->hResource); + + return NV_OK; +} + +NV_STATUS +rmapiAlloc +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + void *pAllocParams +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, phObject, hClass, NV_PTR_TO_NvP64(pAllocParams), + RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, &hObject, hClass, NV_PTR_TO_NvP64(pAllocParams), + RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiAllocWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequested, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + NvU32 allocInitStates = RM_ALLOC_STATES_NONE; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO *pLockInfo; + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + pLockInfo = portMemAllocNonPaged(sizeof(*pLockInfo)); + if (pLockInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(pLockInfo, 0, sizeof(*pLockInfo)); + rmapiInitLockInfo(pRmApi, hClient, pLockInfo); + + 
// RS-TODO: Fix calls that use RMAPI_GPU_LOCK_INTERNAL without holding the API lock
+    if (pRmApi->bGpuLockInternal && !rmApiLockIsOwner())
+    {
+        NV_PRINTF(LEVEL_ERROR, "RMAPI_GPU_LOCK_INTERNAL alloc requested without holding the RMAPI lock\n");
+        pLockInfo->flags |= RM_LOCK_FLAGS_NO_API_LOCK;
+        pLockInfo->state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED;
+    }
+
+    // This flag applies to both VGPU and GSP cases
+    if (flags & RMAPI_ALLOC_FLAGS_SKIP_RPC)
+        allocInitStates |= RM_ALLOC_STATES_SKIP_RPC;
+
+    //
+    // Mark internal client allocations as such, so the resource server generates
+    // the internal client handle with a distinct template. The distinct template
+    // ensures that client handles provided by GSP clients do not collide with
+    // the client handles generated by the GSP/FW RM.
+    //
+    if ((pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) &&
+        (pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) && pRmApi->bGpuLockInternal)
+        allocInitStates |= RM_ALLOC_STATES_INTERNAL_CLIENT_HANDLE;
+
+    if ((pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) &&
+        (pRmApi->bApiLockInternal || pRmApi->bGpuLockInternal))
+        allocInitStates |= RM_ALLOC_STATES_INTERNAL_ALLOC;
+
+    NV_PRINTF(LEVEL_INFO, "client:0x%x parent:0x%x object:0x%x class:0x%x\n",
+              hClient, hParent, *phObject, hClass);
+
+    status = _rmAlloc(hClient,
+                      hParent,
+                      phObject,
+                      hClass,
+                      pAllocParams,
+                      flags,
+                      allocInitStates,
+                      pLockInfo,
+                      pRightsRequested,
+                      *pSecInfo);
+
+
+    //
+    // If hClient is allocated behind GPU locks, the client is marked as internal
+    //
+    if ((status == NV_OK) && ((hClass == NV01_ROOT) || (hClass == NV01_ROOT_NON_PRIV) || (hClass == NV01_ROOT_CLIENT)) &&
+        pSecInfo->paramLocation == PARAM_LOCATION_KERNEL && pRmApi->bGpuLockInternal)
+    {
+        void *pHClient = *(void **)&pAllocParams;
+
+        // flag this client as an RM internal client
+        rmclientSetClientFlagsByHandle(*(NvU32*)pHClient /* hClient */, RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT);
+    }
+
+    if (status == NV_OK)
+    {
+        NV_PRINTF(LEVEL_INFO, "allocation complete\n");
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_WARNING, "allocation failed; status: %s (0x%08x)\n",
+                  nvstatusToString(status), status);
+        NV_PRINTF(LEVEL_WARNING,
+                  "client:0x%x parent:0x%x object:0x%x class:0x%x\n", hClient,
+                  hParent, *phObject, hClass);
+    }
+
+    portMemFree(pLockInfo);
+
+done:
+    rmapiEpilogue(pRmApi, &rmApiContext);
+
+    return status;
+}
+
+NV_STATUS
+resservClientFactory
+(
+    PORT_MEM_ALLOCATOR *pAllocator,
+    RS_RES_ALLOC_PARAMS *pParams,
+    RsClient **ppRsClient
+)
+{
+    RmClient *pClient;
+    NV_STATUS status;
+
+    status = objCreate(&pClient, NVOC_NULL_OBJECT, RmClient, pAllocator, pParams);
+
+    if (status != NV_OK)
+    {
+        return status;
+    }
+    NV_ASSERT(pClient != NULL);
+
+    *ppRsClient = staticCast(pClient, RsClient);
+    return NV_OK;
+}
+
+NV_STATUS
+resservResourceFactory
+(
+    PORT_MEM_ALLOCATOR *pAllocator,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_ALLOC_PARAMS *pParams,
+    RsResource **ppResource
+)
+{
+    RS_RESOURCE_DESC *pResDesc;
+    NV_STATUS status;
+    Dynamic *pDynamic = NULL;
+    RsResource *pResource = NULL;
+    OBJGPU *pGpu = NULL;
+
+    pResDesc = RsResInfoByExternalClassId(pParams->externalClassId);
+    if (pResDesc == NULL)
+        return NV_ERR_INVALID_CLASS;
+
+    if (pCallContext->pResourceRef->pParentRef != NULL &&
+        pCallContext->pResourceRef->pParentRef->pResource != NULL)
+    {
+        GpuResource *pParentGpuResource = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource,
+                                                      GpuResource);
+        if (pParentGpuResource != NULL)
+        {
+            pGpu = GPU_RES_GET_GPU(pParentGpuResource);
+        }
+    }
+
+    status = 
objCreateDynamicWithFlags(&pDynamic, + (Object*)pGpu, + pResDesc->pClassInfo, + NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY, + pCallContext, + pParams); + if (status != NV_OK) + return status; + + pResource = dynamicCast(pDynamic, RsResource); + + if (pResource == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + *ppResource = pResource; + + return status; +} + +NV_STATUS +rmapiAllocWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequested, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiAllocWithSecInfo(pRmApi, hClient, hParent, phObject, hClass, + pAllocParams, flags, pRightsRequested, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->FreeWithSecInfo(pRmApi, hClient, hObject, RMAPI_FREE_FLAGS_NONE, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiFreeWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RS_RES_FREE_PARAMS freeParams; + RS_LOCK_INFO lockInfo; + RM_API_CONTEXT rmApiContext = {0}; + + portMemSet(&freeParams, 0, sizeof(freeParams)); + + NV_PRINTF(LEVEL_INFO, "Nv01Free: client:0x%x object:0x%x\n", hClient, + hObject); + + status = rmapiPrologue(pRmApi, &rmApiContext); + + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + // RS-TODO: Fix calls that use RMAPI_GPU_LOCK_INTERNAL without holding the API lock + if (pRmApi->bGpuLockInternal && !rmApiLockIsOwner()) + { + NV_PRINTF(LEVEL_ERROR, "RMAPI_GPU_LOCK_INTERNAL free requested without holding the RMAPI lock\n"); + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + lockInfo.state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + + freeParams.hClient = hClient; + freeParams.hResource = hObject; + freeParams.freeState = RM_FREE_STATES_NONE; + freeParams.pLockInfo = &lockInfo; + freeParams.freeFlags = flags; + freeParams.pSecInfo = pSecInfo; + + status = serverFreeResourceTree(&g_resServ, &freeParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv01Free: free complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv01Free: free failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_WARNING, "Nv01Free: client:0x%x object:0x%x\n", + hClient, hObject); + } + + return status; +} + +NV_STATUS +rmapiFreeWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiFreeWithSecInfo(pRmApi, hClient, hObject, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiFreeClientList +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->FreeClientListWithSecInfo(pRmApi, phClientList, numClients, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiFreeClientListWithSecInfo +( + RM_API *pRmApi, + 
NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 lockState = 0; + + NV_PRINTF(LEVEL_INFO, "Nv01FreeClientList: numClients: %d\n", numClients); + + if (!pRmApi->bRmSemaInternal && osAcquireRmSema(pSys->pSema) != NV_OK) + return NV_ERR_INVALID_LOCK_STATE; + + if (pRmApi->bApiLockInternal) + lockState |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + + if (pRmApi->bGpuLockInternal) + lockState |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + + status = serverFreeClientList(&g_resServ, phClientList, numClients, lockState, pSecInfo); + + if (!pRmApi->bRmSemaInternal) + osReleaseRmSema(pSys->pSema, NULL); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv01FreeClientList: free complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv01FreeList: free failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiFreeClientListWithSecInfoTls +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiFreeClientListWithSecInfo(pRmApi, phClientList, numClients, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NvBool +serverRwApiLockIsOwner +( + RsServer *pServer +) +{ + return rmapiLockIsOwner(); +} + +NV_STATUS +serverAllocResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + if (lock == RS_LOCK_TOP) + { + RS_RESOURCE_DESC *pResDesc; + + if (!serverSupportsReadOnlyLock(&g_resServ, RS_LOCK_TOP, RS_API_ALLOC_RESOURCE)) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + pResDesc = RsResInfoByExternalClassId(pParams->externalClassId); + + if (pResDesc == NULL) + { + return NV_ERR_INVALID_CLASS; + } + + if (pResDesc->flags & RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC) + *pAccess = LOCK_ACCESS_READ; + else + *pAccess = LOCK_ACCESS_WRITE; + + return NV_OK; + } + + if (lock == RS_LOCK_RESOURCE) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +serverFreeResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_FREE_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c new file mode 100644 index 0000000..661922b --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "rmapi/binary_api.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "ctrl/ctrlxxxx.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu.h" +#include "core/locks.h" + + +NV_STATUS +binapiConstruct_IMPL +( + BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +binapiprivConstruct_IMPL +( + BinaryApiPrivileged *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +binapiControl_IMPL +( + BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + GPU_MASK gpuMaskRelease = 0; + + // check if CMD is NULL, return early + if (RMCTRL_IS_NULL_CMD(pParams->cmd)) + return NV_OK; + + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease)); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + status = pRmApi->Control(pRmApi, + pParams->hClient, + pParams->hObject, + pParams->cmd, + pParams->pParams, + pParams->paramsSize); + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + return status; +} + +NV_STATUS +binapiprivControl_IMPL +( + BinaryApiPrivileged *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + // check if CMD is NULL, return early + if (RMCTRL_IS_NULL_CMD(pParams->cmd)) + return NV_OK; + + // Add check if privileged client + if (pParams->secInfo.privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + return binapiControl_IMPL(staticCast(pResource, BinaryApi), pCallContext, pParams); + } + else + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c new file mode 100644 index 0000000..5ed997c --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c @@ -0,0 +1,832 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. 
 All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "os/os.h"
+
+#include "rmapi/rmapi.h"
+#include "rmapi/rs_utils.h"
+#include "rmapi/client.h"
+#include "rmapi/client_resource.h"
+#include "rmapi/resource_fwd_decls.h"
+#include "core/locks.h"
+#include "core/system.h"
+#include "gpu/device/device.h"
+#include "resource_desc.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "gpu/gpu.h"
+
+UserInfoList g_userInfoList;
+RmClientList g_clientListBehindGpusLock; // RS-TODO remove this WAR
+
+#define RS_FW_UNIQUE_HANDLE_BASE (0xc9f00000)
+
+NV_STATUS _registerUserInfo(PUID_TOKEN *ppUidToken, UserInfo **ppUserInfo);
+NV_STATUS _unregisterUserInfo(UserInfo *pUserInfo);
+
+NV_STATUS
+rmclientConstruct_IMPL
+(
+    RmClient *pClient,
+    PORT_MEM_ALLOCATOR* pAllocator,
+    RS_RES_ALLOC_PARAMS_INTERNAL* pParams
+)
+{
+    NV_STATUS   status = NV_OK;
+    NvU32       i;
+    OBJSYS     *pSys = SYS_GET_INSTANCE();
+    RsClient   *pRsClient = staticCast(pClient, RsClient);
+    NvBool      bReleaseLock = NV_FALSE;
+    API_SECURITY_INFO *pSecInfo = pParams->pSecInfo;
+
+    pClient->bIsRootNonPriv = (pParams->externalClassId == NV01_ROOT_NON_PRIV);
+    pClient->pUserInfo = NULL;
+    pClient->pSecurityToken = NULL;
+    pClient->pOSInfo = pSecInfo->clientOSInfo;
+
+    // TODO: Revisit in M2, see GPUSWSEC-1176
+    pClient->ProcID = osGetCurrentProcess();
+
+    pClient->cachedPrivilege = pSecInfo->privLevel;
+
+    // Set user-friendly client name from current process
+    osGetCurrentProcessName(pClient->name, NV_PROC_NAME_MAX_LENGTH);
+
+    for (i = 0; i < NV0000_NOTIFIERS_MAXCOUNT; i++)
+    {
+        pClient->CliSysEventInfo.notifyActions[i] =
+            NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+    }
+
+    // Prevent kernel clients from requesting handles in the FW handle generator range
+    status = clientSetRestrictedRange(pRsClient,
+        RS_FW_UNIQUE_HANDLE_BASE, RS_UNIQUE_HANDLE_RANGE);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Failed to set host client restricted resource handle range. Status=%x\n", status);
+        return status;
+    }
+
+    if (!rmGpuLockIsOwner())
+    {
+        // LOCK: acquire GPUs lock
+        if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK)
+        {
+            NV_ASSERT(0);
+            return status;
+        }
+        bReleaseLock = NV_TRUE;
+    }
+
+    pClient->bIsClientVirtualMode = (pSecInfo->pProcessToken != NULL);
+
+    //
+    // Cache the security/uid tokens only if client handle validation is
+    // enabled AND it's a user-mode path or a non-privileged kernel class.
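+    // (Editor's note: fully privileged kernel clients are trusted and skip
+    // token caching entirely.)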
+ // + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE) && + ((pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) || pClient->bIsRootNonPriv)) + { + PSECURITY_TOKEN pSecurityToken = (pClient->bIsClientVirtualMode ? + pSecInfo->pProcessToken : osGetSecurityToken()); + + PUID_TOKEN pUidToken = osGetCurrentUidToken(); + UserInfo *pUserInfo = NULL; + + // pUserInfo takes ownership of pUidToken upon successful registration + status = _registerUserInfo(&pUidToken, &pUserInfo); + + if (status == NV_OK) + { + pClient->pUserInfo = pUserInfo; + pClient->pSecurityToken = pSecurityToken; + } + else + { + portMemFree(pUidToken); + + if (pSecurityToken != NULL && !pClient->bIsClientVirtualMode) + portMemFree(pSecurityToken); + } + } + + if (listAppendValue(&g_clientListBehindGpusLock, (void*)&pClient) == NULL) + status = NV_ERR_INSUFFICIENT_RESOURCES; + + if (bReleaseLock) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // RM gets the client handle from the allocation parameters + if (status == NV_OK && pParams->pAllocParams != NULL) + *(NvHandle*)(pParams->pAllocParams) = pParams->hClient; + + return status; +} + +void +rmclientDestruct_IMPL +( + RmClient *pClient +) +{ + NV_STATUS status = NV_OK; + RsClient *pRsClient = staticCast(pClient, RsClient); + NV_STATUS tmpStatus; + NvHandle hClient = pRsClient->hClient; + NvBool bReleaseLock = NV_FALSE; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + NV_PRINTF(LEVEL_INFO, " type: client\n"); + + LOCK_METER_DATA(FREE_CLIENT, hClient, 0, 0); + + rmapiControlCacheFreeClient(hClient); + + // + // Free all of the devices of the client (do it in reverse order to + // facilitate tear down of things like ctxdmas, etc) + // + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(it.pClient, &it)) + { + Device *pDeviceInfo = dynamicCast(it.pResourceRef->pResource, Device); + + // This path is deprecated. 
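+        // Editor's note: well-behaved clients free their devices explicitly
+        // before freeing the client; the assert below flags callers that
+        // still rely on this implicit teardown.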
+ NV_ASSERT(0); + + tmpStatus = pRmApi->Free(pRmApi, hClient, RES_GET_HANDLE(pDeviceInfo)); + if ((tmpStatus != NV_OK) && (status == NV_OK)) + status = tmpStatus; + + // re-snap iterator as Device list was mutated + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + + // Updating the client list just before client handle unregister // + // in case child free functions need to iterate over all clients // + if (!rmGpuLockIsOwner()) + { + // LOCK: acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK) + { + // This is the only chance that the shadow client list can be + // updated so modify it regardless of whether or not we obtained the lock + NV_ASSERT(0); + } + else + { + bReleaseLock = NV_TRUE; + } + } + + listRemoveFirstByValue(&g_clientListBehindGpusLock, (void*)&pClient); + + if (pClient->pUserInfo != NULL) + { + _unregisterUserInfo(pClient->pUserInfo); + pClient->pUserInfo = NULL; + } + + if (pClient->pSecurityToken != NULL) + { + if (!pClient->bIsClientVirtualMode) + portMemFree(pClient->pSecurityToken); + + pClient->pSecurityToken = NULL; + } + + if (bReleaseLock) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } +} + +NV_STATUS +rmclientInterMap_IMPL +( + RmClient *pClient, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams +) +{ + RS_INTER_MAP_PRIVATE *pPrivate = pParams->pPrivate; + RS_RES_MAP_TO_PARAMS mapToParams; + + // Use virtual MapTo to perform the class-specific mapping to pMapperRef + portMemSet(&mapToParams, 0, sizeof(mapToParams)); + + mapToParams.pMemoryRef = pMappableRef; + mapToParams.offset = pParams->offset; + mapToParams.length = pParams->length; + mapToParams.flags = pParams->flags; + mapToParams.pDmaOffset = &pParams->dmaOffset; + mapToParams.ppMemDesc = (MEMORY_DESCRIPTOR**)&pParams->pMemDesc; + + mapToParams.pGpu = pPrivate->pGpu; + mapToParams.pSrcGpu = pPrivate->pSrcGpu; + mapToParams.pSrcMemDesc = pPrivate->pSrcMemDesc; + mapToParams.hBroadcastDevice = pPrivate->hBroadcastDevice; + mapToParams.hMemoryDevice = pPrivate->hMemoryDevice; + mapToParams.gpuMask = pPrivate->gpuMask; + mapToParams.bSubdeviceHandleProvided = pPrivate->bSubdeviceHandleProvided; + mapToParams.bDmaMapNeeded = pPrivate->bDmaMapNeeded; + mapToParams.bFlaMapping = pPrivate->bFlaMapping; + + return resMapTo(pMapperRef->pResource, &mapToParams); +} + +void +rmclientInterUnmap_IMPL +( + RmClient *pClient, + RsResourceRef *pMapperRef, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + RS_RES_UNMAP_FROM_PARAMS unmapFromParams; + + // Use virtual UnmapFrom to perform the class-specific unmapping from pMapperRef + portMemSet(&unmapFromParams, 0, sizeof(unmapFromParams)); + + unmapFromParams.pMemDesc = pParams->pMemDesc; + unmapFromParams.hMemory = pParams->hMappable; + unmapFromParams.flags = pParams->flags; + unmapFromParams.dmaOffset = pParams->dmaOffset; + + unmapFromParams.pGpu = pPrivate->pGpu; + unmapFromParams.hBroadcastDevice = pPrivate->hBroadcastDevice; + unmapFromParams.gpuMask = pPrivate->gpuMask; + unmapFromParams.bSubdeviceHandleProvided = pPrivate->bSubdeviceHandleProvided; + + resUnmapFrom(pMapperRef->pResource, &unmapFromParams); +} + +RS_PRIV_LEVEL +rmclientGetCachedPrivilege_IMPL +( + RmClient *pClient +) +{ + return pClient->cachedPrivilege; +} + +NvBool +rmclientIsAdmin_IMPL +( + RmClient *pClient, + RS_PRIV_LEVEL privLevel +) +{ + if (pClient == NULL) + return 
NV_FALSE;
+
+    return (privLevel >= RS_PRIV_LEVEL_USER_ROOT) && !pClient->bIsRootNonPriv;
+}
+
+void
+rmclientSetClientFlags_IMPL
+(
+    RmClient *pClient,
+    NvU32 clientFlags
+)
+{
+    pClient->Flags |= clientFlags;
+}
+
+static void
+_rmclientPromoteDebuggerState
+(
+    RmClient *pClient,
+    NvU32     newMinimumState
+)
+{
+    if (pClient->ClientDebuggerState < newMinimumState)
+    {
+        pClient->ClientDebuggerState = newMinimumState;
+    }
+}
+
+void *
+rmclientGetSecurityToken_IMPL
+(
+    RmClient *pClient
+)
+{
+    return pClient->pSecurityToken;
+}
+
+/*!
+ * @brief Given a client handle, validate the handle for security.
+ *
+ * Important!! This function should be called ONLY in the user-mode paths.
+ * The security validations will fail in kernel paths, especially if called
+ * with privileged kernel handles.
+ *
+ * @param[in] hClient   The client handle
+ * @param[in] pSecInfo  The new calling context's security info.
+ *
+ * @return NV_OK if validated
+ *         NV_ERR_INVALID_CLIENT if the client cannot be found
+ *         or if there isn't a match.
+ */
+static NV_STATUS
+_rmclientUserClientSecurityCheck
+(
+    RmClient *pClient,
+    const API_SECURITY_INFO *pSecInfo
+)
+{
+    NV_STATUS       status = NV_OK;
+    OBJSYS         *pSys = SYS_GET_INSTANCE();
+    PSECURITY_TOKEN pCurrentToken = NULL;
+    PSECURITY_TOKEN pSecurityToken = pSecInfo->pProcessToken;
+
+    if ((pSys == NULL) ||
+        (!pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE)))
+    {
+        return NV_OK;
+    }
+
+    //
+    // Check 1:
+    // Make sure that user paths cannot be called with privileged kernel
+    // handles.
+    //
+    // Note: For the user paths, we are checking against both kernel and admin
+    // client. The reason is that KMD today creates unprivileged kernel handles
+    // (of class NV01_ROOT_NON_PRIV) on behalf of user clients (cuda debugger,
+    // profiler, OGL etc) and gives out those handles. These handles are
+    // kernel, but they do not have admin privileges, and since clients already
+    // use these handles to call into RM through the user paths, we are allowing
+    // them through ... for now.
+    //
+    // Until we either fix the clients to wean off these kernel handles or change
+    // KMD to not give out the kernel handles, we need to keep the check restricted
+    // to handles created with NV01_ROOT using the CliCheckAdmin interface.
+    //
+    if ((pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) && !pClient->bIsRootNonPriv)
+    {
+        NV_PRINTF(LEVEL_WARNING, "Incorrect client handle used in the User export\n");
+        return NV_ERR_INVALID_CLIENT;
+    }
+
+    //
+    // Check 2:
+    // Validate the client handle to make sure that the user who created the
+    // handle is the one that uses it. Otherwise a malicious user could guess
+    // the client handle created by another user and access information that
+    // it's not privy to.
+    //
+    pCurrentToken = (pSecurityToken != NULL ? pSecurityToken : osGetSecurityToken());
+    if (pCurrentToken == NULL)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Cannot get the security token for the current user.\n");
+        NV_PRINTF(LEVEL_WARNING,
+                  "The user client cannot be validated\n");
+        status = NV_ERR_INVALID_CLIENT;
+        DBG_BREAKPOINT();
+        goto CliUserClientSecurityCheck_exit;
+    }
+
+    status = osValidateClientTokens((void*)rmclientGetSecurityToken(pClient),
+                                    (void*)pCurrentToken);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Error validating client token.
Status = 0x%08x\n", status); + goto CliUserClientSecurityCheck_exit; + } + +CliUserClientSecurityCheck_exit: + if (pCurrentToken != NULL && pSecurityToken == NULL) + { + portMemFree(pCurrentToken); + pCurrentToken = NULL; + } + return status; +} + +NV_STATUS +rmclientPostProcessPendingFreeList_IMPL +( + RmClient *pClient, + RsResourceRef **ppFirstLowPriRef +) +{ + RsClient *pRsClient = staticCast(pClient, RsClient); + RsResourceRef *pTargetRef = NULL; + RsResourceRef *pStopRef = NULL; + RsResourceRef *pFirstLowPriRef = NULL; + + pStopRef = pRsClient->pFreeStack->pResourceRef; + pTargetRef = listHead(&pRsClient->pendingFreeList); + while (pTargetRef != pStopRef) + { + RsResourceRef *pNextRef = listNext(&pRsClient->pendingFreeList, pTargetRef); + + // Ensure that high priority resources (and their children/dependents) are freed first + if (pTargetRef->pResourceDesc->freePriority == RS_FREE_PRIORITY_HIGH) + { + clientUpdatePendingFreeList(pRsClient, pTargetRef, pTargetRef, NV_TRUE); + } + pTargetRef = pNextRef; + } + + // + // Find the last high-priority resource in the list. + // The next resource will be the first low priority resource. + // If there are no high-priority resources: use the head of the list + // + pTargetRef = (pStopRef != NULL) + ? pStopRef + : listTail(&pRsClient->pendingFreeList); + pFirstLowPriRef = listHead(&pRsClient->pendingFreeList); + + while (pTargetRef != NULL) + { + RsResourceRef *pPrevRef = listPrev(&pRsClient->pendingFreeList, pTargetRef); + + if (pTargetRef->pResourceDesc->freePriority == RS_FREE_PRIORITY_HIGH) + { + pFirstLowPriRef = listNext(&pRsClient->pendingFreeList, pTargetRef); + break; + } + pTargetRef = pPrevRef; + } + + if (ppFirstLowPriRef) + *ppFirstLowPriRef = pFirstLowPriRef; + + return NV_OK; +} + +static RmClient *handleToObject(NvHandle hClient) +{ + RmClient *pClient; + return (NV_OK == serverutilGetClientUnderLock(hClient, &pClient)) ? pClient : NULL; +} + +RS_PRIV_LEVEL rmclientGetCachedPrivilegeByHandle(NvHandle hClient) +{ + RmClient *pClient = handleToObject(hClient); + return pClient ? rmclientGetCachedPrivilege(pClient) : RS_PRIV_LEVEL_USER; +} + +NvBool rmclientIsAdminByHandle(NvHandle hClient, RS_PRIV_LEVEL privLevel) +{ + RmClient *pClient = handleToObject(hClient); + return pClient ? rmclientIsAdmin(pClient, privLevel) : NV_FALSE; +} + +NvBool rmclientSetClientFlagsByHandle(NvHandle hClient, NvU32 clientFlags) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient) + rmclientSetClientFlags(pClient, clientFlags); + return !!pClient; +} + +void rmclientPromoteDebuggerStateByHandle(NvHandle hClient, NvU32 newMinimumState) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient) + _rmclientPromoteDebuggerState(pClient, newMinimumState); +} + +void *rmclientGetSecurityTokenByHandle(NvHandle hClient) +{ + RmClient *pClient = handleToObject(hClient); + return pClient ? rmclientGetSecurityToken(pClient) : NULL; +} + +NV_STATUS rmclientUserClientSecurityCheckByHandle(NvHandle hClient, const API_SECURITY_INFO *pSecInfo) +{ + RmClient *pClient = handleToObject(hClient); + + // + // Return early if it's a null object. This is probably the allocation of + // the root client object, so the client class is going to be null. + // + // RS-TODO - This check should move to the caller. 
+ // + if (hClient == NV01_NULL_OBJECT) + { + return NV_OK; + } + + if (pClient) + { + return _rmclientUserClientSecurityCheck(pClient, pSecInfo); + } + else + return NV_ERR_INVALID_CLIENT; +} + +/** + * Register a uid token with the client database and return a UserInfo that + * corresponds to the uid token. + * + * If the uid token has not been registered before, a new UserInfo will be registered and returned. + * If the uid token is already registered, an existing UserInfo will be ref-counted and + * returned. + * + * This function must be protected by a lock (currently the GPUs lock.) + * + * @param[inout] ppUidToken + * @param[out] ppUserInfo + */ +NV_STATUS +_registerUserInfo +( + PUID_TOKEN *ppUidToken, + UserInfo **ppUserInfo +) +{ + NV_STATUS status = NV_OK; + NvBool bFound = NV_FALSE; + UserInfo *pUserInfo = NULL; + UserInfoListIter it = listIterAll(&g_userInfoList); + PUID_TOKEN pUidToken; + + if ((!ppUidToken) || (!(*ppUidToken))) + return NV_ERR_INVALID_ARGUMENT; + + pUidToken = *ppUidToken; + + // Find matching user token + while(listIterNext(&it)) + { + pUserInfo = *it.pValue; + if (osUidTokensEqual(pUserInfo->pUidToken, pUidToken)) + { + bFound = NV_TRUE; + break; + } + } + + if (!bFound) + { + RsShared *pShared; + status = serverAllocShare(&g_resServ, classInfo(UserInfo), &pShared); + if (status != NV_OK) + return status; + + pUserInfo = dynamicCast(pShared, UserInfo); + pUserInfo->pUidToken = pUidToken; + + if (listAppendValue(&g_userInfoList, (void*)&pUserInfo) == NULL) + { + serverFreeShare(&g_resServ, pShared); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + else + { + serverRefShare(&g_resServ, staticCast(pUserInfo, RsShared)); + portMemFree(pUidToken); + *ppUidToken = NULL; + } + + *ppUserInfo = pUserInfo; + + return NV_OK; +} + +/** + * + * Unregister a client from a user info list + * + * This function must be protected by a lock (currently the GPUs lock.) 
+ * + * @param[in] pUserInfo + */ +NV_STATUS +_unregisterUserInfo +( + UserInfo *pUserInfo +) +{ + NvS32 refCount = serverGetShareRefCount(&g_resServ, staticCast(pUserInfo, RsShared)); + if (--refCount == 0) + { + listRemoveFirstByValue(&g_userInfoList, (void*)&pUserInfo); + } + return serverFreeShare(&g_resServ, staticCast(pUserInfo, RsShared)); +} + +NV_STATUS userinfoConstruct_IMPL +( + UserInfo *pUserInfo +) +{ + return NV_OK; +} + +void +userinfoDestruct_IMPL +( + UserInfo *pUserInfo +) +{ + portMemFree(pUserInfo->pUidToken); +} + +NV_STATUS +rmclientValidate_IMPL +( + RmClient *pClient, + const API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE) && + pSecInfo != NULL) + { + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT) && + pSecInfo->clientOSInfo != NULL) + { + if (pClient->pOSInfo != pSecInfo->clientOSInfo) + { + status = NV_ERR_INVALID_CLIENT; + } + } + else if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) + { + status = _rmclientUserClientSecurityCheck(pClient, pSecInfo); + } + } + + return status; +} + +NV_STATUS +rmclientFreeResource_IMPL +( + RmClient *pClient, + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + NV_STATUS status; + OBJGPU *pGpu; + NvBool bBcState; + NvBool bRestoreBcState = NV_FALSE; + + if (gpuGetByRef(pRmFreeParams->pResourceRef, NULL, &pGpu) == NV_OK) + { + bBcState = gpumgrGetBcEnabledStatus(pGpu); + bRestoreBcState = NV_TRUE; + } + + rmapiFreeResourcePrologue(pRmFreeParams); + + status = clientFreeResource_IMPL(staticCast(pClient, RsClient), pServer, pRmFreeParams); + + if (bRestoreBcState) + { + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + return status; +} + +static NvBool _rmclientIsCapable +( + NvHandle hClient, + NvU32 capability +) +{ + NvU32 internalClassId; + RsResourceRef *pResourceRef = NULL; + + switch(capability) + { + case NV_RM_CAP_SYS_SMC_CONFIG: + { + internalClassId = classId(MIGConfigSession); + break; + } + case NV_RM_CAP_EXT_FABRIC_MGMT: + { + internalClassId = classId(FmSessionApi); + break; + } + case NV_RM_CAP_SYS_SMC_MONITOR: + { + internalClassId = classId(MIGMonitorSession); + break; + } + default: + { + NV_ASSERT(0); + return NV_FALSE; + } + } + + // Check if client has allocated a given class + pResourceRef = serverutilFindChildRefByType(hClient, hClient, internalClassId, NV_TRUE); + if (pResourceRef == NULL) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NvBool rmclientIsCapableOrAdmin_IMPL +( + RmClient *pClient, + NvU32 capability, + RS_PRIV_LEVEL privLevel +) +{ + RsClient *pRsClient = staticCast(pClient, RsClient); + NvHandle hClient = pRsClient->hClient; + + if (rmclientIsAdmin(pClient, privLevel)) + { + return NV_TRUE; + } + + return _rmclientIsCapable(hClient, capability); +} + +// +// RS-TODO: Delete this function once the RM Capabilities framework is in place. 
+// JIRA GR-139 +// +NvBool rmclientIsCapableOrAdminByHandle +( + NvHandle hClient, + NvU32 capability, + RS_PRIV_LEVEL privLevel +) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient == NULL) + { + return NV_FALSE; + } + + return rmclientIsCapableOrAdmin(pClient, capability, privLevel); +} + +NvBool rmclientIsCapable_IMPL +( + RmClient *pClient, + NvU32 capability +) +{ + RsClient *pRsClient = staticCast(pClient, RsClient); + NvHandle hClient = pRsClient->hClient; + + return _rmclientIsCapable(hClient, capability); +} + +// +// RS-TODO: Delete this function once the RM Capabilities framework is in place. +// JIRA GR-139 +// +NvBool rmclientIsCapableByHandle +( + NvHandle hClient, + NvU32 capability +) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient == NULL) + { + return NV_FALSE; + } + + return rmclientIsCapable(pClient, capability); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c new file mode 100644 index 0000000..9ed5a64 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c @@ -0,0 +1,1537 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/system.h" +#include "os/os.h" +#include "rmapi/client_resource.h" +#include "rmapi/param_copy.h" +#include "rmapi/rs_utils.h" +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu_mgr/gpu_mgr.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "resserv/rs_access_map.h" +#include "nvBldVer.h" +#include "nvVer.h" +#include "mem_mgr/mem.h" +#include "nvsecurityinfo.h" +#include "resource_desc.h" + +#include "mem_mgr/virt_mem_mgr.h" + +NV_STATUS +cliresConstruct_IMPL +( + RmClientResource *pRmCliRes, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL* pParams +) +{ + return NV_OK; +} + +void +cliresDestruct_IMPL +( + RmClientResource *pRmCliRes +) +{ +} + +NvBool +cliresAccessCallback_IMPL +( + RmClientResource *pRmCliRes, + RsClient *pInvokingClient, + void *pAllocParams, + RsAccessRight accessRight +) +{ + // Client resource's access callback will grant any rights here to any resource it owns + switch (accessRight) + { + case RS_ACCESS_NICE: + { + // Grant if the caller satisfies osAllowPriorityOverride + return osAllowPriorityOverride(); + } + } + + // Delegate to superclass + return resAccessCallback_IMPL(staticCast(pRmCliRes, RsResource), pInvokingClient, pAllocParams, accessRight); +} + +NvBool +cliresShareCallback_IMPL +( + RmClientResource *pRmCliRes, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + RmClient *pSrcClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + RmClient *pDstClient = dynamicCast(pInvokingClient, RmClient); + NvBool bDstKernel = (pDstClient != NULL) && + (rmclientGetCachedPrivilege(pDstClient) >= RS_PRIV_LEVEL_KERNEL); + + // Client resource's share callback will also share rights it shares here with any resource it owns + // + // If a kernel client is validating share policies, that means it's most likely duping on behalf of + // a user space client. For this case, we check against the current process instead of the kernel + // client object's process. 
+    //
+    switch (pSharePolicy->type)
+    {
+        case RS_SHARE_TYPE_OS_SECURITY_TOKEN:
+            if ((pSrcClient != NULL) && (pDstClient != NULL) &&
+                (pSrcClient->pSecurityToken != NULL))
+            {
+                if (bDstKernel)
+                {
+                    NV_STATUS status;
+                    PSECURITY_TOKEN pCurrentToken;
+
+                    pCurrentToken = osGetSecurityToken();
+                    if (pCurrentToken == NULL)
+                    {
+                        NV_ASSERT_FAILED("Cannot get the security token for the current user");
+                        return NV_FALSE;
+                    }
+
+                    status = osValidateClientTokens(pSrcClient->pSecurityToken, pCurrentToken);
+                    portMemFree(pCurrentToken);
+                    if (status == NV_OK)
+                    {
+                        return NV_TRUE;
+                    }
+                }
+                else if (pDstClient->pSecurityToken != NULL)
+                {
+                    if (osValidateClientTokens(pSrcClient->pSecurityToken, pDstClient->pSecurityToken) == NV_OK)
+                        return NV_TRUE;
+                }
+            }
+            break;
+        case RS_SHARE_TYPE_PID:
+            if ((pSrcClient != NULL) && (pDstClient != NULL))
+            {
+                if ((pParentRef != NULL) && bDstKernel)
+                {
+                    if (pSrcClient->ProcID == osGetCurrentProcess())
+                        return NV_TRUE;
+                }
+                else
+                {
+                    if (pSrcClient->ProcID == pDstClient->ProcID)
+                        return NV_TRUE;
+                }
+            }
+            break;
+        case RS_SHARE_TYPE_SMC_PARTITION:
+        case RS_SHARE_TYPE_GPU:
+            // Require exceptions, since RmClientResource is not an RmResource
+            if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)
+                return NV_TRUE;
+            break;
+    }
+
+    // Delegate to superclass
+    return resShareCallback_IMPL(staticCast(pRmCliRes, RsResource), pInvokingClient, pParentRef, pSharePolicy);
+}
+
+// ****************************************************************************
+// Helper functions
+// ****************************************************************************
+
+
+static NV_STATUS
+CliControlSystemEvent
+(
+    NvHandle hClient,
+    NvU32    event,
+    NvU32    action
+)
+{
+    NV_STATUS status = NV_OK;
+    RmClient *pClient;
+    PEVENTNOTIFICATION *pEventNotification = NULL;
+
+    if (event >= NV0000_NOTIFIERS_MAXCOUNT)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient))
+        return NV_ERR_INVALID_CLIENT;
+
+    CliGetEventNotificationList(hClient, hClient, NULL, &pEventNotification);
+    if (pEventNotification != NULL)
+    {
+        switch (action)
+        {
+            case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE:
+            case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT:
+            {
+                if (pClient->CliSysEventInfo.notifyActions[event] != NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+                {
+                    status = NV_ERR_INVALID_STATE;
+                    break;
+                }
+
+                // fall through
+            }
+            case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE:
+            {
+                pClient->CliSysEventInfo.notifyActions[event] = action;
+                break;
+            }
+
+            default:
+            {
+                status = NV_ERR_INVALID_ARGUMENT;
+                break;
+            }
+        }
+    }
+    else
+    {
+        status = NV_ERR_INVALID_STATE;
+    }
+
+    return status;
+}
+
+
+
+static NV_STATUS
+CliGetSystemEventStatus
+(
+    NvHandle  hClient,
+    NvU32    *pEvent,
+    NvU32    *pStatus
+)
+{
+    NvU32 Head, Tail;
+    RmClient *pClient;
+
+    if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient))
+        return NV_ERR_INVALID_CLIENT;
+
+    Head = pClient->CliSysEventInfo.systemEventsQueue.Head;
+    Tail = pClient->CliSysEventInfo.systemEventsQueue.Tail;
+
+    if (Head == Tail)
+    {
+        *pEvent = NV0000_NOTIFIERS_EVENT_NONE_PENDING;
+        *pStatus = 0;
+    }
+    else
+    {
+        *pEvent  = pClient->CliSysEventInfo.systemEventsQueue.EventQueue[Tail].event;
+        *pStatus = pClient->CliSysEventInfo.systemEventsQueue.EventQueue[Tail].status;
+        pClient->CliSysEventInfo.systemEventsQueue.Tail = (Tail + 1) % NV_SYSTEM_EVENT_QUEUE_SIZE;
+    }
+
+    return NV_OK;
+}
+
+
+
+// ****************************************************************************
+//
Other functions +// **************************************************************************** + +// +// cliresCtrlCmdSystemGetFeatures +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetFeatures_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pFeaturesParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 featuresMask = 0; + + NV_ASSERT_OR_RETURN(pSys != NULL, NV_ERR_INVALID_STATE); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_UEFI)) + { + featuresMask = FLD_SET_DRF(0000, _CTRL_SYSTEM_GET_FEATURES, + _UEFI, _TRUE, featuresMask); + } + + // Don't update EFI init on non Display system + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_EFI_INIT)) + { + featuresMask = FLD_SET_DRF(0000, _CTRL_SYSTEM_GET_FEATURES, + _IS_EFI_INIT, _TRUE, featuresMask); + } + + pFeaturesParams->featuresMask = featuresMask; + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetBuildVersionV2 +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetBuildVersionV2_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + ct_assert(sizeof(NV_VERSION_STRING) <= sizeof(pParams->driverVersionBuffer)); + ct_assert(sizeof(NV_BUILD_BRANCH_VERSION) <= sizeof(pParams->versionBuffer)); + ct_assert(sizeof(NV_DISPLAY_DRIVER_TITLE) <= sizeof(pParams->titleBuffer)); + + portMemCopy(pParams->driverVersionBuffer, sizeof(pParams->driverVersionBuffer), + NV_VERSION_STRING, sizeof(NV_VERSION_STRING)); + portMemCopy(pParams->versionBuffer, sizeof(pParams->versionBuffer), + NV_BUILD_BRANCH_VERSION, sizeof(NV_BUILD_BRANCH_VERSION)); + portMemCopy(pParams->titleBuffer, sizeof(pParams->titleBuffer), + NV_DISPLAY_DRIVER_TITLE, sizeof(NV_DISPLAY_DRIVER_TITLE)); + + pParams->changelistNumber = NV_BUILD_CHANGELIST_NUM; + pParams->officialChangelistNumber = NV_LAST_OFFICIAL_CHANGELIST_NUM; + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetCpuInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetCpuInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pCpuInfoParams->type = pSys->cpuInfo.type; + pCpuInfoParams->capabilities = pSys->cpuInfo.caps; + pCpuInfoParams->clock = pSys->cpuInfo.clock; + pCpuInfoParams->L1DataCacheSize = pSys->cpuInfo.l1DataCacheSize; + pCpuInfoParams->L2DataCacheSize = pSys->cpuInfo.l2DataCacheSize; + pCpuInfoParams->dataCacheLineSize = pSys->cpuInfo.dataCacheLineSize; + pCpuInfoParams->numLogicalCpus = pSys->cpuInfo.numLogicalCpus; + pCpuInfoParams->numPhysicalCpus = pSys->cpuInfo.numPhysicalCpus; + pCpuInfoParams->coresOnDie = pSys->cpuInfo.coresOnDie; + pCpuInfoParams->family = pSys->cpuInfo.family; + pCpuInfoParams->model = pSys->cpuInfo.model; + pCpuInfoParams->stepping = pSys->cpuInfo.stepping; + portMemCopy(pCpuInfoParams->name, + sizeof (pCpuInfoParams->name), pSys->cpuInfo.name, + sizeof (pCpuInfoParams->name)); + + return NV_OK; +} + +// +// cliresCtrlCmdSystemSetMemorySize +// +// Set system memory size in pages. 
+// +// Lock Requirements: +// Assert that API and GPUs locks held on entry +// +NV_STATUS +cliresCtrlCmdSystemSetMemorySize_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + pOS->SystemMemorySize = pParams->memorySize; + + return NV_OK; +} + +static NV_STATUS +classGetSystemClasses(NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams) +{ + NvU32 i; + NvU32 numResources; + const RS_RESOURCE_DESC *resources; + NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS params; + + NV_ASSERT_OR_RETURN(pParams, NV_ERR_INVALID_ARGUMENT); + + RsResInfoGetResourceList(&resources, &numResources); + + portMemSet(¶ms, 0x0, sizeof(params)); + + for (i = 0; i < numResources; i++) + { + if ((resources[i].pParentList[0] == classId(RmClientResource)) && + (resources[i].pParentList[1] == 0x0)) + { + NV_ASSERT_OR_RETURN(params.numClasses < NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE, + NV_ERR_INVALID_STATE); + + params.classes[params.numClasses] = resources[i].externalClassId; + params.numClasses++; + } + } + + portMemCopy(pParams, sizeof(*pParams), ¶ms, sizeof(params)); + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetClassList +// +// Get list of supported system classes. +// +// Lock Requirements: +// Assert that API and GPUs locks held on entry +// +NV_STATUS +cliresCtrlCmdSystemGetClassList_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams +) +{ + NV_STATUS status; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + status = classGetSystemClasses(pParams); + + return status; +} + +// +// cliresCtrlCmdSystemNotifyEvent +// +// This function exists to allow the RM Client to notify us when they receive +// a system event message. We generally will store off the data, but in some +// cases, we'll trigger our own handling of that code. Prior to Vista, we +// would just poll a scratch bit for these events. But for Vista, we get them +// directly from the OS. +// +// Added Support for notifying power change event to perfhandler +// +NV_STATUS +cliresCtrlCmdSystemNotifyEvent_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + + switch(pParams->eventType) + { + case NV0000_CTRL_SYSTEM_EVENT_TYPE_LID_STATE: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_DOCK_STATE: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_LID: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_DOCK: + { + status = NV_ERR_NOT_SUPPORTED; + break; + } + + case NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE: + status = NV_ERR_NOT_SUPPORTED; + break; + + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams +) +{ +// NOTE: RmMsg is only available when NV_PRINTF_STRINGS_ALLOWED is true. 
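+// Illustrative sketch (editor's addition, not from this change): how a caller
+// might fill the params this control handles. Only the field names (cmd,
+// data, count) and the command values are taken from the code below; the
+// dispatch plumbing that delivers the params is elided:
+//
+//     NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS params;
+//     portMemSet(&params, 0, sizeof(params));
+//     params.cmd = NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_GET;
+//     // on success, params.data holds the current RmMsg string and
+//     // params.count its length
+//
+// The SET path below additionally requires an admin-privileged client on
+// non-DEBUG/DEVELOP builds.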
+#if NV_PRINTF_STRINGS_ALLOWED + NvU32 len = 0; + + extern char RmMsg[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE]; + + switch (pParams->cmd) + { + case NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_GET: + { + len = (NvU32)portStringLength(RmMsg); + portMemCopy(pParams->data, len, RmMsg, len); + pParams->count = len; + break; + } + case NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_SET: + { +#if !(defined(DEBUG) || defined(DEVELOP)) + RmClient *pRmClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + if (!rmclientIsAdmin(pRmClient, pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_WARNING, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +#endif + portMemCopy(RmMsg, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE, pParams->data, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + break; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS +cliresCtrlCmdSystemGetRmInstanceId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + pRmInstanceIdParams->rm_instance_id = pSys->rmInstanceId; + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetAttachedIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetAttachedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetAttachedGpuIds(pGpuAttachedIds); +} + +// +// cliresCtrlCmdGpuGetIdInfo +// +// Lock Requirements: +// Assert that API lock and Gpus lock held on entry +// +NV_STATUS +cliresCtrlCmdGpuGetIdInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetGpuIdInfo(pGpuIdInfoParams); +} + +NV_STATUS +cliresCtrlCmdGpuGetIdInfoV2_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetGpuIdInfoV2(pGpuIdInfoParams); +} + +// +// cliresCtrlCmdGpuGetInitStatus +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetInitStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetGpuInitStatus(pGpuInitStatusParams); +} + +// +// cliresCtrlCmdGpuGetDeviceIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetDeviceIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pDeviceIdsParams->deviceIds = gpumgrGetDeviceInstanceMask(); + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetPciInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetPciInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams +) +{ + NV_STATUS status; + NvU64 gpuDomainBusDevice; + + NV_ASSERT(rmApiLockIsOwner()); + + status = 
gpumgrGetProbedGpuDomainBusDevice(pPciInfoParams->gpuId, &gpuDomainBusDevice); + if (status != NV_OK) + return status; + + pPciInfoParams->domain = gpuDecodeDomain(gpuDomainBusDevice); + pPciInfoParams->bus = gpuDecodeBus(gpuDomainBusDevice); + pPciInfoParams->slot = gpuDecodeDevice(gpuDomainBusDevice); + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetProbedIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetProbedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetProbedGpuIds(pGpuProbedIds); +} + +// +// cliresCtrlCmdGpuAttachIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuAttachIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds +) +{ + NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds = NULL; + NvU32 i, j; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pGpuAttachIds->gpuIds[0] == NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS) + { + // XXX add callback to attach logic on Windows + status = NV_OK; + goto done; + } + + pGpuProbedIds = portMemAllocNonPaged(sizeof(*pGpuProbedIds)); + if (pGpuProbedIds == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = gpumgrGetProbedGpuIds(pGpuProbedIds); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; (i < NV0000_CTRL_GPU_MAX_PROBED_GPUS) && + (pGpuAttachIds->gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID); i++) + { + for (j = 0; (j < NV0000_CTRL_GPU_MAX_PROBED_GPUS) && + (pGpuProbedIds->gpuIds[j] != NV0000_CTRL_GPU_INVALID_ID); j++) + { + if (pGpuAttachIds->gpuIds[i] == pGpuProbedIds->gpuIds[j]) + break; + } + + if ((j == NV0000_CTRL_GPU_MAX_PROBED_GPUS) || + (pGpuProbedIds->gpuIds[j] == NV0000_CTRL_GPU_INVALID_ID)) + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // XXX add callback to attach logic on Windows +done: + portMemFree(pGpuProbedIds); + return status; +} + +// +// cliresCtrlCmdGpuDetachIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuDetachIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds +) +{ + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds = NULL; + NvU32 i, j; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pGpuDetachIds->gpuIds[0] == NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS) + { + // XXX add callback to detach logic on Windows + status = NV_OK; + goto done; + } + else + { + pGpuAttachedIds = portMemAllocNonPaged(sizeof(*pGpuAttachedIds)); + if (pGpuAttachedIds == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = gpumgrGetAttachedGpuIds(pGpuAttachedIds); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; (i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) && + (pGpuDetachIds->gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID); i++) + { + for (j = 0; (j < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) && + (pGpuAttachedIds->gpuIds[j] != NV0000_CTRL_GPU_INVALID_ID); j++) + { + if (pGpuDetachIds->gpuIds[i] == pGpuAttachedIds->gpuIds[j]) + break; + } + + if ((j == NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) || + (pGpuAttachedIds->gpuIds[j] == NV0000_CTRL_GPU_INVALID_ID)) + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + else + { + // XXX add callback to detach logic on Windows + break; + } + } + } + +done: + 
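+    // Editor's note (illustrative, not from this change): both the attach and
+    // detach paths funnel through a single cleanup label and free
+    // unconditionally, which relies on the free routine tolerating NULL. For
+    // example, the "detach all attached IDs" early exit reaches this label
+    // with pGpuAttachedIds still NULL:
+    //
+    //     done:
+    //         portMemFree(pGpuAttachedIds); // expected to be a no-op for NULL
+    //         return status;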
portMemFree(pGpuAttachedIds); + return status; +} + +NV_STATUS +cliresCtrlCmdGpuGetSvmSize_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams +) +{ + OBJGPU *pGpu = NULL; + + // error check incoming gpu id + pGpu = gpumgrGetGpuFromId(pSvmSizeGetParams->gpuId); + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_WARNING, "GET_SVM_SIZE: bad gpuid: 0x%x\n", + pSvmSizeGetParams->gpuId); + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the SVM size in MB. + pSvmSizeGetParams->svmSize = 0; + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGsyncGetAttachedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds +) +{ + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pGsyncAttachedIds->gsyncIds); i++) + { + pGsyncAttachedIds->gsyncIds[i] = NV0000_CTRL_GSYNC_INVALID_ID; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGsyncGetIdInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +cliresCtrlCmdEventSetNotification_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + + return CliControlSystemEvent(hClient, pEventSetNotificationParams->event, pEventSetNotificationParams->action); +} + +NV_STATUS +cliresCtrlCmdEventGetSystemEventStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + + return CliGetSystemEventStatus(hClient, &pSystemEventStatusParams->event, &pSystemEventStatusParams->status); +} + + +NV_STATUS +cliresCtrlCmdSystemGetPrivilegedStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams +) +{ + RmClient *pClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + NvU8 privStatus = 0; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN (pClient != NULL, NV_ERR_INVALID_CLIENT); + + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG; + } + + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG; + } + + if (rmclientIsAdmin(pClient, pCallContext->secInfo.privLevel)) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG; + } + + pParams->privStatusFlags = privStatus; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdSystemGetFabricStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS; + } + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED; + } + } + + pParams->fabricStatus = fabricStatus; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuGetUuidInfo_IMPL +( + RmClientResource *pRmCliRes, + 
NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + + pGpu = gpumgrGetGpuFromUuid(pParams->gpuUuid, pParams->flags); + + if (NULL == pGpu) + return NV_ERR_OBJECT_NOT_FOUND; + + pParams->gpuId = pGpu->gpuId; + pParams->deviceInstance = gpuGetDeviceInstance(pGpu); + pParams->subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuGetUuidFromGpuId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + NvU8 *pGidString = NULL; + NvU32 gidStrLen = 0; + NV_STATUS rmStatus; + + // First check for UUID cached by gpumgr + rmStatus = gpumgrGetGpuUuidInfo(pParams->gpuId, &pGidString, &gidStrLen, pParams->flags); + + if (rmStatus != NV_OK) + { + // If UUID not cached by gpumgr then try to query device + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + + if (NULL == pGpu) + return NV_ERR_OBJECT_NOT_FOUND; + + // get the UUID of this GPU + rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrLen, pParams->flags); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "gpumgrGetGpuInfo: getting gpu GUID failed\n"); + return rmStatus; + } + } + + if (gidStrLen <= NV0000_GPU_MAX_GID_LENGTH) + { + portMemCopy(pParams->gpuUuid, gidStrLen, pGidString, gidStrLen); + pParams->uuidStrLen = gidStrLen; + } + + // cleanup the allocated gidstring + portMemFree(pGidString); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuModifyGpuDrainState_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams +) +{ + NV_STATUS status; + NvBool bEnable; + NvBool bRemove = NV_FALSE; + NvBool bLinkDisable = NV_FALSE; + OBJGPU *pGpu = gpumgrGetGpuFromId(pParams->gpuId); + + if (NV0000_CTRL_GPU_DRAIN_STATE_ENABLED == pParams->newState) + { + if ((pGpu != NULL) && IsSLIEnabled(pGpu)) + { + // "drain" state not supported in SLI configurations + return NV_ERR_NOT_SUPPORTED; + } + + bEnable = NV_TRUE; + bRemove = + ((pParams->flags & NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE) != 0); + bLinkDisable = + ((pParams->flags & NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE) != 0); + + if (bLinkDisable && !bRemove) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + else if (NV0000_CTRL_GPU_DRAIN_STATE_DISABLED == + pParams->newState) + { + bEnable = NV_FALSE; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Set/Clear GPU manager drain state + status = gpumgrModifyGpuDrainState(pParams->gpuId, bEnable, bRemove, bLinkDisable); + + return status; +} + +NV_STATUS +cliresCtrlCmdGpuQueryGpuDrainState_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams +) +{ + NvBool bDrainState; + NvBool bRemove; + NV_STATUS status; + + status = gpumgrQueryGpuDrainState(pParams->gpuId, &bDrainState, &bRemove); + + if (status != NV_OK) + { + return status; + } + + pParams->drainState = bDrainState ? NV0000_CTRL_GPU_DRAIN_STATE_ENABLED + : NV0000_CTRL_GPU_DRAIN_STATE_DISABLED; + + pParams->flags = bRemove ? NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE : 0; + + return NV_OK; +} + +/* + * Associate sub process ID with client handle + * + * @return 'NV_OK' on success. 
Otherwise return NV_ERR_INVALID_CLIENT + */ +NV_STATUS +cliresCtrlCmdSetSubProcessID_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmClient *pClient; + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + pClient->SubProcessID = pParams->subProcessID; + portStringCopy(pClient->SubProcessName, sizeof(pClient->SubProcessName), pParams->subProcessName, sizeof(pParams->subProcessName)); + + return NV_OK; +} + +/* + * Disable USERD isolation among all the sub processes within a user process + * + * @return 'NV_OK' on success. Otherwise return NV_ERR_INVALID_CLIENT + */ +NV_STATUS +cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmClient *pClient; + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + pClient->bIsSubProcessDisabled = pParams->bIsSubProcessDisabled; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientGetAddrSpaceType_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + CALL_CONTEXT callContext; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + Memory *pMemory = NULL; + GpuResource *pGpuResource = NULL; + NV_ADDRESS_SPACE memType; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pParams->hObject, &pResourceRef)); + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pRsClient; + callContext.pResourceRef = pResourceRef; + + pMemory = dynamicCast(pResourceRef->pResource, Memory); + if (pMemory != NULL) + { + NV_ASSERT_OK_OR_RETURN(memGetMapAddrSpace(pMemory, &callContext, pParams->mapFlags, &memType)); + + } + else + { + pGpuResource = dynamicCast(pResourceRef->pResource, GpuResource); + if (pGpuResource != NULL) + { + NV_ASSERT_OK_OR_RETURN(gpuresGetMapAddrSpace(pGpuResource, &callContext, pParams->mapFlags, &memType)); + } + else + { + return NV_ERR_INVALID_OBJECT; + } + } + + switch (memType) + { + case ADDR_SYSMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM; + break; + case ADDR_FBMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM; + break; + case ADDR_REGMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM; + break; + case ADDR_FABRIC_V2: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC; + break; + case ADDR_FABRIC_MC: +#ifdef NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC; + break; +#else + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; +#endif + case ADDR_VIRTUAL: + NV_PRINTF(LEVEL_ERROR, + "VIRTUAL (0x%x) is not a valid NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE\n", + memType); + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + default: + NV_PRINTF(LEVEL_ERROR, "Cannot determine address space 0x%x\n", + memType); + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS 
+cliresCtrlCmdClientGetHandleInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + NV_STATUS status; + RsResourceRef *pRsResourceRef; + + status = serverutilGetResourceRef(hClient, pParams->hObject, &pRsResourceRef); + if (status != NV_OK) + { + return status; + } + + switch (pParams->index) + { + case NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT: + pParams->data.hResult = pRsResourceRef->pParentRef ? pRsResourceRef->pParentRef->hResource : 0; + break; + case NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID: + pParams->data.iResult = pRsResourceRef->externalClassId; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientGetAccessRights_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams +) +{ + NV_STATUS status; + RsResourceRef *pRsResourceRef; + RsResourceRef *pClientRef = RES_GET_REF(pRmCliRes); + RsClient *pClient = pClientRef->pClient; + + status = serverutilGetResourceRef(pParams->hClient, pParams->hObject, &pRsResourceRef); + if (status != NV_OK) + { + return status; + } + + rsAccessUpdateRights(pRsResourceRef, pClient, NULL); + + rsAccessGetAvailableRights(pRsResourceRef, pClient, &pParams->maskResult); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientSetInheritedSharePolicy_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams +) +{ + NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.sharePolicy = pParams->sharePolicy; + params.hObject = RES_GET_REF(pRmCliRes)->hResource; + + return cliresCtrlCmdClientShareObject(pRmCliRes, ¶ms); +} + +NV_STATUS +cliresCtrlCmdClientShareObject_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams +) +{ + RS_SHARE_POLICY *pSharePolicy = &pParams->sharePolicy; + RsClient *pClient = RES_GET_CLIENT(pRmCliRes); + RsResourceRef *pObjectRef; + + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldCallContext; + + NV_STATUS status; + + if (pSharePolicy->type >= RS_SHARE_TYPE_MAX) + return NV_ERR_INVALID_ARGUMENT; + + status = clientGetResourceRef(pClient, pParams->hObject, &pObjectRef); + if (status != NV_OK) + return status; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = &g_resServ; + callContext.pClient = pClient; + callContext.pResourceRef = pObjectRef; + callContext.secInfo = pCallContext->secInfo; + + resservSwapTlsCallContext(&pOldCallContext, &callContext); + status = clientShareResource(pClient, pObjectRef, pSharePolicy, &callContext); + resservRestoreTlsCallContext(pOldCallContext); + if (status != NV_OK) + return status; + + // + // Above clientShareResource does everything needed for normal sharing, + // but we may still need to add a backref if we're sharing with a client, + // to prevent stale access. + // + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) && + (pSharePolicy->type == RS_SHARE_TYPE_CLIENT)) + { + RsClient *pClientTarget; + + // Trying to share with self, nothing to do. 
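+        //
+        // Illustrative sketch (editor's addition, not from this change): a
+        // caller-side share policy that this branch handles, granting another
+        // client access to an object. Field names come from this function;
+        // the handles are hypothetical:
+        //
+        //     NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS p = {0};
+        //     p.hObject            = hSomeObject;   // hypothetical handle
+        //     p.sharePolicy.type   = RS_SHARE_TYPE_CLIENT;
+        //     p.sharePolicy.target = hPeerClient;   // hypothetical client
+        //
+        // Without RS_SHARE_ACTION_FLAG_REVOKE this grants access, and the
+        // back-reference added below keeps the grant from going stale.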
+ if (pSharePolicy->target == pClient->hClient) + return NV_OK; + + status = serverGetClientUnderLock(&g_resServ, pSharePolicy->target, &pClientTarget); + if (status != NV_OK) + return status; + + status = clientAddAccessBackRef(pClientTarget, pObjectRef); + if (status != NV_OK) + return status; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdClientGetChildHandle_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + NV_STATUS status; + RsResourceRef *pParentRef; + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hClient, pParams->hParent, &pParentRef); + if (status != NV_OK) + { + return status; + } + + status = refFindChildOfType(pParentRef, pParams->classId, NV_TRUE, &pResourceRef); + if (status == NV_OK) + { + pParams->hObject = pResourceRef ? pResourceRef->hResource : 0; + } + return status; +} + +NV_STATUS +cliresCtrlCmdGpuGetMemOpEnable_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + + pMemOpEnableParams->enableMask = 0; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_STREAM_MEMOPS)) + { + NV_PRINTF(LEVEL_INFO, "MemOpOverride enabled\n"); + pMemOpEnableParams->enableMask = NV0000_CTRL_GPU_FLAGS_MEMOP_ENABLE; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdGpuDisableNvlinkInit_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (!rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_EXT_FABRIC_MGMT, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_WARNING, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (pParams->gpuId == NV0000_CTRL_GPU_INVALID_ID) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return gpumgrSetGpuInitDisabledNvlinks(pParams->gpuId, pParams->mask, pParams->bSkipHwNvlinkDisable); +} + +NV_STATUS +cliresCtrlCmdLegacyConfig_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RsClient *pClient = RES_GET_CLIENT(pRmCliRes); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + NvHandle hDeviceOrSubdevice = pParams->hContext; + NvHandle hDevice; + OBJGPU *pGpu; + GpuResource *pGpuResource; + NV_STATUS rmStatus = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + // + // Clients pass in device or subdevice as context for NvRmConfigXyz. + // + rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pClient, + hDeviceOrSubdevice, + &pGpuResource); + if (rmStatus != NV_OK) + return rmStatus; + + hDevice = RES_GET_HANDLE(GPU_RES_GET_DEVICE(pGpuResource)); + pGpu = GPU_RES_GET_GPU(pGpuResource); + + // + // GSP client builds should have these legacy APIs disabled, + // but a monolithic build running in offload mode can still reach here, + // so log those cases and bail early to keep the same behavior. 
+ // + NV_ASSERT_OR_RETURN(!IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED); + + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); + + pParams->dataType = pParams->opType; + + switch (pParams->opType) + { + default: + PORT_UNREFERENCED_VARIABLE(pGpu); + PORT_UNREFERENCED_VARIABLE(hDevice); + PORT_UNREFERENCED_VARIABLE(hClient); + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + } + + return rmStatus; +} + +NV_STATUS +cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, + pExtFabricMgmtParams->bExternalFabricMgmt); + return NV_OK; +} + +NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams +) +{ + pParams->clientCount = g_resServ.activeClientCount; + pParams->resourceCount = g_resServ.activeResourceCount; + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c new file mode 100644 index 0000000..7d7fbff --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c @@ -0,0 +1,839 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "rmapi/client.h" +#include "rmapi/rs_utils.h" +#include "diagnostics/tracer.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "gpu/device/device.h" + +#include "entry_points.h" +#include "resserv/rs_access_map.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" + +#include "ctrl/ctrl2080/ctrl2080tmr.h" // NV2080_CTRL_CMD_TIMER_* + +static NV_STATUS +releaseDeferRmCtrlBuffer(RmCtrlDeferredCmd* pRmCtrlDeferredCmd) +{ + portMemSet(&pRmCtrlDeferredCmd->paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE); + + portAtomicSetS32(&pRmCtrlDeferredCmd->pending, RMCTRL_DEFERRED_FREE); + + return NV_OK; +} + +// +// This is the rmControl internal handler for deferred calls. 
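+// Illustrative sketch (editor's addition, not from this change): the
+// lifecycle of a deferred-command slot, with state names taken from this
+// file. _rmControlDeferred (below) claims a slot at raised IRQL and this
+// handler drains it later:
+//
+//     RMCTRL_DEFERRED_FREE      --CAS in _rmControlDeferred--> RMCTRL_DEFERRED_ACQUIRED
+//     RMCTRL_DEFERRED_ACQUIRED  --params copied into slot-->   RMCTRL_DEFERRED_READY
+//     RMCTRL_DEFERRED_READY     --handler copies params out,
+//                                 releaseDeferRmCtrlBuffer-->  RMCTRL_DEFERRED_FREE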
+//
+//
+
+NV_STATUS
+rmControl_Deferred(RmCtrlDeferredCmd* pRmCtrlDeferredCmd)
+{
+    RmCtrlParams rmCtrlParams;
+    RmClient *pClient;
+    NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE];
+    NV_STATUS status;
+    RS_LOCK_INFO lockInfo = {0};
+    RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0};
+
+    // init RmCtrlParams
+    portMemCopy(&rmCtrlParams, sizeof(RmCtrlParams), &pRmCtrlDeferredCmd->rmCtrlDeferredParams, sizeof(RmCtrlParams));
+    rmCtrlParams.hParent = NV01_NULL_OBJECT;
+    rmCtrlParams.pGpu = NULL;
+    rmCtrlParams.pLockInfo = &lockInfo;
+    rmCtrlParams.pCookie = &rmCtrlExecuteCookie;
+
+    // Temporary: tell ResServ not to take any locks
+    lockInfo.flags = RM_LOCK_FLAGS_NO_GPUS_LOCK |
+                     RM_LOCK_FLAGS_NO_CLIENT_LOCK;
+
+    if (rmapiLockIsOwner())
+    {
+        lockInfo.state = RM_LOCK_STATES_API_LOCK_ACQUIRED;
+    }
+    else
+    {
+        lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK;
+    }
+
+    // paramsSize not > _MAX already verified in _rmControlDeferred
+    if ((rmCtrlParams.pParams != NvP64_NULL) && (rmCtrlParams.paramsSize != 0))
+    {
+        // copy the params to a local buffer so that pRmCtrlDeferredCmd can be released
+        portMemSet(paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE);
+        portMemCopy(paramBuffer, rmCtrlParams.paramsSize, rmCtrlParams.pParams, rmCtrlParams.paramsSize);
+        rmCtrlParams.pParams = paramBuffer;
+    }
+
+    releaseDeferRmCtrlBuffer(pRmCtrlDeferredCmd);
+
+    // The client was checked when we came in through rmControl(),
+    // but check again to make sure it's still good.
+    if (serverutilGetClientUnderLock(rmCtrlParams.hClient, &pClient) != NV_OK)
+    {
+        status = NV_ERR_INVALID_CLIENT;
+        goto exit;
+    }
+
+    status = serverControl(&g_resServ, &rmCtrlParams);
+
+exit:
+
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_WARNING, "deferred rmctrl %x failed %x!\n",
+                  rmCtrlParams.cmd, status);
+    }
+
+    return status;
+}
+
+static NV_STATUS
+_rmControlDeferred(RmCtrlParams *pRmCtrlParams, NvP64 pUserParams, NvU32 paramsSize)
+{
+    // Schedule a deferred rmctrl call
+    OBJGPU *pGpu;
+    NvBool bBcResource;
+    NV_STATUS rmStatus;
+    RsClient *pClient;
+
+    // We can't allocate memory at DIRQL, so use a pre-allocated buffer to store the rmctrl params.
+    // The size can't be larger than RMCTRL_DEFERRED_MAX_PARAM_SIZE; otherwise,
+    // fail this call.
+    if (paramsSize > RMCTRL_DEFERRED_MAX_PARAM_SIZE)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "rmctrl param size (%d) larger than limit (%d).\n",
+                  paramsSize, RMCTRL_DEFERRED_MAX_PARAM_SIZE);
+        rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
+        goto done;
+    }
+
+    rmStatus = serverGetClientUnderLock(&g_resServ, pRmCtrlParams->hClient, &pClient);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    rmStatus = gpuGetByHandle(pClient, pRmCtrlParams->hObject, &bBcResource, &pGpu);
+    if (rmStatus != NV_OK)
+        return rmStatus;
+
+    // Set SLI BC state for the thread
+    gpuSetThreadBcState(pGpu, bBcResource);
+
+    pRmCtrlParams->pGpu = pGpu;
+    pRmCtrlParams->pLockInfo = NULL;
+
+    switch (pRmCtrlParams->cmd)
+    {
+        // We don't have an available bit left in RmCtrlParams.cmd to
+        // mark an rmctrl type as deferrable, so use a command list here.
+        case NV2080_CTRL_CMD_TIMER_SCHEDULE:
+        {
+            if (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED)
+            {
+                OBJSYS *pSys = SYS_GET_INSTANCE();
+                NvU32 idx;
+
+                for (idx = 0; idx < MAX_DEFERRED_CMDS; idx++)
+                {
+                    if (portAtomicCompareAndSwapS32(&pGpu->pRmCtrlDeferredCmd[idx].pending,
+                                                    RMCTRL_DEFERRED_ACQUIRED,
+                                                    RMCTRL_DEFERRED_FREE))
+                    {
+                        portMemCopy(&pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams,
+                                    sizeof(RmCtrlParams), pRmCtrlParams, sizeof(RmCtrlParams));
+
+                        // copy in the params to a kernel buffer for the deferred rmctrl
+                        if (paramsSize != 0 && pUserParams != 0)
+                        {
+                            portMemCopy(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer, paramsSize,
+                                        NvP64_VALUE(pUserParams), paramsSize);
+
+                            if (paramsSize < RMCTRL_DEFERRED_MAX_PARAM_SIZE)
+                            {
+                                portMemSet(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer +
+                                           paramsSize,
+                                           0, RMCTRL_DEFERRED_MAX_PARAM_SIZE - paramsSize);
+                            }
+
+                            pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams.pParams =
+                                pGpu->pRmCtrlDeferredCmd[idx].paramBuffer;
+                        }
+
+                        portAtomicSetS32(&pGpu->pRmCtrlDeferredCmd[idx].pending,
+                                         RMCTRL_DEFERRED_READY);
+
+                        // Make sure there's a release call to trigger the deferred rmctrl.
+                        // The previous rmctrl holding the lock may have already finished
+                        // (released its lock) in the window after this rmctrl failed to
+                        // acquire the lock and before the pending flag was set.
+
+                        // LOCK: try to acquire GPUs lock
+                        if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE,
+                                              RM_LOCK_MODULES_CLIENT) == NV_OK)
+                        {
+                            if (osCondAcquireRmSema(pSys->pSema) == NV_OK)
+                            {
+                                // In case this is called from a device interrupt, use pGpu to queue the DPC.
+                                osReleaseRmSema(pSys->pSema, pGpu);
+                            }
+                            // In case this is called from a device interrupt, use pGpu to queue the DPC.
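+                            // The queued DPC is what eventually invokes
+                            // rmControl_Deferred() to replay this command.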
+ // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu); + } + + rmStatus = NV_OK; + goto done; + } + } + } + + rmStatus = NV_ERR_STATE_IN_USE; + break; + } + + default: + rmStatus = NV_ERR_BUSY_RETRY; + break; + } + +done: + return rmStatus; +} + +NV_STATUS +serverControlApiCopyIn +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pCookie +) +{ + NV_STATUS rmStatus; + API_STATE *pParamCopy; + API_STATE *pEmbeddedParamCopies; + NvP64 pUserParams; + NvU32 paramsSize; + + NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT); + pParamCopy = &pCookie->paramCopy; + pEmbeddedParamCopies = pCookie->embeddedParamCopies; + pUserParams = NV_PTR_TO_NvP64(pRmCtrlParams->pParams); + paramsSize = pRmCtrlParams->paramsSize; + + RMAPI_PARAM_COPY_INIT(*pParamCopy, pRmCtrlParams->pParams, pUserParams, paramsSize, 1); + + rmStatus = rmapiParamsAcquire(pParamCopy, (pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_USER)); + if (rmStatus != NV_OK) + return rmStatus; + pCookie->bFreeParamCopy = NV_TRUE; + + rmStatus = embeddedParamCopyIn(pEmbeddedParamCopies, pRmCtrlParams); + if (rmStatus != NV_OK) + return rmStatus; + pCookie->bFreeEmbeddedCopy = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +serverControlApiCopyOut +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus +) +{ + NV_STATUS cpStatus; + API_STATE *pParamCopy; + API_STATE *pEmbeddedParamCopies; + NvP64 pUserParams; + NvBool bFreeEmbeddedCopy; + NvBool bFreeParamCopy; + + NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT); + + pParamCopy = &pCookie->paramCopy; + pEmbeddedParamCopies = pCookie->embeddedParamCopies; + pUserParams = pCookie->paramCopy.pUserParams; + bFreeParamCopy = pCookie->bFreeParamCopy; + bFreeEmbeddedCopy = pCookie->bFreeEmbeddedCopy; + + if ((rmStatus != NV_OK) && !(pCookie->ctrlFlags & RMCTRL_FLAGS_COPYOUT_ON_ERROR)) + { + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + if (bFreeEmbeddedCopy) + { + pEmbeddedParamCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[1].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[2].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[3].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + } + } + + if (bFreeEmbeddedCopy) + { + cpStatus = embeddedParamCopyOut(pEmbeddedParamCopies, pRmCtrlParams); + if (rmStatus == NV_OK) + rmStatus = cpStatus; + pCookie->bFreeEmbeddedCopy = NV_FALSE; + } + + if (bFreeParamCopy) + { + cpStatus = rmapiParamsRelease(pParamCopy); + if (rmStatus == NV_OK) + rmStatus = cpStatus; + pRmCtrlParams->pParams = NvP64_VALUE(pUserParams); + pCookie->bFreeParamCopy = NV_FALSE; + } + + return rmStatus; +} + +static NvBool _rmapiRmControlCanBeRaisedIrql(NvU32 cmd) +{ + return NV_FALSE; +} + +static NvBool _rmapiRmControlCanBeBypassLock(NvU32 cmd) +{ + return NV_FALSE; +} + +static NV_STATUS +_rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams, NvU32 paramsSize, NvU32 flags, RM_API *pRmApi, API_SECURITY_INFO *pSecInfo) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + RmClient *pClient; + RmCtrlParams rmCtrlParams; + RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0}; + NvBool bIsRaisedIrqlCmd; + NvBool bIsLockBypassCmd; + NvBool bInternalRequest; + NV_STATUS rmStatus = NV_OK; + RS_LOCK_INFO lockInfo = {0}; + + RMTRACE_RMAPI(_RMCTRL_ENTRY, cmd); + + // Check first for the NULL command. 
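+    // A NULL command is a no-op by definition, so it succeeds before any
+    // client lookup, parameter validation, or locking takes place.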
+    // Return NV_OK immediately for NVXXXX_CTRL_CMD_NULL (0x00000000)
+    // as well as the per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 )
+    if ((cmd == NVXXXX_CTRL_CMD_NULL) ||
+        (FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) &&
+         FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd)))
+    {
+        return NV_OK;
+    }
+
+    NV_PRINTF(LEVEL_INFO,
+              "rmControl: hClient 0x%x hObject 0x%x cmd 0x%x\n", hClient,
+              hObject, cmd);
+
+    NV_PRINTF(LEVEL_INFO, "rmControl: pUserParams 0x%p paramSize 0x%x\n",
+              NvP64_VALUE(pUserParams), paramsSize);
+
+    // If we're behind either the API lock or the GPU lock, treat the request as internal.
+    bInternalRequest = pRmApi->bApiLockInternal || pRmApi->bGpuLockInternal;
+
+    // is this a raised IRQL cmd?
+    bIsRaisedIrqlCmd = (flags & NVOS54_FLAGS_IRQL_RAISED);
+
+    // is this a lock bypass cmd?
+    bIsLockBypassCmd = ((flags & NVOS54_FLAGS_LOCK_BYPASS) || pRmApi->bGpuLockInternal);
+
+    // NVOS54_FLAGS_IRQL_RAISED cmds may only be called at raised IRQ level.
+    if (bIsRaisedIrqlCmd)
+    {
+        // Check that we support this control call at raised IRQL
+        if (!_rmapiRmControlCanBeRaisedIrql(cmd))
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "rmControl: cmd 0x%x cannot be called at raised irq level\n", cmd);
+            rmStatus = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+
+        if (!osIsRaisedIRQL())
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "rmControl: raised cmd 0x%x at normal irq level\n", cmd);
+            rmStatus = NV_ERR_INVALID_ARGUMENT;
+            goto done;
+        }
+    }
+
+    if (bIsLockBypassCmd)
+    {
+        flags |= NVOS54_FLAGS_LOCK_BYPASS;
+
+        if (!bInternalRequest)
+        {
+            // Check that we support bypassing locks with this control call
+            if (!_rmapiRmControlCanBeBypassLock(cmd))
+            {
+                NV_PRINTF(LEVEL_WARNING,
+                          "rmControl: cmd 0x%x cannot bypass locks\n", cmd);
+                rmStatus = NV_ERR_INVALID_ARGUMENT;
+                goto done;
+            }
+        }
+    }
+
+    // Potential race condition if run lockless?
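+    // (serverutilGetClientUnderLock assumes the caller already holds the
+    // locks it needs; it does not take them itself.)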
+ if (serverutilGetClientUnderLock(hClient, &pClient) != NV_OK) + { + rmStatus = NV_ERR_INVALID_CLIENT; + goto done; + } + + // only kernel clients can issue raised IRQL or lock bypass cmds + // bypass client priv check for internal calls done on behalf of lower priv + // clients + if ((bIsRaisedIrqlCmd || bIsLockBypassCmd) && !bInternalRequest) + { + if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) + { + rmStatus = NV_ERR_INVALID_CLIENT; + goto done; + } + } + + // error check parameters + if (((paramsSize != 0) && (pUserParams == (NvP64) 0)) || + ((paramsSize == 0) && (pUserParams != (NvP64) 0))) + { + NV_PRINTF(LEVEL_WARNING, "bad params: ptr " NvP64_fmt " size: 0x%x\n", + pUserParams, paramsSize); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // init RmCtrlParams + portMemSet(&rmCtrlParams, 0, sizeof(rmCtrlParams)); + rmCtrlParams.hClient = hClient; + rmCtrlParams.hObject = hObject; + rmCtrlParams.cmd = cmd; + rmCtrlParams.flags = flags; + rmCtrlParams.pParams = NvP64_VALUE(pUserParams); + rmCtrlParams.paramsSize = paramsSize; + rmCtrlParams.hParent = NV01_NULL_OBJECT; + rmCtrlParams.pGpu = NULL; + rmCtrlParams.pResourceRef = NULL; + rmCtrlParams.secInfo = *pSecInfo; + rmCtrlParams.pLockInfo = &lockInfo; + rmCtrlParams.pCookie = &rmCtrlExecuteCookie; + rmCtrlParams.bInternal = bInternalRequest; + + if (pRmApi->bApiLockInternal) + { + lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + } + + // + // Three separate rmctrl command modes: + // + // mode#1: lock bypass rmctrl request + // mode#2: raised-irql rmctrl request + // mode#3: normal rmctrl request + // + if (bIsLockBypassCmd) + { + lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + + // + // Lock bypass rmctrl request. + // + rmStatus = serverControl(&g_resServ, &rmCtrlParams); + } + else if (bIsRaisedIrqlCmd) + { + // + // Raised IRQL rmctrl request. + // + // Try to get lock; if we cannot get it then place on deferred queue. + // + + // LOCK: try to acquire GPUs lock + if (osCondAcquireRmSema(pSys->pSema) == NV_OK) + { + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_CLIENT) == NV_OK) + { + lockInfo.state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + rmStatus = serverControl(&g_resServ, &rmCtrlParams); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_COND_ACQUIRE, osIsISR() ? rmCtrlParams.pGpu : NULL); + } + else + { + rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize); + } + // we must have a pGpu here for queuing of a DPC. + NV_ASSERT(!osIsISR() || rmCtrlParams.pGpu); + osReleaseRmSema(pSys->pSema, osIsISR() ? rmCtrlParams.pGpu : NULL); + } + else + { + rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize); + } + } + else + { + // + // Normal rmctrl request. 
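+        // Locks are taken by serverControl() itself, as directed by lockInfo;
+        // RM_LOCK_FLAGS_RM_SEMA below asks it to take the RM semaphore too.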
+        //
+
+        RM_API_CONTEXT rmApiContext = {0};
+        rmStatus = rmapiPrologue(pRmApi, &rmApiContext);
+        if (rmStatus != NV_OK)
+            goto done;
+
+        lockInfo.flags |= RM_LOCK_FLAGS_RM_SEMA;
+        rmStatus = serverControl(&g_resServ, &rmCtrlParams);
+        rmapiEpilogue(pRmApi, &rmApiContext);
+    }
+done:
+
+    RMTRACE_RMAPI(_RMCTRL_EXIT, cmd);
+    return rmStatus;
+}
+
+// Validate the rmctrl cookie: access rights, privilege level, and flags.
+NV_STATUS serverControl_ValidateCookie
+(
+    RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams,
+    RS_CONTROL_COOKIE *pRmCtrlExecuteCookie
+)
+{
+    NV_STATUS status;
+
+    if (g_resServ.bRsAccessEnabled)
+    {
+        if (pRmCtrlParams->pResourceRef != NULL)
+        {
+            //
+            // Check that the invoking client has appropriate access rights.
+            // For control calls, the invoking client is the owner of the ref.
+            //
+            status = rsAccessCheckRights(pRmCtrlParams->pResourceRef,
+                                         pRmCtrlParams->pResourceRef->pClient,
+                                         &pRmCtrlExecuteCookie->rightsRequired);
+            if (status != NV_OK)
+                return status;
+        }
+        else
+        {
+            // pResourceRef can be NULL when rmControlCmdExecute is manually
+            // invoked from the deferred API path (see class5080DeferredApiV2).
+            // For now, we skip performing any access right checks in this case.
+        }
+    }
+    else
+    {
+        //
+        // When access rights are disabled, any control calls that have the
+        // *_IF_RS_ACCESS_DISABLED flags should be treated as if they were
+        // declared with the corresponding flags.
+        //
+        if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED) != 0)
+        {
+            pRmCtrlExecuteCookie->ctrlFlags |= RMCTRL_FLAGS_PRIVILEGED;
+        }
+    }
+
+    if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_INTERNAL))
+    {
+        NvBool bInternalCall = pRmCtrlParams->bInternal;
+        if (!bInternalCall)
+            return NV_ERR_NOT_SUPPORTED;
+    }
+
+    if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_PRIVILEGED)
+    {
+        //
+        // Calls originating from usermode require admin perms while calls
+        // originating from other kernel drivers are always allowed.
+        //
+        if (pRmCtrlParams->secInfo.privLevel < RS_PRIV_LEVEL_USER_ROOT)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-privileged context issued privileged cmd\n",
+                      pRmCtrlParams->hClient, pRmCtrlParams->hObject,
+                      pRmCtrlParams->cmd);
+            return NV_ERR_INSUFFICIENT_PERMISSIONS;
+        }
+    }
+
+    // permissions check for KERNEL_PRIVILEGED (default) unless NON_PRIVILEGED, PRIVILEGED or INTERNAL is specified
+    if (!(pRmCtrlExecuteCookie->ctrlFlags & (RMCTRL_FLAGS_NON_PRIVILEGED | RMCTRL_FLAGS_PRIVILEGED | RMCTRL_FLAGS_INTERNAL)))
+    {
+        if (pRmCtrlParams->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-kernel client issued kernel-only cmd\n",
+                      pRmCtrlParams->hClient, pRmCtrlParams->hObject,
+                      pRmCtrlParams->cmd);
+            return NV_ERR_INSUFFICIENT_PERMISSIONS;
+        }
+    }
+
+    // fail if the GPU isn't ready
+    if ((!(pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_ACCESS)) && (pRmCtrlParams->pGpu != NULL))
+    {
+        API_GPU_FULL_POWER_SANITY_CHECK(pRmCtrlParams->pGpu, NV_FALSE,
+                                        pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS);
+
+        if ( ! 
API_GPU_ATTACHED_SANITY_CHECK(pRmCtrlParams->pGpu))
+            return NV_ERR_GPU_IS_LOST;
+    }
+
+    // Raised-IRQL controls must supply kernel-resident parameters, since
+    // user copies can't be performed at DIRQL.
+    if ((pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) &&
+        (pRmCtrlParams->secInfo.paramLocation != PARAM_LOCATION_KERNEL))
+    {
+        return NV_ERR_INVALID_PARAMETER;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+serverControlLookupLockFlags
+(
+    RsServer *pServer,
+    RS_LOCK_ENUM lock,
+    RmCtrlParams *pRmCtrlParams,
+    RmCtrlExecuteCookie *pRmCtrlExecuteCookie,
+    LOCK_ACCESS_TYPE *pAccess
+)
+{
+    if (pAccess == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    *pAccess = LOCK_ACCESS_WRITE;
+
+    if (lock == RS_LOCK_TOP)
+    {
+        if (!serverSupportsReadOnlyLock(&g_resServ, RS_LOCK_TOP, RS_API_CTRL))
+        {
+            *pAccess = LOCK_ACCESS_WRITE;
+            return NV_OK;
+        }
+
+        if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_API_LOCK_READONLY)
+            *pAccess = LOCK_ACCESS_READ;
+
+        return NV_OK;
+    }
+
+    if (lock == RS_LOCK_RESOURCE)
+    {
+        RS_LOCK_INFO *pLockInfo = pRmCtrlParams->pLockInfo;
+
+        //
+        // Do not acquire the GPU lock if we were explicitly told not to,
+        // or if this is an internal call, meaning we already own the
+        // GPUs lock.
+        //
+        if ((pLockInfo->state & RM_LOCK_STATES_GPUS_LOCK_ACQUIRED) ||
+            (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK) ||
+            (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) ||
+            (pRmCtrlParams->flags & NVOS54_FLAGS_LOCK_BYPASS))
+        {
+            pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
+            pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK;
+        }
+        else
+        {
+            if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY)
+            {
+                pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK;
+                pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK;
+            }
+            else
+            {
+                pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK;
+                pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK;
+            }
+
+            if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_GPU_LOCK_READONLY)
+                *pAccess = LOCK_ACCESS_READ;
+        }
+
+        return NV_OK;
+    }
+
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+NV_STATUS
+rmapiControl
+(
+    RM_API *pRmApi,
+    NvHandle hClient,
+    NvHandle hObject,
+    NvU32 cmd,
+    void *pParams,
+    NvU32 paramsSize
+)
+{
+    if (!pRmApi->bHasDefaultSecInfo)
+        return NV_ERR_NOT_SUPPORTED;
+
+    return pRmApi->ControlWithSecInfo(pRmApi, hClient, hObject, cmd, NV_PTR_TO_NvP64(pParams),
+                                      paramsSize, 0, &pRmApi->defaultSecInfo);
+}
+
+NV_STATUS
+rmapiControlWithSecInfo
+(
+    RM_API *pRmApi,
+    NvHandle hClient,
+    NvHandle hObject,
+    NvU32 cmd,
+    NvP64 pParams,
+    NvU32 paramsSize,
+    NvU32 flags,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    NV_STATUS status;
+
+    NV_PRINTF(LEVEL_INFO,
+              "Nv04Control: hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n",
+              hClient, hObject, cmd, pParams, paramsSize, flags);
+
+    status = _rmapiRmControl(hClient, hObject, cmd, pParams, paramsSize, flags, pRmApi, pSecInfo);
+
+    if (status == NV_OK)
+    {
+        NV_PRINTF(LEVEL_INFO, "Nv04Control: control complete\n");
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Nv04Control: control failed; status: %s (0x%08x)\n",
+                  nvstatusToString(status), status);
+        NV_PRINTF(LEVEL_INFO,
+                  "Nv04Control: hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n",
+                  hClient, hObject, cmd, pParams, paramsSize, flags);
+    }
+
+    return status;
+}
+
+
+//
+// Called at DIRQL, where we can't do memory allocations.
+// Do not inline this function, to save stack space.
+//
+static NV_NOINLINE NV_STATUS
+_rmapiControlWithSecInfoTlsIRQL
+(
+    RM_API* pRmApi,
+    NvHandle hClient,
+    NvHandle hObject,
+    NvU32 cmd,
+    NvP64 pParams,
+    NvU32 paramsSize,
+    NvU32 flags,
+    API_SECURITY_INFO* 
pSecInfo +) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + + NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; + PORT_MEM_ALLOCATOR* pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator)); + tlsIsrInit(pIsrAllocator); + + // + // SMP synchronization for Nv04Control is handled lower in the + // call sequence to accommodate the various operation-specific + // lock requirements (e.g. some operations can run locklessly). + // + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + tlsIsrDestroy(pIsrAllocator); + portMemAllocatorRelease(pIsrAllocator); + + return status; +} + + +NV_STATUS +rmapiControlWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + + if (!portMemExSafeForNonPagedAlloc()) + { + return _rmapiControlWithSecInfoTlsIRQL(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + } + + // + // SMP synchronization for Nv04Control is handled lower in the + // call sequence to accommodate the various operation-specific + // lock requirements (e.g. some operations can run locklessly). + // + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c new file mode 100644 index 0000000..cbdfd44 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c @@ -0,0 +1,205 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "rmapi/rmapi.h" +#include "rmapi/param_copy.h" +#include "os/os.h" +#include "deprecated_context.h" + +static NV_STATUS +_rmAllocForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, phObject, + hClass, NV_PTR_TO_NvP64(pAllocParams), + RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pContext->secInfo); +} + +static NV_STATUS +_rmControlForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParams, NvU32 paramsSize) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->ControlWithSecInfo(pRmApi, hClient, hObject, cmd, + NV_PTR_TO_NvP64(pParams), paramsSize, 0, + &pContext->secInfo); +} + +static NV_STATUS +_rmFreeForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hObject) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->FreeWithSecInfo(pRmApi, hClient, hObject, + RMAPI_FREE_FLAGS_NONE, &pContext->secInfo); +} + +static NV_STATUS +_rmMapMemoryForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hDevice, + NvHandle hMemory, NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, &pContext->secInfo); +} + +NV_STATUS +RmCopyUserForDeprecatedApi +( + RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, + NvU32 dataSize, + void **ppKernelPtr, + NvBool bUserModeArgs +) +{ + NV_STATUS status = NV_OK; + + switch (op) + { + case RMAPI_DEPRECATED_COPYIN: + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + *ppKernelPtr = portMemAllocNonPaged(dataSize); + + if (*ppKernelPtr == NULL) + return NV_ERR_NO_MEMORY; + } + + status = rmapiParamsCopyIn(NULL, + *ppKernelPtr, + dataPtr, + dataSize, + bUserModeArgs); + + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + if (status != NV_OK) + { + portMemFree(*ppKernelPtr); + *ppKernelPtr = NULL; + } + } + break; + case RMAPI_DEPRECATED_COPYOUT: + status = rmapiParamsCopyOut(NULL, + *ppKernelPtr, + dataPtr, + dataSize, + bUserModeArgs); + + // intentionally fall through to release memory... 
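+            // (COPYOUT reuses the buffer-release logic of COPYRELEASE below;
+            // the copy-out status is preserved across the fall-through.)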
+ case RMAPI_DEPRECATED_COPYRELEASE: + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + portMemFree(*ppKernelPtr); + *ppKernelPtr = NULL; + } + break; + } + + return status; +} + +static NV_STATUS +_rmCopyUserForDeprecatedApi +( + DEPRECATED_CONTEXT *_pContext, + RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, + NvU32 dataSize, + void **ppKernelPtr +) +{ + return RmCopyUserForDeprecatedApi(op, bufPolicy, dataPtr, dataSize, + ppKernelPtr, + ((DEPRECATED_CONTEXT_EXT *)_pContext)->bUserModeArgs); +} + +static void * +_rmAllocMemForDeprecatedApi(NvU32 length) +{ + return portMemAllocNonPaged(length); +} + +static void +_rmFreeMemForDeprecatedApi(void *pAddress) +{ + portMemFree(pAddress); +} + +/** + * Setting bUserModeArgs to NV_FALSE can lead to Security issues where + * Privileged RM CTRL APIs are accessible by non-admin users. + * Please find more details in Bug: 3136168. + */ +void rmapiInitDeprecatedContext +( + DEPRECATED_CONTEXT_EXT *pContext, + API_SECURITY_INFO *pSecInfo, + NvBool bUserModeArgs, + NvBool bInternal +) +{ + if (pSecInfo == NULL) + { + portMemSet(&pContext->secInfo, 0, sizeof(pContext->secInfo)); + + if (bUserModeArgs) + { + pContext->secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + } + else + { + pContext->secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + } + } + else + { + pContext->secInfo = *pSecInfo; + } + + pContext->secInfo.paramLocation = PARAM_LOCATION_KERNEL; + + pContext->bInternal = bInternal; + pContext->pRmApi = rmapiGetInterface(bInternal ? RMAPI_GPU_LOCK_INTERNAL : RMAPI_EXTERNAL); + pContext->bUserModeArgs = bUserModeArgs; + + pContext->parent.RmAlloc = _rmAllocForDeprecatedApi; + pContext->parent.RmControl = _rmControlForDeprecatedApi; + pContext->parent.RmFree = _rmFreeForDeprecatedApi; + pContext->parent.RmMapMemory = _rmMapMemoryForDeprecatedApi; + pContext->parent.CopyUser = _rmCopyUserForDeprecatedApi; + pContext->parent.AllocMem = _rmAllocMemForDeprecatedApi; + pContext->parent.FreeMem = _rmFreeMemForDeprecatedApi; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h new file mode 100644 index 0000000..1459c32 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _DEPRECATED_CONTEXT_ +#define _DEPRECATED_CONTEXT_ + +#include "deprecated/rmapi_deprecated.h" + +typedef struct +{ + DEPRECATED_CONTEXT parent; + API_SECURITY_INFO secInfo; + NvBool bInternal; + NvBool bUserModeArgs; + RM_API *pRmApi; +} DEPRECATED_CONTEXT_EXT; + +void rmapiInitDeprecatedContext (DEPRECATED_CONTEXT_EXT *pContext, + API_SECURITY_INFO *pSecInfo, + NvBool bUserModeArgs, + NvBool bInternal); + +#endif // _DEPRECATED_CONTEXT_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c new file mode 100644 index 0000000..5a68244 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c @@ -0,0 +1,581 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "deprecated_context.h" +#include "os/os.h" + +#define RMAPI_DEPRECATED(pFunc, pArgs, bUserModeArgs) \ + NV_PRINTF(LEVEL_WARNING, "Calling deprecated function at %d\n", __LINE__); \ + pArgs->status = NV_ERR_NOT_SUPPORTED; + +#define RMAPI_NOT_SUPPORTED(pArgs) \ + pArgs->status = NV_ERR_NOT_SUPPORTED; + +// Primary APIs +static void _nv04Alloc(NVOS21_PARAMETERS*, NvBool); +static void _nv01Free(NVOS00_PARAMETERS*, NvBool); +static void _nv04Control(NVOS54_PARAMETERS*, NvBool, NvBool); +static void _nv04DupObject(NVOS55_PARAMETERS*, NvBool); +static void _nv04Share(NVOS57_PARAMETERS*, NvBool); +static void _nv04MapMemory(NVOS33_PARAMETERS*, NvBool, NvBool); +static void _nv04UnmapMemory(NVOS34_PARAMETERS*, NvBool, NvBool); +static void _nv04MapMemoryDma(NVOS46_PARAMETERS*, NvBool); +static void _nv04UnmapMemoryDma(NVOS47_PARAMETERS*, NvBool); + +// Legacy APIs +static void _nv01AllocMemory(NVOS02_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocMemory, pArgs, bUserModeArgs); } +static void _nv01AllocObject(NVOS05_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocObject, pArgs, bUserModeArgs); } +static void _nv04AddVblankCallback(NVOS61_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAddVblankCallback, pArgs, bUserModeArgs); } +static void _nv04AllocContextDma(NVOS39_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocContextDma, pArgs, bUserModeArgs); } +static void _nv04BindContextDma(NVOS49_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedBindContextDma, pArgs, bUserModeArgs); } +static void _nv04I2CAccess(NVOS_I2C_ACCESS_PARAMS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedI2CAccess, pArgs, bUserModeArgs); } +static void _nv04IdleChannels(NVOS30_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedIdleChannels, pArgs, bUserModeArgs); } +static void _nv04VidHeapControl(NVOS32_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedVidHeapControl, pArgs, bUserModeArgs); } + +static void _nv04AllocWithSecInfo(NVOS21_PARAMETERS*, API_SECURITY_INFO); +static void _nv04AllocWithAccessSecInfo(NVOS64_PARAMETERS*, API_SECURITY_INFO); +static void _nv04ControlWithSecInfo(NVOS54_PARAMETERS*, API_SECURITY_INFO, NvBool bInternalCall); +static void _nv01FreeWithSecInfo(NVOS00_PARAMETERS*, API_SECURITY_INFO); +static void _nv04AllocWithAccess(NVOS64_PARAMETERS*, NvBool); +static void _nv04MapMemoryWithSecInfo(NVOS33_PARAMETERS*, API_SECURITY_INFO); +static void _nv04UnmapMemoryWithSecInfo(NVOS34_PARAMETERS*, API_SECURITY_INFO); +static void _nv04MapMemoryDmaWithSecInfo(NVOS46_PARAMETERS*, API_SECURITY_INFO); +static void _nv04UnmapMemoryDmaWithSecInfo(NVOS47_PARAMETERS*, API_SECURITY_INFO); +static void _nv04DupObjectWithSecInfo(NVOS55_PARAMETERS*, API_SECURITY_INFO); +static void _nv04ShareWithSecInfo(NVOS57_PARAMETERS*, API_SECURITY_INFO); + + +// +// RM API entry points +// +// User mode clients should call base version (no suffix). +// +// Kernel mode clients should call Kernel or User version +// (call User if the parameters come from a user mode source). 
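+//
+// The *WithSecInfo variants take an explicit API_SECURITY_INFO supplied by
+// the caller instead of deriving one from the entry point flavor.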
+// + +void Nv01AllocMemory(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_TRUE); } +void Nv01AllocObject(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_TRUE); } +void Nv01Free(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_TRUE); } +void Nv04AddVblankCallback(NVOS61_PARAMETERS *pArgs) { _nv04AddVblankCallback(pArgs, NV_TRUE); } +void Nv04Alloc(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_TRUE); } +void Nv04AllocWithAccess(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_TRUE); } +void Nv04AllocContextDma(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_TRUE); } +void Nv04BindContextDma(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_TRUE); } +void Nv04Control(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_TRUE, NV_FALSE); } +void Nv04DupObject(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_TRUE); } +void Nv04Share(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_TRUE); } +void Nv04I2CAccess(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_TRUE); } +void Nv04IdleChannels(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_TRUE); } +void Nv04MapMemory(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04MapMemoryDma(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_TRUE); } +void Nv04UnmapMemory(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04UnmapMemoryDma(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_TRUE); } +void Nv04VidHeapControl(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_TRUE); } + +void Nv01AllocMemoryUser(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_TRUE); } +void Nv01AllocObjectUser(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_TRUE); } +void Nv01FreeUser(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_TRUE); } +void Nv04AddVblankCallbackUser(NVOS61_PARAMETERS *pArgs) { _nv04AddVblankCallback(pArgs, NV_TRUE); } +void Nv04AllocUser(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_TRUE); } +void Nv04AllocWithAccessUser(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_TRUE); } +void Nv04AllocContextDmaUser(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_TRUE); } +void Nv04BindContextDmaUser(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_TRUE); } +void Nv04ControlUser(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_TRUE, NV_FALSE); } +void Nv04DupObjectUser(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_TRUE); } +void Nv04ShareUser(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_TRUE); } +void Nv04I2CAccessUser(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_TRUE); } +void Nv04IdleChannelsUser(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_TRUE); } +void Nv04MapMemoryUser(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04MapMemoryDmaUser(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_TRUE); } +void Nv04UnmapMemoryUser(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04UnmapMemoryDmaUser(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_TRUE); } +void Nv04VidHeapControlUser(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_TRUE); } + +void Nv01AllocMemoryKernel(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_FALSE); } +void Nv01AllocObjectKernel(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_FALSE); } +void Nv01FreeKernel(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_FALSE); } +void Nv04AddVblankCallbackKernel(NVOS61_PARAMETERS 
*pArgs) { _nv04AddVblankCallback(pArgs, NV_FALSE); } +void Nv04AllocKernel(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_FALSE); } +void Nv04AllocWithAccessKernel(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_FALSE); } +void Nv04AllocContextDmaKernel(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_FALSE); } +void Nv04BindContextDmaKernel(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_FALSE); } +void Nv04ControlKernel(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_FALSE, NV_FALSE); } +void Nv04DupObjectKernel(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_FALSE); } +void Nv04ShareKernel(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_FALSE); } +void Nv04I2CAccessKernel(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_FALSE); } +void Nv04IdleChannelsKernel(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_FALSE); } +void Nv04MapMemoryKernel(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_FALSE, NV_FALSE); } +void Nv04MapMemoryDmaKernel(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_FALSE); } +void Nv04UnmapMemoryKernel(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_FALSE, NV_FALSE); } +void Nv04UnmapMemoryDmaKernel(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_FALSE); } +void Nv04VidHeapControlKernel(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_FALSE); } + +// MODS-specific API functions which ignore RM locking model +#if defined(LINUX_MFG) +void Nv04ControlInternal(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_FALSE, NV_TRUE); } +void Nv04MapMemoryInternal(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_FALSE, NV_TRUE); } +void Nv04UnmapMemoryInternal(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_FALSE, NV_TRUE); } +#endif + +#define RMAPI_DEPRECATED_WITH_SECINFO(pFunc, pArgs, secInfo) \ + NV_PRINTF(LEVEL_WARNING, "Calling deprecated function at %d\n", __LINE__); \ + pArgs->status = NV_ERR_NOT_SUPPORTED; + +void Nv01AllocMemoryWithSecInfo(NVOS02_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocMemory, pArgs, secInfo); } +void Nv01AllocObjectWithSecInfo(NVOS05_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocObject, pArgs, secInfo); } +void Nv04AllocWithSecInfo(NVOS21_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04AllocWithSecInfo(pArgs, secInfo); } +void Nv04AllocWithAccessSecInfo(NVOS64_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04AllocWithAccessSecInfo(pArgs, secInfo); } +void Nv01FreeWithSecInfo(NVOS00_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv01FreeWithSecInfo(pArgs, secInfo); } +void Nv04ControlWithSecInfo(NVOS54_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04ControlWithSecInfo(pArgs, secInfo, NV_FALSE); } +void Nv04VidHeapControlWithSecInfo(NVOS32_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedVidHeapControl, pArgs, secInfo); } +void Nv04IdleChannelsWithSecInfo(NVOS30_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedIdleChannels, pArgs, secInfo); } +void Nv04MapMemoryWithSecInfo(NVOS33_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04MapMemoryWithSecInfo(pArgs, secInfo); } +void Nv04UnmapMemoryWithSecInfo(NVOS34_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04UnmapMemoryWithSecInfo(pArgs, secInfo); } +void Nv04I2CAccessWithSecInfo(NVOS_I2C_ACCESS_PARAMS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedI2CAccess, 
pArgs, secInfo); } +void Nv04AllocContextDmaWithSecInfo(NVOS39_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocContextDma, pArgs, secInfo); } +void Nv04BindContextDmaWithSecInfo(NVOS49_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedBindContextDma, pArgs, secInfo); } +void Nv04MapMemoryDmaWithSecInfo(NVOS46_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04MapMemoryDmaWithSecInfo(pArgs, secInfo); } +void Nv04UnmapMemoryDmaWithSecInfo(NVOS47_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04UnmapMemoryDmaWithSecInfo(pArgs, secInfo); } +void Nv04DupObjectWithSecInfo(NVOS55_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04DupObjectWithSecInfo(pArgs, secInfo); } +void Nv04ShareWithSecInfo(NVOS57_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04ShareWithSecInfo(pArgs, secInfo); } + + +static void +XlateUserModeArgsToSecInfo +( + NvBool bUserModeArgs, + NvBool bInternalCall, + API_SECURITY_INFO *pSecInfo +) +{ + portMemSet(pSecInfo, 0, sizeof(*pSecInfo)); + + if (bInternalCall == NV_FALSE && bUserModeArgs == NV_TRUE) + { + pSecInfo->privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + pSecInfo->paramLocation = PARAM_LOCATION_USER; + } + else + { + pSecInfo->privLevel = RS_PRIV_LEVEL_KERNEL; + pSecInfo->paramLocation = PARAM_LOCATION_KERNEL; + } +} + +/* +NV04_ALLOC + NVOS21_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvV32 status; +*/ + +static void _nv04Alloc +( + NVOS21_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + NvP64_NULL, &secInfo); +} // end of Nv04Alloc() + +/* +NV04_ALLOC + NVOS21_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvV32 status; +*/ + +static void _nv04AllocWithSecInfo +( + NVOS21_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + NvP64_NULL, &secInfo); +} // end of _nv04AllocWithSecInfo() + +/* +NV04_ALLOC_WITH_ACCESS + NVOS64_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvP64 pRightsRequested; + NvV32 status; +*/ + +static void _nv04AllocWithAccess +( + NVOS64_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + pArgs->pRightsRequested, &secInfo); +} // end of _nv04AllocWithAccess() + +static void _nv04AllocWithAccessSecInfo +( + NVOS64_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, 
pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + pArgs->pRightsRequested, &secInfo); +} // end of _nv04AllocWithAccessSecInfo() + +/* +NV01_FREE + NVOS00_PARAMETERS: + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +*/ + +static void _nv01Free +( + NVOS00_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->FreeWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectOld, RMAPI_FREE_FLAGS_NONE, &secInfo); +} // end of Nv01Free() + +/* +NV01_FREE + NVOS00_PARAMETERS: + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +*/ + +static void _nv01FreeWithSecInfo +( + NVOS00_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->FreeWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectOld, RMAPI_FREE_FLAGS_NONE, &secInfo); +} // end of Nv01FreeWithSecInfo() + +/* +NV04_MAP_MEMORY + NVOS33_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; + NvU64 length; + NvP64 pLinearAddress; + NvU32 status; + NvU32 flags; +*/ +static void _nv04MapMemory +( + NVOS33_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->MapToCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, pArgs->offset, + pArgs->length, &pArgs->pLinearAddress, pArgs->flags, &secInfo); +} // end of Nv04MapMemory() + +static void _nv04MapMemoryWithSecInfo +( + NVOS33_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->MapToCpuWithSecInfoV2(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, pArgs->offset, + pArgs->length, &pArgs->pLinearAddress, &pArgs->flags, &secInfo); +} + +/* +NV04_UNMAP_MEMORY + NVOS34_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; + NvU32 status; + NvU32 flags; +*/ +static void _nv04UnmapMemory +( + NVOS34_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? 
RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->UnmapFromCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, + pArgs->pLinearAddress, pArgs->flags, osGetCurrentProcess(), &secInfo); +} // end of Nv04UnmapMemory() + +static void _nv04UnmapMemoryWithSecInfo +( + NVOS34_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->UnmapFromCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, + pArgs->pLinearAddress, pArgs->flags, osGetCurrentProcess(), &secInfo); +} + +static void _nv04MapMemoryDma +( + NVOS46_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->MapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->offset, pArgs->length, pArgs->flags, + &pArgs->dmaOffset, &secInfo); +} // end of Nv04MapMemoryDma() + +static void _nv04MapMemoryDmaWithSecInfo +( + NVOS46_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->MapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->offset, pArgs->length, pArgs->flags, + &pArgs->dmaOffset, &secInfo); +} + +/* +NV04_UNMAP_MEMORY_DMA + NVOS47_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hDma; + NvHandle hMemory; + NvV32 flags; + NvU64 dmaOffset; + NvV32 status; +*/ +static void _nv04UnmapMemoryDma +( + NVOS47_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->UnmapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->flags, pArgs->dmaOffset, &secInfo); +} // end of Nv04UnmapMemoryDma() + +static void _nv04UnmapMemoryDmaWithSecInfo +( + NVOS47_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->UnmapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->flags, pArgs->dmaOffset, &secInfo); +} + +/* +NV04_CONTROL + NVOS54_PARAMETERS: + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvP64 params; + NvU32 paramsSize; + NvV32 status; +*/ +static void _nv04ControlWithSecInfo +( + NVOS54_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo, + NvBool bInternalCall +) +{ + { + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? 
RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + + pArgs->status = pRmApi->ControlWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, pArgs->cmd, + pArgs->params, pArgs->paramsSize, pArgs->flags, &secInfo); + } +} // end of Nv04Control() + +/* +NV04_CONTROL + NVOS54_PARAMETERS: + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvP64 params; + NvU32 paramsSize; + NvV32 status; +*/ +static void _nv04Control +( + NVOS54_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + API_SECURITY_INFO secInfo = {0}; + XlateUserModeArgsToSecInfo(bUserModeArgs, bInternalCall, &secInfo); + _nv04ControlWithSecInfo(pArgs, secInfo, bInternalCall); +} // end of Nv04Control() + +/* +NV04_DUP_OBJECT + NVOS55_PARAMETERS: + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvHandle hClientSrc; + NvHandle hObjectSrc; + NvU32 flags; + NvU32 status; +*/ +static void _nv04DupObject +( + NVOS55_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->DupObjectWithSecInfo(pRmApi, pArgs->hClient, pArgs->hParent, &pArgs->hObject, + pArgs->hClientSrc, pArgs->hObjectSrc, pArgs->flags, &secInfo); +} // end of Nv04DupObject() + +static void _nv04DupObjectWithSecInfo +( + NVOS55_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->DupObjectWithSecInfo(pRmApi, pArgs->hClient, pArgs->hParent, &pArgs->hObject, + pArgs->hClientSrc, pArgs->hObjectSrc, pArgs->flags, &secInfo); +} + +static void _nv04Share +( + NVOS57_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->ShareWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, + &pArgs->sharePolicy, &secInfo); +} // end of Nv04Share() + +static void _nv04ShareWithSecInfo +( + NVOS57_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->ShareWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, + &pArgs->sharePolicy, &secInfo); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h new file mode 100644 index 0000000..75103d7 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h @@ -0,0 +1,428 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _ENTRYPOINTS_H_ +#define _ENTRYPOINTS_H_ + +// +// Internal handlers for RM APIs +// + +NV_STATUS +rmapiAlloc +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + void *pAllocParams +); + +NV_STATUS +rmapiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +); + +NV_STATUS +rmapiAllocWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequired, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiAllocWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequired, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +); + +NV_STATUS +rmapiFreeWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFreeWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFreeClientList +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients +); + +NV_STATUS +rmapiFreeClientListWithSecInfo +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFreeClientListWithSecInfoTls +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +); + +NV_STATUS +rmapiControlWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiControlWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDupObject +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +); + +NV_STATUS +rmapiDupObjectWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDupObjectWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiShare +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy +); + +NV_STATUS +rmapiShareWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + 
API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiShareWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + void **ppCpuVirtAddr, + NvU32 flags +); + +NV_STATUS +rmapiMapToCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpuWithSecInfoV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); +NV_STATUS +rmapiMapToCpuWithSecInfoTlsV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapFromCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pLinearAddress, + NvU32 flags, + NvU32 ProcessId +); + +NV_STATUS +rmapiUnmapFromCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapFromCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset +); + +NV_STATUS +rmapiMapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +); + + +NV_STATUS +rmapiMapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset +); + +NV_STATUS +rmapiUnmapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +); + +#endif // _ENTRYPOINTS_H_ + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c new file mode 100644 index 0000000..95b347d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c @@ -0,0 +1,633 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 
1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "os/os.h" +#include "rmapi/event.h" +#include "rmapi/resource_fwd_decls.h" +#include "vgpu/rpc.h" +#include "gpu/device/device.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" + +#include "resserv/rs_client.h" +#include "class/cl0005.h" + +#include "ctrl/ctrl0000/ctrl0000event.h" // NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_* + +#if (!NV_RM_STUB_RPC) +static NV_STATUS _eventRpcForType(NvHandle hClient, NvHandle hObject); +#endif + +NV_STATUS +eventConstruct_IMPL +( + Event *pEvent, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV0005_ALLOC_PARAMETERS *pNv0050AllocParams = pParams->pAllocParams; + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pClientRef; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION *ppEventNotification; + NvHandle hChannel = 0x0; + OBJGPU *pGpu = NULL; + RS_PRIV_LEVEL privLevel = pParams->pSecInfo->privLevel; + NvBool bUserOsEventHandle = NV_FALSE; + + // never allow user mode/non-root clients to create ring0 callbacks as + // we can not trust the function pointer (encoded in data). + if ((NV01_EVENT_KERNEL_CALLBACK == pResourceRef->externalClassId) || + (NV01_EVENT_KERNEL_CALLBACK_EX == pResourceRef->externalClassId)) + { + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + // sometimes it is nice to hook up callbacks for debug purposes + // -- but disable the override for release builds! +#if defined(DEBUG) || defined(DEVELOP) + if (!(pNv0050AllocParams->notifyIndex & NV01_EVENT_PERMIT_NON_ROOT_EVENT_KERNEL_CALLBACK_CREATION)) +#endif + { + return NV_ERR_ILLEGAL_ACTION; + } + } + } + +#if (!NV_RM_STUB_RPC) + if (_eventRpcForType(pNv0050AllocParams->hParentClient, pNv0050AllocParams->hSrcResource)) + { + RsResourceRef *pSrcRef; + NV_STATUS tmpStatus; + + tmpStatus = serverutilGetResourceRef(pNv0050AllocParams->hParentClient, + pNv0050AllocParams->hSrcResource, + &pSrcRef); + + if (tmpStatus == NV_OK) + { + hChannel = pSrcRef->pParentRef ? pSrcRef->pParentRef->hResource : 0; + pGpu = CliGetGpuFromContext(pSrcRef, NULL); + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "RmAllocEvent could not set pGpu. 
hClient=0x%x, hObject=0x%x\n", + pRsClient->hClient, pResourceRef->hResource); + } + } + } +#endif + + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pRsClient->hClient, &pClientRef)); + + // add event to client and parent object + rmStatus = eventInit(pEvent, + pCallContext, + pNv0050AllocParams->hParentClient, + pNv0050AllocParams->hSrcResource, + &ppEventNotification); + if (rmStatus == NV_OK) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + // In RM-offload, we don't allocate ContextDma in GSP-RM unless there + // is any necessity to use it (e.g. display channel binding time). So + // GSP-RM will find no valid object if the event is associated with + // ContextDma object. So we are ignoring the event allocation here if + // the event is associated with ContextDma object. + // + if (pGpu != NULL) + { + RsResourceRef *pSourceRef; + + if (IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN( + serverutilGetResourceRef(pNv0050AllocParams->hParentClient, + pNv0050AllocParams->hSrcResource, + &pSourceRef)); + } + + if ( + (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_GSP_CLIENT(pGpu) && pSourceRef->internalClassId != classId(ContextDma)) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !(pNv0050AllocParams->notifyIndex & NV01_EVENT_NONSTALL_INTR)))) + { + // + // In SR-IOV enabled systems, nonstall events can be registered + // directly with guest RM since guest RM is capable of + // receiving and handling nonstall interrupts itself. In + // paravirtualized systems, we always need to use the RPC to + // host RM. + // + NV_RM_RPC_ALLOC_EVENT(pGpu, + pRsClient->hClient, + pEvent->hNotifierClient, + hChannel, + pEvent->hNotifierResource, + pResourceRef->hResource, + pResourceRef->externalClassId, + pNv0050AllocParams->notifyIndex, + rmStatus); + } + } + + if (NV01_EVENT_OS_EVENT == pResourceRef->externalClassId) + { + // convert a user event handle to its kernel equivalent. 
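+            // The handle supplied in 'data' comes from user space, so it is
+            // resolved to a kernel-usable pointer here; bUserOsEventHandle is
+            // set so that the OS object reference taken during the conversion
+            // can be dropped when the notification is later unregistered.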
+ if (privLevel <= RS_PRIV_LEVEL_USER_ROOT) + { + rmStatus = osUserHandleToKernelPtr(pRsClient->hClient, + pNv0050AllocParams->data, + &pNv0050AllocParams->data); + bUserOsEventHandle = NV_TRUE; + } + } + + if (rmStatus == NV_OK) + rmStatus = registerEventNotification(ppEventNotification, + pRsClient->hClient, + pEvent->hNotifierResource, + pResourceRef->hResource, + pNv0050AllocParams->notifyIndex, + pResourceRef->externalClassId, + pNv0050AllocParams->data, + bUserOsEventHandle); + } + + if (rmStatus != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + eventDestruct_IMPL(pEvent); + return rmStatus; +} + +void eventDestruct_IMPL +( + Event *pEvent +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + + RsClient* pRsClient; + NvHandle hEventClient; + NV_STATUS status = NV_OK; + NvHandle hEvent; + NotifShare *pNotifierShare; + + resGetFreeParams(staticCast(pEvent, RsResource), &pCallContext, &pParams); + pRsClient = pCallContext->pClient; + hEventClient = pRsClient->hClient; + hEvent = pCallContext->pResourceRef->hResource; + + LOCK_METER_DATA(FREE_EVENT, 0, 0, 0); + + pNotifierShare = pEvent->pNotifierShare; + if (pNotifierShare != NULL) + { + if (pNotifierShare->pNotifier != NULL) + { + status = inotifyUnregisterEvent(pNotifierShare->pNotifier, + pNotifierShare->hNotifierClient, + pNotifierShare->hNotifierResource, + hEventClient, + hEvent); + } + serverFreeShare(&g_resServ, staticCast(pEvent->pNotifierShare, RsShared)); + } + + if (pParams != NULL) + pParams->status = status; +} + +NV_STATUS notifyUnregisterEvent_IMPL +( + Notifier *pNotifier, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + NvHandle hEventClient, + NvHandle hEvent +) +{ + NV_STATUS status = NV_OK; + PEVENTNOTIFICATION *ppEventNotification; + + ppEventNotification = inotifyGetNotificationListPtr(staticCast(pNotifier, INotifier)); + + // delete the event from the parent object and client + if (*ppEventNotification != NULL) + { + +#if (!NV_RM_STUB_RPC) + if (_eventRpcForType(hNotifierClient, hNotifierResource)) + { + OBJGPU *pGpu = CliGetGpuFromHandle(hNotifierClient, hNotifierResource, NULL); + + if (pGpu != NULL) + { + RsResourceRef *pNotifierRef = NULL; + + if (IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN(serverutilGetResourceRef(hNotifierClient, hNotifierResource, &pNotifierRef)); + } + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_GSP_CLIENT(pGpu) && pNotifierRef->internalClassId != classId(ContextDma)) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !((*ppEventNotification)->bNonStallIntrEvent))) + { + // + // In SR-IOV enabled systems, nonstall events are registered + // directly with guest RM since guest RM is capable of + // receiving and handling nonstall interrupts itself. We skip + // the allocation, so here, we skip the free too. In + // paravirtualized systems, we always need to use the RPC to + // host RM. + // + NV_RM_RPC_FREE(pGpu, hEventClient, hEventClient, hEvent, status); + } + } + else + { + NV_PRINTF(LEVEL_ERROR, + "RmFreeEvent could not set pGpu. 
hClient=0x%x, hObject=0x%x\n", + hNotifierClient, hNotifierResource); + } + } +#endif + + unregisterEventNotification(ppEventNotification, + hEventClient, + hNotifierResource, + hEvent); + + } + + return status; +} + +NV_STATUS +eventInit_IMPL +( + Event *pEvent, + CALL_CONTEXT *pCallContext, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + PEVENTNOTIFICATION **pppEventNotification +) +{ + NV_STATUS rmStatus = NV_OK; + RsClient *pRsClient = pCallContext->pClient; + RsClient *pNotifierClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NotifShare *pNotifierShare = NULL; + + // validate event class + switch (pResourceRef->externalClassId) + { + case NV01_EVENT_KERNEL_CALLBACK: + case NV01_EVENT_KERNEL_CALLBACK_EX: + case NV01_EVENT_OS_EVENT: + break; + + default: + return NV_ERR_INVALID_CLASS; + } + + // RS-TODO remove support for this after per-client locking is enabled + if (pRsClient->hClient != hNotifierClient) + { + rmStatus = serverGetClientUnderLock(&g_resServ, hNotifierClient, &pNotifierClient); + if (rmStatus != NV_OK) + return rmStatus; + } + else + { + pNotifierClient = pRsClient; + } + + if (pNotifierClient != NULL) + { + RsResourceRef *pNotifierRef; + INotifier *pNotifier; + if (clientGetResourceRef(pNotifierClient, hNotifierResource, &pNotifierRef) != NV_OK) + return NV_ERR_INVALID_OBJECT; + + pNotifier = dynamicCast(pNotifierRef->pResource, INotifier); + if (pNotifier == NULL) + return NV_ERR_INVALID_OBJECT; + + rmStatus = inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, &pNotifierShare); + if (rmStatus != NV_OK) + return rmStatus; + + *pppEventNotification = inotifyGetNotificationListPtr(pNotifierShare->pNotifier); + } + + serverRefShare(&g_resServ, staticCast(pNotifierShare, RsShared)); + pEvent->pNotifierShare = pNotifierShare; + + // RS-TODO these can be looked up from share + pEvent->hNotifierClient = hNotifierClient; + pEvent->hNotifierResource = hNotifierResource; + pEvent->hEvent = pCallContext->pResourceRef->hResource; + + return rmStatus; +} + +NV_STATUS +notifyGetOrAllocNotifShare_IMPL +( + Notifier *pNotifier, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + NotifShare **ppNotifierShare +) +{ + NV_STATUS status; + NotifShare *pNotifierShare; + + // + // Most objects that are notifiers will never have any events to notify so + // notifier shares are allocated as needed (i.e., when an event + // registers itself with the notifier.) 
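+    // The share is reference-counted by resource server: eventInit takes a
+    // reference for each registered event and eventDestruct releases it, so
+    // the share can outlive this notifier until the last event lets go.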
+ // + pNotifierShare = inotifyGetNotificationShare(staticCast(pNotifier, INotifier)); + if (pNotifierShare == NULL) + { + RsShared *pShare; + status = serverAllocShare(&g_resServ, classInfo(NotifShare), &pShare); + if (status != NV_OK) + return status; + + pNotifierShare = dynamicCast(pShare, NotifShare); + pNotifierShare->pNotifier = staticCast(pNotifier, INotifier); + pNotifierShare->hNotifierClient = hNotifierClient; + pNotifierShare->hNotifierResource = hNotifierResource; + inotifySetNotificationShare(staticCast(pNotifier, INotifier), pNotifierShare); + } + + if (ppNotifierShare) + *ppNotifierShare = pNotifierShare; + + return NV_OK; +} + +NV_STATUS +CliGetEventNotificationList +( + NvHandle hClient, + NvHandle hObject, + INotifier **ppNotifier, + PEVENTNOTIFICATION **pppEventNotification +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + RsClient *pRsClient; + INotifier *pNotifier; + + *pppEventNotification = NULL; + + // Populate Resource Server information + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return status; + + status = clientGetResourceRef(pRsClient, hObject, &pResourceRef); + if (status != NV_OK) + return status; + + pNotifier = dynamicCast(pResourceRef->pResource, INotifier); + if (pNotifier != NULL) + *pppEventNotification = inotifyGetNotificationListPtr(pNotifier); + + if (*pppEventNotification == NULL) + return NV_ERR_INVALID_OBJECT; + + if (ppNotifier != NULL) + *ppNotifier = pNotifier; + + return NV_OK; +} + +NvBool +CliGetEventInfo +( + NvHandle hClient, + NvHandle hEvent, + Event **ppEvent +) +{ + RmClient *pClient; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_FALSE; + + pRsClient = staticCast(pClient, RsClient); + if (clientGetResourceRefByType(pRsClient, hEvent, classId(Event), &pResourceRef) != NV_OK) + return NV_FALSE; + + if (pResourceRef->pResource != NULL) + { + *ppEvent = dynamicCast(pResourceRef->pResource, Event); + return NV_TRUE; + } + + return NV_FALSE; + +} + +NvBool +CliDelObjectEvents +( + NvHandle hClient, + NvHandle hResource +) +{ + NotifShare *pNotifierShare; + INotifier *pNotifier; + RsClient *pRsClient; + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NV_FALSE; + + status = clientGetResourceRef(pRsClient, hResource, &pResourceRef); + if (status != NV_OK) + return NV_FALSE; + + // If not a notifier object, there aren't any events to free + pNotifier = dynamicCast(pResourceRef->pResource, INotifier); + + if (pNotifier == NULL) + return NV_TRUE; + + pNotifierShare = inotifyGetNotificationShare(pNotifier); + if (pNotifierShare != NULL) + { + while(pNotifierShare->pEventList != NULL) + { + PEVENTNOTIFICATION pEventNotif = pNotifierShare->pEventList; + status = inotifyUnregisterEvent(pNotifier, + pNotifierShare->hNotifierClient, + pNotifierShare->hNotifierResource, + pEventNotif->hEventClient, + pEventNotif->hEvent); + } + pNotifierShare->pNotifier = NULL; + } + + return NV_TRUE; + +} // end of CliDelObjectEvents() + +// **************************************************************************** +// System events +// **************************************************************************** + +void CliAddSystemEvent( + NvU32 event, + NvU32 status +) +{ + NvU32 temp; + PEVENTNOTIFICATION pEventNotification = NULL; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + 
RsResourceRef *pCliResRef; + NV_STATUS rmStatus = NV_OK; + Notifier *pNotifier; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (pClient->CliSysEventInfo.notifyActions[event] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + continue; + } + + temp = (pClient->CliSysEventInfo.systemEventsQueue.Head + 1) % NV_SYSTEM_EVENT_QUEUE_SIZE; + + if (temp == pClient->CliSysEventInfo.systemEventsQueue.Tail) + { + NV_PRINTF(LEVEL_ERROR, "system event queue is full"); + return; + } + + pClient->CliSysEventInfo.systemEventsQueue.EventQueue[pClient->CliSysEventInfo.systemEventsQueue.Head].event = event; + pClient->CliSysEventInfo.systemEventsQueue.EventQueue[pClient->CliSysEventInfo.systemEventsQueue.Head].status = status; + pClient->CliSysEventInfo.systemEventsQueue.Head = temp; + + rmStatus = clientGetResourceRef(staticCast(pClient, RsClient), pRsClient->hClient, &pCliResRef); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to look up resource reference handle: 0x%x\n", + pRsClient->hClient); + return; + } + + pNotifier = dynamicCast(pCliResRef->pResource, Notifier); + if (pNotifier != NULL) + pEventNotification = inotifyGetNotificationList(staticCast(pNotifier, INotifier)); + + if (pEventNotification != NULL) + { + while (pEventNotification) + { + if (pEventNotification->NotifyIndex == event) + { + if (osNotifyEvent(NULL, pEventNotification, 0, 0, 0) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to deliver event 0x%x", + event); + } + } + pEventNotification = pEventNotification->Next; + } + + if (pClient->CliSysEventInfo.notifyActions[event] == NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE) + { + pClient->CliSysEventInfo.notifyActions[event] = NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + } + } + + return; +} + +#if (!NV_RM_STUB_RPC) +static NV_STATUS +_eventRpcForType(NvHandle hClient, NvHandle hObject) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hClient, hObject, &pResourceRef); + + if (status != NV_OK) + { + return NV_FALSE; + } + + if (objDynamicCastById(pResourceRef->pResource, classId(Subdevice)) || + objDynamicCastById(pResourceRef->pResource, classId(ChannelDescendant)) || + objDynamicCastById(pResourceRef->pResource, classId(ContextDma)) || + objDynamicCastById(pResourceRef->pResource, classId(DispChannel)) || + objDynamicCastById(pResourceRef->pResource, classId(TimerApi)) || + objDynamicCastById(pResourceRef->pResource, classId(KernelSMDebuggerSession))) + { + return NV_TRUE; + } + + return NV_FALSE; +} +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c new file mode 100644 index 0000000..a75ca65 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c @@ -0,0 +1,696 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmapi/event_buffer.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "class/cl0040.h" + +static NV_STATUS _allocAndMapMemory(CALL_CONTEXT *pCallContext, NvP64 pAddress, MEMORY_DESCRIPTOR** ppMemDesc, NvU64 size, NvBool bKernel, + NvP64* pKernelAddr, NvP64* pKernelPriv, NvP64* pUserAddr, NvP64* pUserPriv); + +static void _unmapAndFreeMemory(MEMORY_DESCRIPTOR *pMemDesc, NvBool bKernel, NvP64 kernelAddr, + NvP64 kernelPriv, NvP64 userAddr, NvP64 userPriv); + +NV_STATUS +eventbufferConstruct_IMPL +( + EventBuffer *pEventBuffer, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + NV_EVENT_BUFFER_ALLOC_PARAMETERS *pAllocParams = pParams->pAllocParams; + + EVENT_BUFFER_MAP_INFO *pKernelMap = &pEventBuffer->kernelMapInfo; + EVENT_BUFFER_MAP_INFO *pClientMap = &pEventBuffer->clientMapInfo; + + NvU32 hClient = pCallContext->pClient->hClient; + NvBool bKernel = (rmclientGetCachedPrivilegeByHandle(hClient) >= RS_PRIV_LEVEL_KERNEL); + + NvU32 recordBufferSize; + NvP64 kernelNotificationhandle; + Subdevice *pSubdevice = NULL; + NvBool bInternalAlloc = (pAllocParams->hBufferHeader == 0); + NvBool bNoDeviceMem = NV_FALSE; + NvBool bUsingVgpuStagingBuffer = NV_FALSE; + OBJGPU *pGpu = NULL; + RsResourceRef *pHeaderRef = NULL; + RsResourceRef *pRecordRef = NULL; + RsResourceRef *pVardataRef = NULL; + NvHandle hMapperClient = 0; + NvHandle hMapperDevice = 0; + + pAllocParams->bufferHeader = NvP64_NULL; + pAllocParams->recordBuffer = NvP64_NULL; + pAllocParams->vardataBuffer = NvP64_NULL; + + if (bInternalAlloc) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvBool bSupported = pSys->getProperty(pSys, PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED); + NV_ASSERT_OR_RETURN(bSupported, NV_ERR_NOT_SUPPORTED); + } + else + { + NV_ASSERT_OR_RETURN((pAllocParams->hRecordBuffer != 0), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((pAllocParams->vardataBufferSize == 0) ^ (pAllocParams->hVardataBuffer != 0)), + NV_ERR_INVALID_ARGUMENT); + + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hBufferHeader, &pHeaderRef); + 
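+        // hBufferHeader, hRecordBuffer and the optional hVardataBuffer are
+        // resolved the same way; the externalClassId checks below reject any
+        // attempt to mix backing-memory types between them.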
if (status != NV_OK) + return status; + + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hRecordBuffer, &pRecordRef); + if (status != NV_OK) + return status; + + // Avoid mixing and matching backing-memory + if (pRecordRef->externalClassId != pHeaderRef->externalClassId) + return NV_ERR_INVALID_ARGUMENT; + + if (pAllocParams->hVardataBuffer != 0) + { + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hVardataBuffer, &pVardataRef); + if (status != NV_OK) + return status; + + if (pVardataRef->externalClassId != pHeaderRef->externalClassId) + return NV_ERR_INVALID_ARGUMENT; + } + + if (!bNoDeviceMem) + { + if (pAllocParams->hSubDevice == 0) + { + NV_PRINTF(LEVEL_WARNING, "hSubDevice must be provided.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + } + + // bound check inputs and also check for overflow + if ((pAllocParams->recordSize == 0) || (pAllocParams->recordCount == 0) || + (!portSafeMulU32(pAllocParams->recordSize, pAllocParams->recordCount, &recordBufferSize)) || + (recordBufferSize / pAllocParams->recordCount != pAllocParams->recordSize) || + (pAllocParams->recordsFreeThreshold > pAllocParams->recordCount) || + (pAllocParams->vardataFreeThreshold > pAllocParams->vardataBufferSize)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pEventBuffer->hClient = pCallContext->pClient->hClient; + pEventBuffer->hSubDevice = pAllocParams->hSubDevice; + if (pEventBuffer->hSubDevice) + { + status = subdeviceGetByHandle(pCallContext->pClient, pEventBuffer->hSubDevice, &pSubdevice); + if (status != NV_OK) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pEventBuffer->subDeviceInst = pSubdevice->subDeviceInst; + pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (!bNoDeviceMem) + { + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + // Staging buffer should be mapped as read-only in guest RM + bUsingVgpuStagingBuffer = NV_TRUE; + } + + if (!bKernel) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + status = rmapiutilAllocClientAndDeviceHandles(pRmApi, + pGpu, + &pEventBuffer->hInternalClient, + &pEventBuffer->hInternalDevice, + &pEventBuffer->hInternalSubdevice); + + if (status != NV_OK) + return status; + + hMapperClient = pEventBuffer->hInternalClient; + hMapperDevice = pEventBuffer->hInternalDevice; + } + else + { + hMapperClient = pCallContext->pClient->hClient; + hMapperDevice = RES_GET_PARENT_HANDLE(pSubdevice); + } + } + } + + + // + // Use goto cleanup on failure below here + // + + if (!bInternalAlloc) + { + Memory *pMemory; + NvBool bRequireReadOnly = bUsingVgpuStagingBuffer || !bKernel; + + // + // Buffer header + // + pEventBuffer->pHeader = dynamicCast(pHeaderRef->pResource, Memory); + pMemory = pEventBuffer->pHeader; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < sizeof(NV_EVENT_BUFFER_HEADER)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->headerAddr, + bUsingVgpuStagingBuffer + ? 
DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY) + : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE)); + + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + if (status != NV_OK) + goto cleanup; + + pKernelMap->headerAddr = pMemory->KernelVAddr; + } + + // + // Record buffer + // + pEventBuffer->pRecord = dynamicCast(pRecordRef->pResource, Memory); + pMemory = pEventBuffer->pRecord; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < recordBufferSize) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->recordBuffAddr, + bUsingVgpuStagingBuffer + ? DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY) + : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE)); + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + pKernelMap->recordBuffAddr = pMemory->KernelVAddr; + if (status != NV_OK) + goto cleanup; + } + + // + // Vardata buffer [optional] + // + if (pAllocParams->hVardataBuffer != 0) + { + pEventBuffer->pVardata = dynamicCast(pVardataRef->pResource, Memory); + pMemory = pEventBuffer->pVardata; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < pAllocParams->vardataBufferSize) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->recordBuffAddr, + bUsingVgpuStagingBuffer + ? 
DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY) + : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE)); + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + if (status != NV_OK) + goto cleanup; + } + + pKernelMap->vardataBuffAddr = pMemory->KernelVAddr; + + refAddDependant(pVardataRef, pCallContext->pResourceRef); + } + + refAddDependant(pHeaderRef, pCallContext->pResourceRef); + refAddDependant(pRecordRef, pCallContext->pResourceRef); + } + + if (bInternalAlloc) + { + status = _allocAndMapMemory(pCallContext, + pAllocParams->bufferHeader, + &pEventBuffer->pHeaderDesc, + sizeof(NV_EVENT_BUFFER_HEADER), + bKernel, + &pKernelMap->headerAddr, + &pKernelMap->headerPriv, + &pClientMap->headerAddr, + &pClientMap->headerPriv); + if (status != NV_OK) + goto cleanup; + + status = _allocAndMapMemory(pCallContext, + pAllocParams->recordBuffer, + &pEventBuffer->pRecordBufDesc, + recordBufferSize, + bKernel, + &pKernelMap->recordBuffAddr, + &pKernelMap->recordBuffPriv, + &pClientMap->recordBuffAddr, + &pClientMap->recordBuffPriv); + if (status != NV_OK) + goto cleanup; + } + + eventBufferInitRecordBuffer(&pEventBuffer->producerInfo, + KERNEL_POINTER_FROM_NvP64(NV_EVENT_BUFFER_HEADER*, pKernelMap->headerAddr), + pKernelMap->recordBuffAddr, + pAllocParams->recordSize, + pAllocParams->recordCount, + recordBufferSize, + pAllocParams->recordsFreeThreshold); + + // not needed for all events, such as FECS context switch events + if (pAllocParams->vardataBufferSize != 0) + { + if (bInternalAlloc) + { + status = _allocAndMapMemory(pCallContext, + pAllocParams->vardataBuffer, + &pEventBuffer->pVardataBufDesc, + pAllocParams->vardataBufferSize, + bKernel, + &pKernelMap->vardataBuffAddr, + &pKernelMap->vardataBuffPriv, + &pClientMap->vardataBuffAddr, + &pClientMap->vardataBuffPriv); + + if (status != NV_OK) + goto cleanup; + } + + eventBufferInitVardataBuffer(&pEventBuffer->producerInfo, + pKernelMap->vardataBuffAddr, + pAllocParams->vardataBufferSize, + pAllocParams->vardataFreeThreshold); + } + + kernelNotificationhandle = (NvP64)pAllocParams->notificationHandle; + if (bKernel != NV_TRUE) + status = osUserHandleToKernelPtr(pCallContext->pClient->hClient, + kernelNotificationhandle, + &kernelNotificationhandle); + + eventBufferInitNotificationHandle(&pEventBuffer->producerInfo, kernelNotificationhandle); + eventBufferSetEnable(&pEventBuffer->producerInfo, NV_FALSE); + + // return user mode mappings + pAllocParams->bufferHeader = pClientMap->headerAddr; + pAllocParams->recordBuffer = pClientMap->recordBuffAddr; + pAllocParams->vardataBuffer = pClientMap->vardataBuffAddr; + + return NV_OK; + +cleanup: + eventbufferDestruct_IMPL(pEventBuffer); + return status; +} + +void +eventbufferDestruct_IMPL +( + EventBuffer *pEventBuffer +) +{ + CALL_CONTEXT *pCallContext; + EVENT_BUFFER_MAP_INFO *pClientMap = &pEventBuffer->clientMapInfo; + EVENT_BUFFER_MAP_INFO *pKernelMap = &pEventBuffer->kernelMapInfo; + NvBool bKernel = rmclientGetCachedPrivilegeByHandle(pEventBuffer->hClient) >= RS_PRIV_LEVEL_KERNEL; + void *notificationHandle = NvP64_VALUE(pEventBuffer->producerInfo.notificationHandle); + + resGetFreeParams(staticCast(pEventBuffer, RsResource), &pCallContext, NULL); + + if (notificationHandle != NULL) + { + osDereferenceObjectCount(notificationHandle); + } + + _unmapAndFreeMemory(pEventBuffer->pHeaderDesc, bKernel, pKernelMap->headerAddr, + pKernelMap->headerPriv, pClientMap->headerAddr, pClientMap->headerPriv); + + 
_unmapAndFreeMemory(pEventBuffer->pRecordBufDesc, bKernel, pKernelMap->recordBuffAddr, + pKernelMap->recordBuffPriv, pClientMap->recordBuffAddr, pClientMap->recordBuffPriv); + + _unmapAndFreeMemory(pEventBuffer->pVardataBufDesc, bKernel, pKernelMap->vardataBuffAddr, + pKernelMap->vardataBuffPriv, pClientMap->vardataBuffAddr, pClientMap->vardataBuffPriv); + + if (pEventBuffer->hInternalClient != 0) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + pRmApi->Free(pRmApi, pEventBuffer->hInternalClient, pEventBuffer->hInternalClient); + } + +} + +NV_STATUS +_allocAndMapMemory +( + CALL_CONTEXT *pCallContext, + NvP64 pAddress, + MEMORY_DESCRIPTOR** ppMemDesc, + NvU64 size, + NvBool bKernel, + NvP64* pKernelAddr, + NvP64* pKernelPriv, + NvP64* pUserAddr, + NvP64* pUserPriv +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR* pMemDesc = NULL; + + NV_ASSERT_OR_RETURN(pAddress == NvP64_NULL, NV_ERR_NOT_SUPPORTED); + + status = memdescCreate(ppMemDesc, NULL, size, 0, NV_MEMORY_CONTIGUOUS, + ADDR_SYSMEM, NV_MEMORY_WRITECOMBINED, MEMDESC_FLAGS_CPU_ONLY); + if (status != NV_OK) + return status; + + pMemDesc = *ppMemDesc; + + status = osAllocPages(pMemDesc); + if (status != NV_OK) + goto cleanup; + pMemDesc->Allocated = 1; + + // map memory to kernel VA space + status = memdescMap(pMemDesc, 0, size, NV_TRUE, NV_PROTECT_READ_WRITE, + pKernelAddr, pKernelPriv); + if (status != NV_OK) + goto cleanup; + + portMemSet(NvP64_VALUE(*pKernelAddr), 0, size); + + // map memory to user VA space + status = memdescMap(pMemDesc, 0, size, bKernel, NV_PROTECT_READABLE, + pUserAddr, pUserPriv); + + if (status != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + _unmapAndFreeMemory(pMemDesc, bKernel, *pKernelAddr, *pKernelPriv, *pUserAddr, *pUserPriv); + return status; +} + +static void +_unmapAndFreeMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool bKernel, + NvP64 kernelAddr, + NvP64 kernelPriv, + NvP64 userAddr, + NvP64 userPriv +) +{ + if (pMemDesc == NULL) + return; + + if (userAddr) + memdescUnmap(pMemDesc, bKernel, osGetCurrentProcess(), userAddr, userPriv); + + if (kernelAddr) + memdescUnmap(pMemDesc, NV_TRUE, osGetCurrentProcess(), kernelAddr, kernelPriv); + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); +} + +NV_STATUS +eventbuffertBufferCtrlCmdFlush_IMPL +( + EventBuffer *pEventBuffer +) +{ + return NV_OK; +} + +NV_STATUS +eventbuffertBufferCtrlCmdEnableEvent_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams +) +{ + GPU_MASK gpuMask; + NV_STATUS status = NV_OK; + NvBool updateTelemetry = NV_FALSE; + + if (pEnableParams->flags & + ~(NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST|NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pEnableParams->enable && !pEventBuffer->producerInfo.isEnabled) + { + updateTelemetry = NV_TRUE; + } + + eventBufferSetEnable(&pEventBuffer->producerInfo, pEnableParams->enable); + if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST) + eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_TRUE); + else if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST) + eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_FALSE); + + // NvTelemetry requires a valid subdevice + if (updateTelemetry && pEventBuffer->hSubDevice) + { + NvHandle hClient = RES_GET_CLIENT_HANDLE(pEventBuffer); + NvHandle hDevice; + OBJGPU *pGpu; + + status = rmGpuGroupLockAcquire(pEventBuffer->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + 
GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, &gpuMask); + if (status != NV_OK) + return status; + + status = CliSetSubDeviceContext(hClient, pEventBuffer->hSubDevice, &hDevice, &pGpu); + + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + return NV_OK; +} + +NV_STATUS +eventbuffertBufferCtrlCmdUpdateGet_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams +) +{ + EVENT_BUFFER_PRODUCER_INFO *pProducerInfo = &pEventBuffer->producerInfo; + NvP64 pVardataBuf = pEventBuffer->kernelMapInfo.vardataBuffAddr; + + if ((pUpdateParams->recordBufferGet >= eventBufferGetRecordBufferCount(pProducerInfo)) || + (pVardataBuf == NvP64_NULL && pUpdateParams->varDataBufferGet > 0) || + (pVardataBuf != NvP64_NULL && pUpdateParams->varDataBufferGet >= eventBufferGetVardataBufferCount(pProducerInfo))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + eventBufferUpdateRecordBufferGet(pProducerInfo, pUpdateParams->recordBufferGet); + if (pVardataBuf) + eventBufferUpdateVardataBufferGet(pProducerInfo, pUpdateParams->varDataBufferGet); + + pEventBuffer->bNotifyPending = NV_FALSE; + + return NV_OK; +} + +/* + * eventbuffertBufferCtrlCmdPostTelemetryEvent posts an event to the event buffer for testing purposes. + * Note -- in order to post an event, a handle to the buffer is required. since the handle is + * only available to the client that created the buffer, one can only post events to buffers that + * it created. this has been done to limit the ability to post to buffers for testing purposes + * only. if it is determined that we want to open this up to other callers, then this ctrl call + * should be moved to the 2080 class & adjustments made for acquiring the pGpu based on the + * subdevice handle there. + */ +NV_STATUS +eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +eventBufferAdd(EventBuffer* pEventBuffer, void *pEventData, NvU32 recordType, NvBool *pBNotify, NvP64 *pHandle) +{ + EVENT_BUFFER_PRODUCER_DATA *pProducerData = (EVENT_BUFFER_PRODUCER_DATA*)pEventData; + RECORD_BUFFER_INFO *pRBI; + NV_EVENT_BUFFER_HEADER *pHeader; + + if (!pEventBuffer->producerInfo.isEnabled) + return NV_WARN_NOTHING_TO_DO; + + pRBI = &pEventBuffer->producerInfo.recordBuffer; + pHeader = pEventBuffer->producerInfo.recordBuffer.pHeader; + + NV_ASSERT_OR_RETURN(pHeader->recordPut < pRBI->totalRecordCount, NV_ERR_INVALID_STATE); + + eventBufferProducerAddEvent(&pEventBuffer->producerInfo, + recordType, 0, pProducerData); + + *pBNotify = (!pEventBuffer->bNotifyPending) && + (eventBufferIsNotifyThresholdMet(&pEventBuffer->producerInfo)); + *pHandle = pEventBuffer->producerInfo.notificationHandle; + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c new file mode 100644 index 0000000..7bd4110 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c @@ -0,0 +1,864 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************************* DMA Manager *****************************\ +* * +* Event notifications are handled in this module. DMA report and OS * +* action are dealt with on a per-object basis. * +* * +****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "class/cl0000.h" +#include "os/os.h" +#include "class/cl0005.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr/mem.h" + +static NV_STATUS _insertEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle +); + +static NV_STATUS _removeEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data, + PEVENTNOTIFICATION *ppOldEvent +); + +//--------------------------------------------------------------------------- +// +// Event support. 
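+// Each notifier object carries a singly linked list of EVENTNOTIFICATION
+// records; the helpers in this file insert into, remove from, and walk
+// that list.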
+// +//--------------------------------------------------------------------------- + +static NV_STATUS engineNonStallEventOp +( + OBJGPU *pGpu, + NvU32 engineId, + PEVENTNOTIFICATION pEventNotify, + Memory *pMemory, + NvBool bInsert +) +{ + ENGINE_EVENT_NODE *pTempNode; + NvBool bFound = NV_FALSE; + + if (bInsert) + { + pTempNode = portMemAllocNonPaged(sizeof(ENGINE_EVENT_NODE)); + + if (pTempNode == NULL) + return NV_ERR_NO_MEMORY; + + // Acquire engine list spinlock before adding to engine event list + portSyncSpinlockAcquire(pGpu->engineNonstallIntr[engineId].pSpinlock); + pTempNode->pNext = pGpu->engineNonstallIntr[engineId].pEventNode; + pTempNode->pEventNotify = pEventNotify; + pTempNode->pMemory = pMemory; + + pGpu->engineNonstallIntr[engineId].pEventNode = pTempNode; + + // Release engine list spinlock + portSyncSpinlockRelease(pGpu->engineNonstallIntr[engineId].pSpinlock); + } + else + { + ENGINE_EVENT_NODE *pEngNode, *pPrevNode = NULL; + + // Acquire engine list spinlock before traversing engine event list + portSyncSpinlockAcquire(pGpu->engineNonstallIntr[engineId].pSpinlock); + + pEngNode = pGpu->engineNonstallIntr[engineId].pEventNode; + while (pEngNode) + { + if (pEngNode->pEventNotify == pEventNotify) + { + if (pPrevNode == NULL) + pGpu->engineNonstallIntr[engineId].pEventNode = pEngNode->pNext; + else + pPrevNode->pNext = pEngNode->pNext; + + pTempNode = pEngNode; + bFound = NV_TRUE; + break; + } + else + { + pPrevNode = pEngNode; + } + pEngNode = pEngNode->pNext; + } + + // Release engine list spinlock + portSyncSpinlockRelease(pGpu->engineNonstallIntr[engineId].pSpinlock); + + if (bFound) + { + portMemFree(pTempNode); + } + else + { + NV_ASSERT_FAILED("failed to find non-stall event!"); + return NV_ERR_INVALID_STATE; + } + } + + return NV_OK; +} + +static NV_STATUS _engineNonStallIntrNotifyImpl(OBJGPU *pGpu, NvU32 engineId, NvHandle hEvent) +{ + ENGINE_EVENT_NODE *pTempHead; + Memory *pSemMemory; + NvU32 semValue; + NvU32 *pTempKernelMapping = NULL; + NV_STATUS rmStatus = NV_OK; + + // + // Acquire engine list spinlock before traversing the list. Note that this + // is called without holding locks from ISR for Linux. This spinlock is used + // to protect per GPU per engine event node list. + // + portSyncSpinlockAcquire(pGpu->engineNonstallIntr[engineId].pSpinlock); + + pTempHead = pGpu->engineNonstallIntr[engineId].pEventNode; + while (pTempHead) + { + if (!pTempHead->pEventNotify) + { + rmStatus = NV_ERR_INVALID_STATE; + break; + } + + if (hEvent && pTempHead->pEventNotify->hEvent != hEvent) + goto nextEvent; + + pSemMemory = pTempHead->pMemory; + + if (pSemMemory && pSemMemory->vgpuNsIntr.isSemaMemValidationEnabled && + pSemMemory->pMemDesc && pSemMemory->pMemDesc->Allocated) + { + pTempKernelMapping = (NvU32 *)NvP64_VALUE(memdescGetKernelMapping(pSemMemory->pMemDesc)); + if (pTempKernelMapping == NULL) + { + NV_PRINTF(LEVEL_WARNING, "Per-vGPU semaphore location mapping is NULL. 
Skipping the current node.\n"); + pTempHead = pTempHead->pNext; + continue; + } + semValue = MEM_RD32(pTempKernelMapping + (pSemMemory->vgpuNsIntr.nsSemOffset / sizeof(NvU32))); + + if (pSemMemory->vgpuNsIntr.nsSemValue == semValue) + { + pTempHead = pTempHead->pNext; + continue; + } + + pSemMemory->vgpuNsIntr.nsSemValue = semValue; + + } + + if (osNotifyEvent(pGpu, pTempHead->pEventNotify, 0, 0, NV_OK) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to notify event for engine 0x%x\n", + engineId); + NV_ASSERT(0); + rmStatus = NV_ERR_INVALID_STATE; + break; + } + + nextEvent: + pTempHead = pTempHead->pNext; + } + + portSyncSpinlockRelease(pGpu->engineNonstallIntr[engineId].pSpinlock); + return rmStatus; +} + +NV_STATUS +engineNonStallIntrNotify(OBJGPU *pGpu, NvU32 engineId) +{ + return _engineNonStallIntrNotifyImpl(pGpu, engineId, 0); +} + +NV_STATUS +engineNonStallIntrNotifyEvent(OBJGPU *pGpu, NvU32 engineId, NvHandle hEvent) +{ + return _engineNonStallIntrNotifyImpl(pGpu, engineId, hEvent); +} + +static NV_STATUS +eventGetEngineTypeFromSubNotifyIndex +( + NvU32 notifyIndex, + NvU32 *engineIdx +) +{ + NV_ASSERT_OR_RETURN(engineIdx, NV_ERR_INVALID_ARGUMENT); + + *engineIdx = NV2080_ENGINE_TYPE_NULL; + + switch (notifyIndex) + { + case NV2080_NOTIFIERS_FIFO_EVENT_MTHD: + *engineIdx = NV2080_ENGINE_TYPE_HOST; + break; + case NV2080_NOTIFIERS_CE0: + *engineIdx = NV2080_ENGINE_TYPE_COPY0; + break; + case NV2080_NOTIFIERS_CE1: + *engineIdx = NV2080_ENGINE_TYPE_COPY1; + break; + case NV2080_NOTIFIERS_CE2: + *engineIdx = NV2080_ENGINE_TYPE_COPY2; + break; + case NV2080_NOTIFIERS_CE3: + *engineIdx = NV2080_ENGINE_TYPE_COPY3; + break; + case NV2080_NOTIFIERS_CE4: + *engineIdx = NV2080_ENGINE_TYPE_COPY4; + break; + case NV2080_NOTIFIERS_CE5: + *engineIdx = NV2080_ENGINE_TYPE_COPY5; + break; + case NV2080_NOTIFIERS_CE6: + *engineIdx = NV2080_ENGINE_TYPE_COPY6; + break; + case NV2080_NOTIFIERS_CE7: + *engineIdx = NV2080_ENGINE_TYPE_COPY7; + break; + case NV2080_NOTIFIERS_CE8: + *engineIdx = NV2080_ENGINE_TYPE_COPY8; + break; + case NV2080_NOTIFIERS_CE9: + *engineIdx = NV2080_ENGINE_TYPE_COPY9; + break; + case NV2080_NOTIFIERS_GR0: + *engineIdx = NV2080_ENGINE_TYPE_GR0; + break; + case NV2080_NOTIFIERS_GR1: + *engineIdx = NV2080_ENGINE_TYPE_GR1; + break; + case NV2080_NOTIFIERS_GR2: + *engineIdx = NV2080_ENGINE_TYPE_GR2; + break; + case NV2080_NOTIFIERS_GR3: + *engineIdx = NV2080_ENGINE_TYPE_GR3; + break; + case NV2080_NOTIFIERS_GR4: + *engineIdx = NV2080_ENGINE_TYPE_GR4; + break; + case NV2080_NOTIFIERS_GR5: + *engineIdx = NV2080_ENGINE_TYPE_GR5; + break; + case NV2080_NOTIFIERS_GR6: + *engineIdx = NV2080_ENGINE_TYPE_GR6; + break; + case NV2080_NOTIFIERS_GR7: + *engineIdx = NV2080_ENGINE_TYPE_GR7; + break; + case NV2080_NOTIFIERS_PPP: + *engineIdx = NV2080_ENGINE_TYPE_PPP; + break; + case NV2080_NOTIFIERS_NVDEC0: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC0; + break; + case NV2080_NOTIFIERS_NVDEC1: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC1; + break; + case NV2080_NOTIFIERS_NVDEC2: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC2; + break; + case NV2080_NOTIFIERS_NVDEC3: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC3; + break; + case NV2080_NOTIFIERS_NVDEC4: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC4; + break; + case NV2080_NOTIFIERS_PDEC: + *engineIdx = NV2080_ENGINE_TYPE_VP; + break; + case NV2080_NOTIFIERS_MSENC: + NV_ASSERT(NV2080_NOTIFIERS_MSENC == NV2080_NOTIFIERS_NVENC0); + NV_ASSERT(NV2080_ENGINE_TYPE_MSENC == NV2080_ENGINE_TYPE_NVENC0); + *engineIdx = NV2080_ENGINE_TYPE_MSENC; + break; + case 
NV2080_NOTIFIERS_NVENC1: + *engineIdx = NV2080_ENGINE_TYPE_NVENC1; + break; + case NV2080_NOTIFIERS_NVENC2: + *engineIdx = NV2080_ENGINE_TYPE_NVENC2; + break; + case NV2080_NOTIFIERS_SEC2: + *engineIdx = NV2080_ENGINE_TYPE_SEC2; + break; + case NV2080_NOTIFIERS_NVJPEG0: + *engineIdx = NV2080_ENGINE_TYPE_NVJPEG0; + break; + case NV2080_NOTIFIERS_OFA: + *engineIdx = NV2080_ENGINE_TYPE_OFA; + break; + default: + NV_PRINTF(LEVEL_WARNING, + "engine 0x%x doesn't use the fast non-stall interrupt path!\n", + notifyIndex); + NV_ASSERT(0); + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} + +NV_STATUS registerEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle +) +{ + Subdevice *pSubDevice; + PEVENTNOTIFICATION pTargetEvent = NULL; + NV_STATUS rmStatus = NV_OK, rmTmpStatus = NV_OK; + OBJGPU *pGpu; + NvBool bNonStallIntrEvent = NV_FALSE; + NvU32 engineId; + NvHandle hDevice; + RsResourceRef *pResourceRef; + Memory *pSemMemory = NULL; + + rmStatus = _insertEventNotification(ppEventNotification, hEventClient, + hEvent, NotifyIndex, NotifyType, Data, bUserOsEventHandle); + + if (rmStatus != NV_OK) + goto failed_insert; + + bNonStallIntrEvent = ((NotifyIndex & NV01_EVENT_NONSTALL_INTR) ? NV_TRUE : NV_FALSE); + + if (bNonStallIntrEvent) + { + // + // For non-stall interrupt, the event parent type is NV20_SUBDEVICE, so we can locate + // the correct OBJGPU and attach to its per-engine non-stall event list. + // + if ((serverutilGetResourceRef(hEventClient, hNotifier, &pResourceRef) != NV_OK) || + (!dynamicCast(pResourceRef->pResource, Subdevice))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + pSubDevice = dynamicCast(pResourceRef->pResource, Subdevice); + hDevice = RES_GET_PARENT_HANDLE(pSubDevice); + + if (CliSetSubDeviceContext(hEventClient, RES_GET_HANDLE(pSubDevice), &hDevice, &pGpu) != NV_OK) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + rmStatus = eventGetEngineTypeFromSubNotifyIndex( + DRF_VAL(0005, _NOTIFY_INDEX, _INDEX, NotifyIndex), &engineId); + + if (rmStatus != NV_OK) + goto free_entry; + + if (pSubDevice->hSemMemory != NV01_NULL_OBJECT) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, + memGetByHandle(RES_GET_CLIENT(pSubDevice), + pSubDevice->hSemMemory, + &pSemMemory), + free_entry); + } + + rmStatus = engineNonStallEventOp(pGpu, engineId, + *ppEventNotification, pSemMemory, NV_TRUE); + + if (rmStatus != NV_OK) + goto free_entry; + + return rmStatus; + } + +free_entry: + if (rmStatus != NV_OK) + { + rmTmpStatus = _removeEventNotification(ppEventNotification, hEventClient, + hEvent, NV_TRUE, Data, &pTargetEvent); + + if (rmTmpStatus == NV_OK) + portMemFree(pTargetEvent); + } + +failed_insert: + NV_ASSERT(rmStatus == NV_OK); + return rmStatus; +} + +static NV_STATUS _insertEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle + +) +{ + PEVENTNOTIFICATION EventNotify; + + // + // Create the event notification object + // + EventNotify = portMemAllocNonPaged(sizeof(EVENTNOTIFICATION)); + if (EventNotify == NULL) + return NV_ERR_NO_MEMORY; + + // + // Fill in the fields + // + if (NotifyIndex & NV01_EVENT_BROADCAST) + { + EventNotify->bBroadcastEvent = NV_TRUE; + } + else + { + EventNotify->bBroadcastEvent = NV_FALSE; + } + + if (NotifyIndex & 
NV01_EVENT_SUBDEVICE_SPECIFIC) + { + EventNotify->bSubdeviceSpecificEvent = NV_TRUE; + EventNotify->SubdeviceSpecificValue = + DRF_VAL(0005, _NOTIFY_INDEX, _SUBDEVICE, NotifyIndex); + } + else + { + EventNotify->bSubdeviceSpecificEvent = NV_FALSE; + EventNotify->SubdeviceSpecificValue = 0; + } + + if (NotifyIndex & NV01_EVENT_WITHOUT_EVENT_DATA) + { + EventNotify->bEventDataRequired = NV_FALSE; + } + else + { + EventNotify->bEventDataRequired = NV_TRUE; + } + + if (NotifyIndex & NV01_EVENT_CLIENT_RM) + { + EventNotify->bClientRM = NV_TRUE; + } + else + { + EventNotify->bClientRM = NV_FALSE; + } + + EventNotify->bNonStallIntrEvent = + ((NotifyIndex & NV01_EVENT_NONSTALL_INTR) ? NV_TRUE : NV_FALSE); + + // strip the upper bits as they are actually flags + NotifyIndex = DRF_VAL(0005, _NOTIFY_INDEX, _INDEX, NotifyIndex); + + EventNotify->hEventClient = hEventClient; + EventNotify->hEvent = hEvent; + EventNotify->subdeviceInst = 0; + EventNotify->NotifyIndex = NotifyIndex; + EventNotify->NotifyType = NotifyType; + EventNotify->Data = Data; + EventNotify->NotifyTriggerCount = 0; + EventNotify->bUserOsEventHandle = bUserOsEventHandle; + + // + // Now insert the event into the event chain of this object. + // Order doesn't really matter. + // + EventNotify->Next = *ppEventNotification; + *ppEventNotification = EventNotify; + + return (NV_OK); +} + +//--------------------------------------------------------------------------- +// +// Event Notification support. +// +//--------------------------------------------------------------------------- + +NV_STATUS unregisterEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent +) +{ + return unregisterEventNotificationWithData(ppEventNotification, + hEventClient, + hNotifier, + hEvent, + NV_FALSE, + NvP64_NULL); +} + +NV_STATUS unregisterEventNotificationWithData +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data +) +{ + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION pTargetEvent = NULL; + Subdevice *pSubDevice; + RsResourceRef *pResourceRef; + NvHandle hDevice; + NvU32 engineId; + OBJGPU *pGpu; + + rmStatus = _removeEventNotification(ppEventNotification, hEventClient, + hEvent, bMatchData, Data, &pTargetEvent); + + if (rmStatus != NV_OK) + goto error; + + if (pTargetEvent->bNonStallIntrEvent) + { + // + // For non-stall interrupt, the event parent type is NV20_SUBDEVICE, so we can locate + // the correct OBJGPU and attach to its per-engine non-stall event list. 
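+        // (This mirrors the lookup done in registerEventNotification, so the
+        // ENGINE_EVENT_NODE added at registration time can be found and
+        // unlinked via engineNonStallEventOp below.)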
+ // + if ((serverutilGetResourceRef(hEventClient, hNotifier, &pResourceRef) != NV_OK) || + (!dynamicCast(pResourceRef->pResource, Subdevice))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + pSubDevice = dynamicCast(pResourceRef->pResource, Subdevice); + hDevice = RES_GET_PARENT_HANDLE(pSubDevice); + + if (CliSetSubDeviceContext(hEventClient, RES_GET_HANDLE(pSubDevice), &hDevice, &pGpu) != NV_OK) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + rmStatus = eventGetEngineTypeFromSubNotifyIndex(pTargetEvent->NotifyIndex, &engineId); + + if (rmStatus != NV_OK) + goto free_entry; + + rmStatus = engineNonStallEventOp(pGpu, engineId, + pTargetEvent, NULL, NV_FALSE); + } + +free_entry: + portMemFree(pTargetEvent); + +error: + NV_ASSERT(rmStatus == NV_OK); + return rmStatus; +} + +static NV_STATUS _removeEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data, + PEVENTNOTIFICATION *ppOldEvent +) +{ + PEVENTNOTIFICATION nextEvent, lastEvent; + NvBool found = NV_FALSE; + + // check for null list + nextEvent = NULL; + + if (*ppEventNotification != NULL) + { + // check for head of list + nextEvent = lastEvent = *ppEventNotification; + if ((nextEvent->hEventClient == hEventClient) && + (nextEvent->hEvent == hEvent) && + (!bMatchData || (nextEvent->Data == Data))) + { + *ppEventNotification = nextEvent->Next; + found = NV_TRUE; + } + else + { + // check for internal nodes + nextEvent = nextEvent->Next; + while (nextEvent) + { + if ((nextEvent->hEventClient == hEventClient) && + (nextEvent->hEvent == hEvent) && + (!bMatchData || (nextEvent->Data == Data))) + { + lastEvent->Next = nextEvent->Next; + found = NV_TRUE; + break; + } + lastEvent = nextEvent; + nextEvent = nextEvent->Next; + } + } + } + + // delete the event if it was found + if (found) + { + if (nextEvent->bUserOsEventHandle) + osDereferenceObjectCount(NvP64_VALUE(nextEvent->Data)); + + *ppOldEvent = nextEvent; + } + + return (found) ? NV_OK : NV_ERR_GENERIC; + +} // end of unregisterEventNotificationEventNotify() + +NV_STATUS notifyEvents +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pEventNotification, + NvU32 Notifier, + NvU32 Method, + NvU32 Data, + NV_STATUS Status, + NvU32 Action +) +{ + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION NotifyEvent; + + NV_PRINTF(LEVEL_INFO, " Method = 0x%x\n", Method); + NV_PRINTF(LEVEL_INFO, " Data = 0x%x\n", Data); + NV_PRINTF(LEVEL_INFO, " Status = 0x%x\n", Status); + NV_PRINTF(LEVEL_INFO, " Action = 0x%x\n", Action); + + // perform the type of action + switch (Action) + { + case NV_OS_WRITE_THEN_AWAKEN: + + // walk this object's event list and find any matches for this specific notify + for (NotifyEvent = pEventNotification; NotifyEvent; NotifyEvent = NotifyEvent->Next) + { + if (NotifyEvent->bSubdeviceSpecificEvent) + { + if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) != NotifyEvent->SubdeviceSpecificValue) + { + continue; + } + } + + if (NotifyEvent->NotifyIndex == Notifier) + { + // Do any OS specified action related to this notification. + if (NotifyEvent->bBroadcastEvent) + { + // + // Only do the OS notify when all sub devices under + // a BC device have seen the event. 
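+                        // NotifyTriggerCount counts how many subdevices have
+                        // reported this event so far; it is reset to zero once
+                        // the broadcast notify is delivered.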
+ // + if (++NotifyEvent->NotifyTriggerCount == NumSubDevices(pGpu)) + { + rmStatus = osNotifyEvent(pGpu, NotifyEvent, Method, Data, Status); + NotifyEvent->NotifyTriggerCount = 0x0; + } + } + else + { + rmStatus = osNotifyEvent(pGpu, NotifyEvent, Method, Data, Status); + } + } + } + break; + + default: + // any other actions are legacy channel-based notifies + rmStatus = NV_ERR_INVALID_EVENT; + break; + } + + return rmStatus; +} + +// +// bindEventNotificationToSubdevice +// +// This routine walks the given EVENTNOTIFICATION list and sets +// the designated subdevice instance value for any that are associated +// with the specific NV01_EVENT handle hEvent. +// +NV_STATUS +bindEventNotificationToSubdevice +( + PEVENTNOTIFICATION pEventNotificationList, + NvHandle hEvent, + NvU32 subdeviceInst +) +{ + PEVENTNOTIFICATION pEventNotify; + NvU32 count = 0; + + if (pEventNotificationList == NULL) + return NV_ERR_INVALID_STATE; + + pEventNotify = pEventNotificationList; + while (pEventNotify) + { + if (pEventNotify->hEvent == hEvent) + { + pEventNotify->subdeviceInst = subdeviceInst; + count++; + } + pEventNotify = pEventNotify->Next; + } + + if (count == 0) + return NV_ERR_INVALID_STATE; + + return NV_OK; +} + +NV_STATUS +inotifyConstruct_IMPL(INotifier *pNotifier, CALL_CONTEXT *pCallContext) +{ + if (dynamicCast(pNotifier, RsResource) == NULL) + return NV_ERR_INVALID_OBJECT; + + return NV_OK; +} + +void inotifyDestruct_IMPL(INotifier* pNotifier) +{ + return; +} + +PEVENTNOTIFICATION +inotifyGetNotificationList_IMPL +( + INotifier *pNotifier +) +{ + PEVENTNOTIFICATION *ppEventNotifications = inotifyGetNotificationListPtr(pNotifier); + if (ppEventNotifications != NULL) + return *ppEventNotifications; + + return NULL; +} + +NV_STATUS +notifyConstruct_IMPL(Notifier *pNotifier, CALL_CONTEXT *pCallContext) +{ + return NV_OK; +} + +void notifyDestruct_IMPL(Notifier* pNotifier) +{ + NotifShare *pNotifierShare = inotifyGetNotificationShare(staticCast(pNotifier, INotifier)); + if (pNotifierShare != NULL) + { + pNotifierShare->pNotifier = NULL; + serverFreeShare(&g_resServ, staticCast(pNotifierShare, RsShared)); + } +} + +PEVENTNOTIFICATION +*notifyGetNotificationListPtr_IMPL +( + Notifier *pNotifier +) +{ + NotifShare *pNotifierShare = pNotifier->pNotifierShare; + if (pNotifierShare == NULL) + return NULL; + + return &pNotifierShare->pEventList; +} + +NotifShare +*notifyGetNotificationShare_IMPL +( + Notifier *pNotifier +) +{ + return pNotifier->pNotifierShare; +} + +void +notifySetNotificationShare_IMPL +( + Notifier *pNotifier, + NotifShare *pNotifierShare +) +{ + pNotifier->pNotifierShare = pNotifierShare; +} + +NV_STATUS +shrnotifConstruct_IMPL +( + NotifShare *pNotifShare +) +{ + return NV_OK; +} + +void +shrnotifDestruct_IMPL +( + NotifShare *pNotifShare +) +{ +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c new file mode 100644 index 0000000..3d1063d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c @@ -0,0 +1,555 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/device/device.h" +#include "class/cl0000.h" // NV01_NULL_OBJECT + +#include "rmapi/rs_utils.h" + +#include "entry_points.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/mem_mgr.h" + +NV_STATUS +serverInterMap_Prologue +( + RsServer *pServer, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams, + NvU32 *pReleaseFlags +) +{ + OBJGPU *pGpu; + Device *pDevice; + Subdevice *pSubdevice; + NV_STATUS rmStatus = NV_OK; + NvU64 offset = pParams->offset; + NvU64 length = pParams->length; + + MEMORY_DESCRIPTOR *pSrcMemDesc = NULL; + NvHandle hBroadcastDevice; + NvBool bSubdeviceHandleProvided; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RsResourceRef *pDeviceRef = pCallContext->pContextRef; + RS_INTER_MAP_PRIVATE *pPrivate = pParams->pPrivate; + + NV_ASSERT_OR_RETURN(pPrivate != NULL, NV_ERR_INVALID_ARGUMENT); + + // Get pGpu, assuming user passed in either a device or subdevice handle. 
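+    //
+    // The two handle types also yield different gpuMask values below; a
+    // compiled-out illustration of the difference (instance numbers are
+    // hypothetical):
+    //
+#if 0
+    NvU32 maskSub = NVBIT(2);            // subdevice: one bit, GPU instance 2
+    NvU32 maskDev = NVBIT(0) | NVBIT(1); // device: every GPU in the BC group
+#endif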
+ pDevice = dynamicCast(pDeviceRef->pResource, Device); + if (pDevice == NULL) + { + pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice); + if (pSubdevice == NULL) + return NV_ERR_INVALID_OBJECT; + + pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice); + bSubdeviceHandleProvided = NV_TRUE; + pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu)); + } + else + { + pGpu = GPU_RES_GET_GPU(pDevice); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + hBroadcastDevice = pParams->hDevice; + bSubdeviceHandleProvided = NV_FALSE; + pPrivate->gpuMask = gpumgrGetGpuMask(pGpu); + } + + rmStatus = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags); + if (rmStatus != NV_OK) + return rmStatus; + + pPrivate->pGpu = pGpu; + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + + // Use virtual GetMemInterMapParams to get information needed for mapping from pMappableRef->pResource + RMRES_MEM_INTER_MAP_PARAMS memInterMapParams; + portMemSet(&memInterMapParams, 0, sizeof(memInterMapParams)); + + memInterMapParams.pGpu = pGpu; + memInterMapParams.pMemoryRef = pMappableRef; + memInterMapParams.bSubdeviceHandleProvided = bSubdeviceHandleProvided; + + rmStatus = rmresGetMemInterMapParams(dynamicCast(pMappableRef->pResource, RmResource), &memInterMapParams); + if (rmStatus != NV_OK) + return rmStatus; + + pSrcMemDesc = memInterMapParams.pSrcMemDesc; + NV_ASSERT_OR_RETURN(pSrcMemDesc != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + pPrivate->pSrcGpu = memInterMapParams.pSrcGpu; + pPrivate->hMemoryDevice = memInterMapParams.hMemoryDevice; + pPrivate->bDmaMapNeeded = memInterMapParams.bDmaMapNeeded; + pPrivate->bFlaMapping = memInterMapParams.bFlaMapping; + + // Check length for overflow and against the physical memory size. + if (((offset + length) < offset) || + ((offset + length) > pSrcMemDesc->Size)) + { + NV_PRINTF(LEVEL_ERROR, + "Mapping offset 0x%llX or length 0x%llX out of bounds!\n", + offset, length); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_LIMIT; + } + + if (memdescGetFlag(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu), MEMDESC_FLAGS_DEVICE_READ_ONLY) && + !FLD_TEST_DRF(OS46, _FLAGS, _ACCESS, _READ_ONLY, pParams->flags)) + { + NV_PRINTF(LEVEL_ERROR, "Attempting to map READ_ONLY surface as READ_WRITE / WRITE_ONLY!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + pPrivate->hBroadcastDevice = hBroadcastDevice; + pPrivate->pSrcMemDesc = pSrcMemDesc; + pPrivate->bSubdeviceHandleProvided = bSubdeviceHandleProvided; + + return NV_OK; +} + +void +serverInterMap_Epilogue +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pParams, + NvU32 *pReleaseFlags +) +{ + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags); +} + +NV_STATUS +serverInterUnmap_Prologue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + Device *pDevice = NULL; + Subdevice *pSubdevice = NULL; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RsResourceRef *pDeviceRef = pCallContext->pContextRef; + + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + + // Alloc pPrivate if not set, Unmap does not require any input into Prologue + if (pPrivate == NULL) + { + pPrivate = portMemAllocNonPaged(sizeof(*pPrivate)); + if (pPrivate == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pPrivate, 0, sizeof(*pPrivate)); + pParams->pPrivate = pPrivate; + pPrivate->bAllocated = NV_TRUE; + } + + // Set subdevice or device context. 
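+    //
+    // Both branches below also snapshot the broadcast-enable state so that
+    // serverInterUnmap_Epilogue can restore it; the shape of that pairing,
+    // as a compiled-out sketch:
+    //
+#if 0
+    NvBool saved = gpumgrGetBcEnabledStatus(pGpu); // captured in this prologue
+    /* ... unmap work that may change the thread's BC state ... */
+    gpumgrSetBcEnabledStatus(pGpu, saved);         // restored in the epilogue
+#endif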
pDevice = dynamicCast(pDeviceRef->pResource, Device); + if (pDevice == NULL) + { + pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice); + if (pSubdevice == NULL) + return NV_ERR_INVALID_OBJECT; + + pGpu = GPU_RES_GET_GPU(pSubdevice); + pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + pPrivate->hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice); + pPrivate->bSubdeviceHandleProvided = NV_TRUE; + pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu)); + } + else + { + pGpu = GPU_RES_GET_GPU(pDevice); + pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + pPrivate->hBroadcastDevice = RES_GET_HANDLE(pDevice); + pPrivate->bSubdeviceHandleProvided = NV_FALSE; + pPrivate->gpuMask = gpumgrGetGpuMask(pGpu); + } + + pPrivate->pGpu = pGpu; + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_FALSE, NV_FALSE); + + return NV_OK; +} + +void +serverInterUnmap_Epilogue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + OBJGPU *pGpu; + + if (pPrivate == NULL) + return; + + pGpu = pPrivate->pGpu; + + if (pGpu != NULL) + { + gpumgrSetBcEnabledStatus(pGpu, pPrivate->bcState); + } + + if (pPrivate->bAllocated) + { + portMemFree(pPrivate); + pParams->pPrivate = NULL; + } +} + +static NV_STATUS +_rmapiRmUnmapMemoryDma +( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + RS_LOCK_INFO *pLockInfo, + API_SECURITY_INFO *pSecInfo +) +{ + RsClient *pRsClient = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + Memory *pMemory = NULL; + + RS_INTER_UNMAP_PARAMS params; + RS_INTER_UNMAP_PRIVATE private; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + // Translate hMemory to pMemDesc + if (memGetByHandle(pRsClient, hMemory, &pMemory) == NV_OK) + { + pMemDesc = pMemory->pMemDesc; + } + + portMemSet(&params, 0, sizeof(params)); + params.hClient = hClient; + params.hMapper = hMemCtx; + params.hDevice = hDevice; + params.hMappable = hMemory; + params.flags = flags; + params.dmaOffset = dmaOffset; + params.pMemDesc = pMemDesc; + params.pLockInfo = pLockInfo; + params.pSecInfo = pSecInfo; + + portMemSet(&private, 0, sizeof(private)); + params.pPrivate = &private; + + return serverInterUnmap(&g_resServ, &params); +} + +NV_STATUS +rmapiMap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->MapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, offset, + length, flags, pDmaOffset, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiMapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_INTER_MAP_PARAMS params; + RS_INTER_MAP_PRIVATE private; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04Map: client:0x%x device:0x%x context:0x%x memory:0x%x flags:0x%x\n", + hClient, hDevice, hMemCtx, hMemory, flags); + NV_PRINTF(LEVEL_INFO, + "Nv04Map: offset:0x%llx length:0x%llx dmaOffset:0x%08llx\n", + offset, length, *pDmaOffset); + + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04Map 0x%x\n", flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != 
NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK; + + LOCK_METER_DATA(MAPMEM_DMA, flags, 0, 0); + + + portMemSet(&params, 0, sizeof(params)); + params.hClient = hClient; + params.hMapper = hMemCtx; + params.hDevice = hDevice; + params.hMappable = hMemory; + params.offset = offset; + params.length = length; + params.flags = flags; + params.dmaOffset = *pDmaOffset; + params.pLockInfo = &lockInfo; + params.pSecInfo = pSecInfo; + + portMemSet(&private, 0, sizeof(private)); + params.pPrivate = &private; + + // map DMA memory + status = serverInterMap(&g_resServ, &params); + + *pDmaOffset = params.dmaOffset; + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04Map: map complete\n"); + NV_PRINTF(LEVEL_INFO, "Nv04Map: dmaOffset: 0x%08llx\n", *pDmaOffset); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Nv04Map: map failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiMapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, offset, + length, flags, pDmaOffset, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiUnmap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->UnmapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, + flags, dmaOffset, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiUnmapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04Unmap: client:0x%x device:0x%x context:0x%x memory:0x%x\n", + hClient, hDevice, hMemCtx, hMemory); + NV_PRINTF(LEVEL_INFO, "Nv04Unmap: flags:0x%x dmaOffset:0x%08llx\n", + flags, dmaOffset); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK; + + LOCK_METER_DATA(UNMAPMEM_DMA, flags, 0, 0); + + // Unmap DMA memory + status = _rmapiRmUnmapMemoryDma(hClient, hDevice, hMemCtx, hMemory, flags, + dmaOffset, &lockInfo, pSecInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04Unmap: Unmap complete\n"); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Nv04Unmap: unmap failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiUnmapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + 
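+    //
+    // Every *WithSecInfoTls wrapper in this file follows the same bracket:
+    // initialize a THREAD_STATE_NODE, invoke the non-TLS worker, then free
+    // the node, as the statements below show.
+    //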
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiUnmapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, flags, dmaOffset, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverInterMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverInterUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUpdateLockFlagsForInterAutoUnmap +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + pParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_GPU_GROUP_LOCK; + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c new file mode 100644 index 0000000..c7399c0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c @@ -0,0 +1,987 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/generic_engine.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "class/cl0000.h" // NV01_NULL_OBJECT + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" + +#include "rmapi/rs_utils.h" +#include "rmapi/mapping_list.h" +#include "entry_points.h" + +typedef struct RS_CPU_MAP_PARAMS RmMapParams; +typedef struct RS_CPU_UNMAP_PARAMS RmUnmapParams; + +NV_STATUS +rmapiMapGpuCommon +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping, + OBJGPU *pGpu, + NvU32 regionOffset, + NvU32 regionSize +) +{ + NV_STATUS rmStatus; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + NvU64 offset; + + // Validate the offset and limit passed in. + if (pCpuMapping->offset >= regionSize) + return NV_ERR_INVALID_BASE; + if (pCpuMapping->length == 0) + return NV_ERR_INVALID_LIMIT; + if ((pCpuMapping->offset + pCpuMapping->length > regionSize) || + !portSafeAddU64(pCpuMapping->offset, pCpuMapping->length, &offset)) + return NV_ERR_INVALID_LIMIT; + + if (!portSafeAddU64((NvU64)regionOffset, pCpuMapping->offset, &offset)) + return NV_ERR_INVALID_OFFSET; + + // Create a mapping of BAR0 + rmStatus = osMapGPU(pGpu, + rmclientGetCachedPrivilege(pClient), + offset, + pCpuMapping->length, + pCpuMapping->pPrivate->protect, + &pCpuMapping->pLinearAddress, + &pCpuMapping->pPrivate->pPriv); + return rmStatus; +} + + + +NV_STATUS +rmapiGetEffectiveAddrSpace +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + NV_ADDRESS_SPACE addrSpace; + NvBool bDirectSysMappingAllowed = NV_TRUE; + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1)) + { + addrSpace = ADDR_FBMEM; + } + else if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && + (bDirectSysMappingAllowed || FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, mapFlags) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_FMODEL(pGpu) && !IS_RTLSIM(pGpu)))) + { + addrSpace = ADDR_SYSMEM; + } + else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) || + ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && !bDirectSysMappingAllowed)) + { + addrSpace = ADDR_FBMEM; + } + else + { + addrSpace = memdescGetAddressSpace(pMemDesc); + } + + if (pAddrSpace) + *pAddrSpace = addrSpace; + + return NV_OK; +} + +// Asserts to check caching type matches across sdk and nv_memory_types +ct_assert(NVOS33_FLAGS_CACHING_TYPE_CACHED == NV_MEMORY_CACHED); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED == NV_MEMORY_UNCACHED); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITECOMBINED == NV_MEMORY_WRITECOMBINED); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_WRITEBACK == NV_MEMORY_WRITEBACK); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_DEFAULT == NV_MEMORY_DEFAULT); +ct_assert(NVOS33_FLAGS_CACHING_TYPE_UNCACHED_WEAK == NV_MEMORY_UNCACHED_WEAK); + +// +// Map memory entry points. +// +NV_STATUS +memMap_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu = NULL; + RsClient *pRsClient; + RmClient *pRmClient; + RsResourceRef *pContextRef; + RsResourceRef *pMemoryRef; + Memory *pMemoryInfo; // TODO: rename this field. pMemoryInfo is the legacy name. 
// Name should be clear on how pMemoryInfo differs from pMemory + MEMORY_DESCRIPTOR *pMemDesc; + NvP64 priv = NvP64_NULL; + NV_STATUS rmStatus = NV_OK; + NV_ADDRESS_SPACE effectiveAddrSpace; + NvBool bBroadcast; + NvU64 mapLimit; + NvBool bIsSysmem = NV_FALSE; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pMapParams->pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + pContextRef = pMapParams->pLockInfo->pContextRef; + if (pContextRef != NULL) + { + NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pContextRef, &bBroadcast, &pGpu)); + gpuSetThreadBcState(pGpu, bBroadcast); + + } + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, pMapParams->hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pMapParams->hMemory, &pMemoryRef)); + + pMemoryInfo = dynamicCast(pMemoryRef->pResource, Memory); + NV_ASSERT_OR_RETURN(pMemoryInfo != NULL, NV_ERR_NOT_SUPPORTED); + pMemDesc = pMemoryInfo->pMemDesc; + + if (!pMapParams->bKernel && + FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, pMemoryInfo->Attr2) && + (pMapParams->protect != NV_PROTECT_READABLE)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Validate the offset and limit passed in. + if (pMapParams->offset >= pMemoryInfo->Length) + { + return NV_ERR_INVALID_BASE; + } + if (pMapParams->length == 0) + { + return NV_ERR_INVALID_LIMIT; + } + + // + // See bug #140807 and #150889 - we need to pad memory mappings to past their + // actual allocation size (to PAGE_SIZE+1) because of a buggy ms function; + // skip the allocation size sanity check so the map operation still succeeds. + // + if ((DRF_VAL(OS33, _FLAGS, _SKIP_SIZE_CHECK, pMapParams->flags) == NVOS33_FLAGS_SKIP_SIZE_CHECK_DISABLE) && + (!portSafeAddU64(pMapParams->offset, pMapParams->length, &mapLimit) || + (mapLimit > pMemoryInfo->Length))) + { + return NV_ERR_INVALID_LIMIT; + } + + if (pGpu != NULL) + { + NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, memdescGetMemDescFromGpu(pMemDesc, pGpu), pMapParams->flags, &effectiveAddrSpace)); + } + else + { + effectiveAddrSpace = ADDR_SYSMEM; + } + + bIsSysmem = (effectiveAddrSpace == ADDR_SYSMEM); + + if (bIsSysmem) + { + // A client can specify not to map memory by default when + // calling into RmAllocMemory. In those cases, we don't have + // a mapping yet, so go ahead and map it for the client now.
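+        //
+        // On success the mapping is tagged _DIRECT below via the DRF field
+        // macros; a compiled-out illustration of that idiom:
+        //
+#if 0
+        NvU32 f = 0;
+        f = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, f);          // set field
+        NV_ASSERT(FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, f));  // test it
+#endif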
+ rmStatus = memdescMap(pMemDesc, + pMapParams->offset, + pMapParams->length, + pMapParams->bKernel, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv); + + // Associate this mapping with the client + if (rmStatus == NV_OK && *(pMapParams->ppCpuVirtAddr)) + { + pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags); + rmStatus = CliUpdateMemoryMappingInfo(pCpuMapping, + pMapParams->bKernel, + *(pMapParams->ppCpuVirtAddr), + priv, + pMapParams->length, + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + } + } + else if (effectiveAddrSpace == ADDR_VIRTUAL) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + } + else if (effectiveAddrSpace == ADDR_REGMEM) + { + RS_PRIV_LEVEL privLevel; + + pRmClient = dynamicCast(pRsClient, RmClient); + if (pRmClient == NULL) + return NV_ERR_OPERATING_SYSTEM; + + privLevel = rmclientGetCachedPrivilege(pRmClient); + if (!rmclientIsAdmin(pRmClient, privLevel) && !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK)) + return NV_ERR_PROTECTION_FAULT; + + if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pMapParams->flags) == NVOS33_FLAGS_MEM_SPACE_USER) + { + privLevel = RS_PRIV_LEVEL_USER; + } + + // Create a mapping of BAR0 + rmStatus = osMapGPU(pGpu, + privLevel, + pMapParams->offset + pMemDesc-> _pteArray[0], + pMapParams->length, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv); + if (rmStatus != NV_OK) + return rmStatus; + + // Save off the mapping + rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping, + pMapParams->bKernel, + priv, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + -1, // gpu virtual addr + -1, // gpu map length + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + + if (rmStatus != NV_OK) + { + osUnmapGPU(pGpu->pOsGpuInfo, + privLevel, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + priv); + return rmStatus; + } + } + else + { + return NV_ERR_INVALID_CLASS; + } + + if (rmStatus == NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "%s created. CPU Virtual Address: " NvP64_fmt "\n", + FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags) ? "Direct mapping" : "Mapping", + *(pMapParams->ppCpuVirtAddr)); + } + + return rmStatus; +} + +NV_STATUS +memUnmap_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu = pCpuMapping->pPrivate->pGpu; + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + + if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, pCpuMapping->flags)) + { + // Nothing more to do + } + // System Memory case + else if ((pGpu == NULL) || ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && + FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags))) + { + if (FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags)) + { + memdescUnmap(pMemDesc, + pCpuMapping->pPrivate->bKernel, + pCpuMapping->processId, + pCpuMapping->pLinearAddress, + pCpuMapping->pPrivate->pPriv); + } + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL) + { + // If the memory is tiled, then it's being mapped through BAR1 + if( DRF_VAL(OS32, _ATTR, _TILED, pMemory->Attr) ) + { + // BAR1 mapping. Unmap it. 
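+            //
+            // For reference, memUnmap dispatches on the mapping type
+            // (summary sketch of the branches in this function):
+            //   direct SYSMEM mapping      -> memdescUnmap()
+            //   ADDR_VIRTUAL, tiled (BAR1) -> osUnmapPciMemoryKernel64()/User()
+            //   ADDR_REGMEM (BAR0)         -> osUnmapGPU()
+            //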
+ if (pCpuMapping->pPrivate->bKernel) + { + osUnmapPciMemoryKernel64(pGpu, pCpuMapping->pLinearAddress); + } + else + { + osUnmapPciMemoryUser(pGpu->pOsGpuInfo, + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + } + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_REGMEM) + { + osUnmapGPU(pGpu->pOsGpuInfo, + rmclientGetCachedPrivilege(pClient), + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + } + return NV_OK; +} + +NV_STATUS +rmapiValidateKernelMapping +( + RS_PRIV_LEVEL privLevel, + NvU32 flags, + NvBool *pbKernel +) +{ + NvBool bKernel; + NV_STATUS status = NV_OK; + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + // only kernel clients should be specifying the user mapping flags + if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_USER) + status = NV_ERR_INVALID_FLAGS; + bKernel = NV_FALSE; + } + else + { + // + // Kernel clients can only use the persistent flag if they are + // doing a user mapping. + // + bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT); + } + + // OS descriptor will already be mapped + if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags)) + status = NV_ERR_INVALID_FLAGS; + + if (pbKernel != NULL) + *pbKernel = bKernel; + + return status; +} + +NV_STATUS +serverMap_Prologue +( + RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams +) +{ + NV_STATUS rmStatus; + RsClient *pRsClient; + RmClient *pRmClient; + RsResourceRef *pMemoryRef; + NvHandle hClient = pMapParams->hClient; + NvHandle hParent = hClient; + NvHandle hSubDevice = NV01_NULL_OBJECT; + NvBool bClientAlloc = (hClient == pMapParams->hDevice); + NvU32 flags = pMapParams->flags; + RS_PRIV_LEVEL privLevel; + + // Persistent sysmem mapping support is no longer supported + if (DRF_VAL(OS33, _FLAGS, _PERSISTENT, flags) == NVOS33_FLAGS_PERSISTENT_ENABLE) + return NV_ERR_INVALID_FLAGS; + + // Populate Resource Server information + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + // Validate hClient + pRmClient = dynamicCast(pRsClient, RmClient); + if (pRmClient == NULL) + return NV_ERR_OPERATING_SYSTEM; + privLevel = rmclientGetCachedPrivilege(pRmClient); + + // RS-TODO: Assert if this fails after all objects are converted + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pMapParams->hMemory, &pMemoryRef)); + + if (pMemoryRef->pParentRef != NULL) + hParent = pMemoryRef->pParentRef->hResource; + + // check if we have a user or kernel RM client + rmStatus = rmapiValidateKernelMapping(privLevel, flags, &pMapParams->bKernel); + if (rmStatus != NV_OK) + return rmStatus; + + // + // First check to see if it is a standard device or the BC region of + // a MC adapter. 
+ // + pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + if (!bClientAlloc) + { + NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT); + + RsResourceRef *pContextRef; + rmStatus = clientGetResourceRef(pRsClient, pMapParams->hDevice, &pContextRef); + if (rmStatus != NV_OK) + return rmStatus; + + if (pContextRef->internalClassId == classId(Device)) + { + } + else if (pContextRef->internalClassId == classId(Subdevice)) + { + hSubDevice = pMapParams->hDevice; + pMapParams->hDevice = pContextRef->pParentRef->hResource; + } + else + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pMapParams->pLockInfo->pContextRef = pContextRef; + } + else + { + NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT); + } + + pMapParams->hContext = (hSubDevice != NV01_NULL_OBJECT) + ? hSubDevice + : pMapParams->hDevice; + + + // convert from OS33 flags to RM's memory protection flags + switch (DRF_VAL(OS33, _FLAGS, _ACCESS, flags)) + { + case NVOS33_FLAGS_ACCESS_READ_WRITE: + pMapParams->protect = NV_PROTECT_READ_WRITE; + break; + case NVOS33_FLAGS_ACCESS_READ_ONLY: + pMapParams->protect = NV_PROTECT_READABLE; + break; + case NVOS33_FLAGS_ACCESS_WRITE_ONLY: + pMapParams->protect = NV_PROTECT_WRITEABLE; + break; + default: + return NV_ERR_INVALID_FLAGS; + } + + return NV_OK; +} + +NV_STATUS +serverUnmap_Prologue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS rmStatus; + RsClient *pRsClient; + RmClient *pRmClient; + RsResourceRef *pMemoryRef; + NvHandle hClient = pUnmapParams->hClient; + NvHandle hParent = hClient; + NvHandle hMemory = pUnmapParams->hMemory; + NvBool bClientAlloc = (pUnmapParams->hDevice == pUnmapParams->hClient); + NvBool bKernel; + NvBool bBroadcast; + NvU32 ProcessId = pUnmapParams->processId; + RS_PRIV_LEVEL privLevel; + void *pProcessHandle = NULL; + + // Populate Resource Server information + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + // check if we have a user or kernel RM client + pRmClient = dynamicCast(pRsClient, RmClient); + if (pRmClient == NULL) + return NV_ERR_OPERATING_SYSTEM; + privLevel = rmclientGetCachedPrivilege(pRmClient); + + // RS-TODO: Assert if this fails after all objects are converted + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, hMemory, &pMemoryRef)); + + if (pMemoryRef->pParentRef != NULL) + hParent = pMemoryRef->pParentRef->hResource; + + // + // First check to see if it is a standard device or the BC region of + // a MC adapter. 
+ // + pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + if (!bClientAlloc) + { + NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT); + + RsResourceRef *pContextRef; + rmStatus = clientGetResourceRef(pRsClient, pUnmapParams->hDevice, &pContextRef); + if (rmStatus != NV_OK) + return rmStatus; + + if (pContextRef->internalClassId == classId(Subdevice)) + { + pUnmapParams->hDevice = pContextRef->pParentRef->hResource; + } + else if (pContextRef->internalClassId != classId(Device)) + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pUnmapParams->pLockInfo->pContextRef = pContextRef; + NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pUnmapParams->pLockInfo->pContextRef, &bBroadcast, &pGpu)); + gpuSetThreadBcState(pGpu, bBroadcast); + } + else + { + NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT); + } + + // Decide what sort of mapping it is, user or kernel + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + bKernel = NV_FALSE; + } + else + { + bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pUnmapParams->flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT); + } + + // + // If it's a user mapping, and we're not currently in the same process that + // it's mapped into, then attempt to attach to the other process first. + // + if (!bKernel && (ProcessId != osGetCurrentProcess())) + { + rmStatus = osAttachToProcess(&pProcessHandle, ProcessId); + if (rmStatus != NV_OK) + return rmStatus; + + pUnmapParams->pProcessHandle = pProcessHandle; + } + + pUnmapParams->fnFilter = bKernel + ? serverutilMappingFilterKernel + : serverutilMappingFilterCurrentUserProc; + + return NV_OK; +} + +void +serverUnmap_Epilogue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + // do we need to detach? + if (pUnmapParams->pProcessHandle != NULL) + { + osDetachFromProcess(pUnmapParams->pProcessHandle); + pUnmapParams->pProcessHandle = NULL; + } +} + +NV_STATUS +rmapiMapToCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + void **ppCpuVirtAddr, + NvU32 flags +) +{ + NvP64 pCpuVirtAddrNvP64 = NvP64_NULL; + NV_STATUS status; + + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + status = pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length, + &pCpuVirtAddrNvP64, flags, &pRmApi->defaultSecInfo); + + if (ppCpuVirtAddr) + *ppCpuVirtAddr = NvP64_VALUE(pCpuVirtAddrNvP64); + + return status; +} + +/** + * Call into Resource Server to register and execute a CPU mapping operation. + * + * Resource Server will: + * 1. Callback into RM (serverMap_Prologue) to set up mapping parameters, mapping context object, + * and locking requirements + * 2. Take locks (if required) + * 3. Allocate and register a RsCpuMapping book-keeping entry on the target object's RsResourceRef + * 4. Call the target object's mapping virtual function (xxxMap_IMPL, defined in RM) + * 5. Setup back-references to the mapping context object (if required.) This mapping will automatically + * be unmapped if either the target object or mapping context object are freed. + * 6. 
Release any locks taken + */ +NV_STATUS +rmapiMapToCpuWithSecInfoV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RmMapParams rmMapParams; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: client:0x%x device:0x%x memory:0x%x\n", hClient, + hDevice, hMemory); + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: offset: %llx length: %llx flags:0x%x\n", + offset, length, *flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04MapMemory 0x%x\n", *flags); + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + LOCK_METER_DATA(MAPMEM, flags, 0, 0); + + // clear params for good measure + portMemSet(&rmMapParams, 0, sizeof (rmMapParams)); + + // load user args + rmMapParams.hClient = hClient; + rmMapParams.hDevice = hDevice; + rmMapParams.hMemory = hMemory; + rmMapParams.offset = offset; + rmMapParams.length = length; + rmMapParams.ppCpuVirtAddr = ppCpuVirtAddr; + rmMapParams.flags = *flags; + rmMapParams.pLockInfo = &lockInfo; + rmMapParams.pSecInfo = pSecInfo; + + status = serverMap(&g_resServ, rmMapParams.hClient, rmMapParams.hMemory, &rmMapParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + *flags = rmMapParams.flags; + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04MapMemory: complete\n"); + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: *ppCpuVirtAddr:" NvP64_fmt "\n", + *ppCpuVirtAddr); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04MapMemory: map failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiMapToCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + return rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, + hDevice, hMemory, offset, length, ppCpuVirtAddr, + &flags, pSecInfo); +} + +NV_STATUS +rmapiMapToCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, &flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} +NV_STATUS +rmapiMapToCpuWithSecInfoTlsV2 +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 *flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapToCpuWithSecInfoV2(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiUnmapFromCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pLinearAddress, + NvU32 flags, + NvU32 ProcessId +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return 
pRmApi->UnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, NV_PTR_TO_NvP64(pLinearAddress), + flags, ProcessId, &pRmApi->defaultSecInfo); +} + +/** + * Call into Resource Server to execute a CPU unmapping operation. + * + * Resource Server will: + * 1. Callback into RM (serverUnmap_Prologue) to set up unmapping parameters, locking requirements, + * and attempt to attach to the mapping's user process (for user mappings only) + * 2. Take locks (if required) + * 3. Lookup the mapping + * 4. Call the target object's unmapping virtual function (xxxUnmap_IMPL, defined in RM) + * 5. Unregister the mapping from its back-references, and free the mapping + * 6. Callback into RM (serverUnmap_Epilogue) to detach from the mapping's user process (if required) + * 7. Release any locks taken + */ +NV_STATUS +rmapiUnmapFromCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RmUnmapParams rmUnmapParams; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04UnmapMemory: client:0x%x device:0x%x memory:0x%x pLinearAddr:" NvP64_fmt " flags:0x%x\n", + hClient, hDevice, hMemory, pLinearAddress, flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + LOCK_METER_DATA(UNMAPMEM, flags, 0, 0); + + portMemSet(&rmUnmapParams, 0, sizeof (rmUnmapParams)); + rmUnmapParams.hClient = hClient; + rmUnmapParams.hDevice = hDevice; + rmUnmapParams.hMemory = hMemory; + rmUnmapParams.pLinearAddress = pLinearAddress; + rmUnmapParams.flags = flags; + rmUnmapParams.processId = ProcessId; + rmUnmapParams.pLockInfo = &lockInfo; + rmUnmapParams.pSecInfo = pSecInfo; + + status = serverUnmap(&g_resServ, hClient, hMemory, &rmUnmapParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04UnmapMemory: unmap complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04UnmapMemory: unmap failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiUnmapFromCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiUnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, pLinearAddress, + flags, ProcessId, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP)) + ? 
LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +refAllocCpuMappingPrivate +( + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + pCpuMapping->pPrivate = portMemAllocNonPaged(sizeof(RS_CPU_MAPPING_PRIVATE)); + if (pCpuMapping->pPrivate == NULL) + return NV_ERR_NO_MEMORY; + + pCpuMapping->pPrivate->protect = pMapParams->protect; + pCpuMapping->pPrivate->bKernel = pMapParams->bKernel; + + return NV_OK; +} + +void +refFreeCpuMappingPrivate +( + RsCpuMapping *pCpuMapping +) +{ + portMemFree(pCpuMapping->pPrivate); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c new file mode 100644 index 0000000..18458cd --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c @@ -0,0 +1,341 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "core/system.h" +#include "rmapi/rmapi.h" +#include "rmapi/param_copy.h" +#include "rmapi/alloc_size.h" +#include "rmapi/control.h" +#include "os/os.h" + +NV_STATUS rmapiParamsAcquire +( + RMAPI_PARAM_COPY *pParamCopy, + NvBool bUserModeArgs +) +{ + NvBool bUseParamsDirectly; + void *pKernelParams = NULL; + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Error check parameters + if (((pParamCopy->paramsSize != 0) && (pParamCopy->pUserParams == NvP64_NULL)) || + ((pParamCopy->paramsSize == 0) && (pParamCopy->pUserParams != NvP64_NULL)) || + !pParamCopy->bSizeValid) + { + NV_PRINTF(LEVEL_WARNING, + "%s: bad params from client: ptr " NvP64_fmt " size: 0x%x (%s)\n", + pParamCopy->msgTag, pParamCopy->pUserParams, pParamCopy->paramsSize, + pParamCopy->bSizeValid ? "valid" : "invalid"); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + bUseParamsDirectly = (pParamCopy->paramsSize == 0) || (!bUserModeArgs); + + // if we can use client params directly, we're done. + if (bUseParamsDirectly) + { + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS)) + { + // Check that its a kernel pointer + rmStatus = osIsKernelBuffer((void*)NvP64_VALUE(pParamCopy->pUserParams), + pParamCopy->paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error validating kernel pointer. 
Status 0x%x\n", + rmStatus); + goto done; + } + } + + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE; + pKernelParams = NvP64_VALUE(pParamCopy->pUserParams); + goto done; + } + + if (!(pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK)) + { + if (pParamCopy->paramsSize > RMAPI_PARAM_COPY_MAX_PARAMS_SIZE) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): Requested size exceeds max (%u > %u)\n", + pParamCopy->msgTag, pParamCopy->paramsSize, + RMAPI_PARAM_COPY_MAX_PARAMS_SIZE); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + + pKernelParams = portMemAllocNonPaged(pParamCopy->paramsSize); + if (pKernelParams == NULL) + { + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + NV_PRINTF(LEVEL_WARNING, "(%s): portMemAllocNonPaged failure: status 0x%x\n", + pParamCopy->msgTag, rmStatus); + goto done; + } + + // Copyin unless directed otherwise + if (pParamCopy->pUserParams) + { + if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN) + { + if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER) + portMemSet(pKernelParams, 0, pParamCopy->paramsSize); + } + else + { + rmStatus = portMemExCopyFromUser(pParamCopy->pUserParams, pKernelParams, pParamCopy->paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyFromUser failure: status 0x%x\n", + pParamCopy->msgTag, rmStatus); + goto done; + } + } + } + +done: + if (rmStatus != NV_OK) // There was an error, be sure to free the buffer + { + portMemFree(pKernelParams); + pKernelParams = NULL; + } + + NV_ASSERT(pParamCopy->ppKernelParams != NULL); + *(pParamCopy->ppKernelParams) = pKernelParams; + return rmStatus; +} + +// +// Copyout if needed and free any tmp param buffer +// Skips copyout if RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT is set. +// +NV_STATUS rmapiParamsRelease +( + RMAPI_PARAM_COPY *pParamCopy +) +{ + NV_STATUS rmStatus = NV_OK; + + // nothing to do, rmapiParamsAcquire() is either not called or not completed + if (NULL == pParamCopy->ppKernelParams) + return NV_OK; + + // if using the client's buffer directly, there's nothing to do + if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE) + goto done; + + // if no kernel param ptr, there must be nothing to copy out + // This can only happen if rmapiParamsAcquire() returned an error, + // but we need to handle it since rmapiParamsRelease() might be + // called anyway. + if (NULL == *pParamCopy->ppKernelParams) + goto done; + + // do the copyout if something to copy, unless told to skip it... + if (pParamCopy->pUserParams && ! (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT)) + { + rmStatus = portMemExCopyToUser(*(pParamCopy->ppKernelParams), pParamCopy->pUserParams, pParamCopy->paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyToUser failure: status 0x%x\n", + pParamCopy->msgTag, rmStatus); + + // even if the copyout fails, we still need to free the kernel mem + } + } + + portMemFree(*pParamCopy->ppKernelParams); + +done: + // no longer ok to use the ptr, even if it was a direct usage + *pParamCopy->ppKernelParams = NULL; + return rmStatus; +} + +// This is a one-shot suitable for a case where we already have a kernel +// buffer and just need to copy into it from a user buffer. +// Not for general use... +// +// It uses the same logic as rmapiParamsAcquire(), but does not maintain +// an RMAPI_PARAM_COPY container.
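+//
+// For contrast, the container-based flow looks roughly like this
+// (compiled-out sketch; the tag, size, and input variables are hypothetical):
+//
+#if 0
+RMAPI_PARAM_COPY paramCopy;
+void            *pKernelParams = NULL;       // receives the usable pointer
+paramCopy.msgTag         = "example";        // hypothetical tag
+paramCopy.pUserParams    = pUserParams;      // client pointer (hypothetical)
+paramCopy.paramsSize     = paramsSize;       // byte count (hypothetical)
+paramCopy.ppKernelParams = &pKernelParams;
+paramCopy.flags          = 0;
+paramCopy.bSizeValid     = NV_TRUE;
+rmStatus = rmapiParamsAcquire(&paramCopy, bUserModeArgs); // copyin or direct use
+/* ... operate on pKernelParams ... */
+rmStatus = rmapiParamsRelease(&paramCopy);                // copyout and free
+#endif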
+NV_STATUS rmapiParamsCopyIn +( + const char *msgTag, + void *pKernelParams, + NvP64 pUserParams, + NvU32 paramsSize, + NvBool bUserModeArgs +) +{ + NV_STATUS rmStatus; + + // error check parameters + if ((paramsSize == 0) || + (pKernelParams == NULL) || + (pUserParams == NvP64_NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): bad params from client: ptr " NvP64_fmt " size: 0x%x\n", + msgTag, pUserParams, paramsSize); + + return NV_ERR_INVALID_ARGUMENT; + } + + // if we can use client params directly, just memcpy() + if (bUserModeArgs == NV_FALSE) + { + // If the same ptr we can skip the memcpy + if (pKernelParams != NvP64_VALUE(pUserParams)) + { + (void) portMemCopy(pKernelParams, paramsSize, NvP64_VALUE(pUserParams), paramsSize); + } + rmStatus = NV_OK; + } + else + { + rmStatus = portMemExCopyFromUser(pUserParams, pKernelParams, paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyFromUser failure: status 0x%x\n", + msgTag, rmStatus); + } + } + + return rmStatus; +} + +// This is a one-shot suitable for a case where we already have a kernel +// buffer and just need to copy it out correctly to a user buffer. +// Not for general use... +// +// It uses the same logic as rmapiParamsAcquire(), but does not maintain +// an RMAPI_PARAM_COPY container. + +NV_STATUS rmapiParamsCopyOut +( + const char *msgTag, + void *pKernelParams, + NvP64 pUserParams, + NvU32 paramsSize, + NvBool bUserModeArgs +) +{ + NV_STATUS rmStatus; + + // error check parameters + if ((paramsSize == 0) || + (pKernelParams == NULL) || + (pUserParams == NvP64_NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): bad params from client: ptr " NvP64_fmt " size: 0x%x\n", + msgTag, pUserParams, paramsSize); + + return NV_ERR_INVALID_ARGUMENT; + } + + // if we can use client params directly, just memcpy() + if (bUserModeArgs == NV_FALSE) + { + // If the same ptr we can skip the memcpy + if (pKernelParams != NvP64_VALUE(pUserParams)) + { + (void) portMemCopy(NvP64_VALUE(pUserParams), paramsSize, pKernelParams, paramsSize); + } + rmStatus = NV_OK; + } + else + { + rmStatus = portMemExCopyToUser(pKernelParams, pUserParams, paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyToUser failure: status 0x%x\n", + msgTag, rmStatus); + } + } + + return rmStatus; +} + +NV_STATUS +rmapiParamsCopyInit +( + RMAPI_PARAM_COPY *pParamCopy, + NvU32 hClass +) +{ + NV_STATUS status; + NvBool bAllowNull; + + status = rmapiGetClassAllocParamSize(&pParamCopy->paramsSize, + pParamCopy->pUserParams, + &bAllowNull, + hClass); + if (status != NV_OK) + return status; + + // NULL pUserParams is not allowed for given class + if (bAllowNull == NV_FALSE && pParamCopy->pUserParams == NvP64_NULL) + return NV_ERR_INVALID_ARGUMENT; + + pParamCopy->bSizeValid = NV_TRUE; + return NV_OK; +} + + +#include "ctrl/ctrl0080/ctrl0080gpu.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +static inline NV_STATUS _embeddedParamsCheck(RmCtrlParams *pRmCtrlParams) +{ + // + // These Orin controls have embedded params in them, so they can only be + // called by kernel clients + // + switch (pRmCtrlParams->cmd) + { + case NV0080_CTRL_CMD_GPU_GET_CLASSLIST: + case NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST: + case NV2080_CTRL_CMD_GPU_GET_ENGINES: + case NV2080_CTRL_CMD_GPU_GET_INFO: + NV_ASSERT_OR_RETURN(pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_KERNEL, NV_ERR_INVALID_POINTER); + } + return NV_OK; +} + +NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams) +{ + return 
_embeddedParamsCheck(pRmCtrlParams); +} +NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams) +{ + return _embeddedParamsCheck(pRmCtrlParams); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c new file mode 100644 index 0000000..de19007 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c @@ -0,0 +1,286 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "ctrl/ctrlxxxx.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu.h" +#include "vgpu/rpc.h" +#include "core/locks.h" + +NV_STATUS +rmrescmnConstruct_IMPL +( + RmResourceCommon *pResourceCommmon +) +{ + return NV_OK; +} + +NV_STATUS +rmresConstruct_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + if (RS_IS_COPY_CTOR(pParams)) + { + RmResource *pSrcResource = dynamicCast(pParams->pSrcRef->pResource, RmResource); + + pResource->rpcGpuInstance = pSrcResource->rpcGpuInstance; + pResource->bRpcFree = pSrcResource->bRpcFree; + } + else + { + pResource->rpcGpuInstance = ~0; + pResource->bRpcFree = NV_FALSE; + } + + return NV_OK; +} + +NvBool +rmresAccessCallback_IMPL +( + RmResource *pResource, + RsClient *pInvokingClient, + void *pAllocParams, + RsAccessRight accessRight +) +{ + NV_STATUS status; + RsResourceRef *pCliResRef; + + status = clientGetResourceRef(RES_GET_CLIENT(pResource), + RES_GET_CLIENT_HANDLE(pResource), + &pCliResRef); + + if (status == NV_OK) + { + // Allow access if the resource's owner would get the access right + if(resAccessCallback(pCliResRef->pResource, pInvokingClient, pAllocParams, accessRight)) + return NV_TRUE; + } + + // Delegate to superclass + return resAccessCallback_IMPL(staticCast(pResource, RsResource), pInvokingClient, pAllocParams, accessRight); +} + +NvBool +rmresShareCallback_IMPL +( + RmResource *pResource, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + NV_STATUS status; + RsResourceRef *pCliResRef; + + // + // cliresShareCallback contains 
some require exceptions for non-GpuResource, + // which we don't want to hit. ClientResource doesn't normally implement these + // share types anyway, so we're fine with skipping them. + // + switch (pSharePolicy->type) + { + case RS_SHARE_TYPE_SMC_PARTITION: + case RS_SHARE_TYPE_GPU: + { + // + // We do not want to lock down these GpuResource-specific require policies + // when the check cannot be applied for other resources, so add these checks + // as an alternative bypass for those policies + // + if ((pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) && + (NULL == dynamicCast(pResource, GpuResource))) + { + return NV_TRUE; + } + break; + } + case RS_SHARE_TYPE_FM_CLIENT: + { + RmClient *pSrcClient = dynamicCast(RES_GET_CLIENT(pResource), RmClient); + NvBool bSrcIsKernel = (pSrcClient != NULL) && (rmclientGetCachedPrivilege(pSrcClient) >= RS_PRIV_LEVEL_KERNEL); + + if (rmclientIsCapable(dynamicCast(pInvokingClient, RmClient), + NV_RM_CAP_EXT_FABRIC_MGMT) && !bSrcIsKernel) + { + return NV_TRUE; + } + break; + } + default: + { + status = clientGetResourceRef(RES_GET_CLIENT(pResource), + RES_GET_CLIENT_HANDLE(pResource), + &pCliResRef); + if (status == NV_OK) + { + // Allow sharing if the resource's owner would be shared with + if (resShareCallback(pCliResRef->pResource, pInvokingClient, + pParentRef, pSharePolicy)) + return NV_TRUE; + } + break; + } + } + + // Delegate to superclass + return resShareCallback_IMPL(staticCast(pResource, RsResource), + pInvokingClient, pParentRef, pSharePolicy); +} + +void serverControl_InitCookie +( + const struct NVOC_EXPORTED_METHOD_DEF *exportedEntry, + RmCtrlExecuteCookie *pRmCtrlExecuteCookie +) +{ + // Copy from NVOC exportedEntry + pRmCtrlExecuteCookie->cmd = exportedEntry->methodId; + pRmCtrlExecuteCookie->ctrlFlags = exportedEntry->flags; + // One time initialization of a const variable + *(NvU32 *)&pRmCtrlExecuteCookie->rightsRequired.limbs[0] + = exportedEntry->accessRight; +} + +// +// This routine searches through the Resource's NVOC exported methods for an entry +// that matches the specified command. +// +// Same logic as rmControlCmdLookup() in legacy RMCTRL path +// +NV_STATUS rmresControlLookup_IMPL +( + RmResource *pResource, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams, + const struct NVOC_EXPORTED_METHOD_DEF **ppEntry +) +{ + NvU32 cmd = pRsParams->cmd; + + if (RMCTRL_IS_NULL_CMD(cmd)) + return NV_WARN_NOTHING_TO_DO; + + return resControlLookup_IMPL(staticCast(pResource, RsResource), pRsParams, ppEntry); +} + +NV_STATUS +rmresGetMemInterMapParams_IMPL +( + RmResource *pRmResource, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +rmresCheckMemInterUnmap_IMPL +( + RmResource *pRmResource, + NvBool bSubdeviceHandleProvided +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +rmresGetMemoryMappingDescriptor_IMPL +( + RmResource *pRmResource, + struct MEMORY_DESCRIPTOR **ppMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +rmresControl_Prologue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance); + + if (pGpu == NULL) + return NV_OK; + + if ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST)) || + (IS_GSP_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL))) + { + // + // GPU lock is required to protect the RPC buffers. 
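+        // (Presumably because both the GSP and vGPU-host RPC paths funnel
+        // through a shared per-GPU message buffer that must not be written
+        // concurrently.)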
+ // However, some controls have ROUTE_TO_PHYSICAL + NO_GPUS_LOCK flags set. + // This is not valid in offload mode, but is in monolithic. + // In those cases, just acquire the lock for the RPC + // + GPU_MASK gpuMaskRelease = 0; + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + // + // Log any case where the above assumption is not true, but continue + // anyway. Use SAFE_LOCK_UPGRADE to try and recover in these cases. + // + NV_ASSERT(pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK); + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease)); + } + + NV_RM_RPC_CONTROL(pGpu, pParams->hClient, pParams->hObject, pParams->cmd, + pParams->pParams, pParams->paramsSize, status); + + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + + return (status == NV_OK) ? NV_WARN_NOTHING_TO_DO : status; + } + return NV_OK; +} + +void +rmresControl_Epilogue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c new file mode 100644 index 0000000..d04d7cb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c @@ -0,0 +1,219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "resource_desc.h" + +// Need the full header for the class allocation param structure. +#define SDK_ALL_CLASSES_INCLUDE_FULL_HEADER +#include "g_allclasses.h" +// Not a class header, but contains an allocation struct used by several classes +#include "class/clb0b5sw.h" +#include "nvos.h" + +#include "rmapi/alloc_size.h" +#include "rmapi/resource_fwd_decls.h" +#include "resserv/rs_access_rights.h" + +// +// Macros to transform list into static table +// + +// NULL terminated list +#define RS_LIST(...) 
{__VA_ARGS__, 0} +#define RS_ROOT_OBJECT {0} +#define RS_ANY_PARENT {0} + +// Populate parents +#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \ + NvU32 cls##ParentList[] = parentList; + +#include "resource_list.h" + +#undef RS_LIST +#undef RS_ROOT_OBJECT +#undef RS_ANY_PARENT + + +#define RS_ACCESS_NONE {-1} +#define RS_ACCESS_LIST(...) {__VA_ARGS__} + +// Populate rights required +#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \ + static const RsAccessRight cls##_RightsRequiredArray[] = rightsRequired; + +#include "resource_list.h" + +#undef RS_ACCESS_NONE +#undef RS_ACCESS_LIST + +// Populate forward declarations +#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \ + extern const struct NVOC_CLASS_DEF __nvoc_class_def_##internalClass; /* defn here to keep POPULATE_STRUCT happy if the class is disabled */ + +#include "resource_list.h" + + +#define RS_REQUIRED(allocParam) sizeof(allocParam), NV_TRUE +#define RS_OPTIONAL(allocParam) sizeof(allocParam), NV_FALSE +#define RS_NONE 0, NV_FALSE +#define RS_ENTRY(cls, internalClass, bMultiInstance, bAnyParent, allocParam, freePriority, flags, bRightsRequired) \ +{ \ + cls, \ + classId(internalClass), \ + classInfo(internalClass), \ + allocParam, \ + bMultiInstance, \ + bAnyParent, \ + cls##ParentList, \ + freePriority, \ + flags, \ + cls##_RightsRequiredArray, \ + bRightsRequired ? NV_ARRAY_ELEMENTS(cls##_RightsRequiredArray) : 0, \ +}, + +#define RS_LIST(...) NV_FALSE +#define RS_ROOT_OBJECT NV_FALSE +#define RS_ANY_PARENT NV_TRUE +#define RS_ACCESS_NONE NV_FALSE +#define RS_ACCESS_LIST(...) NV_TRUE +static RS_RESOURCE_DESC +g_RsResourceDescList[] = +{ +#include "resource_list.h" +}; +#undef RS_LIST +#undef RS_ROOT_OBJECT +#undef RS_ANY_PARENT +#undef RS_ACCESS_NONE +#undef RS_ACCESS_LIST +#undef RS_REQUIRED +#undef RS_OPTIONAL +#undef RS_NONE + +#define NUM_ENTRIES_DESC_LIST NV_ARRAY_ELEMENTS32(g_RsResourceDescList) + +void RsResInfoInitialize(void) +{ + // + // Keep the array sorted by externalClassId, so we can binary search it + // Simple bubble-sort is fine here as the number of elements is below 300, + // and we only call this once on boot anyway. + // + NvU32 i, j; + for (i = 0; i < NUM_ENTRIES_DESC_LIST - 1; i++) + { + for (j = i + 1; j < NUM_ENTRIES_DESC_LIST; j++) + { + RS_RESOURCE_DESC *a = &g_RsResourceDescList[i]; + RS_RESOURCE_DESC *b = &g_RsResourceDescList[j]; + + if (a->externalClassId > b->externalClassId) + { + RS_RESOURCE_DESC tmp; + portMemCopy(&tmp, sizeof(tmp), a, sizeof(*a)); + portMemCopy(a, sizeof(*a), b, sizeof(*b)); + portMemCopy(b, sizeof(*b), &tmp, sizeof(tmp)); + } + } + } +} + +RS_RESOURCE_DESC * +RsResInfoByExternalClassId +( + NvU32 externalClassId +) +{ + NvU32 low = 0; + NvU32 high = NUM_ENTRIES_DESC_LIST; + + // Binary search the array; If not found, the break in the middle will be hit + while (1) + { + NvU32 mid = (low + high) / 2; + + if (g_RsResourceDescList[mid].externalClassId == externalClassId) + return &g_RsResourceDescList[mid]; + + if (high == mid || low == mid) + break; + + if (g_RsResourceDescList[mid].externalClassId > externalClassId) + high = mid; + else + low = mid; + } + + return NULL; +} + +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *pResDesc) +{ + return pResDesc ? 
pResDesc->internalClassId : 0; +} + +void RsResInfoGetResourceList(const RS_RESOURCE_DESC **ppResourceList, NvU32 *numResources) +{ + *ppResourceList = g_RsResourceDescList; + *numResources = NV_ARRAY_ELEMENTS(g_RsResourceDescList); +} + +NV_STATUS +rmapiGetClassAllocParamSize +( + NvU32 *pAllocParamSizeBytes, + NvP64 pUserParams, + NvBool *pBAllowNull, + NvU32 hClass +) +{ + RS_RESOURCE_DESC *pResDesc; + + *pAllocParamSizeBytes = 0; + *pBAllowNull = NV_FALSE; + + pResDesc = RsResInfoByExternalClassId(hClass); + + if (!pResDesc) + return NV_ERR_INVALID_CLASS; + + if (pResDesc->bParamRequired) + { + // params are required + *pAllocParamSizeBytes = pResDesc->allocParamSize; + } + else if (pResDesc->allocParamSize) + { + // params are *optional* + *pBAllowNull = NV_TRUE; + if (pUserParams != (NvP64) 0) + *pAllocParamSizeBytes = pResDesc->allocParamSize; + } + else + { + // no params + *pBAllowNull = NV_TRUE; + } + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h new file mode 100644 index 0000000..4750f50 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _RESOURCE_DESC_H_ +#define _RESOURCE_DESC_H_ + +#include "nvtypes.h" +#include "nvoc/runtime.h" +#include "resserv/rs_access_rights.h" + +// Flags for RS_ENTRY +#define RS_FLAGS_NONE 0 + +#define RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC NVBIT(0) ///< GPUs Lock is acquired on allocation +#define RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE NVBIT(1) ///< GPUs Lock is acquired for free +#define RS_FLAGS_ACQUIRE_GPUS_LOCK (RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE) + +#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC NVBIT(2) ///< GPU Group Lock is acquired on allocation +#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE NVBIT(3) ///< GPU Group Lock is acquired for free +#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK (RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE) + +#define RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST NVBIT(4) ///< Issue RPC to host to allocate resource for virtual GPUs + +#define RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC NVBIT(5) ///< Acquire the RO API lock for allocation, default is RW API lock + +#define RS_FLAGS_ALLOC_RPC_TO_PHYS_RM NVBIT(6) ///< Issue RPC to allocate resource in physical RM + +#define RS_FLAGS_ALLOC_RPC_TO_ALL (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM) + +#define RS_FLAGS_INTERNAL_ONLY NVBIT(7) ///< Class cannot be allocated outside of RM + +#define RS_FLAGS_CHANNEL_DESCENDANT_COMMON (RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL) + +#define RS_FREE_PRIORITY_DEFAULT 0 +#define RS_FREE_PRIORITY_HIGH 1 ///< Resources with this priority will be freed ahead of others + +/** + * Information about a RsResource subclass. + */ +typedef struct RS_RESOURCE_DESC +{ + NvU32 externalClassId; ///< Id of the class as seen by the client + NvU32 internalClassId; ///< NVOC class ID, mirrored from pClassInfo->classId + const NVOC_CLASS_INFO *pClassInfo; ///< RTTI information for internal class + NvU32 allocParamSize; ///< Size of allocation param structure + NvBool bParamRequired; ///< If not required, param size can be 0 or allocParamSize + NvBool bMultiInstance; ///< Multiple instances of this object under a parent + NvBool bAnyParent; ///< Resource can be allocated under any parent + NvU32 *pParentList; ///< NULL terminated list of internalClassId of parents + NvU32 freePriority; ///< RS_FREE_PRIORITY_* + NvU32 flags; ///< Flags + const RsAccessRight *pRightsRequiredArray; ///< Access rights required to allocate this resource + NvLength rightsRequiredLength; ///< Length of pRightsRequiredArray +} RS_RESOURCE_DESC; + +/** Initialize the global resource info table */ +void RsResInfoInitialize(void); + +/** + * Look up RS_RESOURCE_DESC using the externalClassId. The id of the class as + * seen by clients. + */ +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32 externalClassId); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** Get the global resource info table */ +void RsResInfoGetResourceList(const RS_RESOURCE_DESC **ppResourceList, NvU32 *numResources); + +#endif // _RESOURCE_DESC_H_ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h new file mode 100644 index 0000000..44620b1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h @@ -0,0 +1,336 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//
+// No include guards - this file is included multiple times, each time with a
+// different definition for RS_ENTRY
+//
+
+//
+// Table describing all RsResource subclasses.
+//
+// Internal Class - there is an RM internal class representing each class
+// exported to RM clients. The internal name of the class should be similar to
+// the symbolic name used by clients. If there is ambiguity between RM internal
+// classes, e.g. between the PMU engine (OBJPMU) and the exported class, it's
+// recommended to use Api as the suffix to disambiguate; for example, OBJPMU
+// (the engine) vs PmuApi (the per-client api object). It's also recommended to
+// avoid using Object, Resource, etc. as those terms don't improve clarity.
+// If there is no ambiguity, there is no need to add the Api suffix; for
+// example, Channel is preferred over ChannelApi (there is no other Channel
+// object in RM).
+//
+// Multi-Instance - NV_TRUE if there can be multiple instances of this object's
+// *internal* class id under a parent.
+//
+// This list should eventually replace the similar lists in nvapi.c and
+// rmctrl.c. The number of fields in the table should be kept minimal, just
+// enough to create the object, with as much of the detail as possible
+// specified within the class itself.
+//
+// In the future we should consider switching to a registration approach or
+// generating with NVOC and/or annotating the class definition.
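+//
+// A sketch of the X-macro pattern this header relies on (illustrative only;
+// the real consumers live in resource_desc.c): an includer defines RS_ENTRY
+// to extract just the columns it needs, then includes this file, e.g.
+//
+//     #define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, \
+//                      allocParam, freePriority, flags, rightsRequired) \
+//         case cls: return #internalClass;
+//     #include "resource_list.h"   // expands to one case label per entry
+//
+// RS_ENTRY is #undef'd at the bottom of this file, so call sites do not need
+// to clean it up themselves. resource_desc.c includes this header several
+// times in exactly this way to build the parent lists, the rights-required
+// arrays, and the descriptor table itself.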
+// +// RS-TODO: Rename classes that have 'Object' in their names +// + + + +RS_ENTRY( + /* External Class */ NV01_ROOT, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_ROOT_NON_PRIV, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_ROOT_CLIENT, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV0020_GPU_MANAGEMENT, + /* Internal Class */ GpuManagementApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_DEVICE_0, + /* Internal Class */ Device, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_OPTIONAL(NV0080_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GF100_HDACODEC, + /* Internal Class */ Hdacodec, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Channels can have a CHANNEL_GROUP, a DEVICE, or a CONTEXT_SHARE (starting in Volta) as parents */ + /* RS-TODO: Update channel parent list when CONTEXT_SHARE is added */ +RS_ENTRY( + /* External Class */ NV20_SUBDEVICE_0, + /* Internal Class */ Subdevice, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_OPTIONAL(NV2080_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV2081_BINAPI, + /* Internal Class */ BinaryApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_OPTIONAL(NV2081_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV2082_BINAPI_PRIVILEGED, + /* Internal Class */ BinaryApiPrivileged, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc 
Param Info */ RS_OPTIONAL(NV2082_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYSTEM, + /* Internal Class */ SystemMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + /* Internal Class */ OsDescMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_OS_DESC_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYNCPOINT, + /* Internal Class */ SyncpointMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Subdevice Children: */ +RS_ENTRY( + /* External Class */ NVC671_DISP_SF_USER, + /* Internal Class */ DispSfUser, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Display classes: */ +RS_ENTRY( + /* External Class */ NVC670_DISPLAY, + /* Internal Class */ NvDispApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC372_DISPLAY_SW, + /* Internal Class */ DispSwObj, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV04_DISPLAY_COMMON, + /* Internal Class */ DispCommon, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67A_CURSOR_IMM_CHANNEL_PIO, + /* Internal Class */ DispChannelPio, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67B_WINDOW_IMM_CHANNEL_DMA, + /* Internal 
Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67D_CORE_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC77F_ANY_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67E_WINDOW_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC673_DISP_CAPABILITIES, + /* Internal Class */ DispCapabilities, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Classes allocated under channel: */ +RS_ENTRY( + /* External Class */ NV01_CONTEXT_DMA, + /* Internal Class */ ContextDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV_CONTEXT_DMA_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_OS_EVENT, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_KERNEL_CALLBACK, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_KERNEL_CALLBACK_EX, + /* Internal 
Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + +// Undefine the entry macro to simplify call sites +#undef RS_ENTRY diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c new file mode 100644 index 0000000..a07481d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c @@ -0,0 +1,694 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#include "nvrm_registry.h" +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "resserv/rs_server.h" +#include "rmapi/rs_utils.h" +#include "gpu/gpu_resource.h" +#include "gpu/device/device.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "diagnostics/tracer.h" +#include "tls/tls.h" +#include "core/thread_state.h" +#include "gpu_mgr/gpu_mgr.h" +#include "resource_desc.h" + +typedef struct +{ + PORT_RWLOCK * pLock; + NvU64 threadId; + NvU64 timestamp; + LOCK_TRACE_INFO traceInfo; + NvU64 tlsEntryId; + +} RMAPI_LOCK; + +RsServer g_resServ; +static RM_API g_RmApiList[RMAPI_TYPE_MAX]; +static NvBool g_bResServInit = NV_FALSE; +static RMAPI_LOCK g_RmApiLock; + +static void _rmapiInitInterface(RM_API *pRmApi, API_SECURITY_INFO *pDefaultSecurityInfo, NvBool bTlsInternal, + NvBool bApiLockInternal, NvBool bGpuLockInternal); +static NV_STATUS _rmapiLockAlloc(void); +static void _rmapiLockFree(void); + +// from rmapi_stubs.c +void rmapiInitStubInterface(RM_API *pRmApi); + +NV_STATUS +rmapiInitialize +( + void +) +{ + NV_STATUS status = NV_OK; + API_SECURITY_INFO secInfo = {0}; + + NV_ASSERT(!g_bResServInit); + + status = _rmapiLockAlloc(); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot allocate rmapi locks\n"); + return status; + } + + RsResInfoInitialize(); + status = serverConstruct(&g_resServ, RS_PRIV_LEVEL_HOST, 0); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize resource server\n"); + _rmapiLockFree(); + return status; + } + + rmapiControlCacheInit(); + + listInit(&g_clientListBehindGpusLock, g_resServ.pAllocator); + listInit(&g_userInfoList, g_resServ.pAllocator); + + secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + secInfo.paramLocation = PARAM_LOCATION_KERNEL; + + _rmapiInitInterface(&g_RmApiList[RMAPI_EXTERNAL], NULL, NV_FALSE /* bTlsInternal */, NV_FALSE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_EXTERNAL_KERNEL], &secInfo, NV_FALSE /* bTlsInternal */, NV_FALSE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_MODS_LOCK_BYPASS], &secInfo, NV_FALSE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_TRUE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_API_LOCK_INTERNAL], &secInfo, NV_TRUE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_GPU_LOCK_INTERNAL], &secInfo, NV_TRUE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_TRUE /* bGpuLockInternal */); + + rmapiInitStubInterface(&g_RmApiList[RMAPI_STUBS]); + + g_bResServInit = NV_TRUE; + + return status; +} + +void +rmapiShutdown +( + void +) +{ + if (!g_bResServInit) + return; + + serverFreeDomain(&g_resServ, 0); + serverDestruct(&g_resServ); + _rmapiLockFree(); + + rmapiControlCacheFree(); + + g_bResServInit = NV_FALSE; +} + +static void +_rmapiInitInterface +( + RM_API *pRmApi, + API_SECURITY_INFO *pDefaultSecInfo, + NvBool bTlsInternal, + NvBool bApiLockInternal, + NvBool bGpuLockInternal +) +{ + // + // Initialize to all stubs first, so any APIs not explicitly set here + // will return NV_ERR_NOT_SUPPORTED if called + // + rmapiInitStubInterface(pRmApi); + + // + // Init members + // + if (pDefaultSecInfo) + pRmApi->defaultSecInfo = *pDefaultSecInfo; + + pRmApi->bHasDefaultSecInfo = !!pDefaultSecInfo; + pRmApi->bTlsInternal = bTlsInternal; + pRmApi->bApiLockInternal = bApiLockInternal; + pRmApi->bRmSemaInternal = bApiLockInternal; + pRmApi->bGpuLockInternal 
= bGpuLockInternal; + pRmApi->pPrivateContext = NULL; + + // + // Init function pointers + // + pRmApi->Alloc = rmapiAlloc; + pRmApi->AllocWithHandle = rmapiAllocWithHandle; + pRmApi->AllocWithSecInfo = pRmApi->bTlsInternal ? rmapiAllocWithSecInfo : rmapiAllocWithSecInfoTls; + + pRmApi->FreeClientList = rmapiFreeClientList; + pRmApi->FreeClientListWithSecInfo = pRmApi->bTlsInternal ? rmapiFreeClientListWithSecInfo : rmapiFreeClientListWithSecInfoTls; + + pRmApi->Free = rmapiFree; + pRmApi->FreeWithSecInfo = pRmApi->bTlsInternal ? rmapiFreeWithSecInfo : rmapiFreeWithSecInfoTls; + + pRmApi->Control = rmapiControl; + pRmApi->ControlWithSecInfo = pRmApi->bTlsInternal ? rmapiControlWithSecInfo : rmapiControlWithSecInfoTls; + + pRmApi->DupObject = rmapiDupObject; + pRmApi->DupObjectWithSecInfo = pRmApi->bTlsInternal ? rmapiDupObjectWithSecInfo : rmapiDupObjectWithSecInfoTls; + + pRmApi->Share = rmapiShare; + pRmApi->ShareWithSecInfo = pRmApi->bTlsInternal ? rmapiShareWithSecInfo : rmapiShareWithSecInfoTls; + + pRmApi->MapToCpu = rmapiMapToCpu; + pRmApi->MapToCpuWithSecInfo = pRmApi->bTlsInternal ? rmapiMapToCpuWithSecInfo : rmapiMapToCpuWithSecInfoTls; + pRmApi->MapToCpuWithSecInfoV2 = pRmApi->bTlsInternal ? rmapiMapToCpuWithSecInfoV2 : rmapiMapToCpuWithSecInfoTlsV2; + + pRmApi->UnmapFromCpu = rmapiUnmapFromCpu; + pRmApi->UnmapFromCpuWithSecInfo = pRmApi->bTlsInternal ? rmapiUnmapFromCpuWithSecInfo : rmapiUnmapFromCpuWithSecInfoTls; + + pRmApi->Map = rmapiMap; + pRmApi->MapWithSecInfo = pRmApi->bTlsInternal ? rmapiMapWithSecInfo : rmapiMapWithSecInfoTls; + + pRmApi->Unmap = rmapiUnmap; + pRmApi->UnmapWithSecInfo = pRmApi->bTlsInternal ? rmapiUnmapWithSecInfo : rmapiUnmapWithSecInfoTls; +} + +RM_API * +rmapiGetInterface +( + RMAPI_TYPE rmapiType +) +{ + return &g_RmApiList[rmapiType]; +} + +NV_STATUS +rmapiPrologue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +) +{ + NV_STATUS status = NV_OK; + return status; +} + +void +rmapiEpilogue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +) +{ +} + +void +rmapiInitLockInfo +( + RM_API *pRmApi, + NvHandle hClient, + RS_LOCK_INFO *pLockInfo +) +{ + NV_ASSERT_OR_RETURN_VOID(pLockInfo != NULL); + pLockInfo->flags = 0; + pLockInfo->state = 0; + + if (hClient != 0) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + if ((pCallContext != NULL) && (pCallContext->pLockInfo != NULL)) + { + pLockInfo->state = pCallContext->pLockInfo->state; + + if ((pCallContext->pLockInfo->pClient != NULL) && + (pCallContext->pLockInfo->pClient->hClient == hClient)) + { + pLockInfo->pClient = pCallContext->pLockInfo->pClient; + } + else + { + pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED; + } + } + } + + if (!pRmApi->bRmSemaInternal) + pLockInfo->flags |= RM_LOCK_FLAGS_RM_SEMA; + + if (pRmApi->bApiLockInternal) + { + pLockInfo->state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + + // RS-TODO: Assert that API rwlock is taken if no client is locked + if (pLockInfo->pClient == NULL) + pLockInfo->flags |= RM_LOCK_FLAGS_NO_CLIENT_LOCK; + } + + if (pRmApi->bGpuLockInternal) + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; +} + +static NV_STATUS +_rmapiLockAlloc(void) +{ + // Turn on by default for Linux to get some soak time + // bug 2539044, bug 2536036: Enable by default. 
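+    // The regkey read just below can still override this default: a value of
+    // 0 forces locked parameter copies, while any non-zero value keeps the
+    // unlocked path.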
+    g_resServ.bUnlockedParamCopy = NV_TRUE;
+
+    NvU32 val = 0;
+    if ((osReadRegistryDword(NULL,
+                             NV_REG_STR_RM_PARAM_COPY_NO_LOCK,
+                             &val) == NV_OK))
+    {
+        g_resServ.bUnlockedParamCopy = (val != 0);
+    }
+
+    portMemSet(&g_RmApiLock, 0, sizeof(g_RmApiLock));
+    g_RmApiLock.threadId = ~((NvU64)(0));
+    g_RmApiLock.pLock = portSyncRwLockCreate(portMemAllocatorGetGlobalNonPaged());
+    if (g_RmApiLock.pLock == NULL)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    g_RmApiLock.tlsEntryId = tlsEntryAlloc();
+
+    return NV_OK;
+}
+
+static void
+_rmapiLockFree(void)
+{
+    portSyncRwLockDestroy(g_RmApiLock.pLock);
+}
+
+NV_STATUS
+rmapiLockAcquire(NvU32 flags, NvU32 module)
+{
+    NV_STATUS rmStatus = NV_OK;
+    NvU64 threadId = portThreadGetCurrentThreadId();
+
+    NvU64 myPriority = 0;
+
+    LOCK_ASSERT_AND_RETURN(!rmapiLockIsOwner());
+
+    //
+    // If a read-only lock was requested, check to see if the module is allowed
+    // to take read-only locks
+    //
+    if ((flags & RMAPI_LOCK_FLAGS_READ) && (module != RM_LOCK_MODULES_NONE))
+    {
+        OBJSYS *pSys = SYS_GET_INSTANCE();
+        if ((pSys->apiLockModuleMask & RM_LOCK_MODULE_GRP(module)) == 0)
+        {
+            flags &= ~RMAPI_LOCK_FLAGS_READ;
+        }
+    }
+
+    //
+    // For conditional acquires and DISPATCH_LEVEL we want to exit
+    // immediately without waiting.
+    //
+    // If RM Locking V3 Lite is not enabled, *always* acquire the API
+    // lock in WRITE mode to ensure compatibility with Locking model V2
+    // behavior (providing exclusive access to the resource).
+    //
+    flags = osApiLockAcquireConfigureFlags(flags);
+    if (flags & API_LOCK_FLAGS_COND_ACQUIRE)
+    {
+        if ((flags & RMAPI_LOCK_FLAGS_READ))
+        {
+            if (!portSyncRwLockAcquireReadConditional(g_RmApiLock.pLock))
+                rmStatus = NV_ERR_TIMEOUT_RETRY;
+        }
+        else
+        {
+            if (portSyncRwLockAcquireWriteConditional(g_RmApiLock.pLock))
+            {
+                g_RmApiLock.threadId = threadId;
+            }
+            else
+            {
+                rmStatus = NV_ERR_TIMEOUT_RETRY;
+            }
+        }
+    }
+    else
+    {
+        if ((flags & RMAPI_LOCK_FLAGS_READ))
+        {
+            portSyncRwLockAcquireRead(g_RmApiLock.pLock);
+        }
+        else
+        {
+            portSyncRwLockAcquireWrite(g_RmApiLock.pLock);
+            g_RmApiLock.threadId = threadId;
+        }
+    }
+
+    if (rmStatus == NV_OK)
+    {
+        NvU64 timestamp;
+        osGetCurrentTick(&timestamp);
+
+        if (g_RmApiLock.threadId == threadId)
+            g_RmApiLock.timestamp = timestamp;
+
+        // save off owning thread
+        RMTRACE_RMLOCK(_API_LOCK_ACQUIRE);
+
+        // add api lock trace record
+        INSERT_LOCK_TRACE(&g_RmApiLock.traceInfo,
+                          NV_RETURN_ADDRESS(),
+                          lockTraceAcquire,
+                          flags, module,
+                          threadId,
+                          !portSyncExSafeToSleep(),
+                          myPriority,
+                          timestamp);
+
+        //
+        // If enabled, reset the timeout now that we are running and off
+        // the Sleep Queue.
+        //
+        if (threadStateGetSetupFlags() &
+            THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED)
+        {
+            threadStateResetTimeout(NULL);
+        }
+    }
+
+    NvP64 *pAcquireAddress = tlsEntryAcquire(g_RmApiLock.tlsEntryId);
+    if (pAcquireAddress != NULL)
+    {
+        *pAcquireAddress = (NvP64)(NvUPtr)NV_RETURN_ADDRESS();
+    }
+
+    return rmStatus;
+}
+
+void
+rmapiLockRelease(void)
+{
+    NvU64 threadId = portThreadGetCurrentThreadId();
+    NvU64 timestamp;
+
+    osGetCurrentTick(&timestamp);
+
+    RMTRACE_RMLOCK(_API_LOCK_RELEASE);
+
+    // add api lock trace record
+    INSERT_LOCK_TRACE(&g_RmApiLock.traceInfo,
+                      NV_RETURN_ADDRESS(),
+                      lockTraceRelease,
+                      0, 0,
+                      threadId,
+                      !portSyncExSafeToSleep(),
+                      0,
+                      timestamp);
+
+    if (g_RmApiLock.threadId == threadId)
+    {
+        //
+        // If the threadId in the global is the same as the current thread id,
+        // then we know that it was acquired in WRITE mode.
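+        // Readers never record their threadId, so if it does not match, the
+        // lock must have been taken in READ mode and is released as such.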
+ // + g_RmApiLock.threadId = ~0ull; + g_RmApiLock.timestamp = timestamp; + portSyncRwLockReleaseWrite(g_RmApiLock.pLock); + + } + else + { + portSyncRwLockReleaseRead(g_RmApiLock.pLock); + } + + tlsEntryRelease(g_RmApiLock.tlsEntryId); +} + +NvBool +rmapiLockIsOwner(void) +{ + return tlsEntryGet(g_RmApiLock.tlsEntryId) != NvP64_NULL; +} + +// +// Mark for deletion the client resources from the data base, given a GPU mask +// +void +rmapiSetDelPendingClientResourcesFromGpuMask +( + NvU32 gpuMask +) +{ + RS_ITERATOR it; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + Device *pDevice; + NvBool bDevicesInMask = NV_FALSE; + OBJGPU *pGpu; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + // Check that one of the devices is in the GPU mask + bDevicesInMask = NV_FALSE; + while (clientRefIterNext(it.pClient, &it)) + { + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + if (!pDevice) + { + continue; + } + + pGpu = GPU_RES_GET_GPU(pDevice); + if ((gpuMask & NVBIT(gpuGetInstance(pGpu))) != 0) + { + bDevicesInMask = NV_TRUE; + break; + } + } + + if (bDevicesInMask == NV_FALSE) + { + continue; + } + + pClient->Flags |= RMAPI_CLIENT_FLAG_DELETE_PENDING; + } +} + +void +rmapiDelPendingDevices +( + NvU32 gpuMask +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (((pClient->Flags & RMAPI_CLIENT_FLAG_DELETE_PENDING) != 0) && + ((pClient->Flags & RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT) == 0)) + { + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &it)) + { + RsResourceRef *pDeviceRef = it.pResourceRef; + Device *pDevice = dynamicCast(pDeviceRef->pResource, Device); + + if ((gpuMask & NVBIT(gpuGetInstance(GPU_RES_GET_GPU(pDevice)))) != 0) + { + pRmApi->Free(pRmApi, pRsClient->hClient, pDeviceRef->hResource); + + // Client's resource map has been modified, re-snap iterator + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + } + + } + + ppClient = serverutilGetNextClientUnderLock(ppClient); + } +} + +void +rmapiReportLeakedDevices +( + NvU32 gpuMask +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &it)) + { + RsResourceRef *pDeviceRef = it.pResourceRef; + Device *pDevice = dynamicCast(pDeviceRef->pResource, Device); + + if ((gpuMask & NVBIT(gpuGetInstance(GPU_RES_GET_GPU(pDevice)))) != 0) + { + NV_PRINTF(LEVEL_ERROR, + "Device object leak: (0x%x, 0x%x). 
Please file a bug against RM-core.\n", + pRsClient->hClient, pDeviceRef->hResource); + NV_ASSERT(0); + + // Delete leaked resource from database + pRmApi->Free(pRmApi, pRsClient->hClient, pDeviceRef->hResource); + + // Client's resource map has been modified, re-snap iterator + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + } + + ppClient = serverutilGetNextClientUnderLock(ppClient); + } +} + +// +// Delete the marked client resources +// +void +rmapiDelPendingClients +( + void +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + ppClient = serverutilGetNextClientUnderLock(ppClient); + if ((pClient->Flags & RMAPI_CLIENT_FLAG_DELETE_PENDING) != 0) + { + // Only free clients that have no devices left + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + if (!clientRefIterNext(pRsClient, &it)) + pRmApi->Free(pRmApi, pRsClient->hClient, pRsClient->hClient); + } + } +} + +NV_STATUS +rmapiGetClientHandlesFromOSInfo +( + void *pOSInfo, + NvHandle **ppClientHandleList, + NvU32 *pClientHandleListSize +) +{ + NvHandle *pClientHandleList; + NvU32 clientHandleListSize = 0; + NvU32 k; + + RmClient **ppClient; + RmClient **ppFirstClient; + RmClient *pClient; + RsClient *pRsClient; + + ppFirstClient = NULL; + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + if (pClient->pOSInfo != pOSInfo) + { + continue; + } + clientHandleListSize++; + + if (NULL == ppFirstClient) + ppFirstClient = ppClient; + } + + if (clientHandleListSize == 0) + { + *pClientHandleListSize = 0; + *ppClientHandleList = NULL; + return NV_ERR_INVALID_ARGUMENT; + } + + pClientHandleList = portMemAllocNonPaged(clientHandleListSize * sizeof(NvU32)); + if (pClientHandleList == NULL) + { + return NV_ERR_NO_MEMORY; + } + + *pClientHandleListSize = clientHandleListSize; + *ppClientHandleList = pClientHandleList; + + k = 0; + for (ppClient = ppFirstClient; + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + if (pClient->pOSInfo != pOSInfo) + { + continue; + } + pClientHandleList[k++] = pRsClient->hClient; + + if (clientHandleListSize <= k) + break; + } + + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c new file mode 100644 index 0000000..b4c8d91 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c @@ -0,0 +1,277 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "containers/map.h" +#include "containers/multimap.h" +#include "ctrl/ctrlxxxx.h" +#include "nvctassert.h" +#include "nvmisc.h" +#include "nvoc/rtti.h" +#include "nvport/sync.h" +#include "nvrm_registry.h" +#include "os/os.h" +#include "rmapi/control.h" +#include "resource_desc.h" +#include "rmapi/rmapi.h" + +typedef struct +{ + void* params; +} RmapiControlCacheEntry; + +MAKE_MULTIMAP(CachedCallParams, RmapiControlCacheEntry); + +ct_assert(sizeof(NvHandle) <= 4); + +#define CLIENT_KEY_SHIFT (sizeof(NvHandle) * 8) + +static NvHandle keyToClient(NvU64 key) +{ + return (key >> CLIENT_KEY_SHIFT); +} + +static NvU64 handlesToKey(NvHandle hClient, NvHandle hObject) +{ + return ((NvU64)hClient << CLIENT_KEY_SHIFT) | hObject; +} + +static struct { + /* NOTE: Size unbounded for now */ + CachedCallParams cachedCallParams; + NvU32 mode; + PORT_MUTEX *mtx; +} RmapiControlCache; + +NvBool rmapiControlIsCacheable(NvU32 flags, NvBool isGSPClient) +{ + if (RmapiControlCache.mode == NV_REG_STR_RM_CACHEABLE_CONTROLS_ENABLE) + { + return !!(flags & RMCTRL_FLAGS_CACHEABLE); + } + if (RmapiControlCache.mode == NV_REG_STR_RM_CACHEABLE_CONTROLS_GSP_ONLY) + { + return (flags & RMCTRL_FLAGS_CACHEABLE) && + (flags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && + isGSPClient; + } + return NV_FALSE; +} + +NvBool rmapiCmdIsCacheable(NvU32 cmd, NvBool isGSPClient) +{ + RS_RESOURCE_DESC *pResourceDesc = RsResInfoByExternalClassId(DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd)); + + if (pResourceDesc) + { + struct NVOC_CLASS_DEF *pClassDef = (void*)pResourceDesc->pClassInfo; + if (pClassDef) + { + const struct NVOC_EXPORTED_METHOD_DEF *pMethodDef = nvocGetExportedMethodDefFromMethodInfo_IMPL(pClassDef->pExportInfo, cmd); + + if (pMethodDef) + return rmapiControlIsCacheable(pMethodDef->flags, isGSPClient); + } + + } + return NV_FALSE; +} + +void rmapiControlCacheInit() +{ + RmapiControlCache.mode = NV_REG_STR_RM_CACHEABLE_CONTROLS_GSP_ONLY; + + osReadRegistryDword(NULL, NV_REG_STR_RM_CACHEABLE_CONTROLS, &RmapiControlCache.mode); + NV_PRINTF(LEVEL_INFO, "using cache mode %d\n", RmapiControlCache.mode); + + if (RmapiControlCache.mode) + { + multimapInit(&RmapiControlCache.cachedCallParams, portMemAllocatorGetGlobalNonPaged()); + RmapiControlCache.mtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (!RmapiControlCache.mtx) + { + NV_PRINTF(LEVEL_ERROR, "failed to create mutex"); + RmapiControlCache.mode = 
NV_REG_STR_RM_CACHEABLE_CONTROLS_DISABLE; + } + } +} + +const void* rmapiControlCacheGet(NvHandle hClient, NvHandle hObject, NvU32 cmd) +{ + NV_PRINTF(LEVEL_INFO, "cache lookup for 0x%x 0x%x 0x%x\n", hClient, hObject, cmd); + portSyncMutexAcquire(RmapiControlCache.mtx); + RmapiControlCacheEntry* entry = multimapFindItem(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject), cmd); + portSyncMutexRelease(RmapiControlCache.mtx); + NV_PRINTF(LEVEL_INFO, "cache entry for 0x%x 0x%x 0x%x: entry 0x%p\n", hClient, hObject, cmd, entry); + if (entry) + return entry->params; + return NULL; +} + +NV_STATUS rmapiControlCacheSet +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + const void* params, + NvU32 paramsSize +) +{ + portSyncMutexAcquire(RmapiControlCache.mtx); + NV_STATUS status = NV_OK; + RmapiControlCacheEntry* entry = multimapFindItem(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject), cmd); + CachedCallParamsSubmap* insertedSubmap = NULL; + + if (!entry) + { + if (!multimapFindSubmap(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject))) + { + insertedSubmap = multimapInsertSubmap(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject)); + if (!insertedSubmap) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + entry = multimapInsertItemNew(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject), cmd); + } + + if (!entry) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + entry->params = portMemAllocNonPaged(paramsSize); + if (!entry->params) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemCopy(entry->params, paramsSize, params, paramsSize); + +done: + if (status != NV_OK) + { + /* To avoid leaking memory, remove the newly inserted empty submap and entry */ + if (entry) + { + portMemFree(entry->params); + multimapRemoveItem(&RmapiControlCache.cachedCallParams, entry); + } + + if (insertedSubmap) + multimapRemoveSubmap(&RmapiControlCache.cachedCallParams, insertedSubmap); + } + + portSyncMutexRelease(RmapiControlCache.mtx); + + return status; +} + +static void freeSubmap(CachedCallParamsSubmap* submap) +{ + /* (Sub)map modification invalidates the iterator, so we have to restart */ + while (NV_TRUE) + { + CachedCallParamsIter it = multimapSubmapIterItems(&RmapiControlCache.cachedCallParams, submap); + + if (multimapItemIterNext(&it)) + { + RmapiControlCacheEntry* entry = it.pValue; + portMemFree(entry->params); + multimapRemoveItem(&RmapiControlCache.cachedCallParams, entry); + } + else + { + break; + } + } + multimapRemoveSubmap(&RmapiControlCache.cachedCallParams, submap); +} + +void rmapiControlCacheFreeClient(NvHandle hClient) +{ + if (!RmapiControlCache.mode) + return; + + portSyncMutexAcquire(RmapiControlCache.mtx); + while (NV_TRUE) + { + CachedCallParamsSubmap* start = multimapFindSubmapGEQ(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, 0)); + CachedCallParamsSubmap* end = multimapFindSubmapLEQ(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, NV_U32_MAX)); + + if (!start || !end || + keyToClient(multimapSubmapKey(&RmapiControlCache.cachedCallParams, start)) != hClient || + keyToClient(multimapSubmapKey(&RmapiControlCache.cachedCallParams, end)) != hClient) + { + break; + } + + CachedCallParamsSupermapIter it = multimapSubmapIterRange(&RmapiControlCache.cachedCallParams, start, end); + + if (multimapSubmapIterNext(&it)) + { + CachedCallParamsSubmap* submap = it.pValue; + freeSubmap(submap); + } + else + { + break; + } + } + 
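+    // No submap keyed to hClient remains in the cache at this point.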
portSyncMutexRelease(RmapiControlCache.mtx); +} + +void rmapiControlCacheFreeObject(NvHandle hClient, NvHandle hObject) +{ + CachedCallParamsSubmap* submap; + + if (!RmapiControlCache.mode) + return; + + portSyncMutexAcquire(RmapiControlCache.mtx); + + submap = multimapFindSubmap(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject)); + if (submap) + freeSubmap(submap); + + portSyncMutexRelease(RmapiControlCache.mtx); +} + +void rmapiControlCacheFree(void) { + CachedCallParamsIter it; + + if (!RmapiControlCache.mode) + return; + + it = multimapItemIterAll(&RmapiControlCache.cachedCallParams); + while (multimapItemIterNext(&it)) + { + RmapiControlCacheEntry* entry = it.pValue; + portMemFree(entry->params); + } + + multimapDestroy(&RmapiControlCache.cachedCallParams); + portSyncMutexDestroy(RmapiControlCache.mtx); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c new file mode 100644 index 0000000..dabb2cb --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c @@ -0,0 +1,183 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi.h" + + +static NV_STATUS _rmapiAlloc_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiAllocWithHandle_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, void *pAllocParams) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiAllocWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams, + NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFree_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFreeWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFreeClientList_STUB(RM_API *pRmApi, NvHandle *phClientList, NvU32 numClients) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFreeClientListWithSecInfo_STUB(RM_API *pRmApi, NvHandle *phClientList, + NvU32 numClients, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControl_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + void *pParams, NvU32 paramsSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControlWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + NvP64 pParams, NvU32 paramsSize, NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControlPrefetch_STUB(RM_API *pRmApi, NvU32 cmd) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDupObject_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject, + NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDupObjectWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags, + API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiShare_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiShareWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapToCpu_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, void **ppCpuVirtAddr, NvU32 flags) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapToCpuWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapFromCpu_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, void *pLinearAddress, + NvU32 flags, NvU32 ProcessId) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapFromCpuWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvP64 pLinearAddress, NvU32 flags, NvU32 ProcessId, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS 
_rmapiMap_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmap_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU32 flags, NvU64 dmaOffset) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU32 flags, NvU64 dmaOffset, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void rmapiInitStubInterface(RM_API *pRmApi) +{ + portMemSet(pRmApi, 0, sizeof(*pRmApi)); + + pRmApi->Alloc = _rmapiAlloc_STUB; + pRmApi->AllocWithHandle = _rmapiAllocWithHandle_STUB; + pRmApi->AllocWithSecInfo = _rmapiAllocWithSecInfo_STUB; + pRmApi->Free = _rmapiFree_STUB; + pRmApi->FreeWithSecInfo = _rmapiFreeWithSecInfo_STUB; + pRmApi->FreeClientList = _rmapiFreeClientList_STUB; + pRmApi->FreeClientListWithSecInfo = _rmapiFreeClientListWithSecInfo_STUB; + pRmApi->Control = _rmapiControl_STUB; + pRmApi->ControlWithSecInfo = _rmapiControlWithSecInfo_STUB; + pRmApi->ControlPrefetch = _rmapiControlPrefetch_STUB; + pRmApi->DupObject = _rmapiDupObject_STUB; + pRmApi->DupObjectWithSecInfo = _rmapiDupObjectWithSecInfo_STUB; + pRmApi->Share = _rmapiShare_STUB; + pRmApi->ShareWithSecInfo = _rmapiShareWithSecInfo_STUB; + pRmApi->MapToCpu = _rmapiMapToCpu_STUB; + pRmApi->MapToCpuWithSecInfo = _rmapiMapToCpuWithSecInfo_STUB; + pRmApi->UnmapFromCpu = _rmapiUnmapFromCpu_STUB; + pRmApi->UnmapFromCpuWithSecInfo = _rmapiUnmapFromCpuWithSecInfo_STUB; + pRmApi->Map = _rmapiMap_STUB; + pRmApi->MapWithSecInfo = _rmapiMapWithSecInfo_STUB; + pRmApi->Unmap = _rmapiUnmap_STUB; + pRmApi->UnmapWithSecInfo = _rmapiUnmapWithSecInfo_STUB; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c new file mode 100644 index 0000000..7090b1d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c @@ -0,0 +1,147 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmapi/rmapi_utils.h" +#include "rmapi/rs_utils.h" +#include "resource_desc.h" + +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" + +NV_STATUS +rmapiutilAllocClientAndDeviceHandles +( + RM_API *pRmApi, + OBJGPU *pGpu, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +) +{ + NV_STATUS rmStatus; + NV0080_ALLOC_PARAMETERS nv0080AllocParams; + NV2080_ALLOC_PARAMETERS nv2080AllocParams; + NvHandle hClient = NV01_NULL_OBJECT; + NvHandle hDevice = NV01_NULL_OBJECT; + NvHandle hSubDevice = NV01_NULL_OBJECT; + + NV_ASSERT_OR_RETURN(phClient != NULL, NV_ERR_INVALID_ARGUMENT); + + // Allocate a client + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &hClient), + cleanup); + + // Allocate a device + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + serverutilGenResourceHandle(hClient, &hDevice), + cleanup); + + portMemSet(&nv0080AllocParams, 0, sizeof(nv0080AllocParams)); + nv0080AllocParams.deviceId = gpuGetDeviceInstance(pGpu); + nv0080AllocParams.hClientShare = hClient; + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + hClient, + hClient, + hDevice, + NV01_DEVICE_0, + &nv0080AllocParams), + cleanup); + + // Allocate a subDevice + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + serverutilGenResourceHandle(hClient, &hSubDevice), + cleanup); + + portMemSet(&nv2080AllocParams, 0, sizeof(nv2080AllocParams)); + nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + hClient, + hDevice, + hSubDevice, + NV20_SUBDEVICE_0, + &nv2080AllocParams), + cleanup); + + *phClient = hClient; + if (phDevice != NULL) + *phDevice = hDevice; + if (phSubDevice != NULL) + *phSubDevice = hSubDevice; + + return rmStatus; + +cleanup: + rmapiutilFreeClientAndDeviceHandles(pRmApi, &hClient, &hDevice, &hSubDevice); + return rmStatus; +} + +void +rmapiutilFreeClientAndDeviceHandles +( + RM_API *pRmApi, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +) +{ + NV_ASSERT_OR_RETURN_VOID(phClient != NULL); + NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, *phClient != NV01_NULL_OBJECT); + + if (phSubDevice != NULL && *phSubDevice != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, *phClient, *phSubDevice); + *phSubDevice = NV01_NULL_OBJECT; + } + + if (phDevice != NULL && *phDevice != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, *phClient, *phDevice); + *phDevice = NV01_NULL_OBJECT; + } + + pRmApi->Free(pRmApi, *phClient, *phClient); + *phClient = NV01_NULL_OBJECT; +} + +NvBool +rmapiutilIsExternalClassIdInternalOnly +( + NvU32 externalClassId +) +{ + RS_RESOURCE_DESC *pResDesc = RsResInfoByExternalClassId(externalClassId); + NV_ASSERT_OR_RETURN(pResDesc != NULL, NV_FALSE); + return (pResDesc->flags & RS_FLAGS_INTERNAL_ONLY) != 0x0; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c new file mode 100644 index 0000000..dcea17c --- /dev/null +++ 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c @@ -0,0 +1,113 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Description: +// This file implements RPC code common to all builds. +// +//****************************************************************************** + +#include "gpu/gpu.h" +#include "vgpu/rpc.h" +#include "os/os.h" + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + +#define RPC_MESSAGE_STRUCTURES +#define RPC_MESSAGE_GENERIC_UNION +#include "g_rpc-message-header.h" +#undef RPC_MESSAGE_STRUCTURES +#undef RPC_MESSAGE_GENERIC_UNION + +void rpcRmApiSetup(OBJGPU *pGpu) +{ + // + // Physical RMAPI is already initialized for monolithic, and this function + // just needs to overwrite individual methods as needed + // + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + PORT_UNREFERENCED_VARIABLE(pRmApi); + + if (IS_VIRTUAL(pGpu)) + { + // none for now + } + else if (IS_GSP_CLIENT(pGpu) && IsT234D(pGpu)) + { + pRmApi->Control = rpcRmApiControl_dce; + pRmApi->AllocWithHandle = rpcRmApiAlloc_dce; + pRmApi->Free = rpcRmApiFree_dce; + pRmApi->DupObject = rpcRmApiDupObject_dce; + } +} + +OBJRPC *initRpcObject(OBJGPU *pGpu) +{ + OBJRPC *pRpc = NULL; + + pRpc = portMemAllocNonPaged(sizeof(OBJRPC)); + if (pRpc == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "cannot allocate memory for OBJRPC (instance %d)\n", + gpuGetInstance(pGpu)); + return NULL; + } + + rpcRmApiSetup(pGpu); + + return pRpc; +} + +NV_STATUS rpcWriteCommonHeader(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 func, NvU32 paramLength) +{ + NV_STATUS status = NV_OK; + + if (!pRpc) + { + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: called with NULL pRpc. 
Function %d.\n", func); + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + portMemSet(pRpc->message_buffer, 0, pRpc->maxRpcSize); + + vgpu_rpc_message_header_v->header_version = DRF_DEF(_VGPU, _MSG_HEADER_VERSION, _MAJOR, _TOT) | + DRF_DEF(_VGPU, _MSG_HEADER_VERSION, _MINOR, _TOT); + vgpu_rpc_message_header_v->signature = NV_VGPU_MSG_SIGNATURE_VALID; + vgpu_rpc_message_header_v->rpc_result = NV_VGPU_MSG_RESULT_RPC_PENDING; + vgpu_rpc_message_header_v->rpc_result_private = NV_VGPU_MSG_RESULT_RPC_PENDING; + { + vgpu_rpc_message_header_v->u.spare = NV_VGPU_MSG_UNION_INIT; + } + vgpu_rpc_message_header_v->function = func; + vgpu_rpc_message_header_v->length = sizeof(rpc_message_header_v) + paramLength; + + return status; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c new file mode 100644 index 0000000..da5bb75 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c @@ -0,0 +1,383 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" +#include "core/locks.h" + +NV_STATUS +serverutilGetResourceRef +( + NvHandle hClient, + NvHandle hObject, + RsResourceRef **ppResourceRef +) +{ + RsResourceRef *pResourceRef; + RsClient *pRsClient; + NV_STATUS status; + + *ppResourceRef = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NV_ERR_INVALID_CLIENT; + + status = clientGetResourceRef(pRsClient, hObject, &pResourceRef); + if (status != NV_OK) + return status; + + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +serverutilGetResourceRefWithType +( + NvHandle hClient, + NvHandle hObject, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + if (serverutilGetResourceRef(hClient, hObject, ppResourceRef) != NV_OK) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (!objDynamicCastById((*ppResourceRef)->pResource, internalClassId)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} + +NV_STATUS +serverutilGetResourceRefWithParent +( + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + NvHandle hFoundParent; + + if (serverutilGetResourceRef(hClient, hObject, ppResourceRef) != NV_OK) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + hFoundParent = (*ppResourceRef)->pParentRef ? (*ppResourceRef)->pParentRef->hResource : 0; + + if (!objDynamicCastById((*ppResourceRef)->pResource, internalClassId) || + hFoundParent != hParent) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} + +NV_STATUS +serverutilGetClientUnderLock +( + NvHandle hClient, + RmClient **ppClient +) +{ + NV_STATUS status; + RsClient *pRsClient; + RmClient *pClient; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return status; + + pClient = dynamicCast(pRsClient, RmClient); + NV_ASSERT(pClient != NULL); + + if (ppClient) + *ppClient = pClient; + + return NV_OK; +} + +RmClient +**serverutilGetFirstClientUnderLock +( + void +) +{ + RmClient **ppClient; + + // + // Resource server's client list is not protected by any RM locks + // so, as a WAR, we access a lock-protected shadow client list. This avoids + // the race condition where a client is freed while a DPC is iterating + // through the client list. + // + ppClient = listHead(&g_clientListBehindGpusLock); + if (NULL == ppClient) + return NULL; + + return ppClient; +} + +RmClient +**serverutilGetNextClientUnderLock +( + RmClient **ppClient +) +{ + // + // Resource server's client list is not protected by any RM locks + // so, as a WAR, we access a lock-protected shadow client list. This avoids + // the race condition where a client is freed while a DPC is iterating + // through the client list. 
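+    // Together with serverutilGetFirstClientUnderLock(), this supports the
+    // usual shadow-list walk (see serverutilGetClientHandlesFromPid() below):
+    //     for (ppC = serverutilGetFirstClientUnderLock();
+    //          ppC != NULL;
+    //          ppC = serverutilGetNextClientUnderLock(ppC))
+    //     {
+    //         /* examine *ppC */
+    //     }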
+ // + ppClient = listNext(&g_clientListBehindGpusLock, ppClient); + if (NULL == ppClient) + return NULL; + + return ppClient; +} + +RsResourceRef * +serverutilFindChildRefByType +( + NvHandle hClient, + NvHandle hParent, + NvU32 internalClassId, + NvBool bExactMatch +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + RsResourceRef *pParentRef; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NULL; + + status = clientGetResourceRef(pRsClient, hParent, &pParentRef); + if (status != NV_OK) + { + return NULL; + } + + status = refFindChildOfType(pParentRef, internalClassId, bExactMatch, &pResourceRef); + if (status != NV_OK) + { + return NULL; + } + + return pResourceRef; +} + +RS_ITERATOR +serverutilRefIter +( + NvHandle hClient, + NvHandle hScopedObject, + NvU32 internalClassId, + RS_ITER_TYPE iterType, + NvBool bExactMatch +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pScopedRef = NULL; + RS_ITERATOR it; + + portMemSet(&it, 0, sizeof(it)); + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return it; + + if (hScopedObject != NV01_NULL_OBJECT) + { + status = clientGetResourceRef(pRsClient, hScopedObject, &pScopedRef); + if (status != NV_OK) + { + return it; + } + } + + return clientRefIter(pRsClient, pScopedRef, internalClassId, iterType, bExactMatch); +} + +NvBool +serverutilValidateNewResourceHandle +( + NvHandle hClient, + NvHandle hObject +) +{ + RmClient *pClient; + + return ((NV_OK == serverutilGetClientUnderLock(hClient, &pClient)) && + (NV_OK == clientValidateNewResourceHandle(staticCast(pClient, RsClient), hObject, NV_TRUE))); +} + +NV_STATUS +serverutilGenResourceHandle +( + NvHandle hClient, + NvHandle *returnHandle +) +{ + NV_STATUS status; + RmClient *pClient; + + // LOCK TEST: we should have the API lock here + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + status = clientGenResourceHandle(staticCast(pClient, RsClient), returnHandle); + return status; +} + +RS_SHARE_ITERATOR +serverutilShareIter +( + NvU32 internalClassId +) +{ + return serverShareIter(&g_resServ, internalClassId); +} + +NvBool +serverutilShareIterNext +( + RS_SHARE_ITERATOR* pIt +) +{ + return serverShareIterNext(pIt); +} + +NV_STATUS +serverutilGetClientHandlesFromPid +( + NvU32 procID, + NvU32 subProcessID, + ClientHandlesList *pClientList +) +{ + RmClient **ppClient; + RmClient *pClient; + + // If the list passed in has old elements, lets clear its elements. 
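+    // (Callers may reuse a single list across queries: stale nodes from a
+    //  previous call are freed here before the list is repopulated below.)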
+ if (listCount(pClientList)) + { + // Clear & free nodes in temp list + listDestroy(pClientList); + } + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + RsClient *pRsClient; + + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if ((pClient->ProcID == procID) && + (pClient->SubProcessID == subProcessID)) + { + if (listAppendValue(pClientList, + &pRsClient->hClient) == NULL) + { + listClear(pClientList); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + } + + return NV_OK; +} + +NvBool +serverutilMappingFilterCurrentUserProc +( + RsCpuMapping *pMapping +) +{ + return (!pMapping->pPrivate->bKernel && + (pMapping->processId == osGetCurrentProcess())); +} + +NvBool +serverutilMappingFilterKernel +( + RsCpuMapping *pMapping +) +{ + return pMapping->pPrivate->bKernel; +} + + +NV_STATUS +serverutilAcquireClient +( + NvHandle hClient, + LOCK_ACCESS_TYPE access, + RmClient **ppClient +) +{ + RsClient *pRsClient; + RmClient *pClient; + + // LOCK TEST: we should have the API lock here + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (NV_OK != serverAcquireClient(&g_resServ, hClient, access, &pRsClient)) + return NV_ERR_INVALID_CLIENT; + + pClient = dynamicCast(pRsClient, RmClient); + if (pClient == NULL) + { + serverReleaseClient(&g_resServ, access, pRsClient); + return NV_ERR_INVALID_CLIENT; + } + + *ppClient = pClient; + return NV_OK; +} + +void +serverutilReleaseClient +( + LOCK_ACCESS_TYPE access, + RmClient *pClient +) +{ + serverReleaseClient(&g_resServ, access, staticCast(pClient, RsClient)); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c new file mode 100644 index 0000000..7495611 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c @@ -0,0 +1,412 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "core/thread_state.h" +#include "rmapi/rs_utils.h" +#include "resserv/rs_access_map.h" +#include "resource_desc.h" +#include "class/cl0071.h" + +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" + +static NV_STATUS +_RmDupObject +( + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo, + RS_LOCK_INFO *pLockInfo +) +{ + NV_STATUS rmStatus; + RS_RES_DUP_PARAMS params; + + NV_ASSERT_OR_RETURN(phObject != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + portMemSet(¶ms, 0, sizeof(params)); + params.hClientSrc = hClientSrc; + params.hResourceSrc = hObjectSrc; + params.hClientDst = hClient; + params.hParentDst = hParent; + params.hResourceDst = *phObject; + params.pSecInfo = pSecInfo; + params.flags = flags; + params.pLockInfo = pLockInfo; + + rmStatus = serverCopyResource(&g_resServ, ¶ms); + + if (rmStatus == NV_OK) + *phObject = params.hResourceDst; + + return rmStatus; +} + +NV_STATUS +rmapiDupObject +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->DupObjectWithSecInfo(pRmApi, hClient, hParent, phObject, hClientSrc, hObjectSrc, + flags, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiDupObjectWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClient:0x%x hParent:0x%x hObject:0x%x\n", + hClient, hParent, *phObject); + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClientSrc:0x%x hObjectSrc:0x%x flags:0x%x\n", + hClientSrc, hObjectSrc, flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + { + return status; + } + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + if (pRmApi->bApiLockInternal) + { + // + // DupObject requires taking two client locks, but internal calls have probably + // already taken one client lock. Taking a second would require unlocking + // the first lock in the middle of the API call, which could mess with the client. + // In such cases, we need an exclusive API lock, then skip taking client locks. 
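+    // Concretely: an internal caller duping from client A into client B may
+    // already hold A's client lock; acquiring B's lock here could require
+    // dropping A's mid-call, so the exclusive API lock is used to cover both.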
+ // + if (lockInfo.pClient != NULL) + { + NV_ASSERT(rmapiLockIsOwner()); + // RS-TODO assert RW api lock + lockInfo.flags |= RM_LOCK_FLAGS_NO_CLIENT_LOCK; + } + } + + status = _RmDupObject(hClient, hParent, phObject, hClientSrc, hObjectSrc, flags, pSecInfo, &lockInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "...handle dup complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04DupObject: dup failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_WARNING, + "Nv04DupObject: hClient:0x%x hParent:0x%x hObject:0x%x\n", + hClient, hParent, *phObject); + NV_PRINTF(LEVEL_WARNING, + "Nv04DupObject: hClientSrc:0x%x hObjectSrc:0x%x flags:0x%x\n", + hClientSrc, hObjectSrc, flags); + } + + return status; +} + +NV_STATUS +rmapiDupObjectWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiDupObjectWithSecInfo(pRmApi, hClient, hParent, phObject, hClientSrc, hObjectSrc, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + + +static NV_STATUS +_RmShare +( + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo, + RS_LOCK_INFO *pLockInfo +) +{ + RS_RES_SHARE_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hResource = hObject; + params.pSharePolicy = pSharePolicy; + params.pSecInfo = pSecInfo; + params.pLockInfo = pLockInfo; + + return serverShareResourceAccess(&g_resServ, ¶ms); +} + +NV_STATUS +rmapiShare +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->ShareWithSecInfo(pRmApi, hClient, hObject, pSharePolicy, + &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiShareWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04Share: hClient:0x%x hObject:0x%x pSharePolicy:%p\n", + hClient, hObject, pSharePolicy); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + { + return status; + } + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + // + // Currently, Share should have no internal callers. + // If this changes and one takes a client lock, this could mess with + // Share since it may require two clients when sharing with SHARE_TYPE_CLIENT. 
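+    // (SHARE_TYPE_CLIENT shares with one specific target client, so both that
+    //  client and hClient would need to be locked.)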
+ // Assert this for now, handle it properly if this ever changes (See DupObject) + // + NV_ASSERT (lockInfo.pClient == NULL); + + status = _RmShare(hClient, hObject, pSharePolicy, pSecInfo, &lockInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "...resource share complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04Share: share failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_WARNING, + "Nv04Share: hClient:0x%x hObject:0x%x pSharePolicy:%p\n", + hClient, hObject, pSharePolicy); + } + + return status; +} + +NV_STATUS +rmapiShareWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiShareWithSecInfo(pRmApi, hClient, hObject, pSharePolicy, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverCopyResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_COPY)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverShareResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_SHARE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverInitGlobalSharePolicies +( + RsServer *pServer +) +{ + RS_SHARE_POLICY sharePolicy; + + // Global default policies, these can be overridden by clients/objects + portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + sharePolicy.type = RS_SHARE_TYPE_PID; + + if (listAppendValue(&pServer->defaultInheritedSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + // Internal share policies, these can't be overridden + + // SMC dup policy: Do not allow duping across different SMC partition + portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + sharePolicy.type = RS_SHARE_TYPE_SMC_PARTITION; + sharePolicy.action = RS_SHARE_ACTION_FLAG_REQUIRE; + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + + if (listAppendValue(&pServer->globalInternalSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + // FM dup policy: Allow FM to dup any user-mode client's resource. 
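+    // Note that, unlike the SMC policy above, this entry sets no
+    // RS_SHARE_ACTION_FLAG_REQUIRE: it grants RS_ACCESS_DUP_OBJECT rather than
+    // imposing a restriction that every dup must satisfy.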
+ portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + sharePolicy.type = RS_SHARE_TYPE_FM_CLIENT; + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + + if (listAppendValue(&pServer->globalInternalSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams) +{ + RS_RESOURCE_DESC *pResDesc; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + + if (pParams->pSrcRef == NULL) + return NV_ERR_INVALID_STATE; + + // Special cases; TODO move these to resource_list.h + if (pParams->pSrcRef->externalClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) + { + // Lock all GPUs + return NV_OK; + } + + pResDesc = RsResInfoByExternalClassId(pParams->pSrcRef->externalClassId); + if (pResDesc == NULL) + return NV_ERR_INVALID_OBJECT; + + // Use the same flags from alloc. These should be split out in the future. + if (!(pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC)) + { + pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + } + + if (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC) + { + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + + pLockInfo->pContextRef = pParams->pSrcRef->pParentRef; + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c new file mode 100644 index 0000000..9a02e3d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c @@ -0,0 +1,358 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief Common utility code that has no natural home + */ + + +#include "lib/base_utils.h" +#include "os/os.h" + +// +// Log2 approximation that assumes a power of 2 number passed in. +// +NvU32 nvLogBase2(NvU64 val) +{ + NvU32 i; + + NV_ASSERT(((val)&(val-1)) == 0); + + for (i = 0; i < 64; i++) + { + if ((1ull << i) == val) + { + break; + } + } + + NV_ASSERT(i < 64); + + return i; +} + + +/** + * @brief Finds the lowest unset bit of a given bitfield. + * + * Returns the lowest value of X such that the expression + * pBitField[X/32] & (1<<(X%32)) is zero. + * + * If all bits are set, returns numElements*32. 
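+ *
+ * Example: pBitField32[0] == 0x0000FFFF with numElements == 1 returns 16;
+ * an all-ones bitfield returns numElements*32.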
+ *
+ * @param[in] pBitField32
+ * @param[in] numElements size of array pBitField32
+ *
+ * @return the lowest zero bit, numElements*32 otherwise.
+ */
+NvU32 nvBitFieldLSZero(NvU32 *pBitField32, NvU32 numElements)
+{
+    NvU32 i;
+
+    for (i = 0; i < numElements; ++i)
+    {
+        NvU32 temp = ~pBitField32[i];
+        if (temp)
+        {
+            LOWESTBITIDX_32(temp);
+            return temp + i * sizeof(NvU32) * 8;
+        }
+    }
+
+    return numElements*32;
+}
+
+/**
+ * @brief Finds the highest unset bit of a given bitfield.
+ *
+ * Returns the highest value of X such that the expression
+ * pBitField[X/32] & (1<<(X%32)) is zero.
+ *
+ * If all bits are set, returns numElements*32.
+ *
+ * @param[in] pBitField32
+ * @param[in] numElements size of array pBitField32
+ *
+ * @return The highest zero bit, numElements*32 otherwise.
+ */
+NvU32 nvBitFieldMSZero(NvU32 *pBitField32, NvU32 numElements)
+{
+    NvU32 i = 0, j = numElements - 1;
+
+    while (i++ < numElements)
+    {
+        NvU32 temp = ~pBitField32[j];
+        if (temp)
+        {
+            HIGHESTBITIDX_32(temp);
+            return temp + j * sizeof(NvU32) * 8;
+        }
+        j--;
+    }
+
+    return numElements * 32;
+}
+
+NvBool nvBitFieldTest(NvU32 *pBitField, NvU32 numElements, NvU32 bit)
+{
+    return (bit < numElements*32 ? (NvBool) !!(pBitField[bit/32] & NVBIT(bit%32)) : NV_FALSE);
+}
+
+void nvBitFieldSet(NvU32 *pBitField, NvU32 numElements, NvU32 bit, NvBool val)
+{
+    NV_ASSERT(bit < numElements*32);
+    pBitField[bit/32] = (pBitField[bit/32] & ~NVBIT(bit%32)) | (val ? NVBIT(bit%32) : 0);
+}
+
+//
+// Sort an array of n elements/structures.
+// Example:
+//     NvBool integerLess(void * a, void * b)
+//     {
+//         return *(NvU32 *)a < *(NvU32 *)b;
+//     }
+//     NvU32 array[1000];
+//     ...
+//     NvU32 temp[1000];
+//     nvMergeSort(array, arrsize(array), temp, sizeof(NvU32), integerLess);
+//
+#define EL(n) ((char *)array+(n)*elementSize)
+void nvMergeSort(void * array, NvU32 n, void * tempBuffer, NvU32 elementSize, NvBool (*less)(void *, void *))
+{
+    char * mergeArray = (char *)tempBuffer;
+    NvU32 m, i;
+
+    //
+    // Bottom-up merge sort divides the sort into a sequence of passes.
+    // In each pass, the array is divided into blocks of size 'm'.
+    // Each pair of adjacent blocks is merged (in place).
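+    // For example, with n == 5: the m=1 pass merges (a0,a1) and (a2,a3); the
+    // m=2 pass merges a0..a1 with a2..a3; the m=4 pass merges a0..a3 with a4,
+    // completing the sort.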
+ // The next pass is started with twice the block size + // + for (m = 1; m<=n; m*=2) + { + for (i = 0; i<(n-m); i+=2*m) + { + NvU32 loMin = i; + NvU32 lo = loMin; + NvU32 loMax = i+m; + NvU32 hi = i+m; + NvU32 hiMax = NV_MIN(n,i+2*m); + + char * dest = mergeArray; + + // + // Standard merge of [lo, loMax) and [hi, hiMax) + // + while (1) + { + if (less(EL(lo), EL(hi))) + { + portMemCopy(dest, elementSize, EL(lo), elementSize); + lo++; + dest+=elementSize; + if (lo >= loMax) + break; + } + else + { + portMemCopy(dest, elementSize, EL(hi), elementSize); + hi++; + dest+=elementSize; + if (hi >= hiMax) + break; + } + } + + // + // Copy remaining items (only one of these loops can run) + // + while (lo < loMax) + { + portMemCopy(dest, elementSize,EL(lo), elementSize); + dest+=elementSize; + lo++; + } + + while (hi < hiMax) + { + portMemCopy(dest, elementSize,EL(hi), elementSize); + dest+=elementSize; + hi++; + } + + // + // Copy merged data back over array + // + portMemCopy(EL(loMin), (NvU32)(dest - mergeArray), mergeArray, (NvU32)(dest - mergeArray)); + } + } +} + +#define RANGE(val,low,hi) (((val) >= (low)) && ((val) <= (hi))) + +// Do not conflict with libc naming +NvS32 nvStrToL +( + NvU8* pStr, + NvU8** pEndStr, + NvS32 base, + NvU8 stopChar, + NvU32 *numFound +) +{ + NvU32 num; + NvU32 newnum; + + *numFound = 0; + + // scan for start of number + for (;*pStr;pStr++) + { + if (RANGE(*pStr, '0', '9')) + { + *numFound = 1; + break; + } + else if ((BASE16 == base) && (RANGE(*pStr,'a','f'))) + { + *numFound = 1; + break; + } + else if ((BASE16 == base) && (RANGE(*pStr,'A', 'F'))) + { + *numFound = 1; + break; + } + else if(*pStr == stopChar) + { + break; + } + } + + // convert number + num = 0; + for (;*pStr;pStr++) + { + if (RANGE(*pStr, '0', '9')) + { + newnum = *pStr - '0'; + } + else if ((BASE16 == base) && (RANGE(*pStr,'a','f'))) + { + newnum = *pStr - 'a' + 10; + } + else if ((BASE16 == base) && (RANGE(*pStr,'A', 'F'))) + { + newnum = *pStr - 'A' + 10; + } + else + break; + + num *= base; + num += newnum; + + } + + *pEndStr = pStr; + + return num; +} + +/** + * @brief Returns MSB of input as a bit mask + * + * @param x + * @return MSB of x + */ +NvU64 +nvMsb64(NvU64 x) +{ + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); + x |= (x >> 32); + // + // At this point, x has same MSB as input, but with all 1's below it, clear + // everything but MSB + // + return(x & ~(x >> 1)); +} + +/** + * @brief Convert unsigned long int to char* + * + * @param value to be converted to string + * @param *string is the char array to be have the converted data + * @param radix denoted the base of the operation : hex(16),octal(8)..etc + * @return the converted string + */ +char * nvU32ToStr(NvU32 value, char *string, NvU32 radix) +{ + char tmp[33]; + char *tp = tmp; + NvS32 i; + NvU32 v = value; + char *sp; + + if (radix > 36 || radix <= 1) + { + return 0; + } + + while (v || tp == tmp) + { + i = v % radix; + v = v / radix; + if (i < 10) + *tp++ = (char)(i + '0'); + else + *tp++ = (char)(i + 'a' - 10); + } + + sp = string; + + while (tp > tmp) + *sp++ = *--tp; + *sp = 0; + + return string; +} + + +/** + * @brief Get the string length + * + * @param string for which length has to be calculated + * @return the string length + */ +NvU32 nvStringLen(const char * str) +{ + NvU32 i = 0; + while (str[i++] != '\0') + ; + return i - 1; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c 
b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c new file mode 100644 index 0000000..f1f6ba9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c @@ -0,0 +1,1157 @@ +/* inflate.c -- Not copyrighted 1992 by Mark Adler + version c10p1, 10 January 1993 */ + +/* You can do whatever you like with this source file, though I would + prefer that if you modify it and redistribute it that you include + comments to that effect with your name and the date. Thank you. + [The history has been moved to the file ChangeLog.] + */ + +/* + Inflate deflated (PKZIP's method 8 compressed) data. The compression + method searches for as much of the current string of bytes (up to a + length of 258) in the previous 32K bytes. If it doesn't find any + matches (of at least length 3), it codes the next byte. Otherwise, it + codes the length of the matched string and its distance backwards from + the current position. There is a single Huffman code that codes both + single bytes (called "literals") and match lengths. A second Huffman + code codes the distance information, which follows a length code. Each + length or distance code actually represents a base value and a number + of "extra" (sometimes zero) bits to get to add to the base value. At + the end of each deflated block is a special end-of-block (EOB) literal/ + length code. The decoding process is basically: get a literal/length + code; if EOB then done; if a literal, emit the decoded byte; if a + length then get the distance and emit the referred-to bytes from the + sliding window of previously emitted data. + + There are (currently) three kinds of inflate blocks: stored, fixed, and + dynamic. The compressor deals with some chunk of data at a time, and + decides which method to use on a chunk-by-chunk basis. A chunk might + typically be 32K or 64K. If the chunk is uncompressible, then the + "stored" method is used. In this case, the bytes are simply stored as + is, eight bits per byte, with none of the above coding. The bytes are + preceded by a count, since there is no longer an EOB code. + + If the data is compressible, then either the fixed or dynamic methods + are used. In the dynamic method, the compressed data is preceded by + an encoding of the literal/length and distance Huffman codes that are + to be used to decode this block. The representation is itself Huffman + coded, and so is preceded by a description of that code. These code + descriptions take up a little space, and so for small blocks, there is + a predefined set of codes, called the fixed codes. The fixed method is + used if the block codes up smaller that way (usually for quite small + chunks), otherwise the dynamic method is used. In the latter case, the + codes are customized to the probabilities in the current block, and so + can code it much better than the pre-determined fixed codes. + + The Huffman codes themselves are decoded using a mutli-level table + lookup, in order to maximize the speed of decoding plus the speed of + building the decoding tables. See the comments below that precede the + lbits and dbits tuning parameters. + */ + + +/* + Notes beyond the 1.93a appnote.txt: + + 1. Distance pointers never point before the beginning of the output + stream. + 2. Distance pointers can point back across blocks, up to 32k away. + 3. There is an implied maximum of 7 bits for the bit length table and + 15 bits for the actual data. + 4. If only one code exists, then it is encoded using one bit. 
(Zero + would be more efficient, but perhaps a little confusing.) If two + codes exist, they are coded using one bit each (0 and 1). + 5. There is no way of sending zero distance codes--a dummy must be + sent if there are none. (History: a pre 2.0 version of PKZIP would + store blocks with no distance codes, but this was discovered to be + too harsh a criterion.) Valid only for 1.93a. 2.04c does allow + zero distance codes, which is sent as one code of zero bits in + length. + 6. There are up to 286 literal/length codes. Code 256 represents the + end-of-block. Note however that the static length tree defines + 288 codes just to fill out the Huffman codes. Codes 286 and 287 + cannot be used though, since there is no length base or extra bits + defined for them. Similarly, there are up to 30 distance codes. + However, static trees define 32 codes (all 5 bits) to fill out the + Huffman codes, but the last two had better not show up in the data. + 7. Unzip can check dynamic Huffman blocks for complete code sets. + The exception is that a single code would not be complete (see #4). + 8. The five bits following the block type is really the number of + literal codes sent minus 257. + 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits + (1+6+6). Therefore, to output three times the length, you output + three codes (1+1+1), whereas to output four times the same length, + you only need two codes (1+3). Hmm. + 10. In the tree reconstruction algorithm, Code = Code + Increment + only if BitLength(i) is not zero. (Pretty obvious.) + 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) + 12. Note: length code 284 can represent 227-258, but length code 285 + really is 258. The last length deserves its own, short code + since it gets used a lot in very redundant files. The length + 258 is special since 258 - 3 (the min match length) is 255. + 13. The literal/length and distance code bit lengths are read as a + single stream of lengths. It is possible (and advantageous) for + a repeat code (16, 17, or 18) to go across the boundary between + the two sets of lengths. + */ + +//----------------------------------------------------------------------------- +// NVIDIA modifications are solely around interface cleanup, compiler warnings, etc. 
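+// Build notes: without NVGZ_USER defined, this file builds as part of the
+// driver and picks up portMem*/NV_PRINTF from os/os.h; a user-space build
+// (NVGZ_USER) maps those helpers onto the C library via the #defines below.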
+//----------------------------------------------------------------------------- + +#include "nvtypes.h" +#include "nvstatus.h" + +#ifndef NVGZ_USER +#define __DRIVER_BUILD__ +// driver build +#include "os/os.h" +#endif /* NVGZ_USER */ + +#ifndef __DRIVER_BUILD__ +// user build : NVGZ_USER +#include +#include +#include +#include + +#define osMemCopy memcpy +#define portMemSet memset +#define portMemAllocNonPaged malloc +#define portMemFree free +#define sizeof sizeof +#define NV_PRINTF(a,b) printf(b) +#endif + +#include "lib/zlib/inflate.h" + +/* Function prototypes */ +static NvU32 huft_build(NvU8 *, NvU16, NvU32 , ush *, ush *, + struct huft **, NvS32 *); +static NvU32 huft_free(struct huft *); +static NvU32 inflate_codes_iterator(PGZ_INFLATE_STATE); +static NvU32 fixed_huft_build(PGZ_INFLATE_STATE); +static NvU32 dynamic_huft_build(PGZ_INFLATE_STATE); + +static void flush_window(PGZ_INFLATE_STATE pGzState) +{ + if ( pGzState->wp == 0) return; + + pGzState->wp2 = pGzState->wp; + + // If output range is not specified, do normal output + if (pGzState->outLower == 0xFFFFFFFF && pGzState->outUpper == 0xFFFFFFFF) + { + portMemCopy(pGzState->outbuf + pGzState->outptr, pGzState->wp, pGzState->window, pGzState->wp); + pGzState->wp1 += pGzState->wp; + pGzState->optSize += pGzState->wp; + } + // slide pGzState->outLower pGzState->outUpper slide + // ----============-----|--------------|-----============ + else if (pGzState->outptr + pGzState->wp - 1 < pGzState->outLower + || pGzState->outptr > pGzState->outUpper) + { + } + // slide pGzState->outLower pGzState->outUpper + // ----=================|===-----------|----------------- + else if (pGzState->outptr <= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 <= pGzState->outUpper) + { + portMemCopy(pGzState->outbuf, + pGzState->wp - (pGzState->outLower - pGzState->outptr), pGzState->window + pGzState->outLower - pGzState->outptr, + pGzState->wp - (pGzState->outLower - pGzState->outptr)); + pGzState->wp1 += pGzState->wp - (pGzState->outLower - pGzState->outptr); + pGzState->optSize += pGzState->wp - (pGzState->outLower - pGzState->outptr); + } + // slide pGzState->outLower pGzState->outUpper + // ----=================|==============|===-------------- + else if (pGzState->outptr <= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 > pGzState->outUpper ) + { + portMemCopy(pGzState->outbuf, + pGzState->outUpper - pGzState->outLower + 1, pGzState->window + pGzState->outLower - pGzState->outptr, + pGzState->outUpper - pGzState->outLower + 1); + pGzState->wp1 += pGzState->outUpper - pGzState->outptr + 1; + pGzState->optSize += pGzState->outUpper - pGzState->outLower + 1; + } + // slide pGzState->outLower pGzState->outUpper + // ---------------------|===========---|----------------- + else if (pGzState->outptr >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 <= pGzState->outUpper) + { + portMemCopy(pGzState->outbuf + pGzState->outptr - pGzState->outLower, + pGzState->wp, pGzState->window, + pGzState->wp); + pGzState->wp1 += pGzState->wp; + pGzState->optSize += pGzState->wp; + } + // slide pGzState->outLower pGzState->outUpper + // ---------------------|==============|===-------------- + else if (pGzState->outptr >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 > pGzState->outUpper) + { + portMemCopy(pGzState->outbuf + pGzState->outptr - pGzState->outLower, + pGzState->outUpper - pGzState->outptr + 1, pGzState->window, + pGzState->outUpper - 
pGzState->outptr + 1); + pGzState->wp1 += pGzState->outUpper - pGzState->outptr + 1; + pGzState->optSize += pGzState->outUpper - pGzState->outptr + 1; + } + + pGzState->outptr += pGzState->wp; + pGzState->wp = 0; +} + + +/* Tables for deflate from PKZIP's appnote.txt. */ +static NvU32 border[] = { /* Order of the bit length code lengths */ + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; +static ush cplens[] = { /* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + /* note: see note #13 above about the 258 in this list. */ +static ush cplext[] = { /* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ +static ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; +static ush cpdext[] = { /* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + +/* Macros for inflate() bit peeking and grabbing. + The usage is: + + NEEDBITS(j) + x = b & mask_bits[j]; + DUMPBITS(j) + + where NEEDBITS makes sure that b has at least j bits in it, and + DUMPBITS removes the bits from b. The macros use the variable k + for the number of bits in b. Normally, b and k are register + variables for speed, and are initialized at the beginning of a + routine that uses these macros from a global bit buffer and count. + + If we assume that EOB will be the longest code, then we will never + ask for bits with NEEDBITS that are beyond the end of the stream. + So, NEEDBITS should not read any more bytes than are needed to + meet the request. Then no bytes need to be "returned" to the buffer + at the end of the last block. + + However, this assumption is not true for fixed blocks--the EOB code + is 7 bits, but the other literal/length codes can be 8 or 9 bits. + (The EOB code is shorter than other codes because fixed blocks are + generally short. So, while a block always has an EOB, many other + literal/length codes have a significantly lower probability of + showing up at all.) However, by making the first table have a + lookup of seven bits, the EOB code will be found in that first + lookup, and so will not require that too many bits be pulled from + the stream. + */ + +static ush mask_bits[] = { + 0x0000, + 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + +/* + Huffman code decoding is performed using a multi-level table lookup. + The fastest way to decode is to simply build a lookup table whose + size is determined by the longest code. However, the time it takes + to build this table can also be a factor if the data being decoded + is not very long. The most common codes are necessarily the + shortest codes, so those codes dominate the decoding time, and hence + the speed. The idea is you can have a shorter table that decodes the + shorter, more probable codes, and then point to subsidiary tables for + the longer codes. The time it costs to decode the longer codes is + then traded against the time it takes to make longer tables. + + This results of this trade are in the variables lbits and dbits + below. 
lbits is the number of bits the first level table for literal/ + length codes can decode in one step, and dbits is the same thing for + the distance codes. Subsequent tables are also less than or equal to + those sizes. These values may be adjusted either when all of the + codes are shorter than that, in which case the longest code length in + bits is used, or when the shortest code is *longer* than the requested + table size, in which case the length of the shortest code in bits is + used. + + There are two different values for the two tables, since they code a + different number of possibilities each. The literal/length table + codes 286 possible values, or in a flat code, a little over eight + bits. The distance table codes 30 possible values, or a little less + than five bits, flat. The optimum values for speed end up being + about one bit more than those, so lbits is 8+1 and dbits is 5+1. + The optimum values may differ though from machine to machine, and + possibly even between compilers. Your mileage may vary. + */ + + +const NvU32 lbits = 9; /* bits in base literal/length lookup table */ +const NvU32 dbits = 6; /* bits in base distance lookup table */ + +static NvU32 hufts; /* track memory usage */ + +/* + * Given a list of code lengths and a maximum table size, make a set of + * tables to decode that set of codes. Return zero on success, one if + * the given code set is incomplete (the tables are still built in + * case), two if the input is invalid (all zero length codes or an + * oversubscribed set of lengths), and three if not enough memory. + */ +static NvU32 huft_build +( + NvU8 *b, /* code lengths in bits (all assumed <= BMAX) */ + NvU16 n, /* number of codes (assumed <= N_MAX) */ + NvU32 s, /* number of simple-valued codes (0..s-1) */ + ush *d, /* list of base values for non-simple codes */ + ush *e, /* list of extra bits for non-simple codes */ + struct huft **t, /* result: starting table */ + NvS32 *m /* maximum lookup bits, returns actual */ +) +{ + NvU32 a; /* counter for codes of length k */ + NvU32 c[BMAX+1]; /* bit length count table */ + NvU32 f; /* i repeats in table every f entries */ + NvS32 g; /* maximum code length */ + NvS32 h; /* table level */ + NvU16 i; /* counter, current code */ + NvU32 j; /* counter */ + NvS32 k; /* number of bits in current code */ + NvS32 l; /* bits per table (returned in m) */ + NvU8 *p8; /* pointer into b[] */ + NvU16 *p16; /* pointer into v[] */ + NvU32 *p32; /* pointer into c[] */ + struct huft *q; /* points to current table */ + struct huft r; /* table entry for structure assignment */ + struct huft *u[BMAX]; /* table stack */ + NvU16 v[N_MAX]; /* values in order of bit length */ + NvS32 w; /* bits before this table == (l * h) */ + NvU32 x[BMAX+1]; /* bit offsets, then code stack */ + NvU32 *xp; /* pointer into x */ + NvS32 y; /* number of dummy codes added */ + NvU32 z; /* number of entries in current table */ + + /* Generate counts for each bit length */ + portMemSet((void*)c,0,sizeof(c)); + + p8 = b; i = n; + do { + Tracecv(*p8, (stderr, (n-i >= ' ' && n-i <= '~' ? 
"%c %d\n" : "0x%x %d\n"), + n-i, *p8)); + c[*p8]++; /* assume all entries <= BMAX */ + p8++; /* Can't combine with above line (Solaris bug) */ + } while (--i); + if (c[0] == n) /* null input--all zero length codes */ + { + *t = (struct huft *)NULL; + *m = 0; + return GZ_STATE_HUFT_OK; + } + + + /* Find minimum and maximum length, bound *m by those */ + l = *m; + for (j = 1; j <= BMAX; j++) + if (c[j]) + break; + k = j; /* minimum code length */ + if ((NvU32)l < j) + l = j; + for (i = BMAX; i; i--) + if (c[i]) + break; + g = i; /* maximum code length */ + if ((NvU32)l > i) + l = i; + *m = l; + + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) + if ((y -= c[j]) < 0) + return GZ_STATE_HUFT_ERROR; /* bad input: more codes than bits */ + if ((y -= c[i]) < 0) + return GZ_STATE_HUFT_ERROR; + c[i] += y; + + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p32 = c + 1; xp = x + 2; + while (--i) { /* note that i == g from above */ + *xp++ = (j += *p32++); + } + + + /* Make a table of values in order of bit lengths */ + p8 = b; i = 0; + do { + if ((j = *p8++) != 0) + v[x[j]++] = i; + } while (++i < n); + + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p16 = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (struct huft *)NULL; /* just to keep compilers happy */ + q = (struct huft *)NULL; /* ditto */ + z = 0; /* ditto */ + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) + { + a = c[k]; + while (a--) + { + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > w + l) + { + h++; + w += l; /* previous table always l bits */ + + /* compute minimum size table less than or equal to l bits */ + z = (NvU32)((z = (NvU32)(g - w)) > (NvU32)l ? (NvU32)l : z); /* upper limit on table size */ + if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + while (++j < z) /* try smaller tables up to z bits */ + { + if ((f <<= 1) <= *++xp) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ + } + } + z = 1 << j; /* table entries for j-bit table */ + + /* allocate and link in new table */ + + q = portMemAllocNonPaged((z + 1)*sizeof(struct huft)); + if (q == NULL) + { + return GZ_STATE_HUFT_ERROR; + } + + if (q == (struct huft *)NULL) + { + if (h) + huft_free(u[0]); + return GZ_STATE_HUFT_ERROR; /* not enough memory */ + } + hufts += z + 1; /* track memory usage */ + *t = q + 1; /* link to list for huft_free() */ + *(t = &(q->v.t)) = (struct huft *)NULL; + u[h] = ++q; /* table starts after link */ + + /* connect to last table, if there is one */ + if (h) + { + x[h] = i; /* save pattern for backing up */ + r.b = (uch)l; /* bits to dump before this table */ + r.e = (uch)(16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h-1][j] = r; /* connect to last table */ + } + } + + /* set up table entry in r */ + r.b = (uch)(k - w); + if (p16 >= v + n) + r.e = 99; /* out of values--invalid code */ + else if (*p16 < s) + { + r.e = (uch)(*p16 < 256 ? 
16 : 15); /* 256 is end-of-block code */ + r.v.n = (ush)(*p16); /* simple code is just the value */ + p16++; /* one compiler does not like *p++ */ + } + else + { + r.e = (uch)e[*p16 - s]; /* non-simple--look up in lists */ + r.v.n = d[*p16++ - s]; + } + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) + q[j] = r; + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) + i ^= j; + i ^= j; + + /* backup over finished tables */ + while ((i & ((NvU32)(1 << w) - 1)) != x[h]) + { + h--; /* don't need to update q */ + w -= l; + } + } + } + + + /* Return true (1) if we were given an incomplete table */ + return y != 0 && g != 1; +} + +/* + * Free the malloc'ed tables built by huft_build(), which makes a linked + * list of the tables it made, with the links in a dummy first entry of + * each table. + */ +static NvU32 huft_free +( + struct huft *t /* table to free */ +) +{ + struct huft *p, *q; + + /* Go through linked list, freeing from the malloced (t[-1]) address. */ + p = t; + while (p != (struct huft *)NULL) + { + q = (--p)->v.t; + portMemFree((void*)p); + p = q; + } + return GZ_STATE_HUFT_OK; +} + +static NvU32 inflate_codes_iterator_store(PGZ_INFLATE_STATE pGzState) +{ + NvU32 n = pGzState->codesState.sn; /* number of bytes in block */ + NvU32 w = pGzState->codesState.w; /* current window position */ + NvU32 k = pGzState->codesState.k; /* number of bits in bit buffer */ + ulg b = pGzState->codesState.b; /* bit buffer */ + + /* read and output the compressed data */ + while (n) + { + n--; + NEEDBITS(8) + pGzState->window[w++] = (uch)b; + DUMPBITS(8) + if (w == WSIZE) + { + flush_output(w); + w = 0; + break; + } + } + + /* restore the globals from the locals */ + pGzState->codesState.sn = n; + pGzState->codesState.w = w; + pGzState->codesState.b = b; + pGzState->codesState.k = k; + + if (n != 0) + { + return GZ_STATE_ITERATOR_OK; + } + else + { + return GZ_STATE_ITERATOR_END; + } +} + +/* inflate (decompress) the codes in a deflated (compressed) block. +Return an error code or zero if it all goes ok. 
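+The _iterator variants below keep their working variables in
+pGzState->codesState between calls, so a block can be decompressed
+incrementally, one sliding window at a time.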
*/ +static NvU32 inflate_codes_iterator(PGZ_INFLATE_STATE pGzState) +{ + NvU32 e = pGzState->codesState.e; /* table entry flag/number of extra bits */ + NvU32 n = pGzState->codesState.n; /* length and index for copy */ + NvU32 d = pGzState->codesState.d; + NvU32 w = pGzState->codesState.w; /* current window position */ + struct huft *t = pGzState->codesState.t; /* pointer to table entry */ + ulg b = pGzState->codesState.b; /* bit buffer */ + NvU32 k = pGzState->codesState.k; /* number of bits in bit buffer */ + NvU32 ml = mask_bits[pGzState->bl]; /* masks for bl and bd bits */ + NvU32 md = mask_bits[pGzState->bd]; + NvU32 r = 0; + + if (pGzState->codesState.continue_copy == 1) + goto continue_copy; + + for (;;) + { + NEEDBITS((unsigned)pGzState->bl) + if ((e = (t = pGzState->tl + ((unsigned)b & ml))->e) > 16) + { + do { + if (e == 99) + return GZ_STATE_ITERATOR_ERROR; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + } + DUMPBITS(t->b) + + if (e == 16) /* then it's a literal */ + { + pGzState->window[w++] = (uch)t->v.n; + Tracevv((stderr, "%c", pGzState->window[w-1])); + if (w == WSIZE) + { + pGzState->wp1 = 0; + flush_output(w); + w = 0; + r = GZ_STATE_ITERATOR_OK; + goto exit; + } + } + else /* it's an EOB or a length */ + { + /* exit if end of block */ + if (e == 15) + { + r = GZ_STATE_ITERATOR_END; + goto exit; + } + + /* get length of block to copy */ + NEEDBITS(e) + n = t->v.n + ((unsigned)b & mask_bits[e]); + DUMPBITS(e); + + /* decode distance of block to copy */ + NEEDBITS((unsigned)pGzState->bd) + if ((e = (t = pGzState->td + ((unsigned)b & md))->e) > 16) + { + do { + if (e == 99) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + } + DUMPBITS(t->b) + + NEEDBITS(e) + d = w - t->v.n - ((unsigned)b & mask_bits[e]); + DUMPBITS(e) + + Tracevv((stderr,"\\[%d,%d]", w-d, n)); + + /* do the copy */ + do { + n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e); +#if !defined(NOMEMCPY) && !defined(DEBUG) + if (w - d >= e) /* (this test assumes unsigned comparison) */ + { + memcpy(pGzState->window + w, pGzState->window + d, e); + w += e; + d += e; + } + else /* do it slow to avoid memcpy() overlap */ +#endif /* !NOMEMCPY */ + { + do { + pGzState->window[w++] = pGzState->window[d++]; + Tracevv((stderr, "%c", pGzState->window[w-1])); + } while (--e); + } + if (w == WSIZE) + { + pGzState->wp1 = 0; + flush_output(w); + w = 0; + r = GZ_STATE_ITERATOR_OK; + pGzState->codesState.continue_copy = 1; + goto exit; + } +continue_copy: ; + } while (n); + + pGzState->codesState.continue_copy = 0; + } + } + +exit: + + pGzState->codesState.e = e; /* table entry flag/number of extra bits */ + pGzState->codesState.n = n; + pGzState->codesState.d = d; /* length and index for copy */ + pGzState->codesState.w = w; /* current window position */ + pGzState->codesState.t = t; /* pointer to table entry */ + pGzState->codesState.b = b; /* bit buffer */ + pGzState->codesState.k = k; /* number of bits in bit buffer */ + + /* done */ + return r; +} + +static void huft_destroy(PGZ_INFLATE_STATE pGzState) +{ + /* free the decoding tables, return */ + if (pGzState->tl != NULL) + { + huft_free(pGzState->tl); + pGzState->tl = NULL; + } + + if (pGzState->td != NULL) + { + huft_free(pGzState->td); + pGzState->td = NULL; + } +} + +static NvU32 fixed_huft_build(PGZ_INFLATE_STATE pGzState) +/* decompress an inflated type 1 (fixed Huffman codes) block. 
We should + either replace this with a custom decoder, or at least precompute the + Huffman tables. */ +{ + NvU32 i; /* temporary variable */ + NvU8 l[N_MAX]; /* length list for huft_build */ + + /* set up literal table */ + for (i = 0; i < 144; i++) + l[i] = 8; + for (; i < 256; i++) + l[i] = 9; + for (; i < 280; i++) + l[i] = 7; + for (; i < N_MAX; i++) /* make a complete, but wrong code set */ + l[i] = 8; + pGzState->bl = 7; + if ((i = huft_build(l, N_MAX, 257, cplens, cplext, &pGzState->tl, &pGzState->bl)) != 0) + return i; + + + /* set up distance table */ + for (i = 0; i < 30; i++) /* make an incomplete code set */ + l[i] = 5; + pGzState->bd = 5; + if ((i = huft_build(l, 30, 0, cpdist, cpdext, &pGzState->td, &pGzState->bd)) > GZ_STATE_HUFT_INCOMP) + { + huft_free(pGzState->tl); + return i; + } + + return GZ_STATE_HUFT_OK; +} + +/* decompress an inflated type 2 (dynamic Huffman codes) block. */ +static NvU32 dynamic_huft_build(PGZ_INFLATE_STATE pGzState) +{ + NvU32 i; /* temporary variables */ + NvU32 j; + NvU32 l; /* last length */ + NvU32 m; /* mask for bit lengths table */ + NvU32 n; /* number of lengths to get */ + NvU32 nb; /* number of bit length codes */ + NvU16 nl; /* number of literal/length codes */ + NvU16 nd; /* number of distance codes */ +#ifdef PKZIP_BUG_WORKAROUND + NvU8 ll[288+32]; /* literal/length and distance code lengths */ +#else + NvU8 ll[286+30]; /* literal/length and distance code lengths */ +#endif + ulg b; /* bit buffer */ + NvU32 k; /* number of bits in bit buffer */ + + + /* make local bit buffer */ + b = pGzState->bb; + k = pGzState->bk; + + + /* read in table lengths */ + NEEDBITS(5) + nl = 257 + ((NvU8)b & 0x1f); /* number of literal/length codes */ + DUMPBITS(5) + NEEDBITS(5) + nd = 1 + ((NvU8)b & 0x1f); /* number of distance codes */ + DUMPBITS(5) + NEEDBITS(4) + nb = 4 + ((NvU8)b & 0xf); /* number of bit length codes */ + DUMPBITS(4) +#ifdef PKZIP_BUG_WORKAROUND + if (nl > 288 || nd > 32) +#else + if (nl > 286 || nd > 30) +#endif + return GZ_STATE_HUFT_INCOMP; /* bad lengths */ + + /* read in bit-length-code lengths */ + for (j = 0; j < nb; j++) + { + NEEDBITS(3) + ll[border[j]] = (NvU8)b & 7; + DUMPBITS(3) + } + for (; j < 19; j++) + ll[border[j]] = 0; + + /* build decoding table for trees--single level, 7 bit lookup */ + pGzState->bl = 7; + if ((i = huft_build(ll, 19, 19, NULL, NULL, &pGzState->tl, &pGzState->bl)) != 0) + { + if (i == GZ_STATE_HUFT_INCOMP) + huft_free(pGzState->tl); + return i; /* incomplete code set */ + } + + /* read in literal and distance code lengths */ + n = nl + nd; + m = mask_bits[pGzState->bl]; + i = l = 0; + while ((NvU32)i < n) + { + NEEDBITS((NvU32)pGzState->bl) + j = (pGzState->td = pGzState->tl + ((NvU32)b & m))->b; + DUMPBITS(j) + j = pGzState->td->v.n; + if (j < 16) /* length of code in bits (0..15) */ + ll[i++] = (NvU8)(l = j); /* save last length in l */ + else if (j == 16) /* repeat last length 3 to 6 times */ + { + NEEDBITS(2) + j = 3 + ((NvU32)b & 3); + DUMPBITS(2) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = (NvU8)l; + } + else if (j == 17) /* 3 to 10 zero length codes */ + { + NEEDBITS(3) + j = 3 + ((NvU32)b & 7); + DUMPBITS(3) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = 0; + l = 0; + } + else /* j == 18: 11 to 138 zero length codes */ + { + NEEDBITS(7) + j = 11 + ((NvU32)b & 0x7f); + DUMPBITS(7) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = 0; + l = 0; + } + } + + /* free decoding table for trees */ + 
huft_free(pGzState->tl);
+
+    /* restore the global bit buffer */
+    pGzState->bb = b;
+    pGzState->bk = k;
+
+    /* build the decoding tables for literal/length and distance codes */
+    pGzState->bl = lbits;
+    if ((i = huft_build(ll, nl, 257, cplens, cplext, &pGzState->tl, &pGzState->bl)) != 0)
+    {
+        if (i == GZ_STATE_HUFT_INCOMP) {
+            NV_PRINTF(LEVEL_ERROR, "dload, incomplete literal tree\n");
+            huft_free(pGzState->tl);
+        }
+        return i;                   /* incomplete code set */
+    }
+    pGzState->bd = dbits;
+    if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &pGzState->td, &pGzState->bd)) != 0)
+    {
+        if (i == GZ_STATE_HUFT_INCOMP) {
+            NV_PRINTF(LEVEL_ERROR, "dload, incomplete distance tree\n");
+#ifdef PKZIP_BUG_WORKAROUND
+            i = GZ_STATE_HUFT_OK;
+        }
+#else
+            huft_free(pGzState->td);
+        }
+        huft_free(pGzState->tl);
+        return i;                   /* incomplete code set */
+#endif
+    }
+
+    return GZ_STATE_HUFT_OK;
+}
+
+static
+NV_STATUS utilGzInit(const NvU8 *zArray, NvU8* oBuffer, NvU32 numTotalBytes, NvU8* window, PGZ_INFLATE_STATE pGzState)
+{
+    portMemSet(pGzState, 0, sizeof(GZ_INFLATE_STATE));
+    portMemSet(window, 0, GZ_SLIDE_WINDOW_SIZE);
+
+    pGzState->inbuf = (NvU8*)zArray;
+    pGzState->outbuf = oBuffer;
+    pGzState->outBufSize = numTotalBytes;
+    pGzState->window = window;
+    pGzState->newblock = 1;
+    pGzState->outLower = 0xFFFFFFFF;
+    pGzState->outUpper = 0xFFFFFFFF;
+
+    return NV_OK;
+}
+
+/* NVIDIA addition: give pointers to input and known-large-enough output buffers. */
+/* decompress an inflated entry */
+NV_STATUS utilGzAllocate(const NvU8 *zArray, NvU32 numTotalBytes, PGZ_INFLATE_STATE *ppGzState)
+{
+    PGZ_INFLATE_STATE pGzState = NULL;
+    NvU8 *window = NULL;
+    NV_STATUS status = NV_OK;
+
+    pGzState = portMemAllocNonPaged(sizeof(GZ_INFLATE_STATE));
+    if (pGzState == NULL)
+    {
+        status = NV_ERR_NO_MEMORY;
+        goto done;
+    }
+
+    window = portMemAllocNonPaged(GZ_SLIDE_WINDOW_SIZE);
+    if (window == NULL)
+    {
+        status = NV_ERR_NO_MEMORY;
+        goto done;
+    }
+
+    utilGzInit(zArray, NULL, numTotalBytes, window, pGzState);
+
+    *ppGzState = pGzState;
+
+done:
+    if (status != NV_OK)
+    {
+        portMemFree(pGzState);
+        portMemFree(window);
+    }
+    return status;
+}
+
+NvU32 utilGzIterator(PGZ_INFLATE_STATE pGzState)
+{
+    NvU32 t;    /* block type */
+    NvU32 w;    /* current window position */
+    NvU32 b;    /* bit buffer */
+    NvU32 k;    /* number of bits in bit buffer */
+    NvU32 gzStatus = GZ_STATE_ITERATOR_ERROR;
+
+    // New decompression block: we need to construct the Huffman tables first.
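+    /*
+     * Per the DEFLATE format (RFC 1951), every block starts with a 1-bit
+     * "last block" flag followed by a 2-bit type field:
+     *   0 = stored (uncompressed), 1 = fixed Huffman codes,
+     *   2 = dynamic Huffman codes, 3 = reserved (invalid).
+     * That header is read and dispatched on below.
+     */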
+ if (pGzState->newblock == 1) + { + /* make local bit buffer */ + b = pGzState->bb; + k = pGzState->bk; + + /* read in last block bit */ + NEEDBITS(1) + pGzState->e = (NvU32)b & 1; + DUMPBITS(1) + + /* read in block type */ + NEEDBITS(2) + t = (NvU32)b & 3; + DUMPBITS(2) + + /* restore the global bit buffer */ + pGzState->bb = b; + pGzState->bk = k; + + /* inflate that block type */ + switch (t) + { + case 2: + { + gzStatus = dynamic_huft_build(pGzState); + break; + } + case 1: + { + gzStatus = fixed_huft_build(pGzState); + break; + } + case 0: + { + NvU32 n; + b = pGzState->bb; + k = pGzState->bk; + w = pGzState->wp; + + n = k & 7; + DUMPBITS(n); + + /* get the length and its complement */ + NEEDBITS(16) + n = ((unsigned int)b & 0xffff); + DUMPBITS(16) + NEEDBITS(16) + if (n != (unsigned int)((~b) & 0xffff)) + { + return GZ_STATE_ITERATOR_ERROR; /* error in compressed data */ + } + DUMPBITS(16) + + pGzState->wp = w; /* restore global window pointer */ + pGzState->bb = b; /* restore global bit buffer */ + pGzState->bk = k; + pGzState->codesState.sn = n; + break; + } + default: + { + return GZ_STATE_ITERATOR_ERROR; + } + } + + if (t != 0 && gzStatus != GZ_STATE_HUFT_OK) + { + return GZ_STATE_ITERATOR_ERROR; + } + + pGzState->newblock = 0; + + /* make local copies of globals */ + pGzState->codesState.b = pGzState->bb; /* initialize bit buffer */ + pGzState->codesState.k = pGzState->bk; + pGzState->codesState.w = pGzState->wp; /* initialize window position */ + } + + // decompress one slide window + if (pGzState->codesState.sn == 0) + { + gzStatus = inflate_codes_iterator(pGzState); + } + else + { + gzStatus = inflate_codes_iterator_store(pGzState); + } + + // decompression ok and current block finished. + if (gzStatus == GZ_STATE_ITERATOR_END) + { + /* restore the globals from the locals */ + pGzState->wp = pGzState->codesState.w; /* restore global window pointer */ + pGzState->bb = pGzState->codesState.b; /* restore global bit buffer */ + pGzState->bk = pGzState->codesState.k; + portMemSet(&pGzState->codesState, 0, sizeof(GZ_INFLATE_CODES_STATE)); + + huft_destroy(pGzState); + pGzState->newblock = 1; + + // current block is the last one, flush remain data in slide window + if (pGzState->e) + { + while (pGzState->bk >= 8) + { + pGzState->bk -= 8; + pGzState->inptr--; + } + + /* flush out slide */ + flush_output(pGzState->wp); + } + + // continue iteration + gzStatus = GZ_STATE_ITERATOR_OK; + } + + return gzStatus; +} + +NV_STATUS utilGzDestroy(PGZ_INFLATE_STATE pGzState) +{ + huft_destroy(pGzState); + portMemFree(pGzState->window); + portMemFree(pGzState); + return NV_OK; +} + +NvU32 utilGzGetData(PGZ_INFLATE_STATE pGzState, NvU32 offset, NvU32 size, NvU8 * outBuffer) +{ + NvU32 sizew = 0, oldOutBufSize; + NvU8 * oldInBuf, *oldOutBuf; + uch * oldWindow; + NV_STATUS status = NV_OK; + + if (pGzState == NULL || outBuffer == NULL || offset >= pGzState->outBufSize) + { + return 0; + } + + pGzState->optSize = 0; + // check requested range [offset, offset + size) with outptr + if (pGzState->outptr != 0) + { + if ( offset >= ((pGzState->outptr + WSIZE - 1) / WSIZE - 1) * WSIZE + pGzState->wp1 ) + { + // check remaining data in previous slide window + pGzState->wp1 = offset - (((pGzState->outptr + WSIZE -1 ) / WSIZE - 1) * WSIZE); + + if (pGzState->wp1 < pGzState->wp2) + { + sizew = pGzState->wp2 - pGzState->wp1; + + // request can be satisfied from window + if (sizew >= size) + { + portMemCopy(outBuffer, size, pGzState->window + pGzState->wp1, size); + pGzState->wp1 += size; + pGzState->optSize += 
size;
+                    return pGzState->optSize;
+                }
+                // copy data from slide window and continue iteration
+                else
+                {
+                    portMemCopy(outBuffer, sizew, pGzState->window + pGzState->wp1, sizew);
+                    outBuffer += sizew;
+                    pGzState->optSize += sizew;
+                }
+            }
+        }
+        else
+        {
+            // slide window passed requested range, restart decompression from the beginning.
+            huft_destroy(pGzState);
+
+            oldInBuf = pGzState->inbuf;
+            oldOutBuf = pGzState->outbuf;
+            oldOutBufSize = pGzState->outBufSize;
+            oldWindow = pGzState->window;
+
+            utilGzInit(oldInBuf, oldOutBuf, oldOutBufSize, oldWindow, pGzState);
+        }
+    }
+
+    pGzState->outLower = offset + sizew;
+    pGzState->outUpper = offset + size - 1;
+    pGzState->outbuf = outBuffer;
+    pGzState->wp1 = 0;
+    pGzState->wp2 = 0;
+
+    while (pGzState->outptr < offset + size)
+    {
+        if ((status = utilGzIterator(pGzState)) != GZ_STATE_ITERATOR_OK)
+            break;
+    }
+
+    if (status == GZ_STATE_ITERATOR_ERROR)
+    {
+        return 0;
+    }
+
+    return pGzState->optSize;
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c
new file mode 100644
index 0000000..155dee1
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c
@@ -0,0 +1,841 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+/***************************** Balanced Tree *******************************\
+*                                                                           *
+* A generic red-black tree library -- every operation is O(log(n)).        *
+* See http://en.wikipedia.org/wiki/Red-black_tree or similar www pages.    *
+*                                                                           *
+\***************************************************************************/
+
+#include "utils/nvprintf.h"
+#include "utils/nvassert.h"
+#include "nvport/nvport.h"
+#include "containers/btree.h"
+
+//
+// Debugging support.
+//
+#if PORT_IS_CHECKED_BUILD
+
+//
+// Dump current tree to debug port.
+//
+static NV_STATUS
+_btreeDumpBranch
+(
+    NODE *pNode,
+    NvU32 level
+)
+{
+    NvU32 i;
+    if (pNode)
+    {
+        _btreeDumpBranch(pNode->left, level+1);
+
+        NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: ");
+        for (i = 0; i < level; i++)
+        {
+            NV_PRINTF(LEVEL_INFO, "  ");
+        }
+        NV_PRINTF(LEVEL_INFO, "keyStart = 0x%llx\n", pNode->keyStart);
+        NV_PRINTF(LEVEL_INFO, "keyEnd = 0x%llx\n", pNode->keyEnd);
+        NV_PRINTF(LEVEL_INFO, "isRed = %d\n", pNode->isRed ? 
1 : 0); + NV_PRINTF(LEVEL_INFO, "parent = 0x%p\n", pNode->parent); + NV_PRINTF(LEVEL_INFO, "left = 0x%p\n", pNode->left); + NV_PRINTF(LEVEL_INFO, "right = 0x%p\n", pNode->right); + + _btreeDumpBranch(pNode->right, level+1); + } + return (NV_OK); +} + +static NV_STATUS +_btreeDumpTree +( + NODE *pRoot +) +{ + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: ======================== Tree Dump ==========================\n\r"); + if (pRoot == NULL) + { + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: NULL\n\r"); + } + else + { + _btreeDumpBranch(pRoot, 0); + } + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: =============================================================\n\r"); + return (NV_OK); +} + +// +// Validate node. +// +#define VALIDATE_NODE(pn) \ +{ \ + NV_ASSERT(_btreeNodeValidate(pn) == NV_OK); \ +} + +#define VALIDATE_TREE(pt) \ +{ \ + NV_ASSERT(_btreeTreeValidate(pt) == NV_OK); \ +} + +// +// Validate a nodes branch and count values. +// +static NV_STATUS +_btreeNodeValidate +( + NODE *pNode +) +{ + NV_STATUS status; + + status = NV_OK; + if (pNode == NULL) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR validating NULL NODE.\n\r"); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + return (NV_ERR_INVALID_PARAMETER); + } + if (pNode->left) + { + if (pNode->left->keyEnd >= pNode->keyStart) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent left branch, keyStart = 0x%llx\n", pNode->keyStart); + NV_PRINTF(LEVEL_ERROR, " Left keyEnd = 0x%llx\n", pNode->left->keyEnd); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->left->parent != pNode) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent left branch, Node = 0x%p\n", pNode); + NV_PRINTF(LEVEL_ERROR, " left->parent = 0x%p\n", pNode->left); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + if (pNode->right) + { + if (pNode->right->keyStart <= pNode->keyEnd) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent right branch, keyEnd = 0x%llx\n", pNode->keyEnd); + NV_PRINTF(LEVEL_ERROR, " Right keyStart = 0x%llx\n", pNode->right->keyStart); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->right->parent != pNode) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent right branch, Node = 0x%p\n", pNode); + NV_PRINTF(LEVEL_ERROR, " right->parent = 0x%p\n", pNode->right); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + + // red black tree property: Every red node that is not a leaf has only black children. 
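+    //
+    // Example of a violation this catches: a red node B with a red child D
+    //
+    //        A (black)
+    //       /
+    //      B (red)
+    //     /
+    //    D (red)      <-- must not occur; _insertFixup()/_deleteFixup()
+    //                     recolor and rotate precisely to prevent this shape
+    //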
+ if (pNode->isRed) + { + if (pNode->left && pNode->left->isRed) + { + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->right && pNode->right->isRed) + { + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + + return (status); +} + + +static NV_STATUS +_btreeBranchValidate +( + NODE *pNode +) +{ + NV_STATUS status; + status = NV_OK; + if (pNode) + { + if (pNode->left) + { + status |= _btreeBranchValidate(pNode->left); + } + status |= _btreeNodeValidate(pNode); + if (pNode->right) + { + status |= _btreeBranchValidate(pNode->right); + } + } + return (status); +} + +static NV_STATUS +_btreeTreeValidate +( + NODE *pRoot +) +{ + NV_STATUS status; + + status = NV_OK; + if (pRoot) + { + NV_ASSERT(!pRoot->isRed); + status = _btreeNodeValidate(pRoot); + if (pRoot->left) + { + status |= _btreeBranchValidate(pRoot->left); + } + if (pRoot->right) + { + status |= _btreeBranchValidate(pRoot->right); + } + } + if (status) + { + _btreeDumpTree(pRoot); + } + return (status); +} + +#else +// +// Validate nothing. +// +#define VALIDATE_NODE(pn) +#define VALIDATE_TREE(pt) +#endif // PORT_IS_CHECKED_BUILD + +// rbt helper function +static void _rotateLeft(NODE **pRoot, NODE *x) +{ + // rotate node x to left + NODE *y = x->right; + + NV_ASSERT (x); + NV_ASSERT (y); + + // establish x->right link + x->right = y->left; + if (y->left) + { + y->left->parent = x; + } + + // establish y->parent link + y->parent = x->parent; + if (x->parent) + { + if (x == x->parent->left) + { + x->parent->left = y; + } + else + { + x->parent->right = y; + } + } + else + { + *pRoot = y; + } + + // link x and y + y->left = x; + x->parent = y; + VALIDATE_NODE(x); +} + +// rbt helper function +static void _rotateRight(NODE **pRoot, NODE *x) +{ + // rotate node x to right + NODE *y = x->left; + + NV_ASSERT (x); + NV_ASSERT (y); + + // establish x->left link + x->left = y->right; + if (y->right) + { + y->right->parent = x; + } + + // establish y->parent link + y->parent = x->parent; + if (x->parent) + { + if (x == x->parent->right) + { + x->parent->right = y; + } + else + { + x->parent->left = y; + } + } + else + { + *pRoot = y; + } + + // link x and y + y->right = x; + x->parent = y; + VALIDATE_NODE(x); +} + +// rbt helper function: +// - maintain red-black tree balance after inserting node x +static void _insertFixup(NODE **pRoot, NODE *x) +{ + // check red-black properties + while((x!=*pRoot) && x->parent->isRed) + { + // we have a violation + if (x->parent == x->parent->parent->left) + { + NODE *y = x->parent->parent->right; + if (y && y->isRed) + { + // uncle is RED + x->parent->isRed = NV_FALSE; + y->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + x = x->parent->parent; + } + else + { + // uncle is BLACK + if (x == x->parent->right) + { + // make x a left child + x = x->parent; + _rotateLeft(pRoot, x); + } + + // recolor and rotate + x->parent->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + _rotateRight(pRoot, x->parent->parent); + } + } + else + { + // mirror image of above code + NODE *y = x->parent->parent->left; + if (y && y->isRed) + { + // uncle is RED + x->parent->isRed = NV_FALSE; + y->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + x = x->parent->parent; + } + else + { + // uncle is BLACK + if (x == x->parent->left) + { + x = x->parent; + _rotateRight(pRoot, x); + } + x->parent->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + _rotateLeft(pRoot, x->parent->parent); + } + } + } + (*pRoot)->isRed = NV_FALSE; +} + +// 
insert a new node (no duplicates allowed) +NV_STATUS +btreeInsert +( + PNODE newNode, + PNODE *pRoot +) +{ + NODE *current; + NODE *parent; + + if (newNode == NULL || pRoot == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + // find future parent + current = *pRoot; + parent = NULL; + + if (newNode->keyEnd < newNode->keyStart) + { + return NV_ERR_INVALID_ARGUMENT; + } + + while (current) + { + parent = current; + if (newNode->keyEnd < current->keyStart) + { + current = current->left; + } + else if (newNode->keyStart > current->keyEnd) + { + current = current->right; + } + else + { + return NV_ERR_INSERT_DUPLICATE_NAME; + } + } + + // the caller allocated the node already, just fix the links + newNode->parent = parent; + newNode->left = NULL; + newNode->right = NULL; + newNode->isRed = NV_TRUE; + + // insert node in tree + if(parent) + { + if (newNode->keyEnd < parent->keyStart) + { + parent->left = newNode; + } + else + { + parent->right = newNode; + } + } + else + { + *pRoot = newNode; + } + + _insertFixup(pRoot, newNode); + VALIDATE_NODE(newNode); + + return NV_OK; +} + +// rbt helper function +// - maintain red-black tree balance after deleting node x +// - this is a bit ugly because we use NULL as a sentinel +static void _deleteFixup(NODE **pRoot, NODE *parentOfX, NODE *x) +{ + while ((x != *pRoot) && (!x || !x->isRed)) + { + NV_ASSERT (!(x == NULL && parentOfX == NULL)); + // NULL nodes are sentinel nodes. If we delete a sentinel node (x==NULL) it + // must have a parent node (or be the root). Hence, parentOfX == NULL with + // x==NULL is never possible (tree invariant) + + if ((parentOfX != NULL) && (x == parentOfX->left)) + { + NODE *w = parentOfX->right; + if (w && w->isRed) + { + w->isRed = NV_FALSE; + parentOfX->isRed = NV_TRUE; + _rotateLeft(pRoot, parentOfX); + w = parentOfX->right; + } + if (!w || (((!w->left || !w->left->isRed) && (!w->right || !w->right->isRed)))) + { + if (w) + { + w->isRed = NV_TRUE; + } + x = parentOfX; + } + else + { + if (!w->right || !w->right->isRed) + { + w->left->isRed = NV_FALSE; + w->isRed = NV_TRUE; + _rotateRight(pRoot, w); + w = parentOfX->right; + } + w->isRed = parentOfX->isRed; + parentOfX->isRed = NV_FALSE; + w->right->isRed = NV_FALSE; + _rotateLeft(pRoot, parentOfX); + x = *pRoot; + } + } + else if (parentOfX != NULL) + { + NODE *w = parentOfX->left; + if (w && w->isRed) + { + w->isRed = NV_FALSE; + parentOfX->isRed = NV_TRUE; + _rotateRight(pRoot, parentOfX); + w = parentOfX->left; + } + if (!w || ((!w->right || !w->right->isRed) && (!w->left || !w->left->isRed))) + { + if (w) + { + w->isRed = NV_TRUE; + } + x = parentOfX; + } + else + { + if (!w->left || !w->left->isRed) + { + w->right->isRed = NV_FALSE; + w->isRed = NV_TRUE; + _rotateLeft(pRoot, w); + w = parentOfX->left; + } + w->isRed = parentOfX->isRed; + parentOfX->isRed = NV_FALSE; + w->left->isRed = NV_FALSE; + _rotateRight(pRoot, parentOfX); + x = *pRoot; + } + } + else if (x == NULL) + { + // This should never happen. 
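+            // (Reaching here would require x == NULL with parentOfX == NULL,
+            // which the tree invariant asserted at the top of the loop
+            // already rules out.)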
+ break; + } + parentOfX = x->parent; + } + if (x) + { + x->isRed = NV_FALSE; + } +} + +// +// Unlink node from tree +// +NV_STATUS +btreeUnlink +( + PNODE pNode, + PNODE *pRoot +) +{ + NODE *x; + NODE *y; + NODE *z; + NODE *parentOfX; + NvU32 yWasBlack; + + NV_ASSERT_CHECKED(btreeSearch(pNode->keyStart, &z, *pRoot) == NV_OK); + NV_ASSERT_CHECKED(z == pNode); + + if (pNode == NULL || pRoot == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + z = pNode; + + // unlink + if (!z->left || !z->right) + { + // y has a SENTINEL node as a child + y = z; + } + else + { + // find tree successor + y = z->right; + while (y->left) + { + y = y->left; + } + } + + // x is y's only child + if (y->left) + { + x = y->left; + } + else + { + x = y->right; + } + + // remove y from the parent chain + parentOfX = y->parent; + if (x) + { + x->parent = parentOfX; + } + if (y->parent) + { + if (y == y->parent->left) + { + y->parent->left = x; + } + else + { + y->parent->right = x; + } + } + else + { + *pRoot = x; + } + + yWasBlack = !y->isRed; + if (y != z) + { + // we need to replace z with y so the memory for z can be freed + y->parent = z->parent; + if (z->parent) + { + if (z == z->parent->left) + { + z->parent->left = y; + } + else + { + z->parent->right = y; + } + } + else + { + *pRoot = y; + } + + y->isRed = z->isRed; + + y->left = z->left; + if (z->left) + { + z->left->parent = y; + } + y->right = z->right; + if (z->right) + { + z->right->parent = y; + } + + if (parentOfX == z) + { + parentOfX = y; + } + } + + if (yWasBlack) + { + _deleteFixup(pRoot, parentOfX, x); + if (parentOfX) + { + VALIDATE_NODE(parentOfX); + } + } + + return NV_OK; +} + +// +// Search for node in tree. +// +NV_STATUS +btreeSearch +( + NvU64 keyOffset, + PNODE *pNode, + PNODE root +) +{ + // uninitialized ? + NODE *current = root; + while(current) + { + VALIDATE_NODE(current); + if (keyOffset < current->keyStart) + { + current = current->left; + } + else if (keyOffset > current->keyEnd) + { + current = current->right; + } + else + { + *pNode = current; + return NV_OK; + } + } + *pNode = NULL; + return NV_ERR_OBJECT_NOT_FOUND; +} + +// +// Enumerate tree (starting at the node with specified value) +// +NV_STATUS +btreeEnumStart +( + NvU64 keyOffset, + PNODE *pNode, + PNODE root +) +{ + *pNode = NULL; + + // initialized ? + if (root) + { + NODE *current = root; + VALIDATE_TREE(root); + while(current) + { + if (keyOffset < current->keyStart) + { + *pNode = current; + current = current->left; + } + else if (keyOffset > current->keyEnd) + { + current = current->right; + } + else + { + *pNode = current; + break; + + } + } + if (*pNode) + { + VALIDATE_NODE(*pNode); + } + return NV_OK; + } + return NV_OK; +} + +NV_STATUS +btreeEnumNext +( + PNODE *pNode, + PNODE root +) +{ + // no nodes ? + NODE *current = NULL; + VALIDATE_NODE(*pNode); + VALIDATE_NODE(root); + if (root && *pNode) + { + // if we don't have a right subtree return the parent + current = *pNode; + + // pick the leftmost node of the right subtree ? + if (current->right) + { + current = current->right; + for(;current->left;) + { + current = current->left; + } + } + else + { + // go up until we find the right inorder node + for(current = current->parent; current; current = current->parent) + { + if (current->keyStart > (*pNode)->keyEnd) + { + break; + } + } + } + } + *pNode = current; + if (*pNode) + { + VALIDATE_NODE(*pNode); + } + return NV_OK; +} + + + +// +// Frees all the "Data" fields stored in Nodes. 
+// If each Node is embedded in the structure pointed by its "Data" field, then +// this function destroys the whole btree +// +NV_STATUS +btreeDestroyData +( + PNODE pNode +) +{ + if (pNode == NULL) + return NV_OK; + + btreeDestroyData(pNode->left); + btreeDestroyData(pNode->right); + portMemFree (pNode->Data); + + return NV_OK; +} + + + +// +// Frees all the nodes and data stored in them. +// Don't use if the nodes were allocated within other structs +// (e.g. if the Node is embedded within the struct pointed by its "Data" field) +// +NV_STATUS +btreeDestroyNodes +( + PNODE pNode +) +{ + if (pNode == NULL) + return NV_OK; + + btreeDestroyNodes(pNode->left); + btreeDestroyNodes(pNode->right); + portMemFree (pNode); + + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c new file mode 100644 index 0000000..ec07078 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c @@ -0,0 +1,1418 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if defined(NVRM) +# include "os/os.h" +#else +# include "shrdebug.h" +# include "nvos.h" +#endif +#include "containers/eheap_old.h" + +#if !defined(SRT_BUILD) +#include "os/os.h" +#endif + +static void initPublicObjectFunctionPointers_EHeap(POBJEHEAP pHeap); +static NV_STATUS eheapInit(POBJEHEAP, NvU64, NvU64, NvU32, NvU32); +static NV_STATUS eheapDestruct(POBJEHEAP); +static NV_STATUS eheapAlloc(POBJEHEAP, NvU32, NvU32 *, NvU64 *, NvU64 *,NvU64, NvU64, PEMEMBLOCK*, void*, EHeapOwnershipComparator*); +static NV_STATUS eheapFree(POBJEHEAP, NvU64); +static void eheapInfo(POBJEHEAP, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +static void eheapInfoForRange(POBJEHEAP, NV_RANGE, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +static NV_STATUS eheapGetSize(POBJEHEAP, NvU64 *); +static NV_STATUS eheapGetFree(POBJEHEAP, NvU64 *); +static NV_STATUS eheapGetBase(POBJEHEAP, NvU64 *); +static PEMEMBLOCK eheapGetBlock(POBJEHEAP, NvU64, NvBool); +static NV_STATUS eheapSetAllocRange(POBJEHEAP, NvU64, NvU64); +static NV_STATUS eheapTraverse(POBJEHEAP, void *, EHeapTraversalFn, NvS32); +static NV_STATUS _eheapBlockFree(POBJEHEAP pHeap, PEMEMBLOCK block); +static NvU32 eheapGetNumBlocks(POBJEHEAP); +static NV_STATUS eheapGetBlockInfo(POBJEHEAP, NvU32, NVOS32_HEAP_DUMP_BLOCK *); +static NV_STATUS eheapSetOwnerIsolation(POBJEHEAP, NvBool, NvU32); +static NvBool _eheapCheckOwnership(POBJEHEAP, void*, NvU64, NvU64, PEMEMBLOCK, EHeapOwnershipComparator*); + +void +constructObjEHeap(POBJEHEAP pHeap, NvU64 Base, NvU64 LimitPlusOne, NvU32 sizeofMemBlock, NvU32 numPreAllocMemStruct) +{ + initPublicObjectFunctionPointers_EHeap(pHeap); + + eheapInit(pHeap, Base, LimitPlusOne, sizeofMemBlock, numPreAllocMemStruct); +} + +static void +initPublicObjectFunctionPointers_EHeap(POBJEHEAP pHeap) +{ + pHeap->eheapDestruct = eheapDestruct; + pHeap->eheapAlloc = eheapAlloc; + pHeap->eheapFree = eheapFree; + pHeap->eheapInfo = eheapInfo; + pHeap->eheapInfoForRange = eheapInfoForRange; + pHeap->eheapGetSize = eheapGetSize; + pHeap->eheapGetFree = eheapGetFree; + pHeap->eheapGetBase = eheapGetBase; + pHeap->eheapGetBlock = eheapGetBlock; + pHeap->eheapSetAllocRange = eheapSetAllocRange; + pHeap->eheapTraverse = eheapTraverse; + pHeap->eheapGetNumBlocks = eheapGetNumBlocks; + pHeap->eheapGetBlockInfo = eheapGetBlockInfo; + pHeap->eheapSetOwnerIsolation = eheapSetOwnerIsolation; +} + +static NV_STATUS +_eheapAllocMemStruct +( + POBJEHEAP pHeap, + PEMEMBLOCK* ppMemBlock +) +{ + if (pHeap->numPreAllocMemStruct > 0) + { + // We are out of pre-allocated mem data structs + if (NULL == pHeap->pFreeMemStructList) + { + NV_ASSERT(0); + return NV_ERR_OPERATING_SYSTEM; + } + + *ppMemBlock = pHeap->pFreeMemStructList; + pHeap->pFreeMemStructList = pHeap->pFreeMemStructList->next; + } + else + { + *ppMemBlock = portMemAllocNonPaged(pHeap->sizeofMemBlock); + + if (*ppMemBlock == NULL) + { + NV_ASSERT(0); + return NV_ERR_OPERATING_SYSTEM; + } + portMemSet(*ppMemBlock, 0, pHeap->sizeofMemBlock); + } + + return NV_OK; +} + +static NV_STATUS +_eheapFreeMemStruct +( + POBJEHEAP pHeap, + PEMEMBLOCK* ppMemBlock +) +{ + if (pHeap->numPreAllocMemStruct > 0) + { + portMemSet(*ppMemBlock, 0, pHeap->sizeofMemBlock); + + (*ppMemBlock)->next = pHeap->pFreeMemStructList; + pHeap->pFreeMemStructList = *ppMemBlock; + + *ppMemBlock = NULL; + } + else + { + portMemFree(*ppMemBlock); + *ppMemBlock = NULL; + } + + return NV_OK; +} + +// +// Create a heap. 
Even though we can return error here the resultant +// object must be self consistent (zero pointers, etc) if there were +// alloc failures, etc. +// +static NV_STATUS +eheapInit +( + POBJEHEAP pHeap, + NvU64 Base, + NvU64 LimitPlusOne, + NvU32 sizeofData, + NvU32 numPreAllocMemStruct +) +{ + PEMEMBLOCK block; + NvU32 i; + + // + // Simply create a free heap. + // + pHeap->base = Base; + pHeap->total = LimitPlusOne - Base; + pHeap->rangeLo = pHeap->base; + pHeap->rangeHi = pHeap->base + pHeap->total - 1; + pHeap->free = pHeap->total; + pHeap->sizeofMemBlock = sizeofData + sizeof(EMEMBLOCK); + + pHeap->numPreAllocMemStruct = 0; + pHeap->pPreAllocAddr = NULL; + pHeap->pBlockList = NULL; + pHeap->pFreeBlockList = NULL; + pHeap->pFreeMemStructList = NULL; + pHeap->numBlocks = 0; + pHeap->pBlockTree = NULL; + pHeap->bOwnerIsolation = NV_FALSE; + pHeap->ownerGranularity = 0; + + // + // User requested a static eheap that has a list of pre-allocated + // EMEMBLOCK data structure. + // + if (numPreAllocMemStruct > 0) + { + ++numPreAllocMemStruct; // reserve one for us - see below + + pHeap->pPreAllocAddr = portMemAllocNonPaged(pHeap->sizeofMemBlock * numPreAllocMemStruct); + + if (pHeap->pPreAllocAddr) + { + pHeap->numPreAllocMemStruct = numPreAllocMemStruct; + pHeap->pFreeMemStructList = pHeap->pPreAllocAddr; + + portMemSet(pHeap->pFreeMemStructList, 0, pHeap->sizeofMemBlock * numPreAllocMemStruct); + + // + // Form the list of free mem structures. Just need to utilize the next field of EMEMBLOCK. + // + for (i = 0; i < numPreAllocMemStruct - 1; i++) + { + ((PEMEMBLOCK)((NvU8 *)pHeap->pFreeMemStructList + (i * pHeap->sizeofMemBlock)))->next + = (PEMEMBLOCK)((NvU8 *)pHeap->pFreeMemStructList + (i + 1) * pHeap->sizeofMemBlock); + } + } + } + + if (_eheapAllocMemStruct(pHeap, &block) != NV_OK) + { + return NV_ERR_OPERATING_SYSTEM; + } + + block->owner = NVOS32_BLOCK_TYPE_FREE; + block->refCount = 0; + block->begin = Base; + block->align = Base; + block->end = LimitPlusOne - 1; + block->prevFree = block; + block->nextFree = block; + block->next = block; + block->prev = block; + block->pData = (void*)(block+1); + + // + // Fill in the heap bank info. 
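+    // At this point the heap is a single free block spanning
+    // [Base, LimitPlusOne - 1]; its next/prev and nextFree/prevFree links
+    // point back at itself, since both lists are circular.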
+ // + pHeap->pBlockList = block; + pHeap->pFreeBlockList = block; + pHeap->numBlocks = 1; + + portMemSet((void *)&block->node, 0, sizeof(NODE)); + block->node.keyStart = block->begin; + block->node.keyEnd = block->end; + block->node.Data = (void *)block; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + eheapDestruct(pHeap); + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +} + +static NV_STATUS +eheapDestruct +( + POBJEHEAP pHeap +) +{ + PEMEMBLOCK block, blockFirst, blockNext; + NvBool headptr_updated; + + if (!pHeap->pBlockList) + return NV_OK; + + // + // Free all allocated blocks + // + do { + block = blockFirst = pHeap->pBlockList; + headptr_updated = NV_FALSE; + + do { + blockNext = block->next; + + _eheapBlockFree(pHeap, block); + + // restart scanning the list, if the heap->pBlockList changed + if (blockFirst != pHeap->pBlockList) { + headptr_updated = NV_TRUE; + break; + } + + block = blockNext; + + } while (block != pHeap->pBlockList); + + } while (headptr_updated); + + if (pHeap->numPreAllocMemStruct > 0) + { + // free static blocks + portMemFree(pHeap->pPreAllocAddr); + pHeap->pPreAllocAddr = NULL; + } + else + { + portMemFree(pHeap->pBlockList); + pHeap->pBlockList = NULL; + } + + return NV_OK; +} + +// 'flags' using NVOS32_ALLOC_FLAGS_* though some are n/a +static NV_STATUS +eheapAlloc +( + POBJEHEAP pHeap, + NvU32 owner, + NvU32 *flags, + NvU64 *offset, + NvU64 *size, + NvU64 offsetAlign, + NvU64 sizeAlign, + PEMEMBLOCK * ppMemBlock, // not generally useful over e.g. a split! + void *pIsolationID, + EHeapOwnershipComparator *checker +) +{ + NvU64 allocLo, allocAl, allocHi; + PEMEMBLOCK blockFirstFree, blockFree; + PEMEMBLOCK blockNew = NULL, blockSplit = NULL; + NvU64 desiredOffset; + NvU64 allocSize; + NvU64 rangeLo, rangeHi; + + if ((*flags & NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX) && + (*flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Save the offset for fixed address requests, or it's likely uninitialized. + desiredOffset = (*flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) ? *offset: 0; + + // + // zero result so that apps that ignore return code have another + // chance to see the error of their ways... + // + *offset = 0; + + // + // Check for valid size. + // + if (*size == 0) + return NV_ERR_INVALID_ARGUMENT; + + // + // Range-limited the request. + // + rangeLo = pHeap->rangeLo; + rangeHi = pHeap->rangeHi; + + if (rangeLo == 0 && rangeHi == 0) { + rangeLo = pHeap->base; + rangeHi = pHeap->base + pHeap->total - 1; + } + if (rangeHi > pHeap->base + pHeap->total - 1) { + rangeHi = pHeap->base + pHeap->total - 1; + } + if (rangeLo > rangeHi) + return NV_ERR_INVALID_ARGUMENT; + + // Align size up. + allocSize = ((*size + (sizeAlign - 1)) / sizeAlign) * sizeAlign; + + // + // Trivial reject size vs. free. 
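+    // (allocSize was just rounded up to a multiple of sizeAlign; e.g. a
+    // request of 0x1001 bytes with sizeAlign 0x1000 becomes 0x2000.  If even
+    // the total free byte count cannot cover that, fail without scanning.)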
+ // + if (pHeap->free < allocSize) + return NV_ERR_NO_MEMORY; + + /* This flag will force an exclusive allocation of the request + * within the range of ownerGranularity + */ + + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX ) + { + NvU64 desiredOffsetLo, desiredOffsetHi; + + NV_ASSERT_OR_RETURN(pHeap->ownerGranularity, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pHeap->bOwnerIsolation && checker, NV_ERR_INVALID_ARGUMENT); + + blockFree = pHeap->pFreeBlockList; + + if (blockFree == NULL) + goto failed; + + do + { + desiredOffset = NV_ALIGN_DOWN(blockFree->begin, pHeap->ownerGranularity) + offsetAlign; + + while (desiredOffset + allocSize - 1 <= blockFree->end) + { + desiredOffsetLo = NV_ALIGN_DOWN(desiredOffset, pHeap->ownerGranularity); + desiredOffsetHi = (((desiredOffset % pHeap->ownerGranularity) == 0) ? + NV_ALIGN_UP((desiredOffset + 1), pHeap->ownerGranularity) : + NV_ALIGN_UP(desiredOffset, pHeap->ownerGranularity)); + + if ((desiredOffset >= blockFree->begin) && + ((desiredOffsetLo >= blockFree->begin) && + (desiredOffsetHi <= blockFree->end))) + { + if (_eheapCheckOwnership(pHeap, pIsolationID, desiredOffset, + desiredOffset + allocSize - 1, blockFree, checker)) + { + allocLo = desiredOffset; + allocHi = desiredOffset + allocSize - 1; + allocAl = allocLo; + goto got_one; + } + } + + desiredOffset += pHeap->ownerGranularity; + } + + blockFree = blockFree->nextFree; + + } while (blockFree != pHeap->pFreeBlockList); + + /* return error if can't get that particular address */ + goto failed; + } + + // Ensure a valid allocation type was passed in + //if (type > NVOS32_NUM_MEM_TYPES - 1) + //return NV_ERR_INVALID_ARGUMENT; + + // + // Check for fixed address request. + // This allows caller to say: I really want this memory at a particular + // offset. Returns error if can't get that offset. + // + if ( *flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE ) + { + // is our desired offset suitably aligned? + if (desiredOffset % offsetAlign) + goto failed; + + blockFree = pHeap->pFreeBlockList; + + if (blockFree == NULL) + { + goto failed; + } + + do + { + // + // Allocate from the bottom of the memory block. + // + blockFree = blockFree->nextFree; + + // Does this block contain our desired range? + if ( (desiredOffset >= blockFree->begin) && + (desiredOffset + allocSize - 1) <= blockFree->end ) + { + // + // Make sure no allocated block between ALIGN_DOWN(allocLo, granularity) + // and ALIGN_UP(allocHi, granularity) have a different owner than the current allocation + // + if (pHeap->bOwnerIsolation) + { + NV_ASSERT(NULL != checker); + if (!_eheapCheckOwnership(pHeap, pIsolationID, desiredOffset, + desiredOffset + allocSize - 1, blockFree, checker)) + { + break; + } + } + + // we have a match, now remove it from the pool + allocLo = desiredOffset; + allocHi = desiredOffset + allocSize - 1; + allocAl = allocLo; + goto got_one; + } + + } while (blockFree != pHeap->pFreeBlockList); + + // return error if can't get that particular address + goto failed; + } + + blockFirstFree = pHeap->pFreeBlockList; + if (!blockFirstFree) + goto failed; + + // + // When scanning upwards, start at the bottom - 1 so the following loop looks symmetric. + // + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) { + blockFirstFree = blockFirstFree->prevFree; + } + blockFree = blockFirstFree; + do + { + NvU64 blockLo; + NvU64 blockHi; + + // + // Is this block completely out of range? 
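+        // (Only free blocks whose [begin, end] intersects [rangeLo, rangeHi]
+        // are considered; the intersection is computed just below as
+        // blockLo/blockHi before the alignment math picks allocLo/allocHi.)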
+ // + if ( ( blockFree->end < rangeLo ) || ( blockFree->begin > rangeHi ) ) + { + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + blockFree = blockFree->prevFree; + else + blockFree = blockFree->nextFree; + continue; + } + + // + // Find the intersection of the free block and the specified range. + // + blockLo = (rangeLo > blockFree->begin) ? rangeLo : blockFree->begin; + blockHi = (rangeHi < blockFree->end) ? rangeHi : blockFree->end; + + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + { + // + // Allocate from the top of the memory block. + // + allocLo = (blockHi - allocSize + 1) / offsetAlign * offsetAlign; + allocAl = allocLo; + allocHi = allocAl + allocSize - 1; + } + else + { + // + // Allocate from the bottom of the memory block. + // + allocAl = (blockLo + (offsetAlign - 1)) / offsetAlign * offsetAlign; + allocLo = allocAl; + allocHi = allocAl + allocSize - 1; + } + + // + // Make sure no allocated block between ALIGN_DOWN(allocLo, granularity) + // and ALIGN_UP(allocHi, granularity) have a different owner than the current allocation + // + if (pHeap->bOwnerIsolation) + { + NV_ASSERT(NULL != checker); + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + + // + // Try realloc if we still have enough free memory in current free block + // + if (*flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN) + { + NvU64 checkLo = NV_ALIGN_DOWN(allocLo, pHeap->ownerGranularity); + + if (checkLo > blockFree->begin) + { + blockHi = checkLo; + + allocLo = (blockHi - allocSize + 1) / offsetAlign * offsetAlign; + allocAl = allocLo; + allocHi = allocAl + allocSize - 1; + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + } + } + else + { + NvU64 checkHi = NV_ALIGN_UP(allocHi, pHeap->ownerGranularity); + + if (checkHi < blockFree->end) + { + blockLo = checkHi; + + allocAl = (blockLo + (offsetAlign - 1)) / offsetAlign * offsetAlign; + allocLo = allocAl; + allocHi = allocAl + allocSize - 1; + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + } + } + + // + // Cannot find any available memory in current free block, go to the next + // + goto next_free; + } + +alloc_done: + // + // Does the desired range fall completely within this block? + // Also make sure it does not wrap-around. + // Also make sure it is within the desired range. + // + if ((allocLo >= blockFree->begin) && (allocHi <= blockFree->end)) + { + if (allocLo <= allocHi) + if ((allocLo >= rangeLo) && (allocHi <= rangeHi)) + goto got_one; + + } + +next_free: + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + blockFree = blockFree->prevFree; + else + blockFree = blockFree->nextFree; + + } while (blockFree != blockFirstFree); + + // + // Out of memory. + // + goto failed; + + // + // We have a match. Now link it in, trimming or splitting + // any slop from the enclosing block as needed. + // + + got_one: + + if ((allocLo == blockFree->begin) && (allocHi == blockFree->end)) + { + // + // Wow, exact match so replace free block. + // Remove from free list. + // + blockFree->nextFree->prevFree = blockFree->prevFree; + blockFree->prevFree->nextFree = blockFree->nextFree; + if (pHeap->pFreeBlockList == blockFree) + { + // + // This could be the last free block. 
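+            // (The free list is circular, so a block whose nextFree points
+            // back at itself is the only free block left in the heap.)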
+ // + if (blockFree->nextFree == blockFree) + pHeap->pFreeBlockList = NULL; + else + pHeap->pFreeBlockList = blockFree->nextFree; + } + + // + // Set owner/type values here. Don't move because some fields are unions. + // + blockFree->owner = owner; + blockFree->refCount = 1; + blockFree->align = allocAl; + + // tail end code below assumes 'blockFree' is the new block + blockNew = blockFree; + } + else if ((allocLo >= blockFree->begin) && (allocHi <= blockFree->end)) + { + // + // Found a fit. + // It isn't exact, so we'll have to do a split + // + if (_eheapAllocMemStruct(pHeap, &blockNew) != NV_OK) + { + goto failed; + } + + blockNew->owner = owner; + blockNew->refCount = 1; + blockNew->begin = allocLo; + blockNew->align = allocAl; + blockNew->end = allocHi; + + if ((blockFree->begin < blockNew->begin) && (blockFree->end > blockNew->end)) + { + // + // Split free block in two. + // + if (_eheapAllocMemStruct(pHeap, &blockSplit) != NV_OK) + { + goto failed; + } + + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + blockSplit->owner = NVOS32_BLOCK_TYPE_FREE; + blockSplit->refCount = 0; + blockSplit->begin = blockNew->end + 1; + blockSplit->align = blockSplit->begin; + blockSplit->end = blockFree->end; + blockSplit->pData = (void*)(blockNew+1); + blockFree->end = blockNew->begin - 1; + // + // Insert free split block into free list. + // + blockSplit->nextFree = blockFree->nextFree; + blockSplit->prevFree = blockFree; + blockSplit->nextFree->prevFree = blockSplit; + blockFree->nextFree = blockSplit; + // + // Insert new and split blocks into block list. + // + blockNew->next = blockSplit; + blockNew->prev = blockFree; + blockSplit->next = blockFree->next; + blockSplit->prev = blockNew; + blockFree->next = blockNew; + blockSplit->next->prev = blockSplit; + + // update numBlocks count + pHeap->numBlocks++; + + // re-insert updated free block into rb-tree + blockFree->node.keyEnd = blockFree->end; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new and split blocks into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + portMemSet((void *)&blockSplit->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + blockSplit->node.keyStart = blockSplit->begin; + blockSplit->node.keyEnd = blockSplit->end; + blockSplit->node.Data = (void *)blockSplit; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + if (btreeInsert(&blockSplit->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else if (blockFree->end == blockNew->end) + { + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // + // New block inserted after free block. 
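+            // Before:  [ blockFree ............................. end ]
+            // After:   [ blockFree (trimmed) ][ blockNew ....... end ]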
+ // + blockFree->end = blockNew->begin - 1; + blockNew->next = blockFree->next; + blockNew->prev = blockFree; + blockFree->next->prev = blockNew; + blockFree->next = blockNew; + + // re-insert updated free block into rb-tree + blockFree->node.keyEnd = blockFree->end; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new block into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else if (blockFree->begin == blockNew->begin) + { + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // + // New block inserted before free block. + // + blockFree->begin = blockNew->end + 1; + blockFree->align = blockFree->begin; + blockNew->next = blockFree; + blockNew->prev = blockFree->prev; + blockFree->prev->next = blockNew; + blockFree->prev = blockNew; + if (pHeap->pBlockList == blockFree) + pHeap->pBlockList = blockNew; + + // re-insert updated free block into rb-tree + blockFree->node.keyStart = blockFree->begin; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new block into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else + { + failed: + if (blockNew) _eheapFreeMemStruct(pHeap, &blockNew); + if (blockSplit) _eheapFreeMemStruct(pHeap, &blockSplit); + return NV_ERR_NO_MEMORY; + } + + pHeap->numBlocks++; + } + + NV_ASSERT(blockNew != NULL); // assert is for Coverity + pHeap->free -= blockNew->end - blockNew->begin + 1; // Reduce free amount by allocated block size. + + // Initialize a pointer to the outer wrapper's specific control structure, tacked to the end of the EMEMBLOCK + blockNew->pData = (void*)(blockNew+1); + + // Return values + *size = allocSize; + *offset = blockNew->align; + if ( ppMemBlock) *ppMemBlock = blockNew; + + return NV_OK; +} + +static NV_STATUS +_eheapBlockFree +( + POBJEHEAP pHeap, + PEMEMBLOCK block +) +{ + PEMEMBLOCK blockTmp; + + // + // Check for valid owner. + // + if (block->owner == NVOS32_BLOCK_TYPE_FREE) return NV_ERR_INVALID_ARGUMENT; + + // + // Check refCount. + // + if (--block->refCount != 0) + return NV_OK; + + // + // Update free count. + // + pHeap->free += block->end - block->begin + 1; + + // + // + // Can this merge with any surrounding free blocks? + // + if ((block->prev->owner == NVOS32_BLOCK_TYPE_FREE) && (block != pHeap->pBlockList)) + { + // + // Remove block to be freed and previous one since nodes will be + // combined into single one. + // + if (btreeUnlink(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + if (btreeUnlink(&block->prev->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + + // + // Merge with previous block. 
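+        // Before:  [ prev (free) ][ block being freed ]
+        // After:   [ prev (free), end extended to block->end ]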
+ // + block->prev->next = block->next; + block->next->prev = block->prev; + block->prev->end = block->end; + blockTmp = block; + block = block->prev; + pHeap->numBlocks--; + _eheapFreeMemStruct(pHeap, &blockTmp); + + // re-insert updated free block into rb-tree + block->node.keyEnd = block->end; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + } + if ((block->next->owner == NVOS32_BLOCK_TYPE_FREE) && (block->next != pHeap->pBlockList)) + { + // + // Remove block to be freed and next one since nodes will be + // combined into single one. + // + if (btreeUnlink(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + if (btreeUnlink(&block->next->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + + // + // Merge with next block. + // + block->prev->next = block->next; + block->next->prev = block->prev; + block->next->begin = block->begin; + if (pHeap->pBlockList == block) + pHeap->pBlockList = block->next; + if (block->owner == NVOS32_BLOCK_TYPE_FREE) + { + if (pHeap->pFreeBlockList == block) + pHeap->pFreeBlockList = block->nextFree; + block->nextFree->prevFree = block->prevFree; + block->prevFree->nextFree = block->nextFree; + } + blockTmp = block; + block = block->next; + pHeap->numBlocks--; + _eheapFreeMemStruct(pHeap, &blockTmp); + + // re-insert updated free block into rb-tree + block->node.keyStart = block->begin; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + } + if (block->owner != NVOS32_BLOCK_TYPE_FREE) + { + // + // Nothing was merged. Add to free list. + // + blockTmp = pHeap->pFreeBlockList; + if (!blockTmp) + { + pHeap->pFreeBlockList = block; + block->nextFree = block; + block->prevFree = block; + } + else + { + if (blockTmp->begin > block->begin) + // + // Insert into beginning of free list. + // + pHeap->pFreeBlockList = block; + else if (blockTmp->prevFree->begin > block->begin) + // + // Insert into free list. + // + do + { + blockTmp = blockTmp->nextFree; + } while (blockTmp->begin < block->begin); + /* + else + * Insert at end of list. 
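+             * (Illustrative note: the free list is circular and sorted
+             * by 'begin', so when neither branch above fires, blockTmp
+             * is still the head and the shared linking code below
+             * splices the block in front of the head's prevFree, i.e.
+             * at the tail. With a free list {0x0, 0x400, 0x800},
+             * freeing a block at 0xC00 lands after 0x800.)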
+ */ + block->nextFree = blockTmp; + block->prevFree = blockTmp->prevFree; + block->prevFree->nextFree = block; + blockTmp->prevFree = block; + } + } + block->owner = NVOS32_BLOCK_TYPE_FREE; + //block->mhandle = 0x0; + block->align = block->begin; + + portMemSet((block+1), 0, pHeap->sizeofMemBlock - sizeof(EMEMBLOCK)); + + return NV_OK; +} + +static NV_STATUS +eheapFree +( + POBJEHEAP pHeap, + NvU64 offset +) +{ + PEMEMBLOCK block; + + block = (PEMEMBLOCK) eheapGetBlock(pHeap, offset, 0); + if (!block) + return NV_ERR_INVALID_OFFSET; + + return _eheapBlockFree(pHeap, block); +} + +static PEMEMBLOCK +eheapGetBlock +( + POBJEHEAP pHeap, + NvU64 offset, + NvBool bReturnFreeBlock +) +{ + PEMEMBLOCK block; + PNODE pNode; + + if (btreeSearch(offset, &pNode, pHeap->pBlockTree) != NV_OK) + { + return NULL; + } + + block = (PEMEMBLOCK)pNode->Data; + if ((block->owner == NVOS32_BLOCK_TYPE_FREE ) && !bReturnFreeBlock) + { + return NULL; + } + + return block; +} + +static NV_STATUS +eheapGetSize +( + POBJEHEAP pHeap, + NvU64 *size +) +{ + *size = pHeap->total; + return NV_OK; +} + +static NV_STATUS +eheapGetFree +( + POBJEHEAP pHeap, + NvU64 *free +) +{ + *free = pHeap->free; + return NV_OK; +} + +static NV_STATUS +eheapGetBase +( + POBJEHEAP pHeap, + NvU64 *base +) +{ + *base = pHeap->base; + return NV_OK; +} + +static void +eheapInfo +( + POBJEHEAP pHeap, + NvU64 *pBytesFree, // in all of the space managed + NvU64 *pBytesTotal, // in all of the space managed + NvU64 *pLargestFreeOffset, // constrained to pHeap->rangeLo, pHeap->rangeHi + NvU64 *pLargestFreeSize, // constrained to pHeap->rangeLo, pHeap->rangeHi + NvU32 *pNumFreeBlocks, + NvU64 *pUsableBytesFree // constrained to pHeap->rangeLo, pHeap->rangeHi +) +{ + NV_RANGE range = rangeMake(pHeap->rangeLo, pHeap->rangeHi); + + if (pBytesFree) + { + *pBytesFree = pHeap->free; + } + if (pBytesTotal) + { + *pBytesTotal = pHeap->total; + } + eheapInfoForRange(pHeap, range, pLargestFreeOffset, pLargestFreeSize, pNumFreeBlocks, pUsableBytesFree); +} + +static void +eheapInfoForRange +( + POBJEHEAP pHeap, + NV_RANGE range, + NvU64 *pLargestFreeOffset, // constrained to rangeLo, rangeHi + NvU64 *pLargestFreeSize, // constrained to rangeLo, rangeHi + NvU32 *pNumFreeBlocks, + NvU64 *pUsableBytesFree // constrained to rangeLo, rangeHi +) +{ + PEMEMBLOCK blockFirstFree, blockFree; + NvU64 freeBlockSize = 0; + NvU64 largestFreeOffset = 0; + NvU64 largestFreeSize = 0; + NvU32 numFreeBlocks = 0; + + if (pUsableBytesFree) + *pUsableBytesFree = 0; + + blockFirstFree = pHeap->pFreeBlockList; + if (blockFirstFree) + { + NV_ASSERT( range.lo <= range.hi ); + + blockFree = blockFirstFree; + do { + NvU64 clampedBlockBegin = (blockFree->begin >= range.lo) ? + blockFree->begin : range.lo; + NvU64 clampedBlockEnd = (blockFree->end <= range.hi) ? 
+ blockFree->end : range.hi; + if (clampedBlockBegin <= clampedBlockEnd) + { + numFreeBlocks++; + freeBlockSize = clampedBlockEnd - clampedBlockBegin + 1; + + if (pUsableBytesFree) + *pUsableBytesFree += freeBlockSize; + + if ( freeBlockSize > largestFreeSize ) + { + largestFreeOffset = clampedBlockBegin; + largestFreeSize = freeBlockSize; + } + } + blockFree = blockFree->nextFree; + } while (blockFree != blockFirstFree); + } + + if (pLargestFreeOffset) + { + *pLargestFreeOffset = largestFreeOffset; + } + if (pLargestFreeSize) + { + *pLargestFreeSize = largestFreeSize; + } + if (pNumFreeBlocks) + { + *pNumFreeBlocks = numFreeBlocks; + } +} + +static NV_STATUS +eheapSetAllocRange +( + POBJEHEAP pHeap, + NvU64 rangeLo, + NvU64 rangeHi +) +{ + + if ( rangeLo < pHeap->base ) + rangeLo = pHeap->base; + + if ( rangeHi > (pHeap->base + pHeap->total - 1) ) + rangeHi = (pHeap->base + pHeap->total - 1); + + if ( rangeHi < rangeLo ) + return NV_ERR_INVALID_ARGUMENT; + + pHeap->rangeLo = rangeLo; + pHeap->rangeHi = rangeHi; + + return NV_OK; +} + +static NV_STATUS +eheapTraverse +( + POBJEHEAP pHeap, + void *pEnv, + EHeapTraversalFn traversalFn, + NvS32 direction +) +{ + NvU32 cont = 1, backAtFirstBlock = 0; + PEMEMBLOCK pBlock, pBlockNext; + NV_STATUS rc; + NvU64 cursorOffset; // for dealing with cursor invalidates. + NvU64 firstBlockBegin, firstBlockEnd; // we'll never call the traversal fn twice on the same (sub)extent. + + pBlock = (direction > 0) ? pHeap->pBlockList : pHeap->pBlockList->prev; + NV_ASSERT(pBlock); + + // + // Cursor invalidates mean we can't compare with 'pHeap->pBlockList'. + // Instead we'll compare with the extent. If we intersect it at all in + // a later block then we'll consider that as having returned to the first block. + // + firstBlockBegin = pBlock->begin; + firstBlockEnd = pBlock->end; + + do + { + NvU32 invalCursor = 0; + + if ( direction > 0 ) + { + pBlockNext = pBlock->next; + cursorOffset = pBlockNext->begin; + } + else + { + pBlockNext = pBlock->prev; + cursorOffset = pBlockNext->end; + } + + rc = traversalFn(pHeap, pEnv, pBlock, &cont, &invalCursor); + + if ( invalCursor ) + { + // A block was added at or freed. So far only freeing the current block. + pBlock = eheapGetBlock(pHeap, cursorOffset, 1 /*return even if it is a free block*/); + + // Advance to the next block if the cursor block was merged. + if ((direction > 0) && (pBlock->begin < cursorOffset)) + { + pBlock = pBlock->next; + } + else if ((direction <= 0) && (pBlock->end > cursorOffset)) + { + pBlock = pBlock->prev; + } + } + else + { + // No change to the list, use the fast way to find the next block. + pBlock = pBlockNext; + + } + + NV_ASSERT(pBlock); // 1. list is circular, 2. cursorOffset should always be found unless the list is badly malformed. + + // + // Back to first block? Defined as being at a block for which the + // intersection with the original first block is non-null. + // + if ( ((firstBlockBegin >= pBlock->begin ) && (firstBlockBegin <= pBlock->end)) || + ((firstBlockEnd <= pBlock->end ) && (firstBlockEnd >= pBlock->begin)) ) + { + backAtFirstBlock = 1; + } + + } while (cont && !backAtFirstBlock); + + return rc; +} + +/*! + * @brief returns number of blocks in eHeap. + * + * @param[in] pHeap: pointer to eHeap struct to get data from + * + * @returns the number of blocks (free or allocated) currently in the heap + */ +static NvU32 +eheapGetNumBlocks +( + POBJEHEAP pHeap +) +{ + return pHeap->numBlocks; +} + +/*! 
+ * @brief Copies over block information for each block + * in the heap into the provided buffer. + * + * @param[in] pHeap: pointer to eHeap struct to get data from + * @param[in] numBlocks: number of blocks passed in block buffer + * @param[out] pBlockBuffer: pointer to buffer where info will be copied to + * + * @return 'NV_OK' Operation completed successfully + * 'NV_ERR_INVALID_ARGUMENT' size of buffer passed in is + * incorrect + * 'NV_ERR_INVALID_STATE' if the blocklist doesn't match the + * heapSize + */ +static NV_STATUS +eheapGetBlockInfo +( + POBJEHEAP pHeap, + NvU32 numBlocks, + NVOS32_HEAP_DUMP_BLOCK *pBlockBuffer +) +{ + PEMEMBLOCK pBlock; + NvU32 heapSize, i; + NV_STATUS rmStatus = NV_OK; + + // ensure buffer is the same numBlocks + heapSize = eheapGetNumBlocks(pHeap); + NV_ASSERT_OR_RETURN(heapSize == numBlocks, NV_ERR_INVALID_ARGUMENT); + + pBlock = pHeap->pBlockList; + for (i = 0; i < heapSize; i++) + { + pBlockBuffer->begin = pBlock->begin; + pBlockBuffer->align = pBlock->align; + pBlockBuffer->end = pBlock->end; + pBlockBuffer->owner = pBlock->owner; + pBlockBuffer->format = 0; // EMEMBLOCK does not have format, ignore for now + pBlock = pBlock->next; + if (pBlock == NULL) + { + return NV_ERR_INVALID_STATE; + } + pBlockBuffer++; + } + + return rmStatus; +} + +/** + * @brief Set up block owner isolation + * + * Owner isolation means that no two block owners can own allocations which live within a specified range. + * + * @param[in] pHeap pointer to EHEAP object + * @param[in] bEnable NV_TRUE to enable the allocation isolation + * @param[in] granularity allocation granularity + * + * @return NV_OK on success + */ +NV_STATUS +eheapSetOwnerIsolation +( + POBJEHEAP pHeap, + NvBool bEnable, + NvU32 granularity +) +{ + // This can only be set before any allocations have occurred. + if (pHeap->free != pHeap->total) + { + return NV_ERR_INVALID_STATE; + } + // Saying no 2 block owners can share the same block doesn't make sense. + if (bEnable && granularity < 2) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (bEnable && (granularity & (granularity-1))) + { + return NV_ERR_INVALID_ARGUMENT; + } + pHeap->bOwnerIsolation = bEnable; + pHeap->ownerGranularity = granularity; + + return NV_OK; +} + +/** + * @brief Check heap block ownership + * + * @param[in] pHeap Pointer to EHEAP object + * @param[in] pIsolationID Unique isolation ID constructed by the caller + * @param[in] allocLo Allocated range low + * @param[in] allocHi Allocated range high + * @param[in] blockFree Free block list + * @param[in] pChecker Caller defined ownership ID comparator + * + * @return NV_TRUE if success + */ +static NvBool +_eheapCheckOwnership +( + POBJEHEAP pHeap, + void *pIsolationID, + NvU64 allocLo, + NvU64 allocHi, + PEMEMBLOCK blockFree, + EHeapOwnershipComparator *pComparator +) +{ + EMEMBLOCK *pTmpBlock; + NvU64 checkLo = NV_ALIGN_DOWN(allocLo, pHeap->ownerGranularity); + NvU64 checkHi = (((allocHi % pHeap->ownerGranularity) == 0) ? + NV_ALIGN_UP((allocHi + 1), pHeap->ownerGranularity) : + NV_ALIGN_UP(allocHi, pHeap->ownerGranularity)); + NvU64 check; + + checkLo = (checkLo <= pHeap->base) ? pHeap->base : checkLo; + checkHi = (checkHi >= pHeap->base + pHeap->total - 1) ? 
(pHeap->base + pHeap->total - 1) : checkHi; + + NV_ASSERT(NULL != blockFree); + + if (blockFree->begin > checkLo || blockFree->end < checkHi) + { + for (check = checkLo; check < checkHi; /* in-loop */) + { + pTmpBlock = pHeap->eheapGetBlock(pHeap, check, NV_TRUE); + NV_ASSERT(pTmpBlock); + + if (pTmpBlock->owner != NVOS32_BLOCK_TYPE_FREE) + { + if (!pComparator(pIsolationID, pTmpBlock->pData)) + { + return NV_FALSE; + } + } + + check = pTmpBlock->end + 1; + } + } + + return NV_TRUE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c new file mode 100644 index 0000000..ad38ba4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c @@ -0,0 +1,409 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/list.h" +#include "utils/nvassert.h" + +CONT_VTABLE_DEFN(ListBase, listIterRange_IMPL, NULL); + +#if PORT_IS_CHECKED_BUILD +static NvBool _listIterRangeCheck(ListBase *pList, ListNode *pFirst, + ListNode *pLast); +#endif +static void _listInsertBase(ListBase *pList, void *pNext, void *pValue); + +void listInit_IMPL(NonIntrusiveList *pList, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + + portMemSet(&(pList->base), 0, sizeof(pList->base)); + CONT_VTABLE_INIT(ListBase, &pList->base); + pList->pAllocator = pAllocator; + pList->valueSize = valueSize; + pList->base.nodeOffset = (NvS32)(0 - sizeof(ListNode)); +} + +void listInitIntrusive_IMPL(IntrusiveList *pList, NvS32 nodeOffset) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + portMemSet(&(pList->base), 0, sizeof(pList->base)); + CONT_VTABLE_INIT(ListBase, &pList->base); + pList->base.nodeOffset = nodeOffset; +} + +static void +_listDestroy(ListBase *pList, PORT_MEM_ALLOCATOR *pAllocator) +{ + ListNode *pNode; + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + + pNode = pList->pHead; + + pList->pHead = NULL; + pList->pTail = NULL; + pList->count = 0; + NV_CHECKED_ONLY(pList->versionNumber++); + + while (pNode != NULL) + { + ListNode *pTemp = pNode; + pNode = pNode->pNext; + pTemp->pPrev = NULL; + pTemp->pNext = NULL; + NV_CHECKED_ONLY(pTemp->pList = NULL); + if (NULL != pAllocator) + { + PORT_FREE(pAllocator, pTemp); + } + } +} + +void listDestroy_IMPL(NonIntrusiveList *pList) +{ + _listDestroy(&pList->base, pList->pAllocator); +} + +void listDestroyIntrusive_IMPL(ListBase *pList) +{ + _listDestroy(pList, NULL); +} + +NvU32 listCount_IMPL(ListBase *pList) +{ + NV_ASSERT_OR_RETURN(pList, 0); + return pList->count; +} + +void *listInsertNew_IMPL(NonIntrusiveList *pList, void *pNext) +{ + void *pNode = NULL; + void *pValue; + + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + + pNode = PORT_ALLOC(pList->pAllocator, sizeof(ListNode) + pList->valueSize); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + + portMemSet(pNode, 0, sizeof(ListNode) + pList->valueSize); + pValue = listNodeToValue(&pList->base, pNode); + _listInsertBase(&(pList->base), pNext, pValue); + + return pValue; +} + +void *listAppendNew_IMPL(NonIntrusiveList *pList) +{ + return listInsertNew_IMPL(pList, NULL); +} + +void *listPrependNew_IMPL(NonIntrusiveList *pList) +{ + return listInsertNew_IMPL(pList, listHead_IMPL(&(pList->base))); +} + +void *listInsertValue_IMPL(NonIntrusiveList *pList, void *pNext, void *pValue) +{ + void *pCurrent; + + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pCurrent = listInsertNew_IMPL(pList, pNext); + if (NULL == pCurrent) + return NULL; + + return portMemCopy(pCurrent, pList->valueSize, pValue, pList->valueSize); +} + +void *listAppendValue_IMPL(NonIntrusiveList *pList, void *pValue) +{ + return listInsertValue_IMPL(pList, NULL, pValue); +} + +void *listPrependValue_IMPL(NonIntrusiveList *pList, void *pValue) +{ + return listInsertValue_IMPL(pList, listHead_IMPL(&(pList->base)), pValue); +} + +void listInsertExisting_IMPL(IntrusiveList *pList, void *pNext, void *pValue) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + NV_ASSERT_OR_RETURN_VOID(NULL != pValue); + _listInsertBase(&(pList->base), pNext, pValue); +} + +void listAppendExisting_IMPL(IntrusiveList *pList, void *pValue) +{ + listInsertExisting_IMPL(pList, NULL, pValue); +} + +void listPrependExisting_IMPL(IntrusiveList *pList, void *pValue) +{ + listInsertExisting_IMPL(pList, 
listHead_IMPL(&(pList->base)), pValue); +} + +// for nonintrusive version +void listRemove_IMPL(NonIntrusiveList *pList, void *pValue) +{ + if (pValue == NULL) + return; + listRemoveIntrusive_IMPL(&(pList->base), pValue); + PORT_FREE(pList->pAllocator, listValueToNode(&pList->base, pValue)); +} + +// intrusive version +void listRemoveIntrusive_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode; + + if (pValue == NULL) + return; + + pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN_VOID(NULL != pNode); + NV_ASSERT_CHECKED(pNode->pList == pList); + + if (pNode->pPrev != NULL) + pNode->pPrev->pNext = pNode->pNext; + else + pList->pHead = pNode->pNext; + + if (pNode->pNext != NULL) + pNode->pNext->pPrev = pNode->pPrev; + else + pList->pTail = pNode->pPrev; + + pNode->pNext = NULL; + pNode->pPrev = NULL; + + pList->count--; + NV_CHECKED_ONLY(pList->versionNumber++); + NV_CHECKED_ONLY(pNode->pList = NULL); +} + +// pvalue here means the value +void listRemoveFirstByValue_IMPL +( + NonIntrusiveList *pList, + void *pValue +) +{ + void *pValueFound = listFindByValue_IMPL(pList, pValue); + if (pValueFound) + { + listRemove_IMPL(pList, pValueFound); + } +} + +void listRemoveAllByValue_IMPL +( + NonIntrusiveList *pList, + void *pValue +) +{ + void *pValueFound; + ListNode *pNode; + + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + NV_ASSERT_OR_RETURN_VOID(NULL != pValue); + + pNode = pList->base.pHead; + while (pNode != NULL) + { + pValueFound = listNodeToValue(&pList->base, pNode); + pNode = pNode->pNext; + + if (portMemCmp(pValueFound, pValue, pList->valueSize) == 0) + { + listRemove_IMPL(pList, pValueFound); + pValueFound = NULL; + } + } +} + +void *listFindByValue_IMPL +( + NonIntrusiveList *pList, + void *pValue +) +{ + void *pResult; + ListNode *pNode; + + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pNode = pList->base.pHead; + while (pNode != NULL) + { + pResult = listNodeToValue(&pList->base, pNode); + + if (portMemCmp(pResult, pValue, pList->valueSize) == 0) + return pResult; + + pNode = pNode->pNext; + } + + return NULL; +} + +void *listHead_IMPL +( + ListBase *pList +) +{ + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + return listNodeToValue(pList, pList->pHead); +} + +void *listTail_IMPL +( + ListBase *pList +) +{ + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + return listNodeToValue(pList, pList->pTail); +} + +void *listNext_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pList == pList); + return listNodeToValue(pList, pNode->pNext); +} + +void *listPrev_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pList == pList); + return listNodeToValue(pList, pNode->pPrev); +} + +ListIterBase listIterRange_IMPL +( + ListBase *pList, + void *pFirst, + void *pLast +) +{ + ListIterBase it; + + NV_ASSERT(NULL != pList); + + NV_CHECKED_ONLY(it.versionNumber = pList->versionNumber); + it.pList = pList; + it.pNode = listValueToNode(pList, pFirst); + it.pLast = listValueToNode(pList, pLast); + it.pValue = NULL; + + NV_ASSERT_CHECKED(it.pNode == NULL || it.pNode->pList == pList); + NV_ASSERT_CHECKED(it.pLast == NULL || it.pLast->pList == pList); + NV_ASSERT_CHECKED(_listIterRangeCheck(pList, it.pNode, it.pLast)); + + return it; +} + +NvBool listIterNext_IMPL(ListIterBase *pIt) +{ + 
NV_ASSERT_OR_RETURN(NULL != pIt, NV_FALSE); + + NV_ASSERT_CHECKED(pIt->versionNumber == pIt->pList->versionNumber); + + if (!pIt->pNode) + return NV_FALSE; + + pIt->pValue = listNodeToValue(pIt->pList, pIt->pNode); + + if (pIt->pNode == pIt->pLast) + pIt->pNode = NULL; + else + pIt->pNode = pIt->pNode->pNext; + + return NV_TRUE; +} + +#if PORT_IS_CHECKED_BUILD +// @todo: optimize for best average complexity +// assumption: nodes ownership checked in the caller function +// allow same node +static NvBool _listIterRangeCheck +( + ListBase *pList, + ListNode *pFirst, + ListNode *pLast +) +{ + ListNode *pNode; + + for (pNode = pFirst; pNode != NULL; pNode = pNode->pNext) + { + if (pNode == pLast) + return NV_TRUE; + } + + // Check for both NULL (empty range) case. + return pNode == pLast; +} +#endif + +static void _listInsertBase +( + ListBase *pList, + void *pNextValue, + void *pValue +) +{ + ListNode *pNext = listValueToNode(pList, pNextValue); + ListNode *pNode = listValueToNode(pList, pValue); + + pNode->pPrev = pNext ? pNext->pPrev : pList->pTail; + pNode->pNext = pNext; + + if (pNode->pPrev) + pNode->pPrev->pNext = pNode; + else + pList->pHead = pNode; + + if (pNode->pNext) + pNode->pNext->pPrev = pNode; + else + pList->pTail = pNode; + + pList->count++; + NV_CHECKED_ONLY(pList->versionNumber++); + NV_CHECKED_ONLY(pNode->pList = pList); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c new file mode 100644 index 0000000..ede5889 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c @@ -0,0 +1,898 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "containers/map.h" + +CONT_VTABLE_DEFN(MapBase, mapIterRange_IMPL, NULL); + +static void _mapRotateLeft(MapNode **pPRoot, MapNode *x); +static void _mapRotateRight(MapNode **pPRoot, MapNode *x); +static void _mapInsertFixup(MapNode **pRoot, MapNode *x); +static void _mapDeleteFixup(MapNode **pRoot, MapNode *parentOfX, MapNode *x); + +/** + * @brief Replace the old node with the new one. + * @details Does nothing if old node is NULL. 
Does not + * update oldnode links + */ +static void _mapPutNodeInPosition(MapBase *pMap, MapNode *pTargetPosition, + MapNode *pNewNode); + +/** + * @brief Take on target node's children connections. + * @details Does nothing is any of the input is NULL. + * Does not update oldnode links + */ +static void _mapAdoptChildrenNodes(MapNode *pTargetNode, MapNode *pNewNode); + +/** + * @brief Basic insertion procedure + * @details Shared by three versions of map insertion functions + */ +static NvBool _mapInsertBase(MapBase *pMap, NvU64 key, void *pValue); + +void mapInit_IMPL +( + NonIntrusiveMap *pMap, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + portMemSet(&(pMap->base), 0, sizeof(pMap->base)); + CONT_VTABLE_INIT(MapBase, &pMap->base); + pMap->pAllocator = pAllocator; + pMap->valueSize = valueSize; + pMap->base.nodeOffset = (NvS32)(0 - sizeof(MapNode)); +} + +void mapInitIntrusive_IMPL +( + IntrusiveMap *pMap, + NvS32 nodeOffset +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + portMemSet(&(pMap->base), 0, sizeof(pMap->base)); + CONT_VTABLE_INIT(MapBase, &pMap->base); + pMap->base.nodeOffset = nodeOffset; +} + +static void _mapDestroy(MapBase *pMap, PORT_MEM_ALLOCATOR *pAllocator) +{ + MapNode *pNode; + + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + + pNode = pMap->pRoot; + while (NULL != pNode) + { + while (NULL != pNode->pLeft) + pNode = pNode->pLeft; + + while (NULL != pNode->pRight) + pNode = pNode->pRight; + + if ((NULL == pNode->pLeft) && (NULL == pNode->pRight)) + { + MapNode *pTemp = pNode->pParent; + + // update parent node + if (NULL != pTemp) + { + if (pTemp->pLeft == pNode) + pTemp->pLeft = NULL; + else + pTemp->pRight = NULL; + } + + // free the node + pNode->pParent = NULL; + NV_CHECKED_ONLY(pNode->pMap = NULL); + if (NULL != pAllocator) + { + PORT_FREE(pAllocator, pNode); + } + + pNode = pTemp; + } + } + + pMap->pRoot = NULL; + pMap->count = 0; + NV_CHECKED_ONLY(pMap->versionNumber++); +} + +void mapDestroy_IMPL +( + NonIntrusiveMap *pMap +) +{ + _mapDestroy(&pMap->base, pMap->pAllocator); +} + +void mapDestroyIntrusive_IMPL +( + MapBase *pMap +) +{ + _mapDestroy(pMap, NULL); +} + +NvU32 mapCount_IMPL +( + MapBase *pMap +) +{ + NV_ASSERT_OR_RETURN(pMap, 0); + return pMap->count; +} + +NvU64 mapKey_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *pNode = mapValueToNode(pMap, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, 0); + NV_ASSERT_CHECKED(pNode->pMap == pMap); + return pNode->key; +} + +void *mapInsertNew_IMPL +( + NonIntrusiveMap *pMap, + NvU64 key +) +{ + void *pNode = NULL; + void *pValue; + + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + + pNode = PORT_ALLOC(pMap->pAllocator, sizeof(MapNode) + pMap->valueSize); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + + portMemSet(pNode, 0, sizeof(MapNode) + pMap->valueSize); + pValue = mapNodeToValue(&pMap->base, pNode); + + // check key duplication + if (!_mapInsertBase(&(pMap->base), key, pValue)) + { + PORT_FREE(pMap->pAllocator, pNode); + return NULL; + } + + return pValue; +} + +void *mapInsertValue_IMPL +( + NonIntrusiveMap *pMap, + NvU64 key, + void *pValue +) +{ + void *pCurrent; + + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pCurrent = mapInsertNew_IMPL(pMap, key); + if (NULL == pCurrent) + return NULL; + + return portMemCopy(pCurrent, pMap->valueSize, pValue, + pMap->valueSize); +} + +NvBool mapInsertExisting_IMPL +( + IntrusiveMap *pMap, + NvU64 key, + void *pValue +) +{ + NV_ASSERT_OR_RETURN(NULL != pMap, 
NV_FALSE); + NV_ASSERT_OR_RETURN(NULL != pValue, NV_FALSE); + return _mapInsertBase(&(pMap->base), key, pValue); +} + +void mapRemove_IMPL +( + NonIntrusiveMap *pMap, + void *pValue +) +{ + if (pValue == NULL) + return; + mapRemoveIntrusive_IMPL(&(pMap->base), pValue); + PORT_FREE(pMap->pAllocator, mapValueToNode(&pMap->base, pValue)); +} + +void mapRemoveIntrusive_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *x; // child node of y, might be NULL + MapNode *y; // successor for z + MapNode *z; // node to remove + MapNode *parentOfX; + NvU32 yWasBlack; + + // do nothing is pValue is NULL + if (pValue == NULL) + return; + + // 1. find y, the successor for z + z = mapValueToNode(pMap, pValue); + NV_ASSERT_OR_RETURN_VOID(NULL != z); + NV_ASSERT_CHECKED(z->pMap == pMap); + + if (z->pLeft == NULL || z->pRight == NULL) + { + // z has at least one empty successor, y = z + y = z; + } + + else + { + // y is z's least greater node + y = z->pRight; + + while (y->pLeft != NULL) + y = y->pLeft; + } + + // 2. find x, y's children + if (y->pLeft != NULL) + x = y->pLeft; + else + x = y->pRight; + + // 3. put x into y's position + _mapPutNodeInPosition(pMap, y, x); + // 4. put y into z's position if not the same + parentOfX = y->pParent; + yWasBlack = !y->bIsRed; + + if (y != z) + { + _mapPutNodeInPosition(pMap, z, y); + _mapAdoptChildrenNodes(z, y); + y->bIsRed = z->bIsRed; + + if (parentOfX == z) + parentOfX = y; + } + + // 5. fixup, to rebalance the tree + if (yWasBlack) + _mapDeleteFixup(&(pMap->pRoot), parentOfX, x); + + // 6. update the count + NV_CHECKED_ONLY(pMap->versionNumber++); + NV_CHECKED_ONLY(z->pMap = NULL); + pMap->count--; + return; +} + +void mapRemoveByKey_IMPL +( + NonIntrusiveMap *pMap, + NvU64 key +) +{ + mapRemove_IMPL(pMap, mapFind_IMPL(&(pMap->base), key)); +} + +void mapRemoveByKeyIntrusive_IMPL +( + MapBase *pMap, + NvU64 key +) +{ + mapRemoveIntrusive_IMPL(pMap, mapFind_IMPL(pMap, key)); +} + +void *mapFind_IMPL +( + MapBase *pMap, + NvU64 key +) +{ + MapNode *pCurrent; + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + pCurrent = pMap->pRoot; + + while (pCurrent != NULL) + { + if (key < pCurrent->key) + pCurrent = pCurrent->pLeft; + else if (key > pCurrent->key) + pCurrent = pCurrent->pRight; + else + return mapNodeToValue(pMap, pCurrent); + } + + return NULL; +} + +void *mapFindGEQ_IMPL +( + MapBase *pMap, + NvU64 keyMin +) +{ + MapNode *pCurrent; + MapNode *pResult; + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + pCurrent = pMap->pRoot; + pResult = NULL; + + while (pCurrent != NULL) + { + if (pCurrent->key > keyMin) + { + pResult = pCurrent; + pCurrent = pCurrent->pLeft; + } + + else if (pCurrent->key == keyMin) + return mapNodeToValue(pMap, pCurrent); + else + pCurrent = pCurrent->pRight; + } + + if (pResult == NULL) + return NULL; + + return mapNodeToValue(pMap, pResult); +} + +void *mapFindLEQ_IMPL +( + MapBase *pMap, + NvU64 keyMax +) +{ + MapNode *pCurrent; + MapNode *pResult; + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + pCurrent = pMap->pRoot; + pResult = NULL; + + while (pCurrent != NULL) + { + if (pCurrent->key > keyMax) + pCurrent = pCurrent->pLeft; + else if (pCurrent->key == keyMax) + return mapNodeToValue(pMap, pCurrent); + else + { + pResult = pCurrent; + pCurrent = pCurrent->pRight; + } + } + + if (pResult == NULL) + return NULL; + + return mapNodeToValue(pMap, pResult); +} + +void *mapNext_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pNode = mapValueToNode(pMap, pValue); + + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + 
NV_ASSERT_CHECKED(pNode->pMap == pMap); + + if (NULL != (pCurrent = pNode->pRight)) + { + while (pCurrent->pLeft != NULL) + pCurrent = pCurrent->pLeft; + + return mapNodeToValue(pMap, pCurrent); + } + + else + { + pCurrent = pNode->pParent; + + while (pCurrent != NULL && pNode == pCurrent->pRight) + { + if (pCurrent == pMap->pRoot) + return NULL; + + pNode = pCurrent; + pCurrent = pCurrent->pParent; + } + + if (pCurrent == NULL) + return NULL; + + return mapNodeToValue(pMap, pCurrent); + } +} + +void *mapPrev_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pNode = mapValueToNode(pMap, pValue); + + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pMap == pMap); + + if (NULL != (pCurrent = pNode->pLeft)) + { + while (pCurrent->pRight != NULL) + pCurrent = pCurrent->pRight; + + return mapNodeToValue(pMap, pCurrent); + } + + else + { + pCurrent = pNode->pParent; + + while (pCurrent != NULL && pNode == pCurrent->pLeft) + { + if (pCurrent == pMap->pRoot) + { + return NULL; + } + + pNode = pCurrent; + pCurrent = pCurrent->pParent; + } + + if (pCurrent == NULL) + return NULL; + + return mapNodeToValue(pMap, pCurrent); + } +} + +// @todo: do we need to change the definition of pFirst and pLast? +// currently they are mapNodes +MapIterBase mapIterRange_IMPL +( + MapBase *pMap, + void *pFirst, + void *pLast +) +{ + MapIterBase it; + MapNode *pFirstNode; + MapNode *pLastNode; + NV_ASSERT(pMap); + + portMemSet(&it, 0, sizeof(it)); + it.pMap = pMap; + + if (pMap->count == 0) + { + NV_CHECKED_ONLY(it.versionNumber = pMap->versionNumber); + return it; + } + + NV_ASSERT(pFirst); + NV_ASSERT(pLast); + NV_ASSERT_CHECKED((mapValueToNode(pMap, pFirst))->pMap == pMap); + NV_ASSERT_CHECKED((mapValueToNode(pMap, pLast))->pMap == pMap); + NV_ASSERT(mapKey_IMPL(pMap, pLast) >= mapKey_IMPL(pMap, pFirst)); + pFirstNode = mapValueToNode(pMap, pFirst); + pLastNode = mapValueToNode(pMap, pLast); + it.pNode = pFirstNode; + it.pLast = pLastNode; + NV_CHECKED_ONLY(it.versionNumber = pMap->versionNumber); + return it; +} + +// @todo: not sure about ppvalue, change it from void * to void ** +NvBool mapIterNext_IMPL(MapIterBase *pIt) +{ + NV_ASSERT_OR_RETURN(pIt, NV_FALSE); + + // + // Check whether the map was mutated during the iteration. + // If the map changed (by adding or removing entries), + // the iterator becomes invalid and must be reinitialized. 
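+    // Assumed usage pattern (pMapBase, pFirst, pLast and handleValue are
+    // placeholder names, not part of this source):
+    //
+    //     MapIterBase it = mapIterRange_IMPL(pMapBase, pFirst, pLast);
+    //     while (mapIterNext_IMPL(&it))
+    //         handleValue(it.pValue);   // hypothetical per-entry callback
+    //
+    // Inserting or removing entries inside such a loop bumps
+    // versionNumber, so the assert below fires on checked builds.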
+ // + NV_ASSERT_CHECKED(pIt->versionNumber == pIt->pMap->versionNumber); + + if (!pIt->pNode) + return NV_FALSE; + + pIt->pValue = mapNodeToValue(pIt->pMap, pIt->pNode); + + if (pIt->pNode == pIt->pLast) + pIt->pNode = NULL; + else + pIt->pNode = mapValueToNode(pIt->pMap, + mapNext_IMPL(pIt->pMap, pIt->pValue)); + + return NV_TRUE; +} + +static void _mapRotateLeft +( + MapNode **pPRoot, + MapNode *x +) +{ + // rotate node x to left + MapNode *y = x->pRight; + // establish x->pRight link + x->pRight = y->pLeft; + + if (y->pLeft) + y->pLeft->pParent = x; + + // establish y->pParent link + y->pParent = x->pParent; + + if (x->pParent) + { + if (x == x->pParent->pLeft) + x->pParent->pLeft = y; + else + x->pParent->pRight = y; + } + + else + (*pPRoot) = y; + + // link x and y + y->pLeft = x; + x->pParent = y; +} + +static void _mapRotateRight +( + MapNode **pPRoot, + MapNode *x +) +{ + // rotate node x to right + MapNode *y = x->pLeft; + // establish x->pLeft link + x->pLeft = y->pRight; + + if (y->pRight) + y->pRight->pParent = x; + + // establish y->pParent link + y->pParent = x->pParent; + + if (x->pParent) + { + if (x == x->pParent->pRight) + x->pParent->pRight = y; + else + x->pParent->pLeft = y; + } + + else + (*pPRoot) = y; + + // link x and y + y->pRight = x; + x->pParent = y; +} + +static void _mapInsertFixup +( + MapNode **pPRoot, + MapNode *x +) +{ + // check red-black properties + while ((x != *pPRoot) && x->pParent->bIsRed) + { + // we have a violation + if (x->pParent == x->pParent->pParent->pLeft) + { + MapNode *y = x->pParent->pParent->pRight; + + if (y && y->bIsRed) + { + // uncle is RED + x->pParent->bIsRed = NV_FALSE; + y->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + x = x->pParent->pParent; + } + + else + { + // uncle is BLACK + if (x == x->pParent->pRight) + { + // make x a left child + x = x->pParent; + _mapRotateLeft(pPRoot, x); + } + + // recolor and rotate + x->pParent->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, x->pParent->pParent); + } + } + + else + { + // mirror image of above code + MapNode *y = x->pParent->pParent->pLeft; + + if (y && y->bIsRed) + { + // uncle is RED + x->pParent->bIsRed = NV_FALSE; + y->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + x = x->pParent->pParent; + } + + else + { + // uncle is BLACK + if (x == x->pParent->pLeft) + { + x = x->pParent; + _mapRotateRight(pPRoot, x); + } + + x->pParent->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, x->pParent->pParent); + } + } + } + + (*pPRoot)->bIsRed = NV_FALSE; +} + +static void _mapDeleteFixup +( + MapNode **pPRoot, + MapNode *parentOfX, + MapNode *x +) +{ + while ((x != *pPRoot) && (!x || !x->bIsRed)) + { + //NV_ASSERT (!(x == NULL && parentOfX == NULL)); + // NULL nodes are sentinel nodes. If we delete a sentinel node (x==NULL) it + // must have a parent node (or be the root). 
Hence, parentOfX == NULL with + // x==NULL is never possible (tree invariant) + if ((parentOfX != NULL) && (x == parentOfX->pLeft)) + { + MapNode *w = parentOfX->pRight; + + if (w && w->bIsRed) + { + w->bIsRed = NV_FALSE; + parentOfX->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, parentOfX); + w = parentOfX->pRight; + } + + if (!w || (((!w->pLeft || !w->pLeft->bIsRed) + && (!w->pRight || !w->pRight->bIsRed)))) + { + if (w) + w->bIsRed = NV_TRUE; + + x = parentOfX; + } + + else + { + if (!w->pRight || !w->pRight->bIsRed) + { + w->pLeft->bIsRed = NV_FALSE; + w->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, w); + w = parentOfX->pRight; + } + + w->bIsRed = parentOfX->bIsRed; + parentOfX->bIsRed = NV_FALSE; + w->pRight->bIsRed = NV_FALSE; + _mapRotateLeft(pPRoot, parentOfX); + x = *pPRoot; + } + } + + else if (parentOfX != NULL) + { + MapNode *w = parentOfX->pLeft; + + if (w && w->bIsRed) + { + w->bIsRed = NV_FALSE; + parentOfX->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, parentOfX); + w = parentOfX->pLeft; + } + + if (!w || ((!w->pRight || !w->pRight->bIsRed) && + (!w->pLeft || !w->pLeft->bIsRed))) + { + if (w) + w->bIsRed = NV_TRUE; + + x = parentOfX; + } + + else + { + if (!w->pLeft || !w->pLeft->bIsRed) + { + w->pRight->bIsRed = NV_FALSE; + w->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, w); + w = parentOfX->pLeft; + } + + w->bIsRed = parentOfX->bIsRed; + parentOfX->bIsRed = NV_FALSE; + w->pLeft->bIsRed = NV_FALSE; + _mapRotateRight(pPRoot, parentOfX); + x = *pPRoot; + } + } + + else if (x == NULL) + { + // This should never happen. + break; + } + + parentOfX = x->pParent; + } + + if (x) + x->bIsRed = NV_FALSE; +} + +static void _mapPutNodeInPosition +( + MapBase *pMap, + MapNode *pTargetPosition, + MapNode *pNewNode +) +{ + // error check - can be removed + if (pTargetPosition == NULL) + return; + + // 1. change connection from new node side + if (pNewNode != NULL) + pNewNode->pParent = pTargetPosition->pParent; + + // 2. connection from parent side + if (pTargetPosition->pParent != NULL) + { + if (pTargetPosition == pTargetPosition->pParent->pLeft) + pTargetPosition->pParent->pLeft = pNewNode; + else + pTargetPosition->pParent->pRight = pNewNode; + } + + else + pMap->pRoot = pNewNode; +} + +static void _mapAdoptChildrenNodes +( + MapNode *pTargetNode, + MapNode *pNewNode +) +{ + // error check - can be removed + if (pTargetNode == NULL || pNewNode == NULL) + return; + + // take on connections + pNewNode->pLeft = pTargetNode->pLeft; + + if (pTargetNode->pLeft != NULL) + pTargetNode->pLeft->pParent = pNewNode; + + pNewNode->pRight = pTargetNode->pRight; + + if (pTargetNode->pRight != NULL) + pTargetNode->pRight->pParent = pNewNode; +} + +static NvBool _mapInsertBase +( + MapBase *pMap, + NvU64 key, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pParent; + MapNode *pNode; + pNode = mapValueToNode(pMap, pValue); + // 1. locate parent leaf node for the new node + pCurrent = pMap->pRoot; + pParent = NULL; + + while (pCurrent != NULL) + { + pParent = pCurrent; + + if (key < pCurrent->key) + pCurrent = pCurrent->pLeft; + else if (key > pCurrent->key) + pCurrent = pCurrent->pRight; + else + { + // duplication detected + return NV_FALSE; + } + } + + // 2. set up the new node structure + NV_CHECKED_ONLY(pNode->pMap = pMap); + pNode->key = key; + pNode->pParent = pParent; + pNode->pLeft = NULL; + pNode->pRight = NULL; + pNode->bIsRed = NV_TRUE; + + // 3. 
insert node in tree + if (pParent != NULL) + { + if (pNode->key < pParent->key) + pParent->pLeft = pNode; + else + pParent->pRight = pNode; + } + + else + pMap->pRoot = pNode; + + // 4. balance the tree + _mapInsertFixup(&(pMap->pRoot), pNode); + NV_CHECKED_ONLY(pMap->versionNumber++); + pMap->count++; + return NV_TRUE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c new file mode 100644 index 0000000..b95b9f1 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c @@ -0,0 +1,380 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/multimap.h" + +CONT_VTABLE_DEFN(MultimapBase, multimapItemIterRange_IMPL, NULL); + +void multimapInit_IMPL +( + MultimapBase *pBase, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize, + NvS32 nodeOffset, + NvU32 submapSize +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + mapInit_IMPL(&pBase->map, pAllocator, submapSize); + CONT_VTABLE_INIT(MultimapBase, pBase); + pBase->multimapNodeOffset = nodeOffset; + pBase->itemCount = 0; + pBase->itemSize = valueSize; +} + +void multimapDestroy_IMPL +( + MultimapBase *pBase +) +{ + void *pLeaf; + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + + pLeaf = multimapFirstItem_IMPL(pBase); + while (NULL != pLeaf) + { + void *pNext = multimapNextItem_IMPL(pBase, pLeaf); + multimapRemoveItem_IMPL(pBase, pLeaf); + pLeaf = pNext; + } + + while (NULL != (pSubmap = (IntrusiveMap *)mapFindGEQ_IMPL(&pBase->map.base, 0))) + { + mapDestroyIntrusive_IMPL(&pSubmap->base); + mapRemove_IMPL(&pBase->map, pSubmap); + } + + mapDestroy_IMPL(&pBase->map); +} + +void multimapClear_IMPL +( + MultimapBase *pBase +) +{ + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; + NvS32 nodeOffset; + NvU32 submapSize; + + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + pAllocator = pBase->map.pAllocator; + valueSize = pBase->itemSize; + nodeOffset = pBase->multimapNodeOffset; + submapSize = pBase->map.valueSize; + + multimapDestroy_IMPL(pBase); + multimapInit_IMPL(pBase, pAllocator, valueSize, nodeOffset, submapSize); +} + +void *multimapInsertSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + void *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapInsertNew_IMPL(&pBase->map, submapKey); + if (NULL != pSubmap) + { + NvS32 submapNodeOffset = pBase->multimapNodeOffset + + NV_OFFSETOF(MultimapNode, submapNode); + mapInitIntrusive_IMPL((IntrusiveMap *)pSubmap, submapNodeOffset); + } + + return pSubmap; +} + +void *multimapFindSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFind_IMPL(&pBase->map.base, submapKey); +} + +void *multimapFindSubmapLEQ_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFindLEQ_IMPL(&pBase->map.base, submapKey); +} + +void *multimapFindSubmapGEQ_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFindGEQ_IMPL(&pBase->map.base, submapKey); +} + +void *multimapInsertItemNew_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey +) +{ + IntrusiveMap *pSubmap; + void *pLeaf; + NvU32 leafSize; + + if (NULL == pBase) + return NULL; + + pSubmap = (IntrusiveMap *)multimapFindSubmap_IMPL(pBase, submapKey); + if (NULL == pSubmap) + return NULL; + + leafSize = pBase->multimapNodeOffset + sizeof(MultimapNode); + pLeaf = PORT_ALLOC(pBase->map.pAllocator, leafSize); + + if (NULL == pLeaf) + return NULL; + + portMemSet(pLeaf, 0, leafSize); + + multimapValueToNode(pBase, pLeaf)->pSubmap = pSubmap; + + if (!mapInsertExisting_IMPL(pSubmap, itemKey, pLeaf)) + { + PORT_FREE(pBase->map.pAllocator, pLeaf); + return NULL; + } + + pBase->itemCount++; + + return pLeaf; +} + +void *multimapInsertItemValue_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey, + void *pValue +) +{ + void *pLeaf; + + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pLeaf = multimapInsertItemNew_IMPL(pBase, submapKey, itemKey); + + if (NULL == pLeaf) + return NULL; + + return portMemCopy(pLeaf, pBase->itemSize, pValue, pBase->itemSize); +} + +void *multimapFindItem_IMPL +( + MultimapBase *pBase, + 
NvU64 submapKey, + NvU64 itemKey +) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = (IntrusiveMap *)multimapFindSubmap_IMPL(pBase, submapKey); + if (NULL == pSubmap) + return NULL; + + return mapFind_IMPL(&pSubmap->base, itemKey); +} + +void multimapRemoveItem_IMPL(MultimapBase *pBase, void *pLeaf) +{ + IntrusiveMap *pSubmap; + NvU32 itemCount; + + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pLeaf); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pLeaf)->pSubmap; + NV_ASSERT_OR_RETURN_VOID(NULL != pSubmap); + + itemCount = pSubmap->base.count; + mapRemoveIntrusive_IMPL(&pSubmap->base, pLeaf); + // Only continue if an item was actually removed + if (itemCount == pSubmap->base.count) + return; + + PORT_FREE(pBase->map.pAllocator, pLeaf); + + pBase->itemCount--; +} + +void multimapRemoveSubmap_IMPL +( + MultimapBase *pBase, + MapBase *pSubmap +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pSubmap); + NV_ASSERT_OR_RETURN_VOID(pSubmap->count == 0); + mapDestroyIntrusive_IMPL(pSubmap); + mapRemove_IMPL(&pBase->map, pSubmap); +} + +void multimapRemoveItemByKey_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey +) +{ + void *pLeaf = multimapFindItem_IMPL(pBase, submapKey, itemKey); + if (NULL != pLeaf) + multimapRemoveItem_IMPL(pBase, pLeaf); +} + +void *multimapNextItem_IMPL(MultimapBase *pBase, void *pValue) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase && NULL != pValue, NULL); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pValue)->pSubmap; + NV_ASSERT_OR_RETURN(NULL != pSubmap, NULL); + + pValue = mapNext_IMPL(&pSubmap->base, pValue); + while (NULL == pValue) + { + pSubmap = (IntrusiveMap *)mapNext_IMPL(&pBase->map.base, pSubmap); + if (NULL == pSubmap) + return NULL; + + pValue = mapFindGEQ_IMPL(&pSubmap->base, 0); + } + + return pValue; +} + +void *multimapPrevItem_IMPL(MultimapBase *pBase, void *pValue) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase && NULL != pValue, NULL); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pValue)->pSubmap; + NV_ASSERT_OR_RETURN(NULL != pSubmap, NULL); + + pValue = mapPrev_IMPL(&pSubmap->base, pValue); + while (NULL == pValue) + { + pSubmap = (IntrusiveMap *)mapPrev_IMPL(&pBase->map.base, pSubmap); + if (NULL == pSubmap) + return NULL; + + pValue = mapFindLEQ_IMPL(&pSubmap->base, NV_U64_MAX); + } + + return pValue; +} + +void *multimapFirstItem_IMPL(MultimapBase *pBase) +{ + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapFindGEQ_IMPL(&pBase->map.base, 0); + while (NULL != pSubmap) + { + void *pItem = mapFindGEQ_IMPL(&pSubmap->base, 0); + if (NULL != pItem) + return pItem; + + pSubmap = mapNext_IMPL(&pBase->map.base, pSubmap); + } + + return NULL; +} + +void *multimapLastItem_IMPL(MultimapBase *pBase) +{ + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapFindLEQ_IMPL(&pBase->map.base, NV_U64_MAX); + while (NULL != pSubmap) + { + void *pItem = mapFindLEQ_IMPL(&pSubmap->base, NV_U64_MAX); + if (NULL != pItem) + return pItem; + + pSubmap = mapPrev_IMPL(&pBase->map.base, pSubmap); + } + + return NULL; +} + +MultimapIterBase multimapItemIterRange_IMPL +( + MultimapBase *pBase, + void *pFirst, + void *pLast +) +{ + MultimapIterBase it; + + portMemSet(&it, 0, sizeof(it)); + it.pMultimap = pBase; + + NV_ASSERT_OR_RETURN(NULL != pBase, it); + + if (pBase->itemCount == 0 || pFirst == NULL || pLast == NULL) + 
return it; + + { + MultimapNode *pFirstNode; + MultimapNode *pLastNode; + NvU64 firstKey, lastKey, firstSubmapKey, lastSubmapKey; + + pFirstNode = multimapValueToNode(pBase, pFirst); + pLastNode = multimapValueToNode(pBase, pLast); + + firstKey = pFirstNode->submapNode.key; + lastKey = pLastNode->submapNode.key; + firstSubmapKey = mapValueToNode(&pBase->map.base, pFirstNode->pSubmap)->key; + lastSubmapKey = mapValueToNode(&pBase->map.base, pLastNode->pSubmap)->key; + + NV_ASSERT(firstSubmapKey < lastSubmapKey || + (firstSubmapKey == lastSubmapKey && firstKey <= lastKey)); + } + it.pNext = pFirst; + it.pLast = pLast; + return it; +} + +NvBool multimapItemIterNext_IMPL(MultimapIterBase *pIt) +{ + NV_ASSERT_OR_RETURN(NULL != pIt, NV_FALSE); + + pIt->pValue = pIt->pNext; + + if (NULL == pIt->pNext) + return NV_FALSE; + + if (pIt->pNext == pIt->pLast) + pIt->pNext = NULL; + else + pIt->pNext = multimapNextItem_IMPL(pIt->pMultimap, pIt->pNext); + + return NV_TRUE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c new file mode 100644 index 0000000..9cb681e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/queue.h" + +#define MEM_RD64(a) ((NvLength) (*(volatile NvU64 *)(a))) +#define MEM_WR64(a, d) do { *(volatile NvU64 *)(a) = (NvU64)(d); } while (0) + +#define MEM_WR(a, d) portMemCopy((a), sizeof(*(a)), &(d), sizeof(d)) +#define MEM_RD(v, a) portMemCopy(&(v), sizeof(v), (a), sizeof(*(a))) + +static +NV_STATUS circularQueueInitCommon +( + Queue *pQueue, + void *pData, + NvLength capacity, + PORT_MEM_ALLOCATOR *pAllocator, + NvLength msgSize +) +{ + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_ERR_INVALID_ARGUMENT); + + MEM_WR(&pQueue->pData, pData); + MEM_WR(&pQueue->pAllocator, pAllocator); + MEM_WR64(&pQueue->msgSize, msgSize); + MEM_WR64(&pQueue->capacity, capacity); + MEM_WR64(&pQueue->getIdx, 0); + MEM_WR64(&pQueue->putIdx, 0); + + return NV_OK; +} + +static +NvLength queueGetCount(Queue *pQueue) +{ + NvLength get = MEM_RD64(&pQueue->getIdx); + NvLength put = MEM_RD64(&pQueue->putIdx); + + if (put >= get) + { + return put - get; + } + else + { + return put + MEM_RD64(&pQueue->capacity) - get; + } +} + +static +void managedCopyData(NvLength msgSize, + NvLength opIdx, + QueueContext *pCtx, + void *pClientData, + NvLength count, + NvBool bCopyIn) +{ + NvLength size = msgSize * count; + void *pQueueData = (NvU8 *)pCtx->pData + (opIdx * msgSize); + void *src = bCopyIn ? pClientData : pQueueData; + void *dst = bCopyIn ? pQueueData : pClientData; + + portMemCopy(dst, size, src, size); +} + +NV_STATUS circularQueueInit_IMPL +( + Queue *pQueue, + PORT_MEM_ALLOCATOR *pAllocator, + NvLength capacity, + NvLength msgSize +) +{ + void *pData = NULL; + + // One element is wasted as no separate count/full/empty state + // is kept - only indices. + // Managed queue, can hide this due to owning the buffer and + // preserve original queue semantics. 
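+    // With only get/put indices and no separate count, "empty" and "full"
+    // would both look like getIdx == putIdx, so one slot is sacrificed to
+    // disambiguate: empty is getIdx == putIdx, full is
+    // (putIdx + 1) % capacity == getIdx. For example, a caller requesting
+    // capacity 4 gets a 5-slot ring holding at most 4 elements at a time.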
+ capacity += 1; + + NV_ASSERT_OR_RETURN(pAllocator != NULL, NV_ERR_INVALID_ARGUMENT); + + pData = PORT_ALLOC(pAllocator, capacity * msgSize); + if (pData == NULL) + return NV_ERR_NO_MEMORY; + + return circularQueueInitCommon(pQueue, pData, capacity, pAllocator, msgSize); +} + +NV_STATUS circularQueueInitNonManaged_IMPL +( + Queue *pQueue, + NvLength capacity, + NvLength msgSize +) +{ + return circularQueueInitCommon(pQueue, NULL /*pData*/, capacity, NULL /*pAllocator*/, msgSize); +} + +void circularQueueDestroy_IMPL(Queue *pQueue) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + NV_ASSERT_OR_RETURN_VOID(NULL != pQueue); + + MEM_WR64(&pQueue->capacity, 1); + MEM_WR64(&pQueue->getIdx, 0); + MEM_WR64(&pQueue->putIdx, 0); + MEM_RD(pAllocator, &pQueue->pAllocator); + + if (pAllocator) + PORT_FREE(pQueue->pAllocator, pQueue->pData); +} + +NvLength circularQueueCapacity_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return MEM_RD64(&pQueue->capacity) - 1; +} + +NvLength circularQueueCount_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return queueGetCount(pQueue); +} + +NvBool circularQueueIsEmpty_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return queueGetCount(pQueue) == 0; +} + +NvLength circularQueuePushNonManaged_IMPL +( + Queue *pQueue, + QueueContext *pCtx, + void* pElements, + NvLength numElements +) +{ + void *src; + NvLength cntLimit = 0; + NvLength elemToCpy, srcSize; + NvLength putIdx; + NvLength msgSize; + NvLength capacity; + + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + putIdx = MEM_RD64(&pQueue->putIdx); + msgSize = MEM_RD64(&pQueue->msgSize); + capacity = MEM_RD64(&pQueue->capacity); + + // Calculate the elements to copy + cntLimit = capacity - queueGetCount(pQueue) - 1; + if (numElements > cntLimit) + { + numElements = cntLimit; + } + + src = pElements; + if (numElements > 0) + { + NvLength remainingElemToCpy = numElements; + + // We need a max of 2 copies to take care of wrapAround case. See if we have a wrap around + if ((putIdx + numElements) > capacity) + { + // do the extra copy here + elemToCpy = capacity - putIdx; + srcSize = msgSize * elemToCpy; + + pCtx->pCopyData(msgSize, putIdx, pCtx, src, elemToCpy, NV_TRUE /*bCopyIn*/); + + // Update variables for next copy + remainingElemToCpy -= elemToCpy; + src = (void *)((NvU8 *)src + srcSize); + + putIdx = 0; + } + + NV_ASSERT(remainingElemToCpy <= capacity - putIdx); + + pCtx->pCopyData(msgSize, putIdx, pCtx, src, remainingElemToCpy, NV_TRUE /*bCopyIn*/); + + // The data must land before index update. 
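+        // Sketch of the assumed single-producer/single-consumer protocol:
+        //
+        //   producer: copy payload in  -> store fence -> advance putIdx
+        //   consumer: copy payload out -> load fence  -> advance getIdx
+        //
+        // circularQueuePopAndCopyNonManaged_IMPL issues the matching load
+        // fence before publishing the updated getIdx.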
+ portAtomicMemoryFenceStore(); + MEM_WR64(&pQueue->putIdx, (putIdx + remainingElemToCpy) % capacity); + } + + return numElements; +} + +NvLength circularQueuePush_IMPL +( + Queue *pQueue, + void* pElements, + NvLength numElements +) +{ + QueueContext ctx = {0}; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, NV_FALSE); + + ctx.pCopyData = managedCopyData; + ctx.pData = pQueue->pData; + + return circularQueuePushNonManaged_IMPL(pQueue, &ctx, pElements, numElements); +} + +void* circularQueuePeek_IMPL(Queue *pQueue) +{ + void *top; + + NV_ASSERT_OR_RETURN(pQueue != NULL, 0); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, 0); + + if (queueGetCount(pQueue) == 0) return NULL; + top = (void*)((NvU8*)pQueue->pData + pQueue->getIdx * pQueue->msgSize); + return top; +} + +void circularQueuePop_IMPL(Queue *pQueue) +{ + NvLength getIdx; + NvLength capacity; + + NV_ASSERT_OR_RETURN_VOID(NULL != pQueue); + + getIdx = MEM_RD64(&pQueue->getIdx); + capacity = MEM_RD64(&pQueue->capacity); + + if (queueGetCount(pQueue) > 0) + { + MEM_WR64(&pQueue->getIdx, (getIdx + 1) % capacity); + } +} + +NvBool circularQueuePopAndCopyNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, void *pCopyTo) +{ + NvLength capacity; + NvLength msgSize; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + + capacity = MEM_RD64(&pQueue->capacity); + msgSize = MEM_RD64(&pQueue->msgSize); + + if (queueGetCount(pQueue) > 0) + { + NvLength getIdx = MEM_RD64(&pQueue->getIdx); + pCtx->pCopyData(msgSize, getIdx, pCtx, pCopyTo, 1, NV_FALSE /*bCopyIn*/); + + // Update of index can't happen before we read all the data. + portAtomicMemoryFenceLoad(); + + MEM_WR64(&pQueue->getIdx, (getIdx + 1) % capacity); + + return NV_TRUE; + } + return NV_FALSE; +} + +NvBool circularQueuePopAndCopy_IMPL(Queue *pQueue, void *pCopyTo) +{ + QueueContext ctx = {0}; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, NV_FALSE); + + ctx.pCopyData = managedCopyData; + ctx.pData = pQueue->pData; + + return circularQueuePopAndCopyNonManaged_IMPL(pQueue, &ctx, pCopyTo); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c new file mode 100644 index 0000000..45d776f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c @@ -0,0 +1,308 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "eventbufferproducer.h" +#include "nvport/nvport.h" + +// +// This file contains generic event buffer producer implementation for adding variable length data +// +// Data format: +// +// Event Record buffer holds fixed size records +// +// |---------|---------|---------|---------|...|---------| +// | record1 | record2 | record3 | record4 |...| recordn | +// |---------|---------|---------|---------|...|---------| +// +// Variable length data buffer: +// The fixed event record can optionally contain a pointer to variable length data. +// This buffer stores the varlength data that doesn't fit in the fixed size records. +// +// |------------|--------|...|---------| +// | data2 | data4 |...| data n | +// |------------|--------|...|---------| +// + +static NV_EVENT_BUFFER_RECORD* _eventBufferGetFreeRecord(EVENT_BUFFER_PRODUCER_INFO *); +static void _eventBufferAddVardata(EVENT_BUFFER_PRODUCER_INFO*, NvP64, NvU32, NV_EVENT_BUFFER_RECORD_HEADER*); +static void _eventBufferUpdateRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO*); +static void _eventBufferUpdateVarRemaingSize(EVENT_BUFFER_PRODUCER_INFO* info); + +void +eventBufferInitRecordBuffer +( + EVENT_BUFFER_PRODUCER_INFO *info, + NV_EVENT_BUFFER_HEADER* pHeader, + NvP64 recordBuffAddr, + NvU32 recordSize, + NvU32 recordCount, + NvU32 bufferSize, + NvU32 notificationThreshold +) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + pRecordBuffer->pHeader = pHeader; + pRecordBuffer->recordBuffAddr = recordBuffAddr; + pRecordBuffer->recordSize = recordSize; + pRecordBuffer->totalRecordCount = recordCount; + pRecordBuffer->bufferSize = bufferSize; + pRecordBuffer->notificationThreshold = notificationThreshold; +} + +void +eventBufferInitVardataBuffer +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvP64 vardataBuffAddr, + NvU32 bufferSize, + NvU32 notificationThreshold +) +{ + VARDATA_BUFFER_INFO* pVardataBuffer = &info->vardataBuffer; + pVardataBuffer->vardataBuffAddr = vardataBuffAddr; + pVardataBuffer->bufferSize = bufferSize; + pVardataBuffer->notificationThreshold = notificationThreshold; + pVardataBuffer->get = 0; + pVardataBuffer->put = 0; + pVardataBuffer->remainingSize = bufferSize; +} + +void +eventBufferInitNotificationHandle(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 notificationHandle) +{ + info->notificationHandle = notificationHandle; +} + +void +eventBufferSetEnable(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isEnabled) +{ + info->isEnabled = isEnabled; +} + +void +eventBufferSetKeepNewest(EVENT_BUFFER_PRODUCER_INFO *info,NvBool isKeepNewest) +{ + info->isKeepNewest = isKeepNewest; +} + +void +eventBufferUpdateRecordBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + pRecordBuffer->pHeader->recordGet = get; + + // used for notification + _eventBufferUpdateRecordBufferCount(info); + + // dropCounts get reset on every updateGet call + pRecordBuffer->pHeader->recordDropcount = 0; + pRecordBuffer->pHeader->vardataDropcount = 0; + +} + +void +_eventBufferUpdateRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = info->recordBuffer.pHeader; + + if (pHeader->recordGet <= pHeader->recordPut) + 
pHeader->recordCount = (pHeader->recordPut - pHeader->recordGet); + else + pHeader->recordCount = pHeader->recordPut + (pRecordBuffer->totalRecordCount - pHeader->recordGet); +} + +void +eventBufferUpdateVardataBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get) +{ + VARDATA_BUFFER_INFO* pVardataBuffer = &info->vardataBuffer; + pVardataBuffer->get = get; + + _eventBufferUpdateVarRemaingSize(info); +} + +NvU32 +eventBufferGetRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + return info->recordBuffer.totalRecordCount; +} + +NvU32 +eventBufferGetVardataBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + return info->vardataBuffer.bufferSize; +} + +// +// eventBufferProducerAddEvent +// +// Adds an event to an event buffer +// This function is called after acquiring correct locks (depending on which module includes it) +// and bound checks for input parameters +// eventType : for RM this would be either 2080 subdevice events or 0000 system events +// eventSubtype: optional +// payloadSize and vardataSize must be 64 bit aligned +// +void +eventBufferProducerAddEvent +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvU16 eventType, + NvU16 eventSubtype, + EVENT_BUFFER_PRODUCER_DATA* pData +) +{ + NV_EVENT_BUFFER_RECORD *record; + + if (info->isEnabled) + { + record = _eventBufferGetFreeRecord(info); + if (record) + { + record->recordHeader.type = eventType; + record->recordHeader.subtype = eventSubtype; + + if (pData->payloadSize) + portMemCopy(record->inlinePayload, pData->payloadSize, + NvP64_VALUE(pData->pPayload), pData->payloadSize); + + _eventBufferAddVardata(info, pData->pVardata, pData->vardataSize, &record->recordHeader); + } + } +} + +NV_EVENT_BUFFER_RECORD* +_eventBufferGetFreeRecord(EVENT_BUFFER_PRODUCER_INFO *info) +{ + RECORD_BUFFER_INFO* pRecInfo = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = pRecInfo->pHeader; + NvU32 recordOffset = 0; + NV_EVENT_BUFFER_RECORD* pFreeRecord = NULL; + + NvU32 putNext = pHeader->recordPut + 1; + + if (putNext == pRecInfo->totalRecordCount) + putNext = 0; + + if ((!info->isKeepNewest) && (putNext == pHeader->recordGet)) + { + pHeader->recordDropcount++; + } + else + { + recordOffset = pHeader->recordPut * pRecInfo->recordSize; + pFreeRecord = (NV_EVENT_BUFFER_RECORD*)((NvUPtr)pRecInfo->recordBuffAddr + recordOffset); + + pHeader->recordCount++; + pHeader->recordPut = putNext; + } + return pFreeRecord; +} + +void +_eventBufferAddVardata +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvP64 data, + NvU32 size, + NV_EVENT_BUFFER_RECORD_HEADER* recordHeader +) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = info->recordBuffer.pHeader; + NvU32 pVardataOffset; + NvU32 alignedSize = NV_ALIGN_UP(size, NV_EVENT_VARDATA_GRANULARITY); + NvU32 vardataOffsetEnd = pVarInfo->put + alignedSize; + + if (vardataOffsetEnd <= pVarInfo->bufferSize) + { + if ((!info->isKeepNewest) && (pVarInfo->remainingSize < alignedSize)) + goto skip; + + pVardataOffset = pVarInfo->put; + recordHeader->varData = vardataOffsetEnd; + } + else + { + // wrap-around; the effective vardataPut=0, vardataOffsetEnd=size + vardataOffsetEnd = 0 + alignedSize; + if ((!info->isKeepNewest) && (pVarInfo->get <= vardataOffsetEnd)) + goto skip; + + recordHeader->varData = vardataOffsetEnd | NV_EVENT_VARDATA_START_OFFSET_ZERO; + pVardataOffset = 0; + } + + if(size) + { + portMemCopy((void*)((NvUPtr)pVarInfo->vardataBuffAddr + pVardataOffset), size, NvP64_VALUE(data), size); + + if (alignedSize != size) + { + pVardataOffset += size; + 
portMemSet((void*)((NvUPtr)pVarInfo->vardataBuffAddr + pVardataOffset), 0, (alignedSize - size)); + } + } + + pVarInfo->put = vardataOffsetEnd; + _eventBufferUpdateVarRemaingSize(info); + return; + +skip: + recordHeader->varData = pVarInfo->put; + pHeader->vardataDropcount += 1; +} + +void +_eventBufferUpdateVarRemaingSize(EVENT_BUFFER_PRODUCER_INFO* info) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + + if (!info->isKeepNewest) + { + if (pVarInfo->get <= pVarInfo->put) + pVarInfo->remainingSize = pVarInfo->get + (pVarInfo->bufferSize - pVarInfo->put); + else + pVarInfo->remainingSize = pVarInfo->get - pVarInfo->put; + } +} + +NvBool +eventBufferIsNotifyThresholdMet(EVENT_BUFFER_PRODUCER_INFO* info) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + RECORD_BUFFER_INFO* pRecInfo = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = pRecInfo->pHeader; + + if (!info->isKeepNewest) + { + if (((pRecInfo->totalRecordCount - pHeader->recordCount) <= pRecInfo->notificationThreshold) || + (pVarInfo->remainingSize <= pVarInfo->notificationThreshold)) + { + return NV_TRUE; + } + } + return NV_FALSE; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c new file mode 100644 index 0000000..a05f00a --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "ioaccess/ioaccess.h" +#include "utils/nvprintf.h" +#include "nvport/nvport.h" + +/*! + * @brief: Allocate and initialize an IO_APERTURE instance. + * + * @param[out] ppAperture pointer to the new IO_APERTURE. + * @param[in] pParentAperture pointer to the parent of the new IO_APERTURE. + * @param[in] pDevice pointer to IO_DEVICE of the APERTURE. + * @param[in] offset offset from the parent APERTURE's baseAddress. + * @param[in] length length of the APERTURE. + * + * @return NV_OK upon success + * NV_ERR* otherwise. 
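+ *
+ * @par Example
+ * A minimal sketch of a caller (pBar0Aperture here is a hypothetical
+ * existing parent aperture, not part of this file):
+ * @code
+ * IO_APERTURE *pWindow = NULL;
+ * // Carve a 4KB sub-aperture 0x1000 bytes into the parent.
+ * NV_STATUS status = ioaccessCreateIOAperture(&pWindow, pBar0Aperture,
+ *                                             NULL, 0x1000, 0x1000);
+ * if (status == NV_OK)
+ * {
+ *     // ... use pWindow, then release it:
+ *     ioaccessDestroyIOAperture(pWindow);
+ * }
+ * @endcode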
+ */ +NV_STATUS +ioaccessCreateIOAperture +( + IO_APERTURE **ppAperture, + IO_APERTURE *pParentAperture, + IO_DEVICE *pDevice, + NvU32 offset, + NvU32 length +) +{ + NV_STATUS status = NV_OK; + IO_APERTURE *pAperture = portMemAllocNonPaged(sizeof(IO_APERTURE)); + + if (pAperture == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pAperture, 0, sizeof(IO_APERTURE)); + + status = ioaccessInitIOAperture(pAperture, pParentAperture, pDevice, offset, length); + if (status != NV_OK) + { + portMemFree(pAperture); + } + else + { + *ppAperture = pAperture; + } + + return status; +} + + +/*! + * Initialize an IO_APERTURE instance. This enables initialization for derived IO_APERTURE instances + * that are not allocated via CreateIOAperture. + * + * @param[in,out] pAperture pointer to IO_APERTURE instance to be initialized. + * @param[in] pParentAperture pointer to parent of the new IO_APERTURE. + * @param[in] pDevice pointer to IO_DEVICE of the APERTURE. + * @param[in] offset offset from the parent APERTURE's baseAddress. + * @param[in] length length of the APERTURE. + * + * @return NV_OK when inputs are valid. + */ +NV_STATUS +ioaccessInitIOAperture +( + IO_APERTURE *pAperture, + IO_APERTURE *pParentAperture, + IO_DEVICE *pDevice, + NvU32 offset, + NvU32 length +) +{ + if (pAperture == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Aperture's IO device can't be set if both the parent aperture and IO device + // input arguments are NULL. + // + if ((pDevice == NULL) && (pParentAperture == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pDevice != NULL) + { + pAperture->pDevice = pDevice; + } + + if (pParentAperture != NULL) + { + pAperture->pDevice = pParentAperture->pDevice; + pAperture->baseAddress = pParentAperture->baseAddress; + + // Check if the child Aperture strides beyond the parent's boundary. + if ((length + offset) > pParentAperture->length) + { + NV_PRINTF(LEVEL_WARNING, + "Child aperture crosses parent's boundary, length %u offset %u, Parent's length %u\n", + length, offset, pParentAperture->length); + } + } + else + { + pAperture->baseAddress = 0; + } + + pAperture->baseAddress += offset; + pAperture->length = length; + + return NV_OK; +} + +void +ioaccessDestroyIOAperture +( + IO_APERTURE *pAperture +) +{ + portMemFree(pAperture); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c new file mode 100644 index 0000000..75f4876 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c @@ -0,0 +1,864 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "utils/nvbitvector.h" + +/** + * @brief Returns the size, in bytes, of this bitvector. + * @note due to the compiler trick of storing the last index within a + * structure pointer in the data, the minimum size of an NV_BITEVECTOR + * will be the size of one pointer on a given architecture. If the + * storage size of the underlying data is changed to something less + * than the size of a pointer on a given architecture, then two + * libraries running on different architectures transferring bitvectors + * between them may disagree on the value of the direct sizeof operator + * on a struct of an NV_BITVECTOR derivative. This version of SizeOf + * should be agreeable to all architectures, and should be used instead + * of sizeof to marshall data between libraries running on different + * architectures. + */ +NvU32 +bitVectorSizeOf_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + return NV_BITVECTOR_BYTE_SIZE(bitVectorLast); +} + +/** + * @brief Clears all flags in pBitVector. + */ +NV_STATUS +bitVectorClrAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + portMemSet(&pBitVector->qword, 0x0, byteSize); + return NV_OK; +} + +/** + * @brief Clears the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorClr_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] &= ~NVBIT64(qwordOffset); + return NV_OK; +} + +/** + * @brief Clears all flags within a range in pBitVector + */ +NV_STATUS +bitVectorClrRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = 0x0; + idx += 63; + continue; + } + + status = bitVectorClr_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Sets all flags in pBitVector + */ +NV_STATUS +bitVectorSetAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NvU64 *qword; + NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 
1); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + portMemSet(qword, NV_U8_MAX, byteSize); + qword[arraySize - 1] &= (NV_U64_MAX >> (63 - qwordOffset)); + + return NV_OK; +} + +/** + * @brief Sets the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorSet_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] |= NVBIT64(qwordOffset); + + return NV_OK; +} + +/** + * @brief Sets all flags within a range in pBitVector + */ +NV_STATUS +bitVectorSetRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = (NV_U64_MAX); + idx += 63; + continue; + } + + status = bitVectorSet_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Toggles the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorInv_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] ^= NVBIT64(qwordOffset); + + return NV_OK; +} + +/** + * @brief Toggles all flags within a range in pBitVector + */ +NV_STATUS +bitVectorInvRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = ~qword[NV_BITVECTOR_IDX(idx)]; + idx += 63; + continue; + } + + status = bitVectorInv_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Initializes a NV_BITVECTOR with the bit indices contained within + * pIndices set. 
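+ *
+ * For illustration (hypothetical values): with pIndices = {0, 5, 63} and
+ * indicesSize = 3, the vector is first fully cleared and then exactly
+ * bits 0, 5 and 63 are raised.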
+ */ +NV_STATUS +bitVectorFromArrayU16_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 *pIndices, + NvU32 indicesSize +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pIndices, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(0 != indicesSize, NV_ERR_INVALID_ARGUMENT); + + status = bitVectorClrAll_IMPL(pBitVector, bitVectorLast); + if (NV_OK != status) + { + return status; + } + + for (i = 0; i < indicesSize; ++i) + { + status = bitVectorSet_IMPL(pBitVector, bitVectorLast, pIndices[i]); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Checks if all flags in pBitVector are set + */ +NvBool +bitVectorTestAllSet_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (mask != (qword[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if all flags in pBitVector are cleared + */ +NvBool +bitVectorTestAllCleared_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != (qword[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if two bitVectors are equivalent + */ +NvBool +bitVectorTestEqual_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorALast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorALast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitVectorALast == bitVectorBLast), NV_ERR_INVALID_ARGUMENT); + + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if ((qwordA[idx] & mask) != (qwordB[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if the set of set flags in bitVectorA is a subset of the set of + * set flags in bitVectorB. 
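+ *
+ * For illustration (hypothetical values): A = {1, 2} against B = {1, 2, 7}
+ * yields NV_TRUE, while A = {1, 9} against the same B yields NV_FALSE,
+ * since bit 9 is raised in A but clear in B.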
+ */ +NvBool +bitVectorTestIsSubset_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorALast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorALast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitVectorALast == bitVectorBLast), NV_ERR_INVALID_ARGUMENT); + + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (((qwordA[idx] & mask) & (qwordB[idx] & mask)) != (qwordA[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if the flag according to bit index idx in pBitVector is set + */ +NvBool +bitVectorTest_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + const NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + return !!(qword[qwordIdx] & NVBIT64(qwordOffset)); +} + +/** + * @brief Computes the intersection of flags in pBitVectorA and pBitVectorB, and + * stores the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorAnd_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] & qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Computes the union of flags in pBitVectorA and pBitVectorB, and stores + * the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorOr_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] | qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Computes the exclusive OR of flags in pBitVectorA and pBitVectorB, and stores + * the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorXor_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] ^ qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Causes the set of raised flags in pBitVectorDst to be equal to the + * complement of the set of raised flags in pBitVectorSrc. 
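+ * Flags beyond bitVectorLast remain cleared: each destination qword is
+ * masked, so complementing never raises invalid tail bits.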
+ * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorSrc Source + * + * @note it is valid for the same bitVector to be both destination and + * source for this operation + */ +NV_STATUS +bitVectorComplement_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordSrc; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorSrcLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordSrc = (const NvU64 *)&pBitVectorSrc->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (~qwordSrc[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Causes the set of raised flags in pBitVectorDst to be equal to the set + * of raised flags in pBitVectorSrc. + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorSrc Source + * + * @note it is \b invalid for the same bitVector to be both destination and + * source for this operation + */ +NV_STATUS +bitVectorCopy_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +) +{ + NvU32 byteSizeDst = NV_BITVECTOR_BYTE_SIZE(bitVectorDstLast); + NvU32 byteSizeSrc = NV_BITVECTOR_BYTE_SIZE(bitVectorSrcLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(bitVectorDstLast == bitVectorSrcLast, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBitVectorDst != pBitVectorSrc, NV_WARN_NOTHING_TO_DO); + + portMemCopy(&pBitVectorDst->qword, byteSizeDst, &pBitVectorSrc->qword, byteSizeSrc); + return NV_OK; +} + +/** + * @brief Returns the bit index of the first set flag in pBitVector. + * + * @note in the absence of set flags in pBitVector, the index of the first + * invalid flag is returned. + */ +NvU32 +bitVectorCountTrailingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != (qword[idx] & mask)) + { + return ((idx * (sizeof(NvU64) * 8)) + + portUtilCountTrailingZeros64(qword[idx] & mask)); + } + } + + return bitVectorLast; +} + +/** + * @brief Returns the bit index of the last set flag in pBitVector. + * + * @note in the absence of set flags in pBitVector, the index of the first + * invalid flag is returned. 
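+ *
+ * (Strictly, the returned value is the number of leading zero flags in the
+ * valid range, counted down from bit (bitVectorLast - 1); when any flag is
+ * set, the index of the last set flag is bitVectorLast - 1 minus the
+ * returned value.)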
+ */ +NvU32 +bitVectorCountLeadingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU16 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU16 qwordUnused = 63 - qwordOffset; + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = (arraySize - 1); idx != ((NvU16)-1); idx--) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != qword[idx]) + { + // + // We're counting from the MSB, and we have to subtract the unused + // portion of the bitvector from the output + // + return (((arraySize - idx - 1) * (sizeof(NvU64) * 8)) + + portUtilCountLeadingZeros64(qword[idx] & mask)) - + qwordUnused; + } + } + + return bitVectorLast; +} + +/** + * @brief Returns the number of set bits in the bitvector. + */ +NvU32 +bitVectorCountSetBits_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU16 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + NvU32 count; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + count = 0; + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + count += nvPopCount64(qword[idx] & mask); + } + + return count; +} + +/** + * @brief Exports the bitVector data to an NvU64 raw bitmask array. + */ +NV_STATUS +bitVectorToRaw_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + void *pRawMask, + NvU32 rawMaskSize +) +{ + const NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pRawMask, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rawMaskSize >= byteSize, NV_ERR_BUFFER_TOO_SMALL); + + portMemCopy(pRawMask, byteSize, &pBitVector->qword, byteSize); + return NV_OK; +} + +/** + * @brief Imports the bitVector data from an Nvu64 raw bitmask array. + */ +NV_STATUS +bitVectorFromRaw_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + const void *pRawMask, + NvU32 rawMaskSize +) +{ + const NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pRawMask, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rawMaskSize >= byteSize, NV_ERR_BUFFER_TOO_SMALL); + + portMemCopy(&pBitVector->qword, byteSize, pRawMask, byteSize); + return NV_OK; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c new file mode 100644 index 0000000..8e47144 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c @@ -0,0 +1,311 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvport/nvport.h" + +#include "nvtypes.h" + +#include "nvoc/rtti.h" +#include "nvoc/runtime.h" + +#include "nvoc/object.h" + +# include "utils/nvassert.h" + + +static NV_FORCEINLINE Dynamic *__nvoc_fullyDerive_IMPL(Dynamic *pDynamic) +{ + return (Dynamic*)((NvU8*)pDynamic - pDynamic->__nvoc_rtti->offset); +} + +Dynamic *fullyDeriveWrapper(Dynamic *pDynamic) +{ + return __nvoc_fullyDerive_IMPL(pDynamic); +} + +const struct NVOC_RTTI_PROVIDER __nvoc_rtti_provider = { 0 }; + +NVOC_CLASS_ID __nvoc_objGetClassId(Dynamic *pObj) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pObj); + return pDerivedObj->__nvoc_rtti->pClassDef->classInfo.classId; +} + +const NVOC_CLASS_INFO *__nvoc_objGetClassInfo(Dynamic *pObj) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pObj); + return &pDerivedObj->__nvoc_rtti->pClassDef->classInfo; +} + +Dynamic *objFindAncestor_IMPL(Dynamic *pDynamic, NVOC_CLASS_ID classId) +{ + Object *pObj = dynamicCast(pDynamic, Object); + NV_ASSERT(pObj != NULL); + + while ((pObj = pObj->pParent) != NULL) + { + if (objDynamicCastById(pObj, classId) != NULL) return __nvoc_fullyDerive(pObj); + } + + NV_ASSERT(0); + return NULL; +} + +void objAddChild_IMPL(Object *pObj, Object *pChild) +{ + NV_ASSERT(pChild->pParent == NULL); + pChild->pParent = pObj; + pChild->childTree.pSibling = pObj->childTree.pChild; + pObj->childTree.pChild = pChild; +} + +void objRemoveChild_IMPL(Object *pObj, Object *pChild) +{ + Object **ppChild; + + NV_ASSERT(pObj == pChild->pParent); + pChild->pParent = NULL; + ppChild = &pObj->childTree.pChild; + while (*ppChild != NULL) + { + if (*ppChild == pChild) + { + *ppChild = pChild->childTree.pSibling; + return; + } + + ppChild = &(*ppChild)->childTree.pSibling; + } +} + +Object *objGetChild_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->childTree.pChild; +} + +Object *objGetSibling_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->childTree.pSibling; +} + +Object *objGetDirectParent_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->pParent; +} + +//! Internal backing method for objDelete. 
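+//! Teardown order: run the fully-derived destructor, detach the object from
+//! its parent, warn if any child objects were never freed, then release the
+//! fully-derived allocation.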
+void __nvoc_objDelete(Dynamic *pDynamic) +{ + Dynamic *pDerivedObj; + Object *pObj, *pChild; + + if (pDynamic == NULL) + { + return; + } + + pDynamic->__nvoc_rtti->dtor(pDynamic); + + pObj = dynamicCast(pDynamic, Object); + if (pObj->pParent != NULL) + { + objRemoveChild(pObj->pParent, pObj); + } + + if ((pChild = objGetChild(pObj)) != NULL) + { +#if NV_PRINTF_STRINGS_ALLOWED + portDbgPrintf("NVOC: %s: Child class %s not freed from parent class %s.", + __FUNCTION__, + objGetClassInfo(pChild)->name, + objGetClassInfo(pObj)->name); +#endif + PORT_BREAKPOINT_CHECKED(); + } + + pDerivedObj = __nvoc_fullyDerive(pDynamic); + portMemFree(pDerivedObj); +} + +//! Internal method to fill out an object's RTTI pointers from a class definition. +void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef) +{ + NvU32 relativeIdx; + for (relativeIdx = 0; relativeIdx < pClassDef->pCastInfo->numRelatives; relativeIdx++) + { + const struct NVOC_RTTI *pRelative = pClassDef->pCastInfo->relatives[relativeIdx]; + const struct NVOC_RTTI **ppRelativeRtti = &((Dynamic*)((NvU8*)pNewObject + pRelative->offset))->__nvoc_rtti; + *ppRelativeRtti = pRelative; + } +} + +//! Internal backing method for objCreateDynamic. +NV_STATUS __nvoc_objCreateDynamic( + Dynamic **ppNewObject, + Dynamic *pParent, + const NVOC_CLASS_INFO *pClassInfo, + NvU32 createFlags, + ...) +{ + NV_STATUS status; + va_list args; + + const struct NVOC_CLASS_DEF *pClassDef = + (const struct NVOC_CLASS_DEF*)pClassInfo; + + if (pClassDef == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + else if (pClassDef->objCreatefn == NULL) + { + return NV_ERR_INVALID_CLASS; + } + + va_start(args, createFlags); + status = pClassDef->objCreatefn(ppNewObject, pParent, createFlags, args); + va_end(args); + + return status; +} + +Dynamic *objDynamicCastById_IMPL(Dynamic *pFromObj, NVOC_CLASS_ID classId) +{ + NvU32 i, numBases; + Dynamic *pDerivedObj; + + const struct NVOC_RTTI *const *bases; + const struct NVOC_RTTI *pFromRtti; + const struct NVOC_RTTI *pDerivedRtti; + + if (pFromObj == NULL) + { + return NULL; + } + + pFromRtti = pFromObj->__nvoc_rtti; + + // fastpath, we're dynamic casting to what we already have + if (classId == pFromRtti->pClassDef->classInfo.classId) + { + return pFromObj; + } + + pDerivedObj = __nvoc_fullyDerive(pFromObj); + pDerivedRtti = pDerivedObj->__nvoc_rtti; + + // fastpath, we're dynamic casting to the fully derived class + if (classId == pDerivedRtti->pClassDef->classInfo.classId) + { + return pDerivedObj; + } + + // slowpath, search all the possibilities for a match + numBases = pDerivedRtti->pClassDef->pCastInfo->numRelatives; + bases = pDerivedRtti->pClassDef->pCastInfo->relatives; + + for (i = 0; i < numBases; i++) + { + if (classId == bases[i]->pClassDef->classInfo.classId) + { + return (Dynamic*)((NvU8*)pDerivedObj + bases[i]->offset); + } + } + + return NULL; +} + +//! Internal backing method for dynamicCast. +Dynamic *__nvoc_dynamicCast(Dynamic *pFromObj, const NVOC_CLASS_INFO *pClassInfo) +{ + return objDynamicCastById(pFromObj, pClassInfo->classId); +} + +/*! + * @brief Internal dummy destructor for non-fully-derived pointers. + * + * Resolves pDynamic to its most derived pointer and then calls the real + * destructor on the fully-derived object. 
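+ *
+ * This keeps destruction correct through any base-class pointer: the real
+ * destructor always runs once, on the fully-derived object.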
+ */ +void __nvoc_destructFromBase(Dynamic *pDynamic) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pDynamic); + pDerivedObj->__nvoc_rtti->dtor(pDerivedObj); +} + +const struct NVOC_EXPORTED_METHOD_DEF* nvocGetExportedMethodDefFromMethodInfo_IMPL(const struct NVOC_EXPORT_INFO *pExportInfo, NvU32 methodId) +{ + NvU32 exportLength; + const struct NVOC_EXPORTED_METHOD_DEF *exportArray; + + if (pExportInfo == NULL) + return NULL; + + exportLength = pExportInfo->numEntries; + exportArray = pExportInfo->pExportEntries; + + if (exportArray != NULL && exportLength > 0) + { + // The export array is sorted by methodId, so we can binary search it + NvU32 low = 0; + NvU32 high = exportLength; + while (1) + { + NvU32 mid = (low + high) / 2; + + if (exportArray[mid].methodId == methodId) + return &exportArray[mid]; + + if (high == mid || low == mid) + break; + + if (exportArray[mid].methodId > methodId) + high = mid; + else + low = mid; + } + } + + return NULL; +} + +const struct NVOC_EXPORTED_METHOD_DEF *objGetExportedMethodDef_IMPL(Dynamic *pObj, NvU32 methodId) +{ + const struct NVOC_CASTINFO *const pCastInfo = pObj->__nvoc_rtti->pClassDef->pCastInfo; + const NvU32 numRelatives = pCastInfo->numRelatives; + const struct NVOC_RTTI *const *relatives = pCastInfo->relatives; + NvU32 i; + + for (i = 0; i < numRelatives; i++) + { + const void *pDef = nvocGetExportedMethodDefFromMethodInfo_IMPL(relatives[i]->pClassDef->pExportInfo, methodId); + if (pDef != NULL) + return pDef; + } + + return NULL; +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c new file mode 100644 index 0000000..3ecce65 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "nvport/nvport.h" + +typedef struct _PORT_STATE +{ + NvU32 initCount; +} PORT_STATE; +static PORT_STATE portState; + + +#if PORT_IS_MODULE_SUPPORTED(atomic) +#define PORT_DEC(x) portAtomicDecrementS32((volatile NvS32 *)&x) +#define PORT_INC(x) portAtomicIncrementS32((volatile NvS32 *)&x) +#else +#define PORT_DEC(x) --x +#define PORT_INC(x) ++x +#endif + + +/// @todo Add better way to initialize all modules +NV_STATUS portInitialize() +{ + if (PORT_INC(portState.initCount) == 1) + { +#if PORT_IS_MODULE_SUPPORTED(debug) + portDbgInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(sync) + portSyncInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(memory) + portMemInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(crypto) + portCryptoInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(cpu) + portCpuInitialize(); +#endif + } + return NV_OK; +} + +void portShutdown() +{ + if (PORT_DEC(portState.initCount) == 0) + { +#if PORT_IS_MODULE_SUPPORTED(cpu) + portCpuShutdown(); +#endif +#if PORT_IS_MODULE_SUPPORTED(crypto) + portCryptoShutdown(); +#endif +#if PORT_IS_MODULE_SUPPORTED(memory) +#if (!defined(DEBUG) || defined(NV_MODS)) && !NVCPU_IS_RISCV64 + portMemShutdown(NV_TRUE); +#else + portMemShutdown(NV_FALSE); +#endif +#endif +#if PORT_IS_MODULE_SUPPORTED(sync) + portSyncShutdown(); +#endif +#if PORT_IS_MODULE_SUPPORTED(debug) + portDbgShutdown(); +#endif + } +} + +NvBool portIsInitialized() +{ + return portState.initCount > 0; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c new file mode 100644 index 0000000..11c4ab9 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+/**
+ * @file
+ * @brief CPU module function implementations that are shared across platforms
+ *
+ */
+
+#include "nvport/nvport.h"
+#include "cpu_common.h"
+
+void
+portCpuInitialize(void)
+{
+    PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(NV_FALSE);
+}
+
+void
+portCpuShutdown(void)
+{
+    //
+    // Not returning status to the caller since that seems to be the norm in
+    // nvport for init and shutdown functions
+    //
+    if (PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE() == NV_TRUE)
+    {
+        //
+        // If PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE is true then
+        // portCpuExFreeImcBarDesc will be supported. Adding the following
+        // check to avoid compile-time issues
+        //
+        #if PORT_IS_FUNC_SUPPORTED(portCpuExFreeImcBarDesc)
+        if (portCpuExFreeImcBarDesc(PORT_CPU_GET_IMC_BAR_DESC()) != NV_OK)
+        {
+            PORT_BREAKPOINT_DEBUG();
+        }
+        #endif
+    }
+    PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(NV_FALSE);
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h
new file mode 100644
index 0000000..a9c7ee3
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h
@@ -0,0 +1,54 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +/** + * @file + * @brief CPU module private defines/interfaces + */ + +#ifndef _NVPORT_CPU_COMMON_H_ +#define _NVPORT_CPU_COMMON_H_ + +#include "nvport/nvport.h" + +// +// Structure representing internal state for CPU +// +typedef struct PORT_CPU_STATE +{ + // BAR descriptor for Integrated Memory controller + PORT_CPU_BAR_DESC imcBarDesc; + + // If init for IMC BAR descriptor is done + NvBool bImcBarDescInit; +} PORT_CPU_STATE; + +PORT_CPU_STATE gCpuPortState; + +#define PORT_CPU_GET_IMC_BAR_DESC() (&(gCpuPortState.imcBarDesc)) + +#define PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE() (gCpuPortState.bImcBarDescInit) + +#define PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(state) (gCpuPortState.bImcBarDescInit = state) +#endif // _NVPORT_CPU_COMMON_H_ +/// @} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c new file mode 100644 index 0000000..d8be920 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c @@ -0,0 +1,190 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CRYPTO module PRNG implementation using the xorshift algorithm. + * + * For details about the Xorshift algorithms, see: + * https://en.wikipedia.org/wiki/Xorshift + * + * @note Xorshift algorithms take either 128bit or 1024bit seeds. The algorithm + * author suggests seeding a splitmix64.c with a 64bit value, and using its + * output to seed xorshift. + * See http://xorshift.di.unimi.it/ for details. + * + * @warning Xorshift algorithms are NOT CRYPTOGRAPHICALLY SECURE. They generally + * perform really well on various randomness tests, but are not suitable for + * security sensitive operations such as key generation. If you require a CSRNG + * use @ref portCryptoExTrueRandomGetU32 and family. + */ +#include "nvport/nvport.h" + + +/** + * @brief Number of 64bit words used to store the state of the algorithm. 
+ * xorshift128+ uses 2 qwords of state, and xorshift1024* uses 16 qwords + */ +#define XORSHIFT_STATE_QWORDS 2 + +struct PORT_CRYPTO_PRNG +{ + NvU64 state[XORSHIFT_STATE_QWORDS]; +}; +PORT_CRYPTO_PRNG *portCryptoDefaultGenerator; + +void portCryptoInitialize() +{ + NvU64 seed; +#if defined(PORT_CRYPTO_PRNG_SEED) + seed = PORT_CRYPTO_PRNG_SEED; +#elif PORT_IS_FUNC_SUPPORTED(portCryptoExTrueRandomGetU64) + seed = portCryptoExTrueRandomGetU64(); +#elif PORT_IS_MODULE_SUPPORTED(time) + seed = portTimeGetUptimeNanosecondsHighPrecision(); +#elif defined(NVRM) && !defined(NVWATCH) + { + extern NvU64 osGetTimestamp(void); + seed = osGetTimestamp(); + } +#else + seed = (NvUPtr)&portCryptoDefaultGenerator; +#endif + portCryptoPseudoRandomSetSeed(seed); +} + +void portCryptoShutdown() +{ + portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator); + portCryptoDefaultGenerator = NULL; +} + + +/** + * @brief Initializes a xorshift state from a 64bit seed. Performed using a + * splitmix64 PRNG. + * + * Adapted from: http://xorshift.di.unimi.it/splitmix64.c + */ +static void _initState(NvU64 seed64, NvU64 state[XORSHIFT_STATE_QWORDS]) +{ + NvU32 i; + for (i = 0; i < XORSHIFT_STATE_QWORDS; i++) + { + NvU64 z = (seed64 += 0x9E3779B97F4A7C15ULL); + z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL; + z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL; + state[i] = z ^ (z >> 31); + } +} + +/** + * @brief Get the next 64bit value using the xorshift128+ algorithm + * + * Adapted from: http://xorshift.di.unimi.it/xorshift128plus.c + */ +static NvU64 _xorshift128plus_GetU64(NvU64 state[2]) +{ + NvU64 s1 = state[0]; + const NvU64 s0 = state[1]; + state[0] = s0; + s1 ^= s1 << 23; // a + state[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); // b, c + return state[1] + s0; +} + +PORT_CRYPTO_PRNG *portCryptoPseudoRandomGeneratorCreate(NvU64 seed) +{ + PORT_CRYPTO_PRNG *pPrng = portMemAllocNonPaged(sizeof(*pPrng)); + + if (pPrng != NULL) + { + _initState(seed, pPrng->state); + } + return pPrng; +} + +void portCryptoPseudoRandomGeneratorDestroy(PORT_CRYPTO_PRNG *pPrng) +{ + portMemFree(pPrng); +} + +NvU32 portCryptoPseudoRandomGeneratorGetU32(PORT_CRYPTO_PRNG *pPrng) +{ + + return (NvU32) _xorshift128plus_GetU64(pPrng->state); +} +NvU64 portCryptoPseudoRandomGeneratorGetU64(PORT_CRYPTO_PRNG *pPrng) +{ + return _xorshift128plus_GetU64(pPrng->state); +} + +NV_STATUS portCryptoPseudoRandomGeneratorFillBuffer(PORT_CRYPTO_PRNG *pPrng, NvU8 *pBuffer, NvLength bufSize) +{ + NvLength i; + + PORT_ASSERT_CHECKED(pPrng != NULL); + + /** @note Unlike True Random generators which don't have seeds, here we must + * preserve the complete order of bytes across platforms. That means that + * we cannot fill the misaligned section first, then copy aligned qwords, + * and then fill the remainder - That way we lose some bytes + */ + + // Maybe require 64bit alignment for buffers: + // PORT_ASSERT_CHECKED(portUtilCheckAlignment(pBuffer, sizeof(NvU64))); + + if (pBuffer == NULL) + return NV_ERR_INVALID_POINTER; + + for (i = 0; i < bufSize; i+=8) + { + NvU64 x = _xorshift128plus_GetU64(pPrng->state); + portMemCopy(pBuffer+i, bufSize-i, &x, (bufSize-i < 8) ? 
bufSize-i : 8); + } + + return NV_OK; +} + + +void portCryptoPseudoRandomSetSeed(NvU64 seed) +{ + if (portCryptoDefaultGenerator) + portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator); + portCryptoDefaultGenerator = portCryptoPseudoRandomGeneratorCreate(seed); +} + +NvU32 portCryptoPseudoRandomGetU32() +{ + return portCryptoPseudoRandomGeneratorGetU32(portCryptoDefaultGenerator); +} + +NvU64 portCryptoPseudoRandomGetU64() +{ + return portCryptoPseudoRandomGeneratorGetU64(portCryptoDefaultGenerator); +} + +NV_STATUS portCryptoPseudoRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize) +{ + return portCryptoPseudoRandomGeneratorFillBuffer(portCryptoDefaultGenerator, pBuffer, bufSize); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h new file mode 100644 index 0000000..037eee6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h @@ -0,0 +1,222 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief MEMORY module tracking functions implementation + * + */ + +#include "nvport/nvport.h" + +// Define accessor macros if not already defined +#ifndef PORT_MEM_RD08 +#define PORT_MEM_RD08(p) (*(p)) +#endif +#ifndef PORT_MEM_RD16 +#define PORT_MEM_RD16(p) (*(p)) +#endif +#ifndef PORT_MEM_RD32 +#define PORT_MEM_RD32(p) (*(p)) +#endif +#ifndef PORT_MEM_RD64 +#define PORT_MEM_RD64(p) (*(p)) +#endif +#ifndef PORT_MEM_WR08 +#define PORT_MEM_WR08(p, v) (*(p) = (v)) +#endif +#ifndef PORT_MEM_WR16 +#define PORT_MEM_WR16(p, v) (*(p) = (v)) +#endif +#ifndef PORT_MEM_WR32 +#define PORT_MEM_WR32(p, v) (*(p) = (v)) +#endif +#ifndef PORT_MEM_WR64 +#define PORT_MEM_WR64(p, v) (*(p) = (v)) +#endif + + +#if defined(PORT_MEM_USE_GENERIC_portMemSetPattern) +void * +portMemSetPattern +( + void *pData, + NvLength lengthBytes, + const NvU8 *pPattern, + NvLength patternBytes +) +{ + PORT_ASSERT_CHECKED(pData != NULL); + PORT_ASSERT_CHECKED(pPattern != NULL); + PORT_ASSERT_CHECKED(patternBytes > 0); + + if (lengthBytes > 0) + { + void *p = pData; + while (lengthBytes > patternBytes) + { + portMemCopy(p, patternBytes, pPattern, patternBytes); + p = (NvU8*)p + patternBytes; + lengthBytes -= patternBytes; + } + portMemCopy(p, lengthBytes, pPattern, lengthBytes); + } + return pData; +} +#endif + +#if defined(PORT_MEM_USE_GENERIC_portMemMove) +void * +portMemMove +( + void *pDestination, + NvLength destSize, + const void *pSource, + NvLength srcSize +) +{ + NvU32 *pDst32; + NvU8 *pDst8; + const NvU32 *pSrc32; + const NvU8 *pSrc8; + NvLength dwords = 0; + NvLength bytes = srcSize; + PORT_ASSERT_CHECKED(pDestination != NULL); + PORT_ASSERT_CHECKED(pSource != NULL); + PORT_ASSERT_CHECKED(srcSize <= destSize); + + if (pDestination == NULL || pSource == NULL || srcSize > destSize) + { + return NULL; + } + + if (pDestination == pSource) + { + return pDestination; + } + + if ((((NvUPtr)pSource & 3) == 0) && (((NvUPtr)pDestination & 3) == 0)) + { + dwords = srcSize / sizeof(NvU32); + bytes = srcSize % sizeof(NvU32); + } + + if (pDestination > pSource) + { + pDst8 = (NvU8*)pDestination + srcSize; + pSrc8 = (const NvU8*)pSource + srcSize; + + while (bytes--) + { + PORT_MEM_WR08(--pDst8, PORT_MEM_RD08(--pSrc8)); + } + pDst32 = (NvU32*)pDst8; + pSrc32 = (const NvU32*)pSrc8; + while (dwords--) + { + PORT_MEM_WR32(--pDst32, PORT_MEM_RD32(--pSrc32)); + } + } + else + { + pDst32 = (NvU32*)pDestination; + pSrc32 = (const NvU32*)pSource; + + while (dwords--) + { + PORT_MEM_WR32(pDst32++, PORT_MEM_RD32(pSrc32++)); + } + pDst8 = (NvU8*)pDst32; + pSrc8 = (const NvU8*)pSrc32; + while (bytes--) + { + PORT_MEM_WR08(pDst8++, PORT_MEM_RD08(pSrc8++)); + } + } + return pDestination; +} +#endif + +#if defined(PORT_MEM_USE_GENERIC_portMemCopy) +void * +portMemCopy +( + void *pDestination, + NvLength destSize, + const void *pSource, + NvLength srcSize +) +{ + // API guarantees this is a NOP when destSize==0 + if (destSize == 0) + return pDestination; + + PORT_ASSERT_CHECKED(!portUtilCheckOverlap((const NvU8*)pDestination, destSize, + (const NvU8*)pSource, srcSize)); + return portMemMove(pDestination, destSize, pSource, srcSize); +} +#endif + + +#if defined(PORT_MEM_USE_GENERIC_portMemCmp) +NvS32 +portMemCmp +( + const void *pData0, + const void *pData1, + NvLength lengthBytes +) +{ + const NvU8 *p0 = (const NvU8*)pData0; + const NvU8 *p1 = (const NvU8*)pData1; + PORT_ASSERT_CHECKED(pData0 != NULL); + PORT_ASSERT_CHECKED(pData1 != NULL); + PORT_ASSERT_CHECKED(lengthBytes > 0); + while (lengthBytes--) + { + NvU8 u0 = 
PORT_MEM_RD08(p0++); + NvU8 u1 = PORT_MEM_RD08(p1++); + if (u0 != u1) + return u0 - u1; + } + return 0; +} +#endif + +#if defined(PORT_MEM_USE_GENERIC_portMemSet) +void * +portMemSet +( + void *pData, + NvU8 value, + NvLength lengthBytes +) +{ + NvLength i; + for (i = 0; i < lengthBytes; i++) + { + PORT_MEM_WR08(((NvU8 *)pData)+i, value); + } + return pData; +} +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c new file mode 100644 index 0000000..fd56947 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c @@ -0,0 +1,1340 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief MEMORY module tracking functions implementation + * + */ + +#include "nvport/nvport.h" +#include + +#if !PORT_IS_MODULE_SUPPORTED(debug) +#error "DEBUG module must be present for memory tracking" +#endif + +#if !PORT_IS_MODULE_SUPPORTED(atomic) +#error "ATOMIC module must be present for memory tracking" +#endif + + +struct PORT_MEM_ALLOCATOR_IMPL +{ + PORT_MEM_ALLOCATOR_TRACKING tracking; +}; + +// +// Debug print macros +// +#if PORT_MEM_TRACK_PRINT_LEVEL == PORT_MEM_TRACK_PRINT_LEVEL_SILENT +#define PORT_MEM_PRINT_ERROR(...) +#define PORT_MEM_PRINT_INFO(...) +#elif PORT_MEM_TRACK_PRINT_LEVEL == PORT_MEM_TRACK_PRINT_LEVEL_BASIC +#define PORT_MEM_PRINT_ERROR(...) portDbgPrintf(__VA_ARGS__) +#define PORT_MEM_PRINT_INFO(...) +#else +#define PORT_MEM_PRINT_ERROR(...) portDbgPrintf(__VA_ARGS__) +#define PORT_MEM_PRINT_INFO(...) portDbgPrintf(__VA_ARGS__) +#endif + +// Simple implementation of a spinlock that is going to be used where sync module is not included. 
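+// It is a plain test-and-set lock: acquire spins on an atomic compare-and-swap
+// of 0 -> 1, release stores 0 back, and destroy is a no-op. Unlike the full
+// sync module's spinlock there is no fairness or IRQL bookkeeping here, which
+// is acceptable for the short critical sections used by memory tracking below.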
+#if !PORT_IS_MODULE_SUPPORTED(sync) +typedef volatile NvU32 PORT_SPINLOCK; +static NvLength portSyncSpinlockSize = sizeof(PORT_SPINLOCK); +static NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock) +{ + *pSpinlock = 0; + return NV_OK; +} +static void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock) +{ + while (!portAtomicCompareAndSwapU32(pSpinlock, 1, 0)); +} +static void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock) +{ + portAtomicSetU32(pSpinlock, 0); +} +static void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock) +{ + PORT_UNREFERENCED_VARIABLE(pSpinlock); +} +#endif + +#define PORT_MEM_LOCK_INIT(lock) \ + do { \ + lock = _portMemAllocNonPagedUntracked(portSyncSpinlockSize); \ + portSyncSpinlockInitialize(lock); \ + } while (0) +#define PORT_MEM_LOCK_DESTROY(lock) \ + do { \ + portSyncSpinlockDestroy(lock); \ + _portMemFreeUntracked(lock); \ + } while(0) +#define PORT_MEM_LOCK_ACQUIRE(lock) portSyncSpinlockAcquire(lock) +#define PORT_MEM_LOCK_RELEASE(lock) portSyncSpinlockRelease(lock) + + +// +// List link operation that operates on structures that have pNext and pPrev +// fields. Assumes the root always exists. +// +#define PORT_LOCKED_LIST_LINK(pRoot, pNode, lock) \ + do { \ + PORT_MEM_LOCK_ACQUIRE(lock); \ + (pNode)->pNext = (pRoot); \ + (pNode)->pPrev = (pRoot)->pPrev; \ + (pRoot)->pPrev = (pNode); \ + (pNode)->pPrev->pNext = (pNode); \ + PORT_MEM_LOCK_RELEASE(lock); \ + } while(0) + +#define PORT_LOCKED_LIST_UNLINK(pRoot, pNode, lock) \ + do { \ + PORT_MEM_LOCK_ACQUIRE(lock); \ + (pNode)->pNext->pPrev = (pNode)->pPrev; \ + (pNode)->pPrev->pNext = (pNode)->pNext; \ + PORT_MEM_LOCK_RELEASE(lock); \ + } while (0) + + + +// +// Memory counter implementation +// +#if PORT_MEM_TRACK_USE_COUNTER +static NV_INLINE void +_portMemCounterInit +( + PORT_MEM_COUNTER *pCounter +) +{ + portMemSet(pCounter, 0, sizeof(*pCounter)); +} +static NV_INLINE void +_portMemCounterInc +( + PORT_MEM_COUNTER *pCounter, + NvLength size +) +{ + NvU32 activeAllocs; + NvLength activeSize = 0; + + activeAllocs = portAtomicIncrementU32(&pCounter->activeAllocs); + portAtomicIncrementU32(&pCounter->totalAllocs); + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + activeSize = portAtomicAddSize(&pCounter->activeSize, size); + } + portAtomicAddSize(&pCounter->totalSize, size); + + // Atomically compare the peak value with the active, and update if greater. 
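+    // The two loops below implement a lock-free "store max": re-read the
+    // current peak, stop once the active value no longer exceeds it, and
+    // otherwise try to publish the active value with a compare-and-swap.
+    // A failed swap means another thread moved the peak, so we re-check.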
+ while (1) + { + NvU32 peakAllocs = pCounter->peakAllocs; + if (activeAllocs <= peakAllocs) + break; + portAtomicCompareAndSwapU32(&pCounter->peakAllocs, activeAllocs, peakAllocs); + } + while (1) + { + NvLength peakSize = pCounter->peakSize; + if (activeSize <= peakSize) + break; + portAtomicCompareAndSwapSize(&pCounter->peakSize, activeSize, peakSize); + } +} +static NV_INLINE void +_portMemCounterDec +( + PORT_MEM_COUNTER *pCounter, + void *pMem +) +{ + portAtomicDecrementU32(&pCounter->activeAllocs); + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + portAtomicSubSize(&pCounter->activeSize, + ((PORT_MEM_FENCE_HEAD *)pMem-1)->blockSize); + } +} + +#define PORT_MEM_COUNTER_INIT(pCounter) _portMemCounterInit(pCounter) +#define PORT_MEM_COUNTER_INC(pCounter, size) _portMemCounterInc(pCounter, size) +#define PORT_MEM_COUNTER_DEC(pCounter, pMem) _portMemCounterDec(pCounter, pMem) +#else +#define PORT_MEM_COUNTER_INIT(x) +#define PORT_MEM_COUNTER_INC(x, y) +#define PORT_MEM_COUNTER_DEC(x, y) +#endif // COUNTER + + +// +// Memory fenceposts implementation +// +#if PORT_MEM_TRACK_USE_FENCEPOSTS +#define PORT_MEM_FENCE_HEAD_MAGIC 0x68656164 // 'head' +#define PORT_MEM_FENCE_TAIL_MAGIC 0x7461696c // 'tail' + +static NV_INLINE void +_portMemFenceInit +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem, + NvLength size +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_FOOTER *pTail = (PORT_MEM_FOOTER*)((NvU8*)pMem + size); + + pHead->fence.pAllocator = pAlloc; + pHead->fence.blockSize = size; + pHead->fence.magic = PORT_MEM_FENCE_HEAD_MAGIC; + pTail->fence.magic = PORT_MEM_FENCE_TAIL_MAGIC; +} + +static NV_INLINE void +_portMemFenceCheck +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_FOOTER *pTail = (PORT_MEM_FOOTER*) + ((NvU8*)pMem + pHead->fence.blockSize); + + if (pHead->fence.magic != PORT_MEM_FENCE_HEAD_MAGIC || + pTail->fence.magic != PORT_MEM_FENCE_TAIL_MAGIC) + { + PORT_MEM_PRINT_ERROR("Memory corruption detected on block %p\n", pMem); + PORT_ASSERT_CHECKED(pHead->fence.magic == PORT_MEM_FENCE_HEAD_MAGIC); + PORT_ASSERT_CHECKED(pTail->fence.magic == PORT_MEM_FENCE_TAIL_MAGIC); + } + if (pHead->fence.pAllocator != pAlloc) + { + PORT_MEM_PRINT_ERROR("Freeing block %p using a wrong allocator (%p instead of %p)\n", + pMem, pAlloc, pHead->fence.pAllocator); + PORT_ASSERT_CHECKED(pHead->fence.pAllocator == pAlloc); + + } +} + +#define PORT_MEM_FENCE_CHECK(pAlloc, pMem) _portMemFenceCheck(pAlloc, pMem) +#define PORT_MEM_FENCE_INIT(pAlloc, pMem, size) _portMemFenceInit(pAlloc, pMem, size) +#else +#define PORT_MEM_FENCE_INIT(x, y, z) +#define PORT_MEM_FENCE_CHECK(x, y) +#endif // FENCEPOSTS + + +// +// Memory allocation lists implementation +// +#if PORT_MEM_TRACK_USE_ALLOCLIST +static NV_INLINE void +_portMemListAdd +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_LIST *pList = &pHead->list; + pList->pNext = pList; + pList->pPrev = pList; + if (!portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList, NULL)) + { + PORT_LOCKED_LIST_LINK(pTracking->pFirstAlloc, pList, pTracking->listLock); + } +} +static NV_INLINE void +_portMemListRemove +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_LIST *pList = &pHead->list; + + if (!portAtomicCompareAndSwapSize(&pList->pNext, NULL, pList)) + { + PORT_LOCKED_LIST_UNLINK(pTracking->pFirstAlloc, pList, 
pTracking->listLock);
+    }
+    portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList->pNext, pList);
+}
+
+static NV_INLINE PORT_MEM_HEADER *
+_portMemListGetHeader
+(
+    PORT_MEM_LIST *pList
+)
+{
+    return (PORT_MEM_HEADER*)((NvU8*)pList - (NvUPtr)(&((PORT_MEM_HEADER*)NULL)->list));
+}
+#define PORT_MEM_LIST_INIT(pTracking)                   \
+    do {                                                \
+        (pTracking)->pFirstAlloc = NULL;                \
+        PORT_MEM_LOCK_INIT((pTracking)->listLock);      \
+    } while (0)
+#define PORT_MEM_LIST_DESTROY(pTracking) PORT_MEM_LOCK_DESTROY((pTracking)->listLock)
+#define PORT_MEM_LIST_ADD(pTracking, pMem)    _portMemListAdd(pTracking, pMem)
+#define PORT_MEM_LIST_REMOVE(pTracking, pMem) _portMemListRemove(pTracking, pMem)
+#else
+#define PORT_MEM_LIST_INIT(x)
+#define PORT_MEM_LIST_DESTROY(x)
+#define PORT_MEM_LIST_ADD(x, y)
+#define PORT_MEM_LIST_REMOVE(x, y)
+#endif // ALLOCLIST
+
+
+
+//
+// Memory allocation-caller info implementation
+//
+#if PORT_MEM_TRACK_USE_CALLERINFO
+
+static NV_INLINE void
+_portMemCallerInfoInitMem
+(
+    void               *pMem,
+    PORT_MEM_CALLERINFO callerInfo
+)
+{
+    PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
+    portMemCopy(&pHead->callerInfo, sizeof(callerInfo),
+                &callerInfo,        sizeof(callerInfo));
+}
+static NV_INLINE void
+_portMemCallerInfoInitTracking
+(
+    PORT_MEM_ALLOCATOR_TRACKING *pTracking,
+    PORT_MEM_CALLERINFO          callerInfo
+)
+{
+    portMemCopy(&pTracking->callerInfo, sizeof(callerInfo),
+                &callerInfo,            sizeof(callerInfo));
+}
+
+#define PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking) \
+    _portMemCallerInfoInitTracking(pTracking, PORT_MEM_CALLERINFO_PARAM)
+#define PORT_MEM_CALLERINFO_INIT_MEM(pMem) \
+    _portMemCallerInfoInitMem(pMem, PORT_MEM_CALLERINFO_PARAM)
+
+#if PORT_MEM_TRACK_USE_CALLERINFO_IP
+#if NVCPU_IS_RISCV64
+//
+// Libos has a custom %a format specifier that decodes an instruction pointer
+// into a function / file / line reference when the binary output is decoded.
+//
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ %a\n", x
+#else
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ 0x%016x\n", x
+#endif // NVCPU_IS_RISCV64
+#else
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ %s:%u (%s)\n", x.file, x.line, x.func
+#endif // PORT_MEM_TRACK_USE_CALLERINFO_IP
+
+#else // PORT_MEM_TRACK_USE_CALLERINFO
+#define PORT_MEM_CALLERINFO_INIT_TRACKING(x)
+#define PORT_MEM_CALLERINFO_INIT_MEM(x)
+#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "\n"
+#endif // PORT_MEM_TRACK_USE_CALLERINFO
+
+
+#if PORT_MEM_TRACK_USE_LOGGING
+#include "nvlog/nvlog.h"
+/** @brief Single log entry. Uses 64bit values even on 32bit systems.
*/
+typedef struct PORT_MEM_LOG_ENTRY
+{
+    NvP64 address;
+    NvP64 allocator;
+    NvU64 size; // if size is 0, it is a free() call, not alloc()
+} PORT_MEM_LOG_ENTRY;
+
+#define PORT_MEM_TRACK_LOG_TAG 0x70726d74
+#define PORT_MEM_LOG_ENTRIES 4096
+
+static void
+_portMemLogInit()
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    nvlogAllocBuffer(PORT_MEM_LOG_ENTRIES * sizeof(PORT_MEM_LOG_ENTRY),
+                     DRF_DEF(LOG, _BUFFER_FLAGS, _FORMAT, _MEMTRACK),
+                     PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+}
+
+static void
+_portMemLogDestroy()
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+    nvlogDeallocBuffer(hBuffer);
+}
+
+static void
+_portMemLogAdd
+(
+    PORT_MEM_ALLOCATOR *pAllocator,
+    void *pMem,
+    NvLength lengthBytes
+)
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    PORT_MEM_LOG_ENTRY entry = {0};
+    entry.address   = NV_PTR_TO_NvP64(pMem);
+    entry.allocator = NV_PTR_TO_NvP64(pAllocator);
+    entry.size      = lengthBytes;
+    nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+    nvlogWriteToBuffer(hBuffer, &entry, sizeof(entry));
+}
+
+#define PORT_MEM_LOG_INIT()    _portMemLogInit()
+#define PORT_MEM_LOG_DESTROY() _portMemLogDestroy()
+#define PORT_MEM_LOG_ALLOC(pAlloc, pMem, size) \
+    _portMemLogAdd(pAlloc, pMem, size)
+#define PORT_MEM_LOG_FREE(pAlloc, pMem) \
+    _portMemLogAdd(pAlloc, pMem, 0)
+#else
+#define PORT_MEM_LOG_INIT()
+#define PORT_MEM_LOG_DESTROY()
+#define PORT_MEM_LOG_ALLOC(x, y, z)
+#define PORT_MEM_LOG_FREE(x, y)
+#endif // LOGGING
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Main memory tracking implementation
+//
+////////////////////////////////////////////////////////////////////////////////
+
+//
+// All static function declarations. Definitions at the end of file.
+//
+static void *_portMemAllocatorAllocPagedWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void *_portMemAllocatorAllocNonPagedWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void _portMemAllocatorFreeWrapper(PORT_MEM_ALLOCATOR *pAlloc, void *pMem);
+static void _portMemAllocatorReleaseWrapper(PORT_MEM_ALLOCATOR *pAlloc);
+
+static PORT_MEM_ALLOCATOR *_portMemAllocatorCreateOnExistingBlock(void *pAlloc, NvLength blockSizeBytes, void *pSpinlock PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM);
+static void *_portMemAllocatorAllocExistingWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void _portMemAllocatorFreeExistingWrapper(PORT_MEM_ALLOCATOR *pAlloc, void *pMem);
+
+static void _portMemTrackingRelease(PORT_MEM_ALLOCATOR_TRACKING *pTracking);
+static void _portMemTrackAlloc(PORT_MEM_ALLOCATOR_TRACKING *pTracking, void *pMem, NvLength size PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM);
+static void _portMemTrackFree(PORT_MEM_ALLOCATOR_TRACKING *pTracking, void *pMem);
+
+
+
+#if PORT_MEM_TRACK_USE_CALLERINFO
+#undef portMemAllocPaged
+#undef portMemAllocNonPaged
+#undef portMemAllocatorCreatePaged
+#undef portMemAllocatorCreateNonPaged
+#undef portMemInitializeAllocatorTracking
+#undef _portMemAllocatorAlloc
+#undef portMemAllocatorCreateOnExistingBlock
+#undef portMemExAllocatorCreateLockedOnExistingBlock
+// These functions have different names if CallerInfo is enabled.
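+// The public nvport header defines each of these names as a macro that
+// appends the caller's file/line/function (or instruction pointer) to the
+// argument list. The #undefs above strip those macros so the definitions
+// below can be emitted under the _CallerInfo symbol names they map to.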
+#define portMemAllocPaged portMemAllocPaged_CallerInfo +#define portMemAllocNonPaged portMemAllocNonPaged_CallerInfo +#define portMemAllocatorCreatePaged portMemAllocatorCreatePaged_CallerInfo +#define portMemAllocatorCreateNonPaged portMemAllocatorCreateNonPaged_CallerInfo +#define portMemInitializeAllocatorTracking portMemInitializeAllocatorTracking_CallerInfo +#define _portMemAllocatorAlloc _portMemAllocatorAlloc_CallerInfo +#define portMemAllocatorCreateOnExistingBlock portMemAllocatorCreateOnExistingBlock_CallerInfo +#define portMemExAllocatorCreateLockedOnExistingBlock portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo +#endif + +// +// All memory tracking globals are contained in this structure +// +static struct PORT_MEM_GLOBALS +{ + PORT_MEM_ALLOCATOR_TRACKING mainTracking; + void *trackingLock; + struct + { + PORT_MEM_ALLOCATOR paged; + PORT_MEM_ALLOCATOR nonPaged; + PORT_MEM_ALLOCATOR_IMPL pagedImpl; + PORT_MEM_ALLOCATOR_IMPL nonPagedImpl; + } alloc; + NvU32 initCount; + NvU32 totalAllocators; +} portMemGlobals; + +static NV_INLINE PORT_MEM_ALLOCATOR_TRACKING * +_portMemGetTracking +( + const PORT_MEM_ALLOCATOR *pAlloc +) +{ + if (pAlloc == NULL) + return &portMemGlobals.mainTracking; + else + return pAlloc->pTracking; +} + + +void +portMemInitialize(void) +{ +#if PORT_MEM_TRACK_USE_CALLERINFO + PORT_MEM_CALLERINFO_TYPE_PARAM = PORT_MEM_CALLERINFO_MAKE; +#endif + if (portAtomicIncrementU32(&portMemGlobals.initCount) != 1) + return; + + portMemGlobals.mainTracking.pAllocator = NULL; + portMemGlobals.mainTracking.pNext = &portMemGlobals.mainTracking; + portMemGlobals.mainTracking.pPrev = &portMemGlobals.mainTracking; + PORT_MEM_COUNTER_INIT(&portMemGlobals.mainTracking.counter); + PORT_MEM_LIST_INIT(&portMemGlobals.mainTracking); + PORT_MEM_LOCK_INIT(portMemGlobals.trackingLock); + + portMemGlobals.alloc.paged._portAlloc = _portMemAllocatorAllocPagedWrapper; + portMemGlobals.alloc.nonPaged._portAlloc = _portMemAllocatorAllocNonPagedWrapper; + portMemGlobals.alloc.paged._portFree = _portMemAllocatorFreeWrapper; + portMemGlobals.alloc.nonPaged._portFree = _portMemAllocatorFreeWrapper; + portMemGlobals.alloc.paged._portRelease = NULL; + portMemGlobals.alloc.nonPaged._portRelease = NULL; + + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + portMemGlobals.alloc.paged.pImpl = &portMemGlobals.alloc.pagedImpl; + portMemGlobals.alloc.nonPaged.pImpl = &portMemGlobals.alloc.nonPagedImpl; + + portMemInitializeAllocatorTracking(&portMemGlobals.alloc.paged, + &portMemGlobals.alloc.paged.pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + portMemInitializeAllocatorTracking(&portMemGlobals.alloc.nonPaged, + &portMemGlobals.alloc.nonPaged.pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + } + else + { + // Use the same impl for both paged and nonpaged. 
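+        // Without fenceposts a freed pointer cannot be traced back to the
+        // allocator that produced it (see portMemFree below), so one shared
+        // tracking structure covers both global allocators.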
+ portMemGlobals.alloc.paged.pImpl = &portMemGlobals.alloc.pagedImpl; + portMemGlobals.alloc.nonPaged.pImpl = &portMemGlobals.alloc.pagedImpl; + portMemInitializeAllocatorTracking(NULL, + &portMemGlobals.alloc.pagedImpl.tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + portMemGlobals.alloc.paged.pTracking = &portMemGlobals.alloc.pagedImpl.tracking; + portMemGlobals.alloc.nonPaged.pTracking = &portMemGlobals.alloc.pagedImpl.tracking; + } + PORT_MEM_LOG_INIT(); +} +void +portMemShutdown(NvBool bForceSilent) +{ + PORT_UNREFERENCED_VARIABLE(bForceSilent); + if (portAtomicDecrementU32(&portMemGlobals.initCount) != 0) + return; + +#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT) + if (!bForceSilent) + { + portMemPrintTrackingInfo(NULL); + } +#endif + PORT_MEM_LOG_DESTROY(); + + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + _portMemTrackingRelease(&portMemGlobals.alloc.nonPaged.pImpl->tracking); + _portMemTrackingRelease(&portMemGlobals.alloc.paged.pImpl->tracking); + } + else + { + _portMemTrackingRelease(&portMemGlobals.alloc.pagedImpl.tracking); + } + + PORT_MEM_LOCK_DESTROY(portMemGlobals.trackingLock); + PORT_MEM_LIST_DESTROY(&portMemGlobals.mainTracking); + portMemSet(&portMemGlobals, 0, sizeof(portMemGlobals)); +} + + +void * +portMemAllocPaged +( + NvLength length + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalPaged(); + return _portMemAllocatorAlloc(pAlloc, length PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +void * +portMemAllocNonPaged +( + NvLength length + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalNonPaged(); + return _portMemAllocatorAlloc(pAlloc, length PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +void +portMemFree +( + void *pMem +) +{ + if (pMem != NULL) + { +#if PORT_MEM_TRACK_USE_FENCEPOSTS + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_FREE(pHead->fence.pAllocator, pMem); +#else + // Paged/nonpaged are logged together if we don't have fenceposts. 
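+        // In that configuration the paged and nonpaged allocators share one
+        // tracking structure (see portMemInitialize above), so routing every
+        // free through the paged allocator keeps the counters consistent.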
+ PORT_FREE(portMemAllocatorGetGlobalPaged(), pMem); +#endif + } + +#if defined(__COVERITY__) + __coverity_free__(pMem); +#endif +} + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreatePaged(PORT_MEM_CALLERINFO_TYPE_PARAM) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = portMemAllocPaged(PORT_MEM_ALLOCATOR_SIZE + PORT_MEM_CALLERINFO_COMMA_PARAM); + if (pAllocator == NULL) + return NULL; + + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + pAllocator->_portAlloc = _portMemAllocatorAllocPagedWrapper; + pAllocator->_portFree = _portMemAllocatorFreeWrapper; + pAllocator->_portRelease = _portMemAllocatorReleaseWrapper; + portMemInitializeAllocatorTracking(pAllocator, &pAllocator->pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + + PORT_MEM_PRINT_INFO("Acquired paged allocator %p ", pAllocator); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + + return pAllocator; +} + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreateNonPaged(PORT_MEM_CALLERINFO_TYPE_PARAM) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = portMemAllocNonPaged(PORT_MEM_ALLOCATOR_SIZE + PORT_MEM_CALLERINFO_COMMA_PARAM); + if (pAllocator == NULL) + return NULL; + + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + pAllocator->_portAlloc = _portMemAllocatorAllocNonPagedWrapper; + pAllocator->_portFree = _portMemAllocatorFreeWrapper; + pAllocator->_portRelease = _portMemAllocatorReleaseWrapper; + portMemInitializeAllocatorTracking(pAllocator, &pAllocator->pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + + PORT_MEM_PRINT_INFO("Acquired nonpaged allocator %p ", pAllocator); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + return pAllocator; +} + + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreateOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + return _portMemAllocatorCreateOnExistingBlock(pPreallocatedBlock, blockSizeBytes, + NULL PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +PORT_MEM_ALLOCATOR * +portMemExAllocatorCreateLockedOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes, + void *pSpinlock + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + return _portMemAllocatorCreateOnExistingBlock(pPreallocatedBlock, blockSizeBytes, + pSpinlock PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +void +portMemAllocatorRelease +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + if (pAllocator == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + _portMemTrackingRelease(pAllocator->pTracking); + PORT_MEM_PRINT_INFO("Released allocator %p\n", pAllocator); + + if (pAllocator->_portRelease != NULL) + pAllocator->_portRelease(pAllocator); +} + + +PORT_MEM_ALLOCATOR * +portMemAllocatorGetGlobalNonPaged(void) +{ + return &portMemGlobals.alloc.nonPaged; +} +PORT_MEM_ALLOCATOR * +portMemAllocatorGetGlobalPaged(void) +{ + return &portMemGlobals.alloc.paged; +} + +void +portMemInitializeAllocatorTracking +( + PORT_MEM_ALLOCATOR *pAlloc, + PORT_MEM_ALLOCATOR_TRACKING *pTracking + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + if (portMemGlobals.initCount == 0) + { + portMemSet(pTracking, 0, sizeof(*pTracking)); + if (pAlloc != NULL) + pAlloc->pTracking = NULL; + return; + } + + pTracking->pAllocator = pAlloc; + if (pAlloc != NULL) + pAlloc->pTracking = pTracking; + PORT_LOCKED_LIST_LINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock); + PORT_MEM_COUNTER_INIT(&pTracking->counter); + PORT_MEM_LIST_INIT(pTracking); + PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking); + 
portAtomicIncrementU32(&portMemGlobals.totalAllocators); +} + +void * +_portMemAllocatorAlloc +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + void *pMem = NULL; + if (pAlloc == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return NULL; + } + if (length > 0) + { + NvLength paddedLength; +// RISCV64 requires 64-bit alignment of structures, and length indicates the alignment of the footer +#if defined(__riscv) + if (PORT_MEM_STAGING_SIZE > 0 && (length & 7)) + { + if (!portSafeAddLength(length & ~7, 8, &length)) + { + return NULL; + } + } +#endif + if (!portSafeAddLength(length, PORT_MEM_STAGING_SIZE, &paddedLength)) + { + return NULL; + } + pMem = pAlloc->_portAlloc(pAlloc, paddedLength); + } + if (pMem != NULL) + { + pMem = PORT_MEM_ADD_HEADER_PTR(pMem); + _portMemTrackAlloc(_portMemGetTracking(pAlloc), pMem, length + PORT_MEM_CALLERINFO_COMMA_PARAM); + } + return pMem; +} +void +_portMemAllocatorFree +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + if (pAlloc == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + if (pMem != NULL) + { + _portMemTrackFree(_portMemGetTracking(pAlloc), pMem); + pMem = PORT_MEM_SUB_HEADER_PTR(pMem); + pAlloc->_portFree(pAlloc, pMem); + } +} + +void +portMemPrintTrackingInfo +( + const PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + + portDbgPrintf("[NvPort] *************************************************\n"); + + if (pAllocator == NULL) + { + portDbgPrintf("NvPort memory tracking information for all allocations:\n"); + } + + if (pTracking == NULL) + { + portDbgPrintf("Allocator %p initialized before portMemInitialize(); no tracking info.\n", pAllocator); + return; + } + + for (;;) + { + if (pTracking->pAllocator == NULL) + { + portDbgPrintf("NULL allocator for tracker %p:\n", pTracking); + goto next_tracking; + } + + portDbgPrintf("NvPort memory tracking information for allocator %p:\n", + pTracking->pAllocator); + +#if PORT_MEM_TRACK_USE_CALLERINFO + { + portDbgPrintf(" Allocator acquired " + PORT_MEM_CALLERINFO_PRINT_ARGS(pTracking->callerInfo)); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); + + portMemExTrackingGetActiveStats(pTracking->pAllocator, &stats); + + // + // rmtest_gsp test script (dvs_gsp_sanity.sh) depends on this print, so do not change + // format without updating script! 
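+            // The "useful" figure counts bytes callers requested; "meta" is
+            // the per-allocation PORT_MEM_STAGING_SIZE tracking overhead
+            // added on top (see portMemExTrackingGetActiveStats below).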
+ // + portDbgPrintf("ACTIVE: %u allocations, %llu bytes allocated (%llu useful, %llu meta)\n", + stats.numAllocations, + (NvU64) stats.allocatedSize, + (NvU64) stats.usefulSize, + (NvU64) stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetTotalStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); + + portMemExTrackingGetTotalStats(pTracking->pAllocator, &stats); + portDbgPrintf("TOTAL: %u allocations, %llu bytes allocated (%llu useful, %llu meta)\n", + stats.numAllocations, + (NvU64) stats.allocatedSize, + (NvU64) stats.usefulSize, + (NvU64) stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetPeakStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); + + portMemExTrackingGetPeakStats(pTracking->pAllocator, &stats); + portDbgPrintf("PEAK: %u allocations, %llu bytes allocated (%llu useful, %llu meta)\n", + stats.numAllocations, + (NvU64) stats.allocatedSize, + (NvU64) stats.usefulSize, + (NvU64) stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetNext) + { + PORT_MEM_TRACK_ALLOC_INFO info; + NvBool bPrinted = NV_FALSE; + void *iterator = NULL; + + do + { + if (portMemExTrackingGetNext(pTracking->pAllocator, &info, &iterator) != NV_OK) + { + portDbgPrintf("(no active allocations)\n"); + break; + } + else if (!bPrinted) + { + portDbgPrintf("Currently active allocations:\n"); + bPrinted = NV_TRUE; + } + portDbgPrintf(" - A:%p - 0x%p [%8llu bytes] T=%llu ", + info.pAllocator, + info.pMemory, + (NvU64)info.size, + info.timestamp); + portDbgPrintf(PORT_MEM_CALLERINFO_PRINT_ARGS(info.callerInfo)); + } while (iterator != NULL); + } +#endif + +next_tracking: + portDbgPrintf("[NvPort] *************************************************\n"); + + if ((pAllocator != NULL) || (pTracking->pNext == &portMemGlobals.mainTracking)) + break; + + pTracking = pTracking->pNext; + } +} + +#if portMemExTrackingGetActiveStats_SUPPORTED +NV_STATUS +portMemExTrackingGetActiveStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.activeAllocs; + pStats->usefulSize = pTracking->counter.activeSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif + +#if portMemExTrackingGetTotalStats_SUPPORTED +NV_STATUS +portMemExTrackingGetTotalStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.totalAllocs; + pStats->usefulSize = pTracking->counter.totalSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif + +#if portMemExTrackingGetPeakStats_SUPPORTED +NV_STATUS +portMemExTrackingGetPeakStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.peakAllocs; + pStats->usefulSize = 
pTracking->counter.peakSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif + +#if portMemExTrackingGetNext_SUPPORTED +NV_STATUS +portMemExTrackingGetNext +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOC_INFO *pInfo, + void **pIterator +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + PORT_MEM_LIST *pList; + PORT_MEM_HEADER *pHead; + + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (pTracking->pFirstAlloc == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (*pIterator == NULL) + pList = pTracking->pFirstAlloc; + else + pList = (PORT_MEM_LIST*)(*pIterator); + + pHead = _portMemListGetHeader(pList); + + // Advance iterator + if (pList->pNext == pTracking->pFirstAlloc) + *pIterator = NULL; + else + *pIterator = pList->pNext; + + // Populate pInfo + pInfo->pMemory = pHead + 1; + pInfo->size = pHead->fence.blockSize; + pInfo->pAllocator = pHead->fence.pAllocator; + pInfo->timestamp = 0; + +#if PORT_MEM_TRACK_USE_CALLERINFO + pInfo->callerInfo = pHead->callerInfo; +#endif + + return NV_OK; +} +#endif + +static void +_portMemTrackingRelease +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking +) +{ + if (pTracking == NULL) return; + + if (pTracking->counter.activeAllocs != 0) + { + PORT_MEM_PRINT_ERROR("Allocator %p released with memory allocations\n", pTracking->pAllocator); +#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT) + portMemPrintTrackingInfo(pTracking->pAllocator); +#endif + } + + PORT_LOCKED_LIST_UNLINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock); + PORT_MEM_LIST_DESTROY(pTracking); + portAtomicDecrementU32(&portMemGlobals.totalAllocators); +} + +static void +_portMemTrackAlloc +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem, + NvLength size + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_UNREFERENCED_VARIABLE(pMem); + if (pTracking == NULL) return; + PORT_MEM_PRINT_INFO("Allocating %u bytes at address %p", size, pMem); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + + PORT_MEM_COUNTER_INC(&pTracking->counter, size); + PORT_MEM_COUNTER_INC(&portMemGlobals.mainTracking.counter, size); + + PORT_MEM_FENCE_INIT(pTracking->pAllocator, pMem, size); + PORT_MEM_LIST_ADD(pTracking, pMem); + PORT_MEM_CALLERINFO_INIT_MEM(pMem); + PORT_MEM_LOG_ALLOC(pTracking->pAllocator, pMem, size); +} + +static void +_portMemTrackFree +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem +) +{ + if (pTracking == NULL) return; + PORT_MEM_PRINT_INFO("Freeing block at address %p\n", pMem); + + PORT_MEM_COUNTER_DEC(&pTracking->counter, pMem); + PORT_MEM_COUNTER_DEC(&portMemGlobals.mainTracking.counter, pMem); + + PORT_MEM_FENCE_CHECK(pTracking->pAllocator, pMem); + PORT_MEM_LIST_REMOVE(pTracking, pMem); + PORT_MEM_LOG_FREE(pTracking->pAllocator, pMem); +} + + +static void * +_portMemAllocatorAllocPagedWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length +) +{ + PORT_UNREFERENCED_VARIABLE(pAlloc); + return _portMemAllocPagedUntracked(length); +} + +static void * +_portMemAllocatorAllocNonPagedWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length +) +{ + PORT_UNREFERENCED_VARIABLE(pAlloc); + return _portMemAllocNonPagedUntracked(length); +} + +static void +_portMemAllocatorFreeWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + PORT_UNREFERENCED_VARIABLE(pAlloc); + _portMemFreeUntracked(pMem); +} + +static void 
+_portMemAllocatorReleaseWrapper +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + portMemFree(pAllocator); +} + +/// @todo Add these as intrinsics to UTIL module +static NV_INLINE NvBool _isBitSet(NvU32 *vect, NvU32 bit) +{ + return !!(vect[bit/32] & NVBIT32(bit%32)); +} +static NV_INLINE void _setBit(NvU32 *vect, NvU32 bit) +{ + vect[bit/32] |= NVBIT32(bit%32); +} +static NV_INLINE void _clearBit(NvU32 *vect, NvU32 bit) +{ + vect[bit/32] &= ~NVBIT32(bit%32); +} + +static PORT_MEM_ALLOCATOR * +_portMemAllocatorCreateOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes, + void *pSpinlock + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_MEM_ALLOCATOR *pAllocator = (PORT_MEM_ALLOCATOR *)pPreallocatedBlock; + PORT_MEM_BITVECTOR *pBitVector; + PORT_MEM_BITVECTOR_CHUNK *pLastChunkInBlock; + NvU32 bitVectorSize; + + if ((pPreallocatedBlock == NULL) || + (blockSizeBytes < PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE) || + (blockSizeBytes > NV_S32_MAX)) + { + return NULL; + } + + pAllocator->_portAlloc = _portMemAllocatorAllocExistingWrapper; + pAllocator->_portFree = _portMemAllocatorFreeExistingWrapper; + pAllocator->_portRelease = NULL; + pAllocator->pTracking = NULL; // No tracking for this allocator + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + + pBitVector = (PORT_MEM_BITVECTOR*)(pAllocator->pImpl); + pBitVector->pSpinlock = pSpinlock; + + // Calculate total number of chunks available + pBitVector->pChunks = (PORT_MEM_BITVECTOR_CHUNK *)(pBitVector + 1); + pBitVector->pChunks = (void*)NV_ALIGN_UP((NvUPtr)pBitVector->pChunks, + (NvUPtr)PORT_MEM_BITVECTOR_CHUNK_SIZE); + + pLastChunkInBlock = (void*)NV_ALIGN_DOWN((NvUPtr)pPreallocatedBlock + + blockSizeBytes - + PORT_MEM_BITVECTOR_CHUNK_SIZE, + (NvUPtr)PORT_MEM_BITVECTOR_CHUNK_SIZE); + if (pLastChunkInBlock < pBitVector->pChunks) + { + pBitVector->numChunks = 0; + } + else + { + pBitVector->numChunks = (NvU32)(pLastChunkInBlock - pBitVector->pChunks) + 1; + } + bitVectorSize = (NvU32)((NvU8*)pBitVector->pChunks - (NvU8*)pBitVector->bits); + + while (bitVectorSize*8 < pBitVector->numChunks*2) + { + // If too many chunks to track in current bit vector, increase bitvector by one chunk + pBitVector->pChunks++; + pBitVector->numChunks--; + bitVectorSize = (NvU32)((NvU8*)pBitVector->pChunks - (NvU8*)pBitVector->bits); + } + portMemSet(pBitVector->bits, 0, bitVectorSize); + + PORT_MEM_PRINT_INFO("Acquired preallocated block allocator %p (%llu bytes) ", pAllocator, (NvU64)blockSizeBytes); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + return pAllocator; +} + +static void * +_portMemAllocatorAllocExistingWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length +) +{ + NvU32 chunksNeeded = (NvU32)NV_DIV_AND_CEIL(length, PORT_MEM_BITVECTOR_CHUNK_SIZE); + void *pMem = NULL; + NvU32 chunksFound = 0; + NvU32 i; + PORT_MEM_BITVECTOR *pBitVector = (PORT_MEM_BITVECTOR*)(pAlloc->pImpl); + PORT_SPINLOCK *pSpinlock = (PORT_SPINLOCK*)(pBitVector->pSpinlock); + + if (chunksNeeded > pBitVector->numChunks) + { + return NULL; + } + if (pSpinlock != NULL) + { + portSyncSpinlockAcquire(pSpinlock); + } + for (i = 0; i < pBitVector->numChunks; i++) + { + NvBool bWholeWordSet; + bWholeWordSet = pBitVector->bits[i/32] == ~0U; + if (bWholeWordSet || (_isBitSet(pBitVector->bits, i))) + { + // Chunk not available as whole. + chunksFound = 0; + // Skip fully set words + if (bWholeWordSet) + { + i += 31; + } + if (chunksNeeded > (pBitVector->numChunks - i - (bWholeWordSet ? 
1 : 0))) + { + break; + } + } + else + { + chunksFound++; + if (chunksFound == chunksNeeded) + { + NvU32 j; + NvU32 firstAllocatedChunk = i - chunksFound + 1; + + pMem = pBitVector->pChunks[firstAllocatedChunk]; + // Mark all acquired chunks as occupied + for (j = firstAllocatedChunk; j <= i; j++) + { + _setBit(pBitVector->bits, j); + } + // Mark last chunk of allocation + _setBit(pBitVector->bits, pBitVector->numChunks + i); + break; + } + } + } + if (pSpinlock != NULL) + { + portSyncSpinlockRelease(pSpinlock); + } + return pMem; +} + +static void +_portMemAllocatorFreeExistingWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + PORT_MEM_BITVECTOR_CHUNK *pChunk = (PORT_MEM_BITVECTOR_CHUNK *)pMem; + NvU32 i; + PORT_MEM_BITVECTOR *pBitVector = (PORT_MEM_BITVECTOR*)(pAlloc->pImpl); + PORT_SPINLOCK *pSpinlock = (PORT_SPINLOCK*)(pBitVector->pSpinlock); + + if (((NvUPtr)pMem < (NvUPtr)pBitVector->pChunks) || + ((NvUPtr)pMem > (NvUPtr)(pBitVector->pChunks + pBitVector->numChunks))) + { + // pMem not inside this allocator. + PORT_BREAKPOINT_CHECKED(); + return; + } + + if (pSpinlock != NULL) + { + portSyncSpinlockAcquire(pSpinlock); + } + for (i = (NvU32)(pChunk - pBitVector->pChunks); i < pBitVector->numChunks; i++) + { + // Mark chunk as free + _clearBit(pBitVector->bits, i); + if (_isBitSet(pBitVector->bits, pBitVector->numChunks + i)) + { + // Clear last-allocation-bit and bail + _clearBit(pBitVector->bits, pBitVector->numChunks + i); + break; + } + } + if (pSpinlock != NULL) + { + portSyncSpinlockRelease(pSpinlock); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c new file mode 100644 index 0000000..70fb5ca --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c @@ -0,0 +1,206 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/** + * @file + * @brief MEMORY module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." 
+#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +/** + * @note All kernel memory in unix is non-paged. + */ +void * +_portMemAllocPagedUntracked +( + NvLength lengthBytes +) +{ + return _portMemAllocNonPagedUntracked(lengthBytes); +} + +void * +_portMemAllocNonPagedUntracked +( + NvLength lengthBytes +) +{ + void *pMem = NULL; + PORT_ASSERT_CHECKED(lengthBytes > 0); + if (lengthBytes > 0) + os_alloc_mem(&pMem, lengthBytes); + return pMem; +} + + + +void +_portMemFreeUntracked +( + void *pData +) +{ + if (pData != NULL) + { + os_free_mem(pData); + } +} + +void * +portMemCopy +( + void *pDestination, + NvLength destSize, + const void *pSource, + NvLength srcSize +) +{ + // API guarantees this is a NOP when destSize==0 + if (destSize == 0) + { + return pDestination; + } + + PORT_ASSERT_CHECKED(pDestination != NULL); + PORT_ASSERT_CHECKED(pSource != NULL); + PORT_ASSERT_CHECKED(srcSize <= destSize); + PORT_ASSERT_CHECKED(!portUtilCheckOverlap(pDestination, destSize, + pSource, srcSize)); + + if ((pSource == NULL) || (pDestination == NULL) || (srcSize > destSize)) + { + return NULL; + } + return os_mem_copy(pDestination, pSource, srcSize); +} + + +void * +portMemSet +( + void *pData, + NvU8 value, + NvLength lengthBytes +) +{ + if (lengthBytes == 0) + { + return pData; + } + if (pData == NULL) + { + return pData; + } + return os_mem_set(pData, value, lengthBytes); +} + +NvS32 +portMemCmp +( + const void *pData0, + const void *pData1, + NvLength lengthBytes +) +{ + if (lengthBytes == 0) + { + return 0; + } + if ((pData0 == NULL) || (pData1 == NULL)) + { + return -1; + } + return os_mem_cmp(pData0, pData1, lengthBytes); +} + + + +#define PORT_MEM_USE_GENERIC_portMemSetPattern +#define PORT_MEM_USE_GENERIC_portMemMove +#include "memory_generic.h" + +NV_STATUS +portMemExCopyFromUser +( + const NvP64 pUser, + void *pKernel, + NvLength lengthBytes +) +{ + if (pKernel == NULL) + { + return NV_ERR_INVALID_POINTER; + } + if (lengthBytes == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + return os_memcpy_from_user(pKernel, NvP64_VALUE(pUser), lengthBytes); +} + +NV_STATUS +portMemExCopyToUser +( + const void *pKernel, + NvP64 pUser, + NvLength lengthBytes +) +{ + if (pKernel == NULL) + { + return NV_ERR_INVALID_POINTER; + } + if (lengthBytes == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + return os_memcpy_to_user(NvP64_VALUE(pUser), (void*)pKernel, lengthBytes); +} + +NvLength +portMemExGetPageSize(void) +{ + return os_page_size; +} + +// Large allocations (>KMALLOC_LIMIT) will fail, but it is safe to call +NvBool +portMemExSafeForPagedAlloc(void) +{ + return NV_TRUE; +} +NvBool +portMemExSafeForNonPagedAlloc(void) +{ + return NV_TRUE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c new file mode 100644 index 0000000..c576ea8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c @@ -0,0 +1,274 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief STRING module implementation for platforms without stdlib support + */ + +#include "nvport/nvport.h" +#include "nvmisc.h" + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringLength +NvLength +portStringLength +( + const char *str +) +{ + const char *begin = str; + + PORT_ASSERT_CHECKED(str != NULL); + + while ('\0' != *str) str++; + + return str - begin; +} + +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringLengthSafe +NvLength +portStringLengthSafe +( + const char *str, + NvLength maxLength +) +{ + const char *begin = str; + + PORT_ASSERT_CHECKED(str != NULL); + + while ((0 != maxLength--) && ('\0' != *str)) + str++; + + return str - begin; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCompare +NvS32 +portStringCompare +( + const char *str1, + const char *str2, + NvLength maxLength +) +{ + NvLength length; + + PORT_ASSERT_CHECKED(str1 != NULL); + PORT_ASSERT_CHECKED(str2 != NULL); + + length = portStringLengthSafe(str1, maxLength); + + // Add 1 for the null terminator. + if (length < maxLength) + length++; + + return portMemCmp(str1, str2, length); +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCopy +NvLength +portStringCopy +( + char *dest, + NvLength destSize, + const char *src, + NvLength srcSize +) +{ + NvLength minCopyLength; + NvLength srcLen; + + PORT_ASSERT_CHECKED(dest != NULL); + PORT_ASSERT_CHECKED(src != NULL); + + PORT_ASSERT_CHECKED(destSize != 0); + PORT_ASSERT_CHECKED(srcSize != 0); + + srcLen = portStringLengthSafe(src, srcSize); + if (srcLen == srcSize) srcLen--; + + minCopyLength = NV_MIN(destSize, srcLen + 1); + + PORT_ASSERT_CHECKED(minCopyLength != 0); + + if (minCopyLength > 1) + portMemCopy(dest, destSize, src, minCopyLength - 1); + + dest[minCopyLength - 1] = '\0'; + + return minCopyLength; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCat +char * +portStringCat +( + char *str, + NvLength strSize, + const char *cat, + NvLength catSize +) +{ + NvLength strLen; + NvLength catLen; + NvLength minCatLength; + char* begin; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(cat != NULL); + + strLen = portStringLengthSafe(str, strSize); + catLen = portStringLengthSafe(cat, catSize); + + // In case of no NULL terminating char in cat. 
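+    // (portStringLengthSafe() returns catSize when it finds no terminator in
+    // the first catSize bytes; trimming one keeps room for the '\0' that is
+    // written below.)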
+ if (catLen == catSize) catLen--; + + minCatLength = NV_MIN(strSize - strLen, catLen + 1); + if (0 == minCatLength) + return str; + + begin = str; + str = str + strLen; + + // strncat doesn't count NULL char. + if (minCatLength > 1) + portMemCopy(str, strSize, cat, minCatLength - 1); + + begin[strLen + minCatLength - 1] = '\0'; + return begin; +} + +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringConvertAsciiToUtf16 +NvLength +portStringConvertAsciiToUtf16 +( + NvU16 *dest, + NvLength destSize, + const char *src, + NvLength srcSize +) +{ + NvLength i, len; + + PORT_ASSERT_CHECKED(dest != NULL); + PORT_ASSERT_CHECKED(src != NULL); + + if (destSize == 0) + return 0; + + len = portStringLengthSafe(src, srcSize); + if (len >= destSize) + len = destSize - 1; + + i = len; + while (i-- > 0) + dest[i] = src[i]; + + dest[len] = 0; + return len; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringBufferToHex +NvLength +portStringBufferToHex +( + char *str, + NvLength strSize, + const NvU8 *buf, + NvLength bufSize +) +{ + NvLength i, len; + + if (strSize == 0) + return 0; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(buf != NULL); + + len = bufSize * 2; + if (len >= strSize) + len = strSize - 1; + + for (i = 0; i < len; i++) + { + NvU8 n = (i % 2) ? (buf[i/2] & 0xF) : (buf[i/2] >> 4); + str[i] = (n < 0xA) ? ('0' + n) : ('a' + n - 0xA); + } + str[len] = 0; + return len; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringBufferToHexGroups +NvLength +portStringBufferToHexGroups +( + char *str, + NvLength strSize, + const NvU8 *buf, + NvLength bufSize, + NvLength groupCount, + const NvU32 *groups, + const char *separator +) +{ + NvLength group, sepLength, written; + + if (strSize == 0) + return 0; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(buf != NULL); + PORT_ASSERT_CHECKED(groups != NULL); + PORT_ASSERT_CHECKED(separator != NULL); + + sepLength = portStringLength(separator); + + for (written = 0, group = 0; (group < groupCount) && (written < (strSize - 1)); group++) + { + NvLength groupSize = NV_MIN(groups[group] / 2, bufSize); + written += portStringBufferToHex(str + written, strSize - written, buf, groupSize); + buf += groupSize; + bufSize -= groupSize; + + if (group != groupCount - 1) + { + portMemCopy(str + written, strSize - written, separator, sepLength); + written += sepLength; + } + } + + str[written] = 0; + return written; +} +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h new file mode 100644 index 0000000..5f956f0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC custom rwlock struct implementations + * + */ + +#ifndef _NVPORT_SYNC_RWLOCK_DEF_H_ +#define _NVPORT_SYNC_RWLOCK_DEF_H_ + +struct PORT_RWLOCK +{ + PORT_SEMAPHORE *pSemRead; + PORT_SEMAPHORE *pSemWrite; + volatile NvS32 numReaders; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h new file mode 100644 index 0000000..a28201d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief SYNC unix kernel struct implementations + * + */ + +#ifndef _NVPORT_SYNC_UNIX_KERNEL_DEF_H_ +#define _NVPORT_SYNC_UNIX_KERNEL_DEF_H_ + +#include "os-interface.h" + +struct PORT_SPINLOCK +{ + void *lock; + NvU64 oldIrql; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +struct PORT_MUTEX +{ + void *mutex; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +struct PORT_SEMAPHORE +{ + void *sem; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +#endif diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h new file mode 100644 index 0000000..babb90f --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC common function implementations + * + * The portSyncXxxCreate functions have the same implementation, so they are + * extracted here instead of repeated in every file. 
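+ *
+ * Illustrative usage sketch (editorial note, not part of the original
+ * change): each portSyncXxxCreate helper below allocates the object from the
+ * given allocator, runs the matching portSyncXxxInitialize, and records the
+ * allocator so that the corresponding portSyncXxxDestroy can free it. A
+ * caller would typically pair them like:
+ *
+ *   PORT_MUTEX *pMutex = portSyncMutexCreate(pAllocator);
+ *   if (pMutex != NULL)
+ *   {
+ *       portSyncMutexAcquire(pMutex);
+ *       // ...critical section...
+ *       portSyncMutexRelease(pMutex);
+ *       portSyncMutexDestroy(pMutex);
+ *   }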
+ */ + +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + +#ifdef PORT_SYNC_COMMON_DEFINE_SPINLOCK +PORT_SPINLOCK * +portSyncSpinlockCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_SPINLOCK *pSpinlock; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pSpinlock = PORT_ALLOC(pAllocator, portSyncSpinlockSize); + if (pSpinlock != NULL) + { + if (portSyncSpinlockInitialize(pSpinlock) != NV_OK) + { + PORT_FREE(pAllocator, pSpinlock); + return NULL; + } + pSpinlock->pAllocator = pAllocator; + } + return pSpinlock; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_MUTEX +PORT_MUTEX * +portSyncMutexCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_MUTEX *pMutex; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pMutex = PORT_ALLOC(pAllocator, portSyncMutexSize); + if (pMutex != NULL) + { + if (portSyncMutexInitialize(pMutex) != NV_OK) + { + PORT_FREE(pAllocator, pMutex); + return NULL; + } + pMutex->pAllocator = pAllocator; + } + return pMutex; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_SEMAPHORE +PORT_SEMAPHORE * +portSyncSemaphoreCreate +( + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 startValue +) +{ + PORT_SEMAPHORE *pSemaphore; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pSemaphore = PORT_ALLOC(pAllocator, portSyncSemaphoreSize); + if (pSemaphore != NULL) + { + if (portSyncSemaphoreInitialize(pSemaphore, startValue) != NV_OK) + { + PORT_FREE(pAllocator, pSemaphore); + return NULL; + } + pSemaphore->pAllocator = pAllocator; + } + return pSemaphore; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_RWLOCK +PORT_RWLOCK * +portSyncRwLockCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_RWLOCK *pLock; + PORT_ASSERT_CHECKED(pAllocator != NULL); + + pLock = PORT_ALLOC(pAllocator, portSyncRwLockSize); + if (pLock != NULL) + { + if (portSyncRwLockInitialize(pLock) != NV_OK) + { + PORT_FREE(pAllocator, pLock); + return NULL; + } + pLock->pAllocator = pAllocator; + } + return pLock; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_SYNC_INIT + +NvLength portSyncSpinlockSize; +NvLength portSyncMutexSize; +NvLength portSyncSemaphoreSize; +NvLength portSyncRwLockSize; + +void portSyncInitialize(void) +{ + portSyncSpinlockSize = sizeof(PORT_SPINLOCK); + portSyncMutexSize = sizeof(PORT_MUTEX); + portSyncSemaphoreSize = sizeof(PORT_SEMAPHORE); + portSyncRwLockSize = sizeof(PORT_RWLOCK); +#if LOCK_VAL_ENABLED +{ + extern void portSyncInitialize_LOCKVAL(void); + portSyncInitialize_LOCKVAL(); +} +#endif +} + +void portSyncShutdown(void) +{ +#if LOCK_VAL_ENABLED + extern void portSyncShutdown_LOCKVAL(void); + portSyncShutdown_LOCKVAL(); +#endif +} + +#endif // PORT_SYNC_COMMON_DEFINE_SYNC_INIT diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c new file mode 100644 index 0000000..f331445 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c @@ -0,0 +1,178 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/** + * @file + * @brief Readers-writer lock implementation using PORT_SEMAPHORE and ATOMIC + * module. + */ +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + + +#if !PORT_IS_MODULE_SUPPORTED(atomic) +#error "NvPort SYNC RWLock implementation requires ATOMIC module to be present." +#endif + +#include "inc/sync_rwlock_def.h" + +NV_STATUS +portSyncRwLockInitialize +( + PORT_RWLOCK *pLock +) +{ + PORT_MEM_ALLOCATOR *pAllocator = portMemAllocatorGetGlobalNonPaged(); + if (pLock == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + pLock->pSemRead = portSyncSemaphoreCreate(pAllocator, 1); + if (pLock->pSemRead == NULL) + { + return NV_ERR_NO_MEMORY; + } + pLock->pSemWrite = portSyncSemaphoreCreate(pAllocator, 1); + if (pLock->pSemWrite == NULL) + { + portSyncSemaphoreDestroy(pLock->pSemRead); + return NV_ERR_NO_MEMORY; + } + + pLock->numReaders = 0; + pLock->pAllocator = NULL; + + return NV_OK; +} + + +void +portSyncRwLockAcquireRead +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreAcquire(pLock->pSemRead); + if (portAtomicIncrementS32(&pLock->numReaders) == 1) + { + portSyncSemaphoreAcquire(pLock->pSemWrite); + } + portSyncSemaphoreRelease(pLock->pSemRead); +} + +NvBool +portSyncRwLockAcquireReadConditional +( + PORT_RWLOCK *pLock +) +{ + NvBool bAcquired; + PORT_ASSERT_CHECKED(pLock != NULL); + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemRead); + if (!bAcquired) + { + return NV_FALSE; + } + if (portAtomicIncrementS32(&pLock->numReaders) == 1) + { + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemWrite); + if (!bAcquired) + { + portAtomicDecrementS32(&pLock->numReaders); + } + } + portSyncSemaphoreRelease(pLock->pSemRead); + return bAcquired; +} + +void +portSyncRwLockAcquireWrite +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreAcquire(pLock->pSemRead); + portSyncSemaphoreAcquire(pLock->pSemWrite); + portSyncSemaphoreRelease(pLock->pSemRead); +} + +NvBool +portSyncRwLockAcquireWriteConditional +( + PORT_RWLOCK *pLock +) +{ + NvBool bAcquired; + PORT_ASSERT_CHECKED(pLock != NULL); + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemRead); + if (bAcquired) + { + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemWrite); + portSyncSemaphoreRelease(pLock->pSemRead); + } + return bAcquired; +} + +void +portSyncRwLockReleaseRead +( + PORT_RWLOCK *pLock +) +{ + 
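+    // The last reader out (numReaders dropping to zero) releases the write
+    // semaphore, unblocking any pending writer; this mirrors the first-reader
+    // acquire in portSyncRwLockAcquireRead above.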
PORT_ASSERT_CHECKED(pLock != NULL); + if (portAtomicDecrementS32(&pLock->numReaders) == 0) + { + portSyncSemaphoreRelease(pLock->pSemWrite); + } +} + +void +portSyncRwLockReleaseWrite +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreRelease(pLock->pSemWrite); +} + +void +portSyncRwLockDestroy +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreDestroy(pLock->pSemRead); + portSyncSemaphoreDestroy(pLock->pSemWrite); + if (pLock->pAllocator != NULL) + { + PORT_FREE(pLock->pAllocator, pLock); + } +} + +// Include implementations common for all platforms +#define PORT_SYNC_COMMON_DEFINE_RWLOCK +#include "sync_common.h" diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c new file mode 100644 index 0000000..fbe60e8 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c @@ -0,0 +1,242 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." 
+#endif + +#include "os-interface.h" + +#include "inc/sync_unix_kernel_os_def.h" +#include "inc/sync_rwlock_def.h" + +NV_STATUS +portSyncSpinlockInitialize +( + PORT_SPINLOCK *pSpinlock +) +{ + if (pSpinlock == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pSpinlock->pAllocator = NULL; + return os_alloc_spinlock(&pSpinlock->lock); +} + +void +portSyncSpinlockDestroy +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + os_free_spinlock(pSpinlock->lock); + if (pSpinlock->pAllocator != NULL) + { + PORT_FREE(pSpinlock->pAllocator, pSpinlock); + } +} + +void +portSyncSpinlockAcquire +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + pSpinlock->oldIrql = os_acquire_spinlock(pSpinlock->lock); +} + +void +portSyncSpinlockRelease +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + os_release_spinlock(pSpinlock->lock, pSpinlock->oldIrql); +} + + + +NV_STATUS +portSyncMutexInitialize +( + PORT_MUTEX *pMutex +) +{ + if (pMutex == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pMutex->pAllocator = NULL; + return os_alloc_mutex(&pMutex->mutex); +} + +void +portSyncMutexDestroy +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + os_free_mutex(pMutex->mutex); + if (pMutex->pAllocator != NULL) + { + PORT_FREE(pMutex->pAllocator, pMutex); + } +} + +void +portSyncMutexAcquire +( + PORT_MUTEX *pMutex +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pMutex != NULL); + PORT_ASSERT_CHECKED(portSyncExSafeToSleep()); + status = os_acquire_mutex(pMutex->mutex); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncMutexAcquireConditional +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + return os_cond_acquire_mutex(pMutex->mutex) == NV_OK; + +} + +void +portSyncMutexRelease +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + os_release_mutex(pMutex->mutex); +} + + + +NV_STATUS +portSyncSemaphoreInitialize +( + PORT_SEMAPHORE *pSemaphore, + NvU32 startValue +) +{ + if (pSemaphore == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pSemaphore->pAllocator = NULL; + pSemaphore->sem = os_alloc_semaphore(startValue); + return (pSemaphore->sem != NULL) ? 
NV_OK : NV_ERR_NO_MEMORY; +} + +void +portSyncSemaphoreDestroy +( + PORT_SEMAPHORE *pSemaphore +) +{ + PORT_ASSERT_CHECKED(pSemaphore != NULL); + os_free_semaphore(pSemaphore->sem); + if (pSemaphore->pAllocator != NULL) + { + PORT_FREE(pSemaphore->pAllocator, pSemaphore); + } +} + +void +portSyncSemaphoreAcquire +( + PORT_SEMAPHORE *pSemaphore +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pSemaphore != NULL); + status = os_acquire_semaphore(pSemaphore->sem); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncSemaphoreAcquireConditional +( + PORT_SEMAPHORE *pSemaphore +) +{ + + PORT_ASSERT_CHECKED(pSemaphore != NULL); + return os_cond_acquire_semaphore(pSemaphore->sem) == NV_OK; +} + +void +portSyncSemaphoreRelease +( + PORT_SEMAPHORE *pSemaphore +) +{ + PORT_ASSERT_CHECKED(pSemaphore != NULL); + os_release_semaphore(pSemaphore->sem); +} + + +NvBool portSyncExSafeToSleep() +{ + return os_semaphore_may_sleep(); +} +NvBool portSyncExSafeToWake() +{ + return NV_TRUE; +} +NvU64 portSyncExGetInterruptLevel() +{ + return !os_semaphore_may_sleep(); +} + +// Include implementations common for all platforms +#define PORT_SYNC_COMMON_DEFINE_SPINLOCK +#define PORT_SYNC_COMMON_DEFINE_MUTEX +#define PORT_SYNC_COMMON_DEFINE_SEMAPHORE +#define PORT_SYNC_COMMON_DEFINE_SYNC_INIT +#include "sync_common.h" diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c new file mode 100644 index 0000000..95d25f0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/** + * @file + * @brief THREAD module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif + +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +// Invalid value for thread. +const PORT_THREAD PORT_THREAD_INVALID = {0ULL}; + +// Invalid value for process. 
+const PORT_PROCESS PORT_PROCESS_INVALID = {0ULL};
+
+NvU64 portThreadGetCurrentThreadId()
+{
+    NvU64 tid = 0;
+    os_get_current_thread(&tid);
+    return tid;
+}
+
+void portThreadYield()
+{
+    os_schedule();
+}
+
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
new file mode 100644
index 0000000..e1e222b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
@@ -0,0 +1,38 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * @brief UTIL module implementation selected per compiler, since for some util
+ *        functions nvmk cannot determine which compiler is in use.
+ */
+
+#include "nvport/nvport.h"
+
+#if PORT_COMPILER_IS_MSVC
+#include "util_msvc.c"
+#elif PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG
+#include "util_gcc_clang.c"
+#else
+#error "Compiler is not supported"
+#endif // switch for compiler
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
new file mode 100644
index 0000000..a4a1e4b
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations using gcc compiler intrinsics + */ + +#include "nvport/nvport.h" + +#if portUtilExGetStackTrace_SUPPORTED +NV_NOINLINE NvUPtr +portUtilExGetStackTrace +( + NvU32 level +) +{ + switch (level) + { + case 0: return (__builtin_frame_address(0) != 0) ? + (NvUPtr)__builtin_return_address(0) : (NvUPtr)0; + case 1: return (__builtin_frame_address(1) != 0) ? + (NvUPtr)__builtin_return_address(1) : (NvUPtr)0; + case 2: return (__builtin_frame_address(2) != 0) ? + (NvUPtr)__builtin_return_address(2) : (NvUPtr)0; + case 3: return (__builtin_frame_address(3) != 0) ? + (NvUPtr)__builtin_return_address(3) : (NvUPtr)0; + case 4: return (__builtin_frame_address(4) != 0) ? + (NvUPtr)__builtin_return_address(4) : (NvUPtr)0; + case 5: return (__builtin_frame_address(5) != 0) ? + (NvUPtr)__builtin_return_address(5) : (NvUPtr)0; + case 6: return (__builtin_frame_address(6) != 0) ? + (NvUPtr)__builtin_return_address(6) : (NvUPtr)0; + case 7: return (__builtin_frame_address(7) != 0) ? + (NvUPtr)__builtin_return_address(7) : (NvUPtr)0; + case 8: return (__builtin_frame_address(8) != 0) ? + (NvUPtr)__builtin_return_address(8) : (NvUPtr)0; + case 9: return (__builtin_frame_address(9) != 0) ? + (NvUPtr)__builtin_return_address(9) : (NvUPtr)0; + case 10: return (__builtin_frame_address(10) != 0) ? + (NvUPtr)__builtin_return_address(10) : (NvUPtr)0; + case 11: return (__builtin_frame_address(11) != 0) ? + (NvUPtr)__builtin_return_address(11) : (NvUPtr)0; + case 12: return (__builtin_frame_address(12) != 0) ? + (NvUPtr)__builtin_return_address(12) : (NvUPtr)0; + case 13: return (__builtin_frame_address(13) != 0) ? + (NvUPtr)__builtin_return_address(13) : (NvUPtr)0; + case 14: return (__builtin_frame_address(14) != 0) ? + (NvUPtr)__builtin_return_address(14) : (NvUPtr)0; + case 15: return (__builtin_frame_address(15) != 0) ? + (NvUPtr)__builtin_return_address(15) : (NvUPtr)0; + } + return 0; +} +#endif + +NV_NOINLINE NvUPtr portUtilGetIPAddress() +{ + return portUtilGetReturnAddress(); +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c new file mode 100644 index 0000000..44a5b6d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations for unix based OS. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif + +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +NvBool portUtilIsInterruptContext(void) +{ + return os_is_isr(); +} + diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c new file mode 100644 index 0000000..ab9e7e6 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c @@ -0,0 +1,347 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* ------------------------ Includes --------------------------------------- */ +#include "prereq_tracker/prereq_tracker.h" + +/* ------------------------ Static Function Prototypes --------------------- */ +static NvBool _prereqValid(PrereqTracker *pTracker, PREREQ_ENTRY *pPrereq); + +/* ------------------------ Public Functions ------------------------------ */ + +/*! 
+ * @brief Construct the prereq tracker object + * + * @param[in] pTracker PrereqTracker object to be constructed + * @param[in] pParent Parent GPU passed into the first parameter of callbacks + * + * @return NV_OK Successfully constructed tracker + * @return NV_ERR_INVALID_STATE If already constructed + */ +NV_STATUS +prereqConstruct_IMPL +( + PrereqTracker *pTracker, + OBJGPU *pParent +) +{ + NV_ASSERT_OR_RETURN(!pTracker->bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_OBJECT_PARENT); + + bitVectorClrAll(&pTracker->satisfied); + + listInit(&pTracker->prereqList, portMemAllocatorGetGlobalNonPaged()); + pTracker->bInitialized = NV_TRUE; + pTracker->pParent = pParent; + + return NV_OK; +} + +/*! + * @brief Destroys the prerequisite tracker object + * + * @param[in] pTracker PrereqTracker object to be destroyed + */ +void +prereqDestruct_IMPL +( + PrereqTracker *pTracker +) +{ + NV_ASSERT_OR_RETURN_VOID(pTracker->bInitialized); + + listDestroy(&pTracker->prereqList); + pTracker->bInitialized = NV_FALSE; +} + +/*! + * @brief Arms a tracking structure to fire the callback when all prerequisites + * are satisfied. May only be called after all prerequisites are specified. No + * more prerequisites may be specified after arming. + * + * @param[in] pTracker PrereqTracker object + * @param[in] pPrereq PREREQ_ENTRY object pointer + * + * @return NV_OK Prerequisite successfully armed. + * @return error Errors propagated up from functions called. + */ +static NV_STATUS +_prereqArm +( + PrereqTracker *pTracker, + PREREQ_ENTRY *pPrereq +) +{ + PREREQ_ID_BIT_VECTOR requestedAndSatisfied; + + NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(_prereqValid(pTracker, pPrereq), NV_ERR_INVALID_OBJECT); + NV_ASSERT_OR_RETURN(!pPrereq->bArmed, NV_ERR_INVALID_STATE); + + // + // Set the PREREQ_ENTRY state to bArmed. No more PREREQ_IDs may be added + // after this point. + // + pPrereq->bArmed = NV_TRUE; + + // + // Put together a mask of PREREQ_IDs which are both satisfied and requested + // We do not keep track of satisfied prereqs until armed, so we have no existing + // state to worry about here. + // + NV_ASSERT_OK_OR_RETURN(bitVectorAnd(&requestedAndSatisfied, + &pPrereq->requested, + &pTracker->satisfied)); + + pPrereq->countSatisfied = bitVectorCountSetBits(&requestedAndSatisfied); + + if (PREREQ_IS_SATISFIED(pPrereq)) + { + NV_ASSERT_OK_OR_RETURN(pPrereq->callback(pTracker->pParent, NV_TRUE)); + } + + return NV_OK; +} + +/*! + * @brief Creates, adds IDs to, and Arms a prereq tracking structure into the list. + * Caller gives up all control of the prereq structure to the prereq tracker, which + * will take care of storing the completed, final struct and freeing it once done. + * + * @param[in] pTracker PrereqTracker object + * @param[in] callback Callback function pointer + * First parameter passed will be NVOC parent of pTracker + * @param[in] pDepends Bitvector of prerequisite IDs to add as requirement + * @param[out] ppPrereq PREREQ_ENTRY object pointer created, or NULL if not desired + * + * @return NV_OK Prerequisite successfully armed. + * @return error Errors propagated up from functions called. 
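+ *
+ * Illustrative sketch (editorial addition, not part of this change;
+ * PREREQ_ID_EXAMPLE and myGpuPrereqCallback are hypothetical names, and the
+ * non-_IMPL wrapper name is assumed from the NVOC convention):
+ *
+ *   PREREQ_ID_BIT_VECTOR depends;
+ *   bitVectorClrAll(&depends);
+ *   bitVectorSet(&depends, PREREQ_ID_EXAMPLE);
+ *   status = prereqComposeEntry(pTracker, myGpuPrereqCallback, &depends, NULL);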
+ */
+NV_STATUS
+prereqComposeEntry_IMPL
+(
+    PrereqTracker        *pTracker,
+    GpuPrereqCallback    *callback,
+    PREREQ_ID_BIT_VECTOR *pDepends,
+    PREREQ_ENTRY        **ppPrereq
+)
+{
+    PREREQ_ENTRY *pPrereq;
+
+    NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN(callback != NULL, NV_ERR_INVALID_POINTER);
+    NV_ASSERT_OR_RETURN(pDepends != NULL, NV_ERR_INVALID_POINTER);
+
+    pPrereq = listAppendNew(&pTracker->prereqList);
+    NV_ASSERT_OR_RETURN(pPrereq != NULL, NV_ERR_NO_MEMORY);
+
+    NV_ASSERT_OK_OR_RETURN(bitVectorCopy(&pPrereq->requested, pDepends));
+
+    pPrereq->countRequested = bitVectorCountSetBits(&pPrereq->requested);
+    pPrereq->countSatisfied = 0;
+    pPrereq->callback = callback;
+
+    NV_ASSERT_OK_OR_RETURN(_prereqArm(pTracker, pPrereq));
+
+    if (ppPrereq != NULL)
+        *ppPrereq = pPrereq;
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Notifies that prerequisite was satisfied.
+ *
+ * @param[in]   pTracker    PrereqTracker object
+ * @param[in]   prereqId    Prerequisite ID being satisfied
+ *
+ * @return NV_OK    Prerequisite successfully satisfied & all callbacks passed.
+ * @return error    Errors propagated up from functions called.
+ */
+NV_STATUS
+prereqSatisfy_IMPL
+(
+    PrereqTracker *pTracker,
+    PREREQ_ID      prereqId
+)
+{
+    PREREQ_ENTRY *pPrereq;
+    PrereqListIter it;
+
+    NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN((prereqId < PREREQ_ID_VECTOR_SIZE),
+                        NV_ERR_INVALID_REQUEST);
+
+    //
+    // The prerequisite can be satisfied only once. An attempt to satisfy
+    // the prerequisite multiple times should indicate bad code design.
+    //
+    NV_ASSERT_OR_RETURN(!bitVectorTest(&pTracker->satisfied, prereqId),
+                        NV_ERR_INVALID_STATE);
+
+    NV_ASSERT_OK_OR_RETURN(bitVectorSet(&pTracker->satisfied, prereqId));
+
+    // Broadcast satisfaction of this PREREQ_ID to all armed PREREQ_ENTRY.
+    it = listIterAll(&pTracker->prereqList);
+    while (listIterNext(&it))
+    {
+        pPrereq = it.pValue;
+        if (pPrereq->bArmed &&
+            bitVectorTest(&pPrereq->requested, prereqId))
+        {
+            pPrereq->countSatisfied++;
+            NV_ASSERT_OR_RETURN(pPrereq->countSatisfied <= pPrereq->countRequested,
+                                NV_ERR_INVALID_STATE);
+
+            if (PREREQ_IS_SATISFIED(pPrereq))
+            {
+                NV_ASSERT_OK_OR_RETURN(pPrereq->callback(pTracker->pParent, NV_TRUE));
+            }
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Notifies that prerequisite will be retracted.
+ *
+ * @param[in]   pTracker    PrereqTracker object
+ * @param[in]   prereqId    Prerequisite ID being retracted
+ *
+ * @return NV_OK    Prerequisite successfully retracted & all callbacks passed.
+ * @return error    Errors propagated up from functions called.
+ */
+NV_STATUS
+prereqRetract_IMPL
+(
+    PrereqTracker *pTracker,
+    PREREQ_ID      prereqId
+)
+{
+    PREREQ_ENTRY *pNode;
+    PrereqListIter it;
+    NV_STATUS status = NV_OK;
+
+    NV_ASSERT_OR_RETURN(pTracker->bInitialized,
+                        NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN((prereqId < PREREQ_ID_VECTOR_SIZE),
+                        NV_ERR_INVALID_REQUEST);
+
+    //
+    // The prerequisite can be retracted even if it was not satisfied. This
+    // simplifies client code, since it no longer needs to track whether the
+    // prerequisite was satisfied, and allows us to avoid an isSatisfied() interface.
+ // + if (!bitVectorTest(&pTracker->satisfied, prereqId)) + return NV_OK; + + NV_ASSERT_OK_OR_RETURN(bitVectorClr(&pTracker->satisfied, prereqId)); + + it = listIterAll(&pTracker->prereqList); + while (listIterNext(&it)) + { + pNode = it.pValue; + + if (pNode->bArmed && + bitVectorTest(&pNode->requested, prereqId)) + { + if (PREREQ_IS_SATISFIED(pNode)) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, pNode->callback(pTracker->pParent, NV_FALSE)); + } + + pNode->countSatisfied--; + if (pNode->countSatisfied < 0) + { + NV_ASSERT(0); + if (status == NV_OK) + { + status = NV_ERR_INVALID_STATE; + } + } + } + } + + return status; +} + +/*! + * @brief Indicates if a prerequisite ID is currently satisfied. + * + * @param[in] pTracker PrereqTracker object pointer + * @param[in] prereqId Prerequisite ID to check + * + * @return NV_TRUE Prerequisite ID is in the satisfied mask. + * NV_FALSE otherwise + */ +NvBool +prereqIdIsSatisfied_IMPL +( + PrereqTracker *pTracker, + PREREQ_ID prereqId +) +{ + NvBool bIsSatisfied; + + if ((pTracker->bInitialized) && + (prereqId < PREREQ_ID_VECTOR_SIZE)) + { + bIsSatisfied = bitVectorTest(&pTracker->satisfied, prereqId); + } + else + { + bIsSatisfied = NV_FALSE; + } + + return bIsSatisfied; +} + +/* ---------------------- Private Static Functions -------------------------- */ +/*! + * Helper function which determines whether a given PREREQ_ENTRY tracking + * structure is valid (i.e. is in the tracker's list at @ref + * PrereqTracker::prereqList). + * + * @param[in] pTracker PrereqTracker object pointer + * @param[in] pPrereq PREREQ_ENTRY object pointer + * + * @return NV_TRUE pPrereq is valid. + * @return NV_FALSE pPrereq is invalid. + */ +static NvBool +_prereqValid +( + PrereqTracker *pTracker, + PREREQ_ENTRY *pPrereq +) +{ + PrereqListIter it = listIterAll(&pTracker->prereqList); + while (listIterNext(&it)) + { + // pPrereq is valid if found in the list. + if (it.pValue == pPrereq) + return NV_TRUE; + } + + return NV_FALSE; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c new file mode 100644 index 0000000..2f53c04 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c @@ -0,0 +1,717 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvstatus.h" +#include "nvtypes.h" + +#include "containers/map.h" +#include "resserv/resserv.h" +#include "resserv/rs_resource.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "resserv/rs_access_rights.h" +#include "resserv/rs_access_map.h" + +static NV_STATUS +_rsAccessGrantCallback +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pParentRights, + void *pAllocParams, + RsAccessRight accessRight +); + +/*! + * @brief Checks which rights, if any, are being shared with the invoking client by a resource + * This is a static helper function for rsAccessGrantRights. + * + * @param[in] pResourceRef + * @param[in] pInvokingClient + * @param[in] pCallContext May be NULL + * @param[out] pRightsShared The set of access rights shared + * + * @return none + */ +static void +_rsAccessGetSharedRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + CALL_CONTEXT *pCallContext, + RS_ACCESS_MASK *pRightsShared +) +{ + RsShareList *pShareList; + RsShareListIter it; + + RsServer *pServer = NULL; + RsResourceRef *pParentRef = NULL; + + RS_ACCESS_MASK rightsGranted; + RS_ACCESS_MASK rightsDenied; + + portMemSet(&rightsGranted, 0, sizeof(RS_ACCESS_MASK)); + portMemSet(&rightsDenied, 0, sizeof(RS_ACCESS_MASK)); + + RS_ACCESS_MASK_CLEAR(pRightsShared); + + // No meaning to sharing rights with self, skip + if (pInvokingClient == pResourceRef->pClient) + return; + + if (pCallContext != NULL) + { + pServer = pCallContext->pServer; + pParentRef = pCallContext->pContextRef; + } + + pShareList = rsAccessGetActiveShareList(pResourceRef, pServer); + + if (pShareList != NULL) + { + it = listIterAll(pShareList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + if (resShareCallback(pResourceRef->pResource, pInvokingClient, pParentRef, pSharePolicy)) + { + // Allow policies give rights on success + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + RS_ACCESS_MASK_UNION(&rightsGranted, &pSharePolicy->accessMask); + } + else + { + // Require policies reject rights on failure + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + RS_ACCESS_MASK_UNION(&rightsDenied, &pSharePolicy->accessMask); + } + } + } + + if (pServer != NULL) + { + it = listIterAll(&pServer->globalInternalSharePolicyList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + if (resShareCallback(pResourceRef->pResource, pInvokingClient, pParentRef, pSharePolicy)) + { + // Allow policies give rights on success + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + RS_ACCESS_MASK_UNION(&rightsGranted, &pSharePolicy->accessMask); + } + else + { + // Require policies reject rights on failure + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + RS_ACCESS_MASK_UNION(&rightsDenied, &pSharePolicy->accessMask); + } + } + } + + RS_ACCESS_MASK_UNION(pRightsShared, &rightsGranted); + RS_ACCESS_MASK_SUBTRACT(pRightsShared, &rightsDenied); +} + +void rsAccessGetAvailableRights +( + RsResourceRef *pResourceRef, + RsClient *pClient, + RS_ACCESS_MASK *pAvailableRights +) +{ + RS_ACCESS_MASK *pTargetRights; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + 
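+    //
+    // Two sources of rights: a mask cached on the resource itself when the
+    // client owns it, or rights shared with this client through share
+    // policies when it does not.
+    //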
RS_ACCESS_MASK_CLEAR(pAvailableRights); + + // Look up rights client has on target resource + pTargetRights = rsAccessLookup(pResourceRef, pClient); + if (pTargetRights != NULL) + { + // Client owns the resource, use those rights directly + portMemCopy(pAvailableRights, sizeof(*pAvailableRights), + pTargetRights, sizeof(*pTargetRights)); + } + else + { + // Client does not own the resource, add any rights shared with this client + _rsAccessGetSharedRights(pResourceRef, pClient, pCallContext, pAvailableRights); + } +} + +RS_ACCESS_MASK * +rsAccessLookup +( + RsResourceRef *pResourceRef, + RsClient *pClient +) +{ + if (pResourceRef->pClient == pClient) + return &pResourceRef->accessMask; + + return NULL; +} + +NV_STATUS +rsAccessCheckRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequired +) +{ + RS_ACCESS_MASK ownedRights; + + NV_ASSERT_OR_RETURN(pRightsRequired != NULL, NV_ERR_INVALID_ARGUMENT); + + // Return if nothing to check + if (rsAccessMaskIsEmpty(pRightsRequired)) + return NV_OK; + + // Uncached access rights require executing the callback every time + rsAccessUpdateRights(pResourceRef, pInvokingClient, pRightsRequired); + + // Look up updated rights on target resource + rsAccessGetAvailableRights(pResourceRef, pInvokingClient, &ownedRights); + + // Check that rights are sufficient + if (!rsAccessMaskIsSubset(&ownedRights, pRightsRequired)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} + +void rsAccessUpdateRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsToUpdate +) +{ + RS_ACCESS_MASK *pTargetRights; + RsAccessRight accessRight; + + // Look up rights on target resource + pTargetRights = rsAccessLookup(pResourceRef, pInvokingClient); + + // + // Nothing to update if the resource is not owned by the client + // (Uncached rights only have meaning for resources owned by the client) + // + if (pTargetRights == NULL) + return; + + // Update access rights owned by the client for any uncached rights + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + NV_STATUS status; + const RS_ACCESS_INFO *pAccessRightInfo = &g_rsAccessMetadata[accessRight]; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + if ((pRightsToUpdate != NULL && + !RS_ACCESS_MASK_TEST(pRightsToUpdate, accessRight)) || + !(pAccessRightInfo->flags & RS_ACCESS_FLAG_UNCACHED_CHECK)) + { + continue; + } + + status = _rsAccessGrantCallback(pResourceRef, pCallContext, pInvokingClient, NULL, NULL, accessRight); + + if (status != NV_OK) + { + RS_ACCESS_MASK_REMOVE(pTargetRights, accessRight); + } + else + { + RS_ACCESS_MASK_ADD(pTargetRights, accessRight); + } + } +} + +/*! + * @brief Checks whether two share policies are considered equal and can be merged + * + * This function uses the type and target of a share policy to determine whether + * two share policy entries would match the same clients, in which case they could + * be merged into one policy entry. 
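+ *
+ * Concretely: the REQUIRE flag and the type must match, and for
+ * RS_SHARE_TYPE_CLIENT policies the target client handle must match as well;
+ * for every other share type the target field is ignored.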
+ * + * @param[in] pSharePolicyA, pSharePolicyB the two policies to compare + * + * @return NV_TRUE if the two policies are equal, + * NV_FALSE otherwise + */ +static NvBool +rsSharePolicyEquals +( + const RS_SHARE_POLICY *pSharePolicyA, + const RS_SHARE_POLICY *pSharePolicyB +) +{ + if (pSharePolicyA == NULL || pSharePolicyB == NULL) + return NV_FALSE; + + if (pSharePolicyA->type != pSharePolicyB->type) + return NV_FALSE; + + if ((pSharePolicyA->action & RS_SHARE_ACTION_FLAG_REQUIRE) != + (pSharePolicyB->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + { + return NV_FALSE; + } + + if (pSharePolicyA->type == RS_SHARE_TYPE_CLIENT) + { + return pSharePolicyA->target == pSharePolicyB->target; + } + + // Otherwise, ignore target entirely + return NV_TRUE; +} + +RS_SHARE_POLICY * +rsShareListLookup +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy +) +{ + RsShareListIter it; + + // + // Need to match a condition instead of just pValue, + // can't just use listLookup directly + // + it = listIterAll(pShareList); + while (listIterNext(&it)) + { + if (rsSharePolicyEquals(it.pValue, pSharePolicy)) + { + return it.pValue; + } + } + + return NULL; +} + +NV_STATUS +rsShareListInsert +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask +) +{ + RS_ACCESS_MASK *pCurrentAccessMask; + RS_SHARE_POLICY *pCurrentPolicy; + RS_SHARE_POLICY *pNewPolicy; + + pCurrentPolicy = rsShareListLookup(pShareList, pSharePolicy); + if (pCurrentPolicy == NULL) + { + // Allocate and insert a share policy entry + pNewPolicy = listAppendValue(pShareList, pSharePolicy); + if (pNewPolicy == NULL) + { + return NV_ERR_NO_MEMORY; + } + + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + &pNewPolicy->accessMask, sizeof(pNewPolicy->accessMask)); + } + } + else + { + // Merge into existing share policy entry + pCurrentAccessMask = &pCurrentPolicy->accessMask; + RS_ACCESS_MASK_UNION(pCurrentAccessMask, &pSharePolicy->accessMask); + + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + pCurrentAccessMask, sizeof(*pCurrentAccessMask)); + } + } + + return NV_OK; +} + +void +rsShareListRemove +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask +) +{ + RS_SHARE_POLICY *pCurrentPolicy; + RS_ACCESS_MASK *pCurrentAccessMask; + + pCurrentPolicy = rsShareListLookup(pShareList, pSharePolicy); + if (pCurrentPolicy != NULL) + { + // Revoke specified rights from found mask + pCurrentAccessMask = &pCurrentPolicy->accessMask; + RS_ACCESS_MASK_SUBTRACT(pCurrentAccessMask, &pSharePolicy->accessMask); + + // pCurrentAccessMask may not exist afterwards, so copy output first + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + pCurrentAccessMask, sizeof(*pCurrentAccessMask)); + } + + if (rsAccessMaskIsEmpty(pCurrentAccessMask)) + { + // No more rights shared under this policy, erase it from the list + listRemove(pShareList, pCurrentPolicy); + } + } + else + { + // No match, no rights to revoke, output empty mask + if (pAccessMask != NULL) + { + RS_ACCESS_MASK_CLEAR(pAccessMask); + } + } +} + +NV_STATUS +rsShareListCopy +( + RsShareList *pShareListDst, + RsShareList *pShareListSrc +) +{ + RsShareListIter it; + + if (pShareListSrc == NULL) + return NV_OK; + + it = listIterAll(pShareListSrc); + while (listIterNext(&it)) + { + if (NULL == listAppendValue(pShareListDst, it.pValue)) + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +RsShareList * +rsAccessGetActiveShareList +( + 
RsResourceRef *pResourceRef, + RsServer *pServer +) +{ + RsResourceRef *pSearchRef = pResourceRef; + + // Search up the tree for a resource with an edited share list + while (pSearchRef != NULL) + { + if (pSearchRef->bSharePolicyListModified) + return &pSearchRef->sharePolicyList; + + pSearchRef = pSearchRef->pParentRef; + } + + if (pServer != NULL) + return &pServer->defaultInheritedSharePolicyList; + + return NULL; +} + +/*! + * @brief Checks whether one access right can be granted on a resource + * + * This is a static helper function for rsAccessGrantRights. The pParentRights + * argument is not strictly necessary, but is used to avoid performing multiple + * identical lookups in a map. + * + * @param[in] pResourceRef + * @param[in] pCallContext + * @param[in] pInvokingClient + * @param[in] pParentRights The set of access rights held by the invoking client + * on the resource's parent + * @param[in] accessRight The access right to try to grant + * + * @return NV_OK if the access right can be granted, or an error otherwise + */ +static NV_STATUS +_rsAccessGrantCallback +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pParentRights, + void *pAllocParams, + RsAccessRight accessRight +) +{ + const RS_ACCESS_INFO *pAccessRightInfo; + API_SECURITY_INFO *pSecInfo = NULL; + + NV_ASSERT_OR_RETURN(accessRight < RS_ACCESS_COUNT, NV_ERR_INVALID_ARGUMENT); + + pAccessRightInfo = &g_rsAccessMetadata[accessRight]; + + if (pCallContext != NULL) + { + pSecInfo = &pCallContext->secInfo; + } + else + { + NV_PRINTF(LEVEL_WARNING, "Called with NULL pCallContext, skipping permission checks\n"); + } + + // + // If the parent object has this access right, then we should be able to + // inherit it without doing any other checks + // + if ((pParentRights != NULL) && RS_ACCESS_MASK_TEST(pParentRights, accessRight)) + { + return NV_OK; + } + + if ((pSecInfo != NULL) && ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_PRIVILEGED) != 0)) + { + // Allow admin-privileged contexts + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + return NV_OK; + } + } + + if ((pSecInfo != NULL) && ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_KERNEL_PRIVILEGED) != 0)) + { + // Allow kernel-privileged contexts + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) + { + return NV_OK; + } + } + + if ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_OWNER) != 0) + { + // Allow client this access right on itself + if (pResourceRef->hResource == pInvokingClient->hClient) + { + return NV_OK; + } + } + + // Finally, invoke the resource's access callback + if (resAccessCallback(pResourceRef->pResource, pInvokingClient, pAllocParams, accessRight)) + { + return NV_OK; + } + + // All attempts to grant access failed + return NV_ERR_INSUFFICIENT_PERMISSIONS; +} + + +/*! + * @brief Computes the list of access rights to attempt to grant on a resource + * + * This is a static helper function for rsAccessGrantRights. 
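+ * The rule it implements: use the explicitly requested mask verbatim if one
+ * was given; otherwise request everything for a new client (no parent), or
+ * the rights inherited from the parent reference plus any required rights.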
+ * + * @param[in] pResourceRef + * @param[in] pInvokingClient + * @param[in] pRightsRequested The rights specified in the allocation parameters, + * or NULL if no access rights were explicitly requested + * @param[in] pRightsRequired Rights required for the allocation of this object + * to succeed, not used if rights were explicitly requested + * @param[out] pRightsToRequest The set of access rights that should be requested, + * based on input parameters provided + * + * @return NV_TRUE if access rights were explicitly requested, or + * NV_FALSE otherwise + */ +static NvBool +_rsAccessGetRightsToRequest +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, + RS_ACCESS_MASK *pRightsToRequest +) +{ + NvBool bExplicitlyRequested; + + NV_ASSERT(pRightsToRequest != NULL); + RS_ACCESS_MASK_CLEAR(pRightsToRequest); + + if (pRightsRequested != NULL) + { + // A set of access rights was explicitly requested + bExplicitlyRequested = NV_TRUE; + + portMemCopy(pRightsToRequest, sizeof(*pRightsToRequest), + pRightsRequested, sizeof(*pRightsRequested)); + } + else + { + // No rights were explicitly requested + bExplicitlyRequested = NV_FALSE; + + if (pResourceRef->pParentRef == NULL) + { + // Only client resources don't have a parent reference + // Try to request all access rights for new clients + RS_ACCESS_MASK_FILL(pRightsToRequest); + } + else + { + // Inherit access rights from parent reference + RS_ACCESS_MASK *pParentRights = rsAccessLookup(pResourceRef->pParentRef, pInvokingClient); + if (pParentRights != NULL) + { + portMemCopy(pRightsToRequest, sizeof(*pRightsToRequest), + pParentRights, sizeof(*pParentRights)); + } + + // Add any required rights as well + if (pRightsRequired != NULL) + { + RS_ACCESS_MASK_UNION(pRightsToRequest, pRightsRequired); + } + } + } + + return bExplicitlyRequested; +} + +NV_STATUS +rsAccessGrantRights +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, + void *pAllocParams +) +{ + NV_STATUS status; + NvBool bExplicitlyRequested; + RS_ACCESS_MASK rightsToRequest; + RS_ACCESS_MASK rightsShared; + RS_ACCESS_MASK *pResourceRights; + RS_ACCESS_MASK resourceRights; + RS_ACCESS_MASK *pParentRights = NULL; + RsAccessRight accessRight; + + // Determine which rights to request based on pRightsRequested + bExplicitlyRequested = _rsAccessGetRightsToRequest(pResourceRef, pInvokingClient, + pRightsRequested, pRightsRequired, + &rightsToRequest); + + // Return if nothing to grant + if (rsAccessMaskIsEmpty(&rightsToRequest)) + return NV_OK; + + // Find rights on the current resource + pResourceRights = rsAccessLookup(pResourceRef, pInvokingClient); + if (pResourceRights == NULL) + { + // + // When using grant for resources the client doesn't own, we don't modify the + // resource's mask, we only use a local mask to record which rights were available + // + RS_ACCESS_MASK_CLEAR(&resourceRights); + pResourceRights = &resourceRights; + } + + // Explicitly requesting to not get all required rights, cannot possibly succeed + if (bExplicitlyRequested && + (pRightsRequired != NULL) && + !rsAccessMaskIsSubset(&rightsToRequest, pRightsRequired)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Get rights on the parent resource to cache for _rsAccessGrantCallback + if (pResourceRef->pParentRef != NULL) + { + pParentRights = rsAccessLookup(pResourceRef->pParentRef, 
pInvokingClient);
+    }
+
+    // Get any rights shared with this client
+    _rsAccessGetSharedRights(pResourceRef, pInvokingClient, pCallContext, &rightsShared);
+
+    // Grant each access right in rightsToRequest
+    for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++)
+    {
+        if (!RS_ACCESS_MASK_TEST(&rightsToRequest, accessRight))
+            continue;
+
+        if (RS_ACCESS_MASK_TEST(&rightsShared, accessRight))
+        {
+            status = NV_OK;
+        }
+        else
+        {
+            status = _rsAccessGrantCallback(pResourceRef, pCallContext, pInvokingClient,
+                                            pParentRights, pAllocParams, accessRight);
+        }
+
+        if (status == NV_OK)
+        {
+            RS_ACCESS_MASK_ADD(pResourceRights, accessRight);
+        }
+        else
+        {
+            //
+            // The default behavior is to silently ignore failure to grant an access right,
+            // which is the path taken when no access rights were explicitly requested.
+            //
+            // In contrast, if access rights are explicitly requested (i.e. with
+            // the NvRmAllocWithAccess API), we return an error code when we fail to
+            // grant access rights.
+            //
+            if (bExplicitlyRequested)
+                return status;
+        }
+    }
+
+    // Fail if we could not obtain all required rights
+    if ((pRightsRequired != NULL) &&
+        !rsAccessMaskIsSubset(pResourceRights, pRightsRequired))
+    {
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+
+    return NV_OK;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c
new file mode 100644
index 0000000..b8e2f83
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c
@@ -0,0 +1,119 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvstatus.h"
+#include "nvtypes.h"
+#include "utils/nvassert.h"
+#include "nvctassert.h"
+
+#include "resserv/rs_access_rights.h"
+
+
+// Ensure the number of declared access rights is within the capacity
+// provided by the number of limbs used.
+// Also, NVOC access_right is NvU32 currently.
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c new file mode 100644 index 0000000..b8e2f83 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvstatus.h" +#include "nvtypes.h" +#include "utils/nvassert.h" +#include "nvctassert.h" + +#include "resserv/rs_access_rights.h" + + +// Ensure the number of declared access rights is within the capacity +// provided by the number of limbs used. +// Note: the NVOC access_right type is currently an NvU32; supporting more +// than 32 access rights would require an NVOC change. +ct_assert(RS_ACCESS_COUNT <= SDK_RS_ACCESS_MAX_COUNT); + + +#if !(RS_STANDALONE_TEST) +const RS_ACCESS_INFO g_rsAccessMetadata[RS_ACCESS_COUNT] = +{ + // RS_ACCESS_DUP_OBJECT + { + RS_ACCESS_FLAG_ALLOW_OWNER + }, + + // RS_ACCESS_NICE + { + RS_ACCESS_FLAG_ALLOW_PRIVILEGED | RS_ACCESS_FLAG_UNCACHED_CHECK + }, + + // RS_ACCESS_DEBUG + { + RS_ACCESS_FLAG_ALLOW_OWNER + }, +}; +#endif /* RS_STANDALONE_TEST */ + + +NvBool +rsAccessMaskIsSubset +( + const RS_ACCESS_MASK *pRightsPresent, + const RS_ACCESS_MASK *pRightsRequired +) +{ + RsAccessRight accessRight; + + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + if (RS_ACCESS_MASK_TEST(pRightsRequired, accessRight) && + !RS_ACCESS_MASK_TEST(pRightsPresent, accessRight)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + + +NvBool +rsAccessMaskIsEmpty +( + const RS_ACCESS_MASK *pAccessMask +) +{ + RsAccessRight accessRight; + + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + if (RS_ACCESS_MASK_TEST(pAccessMask, accessRight)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + + +void +rsAccessMaskFromArray +( + RS_ACCESS_MASK *pAccessMask, + const RsAccessRight *pRightsArray, + NvLength length +) +{ + NvLength i; + + RS_ACCESS_MASK_CLEAR(pAccessMask); + + NV_ASSERT_OR_RETURN_VOID(pRightsArray != NULL); + + for (i = 0; i < length; i++) + { + RS_ACCESS_MASK_ADD(pAccessMask, pRightsArray[i]); + } +}
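+ +/* + * Illustrative usage sketch (hypothetical, not part of this change): + * building a mask from a fixed list of rights with the helper above. + * + * static const RsAccessRight rights[] = { RS_ACCESS_DUP_OBJECT, RS_ACCESS_DEBUG }; + * RS_ACCESS_MASK mask; + * rsAccessMaskFromArray(&mask, rights, 2); + * NV_ASSERT(!rsAccessMaskIsEmpty(&mask)); + */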
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c new file mode 100644 index 0000000..2ad40a4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c @@ -0,0 +1,1741 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#include "resserv/rs_access_map.h" +#endif + +typedef enum +{ + ALLOC_NEW_RESOURCE, + ALLOC_SHARED_RESOURCE +} ALLOC_TYPE; + +/** + * Allocate a new or shared resource in RM for this client + * @param[in] pClient This client + * @param[in] pServer The resource server instance + * @param[in] pParams Parameters for the resource allocation + * @param[in,out] phResource Server will assign a handle if it is 0 + */ +static NV_STATUS _clientAllocResourceHelper(RsClient *pClient, RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phResource); + +/** + * Add a resource reference to the client's resource hashmap + * @param[in] pClient This client + * @param[in] pServer The resource server that owns the resource ref + * @param[in] pParentRef The resource's parent reference + * @param[in] hResource The resource's handle + * @param[in] externalClassId The resource's class + * @param[out] ppResourceRef The new resource reference + */ +static NV_STATUS _clientConstructResourceRef(RsClient *pClient, RsServer *pServer, RsResourceRef *pParentRef, + NvHandle hResource, NvU32 externalClassId, RsResourceRef **ppResourceRef); + +/** + * Release all CPU address mappings for a resource + * + * @param[in] pClient Client that owns the resource + * @param[in] pCallContext Caller information (which includes the resource reference whose mappings will be freed) + * @param[in] pLockInfo Information about which locks are already held, for recursive calls + */ +static NV_STATUS _clientUnmapResourceRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +/** + * Release all CPU address mappings that reference this resource + * + * @param[in] pClient Client that owns the resource + * @param[in] pCallContext Caller information (which includes the resource reference + * whose mapping back references will be freed) + * @param[in] pLockInfo Information about which locks are already held, for recursive calls + */ +static NV_STATUS _clientUnmapBackRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +static void _clientUnmapInterMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); +static void _clientUnmapInterBackRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +NV_STATUS +clientConstruct_IMPL +( + RsClient *pClient, + PORT_MEM_ALLOCATOR *pAllocator, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + CLIENT_TYPE type; + + if (pParams->pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) + type = CLIENT_TYPE_KERNEL; + else + type = CLIENT_TYPE_USER; + + pClient->type = type; + pClient->hClient = pParams->hClient; + + mapInit(&pClient->resourceMap, pAllocator); + listInitIntrusive(&pClient->pendingFreeList); + + listInit(&pClient->accessBackRefList, pAllocator); + + pClient->handleGenIdx = 0; + status = clientSetHandleGenerator(pClient, 0, 0); + if (status != NV_OK) + return status; + + pClient->bActive = NV_TRUE; + + status = clientSetRestrictedRange(pClient, 0, 0); + if (status != NV_OK) + return status; + + return NV_OK; +}
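+ +/* + * Illustrative sketch (hypothetical values, not part of this change): pinning + * a client to the guest RM handle window mentioned in the comment below. + * Generated handles then wrap within [start, start + size): + * hResource = handleRangeStart + (handleGenIdx++ % handleRangeSize) + * + * status = clientSetHandleGenerator(pClient, 0xcaf00000, 0x40000); + */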
+ +NV_STATUS +clientSetHandleGenerator_IMPL +( + RsClient *pClient, + NvHandle handleRangeStart, + NvHandle handleRangeSize +) +{ + // + // On vGPU, when a client uses RM-allocated handles, NV01_DEVICE_0 is allocated + // right after the client's NV01_ROOT, which increments handleGenIdx to 0x1. + // To avoid handle clashes, the default RM handle range is split between guest RM + // (0xcaf00000, 0xcaf3ffff) and host RM (0xcaf40000, 0xcaf80000). + // Hence, this override must be taken into account whenever a requested range + // overlaps the default RM handle ranges. + // + NvBool bShrinkUnusedRange = ((pClient->handleRangeStart == handleRangeStart) && + (pClient->handleGenIdx <= handleRangeSize)); + + if (!((pClient->handleGenIdx == 0) || bShrinkUnusedRange)) + { + return NV_ERR_INVALID_STATE; + } + + if ((handleRangeStart == 0) && (handleRangeSize == 0)) + { + pClient->handleRangeStart = RS_UNIQUE_HANDLE_BASE; + pClient->handleRangeSize = RS_UNIQUE_HANDLE_RANGE; + } + else if ((handleRangeStart != 0) && (handleRangeSize != 0)) + { + pClient->handleRangeStart = handleRangeStart; + pClient->handleRangeSize = handleRangeSize; + } + else + { + return NV_ERR_INVALID_PARAMETER; + } + + return NV_OK; +} + +NV_STATUS clientCanShareResource_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + NV_STATUS status = NV_OK; + + RS_ACCESS_MASK rightsNeeded; + RS_ACCESS_MASK *pRightsHeld; + + // + // If sharing, check that the client has the rights it is trying to share. + // Revoking does not require this check, so that all rights can be revoked + // without first holding them. + // + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE)) + { + status = rsAccessCheckRights(pResourceRef, pClient, &pSharePolicy->accessMask); + if (status == NV_ERR_INSUFFICIENT_PERMISSIONS) + { + // Attempt to grant rights which aren't already owned + portMemCopy(&rightsNeeded, sizeof(rightsNeeded), + &pSharePolicy->accessMask, sizeof(pSharePolicy->accessMask)); + + pRightsHeld = rsAccessLookup(pResourceRef, pClient); + if (pRightsHeld != NULL) + { + // Skip trying to grant rights already held + RS_ACCESS_MASK_SUBTRACT(&rightsNeeded, pRightsHeld); + } + + status = rsAccessGrantRights(pResourceRef, pCallContext, pClient, + &rightsNeeded, // pRightsRequested + NULL, // pRightsRequired + NULL); // pAllocParams + } + } + + return status; +} + +NV_STATUS +clientShareResource_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + RsServer *pServer = NULL; + RsShareList *pActiveList; + NV_STATUS status; + + status = clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext); + if (status != NV_OK) + return status; + + if (!pResourceRef->bSharePolicyListModified) + { + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_COMPOSE) + { + if (pCallContext != NULL) + pServer = pCallContext->pServer; + + pActiveList = rsAccessGetActiveShareList(pResourceRef, pServer); + status = rsShareListCopy(&pResourceRef->sharePolicyList, pActiveList); + if (status != NV_OK) + return status; + } + + pResourceRef->bSharePolicyListModified = NV_TRUE; + } + + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_COMPOSE)) + { + listClear(&pResourceRef->sharePolicyList); + } + + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) + { + rsShareListRemove(&pResourceRef->sharePolicyList, pSharePolicy, NULL); + } + else + { + status = rsShareListInsert(&pResourceRef->sharePolicyList, pSharePolicy, NULL); + } + + return status; +} + +NV_STATUS +clientShareResourceTargetClient_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + NV_STATUS status; + RS_ACCESS_MASK *pCurrentRights; + + // Special case: This should only be called
when share policy is for own client + NV_ASSERT(pSharePolicy->type == RS_SHARE_TYPE_CLIENT); + NV_ASSERT(pSharePolicy->target == pClient->hClient); + + status = clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext); + if (status != NV_OK) + return status; + + pCurrentRights = rsAccessLookup(pResourceRef, pClient); + + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) + { + RS_ACCESS_MASK_SUBTRACT(pCurrentRights, &pSharePolicy->accessMask); + } + else + { + RS_ACCESS_MASK_UNION(pCurrentRights, &pSharePolicy->accessMask); + } + + return NV_OK; +} + +NV_STATUS +clientSetRestrictedRange_IMPL +( + RsClient *pClient, + NvHandle handleRangeStart, + NvU32 handleRangeSize +) +{ + NvHandle hFirst = handleRangeStart; + NvHandle hLast; + + // Only allow modification if we haven't generated any handles + if (pClient->handleGenIdx != 0) + return NV_ERR_INVALID_STATE; + + if (handleRangeSize == 0) + { + if (handleRangeStart != 0) + return NV_ERR_INVALID_PARAMETER; + + pClient->handleRestrictRange = NV_RANGE_EMPTY; + return NV_OK; + } + + // Wrapping-around the reserved range is not supported + if (!portSafeAddU32(hFirst, handleRangeSize-1, &hLast)) + return NV_ERR_INVALID_REQUEST; + + pClient->handleRestrictRange = rangeMake(hFirst, hLast); + + return NV_OK; +} + +void clientDestruct_IMPL +( + RsClient *pClient +) +{ + NV_ASSERT(mapCount(&pClient->resourceMap) == 0); + mapDestroy(&pClient->resourceMap); + + NV_ASSERT(listCount(&pClient->accessBackRefList) == 0); + listDestroy(&pClient->accessBackRefList); +} + +NV_STATUS +clientGetResource_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvU32 internalClassId, + RsResource **ppResource +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + RsResource *pResource; + + pResourceRef = mapFind(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + pResource = NULL; + goto done; + } + + if (pResourceRef->internalClassId != internalClassId) + { + status = NV_ERR_INVALID_CLASS; + pResource = NULL; + goto done; + } + + pResource = pResourceRef->pResource; + +done: + if (ppResource != NULL) + *ppResource = pResource; + + return status; +} + +NV_STATUS +clientGetResourceByRef_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RsResource **ppResource +) +{ + if (ppResource != NULL) + *ppResource = pResourceRef->pResource; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRef_IMPL +( + RsClient *pClient, + NvHandle hResource, + RsResourceRef **ppResourceRef +) +{ + RsResourceRef *pResourceRef; + + pResourceRef = mapFind(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRefWithAccess_IMPL +( + RsClient *pClient, + NvHandle hResource, + const RS_ACCESS_MASK *pRightsRequired, + RsResourceRef **ppResourceRef +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + status = rsAccessCheckRights(pResourceRef, pClient, pRightsRequired); + if (status != NV_OK) + return status; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRefByType_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = 
clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + if (pResourceRef->internalClassId != internalClassId) + return NV_ERR_INVALID_OBJECT_HANDLE; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +clientValidate_IMPL +( + RsClient *pClient, + const API_SECURITY_INFO *pSecInfo +) +{ + return NV_OK; +} + +NV_STATUS +clientAllocResource_IMPL +( + RsClient *pClient, + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return _clientAllocResourceHelper(pClient, pServer, pParams, &pParams->hResource); +} + +NV_STATUS +clientCopyResource_IMPL +( + RsClient *pClient, + RsServer *pServer, + RS_RES_DUP_PARAMS_INTERNAL *pParams +) +{ + RS_RES_ALLOC_PARAMS_INTERNAL params; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + + RsClient *pClientDst = NULL; + RsResourceRef *pParentRef = NULL; + + NV_STATUS status; + + status = serverGetClientUnderLock(pServer, pParams->hClientDst, &pClientDst); + if (status != NV_OK) + return status; + + status = clientGetResourceRef(pClientDst, pParams->hParentDst, &pParentRef); + if (status != NV_OK) + return status; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pParams->pSrcRef; + callContext.pContextRef = pParentRef; + callContext.secInfo = *pParams->pSecInfo; + callContext.pLockInfo = pParams->pLockInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + + // + // Kernel clients are allowed to dup anything, unless they request otherwise. + // Also, if access rights are disabled, owner client should still be able to dup. + // For anything else, check that the client has dup access on the object + // + if (((pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) || + (pParams->flags & NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE)) && + (pServer->bRsAccessEnabled || (pParams->pSrcClient->hClient != pClient->hClient))) + { + RS_ACCESS_MASK rightsRequired; + + portMemSet(&rightsRequired, 0, sizeof(rightsRequired)); + RS_ACCESS_MASK_ADD(&rightsRequired, RS_ACCESS_DUP_OBJECT); + + status = rsAccessCheckRights(pParams->pSrcRef, pClient, &rightsRequired); + } + else + { + // Server's globalInternalSharePolicyList applies Require policies even to kernel + RsShareListIter it = listIterAll(&pServer->globalInternalSharePolicyList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + // We only care about failing Require policies which apply to Dup, ignore everything else + if ((pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) && + RS_ACCESS_MASK_TEST(&pSharePolicy->accessMask, RS_ACCESS_DUP_OBJECT) && + !resShareCallback(pParams->pSrcRef->pResource, pClient, pParentRef, pSharePolicy)) + { + status = NV_ERR_INVALID_REQUEST; + break; + } + } + } + + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + return status; + + portMemSet(¶ms, 0, sizeof(params)); + + params.hClient = pClient->hClient; + params.hParent = pParams->hParentDst; + params.hResource = pParams->hResourceDst; + params.externalClassId = pParams->pSrcRef->externalClassId; + params.pSecInfo = pParams->pSecInfo; + + params.pSrcClient = pParams->pSrcClient; + params.pSrcRef = pParams->pSrcRef; + params.pAllocParams = pParams->pShareParams; + params.pLockInfo = pParams->pLockInfo; + params.allocFlags = pParams->flags; + + return _clientAllocResourceHelper(pClient, pServer, ¶ms, &pParams->hResourceDst); +} + +static +NV_STATUS 
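+/* + * Overview of the helper below: construct the resource reference, run the + * class factory, enforce the depth limit, index the new reference under its + * parent, then grant access rights; on any failure the partially constructed + * resource and its reference are torn down before returning. + */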
+_clientAllocResourceHelper +( + RsClient *pClient, + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phResource +) +{ + NV_STATUS status; + NvHandle hResource = *phResource; + NvU32 depth = 0; + RsResource *pResource = NULL; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef = NULL; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvHandle hParent = pParams->hParent; + + status = clientGetResourceRef(pClient, hParent, &pParentRef); + if (status != NV_OK && hParent != pClient->hClient && hParent != 0) + return status; + + status = _clientConstructResourceRef(pClient, pServer, pParentRef, hResource, pParams->externalClassId, &pResourceRef); + if (status != NV_OK) + goto fail; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pContextRef = pParams->pSrcRef; + callContext.pLockInfo = pParams->pLockInfo; + + if (pParams->pSecInfo == NULL) + { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + callContext.secInfo = *pParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = resservResourceFactory(pServer->pAllocator, &callContext, pParams, &pResource); + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + goto fail; + + // Clear free params implicitly set by constructor + resSetFreeParams(pResource, NULL, NULL); + pParams->pResourceRef = pResourceRef; + + // + // resConstruct_IMPL sets these fields but we need to set them again until + // Bug 2527351 is fixed + // + pResourceRef->pResource = pResource; + pResource->pResourceRef = pResourceRef; + + if (pParentRef != NULL) + { + depth = pParentRef->depth + 1; + pResourceRef->depth = depth; + + // Allow one additional level of depth to offset the depth used up by the RsClientResource at the root + // of the object hierarchy + if (RS_MAX_RESOURCE_DEPTH + 1 <= depth) + { + status = NV_ERR_ILLEGAL_ACTION; + goto fail; + } + + // Add this ref to the parent's child map + if (NV_OK != indexAdd(&pParentRef->childRefMap, pResourceRef->internalClassId, pResourceRef)) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto fail; + } + } + + if (pServer->bRsAccessEnabled) + { + status = rsAccessGrantRights(pResourceRef, &callContext, pClient, + pParams->pRightsRequested, + pParams->pRightsRequired, + pParams->pAllocParams); + if (status != NV_OK) + goto fail; + } + + *phResource = hResource; + + return NV_OK; + +fail: + if (pResource != NULL) + { + RS_RES_FREE_PARAMS_INTERNAL params; + pOldContext = NULL; + + // First undo dependency tracking since it might access the resource + if (pResourceRef->pDependantSession != NULL) + sessionRemoveDependency(pResourceRef->pDependantSession, pResourceRef); + + if (pResourceRef->pSession != NULL) + sessionRemoveDependant(pResourceRef->pSession, pResourceRef); + + portMemSet(¶ms, 0, sizeof(params)); + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.secInfo = *pParams->pSecInfo; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pParams->pLockInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + resSetFreeParams(pResource, &callContext, ¶ms); + + objDelete(pResource); + resservRestoreTlsCallContext(pOldContext); + } + + if (pResourceRef != NULL) + { + if (pParentRef != NULL) + { + indexRemove(&pParentRef->childRefMap, pResourceRef->internalClassId, pResourceRef); + } + + 
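// Releasing the reference below also removes it from the client's resource map + 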
clientDestructResourceRef(pClient, pServer, pResourceRef); + } + + return status; +} + +static +NV_STATUS +_refCleanupDependencies +( + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppIndepRef; + while (NULL != (ppIndepRef = multimapFirstItem(&pResourceRef->depBackRefMap))) + { + refRemoveDependant(*ppIndepRef, pResourceRef); + } + + return NV_OK; +} + +static +NV_STATUS +_refCleanupDependants +( + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppDepRef; + while (NULL != (ppDepRef = multimapFirstItem(&pResourceRef->depRefMap))) + { + refRemoveDependant(pResourceRef, *ppDepRef); + } + + return NV_OK; +} + +NV_STATUS +clientFreeResource_IMPL +( + RsClient *pClient, + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsResourceRef *pClientRef = NULL; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef; + RsResource *pResource; + + pResourceRef = mapFind(&pClient->resourceMap, pParams->hResource); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (refPendingFree(pResourceRef, pClient)) + listRemove(&pClient->pendingFreeList, pResourceRef); + + pResource = pResourceRef->pResource; + pParentRef = pResourceRef->pParentRef; + + if (!pParams->bInvalidateOnly && pResourceRef->bInvalidated) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pServer = pServer; + callContext.pLockInfo = pParams->pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + resSetFreeParams(pResource, &callContext, pParams); + + resPreDestruct(pResource); + + // Remove all CPU mappings + _clientUnmapResourceRefMappings(pClient, &callContext, pParams->pLockInfo); + _clientUnmapBackRefMappings(pClient, &callContext, pParams->pLockInfo); + + // Remove all inter-mappings + _clientUnmapInterMappings(pClient, &callContext, pParams->pLockInfo); + _clientUnmapInterBackRefMappings(pClient, &callContext, pParams->pLockInfo); + + // Remove this resource as a dependency from other resources + pResourceRef->bInvalidated = NV_TRUE; + _refCleanupDependencies(pResourceRef); + + if (pResourceRef->pDependantSession != NULL) + sessionRemoveDependency(pResourceRef->pDependantSession, pResourceRef); + + if (pResourceRef->pSession != NULL) + sessionRemoveDependant(pResourceRef->pSession, pResourceRef); + + status = serverFreeResourceRpcUnderLock(pServer, pParams); + NV_ASSERT(status == NV_OK); + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Freeing hResource: %x\n", + // pClient->hClient, pResourceRef->hResource); + + objDelete(pResource); + + pResourceRef->pResource = NULL; + + resservRestoreTlsCallContext(pOldContext); + +done: + if (!pParams->bInvalidateOnly) + { + // Remove this ref from its parent's child ref list + if (pParentRef != NULL) + { + multimapRemoveItemByKey(&pParentRef->childRefMap, + pResourceRef->internalClassId, pResourceRef->hResource); + } + + pClientRef = mapFind(&pClient->resourceMap, pClient->hClient); + if (pClientRef != NULL) + refUncacheRef(pClientRef, pResourceRef); + + tmpStatus = clientDestructResourceRef(pClient, pServer, pResourceRef); + NV_ASSERT(tmpStatus == NV_OK); + } + + return status; +} + +NV_STATUS +clientUnmapMemory_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_LOCK_INFO *pLockInfo, + 
RsCpuMapping **ppCpuMapping, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsCpuMapping *pCpuMapping = *ppCpuMapping; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pSecInfo != NULL) + callContext.secInfo = *pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = resUnmap(pResourceRef->pResource, &callContext, pCpuMapping); + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "hClient %x: Failed to unmap cpu mapping: hResource: %x error: 0x%x\n", + pClient->hClient, + pResourceRef->hResource, + status); + + if (pCpuMapping != NULL) + { + NV_PRINTF(LEVEL_ERROR, "hContext: %x\n", + (pCpuMapping->pContextRef == NULL) ? 0 : pCpuMapping->pContextRef->hResource); + } + } + + refRemoveMapping(pResourceRef, pCpuMapping); + *ppCpuMapping = NULL; + + return status; +} + +NV_STATUS +clientInterMap_IMPL +( + RsClient *pClient, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams +) +{ + return NV_ERR_INVALID_CLIENT; +} + +void +clientInterUnmap_IMPL +( + RsClient *pClient, + RsResourceRef *pMapperRef, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + return; +} + +NV_STATUS +clientGenResourceHandle_IMPL +( + RsClient *pClient, + NvHandle *pHandle +) +{ + NvHandle hFirst; + NvHandle hResource; + NV_STATUS status; + + NV_ASSERT(pClient->handleRangeStart != 0); + NV_ASSERT(pClient->handleRangeSize != 0); + + hResource = pClient->handleRangeStart + ((pClient->handleGenIdx++) % pClient->handleRangeSize); + status = clientValidateNewResourceHandle(pClient, hResource, NV_FALSE); + if (status == NV_OK) + { + goto done; + } + + hFirst = hResource; + do + { + hResource = pClient->handleRangeStart + ((pClient->handleGenIdx++) % pClient->handleRangeSize); + status = clientValidateNewResourceHandle(pClient, hResource, NV_FALSE); + } while(hResource != hFirst && status != NV_OK); + + if (status != NV_OK) + return NV_ERR_INSUFFICIENT_RESOURCES; + +done: + NV_ASSERT(hResource - pClient->handleRangeStart < pClient->handleRangeSize); + + *pHandle = hResource; + return NV_OK; +} + +NV_STATUS +clientAssignResourceHandle_IMPL +( + RsClient *pClient, + NvHandle *phResource +) +{ + NV_STATUS status; + + if (phResource == NULL) + return NV_ERR_INVALID_ARGUMENT; + + if (*phResource == 0) + { + status = clientGenResourceHandle(pClient, phResource); + } + else + { + status = clientValidateNewResourceHandle(pClient, *phResource, NV_TRUE); + } + + return status; + +} + +static +NV_STATUS +_clientConstructResourceRef +( + RsClient *pClient, + RsServer *pServer, + RsResourceRef *pParentRef, + NvHandle hResource, + NvU32 externalClassId, + RsResourceRef **ppResourceRef +) +{ + PORT_MEM_ALLOCATOR *pAllocator = pServer->pAllocator; + RsResourceRef *pResourceRef = mapInsertNew(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + if (!pClient->bResourceWarning && (mapCount(&pClient->resourceMap) >= RS_CLIENT_RESOURCE_WARNING_THRESHOLD)) + { + NV_PRINTF(LEVEL_WARNING, "Client 0x%08x has allocated a large number of resources. [Current classid: 0x%04x]\n", pClient->hClient, externalClassId); + NV_PRINTF(LEVEL_WARNING, "The client may be leaking resources. 
This warning can be ignored if the allocations were intentional.\n"); + pClient->bResourceWarning = NV_TRUE; + } + + pResourceRef->pClient = pClient; + pResourceRef->pResourceDesc = RsResInfoByExternalClassId(externalClassId); + pResourceRef->externalClassId = externalClassId; + pResourceRef->internalClassId = RsResInfoGetInternalClassId(pResourceRef->pResourceDesc); + pResourceRef->pResource = NULL; + pResourceRef->pParentRef = pParentRef; + pResourceRef->hResource = hResource; + pResourceRef->depth = 0; + + multimapInit(&pResourceRef->childRefMap, pAllocator); + multimapInit(&pResourceRef->cachedRefMap, pAllocator); + multimapInit(&pResourceRef->depRefMap, pAllocator); + multimapInit(&pResourceRef->depBackRefMap, pAllocator); + listInit(&pResourceRef->cpuMappings, pAllocator); + listInit(&pResourceRef->backRefs, pAllocator); + listInit(&pResourceRef->interMappings, pAllocator); + listInit(&pResourceRef->interBackRefs, pAllocator); + listInit(&pResourceRef->sharePolicyList, pAllocator); + + portAtomicExIncrementU64(&pServer->activeResourceCount); + + *ppResourceRef = pResourceRef; + return NV_OK; +} + +NV_STATUS +clientDestructResourceRef_IMPL +( + RsClient *pClient, + RsServer *pServer, + RsResourceRef *pResourceRef +) +{ + NV_ASSERT(pResourceRef != NULL); + NV_ASSERT(listCount(&pResourceRef->backRefs) == 0); + NV_ASSERT(listCount(&pResourceRef->cpuMappings) == 0); + NV_ASSERT(listCount(&pResourceRef->interBackRefs) == 0); + NV_ASSERT(listCount(&pResourceRef->interMappings) == 0); + + listDestroy(&pResourceRef->backRefs); + listDestroy(&pResourceRef->cpuMappings); + listDestroy(&pResourceRef->interBackRefs); + listDestroy(&pResourceRef->interMappings); + listDestroy(&pResourceRef->sharePolicyList); + + // All children should be free + NV_ASSERT(0 == multimapCountItems(&pResourceRef->childRefMap)); + multimapDestroy(&pResourceRef->childRefMap); + + // Nothing should be cached + NV_ASSERT(0 == multimapCountItems(&pResourceRef->cachedRefMap)); + multimapDestroy(&pResourceRef->cachedRefMap); + + _refCleanupDependencies(pResourceRef); + multimapDestroy(&pResourceRef->depBackRefMap); + + _refCleanupDependants(pResourceRef); + multimapDestroy(&pResourceRef->depRefMap); + + mapRemove(&pClient->resourceMap, pResourceRef); + + portAtomicExDecrementU64(&pServer->activeResourceCount); + + return NV_OK; +} + +NV_STATUS +_clientUnmapResourceRefMappings +( + RsClient *pClient, + CALL_CONTEXT *pCallContext, + RS_LOCK_INFO *pLockInfo +) +{ + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsCpuMapping *pCpuMapping; + NV_STATUS status; + RS_LOCK_INFO lockInfo; + RS_CPU_UNMAP_PARAMS params; + + pCpuMapping = listHead(&pResourceRef->cpuMappings); + while(pCpuMapping != NULL) + { + portMemSet(¶ms, 0, sizeof(params)); + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + + params.hClient = pClient->hClient; + params.hDevice = (pCpuMapping->pContextRef == NULL) + ? 
pClient->hClient + : pCpuMapping->pContextRef->hResource; + params.hMemory = pResourceRef->hResource; + params.pLinearAddress = pCpuMapping->pLinearAddress; + params.processId = pCpuMapping->processId; + params.flags = pCpuMapping->flags; + params.pSecInfo = &pCallContext->secInfo; + params.pLockInfo = &lockInfo; + lockInfo.pClient = pLockInfo->pClient; + lockInfo.state = pLockInfo->state; + + // TODO: temp WAR for bug 2840284: deadlock during recursive free operation + lockInfo.flags |= RS_LOCK_FLAGS_NO_CLIENT_LOCK; + + status = serverUnmap(pCallContext->pServer, params.hClient, params.hMemory, ¶ms); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap (status=0x%x) hClient %x: hResource: %x\n", + status, pClient->hClient, pResourceRef->hResource); + NV_PRINTF(LEVEL_ERROR, "hContext: %x at addr " NvP64_fmt "\n", + params.hDevice, params.pLinearAddress); + + if (pCpuMapping == listHead(&pResourceRef->cpuMappings)) + { +#if !(RS_STANDALONE_TEST) + NV_ASSERT(0); +#endif + refRemoveMapping(pResourceRef, pCpuMapping); + } + } + pCpuMapping = listHead(&pResourceRef->cpuMappings); + } + + return NV_OK; +} + +NV_STATUS +_clientUnmapBackRefMappings +( + RsClient *pClient, + CALL_CONTEXT *pCallContext, + RS_LOCK_INFO *pLockInfo +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RS_CPU_MAPPING_BACK_REF *pBackRefItem; + RS_LOCK_INFO lockInfo; + RS_CPU_UNMAP_PARAMS params; + + pBackRefItem = listHead(&pResourceRef->backRefs); + while(pBackRefItem != NULL) + { + RsCpuMapping *pCpuMapping = pBackRefItem->pCpuMapping; + RsResourceRef *pBackRef = pBackRefItem->pBackRef; + + portMemSet(¶ms, 0, sizeof(params)); + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + + params.hClient = pClient->hClient; + params.hDevice = (pCpuMapping->pContextRef == NULL) + ? 
pClient->hClient + : pCpuMapping->pContextRef->hResource; + params.hMemory = pBackRef->hResource; + params.pLinearAddress = pCpuMapping->pLinearAddress; + params.processId = pCpuMapping->processId; + params.flags = pCpuMapping->flags; + params.pSecInfo = &pCallContext->secInfo; + params.pLockInfo = &lockInfo; + + lockInfo.pClient = pLockInfo->pClient; + lockInfo.state = pLockInfo->state; + + status = serverUnmap(pCallContext->pServer, pClient->hClient, pBackRef->hResource, ¶ms); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap backref (status=0x%x) hClient %x: hResource: %x\n", + status, pClient->hClient, pBackRef->hResource); + NV_PRINTF(LEVEL_ERROR, "hContext: %x at addr " NvP64_fmt "\n", + params.hDevice, params.pLinearAddress); + + if (pBackRefItem == listHead(&pResourceRef->backRefs)) + { + NV_ASSERT(0); + listRemove(&pResourceRef->backRefs, pBackRefItem); + } + } + + pBackRefItem = listHead(&pResourceRef->backRefs); + } + + return NV_OK; +} + +static NV_STATUS +_unmapInterMapping +( + RsServer *pServer, + RsClient *pClient, + RsResourceRef *pMapperRef, + RsInterMapping *pMapping, + RS_LOCK_INFO *pLockInfo, + API_SECURITY_INFO *pSecInfo +) +{ + RS_INTER_UNMAP_PARAMS params; + RS_LOCK_INFO lockInfo; + NV_STATUS status; + + portMemSet(¶ms, 0, sizeof(params)); + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + + params.hClient = pClient->hClient; + params.hMapper = pMapperRef->hResource; + params.hDevice = pMapping->pContextRef->hResource; + params.hMappable = pMapping->pMappableRef->hResource; + params.flags = pMapping->flags; + params.dmaOffset = pMapping->dmaOffset; + params.pMemDesc = pMapping->pMemDesc; + params.pSecInfo = pSecInfo; + params.pLockInfo = &lockInfo; + + lockInfo.pClient = pLockInfo->pClient; + lockInfo.pContextRef = (pLockInfo->pContextRef != NULL) + ? 
pLockInfo->pContextRef + : pMapping->pContextRef; + lockInfo.state = pLockInfo->state; + + status = serverUpdateLockFlagsForInterAutoUnmap(pServer, ¶ms); + if (status != NV_OK) + return status; + + return serverInterUnmap(pServer, ¶ms); +} + +void +_clientUnmapInterMappings +( + RsClient *pClient, + CALL_CONTEXT *pCallContext, + RS_LOCK_INFO *pLockInfo +) +{ + NV_STATUS status; + RsResourceRef *pMapperRef = pCallContext->pResourceRef; + RsInterMapping *pMapping; + + pMapping = listHead(&pMapperRef->interMappings); + while (pMapping != NULL) + { + status = _unmapInterMapping(pCallContext->pServer, pClient, pMapperRef, + pMapping, pLockInfo, &pCallContext->secInfo); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap (status=0x%x) hClient %x: hMapper: %x\n", + status, pClient->hClient, pMapperRef->hResource); + NV_PRINTF(LEVEL_ERROR, "hMappable: %x hContext: %x\n", + pMapping->pMappableRef->hResource, pMapping->pContextRef->hResource); + + if (pMapping == listHead(&pMapperRef->interMappings)) + { + NV_ASSERT(0); + refRemoveInterMapping(pMapperRef, pMapping); + } + } + + pMapping = listHead(&pMapperRef->interMappings); + } +} + +void +_clientUnmapInterBackRefMappings +( + RsClient *pClient, + CALL_CONTEXT *pCallContext, + RS_LOCK_INFO *pLockInfo +) +{ + NV_STATUS status; + RS_INTER_MAPPING_BACK_REF *pBackRefItem; + + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + + pBackRefItem = listHead(&pResourceRef->interBackRefs); + while (pBackRefItem != NULL) + { + RsResourceRef *pMapperRef = pBackRefItem->pMapperRef; + RsInterMapping *pMapping = pBackRefItem->pMapping; + + status = _unmapInterMapping(pCallContext->pServer, pClient, pMapperRef, + pMapping, pLockInfo, &pCallContext->secInfo); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap backref (status=0x%x) hClient %x: hMapper: %x\n", + status, pClient->hClient, pMapperRef->hResource); + NV_PRINTF(LEVEL_ERROR, "hMappable: %x hContext: %x\n", + pMapping->pMappableRef->hResource, pMapping->pContextRef->hResource); + + if (pBackRefItem == listHead(&pResourceRef->interBackRefs)) + { + NV_ASSERT(0); + refRemoveInterMapping(pMapperRef, pMapping); + } + } + + pBackRefItem = listHead(&pResourceRef->interBackRefs); + } +} + +NV_STATUS +indexAdd +( + RsIndex *pIndex, + NvU32 index, + RsResourceRef *pResourceRef +) +{ + NV_ASSERT(pResourceRef != NULL && pResourceRef->hResource != 0); + + if (NULL == multimapFindSubmap(pIndex, index)) + { + if (NULL == multimapInsertSubmap(pIndex, index)) + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (NULL == multimapInsertItemValue(pIndex, index, pResourceRef->hResource, + &pResourceRef)) + return NV_ERR_INSUFFICIENT_RESOURCES; + + return NV_OK; +} + +NV_STATUS +indexRemove +( + RsIndex *pIndex, + NvU32 index, + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppResourceRef; + + NV_ASSERT(pResourceRef != NULL && pResourceRef->hResource != 0); + + ppResourceRef = multimapFindItem(pIndex, index, pResourceRef->hResource); + if (ppResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + multimapRemoveItem(pIndex, ppResourceRef); + + return NV_OK; +} + +NV_STATUS +clientValidateNewResourceHandle_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvBool bRestrict +) +{ + // + // Resource handle should not be the same as the client handle + // because some control calls pass hClient in the hObject field + // + if (pClient->hClient == hResource || hResource == 0) + return NV_ERR_INVALID_OBJECT_HANDLE; + + if (bRestrict && 
!rangeIsEmpty(pClient->handleRestrictRange)) + { + NV_RANGE requestedRange = rangeMake(hResource, hResource); + if (rangeContains(pClient->handleRestrictRange, requestedRange)) + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + if (clientGetResourceRef(pClient, hResource, NULL) == NV_OK) + return NV_ERR_INSERT_DUPLICATE_NAME; + + return NV_OK; +} + +NV_STATUS +clientresConstruct_IMPL +( + RsClientResource *pClientRes, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pClient = pCallContext->pClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + + // Client proxy resource must have the same handle as its client + if (pClient->hClient != pResourceRef->hResource) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pClientRes->pClient = pCallContext->pClient; + return NV_OK; +} + +void +clientresDestruct_IMPL +( + RsClientResource *pClientRes +) +{ +} + +RsIndexIter +indexRefIter +( + RsIndex *pIndex, + NvU32 index +) +{ + RsIndexIter it; + RsIndexSubmap *pSubmap; + + portMemSet(&it, 0, sizeof(it)); + NV_ASSERT(pIndex); + + pSubmap = multimapFindSubmap(pIndex, index); + if (pSubmap != NULL) + it = multimapSubmapIterItems(pIndex, pSubmap); + + return it; +} + +RsIndexIter +indexRefIterAll +( + RsIndex *pIndex +) +{ + NV_ASSERT(pIndex); + return multimapItemIterAll(pIndex); +} + +NvBool +indexRefIterNext +( + RsIndexIter *pIt +) +{ + return multimapItemIterNext(pIt); +} + +RS_ITERATOR +clientRefIter +( + RsClient *pClient, + RsResourceRef *pScopeRef, + NvU32 internalClassId, + RS_ITER_TYPE type, + NvBool bExactMatch +) +{ + RS_ITERATOR it; + RsIndex *pIndex = NULL; + NvBool bChildrenOnly = (type == RS_ITERATE_CHILDREN); + NvBool bCachedOnly = (type == RS_ITERATE_CACHED); + NvBool bDependantsOnly = (type == RS_ITERATE_DEPENDANTS); + portMemSet(&it, 0, sizeof(it)); + + if (pClient == NULL) + { + NV_ASSERT(0); + return it; + } + + if (pScopeRef == NULL) + { + if (NV_OK != clientGetResourceRef(pClient, pClient->hClient, &pScopeRef)) + return it; + } + + if (bChildrenOnly || bCachedOnly || bDependantsOnly) + { + NvBool bIterAll = (internalClassId == 0) || !bExactMatch; + + if (bChildrenOnly) + { + pIndex = &pScopeRef->childRefMap; + } + else if (bCachedOnly) + { + pIndex = &pScopeRef->cachedRefMap; + } + else if (bDependantsOnly) + { + pIndex = &pScopeRef->depRefMap; + } + + if (!bIterAll && multimapFindSubmap(pIndex, internalClassId) == NULL) + goto done; + + it.idxIt = (bIterAll) + ? 
indexRefIterAll(pIndex) + : indexRefIter(pIndex, internalClassId); + } + else + { + // Match descendants of the scope resource (specific class / any class) + it.mapIt = mapIterAll(&pClient->resourceMap); + } + + it.pClient = pClient; + it.pScopeRef = pScopeRef; + it.internalClassId = internalClassId; + it.type = type; + it.bExactMatch = bExactMatch; + +done: + return it; +} + +RS_ORDERED_ITERATOR +clientRefOrderedIter +( + RsClient *pClient, + RsResourceRef *pScopeRef, + NvU32 internalClassId, + NvBool bExactMatch +) +{ + RS_ORDERED_ITERATOR it; + RsIndex *pIndex = NULL; + portMemSet(&it, 0, sizeof(it)); + + if (pClient == NULL) + { + NV_ASSERT(0); + return it; + } + + if (pScopeRef == NULL) + { + if (NV_OK != clientGetResourceRef(pClient, pClient->hClient, &pScopeRef)) + return it; + } + + it.depth = -1; + pIndex = &pScopeRef->childRefMap; + it.idxIt[0] = indexRefIterAll(pIndex); + + it.pClient = pClient; + it.pScopeRef = pScopeRef; + it.internalClassId = internalClassId; + it.bExactMatch = bExactMatch; + + return it; +} + +NvBool +clientRefOrderedIterNext +( + RsClient *pClient, + RS_ORDERED_ITERATOR *pIt +) +{ + RsResourceRef *pResourceRef; + NvBool bNext; + + if ((pIt == NULL) || (pIt->pClient != pClient) || pIt->pScopeRef == NULL) + { + // Iterator not initialized or nothing to iterate over + NV_ASSERT(pIt != NULL && pIt->pClient == NULL); + return NV_FALSE; + } + + // Iterate over the scope reference if the scope is not the client + if (pIt->depth == -1) + { + pIt->depth = 0; + if ((pIt->pScopeRef->hResource != pIt->pClient->hClient) && + ((pIt->internalClassId == 0) || (pIt->internalClassId == pIt->pScopeRef->internalClassId)) && + (pIt->pScopeRef->pResource != NULL)) + { + pIt->pResourceRef = pIt->pScopeRef; + return NV_TRUE; + } + } + + pIt->pResourceRef = NULL; + + bNext = NV_TRUE; + while (1) + { + // Get the next sibling, or else backtrack to parent and get its next sibling + do + { + if (!bNext) + --pIt->depth; + bNext = indexRefIterNext(&pIt->idxIt[pIt->depth]); + } while (!bNext && pIt->depth != 0); + + if (!bNext) + break; + + pResourceRef = *pIt->idxIt[pIt->depth].pValue; + + // Iterate over this resource's children next (up to max depth) + if (pIt->depth < RS_MAX_RESOURCE_DEPTH) + { + ++pIt->depth; + pIt->idxIt[pIt->depth] = indexRefIterAll(&pResourceRef->childRefMap); + } + + if (refHasAncestor(pResourceRef, pIt->pScopeRef)) + { + NvBool bMatch = NV_TRUE; + if (pIt->internalClassId != 0) + { + if (pIt->bExactMatch && (pIt->internalClassId != pResourceRef->internalClassId)) + bMatch = NV_FALSE; + + if (!pIt->bExactMatch && objDynamicCastById(pResourceRef->pResource, pIt->internalClassId) == NULL) + bMatch = NV_FALSE; + } + + if (bMatch && (pResourceRef->pResource != NULL)) + { + pIt->pResourceRef = pResourceRef; + return NV_TRUE; + } + } + } + + return NV_FALSE; +} + +NvBool +clientRefIterNext +( + RsClient *pClient, + RS_ITERATOR *pIt +) +{ + RsResourceRef *pResourceRef; + NvBool bLoop; + NvBool bUseIdx; + + if ((pIt == NULL) || (pIt->pClient != pClient) || pIt->pScopeRef == NULL) + { + // Iterator not initialized or nothing to iterate over + NV_ASSERT(pIt != NULL && pIt->pClient == NULL); + return NV_FALSE; + } + + bUseIdx = (pIt->type == RS_ITERATE_CACHED) || + (pIt->type == RS_ITERATE_CHILDREN) || + (pIt->type == RS_ITERATE_DEPENDANTS); + + pIt->pResourceRef = NULL; + + bLoop = bUseIdx ? indexRefIterNext(&pIt->idxIt) : mapIterNext(&pIt->mapIt); + while (bLoop) + { + pResourceRef = bUseIdx ? 
*pIt->idxIt.pValue : pIt->mapIt.pValue; + + if (bUseIdx || + ((pResourceRef == pIt->pScopeRef) || + (refHasAncestor(pResourceRef, pIt->pScopeRef)))) + { + NvBool bMatch = NV_TRUE; + if (pIt->internalClassId != 0) + { + if (pIt->bExactMatch && (pIt->internalClassId != pResourceRef->internalClassId)) + bMatch = NV_FALSE; + + if (!pIt->bExactMatch && objDynamicCastById(pResourceRef->pResource, pIt->internalClassId) == NULL) + bMatch = NV_FALSE; + } + + if (bMatch && (pResourceRef->pResource != NULL)) + { + pIt->pResourceRef = pResourceRef; + return NV_TRUE; + } + } + + bLoop = bUseIdx ? indexRefIterNext(&pIt->idxIt) : mapIterNext(&pIt->mapIt); + } + + return NV_FALSE; +} + +NV_STATUS +clientPostProcessPendingFreeList_IMPL +( + RsClient *pClient, + RsResourceRef **ppFirstLowPriRef +) +{ + if (ppFirstLowPriRef != NULL) + *ppFirstLowPriRef = NULL; + + return NV_OK; +} + +NV_STATUS +clientAddAccessBackRef_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef +) +{ + AccessBackRef *pAccessBackRef = listPrependNew(&pClient->accessBackRefList); + + if (pAccessBackRef == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pAccessBackRef->hClient = pResourceRef->pClient->hClient; + pAccessBackRef->hResource = pResourceRef->hResource; + + return NV_OK; +} + +void clientFreeAccessBackRefs_IMPL +( + RsClient *pClient, + RsServer *pServer +) +{ + AccessBackRef *pAccessBackRef; + NV_STATUS status; + + while ((pAccessBackRef = listHead(&pClient->accessBackRefList)) != NULL) + { + RsClient *pSharedClient; + + // + // Remove access rights entry if client/resource pair is still in use + // so that another client doesn't get unauthorized access to them + // + status = serverGetClientUnderLock(pServer, pAccessBackRef->hClient, &pSharedClient); + if (status == NV_OK) + { + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pSharedClient, pAccessBackRef->hResource, &pResourceRef); + if (status == NV_OK) + { + RS_SHARE_POLICY revokePolicy; + + revokePolicy.type = RS_SHARE_TYPE_CLIENT; + revokePolicy.target = pClient->hClient; + revokePolicy.action = RS_SHARE_ACTION_FLAG_REVOKE; + RS_ACCESS_MASK_FILL(&revokePolicy.accessMask); + + // Check the resource's share policy for matching client policies + rsShareListRemove(&pResourceRef->sharePolicyList, &revokePolicy, NULL); + } + } + + listRemove(&pClient->accessBackRefList, pAccessBackRef); + } +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c new file mode 100644 index 0000000..bbbc84d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_domain.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#endif + +NV_STATUS +domainConstruct +( + RsDomain *pDomain, + PORT_MEM_ALLOCATOR *pAllocator, + NvHandle hDomain, + NvHandle hParentDomain, + ACCESS_CONTROL *pAccessControl +) +{ + return NV_OK; +} + +NV_STATUS +domainDestruct +( + RsDomain *pDomain +) +{ + return NV_OK; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c new file mode 100644 index 0000000..81f5ed4 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c @@ -0,0 +1,799 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_resource.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#endif + +NV_STATUS +resConstruct_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef; + + if (pCallContext == NULL) + { + return NV_OK; + } + + pResourceRef = pCallContext->pResourceRef; + + pResource->bConstructed = NV_TRUE; + + // Init pResourceRef->pResource so iteration APIs work during ctor + pResourceRef->pResource = pResource; + + // Init back-ref so we can use during ctor + pResource->pResourceRef = pResourceRef; + + // Set context for free in case a chained constructor fails. 
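+ // (resGetFreeParams later recovers this context on the destruction path.)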
+ resSetFreeParams(pResource, pCallContext, NULL); + + // NV_PRINTF(LEVEL_INFO, "Constructing resource with external class: 0x%x\n", pParams->externalClassId); + + return NV_OK; +} + +void +resPreDestruct_IMPL +( + RsResource *pResource +) +{ +} + +void +resDestruct_IMPL +( + RsResource *pResource +) +{ + if (!pResource->bConstructed) + { + return; + } + + // NV_PRINTF(LEVEL_INFO, "Freeing resource: " NvP64_fmt "\n", NV_PTR_TO_NvP64(pResource)); +} + +NV_STATUS +resSetFreeParams_IMPL(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_FREE_PARAMS_INTERNAL *pParams) +{ + if (!pResource->bConstructed) + { + return NV_OK; + } + + pResource->dtorParams.pFreeContext = pCallContext; + pResource->dtorParams.pFreeParams = pParams; + + return NV_OK; +} + +NV_STATUS +resGetFreeParams_IMPL(RsResource *pResource, CALL_CONTEXT **ppCallContext, RS_RES_FREE_PARAMS_INTERNAL **ppParams) +{ + if (ppCallContext != NULL) + *ppCallContext = pResource->dtorParams.pFreeContext; + + if (ppParams != NULL) + *ppParams = pResource->dtorParams.pFreeParams; + + return NV_OK; +} + +NV_STATUS resControlLookup_IMPL +( + RsResource *pResource, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams, + const struct NVOC_EXPORTED_METHOD_DEF **ppEntry +) +{ + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + NvU32 cmd = pRsParams->cmd; + + *ppEntry = NULL; + pEntry = objGetExportedMethodDef(staticCast(objFullyDerive(pResource), Dynamic), cmd); + + if (pEntry == NULL) + return NV_ERR_NOT_SUPPORTED; + + if ((pEntry->paramSize != 0) && (pRsParams->paramsSize != pEntry->paramSize)) + { + NV_PRINTF(LEVEL_NOTICE, + "hObject 0x%08x, cmd 0x%08x: bad paramsize %d, expected %d\n", + RES_GET_HANDLE(pResource), pRsParams->cmd, + (int)pRsParams->paramsSize, + (int)pEntry->paramSize); + + return NV_ERR_INVALID_PARAM_STRUCT; + } + + *ppEntry = pEntry; + return NV_OK; +} + +typedef NV_STATUS (*CONTROL_EXPORT_FNPTR)(void*, void*); +typedef NV_STATUS (*CONTROL_EXPORT_FNPTR_NO_PARAMS)(void*); + +NV_STATUS +resControl_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams +) +{ + RsServer *pServer = pCallContext->pServer; + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + NV_STATUS status; + Dynamic *pDynamicObj; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE access = LOCK_ACCESS_WRITE; + + status = resControlLookup(pResource, pRsParams, &pEntry); + if (status != NV_OK) + { + if (status == NV_WARN_NOTHING_TO_DO) + return NV_OK; + return status; + } + + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NOT_SUPPORTED); + + // Initialize the execution cookie + serverControl_InitCookie(pEntry, pRsParams->pCookie); + + status = resControlFilter(pResource, pCallContext, pRsParams); + if (status != NV_OK) + return status; + + status = serverControl_Prologue(pServer, pRsParams, &access, &releaseFlags); + if (status != NV_OK) + return status; + + status = resControl_Prologue(pResource, pCallContext, pRsParams); + if ((status != NV_OK) && (status != NV_WARN_NOTHING_TO_DO)) + goto done; + + pDynamicObj = objDynamicCastById(pResource, pEntry->pClassInfo->classId); + + if (status == NV_WARN_NOTHING_TO_DO) + { + // Call handled by the prologue. + status = NV_OK; + } + else + { + // Check the size of paramSize while it is non-zero. 
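+ // (paramSize is the size declared in the export table entry; the caller-supplied pRsParams->paramsSize was validated against it in resControlLookup.)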
+ // Zero size means the exported method only has one param (pResource) + if (pEntry->paramSize == 0) + { + CONTROL_EXPORT_FNPTR_NO_PARAMS pFunc = ((CONTROL_EXPORT_FNPTR_NO_PARAMS) pEntry->pFunc); + status = pFunc(pDynamicObj); + } + else + { + CONTROL_EXPORT_FNPTR pFunc = ((CONTROL_EXPORT_FNPTR) pEntry->pFunc); + status = pFunc(pDynamicObj, pRsParams->pParams); + } + } + + resControl_Epilogue(pResource, pCallContext, pRsParams); + +done: + status = serverControl_Epilogue(pServer, pRsParams, access, &releaseFlags, status); + + return status; +} + +NV_STATUS +resControlFilter_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +resControl_Prologue_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +resControl_Epilogue_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + return; +} + +NvU32 resGetRefCount_IMPL +( + RsResource *pResource +) +{ + return 1; +} + +NV_STATUS +resMap_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +resUnmap_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +resMapTo_IMPL +( + RsResource *pResource, + RS_RES_MAP_TO_PARAMS *pParams +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +resUnmapFrom_IMPL +( + RsResource *pResource, + RS_RES_UNMAP_FROM_PARAMS *pParams +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NvBool +resCanCopy_IMPL +( + RsResource *pResource +) +{ + return NV_FALSE; +} + +NvBool +resAccessCallback_IMPL +( + RsResource *pResource, + RsClient *pInvokingClient, + void *pAllocParams, + RsAccessRight accessRight +) +{ + return NV_FALSE; +} + +NvBool +resShareCallback_IMPL +( + RsResource *pResource, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + switch (pSharePolicy->type) + { + case RS_SHARE_TYPE_ALL: + return NV_TRUE; + case RS_SHARE_TYPE_CLIENT: + if (pSharePolicy->target == pInvokingClient->hClient) + return NV_TRUE; + break; + } + + return NV_FALSE; +} + +NV_STATUS +refFindCpuMapping +( + RsResourceRef *pResourceRef, + NvP64 pAddress, + RsCpuMapping **ppMapping +) +{ + return refFindCpuMappingWithFilter(pResourceRef, pAddress, NULL, ppMapping); +} + +NV_STATUS +refFindCpuMappingWithFilter +( + RsResourceRef *pResourceRef, + NvP64 pAddress, + NvBool (*fnFilter)(RsCpuMapping*), + RsCpuMapping **ppMapping +) +{ + RsCpuMappingListIter it; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + RsCpuMapping *pMapping = NULL; + + if (pResourceRef == NULL) + { + NV_ASSERT(0); + return status; + } + + it = listIterAll(&pResourceRef->cpuMappings); + while (listIterNext(&it)) + { + pMapping = it.pValue; + if ((pMapping->pLinearAddress == pAddress) && + ((fnFilter == NULL) || fnFilter(pMapping))) + { + status = NV_OK; + break; + } + } + + if (status != NV_OK) + pMapping = NULL; + + if (pMapping != NULL) + *ppMapping = pMapping; + + return status; +} + +NV_STATUS +refFindChildOfType +( + RsResourceRef *pParentRef, + NvU32 internalClassId, + NvBool bExactMatch, + RsResourceRef **ppResourceRef +) +{ + if (bExactMatch) + { + RsIndexIter it = indexRefIter(&pParentRef->childRefMap, internalClassId); + if (indexRefIterNext(&it)) + { + RsResourceRef 
*pResourceRef = *it.pValue; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; + } + } + else + { + RsIndexSupermapIter it = multimapSubmapIterAll(&pParentRef->childRefMap); + while (multimapSubmapIterNext(&it)) + { + RsIndexSubmap *pSubmap = it.pValue; + RsIndexIter subIt = multimapSubmapIterItems(&pParentRef->childRefMap, pSubmap); + if (multimapItemIterNext(&subIt)) + { + RsResourceRef *pResourceRef = *subIt.pValue; + + if (objDynamicCastById(pResourceRef->pResource, internalClassId) == NULL) + continue; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; + } + } + + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +refFindAncestorOfType +( + RsResourceRef *pDescendantRef, + NvU32 internalClassId, + RsResourceRef **ppAncestorRef +) +{ + RsResourceRef *pAncestorRef = pDescendantRef->pParentRef; + + while (pAncestorRef != NULL) + { + if (pAncestorRef->internalClassId == internalClassId) + { + if(pAncestorRef->bInvalidated) + return NV_ERR_OBJECT_NOT_FOUND; + + if (ppAncestorRef != NULL) + *ppAncestorRef = pAncestorRef; + + return NV_OK; + } + + pAncestorRef = pAncestorRef->pParentRef; + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NvBool +refHasAncestor +( + RsResourceRef *pDescendantRef, + RsResourceRef *pAncestorRef +) +{ + RsResourceRef *pSearchRef = pDescendantRef->pParentRef; + + while (pSearchRef != NULL) + { + if (pSearchRef == pAncestorRef) + return NV_TRUE; + + pSearchRef = pSearchRef->pParentRef; + } + + return NV_FALSE; +} + +NV_STATUS +refAddMapping +( + RsResourceRef *pResourceRef, + RS_CPU_MAP_PARAMS *pParams, + RsResourceRef *pContextRef, + RsCpuMapping **ppMapping +) +{ + NV_STATUS status; + RsCpuMapping *pCpuMapping = listAppendNew(&pResourceRef->cpuMappings); + if (pCpuMapping == NULL) + return NV_ERR_NO_MEMORY; + + status = refAllocCpuMappingPrivate(pParams, pCpuMapping); + if (status != NV_OK) + { + listRemove(&pResourceRef->cpuMappings, pCpuMapping); + return status; + } + + if ((pContextRef != NULL) && + (pContextRef != pResourceRef) && + !refHasAncestor(pResourceRef, pContextRef)) + { + RS_CPU_MAPPING_BACK_REF *pBackRefItem = listAppendNew(&pContextRef->backRefs); + if (pBackRefItem == NULL) + { + refFreeCpuMappingPrivate(pCpuMapping); + listRemove(&pResourceRef->cpuMappings, pCpuMapping); + return NV_ERR_NO_MEMORY; + } + + pBackRefItem->pBackRef = pResourceRef; + pBackRefItem->pCpuMapping = pCpuMapping; + } + + pCpuMapping->offset = pParams->offset; + pCpuMapping->length = pParams->length; + pCpuMapping->flags = pParams->flags; + pCpuMapping->pContextRef = pContextRef; + + if (ppMapping != NULL) + *ppMapping = pCpuMapping; + + return NV_OK; +} + +void +refRemoveMapping +( + RsResourceRef *pResourceRef, + RsCpuMapping *pCpuMapping +) +{ + if ((pCpuMapping->pContextRef != NULL) && + !refHasAncestor(pResourceRef, pCpuMapping->pContextRef)) + { + RS_CPU_MAPPING_BACK_REF *pBackRefItem; + RsCpuMappingBackRefListIter it = listIterAll(&pCpuMapping->pContextRef->backRefs); + + while (listIterNext(&it)) + { + pBackRefItem = it.pValue; + if ((pBackRefItem->pBackRef == pResourceRef) && + (pBackRefItem->pCpuMapping == pCpuMapping)) + { + listRemove(&pCpuMapping->pContextRef->backRefs, pBackRefItem); + break; + } + } + } + + refFreeCpuMappingPrivate(pCpuMapping); + listRemove(&pResourceRef->cpuMappings, pCpuMapping); +} + +#if RS_STANDALONE +NV_STATUS +refAllocCpuMappingPrivate +( + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + return NV_OK; +} + +void +refFreeCpuMappingPrivate +( + 
RsCpuMapping *pCpuMapping +) +{ +} +#endif /* RS_STANDALONE */ + +NV_STATUS +refFindInterMapping +( + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RsResourceRef *pContextRef, + NvU64 dmaOffset, + RsInterMapping **ppMapping +) +{ + RsInterMappingListIter it; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + RsInterMapping *pMapping = NULL; + + NV_ASSERT(pMapperRef != NULL); + + it = listIterAll(&pMapperRef->interMappings); + while (listIterNext(&it)) + { + pMapping = it.pValue; + if ((pMapping->pMappableRef == pMappableRef) && + (pMapping->pContextRef == pContextRef) && + (pMapping->dmaOffset == dmaOffset)) + { + status = NV_OK; + break; + } + } + + if (status != NV_OK) + pMapping = NULL; + + if (pMapping != NULL) + *ppMapping = pMapping; + + return status; +} + +NV_STATUS +refAddInterMapping +( + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RsResourceRef *pContextRef, + RsInterMapping **ppMapping +) +{ + RsInterMapping *pInterMapping; + RS_INTER_MAPPING_BACK_REF *pBackRefItem; + RS_INTER_MAPPING_BACK_REF *pContextBackRefItem; + + NV_ASSERT(pMapperRef != NULL); + NV_ASSERT(pMappableRef != NULL); + NV_ASSERT(pMappableRef != pMapperRef); + + pInterMapping = listAppendNew(&pMapperRef->interMappings); + if (pInterMapping == NULL) + return NV_ERR_NO_MEMORY; + + // Add backref linked to this inter-mapping + pBackRefItem = listAppendNew(&pMappableRef->interBackRefs); + if (pBackRefItem == NULL) + { + listRemove(&pMapperRef->interMappings, pInterMapping); + return NV_ERR_NO_MEMORY; + } + + pBackRefItem->pMapperRef = pMapperRef; + pBackRefItem->pMapping = pInterMapping; + + // + // Either pMapperRef or pMappableRef should be a descendant of pContextRef + // Otherwise, it becomes possible to have a stale reference if hContext is freed first + // If this is not the case, add a backref to pContextRef as well + // + if (!refHasAncestor(pMapperRef, pContextRef) && + !refHasAncestor(pMappableRef, pContextRef)) + { + pContextBackRefItem = listAppendNew(&pContextRef->interBackRefs); + if (pContextBackRefItem == NULL) + { + listRemove(&pMapperRef->interMappings, pInterMapping); + listRemove(&pMappableRef->interBackRefs, pBackRefItem); + return NV_ERR_NO_MEMORY; + } + + pContextBackRefItem->pMapperRef = pMapperRef; + pContextBackRefItem->pMapping = pInterMapping; + } + + pInterMapping->pMappableRef = pMappableRef; + pInterMapping->pContextRef = pContextRef; + + if (ppMapping != NULL) + *ppMapping = pInterMapping; + + return NV_OK; +} + +void +refRemoveInterMapping +( + RsResourceRef *pMapperRef, + RsInterMapping *pMapping +) +{ + RsInterMappingBackRefListIter it; + RS_INTER_MAPPING_BACK_REF *pBackRefItem = NULL; + RsResourceRef *pMappableRef = pMapping->pMappableRef; + RsResourceRef *pContextRef = pMapping->pContextRef; + + // Find and remove the mappable's backref linked to this inter-mapping + it = listIterAll(&pMappableRef->interBackRefs); + while (listIterNext(&it)) + { + pBackRefItem = it.pValue; + if (pBackRefItem->pMapping == pMapping) + { + listRemove(&pMappableRef->interBackRefs, pBackRefItem); + break; + } + } + + // Find and remove the context's backref linked to this inter-mapping, if present + it = listIterAll(&pContextRef->interBackRefs); + while (listIterNext(&it)) + { + pBackRefItem = it.pValue; + if (pBackRefItem->pMapping == pMapping) + { + listRemove(&pContextRef->interBackRefs, pBackRefItem); + break; + } + } + + listRemove(&pMapperRef->interMappings, pMapping); +} + +NV_STATUS +refCacheRef +( + RsResourceRef *pParentRef, + RsResourceRef *pResourceRef +) +{ + 
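+    //
+    // Note: cachedRefMap appears to act as a per-parent lookup cache keyed by
+    // internalClassId, letting callers relocate a reference of a given class
+    // without walking the entire child hierarchy; refUncacheRef undoes this.
+    //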
return indexAdd(&pParentRef->cachedRefMap, pResourceRef->internalClassId, pResourceRef); +} + +NV_STATUS +refUncacheRef +( + RsResourceRef *pParentRef, + RsResourceRef *pResourceRef +) +{ + return indexRemove(&pParentRef->cachedRefMap, pResourceRef->internalClassId, pResourceRef); +} + +NV_STATUS +refAddDependant +( + RsResourceRef *pResourceRef, + RsResourceRef *pDependantRef +) +{ + // dependencies are implicit between a parent resource reference and child resource reference + if (refHasAncestor(pDependantRef, pResourceRef)) + return NV_OK; + + indexAdd(&pDependantRef->depBackRefMap, pResourceRef->internalClassId, pResourceRef); + return indexAdd(&pResourceRef->depRefMap, pDependantRef->internalClassId, pDependantRef); +} + +NV_STATUS +refRemoveDependant +( + RsResourceRef *pResourceRef, + RsResourceRef *pDependantRef +) +{ + indexRemove(&pDependantRef->depBackRefMap, pResourceRef->internalClassId, pResourceRef); + return indexRemove(&pResourceRef->depRefMap, pDependantRef->internalClassId, pDependantRef); +} + +NvBool +refPendingFree +( + RsResourceRef *pResourceRef, + RsClient *pClient +) +{ + return ((pResourceRef->freeNode.pNext != NULL) || + (pResourceRef->freeNode.pPrev != NULL) || + (pResourceRef == listHead(&pClient->pendingFreeList))); +} + +void +resAddAdditionalDependants_IMPL +( + RsClient *pClient, + RsResource *pResource, + RsResourceRef *pReference +) +{ + return; +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c new file mode 100644 index 0000000..a5f00f0 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c @@ -0,0 +1,3602 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "tls/tls.h" +#include "nv_speculation_barrier.h" + +/** + * Get the RsClient from a client handle without taking locks + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[out] ppClient The RsClient associated with the handle + */ +static NV_STATUS _serverFindClient(RsServer *pServer, NvHandle hClient, RsClient **ppClient); + +/** + * Get the CLIENT_ENTRY from a client handle without taking locks + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[in] bFindPartial Include entries that have not finished constructing + * @param[out] ppClientEntry The client entry associated with the handle + */ +static NV_STATUS _serverFindClientEntry(RsServer *pServer, NvHandle hClient, NvBool bFindPartial, CLIENT_ENTRY **ppClientEntry); + +/** + * Insert a CLIENT_ENTRY in the server database without taking locks + * @param[in] pServer + * @param[in] pClientEntry The client entry associated with the handle + */ +static NV_STATUS _serverInsertClientEntry(RsServer *pServer, CLIENT_ENTRY *pClientEntry, CLIENT_ENTRY **ppClientNext); + +/** + * Find the next available client handle in bucket. + * @param[in] pServer + * @param[in] hClientIn + * @param[out] pClientOut + */ +static NV_STATUS _serverFindNextAvailableClientHandleInBucket(RsServer *pServer, NvHandle hClientIn, NvHandle *phClientOut, CLIENT_ENTRY ***pppClientNext); + +/** + * Create a client entry and a client lock for a client that does not exist yet. Used during client + * construction. No locks will be taken if this call fails. + * @param[in] pServer + * @param[in] hClient + */ +static NV_STATUS _serverCreateEntryAndLockForNewClient(RsServer *pServer, NvHandle *phClient, NvBool bInternalHandle, CLIENT_ENTRY **ppClientEntry ); + +/** + * Lock and retrieve the RsClient associated with a client handle. + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of client to look-up + * @param[out] pClient RsClient associated with the client handle + */ +static NV_STATUS _serverLockClient(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient, RsClient **ppClient); + +/** + * Lock and retrieve the RsClient associated with a client handle, and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of client to look-up + * @param[inout] pLockInfo Lock state + * @param[out] pClient RsClient associated with the client handle + */ +static NV_STATUS _serverLockClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, RsClient **ppClient); + +/** + * Lock and retrieve two RsClient associated with a pair of client handles, and update lock info. 
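+ * Note: the two client locks are presumably taken in a consistent order
+ * (e.g., sorted by handle value) so that two threads locking the same pair
+ * from opposite directions cannot deadlock.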
+ * @param[in] pServer + * @param[in] access + * @param[in] hClient1, hClient2 Handles of clients to look-up and lock + * @param[inout] pLockInfo Lock state + * @param[out] pClient1, pClient2 RsClient associated with the client handles + */ +static NV_STATUS _serverLockDualClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient1, NvHandle hClient2, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, RsClient **ppClient1, RsClient **ppClient2); + +/** + * Unlock a client by handle + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of the client to unlock + */ +static NV_STATUS _serverUnlockClient(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient); + +/** + * Unlock a client by handle, and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of the client to unlock + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +static NV_STATUS _serverUnlockClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient, RS_LOCK_INFO* pLockInfo, NvU32 *pReleaseFlags); + +/** + * Unlock a client by handle, and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] hClient1, hClient2 Handles of the clients to unlock + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +static NV_STATUS _serverUnlockDualClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient1, NvHandle hClient2, RS_LOCK_INFO* pLockInfo, NvU32 *pReleaseFlags); + +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams) +{ + NV_STATUS status; + RsResourceRef *pResourceRef = pFreeParams->pResourceRef; + RS_LOCK_INFO *pLockInfo = pFreeParams->pLockInfo; + NvU32 releaseFlags = 0; + + NV_ASSERT_OR_RETURN(pResourceRef != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + status = serverUpdateLockFlagsForFree(pServer, pFreeParams); + if (status != NV_OK) + return status; + + status = serverSessionLock_Prologue(LOCK_ACCESS_WRITE, pResourceRef, pLockInfo, &releaseFlags); + if (status != NV_OK) + return status; + + pLockInfo->flags |= RS_LOCK_FLAGS_FREE_SESSION_LOCK; + pLockInfo->traceOp = RS_LOCK_TRACE_FREE; + pLockInfo->traceClassId = pResourceRef->externalClassId; + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientFreeResource(pResourceRef->pClient, pServer, pFreeParams); + NV_ASSERT(status == NV_OK); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + serverSessionLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + return status; +} + +#if RS_STANDALONE +NV_STATUS +serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO* pLockInfo, RS_RES_FREE_PARAMS *pParams) +{ + portMemSet(pParams, 0, sizeof(*pParams)); + pParams->hClient = hClient; + pParams->hResource = hResource; + pParams->pLockInfo = pLockInfo; + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, 
RS_RES_FREE_PARAMS *pParams) +{ + return NV_OK; +} +#endif + + +// +// Client handle format: +// +// fn [ C 1 D/E ] [ *INDEX* ] +// bit 31 20 19 0 +// + +#define RS_CLIENT_HANDLE_DECODE_MASK 0xFFFFF +#define CLIENT_DECODEHANDLE(handle) (handle & RS_CLIENT_HANDLE_DECODE_MASK) + +#define CLIENT_ENCODEHANDLE(index) (RS_CLIENT_HANDLE_BASE | index) +#define CLIENT_ENCODEHANDLE_INTERNAL(internalBase, index) (internalBase | index) + +NV_STATUS +serverConstruct +( + RsServer *pServer, + RS_PRIV_LEVEL privilegeLevel, + NvU32 maxDomains +) +{ + NvU32 i; + PORT_MEM_ALLOCATOR *pAllocator = portMemAllocatorCreateNonPaged(); + + pServer->privilegeLevel = privilegeLevel; + pServer->bConstructed = NV_TRUE; + pServer->pAllocator = pAllocator; + pServer->bDebugFreeList = NV_FALSE; + pServer->bRsAccessEnabled = NV_TRUE; + pServer->internalHandleBase = RS_CLIENT_INTERNAL_HANDLE_BASE; + pServer->activeClientCount = 0; + pServer->activeResourceCount= 0; + pServer->roTopLockApiMask = 0; + /* pServer->bUnlockedParamCopy is set in _rmapiLockAlloc */ + + pServer->pClientSortedList = PORT_ALLOC(pAllocator, sizeof(RsClientList)*RS_CLIENT_HANDLE_BUCKET_COUNT); + if (NULL == pServer->pClientSortedList) + goto fail; + + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + listInit(&pServer->pClientSortedList[i], pAllocator); + } + pServer->clientCurrentHandleIndex = 0; + + pServer->pClientListLock = portSyncRwLockCreate(pAllocator); + if (pServer->pClientListLock == NULL) + goto fail; + +#if RS_STANDALONE + RS_LOCK_VALIDATOR_INIT(&pServer->topLockVal, LOCK_VAL_LOCK_CLASS_API, 0xdead0000); + pServer->pTopLock = portSyncRwLockCreate(pAllocator); + if (pServer->pTopLock == NULL) + goto fail; + + RS_LOCK_VALIDATOR_INIT(&pServer->resLockVal, LOCK_VAL_LOCK_CLASS_GPU, 0xbeef0000); + pServer->pResLock = portSyncRwLockCreate(pAllocator); + if (pServer->pResLock == NULL) + goto fail; + + pServer->topLockOwnerTid = ~0; +#endif + + pServer->pShareMapLock = portSyncSpinlockCreate(pAllocator); + + mapInitIntrusive(&pServer->shareMap); + + listInit(&pServer->defaultInheritedSharePolicyList, pAllocator); + listInit(&pServer->globalInternalSharePolicyList, pAllocator); + + if (NV_OK != serverInitGlobalSharePolicies(pServer)) + { + mapDestroy(&pServer->shareMap); + listDestroy(&pServer->defaultInheritedSharePolicyList); + listDestroy(&pServer->globalInternalSharePolicyList); + goto fail; + } + + return NV_OK; +fail: + +#if RS_STANDALONE + if (pServer->pResLock != NULL) + portSyncRwLockDestroy(pServer->pResLock); + + if (pServer->pTopLock != NULL) + portSyncRwLockDestroy(pServer->pTopLock); +#endif + + if (pServer->pClientListLock != NULL) + portSyncRwLockDestroy(pServer->pClientListLock); + + if (pServer->pShareMapLock != NULL) + portSyncSpinlockDestroy(pServer->pShareMapLock); + + if (pServer->pClientSortedList != NULL) + { + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + listDestroy(&pServer->pClientSortedList[i]); + } + PORT_FREE(pAllocator, pServer->pClientSortedList); + } + + if (pAllocator != NULL) + portMemAllocatorRelease(pAllocator); + + return NV_ERR_INSUFFICIENT_RESOURCES; +} + + +NV_STATUS +serverDestruct +( + RsServer *pServer +) +{ + NvU32 i; + RS_LOCK_INFO lockInfo; + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + + if (!pServer->bConstructed) + return NV_ERR_INVALID_OBJECT; + + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + CLIENT_ENTRY **ppClientEntry; + NvHandle hClient = 0; + + while ((ppClientEntry = listHead(&pServer->pClientSortedList[i])) != NULL) + { + RS_RES_FREE_PARAMS_INTERNAL 
freeParams; + lockInfo.pClient = (*ppClientEntry)->pClient; + hClient = lockInfo.pClient->hClient; + serverInitFreeParams_Recursive(hClient, hClient, &lockInfo, &freeParams); + serverFreeResourceTree(pServer, &freeParams); + } + + listDestroy(&pServer->pClientSortedList[i]); + } + + PORT_FREE(pServer->pAllocator, pServer->pClientSortedList); + mapDestroy(&pServer->shareMap); + listDestroy(&pServer->defaultInheritedSharePolicyList); + listDestroy(&pServer->globalInternalSharePolicyList); + +#if RS_STANDALONE + portSyncRwLockDestroy(pServer->pResLock); + portSyncRwLockDestroy(pServer->pTopLock); +#endif + + portSyncSpinlockDestroy(pServer->pShareMapLock); + portSyncRwLockDestroy(pServer->pClientListLock); + + portMemAllocatorRelease(pServer->pAllocator); + + pServer->bConstructed = NV_FALSE; + + return NV_OK; +} + +static +NV_STATUS +_serverFreeClient_underlock +( + RsServer *pServer, + RsClient *pClient +) +{ + CLIENT_ENTRY *pClientEntry = NULL; + NvHandle hClient; + NV_STATUS status; + PORT_RWLOCK *pLock = NULL; + + status =_serverFindClientEntry(pServer, pClient->hClient, NV_FALSE, &pClientEntry); + if (status != NV_OK) + { + return status; + } + + NV_ASSERT(pClientEntry->pClient != NULL); + + hClient = pClient->hClient; + pClientEntry->pClient = NULL; + pClientEntry->hClient = 0; + + clientFreeAccessBackRefs(pClient, pServer); + + objDelete(pClient); + + listRemoveFirstByValue(&pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK], &pClientEntry); + pLock = pClientEntry->pLock; + + RS_RWLOCK_RELEASE_WRITE_EXT(pLock, &pClientEntry->lockVal, NV_TRUE); + portSyncRwLockDestroy(pLock); + PORT_FREE(pServer->pAllocator, pClientEntry); + + return NV_OK; +} + +NV_STATUS +serverAllocDomain +( + RsServer *pServer, + NvU32 hParentDomain, + ACCESS_CONTROL *pAccessControl, + NvHandle *phDomain +) +{ + return NV_OK; +} + +NV_STATUS +serverFreeDomain +( + RsServer *pServer, + NvHandle hDomain +) +{ + NvU32 bucket; + for (bucket = 0; bucket < RS_CLIENT_HANDLE_BUCKET_COUNT; bucket ++) + { + RsClientList *pClientList = &(pServer->pClientSortedList[bucket]); + CLIENT_ENTRY **ppClientEntry = listHead(pClientList); + while (ppClientEntry != NULL) + { + CLIENT_ENTRY *pClientEntry = *ppClientEntry; + RS_CLIENT_FREE_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + if (pClientEntry == NULL) + { + ppClientEntry = listNext(pClientList, ppClientEntry); + continue; + } + params.hClient = pClientEntry->hClient; + + serverFreeClient(pServer, ¶ms); + ppClientEntry = listHead(pClientList); + } + } + return NV_OK; +} + +NV_STATUS serverValidate +( + RsServer *pServer, + NvU32 hDomain, + NvHandle hClient +) +{ + return NV_OK; +} + +NV_STATUS +serverValidateAlloc +( + RsServer *pServer, + NvU32 hDomain, + NvU32 externalClassId +) +{ + // Placeholder for allocation validation + return NV_OK; +} + +NV_STATUS +serverAllocClient +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + NvHandle hClient = 0; + RsClient *pClient = NULL; + CLIENT_ENTRY *pClientEntry = NULL; + NvBool bLockedClient = NV_FALSE; + + if (!pServer->bConstructed) + { + status = NV_ERR_NOT_READY; + goto done; + } + + // RS-TODO Assert that the RW top lock is held + + hClient = pParams->hClient; +#if !(RS_COMPATABILITY_MODE) + if (hClient != 0) + { + // Fail if the server supplied a client id + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } +#endif + + status = _serverCreateEntryAndLockForNewClient(pServer, &hClient, !!(pParams->allocState & ALLOC_STATE_INTERNAL_CLIENT_HANDLE), &pClientEntry); + + 
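+    //
+    // Illustrative example, assuming the 0xC1D/0xC1E handle bases sketched in
+    // the format diagram above:
+    //   NvHandle h   = CLIENT_ENCODEHANDLE(0x00042);  // -> 0xC1D00042
+    //   NvU32    idx = CLIENT_DECODEHANDLE(h);        // -> 0x00042
+    //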
if (status != NV_OK) + { + goto done; + } + pParams->hClient = hClient; + pParams->hResource = hClient; + bLockedClient = NV_TRUE; + + status = resservClientFactory(pServer->pAllocator, pParams, &pClient); + if (NV_OK != status) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + pClientEntry->pClient = pClient; + + // Automatically allocate client proxy resource + status = clientAllocResource(pClient, pServer, pParams); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "Allocated hClient: %x\n", hClient); + portAtomicIncrementU32(&pServer->activeClientCount); + +done: + if (bLockedClient) + _serverUnlockClient(pServer, LOCK_ACCESS_WRITE, pParams->hClient); + + if ((status != NV_OK) && (status != NV_ERR_INSERT_DUPLICATE_NAME) && (hClient != 0)) + { + if (_serverFindClientEntry(pServer, hClient, NV_TRUE, &pClientEntry) == NV_OK) + { + listRemoveFirstByValue(&pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK], &pClientEntry); + portSyncRwLockDestroy(pClientEntry->pLock); + PORT_FREE(pServer->pAllocator, pClientEntry); + } + + if (pClient != NULL) + { + objDelete(pClient); + } + } + + return status; +} + +static +NV_STATUS +_serverFreeClient +( + RsServer *pServer, + RS_CLIENT_FREE_PARAMS *pParams +) +{ + NV_STATUS status; + NV_STATUS lockStatus; + NvU32 releaseFlags = 0; + RsClient *pClient; + + lockStatus = _serverLockClient(pServer, LOCK_ACCESS_WRITE, pParams->hClient, &pClient); + if (lockStatus != NV_OK) + { + status = NV_ERR_INVALID_CLIENT; + goto done; + } + releaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pResFreeParams->pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverFreeClient_underlock(pServer, pClient); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "Freeing hClient: %x\n", hClient); + portAtomicDecrementU32(&pServer->activeClientCount); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pResFreeParams->pLockInfo, &releaseFlags); + + if (releaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + _serverUnlockClient(pServer, LOCK_ACCESS_WRITE, pParams->hClient); + + return status; +} + +NV_STATUS +serverAllocResource +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pParams +) +{ + NV_STATUS status; + NvU32 releaseFlags = 0; + API_STATE *pApiState; + NvBool bClientAlloc = (pParams->externalClassId == NV01_ROOT || + pParams->externalClassId == NV01_ROOT_CLIENT || + pParams->externalClassId == NV01_ROOT_NON_PRIV); + LOCK_ACCESS_TYPE topLockAccess; + NvU32 initialLockState; + RS_LOCK_INFO *pLockInfo; + RsClient *pSecondClient = NULL; + NvHandle hSecondClient; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + initialLockState = pLockInfo->state; + + status = serverAllocApiCopyIn(pServer, pParams, &pApiState); + if (status != NV_OK) + return status; + + status = serverAllocResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + if ((status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags)) != NV_OK) + goto done; + + if (status == NV_OK) + { + if (bClientAlloc) + { + status = serverAllocClient(pServer, pParams); + } + else + { + status = serverLookupSecondClient(pParams, &hSecondClient); + + if (status != NV_OK) + goto done; + + if (hSecondClient == 0) + { + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + 
pParams->hClient, pLockInfo, + &releaseFlags, &pParams->pClient); + + if (status != NV_OK) + goto done; + + if (!pParams->pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + else + { + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, hSecondClient, + pLockInfo, &releaseFlags, + &pParams->pClient, &pSecondClient); + + if (status != NV_OK) + goto done; + + if (!pParams->pClient->bActive || !pSecondClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + + // The second client's usage is class-dependent and should be validated + // by the class's constructor + status = clientValidate(pParams->pClient, pParams->pSecInfo); + + if (status != NV_OK) + goto done; + + status = serverAllocResourceUnderLock(pServer, pParams); + } + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "hParent 0x%08x : hClass 0x%08x allocation failed\n", + pParams->hParent, pParams->externalClassId); + } + + // RS-TODO: Can this be moved before _ResLock? + status = serverAllocEpilogue_WAR(pServer, status, bClientAlloc, pParams); + +done: + + if (!bClientAlloc) + { + if (pSecondClient != NULL) + { + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, pSecondClient->hClient, + pLockInfo, &releaseFlags); + } + else + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + pLockInfo, &releaseFlags); + } + } + + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + // copyout as needed, being careful not to overwrite a useful status value + status = serverAllocApiCopyOut(pServer, status, pApiState); + + NV_ASSERT(pLockInfo->state == initialLockState); + + return status; +} + +#if RS_STANDALONE +// RS-TODO rename to UnderClientLock +NV_STATUS +serverAllocResourceUnderLock +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pParams +) +{ + NV_STATUS status; + RsClient *pClient = pParams->pClient; + NvHandle hResource = pParams->hResource; + NvU32 releaseFlags = 0; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientAssignResourceHandle(pClient, &hResource); + if (status != NV_OK) + goto done; + + pParams->hResource = hResource; + pParams->hParent = (pParams->hParent == 0) ? 
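+                       // When no parent handle is supplied, the new resource
+                       // is parented directly to the client itself.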
pParams->hClient : pParams->hParent; + status = clientAllocResource(pClient, pServer, pParams); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Allocated hResource %x with class %x\n", + // pParams->hClient, pParams->hResource, pParams->externalClassId); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + return status; +} +#endif + +NV_STATUS +clientUpdatePendingFreeList_IMPL +( + RsClient *pClient, + RsResourceRef *pTargetRef, + RsResourceRef *pReference, + NvBool bMove +) +{ + RsIndexIter it; + NvBool bInList = refPendingFree(pTargetRef, pClient); + RS_FREE_STACK *pFs = pClient->pFreeStack; + if (bMove) + { + if (pReference != pTargetRef) + { + // Basic circular dependency check + while (pFs != NULL) + { + RsResourceRef *pFsRef = pFs->pResourceRef; + NV_ASSERT_OR_GOTO(pFsRef != pTargetRef, done); + + pFs = pFs->pPrev; + } + } + + if (bInList) + listRemove(&pClient->pendingFreeList, pTargetRef); + listPrependExisting(&pClient->pendingFreeList, pTargetRef); + } + else if (!bInList) + { + listPrependExisting(&pClient->pendingFreeList, pTargetRef); + } + + // + // Recursively add children to the pending free list and move + // them to the front of the list + // + it = indexRefIterAll(&pTargetRef->childRefMap); + while (indexRefIterNext(&it)) + { + clientUpdatePendingFreeList(pClient, *it.pValue, pReference, NV_TRUE); + } + + // + // Recursively add dependencies to the pending free list and + // move them to the front of the list + // + it = indexRefIterAll(&pTargetRef->depRefMap); + while (indexRefIterNext(&it)) + { + clientUpdatePendingFreeList(pClient, *it.pValue, pReference, NV_TRUE); + } + + if (pTargetRef->pResource != NULL) + { + // Allow some objects to add more dependants here + resAddAdditionalDependants(pClient, pTargetRef->pResource, pReference); + } + +done: + return NV_OK; +} + +NV_STATUS +serverFreeClientList +( + RsServer *pServer, + NvHandle *phClientList, + NvU32 numClients, + NvU32 freeState, + API_SECURITY_INFO *pSecInfo +) +{ + NvU32 i, j; + + // + // Call serverFreeClient twice; first for high priority resources + // then again for remaining resources + // + for (i = 0; i < 2; ++i) + { + for (j = 0; j < numClients; ++j) + { + RS_CLIENT_FREE_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + + if (phClientList[j] == 0) + continue; + + params.hClient = phClientList[j]; + params.bHiPriOnly = (i == 0); + params.state = freeState; + params.pSecInfo = pSecInfo; + + serverFreeClient(pServer, ¶ms); + } + } + + return NV_OK; +} + +NV_STATUS +serverFreeResourceTree +( + RsServer *pServer, + RS_RES_FREE_PARAMS *pParams +) +{ + RsClient *pClient = NULL; + NV_STATUS status; + RsResourceRef *pResourceRef = NULL; + RsResourceRef *pTargetRef; + RsResourceRef *pFirstLowPriRef; + NvBool bHiPriOnly = pParams->bHiPriOnly; + NvBool bRecursive = NV_FALSE; + RS_FREE_STACK freeStack; + NvBool bPopFreeStack = NV_FALSE; + RS_LOCK_INFO *pLockInfo; + NvU32 initialLockState; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + initialLockState = pLockInfo->state; + + portMemSet(&freeStack, 0, sizeof(freeStack)); + + status = serverFreeResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) 
+ goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + if (pClient->pFreeStack != NULL) + freeStack.pPrev = pClient->pFreeStack; + pClient->pFreeStack = &freeStack; + bPopFreeStack = NV_TRUE; + + status = clientGetResourceRef(pClient, pParams->hResource, &pResourceRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "hObject 0x%x not found for client 0x%x\n", + pParams->hResource, + pParams->hClient); +#if (RS_COMPATABILITY_MODE) + status = NV_OK; +#endif + goto done; + } + pParams->pResourceRef = pResourceRef; + freeStack.pResourceRef = pResourceRef; + + if (pParams->bInvalidateOnly && pResourceRef->bInvalidated) + { + status = NV_OK; + goto done; + } + + bRecursive = (freeStack.pPrev != NULL); + status = clientUpdatePendingFreeList(pClient, pResourceRef, pResourceRef, bRecursive); + if (status != NV_OK) + goto done; + + clientPostProcessPendingFreeList(pClient, &pFirstLowPriRef); + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "PENDING FREE LIST START (0x%x)\n", pClient->hClient); + NV_PRINTF(LEVEL_INFO, " _HI_PRIORITY_:\n"); + pTargetRef = listHead(&pClient->pendingFreeList); + while (pTargetRef != NULL) + { + if (pTargetRef == pFirstLowPriRef) + NV_PRINTF(LEVEL_INFO, " _LO_PRIORITY_:\n"); + + NV_PRINTF(LEVEL_INFO, " 0x%08x [%04x]\n", + pTargetRef->hResource, + pTargetRef->externalClassId); + pTargetRef = listNext(&pClient->pendingFreeList, pTargetRef); + } + NV_PRINTF(LEVEL_INFO, "PENDING FREE LIST END (0x%x)\n", pClient->hClient); + } + + while ((pTargetRef = listHead(&pClient->pendingFreeList)) != NULL) + { + NvBool bInvalidateOnly = NV_TRUE; + RS_FREE_STACK *pFs = &freeStack; + RS_RES_FREE_PARAMS_INTERNAL freeParams; + NvHandle hTarget = pTargetRef->hResource; + + if (bHiPriOnly && pTargetRef == pFirstLowPriRef) + goto done; + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "(%08x, %08x)\n", pClient->hClient, hTarget); + } + + if (hTarget == pParams->hResource) + { + // Target resource should always be the last one to be freed + NV_ASSERT((listCount(&pClient->pendingFreeList) == 1) || bRecursive); + status = serverFreeResourceTreeUnderLock(pServer, pParams); + break; + } + + while (pFs != NULL) + { + RsResourceRef *pFsRef = pFs->pResourceRef; + if (refHasAncestor(pTargetRef, pFsRef)) + { + bInvalidateOnly = pParams->bInvalidateOnly; + break; + } + pFs = pFs->pPrev; + } + + serverInitFreeParams_Recursive(pClient->hClient, hTarget, pLockInfo, &freeParams); + freeParams.pResourceRef = pTargetRef; + freeParams.bInvalidateOnly = bInvalidateOnly; + freeParams.pSecInfo = pParams->pSecInfo; + status = serverFreeResourceTreeUnderLock(pServer, &freeParams); + NV_ASSERT(status == NV_OK); + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "(%08x, %08x) status=0x%x\n", + pClient->hClient, + hTarget, + status); + } + } + + if (bPopFreeStack) + { + if (pClient != NULL) + pClient->pFreeStack = freeStack.pPrev; + bPopFreeStack = NV_FALSE; + } + + if (pParams->hClient == pParams->hResource) + { + pClient->bActive = NV_FALSE; + } + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + + if (pParams->hClient == pParams->hResource) + { + NvBool bReAcquireLock = (topLockAccess != LOCK_ACCESS_WRITE); + RS_CLIENT_FREE_PARAMS_INTERNAL clientFreeParams; + portMemSet(&clientFreeParams, 0, 
sizeof(clientFreeParams)); + clientFreeParams.pResFreeParams = pParams; + clientFreeParams.hClient = pParams->hClient; + + if (bReAcquireLock) + { + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + serverTopLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + _serverFreeClient(pServer, &clientFreeParams); + serverTopLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + initialLockState &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + } + else + { + _serverFreeClient(pServer, &clientFreeParams); + } + + pClient = NULL; + } + +done: + if (bPopFreeStack) + { + if (pClient != NULL) + pClient->pFreeStack = freeStack.pPrev; + bPopFreeStack = NV_FALSE; + } + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + // + // Log any changes to lock state, but ignore the ALLOW_RECURSIVE_LOCKS flag + // as that can be set by serverUpdateLockFlagsForFree() when dealing with + // RPCs to GSP; this would have already printed the relevant message. + // + NV_ASSERT((pLockInfo->state == initialLockState) || + (pLockInfo->state == (initialLockState | RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK))); + + return status; +} + +NV_STATUS +serverControl +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS *pParams +) +{ + NV_STATUS status; + RsClient *pClient; + RsResourceRef *pResourceRef = NULL; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + LOCK_ACCESS_TYPE access = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverControlLookupLockFlags(pServer, RS_LOCK_TOP, pParams, pParams->pCookie, &access); + if (status != NV_OK) + goto done; + + if (pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyIn(pServer, pParams, pParams->pCookie); + if (status != NV_OK) + goto done; + } + + status = serverTopLock_Prologue(pServer, access, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, &pParams->secInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hObject, &pResourceRef); + if (status != NV_OK) + goto done; + pParams->pResourceRef = pResourceRef; + + if (pResourceRef->bInvalidated || pResourceRef->pResource == NULL) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + pLockInfo->flags |= RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK; + + status = serverSessionLock_Prologue(LOCK_ACCESS_WRITE, pResourceRef, + pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + if (pResourceRef->pSession != NULL) + { + if (!pResourceRef->pSession->bValid) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pResourceRef = pResourceRef; + callContext.pClient = pClient; + callContext.secInfo = pParams->secInfo; + callContext.pServer = pServer; + callContext.pControlParams = pParams; + callContext.pLockInfo = pParams->pLockInfo; + + // RS-TODO removeme + pParams->pLegacyParams = pParams; + + if (pParams->hClient == pParams->hObject) + { + pParams->hParent = pParams->hClient; + } + else + { + 
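+        // A non-root object reports the handle of its immediate parent.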
pParams->hParent = pResourceRef->pParentRef->hResource; + } + pLockInfo->pContextRef = pResourceRef->pParentRef; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = resControl(pResourceRef->pResource, &callContext, pParams); + resservRestoreTlsCallContext(pOldContext); + +done: + + serverSessionLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, access, pLockInfo, &releaseFlags); + + if (pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyOut(pServer, pParams, pParams->pCookie, status); + } + + return status; +} + +NV_STATUS +serverCopyResource +( + RsServer *pServer, + RS_RES_DUP_PARAMS *pParams +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + RsClient *pClientSrc; + RsClient *pClientDst; + RsResourceRef *pResourceRefSrc; + LOCK_ACCESS_TYPE topLockAccess; + + NvHandle hClientSrc = pParams->hClientSrc; + NvHandle hClientDst = pParams->hClientDst; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverCopyResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientSrc, hClientDst, + pLockInfo, &releaseFlags, + &pClientSrc, &pClientDst); + if (status != NV_OK) + goto done; + + if (!pClientSrc->bActive || !pClientDst->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClientDst, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClientSrc, pParams->hResourceSrc, &pResourceRefSrc); + if (status != NV_OK) + goto done; + + if (pResourceRefSrc->bInvalidated) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + if (!resCanCopy(pResourceRefSrc->pResource)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + status = clientAssignResourceHandle(pClientDst, &pParams->hResourceDst); + if (status != NV_OK) + goto done; + + pParams->pSrcClient = pClientSrc; + pParams->pSrcRef = pResourceRefSrc; + + status = serverUpdateLockFlagsForCopy(pServer, pParams); + if (status != NV_OK) + return status; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientCopyResource(pClientDst, pServer, pParams); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Copied hResource: %x from hClientSrc: %x hResourceSrc: %x\n", + // hClientDst, hResourceDst, hClientSrc, hResourceSrc); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientSrc, hClientDst, + pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +/** + * Special case of serverShareResourceAccess for sharing with a specific client + * Requires two client locks, so separated into a different function from the normal + * @param[in] pServer + * @param[in] pParams Parameters passed into share function + */ +static NV_STATUS +_serverShareResourceAccessClient +( + RsServer *pServer, + 
RS_RES_SHARE_PARAMS *pParams +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + RsClient *pClientOwner; + RsClient *pClientTarget; + RsResourceRef *pResourceRef; + LOCK_ACCESS_TYPE topLockAccess; + + NvHandle hClientOwner = pParams->hClient; + NvHandle hClientTarget = pParams->pSharePolicy->target; + + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + status = serverShareResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientOwner, hClientTarget, + pLockInfo, &releaseFlags, + &pClientOwner, &pClientTarget); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClientOwner, pParams->hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + if (pResourceRef->bInvalidated) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClientOwner; + callContext.pResourceRef = pResourceRef; + callContext.secInfo = *pParams->pSecInfo; + callContext.pLockInfo = pParams->pLockInfo; + resservSwapTlsCallContext(&pOldContext, &callContext); + + if (hClientOwner == hClientTarget) + { + // + // Special case: RS_SHARE_TYPE_CLIENT with own client + // Allows the caller to directly modify the access map of their object + // + status = clientShareResourceTargetClient(pClientOwner, pResourceRef, pParams->pSharePolicy, &callContext); + if (status != NV_OK) + goto restore_context; + } + + // Add backref into pClientTarget to prevent stale client handles + status = clientAddAccessBackRef(pClientTarget, pResourceRef); + if (status != NV_OK) + goto restore_context; + + status = clientShareResource(pClientOwner, pResourceRef, pParams->pSharePolicy, &callContext); + if (status != NV_OK) + goto restore_context; + +restore_context: + resservRestoreTlsCallContext(pOldContext); + + // NV_PRINTF(LEVEL_INFO, "hClientOwner %x: Shared hResource: %x with hClientTarget: %x\n", + // hClientOwner, pParams->hResource, hClientTarget); + +done: + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientOwner, hClientTarget, + pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + + +NV_STATUS +serverShareResourceAccess +( + RsServer *pServer, + RS_RES_SHARE_PARAMS *pParams +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + RsClient *pClient; + RsResourceRef *pResourceRef; + NvU16 shareType; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + LOCK_ACCESS_TYPE topLockAccess; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + if (!pServer->bRsAccessEnabled) + return NV_ERR_FEATURE_NOT_ENABLED; + + if (pParams->pSharePolicy == NULL) + return NV_ERR_INVALID_ARGUMENT; + + shareType = pParams->pSharePolicy->type; + if (shareType >= RS_SHARE_TYPE_MAX) + return NV_ERR_INVALID_ARGUMENT; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + if (shareType == RS_SHARE_TYPE_CLIENT) + { + // Special case: This requires two locks, so it has its own function + return _serverShareResourceAccessClient(pServer, pParams); + } + + status = 
serverShareResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + if (pResourceRef->bInvalidated) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.secInfo = *pParams->pSecInfo; + callContext.pLockInfo = pParams->pLockInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = clientShareResource(pClient, pResourceRef, pParams->pSharePolicy, &callContext); + resservRestoreTlsCallContext(pOldContext); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Shared hResource: %x\n", hClient, pParams->hResource); + +done: + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverMap +( + RsServer *pServer, + NvHandle hClient, + NvHandle hResource, + RS_CPU_MAP_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsClient *pClient; + RsResourceRef *pResourceRef; + RsResourceRef *pContextRef = NULL; + RsResource *pResource; + RsCpuMapping *pCpuMapping = NULL; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_GOTO(pLockInfo != NULL, done); + + status = serverMapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + pResource = pResourceRef->pResource; + if (pResource == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = serverMap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + if (pParams->hContext != 0) + { + status = clientGetResourceRef(pClient, pParams->hContext, &pContextRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "hClient %x: Cannot find hContext: 0x%x\n", pClient->hClient, pParams->hContext); + goto done; + } + } + + status = refAddMapping(pResourceRef, pParams, pContextRef, &pCpuMapping); + if (status != NV_OK) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + 
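+    //
+    // The populated CALL_CONTEXT is swapped into thread-local storage below
+    // (resservSwapTlsCallContext), which appears to let nested resource code
+    // retrieve the invoking client and mapping parameters without explicit
+    // plumbing through every call.
+    //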
callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pParams->pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = resMap(pResource, &callContext, pParams, pCpuMapping); + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Mapped hResource: 0x%x hContext: %x at addr: " NvP64_fmt "\n", + // hClient, hResource, pParams->hContext, pCpuMapping->pAddress); + + if (pParams->ppCpuVirtAddr != NULL) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + +done: + if (status != NV_OK) + { + if (pCpuMapping != NULL) + refRemoveMapping(pResourceRef, pCpuMapping); + } + + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverUnmap +( + RsServer *pServer, + NvHandle hClient, + NvHandle hResource, + RS_CPU_UNMAP_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + RsClient *pClient; + RsResourceRef *pResourceRef; + RsResource *pResource; + RsCpuMapping *pCpuMapping; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_GOTO(pLockInfo != NULL, done); + + status = serverUnmapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + pResource = pResourceRef->pResource; + if (pResource == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = serverUnmap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + status = refFindCpuMappingWithFilter(pResourceRef, + pParams->pLinearAddress, + pParams->fnFilter, + &pCpuMapping); + if (status != NV_OK) + goto done; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientUnmapMemory(pClient, pResourceRef, pLockInfo, &pCpuMapping, pParams->pSecInfo); + +done: + serverUnmap_Epilogue(pServer, pParams); + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverInterMap +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pParams +) +{ + RsClient *pClient; + RsResourceRef *pMapperRef; + RsResourceRef *pMappableRef; + RsResourceRef *pContextRef; + RsInterMapping *pMapping = NULL; + LOCK_ACCESS_TYPE topLockAccess; + + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvBool bRestoreCallContext = NV_FALSE; + + 
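+    //
+    // An inter-mapping is identified by its (mapper, mappable, context) triple
+    // plus dmaOffset. The tracking entry is created up front via
+    // refAddInterMapping so that a failure from clientInterMap can be unwound
+    // in the done: path below.
+    //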
NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pParams->length == 0) + return NV_ERR_INVALID_LIMIT; + + status = serverInterMapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMapper, &pMapperRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hDevice, &pContextRef); + if (status != NV_OK) + goto done; + + pLockInfo->pContextRef = pContextRef; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pMapperRef; + callContext.pContextRef = pContextRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + bRestoreCallContext = NV_TRUE; + + status = refAddInterMapping(pMapperRef, pMappableRef, pContextRef, &pMapping); + if (status != NV_OK) + goto done; + + // serverResLock_Prologue should be called during serverInterMap_Prologue + status = serverInterMap_Prologue(pServer, pMapperRef, pMappableRef, pParams, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientInterMap(pClient, pMapperRef, pMappableRef, pParams); + if (status != NV_OK) + goto done; + + pMapping->flags = pParams->flags; + pMapping->dmaOffset = pParams->dmaOffset; + pMapping->pMemDesc = pParams->pMemDesc; + +done: + serverInterMap_Epilogue(pServer, pParams, &releaseFlags); + + if (bRestoreCallContext) + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + { + if (pMapping != NULL) + refRemoveInterMapping(pMapperRef, pMapping); + } + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverInterUnmap +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RsClient *pClient; + RsResourceRef *pMapperRef; + RsResourceRef *pMappableRef; + RsResourceRef *pContextRef; + RsInterMapping *pMapping; + LOCK_ACCESS_TYPE topLockAccess; + + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvBool bRestoreCallContext = NV_FALSE; + + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverInterUnmapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, 
pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMapper, &pMapperRef); + if (status != NV_OK) + goto done; + + if ((pMapperRef->bInvalidated) && (pMapperRef->pResource == NULL)) + { + // Object has already been freed and unmapped + goto done; + } + + status = clientGetResourceRef(pClient, pParams->hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hDevice, &pContextRef); + if (status != NV_OK) + goto done; + + status = refFindInterMapping(pMapperRef, pMappableRef, pContextRef, pParams->dmaOffset, &pMapping); + if (status != NV_OK) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pMapperRef; + callContext.pContextRef = pContextRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + if (pLockInfo->pContextRef == NULL) + pLockInfo->pContextRef = pContextRef; + + resservSwapTlsCallContext(&pOldContext, &callContext); + bRestoreCallContext = NV_TRUE; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = serverInterUnmap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + clientInterUnmap(pClient, pMapperRef, pParams); + + refRemoveInterMapping(pMapperRef, pMapping); + +done: + serverInterUnmap_Epilogue(pServer, pParams); + + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + if (bRestoreCallContext) + resservRestoreTlsCallContext(pOldContext); + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverAcquireClient +( + RsServer *pServer, + NvHandle hClient, + LOCK_ACCESS_TYPE lockAccess, + RsClient **ppClient +) +{ + NV_STATUS status; + RsClient *pClient; + + // NV_PRINTF(LEVEL_INFO, "Acquiring hClient %x\n", hClient); + status = _serverLockClient(pServer, lockAccess, hClient, &pClient); + if (status != NV_OK) + return status; + + if (ppClient != NULL) + *ppClient = pClient; + + return NV_OK; +} + +NV_STATUS +serverGetClientUnderLock +( + RsServer *pServer, + NvHandle hClient, + RsClient **ppClient +) +{ + NV_STATUS status; + RsClient *pClient; + + // NV_PRINTF(LEVEL_INFO, "Acquiring hClient %x (without lock)\n", hClient); + status = _serverFindClient(pServer, hClient, &pClient); + if (status != NV_OK) + { + return status; + } + + if (ppClient != NULL) + *ppClient = pClient; + + return NV_OK; +} + +NV_STATUS +serverReleaseClient +( + RsServer *pServer, + LOCK_ACCESS_TYPE lockAccess, + RsClient *pClient +) +{ + NV_STATUS status; + status = _serverUnlockClient(pServer, lockAccess, pClient->hClient); + return status; +} + +static +NV_STATUS +_serverFindClientEntry +( + RsServer *pServer, + NvHandle hClient, + NvBool bFindPartial, + CLIENT_ENTRY **ppClientEntry +) +{ + RsClientList *pClientList = &(pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK]); + CLIENT_ENTRY **ppClientEntryLoop = listHead(pClientList); + + if (ppClientEntry != NULL) + *ppClientEntry = NULL; + + while (ppClientEntryLoop != NULL) + { + CLIENT_ENTRY *pClientEntry = *ppClientEntryLoop; + ppClientEntryLoop = listNext(pClientList, ppClientEntryLoop); + if (pClientEntry == NULL) + 
{
+            continue;
+        }
+        else if (pClientEntry->hClient == hClient)
+        {
+            // Client may not have finished constructing yet
+            if (pClientEntry->pClient == NULL && !bFindPartial)
+                return NV_ERR_INVALID_OBJECT_HANDLE;
+
+            if (ppClientEntry != NULL)
+                *ppClientEntry = pClientEntry;
+
+            return NV_OK;
+        }
+        else if (pClientEntry->hClient > hClient)
+        {
+            // Not found in the sorted list
+            return NV_ERR_INVALID_OBJECT;
+        }
+    }
+
+    return NV_ERR_INVALID_OBJECT_HANDLE;
+}
+
+static
+NV_STATUS
+_serverFindClient
+(
+    RsServer *pServer,
+    NvHandle hClient,
+    RsClient **ppClient
+)
+{
+    CLIENT_ENTRY *pClientEntry;
+    NV_STATUS status;
+    status = _serverFindClientEntry(pServer, hClient, NV_FALSE, &pClientEntry);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    *ppClient = pClientEntry->pClient;
+    return NV_OK;
+}
+
+static
+NV_STATUS
+_serverInsertClientEntry
+(
+    RsServer *pServer,
+    CLIENT_ENTRY *pClientEntry,
+    CLIENT_ENTRY **ppClientNext
+)
+{
+    RsClientList *pClientList;
+    CLIENT_ENTRY **ppClientEntry;
+    NvHandle hClient = pClientEntry->hClient;
+
+    if (hClient == 0)
+    {
+        return NV_ERR_INVALID_OBJECT_HANDLE;
+    }
+
+    pClientList = &(pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK]);
+
+    if (ppClientNext == NULL)
+    {
+        ppClientEntry = (CLIENT_ENTRY **)listAppendNew(pClientList);
+    }
+    else
+    {
+        ppClientEntry = (CLIENT_ENTRY **)listInsertNew(pClientList, ppClientNext);
+    }
+    if (ppClientEntry == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+    *ppClientEntry = pClientEntry;
+
+    return NV_OK;
+}
+
+static
+NV_STATUS
+_serverFindNextAvailableClientHandleInBucket
+(
+    RsServer *pServer,
+    NvHandle hClientIn,
+    NvHandle *phClientOut,
+    CLIENT_ENTRY ***pppClientNext
+)
+{
+    NvHandle hPrefixIn, hPrefixOut;
+    RsClientList *pClientList = &(pServer->pClientSortedList[hClientIn & RS_CLIENT_HANDLE_BUCKET_MASK]);
+    NvHandle hClientOut = hClientIn;
+    CLIENT_ENTRY **ppClientEntry = listHead(pClientList);
+
+    *pppClientNext = NULL;
+    if (ppClientEntry == NULL)
+    {
+        *phClientOut = hClientOut;
+        return NV_OK;
+    }
+
+    //
+    // The list is sorted by client handle in increasing order. Walk it to
+    // find either the insertion point for this handle or, if the handle is
+    // already taken, the next free handle in the same bucket.
+    //
+    while (ppClientEntry != NULL)
+    {
+        CLIENT_ENTRY *pClientEntry = *ppClientEntry;
+        if ((pClientEntry == NULL) || (pClientEntry->hClient < hClientOut))
+        {
+            ppClientEntry = listNext(pClientList, ppClientEntry);
+            continue;
+        }
+        else if (pClientEntry->hClient == hClientOut)
+        {
+            // Advance to the next handle value that maps to the same bucket
+            hClientOut = hClientOut + RS_CLIENT_HANDLE_BUCKET_COUNT;
+            NV_ASSERT((hClientIn & RS_CLIENT_HANDLE_BUCKET_MASK) == (hClientOut & RS_CLIENT_HANDLE_BUCKET_MASK));
+        }
+        else // all remaining entries have pClientEntry->hClient > hClientOut
+        {
+            break;
+        }
+        ppClientEntry = listNext(pClientList, ppClientEntry);
+    }
+
+    hPrefixIn = hClientIn & ~RS_CLIENT_HANDLE_DECODE_MASK;
+    hPrefixOut = hClientOut & ~RS_CLIENT_HANDLE_DECODE_MASK;
+    if (hPrefixIn != hPrefixOut)
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    *phClientOut = hClientOut;
+    if (ppClientEntry != NULL)
+    {
+        *pppClientNext = ppClientEntry;
+    }
+    return NV_OK;
+}
+
+
+static
+NV_STATUS
+_serverCreateEntryAndLockForNewClient
+(
+    RsServer *pServer,
+    NvHandle *phClient,
+    NvBool bInternalHandle,
+    CLIENT_ENTRY **ppClientEntry
+)
+{
+    CLIENT_ENTRY *pClientEntry;
+    NV_STATUS status = NV_OK;
+    NvHandle hClient = *phClient;
+    CLIENT_ENTRY **ppClientNext = NULL;
+    PORT_RWLOCK *pLock = NULL;
+
+    if (hClient == 0)
+    {
+        NvU32 clientHandleIndex = pServer->clientCurrentHandleIndex;
+        NvU16 clientHandleBucketInit = clientHandleIndex & RS_CLIENT_HANDLE_BUCKET_MASK;
+        do
+        {
+            hClient = bInternalHandle
+                          ? CLIENT_ENCODEHANDLE_INTERNAL(pServer->internalHandleBase, clientHandleIndex)
+                          : CLIENT_ENCODEHANDLE(clientHandleIndex);
+
+            clientHandleIndex++;
+            if (clientHandleIndex > RS_CLIENT_HANDLE_DECODE_MASK)
+            {
+                // Handle index space exhausted; wrap around to zero
+                clientHandleIndex = 0;
+            }
+            if (clientHandleBucketInit == (clientHandleIndex & RS_CLIENT_HANDLE_BUCKET_MASK))
+            {
+                // We wrapped through every bucket without finding an available handle (very unlikely)
+                status = NV_ERR_INSUFFICIENT_RESOURCES;
+                goto _serverCreateEntryAndLockForNewClient_exit;
+            }
+        }
+        while (_serverFindNextAvailableClientHandleInBucket(pServer, hClient, &hClient, &ppClientNext) != NV_OK);
+
+        pServer->clientCurrentHandleIndex = clientHandleIndex;
+    }
+    else
+    {
+        NvHandle hClientOut = 0;
+
+#if !(RS_COMPATABILITY_MODE)
+        // Re-encode handle so it matches expected format
+        NvU32 clientIndex = CLIENT_DECODEHANDLE(hClient);
+        hClient = bInternalHandle
+            ? CLIENT_ENCODEHANDLE_INTERNAL(pServer->internalHandleBase, clientIndex)
+            : CLIENT_ENCODEHANDLE(clientIndex);
+#endif
+
+        if (_serverFindClientEntry(pServer, hClient, NV_FALSE, NULL) == NV_OK)
+        {
+            // The handle already exists
+            status = NV_ERR_INSERT_DUPLICATE_NAME;
+            goto _serverCreateEntryAndLockForNewClient_exit;
+        }
+        status = _serverFindNextAvailableClientHandleInBucket(pServer, hClient, &hClientOut, &ppClientNext);
+        if (status != NV_OK)
+        {
+            goto _serverCreateEntryAndLockForNewClient_exit;
+        }
+        if (hClient != hClientOut)
+        {
+            // This should not happen, as we checked for duplicates already
+            NV_PRINTF(LEVEL_ERROR, "Client handle mismatch: %x != %x.\n", hClient, hClientOut);
+            status = NV_ERR_INVALID_STATE;
+            goto _serverCreateEntryAndLockForNewClient_exit;
+        }
+    }
+
+    pLock = portSyncRwLockCreate(pServer->pAllocator);
+    if (pLock == NULL)
+    {
+        status = NV_ERR_INSUFFICIENT_RESOURCES;
+        goto _serverCreateEntryAndLockForNewClient_exit;
+    }
+
+    // At this point we have an hClient and know in which bucket, and where in that bucket, to insert the entry.
+    pClientEntry = (CLIENT_ENTRY *)PORT_ALLOC(pServer->pAllocator, sizeof(CLIENT_ENTRY));
+    if (pClientEntry == NULL)
+    {
+        status = NV_ERR_INSUFFICIENT_RESOURCES;
+        goto _serverCreateEntryAndLockForNewClient_exit;
+    }
+    portMemSet(pClientEntry, 0, sizeof(*pClientEntry));
+
+    pClientEntry->hClient = hClient;
+    pClientEntry->pLock = pLock;
+
+
+    RS_LOCK_VALIDATOR_INIT(&pClientEntry->lockVal,
+                           bInternalHandle ? LOCK_VAL_LOCK_CLASS_CLIENT_INTERNAL
+                                           : LOCK_VAL_LOCK_CLASS_CLIENT,
+                           hClient);
+
+    status = _serverInsertClientEntry(pServer, pClientEntry, ppClientNext);
+    if (status != NV_OK)
+    {
+        PORT_FREE(pServer->pAllocator, pClientEntry);
+        goto _serverCreateEntryAndLockForNewClient_exit;
+    }
+
+    RS_RWLOCK_ACQUIRE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal);
+    pClientEntry->lockOwnerTid = portThreadGetCurrentThreadId();
+
+    *phClient = hClient;
+    *ppClientEntry = pClientEntry;
+
+_serverCreateEntryAndLockForNewClient_exit:
+    if (status != NV_OK && pLock != NULL)
+        portSyncRwLockDestroy(pLock);
+
+    return status;
+}
+
+
+static
+NV_STATUS
+_serverLockClient
+(
+    RsServer *pServer,
+    LOCK_ACCESS_TYPE access,
+    NvHandle hClient,
+    RsClient **ppClient
+)
+{
+    RsClient *pClient;
+    CLIENT_ENTRY *pClientEntry = NULL;
+    NV_STATUS status = NV_OK;
+
+    status = _serverFindClientEntry(pServer, hClient, NV_FALSE, &pClientEntry);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    nv_speculation_barrier();
+
+    if (pClientEntry->pLock == NULL)
+    {
+        return NV_ERR_INVALID_OBJECT_HANDLE;
+    }
+
+    if (access == LOCK_ACCESS_READ)
+    {
+        RS_RWLOCK_ACQUIRE_READ(pClientEntry->pLock, &pClientEntry->lockVal);
+    }
+    else
+    {
+        RS_RWLOCK_ACQUIRE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal);
+        pClientEntry->lockOwnerTid = portThreadGetCurrentThreadId();
+    }
+
+    // The client may have been freed while we waited for its lock, so check
+    // for NULL before dereferencing
+    pClient = pClientEntry->pClient;
+    if ((pClient == NULL) || (pClient->hClient != hClient))
+    {
+        if (access == LOCK_ACCESS_READ)
+            RS_RWLOCK_RELEASE_READ(pClientEntry->pLock, &pClientEntry->lockVal);
+        else
+            RS_RWLOCK_RELEASE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal);
+
+        return NV_ERR_INVALID_OBJECT;
+    }
+
+    NV_ASSERT(pClient->hClient == pClientEntry->hClient);
+
+    if (ppClient != NULL)
+        *ppClient = pClient;
+
+    return NV_OK;
+}
+
+static
+NV_STATUS
+_serverLockClientWithLockInfo
+(
+    RsServer *pServer,
+    LOCK_ACCESS_TYPE access,
+    NvHandle hClient,
+    RS_LOCK_INFO *pLockInfo,
+    NvU32 *pReleaseFlags,
+    RsClient **ppClient
+)
+{
+    NV_STATUS status;
+    if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_CLIENT_LOCK))
+    {
+        status = _serverFindClient(pServer, hClient, ppClient);
+        return status;
+    }
+
+    if ((pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED))
+    {
+        CLIENT_ENTRY *pClientEntry;
+        NV_ASSERT_OK_OR_RETURN(_serverFindClientEntry(pServer, hClient, NV_FALSE, &pClientEntry));
+        NV_ASSERT_OR_RETURN(pLockInfo->pClient != NULL, NV_ERR_INVALID_STATE);
+        NV_ASSERT_OR_RETURN(pLockInfo->pClient == pClientEntry->pClient, NV_ERR_INVALID_STATE);
+        NV_ASSERT_OR_RETURN(pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId(), NV_ERR_INVALID_STATE);
+
+        *ppClient = pLockInfo->pClient;
+        return NV_OK;
+    }
+
+    status = _serverLockClient(pServer, access, hClient, ppClient);
+    if (status != NV_OK)
+        return status;
+
+    pLockInfo->state |= RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED;
+    pLockInfo->pClient = *ppClient;
+    *pReleaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK;
+
+    return NV_OK;
+}
+
+static
+NV_STATUS
+_serverLockDualClientWithLockInfo
+(
+    RsServer *pServer,
+    LOCK_ACCESS_TYPE access,
+    NvHandle hClient1,
+    NvHandle hClient2,
+    RS_LOCK_INFO *pLockInfo,
+    NvU32 *pReleaseFlags,
+    RsClient **ppClient1,
+    RsClient **ppClient2
+)
+{
+    NV_STATUS status;
+
+    // 1st and 2nd in handle order, as opposed to fixed 1 and 2
+    NvHandle hClient1st;
+    NvHandle hClient2nd;
+    RsClient **ppClient1st;
+    RsClient **ppClient2nd;
+
+    *ppClient1 = NULL;
+    *ppClient2 = NULL;
+
+    if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_CLIENT_LOCK))
+    {
+        status = _serverFindClient(pServer, hClient1,
ppClient1); + if (status != NV_OK) + return status; + + if (hClient1 == hClient2) + { + *ppClient2 = *ppClient1; + } + else + { + status = _serverFindClient(pServer, hClient2, ppClient2); + } + + return status; + } + + if (hClient1 <= hClient2) + { + hClient1st = hClient1; + ppClient1st = ppClient1; + + hClient2nd = hClient2; + ppClient2nd = ppClient2; + } + else + { + hClient1st = hClient2; + ppClient1st = ppClient2; + + hClient2nd = hClient1; + ppClient2nd = ppClient1; + } + + if ((pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED)) + { + CLIENT_ENTRY *pClientEntry, *pSecondClientEntry; + + NV_ASSERT_OR_RETURN(pLockInfo->pSecondClient != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pLockInfo->pClient->hClient == hClient1st, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pLockInfo->pSecondClient->hClient == hClient2nd, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN(_serverFindClientEntry(pServer, hClient1st, NV_FALSE, &pClientEntry)); + NV_ASSERT_OR_RETURN(pClientEntry->pClient == pLockInfo->pClient, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId(), NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN(_serverFindClientEntry(pServer, hClient2nd, NV_FALSE, &pSecondClientEntry)); + NV_ASSERT_OR_RETURN(pSecondClientEntry->pClient == pLockInfo->pSecondClient, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pSecondClientEntry->lockOwnerTid == pClientEntry->lockOwnerTid, NV_ERR_INVALID_STATE); + + *ppClient1st = pLockInfo->pClient; + *ppClient2nd = pLockInfo->pSecondClient; + return NV_OK; + } + + status = _serverLockClient(pServer, access, hClient1st, ppClient1st); + if (status != NV_OK) + return status; + + if (hClient1 == hClient2) + { + *ppClient2nd = *ppClient1st; + } + else + { + status = _serverLockClient(pServer, access, hClient2nd, ppClient2nd); + if (status != NV_OK) + { + _serverUnlockClient(pServer, access, hClient1st); + return status; + } + } + + pLockInfo->state |= RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = *ppClient1st; + pLockInfo->pSecondClient = *ppClient2nd; + *pReleaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + + return NV_OK; +} + +static +NV_STATUS +_serverUnlockClient +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient +) +{ + CLIENT_ENTRY *pClientEntry = NULL; + NV_STATUS status = NV_OK; + + status =_serverFindClientEntry(pServer, hClient, NV_TRUE, &pClientEntry); + if (status != NV_OK) + { + return status; + } + + if (access == LOCK_ACCESS_READ) + { + RS_RWLOCK_RELEASE_READ(pClientEntry->pLock, &pClientEntry->lockVal); + } + else + { + pClientEntry->lockOwnerTid = ~0; + RS_RWLOCK_RELEASE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + } + + return NV_OK; +} + +static +NV_STATUS +_serverUnlockClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + NV_STATUS status; + if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + { + status = _serverUnlockClient(pServer, access, hClient); + if (status != NV_OK) + return status; + + pLockInfo->state &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = NULL; + *pReleaseFlags &= ~RS_LOCK_RELEASE_CLIENT_LOCK; + } + return NV_OK; +} + +static +NV_STATUS +_serverUnlockDualClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient1, + NvHandle hClient2, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + // 1st and 2nd in handle order, as opposed to fixed 1 and 2 + NvHandle hClient1st = NV_MIN(hClient1, 
hClient2);
+    NvHandle hClient2nd = NV_MAX(hClient1, hClient2);
+
+    if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK)
+    {
+        // Try to unlock both, even if one fails
+        NV_ASSERT_OK(_serverUnlockClient(pServer, access, hClient2nd));
+        if (hClient1 != hClient2)
+            NV_ASSERT_OK(_serverUnlockClient(pServer, access, hClient1st));
+
+        pLockInfo->state &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED;
+        pLockInfo->pClient = NULL;
+        pLockInfo->pSecondClient = NULL;
+        *pReleaseFlags &= ~RS_LOCK_RELEASE_CLIENT_LOCK;
+    }
+
+    return NV_OK;
+}
+
+NvU32
+serverGetClientCount(RsServer *pServer)
+{
+    return pServer->activeClientCount;
+}
+
+NvU64
+serverGetResourceCount(RsServer *pServer)
+{
+    return pServer->activeResourceCount;
+}
+
+NV_STATUS
+resservSwapTlsCallContext
+(
+    CALL_CONTEXT **ppOldCallContext,
+    CALL_CONTEXT *pNewCallContext
+)
+{
+    CALL_CONTEXT **ppTlsCallContext;
+
+    if (ppOldCallContext == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    ppTlsCallContext = (CALL_CONTEXT**)tlsEntryAcquire(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    if (ppTlsCallContext == NULL)
+        return NV_ERR_INVALID_STATE;
+
+    *ppOldCallContext = *ppTlsCallContext;
+    *ppTlsCallContext = pNewCallContext;
+
+    // New call contexts inherit the bDeferredApi flag from the old
+    if ((*ppOldCallContext != NULL) && (pNewCallContext != NULL) &&
+        (pNewCallContext->pControlParams != NULL) &&
+        ((*ppOldCallContext)->pControlParams != NULL))
+    {
+        pNewCallContext->pControlParams->bDeferredApi |=
+            (*ppOldCallContext)->pControlParams->bDeferredApi;
+    }
+
+    return NV_OK;
+}
+
+CALL_CONTEXT *
+resservGetTlsCallContext(void)
+{
+    CALL_CONTEXT *pTlsCallContext = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT));
+    return pTlsCallContext;
+}
+
+NV_STATUS
+resservRestoreTlsCallContext
+(
+    CALL_CONTEXT *pOldCallContext
+)
+{
+    CALL_CONTEXT **ppTlsCallContext = (CALL_CONTEXT**)tlsEntryAcquire(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    if (ppTlsCallContext == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    *ppTlsCallContext = pOldCallContext;
+
+    // Release twice: once to balance the tlsEntryAcquire() above, and once
+    // to drop the reference taken by the matching resservSwapTlsCallContext()
+    tlsEntryRelease(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    tlsEntryRelease(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+
+    return NV_OK;
+}
+
+RsResourceRef *
+resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors)
+{
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+    RsResourceRef *pContextRef = NULL;
+
+    if (pCallContext == NULL)
+        return NULL;
+
+    if (pCallContext->pResourceRef != NULL)
+    {
+        if (pCallContext->pResourceRef->internalClassId == internalClassId)
+        {
+            return pCallContext->pResourceRef;
+        }
+        else if (bSearchAncestors &&
+                 (refFindAncestorOfType(pCallContext->pResourceRef, internalClassId, &pContextRef) == NV_OK))
+        {
+            return pContextRef;
+        }
+    }
+
+    if (pCallContext->pContextRef != NULL)
+    {
+        if (pCallContext->pContextRef->internalClassId == internalClassId)
+        {
+            return pCallContext->pContextRef;
+        }
+        else if (bSearchAncestors &&
+                 (refFindAncestorOfType(pCallContext->pContextRef, internalClassId, &pContextRef) == NV_OK))
+        {
+            return pContextRef;
+        }
+    }
+
+    return NULL;
+}
+
+NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams)
+{
+    RS_RES_FREE_PARAMS params;
+    RS_LOCK_INFO lockInfo;
+
+    portMemSet(&lockInfo, 0, sizeof(lockInfo));
+    portMemSet(&params, 0, sizeof(params));
+    params.hClient = pParams->hClient;
+    params.hResource = pParams->hClient;
+    params.bHiPriOnly = pParams->bHiPriOnly;
+    lockInfo.state = pParams->state;
+    params.pLockInfo = &lockInfo;
+    params.pSecInfo = pParams->pSecInfo;
+
+    return serverFreeResourceTree(pServer, &params);
+}
+
+NV_STATUS
+shrConstruct_IMPL +( + RsShared *pShare +) +{ + return NV_OK; +} + +void +shrDestruct_IMPL +( + RsShared *pShare +) +{ +} + +NV_STATUS +sessionConstruct_IMPL +( + RsSession *pSession +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + pSession->bValid = NV_TRUE; + listInit(&pSession->dependencies, pCallContext->pServer->pAllocator); + listInit(&pSession->dependants, pCallContext->pServer->pAllocator); + pSession->pLock = portSyncRwLockCreate(pCallContext->pServer->pAllocator); + + RS_LOCK_VALIDATOR_INIT(&pSession->lockVal, LOCK_VAL_LOCK_CLASS_SESSION, LOCK_VAL_LOCK_GENERATE); + return NV_OK; +} + +void +sessionDestruct_IMPL +( + RsSession *pSession +) +{ + NV_ASSERT(listCount(&pSession->dependencies) == 0); + NV_ASSERT(listCount(&pSession->dependants) == 0); + listDestroy(&pSession->dependencies); + listDestroy(&pSession->dependants); + pSession->pLock = NULL; +} + +NV_STATUS +sessionAddDependant_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + NV_STATUS status; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + status = sessionCheckLocksForAdd(pSession, pResourceRef); + + if (status != NV_OK) + return status; + + if (pResourceRef->pSession == pSession) + return NV_OK; + + NV_ASSERT_OR_RETURN(pResourceRef->pSession == NULL, NV_ERR_INVALID_ARGUMENT); + + if (listAppendValue(&pSession->dependants, &pResourceRef) == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + serverRefShare(pCallContext->pServer, staticCast(pSession, RsShared)); + + pResourceRef->pSession = pSession; + + return NV_OK; +} + +NV_STATUS +sessionAddDependency_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + NV_STATUS status; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + status = sessionCheckLocksForAdd(pSession, pResourceRef); + + if (status != NV_OK) + return status; + + if (pResourceRef->pDependantSession == pSession) + return NV_OK; + + NV_ASSERT_OR_RETURN(pResourceRef->pDependantSession == NULL, NV_ERR_INVALID_ARGUMENT); + + if (listAppendValue(&pSession->dependencies, &pResourceRef) == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + serverRefShare(pCallContext->pServer, staticCast(pSession, RsShared)); + + pResourceRef->pDependantSession = pSession; + + return NV_OK; +} + +void +sessionRemoveDependant_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + listRemoveFirstByValue(&pSession->dependants, &pResourceRef); + sessionCheckLocksForRemove(pSession, pResourceRef); + pResourceRef->pSession = NULL; +} + +void +sessionRemoveDependency_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + listRemoveFirstByValue(&pSession->dependencies, &pResourceRef); + pSession->bValid = NV_FALSE; + sessionCheckLocksForRemove(pSession, pResourceRef); + pResourceRef->pDependantSession = NULL; +} + +NV_STATUS sessionCheckLocksForAdd_IMPL(RsSession *pSession, RsResourceRef *pResourceRef) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RS_LOCK_INFO *pLockInfo; + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + pLockInfo = pCallContext->pLockInfo; + + NV_ASSERT_OR_RETURN((pLockInfo != NULL), NV_ERR_INVALID_STATE); + + if (!serverRwApiLockIsOwner(pCallContext->pServer)) + { + // Assert clients locked or RW lock + if (pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED) + { + NV_ASSERT_OR_RETURN((pLockInfo->pClient == pResourceRef->pClient) || + (pLockInfo->pSecondClient == pResourceRef->pClient), + NV_ERR_INVALID_ARGUMENT); + } + else if (pLockInfo->state & 
RS_LOCK_STATE_TOP_LOCK_ACQUIRED) + { + NV_ASSERT_OR_RETURN((pLockInfo->pClient == NULL) && (pLockInfo->pSecondClient == NULL), NV_ERR_INVALID_ARGUMENT); + } + else + { + NV_ASSERT_FAILED("Incorrect locks taken"); + return NV_ERR_INVALID_LOCK_STATE; + } + } + + return NV_OK; +} + +void sessionCheckLocksForRemove_IMPL(RsSession *pSession, RsResourceRef *pResourceRef) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RS_LOCK_INFO *pLockInfo; + + NV_ASSERT(pCallContext != NULL); + pLockInfo = pCallContext->pLockInfo; + + NV_ASSERT(pLockInfo != NULL); + + if (pLockInfo->flags & RS_LOCK_FLAGS_FREE_SESSION_LOCK) + { + RsShared *pShared = staticCast(pSession, RsShared); + PORT_RWLOCK *pSessionLock = pSession->pLock; + NvBool bDestroy = (pShared->refCount == 1); + + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED) || !bDestroy) + { + serverFreeShare(pCallContext->pServer, pShared); + pLockInfo->flags &= ~RS_LOCK_FLAGS_FREE_SESSION_LOCK; + } + + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED) && bDestroy) + portSyncRwLockDestroy(pSessionLock); + } +} + +NV_STATUS +serverAllocShareWithHalspecParent +( + RsServer *pServer, + const NVOC_CLASS_INFO *pClassInfo, + RsShared **ppShare, + Object *pHalspecParent +) +{ + RsShared *pShare; + NV_STATUS status; + Dynamic *pDynamic = NULL; + NvU32 flags = NVOC_OBJ_CREATE_FLAGS_NONE; + + if (pClassInfo == NULL) + return NV_ERR_INVALID_CLASS; + + if (pHalspecParent != NULL) + flags |= NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY; + + status = objCreateDynamicWithFlags(&pDynamic, + pHalspecParent, + (const NVOC_CLASS_INFO*)(const void*)pClassInfo, + flags); + if (status != NV_OK) + return status; + + if (pDynamic == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pShare = dynamicCast(pDynamic, RsShared); + if (pShare == NULL) + { + status = NV_ERR_INVALID_CLASS; + goto fail; + } + + pShare->refCount = 1; + + portSyncSpinlockAcquire(pServer->pShareMapLock); + if (mapInsertExisting(&pServer->shareMap, (NvUPtr)pShare, pShare) != NV_TRUE) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + portSyncSpinlockRelease(pServer->pShareMapLock); + goto fail; + } + portSyncSpinlockRelease(pServer->pShareMapLock); + + if (ppShare != NULL) + *ppShare = pShare; + + return NV_OK; + +fail: + if (pShare != NULL) + { + objDelete(pShare); + } + + return status; +} + +NV_STATUS +serverAllocShare +( + RsServer *pServer, + const NVOC_CLASS_INFO *pClassInfo, + RsShared **ppShare +) +{ + return serverAllocShareWithHalspecParent(pServer, pClassInfo, ppShare, NULL); +} + +NvS32 +serverGetShareRefCount +( + RsServer *pServer, + RsShared *pShare +) +{ + return pShare->refCount; +} + +NV_STATUS +serverRefShare +( + RsServer *pServer, + RsShared *pShare +) +{ + portAtomicIncrementS32(&pShare->refCount); + return NV_OK; +} + +NV_STATUS +serverFreeShare +( + RsServer *pServer, + RsShared *pShare +) +{ + if (portAtomicDecrementS32(&pShare->refCount) == 0) + { + portSyncSpinlockAcquire(pServer->pShareMapLock); + mapRemove(&pServer->shareMap, pShare); + portSyncSpinlockRelease(pServer->pShareMapLock); + + objDelete(pShare); + } + return NV_OK; +} + +RS_SHARE_ITERATOR +serverShareIter +( + RsServer *pServer, + NvU32 internalClassId +) +{ + RS_SHARE_ITERATOR it; + portMemSet(&it, 0, sizeof(it)); + it.internalClassId = internalClassId; + it.mapIt = mapIterAll(&pServer->shareMap); + + return it; +} + +NvBool +serverShareIterNext +( + RS_SHARE_ITERATOR* pIt +) +{ + NvBool bLoop = NV_TRUE; + if (pIt == NULL) + return NV_FALSE; + + pIt->pShared = NULL; + bLoop = 
mapIterNext(&pIt->mapIt); + while(bLoop) + { + RsShared *pShared = pIt->mapIt.pValue; + if ((pIt->internalClassId == 0) || (objDynamicCastById(pShared, pIt->internalClassId) != NULL)) + { + pIt->pShared = pShared; + return NV_TRUE; + } + bLoop = mapIterNext(&pIt->mapIt); + } + + return NV_FALSE; +} + +#if (RS_PROVIDES_API_STATE) +NV_STATUS +serverAllocApiCopyIn +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, + API_STATE **ppApiState +) +{ + if (ppApiState != NULL) + *ppApiState = NULL; + + return NV_OK; +} + +NV_STATUS +serverAllocApiCopyOut +( + RsServer *pServer, + NV_STATUS status, + API_STATE *pApiState +) +{ + return status; +} +#endif + +#if (RS_STANDALONE) +NV_STATUS +serverAllocEpilogue_WAR +( + RsServer *pServer, + NV_STATUS status, + NvBool bClientAlloc, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams +) +{ + return status; +} + +NV_STATUS +serverLookupSecondClient +( + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phClient +) +{ + *phClient = 0; + + return NV_OK; +} + +NV_STATUS serverTopLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_TOP_LOCK)) + return NV_OK; + + if (!(pLockInfo->state & RS_LOCK_STATE_TOP_LOCK_ACQUIRED)) + { + if (access == LOCK_ACCESS_READ) + { + RS_RWLOCK_ACQUIRE_READ(pServer->pTopLock, &pServer->topLockVal); + } + else + { + RS_RWLOCK_ACQUIRE_WRITE(pServer->pTopLock, &pServer->topLockVal); + pServer->topLockOwnerTid = portThreadGetCurrentThreadId(); + } + + pLockInfo->state |= RS_LOCK_STATE_TOP_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_TOP_LOCK; + } + else if (access == LOCK_ACCESS_WRITE) + { + NV_ASSERT_OR_RETURN(pServer->topLockOwnerTid == portThreadGetCurrentThreadId(), + NV_ERR_INVALID_LOCK_STATE); + } + + return NV_OK; +} + +void +serverTopLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_TOP_LOCK) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pServer->pTopLock, &pServer->topLockVal); + else + { + pServer->topLockOwnerTid = ~0; + RS_RWLOCK_RELEASE_WRITE(pServer->pTopLock, &pServer->topLockVal); + } + + pLockInfo->state &= ~RS_LOCK_STATE_TOP_LOCK_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_TOP_LOCK; + } +} + +NV_STATUS +serverResLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (!(pLockInfo->state & RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED)) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_ACQUIRE_READ(pServer->pResLock, &pServer->resLockVal); + else + RS_RWLOCK_ACQUIRE_WRITE(pServer->pResLock, &pServer->resLockVal); + + pLockInfo->state |= RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_CUSTOM_LOCK_1; + } + + return NV_OK; +} + +void +serverResLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_CUSTOM_LOCK_1) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pServer->pResLock, &pServer->resLockVal); + else + RS_RWLOCK_RELEASE_WRITE(pServer->pResLock, &pServer->resLockVal); + + pLockInfo->state &= ~RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_CUSTOM_LOCK_1; + } +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverMap_Prologue +( + RsServer *pServer, + RS_CPU_MAP_PARAMS *pMapParams +) +{ + return NV_OK; +} +#endif /* !RS_STANDALONE_TEST 
*/ + +void +serverMap_Epilogue +( + RsServer *pServer, + RS_CPU_MAP_PARAMS *pMapParams +) +{ +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverUnmap_Prologue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + return NV_OK; +} +#endif /* !RS_STANDALONE_TEST */ + +void +serverUnmap_Epilogue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ +} + +void +serverControl_InitCookie +( + const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, + RS_CONTROL_COOKIE *pCookie +) +{ +} + +NV_STATUS +serverInterMap_Prologue +( + RsServer *pServer, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pMapParams, + NvU32 *pReleaseFlags +) +{ + NV_STATUS status; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pMapParams->pLockInfo, pReleaseFlags); + + return status; +} + +void +serverInterMap_Epilogue +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pMapParams, + NvU32 *pReleaseFlags +) +{ + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pMapParams->pLockInfo, pReleaseFlags); +} + +NV_STATUS +serverInterUnmap_Prologue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pUnmapParams +) +{ + return NV_OK; +} + +void +serverInterUnmap_Epilogue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pUnmapParams +) +{ +} + +NvBool +serverRwApiLockIsOwner +( + RsServer *pServer +) +{ + return (pServer->topLockOwnerTid == portThreadGetCurrentThreadId()); +} + +NV_STATUS +serverAllocResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + if (lock == RS_LOCK_TOP) + { + NvBool bClientAlloc = (pParams->externalClassId == NV01_ROOT || + pParams->externalClassId == NV01_ROOT_CLIENT || + pParams->externalClassId == NV01_ROOT_NON_PRIV); + + if (bClientAlloc) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + } + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_ALLOC_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverFreeResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_FREE_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverCopyResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_COPY)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverShareResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_SHARE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverControlLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_CTRL)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} +#endif + +NV_STATUS +serverMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP)) + ? 
LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverInterMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverInterUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverControl_ValidateCookie +( + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie +) +{ + return NV_OK; +} + +NV_STATUS +serverControlApiCopyIn +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie +) +{ + return NV_OK; +} + +NV_STATUS +serverControlApiCopyOut +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus +) +{ + return NV_OK; +} + +NV_STATUS +serverInitGlobalSharePolicies +( + RsServer *pServer +) +{ + return NV_OK; +} +#endif + +NV_STATUS +serverSessionLock_Prologue +( + LOCK_ACCESS_TYPE access, + RsResourceRef *pResourceRef, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + RsSession *pSession = pResourceRef->pSession; + RsSession *pDependantSession = pResourceRef->pDependantSession; + + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED)) + { + if (pSession != NULL) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_ACQUIRE_READ(pSession->pLock, &pSession->lockVal); + else + RS_RWLOCK_ACQUIRE_WRITE(pSession->pLock, &pSession->lockVal); + pLockInfo->state |= RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_SESSION_LOCK; + + pLockInfo->pSession = pSession; + } + } + else + { + NV_ASSERT_OR_RETURN(pLockInfo->pSession == pSession, NV_ERR_INVALID_LOCK_STATE); + } + + if (!(pLockInfo->flags & RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK) && + (pDependantSession != NULL)) + { + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED)) + { + // + // The only reason we lock the back reference session is if we're freeing the + // resource so take the write lock in all cases + // + RS_RWLOCK_ACQUIRE_WRITE(pDependantSession->pLock, &pDependantSession->lockVal); + + pLockInfo->state |= RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_SESSION_LOCK; + + pLockInfo->pSession = pDependantSession; + } + else + { + // + // For now, don't allow a resource to be both depended on and depending on a + // session to keep this locking code simpler. We'll have to revisit if that + // becomes necessary. 
+            //
+            NV_ASSERT_OR_RETURN(pLockInfo->pSession == pDependantSession, NV_ERR_INVALID_LOCK_STATE);
+        }
+    }
+
+    pLockInfo->flags &= ~RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK;
+
+    return NV_OK;
+}
+
+void
+serverSessionLock_Epilogue
+(
+    RsServer *pServer,
+    LOCK_ACCESS_TYPE access,
+    RS_LOCK_INFO *pLockInfo,
+    NvU32 *pReleaseFlags
+)
+{
+    RsSession *pSession = pLockInfo->pSession;
+
+    if ((pSession != NULL) && (*pReleaseFlags & RS_LOCK_RELEASE_SESSION_LOCK))
+    {
+        if (access == LOCK_ACCESS_READ)
+            RS_RWLOCK_RELEASE_READ(pSession->pLock, &pSession->lockVal);
+        else
+            RS_RWLOCK_RELEASE_WRITE(pSession->pLock, &pSession->lockVal);
+
+        pLockInfo->state &= ~RS_LOCK_STATE_SESSION_LOCK_ACQUIRED;
+        *pReleaseFlags &= ~RS_LOCK_RELEASE_SESSION_LOCK;
+
+        if (pLockInfo->flags & RS_LOCK_FLAGS_FREE_SESSION_LOCK)
+        {
+            RsShared *pShared = staticCast(pSession, RsShared);
+            PORT_RWLOCK *pSessionLock = pSession->pLock;
+
+            serverFreeShare(pServer, pShared);
+            portSyncRwLockDestroy(pSessionLock);
+        }
+
+        pLockInfo->pSession = NULL;
+    }
+
+    pLockInfo->flags &= ~RS_LOCK_FLAGS_FREE_SESSION_LOCK;
+}
+
+NV_STATUS serverControl_Prologue
+(
+    RsServer *pServer,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+    LOCK_ACCESS_TYPE *pAccess,
+    NvU32* pReleaseFlags
+)
+{
+    NV_STATUS status;
+    RS_LOCK_INFO *pLockInfo = pParams->pLockInfo;
+
+    status = serverControl_ValidateCookie(pParams, pParams->pCookie);
+    if (status != NV_OK)
+        return status;
+
+    status = serverControlLookupLockFlags(pServer, RS_LOCK_RESOURCE, pParams, pParams->pCookie, pAccess);
+    if (status != NV_OK)
+        return status;
+
+    if (!pServer->bUnlockedParamCopy)
+    {
+        status = serverControlApiCopyIn(pServer, pParams, pParams->pCookie);
+        if (status != NV_OK)
+            return status;
+    }
+
+    pLockInfo->traceOp = RS_LOCK_TRACE_CTRL;
+    pLockInfo->traceClassId = pParams->cmd;
+    status = serverResLock_Prologue(pServer, *pAccess, pParams->pLockInfo, pReleaseFlags);
+    if (status != NV_OK)
+        return status;
+
+    return NV_OK;
+}
+
+NV_STATUS
+serverControl_Epilogue
+(
+    RsServer *pServer,
+    RS_RES_CONTROL_PARAMS_INTERNAL *pParams,
+    LOCK_ACCESS_TYPE access,
+    NvU32 *pReleaseFlags,
+    NV_STATUS status
+)
+{
+    serverResLock_Epilogue(pServer, access, pParams->pLockInfo, pReleaseFlags);
+
+    if (!pServer->bUnlockedParamCopy)
+    {
+        status = serverControlApiCopyOut(pServer, pParams, pParams->pCookie, status);
+    }
+
+    return status;
+}
+
+NvBool
+serverSupportsReadOnlyLock
+(
+    RsServer *pServer,
+    RS_LOCK_ENUM lock,
+    RS_API_ENUM api
+)
+{
+    NV_ASSERT(api < RS_API_MAX);
+    if (lock == RS_LOCK_TOP)
+    {
+        return (!!(pServer->roTopLockApiMask & NVBIT(api)));
+    }
+
+    return NV_FALSE;
+}
diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c
new file mode 100644
index 0000000..a7867a6
--- /dev/null
+++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c
@@ -0,0 +1,661 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "tls/tls.h"
+#include "containers/map.h"
+#include "nvport/nvport.h"
+
+/// @todo Figure out which builds have upward stack. Looks like none?
+#define STACK_GROWS_DOWNWARD 1
+
+
+/**
+ * @brief Entry which counts how many times some data in TLS has been referenced.
+ */
+typedef struct TlsEntry
+{
+    NvU32 refCount;
+    NvP64 pUserData;
+    MapNode node;
+} TlsEntry;
+
+MAKE_INTRUSIVE_MAP(TlsEntryMap, TlsEntry, node);
+
+/**
+ * @brief Single thread's TLS information
+ */
+typedef struct ThreadEntry
+{
+    union {
+        NvU64 threadId; ///< For passive threads
+        NvU64 sp;       ///< For ISR threads
+    } key; /// @todo Use node.key instead?
+    TlsEntryMap map;
+    MapNode node;
+} ThreadEntry;
+
+MAKE_INTRUSIVE_MAP(ThreadEntryMap, ThreadEntry, node);
+
+/**
+ * @brief Stores all necessary data for the TLS mechanism.
+ *
+ * @todo Use RW Spinlocks instead. Nice perf boost.
+ */
+typedef struct TlsDatabase
+{
+    /// @brief Allocator which allocates all necessary data for the current @ref TlsDatabase.
+    PORT_MEM_ALLOCATOR *pAllocator;
+    /// @brief Last allocated entry id.
+    NvU64 lastEntryId;
+
+    /// @brief Lock for the passive thread entry map
+    PORT_SPINLOCK *pLock;
+    /// @brief Map of thread entries of non-ISR threads.
+    ThreadEntryMap threadEntries;
+
+#if TLS_ISR_CAN_USE_LOCK
+    /// @brief Lock which controls access to ISR-specific structures
+    PORT_SPINLOCK *pIsrLock;
+    /// @brief Map of thread entries of ISR threads.
+    ThreadEntryMap isrEntries;
+#else
+#if !defined(TLS_ISR_UNIT_TEST)
+#define TLS_MAX_ISRS 64
+#else
+#define TLS_MAX_ISRS 1024
+#endif
+    struct {
+        volatile NvU64 sp;
+        ThreadEntry *pThreadEntry;
+    } isrEntries[TLS_MAX_ISRS];
+#endif
+
+#if TLS_THREADS_CAN_RAISE_IRQL
+    /**
+     * @brief Number of ISRs / DPCs active on a given CPU.
+     *
+     * Every time an ISR starts, it increments this, and decrements on end.
+     * Since ISRs never get rescheduled, and passive threads will never preempt
+     * them, (isrCount[current_cpu] != 0) will be true IFF we're in ISR/DPC.
+ */ + NvU32 *isrCount; +#endif + + volatile NvU32 initCount; +} TlsDatabase; + +TlsDatabase tlsDatabase; // Zero initialized + +// Helper function prototypes +static NvBool _tlsIsIsr(void); +static ThreadEntry *_tlsThreadEntryGet(void); +static ThreadEntry *_tlsThreadEntryGetOrAlloc(void); +static NvP64 *_tlsEntryAcquire(ThreadEntry *pThreadEntry, NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator); +static NvU32 _tlsEntryRelease(ThreadEntry *pThreadEntry, TlsEntry *pTlsEntry, PORT_MEM_ALLOCATOR *pCustomAllocator); +static NV_STATUS _tlsIsrEntriesInit(void); +static void _tlsIsrEntriesDestroy(void); +static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry); +static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp); +static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp); +static PORT_MEM_ALLOCATOR *_tlsIsrAllocatorGet(void); +static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void); + +#if TLS_THREADS_CAN_RAISE_IRQL +/// @todo move to NvPort (bug 1583359) +NvU32 osGetCurrentProcessorNumber(void); +#if defined(NVRM) +NvU32 osGetMaximumCoreCount(void); +#else +#define osGetMaximumCoreCount() 0x0 +#endif +#endif + + +#if !PORT_IS_FUNC_SUPPORTED(portSyncExSafeToSleep) +#define portSyncExSafeToSleep() NV_TRUE +#endif + +#if !PORT_IS_FUNC_SUPPORTED(portMemExSafeForNonPagedAlloc) +#define portMemExSafeForNonPagedAlloc() NV_TRUE +#endif + +#if defined(TLS_PROFILING) +#include "tls_profiling.h" +#endif + + + + +NV_STATUS tlsInitialize() +{ + NV_STATUS status; + + if (portAtomicIncrementU32(&tlsDatabase.initCount) != 1) + { + return NV_OK; /// @todo Maybe return NV_WARN_NOTHING_TO_DO? + } + + status = portInitialize(); + if (status != NV_OK) + return status; + + tlsDatabase.pAllocator = portMemAllocatorCreateNonPaged(); + if (tlsDatabase.pAllocator == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + tlsDatabase.pLock = portSyncSpinlockCreate(tlsDatabase.pAllocator); + if (tlsDatabase.pLock == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + mapInitIntrusive(&tlsDatabase.threadEntries); + + status = _tlsIsrEntriesInit(); + if (status != NV_OK) + goto done; + + tlsDatabase.lastEntryId = TLS_ENTRY_ID_DYNAMIC; + +#if TLS_THREADS_CAN_RAISE_IRQL +{ + NvU32 maxCoreCount = osGetMaximumCoreCount(); + if (maxCoreCount == 0) + maxCoreCount = 1; // MODS reports only 1 CPU at index 0. + + tlsDatabase.isrCount = PORT_ALLOC(tlsDatabase.pAllocator, maxCoreCount * sizeof(NvU32)); + if (tlsDatabase.isrCount == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + portMemSet(tlsDatabase.isrCount, 0, maxCoreCount * sizeof(NvU32)); +} +#endif // TLS_THREADS_CAN_RAISE_IRQL + +done: + if (status != NV_OK) + { + tlsShutdown(); + } + return status; +} + +void tlsShutdown() +{ + if (portAtomicDecrementU32(&tlsDatabase.initCount) != 0) + { + return; + } + +#if defined(TLS_PROFILING) + _tlsProfilePrint(); +#endif + + mapDestroy(&tlsDatabase.threadEntries); + if (tlsDatabase.pLock) + portSyncSpinlockDestroy(tlsDatabase.pLock); + + _tlsIsrEntriesDestroy(); + + if (tlsDatabase.pAllocator) + { +#if TLS_THREADS_CAN_RAISE_IRQL + PORT_FREE(tlsDatabase.pAllocator, tlsDatabase.isrCount); +#endif + portMemAllocatorRelease(tlsDatabase.pAllocator); + } + portMemSet(&tlsDatabase, 0, sizeof(tlsDatabase)); + portShutdown(); +} + +void tlsIsrInit(PORT_MEM_ALLOCATOR *pIsrAllocator) +{ + ThreadEntry *pThreadEntry; + NV_ASSERT_OR_RETURN_VOID(tlsDatabase.initCount > 0); + + // + // If TLS_THREADS_CAN_RAISE_IRQL we treat anything that calls tlsIsrInit as + // ISR, and cannot perform this check. 
Will be moved to ASSERT later.
+    // See CORERM-96
+    //
+    if (!TLS_THREADS_CAN_RAISE_IRQL && !_tlsIsIsr())
+    {
+        static NvBool bAlreadyPrinted = NV_FALSE;
+        if (!bAlreadyPrinted)
+        {
+            NV_PRINTF(LEVEL_WARNING,
+                      "TLS: Unnecessary tlsIsrInit() call at %p. Will stop reporting further violations.\n",
+                      (void*)portUtilGetReturnAddress());
+            bAlreadyPrinted = NV_TRUE;
+        }
+        return;
+    }
+
+    pThreadEntry = PORT_ALLOC(pIsrAllocator, sizeof(*pThreadEntry));
+    NV_ASSERT_OR_RETURN_VOID(pThreadEntry != NULL);
+
+    pThreadEntry->key.sp = (NvU64)(NvUPtr)pIsrAllocator;
+    mapInitIntrusive(&pThreadEntry->map);
+
+    _tlsIsrEntriesInsert(pThreadEntry);
+
+#if TLS_THREADS_CAN_RAISE_IRQL
+    portAtomicIncrementU32(&tlsDatabase.isrCount[osGetCurrentProcessorNumber()]);
+#endif
+}
+
+void tlsIsrDestroy(PORT_MEM_ALLOCATOR *pIsrAllocator)
+{
+    ThreadEntry *pThreadEntry;
+    NV_ASSERT_OR_RETURN_VOID(tlsDatabase.initCount > 0);
+
+    if (!_tlsIsIsr())
+    {
+        if (TLS_THREADS_CAN_RAISE_IRQL)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "TLS: Calling tlsIsrDestroy() without accompanying tlsIsrInit at %p\n",
+                      (void*)portUtilGetReturnAddress());
+        }
+        return;
+    }
+
+    pThreadEntry = _tlsIsrEntriesRemove((NvU64)(NvUPtr)pIsrAllocator);
+    NV_ASSERT_OR_RETURN_VOID(pThreadEntry != NULL);
+
+    mapDestroy(&pThreadEntry->map);
+    PORT_FREE(pIsrAllocator, pThreadEntry);
+
+#if TLS_THREADS_CAN_RAISE_IRQL
+    portAtomicDecrementU32(&tlsDatabase.isrCount[osGetCurrentProcessorNumber()]);
+#endif
+}
+
+PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void)
+{
+
+    NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL);
+    return _tlsIsrAllocatorGet();
+}
+
+NvU64 tlsEntryAlloc()
+{
+    NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL);
+    return portAtomicExIncrementU64(&tlsDatabase.lastEntryId);
+}
+
+NvP64 *tlsEntryAcquire(NvU64 entryId)
+{
+    ThreadEntry *pThreadEntry;
+    NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL);
+
+    // Reject acquisition of an entryId that was never allocated.
+    NV_ASSERT_OR_RETURN(entryId <= tlsDatabase.lastEntryId ||
+                        entryId >= TLS_ENTRY_ID_TAG_START, NULL);
+
+    pThreadEntry = _tlsThreadEntryGetOrAlloc();
+    NV_ASSERT_OR_RETURN(pThreadEntry != NULL, NULL);
+
+    return _tlsEntryAcquire(pThreadEntry, entryId, NULL);
+}
+
+NvP64 *tlsEntryAcquireWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator)
+{
+    ThreadEntry *pThreadEntry;
+    NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL);
+
+    // Reject acquisition of an entryId that was never allocated.
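+    // Ids at or above TLS_ENTRY_ID_TAG_START bypass the range check below;
+    // only ids below that value must come from tlsEntryAlloc().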
+ NV_ASSERT_OR_RETURN(entryId <= tlsDatabase.lastEntryId || + entryId >= TLS_ENTRY_ID_TAG_START, NULL); + NV_ASSERT_OR_RETURN(pCustomAllocator != NULL, NULL); + + pThreadEntry = _tlsThreadEntryGetOrAlloc(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, NULL); + + return _tlsEntryAcquire(pThreadEntry, entryId, pCustomAllocator); +} + +NvU32 tlsEntryRelease(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return _tlsEntryRelease(pThreadEntry, pTlsEntry, NULL); +} + +NvU32 tlsEntryReleaseWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + NV_ASSERT_OR_RETURN(pCustomAllocator != NULL, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return _tlsEntryRelease(pThreadEntry, pTlsEntry, pCustomAllocator); +} + +NvP64 tlsEntryGet(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NvP64_NULL); + + pThreadEntry = _tlsThreadEntryGet(); + if (pThreadEntry == NULL) + return NvP64_NULL; + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + return pTlsEntry ? pTlsEntry->pUserData : NvP64_NULL; +} + +NvU32 tlsEntryReference(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return ++pTlsEntry->refCount; +} + +NvU32 tlsEntryUnreference(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return --pTlsEntry->refCount; +} + + +static ThreadEntry * +_tlsThreadEntryGet() +{ + ThreadEntry *pThreadEntry; + + if (_tlsIsIsr()) + { + pThreadEntry = _tlsIsrEntriesFind((NvU64)(NvUPtr)&pThreadEntry); + } + else + { + NvU64 threadId = portThreadGetCurrentThreadId(); + portSyncSpinlockAcquire(tlsDatabase.pLock); + pThreadEntry = mapFind(&tlsDatabase.threadEntries, threadId); + portSyncSpinlockRelease(tlsDatabase.pLock); + } + return pThreadEntry; +} + + +static ThreadEntry * +_tlsThreadEntryGetOrAlloc() +{ + ThreadEntry* pThreadEntry = NULL; + + pThreadEntry = _tlsThreadEntryGet(); + if (pThreadEntry == NULL) // Only non-ISRs can be missing + { + NV_ASSERT(portMemExSafeForNonPagedAlloc()); + pThreadEntry = PORT_ALLOC(tlsDatabase.pAllocator, sizeof(*pThreadEntry)); + if (pThreadEntry != NULL) + { + pThreadEntry->key.threadId = portThreadGetCurrentThreadId(); + mapInitIntrusive(&pThreadEntry->map); + portSyncSpinlockAcquire(tlsDatabase.pLock); + mapInsertExisting(&tlsDatabase.threadEntries, + pThreadEntry->key.threadId, + pThreadEntry); + 
portSyncSpinlockRelease(tlsDatabase.pLock); + } + } + + return pThreadEntry; +} + +static NvP64* +_tlsEntryAcquire +( + ThreadEntry *pThreadEntry, + NvU64 entryId, + PORT_MEM_ALLOCATOR *pCustomAllocator +) +{ + TlsEntry *pTlsEntry; + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = (pCustomAllocator != NULL) ? pCustomAllocator : _tlsAllocatorGet(); + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + if (pTlsEntry != NULL) + { + pTlsEntry->refCount++; + } + else + { + pTlsEntry = PORT_ALLOC(pAllocator, sizeof(*pTlsEntry)); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, NULL); + mapInsertExisting(&pThreadEntry->map, entryId, pTlsEntry); + + pTlsEntry->refCount = 1; + pTlsEntry->pUserData = NvP64_NULL; + } + return &pTlsEntry->pUserData; +} + +static NvU32 +_tlsEntryRelease +( + ThreadEntry* pThreadEntry, + TlsEntry *pTlsEntry, + PORT_MEM_ALLOCATOR *pCustomAllocator +) +{ + NvU32 refCount; + PORT_MEM_ALLOCATOR *pAllocator; + pAllocator = (pCustomAllocator != NULL) ? pCustomAllocator : _tlsAllocatorGet(); + + refCount = --pTlsEntry->refCount; + if (refCount == 0) + { + mapRemove(&pThreadEntry->map, pTlsEntry); + PORT_FREE(pAllocator, pTlsEntry); + // Only non ISR Thread Entry can be deallocated. + if (!_tlsIsIsr() && (mapCount(&pThreadEntry->map) == 0)) + { + NV_ASSERT(portMemExSafeForNonPagedAlloc()); + mapDestroy(&pThreadEntry->map); + portSyncSpinlockAcquire(tlsDatabase.pLock); + mapRemove(&tlsDatabase.threadEntries, pThreadEntry); + portSyncSpinlockRelease(tlsDatabase.pLock); + PORT_FREE(tlsDatabase.pAllocator, pThreadEntry); + } + } + return refCount; +} + +static PORT_MEM_ALLOCATOR *_tlsIsrAllocatorGet(void) +{ + ThreadEntry *pThreadEntry; + + if (!_tlsIsIsr()) { return NULL; } + pThreadEntry = _tlsThreadEntryGet(); + + return (PORT_MEM_ALLOCATOR*)(NvUPtr)pThreadEntry->key.sp; +} + +static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void) +{ + PORT_MEM_ALLOCATOR *pIsrAllocator = _tlsIsrAllocatorGet(); + return (pIsrAllocator == NULL) ? 
tlsDatabase.pAllocator : pIsrAllocator;
+}
+
+#if TLS_ISR_CAN_USE_LOCK
+
+static NV_STATUS _tlsIsrEntriesInit()
+{
+    tlsDatabase.pIsrLock = portSyncSpinlockCreate(tlsDatabase.pAllocator);
+    if (tlsDatabase.pIsrLock == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+    mapInitIntrusive(&tlsDatabase.isrEntries);
+    return NV_OK;
+}
+static void _tlsIsrEntriesDestroy()
+{
+    if (tlsDatabase.pIsrLock)
+        portSyncSpinlockDestroy(tlsDatabase.pIsrLock);
+    mapDestroy(&tlsDatabase.isrEntries);
+}
+static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
+{
+    portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
+    mapInsertExisting(&tlsDatabase.isrEntries, pThreadEntry->key.sp, pThreadEntry);
+    portSyncSpinlockRelease(tlsDatabase.pIsrLock);
+}
+static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
+{
+    ThreadEntry *pThreadEntry;
+    portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
+    pThreadEntry = mapFind(&tlsDatabase.isrEntries, sp);
+    mapRemove(&tlsDatabase.isrEntries, pThreadEntry);
+    portSyncSpinlockRelease(tlsDatabase.pIsrLock);
+    return pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
+{
+    ThreadEntry *pThreadEntry;
+    portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
+#if STACK_GROWS_DOWNWARD
+    pThreadEntry = mapFindGEQ(&tlsDatabase.isrEntries, approxSp);
+#else
+    pThreadEntry = mapFindLEQ(&tlsDatabase.isrEntries, approxSp);
+#endif
+    portSyncSpinlockRelease(tlsDatabase.pIsrLock);
+    return pThreadEntry;
+}
+
+#else // Lockless
+
+static NV_STATUS _tlsIsrEntriesInit()
+{
+    portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
+    return NV_OK;
+}
+static void _tlsIsrEntriesDestroy()
+{
+    portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
+}
+static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
+{
+    NvU32 i = 0;
+
+    while (!portAtomicExCompareAndSwapU64(&tlsDatabase.isrEntries[i].sp,
+                                          pThreadEntry->key.sp, 0))
+    {
+        i = (i + 1) % TLS_MAX_ISRS;
+    }
+    tlsDatabase.isrEntries[i].pThreadEntry = pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
+{
+    ThreadEntry *pThreadEntry;
+    NvU32 i = 0;
+
+    while (tlsDatabase.isrEntries[i].sp != sp)
+    {
+        i++;
+    }
+    pThreadEntry = tlsDatabase.isrEntries[i].pThreadEntry;
+    portAtomicExSetU64(&tlsDatabase.isrEntries[i].sp, 0);
+
+    return pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
+{
+    NvU32 i;
+    NvU32 closestIdx = ~0x0;
~0ULL : 0; + + for (i = 0; i < TLS_MAX_ISRS; i++) + { + NvU64 sp = tlsDatabase.isrEntries[i].sp; +#if STACK_GROWS_DOWNWARD + if (sp != 0 && sp >= approxSp && sp < closestSp) +#else + if (sp != 0 && sp <= approxSp && sp > closestSp) +#endif + { + closestSp = sp; + closestIdx = i; + } + } + NV_ASSERT_OR_RETURN(closestIdx != ~0x0, NULL); + return tlsDatabase.isrEntries[closestIdx].pThreadEntry; +} + +#endif // TLS_ISR_CAN_USE_LOCK + + + +static NvBool _tlsIsIsr() +{ +#if defined (TLS_ISR_UNIT_TEST) + // In unit tests we simulate ISRs in different ways, so the tests define this + extern NvBool tlsTestIsIsr(void); + return tlsTestIsIsr(); +#elif TLS_THREADS_CAN_RAISE_IRQL + NvU64 preempt = portSyncExDisablePreemption(); + NvBool bIsIsr = (tlsDatabase.isrCount[osGetCurrentProcessorNumber()] > 0); + portSyncExRestorePreemption(preempt); + return bIsIsr; +#else // Usermode and most kernelmode platforms + return portUtilIsInterruptContext(); +#endif // TLS_ISR_UNIT_TEST +} diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c new file mode 100644 index 0000000..dac8881 --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c @@ -0,0 +1,422 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Implements helper functions for the NV_ASSERT and NV_CHECK + * families of macros. + */ + +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +#if defined(NVRM) && !defined(NVWATCH) +#include "containers/map.h" +#include "os/os.h" +#include "nvrm_registry.h" +#include "rmconfig.h" +#elif !defined(RMCFG_FEATURE_ENABLED) +#define RMCFG_FEATURE_x 0 +#endif + +#if NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE + +// Hook NV_ASSERT into RCDB.
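+// +// For illustration: with backtraces compiled in, a failing NV_ASSERT reaches +// nvAssertFailed(), which prints the failure and then calls +// NV_JOURNAL_ASSERT_FAILURE(lineNum, ip), forwarding the failing instruction +// pointer to nvAssertFailedBacktrace(ip); in every other configuration the +// journal hooks below compile to ((void)0).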
+#if NV_JOURNAL_ASSERT_ENABLE +#if NV_PRINTF_STRINGS_ALLOWED && NV_ASSERT_FAILED_USES_STRINGS +// /* FIXME: Add an rmconfig profile for dGPU OpenRM to enable this */ +#define NV_ASSERT_FAILED_BACKTRACE +static void nvAssertFailedBacktrace(NvU64 ip); +#define NV_JOURNAL_ASSERT_FAILURE(lineNum, ip) do {nvAssertFailedBacktrace(ip);} while(0) +#define NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status) do {nvAssertFailedBacktrace(ip);} while(0) +#else +#define NV_JOURNAL_ASSERT_FAILURE(lineNum, ip) ((void)0) +#define NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status) ((void)0) +#endif /* NV_PRINTF_STRINGS_ALLOWED && NV_ASSERT_FAILED_USES_STRINGS */ +#else +#define NV_JOURNAL_ASSERT_FAILURE(lineNum, ip) ((void)0) +#define NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status) ((void)0) +#endif /* NV_JOURNAL_ASSERT_ENABLE */ + +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +#if NV_JOURNAL_ASSERT_ENABLE +/* + * Helper function for NV_ASSERT_FAILED + */ +void +nvAssertFailed(void) +{ + NV_JOURNAL_ASSERT_FAILURE(NV_RM_ASSERT_UNKNOWN_LINE_NUM, portUtilGetReturnAddress()); +} + +void +nvAssertOkFailed(NvU32 status) +{ + NV_JOURNAL_ASSERT_FAILURE_STATUS(NV_RM_ASSERT_UNKNOWN_LINE_NUM, portUtilGetReturnAddress(), status); +} +#endif + +#else //defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +#if NV_ASSERT_FAILED_USES_STRINGS +#define NV_ASSERT_FAILED_PRINTF_FMT "%s @ %s:%d\n" +#define NV_ASSERT_FAILED_PRINTF_PARAM pszExpr, trimFN(pszFileName), lineNum +#else +#define NV_ASSERT_FAILED_PRINTF_FMT "0x%016llx\n" +#define NV_ASSERT_FAILED_PRINTF_PARAM ip +#endif + +#define NV_ASSERT_PRINTF(level, fmt, ...) NV_PRINTF_STRING \ + (NV_PRINTF_MODULE, level, NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__) + +#define PATH_SEP '/' + +/* + * Trim path from source filename. + */ +#if NV_ASSERT_FAILED_USES_STRINGS +static const char *trimFN(const char *pszFileName) +{ + NvLength i; + + for (i = 0; pszFileName[i] != 0; i++) + ; + + for (; i > 0; i--) + { + if (pszFileName[i] == PATH_SEP) + return &pszFileName[i + 1]; + } + + return pszFileName; +} +#endif + +/* + * Helper function for NV_ASSERT_FAILED + */ +void +nvAssertFailed +( + NV_ASSERT_FAILED_FUNC_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, "Assertion failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed @ 0x%016llx", ip); + NV_JOURNAL_ASSERT_FAILURE(lineNum, ip); +} + +/* + * Helper function for NV_ASSERT_OK_FAILED + */ +void +nvAssertOkFailed +( + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, + "Assertion failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: 0x%08X returned from 0x%016llx", + status, ip); + NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status); +} + +/* + * Helper function for NV_CHECK_FAILED + */ +void +nvCheckFailed +( + NvU32 level + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, "Check failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(level, "Check failed @ 0x%016llx", ip); +} + +/* + * Helper function for NV_CHECK_OK_FAILED + */ +void +nvCheckOkFailed +( + NvU32 level, + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{
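+    // Capture the caller's return address up front; when assert strings are +    // compiled out, this address is the only identification of the failing +    // call site that gets printed.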
+ NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, + "Check failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(level, "Check failed: 0x%08X returned from 0x%016llx", status, ip); +} + +/* + * Helper function for the no-log variant of NV_ASSERT_FAILED + */ +void +nvAssertFailedNoLog +( + NV_ASSERT_FAILED_FUNC_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, "Assertion failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); + NV_JOURNAL_ASSERT_FAILURE(lineNum, ip); +} + +/* + * Helper function for the no-log variant of NV_ASSERT_OK_FAILED + */ +void +nvAssertOkFailedNoLog +( + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, + "Assertion failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status); +} + +/* + * Helper function for the no-log variant of NV_CHECK_FAILED + */ +void +nvCheckFailedNoLog +( + NvU32 level + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, "Check failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); +} + +/* + * Helper function for the no-log variant of NV_CHECK_OK_FAILED + */ +void +nvCheckOkFailedNoLog +( + NvU32 level, + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, + "Check failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); +} + +#endif // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) +#endif // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE + +/* + * Temporarily duplicate the nvstatusToString code to nvAssertStatusToString. + * + * Ideally, nvassert.c and nvstatus.c should both be included in shared.nvmk. + * But nvstatus.c is already directly included in projects from multiple module + * branches that also include shared.nvmk. It is going to take some serious + * cross-module magic to move it. + */ + +#if !defined(NV_PRINTF_STRING_SECTION) +#if defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging"))) +#else // defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION +#endif // defined(NVRM) && NVCPU_IS_RISCV64 +#endif // !defined(NV_PRINTF_STRING_SECTION) + +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) static NV_PRINTF_STRING_SECTION \ + const char rm_pvt_##name##_str[] = string " [" #name "]"; +#include "nvstatuscodes.h" + +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) { name, rm_pvt_##name##_str }, +static struct NvStatusCodeString +{ + NV_STATUS statusCode; + const char *statusString; +} g_StatusCodeList[] = { + #include "nvstatuscodes.h" + { 0xffffffff, "Unknown error code!" } // Some compilers don't like the trailing ',' +}; +#undef NV_STATUS_CODE + +/*! + * @brief Given an NV_STATUS code, returns the corresponding status string.
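+ * Used by the assert and check helpers above to decorate NV_STATUS values.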
+ * + * @param[in] nvStatusIn NV_STATUS code for which the string is required + * + * @returns Corresponding status string from the nvstatuscodes.h + * + * TODO: Bug 200025711: convert this to an array-indexed lookup, instead of a linear search + * +*/ +const char *nvAssertStatusToString(NV_STATUS nvStatusIn) +{ + static NV_PRINTF_STRING_SECTION const char rm_pvt_UNKNOWN_str[] = "Unknown error code!"; + NvU32 i; + NvU32 n = ((NvU32)(sizeof(g_StatusCodeList))/(NvU32)(sizeof(g_StatusCodeList[0]))); + for (i = 0; i < n; i++) + { + if (g_StatusCodeList[i].statusCode == nvStatusIn) + { + return g_StatusCodeList[i].statusString; + } + } + + return rm_pvt_UNKNOWN_str; +} + +#if defined(NV_ASSERT_FAILED_BACKTRACE) +MAKE_MAP(AssertedIPMap, NvU8); + +static struct +{ + AssertedIPMap map; + NvU32 mode; + PORT_MUTEX *mtx; + NvBool init; + OS_THREAD_HANDLE tid; +} osAssertInternal; + +void nvAssertInit(void) +{ + if (osAssertInternal.init) + return; + + osAssertInternal.mode = NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE; + + // Map is not thread-safe and osAssertFailed can be called concurrently. + osReadRegistryDword(NULL, NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE, &osAssertInternal.mode); + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE) + { + osAssertInternal.mtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (!osAssertInternal.mtx) + { + osAssertInternal.mode = NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_DISABLE; + } + else + { + mapInit(&osAssertInternal.map, portMemAllocatorGetGlobalNonPaged()); + } + } + osAssertInternal.init = NV_TRUE; +} + +static void nvAssertFailedBacktrace(NvU64 ip) +{ + if (!osAssertInternal.init) + return; + + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE) + { + OS_THREAD_HANDLE tid; + if (osGetCurrentThread(&tid) != NV_OK) + return; + + // nvport mutex is not reentrant and will deadlock with nested locking. + // If the next condition holds, we're in a reentrant call. + if (tid == osAssertInternal.tid) + return; + + portSyncMutexAcquire(osAssertInternal.mtx); + osAssertInternal.tid = tid; + + if (!mapFind(&osAssertInternal.map, ip)) + { + // If we're out of memory, do not dump anything to avoid spam + if (mapInsertNew(&osAssertInternal.map, ip)) + osAssertFailed(); + } + + osAssertInternal.tid = 0; + portSyncMutexRelease(osAssertInternal.mtx); + } + else if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_ENABLE) + osAssertFailed(); +} + +void nvAssertDestroy(void) +{ + if (!osAssertInternal.init) + return; + + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE && osAssertInternal.mtx) + { + portSyncMutexDestroy(osAssertInternal.mtx); + mapDestroy(&osAssertInternal.map); + } + osAssertInternal.init = 0; +} + +#elif defined(NVRM) && !defined(NVWATCH) // ignore in nvlog_decoder/nvwatch build + +// We do not expose NV_ASSERT_FAILED_BACKTRACE outside this file. The callers will use these stubs. 
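+// (Keeping the stubs lets callers invoke nvAssertInit() and nvAssertDestroy() +// unconditionally in builds where the backtrace bookkeeping is compiled out.)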
+void nvAssertInit(void) +{ +} + +void nvAssertDestroy(void) +{ +} +#endif /* defined(NV_ASSERT_FAILED_BACKTRACE) */ diff --git a/NVIDIA-kernel-module-source-TempVersion/src/nvidia/srcs.mk b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/srcs.mk new file mode 100644 index 0000000..babff8e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/src/nvidia/srcs.mk @@ -0,0 +1,190 @@ +SRCS ?= +SRCS_CXX ?= + +SRCS += generated/g_binary_api_nvoc.c +SRCS += generated/g_chips2halspec_nvoc.c +SRCS += generated/g_client_nvoc.c +SRCS += generated/g_client_resource_nvoc.c +SRCS += generated/g_context_dma_nvoc.c +SRCS += generated/g_dce_client_nvoc.c +SRCS += generated/g_device_nvoc.c +SRCS += generated/g_disp_capabilities_nvoc.c +SRCS += generated/g_disp_channel_nvoc.c +SRCS += generated/g_disp_inst_mem_nvoc.c +SRCS += generated/g_disp_objs_nvoc.c +SRCS += generated/g_disp_sf_user_nvoc.c +SRCS += generated/g_eng_state_nvoc.c +SRCS += generated/g_event_buffer_nvoc.c +SRCS += generated/g_event_nvoc.c +SRCS += generated/g_generic_engine_nvoc.c +SRCS += generated/g_gpu_class_list.c +SRCS += generated/g_gpu_db_nvoc.c +SRCS += generated/g_gpu_group_nvoc.c +SRCS += generated/g_gpu_halspec_nvoc.c +SRCS += generated/g_gpu_mgmt_api_nvoc.c +SRCS += generated/g_gpu_mgr_nvoc.c +SRCS += generated/g_gpu_nvoc.c +SRCS += generated/g_gpu_resource_nvoc.c +SRCS += generated/g_hal_mgr_nvoc.c +SRCS += generated/g_hal_nvoc.c +SRCS += generated/g_hda_codec_api_nvoc.c +SRCS += generated/g_io_vaspace_nvoc.c +SRCS += generated/g_kern_disp_nvoc.c +SRCS += generated/g_kernel_head_nvoc.c +SRCS += generated/g_mem_mgr_nvoc.c +SRCS += generated/g_mem_nvoc.c +SRCS += generated/g_object_nvoc.c +SRCS += generated/g_objtmr_nvoc.c +SRCS += generated/g_os_desc_mem_nvoc.c +SRCS += generated/g_os_nvoc.c +SRCS += generated/g_prereq_tracker_nvoc.c +SRCS += generated/g_resource_nvoc.c +SRCS += generated/g_rmconfig_util.c +SRCS += generated/g_rs_client_nvoc.c +SRCS += generated/g_rs_resource_nvoc.c +SRCS += generated/g_rs_server_nvoc.c +SRCS += generated/g_standard_mem_nvoc.c +SRCS += generated/g_subdevice_nvoc.c +SRCS += generated/g_syncpoint_mem_nvoc.c +SRCS += generated/g_system_mem_nvoc.c +SRCS += generated/g_system_nvoc.c +SRCS += generated/g_tmr_nvoc.c +SRCS += generated/g_traceable_nvoc.c +SRCS += generated/g_vaspace_nvoc.c +SRCS += generated/g_virt_mem_mgr_nvoc.c +SRCS += ../common/shared/nvstatus/nvstatus.c +SRCS += arch/nvalloc/unix/src/escape.c +SRCS += arch/nvalloc/unix/src/exports-stubs.c +SRCS += arch/nvalloc/unix/src/gcc_helper.c +SRCS += arch/nvalloc/unix/src/os-hypervisor-stubs.c +SRCS += arch/nvalloc/unix/src/os.c +SRCS += arch/nvalloc/unix/src/osapi.c +SRCS += arch/nvalloc/unix/src/osinit.c +SRCS += arch/nvalloc/unix/src/osmemdesc.c +SRCS += arch/nvalloc/unix/src/osunix.c +SRCS += arch/nvalloc/unix/src/power-management-tegra.c +SRCS += arch/nvalloc/unix/src/registry.c +SRCS += arch/nvalloc/unix/src/rmobjexportimport.c +SRCS += interface/deprecated/rmapi_deprecated_utils.c +SRCS += src/kernel/core/hal/hal.c +SRCS += src/kernel/core/hal/hals_all.c +SRCS += src/kernel/core/hal/info_block.c +SRCS += src/kernel/core/hal_mgr.c +SRCS += src/kernel/core/locks_common.c +SRCS += src/kernel/core/locks_minimal.c +SRCS += src/kernel/core/system.c +SRCS += src/kernel/core/thread_state.c +SRCS += src/kernel/diagnostics/nvlog.c +SRCS += src/kernel/diagnostics/nvlog_printf.c +SRCS += src/kernel/diagnostics/profiler.c +SRCS += src/kernel/gpu/arch/t23x/kern_gpu_t234d.c +SRCS += src/kernel/gpu/audio/hda_codec_api.c +SRCS += 
src/kernel/gpu/dce_client/dce_client.c +SRCS += src/kernel/gpu/dce_client/dce_client_rpc.c +SRCS += src/kernel/gpu/device.c +SRCS += src/kernel/gpu/device_ctrl.c +SRCS += src/kernel/gpu/device_share.c +SRCS += src/kernel/gpu/disp/arch/v03/kern_disp_0300.c +SRCS += src/kernel/gpu/disp/arch/v04/kern_disp_0402.c +SRCS += src/kernel/gpu/disp/disp_capabilities.c +SRCS += src/kernel/gpu/disp/disp_channel.c +SRCS += src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c +SRCS += src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c +SRCS += src/kernel/gpu/disp/disp_objs.c +SRCS += src/kernel/gpu/disp/disp_sf_user.c +SRCS += src/kernel/gpu/disp/head/kernel_head.c +SRCS += src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c +SRCS += src/kernel/gpu/disp/inst_mem/disp_inst_mem.c +SRCS += src/kernel/gpu/disp/kern_disp.c +SRCS += src/kernel/gpu/eng_state.c +SRCS += src/kernel/gpu/gpu.c +SRCS += src/kernel/gpu/gpu_access.c +SRCS += src/kernel/gpu/gpu_device_mapping.c +SRCS += src/kernel/gpu/gpu_gspclient.c +SRCS += src/kernel/gpu/gpu_resource.c +SRCS += src/kernel/gpu/gpu_resource_desc.c +SRCS += src/kernel/gpu/gpu_rmapi.c +SRCS += src/kernel/gpu/gpu_t234d_kernel.c +SRCS += src/kernel/gpu/gpu_timeout.c +SRCS += src/kernel/gpu/gpu_uuid.c +SRCS += src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c +SRCS += src/kernel/gpu/mem_mgr/context_dma.c +SRCS += src/kernel/gpu/mem_mgr/mem_desc.c +SRCS += src/kernel/gpu/mem_mgr/mem_utils.c +SRCS += src/kernel/gpu/subdevice/generic_engine.c +SRCS += src/kernel/gpu/subdevice/subdevice.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c +SRCS += src/kernel/gpu/timer/timer.c +SRCS += src/kernel/gpu/timer/timer_ostimer.c +SRCS += src/kernel/gpu_mgr/gpu_db.c +SRCS += src/kernel/gpu_mgr/gpu_group.c +SRCS += src/kernel/gpu_mgr/gpu_mgmt_api.c +SRCS += src/kernel/gpu_mgr/gpu_mgr.c +SRCS += src/kernel/mem_mgr/io_vaspace.c +SRCS += src/kernel/mem_mgr/mem.c +SRCS += src/kernel/mem_mgr/os_desc_mem.c +SRCS += src/kernel/mem_mgr/standard_mem.c +SRCS += src/kernel/mem_mgr/syncpoint_mem.c +SRCS += src/kernel/mem_mgr/system_mem.c +SRCS += src/kernel/mem_mgr/vaspace.c +SRCS += src/kernel/mem_mgr/virt_mem_mgr.c +SRCS += src/kernel/os/os_init.c +SRCS += src/kernel/os/os_sanity.c +SRCS += src/kernel/os/os_stubs.c +SRCS += src/kernel/os/os_timer.c +SRCS += src/kernel/rmapi/alloc_free.c +SRCS += src/kernel/rmapi/binary_api.c +SRCS += src/kernel/rmapi/client.c +SRCS += src/kernel/rmapi/client_resource.c +SRCS += src/kernel/rmapi/control.c +SRCS += src/kernel/rmapi/deprecated_context.c +SRCS += src/kernel/rmapi/entry_points.c +SRCS += src/kernel/rmapi/event.c +SRCS += src/kernel/rmapi/event_buffer.c +SRCS += src/kernel/rmapi/event_notification.c +SRCS += src/kernel/rmapi/mapping.c +SRCS += src/kernel/rmapi/mapping_cpu.c +SRCS += src/kernel/rmapi/param_copy.c +SRCS += src/kernel/rmapi/resource.c +SRCS += src/kernel/rmapi/resource_desc.c +SRCS += src/kernel/rmapi/rmapi.c +SRCS += src/kernel/rmapi/rmapi_cache.c +SRCS += src/kernel/rmapi/rmapi_stubs.c +SRCS += src/kernel/rmapi/rmapi_utils.c +SRCS += src/kernel/rmapi/rpc_common.c +SRCS += src/kernel/rmapi/rs_utils.c +SRCS += src/kernel/rmapi/sharing.c +SRCS += src/lib/base_utils.c +SRCS += src/lib/zlib/inflate.c +SRCS += src/libraries/containers/btree/btree.c +SRCS += src/libraries/containers/eheap/eheap_old.c +SRCS += src/libraries/containers/list.c +SRCS += src/libraries/containers/map.c +SRCS += 
src/libraries/containers/multimap.c +SRCS += src/libraries/containers/queue.c +SRCS += src/libraries/eventbuffer/eventbufferproducer.c +SRCS += src/libraries/ioaccess/ioaccess.c +SRCS += src/libraries/nvbitvector/nvbitvector.c +SRCS += src/libraries/nvoc/src/runtime.c +SRCS += src/libraries/nvport/core/core.c +SRCS += src/libraries/nvport/cpu/cpu_common.c +SRCS += src/libraries/nvport/crypto/crypto_random_xorshift.c +SRCS += src/libraries/nvport/memory/memory_tracking.c +SRCS += src/libraries/nvport/memory/memory_unix_kernel_os.c +SRCS += src/libraries/nvport/string/string_generic.c +SRCS += src/libraries/nvport/sync/sync_rwlock.c +SRCS += src/libraries/nvport/sync/sync_unix_kernel_os.c +SRCS += src/libraries/nvport/thread/thread_unix_kernel_os.c +SRCS += src/libraries/nvport/util/util_compiler_switch.c +SRCS += src/libraries/nvport/util/util_unix_kernel_os.c +SRCS += src/libraries/prereq_tracker/prereq_tracker.c +SRCS += src/libraries/resserv/src/rs_access_map.c +SRCS += src/libraries/resserv/src/rs_access_rights.c +SRCS += src/libraries/resserv/src/rs_client.c +SRCS += src/libraries/resserv/src/rs_domain.c +SRCS += src/libraries/resserv/src/rs_resource.c +SRCS += src/libraries/resserv/src/rs_server.c +SRCS += src/libraries/tls/tls.c +SRCS += src/libraries/utils/nvassert.c diff --git a/NVIDIA-kernel-module-source-TempVersion/utils.mk b/NVIDIA-kernel-module-source-TempVersion/utils.mk new file mode 100644 index 0000000..d09807e --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/utils.mk @@ -0,0 +1,558 @@ +# +# Copyright (C) 2008 NVIDIA Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+# +# +# utils.mk: common Makefile fragment used by nvidia-xconfig, +# nvidia-installer, and nvidia-settings +# + + + +############################################################################## +# The calling Makefile (when building as part of the NVIDIA graphics +# driver) may export any of the following variables; we assign default +# values if they are not exported by the caller +############################################################################## + +CC ?= gcc +CXX ?= g++ +LD ?= ld +AR ?= ar +# only set these warnings if CFLAGS is unset +CFLAGS ?= -Wall +# always set these -f flags +CFLAGS += -fno-strict-aliasing -fno-omit-frame-pointer -Wformat=2 +CC_ONLY_CFLAGS ?= +CXX_ONLY_CFLAGS ?= +LDFLAGS ?= +BIN_LDFLAGS ?= + +STACK_USAGE_WARNING ?= +CFLAGS += $(if $(STACK_USAGE_WARNING),-Wstack-usage=$(STACK_USAGE_WARNING)) + +HOST_CC ?= $(CC) +HOST_LD ?= $(LD) +HOST_CFLAGS ?= $(CFLAGS) +HOST_CC_ONLY_CFLAGS ?= +HOST_CXX_ONLY_CFLAGS ?= +HOST_LDFLAGS ?= $(LDFLAGS) +HOST_BIN_LDFLAGS ?= + +# always disable warnings that will break the build +CC_ONLY_CFLAGS += -Wno-format-zero-length +CFLAGS += -Wno-unused-parameter +HOST_CC_ONLY_CFLAGS += -Wno-format-zero-length +HOST_CFLAGS += -Wno-unused-parameter + +# Treat warnings as errors, if requested +WARNINGS_AS_ERRORS ?= +CFLAGS += $(if $(WARNINGS_AS_ERRORS),-Werror) + +DEBUG ?= +DEVELOP ?= + +ifeq ($(DEBUG),1) + STRIP_CMD ?= true + DO_STRIP ?= + CFLAGS += -O0 -g + CFLAGS += -DDEBUG=1 +else + CFLAGS += -O2 +endif + +ifeq ($(DEVELOP),1) + STRIP_CMD ?= true + DO_STRIP ?= + CFLAGS += -DDEVELOP=1 +endif + +STRIP_CMD ?= strip +DO_STRIP ?= 1 + +INSTALL ?= install +INSTALL_BIN_ARGS ?= -m 755 +INSTALL_LIB_ARGS ?= -m 644 +INSTALL_DOC_ARGS ?= -m 644 + +M4 ?= m4 +SED ?= sed +ECHO ?= echo +PRINTF ?= printf +MKDIR ?= mkdir -p +RM ?= rm -f +TOUCH ?= touch +HARDLINK ?= ln -f +DATE ?= date +GZIP_CMD ?= gzip +CHMOD ?= chmod +OBJCOPY ?= objcopy +XZ ?= xz +WHOAMI ?= whoami +HOSTNAME ?= hostname + +NV_AUTO_DEPEND ?= 1 +NV_VERBOSE ?= 0 + +ifndef TARGET_OS + TARGET_OS := $(shell uname) +endif + +ifeq ($(TARGET_OS),Linux) + CFLAGS += -DNV_LINUX +endif + +ifeq ($(TARGET_OS),FreeBSD) + CFLAGS += -DNV_BSD +endif + +ifeq ($(TARGET_OS),SunOS) + CFLAGS += -DNV_SUNOS +endif + +ifndef TARGET_ARCH + TARGET_ARCH := $(shell uname -m) + TARGET_ARCH := $(subst i386,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i486,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i586,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i686,x86,$(TARGET_ARCH)) +endif + +ifeq ($(TARGET_ARCH),x86) + CFLAGS += -DNV_X86 -DNV_ARCH_BITS=32 +endif + +ifeq ($(TARGET_ARCH),x86_64) + CFLAGS += -DNV_X86_64 -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_ARCH),armv7l) + CFLAGS += -DNV_ARMV7 -DNV_ARCH_BITS=32 +endif + +ifeq ($(TARGET_ARCH),aarch64) + CFLAGS += -DNV_AARCH64 -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_ARCH),ppc64le) + CFLAGS += -DNV_PPC64LE -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_OS),Linux) + LIBDL_LIBS = -ldl +else + LIBDL_LIBS = +endif + +# This variable controls which floating-point ABI is targeted. For ARM, it +# defaults to "gnueabi" for softfp. Another option is "gnueabihf" for +# hardfp. This is necessary to pick up the correct rtld_test binary. +# All other architectures default to empty.
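+# +# For example, a hard-float ARM build might be invoked as follows (an +# illustrative invocation, not a documented entry point): +# +# make TARGET_ARCH=armv7l TARGET_ARCH_ABI=gnueabihf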
+ifeq ($(TARGET_ARCH),armv7l) + TARGET_ARCH_ABI ?= gnueabi +endif +TARGET_ARCH_ABI ?= + +ifeq ($(TARGET_ARCH_ABI),gnueabi) + CFLAGS += -DNV_GNUEABI +endif + +ifeq ($(TARGET_ARCH_ABI),gnueabihf) + CFLAGS += -DNV_GNUEABIHF +endif + +OUTPUTDIR ?= _out/$(TARGET_OS)_$(TARGET_ARCH) +OUTPUTDIR_ABSOLUTE ?= $(CURDIR)/$(OUTPUTDIR) + +NV_SEPARATE_DEBUG_INFO ?= +NV_KEEP_UNSTRIPPED_BINARIES ?= + +NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX ?= + +NV_GENERATED_HEADERS ?= + +PCIACCESS_CFLAGS ?= +PCIACCESS_LDFLAGS ?= + +############################################################################## +# This makefile uses the $(eval) builtin function, which was added in +# GNU make 3.80. Check that the current make version recognizes it. +# Idea suggested by: http://www.jgc.org/blog/cookbook-sample.pdf +############################################################################## + +_eval_available := +$(eval _eval_available := T) + +ifneq ($(_eval_available),T) + $(error This Makefile requires a GNU Make that supports 'eval'. Please upgrade to GNU make 3.80 or later) +endif + + +############################################################################## +# Test passing $(1) to $(CC). If $(CC) succeeds, then echo $(1). +# +# Because this uses $(shell), it is best to use this to assign simply expanded +# variables (e.g., ":="). +# +# Example usage: +# CONDITIONAL_CFLAGS := $(call TEST_CC_ARG, -ffoo) +############################################################################## + +TEST_CC_ARG = \ + $(shell $(CC) -c -x c /dev/null -Werror $(1) -o /dev/null > /dev/null 2>&1 && \ + $(ECHO) $(1)) + + +############################################################################## +# define variables used when installing the open source utilities from +# the source tarball +############################################################################## + +PREFIX ?= /usr/local + +BINDIR = $(DESTDIR)$(PREFIX)/bin +LIBDIR = $(DESTDIR)$(PREFIX)/lib +MANDIR = $(DESTDIR)$(PREFIX)/share/man/man1 + + +############################################################################## +# default build rule, so that nothing here in utils.mk accidentally +# gets selected as the default rule +############################################################################## + +default build: all + + +############################################################################## +# get the definition of NVIDIA_VERSION from version.mk +# +# version.mk may be in one of two places: either in $(OUTPUTDIR) when +# building as part of the NVIDIA driver build, or directly in the +# source directory when building from the source tarball +# +# Throw an error if neither of these places defined NVIDIA_VERSION. +############################################################################## + +VERSION_MK_DIR ?= . +VERSION_MK := $(wildcard $(OUTPUTDIR)/version.mk $(VERSION_MK_DIR)/version.mk) +include $(VERSION_MK) + +ifndef NVIDIA_VERSION +$(error NVIDIA_VERSION undefined) +endif + +############################################################################## +# NV_GET_SOURCE_TYPE: if the source file $(1) should be compiled as C, this +# evaluates to "CC"; if the source file $(1) should be compiled as C++, this +# evaluates to "CXX".
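+# +# For example (foo.c and bar.cpp are placeholder names): +# $(call NV_GET_SOURCE_TYPE,foo.c) evaluates to "CC" +# $(call NV_GET_SOURCE_TYPE,bar.cpp) evaluates to "CXX" +# and any other suffix is reported as an error.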
+############################################################################## + +NV_GET_SOURCE_TYPE = $(strip \ + $(if $(filter %.c, $(1)),CC, \ + $(if $(filter %.cpp, $(1)),CXX, \ + $(error Unrecognized source $(1))))) + + +############################################################################## +# Several of the functions below take an argument that indicates if +# the expression is for the target platform (the system the built +# program is going to run on) or the host platform (the system +# performing the build). The argument is either "HOST" or "TARGET" +# and needs to be converted: +# +# "HOST" -> "HOST_" +# "TARGET" -> "" +############################################################################## + +host_target = $(patsubst HOST,HOST_,$(patsubst TARGET,,$(1))) + + +############################################################################## +# To generate the dependency files: +# +# - Use the compiler's "-MMD" option to generate output of the form +# "foo.o : foo.c foo.h bar.h". +# +# - Also, "-MMD" will cause the compiler to name the target as if it were in the +# current directory ("foo.o: "); use -MT to rename the target in the output +# directory ("_out/Linux_x86/foo.o: ") so that the target actually applies to +# the object files produced in the build. +# +# - Use -MP to generate a phony target for each of those prerequisites (except +# the source file being compiled). E.g., +# "foo.o : foo.c foo.h bar.h +# foo.h: +# bar.h:" +# so that the makefile can handle incremental builds after a prerequisite has +# been deleted from source control. +# +# - Use sed to remove the source file from the list of prerequisites in the +# above, so that the makefile can handle incremental builds after the source has +# moved from one directory to another. The DEFINE_OBJECT_RULE macro spells +# out the obj: src dependency, so we don't require it here.
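+# +# For instance, if the compiler emits the (illustrative) dependency line +# "_out/Linux_x86/foo.o : foo.c foo.h bar.h", the sed pass rewrites it to +# "_out/Linux_x86/foo.o : foo.h bar.h" before saving it as the .d file.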
+############################################################################## + +ifeq ($(NV_AUTO_DEPEND),1) + AUTO_DEP_SUFFIX = -MMD -MF $$(@:.o=.d.to_be_processed) -MP -MT $$@ && \ + $$(SED) -e "1,3s| $$< | |" < $$(@:.o=.d.to_be_processed) > $$(@:.o=.d) +else + AUTO_DEP_SUFFIX = +endif + + +############################################################################## +# echo minimal compile information in the non-NV_VERBOSE case +# +# NV_MODULE_LOGGING_NAME can be set to prepend quiet build output with a +# label of which build component is being built +############################################################################## + +NV_MODULE_LOGGING_NAME ?= + +ifeq ($(NV_VERBOSE),0) + at_if_quiet := @ + quiet_cmd_no_at = $(PRINTF) \ + " $(if $(NV_MODULE_LOGGING_NAME),[ %-17.17s ],%s) $(quiet_$(1))\n" \ + "$(NV_MODULE_LOGGING_NAME)" && $($(1)) + quiet_cmd = @$(quiet_cmd_no_at) +else + at_if_quiet := + quiet_cmd_no_at = $($(1)) + quiet_cmd = $($(1)) +endif + +# define LINK and HOST_LINK to be the same as CC; this is so that, +# even though we use CC to link programs, we can have a different +# quiet rule that uses '$@' as its arg, rather than '$<' +LINK = $(CC) +HOST_LINK = $(HOST_CC) + +# strip NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX from the target string +define_quiet_cmd = $(1) $(patsubst $(NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX)/%,%,$(2)) + +# define the quiet commands: +quiet_CC = $(call define_quiet_cmd,CC ,$<) +quiet_CXX = $(call define_quiet_cmd,CXX ,$<) +quiet_HOST_CC = $(call define_quiet_cmd,HOST_CC ,$<) +quiet_HOST_CXX = $(call define_quiet_cmd,HOST_CXX ,$<) +quiet_LINK = $(call define_quiet_cmd,LINK ,$@) +quiet_HOST_LINK = $(call define_quiet_cmd,HOST_LINK ,$@) +quiet_M4 = $(call define_quiet_cmd,M4 ,$<) +quiet_STRIP_CMD = $(call define_quiet_cmd,STRIP ,$@) +quiet_HARDLINK = $(call define_quiet_cmd,HARDLINK ,$@) +quiet_LD = $(call define_quiet_cmd,LD ,$@) +quiet_OBJCOPY = $(call define_quiet_cmd,OBJCOPY ,$@) +quiet_AR = $(call define_quiet_cmd,AR ,$@) +quiet_XZ = $(call define_quiet_cmd,XZ ,$@) + +############################################################################## +# Tell gmake to delete the target of a rule if it has changed and its +# commands exit with a nonzero exit status. +############################################################################## +.DELETE_ON_ERROR: + + +############################################################################## +# function to generate a list of object files from their corresponding +# source files using the specified path. The _WITH_DIR variant takes an +# output path as the second argument while the BUILD_OBJECT_LIST defaults +# to using the value of OUTPUTDIR as the output path. example usage: +# +# OBJS = $(call BUILD_OBJECT_LIST_WITH_DIR,$(SRC),$(DIR)) +############################################################################## + +BUILD_OBJECT_LIST_WITH_DIR = \ + $(addprefix $(2)/,$(notdir $(addsuffix .o,$(basename $(1))))) + +BUILD_OBJECT_LIST = \ + $(call BUILD_OBJECT_LIST_WITH_DIR,$(1),$(OUTPUTDIR)) + +$(call BUILD_OBJECT_LIST,nvpci-utils.c): CFLAGS += $(PCIACCESS_CFLAGS) + +############################################################################## +# function to generate a list of dependency files from their +# corresponding source files using the specified path. The _WITH_DIR +# variant takes an output path as the second argument while the +# BUILD_DEPENDENCY_LIST defaults to using the value of OUTPUTDIR as the +# output path.
example usage: +# +# DEPS = $(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(SRC),$(DIR)) +############################################################################## + +BUILD_DEPENDENCY_LIST_WITH_DIR = \ + $(addprefix $(2)/,$(notdir $(addsuffix .d,$(basename $(1))))) + +BUILD_DEPENDENCY_LIST = \ + $(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(1),$(OUTPUTDIR)) + + +############################################################################## +# functions to define a rule to build an object file; the first +# argument for all functions is whether the rule is for the target or +# host platform ("HOST" or "TARGET"), the second argument for all +# functions is the source file to compile. +# +# An order-only dependency is added on any generated header files listed in +# $(NV_GENERATED_HEADERS), to ensure they're present before invoking the +# compiler. For incremental builds where the object file already exists, a +# real (not order-only) dependency will be created by automatic dependency +# tracking if needed. +# +# The _WITH_OBJECT_NAME and _WITH_DIR function name suffixes describe +# the third and possibly fourth arguments based on order. The +# _WITH_OBJECT_NAME argument is the object filename to produce while +# the _WITH_DIR argument is the destination path for the object file. +# +# Example usage: +# +# $(eval $(call DEFINE_OBJECT_RULE,TARGET,foo.c)) +# +# Note this also attempts to include the dependency file for this +# source file. +# +# The DEFINE_OBJECT_RULE is functionally equivalent to +# DEFINE_OBJECT_RULE_WITH_OBJECT_NAME, but infers the object file name +# from the source file name (this is normally what you want). +# +# Arguments: +# $(1) : HOST or TARGET +# $(2) : source file +# $(3) : object file +# $(4) : directory +############################################################################## + +define DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR + $(3): NV_SOURCE_TYPE = $$(call NV_GET_SOURCE_TYPE,$(2)) + + # obj: {HOST_,}CFLAGS += $$({HOST_,}{CC,CXX}_ONLY_CFLAGS) + $(3): $$(call host_target,$(1))CFLAGS += \ + $$($(call host_target,$(1))$$(NV_SOURCE_TYPE)_ONLY_CFLAGS) + + $(3): $(2) | $$(NV_GENERATED_HEADERS) + @$(MKDIR) $(4) + $$(call quiet_cmd,$(call host_target,$(1))$$(NV_SOURCE_TYPE)) \ + $$($(call host_target,$(1))CFLAGS) -c $$< -o $$@ \ + $(AUTO_DEP_SUFFIX) + + -include $$(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(3),$(4)) + + # declare empty rule for generating dependency file; we generate the + # dependency files implicitly when compiling the source file (see + # AUTO_DEP_SUFFIX above), so we don't want gmake to spend time searching + # for an explicit rule to generate the dependency file + $$(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(3),$(4)): ; + +endef + +define DEFINE_OBJECT_RULE_WITH_OBJECT_NAME + $$(eval $$(call DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR,$(1),$(2),\ + $(3),$(OUTPUTDIR))) +endef + +define DEFINE_OBJECT_RULE_WITH_DIR + $$(eval $$(call DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR,$(1),$(2),\ + $$(call BUILD_OBJECT_LIST_WITH_DIR,$(2),$(3)),$(3))) +endef + +define DEFINE_OBJECT_RULE + $$(eval $$(call DEFINE_OBJECT_RULE_WITH_DIR,$(1),$(2),$(OUTPUTDIR))) +endef + +# This is a function that will generate rules to build +# files with separate debug information, if so requested. +# +# It takes one parameter: (1) Name of unstripped binary +# +# When used, the target for linking should be named (1).unstripped +# +# If separate debug information is requested, it will +# generate a rule to build one from the unstripped binary. 
+# If requested, it will also retain the unstripped binary. +define DEBUG_INFO_RULES + $(1): $(1).unstripped + ifneq ($(or $(DO_STRIP),$(NV_SEPARATE_DEBUG_INFO)),) + $$(call quiet_cmd,STRIP_CMD) -o $$@ $$< + else + $$(call quiet_cmd,HARDLINK) $$^ $$@ + endif + ifeq ($(NV_SEPARATE_DEBUG_INFO),1) + $(1).debug: $(1).unstripped + $$(call quiet_cmd,STRIP_CMD) --only-keep-debug -o $$@ $$< + $(1): $(1).debug + endif + ifneq ($(NV_KEEP_UNSTRIPPED_BINARIES),1) + .INTERMEDIATE: $(1).unstripped + endif +endef + +############################################################################## +# Define rule for generating a source file containing identification information +# for the build. +# +# $(1) string name +# $(2) module name +# $(3) prerequisite object files +############################################################################## + +NVIDSTRING = $(OUTPUTDIR)/g_nvid_string.c + +ifeq ($(DEBUG),1) + NVIDSTRING_BUILD_TYPE_STRING = Debug Build +else + NVIDSTRING_BUILD_TYPE_STRING = Release Build +endif + +define GENERATE_NVIDSTRING + # g_nvid_string.c depends on all objects except g_nvid_string.o, and version.mk + $(NVIDSTRING): $$(filter-out $$(call BUILD_OBJECT_LIST,$$(NVIDSTRING)), $(3)) $$(VERSION_MK) + $(at_if_quiet)$$(MKDIR) $$(dir $$@) + $(at_if_quiet)$$(ECHO) "const char $(1)[] = \"nvidia id: NVIDIA $$(strip $(2)) for $$(TARGET_ARCH) $$(NVIDIA_VERSION) $$(NVIDSTRING_BUILD_TYPE_STRING) (`$$(WHOAMI)`@`$$(HOSTNAME)`) `$$(DATE)`\";" > $$@ + $(at_if_quiet)$$(ECHO) "const char *const p$$(strip $(1)) = $(1) + 11;" >> $$@; +endef + + +############################################################################## +# Define rules that can be used for embedding a file into an ELF object that +# contains the raw contents of that file and symbols pointing to the embedded +# data. +# +# Note that objcopy will name the symbols in the resulting object file based on +# the filename specified in $(1). For example, +# +# $(eval $(call READ_ONLY_OBJECT_FROM_FILE_RULE,a/b/c)) +# +# will create an object named $(OUTPUTDIR)/c.o with the symbols _binary_c_start, +# _binary_c_end, and _binary_c_size. +# +# Arguments: +# $(1): Path to the file to convert +############################################################################## + +define READ_ONLY_OBJECT_FROM_FILE_RULE + $$(OUTPUTDIR)/$$(notdir $(1)).o: $(1) + $(at_if_quiet)$$(MKDIR) $$(OUTPUTDIR) + $(at_if_quiet)cd $$(dir $(1)); \ + $$(call quiet_cmd_no_at,LD) -r -z noexecstack --format=binary \ + $$(notdir $(1)) -o $$(OUTPUTDIR_ABSOLUTE)/$$(notdir $$@) + $$(call quiet_cmd,OBJCOPY) \ + --rename-section .data=.rodata,contents,alloc,load,data,readonly \ + $$@ +endef diff --git a/NVIDIA-kernel-module-source-TempVersion/version.mk b/NVIDIA-kernel-module-source-TempVersion/version.mk new file mode 100644 index 0000000..c475d1d --- /dev/null +++ b/NVIDIA-kernel-module-source-TempVersion/version.mk @@ -0,0 +1,9 @@ +NVIDIA_VERSION = 35.4.1 + +# The path to this file (version.mk).
+VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST)) +$(OUTPUTDIR)/version.h: $(VERSION_MK_FILE) + @$(MKDIR) $(OUTPUTDIR) + @$(ECHO) '#define NVIDIA_VERSION "$(NVIDIA_VERSION)"' > $@ + +NV_GENERATED_HEADERS += $(OUTPUTDIR)/version.h diff --git a/commitFile.txt b/commitFile.txt new file mode 100644 index 0000000..7b69548 --- /dev/null +++ b/commitFile.txt @@ -0,0 +1,1159 @@ +Updating prebuilts and/or headers + +c2e810fc3453d74ee0493168dbf7981ba482acd3 - NVIDIA-kernel-module-source-TempVersion/SECURITY.md +7d577fdb9594ae572ff38fdda682a4796ab832ca - NVIDIA-kernel-module-source-TempVersion/COPYING +12f1806bdc25917299525e0e48815306159de132 - NVIDIA-kernel-module-source-TempVersion/Makefile +60176067d89204db2a337983144481c56d94baf2 - NVIDIA-kernel-module-source-TempVersion/README.md +4f4410c3c8db46e5a98d7a35f7d909a49de6cb43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile +90d4457b6fec29378645d5932ad82d706942f4a6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh +a0a15eb341be905ced2a09b8c4feb8bb43b4fb39 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild +0b1508742a1c5a04b6c3a4be1b48b506f4180848 - NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf +1d17329caf26cdf931122b3c3b7edf4932f43c38 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h +88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h +60ef64c0f15526ae2d786e5cec07f28570f0663b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h +ea98628370602119afb1a065ff954784757ddb10 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h +c06b2748cd7c8f86b5864d5e9abe6ecf0ab622f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h +423282211355a8cb20bff268166885ac90e2986c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h +c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h +35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h +82940edf4650b9be67275d3a360ef4e63387a0a7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h +1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h +4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h +e4a4f57abb8769d204468b2f5000c81f5ea7c92f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h +6337f595602bce9d76559de1be90553b52f405d8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h +b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h +e20882a9b14f2bf887e7465d3f238e5ac17bc2f5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h +5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h +b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h +880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h +c45b2faf17ca2a205c56daa11e3cb9d864be2238 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h 
+349696856890bdbe76f457376648522b35f874ef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h +003b2cbe3d82e467c09371aee86e48d65ae6c29b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h +b642fb649ce2ba17f37c8aa73f61b38f99a74986 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h +1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h +3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h +b02c378ac0521c380fc2403f0520949f785b1db6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h +3100c536eb4c81ae913b92d4bc5905e752301311 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h +143051f69a53db0e7c5d2f846a9c14d666e264b4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h +3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h +7b2e2e6ff278acddc6980b330f68e374f38e0a6c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h +fdbaee144adb26c00776b802560e15f775ed5aef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h +befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h +80fcb510fad25cb7a017139f487da1843b7cfcbd - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h +59d537c1d1b284a9d52277aff87c237e3ec2c99d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h +e3362c33fe6c7cdec013eceac31e8f6f38dc465f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h +5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h +95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h +4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h +009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h +d721fca5f2317b9b325dedcbfba51c00d0b23648 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h +4b1a6c372a531b0d3e0a4e9815dde74cb222447c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h +94ad0ba9fd6eb21445baec4fddd7c67a30cceefa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h +f3e0f71abf34300d322e313adcd4fcbde9aa6f87 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h +256b5dc6f28738b3ce656c984f01d8f3e13e9faa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h +c57259130166701bf6d5e5bb1968397716d29fc0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h +84e9b6cba7ba26ef4032666f769c5b43fa510aad - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h +df0420a5e3576e5a8b77a7bcefa6888ad62d6fd7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h +910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h +42ece56d0459eb9f27b2497de48f08360c4f7f6b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h +4a8b7f3cc65fa530670f510796bef51cf8c4bb6b - 
NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h +5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h +906329ae5773732896e6fe94948f7674d0b04c17 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h +2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h +d25291d32caef187daf3589ce4976e4fa6bec70d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h +8c041edbf4ed4fefdfd8006252cf542e34aa617b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h +cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h +2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h +17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h +c181ab9960b0c01a7672bc1fe1bc8870f1e8856d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c +0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c +07a2d5fa54ff88a0cb30c0945ef3c33ca630a490 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild +7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h +8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h +9a0f445fda73c69e1bee7f6b121cbed33fcb01bf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c +c5cfba80ea122c9078f2d44f1538144747d7931b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c +95ae148b016e4111122c2d9f8f004b53e78998f3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c +24fd035338936c76fda8faeb0d8b1cd59875db92 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild +3ee953312a6a246d65520fc4a65407f448d1d2b8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c +cded6e9b6324fd429b865173596c8e549a682bba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c +5f2e279a4abe0dabd478b1589be67df18de4b09d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c +c1ebcfec42f7898dd9d909eacd439d288b80523f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c +d11ab03a617b29efcf00f85e24ebce60f91cf82c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c +dc39c4ee87f4dc5f5ccc179a98e07ddb82bb8bce - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c +7b1bd10726481626dd51f4eebb693794561c20f6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c +06e7ec77cd21c43f900984553a4960064753e444 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c +d4f2cac6234e5ad337c254875a26d17372f28162 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c +e903f50b2624f33807214973558b9ff380bd68e0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c +805042e7cdb9663a0d3ca3064baeec8aa8eb3688 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c +c7f1aaa6a5f3a3cdf1e5f80adf40b3c9f185fb94 - 
NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c
+0b0ec8d75dfece909db55136731196162c4152d5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c
+84d84563c003d3f568068e7322ce314387a6f579 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c
+94c406f36836c3396b0ca08b4ff71496666b9c43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c
+fbae5663e3c278d8206d07ec6446ca4c2781795f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h
+2c0d17f9babe897435c7dfa43adb96020f45da2b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c
+9b701fe42a0e87d62c58b15c553086a608e89f7b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h
+0ce95e5ed52d6d6ca2bb6aac33ca8f197145ec45 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c
+cf90d9ea3abced81d182ab3c4161e1b5d3ad280d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h
+02b1936dd9a9e30141245209d79b8304b7f12eb9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c
+26c3971ea7afb4b7f237db9ab1c321c3de814518 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c
+218aac0c408be15523a2d0b70fdbdadd7e1a2e48 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c
+6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h
+5ac10d9b20ccd37e1e24d4a81b8ac8f83db981e4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c
+9999872b1513360d8ecf6c0894f81c63e7d435e9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c
+fc566df59becef7bc7511ae62a9a97b1532a5af2 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c
+b71bf4426322ab59e78e2a1500509a5f4b2b71ab - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h
+a3626bf1b80a81c14408c5181e8bd27696df2caf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c
+98c1be29932b843453567d4ada2f9912ea4523d7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c
+0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c
+61eadfa0f5b44a3d95e4d2d42d79321fc909c661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c
+4eee7319202366822e17d29ecec9f662c075e7ac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c
+495bcdff3847ff67ba4bbf9af23729eb66eed487 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c
+64f1c96761f6d9e7e02ab049dd0c810196568036 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c
+d844fcaa5b02f1d1a753965a336287148b2ce689 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h
+dc165103f9196f5f9e97433ec32ef6dded86d4bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c
+68d781e929d103e6fa55fa92b5d4f933fbfb6526 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h
+978d00b0d319c5ad5c0d3732b0e44f4ac0ac9a4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h
+fbfa2125b2bac1953af6d6fd99352898e516a686 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c
+027fd0ab218eb98abe2b66d05f10b14ebb57e7a3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c
+07f95171c241880c472a630d1ee38fb222be4d59 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild
+a392fa800565c8345b07af5132db7078b914d59f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c
+ee894ec530acbd765c04aec93c1c312d42210aeb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c
+f179d308e984ff44a82f6e1c6007624f1ac916ba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c
+e2b0e4ef01bb28ff6dcc10cb44570e185ce82df0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h
+7ac10bc4b3b1c5a261388c3f5f9ce0e9b35d7b44 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c
+d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c
+8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h
+eca70b3b8146903ec678a60eebb0462e6ccf4569 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h
+3c9a013abdc787a1022b11099af4277c37cd666b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild
+e4bb0073eb9d6f965923bb9874e4714518850a27 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h
+99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h
+8b2063f0cc2e328f4f986c2ce556cfb626c89810 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c
+6528efa1f8061678b8543c5c0be8761cab860858 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h
+ab63f2a971db8bf10585b1a05fe0e3ca180ad6c7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h
+40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h
+2911436a80d67074106c507871f4b480aa307237 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c
+8c95aa7ab01dd928974ce7880a532557209bd8e0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h
+fa8d8d10ae773bb7db3b3ce1df545de0e04c937e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c
+23586447526d9ffedd7878b6cf5ba00139fadb5e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
+cbcd6e13d84ea6b52db12eda98be38e321888eb0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h
+a7bc26c1078e95f9ff49c164f3652787adf1fef3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c
+bb1f2105d19b50634d46a92ade7fc5f709ec25d3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c
+c8982ace6fc79f75c092662902c0c61371195f0c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c
+66b33e4ac9abe09835635f6776c1222deefad741 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h
+6d65ea9f067e09831a8196022bfe00a145bec270 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
+45ec9fd1abfe9a0c7f9ffaf665014cec89c9e7e6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h
+7129c765da5bfb77788441fed39b46dc7dc0fa8e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
+59bb05ef214b5c5f2fe3cf70142dabd47ea70650 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h
+ef03d0ae581cc0326abe6054249791f8c0faa9a8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c
+044071d60c8cc8ea66c6caaf1b70fe01c4081ad3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h
+708d02c8bcdfb12e4d55896e667821357c8251ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h
+dc0fe38909e2f38e919495b7b4f21652a035a3ee - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.c
+e4efab24f90d397c270568abb337ab815a447fec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
+b775af5899366845f9b87393d17a0ab0f1f6a725 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c
+1e05d0ff4e51a10fa3fcd6519dc915bf13aa69c0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h
+492a1b0b02dcd2d60f05ac670daeeddcaa4b0da5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
+892cac6dd51ccfde68b3c29a5676504f93ee8cd7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c
+355126d65ea1472ce3b278066811d4fb764354ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
+5209eba37913f5d621a13091783622759706e6e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c
+e362c64aa67b47becdbf5c8ba2a245e135adeedf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
+9a882b31b2acc9e1ad3909c0061eee536e648aae - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h
+5008845a531207899830bcf4568c3463ad0ea6bc - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c
+97b6c56b1407de976898e0a8b5a8f38a5211f8bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h
+d862cc13c29bbce52f6b380b7a0a45a07fe9cbac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c
+c294224282118c70cd546ae024a95479ad9b1de4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
+d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c
+bda08c8398f68ffc2866ebc390dc63a09a16b0b9 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c
+e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c
+5ef40af650eb65b2c87572a1bbfe655d8821f2d5 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c
+26f2a36442266c5d2664d509ecfd31094a83e152 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c
+9e008270f277e243f9167ab50401602378a2a6e8 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h
+8d9c4d69394b23d689a4aa6727eb3da1d383765a - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h
+07c675d22c4f0f4be6647b65b6487e2d6927c347 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h
+667b361db93e35d12d979c47e4d7a68be9aa93b6 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h
+881cbcc7ed39ea9198279136205dbe40142be35e - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h
+1c947cfc8a133b00727104684764e5bb900c9d28 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h
+83044eb5259200922f78ad3248fbc1d4de1ec098 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h
+2476f128437c0520204e13a4ddd2239ff3f40c21 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h
+a8e49041c1b95431e604852ad0fa3612548e3c82 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h
+e3be7ba45506c42d2fca87e9da45db75ced750ca - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h
+f669280a5e86ba51b691e2609fa7d8c223bd85dc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c
+7c2fe72426fa304315e169e91dc6c1c58b5422fd - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c
+381e1b8aeaa8bd586c51db1f9b37d3634285c16a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h
+67db549636b67a32d646fb7fc6c8db2f13689ecc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c
+5e12a290fc91202e4ba9e823b6d8457594ed72d3 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
+d2c79c8a4e914519d653d1f14f706ec4a1f787e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c
+15d54c86d78404639c7f151adc672e19472dcf4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c
+9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c
+54a1b5e5aaf0848a72befc896ed12f1de433ad4f - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c
+443c0a4b17a0019e4de3032c93c5cac258529f01 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h
+e6d500269128cbd93790fe68fbcad5ba45c2ba7d - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c
+90e8ce7638a28cd781b5d30df565116dc1cea9e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h
+f75b1d98895bdccda0db2d8dd8feba53b88180c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h
+ba9e382b24f57caa9dcf1c26a60b1f2070b1b9dd - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c
+28d7b753825d5f4a9402aff14488c125453e95c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c
+b4813a5e854e75fb38f460e0c27dca8e1ce8dc21 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c
+1290abde75d218ae24f930c3b011042a3f360c2e - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h
+4a2ad30f49ed92694b717a99ce7adeeb565e8a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c
+439ef00ffa340bd1b6506970d154a33ca4b64b4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c
+cfaa569ac3d63484c86e8a8d7a483dd849f96be8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c
+1997adbf2f6f5be7eb6c7a88e6660391a85d891b - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c
+49df9034c1634d0a9588e5588efa832a71750a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c
+58b68f1272b069bb7819cbe86fd9e19d8acd0571 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h
+890d8c2898a3277b0fed360301c2dc2688724f47 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c
+3023a58fd19d32280607d4027b09fe51fdb7a096 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h
+e66a20fc1579b0dd1392033089f97cf170e8cf10 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h
+b5bd3a58b499216e4fe0e0c9c99525b07ac237dc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c
+f531475d8b978bca5b79d39d729b0c9986fe7b36 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h
+95dae946088f21339299dae48eeafaab31b97b05 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h
+0a04709ebdc4acb12038656c433e10c4e7096518 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c
+1ff879eca2a273293b5cd6048419b2d2d8063b93 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c
+1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c
+c3ce12c227d25bc0de48fbcf914fc208e2448741 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c
+fb062ecbe62a1f5878fd47f0c61490f2bde279dd - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c
+38bd00e9c4d2f1354c611404cca6209a6c417669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c
+0e9694d551848d88531f5461a9b3b91611652e9a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c
+9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c
+23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c
+5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c
+09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c
+00c612847b3bd227a006a4a2697df85866b80315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c
+29321080baa7eab86947ac825561fdcff54a0e43 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c
+2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f32.c
+ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c
+2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c
+daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c
+da3b3f94a817909a3dc93ca5fa7675805c7979e0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c
+bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c
+dafa667ee5dd52c97fc0c3b7144f6b619406c225 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c
+2960704c290f29aae36b8fe006884d5c4abcabb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c
+d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c
+0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c
+fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c
+9a5b93459ace2da23964da98617d6b18006fab86 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c
+ae25eea499b3ea5bdd96c905fd0542da11083048 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c
+729e790328168c64d65a1355e990274c249bbb3a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c
+296c40b0589536cb9af3231ad3dcd7f2baaa6887 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c
+5c1026617c588bcf5f1e59230bd5bb900600b9ac - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c
+4b37be398b3e73ae59245f03b2ba2394fc902b4d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c
+69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c
+d0f8f08c225b60d88b6358d344404ba9df3038ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c
+c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF64.c
+dde685423af544e5359efdb51b4bf9457c67fa3b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c
+577821f706c7de4ca327c1e2fcc34161c96c89f3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c
+5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c
+84b0a01ba2a667eb28b166d45bd91352ead83e69 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c
+513a7d1c3053fc119efcd8ae1bcc9652edc45315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c
+4445b1fbbd507144f038fd939311ff95bc2cf5f1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f64.c
+b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c
+ab19c6b50c40b8089cb915226d4553d1aa902b0e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c
+7bc81f5bc894118c08bfd52b59e010bc068ed762 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c
+7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/softfloat_state.c
+ec1a797b11f6e846928a4a49a8756f288bda1dfa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c
+86fdc2472526375539216461732d1db6a9f85b55 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c
+b22876b0695f58ee56143c9f461f1dde32fefbf3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c
+d701741d8d6a92bb890e53deda1b795f5787f465 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c
+baa7af4eea226140c26ffe6ab02a863d07f729fb - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c
+ce37cdce572a3b02d42120e81c4969b39d1a67b6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c
+0108fe6f0d394ad72083aff9bb58507f97a0b669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c
+b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c
+54cbeb5872a86e822bda852ec15d3dcdad4511ce - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c
+c29536f617d71fe30accac44b2f1df61c98a97dc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c
+871cb1a4037d7b4e73cb20ad18390736eea7ae36 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c
+21a6232d93734b01692689258a3fdfbbf4ff089d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c
+760fd7c257a1f915b61a1089b2acb143c18a082e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c
+5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c
+0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c
+29396b7c23941024a59d5ea06698d2fbc7e1a6ca - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c
+108eec2abf1cddb397ce9f652465c2e52f7c143b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c
+fe06512577e642b09196d46430d038d027491e9f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c
+d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c
+1484fc96d7731695bda674e99947280a86990997 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c
+8e58f0258218475616ff4e6317516d40ad475626 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c
+6fa7493285fe2f7fdc0ac056a6367e90327905c2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c
+aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c
+bbc70102b30f152a560eb98e7a1a4b11b9ede85e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c
+e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c
+50b3147f8413f0595a4c3d6e6eeab84c1ffecada - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c
+50daf9186bc5d0180d1453c957164b136d5ffc89 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c
+6f83fa864007e8227ae09bb36a7fdc18832d4445 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c
+a94c8c2bd74633027e52e96f41d24714d8081eb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
+e7890082ce426d88b4ec93893da32e306478c0d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c
+2db07bbb8242bc55a24ef483af6d648db0660de0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c
+9266c83f3e50093cc45d7be6ab993a0e72af1685 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c
+00ab2120f71117161d4f6daaa9b90a3036a99841 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c
+824383b03952c611154bea0a862da2b9e2a43827 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c
+68843a93e1f46195243ef1164f611b759cf19d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c
+e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c
+054b23a974fc8d0bab232be433c4e516e6c1250a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c
+0d8e42636a3409a647291fdb388001c2b11bba07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c
+d9a86343e6cc75714f65f690082dd4b0ba724be9 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c
+1dd1b424087d9c872684df0c1b4063b077992d5f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
+86cda6550cb02bbf595d1667573e4be83702a95e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h
+21a11759ed2afd746a47c4d78b67640c2d052165 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
+a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
+3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
+252c816378fddab616b1f2a61e9fedd549224483 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
+d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
+d152bc457b655725185bdff42b36bb96d6e6715e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
+0cbae7a5abc336331d460cbd3640d2cda02af434 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
+1ded4df85ff5fa904fa54c27d681265425be1658 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h
+f36c896cfa01f1de9f9420189319e4e00c7fc52a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h
+9645e179cf888bcd0e3836e8126b204b4b42b315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h
+de09949a0ca5cd2a84b882b5b5c874d01d3ae11a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h
+4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h
+b882497ae393bf66a728dae395b64ac53602a1a5 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h
+be9407a273620c0ba619b53ed72d59d52620c3e4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h
+91e9bc3214d6bb9b20bc8001d85fe8699df5184a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h
+88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h
+f28f98589e65b71e47dbcb2c4230538ae0545e75 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h
+4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h
+af0bc90b3ad4767de53b8ff91e246fdab0146e8b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvsecurityinfo.h
+a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h
+b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h
+ae60d53603c7ddbbd72d4e16ce2951f3d42aed32 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h
+a31b82c454df785a1d7893af38e83443cfe6f2fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h
+ffa91e1110a5cc286ec44a7bda5461b2be941ea2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h
+9bca638f5832d831880f090c583fac6fc8cf6ee6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h
+821a01976045d7c3d2ac35b0f115e90a9e95f8e8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h
+1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
+3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h
+befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h
+5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h
+95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatus.h
+50d31a6d133b0ea9230f9dc1b701ce16a88a7935 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h
+eb42327a2b948b79edc04d9145c7aa5b2a2b420e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h
+9f2e225f027f5a04d1104d29a0039cd2bb7dd85a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h
+a9bf4969ae3e39cc315b6180ee7055e0ad1279c6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h
+00e9a0ace4b59958a8b048229fb22b4d9e2f8864 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
+3449834cb8b8c630ab1de6df30503c846b26e86b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
+f779cd0470e428160fc590b590f2cd4855950058 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
+7c4aef225d174ecbe1130d63b8e8ff752bddf48e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
+5abe75cf18a2fede23529194b406c3cf742edced - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
+c8490da9f200f4dbbac7ebe636f3a83485f3001c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
+1022bba330a71b92dcc81f47ba460209fcc70cd0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
+b72318d58806bfd25f922107a606b222baa2e28c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
+7a0c878431a9b0d9dda117f165946b1cdf8ebbde - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
+e2d8133537e2687df022c6a966c55fbfea1974f3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
+9c6a4f1d864b5161564869b19f8cb2ce9d629c1d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
+0639d6cd553994aff4195e8e7547eebf8e713145 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
+79204c26eb58ee812cc2f72ee1f6d4d7d93817c7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
+ea9aac6f0e23f0de444ac3919c35e4b78c18c942 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
+f7435e356d54d682a949734574388abbe7ffe1d0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
+64f849ed19609320461b8938f24f0b40fb1a35b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
+d107e41878b5bc50a5c8b29684122c9589625a6f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
+f4a4eeb35e15e0642d1bf4e2e5b31394f4cbbfa1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
+b7b0360b1a6ca78267fa10f7adcd370da86513c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
+862a17958488d69ca3e92c42ee1bed55cb299fa4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
+bb4182eeea20779f62165d2d50ed209b6a07e54e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
+b7f2957f506dc285acb87d41d34cfd60408b00ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h
+c72f147e8fb78126d13567278239acfcd9b9cc1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
+8dd5acedc0b1613314eb3fe9130a9c282bd49ca1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h
+681c94b982e29049638814f6c1e4eb508f8b0bf3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
+3646710984d5c3024d16f9ab346222ad6dfdb4f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
+6c34803c213ea0a28114bc921e1867cefebec088 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
+76c9f104e04a8fd9e73e03ad59b2e72264c5f169 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
+9e61da81ecdff15d63f9ae8a1c2f0960b820c65c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
+dac18fcaf5d652b21f84cfba455f4f5972e786c5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
+d51e47795dfe1fc0bae31b9379d6a39ac4d3080f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
+8a613db1c31724a577c4718752c15d9754882f48 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
+3966d65c9701bf97c807cf87838a08cda10f418d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
+a1830232f18afe44230d6a8598c50b3fc7656089 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
+2dd40e3e41d74de3865bc700acc9ab7e0540c647 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
+f97e7f88aa17788bbbebf55807e449c0ee016384 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
+b2b6b3b413ae17af1afde2fc8672cd1bf48e7b19 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
+3c7130d0613d3c8baef6b23bb63c6ee7a10ed21b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
+39f5e838aa6ab007c56e7a59c7d2986d1a7aa34a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
+6679d97e3852ed78ee44780408c523b94f426ca4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
+090f908931690302e3a2c77f3ce41c4de0c61efc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
+7c4e426dee0ae86c00b3bd10873a1a2bd94ed3b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
+5bdddb9a949a78443f83a7da81ad5fee8a300c44 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
+d084d99035f4cc34cd803ff4a5328b9e10ea77fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
+4b8fa2ce546ae3f06b7dc61df3d534449cdb5b2d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
+8855ee8bad2f2169ebd147e7ac77d9f1340cbad8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
+82a2e7a2fc6501163d07870f3f640a591f4a8996 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
+f3a855fe7a91c2acf2be41629ce906996e01a9fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
+3d8e37aa8485aadf55335d8f9f913273d90a2442 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
+da220a5608a0e4c73fa0315b13e2b29d92b114e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
+6834a9c75265c25adfb03f0b2dbfe0559f28cadf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
+051dbfd1d5ff02b2771bc9b3fad8aaef29aab9ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
+c3a75647f5ca6cd7b456511af36a9de6d90329c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
+82364e263f43ea028c2d66db58887958bdef64b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
+143c1c24ec926142d1f84dec7a543f2b98541545 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
+1684a3a8111fd3d83363cebe68d016a54eaaf686 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
+72292c9844eaf24c38967dd4a879c0c0f070a0de - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
+091f7bac99f5c786a64b6fa59d9d27af786bab10 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
+c0181e959c1ba5ebfc3f130c8764687b58453f9b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
+2a11fc0a499f8293b83e08572f5e6be04bd1da61 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
+a44d2f1b31b8ec124355018204909df19df09748 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
+8ef946f1d7545277ef64891b45a29db44c4e9913 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
+774fd1e730d1d853bf97946f7ecd24c6648c7af4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
+22d828c87b223f937c589a0e863a25d95b734371 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
+7d3819683e9f562a87f36a3e23c043b2b6fd814e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
+7d27fafff043d290b2ec1d2dddbecea2f1df4704 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
+27ad8b5c2406fcd572cd098dd215e93ae1db99e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
+783db6da0b92b6b8ae26b180129beb0bccb13a5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
+e6f6beaed64167088608027b442f5449cff027c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h
+6b4418e269bb97b9996b05ea153ccd195c661e11 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
+0ac7e4eb4d952c84c6f4e697cbfcb355069377c2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
+1651ec548a2899391a05bc6463b3f7162c7807ab - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
+bc22bf13b7d99ee6f80c30b569e084a2b03e385a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
+1ebfe9f0f9a7d2dd2873df82bbc78b1ec982ca93 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
+291f91212d5a37aae46a2944cf89f4b74b1d1809 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
+82aa4d6108ce6abebcbbc95afcb7a6350e287f5f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
+c4474dc1f53661c67d8fce5303dcc636d9ad3b8f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
+18814de559257f07bad8a0a9006ac9751fcfa1cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
+e9d692b06c70951dbbd0663a89f822153bce1146 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
+1248e113751f8ed9e4111e86a7f7fb632b102eca - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
+b921747a65c67fa093de08fa782c164d048824b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
+7e0773f7bf13350a9fd25b0df4d6c45a55a008df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
+8fd661537cc4eb55c167b9daae404bfb82408bfe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
+f88f1c519a242dfa71221bdcdafc7deab14d8503 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
+ccc48726d7da49cddc4d4f86d8dbd2ad585f7b38 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
+3dc187adc0a848e68f62a6a7eb99ac02ee6502cc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
+f3b81a241efe1224798b17c062e33936469c3c2b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
+09dedebdcff3244ab8f607a7152e9116d821f9c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
+440314f66374d35a1628ee8bd61836a80ab421eb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
+92be535d68a7f18088921faa3f1742298ad341c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
+84fb76f9cff38c797b139cba40175717591d49df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
+2f92bebccb9da5246b19bd13ff0e6e79de79bc3b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
+aec1b750866e34f9626e48c535336f93c5c246fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h
+9031642283b59ee6d52e2e1ca54332df5c2f7acc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
+e10cbe4875736ef16072232789dd3f48647c022f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
+91cccede5c4f26a6b6ca7ba4bc292f3d908a88d4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
+f47136417885a729f9c5dee375ec9dec1bd170e0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
+f523fe4a55a6a9d01f41f9f34ff149ed75b2e739 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h
+ad7604ced12ee18c569d2a7ebe71e185ebff3fd4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
+209ef519cb73395cea7d66016448ebc3c6bf6fe4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
+4a3e7d71b9169d703d9373ff80b02a63825a80e4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
+4d9116d23d27a3fc39c366f2685243b83ef7d485 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
+abe79ad927e7c70b7c1a8eb687052a782efcd5f4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
+ef180860a1ccbcb9f5d2f8a6656a345eef76a2a7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
+f7e56d494fea02515180f21b0f56ae0aff583be4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
+b66a45c83c84f6d458ef19fd7e0f972f2eabd109 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
+2518a62952c72ee6f3447bc8dc417129f6ac26a4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
+9373c51ca29afec3368fb5b8c2a2f05b0920f291 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
+0ee647b929e55cf39da7e26ffc0f027676fa52fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
+6e5b278451308efbb6911a8ab03b0feba504d035 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
+c905766589d17fcb99a5d73846ed61f7b7db56fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
+323fcc6af8c30d5ef292ae90810c5c2fa2009e20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
+382dc80790d870047db7cea957ef208d4439801e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h
+825f4d976c76d375803e42967fdab53e7814d18d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
+8294d43d202a9cd78367f2e69388a6c6f2c369f7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
+cf78a847e0882e1d164eccdb86ea033126019599 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
+76c31150e2f589fbb96cfc06cdc6c1801e128656 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
+7f5548026751a8caaebc245945ccdc4bb037b566 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
+7812ba094d95c1b6d65afc6a1d26930400b8b96f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
+f1dae17e75a24c28135cf073bf29f9609a2418e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
+24782552a13f627e2e94ebb5f7021246a0c0dc53 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
+127f78d2bb92ef3f74effd00c2c67cf7db5382fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h
+bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h
+4a6444c347825e06bdd62401120553469f79c188 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
+2f87e87bcf9f38017ad84417d332a6aa7022c88f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h
+0d8975eec1e3222694e98eb69ddb2c01accf1ba6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h
+c2600834921f8a6aad6a0404076fa76f9bc1c04d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h
+861b9d7581eab4a2b8cc7269b5d0e0d1294048d1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h
+92c2dab6bc48f32f46c6bbc282c63cb4ec7a50bf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h
+0285aed652c6aedd392092cdf2c7b28fde13a263 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h
+dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h
+a30755b3003023c093f8724cf9a2e0b0c301b586 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h
+cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h
+bb8d15aee43e1feb76fddf80398e93fd805f1ddb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h
+02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h
+ccefba28a2c7979701f963f2c358b4414b84ca98 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h
+2e3d5c71793820d90973d547d8afdf41ff989f89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h
+204feb997ba42deab327d570e5f12235d5160f00 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h
+03ab4e08e8685696477b62eb1a825e5198d61b8a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h
+545dd1899c6988ffe5f50300232bd862d915cd5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h
+022e8405220e482f83629dd482efee81cc49f665 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h
+36b0dd6de0d0b49d435a4662c35d1f4ae5b2b1bc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h
+02ff42b6686954e4571b8a318575372239db623b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h
+82c9df617999f93ebd9362851966f601b8131fdd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h
+eac86d7180236683b86f980f89ec7ebfe6c85791 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h
+866977d299eac812b41eb702a517e27bdc56e875 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h
+78259dc2a70da76ef222ac2dc460fe3caa32457a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h
+31939808cd46382b1c63bc1e0bd4af953302773f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h
+11fd2de68ab82b81211aa20c66a9a6595199f673 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h
+05605d914edda157385e430ccdbeb3fcd8ad3c36 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h
+9db39be032023bff165cd9d36bee2466617015a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h
+76c430d54887ed14cace9409712259e10f042b4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h
+e63ed2e1ff3fe2a5b29cfc334d3da611db2aadf6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
+ea10b0d938d9314638882fdc20b9158a193f7b08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h
+f5760f5054538f4ecf04d94fb1582a80a930bc29 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h
+b1133e9abe15cf7b22c04d9627afa2027e781b81 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h
+9bd9f416844d798f352fcc6c8aaf2c251253c068 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h
+04ab1761d913030cb7485149ecd365f2f9c0f7da - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h
+fb5ef3d6734a2ee6baba7981cdf6419d013cee85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h
+ddbffcce44afa7c07924fd64a608f7f3fe608ccc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h
+68c953956a63ef8f7f9bcbe71057af510f4597c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h
+38265d86eb7c771d2d3fc5102d53e6a170a7f560 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h
+941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h
+a23967cf3b15eefe0cc37fef5d03dfc716770d85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h
+9b2d08d7a37beea802642f807d40413c7f9a8212 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h
+e0c9a155f829c158c02c21b49c083168f8b00cbe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h
+95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h
+8b75d2586151302d181f59d314b6b3f9f80b8986 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h
+ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h
+026f66c4cc7baad36f1af740ae885dae58498e07 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h
+15136a724baab270914a01a8c0e8f2c2c83675b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h
+4bbb861011139be1c76b521eaa7ae10951d5bf9a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h
+d1a19dee52b3318714026f4fcc748cfa4681cd25 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h
+158c98c8721d558ab64a025e6fdd04ce7a16ba9e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h
+435a34753d445eb9711c7132d70bd26df2b8bdab - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h
+326dbbeb275b4fc29f6a7e2e42b32736474fec04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h
+1409efc057e4f0d55602f374ec006f9db7ad3926 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h
+bd27ceb75c4604fef53658f16a5012d97c1534b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9470.h
+e6818f1728a66a70080e87dac15a6f92dd875b4e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h
+11b19cb8d722146044ad5a12ae96c13ed5b122b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h
+1efc9d4aa038f208cd19533f6188ac3a629bf31a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h
+c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h
+4b8f95693f79a036317ab2f85e150c102ad782e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h
+a7c7899429766c092ee3ecf5f672b75bef55216c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h
+15d1f928a9b3f36065e377e29367577ae92ab065 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h
+a26ddc6c62faac1ecd5c5f43499aab32c70f32cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h
+b29ba657f62f8d8d28a8bdd2976ef3ac8aa6075f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h
+c5ef1b16b2bd2e33f52b71f2b78db789ebb844f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h
+ecc56a5803b85187aa95b788aedd4fa2262c1bb6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h
+dd4f75c438d19c27e52f25b36fc8ded1ce02133c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h
+6db83e33cb3432f34d4b55c3de222eaf793a90f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h
+b29ea3f13f501327c060b9ddfac5834ed396414a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h
+4d5ccf08ab73343343e0c804002a621996866161 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h
+593384ce8938ceeec46c782d6869eda3c7b8c274 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h
+95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h
+c61f8348c2978eef0a07191aaf92bd73e935f7bd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h
+509c56534ed6d48b06494bb22d3cf58d63254a05 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h
+da8d312d2fdc6012e354df4fa71ed62ae4aac369 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h
+5416c871e8d50a4e76cbad446030dbedbe1644fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h
+b7a5b31a8c3606aa98ba823e37e21520b55ba95c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h
+26c3ccc33328a66ad3bcfe999424dffda991264f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h
+28867d69a6ceac83da53a11a5e1ef87d9476f0be - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h
+053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h
+060722ac6a529a379375bb399785cbf2380db4fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h
+13f8e49349460ef0480b74a7043d0591cf3eb68f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h
+e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h
+ba76ecbebe0ed71ea861ed7016abbfc16ced2df7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h
+bae36cac0a8d83003ded2305409192995d264d04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h
+ab27db8414f1400a3f4d9011e83ac49628b4fe91 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h
+70b155b0da07a92ede884a9cec715f67e6b5c3e8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp
+c70d946adb4029b3476873887488748162b88b0b - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp
+ac08ccd5c2e3fadf10ae53e46e582489d1579ed0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp
+6fd536d1849ea4cce5d9b72d1dcbc1db9c818b4e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp
+d63fed0074b22584686ad4d0cdaa4388b42194d6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp
+a5df56b2cf8df9d4d8ab6fa2b3521649ef09384a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp
+f56f92e32710b0342805b785d34ba1a9f2a54ed3 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp
+554e6b7dadbb68ac0f3d2e368ca3fd90832ea254 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp
+60994cb1131d4d37b2d3fce6cc59dfea5ebb4129 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp
+37eabb1ab51cb38660eb24e294c63c8320750b96 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp
+a0d24a4bd71f999adbaa876168adef5a7d95f2b8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp
+fa4f4869d3d63c0180f30ae3736600a6627284c6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp
+d991afdb694634e9df756184b5951739fc3fd0ab - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp
+1543bbaba8f3e149239cf44be3c0d080c624d5ba - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp
+56ee9318a7b51a04baa1d25d7d9a798c733dc1bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp
+9f31213ab8037d7bb18c96a67d2630d61546544a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp
+fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp
+719d2ddbfb8555636496cb5dd74ee6776059db92 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp
+f83b3c17e9f26651f12c8835a682abdd66aed3a2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp
+e874ffeaeb6deec57605bf91eaa2af116a9762bd - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp
+c62ef84471074a9ed428b4a03e644885989b0b83 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp
+38fe8122aba8a1bc5745d81192ec7fc75934dd0d - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp
+66e91795dc65e1bc13c545a84556d200c8eb7bd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp
+4803cde0fffcf89fed46d6deaeba5c96c669a908 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp
+fe8007b3d98dad71b17595ecb67af77b198827a0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp
+62d03d24af041276ba2abb96fa1634ae4f99ea8a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h
+aeadcb0bc061b5db0fdf8aa67c1b5703976aa946 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h
+01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h
+07d22f84e6a386dad251761278a828dab64b6dd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h
+11487c992494f502d1c48ff00982998504336800 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h
+f6e1b0850f5ed0f23f263d4104523d9290bb8669 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h
+02b65d96a7a345eaa87042faf6dd94052235009c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h
+e27519c72e533a69f7433638a1d292fb9df8772e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h
+543efa25367763292067245cbc39c1382c35df77 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h
+39aece5465100489867001bf57446bcfc4999c24 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h
+6e515f398e9ae1b603e49ec32576ccd0ce5d8828 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h
+070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h
+36e80dd13c5adc64c3adc9a931d5ebbf922e9502 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h
+7974abf146f1f14cd3e3854ef63ddf52ebbeb222 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h
+cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h
+0f747fdf03bebdcd86dbdf16d00ee2d044bc906c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h
+325818d0a4d1b15447923e2ed92c938d293dc079 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h
+2067e2ca3b86014c3e6dfc51d6574d87ae12d907 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h
+d876d77caef3541ae05f310857f3d32e642fba04 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h
+78595e6262d5ab0e6232392dc0852feaf83c7585 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h
+b4d8c44957efc90ba97092987e6e43c48e85ac86 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h
+78c6d7d85b47636fbb21153425ef90c6d0b2d4e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h
+3b74682e142e94b1c68bf619169f12e5805044bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h
+8f83883126b853c97e5859dafd98847ec54d36ac - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h
+7b7d9a137027fbbedfc041465987fa4ed4198ce4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h
+cca426d571c6b01f7953180e2e550e55c629f0f4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h
+80380945c76c58648756446435d615f74630f2da - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h
+e2075486b392d6b231f2f133922ac096ca4bc095 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h
+3eea80c74a22de43b6edad21ea5873c791e093e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h
+d1e8c84f279cb30978d32c784107c0247afa6e66 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h
+750ecc85242882a9e428d5a5cf1a64f418d59c5f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h
+379d3933c90eaf9c35a0bad2bd6af960a321465f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h
+e02e5621eaea52a2266a86dcd587f4714680caf4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h
+5dff32bd1018e2c5c2540ea7fb571dbea596d5b1 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h
+4a098c4d09dedc33b86748d5fe9a30d097675e9f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h
+5bd3706ceea585df76a75dda7f9581b91ee8f998 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h
+020194b85245bad5de4dfe372a7ccb0c247d6ede - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h
+2f60ba753549b232e1b995046a356dbe0eced04a - NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c
+ebccc5c2af2863509e957fe98b01d9a14d8b0367 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h
+48f063f09bd9b0cb6c4f47d8911643790b3ffbc8 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h
+b85b49fc4ed38a241c79731a02b3b040a654a52a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h
+764e5c4364922e3953b4db0411d1d3c3bdac99f4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h
+8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h
+f59a2759281341e56372d3cb37b16715944dd8e1 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h
+e015e955a05908d4a2202213353eac89f1b80ff6 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h
+b58ed1b4372a5c84d5f3755b7090b196179a2729 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h
+b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h
+4282574b39d1bcaf394b63aca8769bb52462b89b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h
+a27eb14c54c6acb647a95c264b90e25f07fc757e - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h
+5257e84f2048b01258c78cec70987f158f6b0c44 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h
+963aebc9ec7bcb9c445eee419f72289b21680cdd - NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h
+62e510fa46465f69e9c55fabf1c8124bee3091c4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h
+3bf0416186ee90833c727f01cc891bd568ea9d0f - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h
+a346380cebac17412b4efc0aef2fad27c33b8fb5 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h
+d2b4cc6228c4b13ef77e47bf30326826c5662ed4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h
+06aa739230c00998e039b0104e5d73da85c322fe - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h
+86a59440492fd6f869aef3509f0e64a492b4550d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h
+38edc89fd4148b5b013b9e07081ba1e9b34516ac - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h
+f9311a35f375c7453d99fdde3876440b54d4cb5a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h
+1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h
+a26df21c3cc3eeb395428101f11da68386e0d72b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h
+8159b4189c577d545c1280d7d905a2dc2ba29fa7 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h
+96b9560d322f43a980db5d6cc5072e9e81fdb9d2 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h
+249d4f7317ce68c3ceb64e2b1ee257cc75eb002b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h
+8c43da4fae8a0aeb374ce46ce19eb8c38b552ae4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile
+17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h
+7d108165b4a7b6a44ac21460ea3bf4381fb48c5b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
+16a2e187afedf93bade7967816b0723708544e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h
+20213d53bb52bf9f38400e35d7963d0f4db22f96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h
+70d9251f331bbf28f5c5bbdf939ebad94db9362d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h
+8a6f26ccf2e563b78f6e189c999ba470ed35271d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h
+853d9005ec695cb5a1c7966a1f93fe0c9c8278cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h
+d4889d903bf4de06d85e55b005206ed57f28af69 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h
+6b21a68e254becdd2641bc456f194f54c23abe51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h
+c1c7047929aafc849a924c7fa9f8bc206b8e7524 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h
+71e8c5d3c4dfec6f2261654c3fc91210bff78da9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h
+64af1df50d2a5b827c1c829a303844de20527522 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h
+260b6ef87c755e55a803adad4ce49f2d57315f9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h
+4f5d723c80f607a0e5f797835d561795dbe40ada - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h
+f5f3b11c78a8b0eef40c09e1751615a47f516edb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h
+d3f5bc85b538a3a1d4c2389c81001be91205ec9f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h
+9c90df1fa1b6dd33a7e330c47e94b5b9194ad419 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h
+be3a1682574426c1bf75fcdf88278c18f2783c3f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h
+8f1994f3f8d100ddcf8b23f5b24872bed939d885 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h
+75e8a8747795fad89b4d2b662477e5454863dcc7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip.h
+d7861e2373ac04ffaf6c15caeba887f727aa41fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h
+182a47c12496b8b7da1c4fe7035d6b36d7316322 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h
+c8f714e80dd4bb60ceab0c0c7e6a5b3304940946 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h
+ef78e73ec9c0b8341bd83306d1f3b2c35e20c43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h
+867e3091a945d3d43b2f28393b40edeb9d27597b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h
+c1904d38785649d2614563d0cd7de28a15ce4486 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h
+118d0ea84ff81de16fbdc2c7daf249ee5c82ed6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h
+412d8028a548e67e9ef85cb7d3f88385e70c56f9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h
+33dbf734c9757c2c40adb2fb185e964870217743 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h
+ebafc51b2b274cd1818e471850a5efa9618eb17d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h
+4020b2a0d4f177c143db40b33d122017416dfa2e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h
+be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h
+9dd131355ed1e25a7cee7bfef00501cf6427ae92 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h
+17f6fbbd5e0a75faec21347b691f44dcb65c01aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h
+4625828efd425e1b29835ab91fcc3d2d85e92389 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
+a8fbb7a071c0e7b326f384fed7547e7b6ec81c3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h
+52b6c19cce320677bd3a4dfcf1698b236f29e59e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h
+a0cc9f36fdd73c99ad8f264efa58043d42353b0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c
+381fba24abae75d98b3ada184ed0cd57335819a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c +281fdc23f82d8bdb94b26d0093b444eb0c056f51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h +445a409950ab8f36cfa24d1dc73e59718d335263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h +2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h +5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h +910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h +e48c2ec8145a6f2099dddb24d2900e3ae94ec02e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h +727bd77cfbc9ac4989c2ab7eec171ceb516510aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h +009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h +fb242aa7a53983118ee019415076033e596374af - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h +f6875ef0da055900ef6ef1da5dc94cba2837e4d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c +01d943d6edb0c647c2b8dbc44460948665b03e7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c +394ea31caa5957cfb2c8bb8c3cc0e4703213fe7f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c +ec97ab37cdf2cec0283657c2c04a139a1a168337 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c +85ddb19f89833ca57fd2deff2e2b4566e162a56c - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c +8415bcd6ab34e356374659e965790a0715ed7971 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c +c98f76bcfc7c654a619762ebc3a2599f9aa89f8d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c +5fb73f35841c41e7376531732cb12303224e61ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c +e9626eee225e58ec2d5be756c5015775ca5e54b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c +86da3c7c09354d2c49d95562aba15cbedb543d9b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c +fc8182cc1f3af77125dbfa328996bcfe0387cc41 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c +05548338a73ade1b3c2ad1cebf1ab5eb16ef6c9b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c +07c2f10473e2fbe921b2781cc107b5e56e6373e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c +21c8184de2c9150c21ac5d6fba24e79e513a0a69 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c +da726d20eea99a96af4c10aace88f419e8ee2a34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c +5c79c271609ebcc739f8d73d7d47f0b376298438 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c +b55665d7bceaad04bbf29a68f44536518302c3d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c +f8bdd07a27296ef6aab86cc9dbccf8df811fff24 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c 
+1918ca3aa611cd9dfc79d46d038ab22706f0b1ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c +24156462f25922c8de5b5d2558db36b2e68b28ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c +c2870190ca4c4d5b3a439386583d0a7c193d6263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c +f27f52dc428a6adeb936c8cf99e1fc2d8b0ad667 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c +5acf19920d56793d96c80e8461b0d0213c871b34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c +c2d0e6bef0c4929a3ca4adfd74bd6168fa4aa000 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c +673ad86616f9863766bfec0e118c918297d32010 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c +c799d52bdc792efc377fb5cd307b0eb445c44d6a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c +94f4736acf7981cebfd74302a21f19cdbafa8d71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c +8f22c278a5839d36f74f85469b2d927d9265cb80 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c +eb09642e8b5d9333699f817caaf20483c840b376 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c +ab17e5b4cafa92aa03691a0c187ef8c9ae53fa59 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c +574b1268ff83e4e5ed4da15609247a5c0ec8f51b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c +45230e56d29c98ea0f10f87c1b16ba70c96f24d5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c +8af6062034d464f778969e26d3bf5a9b4cdaccf0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp +69fed95ab3954dd5cb26590d02cd8ba09cdff1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp +6b985fc50b5040ce1a81418bed73a60edb5d3289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp +f2a05c29383bfc8631ad31909f31a8351501eb27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp +31767fd551f3c89e5b00f54147b6a8e8fa3320e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp +110ac212ee8832c3fa3c4f45d6d33eed0301e992 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp +51af3c1ee6b74ee0c9add3fb7d50cbc502980789 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp +f96cd982b4c05351faa31d04ac30d6fa7c866bcb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp +f6c3e8bd4ee13970737e96f9d9a3e4d8afdf9695 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp +893c70c95809f463c7af6dc9c814527804fcdf53 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile +c5f16fdf43ca3d2845d120c219d1da11257072b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld +d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h +aba0bd796d932fa19e8fad55ed683ae57d68bffb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h +1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h +499e72dad20bcc283ee307471f8539b315211da4 - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h +40cb3c112bbcb6ae83a9186d0c9fa1857cf6a126 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os-interface.h +1b53bbf5f8452b8057ff2dd7828947a047db38d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h +3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h +e3679844971ecc4447259fb1bdf4fafbbdff2395 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h +4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h +1e89b4a52a5cdc6cac511ff148c7448d53cf5d5c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h +fbcbb81ae14e8bfde0d665ad20f9cab9b0bbd9c3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h +ddfedb3b81feb09ea9daadf1a7f63f6309ee6e3b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h +9c7b09c55aabbd670c860bdaf8ec9e8ff254b5e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h +cc3b2163238b2a8acb7e3ca213fb1ae6c5f0a409 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h +2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h +285ab886f5fad5caf3f6bd0b0c7102bd4c4300bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h +6ebda7ea5b17b7b9bfa9387fc838db9f0c3405a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c +b5b409625fde1b640e4e93276e35248f0fccfa4c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c +9d9035afd7af31f30cdbf2d4c75e5e09180f0981 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c +21ac9d6932199ce0755dbead297eb03c9900f8c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c +49dc935d4475b572478c63324f0832c972a4277d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c +532366fd9a288a812eca78b92b304ba3625f8c0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c +006e77a594ae98067059ad3d7e93821316859063 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c +f134270af5ecd7c5ba91bf5228fe3166b101dd6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c +690927567b5344c8030e2c52d91f824bb94e956c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c +5940d8e83cd0014e3222952eab29eebaaad19b86 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c +54b912b640bdcae42f38c41694eb20abcaad61a7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c +fb5272f3d0e465aedbc99ddcabb1c6c428837a6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c +0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h +7df66a87c9498ae73c986e60fcb9cb1cbcd19e19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h +1feab39692ea8796ac7675f4780dfd51e6e16326 - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h +28d6a6ae495d9bc032c084980ebf5d94448bcf29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h +31deee778df2651d3d21b4d9c8ab180b8dc1ff14 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h +961ed81de50e67eadf163a3a8008ce1fde1d880c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h +4db7387cc1ce08ccc62404b80b19c7f1b685e746 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h +e4d88af4eb51d32288f913d90e490e329884970b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h +35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h +df4d313c66e75fa9f4a1ff8ea2c389a6ecd6eb3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h +bff92c9767308a13df1d0858d5f9c82af155679a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h +db0dc6915302888de06e3aa094d961cfe25e0059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h +059c1ab76a5f097593f0f8a79203e14a9cec6287 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c +d50ff73efaf5bc7e9cb3f67ed07ede01e8fad6f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h +671286de97aa63201a363fd7a22c92ee8afe4c7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c +6fa4ba2da905692cd39ec09054f2bd6621aa2a7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c +5a97d4f8ce101908f1a67ffe9cc8ed00b6bf43b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c +1653c7b99cfc86db6692d9d8d6de19f1b24b9071 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c +caf2b80fa0f01b9a3efcd8326bf6375455f2e1b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c +4e1be780ac696a61f056933e5550040a2d42c6bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c +0824d200569def5bf480f2a5127911ed0ea881e6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c +f6b4e40b638faf9770b632b404170e1ceb949be5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c +db44a803d81d42bfaf84f7ea1e09dc53c662acef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c +9515ea68cdac85989e4d53d4c1251115291708dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c +08be13ced6566aced2f3446bb657dae8efb41fbe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c +77573c8518ac7622211c4bdd16524d369cc14b96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c +fa854efc5cdf4d167dee13302ee8377191624d95 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c +89543f7085fbc2ca01b5a8baae33b5de921c79e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c +0e4c2d88b61a0cf63045fe70e5ba2c81c44e37af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c +acb2a62fb60e08eb6d16518c43c974783139813b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c +834efbfff64c0d01272e49a08bd6196e341985a8 - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c +dd0bd914c6c7bfeabdd9fe87fb984702e0765624 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c +19447ad30b3fc2ee308bcc45e3409bafa5defe0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c +3abbef0a6fc95d6f7c7c5a16cbbbb51aaa457cc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c +0918cada217ca1883527fe805fc30babf7b8038d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c +e1a6dfb38025abeb5adfda929f61eb6ee44b5c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c +ed25b1e99b860468bbf22c10177e0ba99c73894f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c +8cd12c2da71acede5046c772f14aff7cbd88af12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c +01e8b56f7677f5cb7f950d9aa9bd37d04153085b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c +629566bf98be863b12e6dc6aab53d8f5ea13988c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c +b41502d73d7781496845377cebd0d445b8ca9dc6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c +8a418dce9fbeb99d5d6e175ed8c88811866f3450 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c +e26ade846573c08f7494f17a233b8a9e14685329 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c +d6e1bd038fa0eff5d3684a5a2c766fdac77f1198 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c +d4a07d1c6beb7ddb229ed6e5374343b6ce916d84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c +bc2b57acc8fa8644615168e3ddbaf7ac161a7a04 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c +2bb921b462c4b50d1f42b39b4728374c7433c8cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c +086e9a51757c3989dfe0bf89ca6c0b9c7734104a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c +56be7a21457145c3c6b2df7beb4c828b7bd1a3b4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c +5be208cc0e1eae1f85f00bb0b502fdba74d6656c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c +a64c51c515eb76208a822f1f623d11e2edd8d7ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c +a54628e9d2733c6d0470e1e73bca1573e6486ab3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c +1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c +ef2a3848e0302c09869a34eba1333d19a17acc56 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c +2c66e086bb149fb1b9ca8f860566a3f5e391b2f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c +f89e982b0e31a1898e1e4749c9a8ae9f0bb59a0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c 
+d92267a3394ded5d7d218530fd16ce00a920b1d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c +2279fd14aab9b5f20b8fc21f04dd0fca41e418c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c +11a547cbfdbce000a6e5edf48492f5b930ddbdca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c +81f66675295315cfc52be225c2e9ee912b56fbac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c +569f56831cde7bdc528ac2e543eea485025ec6f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c +05669e008dfd89e5c81381e6c60230c1fe17a876 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c +820b6e63c2b11b0764305c483142f626b6f72038 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c +bc83726df04c30d02a1852a10a22c77fdb3ef7a7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h +5f194ba056b018a8194c16b0bbb6e49c1b80a996 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c +e40f6742084cd04252f3ec8b8499a26547b478bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c +ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h +3a0f999e390d93b0db8272f55fbec56f6b055fe4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c +78f1e379c3d1df9e34baba77f78f48b8585bdc74 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c +8e40d2f35828468f34cf6863f9bf99c20dbfc827 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c +b441ee824e9c15c82956254704949317024ceb41 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h +277441b3da96fc01199f1d2f5102490e2e6cd830 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c +38d0205b68ea2c82709b42eb7e8b9cf92cec8828 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c +2f89b9059467e7f67a6a52c46aecae5cb0364ab6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c +46aa43b18480d2eb7519b2dcd0fe6a68c79b8881 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c +f2c7d77e4183994d7ee414e2a87745fcd23d995e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c +6f46dd43e4b3f2ad803a4c9492cb927aebffc1f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c +59d42b6a123b062237b3b6ca382211e35057ef1e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h +ddaf2b8e424df9147a4e2fecf3942b64b1d2b001 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c +68cc7b258f934097e9dc31a38e7e3bf2ce2fe5d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c +c3820fa4bb1192a9317ca834aeee3434c7eb8059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c +ea7be8a55a3310aa1c3926ed69c86a6491925e08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c +70507a8d43797eb3cdc13408ae8635f4a2eebce0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c +b3a29311cc22e2dae686f8ed2df6bc828aa826cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c 
+af4ffa4b423e07cf40eb863c11dbf515c7104874 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c +1793e056a0afcc5e1f5bb58b207b49c5f1556eca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c +63e5e17280d865ace8cdd8eb8a2598d3d7830ad7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c +8e5af753de1725dd919185c29d03ccb0934fab6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c +fe91b43c37b64472450cc25329d2dea74d2a9fcf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c +c0822891f614e6ec847acb971e68aad8847e0cd7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c +c68f2c96bfc6fce483a332a5824656d72986a145 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/system.c +37000b419d23a8b052fc1218f09815fafb1d89c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c +7b9c95f912b203c68b6ba1f62470dffee4b4efe3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c +677c655b0b8e86bdab13cdd4044de38647b00eec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c +8eac3ea49f9a53063f7106211e5236372d87bdaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c +b9eabee9140c62385d070628948af0dcda3b0b1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c +003e3012e87b8f8f655749db88141d74660e8d8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c +a5a31b9b62e6d19b934411995c315d4fdac71ca0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c +37d1e3dd86e6409b8e461f90386e013194c9e4d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c +ed24c0406c85dc27f0fca1bac8b0dcb7a60dca2d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c +6aa752ae480e883d077de842f02444151947f82f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c +956b7871a267b7d381d1cd7d4689ef1aec1da415 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c +9d9fcd87d784a758659b6cc8a522eaf9beac4b6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c +15f3290908931a9e4d74b0c0ec9e460956e39089 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c +623dad3ec0172ed7b3818caece0db5687d587ff3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c +64bd2007101cbf718beb707898e85f40071ae405 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c +94acdcebee0cdcbf359b15803ec841e5284e1ff2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c +079893039c2802e1b0e6fcab5d0ee0e4dc608c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c +5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h +78cbb6428372c25eba0ccf8c08e7d36d18e4bae8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c +6d5915924b4e26a5e7592427e34b77596162d0fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c +cade0f7049cdb2ab423a073887ed20ba1abdb17e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c +8a4e2aec6fc01ce1133cfc7ef80b6363c5394208 - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c +8ed5171254e51e59fc5586e729793831165b8c0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c +206dda159ecbc0340ac9329250302c76a504e5a8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c +d48d51a880fced52ad6e323d984e872ccf9ef3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c +d0ae6d7a363db3fdf54ae1a760630b52a2019637 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c +883ad1cf4ed1714eb74d44d3b9a41d6a4723b650 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c +0c9581aa68a77cb9977a7fbcfd2077ccb618206e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c +dac54d97b38ad722198ec918668f175dc5122e4e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c +1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c +d3e5f13be70c8e458401ec9bdad007dfadedcc11 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c +836ba8b401fb6b6fcf4ccde1b644ebaefc3d8ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c +9c40bfebe2c57b972683e45dc15f358aaa2280f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c +8f41e7127a65102f0035c03536c701b7ecdaa909 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c +b528ef8e238dd2c22c6549057b54fe33039c6473 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c +b6d6074ca77856fc5fe4ff1534c08c023ee592a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c +caff00b37e7f58fde886abcc2737c08526fa089e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h +66e79047600e0a40c50e709c6c82402d9b205ad0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c +da86b765702196eb0011ac9d14873fbc1589d48b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c +7cdc50ee31b9cde14c0ce6fcd390c5d4564e433d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c +a305654bafc883ad28a134a04e83bbd409e0fc06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h +2fa76d2d5ba7212f826b656aa683223a470e484c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c +6f6c83e9ee6d91fc8700e5015440f2bc72e6600b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c +9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h +b55b7b59f35d848d5a3b43d63da4d2f7b0af5d3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c +7416712aa964befcf8fede86e5a604871a2d00b8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h +6dd0c5f2384610ea075642d8e403ddd8c8db371a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h 
+87ac95cf569bb550adb3577c6a6658d094c59999 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c +a045a19d750d48387640ab659bb30f724c34b8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c +f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c +595a6238b9f04887dd418be43ff31f3e7ca6b121 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c +4418c0344b64740050ff8ef6ee085f0687a323d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c +057ad074f6252f7809a88f918986d7d5aacff568 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c +2389c9dd3b13fd2ff26d2d1342c515579079bc71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c +2975e5cecee2c1fd5f69a8ffc20a49016e83025c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c +f0ce913eb568f85e6e1c1b8965f2cd2b98e81928 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c +cba2c17804f6f2062dc5d75583e4a03e03016d1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h +133e94f73c781709f407b03d8cdfdd8865c39b4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c +801eb295d07258ad70b99cb0fe85f3421690e0c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h +46c1a2066ead316ea69c60dc323bdb649bc11c0f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c +f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h +958d9a2cddc91edfafb5c2f3d9622443ac49a6ef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c +d405e01478d26ea99cc0012fa2d6e0021bbe6213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c +182602832a033b3e2d5f88d4ba8febe63eeb2f9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c +376572489e0d4211663da22d5b0de7c7e740fb29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h +e3c4822ac998ab5c7946919c85011f6172dc35ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c +fa5e1c6001e60f77415d0a8f87c8b548b12e1217 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c +ddc0ac4e1d8b8aef15e147f1f85f8df37c196763 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h +4fe5357eabd0c5e351fb965ceead308240f68eb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h +4f4acfdefc7b9a0cdfe2d5840cc18c9c33366053 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h +1d66bab50a7d39faa2b0fec469a4512d2c7610d5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c +fbcbeb92e46ba11ac26c04c9688b3ffcf10f5c53 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h +e449382e19e4dcfcf0aec0babe5a1c8ce2f4249b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c +87a5ae8e07103074020ba052ca45ab39e918d3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c +47b7744ddd01b821bf2fd25fdb25c8d6d55ee01d - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c +c46cae4a17181c48bafc01237b83537df61c41ae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c +f42bfa3b5a801358d30f852625d8456290550f46 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h +59a87763c6abdc54828f2785a7d90e43e607bc87 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c +da3cc08f12ccee23bcb1c0d0c757b8bbcb81e4fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h +6fd6953e4ae0af707376a40ea0e4f3e70872be7b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h +162777624d03af2f17dfdc28bc35143e2ec6cdee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c +b82e5db65ad41764f456d6f924c89d76c165e48d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h +63e9d0416d5ca1fdf547b5fba9ec76e54690c9dc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h +b5ddae1e6960b13101aa38b2edc0610aed438ede - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c +499a3d9c61a86b667cc77cf8653a71f7fe85078a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h +ac842d9de5eae74ef02b0a75259fb016b80c6eac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c +88d336f88c9b72ec2c1352d4ebe00c0831eafbca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h +fb78615cde6323784f51d33f2acd61fd4030fee0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c +213ebb4fdfa3c2f64b5f998e2ad990e448d4a104 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h +a6174ad345cfdf926cbb4c86c7e8eeadfccb0ddf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c +fa785f8138598af783aefecf10b141d524e6bb42 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c +de97c5afdc34cb9aff23c3ba166e21f660cf1f47 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h +f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h +53b2c39666e1da206d44d69d54009f20440503bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h +93f9738c0e8aa715592306ddf023adf6b548dcc4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h +2b49950ba8f540ed4231c3334810edbb212bb859 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h +d614f90730e2ee78bc3aae47b4e7976500e166e7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h +4302502637f5c4146cb963801258444f2d8173e1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h +7bb406aa863430507bdf07b5f3e519c0d756220a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c +6f3fc9676df77fa24c49140331b87ed5988ed57c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h +cb02e66e5fc06aa340ab460c977961701e9ba295 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c +079ac6d2a90bd2fc9413e092a729202dbc5f724a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h +65d1ace1e68c9b39cce6db61aa8b86ee47a0ae4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c 
+e0988b45cf712f1a7662b6f822eaed3ffd9938f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h +40c937ca657bda9c0b67bd24c5047d39e596c16c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c +f8e842add67dc070cc011ea103fc56cfd81c8b9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c +3a5457a216d197af8f120c660690a55ee44bdd8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c +21e3cf689d84b1a28e11f66cc68a0bc6713108b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c +edead99d125425ddf8f2fa4e4261b8cc3bf566fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c +b07c2c5e8df4de2bb9d242fd1606f1a57b8a742d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c +bfabd5155af3172e1c0a5a0b66721ff830c7b68f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h +cc635daf3d7a9a176580951841b82e9eb0d6f5ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c +757b3ecf94d0c8914a32c4bd302f8ccfa4027856 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c +6263c1ceca0797d34a102f9846acd1fdef06fb60 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h +3b0e038829647cfe0d8807579db33416a420d1d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h +abda8536d885be1422810c184b936bbc880972eb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c +f6f40d568bcf2ae89547ad054f9b5357bac366ab - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h +ceb4dd72148dfe4a0581631147e8d7636abfd61f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h +41784541b2e9ee778b52e686288fe492c0276fec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c +d32d0b65f5f76cb56ca7cd83c0adfe5cb5330924 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h +d04adc777f547ae6d1369cf4c94963e5abf90b86 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c +ac3965eea078f1998c3a3041f14212578682e599 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c +0dae533422e24d91a29c82d7be619160bbb6f6be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h +3f5a391895fc900396bae68761fe9b4dcb382ec0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h +c3b4c6a1b90a1547e229bb2973eb19c01e1d0055 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h +285af0d0517cb191387a05ad596f74291ec81737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h +9646d1c4d472ad800c7c93eec15cc03dd9201073 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h +c370a103a4c1c9cf2df3763988e77ef8f7bc6afb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h +2239839c8a780a87e786439a49ab63e25d25001a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h +09597f23d6a5440258656be81e7e6709390128f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h +8e0e60f6d30bbed679c43b4997875989314ee88c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c +dec0f585ca46dc8e1aae49c8ea58db5a415de65c - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h +871fd0260ab9c164b8f6a7d1aba4563af622f1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h +205490d6651110f28009e752fa286f818bed22fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h +07a37ff685e68a703455e0ed7db7940697487ed2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c +cc71518b4151dc2ee0592bbd2866d437043d0e1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h +2c28d729456749f16ae03fb48b1e416706762805 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h +59c3612a596ad6b996c9d1506f9893bd1b5effee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c +81a6a28692f50efeebecad125de0585dd711ff36 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h +3f581df19314b273244c4c42ea915ec8ef0d8ce2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h +e839f8a5ebef5f28818bb5824bd7c52320db9a74 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h +e0b8f64c042dcbb6340552cb3517dabdeb490f1b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h +7523c2ee9228ad0e2fb3566b23b9720d7896afae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c +ad50b3dbe1685eefe51c4fc296f3eade70789dfb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h +ca042cfcdfe8cc8a141f8bb5c9e6c05d8a71b707 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h +2ab6933e07a84c64dfcbeef3b3f4e3f14249d8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h +ffd4f01212709e321d4097e424fe5d32038f5d8b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c +12776c69191b583ffcf0914697cf41802f52ef01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h +05cb2fed8648f07b54dc2e8bacbafb323ea8262e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h +0b15dd4515c5e436a659883a48e62bf3c68bf439 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h +0269da77a8db8efde1debc8236f2b3de2cd2597e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h +1bdccdbabf5ae52fd65b829c35079bb7a8734939 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c +410a759c949904b7ae1eecafb31143fad579c0a1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c +73c598515eb7985c8f4cace0946ec9613960be6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c +73a37ad59b9b13b61eb944748b6c2ba3cad7b630 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h +8915f69e67e1f3a809a5479e36280df06ce8dd90 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c +d792fbb20b6ca5f2d62addf6a94b0c5027ae15fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h +6124890a54e529dff8b9d6ecf8f4bebe1e10a8a2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h +cb03502bf603c88b709ec803b60efd1d6f8e5ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h +b378d336af4d5cb4b1fb13b85042fad1fe02f4cc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h 
+7c1b36cca9e8bf1fe18284685a6a80620df348cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h +cd833a822c1ce96c79135ba7221d24f347ceadb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h +a016a7d8e07389736c388cb973f3b2a177ea917d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c +42d784e8b478bbf48293a805aa227f0abdf1923b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c +b29061454e7d8daa0cef0787f12726d105faf5c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c +4b9f2ee66b59181f226e1af5087db6ea80f1ee27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h +23d16b4534103f24fac5bb86eb8bab40e5bcba57 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c +e48b8b6ba9da5630a7ade526acbb94e50d9b636d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h +b86536778197748c707c3e9e4c73c5fbcb037e32 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h +07fd5f5534a6d751107f582ba187c7a53a139954 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h +f4a5684d5a877b90c7ae7b66436117c6feb65f91 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h +ab79a1418b65b9d65081456583169f516dd510c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c +bd048add5f0781d90b55a5293881a2f59ace3070 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h +e50c91a674508b23b072e0dd2edbf743f24b333d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c +df070e15630a11b2f4b64d52228fa5a6e7ab2aa9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h +0f3140b5eae77a6055f32a91cb13b026bbb23905 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h +76b1f545e3712a2f8e7c31b101acd9dd682c52f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c +14450b18d002d4e1786d4630ef4f1994c07ef188 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h +7b0201852361118f277ee7cc6dd16212c0192f71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h +3d3385445934719abda1fefd4eb0762937be0e61 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c +c4fde03d5939b0eef108fde9c2f10661568f22a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h +5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h +76b24227c65570898c19e16bf35b2cad143f3d05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h +61c7d3ac2dc61ee81abd743a6536a439592ee162 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h +bf894a769c46d5d173e3875cd9667bb3fe82feb9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h +f17b704f2489ffedcc057d4a6da77c42ece42923 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h +0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h +426c6ab6cecc3b1ba540b01309d1603301a86db1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h +c33ab6494c9423c327707fce2bcb771328984a3c - 
NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h
+6b27c9edf93f29a31787d9acaaefb2cefc31e7d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
+1938fd2511213c8003864d879cf1c41ae1169a5f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h
+cf3d1427394c425c543e253adf443192ca613762 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h
+ce3302c1890e2f7990434f7335cb619b12dee854 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
+97d0a067e89251672f191788abe81cf26dcb335f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h
+61711ed293ee6974a6ed9a8a3732ae5fedcdc666 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
+b39826404d84e0850aa3385691d8dde6e30d70d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
+51a209575d3e3fe8feb7269ece7df0846e18ca2a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
+277a2719f8c063037c6a9ed55ade2b1cb17f48ae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
+74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h
+be7da8d1106ee14ff808d86abffb86794299b2df - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h
+576216219d27aa887beeccefc22bcead4d1234d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h
+5179f01acf7e9e251552dc17c0dcd84f7d341d82 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
+9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
+f758ea5f9cbd23a678290ef0b8d98d470e3499e0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
+6756126ddd616d6393037bebf371fceacaf3a9f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
+20416f7239833dcaa743bbf988702610e9251289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
+a29f55d5fbc90dade83df3ef3263018633675284 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
+82abc2458910250c1a912e023f37e87c1c9bbb9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
+889ba18a43cc2b5c5e970a90ddcb770ce873b785 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
+b52e6a0499640e651aa4200b2c8a1653df04a420 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
+24d01769b39a6dd62574a95fad64443b05872151 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
+efc50bb2ff6ccf1b7715fd413ca680034920758e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
+ccca322d29ae171ee81c95d58e31f1c109429ae7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h
+1e3bebe46b7f2f542eedace554a4156b3afb51f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
+ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
+5f60ac544252b894ac7ecc0c6dc4446e6275eae5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h
+2baec15f4c68a9c59dd107a0db288e39914e6737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h
+a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
+61e3704cd51161c9804cb168d5ce4553b7311973 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h
+99a27d87c7f1487f8df5781d284c2e9a83525892 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h
+497492340cea19a93b62da69ca2000b811c8f5d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h
+f3028fbcafe73212a94d295951122b532ff5445b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h
+b4bae9ea958b4d014908459e08c93319784c47dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h
+ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h
+2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h
+c9cb08c7c73c0bdd75a320640d16bf4b4defe873 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h
+f19dad1746e639d866c700c2f871fcc0144f2e5e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h
+f1713ecc0b3e58e46c346409dbf4630aa6f7f3ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h
+255c28b9bd27098382bace05af3ad7f195d12895 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h
+4453fe6463e3155063f2bdbf36f44697606a80a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h
+7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h
+b259f23312abe56d34a8f0da36ef549ef60ba5b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
+c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/traceable.h
+7e75b5d99376fba058b31996d49449f8fe62d3f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h
+fd780f85cb1cd0fd3914fa31d1bd4933437b791d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/tracer.h
+3a28bf1692efb34d2161907c3781401951cc2d4f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h
+c8496199cd808ed4c79d8e149961e721ad96714e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h
+e5b881419bc00d925eba9f8493f6b36cf3ce7ca7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h
+408c0340350b813c3cba17fd36171075e156df72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h
+cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h
+af25180a08db4d5d20afd09f948b15d8c4d2d738 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h
+457c02092adfc1587d6e3cd866e28c567acbc43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h
+bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h
+cbfff1f06eecc99fb5a1c82d43397043058f02fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h
+f929d43974893cd155ab2f5f77606f0040fe3e39 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h
+b5859c7862fb3eeb266f7213845885789801194a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h
+37f267155ddfc3db38f110dbb0397f0463d055ff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h
+bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h
+ed496ab6e8b64d3398f929146e908c5a453a03d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h
+b319914c97f9978488e8fb049d39c72ed64fd4d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h
+b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h
+8ef620afdf720259cead00d20fae73d31e59c2f7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
+2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
+e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
+ea32018e3464bb1ac792e39227badf482fa2dc67 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
+5b151d0d97b83c9fb76b76c476947f9e15e774ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
+0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
+4c386104eaead66c66df11258c3f1182b46e96ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
+a5f49a031db4171228a27482d091283e84632ace - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h
+d15991bc770c5ab41fe746995294c5213efa056b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
+5ae08b2077506cbc41e40e1b3672e615ce9d910f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h
+02d6a37ef1bb057604cb98a905fa02429f200c96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h
+1a08e83fd6f0a072d6887c60c529e29211bcd007 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
+2d4afabd63699feec3aea5e89601db009fc51a08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h
+5e9928552086947b10092792db4a8c4c57a84adf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h
+2f05394872ffa95d700b7822489fa59f74ad5819 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h
+fff3ebc8527b34f8c463daad4d20ee5e33321344 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h
+04dba2b7a6a360f3e855a7d6a7484ddcdfb90c19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h
+f8d9eb5f6a6883de962b63b4b7de35c01b20182f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h
+601edb7333b87349d791d430f1cac84fb6fbb919 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h
+9255fff39d7422ca4a56ba5ab60866779201d3e8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h
+8dd7f2d9956278ed036bbc288bff4dde86a9b509 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h
+e53d5fc9b66dbec4c947224050866cec30b2f537 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h
+398e4cd63852a18da6e42b920eacd927a2c38bc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h
+ba3c81e9eae32eefbf81818b48fdf6ccd7e73163 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h
+18321894aa7631b491ea39edc2d45d1028cdc9c6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h
+167f49cccc912430bb6b3cb77395f665a32cc8be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h
+1ed5d8ae82f37112b163187fa48d2720957e6bdf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h
+62a18f19f79512ebccdf286068e0b557c7926e13 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h
+00433b51c4d6254fd4dfc3dcd9b4ad59e485e7c0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h
+1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h
+5cadc87ba685991c7d4c6d453dcc9a2cca4398bf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h
+664ff0e10e893923b70425fa49c9c48ed0735573 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h
+bdb558ee8f782e6be06fc262820f6bd9ce75bd51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h
+56b8bae7756ed36d0831f76f95033f74eaab01db - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
+7239704e6fe88b9d75984fb5e9f4b5706502d7f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h
+e08146f5de1596f5337c49cfbe180e30e880dedb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h
+d2c035e67e295b8f33f0fc52d9c30e43c5d7c2ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
+cd033fe116a41285a979e629a2ee7b11ec99369f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h
+2dec1c73507f66736674d203cc4a00813ccb11bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h
+a0d3d164eb92280353cdc4458d2561aae8a68c1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h
+89ece4711626bf1e4197c69bd5754e2798214d76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h
+bacdb2c1a1dbf182a0a3be15efa0a5f83365118f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h
+df174d6b4f718ef699ca6f38c16aaeffa111ad3c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h
+841ddca998b570feb1d59b50d644c8f2b59ae8e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h
+b795f5cb77ecd2cc407102900b63977cfb34bbfd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h
+3dcee4e110f4c571e7f49fae2f2d0630d008a906 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h
+46345715dde843be2890b33f191b2f3b69385e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h
+a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h
+b93c2532babf176f7b91735682e7d7cdc41f96f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h
+147d47ef4bd860394d1d8ae82c68d97887e2898b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h
+6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h
+3e656d5ed1f5df898ec444921ce77a40ead66b28 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h
+3ac7ddf3d402f3fd20cffe9d4e93f457de319605 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h
+2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h
+22420ad669a9809602f111385b7840556e58ecff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h
+6ad1beaa2783a57330240d47b373930cd36ca5d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h
+2805fad632acad045044e0b8417de88032177300 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h
+23afbd04f4e4b3301edcfdec003c8e936d898e38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
+eedda5c4b0611c3b95f726b0a2db4b0a23b7b1cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
+a8c9b83169aceb5f97d9f7a411db449496dc18f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h
+aafca30178f49676f640be9c6d34f623a3e3a9a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h
+600ad8781585e87df49ab1aaa39a07c8e8de74f5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
+0747ee16c7e6c726f568867d0fbbad411c8795c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
+2a76929dc6b0e8624d02002600bc454cc851dee4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
+1d6a239ed6c8dab1397f056a81ff456141ec7f9c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h
+31f2042e852f074970644903335af5ffa2b59c38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
+65a237b66732aafe39bc4a14d87debd2b094fb83 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h
+c9e75f7b02241ededa5328a4f559e70dec60d159 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h
+3924b67e6d63e9a15876331c695daaf679454b05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h
+a28ab42de95e4878fb46e19d7b965c23f92b3213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h
+4cd6b110470da3aee29e999e096ca582104fab21 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h
+1dacc1c1efc757c12e4c64eac171474a798b86fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h
+969cbac56935a80fafd7cceff157b27e623f9429 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h
diff --git a/push_info.txt b/push_info.txt
new file mode 100644
index 0000000..738a8cd
--- /dev/null
+++ b/push_info.txt
@@ -0,0 +1 @@
+jetson_35.4.1